path: root/tools/perf/scripts/python
author    Pravin B Shelar <pshelar@nicira.com>   2015-02-12 09:58:48 -0800
committer David S. Miller <davem@davemloft.net>  2015-02-14 20:20:40 -0800
commit    26ad0b83587fb6e9a20eef388b0587ada3da5d06 (patch)
tree      7d22cac315134610c591bd8a2ba49a61efd3b410 /tools/perf/scripts/python
parent    65bab84c79077f3acbf7dcf2aec0588226d43663 (diff)
openvswitch: Fix key serialization.
Fix typo where mask is used rather than key.

Fixes: 74ed7ab9264 ("openvswitch: Add support for unique flow IDs.")
Reported-by: Joe Stringer <joestringer@nicira.com>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Joe Stringer <joestringer@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
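The fix itself is a one-line substitution in the openvswitch netlink code. As a rough, self-contained illustration of the bug class only (the struct layout, field names, and helper below are hypothetical stand-ins, not the actual patch): because the mask embeds a key-shaped field, writing mask where key was meant compiles cleanly and simply serializes the wrong data.

    /*
     * Hypothetical sketch of the bug class -- names are illustrative
     * assumptions, not the verbatim openvswitch patch. A flow carries
     * both its own key and a mask whose embedded field has the same
     * type, so passing the mask where the key was meant is accepted
     * silently by the compiler.
     */
    #include <stdio.h>

    struct sw_flow_key  { int in_port; };
    struct sw_flow_mask { struct sw_flow_key key; };
    struct sw_flow {
            struct sw_flow_key  key;
            struct sw_flow_mask *mask;
    };

    /* Stand-in for the netlink serialization helper. */
    static void put_key(const struct sw_flow_key *k)
    {
            printf("in_port=%d\n", k->in_port);
    }

    int main(void)
    {
            struct sw_flow_mask mask = { .key = { .in_port = -1 } };
            struct sw_flow flow = { .key = { .in_port = 7 }, .mask = &mask };

            put_key(&flow.mask->key);  /* typo: serializes the mask */
            put_key(&flow.key);        /* fix: serializes the key   */
            return 0;
    }

Compiled with any C compiler, the first call prints the mask's sentinel value instead of the flow's port number: the silent wrong-data symptom such a typo produces, with no type error to catch it.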
Diffstat (limited to 'tools/perf/scripts/python')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--arch/arm/mach-davinci/mux.h5
-rw-r--r--arch/arm/mach-dove/common.c4
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c6
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c22
-rw-r--r--arch/arm/mach-imx/mach-imx7d.c2
-rw-r--r--arch/arm/mach-lpc32xx/Kconfig1
-rw-r--r--arch/arm/mach-mvebu/board-v7.c3
-rw-r--r--arch/arm/mach-omap1/omap-dma.c13
-rw-r--r--arch/arm/mach-omap1/pm.h4
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c2
-rw-r--r--arch/arm/mach-orion5x/board-mss2.c2
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c6
-rw-r--r--arch/arm/mach-orion5x/kurobox_pro-setup.c2
-rw-r--r--arch/arm/mach-orion5x/mv2120-setup.c2
-rw-r--r--arch/arm/mach-orion5x/net2big-setup.c2
-rw-r--r--arch/arm/mach-orion5x/terastation_pro2-setup.c2
-rw-r--r--arch/arm/mach-orion5x/ts209-setup.c2
-rw-r--r--arch/arm/mach-orion5x/ts409-setup.c2
-rw-r--r--arch/arm/mach-pxa/spitz.c163
-rw-r--r--arch/arm/mach-s3c/irq-uart-s3c64xx.h2
-rw-r--r--arch/arm/mach-s3c/mach-crag6410-module.c2
-rw-r--r--arch/arm/mach-s3c/pm.h2
-rw-r--r--arch/arm/mach-s3c/s3c64xx.h11
-rw-r--r--arch/arm/mach-versatile/platsmp-realview.c1
-rw-r--r--arch/arm/mach-versatile/spc.c2
-rw-r--r--arch/arm/mm/dma-mapping.c12
-rw-r--r--arch/arm/mm/fault-armv.c6
-rw-r--r--arch/arm/mm/mmap.c7
-rw-r--r--arch/arm/mm/mmu.c6
-rw-r--r--arch/arm/vfp/vfpinstr.h48
-rw-r--r--arch/arm64/Kconfig47
-rw-r--r--arch/arm64/Makefile3
-rw-r--r--arch/arm64/boot/Makefile5
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi16
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts37
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts27
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts34
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile1
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi14
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts231
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts260
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi503
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-s400.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts5
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts145
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi136
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts4
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts4
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts4
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts64
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712.dtsi283
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts2
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov9.dtsi46
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov920.dtsi40
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile9
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi29
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts7
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi17
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi34
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi17
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi63
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi114
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi129
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi138
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-evk.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts48
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts (renamed from arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts)6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi (renamed from arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi)0
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi15
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts48
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts47
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts46
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts66
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi359
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts (renamed from arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts)6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi (renamed from arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi)0
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi67
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts284
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi76
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm.dtsi27
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi36
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi36
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi36
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi212
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts185
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts468
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts163
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi628
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts301
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts320
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi172
-rw-r--r--arch/arm64/boot/dts/freescale/imx93.dtsi47
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts340
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi454
-rw-r--r--arch/arm64/boot/dts/freescale/mba8mx.dtsi9
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/s32g2.dtsi50
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-evb.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/s32g3.dtsi52
-rw-r--r--arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/s32v234.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6357.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6358.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6359.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7981b.dtsi42
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi26
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186.dtsi345
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188.dtsi482
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi15
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365-evk.dts86
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365.dtsi43
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts2
-rw-r--r--arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts80
-rw-r--r--arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts83
-rw-r--r--arch/arm64/boot/dts/nuvoton/ma35d1.dtsi151
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi64
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi53
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi135
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi142
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi125
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts108
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi90
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts230
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi215
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts17
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi26
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234.dtsi33
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile10
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5332.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts27
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts24
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi62
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts31
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts20
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts87
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts158
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi215
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi162
-rw-r--r--arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/msm8929.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts27
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi252
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts17
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8976.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts231
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi29
-rw-r--r--arch/arm64/boot/dts/qcom/pm8950.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8950.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8994.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/qcm2290.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-idp.dts33
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sa8155p.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sa8295p-adp.dts83
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi25
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p.dtsi1006
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts38
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi18
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-primus.dts64
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi181
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-crd.dts16
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts83
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75-idp.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75.dtsi65
-rw-r--r--arch/arm64/boot/dts/qcom/sm4450.dtsi38
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts343
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-mtp.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi31
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-hdk.dts113
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi252
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-hdk.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-qrd.dts28
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi291
-rw-r--r--arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts807
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-crd.dts96
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts21
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi835
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts13
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts13
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-qcp.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi484
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile5
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi26
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi22
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi128
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi207
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts51
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts212
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi147
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043u.dtsi53
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso62
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts3
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r9a08g045.dtsi130
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057.dtsi513
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts256
-rw-r--r--arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi76
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi86
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi30
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi43
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi33
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi86
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi18
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile9
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts179
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi320
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts32
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi20
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-base.dtsi3019
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi (renamed from arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi)2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi133
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi37
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts49
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-t.dtsi (renamed from arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi)2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi3070
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts725
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts663
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts18
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts571
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi121
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568.dtsi43
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi118
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts349
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts60
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts909
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi1041
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts1237
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts903
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts54
-rw-r--r--arch/arm64/boot/dts/sprd/sc2731.dtsi5
-rw-r--r--arch/arm64/boot/dts/sprd/sc9836-openphone.dts3
-rw-r--r--arch/arm64/boot/dts/sprd/sc9836.dtsi3
-rw-r--r--arch/arm64/boot/dts/sprd/sc9860.dtsi3
-rw-r--r--arch/arm64/boot/dts/sprd/sc9863a.dtsi8
-rw-r--r--arch/arm64/boot/dts/sprd/sharkl64.dtsi3
-rw-r--r--arch/arm64/boot/dts/sprd/sp9860g-1h10.dts3
-rw-r--r--arch/arm64/boot/dts/sprd/ums512.dtsi4
-rw-r--r--arch/arm64/boot/dts/sprd/whale2.dtsi43
-rw-r--r--arch/arm64/boot/dts/ti/Makefile7
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi34
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-main.dtsi18
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-main.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts7
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts146
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts104
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi51
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi16
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-idk.dtso69
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso27
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso47
-rw-r--r--arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts393
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi37
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts53
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi21
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-sk.dts41
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi37
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi37
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts246
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-main.dtsi62
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm.dts58
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi38
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4.dtsi8
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708.dtsi44
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts1
-rw-r--r--arch/arm64/configs/defconfig11
-rw-r--r--arch/arm64/crypto/poly1305-armv8.pl6
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/alternative-macros.h4
-rw-r--r--arch/arm64/include/asm/arm_pmuv3.h53
-rw-r--r--arch/arm64/include/asm/cpufeature.h6
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/el2_setup.h25
-rw-r--r--arch/arm64/include/asm/esr.h93
-rw-r--r--arch/arm64/include/asm/fpsimd.h2
-rw-r--r--arch/arm64/include/asm/hwcap.h1
-rw-r--r--arch/arm64/include/asm/hypervisor.h11
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h1
-rw-r--r--arch/arm64/include/asm/kvm_asm.h6
-rw-r--r--arch/arm64/include/asm/kvm_host.h34
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm64/include/asm/kvm_nested.h40
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h42
-rw-r--r--arch/arm64/include/asm/mem_encrypt.h15
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/mman.h16
-rw-r--r--arch/arm64/include/asm/mmu.h2
-rw-r--r--arch/arm64/include/asm/mmu_context.h46
-rw-r--r--arch/arm64/include/asm/mmzone.h13
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h20
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h8
-rw-r--r--arch/arm64/include/asm/pgtable.h64
-rw-r--r--arch/arm64/include/asm/pkeys.h106
-rw-r--r--arch/arm64/include/asm/por.h33
-rw-r--r--arch/arm64/include/asm/processor.h6
-rw-r--r--arch/arm64/include/asm/ptdump.h43
-rw-r--r--arch/arm64/include/asm/set_memory.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h26
-rw-r--r--arch/arm64/include/asm/thread_info.h2
-rw-r--r--arch/arm64/include/asm/topology.h1
-rw-r--r--arch/arm64/include/asm/traps.h1
-rw-r--r--arch/arm64/include/asm/vdso/getrandom.h50
-rw-r--r--arch/arm64/include/asm/vdso/vsyscall.h15
-rw-r--r--arch/arm64/include/asm/vncr_mapping.h1
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h1
-rw-r--r--arch/arm64/include/uapi/asm/mman.h9
-rw-r--r--arch/arm64/include/uapi/asm/sigcontext.h13
-rw-r--r--arch/arm64/kernel/cpu_errata.c10
-rw-r--r--arch/arm64/kernel/cpufeature.c23
-rw-r--r--arch/arm64/kernel/cpuinfo.c3
-rw-r--r--arch/arm64/kernel/entry-common.c2
-rw-r--r--arch/arm64/kernel/hibernate.c2
-rw-r--r--arch/arm64/kernel/pci.c191
-rw-r--r--arch/arm64/kernel/process.c97
-rw-r--r--arch/arm64/kernel/ptrace.c46
-rw-r--r--arch/arm64/kernel/signal.c62
-rw-r--r--arch/arm64/kernel/smp.c160
-rw-r--r--arch/arm64/kernel/traps.c26
-rw-r--r--arch/arm64/kernel/vdso.c6
-rw-r--r--arch/arm64/kernel/vdso/Makefile25
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S4
-rw-r--r--arch/arm64/kernel/vdso/vgetrandom-chacha.S172
-rw-r--r--arch/arm64/kernel/vdso/vgetrandom.c15
-rw-r--r--arch/arm64/kvm/Kconfig17
-rw-r--r--arch/arm64/kvm/Makefile3
-rw-r--r--arch/arm64/kvm/arm.c15
-rw-r--r--arch/arm64/kvm/at.c1101
-rw-r--r--arch/arm64/kvm/emulate-nested.c81
-rw-r--r--arch/arm64/kvm/fpsimd.c5
-rw-r--r--arch/arm64/kvm/guest.c6
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/fault.h5
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h3
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h27
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c21
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c6
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c48
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c97
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c3
-rw-r--r--arch/arm64/kvm/nested.c56
-rw-r--r--arch/arm64/kvm/pmu-emul.c14
-rw-r--r--arch/arm64/kvm/pmu.c87
-rw-r--r--arch/arm64/kvm/ptdump.c268
-rw-r--r--arch/arm64/kvm/sys_regs.c422
-rw-r--r--arch/arm64/kvm/sys_regs.h23
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c12
-rw-r--r--arch/arm64/kvm/vgic/vgic.c14
-rw-r--r--arch/arm64/kvm/vgic/vgic.h6
-rw-r--r--arch/arm64/mm/Makefile2
-rw-r--r--arch/arm64/mm/contpte.c6
-rw-r--r--arch/arm64/mm/fault.c55
-rw-r--r--arch/arm64/mm/init.c51
-rw-r--r--arch/arm64/mm/ioremap.c23
-rw-r--r--arch/arm64/mm/mem_encrypt.c50
-rw-r--r--arch/arm64/mm/mmap.c11
-rw-r--r--arch/arm64/mm/mmu.c45
-rw-r--r--arch/arm64/mm/proc.S4
-rw-r--r--arch/arm64/mm/ptdump.c70
-rw-r--r--arch/arm64/mm/trans_pgd.c6
-rw-r--r--arch/arm64/net/bpf_jit_comp.c508
-rw-r--r--arch/arm64/tools/cpucaps1
-rw-r--r--arch/arm64/tools/sysreg30
-rw-r--r--arch/csky/abiv1/mmap.c3
-rw-r--r--arch/csky/kernel/vdso.c28
-rw-r--r--arch/hexagon/kernel/vdso.c14
-rw-r--r--arch/loongarch/Kconfig2
-rw-r--r--arch/loongarch/configs/loongson3_defconfig1
-rw-r--r--arch/loongarch/include/asm/Kbuild2
-rw-r--r--arch/loongarch/include/asm/cpu-features.h1
-rw-r--r--arch/loongarch/include/asm/cpu.h2
-rw-r--r--arch/loongarch/include/asm/hardirq.h3
-rw-r--r--arch/loongarch/include/asm/irq.h31
-rw-r--r--arch/loongarch/include/asm/kvm_csr.h6
-rw-r--r--arch/loongarch/include/asm/kvm_host.h37
-rw-r--r--arch/loongarch/include/asm/kvm_para.h12
-rw-r--r--arch/loongarch/include/asm/kvm_vcpu.h11
-rw-r--r--arch/loongarch/include/asm/loongarch.h29
-rw-r--r--arch/loongarch/include/asm/mmzone.h16
-rw-r--r--arch/loongarch/include/asm/paravirt.h7
-rw-r--r--arch/loongarch/include/asm/qspinlock.h41
-rw-r--r--arch/loongarch/include/asm/smp.h2
-rw-r--r--arch/loongarch/include/asm/topology.h1
-rw-r--r--arch/loongarch/include/asm/vdso/getrandom.h39
-rw-r--r--arch/loongarch/include/asm/vdso/vdso.h6
-rw-r--r--arch/loongarch/include/asm/vdso/vsyscall.h8
-rw-r--r--arch/loongarch/include/uapi/asm/Kbuild2
-rw-r--r--arch/loongarch/include/uapi/asm/kvm.h20
-rw-r--r--arch/loongarch/include/uapi/asm/kvm_para.h21
-rw-r--r--arch/loongarch/kernel/cpu-probe.c3
-rw-r--r--arch/loongarch/kernel/irq.c12
-rw-r--r--arch/loongarch/kernel/numa.c21
-rw-r--r--arch/loongarch/kernel/paravirt.c52
-rw-r--r--arch/loongarch/kernel/setup.c2
-rw-r--r--arch/loongarch/kernel/smp.c10
-rw-r--r--arch/loongarch/kernel/vdso.c1
-rw-r--r--arch/loongarch/kvm/exit.c46
-rw-r--r--arch/loongarch/kvm/vcpu.c340
-rw-r--r--arch/loongarch/kvm/vm.c69
-rw-r--r--arch/loongarch/mm/mmap.c5
-rw-r--r--arch/loongarch/vdso/Makefile7
-rw-r--r--arch/loongarch/vdso/vdso.lds.S1
-rw-r--r--arch/loongarch/vdso/vgetrandom-chacha.S242
-rw-r--r--arch/loongarch/vdso/vgetrandom.c10
-rw-r--r--arch/m68k/configs/amiga_defconfig2
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig2
-rw-r--r--arch/m68k/configs/bvme6000_defconfig2
-rw-r--r--arch/m68k/configs/hp300_defconfig2
-rw-r--r--arch/m68k/configs/mac_defconfig2
-rw-r--r--arch/m68k/configs/multi_defconfig2
-rw-r--r--arch/m68k/configs/mvme147_defconfig2
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/configs/q40_defconfig2
-rw-r--r--arch/m68k/configs/sun3_defconfig2
-rw-r--r--arch/m68k/configs/sun3x_defconfig2
-rw-r--r--arch/m68k/include/asm/cmpxchg.h15
-rw-r--r--arch/m68k/kernel/process.c2
-rw-r--r--arch/m68k/kernel/setup_no.c2
-rw-r--r--arch/m68k/q40/q40ints.c2
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/alchemy/common/dma.c23
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c2
-rw-r--r--arch/mips/configs/generic/board-ocelot.config1
-rw-r--r--arch/mips/crypto/crc32-mips.c70
-rw-r--r--arch/mips/include/asm/cmp.h8
-rw-r--r--arch/mips/include/asm/dec/prom.h1
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000_dma.h1
-rw-r--r--arch/mips/include/asm/mach-ip27/mmzone.h1
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h4
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h3
-rw-r--r--arch/mips/include/asm/mips_mt.h2
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/mips/include/uapi/asm/socket.h6
-rw-r--r--arch/mips/jazz/setup.c2
-rw-r--r--arch/mips/kernel/mips-mt.c77
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/loongson64/numa.c28
-rw-r--r--arch/mips/mm/mmap.c5
-rw-r--r--arch/mips/ralink/irq-gic.c1
-rw-r--r--arch/mips/ralink/timer-gic.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c12
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c2
-rw-r--r--arch/nios2/mm/init.c12
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/mman.h14
-rw-r--r--arch/parisc/include/asm/processor.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h6
-rw-r--r--arch/parisc/kernel/entry.S6
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c5
-rw-r--r--arch/parisc/kernel/syscall.S14
-rw-r--r--arch/parisc/kernel/time.c261
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/parisc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/Kconfig39
-rw-r--r--arch/powerpc/Kconfig.debug6
-rw-r--r--arch/powerpc/boot/xz_config.h3
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/crypto/Kconfig1
-rw-r--r--arch/powerpc/crypto/curve25519-ppc64le-core.c1
-rw-r--r--arch/powerpc/include/asm/asm-compat.h8
-rw-r--r--arch/powerpc/include/asm/atomic.h5
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h29
-rw-r--r--arch/powerpc/include/asm/code-patching.h37
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/interrupt.h6
-rw-r--r--arch/powerpc/include/asm/mman.h2
-rw-r--r--arch/powerpc/include/asm/mmu_context.h12
-rw-r--r--arch/powerpc/include/asm/mmzone.h6
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h3
-rw-r--r--arch/powerpc/include/asm/nohash/pgalloc.h8
-rw-r--r--arch/powerpc/include/asm/percpu.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/rtas.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h4
-rw-r--r--arch/powerpc/include/asm/uaccess.h7
-rw-r--r--arch/powerpc/include/asm/vdso/getrandom.h54
-rw-r--r--arch/powerpc/include/asm/vdso/vsyscall.h6
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h17
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/cacheinfo.c2
-rw-r--r--arch/powerpc/kernel/eeh.c201
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S17
-rw-r--r--arch/powerpc/kernel/head_8xx.S78
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S144
-rw-r--r--arch/powerpc/kernel/interrupt.c6
-rw-r--r--arch/powerpc/kernel/kprobes.c65
-rw-r--r--arch/powerpc/kernel/module_64.c5
-rw-r--r--arch/powerpc/kernel/nvram_64.c8
-rw-r--r--arch/powerpc/kernel/optprobes.c2
-rw-r--r--arch/powerpc/kernel/process.c22
-rw-r--r--arch/powerpc/kernel/rethook.c73
-rw-r--r--arch/powerpc/kernel/secvar-sysfs.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/stacktrace.c6
-rw-r--r--arch/powerpc/kernel/static_call.c2
-rw-r--r--arch/powerpc/kernel/syscall.c2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/vdso.c35
-rw-r--r--arch/powerpc/kernel/vdso/Makefile57
-rw-r--r--arch/powerpc/kernel/vdso/cacheflush.S2
-rw-r--r--arch/powerpc/kernel/vdso/datapage.S4
-rw-r--r--arch/powerpc/kernel/vdso/getrandom.S58
-rw-r--r--arch/powerpc/kernel/vdso/gettimeofday.S17
-rw-r--r--arch/powerpc/kernel/vdso/vdso32.lds.S1
-rw-r--r--arch/powerpc/kernel/vdso/vdso64.lds.S1
-rw-r--r--arch/powerpc/kernel/vdso/vgetrandom-chacha.S365
-rw-r--r--arch/powerpc/kernel/vdso/vgetrandom.c14
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c18
-rw-r--r--arch/powerpc/kvm/powerpc.c12
-rw-r--r--arch/powerpc/lib/code-patching.c70
-rw-r--r--arch/powerpc/lib/crtsavres.S2
-rw-r--r--arch/powerpc/lib/test-code-patching.c41
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c20
-rw-r--r--arch/powerpc/mm/book3s64/slice.c10
-rw-r--r--arch/powerpc/mm/mem.c19
-rw-r--r--arch/powerpc/mm/nohash/8xx.c9
-rw-r--r--arch/powerpc/mm/numa.c26
-rw-r--r--arch/powerpc/mm/pgtable-frag.c6
-rw-r--r--arch/powerpc/mm/pgtable.c6
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_lpbfifo.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c1
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c2
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig7
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype11
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c8
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c17
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c7
-rw-r--r--arch/powerpc/platforms/maple/maple.h1
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c2
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-kmsg.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c234
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c39
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c16
-rw-r--r--arch/powerpc/platforms/pseries/papr-vpd.c5
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c2
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c2
-rw-r--r--arch/powerpc/platforms/pseries/vas-sysfs.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/pmi.c2
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/riscv/Kconfig21
-rw-r--r--arch/riscv/Makefile6
-rw-r--r--arch/riscv/boot/Makefile3
-rw-r--r--arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts9
-rw-r--r--arch/riscv/boot/dts/sophgo/cv18xx.dtsi16
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts77
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042.dtsi148
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi6
-rw-r--r--arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts17
-rw-r--r--arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi12
-rw-r--r--arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts5
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi83
-rw-r--r--arch/riscv/configs/defconfig20
-rw-r--r--arch/riscv/configs/nommu_k210_defconfig1
-rw-r--r--arch/riscv/configs/nommu_k210_sdcard_defconfig1
-rw-r--r--arch/riscv/errata/sifive/errata_cip_453.S8
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/acpi.h2
-rw-r--r--arch/riscv/include/asm/bitops.h43
-rw-r--r--arch/riscv/include/asm/cacheflush.h18
-rw-r--r--arch/riscv/include/asm/exec.h8
-rw-r--r--arch/riscv/include/asm/fence.h1
-rw-r--r--arch/riscv/include/asm/hwcap.h1
-rw-r--r--arch/riscv/include/asm/irq.h60
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_pmu.h21
-rw-r--r--arch/riscv/include/asm/mmzone.h13
-rw-r--r--arch/riscv/include/asm/page.h29
-rw-r--r--arch/riscv/include/asm/pgtable.h28
-rw-r--r--arch/riscv/include/asm/sbi.h1
-rw-r--r--arch/riscv/include/asm/set_memory.h2
-rw-r--r--arch/riscv/include/asm/sparsemem.h2
-rw-r--r--arch/riscv/include/asm/string.h2
-rw-r--r--arch/riscv/include/asm/thread_info.h11
-rw-r--r--arch/riscv/include/asm/topology.h4
-rw-r--r--arch/riscv/include/asm/vmalloc.h1
-rw-r--r--arch/riscv/include/asm/xip_fixup.h30
-rw-r--r--arch/riscv/kernel/acpi.c31
-rw-r--r--arch/riscv/kernel/acpi_numa.c2
-rw-r--r--arch/riscv/kernel/asm-offsets.c7
-rw-r--r--arch/riscv/kernel/cacheinfo.c5
-rw-r--r--arch/riscv/kernel/cpufeature.c1
-rw-r--r--arch/riscv/kernel/elf_kexec.c6
-rw-r--r--arch/riscv/kernel/entry.S91
-rw-r--r--arch/riscv/kernel/module.c4
-rw-r--r--arch/riscv/kernel/perf_callchain.c46
-rw-r--r--arch/riscv/kernel/pi/Makefile4
-rw-r--r--arch/riscv/kernel/pi/archrandom_early.c30
-rw-r--r--arch/riscv/kernel/pi/cmdline_early.c10
-rw-r--r--arch/riscv/kernel/pi/fdt_early.c167
-rw-r--r--arch/riscv/kernel/pi/pi.h20
-rw-r--r--arch/riscv/kernel/process.c9
-rw-r--r--arch/riscv/kernel/riscv_ksyms.c3
-rw-r--r--arch/riscv/kernel/smp.c43
-rw-r--r--arch/riscv/kernel/stacktrace.c43
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/kernel/vendor_extensions/andes.c2
-rw-r--r--arch/riscv/kernel/vmcore_info.c7
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S5
-rw-r--r--arch/riscv/kvm/vcpu_pmu.c14
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c4
-rw-r--r--arch/riscv/lib/Makefile2
-rw-r--r--arch/riscv/lib/memset.S2
-rw-r--r--arch/riscv/lib/strcmp.S2
-rw-r--r--arch/riscv/lib/strlen.S1
-rw-r--r--arch/riscv/lib/strncmp.S2
-rw-r--r--arch/riscv/mm/cacheflush.c12
-rw-r--r--arch/riscv/mm/init.c28
-rw-r--r--arch/riscv/mm/pgtable.c13
-rw-r--r--arch/riscv/purgatory/Makefile2
-rw-r--r--arch/s390/Kconfig39
-rw-r--r--arch/s390/Makefile.postlink38
-rw-r--r--arch/s390/boot/Makefile36
-rw-r--r--arch/s390/boot/als.c49
-rw-r--r--arch/s390/boot/boot.h2
-rw-r--r--arch/s390/boot/head.S4
-rw-r--r--arch/s390/boot/ipl_parm.c2
-rw-r--r--arch/s390/boot/kaslr.c2
-rw-r--r--arch/s390/boot/pgm_check_info.c160
-rw-r--r--arch/s390/boot/physmem_info.c26
-rw-r--r--arch/s390/boot/printk.c124
-rw-r--r--arch/s390/boot/startup.c7
-rw-r--r--arch/s390/configs/debug_defconfig4
-rw-r--r--arch/s390/configs/defconfig4
-rw-r--r--arch/s390/crypto/Kconfig10
-rw-r--r--arch/s390/crypto/Makefile1
-rw-r--r--arch/s390/crypto/aes_s390.c120
-rw-r--r--arch/s390/crypto/hmac_s390.c359
-rw-r--r--arch/s390/crypto/paes_s390.c9
-rw-r--r--arch/s390/crypto/sha.h1
-rw-r--r--arch/s390/crypto/sha3_256_s390.c11
-rw-r--r--arch/s390/crypto/sha3_512_s390.c11
-rw-r--r--arch/s390/crypto/sha_common.c20
-rw-r--r--arch/s390/hypfs/hypfs.h1
-rw-r--r--arch/s390/hypfs/hypfs_diag.c7
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/alternative.h6
-rw-r--r--arch/s390/include/asm/arch_hweight.h15
-rw-r--r--arch/s390/include/asm/atomic_ops.h7
-rw-r--r--arch/s390/include/asm/barrier.h4
-rw-r--r--arch/s390/include/asm/cpacf.h207
-rw-r--r--arch/s390/include/asm/ctlreg.h5
-rw-r--r--arch/s390/include/asm/diag.h9
-rw-r--r--arch/s390/include/asm/facility.h37
-rw-r--r--arch/s390/include/asm/fpu-insn-asm.h22
-rw-r--r--arch/s390/include/asm/ftrace.h17
-rw-r--r--arch/s390/include/asm/hiperdispatch.h14
-rw-r--r--arch/s390/include/asm/irq.h2
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/s390/include/asm/march.h38
-rw-r--r--arch/s390/include/asm/mmzone.h17
-rw-r--r--arch/s390/include/asm/module.h14
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/pci.h9
-rw-r--r--arch/s390/include/asm/percpu.h7
-rw-r--r--arch/s390/include/asm/perf_event.h24
-rw-r--r--arch/s390/include/asm/pgtable.h1
-rw-r--r--arch/s390/include/asm/pkey.h4
-rw-r--r--arch/s390/include/asm/preempt.h7
-rw-r--r--arch/s390/include/asm/processor.h1
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/asm/smp.h4
-rw-r--r--arch/s390/include/asm/topology.h3
-rw-r--r--arch/s390/include/asm/trace/hiperdispatch.h58
-rw-r--r--arch/s390/include/asm/vdso-symbols.h17
-rw-r--r--arch/s390/include/asm/vdso.h12
-rw-r--r--arch/s390/include/asm/vdso/getrandom.h40
-rw-r--r--arch/s390/include/asm/vdso/vsyscall.h15
-rw-r--r--arch/s390/include/uapi/asm/pkey.h5
-rw-r--r--arch/s390/kernel/Makefile7
-rw-r--r--arch/s390/kernel/asm-offsets.c3
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/cpacf.c119
-rw-r--r--arch/s390/kernel/diag.c17
-rw-r--r--arch/s390/kernel/dis.c20
-rw-r--r--arch/s390/kernel/early.c38
-rw-r--r--arch/s390/kernel/early_printk.c16
-rw-r--r--arch/s390/kernel/earlypgm.S23
-rw-r--r--arch/s390/kernel/entry.S38
-rw-r--r--arch/s390/kernel/ftrace.c106
-rw-r--r--arch/s390/kernel/ftrace.h2
-rw-r--r--arch/s390/kernel/hiperdispatch.c430
-rw-r--r--arch/s390/kernel/irq.c1
-rw-r--r--arch/s390/kernel/kprobes.c15
-rw-r--r--arch/s390/kernel/mcount.S5
-rw-r--r--arch/s390/kernel/numa.c3
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c309
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c16
-rw-r--r--arch/s390/kernel/perf_pai_ext.c9
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c21
-rw-r--r--arch/s390/kernel/stacktrace.c19
-rw-r--r--arch/s390/kernel/topology.c76
-rw-r--r--arch/s390/kernel/uv.c23
-rw-r--r--arch/s390/kernel/vdso.c26
-rw-r--r--arch/s390/kernel/vdso64/Makefile9
-rw-r--r--arch/s390/kernel/vdso64/vdso.h1
-rw-r--r--arch/s390/kernel/vdso64/vdso64.lds.S7
-rw-r--r--arch/s390/kernel/vdso64/vgetrandom-chacha.S185
-rw-r--r--arch/s390/kernel/vdso64/vgetrandom.c14
-rw-r--r--arch/s390/kernel/wti.c215
-rw-r--r--arch/s390/mm/cmm.c18
-rw-r--r--arch/s390/mm/dump_pagetables.c191
-rw-r--r--arch/s390/mm/fault.c16
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/s390/mm/mmap.c4
-rw-r--r--arch/s390/pci/Makefile3
-rw-r--r--arch/s390/pci/pci.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/Makefile16
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.c1359
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c352
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c1726
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h110
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_debug.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c497
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h525
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h55
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h52
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c4
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h75
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h6
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c898
-rw-r--r--drivers/gpu/drm/ast/Makefile4
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c406
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c184
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c12
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h43
-rw-r--r--drivers/gpu/drm/ast/ast_main.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c576
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h22
-rw-r--r--drivers/gpu/drm/ast/ast_sil164.c127
-rw-r--r--drivers/gpu/drm/ast/ast_vga.c127
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c22
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c17
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c35
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c103
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c8
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.h4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c30
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c66
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c7
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c45
-rw-r--r--drivers/gpu/drm/ci/arm64.config1
-rw-r--r--drivers/gpu/drm/ci/container.yml8
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml29
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml8
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh1
-rw-r--r--drivers/gpu/drm/ci/test.yml132
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt105
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt57
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt90
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt50
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt65
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt106
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c66
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c128
-rw-r--r--drivers/gpu/drm/drm_atomic.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_bridge.c9
-rw-r--r--drivers/gpu/drm/drm_connector.c87
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h15
-rw-r--r--drivers/gpu/drm/drm_displayid.c3
-rw-r--r--drivers/gpu/drm/drm_drv.c100
-rw-r--r--drivers/gpu/drm/drm_edid.c24
-rw-r--r--drivers/gpu/drm/drm_exec.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c39
-rw-r--r--drivers/gpu/drm/drm_file.c44
-rw-r--r--drivers/gpu/drm/drm_gem.c7
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c225
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/drm_panel.c18
-rw-r--r--drivers/gpu/drm/drm_panic.c406
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs1003
-rw-r--r--drivers/gpu/drm/drm_prime.c84
-rw-r--r--drivers/gpu/drm/drm_print.c13
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c12
-rw-r--r--drivers/gpu/drm/drm_rect.c1
-rw-r--r--drivers/gpu/drm/drm_syncobj.c23
-rw-r--r--drivers/gpu/drm/drm_vblank.c83
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c22
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h4
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c26
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c14
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c16
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c18
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c155
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c883
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c240
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c36
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c237
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c100
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c36
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c553
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c67
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c500
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c115
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c322
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c567
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c852
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c125
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h41
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c130
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c16
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c24
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c30
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c47
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c25
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c18
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c45
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c3
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c88
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c12
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h3
-rw-r--r--drivers/gpu/drm/i915/i915_module.c9
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c53
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h16
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h1
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_step.c84
-rw-r--r--drivers/gpu/drm/i915/intel_step.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c7
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c14
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c82
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c106
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c29
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c111
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c40
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h58
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c183
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c156
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c16
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c30
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c141
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c89
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c46
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h51
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c12
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c5
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c779
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml1118
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml89
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c57
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc907d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c79
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h37
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c330
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c389
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h61
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c194
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/uevent.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c867
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c5
-rw-r--r--drivers/gpu/drm/panel/Kconfig12
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c325
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c241
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c190
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c40
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c153
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c14
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c165
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c370
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c149
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c79
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c367
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c211
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c69
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c97
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c1122
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c29
-rw-r--r--drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c115
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c212
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/cik.c14
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c62
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c94
-rw-r--r--drivers/gpu/drm/radeon/r300.c6
-rw-r--r--drivers/gpu/drm/radeon/r420.c6
-rw-r--r--drivers/gpu/drm/radeon/r520.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c12
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig8
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c11
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c (renamed from drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c)0
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h (renamed from drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h)0
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c32
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c107
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c25
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c25
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c1
-rw-r--r--drivers/gpu/drm/stm/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/drv.c7
-rw-r--r--drivers/gpu/drm/stm/ltdc.c107
-rw-r--r--drivers/gpu/drm/stm/lvds.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c12
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c46
-rw-r--r--drivers/gpu/drm/tegra/hub.c7
-rw-r--r--drivers/gpu/drm/tegra/output.c29
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c27
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c13
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c6
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c460
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c151
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c251
-rw-r--r--drivers/gpu/drm/udl/udl_edid.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c14
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h12
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c40
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h16
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c79
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c255
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c32
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c10
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h1
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/xe/Makefile44
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h19
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_step.h10
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c9
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c7
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c108
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c1
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c51
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c9
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h17
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h12
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h23
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c53
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.c39
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c11
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c436
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c44
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c82
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c219
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c1
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c51
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h15
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c10
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.h4
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c121
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_device.c28
-rw-r--r--drivers/gpu/drm/xe/xe_device.h26
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h50
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c47
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c22
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c240
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h10
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c26
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c16
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c490
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h28
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h54
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c61
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.c71
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c45
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c40
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c55
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c214
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c1260
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h107
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c70
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h29
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c97
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c100
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c28
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c164
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h10
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c372
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.h29
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c95
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c37
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c559
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h43
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c213
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h1
-rw-r--r--drivers/gpu/drm/xe/xe_module.c54
-rw-r--r--drivers/gpu/drm/xe/xe_module.h2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c11
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c24
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c130
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c12
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1355
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_query.c6
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h1
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c44
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h4
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_helpers.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c20
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c2
-rw-r--r--drivers/gpu/drm/xe/xe_step.c57
-rw-r--r--drivers/gpu/drm/xe/xe_step_types.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c31
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h1
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h52
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h10
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c1
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c21
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c42
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c732
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h55
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c5
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h7
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules7
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c37
-rw-r--r--drivers/gpu/host1x/intr.c21
-rw-r--r--drivers/gpu/host1x/intr.h5
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/Kconfig6
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c4
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c6
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-asus.c9
-rw-r--r--drivers/hid/hid-aureal.c2
-rw-r--r--drivers/hid/hid-bigbenff.c6
-rw-r--r--drivers/hid/hid-cherry.c2
-rw-r--r--drivers/hid/hid-chicony.c4
-rw-r--r--drivers/hid/hid-cmedia.c6
-rw-r--r--drivers/hid/hid-core.c39
-rw-r--r--drivers/hid/hid-corsair.c4
-rw-r--r--drivers/hid/hid-cougar.c4
-rw-r--r--drivers/hid/hid-cp2112.c7
-rw-r--r--drivers/hid/hid-cypress.c2
-rw-r--r--drivers/hid/hid-dr.c8
-rw-r--r--drivers/hid/hid-elecom.c2
-rw-r--r--drivers/hid/hid-gembird.c2
-rw-r--r--drivers/hid/hid-glorious.c2
-rw-r--r--drivers/hid/hid-goodix-spi.c809
-rw-r--r--drivers/hid/hid-google-hammer.c27
-rw-r--r--drivers/hid/hid-holtek-kbd.c6
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h18
-rw-r--r--drivers/hid/hid-input.c37
-rw-r--r--drivers/hid/hid-ite.c2
-rw-r--r--drivers/hid/hid-keytouch.c8
-rw-r--r--drivers/hid/hid-kye.c2
-rw-r--r--drivers/hid/hid-lenovo.c2
-rw-r--r--drivers/hid/hid-lg.c30
-rw-r--r--drivers/hid/hid-logitech-hidpp.c4
-rw-r--r--drivers/hid/hid-macally.c4
-rw-r--r--drivers/hid/hid-magicmouse.c4
-rw-r--r--drivers/hid/hid-maltron.c8
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-monterey.c2
-rw-r--r--drivers/hid/hid-multitouch.c30
-rw-r--r--drivers/hid/hid-nti.c2
-rw-r--r--drivers/hid/hid-ortek.c2
-rw-r--r--drivers/hid/hid-petalynx.c2
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ipa/ipa_power.c7
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c3
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macsec.c4
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/mdio/fwnode_mdio.c3
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c54
-rw-r--r--drivers/net/mdio/of_mdio.c5
-rw-r--r--drivers/net/net_failover.c4
-rw-r--r--drivers/net/netconsole.c192
-rw-r--r--drivers/net/netkit.c7
-rw-r--r--drivers/net/nlmon.c4
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile4
-rw-r--r--drivers/net/phy/ax88796b_rust.rs7
-rw-r--r--drivers/net/phy/dp83822.c35
-rw-r--r--drivers/net/phy/dp83td510.c119
-rw-r--r--drivers/net/phy/dp83tg720.c154
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/microchip_t1.c990
-rw-r--r--drivers/net/phy/microchip_t1s.c30
-rw-r--r--drivers/net/phy/motorcomm.c684
-rw-r--r--drivers/net/phy/open_alliance_helpers.c77
-rw-r--r--drivers/net/phy/open_alliance_helpers.h47
-rw-r--r--drivers/net/phy/phy.c22
-rw-r--r--drivers/net/phy/phy_device.c106
-rw-r--r--drivers/net/phy/phy_link_topology.c105
-rw-r--r--drivers/net/phy/phylink.c45
-rw-r--r--drivers/net/phy/qcom/at803x.c2
-rw-r--r--drivers/net/phy/qcom/qca807x.c12
-rw-r--r--drivers/net/phy/qcom/qca83xx.c10
-rw-r--r--drivers/net/phy/qt2025.rs103
-rw-r--r--drivers/net/phy/sfp-bus.c26
-rw-r--r--drivers/net/phy/vitesse.c183
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/pse-pd/tps23881.c21
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sungem_phy.c35
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c3
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c173
-rw-r--r--drivers/net/vrf.c7
-rw-r--r--drivers/net/vsockmon.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c10
-rw-r--r--drivers/net/wireguard/device.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c354
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h126
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c59
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h46
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h303
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c426
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h57
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c73
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c76
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h49
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c202
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c104
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c7
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c62
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c20
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c303
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c53
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c207
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig16
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile8
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c196
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c510
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c314
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h191
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c187
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c513
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h159
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c51
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c35
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c74
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h8
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h89
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c138
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c55
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c42
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c211
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c848
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c418
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c264
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c151
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c338
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h23
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c53
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c34
-rw-r--r--drivers/net/xen-netback/hash.c5
-rw-r--r--drivers/nfc/pn533/usb.c1
-rw-r--r--drivers/ntb/core.c4
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c2
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c1
-rw-r--r--drivers/ntb/ntb_transport.c33
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c43
-rw-r--r--drivers/nvdimm/nd_virtio.c9
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/nvme/common/keyring.c58
-rw-r--r--drivers/nvme/host/Kconfig3
-rw-r--r--drivers/nvme/host/core.c52
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fault_inject.c1
-rw-r--r--drivers/nvme/host/ioctl.c26
-rw-r--r--drivers/nvme/host/multipath.c14
-rw-r--r--drivers/nvme/host/nvme.h7
-rw-r--r--drivers/nvme/host/pci.c18
-rw-r--r--drivers/nvme/host/rdma.c12
-rw-r--r--drivers/nvme/host/sysfs.c91
-rw-r--r--drivers/nvme/host/tcp.c57
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/auth.c12
-rw-r--r--drivers/nvme/target/rdma.c4
-rw-r--r--drivers/nvmem/layouts.c2
-rw-r--r--drivers/of/.kunitconfig1
-rw-r--r--drivers/of/Kconfig10
-rw-r--r--drivers/of/Makefile3
-rw-r--r--drivers/of/address.c40
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/irq.c43
-rw-r--r--drivers/of/kunit_overlay_test.dtso9
-rw-r--r--drivers/of/of_kunit_helpers.c77
-rw-r--r--drivers/of/of_numa.c5
-rw-r--r--drivers/of/overlay.c12
-rw-r--r--drivers/of/overlay_test.c115
-rw-r--r--drivers/of/platform.c23
-rw-r--r--drivers/of/property.c41
-rw-r--r--drivers/of/resolver.c12
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/opp/ti-opp-supply.c2
-rw-r--r--drivers/parisc/pdc_stable.c2
-rw-r--r--drivers/pci/Kconfig9
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c37
-rw-r--r--drivers/pci/controller/Kconfig2
-rw-r--r--drivers/pci/controller/cadence/Kconfig2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c160
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c44
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h13
-rw-r--r--drivers/pci/controller/dwc/Kconfig5
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c11
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c1000
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c24
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h35
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c78
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h14
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c41
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c133
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c13
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c37
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c74
-rw-r--r--drivers/pci/controller/pci-tegra.c10
-rw-r--r--drivers/pci/controller/pci-xgene.c6
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c11
-rw-r--r--drivers/pci/controller/pcie-altera.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c572
-rw-r--r--drivers/pci/controller/pcie-iproc.c18
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c193
-rw-r--r--drivers/pci/controller/pcie-mediatek.c12
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c10
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c64
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c150
-rw-r--r--drivers/pci/controller/pcie-xilinx.c9
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c11
-rw-r--r--drivers/pci/controller/vmd.c17
-rw-r--r--drivers/pci/devres.c11
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c14
-rw-r--r--drivers/pci/hotplug/TODO5
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp.h38
-rw-r--r--drivers/pci/hotplug/shpchp_core.c15
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c79
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c63
-rw-r--r--drivers/pci/iomap.c2
-rw-r--r--drivers/pci/npem.c595
-rw-r--r--drivers/pci/pci-acpi.c182
-rw-r--r--drivers/pci/pci-bridge-emul.c4
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c75
-rw-r--r--drivers/pci/pci.h46
-rw-r--r--drivers/pci/pcie/aer_inject.c4
-rw-r--r--drivers/pci/probe.c37
-rw-r--r--drivers/pci/pwrctl/pci-pwrctl-pwrseq.c5
-rw-r--r--drivers/pci/quirks.c39
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c182
-rw-r--r--drivers/perf/arm-cmn.c318
-rw-r--r--drivers/perf/arm-ni.c781
-rw-r--r--drivers/perf/arm_pmu.c11
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/arm_pmuv3.c148
-rw-r--r--drivers/perf/arm_spe_pmu.c9
-rw-r--r--drivers/perf/arm_v6_pmu.c6
-rw-r--r--drivers/perf/arm_v7_pmu.c77
-rw-r--r--drivers/perf/arm_xscale_pmu.c12
-rw-r--r--drivers/perf/dwc_pcie_pmu.c22
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c34
-rw-r--r--drivers/perf/riscv_pmu.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c38
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c20
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c21
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c95
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c677
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c12
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c30
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c27
-rw-r--r--drivers/phy/nuvoton/Kconfig12
-rw-r--r--drivers/phy/nuvoton/Makefile3
-rw-r--r--drivers/phy/nuvoton/phy-ma35d1-usb2.c143
-rw-r--r--drivers/phy/phy-airoha-pcie.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-common.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c83
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c13
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c60
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c206
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c12
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c50
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c16
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c16
-rw-r--r--drivers/phy/ti/phy-tusb1210.c11
-rw-r--r--drivers/pinctrl/Kconfig16
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c9
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c7
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c324
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c7
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-c3.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-t7.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-s4.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c25
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c16
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c42
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c3
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c64
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c51
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c14
-rw-r--r--drivers/pinctrl/pinctrl-eyeq5.c575
-rw-r--r--drivers/pinctrl/pinctrl-k210.c35
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c207
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c5
-rw-r--r--drivers/pinctrl/pinctrl-utils.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c1
-rw-r--r--drivers/pinctrl/pinmux.c7
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c117
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c3
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c108
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h21
-rw-r--r--drivers/pinctrl/sophgo/Kconfig54
-rw-r--r--drivers/pinctrl/sophgo/Makefile7
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1800b.c462
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1812h.c771
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.c765
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.h155
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2000.c771
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2002.c542
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c14
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c58
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c36
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c10
-rw-r--r--drivers/platform/cznic/Kconfig2
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c4
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c5
-rw-r--r--drivers/platform/olpc/olpc-ec.c3
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/platform/surface/aggregator/bus.c2
-rw-r--r--drivers/platform/surface/aggregator/controller.c67
-rw-r--r--drivers/platform/surface/aggregator/core.c82
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c45
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/acer-wmi.c19
-rw-r--r--drivers/platform/x86/acerhdf.c33
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c31
-rw-r--r--drivers/platform/x86/amd/pmf/core.c20
-rw-r--r--drivers/platform/x86/amd/pmf/pmf-quirks.c8
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h73
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c51
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c40
-rw-r--r--drivers/platform/x86/asus-laptop.c3
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c234
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c417
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h7
-rw-r--r--drivers/platform/x86/dell/dell-wmi-aio.c13
-rw-r--r--drivers/platform/x86/eeepc-laptop.c3
-rw-r--r--drivers/platform/x86/eeepc-wmi.c4
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c9
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c16
-rw-r--r--drivers/platform/x86/huawei-wmi.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c191
-rw-r--r--drivers/platform/x86/ideapad-laptop.h139
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/ifs/core.c33
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h92
-rw-r--r--drivers/platform/x86/intel/ifs/load.c40
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c233
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile9
-rw-r--r--drivers/platform/x86/intel/int3472/common.c7
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c9
-rw-r--r--drivers/platform/x86/intel/oaktrail.c3
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c128
-rw-r--r--drivers/platform/x86/intel/pmc/core.h20
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c6
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c2
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c2
-rw-r--r--drivers/platform/x86/intel/pmt/class.c28
-rw-r--r--drivers/platform/x86/intel/pmt/class.h10
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c2
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c12
-rw-r--r--drivers/platform/x86/intel/sdsi.c3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c42
-rw-r--r--drivers/platform/x86/intel/tpmi.c3
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c42
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h17
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c165
-rw-r--r--drivers/platform/x86/intel/vsec.c8
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c2
-rw-r--r--drivers/platform/x86/intel_scu_pltdrv.c2
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c3
-rw-r--r--drivers/platform/x86/lenovo-ymc.c2
-rw-r--r--drivers/platform/x86/lg-laptop.c149
-rw-r--r--drivers/platform/x86/msi-wmi.c20
-rw-r--r--drivers/platform/x86/panasonic-laptop.c74
-rw-r--r--drivers/platform/x86/samsung-laptop.c5
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c32
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c145
-rw-r--r--drivers/platform/x86/toshiba-wmi.c15
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c26
-rw-r--r--drivers/platform/x86/wmi.c143
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig2
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c8
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c20
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c16
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c22
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c40
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h2
-rw-r--r--drivers/pmdomain/amlogic/Kconfig11
-rw-r--r--drivers/pmdomain/amlogic/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c380
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c2
-rw-r--r--drivers/pmdomain/bcm/raspberrypi-power.c43
-rw-r--r--drivers/pmdomain/core.c94
-rw-r--r--drivers/pmdomain/imx/gpc.c14
-rw-r--r--drivers/pmdomain/imx/gpcv2.c8
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c22
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c6
-rw-r--r--drivers/pmdomain/qcom/cpr.c92
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c11
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c20
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c118
-rw-r--r--drivers/power/reset/brcmstb-reboot.c59
-rw-r--r--drivers/power/reset/pwr-mlxbf.c16
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c11
-rw-r--r--drivers/power/supply/ab8500_fg.c2
-rw-r--r--drivers/power/supply/axp20x_battery.c591
-rw-r--r--drivers/power/supply/axp20x_usb_power.c368
-rw-r--r--drivers/power/supply/bq256xx_charger.c15
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c22
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c7
-rw-r--r--drivers/power/supply/max17042_battery.c5
-rw-r--r--drivers/power/supply/max1720x_battery.c209
-rw-r--r--drivers/power/supply/max77693_charger.c52
-rw-r--r--drivers/power/supply/max8998_charger.c1
-rw-r--r--drivers/power/supply/mp2629_charger.c15
-rw-r--r--drivers/power/supply/mt6360_charger.c13
-rw-r--r--drivers/power/supply/mt6370-charger.c13
-rw-r--r--drivers/power/supply/power_supply_core.c19
-rw-r--r--drivers/power/supply/power_supply_hwmon.c3
-rw-r--r--drivers/power/supply/power_supply_sysfs.c66
-rw-r--r--drivers/power/supply/qcom_battmgr.c37
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c13
-rw-r--r--drivers/power/supply/rk817_charger.c9
-rw-r--r--drivers/power/supply/rn5t618_power.c13
-rw-r--r--drivers/power/supply/rt9467-charger.c16
-rw-r--r--drivers/power/supply/rt9471.c15
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/power/supply/ucs1002_power.c26
-rw-r--r--drivers/powercap/intel_rapl_common.c38
-rw-r--r--drivers/ptp/ptp_chardev.c8
-rw-r--r--drivers/ptp/ptp_idt82p33.c8
-rw-r--r--drivers/ptp/ptp_ines.c4
-rw-r--r--drivers/ptp/ptp_ocp.c20
-rw-r--r--drivers/pwm/Kconfig7
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c13
-rw-r--r--drivers/pwm/pwm-adp5585.c188
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c7
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c2
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c3
-rw-r--r--drivers/pwm/pwm-clk.c2
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-lp3943.c10
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c2
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c4
-rw-r--r--drivers/pwm/pwm-rcar.c2
-rw-r--r--drivers/pwm/pwm-rockchip.c2
-rw-r--r--drivers/pwm/pwm-sifive.c2
-rw-r--r--drivers/pwm/pwm-stm32.c2
-rw-r--r--drivers/pwm/pwm-sun4i.c2
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c2
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/ras/amd/atl/Kconfig4
-rw-r--r--drivers/ras/amd/atl/Makefile2
-rw-r--r--drivers/ras/amd/atl/internal.h10
-rw-r--r--drivers/ras/amd/atl/prm.c57
-rw-r--r--drivers/ras/amd/atl/umc.c5
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/act8865-regulator.c4
-rw-r--r--drivers/regulator/axp20x-regulator.c4
-rw-r--r--drivers/regulator/bd718x7-regulator.c19
-rw-r--r--drivers/regulator/bd9576-regulator.c8
-rw-r--r--drivers/regulator/bd96801-regulator.c19
-rw-r--r--drivers/regulator/core.c310
-rw-r--r--drivers/regulator/da903x-regulator.c2
-rw-r--r--drivers/regulator/da9052-regulator.c22
-rw-r--r--drivers/regulator/da9055-regulator.c28
-rw-r--r--drivers/regulator/da9063-regulator.c4
-rw-r--r--drivers/regulator/da9121-regulator.c20
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/devres.c18
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed-helper.c2
-rw-r--r--drivers/regulator/fixed.c8
-rw-r--r--drivers/regulator/helpers.c8
-rw-r--r--drivers/regulator/hi6421-regulator.c14
-rw-r--r--drivers/regulator/hi6421v530-regulator.c27
-rw-r--r--drivers/regulator/hi6421v600-regulator.c10
-rw-r--r--drivers/regulator/internal.h13
-rw-r--r--drivers/regulator/irq_helpers.c17
-rw-r--r--drivers/regulator/max5970-regulator.c2
-rw-r--r--drivers/regulator/max77650-regulator.c31
-rw-r--r--drivers/regulator/max77802-regulator.c4
-rw-r--r--drivers/regulator/max77826-regulator.c4
-rw-r--r--drivers/regulator/max77857-regulator.c2
-rw-r--r--drivers/regulator/max8973-regulator.c7
-rw-r--r--drivers/regulator/max8997-regulator.c16
-rw-r--r--drivers/regulator/mcp16502.c17
-rw-r--r--drivers/regulator/mp5416.c4
-rw-r--r--drivers/regulator/mt6357-regulator.c2
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c10
-rw-r--r--drivers/regulator/of_regulator.c150
-rw-r--r--drivers/regulator/pcap-regulator.c12
-rw-r--r--drivers/regulator/pfuze100-regulator.c10
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c4
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c17
-rw-r--r--drivers/regulator/qcom_smd-regulator.c13
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c12
-rw-r--r--drivers/regulator/rt5120-regulator.c4
-rw-r--r--drivers/regulator/s2mps11.c17
-rw-r--r--drivers/regulator/s5m8767.c17
-rw-r--r--drivers/regulator/scmi-regulator.c8
-rw-r--r--drivers/regulator/sm5703-regulator.c170
-rw-r--r--drivers/regulator/tps6287x-regulator.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/regulator/wm831x-isink.c8
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig13
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c10
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c2
-rw-r--r--drivers/remoteproc/imx_rproc.c93
-rw-r--r--drivers/remoteproc/imx_rproc.h4
-rw-r--r--drivers/remoteproc/ingenic_rproc.c3
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c21
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c93
-rw-r--r--drivers/remoteproc/st_slim_rproc.c6
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c108
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c667
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c130
-rw-r--r--drivers/remoteproc/ti_sci_proc.h26
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c141
-rw-r--r--drivers/reset/Kconfig13
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c17
-rw-r--r--drivers/reset/reset-berlin.c3
-rw-r--r--drivers/reset/reset-eyeq.c570
-rw-r--r--drivers/reset/reset-k210.c3
-rw-r--r--drivers/reset/reset-lpc18xx.c43
-rw-r--r--drivers/reset/reset-meson.c6
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_native.c166
-rw-r--r--drivers/rpmsg/qcom_glink_trace.h406
-rw-r--r--drivers/rtc/Kconfig27
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-imx-sm-bbm.c162
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-rc5t619.c13
-rw-r--r--drivers/rtc/rtc-s35390a.c1
-rw-r--r--drivers/rtc/rtc-sd2405al.c227
-rw-r--r--drivers/rtc/rtc-stm32.c281
-rw-r--r--drivers/rtc/rtc-sun6i.c1
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/s390/char/hmcdrv_dev.c3
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/crypto/Makefile16
-rw-r--r--drivers/s390/crypto/ap_bus.c59
-rw-r--r--drivers/s390/crypto/ap_queue.c20
-rw-r--r--drivers/s390/crypto/pkey_api.c2657
-rw-r--r--drivers/s390/crypto/pkey_base.c362
-rw-r--r--drivers/s390/crypto/pkey_base.h195
-rw-r--r--drivers/s390/crypto/pkey_cca.c629
-rw-r--r--drivers/s390/crypto/pkey_ep11.c578
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c557
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c648
-rw-r--r--drivers/s390/crypto/zcrypt_api.c29
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c8
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h6
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c28
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h14
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c10
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c37
-rw-r--r--drivers/scsi/NCR5380.c233
-rw-r--r--drivers/scsi/NCR5380.h20
-rw-r--r--drivers/scsi/aacraid/aachba.c28
-rw-r--r--drivers/scsi/aacraid/aacraid.h21
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c5
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c5
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h11
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c3
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c3
-rw-r--r--drivers/scsi/elx/libefc/efc_nport.c2
-rw-r--r--drivers/scsi/esas2r/esas2r.h1
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c5
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c18
-rw-r--r--drivers/scsi/fnic/fnic_main.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c3
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/lpfc/lpfc.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c79
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c3
-rw-r--r--drivers/scsi/mac_scsi.c170
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h10
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h7
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c36
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/myrb.c5
-rw-r--r--drivers/scsi/myrb.h1
-rw-r--r--drivers/scsi/myrs.c5
-rw-r--r--drivers/scsi/myrs.h1
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c20
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_lib.c23
-rw-r--r--drivers/scsi/scsi_transport_fc.c11
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h39
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c496
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c60
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h3
-rw-r--r--drivers/scsi/snic/snic_main.c10
-rw-r--r--drivers/scsi/stex.c6
-rw-r--r--drivers/scsi/sun3_scsi.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c3
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c10
-rw-r--r--drivers/soc/atmel/soc.c23
-rw-r--r--drivers/soc/atmel/soc.h9
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c5
-rw-r--r--drivers/soc/fsl/qe/Kconfig17
-rw-r--r--drivers/soc/fsl/qe/qe_common.c80
-rw-r--r--drivers/soc/fsl/qe/qmc.c667
-rw-r--r--drivers/soc/fsl/qe/tsa.c659
-rw-r--r--drivers/soc/fsl/qe/tsa.h3
-rw-r--r--drivers/soc/fsl/qe/ucc.c1
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c52
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c118
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c5
-rw-r--r--drivers/soc/qcom/icc-bwmon.c6
-rw-r--r--drivers/soc/qcom/ice.c14
-rw-r--r--drivers/soc/qcom/llcc-qcom.c32
-rw-r--r--drivers/soc/qcom/ocmem.c7
-rw-r--r--drivers/soc/qcom/qcom-pbs.c16
-rw-r--r--drivers/soc/qcom/qcom_aoss.c8
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c15
-rw-r--r--drivers/soc/qcom/smd-rpm.c41
-rw-r--r--drivers/soc/qcom/smp2p.c25
-rw-r--r--drivers/soc/qcom/socinfo.c4
-rw-r--r--drivers/soc/qcom/trace-smp2p.h98
-rw-r--r--drivers/soc/qcom/trace_icc-bwmon.h48
-rw-r--r--drivers/soc/rockchip/grf.c32
-rw-r--r--drivers/soc/rockchip/io-domain.c40
-rw-r--r--drivers/soc/tegra/pmc.c12
-rw-r--r--drivers/soc/ti/k3-ringacc.c12
-rw-r--r--drivers/soc/ti/knav_dma.c22
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c105
-rw-r--r--drivers/soc/ti/pm33xx.c52
-rw-r--r--drivers/soc/ti/pruss.c176
-rw-r--r--drivers/soc/versatile/Kconfig4
-rw-r--r--drivers/soc/versatile/soc-integrator.c1
-rw-r--r--drivers/soc/versatile/soc-realview.c20
-rw-r--r--drivers/soundwire/bus_type.c19
-rw-r--r--drivers/soundwire/cadence_master.c39
-rw-r--r--drivers/soundwire/cadence_master.h5
-rw-r--r--drivers/soundwire/intel.h9
-rw-r--r--drivers/soundwire/intel_ace2x.c20
-rw-r--r--drivers/soundwire/intel_auxdevice.c15
-rw-r--r--drivers/soundwire/intel_bus_common.c27
-rw-r--r--drivers/soundwire/stream.c8
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/atmel-quadspi.c46
-rw-r--r--drivers/spi/spi-airoha-snfi.c43
-rw-r--r--drivers/spi/spi-axi-spi-engine.c17
-rw-r--r--drivers/spi/spi-bcm63xx.c9
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c25
-rw-r--r--drivers/spi/spi-bitbang.c24
-rw-r--r--drivers/spi/spi-cadence-quadspi.c9
-rw-r--r--drivers/spi/spi-cadence-xspi.c692
-rw-r--r--drivers/spi/spi-davinci.c8
-rw-r--r--drivers/spi/spi-fsl-lpspi.c1
-rw-r--r--drivers/spi/spi-geni-qcom.c76
-rw-r--r--drivers/spi/spi-gpio.c12
-rw-r--r--drivers/spi/spi-meson-spicc.c22
-rw-r--r--drivers/spi/spi-mt65xx.c40
-rw-r--r--drivers/spi/spi-mxs.c11
-rw-r--r--drivers/spi/spi-nxp-fspi.c62
-rw-r--r--drivers/spi/spi-ppc4xx.c22
-rw-r--r--drivers/spi/spi-rpc-if.c7
-rw-r--r--drivers/spi/spi-s3c64xx.c1
-rw-r--r--drivers/spi/spi-slave-mt27xx.c12
-rw-r--r--drivers/spi/spi-slave-system-control.c2
-rw-r--r--drivers/spi/spi-slave-time.c2
-rw-r--r--drivers/spi/spi-wpcm-fiu.c17
-rw-r--r--drivers/spi/spi-zynq-qspi.c2
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c32
-rw-r--r--drivers/spi/spi.c27
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h6
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h6
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c5
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c12
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c259
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c44
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_dvs_info.h37
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h26
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h1
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c10
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c10
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c40
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c2
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.c16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.c43
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c44
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.h2
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.c4
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h5
-rw-r--r--drivers/tee/optee/Kconfig1
-rw-r--r--drivers/tee/optee/core.c96
-rw-r--r--drivers/tee/optee/device.c7
-rw-r--r--drivers/tee/optee/ffa_abi.c14
-rw-r--r--drivers/tee/optee/optee_ffa.h2
-rw-r--r--drivers/tee/optee/optee_private.h26
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h35
-rw-r--r--drivers/tee/optee/optee_smc.h2
-rw-r--r--drivers/tee/optee/rpc.c177
-rw-r--r--drivers/tee/optee/smc_abi.c14
-rw-r--r--drivers/tee/tee_core.c19
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c7
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c15
-rw-r--r--drivers/thermal/gov_bang_bang.c14
-rw-r--r--drivers/thermal/hisi_thermal.c25
-rw-r--r--drivers/thermal/imx_sc_thermal.c3
-rw-r--r--drivers/thermal/imx_thermal.c36
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c22
-rw-r--r--drivers/thermal/qoriq_thermal.c10
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c6
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c2
-rw-r--r--drivers/thermal/sprd_thermal.c14
-rw-r--r--drivers/thermal/st/st_thermal.c32
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c2
-rw-r--r--drivers/thermal/st/stm_thermal.c8
-rw-r--r--drivers/thermal/tegra/soctherm.c36
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c57
-rw-r--r--drivers/thermal/testing/Makefile7
-rw-r--r--drivers/thermal/testing/command.c221
-rw-r--r--drivers/thermal/testing/thermal_testing.h11
-rw-r--r--drivers/thermal/testing/zone.c468
-rw-r--r--drivers/thermal/thermal_core.c243
-rw-r--r--drivers/thermal/thermal_core.h38
-rw-r--r--drivers/thermal/thermal_helpers.c32
-rw-r--r--drivers/thermal/thermal_of.c171
-rw-r--r--drivers/thermal/thermal_sysfs.c222
-rw-r--r--drivers/thermal/thermal_trip.c53
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h4
-rw-r--r--drivers/tty/serial/8250/8250_core.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/serial_core.c16
-rw-r--r--drivers/tty/sysrq.c1
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c1
-rw-r--r--drivers/ufs/core/ufs-sysfs.c91
-rw-r--r--drivers/ufs/core/ufs_trace.h (renamed from include/trace/events/ufs.h)6
-rw-r--r--drivers/ufs/core/ufshcd.c85
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c14
-rw-r--r--drivers/usb/typec/anx7411.c11
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c3
-rw-r--r--drivers/usb/typec/rt1719.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c11
-rw-r--r--drivers/usb/typec/tipd/core.c9
-rw-r--r--drivers/usb/typec/ucsi/psy.c11
-rw-r--r--drivers/vdpa/Kconfig10
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h47
-rw-r--r--drivers/vdpa/mlx5/core/mr.c291
-rw-r--r--drivers/vdpa/mlx5/core/resources.c76
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c465
-rw-r--r--drivers/vdpa/pds/cmds.h1
-rw-r--r--drivers/vdpa/vdpa.c79
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c21
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c19
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h1
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/group.c6
-rw-r--r--drivers/vfio/mdev/mdev_private.h3
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c67
-rw-r--r--drivers/vfio/vfio_iommu_type1.c16
-rw-r--r--drivers/vfio/virqfd.c6
-rw-r--r--drivers/vhost/vdpa.c16
-rw-r--r--drivers/vhost/vsock.c4
-rw-r--r--drivers/video/backlight/l4f00242t03.c5
-rw-r--r--drivers/video/fbdev/core/fbcon.c16
-rw-r--r--drivers/video/fbdev/core/fbmem.c30
-rw-r--r--drivers/video/fbdev/efifb.c27
-rw-r--r--drivers/video/fbdev/hpfb.c1
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c6
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c36
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c15
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi.h2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/pxafb.c1
-rw-r--r--drivers/video/fbdev/xen-fbfront.c1
-rw-r--r--drivers/virt/acrn/irqfd.c6
-rw-r--r--drivers/virt/acrn/mm.c16
-rw-r--r--drivers/virt/coco/Kconfig2
-rw-r--r--drivers/virt/coco/Makefile1
-rw-r--r--drivers/virt/coco/pkvm-guest/Kconfig10
-rw-r--r--drivers/virt/coco/pkvm-guest/Makefile2
-rw-r--r--drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c127
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c132
-rw-r--r--drivers/virtio/virtio.c59
-rw-r--r--drivers/virtio/virtio_balloon.c18
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/imx2_wdt.c10
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c21
-rw-r--r--drivers/watchdog/imx_sc_wdt.c46
-rw-r--r--drivers/watchdog/intel-mid_wdt.c5
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c4
-rw-r--r--drivers/watchdog/pm8916_wdt.c2
-rw-r--r--drivers/watchdog/rzv2h_wdt.c273
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/pci.c14
-rw-r--r--drivers/xen/privcmd.c10
-rw-r--r--drivers/xen/swiotlb-xen.c10
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c6
-rw-r--r--fs/9p/vfs_addr.c11
-rw-r--r--fs/Kconfig27
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/inode.c5
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/amigaffs.h3
-rw-r--r--fs/affs/dir.c44
-rw-r--r--fs/affs/file.c22
-rw-r--r--fs/afs/file.c30
-rw-r--r--fs/afs/fsclient.c9
-rw-r--r--fs/afs/write.c4
-rw-r--r--fs/afs/yfsclient.c9
-rw-r--r--fs/aio.c2
-rw-r--r--fs/autofs/autofs_i.h4
-rw-r--r--fs/autofs/dev-ioctl.c97
-rw-r--r--fs/autofs/expire.c7
-rw-r--r--fs/autofs/inode.c5
-rw-r--r--fs/bcachefs/Kconfig7
-rw-r--r--fs/bcachefs/Makefile1
-rw-r--r--fs/bcachefs/acl.c2
-rw-r--r--fs/bcachefs/alloc_background.c69
-rw-r--r--fs/bcachefs/alloc_background.h3
-rw-r--r--fs/bcachefs/alloc_foreground.c59
-rw-r--r--fs/bcachefs/alloc_foreground.h5
-rw-r--r--fs/bcachefs/backpointers.c106
-rw-r--r--fs/bcachefs/backpointers.h23
-rw-r--r--fs/bcachefs/bcachefs.h14
-rw-r--r--fs/bcachefs/bcachefs_format.h2
-rw-r--r--fs/bcachefs/bset.c182
-rw-r--r--fs/bcachefs/bset.h4
-rw-r--r--fs/bcachefs/btree_cache.c273
-rw-r--r--fs/bcachefs/btree_cache.h3
-rw-r--r--fs/bcachefs/btree_gc.c21
-rw-r--r--fs/bcachefs/btree_io.c8
-rw-r--r--fs/bcachefs/btree_io.h4
-rw-r--r--fs/bcachefs/btree_iter.c63
-rw-r--r--fs/bcachefs/btree_iter.h52
-rw-r--r--fs/bcachefs/btree_journal_iter.c2
-rw-r--r--fs/bcachefs/btree_key_cache.c405
-rw-r--r--fs/bcachefs/btree_key_cache_types.h18
-rw-r--r--fs/bcachefs/btree_locking.h13
-rw-r--r--fs/bcachefs/btree_trans_commit.c2
-rw-r--r--fs/bcachefs/btree_types.h60
-rw-r--r--fs/bcachefs/btree_update.c12
-rw-r--r--fs/bcachefs/btree_update_interior.c37
-rw-r--r--fs/bcachefs/btree_update_interior.h2
-rw-r--r--fs/bcachefs/buckets.c50
-rw-r--r--fs/bcachefs/buckets.h15
-rw-r--r--fs/bcachefs/buckets_types.h8
-rw-r--r--fs/bcachefs/checksum.c101
-rw-r--r--fs/bcachefs/clock.h9
-rw-r--r--fs/bcachefs/darray.c4
-rw-r--r--fs/bcachefs/darray.h26
-rw-r--r--fs/bcachefs/data_update.c2
-rw-r--r--fs/bcachefs/dirent.c66
-rw-r--r--fs/bcachefs/ec.c303
-rw-r--r--fs/bcachefs/ec.h15
-rw-r--r--fs/bcachefs/ec_format.h9
-rw-r--r--fs/bcachefs/ec_types.h1
-rw-r--r--fs/bcachefs/errcode.h14
-rw-r--r--fs/bcachefs/extents.c59
-rw-r--r--fs/bcachefs/extents.h47
-rw-r--r--fs/bcachefs/fs-common.c5
-rw-r--r--fs/bcachefs/fs-io-buffered.c49
-rw-r--r--fs/bcachefs/fs-io-buffered.h6
-rw-r--r--fs/bcachefs/fs-io-direct.c2
-rw-r--r--fs/bcachefs/fs-io-pagecache.c90
-rw-r--r--fs/bcachefs/fs-io-pagecache.h4
-rw-r--r--fs/bcachefs/fs-io.c178
-rw-r--r--fs/bcachefs/fs-ioctl.c4
-rw-r--r--fs/bcachefs/fs.c435
-rw-r--r--fs/bcachefs/fs.h25
-rw-r--r--fs/bcachefs/fsck.c18
-rw-r--r--fs/bcachefs/inode.c2
-rw-r--r--fs/bcachefs/io_read.c18
-rw-r--r--fs/bcachefs/io_write.c7
-rw-r--r--fs/bcachefs/journal_io.c6
-rw-r--r--fs/bcachefs/journal_reclaim.c7
-rw-r--r--fs/bcachefs/opts.c85
-rw-r--r--fs/bcachefs/opts.h61
-rw-r--r--fs/bcachefs/rcu_pending.c650
-rw-r--r--fs/bcachefs/rcu_pending.h27
-rw-r--r--fs/bcachefs/rebalance.c3
-rw-r--r--fs/bcachefs/recovery.c22
-rw-r--r--fs/bcachefs/recovery_passes.c10
-rw-r--r--fs/bcachefs/replicas.c12
-rw-r--r--fs/bcachefs/replicas_format.h9
-rw-r--r--fs/bcachefs/sb-clean.c2
-rw-r--r--fs/bcachefs/sb-members.c57
-rw-r--r--fs/bcachefs/sb-members.h22
-rw-r--r--fs/bcachefs/six.c2
-rw-r--r--fs/bcachefs/str_hash.h2
-rw-r--r--fs/bcachefs/subvolume.h45
-rw-r--r--fs/bcachefs/subvolume_types.h3
-rw-r--r--fs/bcachefs/super-io.c12
-rw-r--r--fs/bcachefs/super.c85
-rw-r--r--fs/bcachefs/sysfs.c57
-rw-r--r--fs/bcachefs/thread_with_file.c2
-rw-r--r--fs/bcachefs/time_stats.c14
-rw-r--r--fs/bcachefs/time_stats.h3
-rw-r--r--fs/bcachefs/trace.h465
-rw-r--r--fs/bcachefs/util.c16
-rw-r--r--fs/bcachefs/util.h2
-rw-r--r--fs/bcachefs/xattr.c81
-rw-r--r--fs/bcachefs/xattr_format.h2
-rw-r--r--fs/bfs/file.c4
-rw-r--r--fs/binfmt_elf.c57
-rw-r--r--fs/bpf_fs_kfuncs.c185
-rw-r--r--fs/btrfs/backref.c6
-rw-r--r--fs/btrfs/bio.c84
-rw-r--r--fs/btrfs/bio.h6
-rw-r--r--fs/btrfs/block-group.c34
-rw-r--r--fs/btrfs/block-group.h11
-rw-r--r--fs/btrfs/block-rsv.c2
-rw-r--r--fs/btrfs/block-rsv.h2
-rw-r--r--fs/btrfs/btrfs_inode.h25
-rw-r--r--fs/btrfs/compression.c82
-rw-r--r--fs/btrfs/compression.h16
-rw-r--r--fs/btrfs/ctree.c18
-rw-r--r--fs/btrfs/ctree.h13
-rw-r--r--fs/btrfs/defrag.c97
-rw-r--r--fs/btrfs/defrag.h3
-rw-r--r--fs/btrfs/delayed-ref.c36
-rw-r--r--fs/btrfs/delayed-ref.h4
-rw-r--r--fs/btrfs/dev-replace.c43
-rw-r--r--fs/btrfs/direct-io.c73
-rw-r--r--fs/btrfs/discard.c4
-rw-r--r--fs/btrfs/disk-io.c16
-rw-r--r--fs/btrfs/extent-io-tree.c55
-rw-r--r--fs/btrfs/extent-io-tree.h38
-rw-r--r--fs/btrfs/extent-tree.c4
-rw-r--r--fs/btrfs/extent_io.c861
-rw-r--r--fs/btrfs/extent_io.h12
-rw-r--r--fs/btrfs/extent_map.c9
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file-item.h2
-rw-r--r--fs/btrfs/file.c60
-rw-r--r--fs/btrfs/fs.h2
-rw-r--r--fs/btrfs/inode-item.c10
-rw-r--r--fs/btrfs/inode-item.h4
-rw-r--r--fs/btrfs/inode.c372
-rw-r--r--fs/btrfs/ioctl.c15
-rw-r--r--fs/btrfs/lzo.c12
-rw-r--r--fs/btrfs/ordered-data.c30
-rw-r--r--fs/btrfs/ordered-data.h6
-rw-r--r--fs/btrfs/orphan.c24
-rw-r--r--fs/btrfs/qgroup.c66
-rw-r--r--fs/btrfs/qgroup.h1
-rw-r--r--fs/btrfs/raid-stripe-tree.c46
-rw-r--r--fs/btrfs/reflink.c35
-rw-r--r--fs/btrfs/relocation.c22
-rw-r--r--fs/btrfs/scrub.c12
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/space-info.c25
-rw-r--r--fs/btrfs/space-info.h10
-rw-r--r--fs/btrfs/subpage.c277
-rw-r--r--fs/btrfs/subpage.h60
-rw-r--r--fs/btrfs/tests/extent-io-tests.c10
-rw-r--r--fs/btrfs/transaction.c5
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/tree-mod-log.c14
-rw-r--r--fs/btrfs/tree-mod-log.h6
-rw-r--r--fs/btrfs/uuid-tree.c179
-rw-r--r--fs/btrfs/uuid-tree.h2
-rw-r--r--fs/btrfs/verity.c20
-rw-r--r--fs/btrfs/volumes.c228
-rw-r--r--fs/btrfs/volumes.h4
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/btrfs/zlib.c33
-rw-r--r--fs/btrfs/zoned.c36
-rw-r--r--fs/btrfs/zoned.h4
-rw-r--r--fs/btrfs/zstd.c35
-rw-r--r--fs/buffer.c71
-rw-r--r--fs/cachefiles/io.c19
-rw-r--r--fs/cachefiles/xattr.c34
-rw-r--r--fs/ceph/addr.c89
-rw-r--r--fs/ceph/dir.c1
-rw-r--r--fs/coda/inode.c43
-rw-r--r--fs/coredump.c166
-rw-r--r--fs/dcache.c10
-rw-r--r--fs/debugfs/inode.c8
-rw-r--r--fs/direct-io.c6
-rw-r--r--fs/dlm/config.c2
-rw-r--r--fs/dlm/dlm_internal.h5
-rw-r--r--fs/dlm/lock.c128
-rw-r--r--fs/dlm/lock.h2
-rw-r--r--fs/dlm/lockspace.c97
-rw-r--r--fs/dlm/lowcomms.c36
-rw-r--r--fs/dlm/main.c12
-rw-r--r--fs/dlm/member.c2
-rw-r--r--fs/dlm/memory.c28
-rw-r--r--fs/dlm/memory.h4
-rw-r--r--fs/dlm/recover.c9
-rw-r--r--fs/ecryptfs/mmap.c86
-rw-r--r--fs/erofs/Kconfig22
-rw-r--r--fs/erofs/Makefile1
-rw-r--r--fs/erofs/data.c109
-rw-r--r--fs/erofs/decompressor.c2
-rw-r--r--fs/erofs/erofs_fs.h5
-rw-r--r--fs/erofs/fileio.c192
-rw-r--r--fs/erofs/inode.c138
-rw-r--r--fs/erofs/internal.h26
-rw-r--r--fs/erofs/super.c80
-rw-r--r--fs/erofs/sysfs.c30
-rw-r--r--fs/erofs/zdata.c196
-rw-r--r--fs/erofs/zmap.c42
-rw-r--r--fs/eventfd.c4
-rw-r--r--fs/eventpoll.c37
-rw-r--r--fs/exec.c115
-rw-r--r--fs/exfat/balloc.c10
-rw-r--r--fs/exfat/exfat_fs.h24
-rw-r--r--fs/exfat/file.c112
-rw-r--r--fs/exfat/inode.c103
-rw-r--r--fs/exfat/namei.c17
-rw-r--r--fs/exfat/nls.c5
-rw-r--r--fs/exfat/super.c41
-rw-r--r--fs/ext2/dir.c32
-rw-r--r--fs/ext2/inode.c8
-rw-r--r--fs/ext4/bitmap.c8
-rw-r--r--fs/ext4/dir.c64
-rw-r--r--fs/ext4/ext4.h37
-rw-r--r--fs/ext4/extents.c823
-rw-r--r--fs/ext4/extents_status.c240
-rw-r--r--fs/ext4/extents_status.h28
-rw-r--r--fs/ext4/fast_commit.c47
-rw-r--r--fs/ext4/file.c20
-rw-r--r--fs/ext4/ialloc.c35
-rw-r--r--fs/ext4/indirect.c7
-rw-r--r--fs/ext4/inline.c61
-rw-r--r--fs/ext4/inode.c321
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/mballoc.c25
-rw-r--r--fs/ext4/migrate.c7
-rw-r--r--fs/ext4/move_extent.c90
-rw-r--r--fs/ext4/namei.c16
-rw-r--r--fs/ext4/readpage.c16
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c65
-rw-r--r--fs/ext4/verity.c8
-rw-r--r--fs/ext4/xattr.c31
-rw-r--r--fs/ext4/xattr.h7
-rw-r--r--fs/f2fs/checkpoint.c17
-rw-r--r--fs/f2fs/compress.c63
-rw-r--r--fs/f2fs/data.c243
-rw-r--r--fs/f2fs/debug.c2
-rw-r--r--fs/f2fs/dir.c8
-rw-r--r--fs/f2fs/extent_cache.c4
-rw-r--r--fs/f2fs/f2fs.h148
-rw-r--r--fs/f2fs/file.c205
-rw-r--r--fs/f2fs/gc.c113
-rw-r--r--fs/f2fs/gc.h29
-rw-r--r--fs/f2fs/inline.c31
-rw-r--r--fs/f2fs/inode.c9
-rw-r--r--fs/f2fs/namei.c68
-rw-r--r--fs/f2fs/node.c46
-rw-r--r--fs/f2fs/segment.c72
-rw-r--r--fs/f2fs/segment.h5
-rw-r--r--fs/f2fs/super.c127
-rw-r--r--fs/f2fs/sysfs.c82
-rw-r--r--fs/f2fs/verity.c13
-rw-r--r--fs/f2fs/xattr.c14
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fcntl.c224
-rw-r--r--fs/fhandle.c33
-rw-r--r--fs/file.c28
-rw-r--r--fs/file_table.c26
-rw-r--r--fs/fs-writeback.c67
-rw-r--r--fs/fsopen.c6
-rw-r--r--fs/fuse/Makefile3
-rw-r--r--fs/fuse/acl.c10
-rw-r--r--fs/fuse/dev.c220
-rw-r--r--fs/fuse/dir.c152
-rw-r--r--fs/fuse/file.c231
-rw-r--r--fs/fuse/fuse_i.h42
-rw-r--r--fs/fuse/fuse_trace.h132
-rw-r--r--fs/fuse/inode.c13
-rw-r--r--fs/fuse/passthrough.c7
-rw-r--r--fs/fuse/virtio_fs.c206
-rw-r--r--fs/gfs2/aops.c30
-rw-r--r--fs/gfs2/file.c2
-rw-r--r--fs/gfs2/glock.c9
-rw-r--r--fs/gfs2/log.c12
-rw-r--r--fs/gfs2/meta_io.c24
-rw-r--r--fs/gfs2/ops_fstype.c3
-rw-r--r--fs/hfs/extent.c6
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/inode.c5
-rw-r--r--fs/hfsplus/extents.c6
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c5
-rw-r--r--fs/hostfs/hostfs_kern.c23
-rw-r--r--fs/hpfs/file.c9
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c124
-rw-r--r--fs/internal.h1
-rw-r--r--fs/ioctl.c30
-rw-r--r--fs/iomap/buffered-io.c201
-rw-r--r--fs/iomap/direct-io.c42
-rw-r--r--fs/isofs/rock.h2
-rw-r--r--fs/jbd2/checkpoint.c21
-rw-r--r--fs/jbd2/journal.c97
-rw-r--r--fs/jffs2/file.c88
-rw-r--r--fs/jffs2/gc.c25
-rw-r--r--fs/jfs/inode.c8
-rw-r--r--fs/jfs/jfs_discard.c11
-rw-r--r--fs/jfs/jfs_dmap.c11
-rw-r--r--fs/jfs/jfs_imap.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/kernel_read_file.c4
-rw-r--r--fs/libfs.c41
-rw-r--r--fs/lockd/host.c2
-rw-r--r--fs/lockd/svc.c9
-rw-r--r--fs/locks.c20
-rw-r--r--fs/minix/dir.c134
-rw-r--r--fs/minix/inode.c8
-rw-r--r--fs/minix/minix.h40
-rw-r--r--fs/minix/namei.c32
-rw-r--r--fs/mnt_idmapping.c34
-rw-r--r--fs/mount.h14
-rw-r--r--fs/namei.c95
-rw-r--r--fs/namespace.c108
-rw-r--r--fs/netfs/Makefile4
-rw-r--r--fs/netfs/buffered_read.c766
-rw-r--r--fs/netfs/buffered_write.c310
-rw-r--r--fs/netfs/direct_read.c147
-rw-r--r--fs/netfs/internal.h43
-rw-r--r--fs/netfs/io.c804
-rw-r--r--fs/netfs/iterator.c50
-rw-r--r--fs/netfs/locking.c22
-rw-r--r--fs/netfs/main.c11
-rw-r--r--fs/netfs/misc.c94
-rw-r--r--fs/netfs/objects.c16
-rw-r--r--fs/netfs/read_collect.c544
-rw-r--r--fs/netfs/read_pgpriv2.c264
-rw-r--r--fs/netfs/read_retry.c256
-rw-r--r--fs/netfs/stats.c27
-rw-r--r--fs/netfs/write_collect.c246
-rw-r--r--fs/netfs/write_issue.c93
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/client.c21
-rw-r--r--fs/nfs/dir.c6
-rw-r--r--fs/nfs/file.c7
-rw-r--r--fs/nfs/filelayout/filelayout.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c56
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c6
-rw-r--r--fs/nfs/fs_context.c8
-rw-r--r--fs/nfs/fscache.c19
-rw-r--r--fs/nfs/fscache.h7
-rw-r--r--fs/nfs/getroot.c2
-rw-r--r--fs/nfs/inode.c53
-rw-r--r--fs/nfs/internal.h54
-rw-r--r--fs/nfs/localio.c757
-rw-r--r--fs/nfs/nfs2xdr.c70
-rw-r--r--fs/nfs/nfs3xdr.c108
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c16
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c101
-rw-r--r--fs/nfs/nfstrace.h61
-rw-r--r--fs/nfs/pagelist.c16
-rw-r--r--fs/nfs/pnfs_nfs.c2
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/super.c3
-rw-r--r--fs/nfs/write.c21
-rw-r--r--fs/nfs_common/Makefile5
-rw-r--r--fs/nfs_common/common.c134
-rw-r--r--fs/nfs_common/nfslocalio.c172
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/Makefile1
-rw-r--r--fs/nfsd/auth.c14
-rw-r--r--fs/nfsd/auth.h2
-rw-r--r--fs/nfsd/blocklayout.c6
-rw-r--r--fs/nfsd/blocklayoutxdr.h2
-rw-r--r--fs/nfsd/cache.h2
-rw-r--r--fs/nfsd/export.c67
-rw-r--r--fs/nfsd/export.h7
-rw-r--r--fs/nfsd/filecache.c137
-rw-r--r--fs/nfsd/filecache.h6
-rw-r--r--fs/nfsd/localio.c169
-rw-r--r--fs/nfsd/netns.h19
-rw-r--r--fs/nfsd/nfs3proc.c44
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4idmap.c13
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4proc.c69
-rw-r--r--fs/nfsd/nfs4recover.c13
-rw-r--r--fs/nfsd/nfs4state.c219
-rw-r--r--fs/nfsd/nfs4xdr.c29
-rw-r--r--fs/nfsd/nfsctl.c46
-rw-r--r--fs/nfsd/nfsd.h50
-rw-r--r--fs/nfsd/nfsfh.c185
-rw-r--r--fs/nfsd/nfsfh.h4
-rw-r--r--fs/nfsd/nfsproc.c49
-rw-r--r--fs/nfsd/nfssvc.c231
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/trace.h145
-rw-r--r--fs/nfsd/vfs.c45
-rw-r--r--fs/nfsd/vfs.h6
-rw-r--r--fs/nfsd/xdr4.h1
-rw-r--r--fs/nilfs2/alloc.h2
-rw-r--r--fs/nilfs2/bmap.c2
-rw-r--r--fs/nilfs2/bmap.h20
-rw-r--r--fs/nilfs2/btnode.c63
-rw-r--r--fs/nilfs2/btree.c12
-rw-r--r--fs/nilfs2/btree.h1
-rw-r--r--fs/nilfs2/cpfile.c54
-rw-r--r--fs/nilfs2/dat.c17
-rw-r--r--fs/nilfs2/dir.c48
-rw-r--r--fs/nilfs2/inode.c89
-rw-r--r--fs/nilfs2/ioctl.c109
-rw-r--r--fs/nilfs2/mdt.c6
-rw-r--r--fs/nilfs2/nilfs.h27
-rw-r--r--fs/nilfs2/page.c21
-rw-r--r--fs/nilfs2/page.h4
-rw-r--r--fs/nilfs2/recovery.c27
-rw-r--r--fs/nilfs2/segment.c234
-rw-r--r--fs/nilfs2/segment.h10
-rw-r--r--fs/nilfs2/sufile.c52
-rw-r--r--fs/nilfs2/super.c9
-rw-r--r--fs/nilfs2/the_nilfs.c5
-rw-r--r--fs/nilfs2/the_nilfs.h6
-rw-r--r--fs/notify/dnotify/dnotify.c6
-rw-r--r--fs/notify/fanotify/fanotify_user.c12
-rw-r--r--fs/notify/inotify/inotify_user.c12
-rw-r--r--fs/nsfs.c102
-rw-r--r--fs/ntfs3/file.c9
-rw-r--r--fs/ntfs3/inode.c51
-rw-r--r--fs/ntfs3/ntfs_fs.h5
-rw-r--r--fs/ocfs2/aops.c14
-rw-r--r--fs/ocfs2/aops.h2
-rw-r--r--fs/ocfs2/buffer_head_io.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c6
-rw-r--r--fs/ocfs2/dir.c15
-rw-r--r--fs/ocfs2/dlmglue.c7
-rw-r--r--fs/ocfs2/file.c28
-rw-r--r--fs/ocfs2/file.h1
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/localalloc.c19
-rw-r--r--fs/ocfs2/mmap.c6
-rw-r--r--fs/ocfs2/quota_global.c15
-rw-r--r--fs/ocfs2/quota_local.c8
-rw-r--r--fs/ocfs2/refcounttree.c13
-rw-r--r--fs/ocfs2/super.c10
-rw-r--r--fs/ocfs2/xattr.c15
-rw-r--r--fs/omfs/file.c4
-rw-r--r--fs/open.c75
-rw-r--r--fs/orangefs/inode.c39
-rw-r--r--fs/orangefs/orangefs-sysfs.c14
-rw-r--r--fs/overlayfs/copy_up.c49
-rw-r--r--fs/overlayfs/file.c68
-rw-r--r--fs/overlayfs/params.c38
-rw-r--r--fs/overlayfs/super.c10
-rw-r--r--fs/pipe.c10
-rw-r--r--fs/posix_acl.c4
-rw-r--r--fs/proc/base.c118
-rw-r--r--fs/proc/consoles.c7
-rw-r--r--fs/proc/fd.c18
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/inode.c31
-rw-r--r--fs/proc/internal.h16
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/page.c11
-rw-r--r--fs/proc/proc_sysctl.c11
-rw-r--r--fs/proc/task_mmu.c21
-rw-r--r--fs/pstore/platform.c18
-rw-r--r--fs/qnx6/dir.c88
-rw-r--r--fs/qnx6/inode.c25
-rw-r--r--fs/qnx6/namei.c4
-rw-r--r--fs/qnx6/qnx6.h9
-rw-r--r--fs/quota/dquot.c14
-rw-r--r--fs/quota/quota.c8
-rw-r--r--fs/quota/quota_v1.c3
-rw-r--r--fs/quota/quota_v2.c9
-rw-r--r--fs/read_write.c291
-rw-r--r--fs/readdir.c20
-rw-r--r--fs/reiserfs/inode.c57
-rw-r--r--fs/remap_range.c2
-rw-r--r--fs/select.c25
-rw-r--r--fs/signalfd.c10
-rw-r--r--fs/smb/client/Kconfig14
-rw-r--r--fs/smb/client/Makefile2
-rw-r--r--fs/smb/client/cifs_debug.c7
-rw-r--r--fs/smb/client/cifsacl.c226
-rw-r--r--fs/smb/client/cifsacl.h99
-rw-r--r--fs/smb/client/cifsencrypt.c144
-rw-r--r--fs/smb/client/cifsfs.h5
-rw-r--r--fs/smb/client/cifsglob.h32
-rw-r--r--fs/smb/client/cifspdu.h6
-rw-r--r--fs/smb/client/cifsproto.h40
-rw-r--r--fs/smb/client/cifssmb.c25
-rw-r--r--fs/smb/client/compress.c390
-rw-r--r--fs/smb/client/compress.h90
-rw-r--r--fs/smb/client/compress/lz77.c235
-rw-r--r--fs/smb/client/compress/lz77.h15
-rw-r--r--fs/smb/client/connect.c71
-rw-r--r--fs/smb/client/dfs.c73
-rw-r--r--fs/smb/client/dfs.h42
-rw-r--r--fs/smb/client/dfs_cache.c218
-rw-r--r--fs/smb/client/file.c103
-rw-r--r--fs/smb/client/fs_context.c20
-rw-r--r--fs/smb/client/fs_context.h1
-rw-r--r--fs/smb/client/inode.c61
-rw-r--r--fs/smb/client/ioctl.c8
-rw-r--r--fs/smb/client/link.c3
-rw-r--r--fs/smb/client/misc.c15
-rw-r--r--fs/smb/client/namespace.c2
-rw-r--r--fs/smb/client/reparse.c10
-rw-r--r--fs/smb/client/reparse.h9
-rw-r--r--fs/smb/client/smb1ops.c2
-rw-r--r--fs/smb/client/smb2file.c6
-rw-r--r--fs/smb/client/smb2inode.c6
-rw-r--r--fs/smb/client/smb2maperror.c2
-rw-r--r--fs/smb/client/smb2misc.c2
-rw-r--r--fs/smb/client/smb2ops.c320
-rw-r--r--fs/smb/client/smb2pdu.c66
-rw-r--r--fs/smb/client/smb2pdu.h8
-rw-r--r--fs/smb/client/smb2proto.h2
-rw-r--r--fs/smb/client/smb2transport.c2
-rw-r--r--fs/smb/client/smbdirect.c88
-rw-r--r--fs/smb/client/trace.h6
-rw-r--r--fs/smb/client/transport.c7
-rw-r--r--fs/smb/client/xattr.c4
-rw-r--r--fs/smb/common/smb2status.h (renamed from fs/smb/client/smb2status.h)6
-rw-r--r--fs/smb/common/smbacl.h121
-rw-r--r--fs/smb/server/connection.c4
-rw-r--r--fs/smb/server/connection.h1
-rw-r--r--fs/smb/server/oplock.c57
-rw-r--r--fs/smb/server/server.c2
-rw-r--r--fs/smb/server/smb2misc.c2
-rw-r--r--fs/smb/server/smb2pdu.c2
-rw-r--r--fs/smb/server/smb_common.c2
-rw-r--r--fs/smb/server/smbacl.h111
-rw-r--r--fs/smb/server/smbstatus.h1822
-rw-r--r--fs/smb/server/transport_rdma.c2
-rw-r--r--fs/smb/server/vfs.c19
-rw-r--r--fs/smb/server/vfs_cache.c3
-rw-r--r--fs/splice.c22
-rw-r--r--fs/squashfs/decompressor_multi_percpu.c6
-rw-r--r--fs/squashfs/file.c86
-rw-r--r--fs/squashfs/file_direct.c19
-rw-r--r--fs/squashfs/page_actor.c11
-rw-r--r--fs/squashfs/page_actor.h6
-rw-r--r--fs/stat.c8
-rw-r--r--fs/statfs.c4
-rw-r--r--fs/super.c4
-rw-r--r--fs/sync.c14
-rw-r--r--fs/sysv/dir.c158
-rw-r--r--fs/sysv/itree.c8
-rw-r--r--fs/sysv/namei.c32
-rw-r--r--fs/sysv/sysv.h20
-rw-r--r--fs/timerfd.c8
-rw-r--r--fs/ubifs/dir.c64
-rw-r--r--fs/ubifs/file.c13
-rw-r--r--fs/udf/dir.c28
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c12
-rw-r--r--fs/ufs/dir.c259
-rw-r--r--fs/ufs/inode.c12
-rw-r--r--fs/ufs/namei.c39
-rw-r--r--fs/ufs/ufs.h20
-rw-r--r--fs/ufs/util.h6
-rw-r--r--fs/userfaultfd.c171
-rw-r--r--fs/utimes.c4
-rw-r--r--fs/vboxsf/file.c24
-rw-r--r--fs/verity/signature.c18
-rw-r--r--fs/xattr.c36
-rw-r--r--fs/xfs/libxfs/xfs_ag.c94
-rw-r--r--fs/xfs/libxfs/xfs_ag.h14
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c23
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c103
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c24
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.h207
-rw-r--r--fs/xfs/libxfs/xfs_defer.c1
-rw-r--r--fs/xfs/libxfs/xfs_fs.h31
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c14
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c40
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.c2
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c5
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c7
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c270
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h61
-rw-r--r--fs/xfs/libxfs/xfs_sb.c92
-rw-r--r--fs/xfs/libxfs/xfs_sb.h3
-rw-r--r--fs/xfs/libxfs/xfs_shared.h3
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c4
-rw-r--r--fs/xfs/libxfs/xfs_types.h12
-rw-r--r--fs/xfs/scrub/bmap_repair.c2
-rw-r--r--fs/xfs/scrub/common.h29
-rw-r--r--fs/xfs/scrub/inode_repair.c12
-rw-r--r--fs/xfs/scrub/rtsummary.c11
-rw-r--r--fs/xfs/scrub/rtsummary.h2
-rw-r--r--fs/xfs/scrub/rtsummary_repair.c12
-rw-r--r--fs/xfs/scrub/scrub.h29
-rw-r--r--fs/xfs/scrub/tempfile.c2
-rw-r--r--fs/xfs/scrub/xfile.c6
-rw-r--r--fs/xfs/xfs_bmap_item.c17
-rw-r--r--fs/xfs/xfs_bmap_util.c49
-rw-r--r--fs/xfs/xfs_buf.h2
-rw-r--r--fs/xfs/xfs_buf_mem.c2
-rw-r--r--fs/xfs/xfs_discard.c17
-rw-r--r--fs/xfs/xfs_exchrange.c147
-rw-r--r--fs/xfs/xfs_exchrange.h16
-rw-r--r--fs/xfs/xfs_file.c427
-rw-r--r--fs/xfs/xfs_fsmap.c403
-rw-r--r--fs/xfs/xfs_fsmap.h6
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_handle.c6
-rw-r--r--fs/xfs/xfs_icache.c95
-rw-r--r--fs/xfs/xfs_inode.c86
-rw-r--r--fs/xfs/xfs_inode.h12
-rw-r--r--fs/xfs/xfs_ioctl.c162
-rw-r--r--fs/xfs/xfs_iomap.c19
-rw-r--r--fs/xfs/xfs_iops.c12
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_mount.c10
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_mru_cache.c3
-rw-r--r--fs/xfs/xfs_qm.c48
-rw-r--r--fs/xfs/xfs_qm.h3
-rw-r--r--fs/xfs/xfs_qm_syscalls.c13
-rw-r--r--fs/xfs/xfs_quotaops.c55
-rw-r--r--fs/xfs/xfs_rtalloc.c864
-rw-r--r--fs/xfs/xfs_super.c41
-rw-r--r--fs/xfs/xfs_symlink.c2
-rw-r--r--fs/xfs/xfs_trace.h61
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--include/acpi/acconfig.h1
-rw-r--r--include/acpi/acoutput.h5
-rw-r--r--include/acpi/acpi_bus.h9
-rw-r--r--include/acpi/acpixf.h10
-rw-r--r--include/acpi/actbl1.h10
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h12
-rw-r--r--include/acpi/actypes.h1
-rw-r--r--include/acpi/cppc_acpi.h43
-rw-r--r--include/acpi/platform/acenv.h6
-rw-r--r--include/asm-generic/mm_hooks.h11
-rw-r--r--include/asm-generic/mmzone.h5
-rw-r--r--include/asm-generic/numa.h8
-rw-r--r--include/asm-generic/unaligned.h11
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/crypto/internal/simd.h12
-rw-r--r--include/drm/display/drm_dp.h4
-rw-r--r--include/drm/display/drm_dp_helper.h3
-rw-r--r--include/drm/display/drm_dp_mst_helper.h14
-rw-r--r--include/drm/drm_accel.h21
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_connector.h8
-rw-r--r--include/drm/drm_device.h5
-rw-r--r--include/drm/drm_drv.h28
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h6
-rw-r--r--include/drm/drm_file.h5
-rw-r--r--include/drm/drm_fixed.h3
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_dma_helper.h1
-rw-r--r--include/drm/drm_mipi_dsi.h12
-rw-r--r--include/drm/drm_mode_config.h16
-rw-r--r--include/drm/drm_panic.h21
-rw-r--r--include/drm/drm_prime.h3
-rw-r--r--include/drm/drm_print.h54
-rw-r--r--include/drm/drm_rect.h15
-rw-r--r--include/drm/drm_vblank.h37
-rw-r--r--include/drm/gpu_scheduler.h2
-rw-r--r--include/drm/ttm/ttm_bo.h48
-rw-r--r--include/drm/ttm/ttm_resource.h97
-rw-r--r--include/dt-bindings/arm/qcom,ids.h4
-rw-r--r--include/dt-bindings/clock/at91.h4
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h7
-rw-r--r--include/dt-bindings/clock/exynos7885.h32
-rw-r--r--include/dt-bindings/clock/exynos850.h1
-rw-r--r--include/dt-bindings/clock/nxp,imx95-clock.h3
-rw-r--r--include/dt-bindings/clock/px30-cru.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h5
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h5
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-camcc.h106
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-dispcc.h51
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gpucc.h62
-rw-r--r--include/dt-bindings/clock/qcom,sm8150-camcc.h135
l---------[-rw-r--r--]include/dt-bindings/clock/qcom,sm8650-dispcc.h103
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g057-cpg.h21
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3228-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3308-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h4
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h592
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h11
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h191
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq5332.h46
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h2
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h2
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1800b.h63
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1812h.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv18xx.h19
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2000.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2002.h79
-rw-r--r--include/dt-bindings/power/rockchip,rk3576-power.h30
-rw-r--r--include/dt-bindings/reset/rockchip,rk3576-cru.h564
-rw-r--r--include/dt-bindings/soc/qe-fsl,tsa.h13
-rw-r--r--include/keys/dns_resolver-type.h4
-rw-r--r--include/kunit/clk.h28
-rw-r--r--include/kunit/of.h115
-rw-r--r--include/kunit/platform_device.h20
-rw-r--r--include/kunit/visibility.h1
-rw-r--r--include/kvm/arm_pmu.h8
-rw-r--r--include/linux/acpi.h12
-rw-r--r--include/linux/acpi_pmtmr.h13
-rw-r--r--include/linux/alloc_tag.h26
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/args.h6
-rw-r--r--include/linux/arm-smccc.h88
-rw-r--r--include/linux/arm_ffa.h12
-rw-r--r--include/linux/avf/virtchnl.h13
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/blk-integrity.h15
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h7
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpf.h39
-rw-r--r--include/linux/bpf_lsm.h8
-rw-r--r--include/linux/bpf_verifier.h27
-rw-r--r--include/linux/btf.h5
-rw-r--r--include/linux/buffer_head.h17
-rw-r--r--include/linux/buildid.h4
-rw-r--r--include/linux/cgroup-defs.h25
-rw-r--r--include/linux/cgroup.h7
-rw-r--r--include/linux/cleanup.h2
-rw-r--r--include/linux/clk-provider.h14
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/cma.h16
-rw-r--r--include/linux/compiler.h3
-rw-r--r--include/linux/compiler_types.h7
-rw-r--r--include/linux/console.h158
-rw-r--r--include/linux/context_tracking.h32
-rw-r--r--include/linux/context_tracking_state.h60
-rw-r--r--include/linux/coredump.h30
-rw-r--r--include/linux/cpufreq.h6
-rw-r--r--include/linux/cpuhotplug.h5
-rw-r--r--include/linux/cpuset.h10
-rw-r--r--include/linux/damon.h3
-rw-r--r--include/linux/decompress/unxz.h5
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-fence-array.h6
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/dma-map-ops.h38
-rw-r--r--include/linux/dma-mapping.h25
-rw-r--r--include/linux/dma/ipu-dma.h174
-rw-r--r--include/linux/dma/k3-udma-glue.h2
-rw-r--r--include/linux/dpll.h15
-rw-r--r--include/linux/entry-common.h2
-rw-r--r--include/linux/err.h9
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/ethtool.h7
-rw-r--r--include/linux/ethtool_netlink.h29
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/falloc.h18
-rw-r--r--include/linux/fault-inject.h36
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/file.h55
-rw-r--r--include/linux/filelock.h14
-rw-r--r--include/linux/filter.h14
-rw-r--r--include/linux/firewire.h22
-rw-r--r--include/linux/firmware/imx/sm.h23
-rw-r--r--include/linux/folio_queue.h156
-rw-r--r--include/linux/fs.h211
-rw-r--r--include/linux/generic-radix-tree.h105
-rw-r--r--include/linux/gfp.h25
-rw-r--r--include/linux/gfp_types.h8
-rw-r--r--include/linux/gpio.h12
-rw-r--r--include/linux/hid.h12
-rw-r--r--include/linux/hid_bpf.h2
-rw-r--r--include/linux/hidraw.h1
-rw-r--r--include/linux/huge_mm.h158
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/i3c/master.h16
-rw-r--r--include/linux/i8253.h2
-rw-r--r--include/linux/if_rmnet.h2
-rw-r--r--include/linux/input/matrix_keypad.h48
-rw-r--r--include/linux/intel_vsec.h (renamed from drivers/platform/x86/intel/vsec.h)47
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/io-pgtable.h4
-rw-r--r--include/linux/io_uring/cmd.h15
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/iomap.h13
-rw-r--r--include/linux/iommu-dma.h69
-rw-r--r--include/linux/iommufd.h12
-rw-r--r--include/linux/ioprio.h2
-rw-r--r--include/linux/ioremap.h1
-rw-r--r--include/linux/iov_iter.h104
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h6
-rw-r--r--include/linux/irqchip/riscv-imsic.h9
-rw-r--r--include/linux/irqdomain.h8
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/kasan.h63
-rw-r--r--include/linux/kernel-page-flags.h3
-rw-r--r--include/linux/key.h3
-rw-r--r--include/linux/kfence.h2
-rw-r--r--include/linux/khugepaged.h1
-rw-r--r--include/linux/kmsg_dump.h22
-rw-r--r--include/linux/kprobes.h9
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/libata.h157
-rw-r--r--include/linux/linkmode.h5
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lru_cache.h4
-rw-r--r--include/linux/lsm_count.h135
-rw-r--r--include/linux/lsm_hook_defs.h22
-rw-r--r--include/linux/lsm_hooks.h129
-rw-r--r--include/linux/maple_tree.h20
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h67
-rw-r--r--include/linux/memory_hotplug.h48
-rw-r--r--include/linux/mfd/88pm80x.h2
-rw-r--r--include/linux/mfd/adp5585.h126
-rw-r--r--include/linux/mfd/axp20x.h27
-rw-r--r--include/linux/mfd/ds1wm.h29
-rw-r--r--include/linux/mfd/max77693-private.h5
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mii.h7
-rw-r--r--include/linux/mlx5/device.h39
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mlx5/fs.h3
-rw-r--r--include/linux/mlx5/mlx5_ifc.h366
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/mm.h339
-rw-r--r--include/linux/mm_types.h22
-rw-r--r--include/linux/mm_types_task.h3
-rw-r--r--include/linux/mmc/core.h12
-rw-r--r--include/linux/mmc/host.h28
-rw-r--r--include/linux/mmzone.h35
-rw-r--r--include/linux/mnt_idmapping.h1
-rw-r--r--include/linux/mnt_namespace.h4
-rw-r--r--include/linux/mpi.h192
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/mtd/nand.h90
-rw-r--r--include/linux/mtd/spinand.h19
-rw-r--r--include/linux/mv643xx.h921
-rw-r--r--include/linux/net.h19
-rw-r--r--include/linux/netdev_features.h16
-rw-r--r--include/linux/netdevice.h131
-rw-r--r--include/linux/netfs.h46
-rw-r--r--include/linux/netpoll.h1
-rw-r--r--include/linux/nfs.h9
-rw-r--r--include/linux/nfs4.h17
-rw-r--r--include/linux/nfs_common.h17
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_xdr.h22
-rw-r--r--include/linux/nfslocalio.h74
-rw-r--r--include/linux/numa.h8
-rw-r--r--include/linux/numa_memblks.h58
-rw-r--r--include/linux/nvme-keyring.h6
-rw-r--r--include/linux/nvme-rdma.h6
-rw-r--r--include/linux/nvme.h8
-rw-r--r--include/linux/oa_tc6.h24
-rw-r--r--include/linux/omap-gpmc.h10
-rw-r--r--include/linux/page-flags.h202
-rw-r--r--include/linux/page_counter.h27
-rw-r--r--include/linux/pagemap.h126
-rw-r--r--include/linux/pagewalk.h58
-rw-r--r--include/linux/path.h6
-rw-r--r--include/linux/pci-ats.h3
-rw-r--r--include/linux/pci-epc.h3
-rw-r--r--include/linux/pci.h11
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/percpu-rwsem.h2
-rw-r--r--include/linux/percpu.h1
-rw-r--r--include/linux/perf/arm_pmu.h10
-rw-r--r--include/linux/perf/arm_pmuv3.h9
-rw-r--r--include/linux/perf_event.h40
-rw-r--r--include/linux/pgalloc_tag.h31
-rw-r--r--include/linux/pgtable.h18
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/phy_link_topology.h82
-rw-r--r--include/linux/phylink.h2
-rw-r--r--include/linux/pinctrl/pinconf-generic.h3
-rw-r--r--include/linux/platform_data/amd_qdma.h36
-rw-r--r--include/linux/platform_data/cyttsp4.h62
-rw-r--r--include/linux/platform_data/gpio-ath79.h16
-rw-r--r--include/linux/platform_data/gpio-davinci.h21
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/max6697.h33
-rw-r--r--include/linux/platform_data/mcs.h30
-rw-r--r--include/linux/platform_data/microchip-ksz.h4
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h36
-rw-r--r--include/linux/platform_data/mtd-davinci.h88
-rw-r--r--include/linux/platform_data/ti-aemif.h45
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h38
-rw-r--r--include/linux/platform_data/x86/intel-mid_wdt.h (renamed from include/linux/platform_data/intel-mid_wdt.h)6
-rw-r--r--include/linux/platform_data/x86/intel_scu_ipc.h (renamed from arch/x86/include/asm/intel_scu_ipc.h)4
-rw-r--r--include/linux/platform_data/zforce_ts.h15
-rw-r--r--include/linux/pm_domain.h16
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/printk.h33
-rw-r--r--include/linux/prmt.h5
-rw-r--r--include/linux/pstore.h2
-rw-r--r--include/linux/ptp_clock_kernel.h36
-rw-r--r--include/linux/pwm.h10
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/ratelimit_types.h2
-rw-r--r--include/linux/rbtree.h67
-rw-r--r--include/linux/rcu_segcblist.h6
-rw-r--r--include/linux/rculist.h9
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/rcutiny.h7
-rw-r--r--include/linux/rcutree.h3
-rw-r--r--include/linux/regmap.h4
-rw-r--r--include/linux/rfkill.h5
-rw-r--r--include/linux/ring_buffer.h20
-rw-r--r--include/linux/rmap.h11
-rw-r--r--include/linux/rpmb.h123
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/sched.h34
-rw-r--r--include/linux/sched/deadline.h14
-rw-r--r--include/linux/sched/ext.h215
-rw-r--r--include/linux/sched/mm.h27
-rw-r--r--include/linux/sched/prio.h1
-rw-r--r--include/linux/sched/rt.h33
-rw-r--r--include/linux/sched/signal.h7
-rw-r--r--include/linux/sched/task.h8
-rw-r--r--include/linux/sched/task_stack.h18
-rw-r--r--include/linux/scmi_imx_protocol.h59
-rw-r--r--include/linux/security.h59
-rw-r--r--include/linux/seqlock.h25
-rw-r--r--include/linux/serial_core.h117
-rw-r--r--include/linux/set_memory.h8
-rw-r--r--include/linux/sfp.h8
-rw-r--r--include/linux/shmem_fs.h15
-rw-r--r--include/linux/skbuff.h64
-rw-r--r--include/linux/skbuff_ref.h9
-rw-r--r--include/linux/slab.h245
-rw-r--r--include/linux/smp.h8
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/soundwire/sdw.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h8
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/srcutree.h15
-rw-r--r--include/linux/stmmac.h29
-rw-r--r--include/linux/string.h24
-rw-r--r--include/linux/string_choices.h29
-rw-r--r--include/linux/sungem_phy.h2
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h51
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/linux/sunrpc/svcauth.h6
-rw-r--r--include/linux/sunrpc/svcsock.h2
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h26
-rw-r--r--include/linux/swap.h44
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/tee_core.h12
-rw-r--r--include/linux/thermal.h31
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/tracepoint.h20
-rw-r--r--include/linux/uaccess.h7
-rw-r--r--include/linux/ubsan.h5
-rw-r--r--include/linux/uio.h18
-rw-r--r--include/linux/union_find.h41
-rw-r--r--include/linux/unroll.h36
-rw-r--r--include/linux/uprobes.h48
-rw-r--r--include/linux/user_namespace.h6
-rw-r--r--include/linux/userfaultfd_k.h19
-rw-r--r--include/linux/vdpa.h9
-rw-r--r--include/linux/virtio.h11
-rw-r--r--include/linux/virtio_net.h3
-rw-r--r--include/linux/virtio_vsock.h6
-rw-r--r--include/linux/vm_event_item.h26
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/vmstat.h1
-rw-r--r--include/linux/workqueue.h41
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/linux/xz.h81
-rw-r--r--include/linux/zstd.h167
-rw-r--r--include/linux/zswap.h16
-rw-r--r--include/media/cec.h33
-rw-r--r--include/media/rc-core.h2
-rw-r--r--include/media/v4l2-mc.h3
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/net/addrconf.h16
-rw-r--r--include/net/af_vsock.h3
-rw-r--r--include/net/bluetooth/hci.h5
-rw-r--r--include/net/bluetooth/hci_core.h4
-rw-r--r--include/net/bluetooth/l2cap.h4
-rw-r--r--include/net/bond_3ad.h5
-rw-r--r--include/net/bond_alb.h2
-rw-r--r--include/net/busy_poll.h2
-rw-r--r--include/net/caif/caif_layer.h4
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/net/cfg80211.h25
-rw-r--r--include/net/dropreason-core.h6
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/dst_cache.h2
-rw-r--r--include/net/dst_metadata.h7
-rw-r--r--include/net/erspan.h4
-rw-r--r--include/net/hwbm.h4
-rw-r--r--include/net/inet6_hashtables.h14
-rw-r--r--include/net/inet_hashtables.h10
-rw-r--r--include/net/inet_sock.h3
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/ip.h10
-rw-r--r--include/net/ip_fib.h7
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ipv6.h16
-rw-r--r--include/net/ipv6_stubs.h2
-rw-r--r--include/net/iucv/iucv.h2
-rw-r--r--include/net/iw_handler.h12
-rw-r--r--include/net/lib80211.h8
-rw-r--r--include/net/libeth/tx.h129
-rw-r--r--include/net/libeth/types.h25
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/mac80211.h45
-rw-r--r--include/net/mac802154.h4
-rw-r--r--include/net/mana/mana.h23
-rw-r--r--include/net/mptcp.h4
-rw-r--r--include/net/ndisc.h15
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netdev_rx_queue.h7
-rw-r--r--include/net/netfilter/nf_conntrack_count.h6
-rw-r--r--include/net/netfilter/nf_tables.h48
-rw-r--r--include/net/netfilter/nf_tproxy.h1
-rw-r--r--include/net/netfilter/nft_fib.h4
-rw-r--r--include/net/netfilter/nft_meta.h3
-rw-r--r--include/net/netfilter/nft_reject.h3
-rw-r--r--include/net/netlabel.h2
-rw-r--r--include/net/netlink.h16
-rw-r--r--include/net/netmem.h132
-rw-r--r--include/net/netns/ipv4.h5
-rw-r--r--include/net/netns/sctp.h4
-rw-r--r--include/net/nexthop.h4
-rw-r--r--include/net/nfc/nci.h2
-rw-r--r--include/net/nfc/nfc.h8
-rw-r--r--include/net/nl802154.h2
-rw-r--r--include/net/page_pool/helpers.h39
-rw-r--r--include/net/page_pool/types.h23
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/red.h8
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/route.h5
-rw-r--r--include/net/rstreason.h39
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/structs.h20
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/sock_reuseport.h2
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/udp.h16
-rw-r--r--include/net/x25.h2
-rw-r--r--include/net/xfrm.h45
-rw-r--r--include/rdma/ib_umem.h18
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/rdma/rdma_netlink.h12
-rw-r--r--include/scsi/fcoe_sysfs.h2
-rw-r--r--include/scsi/scsi_dbg.h7
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/scsi/scsi_transport_fc.h6
-rw-r--r--include/soc/fsl/qe/qe.h23
-rw-r--r--include/sound/aci.h1
-rw-r--r--include/sound/asoundef.h6
-rw-r--r--include/sound/control.h27
-rw-r--r--include/sound/core.h69
-rw-r--r--include/sound/cs35l56.h6
-rw-r--r--include/sound/es1688.h1
-rw-r--r--include/sound/memalloc.h7
-rw-r--r--include/sound/pcm.h49
-rw-r--r--include/sound/seq_kernel.h4
-rw-r--r--include/sound/snd_wavefront.h4
-rw-r--r--include/sound/soc-acpi-intel-match.h2
-rw-r--r--include/sound/soc-acpi.h6
-rw-r--r--include/sound/soc-card.h2
-rw-r--r--include/sound/soc-component.h3
-rw-r--r--include/sound/soc-dai.h6
-rw-r--r--include/sound/soc-dpcm.h19
-rw-r--r--include/sound/soc.h7
-rw-r--r--include/sound/soc_sdw_utils.h247
-rw-r--r--include/sound/soundfont.h6
-rw-r--r--include/sound/tas2563-tlv.h279
-rw-r--r--include/sound/tas2781-tlv.h260
-rw-r--r--include/sound/tas2781.h71
-rw-r--r--include/sound/ump.h12
-rw-r--r--include/sound/vx_core.h1
-rw-r--r--include/trace/events/asoc.h3
-rw-r--r--include/trace/events/btrfs.h18
-rw-r--r--include/trace/events/dma.h341
-rw-r--r--include/trace/events/ext4.h1
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/filemap.h84
-rw-r--r--include/trace/events/firewire.h4
-rw-r--r--include/trace/events/intel_ifs.h27
-rw-r--r--include/trace/events/mmflags.h40
-rw-r--r--include/trace/events/netfs.h144
-rw-r--r--include/trace/events/oom.h4
-rw-r--r--include/trace/events/page_pool.h12
-rw-r--r--include/trace/events/pwm.h10
-rw-r--r--include/trace/events/rcu.h20
-rw-r--r--include/trace/events/rpcrdma.h23
-rw-r--r--include/trace/events/sched_ext.h32
-rw-r--r--include/trace/events/tcp.h12
-rw-r--r--include/trace/events/writeback.h10
-rw-r--r--include/trace/misc/nfs.h1
-rw-r--r--include/uapi/asm-generic/socket.h6
-rw-r--r--include/uapi/drm/drm_fourcc.h25
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/msm_drm.h2
-rw-r--r--include/uapi/drm/xe_drm.h10
-rw-r--r--include/uapi/linux/audit.h3
-rw-r--r--include/uapi/linux/auto_fs.h2
-rw-r--r--include/uapi/linux/blkdev.h14
-rw-r--r--include/uapi/linux/bpf.h21
-rw-r--r--include/uapi/linux/cec.h9
-rw-r--r--include/uapi/linux/dpll.h3
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h16
-rw-r--r--include/uapi/linux/ethtool_netlink.h36
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/falloc.h1
-rw-r--r--include/uapi/linux/fcntl.h84
-rw-r--r--include/uapi/linux/fib_rules.h1
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/hidraw.h1
-rw-r--r--include/uapi/linux/if_packet.h7
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/io_uring.h42
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h6
-rw-r--r--include/uapi/linux/iommufd.h2
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h106
-rw-r--r--include/uapi/linux/landlock.h30
-rw-r--r--include/uapi/linux/libc-compat.h36
-rw-r--r--include/uapi/linux/lsm.h1
-rw-r--r--include/uapi/linux/mdio.h1
-rw-r--r--include/uapi/linux/nbd.h8
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netdev.h13
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/nexthop.h10
-rw-r--r--include/uapi/linux/nsfs.h15
-rw-r--r--include/uapi/linux/pci_regs.h41
-rw-r--r--include/uapi/linux/pkt_cls.h23
-rw-r--r--include/uapi/linux/ptp_clock.h24
-rw-r--r--include/uapi/linux/rkisp1-config.h578
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sched/types.h6
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/smc.h6
-rw-r--r--include/uapi/linux/spi/spi.h5
-rw-r--r--include/uapi/linux/uio.h18
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h5
-rw-r--r--include/uapi/linux/vdpa.h1
-rw-r--r--include/uapi/linux/videodev2.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/linux/virtio_gpu.h1
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h13
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h9
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h4
-rw-r--r--include/uapi/rdma/rdma_netlink.h16
-rw-r--r--include/uapi/sound/asequencer.h2
-rw-r--r--include/uapi/sound/asound.h17
-rw-r--r--include/ufs/ufs.h4
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/ufs/ufshci.h5
-rw-r--r--include/vdso/getrandom.h28
-rw-r--r--include/vdso/helpers.h1
-rw-r--r--include/vdso/unaligned.h15
-rw-r--r--include/video/vga.h58
-rw-r--r--init/Kconfig62
-rw-r--r--init/init_task.c14
-rw-r--r--init/initramfs.c3
-rw-r--r--init/main.c6
-rw-r--r--io_uring/Makefile4
-rw-r--r--io_uring/eventfd.c13
-rw-r--r--io_uring/fdinfo.c17
-rw-r--r--io_uring/io-wq.c25
-rw-r--r--io_uring/io_uring.c256
-rw-r--r--io_uring/io_uring.h13
-rw-r--r--io_uring/kbuf.c96
-rw-r--r--io_uring/kbuf.h94
-rw-r--r--io_uring/napi.c35
-rw-r--r--io_uring/napi.h16
-rw-r--r--io_uring/net.c27
-rw-r--r--io_uring/register.c91
-rw-r--r--io_uring/register.h1
-rw-r--r--io_uring/rsrc.c248
-rw-r--r--io_uring/rsrc.h15
-rw-r--r--io_uring/rw.c19
-rw-r--r--io_uring/sqpoll.c31
-rw-r--r--io_uring/uring_cmd.c7
-rw-r--r--ipc/mqueue.c50
-rw-r--r--ipc/shm.c8
-rw-r--r--kernel/Kconfig.preempt27
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/audit.c4
-rw-r--r--kernel/auditfilter.c2
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/Makefile6
-rw-r--r--kernel/bpf/arraymap.c17
-rw-r--r--kernel/bpf/bpf_inode_storage.c28
-rw-r--r--kernel/bpf/bpf_lsm.c65
-rw-r--r--kernel/bpf/bpf_struct_ops.c9
-rw-r--r--kernel/bpf/btf.c175
-rw-r--r--kernel/bpf/btf_iter.c2
-rw-r--r--kernel/bpf/btf_relocate.c2
-rw-r--r--kernel/bpf/cgroup.c2
-rw-r--r--kernel/bpf/core.c21
-rw-r--r--kernel/bpf/cpumap.c6
-rw-r--r--kernel/bpf/hashtab.c16
-rw-r--r--kernel/bpf/helpers.c94
-rw-r--r--kernel/bpf/inode.c4
-rw-r--r--kernel/bpf/local_storage.c4
-rw-r--r--kernel/bpf/map_in_map.c38
-rw-r--r--kernel/bpf/memalloc.c12
-rw-r--r--kernel/bpf/relo_core.c2
-rw-r--r--kernel/bpf/reuseport_array.c2
-rw-r--r--kernel/bpf/stackmap.c131
-rw-r--r--kernel/bpf/syscall.c228
-rw-r--r--kernel/bpf/token.c78
-rw-r--r--kernel/bpf/verifier.c1437
-rw-r--r--kernel/cgroup/Makefile1
-rw-r--r--kernel/cgroup/cgroup-internal.h2
-rw-r--r--kernel/cgroup/cgroup-v1.c17
-rw-r--r--kernel/cgroup/cgroup.c95
-rw-r--r--kernel/cgroup/cpuset-internal.h305
-rw-r--r--kernel/cgroup/cpuset-v1.c562
-rw-r--r--kernel/cgroup/cpuset.c1155
-rw-r--r--kernel/cgroup/pids.c32
-rw-r--r--kernel/configs/tiny.config6
-rw-r--r--kernel/context_tracking.c140
-rw-r--r--kernel/cpu.c22
-rw-r--r--kernel/crash_core.c33
-rw-r--r--kernel/crash_reserve.c3
-rw-r--r--kernel/dma/Kconfig7
-rw-r--r--kernel/dma/Makefile4
-rw-r--r--kernel/dma/direct.c8
-rw-r--r--kernel/dma/dummy.c21
-rw-r--r--kernel/dma/mapping.c154
-rw-r--r--kernel/dma/ops_helpers.c14
-rw-r--r--kernel/dma/pool.c4
-rw-r--r--kernel/dma/remap.c6
-rw-r--r--kernel/dma/swiotlb.c6
-rw-r--r--kernel/entry/common.c2
-rw-r--r--kernel/events/core.c609
-rw-r--r--kernel/events/uprobes.c534
-rw-r--r--kernel/exit.c59
-rw-r--r--kernel/fork.c26
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/futex/core.c1
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/cpuhotplug.c4
-rw-r--r--kernel/irq/irq_sim.c1
-rw-r--r--kernel/irq/irqdomain.c210
-rw-r--r--kernel/irq/manage.c21
-rw-r--r--kernel/irq/migration.c4
-rw-r--r--kernel/irq/msi.c4
-rw-r--r--kernel/irq/proc.c17
-rw-r--r--kernel/kcov.c31
-rw-r--r--kernel/kcsan/debugfs.c2
-rw-r--r--kernel/kexec_internal.h3
-rw-r--r--kernel/kthread.c10
-rw-r--r--kernel/locking/lockdep.c83
-rw-r--r--kernel/locking/rtmutex.c4
-rw-r--r--kernel/locking/rwsem.c4
-rw-r--r--kernel/locking/test-ww_mutex.c1
-rw-r--r--kernel/locking/ww_mutex.h2
-rw-r--r--kernel/module/Kconfig1
-rw-r--r--kernel/module/Makefile2
-rw-r--r--kernel/module/main.c2
-rw-r--r--kernel/nsproxy.c12
-rw-r--r--kernel/numa.c26
-rw-r--r--kernel/padata.c6
-rw-r--r--kernel/panic.c11
-rw-r--r--kernel/pid.c10
-rw-r--r--kernel/power/hibernate.c26
-rw-r--r--kernel/power/main.c76
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/printk/internal.h207
-rw-r--r--kernel/printk/nbcon.c934
-rw-r--r--kernel/printk/printk.c724
-rw-r--r--kernel/printk/printk_ringbuffer.h7
-rw-r--r--kernel/printk/printk_safe.c25
-rw-r--r--kernel/rcu/rcu.h12
-rw-r--r--kernel/rcu/rcu_segcblist.c11
-rw-r--r--kernel/rcu/rcu_segcblist.h11
-rw-r--r--kernel/rcu/rcuscale.c214
-rw-r--r--kernel/rcu/rcutorture.c121
-rw-r--r--kernel/rcu/refscale.c67
-rw-r--r--kernel/rcu/srcutree.c11
-rw-r--r--kernel/rcu/tasks.h214
-rw-r--r--kernel/rcu/tiny.c2
-rw-r--r--kernel/rcu/tree.c283
-rw-r--r--kernel/rcu/tree.h10
-rw-r--r--kernel/rcu/tree_exp.h128
-rw-r--r--kernel/rcu/tree_nocb.h279
-rw-r--r--kernel/rcu/tree_plugin.h11
-rw-r--r--kernel/rcu/tree_stall.h25
-rw-r--r--kernel/resource.c71
-rw-r--r--kernel/resource_kunit.c143
-rw-r--r--kernel/sched/build_policy.c11
-rw-r--r--kernel/sched/core.c540
-rw-r--r--kernel/sched/cpufreq_schedutil.c56
-rw-r--r--kernel/sched/deadline.c503
-rw-r--r--kernel/sched/debug.c201
-rw-r--r--kernel/sched/ext.c7191
-rw-r--r--kernel/sched/ext.h91
-rw-r--r--kernel/sched/fair.c803
-rw-r--r--kernel/sched/features.h30
-rw-r--r--kernel/sched/idle.c25
-rw-r--r--kernel/sched/pelt.c20
-rw-r--r--kernel/sched/pelt.h1
-rw-r--r--kernel/sched/rt.c261
-rw-r--r--kernel/sched/sched.h301
-rw-r--r--kernel/sched/stop_task.c18
-rw-r--r--kernel/sched/syscalls.c147
-rw-r--r--kernel/sched/topology.c8
-rw-r--r--kernel/signal.c61
-rw-r--r--kernel/smp.c38
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/sys.c12
-rw-r--r--kernel/taskstats.c4
-rw-r--r--kernel/time/alarmtimer.c9
-rw-r--r--kernel/time/clockevents.c2
-rw-r--r--kernel/time/clocksource.c45
-rw-r--r--kernel/time/hrtimer.c26
-rw-r--r--kernel/time/ntp.c10
-rw-r--r--kernel/time/ntp_internal.h4
-rw-r--r--kernel/time/posix-cpu-timers.c207
-rw-r--r--kernel/time/posix-timers.c73
-rw-r--r--kernel/time/posix-timers.h3
-rw-r--r--kernel/time/timekeeping.c4
-rw-r--r--kernel/time/timer.c64
-rw-r--r--kernel/trace/bpf_trace.c146
-rw-r--r--kernel/trace/ring_buffer.c949
-rw-r--r--kernel/trace/trace.c376
-rw-r--r--kernel/trace/trace.h14
-rw-r--r--kernel/trace/trace_fprobe.c179
-rw-r--r--kernel/trace/trace_functions_graph.c23
-rw-r--r--kernel/trace/trace_osnoise.c14
-rw-r--r--kernel/trace/trace_output.c17
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_syscalls.c12
-rw-r--r--kernel/trace/trace_uprobe.c68
-rw-r--r--kernel/tracepoint.c42
-rw-r--r--kernel/user.c6
-rw-r--r--kernel/user_namespace.c5
-rw-r--r--kernel/vmcore_info.c8
-rw-r--r--kernel/watch_queue.c4
-rw-r--r--kernel/watchdog.c5
-rw-r--r--kernel/workqueue.c115
-rw-r--r--lib/.gitignore2
-rw-r--r--lib/Kconfig.debug63
-rw-r--r--lib/Kconfig.ubsan4
-rw-r--r--lib/Makefile41
-rw-r--r--lib/bcd.c4
-rw-r--r--lib/buildid.c397
-rw-r--r--lib/checksum_kunit.c9
-rw-r--r--lib/closure.c2
-rw-r--r--lib/crypto/mpi/Makefile2
-rw-r--r--lib/crypto/mpi/ec.c1507
-rw-r--r--lib/crypto/mpi/mpi-add.c89
-rw-r--r--lib/crypto/mpi/mpi-bit.c168
-rw-r--r--lib/crypto/mpi/mpi-cmp.c46
-rw-r--r--lib/crypto/mpi/mpi-div.c82
-rw-r--r--lib/crypto/mpi/mpi-internal.h21
-rw-r--r--lib/crypto/mpi/mpi-inv.c143
-rw-r--r--lib/crypto/mpi/mpi-mod.c148
-rw-r--r--lib/crypto/mpi/mpi-mul.c29
-rw-r--r--lib/crypto/mpi/mpicoder.c336
-rw-r--r--lib/crypto/mpi/mpih-mul.c25
-rw-r--r--lib/crypto/mpi/mpiutil.c184
-rw-r--r--lib/debugobjects.c27
-rw-r--r--lib/decompress_unxz.c40
-rw-r--r--lib/dim/Makefile2
-rw-r--r--lib/dump_stack.c1
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/fault-inject.c1
-rw-r--r--lib/fortify_kunit.c3
-rw-r--r--lib/generic-radix-tree.c80
-rw-r--r--lib/glob.c2
-rw-r--r--lib/iov_iter.c240
-rw-r--r--lib/irq_poll.c2
-rw-r--r--lib/kunit/Makefile4
-rw-r--r--lib/kunit/platform-test.c224
-rw-r--r--lib/kunit/platform.c302
-rw-r--r--lib/kunit_iov_iter.c259
-rw-r--r--lib/list-test.c10
-rw-r--r--lib/lru_cache.c10
-rw-r--r--lib/lz4/lz4hc_compress.c1
-rw-r--r--lib/maple_tree.c805
-rw-r--r--lib/math/Makefile2
-rw-r--r--lib/math/div64.c115
-rw-r--r--lib/math/test_mul_u64_u64_div_u64.c99
-rw-r--r--lib/math/tests/Makefile3
-rw-r--r--lib/math/tests/int_pow_kunit.c52
-rw-r--r--lib/percpu_counter.c2
-rw-r--r--lib/rhashtable.c2
-rw-r--r--lib/sbitmap.c4
-rw-r--r--lib/scatterlist.c69
-rw-r--r--lib/slub_kunit.c31
-rw-r--r--lib/string_helpers.c3
-rw-r--r--lib/strncpy_from_user.c9
-rw-r--r--lib/strnlen_user.c9
-rw-r--r--lib/test_bpf.c3
-rw-r--r--lib/test_fortify/.gitignore2
-rw-r--r--lib/test_fortify/Makefile28
-rw-r--r--lib/test_fortify/test_fortify.sh (renamed from scripts/test_fortify.sh)0
-rw-r--r--lib/test_fpu_glue.c2
-rw-r--r--lib/test_hmm.c5
-rw-r--r--lib/test_objpool.c3
-rw-r--r--lib/test_printf.c26
-rw-r--r--lib/union_find.c49
-rw-r--r--lib/vdso/Makefile1
-rw-r--r--lib/vdso/getrandom.c27
-rw-r--r--lib/vsprintf.c21
-rw-r--r--lib/xz/Kconfig13
-rw-r--r--lib/xz/xz_crc32.c11
-rw-r--r--lib/xz/xz_dec_bcj.c191
-rw-r--r--lib/xz/xz_dec_lzma2.c15
-rw-r--r--lib/xz/xz_dec_stream.c13
-rw-r--r--lib/xz/xz_dec_syms.c14
-rw-r--r--lib/xz/xz_dec_test.c12
-rw-r--r--lib/xz/xz_lzma2.h5
-rw-r--r--lib/xz/xz_private.h40
-rw-r--r--lib/xz/xz_stream.h5
-rw-r--r--lib/zstd/compress/zstd_compress.c2
-rw-r--r--lib/zstd/zstd_compress_module.c49
-rw-r--r--lib/zstd/zstd_decompress_module.c36
-rw-r--r--mm/Kconfig86
-rw-r--r--mm/Kconfig.debug32
-rw-r--r--mm/Makefile8
-rw-r--r--mm/cma.c57
-rw-r--r--mm/compaction.c47
-rw-r--r--mm/damon/core.c24
-rw-r--r--mm/damon/dbgfs.c2
-rw-r--r--mm/damon/sysfs.c2
-rw-r--r--mm/damon/tests/.kunitconfig22
-rw-r--r--mm/damon/tests/core-kunit.h (renamed from mm/damon/core-test.h)35
-rw-r--r--mm/damon/tests/dbgfs-kunit.h (renamed from mm/damon/dbgfs-test.h)10
-rw-r--r--mm/damon/tests/sysfs-kunit.h (renamed from mm/damon/sysfs-test.h)0
-rw-r--r--mm/damon/tests/vaddr-kunit.h (renamed from mm/damon/vaddr-test.h)2
-rw-r--r--mm/damon/vaddr.c4
-rw-r--r--mm/debug.c31
-rw-r--r--mm/debug_vm_pgtable.c50
-rw-r--r--mm/fadvise.c4
-rw-r--r--mm/fail_page_alloc.c1
-rw-r--r--mm/failslab.c1
-rw-r--r--mm/filemap.c132
-rw-r--r--mm/folio-compat.c12
-rw-r--r--mm/gup.c81
-rw-r--r--mm/huge_memory.c656
-rw-r--r--mm/hugetlb.c462
-rw-r--r--mm/hugetlb_cgroup.c4
-rw-r--r--mm/hugetlb_vmemmap.c40
-rw-r--r--mm/internal.h227
-rw-r--r--mm/kasan/Makefile8
-rw-r--r--mm/kasan/common.c62
-rw-r--r--mm/kasan/kasan.h6
-rw-r--r--mm/kasan/kasan_test_c.c (renamed from mm/kasan/kasan_test.c)57
-rw-r--r--mm/kasan/kasan_test_rust.rs21
-rw-r--r--mm/kfence/core.c53
-rw-r--r--mm/kfence/kfence.h1
-rw-r--r--mm/kfence/report.c15
-rw-r--r--mm/khugepaged.c75
-rw-r--r--mm/kmemleak.c159
-rw-r--r--mm/ksm.c146
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memblock.c19
-rw-r--r--mm/memcontrol-v1.c138
-rw-r--r--mm/memcontrol-v1.h26
-rw-r--r--mm/memcontrol.c490
-rw-r--r--mm/memory-failure.c92
-rw-r--r--mm/memory-tiers.c25
-rw-r--r--mm/memory.c597
-rw-r--r--mm/memory_hotplug.c85
-rw-r--r--mm/mempolicy.c8
-rw-r--r--mm/migrate.c270
-rw-r--r--mm/migrate_device.c108
-rw-r--r--mm/mm_init.c12
-rw-r--r--mm/mmap.c2162
-rw-r--r--mm/mmu_notifier.c2
-rw-r--r--mm/mmzone.c2
-rw-r--r--mm/mprotect.c86
-rw-r--r--mm/mremap.c32
-rw-r--r--mm/mseal.c57
-rw-r--r--mm/nommu.c11
-rw-r--r--mm/numa.c69
-rw-r--r--mm/numa_emulation.c (renamed from arch/x86/mm/numa_emulation.c)42
-rw-r--r--mm/numa_memblks.c571
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c351
-rw-r--r--mm/page_counter.c48
-rw-r--r--mm/page_io.c113
-rw-r--r--mm/page_isolation.c36
-rw-r--r--mm/pagewalk.c202
-rw-r--r--mm/percpu.c31
-rw-r--r--mm/readahead.c93
-rw-r--r--mm/rmap.c71
-rw-r--r--mm/shmem.c468
-rw-r--r--mm/shmem_quota.c3
-rw-r--r--mm/show_mem.c11
-rw-r--r--mm/shrinker_debug.c2
-rw-r--r--mm/slab.h11
-rw-r--r--mm/slab_common.c319
-rw-r--r--mm/slub.c410
-rw-r--r--mm/swap.c298
-rw-r--r--mm/swap.h44
-rw-r--r--mm/swap_cgroup.c2
-rw-r--r--mm/swap_state.c78
-rw-r--r--mm/swapfile.c1482
-rw-r--r--mm/userfaultfd.c170
-rw-r--r--mm/util.c102
-rw-r--r--mm/vma.c2068
-rw-r--r--mm/vma.h558
-rw-r--r--mm/vma_internal.h49
-rw-r--r--mm/vmalloc.c139
-rw-r--r--mm/vmscan.c69
-rw-r--r--mm/vmstat.c28
-rw-r--r--mm/z3fold.c2
-rw-r--r--mm/zsmalloc.c38
-rw-r--r--mm/zswap.c307
-rw-r--r--net/6lowpan/ndisc.c6
-rw-r--r--net/8021q/vlan_dev.c10
-rw-r--r--net/8021q/vlanproc.c4
-rw-r--r--net/Kconfig6
-rw-r--r--net/batman-adv/soft-interface.c5
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bluetooth/cmtp/Kconfig4
-rw-r--r--net/bluetooth/cmtp/capi.c32
-rw-r--r--net/bluetooth/hci_conn.c7
-rw-r--r--net/bluetooth/hci_sync.c5
-rw-r--r--net/bluetooth/leds.c2
-rw-r--r--net/bluetooth/mgmt.c13
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c2
-rw-r--r--net/bridge/br_device.c6
-rw-r--r--net/bridge/br_netfilter_hooks.c3
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c7
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c3
-rw-r--r--net/caif/cfpkt_skbuff.c6
-rw-r--r--net/caif/chnl_net.c2
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/j1939/transport.c8
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/dev.c93
-rw-r--r--net/core/dev_addr_lists.c6
-rw-r--r--net/core/dev_ioctl.c9
-rw-r--r--net/core/devmem.c389
-rw-r--r--net/core/devmem.h180
-rw-r--r--net/core/fib_rules.c9
-rw-r--r--net/core/filter.c100
-rw-r--r--net/core/gro.c5
-rw-r--r--net/core/lwt_bpf.c3
-rw-r--r--net/core/mp_dmabuf_devmem.h44
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-sysfs.c13
-rw-r--r--net/core/net_namespace.c90
-rw-r--r--net/core/netdev-genl-gen.c23
-rw-r--r--net/core/netdev-genl-gen.h6
-rw-r--r--net/core/netdev-genl.c147
-rw-r--r--net/core/netdev_rx_queue.c81
-rw-r--r--net/core/netmem_priv.h31
-rw-r--r--net/core/netpoll.c44
-rw-r--r--net/core/page_pool.c119
-rw-r--r--net/core/page_pool_priv.h46
-rw-r--r--net/core/page_pool_user.c32
-rw-r--r--net/core/pktgen.c10
-rw-r--r--net/core/rtnetlink.c5
-rw-r--r--net/core/skbuff.c136
-rw-r--r--net/core/skmsg.c2
-rw-r--r--net/core/sock.c76
-rw-r--r--net/core/sock_map.c24
-rw-r--r--net/core/sock_reuseport.c5
-rw-r--r--net/core/utils.c2
-rw-r--r--net/dsa/tag_ksz.c11
-rw-r--r--net/dsa/user.c3
-rw-r--r--net/ethtool/Makefile3
-rw-r--r--net/ethtool/cabletest.c57
-rw-r--r--net/ethtool/channels.c20
-rw-r--r--net/ethtool/cmis.h1
-rw-r--r--net/ethtool/cmis_cdb.c14
-rw-r--r--net/ethtool/common.c61
-rw-r--r--net/ethtool/common.h7
-rw-r--r--net/ethtool/ioctl.c44
-rw-r--r--net/ethtool/linkinfo.c2
-rw-r--r--net/ethtool/linkmodes.c2
-rw-r--r--net/ethtool/netlink.c68
-rw-r--r--net/ethtool/netlink.h37
-rw-r--r--net/ethtool/phy.c306
-rw-r--r--net/ethtool/plca.c30
-rw-r--r--net/ethtool/pse-pd.c38
-rw-r--r--net/ethtool/rss.c233
-rw-r--r--net/ethtool/strset.c27
-rw-r--r--net/handshake/netlink.c4
-rw-r--r--net/hsr/hsr_device.c17
-rw-r--r--net/hsr/hsr_main.h1
-rw-r--r--net/hsr/hsr_slave.c11
-rw-r--r--net/ieee802154/6lowpan/core.c2
-rw-r--r--net/ieee802154/core.c10
-rw-r--r--net/ipv4/Kconfig3
-rw-r--r--net/ipv4/bpf_tcp_ca.c26
-rw-r--r--net/ipv4/devinet.c53
-rw-r--r--net/ipv4/esp4.c3
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_rules.c54
-rw-r--r--net/ipv4/fib_semantics.c6
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/fou_core.c4
-rw-r--r--net/ipv4/icmp.c119
-rw-r--r--net/ipv4/inet_connection_sock.c7
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c12
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_input.c6
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_tunnel.c15
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c12
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c3
-rw-r--r--net/ipv4/netfilter/nf_dup_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c5
-rw-r--r--net/ipv4/nexthop.c55
-rw-r--r--net/ipv4/route.c16
-rw-r--r--net/ipv4/sysctl_net_ipv4.c32
-rw-r--r--net/ipv4/tcp.c291
-rw-r--r--net/ipv4/tcp_bpf.c4
-rw-r--r--net/ipv4/tcp_htcp.c2
-rw-r--r--net/ipv4/tcp_input.c13
-rw-r--r--net/ipv4/tcp_ipv4.c23
-rw-r--r--net/ipv4/tcp_metrics.c10
-rw-r--r--net/ipv4/tcp_minisocks.c33
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv4/tcp_timer.c7
-rw-r--r--net/ipv4/udp.c11
-rw-r--r--net/ipv4/udp_tunnel_core.c3
-rw-r--r--net/ipv6/addrconf.c26
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/fib6_rules.c43
-rw-r--r--net/ipv6/icmp.c28
-rw-r--r--net/ipv6/inet6_hashtables.c15
-rw-r--r--net/ipv6/ioam6_iptunnel.c86
-rw-r--r--net/ipv6/ip6_gre.c7
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_tunnel.c11
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/mcast.c5
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c4
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/rpl_iptunnel.c12
-rw-r--r--net/ipv6/sit.c11
-rw-r--r--net/ipv6/tcp_ipv6.c5
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/l2tp/l2tp_core.c382
-rw-r--r--net/l2tp/l2tp_core.h25
-rw-r--r--net/l2tp/l2tp_debugfs.c24
-rw-r--r--net/l2tp/l2tp_eth.c44
-rw-r--r--net/l2tp/l2tp_ip.c125
-rw-r--r--net/l2tp/l2tp_ip6.c123
-rw-r--r--net/l2tp/l2tp_netlink.c76
-rw-r--r--net/l2tp/l2tp_ppp.c154
-rw-r--r--net/mac80211/agg-rx.c15
-rw-r--r--net/mac80211/agg-tx.c15
-rw-r--r--net/mac80211/airtime.c140
-rw-r--r--net/mac80211/cfg.c49
-rw-r--r--net/mac80211/chan.c5
-rw-r--r--net/mac80211/ht.c15
-rw-r--r--net/mac80211/ieee80211_i.h33
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/link.c12
-rw-r--r--net/mac80211/main.c6
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mlme.c45
-rw-r--r--net/mac80211/offchannel.c1
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/scan.c16
-rw-r--r--net/mac80211/status.c1
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c100
-rw-r--r--net/mctp/af_mctp.c3
-rw-r--r--net/mpls/af_mpls.c6
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/mptcp/ctrl.c133
-rw-r--r--net/mptcp/mib.c7
-rw-r--r--net/mptcp/mib.h7
-rw-r--r--net/mptcp/pm.c11
-rw-r--r--net/mptcp/pm_netlink.c91
-rw-r--r--net/mptcp/pm_userspace.c40
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/mptcp/protocol.h33
-rw-r--r--net/mptcp/subflow.c54
-rw-r--r--net/netfilter/core.c4
-rw-r--r--net/netfilter/nf_conncount.c15
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c9
-rw-r--r--net/netfilter/nf_flow_table_core.c6
-rw-r--r--net/netfilter/nf_flow_table_inet.c2
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nf_tables_api.c201
-rw-r--r--net/netfilter/nf_tables_core.c2
-rw-r--r--net/netfilter/nfnetlink.c14
-rw-r--r--net/netfilter/nfnetlink_queue.c12
-rw-r--r--net/netfilter/nft_bitwise.c4
-rw-r--r--net/netfilter/nft_byteorder.c2
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_compat.c6
-rw-r--r--net/netfilter/nft_counter.c90
-rw-r--r--net/netfilter/nft_ct.c2
-rw-r--r--net/netfilter/nft_dup_netdev.c2
-rw-r--r--net/netfilter/nft_dynset.c22
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/nft_fib.c3
-rw-r--r--net/netfilter/nft_flow_offload.c6
-rw-r--r--net/netfilter/nft_fwd_netdev.c9
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/nft_immediate.c3
-rw-r--r--net/netfilter/nft_lookup.c5
-rw-r--r--net/netfilter/nft_masq.c7
-rw-r--r--net/netfilter/nft_meta.c8
-rw-r--r--net/netfilter/nft_nat.c11
-rw-r--r--net/netfilter/nft_objref.c2
-rw-r--r--net/netfilter/nft_osf.c3
-rw-r--r--net/netfilter/nft_payload.c2
-rw-r--r--net/netfilter/nft_queue.c5
-rw-r--r--net/netfilter/nft_range.c2
-rw-r--r--net/netfilter/nft_redir.c7
-rw-r--r--net/netfilter/nft_reject.c3
-rw-r--r--net/netfilter/nft_reject_inet.c3
-rw-r--r--net/netfilter/nft_reject_netdev.c3
-rw-r--r--net/netfilter/nft_rt.c3
-rw-r--r--net/netfilter/nft_socket.c51
-rw-r--r--net/netfilter/nft_synproxy.c3
-rw-r--r--net/netfilter/nft_tproxy.c7
-rw-r--r--net/netfilter/nft_xfrm.c3
-rw-r--r--net/netfilter/xt_connlimit.c15
-rw-r--r--net/netlink/af_netlink.h1
-rw-r--r--net/netrom/nr_route.c4
-rw-r--r--net/openvswitch/actions.c8
-rw-r--r--net/openvswitch/conntrack.c35
-rw-r--r--net/openvswitch/datapath.h3
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/openvswitch/vport-internal_dev.c11
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rds/Kconfig9
-rw-r--r--net/rds/Makefile5
-rw-r--r--net/rds/ib.h4
-rw-r--r--net/rfkill/core.c8
-rw-r--r--net/rfkill/rfkill-gpio.c18
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/sched/act_ct.c4
-rw-r--r--net/sched/act_vlan.c1
-rw-r--r--net/sched/sch_cake.c53
-rw-r--r--net/sched/sch_taprio.c4
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/smc/af_smc.c8
-rw-r--r--net/smc/smc_clc.h4
-rw-r--r--net/smc/smc_core.c72
-rw-r--r--net/smc/smc_core.h2
-rw-r--r--net/smc/smc_loopback.h1
-rw-r--r--net/smc/smc_pnet.c3
-rw-r--r--net/smc/smc_stats.c6
-rw-r--r--net/smc/smc_stats.h28
-rw-r--r--net/smc/smc_sysctl.c11
-rw-r--r--net/socket.c24
-rw-r--r--net/sunrpc/cache.c10
-rw-r--r--net/sunrpc/clnt.c13
-rw-r--r--net/sunrpc/sunrpc.h4
-rw-r--r--net/sunrpc/svc.c198
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcauth.c29
-rw-r--r--net/sunrpc/svcauth_unix.c3
-rw-r--r--net/sunrpc/svcsock.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c18
-rw-r--r--net/tipc/bcast.c2
-rw-r--r--net/tipc/bearer.c10
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/tls/tls_sw.c2
-rw-r--r--net/unix/af_unix.c80
-rw-r--r--net/unix/garbage.c16
-rw-r--r--net/vmw_vsock/af_vsock.c58
-rw-r--r--net/vmw_vsock/virtio_transport.c148
-rw-r--r--net/vmw_vsock/virtio_transport_common.c35
-rw-r--r--net/vmw_vsock/vsock_loopback.c6
-rw-r--r--net/wireless/core.c10
-rw-r--r--net/wireless/core.h8
-rw-r--r--net/wireless/ibss.c2
-rw-r--r--net/wireless/lib80211.c10
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c2
-rw-r--r--net/wireless/lib80211_crypt_wep.c2
-rw-r--r--net/wireless/mesh.c2
-rw-r--r--net/wireless/mlme.c20
-rw-r--r--net/wireless/nl80211.c77
-rw-r--r--net/wireless/rdev-ops.h13
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/scan.c45
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/trace.h40
-rw-r--r--net/wireless/util.c14
-rw-r--r--net/xdp/xsk.c23
-rw-r--r--net/xdp/xsk_buff_pool.c40
-rw-r--r--net/xdp/xsk_queue.h5
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_interface_core.c2
-rw-r--r--net/xfrm/xfrm_policy.c225
-rw-r--r--rust/Makefile56
-rw-r--r--rust/bindings/bindings_helper.h2
-rw-r--r--rust/exports.c1
-rw-r--r--rust/helpers.c239
-rw-r--r--rust/helpers/blk.c14
-rw-r--r--rust/helpers/bug.c8
-rw-r--r--rust/helpers/build_assert.c25
-rw-r--r--rust/helpers/build_bug.c9
-rw-r--r--rust/helpers/err.c19
-rw-r--r--rust/helpers/helpers.c26
-rw-r--r--rust/helpers/kunit.c9
-rw-r--r--rust/helpers/mutex.c9
-rw-r--r--rust/helpers/page.c19
-rw-r--r--rust/helpers/rbtree.c9
-rw-r--r--rust/helpers/refcount.c19
-rw-r--r--rust/helpers/signal.c9
-rw-r--r--rust/helpers/slab.c9
-rw-r--r--rust/helpers/spinlock.c24
-rw-r--r--rust/helpers/task.c19
-rw-r--r--rust/helpers/uaccess.c15
-rw-r--r--rust/helpers/wait.c9
-rw-r--r--rust/helpers/workqueue.c15
-rw-r--r--rust/kernel/alloc/box_ext.rs33
-rw-r--r--rust/kernel/error.rs5
-rw-r--r--rust/kernel/init.rs191
-rw-r--r--rust/kernel/init/__internal.rs29
-rw-r--r--rust/kernel/lib.rs3
-rw-r--r--rust/kernel/list.rs686
-rw-r--r--rust/kernel/list/arc.rs521
-rw-r--r--rust/kernel/list/arc_field.rs96
-rw-r--r--rust/kernel/list/impl_list_item_mod.rs274
-rw-r--r--rust/kernel/net/phy.rs90
-rw-r--r--rust/kernel/net/phy/reg.rs224
-rw-r--r--rust/kernel/prelude.rs2
-rw-r--r--rust/kernel/print.rs20
-rw-r--r--rust/kernel/rbtree.rs1278
-rw-r--r--rust/kernel/sizes.rs26
-rw-r--r--rust/kernel/std_vendor.rs2
-rw-r--r--rust/kernel/sync/arc.rs25
-rw-r--r--rust/kernel/types.rs63
-rw-r--r--rust/macros/lib.rs4
-rw-r--r--rust/macros/module.rs12
-rw-r--r--rust/uapi/uapi_helper.h1
-rw-r--r--samples/bpf/Makefile9
-rw-r--r--samples/bpf/tracex2.bpf.c99
-rw-r--r--samples/bpf/tracex2_user.c187
-rw-r--r--samples/bpf/tracex4.bpf.c4
-rw-r--r--samples/kmemleak/kmemleak-test.c2
-rw-r--r--samples/landlock/sandboxer.c73
-rw-r--r--scripts/Kconfig.include8
-rw-r--r--scripts/Makefile1
-rw-r--r--scripts/Makefile.build67
-rw-r--r--scripts/Makefile.compiler15
-rw-r--r--scripts/Makefile.dtbs142
-rw-r--r--scripts/Makefile.host5
-rw-r--r--scripts/Makefile.kasan84
-rw-r--r--scripts/Makefile.lib135
-rw-r--r--scripts/Makefile.modfinal9
-rw-r--r--scripts/Makefile.modinst8
-rw-r--r--scripts/Makefile.package3
-rw-r--r--scripts/Makefile.vmlinux18
-rw-r--r--scripts/Makefile.vmlinux_o3
-rw-r--r--scripts/basic/fixdep.c15
-rwxr-xr-xscripts/checktransupdate.py214
-rw-r--r--scripts/coccinelle/api/string_choices.cocci46
-rwxr-xr-xscripts/decode_stacktrace.sh51
-rw-r--r--scripts/dtc/checks.c16
-rwxr-xr-xscripts/dtc/dt-extract-compatibles13
-rw-r--r--scripts/dtc/fdtoverlay.c2
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--scripts/gdb/linux/kasan.py44
-rw-r--r--scripts/gdb/linux/proc.py4
-rw-r--r--scripts/gdb/linux/rbtree.py12
-rw-r--r--scripts/gdb/linux/stackdepot.py27
-rw-r--r--scripts/gdb/linux/timerlist.py31
-rw-r--r--scripts/gdb/vmlinux-gdb.py1
-rwxr-xr-xscripts/generate_builtin_ranges.awk508
-rw-r--r--scripts/generate_rust_target.rs98
-rwxr-xr-xscripts/get_maintainer.pl17
-rw-r--r--scripts/include/hash.h28
-rw-r--r--scripts/include/hashtable.h50
-rw-r--r--scripts/include/list.h69
-rw-r--r--scripts/include/xalloc.h53
-rw-r--r--scripts/ipe/Makefile2
-rw-r--r--scripts/ipe/polgen/.gitignore2
-rw-r--r--scripts/ipe/polgen/Makefile5
-rw-r--r--scripts/ipe/polgen/polgen.c145
-rw-r--r--scripts/kallsyms.c46
-rw-r--r--scripts/kconfig/confdata.c3
-rw-r--r--scripts/kconfig/expr.c482
-rw-r--r--scripts/kconfig/expr.h27
-rw-r--r--scripts/kconfig/internal.h6
-rw-r--r--scripts/kconfig/lexer.l1
-rw-r--r--scripts/kconfig/lkc.h6
-rw-r--r--scripts/kconfig/mconf.c1
-rw-r--r--scripts/kconfig/menu.c38
-rw-r--r--scripts/kconfig/nconf.c1
-rw-r--r--scripts/kconfig/nconf.gui.c1
-rw-r--r--scripts/kconfig/parser.y13
-rw-r--r--scripts/kconfig/preprocess.c1
-rw-r--r--scripts/kconfig/qconf.cc2
-rw-r--r--scripts/kconfig/symbol.c9
-rw-r--r--scripts/kconfig/util.c63
-rwxr-xr-xscripts/link-vmlinux.sh16
-rwxr-xr-xscripts/macro_checker.py131
-rw-r--r--scripts/mod/mk_elfconfig.c25
-rw-r--r--scripts/mod/modpost.c125
-rw-r--r--scripts/mod/modpost.h28
-rw-r--r--scripts/mod/sumversion.c6
-rw-r--r--scripts/mod/symsearch.c6
-rw-r--r--scripts/module-common.c25
-rw-r--r--scripts/package/PKGBUILD52
-rwxr-xr-xscripts/package/install-extmod-build55
-rwxr-xr-xscripts/remove-stale-files2
-rwxr-xr-xscripts/rustc-version.sh26
-rw-r--r--scripts/sign-file.c132
-rwxr-xr-xscripts/sphinx-pre-install2
-rw-r--r--scripts/ssl-common.h32
-rw-r--r--scripts/subarch.include2
-rwxr-xr-xscripts/verify_builtin_ranges.awk370
-rwxr-xr-xscripts/xz_wrap.sh158
-rw-r--r--security/Kconfig43
-rw-r--r--security/Makefile1
-rw-r--r--security/apparmor/include/net.h3
-rw-r--r--security/apparmor/lsm.c17
-rw-r--r--security/apparmor/net.c2
-rw-r--r--security/bpf/hooks.c1
-rw-r--r--security/commoncap.c11
-rw-r--r--security/inode.c27
-rw-r--r--security/integrity/evm/evm_main.c2
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_iint.c20
-rw-r--r--security/integrity/ima/ima_main.c6
-rw-r--r--security/ipe/.gitignore2
-rw-r--r--security/ipe/Kconfig97
-rw-r--r--security/ipe/Makefile31
-rw-r--r--security/ipe/audit.c292
-rw-r--r--security/ipe/audit.h19
-rw-r--r--security/ipe/digest.c118
-rw-r--r--security/ipe/digest.h26
-rw-r--r--security/ipe/eval.c393
-rw-r--r--security/ipe/eval.h70
-rw-r--r--security/ipe/fs.c247
-rw-r--r--security/ipe/fs.h16
-rw-r--r--security/ipe/hooks.c314
-rw-r--r--security/ipe/hooks.h52
-rw-r--r--security/ipe/ipe.c98
-rw-r--r--security/ipe/ipe.h26
-rw-r--r--security/ipe/policy.c227
-rw-r--r--security/ipe/policy.h98
-rw-r--r--security/ipe/policy_fs.c472
-rw-r--r--security/ipe/policy_parser.c559
-rw-r--r--security/ipe/policy_parser.h11
-rw-r--r--security/ipe/policy_tests.c297
-rw-r--r--security/landlock/cred.h2
-rw-r--r--security/landlock/fs.c34
-rw-r--r--security/landlock/fs.h7
-rw-r--r--security/landlock/limits.h3
-rw-r--r--security/landlock/ruleset.c7
-rw-r--r--security/landlock/ruleset.h24
-rw-r--r--security/landlock/syscalls.c39
-rw-r--r--security/landlock/task.c193
-rw-r--r--security/loadpin/loadpin.c4
-rw-r--r--security/lockdown/lockdown.c2
-rw-r--r--security/security.c615
-rw-r--r--security/selinux/avc.c20
-rw-r--r--security/selinux/hooks.c189
-rw-r--r--security/selinux/include/audit.h46
-rw-r--r--security/selinux/include/objsec.h28
-rw-r--r--security/selinux/netlabel.c43
-rw-r--r--security/selinux/ss/avtab.c7
-rw-r--r--security/selinux/ss/ebitmap.c4
-rw-r--r--security/selinux/ss/hashtab.c4
-rw-r--r--security/selinux/ss/services.c36
-rw-r--r--security/smack/smack.h12
-rw-r--r--security/smack/smack_lsm.c122
-rw-r--r--security/smack/smack_netfilter.c8
-rw-r--r--security/smack/smackfs.c2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c4
-rw-r--r--sound/core/Kconfig19
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/core/control.c178
-rw-r--r--sound/core/control_compat.c47
-rw-r--r--sound/core/control_led.c4
-rw-r--r--sound/core/init.c2
-rw-r--r--sound/core/memalloc.c214
-rw-r--r--sound/core/memalloc_local.h16
-rw-r--r--sound/core/misc.c71
-rw-r--r--sound/core/oss/mixer_oss.c10
-rw-r--r--sound/core/oss/pcm_plugin.h5
-rw-r--r--sound/core/pcm.c6
-rw-r--r--sound/core/pcm_lib.c3
-rw-r--r--sound/core/pcm_memory.c68
-rw-r--r--sound/core/pcm_misc.c18
-rw-r--r--sound/core/pcm_native.c24
-rw-r--r--sound/core/pcm_timer.c3
-rw-r--r--sound/core/seq/oss/seq_oss_device.h4
-rw-r--r--sound/core/seq/seq_clientmgr.c26
-rw-r--r--sound/core/seq/seq_ports.c5
-rw-r--r--sound/core/seq/seq_ports.h2
-rw-r--r--sound/core/seq/seq_queue.h1
-rw-r--r--sound/core/seq/seq_timer.h2
-rw-r--r--sound/core/seq/seq_ump_client.c79
-rw-r--r--sound/core/seq/seq_ump_convert.c11
-rw-r--r--sound/core/timer.c229
-rw-r--r--sound/core/ump.c122
-rw-r--r--sound/drivers/aloop.c7
-rw-r--r--sound/drivers/dummy.c5
-rw-r--r--sound/drivers/mpu401/mpu401.c16
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c31
-rw-r--r--sound/drivers/mtpav.c14
-rw-r--r--sound/drivers/mts64.c18
-rw-r--r--sound/drivers/opl3/opl3_lib.c18
-rw-r--r--sound/drivers/opl3/opl3_midi.c95
-rw-r--r--sound/drivers/opl3/opl3_oss.c12
-rw-r--r--sound/drivers/opl3/opl3_synth.c4
-rw-r--r--sound/drivers/opl4/opl4_lib.c8
-rw-r--r--sound/drivers/opl4/yrw801.c2
-rw-r--r--sound/drivers/pcsp/pcsp.c21
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c38
-rw-r--r--sound/drivers/pcsp/pcsp_mixer.c2
-rw-r--r--sound/drivers/portman2x4.c19
-rw-r--r--sound/drivers/serial-u16550.c41
-rw-r--r--sound/drivers/virmidi.c8
-rw-r--r--sound/drivers/vx/vx_core.c64
-rw-r--r--sound/drivers/vx/vx_hwdep.c4
-rw-r--r--sound/drivers/vx/vx_pcm.c23
-rw-r--r--sound/drivers/vx/vx_uer.c3
-rw-r--r--sound/firewire/amdtp-stream.c34
-rw-r--r--sound/firewire/bebob/bebob_pcm.c1
-rw-r--r--sound/firewire/dice/dice-pcm.c1
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c1
-rw-r--r--sound/firewire/fireface/ff-pcm.c1
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c1
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/firewire/motu/motu-pcm.c1
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c1
-rw-r--r--sound/firewire/tascam/tascam-pcm.c1
-rw-r--r--sound/hda/Kconfig5
-rw-r--r--sound/hda/intel-dsp-config.c57
-rw-r--r--sound/hda/intel-sdw-acpi.c5
-rw-r--r--sound/i2c/cs8427.c37
-rw-r--r--sound/i2c/other/ak4113.c2
-rw-r--r--sound/i2c/other/ak4114.c12
-rw-r--r--sound/i2c/other/ak4117.c13
-rw-r--r--sound/i2c/other/ak4xxx-adda.c2
-rw-r--r--sound/i2c/other/pt2258.c6
-rw-r--r--sound/i2c/tea6330t.c3
-rw-r--r--sound/isa/ad1816a/ad1816a.c16
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c16
-rw-r--r--sound/isa/als100.c16
-rw-r--r--sound/isa/azt2320.c14
-rw-r--r--sound/isa/cmi8328.c42
-rw-r--r--sound/isa/cmi8330.c36
-rw-r--r--sound/isa/cs423x/cs4236.c31
-rw-r--r--sound/isa/cs423x/cs4236_lib.c56
-rw-r--r--sound/isa/es1688/es1688.c2
-rw-r--r--sound/isa/es1688/es1688_lib.c55
-rw-r--r--sound/isa/es18xx.c87
-rw-r--r--sound/isa/gus/gus_dma.c39
-rw-r--r--sound/isa/gus/gus_io.c215
-rw-r--r--sound/isa/gus/gus_irq.c7
-rw-r--r--sound/isa/gus/gus_main.c29
-rw-r--r--sound/isa/gus/gus_mem.c2
-rw-r--r--sound/isa/gus/gus_pcm.c33
-rw-r--r--sound/isa/gus/gus_reset.c8
-rw-r--r--sound/isa/gus/gus_uart.c21
-rw-r--r--sound/isa/gus/gus_volume.c7
-rw-r--r--sound/isa/gus/gusclassic.c4
-rw-r--r--sound/isa/gus/gusextreme.c4
-rw-r--r--sound/isa/gus/gusmax.c16
-rw-r--r--sound/isa/gus/interwave.c61
-rw-r--r--sound/isa/msnd/msnd.c46
-rw-r--r--sound/isa/msnd/msnd_midi.c4
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c184
-rw-r--r--sound/isa/opl3sa2.c46
-rw-r--r--sound/isa/opti9xx/miro.c163
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c62
-rw-r--r--sound/isa/sb/emu8000.c11
-rw-r--r--sound/isa/sb/emu8000_patch.c1
-rw-r--r--sound/isa/sb/emu8000_synth.c2
-rw-r--r--sound/isa/sb/jazz16.c49
-rw-r--r--sound/isa/sb/sb16.c42
-rw-r--r--sound/isa/sb/sb16_csp.c38
-rw-r--r--sound/isa/sb/sb16_main.c13
-rw-r--r--sound/isa/sb/sb8.c12
-rw-r--r--sound/isa/sb/sb_common.c27
-rw-r--r--sound/isa/sb/sb_mixer.c4
-rw-r--r--sound/isa/sc6000.c177
-rw-r--r--sound/isa/sscape.c96
-rw-r--r--sound/isa/wavefront/wavefront.c61
-rw-r--r--sound/isa/wavefront/wavefront_fx.c36
-rw-r--r--sound/isa/wavefront/wavefront_midi.c15
-rw-r--r--sound/isa/wavefront/wavefront_synth.c196
-rw-r--r--sound/isa/wss/wss_lib.c178
-rw-r--r--sound/pci/ali5451/ali5451.c2
-rw-r--r--sound/pci/asihpi/asihpi.c101
-rw-r--r--sound/pci/asihpi/hpimsgx.c2
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/azt3328.c2
-rw-r--r--sound/pci/cmipci.c32
-rw-r--r--sound/pci/emu10k1/emu10k1_patch.c5
-rw-r--r--sound/pci/emu10k1/emupcm.c31
-rw-r--r--sound/pci/hda/cs35l41_hda_spi.c1
-rw-r--r--sound/pci/hda/hda_component.c19
-rw-r--r--sound/pci/hda/hda_component.h2
-rw-r--r--sound/pci/hda/hda_intel.c23
-rw-r--r--sound/pci/hda/hda_intel.h1
-rw-r--r--sound/pci/hda/patch_cmedia.c269
-rw-r--r--sound/pci/hda/patch_realtek.c365
-rw-r--r--sound/pci/hda/samsung_helper.c310
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c2
-rw-r--r--sound/pci/intel8x0.c3
-rw-r--r--sound/pci/intel8x0m.c3
-rw-r--r--sound/pci/korg1212/korg1212.c30
-rw-r--r--sound/pci/lx6464es/lx_core.c8
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/pcxhr/pcxhr_mix22.c2
-rw-r--r--sound/pci/riptide/riptide.c193
-rw-r--r--sound/pci/rme9652/hdsp.c24
-rw-r--r--sound/pci/rme9652/hdspm.c36
-rw-r--r--sound/pci/trident/trident.h5
-rw-r--r--sound/pci/trident/trident_memory.c10
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c21
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_core.c36
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_irq.c3
-rw-r--r--sound/pcmcia/vx/vxp_ops.c10
-rw-r--r--sound/pcmcia/vx/vxpocket.c27
-rw-r--r--sound/ppc/awacs.c4
-rw-r--r--sound/ppc/daca.c2
-rw-r--r--sound/ppc/keywest.c5
-rw-r--r--sound/ppc/pmac.c52
-rw-r--r--sound/ppc/powermac.c2
-rw-r--r--sound/ppc/tumbler.c21
-rw-r--r--sound/sh/aica.c7
-rw-r--r--sound/sh/sh_dac_audio.c8
-rw-r--r--sound/soc/Kconfig2
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/adi/axi-i2s.c6
-rw-r--r--sound/soc/adi/axi-spdif.c2
-rw-r--r--sound/soc/amd/acp-config.c12
-rw-r--r--sound/soc/amd/acp-pcm-dma.c2
-rw-r--r--sound/soc/amd/acp/Kconfig22
-rw-r--r--sound/soc/amd/acp/Makefile4
-rw-r--r--sound/soc/amd/acp/acp-i2s.c188
-rw-r--r--sound/soc/amd/acp/acp-legacy-common.c40
-rw-r--r--sound/soc/amd/acp/acp-legacy-mach.c7
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c2
-rw-r--r--sound/soc/amd/acp/acp-mach.h1
-rw-r--r--sound/soc/amd/acp/acp-pci.c4
-rw-r--r--sound/soc/amd/acp/acp-pdm.c7
-rw-r--r--sound/soc/amd/acp/acp-platform.c117
-rw-r--r--sound/soc/amd/acp/acp-rembrandt.c2
-rw-r--r--sound/soc/amd/acp/acp-renoir.c2
-rw-r--r--sound/soc/amd/acp/acp-sdw-sof-mach.c509
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c7
-rw-r--r--sound/soc/amd/acp/acp63.c2
-rw-r--r--sound/soc/amd/acp/acp70.c87
-rw-r--r--sound/soc/amd/acp/amd-acp63-acpi-match.c90
-rw-r--r--sound/soc/amd/acp/amd.h27
-rw-r--r--sound/soc/amd/acp/chip_offset_byte.h84
-rw-r--r--sound/soc/amd/acp/soc_amd_sdw_common.h44
-rw-r--r--sound/soc/amd/mach-config.h2
-rw-r--r--sound/soc/amd/ps/ps-pdm-dma.c2
-rw-r--r--sound/soc/amd/ps/ps-sdw-dma.c2
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c2
-rw-r--r--sound/soc/amd/renoir/acp3x-pdm-dma.c2
-rw-r--r--sound/soc/amd/vangogh/acp5x-pcm-dma.c2
-rw-r--r--sound/soc/amd/yc/acp6x-pdm-dma.c2
-rw-r--r--sound/soc/apple/mca.c2
-rw-r--r--sound/soc/atmel/atmel-i2s.c2
-rw-r--r--sound/soc/atmel/atmel_wm8904.c2
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c44
-rw-r--r--sound/soc/atmel/mchp-pdmc.c99
-rw-r--r--sound/soc/atmel/mchp-spdifrx.c2
-rw-r--r--sound/soc/atmel/mchp-spdiftx.c2
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c2
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c2
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c2
-rw-r--r--sound/soc/au1x/ac97c.c2
-rw-r--r--sound/soc/au1x/i2sc.c2
-rw-r--r--sound/soc/au1x/psc-ac97.c2
-rw-r--r--sound/soc/au1x/psc-i2s.c2
-rw-r--r--sound/soc/bcm/bcm63xx-i2s-whistler.c2
-rw-r--r--sound/soc/bcm/cygnus-ssp.c2
-rw-r--r--sound/soc/cirrus/edb93xx.c2
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c2
-rw-r--r--sound/soc/codecs/Kconfig7
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/ak4613.c4
-rw-r--r--sound/soc/codecs/cs-amp-lib-test.c44
-rw-r--r--sound/soc/codecs/cs35l34.c21
-rw-r--r--sound/soc/codecs/cs35l36.c34
-rw-r--r--sound/soc/codecs/cs35l41.c34
-rw-r--r--sound/soc/codecs/cs35l45.c2
-rw-r--r--sound/soc/codecs/cs35l56-shared.c25
-rw-r--r--sound/soc/codecs/cs42l42-sdw.c12
-rw-r--r--sound/soc/codecs/cs42l43.c2
-rw-r--r--sound/soc/codecs/cs43130.c113
-rw-r--r--sound/soc/codecs/cs47l15.c2
-rw-r--r--sound/soc/codecs/cs47l24.c2
-rw-r--r--sound/soc/codecs/cs47l35.c2
-rw-r--r--sound/soc/codecs/cs47l85.c2
-rw-r--r--sound/soc/codecs/cs47l90.c2
-rw-r--r--sound/soc/codecs/cs47l92.c2
-rw-r--r--sound/soc/codecs/cs53l30.c24
-rw-r--r--sound/soc/codecs/es8326.c6
-rw-r--r--sound/soc/codecs/inno_rk3036.c2
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c2
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c2
-rw-r--r--sound/soc/codecs/lpass-va-macro.c2
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c27
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c2
-rw-r--r--sound/soc/codecs/mt6357.c1855
-rw-r--r--sound/soc/codecs/mt6357.h660
-rw-r--r--sound/soc/codecs/peb2466.c17
-rw-r--r--sound/soc/codecs/rk817_codec.c2
-rw-r--r--sound/soc/codecs/rt1318.c3
-rw-r--r--sound/soc/codecs/rt1320-sdw.c2147
-rw-r--r--sound/soc/codecs/rt1320-sdw.h3
-rw-r--r--sound/soc/codecs/rt5682.c4
-rw-r--r--sound/soc/codecs/rt5682s.c4
-rw-r--r--sound/soc/codecs/spdif_receiver.c3
-rw-r--r--sound/soc/codecs/spdif_transmitter.c3
-rw-r--r--sound/soc/codecs/sti-sas.c21
-rw-r--r--sound/soc/codecs/tas2552.c1
-rw-r--r--sound/soc/codecs/tas2764.c1
-rw-r--r--sound/soc/codecs/tas2770.c1
-rw-r--r--sound/soc/codecs/tas2780.c1
-rw-r--r--sound/soc/codecs/tas2781-comlib.c35
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c61
-rw-r--r--sound/soc/codecs/tas2781-i2c.c955
-rw-r--r--sound/soc/codecs/tas5086.c9
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c102
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c9
-rw-r--r--sound/soc/codecs/wcd934x.c4
-rw-r--r--sound/soc/codecs/wcd937x.c2
-rw-r--r--sound/soc/codecs/wcd937x.h34
-rw-r--r--sound/soc/codecs/wcd938x.c20
-rw-r--r--sound/soc/codecs/wcd938x.h4
-rw-r--r--sound/soc/codecs/wcd939x.c2
-rw-r--r--sound/soc/codecs/wcd939x.h6
-rw-r--r--sound/soc/codecs/wm5102.c2
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/codecs/wm8997.c2
-rw-r--r--sound/soc/codecs/wm8998.c2
-rw-r--r--sound/soc/codecs/wsa881x.c44
-rw-r--r--sound/soc/codecs/wsa883x.c75
-rw-r--r--sound/soc/codecs/wsa884x.c240
-rw-r--r--sound/soc/dwc/dwc-i2s.c18
-rw-r--r--sound/soc/fsl/fsl_asrc.c2
-rw-r--r--sound/soc/fsl/fsl_aud2htx.c2
-rw-r--r--sound/soc/fsl/fsl_audmix.c14
-rw-r--r--sound/soc/fsl/fsl_dma.c2
-rw-r--r--sound/soc/fsl/fsl_easrc.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_micfil.c2
-rw-r--r--sound/soc/fsl/fsl_mqs.c13
-rw-r--r--sound/soc/fsl/fsl_rpmsg.c11
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/fsl/fsl_spdif.c13
-rw-r--r--sound/soc/fsl/fsl_ssi.c8
-rw-r--r--sound/soc/fsl/fsl_xcvr.c4
-rw-r--r--sound/soc/fsl/imx-audmux.c8
-rw-r--r--sound/soc/fsl/imx-card.c3
-rw-r--r--sound/soc/fsl/imx-pcm-rpmsg.c16
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c2
-rw-r--r--sound/soc/fsl/lpc3xxx-i2s.c11
-rw-r--r--sound/soc/fsl/lpc3xxx-pcm.c2
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c2
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c4
-rw-r--r--sound/soc/fsl/p1022_ds.c2
-rw-r--r--sound/soc/fsl/p1022_rdk.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c2
-rw-r--r--sound/soc/generic/audio-graph-card.c16
-rw-r--r--sound/soc/generic/audio-graph-card2-custom-sample.c2
-rw-r--r--sound/soc/generic/audio-graph-card2.c15
-rw-r--r--sound/soc/generic/simple-card.c4
-rw-r--r--sound/soc/generic/test-component.c11
-rw-r--r--sound/soc/img/img-i2s-in.c2
-rw-r--r--sound/soc/img/img-i2s-out.c2
-rw-r--r--sound/soc/img/img-parallel-out.c2
-rw-r--r--sound/soc/img/img-spdif-in.c2
-rw-r--r--sound/soc/img/img-spdif-out.c2
-rw-r--r--sound/soc/img/pistachio-internal-dac.c2
-rw-r--r--sound/soc/intel/Kconfig120
-rw-r--r--sound/soc/intel/Makefile1
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c2
-rw-r--r--sound/soc/intel/avs/core.c8
-rw-r--r--sound/soc/intel/avs/pcm.c22
-rw-r--r--sound/soc/intel/boards/Kconfig156
-rw-r--r--sound/soc/intel/boards/Makefile31
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c720
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c670
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c4
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c4
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c4
-rw-r--r--sound/soc/intel/boards/ehl_rt5660.c3
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c688
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c1175
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c567
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c1073
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c869
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c168
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h67
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c242
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c704
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c751
-rw-r--r--sound/soc/intel/boards/skl_rt286.c568
-rw-r--r--sound/soc/intel/boards/sof_board_helpers.c152
-rw-r--r--sound/soc/intel/boards/sof_board_helpers.h3
-rw-r--r--sound/soc/intel/boards/sof_es8336.c14
-rw-r--r--sound/soc/intel/boards/sof_pcm512x.c5
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c7
-rw-r--r--sound/soc/intel/boards/sof_sdw.c1384
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h172
-rw-r--r--sound/soc/intel/boards/sof_sdw_hdmi.c16
-rw-r--r--sound/soc/intel/boards/sof_ssp_amp.c6
-rw-r--r--sound/soc/intel/boards/sof_wm8804.c6
-rw-r--r--sound/soc/intel/catpt/device.c2
-rw-r--r--sound/soc/intel/common/Makefile1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c105
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c263
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ehl-match.c1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c18
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c11
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c105
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ptl-match.c145
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c5
-rw-r--r--sound/soc/intel/skylake/Makefile15
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c629
-rw-r--r--sound/soc/intel/skylake/cnl-sst-dsp.c266
-rw-r--r--sound/soc/intel/skylake/cnl-sst-dsp.h103
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c508
-rw-r--r--sound/soc/intel/skylake/skl-debug.c248
-rw-r--r--sound/soc/intel/skylake/skl-i2s.h87
-rw-r--r--sound/soc/intel/skylake/skl-messages.c1419
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c269
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c1507
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.c428
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.h108
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c373
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.h243
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c462
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h256
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c1071
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h169
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c425
-rw-r--r--sound/soc/intel/skylake/skl-sst.c599
-rw-r--r--sound/soc/intel/skylake/skl-topology.c3605
-rw-r--r--sound/soc/intel/skylake/skl-topology.h524
-rw-r--r--sound/soc/intel/skylake/skl.c1177
-rw-r--r--sound/soc/intel/skylake/skl.h207
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c2
-rw-r--r--sound/soc/loongson/loongson_card.c125
-rw-r--r--sound/soc/loongson/loongson_dma.c25
-rw-r--r--sound/soc/loongson/loongson_i2s.c115
-rw-r--r--sound/soc/loongson/loongson_i2s.h24
-rw-r--r--sound/soc/loongson/loongson_i2s_pci.c51
-rw-r--r--sound/soc/mediatek/Kconfig20
-rw-r--r--sound/soc/mediatek/Makefile1
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c16
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt7986/mt7986-afe-pcm.c9
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c4
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8365/Makefile15
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-clk.c421
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-clk.h32
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-common.h448
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-pcm.c2274
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-adda.c311
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-dmic.c310
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-i2s.c846
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-pcm.c293
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-mt6357.c343
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-reg.h993
-rw-r--r--sound/soc/meson/aiu-fifo.h2
-rw-r--r--sound/soc/meson/aiu.c2
-rw-r--r--sound/soc/meson/axg-card.c16
-rw-r--r--sound/soc/meson/gx-card.c3
-rw-r--r--sound/soc/meson/meson-card-utils.c4
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c2
-rw-r--r--sound/soc/pxa/mmp-sspa.c2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c2
-rw-r--r--sound/soc/qcom/common.c1
-rw-r--r--sound/soc/qcom/lpass-apq8016.c2
-rw-r--r--sound/soc/qcom/lpass-ipq806x.c2
-rw-r--r--sound/soc/qcom/lpass-sc7180.c2
-rw-r--r--sound/soc/qcom/lpass-sc7280.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c31
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c2
-rw-r--r--sound/soc/qcom/sm8250.c8
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c2
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c2
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c2
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c2
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c2
-rw-r--r--sound/soc/samsung/arndale.c2
-rw-r--r--sound/soc/samsung/i2s.c2
-rw-r--r--sound/soc/samsung/odroid.c2
-rw-r--r--sound/soc/samsung/pcm.c2
-rw-r--r--sound/soc/samsung/snow.c2
-rw-r--r--sound/soc/samsung/spdif.c2
-rw-r--r--sound/soc/sdw_utils/Kconfig6
-rw-r--r--sound/soc/sdw_utils/Makefile11
-rw-r--r--sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c (renamed from sound/soc/intel/boards/bridge_cs35l56.c)56
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs42l42.c (renamed from sound/soc/intel/boards/sof_sdw_cs42l42.c)13
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs42l43.c (renamed from sound/soc/intel/boards/sof_sdw_cs42l43.c)38
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs_amp.c (renamed from sound/soc/intel/boards/sof_sdw_cs_amp.c)18
-rw-r--r--sound/soc/sdw_utils/soc_sdw_dmic.c (renamed from sound/soc/intel/boards/sof_sdw_dmic.c)10
-rw-r--r--sound/soc/sdw_utils/soc_sdw_maxim.c (renamed from sound/soc/intel/boards/sof_sdw_maxim.c)56
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt5682.c (renamed from sound/soc/intel/boards/sof_sdw_rt5682.c)12
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt700.c (renamed from sound/soc/intel/boards/sof_sdw_rt700.c)12
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt711.c (renamed from sound/soc/intel/boards/sof_sdw_rt711.c)38
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt712_sdca.c (renamed from sound/soc/intel/boards/sof_sdw_rt712_sdca.c)10
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt722_sdca.c (renamed from sound/soc/intel/boards/sof_sdw_rt722_sdca.c)10
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_amp.c (renamed from sound/soc/intel/boards/sof_sdw_rt_amp.c)43
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_amp_coeff_tables.h (renamed from sound/soc/intel/boards/sof_sdw_amp_coeff_tables.h)6
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_dmic.c (renamed from sound/soc/intel/boards/sof_sdw_rt_dmic.c)11
-rw-r--r--sound/soc/sdw_utils/soc_sdw_rt_sdca_jack_common.c (renamed from sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c)40
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c1170
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/fsi.c2
-rw-r--r--sound/soc/sh/hac.c2
-rw-r--r--sound/soc/sh/rcar/adg.c4
-rw-r--r--sound/soc/sh/rcar/core.c19
-rw-r--r--sound/soc/sh/rcar/dma.c75
-rw-r--r--sound/soc/sh/rcar/rsnd.h10
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/sh/rz-ssi.c259
-rw-r--r--sound/soc/sh/siu_dai.c2
-rw-r--r--sound/soc/soc-ac97.c4
-rw-r--r--sound/soc/soc-card-test.c57
-rw-r--r--sound/soc/soc-card.c10
-rw-r--r--sound/soc/soc-component.c12
-rw-r--r--sound/soc/soc-core.c21
-rw-r--r--sound/soc/soc-dai.c58
-rw-r--r--sound/soc/soc-dapm.c5
-rw-r--r--sound/soc/soc-pcm.c362
-rw-r--r--sound/soc/soc-topology-test.c132
-rw-r--r--sound/soc/soc-topology.c9
-rw-r--r--sound/soc/sof/amd/Kconfig11
-rw-r--r--sound/soc/sof/amd/Makefile4
-rw-r--r--sound/soc/sof/amd/acp-common.c3
-rw-r--r--sound/soc/sof/amd/acp-dsp-offset.h24
-rw-r--r--sound/soc/sof/amd/acp-loader.c2
-rw-r--r--sound/soc/sof/amd/acp.c88
-rw-r--r--sound/soc/sof/amd/acp.h11
-rw-r--r--sound/soc/sof/amd/acp70.c142
-rw-r--r--sound/soc/sof/amd/pci-acp63.c2
-rw-r--r--sound/soc/sof/amd/pci-acp70.c112
-rw-r--r--sound/soc/sof/amd/pci-rmb.c1
-rw-r--r--sound/soc/sof/amd/pci-rn.c1
-rw-r--r--sound/soc/sof/amd/pci-vangogh.c1
-rw-r--r--sound/soc/sof/imx/imx8.c2
-rw-r--r--sound/soc/sof/imx/imx8m.c2
-rw-r--r--sound/soc/sof/imx/imx8ulp.c2
-rw-r--r--sound/soc/sof/intel/Kconfig17
-rw-r--r--sound/soc/sof/intel/Makefile2
-rw-r--r--sound/soc/sof/intel/bdw.c2
-rw-r--r--sound/soc/sof/intel/byt.c2
-rw-r--r--sound/soc/sof/intel/hda-dsp.c1
-rw-r--r--sound/soc/sof/intel/hda-stream.c4
-rw-r--r--sound/soc/sof/intel/hda.c282
-rw-r--r--sound/soc/sof/intel/hda.h1
-rw-r--r--sound/soc/sof/intel/lnl.c27
-rw-r--r--sound/soc/sof/intel/mtl.c16
-rw-r--r--sound/soc/sof/intel/mtl.h2
-rw-r--r--sound/soc/sof/intel/pci-ptl.c77
-rw-r--r--sound/soc/sof/intel/shim.h1
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c2
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c2
-rw-r--r--sound/soc/sof/pcm.c2
-rw-r--r--sound/soc/sof/sof-audio.h8
-rw-r--r--sound/soc/sof/sof-priv.h16
-rw-r--r--sound/soc/sof/topology.c8
-rw-r--r--sound/soc/sprd/sprd-mcdt.c2
-rw-r--r--sound/soc/starfive/jh7110_pwmdac.c2
-rw-r--r--sound/soc/starfive/jh7110_tdm.c2
-rw-r--r--sound/soc/stm/stm32_adfsdm.c2
-rw-r--r--sound/soc/stm/stm32_i2s.c6
-rw-r--r--sound/soc/stm/stm32_sai_sub.c2
-rw-r--r--sound/soc/stm/stm32_spdifrx.c2
-rw-r--r--sound/soc/sunxi/sun4i-codec.c30
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c2
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c2
-rw-r--r--sound/soc/sunxi/sun50i-dmic.c2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c2
-rw-r--r--sound/soc/tegra/tegra186_asrc.c2
-rw-r--r--sound/soc/tegra/tegra186_dspk.c2
-rw-r--r--sound/soc/tegra/tegra20_ac97.c2
-rw-r--r--sound/soc/tegra/tegra20_i2s.c2
-rw-r--r--sound/soc/tegra/tegra210_admaif.c2
-rw-r--r--sound/soc/tegra/tegra210_adx.c2
-rw-r--r--sound/soc/tegra/tegra210_ahub.c2
-rw-r--r--sound/soc/tegra/tegra210_amx.c2
-rw-r--r--sound/soc/tegra/tegra210_dmic.c2
-rw-r--r--sound/soc/tegra/tegra210_i2s.c18
-rw-r--r--sound/soc/tegra/tegra210_mixer.c2
-rw-r--r--sound/soc/tegra/tegra210_mvc.c2
-rw-r--r--sound/soc/tegra/tegra210_ope.c2
-rw-r--r--sound/soc/tegra/tegra210_sfc.c2
-rw-r--r--sound/soc/tegra/tegra30_ahub.c2
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/soc/tegra/tegra_audio_graph_card.c2
-rw-r--r--sound/soc/tegra/tegra_pcm.c2
-rw-r--r--sound/soc/ti/ams-delta.c2
-rw-r--r--sound/soc/ti/davinci-i2s.c2
-rw-r--r--sound/soc/ti/davinci-mcasp.c2
-rw-r--r--sound/soc/ti/omap-mcbsp.c2
-rw-r--r--sound/soc/uniphier/aio-ld11.c2
-rw-r--r--sound/soc/uniphier/aio-pxs2.c2
-rw-r--r--sound/soc/uniphier/evea.c2
-rw-r--r--sound/soc/ux500/mop500.c2
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c2
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c2
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c2
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c2
-rw-r--r--sound/sparc/amd7930.c8
-rw-r--r--sound/sparc/cs4231.c78
-rw-r--r--sound/synth/emux/emux_hwdep.c6
-rw-r--r--sound/synth/emux/emux_oss.c11
-rw-r--r--sound/synth/emux/emux_seq.c13
-rw-r--r--sound/synth/emux/emux_synth.c12
-rw-r--r--sound/synth/emux/soundfont.c67
-rw-r--r--sound/usb/caiaq/audio.c15
-rw-r--r--sound/usb/card.c8
-rw-r--r--sound/usb/clock.c62
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--sound/usb/format.c6
-rw-r--r--sound/usb/helper.c34
-rw-r--r--sound/usb/helper.h10
-rw-r--r--sound/usb/midi.c6
-rw-r--r--sound/usb/midi2.c36
-rw-r--r--sound/usb/mixer.c37
-rw-r--r--sound/usb/mixer.h1
-rw-r--r--sound/usb/mixer_quirks.c593
-rw-r--r--sound/usb/mixer_scarlett.c4
-rw-r--r--sound/usb/power.c3
-rw-r--r--sound/usb/power.h1
-rw-r--r--sound/usb/quirks-table.h2445
-rw-r--r--sound/usb/quirks.c62
-rw-r--r--sound/usb/stream.c21
-rw-r--r--sound/usb/usbaudio.h12
-rw-r--r--sound/usb/usx2y/us122l.c45
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c25
-rw-r--r--sound/usb/usx2y/usb_stream.c95
-rw-r--r--sound/usb/usx2y/usb_stream.h1
-rw-r--r--sound/usb/usx2y/usbusx2y.c7
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c67
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c54
-rw-r--r--tools/Makefile10
l---------tools/arch/arm64/vdso1
l---------tools/arch/loongarch/vdso1
l---------tools/arch/powerpc/vdso1
-rw-r--r--tools/arch/riscv/include/asm/barrier.h39
-rw-r--r--tools/arch/riscv/include/asm/fence.h13
l---------tools/arch/s390/vdso1
-rw-r--r--tools/arch/x86/kcpuid/cpuid.csv1430
-rw-r--r--tools/arch/x86/kcpuid/kcpuid.c109
l---------tools/arch/x86/vdso1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst24
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool2
-rw-r--r--tools/bpf/bpftool/btf.c87
-rw-r--r--tools/bpf/bpftool/feature.c10
-rw-r--r--tools/bpf/bpftool/net.c80
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c4
-rw-r--r--tools/bpf/runqslower/Makefile3
-rw-r--r--tools/build/Build3
-rw-r--r--tools/build/Makefile11
-rw-r--r--tools/build/Makefile.feature2
-rw-r--r--tools/build/Makefile.include12
-rw-r--r--tools/build/feature/Makefile11
-rw-r--r--tools/build/feature/test-all.c4
-rw-r--r--tools/build/feature/test-llvm-perf.cpp14
-rw-r--r--tools/crypto/ccp/dbc.c1
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/gpio/gpio-hammer.c4
-rw-r--r--tools/hv/Makefile2
-rw-r--r--tools/hv/hv_fcopy_uio_daemon.c7
-rwxr-xr-x[-rw-r--r--]tools/hv/lsvmbus2
-rw-r--r--tools/include/asm/alternative.h10
-rw-r--r--tools/include/asm/barrier.h2
-rw-r--r--tools/include/generated/asm-offsets.h (renamed from Documentation/devicetree/bindings/media/s5p-mfc.txt)0
-rw-r--r--tools/include/generated/asm/cpucap-defs.h0
-rw-r--r--tools/include/generated/asm/sysreg-defs.h0
-rw-r--r--tools/include/linux/compiler.h8
-rw-r--r--tools/include/linux/coresight-pmu.h17
-rw-r--r--tools/include/linux/init.h (renamed from tools/testing/memblock/linux/init.h)19
-rw-r--r--tools/include/linux/linkage.h6
-rw-r--r--tools/include/linux/mm.h6
-rw-r--r--tools/include/linux/pfn.h1
-rw-r--r--tools/include/linux/ring_buffer.h2
-rw-r--r--tools/include/linux/string.h5
-rw-r--r--tools/include/nolibc/Makefile1
-rw-r--r--tools/include/nolibc/arch-aarch64.h4
-rw-r--r--tools/include/nolibc/arch-arm.h8
-rw-r--r--tools/include/nolibc/arch-i386.h4
-rw-r--r--tools/include/nolibc/arch-loongarch.h4
-rw-r--r--tools/include/nolibc/arch-mips.h8
-rw-r--r--tools/include/nolibc/arch-powerpc.h6
-rw-r--r--tools/include/nolibc/arch-riscv.h4
-rw-r--r--tools/include/nolibc/arch-s390.h4
-rw-r--r--tools/include/nolibc/arch-x86_64.h8
-rw-r--r--tools/include/nolibc/compiler.h24
-rw-r--r--tools/include/nolibc/crt.h25
-rw-r--r--tools/include/nolibc/nolibc.h3
-rw-r--r--tools/include/nolibc/stackprotector.h4
-rw-r--r--tools/include/nolibc/stdbool.h16
-rw-r--r--tools/include/nolibc/string.h1
-rw-r--r--tools/include/uapi/linux/bpf.h12
-rw-r--r--tools/include/uapi/linux/netdev.h13
-rw-r--r--tools/lib/api/Makefile4
-rw-r--r--tools/lib/api/fs/tracing_path.c2
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/Makefile13
-rw-r--r--tools/lib/bpf/bpf.h4
-rw-r--r--tools/lib/bpf/bpf_helpers.h2
-rw-r--r--tools/lib/bpf/bpf_tracing.h25
-rw-r--r--tools/lib/bpf/btf.c8
-rw-r--r--tools/lib/bpf/btf.h2
-rw-r--r--tools/lib/bpf/btf_dump.c2
-rw-r--r--tools/lib/bpf/btf_relocate.c2
-rw-r--r--tools/lib/bpf/elf.c3
-rw-r--r--tools/lib/bpf/libbpf.c88
-rw-r--r--tools/lib/bpf/libbpf.h18
-rw-r--r--tools/lib/bpf/libbpf.map1
-rw-r--r--tools/lib/bpf/libbpf_legacy.h4
-rw-r--r--tools/lib/bpf/linker.c4
-rw-r--r--tools/lib/bpf/skel_internal.h2
-rw-r--r--tools/lib/bpf/usdt.bpf.h2
-rw-r--r--tools/lib/cmdline.c53
-rw-r--r--tools/lib/perf/.gitignore5
-rw-r--r--tools/lib/string.c13
-rw-r--r--tools/lib/subcmd/Makefile6
-rw-r--r--tools/lib/subcmd/parse-options.c8
-rw-r--r--tools/lib/symbol/Makefile4
-rw-r--r--tools/memory-model/Documentation/README24
-rw-r--r--tools/memory-model/Documentation/herd-representation.txt110
-rw-r--r--tools/memory-model/Documentation/simple.txt2
-rw-r--r--tools/mm/Makefile2
-rw-r--r--tools/mm/page-types.c13
-rw-r--r--tools/net/sunrpc/xdrgen/.gitignore2
-rw-r--r--tools/net/sunrpc/xdrgen/README244
-rw-r--r--tools/net/sunrpc/xdrgen/__init__.py2
-rw-r--r--tools/net/sunrpc/xdrgen/generators/__init__.py113
-rw-r--r--tools/net/sunrpc/xdrgen/generators/constant.py20
-rw-r--r--tools/net/sunrpc/xdrgen/generators/enum.py44
-rw-r--r--tools/net/sunrpc/xdrgen/generators/header_bottom.py33
-rw-r--r--tools/net/sunrpc/xdrgen/generators/header_top.py45
-rw-r--r--tools/net/sunrpc/xdrgen/generators/pointer.py272
-rw-r--r--tools/net/sunrpc/xdrgen/generators/program.py168
-rw-r--r--tools/net/sunrpc/xdrgen/generators/source_top.py32
-rw-r--r--tools/net/sunrpc/xdrgen/generators/struct.py272
-rw-r--r--tools/net/sunrpc/xdrgen/generators/typedef.py255
-rw-r--r--tools/net/sunrpc/xdrgen/generators/union.py243
-rw-r--r--tools/net/sunrpc/xdrgen/grammars/xdr.lark119
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/__init__.py2
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/declarations.py76
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/definitions.py78
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/lint.py33
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/source.py118
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/constants/definition.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/decoder/enum.j219
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/enumerator.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/open.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/encoder/enum.j214
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_bottom/declaration/header.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_bottom/definition/header.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_top/declaration/header.j214
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_top/definition/header.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/open.j222
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/basic.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_array.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/optional_data.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_string.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_array.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/open.j220
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j215
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_opaque.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_string.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/declaration/argument.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/declaration/result.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/decoder/argument.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/decoder/result.j222
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/procedure.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/encoder/argument.j216
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/encoder/result.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/source_top/client.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/source_top/server.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/basic.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_array.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/optional_data.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_string.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_array.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j215
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_opaque.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_string.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/basic.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_array.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_opaque.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_array.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_opaque.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_string.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j225
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j226
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_string.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_array.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_array.j29
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j225
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j230
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_string.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/break.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/switch_spec.j27
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/void.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/close.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/switch_spec.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/break.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/switch_spec.j27
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/void.j23
-rw-r--r--tools/net/sunrpc/xdrgen/tests/test.x36
-rw-r--r--tools/net/sunrpc/xdrgen/xdr_ast.py510
-rw-r--r--tools/net/sunrpc/xdrgen/xdr_parse.py36
-rwxr-xr-xtools/net/sunrpc/xdrgen/xdrgen132
-rw-r--r--tools/net/ynl/lib/.gitignore1
-rw-r--r--tools/net/ynl/lib/ynl.c4
-rw-r--r--tools/net/ynl/samples/netdev.c6
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py6
-rw-r--r--tools/objtool/check.c52
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/pci/Makefile2
-rw-r--r--tools/pci/pcitest.c2
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt3
-rw-r--r--tools/perf/Documentation/perf-check.txt82
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt48
-rw-r--r--tools/perf/Documentation/perf-kvm.txt6
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Documentation/perf-mem.txt94
-rw-r--r--tools/perf/Documentation/perf-record.txt14
-rw-r--r--tools/perf/Documentation/perf-report.txt1
-rw-r--r--tools/perf/Documentation/perf-sched.txt9
-rw-r--r--tools/perf/Documentation/perf-script.txt5
-rw-r--r--tools/perf/Documentation/perf-stat.txt8
-rw-r--r--tools/perf/Documentation/perf-top.txt4
-rw-r--r--tools/perf/Documentation/perf-trace.txt4
-rw-r--r--tools/perf/Documentation/topdown.txt30
-rw-r--r--tools/perf/Makefile8
-rw-r--r--tools/perf/Makefile.config51
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c12
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c3
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c108
-rw-r--r--tools/perf/arch/arm64/util/hisi-ptt.c1
-rw-r--r--tools/perf/arch/loongarch/annotate/instructions.c6
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c254
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c53
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c5
-rw-r--r--tools/perf/arch/x86/Makefile6
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c389
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_32.tbl470
-rw-r--r--tools/perf/arch/x86/util/event.c4
-rw-r--r--tools/perf/arch/x86/util/evlist.c6
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c1
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c1
-rw-r--r--tools/perf/bench/synthesize.c2
-rw-r--r--tools/perf/builtin-annotate.c77
-rw-r--r--tools/perf/builtin-buildid-list.c10
-rw-r--r--tools/perf/builtin-c2c.c47
-rw-r--r--tools/perf/builtin-check.c180
-rw-r--r--tools/perf/builtin-daemon.c2
-rw-r--r--tools/perf/builtin-diff.c38
-rw-r--r--tools/perf/builtin-evlist.c18
-rw-r--r--tools/perf/builtin-ftrace.c462
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c739
-rw-r--r--tools/perf/builtin-kmem.c22
-rw-r--r--tools/perf/builtin-kvm.c22
-rw-r--r--tools/perf/builtin-kwork.c36
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-lock.c44
-rw-r--r--tools/perf/builtin-mem.c161
-rw-r--r--tools/perf/builtin-record.c79
-rw-r--r--tools/perf/builtin-report.c106
-rw-r--r--tools/perf/builtin-sched.c232
-rw-r--r--tools/perf/builtin-script.c184
-rw-r--r--tools/perf/builtin-stat.c42
-rw-r--r--tools/perf/builtin-timechart.c25
-rw-r--r--tools/perf/builtin-top.c10
-rw-r--r--tools/perf/builtin-trace.c497
-rw-r--r--tools/perf/builtin-version.c43
-rw-r--r--tools/perf/builtin.h17
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/Build12
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json3
-rw-r--r--tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/ali_drw.json (renamed from tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json)0
-rw-r--r--tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/metrics.json (renamed from tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json)0
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json20
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/datasource.json40
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json10
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json106
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json45
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json142
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json2535
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json57
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c894
-rwxr-xr-xtools/perf/pmu-events/jevents.py27
-rwxr-xr-xtools/perf/pmu-events/models.py73
-rw-r--r--tools/perf/pmu-events/pmu-events.h9
-rwxr-xr-xtools/perf/scripts/python/arm-cs-trace-disasm.py9
-rw-r--r--tools/perf/tests/bp_account.c4
-rw-r--r--tools/perf/tests/bp_signal.c3
-rw-r--r--tools/perf/tests/bp_signal_overflow.c3
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/cpumap.c6
-rw-r--r--tools/perf/tests/dlfilter-test.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/event_update.c9
-rw-r--r--tools/perf/tests/make4
-rw-r--r--tools/perf/tests/parse-events.c6
-rw-r--r--tools/perf/tests/pmu-events.c12
-rw-r--r--tools/perf/tests/pmu.c9
-rwxr-xr-xtools/perf/tests/shell/annotate.sh3
-rw-r--r--tools/perf/tests/shell/base_probe/settings.sh48
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_blacklisted.sh67
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh3
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_basic.sh80
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_invalid_options.sh79
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_line_semantics.sh55
-rwxr-xr-xtools/perf/tests/shell/base_report/setup.sh32
-rw-r--r--tools/perf/tests/shell/base_report/stderr-whitelist.txt5
-rwxr-xr-xtools/perf/tests/shell/base_report/test_basic.sh190
-rwxr-xr-xtools/perf/tests/shell/common/check_errors_whitelisted.pl51
-rw-r--r--tools/perf/tests/shell/common/init.sh31
-rw-r--r--tools/perf/tests/shell/common/settings.sh28
-rwxr-xr-xtools/perf/tests/shell/ftrace.sh89
-rw-r--r--tools/perf/tests/shell/lib/perf_metric_validation.py10
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh11
-rwxr-xr-xtools/perf/tests/shell/perftool-testsuite_report.sh23
-rwxr-xr-xtools/perf/tests/shell/pipe_test.sh129
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh5
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh5
-rwxr-xr-xtools/perf/tests/shell/record.sh59
-rwxr-xr-xtools/perf/tests/shell/record_bpf_filter.sh86
-rwxr-xr-xtools/perf/tests/shell/record_lbr.sh161
-rwxr-xr-xtools/perf/tests/shell/script.sh3
-rwxr-xr-xtools/perf/tests/shell/test_stat_intel_tpebs.sh19
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh7
-rwxr-xr-xtools/perf/tests/shell/test_uprobe_from_different_cu.sh7
-rwxr-xr-xtools/perf/tests/shell/trace_btf_enum.sh62
-rw-r--r--tools/perf/tests/stat.c6
-rw-r--r--tools/perf/tests/tests-scripts.c37
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/thread-map.c2
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c4
-rw-r--r--tools/perf/tests/workloads/Build1
-rw-r--r--tools/perf/tests/workloads/landlock.c66
-rw-r--r--tools/perf/tests/wp.c5
-rw-r--r--tools/perf/trace/beauty/beauty.h11
-rw-r--r--tools/perf/trace/beauty/perf_event_open.c6
-rw-r--r--tools/perf/trace/beauty/sockaddr.c2
-rw-r--r--tools/perf/trace/beauty/timespec.c2
-rw-r--r--tools/perf/ui/browsers/annotate-data.c376
-rw-r--r--tools/perf/ui/browsers/annotate.c20
-rw-r--r--tools/perf/ui/browsers/hists.c18
-rw-r--r--tools/perf/ui/hist.c10
-rw-r--r--tools/perf/ui/stdio/hist.c4
-rw-r--r--tools/perf/util/Build10
-rw-r--r--tools/perf/util/annotate-data.c1164
-rw-r--r--tools/perf/util/annotate-data.h86
-rw-r--r--tools/perf/util/annotate.c360
-rw-r--r--tools/perf/util/annotate.h33
-rw-r--r--tools/perf/util/arm-spe.c55
-rw-r--r--tools/perf/util/auxtrace.c16
-rw-r--r--tools/perf/util/auxtrace.h21
-rw-r--r--tools/perf/util/block-info.c66
-rw-r--r--tools/perf/util/block-info.h8
-rw-r--r--tools/perf/util/bpf-event.c4
-rw-r--r--tools/perf/util/bpf-filter.c631
-rw-r--r--tools/perf/util/bpf-filter.h19
-rw-r--r--tools/perf/util/bpf-filter.l28
-rw-r--r--tools/perf/util/bpf-filter.y28
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c6
-rw-r--r--tools/perf/util/bpf_ftrace.c8
-rw-r--r--tools/perf/util/bpf_kwork.c9
-rw-r--r--tools/perf/util/bpf_kwork_top.c7
-rw-r--r--tools/perf/util/bpf_lock_contention.c45
-rw-r--r--tools/perf/util/bpf_map.c3
-rw-r--r--tools/perf/util/bpf_off_cpu.c16
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c222
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c7
-rw-r--r--tools/perf/util/bpf_skel/kwork_top.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/kwork_trace.bpf.c5
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c53
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h4
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c9
-rw-r--r--tools/perf/util/bpf_skel/sample-filter.h13
-rw-r--r--tools/perf/util/bpf_skel/sample_filter.bpf.c105
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h7
-rw-r--r--tools/perf/util/branch.h1
-rw-r--r--tools/perf/util/build-id.c40
-rw-r--r--tools/perf/util/build-id.h8
-rw-r--r--tools/perf/util/callchain.c35
-rw-r--r--tools/perf/util/callchain.h6
-rw-r--r--tools/perf/util/cap.c63
-rw-r--r--tools/perf/util/cap.h23
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c36
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h2
-rw-r--r--tools/perf/util/cs-etm.c675
-rw-r--r--tools/perf/util/cs-etm.h12
-rw-r--r--tools/perf/util/data-convert-bt.c34
-rw-r--r--tools/perf/util/data-convert-json.c47
-rw-r--r--tools/perf/util/data.c7
-rw-r--r--tools/perf/util/debuginfo.h2
-rw-r--r--tools/perf/util/disasm.c852
-rw-r--r--tools/perf/util/disasm.h19
-rw-r--r--tools/perf/util/disasm_bpf.c195
-rw-r--r--tools/perf/util/disasm_bpf.h12
-rw-r--r--tools/perf/util/dso.c4
-rw-r--r--tools/perf/util/dso.h4
-rw-r--r--tools/perf/util/dsos.c12
-rw-r--r--tools/perf/util/dsos.h2
-rw-r--r--tools/perf/util/dump-insn.c2
-rw-r--r--tools/perf/util/dump-insn.h2
-rw-r--r--tools/perf/util/dwarf-aux.c18
-rw-r--r--tools/perf/util/dwarf-aux.h2
-rw-r--r--tools/perf/util/env.c15
-rw-r--r--tools/perf/util/env.h3
-rw-r--r--tools/perf/util/event.c54
-rw-r--r--tools/perf/util/event.h38
-rw-r--r--tools/perf/util/events_stats.h15
-rw-r--r--tools/perf/util/evlist.c89
-rw-r--r--tools/perf/util/evlist.h7
-rw-r--r--tools/perf/util/evsel.c122
-rw-r--r--tools/perf/util/evsel.h27
-rw-r--r--tools/perf/util/evsel_fprintf.c2
-rw-r--r--tools/perf/util/ftrace.h3
-rw-r--r--tools/perf/util/header.c157
-rw-r--r--tools/perf/util/header.h25
-rw-r--r--tools/perf/util/hisi-ptt.c6
-rw-r--r--tools/perf/util/hist.c63
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/include/dwarf-regs.h11
-rw-r--r--tools/perf/util/intel-bts.c37
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c5
-rw-r--r--tools/perf/util/intel-pt.c30
-rw-r--r--tools/perf/util/intel-tpebs.c432
-rw-r--r--tools/perf/util/intel-tpebs.h35
-rw-r--r--tools/perf/util/jit.h3
-rw-r--r--tools/perf/util/jitdump.c10
-rw-r--r--tools/perf/util/llvm-c-helpers.cpp197
-rw-r--r--tools/perf/util/llvm-c-helpers.h60
-rw-r--r--tools/perf/util/machine.c120
-rw-r--r--tools/perf/util/machine.h36
-rw-r--r--tools/perf/util/map.c25
-rw-r--r--tools/perf/util/map.h22
-rw-r--r--tools/perf/util/map_symbol.c18
-rw-r--r--tools/perf/util/map_symbol.h3
-rw-r--r--tools/perf/util/mem-events.c20
-rw-r--r--tools/perf/util/mem-events.h4
-rw-r--r--tools/perf/util/mem-info.c13
-rw-r--r--tools/perf/util/mem-info.h1
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/mmap.c4
-rw-r--r--tools/perf/util/parse-events.c69
-rw-r--r--tools/perf/util/parse-events.h11
-rw-r--r--tools/perf/util/parse-events.l3
-rw-r--r--tools/perf/util/pmu.c75
-rw-r--r--tools/perf/util/pmu.h8
-rw-r--r--tools/perf/util/pmus.c22
-rw-r--r--tools/perf/util/pmus.h1
-rw-r--r--tools/perf/util/print-events.c3
-rw-r--r--tools/perf/util/print_insn.c14
-rw-r--r--tools/perf/util/s390-cpumsf.c11
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c16
-rw-r--r--tools/perf/util/session.c394
-rw-r--r--tools/perf/util/session.h61
-rw-r--r--tools/perf/util/setup.py4
-rw-r--r--tools/perf/util/sort.c66
-rw-r--r--tools/perf/util/sort.h3
-rw-r--r--tools/perf/util/srcline.c59
-rw-r--r--tools/perf/util/stat-display.c3
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/symbol.c8
-rw-r--r--tools/perf/util/symbol_conf.h2
-rw-r--r--tools/perf/util/synthetic-events.c181
-rw-r--r--tools/perf/util/synthetic-events.h89
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/thread.c4
-rw-r--r--tools/perf/util/thread.h1
-rw-r--r--tools/perf/util/time-utils.c4
-rw-r--r--tools/perf/util/tool.c294
-rw-r--r--tools/perf/util/tool.h19
-rw-r--r--tools/perf/util/trace_augment.h6
-rw-r--r--tools/perf/util/tsc.c2
-rw-r--r--tools/perf/util/util.c12
-rw-r--r--tools/power/cpupower/bindings/python/.gitignore7
-rw-r--r--tools/power/cpupower/bindings/python/Makefile33
-rw-r--r--tools/power/cpupower/bindings/python/README59
-rw-r--r--tools/power/cpupower/bindings/python/raw_pylibcpupower.swg247
-rwxr-xr-xtools/power/cpupower/bindings/python/test_raw_pylibcpupower.py42
-rw-r--r--tools/power/cpupower/lib/cpuidle.c8
-rw-r--r--tools/power/cpupower/lib/cpuidle.h2
-rw-r--r--tools/power/cpupower/lib/powercap.c8
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c4
-rw-r--r--tools/power/pm-graph/.gitignore3
-rw-r--r--tools/power/pm-graph/Makefile111
-rwxr-xr-xtools/rcu/rcu-updaters.sh2
-rw-r--r--tools/sched_ext/.gitignore2
-rw-r--r--tools/sched_ext/Makefile246
-rw-r--r--tools/sched_ext/README.md270
-rw-r--r--tools/sched_ext/include/bpf-compat/gnu/stubs.h11
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h412
-rw-r--r--tools/sched_ext/include/scx/common.h75
-rw-r--r--tools/sched_ext/include/scx/compat.bpf.h28
-rw-r--r--tools/sched_ext/include/scx/compat.h186
-rw-r--r--tools/sched_ext/include/scx/user_exit_info.h111
-rw-r--r--tools/sched_ext/scx_central.bpf.c361
-rw-r--r--tools/sched_ext/scx_central.c135
-rw-r--r--tools/sched_ext/scx_flatcg.bpf.c949
-rw-r--r--tools/sched_ext/scx_flatcg.c233
-rw-r--r--tools/sched_ext/scx_flatcg.h51
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c827
-rw-r--r--tools/sched_ext/scx_qmap.c153
-rw-r--r--tools/sched_ext/scx_show_state.py40
-rw-r--r--tools/sched_ext/scx_simple.bpf.c156
-rw-r--r--tools/sched_ext/scx_simple.c107
-rwxr-xr-xtools/sound/dapm-graph44
-rw-r--r--tools/spi/spidev_fdx.c2
-rwxr-xr-x[-rw-r--r--]tools/testing/fault-injection/failcmd.sh12
-rwxr-xr-xtools/testing/ktest/ktest.pl28
-rw-r--r--tools/testing/kunit/kunit_kernel.py3
-rw-r--r--tools/testing/memblock/Makefile2
-rw-r--r--tools/testing/memblock/internal.h2
-rw-r--r--tools/testing/memblock/linux/kernel.h2
-rw-r--r--tools/testing/memblock/linux/mmzone.h1
-rw-r--r--tools/testing/radix-tree/.gitignore1
-rw-r--r--tools/testing/radix-tree/Makefile72
-rw-r--r--tools/testing/radix-tree/linux/init.h2
-rw-r--r--tools/testing/radix-tree/maple.c111
-rw-r--r--tools/testing/radix-tree/xarray.c10
-rw-r--r--tools/testing/selftests/Makefile6
-rw-r--r--tools/testing/selftests/acct/.gitignore3
-rw-r--r--tools/testing/selftests/acct/Makefile5
-rw-r--r--tools/testing/selftests/acct/acct_syscall.c78
-rw-r--r--tools/testing/selftests/alsa/Makefile4
-rw-r--r--tools/testing/selftests/alsa/global-timer.c87
-rw-r--r--tools/testing/selftests/alsa/utimer-test.c164
-rw-r--r--tools/testing/selftests/arm64/abi/hwcap.c14
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c4
-rw-r--r--tools/testing/selftests/arm64/signal/.gitignore1
-rw-r--r--tools/testing/selftests/arm64/signal/Makefile2
-rw-r--r--tools/testing/selftests/arm64/signal/sve_helpers.c56
-rw-r--r--tools/testing/selftests/arm64/signal/sve_helpers.h21
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c46
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c30
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/poe_siginfo.c86
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_regs.c36
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c36
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sve_regs.c32
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.c27
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.h28
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_no_regs.c32
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_regs.c36
-rw-r--r--tools/testing/selftests/bpf/.gitignore6
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.riscv643
-rw-r--r--tools/testing/selftests/bpf/Makefile151
-rw-r--r--tools/testing/selftests/bpf/README.rst32
-rw-r--r--tools/testing/selftests/bpf/bench.c13
-rw-r--r--tools/testing/selftests/bpf/bench.h1
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c83
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h26
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h11
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h6
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c286
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h12
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h15
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/config.riscv6484
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.c69
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.h12
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c151
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.c245
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.h10
-rw-r--r--tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c2
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c2
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_percpu_stats.c18
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c602
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/build_id.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_dev.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_storage.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/crypto_sanity.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_buf.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_reroute.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/nested_trust.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pro_epilogue.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/read_vsyscall.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reg_bounds.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/setget_sockopt.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c111
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_addr.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h149
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c385
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_opts.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_strncmp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c213
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c529
-rw-r--r--tools/testing/selftests/bpf/prog_tests/user_ringbuf.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c14
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c32
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h64
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi.h2
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_ancestor.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_storage.c24
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c4
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c31
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c23
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_exit.c82
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_tailcall.c58
-rw-r--r--tools/testing/selftests/bpf/progs/err.h10
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c26
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod.c125
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod_seq.c50
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c7
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c30
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_tailcall.c34
-rw-r--r--tools/testing/selftests/bpf/progs/mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/progs/nested_acquire.c33
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue.c154
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c149
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/read_vsyscall.c9
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c26
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h4
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c3
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c70
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c62
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_freplace.c23
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_success.c56
-rw-r--r--tools/testing/selftests/bpf/progs/tc_bpf2bpf.c22
-rw-r--r--tools/testing/selftests/bpf/progs/tc_dummy.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c64
-rw-r--r--tools/testing/selftests/bpf/progs/test_build_id.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_xattr.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_map_resize.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sig_in_xattr.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c8
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c4
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c7
-rw-r--r--tools/testing/selftests/bpf/progs/unsupported_ops.c22
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c39
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c40
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c900
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const.c69
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c7
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c15
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jit_convergence.c114
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ldsx.c112
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lsm.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_scalar_ids.c256
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sdiv.c439
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c105
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_accept.c85
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_reject.c161
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_map.c6
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c174
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp4
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c85
-rw-r--r--tools/testing/selftests/bpf/test_loader.c496
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c3
-rw-r--r--tools/testing/selftests/bpf/test_maps.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c263
-rw-r--r--tools/testing/selftests/bpf/test_progs.h17
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh63
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c183
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_veth.sh121
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c7
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c104
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.c1
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.c41
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.ld11
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c28
-rw-r--r--tools/testing/selftests/bpf/veristat.c16
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh107
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c44
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h1
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c22
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h2
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh56
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_v1_base.sh77
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c264
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c75
-rw-r--r--tools/testing/selftests/core/Makefile2
-rw-r--r--tools/testing/selftests/core/close_range_test.c39
-rw-r--r--tools/testing/selftests/core/unshare_test.c94
-rwxr-xr-xtools/testing/selftests/cpufreq/cpufreq.sh15
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh13
-rw-r--r--tools/testing/selftests/damon/.gitignore1
-rw-r--r--tools/testing/selftests/damon/Makefile2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damon_nr_regions.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_apply_interval.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_quota.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_quota_goal.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_tried_regions.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py0
-rw-r--r--tools/testing/selftests/drivers/net/Makefile5
-rw-r--r--tools/testing/selftests/drivers/net/config4
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/pp_alloc_fail.py3
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py80
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py5
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_basic.sh234
-rwxr-xr-xtools/testing/selftests/drivers/net/stats.py33
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c6
-rwxr-xr-xtools/testing/selftests/dt/test_unprobed_devices.sh15
-rw-r--r--tools/testing/selftests/exec/execveat.c2
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test_ns.c7
-rw-r--r--tools/testing/selftests/ftrace/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc46
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc61
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_uprobe.tc26
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc2
-rw-r--r--tools/testing/selftests/hid/.gitignore1
-rw-r--r--tools/testing/selftests/hid/Makefile2
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c443
-rw-r--r--tools/testing/selftests/hid/hid_common.h436
-rw-r--r--tools/testing/selftests/hid/hidraw.c237
-rw-r--r--tools/testing/selftests/kselftest.h10
-rw-r--r--tools/testing/selftests/kselftest/runner.sh7
-rw-r--r--tools/testing/selftests/kselftest_harness.h18
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer_edge_cases.c1062
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c14
-rw-r--r--tools/testing/selftests/kvm/aarch64/no-vgic-v3.c175
-rw-r--r--tools/testing/selftests/kvm/aarch64/set_id_regs.c1
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c11
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/arch_timer.h18
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h3
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c6
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/common.h39
-rw-r--r--tools/testing/selftests/landlock/fs_test.c1
-rw-r--r--tools/testing/selftests/landlock/net_test.c31
-rw-r--r--tools/testing/selftests/landlock/scoped_abstract_unix_test.c1041
-rw-r--r--tools/testing/selftests/landlock/scoped_base_variants.h156
-rw-r--r--tools/testing/selftests/landlock/scoped_common.h28
-rw-r--r--tools/testing/selftests/landlock/scoped_multiple_domain_variants.h152
-rw-r--r--tools/testing/selftests/landlock/scoped_signal_test.c484
-rw-r--r--tools/testing/selftests/landlock/scoped_test.c33
-rw-r--r--tools/testing/selftests/lib/Makefile3
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/lib/strscpy.sh3
-rw-r--r--tools/testing/selftests/lsm/lsm_list_modules_test.c3
-rw-r--r--tools/testing/selftests/mm/Makefile5
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh2
-rw-r--r--tools/testing/selftests/mm/hugepage-mmap.c18
-rw-r--r--tools/testing/selftests/mm/hugepage-shm.c18
-rw-r--r--tools/testing/selftests/mm/hugepage-vmemmap.c17
-rw-r--r--tools/testing/selftests/mm/khugepaged.c4
-rw-r--r--tools/testing/selftests/mm/map_hugetlb.c18
-rw-r--r--tools/testing/selftests/mm/migration.c17
-rw-r--r--tools/testing/selftests/mm/mseal_test.c197
-rw-r--r--tools/testing/selftests/mm/pkey-arm64.h139
-rw-r--r--tools/testing/selftests/mm/pkey-helpers.h21
-rw-r--r--tools/testing/selftests/mm/pkey-powerpc.h3
-rw-r--r--tools/testing/selftests/mm/pkey-x86.h4
-rw-r--r--tools/testing/selftests/mm/pkey_sighandler_tests.c481
-rw-r--r--tools/testing/selftests/mm/protection_keys.c119
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c71
-rw-r--r--tools/testing/selftests/mm/thp_settings.c46
-rw-r--r--tools/testing/selftests/mm/thp_settings.h9
-rw-r--r--tools/testing/selftests/mm/vm_util.c22
-rw-r--r--tools/testing/selftests/mm/vm_util.h1
-rw-r--r--tools/testing/selftests/mm/write_to_hugetlbfs.c21
-rw-r--r--tools/testing/selftests/net/.gitignore2
-rw-r--r--tools/testing/selftests/net/Makefile12
-rw-r--r--tools/testing/selftests/net/af_unix/msg_oob.c23
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh9
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh55
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh304
-rw-r--r--tools/testing/selftests/net/forwarding/README2
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh8
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh8
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh8
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh7
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh40
-rw-r--r--tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh13
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh58
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_actions.sh46
-rw-r--r--tools/testing/selftests/net/lib.sh15
-rw-r--r--tools/testing/selftests/net/lib/csum.c16
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py60
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh17
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh357
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh17
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh1
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh2
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c10
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh1
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh1
-rw-r--r--tools/testing/selftests/net/ncdevmem.c570
-rwxr-xr-xtools/testing/selftests/net/netdevice.sh60
-rw-r--r--tools/testing/selftests/net/netfilter/config2
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh129
-rw-r--r--tools/testing/selftests/net/packetdrill/Makefile10
-rw-r--r--tools/testing/selftests/net/packetdrill/config11
-rwxr-xr-xtools/testing/selftests/net/packetdrill/defaults.sh63
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh41
-rwxr-xr-xtools/testing/selftests/net/packetdrill/set_sysctls.py38
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt51
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt51
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt28
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt56
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt33
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt34
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt42
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt35
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt39
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt50
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt38
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt36
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt63
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt55
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt41
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt30
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt44
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt61
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt63
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt66
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt56
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt44
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt118
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt57
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh10
-rw-r--r--tools/testing/selftests/net/psock_fanout.c6
-rw-r--r--tools/testing/selftests/net/rds/Makefile12
-rw-r--r--tools/testing/selftests/net/rds/README.txt41
-rwxr-xr-xtools/testing/selftests/net/rds/config.sh53
-rwxr-xr-xtools/testing/selftests/net/rds/run.sh224
-rw-r--r--tools/testing/selftests/net/rds/test.py262
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c18
-rw-r--r--tools/testing/selftests/net/sk_so_peek_off.c202
-rw-r--r--tools/testing/selftests/net/tcp_ao/Makefile3
-rw-r--r--tools/testing/selftests/net/tcp_ao/bench-lookups.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/config1
-rw-r--r--tools/testing/selftests/net/tcp_ao/connect-deny.c25
-rw-r--r--tools/testing/selftests/net/tcp_ao/connect.c6
-rw-r--r--tools/testing/selftests/net/tcp_ao/icmps-discard.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/key-management.c18
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/aolib.h180
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c559
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/ftrace.c543
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/kconfig.c31
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/setup.c17
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/sock.c1
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/utils.c26
-rw-r--r--tools/testing/selftests/net/tcp_ao/restore.c30
-rw-r--r--tools/testing/selftests/net/tcp_ao/rst.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/self-connect.c19
-rw-r--r--tools/testing/selftests/net/tcp_ao/seq-ext.c28
-rw-r--r--tools/testing/selftests/net/tcp_ao/setsockopt-closed.c6
-rw-r--r--tools/testing/selftests/net/tcp_ao/unsigned-md5.c35
-rw-r--r--tools/testing/selftests/net/txtimestamp.c6
-rwxr-xr-xtools/testing/selftests/net/unicast_extensions.sh9
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh3
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy_add_speed.sh83
-rw-r--r--tools/testing/selftests/nolibc/Makefile41
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c9
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh16
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/exec_target.c16
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh38
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.i6862
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.ppc64le1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.x86_642
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/TINY20
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c7
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c24
-rw-r--r--tools/testing/selftests/rtc/rtctest.c7
-rw-r--r--tools/testing/selftests/rust/config3
-rw-r--r--tools/testing/selftests/sched_ext/.gitignore6
-rw-r--r--tools/testing/selftests/sched_ext/Makefile218
-rw-r--r--tools/testing/selftests/sched_ext/config9
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.bpf.c58
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.c57
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c42
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c57
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c39
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c56
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.bpf.c65
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.c58
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c21
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c60
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c43
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.c61
-rw-r--r--tools/testing/selftests/sched_ext/exit.bpf.c84
-rw-r--r--tools/testing/selftests/sched_ext/exit.c55
-rw-r--r--tools/testing/selftests/sched_ext/exit_test.h20
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.bpf.c61
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.c168
-rw-r--r--tools/testing/selftests/sched_ext/hotplug_test.h15
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.bpf.c53
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.c166
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c164
-rw-r--r--tools/testing/selftests/sched_ext/maximal.c51
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.bpf.c36
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.c49
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c25
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c28
-rw-r--r--tools/testing/selftests/sched_ext/minimal.bpf.c21
-rw-r--r--tools/testing/selftests/sched_ext/minimal.c58
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.bpf.c33
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.c78
-rw-r--r--tools/testing/selftests/sched_ext/reload_loop.c75
-rw-r--r--tools/testing/selftests/sched_ext/runner.c201
-rw-r--r--tools/testing/selftests/sched_ext/scx_test.h131
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c40
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.c72
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c89
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c72
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c41
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.c70
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c37
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c56
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c38
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c56
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c92
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.c59
-rw-r--r--tools/testing/selftests/sched_ext/test_example.c49
-rw-r--r--tools/testing/selftests/sched_ext/util.c71
-rw-r--r--tools/testing/selftests/sched_ext/util.h13
-rw-r--r--tools/testing/selftests/timers/change_skew.c3
-rw-r--r--tools/testing/selftests/timers/posix_timers.c550
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/timers/threadtest.c4
-rwxr-xr-xtools/testing/selftests/tpm2/test_async.sh2
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh4
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh2
-rw-r--r--tools/testing/selftests/user/Makefile9
-rw-r--r--tools/testing/selftests/user/config1
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh18
-rw-r--r--tools/testing/selftests/vDSO/Makefile21
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c17
-rw-r--r--tools/testing/selftests/vDSO/vdso_call.h70
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h18
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c14
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_chacha.c103
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_correctness.c21
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c3
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getrandom.c128
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c3
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/srso.c70
-rw-r--r--tools/testing/shared/autoconf.h (renamed from tools/testing/radix-tree/generated/autoconf.h)0
-rw-r--r--tools/testing/shared/linux.c (renamed from tools/testing/radix-tree/linux.c)26
-rw-r--r--tools/testing/shared/linux/bug.h (renamed from tools/testing/radix-tree/linux/bug.h)0
-rw-r--r--tools/testing/shared/linux/cpu.h (renamed from tools/testing/radix-tree/linux/cpu.h)0
-rw-r--r--tools/testing/shared/linux/idr.h (renamed from tools/testing/radix-tree/linux/idr.h)0
-rw-r--r--tools/testing/shared/linux/kconfig.h (renamed from tools/testing/radix-tree/linux/kconfig.h)0
-rw-r--r--tools/testing/shared/linux/kernel.h (renamed from tools/testing/radix-tree/linux/kernel.h)0
-rw-r--r--tools/testing/shared/linux/kmemleak.h (renamed from tools/testing/radix-tree/linux/kmemleak.h)0
-rw-r--r--tools/testing/shared/linux/local_lock.h (renamed from tools/testing/radix-tree/linux/local_lock.h)0
-rw-r--r--tools/testing/shared/linux/lockdep.h (renamed from tools/testing/radix-tree/linux/lockdep.h)0
-rw-r--r--tools/testing/shared/linux/maple_tree.h (renamed from tools/testing/radix-tree/linux/maple_tree.h)0
-rw-r--r--tools/testing/shared/linux/percpu.h (renamed from tools/testing/radix-tree/linux/percpu.h)0
-rw-r--r--tools/testing/shared/linux/preempt.h (renamed from tools/testing/radix-tree/linux/preempt.h)0
-rw-r--r--tools/testing/shared/linux/radix-tree.h (renamed from tools/testing/radix-tree/linux/radix-tree.h)0
-rw-r--r--tools/testing/shared/linux/rcupdate.h (renamed from tools/testing/radix-tree/linux/rcupdate.h)0
-rw-r--r--tools/testing/shared/linux/xarray.h (renamed from tools/testing/radix-tree/linux/xarray.h)0
-rw-r--r--tools/testing/shared/maple-shared.h9
-rw-r--r--tools/testing/shared/maple-shim.c7
-rw-r--r--tools/testing/shared/shared.h33
-rw-r--r--tools/testing/shared/shared.mk72
-rw-r--r--tools/testing/shared/trace/events/maple_tree.h (renamed from tools/testing/radix-tree/trace/events/maple_tree.h)0
-rw-r--r--tools/testing/shared/xarray-shared.c5
-rw-r--r--tools/testing/shared/xarray-shared.h4
-rw-r--r--tools/testing/vma/.gitignore7
-rw-r--r--tools/testing/vma/Makefile18
-rw-r--r--tools/testing/vma/linux/atomic.h12
-rw-r--r--tools/testing/vma/linux/mmzone.h38
-rw-r--r--tools/testing/vma/vma.c1563
-rw-r--r--tools/testing/vma/vma_internal.h923
-rw-r--r--tools/testing/vsock/util.c6
-rw-r--r--tools/testing/vsock/util.h3
-rw-r--r--tools/testing/vsock/vsock_test.c85
-rw-r--r--tools/virtio/ringtest/main.c2
-rw-r--r--virt/kvm/eventfd.c6
-rw-r--r--virt/kvm/kvm_main.c19
-rw-r--r--virt/kvm/vfio.c8
10195 files changed, 412856 insertions, 167463 deletions
diff --git a/.clang-format b/.clang-format
index 252820d9c80a..fe1aa1a30d40 100644
--- a/.clang-format
+++ b/.clang-format
@@ -141,11 +141,13 @@ ForEachMacros:
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
+ - 'damos_for_each_quota_goal'
+ - 'damos_for_each_quota_goal_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
- 'device_for_each_child_node'
- - 'displayid_iter_for_each'
+ - 'device_for_each_child_node_scoped'
- 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
- 'dma_fence_unwrap_for_each'
@@ -172,11 +174,14 @@ ForEachMacros:
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- - 'drm_gem_for_each_gpuva'
- - 'drm_gem_for_each_gpuva_safe'
+ - 'drm_gem_for_each_gpuvm_bo'
+ - 'drm_gem_for_each_gpuvm_bo_safe'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
+ - 'drm_gpuva_for_each_op_reverse'
- 'drm_gpuva_for_each_op_safe'
+ - 'drm_gpuvm_bo_for_each_va'
+ - 'drm_gpuvm_bo_for_each_va_safe'
- 'drm_gpuvm_for_each_va'
- 'drm_gpuvm_for_each_va_range'
- 'drm_gpuvm_for_each_va_range_safe'
@@ -192,11 +197,11 @@ ForEachMacros:
- 'dsa_switch_for_each_port_continue_reverse'
- 'dsa_switch_for_each_port_safe'
- 'dsa_switch_for_each_user_port'
+ - 'dsa_switch_for_each_user_port_continue_reverse'
- 'dsa_tree_for_each_cpu_port'
- 'dsa_tree_for_each_user_port'
- 'dsa_tree_for_each_user_port_continue_reverse'
- 'dso__for_each_symbol'
- - 'dsos__for_each_with_build_id'
- 'elf_hash_for_each_possible'
- 'elf_symtab__for_each_symbol'
- 'evlist__for_each_cpu'
@@ -216,6 +221,7 @@ ForEachMacros:
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
+ - 'for_each_available_child_of_node_scoped'
- 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
@@ -234,6 +240,7 @@ ForEachMacros:
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
+ - 'for_each_child_of_node_scoped'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
@@ -251,6 +258,7 @@ ForEachMacros:
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_andnot'
+ - 'for_each_cpu_from'
- 'for_each_cpu_or'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
@@ -269,13 +277,14 @@ ForEachMacros:
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
+ - 'for_each_enabled_cpu'
- 'for_each_endpoint_of_node'
- 'for_each_event'
- 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_file_lock'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
@@ -286,15 +295,18 @@ ForEachMacros:
- 'for_each_group_member'
- 'for_each_group_member_head'
- 'for_each_hstate'
+ - 'for_each_hwgpio'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
+ - 'for_each_insn_op_loc'
- 'for_each_insn_prefix'
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_lang'
+ - 'for_each_link_ch_maps'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
@@ -332,6 +344,9 @@ ForEachMacros:
- 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
- 'for_each_new_reg'
+ - 'for_each_nhlt_endpoint'
+ - 'for_each_nhlt_endpoint_fmtcfg'
+ - 'for_each_nhlt_fmtcfg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
@@ -387,12 +402,15 @@ ForEachMacros:
- 'for_each_reloc_from'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
+ - 'for_each_reserved_child_of_node'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
+ - 'for_each_rtd_ch_maps'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
+ - 'for_each_rtd_dais_reverse'
- 'for_each_sband_iftype_data'
- 'for_each_script'
- 'for_each_sec'
@@ -533,8 +551,6 @@ ForEachMacros:
- 'lwq_for_each_safe'
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- - 'maps__for_each_entry'
- - 'maps__for_each_entry_safe'
- 'mas_for_each'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
@@ -560,7 +576,9 @@ ForEachMacros:
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
+ - 'nla_for_each_attr_type'
- 'nla_for_each_nested'
+ - 'nla_for_each_nested_type'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
@@ -579,6 +597,7 @@ ForEachMacros:
- 'perf_config_sections__for_each_entry'
- 'perf_config_set__for_each_entry'
- 'perf_cpu_map__for_each_cpu'
+ - 'perf_cpu_map__for_each_cpu_skip_any'
- 'perf_cpu_map__for_each_idx'
- 'perf_evlist__for_each_entry'
- 'perf_evlist__for_each_entry_reverse'
@@ -639,7 +658,6 @@ ForEachMacros:
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- - 'sk_for_each_bound_bhash2'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
@@ -653,6 +671,7 @@ ForEachMacros:
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
+ - 'sparsebit_for_each_set_range'
- 'strlist__for_each_entry'
- 'strlist__for_each_entry_safe'
- 'sym_for_each_insn'
@@ -662,7 +681,6 @@ ForEachMacros:
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'ttm_resource_manager_for_each_res'
- - 'twsk_for_each_bound_bhash2'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -686,6 +704,9 @@ ForEachMacros:
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'xbc_node_for_each_subkey'
+ - 'ynl_attr_for_each'
+ - 'ynl_attr_for_each_nested'
+ - 'ynl_attr_for_each_payload'
- 'zorro_for_each_dev'
IncludeBlocks: Preserve
diff --git a/.gitignore b/.gitignore
index 7902adf4f7f1..56972adb5031 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@
*.dwo
*.elf
*.gcno
+*.gcda
*.gz
*.i
*.ko
@@ -46,7 +47,6 @@
*.so.dbg
*.su
*.symtypes
-*.symversions
*.tab.[ch]
*.tar
*.xz
@@ -70,6 +70,7 @@ modules.order
/Module.markers
/modules.builtin
/modules.builtin.modinfo
+/modules.builtin.ranges
/modules.nsdeps
#
@@ -142,7 +143,6 @@ GTAGS
# id-utils files
ID
-*.orig
*~
\#*#
diff --git a/.mailmap b/.mailmap
index f01d7bfca2a0..0374777cc662 100644
--- a/.mailmap
+++ b/.mailmap
@@ -154,6 +154,9 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
+Chuck Lever <chuck.lever@oracle.com> <cel@kernel.org>
+Chuck Lever <chuck.lever@oracle.com> <cel@netapp.com>
+Chuck Lever <chuck.lever@oracle.com> <cel@citi.umich.edu>
Claudiu Beznea <claudiu.beznea@tuxon.dev> <claudiu.beznea@microchip.com>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
@@ -313,6 +316,7 @@ Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz>
Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
Jishnu Prakash <quic_jprakash@quicinc.com> <jprakash@codeaurora.org>
+Joel Granados <joel.granados@kernel.org> <j.granados@samsung.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
@@ -529,6 +533,7 @@ Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Oruba <peter.oruba@amd.com>
Peter Oruba <peter@oruba.de>
+Pierre-Louis Bossart <pierre-louis.bossart@linux.dev> <pierre-louis.bossart@linux.intel.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
@@ -612,6 +617,10 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@intel.com>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@biene.ffwll.ch>
Simon Horman <horms@kernel.org> <simon.horman@corigine.com>
Simon Horman <horms@kernel.org> <simon.horman@netronome.com>
Simon Kelley <simon@thekelleys.org.uk>
diff --git a/CREDITS b/CREDITS
index 053e5a5003eb..d439f5a1bc00 100644
--- a/CREDITS
+++ b/CREDITS
@@ -378,6 +378,9 @@ S: 1549 Hiironen Rd.
S: Brimson, MN 55602
S: USA
+N: Arnd Bergmann
+D: Maintainer of Cell Broadband Engine Architecture
+
N: Hennus Bergman
P: 1024/77D50909 76 99 FD 31 91 E1 96 1C 90 BB 22 80 62 F6 BD 63
D: Author and maintainer of the QIC-02 tape driver
@@ -1869,6 +1872,9 @@ S: K osmidomkum 723
S: 160 00 Praha 6
S: Czech Republic
+N: Jeremy Kerr
+D: Maintainer of SPU File System
+
N: Michael Kerrisk
E: mtk.manpages@gmail.com
W: https://man7.org/
diff --git a/Documentation/ABI/stable/vdso b/Documentation/ABI/stable/vdso
index 951838d42781..85dbb6a160df 100644
--- a/Documentation/ABI/stable/vdso
+++ b/Documentation/ABI/stable/vdso
@@ -9,9 +9,11 @@ maps an ELF DSO into that program's address space. This DSO is called
the vDSO and it often contains useful and highly-optimized alternatives
to real syscalls.
-These functions are called just like ordinary C function according to
-your platform's ABI. Call them from a sensible context. (For example,
-if you set CS on x86 to something strange, the vDSO functions are
+These functions are called according to your platform's ABI. On many
+platforms they are called just like ordinary C functions. On other platforms
+(e.g. powerpc) they are called with the same convention as system calls, which
+is different from ordinary C functions. Call them from a sensible context.
+(For example, if you set CS on x86 to something strange, the vDSO functions are
within their rights to crash.) In addition, if you pass a bad
pointer to a vDSO function, you might get SIGSEGV instead of -EFAULT.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 628a00fb20a9..1ef69e0271f9 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -151,3 +151,10 @@ Contact: Sergey Senozhatsky <senozhatsky@chromium.org>
Description:
The recompress file is write-only and triggers re-compression
with secondary compression algorithms.
+
+What: /sys/block/zram<id>/algorithm_params
+Date: August 2024
+Contact: Sergey Senozhatsky <senozhatsky@chromium.org>
+Description:
+	The algorithm_params file is write-only and is used to set up
+	compression algorithm parameters.
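+
+	For example, parameters can be set with a single write (a usage
+	sketch; the accepted parameter names, such as "algo" and
+	"level", depend on the selected algorithm and are described in
+	Documentation/admin-guide/blockdev/zram.rst)::
+
+	  echo "algo=zstd level=8" > /sys/block/zram0/algorithm_params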
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index ecf47559f495..7f63c7e97773 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -500,3 +500,75 @@ Description:
console drivers from the device. Raw users of pci-sysfs
resourceN attributes must be terminated prior to resizing.
Success of the resizing operation is not guaranteed.
+
+What: /sys/bus/pci/devices/.../leds/*:enclosure:*/brightness
+What: /sys/class/leds/*:enclosure:*/brightness
+Date: August 2024
+KernelVersion: 6.12
+Description:
+ LED indications on PCIe storage enclosures which are controlled
+ through the NPEM interface (Native PCIe Enclosure Management,
+ PCIe r6.1 sec 6.28) are accessible as led class devices, both
+ below /sys/class/leds and below NPEM-capable PCI devices.
+
+ Although these led class devices could be manipulated manually,
+ in practice they are typically manipulated automatically by an
+ application such as ledmon(8).
+
+ The name of a led class device is as follows:
+ <bdf>:enclosure:<indication>
+ where:
+
+ - <bdf> is the domain, bus, device and function number
+ (e.g. 10000:02:05.0)
+ - <indication> is a short description of the LED indication
+
+ Valid indications per PCIe r6.1 table 6-27 are:
+
+ - ok (drive is functioning normally)
+ - locate (drive is being identified by an admin)
+ - fail (drive is not functioning properly)
+ - rebuild (drive is part of an array that is rebuilding)
+ - pfa (drive is predicted to fail soon)
+ - hotspare (drive is marked to be used as a replacement)
+ - ica (drive is part of an array that is degraded)
+ - ifa (drive is part of an array that is failed)
+ - idt (drive is not the right type for the connector)
+ - disabled (drive is disabled, removal is safe)
+ - specific0 to specific7 (enclosure-specific indications)
+
+ Broadly, the indications fall into one of these categories:
+
+ - to signify drive state (ok, locate, fail, idt, disabled)
+ - to signify drive role or state in a software RAID array
+ (rebuild, pfa, hotspare, ica, ifa)
+ - to signify any other role or state (specific0 to specific7)
+
+ Mandatory indications per PCIe r6.1 sec 7.9.19.2 comprise:
+ ok, locate, fail, rebuild. All others are optional.
+ A led class device is only visible if the corresponding
+ indication is supported by the device.
+
+ To manipulate the indications, write 0 (LED_OFF) or 1 (LED_ON)
+ to the "brightness" file. Note that manipulating an indication
+ may implicitly manipulate other indications at the vendor's
+ discretion. E.g. when the user lights up the "ok" indication,
+ the vendor may choose to automatically turn off the "fail"
+ indication. The current state of an indication can be
+ retrieved by reading its "brightness" file.
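+
+	For example, to light up and then read back the "locate"
+	indication of the enclosure device from the naming example
+	above (a usage sketch; the actual <bdf> is system-specific)::
+
+	  echo 1 > /sys/class/leds/10000:02:05.0:enclosure:locate/brightness
+	  cat /sys/class/leds/10000:02:05.0:enclosure:locate/brightness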
+
+ The PCIe Base Specification allows vendors leeway to choose
+ different colors or blinking patterns for the indications,
+ but they typically follow the IBPI standard. E.g. the "locate"
+ indication is usually presented as one or two LEDs blinking at
+ 4 Hz frequency:
+ https://en.wikipedia.org/wiki/International_Blinking_Pattern_Interpretation
+
+ PCI Firmware Specification r3.3 sec 4.7 defines a DSM interface
+ to facilitate shared access by operating system and platform
+ firmware to a device's NPEM registers. The kernel will use
+ this DSM interface where available, instead of accessing NPEM
+ registers directly. The DSM interface does not support the
+ enclosure-specific indications "specific0" to "specific7",
+ hence the corresponding led class devices are unavailable if
+ the DSM interface is used.
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 7c81f0a25a48..45180b62d426 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -377,17 +377,33 @@ What: /sys/class/power_supply/<supply_name>/charge_type
Date: July 2009
Contact: linux-pm@vger.kernel.org
Description:
- Represents the type of charging currently being applied to the
- battery. "Trickle", "Fast", and "Standard" all mean different
- charging speeds. "Adaptive" means that the charger uses some
- algorithm to adjust the charge rate dynamically, without
- any user configuration required. "Custom" means that the charger
- uses the charge_control_* properties as configuration for some
- different algorithm. "Long Life" means the charger reduces its
- charging rate in order to prolong the battery health. "Bypass"
- means the charger bypasses the charging path around the
- integrated converter allowing for a "smart" wall adaptor to
- perform the power conversion externally.
+ Select the charging algorithm to use for a battery.
+
+ Standard:
+ Fully charge the battery at a moderate rate.
+ Fast:
+ Quickly charge the battery using fast-charge
+ technology. This is typically harder on the battery
+ than standard charging and may lower its lifespan.
+ Trickle:
+ Users who primarily operate the system while
+ plugged into an external power source can extend
+ battery life with this mode. Vendor tooling may
+ call this "Primarily AC Use".
+ Adaptive:
+ Automatically optimize battery charge rate based
+ on typical usage pattern.
+ Custom:
+ Use the charge_control_* properties to determine
+ when to start and stop charging. Advanced users
+ can use this to drastically extend battery life.
+ Long Life:
+ The charger reduces its charging rate in order to
+ prolong the battery health.
+ Bypass:
+ The charger bypasses the charging path around the
+ integrated converter allowing for a "smart" wall
+ adaptor to perform the power conversion externally.
Access: Read, Write
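+
+	For example, to select fast charging for a battery (a usage
+	sketch; the supply name BAT0 is hypothetical)::
+
+	  echo Fast > /sys/class/power_supply/BAT0/charge_type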
@@ -592,7 +608,12 @@ Description:
the supply, for example it can show if USB-PD capable source
is attached.
- Access: Read-Only
+ Access: For power-supplies which consume USB power such
+ as battery charger chips, this indicates the type of
+ the connected USB power source and is Read-Only.
+
+	For power-supplies which act as a USB power-source, such as
+	the UCS1002 USB Port Power Controller, this is writable.
Valid values:
"Unknown", "SDP", "DCP", "CDP", "ACA", "C", "PD",
diff --git a/Documentation/ABI/testing/sysfs-class-tee b/Documentation/ABI/testing/sysfs-class-tee
new file mode 100644
index 000000000000..c9144d16003e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-tee
@@ -0,0 +1,15 @@
+What: /sys/class/tee/tee{,priv}X/rpmb_routing_model
+Date: May 2024
+KernelVersion: 6.10
+Contact: op-tee@lists.trustedfirmware.org
+Description:
+ RPMB frames can be routed to the RPMB device via the
+ user-space daemon tee-supplicant or the RPMB subsystem
+ in the kernel. The value "user" means that the driver
+ will route the RPMB frames via user space. Conversely,
+ "kernel" means that the frames are routed via the RPMB
+ subsystem without assistance from tee-supplicant. It
+ should be assumed that RPMB frames are routed via user
+ space if the variable is absent. The primary purpose
+ of this variable is to let systemd know whether
+ tee-supplicant is needed in the early boot with initramfs.
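+
+	For example, an init script can check the routing model before
+	deciding whether to start tee-supplicant (a usage sketch; tee0
+	is one possible device name)::
+
+	  cat /sys/class/tee/tee0/rpmb_routing_model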
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index a95e0f17c35a..cec65827e602 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -115,6 +115,6 @@ What: /sys/devices/system/memory/crash_hotplug
Date: Aug 2023
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:
- (RO) indicates whether or not the kernel directly supports
- modifying the crash elfcorehdr for memory hot un/plug and/or
- on/offline changes.
+	(RO) indicates whether or not the kernel updates relevant kexec
+	segments on memory hot un/plug and/or on/offline events, avoiding
+	the need to reload the kdump kernel.
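+
+	For example, a udev rule or script can read the attribute to
+	decide whether the kdump kernel must be reloaded (a usage
+	sketch)::
+
+	  cat /sys/devices/system/memory/crash_hotplug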
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index de725ca3be82..206079d3bd5b 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -704,9 +704,9 @@ What: /sys/devices/system/cpu/crash_hotplug
Date: Aug 2023
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:
- (RO) indicates whether or not the kernel directly supports
- modifying the crash elfcorehdr for CPU hot un/plug and/or
- on/offline changes.
+	(RO) indicates whether or not the kernel updates relevant kexec
+	segments on CPU hot un/plug and/or on/offline events, avoiding
+	the need to reload the kdump kernel.
What: /sys/devices/system/cpu/enabled
Date: Nov 2022
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
index 92fe7c5c5ac1..be4141a7522f 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
@@ -75,3 +75,11 @@ Description: RO. Energy input of device or gt in microjoules.
for the gt.
Only supported for particular Intel i915 graphics platforms.
+
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/fan1_input
+Date: November 2024
+KernelVersion: 6.12
+Contact: intel-gfx@lists.freedesktop.org
+Description: RO. Fan speed of device in RPM.
+
+ Only supported for particular Intel i915 graphics platforms.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index fe943ce76c60..5fa6655aee84 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1532,3 +1532,30 @@ Contact: Bean Huo <beanhuo@micron.com>
Description:
rtc_update_ms indicates how often the host should synchronize or update the
UFS RTC. If set to 0, this will disable UFS RTC periodic update.
+
+What: /sys/devices/platform/.../ufshci_capabilities/version
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: UFS version register.
+ Symbol - VER. This file shows the UFSHCD version.
+ Example: Version 3.12 would be represented as 0000_0312h.
+ The file is read only.
+
+What: /sys/devices/platform/.../ufshci_capabilities/product_id
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: product ID register.
+	Symbol - HCPID. This file shows the UFSHCD product ID.
+ The content of this register is vendor specific.
+ The file is read only.
+
+What: /sys/devices/platform/.../ufshci_capabilities/man_id
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: manufacturer ID register.
+	Symbol - HCMID. This file shows the UFSHCD manufacturer ID.
+ The Manufacturer ID is defined by JEDEC in JEDEC-JEP106.
+ The file is read only.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index cad6c3dc1f9c..fdedf1ea944b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -579,6 +579,12 @@ Description: When ATGC is on, it controls age threshold to bypass GCing young
candidates whose age is not beyond the threshold, by default it was
initialized as 604800 seconds (equals to 7 days).
+What: /sys/fs/f2fs/<disk>/atgc_enabled
+Date: Feb 2024
+Contact: "Jinbao Liu" <liujinbao1@xiaomi.com>
+Description: It represents whether ATGC is on or off. A value of 1
+	indicates that ATGC is on, and 0 indicates that it is off.
+
What: /sys/fs/f2fs/<disk>/gc_reclaimed_segments
Date: July 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
@@ -763,3 +769,53 @@ Date: November 2023
Contact: "Chao Yu" <chao@kernel.org>
Description: It controls to enable/disable IO aware feature for background discard.
By default, the value is 1 which indicates IO aware is on.
+
+What: /sys/fs/f2fs/<disk>/blkzone_alloc_policy
+Date: July 2024
+Contact: "Yuanhong Liao" <liaoyuanhong@vivo.com>
+Description: A zoned UFS device consists of two parts: conventional zones
+	and sequential zones. This entry controls which part is prioritized
+	for writes; the default value is 0.
+
+ ======================== =========================================
+ value description
+ blkzone_alloc_policy = 0 Prioritize writing to sequential zones
+ blkzone_alloc_policy = 1 Only allow writing to sequential zones
+ blkzone_alloc_policy = 2 Prioritize writing to conventional zones
+ ======================== =========================================
+
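+	For example, to restrict writes to sequential zones only (a usage
+	sketch)::
+
+	  echo 1 > /sys/fs/f2fs/<disk>/blkzone_alloc_policy
+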
+What: /sys/fs/f2fs/<disk>/migration_window_granularity
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Controls the migration window granularity of garbage collection
+	on large sections. It controls the scanning window granularity for GC
+	migration in units of segments, while migration_granularity controls
+	the number of segments which can be migrated in the same turn.
+
+What: /sys/fs/f2fs/<disk>/reserved_segments
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: In order to fine-tune GC behavior, this entry controls the
+	number of reserved segments.
+
+What: /sys/fs/f2fs/<disk>/gc_no_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is above
+	this number, F2FS does not perform garbage collection for zoned devices
+	through the background GC thread. The default number is "60".
+
+What: /sys/fs/f2fs/<disk>/gc_boost_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is under
+	this number, F2FS boosts garbage collection for zoned devices through
+	the background GC thread. The default number is "25".
+
+What: /sys/fs/f2fs/<disk>/gc_valid_thresh_ratio
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: It controls the valid block ratio threshold used to avoid
+	triggering excessive GC for zoned devices. Its initial value is 95 (%).
+	F2FS will stop the background GC thread from initiating GC for sections
+	whose valid blocks exceed the ratio.
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst
index dd7b1c0c21da..f4d2662871ab 100644
--- a/Documentation/PCI/pci.rst
+++ b/Documentation/PCI/pci.rst
@@ -52,7 +52,7 @@ driver generally needs to perform the following initialization:
- Enable DMA/processing engines
When done using the device, and perhaps the module needs to be unloaded,
-the driver needs to take the follow steps:
+the driver needs to take the following steps:
- Disable the device from generating IRQs
- Release the IRQ (free_irq())
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
index b34990c7c377..04e16775c752 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst
@@ -921,10 +921,10 @@ This portion of the ``rcu_data`` structure is declared as follows:
::
- 1 int dynticks_snap;
+ 1 int watching_snap;
2 unsigned long dynticks_fqs;
-The ``->dynticks_snap`` field is used to take a snapshot of the
+The ``->watching_snap`` field is used to take a snapshot of the
corresponding CPU's dyntick-idle state when forcing quiescent states,
and is therefore accessed from other CPUs. Finally, the
``->dynticks_fqs`` field is used to count the number of times this CPU
@@ -935,8 +935,8 @@ This portion of the rcu_data structure is declared as follows:
::
- 1 long dynticks_nesting;
- 2 long dynticks_nmi_nesting;
+ 1 long nesting;
+ 2 long nmi_nesting;
3 atomic_t dynticks;
4 bool rcu_need_heavy_qs;
5 bool rcu_urgent_qs;
@@ -945,14 +945,14 @@ These fields in the rcu_data structure maintain the per-CPU dyntick-idle
state for the corresponding CPU. The fields may be accessed only from
the corresponding CPU (and from tracing) unless otherwise stated.
-The ``->dynticks_nesting`` field counts the nesting depth of process
+The ``->nesting`` field counts the nesting depth of process
execution, so that in normal circumstances this counter has value zero
or one. NMIs, irqs, and tracers are counted by the
-``->dynticks_nmi_nesting`` field. Because NMIs cannot be masked, changes
+``->nmi_nesting`` field. Because NMIs cannot be masked, changes
to this variable have to be undertaken carefully using an algorithm
provided by Andy Lutomirski. The initial transition from idle adds one,
and nested transitions add two, so that a nesting level of five is
-represented by a ``->dynticks_nmi_nesting`` value of nine. This counter
+represented by a ``->nmi_nesting`` value of nine. This counter
can therefore be thought of as counting the number of reasons why this
CPU cannot be permitted to enter dyntick-idle mode, aside from
process-level transitions.
@@ -960,12 +960,12 @@ process-level transitions.
However, it turns out that when running in non-idle kernel context, the
Linux kernel is fully capable of entering interrupt handlers that never
exit and perhaps also vice versa. Therefore, whenever the
-``->dynticks_nesting`` field is incremented up from zero, the
-``->dynticks_nmi_nesting`` field is set to a large positive number, and
-whenever the ``->dynticks_nesting`` field is decremented down to zero,
-the ``->dynticks_nmi_nesting`` field is set to zero. Assuming that
+``->nesting`` field is incremented up from zero, the
+``->nmi_nesting`` field is set to a large positive number, and
+whenever the ``->nesting`` field is decremented down to zero,
+the ``->nmi_nesting`` field is set to zero. Assuming that
the number of misnested interrupts is not sufficient to overflow the
-counter, this approach corrects the ``->dynticks_nmi_nesting`` field
+counter, this approach corrects the ``->nmi_nesting`` field
every time the corresponding CPU enters the idle loop from process
context.
@@ -992,8 +992,8 @@ code.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| Why not simply combine the ``->dynticks_nesting`` and |
-| ``->dynticks_nmi_nesting`` counters into a single counter that just |
+| Why not simply combine the ``->nesting`` and |
+| ``->nmi_nesting`` counters into a single counter that just |
| counts the number of reasons that the corresponding CPU is non-idle? |
+-----------------------------------------------------------------------+
| **Answer**: |
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 728b1e690c64..1a5ff1a9f02e 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -147,10 +147,10 @@ RCU read-side critical sections preceding and following the current
idle sojourn.
This case is handled by calls to the strongly ordered
``atomic_add_return()`` read-modify-write atomic operation that
-is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
-time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
-The grace-period kthread invokes first ``ct_dynticks_cpu_acquire()``
-(preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()``
+is invoked within ``ct_kernel_exit_state()`` at idle-entry
+time and within ``ct_kernel_enter_state()`` at idle-exit time.
+The grace-period kthread invokes first ``ct_rcu_watching_cpu_acquire()``
+(preceded by a full memory barrier) and ``rcu_watching_snap_stopped_since()``
(both of which rely on acquire semantics) to detect idle CPUs.
+-----------------------------------------------------------------------+
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg
index 423df00c4df9..3fbc19c48a58 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg
@@ -528,7 +528,7 @@
font-style="normal"
y="-8652.5312"
x="2466.7822"
- xml:space="preserve">dyntick_save_progress_counter()</text>
+ xml:space="preserve">rcu_watching_snap_save()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-2-7-2-0"
@@ -537,7 +537,7 @@
font-style="normal"
y="-8368.1475"
x="2463.3262"
- xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ xml:space="preserve">rcu_watching_snap_recheck()</text>
</g>
<g
id="g4504"
@@ -607,7 +607,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state()</text>
<text
xml:space="preserve"
x="3745.7725"
@@ -638,7 +638,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state()</text>
<text
xml:space="preserve"
x="3745.7725"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg
index d82a77d03d8c..25c7acc8a4c2 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg
@@ -844,7 +844,7 @@
font-style="normal"
y="1547.8876"
x="4417.6396"
- xml:space="preserve">dyntick_save_progress_counter()</text>
+ xml:space="preserve">rcu_watching_snap_save()</text>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(6501.9719,-10685.904)"
@@ -899,7 +899,7 @@
font-style="normal"
y="1858.8729"
x="4414.1836"
- xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ xml:space="preserve">rcu_watching_snap_recheck()</text>
<text
xml:space="preserve"
x="14659.87"
@@ -977,7 +977,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state()</text>
<text
xml:space="preserve"
x="3745.7725"
@@ -1008,7 +1008,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state()</text>
<text
xml:space="preserve"
x="3745.7725"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index 53e0dc2a2c79..d05bc7b27edb 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -2974,7 +2974,7 @@
font-style="normal"
y="38114.047"
x="-334.33856"
- xml:space="preserve">dyntick_save_progress_counter()</text>
+ xml:space="preserve">rcu_watching_snap_save()</text>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(1749.9916,25880.249)"
@@ -3029,7 +3029,7 @@
font-style="normal"
y="38425.035"
x="-337.79462"
- xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ xml:space="preserve">rcu_watching_snap_recheck()</text>
<text
xml:space="preserve"
x="9907.8887"
@@ -3107,7 +3107,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state()</text>
<text
xml:space="preserve"
x="3745.7725"
@@ -3138,7 +3138,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-3-27-6-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state()</text>
<text
xml:space="preserve"
x="3745.7725"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg
index 4fa7506082bf..a92356ce4011 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg
@@ -516,7 +516,7 @@
font-style="normal"
y="-8652.5312"
x="2466.7822"
- xml:space="preserve">dyntick_save_progress_counter()</text>
+ xml:space="preserve">rcu_watching_snap_save()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-2-7-2-0"
@@ -525,7 +525,7 @@
font-style="normal"
y="-8368.1475"
x="2463.3262"
- xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ xml:space="preserve">rcu_watching_snap_recheck()</text>
<text
sodipodi:linespacing="125%"
style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index f511476b4550..6125e7068d2c 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -2649,8 +2649,7 @@ those that are idle from RCU's perspective) and then Tasks Rude RCU can
be removed from the kernel.
The tasks-rude-RCU API is also reader-marking-free and thus quite compact,
-consisting of call_rcu_tasks_rude(), synchronize_rcu_tasks_rude(),
-and rcu_barrier_tasks_rude().
+consisting solely of synchronize_rcu_tasks_rude().
Tasks Trace RCU
~~~~~~~~~~~~~~~
diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst
index 3e6407de231c..7de3e308f330 100644
--- a/Documentation/RCU/checklist.rst
+++ b/Documentation/RCU/checklist.rst
@@ -194,14 +194,13 @@ over a rather long period of time, but improvements are always welcome!
when publicizing a pointer to a structure that can
be traversed by an RCU read-side critical section.
-5. If any of call_rcu(), call_srcu(), call_rcu_tasks(),
- call_rcu_tasks_rude(), or call_rcu_tasks_trace() is used,
- the callback function may be invoked from softirq context,
- and in any case with bottom halves disabled. In particular,
- this callback function cannot block. If you need the callback
- to block, run that code in a workqueue handler scheduled from
- the callback. The queue_rcu_work() function does this for you
- in the case of call_rcu().
+5. If any of call_rcu(), call_srcu(), call_rcu_tasks(), or
+ call_rcu_tasks_trace() is used, the callback function may be
+ invoked from softirq context, and in any case with bottom halves
+ disabled. In particular, this callback function cannot block.
+ If you need the callback to block, run that code in a workqueue
+ handler scheduled from the callback. The queue_rcu_work()
+ function does this for you in the case of call_rcu().
6. Since synchronize_rcu() can block, it cannot be called
from any sort of irq context. The same rule applies
@@ -254,10 +253,10 @@ over a rather long period of time, but improvements are always welcome!
corresponding readers must use rcu_read_lock_trace()
and rcu_read_unlock_trace().
- c. If an updater uses call_rcu_tasks_rude() or
- synchronize_rcu_tasks_rude(), then the corresponding
- readers must use anything that disables preemption,
- for example, preempt_disable() and preempt_enable().
+ c. If an updater uses synchronize_rcu_tasks_rude(),
+ then the corresponding readers must use anything that
+ disables preemption, for example, preempt_disable()
+ and preempt_enable().
Mixing things up will result in confusion and broken kernels, and
has even resulted in an exploitable security issue. Therefore,
@@ -326,11 +325,9 @@ over a rather long period of time, but improvements are always welcome!
d. Periodically invoke rcu_barrier(), permitting a limited
number of updates per grace period.
- The same cautions apply to call_srcu(), call_rcu_tasks(),
- call_rcu_tasks_rude(), and call_rcu_tasks_trace(). This is
- why there is an srcu_barrier(), rcu_barrier_tasks(),
- rcu_barrier_tasks_rude(), and rcu_barrier_tasks_rude(),
- respectively.
+ The same cautions apply to call_srcu(), call_rcu_tasks(), and
+ call_rcu_tasks_trace(). This is why there is an srcu_barrier(),
+ rcu_barrier_tasks(), and rcu_barrier_tasks_trace(), respectively.
Note that although these primitives do take action to avoid
memory exhaustion when any given CPU has too many callbacks,
@@ -383,17 +380,17 @@ over a rather long period of time, but improvements are always welcome!
must use whatever locking or other synchronization is required
to safely access and/or modify that data structure.
- Do not assume that RCU callbacks will be executed on
- the same CPU that executed the corresponding call_rcu(),
- call_srcu(), call_rcu_tasks(), call_rcu_tasks_rude(), or
- call_rcu_tasks_trace(). For example, if a given CPU goes offline
- while having an RCU callback pending, then that RCU callback
- will execute on some surviving CPU. (If this was not the case,
- a self-spawning RCU callback would prevent the victim CPU from
- ever going offline.) Furthermore, CPUs designated by rcu_nocbs=
- might well *always* have their RCU callbacks executed on some
- other CPUs, in fact, for some real-time workloads, this is the
- whole point of using the rcu_nocbs= kernel boot parameter.
+ Do not assume that RCU callbacks will be executed on the same
+ CPU that executed the corresponding call_rcu(), call_srcu(),
+ call_rcu_tasks(), or call_rcu_tasks_trace(). For example, if
+ a given CPU goes offline while having an RCU callback pending,
+ then that RCU callback will execute on some surviving CPU.
+ (If this was not the case, a self-spawning RCU callback would
+ prevent the victim CPU from ever going offline.) Furthermore,
+ CPUs designated by rcu_nocbs= might well *always* have their
+ RCU callbacks executed on some other CPUs, in fact, for some
+ real-time workloads, this is the whole point of using the
+ rcu_nocbs= kernel boot parameter.
In addition, do not assume that callbacks queued in a given order
will be invoked in that order, even if they all are queued on the
@@ -507,9 +504,9 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are
otherwise extremely difficult to spot.
-17. If you pass a callback function defined within a module to one of
- call_rcu(), call_srcu(), call_rcu_tasks(), call_rcu_tasks_rude(),
- or call_rcu_tasks_trace(), then it is necessary to wait for all
+17. If you pass a callback function defined within a module
+ to one of call_rcu(), call_srcu(), call_rcu_tasks(), or
+ call_rcu_tasks_trace(), then it is necessary to wait for all
pending callbacks to be invoked before unloading that module.
Note that it is absolutely *not* sufficient to wait for a grace
period! For example, synchronize_rcu() implementation is *not*
@@ -522,7 +519,6 @@ over a rather long period of time, but improvements are always welcome!
- call_rcu() -> rcu_barrier()
- call_srcu() -> srcu_barrier()
- call_rcu_tasks() -> rcu_barrier_tasks()
- - call_rcu_tasks_rude() -> rcu_barrier_tasks_rude()
- call_rcu_tasks_trace() -> rcu_barrier_tasks_trace()
However, these barrier functions are absolutely *not* guaranteed
@@ -539,7 +535,6 @@ over a rather long period of time, but improvements are always welcome!
- Either synchronize_srcu() or synchronize_srcu_expedited(),
together with and srcu_barrier()
- synchronize_rcu_tasks() and rcu_barrier_tasks()
- - synchronize_tasks_rude() and rcu_barrier_tasks_rude()
- synchronize_tasks_trace() and rcu_barrier_tasks_trace()
If necessary, you can use something like workqueues to execute
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index d585a5490aee..1ef5784c1b84 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -1103,7 +1103,7 @@ RCU-Tasks-Rude::
Critical sections Grace period Barrier
- N/A call_rcu_tasks_rude rcu_barrier_tasks_rude
+ N/A N/A
synchronize_rcu_tasks_rude
diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst
index efb7771273bb..018d6cc173d7 100644
--- a/Documentation/accel/qaic/qaic.rst
+++ b/Documentation/accel/qaic/qaic.rst
@@ -93,7 +93,7 @@ commands (does not impact QAIC).
uAPI
====
-QAIC creates an accel device per phsyical PCIe device. This accel device exists
+QAIC creates an accel device per physical PCIe device. This accel device exists
for as long as the PCIe device is known to Linux.
The PCIe device may not be in the state to accept requests from userspace at
@@ -147,12 +147,6 @@ DRM_IOCTL_QAIC_PERF_STATS_BO
recent execution of a BO. This allows userspace to construct an end to end
timeline of the BO processing for a performance analysis.
-DRM_IOCTL_QAIC_PART_DEV
- This IOCTL allows userspace to request a duplicate "shadow device". This extra
- accelN device is associated with a specific partition of resources on the
- AIC100 device and can be used for limiting a process to some subset of
- resources.
-
DRM_IOCTL_QAIC_DETACH_SLICE_BO
This IOCTL allows userspace to remove the slicing information from a BO that
was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This
diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst
index a6ba95fbaa9f..ce63be6d64ad 100644
--- a/Documentation/admin-guide/LSM/index.rst
+++ b/Documentation/admin-guide/LSM/index.rst
@@ -47,3 +47,4 @@ subdirectories.
tomoyo
Yama
SafeSetID
+ ipe
diff --git a/Documentation/admin-guide/LSM/ipe.rst b/Documentation/admin-guide/LSM/ipe.rst
new file mode 100644
index 000000000000..f38e641df0e9
--- /dev/null
+++ b/Documentation/admin-guide/LSM/ipe.rst
@@ -0,0 +1,790 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Integrity Policy Enforcement (IPE)
+==================================
+
+.. NOTE::
+
+ This is the documentation for admins, system builders, or individuals
+ attempting to use IPE. If you're looking for more developer-focused
+ documentation about IPE please see :doc:`the design docs </security/ipe>`.
+
+Overview
+--------
+
+Integrity Policy Enforcement (IPE) is a Linux Security Module that takes a
+complementary approach to access control. Unlike traditional access control
+mechanisms that rely on labels and paths for decision-making, IPE focuses
+on the immutable security properties inherent to system components. These
+properties are fundamental attributes or features of a system component
+that cannot be altered, ensuring a consistent and reliable basis for
+security decisions.
+
+To elaborate, in the context of IPE, system components primarily refer to
+files or the devices these files reside on. However, this is just a
+starting point. The concept of system components is flexible and can be
+extended to include new elements as the system evolves. The immutable
+properties include the origin of a file, which remains constant and
+unchangeable over time. For example, IPE policies can be crafted to trust
+files originating from the initramfs. Since initramfs is typically verified
+by the bootloader, its files are deemed trustworthy; "file is from
+initramfs" becomes an immutable property under IPE's consideration.
+
+The immutable property concept extends to the security features enabled on
+a file's origin, such as dm-verity or fs-verity, which provide a layer of
+integrity and trust. For example, IPE allows the definition of policies
+that trust files from a dm-verity protected device. dm-verity ensures the
+integrity of an entire device by providing a verifiable and immutable state
+of its contents. Similarly, fs-verity offers filesystem-level integrity
+checks, allowing IPE to enforce policies that trust files protected by
+fs-verity. These two features cannot be turned off once established, so
+they are considered immutable properties. These examples demonstrate how
+IPE leverages immutable properties, such as a file's origin and its
+integrity protection mechanisms, to make access control decisions.
+
+The IPE policy, specifically, grants the ability to enforce stringent
+access controls by assessing security properties against reference
+values defined within the policy. This assessment can be based on
+the existence of a security property (e.g., verifying if a file originates
+from initramfs) or evaluating the internal state of an immutable security
+property. The latter includes checking the roothash of a dm-verity
+protected device, determining whether dm-verity possesses a valid
+signature, assessing the digest of a fs-verity protected file, or
+determining whether fs-verity possesses a valid built-in signature. This
+nuanced approach to policy enforcement enables a highly secure and
+customizable system defense mechanism, tailored to specific security
+requirements and trust models.
+
+To enable IPE, ensure that the ``CONFIG_SECURITY_IPE`` config option
+(under :menuselection:`Security -> Integrity Policy Enforcement (IPE)`)
+is enabled.
+
+Use Cases
+---------
+
+IPE works best on fixed-function devices: devices whose purpose is
+clearly defined and not supposed to change (e.g. a network firewall
+device in a data center, an IoT device, etcetera), where all software
+and configuration is built and provisioned by the system owner.
+
+IPE is a long way off from use in general-purpose computing: the Linux
+community as a whole tends to follow a decentralized trust model (known as
+the web of trust), which IPE does not yet support. Instead, IPE
+supports PKI (public key infrastructure), which generally designates a
+set of trusted entities that provide a measure of absolute trust.
+
+Additionally, while most packages are signed today, the files inside
+the packages (for instance, the executables), tend to be unsigned. This
+makes it difficult to utilize IPE in systems where a package manager is
+expected to be functional, without major changes to the package manager
+and ecosystem behind it.
+
+The digest_cache LSM [#digest_cache_lsm]_ is a system that, when combined
+with IPE, could be used to enable and support general-purpose computing
+use cases.
+
+Known Limitations
+-----------------
+
+IPE cannot verify the integrity of anonymous executable memory, such as
+the trampolines created by gcc closures and libffi (<3.4.2), or JIT'd code.
+Unfortunately, as this is dynamically generated code, there is no way
+for IPE to ensure the integrity of this code to form a trust basis.
+
+IPE cannot verify the integrity of programs written in interpreted
+languages when these scripts are invoked by passing the program files
+to the interpreter. This is because of the way interpreters execute
+these files: the scripts themselves are not evaluated as executable code
+through one of IPE's hooks, but are merely text files that are read
+(as opposed to compiled executables) [#interpreters]_.
+
+Threat Model
+------------
+
+IPE specifically targets the risk of tampering with user-space executable
+code after the kernel has initially booted, including the kernel modules
+loaded from userspace via ``modprobe`` or ``insmod``.
+
+To illustrate, consider a scenario where an untrusted binary, possibly
+malicious, is downloaded along with all necessary dependencies, including a
+loader and libc. The primary function of IPE in this context is to prevent
+the execution of such binaries and their dependencies.
+
+IPE achieves this by verifying the integrity and authenticity of all
+executable code before allowing it to run. It conducts a thorough check
+to ensure that the code's integrity is intact and that it matches an
+authorized reference value (digest, signature, etc.) as per the defined
+policy. If a binary does not pass this verification process, either
+because its integrity has been compromised or because it does not meet
+the authorization criteria, IPE will deny its execution. Additionally,
+IPE generates audit logs which may be utilized to detect and analyze
+failures resulting from policy violations.
+
+Tampering threat scenarios include modification or replacement of
+executable code by a range of actors including:
+
+- Actors with physical access to the hardware
+- Actors with local network access to the system
+- Actors with access to the deployment system
+- Compromised internal systems under external control
+- Malicious end users of the system
+- Compromised end users of the system
+- Remote (external) compromise of the system
+
+IPE does not mitigate threats arising from malicious but authorized
+developers (with access to a signing certificate), or compromised
+developer tools used by them (i.e. return-oriented programming attacks).
+Additionally, IPE draws a hard security boundary between userspace and
+kernelspace. As a result, kernel-level exploits are considered outside
+the scope of IPE, and mitigation is left to other mechanisms.
+
+Policy
+------
+
+An IPE policy is a plain-text [#devdoc]_ policy composed of multiple
+statements over several lines. There is one required line, at the top of
+the policy, indicating the policy name and the policy version, for
+instance::
+
+ policy_name=Ex_Policy policy_version=0.0.0
+
+The policy name is a unique, human-readable key identifying this policy.
+It is used to create nodes under securityfs, as well as to distinguish
+between deploying a new policy and updating an existing one.
+
+The policy version indicates the current version of the policy (NOT the
+policy syntax version). This is used to prevent rollback of the policy
+to potentially insecure previous versions.
+
+The next portion of an IPE policy consists of rules. Rules are formed by
+key=value pairs, known as properties. IPE rules require two properties:
+``action``, which determines what IPE does when it encounters a match
+against the rule, and ``op``, which determines when the rule should be
+evaluated. The ordering is significant: a rule must start with ``op``
+and end with ``action``. Thus, a minimal rule is::
+
+ op=EXECUTE action=ALLOW
+
+This example will allow any execution. Additional properties are used to
+assess immutable security properties about the files being evaluated.
+These properties are intended to be descriptions of systems within the
+kernel that can provide a measure of integrity verification, such that IPE
+can determine the trust of the resource based on the value of the property.
+
+Rules are evaluated top-to-bottom. As a result, any revocation or deny
+rules should be placed early in the file to ensure that they are
+evaluated before a rule with ``action=ALLOW`` can match.
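+
+For example, a sketch of a deny-first policy (illustrative only; the
+``boot_verified`` property is assumed here, and a real policy would set
+defaults as described below)::
+
+ policy_name=Deny_First policy_version=0.0.1
+ DEFAULT action=ALLOW
+
+ # Deny rules come first, so they match before the ALLOW rule below.
+ op=EXECUTE boot_verified=FALSE action=DENY
+ op=EXECUTE action=ALLOW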
+
+IPE policy supports comments. The character '#' will function as a
+comment, ignoring all characters to the right of '#' until the newline.
+
+The default behavior of IPE evaluations can also be expressed in policy,
+through the ``DEFAULT`` statement. This can be done at a global level or
+at a per-operation level::
+
+ # Global
+ DEFAULT action=ALLOW
+
+ # Operation Specific
+ DEFAULT op=EXECUTE action=ALLOW
+
+A default must be set for all known operations in IPE. If you want
+older policies to remain compatible with newer kernels that may
+introduce new operations, set a global default of ``ALLOW`` and then
+override it on a per-operation basis (as above).
+
+With configurable policy-based LSMs, there are several issues with
+enforcing the configurable policies at startup, centered on reading and
+parsing the policy:
+
+1. The kernel *should* not read files from userspace, so directly reading
+ the policy file is prohibited.
+2. The kernel command line has a character limit, and one kernel module
+ should not reserve the entire character limit for its own
+ configuration.
+3. There are various boot loaders in the kernel ecosystem, so handing
+ off a memory block would be costly to maintain.
+
+As a result, IPE addresses this problem through the concept of a "boot
+policy". A boot policy is a minimal policy which is compiled into the
+kernel. This policy is intended to get the system to a state where
+userspace is set up and ready to receive commands, at which point a more
+complex policy can be deployed via securityfs. The boot policy can be
+specified via the ``SECURITY_IPE_BOOT_POLICY`` config option, which
+accepts a path to a plain-text version of the IPE policy to apply. This
+policy will be compiled into the kernel. If not specified, IPE will be
+disabled until a policy is deployed and activated through securityfs.
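+
+A minimal configuration sketch (the policy path is hypothetical; the
+``CONFIG_`` names follow the options named above)::
+
+ CONFIG_SECURITY_IPE=y
+ CONFIG_SECURITY_IPE_BOOT_POLICY="ipe/boot_policy.pol"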
+
+Deploying Policies
+~~~~~~~~~~~~~~~~~~
+
+Policies can be deployed from userspace through securityfs. These
+policies are signed using the PKCS#7 message format to enforce some
+level of authorization of the policies (prohibiting an attacker from
+gaining unconstrained root and deploying an "allow all" policy). These
+policies must be signed by a certificate that chains to the
+``SYSTEM_TRUSTED_KEYRING``. With openssl, the policy can be signed by::
+
+ openssl smime -sign \
+ -in "$MY_POLICY" \
+ -signer "$MY_CERTIFICATE" \
+ -inkey "$MY_PRIVATE_KEY" \
+ -noattr \
+ -nodetach \
+ -nosmimecap \
+ -outform der \
+ -out "$MY_POLICY.p7b"
+
+Deploying a policy is done through securityfs, via the ``new_policy``
+node. To deploy a policy, simply cat the file into the securityfs
+node::
+
+ cat "$MY_POLICY.p7b" > /sys/kernel/security/ipe/new_policy
+
+Upon success, this will create one subdirectory under
+``/sys/kernel/security/ipe/policies/``. The subdirectory will be the
+``policy_name`` field of the policy deployed, so for the example above,
+the directory will be ``/sys/kernel/security/ipe/policies/Ex_Policy``.
+Within this directory, there will be seven files: ``pkcs7``, ``policy``,
+``name``, ``version``, ``active``, ``update``, and ``delete``.
+
+The ``pkcs7`` file is read-only. Reading it returns the raw PKCS#7 data
+that was provided to the kernel, representing the policy. If the policy being
+read is the boot policy, this will return ``ENOENT``, as it is not signed.
+
+The ``policy`` file is read-only. Reading it returns the PKCS#7 inner
+content of the policy, which is the plain-text policy.
+
+The ``active`` file is used to set a policy as the currently active policy.
+This file is read-write, and accepts a value of ``"1"`` to set the policy
+as active. Since only a single policy can be active at one time, all other
+policies will be marked inactive. The policy being marked active must have
+a policy version greater than or equal to the currently-running version.
+
+The ``update`` file is used to update a policy that is already present
+in the kernel. This file is write-only and accepts a PKCS#7 signed
+policy. Two checks are always performed on this policy: first, the
+``policy_name`` of the updated policy must match that of the existing
+policy; second, the updated policy must have a policy version greater
+than or equal to the currently-running version. This is to prevent
+rollback attacks.
+
+The ``delete`` file is used to remove a policy that is no longer needed.
+This file is write-only and accepts a value of ``1`` to delete the policy.
+On deletion, the securityfs node representing the policy will be removed.
+However, deleting the currently active policy is not allowed and will
+return an operation-not-permitted error.
+
+Similarly, writing to either ``update`` or ``new_policy`` can result in
+a bad-message error (policy syntax error) or a file-exists error. The
+latter happens when trying to deploy a policy whose ``policy_name``
+matches that of a policy already deployed in the kernel.
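+
+For example, updating the ``Ex_Policy`` deployed above with a newer
+signed version (a usage sketch; the file name is hypothetical)::
+
+ cat "$MY_POLICY_V2.p7b" > /sys/kernel/security/ipe/policies/Ex_Policy/update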
+
+Deploying a policy will *not* cause IPE to start enforcing the policy. IPE will
+only enforce the policy marked active. Note that only one policy can be active
+at a time.
+
+Once deployment is successful, the policy can be activated by writing
+``1`` to the file ``/sys/kernel/security/ipe/policies/$policy_name/active``.
+For example, the ``Ex_Policy`` can be activated by::
+
+ echo 1 > "/sys/kernel/security/ipe/policies/Ex_Policy/active"
+
+From this point on, ``Ex_Policy`` is the enforced policy on the
+system.
+
+IPE also provides a way to delete policies. This can be done via the
+``delete`` securityfs node,
+``/sys/kernel/security/ipe/policies/$policy_name/delete``.
+Writing ``1`` to that file deletes the policy::
+
+ echo 1 > "/sys/kernel/security/ipe/policies/$policy_name/delete"
+
+There is only one requirement to delete a policy: the policy being deleted
+must be inactive.
+
+.. NOTE::
+
+ If a traditional MAC system is enabled (SELinux, apparmor, smack), all
+ writes to ipe's securityfs nodes require ``CAP_MAC_ADMIN``.
+
+Modes
+~~~~~
+
+IPE supports two modes of operation: permissive (similar to SELinux's
+permissive mode) and enforce. In permissive mode, all events are
+checked and policy violations are logged, but the policy is not
+actually enforced. This allows users to test policies before enforcing
+them.
+
+The default mode is enforce; it can be changed via the kernel
+command-line parameter ``ipe.enforce=(0|1)`` or the securityfs node
+``/sys/kernel/security/ipe/enforce``.
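+
+For example, to switch to permissive mode at runtime and read the
+current setting back (a usage sketch)::
+
+ echo 0 > /sys/kernel/security/ipe/enforce
+ cat /sys/kernel/security/ipe/enforce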
+
+.. NOTE::
+
+ If a traditional MAC system is enabled (SELinux, apparmor, smack, etcetera),
+ all writes to ipe's securityfs nodes require ``CAP_MAC_ADMIN``.
+
+Audit Events
+~~~~~~~~~~~~
+
+1420 AUDIT_IPE_ACCESS
+^^^^^^^^^^^^^^^^^^^^^
+Event Examples::
+
+ type=1420 audit(1653364370.067:61): ipe_op=EXECUTE ipe_hook=MMAP enforcing=1 pid=2241 comm="ld-linux.so" path="/deny/lib/libc.so.6" dev="sda2" ino=14549020 rule="DEFAULT action=DENY"
+ type=1300 audit(1653364370.067:61): SYSCALL arch=c000003e syscall=9 success=no exit=-13 a0=7f1105a28000 a1=195000 a2=5 a3=812 items=0 ppid=2219 pid=2241 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=2 comm="ld-linux.so" exe="/tmp/ipe-test/lib/ld-linux.so" subj=unconfined key=(null)
+ type=1327 audit(1653364370.067:61): 707974686F6E3300746573742F6D61696E2E7079002D6E00
+
+ type=1420 audit(1653364735.161:64): ipe_op=EXECUTE ipe_hook=MMAP enforcing=1 pid=2472 comm="mmap_test" path=? dev=? ino=? rule="DEFAULT action=DENY"
+ type=1300 audit(1653364735.161:64): SYSCALL arch=c000003e syscall=9 success=no exit=-13 a0=0 a1=1000 a2=4 a3=21 items=0 ppid=2219 pid=2472 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=2 comm="mmap_test" exe="/root/overlake_test/upstream_test/vol_fsverity/bin/mmap_test" subj=unconfined key=(null)
+ type=1327 audit(1653364735.161:64): 707974686F6E3300746573742F6D61696E2E7079002D6E00
+
+This event indicates that IPE made an access control decision; the
+IPE-specific record (1420) is always emitted in conjunction with an
+``AUDITSYSCALL`` record.
+
+Whether IPE is in permissive or enforce mode can be derived from the
+``success`` property and the exit code of the ``AUDITSYSCALL`` record.
+
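+For example, recent IPE access events can be pulled from the audit log
+together with their accompanying syscall records (a usage sketch; the
+numeric record type is passed to ausearch)::
+
+ ausearch --start recent -m 1420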
+
+Field descriptions:
+
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| Field | Value Type | Optional? | Description of Value |
++===========+============+===========+=================================================================================+
+| ipe_op | string | No | The IPE operation name associated with the log |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| ipe_hook | string | No | The name of the LSM hook that triggered the IPE event |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| enforcing | integer | No | The current IPE enforcing state 1 is in enforcing mode, 0 is in permissive mode |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| pid | integer | No | The pid of the process that triggered the IPE event. |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| comm | string | No | The command line program name of the process that triggered the IPE event |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| path | string | Yes | The absolute path to the evaluated file |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| ino | integer | Yes | The inode number of the evaluated file |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| dev | string | Yes | The device name of the evaluated file, e.g. vda |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+| rule | string | No | The matched policy rule |
++-----------+------------+-----------+---------------------------------------------------------------------------------+
+
+1421 AUDIT_IPE_CONFIG_CHANGE
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Event Example::
+
+ type=1421 audit(1653425583.136:54): old_active_pol_name="Allow_All" old_active_pol_version=0.0.0 old_policy_digest=sha256:E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 new_active_pol_name="boot_verified" new_active_pol_version=0.0.0 new_policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1
+ type=1300 audit(1653425583.136:54): SYSCALL arch=c000003e syscall=1 success=yes exit=2 a0=3 a1=5596fcae1fb0 a2=2 a3=2 items=0 ppid=184 pid=229 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=4294967295 comm="python3" exe="/usr/bin/python3.10" key=(null)
+ type=1327 audit(1653425583.136:54): PROCTITLE proctitle=707974686F6E3300746573742F6D61696E2E7079002D66002E2
+
+This event indicates that IPE switched the active policy, and records
+the name, version, and hash digest of both the old and new policies.
+Note that IPE can only have one policy active at a time; all access
+decision evaluation is based on the currently active policy.
+The normal procedure to deploy a new policy is to first load it into
+the kernel and then switch the active policy to it.
+
+This record will always be emitted in conjunction with an ``AUDITSYSCALL`` record for the ``write`` syscall.
+
+Field descriptions:
+
++------------------------+------------+-----------+---------------------------------------------------+
+| Field | Value Type | Optional? | Description of Value |
++========================+============+===========+===================================================+
+| old_active_pol_name | string | Yes | The name of the previous active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| old_active_pol_version | string | Yes | The version of the previous active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| old_policy_digest | string | Yes | The hash of the previous active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| new_active_pol_name | string | No | The name of the current active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| new_active_pol_version | string | No | The version of the current active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| new_policy_digest | string | No | The hash of the current active policy |
++------------------------+------------+-----------+---------------------------------------------------+
+| auid | integer | No | The login user ID |
++------------------------+------------+-----------+---------------------------------------------------+
+| ses | integer | No | The login session ID |
++------------------------+------------+-----------+---------------------------------------------------+
+| lsm | string | No | The lsm name associated with the event |
++------------------------+------------+-----------+---------------------------------------------------+
+| res | integer | No | The result of the audited operation (success/fail) |
++------------------------+------------+-----------+---------------------------------------------------+
+
+1422 AUDIT_IPE_POLICY_LOAD
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Event Example::
+
+ type=1422 audit(1653425529.927:53): policy_name="boot_verified" policy_version=0.0.0 policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1
+ type=1300 audit(1653425529.927:53): arch=c000003e syscall=1 success=yes exit=2567 a0=3 a1=5596fcae1fb0 a2=a07 a3=2 items=0 ppid=184 pid=229 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=4294967295 comm="python3" exe="/usr/bin/python3.10" key=(null)
+ type=1327 audit(1653425529.927:53): PROCTITLE proctitle=707974686F6E3300746573742F6D61696E2E7079002D66002E2E
+
+This record indicates that a new policy has been loaded into the kernel, along with the policy name, policy version and policy hash.
+
+This record will always be emitted in conjunction with an ``AUDIT_SYSCALL`` record for the ``write`` syscall.
+
+Field descriptions:
+
++----------------+------------+-----------+---------------------------------------------------+
+| Field | Value Type | Optional? | Description of Value |
++================+============+===========+===================================================+
+| policy_name | string | No | The name of the loaded policy |
++----------------+------------+-----------+---------------------------------------------------+
+| policy_version | string | No | The version of the loaded policy |
++----------------+------------+-----------+---------------------------------------------------+
+| policy_digest | string | No | The policy hash |
++----------------+------------+-----------+---------------------------------------------------+
+| auid | integer | No | The login user ID |
++----------------+------------+-----------+---------------------------------------------------+
+| ses | integer | No | The login session ID |
++----------------+------------+-----------+---------------------------------------------------+
+| lsm | string | No | The lsm name associated with the event |
++----------------+------------+-----------+---------------------------------------------------+
+| res | integer | No | The result of the audited operation (success/fail) |
++----------------+------------+-----------+---------------------------------------------------+
+
+
+1404 AUDIT_MAC_STATUS
+^^^^^^^^^^^^^^^^^^^^^
+
+Event Examples::
+
+ type=1404 audit(1653425689.008:55): enforcing=0 old_enforcing=1 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=ipe res=1
+ type=1300 audit(1653425689.008:55): arch=c000003e syscall=1 success=yes exit=2 a0=1 a1=55c1065e5c60 a2=2 a3=0 items=0 ppid=405 pid=441 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=)
+ type=1327 audit(1653425689.008:55): proctitle="-bash"
+
+ type=1404 audit(1653425689.008:55): enforcing=1 old_enforcing=0 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=ipe res=1
+ type=1300 audit(1653425689.008:55): arch=c000003e syscall=1 success=yes exit=2 a0=1 a1=55c1065e5c60 a2=2 a3=0 items=0 ppid=405 pid=441 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=)
+ type=1327 audit(1653425689.008:55): proctitle="-bash"
+
+This record will always be emitted in conjunction with an ``AUDIT_SYSCALL`` record for the ``write`` syscall.
+
+Field descriptions:
+
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| Field | Value Type | Optional? | Description of Value |
++===============+============+===========+=================================================================================================+
+| enforcing | integer | No | The enforcing state IPE is being switched to, 1 is in enforcing mode, 0 is in permissive mode |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| old_enforcing | integer | No | The enforcing state IPE is being switched from, 1 is in enforcing mode, 0 is in permissive mode |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| auid | integer | No | The login user ID |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| ses | integer | No | The login session ID |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| enabled | integer | No | The new TTY audit enabled setting |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| old-enabled | integer | No | The old TTY audit enabled setting |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| lsm | string | No | The lsm name associated with the event |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+| res | integer | No | The result of the audited operation (success/fail) |
++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+
+
+
+Success Auditing
+^^^^^^^^^^^^^^^^
+
+IPE supports success auditing. When enabled, all events that pass IPE
+policy and are not blocked will emit an audit event. This is disabled by
+default, and can be enabled via the kernel command line parameter
+``ipe.success_audit=(0|1)`` or the
+``/sys/kernel/security/ipe/success_audit`` securityfs file.
+
+This is *very* noisy, as IPE will check every userspace binary on the
+system, but is useful for debugging policies.
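+
+For example, success auditing can be toggled at runtime through the
+securityfs node named above (a sketch)::
+
+ # enable success auditing
+ echo 1 > /sys/kernel/security/ipe/success_audit
+ # disable it again
+ echo 0 > /sys/kernel/security/ipe/success_audit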
+
+.. NOTE::
+
+ If a traditional MAC system is enabled (SELinux, AppArmor, Smack, etc.),
+ all writes to IPE's securityfs nodes require ``CAP_MAC_ADMIN``.
+
+Properties
+----------
+
+As explained above, IPE properties are ``key=value`` pairs expressed in IPE
+policy. Two properties are built into the policy parser: 'op' and 'action'.
+The other properties are used to express immutable security properties
+of the files being evaluated. Currently those properties are:
+'``boot_verified``', '``dmverity_signature``', '``dmverity_roothash``',
+'``fsverity_signature``', '``fsverity_digest``'. Descriptions of all
+properties supported by IPE are listed below:
+
+op
+~~
+
+Indicates the operation a rule applies to. Must be in every rule,
+as the first token. IPE supports the following operations:
+
+ ``EXECUTE``:
+
+ Pertains to any file attempting to be executed, or loaded as an
+ executable.
+
+ ``FIRMWARE``:
+
+ Pertains to firmware being loaded via the firmware_class interface.
+ This covers both the preallocated buffer and the firmware file
+ itself.
+
+ ``KMODULE``:
+
+ Pertains to loading kernel modules via ``modprobe`` or ``insmod``.
+
+ ``KEXEC_IMAGE``:
+
+ Pertains to kernel images loading via ``kexec``.
+
+ ``KEXEC_INITRAMFS``:
+
+ Pertains to initrd images loading via ``kexec --initrd``.
+
+ ``POLICY``:
+
+ Controls loading policies via a kernel-space initiated read.
+
+ An example of such is loading IMA policies by writing the path
+ to the policy file to ``$securityfs/ima/policy``.
+
+ ``X509_CERT``:
+
+ Controls loading IMA certificates through the Kconfigs,
+ ``CONFIG_IMA_X509_PATH`` and ``CONFIG_EVM_X509_PATH``.
+
+action
+~~~~~~
+
+ Determines what IPE should do when a rule matches. Must be in every
+ rule, as the final clause. Can be one of:
+
+ ``ALLOW``:
+
+ If the rule matches, explicitly allow access to the resource and stop
+ evaluating any further rules.
+
+ ``DENY``:
+
+ If the rule matches, explicitly deny access to the resource and stop
+ evaluating any further rules.
+
+boot_verified
+~~~~~~~~~~~~~
+
+ This property can be utilized for authorization of files from initramfs.
+ The format of this property is::
+
+ boot_verified=(TRUE|FALSE)
+
+
+ .. WARNING::
+
+ This property will trust files from the initramfs (rootfs). It should
+ only be used during the early boot stage. Before mounting the real
+ rootfs on top of the initramfs, the initramfs script will recursively
+ remove all files and directories on the initramfs. This is typically
+ implemented by using switch_root(8) [#switch_root]_. Therefore the
+ initramfs will be empty and not accessible after the real
+ rootfs takes over. It is advised to switch to a different policy
+ that doesn't rely on this property after that point; this ensures
+ that the trust policies remain relevant and effective
+ throughout the system's operation.
+
+dmverity_roothash
+~~~~~~~~~~~~~~~~~
+
+ This property can be utilized for authorization or revocation of
+ specific dm-verity volumes, identified via their root hashes. It has a
+ dependency on the DM_VERITY module. This property is controlled by
+ the ``IPE_PROP_DM_VERITY`` config option, which is automatically
+ selected when ``SECURITY_IPE`` and ``DM_VERITY`` are both enabled.
+ The format of this property is::
+
+ dmverity_roothash=DigestName:HexadecimalString
+
+ The supported DigestNames for dmverity_roothash are [#dmveritydigests]_
+
+ + blake2b-512
+ + blake2s-256
+ + sha256
+ + sha384
+ + sha512
+ + sha3-224
+ + sha3-256
+ + sha3-384
+ + sha3-512
+ + sm3
+ + rmd160
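+
+ One way to obtain a volume's root hash for such a rule is the
+ ``veritysetup`` tool from the cryptsetup project (a sketch; the device
+ names are examples). The command prints a "Root hash" line whose value
+ can be used after the digest name, e.g.
+ ``dmverity_roothash=sha256:<hash>``::
+
+ veritysetup format /dev/vdb /dev/vdc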
+
+dmverity_signature
+~~~~~~~~~~~~~~~~~~
+
+ This property can be utilized for authorization of all dm-verity
+ volumes that have a signed roothash validated by a keyring
+ specified by dm-verity's configuration, either the system trusted
+ keyring or the secondary keyring. It depends on the
+ ``DM_VERITY_VERIFY_ROOTHASH_SIG`` config option and is controlled by
+ the ``IPE_PROP_DM_VERITY_SIGNATURE`` config option, which is automatically
+ selected when ``SECURITY_IPE``, ``DM_VERITY`` and
+ ``DM_VERITY_VERIFY_ROOTHASH_SIG`` are all enabled.
+ The format of this property is::
+
+ dmverity_signature=(TRUE|FALSE)
+
+fsverity_digest
+~~~~~~~~~~~~~~~
+
+ This property can be utilized for authorization of specific fsverity
+ enabled files, identified via their fsverity digests.
+ It depends on the ``FS_VERITY`` config option and is controlled by
+ the ``IPE_PROP_FS_VERITY`` config option, which is automatically
+ selected when ``SECURITY_IPE`` and ``FS_VERITY`` are both enabled.
+ The format of this property is::
+
+ fsverity_digest=DigestName:HexadecimalString
+
+ The supported DigestNames for fsverity_digest are [#fsveritydigest]_
+
+ + sha256
+ + sha512
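+
+ The digest of an fs-verity enabled file can be computed with the
+ ``fsverity`` tool from the fsverity-utils project (a sketch; the path is
+ an example, and the output is of the form "sha256:<hash> <file>")::
+
+ fsverity digest /usr/bin/example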
+
+fsverity_signature
+~~~~~~~~~~~~~~~~~~
+
+ This property is used to authorize all fs-verity enabled files that have
+ been verified by fs-verity's built-in signature mechanism. The signature
+ verification relies on a key stored within the ".fs-verity" keyring. It
+ depends on the ``FS_VERITY_BUILTIN_SIGNATURES`` config option and
+ is controlled by the ``IPE_PROP_FS_VERITY`` config option, which is
+ automatically selected when ``SECURITY_IPE``, ``FS_VERITY``
+ and ``FS_VERITY_BUILTIN_SIGNATURES`` are all enabled.
+ The format of this property is::
+
+ fsverity_signature=(TRUE|FALSE)
+
+Policy Examples
+---------------
+
+Allow all
+~~~~~~~~~
+
+::
+
+ policy_name=Allow_All policy_version=0.0.0
+ DEFAULT action=ALLOW
+
+Allow only initramfs
+~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Allow_Initramfs policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE boot_verified=TRUE action=ALLOW
+
+Allow any signed and validated dm-verity volume and the initramfs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Allow_Signed_DMV_And_Initramfs policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE boot_verified=TRUE action=ALLOW
+ op=EXECUTE dmverity_signature=TRUE action=ALLOW
+
+Prohibit execution from a specific dm-verity volume
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Deny_DMV_By_Roothash policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE dmverity_roothash=sha256:cd2c5bae7c6c579edaae4353049d58eb5f2e8be0244bf05345bc8e5ed257baff action=DENY
+
+ op=EXECUTE boot_verified=TRUE action=ALLOW
+ op=EXECUTE dmverity_signature=TRUE action=ALLOW
+
+Allow only a specific dm-verity volume
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Allow_DMV_By_Roothash policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE dmverity_roothash=sha256:401fcec5944823ae12f62726e8184407a5fa9599783f030dec146938 action=ALLOW
+
+Allow any fs-verity file with a valid built-in signature
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Allow_Signed_And_Validated_FSVerity policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE fsverity_signature=TRUE action=ALLOW
+
+Allow execution of a specific fs-verity file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ policy_name=Allow_FSV_By_Digest policy_version=0.0.0
+ DEFAULT action=DENY
+
+ op=EXECUTE fsverity_digest=sha256:fd88f2b8824e197f850bf4c5109bea5cf0ee38104f710843bb72da796ba5af9e action=ALLOW
+
+Additional Information
+----------------------
+
+- `Github Repository <https://github.com/microsoft/ipe>`_
+- :doc:`Developer and design docs for IPE </security/ipe>`
+
+FAQ
+---
+
+Q:
+ What's the difference between other LSMs which provide a measure of
+ trust-based access control?
+
+A:
+
+ In general, there are two other LSMs that can provide similar functionality:
+ IMA and Loadpin.
+
+ IMA and IPE are functionally very similar. The significant difference between
+ the two is the policy. [#devdoc]_
+
+ Loadpin and IPE differ fairly dramatically, as Loadpin only covers
+ kernel read operations, whereas IPE is capable of controlling execution
+ on top of kernel reads. The trust model is also different; Loadpin roots its
+ trust in the initial super-block, whereas trust in IPE stems from the kernel
+ itself (via ``SYSTEM_TRUSTED_KEYS``).
+
+-----------
+
+.. [#digest_cache_lsm] https://lore.kernel.org/lkml/20240415142436.2545003-1-roberto.sassu@huaweicloud.com/
+
+.. [#interpreters] There is `some interest in solving this issue <https://lore.kernel.org/lkml/20220321161557.495388-1-mic@digikod.net/>`_.
+
+.. [#devdoc] Please see :doc:`the design docs </security/ipe>` for more on
+ this topic.
+
+.. [#switch_root] https://man7.org/linux/man-pages/man8/switch_root.8.html
+
+.. [#dmveritydigests] These hash algorithms are based on values accepted by
+ the Linux crypto API; IPE does not impose any
+ restrictions on the digest algorithm itself;
+ thus, this list may be out of date.
+
+.. [#fsveritydigest] These hash algorithms are based on values accepted by the
+ kernel's fsverity support; IPE does not impose any
+ restrictions on the digest algorithm itself;
+ thus, this list may be out of date.
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index 091e8bb38887..678d70d6e1c3 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -102,17 +102,41 @@ Examples::
#select lzo compression algorithm
echo lzo > /sys/block/zram0/comp_algorithm
-For the time being, the `comp_algorithm` content does not necessarily
-show every compression algorithm supported by the kernel. We keep this
-list primarily to simplify device configuration and one can configure
-a new device with a compression algorithm that is not listed in
-`comp_algorithm`. The thing is that, internally, ZRAM uses Crypto API
-and, if some of the algorithms were built as modules, it's impossible
-to list all of them using, for instance, /proc/crypto or any other
-method. This, however, has an advantage of permitting the usage of
-custom crypto compression modules (implementing S/W or H/W compression).
-
-4) Set Disksize
+For the time being, the `comp_algorithm` content shows only compression
+algorithms that are supported by zram.
+
+4) Set compression algorithm parameters: Optional
+=================================================
+
+Compression algorithms may support specific parameters which can be
+tweaked for a particular dataset. ZRAM has an `algorithm_params` device
+attribute which provides per-algorithm parameter configuration.
+
+For example, several compression algorithms support `level` parameter.
+In addition, certain compression algorithms support pre-trained dictionaries,
+which significantly change an algorithm's characteristics. In order to
+configure a compression algorithm to use an external pre-trained dictionary,
+pass the full path via the `dict` parameter along with other parameters::
+
+ #pass path to pre-trained zstd dictionary
+ echo "algo=zstd dict=/etc/dictioary" > /sys/block/zram0/algorithm_params
+
+ #same, but using algorithm priority
+ echo "priority=1 dict=/etc/dictioary" > \
+ /sys/block/zram0/algorithm_params
+
+ #pass path to pre-trained zstd dictionary and compression level
+ echo "algo=zstd level=8 dict=/etc/dictioary" > \
+ /sys/block/zram0/algorithm_params
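+
+The dictionary file itself can be produced with the userspace `zstd` tool
+by training on representative sample data (a sketch; the paths are
+examples)::
+
+ #train a dictionary on a set of sample files
+ zstd --train /var/samples/* -o /etc/dictionary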
+
+Parameters are algorithm specific: not all algorithms support pre-trained
+dictionaries, and not all algorithms support `level`. Furthermore, for certain
+algorithms `level` controls the compression level (the higher the value, the
+better the compression ratio; it can even take negative values for some
+algorithms), while for other algorithms `level` is an acceleration level (the
+higher the value, the lower the compression ratio).
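+
+For instance, assuming the lz4 algorithm is configured on the device, `level`
+acts as an acceleration value (a sketch)::
+
+ #for lz4, higher levels mean faster compression but a lower ratio
+ echo "algo=lz4 level=5" > /sys/block/zram0/algorithm_params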
+
+5) Set Disksize
===============
Set disk size by writing the value to sysfs node 'disksize'.
@@ -132,7 +156,7 @@ There is little point creating a zram of greater than twice the size of memory
since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the
size of the disk when not in use so a huge zram is wasteful.
-5) Set memory limit: Optional
+6) Set memory limit: Optional
=============================
Set memory limit by writing the value to sysfs node 'mem_limit'.
@@ -151,7 +175,7 @@ Examples::
# To disable memory limit
echo 0 > /sys/block/zram0/mem_limit
-6) Activate
+7) Activate
===========
::
@@ -162,7 +186,7 @@ Examples::
mkfs.ext4 /dev/zram1
mount /dev/zram1 /tmp
-7) Add/remove zram devices
+8) Add/remove zram devices
==========================
zram provides a control interface, which enables dynamic (on-demand) device
@@ -182,7 +206,7 @@ execute::
echo X > /sys/class/zram-control/hot_remove
-8) Stats
+9) Stats
========
Per-device statistics are exported as various nodes under /sys/block/zram<id>/
@@ -205,6 +229,7 @@ writeback_limit_enable RW show and set writeback_limit feature
max_comp_streams RW the number of possible concurrent compress
operations
comp_algorithm RW show and change the compression algorithm
+algorithm_params WO set up compression algorithm parameters
compact WO trigger memory compaction
debug_stat RO this file is used for zram debugging purposes
backing_dev RW set up backend storage for zram to write out
@@ -283,15 +308,15 @@ a single line of text and contains the following stats separated by whitespace:
Unit: 4K bytes
============== =============================================================
-9) Deactivate
-=============
+10) Deactivate
+==============
::
swapoff /dev/zram0
umount /dev/zram1
-10) Reset
+11) Reset
=========
Write any positive value to 'reset' sysfs node::
@@ -487,11 +512,14 @@ registered compression algorithms, increases our chances of finding the
algorithm that successfully compresses a particular page. Sometimes, however,
it is convenient (and sometimes even necessary) to limit recompression to
only one particular algorithm so that it will not try any other algorithms.
-This can be achieved by providing a algo=NAME parameter:::
+This can be achieved by providing an `algo` or `priority` parameter::
#use zstd algorithm only (if registered)
echo "type=huge algo=zstd" > /sys/block/zramX/recompress
+ #use zstd algorithm only (if zstd was registered under priority 1)
+ echo "type=huge priority=1" > /sys/block/zramX/recompress
+
memory tracking
===============
diff --git a/Documentation/admin-guide/bug-bisect.rst b/Documentation/admin-guide/bug-bisect.rst
index 325c5d0ed34a..585630d14581 100644
--- a/Documentation/admin-guide/bug-bisect.rst
+++ b/Documentation/admin-guide/bug-bisect.rst
@@ -1,76 +1,144 @@
-Bisecting a bug
-+++++++++++++++
+.. SPDX-License-Identifier: (GPL-2.0+ OR CC-BY-4.0)
+.. [see the bottom of this file for redistribution information]
-Last updated: 28 October 2016
+======================
+Bisecting a regression
+======================
-Introduction
-============
+This document describes how to use a ``git bisect`` to find the source code
+change that broke something -- for example when some functionality stopped
+working after upgrading from Linux 6.0 to 6.1.
-Always try the latest kernel from kernel.org and build from source. If you are
-not confident in doing that please report the bug to your distribution vendor
-instead of to a kernel developer.
+The text focuses on the gist of the process. If you are new to bisecting the
+kernel, it is better to follow Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
+instead: it depicts everything from start to finish while covering multiple
+aspects even kernel developers occasionally forget. This includes detecting
+situations early where a bisection would be a waste of time, as nobody would
+care about the result -- for example, because the problem happens after the
+kernel marked itself as 'tainted', occurs in an abandoned version, was already
+fixed, or is caused by a .config change you or your Linux distributor performed.
-Finding bugs is not always easy. Have a go though. If you can't find it don't
-give up. Report as much as you have found to the relevant maintainer. See
-MAINTAINERS for who that is for the subsystem you have worked on.
+Finding the change causing a kernel issue using a bisection
+===========================================================
-Before you submit a bug report read
-'Documentation/admin-guide/reporting-issues.rst'.
+*Note: the following process assumes you prepared everything for a bisection.
+This includes having a Git clone with the appropriate sources, installing the
+software required to build and install kernels, as well as a .config file stored
+in a safe place (the following example assumes '~/prepared_kernel_.config') to
+use as a pristine base at each bisection step; ideally, you have also worked
+out a fully reliable and straightforward way to reproduce the regression.*
-Devices not appearing
-=====================
-
-Often this is caused by udev/systemd. Check that first before blaming it
-on the kernel.
-
-Finding patch that caused a bug
-===============================
-
-Using the provided tools with ``git`` makes finding bugs easy provided the bug
-is reproducible.
-
-Steps to do it:
-
-- build the Kernel from its git source
-- start bisect with [#f1]_::
-
- $ git bisect start
-
-- mark the broken changeset with::
-
- $ git bisect bad [commit]
-
-- mark a changeset where the code is known to work with::
-
- $ git bisect good [commit]
-
-- rebuild the Kernel and test
-- interact with git bisect by using either::
-
- $ git bisect good
-
- or::
-
- $ git bisect bad
-
- depending if the bug happened on the changeset you're testing
-- After some interactions, git bisect will give you the changeset that
- likely caused the bug.
-
-- For example, if you know that the current version is bad, and version
- 4.8 is good, you could do::
-
- $ git bisect start
- $ git bisect bad # Current version is bad
- $ git bisect good v4.8
-
-
-.. [#f1] You can, optionally, provide both good and bad arguments at git
- start with ``git bisect start [BAD] [GOOD]``
-
-For further references, please read:
-
-- The man page for ``git-bisect``
-- `Fighting regressions with git bisect <https://www.kernel.org/pub/software/scm/git/docs/git-bisect-lk2009.html>`_
-- `Fully automated bisecting with "git bisect run" <https://lwn.net/Articles/317154>`_
-- `Using Git bisect to figure out when brokenness was introduced <http://webchick.net/node/99>`_
+* Preparation: start the bisection and tell Git about the points in the history
+ you consider to be working and broken, which Git calls 'good' and 'bad'::
+
+ git bisect start
+ git bisect good v6.0
+ git bisect bad v6.1
+
+ Instead of Git tags like 'v6.0' and 'v6.1' you can specify commit-ids, too.
+
+1. Copy your prepared .config into the build directory and adjust it to the
+ needs of the codebase Git checked out for testing::
+
+ cp ~/prepared_kernel_.config .config
+ make olddefconfig
+
+2. Now build, install, and boot a kernel. This might fail for reasons unrelated
+ to your regression, for example when a compile error at the current stage of
+ the bisection is only resolved by a later change. In such cases run
+ ``git bisect skip`` and go back to step 1.
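+
+ The exact build and install commands depend on your setup; a common
+ sequence looks like this (a sketch, assuming the kernel's standard
+ installation targets work on your distribution)::
+
+ make -j $(nproc) all
+ sudo make modules_install install
+ sudo reboot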
+
+3. Check if the functionality that regressed works in the kernel you just built.
+
+ If it works, execute::
+
+ git bisect good
+
+ If it is broken, run::
+
+ git bisect bad
+
+ Note, getting this wrong just once will send the rest of the bisection
+ totally off course. To avoid having to start anew later, ensure that what
+ you tell Git is correct; it is often wise to spend a few extra minutes on
+ testing in case your reproducer is unreliable.
+
+ After issuing one of these two commands, Git will usually check out another
+ bisection point and print something like 'Bisecting: 675 revisions left to
+ test after this (roughly 10 steps)'. In that case go back to step 1.
+
+ If Git instead prints something like 'cafecaca0c0dacafecaca0c0dacafecaca0c0da
+ is the first bad commit', then you have finished the bisection. In that case
+ move to the next point below. Note, right after displaying that line Git will
+ show some details about the culprit including its patch description; this can
+ easily fill your terminal, so you might need to scroll up to see the message
+ mentioning the culprit's commit-id.
+
+ In case you missed Git's output, you can always run ``git bisect log`` to
+ print the status: it will show how many steps remain or mention the result of
+ the bisection.
+
+* Recommended complementary task: put the bisection log and the current .config
+ file aside for the bug report; furthermore tell Git to reset the sources to
+ the state before the bisection::
+
+ git bisect log > ~/bisection-log
+ cp .config ~/bisection-config-culprit
+ git bisect reset
+
+* Recommended optional task: try reverting the culprit on top of the latest
+ codebase and check if that fixes your bug; if that is the case, it validates
+ the bisection and enables developers to resolve the regression through a
+ revert.
+
+ To try this, update your clone and check out the latest mainline. Then tell Git
+ to revert the change by specifying its commit-id::
+
+ git revert --no-edit cafec0cacaca0
+
+ Git might reject this, for example when the bisection landed on a merge
+ commit. In that case, abandon the attempt. Do the same if Git fails to revert
+ the culprit on its own because later changes depend on it -- at least unless
+ you bisected a stable or longterm kernel series, in which case you want to
+ check out its latest codebase and try a revert there.
+
+ If a revert succeeds, build and test another kernel to check if reverting
+ resolved your regression.
+
+With that the process is complete. Now report the regression as described by
+Documentation/admin-guide/reporting-issues.rst.
+
+
+Additional reading material
+---------------------------
+
+* The `man page for 'git bisect' <https://git-scm.com/docs/git-bisect>`_ and
+ `fighting regressions with 'git bisect' <https://git-scm.com/docs/git-bisect-lk2009.html>`_
+ in the Git documentation.
+* `Working with git bisect <https://nathanchance.dev/posts/working-with-git-bisect/>`_
+ from kernel developer Nathan Chancellor.
+* `Using Git bisect to figure out when brokenness was introduced <http://webchick.net/node/99>`_.
+* `Fully automated bisecting with 'git bisect run' <https://lwn.net/Articles/317154>`_ -- a rough sketch of that approach follows below.
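+
+A rough sketch of the fully automated approach (the test script name and its
+internals are hypothetical; the script must exit 0 when the kernel is good,
+1 to 124 or 126 to 127 when it is bad, and 125 to skip the current point)::
+
+ git bisect start v6.1 v6.0
+ git bisect run ~/check-regression.sh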
+
+..
+ end-of-content
+..
+ This document is maintained by Thorsten Leemhuis <linux@leemhuis.info>. If
+ you spot a typo or small mistake, feel free to let him know directly and
+ he'll fix it. You are free to do the same in a mostly informal way if you
+ want to contribute changes to the text -- but for copyright reasons please CC
+ linux-doc@vger.kernel.org and 'sign-off' your contribution as
+ Documentation/process/submitting-patches.rst explains in the section 'Sign
+ your work - the Developer's Certificate of Origin'.
+..
+ This text is available under GPL-2.0+ or CC-BY-4.0, as stated at the top
+ of the file. If you want to distribute this text under CC-BY-4.0 only,
+ please use 'The Linux kernel development community' for author attribution
+ and link this as source:
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/Documentation/admin-guide/bug-bisect.rst
+
+..
+ Note: Only the content of this RST file as found in the Linux kernel sources
+ is available under CC-BY-4.0, as versions of this text that were processed
+ (for example by the kernel's build system) might contain content taken from
+ files which use a more restrictive license.
diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst
index 95299b08c405..1d0f8ceb3075 100644
--- a/Documentation/admin-guide/bug-hunting.rst
+++ b/Documentation/admin-guide/bug-hunting.rst
@@ -244,14 +244,14 @@ Reporting the bug
Once you find where the bug happened, by inspecting its location,
you could either try to fix it yourself or report it upstream.
-In order to report it upstream, you should identify the mailing list
-used for the development of the affected code. This can be done by using
-the ``get_maintainer.pl`` script.
+In order to report it upstream, you should identify the bug tracker, if any, or
+mailing list used for the development of the affected code. This can be done by
+using the ``get_maintainer.pl`` script.
For example, if you find a bug at the gspca's sonixj.c file, you can get
its maintainers with::
- $ ./scripts/get_maintainer.pl -f drivers/media/usb/gspca/sonixj.c
+ $ ./scripts/get_maintainer.pl --bug -f drivers/media/usb/gspca/sonixj.c
Hans Verkuil <hverkuil@xs4all.nl> (odd fixer:GSPCA USB WEBCAM DRIVER,commit_signer:1/1=100%)
Mauro Carvalho Chehab <mchehab@kernel.org> (maintainer:MEDIA INPUT INFRASTRUCTURE (V4L/DVB),commit_signer:1/1=100%)
Tejun Heo <tj@kernel.org> (commit_signer:1/1=100%)
@@ -267,11 +267,12 @@ Please notice that it will point to:
- The driver maintainer (Hans Verkuil);
- The subsystem maintainer (Mauro Carvalho Chehab);
- The driver and/or subsystem mailing list (linux-media@vger.kernel.org);
-- the Linux Kernel mailing list (linux-kernel@vger.kernel.org).
+- The Linux Kernel mailing list (linux-kernel@vger.kernel.org);
+- The bug reporting URIs for the driver/subsystem (none in the above example).
-Usually, the fastest way to have your bug fixed is to report it to mailing
-list used for the development of the code (linux-media ML) copying the
-driver maintainer (Hans).
+If the listing contains bug reporting URIs at the end, please prefer them over
+email. Otherwise, please report bugs to the mailing list used for the
+development of the code (linux-media ML) copying the driver maintainer (Hans).
If you are totally stumped as to whom to send the report, and
``get_maintainer.pl`` didn't provide you anything useful, send it to
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 9cde26d33843..270501db9f4e 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -78,18 +78,24 @@ Brief summary of control files.
memory.memsw.max_usage_in_bytes show max memory+Swap usage recorded
memory.soft_limit_in_bytes set/show soft limit of memory usage
This knob is not available on CONFIG_PREEMPT_RT systems.
+ This knob is deprecated and shouldn't be
+ used.
memory.stat show various statistics
memory.use_hierarchy set/show hierarchical account enabled
This knob is deprecated and shouldn't be
used.
memory.force_empty trigger forced page reclaim
memory.pressure_level set memory pressure notifications
+ This knob is deprecated and shouldn't be
+ used.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate set/show controls of moving charges
This knob is deprecated and shouldn't be
used.
memory.oom_control set/show oom controls.
+ This knob is deprecated and shouldn't be
+ used.
memory.numa_stat show the number of memory usage per numa
node
memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel
@@ -105,10 +111,18 @@ Brief summary of control files.
memory.kmem.max_usage_in_bytes show max kernel memory usage recorded
memory.kmem.tcp.limit_in_bytes set/show hard limit for tcp buf memory
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.usage_in_bytes show current tcp buf memory allocation
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.failcnt show the number of tcp buf memory usage
hits limits
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded
+ This knob is deprecated and shouldn't be
+ used.
==================================== ==========================================
1. History
@@ -693,8 +707,10 @@ For compatibility reasons writing 1 to memory.use_hierarchy will always pass::
# echo 1 > memory.use_hierarchy
-7. Soft limits
-==============
+7. Soft limits (DEPRECATED)
+===========================
+
+THIS IS DEPRECATED!
Soft limits allow for greater sharing of memory. The idea behind soft limits
is to allow control groups to use as much of the memory as needed, provided
@@ -834,8 +850,10 @@ It's applicable for root and non-root cgroup.
.. _cgroup-v1-memory-oom-control:
-10. OOM Control
-===============
+10. OOM Control (DEPRECATED)
+============================
+
+THIS IS DEPRECATED!
memory.oom_control file is for OOM notification and other controls.
@@ -882,8 +900,10 @@ At reading, current status of OOM is shown.
The number of processes belonging to this cgroup killed by any
kind of OOM killer.
-11. Memory Pressure
-===================
+11. Memory Pressure (DEPRECATED)
+================================
+
+THIS IS DEPRECATED!
The pressure level notifications can be used to monitor the memory
allocation cost; based on the pressure, applications can implement
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 95c18bc17083..69af2173555f 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -533,10 +533,12 @@ cgroup namespace on namespace creation.
Because the resource control interface files in a given directory
control the distribution of the parent's resources, the delegatee
shouldn't be allowed to write to them. For the first method, this is
-achieved by not granting access to these files. For the second, the
-kernel rejects writes to all files other than "cgroup.procs" and
-"cgroup.subtree_control" on a namespace root from inside the
-namespace.
+achieved by not granting access to these files. For the second, files
+outside the namespace should be hidden from the delegatee by the means
+of at least mount namespacing, and the kernel rejects writes to all
+files on a namespace root from inside the cgroup namespace, except for
+those files listed in "/sys/kernel/cgroup/delegate" (including
+"cgroup.procs", "cgroup.threads", "cgroup.subtree_control", etc.).
The end results are equivalent for both delegation types. Once
delegated, the user can build sub-hierarchy under the directory,
@@ -981,6 +983,14 @@ All cgroup core files are prefixed with "cgroup."
A dying cgroup can consume system resources not exceeding
limits, which were active at the moment of cgroup deletion.
+ nr_subsys_<cgroup_subsys>
+ Total number of live cgroup subsystems (e.g. memory
+ cgroup) at and beneath the current cgroup.
+
+ nr_dying_subsys_<cgroup_subsys>
+ Total number of dying cgroup subsystems (e.g. memory
+ cgroup) at and beneath the current cgroup.
+
cgroup.freeze
A read-write single value file which exists on non-root cgroups.
Allowed values are "0" and "1". The default is "0".
@@ -1333,11 +1343,14 @@ The following nested keys are defined.
all the existing limitations and potential future extensions.
memory.peak
- A read-only single value file which exists on non-root
- cgroups.
+ A read-write single value file which exists on non-root cgroups.
+
+ The max memory usage recorded for the cgroup and its descendants since
+ either the creation of the cgroup or the most recent reset for that FD.
- The max memory usage recorded for the cgroup and its
- descendants since the creation of the cgroup.
+ A write of any non-empty string to this file resets it to the
+ current memory usage for subsequent reads through the same
+ file descriptor.
memory.oom.group
A read-write single value file which exists on non-root
@@ -1614,6 +1627,25 @@ The following nested keys are defined.
Usually because failed to allocate some continuous swap space
for the huge page.
+ numa_pages_migrated (npn)
+ Number of pages migrated by NUMA balancing.
+
+ numa_pte_updates (npn)
+ Number of pages whose page table entries are modified by
+ NUMA balancing to produce NUMA hinting faults on access.
+
+ numa_hint_faults (npn)
+ Number of NUMA hinting faults.
+
+ pgdemote_kswapd
+ Number of pages demoted by kswapd.
+
+ pgdemote_direct
+ Number of pages demoted directly.
+
+ pgdemote_khugepaged
+ Number of pages demoted by khugepaged.
+
memory.numa_stat
A read-only nested-keyed file which exists on non-root cgroups.
@@ -1663,11 +1695,14 @@ The following nested keys are defined.
Healthy workloads are not expected to reach this limit.
memory.swap.peak
- A read-only single value file which exists on non-root
- cgroups.
+ A read-write single value file which exists on non-root cgroups.
+
+ The max swap usage recorded for the cgroup and its descendants since
+ the creation of the cgroup or the most recent reset for that FD.
- The max swap usage recorded for the cgroup and its
- descendants since the creation of the cgroup.
+ A write of any non-empty string to this file resets it to the
+ current swap usage for subsequent reads through the same
+ file descriptor.
memory.swap.max
A read-write single value file which exists on non-root
@@ -1731,6 +1766,8 @@ The following nested keys are defined.
Note that this is subtly different from setting memory.swap.max to
0, as it still allows for pages to be written to the zswap pool.
+ This setting has no effect if zswap is disabled, and swapping
+ is allowed unless memory.swap.max is set to 0.
memory.pressure
A read-only nested-keyed file.
@@ -2940,8 +2977,8 @@ Deprecated v1 Core Features
- "cgroup.clone_children" is removed.
-- /proc/cgroups is meaningless for v2. Use "cgroup.controllers" file
- at the root instead.
+- /proc/cgroups is meaningless for v2. Use "cgroup.controllers" or
+ "cgroup.stat" files at the root instead.
Issues with v1 and Rationales for v2
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 552c9155165d..48a48bd09372 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -162,13 +162,18 @@ iv_large_sectors
Module parameters::
-
max_read_size
+ Maximum size of read requests. When a request larger than this size
+ is received, dm-crypt will split the request. The splitting improves
+ concurrency (the split requests could be encrypted in parallel by multiple
+ cores), but it also causes overhead. The user should tune this parameters to
+ fit the actual workload.
+
max_write_size
- Maximum size of read or write requests. When a request larger than this size
+ Maximum size of write requests. When a request larger than this size
is received, dm-crypt will split the request. The splitting improves
concurrency (the split requests could be encrypted in parallel by multiple
- cores), but it also causes overhead. The user should tune these parameters to
+ cores), but it also causes overhead. The user should tune this parameter to
fit the actual workload.
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index 5740d85439ff..2418b0c2d3df 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -212,16 +212,6 @@ When mounting an ext4 filesystem, the following option are accepted:
that ext4's inode table readahead algorithm will pre-read into the
buffer cache. The default value is 32 blocks.
- nouser_xattr
- Disables Extended User Attributes. See the attr(5) manual page for
- more information about extended attributes.
-
- noacl
- This option disables POSIX Access Control List support. If ACL support
- is enabled in the kernel configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL
- is enabled by default on mount. See the acl(5) manual page for more
- information about acl.
-
bsddf (*)
Make 'df' act like BSD.
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index 4bd3ce3ba171..2ad1c05b8c88 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -158,3 +158,72 @@ poisoned BTB entry and using that safe one for all function returns.
In older Zen1 and Zen2, this is accomplished using a reinterpretation
technique similar to Retbleed one: srso_untrain_ret() and
srso_safe_ret().
+
+Checking the safe RET mitigation actually works
+-----------------------------------------------
+
+In case one wants to validate whether the SRSO safe RET mitigation works
+on a kernel, one could use two performance counters
+
+* PMC_0xc8 - Count of RET/RET lw retired
+* PMC_0xc9 - Count of RET/RET lw retired mispredicted
+
+and compare the number of RETs retired properly vs those retired
+mispredicted, in kernel mode. Another way of specifying those events
+is::
+
+ # perf list ex_ret_near_ret
+
+ List of pre-defined events (to be used in -e or -M):
+
+ core:
+ ex_ret_near_ret
+ [Retired Near Returns]
+ ex_ret_near_ret_mispred
+ [Retired Near Returns Mispredicted]
+
+Either the command using the event mnemonics::
+
+ # perf stat -e ex_ret_near_ret:k -e ex_ret_near_ret_mispred:k sleep 10s
+
+or using the raw PMC numbers::
+
+ # perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+should give approximately the same counts, i.e., every retired RET should be
+mispredicted::
+
+ [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+ Performance counter stats for 'sleep 10s':
+
+ 137,167 cpu/event=0xc8,umask=0/k
+ 137,173 cpu/event=0xc9,umask=0/k
+
+ 10.004110303 seconds time elapsed
+
+ 0.000000000 seconds user
+ 0.004462000 seconds sys
+
+vs the case when the mitigation is disabled (spec_rstack_overflow=off)
+or not functioning properly, showing usually a lot smaller number of
+mispredicted retired RETs vs the overall count of retired RETs during
+a workload::
+
+ [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+ Performance counter stats for 'sleep 10s':
+
+ 201,627 cpu/event=0xc8,umask=0/k
+ 4,074 cpu/event=0xc9,umask=0/k
+
+ 10.003267252 seconds time elapsed
+
+ 0.002729000 seconds user
+ 0.000000000 seconds sys
+
+Also, there is a selftest which performs the above check; go to
+tools/testing/selftests/x86/ and do::
+
+ make srso
+ ./srso
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 09126bb8cc9f..bb48ae24ae69 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -333,12 +333,17 @@
allowed anymore to lift isolation
requirements as needed. This option
does not override iommu=pt
- force_enable - Force enable the IOMMU on platforms known
- to be buggy with IOMMU enabled. Use this
- option with care.
- pgtbl_v1 - Use v1 page table for DMA-API (Default).
- pgtbl_v2 - Use v2 page table for DMA-API.
- irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
+ force_enable - Force enable the IOMMU on platforms known
+ to be buggy with IOMMU enabled. Use this
+ option with care.
+ pgtbl_v1 - Use v1 page table for DMA-API (Default).
+ pgtbl_v2 - Use v2 page table for DMA-API.
+ irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
+ nohugepages - Limit page-sizes used for v1 page-tables
+ to 4 KiB.
+ v2_pgsizes_only - Limit page-sizes used for v1 page-tables
+ to 4KiB/2MiB/1GiB.
+
amd_iommu_dump= [HW,X86-64]
Enable AMD IOMMU driver option to dump the ACPI table
@@ -517,6 +522,18 @@
Format: <io>,<irq>,<mode>
See header of drivers/net/hamradio/baycom_ser_hdx.c.
+ bdev_allow_write_mounted=
+ Format: <bool>
+ Control the ability to open a mounted block device
+ for writing, i.e., allow / disallow writes that bypass
+ the FS. This was implemented as a means to prevent
+ fuzzers from crashing the kernel by overwriting the
+ metadata underneath a mounted FS without its awareness.
+ This also prevents destructive formatting of mounted
+ filesystems by naive storage tooling that doesn't use
+ O_EXCL. Default is Y and can be changed through the
+ Kconfig option CONFIG_BLK_DEV_WRITE_MOUNTED.
+
bert_disable [ACPI]
Disable BERT OS support on buggy BIOSes.
@@ -2350,6 +2367,18 @@
ipcmni_extend [KNL,EARLY] Extend the maximum number of unique System V
IPC identifiers from 32,768 to 16,777,216.
+ ipe.enforce= [IPE]
+ Format: <bool>
+ Determine whether IPE starts in permissive (0) or
+ enforcing (1) mode. The default is enforcing mode.
+
+ ipe.success_audit=
+ [IPE]
+ Format: <bool>
+ Start IPE with success auditing enabled, emitting
+ an audit event when a binary is allowed. The default
+ is 0.
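+
+ For example, booting with "ipe.enforce=0
+ ipe.success_audit=1" starts IPE in permissive mode
+ with success auditing enabled, which can be useful
+ while developing a policy.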
+
irqaffinity= [SMP] Set the default irq affinity mask
The argument is a cpu list, as described above.
@@ -4123,6 +4152,21 @@
Disable NUMA, Only set up a single NUMA node
spanning all memory.
+ numa=fake=<size>[MG]
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as a memory unit, fills all system RAM with
+ nodes of size interleaved over physical nodes.
+
+ numa=fake=<N>
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as an integer, fills all system RAM with N
+ fake nodes interleaved over physical nodes.
+
+ numa=fake=<N>U
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as an integer followed by 'U', it will
+ divide each physical node into N emulated nodes.
+
numa_balancing= [KNL,ARM64,PPC,RISCV,S390,X86] Enable or disable automatic
NUMA balancing.
Allowed values are enable and disable
@@ -4788,6 +4832,16 @@
printk.time= Show timing data prefixed to each printk message line
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
+ proc_mem.force_override= [KNL]
+ Format: {always | ptrace | never}
+ Traditionally /proc/pid/mem allows memory permissions to be
+ overridden without restrictions. This option may be set to
+ restrict that. Can be one of:
+ - 'always': traditional behavior always allows mem overrides.
+ - 'ptrace': only allow mem overrides for active ptracers.
+ - 'never': never allow mem overrides.
+ If not specified, default is the CONFIG_PROC_MEM_* choice.
+
processor.max_cstate= [HW,ACPI]
Limit processor to maximum C-state
max_cstate=9 overrides any DMI blacklist limit.
@@ -4935,6 +4989,10 @@
Set maximum number of finished RCU callbacks to
process in one batch.
+ rcutree.csd_lock_suppress_rcu_stall= [KNL]
+ Do only a one-line RCU CPU stall warning when
+ there is an ongoing too-long CSD-lock wait.
+
rcutree.do_rcu_barrier= [KNL]
Request a call to rcu_barrier(). This is
throttled so that userspace tests can safely
@@ -5382,7 +5440,13 @@
Time to wait (s) after boot before inducing stall.
rcutorture.stall_cpu_irqsoff= [KNL]
- Disable interrupts while stalling if set.
+ Disable interrupts while stalling if set, but only
+ on the first stall in the set.
+
+ rcutorture.stall_cpu_repeat= [KNL]
+ Number of times to repeat the stall sequence,
+ so that rcutorture.stall_cpu_repeat=3 will result
+ in four stall sequences.
rcutorture.stall_gp_kthread= [KNL]
Duration (s) of forced sleep within RCU
@@ -5570,14 +5634,6 @@
of zero will disable batching. Batching is
always disabled for synchronize_rcu_tasks().
- rcupdate.rcu_tasks_rude_lazy_ms= [KNL]
- Set timeout in milliseconds RCU Tasks
- Rude asynchronous callback batching for
- call_rcu_tasks_rude(). A negative value
- will take the default. A value of zero will
- disable batching. Batching is always disabled
- for synchronize_rcu_tasks_rude().
-
rcupdate.rcu_tasks_trace_lazy_ms= [KNL]
Set timeout in milliseconds RCU Tasks
Trace asynchronous callback batching for
@@ -6614,6 +6670,15 @@
<deci-seconds>: poll all this frequency
0: no polling (default)
+ thp_anon= [KNL]
+ Format: <size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>
+ state is one of "always", "madvise", "never" or "inherit".
+ Control the default behavior of the system with respect
+ to anonymous transparent hugepages.
+ Can be used multiple times for multiple anon THP sizes.
+ See Documentation/admin-guide/mm/transhuge.rst for more
+ details.
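+
+ An illustrative example (assuming an architecture
+ where these anon THP sizes exist):
+ thp_anon=16K-64K:always;128K,512K:inherit;1M-2M:never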
+
threadirqs [KNL,EARLY]
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
@@ -6743,6 +6808,51 @@
the same thing would happen if it was left off). The irq_handler_entry
event, and all events under the "initcall" system.
+ Flags can be added to the instance to modify its behavior when it is
+ created. The flags are separated by '^'.
+
+ The available flags are:
+
+ traceoff - Have tracing disabled in the instance after it is created.
+ traceprintk - Have trace_printk() write into this trace instance
+ (note, "printk" and "trace_printk" can also be used)
+
+ trace_instance=foo^traceoff^traceprintk,sched,irq
+
+ The flags must come before the defined events.
+
+ If memory has been reserved (see memmap for x86), the instance
+ can use that memory:
+
+ memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M
+
+ The above will create a "boot_map" instance that uses 12 MB of the
+ physical memory at 0x284500000. The per-CPU buffers of that
+ instance will be split up accordingly.
+
+ Alternatively, the memory can be reserved by the reserve_mem option:
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map@trace
+
+ This will reserve 12 megabytes at boot up with a 4096 byte alignment
+ and place the ring buffer in this memory. Note that due to KASLR, the
+ memory may not be the same location each time, which will not preserve
+ the buffer content.
+
+ Also note that the layout of the ring buffer data may change between
+ kernel versions, in which case the validator will fail and reset the
+ ring buffer if the layout differs from the previous kernel's.
+
+ If the ring buffer is used for persistent bootups and has events enabled,
+ it is recommended to disable tracing so that events from a previous boot do not
+ mix with events of the current boot (unless you are debugging a random crash
+ at boot up).
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map^traceoff^traceprintk@trace,sched,irq
+
+ See also Documentation/trace/debugging.rst
+
+
trace_options=[option-list]
[FTRACE] Enable or disable tracer options at boot.
The option-list is a comma delimited list of options
@@ -7352,6 +7462,13 @@
it can be updated at runtime by writing to the
corresponding sysfs file.
+ workqueue.panic_on_stall=<uint>
+ Panic when a workqueue stall is detected by
+ CONFIG_WQ_WATCHDOG. The value sets the number of
+ stalls after which the kernel panics.
+
+ The default is 0, which disables the panic on stall.
+
workqueue.cpu_intensive_thresh_us=
Per-cpu work items which run for longer than this
threshold are automatically considered CPU intensive
diff --git a/Documentation/admin-guide/media/cec.rst b/Documentation/admin-guide/media/cec.rst
index 6b30e355cf23..92690e1f2183 100644
--- a/Documentation/admin-guide/media/cec.rst
+++ b/Documentation/admin-guide/media/cec.rst
@@ -42,10 +42,14 @@ dongles):
``persistent_config``: by default this is off, but when set to 1 the driver
will store the current settings to the device's internal eeprom and restore
it the next time the device is connected to the USB port.
+
- RainShadow Tech. Note: this driver does not support the persistent_config
module option of the Pulse-Eight driver. The hardware supports it, but I
have no plans to add this feature. But I accept patches :-)
+- Extron DA HD 4K PLUS HDMI Distribution Amplifier. See
+ :ref:`extron_da_hd_4k_plus` for more information.
+
Miscellaneous:
- vivid: emulates a CEC receiver and CEC transmitter.
@@ -378,3 +382,86 @@ it later using ``--analyze-pin``.
You can also use this as a full-fledged CEC device by configuring it
using ``cec-ctl --tv -p0.0.0.0`` or ``cec-ctl --playback -p1.0.0.0``.
+
+.. _extron_da_hd_4k_plus:
+
+Extron DA HD 4K PLUS CEC Adapter driver
+=======================================
+
+This driver is for the Extron DA HD 4K PLUS series of HDMI Distribution
+Amplifiers: https://www.extron.com/product/dahd4kplusseries
+
+The 2, 4 and 6 port models are supported.
+
+Firmware version 1.02.0001 or higher is required.
+
+Note that older Extron hardware revisions have a problem with the CEC voltage,
+which may mean that CEC will not work. This is fixed in hardware revisions
+E34814 and up.
+
+The CEC support has two modes: the first is a manual mode where userspace has
+to manually control CEC for the HDMI Input and all HDMI Outputs. While this gives
+full control, it is also complicated.
+
+The second mode is an automatic mode, which is selected if the module option
+``vendor_id`` is set. In that case the driver controls CEC and CEC messages
+received in the input will be distributed to the outputs. It is still possible
+to use the /dev/cecX devices to talk to the connected devices directly, but it is
+the driver that configures everything and deals with things like Hotplug Detect
+changes.
+
+The driver also takes care of the EDIDs: /dev/videoX devices are created to
+read the EDIDs and (for the HDMI Input port) to set the EDID.
+
+By default userspace is responsible for setting the EDID for the HDMI Input
+according to the EDIDs of the connected displays. But if the ``manufacturer_name``
+module option is set, then the driver will take care of setting the EDID
+of the HDMI Input based on the supported resolutions of the connected displays.
+Currently the driver only supports resolutions 1080p60 and 4kp60: if all connected
+displays support 4kp60, then it will advertise 4kp60 on the HDMI input, otherwise
+it will fall back to an EDID that just reports 1080p60.
+
+The status of the Extron is reported in ``/sys/kernel/debug/cec/cecX/status``.
+
+The extron-da-hd-4k-plus driver implements the following module options:
+
+``debug``
+---------
+
+If set to 1, then all serial port traffic is shown.
+
+``vendor_id``
+-------------
+
+The CEC Vendor ID to report to connected displays.
+
+If set, then the driver will take care of distributing CEC messages received
+on the input to the HDMI outputs. This is done for the following CEC messages:
+
+- <Standby>
+- <Image View On> and <Text View On>
+- <Give Device Power Status>
+- <Set System Audio Mode>
+- <Request Current Latency>
+
+If not set, then userspace is responsible for this, and it will have to
+configure the CEC devices for HDMI Input and the HDMI Outputs manually.
+
+``manufacturer_name``
+---------------------
+
+A three character manufacturer name that is used in the EDID for the HDMI
+Input. If not set, then userspace is responsible for configuring an EDID.
+If set, then the driver will update the EDID automatically based on the
+resolutions supported by the connected displays, and it will no longer be
+possible to manually set the EDID for the HDMI Input.
+
+``hpd_never_low``
+-----------------
+
+If set, then the Hotplug Detect pin of the HDMI Input will always be high,
+even if nothing is connected to the HDMI Outputs. If not set (the default),
+then the Hotplug Detect pin of the HDMI Input will go low if all the detected
+Hotplug Detect pins of the HDMI Outputs are also low.
+
+This option may be changed dynamically.
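+
+For example, to load the driver in the automatic mode with driver-managed
+EDIDs (both option values below are purely illustrative)::
+
+    # modprobe extron-da-hd-4k-plus vendor_id=0x000c03 manufacturer_name=LNX
+
+The resulting state can then be inspected in
+``/sys/kernel/debug/cec/cecX/status`` as described above.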
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
index e434d4a9eeb3..b9da127c074d 100644
--- a/Documentation/admin-guide/media/mgb4.rst
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -227,8 +227,13 @@ Common FPDL3/GMSL output parameters
open.*
**frame_rate** (RW):
- Output video frame rate in frames per second. The default frame rate is
- 60Hz.
+ Output video signal frame rate limit in frames per second. Due to
+ the limited output pixel clock steps, the card cannot always generate
+ a frame rate perfectly matching the value required by the connected display.
+ Using this parameter one can limit the frame rate by "crippling" the signal
+ so that the lines are not equal (the porches of the last line differ), but
+ to the connected display the signal appears to have the exact frame rate.
+ The default frame rate limit is 60Hz.
**hsync_polarity** (RW):
HSYNC signal polarity.
@@ -253,33 +258,33 @@ Common FPDL3/GMSL output parameters
and there is a non-linear stepping between two consecutive allowed
frequencies. The driver finds the nearest allowed frequency to the given
value and sets it. When reading this property, you get the exact
- frequency set by the driver. The default frequency is 70000kHz.
+ frequency set by the driver. The default frequency is 61150kHz.
*Note: This parameter can not be changed while the output v4l2 device is
open.*
**hsync_width** (RW):
- Width of the HSYNC signal in pixels. The default value is 16.
+ Width of the HSYNC signal in pixels. The default value is 40.
**vsync_width** (RW):
- Width of the VSYNC signal in video lines. The default value is 2.
+ Width of the VSYNC signal in video lines. The default value is 20.
**hback_porch** (RW):
Number of PCLK pulses between deassertion of the HSYNC signal and the first
- valid pixel in the video line (marked by DE=1). The default value is 32.
+ valid pixel in the video line (marked by DE=1). The default value is 50.
**hfront_porch** (RW):
Number of PCLK pulses between the end of the last valid pixel in the video
line (marked by DE=1) and assertion of the HSYNC signal. The default value
- is 32.
+ is 50.
**vback_porch** (RW):
Number of video lines between deassertion of the VSYNC signal and the video
- line with the first valid pixel (marked by DE=1). The default value is 2.
+ line with the first valid pixel (marked by DE=1). The default value is 31.
**vfront_porch** (RW):
Number of video lines between the end of the last valid pixel line (marked
- by DE=1) and assertion of the VSYNC signal. The default value is 2.
+ by DE=1) and assertion of the VSYNC signal. The default value is 30.
FPDL3 specific input parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/admin-guide/media/rkisp1.rst b/Documentation/admin-guide/media/rkisp1.rst
index 6f14d9561fa5..6c878c71442f 100644
--- a/Documentation/admin-guide/media/rkisp1.rst
+++ b/Documentation/admin-guide/media/rkisp1.rst
@@ -114,11 +114,18 @@ to be applied to the hardware during a video stream, allowing userspace
to dynamically modify values such as black level, cross talk corrections
and others.
-The buffer format is defined by struct :c:type:`rkisp1_params_cfg`, and
-userspace should set
+The ISP driver supports two different parameters configuration methods, the
+`fixed parameters format` or the `extensible parameters format`.
+
+When using the `fixed parameters` method the buffer format is defined by struct
+:c:type:`rkisp1_params_cfg`, and userspace should set
:ref:`V4L2_META_FMT_RK_ISP1_PARAMS <v4l2-meta-fmt-rk-isp1-params>` as the
dataformat.
+When using the `extensible parameters` method the buffer format is defined by
+struct :c:type:`rkisp1_ext_params_cfg`, and userspace should set
+:ref:`V4L2_META_FMT_RK_ISP1_EXT_PARAMS <v4l2-meta-fmt-rk-isp1-ext-params>` as
+the dataformat.
Capturing Video Frames Example
==============================
diff --git a/Documentation/admin-guide/media/vivid.rst b/Documentation/admin-guide/media/vivid.rst
index 1306f19ecb5a..034ca7c77fb9 100644
--- a/Documentation/admin-guide/media/vivid.rst
+++ b/Documentation/admin-guide/media/vivid.rst
@@ -328,7 +328,7 @@ and an HDMI input, one input for each input type. Those are described in more
detail below.
Special attention has been given to the rate at which new frames become
-available. The jitter will be around 1 jiffie (that depends on the HZ
+available. The jitter will be around 1 jiffy (that depends on the HZ
configuration of your kernel, so usually 1/100, 1/250 or 1/1000 of a second),
but the long-term behavior exactly follows the framerate. So a
framerate of 59.94 Hz is really different from 60 Hz. If the framerate
@@ -1343,7 +1343,7 @@ Some Future Improvements
Just as a reminder and in no particular order:
- Add a virtual alsa driver to test audio
-- Add virtual sub-devices and media controller support
+- Add virtual sub-devices
- Some support for testing compressed video
- Add support to loop raw VBI output to raw VBI input
- Add support to loop teletext sliced VBI output to VBI input
@@ -1358,4 +1358,4 @@ Just as a reminder and in no particular order:
- Make a thread for the RDS generation, that would help in particular for the
"Controls" RDS Rx I/O Mode as the read-only RDS controls could be updated
in real-time.
-- Changing the EDID should cause hotplug detect emulation to happen.
+- Changing the EDID doesn't wait 100 ms before setting the HPD signal.
diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst
index 054010a7f3d8..c4dddf6733cd 100644
--- a/Documentation/admin-guide/mm/damon/start.rst
+++ b/Documentation/admin-guide/mm/damon/start.rst
@@ -7,7 +7,7 @@ Getting Started
This document briefly describes how you can use DAMON by demonstrating its
default user space tool. Please note that this document describes only a part
of its features for brevity. Please refer to the usage `doc
-<https://github.com/awslabs/damo/blob/next/USAGE.md>`_ of the tool for more
+<https://github.com/damonitor/damo/blob/next/USAGE.md>`_ of the tool for more
details.
@@ -26,7 +26,7 @@ User Space Tool
For the demonstration, we will use the default user space tool for DAMON,
called DAMON Operator (DAMO). It is available at
-https://github.com/awslabs/damo. The examples below assume that ``damo`` is on
+https://github.com/damonitor/damo. The examples below assume that ``damo`` is on
your ``$PATH``. It's not mandatory, though.
Because DAMO is using the sysfs interface (refer to :doc:`usage` for the
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 26df6cfa4441..d9be9f7caa7d 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -7,19 +7,19 @@ Detailed Usages
DAMON provides the interfaces below for different users.
- *DAMON user space tool.*
- `This <https://github.com/awslabs/damo>`_ is for privileged people such as
+ `This <https://github.com/damonitor/damo>`_ is for privileged people such as
system administrators who want a just-working human-friendly interface.
Using this, users can use the DAMON’s major features in a human-friendly way.
It may not be highly tuned for special cases, though. For more detail,
please refer to its `usage document
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_.
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_.
- *sysfs interface.*
:ref:`This <sysfs_interface>` is for privileged user space programmers who
want more optimized use of DAMON. Using this, users can use DAMON’s major
features by reading from and writing to special sysfs files. Therefore,
you can write and use your personalized DAMON sysfs wrapper programs that
read/write the sysfs files on your behalf. The `DAMON user space tool
- <https://github.com/awslabs/damo>`_ is one example of such programs.
+ <https://github.com/damonitor/damo>`_ is one example of such programs.
- *Kernel Space Programming Interface.*
:doc:`This </mm/damon/api>` is for kernel space programmers. Using this,
users can utilize every feature of DAMON most flexibly and efficiently by
@@ -543,7 +543,7 @@ memory rate becomes larger than 60%, or lower than 30%". ::
# echo 300 > watermarks/low
Please note that it's highly recommended to use user space tools like `damo
-<https://github.com/awslabs/damo>`_ rather than manually reading and writing
+<https://github.com/damonitor/damo>`_ rather than manually reading and writing
the files as above. The above is only an example.
.. _tracepoint:
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 098f14d83e99..cb2c080f400c 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -294,8 +294,9 @@ The following files are currently defined:
``crash_hotplug`` read-only: when changes to the system memory map
occur due to hot un/plug of memory, this file contains
'1' if the kernel updates the kdump capture kernel memory
- map itself (via elfcorehdr), or '0' if userspace must update
- the kdump capture kernel memory map.
+ map itself (via elfcorehdr and other relevant kexec
+ segments), or '0' if userspace must update the kdump
+ capture kernel memory map.
Availability depends on the CONFIG_MEMORY_HOTPLUG kernel
configuration option.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 058485daf186..cfdd16a52e39 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -202,6 +202,16 @@ PMD-mappable transparent hugepage::
cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
+All THPs at fault and collapse time will be added to _deferred_list,
+and will therefore be split under memory pressure if they are considered
+"underused". A THP is underused if the number of zero-filled pages in
+the THP is above max_ptes_none (see below). It is possible to disable
+this behaviour by writing 0 to shrink_underused, and enable it by writing
+1 to it::
+
+ echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused
+ echo 1 > /sys/kernel/mm/transparent_hugepage/shrink_underused
+
khugepaged will be automatically started when PMD-sized THP is enabled
(either the per-size anon control or the top-level control is set
to "always" or "madvise"), and it'll be automatically shut down when
@@ -284,13 +294,37 @@ that THP is shared. Exceeding the number would block the collapse::
A higher value may increase memory footprint for some workloads.
-Boot parameter
-==============
+Boot parameters
+===============
+
+You can change the sysfs boot time default for the top-level "enabled"
+control by passing the parameter ``transparent_hugepage=always`` or
+``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` to the
+kernel command line.
+
+Alternatively, each supported anonymous THP size can be controlled by
+passing ``thp_anon=<size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>``,
+where ``<size>`` is the THP size (must be a power-of-2 multiple of PAGE_SIZE
+and a supported anonymous THP size) and ``<state>`` is one of ``always``,
+``madvise``, ``never`` or ``inherit``.
+
+For example, the following will set 16K, 32K, 64K THP to ``always``,
+set 128K, 512K to ``inherit``, set 256K to ``madvise`` and 1M, 2M
+to ``never``::
-You can change the sysfs boot time defaults of Transparent Hugepage
-Support by passing the parameter ``transparent_hugepage=always`` or
-``transparent_hugepage=madvise`` or ``transparent_hugepage=never``
-to the kernel command line.
+ thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never
+
+``thp_anon=`` may be specified multiple times to configure all THP sizes as
+required. If ``thp_anon=`` is specified at least once, any anon THP sizes
+not explicitly configured on the command line are implicitly set to
+``never``.
+
+The ``transparent_hugepage`` setting only affects the global toggle. If
+``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``.
+However, if a valid ``thp_anon`` setting is provided by the user, the
+PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER
+is not defined within a valid ``thp_anon``, its policy will default to
+``never``.
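+
+For example (values purely illustrative), booting with::
+
+    transparent_hugepage=madvise thp_anon=64K:always
+
+sets the global toggle to ``madvise`` and 64K anon THP to ``always``, while
+all other anon THP sizes, including PMD_ORDER, default to ``never``.
+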
Hugepages in tmpfs/shmem
========================
@@ -447,6 +481,12 @@ thp_deferred_split_page
splitting it would free up some memory. Pages on split queue are
going to be split under memory pressure.
+thp_underused_split_page
+ is incremented when a huge page on the split queue was split
+ because it was underused. A THP is underused if the number of
+ zero-filled pages in the THP is above a certain threshold
+ (/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none).
+
thp_split_pmd
is incremented every time a PMD is split into a table of PTEs.
This can happen, for instance, when application calls mprotect() or
@@ -527,6 +567,18 @@ split_deferred
it would free up some memory. Pages on split queue are going to
be split under memory pressure, if splitting is possible.
+nr_anon
+ the number of anonymous THP we have in the whole system. These THPs
+ might be currently entirely mapped or have partially unmapped/unused
+ subpages.
+
+nr_anon_partially_mapped
+ the number of anonymous THP which are likely partially mapped, possibly
+ wasting memory, and have been queued for deferred memory reclamation.
+ Note that in some corner cases (e.g., failed migration), we might detect
+ an anonymous THP as "partially mapped" and count it here, even though it
+ is not actually partially mapped anymore.
+
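+These counters can be read from the per-size stats directories; for example,
+for 2 MiB THPs (the size directory shown is illustrative, use the sizes
+present on your system)::
+
+    $ cat /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/nr_anon
+    $ cat /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/nr_anon_partially_mapped
+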
As the system ages, allocating huge pages may be expensive as the
system uses memory compaction to copy data around memory to free a
huge page for use. There are some counters in ``/proc/vmstat`` to help
diff --git a/Documentation/admin-guide/perf/arm-ni.rst b/Documentation/admin-guide/perf/arm-ni.rst
new file mode 100644
index 000000000000..d26a8f697c36
--- /dev/null
+++ b/Documentation/admin-guide/perf/arm-ni.rst
@@ -0,0 +1,17 @@
+====================================
+Arm Network-on-Chip Interconnect PMU
+====================================
+
+NI-700 and friends implement a distinct PMU for each clock domain within the
+interconnect. Correspondingly, the driver exposes multiple PMU devices named
+arm_ni_<x>_cd_<y>, where <x> is an (arbitrary) instance identifier and <y> is
+the clock domain ID within that particular instance. If multiple NI instances
+exist within a system, the PMU devices can be correlated with the underlying
+hardware instance via sysfs parentage.
+
+Each PMU exposes base event aliases for the interface types present in its clock
+domain. These require qualifying with the "eventid" and "nodeid" parameters
+to specify the event code to count and the interface at which to count it
+(per the configured hardware ID as reflected in the xxNI_NODE_INFO register).
+The exception is the "cycles" alias for the PMU cycle counter, which is encoded
+with the PMU node type and needs no further qualification.
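+
+For example, counting cycles in clock domain 0 of NI instance 0 (the
+instance numbering is illustrative)::
+
+    $# perf stat -a -e arm_ni_0_cd_0/cycles/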
diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
index d47cd229d710..39b8e1fdd0cd 100644
--- a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
+++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst
@@ -46,16 +46,16 @@ Some of the events only exist for specific configurations.
DesignWare Cores (DWC) PCIe PMU Driver
=======================================
-This driver adds PMU devices for each PCIe Root Port named based on the BDF of
+This driver adds PMU devices for each PCIe Root Port named based on the SBDF of
the Root Port. For example,
- 30:03.0 PCI bridge: Device 1ded:8000 (rev 01)
+ 0001:30:03.0 PCI bridge: Device 1ded:8000 (rev 01)
-the PMU device name for this Root Port is dwc_rootport_3018.
+the PMU device name for this Root Port is dwc_rootport_13018.
The DWC PCIe PMU driver registers a perf PMU driver, which provides
description of available events and configuration options in sysfs, see
-/sys/bus/event_source/devices/dwc_rootport_{bdf}.
+/sys/bus/event_source/devices/dwc_rootport_{sbdf}.
The "format" directory describes format of the config fields of the
perf_event_attr structure. The "events" directory provides configuration
@@ -66,16 +66,16 @@ The "perf list" command shall list the available events from sysfs, e.g.::
$# perf list | grep dwc_rootport
<...>
- dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event]
+ dwc_rootport_13018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event]
<...>
- dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event]
+ dwc_rootport_13018/rx_memory_read,lane=?/ [Kernel PMU event]
Time Based Analysis Event Usage
-------------------------------
Example usage of counting PCIe RX TLP data payload (Units of bytes)::
- $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/
+ $# perf stat -a -e dwc_rootport_13018/Rx_PCIe_TLP_Data_Payload/
The average RX/TX bandwidth can be calculated using the following formula:
@@ -88,7 +88,7 @@ Lane Event Usage
Each lane has the same event set and to avoid generating a list of hundreds
of events, the user needs to specify the lane ID explicitly, e.g.::
- $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/
+ $# perf stat -a -e dwc_rootport_13018/rx_memory_read,lane=4/
The driver does not support sampling, therefore "perf record" will not
work. Per-task (without "-a") perf sessions are not supported.
diff --git a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
index 5541ff40e06a..083ca50de896 100644
--- a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
+++ b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
@@ -28,7 +28,9 @@ The "identifier" sysfs file allows users to identify the version of the
PMU hardware device.
The "bus" sysfs file allows users to get the bus number of Root Ports
-monitored by PMU.
+monitored by PMU. Furthermore, users can get the Root Ports range
+[bdf_min, bdf_max] from the "bdf_min" and "bdf_max" sysfs attributes
+respectively.
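+
+For example, the Root Port range of a PMU instance can be read as follows
+(the PMU device name is illustrative)::
+
+    $# cat /sys/bus/event_source/devices/hisi_pcie0_core0/bdf_min
+    $# cat /sys/bus/event_source/devices/hisi_pcie0_core0/bdf_max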
Example usage of perf::
diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst
index 7eb3dcd6f4da..8502bc174640 100644
--- a/Documentation/admin-guide/perf/index.rst
+++ b/Documentation/admin-guide/perf/index.rst
@@ -16,6 +16,7 @@ Performance monitor support
starfive_starlink_pmu
arm-ccn
arm-cmn
+ arm-ni
xgene-pmu
arm_dsu_pmu
thunderx2-pmu
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index d0324d44f548..210a808b74ec 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -251,7 +251,9 @@ performance supported in `AMD CPPC Performance Capability <perf_cap_>`_).
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
table, so we need to expose it to sysfs. If boost is not active, but
still supported, this maximum frequency will be larger than the one in
-``cpuinfo``.
+``cpuinfo``. On systems that support preferred core, the driver will report
+different values for some cores than for others, reflecting the values
+advertised by the platform at bootup.
This attribute is read-only.
``amd_pstate_lowest_nonlinear_freq``
@@ -262,6 +264,17 @@ lowest non-linear performance in `AMD CPPC Performance Capability
<perf_cap_>`_.)
This attribute is read-only.
+``amd_pstate_hw_prefcore``
+
+Whether the platform supports the preferred core feature and whether it has
+been enabled. This attribute is read-only.
+
+``amd_pstate_prefcore_ranking``
+
+The performance ranking of the core. This number has no unit; cores with
+larger numbers are preferred at the time of reading. The ranking can change
+at runtime based on platform conditions. This attribute is read-only.
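+
+For example, the ranking of CPU0 can be read from its cpufreq policy
+directory (path shown for illustration)::
+
+    $ cat /sys/devices/system/cpu/cpu0/cpufreq/amd_pstate_prefcore_ranking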
+
``energy_performance_available_preferences``
A list of all the supported EPP preferences that could be used for
diff --git a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
index 5ab3440e6cee..5151ec312dc0 100644
--- a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
+++ b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst
@@ -113,3 +113,62 @@ to apply at each uncore* level.
Support for "current_freq_khz" is available only at each fabric cluster
level (i.e., in uncore* directory).
+
+Efficiency vs. Latency Tradeoff
+-------------------------------
+
+The Efficiency Latency Control (ELC) feature improves performance
+per watt. With this feature, hardware power management algorithms
+optimize the trade-off between latency and power consumption. For some
+latency-sensitive workloads, further tuning can be done by software to
+get the desired performance.
+
+The hardware monitors the average CPU utilization across all cores
+in a power domain at regular intervals and decides an uncore frequency.
+While this may result in the best performance per watt, a workload may
+expect higher performance at the expense of power. Consider an
+application that intermittently wakes up to perform memory reads on an
+otherwise idle system. In such cases, if the hardware lowers the uncore
+frequency, there may be a delay in ramping the frequency back up to meet
+the target performance.
+
+ELC defines some parameters which can be changed from software.
+If the average CPU utilization is below a user-defined threshold
+(the elc_low_threshold_percent attribute below), the user-defined uncore
+floor frequency (the elc_floor_freq_khz attribute below) will be used
+instead of the hardware-calculated minimum.
+
+Similarly, in a high-load scenario where the CPU utilization goes above
+the high threshold value (the elc_high_threshold_percent attribute below),
+instead of jumping to the maximum uncore frequency, the frequency is
+increased in 100MHz steps. This avoids consuming unnecessarily high power
+immediately upon CPU utilization spikes.
+
+Attributes for efficiency latency control:
+
+``elc_floor_freq_khz``
+ This attribute is used to get/set the efficiency latency floor frequency.
+ If this variable is lower than the 'min_freq_khz', it is ignored by
+ the firmware.
+
+``elc_low_threshold_percent``
+ This attribute is used to get/set the efficiency latency control low
+ threshold. This attribute is in percentages of CPU utilization.
+
+``elc_high_threshold_percent``
+ This attribute is used to get/set the efficiency latency control high
+ threshold. This attribute is in percentages of CPU utilization.
+
+``elc_high_threshold_enable``
+ This attribute is used to enable/disable the efficiency latency control
+ high threshold. Write '1' to enable, '0' to disable.
+
+The example system configuration below does the following:
+ * when CPU utilization is less than 10%: sets uncore frequency to 800MHz
+ * when CPU utilization is higher than 95%: increases uncore frequency in
+ 100MHz steps, until power limit is reached
+
+ elc_floor_freq_khz:800000
+ elc_high_threshold_percent:95
+ elc_high_threshold_enable:1
+ elc_low_threshold_percent:10
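+
+Assuming the attributes are exposed in the usual uncore sysfs directory
+(the directory name below is illustrative), this could be applied with::
+
+    # cd /sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00
+    # echo 800000 > elc_floor_freq_khz
+    # echo 10 > elc_low_threshold_percent
+    # echo 95 > elc_high_threshold_percent
+    # echo 1 > elc_high_threshold_enable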
diff --git a/Documentation/admin-guide/ramoops.rst b/Documentation/admin-guide/ramoops.rst
index 6f534a707b2a..2eabef31220d 100644
--- a/Documentation/admin-guide/ramoops.rst
+++ b/Documentation/admin-guide/ramoops.rst
@@ -129,7 +129,7 @@ Setting the ramoops parameters can be done in several different manners:
takes a size, alignment and name as arguments. The name is used
to map the memory to a label that can be retrieved by ramoops.
- reserver_mem=2M:4096:oops ramoops.mem_name=oops
+ reserve_mem=2M:4096:oops ramoops.mem_name=oops
You can specify either RAM memory or peripheral devices' memory. However, when
specifying RAM, be sure to reserve the memory by issuing memblock_reserve()
diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst
index f92551539e8a..700aa72eecb1 100644
--- a/Documentation/admin-guide/tainted-kernels.rst
+++ b/Documentation/admin-guide/tainted-kernels.rst
@@ -182,3 +182,5 @@ More detailed explanation for tainting
produce extremely unusual kernel structure layouts (even performance
pathological ones), which is important to know when debugging. Set at
build time.
+
+ 18) ``N`` if an in-kernel test, such as a KUnit test, has been run.
diff --git a/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst b/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst
index 2945e0e33104..301aa30890ae 100644
--- a/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst
+++ b/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst
@@ -359,7 +359,7 @@ Driver updates for STM32 DMA-MDMA chaining support in foo driver
descriptor you want a callback to be called at the end of the transfer
(dmaengine_prep_slave_sg()) or the period (dmaengine_prep_dma_cyclic()).
Depending on the direction, set the callback on the descriptor that finishes
- the overal transfer:
+ the overall transfer:
* DMA_DEV_TO_MEM: set the callback on the "MDMA" descriptor
* DMA_MEM_TO_DEV: set the callback on the "DMA" descriptor
@@ -371,7 +371,7 @@ Driver updates for STM32 DMA-MDMA chaining support in foo driver
As STM32 MDMA channel transfer is triggered by STM32 DMA, you must issue
STM32 MDMA channel before STM32 DMA channel.
- If any, your callback will be called to warn you about the end of the overal
+ If any, your callback will be called to warn you about the end of the overall
transfer or the period completion.
Don't forget to terminate both channels. STM32 DMA channel is configured in
diff --git a/Documentation/arch/arm64/cpu-hotplug.rst b/Documentation/arch/arm64/cpu-hotplug.rst
index 76ba8d932c72..8fb438bf7781 100644
--- a/Documentation/arch/arm64/cpu-hotplug.rst
+++ b/Documentation/arch/arm64/cpu-hotplug.rst
@@ -26,7 +26,7 @@ There are no systems that support the physical addition (or removal) of CPUs
while the system is running, and ACPI is not able to sufficiently describe
them.
-e.g. New CPUs come with new caches, but the platform's cache toplogy is
+e.g. New CPUs come with new caches, but the platform's cache topology is
described in a static table, the PPTT. How caches are shared between CPUs is
not discoverable, and must be described by firmware.
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index 448c1664879b..694f67fa07d1 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -365,6 +365,8 @@ HWCAP2_SME_SF8DP2
HWCAP2_SME_SF8DP4
Functionality implied by ID_AA64SMFR0_EL1.SF8DP4 == 0b1.
+HWCAP2_POE
+ Functionality implied by ID_AA64MMFR3_EL1.S1POE == 0b0001.
4. Unused AT_HWCAP bits
-----------------------
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index 50327c05be8d..9eb5e70b4888 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -55,6 +55,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 |
+----------------+-----------------+-----------------+-----------------------------+
+| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+
@@ -249,8 +251,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
-| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A |
-| | Hip09 SMMU PMCG | | |
+| Hisilicon | Hip{08,09,10,10C| #162001900 | N/A |
+| | ,11} SMMU PMCG | | |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
diff --git a/Documentation/arch/powerpc/ultravisor.rst b/Documentation/arch/powerpc/ultravisor.rst
index ba6b1bf1cc44..6d0407b2f5a1 100644
--- a/Documentation/arch/powerpc/ultravisor.rst
+++ b/Documentation/arch/powerpc/ultravisor.rst
@@ -134,7 +134,7 @@ Hardware
* PTCR and partition table entries (partition table is in secure
memory). An attempt to write to PTCR will cause a Hypervisor
- Emulation Assitance interrupt.
+ Emulation Assistance interrupt.
* LDBAR (LD Base Address Register) and IMC (In-Memory Collection)
non-architected registers. An attempt to write to them will cause a
diff --git a/Documentation/arch/riscv/vector.rst b/Documentation/arch/riscv/vector.rst
index 75dd88a62e1d..3987f5f76a9d 100644
--- a/Documentation/arch/riscv/vector.rst
+++ b/Documentation/arch/riscv/vector.rst
@@ -15,7 +15,7 @@ status for the use of Vector in userspace. The intended usage guideline for
these interfaces is to give init systems a way to modify the availability of V
for processes running under its domain. Calling these interfaces is not
recommended in libraries routines because libraries should not override policies
-configured from the parant process. Also, users must noted that these interfaces
+configured from the parent process. Also, users must note that these interfaces
are not portable to non-Linux, nor non-RISC-V environments, so it is discouraged
to use in portable code. To get the availability of V in an ELF program,
please read :c:macro:`COMPAT_HWCAP_ISA_V` bit of :c:macro:`ELF_HWCAP` in the
diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst
index c58c72362911..5a2e6c0ef04a 100644
--- a/Documentation/arch/x86/mds.rst
+++ b/Documentation/arch/x86/mds.rst
@@ -162,7 +162,7 @@ Mitigation points
3. It would take a large number of these precisely-timed NMIs to mount
an actual attack. There's presumably not enough bandwidth.
4. The NMI in question occurs after a VERW, i.e. when user state is
- restored and most interesting data is already scrubbed. Whats left
+ restored and most interesting data is already scrubbed. What's left
is only the data that NMI touches, and that may or may not be of
any interest.
diff --git a/Documentation/arch/x86/x86_64/boot-options.rst b/Documentation/arch/x86/x86_64/boot-options.rst
index 137432d34109..98d4805f0823 100644
--- a/Documentation/arch/x86/x86_64/boot-options.rst
+++ b/Documentation/arch/x86/x86_64/boot-options.rst
@@ -170,18 +170,6 @@ NUMA
Don't parse the HMAT table for NUMA setup, or soft-reserved memory
partitioning.
- numa=fake=<size>[MG]
- If given as a memory unit, fills all system RAM with nodes of
- size interleaved over physical nodes.
-
- numa=fake=<N>
- If given as an integer, fills all system RAM with N fake nodes
- interleaved over physical nodes.
-
- numa=fake=<N>U
- If given as an integer followed by 'U', it will divide each
- physical node into N emulated nodes.
-
ACPI
====
diff --git a/Documentation/arch/x86/x86_64/fsgs.rst b/Documentation/arch/x86/x86_64/fsgs.rst
index 50960e09e1f6..d07e445dac5c 100644
--- a/Documentation/arch/x86/x86_64/fsgs.rst
+++ b/Documentation/arch/x86/x86_64/fsgs.rst
@@ -125,7 +125,7 @@ FSGSBASE instructions enablement
FSGSBASE instructions compiler support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-GCC version 4.6.4 and newer provide instrinsics for the FSGSBASE
+GCC version 4.6.4 and newer provide intrinsics for the FSGSBASE
instructions. Clang 5 supports them as well.
=================== ===========================
@@ -135,7 +135,7 @@ instructions. Clang 5 supports them as well.
_writegsbase_u64() Write the GS base register
=================== ===========================
-To utilize these instrinsics <immintrin.h> must be included in the source
+To utilize these intrinsics <immintrin.h> must be included in the source
code and the compiler option -mfsgsbase has to be added.
Compiler support for FS/GS based addressing
diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst
index df3a8a47f58c..a0ff0eb11e7f 100644
--- a/Documentation/block/bfq-iosched.rst
+++ b/Documentation/block/bfq-iosched.rst
@@ -9,7 +9,7 @@ controllers), BFQ's main features are:
- BFQ guarantees a high system and application responsiveness, and a
low latency for time-sensitive applications, such as audio or video
players;
-- BFQ distributes bandwidth, and not just time, among processes or
+- BFQ distributes bandwidth, not just time, among processes or
groups (switching back to time distribution when needed to keep
throughput high).
@@ -111,7 +111,7 @@ Higher speed for code-development tasks
If some additional workload happens to be executed in parallel, then
BFQ executes the I/O-related components of typical code-development
-tasks (compilation, checkout, merge, ...) much more quickly than CFQ,
+tasks (compilation, checkout, merge, etc.) much more quickly than CFQ,
NOOP or DEADLINE.
High throughput
@@ -127,9 +127,9 @@ Strong fairness, bandwidth and delay guarantees
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
BFQ distributes the device throughput, and not just the device time,
-among I/O-bound applications in proportion their weights, with any
+among I/O-bound applications in proportion to their weights, with any
workload and regardless of the device parameters. From these bandwidth
-guarantees, it is possible to compute tight per-I/O-request delay
+guarantees, it is possible to compute a tight per-I/O-request delay
guarantee by a simple formula. If not configured for strict service
guarantees, BFQ switches to time-based resource sharing (only) for
applications that would otherwise cause a throughput loss.
@@ -199,7 +199,7 @@ plus a lot of code, are borrowed from CFQ.
- On flash-based storage with internal queueing of commands
(typically NCQ), device idling happens to be always detrimental
- for throughput. So, with these devices, BFQ performs idling
+ to throughput. So, with these devices, BFQ performs idling
only when strictly needed for service guarantees, i.e., for
guaranteeing low latency or fairness. In these cases, overall
throughput may be sub-optimal. No solution currently exists to
@@ -212,7 +212,7 @@ plus a lot of code, are borrowed from CFQ.
and to reduce their latency. The most important action taken to
achieve this goal is to give to the queues associated with these
applications more than their fair share of the device
- throughput. For brevity, we call just "weight-raising" the whole
+ throughput. For brevity, we simply call "weight-raising" the whole
set of actions taken by BFQ to privilege these queues. In
particular, BFQ provides a milder form of weight-raising for
interactive applications, and a stronger form for soft real-time
@@ -231,7 +231,7 @@ plus a lot of code, are borrowed from CFQ.
responsive in detecting interleaved I/O (cooperating processes),
that it enables BFQ to achieve a high throughput, by queue
merging, even for queues for which CFQ needs a different
- mechanism, preemption, to get a high throughput. As such EQM is a
+ mechanism, preemption, to get a high throughput. As such, EQM is a
unified mechanism to achieve a high throughput with interleaved
I/O.
@@ -254,7 +254,7 @@ plus a lot of code, are borrowed from CFQ.
- First, with any proportional-share scheduler, the maximum
deviation with respect to an ideal service is proportional to
the maximum budget (slice) assigned to queues. As a consequence,
- BFQ can keep this deviation tight not only because of the
+ BFQ can keep this deviation tight, not only because of the
accurate service of B-WF2Q+, but also because BFQ *does not*
need to assign a larger budget to a queue to let the queue
receive a higher fraction of the device throughput.
@@ -327,7 +327,7 @@ applications. Unset this tunable if you need/want to control weights.
slice_idle
----------
-This parameter specifies how long BFQ should idle for next I/O
+This parameter specifies how long BFQ should idle for the next I/O
request, when certain sync BFQ queues become empty. By default
slice_idle is a non-zero value. Idling has a double purpose: boosting
throughput and making sure that the desired throughput distribution is
@@ -365,7 +365,7 @@ terms of I/O-request dispatches. To guarantee that the actual service
order then corresponds to the dispatch order, the strict_guarantees
tunable must be set too.
-There is an important flipside for idling: apart from the above cases
+There is an important flip side to idling: apart from the above cases
where it is beneficial also for throughput, idling can severely impact
throughput. One important case is random workload. Because of this
issue, BFQ tends to avoid idling as much as possible, when it is not
@@ -475,7 +475,7 @@ max_budget
Maximum amount of service, measured in sectors, that can be provided
to a BFQ queue once it is set in service (of course within the limits
-of the above timeout). According to what said in the description of
+of the above timeout). According to what was said in the description of
the algorithm, larger values increase the throughput in proportion to
the percentage of sequential I/O requests issued. The price of larger
values is that they coarsen the granularity of short-term bandwidth
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 257a7e1cdf5d..93060283b6fd 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -368,7 +368,7 @@ No additional type data follow ``btf_type``.
* ``info.kind_flag``: 0
* ``info.kind``: BTF_KIND_FUNC
* ``info.vlen``: linkage information (BTF_FUNC_STATIC, BTF_FUNC_GLOBAL
- or BTF_FUNC_EXTERN)
+ or BTF_FUNC_EXTERN - see :ref:`BTF_Function_Linkage_Constants`)
* ``type``: a BTF_KIND_FUNC_PROTO type
No additional type data follow ``btf_type``.
@@ -424,9 +424,8 @@ following data::
__u32 linkage;
};
-``struct btf_var`` encoding:
- * ``linkage``: currently only static variable 0, or globally allocated
- variable in ELF sections 1
+``btf_var.linkage`` may take the values BTF_VAR_STATIC,
+BTF_VAR_GLOBAL_ALLOCATED or BTF_VAR_GLOBAL_EXTERN - see
+:ref:`BTF_Var_Linkage_Constants`.
Not all types of global variables are supported by LLVM at this point.
The following is currently available:
@@ -549,6 +548,38 @@ The ``btf_enum64`` encoding:
If the original enum value is signed and the size is less than 8,
that value will be sign extended into 8 bytes.
+2.3 Constant Values
+-------------------
+
+.. _BTF_Function_Linkage_Constants:
+
+2.3.1 Function Linkage Constant Values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. table:: Function Linkage Values and Meanings
+
+ =================== ===== ===========
+ kind value description
+ =================== ===== ===========
+ ``BTF_FUNC_STATIC`` 0x0 definition of subprogram not visible outside containing compilation unit
+ ``BTF_FUNC_GLOBAL`` 0x1 definition of subprogram visible outside containing compilation unit
+ ``BTF_FUNC_EXTERN`` 0x2 declaration of a subprogram whose definition is outside the containing compilation unit
+ =================== ===== ===========
+
+
+.. _BTF_Var_Linkage_Constants:
+
+2.3.2 Variable Linkage Constant Values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. table:: Variable Linkage Values and Meanings
+
+ ============================ ===== ===========
+ kind value description
+ ============================ ===== ===========
+ ``BTF_VAR_STATIC`` 0x0 definition of global variable not visible outside containing compilation unit
+ ``BTF_VAR_GLOBAL_ALLOCATED`` 0x1 definition of global variable visible outside containing compilation unit
+ ``BTF_VAR_GLOBAL_EXTERN`` 0x2 declaration of global variable whose definition is outside the containing compilation unit
+ ============================ ===== ===========
+
3. BTF Kernel API
=================
diff --git a/Documentation/bpf/libbpf/program_types.rst b/Documentation/bpf/libbpf/program_types.rst
index 63bb88846e50..218b020a2f81 100644
--- a/Documentation/bpf/libbpf/program_types.rst
+++ b/Documentation/bpf/libbpf/program_types.rst
@@ -121,6 +121,8 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
+| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | |
++-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
@@ -131,11 +133,23 @@ described in more detail in the footnotes.
+ + +----------------------------------+-----------+
| | | ``raw_tracepoint+`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | |
+| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | |
+| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | |
+ + +----------------------------------+-----------+
-| | | ``tc`` | |
+| | | ``tc`` [#tc_legacy]_ | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -155,7 +169,9 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | |
+| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | |
++ + +----------------------------------+-----------+
+| | | ``struct_ops.s+`` [#struct_ops]_ | Yes |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -209,5 +225,11 @@ described in more detail in the footnotes.
``a-zA-Z0-9_.*?``.
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
+.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use
+ ``tcx/*`` instead.
+.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention,
+ but ``name`` is ignored and it is recommended to just use plain
+ ``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer
+ that is tagged with ``SEC(".struct_ops[.link]")``.
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index 356894399fbf..d23761540002 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -418,7 +418,7 @@ The rules for correspondence between registers / stack slots are as follows:
linked to the registers and stack slots of the parent state with the same
indices.
-* For the outer stack frames, only caller saved registers (r6-r9) and stack
+* For the outer stack frames, only callee saved registers (r6-r9) and stack
slots are linked to the registers and stack slots of the parent state with the
same indices.
diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst
index dcb0e379e5e8..a21dbf261be7 100644
--- a/Documentation/core-api/cpu_hotplug.rst
+++ b/Documentation/core-api/cpu_hotplug.rst
@@ -737,8 +737,9 @@ can process the event further.
When changes to the CPUs in the system occur, the sysfs file
/sys/devices/system/cpu/crash_hotplug contains '1' if the kernel
-updates the kdump capture kernel list of CPUs itself (via elfcorehdr),
-or '0' if userspace must update the kdump capture kernel list of CPUs.
+updates the kdump capture kernel list of CPUs itself (via elfcorehdr and
+other relevant kexec segments), or '0' if userspace must update the kdump
+capture kernel list of CPUs.
The availability depends on the CONFIG_HOTPLUG_CPU kernel configuration
option.
@@ -750,8 +751,9 @@ file can be used in a udev rule as follows:
SUBSYSTEM=="cpu", ATTRS{crash_hotplug}=="1", GOTO="kdump_reload_end"
For a CPU hot un/plug event, if the architecture supports kernel updates
-of the elfcorehdr (which contains the list of CPUs), then the rule skips
-the unload-then-reload of the kdump capture kernel.
+of the elfcorehdr (which contains the list of CPUs) and other relevant
+kexec segments, then the rule skips the unload-then-reload of the kdump
+capture kernel.
Kernel Inline Documentations Reference
======================================
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index f147854700e4..e18a2ffe0787 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -49,6 +49,7 @@ Library functionality that is used throughout the kernel.
wrappers/atomic_t
wrappers/atomic_bitops
floating-point
+ union_find
Low level entry and exit
========================
diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
index 8b84eb4bdae7..0f19dd524323 100644
--- a/Documentation/core-api/memory-allocation.rst
+++ b/Documentation/core-api/memory-allocation.rst
@@ -45,8 +45,9 @@ here we briefly outline their recommended usage:
* If the allocation is performed from an atomic context, e.g. interrupt
handler, use ``GFP_NOWAIT``. This flag prevents direct reclaim and
IO or filesystem operations. Consequently, under memory pressure
- ``GFP_NOWAIT`` allocation is likely to fail. Allocations which
- have a reasonable fallback should be using ``GFP_NOWARN``.
+ ``GFP_NOWAIT`` allocation is likely to fail. Users of this flag need
+ to provide a suitable fallback to cope with such failures where
+ appropriate.
* If you think that accessing memory reserves is justified and the kernel
will be stressed unless allocation succeeds, you may use ``GFP_ATOMIC``.
* Untrusted allocations triggered from userspace should be a subject
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 4451ef501936..14e093da3ccd 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -576,13 +576,12 @@ The field width is passed by value, the bitmap is passed by reference.
Helper macros cpumask_pr_args() and nodemask_pr_args() are available to ease
printing cpumask and nodemask.
-Flags bitfields such as page flags, page_type, gfp_flags
+Flags bitfields such as page flags and gfp_flags
--------------------------------------------------------
::
%pGp 0x17ffffc0002036(referenced|uptodate|lru|active|private|node=0|zone=2|lastcpupid=0x1fffff)
- %pGt 0xffffff7f(buddy)
%pGg GFP_USER|GFP_DMA32|GFP_NOWARN
%pGv read|exec|mayread|maywrite|mayexec|denywrite
@@ -591,7 +590,6 @@ would construct the value. The type of flags is given by the third
character. Currently supported are:
- p - [p]age flags, expects value of type (``unsigned long *``)
- - t - page [t]ype, expects value of type (``unsigned int *``)
- v - [v]ma_flags, expects value of type (``unsigned long *``)
- g - [g]fp_flags, expects value of type (``gfp_t *``)
diff --git a/Documentation/core-api/union_find.rst b/Documentation/core-api/union_find.rst
new file mode 100644
index 000000000000..6df8b94fdb5a
--- /dev/null
+++ b/Documentation/core-api/union_find.rst
@@ -0,0 +1,106 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Union-Find in Linux
+====================
+
+
+:Date: June 21, 2024
+:Author: Xavier <xavier_qy@163.com>
+
+What is union-find, and what is it used for?
+------------------------------------------------
+
+Union-find is a data structure used to handle the merging and querying
+of disjoint sets. The primary operations supported by union-find are:
+
+ Initialization: Resetting each element as an individual set, with
+ each set's initial parent node pointing to itself.
+
+ Find: Determine which set a particular element belongs to, usually by
+ returning a “representative element” of that set. This operation
+ is used to check if two elements are in the same set.
+
+ Union: Merge two sets into one.
+
+As a data structure used to maintain sets (groups), union-find is commonly
+utilized to solve problems related to offline queries, dynamic connectivity,
+and graph theory. It is also a key component in Kruskal's algorithm for
+computing the minimum spanning tree, which is crucial in scenarios like
+network routing. Consequently, union-find is widely used. Additionally,
+union-find has applications in symbolic computation, register allocation,
+and more.
+
+Space Complexity: O(n), where n is the number of nodes.
+
+Time Complexity: Using path compression can reduce the time complexity of
+the find operation, and using union by rank can reduce the time complexity
+of the union operation. These optimizations reduce the average time
+complexity of each find and union operation to O(α(n)), where α(n) is the
+inverse Ackermann function. This can be roughly considered a constant time
+complexity for practical purposes.
+
+This document covers use of the Linux union-find implementation. For more
+information on the nature and implementation of union-find, see:
+
+ Wikipedia entry on union-find
+ https://en.wikipedia.org/wiki/Disjoint-set_data_structure
+
+Linux implementation of union-find
+-----------------------------------
+
+Linux's union-find implementation resides in the file "lib/union_find.c".
+To use it, "#include <linux/union_find.h>".
+
+The union-find data structure is defined as follows::
+
+ struct uf_node {
+ struct uf_node *parent;
+ unsigned int rank;
+ };
+
+In this structure, parent points to the parent node of the current node.
+The rank field represents the height of the current tree. During a union
+operation, the tree with the smaller rank is attached under the tree with the
+larger rank to maintain balance.
+
+Initializing union-find
+-----------------------
+
+You can complete the initialization using either the static or the runtime
+initialization interface. Both initialize the parent pointer to point to the
+node itself and set the rank to 0.
+Example::
+
+ struct uf_node my_node = UF_INIT_NODE(my_node);
+
+or::
+
+ uf_node_init(&my_node);
+
+Find the Root Node of union-find
+--------------------------------
+
+This operation is mainly used to determine whether two nodes belong to the same
+set in the union-find. If they have the same root, they are in the same set.
+During the find operation, path compression is performed to improve the
+efficiency of subsequent find operations.
+Example::
+
+ int connected;
+ struct uf_node *root1 = uf_find(&node_1);
+ struct uf_node *root2 = uf_find(&node_2);
+ if (root1 == root2)
+ connected = 1;
+ else
+ connected = 0;
+
+Union Two Sets in union-find
+----------------------------
+
+To union two sets in the union-find, you first find their respective root
+nodes and then, based on the ranks of the root nodes, attach the root with
+the smaller rank under the root with the larger rank.
+Example::
+
+ uf_union(&node_1, &node_2);
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
index 5fce2b06f229..dbd26b02ff3c 100644
--- a/Documentation/dev-tools/gcov.rst
+++ b/Documentation/dev-tools/gcov.rst
@@ -75,6 +75,17 @@ Only files which are linked to the main kernel image or are compiled as
kernel modules are supported by this mechanism.
+Module specific configs
+-----------------------
+
+Gcov kernel configs for specific modules are described below:
+
+CONFIG_GCOV_PROFILE_RDS:
+ Enables GCOV profiling on RDS for checking which functions or
+ lines are executed. This config is used by the rds selftest to
+ generate coverage reports. If left unset, the report is omitted.
+
+
Files
-----
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index 02143f060b22..d81c42d1063e 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -361,7 +361,8 @@ Alternatives Considered
-----------------------
An alternative data race detection approach for the kernel can be found in the
-`Kernel Thread Sanitizer (KTSAN) <https://github.com/google/ktsan/wiki>`_.
+`Kernel Thread Sanitizer (KTSAN)
+<https://github.com/google/kernel-sanitizers/blob/master/KTSAN.md>`_.
KTSAN is a happens-before data race detector, which explicitly establishes the
happens-before order between memory operations, which can then be used to
determine data races as defined in `Data Races`_.
diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index 936f6aaa75c8..541899353865 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -53,6 +53,13 @@ configurable via the Kconfig option ``CONFIG_KFENCE_DEFERRABLE``.
The KUnit test suite is very likely to fail when using a deferrable timer
since it currently causes very unpredictable sample intervals.
+By default KFENCE will only sample 1 heap allocation within each sample
+interval. *Burst mode* allows sampling successive heap allocations, where the
+kernel boot parameter ``kfence.burst`` can be set to a non-zero value which
+denotes the *additional* successive allocations within a sample interval;
+setting ``kfence.burst=N`` means that ``1 + N`` successive allocations are
+attempted through KFENCE for each sample interval.
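+
+For example, with the following boot parameters (values purely
+illustrative), up to 5 successive allocations are attempted through
+KFENCE every 100 milliseconds::
+
+    kfence.sample_interval=100 kfence.burst=4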
+
The KFENCE memory pool is of fixed size, and if the pool is exhausted, no
further KFENCE allocations occur. With ``CONFIG_KFENCE_NUM_OBJECTS`` (default
255), the number of available guarded objects can be controlled. Each object
diff --git a/Documentation/dev-tools/kunit/api/clk.rst b/Documentation/dev-tools/kunit/api/clk.rst
new file mode 100644
index 000000000000..eeaa50089453
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/clk.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========
+Clk API
+========
+
+The KUnit clk API is used to test clk providers and clk consumers.
+
+.. kernel-doc:: drivers/clk/clk_kunit_helpers.c
+ :export:
diff --git a/Documentation/dev-tools/kunit/api/index.rst b/Documentation/dev-tools/kunit/api/index.rst
index 2d8f756aab56..5cdb552a0808 100644
--- a/Documentation/dev-tools/kunit/api/index.rst
+++ b/Documentation/dev-tools/kunit/api/index.rst
@@ -9,11 +9,17 @@ API Reference
test
resource
functionredirection
+ clk
+ of
+ platformdevice
This page documents the KUnit kernel testing API. It is divided into the
following sections:
+Core KUnit API
+==============
+
Documentation/dev-tools/kunit/api/test.rst
- Documents all of the standard testing API
@@ -25,3 +31,18 @@ Documentation/dev-tools/kunit/api/resource.rst
Documentation/dev-tools/kunit/api/functionredirection.rst
- Documents the KUnit Function Redirection API
+
+Driver KUnit API
+================
+
+Documentation/dev-tools/kunit/api/clk.rst
+
+ - Documents the KUnit clk API
+
+Documentation/dev-tools/kunit/api/of.rst
+
+ - Documents the KUnit device tree (OF) API
+
+Documentation/dev-tools/kunit/api/platformdevice.rst
+
+ - Documents the KUnit platform device API
diff --git a/Documentation/dev-tools/kunit/api/of.rst b/Documentation/dev-tools/kunit/api/of.rst
new file mode 100644
index 000000000000..cb4193dcddbb
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/of.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Device Tree (OF) API
+====================
+
+The KUnit device tree API is used to test device tree (of_*) dependent code.
+
+.. kernel-doc:: include/kunit/of.h
+ :internal:
+
+.. kernel-doc:: drivers/of/of_kunit_helpers.c
+ :export:
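+
+A minimal sketch (assuming the ``of_overlay_apply_kunit()`` helper from
+this header, and a hypothetical ``my_overlay.dtso`` overlay compiled into
+the test module)::
+
+	#include <kunit/of.h>
+	#include <kunit/test.h>
+
+	static void my_of_test(struct kunit *test)
+	{
+		/* Apply the overlay; it is reverted when the test finishes. */
+		KUNIT_ASSERT_EQ(test, 0,
+				of_overlay_apply_kunit(test, my_overlay));
+	}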
diff --git a/Documentation/dev-tools/kunit/api/platformdevice.rst b/Documentation/dev-tools/kunit/api/platformdevice.rst
new file mode 100644
index 000000000000..49ddd5729003
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/platformdevice.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Platform Device API
+===================
+
+The KUnit platform device API is used to test platform devices.
+
+.. kernel-doc:: lib/kunit/platform.c
+ :export:
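+
+A minimal sketch (assuming the test-managed helpers
+``kunit_platform_device_alloc()`` and ``kunit_platform_device_add()``,
+which clean up after themselves when the test finishes; the device name
+``my-fake-dev`` is purely illustrative)::
+
+	#include <kunit/platform_device.h>
+	#include <kunit/test.h>
+
+	static void my_platform_test(struct kunit *test)
+	{
+		struct platform_device *pdev;
+
+		pdev = kunit_platform_device_alloc(test, "my-fake-dev", -1);
+		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+		/* Register the device; it is removed on test exit. */
+		KUNIT_ASSERT_EQ(test, 0,
+				kunit_platform_device_add(test, pdev));
+	}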
diff --git a/Documentation/dev-tools/kunit/style.rst b/Documentation/dev-tools/kunit/style.rst
index b6d0d7359f00..eac81a714a29 100644
--- a/Documentation/dev-tools/kunit/style.rst
+++ b/Documentation/dev-tools/kunit/style.rst
@@ -188,15 +188,26 @@ For example, a Kconfig entry might look like:
Test File and Module Names
==========================
-KUnit tests can often be compiled as a module. These modules should be named
-after the test suite, followed by ``_test``. If this is likely to conflict with
-non-KUnit tests, the suffix ``_kunit`` can also be used.
+KUnit tests are often compiled as a separate module. To avoid conflicting
+with regular modules, KUnit modules should be named after the test suite,
+followed by ``_kunit`` (e.g. if "foobar" is the core module, then
+"foobar_kunit" is the KUnit test module).
-The easiest way of achieving this is to name the file containing the test suite
-``<suite>_test.c`` (or, as above, ``<suite>_kunit.c``). This file should be
-placed next to the code under test.
+Test source files, whether compiled as a separate module or an
+``#include`` in another source file, are best kept in a ``tests/``
+subdirectory so they do not conflict with other source files (e.g. for
+tab-completion).
+
+Note that the ``_test`` suffix has also been used in some existing
+tests. The ``_kunit`` suffix is preferred, as it makes the distinction
+between KUnit and non-KUnit tests clearer.
+
+So for the common case, name the file containing the test suite
+``tests/<suite>_kunit.c``. The ``tests`` directory should be placed at
+the same level as the code under test. For example, tests for
+``lib/string.c`` live in ``lib/tests/string_kunit.c``.
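+
+For example, with a suite named ``foo``, the test module can then be
+wired up in ``foo/tests/Makefile`` along these lines
+(``CONFIG_FOO_KUNIT_TEST`` being a hypothetical Kconfig symbol for the
+suite)::
+
+	obj-$(CONFIG_FOO_KUNIT_TEST) += foo_kunit.o
+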
If the suite name contains some or all of the name of the test's parent
-directory, it may make sense to modify the source filename to reduce redundancy.
-For example, a ``foo_firmware`` suite could be in the ``foo/firmware_test.c``
-file.
+directory, it may make sense to modify the source filename to reduce
+redundancy. For example, a ``foo_firmware`` suite could be in the
+``foo/tests/firmware_kunit.c`` file.
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
deleted file mode 100644
index e4b9dcee6d41..000000000000
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* ARC HS Performance Counters
-
-The ARC HS can be configured with a pipeline performance monitor for counting
-CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to up to 32 counters.
-It also supports overflow interrupts.
-
-Required properties:
-
-- compatible : should contain
- "snps,archs-pct"
-
-Example:
-
-pmu {
- compatible = "snps,archs-pct";
-};
diff --git a/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml b/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml
new file mode 100644
index 000000000000..532f7584f59f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arc/snps,archs-pct.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARC HS Performance Counters
+
+maintainers:
+ - Aryabhatta Dey <aryabhattadey35@gmail.com>
+
+description:
+ The ARC HS can be configured with a pipeline performance monitor for counting
+ CPU and cache events like cache misses and hits. Like a conventional PCT,
+ there are 100+ hardware conditions dynamically mapped to up to 32 counters.
+ It also supports overflow interrupts.
+
+properties:
+ compatible:
+ const: snps,archs-pct
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
index 7dff32f373cb..b4f6695a6015 100644
--- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
@@ -25,10 +25,18 @@ select:
properties:
compatible:
- items:
- - const: amlogic,meson-gx-ao-secure
- - const: syscon
-
+ oneOf:
+ - items:
+ - const: amlogic,meson-gx-ao-secure
+ - const: syscon
+ - items:
+ - enum:
+ - amlogic,a4-ao-secure
+ - amlogic,c3-ao-secure
+ - amlogic,s4-ao-secure
+ - amlogic,t7-ao-secure
+ - const: amlogic,meson-gx-ao-secure
+ - const: syscon
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
index d50a60368e27..04a8c37b4aff 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
@@ -17,7 +17,7 @@ description: |
The Coresight dummy source component is for the specific coresight source
devices the kernel does not have permission to access or configure. For some SoCs,
there would be Coresight source trace components on sub-processor which
- are conneted to AP processor via debug bus. For these devices, a dummy driver
+ are connected to the AP processor via a debug bus. For these devices, a dummy driver
is needed to register them as Coresight source devices, so that paths can be
created in the driver. It provides Coresight API for operations on dummy
source devices, such as enabling and disabling them. It also provides the
diff --git a/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml b/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml
index 693f3fe7be60..cff1cdaadb13 100644
--- a/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: ARM Corstone1000
maintainers:
- - Vishnu Banavath <vishnu.banavath@arm.com>
- - Rui Miguel Silva <rui.silva@linaro.org>
+ - Abdellatif El Khlifi <abdellatif.elkhlifi@arm.com>
+ - Hugues Kamba Mpiana <hugues.kambampiana@arm.com>
description: |+
ARM's Corstone1000 includes pre-verified Corstone SSE-710 subsystem that
diff --git a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
index 95113df178cc..2f92b8ab08fa 100644
--- a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
+++ b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
@@ -79,6 +79,7 @@ properties:
- aspeed,ast2600-evb-a1
- asus,x4tf-bmc
- facebook,bletchley-bmc
+ - facebook,catalina-bmc
- facebook,cloudripper-bmc
- facebook,elbert-bmc
- facebook,fuji-bmc
@@ -86,7 +87,9 @@ properties:
- facebook,harma-bmc
- facebook,minerva-cmc
- facebook,yosemite4-bmc
+ - ibm,blueridge-bmc
- ibm,everest-bmc
+ - ibm,fuji-bmc
- ibm,rainier-bmc
- ibm,system1-bmc
- ibm,tacoma-bmc
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 7374beb5a613..76e2b7978250 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -11,7 +11,8 @@ PIT Timer required properties:
shared across all System Controller members.
PIT64B Timer required properties:
-- compatible: Should be "microchip,sam9x60-pit64b"
+- compatible: Should be "microchip,sam9x60-pit64b", or
+  "microchip,sam9x7-pit64b" followed by "microchip,sam9x60-pit64b"
- reg: Should contain registers location and length
- interrupts: Should contain interrupt for PIT64B timer
- clocks: Should contain the available clock sources for PIT64B timer.
@@ -31,7 +32,8 @@ RAMC SDRAM/DDR Controller required properties:
"atmel,at91sam9g45-ddramc",
"atmel,sama5d3-ddramc",
"microchip,sam9x60-ddramc",
- "microchip,sama7g5-uddrc"
+ "microchip,sama7g5-uddrc",
+ "microchip,sam9x7-ddramc", "atmel,sama5d3-ddramc".
- reg: Should contain registers location and length
Examples:
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 80747d79418a..b39a7e031177 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -809,19 +809,19 @@ properties:
- const: kontron,sl-imx6ull # Kontron SL i.MX6ULL SoM
- const: fsl,imx6ull
- - description: TQ Systems TQMa6ULLx SoM on MBa6ULx board
+ - description: TQ-Systems TQMa6ULLx SoM on MBa6ULx board
items:
- enum:
- - tq,imx6ull-tqma6ull2-mba6ulx
- - const: tq,imx6ull-tqma6ull2 # MCIMX6Y2
+ - tq,imx6ull-tqma6ull2-mba6ulx # TQMa6ULL socketable SoM with MCIMX6Y2 on MBa6ULx EVK
+ - const: tq,imx6ull-tqma6ull2 # TQMa6ULL socketable SoM with MCIMX6Y2
- const: fsl,imx6ull
- - description: TQ Systems TQMa6ULLxL SoM on MBa6ULx[L] board
+ - description: TQ-Systems TQMa6ULLxL SoM on MBa6ULx[L] board
items:
- enum:
- - tq,imx6ull-tqma6ull2l-mba6ulx # using LGA adapter
- - tq,imx6ull-tqma6ull2l-mba6ulxl
- - const: tq,imx6ull-tqma6ull2l # MCIMX6Y2, LGA SoM variant
+ - tq,imx6ull-tqma6ull2l-mba6ulx # TQMa6ULLxL LGA SoM with socketable Adapter on MBa6ULx EVK
+ - tq,imx6ull-tqma6ull2l-mba6ulxl # TQMa6ULLxL LGA SoM on MBa6ULxL gateway board
+ - const: tq,imx6ull-tqma6ull2l # TQMa6ULLxL LGA SoM with MCIMX6Y2
- const: fsl,imx6ull
- description: Seeed Studio i.MX6ULL SoM on dev boards
@@ -939,8 +939,8 @@ properties:
- fsl,imx8mm-ddr4-evk # i.MX8MM DDR4 EVK Board
- fsl,imx8mm-evk # i.MX8MM EVK Board
- fsl,imx8mm-evkb # i.MX8MM EVKB Board
+ - gateworks,imx8mm-gw75xx-0x # i.MX8MM Gateworks Board
- gateworks,imx8mm-gw7904
- - gateworks,imx8mm-gw7905-0x # i.MX8MM Gateworks Board
- gw,imx8mm-gw71xx-0x # i.MX8MM Gateworks Development Kit
- gw,imx8mm-gw72xx-0x # i.MX8MM Gateworks Development Kit
- gw,imx8mm-gw73xx-0x # i.MX8MM Gateworks Development Kit
@@ -953,7 +953,6 @@ properties:
- toradex,verdin-imx8mm # Verdin iMX8M Mini Modules
- toradex,verdin-imx8mm-nonwifi # Verdin iMX8M Mini Modules without Wi-Fi / BT
- toradex,verdin-imx8mm-wifi # Verdin iMX8M Mini Wi-Fi / BT Modules
- - variscite,var-som-mx8mm # i.MX8MM Variscite VAR-SOM-MX8MM module
- prt,prt8mm # i.MX8MM Protonic PRT8MM Board
- const: fsl,imx8mm
@@ -1082,7 +1081,7 @@ properties:
- gateworks,imx8mp-gw72xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw73xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw74xx # i.MX8MP Gateworks Board
- - gateworks,imx8mp-gw7905-2x # i.MX8MP Gateworks Board
+ - gateworks,imx8mp-gw75xx-2x # i.MX8MP Gateworks Board
- skov,imx8mp-skov-revb-hdmi # SKOV i.MX8MP climate control without panel
- skov,imx8mp-skov-revb-lt6 # SKOV i.MX8MP climate control with 7" panel
- skov,imx8mp-skov-revb-mi1010ait-1cp1 # SKOV i.MX8MP climate control with 10.1" panel
@@ -1168,6 +1167,12 @@ properties:
- const: tq,imx8mp-tqma8mpql # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM
- const: fsl,imx8mp
+ - description: Variscite VAR-SOM-MX8M Plus based boards
+ items:
+ - const: variscite,var-som-mx8mp-symphony
+ - const: variscite,var-som-mx8mp
+ - const: fsl,imx8mp
+
- description: i.MX8MQ based Boards
items:
- enum:
@@ -1293,6 +1298,7 @@ properties:
- enum:
- fsl,imx93-9x9-qsb # i.MX93 9x9 QSB Board
- fsl,imx93-11x11-evk # i.MX93 11x11 EVK Board
+ - fsl,imx93-14x14-evk # i.MX93 14x14 EVK Board
- const: fsl,imx93
- description: i.MX95 based Boards
@@ -1344,6 +1350,12 @@ properties:
- const: variscite,var-som-mx93
- const: fsl,imx93
+ - description: Kontron OSM-S i.MX93 SoM based boards
+ items:
+ - const: kontron,imx93-bl-osm-s # Kontron BL i.MX93 OSM-S board
+ - const: kontron,imx93-osm-s # Kontron OSM-S i.MX93 SoM
+ - const: fsl,imx93
+
- description:
Freescale Vybrid Platform Device Tree Bindings
@@ -1523,6 +1535,12 @@ properties:
- fsl,ls2080a-rdb
- const: fsl,ls2080a
+ - description: LS2081A based Boards
+ items:
+ - enum:
+ - fsl,ls2081a-rdb
+ - const: fsl,ls2081a
+
- description: LS2088A based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
deleted file mode 100644
index 149567a38215..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Mediatek bdpsys controller
-============================
-
-The Mediatek bdpsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt2701-bdpsys", "syscon"
- - "mediatek,mt2712-bdpsys", "syscon"
- - "mediatek,mt7623-bdpsys", "mediatek,mt2701-bdpsys", "syscon"
-- #clock-cells: Must be 1
-
-The bdpsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-bdpsys: clock-controller@1c000000 {
- compatible = "mediatek,mt2701-bdpsys", "syscon";
- reg = <0 0x1c000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
deleted file mode 100644
index a0ce82085ad0..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-MediaTek CAMSYS controller
-============================
-
-The MediaTek camsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-camsys", "syscon"
- - "mediatek,mt6779-camsys", "syscon"
- - "mediatek,mt8183-camsys", "syscon"
-- #clock-cells: Must be 1
-
-The camsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-camsys: camsys@1a000000 {
- compatible = "mediatek,mt8183-camsys", "syscon";
- reg = <0 0x1a000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
deleted file mode 100644
index dce4c9241932..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Mediatek imgsys controller
-============================
-
-The Mediatek imgsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2701-imgsys", "syscon"
- - "mediatek,mt2712-imgsys", "syscon"
- - "mediatek,mt6765-imgsys", "syscon"
- - "mediatek,mt6779-imgsys", "syscon"
- - "mediatek,mt6797-imgsys", "syscon"
- - "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon"
- - "mediatek,mt8167-imgsys", "syscon"
- - "mediatek,mt8173-imgsys", "syscon"
- - "mediatek,mt8183-imgsys", "syscon"
-- #clock-cells: Must be 1
-
-The imgsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-imgsys: clock-controller@15000000 {
- compatible = "mediatek,mt8173-imgsys", "syscon";
- reg = <0 0x15000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt
deleted file mode 100644
index 2ce889b023d9..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek ipesys controller
-============================
-
-The Mediatek ipesys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6779-ipesys", "syscon"
-- #clock-cells: Must be 1
-
-The ipesys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-ipesys: clock-controller@1b000000 {
- compatible = "mediatek,mt6779-ipesys", "syscon";
- reg = <0 0x1b000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
deleted file mode 100644
index aabc8c5c8ed2..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Mediatek IPU controller
-============================
-
-The Mediatek ipu controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt8183-ipu_conn", "syscon"
- - "mediatek,mt8183-ipu_adl", "syscon"
- - "mediatek,mt8183-ipu_core0", "syscon"
- - "mediatek,mt8183-ipu_core1", "syscon"
-- #clock-cells: Must be 1
-
-The ipu controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-ipu_conn: syscon@19000000 {
- compatible = "mediatek,mt8183-ipu_conn", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_adl: syscon@19010000 {
- compatible = "mediatek,mt8183-ipu_adl", "syscon";
- reg = <0 0x19010000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_core0: syscon@19180000 {
- compatible = "mediatek,mt8183-ipu_core0", "syscon";
- reg = <0 0x19180000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_core1: syscon@19280000 {
- compatible = "mediatek,mt8183-ipu_core1", "syscon";
- reg = <0 0x19280000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt
deleted file mode 100644
index 2df799cd06a7..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek jpgdecsys controller
-============================
-
-The Mediatek jpgdecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt2712-jpgdecsys", "syscon"
-- #clock-cells: Must be 1
-
-The jpgdecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-jpgdecsys: syscon@19000000 {
- compatible = "mediatek,mt2712-jpgdecsys", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
deleted file mode 100644
index 2b882b7ca72e..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Mediatek mcucfg controller
-============================
-
-The Mediatek mcucfg controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-mcucfg", "syscon"
- - "mediatek,mt8183-mcucfg", "syscon"
-- #clock-cells: Must be 1
-
-The mcucfg controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-mcucfg: syscon@10220000 {
- compatible = "mediatek,mt2712-mcucfg", "syscon";
- reg = <0 0x10220000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
deleted file mode 100644
index 054424fb64b4..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Mediatek mfgcfg controller
-============================
-
-The Mediatek mfgcfg controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-mfgcfg", "syscon"
- - "mediatek,mt6779-mfgcfg", "syscon"
- - "mediatek,mt8167-mfgcfg", "syscon"
- - "mediatek,mt8183-mfgcfg", "syscon"
-- #clock-cells: Must be 1
-
-The mfgcfg controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-mfgcfg: syscon@13000000 {
- compatible = "mediatek,mt2712-mfgcfg", "syscon";
- reg = <0 0x13000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
deleted file mode 100644
index 1c671943ce4d..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Mediatek mipi0a (mipi_rx_ana_csi0a) controller
-============================
-
-The Mediatek mipi0a controller provides various clocks
-to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-mipi0a", "syscon"
-- #clock-cells: Must be 1
-
-The mipi0a controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-The mipi0a controller also uses the common power domain from
-Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
-The available power domains are defined in dt-bindings/power/mt*-power.h.
-
-Example:
-
-mipi0a: clock-controller@11c10000 {
- compatible = "mediatek,mt6765-mipi0a", "syscon";
- reg = <0 0x11c10000 0 0x1000>;
- power-domains = <&scpsys MT6765_POWER_DOMAIN_CAM>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
deleted file mode 100644
index f090147b7f1e..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Mediatek vcodecsys controller
-============================
-
-The Mediatek vcodecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-vcodecsys", "syscon"
-- #clock-cells: Must be 1
-
-The vcodecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-The vcodecsys controller also uses the common power domain from
-Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
-The available power domains are defined in dt-bindings/power/mt*-power.h.
-
-Example:
-
-venc_gcon: clock-controller@17000000 {
- compatible = "mediatek,mt6765-vcodecsys", "syscon";
- reg = <0 0x17000000 0 0x10000>;
- power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
deleted file mode 100644
index 98195169176a..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Mediatek vdecsys controller
-============================
-
-The Mediatek vdecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2701-vdecsys", "syscon"
- - "mediatek,mt2712-vdecsys", "syscon"
- - "mediatek,mt6779-vdecsys", "syscon"
- - "mediatek,mt6797-vdecsys", "syscon"
- - "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon"
- - "mediatek,mt8167-vdecsys", "syscon"
- - "mediatek,mt8173-vdecsys", "syscon"
- - "mediatek,mt8183-vdecsys", "syscon"
-- #clock-cells: Must be 1
-
-The vdecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vdecsys: clock-controller@16000000 {
- compatible = "mediatek,mt8173-vdecsys", "syscon";
- reg = <0 0x16000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
deleted file mode 100644
index 3cc299fd7857..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek vencltsys controller
-============================
-
-The Mediatek vencltsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt8173-vencltsys", "syscon"
-- #clock-cells: Must be 1
-
-The vencltsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vencltsys: clock-controller@19000000 {
- compatible = "mediatek,mt8173-vencltsys", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
deleted file mode 100644
index 6a6a14e15cd7..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Mediatek vencsys controller
-============================
-
-The Mediatek vencsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-vencsys", "syscon"
- - "mediatek,mt6779-vencsys", "syscon"
- - "mediatek,mt6797-vencsys", "syscon"
- - "mediatek,mt8173-vencsys", "syscon"
- - "mediatek,mt8183-vencsys", "syscon"
-- #clock-cells: Must be 1
-
-The vencsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vencsys: clock-controller@18000000 {
- compatible = "mediatek,mt8173-vencsys", "syscon";
- reg = <0 0x18000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index f08e13b61172..5cb54d69af0b 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -157,11 +157,18 @@ properties:
- items:
- enum:
+ - wingtech,wt82918hd
+ - const: qcom,msm8929
+
+ - items:
+ - enum:
- huawei,kiwi
- longcheer,l9100
- samsung,a7
- sony,kanuti-tulip
- square,apq8039-t2
+ - wingtech,wt82918
+ - wingtech,wt82918hdhw39
- const: qcom,msm8939
- items:
@@ -228,12 +235,15 @@ properties:
- samsung,grandprimelte
- samsung,gt510
- samsung,gt58
+ - samsung,j3ltetw
- samsung,j5
- samsung,j5x
- samsung,rossa
- samsung,serranove
- thwc,uf896
- thwc,ufi001c
+ - wingtech,wt86518
+ - wingtech,wt86528
- wingtech,wt88047
- yiming,uz801-v3
- const: qcom,msm8916
@@ -250,6 +260,7 @@ properties:
- items:
- enum:
- lg,bullhead
+ - lg,h815
- microsoft,talkman
- xiaomi,libra
- const: qcom,msm8992
@@ -1040,8 +1051,16 @@ properties:
- items:
- enum:
+ - lenovo,thinkpad-t14s
+ - const: qcom,x1e78100
+ - const: qcom,x1e80100
+
+ - items:
+ - enum:
- asus,vivobook-s15
- lenovo,yoga-slim7x
+ - microsoft,romulus13
+ - microsoft,romulus15
- qcom,x1e80100-crd
- qcom,x1e80100-qcp
- const: qcom,x1e80100
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 1ef09fbfdfaf..687823e58c22 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -96,6 +96,13 @@ properties:
- const: coolpi,pi-cm5
- const: rockchip,rk3588
+ - description: Cool Pi CM5 GenBook
+ items:
+ - enum:
+ - coolpi,pi-cm5-genbook
+ - const: coolpi,pi-cm5
+ - const: rockchip,rk3588
+
- description: Cool Pi 4 Model B
items:
- const: coolpi,pi-4b
@@ -148,6 +155,12 @@ properties:
- const: engicam,px30-core
- const: rockchip,px30
+ - description: Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard
+ items:
+ - const: firefly,px30-jd4-core-mb
+ - const: firefly,px30-jd4-core
+ - const: rockchip,px30
+
- description: Firefly Firefly-RK3288
items:
- enum:
@@ -216,6 +229,7 @@ properties:
- friendlyarm,nanopi-r2c
- friendlyarm,nanopi-r2c-plus
- friendlyarm,nanopi-r2s
+ - friendlyarm,nanopi-r2s-plus
- const: rockchip,rk3328
- description: FriendlyElec NanoPi4 series boards
@@ -243,9 +257,11 @@ properties:
- friendlyarm,nanopi-r6s
- const: rockchip,rk3588s
- - description: FriendlyElec NanoPC T6
+ - description: FriendlyElec NanoPC T6 series boards
items:
- - const: friendlyarm,nanopc-t6
+ - enum:
+ - friendlyarm,nanopc-t6
+ - friendlyarm,nanopc-t6-lts
- const: rockchip,rk3588
- description: FriendlyElec CM3588-based boards
@@ -255,6 +271,11 @@ properties:
- const: friendlyarm,cm3588
- const: rockchip,rk3588
+ - description: GameForce Ace
+ items:
+ - const: gameforce,ace
+ - const: rockchip,rk3588s
+
- description: GameForce Chi
items:
- const: gameforce,chi
@@ -581,9 +602,19 @@ properties:
- description: Hardkernel Odroid M1
items:
- - const: rockchip,rk3568-odroid-m1
+ - const: hardkernel,odroid-m1
- const: rockchip,rk3568
+ - description: Hardkernel Odroid M1S
+ items:
+ - const: hardkernel,odroid-m1s
+ - const: rockchip,rk3566
+
+ - description: Hardkernel Odroid M2
+ items:
+ - const: hardkernel,odroid-m2
+ - const: rockchip,rk3588s
+
- description: Hugsun X99 TV Box
items:
- const: hugsun,x99
@@ -622,6 +653,11 @@ properties:
- const: leez,p710
- const: rockchip,rk3399
+ - description: LCKFB Taishan Pi RK3566
+ items:
+ - const: lckfb,tspi-rk3566
+ - const: rockchip,rk3566
+
- description: Lunzn FastRhino R66S / R68S
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
index b79c81cd9f0e..932f981265cc 100644
--- a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
@@ -26,6 +26,7 @@ select:
- rockchip,rk3368-pmu
- rockchip,rk3399-pmu
- rockchip,rk3568-pmu
+ - rockchip,rk3576-pmu
- rockchip,rk3588-pmu
- rockchip,rv1126-pmu
@@ -43,6 +44,7 @@ properties:
- rockchip,rk3368-pmu
- rockchip,rk3399-pmu
- rockchip,rk3568-pmu
+ - rockchip,rk3576-pmu
- rockchip,rk3588-pmu
- rockchip,rv1126-pmu
- const: syscon
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index 58099949e8f3..703d4b574398 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -54,6 +54,8 @@ properties:
- description: ST STM32MP151 based Boards
items:
- enum:
+ - prt,mecio1r0 # Protonic MECIO1r0
+ - prt,mect1s # Protonic MECT1S
- prt,prtt1a # Protonic PRTT1A
- prt,prtt1c # Protonic PRTT1C
- prt,prtt1s # Protonic PRTT1S
@@ -71,6 +73,12 @@ properties:
- const: dh,stm32mp151a-dhcor-som
- const: st,stm32mp151
+ - description: ST STM32MP153 based Boards
+ items:
+ - enum:
+ - prt,mecio1r1 # Protonic MECIO1r1
+ - const: st,stm32mp153
+
- description: DH STM32MP153 DHCOM SoM based Boards
items:
- const: dh,stm32mp153c-dhcom-drc02
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 09dc6f424986..4aa15f3668e0 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -61,14 +61,19 @@ properties:
- const: anbernic,rg35xx-2024
- const: allwinner,sun50i-h700
+ - description: Anbernic RG35XX H
+ items:
+ - const: anbernic,rg35xx-h
+ - const: allwinner,sun50i-h700
+
- description: Anbernic RG35XX Plus
items:
- const: anbernic,rg35xx-plus
- const: allwinner,sun50i-h700
- - description: Anbernic RG35XX H
+ - description: Anbernic RG35XX SP
items:
- - const: anbernic,rg35xx-h
+ - const: anbernic,rg35xx-sp
- const: allwinner,sun50i-h700
- description: Amarula A64 Relic
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index 8fb4923517d0..2889fd0e6592 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -128,6 +128,48 @@ properties:
- const: nvidia,tegra132
- const: nvidia,tegra124
- items:
+ - const: google,nyan-blaze-rev10
+ - const: google,nyan-blaze-rev9
+ - const: google,nyan-blaze-rev8
+ - const: google,nyan-blaze-rev7
+ - const: google,nyan-blaze-rev6
+ - const: google,nyan-blaze-rev5
+ - const: google,nyan-blaze-rev4
+ - const: google,nyan-blaze-rev3
+ - const: google,nyan-blaze-rev2
+ - const: google,nyan-blaze-rev1
+ - const: google,nyan-blaze-rev0
+ - const: google,nyan-blaze
+ - const: google,nyan
+ - const: nvidia,tegra124
+ - items:
+ - const: google,nyan-big-rev10
+ - const: google,nyan-big-rev9
+ - const: google,nyan-big-rev8
+ - const: google,nyan-big-rev7
+ - const: google,nyan-big-rev6
+ - const: google,nyan-big-rev5
+ - const: google,nyan-big-rev4
+ - const: google,nyan-big-rev3
+ - const: google,nyan-big-rev2
+ - const: google,nyan-big-rev1
+ - const: google,nyan-big-rev0
+ - const: google,nyan-big
+ - const: google,nyan
+ - const: nvidia,tegra124
+ - items:
+ - const: google,nyan-big-rev7
+ - const: google,nyan-big-rev6
+ - const: google,nyan-big-rev5
+ - const: google,nyan-big-rev4
+ - const: google,nyan-big-rev3
+ - const: google,nyan-big-rev2
+ - const: google,nyan-big-rev1
+ - const: google,nyan-big-rev0
+ - const: google,nyan-big
+ - const: google,nyan
+ - const: nvidia,tegra124
+ - items:
- enum:
- nvidia,darcy
- nvidia,p2371-0000
diff --git a/Documentation/devicetree/bindings/arm/ti/k3.yaml b/Documentation/devicetree/bindings/arm/ti/k3.yaml
index 4d9c5fbb4c26..5df99e361c21 100644
--- a/Documentation/devicetree/bindings/arm/ti/k3.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/k3.yaml
@@ -140,6 +140,7 @@ properties:
- description: K3 J722S SoC and Boards
items:
- enum:
+ - beagle,am67a-beagley-ai
- ti,j722s-evm
- const: ti,j722s
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.yaml b/Documentation/devicetree/bindings/ata/ahci-platform.yaml
index 358617115bb8..ef19468e3022 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.yaml
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.yaml
@@ -30,6 +30,8 @@ select:
- marvell,armada-3700-ahci
- marvell,armada-8k-ahci
- marvell,berlin2q-ahci
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
- socionext,uniphier-pro4-ahci
- socionext,uniphier-pxs2-ahci
- socionext,uniphier-pxs3-ahci
@@ -45,6 +47,8 @@ properties:
- marvell,armada-8k-ahci
- marvell,berlin2-ahci
- marvell,berlin2q-ahci
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
- socionext,uniphier-pro4-ahci
- socionext,uniphier-pxs2-ahci
- socionext,uniphier-pxs3-ahci
@@ -64,11 +68,11 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 5
clock-names:
minItems: 1
- maxItems: 3
+ maxItems: 5
interrupts:
maxItems: 1
@@ -97,6 +101,31 @@ required:
allOf:
- $ref: ahci-common.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ clock-names:
+ items:
+ - const: slave_iface
+ - const: iface
+ - const: core
+ - const: rxoob
+ - const: pmalive
+ required:
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/ata/imx-sata.yaml b/Documentation/devicetree/bindings/ata/imx-sata.yaml
index 68ffb97ddc9b..f4eb3550a096 100644
--- a/Documentation/devicetree/bindings/ata/imx-sata.yaml
+++ b/Documentation/devicetree/bindings/ata/imx-sata.yaml
@@ -19,6 +19,7 @@ properties:
- fsl,imx53-ahci
- fsl,imx6q-ahci
- fsl,imx6qp-ahci
+ - fsl,imx8qm-ahci
reg:
maxItems: 1
@@ -27,12 +28,14 @@ properties:
maxItems: 1
clocks:
+ minItems: 2
items:
- description: sata clock
- description: sata reference clock
- description: ahb clock
clock-names:
+ minItems: 2
items:
- const: sata
- const: sata_ref
@@ -58,6 +61,25 @@ properties:
$ref: /schemas/types.yaml#/definitions/flag
description: if present, disable spread-spectrum clocking on the SATA link.
+ phys:
+ items:
+ - description: phandle to SATA PHY.
+ Since the "REXT" pin is only present on the first lane of the i.MX8QM
+ PHY, its calibration result will be stored, passed through the second
+ lane, and shared with all three lane PHYs. The first two lane PHYs are
+ used as calibration PHYs, although only the third lane PHY is used by
+ SATA.
+ - description: phandle to the first lane PHY of i.MX8QM.
+ - description: phandle to the second lane PHY of i.MX8QM.
+
+ phy-names:
+ items:
+ - const: sata-phy
+ - const: cali-phy0
+ - const: cali-phy1
+
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -65,6 +87,31 @@ required:
- clocks
- clock-names
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx53-ahci
+ - fsl,imx6q-ahci
+ - fsl,imx6qp-ahci
+ then:
+ properties:
+ clock-names:
+ minItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-ahci
+ then:
+ properties:
+ clock-names:
+ minItems: 2
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/ata/qcom-sata.txt b/Documentation/devicetree/bindings/ata/qcom-sata.txt
deleted file mode 100644
index 094de91cd9fd..000000000000
--- a/Documentation/devicetree/bindings/ata/qcom-sata.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* Qualcomm AHCI SATA Controller
-
-SATA nodes are defined to describe on-chip Serial ATA controllers.
-Each SATA controller should have its own node.
-
-Required properties:
-- compatible : compatible list, must contain "generic-ahci"
-- interrupts : <interrupt mapping for SATA IRQ>
-- reg : <registers mapping>
-- phys : Must contain exactly one entry as specified
- in phy-bindings.txt
-- phy-names : Must be "sata-phy"
-
-Required properties for "qcom,ipq806x-ahci" compatible:
-- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : Shall be:
- "slave_iface" - Fabric port AHB clock for SATA
- "iface" - AHB clock
- "core" - core clock
- "rxoob" - RX out-of-band clock
- "pmalive" - Power Module Alive clock
-- assigned-clocks : Shall be:
- SATA_RXOOB_CLK
- SATA_PMALIVE_CLK
-- assigned-clock-rates : Shall be:
- 100Mhz (100000000) for SATA_RXOOB_CLK
- 100Mhz (100000000) for SATA_PMALIVE_CLK
-
-Example:
- sata@29000000 {
- compatible = "qcom,ipq806x-ahci", "generic-ahci";
- reg = <0x29000000 0x180>;
-
- interrupts = <0 209 0x0>;
-
- clocks = <&gcc SFAB_SATA_S_H_CLK>,
- <&gcc SATA_H_CLK>,
- <&gcc SATA_A_CLK>,
- <&gcc SATA_RXOOB_CLK>,
- <&gcc SATA_PMALIVE_CLK>;
- clock-names = "slave_iface", "iface", "core",
- "rxoob", "pmalive";
- assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>;
- assigned-clock-rates = <100000000>, <100000000>;
-
- phys = <&sata_phy>;
- phy-names = "sata-phy";
- };
diff --git a/Documentation/devicetree/bindings/board/fsl,bcsr.yaml b/Documentation/devicetree/bindings/board/fsl,bcsr.yaml
new file mode 100644
index 000000000000..df3dd8399671
--- /dev/null
+++ b/Documentation/devicetree/bindings/board/fsl,bcsr.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/board/fsl,bcsr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Board Control and Status
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,mpc8360mds-bcsr
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ board@f8000000 {
+ compatible = "fsl,mpc8360mds-bcsr";
+ reg = <0xf8000000 0x8000>;
+ };
diff --git a/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml b/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml
new file mode 100644
index 000000000000..28b37772fb65
--- /dev/null
+++ b/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/board/fsl,fpga-qixis-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale on-board FPGA connected on I2C bus
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,bsc9132qds-fpga
+ - const: fsl,fpga-qixis-i2c
+ - items:
+ - enum:
+ - fsl,ls1028aqds-fpga
+ - fsl,lx2160aqds-fpga
+ - const: fsl,fpga-qixis-i2c
+ - const: simple-mfd
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ mux-controller:
+ $ref: /schemas/mux/reg-mux.yaml
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ board-control@66 {
+ compatible = "fsl,bsc9132qds-fpga", "fsl,fpga-qixis-i2c";
+ reg = <0x66>;
+ };
+ };
+
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ board-control@66 {
+ compatible = "fsl,ls1028aqds-fpga", "fsl,fpga-qixis-i2c",
+ "simple-mfd";
+ reg = <0x66>;
+
+ mux-controller {
+ compatible = "reg-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x54 0xf0>; /* 0: reg 0x54, bits 7:4 */
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml b/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml
new file mode 100644
index 000000000000..5a3cd431ef6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/board/fsl,fpga-qixis.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale on-board FPGA/CPLD
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: fsl,p1022ds-fpga
+ - const: fsl,fpga-ngpixis
+ - items:
+ - enum:
+ - fsl,ls1088aqds-fpga
+ - fsl,ls1088ardb-fpga
+ - fsl,ls2080aqds-fpga
+ - fsl,ls2080ardb-fpga
+ - const: fsl,fpga-qixis
+ - items:
+ - enum:
+ - fsl,ls1043aqds-fpga
+ - fsl,ls1043ardb-fpga
+ - fsl,ls1046aqds-fpga
+ - fsl,ls1046ardb-fpga
+ - fsl,ls208xaqds-fpga
+ - const: fsl,fpga-qixis
+ - const: simple-mfd
+ - enum:
+ - fsl,ls1043ardb-cpld
+ - fsl,ls1046ardb-cpld
+ - fsl,t1040rdb-cpld
+ - fsl,t1042rdb-cpld
+ - fsl,t1042rdb_pi-cpld
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges:
+ maxItems: 1
+
+patternProperties:
+ '^mdio-mux@[a-f0-9,]+$':
+ $ref: /schemas/net/mdio-mux-mmioreg.yaml
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ board-control@3 {
+ compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
+ reg = <3 0x30>;
+ interrupt-parent = <&mpic>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW 0 0>;
+ };
+
+ - |
+ board-control@3 {
+ compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis";
+ reg = <0x3 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/board/fsl-board.txt b/Documentation/devicetree/bindings/board/fsl-board.txt
deleted file mode 100644
index 9cde57015921..000000000000
--- a/Documentation/devicetree/bindings/board/fsl-board.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-Freescale Reference Board Bindings
-
-This document describes device tree bindings for various devices that
-exist on some Freescale reference boards.
-
-* Board Control and Status (BCSR)
-
-Required properties:
-
- - compatible : Should be "fsl,<board>-bcsr"
- - reg : Offset and length of the register set for the device
-
-Example:
-
- bcsr@f8000000 {
- compatible = "fsl,mpc8360mds-bcsr";
- reg = <f8000000 8000>;
- };
-
-* Freescale on-board FPGA
-
-This is the memory-mapped registers for on board FPGA.
-
-Required properties:
-- compatible: should be a board-specific string followed by a string
- indicating the type of FPGA. Example:
- "fsl,<board>-fpga", "fsl,fpga-pixis", or
- "fsl,<board>-fpga", "fsl,fpga-qixis"
-- reg: should contain the address and the length of the FPGA register set.
-
-Optional properties:
-- interrupts: should specify event (wakeup) IRQ.
-
-Example (P1022DS):
-
- board-control@3,0 {
- compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
- reg = <3 0 0x30>;
- interrupt-parent = <&mpic>;
- interrupts = <8 8 0 0>;
- };
-
-Example (LS2080A-RDB):
-
- cpld@3,0 {
- compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis";
- reg = <0x3 0 0x10000>;
- };
-
-* Freescale on-board FPGA connected on I2C bus
-
-Some Freescale boards like BSC9132QDS have on board FPGA connected on
-the i2c bus.
-
-Required properties:
-- compatible: Should be a board-specific string followed by a string
- indicating the type of FPGA. Example:
- "fsl,<board>-fpga", "fsl,fpga-qixis-i2c"
-- reg: Should contain the address of the FPGA
-
-Example:
- fpga: fpga@66 {
- compatible = "fsl,bsc9132qds-fpga", "fsl,fpga-qixis-i2c";
- reg = <0x66>;
- };
-
-* Freescale on-board CPLD
-
-Some Freescale boards like T1040RDB have an on board CPLD connected.
-
-Required properties:
-- compatible: Should be a board-specific string like "fsl,<board>-cpld"
- Example:
- "fsl,t1040rdb-cpld", "fsl,t1042rdb-cpld", "fsl,t1042rdb_pi-cpld"
-- reg: should describe CPLD registers
-
-Example:
- cpld@3,0 {
- compatible = "fsl,t1040rdb-cpld";
- reg = <3 0 0x300>;
- };
diff --git a/Documentation/devicetree/bindings/bus/qcom,ebi2.txt b/Documentation/devicetree/bindings/bus/qcom,ebi2.txt
deleted file mode 100644
index 5058aa2c63b2..000000000000
--- a/Documentation/devicetree/bindings/bus/qcom,ebi2.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-Qualcomm External Bus Interface 2 (EBI2)
-
-The EBI2 contains two peripheral blocks: XMEM and LCDC. The XMEM handles any
-external memory (such as NAND or other memory-mapped peripherals) whereas
-LCDC handles LCD displays.
-
-As it says it connects devices to an external bus interface, meaning address
-lines (up to 9 address lines so can only address 1KiB external memory space),
-data lines (16 bits), OE (output enable), ADV (address valid, used on some
-NOR flash memories), WE (write enable). This on top of 6 different chip selects
-(CS0 thru CS5) so that in theory 6 different devices can be connected.
-
-Apparently this bus is clocked at 64MHz. It has dedicated pins on the package
-and the bus can only come out on these pins, however if some of the pins are
-unused they can be left unconnected or remuxed to be used as GPIO or in some
-cases other orthogonal functions as well.
-
-Also CS1 and CS2 has -A and -B signals. Why they have that is unclear to me.
-
-The chip selects have the following memory range assignments. This region of
-memory is referred to as "Chip Peripheral SS FPB0" and is 168MB big.
-
-Chip Select Physical address base
-CS0 GPIO134 0x1a800000-0x1b000000 (8MB)
-CS1 GPIO39 (A) / GPIO123 (B) 0x1b000000-0x1b800000 (8MB)
-CS2 GPIO40 (A) / GPIO124 (B) 0x1b800000-0x1c000000 (8MB)
-CS3 GPIO133 0x1d000000-0x25000000 (128 MB)
-CS4 GPIO132 0x1c800000-0x1d000000 (8MB)
-CS5 GPIO131 0x1c000000-0x1c800000 (8MB)
-
-The APQ8060 Qualcomm Application Processor User Guide, 80-N7150-14 Rev. A,
-August 6, 2012 contains some incomplete documentation of the EBI2.
-
-FIXME: the manual mentions "write precharge cycles" and "precharge cycles".
-We have not been able to figure out which bit fields these correspond to
-in the hardware, or what valid values exist. The current hypothesis is that
-this is something just used on the FAST chip selects and that the SLOW
-chip selects are understood fully. There is also a "byte device enable"
-flag somewhere for 8bit memories.
-
-FIXME: The chipselects have SLOW and FAST configuration registers. It's a bit
-unclear what this means, if they are mutually exclusive or can be used
-together, or if some chip selects are hardwired to be FAST and others are SLOW
-by design.
-
-The XMEM registers are totally undocumented but could be partially decoded
-because the Cypress AN49576 Antioch Westbridge apparently has suspiciously
-similar register layout, see: http://www.cypress.com/file/105771/download
-
-Required properties:
-- compatible: should be one of:
- "qcom,msm8660-ebi2"
- "qcom,apq8060-ebi2"
-- #address-cells: should be <2>: the first cell is the chipselect,
- the second cell is the offset inside the memory range
-- #size-cells: should be <1>
-- ranges: should be set to:
- ranges = <0 0x0 0x1a800000 0x00800000>,
- <1 0x0 0x1b000000 0x00800000>,
- <2 0x0 0x1b800000 0x00800000>,
- <3 0x0 0x1d000000 0x08000000>,
- <4 0x0 0x1c800000 0x00800000>,
- <5 0x0 0x1c000000 0x00800000>;
-- reg: two ranges of registers: EBI2 config and XMEM config areas
-- reg-names: should be "ebi2", "xmem"
-- clocks: two clocks, EBI_2X and EBI
-- clock-names: should be "ebi2x", "ebi2"
-
-Optional subnodes:
-- Nodes inside the EBI2 will be considered device nodes.
-
-The following optional properties are properties that can be tagged onto
-any device subnode. We are assuming that there can be only ONE device per
-chipselect subnode, else the properties will become ambiguous.
-
-Optional properties arrays for SLOW chip selects:
-- qcom,xmem-recovery-cycles: recovery cycles is the time the memory continues to
- drive the data bus after OE is de-asserted, in order to avoid contention on
- the data bus. They are inserted when reading one CS and switching to another
- CS or read followed by write on the same CS. Valid values 0 thru 15. Minimum
- value is actually 1, so a value of 0 will still yield 1 recovery cycle.
-- qcom,xmem-write-hold-cycles: write hold cycles, these are extra cycles
- inserted after every write minimum 1. The data out is driven from the time
- WE is asserted until CS is asserted. With a hold of 1 (value = 0), the CS
- stays active for 1 extra cycle etc. Valid values 0 thru 15.
-- qcom,xmem-write-delta-cycles: initial latency for write cycles inserted for
- the first write to a page or burst memory. Valid values 0 thru 255.
-- qcom,xmem-read-delta-cycles: initial latency for read cycles inserted for the
- first read to a page or burst memory. Valid values 0 thru 255.
-- qcom,xmem-write-wait-cycles: number of wait cycles for every write access, 0=1
- cycle. Valid values 0 thru 15.
-- qcom,xmem-read-wait-cycles: number of wait cycles for every read access, 0=1
- cycle. Valid values 0 thru 15.
-
-Optional properties arrays for FAST chip selects:
-- qcom,xmem-address-hold-enable: this is a boolean property stating that we
- shall hold the address for an extra cycle to meet hold time requirements
- with ADV assertion.
-- qcom,xmem-adv-to-oe-recovery-cycles: the number of cycles elapsed before an OE
- assertion, with respect to the cycle where ADV (address valid) is asserted.
- 2 means 2 cycles between ADV and OE. Valid values 0, 1, 2 or 3.
-- qcom,xmem-read-hold-cycles: the length in cycles of the first segment of a
- read transfer. For a single read transfer this will be the time from CS
- assertion to OE assertion. Valid values 0 thru 15.
-
-
-Example:
-
-ebi2@1a100000 {
- compatible = "qcom,apq8060-ebi2";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0x0 0x1a800000 0x00800000>,
- <1 0x0 0x1b000000 0x00800000>,
- <2 0x0 0x1b800000 0x00800000>,
- <3 0x0 0x1d000000 0x08000000>,
- <4 0x0 0x1c800000 0x00800000>,
- <5 0x0 0x1c000000 0x00800000>;
- reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
- reg-names = "ebi2", "xmem";
- clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>;
- clock-names = "ebi2x", "ebi2";
- /* Make sure to set up the pin control for the EBI2 */
- pinctrl-names = "default";
- pinctrl-0 = <&foo_ebi2_pins>;
-
- foo-ebi2@2,0 {
- compatible = "foo";
- reg = <2 0x0 0x100>;
- (...)
- qcom,xmem-recovery-cycles = <0>;
- qcom,xmem-write-hold-cycles = <3>;
- qcom,xmem-write-delta-cycles = <31>;
- qcom,xmem-read-delta-cycles = <28>;
- qcom,xmem-write-wait-cycles = <9>;
- qcom,xmem-read-wait-cycles = <9>;
- };
-};
diff --git a/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml b/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml
new file mode 100644
index 000000000000..1b1fb3538e6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml
@@ -0,0 +1,239 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/qcom,ebi2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm External Bus Interface 2 (EBI2)
+
+description: |
+ The EBI2 contains two peripheral blocks: XMEM and LCDC. The XMEM handles any
+ external memory (such as NAND or other memory-mapped peripherals) whereas
+ LCDC handles LCD displays.
+
+ As the name says, it connects devices to an external bus interface,
+ meaning address lines (up to 9 address lines, so it can only address 1KiB
+ of external memory space), data lines (16 bits), OE (output enable), ADV
+ (address valid, used on some NOR flash memories) and WE (write enable).
+ This is on top of 6 different chip selects (CS0 thru CS5), so that in
+ theory 6 different devices can be connected.
+
+ Apparently this bus is clocked at 64MHz. It has dedicated pins on the
+ package and the bus can only come out on these pins; however, if some of
+ the pins are unused they can be left unconnected, or remuxed to be used
+ as GPIO or, in some cases, other orthogonal functions as well.
+
+ Also, CS1 and CS2 have -A and -B signals. Why they have these is unclear.
+
+ The chip selects have the following memory range assignments. This region of
+ memory is referred to as "Chip Peripheral SS FPB0" and is 168MB big.
+
+ Chip Select Physical address base
+ CS0 GPIO134 0x1a800000-0x1b000000 (8MB)
+ CS1 GPIO39 (A) / GPIO123 (B) 0x1b000000-0x1b800000 (8MB)
+ CS2 GPIO40 (A) / GPIO124 (B) 0x1b800000-0x1c000000 (8MB)
+ CS3 GPIO133 0x1d000000-0x25000000 (128 MB)
+ CS4 GPIO132 0x1c800000-0x1d000000 (8MB)
+ CS5 GPIO131 0x1c000000-0x1c800000 (8MB)
+
+ The APQ8060 Qualcomm Application Processor User Guide, 80-N7150-14 Rev. A,
+ August 6, 2012 contains some incomplete documentation of the EBI2.
+
+ FIXME: the manual mentions "write precharge cycles" and "precharge cycles".
+ We have not been able to figure out which bit fields these correspond to
+ in the hardware, or what valid values exist. The current hypothesis is that
+ this is something just used on the FAST chip selects and that the SLOW
+ chip selects are understood fully. There is also a "byte device enable"
+ flag somewhere for 8bit memories.
+
+ FIXME: The chipselects have SLOW and FAST configuration registers. It's a bit
+ unclear what this means, if they are mutually exclusive or can be used
+ together, or if some chip selects are hardwired to be FAST and others are SLOW
+ by design.
+
+ The XMEM registers are totally undocumented but could be partially decoded
+ because the Cypress AN49576 Antioch Westbridge apparently has suspiciously
+ similar register layout, see: http://www.cypress.com/file/105771/download
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,apq8060-ebi2
+ - qcom,msm8660-ebi2
+
+ reg:
+ items:
+ - description: EBI2 config region
+ - description: XMEM config region
+
+ reg-names:
+ items:
+ - const: ebi2
+ - const: xmem
+
+ ranges: true
+
+ clocks:
+ items:
+ - description: EBI_2X clock
+ - description: EBI clock
+
+ clock-names:
+ items:
+ - const: ebi2x
+ - const: ebi2
+
+ '#address-cells':
+ const: 2
+
+ '#size-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ranges
+ - clocks
+ - clock-names
+ - '#address-cells'
+ - '#size-cells'
+
+patternProperties:
+ "^.*@[0-5],[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ reg:
+ maxItems: 1
+
+ # SLOW chip selects
+ qcom,xmem-recovery-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The time the memory continues to drive the data bus after OE
+ is de-asserted, in order to avoid contention on the data bus.
+ They are inserted when reading one CS and switching to another
+ CS or read followed by write on the same CS. Minimum value is
+ actually 1, so a value of 0 will still yield 1 recovery cycle.
+ minimum: 0
+ maximum: 15
+
+ qcom,xmem-write-hold-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The extra cycles inserted after every write; minimum 1. The
+ data out is driven from the time WE is asserted until CS is
+ asserted. With a hold of 1 (value = 0), the CS stays active
+ for 1 extra cycle, etc.
+ minimum: 0
+ maximum: 15
+
+ qcom,xmem-write-delta-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The initial latency for write cycles inserted for the first
+ write to a page or burst memory.
+ minimum: 0
+ maximum: 255
+
+ qcom,xmem-read-delta-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The initial latency for read cycles inserted for the first
+ read from a page or burst memory.
+ minimum: 0
+ maximum: 255
+
+ qcom,xmem-write-wait-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The number of wait cycles for every write access.
+ minimum: 0
+ maximum: 15
+
+ qcom,xmem-read-wait-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The number of wait cycles for every read access.
+ minimum: 0
+ maximum: 15
+
+ # FAST chip selects
+ qcom,xmem-address-hold-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ Holds the address for an extra cycle to meet hold time
+ requirements with ADV assertion, when set to 1.
+ enum: [ 0, 1 ]
+
+ qcom,xmem-adv-to-oe-recovery-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The number of cycles elapsed before an OE assertion, with
+ respect to the cycle where ADV (address valid) is asserted.
+ minimum: 0
+ maximum: 3
+
+ qcom,xmem-read-hold-cycles:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The length in cycles of the first segment of a read transfer.
+ For a single read transfer this will be the time from CS
+ assertion to OE assertion.
+ minimum: 0
+ maximum: 15
+
+ required:
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-msm8660.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ external-bus@1a100000 {
+ compatible = "qcom,msm8660-ebi2";
+ reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
+ reg-names = "ebi2", "xmem";
+ ranges = <0 0x0 0x1a800000 0x00800000>,
+ <1 0x0 0x1b000000 0x00800000>,
+ <2 0x0 0x1b800000 0x00800000>,
+ <3 0x0 0x1d000000 0x08000000>,
+ <4 0x0 0x1c800000 0x00800000>,
+ <5 0x0 0x1c000000 0x00800000>;
+
+ clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>;
+ clock-names = "ebi2x", "ebi2";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ethernet@2,0 {
+ compatible = "smsc,lan9221", "smsc,lan9115";
+ reg = <2 0x0 0x100>;
+
+ interrupts-extended = <&pm8058_gpio 7 IRQ_TYPE_EDGE_FALLING>,
+ <&tlmm 29 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+
+ phy-mode = "mii";
+ reg-io-width = <2>;
+ smsc,force-external-phy;
+ smsc,irq-push-pull;
+
+ /* SLOW chipselect config */
+ qcom,xmem-recovery-cycles = <0>;
+ qcom,xmem-write-hold-cycles = <3>;
+ qcom,xmem-write-delta-cycles = <31>;
+ qcom,xmem-read-delta-cycles = <28>;
+ qcom,xmem-write-wait-cycles = <9>;
+ qcom,xmem-read-wait-cycles = <9>;
+ };
+ };
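
[Editor's note: the schema example above only exercises the SLOW chip-select
properties. For the FAST properties described earlier, a hypothetical child
node might look like the sketch below; the device, compatible, reg and all
timing values are illustrative, not taken from any real board.]

    flash@0,0 {
        compatible = "cfi-flash";                    /* illustrative */
        reg = <0 0x0 0x800000>;

        /* FAST chipselect config; values are made up for illustration */
        qcom,xmem-address-hold-enable = <1>;
        qcom,xmem-adv-to-oe-recovery-cycles = <2>;
        qcom,xmem-read-hold-cycles = <3>;
    };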
diff --git a/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml b/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml
index 43de3c6fc1cf..700865cc9792 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml
+++ b/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml
@@ -24,11 +24,13 @@ properties:
items:
- description: input top pll
- description: input mclk pll
+ - description: input fix pll
clock-names:
items:
- const: top
- const: mclk
+ - const: fix
"#clock-cells":
const: 1
@@ -52,8 +54,9 @@ examples:
compatible = "amlogic,c3-pll-clkc";
reg = <0x0 0x8000 0x0 0x1a4>;
clocks = <&scmi_clk 2>,
- <&scmi_clk 5>;
- clock-names = "top", "mclk";
+ <&scmi_clk 5>,
+ <&scmi_clk 12>;
+ clock-names = "top", "mclk", "fix";
#clock-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
index c1bdcd9058ed..c9eb60776b4d 100644
--- a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
+++ b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
@@ -42,6 +42,7 @@ properties:
- atmel,sama5d3-pmc
- atmel,sama5d4-pmc
- microchip,sam9x60-pmc
+ - microchip,sam9x7-pmc
- microchip,sama7g5-pmc
- const: syscon
@@ -88,6 +89,7 @@ allOf:
contains:
enum:
- microchip,sam9x60-pmc
+ - microchip,sam9x7-pmc
- microchip,sama7g5-pmc
then:
properties:
diff --git a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
index 7be29877e6d2..c2283cd07f05 100644
--- a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
+++ b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
@@ -18,7 +18,9 @@ properties:
- atmel,sama5d4-sckc
- microchip,sam9x60-sckc
- items:
- - const: microchip,sama7g5-sckc
+ - enum:
+ - microchip,sam9x7-sckc
+ - microchip,sama7g5-sckc
- const: microchip,sam9x60-sckc
reg:
diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
index bd4cefbb1244..30252c95700c 100644
--- a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
+++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
@@ -134,9 +134,13 @@ properties:
"#reset-cells":
const: 1
- clocks: true
+ clocks:
+ minItems: 3
+ maxItems: 4
- clock-names: true
+ clock-names:
+ minItems: 3
+ maxItems: 4
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
index 59de125647ec..ccff74eda9fb 100644
--- a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
+++ b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
@@ -67,9 +67,9 @@ properties:
minItems: 1
maxItems: 19
- clocks: true
- assigned-clocks: true
- assigned-clock-parents: true
+ clocks:
+ minItems: 1
+ maxItems: 19
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index a2c6eea9871d..8b400da05fbe 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -126,8 +126,6 @@ required:
- compatible
- reg
- '#clock-cells'
- - idt,shutdown
- - idt,output-enable-active
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
index 0a6dc1a6e122..6588a17a7d9a 100644
--- a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
@@ -44,6 +44,9 @@ properties:
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mp-clock.h
for the full list of i.MX8MP IMX8MP_CLK_AUDIOMIX_ clock IDs.
+ '#reset-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
index 685535846cbb..db5f48e4dd15 100644
--- a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
@@ -35,7 +35,7 @@ properties:
- mediatek,mt2701-apmixedsys
- mediatek,mt2712-apmixedsys
- mediatek,mt6765-apmixedsys
- - mediatek,mt6779-apmixedsys
+ - mediatek,mt6779-apmixed
- mediatek,mt6795-apmixedsys
- mediatek,mt7629-apmixedsys
- mediatek,mt8167-apmixedsys
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml
index 230b5188a88d..252c46d316ee 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,infracfg.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,infracfg.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Infrastructure System Configuration Controller
diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml
deleted file mode 100644
index 378b761237d3..000000000000
--- a/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/clock/mediatek,mt6795-sys-clock.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: MediaTek System Clock Controller for MT6795
-
-maintainers:
- - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
- - Chun-Jie Chen <chun-jie.chen@mediatek.com>
-
-description:
- The Mediatek system clock controller provides various clocks and system
- configuration like reset and bus protection on MT6795.
-
-properties:
- compatible:
- items:
- - enum:
- - mediatek,mt6795-apmixedsys
- - mediatek,mt6795-infracfg
- - mediatek,mt6795-pericfg
- - mediatek,mt6795-topckgen
- - const: syscon
-
- reg:
- maxItems: 1
-
- '#clock-cells':
- const: 1
-
- '#reset-cells':
- const: 1
-
-required:
- - compatible
- - reg
- - '#clock-cells'
-
-additionalProperties: false
-
-examples:
- - |
- soc {
- #address-cells = <2>;
- #size-cells = <2>;
-
- topckgen: clock-controller@10000000 {
- compatible = "mediatek,mt6795-topckgen", "syscon";
- reg = <0 0x10000000 0 0x1000>;
- #clock-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml
index 7cd14b163abe..f4e58bfa504f 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8186-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8186
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml
index 64c769416690..1c446fbc5108 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8186-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8186
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml
index dff4c8e8fd4b..b8d690e28bdc 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8192-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8192
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml
index 8d608fddf3f9..bf8c9aacdf1e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8192-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8192
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml
index d17164b0b13e..fcc963aff087 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8195-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8195
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml
index 066c9b3d6ac9..69f096eb168d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8195-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8195
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml
index 33c94c491828..2f06baecfd23 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,pericfg.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,pericfg.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Peripheral Configuration Controller
diff --git a/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml
new file mode 100644
index 000000000000..10483e26878f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/mediatek,syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek clock controller syscons
+
+maintainers:
+ - Matthias Brugger <matthias.bgg@gmail.com>
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+ The MediaTek clock controller syscons provide various clocks to the system.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - mediatek,mt2701-bdpsys
+ - mediatek,mt2701-imgsys
+ - mediatek,mt2701-vdecsys
+ - mediatek,mt2712-bdpsys
+ - mediatek,mt2712-imgsys
+ - mediatek,mt2712-jpgdecsys
+ - mediatek,mt2712-mcucfg
+ - mediatek,mt2712-mfgcfg
+ - mediatek,mt2712-vdecsys
+ - mediatek,mt2712-vencsys
+ - mediatek,mt6765-camsys
+ - mediatek,mt6765-imgsys
+ - mediatek,mt6765-mipi0a
+ - mediatek,mt6765-vcodecsys
+ - mediatek,mt6779-camsys
+ - mediatek,mt6779-imgsys
+ - mediatek,mt6779-ipesys
+ - mediatek,mt6779-mfgcfg
+ - mediatek,mt6779-vdecsys
+ - mediatek,mt6779-vencsys
+ - mediatek,mt6797-imgsys
+ - mediatek,mt6797-vdecsys
+ - mediatek,mt6797-vencsys
+ - mediatek,mt8167-imgsys
+ - mediatek,mt8167-mfgcfg
+ - mediatek,mt8167-vdecsys
+ - mediatek,mt8173-imgsys
+ - mediatek,mt8173-vdecsys
+ - mediatek,mt8173-vencltsys
+ - mediatek,mt8173-vencsys
+ - mediatek,mt8183-camsys
+ - mediatek,mt8183-imgsys
+ - mediatek,mt8183-ipu_conn
+ - mediatek,mt8183-ipu_adl
+ - mediatek,mt8183-ipu_core0
+ - mediatek,mt8183-ipu_core1
+ - mediatek,mt8183-mcucfg
+ - mediatek,mt8183-mfgcfg
+ - mediatek,mt8183-vdecsys
+ - mediatek,mt8183-vencsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-bdpsys
+ - const: mediatek,mt2701-bdpsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-imgsys
+ - const: mediatek,mt2701-imgsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-vdecsys
+ - const: mediatek,mt2701-vdecsys
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@11220000 {
+ compatible = "mediatek,mt2701-bdpsys", "syscon";
+ reg = <0x11220000 0x2000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
index 2dffc02dcd8b..5dc360b2ea4b 100644
--- a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
+++ b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
@@ -16,6 +16,7 @@ properties:
- nxp,imx95-lvds-csr
- nxp,imx95-display-csr
- nxp,imx95-camera-csr
+ - nxp,imx95-netcmix-blk-ctrl
- nxp,imx95-vpu-csr
- const: syscon
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt
deleted file mode 100644
index 20cbca3f41d8..000000000000
--- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-NXP LPC32xx Clock Controller
-
-Required properties:
-- compatible: should be "nxp,lpc3220-clk"
-- reg: should contain clock controller registers location and length
-- #clock-cells: must be 1, the cell holds id of a clock provided by the
- clock controller
-- clocks: phandles of external oscillators, the list must contain one
- 32768 Hz oscillator and may have one optional high frequency oscillator
-- clock-names: list of external oscillator clock names, must contain
- "xtal_32k" and may have optional "xtal"
-
-Examples:
-
- /* System Control Block */
- scb {
- compatible = "simple-bus";
- ranges = <0x0 0x040004000 0x00001000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- clk: clock-controller@0 {
- compatible = "nxp,lpc3220-clk";
- reg = <0x00 0x114>;
- #clock-cells = <1>;
-
- clocks = <&xtal_32k>, <&xtal>;
- clock-names = "xtal_32k", "xtal";
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml
new file mode 100644
index 000000000000..16f79616d18a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc3220-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx Clock Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: External 32768 Hz oscillator.
+ - description: Optional high frequency oscillator.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: xtal_32k
+ - const: xtal
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@0 {
+ compatible = "nxp,lpc3220-clk";
+ reg = <0x00 0x114>;
+ #clock-cells = <1>;
+ clocks = <&xtal_32k>, <&xtal>;
+ clock-names = "xtal_32k", "xtal";
+ };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt
deleted file mode 100644
index 0aa249409b51..000000000000
--- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-NXP LPC32xx USB Clock Controller
-
-Required properties:
-- compatible: should be "nxp,lpc3220-usb-clk"
-- reg: should contain clock controller registers location and length
-- #clock-cells: must be 1, the cell holds id of a clock provided by the
- USB clock controller
-
-Examples:
-
- usb {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges = <0x0 0x31020000 0x00001000>;
-
- usbclk: clock-controller@f00 {
- compatible = "nxp,lpc3220-usb-clk";
- reg = <0xf00 0x100>;
- #clock-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml
new file mode 100644
index 000000000000..10361d2292fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc3220-usb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx USB Clock Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-usb-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@f00 {
+ compatible = "nxp,lpc3220-usb-clk";
+ reg = <0xf00 0x100>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
index 5ca927a8b1d5..47ceab641a4c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,ipq6018-a53pll
- qcom,ipq8074-a53pll
- qcom,ipq9574-a73pll
+ - qcom,msm8226-a7pll
- qcom,msm8916-a53pll
- qcom,msm8939-a53pll
@@ -40,6 +41,9 @@ properties:
operating-points-v2: true
+ opp-table:
+ type: object
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml
index adc30d84fa8f..9193de681de2 100644
--- a/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml
@@ -31,6 +31,8 @@ properties:
- description: USB PCIE wrapper pipe clock source
'#power-domain-cells': false
+ '#interconnect-cells':
+ const: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml
new file mode 100644
index 000000000000..033e010754a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcs404-turingcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Turing Clock & Reset Controller on QCS404
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+properties:
+ compatible:
+ const: qcom,qcs404-turingcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-qcs404.h>
+ clock-controller@800000 {
+ compatible = "qcom,qcs404-turingcc";
+ reg = <0x00800000 0x30000>;
+ clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
index 3665dd30604a..02fcffe93f1a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
@@ -139,7 +139,7 @@ examples:
- |
rpm {
rpm-requests {
- compatible = "qcom,rpm-msm8916";
+ compatible = "qcom,rpm-msm8916", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
clock-controller {
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
index 3326dcd6766c..273d66e245c5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
@@ -18,9 +18,16 @@ description: |
properties:
compatible:
- enum:
- - qcom,sc8280xp-lpassaudiocc
- - qcom,sc8280xp-lpasscc
+ oneOf:
+ - enum:
+ - qcom,sc8280xp-lpassaudiocc
+ - qcom,sc8280xp-lpasscc
+ - items:
+ - const: qcom,x1e80100-lpassaudiocc
+ - const: qcom,sc8280xp-lpassaudiocc
+ - items:
+ - const: qcom,x1e80100-lpasscc
+ - const: qcom,sc8280xp-lpasscc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml
new file mode 100644
index 000000000000..f54ce865880d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sm4450-camcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Camera Clock & Reset Controller on SM4450
+
+maintainers:
+ - Ajit Pandey <quic_ajipan@quicinc.com>
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm camera clock control module provides the clocks, resets and power
+ domains on SM4450
+
+ See also:: include/dt-bindings/clock/qcom,sm4450-camcc.h
+
+properties:
+ compatible:
+ const: qcom,sm4450-camcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Camera AHB clock source from GCC
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,sm4450-gcc.h>
+ clock-controller@ade0000 {
+ compatible = "qcom,sm4450-camcc";
+ reg = <0x0ade0000 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml
new file mode 100644
index 000000000000..2aa05353eff1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sm4450-dispcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display Clock & Reset Controller on SM4450
+
+maintainers:
+ - Ajit Pandey <quic_ajipan@quicinc.com>
+ - Taniya Das <quic_tdas@quicinc.com>
+
+description: |
+ Qualcomm display clock control module provides the clocks, resets and power
+ domains on SM4450
+
+ See also:: include/dt-bindings/clock/qcom,sm4450-dispcc.h
+
+properties:
+ compatible:
+ const: qcom,sm4450-dispcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Display AHB clock source from GCC
+ - description: sleep clock source
+ - description: Byte clock from DSI PHY0
+ - description: Pixel clock from DSI PHY0
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,sm4450-gcc.h>
+ clock-controller@af00000 {
+ compatible = "qcom,sm4450-dispcc";
+ reg = <0x0af00000 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&gcc GCC_DISP_AHB_CLK>,
+ <&sleep_clk>,
+ <&dsi0_phy_pll_out_byteclk>,
+ <&dsi0_phy_pll_out_dsiclk>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml
new file mode 100644
index 000000000000..5e9f62d7866c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sm8150-camcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Camera Clock & Reset Controller on SM8150
+
+maintainers:
+ - Satya Priya Kakitapalli <quic_skakitap@quicinc.com>
+
+description: |
+ Qualcomm camera clock control module provides the clocks, resets and
+ power domains on SM8150.
+
+ See also:: include/dt-bindings/clock/qcom,sm8150-camcc.h
+
+properties:
+ compatible:
+ const: qcom,sm8150-camcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Camera AHB clock from GCC
+
+ power-domains:
+ maxItems: 1
+ description:
+ A phandle and PM domain specifier for the MMCX power domain.
+
+ required-opps:
+ maxItems: 1
+ description:
+ A phandle to an OPP node describing required MMCX performance point.
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+ - required-opps
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sm8150.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ clock-controller@ad00000 {
+ compatible = "qcom,sm8150-camcc";
+ reg = <0x0ad00000 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>;
+ power-domains = <&rpmhpd SM8150_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
index f58edfc10f4c..26afbbe65511 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
@@ -21,9 +21,6 @@ description: |
include/dt-bindings/clock/qcom,sm8650-camcc.h
include/dt-bindings/clock/qcom,x1e80100-camcc.h
-allOf:
- - $ref: qcom,gcc.yaml#
-
properties:
compatible:
enum:
@@ -57,7 +54,21 @@ required:
- compatible
- clocks
- power-domains
- - required-opps
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc8280xp-camcc
+ - qcom,sm8450-camcc
+ - qcom,sm8550-camcc
+ - qcom,x1e80100-camcc
+ then:
+ required:
+ - required-opps
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
index d10bb002906e..2d2c59aa8c6b 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml
@@ -14,6 +14,7 @@ description: |
domains on Qualcomm SoCs.
See also::
+ include/dt-bindings/clock/qcom,sm4450-gpucc.h
include/dt-bindings/clock/qcom,sm8450-gpucc.h
include/dt-bindings/clock/qcom,sm8550-gpucc.h
include/dt-bindings/reset/qcom,sm8450-gpucc.h
@@ -23,6 +24,7 @@ description: |
properties:
compatible:
enum:
+ - qcom,sm4450-gpucc
- qcom,sm8450-gpucc
- qcom,sm8550-gpucc
- qcom,sm8650-gpucc
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
index b2792b4bb554..9829ba28fe0e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
@@ -44,11 +44,20 @@ required:
- compatible
- clocks
- power-domains
- - required-opps
- '#power-domain-cells'
allOf:
- $ref: qcom,gcc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8450-videocc
+ - qcom,sm8550-videocc
+ then:
+ required:
+ - required-opps
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt b/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
deleted file mode 100644
index 126517de5f9a..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Qualcomm Turing Clock & Reset Controller Binding
-------------------------------------------------
-
-Required properties :
-- compatible: shall contain "qcom,qcs404-turingcc".
-- reg: shall contain base register location and length.
-- clocks: ahb clock for the TuringCC
-- #clock-cells: from common clock binding, shall contain 1.
-- #reset-cells: from common reset binding, shall contain 1.
-
-Example:
- turingcc: clock-controller@800000 {
- compatible = "qcom,qcs404-turingcc";
- reg = <0x00800000 0x30000>;
- clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
index 9185d101737e..a0e09b7002f0 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
@@ -32,12 +32,16 @@ properties:
reg:
maxItems: 1
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 3
'#clock-cells':
const: 1
- clock-output-names: true
+ clock-output-names:
+ minItems: 3
+ maxItems: 17
renesas,mode:
description: Board-specific settings of the MD_CK* bits on R-Mobile A1
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index 084259d30232..77ce3615c65a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -31,6 +31,7 @@ properties:
- renesas,r8a7745-cpg-mssr # RZ/G1E
- renesas,r8a77470-cpg-mssr # RZ/G1C
- renesas,r8a774a1-cpg-mssr # RZ/G2M
+ - renesas,r8a774a3-cpg-mssr # RZ/G2M v3.0
- renesas,r8a774b1-cpg-mssr # RZ/G2N
- renesas,r8a774c0-cpg-mssr # RZ/G2E
- renesas,r8a774e1-cpg-mssr # RZ/G2H
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml
new file mode 100644
index 000000000000..926c503bed1f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,rzv2h-cpg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/V2H(P) Clock Pulse Generator (CPG)
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+description:
+ On Renesas RZ/V2H(P) SoCs, the CPG (Clock Pulse Generator) handles generation
+ and control of clock signals for the IP modules, generation and control of resets,
+ and control over booting, low power consumption and power supply domains.
+
+properties:
+ compatible:
+ const: renesas,r9a09g057-cpg
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: AUDIO_EXTAL clock input
+ - description: RTXIN clock input
+ - description: QEXTAL clock input
+
+ clock-names:
+ items:
+ - const: audio_extal
+ - const: rtxin
+ - const: qextal
+
+ '#clock-cells':
+ description: |
+ - For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
+ and a core clock reference, as defined in
+ <dt-bindings/clock/renesas,r9a09g057-cpg.h>,
+ - For module clocks, the two clock specifier cells must be "CPG_MOD" and
+ a module number. The module number is calculated as the CLKON register
+ offset index multiplied by 16, plus the actual bit in the register
+ used to turn the CLK ON. For example, for CGC_GIC_0_GICCLK, the
+ calculation is (1 * 16 + 3) = 0x13.
+ const: 2
+
+ '#power-domain-cells':
+ const: 0
+
+ '#reset-cells':
+ description:
+ The single reset specifier cell must be the reset number. The reset number
+ is calculated as the reset register offset index multiplied by 16, plus the
+ actual bit in the register used to reset the specific IP block. For example,
+ for SYS_0_PRESETN, the calculation is (3 * 16 + 0) = 0x30.
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - '#power-domain-cells'
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@10420000 {
+ compatible = "renesas,r9a09g057-cpg";
+ reg = <0x10420000 0x10000>;
+ clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>;
+ clock-names = "audio_extal", "rtxin", "qextal";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ #reset-cells = <1>;
+ };
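
[Editor's note: a consumer sketch tying together the specifier arithmetic in
the description above. The 0x13 (CGC_GIC_0_GICCLK, (1 * 16 + 3)) and 0x30
(SYS_0_PRESETN, (3 * 16 + 0)) values are the worked examples from the
description; the consumer node, its compatible and its address are
hypothetical. CPG_MOD is the specifier macro named in the '#clock-cells'
description.]

    peripheral@12400000 {
        compatible = "vendor,hypothetical-device";  /* illustrative */
        reg = <0x12400000 0x1000>;
        clocks = <&cpg CPG_MOD 0x13>;   /* CGC_GIC_0_GICCLK: (1 * 16 + 3) */
        resets = <&cpg 0x30>;           /* SYS_0_PRESETN: (3 * 16 + 0) */
        power-domains = <&cpg>;
    };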
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml
new file mode 100644
index 000000000000..9c9b36049c71
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3576-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip rk3576 Family Clock and Reset Control Module
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+ - Detlev Casanova <detlev.casanova@collabora.com>
+
+description:
+ The RK3576 clock controller generates the clock and also implements a reset
+ controller for SoC peripherals. For example it provides SCLK_UART2 and
+ PCLK_UART2, as well as SRST_P_UART2 and SRST_S_UART2 for the second UART
+ module.
+
+properties:
+ compatible:
+ const: rockchip,rk3576-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: xin24m
+ - const: xin32k
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@27200000 {
+ compatible = "rockchip,rk3576-cru";
+ reg = <0x27200000 0x5c000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
index 74cd3f3f229a..4ff175c4992b 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
@@ -42,10 +42,6 @@ properties:
- const: xin24m
- const: xin32k
- assigned-clocks: true
-
- assigned-clock-rates: true
-
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description: >
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
index 55c4f94a14d1..32f39e543b36 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml
@@ -35,6 +35,7 @@ properties:
- samsung,exynosautov9-cmu-top
- samsung,exynosautov9-cmu-busmc
- samsung,exynosautov9-cmu-core
+ - samsung,exynosautov9-cmu-dpum
- samsung,exynosautov9-cmu-fsys0
- samsung,exynosautov9-cmu-fsys1
- samsung,exynosautov9-cmu-fsys2
@@ -113,6 +114,24 @@ allOf:
properties:
compatible:
contains:
+ const: samsung,exynosautov9-cmu-dpum
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: DPU Main bus clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: bus
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: samsung,exynosautov9-cmu-fsys0
then:
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
new file mode 100644
index 000000000000..3330b2727474
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynosautov920-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung ExynosAuto v920 SoC clock controller
+
+maintainers:
+ - Sunyeal Hong <sunyeal.hong@samsung.com>
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+description: |
+ ExynosAuto v920 clock controller is comprised of several CMU units, generating
+ clocks for different domains. Those CMU units are modeled as separate device
+ tree nodes, and might depend on each other. Root clocks in that clock tree are
+ two external clocks: OSCCLK/XTCXO (38.4 MHz) and RTCCLK/XrtcXTI (32768 Hz).
+ The external OSCCLK must be defined as a fixed-rate clock in the dts.
+
+ CMU_TOP is a top-level CMU, where all base clocks are prepared using PLLs and
+ dividers; all other clocks of function blocks (other CMUs) are usually
+ derived from CMU_TOP.
+
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All clocks available for usage
+ in clock consumer nodes are defined as preprocessor macros in
+ 'include/dt-bindings/clock/samsung,exynosautov920.h' header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynosautov920-cmu-top
+ - samsung,exynosautov920-cmu-peric0
+ - samsung,exynosautov920-cmu-peric1
+ - samsung,exynosautov920-cmu-misc
+ - samsung,exynosautov920-cmu-hsi0
+ - samsung,exynosautov920-cmu-hsi1
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynosautov920-cmu-top
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+
+ clock-names:
+ items:
+ - const: oscclk
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynosautov920-cmu-peric0
+ - samsung,exynosautov920-cmu-peric1
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_PERICn NOC clock (from CMU_TOP)
+ - description: CMU_PERICn IP clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: noc
+ - const: ip
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - samsung,exynosautov920-cmu-misc
+ - samsung,exynosautov920-cmu-hsi0
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_MISC/CMU_HSI0 NOC clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: noc
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynosautov920-cmu-hsi1
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (38.4 MHz)
+ - description: CMU_HSI1 NOC clock (from CMU_TOP)
+ - description: CMU_HSI1 USBDRD clock (from CMU_TOP)
+ - description: CMU_HSI1 MMC_CARD clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: noc
+ - const: usbdrd
+ - const: mmc_card
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - clock-names
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Clock controller node for CMU_PERIC0
+ - |
+ #include <dt-bindings/clock/samsung,exynosautov920.h>
+
+ cmu_peric0: clock-controller@10800000 {
+ compatible = "samsung,exynosautov920-cmu-peric0";
+ reg = <0x10800000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_PERIC0_NOC>,
+ <&cmu_top DOUT_CLKCMU_PERIC0_IP>;
+ clock-names = "oscclk",
+ "noc",
+ "ip";
+ };
+
+...
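
[Editor's note: as a sketch of the consumer side described above ("client
nodes can use this identifier to specify the clock which they consume"), a
peripheral could reference one of cmu_peric0's output clocks. The node, its
compatible and the CLK_PERIC0_UART macro are hypothetical; real identifiers
come from include/dt-bindings/clock/samsung,exynosautov920.h.]

    serial@10880000 {
        compatible = "samsung,exynosautov920-uart";  /* hypothetical */
        reg = <0x10880000 0x100>;
        clocks = <&cmu_peric0 CLK_PERIC0_UART>;      /* macro name assumed */
        clock-names = "uart";
    };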
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
index 5194be0b410e..9b3aaae546cb 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
@@ -60,8 +60,14 @@ properties:
- st,stm32mp1-rcc
- st,stm32mp13-rcc
- const: syscon
- clocks: true
- clock-names: true
+
+ clocks:
+ minItems: 1
+ maxItems: 5
+
+ clock-names:
+ minItems: 1
+ maxItems: 5
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/cpu/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml
index 239480ef7c30..385b0a511652 100644
--- a/Documentation/devicetree/bindings/cpu/idle-states.yaml
+++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml
@@ -385,7 +385,7 @@ patternProperties:
This property is required in idle state nodes of device tree meant
for RISC-V systems. For more details on the suspend_type parameter
- refer the SBI specifiation v0.3 (or higher) [7].
+ refer to the SBI specification v0.3 (or higher) [7].
local-timer-stop:
description:
diff --git a/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml b/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml
deleted file mode 100644
index 16a448974561..000000000000
--- a/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/cpu/nvidia,tegra186-ccplex-cluster.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: NVIDIA Tegra186 CCPLEX Cluster
-
-maintainers:
- - Thierry Reding <thierry.reding@gmail.com>
- - Jon Hunter <jonathanh@nvidia.com>
-
-properties:
- compatible:
- const: nvidia,tegra186-ccplex-cluster
-
- reg:
- maxItems: 1
-
- nvidia,bpmp:
- description: phandle to the BPMP used to query CPU frequency tables
- $ref: /schemas/types.yaml#/definitions/phandle
-
-additionalProperties: false
-
-required:
- - compatible
- - reg
- - nvidia,bpmp
-
-examples:
- - |
- ccplex@e000000 {
- compatible = "nvidia,tegra186-ccplex-cluster";
- reg = <0x0e000000 0x400000>;
- nvidia,bpmp = <&bpmp>;
- };
diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
index 0a9ed2848b7c..9c8c9991f29a 100644
--- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
@@ -137,7 +137,10 @@ patternProperties:
- const: fsl,sec-v4.0-rtic
reg:
- maxItems: 1
+ items:
+ - description: RTIC control and status register space.
+ - description: RTIC recoverable error indication register space.
+ minItems: 1
ranges:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
index 89c88004b41b..048b769a73c0 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
@@ -17,6 +17,7 @@ properties:
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
+ - qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng
- qcom,sm8450-trng
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
index 2ad0cd6dd49e..b78f64c9c5f4 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
@@ -92,12 +92,31 @@ properties:
reference to a valid DPI output or input endpoint node.
port@2:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
eDP/DP output port. The remote endpoint phandle should be a
reference to a valid eDP panel input endpoint node. This port is
optional, treated as DP panel if not defined
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ toshiba,pre-emphasis:
+ description:
+ DisplayPort output pre-emphasis settings for both DP lanes.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 2
+ maxItems: 2
+ items:
+ enum:
+ - 0 # No pre-emphasis
+ - 1 # 3.5dB pre-emphasis
+ - 2 # 6dB pre-emphasis
+
oneOf:
- required:
- port@0
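
[Editor's note: since toshiba,pre-emphasis is declared as a uint8-array, a
devicetree fragment has to force 8-bit cells with /bits/ 8. A minimal
hypothetical endpoint follows; the labels and the chosen value are
illustrative only.]

    port@2 {
        reg = <2>;
        bridge_out: endpoint {
            remote-endpoint = <&panel_in>;
            /* 3.5 dB pre-emphasis on both DP lanes (enum value 1) */
            toshiba,pre-emphasis = /bits/ 8 <1 1>;
        };
    };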
diff --git a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
index 0681fc49aa1b..8e3a98aeec32 100644
--- a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
+++ b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
@@ -50,6 +50,14 @@ properties:
- const: disp_axi
minItems: 1
+ dmas:
+ items:
+ - description: DMA specifier for the RX DMA channel.
+
+ dma-names:
+ items:
+ - const: rx
+
interrupts:
items:
- description: LCDIF DMA interrupt
@@ -156,6 +164,18 @@ allOf:
interrupts:
maxItems: 1
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx28-lcdif
+ then:
+ properties:
+ dmas: false
+ dma-names: false
+
examples:
- |
#include <dt-bindings/clock/imx6sx-clock.h>
diff --git a/Documentation/devicetree/bindings/display/lvds.yaml b/Documentation/devicetree/bindings/display/lvds.yaml
index 224db4932011..b74efbea3be2 100644
--- a/Documentation/devicetree/bindings/display/lvds.yaml
+++ b/Documentation/devicetree/bindings/display/lvds.yaml
@@ -16,7 +16,7 @@ maintainers:
description:
This binding extends the data mapping defined in lvds-data-mapping.yaml.
It supports reversing the bit order on the formats defined there in order
- to accomodate for even more specialized data formats, since a variety of
+ to accommodate for even more specialized data formats, since a variety of
data formats and layouts is used to drive LVDS displays.
properties:
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
index 5ca7679d5427..3a82aec9021c 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
@@ -62,6 +62,9 @@ properties:
- const: default
- const: sleep
+ power-domains:
+ maxItems: 1
+
port:
$ref: /schemas/graph.yaml#/properties/port
description:
@@ -76,6 +79,20 @@ required:
- clock-names
- port
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt6795-dpi
+ - mediatek,mt8173-dpi
+ - mediatek,mt8186-dpi
+ then:
+ properties:
+ power-domains: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.yaml b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
index 47e97669821c..d4a2033afea8 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
@@ -19,14 +19,15 @@ properties:
- qcom,hdmi-tx-8974
- qcom,hdmi-tx-8994
- qcom,hdmi-tx-8996
+ - qcom,hdmi-tx-8998
clocks:
minItems: 1
- maxItems: 5
+ maxItems: 8
clock-names:
minItems: 1
- maxItems: 5
+ maxItems: 8
reg:
minItems: 1
@@ -142,6 +143,7 @@ allOf:
properties:
clocks:
minItems: 5
+ maxItems: 5
clock-names:
items:
- const: mdp_core
@@ -151,6 +153,28 @@ allOf:
- const: extp
hdmi-mux-supplies: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,hdmi-tx-8998
+ then:
+ properties:
+ clocks:
+ minItems: 8
+ maxItems: 8
+ clock-names:
+ items:
+ - const: mdp_core
+ - const: iface
+ - const: core
+ - const: alt_iface
+ - const: extp
+ - const: bus
+ - const: mnoc
+ - const: iface_mmss
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
index 5eaccce13c21..6a82bd1ec763 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
@@ -9,20 +9,20 @@ title: BOE TH101MB31IG002-28A WXGA DSI Display Panel
maintainers:
- Manuel Traut <manut@mecka.net>
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
enum:
# BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel
- boe,th101mb31ig002-28a
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ - starry,er88577
reg:
maxItems: 1
backlight: true
enable-gpios: true
+ reset-gpios: true
power-supply: true
port: true
rotation: true
@@ -33,6 +33,20 @@ required:
- enable-gpios
- power-supply
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ properties:
+ compatible:
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ const: starry,er88577
+ then:
+ properties:
+ reset-gpios: false
+ else:
+ required:
+ - reset-gpios
+
additionalProperties: false
examples:
@@ -47,6 +61,7 @@ examples:
reg = <0>;
backlight = <&backlight_lcd0>;
enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 55 GPIO_ACTIVE_LOW>;
rotation = <90>;
power-supply = <&vcc_3v3>;
port {
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml
new file mode 100644
index 000000000000..dced98e1c69a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-ll2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-LL2 DSI Display Panel
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: boe,tv101wum-ll2
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ reset-gpios: true
+ vsp-supply: true
+ vsn-supply: true
+ port: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - vsp-supply
+ - vsn-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "boe,tv101wum-ll2";
+ reg = <0>;
+
+ vsn-supply = <&vsn_lcd>;
+ vsp-supply = <&vsp_lcd>;
+
+ reset-gpios = <&pio 45 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 644387e4fb6f..75ccabff308b 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -15,14 +15,12 @@ description:
such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with
a MIPI-DSI video interface.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
items:
- enum:
- hannstar,hsd060bhw4
+ - microchip,ac40t08a-mipi-panel
- powkiddy,x55-panel
- const: himax,hx8394
@@ -46,7 +44,6 @@ properties:
required:
- compatible
- reg
- - reset-gpios
- backlight
- port
- vcc-supply
@@ -54,6 +51,18 @@ required:
additionalProperties: false
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ enum:
+ - microchip,ac40t08a-mipi-panel
+ then:
+ required:
+ - reset-gpios
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
index cfd7cc9c8725..f80307579485 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
items:
- enum:
+ - densitron,dmt028vghmcmi-1d
- ortustech,com35h3p70ulc
- const: ilitek,ili9806e
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index 3d5bede98cf1..b8783eba3ddc 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -18,6 +18,7 @@ properties:
- enum:
- chongzhou,cz101b4001
- kingdisplay,kd101ne3-40ti
+ - melfas,lmfbx101117480
- radxa,display-10hd-ad001
- radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
index e78160d1aa24..10ed4b57232b 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml
@@ -84,11 +84,7 @@ properties:
- port@0
- port@1
- backlight: true
- enable-gpios: true
- power-supply: true
-
-additionalProperties: false
+unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 8a87e0100dcb..b89e39790579 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -158,6 +158,8 @@ properties:
- innolux,at070tn92
# Innolux G070ACE-L01 7" WVGA (800x480) TFT LCD panel
- innolux,g070ace-l01
+ # Innolux G070ACE-LH3 7" WVGA (800x480) TFT LCD panel with WLED backlight
+ - innolux,g070ace-lh3
# Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
- innolux,g070y2-l01
# Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel
@@ -222,6 +224,8 @@ properties:
- okaya,rs800480t-7x0gp
# Olimex 4.3" TFT LCD panel
- olimex,lcd-olinuxino-43-ts
+ # On Tat Industrial Company 5" DPI TFT panel.
+ - ontat,kd50g21-40nt-a1
# On Tat Industrial Company 7" DPI TFT panel.
- ontat,yx700wv03
# OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
index b348f5bf0a98..b07f3eca669b 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
@@ -20,21 +20,19 @@ description: |
Densitron DMT028VGHMCMI-1A is 480x640, 2-lane MIPI DSI LCD panel
which has built-in ST7701 chip.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
items:
- enum:
- anbernic,rg-arc-panel
+ - anbernic,rg28xx-panel
- densitron,dmt028vghmcmi-1a
- elida,kd50t048a
- techstar,ts8550b
- const: sitronix,st7701
reg:
- description: DSI virtual channel used by that screen
+ description: DSI / SPI channel used by that screen
maxItems: 1
VCC-supply:
@@ -43,6 +41,13 @@ properties:
IOVCC-supply:
description: I/O system regulator
+ dc-gpios:
+ maxItems: 1
+ description:
+ Controller data/command selection (D/CX) in 4-line SPI mode.
+ If not set, the controller is in 3-line SPI mode.
+ Disallowed for DSI.
+
port: true
reset-gpios: true
rotation: true
@@ -57,7 +62,38 @@ required:
- port
- reset-gpios
-additionalProperties: false
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ # SPI connected panels
+ enum:
+ - anbernic,rg28xx-panel
+ then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ # DSI or SPI without D/CX pin
+ enum:
+ - anbernic,rg-arc-panel
+ - anbernic,rg28xx-panel
+ - densitron,dmt028vghmcmi-1a
+ - elida,kd50t048a
+ - techstar,ts8550b
+ then:
+ required:
+ - dc-gpios
+ else:
+ properties:
+ dc-gpios: false
+
+unevaluatedProperties: false
examples:
- |
@@ -82,3 +118,26 @@ examples:
};
};
};
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "anbernic,rg28xx-panel", "sitronix,st7701";
+ reg = <0>;
+ spi-max-frequency = <3125000>;
+ VCC-supply = <&reg_lcd>;
+ IOVCC-supply = <&reg_lcd>;
+ reset-gpios = <&pio 8 14 GPIO_ACTIVE_HIGH>; /* LCD-RST: PI14 */
+ backlight = <&backlight>;
+
+ port {
+ panel_in_rgb: endpoint {
+ remote-endpoint = <&tcon_lcd0_out_lcd>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
index 08e5b9478051..95e3d5e74b87 100644
--- a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
oneOf:
- enum:
+ - renesas,r9a07g043u-du # RZ/G2UL
- renesas,r9a07g044-du # RZ/G2{L,LC}
- items:
- enum:
@@ -60,9 +61,6 @@ properties:
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
- required:
- - port@0
-
unevaluatedProperties: false
renesas,vsps:
@@ -88,6 +86,34 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a07g043u-du
+ then:
+ properties:
+ ports:
+ properties:
+ port@0:
+ description: DPI
+
+ required:
+ - port@0
+ else:
+ properties:
+ ports:
+ properties:
+ port@0:
+ description: DSI
+ port@1:
+ description: DPI
+
+ required:
+ - port@0
+ - port@1
+
examples:
# RZ/G2L DU
- |
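For the RZ/G2UL variant added above, port@0 becomes the DPI output and no DSI port exists. A hedged sketch (unit address, resource properties and phandles hypothetical):

    display@10890000 {
        compatible = "renesas,r9a07g043u-du";
        reg = <0x10890000 0x10000>;
        /* clocks, resets and power-domains elided */
        renesas,vsps = <&vspd0 0>;

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                reg = <0>;
                du_out_dpi: endpoint {
                    remote-endpoint = <&dpi_in>;
                };
            };
        };
    };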
diff --git a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
index 902a11f65be2..75957f9fb58b 100644
--- a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
@@ -28,6 +28,14 @@ properties:
- description: DMA Error interrupt
minItems: 1
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: ahb
+
"#dma-cells":
const: 1
@@ -42,15 +50,21 @@ required:
- reg
- interrupts
- "#dma-cells"
+ - clocks
+ - clock-names
additionalProperties: false
examples:
- |
+ #include <dt-bindings/clock/imx27-clock.h>
+
dma-controller@10001000 {
compatible = "fsl,imx27-dma";
reg = <0x10001000 0x1000>;
interrupts = <32 33>;
#dma-cells = <1>;
dma-channels = <16>;
+ clocks = <&clks IMX27_CLK_DMA_IPG_GATE>, <&clks IMX27_CLK_DMA_AHB_GATE>;
+ clock-names = "ipg", "ahb";
};
diff --git a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
index add9c77e8b52..a17cf2360dd4 100644
--- a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
@@ -11,6 +11,17 @@ maintainers:
allOf:
- $ref: dma-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx8qxp-dma-apbh
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
properties:
compatible:
@@ -20,6 +31,7 @@ properties:
- fsl,imx6q-dma-apbh
- fsl,imx6sx-dma-apbh
- fsl,imx7d-dma-apbh
+ - fsl,imx8qxp-dma-apbh
- const: fsl,imx28-dma-apbh
- enum:
- fsl,imx23-dma-apbh
@@ -42,6 +54,9 @@ properties:
dma-channels:
enum: [4, 8, 16]
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
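Only the new i.MX8QXP variant accepts power-domains, and then requires it. A hedged sketch (unit address, interrupt and power-domain specifiers hypothetical):

    dma-controller@5b810000 {
        compatible = "fsl,imx8qxp-dma-apbh", "fsl,imx28-dma-apbh";
        reg = <0x5b810000 0x2000>;
        interrupts = <32>;
        #dma-cells = <1>;
        dma-channels = <4>;
        power-domains = <&pd_dma_ch0>;
    };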
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
index 1b9ebdbe528a..9401b1f6300d 100644
--- a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
@@ -11,11 +11,14 @@ maintainers:
properties:
compatible:
- enum:
- - fsl,ls1021a-qdma
- - fsl,ls1028a-qdma
- - fsl,ls1043a-qdma
- - fsl,ls1046a-qdma
+ oneOf:
+ - const: fsl,ls1021a-qdma
+ - items:
+ - enum:
+ - fsl,ls1028a-qdma
+ - fsl,ls1043a-qdma
+ - fsl,ls1046a-qdma
+ - const: fsl,ls1021a-qdma
reg:
items:
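After this rework only the LS1021A string may stand alone; the newer SoCs must carry it as fallback. A compatible-only sketch (unit address hypothetical, remaining required properties elided):

    dma-controller@8380000 {
        compatible = "fsl,ls1028a-qdma", "fsl,ls1021a-qdma";
        /* reg, interrupts, clocks etc. unchanged */
    };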
diff --git a/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml
new file mode 100644
index 000000000000..4c7d2fb7b292
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/loongson,ls1b-apbdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 APB DMA Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description:
+ The Loongson-1 APB DMA controller provides 3 independent channels for
+ peripherals such as NAND, audio playback and capture.
+
+properties:
+ compatible:
+ oneOf:
+ - const: loongson,ls1b-apbdma
+ - items:
+ - enum:
+ - loongson,ls1a-apbdma
+ - loongson,ls1c-apbdma
+ - const: loongson,ls1b-apbdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: NAND interrupt
+ - description: Audio playback interrupt
+ - description: Audio capture interrupt
+
+ interrupt-names:
+ items:
+ - const: ch0
+ - const: ch1
+ - const: ch2
+
+ '#dma-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - '#dma-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ dma-controller@1fd01160 {
+ compatible = "loongson,ls1b-apbdma";
+ reg = <0x1fd01160 0x4>;
+ interrupt-parent = <&intc0>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>,
+ <14 IRQ_TYPE_EDGE_RISING>,
+ <15 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ch0", "ch1", "ch2";
+ #dma-cells = <1>;
+ };
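With #dma-cells fixed at 1, a consumer selects a channel with a single specifier cell. A hypothetical client sketch (the consumer compatible and dma-names are invented for illustration):

    nand-controller@1fe78000 {
        compatible = "loongson,ls1b-nand"; /* hypothetical consumer */
        reg = <0x1fe78000 0x24>;
        dmas = <&apbdma 0>; /* channel 0: NAND */
        dma-names = "rxtx";
    };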
diff --git a/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml
new file mode 100644
index 000000000000..646b4e779d8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/marvell,xor-v2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell XOR v2 engines
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+properties:
+ compatible:
+ oneOf:
+ - const: marvell,xor-v2
+ - items:
+ - enum:
+ - marvell,armada-7k-xor
+ - const: marvell,xor-v2
+
+ reg:
+ items:
+ - description: DMA registers
+ - description: global registers
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: reg
+
+ msi-parent:
+ description:
+ Phandle to the MSI-capable interrupt controller used for
+ interrupts.
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - msi-parent
+ - dma-coherent
+
+additionalProperties: false
+
+examples:
+ - |
+ xor0@6a0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6a0000 0x1000>, <0x6b0000 0x1000>;
+ clocks = <&ap_clk 0>, <&ap_clk 1>;
+ clock-names = "core", "reg";
+ msi-parent = <&gic_v2m0>;
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
deleted file mode 100644
index 9c38bbe7e6d7..000000000000
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Marvell XOR v2 engines
-
-Required properties:
-- compatible: one of the following values:
- "marvell,armada-7k-xor"
- "marvell,xor-v2"
-- reg: Should contain registers location and length (two sets)
- the first set is the DMA registers
- the second set is the global registers
-- msi-parent: Phandle to the MSI-capable interrupt controller used for
- interrupts.
-
-Optional properties:
-- clocks: Optional reference to the clocks used by the XOR engine.
-- clock-names: mandatory if there is a second clock, in this case the
- name must be "core" for the first clock and "reg" for the second
- one
-
-
-Example:
-
- xor0@400000 {
- compatible = "marvell,xor-v2";
- reg = <0x400000 0x1000>,
- <0x410000 0x1000>;
- msi-parent = <&gic_v2m0>;
- dma-coherent;
- };
diff --git a/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml b/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml
new file mode 100644
index 000000000000..32f208744154
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/nxp,lpc3220-dmamux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DMA multiplexer for LPC32XX SoC (DMA request router)
+
+maintainers:
+ - J.M.B. Downing <jonathan.downing@nautel.com>
+ - Piotr Wojtaszczyk <piotr.wojtaszczyk@timesys.com>
+
+allOf:
+ - $ref: dma-router.yaml#
+
+properties:
+ compatible:
+ const: nxp,lpc3220-dmamux
+
+ reg:
+ maxItems: 1
+
+ dma-masters:
+ description: phandle to a dma node compatible with arm,pl080
+ maxItems: 1
+
+ "#dma-cells":
+ const: 3
+ description: |
+ The first two cells are the same as for the device pointed to by dma-masters.
+ The third cell represents the mux value for the request.
+
+required:
+ - compatible
+ - reg
+ - dma-masters
+
+additionalProperties: false
+
+examples:
+ - |
+ dma-router@7c {
+ compatible = "nxp,lpc3220-dmamux";
+ reg = <0x7c 0x8>;
+ dma-masters = <&dma>;
+ #dma-cells = <3>;
+ };
+
+...
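A consumer passes the two cells expected by the arm,pl080 master followed by the mux value. A hypothetical client sketch:

    audio@20088000 {
        compatible = "nxp,lpc3220-i2s"; /* hypothetical consumer */
        reg = <0x20088000 0x800>;
        /* two PL080 cells, then the mux value for this request */
        dmas = <&dmamux 0 1 10>;
        dma-names = "tx";
    };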
diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
index a42b6a26a6d3..ca24cf48769f 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
@@ -19,6 +19,7 @@ properties:
- renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
+ - renesas,r9a08g045-dmac # RZ/G3S
- const: renesas,rz-dmac
reg:
diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
index 47e477cce6d2..1f9831540c97 100644
--- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
@@ -20,7 +20,7 @@ Optional properties:
memcpy channels in eDMA.
Notes:
-When requesting channel via ti,dra7-dma-crossbar, the DMA clinet must request
+When requesting channel via ti,dra7-dma-crossbar, the DMA client must request
the DMA event number as crossbar ID (input to the DMA crossbar).
For ti,am335x-edma-crossbar: the meaning of parameters of dmas for clients:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
index 769ce23aaac2..ac3198953b8e 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
+++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
@@ -24,7 +24,9 @@ properties:
const: 1
compatible:
- const: xlnx,zynqmp-dma-1.0
+ enum:
+ - amd,versal2-dma-1.0
+ - xlnx,zynqmp-dma-1.0
reg:
description: memory map for gdma/adma module access
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index e396e47b2f13..b6239ec3512b 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -116,6 +116,7 @@ properties:
- const: atmel,24c02
- items:
- enum:
+ - giantec,gt24c04a
- onnn,cat24c04
- onnn,cat24c05
- rohm,br24g04
diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
index 4d823f3b1f0e..54d7d11bfed4 100644
--- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
+++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
@@ -22,6 +22,9 @@ description: |
[0] https://developer.arm.com/documentation/den0056/latest
+anyOf:
+ - $ref: /schemas/firmware/nxp,imx95-scmi.yaml
+
properties:
$nodename:
const: scmi
@@ -121,6 +124,13 @@ properties:
atomic mode of operation, even if requested.
default: 0
+ max-rx-timeout-ms:
+ description:
+ An optional time value, expressed in milliseconds, representing the
+ transport maximum timeout value for the receive channel. If set, the
+ value should be non-zero.
+ minimum: 1
+
arm,smc-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
@@ -145,6 +155,14 @@ properties:
required:
- '#power-domain-cells'
+ protocol@12:
+ $ref: '#/$defs/protocol-node'
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x12
+
protocol@13:
$ref: '#/$defs/protocol-node'
unevaluatedProperties: false
@@ -284,7 +302,7 @@ properties:
required:
- reg
-additionalProperties: false
+unevaluatedProperties: false
$defs:
protocol-node:
diff --git a/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml
new file mode 100644
index 000000000000..1a95010a546b
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2024 NXP
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/nxp,imx95-scmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX95 System Control and Management Interface (SCMI) Vendor Protocols Extension
+
+maintainers:
+ - Peng Fan <peng.fan@nxp.com>
+
+properties:
+ protocol@81:
+ $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x81
+
+ protocol@84:
+ $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node'
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ const: 0x84
+
+ nxp,ctrl-ids:
+ description:
+ Each entry consists of 2 integers, representing the ctrl id and the value
+ items:
+ items:
+ - description: the ctrl id index
+ enum: [0, 1, 2, 3, 4, 5, 6, 7, 0x8000, 0x8001, 0x8002, 0x8003,
+ 0x8004, 0x8005, 0x8006, 0x8007]
+ - description: the value assigned to the ctrl id
+ minItems: 1
+ maxItems: 16
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+
+additionalProperties: true
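The schema ships no example of its own; a hedged sketch of the two vendor protocol nodes inside an SCMI node, with ctrl id/value pairs chosen arbitrarily from the allowed enum:

    firmware {
        scmi {
            compatible = "arm,scmi";
            /* transport properties elided */
            #address-cells = <1>;
            #size-cells = <0>;

            protocol@81 {
                reg = <0x81>;
            };

            protocol@84 {
                reg = <0x84>;
                nxp,ctrl-ids = <0 1>, <0x8000 1>;
            };
        };
    };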
diff --git a/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml b/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml
index c21549e0fba6..089166089498 100644
--- a/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml
+++ b/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml
@@ -18,6 +18,7 @@ description:
allOf:
- $ref: gnss-common.yaml#
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/gnss/gnss-common.yaml b/Documentation/devicetree/bindings/gnss/gnss-common.yaml
index 963b926e30a7..d4430d2d6855 100644
--- a/Documentation/devicetree/bindings/gnss/gnss-common.yaml
+++ b/Documentation/devicetree/bindings/gnss/gnss-common.yaml
@@ -35,11 +35,6 @@ properties:
GPIO line, this is used.
maxItems: 1
- current-speed:
- description: The baudrate in bits per second of the device as it comes
- online, current active speed.
- $ref: /schemas/types.yaml#/definitions/uint32
-
additionalProperties: true
examples:
diff --git a/Documentation/devicetree/bindings/gnss/mediatek.yaml b/Documentation/devicetree/bindings/gnss/mediatek.yaml
index c0eb35beb2ef..2b9e5be4ebf3 100644
--- a/Documentation/devicetree/bindings/gnss/mediatek.yaml
+++ b/Documentation/devicetree/bindings/gnss/mediatek.yaml
@@ -15,6 +15,7 @@ description:
allOf:
- $ref: gnss-common.yaml#
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.yaml b/Documentation/devicetree/bindings/gnss/sirfstar.yaml
index 0bbe684d82e1..7e5da89a5ad7 100644
--- a/Documentation/devicetree/bindings/gnss/sirfstar.yaml
+++ b/Documentation/devicetree/bindings/gnss/sirfstar.yaml
@@ -21,6 +21,7 @@ description:
allOf:
- $ref: gnss-common.yaml#
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
index cd80668182b6..7d4b6d49e5ee 100644
--- a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
+++ b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml
@@ -8,6 +8,7 @@ title: U-blox GNSS Receiver
allOf:
- $ref: gnss-common.yaml#
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
maintainers:
- Johan Hovold <johan@kernel.org>
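The same $ref is what keeps current-speed valid now that gnss-common no longer defines it (see the gnss-common hunk above). A minimal sketch of a receiver on a serial bus (regulator phandles hypothetical):

    serial {
        gnss {
            compatible = "u-blox,neo-6m";
            v-bckp-supply = <&gnss_v_bckp_reg>;
            vcc-supply = <&gnss_vcc_reg>;
            current-speed = <9600>;
        };
    };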
diff --git a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
index c0ad70e66f76..e8bc9f018edb 100644
--- a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
+++ b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml
@@ -36,19 +36,8 @@ properties:
patternProperties:
"^(hog-[0-9]+|.+-hog(-[0-9]+)?)$":
type: object
-
- properties:
- gpio-hog: true
- gpios: true
- output-high: true
- output-low: true
- line-name: true
-
required:
- gpio-hog
- - gpios
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml b/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml
index 65b6970e42fb..b74fa81e7d05 100644
--- a/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml
+++ b/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml
@@ -28,6 +28,7 @@ properties:
patternProperties:
"^(hog-[0-9]+|.+-hog(-[0-9]+)?)$":
+ type: object
required:
- gpio-hog
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
index e1fc8bb6d379..6b06609c649e 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
@@ -85,19 +85,8 @@ properties:
patternProperties:
"^(hog-[0-9]+|.+-hog(-[0-9]+)?)$":
type: object
- properties:
- gpio-hog: true
- gpios: true
- input: true
- output-high: true
- output-low: true
- line-name: true
-
required:
- gpio-hog
- - gpios
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
index 10e56cf306db..1434d08f8b74 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
@@ -32,6 +32,8 @@ properties:
gpio-ranges: true
+ gpio-reserved-ranges: true
+
gpio-line-names:
description: strings describing the names of each gpio line.
minItems: 1
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
index 51e8390d6b32..7b1eb08fa055 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
@@ -107,19 +107,8 @@ properties:
patternProperties:
"^(hog-[0-9]+|.+-hog(-[0-9]+)?)$":
type: object
- properties:
- gpio-hog: true
- gpios: true
- input: true
- output-high: true
- output-low: true
- line-name: true
-
required:
- gpio-hog
- - gpios
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt b/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt
deleted file mode 100644
index 49819367a011..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-NXP LPC32xx SoC GPIO controller
-
-Required properties:
-- compatible: must be "nxp,lpc3220-gpio"
-- reg: Physical base address and length of the controller's registers.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells: Should be 3:
- 1) bank:
- 0: GPIO P0
- 1: GPIO P1
- 2: GPIO P2
- 3: GPIO P3
- 4: GPI P3
- 5: GPO P3
- 2) pin number
- 3) optional parameters:
- - bit 0 specifies polarity (0 for normal, 1 for inverted)
-- reg: Index of the GPIO group
-
-Example:
-
- gpio: gpio@40028000 {
- compatible = "nxp,lpc3220-gpio";
- reg = <0x40028000 0x1000>;
- gpio-controller;
- #gpio-cells = <3>; /* bank, pin, flags */
- };
-
- leds {
- compatible = "gpio-leds";
-
- led0 {
- gpios = <&gpio 5 1 1>; /* GPO_P3 1, active low */
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
-
- led1 {
- gpios = <&gpio 5 14 1>; /* GPO_P3 14, active low */
- linux,default-trigger = "timer";
- default-state = "off";
- };
- };
diff --git a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
index d61569b3f15b..d78da7dd2a56 100644
--- a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
@@ -49,20 +49,8 @@ properties:
patternProperties:
"^.+-hog(-[0-9]+)?$":
type: object
-
- additionalProperties: false
-
- properties:
- gpio-hog: true
- gpios: true
- input: true
- output-high: true
- output-low: true
- line-name: true
-
required:
- gpio-hog
- - gpios
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml b/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml
new file mode 100644
index 000000000000..25b5494393cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/nxp,lpc3220-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC3220 SoC GPIO controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 3
+ description: |
+ 1) bank:
+ 0: GPIO P0
+ 1: GPIO P1
+ 2: GPIO P2
+ 3: GPIO P3
+ 4: GPI P3
+ 5: GPO P3
+ 2) pin number
+ 3) flags:
+ - bit 0 specifies polarity (0 for normal, 1 for inverted)
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio@40028000 {
+ compatible = "nxp,lpc3220-gpio";
+ reg = <0x40028000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <3>; /* bank, pin, flags */
+ };
diff --git a/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
index 228fa27ffdc3..36f5a0610471 100644
--- a/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
@@ -55,19 +55,8 @@ properties:
patternProperties:
"^.+-hog(-[0-9]+)?$":
type: object
- properties:
- gpio-hog: true
- gpios: true
- input: true
- output-high: true
- output-low: true
- line-name: true
-
required:
- gpio-hog
- - gpios
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml
new file mode 100644
index 000000000000..abe11df25761
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwlock/sprd,hwspinlock-r3p0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum hardware spinlock
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,hwspinlock-r3p0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: enable
+
+ '#hwlock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#hwlock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+
+ hwlock@40500000 {
+ compatible = "sprd,hwspinlock-r3p0";
+ reg = <0x40500000 0x1000>;
+ clocks = <&aon_gate CLK_SPLK_EB>;
+ clock-names = "enable";
+ #hwlock-cells = <1>;
+ };
+...
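Consumers take locks through the generic hwlock client binding. A hedged sketch (the consumer node and lock index are hypothetical):

    mailbox@400a0000 {
        compatible = "sprd,sc9860-mailbox"; /* hypothetical consumer */
        reg = <0x400a0000 0x8000>;
        hwlocks = <&hwlock 1>;
        hwlock-names = "comm";
    };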
diff --git a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
deleted file mode 100644
index 581db9d941ba..000000000000
--- a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-SPRD Hardware Spinlock Device Binding
--------------------------------------
-
-Required properties :
-- compatible : should be "sprd,hwspinlock-r3p0".
-- reg : the register address of hwspinlock.
-- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
- hwlock, so the number of cells should be <1> here.
-- clock-names : Must contain "enable".
-- clocks : Must contain a phandle entry for the clock in clock-names, see the
- common clock bindings.
-
-Please look at the generic hwlock binding for usage information for consumers,
-"Documentation/devicetree/bindings/hwlock/hwlock.txt"
-
-Example of hwlock provider:
- hwspinlock@40500000 {
- compatible = "sprd,hwspinlock-r3p0";
- reg = <0 0x40500000 0 0x1000>;
- #hwlock-cells = <1>;
- clock-names = "enable";
- clocks = <&clk_aon_apb_gates0 22>;
- };
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index 051c976ab711..79e8d62fa3b3 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -45,12 +45,31 @@ properties:
the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm
uses a logic high output for 100% duty cycle.
$ref: /schemas/types.yaml#/definitions/uint32-array
+ deprecated: true
minItems: 3
maxItems: 3
items:
enum: [0, 1]
default: 1
+ "#pwm-cells":
+ const: 4
+ description: |
+ Number of cells in a PWM specifier.
+ - 0: The PWM channel
+ - 1: The PWM period in nanoseconds
+ - 90909091 (11 Hz)
+ - 71428571 (14 Hz)
+ - 45454545 (22 Hz)
+ - 34482759 (29 Hz)
+ - 28571429 (35 Hz)
+ - 22727273 (44 Hz)
+ - 17241379 (58 Hz)
+ - 11363636 (88 Hz)
+ - 44444 (22 kHz)
+ - 2: PWM flags 0 or PWM_POLARITY_INVERTED
+ - 3: The default PWM duty cycle in nanoseconds
+
patternProperties:
"^adi,bypass-attenuator-in[0-4]$":
description: |
@@ -81,6 +100,10 @@ patternProperties:
- smbalert#
- gpio
+ "^fan-[0-9]+$":
+ $ref: fan-common.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
@@ -89,17 +112,27 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/pwm/pwm.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
- hwmon@2e {
+ pwm: hwmon@2e {
compatible = "adi,adt7476";
reg = <0x2e>;
adi,bypass-attenuator-in0 = <1>;
adi,bypass-attenuator-in1 = <0>;
- adi,pwm-active-state = <1 0 1>;
adi,pin10-function = "smbalert#";
adi,pin14-function = "tach4";
+ #pwm-cells = <4>;
+
+ /* PWMs at 22.5 kHz frequency, 50% duty */
+ fan-0 {
+ pwms = <&pwm 0 44444 0 22222>;
+ };
+
+ fan-1 {
+ pwms = <&pwm 2 44444 0 22222>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
new file mode 100644
index 000000000000..1f98da32f3fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/lltc,ltc2978.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Octal Digital Power-supply monitor/supervisor/sequencer/margin controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - lltc,ltc2972
+ - lltc,ltc2974
+ - lltc,ltc2975
+ - lltc,ltc2977
+ - lltc,ltc2978
+ - lltc,ltc2979
+ - lltc,ltc2980
+ - lltc,ltc3880
+ - lltc,ltc3882
+ - lltc,ltc3883
+ - lltc,ltc3884
+ - lltc,ltc3886
+ - lltc,ltc3887
+ - lltc,ltc3889
+ - lltc,ltc7880
+ - lltc,ltm2987
+ - lltc,ltm4664
+ - lltc,ltm4675
+ - lltc,ltm4676
+ - lltc,ltm4677
+ - lltc,ltm4678
+ - lltc,ltm4680
+ - lltc,ltm4686
+ - lltc,ltm4700
+
+ reg:
+ maxItems: 1
+
+ regulators:
+ type: object
+ description: |
+ list of regulators provided by this controller.
+ Valid names of regulators depend on number of supplies supported per device:
+ * ltc2972 vout0 - vout1
+ * ltc2974, ltc2975 : vout0 - vout3
+ * ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7
+ * ltc2978 : vout0 - vout7
+ * ltc3880, ltc3882, ltc3884, ltc3886, ltc3887, ltc3889 : vout0 - vout1
+ * ltc7880 : vout0 - vout1
+ * ltc3883 : vout0
+ * ltm4664 : vout0 - vout1
+ * ltm4675, ltm4676, ltm4677, ltm4678 : vout0 - vout1
+ * ltm4680, ltm4686 : vout0 - vout1
+ * ltm4700 : vout0 - vout1
+
+ patternProperties:
+ "^vout[0-7]$":
+ $ref: /schemas/regulator/regulator.yaml#
+ type: object
+ unevaluatedProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator@5e {
+ compatible = "lltc,ltc2978";
+ reg = <0x5e>;
+
+ regulators {
+ vout0 {
+ regulator-name = "FPGA-2.5V";
+ };
+ vout2 {
+ regulator-name = "FPGA-1.5V";
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/hwmon/ltc2978.txt b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
deleted file mode 100644
index 4e7f6215a453..000000000000
--- a/Documentation/devicetree/bindings/hwmon/ltc2978.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-ltc2978
-
-Required properties:
-- compatible: should contain one of:
- * "lltc,ltc2972"
- * "lltc,ltc2974"
- * "lltc,ltc2975"
- * "lltc,ltc2977"
- * "lltc,ltc2978"
- * "lltc,ltc2979"
- * "lltc,ltc2980"
- * "lltc,ltc3880"
- * "lltc,ltc3882"
- * "lltc,ltc3883"
- * "lltc,ltc3884"
- * "lltc,ltc3886"
- * "lltc,ltc3887"
- * "lltc,ltc3889"
- * "lltc,ltc7880"
- * "lltc,ltm2987"
- * "lltc,ltm4664"
- * "lltc,ltm4675"
- * "lltc,ltm4676"
- * "lltc,ltm4677"
- * "lltc,ltm4678"
- * "lltc,ltm4680"
- * "lltc,ltm4686"
- * "lltc,ltm4700"
-- reg: I2C slave address
-
-Optional properties:
-- regulators: A node that houses a sub-node for each regulator controlled by
- the device. Each sub-node is identified using the node's name, with valid
- values listed below. The content of each sub-node is defined by the
- standard binding for regulators; see regulator.txt.
-
-Valid names of regulators depend on number of supplies supported per device:
- * ltc2972 vout0 - vout1
- * ltc2974, ltc2975 : vout0 - vout3
- * ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7
- * ltc2978 : vout0 - vout7
- * ltc3880, ltc3882, ltc3884, ltc3886, ltc3887, ltc3889 : vout0 - vout1
- * ltc7880 : vout0 - vout1
- * ltc3883 : vout0
- * ltm4664 : vout0 - vout1
- * ltm4675, ltm4676, ltm4677, ltm4678 : vout0 - vout1
- * ltm4680, ltm4686 : vout0 - vout1
- * ltm4700 : vout0 - vout1
-
-Example:
-ltc2978@5e {
- compatible = "lltc,ltc2978";
- reg = <0x5e>;
- regulators {
- vout0 {
- regulator-name = "FPGA-2.5V";
- };
- vout2 {
- regulator-name = "FPGA-1.5V";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml
new file mode 100644
index 000000000000..b1ff496f87f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/maxim,max31790.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The Maxim MAX31790 Fan Controller
+
+maintainers:
+ - Guenter Roeck <linux@roeck-us.net>
+ - Chanh Nguyen <chanh@os.amperecomputing.com>
+
+description: >
+ The MAX31790 controls the speeds of up to six fans using six
+ independent PWM outputs. The desired fan speeds (or PWM duty cycles)
+ are written through the I2C interface.
+
+ Datasheets:
+ https://datasheets.maximintegrated.com/en/ds/MAX31790.pdf
+
+properties:
+ compatible:
+ const: maxim,max31790
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 1
+
+patternProperties:
+ "^fan-[0-9]+$":
+ $ref: fan-common.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwm_provider: fan-controller@20 {
+ compatible = "maxim,max31790";
+ reg = <0x20>;
+ clocks = <&sys_clk>;
+ resets = <&reset 0>;
+ #pwm-cells = <1>;
+
+ fan-0 {
+ pwms = <&pwm_provider 1>;
+ };
+
+ fan-1 {
+ pwms = <&pwm_provider 2>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml
new file mode 100644
index 000000000000..f0667ac41d75
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/sophgo,sg2042-hwmon-mcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2042 onboard MCU support
+
+maintainers:
+ - Inochi Amaoto <inochiama@outlook.com>
+
+properties:
+ compatible:
+ const: sophgo,sg2042-hwmon-mcu
+
+ reg:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#thermal-sensor-cells"
+
+allOf:
+ - $ref: /schemas/thermal/thermal-sensor.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hwmon@17 {
+ compatible = "sophgo,sg2042-hwmon-mcu";
+ reg = <0x17>;
+ #thermal-sensor-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
index 6df27b47b922..5b9bd2feda3b 100644
--- a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
@@ -44,11 +44,6 @@ properties:
description: frequency of the bus clock in Hz defaults to 100 kHz when not
specified
- multi-master:
- type: boolean
- description:
- states that there is another master active on this bus
-
required:
- reg
- compatible
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index 82b9d6682297..a9dae5b52f28 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -38,6 +38,7 @@ properties:
- rockchip,rk3308-i2c
- rockchip,rk3328-i2c
- rockchip,rk3568-i2c
+ - rockchip,rk3576-i2c
- rockchip,rk3588-i2c
- rockchip,rv1126-i2c
- const: rockchip,rk3399-i2c
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt b/Documentation/devicetree/bindings/i2c/i2c-sprd.txt
deleted file mode 100644
index 7b6b3b8d0d11..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-I2C for Spreadtrum platforms
-
-Required properties:
-- compatible: Should be "sprd,sc9860-i2c".
-- reg: Specify the physical base address of the controller and length
- of memory mapped region.
-- interrupts: Should contain I2C interrupt.
-- clock-names: Should contain following entries:
- "i2c" for I2C clock,
- "source" for I2C source (parent) clock,
- "enable" for I2C module enable clock.
-- clocks: Should contain a clock specifier for each entry in clock-names.
-- clock-frequency: Contains desired I2C bus clock frequency in Hz.
-- #address-cells: Should be 1 to describe address cells for I2C device address.
-- #size-cells: Should be 0 means no size cell for I2C device address.
-
-Optional properties:
-- Child nodes conforming to I2C bus binding
-
-Examples:
-i2c0: i2c@70500000 {
- compatible = "sprd,sc9860-i2c";
- reg = <0 0x70500000 0 0x1000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c", "source", "enable";
- clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
-};
-
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
index 92fbc1a2671a..b57ae6963e62 100644
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
@@ -103,6 +103,9 @@ properties:
items:
- const: i2c
+ power-domains:
+ maxItems: 1
+
dmas:
items:
- description: DMA channel for the reception FIFO
@@ -124,6 +127,8 @@ allOf:
- nvidia,tegra30-i2c
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: div-clk
@@ -133,20 +138,13 @@ allOf:
properties:
compatible:
contains:
- const: nvidia,tegra114-i2c
- then:
- properties:
- clock-names:
- items:
- - const: div-clk
-
- - if:
- properties:
- compatible:
- contains:
- const: nvidia,tegra210-i2c
+ enum:
+ - nvidia,tegra114-i2c
+ - nvidia,tegra210-i2c
then:
properties:
+ clocks:
+ maxItems: 1
clock-names:
items:
- const: div-clk
@@ -158,6 +156,8 @@ allOf:
const: nvidia,tegra210-i2c-vi
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: div-clk
@@ -165,6 +165,9 @@ allOf:
power-domains:
items:
- description: phandle to the VENC power domain
+ else:
+ properties:
+ power-domains: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index c33ae7b63b84..7dab3852c7f8 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -130,6 +130,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 4
maxItems: 4
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
index 7993fe463c4c..505a8ec92266 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -25,6 +25,10 @@ properties:
- renesas,riic-r9a07g054 # RZ/V2L
- const: renesas,riic-rz # RZ/A or RZ/G2L
+ - items:
+ - const: renesas,riic-r9a08g045 # RZ/G3S
+ - const: renesas,riic-r9a09g057 # RZ/V2H(P)
+
- const: renesas,riic-r9a09g057 # RZ/V2H(P)
reg:
diff --git a/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml
new file mode 100644
index 000000000000..ec0d39e73d26
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/sprd,sc9860-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC9860 I2C controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: sprd,sc9860-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: I2C clock
+ - description: I2C source (parent) clock
+ - description: I2C module enable clock
+
+ clock-names:
+ items:
+ - const: i2c
+ - const: source
+ - const: enable
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - clock-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c@70500000 {
+ compatible = "sprd,sc9860-i2c";
+ reg = <0x70500000 0x1000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>;
+ clock-names = "i2c", "source", "enable";
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml
new file mode 100644
index 000000000000..28139b676661
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/tsd,mule-i2c-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Theobroma Systems Mule I2C multiplexer
+
+maintainers:
+ - Farouk Bouabid <farouk.bouabid@cherry.de>
+ - Quentin Schulz <quentin.schulz@cherry.de>
+
+description: |
+ Theobroma Systems Mule is an MCU that emulates a set of I2C devices, among
+ which devices that are reachable through an I2C-mux. The devices on the mux
+ can be selected by writing the appropriate device number to an I2C config
+ register.
+
+
+ +--------------------------------------------------+
+ | Mule |
+ 0x18| +---------------+ |
+ -------->|Config register|----+ |
+ | +---------------+ | |
+ | V_ |
+ | | \ +--------+ |
+ | | \-------->| dev #0 | |
+ | | | +--------+ |
+ 0x6f| | M |-------->| dev #1 | |
+ ---------------------------->| U | +--------+ |
+ | | X |-------->| dev #2 | |
+ | | | +--------+ |
+ | | /-------->| dev #3 | |
+ | |__/ +--------+ |
+ +--------------------------------------------------+
+
+
+allOf:
+ - $ref: /schemas/i2c/i2c-mux.yaml#
+
+properties:
+ compatible:
+ const: tsd,mule-i2c-mux
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c-mux {
+ compatible = "tsd,mule-i2c-mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@6f {
+ compatible = "isil,isl1208";
+ reg = <0x6f>;
+ };
+ };
+ };
+...
+
diff --git a/Documentation/devicetree/bindings/iio/accel/lis302.txt b/Documentation/devicetree/bindings/iio/accel/lis302.txt
index 764e28ec1a0a..457539647f36 100644
--- a/Documentation/devicetree/bindings/iio/accel/lis302.txt
+++ b/Documentation/devicetree/bindings/iio/accel/lis302.txt
@@ -36,7 +36,7 @@ Optional properties for all bus drivers:
- st,irq{1,2}-disable: disable IRQ 1/2
- st,irq{1,2}-ff-wu-1: raise IRQ 1/2 on FF_WU_1 condition
- st,irq{1,2}-ff-wu-2: raise IRQ 1/2 on FF_WU_2 condition
- - st,irq{1,2}-data-ready: raise IRQ 1/2 on data ready contition
+ - st,irq{1,2}-data-ready: raise IRQ 1/2 on data ready condition
- st,irq{1,2}-click: raise IRQ 1/2 on click condition
- st,irq-open-drain: consider IRQ lines open-drain
- st,irq-active-low: make IRQ lines active low
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
new file mode 100644
index 000000000000..e413a9d8d2a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad4000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD4000 and similar Analog to Digital Converters
+
+maintainers:
+ - Marcelo Schmitt <marcelo.schmitt@analog.com>
+
+description: |
+ Analog Devices AD4000 family of Analog to Digital Converters with SPI support.
+ Specifications can be found at:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4000-4004-4008.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4001-4005.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4002-4006-4010.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4003-4007-4011.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4020-4021-4022.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4001.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4003.pdf
+
+$ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: adi,ad4000
+ - items:
+ - enum:
+ - adi,ad4004
+ - adi,ad4008
+ - const: adi,ad4000
+
+ - const: adi,ad4001
+ - items:
+ - enum:
+ - adi,ad4005
+ - const: adi,ad4001
+
+ - const: adi,ad4002
+ - items:
+ - enum:
+ - adi,ad4006
+ - adi,ad4010
+ - const: adi,ad4002
+
+ - const: adi,ad4003
+ - items:
+ - enum:
+ - adi,ad4007
+ - adi,ad4011
+ - const: adi,ad4003
+
+ - const: adi,ad4020
+ - items:
+ - enum:
+ - adi,ad4021
+ - adi,ad4022
+ - const: adi,ad4020
+
+ - const: adi,adaq4001
+
+ - const: adi,adaq4003
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 102040816 # for VIO > 2.7 V, 81300813 for VIO > 1.7 V
+
+ adi,sdi-pin:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ high, low, cs, sdi ]
+ default: sdi
+ description:
+ Describes how the ADC SDI pin is wired. A value of "sdi" indicates that
+ the ADC SDI is connected to host SDO. "high" indicates that the ADC SDI
+ pin is hard-wired to logic high (VIO). "low" indicates that it is
+ hard-wired low (GND). "cs" indicates that the ADC SDI pin is connected to
+ the host CS line.
+
+ '#daisy-chained-devices': true
+
+ vdd-supply:
+ description: A 1.8V supply that powers the chip (VDD).
+
+ vio-supply:
+ description:
+ A 1.8V to 5.5V supply for the digital inputs and outputs (VIO).
+
+ ref-supply:
+ description:
+ A 2.5 to 5V supply for the external reference voltage (REF).
+
+ cnv-gpios:
+ description:
+ When provided, this property indicates the GPIO that is connected to the
+ CNV pin.
+ maxItems: 1
+
+ adi,high-z-input:
+ type: boolean
+ description:
+ High-Z mode allows the amplifier and RC filter in front of the ADC to be
+ chosen based on the signal bandwidth of interest, rather than the settling
+ requirements of the switched capacitor SAR ADC inputs.
+
+ adi,gain-milli:
+ description: |
+ The hardware gain applied to the ADC input (in milli units).
+ The gain provided by the ADC input scaler is defined by the hardware
+ connections between chip pins OUT+, R1K-, R1K1-, R1K+, R1K1+, and OUT-.
+ If not present, defaults to 1000 (no actual gain applied).
+ $ref: /schemas/types.yaml#/definitions/uint16
+ enum: [454, 909, 1000, 1900]
+ default: 1000
+
+ interrupts:
+ description:
+ The SDO pin can also function as a busy indicator. This node should be
+ connected to an interrupt that is triggered when the SDO line goes low
+ while the SDI line is high and the CNV line is low ("3-wire" mode) or the
+ SDI line is low and the CNV line is high ("4-wire" mode); or when the SDO
+ line goes high while the SDI and CNV lines are high (chain mode),
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - vio-supply
+ - ref-supply
+
+allOf:
+ # The configuration register can only be accessed if SDI is connected to MOSI
+ - if:
+ required:
+ - adi,sdi-pin
+ then:
+ properties:
+ adi,high-z-input: false
+ # chain mode has lower SCLK max rate
+ - if:
+ required:
+ - '#daisy-chained-devices'
+ then:
+ properties:
+ spi-max-frequency:
+ maximum: 50000000 # for VIO > 2.7 V, 40000000 for VIO > 1.7 V
+ # Gain property only applies to ADAQ devices
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - adi,adaq4001
+ - adi,adaq4003
+ then:
+ properties:
+ adi,gain-milli: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adc@0 {
+ compatible = "adi,ad4020";
+ reg = <0>;
+ spi-max-frequency = <71000000>;
+ vdd-supply = <&supply_1_8V>;
+ vio-supply = <&supply_1_8V>;
+ ref-supply = <&supply_5V>;
+ adi,sdi-pin = "cs";
+ cnv-gpios = <&gpio0 88 GPIO_ACTIVE_HIGH>;
+ };
+ };
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adc@0 {
+ compatible = "adi,adaq4003";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ vdd-supply = <&supply_1_8V>;
+ vio-supply = <&supply_1_8V>;
+ ref-supply = <&supply_5V>;
+ adi,high-z-input;
+ adi,gain-milli = /bits/ 16 <454>;
+ };
+ };
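For chain mode, the allOf block above caps SCLK at 50 MHz and the configuration register is unreachable. A hedged sketch of a daisy-chained node, with supply phandles as in the examples above:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        adc@0 {
            compatible = "adi,ad4000";
            reg = <0>;
            spi-max-frequency = <40000000>; /* <= 50 MHz in chain mode */
            #daisy-chained-devices = <2>;
            vdd-supply = <&supply_1_8V>;
            vio-supply = <&supply_1_8V>;
            ref-supply = <&supply_5V>;
        };
    };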
diff --git a/Documentation/devicetree/bindings/incomplete-devices.yaml b/Documentation/devicetree/bindings/incomplete-devices.yaml
index cfc1d39441b1..4bb6c0141e9f 100644
--- a/Documentation/devicetree/bindings/incomplete-devices.yaml
+++ b/Documentation/devicetree/bindings/incomplete-devices.yaml
@@ -35,40 +35,184 @@ properties:
- description: Legacy compatibles used on Macintosh devices
enum:
+ - AAPL,3500
+ - AAPL,7500
+ - AAPL,8500
+ - AAPL,9500
+ - AAPL,accelerometer_1
+ - AAPL,e411
+ - AAPL,Gossamer
+ - AAPL,PowerBook1998
+ - AAPL,ShinerESB
- adm1030
+ - amd-0137
+ - B5221
- bmac+
+ - burgundy
+ - cobalt
+ - cy28508
+ - daca
+ - fcu
+ - gatwick
+ - gmac
+ - heathrow
+ - heathrow-ata
- heathrow-media-bay
+ - i2sbus
+ - i2s-modem
+ - iMac
+ - K2-GMAC
+ - k2-i2c
+ - K2-Keylargo
+ - K2-UATA
+ - kauai-ata
+ - Keylargo
+ - keylargo-ata
- keylargo-media-bay
- lm87cimt
- MAC,adm1030
- MAC,ds1775
+ - MacRISC
+ - MacRISC2
+ - MacRISC3
+ - MacRISC4
- max6690
+ - ohare
- ohare-media-bay
- ohare-swim3
+ - PowerBook1,1
+ - PowerBook2,1
+ - PowerBook2,2
+ - PowerBook3,1
+ - PowerBook3,2
+ - PowerBook3,3
+ - PowerBook3,4
+ - PowerBook3,5
+ - PowerBook4,1
+ - PowerBook4,2
+ - PowerBook4,3
+ - PowerBook5,1
+ - PowerBook5,2
+ - PowerBook5,3
+ - PowerBook5,4
+ - PowerBook5,5
+ - PowerBook5,6
+ - PowerBook5,7
+ - PowerBook5,8
+ - PowerBook5,9
+ - PowerBook6,3
+ - PowerBook6,5
+ - PowerBook6,7
+ - PowerMac10,1
+ - PowerMac10,2
+ - PowerMac1,1
+ - PowerMac11,2
+ - PowerMac12,1
+ - PowerMac2,1
+ - PowerMac2,2
+ - PowerMac3,1
+ - PowerMac3,4
+ - PowerMac3,5
+ - PowerMac3,6
+ - PowerMac4,1
+ - PowerMac4,2
+ - PowerMac4,4
+ - PowerMac4,5
+ - PowerMac7,2
+ - PowerMac7,3
+ - PowerMac8,1
+ - PowerMac8,2
+ - PowerMac9,1
+ - paddington
+ - RackMac1,1
+ - RackMac1,2
+ - RackMac3,1
+ - screamer
+ - shasta-ata
+ - sms
+ - smu-rpm-fans
- smu-sat
+ - smu-sensors
+ - snapper
- swim3
+ - tumbler
+ - u3-agp
+ - u3-dart
+ - u3-ht
+ - u4-dart
+ - u4-pcie
+ - U4-pcie
+ - uni-n-i2c
+ - uni-north
- description: Legacy compatibles used on other PowerPC devices
enum:
+ - 1682m-gizmo
+ - 1682m-gpio
- 1682m-rng
+ - 1682m-sdc
+ - amcc,ppc440epx-rng
+ - amcc,ppc460ex-bcsr
+ - amcc,ppc460ex-crypto
+ - amcc,ppc460ex-rng
+ - amcc,ppc460sx-crypto
+ - amcc,ppc4xx-crypto
+ - amcc,sata-460ex
+ - CBEA,platform-open-pic
+ - CBEA,platform-spider-pic
+ - direct-mapped
+ - display
+ - gpio-mdio
+ - hawk-bridge
+ - hawk-pci
+ - IBM,CBEA
- IBM,lhca
- IBM,lhea
- IBM,lhea-ethernet
+ - ibm,axon-msic
+ - Momentum,Apache
+ - Momentum,Maple
+ - mai-logic,articia-s
+ - mpc10x-pci
- mpc5200b-fec-phy
- mpc5200-serial
- mpc5200-sram
+ - nintendo,flipper
+ - nintendo,flipper-exi
+ - nintendo,flipper-pi
+ - nintendo,flipper-pic
+ - nintendo,hollywood
+ - nintendo,hollywood-pic
+ - nintendo,latte-exi
+ - nintendo,latte-srnprot
- ohci-be
- ohci-bigendian
- ohci-le
+ - PA6T-1682M
+ - pasemi,1682m-iob
+ - pasemi,localbus
+ - pasemi,localbus-nand
+ - pasemi,nemo
+ - pasemi,pwrficient
+ - pasemi,pwrficient-rng
+ - pasemi,rootbus
+ - pasemi,sdc
+ - soc
+ - sony,ps3
+ - sti,platform-spider-pic
- description: Legacy compatibles used on SPARC devices
enum:
- bq4802
- ds1287
+ - i2cpcf,8584
- isa-m5819p
- isa-m5823p
- m5819
+ - qcn
- sab82532
+ - su
+ - sun4v
- SUNW,bbc-beep
- SUNW,bbc-i2c
- SUNW,CS4231
@@ -96,9 +240,13 @@ properties:
- compat1
- compat2
- compat3
+ - gpio-mockup
+ - gpio-simulator
+ - gpio-virtuser
- linux,spi-loopback-test
- mailbox-test
- regulator-virtual-consumer
+ - test-device
- description:
Devices on MIPS platform, without any DTS users. These are
diff --git a/Documentation/devicetree/bindings/input/adi,adp5588.yaml b/Documentation/devicetree/bindings/input/adi,adp5588.yaml
index 26ea66834ae2..336bc352579a 100644
--- a/Documentation/devicetree/bindings/input/adi,adp5588.yaml
+++ b/Documentation/devicetree/bindings/input/adi,adp5588.yaml
@@ -49,7 +49,10 @@ properties:
interrupt-controller:
description:
This property applies if either keypad,num-rows lower than 8 or
- keypad,num-columns lower than 10.
+ keypad,num-columns lower than 10. This property is optional if
+ keypad,num-rows or keypad,num-columns are not specified, as the
+ device is then configured purely for gpio use, in which case
+ interrupts may or may not be utilized.
'#interrupt-cells':
const: 2
@@ -65,13 +68,23 @@ properties:
minItems: 1
maxItems: 2
+dependencies:
+ keypad,num-rows:
+ - linux,keymap
+ - keypad,num-columns
+ keypad,num-columns:
+ - linux,keymap
+ - keypad,num-rows
+ linux,keymap:
+ - keypad,num-rows
+ - keypad,num-columns
+ - interrupts
+ interrupt-controller:
+ - interrupts
+
required:
- compatible
- reg
- - interrupts
- - keypad,num-rows
- - keypad,num-columns
- - linux,keymap
unevaluatedProperties: false
@@ -108,4 +121,19 @@ examples:
>;
};
};
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@34 {
+ compatible = "adi,adp5588";
+ reg = <0x34>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+
...
diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
index a62916d07a08..cb3e1801b0d3 100644
--- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
+++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml
@@ -23,7 +23,9 @@ properties:
- enum:
- elan,ekth5015m
- const: elan,ekth6915
- - const: elan,ekth6915
+ - enum:
+ - elan,ekth6915
+ - elan,ekth6a12nay
reg:
const: 0x10
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml b/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml
index 2025d6a5423e..76a286ec5959 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml
+++ b/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,pmi632-vib
- items:
- enum:
+ - qcom,pm6150-vib
- qcom,pm7250b-vib
- qcom,pm7325b-vib
- qcom,pm7550ba-vib
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt
deleted file mode 100644
index a644408b33b8..000000000000
--- a/Documentation/devicetree/bindings/input/rotary-encoder.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Rotary encoder DT bindings
-
-Required properties:
-- gpios: a spec for at least two GPIOs to be used, most significant first
-
-Optional properties:
-- linux,axis: the input subsystem axis to map to this rotary encoder.
- Defaults to 0 (ABS_X / REL_X)
-- rotary-encoder,steps: Number of steps in a full turnaround of the
- encoder. Only relevant for absolute axis. Defaults to 24 which is a
- typical value for such devices.
-- rotary-encoder,relative-axis: register a relative axis rather than an
- absolute one. Relative axis will only generate +1/-1 events on the input
- device, hence no steps need to be passed.
-- rotary-encoder,rollover: Automatic rollover when the rotary value becomes
- greater than the specified steps or smaller than 0. For absolute axis only.
-- rotary-encoder,steps-per-period: Number of steps (stable states) per period.
- The values have the following meaning:
- 1: Full-period mode (default)
- 2: Half-period mode
- 4: Quarter-period mode
-- wakeup-source: Boolean, rotary encoder can wake up the system.
-- rotary-encoder,encoding: String, the method used to encode steps.
- Supported are "gray" (the default and more common) and "binary".
-
-Deprecated properties:
-- rotary-encoder,half-period: Makes the driver work on half-period mode.
- This property is deprecated. Instead, a 'steps-per-period ' value should
- be used, such as "rotary-encoder,steps-per-period = <2>".
-
-See Documentation/input/devices/rotary-encoder.rst for more information.
-
-Example:
-
- rotary@0 {
- compatible = "rotary-encoder";
- gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
- linux,axis = <0>; /* REL_X */
- rotary-encoder,encoding = "gray";
- rotary-encoder,relative-axis;
- };
-
- rotary@1 {
- compatible = "rotary-encoder";
- gpios = <&gpio 21 0>, <&gpio 22 0>;
- linux,axis = <1>; /* ABS_Y */
- rotary-encoder,steps = <24>;
- rotary-encoder,encoding = "binary";
- rotary-encoder,rollover;
- };
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.yaml b/Documentation/devicetree/bindings/input/rotary-encoder.yaml
new file mode 100644
index 000000000000..e315aab7f584
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/rotary-encoder.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/rotary-encoder.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rotary encoder
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ See Documentation/input/devices/rotary-encoder.rst for more information.
+
+properties:
+ compatible:
+ const: rotary-encoder
+
+ gpios:
+ minItems: 2
+
+ linux,axis:
+ default: 0
+ description:
+ the input subsystem axis to map to this rotary encoder.
+ Defaults to 0 (ABS_X / REL_X)
+
+ rotary-encoder,steps:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 24
+ description:
+ Number of steps in a full turnaround of the
+ encoder. Only relevant for absolute axis. Defaults to 24 which is a
+ typical value for such devices.
+
+ rotary-encoder,relative-axis:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ register a relative axis rather than an
+ absolute one. Relative axis will only generate +1/-1 events on the input
+ device, hence no steps need to be passed.
+
+ rotary-encoder,rollover:
+ $ref: /schemas/types.yaml#/definitions/int32
+ description:
+ Automatic rollover when the rotary value becomes
+ greater than the specified steps or smaller than 0. For absolute axis only.
+
+ rotary-encoder,steps-per-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 1
+ enum: [1, 2, 4]
+ description: |
+ Number of steps (stable states) per period.
+ The values have the following meaning:
+ 1: Full-period mode (default)
+ 2: Half-period mode
+ 4: Quarter-period mode
+
+ wakeup-source: true
+
+ rotary-encoder,encoding:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: the method used to encode steps.
+ enum: [gray, binary]
+
+ rotary-encoder,half-period:
+ $ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
+ description:
+ Makes the driver work on half-period mode.
+      This property is deprecated. Instead, a 'steps-per-period' value should
+      be used, such as "rotary-encoder,steps-per-period = <2>".
+
+required:
+ - compatible
+ - gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ rotary {
+ compatible = "rotary-encoder";
+ gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
+ linux,axis = <0>; /* REL_X */
+ rotary-encoder,encoding = "gray";
+ rotary-encoder,relative-axis;
+ };
+
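The deleted text file also carried an absolute-axis example; a sketch of the
same configuration against the new schema (note that the schema above types
rotary-encoder,rollover as int32, while the historical example used it as a
bare flag):

    rotary {
        compatible = "rotary-encoder";
        gpios = <&gpio 21 0>, <&gpio 22 0>;
        linux,axis = <1>; /* ABS_Y */
        rotary-encoder,steps = <24>;
        rotary-encoder,encoding = "binary";
        rotary-encoder,rollover;
    };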
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
deleted file mode 100644
index afa38dc069f0..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-* Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C)
-
-Required properties:
-- compatible : for SPI slave, use "adi,ad7879"
- for I2C slave, use "adi,ad7879-1"
-- reg : SPI chipselect/I2C slave address
- See spi-bus.txt for more SPI slave properties
-- interrupts : touch controller interrupt
-- touchscreen-max-pressure : maximum reported pressure
-- adi,resistance-plate-x : total resistance of X-plate (for pressure
- calculation)
-Optional properties:
-- touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
-- adi,first-conversion-delay : 0-12: In 128us steps (starting with 128us)
- 13 : 2.560ms
- 14 : 3.584ms
- 15 : 4.096ms
- This property has to be a '/bits/ 8' value
-- adi,acquisition-time : 0: 2us
- 1: 4us
- 2: 8us
- 3: 16us
- This property has to be a '/bits/ 8' value
-- adi,median-filter-size : 0: disabled
- 1: 4 measurements
- 2: 8 measurements
- 3: 16 measurements
- This property has to be a '/bits/ 8' value
-- adi,averaging : 0: 2 middle values (1 if median disabled)
- 1: 4 middle values
- 2: 8 middle values
- 3: 16 values
- This property has to be a '/bits/ 8' value
-- adi,conversion-interval: : 0 : convert one time only
- 1-255: 515us + val * 35us (up to 9.440ms)
- This property has to be a '/bits/ 8' value
-- gpio-controller : Switch AUX/VBAT/GPIO pin to GPIO mode
-
-Example:
-
- touchscreen0@2c {
- compatible = "adi,ad7879-1";
- reg = <0x2c>;
- interrupt-parent = <&gpio1>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- touchscreen-max-pressure = <4096>;
- adi,resistance-plate-x = <120>;
- adi,first-conversion-delay = /bits/ 8 <3>;
- adi,acquisition-time = /bits/ 8 <1>;
- adi,median-filter-size = /bits/ 8 <2>;
- adi,averaging = /bits/ 8 <1>;
- adi,conversion-interval = /bits/ 8 <255>;
- };
-
- touchscreen1@1 {
- compatible = "adi,ad7879";
- spi-max-frequency = <5000000>;
- reg = <1>;
- spi-cpol;
- spi-cpha;
- gpio-controller;
- interrupt-parent = <&gpio1>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- touchscreen-max-pressure = <4096>;
- adi,resistance-plate-x = <120>;
- adi,first-conversion-delay = /bits/ 8 <3>;
- adi,acquisition-time = /bits/ 8 <1>;
- adi,median-filter-size = /bits/ 8 <2>;
- adi,averaging = /bits/ 8 <1>;
- adi,conversion-interval = /bits/ 8 <255>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml
new file mode 100644
index 000000000000..caa5fa3cc3f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/adi,ad7879.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ description: |
+ for SPI slave, use "adi,ad7879"
+ for I2C slave, use "adi,ad7879-1"
+ enum:
+ - adi,ad7879
+ - adi,ad7879-1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ touchscreen-max-pressure:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: maximum reported pressure
+
+ adi,resistance-plate-x:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: total resistance of X-plate (for pressure calculation)
+
+ touchscreen-swapped-x-y:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: X and Y axis are swapped (boolean)
+
+ adi,first-conversion-delay:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ minimum: 0
+ maximum: 15
+ description: |
+ 0-12: In 128us steps (starting with 128us)
+ 13 : 2.560ms
+ 14 : 3.584ms
+ 15 : 4.096ms
+ This property has to be a '/bits/ 8' value
+
+ adi,acquisition-time:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: 2us
+ 1: 4us
+ 2: 8us
+ 3: 16us
+ This property has to be a '/bits/ 8' value
+
+ adi,median-filter-size:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: disabled
+ 1: 4 measurements
+ 2: 8 measurements
+ 3: 16 measurements
+ This property has to be a '/bits/ 8' value
+
+ adi,averaging:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: 2 middle values (1 if median disabled)
+ 1: 4 middle values
+ 2: 8 middle values
+ 3: 16 values
+ This property has to be a '/bits/ 8' value
+
+ adi,conversion-interval:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ description: |
+ 0 : convert one time only
+ 1-255: 515us + val * 35us (up to 9.440ms)
+ This property has to be a '/bits/ 8' value
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen0@2c {
+ compatible = "adi,ad7879-1";
+ reg = <0x2c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen1@1 {
+ compatible = "adi,ad7879";
+ reg = <1>;
+ spi-max-frequency = <5000000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+ };
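To make the adi,conversion-interval formula concrete: at the maximum value of
255, the interval works out to 515us + 255 * 35us = 9440us, i.e. the 9.440ms
ceiling quoted in the description, which is the value both examples use:

    adi,conversion-interval = /bits/ 8 <255>; /* 515us + 255 * 35us = 9440us */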
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
deleted file mode 100644
index 399c87782935..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-Device tree bindings for TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046
-SPI driven touch screen controllers.
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in
-
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-must be specified.
-
-Additional required properties:
-
- compatible Must be one of the following, depending on the
- model:
- "ti,tsc2046"
- "ti,ads7843"
- "ti,ads7845"
- "ti,ads7846"
- "ti,ads7873"
-
- interrupts An interrupt node describing the IRQ line the chip's
- !PENIRQ pin is connected to.
- vcc-supply A regulator node for the supply voltage.
-
-
-Optional properties:
-
- ti,vref-delay-usecs vref supply delay in usecs, 0 for
- external vref (u16).
- ti,vref-mv The VREF voltage, in millivolts (u16).
- Set to 0 to use internal references
- (ADS7846).
- ti,keep-vref-on set to keep vref on for differential
- measurements as well
- ti,settle-delay-usec Settling time of the analog signals;
- a function of Vcc and the capacitance
- on the X/Y drivers. If set to non-zero,
- two samples are taken with settle_delay
- us apart, and the second one is used.
- ~150 uSec with 0.01uF caps (u16).
- ti,penirq-recheck-delay-usecs If set to non-zero, after samples are
- taken this delay is applied and penirq
- is rechecked, to help avoid false
- events. This value is affected by the
- material used to build the touch layer
- (u16).
- ti,x-plate-ohms Resistance of the X-plate,
- in Ohms (u16).
- ti,y-plate-ohms Resistance of the Y-plate,
- in Ohms (u16).
- ti,x-min Minimum value on the X axis (u16).
- ti,y-min Minimum value on the Y axis (u16).
- ti,debounce-tol Tolerance used for filtering (u16).
- ti,debounce-rep Additional consecutive good readings
- required after the first two (u16).
- ti,pendown-gpio-debounce Platform specific debounce time for the
- pendown-gpio (u32).
- pendown-gpio GPIO handle describing the pin the !PENIRQ
- line is connected to.
- ti,hsync-gpios GPIO line to poll for hsync
- wakeup-source use any event on touchscreen as wakeup event.
- (Legacy property support: "linux,wakeup")
- touchscreen-size-x General touchscreen binding, see [1].
- touchscreen-size-y General touchscreen binding, see [1].
- touchscreen-max-pressure General touchscreen binding, see [1].
- touchscreen-min-pressure General touchscreen binding, see [1].
- touchscreen-average-samples General touchscreen binding, see [1].
- touchscreen-inverted-x General touchscreen binding, see [1].
- touchscreen-inverted-y General touchscreen binding, see [1].
- touchscreen-swapped-x-y General touchscreen binding, see [1].
-
-[1] All general touchscreen properties are described in
- Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt.
-
-Deprecated properties:
-
- ti,swap-xy swap x and y axis
- ti,x-max Maximum value on the X axis (u16).
- ti,y-max Maximum value on the Y axis (u16).
- ti,pressure-min Minimum reported pressure value
- (threshold) - u16.
- ti,pressure-max Maximum reported pressure value (u16).
- ti,debounce-max Max number of additional readings per
- sample (u16).
-
-Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC::
-
- spi_controller {
- tsc2046@0 {
- reg = <0>; /* CS0 */
- compatible = "ti,tsc2046";
- interrupt-parent = <&gpio1>;
- interrupts = <8 0>; /* BOOT6 / GPIO 8 */
- spi-max-frequency = <1000000>;
- pendown-gpio = <&gpio1 8 0>;
- vcc-supply = <&reg_vcc3>;
-
- ti,x-min = /bits/ 16 <0>;
- ti,x-max = /bits/ 16 <8000>;
- ti,y-min = /bits/ 16 <0>;
- ti,y-max = /bits/ 16 <4800>;
- ti,x-plate-ohms = /bits/ 16 <40>;
- ti,pressure-max = /bits/ 16 <255>;
-
- wakeup-source;
- };
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
index 8cf371b99f19..e4dbbafb3779 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
@@ -666,7 +666,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- touch@56 {
+ touchscreen@56 {
compatible = "azoteq,iqs7210a";
reg = <0x56>;
irq-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
@@ -704,7 +704,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- touch@56 {
+ touchscreen@56 {
compatible = "azoteq,iqs7211e";
reg = <0x56>;
irq-gpios = <&gpio 4 (GPIO_ACTIVE_LOW |
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
deleted file mode 100644
index ca304357c374..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* Toradex Colibri VF50 Touchscreen driver
-
-Required Properties:
-- compatible must be toradex,vf50-touchscreen
-- io-channels: adc channels being used by the Colibri VF50 module
- IIO ADC for Y-, X-, Y+, X+ connections
-- xp-gpios: FET gate driver for input of X+
-- xm-gpios: FET gate driver for input of X-
-- yp-gpios: FET gate driver for input of Y+
-- ym-gpios: FET gate driver for input of Y-
-- interrupts: pen irq interrupt for touch detection, signal from X plate
-- pinctrl-names: "idle", "default"
-- pinctrl-0: pinctrl node for pen/touch detection, pinctrl must provide
- pull-up resistor on X+, X-.
-- pinctrl-1: pinctrl node for X/Y and pressure measurement (ADC) state pinmux
-- vf50-ts-min-pressure: pressure level at which to stop measuring X/Y values
-
-Example:
-
- touchctrl: vf50_touchctrl {
- compatible = "toradex,vf50-touchscreen";
- io-channels = <&adc1 0>,<&adc0 0>,
- <&adc0 1>,<&adc1 2>;
- xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
- xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
- yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
- ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio0>;
- interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-names = "idle","default";
- pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>;
- pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>;
- vf50-ts-min-pressure = <200>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index 51d48d4130d3..70a922e213f2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -126,7 +126,7 @@ examples:
i2c {
#address-cells = <1>;
#size-cells = <0>;
- edt-ft5x06@38 {
+ touchscreen@38 {
compatible = "edt,edt-ft5406";
reg = <0x38>;
interrupt-parent = <&gpio2>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index 2a2d86cfd104..eb4992f708b7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -69,7 +69,7 @@ examples:
i2c {
#address-cells = <1>;
#size-cells = <0>;
- gt928@5d {
+ touchscreen@5d {
compatible = "goodix,gt928";
reg = <0x5d>;
interrupt-parent = <&gpio>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
new file mode 100644
index 000000000000..604921733d2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/ti,ads7843.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI's SPI driven touch screen controllers
+
+maintainers:
+ - Alexander Stein <alexander.stein@ew.tq-group.com>
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ - Marek Vasut <marex@denx.de>
+
+description:
+ TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046 SPI driven touch screen
+ controllers.
+
+properties:
+ compatible:
+ enum:
+ - ti,ads7843
+ - ti,ads7845
+ - ti,ads7846
+ - ti,ads7873
+ - ti,tsc2046
+
+ interrupts:
+ maxItems: 1
+
+ pendown-gpio:
+ maxItems: 1
+ description:
+ GPIO handle describing the pin the !PENIRQ line is connected to.
+
+ vcc-supply:
+ description:
+ A regulator node for the supply voltage.
+
+ wakeup-source: true
+
+ ti,debounce-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Max number of additional readings per sample.
+
+ ti,debounce-rep:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Additional consecutive good readings required after the first two.
+
+ ti,debounce-tol:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Tolerance used for filtering.
+
+ ti,hsync-gpios:
+ maxItems: 1
+ description:
+ GPIO line to poll for hsync.
+
+ ti,keep-vref-on:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set to keep Vref on for differential measurements as well.
+
+ ti,pendown-gpio-debounce:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Platform specific debounce time for the pendown-gpio.
+
+ ti,penirq-recheck-delay-usecs:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ If set to non-zero, after samples are taken this delay is applied and
+ penirq is rechecked, to help avoid false events. This value is
+ affected by the material used to build the touch layer.
+
+ ti,pressure-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum reported pressure value.
+
+ ti,pressure-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum reported pressure value (threshold).
+
+ ti,settle-delay-usec:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Settling time of the analog signals; a function of Vcc and the
+ capacitance on the X/Y drivers. If set to non-zero, two samples are
+ taken with settle_delay us apart, and the second one is used. ~150
+ uSec with 0.01uF caps.
+
+ ti,swap-xy:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Swap x and y axis.
+
+ ti,vref-delay-usecs:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Vref supply delay in usecs, 0 for external Vref.
+
+ ti,vref-mv:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ The VREF voltage, in millivolts.
+ Set to 0 to use internal references (ADS7846).
+
+ ti,x-plate-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Resistance of the X-plate, in Ohms.
+
+ ti,x-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum value on the X axis.
+
+ ti,x-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum value on the X axis.
+
+ ti,y-plate-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Resistance of the Y-plate, in Ohms.
+
+ ti,y-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum value on the Y axis.
+
+ ti,y-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum value on the Y axis.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: touchscreen.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>; /* CS0 */
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 0>; /* BOOT6 / GPIO 8 */
+ pendown-gpio = <&gpio1 8 0>;
+ spi-max-frequency = <1000000>;
+ vcc-supply = <&reg_vcc3>;
+ wakeup-source;
+
+ ti,pressure-max = /bits/ 16 <255>;
+ ti,x-max = /bits/ 16 <8000>;
+ ti,x-min = /bits/ 16 <0>;
+ ti,x-plate-ohms = /bits/ 16 <40>;
+ ti,y-max = /bits/ 16 <4800>;
+ ti,y-min = /bits/ 16 <0>;
+ };
+ };
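Like the deprecated min/max properties shown in the example, the tuning
properties are u16 and need the /bits/ 16 cast. A hypothetical fragment for
the ~0.01uF case called out in the ti,settle-delay-usec description:

    ti,settle-delay-usec = /bits/ 16 <150>;          /* ~150 us with 0.01 uF caps */
    ti,penirq-recheck-delay-usecs = /bits/ 16 <100>; /* illustrative value */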
diff --git a/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml
new file mode 100644
index 000000000000..5094c5183c74
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/toradex,vf50-touchscreen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toradex Colibri VF50 Touchscreen
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ - Sanchayan Maity <maitysanchayan@gmail.com>
+
+properties:
+ compatible:
+ const: toradex,vf50-touchscreen
+
+ interrupts:
+ maxItems: 1
+
+ io-channels:
+ maxItems: 4
+ description:
+      IIO ADC channels used by the Colibri VF50 module for the
+      Y-, X-, Y+, X+ connections, in that order
+
+ xp-gpios:
+ description: FET gate driver for input of X+
+
+ xm-gpios:
+ description: FET gate driver for input of X-
+
+ yp-gpios:
+ description: FET gate driver for input of Y+
+
+ ym-gpios:
+ description: FET gate driver for input of Y-
+
+ vf50-ts-min-pressure:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 50
+ maximum: 2000
+ description: pressure level at which to stop measuring X/Y values
+
+required:
+ - compatible
+ - io-channels
+ - xp-gpios
+ - xm-gpios
+ - yp-gpios
+ - ym-gpios
+ - interrupts
+ - vf50-ts-min-pressure
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ touchscreen {
+ compatible = "toradex,vf50-touchscreen";
+ interrupt-parent = <&gpio0>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ io-channels = <&adc1 0>, <&adc0 0>, <&adc0 1>, <&adc1 2>;
+ xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "idle", "default";
+ pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>;
+ pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>;
+ vf50-ts-min-pressure = <200>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
index b1507463a03e..3f663ce3e44e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
@@ -16,6 +16,7 @@ maintainers:
allOf:
- $ref: touchscreen.yaml#
+ - $ref: ../input.yaml#
properties:
$nodename:
@@ -79,6 +80,15 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 2]
+ linux,keycodes:
+ description:
+ This property specifies an array of keycodes assigned to the
+ touch-keys that can be present in some touchscreen configurations.
+ If the touch-keys are enabled, controller firmware will assign some
+ touch sense lines to those keys.
+ minItems: 1
+ maxItems: 8
+
touchscreen-size-x: true
touchscreen-size-y: true
touchscreen-fuzz-x: true
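A sketch of the new linux,keycodes property for a panel whose firmware enables
two touch-keys; the key choices here are hypothetical and assume
<dt-bindings/input/linux-event-codes.h> is included:

    linux,keycodes = <KEY_APPSELECT>, <KEY_BACK>;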
diff --git a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
index 698588e9aa86..4be9b596a790 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@@ -31,13 +31,25 @@ description: |
This device also represents the FIQ interrupt sources on platforms using AIC,
which do not go through a discrete interrupt controller.
+ IPIs may be performed via MMIO registers on all variants of AIC. Starting
+ from A11, system registers may also be used for "fast" IPIs. Starting from
+ M1, even faster IPIs within the same cluster may be achieved by writing to
+ a "local" fast IPI register as opposed to using the "global" fast IPI
+ register.
+
allOf:
- $ref: /schemas/interrupt-controller.yaml#
properties:
compatible:
items:
- - const: apple,t8103-aic
+ - enum:
+ - apple,s5l8960x-aic
+ - apple,t7000-aic
+ - apple,s8000-aic
+ - apple,t8010-aic
+ - apple,t8015-aic
+ - apple,t8103-aic
- const: apple,aic
interrupt-controller: true
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 0f4a062c9d6f..5f051c666cbe 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -60,7 +60,7 @@ properties:
The 4th cell is a phandle to a node describing a set of CPUs this
interrupt is affine to. The interrupt must be a PPI, and the node
pointed must be a subnode of the "ppi-partitions" subnode. For
- interrupt types other than PPI or PPIs that are not partitionned,
+ interrupt types other than PPI or PPIs that are not partitioned,
this cell must be zero. See the "ppi-partitions" node description
below.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
deleted file mode 100644
index e3fea0758d25..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Aspeed Vectored Interrupt Controller
-
-These bindings are for the Aspeed interrupt controller. The AST2400 and
-AST2500 SoC families include a legacy register layout before a re-designed
-layout, but the bindings do not prescribe the use of one or the other.
-
-Required properties:
-
-- compatible : "aspeed,ast2400-vic"
- "aspeed,ast2500-vic"
-
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-
-Example:
-
- vic: interrupt-controller@1e6c0080 {
- compatible = "aspeed,ast2400-vic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1e6c0080 0x80>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml
new file mode 100644
index 000000000000..73e8b9a39bd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2400-vic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed Vectored Interrupt Controller
+
+maintainers:
+ - Andrew Jeffery <andrew@codeconstruct.com.au>
+
+description:
+ The AST2400 and AST2500 SoC families include a legacy register layout before
+ a redesigned layout, but the bindings do not prescribe the use of one or the
+ other.
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-vic
+ - aspeed,ast2500-vic
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+ description:
+ Specifies the number of cells needed to encode an interrupt source. It
+ must be 1 as the VIC has no configuration options for interrupt sources.
+ The single cell defines the interrupt number.
+
+ valid-sources:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 2
+ description:
+ A bitmap of supported sources for the implementation.
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1e6c0080 {
+ compatible = "aspeed,ast2400-vic";
+ reg = <0x1e6c0080 0x80>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-sources = <0xffffffff 0x0007ffff>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
deleted file mode 100644
index 8ced1696c325..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-BCM2836 per-CPU interrupt controller
-
-The BCM2836 has a per-cpu interrupt controller for the timer, PMU
-events, and SMP IPIs. One of the CPUs may receive interrupts for the
-peripheral (GPU) events, which chain to the BCM2835-style interrupt
-controller.
-
-Required properties:
-
-- compatible: Should be "brcm,bcm2836-l1-intc"
-- reg: Specifies base physical address and size of the
- registers
-- interrupt-controller: Identifies the node as an interrupt controller
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 2
-
-Please refer to interrupts.txt in this directory for details of the common
-Interrupt Controllers bindings used by client devices.
-
-The interrupt sources are as follows:
-
-0: CNTPSIRQ
-1: CNTPNSIRQ
-2: CNTHPIRQ
-3: CNTVIRQ
-8: GPU_FAST
-9: PMU_FAST
-
-Example:
-
-local_intc: local_intc {
- compatible = "brcm,bcm2836-l1-intc";
- reg = <0x40000000 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&local_intc>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml
new file mode 100644
index 000000000000..5fda626c80ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm2836-l1-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2836 per-CPU interrupt controller
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description:
+ The BCM2836 has a per-cpu interrupt controller for the timer, PMU
+ events, and SMP IPIs. One of the CPUs may receive interrupts for the
+ peripheral (GPU) events, which chain to the BCM2835-style interrupt
+ controller.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm2836-l1-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ local_intc: interrupt-controller@40000000 {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x40000000 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&local_intc>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
index aae676ba30ed..6076ddf56bb5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
@@ -17,6 +17,7 @@ properties:
- enum:
- fsl,imx8m-irqsteer
- fsl,imx8mp-irqsteer
+ - fsl,imx8qm-irqsteer
- fsl,imx8qxp-irqsteer
- const: fsl,imx-irqsteer
@@ -83,6 +84,7 @@ allOf:
contains:
enum:
- fsl,imx8mp-irqsteer
+ - fsl,imx8qm-irqsteer
- fsl,imx8qxp-irqsteer
then:
required:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
index 985fa10abb99..b1ea08a41bb0 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
@@ -27,6 +27,7 @@ properties:
items:
- enum:
- qcom,qdu1000-pdc
+ - qcom,sa8255p-pdc
- qcom,sa8775p-pdc
- qcom,sc7180-pdc
- qcom,sc7280-pdc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 709b2211276b..7e1451f9786a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -67,6 +67,7 @@ properties:
- allwinner,sun20i-d1-plic
- sophgo,cv1800b-plic
- sophgo,cv1812h-plic
+ - sophgo,sg2002-plic
- sophgo,sg2042-plic
- thead,th1520-plic
- const: thead,c900-plic
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 280b4e49f219..92d350b8e01a 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -36,7 +36,9 @@ properties:
items:
- enum:
- qcom,qcm2290-smmu-500
+ - qcom,qcs8300-smmu-500
- qcom,qdu1000-smmu-500
+ - qcom,sa8255p-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7180-smmu-500
- qcom,sc7280-smmu-500
@@ -84,6 +86,7 @@ properties:
items:
- enum:
- qcom,qcm2290-smmu-500
+ - qcom,sa8255p-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8180x-smmu-500
@@ -552,7 +555,9 @@ allOf:
- cavium,smmu-v2
- marvell,ap806-smmu-500
- nvidia,smmu-500
+ - qcom,qcs8300-smmu-500
- qcom,qdu1000-smmu-500
+ - qcom,sa8255p-smmu-500
- qcom,sc7180-smmu-500
- qcom,sdm670-smmu-500
- qcom,sdm845-smmu-500
diff --git a/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
index 54d6d1f08e24..17e971903ee9 100644
--- a/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
+++ b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
@@ -66,7 +66,7 @@ patternProperties:
IMAXled = 160000 * (592 / 600.5) * (1 / max-current-switch-number)
And the minimum output current formula:
IMINled = 3300 * (592 / 600.5) * (1 / max-current-switch-number)
- where max-current-switch-number is determinated by led configuration
+ where max-current-switch-number is determined by led configuration
and depends on how leds are physically connected to the led driver.
allOf:
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 8a3c2398b10c..bf9a101e4d42 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -113,6 +113,8 @@ properties:
# LED indicates NAND memory activity (deprecated),
# in new implementations use "mtd"
- nand-disk
+ # LED indicates network activity
+ - netdev
# No trigger assigned to the LED. This is the default mode
# if trigger is absent
- none
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
deleted file mode 100644
index b1103d961d6c..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-* Texas Instruments - LM3692x Highly Efficient White LED Driver
-
-The LM3692x is an ultra-compact, highly efficient,
-white-LED driver designed for LCD display backlighting.
-
-The main difference between the LM36922 and LM36923 is the number of
-LED strings it supports. The LM36922 supports two strings while the LM36923
-supports three strings.
-
-Required properties:
- - compatible:
- "ti,lm36922"
- "ti,lm36923"
- - reg : I2C slave address
- - #address-cells : 1
- - #size-cells : 0
-
-Optional properties:
- - enable-gpios : gpio pin to enable/disable the device.
- - vled-supply : LED supply
- - ti,ovp-microvolt: Overvoltage protection in
- micro-volt, can be 17000000, 21000000, 25000000 or
- 29000000. If ti,ovp-microvolt is not specified it
- defaults to 29000000.
-
-Required child properties:
- - reg : 0 - Will enable all LED sync paths
- 1 - Will enable the LED1 sync
- 2 - Will enable the LED2 sync
- 3 - Will enable the LED3 sync (LM36923 only)
-
-Optional child properties:
- - function : see Documentation/devicetree/bindings/leds/common.txt
- - color : see Documentation/devicetree/bindings/leds/common.txt
- - label : see Documentation/devicetree/bindings/leds/common.txt (deprecated)
- - linux,default-trigger :
- see Documentation/devicetree/bindings/leds/common.txt
- - led-max-microamp :
- see Documentation/devicetree/bindings/leds/common.txt
-
-Example:
-
-#include <dt-bindings/leds/common.h>
-
-led-controller@36 {
- compatible = "ti,lm3692x";
- reg = <0x36>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
- vled-supply = <&vbatt>;
- ti,ovp-microvolt = <29000000>;
-
- led@0 {
- reg = <0>;
- function = LED_FUNCTION_BACKLIGHT;
- color = <LED_COLOR_ID_WHITE>;
- linux,default-trigger = "backlight";
- led-max-microamp = <20000>;
- };
-}
-
-For more product information please see the link below:
-https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
diff --git a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt b/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt
deleted file mode 100644
index df2b4e1c492b..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-LEDs connected to Spreadtrum SC27XX PMIC breathing light controller
-
-The SC27xx breathing light controller supports to 3 outputs:
-red LED, green LED and blue LED. Each LED can work at normal
-PWM mode or breath light mode.
-
-Required properties:
-- compatible: Should be "sprd,sc2731-bltc".
-- #address-cells: Must be 1.
-- #size-cells: Must be 0.
-- reg: Specify the controller address.
-
-Required child properties:
-- reg: Port this LED is connected to.
-
-Optional child properties:
-- function: See Documentation/devicetree/bindings/leds/common.txt.
-- color: See Documentation/devicetree/bindings/leds/common.txt.
-- label: See Documentation/devicetree/bindings/leds/common.txt (deprecated).
-
-Examples:
-
-led-controller@200 {
- compatible = "sprd,sc2731-bltc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x200>;
-
- led@0 {
- color = <LED_COLOR_ID_RED>;
- reg = <0x0>;
- };
-
- led@1 {
- color = <LED_COLOR_ID_GREEN>;
- reg = <0x1>;
- };
-
- led@2 {
- color = <LED_COLOR_ID_BLUE>;
- reg = <0x2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
index 654915c1f687..ab8c90cbadb5 100644
--- a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
+++ b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
@@ -11,19 +11,21 @@ maintainers:
- Marek Vasut <marex@denx.de>
description:
- The NXP PCA9952/PCA9955B are programmable LED controllers connected via I2C
- that can drive 16 separate lines. Each of them can be individually switched
+  The NXP PCA995x family of programmable LED controllers is connected via I2C
+  and can drive multiple separate lines. Each line can be individually switched
on and off, and brightness can be controlled via individual PWM.
Datasheets are available at
https://www.nxp.com/docs/en/data-sheet/PCA9952_PCA9955.pdf
https://www.nxp.com/docs/en/data-sheet/PCA9955B.pdf
+ https://www.nxp.com/docs/en/data-sheet/PCA9956B.pdf
properties:
compatible:
enum:
- nxp,pca9952
- nxp,pca9955b
+ - nxp,pca9956b
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml
new file mode 100644
index 000000000000..5853410c7a45
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/sprd,sc2731-bltc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 PMIC breathing light controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+ The SC2731 breathing light controller supports up to 3 outputs:
+ red LED, green LED and blue LED. Each LED can work at normal PWM mode
+ or breath light mode.
+
+properties:
+ compatible:
+ const: sprd,sc2731-bltc
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^led@[0-2]$":
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 2
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@200 {
+ compatible = "sprd,sc2731-bltc";
+ reg = <0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/leds/ti.lm36922.yaml b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml
new file mode 100644
index 000000000000..8ffbc6b785a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/ti.lm36922.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - LM3692x Highly Efficient White LED Driver
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The LM3692x is an ultra-compact, highly efficient,
+ white-LED driver designed for LCD display backlighting.
+
+ The main difference between the LM36922 and LM36923 is the number of
+ LED strings it supports. The LM36922 supports two strings while the LM36923
+ supports three strings.
+
+ For more product information please see the link below:
+ https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+
+properties:
+ compatible:
+ enum:
+ - ti,lm36922
+ - ti,lm36923
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ enable-gpios:
+ description: gpio pin to enable/disable the device.
+
+ vled-supply:
+ description: LED supply
+
+ ti,ovp-microvolt:
+ description: Overvoltage protection.
+ default: 29000000
+ enum: [17000000, 21000000, 25000000, 29000000]
+
+patternProperties:
+ '^led@[0-3]$':
+ type: object
+ $ref: common.yaml
+ properties:
+ reg:
+ enum: [0, 1, 2, 3]
+ description: |
+ 0 - Will enable all LED sync paths
+ 1 - Will enable the LED1 sync
+ 2 - Will enable the LED2 sync
+ 3 - Will enable the LED3 sync (LM36923 only)
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,lm36922
+ then:
+ properties:
+ led@3: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@36 {
+ compatible = "ti,lm36922";
+ reg = <0x36>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+ ti,ovp-microvolt = <29000000>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_BACKLIGHT;
+ color = <LED_COLOR_ID_WHITE>;
+ linux,default-trigger = "backlight";
+ led-max-microamp = <20000>;
+ };
+ };
+ };
+
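Since the allOf clause forbids led@3 when the compatible is ti,lm36922, the
LED3 sync path can only appear under a three-string LM36923; a sketch with
hypothetical child settings:

    led-controller@36 {
        compatible = "ti,lm36923";
        reg = <0x36>;
        #address-cells = <1>;
        #size-cells = <0>;

        led@3 {
            reg = <3>; /* LED3 sync, LM36923 only */
            function = LED_FUNCTION_BACKLIGHT;
            color = <LED_COLOR_ID_WHITE>;
        };
    };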
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt
index c80065a1eb97..bf0c998b8603 100644
--- a/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt
@@ -24,7 +24,7 @@ Required properties:
number of completion messages for which FlexRM will inject
one MSI interrupt to CPU.
- The 3nd cell contains MSI timer value representing time for
+ The 3rd cell contains MSI timer value representing time for
which FlexRM will wait to accumulate N completion messages
where N is the value specified by 2nd cell above. If FlexRM
does not get required number of completion messages in time
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index 55930f6107c9..47dce75aeae6 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -31,7 +31,8 @@ properties:
- items:
- enum:
- amlogic,gxbb-vdec # GXBB (S905)
- - amlogic,gxl-vdec # GXL (S905X, S905D)
+ - amlogic,gxl-vdec # GXL (S905D, S905W, S905X, S905Y)
+ - amlogic,gxlx-vdec # GXLX (S905L)
- amlogic,gxm-vdec # GXM (S912)
- const: amlogic,gx-vdec
- enum:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
new file mode 100644
index 000000000000..ca57c01739d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2023-2024 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,og01a1b.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OG01A1B Image Sensor
+
+maintainers:
+ - Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
+
+description:
+  The OmniVision OG01A1B is a black and white CMOS 1.3 Megapixel (1280x1024)
+ image sensor controlled over an I2C-compatible SCCB bus.
+ The sensor transmits images on a MIPI CSI-2 output interface with one or
+ two data lanes.
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ const: ovti,og01a1b
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ reset-gpios:
+ description: Active low GPIO connected to XSHUTDOWN pad of the sensor.
+ maxItems: 1
+
+ strobe-gpios:
+ description: Input GPIO connected to strobe pad of the sensor.
+ maxItems: 1
+
+ avdd-supply:
+ description: Analogue circuit voltage supply.
+
+ dovdd-supply:
+ description: I/O circuit voltage supply.
+
+ dvdd-supply:
+ description: Digital circuit voltage supply.
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+ description:
+ Output port node, single endpoint describing the CSI-2 transmitter.
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum: [1, 2]
+
+ link-frequencies: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@60 {
+ compatible = "ovti,og01a1b";
+ reg = <0x60>;
+ clocks = <&clk 0>;
+ reset-gpios = <&gpio 117 GPIO_ACTIVE_LOW>;
+ avdd-supply = <&vreg_3v3>;
+ dovdd-supply = <&vreg_1p8>;
+ dvdd-supply = <&vreg_1p2>;
+
+ port {
+ og01a1b_ep: endpoint {
+ remote-endpoint = <&csiphy_ep>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <500000000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
index 106c36ee966d..77bf3a4ee89d 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
@@ -75,6 +75,8 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
+
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -92,6 +94,8 @@ examples:
ovdd-supply = <&camera_vddo_1v8>;
dvdd-supply = <&camera_vddd_1v2>;
+ reset-gpios = <&gpio 50 GPIO_ACTIVE_LOW>;
+
port {
imx335: endpoint {
remote-endpoint = <&cam>;
diff --git a/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml b/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml
index 1978fbb77a6c..535acf2b88a9 100644
--- a/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml
@@ -16,7 +16,7 @@ description:
can be connected to CMOS image sensors from various vendors, supporting both
MIPI CSI-2 and parallel interfaces. It can also output on either MIPI CSI-2
or parallel. The hardware is capable of transmitting and receiving MIPI
- interlaved data strams with data types or multiple virtual channel
+ interleaved data streams with data types or multiple virtual channel
identifiers.
allOf:
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
index 8f9b6433aeb8..10c334e6b3dc 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
@@ -43,6 +43,7 @@ properties:
- const: vcodec_bus
iommus:
+ minItems: 1
maxItems: 2
interconnects:
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.yaml b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
index c6abe719881b..f94dacd96278 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
@@ -27,6 +27,7 @@ properties:
- renesas,fcpf # FCP for FDP
- items:
- enum:
+ - renesas,r9a07g043u-fcpvd # RZ/G2UL
- renesas,r9a07g044-fcpvd # RZ/G2{L,LC}
- renesas,r9a07g054-fcpvd # RZ/V2L
- const: renesas,fcpv # Generic FCP for VSP fallback
@@ -62,6 +63,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,r9a07g043u-fcpvd
- renesas,r9a07g044-fcpvd
- renesas,r9a07g054-fcpvd
then:
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index 5539d0f8e74d..cf54176f4fbd 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -52,8 +52,12 @@ properties:
- renesas,vin-r8a77980 # R-Car V3H
- renesas,vin-r8a77990 # R-Car E3
- renesas,vin-r8a77995 # R-Car D3
+ - items:
+ - enum:
- renesas,vin-r8a779a0 # R-Car V3U
- renesas,vin-r8a779g0 # R-Car V4H
+ - renesas,vin-r8a779h0 # R-Car V4M
+ - const: renesas,rcar-gen4-vin # Generic R-Car Gen4
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
index 3265e922647c..1a03e67462a4 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
@@ -23,6 +23,7 @@ properties:
- renesas,vsp2 # R-Car Gen3 and RZ/G2
- items:
- enum:
+ - renesas,r9a07g043u-vsp2 # RZ/G2UL
- renesas,r9a07g054-vsp2 # RZ/V2L
- const: renesas,r9a07g044-vsp2 # RZ/G2L fallback
diff --git a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
index 9d90d8d0565a..947ad699cc5e 100644
--- a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- rockchip,rk3568-vepu
+ - rockchip,rk3588-vepu121
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index c57e1f488895..719aeb2dc593 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -26,11 +26,16 @@ properties:
- rockchip,rk3568-vpu
- rockchip,rk3588-av1-vpu
- items:
- - const: rockchip,rk3188-vpu
+ - enum:
+ - rockchip,rk3128-vpu
+ - rockchip,rk3188-vpu
- const: rockchip,rk3066-vpu
- items:
- const: rockchip,rk3228-vpu
- const: rockchip,rk3399-vpu
+ - items:
+ - const: rockchip,rk3588-vpu121
+ - const: rockchip,rk3568-vpu
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml b/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml
index 271d0577a83c..2ba27b230559 100644
--- a/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml
+++ b/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml
@@ -77,7 +77,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
maxItems: 2
description: |
- An array specyfing minimum image size in pixels at the FIMC input and
+ An array specifying minimum image size in pixels at the FIMC input and
output DMA, in the first and second cell respectively. Default value
is <16 16>.
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml
index 3f40ca5b13f6..ce4ec94a561c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml
@@ -134,9 +134,8 @@ allOf:
properties:
fsl,weim-cs-timing:
items:
- items:
- - description: CSxU
- - description: CSxL
+ - description: CSxU
+ - description: CSxL
- if:
properties:
compatible:
@@ -151,10 +150,9 @@ allOf:
properties:
fsl,weim-cs-timing:
items:
- items:
- - description: CSCRxU
- - description: CSCRxL
- - description: CSCRxA
+ - description: CSCRxU
+ - description: CSCRxL
+ - description: CSCRxA
- if:
properties:
compatible:
@@ -171,13 +169,12 @@ allOf:
properties:
fsl,weim-cs-timing:
items:
- items:
- - description: CSxGCR1
- - description: CSxGCR2
- - description: CSxRCR1
- - description: CSxRCR2
- - description: CSxWCR1
- - description: CSxWCR2
+ - description: CSxGCR1
+ - description: CSxGCR2
+ - description: CSxRCR1
+ - description: CSxRCR2
+ - description: CSxWCR1
+ - description: CSxWCR2
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
index d7745dd53b51..4f4bc953e31a 100644
--- a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
@@ -67,7 +67,9 @@ properties:
- const: dirmap
- const: wbuf
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 2
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml
new file mode 100644
index 000000000000..ee2272f754a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/adi,adp5585.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADP5585 Keypad Decoder and I/O Expansion
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description:
+ The ADP5585 is a 10/11 input/output port expander with a built-in keypad
+ matrix decoder, programmable logic, reset generator, and PWM generator.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - adi,adp5585-00 # Default
+ - adi,adp5585-01 # 11 GPIOs
+ - adi,adp5585-02 # No pull-up resistors by default on special pins
+ - adi,adp5585-03 # Alternate I2C address
+ - adi,adp5585-04 # Pull-down resistors on all pins by default
+ - const: adi,adp5585
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-reserved-ranges: true
+
+ "#pwm-cells":
+ const: 3
+
+patternProperties:
+ "-hog(-[0-9]+)?$":
+ type: object
+
+ required:
+ - gpio-hog
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - "#pwm-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: adi,adp5585-01
+ then:
+ properties:
+ gpio-reserved-ranges: false
+ else:
+ properties:
+ gpio-reserved-ranges:
+ maxItems: 1
+ items:
+ items:
+ - const: 5
+ - const: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ io-expander@34 {
+ compatible = "adi,adp5585-00", "adi,adp5585";
+ reg = <0x34>;
+
+ vdd-supply = <&reg_3v3>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-reserved-ranges = <5 1>;
+
+ #pwm-cells = <3>;
+ };
+ };
+
+...
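
The schema above accepts GPIO hog children via the "-hog(-[0-9]+)?$" pattern, though the in-file example does not show one. A hypothetical hog child (node name, pin number, and line-name invented for illustration):

    io-expander@34 {
            compatible = "adi,adp5585-00", "adi,adp5585";
            reg = <0x34>;
            gpio-controller;
            #gpio-cells = <2>;
            #pwm-cells = <3>;

            status-led-hog {
                    gpio-hog;
                    gpios = <3 0>;
                    output-high;
                    line-name = "status-led";
            };
    };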
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml
index 37423c2e0fdf..b67fbe0e7a63 100644
--- a/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml
@@ -37,6 +37,24 @@ properties:
"#interrupt-cells":
const: 2
+ mediatek,hp-pull-down:
+ description:
+ Earphone driver positive output stage short to
+ the audio reference ground.
+ type: boolean
+
+ mediatek,micbias0-microvolt:
+ description: Selects MIC Bias 0 output voltage.
+ enum: [1700000, 1800000, 1900000, 2000000,
+ 2100000, 2500000, 2600000, 2700000]
+ default: 1700000
+
+ mediatek,micbias1-microvolt:
+ description: Selects MIC Bias 1 output voltage.
+ enum: [1700000, 1800000, 1900000, 2000000,
+ 2100000, 2500000, 2600000, 2700000]
+ default: 1700000
+
regulators:
type: object
$ref: /schemas/regulator/mediatek,mt6357-regulator.yaml
@@ -83,6 +101,9 @@ examples:
interrupt-controller;
#interrupt-cells = <2>;
+ mediatek,micbias0-microvolt = <1700000>;
+ mediatek,micbias1-microvolt = <1700000>;
+
regulators {
mt6357_vproc_reg: buck-vproc {
regulator-name = "vproc";
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
index c6bd14ec5aa0..7d0b0b403150 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,msm8998-tcsr
- qcom,qcm2290-tcsr
- qcom,qcs404-tcsr
+ - qcom,sa8775p-tcsr
- qcom,sc7180-tcsr
- qcom,sc7280-tcsr
- qcom,sc8280xp-tcsr
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
index d381125a0a15..efee3de0d9ad 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml
@@ -25,7 +25,7 @@ properties:
description:
The PMIC provides intb and errb IRQ lines. The errb IRQ line is used
for fatal IRQs which will cause the PMIC to shut down power outputs.
- In many systems this will shut down the SoC contolling the PMIC and
+ In many systems this will shut down the SoC controlling the PMIC and
connecting/handling the errb can be omitted. However, there are cases
where the SoC is not powered by the PMIC or has a short time backup
energy to handle shutdown of critical hardware. In that case it may be
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
index bc8b5940b1c5..a4be642de33c 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
@@ -53,7 +53,7 @@ properties:
samsung,s2mps11-wrstbi-ground:
description: |
Indicates that WRSTBI pin of PMIC is pulled down. When the system is
- suspended it will always go down thus triggerring unwanted buck warm
+ suspended it will always go down thus triggering unwanted buck warm
reset (setting buck voltages to default values).
type: boolean
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 9dc594ea3654..cc9b17ad69f2 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -103,6 +103,7 @@ select:
- rockchip,rk3368-qos
- rockchip,rk3399-qos
- rockchip,rk3568-qos
+ - rockchip,rk3576-qos
- rockchip,rk3588-qos
- rockchip,rv1126-qos
- st,spear1340-misc
@@ -113,6 +114,7 @@ select:
- ti,am625-dss-oldi-io-ctrl
- ti,am62p-cpsw-mac-efuse
- ti,am654-dss-oldi-io-ctrl
+ - ti,j784s4-acspcie-proxy-ctrl
- ti,j784s4-pcie-ctrl
- ti,keystone-pllctrl
required:
@@ -198,6 +200,7 @@ properties:
- rockchip,rk3368-qos
- rockchip,rk3399-qos
- rockchip,rk3568-qos
+ - rockchip,rk3576-qos
- rockchip,rk3588-qos
- rockchip,rv1126-qos
- st,spear1340-misc
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index 06e9dd7a0d96..dfd8683ede0c 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -2,7 +2,7 @@ Texas Instruments TWL6040 family
The TWL6040s are 8-channel high quality low-power audio codecs providing audio,
vibra and GPO functionality on OMAP4+ platforms.
-They are connected ot the host processor via i2c for commands, McPDM for audio
+They are connected to the host processor via i2c for commands, McPDM for audio
data and commands.
Required properties:
diff --git a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
index b8e8db0d58e9..14ab367fc887 100644
--- a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
+++ b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml
@@ -274,7 +274,7 @@ properties:
Defines the work frequency of DC-DC in kHz.
patternProperties:
- "^(([a-f])?ldo[0-9]|dcdc[0-7a-e]|ldo(_|-)io(0|1)|(dc1)?sw|rtc(_|-)ldo|cpusldo|drivevbus|dc5ldo)$":
+ "^(([a-f])?ldo[0-9]|dcdc[0-7a-e]|ldo(_|-)io(0|1)|(dc1)?sw|rtc(_|-)ldo|cpusldo|drivevbus|dc5ldo|boost)$":
$ref: /schemas/regulator/regulator.yaml#
type: object
unevaluatedProperties: false
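
The widened pattern means a "boost" regulator child is now accepted alongside the existing names. A hedged sketch of such a node (regulator name and voltages illustrative):

    regulators {
            boost {
                    regulator-name = "usb-boost";
                    regulator-min-microvolt = <5000000>;
                    regulator-max-microvolt = <5000000>;
            };
    };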
diff --git a/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml b/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml
new file mode 100644
index 000000000000..accf1a7ecf12
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/misc/aspeed,ast2400-cvic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed Coprocessor Vectored Interrupt Controller
+
+maintainers:
+ - Andrew Jeffery <andrew@codeconstruct.com.au>
+
+description:
+ The Aspeed AST2400 and AST2500 SoCs have a controller that provides interrupts
+ to the ColdFire coprocessor. It's not a normal interrupt controller and it
+ would be rather inconvenient to create an interrupt tree for it, as it
+ somewhat shares some of the same sources as the main ARM interrupt controller
+ but with different numbers.
+
+ The AST2500 also supports a software generated interrupt.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - aspeed,ast2400-cvic
+ - aspeed,ast2500-cvic
+ - const: aspeed,cvic
+
+ reg:
+ maxItems: 1
+
+ valid-sources:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 1
+ description:
+ A bitmap of supported sources for the implementation.
+
+ copro-sw-interrupts:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ A list of interrupt numbers that can be used as software interrupts from
+ the ARM to the coprocessor.
+
+required:
+ - compatible
+ - reg
+ - valid-sources
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@1e6c2000 {
+ compatible = "aspeed,ast2500-cvic", "aspeed,cvic";
+ reg = <0x1e6c2000 0x80>;
+ valid-sources = <0xffffffff>;
+ copro-sw-interrupts = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
deleted file mode 100644
index d62c783d1d5e..000000000000
--- a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* ASPEED AST2400 and AST2500 coprocessor interrupt controller
-
-This file describes the bindings for the interrupt controller present
-in the AST2400 and AST2500 BMC SoCs which provides interrupt to the
-ColdFire coprocessor.
-
-It is not a normal interrupt controller and it would be rather
-inconvenient to create an interrupt tree for it as it somewhat shares
-some of the same sources as the main ARM interrupt controller but with
-different numbers.
-
-The AST2500 supports a SW generated interrupt
-
-Required properties:
-- reg: address and length of the register for the device.
-- compatible: "aspeed,cvic" and one of:
- "aspeed,ast2400-cvic"
- or
- "aspeed,ast2500-cvic"
-
-- valid-sources: One cell, bitmap of supported sources for the implementation
-
-Optional properties;
-- copro-sw-interrupts: List of interrupt numbers that can be used as
- SW interrupts from the ARM to the coprocessor.
- (AST2500 only)
-
-Example:
-
- cvic: copro-interrupt-controller@1e6c2000 {
- compatible = "aspeed,ast2500-cvic";
- valid-sources = <0xffffffff>;
- copro-sw-interrupts = <1>;
- reg = <0x1e6c2000 0x80>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml b/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml
new file mode 100644
index 000000000000..8c8ade88e8fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/atmel,sama5d2-sdhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel SDHCI controller
+
+maintainers:
+ - Aubin Constans <aubin.constans@microchip.com>
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+
+description:
+ Bindings for the SDHCI controller found in Atmel/Microchip SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - atmel,sama5d2-sdhci
+ - microchip,sam9x60-sdhci
+ - items:
+ - enum:
+ - microchip,sam9x7-sdhci
+ - microchip,sama7g5-sdhci
+ - const: microchip,sam9x60-sdhci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: hclock
+ - description: multclk
+ - description: baseclk
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: hclock
+ - const: multclk
+ - const: baseclk
+ minItems: 2
+
+ microchip,sdcal-inverted:
+ type: boolean
+ description:
+ When present, polarity on the SDCAL SoC pin is inverted. The default
+ polarity for this signal is described in the datasheet. For instance on
+ SAMA5D2, the pin is usually tied to the GND with a resistor and a
+ capacitor (see "SDMMC I/O Calibration" chapter).
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: sdhci-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - atmel,sama5d2-sdhci
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ clock-names:
+ minItems: 3
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/at91.h>
+ mmc@a0000000 {
+ compatible = "atmel,sama5d2-sdhci";
+ reg = <0xa0000000 0x300>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
+ clock-names = "hclock", "multclk", "baseclk";
+ assigned-clocks = <&sdmmc0_gclk>;
+ assigned-clock-rates = <480000000>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml b/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml
new file mode 100644
index 000000000000..4d787147c300
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/nuvoton,ma35d1-sdhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton MA35D1 SD/SDIO/MMC Controller
+
+maintainers:
+ - Shan-Chun Hung <shanchun1218@gmail.com>
+
+allOf:
+ - $ref: sdhci-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,ma35d1-sdhci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: state_uhs
+
+ pinctrl-0:
+ description:
+ Should contain default/high speed pin ctrl.
+ maxItems: 1
+
+ pinctrl-1:
+ description:
+ Should contain uhs mode pin ctrl.
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ nuvoton,sys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to access GCR (Global Control Register) registers.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - pinctrl-names
+ - pinctrl-0
+ - resets
+ - nuvoton,sys
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>
+ #include <dt-bindings/reset/nuvoton,ma35d1-reset.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ mmc@40190000 {
+ compatible = "nuvoton,ma35d1-sdhci";
+ reg = <0x0 0x40190000 0x0 0x2000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk SDH1_GATE>;
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&pinctrl_sdhci1>;
+ pinctrl-1 = <&pinctrl_sdhci1_uhs>;
+ resets = <&sys MA35D1_RESET_SDH1>;
+ nuvoton,sys = <&sys>;
+ vqmmc-supply = <&sdhci1_vqmmc_regulator>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 3d0e61e59856..af378b9ff3f4 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -18,6 +18,7 @@ properties:
- renesas,sdhi-r7s9210 # RZ/A2
- renesas,sdhi-r8a73a4 # R-Mobile APE6
- renesas,sdhi-r8a7740 # R-Mobile A1
+ - renesas,sdhi-r9a09g057 # RZ/V2H(P)
- renesas,sdhi-sh73a0 # SH-Mobile AG5
- items:
- enum:
@@ -75,9 +76,13 @@ properties:
minItems: 1
maxItems: 3
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 4
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 4
dmas:
minItems: 4
@@ -118,7 +123,9 @@ allOf:
properties:
compatible:
contains:
- const: renesas,rzg2l-sdhi
+ enum:
+ - renesas,sdhi-r9a09g057
+ - renesas,rzg2l-sdhi
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 211cd0b0bc5f..06df1269f247 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -43,6 +43,8 @@ properties:
- rockchip,rv1108-dw-mshc
- rockchip,rv1126-dw-mshc
- const: rockchip,rk3288-dw-mshc
+ # for Rockchip RK3576 with phase tuning inside the controller
+ - const: rockchip,rk3576-dw-mshc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
deleted file mode 100644
index a9fb0a91245f..000000000000
--- a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* Atmel SDHCI controller
-
-This file documents the differences between the core properties in
-Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the
-sdhci-of-at91 driver.
-
-Required properties:
-- compatible: Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci"
- or "microchip,sam9x7-sdhci", "microchip,sam9x60-sdhci".
-- clocks: Phandlers to the clocks.
-- clock-names: Must be "hclock", "multclk", "baseclk" for
- "atmel,sama5d2-sdhci".
- Must be "hclock", "multclk" for "microchip,sam9x60-sdhci".
- Must be "hclock", "multclk" for "microchip,sam9x7-sdhci".
-
-Optional properties:
-- assigned-clocks: The same with "multclk".
-- assigned-clock-rates The rate of "multclk" in order to not rely on the
- gck configuration set by previous components.
-- microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
- inverted. The default polarity for this signal is described in the datasheet.
- For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
- and a capacitor (see "SDMMC I/O Calibration" chapter).
-
-Example:
-
-mmc0: sdio-host@a0000000 {
- compatible = "atmel,sama5d2-sdhci";
- reg = <0xa0000000 0x300>;
- interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
- clock-names = "hclock", "multclk", "baseclk";
- assigned-clocks = <&sdmmc0_gclk>;
- assigned-clock-rates = <480000000>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
index 4d3031d9965f..c3d5e0230af1 100644
--- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
@@ -10,18 +10,20 @@ maintainers:
- Ulf Hansson <ulf.hansson@linaro.org>
- Jisheng Zhang <Jisheng.Zhang@synaptics.com>
-allOf:
- - $ref: mmc-controller.yaml#
-
properties:
compatible:
- enum:
- - rockchip,rk3568-dwcmshc
- - rockchip,rk3588-dwcmshc
- - snps,dwcmshc-sdhci
- - sophgo,cv1800b-dwcmshc
- - sophgo,sg2002-dwcmshc
- - thead,th1520-dwcmshc
+ oneOf:
+ - items:
+ - const: rockchip,rk3576-dwcmshc
+ - const: rockchip,rk3588-dwcmshc
+ - enum:
+ - rockchip,rk3568-dwcmshc
+ - rockchip,rk3588-dwcmshc
+ - snps,dwcmshc-sdhci
+ - sophgo,cv1800b-dwcmshc
+ - sophgo,sg2002-dwcmshc
+ - sophgo,sg2042-dwcmshc
+ - thead,th1520-dwcmshc
reg:
maxItems: 1
@@ -31,22 +33,14 @@ properties:
clocks:
minItems: 1
- items:
- - description: core clock
- - description: bus clock for optional
- - description: axi clock for rockchip specified
- - description: block clock for rockchip specified
- - description: timer clock for rockchip specified
-
+ maxItems: 5
clock-names:
minItems: 1
- items:
- - const: core
- - const: bus
- - const: axi
- - const: block
- - const: timer
+ maxItems: 5
+
+ power-domains:
+ maxItems: 1
resets:
maxItems: 5
@@ -63,7 +57,6 @@ properties:
description: Specify the number of delay for tx sampling.
$ref: /schemas/types.yaml#/definitions/uint8
-
required:
- compatible
- reg
@@ -71,6 +64,60 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: mmc-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: sophgo,sg2042-dwcmshc
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: core clock
+ - description: bus clock
+ - description: timer clock
+ clock-names:
+ items:
+ - const: core
+ - const: bus
+ - const: timer
+ else:
+ properties:
+ clocks:
+ minItems: 1
+ items:
+ - description: core clock
+ - description: bus clock for optional
+ - description: axi clock for rockchip specified
+ - description: block clock for rockchip specified
+ - description: timer clock for rockchip specified
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: bus
+ - const: axi
+ - const: block
+ - const: timer
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3576-dwcmshc
+
+ then:
+ required:
+ - power-domains
+
+ else:
+ properties:
+ power-domains: false
+
unevaluatedProperties: false
examples:
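
Under the new rules, an RK3576 host must chain to the RK3588 compatible and carry a power-domains phandle, which all other variants must omit. A minimal sketch with invented address, interrupt, clock, and power-domain specifiers:

    mmc@2a310000 {
            compatible = "rockchip,rk3576-dwcmshc", "rockchip,rk3588-dwcmshc";
            reg = <0x2a310000 0x4000>;
            interrupts = <0 100 4>;
            clocks = <&cru 100>, <&cru 101>;
            clock-names = "core", "bus";
            power-domains = <&power 5>;
    };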
diff --git a/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml b/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml
index 115682fa81b7..00540302bcae 100644
--- a/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml
@@ -61,12 +61,9 @@ properties:
GPIO connection to R/B signal from NAND chip
maxItems: 1
-patternProperties:
- "@[0-9a-f]+$":
- $ref: /schemas/mtd/partitions/partition.yaml
-
allOf:
- $ref: /schemas/memory-controllers/ti,gpmc-child.yaml
+ - $ref: mtd.yaml#
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index ee7a65b528cd..d1e2bca3c503 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -58,18 +58,18 @@ allOf:
- const: timing-adjustment
amlogic,tx-delay-ns:
- $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 2, 4, 6]
+ default: 2
description:
- The internal RGMII TX clock delay (provided by this driver) in
- nanoseconds. Allowed values are 0ns, 2ns, 4ns, 6ns.
- When phy-mode is set to "rgmii" then the TX delay should be
- explicitly configured. When not configured a fallback of 2ns is
- used. When the phy-mode is set to either "rgmii-id" or "rgmii-txid"
- the TX clock delay is already provided by the PHY. In that case
- this property should be set to 0ns (which disables the TX clock
- delay in the MAC to prevent the clock from going off because both
- PHY and MAC are adding a delay).
- Any configuration is ignored when the phy-mode is set to "rmii".
+ The internal RGMII TX clock delay (provided by this driver)
+ in nanoseconds. When phy-mode is set to "rgmii" then the TX
+ delay should be explicitly configured. When the phy-mode is
+ set to either "rgmii-id" or "rgmii-txid" the TX clock delay
+ is already provided by the PHY. In that case this property
+ should be set to 0ns (which disables the TX clock delay in
+ the MAC to prevent the clock from going off because both
+ PHY and MAC are adding a delay). Any configuration is
+ ignored when the phy-mode is set to "rmii".
amlogic,rx-delay-ns:
deprecated: true
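
With the enum and default in place, a board using phy-mode = "rgmii" can simply omit the property and get the 2 ns fallback, while "rgmii-id"/"rgmii-txid" boards should set it to 0 as the description says. A sketch (the &ethmac label is illustrative):

    &ethmac {
            phy-mode = "rgmii-id";
            amlogic,tx-delay-ns = <0>;      /* delay already added by the PHY */
    };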
diff --git a/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml
new file mode 100644
index 000000000000..6fd7557039d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2024 Amlogic, Inc. All rights reserved
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/bluetooth/amlogic,w155s2-bt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic Bluetooth chips
+
+description:
+ The W155S2 is an Amlogic Bluetooth and Wi-Fi combo chip. It works on
+ the standard H4 protocol via a 4-wire UART interface, with baud rates
+ up to 4 Mbps.
+
+maintainers:
+ - Yang Li <yang.li@amlogic.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,w265s1-bt
+ - amlogic,w265p1-bt
+ - const: amlogic,w155s2-bt
+ - enum:
+ - amlogic,w155s2-bt
+ - amlogic,w265s2-bt
+
+ clocks:
+ maxItems: 1
+ description: clock provided to the controller (32.768 kHz)
+
+ enable-gpios:
+ maxItems: 1
+
+ vddio-supply:
+ description: VDD_IO supply regulator handle
+
+ firmware-name:
+ maxItems: 1
+ description: specify the path of the firmware binary to load
+
+required:
+ - compatible
+ - clocks
+ - enable-gpios
+ - vddio-supply
+ - firmware-name
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ bluetooth {
+ compatible = "amlogic,w155s2-bt";
+ clocks = <&extclk>;
+ enable-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ vddio-supply = <&wcn_3v3>;
+ firmware-name = "amlogic/aml_w155s2_bt_uart.bin";
+ };
+
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/brcm,bluetooth.yaml
index 4a1bfc2b3584..3c410cadff23 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/brcm,bluetooth.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/broadcom-bluetooth.yaml#
+$id: http://devicetree.org/schemas/net/bluetooth/brcm,bluetooth.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom Bluetooth Chips
@@ -119,29 +119,28 @@ properties:
items:
- const: host-wakeup
- max-speed: true
- current-speed: true
-
required:
- compatible
dependencies:
brcm,requires-autobaud-mode: [ shutdown-gpios ]
-if:
- not:
- properties:
- compatible:
- contains:
- enum:
- - brcm,bcm20702a1
- - brcm,bcm4329-bt
- - brcm,bcm4330-bt
-then:
- properties:
- reset-gpios: false
-
-additionalProperties: false
+allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm20702a1
+ - brcm,bcm4329-bt
+ - brcm,bcm4330-bt
+ then:
+ properties:
+ reset-gpios: false
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/marvell,88w8897.yaml
index 188a42ca6ceb..2fc36874deb7 100644
--- a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/marvell,88w8897.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/marvell-bluetooth.yaml#
+$id: http://devicetree.org/schemas/net/bluetooth/marvell,88w8897.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell Bluetooth chips
@@ -19,13 +19,13 @@ properties:
- mrvl,88w8897
- mrvl,88w8997
- max-speed:
- description: see Documentation/devicetree/bindings/serial/serial.yaml
+ max-speed: true
required:
- compatible
allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt
index 988c72685cbf..988c72685cbf 100644
--- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt
diff --git a/Documentation/devicetree/bindings/net/nokia-bluetooth.txt b/Documentation/devicetree/bindings/net/bluetooth/nokia,h4p-bluetooth.txt
index 42be7dc9a70b..42be7dc9a70b 100644
--- a/Documentation/devicetree/bindings/net/nokia-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/bluetooth/nokia,h4p-bluetooth.txt
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index 68c5ed111417..7bb68311c609 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -72,7 +72,7 @@ properties:
description: VDD_RFA_CMN supply regulator handle
vddrfa0p8-supply:
- description: VDD_RFA_0P8 suppply regulator handle
+ description: VDD_RFA_0P8 supply regulator handle
vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle
@@ -98,8 +98,7 @@ properties:
vddwlmx-supply:
description: VDD_WLMX supply regulator handle
- max-speed:
- description: see Documentation/devicetree/bindings/serial/serial.yaml
+ max-speed: true
firmware-name:
description: specify the name of nvm firmware to load
@@ -118,6 +117,7 @@ additionalProperties: false
allOf:
- $ref: bluetooth-controller.yaml#
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
- if:
properties:
compatible:
@@ -172,14 +172,14 @@ allOf:
- qcom,wcn6855-bt
then:
required:
- - enable-gpios
- - swctrl-gpios
- - vddio-supply
- - vddbtcxmx-supply
- vddrfacmn-supply
+ - vddaon-supply
+ - vddwlcx-supply
+ - vddwlmx-supply
+ - vddbtcmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- - vddrfa1p7-supply
+ - vddrfa1p8-supply
- if:
properties:
compatible:
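
Taken together, the updated required list for WCN6855 boards looks like the following sketch; the regulator labels are placeholders, only the property names come from the schema:

    bluetooth {
            compatible = "qcom,wcn6855-bt";
            vddrfacmn-supply = <&vreg_s7b_0p9>;
            vddaon-supply = <&vreg_s7b_0p9>;
            vddwlcx-supply = <&vreg_s7b_0p9>;
            vddwlmx-supply = <&vreg_s7b_0p9>;
            vddbtcmx-supply = <&vreg_s7b_0p9>;
            vddrfa0p8-supply = <&vreg_s7b_0p9>;
            vddrfa1p2-supply = <&vreg_s8b_1p2>;
            vddrfa1p8-supply = <&vreg_s1b_1p8>;
    };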
diff --git a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/realtek,bluetooth.yaml
index 043e118c605c..7d567122bac9 100644
--- a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/realtek,bluetooth.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/realtek-bluetooth.yaml#
+$id: http://devicetree.org/schemas/net/bluetooth/realtek,bluetooth.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS Bluetooth
@@ -46,6 +46,9 @@ properties:
required:
- compatible
+allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/ti,bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/ti,bluetooth.yaml
index 81616f9fb493..290abc22e18a 100644
--- a/Documentation/devicetree/bindings/net/ti,bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/ti,bluetooth.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/ti,bluetooth.yaml#
+$id: http://devicetree.org/schemas/net/bluetooth/ti,bluetooth.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments Bluetooth Chips
@@ -74,6 +74,9 @@ properties:
required:
- compatible
+allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
index f197d9b516bb..97dd1a7c5ed2 100644
--- a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
+++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
oneOf:
- enum:
+ - fsl,imx95-flexcan
- fsl,imx93-flexcan
- fsl,imx8qm-flexcan
- fsl,imx8mp-flexcan
@@ -39,9 +40,6 @@ properties:
- fsl,imx6sx-flexcan
- const: fsl,imx6q-flexcan
- items:
- - const: fsl,imx95-flexcan
- - const: fsl,imx93-flexcan
- - items:
- enum:
- fsl,ls1028ar1-flexcan
- const: fsl,lx2160ar1-flexcan
@@ -80,6 +78,10 @@ properties:
node then controller is assumed to be little endian. If this property is
present then controller is assumed to be big endian.
+ can-transceiver:
+ $ref: can-transceiver.yaml#
+ unevaluatedProperties: false
+
fsl,stop-mode:
description: |
Register bits of stop mode control.
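
The new can-transceiver child follows the shared can-transceiver.yaml schema, so a node can now bound the attached transceiver's bitrate in the tree. A hedged sketch for the standalone i.MX95 compatible (address, interrupt, and clock specifiers invented):

    can@425b0000 {
            compatible = "fsl,imx95-flexcan";
            reg = <0x425b0000 0x10000>;
            interrupts = <0 9 4>;
            clocks = <&clk 10>, <&clk 11>;
            clock-names = "ipg", "per";

            can-transceiver {
                    max-bitrate = <5000000>;
            };
    };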
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml
new file mode 100644
index 000000000000..db446dde6842
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip MCP251X stand-alone CAN controller
+
+maintainers:
+ - Marc Kleine-Budde <mkl@pengutronix.de>
+
+properties:
+ compatible:
+ enum:
+ - microchip,mcp2510
+ - microchip,mcp2515
+ - microchip,mcp25625
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that powers the CAN controller.
+
+ xceiver-supply:
+ description: Regulator that powers the CAN transceiver.
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ can@1 {
+ compatible = "microchip,mcp2515";
+ reg = <1>;
+ clocks = <&clk24m>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&reg5v0>;
+ xceiver-supply = <&reg5v0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
deleted file mode 100644
index 381f8fb3e865..000000000000
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* Microchip MCP251X stand-alone CAN controller device tree bindings
-
-Required properties:
- - compatible: Should be one of the following:
- - "microchip,mcp2510" for MCP2510.
- - "microchip,mcp2515" for MCP2515.
- - "microchip,mcp25625" for MCP25625.
- - reg: SPI chip select.
- - clocks: The clock feeding the CAN controller.
- - interrupts: Should contain IRQ line for the CAN controller.
-
-Optional properties:
- - vdd-supply: Regulator that powers the CAN controller.
- - xceiver-supply: Regulator that powers the CAN transceiver.
- - gpio-controller: Indicates this device is a GPIO controller.
- - #gpio-cells: Should be two. The first cell is the pin number and
- the second cell is used to specify the gpio polarity.
-
-Example:
- can0: can@1 {
- compatible = "microchip,mcp2515";
- reg = <1>;
- clocks = <&clk24m>;
- interrupt-parent = <&gpio4>;
- interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
- vdd-supply = <&reg5v0>;
- xceiver-supply = <&reg5v0>;
- gpio-controller;
- #gpio-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index d3f45d29fa0a..7c5ac5d2e880 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -32,6 +32,7 @@ properties:
- enum:
- renesas,r8a779a0-canfd # R-Car V3U
- renesas,r8a779g0-canfd # R-Car V4H
+ - renesas,r8a779h0-canfd # R-Car V4M
- const: renesas,rcar-gen4-canfd # R-Car Gen4
- items:
@@ -163,14 +164,23 @@ allOf:
maxItems: 1
- if:
- not:
- properties:
- compatible:
- contains:
- const: renesas,rcar-gen4-canfd
+ properties:
+ compatible:
+ contains:
+ const: renesas,r8a779h0-canfd
then:
patternProperties:
- "^channel[2-7]$": false
+ "^channel[5-7]$": false
+ else:
+ if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: renesas,rcar-gen4-canfd
+ then:
+ patternProperties:
+ "^channel[2-7]$": false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml b/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml
new file mode 100644
index 000000000000..a077c0330013
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/rockchip,rk3568v2-canfd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+ Rockchip CAN-FD controller
+
+maintainers:
+ - Marc Kleine-Budde <mkl@pengutronix.de>
+
+allOf:
+ - $ref: can-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: rockchip,rk3568v2-canfd
+ - items:
+ - const: rockchip,rk3568v3-canfd
+ - const: rockchip,rk3568v2-canfd
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: baud
+ - const: pclk
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: core
+ - const: apb
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3568-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ can@fe570000 {
+ compatible = "rockchip,rk3568v2-canfd";
+ reg = <0x0 0xfe570000 0x0 0x1000>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_CAN0>, <&cru PCLK_CAN0>;
+ clock-names = "baud", "pclk";
+ resets = <&cru SRST_CAN0>, <&cru SRST_P_CAN0>;
+ reset-names = "core", "apb";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index 7e405ad96eb2..ea979bcae1d6 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -92,6 +92,10 @@ properties:
Built-in switch of the MT7988 SoC
const: mediatek,mt7988-switch
+ - description:
+ Built-in switch of the Airoha EN7581 SoC
+ const: airoha,en7581-switch
+
reg:
maxItems: 1
@@ -284,7 +288,9 @@ allOf:
- if:
properties:
compatible:
- const: mediatek,mt7988-switch
+ enum:
+ - mediatek,mt7988-switch
+ - airoha,en7581-switch
then:
$ref: "#/$defs/mt7530-dsa-port"
properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 52acc15ebcbf..30c0c3e6f37a 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -22,7 +22,9 @@ properties:
- microchip,ksz8794
- microchip,ksz8795
- microchip,ksz8863
+ - microchip,ksz8864 # 4-port version of KSZ8895 family switch
- microchip,ksz8873
+ - microchip,ksz8895 # 5-port version of KSZ8895 family switch
- microchip,ksz9477
- microchip,ksz9897
- microchip,ksz9896
@@ -51,6 +53,11 @@ properties:
Set if the output SYNCLKO clock should be disabled. Do not mix with
microchip,synclko-125.
+ microchip,pme-active-high:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates if the PME pin polarity is active-high.
+
microchip,io-drive-strength-microamp:
description:
IO Pad Drive Strength
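
A hypothetical KSZ8895 node using the new PME polarity flag (SPI chip-select and bus context assumed):

    ethernet-switch@0 {
            compatible = "microchip,ksz8895";
            reg = <0>;
            microchip,pme-active-high;
    };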
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
index b99d7a694b70..51cf574249be 100644
--- a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
@@ -52,6 +52,25 @@ properties:
allOf:
- $ref: dsa.yaml#/$defs/ethernet-ports
+patternProperties:
+ "^(ethernet-)?ports$":
+ additionalProperties: true
+ patternProperties:
+ "^(ethernet-)?port@6$":
+ allOf:
+ - if:
+ properties:
+ phy-mode:
+ contains:
+ enum:
+ - rgmii
+ then:
+ properties:
+ rx-internal-delay-ps:
+ $ref: "#/$defs/internal-delay-ps"
+ tx-internal-delay-ps:
+ $ref: "#/$defs/internal-delay-ps"
+
# This checks if reg is a chipselect so the device is on an SPI
# bus, the if-clause will fail if reg is a tuple such as for a
# platform device.
@@ -67,6 +86,15 @@ required:
- compatible
- reg
+$defs:
+ internal-delay-ps:
+ description:
+ Disable tunable delay lines using 0 ps, or enable them and select
+ the phase between 1400 ps and 2000 ps in increments of 300 ps.
+ default: 2000
+ enum:
+ [0, 1400, 1700, 2000]
+
unevaluatedProperties: false
examples:
@@ -108,6 +136,8 @@ examples:
reg = <6>;
ethernet = <&gmac1>;
phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
fixed-link {
speed = <1000>;
full-duplex;
@@ -150,6 +180,8 @@ examples:
ethernet-port@6 {
reg = <6>;
ethernet = <&enet0>;
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
phy-mode = "rgmii";
fixed-link {
speed = <1000>;
diff --git a/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml b/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml
new file mode 100644
index 000000000000..da836477e8ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,cpm-enet.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale CPM Ethernet controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,cpm1-scc-enet
+ - fsl,cpm2-scc-enet
+ - fsl,cpm1-fec-enet
+ - fsl,cpm2-fcc-enet
+ - fsl,qe-enet
+ - items:
+ - enum:
+ - fsl,mpc8272-fcc-enet
+ - const: fsl,cpm2-fcc-enet
+
+ reg:
+ minItems: 1
+ maxItems: 3
+
+ interrupts:
+ maxItems: 1
+
+ fsl,cpm-command:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: cpm command
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: ethernet-controller.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@11300 {
+ compatible = "fsl,mpc8272-fcc-enet",
+ "fsl,cpm2-fcc-enet";
+ reg = <0x11300 0x20 0x8400 0x100 0x11390 1>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <20 8>;
+ interrupt-parent = <&pic>;
+ phy-handle = <&phy0>;
+ fsl,cpm-command = <0x12000300>;
+ };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml
new file mode 100644
index 000000000000..b1791a3c490e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,cpm-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale CPM MDIO Device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,pq1-fec-mdio
+ - fsl,cpm2-mdio-bitbang
+ - items:
+ - const: fsl,mpc8272ads-mdio-bitbang
+ - const: fsl,mpc8272-mdio-bitbang
+ - const: fsl,cpm2-mdio-bitbang
+
+ reg:
+ maxItems: 1
+
+ fsl,mdio-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: pin of port C controlling mdio data
+
+ fsl,mdc-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: pin of port C controlling mdio clock
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: mdio.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio@10d40 {
+ compatible = "fsl,mpc8272ads-mdio-bitbang",
+ "fsl,mpc8272-mdio-bitbang",
+ "fsl,cpm2-mdio-bitbang";
+ reg = <0x10d40 0x14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ fsl,mdio-pin = <12>;
+ fsl,mdc-pin = <13>;
+ };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
index 42f9843d1868..be8a2163b73e 100644
--- a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
@@ -24,20 +24,12 @@ properties:
maxItems: 1
description: The DPMAC number
- phy-handle: true
-
- phy-connection-type: true
-
- phy-mode: true
-
pcs-handle:
maxItems: 1
description:
A reference to a node representing a PCS PHY device found on
the internal MDIO bus.
- managed: true
-
phys:
description: A reference to the SerDes lane(s)
maxItems: 1
@@ -45,7 +37,7 @@ properties:
required:
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/maxim,ds26522.txt b/Documentation/devicetree/bindings/net/maxim,ds26522.txt
deleted file mode 100644
index ee8bb725f245..000000000000
--- a/Documentation/devicetree/bindings/net/maxim,ds26522.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-* Maxim (Dallas) DS26522 Dual T1/E1/J1 Transceiver
-
-Required properties:
-- compatible: Should contain "maxim,ds26522".
-- reg: SPI CS.
-- spi-max-frequency: SPI clock.
-
-Example:
- slic@1 {
- compatible = "maxim,ds26522";
- reg = <1>;
- spi-max-frequency = <2000000>; /* input clock */
- };
diff --git a/Documentation/devicetree/bindings/net/maxim,ds26522.yaml b/Documentation/devicetree/bindings/net/maxim,ds26522.yaml
new file mode 100644
index 000000000000..6c97eda217e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/maxim,ds26522.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/maxim,ds26522.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim (Dallas) DS26522 Dual T1/E1/J1 Transceiver
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ items:
+ - const: maxim,ds26522
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ transceiver@1 {
+ compatible = "maxim,ds26522";
+ reg = <1>;
+ spi-max-frequency = <2000000>; /* input clock */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml
index a266ade918ca..bed3987a8fbf 100644
--- a/Documentation/devicetree/bindings/net/mdio.yaml
+++ b/Documentation/devicetree/bindings/net/mdio.yaml
@@ -19,7 +19,7 @@ description:
properties:
$nodename:
- pattern: "^mdio(@.*)?"
+ pattern: '^mdio(-(bus|external))?(@.+|-([0-9]+))?$'
"#address-cells":
const: 1
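
The tightened pattern anchors the node name and admits a few more spellings. Node names that match the new regex, for illustration:

    mdio@ff000000 { };      /* unit address, as before */
    mdio-bus@0 { };         /* "mdio-bus" variant */
    mdio-external { };      /* external bus, no unit address */
    mdio-0 { };             /* numeric instance suffix */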
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index 686b5c2fae40..9e02fd80af83 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -30,8 +30,13 @@ properties:
reg:
maxItems: 1
- clocks: true
- clock-names: true
+ clocks:
+ minItems: 2
+ maxItems: 24
+
+ clock-names:
+ minItems: 2
+ maxItems: 24
interrupts:
minItems: 1
@@ -127,6 +132,7 @@ allOf:
then:
properties:
interrupts:
+ minItems: 3
maxItems: 3
clocks:
@@ -183,6 +189,7 @@ allOf:
then:
properties:
interrupts:
+ minItems: 3
maxItems: 3
clocks:
@@ -222,6 +229,7 @@ allOf:
then:
properties:
interrupts:
+ minItems: 3
maxItems: 3
clocks:
diff --git a/Documentation/devicetree/bindings/net/microchip,lan8650.yaml b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml
new file mode 100644
index 000000000000..61e11d4a07c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/microchip,lan8650.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip LAN8650/1 10BASE-T1S MACPHY Ethernet Controllers
+
+maintainers:
+ - Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+description:
+ The LAN8650/1 combines a Media Access Controller (MAC) and an Ethernet
+ PHY to enable 10BASE‑T1S networks. The Ethernet Media Access Controller
+ (MAC) module implements a 10 Mbps half duplex Ethernet MAC, compatible
+ with the IEEE 802.3 standard and a 10BASE-T1S physical layer transceiver
+ integrated into the LAN8650/1. The communication between the Host and
+ the MAC-PHY is specified in the OPEN Alliance 10BASE-T1x MACPHY Serial
+ Interface (TC6).
+
+allOf:
+ - $ref: /schemas/net/ethernet-controller.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: microchip,lan8650
+ - items:
+ - const: microchip,lan8651
+ - const: microchip,lan8650
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Interrupt from MAC-PHY asserted in the event of Receive Chunks
+ Available, Transmit Chunk Credits Available and Extended Status
+ Event.
+ maxItems: 1
+
+ spi-max-frequency:
+ minimum: 15000000
+ maximum: 25000000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - spi-max-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "microchip,lan8651", "microchip,lan8650";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0_pins>;
+ interrupt-parent = <&gpio>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ local-mac-address = [04 05 06 01 02 03];
+ spi-max-frequency = <15000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
index 85bfa45f5122..a754a61adc2d 100644
--- a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
@@ -14,8 +14,53 @@ maintainers:
description:
Bindings for NXP TJA11xx automotive PHYs
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id0180.dc40
+ - ethernet-phy-id0180.dc41
+ - ethernet-phy-id0180.dc48
+ - ethernet-phy-id0180.dd00
+ - ethernet-phy-id0180.dd01
+ - ethernet-phy-id0180.dd02
+ - ethernet-phy-id0180.dc80
+ - ethernet-phy-id0180.dc82
+ - ethernet-phy-id001b.b010
+ - ethernet-phy-id001b.b013
+ - ethernet-phy-id001b.b030
+ - ethernet-phy-id001b.b031
+
allOf:
- $ref: ethernet-phy.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ethernet-phy-id0180.dc40
+ - ethernet-phy-id0180.dc41
+ - ethernet-phy-id0180.dc48
+ - ethernet-phy-id0180.dd00
+ - ethernet-phy-id0180.dd01
+ - ethernet-phy-id0180.dd02
+
+ then:
+ properties:
+ nxp,rmii-refclk-in:
+ type: boolean
+ description: |
+ The REF_CLK is provided for both transmitted and received data
+ in RMII mode. This clock signal is provided by the PHY and is
+ typically derived from an external 25MHz crystal. Alternatively,
+ a 50MHz clock signal generated by an external oscillator can be
+ connected to pin REF_CLK. A third option is to connect a 25MHz
+ clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
+ as input or output according to the actual circuit connection.
+ If present, indicates that the REF_CLK will be configured as
+ interface reference clock input when RMII mode enabled.
+ If not present, the REF_CLK will be configured as interface
+ reference clock output when RMII mode enabled.
+ Only supported on TJA1100 and TJA1101.
patternProperties:
"^ethernet-phy@[0-9a-f]+$":
@@ -32,22 +77,6 @@ patternProperties:
description:
The ID number for the child PHY. Should be +1 of parent PHY.
- nxp,rmii-refclk-in:
- type: boolean
- description: |
- The REF_CLK is provided for both transmitted and received data
- in RMII mode. This clock signal is provided by the PHY and is
- typically derived from an external 25MHz crystal. Alternatively,
- a 50MHz clock signal generated by an external oscillator can be
- connected to pin REF_CLK. A third option is to connect a 25MHz
- clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
- as input or output according to the actual circuit connection.
- If present, indicates that the REF_CLK will be configured as
- interface reference clock input when RMII mode enabled.
- If not present, the REF_CLK will be configured as interface
- reference clock output when RMII mode enabled.
- Only supported on TJA1100 and TJA1101.
-
required:
- reg
@@ -60,6 +89,7 @@ examples:
#size-cells = <0>;
tja1101_phy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-id0180.dc40";
reg = <0x4>;
nxp,rmii-refclk-in;
};
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
index 6992d56832bf..d08abcb01211 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -23,6 +23,9 @@ properties:
'#pse-cells':
const: 1
+ reset-gpios:
+ maxItems: 1
+
channels:
description: each set of 8 ports can be assigned to one physical
channel or two for PoE4. This parameter describes the configuration
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 21a92f179093..1e00ef5b3acd 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -62,15 +62,27 @@ properties:
- renesas,r9a08g045-gbeth # RZ/G3S
- const: renesas,rzg2l-gbeth # RZ/{G2L,G2UL,V2L} family
- reg: true
+ reg:
+ minItems: 1
+ items:
+ - description: MAC register block
+ - description: Stream buffer
- interrupts: true
+ interrupts:
+ minItems: 1
+ maxItems: 29
- interrupt-names: true
+ interrupt-names:
+ minItems: 1
+ maxItems: 29
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 3
iommus:
maxItems: 1
@@ -150,14 +162,11 @@ allOf:
then:
properties:
reg:
- items:
- - description: MAC register block
- - description: Stream buffer
+ minItems: 2
else:
properties:
reg:
- items:
- - description: MAC register block
+ maxItems: 1
- if:
properties:
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index 6bbe96e35250..f8a576611d6c 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -25,6 +25,7 @@ select:
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
- rockchip,rk3568-gmac
+ - rockchip,rk3576-gmac
- rockchip,rk3588-gmac
- rockchip,rv1108-gmac
- rockchip,rv1126-gmac
@@ -52,6 +53,7 @@ properties:
- items:
- enum:
- rockchip,rk3568-gmac
+ - rockchip,rk3576-gmac
- rockchip,rk3588-gmac
- rockchip,rv1126-gmac
- const: snps,dwmac-4.20a
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 3eb65e63fdae..4e2ba1bf788c 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -80,6 +80,7 @@ properties:
- rockchip,rk3328-gmac
- rockchip,rk3366-gmac
- rockchip,rk3368-gmac
+ - rockchip,rk3576-gmac
- rockchip,rk3588-gmac
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
index b0ebcef6801c..4eb63b303cff 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
@@ -41,13 +41,17 @@ properties:
minItems: 1
maxItems: 4
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 4
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
socionext,syscon-phy-mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml b/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml
new file mode 100644
index 000000000000..1715b22e0dcf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/marvell,sd8787.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices
+
+maintainers:
+ - Brian Norris <briannorris@chromium.org>
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ This node provides properties for describing the Marvell SDIO/PCIE wireless device.
+ The node is expected to be specified as a child node to the SDIO/PCIE controller that
+ connects the device to the system.
+
+properties:
+ compatible:
+ enum:
+ - marvell,sd8787
+ - marvell,sd8897
+ - marvell,sd8978
+ - marvell,sd8997
+ - nxp,iw416
+ - pci11ab,2b42
+ - pci1b4b,2b42
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ wakeup-source: true
+
+ marvell,caldata-txpwrlimit-2g:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: Calibration data for the 2GHz band.
+ maxItems: 566
+
+ marvell,caldata-txpwrlimit-5g-sub0:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: Calibration data for sub-band 0 in the 5GHz band.
+ maxItems: 502
+
+ marvell,caldata-txpwrlimit-5g-sub1:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: Calibration data for sub-band 1 in the 5GHz band.
+ maxItems: 688
+
+ marvell,caldata-txpwrlimit-5g-sub2:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: Calibration data for sub-band 2 in the 5GHz band.
+ maxItems: 750
+
+ marvell,caldata-txpwrlimit-5g-sub3:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: Calibration data for sub-band 3 in the 5GHz band.
+ maxItems: 502
+
+ marvell,wakeup-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Provides the pin number for the wakeup pin from the device's point of
+ view. The wakeup pin is used for the device to wake the host system
+ from sleep. This property is only necessary if the wakeup pin is
+ wired in a non-standard way, such that the default pin assignments
+ are invalid.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mmc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wifi@1 {
+ compatible = "marvell,sd8897";
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
+ marvell,wakeup-pin = <3>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
deleted file mode 100644
index cdc303caf5f4..000000000000
--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices
-------
-
-This node provides properties for controlling the Marvell SDIO/PCIE wireless device.
-The node is expected to be specified as a child node to the SDIO/PCIE controller that
-connects the device to the system.
-
-Required properties:
-
- - compatible : should be one of the following:
- * "marvell,sd8787"
- * "marvell,sd8897"
- * "marvell,sd8978"
- * "marvell,sd8997"
- * "nxp,iw416"
- * "pci11ab,2b42"
- * "pci1b4b,2b42"
-
-Optional properties:
-
- - marvell,caldata* : A series of properties with marvell,caldata prefix,
- represent calibration data downloaded to the device during
- initialization. This is an array of unsigned 8-bit values.
- the properties should follow below property name and
- corresponding array length:
- "marvell,caldata-txpwrlimit-2g" (length = 566).
- "marvell,caldata-txpwrlimit-5g-sub0" (length = 502).
- "marvell,caldata-txpwrlimit-5g-sub1" (length = 688).
- "marvell,caldata-txpwrlimit-5g-sub2" (length = 750).
- "marvell,caldata-txpwrlimit-5g-sub3" (length = 502).
- - marvell,wakeup-pin : a wakeup pin number of wifi chip which will be configured
- to firmware. Firmware will wakeup the host using this pin
- during suspend/resume.
- - interrupts : interrupt pin number to the cpu. driver will request an irq based on
- this interrupt number. during system suspend, the irq will be enabled
- so that the wifi chip can wakeup host platform under certain condition.
- during system resume, the irq will be disabled to make sure
- unnecessary interrupt is not received.
- - vmmc-supply: a phandle of a regulator, supplying VCC to the card
- - mmc-pwrseq: phandle to the MMC power sequence node. See "mmc-pwrseq-*"
- for documentation of MMC power sequence bindings.
-
-Example:
-
-Tx power limit calibration data is configured in below example.
-The calibration data is an array of unsigned values, the length
-can vary between hw versions.
-IRQ pin 38 is used as system wakeup source interrupt. wakeup pin 3 is configured
-so that firmware can wakeup host using this device side pin.
-
-&mmc3 {
- vmmc-supply = <&wlan_en_reg>;
- mmc-pwrseq = <&wifi_pwrseq>;
- bus-width = <4>;
- cap-power-off-card;
- keep-power-in-suspend;
-
- #address-cells = <1>;
- #size-cells = <0>;
- mwifiex: wifi@1 {
- compatible = "marvell,sd8897";
- reg = <1>;
- interrupt-parent = <&pio>;
- interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
-
- marvell,caldata_00_txpwrlimit_2g_cfg_set = /bits/ 8 <
- 0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>;
- marvell,wakeup-pin = <3>;
- };
-};
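
Note the property rename in the conversion: the old free-form
marvell,caldata_00_txpwrlimit_2g_cfg_set style from the deleted example maps
onto the fixed marvell,caldata-txpwrlimit-* names. A sketch of the new form,
reusing the deleted example's truncated payload (real calibration blobs run to
hundreds of bytes, e.g. 566 for the 2GHz table):

    wifi@1 {
        compatible = "marvell,sd8997";
        reg = <1>;
        marvell,caldata-txpwrlimit-2g = /bits/ 8 <
            0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>;
    };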
diff --git a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml
index 02d1d2c17129..fd0c8d5c5f3e 100644
--- a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml
@@ -19,7 +19,7 @@ description:
the hardware description for the scheme mentioned above.
maintainers:
- - Nishanth Menon <nm@ti.com>
+ - Dhruva Gole <d-gole@ti.com>
allOf:
- $ref: opp-v2-base.yaml#
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
deleted file mode 100644
index 9514c327d31b..000000000000
--- a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Altera PCIe MSI controller
-
-Required properties:
-- compatible: should contain "altr,msi-1.0"
-- reg: specifies the physical base address of the controller and
- the length of the memory mapped region.
-- reg-names: must include the following entries:
- "csr": CSR registers
- "vector_slave": vectors slave port region
-- interrupts: specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends on the
- parent interrupt controller.
-- num-vectors: number of vectors, range 1 to 32.
-- msi-controller: indicates that this is MSI controller node
-
-
-Example
-msi0: msi@0xFF200000 {
- compatible = "altr,msi-1.0";
- reg = <0xFF200000 0x00000010
- 0xFF200010 0x00000080>;
- reg-names = "csr", "vector_slave";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 42 4>;
- msi-controller;
- num-vectors = <32>;
-};
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
deleted file mode 100644
index 816b244a221e..000000000000
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Altera PCIe controller
-
-Required properties:
-- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0"
-- reg: a list of physical base address and length for TXS and CRA.
- For "altr,pcie-root-port-2.0", additional HIP base address and length.
-- reg-names: must include the following entries:
- "Txs": TX slave port region
- "Cra": Control register access region
- "Hip": Hard IP region (if "altr,pcie-root-port-2.0")
-- interrupts: specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends
- on the parent interrupt controller.
-- device_type: must be "pci"
-- #address-cells: set to <3>
-- #size-cells: set to <2>
-- #interrupt-cells: set to <1>
-- ranges: describes the translation of addresses for root ports and
- standard PCI regions.
-- interrupt-map-mask and interrupt-map: standard PCI properties to define the
- mapping of the PCIe interface to interrupt numbers.
-
-Optional properties:
-- msi-parent: Link to the hardware entity that serves as the MSI controller
- for this PCIe controller.
-- bus-range: PCI bus numbers covered
-
-Example
- pcie_0: pcie@c00000000 {
- compatible = "altr,pcie-root-port-1.0";
- reg = <0xc0000000 0x20000000>,
- <0xff220000 0x00004000>;
- reg-names = "Txs", "Cra";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 40 4>;
- interrupt-controller;
- #interrupt-cells = <1>;
- bus-range = <0x0 0xFF>;
- device_type = "pci";
- msi-parent = <&msi_to_gic_gen_0>;
- #address-cells = <3>;
- #size-cells = <2>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_0 1>,
- <0 0 0 2 &pcie_0 2>,
- <0 0 0 3 &pcie_0 3>,
- <0 0 0 4 &pcie_0 4>;
- ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000
- 0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
- };
diff --git a/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
new file mode 100644
index 000000000000..98814862d006
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2015, 2024, Intel Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/altr,msi-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera PCIe MSI controller
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - altr,msi-1.0
+
+ reg:
+ items:
+ - description: CSR registers
+ - description: Vectors slave port region
+
+ reg-names:
+ items:
+ - const: csr
+ - const: vector_slave
+
+ interrupts:
+ maxItems: 1
+
+ msi-controller: true
+
+ num-vectors:
+ description: number of vectors
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - msi-controller
+ - num-vectors
+
+allOf:
+ - $ref: /schemas/interrupt-controller/msi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ msi@ff200000 {
+ compatible = "altr,msi-1.0";
+ reg = <0xff200000 0x00000010>,
+ <0xff200010 0x00000080>;
+ reg-names = "csr", "vector_slave";
+ interrupt-parent = <&hps_0_arm_gic_0>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ msi-controller;
+ num-vectors = <32>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
new file mode 100644
index 000000000000..52533fccc134
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2015, 2019, 2024, Intel Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/altr,pcie-root-port.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera PCIe Root Port
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - altr,pcie-root-port-1.0
+ - altr,pcie-root-port-2.0
+
+ reg:
+ items:
+ - description: TX slave port region
+ - description: Control register access region
+ - description: Hard IP region
+ minItems: 2
+
+ reg-names:
+ items:
+ - const: Txs
+ - const: Cra
+ - const: Hip
+ minItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ interrupt-map-mask:
+ items:
+ - const: 0
+ - const: 0
+ - const: 0
+ - const: 7
+
+ interrupt-map:
+ maxItems: 4
+
+ "#interrupt-cells":
+ const: 1
+
+ msi-parent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - "#interrupt-cells"
+ - interrupt-controller
+ - interrupt-map
+ - interrupt-map-mask
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - altr,pcie-root-port-1.0
+ then:
+ properties:
+ reg:
+ maxItems: 2
+
+ reg-names:
+ maxItems: 2
+
+ else:
+ properties:
+ reg:
+ minItems: 3
+
+ reg-names:
+ minItems: 3
+
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ pcie_0: pcie@c00000000 {
+ compatible = "altr,pcie-root-port-1.0";
+ reg = <0xc0000000 0x20000000>,
+ <0xff220000 0x00004000>;
+ reg-names = "Txs", "Cra";
+ interrupt-parent = <&hps_0_arm_gic_0>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ bus-range = <0x0 0xff>;
+ device_type = "pci";
+ msi-parent = <&msi_to_gic_gen_0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_0 0 0 0 1>,
+ <0 0 0 2 &pcie_0 0 0 0 2>,
+ <0 0 0 3 &pcie_0 0 0 0 3>,
+ <0 0 0 4 &pcie_0 0 0 0 4>;
+ ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000>,
+ <0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 11f8ea33240c..0925c520195a 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Brcmstb PCIe Host Controller
maintainers:
- - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ - Jim Quinlan <james.quinlan@broadcom.com>
properties:
compatible:
@@ -16,11 +16,12 @@ properties:
- brcm,bcm2711-pcie # The Raspberry Pi 4
- brcm,bcm4908-pcie
- brcm,bcm7211-pcie # Broadcom STB version of RPi4
- - brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7216-pcie # Broadcom 7216 Arm
- - brcm,bcm7445-pcie # Broadcom 7445 Arm
+ - brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7425-pcie # Broadcom 7425 MIPs
- brcm,bcm7435-pcie # Broadcom 7435 MIPs
+ - brcm,bcm7445-pcie # Broadcom 7445 Arm
+ - brcm,bcm7712-pcie # Broadcom STB sibling of Rpi 5
reg:
maxItems: 1
@@ -95,6 +96,14 @@ properties:
minItems: 1
maxItems: 3
+ resets:
+ minItems: 1
+ maxItems: 3
+
+ reset-names:
+ minItems: 1
+ maxItems: 3
+
required:
- compatible
- reg
@@ -118,8 +127,7 @@ allOf:
then:
properties:
resets:
- items:
- - description: reset controller handling the PERST# signal
+ maxItems: 1
reset-names:
items:
@@ -136,12 +144,32 @@ allOf:
then:
properties:
resets:
+ maxItems: 1
+
+ reset-names:
items:
- - description: phandle pointing to the RESCAL reset controller
+ - const: rescal
+
+ required:
+ - resets
+ - reset-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm7712-pcie
+ then:
+ properties:
+ resets:
+ minItems: 3
+ maxItems: 3
reset-names:
items:
- const: rescal
+ - const: bridge
+ - const: swinit
required:
- resets
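
For the new brcm,bcm7712-pcie entry, exactly three resets with the names above
are required; a sketch with hypothetical reset-provider labels:

    pcie@8b20000 {
        compatible = "brcm,bcm7712-pcie";
        resets = <&rescal>, <&pcie_bridge_rst>, <&pcie_swinit_rst>;
        reset-names = "rescal", "bridge", "swinit";
        /* reg, ranges, interrupts omitted */
    };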
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
index a06f75df8458..84ca12e8b25b 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
@@ -65,12 +65,14 @@ allOf:
then:
properties:
reg:
- minItems: 2
- maxItems: 2
+ minItems: 4
+ maxItems: 4
reg-names:
items:
- const: dbi
- const: addr_space
+ - const: dbi2
+ - const: atu
- if:
properties:
@@ -129,8 +131,11 @@ examples:
pcie_ep: pcie-ep@33800000 {
compatible = "fsl,imx8mp-pcie-ep";
- reg = <0x33800000 0x000400000>, <0x18000000 0x08000000>;
- reg-names = "dbi", "addr_space";
+ reg = <0x33800000 0x100000>,
+ <0x18000000 0x8000000>,
+ <0x33900000 0x100000>,
+ <0x33b00000 0x100000>;
+ reg-names = "dbi", "addr_space", "dbi2", "atu";
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
<&clk IMX8MP_CLK_HSIO_AXI>,
<&clk IMX8MP_CLK_PCIE_ROOT>;
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index 8b8d77b1154b..1e05c560d797 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -30,6 +30,7 @@ properties:
- fsl,imx8mm-pcie
- fsl,imx8mp-pcie
- fsl,imx95-pcie
+ - fsl,imx8q-pcie
clocks:
minItems: 3
@@ -184,6 +185,21 @@ allOf:
- const: pcie_bus
- const: pcie_aux
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx8q-pcie
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ items:
+ - const: dbi
+ - const: mstr
+ - const: slv
+
unevaluatedProperties: false
examples:
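
Under the new if-clause an fsl,imx8q-pcie node carries exactly three clocks
with the dbi/mstr/slv names; a sketch (the clock provider and its cells are
placeholders):

    pcie@5f000000 {
        compatible = "fsl,imx8q-pcie";
        clocks = <&pcie_lpcg 0>, <&pcie_lpcg 1>, <&pcie_lpcg 2>;
        clock-names = "dbi", "mstr", "slv";
        /* reg, ranges, interrupts omitted */
    };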
diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
index 793986c5af7f..be79712836c4 100644
--- a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
@@ -22,18 +22,20 @@ description:
properties:
compatible:
- enum:
- - fsl,ls1021a-pcie
- - fsl,ls2080a-pcie
- - fsl,ls2085a-pcie
- - fsl,ls2088a-pcie
- - fsl,ls1088a-pcie
- - fsl,ls1046a-pcie
- - fsl,ls1043a-pcie
- - fsl,ls1012a-pcie
- - fsl,ls1028a-pcie
- - fsl,lx2160a-pcie
-
+ oneOf:
+ - enum:
+ - fsl,ls1012a-pcie
+ - fsl,ls1021a-pcie
+ - fsl,ls1028a-pcie
+ - fsl,ls1043a-pcie
+ - fsl,ls1046a-pcie
+ - fsl,ls1088a-pcie
+ - fsl,ls2080a-pcie
+ - fsl,ls2085a-pcie
+ - fsl,ls2088a-pcie
+ - items:
+ - const: fsl,lx2160ar2-pcie
+ - const: fsl,ls2088a-pcie
reg:
maxItems: 2
@@ -43,10 +45,15 @@ properties:
- const: config
fsl,pcie-scfg:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: A phandle to the SCFG device node. The second entry is the
physical PCIe controller index starting from '0'. This is used to get
SCFG PEXN registers.
+ items:
+ items:
+ - description: A phandle to the SCFG device node
+ - description: PCIe controller index starting from '0'
+ maxItems: 1
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
@@ -67,6 +74,14 @@ properties:
minItems: 1
maxItems: 2
+ num-viewport:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Number of outbound view ports configured in hardware. It's the same as
+ the number of outbound AT windows.
+ maximum: 256
+
required:
- compatible
- reg
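
With fsl,pcie-scfg widened to a phandle-array, the controller index travels in
the second cell; a sketch (the scfg label is illustrative):

    pcie@3400000 {
        compatible = "fsl,ls1043a-pcie";
        fsl,pcie-scfg = <&scfg 0>;  /* SCFG node, controller index 0 */
        /* reg, ranges, interrupts omitted */
    };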
diff --git a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
index c9f04999c9cf..e863519f3161 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
@@ -37,7 +37,8 @@ properties:
minItems: 3
maxItems: 4
- clocks: true
+ clocks:
+ maxItems: 5
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
index bcfbaf5582cc..420d551e9af9 100644
--- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
+++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
@@ -102,8 +102,6 @@ properties:
As described in IEEE Std 1275-1994, but must provide at least a
definition of non-prefetchable memory. One or both of prefetchable Memory
and IO Space may also be provided.
- minItems: 1
- maxItems: 3
dma-coherent: true
iommu-map: true
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index 76d742051f73..898c1be2d6a4 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -53,6 +53,7 @@ properties:
- mediatek,mt8195-pcie
- const: mediatek,mt8192-pcie
- const: mediatek,mt8192-pcie
+ - const: airoha,en7581-pcie
reg:
maxItems: 1
@@ -76,20 +77,20 @@ properties:
resets:
minItems: 1
- maxItems: 2
+ maxItems: 3
reset-names:
minItems: 1
- maxItems: 2
+ maxItems: 3
items:
- enum: [ phy, mac ]
+ enum: [ phy, mac, phy-lane0, phy-lane1, phy-lane2 ]
clocks:
- minItems: 4
+ minItems: 1
maxItems: 6
clock-names:
- minItems: 4
+ minItems: 1
maxItems: 6
assigned-clocks:
@@ -147,6 +148,9 @@ allOf:
const: mediatek,mt8192-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -155,6 +159,15 @@ allOf:
- const: tl_32k
- const: peri_26m
- const: top_133m
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
- if:
properties:
compatible:
@@ -164,6 +177,9 @@ allOf:
- mediatek,mt8195-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -172,6 +188,15 @@ allOf:
- const: tl_32k
- const: peri_26m
- const: peri_mem
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
- if:
properties:
compatible:
@@ -180,6 +205,9 @@ allOf:
- mediatek,mt7986-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -187,6 +215,36 @@ allOf:
- const: peri_26m
- const: top_133m
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ const: airoha,en7581-pcie
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: sys-ck
+
+ resets:
+ minItems: 3
+
+ reset-names:
+ items:
+ - const: phy-lane0
+ - const: phy-lane1
+ - const: phy-lane2
+
unevaluatedProperties: false
examples:
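
The Airoha EN7581 branch pins the clocks to a single "sys-ck" entry and
requires the three per-lane PHY resets; a sketch with hypothetical provider
labels and cells:

    pcie@1fa91000 {
        compatible = "airoha,en7581-pcie";
        clocks = <&scuclk 0>;
        clock-names = "sys-ck";
        resets = <&rst 1>, <&rst 2>, <&rst 3>;
        reset-names = "phy-lane0", "phy-lane1", "phy-lane2";
        /* reg, ranges, interrupts omitted */
    };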
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index d1eef4825207..f75000e3093d 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -10,7 +10,8 @@ description: |
Common properties for PCI Endpoint Controller Nodes.
maintainers:
- - Kishon Vijay Abraham I <kishon@ti.com>
+ - Kishon Vijay Abraham I <kishon@kernel.org>
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
properties:
$nodename:
@@ -41,6 +42,17 @@ properties:
default: 1
maximum: 16
+ linux,pci-domain:
+ description:
+ If present, this property assigns a fixed PCI domain number to a PCI
+ Endpoint Controller, otherwise an unstable (across boots) unique number
+ will be assigned. It is required to either not set this property at all
+ or set it for all PCI endpoint controllers in the system, otherwise
+ potentially conflicting domain numbers may be assigned to endpoint
+ controllers. The domain number for each endpoint controller in the system
+ must be unique.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
index 0a39bbfcb28b..e18900c41576 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
@@ -21,11 +21,11 @@ properties:
interrupts:
minItems: 1
- maxItems: 8
+ maxItems: 9
interrupt-names:
minItems: 1
- maxItems: 8
+ maxItems: 9
iommu-map:
minItems: 1
@@ -78,6 +78,9 @@ properties:
description: GPIO controlled connection to WAKE# signal
maxItems: 1
+ vddpe-3v3-supply:
+ description: PCIe endpoint power supply
+
required:
- reg
- reg-names
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
index 46802f7d9482..1226ee5d08d1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
@@ -280,4 +280,5 @@ examples:
phy-names = "pciephy";
max-link-speed = <3>;
num-lanes = <2>;
+ linux,pci-domain = <0>;
};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
index 634da24ec3ed..76cb9fbfd476 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
@@ -53,11 +53,19 @@ properties:
- const: aggre1 # Aggre NoC PCIe1 AXI clock
interrupts:
- maxItems: 1
+ minItems: 8
+ maxItems: 8
interrupt-names:
items:
- - const: msi
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
resets:
maxItems: 1
@@ -66,9 +74,6 @@ properties:
items:
- const: pci
- vddpe-3v3-supply:
- description: PCIe endpoint power supply
-
allOf:
- $ref: qcom,pcie-common.yaml#
@@ -137,8 +142,16 @@ examples:
dma-coherent;
- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0", "msi1", "msi2", "msi3",
+ "msi4", "msi5", "msi6", "msi7";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
index 25c9f13ae977..15ba2385eb73 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
@@ -58,9 +58,6 @@ properties:
items:
- const: pci
- vddpe-3v3-supply:
- description: A phandle to the PCIe endpoint power supply
-
required:
- interconnects
- interconnect-names
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
index d8c0afaa4b19..46bd59eefadb 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
@@ -55,8 +55,8 @@ properties:
- const: aggre1 # Aggre NoC PCIe1 AXI clock
interrupts:
- minItems: 8
- maxItems: 8
+ minItems: 9
+ maxItems: 9
interrupt-names:
items:
@@ -68,6 +68,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
operating-points-v2: true
opp-table:
@@ -149,9 +150,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index f867746b1ae5..ffabbac57fc1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -91,6 +91,9 @@ properties:
vdda_refclk-supply:
description: A phandle to the core analog power supply for IC which generates reference clock
+ vddpe-3v3-supply:
+ description: A phandle to the PCIe endpoint power supply
+
phys:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
index 91b81ac75592..b23293314a6d 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- renesas,r8a779f0-pcie-ep # R-Car S4-8
- renesas,r8a779g0-pcie-ep # R-Car V4H
+ - renesas,r8a779h0-pcie-ep # R-Car V4M
- const: renesas,rcar-gen4-pcie-ep # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
index 955c664f1fbb..bb3f843c59d9 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- renesas,r8a779f0-pcie # R-Car S4-8
- renesas,r8a779g0-pcie # R-Car V4H
+ - renesas,r8a779h0-pcie # R-Car V4M
- const: renesas,rcar-gen4-pcie # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
index b288cdb1ec70..065b7508d288 100644
--- a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
+++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
@@ -42,9 +42,13 @@ properties:
interrupts:
maxItems: 1
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 3
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
index f0d8e486a07d..93f3d0f4bb94 100644
--- a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
@@ -38,13 +38,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
num-ib-windows:
const: 16
diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
index 15a2658ceeef..69b499c96c71 100644
--- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
@@ -38,6 +38,16 @@ properties:
- const: reg
- const: cfg
+ ti,syscon-acspcie-proxy-ctrl:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the ACSPCIE Proxy Control Register
+ - description: Bitmask corresponding to the PAD IO Buffer
+ output enable fields (Active Low).
+ description: Specifier for enabling the ACSPCIE PAD outputs to drive
+ the reference clock to the Endpoint device.
+
ti,syscon-pcie-ctrl:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
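
Usage of the new property would look roughly like this (the syscon label, the
0x1 output-enable mask and the compatible are placeholders):

    pcie0_rc: pcie@f102000 {
        compatible = "ti,j784s4-pcie-host";
        ti,syscon-acspcie-proxy-ctrl = <&acspcie0_proxy_ctrl 0x1>;
        /* reg, ranges, interrupts omitted */
    };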
diff --git a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
index 9cad860c51a3..9de3c09efb6e 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
@@ -61,6 +61,11 @@ properties:
interrupt-map:
maxItems: 4
+ phys:
+ minItems: 1
+ maxItems: 4
+ description: One phy per logical lane, in order
+
power-domains:
maxItems: 1
@@ -110,6 +115,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
soc {
#address-cells = <2>;
@@ -138,6 +144,7 @@ examples:
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
msi-parent = <&nwl_pcie>;
+ phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
power-domains = <&zynqmp_firmware PD_PCIE>;
iommus = <&smmu 0x4d0>;
pcie_intc: legacy-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
index 2f59b3a73dd2..f1efd919c351 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
@@ -14,10 +14,21 @@ allOf:
properties:
compatible:
- const: xlnx,xdma-host-3.00
+ enum:
+ - xlnx,xdma-host-3.00
+ - xlnx,qdma-host-3.00
reg:
- maxItems: 1
+ items:
+ - description: Configuration region and XDMA bridge register.
+ - description: QDMA bridge register.
+ minItems: 1
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: breg
+ minItems: 1
ranges:
maxItems: 2
@@ -76,6 +87,27 @@ required:
- "#interrupt-cells"
- interrupt-controller
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - xlnx,qdma-host-3.00
+then:
+ properties:
+ reg:
+ minItems: 2
+ reg-names:
+ minItems: 2
+ required:
+ - reg-names
+else:
+ properties:
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
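
For the QDMA flavour both register regions and their names become mandatory; a
sketch with illustrative addresses:

    pcie@b0000000 {
        compatible = "xlnx,qdma-host-3.00";
        reg = <0xb0000000 0x1000000>,  /* cfg */
              <0x80000000 0x1000000>;  /* breg */
        reg-names = "cfg", "breg";
        /* ranges, interrupt properties omitted */
    };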
diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
index 2e51072e794a..0e9d665584e6 100644
--- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml
+++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
@@ -16,6 +16,7 @@ properties:
- arm,cmn-600
- arm,cmn-650
- arm,cmn-700
+ - arm,cmn-s3
- arm,ci-700
reg:
diff --git a/Documentation/devicetree/bindings/perf/arm,ni.yaml b/Documentation/devicetree/bindings/perf/arm,ni.yaml
new file mode 100644
index 000000000000..d66fffa256d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/arm,ni.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/arm,ni.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arm NI (Network-on-Chip Interconnect) Performance Monitors
+
+maintainers:
+ - Robin Murphy <robin.murphy@arm.com>
+
+properties:
+ compatible:
+ const: arm,ni-700
+
+ reg:
+ items:
+ - description: Complete configuration register space
+
+ interrupts:
+ minItems: 1
+ maxItems: 32
+ description: Overflow interrupts, one per clock domain, in order of domain ID
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
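
The binding ships without an example; a minimal node satisfying it might look
like this (node name, address and interrupt number are assumptions, not taken
from any DTS):

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    ni@2a500000 {
        compatible = "arm,ni-700";
        reg = <0x2a500000 0x1000000>;
        interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
    };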
diff --git a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt
index e1bb12711fbf..602cf952b92b 100644
--- a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt
+++ b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt
@@ -36,7 +36,7 @@ Optional properties:
3-tuple setting for each (up to 3) supported link
speed on the host. Range is 0 to 273000 in unit of
uV. Default is 0.
-- apm,tx-pre-cursor2 : 2st pre-cursor emphasis taps control. Two set of
+- apm,tx-pre-cursor2 : 2nd pre-cursor emphasis taps control. Two sets of
3-tuple setting for each (up to 3) supported link
speed on the host. Range is 0 to 127400 in unit uV.
Default is 0x0.
diff --git a/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml
new file mode 100644
index 000000000000..81001966f657
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,hi3798cv200-combphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon STB PCIE/SATA/USB3 PHY
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+
+properties:
+ compatible:
+ const: hisilicon,hi3798cv200-combphy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ description: The cell contains the PHY mode
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ hisilicon,fixed-mode:
+ description: If the phy device doesn't support mode select but only a fixed
+ mode setting, the property should be present to specify that particular mode.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 4 ] # SATA, PCIE, USB3
+
+ hisilicon,mode-select-bits:
+ description: If the phy device supports mode select, this property should be
+ present to specify the register bits in the peripheral controller.
+ items:
+ - description: register_offset
+ - description: bit shift
+ - description: bit mask
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+ - clocks
+ - resets
+
+oneOf:
+ - required: ['hisilicon,fixed-mode']
+ - required: ['hisilicon,mode-select-bits']
+
+additionalProperties: false
+
+...
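
The new schema carries no example, but the fixed-mode node from the text
binding removed later in this patch maps onto it directly:

    combphy0: phy@850 {
        compatible = "hisilicon,hi3798cv200-combphy";
        reg = <0x850 0x8>;
        #phy-cells = <1>;
        clocks = <&crg HISTB_COMBPHY0_CLK>;
        resets = <&crg 0x188 4>;
        hisilicon,fixed-mode = <PHY_TYPE_USB3>;
    };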
diff --git a/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml
index cfb3ca97f87c..cc9d0d4eeeeb 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml
@@ -41,7 +41,7 @@ properties:
description:
One instance of the T-PHY on MT7988 suffers from a performance
problem in 10GBase-R mode which needs a work-around in the driver.
- This flag enables a work-around ajusting an analog phy setting and
+ This flag enables a work-around adjusting an analog phy setting and
is required for XFI Port0 of the MT7988 SoC to be in compliance with
the SFP specification.
diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
index acba0720125d..423b7c4e62f2 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
@@ -240,7 +240,7 @@ patternProperties:
The force mode is used to manually switch the shared phy mode between
USB3 and PCIe, when USB3 phy type is selected by the consumer, and
force-mode is set, will cause phy's power and pipe toggled and force
- phy as USB3 mode which switched from default PCIe mode. But perfer to
+ phy as USB3 mode which switched from default PCIe mode. But prefer to
use the property "mediatek,syscon-type" for newer SoCs that support it.
type: boolean
diff --git a/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml
new file mode 100644
index 000000000000..fff858c909a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/nuvoton,ma35d1-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton MA35D1 USB2 phy
+
+maintainers:
+ - Hui-Ping Chen <hpchen0nvt@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,ma35d1-usb2-phy
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ nuvoton,sys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to syscon for checking the PHY clock status.
+
+required:
+ - compatible
+ - "#phy-cells"
+ - clocks
+ - nuvoton,sys
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>
+
+ usb_phy: usb-phy {
+ compatible = "nuvoton,ma35d1-usb2-phy";
+ clocks = <&clk USBD_GATE>;
+ nuvoton,sys = <&sys>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt b/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt
deleted file mode 100644
index 17b0c761370a..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-HiSilicon STB PCIE/SATA/USB3 PHY
-
-Required properties:
-- compatible: Should be "hisilicon,hi3798cv200-combphy"
-- reg: Should be the address space for COMBPHY configuration and state
- registers in peripheral controller, e.g. PERI_COMBPHY0_CFG and
- PERI_COMBPHY0_STATE for COMBPHY0 Hi3798CV200 SoC.
-- #phy-cells: Should be 1. The cell number is used to select the phy mode
- as defined in <dt-bindings/phy/phy.h>.
-- clocks: The phandle to clock provider and clock specifier pair.
-- resets: The phandle to reset controller and reset specifier pair.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Optional properties:
-- hisilicon,fixed-mode: If the phy device doesn't support mode select
- but a fixed mode setting, the property should be present to specify
- the particular mode.
-- hisilicon,mode-select-bits: If the phy device support mode select,
- this property should be present to specify the register bits in
- peripheral controller, as a 3 integers tuple:
- <register_offset bit_shift bit_mask>.
-
-Notes:
-- Between hisilicon,fixed-mode and hisilicon,mode-select-bits, one and only
- one of them should be present.
-- The device node should be a child of peripheral controller that contains
- COMBPHY configuration/state and PERI_CTRL register used to select PHY mode.
- Refer to arm/hisilicon/hisilicon.txt for the parent peripheral controller
- bindings.
-
-Examples:
-
-perictrl: peripheral-controller@8a20000 {
- compatible = "hisilicon,hi3798cv200-perictrl", "syscon",
- "simple-mfd";
- reg = <0x8a20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x8a20000 0x1000>;
-
- combphy0: phy@850 {
- compatible = "hisilicon,hi3798cv200-combphy";
- reg = <0x850 0x8>;
- #phy-cells = <1>;
- clocks = <&crg HISTB_COMBPHY0_CLK>;
- resets = <&crg 0x188 4>;
- hisilicon,fixed-mode = <PHY_TYPE_USB3>;
- };
-
- combphy1: phy@858 {
- compatible = "hisilicon,hi3798cv200-combphy";
- reg = <0x858 0x8>;
- #phy-cells = <1>;
- clocks = <&crg HISTB_COMBPHY1_CLK>;
- resets = <&crg 0x188 12>;
- hisilicon,mode-select-bits = <0x0008 11 (0x3 << 11)>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
index 83fe4b39b56f..78607ee3e2e8 100644
--- a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
@@ -14,6 +14,7 @@ properties:
compatible:
enum:
- qcom,hdmi-phy-8996
+ - qcom,hdmi-phy-8998
reg:
maxItems: 6
diff --git a/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml
new file mode 100644
index 000000000000..0bf18d32c133
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,sata-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SATA PHY Controller
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+ - Konrad Dybcio <konrad.dybcio@linaro.org>
+
+description:
+ The Qualcomm SATA PHY describes on-chip SATA Physical layer controllers.
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq806x-sata-phy
+ - qcom,apq8064-sata-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: cfg
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+ sata_phy: sata-phy@1b400000 {
+ compatible = "qcom,ipq806x-sata-phy";
+ reg = <0x1b400000 0x200>;
+
+ clocks = <&gcc SATA_PHY_CFG_CLK>;
+ clock-names = "cfg";
+
+ #phy-cells = <0>;
+ };
+
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 03dbd02cf9e7..dcf4fa55fbba 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -40,6 +40,7 @@ properties:
- qcom,sm8650-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
reg:
minItems: 1
@@ -118,6 +119,7 @@ allOf:
contains:
enum:
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
reg:
@@ -169,6 +171,7 @@ allOf:
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index 90d79491e281..d16a543a7848 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -43,7 +43,7 @@ properties:
qcom,tune-usb2-amplitude:
$ref: /schemas/types.yaml#/definitions/uint8
- description: High-Speed trasmit amplitude
+ description: High-Speed transmit amplitude
minimum: 0
maximum: 15
default: 8
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt b/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt
deleted file mode 100644
index 2cb2168cef41..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Qualcomm's APQ8016/MSM8916 USB transceiver controller
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: Should contain "qcom,usb-8x16-phy".
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: USB PHY base address and length of the register map
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: See clock-bindings.txt section "consumers". List of
- two clock specifiers for interface and core controller
- clocks.
-
-- clock-names:
- Usage: required
- Value type: <string>
- Definition: Must contain "iface" and "core" strings.
-
-- vddcx-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator VDCCX supply node.
-
-- v1p8-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator 1.8V supply node.
-
-- v3p3-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator 3.3V supply node.
-
-- resets:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: See reset.txt section "consumers". PHY reset specifier.
-
-- reset-names:
- Usage: required
- Value type: <string>
- Definition: Must contain "phy" string.
-
-- switch-gpio:
- Usage: optional
- Value type: <prop-encoded-array>
- Definition: Some boards are using Dual SPDT USB Switch, witch is
- controlled by GPIO to de/multiplex D+/D- USB lines
- between connectors.
-
-Example:
- usb_phy: phy@78d9000 {
- compatible = "qcom,usb-8x16-phy";
- reg = <0x78d9000 0x400>;
-
- vddcx-supply = <&pm8916_s1_corner>;
- v1p8-supply = <&pm8916_l7>;
- v3p3-supply = <&pm8916_l13>;
-
- clocks = <&gcc GCC_USB_HS_AHB_CLK>,
- <&gcc GCC_USB_HS_SYSTEM_CLK>;
- clock-names = "iface", "core";
-
- resets = <&gcc GCC_USB2A_PHY_BCR>;
- reset-names = "phy";
-
- // D+/D- lines: 1 - Routed to HUB, 0 - Device connector
- switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>;
- };
-
diff --git a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt
deleted file mode 100644
index 952f6c96bab9..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Qualcomm APQ8064 SATA PHY Controller
-------------------------------------
-
-SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers.
-Each SATA PHY controller should have its own node.
-
-Required properties:
-- compatible: compatible list, contains "qcom,apq8064-sata-phy".
-- reg: offset and length of the SATA PHY register set;
-- #phy-cells: must be zero
-- clocks: a list of phandles and clock-specifier pairs, one for each entry in
- clock-names.
-- clock-names: must be "cfg" for phy config clock.
-
-Example:
- sata_phy: sata-phy@1b400000 {
- compatible = "qcom,apq8064-sata-phy";
- reg = <0x1b400000 0x200>;
-
- clocks = <&gcc SATA_PHY_CFG_CLK>;
- clock-names = "cfg";
-
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt
deleted file mode 100644
index 76bfbd056202..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Qualcomm IPQ806x SATA PHY Controller
-------------------------------------
-
-SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers.
-Each SATA PHY controller should have its own node.
-
-Required properties:
-- compatible: compatible list, contains "qcom,ipq806x-sata-phy"
-- reg: offset and length of the SATA PHY register set;
-- #phy-cells: must be zero
-- clocks: must be exactly one entry
-- clock-names: must be "cfg"
-
-Example:
- sata_phy: sata-phy@1b400000 {
- compatible = "qcom,ipq806x-sata-phy";
- reg = <0x1b400000 0x200>;
-
- clocks = <&gcc SATA_PHY_CFG_CLK>;
- clock-names = "cfg";
-
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index f82649a55e91..af275cea3456 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -13,7 +13,9 @@ properties:
compatible:
oneOf:
- items:
- - const: renesas,usb2-phy-r8a77470 # RZ/G1C
+ - enum:
+ - renesas,usb2-phy-r8a77470 # RZ/G1C
+ - renesas,usb2-phy-r9a08g045 # RZ/G3S
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
index 54e822c715f3..84fe59dbcf48 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
@@ -27,6 +27,9 @@ properties:
- const: ref
- const: apb
+ "#clock-cells":
+ const: 0
+
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
index de3cffc850bc..e34b875a1bb8 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
@@ -30,13 +30,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 6
resets:
minItems: 2
maxItems: 6
- reset-names: true
+ reset-names:
+ minItems: 2
+ maxItems: 6
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
index b3ed2f74a414..9fc0e87c508e 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -31,13 +31,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
socionext,syscon:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
index 2107d98ace15..25c4159f86e4 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -34,12 +34,15 @@ properties:
minItems: 2
maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 2
+ maxItems: 3
resets:
maxItems: 2
- reset-names: true
+ reset-names:
+ maxItems: 2
vbus-supply:
description: A phandle to the regulator for USB VBUS
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
index 8f5aa6238bf3..1f663e9901da 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -35,12 +35,15 @@ properties:
minItems: 2
maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 2
+ maxItems: 3
resets:
maxItems: 2
- reset-names: true
+ reset-names:
+ maxItems: 2
vbus-supply:
description: A phandle to the regulator for USB VBUS, only for USB host
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
deleted file mode 100644
index 0aa1a53012d6..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
+++ /dev/null
@@ -1,178 +0,0 @@
-* Atmel AT91 Pinmux Controller
-
-The AT91 Pinmux Controller, enables the IC
-to share one PAD to several functional blocks. The sharing is done by
-multiplexing the PAD input/output signals. For each PAD there are up to
-8 muxing options (called periph modes). Since different modules require
-different PAD settings (like pull up, keeper, etc) the controller controls
-also the PAD settings parameters.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Atmel AT91 pin configuration node is a node of a group of pins which can be
-used for a specific device or function. This node represents both mux and config
-of the pins in that group. The 'pins' selects the function mode(also named pin
-mode) this pin can work on and the 'config' configures various pad settings
-such as pull-up, multi drive, etc.
-
-Required properties for iomux controller:
-- compatible: "atmel,at91rm9200-pinctrl" or "atmel,at91sam9x5-pinctrl"
- or "atmel,sama5d3-pinctrl" or "microchip,sam9x60-pinctrl"
- or "microchip,sam9x7-pinctrl", "microchip,sam9x60-pinctrl"
-- atmel,mux-mask: array of mask (periph per bank) to describe if a pin can be
- configured in this periph mode. All the periph and bank need to be describe.
-
-How to create such array:
-
-Each column will represent the possible peripheral of the pinctrl
-Each line will represent a pio bank
-
-Take an example on the 9260
-Peripheral: 2 ( A and B)
-Bank: 3 (A, B and C)
-=>
-
- /* A B */
- 0xffffffff 0xffc00c3b /* pioA */
- 0xffffffff 0x7fff3ccf /* pioB */
- 0xffffffff 0x007fffff /* pioC */
-
-For each peripheral/bank we will describe in a u32 if a pin can be
-configured in it by putting 1 to the pin bit (1 << pin)
-
-Let's take the pioA on peripheral B
-From the datasheet Table 10-2.
-Peripheral B
-PA0 MCDB0
-PA1 MCCDB
-PA2
-PA3 MCDB3
-PA4 MCDB2
-PA5 MCDB1
-PA6
-PA7
-PA8
-PA9
-PA10 ETX2
-PA11 ETX3
-PA12
-PA13
-PA14
-PA15
-PA16
-PA17
-PA18
-PA19
-PA20
-PA21
-PA22 ETXER
-PA23 ETX2
-PA24 ETX3
-PA25 ERX2
-PA26 ERX3
-PA27 ERXCK
-PA28 ECRS
-PA29 ECOL
-PA30 RXD4
-PA31 TXD4
-
-=> 0xffc00c3b
-
-Required properties for pin configuration node:
-- atmel,pins: 4 integers array, represents a group of pins mux and config
- setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>.
- The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B...
- PIN_BANK 0 is pioA, PIN_BANK 1 is pioB...
-
-Bits used for CONFIG:
-PULL_UP (1 << 0): indicate this pin needs a pull up.
-MULTIDRIVE (1 << 1): indicate this pin needs to be configured as multi-drive.
- Multi-drive is equivalent to open-drain type output.
-DEGLITCH (1 << 2): indicate this pin needs deglitch.
-PULL_DOWN (1 << 3): indicate this pin needs a pull down.
-DIS_SCHMIT (1 << 4): indicate this pin needs to the disable schmitt trigger.
-DRIVE_STRENGTH (3 << 5): indicate the drive strength of the pin using the
- following values:
- 00 - No change (reset state value kept)
- 01 - Low
- 10 - Medium
- 11 - High
-OUTPUT (1 << 7): indicate this pin need to be configured as an output.
-OUTPUT_VAL (1 << 8): output val (1 = high, 0 = low)
-SLEWRATE (1 << 9): slew rate of the pin: 0 = disable, 1 = enable
-DEBOUNCE (1 << 16): indicate this pin needs debounce.
-DEBOUNCE_VAL (0x3fff << 17): debounce value.
-
-NOTE:
-Some requirements for using atmel,at91rm9200-pinctrl binding:
-1. We have pin function node defined under at91 controller node to represent
- what pinmux functions this SoC supports.
-2. The driver can use the function node's name and pin configuration node's
- name describe the pin function and group hierarchy.
- For example, Linux at91 pinctrl driver takes the function node's name
- as the function name and pin configuration node's name as group name to
- create the map table.
-3. Each pin configuration node should have a phandle, devices can set pins
- configurations by referring to the phandle of that pin configuration node.
-4. The gpio controller must be describe in the pinctrl simple-bus.
-
-For each bank the required properties are:
-- compatible: "atmel,at91sam9x5-gpio" or "atmel,at91rm9200-gpio" or
- "microchip,sam9x60-gpio"
- or "microchip,sam9x7-gpio", "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio"
-- reg: physical base address and length of the controller's registers
-- interrupts: interrupt outputs from the controller
-- interrupt-controller: marks the device node as an interrupt controller
-- #interrupt-cells: should be 2; refer to ../interrupt-controller/interrupts.txt
- for more details.
-- gpio-controller
-- #gpio-cells: should be 2; the first cell is the GPIO number and the second
- cell specifies GPIO flags as defined in <dt-bindings/gpio/gpio.h>.
-- clocks: bank clock
-
-Examples:
-
-pinctrl@fffff400 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
- reg = <0xfffff400 0x600>;
-
- pioA: gpio@fffff400 {
- compatible = "atmel,at91sam9x5-gpio";
- reg = <0xfffff400 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
- #gpio-cells = <2>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <2>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
- };
-
- atmel,mux-mask = <
- /* A B */
- 0xffffffff 0xffc00c3b /* pioA */
- 0xffffffff 0x7fff3ccf /* pioB */
- 0xffffffff 0x007fffff /* pioC */
- >;
-
- /* shared pinctrl settings */
- dbgu {
- pinctrl_dbgu: dbgu-0 {
- atmel,pins =
- <1 14 0x1 0x0 /* PB14 periph A */
- 1 15 0x1 0x1>; /* PB15 periph A with pullup */
- };
- };
-};
-
-dbgu: serial@fffff200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0xfffff200 0x200>;
- interrupts = <1 4 7>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_dbgu>;
-};
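
For reference when reading older device trees that still use this numeric
form, the CONFIG cell is simply the OR of the bits defined above. A minimal
sketch (bank, pin and label are hypothetical):

    uart_rx: uart-rx {
        /* PB15, periph A, CONFIG = PULL_UP (1 << 0) | DEGLITCH (1 << 2) = 0x5 */
        atmel,pins = <1 15 0x1 0x5>;
    };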
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml
new file mode 100644
index 000000000000..1bb386b42039
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/atmel,at91rm9200-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PIO3 Pinmux Controller
+
+maintainers:
+ - Manikandan Muralidharan <manikandan.m@microchip.com>
+
+description:
+  The AT91 Pinmux Controller enables the IC to share one PAD among several
+  functional blocks. The sharing is done by multiplexing the PAD input/output
+  signals. For each PAD there are up to 8 muxing options (called periph modes).
+  Since different modules require different PAD settings (like pull up, keeper,
+  etc.), the controller also controls the PAD setting parameters.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - atmel,at91rm9200-pinctrl
+ - atmel,at91sam9x5-pinctrl
+ - atmel,sama5d3-pinctrl
+ - microchip,sam9x60-pinctrl
+ - const: simple-mfd
+ - items:
+ - enum:
+ - microchip,sam9x7-pinctrl
+ - const: microchip,sam9x60-pinctrl
+ - const: simple-mfd
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ ranges: true
+
+ atmel,mux-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: |
+      Array of masks (one per peripheral, per bank) describing whether a
+      pin can be configured in a given periph mode. All peripherals and
+      banks need to be described.
+
+      How to create such an array:
+
+      Each column represents one possible peripheral of the pinctrl.
+      Each line represents one PIO bank.
+
+      Example:
+
+ In at91sam9260.dtsi,
+      Peripherals: 2 (A and B)
+      Banks: 3 (A, B and C)
+
+ # A B
+ 0xffffffff 0xffc00c3b # pioA
+ 0xffffffff 0x7fff3ccf # pioB
+ 0xffffffff 0x007fffff # pioC
+
+      For each peripheral/bank, a u32 describes whether a pin can be
+      configured in it by setting the pin's bit (1 << pin).
+
+      Let's take pioA on peripheral B, whose mask value is 0xffc00c3b.
+      From the datasheet Table 10-2:
+ Peripheral B
+ PA0 MCDB0
+ PA1 MCCDB
+ PA2
+ PA3 MCDB3
+ PA4 MCDB2
+ PA5 MCDB1
+ PA6
+ PA7
+ PA8
+ PA9
+ PA10 ETX2
+ PA11 ETX3
+ PA12
+ PA13
+ PA14
+ PA15
+ PA16
+ PA17
+ PA18
+ PA19
+ PA20
+ PA21
+ PA22 ETXER
+ PA23 ETX2
+ PA24 ETX3
+ PA25 ERX2
+ PA26 ERX3
+ PA27 ERXCK
+ PA28 ECRS
+ PA29 ECOL
+ PA30 RXD4
+ PA31 TXD4
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+required:
+ - compatible
+ - ranges
+ - "#address-cells"
+ - "#size-cells"
+ - atmel,mux-mask
+
+patternProperties:
+ 'gpio@[0-9a-f]+$':
+ $ref: /schemas/gpio/atmel,at91rm9200-gpio.yaml
+ unevaluatedProperties: false
+
+additionalProperties:
+ type: object
+ additionalProperties:
+ type: object
+ additionalProperties: false
+
+ properties:
+ atmel,pins:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: |
+          Each entry consists of 4 integers and represents the pin's
+          mux and config setting. The format is
+          atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>.
+          Supported pin numbers and mux options vary across SoCs and
+          are defined in <include/dt-bindings/pinctrl/at91.h>.
+ items:
+ items:
+ - description:
+ Pin bank
+ - description:
+ Pin bank index
+ - description:
+ Peripheral function
+ - description:
+ Pad configuration
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/pinctrl/at91.h>
+
+ pinctrl@fffff400 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
+ ranges = <0xfffff400 0xfffff400 0x600>;
+
+ atmel,mux-mask = <
+ /* A B */
+ 0xffffffff 0xffc00c3b /* pioA */
+ 0xffffffff 0x7fff3ccf /* pioB */
+ 0xffffffff 0x007fffff /* pioC */
+ >;
+
+ dbgu {
+ pinctrl_dbgu: dbgu-0 {
+ atmel,pins =
+ <AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+ };
+ };
+
+ pioA: gpio@fffff400 {
+ compatible = "atmel,at91rm9200-gpio";
+ reg = <0xfffff400 0x200>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+ };
+ };
+...
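
To make the mask arithmetic above concrete, the pioA peripheral-B value can
be rebuilt from Table 10-2 by OR-ing one bit per muxable pin; this sketch
only restates the derivation implied by the table:

    /* Peripheral B on pioA, per Table 10-2:
     *   PA0, PA1, PA3, PA4, PA5  -> 0x0000003b
     *   PA10, PA11               -> 0x00000c00
     *   PA22 .. PA31             -> 0xffc00000
     * OR of all three groups     -> 0xffc00c3b
     */
    atmel,mux-mask = <
        /*    A          B     */
        0xffffffff 0xffc00c3b    /* pioA */
    >;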
diff --git a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml
deleted file mode 100644
index 5f00604bf48c..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml
+++ /dev/null
@@ -1,242 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/pinctrl/mobileye,eyeq5-pinctrl.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Mobileye EyeQ5 pin controller
-
-description: >
- The EyeQ5 pin controller handles the two pin banks of the system. It belongs
- to a system-controller block called OLB.
-
- Pin control is about bias (pull-down, pull-up), drive strength and muxing. Pin
- muxing supports two functions for each pin: first is GPIO, second is
- pin-dependent.
-
- Pins and groups are bijective.
-
-maintainers:
- - Grégory Clement <gregory.clement@bootlin.com>
- - Théo Lebrun <theo.lebrun@bootlin.com>
- - Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
-
-$ref: pinctrl.yaml#
-
-properties:
- compatible:
- enum:
- - mobileye,eyeq5-pinctrl
-
- reg:
- maxItems: 1
-
-patternProperties:
- "-pins?$":
- type: object
- description: Pin muxing configuration.
- $ref: pinmux-node.yaml#
- additionalProperties: false
- properties:
- pins: true
- function:
- enum: [gpio,
- # Bank A
- timer0, timer1, timer2, timer5, uart0, uart1, can0, can1, spi0,
- spi1, refclk0,
- # Bank B
- timer3, timer4, timer6, uart2, can2, spi2, spi3, mclk0]
- bias-disable: true
- bias-pull-down: true
- bias-pull-up: true
- drive-strength: true
- required:
- - pins
- - function
- allOf:
- - if:
- properties:
- function:
- const: gpio
- then:
- properties:
- pins:
- items: # PA0 - PA28, PB0 - PB22
- pattern: '^(P(A|B)1?[0-9]|PA2[0-8]|PB2[0-2])$'
- - if:
- properties:
- function:
- const: timer0
- then:
- properties:
- pins:
- items:
- enum: [PA0, PA1]
- - if:
- properties:
- function:
- const: timer1
- then:
- properties:
- pins:
- items:
- enum: [PA2, PA3]
- - if:
- properties:
- function:
- const: timer2
- then:
- properties:
- pins:
- items:
- enum: [PA4, PA5]
- - if:
- properties:
- function:
- const: timer5
- then:
- properties:
- pins:
- items:
- enum: [PA6, PA7, PA8, PA9]
- - if:
- properties:
- function:
- const: uart0
- then:
- properties:
- pins:
- items:
- enum: [PA10, PA11]
- - if:
- properties:
- function:
- const: uart1
- then:
- properties:
- pins:
- items:
- enum: [PA12, PA13]
- - if:
- properties:
- function:
- const: can0
- then:
- properties:
- pins:
- items:
- enum: [PA14, PA15]
- - if:
- properties:
- function:
- const: can1
- then:
- properties:
- pins:
- items:
- enum: [PA16, PA17]
- - if:
- properties:
- function:
- const: spi0
- then:
- properties:
- pins:
- items:
- enum: [PA18, PA19, PA20, PA21, PA22]
- - if:
- properties:
- function:
- const: spi1
- then:
- properties:
- pins:
- items:
- enum: [PA23, PA24, PA25, PA26, PA27]
- - if:
- properties:
- function:
- const: refclk0
- then:
- properties:
- pins:
- items:
- enum: [PA28]
- - if:
- properties:
- function:
- const: timer3
- then:
- properties:
- pins:
- items:
- enum: [PB0, PB1]
- - if:
- properties:
- function:
- const: timer4
- then:
- properties:
- pins:
- items:
- enum: [PB2, PB3]
- - if:
- properties:
- function:
- const: timer6
- then:
- properties:
- pins:
- items:
- enum: [PB4, PB5, PB6, PB7]
- - if:
- properties:
- function:
- const: uart2
- then:
- properties:
- pins:
- items:
- enum: [PB8, PB9]
- - if:
- properties:
- function:
- const: can2
- then:
- properties:
- pins:
- items:
- enum: [PB10, PB11]
- - if:
- properties:
- function:
- const: spi2
- then:
- properties:
- pins:
- items:
- enum: [PB12, PB13, PB14, PB15, PB16]
- - if:
- properties:
- function:
- const: spi3
- then:
- properties:
- pins:
- items:
- enum: [PB17, PB18, PB19, PB20, PB21]
- - if:
- properties:
- function:
- const: mclk0
- then:
- properties:
- pins:
- items:
- enum: [PB22]
-
-required:
- - compatible
- - reg
-
-additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
index 814b9598edd1..8cd1f442240e 100644
--- a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
@@ -71,51 +71,49 @@ patternProperties:
One or more groups of pins to mux to a certain function
items:
enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
- smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
- smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
- smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
- spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
- spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
- bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
- r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
- fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
- fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
- pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
- ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
- smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
- sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
- mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
- scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c,
- smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2,
- spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio,
- wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0,
- hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4,
- bu4b, bu5, bu5b, bu6, gpo187 ]
+ smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15,
+ smb16, smb17, smb18, smb19, smb20, smb21, smb22, smb23,
+ smb23b, smb4d, smb14, smb5, smb4, smb3, spi0cs1, spi0cs2,
+ spi0cs3, spi1cs0, spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c,
+ smb3b, bmcuart0a, uart1, jtag2, bmcuart1, uart2, sg1mdio,
+ bmcuart0b, r1err, r1md, r1oen, r2oen, rmii3, r3oen, smb3d,
+ fanin0, fanin1, fanin2, fanin3, fanin4, fanin5, fanin6,
+ fanin7, fanin8, fanin9, fanin10, fanin11, fanin12, fanin13,
+ fanin14, fanin15, pwm0, pwm1, pwm2, pwm3, r2, r2err, r2md,
+ r3rxer, ga20kbc, smb5d, lpc, espi, rg2, ddr, i3c0, i3c1,
+ i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2, smb2c, smb2b, smb1c,
+ smb1b, smb8, smb9, smb10, smb11, sd1, sd1pwr, pwm4, pwm5,
+ pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, mmc8, mmc, mmcwp, mmccd,
+ mmcrst, clkout, serirq, scipme, smi, smb6, smb6b, smb6c,
+ smb6d, smb7, smb7b, smb7c, smb7d, spi1, faninx, r1, spi3,
+ spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi, smb0b, smb0c,
+ smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2, smb12, smb13,
+ spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2, hgpio3, hgpio4,
+ hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b, bu6, gpo187 ]
function:
description:
The function that a group of pins is muxed to
- enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
- smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
- smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
- smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
- spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
- spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
- bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
- r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
- fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
+ enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi, smb5b,
+ smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15, smb16,
+ smb17, smb18, smb19, smb20, smb21, smb22, smb23, smb23b, smb4d,
+ smb14, smb5, smb4, smb3, spi0cs1, spi0cs2, spi0cs3, spi1cs0,
+ spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a,
+ uart1, jtag2, bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md,
+ r1oen, r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2,
+ fanin3, fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
- mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
- scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c,
- smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2,
- spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio,
- wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0,
- hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4,
- bu4b, bu5, bu5b, bu6, gpo187 ]
+ mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, scipme, smi,
+ smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c, smb7d, spi1,
+ faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi,
+ smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2,
+ smb12, smb13, spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2,
+ hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b,
+ bu6, gpo187 ]
dependencies:
groups: [ function ]
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
index d0af21a564b4..cbfcf215e571 100644
--- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -96,6 +96,9 @@ properties:
type: boolean
description: disable schmitt-trigger mode
+ input-schmitt-microvolt:
+    description: threshold voltage for the schmitt trigger
+
input-debounce:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Takes the debounce time in usec as argument or 0 to disable
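
A consumer fragment pairing the new property with the existing
schmitt-trigger toggle might look as follows; the pin name and threshold
value are placeholders, since valid thresholds are hardware-specific:

    some-pins {
        pins = "gpio4";
        input-schmitt-enable;
        input-schmitt-microvolt = <810000>;    /* ~0.81 V threshold */
    };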
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
deleted file mode 100644
index 4e90ddd77784..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-Qualcomm APQ8064 TLMM block
-
-Required properties:
-- compatible: "qcom,apq8064-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Qualcomm's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
- output-low, output-high.
-
-Non-empty subnodes must specify the 'pins' property.
-
-Valid values for pins are:
- gpio0-gpio89
-
-Valid values for function are:
- cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a, gp_clk_0b, gp_clk_1a,
- gp_clk_1b, gp_clk_2a, gp_clk_2b, gpio, gsbi1, gsbi2, gsbi3, gsbi4,
- gsbi4_cam_i2c, gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6,
- gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3, gsbi7, gsbi7_spi_cs1,
- gsbi7_spi_cs2, gsbi7_spi_cs3, gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm,
- riva_wlan, sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic, ps_hold
-
-Example:
-
- msmgpio: pinctrl@800000 {
- compatible = "qcom,apq8064-pinctrl";
- reg = <0x800000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 16 0x4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&gsbi5_uart_default>;
- gpio-ranges = <&msmgpio 0 0 90>;
-
- gsbi5_uart_default: gsbi5_uart_default {
- mux {
- pins = "gpio51", "gpio52";
- function = "gsbi5";
- };
-
- tx {
- pins = "gpio51";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio52";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml
new file mode 100644
index 000000000000..f251dcd4bb7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,apq8064-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. APQ8064 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm APQ8064 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,apq8064-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-apq8064-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-apq8064-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-apq8064-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-8][0-9])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc3_clk, sdc3_cmd, sdc3_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a,
+ gp_clk_0b, gp_clk_1a, gp_clk_1b, gp_clk_2a, gp_clk_2b,
+ gpio, gsbi1, gsbi2, gsbi3, gsbi4, gsbi4_cam_i2c,
+ gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3,
+ gsbi6, gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3,
+ gsbi7, gsbi7_spi_cs1, gsbi7_spi_cs2, gsbi7_spi_cs3,
+ gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm, riva_wlan,
+ sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic,
+ ps_hold ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@800000 {
+ compatible = "qcom,apq8064-pinctrl";
+ reg = <0x800000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 90>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio52";
+ function = "gsbi5";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio51";
+ function = "gsbi5";
+ bias-disable;
+ };
+ };
+ };
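
A client device then selects the state in the usual way. A sketch, assuming
the uart-state node above is given a label such as uart_state and the
consumer is the GSBI5 serial node (unit address illustrative):

    serial@16440000 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart_state>;
    };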
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
deleted file mode 100644
index c9782397ff14..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ /dev/null
@@ -1,188 +0,0 @@
-Qualcomm APQ8084 TLMM block
-
-This binding describes the Top Level Mode Multiplexer block found in the
-MSM8960 platform.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be "qcom,apq8084-pinctrl"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: should specify the TLMM summary IRQ.
-
-- interrupt-controller:
- Usage: required
- Value type: <none>
- Definition: identifies this node as an interrupt controller
-
-- #interrupt-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 2. Specifying the pin number and flags, as defined
- in <dt-bindings/interrupt-controller/irq.h>
-
-- gpio-controller:
- Usage: required
- Value type: <none>
- Definition: identifies this node as a gpio controller
-
-- #gpio-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 2. Specifying the pin number and flags, as defined
- in <dt-bindings/gpio/gpio.h>
-
-- gpio-ranges:
- Usage: required
- Definition: see ../gpio/gpio.txt
-
-- gpio-reserved-ranges:
- Usage: optional
- Definition: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-The pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-
-PIN CONFIGURATION NODES:
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
-- pins:
- Usage: required
- Value type: <string-array>
- Definition: List of gpio pins affected by the properties specified in
- this subnode. Valid pins are:
- gpio0-gpio146,
- sdc1_clk,
- sdc1_cmd,
- sdc1_data
- sdc2_clk,
- sdc2_cmd,
- sdc2_data
-
-- function:
- Usage: required
- Value type: <string>
- Definition: Specify the alternative function to be configured for the
- specified pins. Functions are only valid for gpio pins.
- Valid values are:
- adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3,
- blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8,
- blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12,
- blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, blsp_spi5,
- blsp_spi6, blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10,
- blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2, blsp_uart3,
- blsp_uart4, blsp_uart5, blsp_uart6, blsp_uart7, blsp_uart8,
- blsp_uart9, blsp_uart10, blsp_uart11, blsp_uart12,
- blsp_uim1, blsp_uim2, blsp_uim3, blsp_uim4, blsp_uim5,
- blsp_uim6, blsp_uim7, blsp_uim8, blsp_uim9, blsp_uim10,
- blsp_uim11, blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2,
- cam_mclk3, cci_async, cci_async_in0, cci_i2c0, cci_i2c1,
- cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
- edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3, gcc_obt, gcc_vtt,i
- gp_mn, gp_pdm0, gp_pdm1, gp_pdm2, gp0_clk, gp1_clk, gpio,
- hdmi_cec, hdmi_ddc, hdmi_dtest, hdmi_hpd, hdmi_rcv, hsic,
- ldo_en, ldo_update, mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst,
- pci_e1, pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s,
- qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n,
- sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus,
- spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s, tsif1,
- tsif2, uim, uim_batt_alarm
-
-- bias-disable:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as no pull.
-
-- bias-pull-down:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as pull down.
-
-- bias-pull-up:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as pull up.
-
-- output-high:
- Usage: optional
- Value type: <none>
- Definition: The specified pins are configured in output mode, driven
- high.
- Not valid for sdc pins.
-
-- output-low:
- Usage: optional
- Value type: <none>
- Definition: The specified pins are configured in output mode, driven
- low.
- Not valid for sdc pins.
-
-- drive-strength:
- Usage: optional
- Value type: <u32>
- Definition: Selects the drive strength for the specified pins, in mA.
- Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
-
-Example:
-
- tlmm: pinctrl@fd510000 {
- compatible = "qcom,apq8084-pinctrl";
- reg = <0xfd510000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 0 147>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 208 0>;
-
- uart2: uart2-default {
- mux {
- pins = "gpio4", "gpio5";
- function = "blsp_uart2";
- };
-
- tx {
- pins = "gpio4";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio5";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml
new file mode 100644
index 000000000000..38877d8b97ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,apq8084-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. APQ8084 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm APQ8084 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,apq8084-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-apq8084-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-apq8084-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-apq8084-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-3][0-9]|14[0-6])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+ sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3,
+ blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8,
+ blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12,
+ blsp_spi1, blsp_spi1_cs1, blsp_spi1_cs2, blsp_spi1_cs3,
+ blsp_spi2, blsp_spi3, blsp_spi3_cs1, blsp_spi3_cs2,
+ blsp_spi3_cs3, blsp_spi4, blsp_spi5, blsp_spi6,
+ blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10,
+ blsp_spi10_cs1, blsp_spi10_cs2, blsp_spi10_cs3,
+ blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2,
+ blsp_uart3, blsp_uart4, blsp_uart5, blsp_uart6,
+ blsp_uart7, blsp_uart8, blsp_uart9, blsp_uart10,
+ blsp_uart11, blsp_uart12, blsp_uim1, blsp_uim2,
+ blsp_uim3, blsp_uim4, blsp_uim5, blsp_uim6, blsp_uim7,
+ blsp_uim8, blsp_uim9, blsp_uim10, blsp_uim11,
+ blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2, cam_mclk3,
+ cci_async, cci_async_in0, cci_i2c0, cci_i2c1,
+ cci_timer0, cci_timer1, cci_timer2, cci_timer3,
+ cci_timer4, edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3,
+ gcc_obt, gcc_vtt, gp_mn, gp_pdm0, gp_pdm1, gp_pdm2,
+ gp0_clk, gp1_clk, gpio, hdmi_cec, hdmi_ddc, hdmi_dtest,
+ hdmi_hpd, hdmi_rcv, hsic, ldo_en, ldo_update,
+ mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst, pci_e1,
+ pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s,
+ qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n,
+ sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus,
+ spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s,
+ tsif1, tsif2, uim, uim_batt_alarm ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@fd510000 {
+ compatible = "qcom,apq8084-pinctrl";
+ reg = <0xfd510000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 147>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio5";
+ function = "blsp_uart2";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio4";
+ function = "blsp_uart2";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
deleted file mode 100644
index 97858a7c07a2..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-Qualcomm Atheros IPQ4019 TLMM block
-
-This is the Top Level Mode Multiplexor block found on the Qualcomm IPQ8019
-platform, it provides pinctrl, pinmux, pinconf, and gpiolib facilities.
-
-Required properties:
-- compatible: "qcom,ipq4019-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-The pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-open-drain,
- drive-strength.
-
-Non-empty subnodes must specify the 'pins' property.
-Note that not all properties are valid for all pins.
-
-
-Valid values for qcom,pins are:
- gpio0-gpio99
- Supports mux, bias and drive-strength
-
-Valid values for qcom,function are:
-aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0, blsp_spi1, blsp_uart0,
-blsp_uart1, chip_rst, gpio, i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx,
-jtag, led0, led1, led2, led3, led4, led5, led6, led7, led8, led9, led10, led11,
-mdc, mdio, pcie, pmu, prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1,
-smart2, smart3, tm, wifi0, wifi1
-
-Example:
-
- tlmm: pinctrl@1000000 {
- compatible = "qcom,ipq4019-pinctrl";
- reg = <0x1000000 0x300000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 0 100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 208 0>;
-
- serial_pins: serial_pinmux {
- mux {
- pins = "gpio60", "gpio61";
- function = "blsp_uart0";
- bias-disable;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml
new file mode 100644
index 000000000000..cc5de9f77680
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,ipq4019-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. IPQ4019 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm IPQ4019 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,ipq4019-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-ipq4019-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-ipq4019-tlmm-state"
+ additionalProperties: false
+
+ "-hog(-[0-9]+)?$":
+ type: object
+ required:
+ - gpio-hog
+
+$defs:
+ qcom-ipq4019-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ pattern: "^gpio([0-9]|[1-9][0-9])$"
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0,
+ blsp_spi1, blsp_uart0, blsp_uart1, chip_rst, gpio,
+ i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx,
+ jtag, led0, led1, led2, led3, led4, led5, led6, led7,
+ led8, led9, led10, led11, mdc, mdio, pcie, pmu,
+ prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1,
+ smart2, smart3, tm, wifi0, wifi1 ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq4019-pinctrl";
+ reg = <0x01000000 0x300000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 100>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ pins = "gpio16", "gpio17";
+ function = "blsp_uart0";
+ bias-disable;
+ };
+ };
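
The "-hog(-[0-9]+)?$" pattern admitted by this schema covers GPIO hog
subnodes; a minimal sketch with a hypothetical pin and line name:

    wifi-hog-0 {
        gpio-hog;
        gpios = <42 0>;        /* flags cell 0 = active high */
        output-high;
        line-name = "wifi-pwr";
    };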
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
deleted file mode 100644
index a7aaaa7db83b..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-Qualcomm IPQ8064 TLMM block
-
-Required properties:
-- compatible: "qcom,ipq8064-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Qualcomm's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
- output-low, output-high.
-
-Non-empty subnodes must specify the 'pins' property.
-
-Valid values for qcom,pins are:
- gpio0-gpio68
- Supports mux, bias, and drive-strength
-
- sdc3_clk, sdc3_cmd, sdc3_data
- Supports bias and drive-strength
-
-
-Valid values for function are:
- mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1, gsbi2, gsbi4, gsbi5,
- gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1,
- spdif, nand, tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata,
- pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren, pcie1_pwrflt,
- pcie1_clk_req, pcie2_rst, pcie2_prsnt, pcie2_pwren_n, pcie2_pwren,
- pcie2_pwrflt, pcie2_clk_req, pcie3_rst, pcie3_prsnt, pcie3_pwren_n,
- pcie3_pwren, pcie3_pwrflt, pcie3_clk_req, ps_hold
-
-Example:
-
- pinmux: pinctrl@800000 {
- compatible = "qcom,ipq8064-pinctrl";
- reg = <0x800000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 0 69>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 32 0x4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&gsbi5_uart_default>;
-
- gsbi5_uart_default: gsbi5_uart_default {
- mux {
- pins = "gpio18", "gpio19";
- function = "gsbi5";
- };
-
- tx {
- pins = "gpio18";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio19";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml
new file mode 100644
index 000000000000..58f11e1bdd4f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,ipq8064-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. IPQ8064 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm IPQ8064 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,ipq8064-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-ipq8064-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-ipq8064-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-ipq8064-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-5][0-9]|6[0-8])$"
+ - enum: [ sdc3_clk, sdc3_cmd, sdc3_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+        enum: [ mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1,
+                gsbi2, gsbi4, gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2,
+                gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1, spdif, nand,
+                tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata,
+                pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren,
+                pcie1_pwrflt, pcie1_clk_req, pcie2_rst, pcie2_prsnt,
+                pcie2_pwren_n, pcie2_pwren, pcie2_pwrflt, pcie2_clk_req,
+                pcie3_rst, pcie3_prsnt, pcie3_pwren_n, pcie3_pwren,
+                pcie3_pwrflt, pcie3_clk_req, ps_hold ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@800000 {
+ compatible = "qcom,ipq8064-pinctrl";
+ reg = <0x00800000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 69>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio19";
+ function = "gsbi5";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio18";
+ function = "gsbi5";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 2784d32fdde2..c1b799167d81 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -425,6 +425,7 @@ patternProperties:
additionalProperties: false
"-hog(-[0-9]+)?$":
+ type: object
required:
- gpio-hog
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
index dfe5616b9b85..0f331844608c 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
@@ -43,6 +43,7 @@ patternProperties:
additionalProperties: false
"-hog(-[0-9]+)?$":
+ type: object
required:
- gpio-hog
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
index 5d84364d1358..cfe004573366 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
@@ -25,6 +25,7 @@ properties:
- renesas,pfc-r8a7745 # RZ/G1E
- renesas,pfc-r8a77470 # RZ/G1C
- renesas,pfc-r8a774a1 # RZ/G2M
+ - renesas,pfc-r8a774a3 # RZ/G2M v3.0
- renesas,pfc-r8a774b1 # RZ/G2N
- renesas,pfc-r8a774c0 # RZ/G2E
- renesas,pfc-r8a774e1 # RZ/G2H
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 20e806dce1ec..6a23d845f1f2 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -45,6 +45,7 @@ properties:
- rockchip,rk3368-pinctrl
- rockchip,rk3399-pinctrl
- rockchip,rk3568-pinctrl
+ - rockchip,rk3576-pinctrl
- rockchip,rk3588-pinctrl
- rockchip,rv1108-pinctrl
- rockchip,rv1126-pinctrl
diff --git a/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml
new file mode 100644
index 000000000000..1e6a55afe26a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/sophgo,cv1800-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV1800 Pin Controller
+
+maintainers:
+ - Inochi Amaoto <inochiama@outlook.com>
+
+properties:
+ compatible:
+ enum:
+ - sophgo,cv1800b-pinctrl
+ - sophgo,cv1812h-pinctrl
+ - sophgo,sg2000-pinctrl
+ - sophgo,sg2002-pinctrl
+
+ reg:
+ items:
+ - description: pinctrl for system domain
+ - description: pinctrl for rtc domain
+
+ reg-names:
+ items:
+ - const: sys
+ - const: rtc
+
+ resets:
+ maxItems: 1
+
+patternProperties:
+ '-cfg$':
+ type: object
+ description:
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine.
+
+ additionalProperties: false
+
+ patternProperties:
+ '-pins$':
+ type: object
+ description: |
+ Each subnode will list the pins it needs, and how they should
+ be configured, with regard to muxer configuration, bias, input
+ enable/disable, input schmitt trigger, slew-rate, drive strength
+ and bus hold state. In addition, all pins in the same subnode
+            should belong to the same power domain. For configuration details,
+ refer to https://github.com/sophgo/sophgo-doc/.
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ pinmux:
+ description: |
+ The list of GPIOs and their mux settings that properties in the
+            node apply to. This should be set using the PINMUX or PINMUX2
+            macro.
+
+ bias-pull-up:
+ type: boolean
+
+ bias-pull-down:
+ type: boolean
+
+ drive-strength-microamp:
+            description: typical current when driving the output high.
+
+ input-schmitt-microvolt:
+ description: typical threshold for schmitt trigger.
+
+ power-source:
+ description: power supplies at X mV.
+ enum: [ 1800, 3300 ]
+
+ slew-rate:
+ description: slew rate for output buffer (0 is fast, 1 is slow)
+ enum: [ 0, 1 ]
+
+ bias-bus-hold: true
+
+ required:
+ - pinmux
+ - power-source
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/pinctrl-cv1800b.h>
+
+ pinctrl@3001000 {
+ compatible = "sophgo,cv1800b-pinctrl";
+ reg = <0x03001000 0x1000>,
+ <0x05027000 0x1000>;
+ reg-names = "sys", "rtc";
+
+ uart0_cfg: uart0-cfg {
+ uart0-pins {
+ pinmux = <PINMUX(PIN_UART0_TX, 0)>,
+ <PINMUX(PIN_UART0_RX, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <10800>;
+ input-schmitt-microvolt = <0>;
+ power-source = <3300>;
+ slew-rate = <0>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index e1eb45a9eda4..a28d77748095 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -11,7 +11,7 @@ maintainers:
- Alexandre TORGUE <alexandre.torgue@foss.st.com>
description: |
- STMicroelectronics's STM32 MCUs intregrate a GPIO and Pin mux/config hardware
+ STMicroelectronics's STM32 MCUs integrate a GPIO and Pin mux/config hardware
controller. It controls the input/output settings on the available pins and
also provides ability to multiplex and configure the output of various
on-chip controllers onto these pads.
@@ -164,7 +164,7 @@ patternProperties:
This macro is available here:
- include/dt-bindings/pinctrl/stm32-pinfunc.h
Some examples of using macro:
- /* GPIO A9 set as alernate function 2 */
+ /* GPIO A9 set as alternate function 2 */
... {
pinmux = <STM32_PINMUX('A', 9, AF2)>;
};
diff --git a/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml b/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml
new file mode 100644
index 000000000000..b33d26f15b2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/platform/microsoft,surface-sam.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Surface System Aggregator Module (SAM, SSAM)
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description: |
+ Surface devices use a standardized embedded controller to let the
+ operating system interface with various hardware functions. The
+ specific functionalities are modeled as subdevices and matched on
+ five levels: domain, category, target, instance and function.
+
+properties:
+ compatible:
+ const: microsoft,surface-sam
+
+ interrupts:
+ maxItems: 1
+
+ current-speed: true
+
+required:
+ - compatible
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ uart {
+ embedded-controller {
+ compatible = "microsoft,surface-sam";
+
+ interrupts-extended = <&tlmm 91 IRQ_TYPE_EDGE_RISING>;
+
+ pinctrl-0 = <&ssam_state>;
+ pinctrl-names = "default";
+
+ current-speed = <4000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
index e76fb273490f..347571e2545a 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
@@ -25,6 +25,7 @@ properties:
- renesas,r8a7745-sysc # RZ/G1E
- renesas,r8a77470-sysc # RZ/G1C
- renesas,r8a774a1-sysc # RZ/G2M
+ - renesas,r8a774a3-sysc # RZ/G2M v3.0
- renesas,r8a774b1-sysc # RZ/G2N
- renesas,r8a774c0-sysc # RZ/G2E
- renesas,r8a774e1-sysc # RZ/G2H
diff --git a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
index 0d5e999a58f1..650dc0aae6f5 100644
--- a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
@@ -41,6 +41,7 @@ properties:
- rockchip,rk3368-power-controller
- rockchip,rk3399-power-controller
- rockchip,rk3568-power-controller
+ - rockchip,rk3576-power-controller
- rockchip,rk3588-power-controller
- rockchip,rv1126-power-controller
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml b/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml
index d71fc72d4464..c434277218ea 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml
@@ -50,6 +50,7 @@ properties:
- rockchip,rk3188-io-voltage-domain
- rockchip,rk3228-io-voltage-domain
- rockchip,rk3288-io-voltage-domain
+ - rockchip,rk3308-io-voltage-domain
- rockchip,rk3328-io-voltage-domain
- rockchip,rk3368-io-voltage-domain
- rockchip,rk3368-pmu-io-voltage-domain
@@ -71,6 +72,7 @@ allOf:
- $ref: "#/$defs/rk3188"
- $ref: "#/$defs/rk3228"
- $ref: "#/$defs/rk3288"
+ - $ref: "#/$defs/rk3308"
- $ref: "#/$defs/rk3328"
- $ref: "#/$defs/rk3368"
- $ref: "#/$defs/rk3368-pmu"
@@ -194,6 +196,28 @@ $defs:
wifi-supply:
description: The supply connected to APIO3_VDD. Also known as SDIO0.
+ rk3308:
+ if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3308-io-voltage-domain
+
+ then:
+ properties:
+ vccio0-supply:
+ description: The supply connected to VCCIO0.
+ vccio1-supply:
+ description: The supply connected to VCCIO1.
+ vccio2-supply:
+ description: The supply connected to VCCIO2.
+ vccio3-supply:
+ description: The supply connected to VCCIO3.
+ vccio4-supply:
+ description: The supply connected to VCCIO4.
+ vccio5-supply:
+ description: The supply connected to VCCIO5.
+
rk3328:
if:
properties:
diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
index de43e45a43b7..9108a2841caf 100644
--- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
@@ -27,6 +27,9 @@ properties:
battery-detect-gpios:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
io-channels:
items:
- description: Battery Temperature ADC
@@ -53,6 +56,7 @@ required:
- compatible
- reg
- battery-detect-gpios
+ - interrupts
- io-channels
- io-channel-names
- nvmem-cells
@@ -88,6 +92,8 @@ examples:
compatible = "sprd,sc2731-fgu";
reg = <0xa00>;
battery-detect-gpios = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <4>;
io-channels = <&pmic_adc 5>, <&pmic_adc 14>;
io-channel-names = "bat-temp", "charge-vol";
nvmem-cells = <&fgu_calib>;
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
index e0b95ecbbebd..5ccd375eb294 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
@@ -23,11 +23,18 @@ properties:
- const: x-powers,axp202-battery-power-supply
- const: x-powers,axp209-battery-power-supply
- const: x-powers,axp221-battery-power-supply
+ - const: x-powers,axp717-battery-power-supply
- items:
- const: x-powers,axp803-battery-power-supply
- const: x-powers,axp813-battery-power-supply
- const: x-powers,axp813-battery-power-supply
+ monitored-battery:
+ description:
+ Specifies the phandle of an optional simple-battery connected to
+ this gauge.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
required:
- compatible
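
With the new property, a board ties the gauge to a simple-battery node along
these lines (the battery parameters are placeholders):

    battery: battery {
        compatible = "simple-battery";
        charge-full-design-microamp-hours = <4000000>;
    };

    battery-power-supply {
        compatible = "x-powers,axp717-battery-power-supply";
        monitored-battery = <&battery>;
    };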
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
index 34b7959d6772..2ec036405ae4 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
@@ -15,9 +15,6 @@ maintainers:
- Chen-Yu Tsai <wens@csie.org>
- Sebastian Reichel <sre@kernel.org>
-allOf:
- - $ref: power-supply.yaml#
-
properties:
compatible:
oneOf:
@@ -26,13 +23,82 @@ properties:
- x-powers,axp202-usb-power-supply
- x-powers,axp221-usb-power-supply
- x-powers,axp223-usb-power-supply
+ - x-powers,axp717-usb-power-supply
- x-powers,axp813-usb-power-supply
- items:
- const: x-powers,axp803-usb-power-supply
- const: x-powers,axp813-usb-power-supply
+ input-current-limit-microamp:
+ description:
+      Optional value to clamp the device's maximum input current
+      limit to. If omitted, the value programmed in the EFUSE will
+      be used.
+ minimum: 100000
+ maximum: 4000000
required:
- compatible
+allOf:
+ - $ref: power-supply.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - x-powers,axp192-usb-power-supply
+ then:
+ properties:
+ input-current-limit-microamp:
+ enum: [100000, 500000]
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - x-powers,axp202-usb-power-supply
+ - x-powers,axp223-usb-power-supply
+ then:
+ properties:
+ input-current-limit-microamp:
+ enum: [100000, 500000, 900000]
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - x-powers,axp221-usb-power-supply
+ then:
+ properties:
+ input-current-limit-microamp:
+ enum: [500000, 900000]
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - x-powers,axp717-usb-power-supply
+ then:
+ properties:
+ input-current-limit-microamp:
+ description: Maximum input current in increments of 50000 uA.
+ minimum: 100000
+ maximum: 3250000
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - x-powers,axp813-usb-power-supply
+ then:
+ properties:
+ input-current-limit-microamp:
+ enum: [100000, 500000, 900000, 1500000, 2000000, 2500000,
+ 3000000, 3500000, 4000000]
+
additionalProperties: false
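
A minimal sketch of an AXP717 node using the new input-current-limit-microamp property (the 1.5 A value is illustrative; on the AXP717 the value must lie between 100000 and 3250000 and be a multiple of 50000 uA):

    usb_power_supply: usb-power-supply {
        compatible = "x-powers,axp717-usb-power-supply";
        /* clamp the input current limit to 1.5 A */
        input-current-limit-microamp = <1500000>;
    };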
diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt
index a6c8978964aa..27f1797be963 100644
--- a/Documentation/devicetree/bindings/power/wakeup-source.txt
+++ b/Documentation/devicetree/bindings/power/wakeup-source.txt
@@ -25,8 +25,8 @@ List of legacy properties and respective binding document
2. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt
3. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
Documentation/devicetree/bindings/mfd/tc3589x.txt
- Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
-4. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
+ Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
+4. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8921-keypad.yaml
5. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung,s3c6410-keypad.yaml
6. "nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
diff --git a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
index 3bb8615e3e91..42ca895f3c4e 100644
--- a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
+++ b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
@@ -11,11 +11,14 @@ maintainers:
properties:
compatible:
- enum:
- - fsl,etsec-ptp
- - fsl,fman-ptp-timer
- - fsl,dpaa2-ptp
- - fsl,enetc-ptp
+ oneOf:
+ - enum:
+ - fsl,etsec-ptp
+ - fsl,fman-ptp-timer
+ - fsl,dpaa2-ptp
+ - items:
+ - const: pci1957,ee02
+ - const: fsl,enetc-ptp
reg:
maxItems: 1
@@ -123,6 +126,15 @@ required:
- compatible
- reg
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,enetc-ptp
+ then:
+ $ref: /schemas/pci/pci-device.yaml
+
additionalProperties: false
examples:
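
A hedged sketch of the new PCI-function form of the ENETC PTP timer (the bus position 18,0 and the PCI reg encoding are illustrative):

    ptp-timer@18,0 {
        compatible = "pci1957,ee02", "fsl,enetc-ptp";
        reg = <0x00c000 0 0 0 0>; /* device 0x18, function 0 */
    };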
diff --git a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
index 66e400f2a3a4..1b192e197b11 100644
--- a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
@@ -46,10 +46,11 @@ properties:
- description: Module Clock
- description: Bus Clock
- # Even though it only applies to subschemas under the conditionals,
- # not listing them here will trigger a warning because of the
- # additionalsProperties set to false.
- clock-names: true
+ clock-names:
+ minItems: 1
+ items:
+ - const: mod
+ - const: bus
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml b/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml
index 1d71d4f8f328..e021cf59421a 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml
@@ -39,6 +39,10 @@ properties:
- amlogic,meson-s4-pwm
- items:
- enum:
+ - amlogic,meson-a1-pwm
+ - const: amlogic,meson-s4-pwm
+ - items:
+ - enum:
- amlogic,meson8b-pwm-v2
- amlogic,meson-gxbb-pwm-v2
- amlogic,meson-axg-pwm-v2
@@ -56,6 +60,9 @@ properties:
minItems: 1
maxItems: 2
+ power-domains:
+ maxItems: 1
+
"#pwm-cells":
const: 3
@@ -136,6 +143,16 @@ allOf:
required:
- clocks
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - amlogic,meson-a1-pwm
+ then:
+ required:
+ - power-domains
+
additionalProperties: false
examples:
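
A minimal sketch of an A1 PWM node, which per the new conditional must carry power-domains (the register offset and the clock/power-domain phandles and specifiers are illustrative):

    pwm@2000 {
        compatible = "amlogic,meson-a1-pwm", "amlogic,meson-s4-pwm";
        reg = <0x2000 0x24>;
        clocks = <&clkc_pwm_a>, <&clkc_pwm_b>; /* illustrative phandles */
        power-domains = <&pwrc 0>;             /* illustrative specifier */
        #pwm-cells = <3>;
    };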
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
index 6b6a302a175c..2fe1992e2908 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
@@ -37,6 +37,7 @@ properties:
- renesas,pwm-r8a77995 # R-Car D3
- renesas,pwm-r8a779a0 # R-Car V3U
- renesas,pwm-r8a779g0 # R-Car V4H
+ - renesas,pwm-r8a779h0 # R-Car V4M
- const: renesas,pwm-rcar
reg:
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index a3e52b22dd18..a4dfa09344dd 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -41,6 +41,7 @@ properties:
- renesas,tpu-r8a77980 # R-Car V3H
- renesas,tpu-r8a779a0 # R-Car V3U
- renesas,tpu-r8a779g0 # R-Car V4H
+ - renesas,tpu-r8a779h0 # R-Car V4M
- const: renesas,tpu
reg:
diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml
new file mode 100644
index 000000000000..50db6782a090
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml
@@ -0,0 +1,238 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mediatek,mt6397-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT6397 Regulator
+
+maintainers:
+ - Sen Chu <sen.chu@mediatek.com>
+ - Macpaul Lin <macpaul.lin@mediatek.com>
+
+description:
+ Regulator node of the PMIC. This node should be under the PMIC's device node.
+ All voltage regulators provided by the PMIC are described as sub-nodes of
+ this node.
+
+properties:
+ compatible:
+ items:
+ - const: mediatek,mt6397-regulator
+
+patternProperties:
+ "^(buck_)?v(core|drm|gpu|io18|pca(7|15)|sramca(7|15))$":
+ description: Buck regulators
+ type: object
+ $ref: regulator.yaml#
+ properties:
+ regulator-allowed-modes:
+ description: |
+ BUCK regulators can set regulator-initial-mode and regulator-allowed-modes to
+ values specified in dt-bindings/regulator/mediatek,mt6397-regulator.h
+ items:
+ enum: [0, 1]
+ unevaluatedProperties: false
+
+ "^(ldo_)?v(tcxo|(a|io)28)$":
+ description: LDOs with fixed 2.8V output and 0~100/10mV tuning
+ type: object
+ $ref: regulator.yaml#
+ properties:
+ regulator-allowed-modes: false
+ unevaluatedProperties: false
+
+ "^(ldo_)?vusb$":
+ description: LDOs with fixed 3.0V output and 0~100/10mV tuning
+ type: object
+ $ref: regulator.yaml#
+ properties:
+ regulator-allowed-modes: false
+ unevaluatedProperties: false
+
+ "^(ldo_)?v(cama|emc3v3|gp[123456]|ibr|mc|mch)$":
+ description: LDOs with variable output and 0~100/10mV tuning
+ type: object
+ $ref: regulator.yaml#
+ properties:
+ regulator-allowed-modes: false
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mt6397_regulators: regulators {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-name = "vpca15";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <200>;
+ };
+
+ mt6397_vpca7_reg: buck_vpca7 {
+ regulator-name = "vpca7";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vsramca15_reg: buck_vsramca15 {
+ regulator-name = "vsramca15";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vsramca7_reg: buck_vsramca7 {
+ regulator-name = "vsramca7";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vcore_reg: buck_vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vgpu_reg: buck_vgpu {
+ regulator-name = "vgpu";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vdrm_reg: buck_vdrm {
+ regulator-name = "vdrm";
+ regulator-min-microvolt = < 800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <500>;
+ };
+
+ mt6397_vio18_reg: buck_vio18 {
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2120000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <500>;
+ };
+
+ mt6397_vtcxo_reg: ldo_vtcxo {
+ regulator-name = "vtcxo";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <90>;
+ };
+
+ mt6397_va28_reg: ldo_va28 {
+ regulator-name = "va28";
+ /* fixed output 2.8 V */
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vcama_reg: ldo_vcama {
+ regulator-name = "vcama";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vio28_reg: ldo_vio28 {
+ regulator-name = "vio28";
+ /* fixed output 2.8 V */
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_usb_reg: ldo_vusb {
+ regulator-name = "vusb";
+ /* fixed output 3.3 V */
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmc_reg: ldo_vmc {
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmch_reg: ldo_vmch {
+ regulator-name = "vmch";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ regulator-name = "vemc_3v3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp1_reg: ldo_vgp1 {
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <1220000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_vgp2_reg: ldo_vgp2 {
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp3_reg: ldo_vgp3 {
+ regulator-name = "vcamaf";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp5_reg: ldo_vgp5 {
+ regulator-name = "vgp5";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp6_reg: ldo_vgp6 {
+ regulator-name = "vgp6";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vibr_reg: ldo_vibr {
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml b/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml
index 1aca3646789e..c3e1fc6e260e 100644
--- a/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml
+++ b/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml
@@ -28,6 +28,21 @@ properties:
reg:
maxItems: 1
+ lvin-supply:
+ description: Input supply phandle for LDO1 and LDO2
+
+ pvin1-supply:
+ description: Input supply phandle for VDD_IO (BUCK1)
+
+ pvin2-supply:
+ description: Input supply phandle for VDD_DDR (BUCK2)
+
+ pvin3-supply:
+ description: Input supply phandle for VDD_CORE (BUCK3)
+
+ pvin4-supply:
+ description: Input supply phandle for VDD_OTHER (BUCK4)
+
regulators:
type: object
additionalProperties: false
@@ -68,6 +83,11 @@ examples:
pmic@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
regulators {
VDD_IO {
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
deleted file mode 100644
index c080086d3e62..000000000000
--- a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
+++ /dev/null
@@ -1,220 +0,0 @@
-Mediatek MT6397 Regulator
-
-Required properties:
-- compatible: "mediatek,mt6397-regulator"
-- mt6397regulator: List of regulators provided by this controller. It is named
- according to its regulator type, buck_<name> and ldo_<name>.
- The definition for each of these nodes is defined using the standard binding
- for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
-
-The valid names for regulators are::
-BUCK:
- buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
- buck_vdrm, buck_vio18
-LDO:
- ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
- ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
- ldo_vibr
-
-BUCK regulators can set regulator-initial-mode and regulator-allowed-modes to
-values specified in dt-bindings/regulator/mediatek,mt6397-regulator.h
-
-Example:
- pmic {
- compatible = "mediatek,mt6397";
-
- mt6397regulator: mt6397regulator {
- compatible = "mediatek,mt6397-regulator";
-
- mt6397_vpca15_reg: buck_vpca15 {
- regulator-compatible = "buck_vpca15";
- regulator-name = "vpca15";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <200>;
- };
-
- mt6397_vpca7_reg: buck_vpca7 {
- regulator-compatible = "buck_vpca7";
- regulator-name = "vpca7";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <115>;
- };
-
- mt6397_vsramca15_reg: buck_vsramca15 {
- regulator-compatible = "buck_vsramca15";
- regulator-name = "vsramca15";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <115>;
-
- };
-
- mt6397_vsramca7_reg: buck_vsramca7 {
- regulator-compatible = "buck_vsramca7";
- regulator-name = "vsramca7";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <115>;
-
- };
-
- mt6397_vcore_reg: buck_vcore {
- regulator-compatible = "buck_vcore";
- regulator-name = "vcore";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <115>;
- };
-
- mt6397_vgpu_reg: buck_vgpu {
- regulator-compatible = "buck_vgpu";
- regulator-name = "vgpu";
- regulator-min-microvolt = < 700000>;
- regulator-max-microvolt = <1350000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <115>;
- };
-
- mt6397_vdrm_reg: buck_vdrm {
- regulator-compatible = "buck_vdrm";
- regulator-name = "vdrm";
- regulator-min-microvolt = < 800000>;
- regulator-max-microvolt = <1400000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <500>;
- };
-
- mt6397_vio18_reg: buck_vio18 {
- regulator-compatible = "buck_vio18";
- regulator-name = "vio18";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <2120000>;
- regulator-ramp-delay = <12500>;
- regulator-enable-ramp-delay = <500>;
- };
-
- mt6397_vtcxo_reg: ldo_vtcxo {
- regulator-compatible = "ldo_vtcxo";
- regulator-name = "vtcxo";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-enable-ramp-delay = <90>;
- };
-
- mt6397_va28_reg: ldo_va28 {
- regulator-compatible = "ldo_va28";
- regulator-name = "va28";
- /* fixed output 2.8 V */
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vcama_reg: ldo_vcama {
- regulator-compatible = "ldo_vcama";
- regulator-name = "vcama";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <2800000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vio28_reg: ldo_vio28 {
- regulator-compatible = "ldo_vio28";
- regulator-name = "vio28";
- /* fixed output 2.8 V */
- regulator-enable-ramp-delay = <240>;
- };
-
- mt6397_usb_reg: ldo_vusb {
- regulator-compatible = "ldo_vusb";
- regulator-name = "vusb";
- /* fixed output 3.3 V */
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vmc_reg: ldo_vmc {
- regulator-compatible = "ldo_vmc";
- regulator-name = "vmc";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vmch_reg: ldo_vmch {
- regulator-compatible = "ldo_vmch";
- regulator-name = "vmch";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vemc_3v3_reg: ldo_vemc3v3 {
- regulator-compatible = "ldo_vemc3v3";
- regulator-name = "vemc_3v3";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vgp1_reg: ldo_vgp1 {
- regulator-compatible = "ldo_vgp1";
- regulator-name = "vcamd";
- regulator-min-microvolt = <1220000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <240>;
- };
-
- mt6397_vgp2_reg: ldo_vgp2 {
- egulator-compatible = "ldo_vgp2";
- regulator-name = "vcamio";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vgp3_reg: ldo_vgp3 {
- regulator-compatible = "ldo_vgp3";
- regulator-name = "vcamaf";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vgp4_reg: ldo_vgp4 {
- regulator-compatible = "ldo_vgp4";
- regulator-name = "vgp4";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vgp5_reg: ldo_vgp5 {
- regulator-compatible = "ldo_vgp5";
- regulator-name = "vgp5";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3000000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vgp6_reg: ldo_vgp6 {
- regulator-compatible = "ldo_vgp6";
- regulator-name = "vgp6";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
-
- mt6397_vibr_reg: ldo_vibr {
- regulator-compatible = "ldo_vibr";
- regulator-name = "vibr";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3300000>;
- regulator-enable-ramp-delay = <218>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml
index 3aaa9653419a..11ed04c95542 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
- qcom,qca6390-pmu
+ - qcom,wcn6855-pmu
- qcom,wcn7850-pmu
vdd-supply:
@@ -65,7 +66,11 @@ properties:
bt-enable-gpios:
maxItems: 1
- description: GPIO line enabling the ATH11K Bluetooth module supplied by the PMU
+ description: GPIO line enabling the Bluetooth module supplied by the PMU
+
+ swctrl-gpios:
+ maxItems: 1
+ description: GPIO line indicating the state of the clock supply to the BT module
clocks:
maxItems: 1
@@ -108,6 +113,21 @@ allOf:
properties:
compatible:
contains:
+ const: qcom,wcn6855-pmu
+ then:
+ required:
+ - vddio-supply
+ - vddaon-supply
+ - vddpmu-supply
+ - vddrfa0p95-supply
+ - vddrfa1p3-supply
+ - vddrfa1p9-supply
+ - vddpcie1p3-supply
+ - vddpcie1p9-supply
+ - if:
+ properties:
+ compatible:
+ contains:
const: qcom,wcn7850-pmu
then:
required:
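
A sketch of a WCN6855 PMU node carrying the supplies the new conditional requires (all regulator phandles are illustrative):

    wcn6855-pmu {
        compatible = "qcom,wcn6855-pmu";
        vddio-supply = <&vreg_io>;
        vddaon-supply = <&vreg_aon>;
        vddpmu-supply = <&vreg_pmu>;
        vddrfa0p95-supply = <&vreg_rfa_0p95>;
        vddrfa1p3-supply = <&vreg_rfa_1p3>;
        vddrfa1p9-supply = <&vreg_rfa_1p9>;
        vddpcie1p3-supply = <&vreg_pcie_1p3>;
        vddpcie1p9-supply = <&vreg_pcie_1p9>;
    };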
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
index c5dc3c2820d7..adc6b3f36fde 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
@@ -93,7 +93,7 @@ patternProperties:
Each SCP core has own cache memory. The SRAM and L1TCM are shared by
cores. The power of cache, SRAM and L1TCM power should be enabled
before booting SCP cores. The size of cache, SRAM, and L1TCM are varied
- on differnt SoCs.
+ on different SoCs.
The SCP cores do not use an MMU, but has a set of registers to
control the translations between 32-bit CPU addresses into system bus
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml
index 3766d4513b37..c54234247ab3 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml
@@ -90,7 +90,7 @@ examples:
qcom,rpm-msg-ram = <&rpm_msg_ram>;
rpm-requests {
- compatible = "qcom,rpm-msm8996";
+ compatible = "qcom,rpm-msm8996", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
/* ... */
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml
index 61cf4fe19ca5..540bdfca53d9 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml
@@ -142,7 +142,7 @@ examples:
qcom,smd-edge = <15>;
rpm-requests {
- compatible = "qcom,rpm-msm8916";
+ compatible = "qcom,rpm-msm8916", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
/* ... */
};
@@ -163,7 +163,7 @@ examples:
mboxes = <&apcs_glb 0>;
rpm-requests {
- compatible = "qcom,rpm-qcm2290";
+ compatible = "qcom,rpm-qcm2290", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
/* ... */
};
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
index 73fda7565cd1..d7fad7b3c2c6 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8550-adsp-pas
- qcom,sm8550-cdsp-pas
- qcom,sm8550-mpss-pas
@@ -113,6 +114,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8650-mpss-pas
then:
properties:
@@ -146,6 +148,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8550-mpss-pas
- qcom,sm8650-mpss-pas
then:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml
new file mode 100644
index 000000000000..2bd0752b6ba9
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/ti,k3-m4f-rproc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI K3 M4F processor subsystems
+
+maintainers:
+ - Hari Nagalla <hnagalla@ti.com>
+ - Mathieu Poirier <mathieu.poirier@linaro.org>
+
+description: |
+ Some K3 family SoCs have Arm Cortex M4F cores. AM64x is a SoC in the K3
+ family with an M4F core. Safety-oriented applications typically use the
+ M4F core in isolation, without IPC, whereas some industrial and home
+ automation applications use the M4F core as a remote processor with IPC
+ communications.
+
+$ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,am64-m4fss
+
+ power-domains:
+ maxItems: 1
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ reg:
+ items:
+ - description: IRAM internal memory region
+ - description: DRAM internal memory region
+
+ reg-names:
+ items:
+ - const: iram
+ - const: dram
+
+ resets:
+ maxItems: 1
+
+ firmware-name:
+ maxItems: 1
+ description: Name of firmware to load for the M4F core
+
+ mboxes:
+ description:
+ OMAP Mailbox specifier denoting the sub-mailbox, to be used for
+ communication with the remote processor. This property should match
+ the sub-mailbox node used in the firmware image.
+ maxItems: 1
+
+ memory-region:
+ description:
+ Phandles to the reserved memory nodes to be associated with the
+ remoteproc device. Optional memory regions available for
+ firmware-specific purposes.
+ (see reserved-memory/reserved-memory.yaml in the dtschema project)
+ maxItems: 8
+ items:
+ - description: regions used for DMA allocations like vrings, vring buffers
+ and memory dedicated to firmware's specific purposes.
+ additionalItems: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ti,sci
+ - ti,sci-dev-id
+ - ti,sci-proc-ids
+ - resets
+ - firmware-name
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ mcu_m4fss_dma_memory_region: m4f-dma-memory@9cb00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cb00000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_m4fss_memory_region: m4f-memory@9cc00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cc00000 0x00 0xe00000>;
+ no-map;
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ mailbox0_cluster0: mailbox-0 {
+ #mbox-cells = <1>;
+ };
+
+ remoteproc@5000000 {
+ compatible = "ti,am64-m4fss";
+ reg = <0x00 0x5000000 0x00 0x30000>,
+ <0x00 0x5040000 0x00 0x10000>;
+ reg-names = "iram", "dram";
+ resets = <&k3_reset 9 1>;
+ firmware-name = "am62-mcu-m4f0_0-fw";
+ mboxes = <&mailbox0_cluster0>, <&mbox_m4_0>;
+ memory-region = <&mcu_m4fss_dma_memory_region>,
+ <&mcu_m4fss_memory_region>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <9>;
+ ti,sci-proc-ids = <0x18 0xff>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
index 6f13da11f593..ee63c03949c9 100644
--- a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
@@ -62,6 +62,7 @@ properties:
patternProperties:
"^r(.*)@[0-9a-f]+$":
type: object
+ additionalProperties: false
description: |
The RPU is located in the Low Power Domain of the Processor Subsystem.
Each processor includes separate L1 instruction and data caches and
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
index f0c6c0df0ce3..695ef38a7bb3 100644
--- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
@@ -19,6 +19,7 @@ properties:
- amlogic,meson-a1-reset # Reset Controller on A1 and compatible SoCs
- amlogic,meson-s4-reset # Reset Controller on S4 and compatible SoCs
- amlogic,c3-reset # Reset Controller on C3 and compatible SoCs
+ - amlogic,t7-reset
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml b/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml
deleted file mode 100644
index 062b4518347b..000000000000
--- a/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/reset/mobileye,eyeq5-reset.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Mobileye EyeQ5 reset controller
-
-description:
- The EyeQ5 reset driver handles three reset domains. Its registers live in a
- shared region called OLB.
-
-maintainers:
- - Grégory Clement <gregory.clement@bootlin.com>
- - Théo Lebrun <theo.lebrun@bootlin.com>
- - Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
-
-properties:
- compatible:
- const: mobileye,eyeq5-reset
-
- reg:
- maxItems: 3
-
- reg-names:
- items:
- - const: d0
- - const: d1
- - const: d2
-
- "#reset-cells":
- const: 2
- description:
- The first cell is the domain (0 to 2 inclusive) and the second one is the
- reset index inside that domain.
-
-required:
- - compatible
- - reg
- - reg-names
- - "#reset-cells"
-
-additionalProperties: false
diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.yaml b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
index 58b4a45d3380..7a81491379b0 100644
--- a/Documentation/devicetree/bindings/reset/renesas,rst.yaml
+++ b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
@@ -29,6 +29,7 @@ properties:
- renesas,r8a7745-rst # RZ/G1E
- renesas,r8a77470-rst # RZ/G1C
- renesas,r8a774a1-rst # RZ/G2M
+ - renesas,r8a774a3-rst # RZ/G2M v3.0
- renesas,r8a774b1-rst # RZ/G2N
- renesas,r8a774c0-rst # RZ/G2E
- renesas,r8a774e1-rst # RZ/G2H
diff --git a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
index fa253c518d79..babc563ae61e 100644
--- a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
@@ -38,13 +38,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index a06dbc6b4928..2cf2026cff57 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -171,6 +171,13 @@ properties:
memory types as ratified in the 20191213 version of the privileged
ISA specification.
+ - const: svvptc
+ description:
+ The standard Svvptc supervisor-level extension for
+ address-translation cache behaviour with respect to invalid entries
+ as ratified at commit 4a69197e5617 ("Update to ratified state") of
+ riscv-svvptc.
+
- const: zacas
description: |
The Zacas extension for Atomic Compare-and-Swap (CAS) instructions
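
A sketch of a CPU node advertising the new extension via riscv,isa-extensions (the base ISA and the rest of the extension list are illustrative):

    cpu@0 {
        device_type = "cpu";
        compatible = "riscv";
        reg = <0>;
        riscv,isa-base = "rv64i";
        riscv,isa-extensions = "i", "m", "a", "svvptc";
    };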
diff --git a/Documentation/devicetree/bindings/riscv/sophgo.yaml b/Documentation/devicetree/bindings/riscv/sophgo.yaml
index 9bc813dad098..a14cb10ff3f0 100644
--- a/Documentation/devicetree/bindings/riscv/sophgo.yaml
+++ b/Documentation/devicetree/bindings/riscv/sophgo.yaml
@@ -28,6 +28,11 @@ properties:
- const: sophgo,cv1812h
- items:
- enum:
+ - sipeed,licheerv-nano-b
+ - const: sipeed,licheerv-nano
+ - const: sophgo,sg2002
+ - items:
+ - enum:
- milkv,pioneer
- const: sophgo,sg2042
diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml
new file mode 100644
index 000000000000..e0595814a6d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/rockchip,rk3568-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3568 TRNG
+
+description: True Random Number Generator on Rockchip RK3568 SoC
+
+maintainers:
+ - Aurelien Jarno <aurelien@aurel32.net>
+ - Daniel Golle <daniel@makrotopia.org>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3568-rng
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: TRNG clock
+ - description: TRNG AHB clock
+
+ clock-names:
+ items:
+ - const: core
+ - const: ahb
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3568-cru.h>
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ rng@fe388000 {
+ compatible = "rockchip,rk3568-rng";
+ reg = <0x0 0xfe388000 0x0 0x4000>;
+ clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>;
+ clock-names = "core", "ahb";
+ resets = <&cru SRST_TRNG_NS>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml b/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml
index 388102ae30cd..3ec111f2fdc4 100644
--- a/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml
+++ b/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml
@@ -42,7 +42,7 @@ properties:
minItems: 1
description:
phandle to rcpm node, Please refer
- Documentation/devicetree/bindings/soc/fsl/rcpm.txt
+ Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
index 5ade5dfad048..cda8ad7c1203 100644
--- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
+++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
@@ -22,6 +22,9 @@ properties:
interrupts:
maxItems: 1
+ "#clock-cells":
+ const: 0
+
trickle-resistor-ohms:
enum:
- 3000
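
With #clock-cells present, the RTC can act as a clock provider; a minimal sketch on an I2C bus (0x52 is the usual RV3028 address; a consumer would reference the node with clocks = <&rv3028>):

    rv3028: rtc@52 {
        compatible = "microcrystal,rv3028";
        reg = <0x52>;
        #clock-cells = <0>;
    };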
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
new file mode 100644
index 000000000000..f3d20e976965
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/sprd,sc2731-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 Real Time Clock
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,sc2731-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: rtc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@280 {
+ compatible = "sprd,sc2731-rtc";
+ reg = <0x280>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
deleted file mode 100644
index 1f5754299d31..000000000000
--- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Spreadtrum SC27xx Real Time Clock
-
-Required properties:
-- compatible: should be "sprd,sc2731-rtc".
-- reg: address offset of rtc register.
-- interrupts: rtc alarm interrupt.
-
-Example:
-
- sc2731_pmic: pmic@0 {
- compatible = "sprd,sc2731";
- reg = <0>;
- spi-max-frequency = <26000000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- rtc@280 {
- compatible = "sprd,sc2731-rtc";
- reg = <0x280>;
- interrupt-parent = <&sc2731_pmic>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 7a0fab721cf1..aae06e570c22 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -53,6 +53,28 @@ properties:
override default rtc_ck parent clock phandle of the new parent clock of rtc_ck
maxItems: 1
+patternProperties:
+ "^rtc-[a-z]+-[0-9]+$":
+ type: object
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+ description: |
+ Configuration of the STM32 RTC pins. The STM32 RTC can output some
+ signals on specific pins:
+ - LSCO (Low Speed Clock Output), which outputs the LSE clock on a pin.
+ - Alarm out, which sends a pulse on a pin when alarm A of the RTC
+ expires.
+ additionalProperties: false
+ properties:
+ function:
+ enum:
+ - lsco
+ - alarm-a
+ pins:
+ enum:
+ - out1
+ - out2
+ - out2_rmp
+
allOf:
- if:
properties:
@@ -68,6 +90,9 @@ allOf:
clock-names: false
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- st,syscfg
@@ -83,6 +108,9 @@ allOf:
minItems: 2
maxItems: 2
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- clock-names
- st,syscfg
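
A sketch of the new pin-configuration subnodes; the node names must match rtc-[a-z]+-[0-9]+ and the function/pins values come from the enums above:

    rtc {
        rtc-lsco-0 {
            function = "lsco";
            pins = "out2_rmp";
        };

        rtc-alarma-0 {
            function = "alarm-a";
            pins = "out1";
        };
    };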
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index fffd759c603f..7330a7200831 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -38,12 +38,13 @@ properties:
- dallas,ds1672
# Extremely Accurate I²C RTC with Integrated Crystal and SRAM
- dallas,ds3232
+ # SD2405AL Real-Time Clock
+ - dfrobot,sd2405al
# EM Microelectronic EM3027 RTC
- emmicro,em3027
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- epson,rx8010
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- - epson,rx8025
- epson,rx8035
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM
- epson,rx8111
@@ -52,10 +53,6 @@ properties:
- epson,rx8581
# Android Goldfish Real-time Clock
- google,goldfish-rtc
- # Intersil ISL1208 Low Power RTC with Battery Backed SRAM
- - isil,isl1208
- # Intersil ISL1218 Low Power RTC with Battery Backed SRAM
- - isil,isl1218
# Mvebu Real-time Clock
- marvell,orion-rtc
# Maxim DS1742/DS1743 Real-time Clock
@@ -68,8 +65,6 @@ properties:
- microcrystal,rv8523
# NXP LPC32xx SoC Real-time Clock
- nxp,lpc3220-rtc
- # Real-time Clock Module
- - pericom,pt7c4338
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
- ricoh,r2025sd
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
index a5d67563cd53..29d48da81531 100644
--- a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml
@@ -78,7 +78,7 @@ properties:
we use nvidia,adjust-baud-rates.
As an example, consider there is deviation observed in TX for baud rates as listed below. 0
- to 9600 has 1% deviation 9600 to 115200 2% deviation. This slight deviation is expcted and
+ to 9600 has 1% deviation, 9600 to 115200 has 2% deviation. This slight deviation is expected and
Tegra UART is expected to handle it. Due to the issue stated above, baud rate on Tegra UART
should be set equal to or above deviation observed for avoiding frame errors. Property
should be set like this:
diff --git a/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml b/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml
new file mode 100644
index 000000000000..b4a73214d20d
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/serial-peripheral-props.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common Properties for Serial-attached Devices
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description:
+ Devices connected over serial/UART, expressed as children of a serial
+ controller, might need similar properties, e.g. for configuring the baud
+ rate.
+
+properties:
+ max-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The maximum baud rate the device operates at.
+ This should only be present if the maximum is less than the slave
+ device can support. For example, a particular board has some
+ signal quality issue or the host processor can't support higher
+ baud rates.
+
+ current-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ The current baud rate the device operates at.
+ This should only be present if the driver has no way to know
+ the baud rate of the slave device.
+ Examples:
+ * device supports auto-baud
+ * the rate is set up by a bootloader and there is no way to reset
+ the device
+ * device baud rate is configured by its firmware but there is no
+ way to request the actual settings
+
+additionalProperties: true
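
A sketch of these shared properties in use on a serial-attached device (the Bluetooth compatible and the rate are illustrative):

    serial {
        bluetooth {
            compatible = "brcm,bcm43438-bt";
            max-speed = <921600>;
        };
    };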
diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
index ffc9198ae214..6aa9cfae417b 100644
--- a/Documentation/devicetree/bindings/serial/serial.yaml
+++ b/Documentation/devicetree/bindings/serial/serial.yaml
@@ -88,10 +88,12 @@ properties:
TX FIFO threshold configuration (in bytes).
patternProperties:
- "^(bluetooth|bluetooth-gnss|gnss|gps|mcu|onewire)$":
+ "^(bluetooth|bluetooth-gnss|embedded-controller|gnss|gps|mcu|onewire)$":
if:
type: object
then:
+ additionalProperties: true
+ $ref: serial-peripheral-props.yaml#
description:
Serial attached devices shall be a child node of the host UART device
the slave device is attached to. It is expected that the attached
@@ -103,28 +105,6 @@ patternProperties:
description:
Compatible of the device connected to the serial port.
- max-speed:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- The maximum baud rate the device operates at.
- This should only be present if the maximum is less than the slave
- device can support. For example, a particular board has some
- signal quality issue or the host processor can't support higher
- baud rates.
-
- current-speed:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: |
- The current baud rate the device operates at.
- This should only be present in case a driver has no chance to know
- the baud rate of the slave device.
- Examples:
- * device supports auto-baud
- * the rate is setup by a bootloader and there is no way to reset
- the device
- * device baud rate is configured by its firmware but there is no
- way to request the actual settings
-
required:
- compatible
diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml
new file mode 100644
index 000000000000..e02d9d7e7d9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/bcm/brcm,bcm2711-avs-monitor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom AVS Monitor
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+
+properties:
+ compatible:
+ items:
+ - const: brcm,bcm2711-avs-monitor
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ thermal:
+ $ref: /schemas/thermal/brcm,avs-ro-thermal.yaml
+ description: Broadcom AVS ring oscillator thermal
+
+required:
+ - compatible
+ - reg
+ - thermal
+
+additionalProperties: false
+
+examples:
+ - |
+ avs-monitor@7d5d2000 {
+ compatible = "brcm,bcm2711-avs-monitor", "syscon", "simple-mfd";
+ reg = <0x7d5d2000 0xf00>;
+
+ thermal: thermal {
+ compatible = "brcm,bcm2711-thermal";
+ #thermal-sensor-cells = <0>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml
new file mode 100644
index 000000000000..3b50e0a003ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-tsa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PowerQUICC QE Time-slot assigner (TSA) controller
+
+maintainers:
+ - Herve Codina <herve.codina@bootlin.com>
+
+description:
+ The TSA is the time-slot assigner that can be found on some PowerQUICC SoCs.
+ Its purpose is to route some TDM time-slots to other internal serial
+ controllers.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,mpc8321-tsa
+ - const: fsl,qe-tsa
+
+ reg:
+ items:
+ - description: SI (Serial Interface) register base
+ - description: SI RAM base
+
+ reg-names:
+ items:
+ - const: si_regs
+ - const: si_ram
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^tdm@[0-3]$':
+ description:
+ The TDM managed by this controller
+ type: object
+
+ additionalProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 3
+ description:
+ The TDM number for this TDM, 0 for TDMa, 1 for TDMb, 2 for TDMc and 3
+ for TDMd.
+
+ fsl,common-rxtx-pins:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The hardware can use four dedicated pins for Tx clock, Tx sync, Rx
+ clock and Rx sync or use only two pins, Tx/Rx clock and Tx/Rx sync.
+ Without the 'fsl,common-rxtx-pins' property, the four pins are used.
+ With the 'fsl,common-rxtx-pins' property, two pins are used.
+
+ clocks:
+ minItems: 2
+ items:
+ - description: Receive sync clock
+ - description: Receive data clock
+ - description: Transmit sync clock
+ - description: Transmit data clock
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: rsync
+ - const: rclk
+ - const: tsync
+ - const: tclk
+
+ fsl,rx-frame-sync-delay-bits:
+ enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ Receive frame sync delay in number of bits.
+ Indicates the delay between the Rx sync and the first bit of the Rx
+ frame.
+
+ fsl,tx-frame-sync-delay-bits:
+ enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ Transmit frame sync delay in number of bits.
+ Indicates the delay between the Tx sync and the first bit of the Tx
+ frame.
+
+ fsl,clock-falling-edge:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Data is sent on falling edge of the clock (and received on the rising
+ edge). If not present, data is sent on the rising edge (and received
+ on the falling edge).
+
+ fsl,fsync-rising-edge:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Frame sync pulses are sampled with the rising edge of the channel
+ clock. If not present, pulses are sampled with the falling edge.
+
+ fsl,fsync-active-low:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Frame sync signals are active on low logic level.
+ If not present, sync signals are active on high level.
+
+ fsl,double-speed-clock:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The channel clock is twice the data rate.
+
+ patternProperties:
+ '^fsl,[rt]x-ts-routes$':
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: |
+ A list of tuples that indicates the Tx or Rx time-slot routes.
+ items:
+ items:
+ - description:
+ The number of time-slots
+ minimum: 1
+ maximum: 64
+ - description: |
+ The source (Tx) or destination (Rx) serial interface
+ (dt-bindings/soc/qe-fsl,tsa.h defines these values)
+ - 0: No destination
+ - 1: UCC1
+ - 2: UCC2
+ - 3: UCC3
+ - 4: UCC4
+ - 5: UCC5
+ enum: [0, 1, 2, 3, 4, 5]
+ minItems: 1
+ maxItems: 64
+
+ allOf:
+ # If fsl,common-rxtx-pins is present, only 2 clocks are needed.
+ # Else, the 4 clocks must be present.
+ - if:
+ required:
+ - fsl,common-rxtx-pins
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
+ else:
+ properties:
+ clocks:
+ minItems: 4
+ clock-names:
+ minItems: 4
+
+ required:
+ - reg
+ - clocks
+ - clock-names
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/qe-fsl,tsa.h>
+
+ tsa@ae0 {
+ compatible = "fsl,mpc8321-tsa", "fsl,qe-tsa";
+ reg = <0xae0 0x10>,
+ <0xc00 0x200>;
+ reg-names = "si_regs", "si_ram";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tdm@0 {
+ /* TDMa */
+ reg = <0>;
+
+ clocks = <&clk_l1rsynca>, <&clk_l1rclka>;
+ clock-names = "rsync", "rclk";
+
+ fsl,common-rxtx-pins;
+ fsl,fsync-rising-edge;
+
+ fsl,tx-ts-routes = <2 0>, /* TS 0..1 */
+ <24 FSL_QE_TSA_UCC4>, /* TS 2..25 */
+ <1 0>, /* TS 26 */
+ <5 FSL_QE_TSA_UCC3>; /* TS 27..31 */
+
+ fsl,rx-ts-routes = <2 0>, /* TS 0..1 */
+ <24 FSL_QE_TSA_UCC4>, /* 2..25 */
+ <1 0>, /* TS 26 */
+ <5 FSL_QE_TSA_UCC3>; /* TS 27..31 */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml
new file mode 100644
index 000000000000..71ae64cb8a4f
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PowerQUICC QE QUICC Multichannel Controller (QMC)
+
+maintainers:
+ - Herve Codina <herve.codina@bootlin.com>
+
+description:
+ The QMC (QUICC Multichannel Controller) emulates up to 64 channels within one
+ serial controller using the same TDM physical interface routed from the TSA.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,mpc8321-ucc-qmc
+ - const: fsl,qe-ucc-qmc
+
+ reg:
+ items:
+ - description: UCC (Unified communication controller) register base
+ - description: Dual port ram base
+
+ reg-names:
+ items:
+ - const: ucc_regs
+ - const: dpram
+
+ interrupts:
+ maxItems: 1
+ description: UCC interrupt line in the QE interrupt controller
+
+ fsl,tsa-serial:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to TSA node
+ - enum: [1, 2, 3, 4, 5]
+ description: |
+ TSA serial interface (dt-bindings/soc/qe-fsl,tsa.h defines these
+ values)
+ - 1: UCC1
+ - 2: UCC2
+ - 3: UCC3
+ - 4: UCC4
+ - 5: UCC5
+ description:
+ Should be a phandle/number pair. The phandle to the TSA node and the
+ TSA serial interface to use.
+
+ fsl,soft-qmc:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Soft QMC firmware name to load. If this property is omitted, no
+ firmware is used.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^channel@([0-9]|[1-5][0-9]|6[0-3])$':
+ description:
+ A channel managed by this controller
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,mpc8321-ucc-qmc-hdlc
+ - const: fsl,qe-ucc-qmc-hdlc
+ - const: fsl,qmc-hdlc
+
+ reg:
+ minimum: 0
+ maximum: 63
+ description:
+ The channel number
+
+ fsl,operational-mode:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [transparent, hdlc]
+ default: transparent
+ description: |
+ The channel operational mode
+ - hdlc: The channel handles HDLC frames
+ - transparent: The channel handles raw data without any processing
+
+ fsl,reverse-data:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The bit order as seen on the channels is reversed,
+ transmitting/receiving the MSB of each octet first.
+ This flag is used only in 'transparent' mode.
+
+ fsl,tx-ts-mask:
+ $ref: /schemas/types.yaml#/definitions/uint64
+ description:
+ Channel assigned Tx time-slots within the Tx time-slots routed by the
+ TSA to this cell.
+
+ fsl,rx-ts-mask:
+ $ref: /schemas/types.yaml#/definitions/uint64
+ description:
+ Channel assigned Rx time-slots within the Rx time-slots routed by the
+ TSA to this cell.
+
+ fsl,framer:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the framer node. The framer is in charge of an E1/T1 line
+ interface connected to the TDM bus. It can be used to get the E1/T1 line
+ status such as link up/down.
+
+ allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: fsl,qmc-hdlc
+ then:
+ properties:
+ fsl,framer: false
+
+ required:
+ - reg
+ - fsl,tx-ts-mask
+ - fsl,rx-ts-mask
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - fsl,tsa-serial
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/qe-fsl,tsa.h>
+
+ qmc@a60 {
+ compatible = "fsl,mpc8321-ucc-qmc", "fsl,qe-ucc-qmc";
+ reg = <0x3200 0x200>,
+ <0x10000 0x1000>;
+ reg-names = "ucc_regs", "dpram";
+ interrupts = <35>;
+ interrupt-parent = <&qeic>;
+ fsl,soft-qmc = "fsl_qe_ucode_qmc_8321_11.bin";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsl,tsa-serial = <&tsa FSL_QE_TSA_UCC4>;
+
+ channel@16 {
+ /* Ch16 : First 4 even TS from all routed from TSA */
+ reg = <16>;
+ fsl,operational-mode = "transparent";
+ fsl,reverse-data;
+ fsl,tx-ts-mask = <0x00000000 0x000000aa>;
+ fsl,rx-ts-mask = <0x00000000 0x000000aa>;
+ };
+
+ channel@17 {
+ /* Ch17 : First 4 odd TS from all routed from TSA */
+ reg = <17>;
+ fsl,operational-mode = "transparent";
+ fsl,reverse-data;
+ fsl,tx-ts-mask = <0x00000000 0x00000055>;
+ fsl,rx-ts-mask = <0x00000000 0x00000055>;
+ };
+
+ channel@19 {
+ /* Ch19 : 8 TS (TS 8..15) from all routed from TSA */
+ compatible = "fsl,mpc8321-ucc-qmc-hdlc",
+ "fsl,qe-ucc-qmc-hdlc",
+ "fsl,qmc-hdlc";
+ reg = <19>;
+ fsl,operational-mode = "hdlc";
+ fsl,tx-ts-mask = <0x00000000 0x0000ff00>;
+ fsl,rx-ts-mask = <0x00000000 0x0000ff00>;
+ fsl,framer = <&framer>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml
new file mode 100644
index 000000000000..64ffbf75dd9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: High-Level Data Link Control (HDLC)
+
+description: HDLC part in Universal communication controllers (UCCs)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,ucc-hdlc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ rx-clock-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ oneOf:
+ - pattern: "^brg([0-9]|1[0-6])$"
+ - pattern: "^clk([0-9]|1[0-9]|2[0-4])$"
+
+ tx-clock-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ oneOf:
+ - pattern: "^brg([0-9]|1[0-6])$"
+ - pattern: "^clk([0-9]|1[0-9]|2[0-4])$"
+
+ fsl,tdm-interface:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Specify that the HDLC is based on a TDM interface
+
+ fsl,rx-sync-clock:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Rx sync clock source
+ enum:
+ - none
+ - rsync_pin
+ - brg9
+ - brg10
+ - brg11
+ - brg13
+ - brg14
+ - brg15
+
+ fsl,tx-sync-clock:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Tx sync clock source
+ enum:
+ - none
+ - tsync_pin
+ - brg9
+ - brg10
+ - brg11
+ - brg13
+ - brg14
+ - brg15
+
+ fsl,tdm-framer-type:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Required for the TDM interface
+ enum: [e1, t1]
+
+ fsl,tdm-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The TDM ID number
+
+ fsl,tx-timeslot-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Required for the TDM interface.
+ Time slot mask for TDM operation. Indicates which time
+ slots are used for transmitting.
+
+ fsl,rx-timeslot-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Required for the TDM interface.
+ Time slot mask for TDM operation. Indicates which time
+ slots are used for receiving.
+
+ fsl,siram-entry-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Required for the TDM interface.
+ The TDM entry number. Must be 0, 2, 4, ... 64.
+
+ fsl,tdm-internal-loopback:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Optional for the TDM interface.
+ Internal loopback connection on the TDM layer.
+
+ fsl,hmask:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description: |
+ HDLC address recognition. Set to zero to disable
+ address filtering of packets:
+ fsl,hmask = /bits/ 16 <0x0000>;
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ communication@2000 {
+ compatible = "fsl,ucc-hdlc";
+ reg = <0x2000 0x200>;
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ - |
+ communication@2000 {
+ compatible = "fsl,ucc-hdlc";
+ reg = <0x2000 0x200>;
+ rx-clock-name = "brg1";
+ tx-clock-name = "brg1";
+ };
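Note: the timeslot masks above are plain bit masks in which bit n selects
time slot n. As a worked reading of the first example (illustrative only,
not additional binding text):

    fsl,tx-timeslot-mask = <0xfffffffe>; /* bits 1-31 set: time slots 1-31
                                            are used, slot 0 is skipped */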
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
deleted file mode 100644
index 6d2dd8a31482..000000000000
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
+++ /dev/null
@@ -1,130 +0,0 @@
-* Network
-
-Currently defined compatibles:
-- fsl,cpm1-scc-enet
-- fsl,cpm2-scc-enet
-- fsl,cpm1-fec-enet
-- fsl,cpm2-fcc-enet (third resource is GFEMR)
-- fsl,qe-enet
-
-Example:
-
- ethernet@11300 {
- compatible = "fsl,mpc8272-fcc-enet",
- "fsl,cpm2-fcc-enet";
- reg = <11300 20 8400 100 11390 1>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <20 8>;
- interrupt-parent = <&PIC>;
- phy-handle = <&PHY0>;
- fsl,cpm-command = <12000300>;
- };
-
-* MDIO
-
-Currently defined compatibles:
-fsl,pq1-fec-mdio (reg is same as first resource of FEC device)
-fsl,cpm2-mdio-bitbang (reg is port C registers)
-
-Properties for fsl,cpm2-mdio-bitbang:
-fsl,mdio-pin : pin of port C controlling mdio data
-fsl,mdc-pin : pin of port C controlling mdio clock
-
-Example:
- mdio@10d40 {
- compatible = "fsl,mpc8272ads-mdio-bitbang",
- "fsl,mpc8272-mdio-bitbang",
- "fsl,cpm2-mdio-bitbang";
- reg = <10d40 14>;
- #address-cells = <1>;
- #size-cells = <0>;
- fsl,mdio-pin = <12>;
- fsl,mdc-pin = <13>;
- };
-
-* HDLC
-
-Currently defined compatibles:
-- fsl,ucc-hdlc
-
-Properties for fsl,ucc-hdlc:
-- rx-clock-name
-- tx-clock-name
- Usage: required
- Value type: <string>
- Definition : Must be "brg1"-"brg16" for internal clock source,
- Must be "clk1"-"clk24" for external clock source.
-
-- fsl,tdm-interface
- Usage: optional
- Value type: <empty>
- Definition : Specify that hdlc is based on tdm-interface
-
-The property below is dependent on fsl,tdm-interface:
-- fsl,rx-sync-clock
- Usage: required
- Value type: <string>
- Definition : Must be "none", "rsync_pin", "brg9-11" and "brg13-15".
-
-- fsl,tx-sync-clock
- Usage: required
- Value type: <string>
- Definition : Must be "none", "tsync_pin", "brg9-11" and "brg13-15".
-
-- fsl,tdm-framer-type
- Usage: required for tdm interface
- Value type: <string>
- Definition : "e1" or "t1".Now e1 and t1 are used, other framer types
- are not supported.
-
-- fsl,tdm-id
- Usage: required for tdm interface
- Value type: <u32>
- Definition : number of TDM ID
-
-- fsl,tx-timeslot-mask
-- fsl,rx-timeslot-mask
- Usage: required for tdm interface
- Value type: <u32>
- Definition : time slot mask for TDM operation. Indicates which time
- slots used for transmitting and receiving.
-
-- fsl,siram-entry-id
- Usage: required for tdm interface
- Value type: <u32>
- Definition : Must be 0,2,4...64. the number of TDM entry.
-
-- fsl,tdm-internal-loopback
- usage: optional for tdm interface
- value type: <empty>
- Definition : Internal loopback connecting on TDM layer.
-- fsl,hmask
- usage: optional
- Value type: <u16>
- Definition: HDLC address recognition. Set to zero to disable
- address filtering of packets:
- fsl,hmask = /bits/ 16 <0x0000>;
-
-Example for tdm interface:
-
- ucc@2000 {
- compatible = "fsl,ucc-hdlc";
- rx-clock-name = "clk8";
- tx-clock-name = "clk9";
- fsl,rx-sync-clock = "rsync_pin";
- fsl,tx-sync-clock = "tsync_pin";
- fsl,tx-timeslot-mask = <0xfffffffe>;
- fsl,rx-timeslot-mask = <0xfffffffe>;
- fsl,tdm-framer-type = "e1";
- fsl,tdm-id = <0>;
- fsl,siram-entry-id = <0>;
- fsl,tdm-interface;
- };
-
-Example for hdlc without tdm interface:
-
- ucc@2000 {
- compatible = "fsl,ucc-hdlc";
- rx-clock-name = "brg1";
- tx-clock-name = "brg1";
- };
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
index 2a456c8af992..2958ef45b3e9 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml
@@ -23,6 +23,9 @@ properties:
- fsl,ls1028a-scfg
- fsl,ls1043a-scfg
- fsl,ls1046a-scfg
+ - fsl,ls1088a-isc
+ - fsl,ls2080a-isc
+ - fsl,lx2160a-isc
- const: syscon
reg:
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml
new file mode 100644
index 000000000000..03d71ab930d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/fsl/fsl,rcpm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Run Control and Power Management
+
+description:
+ The RCPM performs all device-level tasks associated with device run control
+ and power management.
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,p2041-rcpm
+ - fsl,p3041-rcpm
+ - fsl,p4080-rcpm
+ - fsl,p5020-rcpm
+ - fsl,p5040-rcpm
+ - const: fsl,qoriq-rcpm-1.0
+ - items:
+ - enum:
+ - fsl,b4420-rcpm
+ - fsl,b4860-rcpm
+ - fsl,t4240-rcpm
+ - const: fsl,qoriq-rcpm-2.0
+ - items:
+ - enum:
+ - fsl,t1040-rcpm
+ - const: fsl,qoriq-rcpm-2.1
+ - items:
+ - enum:
+ - fsl,ls1012a-rcpm
+ - fsl,ls1021a-rcpm
+ - fsl,ls1028a-rcpm
+ - fsl,ls1043a-rcpm
+ - fsl,ls1046a-rcpm
+ - fsl,ls1088a-rcpm
+ - fsl,ls208xa-rcpm
+ - fsl,lx2160a-rcpm
+ - const: fsl,qoriq-rcpm-2.1+
+
+ reg:
+ maxItems: 1
+
+ "#fsl,rcpm-wakeup-cells":
+ description: |
+ The number of IPPDEXPCR register cells in the
+ fsl,rcpm-wakeup property.
+
+ A fsl,rcpm-wakeup property should be added to a device node if the
+ device can be used as a wakeup source.
+
+ fsl,rcpm-wakeup: Consists of a phandle to the rcpm node and the IPPDEXPCR
+ register cells. The number of IPPDEXPCR register cells is defined in
+ "#fsl,rcpm-wakeup-cells" in the rcpm node. The first register cell is
+ the bit mask that should be set in IPPDEXPCR0, and the second register
+ cell is for IPPDEXPCR1, and so on.
+
+ Note: IPPDEXPCR (IP Powerdown Exception Control Register) provides a
+ mechanism for keeping certain blocks awake during STANDBY and MEM, in
+ order to use them as wake-up sources.
+
+ little-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ When present, the RCPM register block is little-endian. Without it,
+ the RCPM is big-endian (the default).
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ global-utilities@e2000 {
+ compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
+ reg = <0xe2000 0x1000>;
+ #fsl,rcpm-wakeup-cells = <2>;
+ };
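A wakeup-capable device then points back at the RCPM through fsl,rcpm-wakeup,
as in this consumer sketch carried over from the former text binding (the
LPUART node and its other resources are illustrative):

    lpuart0: serial@2950000 {
        compatible = "fsl,ls1021a-lpuart";
        reg = <0x0 0x2950000 0x0 0x1000>;
        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&sysclk>;
        clock-names = "ipg";
        /* first cell: IPPDEXPCR0 mask, second cell: IPPDEXPCR1 mask */
        fsl,rcpm-wakeup = <&rcpm 0x0 0x40000000>;
    };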
diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
deleted file mode 100644
index 5a33619d881d..000000000000
--- a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-* Run Control and Power Management
--------------------------------------------
-The RCPM performs all device-level tasks associated with device run control
-and power management.
-
-Required properites:
- - reg : Offset and length of the register set of the RCPM block.
- - #fsl,rcpm-wakeup-cells : The number of IPPDEXPCR register cells in the
- fsl,rcpm-wakeup property.
- - compatible : Must contain a chip-specific RCPM block compatible string
- and (if applicable) may contain a chassis-version RCPM compatible
- string. Chip-specific strings are of the form "fsl,<chip>-rcpm",
- such as:
- * "fsl,p2041-rcpm"
- * "fsl,p5020-rcpm"
- * "fsl,t4240-rcpm"
-
- Chassis-version strings are of the form "fsl,qoriq-rcpm-<version>",
- such as:
- * "fsl,qoriq-rcpm-1.0": for chassis 1.0 rcpm
- * "fsl,qoriq-rcpm-2.0": for chassis 2.0 rcpm
- * "fsl,qoriq-rcpm-2.1": for chassis 2.1 rcpm
- * "fsl,qoriq-rcpm-2.1+": for chassis 2.1+ rcpm
-
-All references to "1.0" and "2.0" refer to the QorIQ chassis version to
-which the chip complies.
-Chassis Version Example Chips
---------------- -------------------------------
-1.0 p4080, p5020, p5040, p2041, p3041
-2.0 t4240, b4860, b4420
-2.1 t1040,
-2.1+ ls1021a, ls1012a, ls1043a, ls1046a
-
-Optional properties:
- - little-endian : RCPM register block is Little Endian. Without it RCPM
- will be Big Endian (default case).
-
-Example:
-The RCPM node for T4240:
- rcpm: global-utilities@e2000 {
- compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
- reg = <0xe2000 0x1000>;
- #fsl,rcpm-wakeup-cells = <2>;
- };
-
-* Freescale RCPM Wakeup Source Device Tree Bindings
--------------------------------------------
-Required fsl,rcpm-wakeup property should be added to a device node if the device
-can be used as a wakeup source.
-
- - fsl,rcpm-wakeup: Consists of a phandle to the rcpm node and the IPPDEXPCR
- register cells. The number of IPPDEXPCR register cells is defined in
- "#fsl,rcpm-wakeup-cells" in the rcpm node. The first register cell is
- the bit mask that should be set in IPPDEXPCR0, and the second register
- cell is for IPPDEXPCR1, and so on.
-
- Note: IPPDEXPCR(IP Powerdown Exception Control Register) provides a
- mechanism for keeping certain blocks awake during STANDBY and MEM, in
- order to use them as wake-up sources.
-
-Example:
- lpuart0: serial@2950000 {
- compatible = "fsl,ls1021a-lpuart";
- reg = <0x0 0x2950000 0x0 0x1000>;
- interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sysclk>;
- clock-names = "ipg";
- fsl,rcpm-wakeup = <&rcpm 0x0 0x40000000>;
- };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
index 4512390f90f0..2d3fe0b54243 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
@@ -32,6 +32,11 @@ properties:
- const: qcom,pmic-glink
- items:
- enum:
+ - qcom,sm7325-pmic-glink
+ - const: qcom,qcm6490-pmic-glink
+ - const: qcom,pmic-glink
+ - items:
+ - enum:
- qcom,sm8650-pmic-glink
- qcom,x1e80100-pmic-glink
- const: qcom,sm8550-pmic-glink
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index 2fa725b8af5d..270bcd079f88 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -30,31 +30,37 @@ maintainers:
properties:
compatible:
- enum:
- - qcom,rpm-apq8084
- - qcom,rpm-ipq6018
- - qcom,rpm-ipq9574
- - qcom,rpm-mdm9607
- - qcom,rpm-msm8226
- - qcom,rpm-msm8610
- - qcom,rpm-msm8909
- - qcom,rpm-msm8916
- - qcom,rpm-msm8917
- - qcom,rpm-msm8936
- - qcom,rpm-msm8937
- - qcom,rpm-msm8952
- - qcom,rpm-msm8953
- - qcom,rpm-msm8974
- - qcom,rpm-msm8976
- - qcom,rpm-msm8994
- - qcom,rpm-msm8996
- - qcom,rpm-msm8998
- - qcom,rpm-qcm2290
- - qcom,rpm-qcs404
- - qcom,rpm-sdm660
- - qcom,rpm-sm6115
- - qcom,rpm-sm6125
- - qcom,rpm-sm6375
+ oneOf:
+ - items:
+ - enum:
+ - qcom,rpm-apq8084
+ - qcom,rpm-mdm9607
+ - qcom,rpm-msm8226
+ - qcom,rpm-msm8610
+ - qcom,rpm-msm8909
+ - qcom,rpm-msm8916
+ - qcom,rpm-msm8917
+ - qcom,rpm-msm8936
+ - qcom,rpm-msm8937
+ - qcom,rpm-msm8952
+ - qcom,rpm-msm8953
+ - qcom,rpm-msm8974
+ - qcom,rpm-msm8976
+ - qcom,rpm-msm8994
+ - const: qcom,smd-rpm
+ - items:
+ - enum:
+ - qcom,rpm-ipq6018
+ - qcom,rpm-ipq9574
+ - qcom,rpm-msm8996
+ - qcom,rpm-msm8998
+ - qcom,rpm-qcm2290
+ - qcom,rpm-qcs404
+ - qcom,rpm-sdm660
+ - qcom,rpm-sm6115
+ - qcom,rpm-sm6125
+ - qcom,rpm-sm6375
+ - const: qcom,glink-smd-rpm
clock-controller:
$ref: /schemas/clock/qcom,rpmcc.yaml#
@@ -84,21 +90,7 @@ if:
properties:
compatible:
contains:
- enum:
- - qcom,rpm-apq8084
- - qcom,rpm-mdm9607
- - qcom,rpm-msm8226
- - qcom,rpm-msm8610
- - qcom,rpm-msm8909
- - qcom,rpm-msm8916
- - qcom,rpm-msm8917
- - qcom,rpm-msm8936
- - qcom,rpm-msm8937
- - qcom,rpm-msm8952
- - qcom,rpm-msm8953
- - qcom,rpm-msm8974
- - qcom,rpm-msm8976
- - qcom,rpm-msm8994
+ const: qcom,smd-rpm
then:
properties:
qcom,glink-channels: false
@@ -129,7 +121,7 @@ examples:
qcom,smd-edge = <15>;
rpm-requests {
- compatible = "qcom,rpm-msm8916";
+ compatible = "qcom,rpm-msm8916", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
clock-controller {
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
index 4819ce90d206..d9fabefc8147 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
@@ -56,7 +56,7 @@ examples:
qcom,smd-edge = <15>;
rpm-requests {
- compatible = "qcom,rpm-msm8974";
+ compatible = "qcom,rpm-msm8974", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
clock-controller {
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index 09d3ce97efa2..b7acb65bdecd 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -127,6 +127,18 @@ properties:
- const: hoperun,hihope-rzg2m
- const: renesas,r8a774a1
+ - description: RZ/G2M v3.0 (R8A774A3)
+ items:
+ - enum:
+ - hoperun,hihope-rzg2m # HopeRun HiHope RZ/G2M platform
+ - const: renesas,r8a774a3
+
+ - items:
+ - enum:
+ - hoperun,hihope-rzg2-ex # HopeRun expansion board for HiHope RZ/G2 platforms
+ - const: hoperun,hihope-rzg2m
+ - const: renesas,r8a774a3
+
- description: RZ/G2N (R8A774B1)
items:
- enum:
@@ -516,6 +528,8 @@ properties:
- description: RZ/V2H(P) (R9A09G057)
items:
- enum:
+ - renesas,rzv2h-evk # RZ/V2H EVK
+ - enum:
- renesas,r9a09g057h41 # RZ/V2H
- renesas,r9a09g057h42 # RZ/V2H with Mali-G31 support
- renesas,r9a09g057h44 # RZ/V2HP with Mali-G31 + Mali-C55 support
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 78c6d5b64138..50d727f4b76c 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -20,6 +20,20 @@ properties:
- rockchip,rk3568-pipe-grf
- rockchip,rk3568-pipe-phy-grf
- rockchip,rk3568-usb2phy-grf
+ - rockchip,rk3576-bigcore-grf
+ - rockchip,rk3576-cci-grf
+ - rockchip,rk3576-gpu-grf
+ - rockchip,rk3576-litcore-grf
+ - rockchip,rk3576-npu-grf
+ - rockchip,rk3576-php-grf
+ - rockchip,rk3576-pipe-phy-grf
+ - rockchip,rk3576-pmu1-grf
+ - rockchip,rk3576-sdgmac-grf
+ - rockchip,rk3576-sys-grf
+ - rockchip,rk3576-usb-grf
+ - rockchip,rk3576-usbdpphy-grf
+ - rockchip,rk3576-vo0-grf
+ - rockchip,rk3576-vop-grf
- rockchip,rk3588-bigcore0-grf
- rockchip,rk3588-bigcore1-grf
- rockchip,rk3588-hdptxphy-grf
@@ -31,11 +45,17 @@ properties:
- rockchip,rk3588-pcie3-pipe-grf
- rockchip,rk3588-usb-grf
- rockchip,rk3588-usbdpphy-grf
- - rockchip,rk3588-vo-grf
+ - rockchip,rk3588-vo0-grf
+ - rockchip,rk3588-vo1-grf
- rockchip,rk3588-vop-grf
- rockchip,rv1108-usbgrf
- const: syscon
- items:
+ - const: rockchip,rk3588-vo-grf
+ - const: syscon
+ deprecated: true
+ description: Use rockchip,rk3588-vo{0,1}-grf instead.
+ - items:
- enum:
- rockchip,px30-grf
- rockchip,px30-pmugrf
@@ -58,6 +78,8 @@ properties:
- rockchip,rk3399-pmugrf
- rockchip,rk3568-grf
- rockchip,rk3568-pmugrf
+ - rockchip,rk3576-ioc-grf
+ - rockchip,rk3576-pmu0-grf
- rockchip,rk3588-usb2phy-grf
- rockchip,rv1108-grf
- rockchip,rv1108-pmugrf
@@ -262,6 +284,8 @@ allOf:
contains:
enum:
- rockchip,rk3588-vo-grf
+ - rockchip,rk3588-vo0-grf
+ - rockchip,rk3588-vo1-grf
then:
required:
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
index 8b478d6cdc30..f80fcbc3128b 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
@@ -32,11 +32,16 @@ properties:
- enum:
- samsung,exynos850-usi
- reg: true
+ reg:
+ maxItems: 1
- clocks: true
+ clocks:
+ maxItems: 2
- clock-names: true
+ clock-names:
+ items:
+ - const: pclk
+ - const: ipclk
ranges: true
@@ -113,9 +118,7 @@ then:
- description: Operating clock for UART/SPI/I2C protocol
clock-names:
- items:
- - const: pclk
- - const: ipclk
+ maxItems: 2
required:
- reg
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml
index a10a3b89ae05..94b36943a50f 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml
@@ -14,6 +14,7 @@ properties:
items:
- const: ti,am654-serdes-ctrl
- const: syscon
+ - const: simple-mfd
reg:
maxItems: 1
@@ -31,7 +32,7 @@ additionalProperties: false
examples:
- |
clock@4080 {
- compatible = "ti,am654-serdes-ctrl", "syscon";
+ compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd";
reg = <0x4080 0x4>;
mux-controller {
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
index c402cb2928e8..3cb1471cc6b6 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
@@ -278,6 +278,26 @@ patternProperties:
additionalProperties: false
+ ^pa-stats@[a-f0-9]+$:
+ description: |
+ PA-STATS sub-module represented as a SysCon. PA_STATS is a set of
+ registers into which the ICSSG firmware dumps statistics related to
+ the ICSSG. This syscon sub-module lets the device read and write
+ those statistics.
+
+ type: object
+
+ additionalProperties: false
+
+ properties:
+ compatible:
+ items:
+ - const: ti,pruss-pa-st
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
interrupt-controller@[a-f0-9]+$:
description: |
PRUSS INTC Node. Each PRUSS has a single interrupt controller instance
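A conforming PA-STATS sub-node would look roughly as follows (a sketch: the
unit address and register size are assumptions for illustration):

    pa-stats@27000 {
        compatible = "ti,pruss-pa-st", "syscon";
        reg = <0x27000 0x1000>;
    };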
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml
index 5db718e4d0e7..4f13e8ab50b2 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml
@@ -26,6 +26,13 @@ properties:
A list off component DAPM widget. Each entry is a pair of strings,
the first being the widget type, the second being the widget name
+ clocks:
+ minItems: 1
+ maxItems: 3
+ description:
+ Base PLL clocks of the audio subsystem, used to configure base clock
+ frequencies for different audio use cases.
+
patternProperties:
"^dai-link-[0-9]+$":
type: object
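In a sound card node the new clocks property carries the audio PLL outputs,
for instance (the clock controller phandle and clock IDs are illustrative):

    clocks = <&clkc CLKID_MPLL0>,
             <&clkc CLKID_MPLL1>,
             <&clkc CLKID_MPLL2>;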
diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
index 0ecdaf7190e9..413b47778181 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
@@ -27,6 +27,13 @@ properties:
A list off component DAPM widget. Each entry is a pair of strings,
the first being the widget type, the second being the widget name
+ clocks:
+ minItems: 1
+ maxItems: 3
+ description:
+ Base PLL clocks of the audio subsystem, used to configure base clock
+ frequencies for different audio use cases.
+
patternProperties:
"^dai-link-[0-9]+$":
type: object
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml
new file mode 100644
index 000000000000..68fbf5cc208f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,cs4271.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CS4271 audio CODEC
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+description:
+ The CS4271 is a stereo audio codec. This device supports both the I2C
+ and the SPI bus.
+
+allOf:
+ - $ref: dai-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ const: cirrus,cs4271
+
+ reg:
+ maxItems: 1
+
+ spi-cpha: true
+
+ spi-cpol: true
+
+ '#sound-dai-cells':
+ const: 0
+
+ reset-gpios:
+ description:
+ This pin will be deasserted before communication to the codec starts.
+ maxItems: 1
+
+ va-supply:
+ description: Analog power supply.
+
+ vd-supply:
+ description: Digital power supply.
+
+ vl-supply:
+ description: Serial Control Port power supply.
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
+ cirrus,amuteb-eq-bmutec:
+ description:
+ When given, the Codec's AMUTEB=BMUTEC flag is enabled.
+ type: boolean
+
+ cirrus,enable-soft-reset:
+ description: |
+ The CS4271 requires its LRCLK and MCLK to be stable before its RESET
+ line is de-asserted. That also means that clocks cannot be changed
+ without putting the chip back into hardware reset, which also requires
+ a complete re-initialization of all registers.
+
+ One (undocumented) workaround is to assert and de-assert the PDN bit
+ in the MODE2 register. This workaround can be enabled with this DT
+ property.
+
+ Note that this is not needed in case the clocks are stable
+ throughout the entire runtime of the codec.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec@0 {
+ compatible = "cirrus,cs4271";
+ reg = <0>;
+ #sound-dai-cells = <0>;
+ spi-max-frequency = <6000000>;
+ spi-cpol;
+ spi-cpha;
+ reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ port {
+ endpoint {
+ remote-endpoint = <&i2s_ep>;
+ };
+ };
+ };
+ };
+
+...
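Since the CS4271 also supports I2C, an equivalent I2C node (mirroring the
example from the former text binding; the regulator phandles are
illustrative) would be:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        codec@10 {
            compatible = "cirrus,cs4271";
            reg = <0x10>;
            #sound-dai-cells = <0>;
            va-supply = <&vdd_3v3_reg>;
            vd-supply = <&vdd_3v3_reg>;
            vl-supply = <&vdd_3v3_reg>;
        };
    };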
diff --git a/Documentation/devicetree/bindings/sound/cs4271.txt b/Documentation/devicetree/bindings/sound/cs4271.txt
deleted file mode 100644
index 6e699ceabacd..000000000000
--- a/Documentation/devicetree/bindings/sound/cs4271.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Cirrus Logic CS4271 DT bindings
-
-This driver supports both the I2C and the SPI bus.
-
-Required properties:
-
- - compatible: "cirrus,cs4271"
-
-For required properties on SPI, please consult
-Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Required properties on I2C:
-
- - reg: the i2c address
-
-
-Optional properties:
-
- - reset-gpio: a GPIO spec to define which pin is connected to the chip's
- !RESET pin
- - cirrus,amuteb-eq-bmutec: When given, the Codec's AMUTEB=BMUTEC flag
- is enabled.
- - cirrus,enable-soft-reset:
- The CS4271 requires its LRCLK and MCLK to be stable before its RESET
- line is de-asserted. That also means that clocks cannot be changed
- without putting the chip back into hardware reset, which also requires
- a complete re-initialization of all registers.
-
- One (undocumented) workaround is to assert and de-assert the PDN bit
- in the MODE2 register. This workaround can be enabled with this DT
- property.
-
- Note that this is not needed in case the clocks are stable
- throughout the entire runtime of the codec.
-
- - vd-supply: Digital power
- - vl-supply: Logic power
- - va-supply: Analog Power
-
-Examples:
-
- codec_i2c: cs4271@10 {
- compatible = "cirrus,cs4271";
- reg = <0x10>;
- reset-gpio = <&gpio 23 0>;
- vd-supply = <&vdd_3v3_reg>;
- vl-supply = <&vdd_3v3_reg>;
- va-supply = <&vdd_3v3_reg>;
- };
-
- codec_spi: cs4271@0 {
- compatible = "cirrus,cs4271";
- reg = <0x0>;
- reset-gpio = <&gpio 23 0>;
- spi-max-frequency = <6000000>;
- };
-
diff --git a/Documentation/devicetree/bindings/sound/da7213.txt b/Documentation/devicetree/bindings/sound/da7213.txt
deleted file mode 100644
index 94584c96c4ae..000000000000
--- a/Documentation/devicetree/bindings/sound/da7213.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Dialog Semiconductor DA7212/DA7213 Audio Codec bindings
-
-======
-
-Required properties:
-- compatible : Should be "dlg,da7212" or "dlg,da7213"
-- reg: Specifies the I2C slave address
-
-Optional properties:
-- clocks : phandle and clock specifier for codec MCLK.
-- clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-
-- dlg,micbias1-lvl : Voltage (mV) for Mic Bias 1
- [<1600>, <2200>, <2500>, <3000>]
-- dlg,micbias2-lvl : Voltage (mV) for Mic Bias 2
- [<1600>, <2200>, <2500>, <3000>]
-- dlg,dmic-data-sel : DMIC channel select based on clock edge.
- ["lrise_rfall", "lfall_rrise"]
-- dlg,dmic-samplephase : When to sample audio from DMIC.
- ["on_clkedge", "between_clkedge"]
-- dlg,dmic-clkrate : DMIC clock frequency (Hz).
- [<1500000>, <3000000>]
-
- - VDDA-supply : Regulator phandle for Analogue power supply
- - VDDMIC-supply : Regulator phandle for Mic Bias
- - VDDIO-supply : Regulator phandle for I/O power supply
-
-======
-
-Example:
-
- codec_i2c: da7213@1a {
- compatible = "dlg,da7213";
- reg = <0x1a>;
-
- clocks = <&clks 201>;
- clock-names = "mclk";
-
- dlg,micbias1-lvl = <2500>;
- dlg,micbias2-lvl = <2500>;
-
- dlg,dmic-data-sel = "lrise_rfall";
- dlg,dmic-samplephase = "between_clkedge";
- dlg,dmic-clkrate = <3000000>;
- };
diff --git a/Documentation/devicetree/bindings/sound/dlg,da7213.yaml b/Documentation/devicetree/bindings/sound/dlg,da7213.yaml
new file mode 100644
index 000000000000..c2dede1e82ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/dlg,da7213.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/dlg,da7213.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dialog Semiconductor DA7212/DA7213 Audio Codec
+
+maintainers:
+ - Support Opensource <support.opensource@diasemi.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - dlg,da7212
+ - dlg,da7213
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: mclk
+
+ "#sound-dai-cells":
+ const: 0
+
+ dlg,micbias1-lvl:
+ description: Voltage (mV) for Mic Bias 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1600, 2200, 2500, 3000 ]
+
+ dlg,micbias2-lvl:
+ description: Voltage (mV) for Mic Bias 2
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1600, 2200, 2500, 3000 ]
+
+ dlg,dmic-data-sel:
+ description: DMIC channel select based on clock edge
+ enum: [ lrise_rfall, lfall_rrise ]
+
+ dlg,dmic-samplephase:
+ description: When to sample audio from DMIC
+ enum: [ on_clkedge, between_clkedge ]
+
+ dlg,dmic-clkrate:
+ description: DMIC clock frequency (Hz)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1500000, 3000000 ]
+
+ VDDA-supply:
+ description: Analogue power supply
+
+ VDDIO-supply:
+ description: I/O power supply
+
+ VDDMIC-supply:
+ description: Mic Bias
+
+ VDDSP-supply:
+ description: Speaker supply
+
+ ports:
+ $ref: audio-graph-port.yaml#/definitions/ports
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "dlg,da7213";
+ reg = <0x1a>;
+
+ clocks = <&clks 201>;
+ clock-names = "mclk";
+
+ #sound-dai-cells = <0>;
+
+ dlg,micbias1-lvl = <2500>;
+ dlg,micbias2-lvl = <2500>;
+
+ dlg,dmic-data-sel = "lrise_rfall";
+ dlg,dmic-samplephase = "between_clkedge";
+ dlg,dmic-clkrate = <3000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/everest,es8326.yaml b/Documentation/devicetree/bindings/sound/everest,es8326.yaml
index 8c82d47375ec..d51431df7acf 100644
--- a/Documentation/devicetree/bindings/sound/everest,es8326.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es8326.yaml
@@ -32,7 +32,7 @@ properties:
description: |
just the value of reg 57. Bit(3) decides whether the jack polarity is inverted.
Bit(2) decides whether the button on the headset is inverted.
- Bit(1)/(0) decides the mic properity to be OMTP/CTIA or auto.
+ Bit(1)/(0) decides the mic property to be OMTP/CTIA or auto.
minimum: 0x00
maximum: 0x0f
default: 0x0f
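Decoding the default value 0x0f against the bit map above: bit 3 = 1 (jack
polarity inverted), bit 2 = 1 (headset button inverted), and bits 1:0 = 0b11
(one of the OMTP/CTIA/auto mic modes; the exact encoding is defined by the
driver).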
diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml
new file mode 100644
index 000000000000..5eb6f5812cf2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,imx-audio-es8328.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX audio complex with ES8328 codec
+
+maintainers:
+ - Shawn Guo <shawnguo@kernel.org>
+ - Sascha Hauer <s.hauer@pengutronix.de>
+
+allOf:
+ - $ref: sound-card-common.yaml#
+
+properties:
+ compatible:
+ const: fsl,imx-audio-es8328
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex
+
+ ssi-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the i.MX SSI controller
+
+ jack-gpio:
+ description: Optional GPIO for headphone jack
+ maxItems: 1
+
+ audio-amp-supply:
+ description: Power regulator for speaker amps
+
+ audio-codec:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle to the ES8328 audio codec
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components. Each entry
+ is a pair of strings, the first being the connection's sink, the second
+ being the connection's source. Valid names could be power supplies,
+ ES8328 pins, and the jacks on the board:
+
+ Power supplies:
+ * audio-amp
+
+ ES8328 pins:
+ * LOUT1
+ * LOUT2
+ * ROUT1
+ * ROUT2
+ * LINPUT1
+ * LINPUT2
+ * RINPUT1
+ * RINPUT2
+ * Mic PGA
+
+ Board connectors:
+ * Headphone
+ * Speaker
+ * Mic Jack
+
+ mux-int-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The internal port of the i.MX audio muxer (AUDMUX)
+ enum: [1, 2, 7]
+ default: 1
+
+ mux-ext-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The external port of the i.MX audio muxer (AUDMUX)
+ enum: [3, 4, 5, 6]
+ default: 3
+
+required:
+ - compatible
+ - model
+ - ssi-controller
+ - jack-gpio
+ - audio-amp-supply
+ - audio-codec
+ - audio-routing
+ - mux-int-port
+ - mux-ext-port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "fsl,imx-audio-es8328";
+ model = "imx-audio-es8328";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ jack-gpio = <&gpio5 15 0>;
+ audio-amp-supply = <&reg_audio_amp>;
+ audio-routing =
+ "Speaker", "LOUT2",
+ "Speaker", "ROUT2",
+ "Speaker", "audio-amp",
+ "Headphone", "ROUT1",
+ "Headphone", "LOUT1",
+ "LINPUT1", "Mic Jack",
+ "RINPUT1", "Mic Jack",
+ "Mic Jack", "Mic Bias";
+ mux-int-port = <1>;
+ mux-ext-port = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,saif.yaml b/Documentation/devicetree/bindings/sound/fsl,saif.yaml
new file mode 100644
index 000000000000..0b5db6bb1b7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,saif.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,saif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MXS Serial Audio Interface (SAIF)
+
+maintainers:
+ - Lukasz Majewski <lukma@denx.de>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+description:
+ The SAIF is based on I2S module that is used to communicate with audio codecs,
+ but only with half-duplex manner (i.e. it can either transmit or receive PCM
+ audio).
+
+properties:
+ compatible:
+ const: fsl,imx28-saif
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rx-tx
+
+ "#clock-cells":
+ description: Configure the I2S device as MCLK clock provider.
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ fsl,saif-master:
+ description: Indicates that this SAIF is a slave; the phandle points to the master SAIF
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+ - interrupts
+ - dmas
+ - dma-names
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ saif0: saif@80042000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80042000 0x2000>;
+ #sound-dai-cells = <0>;
+ interrupts = <59>;
+ dmas = <&dma_apbx 4>;
+ dma-names = "rx-tx";
+ #clock-cells = <0>;
+ clocks = <&clks 53>;
+ };
+ - |
+ saif1: saif@80046000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80046000 0x2000>;
+ #sound-dai-cells = <0>;
+ interrupts = <58>;
+ dmas = <&dma_apbx 5>;
+ dma-names = "rx-tx";
+ clocks = <&clks 53>;
+ fsl,saif-master = <&saif0>;
+ };
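The former text binding also required each SAIF controller to have a
correctly numbered alias; a sketch matching the two examples above:

    aliases {
        saif0 = &saif0;
        saif1 = &saif1;
    };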
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt b/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt
deleted file mode 100644
index 07b68ab206fb..000000000000
--- a/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-Freescale i.MX audio complex with ES8328 codec
-
-Required properties:
-- compatible : "fsl,imx-audio-es8328"
-- model : The user-visible name of this sound complex
-- ssi-controller : The phandle of the i.MX SSI controller
-- jack-gpio : Optional GPIO for headphone jack
-- audio-amp-supply : Power regulator for speaker amps
-- audio-codec : The phandle of the ES8328 audio codec
-- audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. Valid names could be power supplies, ES8328
- pins, and the jacks on the board:
-
- Power supplies:
- * audio-amp
-
- ES8328 pins:
- * LOUT1
- * LOUT2
- * ROUT1
- * ROUT2
- * LINPUT1
- * LINPUT2
- * RINPUT1
- * RINPUT2
- * Mic PGA
-
- Board connectors:
- * Headphone
- * Speaker
- * Mic Jack
-- mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
-- mux-ext-port : The external port of the i.MX audio muxer (AUDMIX)
-
-Note: The AUDMUX port numbering should start at 1, which is consistent with
-hardware manual.
-
-Example:
-
-sound {
- compatible = "fsl,imx-audio-es8328";
- model = "imx-audio-es8328";
- ssi-controller = <&ssi1>;
- audio-codec = <&codec>;
- jack-gpio = <&gpio5 15 0>;
- audio-amp-supply = <&reg_audio_amp>;
- audio-routing =
- "Speaker", "LOUT2",
- "Speaker", "ROUT2",
- "Speaker", "audio-amp",
- "Headphone", "ROUT1",
- "Headphone", "LOUT1",
- "LINPUT1", "Mic Jack",
- "RINPUT1", "Mic Jack",
- "Mic Jack", "Mic Bias";
- mux-int-port = <1>;
- mux-ext-port = <3>;
-};
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml
new file mode 100644
index 000000000000..45ad56d37234
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt8365-afe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Audio Front End PCM controller for MT8365
+
+maintainers:
+ - Alexandre Mergnat <amergnat@baylibre.com>
+
+properties:
+ compatible:
+ const: mediatek,mt8365-afe-pcm
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ clocks:
+ items:
+ - description: 26M clock
+ - description: mux for audio clock
+ - description: audio i2s0 mck
+ - description: audio i2s1 mck
+ - description: audio i2s2 mck
+ - description: audio i2s3 mck
+ - description: engen 1 clock
+ - description: engen 2 clock
+ - description: audio 1 clock
+ - description: audio 2 clock
+ - description: mux for i2s0
+ - description: mux for i2s1
+ - description: mux for i2s2
+ - description: mux for i2s3
+
+ clock-names:
+ items:
+ - const: top_clk26m_clk
+ - const: top_audio_sel
+ - const: audio_i2s0_m
+ - const: audio_i2s1_m
+ - const: audio_i2s2_m
+ - const: audio_i2s3_m
+ - const: engen1
+ - const: engen2
+ - const: aud1
+ - const: aud2
+ - const: i2s0_m_sel
+ - const: i2s1_m_sel
+ - const: i2s2_m_sel
+ - const: i2s3_m_sel
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ mediatek,dmic-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Indicates how many data pins are used to transmit two channels of PDM
+ signal. 1 means two wires, 0 means one wire. Default value is 0.
+ enum:
+ - 0 # one wire
+ - 1 # two wires
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mediatek,mt8365-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/power/mediatek,mt8365-power.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ audio-controller@11220000 {
+ compatible = "mediatek,mt8365-afe-pcm";
+ reg = <0 0x11220000 0 0x1000>;
+ #sound-dai-cells = <0>;
+ clocks = <&clk26m>,
+ <&topckgen CLK_TOP_AUDIO_SEL>,
+ <&topckgen CLK_TOP_AUD_I2S0_M>,
+ <&topckgen CLK_TOP_AUD_I2S1_M>,
+ <&topckgen CLK_TOP_AUD_I2S2_M>,
+ <&topckgen CLK_TOP_AUD_I2S3_M>,
+ <&topckgen CLK_TOP_AUD_ENGEN1_SEL>,
+ <&topckgen CLK_TOP_AUD_ENGEN2_SEL>,
+ <&topckgen CLK_TOP_AUD_1_SEL>,
+ <&topckgen CLK_TOP_AUD_2_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S0_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S1_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S2_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S3_SEL>;
+ clock-names = "top_clk26m_clk",
+ "top_audio_sel",
+ "audio_i2s0_m",
+ "audio_i2s1_m",
+ "audio_i2s2_m",
+ "audio_i2s3_m",
+ "engen1",
+ "engen2",
+ "aud1",
+ "aud2",
+ "i2s0_m_sel",
+ "i2s1_m_sel",
+ "i2s2_m_sel",
+ "i2s3_m_sel";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_AUDIO>;
+ mediatek,dmic-mode = <1>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml
new file mode 100644
index 000000000000..ff9ebb63a05f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mediatek,mt8365-mt6357.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT8365 ASoC sound card
+
+maintainers:
+ - Alexandre Mergnat <amergnat@baylibre.com>
+
+properties:
+ compatible:
+ const: mediatek,mt8365-mt6357
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: dmic
+ - const: miso_off
+ - const: miso_on
+ - const: mosi_off
+ - const: mosi_on
+
+ mediatek,platform:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of MT8365 ASoC platform.
+
+patternProperties:
+ "^dai-link-[0-9]+$":
+ type: object
+ description:
+ Container for dai-link level properties and CODEC sub-nodes.
+
+ properties:
+ codec:
+ type: object
+ description: Holds subnode which indicates codec dai.
+
+ properties:
+ sound-dai:
+ maxItems: 1
+ description: phandle of the codec DAI
+
+ additionalProperties: false
+
+ link-name:
+ description: Indicates dai-link name and PCM stream name
+ enum:
+ - I2S_IN_BE
+ - I2S_OUT_BE
+ - PCM1_BE
+ - PDM1_BE
+ - PDM2_BE
+ - PDM3_BE
+ - PDM4_BE
+ - SPDIF_IN_BE
+ - SPDIF_OUT_BE
+ - TDM_IN_BE
+ - TDM_OUT_BE
+
+ sound-dai:
+ maxItems: 1
+ description: phandle of the CPU DAI
+
+ required:
+ - link-name
+ - sound-dai
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - pinctrl-names
+ - mediatek,platform
+
+additionalProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "mediatek,mt8365-mt6357";
+ pinctrl-names = "default",
+ "dmic",
+ "miso_off",
+ "miso_on",
+ "mosi_off",
+ "mosi_on";
+ pinctrl-0 = <&aud_default_pins>;
+ pinctrl-1 = <&aud_dmic_pins>;
+ pinctrl-2 = <&aud_miso_off_pins>;
+ pinctrl-3 = <&aud_miso_on_pins>;
+ pinctrl-4 = <&aud_mosi_off_pins>;
+ pinctrl-5 = <&aud_mosi_on_pins>;
+ mediatek,platform = <&afe>;
+
+ /* hdmi interface */
+ dai-link-0 {
+ link-name = "I2S_OUT_BE";
+ sound-dai = <&afe>;
+
+ codec {
+ sound-dai = <&it66121hdmitx>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml b/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml
index 2f43c684ab88..7fbab5871be4 100644
--- a/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml
+++ b/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml
@@ -13,6 +13,9 @@ description:
The Microchip Sony/Philips Digital Interface Receiver is a serial port
compliant with the IEC-60958 standard.
+allOf:
+ - $ref: dai-common.yaml#
+
properties:
"#sound-dai-cells":
const: 0
@@ -53,7 +56,7 @@ required:
- dmas
- dma-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/mxs-saif.txt b/Documentation/devicetree/bindings/sound/mxs-saif.txt
deleted file mode 100644
index 7ba07a118e37..000000000000
--- a/Documentation/devicetree/bindings/sound/mxs-saif.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Freescale MXS Serial Audio Interface (SAIF)
-
-Required properties:
-- compatible: Should be "fsl,<chip>-saif"
-- reg: Should contain registers location and length
-- interrupts: Should contain ERROR interrupt number
-- dmas: DMA specifier, consisting of a phandle to DMA controller node
- and SAIF DMA channel ID.
- Refer to dma.txt and fsl-mxs-dma.txt for details.
-- dma-names: Must be "rx-tx".
-
-Optional properties:
-- fsl,saif-master: phandle to the master SAIF. It's only required for
- the slave SAIF.
-
-Note: Each SAIF controller should have an alias correctly numbered
-in "aliases" node.
-
-Example:
-
-aliases {
- saif0 = &saif0;
- saif1 = &saif1;
-};
-
-saif0: saif@80042000 {
- compatible = "fsl,imx28-saif";
- reg = <0x80042000 2000>;
- interrupts = <59>;
- dmas = <&dma_apbx 4>;
- dma-names = "rx-tx";
-};
-
-saif1: saif@80046000 {
- compatible = "fsl,imx28-saif";
- reg = <0x80046000 2000>;
- interrupts = <58>;
- dmas = <&dma_apbx 5>;
- dma-names = "rx-tx";
- fsl,saif-master = <&saif0>;
-};
diff --git a/Documentation/devicetree/bindings/sound/pcm512x.txt b/Documentation/devicetree/bindings/sound/pcm512x.txt
deleted file mode 100644
index 47878a6df608..000000000000
--- a/Documentation/devicetree/bindings/sound/pcm512x.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-PCM512x and TAS575x audio CODECs/amplifiers
-
-These devices support both I2C and SPI (configured with pin strapping
-on the board). The TAS575x devices only support I2C.
-
-Required properties:
-
- - compatible : One of "ti,pcm5121", "ti,pcm5122", "ti,pcm5141",
- "ti,pcm5142", "ti,pcm5242", "ti,tas5754" or "ti,tas5756"
-
- - reg : the I2C address of the device for I2C, the chip select
- number for SPI.
-
- - AVDD-supply, DVDD-supply, and CPVDD-supply : power supplies for the
- device, as covered in bindings/regulator/regulator.txt
-
-Optional properties:
-
- - clocks : A clock specifier for the clock connected as SCLK. If this
- is absent the device will be configured to clock from BCLK. If pll-in
- and pll-out are specified in addition to a clock, the device is
- configured to accept clock input on a specified gpio pin.
-
- - pll-in, pll-out : gpio pins used to connect the pll using <1>
- through <6>. The device will be configured for clock input on the
- given pll-in pin and PLL output on the given pll-out pin. An
- external connection from the pll-out pin to the SCLK pin is assumed.
- Caution: the TAS-desvices only support gpios 1,2 and 3
-
-Examples:
-
- pcm5122: pcm5122@4c {
- compatible = "ti,pcm5122";
- reg = <0x4c>;
-
- AVDD-supply = <&reg_3v3_analog>;
- DVDD-supply = <&reg_1v8>;
- CPVDD-supply = <&reg_3v3>;
- };
-
-
- pcm5142: pcm5142@4c {
- compatible = "ti,pcm5142";
- reg = <0x4c>;
-
- AVDD-supply = <&reg_3v3_analog>;
- DVDD-supply = <&reg_1v8>;
- CPVDD-supply = <&reg_3v3>;
-
- clocks = <&sck>;
- pll-in = <3>;
- pll-out = <6>;
- };
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml
new file mode 100644
index 000000000000..6ad451549036
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,apq8016-sbc-sndcard.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm APQ8016 and similar sound cards
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ - Stephan Gerhold <stephan@gerhold.net>
+
+properties:
+ compatible:
+ enum:
+ - qcom,apq8016-sbc-sndcard
+ - qcom,msm8916-qdsp6-sndcard
+
+ reg:
+ items:
+ - description: Microphone I/O mux register address
+ - description: Speaker I/O mux register address
+
+ reg-names:
+ items:
+ - const: mic-iomux
+ - const: spkr-iomux
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source. Valid names could be power supplies,
+ MicBias of codec and the jacks on the board.
+
+ aux-devs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ List of phandles pointing to auxiliary devices, such
+ as amplifiers, to be added to the sound card.
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: User visible long sound card name
+
+ pin-switches:
+ description: List of widget names for which pin switches should be created.
+ $ref: /schemas/types.yaml#/definitions/string-array
+
+ widgets:
+ description: User specified audio sound widgets.
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+patternProperties:
+ ".*-dai-link$":
+ description:
+ Each subnode represents a DAI link. Subnodes of each DAI link are the
+ cpu/codec DAIs.
+
+ type: object
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name.
+ $ref: /schemas/types.yaml#/definitions/string
+ maxItems: 1
+
+ cpu:
+ description: Holds subnode which indicates cpu dai.
+ type: object
+ additionalProperties: false
+
+ properties:
+ sound-dai:
+ maxItems: 1
+
+ platform:
+ description: Holds subnode which indicates platform dai.
+ type: object
+ additionalProperties: false
+
+ properties:
+ sound-dai:
+ maxItems: 1
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ additionalProperties: false
+
+ properties:
+ sound-dai:
+ minItems: 1
+ maxItems: 8
+
+ required:
+ - link-name
+ - cpu
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - model
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/qcom,lpass.h>
+ sound@7702000 {
+ compatible = "qcom,apq8016-sbc-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+
+ model = "DB410c";
+ audio-routing =
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+
+ pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
+ pinctrl-names = "default", "sleep";
+
+ quaternary-dai-link {
+ link-name = "ADV7533";
+ cpu {
+ sound-dai = <&lpass MI2S_QUATERNARY>;
+ };
+ codec {
+ sound-dai = <&adv_bridge 0>;
+ };
+ };
+
+ primary-dai-link {
+ link-name = "WCD";
+ cpu {
+ sound-dai = <&lpass MI2S_PRIMARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
+ };
+ };
+
+ tertiary-dai-link {
+ link-name = "WCD-Capture";
+ cpu {
+ sound-dai = <&lpass MI2S_TERTIARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 1>, <&wcd_codec 1>;
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/sound/qcom,q6afe.h>
+ #include <dt-bindings/sound/qcom,q6asm.h>
+ sound@7702000 {
+ compatible = "qcom,msm8916-qdsp6-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+
+ model = "msm8916";
+ widgets =
+ "Speaker", "Speaker",
+ "Headphone", "Headphones";
+ pin-switches = "Speaker";
+ audio-routing =
+ "Speaker", "Speaker Amp OUT",
+ "Speaker Amp IN", "HPH_R",
+ "Headphones", "HPH_L",
+ "Headphones", "HPH_R",
+ "AMIC1", "MIC BIAS Internal1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS Internal3";
+ aux-devs = <&speaker_amp>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cdc_pdm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus>;
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ primary-dai-link {
+ link-name = "Primary MI2S";
+ cpu {
+ sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
index 06b5f7be3608..6f5644a89feb 100644
--- a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
@@ -64,6 +64,7 @@ allOf:
compatible:
enum:
- qcom,sc7280-lpass-wsa-macro
+ - qcom,sm8250-lpass-wsa-macro
- qcom,sm8450-lpass-wsa-macro
- qcom,sc8280xp-lpass-wsa-macro
then:
@@ -82,24 +83,6 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sm8250-lpass-wsa-macro
- then:
- properties:
- clocks:
- minItems: 6
- clock-names:
- items:
- - const: mclk
- - const: npl
- - const: macro
- - const: dcodec
- - const: va
- - const: fsgen
-
- - if:
- properties:
- compatible:
contains:
enum:
- qcom,sm8550-lpass-wsa-macro
@@ -130,8 +113,7 @@ examples:
<&audiocc 0>,
<&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&aoncc LPASS_CDC_VA_MCLK>,
<&vamacro>;
- clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
+ clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
clock-output-names = "mclk";
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
index c9076dcd44c1..1d3acdc0c733 100644
--- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
@@ -27,9 +27,7 @@ properties:
- qcom,sm8650-sndcard
- const: qcom,sm8450-sndcard
- enum:
- - qcom,apq8016-sbc-sndcard
- qcom,apq8096-sndcard
- - qcom,msm8916-qdsp6-sndcard
- qcom,qcm6490-idp-sndcard
- qcom,qcs6490-rb3gen2-sndcard
- qcom,qrb5165-rb5-sndcard
@@ -58,18 +56,6 @@ properties:
$ref: /schemas/types.yaml#/definitions/string
description: User visible long sound card name
- pin-switches:
- description: List of widget names for which pin switches should be created.
- $ref: /schemas/types.yaml#/definitions/string-array
-
- widgets:
- description: User specified audio sound widgets.
- $ref: /schemas/types.yaml#/definitions/non-unique-string-array
-
- # Only valid for some compatibles (see allOf if below)
- reg: true
- reg-names: true
-
patternProperties:
".*-dai-link$":
description:
@@ -122,34 +108,6 @@ required:
- compatible
- model
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,apq8016-sbc-sndcard
- - qcom,msm8916-qdsp6-sndcard
- then:
- properties:
- reg:
- items:
- - description: Microphone I/O mux register address
- - description: Speaker I/O mux register address
- reg-names:
- items:
- - const: mic-iomux
- - const: spkr-iomux
- required:
- - compatible
- - model
- - reg
- - reg-names
- else:
- properties:
- reg: false
- reg-names: false
-
additionalProperties: false
examples:
@@ -231,98 +189,3 @@ examples:
};
};
};
-
- - |
- #include <dt-bindings/sound/qcom,lpass.h>
- sound@7702000 {
- compatible = "qcom,apq8016-sbc-sndcard";
- reg = <0x07702000 0x4>, <0x07702004 0x4>;
- reg-names = "mic-iomux", "spkr-iomux";
-
- model = "DB410c";
- audio-routing =
- "AMIC2", "MIC BIAS Internal2",
- "AMIC3", "MIC BIAS External1";
-
- pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
- pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
- pinctrl-names = "default", "sleep";
-
- quaternary-dai-link {
- link-name = "ADV7533";
- cpu {
- sound-dai = <&lpass MI2S_QUATERNARY>;
- };
- codec {
- sound-dai = <&adv_bridge 0>;
- };
- };
-
- primary-dai-link {
- link-name = "WCD";
- cpu {
- sound-dai = <&lpass MI2S_PRIMARY>;
- };
- codec {
- sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
- };
- };
-
- tertiary-dai-link {
- link-name = "WCD-Capture";
- cpu {
- sound-dai = <&lpass MI2S_TERTIARY>;
- };
- codec {
- sound-dai = <&lpass_codec 1>, <&wcd_codec 1>;
- };
- };
- };
-
- - |
- #include <dt-bindings/sound/qcom,q6afe.h>
- #include <dt-bindings/sound/qcom,q6asm.h>
- sound@7702000 {
- compatible = "qcom,msm8916-qdsp6-sndcard";
- reg = <0x07702000 0x4>, <0x07702004 0x4>;
- reg-names = "mic-iomux", "spkr-iomux";
-
- model = "msm8916";
- widgets =
- "Speaker", "Speaker",
- "Headphone", "Headphones";
- pin-switches = "Speaker";
- audio-routing =
- "Speaker", "Speaker Amp OUT",
- "Speaker Amp IN", "HPH_R",
- "Headphones", "HPH_L",
- "Headphones", "HPH_R",
- "AMIC1", "MIC BIAS Internal1",
- "AMIC2", "MIC BIAS Internal2",
- "AMIC3", "MIC BIAS Internal3";
- aux-devs = <&speaker_amp>;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&cdc_pdm_lines_act>;
- pinctrl-1 = <&cdc_pdm_lines_sus>;
-
- mm1-dai-link {
- link-name = "MultiMedia1";
- cpu {
- sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
- };
- };
-
- primary-dai-link {
- link-name = "Primary MI2S";
- cpu {
- sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
- };
- platform {
- sound-dai = <&q6routing>;
- };
- codec {
- sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml
index 248320804e5f..29071044c66e 100644
--- a/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml
@@ -30,6 +30,18 @@ properties:
reg:
maxItems: 1
+ clocks:
+ items:
+ - description: Master clock to the CODEC
+
+ clock-names:
+ items:
+ - const: mclk
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
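Wired into a codec node, the new clock properties would look like this
(the clock provider and its clock index are illustrative):

    clocks = <&cru SCLK_I2S_OUT>;
    clock-names = "mclk";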
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 07ec6247d9de..3bc93c59535e 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -112,6 +112,12 @@ properties:
description: List of necessary clock names.
# details are defined below
+ post-init-providers:
+ description: If rsnd uses a DPCM connection on Audio-Graph-Card2,
+ fw_devlink might not have enough information to break the dependency
+ cycle, and the rsnd driver will then not be probed. The same problem
+ might occur with Multi-CPU/Codec or Codec2Codec.
+
# ports is below
port:
$ref: audio-graph-port.yaml#/definitions/port-base
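A sketch of how such a cycle can be broken (which node must list the other
as a post-init provider depends on the actual dependency cycle; the labels
here are assumptions):

    &rcar_sound {
        /* tell fw_devlink this link may be created after both probe */
        post-init-providers = <&sound_card>;
    };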
diff --git a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
index 8b9695f5decc..f4610eaed1e1 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
@@ -87,6 +87,10 @@ properties:
'#sound-dai-cells':
const: 0
+ port:
+ $ref: audio-graph-port.yaml#/definitions/port-base
+ description: Connection to controller providing I2S signals
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
index b77284e3e26a..c3dea852cc8d 100644
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
@@ -27,11 +27,6 @@ properties:
- const: samsung,odroid-xu4-audio
deprecated: true
- assigned-clock-parents: true
- assigned-clock-rates: true
- assigned-clocks: true
- clocks: true
-
cpu:
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/sound/serial-midi.yaml b/Documentation/devicetree/bindings/sound/serial-midi.yaml
index f6a807329a5a..3b2f6dd5bffa 100644
--- a/Documentation/devicetree/bindings/sound/serial-midi.yaml
+++ b/Documentation/devicetree/bindings/sound/serial-midi.yaml
@@ -22,6 +22,9 @@ description:
configure the clocks of the parent serial device so that a requested baud of 38.4 kBaud
results in the standard MIDI baud rate, and set the 'current-speed' property to 38400 (default)
+allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+
properties:
compatible:
const: serial-midi
diff --git a/Documentation/devicetree/bindings/sound/st,sta350.txt b/Documentation/devicetree/bindings/sound/st,sta350.txt
index 307398ef2317..e3d84864e0e4 100644
--- a/Documentation/devicetree/bindings/sound/st,sta350.txt
+++ b/Documentation/devicetree/bindings/sound/st,sta350.txt
@@ -77,7 +77,7 @@ Optional properties:
- st,odd-pwm-speed-mode:
If present, PWM speed mode run on odd speed mode (341.3 kHz) on all
- channels. If not present, normal PWM spped mode (384 kHz) will be used.
+ channels. If not present, normal PWM speed mode (384 kHz) will be used.
- st,distortion-compensation:
If present, distortion compensation variable uses DCC coefficient.
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml b/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml
new file mode 100644
index 000000000000..21ea9ff5a2bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,pcm512x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PCM512x and TAS575x audio CODECs/amplifiers
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,pcm5121
+ - ti,pcm5122
+ - ti,pcm5141
+ - ti,pcm5142
+ - ti,pcm5242
+ - ti,tas5754
+ - ti,tas5756
+
+ reg:
+ maxItems: 1
+
+ AVDD-supply: true
+
+ DVDD-supply: true
+
+ CPVDD-supply: true
+
+ clocks:
+ maxItems: 1
+ description: A clock specifier for the clock connected as SCLK. If this is
+ absent, the device will be configured to clock from BCLK. If pll-in and
+ pll-out are specified in addition to a clock, the device is configured to
+ accept clock input on the specified GPIO pin.
+
+ '#sound-dai-cells':
+ const: 0
+
+ pll-in:
+ description: GPIO pin (<1> through <6>) used as the PLL input. The
+ device will be configured for clock input on the given pll-in pin.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 6
+
+ pll-out:
+ description: GPIO pin (<1> through <6>) used as the PLL output. The
+ device will be configured for PLL output on the given pll-out pin. An
+ external connection from the pll-out pin to the SCLK pin is assumed.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 6
+
+required:
+ - compatible
+ - reg
+ - AVDD-supply
+ - DVDD-supply
+ - CPVDD-supply
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,tas5754
+ - ti,tas5756
+
+then:
+ properties:
+ pll-in:
+ maximum: 3
+
+ pll-out:
+ maximum: 3
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec@4c {
+ compatible = "ti,pcm5142";
+ reg = <0x4c>;
+ AVDD-supply = <&reg_3v3_analog>;
+ DVDD-supply = <&reg_1v8>;
+ CPVDD-supply = <&reg_3v3>;
+ #sound-dai-cells = <0>;
+ clocks = <&sck>;
+ pll-in = <3>;
+ pll-out = <6>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml b/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml
new file mode 100644
index 000000000000..85e937e34962
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,tlv320dac3100.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - tlv320aic31xx Codec module
+
+maintainers:
+ - Shenghao Ding <shenghao-ding@ti.com>
+
+description: |
+ CODEC output pins:
+ * HPL
+ * HPR
+ * SPL, devices with stereo speaker amp
+ * SPR, devices with stereo speaker amp
+ * SPK, devices with mono speaker amp
+ * MICBIAS
+
+ CODEC input pins:
+ * MIC1LP, devices with ADC
+ * MIC1RP, devices with ADC
+ * MIC1LM, devices with ADC
+ * AIN1, devices without ADC
+ * AIN2, devices without ADC
+
+ These pins can be referenced in the referring sound node's audio-routing property.
+
+properties:
+ compatible:
+ enum:
+ - ti,tlv320aic310x # - Generic TLV320AIC31xx with mono speaker amp
+ - ti,tlv320aic311x # - Generic TLV320AIC31xx with stereo speaker amp
+ - ti,tlv320aic3100 # - TLV320AIC3100 (mono speaker amp, no MiniDSP)
+ - ti,tlv320aic3110 # - TLV320AIC3110 (stereo speaker amp, no MiniDSP)
+ - ti,tlv320aic3120 # - TLV320AIC3120 (mono speaker amp, MiniDSP)
+ - ti,tlv320aic3111 # - TLV320AIC3111 (stereo speaker amp, MiniDSP)
+ - ti,tlv320dac3100 # - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
+ - ti,tlv320dac3101 # - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP)
+
+ reg:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 0
+
+ HPVDD-supply: true
+
+ SPRVDD-supply: true
+
+ SPLVDD-supply: true
+
+ AVDD-supply: true
+
+ IOVDD-supply: true
+
+ DVDD-supply: true
+
+ reset-gpios:
+ description: GPIO specification for the active low RESET input.
+
+ ai31xx-micbias-vg:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 1
+ enum: [1, 2, 3]
+ description: |
+ MicBias Voltage setting
+ 1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
+ 2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
+ 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
+
+ ai31xx-ocmv:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ output common-mode voltage setting
+ 0 - 1.35V,
+ 1 - 1.5V,
+ 2 - 1.65V,
+ 3 - 1.8V
+
+ gpio-reset:
+ description: gpio pin number used for codec reset
+ deprecated: true
+
+required:
+ - compatible
+ - reg
+ - HPVDD-supply
+ - SPRVDD-supply
+ - SPLVDD-supply
+ - AVDD-supply
+ - IOVDD-supply
+ - DVDD-supply
+
+allOf:
+ - $ref: dai-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/sound/tlv320aic31xx.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sound@18 {
+ compatible = "ti,tlv320aic311x";
+ reg = <0x18>;
+
+ ai31xx-micbias-vg = <MICBIAS_2_0V>;
+ reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
+ HPVDD-supply = <&regulator>;
+ SPRVDD-supply = <&regulator>;
+ SPLVDD-supply = <&regulator>;
+ AVDD-supply = <&regulator>;
+ IOVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml b/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml
new file mode 100644
index 000000000000..a42bf9bde694
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,tpa6130a2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - tpa6130a2 Codec module
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description:
+ Stereo, analog input headphone amplifier
+
+properties:
+ compatible:
+ enum:
+ - ti,tpa6130a2
+ - ti,tpa6140a2
+
+ reg:
+ maxItems: 1
+
+ Vdd-supply:
+ description: power supply regulator
+
+ power-gpio:
+ description: gpio pin to power the device
+
+required:
+ - compatible
+ - reg
+ - Vdd-supply
+
+allOf:
+ - $ref: dai-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ amplifier@60 {
+ compatible = "ti,tpa6130a2";
+ reg = <0x60>;
+ Vdd-supply = <&vmmc2>;
+ power-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
deleted file mode 100644
index bbad98d5b986..000000000000
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-Texas Instruments - tlv320aic31xx Codec module
-
-The tlv320aic31xx serial control bus communicates through I2C protocols
-
-Required properties:
-
-- compatible - "string" - One of:
- "ti,tlv320aic310x" - Generic TLV320AIC31xx with mono speaker amp
- "ti,tlv320aic311x" - Generic TLV320AIC31xx with stereo speaker amp
- "ti,tlv320aic3100" - TLV320AIC3100 (mono speaker amp, no MiniDSP)
- "ti,tlv320aic3110" - TLV320AIC3110 (stereo speaker amp, no MiniDSP)
- "ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP)
- "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
- "ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
- "ti,tlv320dac3101" - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP)
-
-- reg - <int> - I2C slave address
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
- DVDD-supply : power supplies for the device as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
-
-
-Optional properties:
-
-- reset-gpios - GPIO specification for the active low RESET input.
-- ai31xx-micbias-vg - MicBias Voltage setting
- 1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
- 2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
- 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
- If this node is not mentioned or if the value is unknown, then
- micbias is set to 2.0V.
-- ai31xx-ocmv - output common-mode voltage setting
- 0 - 1.35V,
- 1 - 1.5V,
- 2 - 1.65V,
- 3 - 1.8V
-
-Deprecated properties:
-
-- gpio-reset - gpio pin number used for codec reset
-
-CODEC output pins:
- * HPL
- * HPR
- * SPL, devices with stereo speaker amp
- * SPR, devices with stereo speaker amp
- * SPK, devices with mono speaker amp
- * MICBIAS
-
-CODEC input pins:
- * MIC1LP, devices with ADC
- * MIC1RP, devices with ADC
- * MIC1LM, devices with ADC
- * AIN1, devices without ADC
- * AIN2, devices without ADC
-
-The pins can be used in referring sound node's audio-routing property.
-
-Example:
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/sound/tlv320aic31xx.h>
-
-tlv320aic31xx: tlv320aic31xx@18 {
- compatible = "ti,tlv320aic311x";
- reg = <0x18>;
-
- ai31xx-micbias-vg = <MICBIAS_OFF>;
-
- reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
-
- HPVDD-supply = <&regulator>;
- SPRVDD-supply = <&regulator>;
- SPLVDD-supply = <&regulator>;
- AVDD-supply = <&regulator>;
- IOVDD-supply = <&regulator>;
- DVDD-supply = <&regulator>;
-};
diff --git a/Documentation/devicetree/bindings/sound/tpa6130a2.txt b/Documentation/devicetree/bindings/sound/tpa6130a2.txt
deleted file mode 100644
index 6dfa740e4b2d..000000000000
--- a/Documentation/devicetree/bindings/sound/tpa6130a2.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Texas Instruments - tpa6130a2 Codec module
-
-The tpa6130a2 serial control bus communicates through I2C protocols
-
-Required properties:
-
-- compatible - "string" - One of:
- "ti,tpa6130a2" - TPA6130A2
- "ti,tpa6140a2" - TPA6140A2
-
-
-- reg - <int> - I2C slave address
-
-- Vdd-supply - <phandle> - power supply regulator
-
-Optional properties:
-
-- power-gpio - gpio pin to power the device
-
-Example:
-
-tpa6130a2: tpa6130a2@60 {
- compatible = "ti,tpa6130a2";
- reg = <0x60>;
- Vdd-supply = <&vmmc2>;
- power-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/spi/cdns,xspi.yaml b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml
index eb0f92468185..38a5795589de 100644
--- a/Documentation/devicetree/bindings/spi/cdns,xspi.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml
@@ -15,24 +15,27 @@ description: |
single, dual, quad or octal wire transmission modes for
read/write access to slaves such as SPI-NOR flash.
-allOf:
- - $ref: spi-controller.yaml#
-
properties:
compatible:
- const: cdns,xspi-nor
+ enum:
+ - cdns,xspi-nor
+ - marvell,cn10-xspi-nor
reg:
items:
- description: address and length of the controller register set
- description: address and length of the Slave DMA data port
- description: address and length of the auxiliary registers
+ - description: address and length of the xfer registers
+ minItems: 3
reg-names:
items:
- const: io
- const: sdma
- const: aux
+ - const: xfer
+ minItems: 3
interrupts:
maxItems: 1
@@ -42,6 +45,27 @@ required:
- reg
- interrupts
+allOf:
+ - $ref: spi-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - marvell,cn10-xspi-nor
+ then:
+ properties:
+ reg:
+ minItems: 4
+ reg-names:
+ minItems: 4
+ else:
+ properties:
+ reg:
+ maxItems: 3
+ reg-names:
+ maxItems: 3
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
index b6249880c3f9..e1f5bfa4433c 100644
--- a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
+++ b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
@@ -33,6 +33,7 @@ properties:
- const: mediatek,mt6765-spi
- items:
- enum:
+ - mediatek,mt7981-spi-ipm
- mediatek,mt7986-spi-ipm
- mediatek,mt8188-spi-ipm
- const: mediatek,spi-ipm
diff --git a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
index ffa8d1b48f8b..62a568bdbfa0 100644
--- a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
@@ -17,9 +17,14 @@ properties:
compatible:
oneOf:
- items:
- - const: microchip,mpfs-qspi
+ - enum:
+ - microchip,mpfs-qspi
+ - microchip,pic64gx-qspi
- const: microchip,coreqspi-rtl-v2
- const: microchip,coreqspi-rtl-v2 # FPGA QSPI
+ - items:
+ - const: microchip,pic64gx-spi
+ - const: microchip,mpfs-spi
- const: microchip,mpfs-spi
reg:
diff --git a/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml b/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml
new file mode 100644
index 000000000000..43753a94837c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/nxp,sc18is.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP SC18IS602/SC18IS603 I2C to SPI bridge
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,sc18is602
+ - nxp,sc18is602b
+ - nxp,sc18is603
+
+ reg:
+ maxItems: 1
+
+ clock-frequency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 7372000
+ description:
+ External oscillator clock frequency. This property is relevant and
+ needed only if the chip has an external oscillator (SC18IS603).
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi@28 {
+ compatible = "nxp,sc18is603";
+ reg = <0x28>;
+ clock-frequency = <14744000>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
index 4a5f41bde00f..902db92da832 100644
--- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
@@ -21,6 +21,7 @@ properties:
- nxp,imx8mm-fspi
- nxp,imx8mp-fspi
- nxp,imx8qxp-fspi
+ - nxp,imx8ulp-fspi
- nxp,lx2160a-fspi
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index e4941e9212d1..46d9d6ee0923 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -35,6 +35,7 @@ properties:
- rockchip,rk3368-spi
- rockchip,rk3399-spi
- rockchip,rk3568-spi
+ - rockchip,rk3576-spi
- rockchip,rk3588-spi
- rockchip,rv1126-spi
- const: rockchip,rk3066-spi
diff --git a/Documentation/devicetree/bindings/spi/spi-sc18is602.txt b/Documentation/devicetree/bindings/spi/spi-sc18is602.txt
deleted file mode 100644
index 02f9033270a2..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-sc18is602.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-NXP SC18IS602/SCIS603
-
-Required properties:
- - compatible : Should be one of
- "nxp,sc18is602"
- "nxp,sc18is602b"
- "nxp,sc18is603"
- - reg: I2C bus address
-
-Optional properties:
- - clock-frequency : external oscillator clock frequency. If not
- specified, the SC18IS602 default frequency (7372000) will be used.
-
-The clock-frequency property is relevant and needed only if the chip has an
-external oscillator (SC18IS603).
-
-Example:
-
- sc18is603@28 {
- compatible = "nxp,sc18is603";
- reg = <0x28>;
- clock-frequency = <14744000>;
- }
diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
index 725303e1a364..70b273271754 100644
--- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
@@ -32,6 +32,9 @@ properties:
clocks:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
amlogic,ao-secure:
description: phandle to the ao-secure syscon
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 72048c5a0412..a12fddc81955 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -51,6 +51,7 @@ properties:
- qcom,msm8996-tsens
- qcom,msm8998-tsens
- qcom,qcm2290-tsens
+ - qcom,sa8255p-tsens
- qcom,sa8775p-tsens
- qcom,sc7180-tsens
- qcom,sc7280-tsens
@@ -310,7 +311,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
- // Example 1 (new calbiration data: for pre v1 IP):
+ // Example 1 (new calibration data: for pre v1 IP):
thermal-sensor@4a9000 {
compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1";
reg = <0x4a9000 0x1000>, /* TM */
diff --git a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt
deleted file mode 100644
index 844bd5fbd04c..000000000000
--- a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-BCM2835 System Timer
-
-The System Timer peripheral provides four 32-bit timer channels and a
-single 64-bit free running counter. Each channel has an output compare
-register, which is compared against the 32 least significant bits of the
-free running counter values, and generates an interrupt.
-
-Required properties:
-
-- compatible : should be "brcm,bcm2835-system-timer"
-- reg : Specifies base physical address and size of the registers.
-- interrupts : A list of 4 interrupt sinks; one per timer channel.
-- clock-frequency : The frequency of the clock that drives the counter, in Hz.
-
-Example:
-
-timer {
- compatible = "brcm,bcm2835-system-timer";
- reg = <0x7e003000 0x1000>;
- interrupts = <1 0>, <1 1>, <1 2>, <1 3>;
- clock-frequency = <1000000>;
-};
diff --git a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml
new file mode 100644
index 000000000000..f5804b5b0e63
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/brcm,bcm2835-system-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 System Timer
+
+maintainers:
+ - Stefan Wahren <wahrenst@gmx.net>
+ - Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+
+description:
+ The System Timer peripheral provides four 32-bit timer channels and a
+ single 64-bit free running counter. Each channel has an output compare
+ register, which is compared against the 32 least significant bits of the
+ free running counter values, and generates an interrupt.
+
+properties:
+ compatible:
+ const: brcm,bcm2835-system-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: System Timer Compare 0 match (used by VideoCore GPU)
+ - description: System Timer Compare 1 match (usable for ARM core)
+ - description: System Timer Compare 2 match (used by VideoCore GPU)
+ - description: System Timer Compare 3 match (usable for ARM core)
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@7e003000 {
+ compatible = "brcm,bcm2835-system-timer";
+ reg = <0x7e003000 0x1000>;
+ interrupts = <1 0>, <1 1>, <1 2>, <1 3>;
+ clock-frequency = <1000000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt
deleted file mode 100644
index aa8c40230e5e..000000000000
--- a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Freescale FlexTimer Module (FTM) Timer
-
-Required properties:
-
-- compatible : should be "fsl,ftm-timer"
-- reg : Specifies base physical address and size of the register sets for the
- clock event device and clock source device.
-- interrupts : Should be the clock event device interrupt.
-- clocks : The clocks provided by the SoC to drive the timer, must contain an
- entry for each entry in clock-names.
-- clock-names : Must include the following entries:
- o "ftm-evt"
- o "ftm-src"
- o "ftm-evt-counter-en"
- o "ftm-src-counter-en"
-- big-endian: One boolean property, the big endian mode will be in use if it is
- present, or the little endian mode will be in use for all the device registers.
-
-Example:
-ftm: ftm@400b8000 {
- compatible = "fsl,ftm-timer";
- reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
- interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "ftm-evt", "ftm-src",
- "ftm-evt-counter-en", "ftm-src-counter-en";
- clocks = <&clks VF610_CLK_FTM2>,
- <&clks VF610_CLK_FTM3>,
- <&clks VF610_CLK_FTM2_EXT_FIX_EN>,
- <&clks VF610_CLK_FTM3_EXT_FIX_EN>;
- big-endian;
-};
diff --git a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml
new file mode 100644
index 000000000000..0e4a8ddc3de3
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/fsl,ftm-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale FlexTimer Module (FTM) Timer
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: fsl,ftm-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: The clocks provided by the SoC to drive the timer; must
+ contain an entry for each entry in clock-names.
+ minItems: 4
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: ftm-evt
+ - const: ftm-src
+ - const: ftm-evt-counter-en
+ - const: ftm-src-counter-en
+
+ big-endian: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/vf610-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ftm@400b8000 {
+ compatible = "fsl,ftm-timer";
+ reg = <0x400b8000 0x1000>;
+ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "ftm-evt", "ftm-src", "ftm-evt-counter-en", "ftm-src-counter-en";
+ clocks = <&clks VF610_CLK_FTM2>, <&clks VF610_CLK_FTM3>,
+ <&clks VF610_CLK_FTM2_EXT_FIX_EN>, <&clks VF610_CLK_FTM3_EXT_FIX_EN>;
+ big-endian;
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt
deleted file mode 100644
index 51b05a0e70d1..000000000000
--- a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-* NXP LPC3220 timer
-
-The NXP LPC3220 timer is used on a wide range of NXP SoCs. This
-includes LPC32xx, LPC178x, LPC18xx and LPC43xx parts.
-
-Required properties:
-- compatible:
- Should be "nxp,lpc3220-timer".
-- reg:
- Address and length of the register set.
-- interrupts:
- Reference to the timer interrupt
-- clocks:
- Should contain a reference to timer clock.
-- clock-names:
- Should contain "timerclk".
-
-Example:
-
-timer1: timer@40085000 {
- compatible = "nxp,lpc3220-timer";
- reg = <0x40085000 0x1000>;
- interrupts = <13>;
- clocks = <&ccu1 CLK_CPU_TIMER1>;
- clock-names = "timerclk";
-};
diff --git a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml
new file mode 100644
index 000000000000..3ae2eb0625da
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/nxp,lpc3220-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC3220 timer
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+description: |
+ The NXP LPC3220 timer is used on a wide range of NXP SoCs. This includes
+ LPC32xx, LPC178x, LPC18xx and LPC43xx parts.
+
+properties:
+ compatible:
+ const: nxp,lpc3220-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: timerclk
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc32xx-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ timer@4004c000 {
+ compatible = "nxp,lpc3220-timer";
+ reg = <0x4004c000 0x1000>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&clk LPC32XX_CLK_TIMER1>;
+ clock-names = "timerclk";
+ };
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
index 19e56b7577a0..6d0eb0014eee 100644
--- a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
@@ -24,6 +24,7 @@ properties:
- rockchip,rk3228-timer
- rockchip,rk3229-timer
- rockchip,rk3368-timer
+ - rockchip,rk3576-timer
- rockchip,rk3588-timer
- rockchip,px30-timer
- const: rockchip,rk3288-timer
diff --git a/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml b/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml
new file mode 100644
index 000000000000..e9646f4e86cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/ti,da830-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DaVinci Timer
+
+maintainers:
+ - Kousik Sanagavarapu <five231003@gmail.com>
+
+description: |
+ This is a 64-bit timer found on TI's DaVinci architecture devices. The timer
+ can be configured as a general-purpose 64-bit timer or as dual
+ general-purpose 32-bit timers. When configured as dual 32-bit timers, each
+ half can operate in conjunction with (chain mode) or independently of
+ (unchained mode) the other.
+
+ The timer is a free running up-counter and can generate interrupts when the
+ counter reaches preset counter values.
+
+properties:
+ compatible:
+ const: ti,da830-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ maxItems: 10
+
+ interrupt-names:
+ minItems: 2
+ items:
+ - const: tint12
+ - const: tint34
+ - const: cmpint0
+ - const: cmpint1
+ - const: cmpint2
+ - const: cmpint3
+ - const: cmpint4
+ - const: cmpint5
+ - const: cmpint6
+ - const: cmpint7
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ timer@20000 {
+ compatible = "ti,da830-timer";
+ reg = <0x20000 0x1000>;
+ interrupts = <21>, <22>;
+ interrupt-names = "tint12", "tint34";
+ clocks = <&pll0_auxclk>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt b/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt
deleted file mode 100644
index 29bf91ccf5b7..000000000000
--- a/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-* Device tree bindings for Texas Instruments DaVinci timer
-
-This document provides bindings for the 64-bit timer in the DaVinci
-architecture devices. The timer can be configured as a general-purpose 64-bit
-timer, dual general-purpose 32-bit timers. When configured as dual 32-bit
-timers, each half can operate in conjunction (chain mode) or independently
-(unchained mode) of each other.
-
-The timer is a free running up-counter and can generate interrupts when the
-counter reaches preset counter values.
-
-Also see ../watchdog/davinci-wdt.txt for timers that are configurable as
-watchdog timers.
-
-Required properties:
-
-- compatible : should be "ti,da830-timer".
-- reg : specifies base physical address and count of the registers.
-- interrupts : interrupts generated by the timer.
-- interrupt-names: should be "tint12", "tint34", "cmpint0", "cmpint1",
- "cmpint2", "cmpint3", "cmpint4", "cmpint5", "cmpint6",
- "cmpint7" ("cmpintX" may be omitted if not present in the
- hardware).
-- clocks : the clock feeding the timer clock.
-
-Example:
-
- clocksource: timer@20000 {
- compatible = "ti,da830-timer";
- reg = <0x20000 0x1000>;
- interrupts = <21>, <22>, <74>, <75>, <76>, <77>, <78>, <79>,
- <80>, <81>;
- interrupt-names = "tint12", "tint34", "cmpint0", "cmpint1",
- "cmpint2", "cmpint3", "cmpint4", "cmpint5",
- "cmpint6", "cmpint7";
- clocks = <&pll0_auxclk>;
- };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 7913ca9b6b54..0108d7507215 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -33,15 +33,12 @@ properties:
# Acbel fsg032 power supply
- acbel,fsg032
# SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin
- - ad,ad7414
+ - ad,ad7414 # Deprecated, use adi,ad7414
+ - adi,ad7414
# ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems
- ad,adm9240
# AD5110 - Nonvolatile Digital Potentiometer
- adi,ad5110
- # Analog Devices ADP5585 Keypad Decoder and I/O Expansion
- - adi,adp5585
- # Analog Devices ADP5585 Keypad Decoder and I/O Expansion with support for Row5
- - adi,adp5585-02
# Analog Devices ADP5589 Keypad Decoder and I/O Expansion
- adi,adp5589
# Analog Devices LT7182S Dual Channel 6A, 20V PolyPhase Step-Down Silent Switcher
@@ -50,8 +47,6 @@ properties:
- ams,iaq-core
# Temperature monitoring of Astera Labs PT5161L PCIe retimer
- asteralabs,pt5161l
- # i2c serial eeprom (24cxx)
- - at,24c08
# i2c h/w elliptic curve crypto module
- atmel,atecc508a
# ATSHA204 - i2c h/w symmetric crypto module
@@ -74,14 +69,10 @@ properties:
- dallas,ds1631
# Total-Elapsed-Time Recorder with Alarm
- dallas,ds1682
- # Tiny Digital Thermometer and Thermostat
- - dallas,ds1775
# CPU Peripheral Monitor
- dallas,ds1780
# CPU Supervisor with Nonvolatile Memory and Programmable I/O
- dallas,ds4510
- # Digital Thermometer and Thermostat
- - dallas,ds75
# Delta AHE-50DC Open19 power shelf fan control module
- delta,ahe50dc-fan
# Delta Electronics DPS-650-AB power supply
@@ -110,6 +101,8 @@ properties:
- domintech,dmard09
# DMARD10: 3-axis Accelerometer
- domintech,dmard10
+ # Elgin SPI-controlled LCD
+ - elgin,jg10309-01
# MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
- fsl,mma7660
# MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
@@ -164,6 +157,8 @@ properties:
- isil,isl29030
# Intersil ISL68137 Digital Output Configurable PWM Controller
- isil,isl68137
+ # Intersil ISL69260 PMBus Voltage Regulator
+ - isil,isl69260
# Intersil ISL69269 PMBus Voltage Regulator
- isil,isl69269
# Intersil ISL76682 Ambient Light Sensor
@@ -182,8 +177,6 @@ properties:
- maxim,ds1803-100
# 10 kOhm digital potentiometer with I2C interface
- maxim,ds3502
- # Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
- - maxim,max1237
# Temperature Sensor, I2C interface
- maxim,max1619
# 3-Channel Remote Temperature Sensor
@@ -198,8 +191,6 @@ properties:
- maxim,max5484
# PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
- maxim,max6621
- # 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
- - maxim,max6625
# mCube 3-axis 8-bit digital accelerometer
- mcube,mc3230
# Measurement Specialities I2C temperature and humidity sensor
@@ -364,8 +355,6 @@ properties:
- skyworks,sky81452
# SparkFun Qwiic Joystick (COM-15168) with i2c interface
- sparkfun,qwiic-joystick
- # i2c serial eeprom (24cxx)
- - st,24c256
# Sierra Wireless mangOH Green SPI IoT interface
- swir,mangoh-iotport-spi
# Ambient Light Sensor with SMBUS/Two Wire Serial Interface
@@ -397,8 +386,6 @@ properties:
- ti,tmp121
- ti,tmp122
- ti,tmp125
- # Digital Temperature Sensor
- - ti,tmp275
# TI DC-DC converter on PMBus
- ti,tps40400
# TI Dual channel DCAP+ multiphase controller TPS53676 with AVSBus
@@ -412,6 +399,7 @@ properties:
- ti,tps544b25
- ti,tps544c20
- ti,tps544c25
+ - ti,tps546d24
# I2C Touch-Screen Controller
- ti,tsc2003
# Vicor Corporation Digital Supervisor
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index a70ce43b3dc0..adb15f4c8f6c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -368,6 +368,8 @@ patternProperties:
description: Devantech, Ltd.
"^dfi,.*":
description: DFI Inc.
+ "^dfrobot,.*":
+ description: DFRobot Corporation
"^dh,.*":
description: DH electronics GmbH
"^difrnce,.*":
@@ -804,6 +806,8 @@ patternProperties:
description: Lantiq Semiconductor
"^lattice,.*":
description: Lattice Semiconductor
+ "^lckfb,.*":
+ description: Shenzhen JLC Technology Group Co., Ltd.
"^lctech,.*":
description: Shenzhen LC Technology Co., Ltd.
"^leadtek,.*":
@@ -1476,6 +1480,8 @@ patternProperties:
description: Terasic Inc.
"^tesla,.*":
description: Tesla, Inc.
+ "^test,.*":
+ description: Reserved for use by tests. For example, KUnit.
"^tfc,.*":
description: Three Five Corp
"^thead,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml
new file mode 100644
index 000000000000..5dbe891c70c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/cirrus,ep9301-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx Watchdog Timer
+
+maintainers:
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-wdt
+ - items:
+ - enum:
+ - cirrus,ep9302-wdt
+ - cirrus,ep9307-wdt
+ - cirrus,ep9312-wdt
+ - cirrus,ep9315-wdt
+ - const: cirrus,ep9301-wdt
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@80940000 {
+ compatible = "cirrus,ep9301-wdt";
+ reg = <0x80940000 0x08>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
deleted file mode 100644
index aa10b8ec36e2..000000000000
--- a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Texas Instruments DaVinci/Keystone Watchdog Timer (WDT) Controller
-
-Required properties:
-- compatible : Should be "ti,davinci-wdt", "ti,keystone-wdt"
-- reg : Should contain WDT registers location and length
-
-Optional properties:
-- timeout-sec : Contains the watchdog timeout in seconds
-- clocks : the clock feeding the watchdog timer.
- Needed if platform uses clocks.
- See clock-bindings.txt
-
-Documentation:
-Davinci DM646x - https://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
-Keystone - https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
-
-Examples:
-
-wdt: wdt@2320000 {
- compatible = "ti,davinci-wdt";
- reg = <0x02320000 0x80>;
- timeout-sec = <30>;
- clocks = <&clkwdtimer0>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
deleted file mode 100644
index 09f6b24969e0..000000000000
--- a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-* NXP LPC18xx Watchdog Timer (WDT)
-
-Required properties:
-- compatible: Should be "nxp,lpc1850-wwdt"
-- reg: Should contain WDT registers location and length
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Should contain "wdtclk" and "reg"; the watchdog counter
- clock and register interface clock respectively.
-- interrupts: Should contain WDT interrupt
-
-Examples:
-
-watchdog@40080000 {
- compatible = "nxp,lpc1850-wwdt";
- reg = <0x40080000 0x24>;
- clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>;
- clock-names = "wdtclk", "reg";
- interrupts = <49>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml b/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml
new file mode 100644
index 000000000000..52878fdbe3ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/nxp,lpc1850-wwdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC18xx Watchdog Timer (WDT)
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc1850-wwdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Watchdog counter clock
+ - description: Register interface clock
+
+ clock-names:
+ items:
+ - const: wdtclk
+ - const: reg
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/lpc18xx-cgu.h>
+ #include <dt-bindings/clock/lpc18xx-ccu.h>
+
+ watchdog@40080000 {
+ compatible = "nxp,lpc1850-wwdt";
+ reg = <0x40080000 0x24>;
+ clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>;
+ clock-names = "wdtclk", "reg";
+ interrupts = <49>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 47587971fb0b..932393f8c649 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -26,6 +26,7 @@ properties:
- qcom,apss-wdt-msm8994
- qcom,apss-wdt-qcm2290
- qcom,apss-wdt-qcs404
+ - qcom,apss-wdt-sa8255p
- qcom,apss-wdt-sa8775p
- qcom,apss-wdt-sc7180
- qcom,apss-wdt-sc7280
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index eba454d1680f..29ada89fdcdc 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -75,6 +75,8 @@ properties:
- renesas,r8a779h0-wdt # R-Car V4M
- const: renesas,rcar-gen4-wdt # R-Car Gen4
+ - const: renesas,r9a09g057-wdt # RZ/V2H(P)
+
reg:
maxItems: 1
@@ -113,7 +115,6 @@ properties:
required:
- compatible
- reg
- - interrupts
- clocks
allOf:
@@ -137,6 +138,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,r9a09g057-wdt
- renesas,rzg2l-wdt
- renesas,rzv2m-wdt
then:
@@ -171,6 +173,19 @@ allOf:
interrupts:
maxItems: 1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g057-wdt
+ then:
+ properties:
+ interrupts: false
+ interrupt-names: false
+ else:
+ required:
+ - interrupts
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
index c7aab0418a32..b5a3dc377070 100644
--- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
@@ -29,6 +29,7 @@ properties:
- rockchip,rk3368-wdt
- rockchip,rk3399-wdt
- rockchip,rk3568-wdt
+ - rockchip,rk3576-wdt
- rockchip,rk3588-wdt
- rockchip,rv1108-wdt
- const: snps,dw-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
index 6b13bfc11e11..86bd39d50850 100644
--- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
+++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
@@ -36,6 +36,12 @@ properties:
minItems: 1
maxItems: 2
+ interrupts:
+ maxItems: 1
+ description: Pre-timeout interrupt from the watchdog.
+
+ wakeup-source: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml
new file mode 100644
index 000000000000..3c78f60f5f48
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/ti,davinci-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DaVinci/Keystone Watchdog Timer Controller
+
+maintainers:
+ - Kousik Sanagavarapu <five231003@gmail.com>
+
+description: |
+ TI's Watchdog Timer Controller for DaVinci and Keystone Processors.
+
+ Datasheets
+
+ DaVinci DM646x - https://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
+ Keystone - https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: ti,keystone-wdt
+ - const: ti,davinci-wdt
+ - items:
+ - const: ti,davinci-wdt
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ wdt: watchdog@22f0080 {
+ compatible = "ti,keystone-wdt", "ti,davinci-wdt";
+ reg = <0x022f0080 0x80>;
+ clocks = <&clkwdtimer0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml b/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml
new file mode 100644
index 000000000000..9dbaa941538e
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/zii,rave-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Zodiac RAVE Watchdog Timer
+
+maintainers:
+ - Martyn Welch <martyn.welch@collabora.co.uk>
+ - Guenter Roeck <linux@roeck-us.net>
+ - Wim Van Sebroeck <wim@iguana.be>
+
+properties:
+ compatible:
+ const: zii,rave-wdt
+
+ reg:
+ maxItems: 1
+ description: i2c slave address of device, usually 0x38
+
+ reset-duration-ms:
+ description:
+ Duration of the pulse generated when the watchdog times
+ out.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: watchdog.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ watchdog@38 {
+ compatible = "zii,rave-wdt";
+ reg = <0x38>;
+ timeout-sec = <30>;
+ reset-duration-ms = <30>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt b/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt
deleted file mode 100644
index 3d878184ec3f..000000000000
--- a/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Zodiac RAVE Watchdog Timer
-
-Required properties:
-- compatible: must be "zii,rave-wdt"
-- reg: i2c slave address of device, usually 0x38
-
-Optional Properties:
-- timeout-sec: Watchdog timeout value in seconds.
-- reset-duration-ms: Duration of the pulse generated when the watchdog times
- out. Value in milliseconds.
-
-Example:
-
- watchdog@38 {
- compatible = "zii,rave-wdt";
- reg = <0x38>;
- timeout-sec = <30>;
- reset-duration-ms = <30>;
- };
diff --git a/Documentation/doc-guide/checktransupdate.rst b/Documentation/doc-guide/checktransupdate.rst
new file mode 100644
index 000000000000..dfaf9d373747
--- /dev/null
+++ b/Documentation/doc-guide/checktransupdate.rst
@@ -0,0 +1,54 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Checking for needed translation updates
+=======================================
+
+This script helps track the translation status of the documentation in
+different locales, i.e., whether the documentation is up-to-date with
+the English counterpart.
+
+How it works
+------------
+
+It uses the ``git log`` command to track the latest English commit as of the
+translation commit (ordered by author date) and the latest English commits
+from HEAD. If they differ, the file is considered out-of-date, and the
+commits that need to be resolved are collected and reported.
+
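+A rough shell approximation of this check (a sketch only; the script's exact
+``git log`` invocations may differ)::
+
+    # English commits newer than the translation's last author date
+    git log --oneline \
+        --since="$(git log -1 --format=%ad -- \
+            Documentation/translations/zh_CN/dev-tools/testing-overview.rst)" \
+        -- Documentation/dev-tools/testing-overview.rst
+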
+Features implemented
+
+- check all files in a certain locale
+- check a single file or a set of files
+- provide options to change output format
+- track the translation status of files that have no translation
+
+Usage
+-----
+
+::
+
+ ./scripts/checktransupdate.py --help
+
+Please refer to the output of the argument parser for usage details.
+
+Samples
+
+- ``./scripts/checktransupdate.py -l zh_CN``
+ This will print all the files that need to be updated in the zh_CN locale.
+- ``./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst``
+ This will only print the status of the specified file.
+
+Then the output is something like:
+
+::
+
+ Documentation/dev-tools/kfence.rst
+ No translation in the locale of zh_CN
+
+ Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+ commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
+ 1 commits needs resolving in total
+
+Features to be implemented
+
+- allow the file argument to be a folder instead of only a single file
diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst
index 7c7d97784626..24d058faa75c 100644
--- a/Documentation/doc-guide/index.rst
+++ b/Documentation/doc-guide/index.rst
@@ -12,6 +12,7 @@ How to write kernel documentation
parse-headers
contributing
maintainer-profile
+ checktransupdate
.. only:: subproject and html
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 3c399f132e2d..de2cb8de6112 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -180,6 +180,7 @@ modpost
modules-only.symvers
modules.builtin
modules.builtin.modinfo
+modules.builtin.ranges
modules.nsdeps
modules.order
modversions.h*
@@ -262,7 +263,7 @@ vsyscall_32.lds
wanxlfw.inc
uImage
unifdef
-utf8data.h
+utf8data.c
wakeup.bin
wakeup.elf
wakeup.lds
diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst
index ea8d16600e16..e6855cd37e85 100644
--- a/Documentation/driver-api/dpll.rst
+++ b/Documentation/driver-api/dpll.rst
offset values are fractional with 3-digit decimal places and shall be
divided with ``DPLL_PIN_PHASE_OFFSET_DIVIDER`` to get integer part and
modulo divided to get fractional part.
+Embedded SYNC
+=============
+
+A device may provide the ability to use the Embedded SYNC feature. It allows
+embedding an additional SYNC signal into the base frequency of a pin - one
+special pulse of the base frequency signal every time a SYNC signal pulse
+occurs. The user can configure the frequency of the Embedded SYNC signal.
+The Embedded SYNC capability is always related to a given base frequency
+and HW capabilities. The user is provided a range of supported Embedded SYNC
+frequencies, depending on the base frequency currently configured for
+the pin.
+
+ ========================================= =================================
+ ``DPLL_A_PIN_ESYNC_FREQUENCY`` current Embedded SYNC frequency
+ ``DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED`` nest available Embedded SYNC
+ frequency ranges
+ ``DPLL_A_PIN_FREQUENCY_MIN`` attr minimum value of frequency
+ ``DPLL_A_PIN_FREQUENCY_MAX`` attr maximum value of frequency
+ ``DPLL_A_PIN_ESYNC_PULSE`` pulse type of Embedded SYNC
+ ========================================= =================================
+
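+These attributes can be inspected with the in-tree YNL CLI, for example (a
+sketch; the pin ``id`` value is an assumption and depends on the system)::
+
+  # dump pin attributes, including the Embedded SYNC frequency and the
+  # supported frequency ranges
+  ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+      --do pin-get --json '{"id": 0}'
+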
Configuration commands group
============================
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index ac9ee7441887..5f2ee8d717b1 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -391,7 +391,7 @@ PCI
devm_pci_remap_cfgspace() : ioremap PCI configuration space
devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource
- pcim_enable_device() : after success, all PCI ops become managed
+ pcim_enable_device() : after success, some PCI ops become managed
pcim_iomap() : do iomap() on a single BAR
pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
pcim_iomap_regions_request_all() : do request_region() on all and iomap() on multiple BARs
diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst
index d3cfa73cbb2b..28a32410f7d2 100644
--- a/Documentation/driver-api/firewire.rst
+++ b/Documentation/driver-api/firewire.rst
@@ -43,6 +43,8 @@ Firewire core transaction interfaces
Firewire Isochronous I/O interfaces
===================================
+.. kernel-doc:: include/linux/firewire.h
+ :functions: fw_iso_context_schedule_flush_completions
.. kernel-doc:: drivers/firewire/core-iso.c
:export:
diff --git a/Documentation/driver-api/iio/buffers.rst b/Documentation/driver-api/iio/buffers.rst
index e83026aebe97..63f364e862d1 100644
--- a/Documentation/driver-api/iio/buffers.rst
+++ b/Documentation/driver-api/iio/buffers.rst
@@ -15,8 +15,8 @@ trigger source. Multiple data channels can be read at once from
IIO buffer sysfs interface
==========================
An IIO buffer has an associated attributes directory under
-:file:`/sys/bus/iio/iio:device{X}/buffer/*`. Here are some of the existing
-attributes:
+:file:`/sys/bus/iio/devices/iio:device{X}/buffer/*`. Here are some of the
+existing attributes:
* :file:`length`, the total number of data samples (capacity) that can be
stored by the buffer.
@@ -28,8 +28,8 @@ IIO buffer setup
The meta information associated with a channel reading placed in a buffer is
called a scan element. The important bits configuring scan elements are
exposed to userspace applications via the
-:file:`/sys/bus/iio/iio:device{X}/scan_elements/` directory. This directory contains
-attributes of the following form:
+:file:`/sys/bus/iio/devices/iio:device{X}/scan_elements/` directory. This
+directory contains attributes of the following form:
* :file:`enable`, used for enabling a channel. If and only if its attribute
is non *zero*, then a triggered capture will contain data samples for this
diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst
index 715cf29482a1..dfe438dc91a7 100644
--- a/Documentation/driver-api/iio/core.rst
+++ b/Documentation/driver-api/iio/core.rst
@@ -24,7 +24,7 @@ then we will show how a device driver makes use of an IIO device.
There are two ways for a user space application to interact with an IIO driver.
-1. :file:`/sys/bus/iio/iio:device{X}/`, this represents a hardware sensor
+1. :file:`/sys/bus/iio/devices/iio:device{X}/`, this represents a hardware sensor
and groups together the data channels of the same chip.
2. :file:`/dev/iio:device{X}`, character device node interface used for
buffered data transfer and for events information retrieval.
@@ -51,8 +51,8 @@ IIO device sysfs interface
Attributes are sysfs files used to expose chip info and also allowing
applications to set various configuration parameters. For device with
-index X, attributes can be found under /sys/bus/iio/iio:deviceX/ directory.
-Common attributes are:
+index X, attributes can be found under the /sys/bus/iio/devices/iio:deviceX/
+directory. Common attributes are:
* :file:`name`, description of the physical chip.
* :file:`dev`, shows the major:minor pair associated with
@@ -140,16 +140,16 @@ Here is how we can make use of the channel's modifiers::
This channel's definition will generate two separate sysfs files for raw data
retrieval:
-* :file:`/sys/bus/iio/iio:device{X}/in_intensity_ir_raw`
-* :file:`/sys/bus/iio/iio:device{X}/in_intensity_both_raw`
+* :file:`/sys/bus/iio/devices/iio:device{X}/in_intensity_ir_raw`
+* :file:`/sys/bus/iio/devices/iio:device{X}/in_intensity_both_raw`
one file for processed data:
-* :file:`/sys/bus/iio/iio:device{X}/in_illuminance_input`
+* :file:`/sys/bus/iio/devices/iio:device{X}/in_illuminance_input`
and one shared sysfs file for sampling frequency:
-* :file:`/sys/bus/iio/iio:device{X}/sampling_frequency`.
+* :file:`/sys/bus/iio/devices/iio:device{X}/sampling_frequency`.
Here is how we can make use of the channel's indexing::
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index f10decc2c14b..7f83e05769b4 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -124,6 +124,7 @@ Subsystem-specific APIs
pps
ptp
pwm
+ pwrseq
regulator
reset
rfkill
diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst
index e224e47b6b09..dfa021eacd63 100644
--- a/Documentation/driver-api/ipmi.rst
+++ b/Documentation/driver-api/ipmi.rst
@@ -540,7 +540,7 @@ at module load time (for a module) with::
alerts_broken
The addresses are normal I2C addresses. The adapter is the string
-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
spaces, so if the name is "This is an I2C chip" you can say
adapter_name=ThisisanI2cchip. This is because it's hard to pass in
diff --git a/Documentation/driver-api/media/mc-core.rst b/Documentation/driver-api/media/mc-core.rst
index 2456950ce8ff..1d010bd7ec49 100644
--- a/Documentation/driver-api/media/mc-core.rst
+++ b/Documentation/driver-api/media/mc-core.rst
@@ -144,7 +144,8 @@ valid values are described at :c:func:`media_create_pad_link()` and
Graph traversal
^^^^^^^^^^^^^^^
-The media framework provides APIs to iterate over entities in a graph.
+The media framework provides APIs to traverse media graphs, locating connected
+entities and links.
To iterate over all entities belonging to a media device, drivers can use
the media_device_for_each_entity macro, defined in
@@ -159,31 +160,6 @@ the media_device_for_each_entity macro, defined in
...
}
-Drivers might also need to iterate over all entities in a graph that can be
-reached only through enabled links starting at a given entity. The media
-framework provides a depth-first graph traversal API for that purpose.
-
-.. note::
-
- Graphs with cycles (whether directed or undirected) are **NOT**
- supported by the graph traversal API. To prevent infinite loops, the graph
- traversal code limits the maximum depth to ``MEDIA_ENTITY_ENUM_MAX_DEPTH``,
- currently defined as 16.
-
-Drivers initiate a graph traversal by calling
-:c:func:`media_graph_walk_start()`
-
-The graph structure, provided by the caller, is initialized to start graph
-traversal at the given entity.
-
-Drivers can then retrieve the next entity by calling
-:c:func:`media_graph_walk_next()`
-
-When the graph traversal is complete the function will return ``NULL``.
-
-Graph traversal can be interrupted at any moment. No cleanup function call
-is required and the graph structure can be freed normally.
-
Helper functions can be used to find a link between two given pads, or a pad
connected to another pad through an enabled link
(:c:func:`media_entity_find_link()`, :c:func:`media_pad_remote_pad_first()`,
@@ -276,6 +252,45 @@ Subsystems should facilitate link validation by providing subsystem specific
helper functions to provide easy access for commonly needed information, and
in the end provide a way to use driver-specific callbacks.
+Pipeline traversal
+^^^^^^^^^^^^^^^^^^
+
+Once a pipeline has been constructed with :c:func:`media_pipeline_start()`,
+drivers can iterate over entities or pads in the pipeline with the
+:c:macro:`media_pipeline_for_each_entity` and
+:c:macro:`media_pipeline_for_each_pad` macros. Iterating over pads is
+straightforward:
+
+.. code-block:: c
+
+ media_pipeline_pad_iter iter;
+ struct media_pad *pad;
+
+ media_pipeline_for_each_pad(pipe, &iter, pad) {
+ /* 'pad' will point to each pad in turn */
+ ...
+ }
+
+To iterate over entities, the iterator needs to be initialized and cleaned up
+as additional steps:
+
+.. code-block:: c
+
+ media_pipeline_entity_iter iter;
+ struct media_entity *entity;
+ int ret;
+
+ ret = media_pipeline_entity_iter_init(pipe, &iter);
+ if (ret)
+ ...;
+
+ media_pipeline_for_each_entity(pipe, &iter, entity) {
+ /* 'entity' will point to each entity in turn */
+ ...
+ }
+
+ media_pipeline_entity_iter_cleanup(&iter);
+
Media Controller Device Allocator API
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/driver-api/mmc/index.rst b/Documentation/driver-api/mmc/index.rst
index 7339736ac774..8188863e5959 100644
--- a/Documentation/driver-api/mmc/index.rst
+++ b/Documentation/driver-api/mmc/index.rst
@@ -10,4 +10,5 @@ MMC/SD/SDIO card support
mmc-dev-attrs
mmc-dev-parts
mmc-async-req
+ mmc-test
mmc-tools
diff --git a/Documentation/driver-api/mmc/mmc-test.rst b/Documentation/driver-api/mmc/mmc-test.rst
new file mode 100644
index 000000000000..1fe33eb43742
--- /dev/null
+++ b/Documentation/driver-api/mmc/mmc-test.rst
@@ -0,0 +1,299 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+MMC Test Framework
+==================
+
+Overview
+========
+
+The ``mmc_test`` framework is designed to test the performance and reliability of host controller drivers and all devices handled by the MMC subsystem. This includes not only MMC devices but also SD cards and other devices supported by the subsystem.
+
+The framework provides a variety of tests to evaluate different aspects of the host controller and device interactions, such as read and write performance, data integrity, and error handling. These tests help ensure that the host controller drivers and devices operate correctly under various conditions.
+
+The ``mmc_test`` framework is particularly useful for:
+
+- Verifying the functionality and performance of MMC and SD host controller drivers.
+- Ensuring compatibility and reliability of MMC and SD devices.
+- Identifying and diagnosing issues in the MMC subsystem.
+
+The results of the tests are logged in the kernel log, providing detailed information about the test outcomes and any encountered issues.
+
+Note: whatever is on your card will be overwritten by these tests.
+
+Initialization
+==============
+
+To use the ``mmc_test`` framework, follow these steps:
+
+1. **Enable the MMC Test Framework**:
+
+ Ensure that the ``CONFIG_MMC_TEST`` kernel configuration option is enabled. This can be done by configuring the kernel:
+
+ .. code-block:: none
+
+ make menuconfig
+
+ Navigate to:
+
+ Device Drivers --->
+ <*> MMC/SD/SDIO card support --->
+ [*] MMC host test driver
+
+ Alternatively, you can enable it directly in the kernel configuration file:
+
+ .. code-block:: none
+
+ echo "CONFIG_MMC_TEST=y" >> .config
+
+ Rebuild and install the kernel if necessary.
+
+2. **Load the MMC Test Module**:
+
+ If the ``mmc_test`` framework is built as a module, you need to load it using ``modprobe``:
+
+ .. code-block:: none
+
+ modprobe mmc_test
+
+Binding the MMC Card for Testing
+================================
+
+To enable MMC testing, you need to unbind the MMC card from the ``mmcblk`` driver and bind it to the ``mmc_test`` driver. This allows the ``mmc_test`` framework to take control of the MMC card for testing purposes.
+
+1. Identify the MMC card:
+
+ .. code-block:: sh
+
+ ls /sys/bus/mmc/devices/
+
+ This will list the MMC devices, such as ``mmc0:0001``.
+
+2. Unbind the MMC card from the ``mmcblk`` driver:
+
+ .. code-block:: sh
+
+ echo 'mmc0:0001' > /sys/bus/mmc/drivers/mmcblk/unbind
+
+3. Bind the MMC card to the ``mmc_test`` driver:
+
+ .. code-block:: sh
+
+ echo 'mmc0:0001' > /sys/bus/mmc/drivers/mmc_test/bind
+
+After binding, you should see a line in the kernel log indicating that the card has been claimed for testing:
+
+.. code-block:: none
+
+ mmc_test mmc0:0001: Card claimed for testing.
+
+
+Usage - Debugfs Entries
+=======================
+
+Once the ``mmc_test`` framework is enabled, you can interact with the following debugfs entries located in ``/sys/kernel/debug/mmc0/mmc0:0001``:
+
+1. **test**:
+
+ This file is used to run specific tests. Write the test number to this file to execute a test.
+
+ .. code-block:: sh
+
+ echo <test_number> > /sys/kernel/debug/mmc0/mmc0:0001/test
+
+ The test result is reported in the kernel log. You can view the kernel log using the ``dmesg`` command or by checking the log files under ``/var/log/``.
+
+ .. code-block:: sh
+
+ dmesg | grep mmc0
+
+ Example:
+
+ To run test number 4 (Basic read with data verification):
+
+ .. code-block:: sh
+
+ echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test
+
+ Check the kernel log for the result:
+
+ .. code-block:: sh
+
+ dmesg | grep mmc0
+
+2. **testlist**:
+
+ This file lists all available tests. You can read this file to see the list of tests and their corresponding numbers.
+
+ .. code-block:: sh
+
+ cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
+
+ The available tests are listed in the table below:
+
++------+--------------------------------+---------------------------------------------+
+| Test | Test Name                      | Test Description                            |
++======+================================+=============================================+
+| 0    | Run all tests                  | Runs all available tests                    |
++------+--------------------------------+---------------------------------------------+
+| 1    | Basic write                    | Performs a basic write operation of a       |
+|      |                                | single 512-byte block to the MMC card       |
+|      |                                | without data verification.                  |
++------+--------------------------------+---------------------------------------------+
+| 2    | Basic read                     | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 3    | Basic write                    | Performs a basic write operation of a       |
+|      | (with data verification)       | single 512-byte block to the MMC card       |
+|      |                                | with data verification by reading back      |
+|      |                                | the written data and comparing it.          |
++------+--------------------------------+---------------------------------------------+
+| 4    | Basic read                     | Same for read                               |
+|      | (with data verification)       |                                             |
++------+--------------------------------+---------------------------------------------+
+| 5    | Multi-block write              | Performs a multi-block write operation of   |
+|      |                                | 8 blocks (each 512 bytes) to the MMC card.  |
++------+--------------------------------+---------------------------------------------+
+| 6    | Multi-block read               | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 7    | Power of two block writes      | Performs write operations with block sizes  |
+|      |                                | that are powers of two, starting from 1     |
+|      |                                | byte up to 256 bytes, to the MMC card.      |
++------+--------------------------------+---------------------------------------------+
+| 8    | Power of two block reads       | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 9    | Weird sized block writes       | Performs write operations with varying      |
+|      |                                | block sizes starting from 3 bytes and       |
+|      |                                | increasing by 7 bytes each iteration, up    |
+|      |                                | to 511 bytes, to the MMC card.              |
++------+--------------------------------+---------------------------------------------+
+| 10   | Weird sized block reads        | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 11   | Badly aligned write            | Performs write operations with buffers      |
+|      |                                | starting at different alignments (0 to 7    |
+|      |                                | bytes offset) to test how the MMC card      |
+|      |                                | handles unaligned data transfers.           |
++------+--------------------------------+---------------------------------------------+
+| 12   | Badly aligned read             | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 13   | Badly aligned multi-block write| Same for multi-block write                  |
++------+--------------------------------+---------------------------------------------+
+| 14   | Badly aligned multi-block read | Same for multi-block read                   |
++------+--------------------------------+---------------------------------------------+
+| 15   | Proper xfer_size at write      | Intentionally creates a broken transfer by  |
+|      | (Start failure)                | modifying the MMC request so that it will   |
+|      |                                | not perform as expected, e.g. using         |
+|      |                                | MMC_WRITE_BLOCK for a multi-block transfer. |
++------+--------------------------------+---------------------------------------------+
+| 16   | Proper xfer_size at read       | Same for read                               |
+|      | (Start failure)                |                                             |
++------+--------------------------------+---------------------------------------------+
+| 17   | Proper xfer_size at write      | Same for 2 blocks                           |
+|      | (Midway failure)               |                                             |
++------+--------------------------------+---------------------------------------------+
+| 18   | Proper xfer_size at read       | Same for read                               |
+|      | (Midway failure)               |                                             |
++------+--------------------------------+---------------------------------------------+
+| 19   | Highmem write                  | Uses a high memory page                     |
++------+--------------------------------+---------------------------------------------+
+| 20   | Highmem read                   | Same for read                               |
++------+--------------------------------+---------------------------------------------+
+| 21   | Multi-block highmem write      | Same for multi-block write                  |
++------+--------------------------------+---------------------------------------------+
+| 22   | Multi-block highmem read       | Same for multi-block read                   |
++------+--------------------------------+---------------------------------------------+
+| 23   | Best-case read performance     | Performs a 512K sequential read (non-sg)    |
++------+--------------------------------+---------------------------------------------+
+| 24   | Best-case write performance    | Same for write                              |
++------+--------------------------------+---------------------------------------------+
+| 25   | Best-case read performance     | Same using sg                               |
+|      | (Into scattered pages)         |                                             |
++------+--------------------------------+---------------------------------------------+
+| 26   | Best-case write performance    | Same for write                              |
+|      | (From scattered pages)         |                                             |
++------+--------------------------------+---------------------------------------------+
+| 27   | Single read performance        | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 28   | Single write performance       | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 29   | Single trim performance        | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 30   | Consecutive read performance   | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 31   | Consecutive write performance  | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 32   | Consecutive trim performance   | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 33   | Random read performance        | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 34   | Random write performance       | By transfer size                            |
++------+--------------------------------+---------------------------------------------+
+| 35   | Large sequential read          | Into scattered pages                        |
++------+--------------------------------+---------------------------------------------+
+| 36   | Large sequential write         | From scattered pages                        |
++------+--------------------------------+---------------------------------------------+
+| 37   | Write performance              | With blocking req, 4k to 4MB                |
++------+--------------------------------+---------------------------------------------+
+| 38   | Write performance              | With non-blocking req, 4k to 4MB            |
++------+--------------------------------+---------------------------------------------+
+| 39   | Read performance               | With blocking req, 4k to 4MB                |
++------+--------------------------------+---------------------------------------------+
+| 40   | Read performance               | With non-blocking req, 4k to 4MB            |
++------+--------------------------------+---------------------------------------------+
+| 41   | Write performance              | Blocking req, 1 to 512 sg elems             |
++------+--------------------------------+---------------------------------------------+
+| 42   | Write performance              | Non-blocking req, 1 to 512 sg elems         |
++------+--------------------------------+---------------------------------------------+
+| 43   | Read performance               | Blocking req, 1 to 512 sg elems             |
++------+--------------------------------+---------------------------------------------+
+| 44   | Read performance               | Non-blocking req, 1 to 512 sg elems         |
++------+--------------------------------+---------------------------------------------+
+| 45   | Reset test                     |                                             |
++------+--------------------------------+---------------------------------------------+
+| 46   | Commands during read           | No Set Block Count (CMD23)                  |
++------+--------------------------------+---------------------------------------------+
+| 47   | Commands during write          | No Set Block Count (CMD23)                  |
++------+--------------------------------+---------------------------------------------+
+| 48   | Commands during read           | Use Set Block Count (CMD23)                 |
++------+--------------------------------+---------------------------------------------+
+| 49   | Commands during write          | Use Set Block Count (CMD23)                 |
++------+--------------------------------+---------------------------------------------+
+| 50   | Commands during non-blocking   | Read - use Set Block Count (CMD23)          |
++------+--------------------------------+---------------------------------------------+
+| 51   | Commands during non-blocking   | Write - use Set Block Count (CMD23)         |
++------+--------------------------------+---------------------------------------------+
+
+Test Results
+============
+
+The results of the tests are logged in the kernel log. Each test logs the start, end, and result of the test. The possible results are:
+
+- **OK**: The test completed successfully.
+- **FAILED**: The test failed.
+- **UNSUPPORTED (by host)**: The test is unsupported by the host.
+- **UNSUPPORTED (by card)**: The test is unsupported by the card.
+- **ERROR**: An error occurred during the test.
+
+Example Kernel Log Output
+=========================
+
+When running a test, you will see log entries similar to the following in the kernel log:
+
+.. code-block:: none
+
+ [ 1234.567890] mmc0: Starting tests of card mmc0:0001...
+ [ 1234.567891] mmc0: Test case 4. Basic read (with data verification)...
+ [ 1234.567892] mmc0: Result: OK
+ [ 1234.567893] mmc0: Tests completed.
+
+In this example, test case 4 (Basic read with data verification) was executed, and the result was OK.
+
+
+Contributing
+============
+
+Contributions to the ``mmc_test`` framework are welcome. Please follow the standard Linux kernel contribution guidelines and submit patches to the appropriate maintainers.
+
+Contact
+=======
+
+For more information or to report issues, please contact the MMC subsystem maintainers.
diff --git a/Documentation/driver-api/pwrseq.rst b/Documentation/driver-api/pwrseq.rst
new file mode 100644
index 000000000000..a644084ded17
--- /dev/null
+++ b/Documentation/driver-api/pwrseq.rst
@@ -0,0 +1,95 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+.. Copyright 2024 Linaro Ltd.
+
+====================
+Power Sequencing API
+====================
+
+:Author: Bartosz Golaszewski
+
+Introduction
+============
+
+This framework is designed to abstract complex power-up sequences that are
+shared between multiple logical devices in the Linux kernel.
+
+The intention is to allow consumers to obtain a power sequencing handle
+exposed by the power sequence provider and to delegate the actual requesting
+and control of the underlying resources to that provider, which can also
+mitigate any potential conflicts between multiple users behind the scenes.
+
+Glossary
+--------
+
+The power sequencing API uses a number of terms specific to the subsystem:
+
+Unit
+
+ A unit is a discrete chunk of a power sequence. For instance, one unit may
+ enable a set of regulators, another may enable a specific GPIO. Units can
+ define dependencies in the form of other units that must be enabled before
+ they themselves can be.
+
+Target
+
+ A target is a set of units (composed of the "final" unit and its
+ dependencies) that a consumer selects by its name when requesting a handle
+ to the power sequencer. Via the dependency system, multiple targets may
+ share the same parts of a power sequence but ignore parts that are
+ irrelevant.
+
+Descriptor
+
+ A handle passed by the pwrseq core to every consumer that serves as the
+ entry point to the provider layer. It ensures coherence between different
+ users and keeps reference counting consistent.
+
+Consumer interface
+==================
+
+The consumer API aims to be as simple as possible. The driver interested in
+getting a descriptor from the power sequencer should call pwrseq_get() and
+specify the name of the target it wants to reach in the sequence after calling
+pwrseq_power_up(). The descriptor can be released by calling pwrseq_put() and
+the consumer can request the powering down of its target with
+pwrseq_power_off(). Note that there is no guarantee that pwrseq_power_off()
+will have any effect as there may be multiple users of the underlying resources
+who may keep them active.
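+
+As an illustration, a minimal consumer sketch is shown below. It is not taken
+from a real driver: the target name "wlan", the probe function and the error
+handling are assumptions made for the example; refer to
+include/linux/pwrseq/consumer.h for the exact signatures.
+
+.. code-block:: c
+
+   #include <linux/err.h>
+   #include <linux/pwrseq/consumer.h>
+
+   static int example_consumer_probe(struct device *dev)
+   {
+           struct pwrseq_desc *pwrseq;
+           int ret;
+
+           /* Request a handle for the (made-up) "wlan" target. */
+           pwrseq = pwrseq_get(dev, "wlan");
+           if (IS_ERR(pwrseq))
+                   return PTR_ERR(pwrseq);
+
+           /* Run the power-up sequence up to the requested target. */
+           ret = pwrseq_power_up(pwrseq);
+           if (ret) {
+                   pwrseq_put(pwrseq);
+                   return ret;
+           }
+
+           /* ... the device is now powered up ... */
+
+           /* May be a no-op if other users keep the resources on. */
+           pwrseq_power_off(pwrseq);
+           pwrseq_put(pwrseq);
+           return 0;
+   }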
+
+Provider interface
+==================
+
+The provider API is admittedly not nearly as straightforward as the one for
+consumers but it makes up for it in flexibility.
+
+Each provider can logically split the power-up sequence into discrete chunks
+(units) and define their dependencies. They can then expose named targets that
+consumers may use as the final point in the sequence that they wish to reach.
+
+To that end the providers fill out a set of configuration structures and
+register with the pwrseq subsystem by calling pwrseq_device_register().
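+
+For illustration, below is a sketch of a provider registering one unit, one
+dependent unit and one named target. All names are made up for the example,
+and the structure layout and registration signature are assumptions based on
+include/linux/pwrseq/provider.h; the match() callback it references is
+sketched in the next section.
+
+.. code-block:: c
+
+   #include <linux/err.h>
+   #include <linux/module.h>
+   #include <linux/platform_device.h>
+   #include <linux/pwrseq/provider.h>
+
+   static int example_vregs_enable(struct pwrseq_device *pwrseq);
+   static int example_vregs_disable(struct pwrseq_device *pwrseq);
+   static int example_match(struct pwrseq_device *pwrseq,
+                            struct device *dev);
+
+   /* One unit: enables the regulators the sequence depends on. */
+   static const struct pwrseq_unit_data example_vregs_unit = {
+           .name = "regulators-enable",
+           .enable = example_vregs_enable,
+           .disable = example_vregs_disable,
+   };
+
+   /* NULL-terminated list of the final unit's dependencies. */
+   static const struct pwrseq_unit_data *example_enable_deps[] = {
+           &example_vregs_unit,
+           NULL
+   };
+
+   static const struct pwrseq_unit_data example_enable_unit = {
+           .name = "example-enable",
+           .deps = example_enable_deps,
+   };
+
+   /* A named target that consumers can request with pwrseq_get(). */
+   static const struct pwrseq_target_data example_target = {
+           .name = "example",
+           .unit = &example_enable_unit,
+   };
+
+   static const struct pwrseq_target_data *example_targets[] = {
+           &example_target,
+           NULL
+   };
+
+   static int example_probe(struct platform_device *pdev)
+   {
+           const struct pwrseq_config config = {
+                   .parent = &pdev->dev,
+                   .owner = THIS_MODULE,
+                   .match = example_match,
+                   .targets = example_targets,
+           };
+           struct pwrseq_device *pwrseq;
+
+           pwrseq = pwrseq_device_register(&config);
+           return PTR_ERR_OR_ZERO(pwrseq);
+   }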
+
+Dynamic consumer matching
+-------------------------
+
+The main difference between pwrseq and other Linux kernel providers is the
+mechanism for dynamic matching of consumers and providers. Every power sequence
+provider driver must implement the ``match()`` callback and pass it to the
+pwrseq core when registering with the subsystem.
+
+When a client requests a sequencer handle, the core will call this callback for
+every registered provider and let it flexibly figure out whether the proposed
+client device is indeed its consumer. For example: if the provider binds to the
+device-tree node representing a power management unit of a chipset and the
+consumer driver controls one of its modules, the provider driver may parse the
+relevant regulator supply properties in device tree and see if they lead from
+the PMU to the consumer.
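+
+A minimal sketch of such a callback follows. It assumes the callback returns
+1 when the proposed device is a consumer of this sequencer and 0 otherwise;
+the property it checks is made up for the example.
+
+.. code-block:: c
+
+   #include <linux/property.h>
+
+   static int example_match(struct pwrseq_device *pwrseq,
+                            struct device *dev)
+   {
+           /*
+            * A real driver would check whether the consumer's resources
+            * (e.g. its regulator supplies) lead back to this provider's
+            * firmware node.
+            */
+           if (!device_property_present(dev, "example-supply"))
+                   return 0; /* not our consumer */
+
+           return 1; /* match */
+   }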
+
+API reference
+=============
+
+.. kernel-doc:: include/linux/pwrseq/provider.h
+ :internal:
+
+.. kernel-doc:: drivers/power/sequencing/core.c
+ :export:
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index 978198f8a18b..c803b89b7248 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -58,10 +58,9 @@ temperature) and throttle appropriate devices.
ops:
thermal zone device call-backs.
- .bind:
- bind the thermal zone device with a thermal cooling device.
- .unbind:
- unbind the thermal zone device with a thermal cooling device.
+ .should_bind:
+ check whether or not a given cooling device should be bound to
+ a given trip point in this thermal zone.
.get_temp:
get the current temperature of the thermal zone.
.set_trips:
@@ -246,56 +245,6 @@ temperature) and throttle appropriate devices.
It deletes the corresponding entry from /sys/class/thermal folder and
unbinds itself from all the thermal zone devices using it.
-1.3 interface for binding a thermal zone device with a thermal cooling device
------------------------------------------------------------------------------
-
- ::
-
- int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
- int trip, struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower, unsigned int weight);
-
- This interface function binds a thermal cooling device to a particular trip
- point of a thermal zone device.
-
- This function is usually called in the thermal zone device .bind callback.
-
- tz:
- the thermal zone device
- cdev:
- thermal cooling device
- trip:
- indicates which trip point in this thermal zone the cooling device
- is associated with.
- upper:
- the Maximum cooling state for this trip point.
- THERMAL_NO_LIMIT means no upper limit,
- and the cooling device can be in max_state.
- lower:
- the Minimum cooling state can be used for this trip point.
- THERMAL_NO_LIMIT means no lower limit,
- and the cooling device can be in cooling state 0.
- weight:
- the influence of this cooling device in this thermal
- zone. See 1.4.1 below for more information.
-
- ::
-
- int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
- int trip, struct thermal_cooling_device *cdev);
-
- This interface function unbinds a thermal cooling device from a particular
- trip point of a thermal zone device. This function is usually called in
- the thermal zone device .unbind callback.
-
- tz:
- the thermal zone device
- cdev:
- thermal cooling device
- trip:
- indicates which trip point in this thermal zone the cooling device
- is associated with.
-
1.4 Thermal Zone Parameters
---------------------------
@@ -366,8 +315,6 @@ Thermal cooling device sys I/F, created once it's registered::
Then next two dynamic attributes are created/removed in pairs. They represent
the relationship between a thermal zone and its associated cooling device.
-They are created/removed for each successful execution of
-thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device.
::
@@ -459,14 +406,7 @@ are supposed to implement the callback. If they don't, the thermal
framework calculated the trend by comparing the previous and the current
temperature values.
-4.2. get_thermal_instance
--------------------------
-
-This function returns the thermal_instance corresponding to a given
-{thermal_zone, cooling_device, trip_point} combination. Returns NULL
-if such an instance does not exist.
-
-4.3. thermal_cdev_update
+4.2. thermal_cdev_update
------------------------
This function serves as an arbitrator to set the state of a cooling
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index 70380a2a01b4..8b8aeea71c68 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -141,6 +141,14 @@ configuration of fault-injection capabilities.
default is 'Y', setting it to 'N' will also inject failures into
highmem/user allocations (__GFP_HIGHMEM allocations).
+- /sys/kernel/debug/failslab/cache-filter
+ Format: { 'Y' | 'N' }
+
+ default is 'N', setting it to 'Y' will only inject failures when
+ objects are requested from certain caches.
+
+ Select the cache by writing '1' to /sys/kernel/slab/<cache>/failslab.
+
- /sys/kernel/debug/failslab/ignore-gfp-wait:
- /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait:
@@ -283,7 +291,7 @@ kernel may crash because it may not be able to handle the error.
There are 4 types of errors defined in include/asm-generic/error-injection.h
EI_ETYPE_NULL
- This function will return `NULL` if it fails. e.g. return an allocateed
+ This function will return `NULL` if it fails. e.g. return an allocated
object address.
EI_ETYPE_ERRNO
@@ -459,6 +467,18 @@ Application Examples
losetup -d $DEVICE
rm testfile.img
+------------------------------------------------------------------------------
+
+- Inject only skbuff allocation failures ::
+
+ # mark skbuff_head_cache as faulty
+ echo 1 > /sys/kernel/slab/skbuff_head_cache/failslab
+ # Turn on cache filter (off by default)
+ echo 1 > /sys/kernel/debug/failslab/cache-filter
+ # Turn on fault injection
+ echo 1 > /sys/kernel/debug/failslab/times
+ echo 1 > /sys/kernel/debug/failslab/probability
+
Tool to run command with failslab or fail_page_alloc
----------------------------------------------------
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
deleted file mode 100644
index 5a7508b8c967..000000000000
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Feature name: PG_uncached
-# Kconfig: ARCH_USES_PG_UNCACHED
-# description: arch supports the PG_uncached page flag
-#
- -----------------------
- | arch |status|
- -----------------------
- | alpha: | TODO |
- | arc: | TODO |
- | arm: | TODO |
- | arm64: | TODO |
- | csky: | TODO |
- | hexagon: | TODO |
- | loongarch: | TODO |
- | m68k: | TODO |
- | microblaze: | TODO |
- | mips: | TODO |
- | nios2: | TODO |
- | openrisc: | TODO |
- | parisc: | TODO |
- | powerpc: | TODO |
- | riscv: | TODO |
- | s390: | TODO |
- | sh: | TODO |
- | sparc: | TODO |
- | um: | TODO |
- | x86: | ok |
- | xtensa: | TODO |
- -----------------------
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index 514ed13a0122..2bbf68b56b0d 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -31,7 +31,7 @@ Other applications are described in the following papers:
* PROSE I/O: Using 9p to enable Application Partitions
http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf
* VirtFS: A Virtualization Aware File System pass-through
- http://goo.gl/3WPDg
+ https://kernel.org/doc/ols/2010/ols2010-pages-109-120.pdf
Usage
=====
diff --git a/Documentation/filesystems/autofs.rst b/Documentation/filesystems/autofs.rst
index 3b6e38e646cd..1ac576458c69 100644
--- a/Documentation/filesystems/autofs.rst
+++ b/Documentation/filesystems/autofs.rst
@@ -18,7 +18,7 @@ key advantages:
2. The names and locations of filesystems can be stored in
a remote database and can change at any time. The content
- in that data base at the time of access will be used to provide
+ in that database at the time of access will be used to provide
a target for the access. The interpretation of names in the
filesystem can even be programmatic rather than database-backed,
allowing wildcards for example, and can vary based on the user who
@@ -423,7 +423,7 @@ The available ioctl commands are:
and objects are expired if the are not in use.
**AUTOFS_EXP_FORCED** causes the in use status to be ignored
- and objects are expired ieven if they are in use. This assumes
+ and objects are expired even if they are in use. This assumes
that the daemon has requested this because it is capable of
performing the umount.
diff --git a/Documentation/filesystems/bcachefs/CodingStyle.rst b/Documentation/filesystems/bcachefs/CodingStyle.rst
index 0c45829a4899..01de555e21d8 100644
--- a/Documentation/filesystems/bcachefs/CodingStyle.rst
+++ b/Documentation/filesystems/bcachefs/CodingStyle.rst
@@ -175,7 +175,7 @@ errors in our thinking by running our code and seeing what happens. If your
time is being wasted because your tools are bad or too slow - don't accept it,
fix it.
-Put effort into your documentation, commmit messages, and code comments - but
+Put effort into your documentation, commit messages, and code comments - but
don't go overboard. A good commit message is wonderful - but if the information
was important enough to go in a commit message, ask yourself if it would be
even better as a code comment.
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 13e4b18e5dbb..0e2fac7a16da 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -86,6 +86,16 @@ authenticating fs-verity file hashes include:
signature in their "security.ima" extended attribute, as controlled
by the IMA policy. For more information, see the IMA documentation.
+- Integrity Policy Enforcement (IPE). IPE supports enforcing access
+ control decisions based on immutable security properties of files,
+ including those protected by fs-verity's built-in signatures.
+ "IPE policy" specifically allows for the authorization of fs-verity
+ files using properties ``fsverity_digest`` for identifying
+ files by their verity digest, and ``fsverity_signature`` to authorize
+files with a verified fs-verity built-in signature. For
+ details on configuring IPE policies and understanding its operational
+ modes, please refer to :doc:`IPE admin guide </admin-guide/LSM/ipe>`.
+
- Trusted userspace code in combination with `Built-in signature
verification`_. This approach should be used only with great care.
@@ -457,7 +467,11 @@ Enabling this option adds the following:
On success, the ioctl persists the signature alongside the Merkle
tree. Then, any time the file is opened, the kernel verifies the
file's actual digest against this signature, using the certificates
- in the ".fs-verity" keyring.
+ in the ".fs-verity" keyring. This verification happens as long as the
+ file's signature exists, regardless of the state of the sysctl variable
+ "fs.verity.require_signatures" described in the next item. The IPE LSM
+ relies on this behavior to recognize and label fs-verity files
+ that contain a verified built-in fs-verity signature.
3. A new sysctl "fs.verity.require_signatures" is made available.
When set to 1, the kernel requires that all verity files have a
@@ -481,7 +495,7 @@ be carefully considered before using them:
- Builtin signature verification does *not* make the kernel enforce
that any files actually have fs-verity enabled. Thus, it is not a
- complete authentication policy. Currently, if it is used, the only
+ complete authentication policy. Currently, if it is used, one
way to complete the authentication policy is for trusted userspace
code to explicitly check whether files have fs-verity enabled with a
signature before they are accessed. (With
@@ -490,6 +504,15 @@ be carefully considered before using them:
could just store the signature alongside the file and verify it
itself using a cryptographic library, instead of using this feature.
+- Another approach is to utilize fs-verity builtin signature
+ verification in conjunction with the IPE LSM, which supports defining
+ a kernel-enforced, system-wide authentication policy that allows only
+ files with a verified fs-verity builtin signature to perform certain
+ operations, such as execution. Note that IPE doesn't require
+ fs.verity.require_signatures=1.
+ Please refer to :doc:`IPE admin guide </admin-guide/LSM/ipe>` for
+ more details.
+
- A file's builtin signature can only be set at the same time that
fs-verity is being enabled on the file. Changing or deleting the
builtin signature later requires re-creating the file.
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
index ac0af679e61e..77930c77fcfe 100644
--- a/Documentation/filesystems/idmappings.rst
+++ b/Documentation/filesystems/idmappings.rst
@@ -821,7 +821,7 @@ the same idmapping to the mount. We now perform three steps:
/* Map the userspace id down into a kernel id in the filesystem's idmapping. */
make_kuid(u0:k20000:r10000, u1000) = k21000
-2. Verify that the caller's kernel ids can be mapped to userspace ids in the
+3. Verify that the caller's kernel ids can be mapped to userspace ids in the
filesystem's idmapping::
from_kuid(u0:k20000:r10000, k21000) = u1000
@@ -854,10 +854,10 @@ The same translation algorithm works with the third example.
/* Map the userspace id down into a kernel id in the filesystem's idmapping. */
make_kuid(u0:k0:r4294967295, u1000) = k1000
-2. Verify that the caller's kernel ids can be mapped to userspace ids in the
+3. Verify that the caller's kernel ids can be mapped to userspace ids in the
filesystem's idmapping::
- from_kuid(u0:k0:r4294967295, k21000) = u1000
+ from_kuid(u0:k0:r4294967295, k1000) = u1000
So the ownership that lands on disk will be ``u1000``.
@@ -994,7 +994,7 @@ from above:::
/* Map the userspace id down into a kernel id in the filesystem's idmapping. */
make_kuid(u0:k0:r4294967295, u1000) = k1000
-2. Verify that the caller's filesystem ids can be mapped to userspace ids in the
+3. Verify that the caller's filesystem ids can be mapped to userspace ids in the
filesystem's idmapping::
from_kuid(u0:k0:r4294967295, k1000) = u1000
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
index f8ee3427bc1a..b0d0188a095e 100644
--- a/Documentation/filesystems/iomap/design.rst
+++ b/Documentation/filesystems/iomap/design.rst
@@ -142,9 +142,9 @@ Definitions
* **pure overwrite**: A write operation that does not require any
metadata or zeroing operations to perform during either submission
or completion.
- This implies that the fileystem must have already allocated space
+ This implies that the filesystem must have already allocated space
on disk as ``IOMAP_MAPPED`` and the filesystem must not place any
- constaints on IO alignment or size.
+ constraints on IO alignment or size.
The only constraints on I/O alignment are device level (minimum I/O
size and alignment, typically sector size).
@@ -165,7 +165,7 @@ structure below:
u16 flags;
struct block_device *bdev;
struct dax_device *dax_dev;
- voidw *inline_data;
+ void *inline_data;
void *private;
const struct iomap_folio_ops *folio_ops;
u64 validity_cookie;
@@ -394,7 +394,7 @@ iomap is concerned:
* The **upper** level primitive is provided by the filesystem to
coordinate access to different iomap operations.
- The exact primitive is specifc to the filesystem and operation,
+ The exact primitive is specific to the filesystem and operation,
but is often a VFS inode, pagecache invalidation, or folio lock.
For example, a filesystem might take ``i_rwsem`` before calling
``iomap_file_buffered_write`` and ``iomap_file_unshare`` to prevent
@@ -426,7 +426,7 @@ iomap is concerned:
The exact locking requirements are specific to the filesystem; for
certain operations, some of these locks can be elided.
-All further mention of locking are *recommendations*, not mandates.
+All further mentions of locking are *recommendations*, not mandates.
Each filesystem author must figure out the locking for themself.
Bugs and Limitations
diff --git a/Documentation/filesystems/journalling.rst b/Documentation/filesystems/journalling.rst
index e18f90ffc6fd..0254f7d57429 100644
--- a/Documentation/filesystems/journalling.rst
+++ b/Documentation/filesystems/journalling.rst
@@ -137,7 +137,7 @@ Fast commits
JBD2 to also allows you to perform file-system specific delta commits known as
fast commits. In order to use fast commits, you will need to set following
-callbacks that perform correspodning work:
+callbacks that perform corresponding work:
`journal->j_fc_cleanup_cb`: Cleanup function called after every full commit and
fast commit.
@@ -149,7 +149,7 @@ File system is free to perform fast commits as and when it wants as long as it
gets permission from JBD2 to do so by calling the function
:c:func:`jbd2_fc_begin_commit()`. Once a fast commit is done, the client
file system should tell JBD2 about it by calling
-:c:func:`jbd2_fc_end_commit()`. If file system wants JBD2 to perform a full
+:c:func:`jbd2_fc_end_commit()`. If the file system wants JBD2 to perform a full
commit immediately after stopping the fast commit it can do so by calling
:c:func:`jbd2_fc_end_commit_fallback()`. This is useful if fast commit operation
fails for some reason and the only way to guarantee consistency is for JBD2 to
@@ -199,7 +199,7 @@ Journal Level
.. kernel-doc:: fs/jbd2/recovery.c
:internal:
-Transasction Level
+Transaction Level
~~~~~~~~~~~~~~~~~~
.. kernel-doc:: fs/jbd2/transaction.c
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index e664061ed55d..f5e3676db954 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -251,10 +251,10 @@ prototypes::
void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
@@ -280,7 +280,7 @@ read_folio: yes, unlocks shared
writepages:
dirty_folio: maybe
readahead: yes, unlocks shared
-write_begin: locks the page exclusive
+write_begin: locks the folio exclusive
write_end: yes, unlocks exclusive
bmap:
invalidate_folio: yes exclusive
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 4cc657d743f7..f0d2cb257bb8 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -116,7 +116,7 @@ The following services are provided:
* Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.
- * Handle clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that isn't on the server.
* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
diff --git a/Documentation/filesystems/nfs/index.rst b/Documentation/filesystems/nfs/index.rst
index 8536134f31fd..95c2c009874c 100644
--- a/Documentation/filesystems/nfs/index.rst
+++ b/Documentation/filesystems/nfs/index.rst
@@ -8,6 +8,7 @@ NFS
client-identifier
exporting
+ localio
pnfs
rpc-cache
rpc-server-gss
diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst
new file mode 100644
index 000000000000..bd1967e2eab3
--- /dev/null
+++ b/Documentation/filesystems/nfs/localio.rst
@@ -0,0 +1,357 @@
+===========
+NFS LOCALIO
+===========
+
+Overview
+========
+
+The LOCALIO auxiliary RPC protocol allows the Linux NFS client and
+server to reliably handshake to determine if they are on the same
+host. Select "NFS client and server support for LOCALIO auxiliary
+protocol" in menuconfig to enable CONFIG_NFS_LOCALIO in the kernel
+config (both CONFIG_NFS_FS and CONFIG_NFSD must also be enabled).
+
+Once an NFS client and server handshake as "local", the client will
+bypass the network RPC protocol for read, write and commit operations.
+Due to this XDR and RPC bypass, these operations complete faster.
+
+The LOCALIO auxiliary protocol's implementation, which uses the same
+connection as NFS traffic, follows the pattern established by the NFS
+ACL protocol extension.
+
+The LOCALIO auxiliary protocol is needed to allow robust discovery of
+clients local to their servers. In a private implementation that
+preceded use of this LOCALIO protocol, a fragile sockaddr network
+address based match against all local network interfaces was attempted.
+But unlike the LOCALIO protocol, the sockaddr-based matching didn't
+handle use of iptables or containers.
+
+The robust handshake between local client and server is just the
+beginning; the ultimate use case this locality makes possible is that
+the client is able to open files and issue reads, writes and commits
+directly to the server without having to go over the network. The
+requirement is to perform these loopback NFS operations as efficiently
+as possible; this is particularly useful for container use cases
+(e.g. kubernetes) where it is possible to run an IO job local to the
+server.
+
+The performance advantage realized from LOCALIO's ability to bypass
+using XDR and RPC for reads, writes and commits can be extreme, e.g.:
+
+fio for 20 secs with directio, qd of 8, 16 libaio threads:
+ - With LOCALIO:
+ 4K read: IOPS=979k, BW=3825MiB/s (4011MB/s)(74.7GiB/20002msec)
+ 4K write: IOPS=165k, BW=646MiB/s (678MB/s)(12.6GiB/20002msec)
+ 128K read: IOPS=402k, BW=49.1GiB/s (52.7GB/s)(982GiB/20002msec)
+ 128K write: IOPS=11.5k, BW=1433MiB/s (1503MB/s)(28.0GiB/20004msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=79.2k, BW=309MiB/s (324MB/s)(6188MiB/20003msec)
+ 4K write: IOPS=59.8k, BW=234MiB/s (245MB/s)(4671MiB/20002msec)
+ 128K read: IOPS=33.9k, BW=4234MiB/s (4440MB/s)(82.7GiB/20004msec)
+ 128K write: IOPS=11.5k, BW=1434MiB/s (1504MB/s)(28.0GiB/20011msec)
+
+fio for 20 secs with directio, qd of 8, 1 libaio thread:
+ - With LOCALIO:
+ 4K read: IOPS=230k, BW=898MiB/s (941MB/s)(17.5GiB/20001msec)
+ 4K write: IOPS=22.6k, BW=88.3MiB/s (92.6MB/s)(1766MiB/20001msec)
+ 128K read: IOPS=38.8k, BW=4855MiB/s (5091MB/s)(94.8GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1428MiB/s (1497MB/s)(27.9GiB/20001msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=77.1k, BW=301MiB/s (316MB/s)(6022MiB/20001msec)
+ 4K write: IOPS=32.8k, BW=128MiB/s (135MB/s)(2566MiB/20001msec)
+ 128K read: IOPS=24.4k, BW=3050MiB/s (3198MB/s)(59.6GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1430MiB/s (1500MB/s)(27.9GiB/20001msec)
+
+FAQ
+===
+
+1. What are the use cases for LOCALIO?
+
+ a. Workloads where the NFS client and server are on the same host
+ realize improved IO performance. In particular, it is common when
+ running containerised workloads for jobs to find themselves
+ running on the same host as the knfsd server being used for
+ storage.
+
+2. What are the requirements for LOCALIO?
+
+ a. Bypass use of the network RPC protocol as much as possible. This
+ includes bypassing XDR and RPC for open, read, write and commit
+ operations.
+ b. Allow client and server to autonomously discover if they are
+ running local to each other without making any assumptions about
+ the local network topology.
+ c. Support the use of containers by being compatible with relevant
+ namespaces (e.g. network, user, mount).
+ d. Support all versions of NFS. NFSv3 is of particular importance
+ because it has wide enterprise usage and pNFS flexfiles makes use
+ of it for the data path.
+
+3. Why doesn’t LOCALIO just compare IP addresses or hostnames when
+ deciding if the NFS client and server are co-located on the same
+ host?
+
+ Since one of the main use cases is containerised workloads, we cannot
+ assume that IP addresses will be shared between the client and
+ server. This sets up a requirement for a handshake protocol that
+ needs to go over the same connection as the NFS traffic in order to
+ identify that the client and the server really are running on the
+ same host. The handshake uses a secret that is sent over the wire,
+ and can be verified by both parties by comparing with a value stored
+ in shared kernel memory if they are truly co-located.
+
+4. Does LOCALIO improve pNFS flexfiles?
+
+ Yes, LOCALIO complements pNFS flexfiles by allowing it to take
+ advantage of NFS client and server locality. Policy that initiates
+ client IO as closely to the server where the data is stored naturally
+ benefits from the data path optimization LOCALIO provides.
+
+5. Why not develop a new pNFS layout to enable LOCALIO?
+
+ A new pNFS layout could be developed, but doing so would put the
+ onus on the server to somehow discover that the client is co-located
+ when deciding to hand out the layout.
+ There is value in a simpler approach (as provided by LOCALIO) that
+ allows the NFS client to negotiate and leverage locality without
+ requiring more elaborate modeling and discovery of such locality in a
+ more centralized manner.
+
+6. Why is having the client perform a server-side file OPEN, without
+ using RPC, beneficial? Is the benefit pNFS specific?
+
+ Avoiding the use of XDR and RPC for file opens is beneficial to
+ performance regardless of whether pNFS is used. Especially when
+ dealing with small files it's best to avoid going over the wire
+ whenever possible, otherwise it could reduce or even negate the
+ benefits of avoiding the wire for doing the small file I/O itself.
+ Given LOCALIO's requirements the current approach of having the
+ client perform a server-side file open, without using RPC, is ideal.
+ If in the future requirements change then we can adapt accordingly.
+
+7. Why is LOCALIO only supported with UNIX Authentication (AUTH_UNIX)?
+
+ Strong authentication is usually tied to the connection itself. It
+ works by establishing a context that is cached by the server, and
+ that acts as the key for discovering the authorisation token, which
+ can then be passed to rpc.mountd to complete the authentication
+ process. On the other hand, in the case of AUTH_UNIX, the credential
+ that was passed over the wire is used directly as the key in the
+ upcall to rpc.mountd. This simplifies the authentication process, and
+ so makes AUTH_UNIX easier to support.
+
+8. How do export options that translate RPC user IDs behave for LOCALIO
+ operations (e.g. root_squash, all_squash)?
+
+ Export options that translate user IDs are managed by nfsd_setuser()
+ which is called by nfsd_setuser_and_check_port() which is called by
+ __fh_verify(). So they get handled exactly the same way for LOCALIO
+ as they do for non-LOCALIO.
+
+9. How does LOCALIO make certain that object lifetimes are managed
+ properly given NFSD and NFS operate in different contexts?
+
+ See the detailed "NFS Client and Server Interlock" section below.
+
+RPC
+===
+
+The LOCALIO auxiliary RPC protocol consists of a single "UUID_IS_LOCAL"
+RPC method that allows the Linux NFS client to verify the local Linux
+NFS server can see the nonce (single-use UUID) the client generated and
+made available in nfs_common. This protocol isn't part of an IETF
+standard, nor does it need to be, considering it is a Linux-to-Linux
+auxiliary RPC protocol that amounts to an implementation detail.
+
+The UUID_IS_LOCAL method encodes the client generated uuid_t in terms of
+the fixed UUID_SIZE (16 bytes). The fixed size opaque encode and decode
+XDR methods are used instead of the less efficient variable sized
+methods.
+
+The RPC program number for the NFS_LOCALIO_PROGRAM is 400122 (as assigned
+by IANA, see https://www.iana.org/assignments/rpc-program-numbers/ ):
+Linux Kernel Organization 400122 nfslocalio
+
+The LOCALIO protocol spec in rpcgen syntax is::
+
+ /* raw RFC 9562 UUID */
+ #define UUID_SIZE 16
+ typedef u8 uuid_t<UUID_SIZE>;
+
+ program NFS_LOCALIO_PROGRAM {
+ version LOCALIO_V1 {
+ void
+ NULL(void) = 0;
+
+ void
+ UUID_IS_LOCAL(uuid_t) = 1;
+ } = 1;
+ } = 400122;
+
+LOCALIO uses the same transport connection as NFS traffic. As such,
+LOCALIO is not registered with rpcbind.
+
+NFS Common and Client/Server Handshake
+======================================
+
+fs/nfs_common/nfslocalio.c provides interfaces that enable an NFS client
+to generate a nonce (single-use UUID) and associated short-lived
+nfs_uuid_t struct, register it with nfs_common for subsequent lookup and
+verification by the NFS server and, if matched, the NFS server populates
+members in the nfs_uuid_t struct. The NFS client then uses nfs_common to
+transfer the nfs_uuid_t from its nfs_uuids to the nn->nfsd_serv
+clients_list from the nfs_common's uuids_list. See:
+fs/nfs/localio.c:nfs_local_probe()
+
+nfs_common's nfs_uuids list is the basis for LOCALIO enablement; as such
+it has members that point to nfsd memory for direct use by the client
+(e.g. 'net' is the server's network namespace, through it the client can
+access nn->nfsd_serv with proper rcu read access). It is this client
+and server synchronization that enables advanced usage and lifetime of
+objects to span from the host kernel's nfsd to per-container knfsd
+instances that are connected to nfs clients running on the same local
+host.
+
+NFS Client and Server Interlock
+===============================
+
+LOCALIO provides the nfs_uuid_t object and associated interfaces to
+allow proper network namespace (net-ns) and NFSD object refcounting:
+
+ We don't want to keep a long-term counted reference on each NFSD's
+ net-ns in the client because that prevents a server container from
+ completely shutting down.
+
+ So we avoid taking a reference at all and rely on the per-cpu
+ reference to the server (detailed below) being sufficient to keep
+ the net-ns active. This involves allowing the NFSD's net-ns exit
+ code to iterate all active clients and clear their ->net pointers
+ (which are needed to find the per-cpu-refcount for the nfsd_serv).
+
+ Details:
+
+ - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head
+ that can be used to find the client. It does add the 16-byte
+ uuid_t to nfs_client so it is bigger than needed (given that
+ uuid_t is only used during the initial NFS client and server
+ LOCALIO handshake to determine if they are local to each other).
+ If that is really a problem we can find a fix.
+
+ - When the nfs server confirms that the uuid_t is local, it moves
+ the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net.
+
+ - When each server's net-ns is shutting down - in a "pre_exit"
+ handler, all these nfs_uuid_t have their ->net cleared. There is
+ an rcu_synchronize() call between pre_exit() handlers and exit()
+ handlers so any caller that sees nfs_uuid_t ->net as not NULL can
+ safely manage the per-cpu-refcount for nfsd_serv.
+
+ - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it
+ can safely dereference ->net in a private rcu_read_lock() section
+ to allow safe access to the associated nfsd_net and nfsd_serv.
+
+So LOCALIO required the introduction and use of NFSD's percpu_ref to
+interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each
+nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and
+warrants a more detailed explanation:
+
+ nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its
+ nfsd_file handle and then the caller (NFS client) must drop the
+ reference for the nfsd_file and associated nn->nfsd_serv using
+ nfs_file_put_local() once it has completed its IO.
+
+ This interlock working relies heavily on nfsd_open_local_fh() being
+ afforded the ability to safely deal with the possibility that the
+ NFSD's net-ns (and nfsd_net by association) may have been destroyed
+ by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only
+ possible given the nfs_uuid_t ->net pointer management detailed
+ above.
+
+All told, this elaborate interlock of the NFS client and server has been
+verified to fix an easy to hit crash that would occur if an NFSD
+instance running in a container, with a LOCALIO client mounted, is
+shutdown. Upon restart of the container and associated NFSD, the client
+would go on to crash due to a NULL pointer dereference caused by the
+LOCALIO client attempting to call nfsd_open_local_fh(), using
+nn->nfsd_serv, without holding a proper reference on nn->nfsd_serv.
+
+NFS Client issues IO instead of Server
+======================================
+
+Because LOCALIO is focused on protocol bypass to achieve improved IO
+performance, alternatives to the traditional NFS wire protocol (SUNRPC
+with XDR) must be provided to access the backing filesystem.
+
+See fs/nfs/localio.c:nfs_local_open_fh() and
+fs/nfsd/localio.c:nfsd_open_local_fh() for the interface that makes
+focused use of select nfs server objects to allow a client local to a
+server to open a file pointer without needing to go over the network.
+
+The client's fs/nfs/localio.c:nfs_local_open_fh() will call into the
+server's fs/nfsd/localio.c:nfsd_open_local_fh() and carefully access
+both the associated nfsd network namespace and nn->nfsd_serv in terms of
+RCU. If nfsd_open_local_fh() finds that the client no longer sees valid
+nfsd objects (be it struct net or nn->nfsd_serv) it returns -ENXIO
+to nfs_local_open_fh() and the client will try to reestablish the
+LOCALIO resources needed by calling nfs_local_probe() again. This
+recovery is needed if/when an nfsd instance running in a container were
+to reboot while a LOCALIO client is connected to it.
+
+Once the client has an open nfsd_file pointer it will issue reads,
+writes and commits directly to the underlying local filesystem (normally
+done by the nfs server). As such, for these operations, the NFS client
+is issuing IO to the underlying local filesystem that it is sharing with
+the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and
+fs/nfs/localio.c:nfs_local_commit().
+
+Security
+========
+
+LOCALIO is only supported when UNIX-style authentication (AUTH_UNIX, aka
+AUTH_SYS) is used.
+
+Care is taken to ensure the same NFS security mechanisms are used
+(authentication, etc) regardless of whether LOCALIO or regular NFS
+access is used. The auth_domain established as part of the traditional
+NFS client access to the NFS server is also used for LOCALIO.
+
+Relative to containers, LOCALIO gives the client access to the network
+namespace the server has. This is required to allow the client to access
+the server's per-namespace nfsd_net struct. With traditional NFS, the
+client is afforded this same level of access (albeit in terms of the NFS
+protocol via SUNRPC). No other namespaces (user, mount, etc) have been
+altered or purposely extended from the server to the client.
+
+Testing
+=======
+
+The LOCALIO auxiliary protocol and associated NFS LOCALIO read, write
+and commit access have proven stable against various test scenarios:
+
+- Client and server both on the same host.
+
+- All permutations of client and server support enablement for both
+ local and remote client and server.
+
+- Testing against NFS storage products that don't support the LOCALIO
+ protocol was also performed.
+
+- Client on host, server within a container (for both v3 and v4.2).
+ The container testing was in terms of podman managed containers and
+ includes successful container stop/restart scenario.
+
+- Formalizing these test scenarios in terms of existing test
+ infrastructure is ongoing. Initial regular coverage is provided in
+ terms of ktest running xfstests against a LOCALIO-enabled NFS loopback
+ mount configuration, and includes lockdep and KASAN coverage, see:
+ https://evilpiepirate.org/~testdashboard/ci?user=snitzer&branch=snitm-nfs-next
+ https://github.com/koverstreet/ktest
+
+- Various kdevops testing (in terms of "Chuck's BuildBot") has been
+ performed to regularly verify the LOCALIO changes haven't caused any
+ regressions to non-LOCALIO NFS use cases.
+
+- All of Hammerspace's various sanity tests pass with LOCALIO enabled
+ (this includes numerous pNFS and flexfiles tests).
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 165514401441..343644712340 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -367,8 +367,11 @@ Metadata only copy up
When the "metacopy" feature is enabled, overlayfs will only copy
up metadata (as opposed to whole file), when a metadata specific operation
-like chown/chmod is performed. Full file will be copied up later when
-file is opened for WRITE operation.
+like chown/chmod is performed. An upper file in this state is marked with
+"trusted.overlayfs.metacopy" xattr which indicates that the upper file
+contains no data. The data will be copied up later when the file is opened
+for a WRITE operation. After the lower file's data is copied up,
+the "trusted.overlayfs.metacopy" xattr is removed from the upper file.
In other words, this is delayed data copy up operation and data is copied
up when there is a need to actually modify data.
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 6e903a903f8f..0b18af3f954e 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -810,7 +810,7 @@ cache in your filesystem. The following members are defined:
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
@@ -913,8 +913,7 @@ cache in your filesystem. The following members are defined:
stop attempting I/O, it can simply return. The caller will
remove the remaining pages from the address space, unlock them
and decrement the page refcount. Set PageUptodate if the I/O
- completes successfully. Setting PageError on any page will be
- ignored; simply unlock the page if an I/O error occurs.
+ completes successfully.
``write_begin``
Called by the generic buffered write code to ask the filesystem
@@ -926,12 +925,12 @@ cache in your filesystem. The following members are defined:
(if they haven't been read already) so that the updated blocks
can be written out properly.
- The filesystem must return the locked pagecache page for the
- specified offset, in ``*pagep``, for the caller to write into.
+ The filesystem must return the locked pagecache folio for the
+ specified offset, in ``*foliop``, for the caller to write into.
It must be able to cope with short writes (where the length
passed to write_begin is greater than the number of bytes copied
- into the page).
+ into the folio).
A void * may be returned in fsdata, which then gets passed into
write_end.
@@ -944,8 +943,8 @@ cache in your filesystem. The following members are defined:
called. len is the original len passed to write_begin, and
copied is the amount that was able to be copied.
- The filesystem must take care of unlocking the page and
- releasing it refcount, and updating i_size.
+ The filesystem must take care of unlocking the folio,
+ decrementing its refcount, and updating i_size.
Returns < 0 on failure, otherwise the number of bytes (<=
'copied') that were able to be copied into pagecache.
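+
+ A minimal sketch of a ``write_end`` implementation (illustrative only; a
+ real filesystem typically has more to do, such as handling a short copy
+ into a folio that is not fully up to date)::
+
+   static int myfs_write_end(struct file *file, struct address_space *mapping,
+                             loff_t pos, unsigned len, unsigned copied,
+                             struct folio *folio, void *fsdata)
+   {
+           struct inode *inode = mapping->host;
+
+           /* Simplified: assume write_begin left the folio uptodate. */
+           folio_mark_uptodate(folio);
+           folio_mark_dirty(folio);
+
+           /* Drop the lock and the reference taken by write_begin. */
+           folio_unlock(folio);
+           folio_put(folio);
+
+           /* Extend i_size if this write went past the current end. */
+           if (pos + copied > inode->i_size)
+                   i_size_write(inode, pos + copied);
+
+           return copied;
+   }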
diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst
index 467e6843aef6..32723a925377 100644
--- a/Documentation/gpu/amdgpu/driver-core.rst
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -179,4 +179,4 @@ IP Blocks
:doc: IP Blocks
.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
- :identifiers: amd_ip_block_type amd_ip_funcs
+ :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
index b7c0baf97dbe..3cd0c8860b94 100644
--- a/Documentation/gpu/introduction.rst
+++ b/Documentation/gpu/introduction.rst
@@ -154,11 +154,11 @@ Conference talks
* `An Overview of the Linux and Userspace Graphics Stack <https://www.youtube.com/watch?v=wjAJmqwg47k>`_ - Paul Kocialkowski (2020)
* `Getting pixels on screen on Linux: introduction to Kernel Mode Setting <https://www.youtube.com/watch?v=haes4_Xnc5Q>`_ - Simon Ser (2020)
-* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Daniel Vetter (2019)
+* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Simona Vetter (2019)
* `An introduction to the Linux DRM subsystem <https://www.youtube.com/watch?v=LbDOCJcDRoo>`_ - Maxime Ripard (2017)
-* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Daniel Vetter (2016)
+* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Simona Vetter (2016)
* `Anatomy of an Atomic KMS Driver <https://www.youtube.com/watch?v=lihqR9sENpc>`_ - Laurent Pinchart (2015)
-* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Daniel Vetter (2015)
+* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Simona Vetter (2015)
* `Anatomy of an Embedded KMS Driver <https://www.youtube.com/watch?v=Ja8fM7rTae4>`_ - Laurent Pinchart (2013)
Slides and articles
@@ -169,8 +169,8 @@ Slides and articles
* `Understanding the Linux Graphics Stack <https://bootlin.com/doc/training/graphics/graphics-slides.pdf>`_ - Bootlin (2022)
* `DRM KMS overview <https://wiki.st.com/stm32mpu/wiki/DRM_KMS_overview>`_ - STMicroelectronics (2021)
* `Linux graphic stack <https://studiopixl.com/2017-05-13/linux-graphic-stack-an-overview>`_ - Nathan Gauër (2017)
-* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Daniel Vetter (2015)
-* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Daniel Vetter (2015)
+* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Simona Vetter (2015)
+* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Simona Vetter (2015)
* `The DRM/KMS subsystem from a newbie’s point of view <https://bootlin.com/pub/conferences/2014/elce/brezillon-drm-kms/brezillon-drm-kms.pdf>`_ - Boris Brezillon (2014)
* `A brief introduction to the Linux graphics stack <https://blogs.igalia.com/itoral/2014/07/29/a-brief-introduction-to-the-linux-graphics-stack/>`_ - Iago Toral (2014)
* `The Linux Graphics Stack <https://blog.mecheye.net/2012/06/the-linux-graphics-stack/>`_ - Jasper St. Pierre (2012)
diff --git a/Documentation/gpu/komeda-kms.rst b/Documentation/gpu/komeda-kms.rst
index 633a016563ae..eaea40eb725b 100644
--- a/Documentation/gpu/komeda-kms.rst
+++ b/Documentation/gpu/komeda-kms.rst
@@ -86,7 +86,7 @@ types of working mode:
- Single display mode
Two pipelines work together to drive only one display output.
- On this mode, pipeline_B doesn't work indenpendently, but outputs its
+ On this mode, pipeline_B doesn't work independently, but outputs its
composition result into pipeline_A, and its pixel timing also derived from
pipeline_A.timing_ctrlr. The pipeline_B works just like a "slave" of
pipeline_A(master)
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 2ea6ffc9b22b..2b281e3c75a4 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -37,7 +37,7 @@ Audit each individual driver, make sure it'll work with the generic
implementation (there's lots of outdated locking leftovers in various
implementations), and then remove it.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Intermediate
@@ -61,7 +61,7 @@ do by directly using the new atomic helper driver callbacks.
.. [2] https://lwn.net/Articles/653071/
.. [3] https://lwn.net/Articles/653466/
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -75,7 +75,7 @@ helper should also be moved from drm_plane_helper.c to the atomic helpers, to
avoid confusion - the other helpers in that file are all deprecated legacy
helpers.
-Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
+Contact: Ville Syrjälä, Simona Vetter, driver maintainers
Level: Advanced
@@ -97,7 +97,7 @@ with the current helpers:
- Then we could go through all the drivers and remove the more-or-less confused
checks for plane_state->fb and plane_state->crtc.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Advanced
@@ -116,7 +116,7 @@ Somewhat related is the legacy_cursor_update hack, which should be replaced with
the new atomic_async_check/commit functionality in the helpers in drivers that
still look at that flag.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -169,7 +169,7 @@ interfaces to fix these issues:
``_helper_funcs`` since they are not part of the core ABI. There's a
``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -194,7 +194,7 @@ performance-critical drivers it might also be better to go with a more
fine-grained per-buffer object and per-context lockings scheme. Currently only
the ``msm`` and `i915` drivers use ``struct_mutex``.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -251,7 +251,7 @@ being rewritten without dependencies on the fbdev module. Some of the
helpers could further benefit from using struct iosys_map instead of
raw pointers.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
Level: Advanced
@@ -297,7 +297,7 @@ Various hold-ups:
version of the varios drm_gem_fb_create functions. Maybe called
drm_gem_fb_create/_with_dirty/_with_funcs as needed.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -329,7 +329,7 @@ everything after it has done the write-protect/mkwrite trickery:
Might be good to also have some igt testcases for this.
-Contact: Daniel Vetter, Noralf Tronnes
+Contact: Simona Vetter, Noralf Tronnes
Level: Advanced
@@ -359,7 +359,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
- Once all drivers are converted, remove the load/unload callbacks.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -422,7 +422,7 @@ The task is to use struct iosys_map where it makes sense.
* TTM might benefit from using struct iosys_map internally.
* Framebuffer copying and blitting helpers should operate on struct iosys_map.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Simona Vetter
Level: Intermediate
@@ -475,25 +475,22 @@ Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
drm_panel"), we have a check in the drm_panel core to make sure nobody
double-calls prepare/enable/disable/unprepare. Eventually that should probably
-be turned into a WARN_ON() or somehow made louder, but right now we actually
-expect it to trigger and so we don't want it to be too loud.
-
-Specifically, that warning will trigger for panel-edp and panel-simple at
-shutdown time because those panels hardcode a call to drm_panel_disable()
-and drm_panel_unprepare() at shutdown and remove time that they call regardless
-of panel state. On systems with a properly coded DRM modeset driver that
-calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause
-the warning to fire.
-
-Unfortunately we can't safely remove the calls in panel-edp and panel-simple
-until we're sure that all DRM modeset drivers that are used with those panels
-properly call drm_atomic_helper_shutdown(). This TODO item is to validate
-that all DRM modeset drivers used with panel-edp and panel-simple properly
-call drm_atomic_helper_shutdown() and then remove the calls to
-disable/unprepare from those panels. Alternatively, this TODO item could be
-removed by convincing stakeholders that those calls are fine and downgrading
-the error message in drm_panel_disable() / drm_panel_unprepare() to a
-debug-level message.
+be turned into a WARN_ON() or somehow made louder.
+
+At the moment, we expect that we may still encounter the warnings in the
+drm_panel core when using panel-simple and panel-edp. Since those panel
+drivers are used with a lot of different DRM modeset drivers, they still
+make an extra effort to disable/unprepare the panel themselves at shutdown
+time. Specifically, we could still encounter those warnings if the panel
+driver gets shutdown() _before_ the DRM modeset driver and the DRM modeset
+driver properly calls drm_atomic_helper_shutdown() in its own shutdown()
+callback. Warnings could be avoided in such a case by using something like
+device links to ensure that the panel gets shutdown() after the DRM modeset
+driver.
+
+Once all DRM modeset drivers are known to shutdown properly, the extra
+calls to disable/unprepare in remove/shutdown in panel-simple and panel-edp
+should be removed and this TODO item marked complete.
Contact: Douglas Anderson <dianders@chromium.org>
@@ -561,7 +558,7 @@ This is a really varied tasks with lots of little bits and pieces:
<https://lore.kernel.org/lkml/1446217392-11981-1-git-send-email-alexandru.murtaza@intel.com/>`_
for some example code that could be reused.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Advanced
@@ -590,7 +587,7 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -611,7 +608,7 @@ Both these problems can be solved by switching over to drmm_kzalloc(), and the
various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(),
drmm_universal_plane_alloc(), ... and so on.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -631,7 +628,7 @@ cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
this problem for USB devices by fishing out the USB host controller device, as
long as that supports DMA. Otherwise importing can still needlessly fail.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
Level: Advanced
@@ -712,7 +709,7 @@ Plan to fix this:
2. In all, only look at one of the three status bits set by the above helpers.
3. Remove the other two status bits.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
diff --git a/Documentation/gpu/xe/xe_mm.rst b/Documentation/gpu/xe/xe_mm.rst
index 6c8fd8b4a466..95864a4502dd 100644
--- a/Documentation/gpu/xe/xe_mm.rst
+++ b/Documentation/gpu/xe/xe_mm.rst
@@ -7,6 +7,21 @@ Memory Management
.. kernel-doc:: drivers/gpu/drm/xe/xe_bo_doc.h
:doc: Buffer Objects (BO)
+GGTT
+====
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+ :doc: Global Graphics Translation Table (GGTT)
+
+GGTT Internal API
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt_types.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+ :internal:
+
Pagetable building
==================
diff --git a/Documentation/hid/intel-ish-hid.rst b/Documentation/hid/intel-ish-hid.rst
index 55cbaa719a79..2adc174fb576 100644
--- a/Documentation/hid/intel-ish-hid.rst
+++ b/Documentation/hid/intel-ish-hid.rst
@@ -404,6 +404,35 @@ For more detailed information, please refer to the flow descriptions provided be
| ISHTP Driver | | ISH Bootloader |
+---------------+ +-----------------+
+Vendor Custom Firmware Loading
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The firmware running inside ISH can be provided by Intel or developed by vendors using the Firmware Development Kit (FDK) provided by Intel.
+Intel will upstream the Intel-built firmware to the ``linux-firmware.git`` repository, located under the path ``intel/ish/``. For the Lunar Lake platform, the Intel-built ISH firmware will be named ``ish_lnlm.bin``.
+Vendors who wish to upstream their custom firmware should follow these guidelines for naming their firmware files:
+
+- The firmware filename should use one of the following patterns:
+
+ - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}_${PRODUCT_SKU_CRC32}.bin``
+ - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin``
+ - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}.bin``
+ - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}.bin``
+
+- ``${intel_plat_gen}`` indicates the Intel platform generation (e.g., ``lnlm`` for Lunar Lake) and must not exceed 8 characters in length.
+- ``${SYS_VENDOR_CRC32}`` is the CRC32 checksum of the ``sys_vendor`` value from the DMI field ``DMI_SYS_VENDOR``.
+- ``${PRODUCT_NAME_CRC32}`` is the CRC32 checksum of the ``product_name`` value from the DMI field ``DMI_PRODUCT_NAME``.
+- ``${PRODUCT_SKU_CRC32}`` is the CRC32 checksum of the ``product_sku`` value from the DMI field ``DMI_PRODUCT_SKU``.
+
+During system boot, the ISH Linux driver will attempt to load the firmware in the following order, prioritizing custom firmware with more precise matching patterns:
+
+1. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}_${PRODUCT_SKU_CRC32}.bin``
+2. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin``
+3. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}.bin``
+4. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}.bin``
+5. ``intel/ish/ish_${intel_plat_gen}.bin``
+
+The driver will load the first matching firmware and skip the rest. If no matching firmware is found, it will proceed to the next pattern in the specified order. If all searches fail, the default Intel firmware, listed last in the order above, will be loaded.
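+
+For example, ``${SYS_VENDOR_CRC32}`` could be computed from the DMI value like
+this (a sketch; whether trailing whitespace is stripped before hashing is an
+assumption)::
+
+  $ python3 -c 'import zlib; print("%08x" % zlib.crc32(open("/sys/class/dmi/id/sys_vendor","rb").read().strip()))'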
+
ISH Debugging
-------------
diff --git a/Documentation/hwmon/hwmon-kernel-api.rst b/Documentation/hwmon/hwmon-kernel-api.rst
index 6cacf7daf25c..8297acfa3a2d 100644
--- a/Documentation/hwmon/hwmon-kernel-api.rst
+++ b/Documentation/hwmon/hwmon-kernel-api.rst
@@ -38,8 +38,6 @@ register/unregister functions::
void hwmon_device_unregister(struct device *dev);
- void devm_hwmon_device_unregister(struct device *dev);
-
char *hwmon_sanitize_name(const char *name);
char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
@@ -64,11 +62,6 @@ monitoring device structure. This function must be called from the driver
remove function if the hardware monitoring device was registered with
hwmon_device_register_with_info.
-devm_hwmon_device_unregister does not normally have to be called. It is only
-needed for error handling, and only needed if the driver probe fails after
-the call to devm_hwmon_device_register_with_info and if the automatic (device
-managed) removal would be too late.
-
All supported hwmon device registration functions only accept valid device
names. Device names including invalid characters (whitespace, '*', or '-')
will be rejected. The 'name' parameter is mandatory.
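+
+A minimal probe-time registration sketch (``client``, ``data``, ``chip_info``,
+and ``hwmon_dev`` are illustrative names)::
+
+	name = devm_hwmon_sanitize_name(dev, client->name);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, name, data,
+							 &chip_info, NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);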
diff --git a/Documentation/hwmon/ina2xx.rst b/Documentation/hwmon/ina2xx.rst
index 27d2e39bc8ac..7f1939b40f74 100644
--- a/Documentation/hwmon/ina2xx.rst
+++ b/Documentation/hwmon/ina2xx.rst
@@ -99,6 +99,10 @@ Sysfs entries for ina226, ina230 and ina231 only
------------------------------------------------
======================= ====================================================
+curr1_lcrit Critical low current
+curr1_crit Critical high current
+curr1_lcrit_alarm Current critical low alarm
+curr1_crit_alarm Current critical high alarm
in0_lcrit Critical low shunt voltage
in0_crit Critical high shunt voltage
in0_lcrit_alarm Shunt voltage critical low alarm
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 913c11390a45..ea3b5be8fe4f 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -206,6 +206,7 @@ Hardware Monitoring Kernel Drivers
sch5636
scpi-hwmon
sfctemp
+ sg2042-mcu
sht15
sht21
sht3x
diff --git a/Documentation/hwmon/lm92.rst b/Documentation/hwmon/lm92.rst
index c131b923ed36..d71cdb2af339 100644
--- a/Documentation/hwmon/lm92.rst
+++ b/Documentation/hwmon/lm92.rst
@@ -3,29 +3,29 @@ Kernel driver lm92
Supported chips:
- * National Semiconductor LM92
+ * National Semiconductor / Texas Instruments LM92
Prefix: 'lm92'
Addresses scanned: I2C 0x48 - 0x4b
- Datasheet: http://www.national.com/pf/LM/LM92.html
+ Datasheet: https://www.ti.com/lit/gpn/LM92
- * National Semiconductor LM76
+ * National Semiconductor / Texas Instruments LM76
Prefix: 'lm92'
- Addresses scanned: none, force parameter needed
+ Addresses scanned: none, must be instantiated explicitly
- Datasheet: http://www.national.com/pf/LM/LM76.html
+ Datasheet: https://www.ti.com/lit/gpn/LM76
- * Maxim MAX6633/MAX6634/MAX6635
+ * Maxim / Analog Devices MAX6633/MAX6634/MAX6635
Prefix: 'max6635'
- Addresses scanned: none, force parameter needed
+ Addresses scanned: none, must be instantiated explicitly
- Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/max6633-max6635.pdf
Authors:
@@ -36,13 +36,13 @@ Authors:
Description
-----------
-This driver implements support for the National Semiconductor LM92
-temperature sensor.
+This driver implements support for the National Semiconductor / Texas
+Instruments LM92 temperature sensor.
Each LM92 temperature sensor supports a single temperature sensor. There are
alarms for high, low, and critical thresholds. There's also an hysteresis to
control the thresholds for resetting alarms.
-Support was added later for the LM76 and Maxim MAX6633/MAX6634/MAX6635,
-which are mostly compatible. They have not all been tested, so you
-may need to use the force parameter.
+The driver also supports LM76 and Maxim MAX6633/MAX6634/MAX6635, which are
+mostly compatible but do not have a vendor ID register and therefore must be
+instantiated explicitly.
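+
+For example, a MAX6635 at address 0x48 on I2C bus 0 can be instantiated from
+user space as follows (bus number and address are illustrative)::
+
+  # echo max6635 0x48 > /sys/bus/i2c/devices/i2c-0/new_device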
diff --git a/Documentation/hwmon/max1619.rst b/Documentation/hwmon/max1619.rst
index e25956e70f73..b5fc175ae18d 100644
--- a/Documentation/hwmon/max1619.rst
+++ b/Documentation/hwmon/max1619.rst
@@ -27,7 +27,3 @@ All temperature values are given in degrees Celsius. Resolution
is 1.0 degree for the local temperature and for the remote temperature.
Only the external sensor has high and low limits.
-
-The max1619 driver will not update its values more frequently than every
-other second; reading them more often will do no harm, but will return
-'old' values.
diff --git a/Documentation/hwmon/oxp-sensors.rst b/Documentation/hwmon/oxp-sensors.rst
index 55b1ef61625e..581c4dafbfa1 100644
--- a/Documentation/hwmon/oxp-sensors.rst
+++ b/Documentation/hwmon/oxp-sensors.rst
@@ -10,41 +10,59 @@ Authors:
Description:
------------
-Handheld devices from One Netbook and Aya Neo provide fan readings and fan
-control through their embedded controllers.
+Handheld devices from OneNetbook, AOKZOE, AYANEO, and OrangePi provide fan
+readings and fan control through their embedded controllers.
-Currently only supports AMD boards from One X Player, AOK ZOE, and some Aya
-Neo devices. One X Player Intel boards could be supported if we could figure
-out the EC registers and values to write to since the EC layout and model is
-different. Aya Neo devices preceding the AIR may not be supportable as the EC
-model is different and do not appear to have manual control capabilities.
+Currently supports OneXPlayer, AOKZOE, AYANEO, and OrangePi
+handheld devices. AYANEO devices preceding the AIR and OneXPlayer devices
+preceding the Mini A07 are not supportable, as their EC model is different
+and lacks manual control capabilities.
-Some models have a toggle for changing the behaviour of the "Turbo/Silent"
-button of the device. It will change the key event that it triggers with
-a flip of the `tt_toggle` attribute. See below for boards that support this
-function.
+Some OneXPlayer and AOKZOE models have a toggle for changing the behaviour
+of the "Turbo/Silent" button of the device. It will change the key event
+that it triggers with a flip of the `tt_toggle` attribute. See below for
+boards that support this function.
Supported devices
-----------------
Currently the driver supports the following handhelds:
- - AOK ZOE A1
- - AOK ZOE A1 PRO
- - Aya Neo 2
- - Aya Neo AIR
- - Aya Neo AIR Plus (Mendocino)
- - Aya Neo AIR Pro
- - Aya Neo Geek
+ - AOKZOE A1
+ - AOKZOE A1 PRO
+ - AYANEO 2
+ - AYANEO 2S
+ - AYANEO AIR
+ - AYANEO AIR 1S
+ - AYANEO AIR Plus (Mendocino)
+ - AYANEO AIR Pro
+ - AYANEO Flip DS
+ - AYANEO Flip KB
+ - AYANEO Geek
+ - AYANEO Geek 1S
+ - AYANEO KUN
+ - OneXPlayer 2
+ - OneXPlayer 2 Pro
- OneXPlayer AMD
- OneXPlayer mini AMD
- OneXPlayer mini AMD PRO
+ - OneXPlayer OneXFly
+ - OneXPlayer X1 A
+ - OneXPlayer X1 i
+ - OneXPlayer X1 mini
+ - OrangePi NEO-01
"Turbo/Silent" button behaviour toggle is only supported on:
- AOK ZOE A1
- AOK ZOE A1 PRO
+ - OneXPlayer 2
+ - OneXPlayer 2 Pro
- OneXPlayer mini AMD (only with updated alpha BIOS)
- OneXPlayer mini AMD PRO
+ - OneXPlayer OneXFly
+ - OneXPlayer X1 A
+ - OneXPlayer X1 i
+ - OneXPlayer X1 mini
Sysfs entries
-------------
@@ -52,7 +70,7 @@ Sysfs entries
The following attributes are supported:
fan1_input
- Read Only. Reads current fan RMP.
+ Read Only. Reads current fan RPM.
pwm1_enable
Read Write. Enable manual fan control. Write "1" to set to manual, write "0"
diff --git a/Documentation/hwmon/sg2042-mcu.rst b/Documentation/hwmon/sg2042-mcu.rst
new file mode 100644
index 000000000000..077e79841d2e
--- /dev/null
+++ b/Documentation/hwmon/sg2042-mcu.rst
@@ -0,0 +1,78 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver sg2042-mcu
+========================
+
+Supported chips:
+
+ * Onboard MCU for sg2042
+
+ Addresses scanned: -
+
+ Prefix: 'sg2042-mcu'
+
+Authors:
+
+ - Inochi Amaoto <inochiama@outlook.com>
+
+Description
+-----------
+
+This driver supports hardware monitoring for the onboard MCU with an
+I2C interface.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate
+the devices explicitly.
+Please see Documentation/i2c/instantiating-devices.rst for details.
+
+Sysfs Attributes
+----------------
+
+The following table shows the standard entries supported by the driver:
+
+================= =====================================================
+Name Description
+================= =====================================================
+temp1_input Measured temperature of SoC
+temp1_crit Critical high temperature
+temp1_crit_hyst Hysteresis temperature for restoring from critical
+temp2_input Measured temperature of the base board
+================= =====================================================
+
+The following table shows the extra entries supported by the driver
+(the MCU device is in the i2c subsystem):
+
+================= ======= =============================================
+Name Perm Description
+================= ======= =============================================
+reset_count RO Reset count of the SoC
+uptime RO Seconds since the MCU was powered on
+reset_reason RO Reset reason for the last reset
+repower_policy RW Execution policy when triggering repower
+================= ======= =============================================
+
+``repower_policy``
+ The repower is triggered when the temperature of the SoC falls below
+ the hysteresis temperature after triggering a shutdown due to
+ reaching the critical temperature.
+ The valid values for this entry are "repower" and "keep". "keep" will
+ leave the SoC down when the repower is triggered, and "repower" will
+ boot the SoC.
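+
+ For example (the sysfs path is illustrative)::
+
+   # echo keep > /sys/bus/i2c/devices/1-0017/repower_policy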
+
+Debugfs Interfaces
+------------------
+
+If debugfs is available, this driver exposes some hardware-specific
+data in ``/sys/kernel/debug/sg2042-mcu/*/``.
+
+================= ======= =============================================
+Name Format Description
+================= ======= =============================================
+firmware_version 0x%02x firmware version of the MCU
+pcb_version 0x%02x version number of the base board
+board_type 0x%02x identifiers for the base board
+mcu_type %d type of the MCU: 0 is STM32, 1 is GD32
+================= ======= =============================================
diff --git a/Documentation/i2c/slave-testunit-backend.rst b/Documentation/i2c/slave-testunit-backend.rst
index 37142a48ab35..d752f433be07 100644
--- a/Documentation/i2c/slave-testunit-backend.rst
+++ b/Documentation/i2c/slave-testunit-backend.rst
@@ -20,11 +20,25 @@ Instantiating the device is regular. Example for bus 0, address 0x30::
# echo "slave-testunit 0x1030" > /sys/bus/i2c/devices/i2c-0/new_device
-After that, you will have a write-only device listening. Reads will just return
-an 8-bit version number of the testunit. When writing, the device consists of 4
-8-bit registers and, except for some "partial" commands, all registers must be
-written to start a testcase, i.e. you usually write 4 bytes to the device. The
-registers are:
+Or you can use firmware nodes. Here is a devicetree example (note this is only a
+debug device, so there are no official DT bindings)::
+
+ &i2c0 {
+ ...
+
+ testunit@30 {
+ compatible = "slave-testunit";
+ reg = <(0x30 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+ };
+
+After that, you will have the device listening. Reading will return a single
+byte. Its value is 0 if the testunit is idle, otherwise the command number of
+the currently running command.
+
+When writing, the device consists of 4 8-bit registers and, except for some
+"partial" commands, all registers must be written to start a testcase, i.e. you
+usually write 4 bytes to the device. The registers are:
.. csv-table::
:header: "Offset", "Name", "Description"
@@ -75,7 +89,7 @@ from another device on the bus. If the bus master under test also wants to
access the bus at the same time, the bus will be busy. Example to read 128
bytes from device 0x50 after 50ms of delay::
- # i2cset -y 0 0x30 0x01 0x50 0x80 0x05 i
+ # i2cset -y 0 0x30 1 0x50 0x80 5 i
0x02 SMBUS_HOST_NOTIFY
~~~~~~~~~~~~~~~~~~~~~~
@@ -95,9 +109,9 @@ bytes from device 0x50 after 50ms of delay::
Also needs master mode. This test will send an SMBUS_HOST_NOTIFY message to the
host. Note that the status word is currently ignored in the Linux Kernel.
-Example to send a notification after 10ms::
+Example to send a notification with status word 0x6442 after 10ms::
- # i2cset -y 0 0x30 0x02 0x42 0x64 0x01 i
+ # i2cset -y 0 0x30 2 0x42 0x64 1 i
If the host controller supports HostNotify, this message with debug level
should appear (Linux 6.11 and later)::
@@ -116,7 +130,7 @@ should appear (Linux 6.11 and later)::
- DELAY
* - 0x03
- - must be '1', i.e. one further byte will be written
+ - 0x01 (i.e. one further byte will be written)
- number of bytes to be sent back
- leave out, partial command!
@@ -131,5 +145,91 @@ from length-1 to 0. Here is an example which emulates
i2c_smbus_block_process_call() using i2ctransfer (you need i2c-tools v4.2 or
later)::
- # i2ctransfer -y 0 w3@0x30 0x03 0x01 0x10 r?
+ # i2ctransfer -y 0 w3@0x30 3 1 0x10 r?
0x10 0x0f 0x0e 0x0d 0x0c 0x0b 0x0a 0x09 0x08 0x07 0x06 0x05 0x04 0x03 0x02 0x01 0x00
+
+0x04 GET_VERSION_WITH_REP_START
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+ :header-rows: 1
+
+ * - CMD
+ - DATAL
+ - DATAH
+ - DELAY
+
+ * - 0x04
+ - currently unused
+ - currently unused
+ - leave out, partial command!
+
+Partial command. After sending this command, the testunit will reply to a read
+message with a NUL-terminated version string based on UTS_RELEASE. The first
+character is always a 'v' and the version string is at most 128 bytes long.
+However, it will only respond if the read message is connected to
+the write message via repeated start. If your controller driver handles
+repeated start correctly, this will work::
+
+ # i2ctransfer -y 0 w3@0x30 4 0 0 r128
+ 0x76 0x36 0x2e 0x31 0x31 0x2e 0x30 0x2d 0x72 0x63 0x31 0x2d 0x30 0x30 0x30 0x30 ...
+
+If you have i2c-tools 4.4 or later, you can print out the data right away::
+
+ # i2ctransfer -y -b 0 w3@0x30 4 0 0 r128
+ v6.11.0-rc1-00009-gd37a1b4d3fd0
+
+STOP/START combinations between the two messages will *not* work because they
+are not equivalent to a REPEATED START. As an example, this returns just the
+default response::
+
+ # i2cset -y 0 0x30 4 0 0 i; i2cget -y 0 0x30
+ 0x00
+
+0x05 SMBUS_ALERT_REQUEST
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+ :header-rows: 1
+
+ * - CMD
+ - DATAL
+ - DATAH
+ - DELAY
+
+ * - 0x05
+ - response value (7 MSBs interpreted as I2C address)
+ - currently unused
+ - n * 10ms
+
+This test raises an interrupt via the SMBAlert pin, which the host controller
+must handle. The pin must be connected to the testunit as a GPIO. GPIO access
+is not allowed to sleep. Currently, this can only be described using firmware
+nodes. So, for devicetree, you would add something like this to the testunit
+node::
+
+ gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
+
+The following command will trigger the alert with a response of 0xc9 after 1
+second of delay::
+
+ # i2cset -y 0 0x30 5 0xc9 0x00 100 i
+
+If the host controller supports SMBusAlert, this message with debug level
+should appear::
+
+ smbus_alert 0-000c: SMBALERT# from dev 0x64, flag 1
+
+This message may appear more than once because the testunit is implemented in
+software, not hardware, and thus may not be able to react to the host's
+response fast enough. The interrupt count should increase only by one, though::
+
+ # cat /proc/interrupts | grep smbus_alert
+ 93: 1 gpio-rcar 26 Edge smbus_alert
+
+If the host does not respond to the alert within 1 second, the test will be
+aborted and the testunit will report an error.
+
+For this test, the testunit will briefly drop its assigned address and listen
+on the SMBus Alert Response Address (0x0c). It will reassign its original
+address afterwards.
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index 9c8d1d046ea5..1796b3eba37b 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -22,6 +22,11 @@ modules.builtin.modinfo
This file contains modinfo from all modules that are built into the kernel.
Unlike modinfo of a separate module, all fields are prefixed with module name.
+modules.builtin.ranges
+----------------------
+This file contains address offset ranges (per ELF section) for all modules
+that are built into the kernel. Together with System.map, it can be used
+to associate module names with symbols.
Environment variables
=====================
@@ -129,6 +134,11 @@ KBUILD_OUTPUT
-------------
Specify the output directory when building the kernel.
+This variable can also be used to point to the kernel output directory when
+building external modules against a pre-built kernel in a separate build
+directory. Please note that this does NOT specify the output directory for the
+external modules themselves.
+
The output directory can also be specified using "O=...".
Setting "O=..." takes precedence over KBUILD_OUTPUT.
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 71b38a7670f3..43037be96a16 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -70,7 +70,11 @@ applicable everywhere (see syntax).
Every menu entry can have at most one prompt, which is used to display
to the user. Optionally dependencies only for this prompt can be added
- with "if".
+ with "if". If a prompt is not present, the config option is a non-visible
+ symbol, meaning its value cannot be directly changed by the user (for
+ example, by editing ``.config``) and the option will not appear in any
+ config menus. Its value can only be set via "default" and "select" (see
+ below).
- default value: "default" <expr> ["if" <expr>]
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index be43990f1e7f..7964e0c245ae 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -1665,6 +1665,5 @@ Credits
TODO
====
-- Describe how kbuild supports shipped files with _shipped.
- Generating offset header files.
- Add more variables to chapters 7 or 9?
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index 131863142cbb..cd5a54d91e6d 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -4,41 +4,12 @@ Building External Modules
This document describes how to build an out-of-tree kernel module.
-.. Table of Contents
-
- === 1 Introduction
- === 2 How to Build External Modules
- --- 2.1 Command Syntax
- --- 2.2 Options
- --- 2.3 Targets
- --- 2.4 Building Separate Files
- === 3. Creating a Kbuild File for an External Module
- --- 3.1 Shared Makefile
- --- 3.2 Separate Kbuild file and Makefile
- --- 3.3 Binary Blobs
- --- 3.4 Building Multiple Modules
- === 4. Include Files
- --- 4.1 Kernel Includes
- --- 4.2 Single Subdirectory
- --- 4.3 Several Subdirectories
- === 5. Module Installation
- --- 5.1 INSTALL_MOD_PATH
- --- 5.2 INSTALL_MOD_DIR
- === 6. Module Versioning
- --- 6.1 Symbols From the Kernel (vmlinux + modules)
- --- 6.2 Symbols and External Modules
- --- 6.3 Symbols From Another External Module
- === 7. Tips & Tricks
- --- 7.1 Testing for CONFIG_FOO_BAR
-
-
-
-1. Introduction
-===============
+Introduction
+============
"kbuild" is the build system used by the Linux kernel. Modules must use
kbuild to stay compatible with changes in the build infrastructure and
-to pick up the right flags to "gcc." Functionality for building modules
+to pick up the right compiler flags. Functionality for building modules
both in-tree and out-of-tree is provided. The method for building
either is similar, and all modules are initially developed and built
out-of-tree.
@@ -48,11 +19,11 @@ in building out-of-tree (or "external") modules. The author of an
external module should supply a makefile that hides most of the
complexity, so one only has to type "make" to build the module. This is
easily accomplished, and a complete example will be presented in
-section 3.
+section `Creating a Kbuild File for an External Module`_.
-2. How to Build External Modules
-================================
+How to Build External Modules
+=============================
To build external modules, you must have a prebuilt kernel available
that contains the configuration and header files used in the build.
@@ -69,12 +40,12 @@ NOTE: "modules_prepare" will not build Module.symvers even if
CONFIG_MODVERSIONS is set; therefore, a full kernel build needs to be
executed to make module versioning work.
-2.1 Command Syntax
-==================
+Command Syntax
+--------------
The command to build an external module is::
- $ make -C <path_to_kernel_src> M=$PWD
+ $ make -C <path_to_kernel_dir> M=$PWD
The kbuild system knows that an external module is being built
due to the "M=<dir>" option given in the command.
@@ -88,15 +59,18 @@ executed to make module versioning work.
$ make -C /lib/modules/`uname -r`/build M=$PWD modules_install
-2.2 Options
-===========
+Options
+-------
- ($KDIR refers to the path of the kernel source directory.)
+ ($KDIR refers to the path of the kernel source directory, or the path
+ of the kernel output directory if the kernel was built in a separate
+ build directory.)
make -C $KDIR M=$PWD
-C $KDIR
- The directory where the kernel source is located.
+ The directory that contains the kernel and relevant build
+ artifacts used for building an external module.
"make" will actually change to the specified directory
when executing and will change back when finished.
@@ -106,8 +80,8 @@ executed to make module versioning work.
directory where the external module (kbuild file) is
located.
-2.3 Targets
-===========
+Targets
+-------
When building an external module, only a subset of the "make"
targets are available.
@@ -129,7 +103,8 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
/lib/modules/<kernel_release>/updates/, but a prefix may
- be added with INSTALL_MOD_PATH (discussed in section 5).
+ be added with INSTALL_MOD_PATH (discussed in section
+ `Module Installation`_).
clean
Remove all generated files in the module directory only.
@@ -137,8 +112,8 @@ executed to make module versioning work.
help
List the available targets for external modules.
-2.4 Building Separate Files
-===========================
+Building Separate Files
+-----------------------
It is possible to build single files that are part of a module.
This works equally well for the kernel, a module, and even for
@@ -152,8 +127,8 @@ executed to make module versioning work.
make -C $KDIR M=$PWD ./
-3. Creating a Kbuild File for an External Module
-================================================
+Creating a Kbuild File for an External Module
+=============================================
In the last section we saw the command to build a module for the
running kernel. The module is not actually built, however, because a
@@ -180,10 +155,9 @@ module 8123.ko, which is built from the following files::
8123_if.c
8123_if.h
8123_pci.c
- 8123_bin.o_shipped <= Binary blob
-3.1 Shared Makefile
--------------------
+Shared Makefile
+---------------
An external module always includes a wrapper makefile that
supports building the module using "make" with no arguments.
@@ -198,7 +172,7 @@ module 8123.ko, which is built from the following files::
ifneq ($(KERNELRELEASE),)
# kbuild part of makefile
obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
+ 8123-y := 8123_if.o 8123_pci.o
else
# normal makefile
@@ -207,10 +181,6 @@ module 8123.ko, which is built from the following files::
default:
$(MAKE) -C $(KDIR) M=$$PWD
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
endif
The check for KERNELRELEASE is used to separate the two parts
@@ -221,19 +191,18 @@ module 8123.ko, which is built from the following files::
line; the second pass is by the kbuild system, which is
initiated by the parameterized "make" in the default target.
-3.2 Separate Kbuild File and Makefile
--------------------------------------
+Separate Kbuild File and Makefile
+---------------------------------
- In newer versions of the kernel, kbuild will first look for a
- file named "Kbuild," and only if that is not found, will it
- then look for a makefile. Utilizing a "Kbuild" file allows us
- to split up the makefile from example 1 into two files:
+ Kbuild will first look for a file named "Kbuild", and if it is not
+ found, it will then look for "Makefile". Utilizing a "Kbuild" file
+ allows us to split up the "Makefile" from example 1 into two files:
Example 2::
--> filename: Kbuild
obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
+ 8123-y := 8123_if.o 8123_pci.o
--> filename: Makefile
KDIR ?= /lib/modules/`uname -r`/build
@@ -241,68 +210,13 @@ module 8123.ko, which is built from the following files::
default:
$(MAKE) -C $(KDIR) M=$$PWD
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
The split in example 2 is questionable due to the simplicity of
each file; however, some external modules use makefiles
consisting of several hundred lines, and here it really pays
off to separate the kbuild part from the rest.
- The next example shows a backward compatible version.
-
- Example 3::
-
- --> filename: Kbuild
- obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- --> filename: Makefile
- ifneq ($(KERNELRELEASE),)
- # kbuild part of makefile
- include Kbuild
-
- else
- # normal makefile
- KDIR ?= /lib/modules/`uname -r`/build
-
- default:
- $(MAKE) -C $(KDIR) M=$$PWD
-
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
- endif
-
- Here the "Kbuild" file is included from the makefile. This
- allows an older version of kbuild, which only knows of
- makefiles, to be used when the "make" and kbuild parts are
- split into separate files.
-
-3.3 Binary Blobs
-----------------
-
- Some external modules need to include an object file as a blob.
- kbuild has support for this, but requires the blob file to be
- named <filename>_shipped. When the kbuild rules kick in, a copy
- of <filename>_shipped is created with _shipped stripped off,
- giving us <filename>. This shortened filename can be used in
- the assignment to the module.
-
- Throughout this section, 8123_bin.o_shipped has been used to
- build the kernel module 8123.ko; it has been included as
- 8123_bin.o::
-
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- Although there is no distinction between the ordinary source
- files and the binary file, kbuild will pick up different rules
- when creating the object file for the module.
-
-3.4 Building Multiple Modules
-=============================
+Building Multiple Modules
+-------------------------
kbuild supports building multiple modules with a single build
file. For example, if you wanted to build two modules, foo.ko
@@ -315,8 +229,8 @@ module 8123.ko, which is built from the following files::
It is that simple!
-4. Include Files
-================
+Include Files
+=============
Within the kernel, header files are kept in standard locations
according to the following rule:
@@ -334,19 +248,19 @@ according to the following rule:
include/scsi; and architecture specific headers are located
under arch/$(SRCARCH)/include/.
-4.1 Kernel Includes
--------------------
+Kernel Includes
+---------------
To include a header file located under include/linux/, simply
use::
#include <linux/module.h>
- kbuild will add options to "gcc" so the relevant directories
+ kbuild will add options to the compiler so the relevant directories
are searched.
-4.2 Single Subdirectory
------------------------
+Single Subdirectory
+-------------------
External modules tend to place header files in a separate
include/ directory where their source is located, although this
@@ -360,15 +274,11 @@ according to the following rule:
--> filename: Kbuild
obj-m := 8123.o
- ccflags-y := -Iinclude
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- Note that in the assignment there is no space between -I and
- the path. This is a limitation of kbuild: there must be no
- space present.
+ ccflags-y := -I $(src)/include
+ 8123-y := 8123_if.o 8123_pci.o
-4.3 Several Subdirectories
---------------------------
+Several Subdirectories
+----------------------
kbuild can handle files that are spread over several directories.
Consider the following example::
@@ -407,8 +317,8 @@ according to the following rule:
file is located.
-5. Module Installation
-======================
+Module Installation
+===================
Modules which are included in the kernel are installed in the
directory:
@@ -419,8 +329,8 @@ And external modules are installed in:
/lib/modules/$(KERNELRELEASE)/updates/
-5.1 INSTALL_MOD_PATH
---------------------
+INSTALL_MOD_PATH
+----------------
Above are the default directories but as always some level of
customization is possible. A prefix can be added to the
@@ -434,8 +344,8 @@ And external modules are installed in:
calling "make." This has effect when installing both in-tree
and out-of-tree modules.
-5.2 INSTALL_MOD_DIR
--------------------
+INSTALL_MOD_DIR
+---------------
External modules are by default installed to a directory under
/lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
@@ -448,8 +358,8 @@ And external modules are installed in:
=> Install dir: /lib/modules/$(KERNELRELEASE)/gandalf/
-6. Module Versioning
-====================
+Module Versioning
+=================
Module versioning is enabled by the CONFIG_MODVERSIONS tag, and is used
as a simple ABI consistency check. A CRC value of the full prototype
@@ -461,8 +371,8 @@ module.
Module.symvers contains a list of all exported symbols from a kernel
build.
-6.1 Symbols From the Kernel (vmlinux + modules)
------------------------------------------------
+Symbols From the Kernel (vmlinux + modules)
+-------------------------------------------
During a kernel build, a file named Module.symvers will be
generated. Module.symvers contains all exported symbols from
@@ -486,8 +396,8 @@ build.
1) It lists all exported symbols from vmlinux and all modules.
2) It lists the CRC if CONFIG_MODVERSIONS is enabled.
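+
+A Module.symvers line has the form <CRC> <symbol> <module> <export type>,
+optionally followed by a namespace. For example, with an illustrative CRC
+value::
+
+	0x12345678	scsi_add_host	drivers/scsi/scsi_mod	EXPORT_SYMBOL_GPL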
-6.2 Symbols and External Modules
---------------------------------
+Symbols and External Modules
+----------------------------
When building an external module, the build system needs access
to the symbols from the kernel to check if all external symbols
@@ -496,8 +406,8 @@ build.
tree. During the MODPOST step, a new Module.symvers file will be
written containing all exported symbols from that external module.
-6.3 Symbols From Another External Module
-----------------------------------------
+Symbols From Another External Module
+------------------------------------
Sometimes, an external module uses exported symbols from
another external module. Kbuild needs to have full knowledge of
@@ -537,11 +447,11 @@ build.
initialization of its symbol tables.
-7. Tips & Tricks
-================
+Tips & Tricks
+=============
-7.1 Testing for CONFIG_FOO_BAR
-------------------------------
+Testing for CONFIG_FOO_BAR
+--------------------------
Modules often need to check for certain `CONFIG_` options to
decide if a specific feature is included in the module. In
@@ -553,9 +463,3 @@ build.
ext2-y := balloc.o bitmap.o dir.o
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o
-
- External modules have traditionally used "grep" to check for
- specific `CONFIG_` settings directly in .config. This usage is
- broken. As introduced before, external modules should use
- kbuild for building and can therefore use the same methods as
- in-tree modules when testing for `CONFIG_` definitions.
diff --git a/Documentation/leds/leds-blinkm.rst b/Documentation/leds/leds-blinkm.rst
index 2d3c226a371a..647be1c6c552 100644
--- a/Documentation/leds/leds-blinkm.rst
+++ b/Documentation/leds/leds-blinkm.rst
@@ -13,9 +13,31 @@ The device accepts RGB and HSB color values through separate commands.
Also you can store blinking sequences as "scripts" in
the controller and run them. Also fading is an option.
-The interface this driver provides is 2-fold:
+The interface this driver provides is 3-fold:
-a) LED class interface for use with triggers
+a) LED multicolor class interface for use with triggers
+#######################################################
+
+The registration follows the scheme::
+
+ blinkm-<i2c-bus-nr>-<i2c-device-nr>:rgb:indicator
+
+ $ ls -h /sys/class/leds/blinkm-1-9:rgb:indicator
+ brightness device max_brightness multi_index multi_intensity power subsystem trigger uevent
+
+Hue is controlled by the multi_intensity file and lightness is controlled by
+the brightness file.
+
+The order in which to write the intensity values can be found in multi_index.
+Exactly three values between 0 and 255 must be written to multi_intensity to
+change the color::
+
+ $ echo 255 100 50 > multi_intensity
+
+The overall lightness can be changed by writing a value between 0 and 255 to the
+brightness file.
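+
+For example, assuming ``multi_index`` reports "red green blue", the LED can
+be set to full red at half lightness like this::
+
+ $ cd /sys/class/leds/blinkm-1-9:rgb:indicator
+ $ echo 255 0 0 > multi_intensity
+ $ echo 128 > brightness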
+
+b) LED class interface for use with triggers
############################################
The registration follows the scheme::
@@ -79,6 +101,7 @@ E.g.::
-as of 6/2012
+as of 07/2024
dl9pf <at> gmx <dot> de
+jstrauss <at> mailbox <dot> org
diff --git a/Documentation/leds/leds-mlxcpld.rst b/Documentation/leds/leds-mlxcpld.rst
index 528582429e0b..c520a134d91e 100644
--- a/Documentation/leds/leds-mlxcpld.rst
+++ b/Documentation/leds/leds-mlxcpld.rst
@@ -115,4 +115,4 @@ Driver provides the following LEDs for the system "msn2100":
- [1,1,1,1] = Blue blink 6Hz
Driver supports HW blinking at 3Hz and 6Hz frequency (50% duty cycle).
-For 3Hz duty cylce is about 167 msec, for 6Hz is about 83 msec.
+For 3Hz duty cycle is about 167 msec, for 6Hz is about 83 msec.
diff --git a/Documentation/leds/well-known-leds.txt b/Documentation/leds/well-known-leds.txt
index 67b44704801f..17ef78faf1f3 100644
--- a/Documentation/leds/well-known-leds.txt
+++ b/Documentation/leds/well-known-leds.txt
@@ -72,6 +72,14 @@ Good: "platform:*:charging" (allwinner sun50i, leds-cht-wcove)
Good: ":backlight" (Motorola Droid 4)
+* Indicators
+
+Good: ":indicator" (Blinkm)
+
+* RGB
+
+Good: ":rgb" (Blinkm)
+
* Ethernet LEDs
Currently two types of Network LEDs are support, those controlled by
diff --git a/Documentation/livepatch/livepatch.rst b/Documentation/livepatch/livepatch.rst
index 68e3651e8af9..acb90164929e 100644
--- a/Documentation/livepatch/livepatch.rst
+++ b/Documentation/livepatch/livepatch.rst
@@ -50,7 +50,7 @@ some limitations, see below.
3. Consistency model
====================
-Functions are there for a reason. They take some input parameters, get or
+Functions are there for a reason. They take some input parameters, acquire or
release locks, read, process, and even write some data in a defined way,
have return values. In other words, each function has a defined semantic.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 4202174a6262..93d58d9a428b 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -88,7 +88,6 @@ CONTENTS
(*) The effects of the cpu cache.
- - Cache coherency.
- Cache coherency vs DMA.
- Cache coherency vs MMIO.
@@ -677,8 +676,6 @@ include/linux/rcupdate.h. This permits the current target of an RCU'd
pointer to be replaced with a new modified target, without the replacement
target appearing to be incompletely initialised.
-See also the subsection on "Cache Coherency" for a more thorough example.
-
CONTROL DEPENDENCIES
--------------------
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 8730c246ceaa..f9c50525bdbf 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -586,7 +586,7 @@ API, and return the results to the user-space.
The ABIs are designed to be used for user space applications development,
rather than human beings' fingers. Human users are recommended to use such
user space tools. One such Python-written user space tool is available at
-Github (https://github.com/awslabs/damo), Pypi
+Github (https://github.com/damonitor/damo), Pypi
(https://pypistats.org/packages/damo), and Fedora
(https://packages.fedoraproject.org/pkgs/python-damo/damo/).
diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst
index feccf6a0f6c3..2365c9a3c1f0 100644
--- a/Documentation/mm/damon/maintainer-profile.rst
+++ b/Documentation/mm/damon/maintainer-profile.rst
@@ -7,23 +7,27 @@ The DAMON subsystem covers the files that are listed in 'DATA ACCESS MONITOR'
section of 'MAINTAINERS' file.
The mailing lists for the subsystem are damon@lists.linux.dev and
-linux-mm@kvack.org. Patches should be made against the mm-unstable tree [1]_
-whenever possible and posted to the mailing lists.
+linux-mm@kvack.org. Patches should be made against the mm-unstable `tree
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ whenever possible and posted to
+the mailing lists.
SCM Trees
---------
There are multiple Linux trees for DAMON development. Patches under
-development or testing are queued in damon/next [2]_ by the DAMON maintainer.
-Sufficiently reviewed patches will be queued in mm-unstable [1]_ by the memory
-management subsystem maintainer. After more sufficient tests, the patches will
-be queued in mm-stable [3]_ , and finally pull-requested to the mainline by the
-memory management subsystem maintainer.
-
-Note again the patches for mm-unstable tree [1]_ are queued by the memory
+development or testing are queued in `damon/next
+<https://git.kernel.org/sj/h/damon/next>`_ by the DAMON maintainer.
+Sufficiently reviewed patches will be queued in `mm-unstable
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ by the memory management
+subsystem maintainer. After further testing, the patches will be queued
+in `mm-stable <https://git.kernel.org/akpm/mm/h/mm-stable>`_, and finally
+pull-requested to the mainline by the memory management subsystem maintainer.
+
+Note again that the patches for the mm-unstable `tree
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ are queued by the memory
management subsystem maintainer. If the patches requires some patches in
-damon/next tree [2]_ which not yet merged in mm-unstable, please make sure the
-requirement is clearly specified.
+the damon/next `tree <https://git.kernel.org/sj/h/damon/next>`_ which are not
+yet merged in mm-unstable, please make sure the requirement is clearly
+specified.
Submit checklist addendum
-------------------------
@@ -32,18 +36,27 @@ When making DAMON changes, you should do below.
- Build changes related outputs including kernel and documents.
- Ensure the builds introduce no new errors or warnings.
-- Run and ensure no new failures for DAMON selftests [4]_ and kunittests [5]_ .
+- Run and ensure no new failures for DAMON `selftests
+ <https://github.com/awslabs/damon-tests/blob/master/corr/run.sh#L49>`_ and
+ `kunittests
+ <https://github.com/awslabs/damon-tests/blob/master/corr/tests/kunit.sh>`_.
Further doing below and putting the results will be helpful.
-- Run damon-tests/corr [6]_ for normal changes.
-- Run damon-tests/perf [7]_ for performance changes.
+- Run `damon-tests/corr
+ <https://github.com/awslabs/damon-tests/tree/master/corr>`_ for normal
+ changes.
+- Run `damon-tests/perf
+ <https://github.com/awslabs/damon-tests/tree/master/perf>`_ for performance
+ changes.
Key cycle dates
---------------
-Patches can be sent anytime. Key cycle dates of the mm-unstable [1]_ and
-mm-stable [3]_ trees depend on the memory management subsystem maintainer.
+Patches can be sent anytime. Key cycle dates of the `mm-unstable
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ and `mm-stable
+<https://git.kernel.org/akpm/mm/h/mm-stable>`_ trees depend on the memory
+management subsystem maintainer.
Review cadence
--------------
@@ -58,16 +71,17 @@ Mailing tool
Like many other Linux kernel subsystems, DAMON uses the mailing lists
(damon@lists.linux.dev and linux-mm@kvack.org) as the major communication
-channel. There is a simple tool called HacKerMaiL (``hkml``) [8]_ , which is
-for people who are not very familiar with the mailing lists based
-communication. The tool could be particularly helpful for DAMON community
-members since it is developed and maintained by DAMON maintainer. The tool is
-also officially announced to support DAMON and general Linux kernel development
-workflow.
-
-In other words, ``hkml`` [8]_ is a mailing tool for DAMON community, which
-DAMON maintainer is committed to support. Please feel free to try and report
-issues or feature requests for the tool to the maintainer.
+channel. There is a simple tool called `HacKerMaiL
+<https://github.com/damonitor/hackermail>`_ (``hkml``), which is for people who
+are not very familiar with mailing-list-based communication. The tool could be
+particularly helpful for DAMON community members since it is developed and
+maintained by the DAMON maintainer. The tool is also officially announced to
+support DAMON and the general Linux kernel development workflow.
+
+In other words, `hkml <https://github.com/damonitor/hackermail>`_ is a mailing
+tool for the DAMON community, which the DAMON maintainer is committed to
+supporting. Please feel free to try it and report issues or feature requests
+for the tool to the maintainer.
Community meetup
----------------
@@ -83,17 +97,9 @@ members including the maintainer. The maintainer shares the available time
slots, and attendees should reserve one of those at least 24 hours before the
time slot, by reaching out to the maintainer.
-Schedules and available reservation time slots are available at the Google doc
-[9]_ . DAMON maintainer will also provide periodic reminder to the mailing
-list (damon@lists.linux.dev).
-
-
-.. [1] https://git.kernel.org/akpm/mm/h/mm-unstable
-.. [2] https://git.kernel.org/sj/h/damon/next
-.. [3] https://git.kernel.org/akpm/mm/h/mm-stable
-.. [4] https://github.com/awslabs/damon-tests/blob/master/corr/run.sh#L49
-.. [5] https://github.com/awslabs/damon-tests/blob/master/corr/tests/kunit.sh
-.. [6] https://github.com/awslabs/damon-tests/tree/master/corr
-.. [7] https://github.com/awslabs/damon-tests/tree/master/perf
-.. [8] https://github.com/damonitor/hackermail
-.. [9] https://docs.google.com/document/d/1v43Kcj3ly4CYqmAkMaZzLiM2GEnWfgdGbZAH3mi2vpM/edit?usp=sharing
+Schedules and available reservation time slots are available at the Google `doc
+<https://docs.google.com/document/d/1v43Kcj3ly4CYqmAkMaZzLiM2GEnWfgdGbZAH3mi2vpM/edit?usp=sharing>`_.
+There is also a public Google `calendar
+<https://calendar.google.com/calendar/u/0?cid=ZDIwOTA4YTMxNjc2MDQ3NTIyMmUzYTM5ZmQyM2U4NDA0ZGIwZjBiYmJlZGQxNDM0MmY4ZTRjOTE0NjdhZDRiY0Bncm91cC5jYWxlbmRhci5nb29nbGUuY29t>`_
+that has the events. Anyone can subscribe to it. The DAMON maintainer will
+also send periodic reminders to the mailing list (damon@lists.linux.dev).
diff --git a/Documentation/mm/hmm.rst b/Documentation/mm/hmm.rst
index 0595098a74d9..f6d53c37a2ca 100644
--- a/Documentation/mm/hmm.rst
+++ b/Documentation/mm/hmm.rst
@@ -66,7 +66,7 @@ combinatorial explosion in the library entry points.
Finally, with the advance of high level language constructs (in C++ but in
other languages too) it is now possible for the compiler to leverage GPUs and
other devices without programmer knowledge. Some compiler identified patterns
-are only do-able with a shared address space. It is also more reasonable to use
+are only doable with a shared address space. It is also more reasonable to use
a shared address space for all other patterns.
@@ -267,7 +267,7 @@ functions are designed to make drivers easier to write and to centralize common
code across drivers.
Before migrating pages to device private memory, special device private
-``struct page`` need to be created. These will be used as special "swap"
+``struct page`` needs to be created. These will be used as special "swap"
page table entries so that a CPU process will fault if it tries to access
a page that has been migrated to device private memory.
@@ -322,7 +322,7 @@ between device driver specific code and shared common code:
The ``invalidate_range_start()`` callback is passed a
``struct mmu_notifier_range`` with the ``event`` field set to
``MMU_NOTIFY_MIGRATE`` and the ``owner`` field set to
- the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This is
+ the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This
allows the device driver to skip the invalidation callback and only
invalidate device private MMU mappings that are actually migrating.
This is explained more in the next section.
@@ -405,7 +405,7 @@ can be used to make a memory range inaccessible from userspace.
This replaces all mappings for pages in the given range with special swap
entries. Any attempt to access the swap entry results in a fault which is
-resovled by replacing the entry with the original mapping. A driver gets
+resolved by replacing the entry with the original mapping. A driver gets
notified that the mapping has been changed by MMU notifiers, after which point
it will no longer have exclusive access to the page. Exclusive access is
guaranteed to last until the driver drops the page lock and page reference, at
@@ -431,7 +431,7 @@ Same decision was made for memory cgroup. Device memory pages are accounted
against same memory cgroup a regular page would be accounted to. This does
simplify migration to and from device memory. This also means that migration
back from device memory to regular memory cannot fail because it would
-go above memory cgroup limit. We might revisit this choice latter on once we
+go above memory cgroup limit. We might revisit this choice later on once we
get more experience in how device memory is used and its impact on memory
resource control.
diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst
index f1ce67a26615..519b35a4caf5 100644
--- a/Documentation/mm/page_migration.rst
+++ b/Documentation/mm/page_migration.rst
@@ -63,15 +63,15 @@ and then a low level description of how the low level details work.
In kernel use of migrate_pages()
================================
-1. Remove pages from the LRU.
+1. Remove folios from the LRU.
- Lists of pages to be migrated are generated by scanning over
- pages and moving them into lists. This is done by
- calling isolate_lru_page().
- Calling isolate_lru_page() increases the references to the page
- so that it cannot vanish while the page migration occurs.
+ Lists of folios to be migrated are generated by scanning over
+ folios and moving them into lists. This is done by
+ calling folio_isolate_lru().
+ Calling folio_isolate_lru() increases the references to the folio
+ so that it cannot vanish while the folio migration occurs.
It also prevents the swapper or other scans from encountering
- the page.
+ the folio.
2. We need to have a function of type new_folio_t that can be
passed to migrate_pages(). This function should figure out
@@ -84,10 +84,10 @@ In kernel use of migrate_pages()
How migrate_pages() works
=========================
-migrate_pages() does several passes over its list of pages. A page is moved
-if all references to a page are removable at the time. The page has
-already been removed from the LRU via isolate_lru_page() and the refcount
-is increased so that the page cannot be freed while page migration occurs.
+migrate_pages() does several passes over its list of folios. A folio is moved
+if all references to a folio are removable at the time. The folio has
+already been removed from the LRU via folio_isolate_lru() and the refcount
+is increased so that the folio cannot be freed while folio migration occurs.
Steps:
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 1ba0ad63246c..a2cd8800d527 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -31,10 +31,10 @@ Design principles
feature that applies to all dynamic high order allocations in the
kernel)
-get_user_pages and follow_page
-==============================
+get_user_pages and pin_user_pages
+=================================
-get_user_pages and follow_page if run on a hugepage, will return the
+get_user_pages and pin_user_pages, if run on a hugepage, will return the
head or tail pages as usual (exactly as they would do on
hugetlbfs). Most GUP users will only care about the actual physical
address of the page and its temporary pinning to release after the I/O
diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index 2feb2ed51ae2..8d11fe6a0854 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -80,7 +80,7 @@ on an additional LRU list for a few reasons:
(2) We want to be able to migrate unevictable folios between nodes for memory
defragmentation, workload management and memory hotplug. The Linux kernel
can only migrate folios that it can successfully isolate from the LRU
- lists (or "Movable" pages: outside of consideration here). If we were to
+ lists (or "Movable" folios: outside of consideration here). If we were to
maintain folios elsewhere than on an LRU-like list, where they can be
detected by folio_isolate_lru(), we would prevent their migration.
@@ -230,7 +230,7 @@ In Nick's patch, he used one of the struct page LRU list link fields as a count
of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
earlier). But this use of the link field for a count prevented the management
of the pages on an LRU list, and thus mlocked pages were not migratable as
-isolate_lru_page() could not detect them, and the LRU list link field was not
+folio_isolate_lru() could not detect them, and the LRU list link field was not
available to the migration subsystem.
Nick resolved this by putting mlocked pages back on the LRU list before
@@ -253,8 +253,8 @@ Basic Management
mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable
pages. When such a page has been "noticed" by the memory management subsystem,
-the page is marked with the PG_mlocked flag. This can be manipulated using the
-PageMlocked() functions.
+the folio is marked with the PG_mlocked flag. This can be manipulated using
+the folio_set_mlocked() and folio_clear_mlocked() functions.
A PG_mlocked page will be placed on the unevictable list when it is added to
the LRU. Such pages can be "noticed" by memory management in several places:
diff --git a/Documentation/mm/vmalloced-kernel-stacks.rst b/Documentation/mm/vmalloced-kernel-stacks.rst
index 4edca515bfd7..5bc0f7ceea89 100644
--- a/Documentation/mm/vmalloced-kernel-stacks.rst
+++ b/Documentation/mm/vmalloced-kernel-stacks.rst
@@ -110,7 +110,7 @@ Bulk of the code is in:
`kernel/fork.c <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/fork.c>`.
stack_vm_area pointer in task_struct keeps track of the virtually allocated
-stack and a non-null stack_vm_area pointer serves as a indication that the
+stack and a non-null stack_vm_area pointer serves as an indication that the
virtually mapped kernel stacks are enabled.
::
@@ -120,8 +120,8 @@ virtually mapped kernel stacks are enabled.
Stack overflow handling
-----------------------
-Leading and trailing guard pages help detect stack overflows. When stack
-overflows into the guard pages, handlers have to be careful not overflow
+Leading and trailing guard pages help detect stack overflows. When the stack
+overflows into the guard pages, handlers have to be careful not to overflow
the stack again. When handlers are called, it is likely that very little
stack space is left.
@@ -148,6 +148,6 @@ Conclusions
- THREAD_INFO_IN_TASK gets rid of arch-specific thread_info entirely and
simply embed the thread_info (containing only flags) and 'int cpu' into
task_struct.
-- The thread stack can be free'ed as soon as the task is dead (without
+- The thread stack can be freed as soon as the task is dead (without
waiting for RCU) and then, if vmapped stacks are in use, cache the
entire stack for reuse on the same cpu.
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index 94132d30e0e0..f2894ca35de8 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -345,6 +345,26 @@ attribute-sets:
Value is in PPM (parts per million).
This may be implemented for example for pin of type
PIN_TYPE_SYNCE_ETH_PORT.
+ -
+ name: esync-frequency
+ type: u64
+ doc: |
+ Frequency of Embedded SYNC signal. If provided, the pin is configured
+ with a SYNC signal embedded into its base clock frequency.
+ -
+ name: esync-frequency-supported
+ type: nest
+ multi-attr: true
+ nested-attributes: frequency-range
+ doc: |
+        If provided, a pin is capable of embedding a SYNC signal (within the
+        given range) into its base frequency signal.
+ -
+ name: esync-pulse
+ type: u32
+ doc: |
+        The ratio of the high to low state of a SYNC signal pulse embedded
+        into the base clock frequency. The value is in percent.
-
name: pin-parent-device
subset-of: pin
@@ -510,6 +530,9 @@ operations:
- phase-adjust-max
- phase-adjust
- fractional-frequency-offset
+ - esync-frequency
+ - esync-frequency-supported
+ - esync-pulse
dump:
request:
@@ -536,6 +559,7 @@ operations:
- parent-device
- parent-pin
- phase-adjust
+ - esync-frequency
-
name: pin-create-ntf
doc: Notification about pin appearing
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index ea21fe135b97..6a050d755b9c 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -39,6 +39,11 @@ definitions:
- ovld-detected
- power-not-available
- short-detected
+ -
+ name: phy-upstream-type
+ enum-name:
+ type: enum
+ entries: [ mac, phy ]
attribute-sets:
-
@@ -54,6 +59,9 @@ attribute-sets:
name: flags
type: u32
enum: header-flags
+ -
+ name: phy-index
+ type: u32
-
name: bitset-bit
@@ -659,6 +667,9 @@ attribute-sets:
-
name: code
type: u8
+ -
+ name: src
+ type: u32
-
name: cable-fault-length
attributes:
@@ -668,6 +679,9 @@ attribute-sets:
-
name: cm
type: u32
+ -
+ name: src
+ type: u32
-
name: cable-nest
attributes:
@@ -1022,12 +1036,16 @@ attribute-sets:
-
name: indir
type: binary
+ sub-type: u32
-
name: hkey
type: binary
-
name: input_xfrm
type: u32
+ -
+ name: start-context
+ type: u32
-
name: plca
attributes:
@@ -1085,6 +1103,35 @@ attribute-sets:
-
name: total
type: uint
+ -
+ name: phy
+ attributes:
+ -
+ name: header
+ type: nest
+ nested-attributes: header
+ -
+ name: index
+ type: u32
+ -
+ name: drvname
+ type: string
+ -
+ name: name
+ type: string
+ -
+ name: upstream-type
+ type: u32
+ enum: phy-upstream-type
+ -
+ name: upstream-index
+ type: u32
+ -
+ name: upstream-sfp-name
+ type: string
+ -
+ name: downstream-sfp-name
+ type: string
operations:
enum-model: directional
@@ -1749,12 +1796,12 @@ operations:
attribute-set: rss
- do: &rss-get-op
+ do:
request:
attributes:
- header
- context
- reply:
+ reply: &rss-reply
attributes:
- header
- context
@@ -1762,6 +1809,12 @@ operations:
- indir
- hkey
- input_xfrm
+ dump:
+ request:
+ attributes:
+ - header
+ - start-context
+ reply: *rss-reply
-
name: plca-get-cfg
doc: Get PLCA params.
@@ -1877,3 +1930,24 @@ operations:
- status-msg
- done
- total
+ -
+ name: phy-get
+ doc: Get PHY devices attached to an interface
+
+ attribute-set: phy
+
+ do: &phy-get-op
+ request:
+ attributes:
+ - header
+ reply:
+ attributes:
+ - header
+ - index
+ - drvname
+ - name
+ - upstream-type
+ - upstream-index
+ - upstream-sfp-name
+ - downstream-sfp-name
+ dump: *phy-get-op
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index af525ed29792..30d8342cacc8 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -109,7 +109,6 @@ attribute-sets:
-
name: port
type: u16
- byte-order: big-endian
-
name: flags
type: u32
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 959755be4d7f..08412c279297 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -167,6 +167,10 @@ attribute-sets:
"re-attached", they are just waiting to disappear.
Attribute is absent if Page Pool has not been detached, and
can still be used to allocate new memory.
+ -
+ name: dmabuf
+ doc: ID of the dmabuf this page-pool is attached to.
+ type: u32
-
name: page-pool-info
subset-of: page-pool
@@ -268,6 +272,10 @@ attribute-sets:
name: napi-id
doc: ID of the NAPI instance which services this queue.
type: u32
+ -
+ name: dmabuf
+ doc: ID of the dmabuf attached to this queue, if any.
+ type: u32
-
name: qstats
@@ -457,6 +465,39 @@ attribute-sets:
Number of times driver re-started accepting send
requests to this queue from the stack.
type: uint
+ -
+ name: queue-id
+ subset-of: queue
+ attributes:
+ -
+ name: id
+ -
+ name: type
+ -
+ name: dmabuf
+ attributes:
+ -
+ name: ifindex
+ doc: netdev ifindex to bind the dmabuf to.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: queues
+ doc: receive queues to bind the dmabuf to.
+ type: nest
+ nested-attributes: queue-id
+ multi-attr: true
+ -
+ name: fd
+ doc: dmabuf file descriptor to bind.
+ type: u32
+ -
+ name: id
+ doc: id of the dmabuf binding
+ type: u32
+ checks:
+ min: 1
operations:
list:
@@ -510,6 +551,7 @@ operations:
- inflight
- inflight-mem
- detach-time
+ - dmabuf
dump:
reply: *pp-reply
config-cond: page-pool
@@ -574,6 +616,7 @@ operations:
- type
- napi-id
- ifindex
+ - dmabuf
dump:
request:
attributes:
@@ -619,6 +662,24 @@ operations:
- rx-bytes
- tx-packets
- tx-bytes
+ -
+ name: bind-rx
+ doc: Bind dmabuf to netdev
+ attribute-set: dmabuf
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - ifindex
+ - fd
+ - queues
+ reply:
+ attributes:
+ - id
+
+kernel-family:
+  headers: [ "linux/list.h" ]
+ sock-priv: struct list_head
mcast-groups:
list:
diff --git a/Documentation/netlink/specs/nftables.yaml b/Documentation/netlink/specs/nftables.yaml
index dff2a18f3d90..bd938bd01b6b 100644
--- a/Documentation/netlink/specs/nftables.yaml
+++ b/Documentation/netlink/specs/nftables.yaml
@@ -63,6 +63,13 @@ definitions:
- sdifname
- bri-broute
-
+ name: bitwise-ops
+ type: enum
+ entries:
+ - bool
+ - lshift
+ - rshift
+ -
name: cmp-ops
type: enum
entries:
@@ -125,6 +132,99 @@ definitions:
- object
- concat
- expr
+ -
+ name: lookup-flags
+ type: flags
+ entries:
+ - invert
+ -
+ name: ct-keys
+ type: enum
+ entries:
+ - state
+ - direction
+ - status
+ - mark
+ - secmark
+ - expiration
+ - helper
+ - l3protocol
+ - src
+ - dst
+ - protocol
+ - proto-src
+ - proto-dst
+ - labels
+ - pkts
+ - bytes
+ - avgpkt
+ - zone
+ - eventmask
+ - src-ip
+ - dst-ip
+ - src-ip6
+ - dst-ip6
+ - ct-id
+ -
+ name: ct-direction
+ type: enum
+ entries:
+ - original
+ - reply
+ -
+ name: quota-flags
+ type: flags
+ entries:
+ - invert
+ - depleted
+ -
+ name: verdict-code
+ type: enum
+ entries:
+ - name: continue
+ value: 0xffffffff
+ - name: break
+ value: 0xfffffffe
+ - name: jump
+ value: 0xfffffffd
+ - name: goto
+ value: 0xfffffffc
+ - name: return
+ value: 0xfffffffb
+ - name: drop
+ value: 0
+ - name: accept
+ value: 1
+ - name: stolen
+ value: 2
+ - name: queue
+ value: 3
+ - name: repeat
+ value: 4
+ -
+ name: fib-result
+ type: enum
+ entries:
+ - oif
+ - oifname
+ - addrtype
+ -
+ name: fib-flags
+ type: flags
+ entries:
+ - saddr
+ - daddr
+ - mark
+ - iif
+ - oif
+ - present
+ -
+ name: reject-types
+ type: enum
+ entries:
+ - icmp-unreach
+ - tcp-rst
+ - icmpx-unreach
attribute-sets:
-
@@ -611,9 +711,10 @@ attribute-sets:
type: u64
byte-order: big-endian
-
- name: flags # TODO
+ name: flags
type: u32
byte-order: big-endian
+ enum: quota-flags
-
name: pad
type: pad
@@ -665,6 +766,38 @@ attribute-sets:
type: nest
nested-attributes: hook-dev-attrs
-
+ name: expr-bitwise-attrs
+ attributes:
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: len
+ type: u32
+ byte-order: big-endian
+ -
+ name: mask
+ type: nest
+ nested-attributes: data-attrs
+ -
+ name: xor
+ type: nest
+ nested-attributes: data-attrs
+ -
+ name: op
+ type: u32
+ byte-order: big-endian
+ enum: bitwise-ops
+ -
+ name: data
+ type: nest
+ nested-attributes: data-attrs
+ -
name: expr-cmp-attrs
attributes:
-
@@ -698,6 +831,7 @@ attribute-sets:
name: code
type: u32
byte-order: big-endian
+ enum: verdict-code
-
name: chain
type: string
@@ -719,6 +853,43 @@ attribute-sets:
name: pad
type: pad
-
+ name: expr-fib-attrs
+ attributes:
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: result
+ type: u32
+ byte-order: big-endian
+ enum: fib-result
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ enum: fib-flags
+ -
+ name: expr-ct-attrs
+ attributes:
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: key
+ type: u32
+ byte-order: big-endian
+ enum: ct-keys
+ -
+ name: direction
+ type: u8
+ enum: ct-direction
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
name: expr-flow-offload-attrs
attributes:
-
@@ -737,6 +908,31 @@ attribute-sets:
type: nest
nested-attributes: data-attrs
-
+ name: expr-lookup-attrs
+ attributes:
+ -
+ name: set
+ type: string
+ doc: Name of set to use
+ -
+        name: set-id
+ type: u32
+ byte-order: big-endian
+ doc: ID of set to use
+ -
+ name: sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: dreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: flags
+ type: u32
+ byte-order: big-endian
+ enum: lookup-flags
+ -
name: expr-meta-attrs
attributes:
-
@@ -821,6 +1017,30 @@ attribute-sets:
type: u32
byte-order: big-endian
-
+ name: expr-reject-attrs
+ attributes:
+ -
+ name: type
+ type: u32
+ byte-order: big-endian
+ enum: reject-types
+ -
+ name: icmp-code
+ type: u8
+ -
+ name: expr-target-attrs
+ attributes:
+ -
+ name: name
+ type: string
+ -
+ name: rev
+ type: u32
+ byte-order: big-endian
+ -
+ name: info
+ type: binary
+ -
name: expr-tproxy-attrs
attributes:
-
@@ -835,13 +1055,38 @@ attribute-sets:
name: reg-port
type: u32
byte-order: big-endian
+ -
+ name: expr-objref-attrs
+ attributes:
+ -
+ name: imm-type
+ type: u32
+ byte-order: big-endian
+ -
+ name: imm-name
+ type: string
+ doc: object name
+ -
+ name: set-sreg
+ type: u32
+ byte-order: big-endian
+ -
+ name: set-name
+ type: string
+ doc: name of object map
+ -
+ name: set-id
+ type: u32
+ byte-order: big-endian
+ doc: id of object map
sub-messages:
-
name: expr-ops
formats:
-
- value: bitwise # TODO
+ value: bitwise
+ attribute-set: expr-bitwise-attrs
-
value: cmp
attribute-set: expr-cmp-attrs
@@ -849,7 +1094,11 @@ sub-messages:
value: counter
attribute-set: expr-counter-attrs
-
- value: ct # TODO
+ value: ct
+ attribute-set: expr-ct-attrs
+ -
+ value: fib
+ attribute-set: expr-fib-attrs
-
value: flow_offload
attribute-set: expr-flow-offload-attrs
@@ -857,7 +1106,8 @@ sub-messages:
value: immediate
attribute-set: expr-immediate-attrs
-
- value: lookup # TODO
+ value: lookup
+ attribute-set: expr-lookup-attrs
-
value: meta
attribute-set: expr-meta-attrs
@@ -865,9 +1115,21 @@ sub-messages:
value: nat
attribute-set: expr-nat-attrs
-
+ value: objref
+ attribute-set: expr-objref-attrs
+ -
value: payload
attribute-set: expr-payload-attrs
-
+ value: quota
+ attribute-set: quota-attrs
+ -
+ value: reject
+ attribute-set: expr-reject-attrs
+ -
+ value: target
+ attribute-set: expr-target-attrs
+ -
value: tproxy
attribute-set: expr-tproxy-attrs
-
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
index de08c12fd56f..0c4d5d40cae9 100644
--- a/Documentation/netlink/specs/rt_link.yaml
+++ b/Documentation/netlink/specs/rt_link.yaml
@@ -903,6 +903,22 @@ definitions:
- cfm-config
- cfm-status
- mst
+ -
+ name: netkit-policy
+ type: enum
+ entries:
+ -
+ name: forward
+ value: 0
+ -
+ name: blackhole
+ value: 2
+ -
+ name: netkit-mode
+ type: enum
+ entries:
+ - name: l2
+ - name: l3
attribute-sets:
-
@@ -2109,6 +2125,28 @@ attribute-sets:
-
name: id
type: u32
+ -
+ name: linkinfo-netkit-attrs
+ name-prefix: ifla-netkit-
+ attributes:
+ -
+ name: peer-info
+ type: binary
+ -
+ name: primary
+ type: u8
+ -
+ name: policy
+ type: u32
+ enum: netkit-policy
+ -
+ name: peer-policy
+ type: u32
+ enum: netkit-policy
+ -
+ name: mode
+ type: u32
+ enum: netkit-mode
sub-messages:
-
@@ -2147,6 +2185,9 @@ sub-messages:
-
value: vrf
attribute-set: linkinfo-vrf-attrs
+ -
+ value: netkit
+ attribute-set: linkinfo-netkit-attrs
-
name: linkinfo-member-data-msg
formats:
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index a4c7d0c65fd7..4561e8ab9e08 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -230,6 +230,11 @@ per-queue stats) from the device.
In addition the driver logs the stats to syslog upon device reset.
+On supported instance types, the statistics will also include the
+ENA Express data (fields prefixed with `ena_srd`). For complete
+documentation of ENA Express data, refer to
+https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ena-express.html#ena-express-monitor
+
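+For example, the ENA Express counters can be inspected like any other driver
+statistics (the interface name is illustrative)::
+
+  ethtool -S eth1 | grep ena_srd
+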
MTU
===
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 6932d8c043c2..6fc1961492b7 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -44,6 +44,7 @@ Contents:
marvell/octeon_ep
marvell/octeon_ep_vf
mellanox/mlx5/index
+ meta/fbnic
microsoft/netvsc
neterion/s2io
netronome/nfp
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index 3bd72577af9a..99d95be4d159 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -218,6 +218,22 @@ the software port.
[#accel]_.
- Informative
+ * - `rx[i]_hds_nosplit_packets`
+ - Number of packets that were not split in header/data split mode. A
+ packet will not get split when the hardware does not support its
+       protocol splitting. An example of such a protocol is ICMPv4/v6. Currently
+ TCP and UDP with IPv4/IPv6 are supported for header/data split
+ [#accel]_.
+ - Informative
+
+ * - `rx[i]_hds_nosplit_bytes`
+ - Number of bytes for packets that were not split in header/data split
+ mode. A packet will not get split when the hardware does not support its
+       protocol splitting. An example of such a protocol is ICMPv4/v6. Currently
+ TCP and UDP with IPv4/IPv6 are supported for header/data split
+ [#accel]_.
+ - Informative
+
* - `rx[i]_lro_packets`
- The number of LRO packets received on ring i [#accel]_.
- Acceleration
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
index 20d3b7e87049..34e911480108 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
@@ -130,6 +130,9 @@ Enabling the driver and kconfig options
| Build support for software-managed steering in the NIC.
+**CONFIG_MLX5_HW_STEERING=(y/n)**
+
+| Build support for hardware-managed steering in the NIC.
**CONFIG_MLX5_TC_CT=(y/n)**
diff --git a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
new file mode 100644
index 000000000000..32ff114f5c26
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst
@@ -0,0 +1,29 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=====================================
+Meta Platforms Host Network Interface
+=====================================
+
+Firmware Versions
+-----------------
+
+fbnic has three components stored on the flash which are provided in one PLDM
+image:
+
+1. fw - The control firmware used to view and modify firmware settings, request
+ firmware actions, and retrieve firmware counters outside of the data path.
+ This is the firmware which fbnic_fw.c interacts with.
+2. bootloader - The firmware which validates firmware security and controls
+   basic operations, including loading and updating the firmware. This is
+   also known as the cmrt firmware.
+3. undi - This is the UEFI driver which is based on the Linux driver.
+
+fbnic stores two copies of these three components on flash. This allows fbnic
+to fall back to an older version of firmware automatically in case the
+firmware fails to boot. Version information for both copies is provided as
+"running" and "stored". The undi version is only provided as "stored", since
+it is not actively running once the Linux driver takes over.
+
+devlink dev info provides version information for all three components. In
+addition to the version, the hg commit hash of the build is included as a
+separate entry.
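+
+For example (the PCI device address below is illustrative)::
+
+  $ devlink dev info pci/0000:01:00.0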
diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst
new file mode 100644
index 000000000000..a55bf21f671c
--- /dev/null
+++ b/Documentation/networking/devmem.rst
@@ -0,0 +1,269 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Device Memory TCP
+=================
+
+
+Intro
+=====
+
+Device memory TCP (devmem TCP) enables receiving data directly into device
+memory (dmabuf). The feature is currently implemented for TCP sockets.
+
+
+Opportunity
+-----------
+
+A large number of data transfers have device memory as the source and/or
+destination. Accelerators drastically increased the prevalence of such
+transfers. Some examples include:
+
+- Distributed training, where ML accelerators, such as GPUs on different hosts,
+ exchange data.
+
+- Distributed raw block storage applications transfer large amounts of data with
+ remote SSDs. Much of this data does not require host processing.
+
+Typically the Device-to-Device data transfers in the network are implemented as
+the following low-level operations: Device-to-Host copy, Host-to-Host network
+transfer, and Host-to-Device copy.
+
+The flow involving host copies is suboptimal, especially for bulk data transfers,
+and can put significant strains on system resources such as host memory
+bandwidth and PCIe bandwidth.
+
+Devmem TCP optimizes this use case by implementing socket APIs that enable
+the user to receive incoming network packets directly into device memory.
+
+Packet payloads go directly from the NIC to device memory.
+
+Packet headers go to host memory and are processed by the TCP/IP stack
+normally. The NIC must support header split to achieve this.
+
+Advantages:
+
+- Alleviate host memory bandwidth pressure, compared to existing
+ network-transfer + device-copy semantics.
+
+- Alleviate PCIe bandwidth pressure, by limiting data transfer to the lowest
+ level of the PCIe tree, compared to the traditional path which sends data
+ through the root complex.
+
+
+More Info
+---------
+
+ slides, video
+ https://netdevconf.org/0x17/sessions/talk/device-memory-tcp.html
+
+ patchset
+ [PATCH net-next v24 00/13] Device Memory TCP
+ https://lore.kernel.org/netdev/20240831004313.3713467-1-almasrymina@google.com/
+
+
+Interface
+=========
+
+
+Example
+-------
+
+tools/testing/selftests/net/ncdevmem.c:do_server shows an example of setting up
+the RX path of this API.
+
+
+NIC Setup
+---------
+
+Header split, flow steering, & RSS are required features for devmem TCP.
+
+Header split is used to split incoming packets into a header buffer in host
+memory, and a payload buffer in device memory.
+
+Flow steering & RSS are used to ensure that only flows targeting devmem land on
+an RX queue bound to devmem.
+
+Enable header split & flow steering::
+
+ # enable header split
+ ethtool -G eth1 tcp-data-split on
+
+
+ # enable flow steering
+ ethtool -K eth1 ntuple on
+
+Configure RSS to steer all traffic away from the target RX queue (queue 15 in
+this example)::
+
+ ethtool --set-rxfh-indir eth1 equal 15
+
+
+The user must bind a dmabuf to any number of RX queues on a given NIC using
+the netlink API::
+
+ /* Bind dmabuf to NIC RX queue 15 */
+	struct netdev_queue_id *queues;
+ queues = malloc(sizeof(*queues) * 1);
+
+ queues[0]._present.type = 1;
+ queues[0]._present.idx = 1;
+	queues[0].type = NETDEV_QUEUE_TYPE_RX;
+ queues[0].idx = 15;
+
+ *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+
+ req = netdev_bind_rx_req_alloc();
+ netdev_bind_rx_req_set_ifindex(req, 1 /* ifindex */);
+ netdev_bind_rx_req_set_dmabuf_fd(req, dmabuf_fd);
+ __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
+
+ rsp = netdev_bind_rx(*ys, req);
+
+ dmabuf_id = rsp->dmabuf_id;
+
+
+The netlink API returns a dmabuf_id: a unique ID that refers to this dmabuf
+that has been bound.
+
+The user can unbind the dmabuf from the netdevice by closing the netlink socket
+that established the binding. We do this so that the binding is automatically
+unbound even if the userspace process crashes.
+
+Note that any reasonably well-behaved dmabuf from any exporter should work with
+devmem TCP, even if the dmabuf is not actually backed by devmem. An example of
+this is udmabuf, which wraps user memory (non-devmem) in a dmabuf.
+
+
+Socket Setup
+------------
+
+The socket must be flow steered to the dmabuf bound RX queue::
+
+ ethtool -N eth1 flow-type tcp4 ... queue 15
+
+
+Receiving data
+--------------
+
+The user application must signal to the kernel that it is capable of receiving
+devmem data by passing the MSG_SOCK_DEVMEM flag to recvmsg::
+
+ ret = recvmsg(fd, &msg, MSG_SOCK_DEVMEM);
+
+Applications that do not specify the MSG_SOCK_DEVMEM flag will receive an EFAULT
+on devmem data.
+
+Devmem data is received directly into the dmabuf bound to the NIC in 'NIC
+Setup', and the kernel signals such to the user via the SCM_DEVMEM_* cmsgs::
+
+ for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ if (cm->cmsg_level != SOL_SOCKET ||
+ (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
+ cm->cmsg_type != SCM_DEVMEM_LINEAR))
+ continue;
+
+ dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
+
+ if (cm->cmsg_type == SCM_DEVMEM_DMABUF) {
+ /* Frag landed in dmabuf.
+ *
+ * dmabuf_cmsg->dmabuf_id is the dmabuf the
+ * frag landed on.
+ *
+ * dmabuf_cmsg->frag_offset is the offset into
+ * the dmabuf where the frag starts.
+ *
+ * dmabuf_cmsg->frag_size is the size of the
+ * frag.
+ *
+ * dmabuf_cmsg->frag_token is a token used to
+ * refer to this frag for later freeing.
+ */
+
+ struct dmabuf_token token;
+ token.token_start = dmabuf_cmsg->frag_token;
+ token.token_count = 1;
+ continue;
+ }
+
+ if (cm->cmsg_type == SCM_DEVMEM_LINEAR)
+ /* Frag landed in linear buffer.
+ *
+ * dmabuf_cmsg->frag_size is the size of the
+ * frag.
+ */
+ continue;
+
+ }
+
+Applications may receive 2 cmsgs:
+
+- SCM_DEVMEM_DMABUF: this indicates the fragment landed in the dmabuf indicated
+ by dmabuf_id.
+
+- SCM_DEVMEM_LINEAR: this indicates the fragment landed in the linear buffer.
+ This typically happens when the NIC is unable to split the packet at the
+ header boundary, such that part (or all) of the payload landed in host
+ memory.
+
+Applications may receive no SCM_DEVMEM_* cmsgs. That indicates non-devmem,
+regular TCP data that landed on an RX queue not bound to a dmabuf.
+
+
+Freeing frags
+-------------
+
+Frags received via SCM_DEVMEM_DMABUF are pinned by the kernel while the user
+processes the frag. The user must return the frag to the kernel via
+SO_DEVMEM_DONTNEED::
+
+ ret = setsockopt(client_fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &token,
+ sizeof(token));
+
+The user must ensure the tokens are returned to the kernel in a timely manner.
+Failure to do so will exhaust the limited dmabuf that is bound to the RX queue
+and will lead to packet drops.
+
+
+Implementation & Caveats
+========================
+
+Unreadable skbs
+---------------
+
+Devmem payloads are inaccessible to the kernel processing the packets. This
+results in a few quirks for payloads of devmem skbs:
+
+- Loopback is not functional. Loopback relies on copying the payload, which is
+ not possible with devmem skbs.
+
+- Software checksum calculation fails.
+
+- tcpdump and BPF can't access devmem packet payloads.
+
+
+Testing
+=======
+
+More realistic example code can be found in the kernel source under
+``tools/testing/selftests/net/ncdevmem.c``.
+
+ncdevmem is a devmem TCP netcat. It works very similarly to netcat, but
+receives data directly into a udmabuf.
+
+To run ncdevmem, you need to run it on a server on the machine under test, and
+you need to run netcat on a peer to provide the TX data.
+
+ncdevmem has a validation mode as well that expects a repeating pattern of
+incoming data and validates it as such. For example, you can launch
+ncdevmem on the server by::
+
+ ncdevmem -s <server IP> -c <client IP> -f eth1 -d 3 -n 0000:06:00.0 -l \
+ -p 5201 -v 7
+
+On client side, use regular netcat to send TX data to ncdevmem process
+on the server::
+
+ yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
+ tr \\n \\0 | head -c 5G | nc <server IP> 5201 -p 5201
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index d5f246aceb9f..295563e91082 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -57,6 +57,7 @@ Structure of this header is
``ETHTOOL_A_HEADER_DEV_INDEX`` u32 device ifindex
``ETHTOOL_A_HEADER_DEV_NAME`` string device name
``ETHTOOL_A_HEADER_FLAGS`` u32 flags common for all requests
+ ``ETHTOOL_A_HEADER_PHY_INDEX`` u32 phy device index
============================== ====== =============================
``ETHTOOL_A_HEADER_DEV_INDEX`` and ``ETHTOOL_A_HEADER_DEV_NAME`` identify the
@@ -81,6 +82,12 @@ the behaviour is backward compatible, i.e. requests from old clients not aware
of the flag should be interpreted the way the client expects. A client must
not set flags it does not understand.
+``ETHTOOL_A_HEADER_PHY_INDEX`` identifies the Ethernet PHY the message relates to.
+As there are numerous commands that are related to PHY configuration, and because
+there may be more than one PHY on the link, the PHY index can be passed in the
+request for the commands that need it. It is, however, not mandatory, and if it
+is not passed for commands that target a PHY, the net_device.phydev pointer
+is used.
Bit sets
========
@@ -934,18 +941,18 @@ Request contents:
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
-driver. Driver may impose additional constraints and may not suspport all
+driver. Driver may impose additional constraints and may not support all
attributes.
``ETHTOOL_A_RINGS_CQE_SIZE`` specifies the completion queue event size.
-Completion queue events(CQE) are the events posted by NIC to indicate the
-completion status of a packet when the packet is sent(like send success or
-error) or received(like pointers to packet fragments). The CQE size parameter
+Completion queue events (CQE) are the events posted by NIC to indicate the
+completion status of a packet when the packet is sent (like send success or
+error) or received (like pointers to packet fragments). The CQE size parameter
enables to modify the CQE size other than default size if NIC supports it.
-A bigger CQE can have more receive buffer pointers inturn NIC can transfer
-a bigger frame from wire. Based on the NIC hardware, the overall completion
-queue size can be adjusted in the driver if CQE size is modified.
+A bigger CQE can have more receive buffer pointers, and in turn the NIC can
+transfer a bigger frame from the wire. Based on the NIC hardware, the overall
+completion queue size can be adjusted in the driver if the CQE size is modified.
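+
+For example, the CQE size could be changed with the userspace ``ethtool``
+utility, assuming both the tool and the driver support the parameter
+(values are illustrative)::
+
+  ethtool -G eth0 cqe-size 128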
CHANNELS_GET
============
@@ -989,7 +996,7 @@ Request contents:
===================================== ====== ==========================
Kernel checks that requested channel counts do not exceed limits reported by
-driver. Driver may impose additional constraints and may not suspport all
+driver. Driver may impose additional constraints and may not support all
attributes.
@@ -1307,12 +1314,17 @@ information.
+-+-+-----------------------------------------+--------+---------------------+
| | | ``ETHTOOL_A_CABLE_RESULTS_CODE`` | u8 | result code |
+-+-+-----------------------------------------+--------+---------------------+
+ | | | ``ETHTOOL_A_CABLE_RESULT_SRC`` | u32 | information source |
+ +-+-+-----------------------------------------+--------+---------------------+
| | ``ETHTOOL_A_CABLE_NEST_FAULT_LENGTH`` | nested | cable length |
+-+-+-----------------------------------------+--------+---------------------+
| | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR`` | u8 | pair number |
+-+-+-----------------------------------------+--------+---------------------+
| | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_CM`` | u32 | length in cm |
+-+-+-----------------------------------------+--------+---------------------+
+ | | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_SRC`` | u32 | information source |
+ +-+-+-----------------------------------------+--------+---------------------+
+
CABLE_TEST TDR
==============
@@ -1756,7 +1768,7 @@ Kernel response contents:
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
the operational state of the PoDL PSE functions. The operational state of the
PSE function can be changed using the ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL``
-action. This option is corresponding to ``IEEE 802.3-2018`` 30.15.1.1.2
+action. This attribute corresponds to ``IEEE 802.3-2018`` 30.15.1.1.2
aPoDLPSEAdminState. Possible values are:
.. kernel-doc:: include/uapi/linux/ethtool.h
@@ -1770,8 +1782,8 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` implementing
When set, the optional ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` attribute identifies
the power detection status of the PoDL PSE. The status depend on internal PSE
-state machine and automatic PD classification support. This option is
-corresponding to ``IEEE 802.3-2018`` 30.15.1.1.3 aPoDLPSEPowerDetectionStatus.
+state machine and automatic PD classification support. This attribute
+corresponds to ``IEEE 802.3-2018`` 30.15.1.1.3 aPoDLPSEPowerDetectionStatus.
Possible values are:
.. kernel-doc:: include/uapi/linux/ethtool.h
@@ -1785,12 +1797,13 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_PW_D_STATUS`` implementing
When set, the optional ``ETHTOOL_A_C33_PSE_PW_CLASS`` attribute identifies
the power class of the C33 PSE. It depends on the class negotiated between
-the PSE and the PD. This option is corresponding to ``IEEE 802.3-2022``
+the PSE and the PD. This attribute corresponds to ``IEEE 802.3-2022``
30.9.1.1.8 aPSEPowerClassification.
When set, the optional ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` attribute identifies
-This option is corresponding to ``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower.
-Actual power is reported in mW.
+the actual power drawn by the C33 PSE. This attribute corresponds to
+``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower. Actual power is reported
+in mW.
When set, the optional ``ETHTOOL_A_C33_PSE_EXT_STATE`` attribute identifies
the extended error state of the C33 PSE. Possible values are:
@@ -1839,7 +1852,7 @@ Request contents:
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
-to control PoDL PSE Admin functions. This option is implementing
+to control PoDL PSE Admin functions. This option implements
``IEEE 802.3-2018`` 30.15.1.2.1 acPoDLPSEAdminControl. See
``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` for supported values.
@@ -1866,10 +1879,18 @@ RSS context of an interface similar to ``ETHTOOL_GRSSH`` ioctl request.
Request contents:
-===================================== ====== ==========================
+===================================== ====== ============================
``ETHTOOL_A_RSS_HEADER`` nested request header
``ETHTOOL_A_RSS_CONTEXT`` u32 context number
-===================================== ====== ==========================
+ ``ETHTOOL_A_RSS_START_CONTEXT`` u32 start context number (dumps)
+===================================== ====== ============================
+
+``ETHTOOL_A_RSS_CONTEXT`` specifies which RSS context number to query; if not
+set, context 0 (the main context) is queried. Dumps can be filtered by device
+(only listing contexts of a given netdev). Filtering by a single context
+number is not supported, but ``ETHTOOL_A_RSS_START_CONTEXT`` can be used to
+start dumping contexts from the given number (primarily used to skip context
+0 and only dump the additional contexts).
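+
+For example, the additional contexts of a device could be dumped with the
+in-tree YNL CLI (an illustrative invocation; the interface name is an
+assumption)::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
+        --dump rss-get \
+        --json '{"header": {"dev-name": "eth0"}, "start-context": 1}'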
Kernel response contents:
@@ -1927,7 +1948,7 @@ When set, the optional ``ETHTOOL_A_PLCA_VERSION`` attribute indicates which
standard and version the PLCA management interface complies to. When not set,
the interface is vendor-specific and (possibly) supplied by the driver.
The OPEN Alliance SIG specifies a standard register map for 10BASE-T1S PHYs
-embedding the PLCA Reconcialiation Sublayer. See "10BASE-T1S PLCA Management
+embedding the PLCA Reconciliation Sublayer. See "10BASE-T1S PLCA Management
Registers" at https://www.opensig.org/about/specifications/.
When set, the optional ``ETHTOOL_A_PLCA_ENABLED`` attribute indicates the
@@ -1989,7 +2010,7 @@ Request contents:
``ETHTOOL_A_PLCA_ENABLED`` u8 PLCA Admin State
``ETHTOOL_A_PLCA_NODE_ID`` u8 PLCA unique local node ID
``ETHTOOL_A_PLCA_NODE_CNT`` u8 Number of PLCA nodes on the
- netkork, including the
+ network, including the
coordinator
``ETHTOOL_A_PLCA_TO_TMR`` u8 Transmit Opportunity Timer
value in bit-times (BT)
@@ -2176,6 +2197,49 @@ string.
The ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` and ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL``
attributes encode the completed and total amount of work, respectively.
+PHY_GET
+=======
+
+Retrieve information about a given Ethernet PHY sitting on the link. The DO
+operation returns all available information about dev->phydev. The user can
+also specify a PHY_INDEX, in which case the DO request returns information
+about that specific PHY.
+
+As there can be more than one PHY, the DUMP operation can be used to list the PHYs
+present on a given interface, by passing an interface index or name in
+the dump request.
+
+For more information, refer to :ref:`phy_link_topology`
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PHY_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ===============================
+ ``ETHTOOL_A_PHY_HEADER`` nested request header
+ ``ETHTOOL_A_PHY_INDEX`` u32 the phy's unique index, that can
+ be used for phy-specific
+ requests
+ ``ETHTOOL_A_PHY_DRVNAME`` string the phy driver name
+ ``ETHTOOL_A_PHY_NAME`` string the phy device name
+ ``ETHTOOL_A_PHY_UPSTREAM_TYPE`` u32 the type of device this phy is
+ connected to
+ ``ETHTOOL_A_PHY_UPSTREAM_INDEX`` u32 the PHY index of the upstream
+ PHY
+ ``ETHTOOL_A_PHY_UPSTREAM_SFP_NAME`` string if this PHY is connected to
+ its parent PHY through an SFP
+ bus, the name of this sfp bus
+ ``ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME`` string if the phy controls an sfp bus,
+ the name of the sfp bus
+ ===================================== ====== ===============================
+
+When ``ETHTOOL_A_PHY_UPSTREAM_TYPE`` is PHY_UPSTREAM_PHY, the PHY's parent is
+another PHY.
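+
+For example, the PHYs of a given interface could be listed with the in-tree
+YNL CLI (an illustrative invocation; the interface name is an assumption)::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
+        --dump phy-get --json '{"header": {"dev-name": "eth0"}}'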
+
Request translation
===================
@@ -2283,4 +2347,5 @@ are netlink only.
n/a ``ETHTOOL_MSG_MM_GET``
n/a ``ETHTOOL_MSG_MM_SET``
n/a ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT``
+ n/a ``ETHTOOL_MSG_PHY_GET``
=================================== =====================================
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index d1af04b952f8..803dfc1efb75 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -49,6 +49,7 @@ Contents:
cdc_mbim
dccp
dctcp
+ devmem
dns_resolver
driver
eql
@@ -87,10 +88,12 @@ Contents:
nexthop-group-resilient
nf_conntrack-sysctl
nf_flowtable
+ oa-tc6-framework
openvswitch
operstates
packet_mmap
phonet
+ phy-link-topology
pktgen
plip
ppp_generic
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 3616389c8c2d..eacf8983e230 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2362,6 +2362,20 @@ ra_honor_pio_life - BOOLEAN
Default: 0 (disabled)
+ra_honor_pio_pflag - BOOLEAN
+ The Prefix Information Option P-flag indicates the network can
+ allocate a unique IPv6 prefix per client using DHCPv6-PD.
+ This sysctl can be enabled when a userspace DHCPv6-PD client
+ is running to cause the P-flag to take effect: i.e. the
+ P-flag suppresses any effects of the A-flag within the same
+ PIO. For a given PIO, P=1 and A=1 is treated as A=0.
+
+ - If disabled, the P-flag is ignored.
+ - If enabled, the P-flag will disable SLAAC autoconfiguration
+ for the given Prefix Information Option.
+
+ Default: 0 (disabled)
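+
+	For example, assuming the flag sits in the usual per-interface conf
+	directory (an illustrative command)::
+
+		echo 1 > /proc/sys/net/ipv6/conf/eth0/ra_honor_pio_pflag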
+
accept_ra_rt_info_min_plen - INTEGER
Minimum prefix length of Route Information in RA.
diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst
index 8496b467dea4..e8cf8b3e60ac 100644
--- a/Documentation/networking/l2tp.rst
+++ b/Documentation/networking/l2tp.rst
@@ -638,9 +638,8 @@ Tunnels are identified by a unique tunnel id. The id is 16-bit for
L2TPv2 and 32-bit for L2TPv3. Internally, the id is stored as a 32-bit
value.
-Tunnels are kept in a per-net list, indexed by tunnel id. The tunnel
-id namespace is shared by L2TPv2 and L2TPv3. The tunnel context can be
-derived from the socket's sk_user_data.
+Tunnels are kept in a per-net list, indexed by tunnel id. The
+tunnel id namespace is shared by L2TPv2 and L2TPv3.
Handling tunnel socket close is perhaps the most tricky part of the
L2TP implementation. If userspace closes a tunnel socket, the L2TP
@@ -652,9 +651,7 @@ socket's encap_destroy handler is invoked, which L2TP uses to initiate
its tunnel close actions. For L2TPIP sockets, the socket's close
handler initiates the same tunnel close actions. All sessions are
first closed. Each session drops its tunnel ref. When the tunnel ref
-reaches zero, the tunnel puts its socket ref. When the socket is
-eventually destroyed, its sk_destruct finally frees the L2TP tunnel
-context.
+reaches zero, the tunnel drops its socket ref.
Sessions
--------
@@ -667,10 +664,7 @@ pseudowire) or other data types such as PPP, ATM, HDLC or Frame
Relay. Linux currently implements only Ethernet and PPP session types.
Some L2TP session types also have a socket (PPP pseudowires) while
-others do not (Ethernet pseudowires). We can't therefore use the
-socket reference count as the reference count for session
-contexts. The L2TP implementation therefore has its own internal
-reference counts on the session contexts.
+others do not (Ethernet pseudowires).
Like tunnels, L2TP sessions are identified by a unique
session id. Just as with tunnel ids, the session id is 16-bit for
@@ -680,21 +674,19 @@ value.
Sessions hold a ref on their parent tunnel to ensure that the tunnel
stays extant while one or more sessions references it.
-Sessions are kept in a per-tunnel list, indexed by session id. L2TPv3
-sessions are also kept in a per-net list indexed by session id,
-because L2TPv3 session ids are unique across all tunnels and L2TPv3
-data packets do not contain a tunnel id in the header. This list is
-therefore needed to find the session context associated with a
-received data packet when the tunnel context cannot be derived from
-the tunnel socket.
+Sessions are kept in a per-net list. L2TPv2 sessions and L2TPv3
+sessions are stored in separate lists. L2TPv2 sessions are keyed
+by a 32-bit key made up of the 16-bit tunnel ID and 16-bit
+session ID. L2TPv3 sessions are keyed by the 32-bit session ID, since
+L2TPv3 session ids are unique across all tunnels.
Although the L2TPv3 RFC specifies that L2TPv3 session ids are not
-scoped by the tunnel, the kernel does not police this for L2TPv3 UDP
-tunnels and does not add sessions of L2TPv3 UDP tunnels into the
-per-net session list. In the UDP receive code, we must trust that the
-tunnel can be identified using the tunnel socket's sk_user_data and
-lookup the session in the tunnel's session list instead of the per-net
-session list.
+scoped by the tunnel, the Linux implementation has historically
+allowed this. Such session id collisions are supported using a per-net
+hash table keyed by sk and session ID. When looking up L2TPv3
+sessions, the list entry may link to multiple sessions with that
+session ID, in which case the session matching the given sk (tunnel)
+is used.
PPP
---
@@ -714,10 +706,9 @@ The L2TP PPP implementation handles the closing of a PPPoL2TP socket
by closing its corresponding L2TP session. This is complicated because
it must consider racing with netlink session create/destroy requests
and pppol2tp_connect trying to reconnect with a session that is in the
-process of being closed. Unlike tunnels, PPP sessions do not hold a
-ref on their associated socket, so code must be careful to sock_hold
-the socket where necessary. For all the details, see commit
-3d609342cc04129ff7568e19316ce3d7451a27e8.
+process of being closed. PPP sessions hold a ref on their associated
+socket so that the socket remains extant while the session
+references it.
Ethernet
--------
@@ -761,15 +752,10 @@ Limitations
The current implementation has a number of limitations:
- 1) Multiple UDP sockets with the same 5-tuple address cannot be
- used. The kernel's tunnel context is identified using private
- data associated with the socket so it is important that each
- socket is uniquely identified by its address.
-
- 2) Interfacing with openvswitch is not yet implemented. It may be
+ 1) Interfacing with openvswitch is not yet implemented. It may be
useful to map OVS Ethernet and VLAN ports into L2TPv3 tunnels.
- 3) VLAN pseudowires are implemented using an ``l2tpethN`` interface
+ 2) VLAN pseudowires are implemented using an ``l2tpethN`` interface
configured with a VLAN sub-interface. Since L2TPv3 VLAN
pseudowires carry one and only one VLAN, it may be better to use
a single netdevice rather than an ``l2tpethN`` and ``l2tpethN``:M
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index fd514bba8c43..95598c21fc8e 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -34,6 +34,17 @@ available_schedulers - STRING
Shows the available schedulers choices that are registered. More packet
schedulers may be available, but not loaded.
+blackhole_timeout - INTEGER (seconds)
+	Initial time period in seconds to disable MPTCP on active MPTCP sockets
+	when an MPTCP firewall blackhole issue happens. This time period will
+	grow exponentially when more blackhole issues get detected right after
+	MPTCP is re-enabled, and will reset to the initial value when the
+	blackhole issue goes away.
+
+ 0 to disable the blackhole detection.
+
+ Default: 3600
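+
+	For example, the initial period could be doubled with an illustrative
+	command like::
+
+		sysctl -w net.mptcp.blackhole_timeout=7200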
+
checksum_enabled - BOOLEAN
Control whether DSS checksum can be enabled.
diff --git a/Documentation/networking/multi-pf-netdev.rst b/Documentation/networking/multi-pf-netdev.rst
index 268819225866..2cd25d81aaa7 100644
--- a/Documentation/networking/multi-pf-netdev.rst
+++ b/Documentation/networking/multi-pf-netdev.rst
@@ -111,11 +111,11 @@ The relation between PF, irq, napi, and queue can be observed via netlink spec::
Here you can clearly observe our channels distribution policy::
$ ls /proc/irq/{36,39,40,41,42}/mlx5* -d -1
- /proc/irq/36/mlx5_comp1@pci:0000:08:00.0
- /proc/irq/39/mlx5_comp1@pci:0000:09:00.0
- /proc/irq/40/mlx5_comp2@pci:0000:08:00.0
- /proc/irq/41/mlx5_comp2@pci:0000:09:00.0
- /proc/irq/42/mlx5_comp3@pci:0000:08:00.0
+ /proc/irq/36/mlx5_comp0@pci:0000:08:00.0
+ /proc/irq/39/mlx5_comp0@pci:0000:09:00.0
+ /proc/irq/40/mlx5_comp1@pci:0000:08:00.0
+ /proc/irq/41/mlx5_comp1@pci:0000:09:00.0
+ /proc/irq/42/mlx5_comp2@pci:0000:08:00.0
Steering
========
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index 70c4fb9d4e5c..22b07c814f4a 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -7,6 +7,8 @@ net_device struct fast path usage breakdown
Type Name fastpath_tx_access fastpath_rx_access Comments
..struct ..net_device
+unsigned_long:32 priv_flags read_mostly - __dev_queue_xmit(tx)
+unsigned_long:1 lltx read_mostly - HARD_TX_LOCK,HARD_TX_TRYLOCK,HARD_TX_UNLOCK(tx)
char name[16] - -
struct_netdev_name_node* name_node
struct_dev_ifalias* ifalias
@@ -23,7 +25,6 @@ struct_list_head ptype_specific
struct adj_list
unsigned_int flags read_mostly read_mostly __dev_queue_xmit,__dev_xmit_skb,ip6_output,__ip6_finish_output(tx);ip6_rcv_core(rx)
xdp_features_t xdp_features
-unsigned_long_long priv_flags read_mostly - __dev_queue_xmit(tx)
struct_net_device_ops* netdev_ops read_mostly - netdev_core_pick_tx,netdev_start_xmit(tx)
struct_xdp_metadata_ops* xdp_metadata_ops
int ifindex - read_mostly ip6_rcv_core
@@ -98,7 +99,7 @@ unsigned_int num_rx_queues
unsigned_int real_num_rx_queues - read_mostly get_rps_cpu
struct_bpf_prog* xdp_prog - read_mostly netif_elide_gro()
unsigned_long gro_flush_timeout - read_mostly napi_complete_done
-int napi_defer_hard_irqs - read_mostly napi_complete_done
+u32 napi_defer_hard_irqs - read_mostly napi_complete_done
unsigned_int gro_max_size - read_mostly skb_gro_receive
unsigned_int gro_ipv4_max_size - read_mostly skb_gro_receive
rx_handler_func_t* rx_handler read_mostly - __netif_receive_skb_core
@@ -163,6 +164,10 @@ struct_lock_class_key* qdisc_tx_busylock
bool proto_down
unsigned:1 wol_enabled
unsigned:1 threaded - - napi_poll(napi_enable,dev_set_threaded)
+unsigned_long:1 see_all_hwtstamp_requests
+unsigned_long:1 change_proto_down
+unsigned_long:1 netns_local
+unsigned_long:1 fcoe_mtu
struct_list_head net_notifier_list
struct_macsec_ops* macsec_ops
struct_udp_tunnel_nic_info* udp_tunnel_nic_info
@@ -176,3 +181,5 @@ netdevice_tracker dev_registered_tracker
struct_rtnl_hw_stats64* offload_xstats_l3
struct_devlink_port* devlink_port
struct_dpll_pin* dpll_pin
+struct hlist_head page_pools
+struct dim_irq_moder* irq_moder
diff --git a/Documentation/networking/netdev-features.rst b/Documentation/networking/netdev-features.rst
index d7b15bb64deb..5014f7cc1398 100644
--- a/Documentation/networking/netdev-features.rst
+++ b/Documentation/networking/netdev-features.rst
@@ -139,21 +139,6 @@ chained skbs (skb->next/prev list).
Features contained in NETIF_F_SOFT_FEATURES are features of networking
stack. Driver should not change behaviour based on them.
- * LLTX driver (deprecated for hardware drivers)
-
-NETIF_F_LLTX is meant to be used by drivers that don't need locking at all,
-e.g. software tunnels.
-
-This is also used in a few legacy drivers that implement their
-own locking, don't use it for new (hardware) drivers.
-
- * netns-local device
-
-NETIF_F_NETNS_LOCAL is set for devices that are not allowed to move between
-network namespaces (e.g. loopback).
-
-Don't use it in drivers.
-
* VLAN challenged
NETIF_F_VLAN_CHALLENGED should be set for devices which can't cope with VLAN
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index c2476917a6c3..857c9784f87e 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -258,11 +258,11 @@ ndo_get_stats:
ndo_start_xmit:
Synchronization: __netif_tx_lock spinlock.
- When the driver sets NETIF_F_LLTX in dev->features this will be
+ When the driver sets dev->lltx, this will be
called without holding netif_tx_lock. In this case the driver
has to lock by itself when needed.
The locking there should also properly protect against
- set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated.
+ set_rx_mode. WARNING: use of dev->lltx is deprecated.
Don't use it for new drivers.
Context: Process with BHs disabled or BH (timer),
diff --git a/Documentation/networking/oa-tc6-framework.rst b/Documentation/networking/oa-tc6-framework.rst
new file mode 100644
index 000000000000..fe2aabde923a
--- /dev/null
+++ b/Documentation/networking/oa-tc6-framework.rst
@@ -0,0 +1,497 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=========================================================================
+OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface (TC6) Framework Support
+=========================================================================
+
+Introduction
+------------
+
+The IEEE 802.3cg project defines two 10 Mbit/s PHYs operating over a
+single pair of conductors. The 10BASE-T1L (Clause 146) is a long reach
+PHY supporting full duplex point-to-point operation over 1 km of single
+balanced pair of conductors. The 10BASE-T1S (Clause 147) is a short reach
+PHY supporting full / half duplex point-to-point operation over 15 m of
+single balanced pair of conductors, or half duplex multidrop bus
+operation over 25 m of single balanced pair of conductors.
+
+Furthermore, the IEEE 802.3cg project defines the new Physical Layer
+Collision Avoidance (PLCA) Reconciliation Sublayer (Clause 148) meant to
+provide improved determinism to the CSMA/CD media access method. PLCA
+works in conjunction with the 10BASE-T1S PHY operating in multidrop mode.
+
+The aforementioned PHYs are intended to cover the low-speed / low-cost
+applications in industrial and automotive environments. The large number
+of pins (16) required by the MII interface, which is specified by the
+IEEE 802.3 in Clause 22, is one of the major cost factors that need to be
+addressed to fulfil this objective.
+
+The MAC-PHY solution integrates an IEEE Clause 4 MAC and a 10BASE-T1x PHY
+exposing a low pin count Serial Peripheral Interface (SPI) to the host
+microcontroller. This also enables the addition of Ethernet functionality
+to existing low-end microcontrollers which do not integrate a MAC
+controller.
+
+Overview
+--------
+
+The MAC-PHY is specified to carry both data (Ethernet frames) and control
+(register access) transactions over a single full-duplex serial peripheral
+interface.
+
+Protocol Overview
+-----------------
+
+Two types of transactions are defined in the protocol: data transactions
+for Ethernet frame transfers and control transactions for register
+read/write transfers. A chunk is the basic element of a data transaction,
+composed of 4 bytes of overhead plus 64 bytes of payload. Ethernet frames
+are transferred over one or more data chunks.
+Control transactions consist of one or more register read/write control
+commands.
+
+SPI transactions are initiated by the SPI host with the assertion of CSn
+low to the MAC-PHY and end with the deassertion of CSn high. Between
+SPI transactions, the SPI host may need time for additional processing
+and to set up the next SPI data or control transaction.
+
+SPI data transactions consist of an equal number of transmit (TX) and
+receive (RX) chunks. Chunks in both transmit and receive directions may
+or may not contain valid frame data, independently of each other, allowing
+for the simultaneous transmission and reception of different length
+frames.
+
+Each transmit data chunk begins with a 32-bit data header followed by a
+data chunk payload on MOSI. The data header indicates whether transmit
+frame data is present and provides the information to determine which
+bytes of the payload contain valid frame data.
+
+In parallel, receive data chunks are received on MISO. Each receive data
+chunk consists of a data chunk payload ending with a 32-bit data footer.
+The data footer indicates if there is receive frame data present within
+the payload or not and provides the information to determine which bytes
+of the payload contain valid frame data.
+
+Reference
+---------
+
+10BASE-T1x MAC-PHY Serial Interface Specification,
+
+Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+
+Hardware Architecture
+---------------------
+
+.. code-block:: none
+
+ +----------+ +-------------------------------------+
+ | | | MAC-PHY |
+ | |<---->| +-----------+ +-------+ +-------+ |
+ | SPI Host | | | SPI Slave | | MAC | | PHY | |
+ | | | +-----------+ +-------+ +-------+ |
+ +----------+ +-------------------------------------+
+
+Software Architecture
+---------------------
+
+.. code-block:: none
+
+ +----------------------------------------------------------+
+ | Networking Subsystem |
+ +----------------------------------------------------------+
+ / \ / \
+ | |
+ | |
+ \ / |
+ +----------------------+ +-----------------------------+
+ | MAC Driver |<--->| OPEN Alliance TC6 Framework |
+ +----------------------+ +-----------------------------+
+ / \ / \
+ | |
+ | |
+ | \ /
+ +----------------------------------------------------------+
+ | SPI Subsystem |
+ +----------------------------------------------------------+
+ / \
+ |
+ |
+ \ /
+ +----------------------------------------------------------+
+ | 10BASE-T1x MAC-PHY Device |
+ +----------------------------------------------------------+
+
+Implementation
+--------------
+
+MAC Driver
+~~~~~~~~~~
+
+- Probed by SPI subsystem.
+
+- Initializes OA TC6 framework for the MAC-PHY.
+
+- Registers and configures the network device.
+
+- Sends TX Ethernet frames from the networking subsystem to the OA TC6
+  framework.
+
+OPEN Alliance TC6 Framework
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Initializes PHYLIB interface.
+
+- Registers the MAC-PHY interrupt.
+
+- Performs MAC-PHY register read/write operations using the control
+ transaction protocol specified in the OPEN Alliance 10BASE-T1x MAC-PHY
+ Serial Interface specification.
+
+- Performs Ethernet frame transactions using the data transaction protocol
+ for Ethernet frames specified in the OPEN Alliance 10BASE-T1x MAC-PHY
+ Serial Interface specification.
+
+- Forwards received Ethernet frames from the 10BASE-T1x MAC-PHY to the
+  networking subsystem.
+
+Data Transaction
+~~~~~~~~~~~~~~~~
+
+The Ethernet frames that are typically transferred from the SPI host to
+the MAC-PHY will be converted into multiple transmit data chunks. Each
+transmit data chunk will have a 4-byte header which contains the
+information needed to determine the validity and the location of the
+transmit frame data within the 64-byte data chunk payload.
+
+.. code-block:: none
+
+ +---------------------------------------------------+
+ | Tx Chunk |
+ | +---------------------------+ +----------------+ | MOSI
+ | | 64 bytes chunk payload | | 4 bytes header | |------------>
+ | +---------------------------+ +----------------+ |
+ +---------------------------------------------------+
+
+The 4-byte header contains the following fields:
+
+DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI
+ transaction. For TX data chunks, this bit shall be ‘1’.
+ 0 - Control command
+ 1 - Data chunk
+
+SEQ (Bit 30) - Data Chunk Sequence. This bit is used to indicate an
+ even/odd transmit data chunk sequence to the MAC-PHY.
+
+NORX (Bit 29) - No Receive flag. The SPI host may set this bit to prevent
+ the MAC-PHY from conveying RX data on the MISO for the
+ current chunk (DV = 0 in the footer), indicating that the
+ host would not process it. Typically, the SPI host should
+ set NORX = 0 indicating that it will accept and process
+ any receive frame data within the current chunk.
+
+RSVD (Bit 28..24) - Reserved: All reserved bits shall be ‘0’.
+
+VS (Bit 23..22) - Vendor Specific. These bits are implementation specific.
+ If the MAC-PHY does not implement these bits, the host
+ shall set them to ‘0’.
+
+DV (Bit 21) - Data Valid flag. The SPI host uses this bit to indicate
+ whether the current chunk contains valid transmit frame data
+ (DV = 1) or not (DV = 0). When ‘0’, the MAC-PHY ignores the
+ chunk payload. Note that the receive path is unaffected by
+ the setting of the DV bit in the data header.
+
+SV (Bit 20) - Start Valid flag. The SPI host shall set this bit when the
+ beginning of an Ethernet frame is present in the current
+ transmit data chunk payload. Otherwise, this bit shall be
+ zero. This bit is not to be confused with the Start-of-Frame
+ Delimiter (SFD) byte described in IEEE 802.3 [2].
+
+SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field shall
+ contain the 32-bit word offset into the transmit data
+ chunk payload that points to the start of a new
+ Ethernet frame to be transmitted. The host shall write
+ this field as zero when SV = 0.
+
+RSVD (Bit 15) - Reserved: All reserved bits shall be ‘0’.
+
+EV (Bit 14) - End Valid flag. The SPI host shall set this bit when the end
+ of an Ethernet frame is present in the current transmit data
+ chunk payload. Otherwise, this bit shall be zero.
+
+EBO (Bit 13..8) - End Byte Offset. When EV = 1, this field shall contain
+ the byte offset into the transmit data chunk payload
+ that points to the last byte of the Ethernet frame to
+ transmit. This field shall be zero when EV = 0.
+
+TSC (Bit 7..6) - Timestamp Capture. Request a timestamp capture when the
+ frame is transmitted onto the network.
+ 00 - Do not capture a timestamp
+ 01 - Capture timestamp into timestamp capture register A
+ 10 - Capture timestamp into timestamp capture register B
+ 11 - Capture timestamp into timestamp capture register C
+
+RSVD (Bit 5..1) - Reserved: All reserved bits shall be ‘0’.
+
+P (Bit 0) - Parity. Parity bit calculated over the transmit data header.
+ Method used is odd parity.
+
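+As an illustration (an editorial sketch, not normative specification text),
+a header for a chunk carrying one complete small frame (DV = 1, SV = 1 with
+SWO = 0, EV = 1, no timestamp capture) could be packed like this; the
+helper name and hosted-C environment are assumptions of the sketch:
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* Sketch: pack a TC6 transmit data chunk header per the layout above.
+    * frame_len is the number of valid frame bytes in the payload (1..64).
+    */
+   static uint32_t tc6_tx_data_header(unsigned int frame_len, int seq)
+   {
+           uint32_t hdr = 0;
+
+           hdr |= 1u << 31;                        /* DNC: data chunk */
+           hdr |= (seq ? 1u : 0u) << 30;           /* SEQ */
+           hdr |= 1u << 21;                        /* DV: payload valid */
+           hdr |= 1u << 20;                        /* SV, with SWO = 0 */
+           hdr |= 1u << 14;                        /* EV */
+           hdr |= ((frame_len - 1) & 0x3Fu) << 8;  /* EBO: last byte */
+
+           /* P: odd parity over the whole 32-bit header */
+           if (!(__builtin_popcount(hdr) & 1))
+                   hdr |= 1u;
+
+           return hdr;
+   }
+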
+The number of buffers available in the MAC-PHY to store the incoming
+transmit data chunk payloads is represented as transmit credits. The
+available transmit credits can be read either from the Buffer Status
+Register or from the footer (refer below for the footer info) received
+from the MAC-PHY. The SPI host should not write more data chunks than
+the available transmit credits, as this would lead to a transmit buffer
+overflow error.
+
+If the previous data footer indicated no transmit credits and transmit
+credits later become available, the MAC-PHY asserts its interrupt to the
+SPI host. On reception of the first data header this interrupt is
+deasserted, and the footer received for the first data chunk reports the
+available transmit credits.
+
+The Ethernet frames that are typically transferred from MAC-PHY to SPI
+host will be sent as multiple receive data chunks. Each receive data
+chunk will have 64 bytes of data chunk payload followed by a 4-byte footer
+which contains the information needed to determine the validity and the
+location of the receive frame data within the 64-byte data chunk payload.
+
+.. code-block:: none
+
+ +---------------------------------------------------+
+ | Rx Chunk |
+ | +----------------+ +---------------------------+ | MISO
+ | | 4 bytes footer | | 64 bytes chunk payload | |------------>
+ | +----------------+ +---------------------------+ |
+ +---------------------------------------------------+
+
+The 4-byte footer contains the following fields:
+
+EXST (Bit 31) - Extended Status. This bit is set when any bit in the
+ STATUS0 or STATUS1 registers is set and not masked.
+
+HDRB (Bit 30) - Received Header Bad. When set, indicates that the MAC-PHY
+ received a control or data header with a parity error.
+
+SYNC (Bit 29) - Configuration Synchronized flag. This bit reflects the
+ state of the SYNC bit in the CONFIG0 configuration
+ register (see Table 12). A zero indicates that the MAC-PHY
+ configuration may not be as expected by the SPI host.
+ Following configuration, the SPI host sets the
+ corresponding bit in the configuration register, which is
+ reflected in this field.
+
+RCA (Bit 28..24) - Receive Chunks Available. The RCA field indicates to
+ the SPI host the minimum number of additional receive
+ data chunks of frame data that are available for
+ reading beyond the current receive data chunk. This
+ field is zero when there is no receive frame data
+ pending in the MAC-PHY’s buffer for reading.
+
+VS (Bit 23..22) - Vendor Specific. These bits are implementation specific.
+ If not implemented, the MAC-PHY shall set these bits to
+ ‘0’.
+
+DV (Bit 21) - Data Valid flag. The MAC-PHY uses this bit to indicate
+ whether the current receive data chunk contains valid
+ receive frame data (DV = 1) or not (DV = 0). When ‘0’, the
+ SPI host shall ignore the chunk payload.
+
+SV (Bit 20) - Start Valid flag. The MAC-PHY sets this bit when the current
+ chunk payload contains the start of an Ethernet frame.
+ Otherwise, this bit is zero. The SV bit is not to be
+ confused with the Start-of-Frame Delimiter (SFD) byte
+ described in IEEE 802.3 [2].
+
+SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field contains the
+ 32-bit word offset into the receive data chunk payload
+ containing the first byte of a new received Ethernet
+ frame. When a receive timestamp has been added to the
+ beginning of the received Ethernet frame (RTSA = 1)
+ then SWO points to the most significant byte of the
+ timestamp. This field will be zero when SV = 0.
+
+FD (Bit 15) - Frame Drop. When set, this bit indicates that the MAC has
+ detected a condition for which the SPI host should drop the
+ received Ethernet frame. This bit is only valid at the end
+ of a received Ethernet frame (EV = 1) and shall be zero at
+ all other times.
+
+EV (Bit 14) - End Valid flag. The MAC-PHY sets this bit when the end of a
+ received Ethernet frame is present in this receive data
+ chunk payload.
+
+EBO (Bit 13..8) - End Byte Offset: When EV = 1, this field contains the
+ byte offset into the receive data chunk payload that
+ locates the last byte of the received Ethernet frame.
+ This field is zero when EV = 0.
+
+RTSA (Bit 7) - Receive Timestamp Added. This bit is set when a 32-bit or
+ 64-bit timestamp has been added to the beginning of the
+ received Ethernet frame. The MAC-PHY shall set this bit to
+ zero when SV = 0.
+
+RTSP (Bit 6) - Receive Timestamp Parity. Parity bit calculated over the
+ 32-bit/64-bit timestamp added to the beginning of the
+ received Ethernet frame. Method used is odd parity. The
+ MAC-PHY shall set this bit to zero when RTSA = 0.
+
+TXC (Bit 5..1) - Transmit Credits. This field contains the minimum number
+ of transmit data chunks of frame data that the SPI host
+ can write in a single transaction without incurring a
+ transmit buffer overflow error.
+
+P (Bit 0) - Parity. Parity bit calculated over the receive data footer.
+ Method used is odd parity.
+
+The SPI host initiates a data receive transaction based on the number of
+receive chunks available in the MAC-PHY, which is provided in the receive
+chunk footer (RCA - Receive Chunks Available). The SPI host creates data
+invalid transmit data chunks (empty chunks) or data valid transmit data
+chunks when there are valid Ethernet frames to transmit to the MAC-PHY.
+The receive chunks available in the MAC-PHY can be read either from the
+Buffer Status Register or from the footer.
+
+If the previous data footer indicated no receive data chunks available
+and receive data chunks later become available for reading, the MAC-PHY
+asserts its interrupt to the SPI host. On reception of the first data
+header this interrupt is deasserted, and the footer received for the
+first data chunk reports the number of receive chunks available.
+
+MAC-PHY Interrupt
+~~~~~~~~~~~~~~~~~
+
+The MAC-PHY interrupt is asserted when any of the following conditions is met.
+
+Receive chunks available - This interrupt is asserted when the previous
+data footer indicated no receive data chunks available and receive data
+chunks then become available for reading. On reception of the first data
+header this interrupt is deasserted.
+
+Transmit chunk credits available - This interrupt is asserted when the
+previous data footer indicated no transmit credits available and transmit
+credits then become available for transmitting data chunks. On reception
+of the first data header this interrupt is deasserted.
+
+Extended status event - This interrupt is asserted when the previous data
+footer indicated no extended status and an extended event then becomes
+available. In this case the host should read the STATUS0 register to
+identify the corresponding error/event. On reception of the first data
+header this interrupt is deasserted.
+
+Control Transaction
+~~~~~~~~~~~~~~~~~~~
+
+The 4-byte control header contains the following fields:
+
+DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI
+ transaction. For control commands, this bit shall be ‘0’.
+ 0 - Control command
+ 1 - Data chunk
+
+HDRB (Bit 30) - Received Header Bad. When set by the MAC-PHY, indicates
+ that a header was received with a parity error. The SPI
+ host should always clear this bit. The MAC-PHY ignores the
+ HDRB value sent by the SPI host on MOSI.
+
+WNR (Bit 29) - Write-Not-Read. This bit indicates if data is to be written
+ to registers (when set) or read from registers
+ (when clear).
+
+AID (Bit 28) - Address Increment Disable. When clear, the address will be
+ automatically post-incremented by one following each
+ register read or write. When set, address auto increment is
+ disabled allowing successive reads and writes to occur at
+ the same register address.
+
+MMS (Bit 27..24) - Memory Map Selector. This field selects the specific
+ register memory map to access.
+
+ADDR (Bit 23..8) - Address. Address of the first register within the
+ selected memory map to access.
+
+LEN (Bit 7..1) - Length. Specifies the number of registers to read/write.
+ This field is interpreted as the number of registers
+ minus 1 allowing for up to 128 consecutive registers read
+ or written starting at the address specified in ADDR. A
+ length of zero shall read or write a single register.
+
+P (Bit 0) - Parity. Parity bit calculated over the control command header.
+ Method used is odd parity.
+
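+As an illustration (again an editorial sketch, not normative text), a
+control header for accessing ``num_regs`` consecutive registers could be
+encoded as follows, reusing the same odd-parity rule as the data header;
+the helper name is an assumption of the sketch:
+
+.. code-block:: c
+
+   #include <stdint.h>
+
+   /* Sketch: pack a TC6 control command header per the layout above.
+    * AID (bit 28) is left clear, so the register address auto-increments.
+    */
+   static uint32_t tc6_ctrl_header(int write, unsigned int mms,
+                                   unsigned int addr, unsigned int num_regs)
+   {
+           uint32_t hdr = 0;                     /* DNC (bit 31) = 0 */
+
+           hdr |= (write ? 1u : 0u) << 29;       /* WNR */
+           hdr |= (mms & 0xFu) << 24;            /* MMS */
+           hdr |= (addr & 0xFFFFu) << 8;         /* ADDR */
+           hdr |= ((num_regs - 1) & 0x7Fu) << 1; /* LEN = count - 1 */
+
+           if (!(__builtin_popcount(hdr) & 1))   /* P: odd parity */
+                   hdr |= 1u;
+
+           return hdr;
+   }
+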
+Control transactions consist of one or more control commands. Control
+commands are used by the SPI host to read and write registers within the
+MAC-PHY. Each control command is composed of a 4-byte control command
+header, followed by register write data in the case of a control write
+command.
+
+The MAC-PHY ignores the final 4 bytes of data from the SPI host at the end
+of the control write command. The control write command is also echoed
+from the MAC-PHY back to the SPI host to identify which register write
+failed in case of any bus errors. The echoed control write command
+consists of 4 unused bytes, to be ignored by the SPI host, followed by
+the echoed 4-byte control header, followed by the echoed register write
+data. Control write commands can write either a single register or
+multiple consecutive registers. When multiple consecutive registers are
+written, the address is automatically post-incremented by the MAC-PHY.
+Writing to any unimplemented or undefined registers shall be ignored and
+yield no effect.
+
+The MAC-PHY ignores all data from the SPI host following the control
+header for the remainder of the control read command. The control read
+command is also echoed from the MAC-PHY back to the SPI host to identify
+which register read failed in case of any bus errors. The echoed control
+read command consists of 4 unused bytes, to be ignored by the SPI host,
+followed by the echoed 4-byte control header, followed by the register
+read data. Control read commands can read either a single
+register or multiple consecutive registers. When multiple consecutive
+registers are read, the address is automatically post-incremented by the
+MAC-PHY. Reading any unimplemented or undefined registers shall return
+zero.
+
+Device drivers API
+==================
+
+The include/linux/oa_tc6.h header file declares the following functions:
+
+.. c:function:: struct oa_tc6 *oa_tc6_init(struct spi_device *spi, \
+ struct net_device *netdev)
+
+Initialize the OA TC6 library.
+
+.. c:function:: void oa_tc6_exit(struct oa_tc6 *tc6)
+
+Free the allocated OA TC6 library instance.
+
+.. c:function:: int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, \
+ u32 value)
+
+Write a single register in the MAC-PHY.
+
+.. c:function:: int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, \
+ u32 value[], u8 length)
+
+Write multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be written starting at @address.
+
+.. c:function:: int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, \
+ u32 *value)
+
+Read a single register in the MAC-PHY.
+
+.. c:function:: int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, \
+ u32 value[], u8 length)
+
+Read multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be read starting at @address.
+
+.. c:function:: netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, \
+ struct sk_buff *skb);
+
+Transmit the Ethernet frame in the given skb through the MAC-PHY.
+
+.. c:function:: int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
+
+Enable the zero-align receive frame feature, so that all received Ethernet
+frames are aligned to start at the beginning of a receive data chunk
+payload, i.e. with a start word offset (SWO) of zero.
diff --git a/Documentation/networking/phy-link-topology.rst b/Documentation/networking/phy-link-topology.rst
new file mode 100644
index 000000000000..4dec5d7d6513
--- /dev/null
+++ b/Documentation/networking/phy-link-topology.rst
@@ -0,0 +1,121 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _phy_link_topology:
+
+=================
+PHY link topology
+=================
+
+Overview
+========
+
+The PHY link topology representation in the networking stack aims at representing
+the hardware layout for any given Ethernet link.
+
+An Ethernet interface from userspace's point of view is nothing but a
+:c:type:`struct net_device <net_device>`, which exposes configuration options
+through the legacy ioctls and the ethtool netlink commands. The base assumption
+when designing these configuration APIs was that the link looks something like ::
+
+ +-----------------------+ +----------+ +--------------+
+ | Ethernet Controller / | | Ethernet | | Connector / |
+ | MAC | ------ | PHY | ---- | Port | ---... to LP
+ +-----------------------+ +----------+ +--------------+
+ struct net_device struct phy_device
+
+Commands that need to configure the PHY will go through the net_device.phydev
+field to reach the PHY and perform the relevant configuration.
+
+This assumption falls apart in more complex topologies that can arise when,
+for example, using SFP transceivers (although that's not the only such case).
+
+Here, we have two basic scenarios. In the first, the MAC is able to output
+a serialized interface that can directly be fed to an SFP cage, such as
+SGMII, 1000BaseX, 10GBaseR, etc.
+
+The link topology then looks like this (when an SFP module is inserted) ::
+
+ +-----+ SGMII +------------+
+ | MAC | ------- | SFP Module |
+ +-----+ +------------+
+
+Knowing that some modules embed a PHY, the actual link is more like ::
+
+ +-----+ SGMII +--------------+
+ | MAC | -------- | PHY (on SFP) |
+ +-----+ +--------------+
+
+In this case, the SFP PHY is handled by phylib, and registered by phylink through
+its SFP upstream ops.
+
+In the second scenario, the Ethernet controller isn't able to output a
+serialized interface, so we can't directly connect it to an SFP cage.
+However, some PHYs can be used
+as media-converters, to translate the non-serialized MAC MII interface to a
+serialized MII interface fed to the SFP ::
+
+ +-----+ RGMII +-----------------------+ SGMII +--------------+
+ | MAC | ------- | PHY (media converter) | ------- | PHY (on SFP) |
+ +-----+ +-----------------------+ +--------------+
+
+This is where the model of having a single net_device.phydev pointer shows its
+limitations, as we now have 2 PHYs on the link.
+
+The phy_link topology framework aims at providing a way to keep track of every
+PHY on the link, both for use by kernel drivers and subsystems, and to
+report the topology to userspace, allowing userspace to target individual
+PHYs in configuration commands.
+
+API
+===
+
+The :c:type:`struct phy_link_topology <phy_link_topology>` is a per-netdevice
+resource that is initialized at netdevice creation. Once initialized, PHYs
+can be registered with the topology through:
+
+:c:func:`phy_link_topo_add_phy`
+
+Besides registering the PHY to the topology, this call will also assign a unique
+index to the PHY, which can then be reported to userspace to refer to this PHY
+(akin to the ifindex). This index is a u32, ranging from 1 to U32_MAX. The value
+0 is reserved to indicate the PHY doesn't belong to any topology yet.
+
+The PHY can then be removed from the topology through
+
+:c:func:`phy_link_topo_del_phy`
+
+These functions are already hooked into the phylib subsystem, so all PHYs that
+are linked to a net_device through :c:func:`phy_attach_direct` will automatically
+join the netdev's topology.
+
+PHYs that are on an SFP module will also be automatically registered if the
+SFP upstream is phylink (so, no media-converter).
+
+PHY drivers that can be used as SFP upstream need to call :c:func:`phy_sfp_attach_phy`
+and :c:func:`phy_sfp_detach_phy`, which can be used as a
+.attach_phy / .detach_phy implementation for the
+:c:type:`struct sfp_upstream_ops <sfp_upstream_ops>`.
+
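+For instance, a PHY driver could wire these helpers up as follows (a
+minimal sketch; the driver name is hypothetical and the remaining
+upstream ops are omitted):
+
+.. code-block:: c
+
+   static const struct sfp_upstream_ops my_phy_sfp_ops = {
+           .attach = phy_sfp_attach,
+           .detach = phy_sfp_detach,
+           .attach_phy = phy_sfp_attach_phy,
+           .detach_phy = phy_sfp_detach_phy,
+   };
+
+   static int my_phy_probe(struct phy_device *phydev)
+   {
+           /* Registers the SFP bus as downstream of this PHY, if present */
+           return phy_sfp_probe(phydev, &my_phy_sfp_ops);
+   }
+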
+UAPI
+====
+
+A set of netlink commands exists to query the link topology from userspace;
+see ``Documentation/networking/ethtool-netlink.rst``.
+
+The whole point of having a topology representation is to assign the phyindex
+field in :c:type:`struct phy_device <phy_device>`. This index is reported to
+userspace using the ``ETHTOOL_MSG_PHY_GET`` ethnl command. Performing a DUMP
+operation will result in all PHYs from all net_devices being listed. The DUMP
+command accepts either an ``ETHTOOL_A_HEADER_DEV_INDEX`` or an
+``ETHTOOL_A_HEADER_DEV_NAME`` to be passed in the request to filter the DUMP
+to a single net_device.
+
+The retrieved index can then be passed as a request parameter using the
+``ETHTOOL_A_HEADER_PHY_INDEX`` field in the following ethnl commands:
+
+* ``ETHTOOL_MSG_STRSET_GET`` to get the stats string set from a given PHY
+* ``ETHTOOL_MSG_CABLE_TEST_ACT`` and ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``, to perform
+ cable testing on a given PHY on the link (most likely the outermost PHY)
+* ``ETHTOOL_MSG_PSE_SET`` and ``ETHTOOL_MSG_PSE_GET`` for PHY-controlled PoE and PSE settings
+* ``ETHTOOL_MSG_PLCA_GET_CFG``, ``ETHTOOL_MSG_PLCA_SET_CFG`` and ``ETHTOOL_MSG_PLCA_GET_STATUS``
+ to set the PLCA (Physical Layer Collision Avoidance) parameters
+
+Note that the PHY index can be passed to other requests, which will silently
+ignore it if present and irrelevant.
diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst
index 758f1dae3fce..f355f0166f1b 100644
--- a/Documentation/networking/switchdev.rst
+++ b/Documentation/networking/switchdev.rst
@@ -137,10 +137,10 @@ would be sub-port 0 on port 1 on switch 1.
Port Features
^^^^^^^^^^^^^
-NETIF_F_NETNS_LOCAL
+dev->netns_local
If the switchdev driver (and device) only supports offloading of the default
-network namespace (netns), the driver should set this feature flag to prevent
+network namespace (netns), the driver should set this private flag to prevent
the port netdev from being moved out of the default netns. A netns-aware
driver/device would not set this flag and be responsible for partitioning
hardware to preserve netns containment. This means hardware cannot forward
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 5e93cd71f99f..8199e6917671 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -158,7 +158,8 @@ SOF_TIMESTAMPING_SYS_HARDWARE:
SOF_TIMESTAMPING_RAW_HARDWARE:
Report hardware timestamps as generated by
- SOF_TIMESTAMPING_TX_HARDWARE when available.
+ SOF_TIMESTAMPING_TX_HARDWARE or SOF_TIMESTAMPING_RX_HARDWARE
+ when available.
1.3.3 Timestamp Options
@@ -266,6 +267,23 @@ SOF_TIMESTAMPING_OPT_TX_SWHW:
two separate messages will be looped to the socket's error queue,
each containing just one timestamp.
+SOF_TIMESTAMPING_OPT_RX_FILTER:
+ Filter out spurious receive timestamps: report a receive timestamp
+ only if the matching timestamp generation flag is enabled.
+
+ Receive timestamps are generated early in the ingress path, before a
+ packet's destination socket is known. If any socket enables receive
+ timestamps, packets for all sockets will be timestamped, including
+ sockets that request timestamp reporting with
+ SOF_TIMESTAMPING_SOFTWARE and/or SOF_TIMESTAMPING_RAW_HARDWARE but
+ do not request receive timestamp generation. This can happen when
+ requesting transmit timestamps only.
+
+ Receiving spurious timestamps is generally benign. A process can
+ ignore the unexpected non-zero value. But it makes behavior subtly
+ dependent on other sockets. This flag isolates the socket for more
+ deterministic behavior.
+
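+ For example, a socket that wants software receive timestamps, without
+ spurious values caused by other sockets' settings, could be configured
+ like this (a sketch; error handling omitted)::
+
+   int val = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE |
+             SOF_TIMESTAMPING_OPT_RX_FILTER;
+
+   setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+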
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
diff --git a/Documentation/nvme/feature-and-quirk-policy.rst b/Documentation/nvme/feature-and-quirk-policy.rst
index c01d836d8e41..e21966bf20a8 100644
--- a/Documentation/nvme/feature-and-quirk-policy.rst
+++ b/Documentation/nvme/feature-and-quirk-policy.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-=======================================
-Linux NVMe feature and and quirk policy
-=======================================
+===================================
+Linux NVMe feature and quirk policy
+===================================
This file explains the policy used to decide what is supported by the
Linux NVMe driver and what is not.
diff --git a/Documentation/power/pci.rst b/Documentation/power/pci.rst
index e2c1fb8a569a..9ebecb7b00b2 100644
--- a/Documentation/power/pci.rst
+++ b/Documentation/power/pci.rst
@@ -979,18 +979,17 @@ subsections can be defined as a separate function, it often is convenient to
point two or more members of struct dev_pm_ops to the same routine. There are
a few convenience macros that can be used for this purpose.
-The SIMPLE_DEV_PM_OPS macro declares a struct dev_pm_ops object with one
+The DEFINE_SIMPLE_DEV_PM_OPS() macro declares a struct dev_pm_ops object with one
suspend routine pointed to by the .suspend(), .freeze(), and .poweroff()
members and one resume routine pointed to by the .resume(), .thaw(), and
.restore() members. The other function pointers in this struct dev_pm_ops are
unset.
-The UNIVERSAL_DEV_PM_OPS macro is similar to SIMPLE_DEV_PM_OPS, but it
-additionally sets the .runtime_resume() pointer to the same value as
-.resume() (and .thaw(), and .restore()) and the .runtime_suspend() pointer to
-the same value as .suspend() (and .freeze() and .poweroff()).
+The DEFINE_RUNTIME_DEV_PM_OPS() macro is similar to DEFINE_SIMPLE_DEV_PM_OPS(), but it
+additionally sets the .runtime_resume() pointer to pm_runtime_force_resume()
+and the .runtime_suspend() pointer to pm_runtime_force_suspend().
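+
+For example, a driver that uses the same pair of callbacks for system sleep
+and runtime PM could write (a sketch; the foo_* names are placeholders)::
+
+    static int foo_suspend(struct device *dev) { return 0; }
+    static int foo_resume(struct device *dev) { return 0; }
+
+    static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume,
+                                     NULL);
+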
-The SET_SYSTEM_SLEEP_PM_OPS can be used inside of a declaration of struct
+The SYSTEM_SLEEP_PM_OPS() macro can be used inside a declaration of struct
dev_pm_ops to indicate that one suspend routine is to be pointed to by the
.suspend(), .freeze(), and .poweroff() members and one resume routine is to
be pointed to by the .resume(), .thaw(), and .restore() members.
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 5c4e730f38d0..53d1996460ab 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -811,8 +811,8 @@ subsystem-level dev_pm_ops structure.
Device drivers that wish to use the same function as a system suspend, freeze,
poweroff and runtime suspend callback, and similarly for system resume, thaw,
-restore, and runtime resume, can achieve this with the help of the
-UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its
+restore, and runtime resume, can achieve similar behaviour with the help of the
+DEFINE_RUNTIME_DEV_PM_OPS() macro defined in include/linux/pm_runtime.h (possibly setting its
last argument to NULL).
8. "No-Callback" Devices
diff --git a/Documentation/process/backporting.rst b/Documentation/process/backporting.rst
index e1a6ea0a1e8a..a71480fcf3b4 100644
--- a/Documentation/process/backporting.rst
+++ b/Documentation/process/backporting.rst
@@ -73,7 +73,7 @@ Once you have the patch in git, you can go ahead and cherry-pick it into
your source tree. Don't forget to cherry-pick with ``-x`` if you want a
written record of where the patch came from!
-Note that if you are submiting a patch for stable, the format is
+Note that if you are submitting a patch for stable, the format is
slightly different; the first line after the subject line needs to be
either::
@@ -147,7 +147,7 @@ divergence.
It's important to always identify the commit or commits that caused the
conflict, as otherwise you cannot be confident in the correctness of
your resolution. As an added bonus, especially if the patch is in an
-area you're not that famliar with, the changelogs of these commits will
+area you're not that familiar with, the changelogs of these commits will
often give you the context to understand the code and potential problems
or pitfalls with your conflict resolution.
@@ -197,7 +197,7 @@ git blame
Another way to find prerequisite commits (albeit only the most recent
one for a given conflict) is to run ``git blame``. In this case, you
need to run it against the parent commit of the patch you are
-cherry-picking and the file where the conflict appared, i.e.::
+cherry-picking and the file where the conflict appeared, i.e.::
git blame <commit>^ -- <path>
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 3fc63f27c226..00f1ed7c59c3 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -64,6 +64,7 @@ GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
mkimage (optional) 2017.01 mkimage --version
Python (optional) 3.5.x python3 --version
+GNU AWK (optional) 5.1.0 gawk --version
====================== =============== ========================================
.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -192,6 +193,12 @@ platforms. The tool is available via the ``u-boot-tools`` package or can be
built from the U-Boot source code. See the instructions at
https://docs.u-boot.org/en/latest/build/tools.html#building-tools-for-linux
+GNU AWK
+-------
+
+GNU AWK is needed if you want kernel builds to generate address range data for
+builtin modules (CONFIG_BUILTIN_MODULE_RANGES).
+
System utilities
****************
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 8e30c8f7697d..19d2ed47ff79 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -986,7 +986,7 @@ that can go into these 5 milliseconds.
A reasonable rule of thumb is to not put inline at functions that have more
than 3 lines of code in them. An exception to this rule are the cases where
-a parameter is known to be a compiletime constant, and as a result of this
+a parameter is known to be a compile time constant, and as a result of this
constantness you *know* the compiler will be able to optimize most of your
function away at compile time. For a good example of this latter case, see
the kmalloc() inline function.
diff --git a/Documentation/process/email-clients.rst b/Documentation/process/email-clients.rst
index dd22c46d1d02..e6b9173a1845 100644
--- a/Documentation/process/email-clients.rst
+++ b/Documentation/process/email-clients.rst
@@ -216,7 +216,7 @@ Mutt is highly customizable. Here is a minimum configuration to start
using Mutt to send patches through Gmail::
# .muttrc
- # ================ IMAP ====================
+ # ================ IMAP ====================
set imap_user = 'yourusername@gmail.com'
set imap_pass = 'yourpassword'
set spoolfile = imaps://imap.gmail.com/INBOX
diff --git a/Documentation/process/maintainer-tip.rst b/Documentation/process/maintainer-tip.rst
index ba312345d030..349a27a53343 100644
--- a/Documentation/process/maintainer-tip.rst
+++ b/Documentation/process/maintainer-tip.rst
@@ -154,7 +154,7 @@ Examples for illustration:
We modify the hot cpu handling to cancel the delayed work on the dying
cpu and run the worker immediately on a different cpu in same domain. We
- donot flush the worker because the MBM overflow worker reschedules the
+ do not flush the worker because the MBM overflow worker reschedules the
worker on same CPU and scans the domain->cpu_mask to get the domain
pointer.
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index f310f2f36666..1518bd57adab 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -842,6 +842,14 @@ Make sure that base commit is in an official maintainer/mainline tree
and not in some internal, accessible only to you tree - otherwise it
would be worthless.
+Tooling
+-------
+
+Many of the technical aspects of this process can be automated using
+b4, documented at <https://b4.docs.kernel.org/en/latest/>. This can
+help with things like tracking dependencies, running checkpatch, and
+formatting and sending mails.
+
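+For example, one typical workflow to fetch a whole series from
+lore.kernel.org by message ID and apply it to the current branch is::
+
+  b4 shazam <message-id>
+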
References
----------
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index e3f388ef4ee4..6146b49b6a98 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -15,6 +15,8 @@ but not `std <https://doc.rust-lang.org/std/>`_. Crates for use in the
kernel must opt into this behavior using the ``#![no_std]`` attribute.
+.. _rust_code_documentation:
+
Code documentation
------------------
@@ -22,10 +24,17 @@ Rust kernel code is documented using ``rustdoc``, its built-in documentation
generator.
The generated HTML docs include integrated search, linked items (e.g. types,
-functions, constants), source code, etc. They may be read at (TODO: link when
-in mainline and generated alongside the rest of the documentation):
+functions, constants), source code, etc. They may be read at:
+
+ https://rust.docs.kernel.org
+
+For linux-next, please see:
+
+ https://rust.docs.kernel.org/next/
- http://kernel.org/
+There are also tags for each main release, e.g.:
+
+ https://rust.docs.kernel.org/6.10/
The docs can also be easily generated and read locally. This is quite fast
(same order as compiling the code itself) and no special tools or environment
@@ -75,7 +84,7 @@ should provide as-safe-as-possible abstractions as needed.
.. code-block::
rust/bindings/
- (rust/helpers.c)
+ (rust/helpers/)
include/ -----+ <-+
| |
@@ -112,7 +121,7 @@ output files in the ``rust/bindings/`` directory.
For parts of the C header that ``bindgen`` does not auto generate, e.g. C
``inline`` functions or non-trivial macros, it is acceptable to add a small
-wrapper function to ``rust/helpers.c`` to make it available for the Rust side as
+wrapper function to ``rust/helpers/`` to make it available for the Rust side as
well.
Abstractions
@@ -142,3 +151,11 @@ configuration:
#[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`)
#[cfg(CONFIG_X="m")] // Enabled as a module (`m`)
#[cfg(not(CONFIG_X))] // Disabled
+
+For other predicates that Rust's ``cfg`` does not support, e.g. expressions with
+numerical comparisons, one may define a new Kconfig symbol:
+
+.. code-block:: kconfig
+
+ config RUSTC_VERSION_MIN_107900
+ def_bool y if RUSTC_VERSION >= 107900
diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
index 46d35bd395cf..55dcde9e9e7e 100644
--- a/Documentation/rust/index.rst
+++ b/Documentation/rust/index.rst
@@ -25,13 +25,27 @@ support is still in development/experimental, especially for certain kernel
configurations.
+Code documentation
+------------------
+
+Given a kernel configuration, the kernel may generate Rust code documentation,
+i.e. HTML rendered by the ``rustdoc`` tool.
+
.. only:: rustdoc and html
- You can also browse `rustdoc documentation <rustdoc/kernel/index.html>`_.
+ This kernel documentation was built with `Rust code documentation
+ <rustdoc/kernel/index.html>`_.
.. only:: not rustdoc and html
- This documentation does not include rustdoc generated information.
+ This kernel documentation was not built with Rust code documentation.
+
+A pregenerated version is provided at:
+
+ https://rust.docs.kernel.org
+
+Please see the :ref:`Code documentation <rust_code_documentation>` section for
+more details.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index 8e3ad9678719..2d107982c87b 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -39,8 +39,8 @@ of the box, e.g.::
Debian
******
-Debian Unstable (Sid), outside of the freeze period, provides recent Rust
-releases and thus it should generally work out of the box, e.g.::
+Debian Testing and Debian Unstable (Sid), outside of the freeze period, provide
+recent Rust releases and thus they should generally work out of the box, e.g.::
apt install rustc rust-src bindgen rustfmt rust-clippy
diff --git a/Documentation/scheduler/completion.rst b/Documentation/scheduler/completion.rst
index f19aca2062bd..adf0c0a56d02 100644
--- a/Documentation/scheduler/completion.rst
+++ b/Documentation/scheduler/completion.rst
@@ -51,7 +51,7 @@ which has only two fields::
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
};
This provides the ->wait waitqueue to place tasks on for waiting (if any), and
diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst
index 43bd8a145b7a..5dd53e47bc0c 100644
--- a/Documentation/scheduler/index.rst
+++ b/Documentation/scheduler/index.rst
@@ -12,6 +12,7 @@ Scheduler
sched-bwc
sched-deadline
sched-design-CFS
+ sched-eevdf
sched-domains
sched-capacity
sched-energy
@@ -20,6 +21,7 @@ Scheduler
sched-nice-design
sched-rt-group
sched-stats
+ sched-ext
sched-debug
text_files
diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst
index 9fe4846079bb..22838ed8e13a 100644
--- a/Documentation/scheduler/sched-deadline.rst
+++ b/Documentation/scheduler/sched-deadline.rst
@@ -749,21 +749,19 @@ Appendix A. Test suite
of the command line options. Please refer to rt-app documentation for more
details (`<rt-app-sources>/doc/*.json`).
- The second testing application is a modification of schedtool, called
- schedtool-dl, which can be used to setup SCHED_DEADLINE parameters for a
- certain pid/application. schedtool-dl is available at:
- https://github.com/scheduler-tools/schedtool-dl.git.
+ The second way of testing is to use chrt, which has support for
+ SCHED_DEADLINE.
The usage is straightforward::
- # schedtool -E -t 10000000:100000000 -e ./my_cpuhog_app
+ # chrt -d -T 10000000 -D 100000000 0 ./my_cpuhog_app
With this, my_cpuhog_app is put to run inside a SCHED_DEADLINE reservation
- of 10ms every 100ms (note that parameters are expressed in microseconds).
- You can also use schedtool to create a reservation for an already running
+ of 10ms every 100ms (note that parameters are expressed in nanoseconds).
+ You can also use chrt to create a reservation for an already running
application, given that you know its pid::
- # schedtool -E -t 10000000:100000000 my_app_pid
+ # chrt -d -T 10000000 -D 100000000 -p 0 my_app_pid
Appendix B. Minimal main()
==========================
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index bc1e507269c6..8786f219fc73 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -8,10 +8,12 @@ CFS Scheduler
1. OVERVIEW
============
-CFS stands for "Completely Fair Scheduler," and is the new "desktop" process
-scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
-replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
-code.
+CFS stands for "Completely Fair Scheduler," and is the "desktop" process
+scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. When
+originally merged, it was the replacement for the previous vanilla
+scheduler's SCHED_OTHER interactivity code. Nowadays, CFS is making room
+for EEVDF, for which documentation can be found in
+Documentation/scheduler/sched-eevdf.rst.
80% of CFS's design can be summed up in a single sentence: CFS basically models
an "ideal, precise multi-tasking CPU" on real hardware.
diff --git a/Documentation/scheduler/sched-eevdf.rst b/Documentation/scheduler/sched-eevdf.rst
new file mode 100644
index 000000000000..83efe7c0a30d
--- /dev/null
+++ b/Documentation/scheduler/sched-eevdf.rst
@@ -0,0 +1,43 @@
+===============
+EEVDF Scheduler
+===============
+
+The "Earliest Eligible Virtual Deadline First" (EEVDF) was first introduced
+in a scientific publication in 1995 [1]. The Linux kernel began
+transitioning to EEVDF in version 6.6 (as a new option in 2024), moving
+away from the earlier Completely Fair Scheduler (CFS) in favor of a version
+of EEVDF proposed by Peter Zijlstra in 2023 [2-4]. More information
+regarding CFS can be found in
+Documentation/scheduler/sched-design-CFS.rst.
+
+Similarly to CFS, EEVDF aims to distribute CPU time equally among all
+runnable tasks with the same priority. To do so, it assigns a virtual run
+time to each task, creating a "lag" value that can be used to determine
+whether a task has received its fair share of CPU time. In this way, a task
+with a positive lag is owed CPU time, while a negative lag means the task
+has exceeded its portion. EEVDF picks tasks with lag greater than or equal
+to zero and calculates a virtual deadline (VD) for each, selecting the task
+with the earliest VD to execute next. It's important to note that this
+allows latency-sensitive tasks with shorter time slices to be prioritized,
+which helps with their responsiveness.
+
+There are ongoing discussions on how to manage lag, especially for sleeping
+tasks; but at the time of writing EEVDF uses a "decaying" mechanism based
+on virtual run time (VRT). This prevents tasks from exploiting the system
+by sleeping briefly to reset their negative lag: when a task sleeps, it
+remains on the run queue but marked for "deferred dequeue," allowing its
+lag to decay over VRT. Hence, long-sleeping tasks eventually have their lag
+reset. Finally, tasks can preempt others if their VD is earlier, and tasks
+can request specific time slices via the sched_setattr() system call,
+which further facilitates the job of latency-sensitive applications.
+
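+As a minimal userspace sketch (assuming the requested slice is passed in
+sched_attr.sched_runtime, in nanoseconds, for a SCHED_OTHER task; glibc
+provides no wrapper for sched_setattr()):
+
+.. code-block:: c
+
+   #include <stdint.h>
+   #include <string.h>
+   #include <sys/syscall.h>
+   #include <unistd.h>
+
+   /* Userspace definition of sched_attr, matching the uapi layout. */
+   struct sched_attr {
+           uint32_t size;
+           uint32_t sched_policy;
+           uint64_t sched_flags;
+           int32_t  sched_nice;
+           uint32_t sched_priority;
+           uint64_t sched_runtime;
+           uint64_t sched_deadline;
+           uint64_t sched_period;
+           uint32_t sched_util_min;
+           uint32_t sched_util_max;
+   };
+
+   static int request_slice(uint64_t slice_ns)
+   {
+           struct sched_attr attr;
+
+           memset(&attr, 0, sizeof(attr));
+           attr.size = sizeof(attr);
+           attr.sched_policy = 0;          /* SCHED_OTHER */
+           attr.sched_runtime = slice_ns;  /* requested time slice */
+
+           return syscall(SYS_sched_setattr, 0, &attr, 0);
+   }
+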
+REFERENCES
+==========
+
+[1] https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=805acf7726282721504c8f00575d91ebfd750564
+
+[2] https://lore.kernel.org/lkml/a79014e6-ea83-b316-1e12-2ae056bda6fa@linux.vnet.ibm.com/
+
+[3] https://lwn.net/Articles/969062/
+
+[4] https://lwn.net/Articles/925371/
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
new file mode 100644
index 000000000000..6c0d70e2e27d
--- /dev/null
+++ b/Documentation/scheduler/sched-ext.rst
@@ -0,0 +1,326 @@
+==========================
+Extensible Scheduler Class
+==========================
+
+sched_ext is a scheduler class whose behavior can be defined by a set of BPF
+programs - the BPF scheduler.
+
+* sched_ext exports a full scheduling interface so that any scheduling
+ algorithm can be implemented on top.
+
+* The BPF scheduler can group CPUs however it sees fit and schedule them
+ together, as tasks aren't tied to specific CPUs at the time of wakeup.
+
+* The BPF scheduler can be turned on and off dynamically anytime.
+
+* The system integrity is maintained no matter what the BPF scheduler does.
+ The default scheduling behavior is restored anytime an error is detected,
+ a runnable task stalls, or on invoking the SysRq key sequence
+ :kbd:`SysRq-S`.
+
+* When the BPF scheduler triggers an error, debug information is dumped to
+ aid debugging. The debug dump is passed to and printed out by the
+ scheduler binary. The debug dump can also be accessed through the
+ `sched_ext_dump` tracepoint. The SysRq key sequence :kbd:`SysRq-D`
+ triggers a debug dump. This doesn't terminate the BPF scheduler and can
+ only be read through the tracepoint.
+
+Switching to and from sched_ext
+===============================
+
+``CONFIG_SCHED_CLASS_EXT`` is the config option to enable sched_ext and
+``tools/sched_ext`` contains the example schedulers. The following config
+options should be enabled to use sched_ext:
+
+.. code-block:: none
+
+ CONFIG_BPF=y
+ CONFIG_SCHED_CLASS_EXT=y
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_BPF_JIT=y
+ CONFIG_DEBUG_INFO_BTF=y
+ CONFIG_BPF_JIT_ALWAYS_ON=y
+ CONFIG_BPF_JIT_DEFAULT_ON=y
+ CONFIG_PAHOLE_HAS_SPLIT_BTF=y
+ CONFIG_PAHOLE_HAS_BTF_TAG=y
+
+sched_ext is used only when the BPF scheduler is loaded and running.
+
+If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be
+treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is
+loaded.
+
+When the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is not set
+in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
+``SCHED_EXT`` tasks are scheduled by sched_ext.
+
+However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is
+set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
+by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and
+``SCHED_IDLE`` policies are scheduled by CFS.
+
+Terminating the sched_ext scheduler program, triggering :kbd:`SysRq-S`, or
+detection of any internal error including stalled runnable tasks aborts the
+BPF scheduler and reverts all tasks back to CFS.
+
+.. code-block:: none
+
+ # make -j16 -C tools/sched_ext
+ # tools/sched_ext/scx_simple
+ local=0 global=3
+ local=5 global=24
+ local=9 global=44
+ local=13 global=56
+ local=17 global=72
+ ^CEXIT: BPF scheduler unregistered
+
+The current status of the BPF scheduler can be determined as follows:
+
+.. code-block:: none
+
+ # cat /sys/kernel/sched_ext/state
+ enabled
+ # cat /sys/kernel/sched_ext/root/ops
+ simple
+
+You can check if any BPF scheduler has ever been loaded since boot by examining
+this monotonically incrementing counter (a value of zero indicates that no BPF
+scheduler has been loaded):
+
+.. code-block:: none
+
+ # cat /sys/kernel/sched_ext/enable_seq
+ 1
+
+``tools/sched_ext/scx_show_state.py`` is a drgn script which shows more
+detailed information:
+
+.. code-block:: none
+
+ # tools/sched_ext/scx_show_state.py
+ ops : simple
+ enabled : 1
+ switching_all : 1
+ switched_all : 1
+ enable_state : enabled (2)
+ bypass_depth : 0
+ nr_rejected : 0
+ enable_seq : 1
+
+If ``CONFIG_SCHED_DEBUG`` is set, whether a given task is on sched_ext can
+be determined as follows:
+
+.. code-block:: none
+
+ # grep ext /proc/self/sched
+ ext.enabled : 1
+
+The Basics
+==========
+
+Userspace can implement an arbitrary BPF scheduler by loading a set of BPF
+programs that implement ``struct sched_ext_ops``. The only mandatory field
+is ``ops.name`` which must be a valid BPF object name. All operations are
+optional. The following modified excerpt is from
+``tools/sched_ext/scx_simple.bpf.c`` showing a minimal global FIFO scheduler.
+
+.. code-block:: c
+
+ /*
+ * Decide which CPU a task should be migrated to before being
+ * enqueued (either at wakeup, fork time, or exec time). If an
+ * idle core is found by the default ops.select_cpu() implementation,
+ * then dispatch the task directly to SCX_DSQ_LOCAL and skip the
+ * ops.enqueue() callback.
+ *
+ * Note that this implementation has exactly the same behavior as the
+ * default ops.select_cpu implementation. The behavior of the scheduler
+ * would be exactly the same if the implementation just didn't define the
+ * simple_select_cpu() struct_ops prog.
+ */
+ s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+ {
+ s32 cpu;
+ /* Need to initialize or the BPF verifier will reject the program */
+ bool direct = false;
+
+ cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &direct);
+
+ if (direct)
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+
+ return cpu;
+ }
+
+ /*
+ * Do a direct dispatch of a task to the global DSQ. This ops.enqueue()
+ * callback will only be invoked if we failed to find a core to dispatch
+ * to in ops.select_cpu() above.
+ *
+ * Note that this implementation has exactly the same behavior as the
+ * default ops.enqueue implementation, which just dispatches the task
+ * to SCX_DSQ_GLOBAL. The behavior of the scheduler would be exactly the same
+ * if the implementation just didn't define the simple_enqueue struct_ops
+ * prog.
+ */
+ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+ {
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ }
+
+ s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
+ {
+ /*
+ * By default, all SCHED_EXT, SCHED_OTHER, SCHED_IDLE, and
+ * SCHED_BATCH tasks should use sched_ext.
+ */
+ return 0;
+ }
+
+ void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei)
+ {
+ exit_type = ei->type;
+ }
+
+ SEC(".struct_ops")
+ struct sched_ext_ops simple_ops = {
+ .select_cpu = (void *)simple_select_cpu,
+ .enqueue = (void *)simple_enqueue,
+ .init = (void *)simple_init,
+ .exit = (void *)simple_exit,
+ .name = "simple",
+ };
+
+Dispatch Queues
+---------------
+
+To match the impedance between the scheduler core and the BPF scheduler,
+sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a
+priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``),
+and one local DSQ per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
+an arbitrary number of DSQs using ``scx_bpf_create_dsq()`` and
+``scx_bpf_destroy_dsq()``.
+
+A CPU always executes a task from its local DSQ. A task is "dispatched" to a
+DSQ. A non-local DSQ is "consumed" to transfer a task to the consuming CPU's
+local DSQ.
+
+When a CPU is looking for the next task to run, if the local DSQ is not
+empty, the first task is picked. Otherwise, the CPU tries to consume the
+global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()``
+is invoked.
+
+Scheduling Cycle
+----------------
+
+The following briefly shows how a waking task is scheduled and executed.
+
+1. When a task is waking up, ``ops.select_cpu()`` is the first operation
+ invoked. This serves two purposes: providing a CPU selection
+ optimization hint, and waking up the selected CPU if it is idle.
+
+ The CPU selected by ``ops.select_cpu()`` is an optimization hint and not
+ binding. The actual decision is made at the last step of scheduling.
+   However, there is a small performance gain if the CPU returned by
+   ``ops.select_cpu()`` matches the CPU the task eventually runs on.
+
+ A side-effect of selecting a CPU is waking it up from idle. While a BPF
+   scheduler can wake up any CPU using the ``scx_bpf_kick_cpu()`` helper,
+ using ``ops.select_cpu()`` judiciously can be simpler and more efficient.
+
+ A task can be immediately dispatched to a DSQ from ``ops.select_cpu()`` by
+ calling ``scx_bpf_dispatch()``. If the task is dispatched to
+ ``SCX_DSQ_LOCAL`` from ``ops.select_cpu()``, it will be dispatched to the
+ local DSQ of whichever CPU is returned from ``ops.select_cpu()``.
+ Additionally, dispatching directly from ``ops.select_cpu()`` will cause the
+ ``ops.enqueue()`` callback to be skipped.
+
+ Note that the scheduler core will ignore an invalid CPU selection, for
+ example, if it's outside the allowed cpumask of the task.
+
+2. Once the target CPU is selected, ``ops.enqueue()`` is invoked (unless the
+ task was dispatched directly from ``ops.select_cpu()``). ``ops.enqueue()``
+ can make one of the following decisions:
+
+ * Immediately dispatch the task to either the global or local DSQ by
+ calling ``scx_bpf_dispatch()`` with ``SCX_DSQ_GLOBAL`` or
+ ``SCX_DSQ_LOCAL``, respectively.
+
+ * Immediately dispatch the task to a custom DSQ by calling
+ ``scx_bpf_dispatch()`` with a DSQ ID which is smaller than 2^63.
+
+   * Queue the task on the BPF side, for example in a BPF map (see the
+     sketch following this list).
+
+3. When a CPU is ready to schedule, it first looks at its local DSQ. If
+ empty, it then looks at the global DSQ. If there still isn't a task to
+ run, ``ops.dispatch()`` is invoked which can use the following two
+ functions to populate the local DSQ.
+
+ * ``scx_bpf_dispatch()`` dispatches a task to a DSQ. Any target DSQ can
+ be used - ``SCX_DSQ_LOCAL``, ``SCX_DSQ_LOCAL_ON | cpu``,
+ ``SCX_DSQ_GLOBAL`` or a custom DSQ. While ``scx_bpf_dispatch()``
+ currently can't be called with BPF locks held, this is being worked on
+     and will be supported. ``scx_bpf_dispatch()`` schedules dispatches
+     rather than performing them immediately. There can be up to
+ ``ops.dispatch_max_batch`` pending tasks.
+
+   * ``scx_bpf_consume()`` transfers a task from the specified non-local DSQ
+ to the dispatching DSQ. This function cannot be called with any BPF
+ locks held. ``scx_bpf_consume()`` flushes the pending dispatched tasks
+ before trying to consume the specified DSQ.
+
+4. After ``ops.dispatch()`` returns, if there are tasks in the local DSQ,
+ the CPU runs the first one. If empty, the following steps are taken:
+
+ * Try to consume the global DSQ. If successful, run the task.
+
+ * If ``ops.dispatch()`` has dispatched any tasks, retry #3.
+
+ * If the previous task is an SCX task and still runnable, keep executing
+ it (see ``SCX_OPS_ENQ_LAST``).
+
+ * Go idle.
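+
+As mentioned in step 2 above, a sketch of queueing tasks on the BPF side
+and dispatching them from ``ops.dispatch()`` might look like the following
+(hypothetical ``example_*`` prog names; a trivial single-queue variant of
+what ``scx_qmap`` does with ``BPF_MAP_TYPE_QUEUE``):
+
+.. code-block:: c
+
+    struct {
+            __uint(type, BPF_MAP_TYPE_QUEUE);
+            __uint(max_entries, 4096);
+            __type(value, u32);
+    } queued_pids SEC(".maps");
+
+    void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
+    {
+            u32 pid = p->pid;
+
+            /* Fall back to the global DSQ if the BPF queue is full. */
+            if (bpf_map_push_elem(&queued_pids, &pid, 0))
+                    scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL,
+                                     enq_flags);
+    }
+
+    void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+    {
+            struct task_struct *p;
+            u32 pid;
+
+            if (bpf_map_pop_elem(&queued_pids, &pid))
+                    return;
+
+            /* Re-acquire the task and move it to this CPU's local DSQ. */
+            p = bpf_task_from_pid(pid);
+            if (p) {
+                    scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+                    bpf_task_release(p);
+            }
+    }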
+
+Note that the BPF scheduler can always choose to dispatch tasks immediately
+in ``ops.enqueue()`` as illustrated in the above simple example. If only the
+built-in DSQs are used, there is no need to implement ``ops.dispatch()`` as
+a task is never queued on the BPF scheduler and both the local and global
+DSQs are consumed automatically.
+
+``scx_bpf_dispatch()`` queues the task on the FIFO of the target DSQ. Use
+``scx_bpf_dispatch_vtime()`` for the priority queue. Internal DSQs such as
+``SCX_DSQ_LOCAL`` and ``SCX_DSQ_GLOBAL`` do not support priority-queue
+dispatching, and must be dispatched to with ``scx_bpf_dispatch()``. See the
+function documentation and usage in ``tools/sched_ext/scx_simple.bpf.c`` for
+more information.
+
+Where to Look
+=============
+
+* ``include/linux/sched/ext.h`` defines the core data structures, ops table
+ and constants.
+
+* ``kernel/sched/ext.c`` contains sched_ext core implementation and helpers.
+ The functions prefixed with ``scx_bpf_`` can be called from the BPF
+ scheduler.
+
+* ``tools/sched_ext/`` hosts example BPF scheduler implementations.
+
+ * ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a
+ custom DSQ.
+
+ * ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five
+ levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``.
+
+ABI Instability
+===============
+
+The APIs provided by sched_ext to BPF scheduler programs have no stability
+guarantees. This includes the ops table callbacks and constants defined in
+``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in
+``kernel/sched/ext.c``.
+
+While we will attempt to provide a relatively stable API surface when
+possible, these interfaces are subject to change without warning between
+kernel versions.
diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst
index 59f8fc106cb0..3e0a7114a862 100644
--- a/Documentation/security/index.rst
+++ b/Documentation/security/index.rst
@@ -19,3 +19,4 @@ Security Documentation
digsig
landlock
secrets/index
+ ipe
diff --git a/Documentation/security/ipe.rst b/Documentation/security/ipe.rst
new file mode 100644
index 000000000000..4a7d953abcdc
--- /dev/null
+++ b/Documentation/security/ipe.rst
@@ -0,0 +1,446 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Integrity Policy Enforcement (IPE) - Kernel Documentation
+=========================================================
+
+.. NOTE::
+
+ This is documentation targeted at developers, instead of administrators.
+ If you're looking for documentation on the usage of IPE, please see
+ :doc:`IPE admin guide </admin-guide/LSM/ipe>`.
+
+Historical Motivation
+---------------------
+
+The original issue that prompted IPE's implementation was the creation
+of a locked-down system. This system would be born-secure and have
+strong integrity guarantees over both the executable code and specific
+*data files* on the system that were critical to its function. These
+specific data files would not be readable unless they passed integrity
+policy. A mandatory access control system would be present, and
+as a result, xattrs would have to be protected. This led to a selection
+of what would provide the integrity claims. At the time, there were two
+main mechanisms considered that could guarantee integrity for the system
+with these requirements:
+
+ 1. IMA + EVM Signatures
+ 2. DM-Verity
+
+Both options were carefully considered; however, the choice to use DM-Verity
+over IMA+EVM as the *integrity mechanism* in the original use case of IPE
+was made for three main reasons:
+
+ 1. Protection of additional attack vectors:
+
+ * With IMA+EVM, without an encryption solution, the system is vulnerable
+ to offline attack against the aforementioned specific data files.
+
+      Unlike executables, read operations (like those on the protected data
+      files) cannot be enforced to be globally integrity verified. This means
+      there must be some form of selector to determine whether a read should
+      enforce the integrity policy or not.
+
+ At the time, this was done with mandatory access control labels. An IMA
+ policy would indicate what labels required integrity verification, which
+ presented an issue: EVM would protect the label, but if an attacker could
+      modify the filesystem offline, the attacker could wipe all the xattrs -
+ including the SELinux labels that would be used to determine whether the
+ file should be subject to integrity policy.
+
+      With dm-verity, as the xattrs are saved as part of the Merkle tree, if
+      the filesystem protected by dm-verity is modified offline, the
+      checksum no longer matches and the file fails to be read.
+
+    * As userspace binaries are paged in Linux, dm-verity also offers
+      additional protection against a hostile block device. In such an
+      attack, the block device initially reports the appropriate content for
+      the IMA hash, passing the required integrity check. Then, on the page
+      fault that accesses the real data, it reports the attacker's payload.
+      Since dm-verity will check the data when the page fault occurs (and
+      the disk access), this attack is mitigated.
+
+ 2. Performance:
+
+ * dm-verity provides integrity verification on demand as blocks are
+      read, versus requiring the entire file to be read into memory for
+ validation.
+
+ 3. Simplicity of signing:
+
+ * No need for two signatures (IMA, then EVM): one signature covers
+ an entire block device.
+ * Signatures can be stored externally to the filesystem metadata.
+ * The signature supports an x.509-based signing infrastructure.
+
+The next step was to choose a *policy* to enforce the integrity mechanism.
+The minimum requirements for the policy were:
+
+ 1. The policy itself must be integrity verified (preventing trivial
+ attack against it).
+ 2. The policy itself must be resistant to rollback attacks.
+ 3. The policy enforcement must have a permissive-like mode.
+ 4. The policy must be able to be updated, in its entirety, without
+ a reboot.
+ 5. Policy updates must be atomic.
+ 6. The policy must support *revocations* of previously authored
+ components.
+ 7. The policy must be auditable at any point in time.
+
+IMA, as the only integrity policy mechanism at the time, was
+considered against this list of requirements, and did not fulfill
+all of the minimum requirements. Extending IMA to cover these
+requirements was considered, but ultimately discarded for
+two reasons:
+
+ 1. Regression risk; many of these changes would result in
+ dramatic code changes to IMA, which is already present in the
+ kernel, and therefore might impact users.
+
+ 2. IMA was used in the system for measurement and attestation;
+ separation of measurement policy from local integrity policy
+ enforcement was considered favorable.
+
+Due to these reasons, it was decided that a new LSM should be created,
+whose responsibility would be only the local integrity policy enforcement.
+
+Role and Scope
+--------------
+
+IPE, as its name implies, is fundamentally an integrity policy enforcement
+solution; IPE does not mandate how integrity is provided, but instead
+leaves that decision to the system administrator, who sets the security bar
+via the mechanisms that best suit their individual needs.
+There are several different integrity solutions that provide different
+levels of security guarantees, and IPE allows sysadmins to express policy
+for theoretically all of them.
+
+IPE does not have an inherent mechanism to ensure integrity on its own.
+Instead, there are more effective layers available for building systems that
+can guarantee integrity. It's important to note that the mechanism for proving
+integrity is independent of the policy for enforcing that integrity claim.
+
+Therefore, IPE was designed around:
+
+ 1. Easy integrations with integrity providers.
+ 2. Ease of use for platform administrators/sysadmins.
+
+Design Rationale
+----------------
+
+IPE was designed after evaluating existing integrity policy solutions
+in other operating systems and environments. In this survey of other
+implementations, there were a few pitfalls identified:
+
+ 1. Policies were not readable by humans, usually requiring a binary
+ intermediary format.
+ 2. A single, non-customizable action was implicitly taken as a default.
+ 3. Debugging the policy required manual steps to determine what rule was violated.
+ 4. Authoring a policy required an in-depth knowledge of the larger system,
+ or operating system.
+
+IPE attempts to avoid all of these pitfalls.
+
+Policy
+~~~~~~
+
+Plain Text
+^^^^^^^^^^
+
+IPE's policy is plain-text. This introduces slightly larger policy files than
+other LSMs, but solves two major problems that occur with some integrity policy
+solutions on other platforms.
+
+The first issue is one of code maintenance and duplication. To author
+policies, the policy has to exist in some form of string representation (be
+it structured, through XML, JSON, YAML, etcetera) so that the policy author
+can understand what is being written. In a hypothetical binary policy
+design, a serializer is necessary to write the policy from the human
+readable form to the binary form, and a deserializer is needed to interpret
+the binary form into a data structure in the kernel.
+
+Eventually, another deserializer will be needed to transform the binary form
+back into the human-readable form, with as much information preserved as
+possible. This is because a
+user of this access control system will have to keep a lookup table of a checksum
+and the original file itself to try to understand what policies have been deployed
+on this system and what policies have not. For a single user, this may be alright,
+as old policies can be discarded almost immediately after the update takes hold.
+For users that manage computer fleets in the thousands, if not hundreds of thousands,
+with multiple different operating systems, and multiple different operational needs,
+this quickly becomes an issue, as stale policies from years ago may be present,
+quickly resulting in the need to recover the policy or fund extensive infrastructure
+to track what each policy contains.
+
+With now three separate serializers/deserializers, maintenance becomes
+costly. If the policy avoids the binary format, there is only one required
+serializer: from the human-readable form to the data structure in the
+kernel, saving on code maintenance and retaining operability.
+
+The second issue with a binary format is one of transparency. As IPE
+controls access based on the trust of the system's resources, its policy
+must itself be trusted before any change is accepted. This is done through
+signatures, which makes signing a required part of the process. Signing is
+typically done with a high security bar, as anything signed can be used to
+attack integrity enforcement systems. It is also important that, when
+signing something, the signer is aware of what they are signing. A binary
+policy can obscure that fact; what signers see is an opaque binary blob.
+With a plain-text policy, on the other hand, the signers see the actual
+policy submitted for signing.
+
+Boot Policy
+~~~~~~~~~~~
+
+IPE, if configured appropriately, is able to enforce a policy as soon as a
+kernel is booted and usermode starts. That implies some level of storage
+of the policy to apply the minute usermode starts. Generally, that storage
+can be handled in one of three ways:
+
+ 1. The policy file(s) live on disk and the kernel loads the policy prior
+    to any code path that would result in an enforcement decision.
+ 2. The policy file(s) are passed by the bootloader to the kernel, which
+    parses the policy.
+ 3. There is a policy file that is compiled into the kernel that is
+    parsed and enforced on initialization.
+
+The first option has problems: the kernel reading files from userspace
+is typically discouraged and very uncommon.
+
+The second option also has problems: Linux supports a variety of bootloaders
+across its entire ecosystem - every bootloader would have to support this
+new methodology or there must be an independent source. It would likely
+result in more drastic changes to the kernel startup than necessary.
+
+The third option is the best, but it's important to be aware that the policy
+will take up space in the kernel image it's compiled into. It's important to
+keep this policy generalized enough that userspace can load a new, more
+complicated policy, but restrictive enough that it will not overauthorize
+and cause security issues.
+
+The initramfs provides a way to establish this bootup path. The kernel
+starts with a minimal policy that trusts the initramfs only. Inside the
+initramfs, when the real rootfs is mounted but not yet transferred to, it
+deploys and activates a policy that trusts the new root filesystem.
+This prevents overauthorization at any step, and keeps the kernel policy
+to a minimal size.
+
+Startup
+^^^^^^^
+
+Not every system, however, starts with an initramfs, so the startup policy
+compiled into the kernel will need some flexibility to express how trust
+is established for the next phase of the bootup. To this end, if we just
+make the compiled-in policy a full IPE policy, it allows system builders
+to express the first stage bootup requirements appropriately.
+
+Updatable, Rebootless Policy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Requirements change over time (vulnerabilities are found in previously
+trusted applications, keys roll, etcetera). Updating a kernel to meet
+those security goals is not always a suitable option, as updates are not
+always risk-free, and blocking a security update leaves systems vulnerable.
+This means IPE requires a policy that can be completely updated (allowing
+revocations of existing policy) from a source external to the kernel (allowing
+policies to be updated without updating the kernel).
+
+Additionally, since the kernel is stateless between invocations and reading
+policy files off the disk from kernel space is a bad idea(tm), policy
+updates have to be done rebootlessly.
+
+An update coming from an external source could potentially be malicious, so
+the policy needs a way to be identified as trusted. This is
+done via a signature chained to a trust source in the kernel. Arbitrarily,
+this is the ``SYSTEM_TRUSTED_KEYRING``, a keyring that is initially
+populated at kernel compile-time, as this matches the expectation that the
+author of the compiled-in policy described above is the same entity that can
+deploy policy updates.
+
+Anti-Rollback / Anti-Replay
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Over time, vulnerabilities are found and trusted resources may no longer be
+trusted. IPE's policy is no exception to this. There can be
+instances where a mistaken policy author deploys an insecure policy,
+before correcting it with a secure policy.
+
+Assuming that an attacker acquires the insecure policy as soon as it is
+signed, IPE needs a way to prevent rollback from the secure policy update
+to the insecure policy update.
+
+Initially, IPE's policy can have a policy_version that states the
+minimum required version across all policies that can be active on
+the system. This will prevent rollback while the system is live.
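+
+For illustration, a versioned policy might begin with a header like the
+following sketch (the exact grammar is defined by the IPE policy parser;
+the name and version here are made up)::
+
+    policy_name=Example_Policy policy_version=0.2.0
+    DEFAULT action=DENY
+
+Once this policy is active, an otherwise validly-signed update carrying a
+lower version such as ``0.1.0`` would be rejected as a rollback.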
+
+.. WARNING::
+
+ However, since the kernel is stateless across boots, this policy
+ version will be reset to 0.0.0 on the next boot. System builders
+ need to be aware of this, and ensure the new secure policies are
+ deployed ASAP after a boot to ensure that the window of
+ opportunity is minimal for an attacker to deploy the insecure policy.
+
+Implicit Actions
+~~~~~~~~~~~~~~~~
+
+The issue of implicit actions only becomes visible when you consider
+a mixed level of security bars across multiple operations in a system.
+For example, consider a system that has strong integrity guarantees
+over both the executable code and specific *data files* on the system
+that are critical to its function. In this system, three types of policies
+are possible:
+
+ 1. A policy in which failure to match any rules in the policy results
+ in the action being denied.
+ 2. A policy in which failure to match any rules in the policy results
+ in the action being allowed.
+ 3. A policy in which the action taken when no rules are matched is
+ specified by the policy author.
+
+With the first option, a policy could look like this::
+
+ op=EXECUTE integrity_verified=YES action=ALLOW
+
+In the example system, this works well for the executables, as all
+executables should have integrity guarantees, without exception. The
+issue arises with the second requirement about specific data files.
+This would result in a policy like this (assuming each line is
+evaluated in order)::
+
+ op=EXECUTE integrity_verified=YES action=ALLOW
+
+ op=READ integrity_verified=NO label=critical_t action=DENY
+ op=READ action=ALLOW
+
+This is somewhat clear if you read the docs and understand that the policy
+is evaluated in order and that the default is a denial; however, the
+last line effectively changes that default to an ALLOW. This is
+required, because in a realistic system, there are some unverified
+reads (imagine appending to a log file).
+
+The second option, where matching no rules results in an allow, is clearer
+for the specific data files::
+
+ op=READ integrity_verified=NO label=critical_t action=DENY
+
+And, like the first option, it falls short in the execution scenario,
+effectively needing to override the default::
+
+ op=EXECUTE integrity_verified=YES action=ALLOW
+ op=EXECUTE action=DENY
+
+ op=READ integrity_verified=NO label=critical_t action=DENY
+
+This leaves the third option. Instead of requiring users to be clever
+and override the default with an empty rule, force the end-user
+to consider what the appropriate default should be for their
+scenario and explicitly state it::
+
+ DEFAULT op=EXECUTE action=DENY
+ op=EXECUTE integrity_verified=YES action=ALLOW
+
+ DEFAULT op=READ action=ALLOW
+ op=READ integrity_verified=NO label=critical_t action=DENY
+
+Policy Debugging
+~~~~~~~~~~~~~~~~
+
+When developing a policy, it is useful to know what line of the policy
+is being violated to reduce debugging costs; narrowing the scope of the
+investigation to the exact line that resulted in the action. Some integrity
+policy systems do not provide this information, instead providing the
+information that was used in the evaluation. This then requires a correlation
+with the policy to evaluate what went wrong.
+
+Instead, IPE just emits the rule that was matched. This limits the scope
+of the investigation to the exact policy line (in the case of a specific
+rule), or the section (in the case of a DEFAULT). This decreases iteration
+and investigation times when policy failures are observed while evaluating
+policies.
+
+IPE's policy engine is also designed in a way that makes it obvious to a
+human how to investigate a policy failure. Each line is evaluated in the
+sequence in which it is written, so the algorithm is very simple for humans
+to follow when recreating the steps that could have caused the failure. In
+other surveyed systems, optimizations occur (sorting rules, for instance)
+when loading the policy. In those systems, debugging requires multiple
+steps, and the algorithm may not always be clear to the end-user without
+reading the code first.
+
+Simplified Policy
+~~~~~~~~~~~~~~~~~
+
+Finally, IPE's policy is designed for sysadmins, not kernel developers.
+Instead of covering individual LSM hooks (or syscalls), IPE covers
+operations. This means that instead of needing to know that the syscalls
+``mmap``, ``mprotect``, ``execve``, and ``uselib`` must have rules
+protecting them, sysadmins must simply know that they want to restrict code
+execution. This limits the number of bypasses that could occur due to a
+lack of knowledge of the underlying system, whereas the maintainers of IPE,
+being kernel developers, can make the correct choice to determine whether
+something maps to these operations, and under what conditions.
+
+Implementation Notes
+--------------------
+
+Anonymous Memory
+~~~~~~~~~~~~~~~~
+
+Anonymous memory isn't treated any differently from any other access in IPE.
+When anonymous memory is mapped with ``+X``, it still comes into the ``file_mmap``
+or ``file_mprotect`` hook, but with a ``NULL`` file object. This is submitted to
+the evaluation, like any other file. However, all current trust properties will
+evaluate to false, as they are all file-based and the operation is not
+associated with a file.
+
+.. WARNING::
+
+ This also occurs with the ``kernel_load_data`` hook, when the kernel is
+ loading data from a userspace buffer that is not backed by a file. In this
+ scenario all current trust properties will also evaluate to false.
+
+Securityfs Interface
+~~~~~~~~~~~~~~~~~~~~
+
+The per-policy securityfs tree is somewhat unique. For example, a standard
+securityfs policy tree looks like this::
+
+ MyPolicy
+ |- active
+ |- delete
+ |- name
+ |- pkcs7
+ |- policy
+ |- update
+ |- version
+
+The policy is stored in the ``->i_private`` data of the MyPolicy inode.
+
+Tests
+-----
+
+IPE has KUnit tests for the policy parser. The recommended kunitconfig is::
+
+ CONFIG_KUNIT=y
+ CONFIG_SECURITY=y
+ CONFIG_SECURITYFS=y
+ CONFIG_PKCS7_MESSAGE_PARSER=y
+ CONFIG_SYSTEM_DATA_VERIFICATION=y
+ CONFIG_FS_VERITY=y
+ CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
+ CONFIG_BLOCK=y
+ CONFIG_MD=y
+ CONFIG_BLK_DEV_DM=y
+ CONFIG_DM_VERITY=y
+ CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
+ CONFIG_NET=y
+ CONFIG_AUDIT=y
+ CONFIG_AUDITSYSCALL=y
+ CONFIG_BLK_DEV_INITRD=y
+
+ CONFIG_SECURITY_IPE=y
+ CONFIG_IPE_PROP_DM_VERITY=y
+ CONFIG_IPE_PROP_DM_VERITY_SIGNATURE=y
+ CONFIG_IPE_PROP_FS_VERITY=y
+ CONFIG_IPE_PROP_FS_VERITY_BUILTIN_SIG=y
+ CONFIG_SECURITY_IPE_KUNIT_TEST=y
+
+In addition, IPE has a Python-based integration
+`test suite <https://github.com/microsoft/ipe/tree/test-suite>`_ that
+can test both user interfaces and enforcement functionality.
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index 829c672d9fe6..04254474fa04 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -1059,6 +1059,9 @@ power_save
Automatic power-saving timeout (in second, 0 = disable)
power_save_controller
Reset HD-audio controller in power-saving mode (default = on)
+pm_blacklist
+ Enable / disable power-management deny-list (default = look up PM
+ deny-list, 0 = skip PM deny-list, 1 = force to turn off runtime PM)
align_buffer_size
Force rounding of buffer/period sizes to multiples of 128 bytes.
This is more efficient in terms of memory access but isn't
diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst
index ef6a4513cce7..e199131bf5ab 100644
--- a/Documentation/sound/hd-audio/notes.rst
+++ b/Documentation/sound/hd-audio/notes.rst
@@ -321,12 +321,6 @@ Kernel Configuration
--------------------
In general, I recommend you to enable the sound debug option,
``CONFIG_SND_DEBUG=y``, no matter whether you are debugging or not.
-This enables snd_printd() macro and others, and you'll get additional
-kernel messages at probing.
-
-In addition, you can enable ``CONFIG_SND_DEBUG_VERBOSE=y``. But this
-will give you far more messages. Thus turn this on only when you are
-sure to want it.
Don't forget to turn on the appropriate ``CONFIG_SND_HDA_CODEC_*``
options. Note that each of them corresponds to the codec chip, not
diff --git a/Documentation/sound/index.rst b/Documentation/sound/index.rst
index 7e67e12730d3..c437f2a4bc85 100644
--- a/Documentation/sound/index.rst
+++ b/Documentation/sound/index.rst
@@ -13,6 +13,7 @@ Sound Subsystem Documentation
alsa-configuration
hd-audio/index
cards/index
+ utimers
.. only:: subproject and html
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 801b0bb57e97..895752cbcedd 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -4030,31 +4030,6 @@ located in the new subdirectory, sound/pci/xyz.
Useful Functions
================
-:c:func:`snd_printk()` and friends
-----------------------------------
-
-.. note:: This subsection describes a few helper functions for
- decorating a bit more on the standard :c:func:`printk()` & co.
- However, in general, the use of such helpers is no longer recommended.
- If possible, try to stick with the standard functions like
- :c:func:`dev_err()` or :c:func:`pr_err()`.
-
-ALSA provides a verbose version of the :c:func:`printk()` function.
-If a kernel config ``CONFIG_SND_VERBOSE_PRINTK`` is set, this function
-prints the given message together with the file name and the line of the
-caller. The ``KERN_XXX`` prefix is processed as well as the original
-:c:func:`printk()` does, so it's recommended to add this prefix,
-e.g. snd_printk(KERN_ERR "Oh my, sorry, it's extremely bad!\\n");
-
-There are also :c:func:`printk()`'s for debugging.
-:c:func:`snd_printd()` can be used for general debugging purposes.
-If ``CONFIG_SND_DEBUG`` is set, this function is compiled, and works
-just like :c:func:`snd_printk()`. If the ALSA is compiled without
-the debugging flag, it's ignored.
-
-:c:func:`snd_printdd()` is compiled in only when
-``CONFIG_SND_DEBUG_VERBOSE`` is set.
-
:c:func:`snd_BUG()`
-------------------
diff --git a/Documentation/sound/utimers.rst b/Documentation/sound/utimers.rst
new file mode 100644
index 000000000000..ec21567d3f72
--- /dev/null
+++ b/Documentation/sound/utimers.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Userspace-driven timers
+=======================
+
+:Author: Ivan Orlov <ivan.orlov0322@gmail.com>
+
+Preface
+=======
+
+This document describes the userspace-driven timers: virtual ALSA timers
+which can be created and controlled by userspace applications using
+IOCTL calls. Such timers can be useful when synchronizing an audio
+stream with timer sources which we don't have ALSA timers exported for
+(e.g. PTP clocks), and when synchronizing the audio stream going through
+two virtual sound devices using ``snd-aloop`` (for instance, when
+we have a network application sending frames to one snd-aloop device,
+and another sound application listening on the other end of snd-aloop).
+
+Enabling userspace-driven timers
+================================
+
+The userspace-driven timers can be enabled in the kernel using the
+``CONFIG_SND_UTIMER`` configuration option. It depends on the
+``CONFIG_SND_TIMER`` option, which must also be enabled.
+
+Userspace-driven timers API
+===========================
+
+A userspace application can create a userspace-driven ALSA timer by
+executing the ``SNDRV_TIMER_IOCTL_CREATE`` ioctl call on the
+``/dev/snd/timer`` device file descriptor. The ``snd_timer_uinfo``
+structure should be passed as an ioctl argument:
+
+::
+
+ struct snd_timer_uinfo {
+ __u64 resolution;
+ int fd;
+ unsigned int id;
+ unsigned char reserved[16];
+    };
+
+The ``resolution`` field sets the desired resolution in nanoseconds for
+the virtual timer. The ``resolution`` field simply provides information
+about the virtual timer, but does not affect the timing itself. The ``id``
+field gets overwritten by the ioctl, and the identifier you get in this
+field after the call can be used as a timer subdevice number when
+passing the timer to the ``snd-aloop`` kernel module or other userspace
+applications. There can be up to 128 userspace-driven timers in the
+system at any moment, thus the id value ranges from 0 to 127.
+
+Besides overwriting the ``snd_timer_uinfo`` struct, the ioctl stores
+a timer file descriptor, which can be used to trigger the timer, in the
+``fd`` field of the ``snd_timer_uinfo`` struct. Allocation of a file
+descriptor for the timer guarantees that the timer can only be triggered
+by the process which created it. The timer can then be triggered with the
+``SNDRV_TIMER_IOCTL_TRIGGER`` ioctl call on the timer file descriptor.
+
+So, the example code for creating and triggering the timer would be:
+
+::
+
+ static struct snd_timer_uinfo utimer_info = {
+ /* Timer is going to tick (presumably) every 1000000 ns */
+ .resolution = 1000000ULL,
+ .id = -1,
+ };
+
+ int timer_device_fd = open("/dev/snd/timer", O_RDWR | O_CLOEXEC);
+
+ if (ioctl(timer_device_fd, SNDRV_TIMER_IOCTL_CREATE, &utimer_info)) {
+ perror("Failed to create the timer");
+ return -1;
+ }
+
+ ...
+
+ /*
+ * Now we want to trigger the timer. Callbacks of all of the
+     * timer instances bound to this timer will be executed after
+ * this call.
+ */
+ ioctl(utimer_info.fd, SNDRV_TIMER_IOCTL_TRIGGER, NULL);
+
+ ...
+
+ /* Now, destroy the timer */
+    close(utimer_info.fd);
+
+
+A more detailed example of creating and ticking the timer can be found
+in the utimer ALSA selftest.
+
+Userspace-driven timers and snd-aloop
+-------------------------------------
+
+Userspace-driven timers can easily be used with the ``snd-aloop`` module
+when synchronizing two sound applications on both ends of the virtual
+sound loopback. For instance, if one of the applications receives sound
+frames from the network and sends them to an snd-aloop pcm device, and
+another application listens for frames on the other snd-aloop pcm device,
+it makes sense for the ALSA middle layer to initiate a data transaction
+when a new period of data is received through the network, rather than
+when a certain amount of jiffies has elapsed. Userspace-driven ALSA timers
+can be used to achieve this.
+
+To use a userspace-driven ALSA timer as a timer source of snd-aloop, pass
+the following string as the snd-aloop ``timer_source`` parameter:
+
+::
+
+ # modprobe snd-aloop timer_source="-1.4.<utimer_id>"
+
+Where ``utimer_id`` is the id of the timer you created with
+``SNDRV_TIMER_IOCTL_CREATE``, and ``4`` is the device number for
+userspace-driven timers (``SNDRV_TIMER_GLOBAL_UDRIVEN``).
+
+``resolution`` for the userspace-driven ALSA timer used with snd-aloop
+should be calculated as ``1000000000ULL / frame_rate * period_size`` as
+the timer is going to tick every time a new period of frames is ready.
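+
+For example, with a hypothetical stream at 48000 frames per second and a
+period of 480 frames, the timer should tick roughly every 10 ms::
+
+    /* Illustrative values only: 48 kHz frame rate, 480 frames per period */
+    unsigned int frame_rate = 48000;
+    unsigned int period_size = 480;
+
+    /* 1000000000 / 48000 * 480 = 9999840 ns (~10 ms) with integer division */
+    utimer_info.resolution = 1000000000ULL / frame_rate * period_size;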
+
+After that, each time you trigger the timer with
+``SNDRV_TIMER_IOCTL_TRIGGER``, a new period of data will be transferred
+from one snd-aloop device to another.
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
index d479cfa73658..5d68395539fe 100644
--- a/Documentation/sphinx/kerneldoc-preamble.sty
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -199,6 +199,8 @@
% Inactivate CJK after tableofcontents
\apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{}
\xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC
+ % Suppress extra white space at latin .. non-latin in literal blocks
+ \AtBeginEnvironment{sphinxVerbatim}{\CJKsetecglue{}}
}{ % Don't enable CJK
% Custom macros to on/off CJK and switch CJK fonts (Dummy)
\newcommand{\kerneldocCJKon}{}
diff --git a/Documentation/spi/spi-summary.rst b/Documentation/spi/spi-summary.rst
index 7f8accfae6f9..6e21e6f86912 100644
--- a/Documentation/spi/spi-summary.rst
+++ b/Documentation/spi/spi-summary.rst
@@ -614,6 +614,89 @@ queue, and then start some asynchronous transfer engine (unless it's
already running).
+Extensions to the SPI protocol
+------------------------------
+The fact that SPI doesn't have a formal specification or standard permits chip
+manufacturers to implement the SPI protocol in slightly different ways. In most
+cases, SPI protocol implementations from different vendors are compatible
+with each other. For example, in SPI mode 0 (CPOL=0, CPHA=0) the bus lines
+may behave like the following:
+
+::
+
+ nCSx ___ ___
+ \_________________________________________________________________/
+ • •
+ • •
+ SCLK ___ ___ ___ ___ ___ ___ ___ ___
+ _______/ \___/ \___/ \___/ \___/ \___/ \___/ \___/ \_____
+ • : ; : ; : ; : ; : ; : ; : ; : ; •
+ • : ; : ; : ; : ; : ; : ; : ; : ; •
+ MOSI XXX__________ _______ _______ ________XXX
+ 0xA5 XXX__/ 1 \_0_____/ 1 \_0_______0_____/ 1 \_0_____/ 1 \_XXX
+ • ; ; ; ; ; ; ; ; •
+ • ; ; ; ; ; ; ; ; •
+ MISO XXX__________ _______________________ _______ XXX
+ 0xBA XXX__/ 1 \_____0_/ 1 1 1 \_____0__/ 1 \____0__XXX
+
+Legend::
+
+ • marks the start/end of transmission;
+ : marks when data is clocked into the peripheral;
+ ; marks when data is clocked into the controller;
+ X marks when line states are not specified.
+
+In a few cases, chips extend the SPI protocol by specifying line behaviors
+that other SPI implementations leave unspecified (e.g. the data line state
+when CS is not asserted). Those distinct SPI protocols, modes, and
+configurations are supported by different SPI mode flags.
+
+MOSI idle state configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Common SPI protocol implementations don't specify any state or behavior for the
+MOSI line when the controller is not clocking out data. However, there do exist
+peripherals that require a specific MOSI line state when data is not being clocked
+out. For example, if the peripheral expects the MOSI line to be high when the
+controller is not clocking out data (``SPI_MOSI_IDLE_HIGH``), then a transfer in
+SPI mode 0 would look like the following:
+
+::
+
+ nCSx ___ ___
+ \_________________________________________________________________/
+ • •
+ • •
+ SCLK ___ ___ ___ ___ ___ ___ ___ ___
+ _______/ \___/ \___/ \___/ \___/ \___/ \___/ \___/ \_____
+ • : ; : ; : ; : ; : ; : ; : ; : ; •
+ • : ; : ; : ; : ; : ; : ; : ; : ; •
+ MOSI _____ _______ _______ _______________ ___
+ 0x56 \_0_____/ 1 \_0_____/ 1 \_0_____/ 1 1 \_0_____/
+ • ; ; ; ; ; ; ; ; •
+ • ; ; ; ; ; ; ; ; •
+ MISO XXX__________ _______________________ _______ XXX
+ 0xBA XXX__/ 1 \_____0_/ 1 1 1 \_____0__/ 1 \____0__XXX
+
+Legend::
+
+ • marks the start/end of transmission;
+ : marks when data is clocked into the peripheral;
+ ; marks when data is clocked into the controller;
+ X marks when line states are not specified.
+
+In this extension to the usual SPI protocol, the MOSI line state is
+specified to be kept high both while CS is asserted and the controller is
+not clocking out data to the peripheral, and while CS is not asserted.
+
+Peripherals that require this extension must request it by setting the
+``SPI_MOSI_IDLE_HIGH`` bit in the mode attribute of their ``struct
+spi_device`` and calling ``spi_setup()``. Controllers that support this
+extension should indicate it by setting ``SPI_MOSI_IDLE_HIGH`` in the
+mode_bits attribute of their ``struct spi_controller``. The configuration
+to idle MOSI low is analogous but uses the ``SPI_MOSI_IDLE_LOW`` mode bit.
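+
+A minimal sketch of how a peripheral driver might request this extension
+(``example_spi_probe()`` is a hypothetical probe function; error handling
+elided)::
+
+    static int example_spi_probe(struct spi_device *spi)
+    {
+            /* Ask for MOSI to idle high whenever data isn't clocked out. */
+            spi->mode |= SPI_MOSI_IDLE_HIGH;
+
+            /*
+             * Revalidate the configuration; spi_setup() fails if the
+             * controller doesn't advertise SPI_MOSI_IDLE_HIGH in mode_bits.
+             */
+            return spi_setup(spi);
+    }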
+
+
THANKS TO
---------
Contributors to Linux-SPI discussions include (in alphabetical order,
diff --git a/Documentation/staging/xz.rst b/Documentation/staging/xz.rst
index b2f5ff12a161..6953a189e5f2 100644
--- a/Documentation/staging/xz.rst
+++ b/Documentation/staging/xz.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: 0BSD
+
============================
XZ data compression in Linux
============================
@@ -6,62 +8,55 @@ Introduction
============
XZ is a general purpose data compression format with high compression
-ratio and relatively fast decompression. The primary compression
-algorithm (filter) is LZMA2. Additional filters can be used to improve
-compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
-improve compression ratio of executable data.
-
-The XZ decompressor in Linux is called XZ Embedded. It supports
-the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
-for integrity checking. The home page of XZ Embedded is at
-<https://tukaani.org/xz/embedded.html>, where you can find the
-latest version and also information about using the code outside
-the Linux kernel.
-
-For userspace, XZ Utils provide a zlib-like compression library
-and a gzip-like command line tool. XZ Utils can be downloaded from
-<https://tukaani.org/xz/>.
+ratio. The XZ decompressor in Linux is called XZ Embedded. It supports
+the LZMA2 filter and optionally also Branch/Call/Jump (BCJ) filters
+for executable code. CRC32 is supported for integrity checking.
+
+See the `XZ Embedded`_ home page for the latest version, which includes
+a few optional extra features that aren't required in the Linux kernel,
+and for information about using the code outside the Linux kernel.
+
+For userspace, `XZ Utils`_ provide a zlib-like compression library
+and a gzip-like command line tool.
+
+.. _XZ Embedded: https://tukaani.org/xz/embedded.html
+.. _XZ Utils: https://tukaani.org/xz/
XZ related components in the kernel
===================================
The xz_dec module provides XZ decompressor with single-call (buffer
-to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
-module is documented in include/linux/xz.h.
-
-The xz_dec_test module is for testing xz_dec. xz_dec_test is not
-useful unless you are hacking the XZ decompressor. xz_dec_test
-allocates a char device major dynamically to which one can write
-.xz files from userspace. The decompressed output is thrown away.
-Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
-See the xz_dec_test source code for the details.
+to buffer) and multi-call (stateful) APIs in include/linux/xz.h.
For decompressing the kernel image, initramfs, and initrd, there
is a wrapper function in lib/decompress_unxz.c. Its API is the
same as in other decompress_*.c files, which is defined in
include/linux/decompress/generic.h.
-scripts/xz_wrap.sh is a wrapper for the xz command line tool found
-from XZ Utils. The wrapper sets compression options to values suitable
-for compressing the kernel image.
+For kernel makefiles, three commands are provided for use with
+``$(call if_changed)``. They require the xz tool from XZ Utils.
+
+- ``$(call if_changed,xzkern)`` is for compressing the kernel image.
+ It runs the script scripts/xz_wrap.sh which uses arch-optimized
+ options and a big LZMA2 dictionary.
+
+- ``$(call if_changed,xzkern_with_size)`` is like ``xzkern`` above but
+ this also appends a four-byte trailer containing the uncompressed size
+ of the file. The trailer is needed by the boot code on some archs.
-For kernel makefiles, two commands are provided for use with
-$(call if_needed). The kernel image should be compressed with
-$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2
-dictionary. It will also append a four-byte trailer containing the
-uncompressed size of the file, which is needed by the boot code.
-Other things should be compressed with $(call if_needed,xzmisc)
-which will use no BCJ filter and 1 MiB LZMA2 dictionary.
+- Other things can be compressed with ``$(call if_changed,xzmisc)``
+  which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
Notes on compression options
============================
-Since the XZ Embedded supports only streams with no integrity check or
-CRC32, make sure that you don't use some other integrity check type
-when encoding files that are supposed to be decoded by the kernel. With
-liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
-when encoding. With the xz command line tool, use --check=none or
---check=crc32.
+Since XZ Embedded supports only streams with CRC32 or no integrity
+check, make sure that you don't use some other integrity check type
+when encoding files that are supposed to be decoded by the kernel.
+With liblzma from XZ Utils, you need to use either ``LZMA_CHECK_CRC32``
+or ``LZMA_CHECK_NONE`` when encoding. With the ``xz`` command line tool,
+use ``--check=crc32`` or ``--check=none`` to override the default
+``--check=crc64``.
Using CRC32 is strongly recommended unless there is some other layer
which will verify the integrity of the uncompressed data anyway.
@@ -71,57 +66,33 @@ by the decoder; you can only change the integrity check type (or
disable it) for the actual uncompressed data.
In userspace, LZMA2 is typically used with dictionary sizes of several
-megabytes. The decoder needs to have the dictionary in RAM, thus big
-dictionaries cannot be used for files that are intended to be decoded
-by the kernel. 1 MiB is probably the maximum reasonable dictionary
-size for in-kernel use (maybe more is OK for initramfs). The presets
-in XZ Utils may not be optimal when creating files for the kernel,
-so don't hesitate to use custom settings. Example::
-
- xz --check=crc32 --lzma2=dict=512KiB inputfile
-
-An exception to above dictionary size limitation is when the decoder
-is used in single-call mode. Decompressing the kernel itself is an
-example of this situation. In single-call mode, the memory usage
-doesn't depend on the dictionary size, and it is perfectly fine to
-use a big dictionary: for maximum compression, the dictionary should
-be at least as big as the uncompressed data itself.
-
-Future plans
-============
+megabytes. The decoder needs to have the dictionary in RAM:
+
+- In multi-call mode the dictionary is allocated as part of the
+ decoder state. The reasonable maximum dictionary size for in-kernel
+ use will depend on the target hardware: a few megabytes is fine for
+ desktop systems while 64 KiB to 1 MiB might be more appropriate on
+ some embedded systems.
+
+- In single-call mode the output buffer is used as the dictionary
+ buffer. That is, the size of the dictionary doesn't affect the
+ decompressor memory usage at all. Only the base data structures
+ are allocated which take a little less than 30 KiB of memory.
+ For the best compression, the dictionary should be at least
+ as big as the uncompressed data. A notable example of single-call
+ mode is decompressing the kernel itself (except on PowerPC).
+
+The compression presets in XZ Utils may not be optimal when creating
+files for the kernel, so don't hesitate to use custom settings to,
+for example, set the dictionary size. Also, xz may produce a smaller
+file in single-threaded mode, so setting that explicitly is recommended.
+Example::
+
+ xz --threads=1 --check=crc32 --lzma2=dict=512KiB inputfile
+
+xz_dec API
+==========
+
+This is available with ``#include <linux/xz.h>``.
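+
+As a quick orientation before the reference documentation below, a
+single-call decompression sketch might look like this (``in_buf``,
+``out_buf`` and their sizes are placeholders)::
+
+    struct xz_buf b = {
+            .in = in_buf, .in_pos = 0, .in_size = in_len,
+            .out = out_buf, .out_pos = 0, .out_size = out_len,
+    };
+    struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
+    enum xz_ret ret;
+
+    if (!s)
+            return -ENOMEM;
+
+    /* In single-call mode one xz_dec_run() call handles the whole stream. */
+    ret = xz_dec_run(s, &b);    /* XZ_STREAM_END means success */
+    xz_dec_end(s);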
-Creating a limited XZ encoder may be considered if people think it is
-useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
-the fastest settings, so it isn't clear if LZMA2 encoder is wanted
-into the kernel.
-
-Support for limited random-access reading is planned for the
-decompression code. I don't know if it could have any use in the
-kernel, but I know that it would be useful in some embedded projects
-outside the Linux kernel.
-
-Conformance to the .xz file format specification
-================================================
-
-There are a couple of corner cases where things have been simplified
-at expense of detecting errors as early as possible. These should not
-matter in practice all, since they don't cause security issues. But
-it is good to know this if testing the code e.g. with the test files
-from XZ Utils.
-
-Reporting bugs
-==============
-
-Before reporting a bug, please check that it's not fixed already
-at upstream. See <https://tukaani.org/xz/embedded.html> to get the
-latest code.
-
-Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
-Freenode and talk to Larhzu. I don't actively read LKML or other
-kernel-related mailing lists, so if there's something I should know,
-you should email to me personally or use IRC.
-
-Don't bother Igor Pavlov with questions about the XZ implementation
-in the kernel or about XZ Utils. While these two implementations
-include essential code that is directly based on Igor Pavlov's code,
-these implementations aren't maintained nor supported by him.
+.. kernel-doc:: include/linux/xz.h
diff --git a/Documentation/timers/timers-howto.rst b/Documentation/timers/timers-howto.rst
index 5c169e3d29a8..ef7a4652ccc9 100644
--- a/Documentation/timers/timers-howto.rst
+++ b/Documentation/timers/timers-howto.rst
@@ -19,7 +19,7 @@ it really need to delay in atomic context?" If so...
ATOMIC CONTEXT:
You must use the `*delay` family of functions. These
- functions use the jiffie estimation of clock speed
+ functions use the jiffy estimation of clock speed
and will busy wait for enough loop cycles to achieve
the desired delay:
diff --git a/Documentation/trace/debugging.rst b/Documentation/trace/debugging.rst
new file mode 100644
index 000000000000..54fb16239d70
--- /dev/null
+++ b/Documentation/trace/debugging.rst
@@ -0,0 +1,159 @@
+==============================
+Using the tracer for debugging
+==============================
+
+Copyright 2024 Google LLC.
+
+:Author: Steven Rostedt <rostedt@goodmis.org>
+:License: The GNU Free Documentation License, Version 1.2
+ (dual licensed under the GPL v2)
+
+- Written for: 6.12
+
+Introduction
+------------
+The tracing infrastructure can be very useful for debugging the Linux
+kernel. This document is a place to add various methods of using the tracer
+for debugging.
+
+First, make sure that the tracefs file system is mounted::
+
+ $ sudo mount -t tracefs tracefs /sys/kernel/tracing
+
+
+Using trace_printk()
+--------------------
+
+trace_printk() is a very lightweight utility that can be used in any context
+inside the kernel, with the exception of "noinstr" sections. It can be used
+in normal, softirq, interrupt and even NMI context. The trace data is
+written to the tracing ring buffer in a lockless way. To make it even
+lighter weight, when possible, it will only record the pointer to the format
+string, and save the raw arguments into the buffer. The format and the
+arguments will be post-processed when the ring buffer is read. This way the
+trace_printk() format conversions are not done during the hot path, where
+the trace is being recorded.
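+
+For example, a temporary debugging annotation might look like the following
+(a hypothetical call site; the format string works like printk())::
+
+  trace_printk("processing request %d on cpu %d\n",
+               req_id, smp_processor_id());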
+
+trace_printk() is meant only for debugging, and should never be added into
+a subsystem of the kernel. If you need debugging traces, add trace events
+instead. If a trace_printk() is found in the kernel, the following will
+appear in the dmesg::
+
+ **********************************************************
+ ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **
+ ** **
+ ** trace_printk() being used. Allocating extra memory. **
+ ** **
+ ** This means that this is a DEBUG kernel and it is **
+ ** unsafe for production use. **
+ ** **
+ ** If you see this message and you are not debugging **
+ ** the kernel, report this immediately to your vendor! **
+ ** **
+ ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **
+ **********************************************************
+
+Debugging kernel crashes
+------------------------
+There are various methods of acquiring the state of the system when a kernel
+crash occurs. This could be from the oops message in printk, or one could
+use kexec/kdump. But these just show what happened at the time of the crash.
+It can be very useful to know what happened up to the point of the crash.
+The tracing ring buffer, by default, is a circular buffer that will
+overwrite older events with newer ones. When a crash happens, the content of
+the ring buffer will be all the events that led up to the crash.
+
+There are several kernel command line parameters that can be used to help in
+this. The first is "ftrace_dump_on_oops". This will dump the tracing ring
+buffer to the console when an oops occurs. This can be useful if the console
+is being logged somewhere. If a serial console is used, it may be prudent to
+make sure the ring buffer is relatively small, otherwise the dumping of the
+ring buffer may take several minutes to hours to finish. Here's an example
+of the kernel command line::
+
+ ftrace_dump_on_oops trace_buf_size=50K
+
+Note, the tracing buffer is made up of per CPU buffers where each of these
+buffers is broken up into sub-buffers that are by default PAGE_SIZE. The
+trace_buf_size option above sets each of the per CPU buffers to 50K,
+so, on a machine with 8 CPUs, that's actually 400K total.
+
+Persistent buffers across boots
+-------------------------------
+If the system memory allows it, the tracing ring buffer can be specified at
+a specific location in memory. If the location is the same across boots and
+the memory is not modified, the tracing buffer can be retrieved on the
+following boot. There are two ways to reserve memory for the use of the
+ring buffer.
+
+The more reliable way (on x86) is to reserve memory with the "memmap" kernel
+command line option and then use that memory for the trace_instance. This
+requires a bit of knowledge of the physical memory layout of the system. The
+advantage of using this method is that the memory for the ring buffer will
+always be the same::
+
+  memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M
+
+The memmap above reserves 12 megabytes of memory at the physical memory
+location 0x284500000. Then the trace_instance option will create a trace
+instance "boot_map" at that same location with the same amount of memory
+reserved. As the ring buffer is broken up into per CPU buffers, the 12
+megabytes will be broken up evenly between those CPUs. If you have 8 CPUs,
+each per CPU ring buffer will be 1.5 megabytes in size. Note that this also
+includes meta data, so the amount of memory actually used by the ring buffer
+will be slightly smaller.
+
+Another more generic but less robust way to allocate a ring buffer mapping
+at boot is with the "reserve_mem" option::
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map@trace
+
+The reserve_mem option above will find 12 megabytes that are available at
+boot up, aligned by 4096 bytes. It will label this memory as "trace" so
+that it can be used by later command line options.
+
+The trace_instance option creates a "boot_map" instance and will use the
+memory reserved by reserve_mem that was labeled as "trace". This method is
+more generic but may not be as reliable. Due to KASLR, the memory reserved
+by reserve_mem may not be located at the same location. If this happens,
+then the ring buffer will not be from the previous boot and will be reset.
+
+Sometimes using a larger alignment can keep KASLR from moving things
+around in such a way that it changes the location of the reserve_mem
+area. With a larger alignment, you may find that the buffer is placed
+more consistently in the same location::
+
+ reserve_mem=12M:0x2000000:trace trace_instance=boot_map@trace
+
+On boot up, the memory reserved for the ring buffer is validated. It will go
+through a series of tests to make sure that the ring buffer contains valid
+data. If it does, the ring buffer will then be set up to be available to
+read from the instance. If it fails any of the tests, the entire ring
+buffer will be cleared and initialized as new.
+
+The layout of this mapped memory may not be consistent from kernel to
+kernel, so only the same kernel is guaranteed to work if the mapping is
+preserved. Switching to a different kernel version may find a different
+layout and mark the buffer as invalid.
+
+Using trace_printk() in the boot instance
+-----------------------------------------
+By default, the content of trace_printk() goes into the top level tracing
+instance. But this instance is never preserved across boots. To have the
+trace_printk() content, and some other internal tracing (like stack dumps),
+go to the preserved buffer, either set the instance to be the trace_printk()
+destination from the kernel command line, or set it after boot up via the
+trace_printk_dest option.
+
+After boot up::
+
+ echo 1 > /sys/kernel/tracing/instances/boot_map/options/trace_printk_dest
+
+From the kernel command line::
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map^traceprintk^traceoff@trace
+
+If setting it from the kernel command line, it is recommended to also
+disable tracing with the "traceoff" flag, and enable tracing after boot up.
+Otherwise the trace from the most recent boot will be mixed with the trace
+from the previous boot, and may make it confusing to read.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 5aba74872ba7..4073ca48af4a 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1186,6 +1186,18 @@ Here are the available options:
trace_printk
Can disable trace_printk() from writing into the buffer.
+ trace_printk_dest
+ Set to have trace_printk() and similar internal tracing functions
+ write into this instance. Note, only one trace instance can have
+ this set. By setting this flag, it clears the trace_printk_dest flag
+   of the instance that had it set previously. By default, the top
+   level trace has this set, and will get it set again if another
+   instance that had it set clears it.
+
+ This flag cannot be cleared by the top level instance, as it is the
+ default instance. The only way the top level instance has this flag
+   cleared is by it being set in another instance.
+
annotate
It is sometimes confusing when the CPU buffers are full
and one CPU buffer had a lot of events recently, thus
diff --git a/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst b/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst
new file mode 100644
index 000000000000..526ae534dd86
--- /dev/null
+++ b/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+ This is a simple wrapper to bring memory-barriers.txt into the RST world
+ until such a time as that file can be converted directly.
+
+=========================
+리눅스 커널 메모리 배리어
+=========================
+
+.. raw:: latex
+
+ \footnotesize
+
+.. include:: ../../memory-barriers.txt
+ :literal:
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst
index 4add6b2fe1f2..a20772f9d61c 100644
--- a/Documentation/translations/ko_KR/index.rst
+++ b/Documentation/translations/ko_KR/index.rst
@@ -11,19 +11,9 @@
.. toctree::
:maxdepth: 1
- howto
-
-
-리눅스 커널 메모리 배리어
--------------------------
-
-.. raw:: latex
-
- \footnotesize
-
-.. include:: ./memory-barriers.txt
- :literal:
+ process/howto
+ core-api/wrappers/memory-barriers.rst
.. raw:: latex
- }\kerneldocEndKR
+ }\kerneldocEndKR
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/process/howto.rst
index 34f14899c155..34f14899c155 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/process/howto.rst
diff --git a/Documentation/translations/sp_SP/scheduler/index.rst b/Documentation/translations/sp_SP/scheduler/index.rst
index 768488d6f001..32f9fd7517b2 100644
--- a/Documentation/translations/sp_SP/scheduler/index.rst
+++ b/Documentation/translations/sp_SP/scheduler/index.rst
@@ -6,3 +6,4 @@
:maxdepth: 1
sched-design-CFS
+ sched-eevdf
diff --git a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
index 90a153cad4e8..dc728c739e28 100644
--- a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
+++ b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
@@ -14,10 +14,10 @@ Gestor de tareas CFS
CFS viene de las siglas en inglés de "Gestor de tareas totalmente justo"
("Completely Fair Scheduler"), y es el nuevo gestor de tareas de escritorio
-implementado por Ingo Molnar e integrado en Linux 2.6.23. Es el sustituto de
-el previo gestor de tareas SCHED_OTHER.
-
-Nota: El planificador EEVDF fue incorporado más recientemente al kernel.
+implementado por Ingo Molnar e integrado en Linux 2.6.23. Es el sustituto
+del previo gestor de tareas SCHED_OTHER. Hoy en día se está abriendo camino
+para el gestor de tareas EEVDF, cuya documentación se puede ver en
+Documentation/scheduler/sched-eevdf.rst
El 80% del diseño de CFS puede ser resumido en una única frase: CFS
básicamente modela una "CPU ideal, precisa y multi-tarea" sobre hardware
@@ -109,7 +109,7 @@ para que se ejecute, y la tarea en ejecución es interrumpida.
==================================
CFS usa una granularidad de nanosegundos y no depende de ningún
-jiffie o detalles como HZ. De este modo, el gestor de tareas CFS no tiene
+jiffy o detalles como HZ. De este modo, el gestor de tareas CFS no tiene
noción de "ventanas de tiempo" de la forma en que tenía el gestor de
tareas previo, y tampoco tiene heurísticos. Únicamente hay un parámetro
central ajustable (se ha de cambiar en CONFIG_SCHED_DEBUG):
diff --git a/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst b/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst
new file mode 100644
index 000000000000..d54736f297b8
--- /dev/null
+++ b/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst
@@ -0,0 +1,58 @@
+
+.. include:: ../disclaimer-sp.rst
+
+:Original: Documentation/scheduler/sched-eevdf.rst
+:Translator: Sergio González Collado <sergio.collado@gmail.com>
+
+======================
+Gestor de tareas EEVDF
+======================
+
+El gestor de tareas EEVDF, del inglés: "Earliest Eligible Virtual Deadline
+First", fue presentado por primera vez en una publicación científica en
+1995 [1]. El kernel de Linux comenzó a transicionar hacia EEVDF en la
+versión 6.6 (y como una nueva opción en 2024), alejándose del gestor
+de tareas CFS, en favor de una versión de EEVDF propuesta por Peter
+Zijlstra en 2023 [2-4]. Más información relativa a CFS puede encontrarse
+en Documentation/scheduler/sched-design-CFS.rst.
+
+De forma parecida a CFS, EEVDF intenta distribuir el tiempo de ejecución
+de la CPU de forma equitativa entre todas las tareas que tengan la misma
+prioridad y puedan ser ejecutables. Para eso, asigna un tiempo de
+ejecución virtual a cada tarea, creando un "retraso" que puede ser usado
+para determinar si una tarea ha recibido su cantidad justa de tiempo
+de ejecución en la CPU. De esta manera, una tarea con un "retraso"
+positivo, es porque se le debe tiempo de ejecución, mientras que una
+con "retraso" negativo implica que la tarea ha excedido su cuota de
+tiempo. EEVDF elige las tareas con un "retraso" mayor o igual a cero y
+calcula un tiempo límite de ejecución virtual (VD, del inglés: virtual
+deadline) para cada una, eligiendo la tarea con la VD más próxima para
+ser ejecutada a continuación. Es importante darse cuenta de que esto permite
+que las tareas sensibles a la latencia, que tengan porciones de tiempo de
+ejecución de CPU más cortas, sean priorizadas, lo cual ayuda a reducir su
+tiempo de respuesta.
+
+Ahora mismo se está discutiendo cómo gestionar esos "retrasos", especialmente
+en tareas que estén en un estado durmiente; pero en el momento en el que
+se escribe este texto EEVDF usa un mecanismo de "decaimiento" basado en el
+tiempo virtual de ejecución (VRT, del inglés: virtual run time). Esto previene
+a las tareas de abusar del sistema simplemente durmiendo brevemente para
+reajustar su retraso negativo: cuando una tarea duerme, esta permanece en
+la cola de ejecución pero marcada para "desencolado diferido", permitiendo
+a su retraso decaer a lo largo de VRT. Por tanto, las tareas que duerman
+por más tiempo eventualmente eliminarán su retraso. Finalmente, las tareas
+pueden adelantarse a otras si su VD es más próximo en el tiempo, y las
+tareas podrán pedir porciones de tiempo específicas con la nueva llamada
+del sistema sched_setattr(), todo esto facilitará el trabajo de las aplicaciones
+que sean sensibles a las latencias.
+
+REFERENCIAS
+===========
+
+[1] https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=805acf7726282721504c8f00575d91ebfd750564
+
+[2] https://lore.kernel.org/lkml/a79014e6-ea83-b316-1e12-2ae056bda6fa@linux.vnet.ibm.com/
+
+[3] https://lwn.net/Articles/969062/
+
+[4] https://lwn.net/Articles/925371/
diff --git a/Documentation/translations/zh_CN/admin-guide/index.rst b/Documentation/translations/zh_CN/admin-guide/index.rst
index 0db80ab830a0..15d9ab5993a7 100644
--- a/Documentation/translations/zh_CN/admin-guide/index.rst
+++ b/Documentation/translations/zh_CN/admin-guide/index.rst
@@ -37,7 +37,6 @@ Todolist:
reporting-issues
reporting-regressions
- security-bugs
bug-hunting
bug-bisect
tainted-kernels
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
index bf21ff84f396..cff7b6f98c59 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
@@ -15,7 +15,7 @@
本文通过演示DAMON的默认用户空间工具,简要地介绍了如何使用DAMON。请注意,为了简洁
起见,本文档只描述了它的部分功能。更多细节请参考该工具的使用文档。
-`doc <https://github.com/awslabs/damo/blob/next/USAGE.md>`_ .
+`doc <https://github.com/damonitor/damo/blob/next/USAGE.md>`_ .
前提条件
@@ -31,7 +31,7 @@
------------
在演示中,我们将使用DAMON的默认用户空间工具,称为DAMON Operator(DAMO)。它可以在
-https://github.com/awslabs/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但
+https://github.com/damonitor/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但
这并不是强制性的。
因为DAMO使用了DAMON的sysfs接口(详情请参考:doc:`usage`),你应该确保
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
index da2745464ece..50f6f0b6bf11 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
@@ -16,16 +16,16 @@
DAMON 为不同的用户提供了下面这些接口。
- *DAMON用户空间工具。*
- `这 <https://github.com/awslabs/damo>`_ 为有这特权的人, 如系统管理员,希望有一个刚好
+ `这 <https://github.com/damonitor/damo>`_ 为有这特权的人, 如系统管理员,希望有一个刚好
可以工作的人性化界面。
使用它,用户可以以人性化的方式使用DAMON的主要功能。不过,它可能不会为特殊情况进行高度调整。
它同时支持虚拟和物理地址空间的监测。更多细节,请参考它的 `使用文档
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_。
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_。
- *sysfs接口。*
:ref:`这 <sysfs_interface>` 是为那些希望更高级的使用DAMON的特权用户空间程序员准备的。
使用它,用户可以通过读取和写入特殊的sysfs文件来使用DAMON的主要功能。因此,你可以编写和使
用你个性化的DAMON sysfs包装程序,代替你读/写sysfs文件。 `DAMON用户空间工具
- <https://github.com/awslabs/damo>`_ 就是这种程序的一个例子 它同时支持虚拟和物理地址
+ <https://github.com/damonitor/damo>`_ 就是这种程序的一个例子 它同时支持虚拟和物理地址
空间的监测。注意,这个界面只提供简单的监测结果 :ref:`统计 <damos_stats>`。对于详细的监测
结果,DAMON提供了一个:ref:`跟踪点 <tracepoint>`。
- *debugfs interface.*
@@ -332,7 +332,7 @@ tried_regions/<N>/
# echo 500 > watermarks/mid
# echo 300 > watermarks/low
-请注意,我们强烈建议使用用户空间的工具,如 `damo <https://github.com/awslabs/damo>`_ ,
+请注意,我们强烈建议使用用户空间的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那样手动读写文件。以上只是一个例子。
debugfs接å£
diff --git a/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst b/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst
index 59e51e3539b4..9ff4ba94391d 100644
--- a/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst
+++ b/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst
@@ -300,7 +300,7 @@ Documentation/admin-guide/reporting-regressions.rst 对此进行了更详细的è
添加到回归跟踪列表中,以确保它不会被忽略。
什么是安全问题留给您自己判断。在继续之前,请考虑阅读
-Documentation/translations/zh_CN/admin-guide/security-bugs.rst ,
+Documentation/translations/zh_CN/process/security-bugs.rst ,
因为它提供了如何最恰当地处理安全问题的额外细节。
当发生了完全无法接受的糟糕事情时,此问题就是一个“非常严重的问题”。例如,
@@ -983,7 +983,7 @@ Documentation/admin-guide/reporting-regressions.rst ;它还æä¾›äº†å¤§é‡å…¶ä
报告,请将报告的文本转发到这些地址;但请在报告的顶部加上注释,表明您提交了
报告,并附上工单链接。
-更多信息请参见 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。
+更多信息请参见 Documentation/translations/zh_CN/process/security-bugs.rst 。
发布报告后的责任
diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst
index 922cabf7b5dd..453a02cd1f44 100644
--- a/Documentation/translations/zh_CN/core-api/index.rst
+++ b/Documentation/translations/zh_CN/core-api/index.rst
@@ -49,6 +49,7 @@
generic-radix-tree
packing
this_cpu_ops
+ union_find
=======
diff --git a/Documentation/translations/zh_CN/core-api/union_find.rst b/Documentation/translations/zh_CN/core-api/union_find.rst
new file mode 100644
index 000000000000..bb93fa8c6533
--- /dev/null
+++ b/Documentation/translations/zh_CN/core-api/union_find.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/core-api/union_find.rst
+
+=============================
+Linux中的并查集(Union-Find)
+=============================
+
+
+:日期: 2024年6月21日
+:作者: Xavier <xavier_qy@163.com>
+
+何为并查集,它有什么用?
+------------------------
+
+并查集是一种数据结构,用于处理一些不交集的合并及查询问题。并查集支持的主要操作:
+ 初始化:将每个元素初始化为单独的集合,每个集合的初始父节点指向自身。
+
+ 查询:查询某个元素属于哪个集合,通常是返回集合中的一个“代表元素”。这个操作是为
+ 了判断两个元素是否在同一个集合之中。
+
+ 合并:将两个集合合并为一个。
+
+并查集作为一种用于维护集合(组)的数据结构,它通常用于解决一些离线查询、动态连通性和
+图论等相关问题,同时也是用于计算最小生成树的克鲁斯克尔算法中的关键,由于最小生成树在
+网络路由等场景下十分重要,并查集也得到了广泛的引用。此外,并查集在符号计算,寄存器分
+配等方面也有应用。
+
+空间复杂度: O(n),n为节点数。
+
+时间复杂度:使用路径压缩可以减少查找操作的时间复杂度,使用按秩合并可以减少合并操作的
+时间复杂度,使得并查集每个查询和合并操作的平均时间复杂度仅为O(α(n)),其中α(n)是反阿
+克曼函数,可以粗略地认为并查集的操作有常数的时间复杂度。
+
+本文档涵盖了对Linux并查集实现的使用方法。更多关于并查集的性质和实现的信息,参见:
+
+ 维基百科并查集词条
+ https://en.wikipedia.org/wiki/Disjoint-set_data_structure
+
+并查集的Linux实现
+------------------
+
+Linux的并查集实现在文件“lib/union_find.c”中。要使用它,需要
+“#include <linux/union_find.h>”。
+
+并查集的数据结构定义如下::
+
+ struct uf_node {
+ struct uf_node *parent;
+ unsigned int rank;
+ };
+
+其中parent为当前节点的父节点,rank为当前树的高度,在合并时将rank小的节点接到rank大
+的节点下面以增加平衡性。
+
+初始化并查集
+-------------
+
+可以采用静态或初始化接口完成初始化操作。初始化时,parent 指针指向自身,rank 设置
+为 0。
+示例::
+
+ struct uf_node my_node = UF_INIT_NODE(my_node);
+
+或
+
+ uf_node_init(&my_node);
+
+查找并查集的根节点
+------------------
+
+主要用于判断两个并查集是否属于一个集合,如果根相同,那么他们就是一个集合。在查找过程中
+会对路径进行压缩,提高后续查找效率。
+示例::
+
+ int connected;
+ struct uf_node *root1 = uf_find(&node_1);
+ struct uf_node *root2 = uf_find(&node_2);
+ if (root1 == root2)
+ connected = 1;
+ else
+ connected = 0;
+
+合并两个并查集
+--------------
+
+对于两个相交的并查集进行合并,会首先查找它们各自的根节点,然后根据根节点秩大小,将小的
+节点连接到大的节点下面。
+示例::
+
+ uf_union(&node_1, &node_2);
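+
+下面是一个把上述接口串起来的简单示例(仅为示意,节点数组为假设)::
+
+	#include <linux/union_find.h>
+
+	static struct uf_node nodes[4];
+
+	static void uf_example(void)
+	{
+		int i, connected;
+
+		/* 每个节点初始化为单独的集合 */
+		for (i = 0; i < 4; i++)
+			uf_node_init(&nodes[i]);
+
+		uf_union(&nodes[0], &nodes[1]);	/* {0, 1} */
+		uf_union(&nodes[1], &nodes[2]);	/* {0, 1, 2} */
+
+		/* 根相同,说明属于同一个集合 */
+		connected = uf_find(&nodes[0]) == uf_find(&nodes[2]);
+	}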
diff --git a/Documentation/translations/zh_CN/dev-tools/index.rst b/Documentation/translations/zh_CN/dev-tools/index.rst
index c540e4a7d5db..6a8c637c0be1 100644
--- a/Documentation/translations/zh_CN/dev-tools/index.rst
+++ b/Documentation/translations/zh_CN/dev-tools/index.rst
@@ -21,6 +21,7 @@ Documentation/translations/zh_CN/dev-tools/testing-overview.rst
testing-overview
sparse
kcov
+ kcsan
gcov
kasan
ubsan
@@ -32,7 +33,6 @@ Todolist:
- checkpatch
- coccinelle
- kmsan
- - kcsan
- kfence
- kgdb
- kselftest
diff --git a/Documentation/translations/zh_CN/dev-tools/kcsan.rst b/Documentation/translations/zh_CN/dev-tools/kcsan.rst
new file mode 100644
index 000000000000..8c495c17f109
--- /dev/null
+++ b/Documentation/translations/zh_CN/dev-tools/kcsan.rst
@@ -0,0 +1,320 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/dev-tools/kcsan.rst
+:Translator: 刘浩阳 Haoyang Liu <tttturtleruss@hust.edu.cn>
+
+å†…æ ¸å¹¶å‘æ¶ˆæ¯’剂(KCSAN)
+=====================
+
+å†…æ ¸å¹¶å‘æ¶ˆæ¯’剂(KCSAN)是一个动æ€ç«žäº‰æ£€æµ‹å™¨ï¼Œä¾èµ–ç¼–è¯‘æ—¶æ’æ¡©ï¼Œå¹¶ä¸”使用基于观察
+ç‚¹çš„é‡‡æ ·æ–¹æ³•æ¥æ£€æµ‹ç«žäº‰ã€‚KCSAN 的主è¦ç›®çš„æ˜¯æ£€æµ‹ `æ•°æ®ç«žäº‰`_。
+
+使用
+----
+
+KCSAN å— GCC å’Œ Clang 支æŒã€‚使用 GCC 需è¦ç‰ˆæœ¬ 11 或更高,使用 Clang 也需è¦
+版本 11 或更高。
+
+为了å¯ç”¨ KCSANï¼Œç”¨å¦‚ä¸‹å‚æ•°é…置内核::
+
+ CONFIG_KCSAN = y
+
+KCSAN æä¾›äº†å‡ ä¸ªå…¶ä»–çš„é…置选项æ¥è‡ªå®šä¹‰è¡Œä¸ºï¼ˆè§ ``lib/Kconfig.kcsan`` 中的å„自的
+å¸®åŠ©æ–‡æ¡£ä»¥èŽ·å–æ›´å¤šä¿¡æ¯ï¼‰ã€‚
+
+错误报告
+~~~~~~~~
+
+一个典型数æ®ç«žäº‰çš„æŠ¥å‘Šå¦‚下所示::
+
+ ==================================================================
+ BUG: KCSAN: data-race in test_kernel_read / test_kernel_write
+
+ write to 0xffffffffc009a628 of 8 bytes by task 487 on cpu 0:
+ test_kernel_write+0x1d/0x30
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ read to 0xffffffffc009a628 of 8 bytes by task 488 on cpu 6:
+ test_kernel_read+0x10/0x20
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ value changed: 0x00000000000009a6 -> 0x00000000000009b2
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 6 PID: 488 Comm: access_thread Not tainted 5.12.0-rc2+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
+ ==================================================================
+
+报告的头部æä¾›äº†ä¸€ä¸ªå…³äºŽç«žäº‰ä¸­æ¶‰åŠåˆ°çš„函数的简短总结。éšåŽæ˜¯ç«žäº‰ä¸­çš„两个线程的
+访问类型和堆栈信æ¯ã€‚如果 KCSAN å‘现了一个值的å˜åŒ–,那么那个值的旧值和新值会在
+“value changedâ€è¿™ä¸€è¡Œå•独显示。
+
+å¦ä¸€ä¸ªä¸å¤ªå¸¸è§çš„æ•°æ®ç«žäº‰ç±»åž‹çš„æŠ¥å‘Šå¦‚下所示::
+
+ ==================================================================
+ BUG: KCSAN: data-race in test_kernel_rmw_array+0x71/0xd0
+
+ race at unknown origin, with read to 0xffffffffc009bdb0 of 8 bytes by task 515 on cpu 2:
+ test_kernel_rmw_array+0x71/0xd0
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ value changed: 0x0000000000002328 -> 0x0000000000002329
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 2 PID: 515 Comm: access_thread Not tainted 5.12.0-rc2+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
+ ==================================================================
+
+这个报告是当å¦ä¸€ä¸ªç«žäº‰çº¿ç¨‹ä¸å¯èƒ½è¢«å‘现,但是å¯ä»¥ä»Žè§‚测的内存地å€çš„值改å˜è€ŒæŽ¨æ–­
+出æ¥çš„æ—¶å€™ç”Ÿæˆçš„。这类报告总是会带有“value changedâ€è¡Œã€‚这类报告的出现通常是因
+ä¸ºåœ¨ç«žäº‰çº¿ç¨‹ä¸­ç¼ºå°‘æ’æ¡©ï¼Œä¹Ÿå¯èƒ½æ˜¯å› ä¸ºå…¶ä»–原因,比如 DMA 访问。这类报告åªä¼šåœ¨
+è®¾ç½®äº†å†…æ ¸å‚æ•° ``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` æ—¶æ‰ä¼šå‡ºçŽ°ï¼Œè€Œè¿™
+ä¸ªå‚æ•°æ˜¯é»˜è®¤å¯ç”¨çš„。
+
+选择性分æž
+~~~~~~~~~~
+
+对于一些特定的访问,函数,编译å•元或者整个å­ç³»ç»Ÿï¼Œå¯èƒ½éœ€è¦ç¦ç”¨æ•°æ®ç«žäº‰æ£€æµ‹ã€‚
+å¯¹äºŽé™æ€é»‘åå•,有如下å¯ç”¨çš„傿•°ï¼š
+
+* KCSAN 支持使用 ``data_race(expr)`` 注解,这个注解告诉 KCSAN 任何由访问
+  ``expr`` 所引起的数据竞争都应该被忽略,其产生的行为后果被认为是安全的
+  (见本列表之后的示例)。请查阅 `在 LKMM 中 "标记共享内存访问"`_ 获得更多信息。
+
+* 与 ``data_race(...)`` 相似,å¯ä»¥ä½¿ç”¨ç±»åž‹é™å®šç¬¦ ``__data_racy`` æ¥æ ‡è®°ä¸€ä¸ªå˜é‡
+ ,所有访问该å˜é‡è€Œå¯¼è‡´çš„æ•°æ®ç«žäº‰éƒ½æ˜¯æ•…æ„为之并且应该被 KCSAN 忽略::
+
+ struct foo {
+ ...
+ int __data_racy stats_counter;
+ ...
+ };
+
+* 使用函数属性 ``__no_kcsan`` å¯ä»¥å¯¹æ•´ä¸ªå‡½æ•°ç¦ç”¨æ•°æ®ç«žäº‰æ£€æµ‹::
+
+ __no_kcsan
+ void foo(void) {
+ ...
+
+ 为了动æ€é™åˆ¶è¯¥ä¸ºå“ªäº›å‡½æ•°ç”ŸæˆæŠ¥å‘Šï¼ŒæŸ¥é˜… `Debug 文件系统接å£`_ 黑åå•/白åå•特性。
+
+* 为特定的编译å•å…ƒç¦ç”¨æ•°æ®ç«žäº‰æ£€æµ‹ï¼Œå°†ä¸‹åˆ—傿•°åŠ å…¥åˆ° ``Makefile`` 中::
+
+ KCSAN_SANITIZE_file.o := n
+
+* 为 ``Makefile`` 中的所有编译å•å…ƒç¦ç”¨æ•°æ®ç«žäº‰æ£€æµ‹ï¼Œå°†ä¸‹åˆ—傿•°æ·»åŠ åˆ°ç›¸åº”çš„
+ ``Makefile`` 中::
+
+ KCSAN_SANITIZE := n
+
+.. _在 LKMM 中 "标记共享内存访问": https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/access-marking.txt
+
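+下面是一个使用 ``data_race()`` 标记良性竞争读取的简单示例(仅为示意,
+``stats_counter`` 为假设的共享变量)::
+
+	extern int stats_counter;	/* 被多个线程并发更新 */
+
+	void print_stats(void)
+	{
+		/* 有意的竞争读取,用 data_race() 让 KCSAN 忽略 */
+		pr_info("stats: %d\n", data_race(stats_counter));
+	}
+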
+此外,KCSAN å¯ä»¥æ ¹æ®å好设置显示或éšè—整个类别的数æ®ç«žäº‰ã€‚å¯ä»¥ä½¿ç”¨å¦‚下
+Kconfig 傿•°è¿›è¡Œæ›´æ”¹:
+
+* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: 如果å¯ç”¨äº†è¯¥å‚数并且通过观测点
+ (watchpoint) 观测到一个有冲çªçš„写æ“作,但是对应的内存地å€ä¸­å­˜å‚¨çš„值没有改å˜ï¼Œ
+ 则ä¸ä¼šæŠ¥å‘Šè¿™èµ·æ•°æ®ç«žäº‰ã€‚
+
+* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: å‡è®¾é»˜è®¤æƒ…况下,ä¸è¶…过字大å°çš„简
+ å•对é½å†™å…¥æ“作是原å­çš„。å‡è®¾è¿™äº›å†™å…¥æ“作ä¸ä¼šå—到ä¸å®‰å…¨çš„编译器优化影å“,从而导
+ 致数æ®ç«žäº‰ã€‚该选项使 KCSAN ä¸æŠ¥å‘Šä»…ç”±ä¸è¶…过字大å°çš„简å•对é½å†™å…¥æ“作引起
+ çš„å†²çªæ‰€å¯¼è‡´çš„æ•°æ®ç«žäº‰ã€‚
+
+* ``CONFIG_KCSAN_PERMISSIVE``: å¯ç”¨é¢å¤–的宽æ¾è§„则æ¥å¿½ç•¥æŸäº›å¸¸è§ç±»åž‹çš„æ•°æ®ç«žäº‰ã€‚
+ 与上é¢çš„规则ä¸åŒï¼Œè¿™æ¡è§„åˆ™æ›´åŠ å¤æ‚,涉åŠåˆ°å€¼æ”¹å˜æ¨¡å¼ï¼Œè®¿é—®ç±»åž‹å’Œåœ°å€ã€‚这个
+ 选项ä¾èµ–编译选项 ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=y``。请查看
+ ``kernel/kcsan/permissive.h`` èŽ·å–æ›´å¤šç»†èŠ‚ã€‚å¯¹äºŽåªä¾§é‡äºŽç‰¹å®šå­ç³»ç»Ÿè€Œä¸æ˜¯æ•´ä¸ª
+ 内核报告的测试者和维护者,建议ç¦ç”¨è¯¥é€‰é¡¹ã€‚
+
+è¦ä½¿ç”¨å°½å¯èƒ½ä¸¥æ ¼çš„规则,选择 ``CONFIG_KCSAN_STRICT=y``,这将é…ç½® KCSAN å°½å¯
+能紧密地éµå¾ª Linux 内核内存一致性模型(LKMM)。
+
+Debug 文件系统接å£
+~~~~~~~~~~~~~~~~~~
+
+文件 ``/sys/kernel/debug/kcsan`` æä¾›äº†å¦‚下接å£ï¼š
+
+* 读 ``/sys/kernel/debug/kcsan`` 返回ä¸åŒçš„è¿è¡Œæ—¶ç»Ÿè®¡æ•°æ®ã€‚
+
+* å°† ``on`` 或 ``off`` 写入 ``/sys/kernel/debug/kcsan`` å…许打开或关闭 KCSAN。
+
+* 将 ``!some_func_name`` 写入 ``/sys/kernel/debug/kcsan`` 会将
+ ``some_func_name`` 添加到报告过滤列表中,该列表(默认)会将数æ®ç«žäº‰æŠ¥å‘Šä¸­çš„é¡¶
+ 层堆栈帧是列表中函数的情况列入黑åå•。
+
+* å°† ``blacklist`` 或 ``whitelist`` 写入 ``/sys/kernel/debug/kcsan`` ä¼šæ”¹å˜æŠ¥å‘Š
+ 过滤行为。例如,黑åå•的特性å¯ä»¥ç”¨æ¥è¿‡æ»¤æŽ‰ç»å¸¸å‘生的数æ®ç«žäº‰ã€‚白åå•特性å¯ä»¥å¸®
+ 助å¤çŽ°å’Œä¿®å¤æµ‹è¯•。
+
+性能调优
+~~~~~~~~
+
+å½±å“ KCSAN 整体的性能和 bug æ£€æµ‹èƒ½åŠ›çš„æ ¸å¿ƒå‚æ•°æ˜¯ä½œä¸ºå†…æ ¸å‘½ä»¤è¡Œå‚æ•°å…¬å¼€çš„,其默认
+值也å¯ä»¥é€šè¿‡ç›¸åº”çš„ Kconfig 选项更改。
+
+* ``kcsan.skip_watch`` (``CONFIG_KCSAN_SKIP_WATCH``): 在å¦ä¸€ä¸ªè§‚æµ‹ç‚¹è®¾ç½®ä¹‹å‰æ¯
+ 个 CPU è¦è·³è¿‡çš„内存æ“作次数。更加频ç¹çš„设置观测点将增加观察到竞争情况的å¯èƒ½æ€§
+ ã€‚è¿™ä¸ªå‚æ•°å¯¹ç³»ç»Ÿæ•´ä½“çš„æ€§èƒ½å’Œç«žäº‰æ£€æµ‹èƒ½åŠ›å½±å“æœ€æ˜¾è‘—。
+
+* ``kcsan.udelay_task`` (``CONFIG_KCSAN_UDELAY_TASK``): 对于任务,观测点设置之
+ åŽæš‚åœæ‰§è¡Œçš„微秒延迟。值越大,检测到竞争情况的å¯èƒ½æ€§è¶Šé«˜ã€‚
+
+* ``kcsan.udelay_interrupt`` (``CONFIG_KCSAN_UDELAY_INTERRUPT``): 对于中断,
+ è§‚æµ‹ç‚¹è®¾ç½®ä¹‹åŽæš‚åœæ‰§è¡Œçš„å¾®ç§’å»¶è¿Ÿã€‚ä¸­æ–­å¯¹äºŽå»¶è¿Ÿçš„è¦æ±‚更加严格,其延迟通常应该å°
+ 于为任务选择的延迟。
+
+它们å¯ä»¥é€šè¿‡ ``/sys/module/kcsan/parameters/`` 在è¿è¡Œæ—¶è¿›è¡Œè°ƒæ•´ã€‚
+
+æ•°æ®ç«žäº‰
+--------
+
+在一次执行中,如果两个内存访问存在 *冲çª*,在ä¸åŒçš„çº¿ç¨‹ä¸­å¹¶å‘æ‰§è¡Œï¼Œå¹¶ä¸”至少
+有一个访问是 *简å•访问*,则它们就形æˆäº† *æ•°æ®ç«žäº‰*。如果它们访问了åŒä¸€ä¸ªå†…存地å€å¹¶ä¸”
+至少有一个是写æ“作,则称它们存在 *冲çª*。有关更详细的讨论和定义,è§
+`LKMM 中的 "简å•访问和数æ®ç«žäº‰"`_。
+
+.. _LKMM 中的 "简å•访问和数æ®ç«žäº‰": https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922
+
+与 Linux 内核内存一致性模型(LKMM)的关系
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+LKMM 定义了å„ç§å†…å­˜æ“作的传播和排åºè§„则,让开å‘者å¯ä»¥æŽ¨ç†å¹¶å‘代ç ã€‚最终这å…许确
+定并å‘代ç å¯èƒ½çš„æ‰§è¡Œæƒ…å†µå¹¶åˆ¤æ–­è¿™äº›ä»£ç æ˜¯å¦å­˜åœ¨æ•°æ®ç«žäº‰ã€‚
+
+KCSAN å¯ä»¥è¯†åˆ« *è¢«æ ‡è®°çš„åŽŸå­æ“作* ( ``READ_ONCE``, ``WRITE_ONCE`` , ``atomic_*``
+等),以åŠå†…å­˜å±éšœæ‰€éšå«çš„一部分顺åºä¿è¯ã€‚å¯ç”¨ ``CONFIG_KCSAN_WEAK_MEMORY=y``
+é…置,KCSAN 会对加载或存储缓冲区进行建模,并å¯ä»¥æ£€æµ‹é—æ¼çš„
+``smp_mb()``, ``smp_wmb()``, ``smp_rmb()``, ``smp_store_release()``ï¼Œä»¥åŠæ‰€æœ‰çš„
+具有等效éšå«å†…å­˜å±éšœçš„ ``atomic_*`` æ“作。
+
+请注æ„,KCSAN ä¸ä¼šæŠ¥å‘Šæ‰€æœ‰ç”±äºŽç¼ºå¤±å†…存顺åºè€Œå¯¼è‡´çš„æ•°æ®ç«žäº‰ï¼Œç‰¹åˆ«æ˜¯åœ¨éœ€è¦å†…å­˜å±éšœ
+æ¥ç¦æ­¢åŽç»­å†…å­˜æ“作在å±éšœä¹‹å‰é‡æ–°æŽ’åºçš„æƒ…况下。因此,开å‘人员应该仔细考虑那些未
+被检查的内存顺åºè¦æ±‚。
+
+æ•°æ®ç«žäº‰ä»¥å¤–的竞争检测
+---------------------------
+
+对于有ç€å¤æ‚å¹¶å‘设计的代ç ï¼Œç«žäº‰çж况䏿€»æ˜¯è¡¨çŽ°ä¸ºæ•°æ®ç«žäº‰ã€‚å¦‚æžœå¹¶å‘æ“作引起了æ„
+料之外的系统行为,则认为å‘生了竞争状况。å¦ä¸€æ–¹é¢ï¼Œæ•°æ®ç«žäº‰æ˜¯åœ¨ C 语言层é¢å®šä¹‰
+的。内核定义了一些å®å®šä¹‰ç”¨æ¥æ£€æµ‹éžæ•°æ®ç«žäº‰çš„æ¼æ´žå¹¶å‘代ç çš„属性。
+
+.. note::
+ 为了ä¸å¼•入新的文档编译警告,这里ä¸å±•示å®å®šä¹‰çš„具体内容,如果想查看具体
+ å®å®šä¹‰å¯ä»¥ç»“åˆåŽŸæ–‡ï¼ˆDocumentation/dev-tools/kcsan.rst)阅读。
+
+实现细节
+--------
+
+KCSAN 需è¦è§‚测两个并å‘访问。特别é‡è¦çš„æ˜¯ï¼Œæˆ‘们想è¦ï¼ˆa)增加观测到竞争的机会(尤
+其是很少å‘生的竞争),以åŠï¼ˆb)能够实际观测到这些竞争。我们å¯ä»¥é€šè¿‡ï¼ˆa)注入
+ä¸åŒçš„延迟,以åŠï¼ˆb)使用地å€è§‚测点(或断点)æ¥å®žçŽ°ã€‚
+
+如果我们在设置了地å€è§‚察点的情况下故æ„延迟一个内存访问,然åŽè§‚察到观察点被触å‘
+,那么两个对åŒä¸€åœ°å€çš„访问就å‘生了竞争。使用硬件观察点,这是 `DataCollider
+<http://usenix.org/legacy/events/osdi10/tech/full_papers/Erickson.pdf>`_ 中采用
+的方法。与 DataCollider ä¸åŒï¼ŒKCSAN ä¸ä½¿ç”¨ç¡¬ä»¶è§‚察点,而是ä¾èµ–äºŽç¼–è¯‘å™¨æ’æ¡©å’Œâ€œè½¯
+观测点â€ã€‚
+
+在 KCSAN 中,观察点是通过一ç§é«˜æ•ˆçš„ç¼–ç å®žçŽ°çš„ï¼Œè¯¥ç¼–ç å°†è®¿é—®ç±»åž‹ã€å¤§å°å’Œåœ°å€å­˜å‚¨
+在一个长整型å˜é‡ä¸­ï¼›ä½¿ç”¨â€œè½¯è§‚察点â€çš„好处是具有å¯ç§»æ¤æ€§å’Œæ›´å¤§çš„çµæ´»æ€§ã€‚ç„¶åŽï¼Œ
+KCSANä¾èµ–äºŽç¼–è¯‘å™¨å¯¹æ™®é€šè®¿é—®çš„æ’æ¡©ã€‚对于æ¯ä¸ªæ’桩的普通访问:
+
+1. 检测是å¦å­˜åœ¨ä¸€ä¸ªç¬¦åˆçš„观测点,如果存在,并且至少有一个æ“作是写æ“作,则我们å‘
+ 现了一个竞争访问。
+
+2. 如果ä¸å­˜åœ¨åŒ¹é…çš„è§‚å¯Ÿç‚¹ï¼Œåˆ™å®šæœŸçš„è®¾ç½®ä¸€ä¸ªè§‚æµ‹ç‚¹å¹¶éšæœºå»¶è¿Ÿä¸€å°æ®µæ—¶é—´ã€‚
+
+3. åœ¨å»¶è¿Ÿå‰æ£€æŸ¥æ•°æ®å€¼ï¼Œå¹¶åœ¨å»¶è¿ŸåŽé‡æ–°æ£€æŸ¥æ•°æ®å€¼ï¼›å¦‚果值ä¸åŒ¹é…,我们推测存在一个
+ æœªçŸ¥æ¥æºçš„竞争状况。
+
+为了检测普通访问和标记访问之间的数æ®ç«žäº‰ï¼ŒKCSAN 也对标记访问进行标记,但仅用于
+检查是å¦å­˜åœ¨è§‚å¯Ÿç‚¹ï¼›å³ KCSAN ä¸ä¼šåœ¨æ ‡è®°è®¿é—®ä¸Šè®¾ç½®è§‚察点。通过ä¸åœ¨æ ‡è®°æ“作上设
+置观察点,如果对一个å˜é‡çš„æ‰€æœ‰å¹¶å‘访问都被正确标记,KCSAN 将永远ä¸ä¼šè§¦å‘观察点
+,因此也ä¸ä¼šæŠ¥å‘Šè¿™äº›è®¿é—®ã€‚
+
+弱内存建模
+~~~~~~~~~~
+
+KCSAN é€šè¿‡å»ºæ¨¡è®¿é—®é‡æ–°æŽ’åºï¼ˆä½¿ç”¨ ``CONFIG_KCSAN_WEAK_MEMORY=y``ï¼‰æ¥æ£€æµ‹ç”±äºŽç¼ºå°‘
+内存å±éšœè€Œå¯¼è‡´çš„æ•°æ®ç«žäº‰ã€‚æ¯ä¸ªè®¾ç½®äº†è§‚察点的普通内存访问也会被选择在其函数范围
+å†…è¿›è¡Œæ¨¡æ‹Ÿé‡æ–°æŽ’åºï¼ˆæœ€å¤šä¸€ä¸ªæ­£åœ¨è¿›è¡Œçš„访问)。
+
+一旦æŸä¸ªè®¿é—®è¢«é€‰æ‹©ç”¨äºŽé‡æ–°æŽ’åºï¼Œå®ƒå°†åœ¨å‡½æ•°èŒƒå›´å†…与æ¯ä¸ªå…¶ä»–访问进行检查。如果é‡
+到适当的内存å±éšœï¼Œè¯¥è®¿é—®å°†ä¸å†è¢«è€ƒè™‘è¿›è¡Œæ¨¡æ‹Ÿé‡æ–°æŽ’åºã€‚
+
+当内存æ“作的结果应该由å±éšœæŽ’åºæ—¶ï¼ŒKCSAN å¯ä»¥æ£€æµ‹åˆ°ä»…由于缺失å±éšœè€Œå¯¼è‡´çš„冲çªçš„
+æ•°æ®ç«žäº‰ã€‚考虑下é¢çš„例å­::
+
+ int x, flag;
+ void T1(void)
+ {
+ x = 1; // data race!
+ WRITE_ONCE(flag, 1); // correct: smp_store_release(&flag, 1)
+ }
+ void T2(void)
+ {
+ while (!READ_ONCE(flag)); // correct: smp_load_acquire(&flag)
+ ... = x; // data race!
+ }
+
+当å¯ç”¨äº†å¼±å†…存建模,KCSAN 将考虑对 ``T1`` 中的 ``x`` è¿›è¡Œæ¨¡æ‹Ÿé‡æ–°æŽ’åºã€‚在写入
+``flag`` 之åŽï¼Œx冿¬¡è¢«æ£€æŸ¥æ˜¯å¦æœ‰å¹¶å‘访问:因为 ``T2`` å¯ä»¥åœ¨å†™å…¥
+``flag`` 之åŽç»§ç»­è¿›è¡Œï¼Œå› æ­¤æ£€æµ‹åˆ°æ•°æ®ç«žäº‰ã€‚如果é‡åˆ°äº†æ­£ç¡®çš„å±éšœï¼Œ ``x`` 在正确
+释放 ``flag`` åŽå°†ä¸ä¼šè¢«è€ƒè™‘釿–°æŽ’åºï¼Œå› æ­¤ä¸ä¼šæ£€æµ‹åˆ°æ•°æ®ç«žäº‰ã€‚
+
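+作为对照,下面是使用正确的释放/获取屏障的版本草图(仅为示意)::
+
+	int x, flag;
+	void T1(void)
+	{
+		x = 1;	/* 在发布 flag 前完成写入 */
+		smp_store_release(&flag, 1);
+	}
+	void T2(void)
+	{
+		while (!smp_load_acquire(&flag))
+			;
+		... = x;	/* 不再构成数据竞争 */
+	}
+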
+åœ¨å¤æ‚性上的æƒè¡¡ä»¥åŠå®žé™…çš„é™åˆ¶æ„味ç€åªèƒ½æ£€æµ‹åˆ°ä¸€éƒ¨åˆ†ç”±äºŽç¼ºå¤±å†…å­˜å±éšœè€Œå¯¼è‡´çš„æ•°
+æ®ç«žäº‰ã€‚由于当å‰å¯ç”¨çš„编译器支æŒï¼ŒKCSAN 的实现仅é™äºŽå»ºæ¨¡â€œç¼“冲â€ï¼ˆå»¶è¿Ÿè®¿é—®ï¼‰çš„
+效果,因为è¿è¡Œæ—¶ä¸èƒ½â€œé¢„å–â€è®¿é—®ã€‚åŒæ—¶è¦æ³¨æ„,观测点åªè®¾ç½®åœ¨æ™®é€šè®¿é—®ä¸Šï¼Œè¿™æ˜¯å”¯
+一一个 KCSAN ä¼šæ¨¡æ‹Ÿé‡æ–°æŽ’åºçš„访问类型。这æ„å‘³ç€æ ‡è®°è®¿é—®çš„釿–°æŽ’åºä¸ä¼šè¢«å»ºæ¨¡ã€‚
+
+ä¸Šè¿°æƒ…å†µçš„ä¸€ä¸ªåŽæžœæ˜¯èŽ·å– (acquire) æ“作ä¸éœ€è¦å±éšœæ’æ¡©ï¼ˆä¸éœ€è¦é¢„å–)。此外,引
+å…¥åœ°å€æˆ–控制ä¾èµ–的标记访问ä¸éœ€è¦ç‰¹æ®Šå¤„ç†ï¼ˆæ ‡è®°è®¿é—®ä¸èƒ½é‡æ–°æŽ’åºï¼ŒåŽç»­ä¾èµ–的访问
+ä¸èƒ½è¢«é¢„å–)。
+
+关键属性
+~~~~~~~~
+
+1. **内存开销**ï¼šæ•´ä½“çš„å†…å­˜å¼€é”€åªæœ‰å‡  MiB,å–决于é…置。当å‰çš„实现是使用一个å°é•¿
+ 整型数组æ¥ç¼–ç è§‚测点信æ¯ï¼Œå‡ ä¹Žå¯ä»¥å¿½ç•¥ä¸è®¡ã€‚
+
+2. **性能开销**:KCSAN çš„è¿è¡Œæ—¶æ—¨åœ¨æ€§èƒ½å¼€é”€æœ€å°åŒ–,使用一个高效的观测点编ç ï¼Œåœ¨
+ 快速路径中ä¸éœ€è¦èŽ·å–任何é”。在拥有 8 个 CPU 的系统上的内核å¯åЍæ¥è¯´ï¼š
+
+ - 使用默认 KCSAN é…ç½®æ—¶ï¼Œæ€§èƒ½ä¸‹é™ 5 å€ï¼›
+ - ä»…å› è¿è¡Œæ—¶å¿«é€Ÿè·¯å¾„å¼€é”€å¯¼è‡´æ€§èƒ½ä¸‹é™ 2.8 å€ï¼ˆè®¾ç½®éžå¸¸å¤§çš„
+ ``KCSAN_SKIP_WATCH`` å¹¶å–æ¶ˆè®¾ç½® ``KCSAN_SKIP_WATCH_RANDOMIZE``)。
+
+3. **注解开销**:KCSAN è¿è¡Œæ—¶ä¹‹å¤–需è¦çš„æ³¨é‡Šå¾ˆå°‘。因此,éšç€å†…核的å‘展维护的开
+ 销也很å°ã€‚
+
+4. **检测设备的竞争写入**:由于设置观测点时会检查数æ®å€¼ï¼Œè®¾å¤‡çš„竞争写入也å¯ä»¥
+ 被检测到。
+
+5. **内存排åº**:KCSAN åªäº†è§£ä¸€éƒ¨åˆ† LKMM 排åºè§„则;这å¯èƒ½ä¼šå¯¼è‡´æ¼æŠ¥æ•°æ®ç«žäº‰ï¼ˆ
+ å‡é˜´æ€§ï¼‰ã€‚
+
+6. **分æžå‡†ç¡®çއ**: å¯¹äºŽè§‚å¯Ÿåˆ°çš„æ‰§è¡Œï¼Œç”±äºŽä½¿ç”¨é‡‡æ ·ç­–ç•¥ï¼Œåˆ†æžæ˜¯ *ä¸å¥å…¨* çš„
+ (å¯èƒ½æœ‰å‡é˜´æ€§ï¼‰ï¼Œä½†æœŸæœ›å¾—到完整的分æžï¼ˆæ²¡æœ‰å‡é˜³æ€§ï¼‰ã€‚
+
+考虑的替代方案
+--------------
+
+一个内核数æ®ç«žäº‰æ£€æµ‹çš„æ›¿ä»£æ–¹æ³•是 `Kernel Thread Sanitizer (KTSAN)
+<https://github.com/google/kernel-sanitizers/blob/master/KTSAN.md>`_。KTSAN 是一
+个基于先行å‘生关系(happens-before)的数æ®ç«žäº‰æ£€æµ‹å™¨ï¼Œå®ƒæ˜¾å¼å»ºç«‹å†…å­˜æ“作之间的先
+åŽå‘生顺åºï¼Œè¿™å¯ä»¥ç”¨æ¥ç¡®å®š `æ•°æ®ç«žäº‰`_ 中定义的数æ®ç«žäº‰ã€‚
+
+为了建立正确的先行å‘生关系,KTSAN 必须了解 LKMM 的所有排åºè§„åˆ™å’ŒåŒæ­¥åŽŸè¯­ã€‚ä¸å¹¸
+çš„æ˜¯ï¼Œä»»ä½•é—æ¼éƒ½ä¼šå¯¼è‡´å¤§é‡çš„å‡é˜³æ€§ï¼Œè¿™åœ¨åŒ…å«ä¼—å¤šè‡ªå®šä¹‰åŒæ­¥æœºåˆ¶çš„内核上下文中特
+别有害。为了跟踪å‰å› åŽæžœå…³ç³»ï¼ŒKTSAN 的实现需è¦ä¸ºæ¯ä¸ªå†…å­˜ä½ç½®æä¾›å…ƒæ•°æ®ï¼ˆå½±å­å†…
+存),这æ„å‘³ç€æ¯é¡µå†…存对应 4 页影å­å†…存,在大型系统上å¯èƒ½ä¼šå¸¦æ¥æ•°å GiB 的开销
+。
diff --git a/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst b/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst
new file mode 100644
index 000000000000..d20b4ce66b9f
--- /dev/null
+++ b/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst
@@ -0,0 +1,55 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/doc-guide/checktransupdate.rst
+
+:译者: 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
+
+检查翻译更新
+
+这个脚本帮助跟踪不同语言的文档翻译状态,即文档是否与对应的英文版本保持更新。
+
+工作原理
+------------
+
+它使用 ``git log`` 命令来跟踪翻译提交的最新英文提交(按作者日期排序)和英文文档的
+最新提交。如果有任何差异,则该文件被认为是过期的,然后需要更新的提交将被收集并报告。
+
+实现的功能
+
+- 检查特定语言中的所有文件
+- 检查单个文件或一组文件
+- 提供更改输出格式的选项
+- 跟踪没有翻译过的文件的翻译状态
+
+用法
+-----
+
+::
+
+ ./scripts/checktransupdate.py --help
+
+具体用法请å‚è€ƒå‚æ•°è§£æžå™¨çš„输出
+
+示例
+
+- ``./scripts/checktransupdate.py -l zh_CN``
+  这将打印 zh_CN 语言中需要更新的所有文件。
+- ``./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst``
+  这将只打印指定文件的状态。
+
+然后输出类似如下的内容:
+
+::
+
+ Documentation/dev-tools/kfence.rst
+ No translation in the locale of zh_CN
+
+ Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+ commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
+ 1 commits needs resolving in total
+
+待实现的功能
+
+- 文件参数可以是文件夹而不仅仅是单个文件
diff --git a/Documentation/translations/zh_CN/doc-guide/index.rst b/Documentation/translations/zh_CN/doc-guide/index.rst
index 78c2e9a1697f..0ac1fc9315ea 100644
--- a/Documentation/translations/zh_CN/doc-guide/index.rst
+++ b/Documentation/translations/zh_CN/doc-guide/index.rst
@@ -18,6 +18,7 @@
parse-headers
contributing
maintainer-profile
+ checktransupdate
.. only:: subproject and html
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 20b9d4270d1f..7574e1673180 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -89,10 +89,10 @@ TODOList:
admin-guide/index
admin-guide/reporting-issues.rst
userspace-api/index
+ 内核构建系统 <kbuild/index>
TODOList:
-* 内核构建系统 <kbuild/index>
* 用户空间工具 <tools/index>
也可参考独立于内核文档的 `Linux 手册页 <https://www.kernel.org/doc/man-pages/>`_ 。
diff --git a/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst b/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst
new file mode 100644
index 000000000000..67a8abbf5887
--- /dev/null
+++ b/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/kbuild/gcc-plugins.rst
+:Translator: 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
+
+================
+GCC æ’件基础设施
+================
+
+
+介ç»
+====
+
+GCC æ’件是为编译器æä¾›é¢å¤–功能的å¯åŠ è½½æ¨¡å— [1]_。它们对于è¿è¡Œæ—¶æ’è£…å’Œé™æ€åˆ†æžéžå¸¸æœ‰ç”¨ã€‚
+我们å¯ä»¥åœ¨ç¼–译过程中通过回调 [2]_,GIMPLE [3]_,IPA [4]_ å’Œ RTL Passes [5]_
+(译者注:Pass 是编译器所采用的一ç§ç»“构化技术,用于完æˆç¼–译对象的分æžã€ä¼˜åŒ–或转æ¢ç­‰åŠŸèƒ½ï¼‰
+æ¥åˆ†æžã€ä¿®æ”¹å’Œæ·»åŠ æ›´å¤šçš„ä»£ç ã€‚
+
+内核的 GCC æ’ä»¶åŸºç¡€è®¾æ–½æ”¯æŒæž„建树外模å—ã€äº¤å‰ç¼–译和在å•独的目录中构建。æ’ä»¶æºæ–‡ä»¶å¿…须由
+C++ 编译器编译。
+
+ç›®å‰ GCC æ’ä»¶åŸºç¡€è®¾æ–½åªæ”¯æŒä¸€äº›æž¶æž„。æœç´¢ "select HAVE_GCC_PLUGINS" æ¥æŸ¥æ‰¾æ”¯æŒ
+GCC æ’件的架构。
+
+这个基础设施是从 grsecurity [6]_ å’Œ PaX [7]_ ç§»æ¤è¿‡æ¥çš„。
+
+--
+
+.. [1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html
+.. [2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API
+.. [3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html
+.. [4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html
+.. [5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html
+.. [6] https://grsecurity.net/
+.. [7] https://pax.grsecurity.net/
+
+
+目的
+====
+
+GCC æ’件的设计目的是æä¾›ä¸€ä¸ªç”¨äºŽè¯•验 GCC 或 Clang 上游没有的潜在编译器功能的场所。
+一旦它们的实用性得到验è¯ï¼Œè¿™äº›åŠŸèƒ½å°†è¢«æ·»åŠ åˆ° GCC(和 Clang)的上游。éšåŽï¼Œåœ¨æ‰€æœ‰
+支æŒçš„ GCC 版本都支æŒè¿™äº›åŠŸèƒ½åŽï¼Œå®ƒä»¬ä¼šè¢«ä»Žå†…核中移除。
+
+具体æ¥è¯´ï¼Œæ–°æ’件应该åªå®žçŽ°ä¸Šæ¸¸ç¼–è¯‘å™¨ï¼ˆGCC å’Œ Clangï¼‰ä¸æ”¯æŒçš„功能。
+
+当 Clang 中存在 GCC 中ä¸å­˜åœ¨çš„æŸé¡¹åŠŸèƒ½æ—¶ï¼Œåº”åŠªåŠ›å°†è¯¥åŠŸèƒ½åšåˆ° GCC 上游(而ä¸ä»…ä»…
+是作为内核专用的 GCC æ’件),以使整个生æ€éƒ½èƒ½ä»Žä¸­å—益。
+
+类似的,如果 GCC æ’ä»¶æä¾›çš„功能在 Clang 中 **ä¸** å­˜åœ¨ï¼Œä½†è¯¥åŠŸèƒ½è¢«è¯æ˜Žæ˜¯æœ‰ç”¨çš„,也应
+努力将该功能上传到 GCC(和 Clang)。
+
+在上游 GCC æä¾›äº†æŸé¡¹åŠŸèƒ½åŽï¼Œè¯¥æ’件将无法在相应的 GCC ç‰ˆæœ¬ï¼ˆä»¥åŠæ›´é«˜ç‰ˆæœ¬ï¼‰ä¸‹ç¼–译。
+一旦所有内核支æŒçš„ GCC 版本都æä¾›äº†è¯¥åŠŸèƒ½ï¼Œè¯¥æ’件将从内核中移除。
+
+
+文件
+====
+
+**$(src)/scripts/gcc-plugins**
+
+ 这是 GCC æ’件的目录。
+
+**$(src)/scripts/gcc-plugins/gcc-common.h**
+
+ 这是 GCC æ’件的兼容性头文件。
+ 应始终包å«å®ƒï¼Œè€Œä¸æ˜¯å•独的 GCC 头文件。
+
+**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
+$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
+
+ 这些头文件å¯ä»¥è‡ªåŠ¨ç”Ÿæˆ GIMPLEã€SIMPLE_IPAã€IPA å’Œ RTL passes 的注册结构。
+ ä¸Žæ‰‹åŠ¨åˆ›å»ºç»“æž„ç›¸æ¯”ï¼Œå®ƒä»¬æ›´å—æ¬¢è¿Žã€‚
+
+
+用法
+====
+
+你必须为你的 GCC 版本安装 GCC æ’件头文件,以 Ubuntu 上的 gcc-10 为例::
+
+ apt-get install gcc-10-plugin-dev
+
+或者在 Fedora 上::
+
+ dnf install gcc-plugin-devel libmpc-devel
+
+或者在 Fedora ä¸Šä½¿ç”¨åŒ…å«æ’件的交å‰ç¼–译器时::
+
+ dnf install libmpc-devel
+
+在内核é…置中å¯ç”¨ GCC æ’件基础设施与一些你想使用的æ’ä»¶::
+
+ CONFIG_GCC_PLUGINS=y
+ CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
+ ...
+
+è¿è¡Œ gcc(本地或交å‰ç¼–译器),确ä¿èƒ½å¤Ÿæ£€æµ‹åˆ°æ’件头文件::
+
+ gcc -print-file-name=plugin
+ CROSS_COMPILE=arm-linux-gnu- ${CROSS_COMPILE}gcc -print-file-name=plugin
+
+"plugin" è¿™ä¸ªè¯æ„味ç€å®ƒä»¬æ²¡æœ‰è¢«æ£€æµ‹åˆ°::
+
+ plugin
+
+完整的路径则表示æ’ä»¶å·²ç»è¢«æ£€æµ‹åˆ°::
+
+ /usr/lib/gcc/x86_64-redhat-linux/12/plugin
+
+编译包括æ’件在内的最å°å·¥å…·é›†::
+
+ make scripts
+
+或者直接在内核中è¿è¡Œ makeï¼Œä½¿ç”¨å¾ªçŽ¯å¤æ‚性 GCC æ’件编译整个内核。
+
+
+4. 如何添加新的 GCC æ’ä»¶
+========================
+
+GCC æ’ä»¶ä½äºŽ scripts/gcc-plugins/。你需è¦å°†æ’ä»¶æºæ–‡ä»¶æ”¾åœ¨ scripts/gcc-plugins/ 目录下。
+å­ç›®å½•åˆ›å»ºå¹¶ä¸æ”¯æŒï¼Œä½ å¿…须添加在 scripts/gcc-plugins/Makefileã€scripts/Makefile.gcc-plugins
+和相关的 Kconfig 文件中。
diff --git a/Documentation/translations/zh_CN/kbuild/headers_install.rst b/Documentation/translations/zh_CN/kbuild/headers_install.rst
new file mode 100644
index 000000000000..02cb8896e555
--- /dev/null
+++ b/Documentation/translations/zh_CN/kbuild/headers_install.rst
@@ -0,0 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/kbuild/headers_install.rst
+:Translator: 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
+
+============================
+导出内核头文件供用户空间使用
+============================
+
+"make headers_install" 命令以适åˆäºŽç”¨æˆ·ç©ºé—´ç¨‹åºçš„å½¢å¼å¯¼å‡ºå†…核头文件。
+
+Linux 内核导出的头文件æè¿°äº†ç”¨æˆ·ç©ºé—´ç¨‹åºå°è¯•使用内核æœåŠ¡çš„ API。这些内核
+头文件被系统的 C 库(例如 glibc å’Œ uClibc)用于定义å¯ç”¨çš„系统调用,以åŠ
+与这些系统调用一起使用的常é‡å’Œç»“构。C 库的头文件包括æ¥è‡ª linux å­ç›®å½•çš„
+内核头文件。系统的 libc 头文件通常被安装在默认ä½ç½® /usr/include,而内核
+头文件在该ä½ç½®çš„å­ç›®å½•ä¸­ï¼ˆä¸»è¦æ˜¯ /usr/include/linux å’Œ /usr/include/asm)。
+
+内核头文件å‘åŽå…¼å®¹ï¼Œä½†ä¸å‘å‰å…¼å®¹ã€‚è¿™æ„味ç€ä½¿ç”¨æ—§å†…核头文件的 C 库构建的程åº
+å¯ä»¥åœ¨æ–°å†…核上è¿è¡Œï¼ˆå°½ç®¡å®ƒå¯èƒ½æ— æ³•访问新特性),但使用新内核头文件构建的程åº
+å¯èƒ½æ— æ³•在旧内核上è¿è¡Œã€‚
+
+"make headers_install" 命令å¯ä»¥åœ¨å†…æ ¸æºä»£ç çš„顶层目录中è¿è¡Œï¼ˆæˆ–使用标准
+的树外构建)。它接å—两个å¯é€‰å‚æ•°::
+
+ make headers_install ARCH=i386 INSTALL_HDR_PATH=/usr
+
+ARCH 表明为其生成头文件的架构,默认为当前架构。导出内核头文件的 linux/asm
+目录是基于特定平台的,要查看支持架构的完整列表,使用以下命令::
+
+ ls -d include/asm-* | sed 's/.*-//'
+
+INSTALL_HDR_PATH 表明头文件的安装位置,默认为 "./usr"。
+
+该命令会在 INSTALL_HDR_PATH 中自动创建一个 'include' 目录,而头文件
+会被安装在 INSTALL_HDR_PATH/include 中。
+
+内核头文件导出的基础设施由 David Woodhouse <dwmw2@infradead.org> 维护。
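+
+例如,下面这个用户空间程序只依赖导出的头文件(仅为示意)::
+
+	#include <linux/types.h>	/* 来自 INSTALL_HDR_PATH/include */
+	#include <stdio.h>
+
+	int main(void)
+	{
+		__u64 val = 42;	/* 内核导出的定宽类型 */
+
+		printf("%llu\n", (unsigned long long)val);
+		return 0;
+	}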
diff --git a/Documentation/translations/zh_CN/kbuild/index.rst b/Documentation/translations/zh_CN/kbuild/index.rst
new file mode 100644
index 000000000000..b51655d981f6
--- /dev/null
+++ b/Documentation/translations/zh_CN/kbuild/index.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/kbuild/index.rst
+:Translator: 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
+
+============
+内核编译系统
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ headers_install
+ gcc-plugins
+
+TODO:
+
+- kconfig-language
+- kconfig-macro-language
+- kbuild
+- kconfig
+- makefiles
+- modules
+- issues
+- reproducible-builds
+- llvm
+
+.. only:: subproject and html
+
+ 目录
+ =====
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst
index f95063826a15..8c8461c6cb9f 100644
--- a/Documentation/translations/zh_CN/mm/page_migration.rst
+++ b/Documentation/translations/zh_CN/mm/page_migration.rst
@@ -50,8 +50,8 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_
1. 从LRU中移除页面。
- 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 isolate_lru_page()
- 来完成的。调用isolate_lru_page()增加了对该页的引用,这样在页面迁移发生时它就不会
+ 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 folio_isolate_lru()
+ 来完成的。调用folio_isolate_lru()增加了对该页的引用,这样在页面迁移发生时它就不会
消失。它还可以防止交换器或其他扫描器遇到该页。
@@ -65,7 +65,7 @@ migrate_pages()如何工作
=======================
migrate_pages()对它的页面列表进行了多次处理。如果当时对一个页面的所有引用都可以被移除,
-那么这个页面就会被移动。该页已经通过isolate_lru_page()从LRU中移除,并且refcount被
+那么这个页面就会被移动。该页已经通过folio_isolate_lru()从LRU中移除,并且refcount被
增加,以便在页面迁移发生时不释放该页。
步骤:
diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst
index 5a5cd7c01c62..3bcb3bdaf533 100644
--- a/Documentation/translations/zh_CN/process/index.rst
+++ b/Documentation/translations/zh_CN/process/index.rst
@@ -49,10 +49,11 @@ TODOLIST:
embargoed-hardware-issues
cve
+ security-bugs
TODOLIST:
-* security-bugs
+* handling-regressions
其它大多数开发人员感兴趣的社区指南:
diff --git a/Documentation/translations/zh_CN/admin-guide/security-bugs.rst b/Documentation/translations/zh_CN/process/security-bugs.rst
index d6b8f8a4e7f6..a8f5fcbfadc9 100644
--- a/Documentation/translations/zh_CN/admin-guide/security-bugs.rst
+++ b/Documentation/translations/zh_CN/process/security-bugs.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
.. include:: ../disclaimer-zh_CN.rst
:Original: :doc:`../../../process/security-bugs`
@@ -5,6 +7,7 @@
:译者:
å´æƒ³æˆ Wu XiangCheng <bobwxc@email.cn>
+ 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
安全缺陷
=========
@@ -17,13 +20,13 @@ Linux内核开å‘人员éžå¸¸é‡è§†å®‰å…¨æ€§ã€‚因此我们想知é“何时å‘现
å¯ä»¥é€šè¿‡ç”µå­é‚®ä»¶<security@kernel.org>è”ç³»Linux内核安全团队。这是一个安全人员
çš„ç§æœ‰åˆ—表,他们将帮助验è¯é”™è¯¯æŠ¥å‘Šå¹¶å¼€å‘å’Œå‘布修å¤ç¨‹åºã€‚å¦‚æžœæ‚¨å·²ç»æœ‰äº†ä¸€ä¸ª
-ä¿®å¤ï¼Œè¯·å°†å…¶åŒ…å«åœ¨æ‚¨çš„æŠ¥å‘Šä¸­ï¼Œè¿™æ ·å¯ä»¥å¤§å¤§åŠ å¿«è¿›ç¨‹ã€‚å®‰å…¨å›¢é˜Ÿå¯èƒ½ä¼šä»ŽåŒºåŸŸç»´æŠ¤
+ä¿®å¤ï¼Œè¯·å°†å…¶åŒ…å«åœ¨æ‚¨çš„æŠ¥å‘Šä¸­ï¼Œè¿™æ ·å¯ä»¥å¤§å¤§åŠ å¿«å¤„ç†è¿›ç¨‹ã€‚安全团队å¯èƒ½ä¼šä»ŽåŒºåŸŸç»´æŠ¤
人员那里获得é¢å¤–的帮助,以ç†è§£å’Œä¿®å¤å®‰å…¨æ¼æ´žã€‚
与任何缺陷一样,æä¾›çš„ä¿¡æ¯è¶Šå¤šï¼Œè¯Šæ–­å’Œä¿®å¤å°±è¶Šå®¹æ˜“ã€‚å¦‚æžœæ‚¨ä¸æ¸…æ¥šå“ªäº›ä¿¡æ¯æœ‰ç”¨ï¼Œ
请查看“Documentation/translations/zh_CN/admin-guide/reporting-issues.rstâ€ä¸­
-æ¦‚è¿°çš„æ­¥éª¤ã€‚ä»»ä½•åˆ©ç”¨æ¼æ´žçš„æ”»å‡»ä»£ç éƒ½éžå¸¸æœ‰ç”¨ï¼Œæœªç»æŠ¥å‘Šè€…åŒæ„ä¸ä¼šå¯¹å¤–å‘布,除
-éžå·²ç»å…¬å¼€ã€‚
+æ¦‚è¿°çš„æ­¥éª¤ã€‚ä»»ä½•åˆ©ç”¨æ¼æ´žçš„æ”»å‡»ä»£ç éƒ½éžå¸¸æœ‰ç”¨ï¼Œæœªç»æŠ¥å‘Šè€…åŒæ„ä¸ä¼šå¯¹å¤–å‘布,
+除éžå·²ç»å…¬å¼€ã€‚
请尽å¯èƒ½å‘逿— é™„件的纯文本电å­é‚®ä»¶ã€‚如果所有的细节都è—在附件里,那么就很难对
ä¸€ä¸ªå¤æ‚的问题进行上下文引用的讨论。把它想象æˆä¸€ä¸ª
@@ -49,24 +52,31 @@ Linux内核开å‘人员éžå¸¸é‡è§†å®‰å…¨æ€§ã€‚因此我们想知é“何时å‘现
æ¢å¥è¯è¯´ï¼Œæˆ‘们唯一感兴趣的是修å¤ç¼ºé™·ã€‚æäº¤ç»™å®‰å…¨åˆ—表的所有其他资料以åŠå¯¹æŠ¥å‘Š
的任何åŽç»­è®¨è®ºï¼Œå³ä½¿åœ¨è§£é™¤é™åˆ¶ä¹‹åŽï¼Œä¹Ÿå°†æ°¸ä¹…ä¿å¯†ã€‚
-åè°ƒ
-------
+与其他团队åè°ƒ
+--------------
+
+è™½ç„¶å†…æ ¸å®‰å…¨å›¢é˜Ÿä»…å…³æ³¨ä¿®å¤æ¼æ´žï¼Œä½†è¿˜æœ‰å…¶ä»–组织关注修å¤å‘行版上的安全问题以åŠåè°ƒ
+æ“ä½œç³»ç»ŸåŽ‚å•†çš„æ¼æ´žæŠ«éœ²ã€‚å调通常由 "linux-distros" 邮件列表处ç†ï¼Œè€ŒæŠ«éœ²åˆ™ç”±
+公共 "oss-security" 邮件列表进行。两者紧密关è”且被展示在 linux-distros 维基:
+<https://oss-security.openwall.org/wiki/mailing-lists/distros>
+
+请注æ„,这三个列表的å„自政策和规则是ä¸åŒçš„,因为它们追求ä¸åŒçš„目标。内核安全团队
+与其他团队之间的å调很困难,因为对于内核安全团队,ä¿å¯†æœŸï¼ˆå³æœ€å¤§å…许天数)是从补ä¸
+å¯ç”¨æ—¶å¼€å§‹ï¼Œè€Œ "linux-distros" 则从首次å‘布到列表时开始计算,无论是å¦å­˜åœ¨è¡¥ä¸ã€‚
-å¯¹æ•æ„Ÿç¼ºé™·ï¼ˆä¾‹å¦‚那些å¯èƒ½å¯¼è‡´æƒé™æå‡çš„缺陷)的修å¤å¯èƒ½éœ€è¦ä¸Žç§æœ‰é‚®ä»¶åˆ—表
-<linux-distros@vs.openwall.org>进行å调,以便分å‘供应商åšå¥½å‡†å¤‡ï¼Œåœ¨å…¬å¼€æŠ«éœ²
-ä¸Šæ¸¸è¡¥ä¸æ—¶å‘布一个已修å¤çš„内核。å‘行版将需è¦ä¸€äº›æ—¶é—´æ¥æµ‹è¯•建议的补ä¸ï¼Œé€šå¸¸
-ä¼šè¦æ±‚至少几天的é™åˆ¶ï¼Œè€Œä¾›åº”商更新å‘布更倾å‘于周二至周四。若åˆé€‚,安全团队
-å¯ä»¥å助这ç§å调,或者报告者å¯ä»¥ä»Žä¸€å¼€å§‹å°±åŒ…括linuxå‘è¡Œç‰ˆã€‚åœ¨è¿™ç§æƒ…况下,请
-è®°ä½åœ¨ç”µå­é‚®ä»¶ä¸»é¢˜è¡Œå‰é¢åŠ ä¸Šâ€œ[vs]â€ï¼Œå¦‚linuxå‘行版wiki中所述:
-<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>。
+å› æ­¤ï¼Œå†…æ ¸å®‰å…¨å›¢é˜Ÿå¼ºçƒˆå»ºè®®ï¼Œä½œä¸ºä¸€ä½æ½œåœ¨å®‰å…¨é—®é¢˜çš„æŠ¥å‘Šè€…,在å—å½±å“代ç çš„维护者
+接å—è¡¥ä¸ä¹‹å‰ï¼Œä¸”在您阅读上述å‘行版维基页é¢å¹¶å®Œå…¨ç†è§£è”ç³» "linux-distros"
+é‚®ä»¶åˆ—è¡¨ä¼šå¯¹æ‚¨å’Œå†…æ ¸ç¤¾åŒºæ–½åŠ çš„è¦æ±‚之å‰ï¼Œä¸è¦è”ç³» "linux-distros" 邮件列表。
+这也æ„味ç€é€šå¸¸æƒ…况下ä¸è¦åŒæ—¶æŠ„é€ä¸¤ä¸ªé‚®ä»¶åˆ—表,除éžåœ¨å调时有已接å—但尚未åˆå¹¶çš„è¡¥ä¸ã€‚
+æ¢å¥è¯è¯´ï¼Œåœ¨è¡¥ä¸è¢«æŽ¥å—之å‰ï¼Œä¸è¦æŠ„é€ "linux-distros";在修å¤ç¨‹åºè¢«åˆå¹¶ä¹‹åŽï¼Œ
+ä¸è¦æŠ„é€å†…核安全团队。
CVE分é…
--------
-安全团队通常ä¸åˆ†é…CVE,我们也ä¸éœ€è¦å®ƒä»¬æ¥è¿›è¡ŒæŠ¥å‘Šæˆ–ä¿®å¤ï¼Œå› ä¸ºè¿™ä¼šä½¿è¿‡ç¨‹ä¸å¿…
-è¦çš„夿‚化,并å¯èƒ½è€½è¯¯ç¼ºé™·å¤„ç†ã€‚如果报告者希望在公开披露之å‰åˆ†é…一个CVEç¼–å·ï¼Œ
-他们需è¦è”ç³»ä¸Šè¿°çš„ç§æœ‰linux-distros列表。当在æä¾›è¡¥ä¸ä¹‹å‰å·²æœ‰è¿™æ ·çš„CVEç¼–å·æ—¶ï¼Œ
-如报告者愿æ„,最好在æäº¤æ¶ˆæ¯ä¸­æåŠå®ƒã€‚
+安全团队ä¸åˆ†é… CVEï¼ŒåŒæ—¶æˆ‘们也ä¸éœ€è¦ CVE æ¥æŠ¥å‘Šæˆ–ä¿®å¤æ¼æ´žï¼Œå› ä¸ºè¿™ä¼šä½¿è¿‡ç¨‹ä¸å¿…è¦
+çš„å¤æ‚化,并å¯èƒ½å»¶è¯¯æ¼æ´žå¤„ç†ã€‚如果报告者希望为确认的问题分é…一个 CVE ç¼–å·ï¼Œ
+å¯ä»¥è”ç³» :doc:`内核 CVE 分é…团队 <../process/cve>` 获å–。
ä¿å¯†åè®®
---------
diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst
index 7864107e60a8..7ca16bda3709 100644
--- a/Documentation/translations/zh_CN/process/submitting-patches.rst
+++ b/Documentation/translations/zh_CN/process/submitting-patches.rst
@@ -208,7 +208,7 @@ torvalds@linux-foundation.org 。他收到的邮件很多,所以一般æ¥è¯´æœ
如果您有修复可利用安全漏洞的补丁,请将该补丁发送到 security@kernel.org 。对于
严重的bug,可以考虑短期禁令以允许分销商(有时间)向用户发布补丁;在这种情况下,
显然不应将补丁发送到任何公共列表。
-参见 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。
+参见 Documentation/translations/zh_CN/process/security-bugs.rst 。
修复已发布内核中严重错误的补丁程序应该抄送给稳定版维护人员,方法是把以下列行
放进补丁的签准区(注意,不是电子邮件收件人)::
diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
index 1822956be0e0..57d36bfbb1b3 100644
--- a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
+++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
@@ -15,7 +15,7 @@
本文通過演示DAMON的默認用戶空間工具,簡要地介紹瞭如何使用DAMON。請注意,爲了簡潔
起見,本文檔只描述了它的部分功能。更多細節請參考該工具的使用文檔。
-`doc <https://github.com/awslabs/damo/blob/next/USAGE.md>`_ .
+`doc <https://github.com/damonitor/damo/blob/next/USAGE.md>`_ .
前提條件
@@ -31,7 +31,7 @@
------------
在演示中,我們將使用DAMON的默認用戶空間工具,稱爲DAMON Operator(DAMO)。它可以在
-https://github.com/awslabs/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但
+https://github.com/damonitor/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但
這並不是強制性的。
因爲DAMO使用了DAMON的sysfs接口(詳情請參考:doc:`usage`),你應該確保
diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
index 7464279f9b7d..fbbbbad59ee4 100644
--- a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
@@ -16,16 +16,16 @@
DAMON 爲不同的用戶提供了下面這些接口。
- *DAMON用戶空間工具。*
- `這 <https://github.com/awslabs/damo>`_ 爲有這特權的人, 如系統管理員,希望有一個剛好
+ `這 <https://github.com/damonitor/damo>`_ 爲有這特權的人, 如系統管理員,希望有一個剛好
可以工作的人性化界面。
使用它,用戶可以以人性化的方式使用DAMON的主要功能。不過,它可能不會爲特殊情況進行高度調整。
它同時支持虛擬和物理地址空間的監測。更多細節,請參考它的 `使用文檔
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_。
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_。
- *sysfs接口。*
:ref:`這 <sysfs_interface>` 是爲那些希望更高級的使用DAMON的特權用戶空間程序員準備的。
使用它,用戶可以通過讀取和寫入特殊的sysfs文件來使用DAMON的主要功能。因此,你可以編寫和使
用你個性化的DAMON sysfs包裝程序,代替你讀/寫sysfs文件。 `DAMON用戶空間工具
- <https://github.com/awslabs/damo>`_ 就是這種程序的一個例子 它同時支持虛擬和物理地址
+ <https://github.com/damonitor/damo>`_ 就是這種程序的一個例子 它同時支持虛擬和物理地址
空間的監測。注意,這個界面只提供簡單的監測結果 :ref:`統計 <damos_stats>`。對於詳細的監測
結果,DAMON提供了一個:ref:`跟蹤點 <tracepoint>`。
- *debugfs interface.*
@@ -332,7 +332,7 @@ tried_regions/<N>/
# echo 500 > watermarks/mid
# echo 300 > watermarks/low
-請注意,我們強烈建議使用用戶空間的工具,如 `damo <https://github.com/awslabs/damo>`_ ,
+請注意,我們強烈建議使用用戶空間的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那樣手動讀寫文件。以上只是一個例子。
debugfs接å£
diff --git a/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst b/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst
index bc132b25f2ae..1d4e4c7a6750 100644
--- a/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst
+++ b/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst
@@ -301,7 +301,7 @@ Documentation/admin-guide/reporting-regressions.rst å°æ­¤é€²è¡Œäº†æ›´è©³ç´°çš„è
添加到迴歸跟蹤列表中,以確保它不會被忽略。
什麼是安全問題留給您自己判斷。在繼續之前,請考慮閱讀
-Documentation/translations/zh_CN/admin-guide/security-bugs.rst ,
+Documentation/translations/zh_CN/process/security-bugs.rst ,
因爲它提供瞭如何最恰當地處理安全問題的額外細節。
當發生了完全無法接受的糟糕事情時,此問題就是一個“非常嚴重的問題”。例如,
@@ -984,7 +984,7 @@ Documentation/admin-guide/reporting-regressions.rst ;它還æä¾›äº†å¤§é‡å…¶ä
報告,請將報告的文本轉發到這些地址;但請在報告的頂部加上註釋,表明您提交了
報告,並附上工單鏈接。
-更多信息請參見 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。
+更多信息請參見 Documentation/translations/zh_CN/process/security-bugs.rst 。
發佈報告後的責任
diff --git a/Documentation/translations/zh_TW/process/submitting-patches.rst b/Documentation/translations/zh_TW/process/submitting-patches.rst
index f12f2f193f85..64de92c07906 100644
--- a/Documentation/translations/zh_TW/process/submitting-patches.rst
+++ b/Documentation/translations/zh_TW/process/submitting-patches.rst
@@ -209,7 +209,7 @@ torvalds@linux-foundation.org 。他收到的郵件很多,所以一般來說æœ
如果您有修復可利用安全漏洞的補丁,請將該補丁發送到 security@kernel.org 。對於
嚴重的bug,可以考慮短期禁令以允許分銷商(有時間)向用戶發佈補丁;在這種情況下,
顯然不應將補丁發送到任何公共列表。
-參見 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。
+參見 Documentation/translations/zh_CN/process/security-bugs.rst 。
修復已發佈內核中嚴重錯誤的補丁程序應該抄送給穩定版維護人員,方法是把以下列行
放進補丁的籤準區(注意,不是電子郵件收件人)::
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index e91c0376ee59..e4be1378ba26 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -78,6 +78,7 @@ Code Seq# Include File Comments
0x03 all linux/hdreg.h
0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these.
0x06 all linux/lp.h
+0x07 9F-D0 linux/vmw_vmci_defs.h, uapi/linux/vm_sockets.h
0x09 all linux/raid/md_u.h
0x10 00-0F drivers/char/s390/vmcp.h
0x10 10-1F arch/s390/include/uapi/sclp_ctl.h
@@ -292,6 +293,7 @@ Code Seq# Include File Comments
't' 80-8F linux/isdn_ppp.h
't' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM
'u' 00-1F linux/smb_fs.h gone
+'u' 00-2F linux/ublk_cmd.h conflict!
'u' 20-3F linux/uvcvideo.h USB video class host driver
'u' 40-4f linux/udmabuf.h userspace dma-buf misc device
'v' 00-1F linux/ext2_fs.h conflict!
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index 37dafce8038b..c8d3e46badc5 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: July 2024
+:Date: September 2024
The goal of Landlock is to enable to restrict ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -81,6 +81,9 @@ to be explicit about the denied-by-default access rights.
.handled_access_net =
LANDLOCK_ACCESS_NET_BIND_TCP |
LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .scoped =
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL,
};
Because we may not know on which kernel version an application will be
@@ -119,6 +122,11 @@ version, and only use the available subset of access rights:
case 4:
/* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV;
+ __attribute__((fallthrough));
+ case 5:
+ /* Removes LANDLOCK_SCOPE_* for ABI < 6 */
+ ruleset_attr.scoped &= ~(LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL);
}
This enables to create an inclusive ruleset that will contain our rules.
@@ -306,6 +314,38 @@ To be allowed to use :manpage:`ptrace(2)` and related syscalls on a target
process, a sandboxed process should have a subset of the target process rules,
which means the tracee must be in a sub-domain of the tracer.
+IPC scoping
+-----------
+
+Similar to the implicit `Ptrace restrictions`_, we may want to further restrict
+interactions between sandboxes. Each Landlock domain can be explicitly scoped
+for a set of actions by specifying it on a ruleset. For example, if a
+sandboxed process should not be able to :manpage:`connect(2)` to a
+non-sandboxed process through abstract :manpage:`unix(7)` sockets, we can
+specify such restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``.
+Moreover, if a sandboxed process should not be able to send a signal to a
+non-sandboxed process, we can specify this restriction with
+``LANDLOCK_SCOPE_SIGNAL``.
+
+A sandboxed process can connect to a non-sandboxed process when its domain is
+not scoped. If a process's domain is scoped, it can only connect to sockets
+created by processes in the same scope.
+Moreover, if a process's domain is scoped for signals, it can only send
+signals to processes in the same scope.
+
+A connected datagram socket behaves like a stream socket when its domain is
+scoped, meaning that if the domain is scoped after the socket is connected, it
+can still :manpage:`send(2)` data just like a stream socket. However, in the
+same scenario, a non-connected datagram socket cannot send data (with
+:manpage:`sendto(2)`) outside its scope.
+
+A process with a scoped domain can inherit a socket created by a non-scoped
+process. Since the process's domain is scoped, it cannot connect to this
+socket.
+
+IPC scoping does not support exceptions, so if a domain is scoped, no rules can
+be added to allow access to resources or processes outside of the scope.
+
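+As a minimal sketch, reusing the ``landlock_create_ruleset()`` and
+``landlock_restrict_self()`` helpers shown earlier in this document, a
+process could scope itself for both actions like this::
+
+    struct landlock_ruleset_attr ruleset_attr = {
+        .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+                  LANDLOCK_SCOPE_SIGNAL,
+    };
+    int ruleset_fd;
+
+    ruleset_fd = landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+    if (ruleset_fd < 0)
+        handle_error();  /* hypothetical error handler */
+
+    /* Needed to enforce a ruleset without privileges */
+    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
+        landlock_restrict_self(ruleset_fd, 0))
+        handle_error();
+    close(ruleset_fd);
+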
Truncating files
----------------
@@ -404,7 +444,7 @@ Access rights
-------------
.. kernel-doc:: include/uapi/linux/landlock.h
- :identifiers: fs_access net_access
+ :identifiers: fs_access net_access scope
Creating a new ruleset
----------------------
@@ -541,6 +581,20 @@ earlier ABI.
Starting with the Landlock ABI version 5, it is possible to restrict the use of
:manpage:`ioctl(2)` using the new ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right.
+Abstract UNIX socket scoping (ABI < 6)
+--------------------------------------
+
+Starting with the Landlock ABI version 6, it is possible to restrict
+connections to an abstract :manpage:`unix(7)` socket by setting
+``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET`` to the ``scoped`` ruleset attribute.
+
+Signal scoping (ABI < 6)
+------------------------
+
+Starting with the Landlock ABI version 6, it is possible to restrict
+:manpage:`signal(7)` sending by setting ``LANDLOCK_SCOPE_SIGNAL`` to the
+``scoped`` ruleset attribute.
+
.. _kernel_support:
Kernel support
diff --git a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
index d5e014ce19b5..1d5248979a6d 100644
--- a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
@@ -137,6 +137,12 @@ returns the information to the application. The ioctl never fails.
- 0x00000100
- If this capability is set, then :ref:`CEC_ADAP_G_CONNECTOR_INFO` can
be used.
+ * .. _`CEC-CAP-REPLY-VENDOR-ID`:
+
+ - ``CEC_CAP_REPLY_VENDOR_ID``
+ - 0x00000200
+ - If this capability is set, then
+ :ref:`CEC_MSG_FL_REPLY_VENDOR_ID <cec-msg-flags>` can be used.
Return Value
============
diff --git a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
index 364938ad34df..3e6c511e054f 100644
--- a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
+++ b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
@@ -232,6 +232,21 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
capability. If that is not set, then the ``EPERM`` error code is
returned.
+ * .. _`CEC-MSG-FL-REPLY-VENDOR-ID`:
+
+ - ``CEC_MSG_FL_REPLY_VENDOR_ID``
+ - 4
+ - This flag is only available if the ``CEC_CAP_REPLY_VENDOR_ID`` capability
+ is set. If this flag is set, then the reply is expected to consist of
+ the ``CEC_MSG_VENDOR_COMMAND_WITH_ID`` opcode followed by the Vendor ID
+ (in bytes 1-4 of the message), followed by the ``struct cec_msg``
+ ``reply`` field.
+
+ Note that this assumes that the byte after the Vendor ID is a
+ vendor-specific opcode.
+
+ This flag makes it easier to wait for replies to vendor commands.
+
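+ For example, a transmit using this flag could look as follows (a rough
+ sketch only; the vendor ID and the vendor-specific opcodes are made up)::
+
+	struct cec_msg msg;
+	__u32 vendor_id = 0x000c03;	/* hypothetical vendor ID */
+
+	cec_msg_init(&msg, log_addr, dest_addr);
+	msg.msg[msg.len++] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+	msg.msg[msg.len++] = vendor_id >> 16;
+	msg.msg[msg.len++] = vendor_id >> 8;
+	msg.msg[msg.len++] = vendor_id;
+	msg.msg[msg.len++] = 0x01;	/* hypothetical vendor-specific opcode */
+	msg.flags = CEC_MSG_FL_REPLY_VENDOR_ID;
+	msg.reply = 0x02;		/* expected vendor-specific reply opcode */
+	msg.timeout = 1000;
+
+	if (ioctl(fd, CEC_TRANSMIT, &msg))
+		perror("CEC_TRANSMIT");
+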
.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{10.8cm}|
.. _cec-tx-status:
diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst
index 72aef1759b60..35674eeae20d 100644
--- a/Documentation/userspace-api/media/v4l/biblio.rst
+++ b/Documentation/userspace-api/media/v4l/biblio.rst
@@ -334,6 +334,17 @@ VESA DMT
:author: Video Electronics Standards Association (http://www.vesa.org)
+.. _vesaeddc:
+
+E-DDC
+=====
+
+
+:title: VESA Enhanced Display Data Channel (E-DDC) Standard
+:subtitle: Version 1.3
+
+:author: Video Electronics Standards Association (http://www.vesa.org)
+
.. _vesaedid:
EDID
diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst
index 52bbee81c080..856874341882 100644
--- a/Documentation/userspace-api/media/v4l/buffer.rst
+++ b/Documentation/userspace-api/media/v4l/buffer.rst
@@ -694,41 +694,6 @@ enum v4l2_memory
- 4
- The buffer is used for :ref:`DMA shared buffer <dmabuf>` I/O.
-.. _memory-flags:
-
-Memory Consistency Flags
-------------------------
-
-.. raw:: latex
-
- \small
-
-.. tabularcolumns:: |p{7.0cm}|p{2.1cm}|p{8.4cm}|
-
-.. cssclass:: longtable
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
- :widths: 3 1 4
-
- * .. _`V4L2-MEMORY-FLAG-NON-COHERENT`:
-
- - ``V4L2_MEMORY_FLAG_NON_COHERENT``
- - 0x00000001
- - A buffer is allocated either in coherent (it will be automatically
- coherent between the CPU and the bus) or non-coherent memory. The
- latter can provide performance gains, for instance the CPU cache
- sync/flush operations can be avoided if the buffer is accessed by the
- corresponding device only and the CPU does not read/write to/from that
- buffer. However, this requires extra care from the driver -- it must
- guarantee memory consistency by issuing a cache flush/sync when
- consistency is needed. If this flag is set V4L2 will attempt to
- allocate the buffer in non-coherent memory. The flag takes effect
- only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
- queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
- <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.
-
.. raw:: latex
\normalsize
diff --git a/Documentation/userspace-api/media/v4l/capture.c.rst b/Documentation/userspace-api/media/v4l/capture.c.rst
index eef6772967a1..349541b1dac0 100644
--- a/Documentation/userspace-api/media/v4l/capture.c.rst
+++ b/Documentation/userspace-api/media/v4l/capture.c.rst
@@ -333,7 +333,7 @@ file: media/v4l/capture.c
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
- "memory mappingn", dev_name);
+ "memory mapping\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
@@ -391,7 +391,7 @@ file: media/v4l/capture.c
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
- "user pointer i/on", dev_name);
+ "user pointer i/o\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
@@ -547,7 +547,7 @@ file: media/v4l/capture.c
}
if (!S_ISCHR(st.st_mode)) {
- fprintf(stderr, "%s is no devicen", dev_name);
+ fprintf(stderr, "%s is no device\n", dev_name);
exit(EXIT_FAILURE);
}
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
index 22bde00d42df..0da635691fdc 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
@@ -2993,7 +2993,11 @@ This structure contains all loop filter related parameters. See sections
- Applications and drivers must set this to zero.
* - __u16
- ``max_frame_width_minus_1``
- - specifies the maximum frame width minus 1 for the frames represented by
+ - Specifies the maximum frame width minus 1 for the frames represented by
+ this sequence header.
+ * - __u16
+ - ``max_frame_height_minus_1``
+ - Specifies the maximum frame height minus 1 for the frames represented by
this sequence header.
.. _av1_sequence_flags:
@@ -3374,7 +3378,7 @@ semantics" of :ref:`av1`.
- ``uv_pri_strength[V4L2_AV1_CDEF_MAX]``
- Specifies the strength of the primary filter.
* - __u8
- - ``uv_secondary_strength[V4L2_AV1_CDEF_MAX]``
+ - ``uv_sec_strength[V4L2_AV1_CDEF_MAX]``
- Specifies the strength of the secondary filter.
.. c:type:: v4l2_av1_segment_feature
@@ -3439,7 +3443,7 @@ semantics" of :ref:`av1`.
- Bitmask defining which features are enabled in each segment. Use
V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
* - __u16
- - `feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]``
+ - ``feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]``
- Data attached to each feature. Data entry is only valid if the feature
is enabled.
@@ -3490,7 +3494,7 @@ AV1 Loop filter params as defined in section 6.8.10 "Loop filter semantics" of
.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
-.. flat-table:: struct v4l2_av1_global_motion
+.. flat-table:: struct v4l2_av1_loop_filter
:header-rows: 0
:stub-columns: 0
:widths: 1 1 2
@@ -3806,12 +3810,12 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`.
* - struct :c:type:`v4l2_av1_quantization`
- ``quantization``
- Quantization parameters.
- * - struct :c:type:`v4l2_av1_segmentation`
- - ``segmentation``
- - Segmentation parameters.
* - __u8
- ``superres_denom``
- The denominator for the upscaling ratio.
+ * - struct :c:type:`v4l2_av1_segmentation`
+ - ``segmentation``
+ - Segmentation parameters.
* - struct :c:type:`v4l2_av1_loop_filter`
- ``loop_filter``
- Loop filter params
@@ -3829,7 +3833,7 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`.
* - struct :c:type:`v4l2_av1_loop_restoration`
- ``loop_restoration``
- Loop restoration parameters.
- * - struct :c:type:`v4l2_av1_loop_global_motion`
+ * - struct :c:type:`v4l2_av1_global_motion`
- ``global_motion``
- Global motion parameters.
* - __u32
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
index b1c2ab2854af..27803dca8d3e 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
@@ -31,7 +31,7 @@ Image Process Control IDs
Pixel sampling rate in the device's pixel array. This control is
read-only and its unit is pixels / second.
- Some devices use horizontal and vertical balanking to configure the frame
+ Some devices use horizontal and vertical blanking to configure the frame
rate. The frame rate can be calculated from the pixel rate, analogue crop
rectangle as well as horizontal and vertical blanking. The pixel rate
control may be present in a different sub-device than the blanking controls
diff --git a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
index fa04f00bcd2e..959f6bde8695 100644
--- a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
@@ -1,28 +1,67 @@
.. SPDX-License-Identifier: GPL-2.0
-.. _v4l2-meta-fmt-rk-isp1-params:
-
.. _v4l2-meta-fmt-rk-isp1-stat-3a:
-*****************************************************************************
-V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s')
-*****************************************************************************
+************************************************************************************************************************
+V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s'), V4L2_META_FMT_RK_ISP1_EXT_PARAMS ('rk1e')
+************************************************************************************************************************
+========================
Configuration parameters
========================
-The configuration parameters are passed to the
+Userspace configures the RkISP1 ISP by providing parameters to the driver
+through the :c:type:`v4l2_meta_format` interface.
+
+There are two methods to configure the ISP: the `fixed parameters`
+configuration format and the `extensible parameters` configuration
+format.
+
+.. _v4l2-meta-fmt-rk-isp1-params:
+
+Fixed parameters configuration format
+=====================================
+
+When using the fixed configuration format, parameters are passed to the
:ref:`rkisp1_params <rkisp1_params>` metadata output video node, using
-the :c:type:`v4l2_meta_format` interface. The buffer contains
-a single instance of the C structure :c:type:`rkisp1_params_cfg` defined in
-``rkisp1-config.h``. So the structure can be obtained from the buffer by:
+the `V4L2_META_FMT_RK_ISP1_PARAMS` meta format.
+
+The buffer contains a single instance of the C structure
+:c:type:`rkisp1_params_cfg` defined in ``rkisp1-config.h``. So the structure can
+be obtained from the buffer by:
.. code-block:: c
struct rkisp1_params_cfg *params = (struct rkisp1_params_cfg*) buffer;
+This method supports only a subset of the ISP features; new applications
+should use the extensible parameters method instead.
+
+.. _v4l2-meta-fmt-rk-isp1-ext-params:
+
+Extensible parameters configuration format
+==========================================
+
+When using the extensible configuration format, parameters are passed to the
+:ref:`rkisp1_params <rkisp1_params>` metadata output video node, using
+the `V4L2_META_FMT_RK_ISP1_EXT_PARAMS` meta format.
+
+The buffer contains a single instance of the C structure
+:c:type:`rkisp1_ext_params_cfg` defined in ``rkisp1-config.h``. The
+:c:type:`rkisp1_ext_params_cfg` structure is designed to allow userspace to
+populate the data buffer with only the configuration data for the ISP blocks it
+intends to configure. The extensible parameters format allows developers to
+define new block types to support new configuration parameters, and defines
+a versioning scheme so that the format can be extended without breaking
+compatibility with existing applications.
+
+For these reasons, this configuration method is preferred over the `fixed
+parameters` format alternative.
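+
+As a purely illustrative, hedged sketch, appending a single block to an
+extensible parameters buffer could look as follows. The structure layout and
+the constant names used here (the version, block type and flag identifiers)
+are assumptions for illustration; ``rkisp1-config.h`` is the authoritative
+reference.
+
+.. code-block:: c
+
+    struct rkisp1_ext_params_cfg *cfg =
+            (struct rkisp1_ext_params_cfg *) buffer;
+    /* Assumed names below; check rkisp1-config.h. */
+    struct rkisp1_ext_params_block_header *hdr =
+            (struct rkisp1_ext_params_block_header *) cfg->data;
+
+    cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
+
+    /* Enable black level subtraction; the BLS payload follows the
+     * block header. */
+    hdr->type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
+    hdr->flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
+    hdr->size = sizeof(*hdr) + sizeof(struct rkisp1_cif_isp_bls_config);
+
+    cfg->data_size = hdr->size;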
+
.. rkisp1_stat_buffer
+===========================
3A and histogram statistics
===========================
diff --git a/Documentation/userspace-api/media/v4l/mt2110t.svg b/Documentation/userspace-api/media/v4l/mt2110t.svg
new file mode 100644
index 000000000000..a6e82f2c73df
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/mt2110t.svg
@@ -0,0 +1,315 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-no-invariants-or-later -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="140mm" height="220mm" viewBox="0 0 14000 22000" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xmlns:svg="urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+ <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+ <rect x="0" y="0" width="14000" height="22000"/>
+ </clipPath>
+ <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+ <rect x="14" y="22" width="13972" height="21956"/>
+ </clipPath>
+ </defs>
+ <defs>
+ <font id="EmbeddedFont_1" horiz-adv-x="2048">
+ <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+ <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+ <glyph unicode="y" horiz-adv-x="1033" d="M 191,-425 C 142,-425 100,-421 67,-414 L 67,-279 C 92,-283 120,-285 151,-285 263,-285 352,-203 417,-38 L 434,5 5,1082 197,1082 425,484 C 428,475 432,464 437,451 442,438 457,394 482,320 507,246 521,205 523,196 L 593,393 830,1082 1020,1082 604,0 C 559,-115 518,-201 479,-258 440,-314 398,-356 351,-384 304,-411 250,-425 191,-425 Z"/>
+ <glyph unicode="x" horiz-adv-x="1006" d="M 801,0 L 510,444 217,0 23,0 408,556 41,1082 240,1082 510,661 778,1082 979,1082 612,558 1002,0 801,0 Z"/>
+ <glyph unicode="w" horiz-adv-x="1509" d="M 1174,0 L 965,0 776,765 740,934 C 734,904 725,861 712,805 699,748 631,480 508,0 L 300,0 -3,1082 175,1082 358,347 C 363,331 377,265 401,149 L 418,223 644,1082 837,1082 1026,339 1072,149 1103,288 1308,1082 1484,1082 1174,0 Z"/>
+ <glyph unicode="u" horiz-adv-x="874" d="M 314,1082 L 314,396 C 314,325 321,269 335,230 349,191 371,162 402,145 433,128 478,119 537,119 624,119 692,149 742,208 792,267 817,350 817,455 L 817,1082 997,1082 997,231 C 997,105 999,28 1003,0 L 833,0 C 832,3 832,12 831,27 830,42 830,59 829,78 828,97 826,132 825,185 L 822,185 C 781,110 733,58 679,27 624,-4 557,-20 476,-20 357,-20 271,10 216,69 161,128 133,225 133,361 L 133,1082 314,1082 Z"/>
+ <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 495,-8 434,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 474,127 509,132 554,141 L 554,8 Z"/>
+ <glyph unicode="s" horiz-adv-x="901" d="M 950,299 C 950,197 912,118 835,63 758,8 650,-20 511,-20 376,-20 273,2 200,47 127,91 79,160 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 602,117 669,131 712,159 754,187 775,229 775,285 775,328 760,362 731,389 702,416 654,438 589,455 L 460,489 C 357,516 283,542 240,568 196,593 162,624 137,661 112,698 100,743 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 759,862 732,899 689,925 645,950 586,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,783 283,758 299,738 315,718 339,701 370,687 401,673 467,654 568,629 663,605 732,583 774,563 816,542 849,520 874,495 898,470 917,442 930,410 943,377 950,340 950,299 Z"/>
+ <glyph unicode="r" horiz-adv-x="530" d="M 142,0 L 142,830 C 142,906 140,990 136,1082 L 306,1082 C 311,959 314,886 314,861 L 318,861 C 347,954 380,1017 417,1051 454,1085 507,1102 575,1102 599,1102 623,1099 648,1092 L 648,927 C 624,934 592,937 552,937 477,937 420,905 381,841 342,776 322,684 322,564 L 322,0 142,0 Z"/>
+ <glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 488,-20 376,43 319,168 L 314,168 C 317,163 318,106 318,-2 L 318,-425 138,-425 138,861 C 138,972 136,1046 132,1082 L 306,1082 C 307,1079 308,1070 309,1054 310,1037 312,1012 314,978 315,944 316,921 316,908 L 320,908 C 352,975 394,1024 447,1055 500,1086 569,1101 655,1101 788,1101 888,1056 954,967 1020,878 1053,737 1053,546 Z M 864,542 C 864,693 844,800 803,865 762,930 698,962 609,962 538,962 482,947 442,917 401,887 371,840 350,777 329,713 318,630 318,528 318,386 341,281 386,214 431,147 505,113 607,113 696,113 762,146 803,212 844,277 864,387 864,542 Z"/>
+ <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 407,-20 288,28 207,125 126,221 86,360 86,542 86,915 248,1102 571,1102 736,1102 858,1057 936,966 1014,875 1053,733 1053,542 Z M 864,542 C 864,691 842,800 798,868 753,935 679,969 574,969 469,969 393,935 346,866 299,797 275,689 275,542 275,399 298,292 345,221 391,149 464,113 563,113 671,113 748,148 795,217 841,286 864,395 864,542 Z"/>
+ <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+ <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+ <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1303 203,1374 246,1417 289,1460 356,1482 445,1482 495,1482 537,1478 572,1470 L 572,1333 C 542,1338 515,1341 492,1341 446,1341 413,1329 392,1306 371,1283 361,1240 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+ <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,379 302,283 353,216 404,149 479,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 954,65 807,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,727 129,864 213,959 296,1054 416,1102 571,1102 889,1102 1048,910 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 481,969 412,940 361,882 310,823 282,743 278,641 L 862,641 Z"/>
+ <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,147 315,116 312,74 309,31 307,7 306,0 L 132,0 C 136,36 138,110 138,223 L 138,1484 318,1484 318,1061 C 318,1018 317,967 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,691 844,800 804,865 764,930 699,963 609,963 508,963 434,928 388,859 341,790 318,680 318,529 318,387 341,282 386,215 431,147 505,113 607,113 698,113 763,147 804,214 844,281 864,389 864,540 Z"/>
+ <glyph unicode="8" horiz-adv-x="980" d="M 1050,393 C 1050,263 1009,162 926,89 843,16 725,-20 570,-20 419,-20 302,16 217,87 132,158 89,260 89,391 89,483 115,560 168,623 221,686 288,724 370,737 L 370,741 C 293,759 233,798 189,858 144,918 122,988 122,1069 122,1176 162,1263 243,1330 323,1397 431,1430 566,1430 705,1430 814,1397 895,1332 975,1267 1015,1178 1015,1067 1015,986 993,916 948,856 903,796 842,758 765,743 L 765,739 C 855,724 925,686 975,625 1025,563 1050,486 1050,393 Z M 828,1057 C 828,1216 741,1296 566,1296 481,1296 417,1276 373,1236 328,1196 306,1136 306,1057 306,976 329,915 375,873 420,830 485,809 568,809 653,809 717,829 762,868 806,907 828,970 828,1057 Z M 863,410 C 863,497 837,563 785,608 733,652 660,674 566,674 475,674 403,650 352,603 301,555 275,489 275,406 275,212 374,115 572,115 670,115 743,139 791,186 839,233 863,307 863,410 Z"/>
+ <glyph unicode="6" horiz-adv-x="980" d="M 1049,461 C 1049,312 1009,195 928,109 847,23 736,-20 594,-20 435,-20 314,39 230,157 146,275 104,447 104,672 104,916 148,1103 235,1234 322,1365 447,1430 608,1430 821,1430 955,1334 1010,1143 L 838,1112 C 803,1227 725,1284 606,1284 503,1284 424,1236 368,1141 311,1045 283,906 283,725 316,786 362,832 421,864 480,895 548,911 625,911 755,911 858,870 935,789 1011,708 1049,598 1049,461 Z M 866,453 C 866,555 841,634 791,689 741,744 671,772 582,772 498,772 430,748 379,699 327,650 301,582 301,496 301,387 328,298 382,229 435,160 504,125 588,125 675,125 743,154 792,213 841,271 866,351 866,453 Z"/>
+ <glyph unicode="4" horiz-adv-x="1060" d="M 881,319 L 881,0 711,0 711,319 47,319 47,459 692,1409 881,1409 881,461 1079,461 1079,319 881,319 Z M 711,1206 C 710,1202 700,1184 683,1153 666,1122 653,1100 644,1087 L 283,555 229,481 213,461 711,461 711,1206 Z"/>
+ <glyph unicode="3" horiz-adv-x="1006" d="M 1049,389 C 1049,259 1008,158 925,87 842,16 724,-20 571,-20 428,-20 315,12 230,77 145,141 94,236 78,362 L 264,379 C 288,212 390,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,472 833,532 774,575 715,618 629,639 518,639 L 416,639 416,795 514,795 C 613,795 689,817 744,860 798,903 825,962 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 115,1178 163,1268 246,1333 328,1398 434,1430 563,1430 704,1430 814,1397 893,1332 971,1266 1010,1174 1010,1057 1010,967 985,894 935,838 884,781 811,743 715,723 L 715,719 C 820,708 902,672 961,613 1020,554 1049,479 1049,389 Z"/>
+ <glyph unicode="2" horiz-adv-x="954" d="M 103,0 L 103,127 C 137,205 179,274 228,334 277,393 328,447 382,496 436,544 490,589 543,630 596,671 643,713 686,754 729,795 763,839 790,884 816,929 829,981 829,1038 829,1115 806,1175 761,1218 716,1261 653,1282 572,1282 495,1282 432,1261 383,1220 333,1178 304,1119 295,1044 L 111,1061 C 124,1174 172,1263 255,1330 337,1397 443,1430 572,1430 714,1430 823,1397 900,1330 976,1263 1014,1167 1014,1044 1014,989 1002,935 977,881 952,827 914,773 865,719 816,665 721,581 582,468 505,405 444,349 399,299 354,248 321,200 301,153 L 1036,153 1036,0 103,0 Z"/>
+ <glyph unicode="1" horiz-adv-x="927" d="M 156,0 L 156,153 515,153 515,1237 197,1010 197,1180 530,1409 696,1409 696,153 1039,153 1039,0 156,0 Z"/>
+ <glyph unicode=" " horiz-adv-x="556"/>
+ </font>
+ </defs>
+ <defs class="TextShapeIndex">
+ <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23 id24 id25 id26 id27 id28 id29 id30"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+ <g id="bullet-char-template-57356" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+ </g>
+ <g id="bullet-char-template-57354" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+ </g>
+ <g id="bullet-char-template-10146" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+ </g>
+ <g id="bullet-char-template-10132" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+ </g>
+ <g id="bullet-char-template-10007" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+ </g>
+ <g id="bullet-char-template-10004" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+ </g>
+ <g id="bullet-char-template-9679" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+ </g>
+ <g id="bullet-char-template-8226" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+ </g>
+ <g id="bullet-char-template-8211" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+ </g>
+ <g id="bullet-char-template-61548" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+ </g>
+ </defs>
+ <g>
+ <g id="id2" class="Master_Slide">
+ <g id="bg-id2" class="Background"/>
+ <g id="bo-id2" class="BackgroundObjects"/>
+ </g>
+ </g>
+ <g class="SlideGroup">
+ <g>
+ <g id="container-id1">
+ <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+ <g class="Page">
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id3">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="325" width="10003" height="15477"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 2037,950 L 11463,950"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1750,950 L 2050,1050 2050,850 1750,950 Z"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 11750,950 L 11450,850 11450,1050 11750,950 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1750,15800 L 1750,750"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11750,1650 L 11750,750"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="5953" y="768"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">16 px</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id4">
+ <rect class="BoundingBox" stroke="none" fill="none" x="11795" y="1649" width="1853" height="19913"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 13446,1937 L 13446,21273"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 13446,1650 L 13346,1950 13546,1950 13446,1650 Z"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 13446,21560 L 13546,21260 13346,21260 13446,21560 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11796,1650 L 13646,1650"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11796,21560 L 13646,21560"/>
+ <text class="SVGTextShape" transform="rotate(-90 13395 12369)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="13395" y="12369"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">32 px</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id5">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="2449" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,4200 L 1750,4200 1750,2450 11750,2450 11750,4200 6750,4200 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="3545"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id6">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="1699" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,2450 L 1750,2450 1750,1700 11750,1700 11750,2450 6750,2450 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3860" y="2295"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id7">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="1609" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,4150 L 1750,4150 1750,1650 11750,1650 11750,4150 6750,4150 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id8">
+ <rect class="BoundingBox" stroke="none" fill="none" x="385" y="1199" width="1294" height="3971"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,5079 L 1027,2737"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1027,2450 L 927,2750 1127,2750 1027,2450 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,1413 L 1027,1200"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1027,1700 L 1127,1400 927,1400 1027,1700 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,2450 L 1027,1700"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1677,2450 L 827,2450"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1677,1700 L 827,1700"/>
+ <text class="SVGTextShape" transform="rotate(-90 845 5217)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="845" y="5217"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">16 bytes</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id9">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="4929" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,6680 L 1750,6680 1750,4930 11750,4930 11750,6680 6750,6680 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="6025"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id10">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="4179" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,4930 L 1750,4930 1750,4180 11750,4180 11750,4930 6750,4930 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="4775"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id11">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="4089" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,6630 L 1750,6630 1750,4130 11750,4130 11750,6630 6750,6630 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id12">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="7409" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,9160 L 1750,9160 1750,7410 11750,7410 11750,9160 6750,9160 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="8505"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id13">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="6659" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,7410 L 1750,7410 1750,6660 11750,6660 11750,7410 6750,7410 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="7255"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id14">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="6569" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,9110 L 1750,9110 1750,6610 11750,6610 11750,9110 6750,9110 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id15">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="9889" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,11640 L 1750,11640 1750,9890 11750,9890 11750,11640 6750,11640 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="10985"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id16">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="9139" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,9890 L 1750,9890 1750,9140 11750,9140 11750,9890 6750,9890 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="9735"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id17">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="9049" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,11590 L 1750,11590 1750,9090 11750,9090 11750,11590 6750,11590 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id18">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="12369" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,14120 L 1750,14120 1750,12370 11750,12370 11750,14120 6750,14120 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="13465"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id19">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="11619" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,12370 L 1750,12370 1750,11620 11750,11620 11750,12370 6750,12370 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="12215"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id20">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="11529" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,14070 L 1750,14070 1750,11570 11750,11570 11750,14070 6750,14070 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id21">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="14849" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,16600 L 1750,16600 1750,14850 11750,14850 11750,16600 6750,16600 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="15945"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id22">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="14099" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,14850 L 1750,14850 1750,14100 11750,14100 11750,14850 6750,14850 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="14695"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id23">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="14009" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,16550 L 1750,16550 1750,14050 11750,14050 11750,16550 6750,16550 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id24">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="17329" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,19080 L 1750,19080 1750,17330 11750,17330 11750,19080 6750,19080 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="18425"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id25">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="16579" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,17330 L 1750,17330 1750,16580 11750,16580 11750,17330 6750,17330 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="17175"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id26">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="16489" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,19030 L 1750,19030 1750,16530 11750,16530 11750,19030 6750,19030 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id27">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="19809" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,21560 L 1750,21560 1750,19810 11750,19810 11750,21560 6750,21560 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="20905"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id28">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="19059" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,19810 L 1750,19810 1750,19060 11750,19060 11750,19810 6750,19810 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="19655"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id29">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="18969" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,21510 L 1750,21510 1750,19010 11750,19010 11750,21510 6750,21510 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id30">
+ <rect class="BoundingBox" stroke="none" fill="none" x="11849" y="1949" width="1237" height="4987"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,6845 L 12443,4487"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 12443,4200 L 12343,4500 12543,4500 12443,4200 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,2163 L 12443,1950"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 12443,2450 L 12543,2150 12343,2150 12443,2450 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,4200 L 12443,2450"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11850,4200 L 12643,4200"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11850,2450 L 12643,2450"/>
+ <text class="SVGTextShape" transform="rotate(-90 12953 6967)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="12953" y="6967"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">64 bytes</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
index 886ba7b08d6b..ac52485252d9 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
@@ -275,19 +275,6 @@ please make a proposal on the linux-media mailing list.
Decoder's implementation can be found here,
`aspeed_codec <https://github.com/AspeedTech-BMC/aspeed_codec/>`__
- * .. _V4L2-PIX-FMT-MT2110T:
-
- - ``V4L2_PIX_FMT_MT2110T``
- - 'MT2110T'
- - This format is two-planar 10-Bit tile mode and having similitude with
- ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for VP9, AV1
- and HEVC.
- * .. _V4L2-PIX-FMT-MT2110R:
-
- - ``V4L2_PIX_FMT_MT2110R``
- - 'MT2110R'
- - This format is two-planar 10-Bit raster mode and having similitude with
- ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for AVC.
* .. _V4L2-PIX-FMT-HEXTILE:
- ``V4L2_PIX_FMT_HEXTILE``
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index 1840224faa41..b788f6933855 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -144,6 +144,20 @@ All components are stored with the same number of bits per component.
- Cb, Cr
- Yes
- 4x4 tiles
+ * - V4L2_PIX_FMT_MT2110T
+ - 'MT2T'
+ - 15
+ - 4:2:0
+ - Cb, Cr
+ - No
+ - 16x32 / 16x16 tiles tiled low bits
+ * - V4L2_PIX_FMT_MT2110R
+ - 'MT2R'
+ - 15
+ - 4:2:0
+ - Cb, Cr
+ - No
+ - 16x32 / 16x16 tiles raster low bits
* - V4L2_PIX_FMT_NV16
- 'NV16'
- 8
@@ -295,8 +309,6 @@ of the luma plane.
.. _V4L2-PIX-FMT-NV12-32L32:
.. _V4L2-PIX-FMT-NV12M-8L128:
.. _V4L2-PIX-FMT-NV12-8L128:
-.. _V4L2-PIX-FMT-NV12M-10BE-8L128:
-.. _V4L2-PIX-FMT-NV12-10BE-8L128:
.. _V4L2-PIX-FMT-MM21:
Tiled NV12
@@ -322,6 +334,22 @@ If the vertical resolution is an odd number of tiles, the last row of
tiles is stored in linear order. The layouts of the luma and chroma
planes are identical.
+.. _nv12mt:
+
+.. kernel-figure:: nv12mt.svg
+ :alt: nv12mt.svg
+ :align: center
+
+ V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
+
+.. _nv12mt_ex:
+
+.. kernel-figure:: nv12mt_example.svg
+ :alt: nv12mt_example.svg
+ :align: center
+
+ Example V4L2_PIX_FMT_NV12MT memory layout of tiles
+
``V4L2_PIX_FMT_NV12_4L4`` stores pixels in 4x4 tiles, and stores
tiles linearly in memory. The line stride and image height must be
aligned to a multiple of 4. The layouts of the luma and chroma planes are
@@ -345,6 +373,27 @@ The layouts of the luma and chroma planes are identical.
``V4L2_PIX_FMT_NV12_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_8L128`` but stores
two planes in one memory.
+``V4L2_PIX_FMT_MM21`` stores luma pixels in 16x32 tiles, and chroma pixels
+in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the
+image height must be aligned to a multiple of 32. The number of luma and
+chroma tiles is identical, even though the tile sizes differ. The image is
+formed of two non-contiguous planes.
+
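+As a hedged illustration of these alignment rules (helper name ours), the
+per-plane sizes can be derived as:
+
+.. code-block:: c
+
+    #include <stddef.h>
+
+    /* Stride is a multiple of 16; height is rounded up to a multiple
+     * of 32. Chroma rows are half the luma rows, so both planes hold
+     * the same number of tiles despite the differing tile heights. */
+    static void mm21_plane_sizes(unsigned int stride, unsigned int height,
+                                 size_t *luma, size_t *chroma)
+    {
+            unsigned int h = (height + 31) & ~31u;
+
+            *luma = (size_t)stride * h;       /* 16x32 luma tiles   */
+            *chroma = (size_t)stride * h / 2; /* 16x16 chroma tiles */
+    }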
+
+.. _V4L2-PIX-FMT-NV15-4L4:
+.. _V4L2-PIX-FMT-NV12M-10BE-8L128:
+.. _V4L2-PIX-FMT-NV12-10BE-8L128:
+.. _V4L2-PIX-FMT-MT2110T:
+.. _V4L2-PIX-FMT-MT2110R:
+
+Tiled NV15
+----------
+
+``V4L2_PIX_FMT_NV15_4L4`` is a semi-planar 10-bit YUV 4:2:0 format, using
+4x4 tiling. All components are packed without any padding between each
+other. As a side-effect, each group of 4 components is stored over 5 bytes
+(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes).
+
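+The packing can be made concrete with a short helper. This is a hedged
+sketch: the component order within the 40-bit group (component ``n`` in bits
+``[10n+9:10n]``, little-endian) is an assumption the text above does not
+spell out.
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* Unpack one group of four 10-bit components from five bytes. */
+    static void nv15_unpack4(const uint8_t b[5], uint16_t out[4])
+    {
+            uint64_t v = (uint64_t)b[0]       | (uint64_t)b[1] << 8 |
+                         (uint64_t)b[2] << 16 | (uint64_t)b[3] << 24 |
+                         (uint64_t)b[4] << 32;
+            int i;
+
+            for (i = 0; i < 4; i++)
+                    out[i] = (v >> (10 * i)) & 0x3ff;
+    }
+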
``V4L2_PIX_FMT_NV12M_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M`` but stores
10 bits pixels in 2D 8x128 tiles, and stores tiles linearly in memory.
the data is arranged in big endian order.
@@ -363,37 +412,119 @@ byte 4: Y3(bits 7-0)
``V4L2_PIX_FMT_NV12_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_10BE_8L128`` but stores
two planes in one memory.
-``V4L2_PIX_FMT_MM21`` store luma pixel in 16x32 tiles, and chroma pixels
-in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the
-image height must be aligned to a multiple of 32. The number of luma and chroma
-tiles are identical, even though the tile size differ. The image is formed of
-two non-contiguous planes.
-
-.. _nv12mt:
+``V4L2_PIX_FMT_MT2110T`` is one of the MediaTek packed 10-bit YUV 4:2:0
+formats. It is a fully packed 10-bit 4:2:0 format like NV15 (15 bits per
+pixel), except that the two least significant bits of each pixel are stored
+separately from the upper eight bits. The format is composed of 16x32 luma
+tiles and 16x16 chroma tiles. Each tile is 640 bytes long, divided into 8
+partitions of 80 bytes. The first 16 bytes of each partition hold the 2
+least significant bits of the pixel data; the remaining 64 bytes hold the 8
+most significant bits.
-.. kernel-figure:: nv12mt.svg
- :alt: nv12mt.svg
+.. kernel-figure:: mt2110t.svg
+ :alt: mt2110t.svg
:align: center
- V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
-
-.. _nv12mt_ex:
+ Layout of MT2110T Chroma Tile
-.. kernel-figure:: nv12mt_example.svg
- :alt: nv12mt_example.svg
- :align: center
+Filtering out the low-bit sections (the first 16 bytes of each partition)
+results in a valid ``V4L2_PIX_FMT_MM21`` frame. A partition is a sub-tile of
+size 16 x 4. The lower two bits are said to be tiled since each byte contains
+the lower two bits of the four pixels of the column matching the same byte
+index. The chroma tiles only have 4 partitions.
- Example V4L2_PIX_FMT_NV12MT memory layout of tiles
+.. flat-table:: MT2110T LSB bits layout
+ :header-rows: 1
+ :stub-columns: 1
-.. _V4L2-PIX-FMT-NV15-4L4:
+ * -
+ - start + 0:
+ - start + 1:
+ - . . .
+ - start\ +\ 15:
+ * - Bits 1:0
+ - Y'\ :sub:`0:0`
+ - Y'\ :sub:`0:1`
+ - . . .
+ - Y'\ :sub:`0:15`
+   * - Bits 3:2
+ - Y'\ :sub:`1:0`
+ - Y'\ :sub:`1:1`
+ - . . .
+ - Y'\ :sub:`1:15`
+ * - Bits 5:4
+ - Y'\ :sub:`2:0`
+ - Y'\ :sub:`2:1`
+ - . . .
+ - Y'\ :sub:`2:15`
+ * - Bits 7:6
+ - Y'\ :sub:`3:0`
+ - Y'\ :sub:`3:1`
+ - . . .
+ - Y'\ :sub:`3:15`
+
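+Putting the two sections together, a hedged sketch of reconstructing one
+10-bit luma sample from an 80-byte partition follows. The 2-bit layout
+matches the table above; the raster order assumed for the 8-bit section in
+the remaining 64 bytes is an assumption, not something the text specifies.
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* part points at one 80-byte partition (a 16x4 sub-tile);
+     * row < 4, col < 16. */
+    static uint16_t mt2110t_sample(const uint8_t *part,
+                                   unsigned int row, unsigned int col)
+    {
+            uint8_t lsb = (part[col] >> (2 * row)) & 0x3; /* bytes 0..15  */
+            uint8_t msb = part[16 + row * 16 + col];      /* bytes 16..79 */
+
+            return (uint16_t)msb << 2 | lsb;
+    }
+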
+``V4L2_PIX_FMT_MT2110R`` is identical to ``V4L2_PIX_FMT_MT2110T`` except that
+the two least significant bits are laid out in raster order: the first byte
+holds the low bits of the first 4 pixels of the first row, and each row of a
+partition occupies 4 bytes.
+
+.. flat-table:: MT2110R LSB bits layout
+ :header-rows: 2
+ :stub-columns: 1
-Tiled NV15
-----------
+ * -
+ - :cspan:`3` Byte 0
+ - ...
+ - :cspan:`3` Byte 3
+ * -
+ - 7:6
+ - 5:4
+ - 3:2
+ - 1:0
+ - ...
+ - 7:6
+ - 5:4
+ - 3:2
+ - 1:0
+ * - start + 0:
+ - Y'\ :sub:`0:3`
+ - Y'\ :sub:`0:2`
+ - Y'\ :sub:`0:1`
+ - Y'\ :sub:`0:0`
+ - ...
+ - Y'\ :sub:`0:15`
+ - Y'\ :sub:`0:14`
+ - Y'\ :sub:`0:13`
+ - Y'\ :sub:`0:12`
+ * - start + 4:
+ - Y'\ :sub:`1:3`
+ - Y'\ :sub:`1:2`
+ - Y'\ :sub:`1:1`
+ - Y'\ :sub:`1:0`
+ - ...
+ - Y'\ :sub:`1:15`
+ - Y'\ :sub:`1:14`
+ - Y'\ :sub:`1:13`
+ - Y'\ :sub:`1:12`
+ * - start + 8:
+ - Y'\ :sub:`2:3`
+ - Y'\ :sub:`2:2`
+ - Y'\ :sub:`2:1`
+ - Y'\ :sub:`2:0`
+ - ...
+ - Y'\ :sub:`2:15`
+ - Y'\ :sub:`2:14`
+ - Y'\ :sub:`2:13`
+ - Y'\ :sub:`2:12`
+ * - start\ +\ 12:
+ - Y'\ :sub:`3:3`
+ - Y'\ :sub:`3:2`
+ - Y'\ :sub:`3:1`
+ - Y'\ :sub:`3:0`
+ - ...
+ - Y'\ :sub:`3:15`
+ - Y'\ :sub:`3:14`
+ - Y'\ :sub:`3:13`
+ - Y'\ :sub:`3:12`
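+
+Following the table above, a hedged sketch (helper name ours) of fetching the
+low bits of pixel (row, col) from the 16-byte raster section:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* low points at the 16-byte low-bit section; row < 4, col < 16.
+     * Y'0:0 sits in bits 1:0 of byte 0; each row spans 4 bytes. */
+    static uint8_t mt2110r_lsb(const uint8_t *low,
+                               unsigned int row, unsigned int col)
+    {
+            return (low[row * 4 + col / 4] >> (2 * (col % 4))) & 0x3;
+    }
+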
-Semi-planar 10-bit YUV 4:2:0 formats, using 4x4 tiling.
-All components are packed without any padding between each other.
-As a side-effect, each group of 4 components are stored over 5 bytes
-(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes).
.. _V4L2-PIX-FMT-NV16:
.. _V4L2-PIX-FMT-NV61:
diff --git a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
index 6c57b8428356..3d11d86d9cbf 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
@@ -244,6 +244,17 @@ specification the ioctl returns an ``EINVAL`` error code.
- 0x01000000
- The device supports the :c:func:`read()` and/or
:c:func:`write()` I/O methods.
+ * - ``V4L2_CAP_EDID``
+ - 0x02000000
+ - The device stores the EDID for a video input, or retrieves the EDID for a video
+ output. It is a standalone EDID device, so no video streaming etc. will take place.
+
+ For a video input this is typically an EEPROM that supports the
+ :ref:`VESA Enhanced Display Data Channel Standard <vesaeddc>`. It can be
+ something else as well, for example a microcontroller.
+
+ For a video output this is typically read from an external device such as an
+ HDMI splitter accessed by a serial port.
* - ``V4L2_CAP_STREAMING``
- 0x04000000
- The device supports the :ref:`streaming <mmap>` I/O method.
diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
index bbc22dd76032..daf9a6621b50 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
@@ -73,6 +73,8 @@ aborting or finishing any DMA in progress, an implicit
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.5cm}|
+.. cssclass:: longtable
+
.. flat-table:: struct v4l2_requestbuffers
:header-rows: 0
:stub-columns: 0
@@ -123,14 +125,6 @@ aborting or finishing any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-MAX-NUM-BUFFERS:
.. _V4L2-BUF-CAP-SUPPORTS-REMOVE-BUFS:
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{8.1cm}|p{2.2cm}|p{7.0cm}|
-
-.. cssclass:: longtable
-
.. flat-table:: V4L2 Buffer Capabilities Flags
:header-rows: 0
:stub-columns: 0
@@ -166,6 +160,36 @@ aborting or finishing any DMA in progress, an implicit
:ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>`,
:ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>` and
:ref:`V4L2_MEMORY_FLAG_NON_COHERENT <V4L2-MEMORY-FLAG-NON-COHERENT>`.
+ * - ``V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS``
+ - 0x00000080
+ - If set, then the ``max_num_buffers`` field in ``struct v4l2_create_buffers``
+ is valid. If not set, then the maximum is ``VIDEO_MAX_FRAME`` buffers.
+ * - ``V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS``
+ - 0x00000100
+ - If set, then ``VIDIOC_REMOVE_BUFS`` is supported.
+
+.. _memory-flags:
+.. _V4L2-MEMORY-FLAG-NON-COHERENT:
+
+.. flat-table:: Memory Consistency Flags
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 4
+
+ * - ``V4L2_MEMORY_FLAG_NON_COHERENT``
+ - 0x00000001
+ - A buffer is allocated either in coherent memory (it will be automatically
+ kept coherent between the CPU and the bus) or in non-coherent memory. The
+ latter can provide performance gains, for instance the CPU cache
+ sync/flush operations can be avoided if the buffer is accessed by the
+ corresponding device only and the CPU does not read/write to/from that
+ buffer. However, this requires extra care from the driver -- it must
+ guarantee memory consistency by issuing a cache flush/sync when
+ consistency is needed. If this flag is set V4L2 will attempt to
+ allocate the buffer in non-coherent memory. The flag takes effect
+ only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
+ queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
+ <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.
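+
+As a hedged illustration (assuming a kernel recent enough that struct
+:c:type:`v4l2_requestbuffers` has the ``flags`` field), an application could
+request non-coherent buffers like this; when the capability is absent the
+flag is simply ignored and the allocation stays coherent.
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Request four non-coherent MMAP capture buffers on fd. */
+    static int request_noncoherent_buffers(int fd)
+    {
+            struct v4l2_requestbuffers req;
+
+            memset(&req, 0, sizeof(req));
+            req.count = 4;
+            req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+            req.memory = V4L2_MEMORY_MMAP;
+            req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;
+
+            return ioctl(fd, VIDIOC_REQBUFS, &req);
+    }
+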
.. raw:: latex
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index bdc628e8c1d6..d67fd4038d22 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -197,6 +197,7 @@ replace define V4L2_CAP_META_OUTPUT device-capabilities
replace define V4L2_CAP_DEVICE_CAPS device-capabilities
replace define V4L2_CAP_TOUCH device-capabilities
replace define V4L2_CAP_IO_MC device-capabilities
+replace define V4L2_CAP_EDID device-capabilities
# V4L2 pix flags
replace define V4L2_PIX_FMT_PRIV_MAGIC :c:type:`v4l2_pix_format`
diff --git a/Documentation/virt/hyperv/coco.rst b/Documentation/virt/hyperv/coco.rst
new file mode 100644
index 000000000000..c15d6fe34b4e
--- /dev/null
+++ b/Documentation/virt/hyperv/coco.rst
@@ -0,0 +1,260 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Confidential Computing VMs
+==========================
+Hyper-V can create and run Linux guests that are Confidential Computing
+(CoCo) VMs. Such VMs cooperate with the physical processor to better protect
+the confidentiality and integrity of data in the VM's memory, even in the
+face of a hypervisor/VMM that has been compromised and may behave maliciously.
+CoCo VMs on Hyper-V share the generic CoCo VM threat model and security
+objectives described in Documentation/security/snp-tdx-threat-model.rst. Note
+that Hyper-V specific code in Linux refers to CoCo VMs as "isolated VMs" or
+"isolation VMs".
+
+A Linux CoCo VM on Hyper-V requires the cooperation and interaction of the
+following:
+
+* Physical hardware with a processor that supports CoCo VMs
+
+* The hardware runs a version of Windows/Hyper-V with support for CoCo VMs
+
+* The VM runs a version of Linux that supports being a CoCo VM
+
+The physical hardware requirements are as follows:
+
+* AMD processor with SEV-SNP. Hyper-V does not run guest VMs with AMD SME,
+ SEV, or SEV-ES encryption, and such encryption is not sufficient for a CoCo
+ VM on Hyper-V.
+
+* Intel processor with TDX
+
+To create a CoCo VM, the "Isolated VM" attribute must be specified to Hyper-V
+when the VM is created. A VM cannot be changed from a CoCo VM to a normal VM,
+or vice versa, after it is created.
+
+Operational Modes
+-----------------
+Hyper-V CoCo VMs can run in two modes. The mode is selected when the VM is
+created and cannot be changed during the life of the VM.
+
+* Fully-enlightened mode. In this mode, the guest operating system is
+ enlightened to understand and manage all aspects of running as a CoCo VM.
+
+* Paravisor mode. In this mode, a paravisor layer between the guest and the
+ host provides some operations needed to run as a CoCo VM. The guest operating
+ system can have fewer CoCo enlightenments than are required in the
+ fully-enlightened case.
+
+Conceptually, fully-enlightened mode and paravisor mode may be treated as
+points on a spectrum spanning the degree of guest enlightenment needed to run
+as a CoCo VM. Fully-enlightened mode is one end of the spectrum. A full
+implementation of paravisor mode is the other end of the spectrum, where all
+aspects of running as a CoCo VM are handled by the paravisor, and a normal
+guest OS with no knowledge of memory encryption or other aspects of CoCo VMs
+can run successfully. However, the Hyper-V implementation of paravisor mode
+does not go this far, and is somewhere in the middle of the spectrum. Some
+aspects of CoCo VMs are handled by the Hyper-V paravisor while the guest OS
+must be enlightened for other aspects. Unfortunately, there is no
+standardized enumeration of the features/functions that might be provided in
+the paravisor, and there is no standardized mechanism for a guest OS to query
+the paravisor for the features/functions it provides. The understanding of
+what the paravisor provides is hard-coded in the guest OS.
+
+Paravisor mode has similarities to the `Coconut project`_, which aims to
+provide a limited paravisor offering services to the guest, such as a
+virtual TPM.
+However, the Hyper-V paravisor generally handles more aspects of CoCo VMs
+than is currently envisioned for Coconut, and so is further toward the "no
+guest enlightenments required" end of the spectrum.
+
+.. _Coconut project: https://github.com/coconut-svsm/svsm
+
+In the CoCo VM threat model, the paravisor is in the guest security domain
+and must be trusted by the guest OS. By implication, the hypervisor/VMM must
+protect itself against a potentially malicious paravisor just like it
+protects against a potentially malicious guest.
+
+The hardware architectural approach to fully-enlightened vs. paravisor mode
+varies depending on the underlying processor.
+
+* With AMD SEV-SNP processors, in fully-enlightened mode the guest OS runs in
+ VMPL 0 and has full control of the guest context. In paravisor mode, the
+ guest OS runs in VMPL 2 and the paravisor runs in VMPL 0. The paravisor
+ running in VMPL 0 has privileges that the guest OS in VMPL 2 does not have.
+ Certain operations require the guest to invoke the paravisor. Furthermore, in
+ paravisor mode the guest OS operates in "virtual Top Of Memory" (vTOM) mode
+ as defined by the SEV-SNP architecture. This mode simplifies guest management
+ of memory encryption when a paravisor is used.
+
+* With Intel TDX processors, in fully-enlightened mode the guest OS runs in an
+ L1 VM. In paravisor mode, TD partitioning is used. The paravisor runs in the
+ L1 VM, and the guest OS runs in a nested L2 VM.
+
+Hyper-V exposes a synthetic MSR to guests that describes the CoCo mode. This
+MSR indicates if the underlying processor uses AMD SEV-SNP or Intel TDX, and
+whether a paravisor is being used. It is straightforward to build a single
+kernel image that can boot and run properly on either architecture, and in
+either mode.
+
+Paravisor Effects
+-----------------
+Running in paravisor mode affects the following areas of generic Linux kernel
+CoCo VM functionality:
+
+* Initial guest memory setup. When a new VM is created in paravisor mode, the
+ paravisor runs first and sets up the guest physical memory as encrypted. The
+ guest Linux does normal memory initialization, except for explicitly marking
+ appropriate ranges as decrypted (shared). In paravisor mode, Linux does not
+ perform the early boot memory setup steps that are particularly tricky with
+ AMD SEV-SNP in fully-enlightened mode.
+
+* #VC/#VE exception handling. In paravisor mode, Hyper-V configures the guest
+ CoCo VM to route #VC and #VE exceptions to VMPL 0 and the L1 VM,
+ respectively, and not the guest Linux. Consequently, these exception handlers
+ do not run in the guest Linux and are not a required enlightenment for a
+ Linux guest in paravisor mode.
+
+* CPUID flags. Both AMD SEV-SNP and Intel TDX provide a CPUID flag in the
+ guest indicating that the VM is operating with the respective hardware
+ support. While these CPUID flags are visible in fully-enlightened CoCo VMs,
+ the paravisor filters out these flags and the guest Linux does not see them.
+ Throughout the Linux kernel, explicitly testing these flags has mostly been
+ eliminated in favor of the cc_platform_has() function, with the goal of
+ abstracting the differences between SEV-SNP and TDX. But the
+ cc_platform_has() abstraction also allows the Hyper-V paravisor configuration
+ to selectively enable aspects of CoCo VM functionality even when the CPUID
+ flags are not set. The exception is early boot memory setup on SEV-SNP, which
+ tests the CPUID SEV-SNP flag. But not having the flag in a Hyper-V
+ paravisor mode VM achieves the desired effect of not running SEV-SNP
+ specific early boot memory setup.
+
+* Device emulation. In paravisor mode, the Hyper-V paravisor provides
+ emulation of devices such as the IO-APIC and TPM. Because the emulation
+ happens in the paravisor in the guest context (instead of the hypervisor/VMM
+ context), MMIO accesses to these devices must be encrypted references instead
+ of the decrypted references that would be used in a fully-enlightened CoCo
+ VM. The __ioremap_caller() function has been enhanced to make a callback to
+ check whether a particular address range should be treated as encrypted
+ (private). See the "is_private_mmio" callback.
+
+* Encrypt/decrypt memory transitions. In a CoCo VM, transitioning guest
+ memory between encrypted and decrypted requires coordinating with the
+ hypervisor/VMM. This is done via callbacks invoked from
+ __set_memory_enc_pgtable(). In fully-enlightened mode, the normal SEV-SNP and
+ TDX implementations of these callbacks are used. In paravisor mode, a Hyper-V
+ specific set of callbacks is used. These callbacks invoke the paravisor so
+ that the paravisor can coordinate the transitions and inform the hypervisor
+ as necessary. See hv_vtom_init() where these callbacks are set up.
+
+* Interrupt injection. In fully enlightened mode, a malicious hypervisor
+ could inject interrupts into the guest OS at times that violate x86/x64
+ architectural rules. For full protection, the guest OS should include
+ enlightenments that use the interrupt injection management features provided
+ by CoCo-capable processors. In paravisor mode, the paravisor mediates
+ interrupt injection into the guest OS, and ensures that the guest OS only
+ sees interrupts that are "legal". The paravisor uses the interrupt injection
+ management features provided by the CoCo-capable physical processor, thereby
+ masking these complexities from the guest OS.
+
+Hyper-V Hypercalls
+------------------
+When in fully-enlightened mode, hypercalls made by the Linux guest are routed
+directly to the hypervisor, just as in a non-CoCo VM. But in paravisor mode,
+normal hypercalls trap to the paravisor first, which may in turn invoke the
+hypervisor. But the paravisor is idiosyncratic in this regard, and a few
+hypercalls made by the Linux guest must always be routed directly to the
+hypervisor. These hypercall sites test for a paravisor being present, and use
+a special invocation sequence. See hv_post_message(), for example.
+
+Guest communication with Hyper-V
+--------------------------------
+Separate from the generic Linux kernel handling of memory encryption in Linux
+CoCo VMs, Hyper-V has VMBus and VMBus devices that communicate using memory
+shared between the Linux guest and the host. This shared memory must be
+marked decrypted to enable communication. Furthermore, since the threat model
+includes a compromised and potentially malicious host, the guest must guard
+against leaking any unintended data to the host through this shared memory.
+
+These Hyper-V and VMBus memory pages are marked as decrypted:
+
+* VMBus monitor pages
+
+* Synthetic interrupt controller (synic) related pages (unless supplied by
+ the paravisor)
+
+* Per-cpu hypercall input and output pages (unless running with a paravisor)
+
+* VMBus ring buffers. The direct mapping is marked decrypted in
+ __vmbus_establish_gpadl(). The secondary mapping created in
+ hv_ringbuffer_init() must also include the "decrypted" attribute.
+
+When the guest writes data to memory that is shared with the host, it must
+ensure that only the intended data is written. Padding or unused fields must
+be initialized to zeros before copying into the shared memory so that random
+kernel data is not inadvertently given to the host.
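+
+As an illustration (the structure and field names here are hypothetical, not
+taken from the VMBus code), the safe pattern is to zero the entire local copy
+before filling in the intended fields and copying it to the shared page::
+
+	struct host_request req;
+
+	/* Zero first so that padding and unused fields leak nothing. */
+	memset(&req, 0, sizeof(req));
+	req.type = HOST_REQ_QUERY;
+	req.len = data_len;
+	memcpy(shared_page, &req, sizeof(req));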
+
+Similarly, when the guest reads memory that is shared with the host, it must
+validate the data before acting on it so that a malicious host cannot induce
+the guest to expose unintended data. Doing such validation can be tricky
+because the host can modify the shared memory areas even while or after
+validation is performed. For messages passed from the host to the guest in a
+VMBus ring buffer, the length of the message is validated, and the message is
+copied into a temporary (encrypted) buffer for further validation and
+processing. The copying adds a small amount of overhead, but is the only way
+to protect against a malicious host. See hv_pkt_iter_first().
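+
+The copy-then-validate pattern looks roughly like this (a hedged sketch with
+illustrative names, not the actual ring buffer code)::
+
+	u32 len = READ_ONCE(desc->len);	/* length field in shared memory */
+
+	if (len < sizeof(struct msg_header) || len > MAX_MSG_SIZE)
+		return -EINVAL;
+
+	/* Snapshot into encrypted memory; the host can no longer change it. */
+	memcpy(priv_buf, shared_buf, len);
+
+	/* All further validation operates only on the private copy. */
+	return validate_and_process(priv_buf, len);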
+
+Many drivers for VMBus devices have been "hardened" by adding code to fully
+validate messages received over VMBus, instead of assuming that Hyper-V is
+acting cooperatively. Such drivers are marked as "allowed_in_isolated" in the
+vmbus_devs[] table. Other drivers for VMBus devices that are not needed in a
+CoCo VM have not been hardened, and they are not allowed to load in a CoCo
+VM. See vmbus_is_valid_offer() where such devices are excluded.
+
+Two VMBus devices depend on the Hyper-V host to do DMA data transfers:
+storvsc for disk I/O and netvsc for network I/O. storvsc uses the normal
+Linux kernel DMA APIs, and so bounce buffering through decrypted swiotlb
+memory is done implicitly. netvsc has two modes for data transfers. The first
+mode goes through send and receive buffer space that is explicitly allocated
+by the netvsc driver, and is used for most smaller packets. These send and
+receive buffers are marked decrypted by __vmbus_establish_gpadl(). Because
+the netvsc driver explicitly copies packets to/from these buffers, the
+equivalent of bounce buffering between encrypted and decrypted memory is
+already part of the data path. The second mode uses the normal Linux kernel
+DMA APIs, and is implicitly bounce buffered through swiotlb memory, as in
+storvsc.
+
+Finally, the VMBus virtual PCI driver needs special handling in a CoCo VM.
+Linux PCI device drivers access PCI config space using standard APIs provided
+by the Linux PCI subsystem. On Hyper-V, these functions directly access MMIO
+space, and the access traps to Hyper-V for emulation. But in CoCo VMs, memory
+encryption prevents Hyper-V from reading the guest instruction stream to
+emulate the access. So in a CoCo VM, these functions must make a hypercall
+with arguments explicitly describing the access. See
+_hv_pcifront_read_config() and _hv_pcifront_write_config(), and the
+"use_calls" flag that indicates hypercalls should be used.
+
+load_unaligned_zeropad()
+------------------------
+When transitioning memory between encrypted and decrypted, the caller of
+set_memory_encrypted() or set_memory_decrypted() is responsible for ensuring
+the memory isn't in use and isn't referenced while the transition is in
+progress. The transition has multiple steps, and includes interaction with
+the Hyper-V host. The memory is in an inconsistent state until all steps are
+complete. A reference while the state is inconsistent could result in an
+exception that can't be cleanly fixed up.
+
+However, the kernel load_unaligned_zeropad() mechanism may make stray
+references that can't be prevented by the caller of set_memory_encrypted() or
+set_memory_decrypted(), so there's specific code in the #VC or #VE exception
+handler to fix up this case. But a CoCo VM running on Hyper-V may be
+configured to run with a paravisor, with the #VC or #VE exception routed to
+the paravisor. There's no architectural way to forward the exceptions back to
+the guest kernel, and in such a case, the load_unaligned_zeropad() fixup code
+in the #VC/#VE handlers doesn't run.
+
+To avoid this problem, the Hyper-V specific functions for notifying the
+hypervisor of the transition mark pages as "not present" while a transition
+is in progress. If load_unaligned_zeropad() causes a stray reference, a
+normal page fault is generated instead of #VC or #VE, and the page-fault-
+based handlers for load_unaligned_zeropad() fix up the reference. When the
+encrypted/decrypted transition is complete, the pages are marked as "present"
+again. See hv_vtom_clear_present() and hv_vtom_set_host_visibility().
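+
+The resulting ordering is roughly as follows (a hedged sketch of the idea;
+notify_paravisor_of_transition() is a placeholder name, see the functions
+named above for the real code)::
+
+	/*
+	 * 1. Make the pages not present. A stray load_unaligned_zeropad()
+	 *    reference now takes a normal page fault instead of #VC/#VE.
+	 */
+	ret = set_memory_np(addr, pagecount);
+	if (ret)
+		return ret;
+
+	/*
+	 * 2. Coordinate the encrypted<->decrypted transition with the
+	 *    paravisor and hypervisor while the pages are unreachable.
+	 */
+	notify_paravisor_of_transition(addr, pagecount, enc);
+
+	/* 3. Make the pages present again in the new state. */
+	return set_memory_p(addr, pagecount);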
diff --git a/Documentation/virt/hyperv/index.rst b/Documentation/virt/hyperv/index.rst
index de447e11b4a5..79bc4080329e 100644
--- a/Documentation/virt/hyperv/index.rst
+++ b/Documentation/virt/hyperv/index.rst
@@ -11,3 +11,4 @@ Hyper-V Enlightenments
vmbus
clocks
vpci
+ coco
diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst
index 17be111f493f..af7bc2c2e0cb 100644
--- a/Documentation/virt/kvm/arm/hypercalls.rst
+++ b/Documentation/virt/kvm/arm/hypercalls.rst
@@ -44,3 +44,101 @@ Provides a discovery mechanism for other KVM/arm64 hypercalls.
----------------------------------------
See ptp_kvm.rst
+
+``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``
+----------------------------------
+
+Query the memory protection parameters for a pKVM protected virtual machine.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; pKVM protected guests only. |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000002 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``INVALID_PARAMETER (-3)`` on error, else |
+| | | | memory protection granule in bytes |
++---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_KVM_FUNC_MEM_SHARE``
+--------------------------------
+
+Share a region of memory with the KVM host, granting it read, write and execute
+permissions. The size of the region is equal to the memory protection granule
+advertised by ``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; pKVM protected guests only. |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000003 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | Base IPA of memory region to share |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``INVALID_PARAMETER (-3)`` |
++---------------------+----------+----+---------------------------------------------+
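+
+As an illustration, a protected guest could share one granule with the host
+along these lines (a hedged sketch using the generic SMCCC helper and the raw
+function IDs from the tables; in-kernel users would go through the pKVM guest
+support code instead)::
+
+	#include <linux/arm-smccc.h>
+
+	static int pkvm_share_granule(phys_addr_t ipa)
+	{
+		struct arm_smccc_res res;
+
+		/* Query the protection granule (HYP_MEMINFO, 0xC6000002). */
+		arm_smccc_1_1_hvc(0xC6000002, 0, 0, 0, &res);
+		if ((long)res.a0 < 0)
+			return -ENXIO;	/* INVALID_PARAMETER (-3) */
+
+		/* ipa must be aligned to the granule returned in res.a0. */
+
+		/* Share one granule with the host (MEM_SHARE, 0xC6000003). */
+		arm_smccc_1_1_hvc(0xC6000003, ipa, 0, 0, &res);
+		return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EPERM;
+	}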
+
+``ARM_SMCCC_KVM_FUNC_MEM_UNSHARE``
+----------------------------------
+
+Revoke access permission from the KVM host to a memory region previously shared
+with ``ARM_SMCCC_KVM_FUNC_MEM_SHARE``. The size of the region is equal to the
+memory protection granule advertised by ``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; pKVM protected guests only. |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000004 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | Base IPA of memory region to unshare |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``INVALID_PARAMETER (-3)`` |
++---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_KVM_FUNC_MMIO_GUARD``
+----------------------------------
+
+Request that a given memory region be handled as MMIO by the hypervisor,
+allowing accesses to this region to be emulated by the KVM host. The size of the
+region is equal to the memory protection granule advertised by
+``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; pKVM protected guests only. |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000007 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | Base IPA of MMIO memory region |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``INVALID_PARAMETER (-3)`` |
++---------------------+----------+----+---------------------------------------------+
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
index ad13ec55ddfe..9ca5a45c2140 100644
--- a/Documentation/virt/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -14,6 +14,7 @@ KVM
s390/index
ppc-pv
x86/index
+ loongarch/index
locking
vcpu-requests
diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst
new file mode 100644
index 000000000000..2d6b94031f1b
--- /dev/null
+++ b/Documentation/virt/kvm/loongarch/hypercalls.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+The LoongArch paravirtual interface
+===================================
+
+KVM hypercalls use the HVCL instruction with code 0x100 and the hypercall
+number is put in a0. Up to five arguments may be placed in registers a1 - a5.
+The return value is placed in v0 (an alias of a0).
+
+Source code for this interface can be found in arch/loongarch/kvm*.
+
+Querying for existence
+======================
+
+To determine if the host is running on KVM, we can utilize the cpucfg()
+function at index CPUCFG_KVM_BASE (0x40000000).
+
+The CPUCFG_KVM_BASE range, spanning from 0x40000000 to 0x400000FF, is marked
+as reserved. Consequently, no current or future processor will implement any
+feature within this range.
+
+On a KVM-virtualized Linux system, a read operation on cpucfg() at index
+CPUCFG_KVM_BASE (0x40000000) returns the magic string 'KVM\0'.
+
+Once you have determined that your host is running on a paravirtualization-
+capable KVM, you may use hypercalls as described below.
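+
+A minimal detection sketch (illustrative; in-kernel code would use the
+existing cpucfg accessors rather than open-coded assembly)::
+
+	unsigned int sig;
+
+	/* Read cpucfg at index CPUCFG_KVM_BASE (0x40000000). */
+	__asm__ volatile("cpucfg %0, %1" : "=r"(sig) : "r"(0x40000000));
+
+	/* The magic string 'KVM\0' is 0x004d564b as a little-endian word. */
+	if (sig == 0x004d564b)
+		/* paravirtualization-capable KVM host */;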
+
+KVM hypercall ABI
+=================
+
+The KVM hypercall ABI is simple, with one scratch register a0 (v0) and at most
+five generic registers (a1 - a5) used as input parameters. The FP (Floating-
+point) and vector registers are not utilized as input registers and must
+remain unmodified during a hypercall.
+
+Hypercall functions can be inlined since they only use one scratch register.
+
+The parameters are as follows:
+
+ ======== ================= ================
+ Register IN OUT
+ ======== ================= ================
+ a0 function number Return code
+ a1 1st parameter -
+ a2 2nd parameter -
+ a3 3rd parameter -
+ a4 4th parameter -
+ a5 5th parameter -
+ ======== ================= ================
+
+The return codes may be one of the following:
+
+ ==== =========================
+ Code Meaning
+ ==== =========================
+ 0 Success
+ -1 Hypercall not implemented
+ -2 Bad Hypercall parameter
+ ==== =========================
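+
+A zero-argument hypercall can therefore be issued with a short inline-assembly
+wrapper along these lines (a sketch modeled on the in-kernel helpers)::
+
+	static inline long kvm_hypercall0(u64 fid)
+	{
+		register long ret asm("a0");
+		register unsigned long fun asm("a0") = fid;
+
+		/*
+		 * HVCL with code 0x100; a0 carries the function number in
+		 * and the return code out.
+		 */
+		asm volatile("hvcl 0x100"
+			     : "=r" (ret)
+			     : "r" (fun)
+			     : "memory");
+
+		return ret;
+	}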
+
+KVM Hypercalls Documentation
+============================
+
+The template for each hypercall is as follows:
+
+1. Hypercall name
+2. Purpose
+
+1. KVM_HCALL_FUNC_IPI
+------------------------
+
+:Purpose: Send IPIs to multiple vCPUs.
+
+- a0: KVM_HCALL_FUNC_IPI
+- a1: Lower part of the bitmap for destination physical CPUIDs
+- a2: Higher part of the bitmap for destination physical CPUIDs
+- a3: The lowest physical CPUID in the bitmap
+
+The hypercall lets a guest send multiple IPIs (Inter-Processor Interrupts) with
+at most 128 destinations per hypercall. The destinations are represented in a
+bitmap contained in the first two input registers (a1 and a2).
+
+Bit 0 of a1 corresponds to the physical CPUID in the third input register (a3)
+and bit 1 corresponds to the physical CPUID in a3+1, and so on.
+
+PV IPI on LoongArch covers both multicast IPI sending and IPI receiving. SWI is
+used for PV IPI injection, since accessing the SWI registers causes no VM-exits.
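+
+For example (illustrative values, not actual kernel code), sending an IPI to
+physical CPUs 16, 17 and 100 in a single hypercall would set up the registers
+as::
+
+	a0 = KVM_HCALL_FUNC_IPI;
+	a3 = 16;			/* lowest destination physical CPUID */
+	a1 = (1UL << 0) | (1UL << 1);	/* CPUs 16 and 17 -> bits 0 and 1 */
+	a2 = 1UL << (100 - 16 - 64);	/* CPU 100 -> bit 84, i.e. bit 20 of a2 */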
diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst
new file mode 100644
index 000000000000..83387b4c5345
--- /dev/null
+++ b/Documentation/virt/kvm/loongarch/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+KVM for LoongArch systems
+=========================
+
+.. toctree::
+ :maxdepth: 2
+
+ hypercalls.rst
diff --git a/Documentation/watchdog/watchdog-api.rst b/Documentation/watchdog/watchdog-api.rst
index 800dcd7586f2..78e228c272cf 100644
--- a/Documentation/watchdog/watchdog-api.rst
+++ b/Documentation/watchdog/watchdog-api.rst
@@ -249,7 +249,7 @@ Note that not all devices support these two calls, and some only
support the GETBOOTSTATUS call.
Some drivers can measure the temperature using the GETTEMP ioctl. The
-returned value is the temperature in degrees fahrenheit::
+returned value is the temperature in degrees Fahrenheit::
int temperature;
ioctl(fd, WDIOC_GETTEMP, &temperature);
diff --git a/LICENSES/deprecated/0BSD b/LICENSES/deprecated/0BSD
new file mode 100644
index 000000000000..e4b95b749966
--- /dev/null
+++ b/LICENSES/deprecated/0BSD
@@ -0,0 +1,23 @@
+Valid-License-Identifier: 0BSD
+SPDX-URL: https://spdx.org/licenses/0BSD.html
+Usage-Guide:
+ To use the BSD Zero Clause License put the following SPDX tag/value
+ pair into a comment according to the placement guidelines in the
+ licensing rules documentation:
+ SPDX-License-Identifier: 0BSD
+License-Text:
+
+BSD Zero Clause License
+
+Copyright (c) <year> <copyright holders>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/MAINTAINERS b/MAINTAINERS
index 10430778c998..9b899a1327a0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -334,6 +334,7 @@ L: linux-acpi@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/acpi/arm64
+F: include/linux/acpi_iort.h
ACPI FOR RISC-V (ACPI/riscv)
M: Sunil V L <sunilvl@ventanamicro.com>
@@ -537,6 +538,17 @@ F: drivers/leds/leds-adp5520.c
F: drivers/mfd/adp5520.c
F: drivers/video/backlight/adp5520_bl.c
+ADP5585 GPIO EXPANDER, PWM AND KEYPAD CONTROLLER DRIVER
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: linux-gpio@vger.kernel.org
+L: linux-pwm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/*/adi,adp5585*.yaml
+F: drivers/gpio/gpio-adp5585.c
+F: drivers/mfd/adp5585.c
+F: drivers/pwm/pwm-adp5585.c
+F: include/linux/mfd/adp5585.h
+
ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
M: Michael Hennerich <michael.hennerich@analog.com>
S: Supported
@@ -1013,6 +1025,13 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
+AMD DISPLAY CORE - DML
+M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Jun Lei <jun.lei@amd.com>
+S: Supported
+F: drivers/gpu/drm/amd/display/dc/dml/
+F: drivers/gpu/drm/amd/display/dc/dml2/
+
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
@@ -1128,6 +1147,14 @@ L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/ptdma/
+AMD QDMA DRIVER
+M: Nishad Saraf <nishads@amd.com>
+M: Lizhi Hou <lizhi.hou@amd.com>
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/amd/qdma/
+F: include/linux/platform_data/amd_qdma.h
+
AMD SEATTLE DEVICE TREE SUPPORT
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
M: Tom Lendacky <thomas.lendacky@amd.com>
@@ -1153,6 +1180,13 @@ S: Supported
F: arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
F: drivers/net/ethernet/amd/xgbe/
+AMLOGIC BLUETOOTH DRIVER
+M: Yang Li <yang.li@amlogic.com>
+L: linux-bluetooth@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml
+F: drivers/bluetooth/hci_aml.c
+
AMLOGIC DDR PMU DRIVER
M: Jiucheng Xu <jiucheng.xu@amlogic.com>
L: linux-amlogic@lists.infradead.org
@@ -1202,6 +1236,13 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
F: drivers/iio/dac/ad3552r.c
+ANALOG DEVICES INC AD4000 DRIVER
+M: Marcelo Schmitt <marcelo.schmitt@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
+
ANALOG DEVICES INC AD4130 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
L: linux-iio@vger.kernel.org
@@ -1609,6 +1650,14 @@ F: Documentation/admin-guide/perf/xgene-pmu.rst
F: Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
F: drivers/perf/xgene_pmu.c
+APPLIED MICRO QT2025 PHY DRIVER
+M: FUJITA Tomonori <fujita.tomonori@gmail.com>
+R: Trevor Gross <tmgross@umich.edu>
+L: netdev@vger.kernel.org
+L: rust-for-linux@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/qt2025.rs
+
APTINA CAMERA SENSOR PLL
M: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -1737,6 +1786,17 @@ F: drivers/mtd/maps/physmap-versatile.*
F: drivers/power/reset/arm-versatile-reboot.c
F: drivers/soc/versatile/
+ARM INTERCONNECT PMU DRIVERS
+M: Robin Murphy <robin.murphy@arm.com>
+S: Supported
+F: Documentation/admin-guide/perf/arm-cmn.rst
+F: Documentation/admin-guide/perf/arm-ni.rst
+F: Documentation/devicetree/bindings/perf/arm,cmn.yaml
+F: Documentation/devicetree/bindings/perf/arm,ni.yaml
+F: drivers/perf/arm-cmn.c
+F: drivers/perf/arm-ni.c
+F: tools/perf/pmu-events/arch/arm64/arm/cmn/
+
ARM KOMEDA DRM-KMS DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
@@ -1754,6 +1814,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
+F: drivers/gpu/drm/ci/xfails/panfrost*
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
@@ -2432,18 +2493,28 @@ N: lpc18xx
ARM/LPC32XX SOC SUPPORT
M: Vladimir Zapolskiy <vz@mleia.com>
+M: Piotr Wojtaszczyk <piotr.wojtaszczyk@timesys.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/vzapolskiy/linux-lpc32xx.git
F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
F: arch/arm/boot/dts/nxp/lpc/lpc32*
F: arch/arm/mach-lpc32xx/
+F: drivers/dma/lpc32xx-dmamux.c
F: drivers/i2c/busses/i2c-pnx.c
F: drivers/net/ethernet/nxp/lpc_eth.c
F: drivers/usb/host/ohci-nxp.c
F: drivers/watchdog/pnx4008_wdt.c
N: lpc32xx
+LPC32XX DMAMUX SUPPORT
+M: J.M.B. Downing <jonathan.downing@nautel.com>
+M: Piotr Wojtaszczyk <piotr.wojtaszczyk@timesys.com>
+R: Vladimir Zapolskiy <vz@mleia.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml
+
ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Andrew Lunn <andrew@lunn.ch>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -2731,7 +2802,7 @@ F: drivers/iommu/msm*
F: drivers/mfd/ssbi.c
F: drivers/mmc/host/mmci_qcom*
F: drivers/mmc/host/sdhci-msm.c
-F: drivers/pci/controller/dwc/pcie-qcom.c
+F: drivers/pci/controller/dwc/pcie-qcom*
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
@@ -3786,10 +3857,9 @@ F: Documentation/filesystems/befs.rst
F: fs/befs/
BFQ I/O SCHEDULER
-M: Paolo Valente <paolo.valente@unimore.it>
-M: Jens Axboe <axboe@kernel.dk>
+M: Yu Kuai <yukuai3@huawei.com>
L: linux-block@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: Documentation/block/bfq-iosched.rst
F: block/bfq-*
@@ -3936,7 +4006,7 @@ F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
F: drivers/iio/imu/bmi323/
BPF JIT for ARC
-M: Shahab Vahedi <shahab@synopsys.com>
+M: Shahab Vahedi <list+bpf@vahedi.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/arc/net/
@@ -4103,6 +4173,7 @@ F: include/uapi/linux/btf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: kernel/trace/bpf_trace.c
+F: lib/buildid.c
F: lib/test_bpf.c
F: net/bpf/
F: net/core/filter.c
@@ -4223,6 +4294,7 @@ L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/stackmap.c
F: kernel/trace/bpf_trace.c
+F: lib/buildid.c
BROADCOM ASP 2.0 ETHERNET DRIVER
M: Justin Chen <justin.chen@broadcom.com>
@@ -5101,10 +5173,8 @@ F: Documentation/devicetree/bindings/media/cec/cec-gpio.yaml
F: drivers/media/cec/platform/cec-gpio/
CELL BROADBAND ENGINE ARCHITECTURE
-M: Arnd Bergmann <arnd@arndb.de>
L: linuxppc-dev@lists.ozlabs.org
-S: Supported
-W: http://www.ibm.com/developerworks/power/cell/
+S: Orphan
F: arch/powerpc/include/asm/cell*.h
F: arch/powerpc/include/asm/spu*.h
F: arch/powerpc/include/uapi/asm/spu*.h
@@ -5145,6 +5215,7 @@ S: Maintained
F: Documentation/admin-guide/module-signing.rst
F: certs/
F: scripts/sign-file.c
+F: scripts/ssl-common.h
F: tools/certs/
CFAG12864B LCD DRIVER
@@ -5703,9 +5774,12 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
F: Documentation/admin-guide/cgroup-v1/cpusets.rst
F: include/linux/cpuset.h
+F: kernel/cgroup/cpuset-internal.h
+F: kernel/cgroup/cpuset-v1.c
F: kernel/cgroup/cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
+F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
@@ -5824,6 +5898,9 @@ CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
M: Shuah Khan <shuah@kernel.org>
M: Shuah Khan <skhan@linuxfoundation.org>
+M: John B. Wyatt IV <jwyatt@redhat.com>
+M: John B. Wyatt IV <sageofredondo@gmail.com>
+M: John Kacur <jkacur@redhat.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/
@@ -6480,6 +6557,12 @@ F: include/net/devlink.h
F: include/uapi/linux/devlink.h
F: net/devlink/
+DFROBOT SD2405AL RTC DRIVER
+M: Tóth János <gomba007@gmail.com>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+F: drivers/rtc/rtc-sd2405al.c
+
DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
L: kernel@dh-electronics.com
@@ -6506,6 +6589,7 @@ F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
F: Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml
F: Documentation/devicetree/bindings/sound/da[79]*.txt
+F: Documentation/devicetree/bindings/sound/dlg,da7213.yaml
F: Documentation/devicetree/bindings/thermal/dlg,da9062-thermal.yaml
F: Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml
F: Documentation/hwmon/da90??.rst
@@ -6666,6 +6750,7 @@ F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
F: include/uapi/linux/dma-heap.h
+F: tools/testing/selftests/dmabuf-heaps/
DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422
M: Lukasz Luba <lukasz.luba@arm.com>
@@ -6717,6 +6802,7 @@ DOCUMENTATION PROCESS
M: Jonathan Corbet <corbet@lwn.net>
L: workflows@vger.kernel.org
S: Maintained
+F: Documentation/dev-tools/
F: Documentation/maintainer/
F: Documentation/process/
@@ -6724,6 +6810,7 @@ DOCUMENTATION REPORTING ISSUES
M: Thorsten Leemhuis <linux@leemhuis.info>
L: linux-doc@vger.kernel.org
S: Maintained
+F: Documentation/admin-guide/bug-bisect.rst
F: Documentation/admin-guide/quickly-build-trimmed-linux.rst
F: Documentation/admin-guide/reporting-issues.rst
F: Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
@@ -7338,10 +7425,10 @@ F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
-M: Melissa Wen <melissa.srw@gmail.com>
M: Maíra Canal <mairacanal@riseup.net>
R: Haneen Mohammed <hamohammed.sa@gmail.com>
-R: Daniel Vetter <daniel@ffwll.ch>
+R: Simona Vetter <simona@ffwll.ch>
+R: Melissa Wen <melissa.srw@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -7374,7 +7461,7 @@ F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
DRM DRIVERS
M: David Airlie <airlied@gmail.com>
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://gitlab.freedesktop.org/drm
@@ -7470,7 +7557,6 @@ M: Kyungmin Park <kyungmin.park@samsung.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
-F: Documentation/devicetree/bindings/display/exynos/
F: Documentation/devicetree/bindings/display/samsung/
F: drivers/gpu/drm/exynos/
F: include/uapi/drm/exynos_drm.h
@@ -8345,6 +8431,7 @@ F: include/linux/mii.h
F: include/linux/of_net.h
F: include/linux/phy.h
F: include/linux/phy_fixed.h
+F: include/linux/phy_link_topology.h
F: include/linux/phylib_stubs.h
F: include/linux/platform_data/mdio-bcm-unimac.h
F: include/linux/platform_data/mdio-gpio.h
@@ -8360,6 +8447,7 @@ L: netdev@vger.kernel.org
L: rust-for-linux@vger.kernel.org
S: Maintained
F: rust/kernel/net/phy.rs
+F: rust/kernel/net/phy/reg.rs
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
@@ -8384,6 +8472,7 @@ N: binfmt
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon@kernel.org>
M: Sungjong Seo <sj1557.seo@samsung.com>
+R: Yuezhang Mo <yuezhang.mo@sony.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat.git
@@ -8467,6 +8556,13 @@ F: lib/bootconfig.c
F: tools/bootconfig/*
F: tools/bootconfig/scripts/*
+EXTRON DA HD 4K PLUS CEC DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/cec/usb/extron-da-hd-4k-plus/
+
EXYNOS DP DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -8542,6 +8638,7 @@ M: Akinobu Mita <akinobu.mita@gmail.com>
S: Supported
F: Documentation/fault-injection/
F: lib/fault-inject.c
+F: tools/testing/fault-injection/
FBTFT Framebuffer drivers
L: dri-devel@lists.freedesktop.org
@@ -8603,6 +8700,7 @@ M: Christian Brauner <brauner@kernel.org>
R: Jan Kara <jack@suse.cz>
L: linux-fsdevel@vger.kernel.org
S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs.git
F: fs/*
F: include/linux/fs.h
F: include/linux/fs_types.h
@@ -8778,7 +8876,6 @@ F: include/linux/fortify-string.h
F: lib/fortify_kunit.c
F: lib/memcpy_kunit.c
F: lib/test_fortify/*
-F: scripts/test_fortify.sh
K: \b__NO_FORTIFY\b
FPGA DFL DRIVERS
@@ -8815,7 +8912,7 @@ W: https://floatingpoint.billm.au/
F: arch/x86/math-emu/
FRAMEBUFFER CORE
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
S: Odd Fixes
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/video/fbdev/core/
@@ -9012,6 +9109,7 @@ M: Herve Codina <herve.codina@bootlin.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-scc-qmc.yaml
+F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml
F: drivers/soc/fsl/qe/qmc.c
F: include/soc/fsl/qe/qmc.h
@@ -9027,9 +9125,11 @@ M: Herve Codina <herve.codina@bootlin.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-tsa.yaml
+F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml
F: drivers/soc/fsl/qe/tsa.c
F: drivers/soc/fsl/qe/tsa.h
F: include/dt-bindings/soc/cpm1-fsl,tsa.h
+F: include/dt-bindings/soc/qe-fsl,tsa.h
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
L: netdev@vger.kernel.org
@@ -10979,6 +11079,7 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
+F: drivers/gpu/drm/ci/xfails/powervr*
F: drivers/gpu/drm/imagination/
F: include/uapi/drm/pvr_drm.h
@@ -11104,10 +11205,17 @@ F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
F: drivers/input/
F: include/dt-bindings/input/
+F: include/linux/gameport.h
+F: include/linux/i8042.h
F: include/linux/input.h
F: include/linux/input/
+F: include/linux/libps2.h
+F: include/linux/serio.h
+F: include/uapi/linux/gameport.h
F: include/uapi/linux/input-event-codes.h
F: include/uapi/linux/input.h
+F: include/uapi/linux/serio.h
+F: include/uapi/linux/uinput.h
INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg <rydberg@bitmath.org>
@@ -11134,6 +11242,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
F: security/integrity/
F: security/integrity/ima/
+INTEGRITY POLICY ENFORCEMENT (IPE)
+M: Fan Wu <wufan@linux.microsoft.com>
+L: linux-security-module@vger.kernel.org
+S: Supported
+T: git https://github.com/microsoft/ipe.git
+F: Documentation/admin-guide/LSM/ipe.rst
+F: Documentation/security/ipe.rst
+F: scripts/ipe/
+F: security/ipe/
+
INTEL 810/815 FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -11156,12 +11274,12 @@ F: drivers/gpio/gpio-i8255.h
INTEL ASoC DRIVERS
M: Cezary Rojewski <cezary.rojewski@intel.com>
-M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
M: Liam Girdwood <liam.r.girdwood@linux.intel.com>
M: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
M: Bard Liao <yung-chuan.liao@linux.intel.com>
M: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
M: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/intel/
@@ -11477,6 +11595,24 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
F: drivers/fpga/intel-m10-bmc-sec-update.c
+INTEL MID (Mobile Internet Device) PLATFORM
+M: Andy Shevchenko <andy@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: arch/x86/include/asm/intel-mid.h
+F: arch/x86/pci/intel_mid_pci.c
+F: arch/x86/platform/intel-mid/
+F: drivers/extcon/extcon-intel-mrfld.c
+F: drivers/iio/adc/intel_mrfld_adc.c
+F: drivers/mfd/intel_soc_pmic_mrfld.c
+F: drivers/platform/x86/intel/mrfld_pwrbtn.c
+F: drivers/platform/x86/intel_scu_*
+F: drivers/staging/media/atomisp/
+F: drivers/watchdog/intel-mid_wdt.c
+F: include/linux/mfd/intel_soc_pmic_mrfld.h
+F: include/linux/platform_data/x86/intel-mid_wdt.h
+F: include/linux/platform_data/x86/intel_scu_ipc.h
+
INTEL P-Unit IPC DRIVER
M: Zha Qipeng <qipeng.zha@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -11539,8 +11675,8 @@ F: drivers/counter/intel-qep.c
INTEL SCU DRIVERS
M: Mika Westerberg <mika.westerberg@linux.intel.com>
S: Maintained
-F: arch/x86/include/asm/intel_scu_ipc.h
F: drivers/platform/x86/intel_scu_*
+F: include/linux/platform_data/x86/intel_scu_ipc.h
INTEL SDSI DRIVER
M: David E. Box <david.e.box@linux.intel.com>
@@ -11615,7 +11751,8 @@ F: drivers/platform/x86/intel/uncore-frequency/
INTEL VENDOR SPECIFIC EXTENDED CAPABILITIES DRIVER
M: David E. Box <david.e.box@linux.intel.com>
S: Supported
-F: drivers/platform/x86/intel/vsec.*
+F: drivers/platform/x86/intel/vsec.c
+F: include/linux/intel_vsec.h
INTEL VIRTUAL BUTTON DRIVER
M: AceLan Kao <acelan.kao@canonical.com>
@@ -11738,6 +11875,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
+F: include/linux/iommu-dma.h
F: include/linux/iova.h
IOMMU SUBSYSTEM
@@ -12308,6 +12446,7 @@ L: kvm@vger.kernel.org
L: loongarch@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F: Documentation/virt/kvm/loongarch/
F: arch/loongarch/include/asm/kvm*
F: arch/loongarch/include/uapi/asm/kvm*
F: arch/loongarch/kvm/
@@ -12926,6 +13065,7 @@ M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
R: Naveen N Rao <naveen@kernel.org>
+R: Madhavan Srinivasan <maddy@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -13009,6 +13149,7 @@ R: Daniel Lustig <dlustig@nvidia.com>
R: Joel Fernandes <joel@joelfernandes.org>
L: linux-kernel@vger.kernel.org
L: linux-arch@vger.kernel.org
+L: lkmm@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/atomic_bitops.txt
@@ -13517,7 +13658,7 @@ S: Maintained
F: Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml
F: drivers/input/misc/88pm886-onkey.c
F: drivers/mfd/88pm886.c
-F: drivers/regulators/88pm886-regulator.c
+F: drivers/regulator/88pm886-regulator.c
F: include/linux/mfd/88pm886.h
MARVELL ARMADA 3700 PHY DRIVERS
@@ -13576,7 +13717,6 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/marvell/mv643xx_eth.*
-F: include/linux/mv643xx.h
MARVELL MV88X3310 PHY DRIVER
M: Russell King <linux@armlinux.org.uk>
@@ -14228,8 +14368,8 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-bluetooth@vger.kernel.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt
F: Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml
-F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
F: drivers/bluetooth/btmtkuart.c
MEDIATEK BOARD LEVEL SHUTDOWN DRIVERS
@@ -14507,7 +14647,7 @@ MELLANOX ETHERNET DRIVER (mlx4_en)
M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlx4/en_*
@@ -14516,7 +14656,7 @@ M: Saeed Mahameed <saeedm@nvidia.com>
M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
@@ -14524,7 +14664,7 @@ MELLANOX ETHERNET INNOVA DRIVERS
R: Boris Pismenny <borisp@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
@@ -14535,7 +14675,7 @@ M: Ido Schimmel <idosch@nvidia.com>
M: Petr Machata <petrm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlxsw/
F: tools/testing/selftests/drivers/net/mlxsw/
@@ -14544,7 +14684,7 @@ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
M: mlxsw@nvidia.com
L: netdev@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlxfw/
@@ -14563,7 +14703,7 @@ M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlx4/
F: include/linux/mlx4/
@@ -14572,7 +14712,7 @@ MELLANOX MLX4 IB driver
M: Yishai Hadas <yishaih@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/hw/mlx4/
F: include/linux/mlx4/
@@ -14585,7 +14725,7 @@ M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: Documentation/networking/device_drivers/ethernet/mellanox/
F: drivers/net/ethernet/mellanox/mlx5/core/
@@ -14595,7 +14735,7 @@ MELLANOX MLX5 IB driver
M: Leon Romanovsky <leonro@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
-W: http://www.mellanox.com
+W: https://www.nvidia.com/networking/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/hw/mlx5/
F: include/linux/mlx5/
@@ -14823,6 +14963,7 @@ M: Alexander Duyck <alexanderduyck@fb.com>
M: Jakub Kicinski <kuba@kernel.org>
R: kernel-team@meta.com
S: Supported
+F: Documentation/networking/device_drivers/ethernet/meta/
F: drivers/net/ethernet/meta/
METHODE UDPU SUPPORT
@@ -14969,6 +15110,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan743x_*
+MICROCHIP LAN8650/1 10BASE-T1S MACPHY ETHERNET DRIVER
+M: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/microchip,lan8650.yaml
+F: drivers/net/ethernet/microchip/lan865x/lan865x.c
+
MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER
M: Arun Ramadoss <arun.ramadoss@microchip.com>
R: UNGLinuxDriver@microchip.com
@@ -15218,6 +15366,12 @@ S: Maintained
F: Documentation/hwmon/surface_fan.rst
F: drivers/hwmon/surface_fan.c
+MICROSOFT SURFACE SENSOR THERMAL DRIVER
+M: Maximilian Luz <luzmaximilian@gmail.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: drivers/hwmon/surface_temp.c
+
MICROSOFT SURFACE GPE LID SUPPORT DRIVER
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -15788,6 +15942,7 @@ M: Breno Leitao <leitao@debian.org>
S: Maintained
F: Documentation/networking/netconsole.rst
F: drivers/net/netconsole.c
+F: tools/testing/selftests/drivers/net/netcons_basic.sh
NETDEVSIM
M: Jakub Kicinski <kuba@kernel.org>
@@ -16831,6 +16986,7 @@ OMNIVISION OG01A1B SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
F: drivers/media/i2c/og01a1b.c
OMNIVISION OV01A10 SENSOR DRIVER
@@ -17101,6 +17257,14 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/ulp/opa_vnic
+OPEN ALLIANCE 10BASE-T1S MACPHY SERIAL INTERFACE FRAMEWORK
+M: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/networking/oa-tc6-framework.rst
+F: include/linux/oa_tc6.h
+F: drivers/net/ethernet/oa_tc6.c
+
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh@kernel.org>
M: Saravana Kannan <saravanak@google.com>
@@ -17412,7 +17576,7 @@ PCI DRIVER FOR ALTERA PCIE IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/altera-pcie.txt
+F: Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
F: drivers/pci/controller/pcie-altera.c
PCI DRIVER FOR APPLIEDMICRO XGENE
@@ -17644,7 +17808,7 @@ PCI MSI DRIVER FOR ALTERA MSI IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+F: Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
F: drivers/pci/controller/pcie-altera-msi.c
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
@@ -17797,6 +17961,7 @@ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
+F: drivers/pci/controller/dwc/pcie-qcom-common.c
F: drivers/pci/controller/dwc/pcie-qcom.c
PCIE DRIVER FOR ROCKCHIP
@@ -17833,6 +17998,7 @@ L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+F: drivers/pci/controller/dwc/pcie-qcom-common.c
F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCMCIA SUBSYSTEM
@@ -18244,6 +18410,7 @@ M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
+F: Documentation/driver-api/pwrseq.rst
F: drivers/power/sequencing/
F: include/linux/pwrseq/
@@ -18354,7 +18521,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <kees@kernel.org>
-M: Joel Granados <j.granados@samsung.com>
+M: Joel Granados <joel.granados@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
@@ -18407,6 +18574,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
+F: net/ethtool/pse-pd.c
PSTORE FILESYSTEM
M: Kees Cook <kees@kernel.org>
@@ -18787,7 +18955,7 @@ M: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/media/qcom_camss.rst
-F: Documentation/devicetree/bindings/media/*camss*
+F: Documentation/devicetree/bindings/media/qcom,*camss*
F: drivers/media/platform/qcom/camss/
QUALCOMM CLOCK DRIVERS
@@ -18802,7 +18970,6 @@ F: include/dt-bindings/clock/qcom,*
QUALCOMM CLOUD AI (QAIC) DRIVER
M: Jeffrey Hugo <quic_jhugo@quicinc.com>
R: Carl Vanderlip <quic_carlv@quicinc.com>
-R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
@@ -18897,6 +19064,7 @@ L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
F: drivers/soc/qcom/icc-bwmon.c
+F: drivers/soc/qcom/trace_icc-bwmon.h
QUALCOMM IOMMU
M: Rob Clark <robdclark@gmail.com>
@@ -19236,6 +19404,7 @@ S: Supported
W: https://oss.oracle.com/projects/rds/
F: Documentation/networking/rds.rst
F: net/rds/
+F: tools/testing/selftests/net/rds/
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
@@ -19738,6 +19907,14 @@ F: Documentation/ABI/*/sysfs-driver-hid-roccat*
F: drivers/hid/hid-roccat*
F: include/linux/hid-roccat*
+ROCKCHIP CAN-FD DRIVER
+M: Marc Kleine-Budde <mkl@pengutronix.de>
+R: kernel@pengutronix.de
+L: linux-can@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml
+F: drivers/net/can/rockchip/
+
ROCKCHIP CRYPTO DRIVERS
M: Corentin Labbe <clabbe@baylibre.com>
L: linux-crypto@vger.kernel.org
@@ -19764,6 +19941,13 @@ F: Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
F: drivers/media/platform/rockchip/rkisp1
F: include/uapi/linux/rkisp1-config.h
+ROCKCHIP RK3568 RANDOM NUMBER GENERATOR SUPPORT
+M: Daniel Golle <daniel@makrotopia.org>
+M: Aurelien Jarno <aurelien@aurel32.net>
+S: Maintained
+F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml
+F: drivers/char/hw_random/rockchip-rng.c
+
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
M: Jacob Chen <jacob-chen@iotwrt.com>
M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
@@ -19880,12 +20064,26 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
F: drivers/media/platform/sunxi/sun8i-rotate/
+RPMB SUBSYSTEM
+M: Jens Wiklander <jens.wiklander@linaro.org>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: drivers/misc/rpmb-core.c
+F: include/linux/rpmb.h
+
RPMSG TTY DRIVER
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: linux-remoteproc@vger.kernel.org
S: Maintained
F: drivers/tty/rpmsg_tty.c
+RTASE ETHERNET DRIVER
+M: Justin Lai <justinlai0215@realtek.com>
+M: Larry Chiu <larry.chiu@realtek.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/realtek/rtase/
+
RTL2830 MEDIA DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -19952,6 +20150,7 @@ R: Björn Roy Baron <bjorn3_gh@protonmail.com>
R: Benno Lossin <benno.lossin@proton.me>
R: Andreas Hindborg <a.hindborg@kernel.org>
R: Alice Ryhl <aliceryhl@google.com>
+R: Trevor Gross <tmgross@umich.edu>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
@@ -20150,6 +20349,16 @@ B: mailto:linux-samsung-soc@vger.kernel.org
F: Documentation/devicetree/bindings/sound/samsung*
F: sound/soc/samsung/
+SAMSUNG EXYNOS850 SoC SUPPORT
+M: Sam Protsenko <semen.protsenko@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
+F: arch/arm64/boot/dts/exynos/exynos850*
+F: drivers/clk/samsung/clk-exynos850.c
+F: include/dt-bindings/clock/exynos850.h
+
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
@@ -20337,6 +20546,19 @@ F: include/linux/wait.h
F: include/uapi/linux/sched.h
F: kernel/sched/
+SCHEDULER - SCHED_EXT
+R: Tejun Heo <tj@kernel.org>
+R: David Vernet <void@manifault.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+W: https://github.com/sched-ext/scx
+T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git
+F: include/linux/sched/ext.h
+F: kernel/sched/ext.h
+F: kernel/sched/ext.c
+F: tools/sched_ext/
+F: tools/testing/selftests/sched_ext
+
SCIOSENSE ENS160 MULTI-GAS SENSOR DRIVER
M: Gustavo Silva <gustavograzs@gmail.com>
S: Maintained
@@ -21390,13 +21612,13 @@ S: Maintained
F: tools/sound/dapm-graph
SOUND - SOUND OPEN FIRMWARE (SOF) DRIVERS
-M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
M: Liam Girdwood <lgirdwood@gmail.com>
M: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
M: Bard Liao <yung-chuan.liao@linux.intel.com>
M: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
M: Daniel Baluta <daniel.baluta@nxp.com>
R: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
L: sound-open-firmware@alsa-project.org (moderated for non-subscribers)
S: Supported
W: https://github.com/thesofproject/linux/
@@ -21405,7 +21627,7 @@ F: sound/soc/sof/
SOUNDWIRE SUBSYSTEM
M: Vinod Koul <vkoul@kernel.org>
M: Bard Liao <yung-chuan.liao@linux.intel.com>
-R: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+R: Pierre-Louis Bossart <pierre-louis.bossart@linux.dev>
R: Sanyog Kale <sanyog.r.kale@intel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
@@ -21537,10 +21759,8 @@ F: include/linux/spmi.h
F: include/trace/events/spmi.h
SPU FILE SYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
L: linuxppc-dev@lists.ozlabs.org
-S: Supported
-W: http://www.ibm.com/developerworks/power/cell/
+S: Orphan
F: Documentation/filesystems/spufs/spufs.rst
F: arch/powerpc/platforms/cell/spufs/
@@ -22469,6 +22689,7 @@ M: Jens Wiklander <jens.wiklander@linaro.org>
R: Sumit Garg <sumit.garg@linaro.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-class-tee
F: Documentation/driver-api/tee.rst
F: Documentation/tee/
F: Documentation/userspace-api/tee.rst
@@ -22514,6 +22735,7 @@ M: Thierry Reding <thierry.reding@gmail.com>
R: Krishna Reddy <vdumpa@nvidia.com>
L: linux-tegra@vger.kernel.org
S: Supported
+F: drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
F: drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
F: drivers/iommu/tegra*
@@ -22619,12 +22841,11 @@ F: Documentation/devicetree/bindings/sound/tas2552.txt
F: Documentation/devicetree/bindings/sound/ti,tas2562.yaml
F: Documentation/devicetree/bindings/sound/ti,tas2770.yaml
F: Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
+F: Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml
F: Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml
F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
F: Documentation/devicetree/bindings/sound/ti,tlv320adcx140.yaml
-F: Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
-F: Documentation/devicetree/bindings/sound/tpa6130a2.txt
F: include/sound/tas2*.h
F: include/sound/tlv320*.h
F: include/sound/tpa6130a2-plat.h
@@ -23202,6 +23423,7 @@ Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
F: drivers/char/tpm/
+F: tools/testing/selftests/tpm2/
TPS546D24 DRIVER
M: Duke Du <dukedu83@gmail.com>
@@ -23214,9 +23436,8 @@ TQ SYSTEMS BOARD & DRIVER SUPPORT
L: linux@ew.tq-group.com
S: Supported
W: https://www.tq-group.com/en/products/tq-embedded/
-F: arch/arm/boot/dts/imx*mba*.dts*
-F: arch/arm/boot/dts/imx*tqma*.dts*
-F: arch/arm/boot/dts/mba*.dtsi
+F: arch/arm/boot/dts/nxp/imx/*mba*.dts*
+F: arch/arm/boot/dts/nxp/imx/*tqma*.dts*
F: arch/arm64/boot/dts/freescale/fsl-*tqml*.dts*
F: arch/arm64/boot/dts/freescale/imx*mba*.dts*
F: arch/arm64/boot/dts/freescale/imx*tqma*.dts*
@@ -23501,6 +23722,15 @@ F: drivers/cdrom/cdrom.c
F: include/linux/cdrom.h
F: include/uapi/linux/cdrom.h
+UNION-FIND
+M: Xavier <xavier_qy@163.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/core-api/union_find.rst
+F: Documentation/translations/zh_CN/core-api/union_find.rst
+F: include/linux/union_find.h
+F: lib/union_find.c
+
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
R: Alim Akhtar <alim.akhtar@samsung.com>
R: Avri Altman <avri.altman@wdc.com>
@@ -24234,6 +24464,7 @@ F: include/linux/vdpa.h
F: include/linux/virtio*.h
F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
+F: net/vmw_vsock/virtio*
F: tools/virtio/
F: tools/testing/selftests/drivers/net/virtio_net/
@@ -24424,6 +24655,20 @@ F: include/uapi/linux/vsockmon.h
F: net/vmw_vsock/
F: tools/testing/vsock/
+VMA
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: https://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: mm/vma.c
+F: mm/vma.h
+F: mm/vma_internal.h
+F: tools/testing/vma/
+
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
@@ -24813,6 +25058,17 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
F: Documentation/arch/x86/
F: Documentation/devicetree/bindings/x86/
F: arch/x86/
+F: tools/testing/selftests/x86
+
+X86 CPUID DATABASE
+M: Borislav Petkov <bp@alien8.de>
+M: Thomas Gleixner <tglx@linutronix.de>
+M: x86@kernel.org
+R: Ahmed S. Darwish <darwi@linutronix.de>
+L: x86-cpuid@lists.linux.dev
+S: Maintained
+W: https://x86-cpuid.org
+F: tools/arch/x86/kcpuid/cpuid.csv
X86 ENTRY CODE
M: Andy Lutomirski <luto@kernel.org>
@@ -25255,6 +25511,19 @@ S: Maintained
F: drivers/spi/spi-xtensa-xtfpga.c
F: sound/soc/xtensa/xtfpga-i2s.c
+XZ EMBEDDED
+M: Lasse Collin <lasse.collin@tukaani.org>
+S: Maintained
+W: https://tukaani.org/xz/embedded.html
+B: https://github.com/tukaani-project/xz-embedded/issues
+C: irc://irc.libera.chat/tukaani
+F: Documentation/staging/xz.rst
+F: include/linux/decompress/unxz.h
+F: include/linux/xz.h
+F: lib/decompress_unxz.c
+F: lib/xz/
+F: scripts/xz_wrap.sh
+
YAM DRIVER FOR AX.25
M: Jean-Paul Roubelat <jpr@f6fbb.org>
L: linux-hams@vger.kernel.org
@@ -25279,7 +25548,6 @@ F: tools/net/ynl/
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
-L: usbb2k-api-dev@nongnu.org
S: Maintained
F: Documentation/input/devices/yealink.rst
F: drivers/input/misc/yealink.*
diff --git a/Makefile b/Makefile
index 2966d5fbbf17..265dd990a9b6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -579,10 +579,6 @@ else
RUSTC_OR_CLIPPY = $(RUSTC)
endif
-ifdef RUST_LIB_SRC
- export RUST_LIB_SRC
-endif
-
# Allows the usage of unstable features in stable compilers.
export RUSTC_BOOTSTRAP := 1
@@ -649,9 +645,11 @@ endif
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
-# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
-# and from include/config/auto.conf.cmd to detect the compiler upgrade.
+# CC_VERSION_TEXT and RUSTC_VERSION_TEXT are referenced from Kconfig (so they
+# need export), and from include/config/auto.conf.cmd to detect the compiler
+# upgrade.
CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1))
+RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null))
ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
include $(srctree)/scripts/Makefile.clang
@@ -672,7 +670,7 @@ ifdef config-build
# KBUILD_DEFCONFIG may point out an alternative default configuration
# used for 'make defconfig'
include $(srctree)/arch/$(SRCARCH)/Makefile
-export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT
+export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT RUSTC_VERSION_TEXT
config: outputmakefile scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts/kconfig $@
@@ -928,6 +926,7 @@ ifdef CONFIG_SHADOW_CALL_STACK
ifndef CONFIG_DYNAMIC_SCS
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
+KBUILD_RUSTFLAGS += -Zsanitizer=shadow-call-stack
endif
export CC_FLAGS_SCS
endif
@@ -952,6 +951,16 @@ endif
ifdef CONFIG_CFI_CLANG
CC_FLAGS_CFI := -fsanitize=kcfi
+ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS
+ CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
+endif
+ifdef CONFIG_RUST
+ # Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects
+ # CONFIG_CFI_ICALL_NORMALIZE_INTEGERS.
+ RUSTC_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers
+ KBUILD_RUSTFLAGS += $(RUSTC_FLAGS_CFI)
+ export RUSTC_FLAGS_CFI
+endif
KBUILD_CFLAGS += $(CC_FLAGS_CFI)
export CC_FLAGS_CFI
endif
@@ -1483,6 +1492,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
+ modules.builtin.ranges vmlinux.o.map \
compile_commands.json rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
@@ -1947,7 +1957,7 @@ clean: $(clean-dirs)
-o -name '*.c.[012]*.*' \
-o -name '*.ll' \
-o -name '*.gcno' \
- -o -name '*.*.symversions' \) -type f -print \
+ \) -type f -print \
-o -name '.tmp_*' -print \
| xargs rm -rf
diff --git a/arch/Kconfig b/arch/Kconfig
index 975dd22a2dbd..98157b38f5cf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -17,6 +17,15 @@ config CPU_MITIGATIONS
def_bool y
endif
+#
+# Selected by architectures that need custom DMA operations for e.g. legacy
+# IOMMUs not handled by dma-iommu. Drivers must never select this symbol.
+#
+config ARCH_HAS_DMA_OPS
+ depends on HAS_DMA
+ select DMA_OPS_HELPERS
+ bool
+
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS
@@ -826,6 +835,22 @@ config CFI_CLANG
https://clang.llvm.org/docs/ControlFlowIntegrity.html
+config CFI_ICALL_NORMALIZE_INTEGERS
+ bool "Normalize CFI tags for integers"
+ depends on CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ help
+ This option normalizes the CFI tags for integer types so that all
+ integer types of the same size and signedness receive the same CFI
+ tag.
+
+ The option is separate from CONFIG_RUST because it affects the ABI.
+ When working with build systems that care about the ABI, it is
+ convenient to be able to turn on this flag first, before Rust is
+ turned on.
+
+ This option is necessary for using CFI with Rust. If unsure, say N.
+
config CFI_PERMISSIVE
bool "Use CFI in permissive mode"
depends on CFI_CLANG
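
The CFI_ICALL_NORMALIZE_INTEGERS help text above is the crux of the C/Rust kCFI interop enabled later in this series: both compilers must derive the same call-site tag from integer types that agree only on size and signedness. A minimal C sketch of the mismatch it removes (a hedged illustration of the kCFI hashing scheme, not code from this patch):

/*
 * Hedged sketch: under kCFI, an indirect call checks a type hash at the
 * call site against one stored at the target. Without normalization,
 * `unsigned long` and `unsigned long long` hash differently even when
 * both are 64-bit, so a Rust caller that only knows "u64" cannot match
 * a C target declared with `unsigned long`. With
 * -fsanitize-cfi-icall-experimental-normalize-integers both signatures
 * collapse to one (size, signedness) tag.
 */
typedef int (*handler_ul_t)(unsigned long);
typedef int (*handler_ull_t)(unsigned long long);

static int target(unsigned long long v)
{
	return v != 0;
}

int call_indirect(void)
{
	/* ABI-compatible on 64-bit targets; plain kCFI may still trap at
	 * this call, normalized kCFI accepts it. */
	handler_ul_t fn = (handler_ul_t)target;

	return fn(42UL);
}
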
@@ -862,7 +887,7 @@ config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
Architecture neither relies on exception_enter()/exception_exit()
nor on schedule_user(). Also preempt_schedule_notrace() and
preempt_schedule_irq() can't be called in a preemptible section
- while context tracking is CONTEXT_USER. This feature reflects a sane
+ while context tracking is CT_STATE_USER. This feature reflects a sane
entry implementation where the following requirements are met on
critical entry code, ie: before user_exit() or after user_enter():
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 50ff06d5b799..109a4cddcd13 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -4,12 +4,12 @@ config ALPHA
default y
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DMA_OPS if PCI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_NO_PREEMPT
select ARCH_NO_SG_CHAIN
select ARCH_USE_CMPXCHG_LOCKREF
- select DMA_OPS if PCI
select FORCE_PCI
select PCI_DOMAINS if PCI
select PCI_SYSCALL if PCI
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index e94f621903fe..251b73c5481e 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -140,6 +140,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
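
These three options are the per-arch mirror of the asm-generic devmem-TCP additions. A hedged usage sketch: once payload fragments have arrived via SCM_DEVMEM_DMABUF control messages, the application returns their tokens with SO_DEVMEM_DONTNEED so the kernel can recycle the dma-buf pages. The token layout is assumed from the devmem-TCP series and is not shown in this hunk:

#include <sys/socket.h>

#ifndef SO_DEVMEM_DONTNEED
#define SO_DEVMEM_DONTNEED 80	/* alpha value from the hunk above */
#endif

/* Assumed layout (uapi <linux/uio.h> in the devmem-TCP series). */
struct dmabuf_token {
	unsigned int token_start;
	unsigned int token_count;
};

/* Return a run of previously received device-memory fragments so the
 * kernel can reuse the underlying dma-buf pages. */
static int devmem_dontneed(int sock, unsigned int start, unsigned int count)
{
	struct dmabuf_token tok = {
		.token_start = start,
		.token_count = count,
	};

	return setsockopt(sock, SOL_SOCKET, SO_DEVMEM_DONTNEED,
			  &tok, sizeof(tok));
}
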
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e5f881bc8288..c0424de9e7cd 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -160,10 +160,10 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
.count = count
};
- if (!arg.file)
+ if (!fd_file(arg))
return -EBADF;
- error = iterate_dir(arg.file, &buf.ctx);
+ error = iterate_dir(fd_file(arg), &buf.ctx);
if (error >= 0)
error = buf.error;
if (count != buf.count)
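
The arg.file -> fd_file(arg) conversions above follow the VFS making struct fd opaque: callers go through an accessor instead of dereferencing the member. A self-contained sketch of the pattern with mocked types (the flag-in-low-bits packing is an assumption about the representation; the real definitions live in include/linux/file.h):

struct file;

/* Mocked: the pointer and a "needs fput on release" flag are packed
 * into one word so struct fd stays register-sized. */
struct fd {
	unsigned long word;
};

#define FDPUT_FPUT 1UL

static inline struct file *fd_file(struct fd f)
{
	return (struct file *)(f.word & ~FDPUT_FPUT);
}

static inline int fd_empty(struct fd f)
{
	return fd_file(f) == 0;	/* replaces open-coded !arg.file checks */
}
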
@@ -1229,7 +1229,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
unsigned long limit;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fd0b0a0d4686..5b2488142041 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -13,6 +13,7 @@ config ARC
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_NEED_CMPXCHG_1_EMU
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select ARCH_32BIT_OFF_T
select BUILDTIME_TABLE_SORT
@@ -553,7 +554,7 @@ config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
Set the name of the DTB to embed in the vmlinux binary
- Leaving it blank selects the minimal "skeleton" dtb
+ Leaving it blank selects the "nsim_700" dtb.
endmenu # "ARC Architecture Configuration"
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 89720d6d7e0d..319bbe270322 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=m
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 73ec01ed0492..8c1f1a111a17 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 4da0f626fa9d..75cab9f25b5b 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=m
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 1a68e4beebca..5aba3d850fa2 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -60,6 +60,7 @@ CONFIG_SERIAL_8250_DW=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index e138fde067de..58045c898340 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
#include <asm/barrier.h>
#include <asm/smp.h>
@@ -46,6 +47,9 @@
__typeof__(*(ptr)) _prev_; \
\
switch(sizeof((_p_))) { \
+ case 1: \
+ _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+ break; \
case 4: \
_prev_ = __cmpxchg(_p_, _o_, _n_); \
break; \
@@ -65,8 +69,6 @@
__typeof__(*(ptr)) _prev_; \
unsigned long __flags; \
\
- BUILD_BUG_ON(sizeof(_p_) != 4); \
- \
/* \
* spin lock/unlock provide the needed smp_mb() before/after \
*/ \
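
The new case 1 above, together with the ARCH_NEED_CMPXCHG_1_EMU select in the arc Kconfig hunk, routes one-byte cmpxchg() through cmpxchg_emu_u8() instead of tripping the removed BUILD_BUG_ON. A standalone sketch of the underlying technique, assuming little-endian byte order and substituting compiler builtins for the kernel's word-sized cmpxchg():

#include <stdint.h>

/*
 * Hedged sketch of the cmpxchg_emu_u8() idea: emulate a 1-byte
 * compare-and-exchange with a word-sized CAS on the aligned 32-bit
 * word containing that byte. Little-endian byte numbering assumed;
 * the kernel version uses its own primitives rather than builtins.
 */
static uint8_t cmpxchg_u8_emulated(volatile uint8_t *p, uint8_t old, uint8_t new)
{
	volatile uint32_t *word = (volatile uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
	unsigned int shift = ((uintptr_t)p & 3) * 8;
	uint32_t mask = (uint32_t)0xff << shift;

	for (;;) {
		uint32_t cur = *word;
		uint8_t cur_byte = (uint8_t)((cur & mask) >> shift);

		/* cmpxchg returns the value it found; a mismatch ends it. */
		if (cur_byte != old)
			return cur_byte;

		uint32_t repl = (cur & ~mask) | ((uint32_t)new << shift);

		/* Retry if any other byte in the word changed meanwhile. */
		if (__atomic_compare_exchange_n(word, &cur, repl, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;
	}
}
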
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 69a915297155..2185afe8d59f 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -23,7 +23,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 173159e93c99..749179a1d162 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -10,6 +10,7 @@ config ARM
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DMA_ALLOC if MMU
+ select ARCH_HAS_DMA_OPS
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -54,7 +55,6 @@ config ARM
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
select DMA_DECLARE_COHERENT
select DMA_GLOBAL_POOL if !MMU
- select DMA_OPS
select DMA_NONCOHERENT_MMAP if MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
@@ -64,6 +64,7 @@ config ARM
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IRQ_IPI if SMP
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_DEVICES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
diff --git a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
index 3da47349eaaf..49890eb12781 100644
--- a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
@@ -34,8 +34,6 @@
gpio-keys {
compatible = "gpio-keys-polled";
- #address-cells = <1>;
- #size-cells = <0>;
poll-interval = <100>;
pal-switch {
diff --git a/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi
index 26783d053ac7..40f7515aa068 100644
--- a/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi
+++ b/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi
@@ -103,7 +103,7 @@
};
/* PMU with one IRQ line per core */
- pmu: pmu@0 {
+ pmu: pmu {
compatible = "arm,arm11mpcore-pmu";
interrupt-parent = <&intc>;
interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
index ce35748f3d25..db1b6793cd2c 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts
@@ -92,7 +92,7 @@
<0x1f000100 0x100>;
};
- L2: cache-controller {
+ L2: cache-controller@1f002000 {
compatible = "arm,l220-cache";
reg = <0x1f002000 0x1000>;
interrupt-parent = <&intc_tc11mp>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-pba8.dts b/arch/arm/boot/dts/arm/arm-realview-pba8.dts
index d3238c252b59..d2e0082245f9 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pba8.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pba8.dts
@@ -40,7 +40,7 @@
};
};
- pmu: pmu@0 {
+ pmu: pmu {
compatible = "arm,cortex-a8-pmu";
interrupt-parent = <&intc>;
interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts
index 85d3968fbb91..507ad7ac4974 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts
@@ -97,7 +97,7 @@
interrupts = <1 14 0xf04>;
};
- pmu: pmu@0 {
+ pmu: pmu {
compatible = "arm,cortex-a9-pmu";
interrupt-parent = <&intc>;
interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/aspeed/Makefile b/arch/arm/boot/dts/aspeed/Makefile
index e51c6d203725..c4f064e4b073 100644
--- a/arch/arm/boot/dts/aspeed/Makefile
+++ b/arch/arm/boot/dts/aspeed/Makefile
@@ -17,6 +17,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-bytedance-g220a.dtb \
aspeed-bmc-delta-ahe50dc.dtb \
aspeed-bmc-facebook-bletchley.dtb \
+ aspeed-bmc-facebook-catalina.dtb \
aspeed-bmc-facebook-cmm.dtb \
aspeed-bmc-facebook-elbert.dtb \
aspeed-bmc-facebook-fuji.dtb \
@@ -32,8 +33,10 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-facebook-yamp.dtb \
aspeed-bmc-facebook-yosemitev2.dtb \
aspeed-bmc-facebook-yosemite4.dtb \
+ aspeed-bmc-ibm-blueridge.dtb \
aspeed-bmc-ibm-bonnell.dtb \
aspeed-bmc-ibm-everest.dtb \
+ aspeed-bmc-ibm-fuji.dtb \
aspeed-bmc-ibm-rainier.dtb \
aspeed-bmc-ibm-rainier-1s4u.dtb \
aspeed-bmc-ibm-rainier-4u.dtb \
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts
index 8ab5f301f926..31c5d319aa0a 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts
@@ -49,6 +49,11 @@
*/
i2c80 = &nvme_m2_0;
i2c81 = &nvme_m2_1;
+
+ /*
+ * i2c bus 82 assigned to OCP slot
+ */
+ i2c82 = &ocpslot;
};
chosen {
@@ -420,6 +425,17 @@
reg = <0x70>;
i2c-mux-idle-disconnect;
+ ocpslot: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ ocpslot_temp: temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+ };
+
nvmeslot_0_7: i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
@@ -672,10 +688,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&pwm_tacho {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
index 3c8925034a8c..0295f5adcfbc 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
@@ -16,6 +16,32 @@
serial8 = &uart9;
/*
+ * I2C temperature alias port
+ */
+ i2c20 = &i2c4_bus70_chn0;
+ i2c21 = &i2c4_bus70_chn1;
+ i2c22 = &i2c4_bus70_chn2;
+ i2c23 = &i2c4_bus70_chn3;
+
+ /*
+ * i2c bus 30-31 assigned to OCP slot 0-1
+ */
+ i2c30 = &ocpslot_0;
+ i2c31 = &ocpslot_1;
+
+ /*
+ * i2c bus 32-33 assigned to Riser slot 0-1
+ */
+ i2c32 = &i2c_riser0;
+ i2c33 = &i2c_riser1;
+
+ /*
+ * i2c bus 38-39 assigned to FRU on Riser slot 0-1
+ */
+ i2c38 = &i2c_riser0_chn_0;
+ i2c39 = &i2c_riser1_chn_0;
+
+ /*
* I2C NVMe alias port
*/
i2c100 = &backplane_0;
@@ -87,6 +113,37 @@
};
};
+ leds {
+ compatible = "gpio-leds";
+ /*
+ * Use gpio-leds to configure the GPIOW5 (bmc-ready) pin to be reset on
+ * watchdog timeout.
+ */
+ led-bmc-ready {
+ gpios = <&gpio0 ASPEED_GPIO(W, 5) (GPIO_ACTIVE_HIGH | GPIO_TRANSITORY)>;
+ };
+
+ led-sw-heartbeat {
+ gpios = <&gpio0 ASPEED_GPIO(N, 3) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-identify {
+ gpios = <&gpio0 ASPEED_GPIO(S, 3) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-fault {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-fan-fault {
+ gpios = <&gpio_expander1 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-psu-fault {
+ gpios = <&gpio_expander1 1 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
voltage_mon_reg: voltage-mon-regulator {
compatible = "regulator-fixed";
regulator-name = "ltc2497_reg";
@@ -515,6 +572,80 @@
#size-cells = <0>;
reg = <0x70>;
i2c-mux-idle-disconnect;
+
+ ocpslot_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ ocpslot_0_temp: temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+ };
+
+ ocpslot_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+
+ ocpslot_1_temp: temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+ };
+
+ i2c_riser0: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72>;
+ i2c-mux-idle-disconnect;
+
+ i2c_riser0_chn_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+ };
+ };
+ };
+
+ i2c_riser1: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72>;
+ i2c-mux-idle-disconnect;
+
+ i2c_riser1_chn_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+ };
+ };
+ };
};
};
@@ -790,6 +921,10 @@
};
};
+&i2c10 {
+ status = "okay";
+};
+
&i2c11 {
status = "okay";
ssif-bmc@10 {
@@ -812,6 +947,25 @@
};
};
+&i2c15 {
+ status = "okay";
+ gpio_expander1: gpio-expander@22 {
+ compatible = "nxp,pca9535";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "fan-fault","psu-fault",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","";
+ };
+};
+
&adc0 {
status = "okay";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts
index 7c6af7f226e7..29c68c37e7f5 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts
@@ -200,10 +200,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&gpio {
pin_gpio_c7 {
gpio-hog;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts
index 555485871e7a..c4097e4f2ca4 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts
@@ -110,11 +110,15 @@
compatible = "st,24c128", "atmel,24c128";
reg = <0x50>;
pagesize = <16>;
- #address-cells = <1>;
- #size-cells = <1>;
- eth0_macaddress: macaddress@3f80 {
- reg = <0x3f80 6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@3f80 {
+ reg = <0x3f80 6>;
+ };
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts
index 8dee4faa9e07..0943e0bf1305 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts
@@ -254,10 +254,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&vhub {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts
new file mode 100644
index 000000000000..82835e96317d
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021 Facebook Inc.
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/usb/pd.h>
+#include <dt-bindings/leds/leds-pca955x.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "Facebook Catalina BMC";
+ compatible = "facebook,catalina-bmc", "aspeed,ast2600";
+
+ aliases {
+ serial0 = &uart1;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ i2c16 = &i2c1mux0ch0;
+ i2c17 = &i2c1mux0ch1;
+ i2c18 = &i2c1mux0ch2;
+ i2c19 = &i2c1mux0ch3;
+ i2c20 = &i2c1mux0ch4;
+ i2c21 = &i2c1mux0ch5;
+ i2c22 = &i2c1mux0ch6;
+ i2c23 = &i2c1mux0ch7;
+ i2c24 = &i2c0mux0ch0;
+ i2c25 = &i2c0mux0ch1;
+ i2c26 = &i2c0mux0ch2;
+ i2c27 = &i2c0mux0ch3;
+ i2c28 = &i2c0mux1ch0;
+ i2c29 = &i2c0mux1ch1;
+ i2c30 = &i2c0mux1ch2;
+ i2c31 = &i2c0mux1ch3;
+ i2c32 = &i2c0mux2ch0;
+ i2c33 = &i2c0mux2ch1;
+ i2c34 = &i2c0mux2ch2;
+ i2c35 = &i2c0mux2ch3;
+ i2c36 = &i2c0mux3ch0;
+ i2c37 = &i2c0mux3ch1;
+ i2c38 = &i2c0mux3ch2;
+ i2c39 = &i2c0mux3ch3;
+ i2c40 = &i2c0mux4ch0;
+ i2c41 = &i2c0mux4ch1;
+ i2c42 = &i2c0mux4ch2;
+ i2c43 = &i2c0mux4ch3;
+ i2c44 = &i2c0mux5ch0;
+ i2c45 = &i2c0mux5ch1;
+ i2c46 = &i2c0mux5ch2;
+ i2c47 = &i2c0mux5ch3;
+ i2c48 = &i2c30mux0ch0;
+ i2c49 = &i2c30mux0ch1;
+ i2c50 = &i2c30mux0ch2;
+ i2c51 = &i2c30mux0ch3;
+ i2c52 = &i2c30mux0ch4;
+ i2c53 = &i2c30mux0ch5;
+ i2c54 = &i2c30mux0ch6;
+ i2c55 = &i2c30mux0ch7;
+ };
+
+ chosen {
+ stdout-path = "serial4:57600n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
+ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
+ <&adc1 2>;
+ };
+
+ spi1_gpio: spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "bmc_heartbeat_amber";
+ gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "fp_id_amber";
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(B, 5) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ label = "bmc_ready_noled";
+ gpios = <&gpio0 ASPEED_GPIO(B, 3) (GPIO_ACTIVE_HIGH|GPIO_TRANSITORY)>;
+ };
+
+ led-3 {
+ label = "bmc_ready_cpld_noled";
+ gpios = <&gpio0 ASPEED_GPIO(P, 5) (GPIO_ACTIVE_HIGH|GPIO_TRANSITORY)>;
+ };
+ };
+
+ p1v8_bmc_aux: regulator-p1v8-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p1v8_bmc_aux";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ p2v5_bmc_aux: regulator-p2v5-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p2v5_bmc_aux";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ncsi4_default>;
+ use-ncsi;
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-128.dtsi"
+ };
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9546";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c0mux0ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c0mux0ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9546";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux1ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux1ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ // IO Mezz 0 IOEXP
+ io_expander7: gpio@20 {
+ compatible = "nxp,pca9535";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // IO Mezz 0 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ i2c0mux1ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c30mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c30mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c30mux0ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c30mux0ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ i2c30mux0ch4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+ i2c30mux0ch5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+ i2c30mux0ch6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ // HDD FRU EEPROM
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+ i2c30mux0ch7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+
+ power-sensor@40 {
+ compatible = "ti,ina230";
+ reg = <0x40>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@41 {
+ compatible = "ti,ina230";
+ reg = <0x41>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@44 {
+ compatible = "ti,ina230";
+ reg = <0x44>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@45 {
+ compatible = "ti,ina230";
+ reg = <0x45>;
+ shunt-resistor = <2000>;
+ };
+ };
+ };
+ };
+ i2c0mux1ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@73 {
+ compatible = "nxp,pca9546";
+ reg = <0x73>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux2ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux2ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c0mux2ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c0mux2ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9546";
+ reg = <0x75>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux3ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux3ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c0mux3ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c0mux3ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@76 {
+ compatible = "nxp,pca9546";
+ reg = <0x76>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux4ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux4ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ // IO Mezz 1 IOEXP
+ io_expander8: gpio@21 {
+ compatible = "nxp,pca9535";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // IO Mezz 1 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ i2c0mux4ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c0mux4ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+
+ i2c-mux@77 {
+ compatible = "nxp,pca9546";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c0mux5ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c0mux5ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c0mux5ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c0mux5ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
+ i2c-mux-idle-disconnect;
+
+ i2c1mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <500>;
+ };
+ power-sensor@42 {
+ compatible = "ti,ina238";
+ reg = <0x42>;
+ shunt-resistor = <500>;
+ };
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <500>;
+ };
+ };
+ i2c1mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ };
+ power-sensor@43 {
+ compatible = "ti,ina238";
+ reg = <0x43>;
+ };
+ };
+ i2c1mux0ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ };
+ i2c1mux0ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+ };
+ i2c1mux0ch4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4>;
+
+ power-monitor@42 {
+ compatible = "lltc,ltc4287";
+ reg = <0x42>;
+ shunt-resistor-micro-ohms = <200>;
+ };
+ power-monitor@43 {
+ compatible = "lltc,ltc4287";
+ reg = <0x43>;
+ shunt-resistor-micro-ohms = <200>;
+ };
+ };
+ i2c1mux0ch5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x5>;
+
+ // PDB FRU EEPROM
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
+
+ // PDB TEMP SENSOR
+ temperature-sensor@4f {
+ compatible = "ti,tmp75";
+ reg = <0x4f>;
+ };
+ };
+ i2c1mux0ch6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6>;
+
+ // PDB IOEXP
+ io_expander5: gpio@27 {
+ compatible = "nxp,pca9554";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // OSFP IOEXP
+ io_expander6: gpio@25 {
+ compatible = "nxp,pca9555";
+ reg = <0x25>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // OSFP FRU EEPROM
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+ i2c1mux0ch7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7>;
+
+ // FIO FRU EEPROM
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+
+ // FIO TEMP SENSOR
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ // Module 0 IOEXP
+ io_expander0: gpio@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ // Module 1 IOEXP
+ io_expander1: gpio@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ // HMC IOEXP
+ io_expander2: gpio@27 {
+ compatible = "nxp,pca9555";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ // Module 0 EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ // Module 1 EEPROM
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+
+ // BMC IOEXP on Module 0
+ io_expander3: gpio@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ rtc@6f {
+ compatible = "nuvoton,nct3018y";
+ reg = <0x6f>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+
+ // SCM CPLD IOEXP
+ io_expander4: gpio@4f {
+ compatible = "nxp,pca9555";
+ reg = <0x4f>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // SCM TEMP SENSOR
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
+
+ // SCM FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ // BSM FRU EEPROM
+ eeprom@56 {
+ compatible = "atmel,24c64";
+ reg = <0x56>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+
+ // OCP NIC0 TEMP
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+
+ // OCP NIC0 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+
+ ssif-bmc@10 {
+ compatible = "ssif-bmc";
+ reg = <0x10>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+
+ // Module 1 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+};
+
+&i2c13 {
+ status = "okay";
+
+ // Module 0 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ // Left CBC FRU EEPROM
+ eeprom@54 {
+ compatible = "atmel,24c02";
+ reg = <0x54>;
+ };
+
+ // Right CBC FRU EEPROM
+ eeprom@55 {
+ compatible = "atmel,24c02";
+ reg = <0x55>;
+ };
+
+ // HMC FRU EEPROM
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ };
+};
+
+&i2c14 {
+ status = "okay";
+
+ // PDB CPLD IOEXP 0x10
+ io_expander9: gpio@10 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x10>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // PDB CPLD IOEXP 0x11
+ io_expander10: gpio@11 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // PDB CPLD IOEXP 0x12
+ io_expander11: gpio@12 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // PDB CPLD IOEXP 0x13
+ io_expander12: gpio@13 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // PDB CPLD IOEXP 0x14
+ io_expander13: gpio@14 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x14>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ // PDB CPLD IOEXP 0x15
+ io_expander14: gpio@15 {
+ compatible = "nxp,pca9555";
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x15>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ // OCP NIC1 TEMP
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+
+ // OCP NIC1 FRU EEPROM
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+};
+
+&adc0 {
+ vref-supply = <&p1v8_bmc_aux>;
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
+ &pinctrl_adc2_default &pinctrl_adc3_default
+ &pinctrl_adc4_default &pinctrl_adc5_default
+ &pinctrl_adc6_default &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ vref-supply = <&p2v5_bmc_aux>;
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc10_default>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&wdt1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+ aspeed,reset-type = "soc";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+ aspeed,ext-pulse-duration = <256>;
+};
+
+&pinctrl {
+ pinctrl_ncsi3_default: ncsi3_default {
+ function = "RMII3";
+ groups = "NCSI3";
+ };
+
+ pinctrl_ncsi4_default: ncsi4_default {
+ function = "RMII4";
+ groups = "NCSI4";
+ };
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "BATTERY_DETECT","PRSNT1_HPM_SCM_N",
+ "BMC_I2C1_FPGA_ALERT_L","BMC_READY",
+ "IOEXP_INT_L","FM_ID_LED",
+ "","",
+ /*C0-C7*/ "","","","",
+ "PMBUS_REQ_N","PSU_FW_UPDATE_REQ_N",
+ "","BMC_I2C_SSIF_ALERT_L",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","",
+ "FM_DEBUG_PORT_PRSNT_N","FM_BMC_DBP_PRESENT_N",
+ /*H0-H7*/ "PWR_BRAKE_L","RUN_POWER_EN",
+ "SHDN_FORCE_L","SHDN_REQ_L",
+ "","","","",
+ /*I0-I7*/ "","","","",
+ "","FLASH_WP_STATUS",
+ "FM_PDB_HEALTH_N","RUN_POWER_PG",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "PCIE_EP_RST_EN","BMC_FRU_WP",
+ "SCM_HPM_STBY_RST_N","SCM_HPM_STBY_EN",
+ "STBY_POWER_PG_3V3","TH500_SHDN_OK_L","","",
+ /*N0-N7*/ "LED_POSTCODE_0","LED_POSTCODE_1",
+ "LED_POSTCODE_2","LED_POSTCODE_3",
+ "LED_POSTCODE_4","LED_POSTCODE_5",
+ "LED_POSTCODE_6","LED_POSTCODE_7",
+ /*O0-O7*/ "HMC_I2C3_FPGA_ALERT_L","FPGA_READY_HMC",
+ "CHASSIS_AC_LOSS_L","BSM_PRSNT_R_N",
+ "PSU_SMB_ALERT_L","FM_TPM_PRSNT_0_N",
+ "","USBDBG_IPMI_EN_L",
+ /*P0-P7*/ "PWR_BTN_BMC_N","IPEX_CABLE_PRSNT_L",
+ "ID_RST_BTN_BMC_N","RST_BMC_RSTBTN_OUT_N",
+ "host0-ready","BMC_READY_CPLD","","BMC_HEARTBEAT_N",
+ /*Q0-Q7*/ "IRQ_PCH_TPM_SPI_N","USB_OC0_REAR_R_N",
+ "UART_MUX_SEL","I2C_MUX_RESET_L",
+ "RSVD_NV_PLT_DETECT","SPI_TPM_INT_L",
+ "CPU_JTAG_MUX_SELECT","THERM_BB_OVERT_L",
+ /*R0-R7*/ "THERM_BB_WARN_L","SPI_BMC_FPGA_INT_L",
+ "CPU_BOOT_DONE","PMBUS_GNT_L",
+ "CHASSIS_PWR_BRK_L","PCIE_WAKE_L",
+ "PDB_THERM_OVERT_L","HMC_I2C2_FPGA_ALERT_L",
+ /*S0-S7*/ "","","SYS_BMC_PWRBTN_R_N","FM_TPM_PRSNT_1_N",
+ "FM_BMC_DEBUG_SW_N","UID_LED_N",
+ "SYS_FAULT_LED_N","RUN_POWER_FAULT_L",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "L2_RST_REQ_OUT_L","L0L1_RST_REQ_OUT_L",
+ "BMC_ID_BEEP_SEL","BMC_I2C0_FPGA_ALERT_L",
+ "SMB_BMC_TMP_ALERT","PWR_LED_N",
+ "SYS_RST_OUT_L","IRQ_TPM_SPI_N",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","RST_BMC_SELF_HW",
+ "FM_FLASH_LATCH_N","BMC_EMMC_RST_N",
+ "","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+};
+
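
gpio-line-names tables like the one above are what make these signals addressable by name from userspace. A consumer-side sketch, assuming the libgpiod v1 API and using one of the names exported above:

#include <gpiod.h>
#include <stdio.h>

int main(void)
{
	/* Looks the line up by the name from the table ("BMC_READY"). */
	struct gpiod_line *line = gpiod_line_find("BMC_READY");

	if (!line)
		return 1;
	if (gpiod_line_request_input(line, "bmc-ready-probe") < 0)
		return 1;

	printf("BMC_READY=%d\n", gpiod_line_get_value(line));
	gpiod_line_release(line);
	gpiod_line_close_chip(line);
	return 0;
}
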
+&io_expander0 {
+ gpio-line-names =
+ "FPGA_THERM_OVERT_L","FPGA_READY_BMC",
+ "HMC_BMC_DETECT","HMC_PGOOD",
+ "","BMC_SELF_PWR_CYCLE",
+ "FPGA_EROT_FATAL_ERROR_L","WP_HW_EXT_CTRL_L",
+ "EROT_FPGA_RST_L","FPGA_EROT_RECOVERY_L",
+ "BMC_EROT_FPGA_SPI_MUX_SEL","USB2_HUB_RESET_L",
+ "NCSI_CS1_SEL","SGPIO_EN_L",
+ "B2B_IOEXP_INT_L","I2C_BUS_MUX_RESET_L";
+};
+
+&io_expander1 {
+ gpio-line-names =
+ "SEC_FPGA_THERM_OVERT_L","SEC_FPGA_READY_BMC",
+ "","",
+ "","",
+ "SEC_FPGA_EROT_FATAL_ERROR_L","SEC_WP_HW_EXT_CTRL_L",
+ "SEC_EROT_FPGA_RST_L","SEC_FPGA_EROT_RECOVERY_L",
+ "SEC_BMC_EROT_FPGA_SPI_MUX_SEL","",
+ "","",
+ "","SEC_I2C_BUS_MUX_RESET_L";
+};
+
+&io_expander2 {
+ gpio-line-names =
+ "HMC_PRSNT_L","HMC_READY",
+ "HMC_EROT_FATAL_ERROR_L","I2C_MUX_SEL",
+ "HMC_EROT_SPI_MUX_SEL","HMC_EROT_RECOVERY_L",
+ "HMC_EROT_RST_L","GLOBAL_WP_HMC",
+ "FPGA_RST_L","USB2_HUB_RST",
+ "CPU_UART_MUX_SEL","",
+ "","","","";
+};
+
+&io_expander3 {
+ gpio-line-names =
+ "RTC_MUX_SEL","PCI_MUX_SEL","TPM_MUX_SEL","FAN_MUX-SEL",
+ "SGMII_MUX_SEL","DP_MUX_SEL","UPHY3_USB_SEL","NCSI_MUX_SEL",
+ "BMC_PHY_RST","RTC_CLR_L","BMC_12V_CTRL","PS_RUN_IO0_PG",
+ "","","","";
+};
+
+&io_expander4 {
+ gpio-line-names =
+ "stby_power_en_cpld","stby_power_gd_cpld","","",
+ "","","","",
+ "","","","",
+ "","","","";
+};
+
+&io_expander5 {
+ gpio-line-names =
+ "JTAG_MUX_SEL","IOX_BMC_RESET","","",
+ "","","","";
+};
+
+&io_expander6 {
+ gpio-line-names =
+ "OSFP_PHASE_ID0","OSFP_PHASE_ID1",
+ "OSFP_PHASE_ID2","OSFP_PHASE_ID3",
+ "","","","",
+ "OSFP_BOARD_ID0","OSFP_BOARD_ID1",
+ "OSFP_BOARD_ID2","PWRGD_P3V3_N1",
+ "PWRGD_P3V3_N2","","","";
+};
+
+&io_expander7 {
+ gpio-line-names =
+ "RST_CX7_0","RST_CX7_1",
+ "CX0_SSD0_PRSNT_L","CX1_SSD1_PRSNT_L",
+ "CX_BOOT_CMPLT_CX0","CX_BOOT_CMPLT_CX1",
+ "CX_TWARN_CX0_L","CX_TWARN_CX1_L",
+ "CX_OVT_SHDN_CX0","CX_OVT_SHDN_CX1",
+ "FNP_L_CX0","FNP_L_CX1",
+ "","MCU_GPIO","MCU_RST_N","MCU_RECOVERY_N";
+};
+
+&io_expander8 {
+ gpio-line-names =
+ "SEC_RST_CX7_0","SEC_RST_CX7_1",
+ "SEC_CX0_SSD0_PRSNT_L","SEC_CX1_SSD1_PRSNT_L",
+ "SEC_CX_BOOT_CMPLT_CX0","SEC_CX_BOOT_CMPLT_CX1",
+ "SEC_CX_TWARN_CX0_L","SEC_CX_TWARN_CX1_L",
+ "SEC_CX_OVT_SHDN_CX0","SEC_CX_OVT_SHDN_CX1",
+ "SEC_FNP_L_CX0","SEC_FNP_L_CX1",
+ "","SEC_MCU_GPIO","SEC_MCU_RST_N","SEC_MCU_RECOVERY_N";
+};
+
+&io_expander9 {
+ gpio-line-names =
+ "LEAK3_DETECT_R","LEAK1_DETECT_R",
+ "LEAK2_DETECT_R","LEAK0_DETECT_R",
+ "CHASSIS3_LEAK_Q_N_PLD","CHASSIS1_LEAK_Q_N_PLD",
+ "CHASSIS2_LEAK_Q_N_PLD","CHASSIS0_LEAK_Q_N_PLD",
+ "P12V_AUX_FAN_ALERT_PLD_N","P12V_AUX_FAN_OC_PLD_N",
+ "P12V_AUX_FAN_FAULT_PLD_N","LEAK_DETECT_RMC_N_R",
+ "RSVD_RMC_GPIO3_R","SMB_RJ45_FIO_TMP_ALERT",
+ "","";
+};
+
+&io_expander10 {
+ gpio-line-names =
+ "FM_P12V_NIC1_FLTB_R_N","FM_P3V3_NIC1_FAULT_R_N",
+ "OCP_V3_2_PWRBRK_FROM_HOST_ISO_PLD_N",
+ "P12V_AUX_NIC1_SENSE_ALERT_R_N",
+ "FM_P12V_NIC0_FLTB_R_N","FM_P3V3_NIC0_FAULT_R_N",
+ "OCP_SFF_PWRBRK_FROM_HOST_ISO_PLD_N",
+ "P12V_AUX_NIC0_SENSE_ALERT_R_N",
+ "P12V_AUX_PSU_SMB_ALERT_R_L","P12V_SCM_SENSE_ALERT_R_N",
+ "NODEB_PSU_SMB_ALERT_R_L","NODEA_PSU_SMB_ALERT_R_L",
+ "P52V_SENSE_ALERT_PLD_N","P48V_HS2_FAULT_N_PLD",
+ "P48V_HS1_FAULT_N_PLD","";
+};
+
+&io_expander11 {
+ gpio-line-names =
+ "FAN_7_PRESENT_N","FAN_6_PRESENT_N",
+ "FAN_5_PRESENT_N","FAN_4_PRESENT_N",
+ "FAN_3_PRESENT_N","FAN_2_PRESENT_N",
+ "FAN_1_PRESENT_N","FAN_0_PRESENT_N",
+ "PRSNT_CHASSIS3_LEAK_CABLE_R_N","PRSNT_CHASSIS1_LEAK_CABLE_R_N",
+ "PRSNT_CHASSIS2_LEAK_CABLE_R_N","PRSNT_CHASSIS0_LEAK_CABLE_R_N",
+ "PRSNT_RJ45_FIO_N_R","PRSNT_HDDBD_POWER_CABLE_N",
+ "PRSNT_OSFP_POWER_CABLE_N","";
+};
+
+&io_expander12 {
+ gpio-line-names =
+ "RST_OCP_V3_1_R_N","NIC0_PERST_N",
+ "OCP_SFF_PERST_FROM_HOST_ISO_PLD_N","OCP_SFF_MAIN_PWR_EN",
+ "FM_OCP_SFF_PWR_GOOD_PLD","OCP_SFF_AUX_PWR_PLD_EN_R",
+ "HP_LVC3_OCP_V3_1_PWRGD_PLD","HP_OCP_V3_1_HSC_PWRGD_PLD_R",
+ "RST_OCP_V3_2_R_N","NIC1_PERST_N",
+ "OCP_V3_2_PERST_FROM_HOST_ISO_PLD_N","OCP_V3_2_MAIN_PWR_EN",
+ "FM_OCP_V3_2_PWR_GOOD_PLD","OCP_V3_2_AUX_PWR_PLD_EN_R",
+ "HP_LVC3_OCP_V3_2_PWRGD_PLD","HP_OCP_V3_2_HSC_PWRGD_PLD_R";
+};
+
+&io_expander13 {
+ gpio-line-names =
+ "NODEA_NODEB_PWOK_PLD_ISO_R","PWR_EN_NICS",
+ "PWRGD_P12V_AUX_FAN_PLD","P12V_AUX_FAN_EN_PLD",
+ "PWRGD_P3V3_AUX_PLD","PWRGD_P12V_AUX_PLD_ISO_R",
+ "FM_MAIN_PWREN_FROM_RMC_R","FM_MAIN_PWREN_RMC_EN_ISO_R",
+ "PWRGD_RMC_R","PWRGD_P12V_AUX_FAN_PLD",
+ "P12V_AUX_FAN_EN_PLD","FM_SYS_THROTTLE_N",
+ "HP_LVC3_OCP_V3_2_PRSNT2_PLD_N","HP_LVC3_OCP_V3_1_PRSNT2_PLD_N",
+ "","";
+};
+
+&io_expander14 {
+ gpio-line-names =
+ "","","","","","","","",
+ "FM_BOARD_BMC_SKU_ID3","FM_BOARD_BMC_SKU_ID2",
+ "FM_BOARD_BMC_SKU_ID1","FM_BOARD_BMC_SKU_ID0",
+ "FAB_BMC_REV_ID2","FAB_BMC_REV_ID1",
+ "FAB_BMC_REV_ID0","";
+};
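
With the alias table at the top of this file pinning mux channels to fixed bus numbers, the sensors above sit at stable /dev/i2c-N paths. A hedged i2c-dev sketch reading the TMP75-class PDB sensor; the bus number (i2c21, the i2c1mux0ch5 alias) and register semantics are assumptions about this board:

#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/i2c-21", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x4f) < 0)	/* PDB temp sensor above */
		return 1;

	unsigned char reg = 0x00;		/* temperature register */
	unsigned char buf[2];

	if (write(fd, &reg, 1) != 1 || read(fd, buf, 2) != 2)
		return 1;

	/* TMP75 reports 12-bit two's complement, 0.0625 C per LSB. */
	int raw = (buf[0] << 4) | (buf[1] >> 4);

	if (raw & 0x800)
		raw -= 0x1000;
	printf("%.4f C\n", raw * 0.0625);
	close(fd);
	return 0;
}
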
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
index 998598c15fd0..49914a4a179f 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts
@@ -201,7 +201,7 @@
&i2c12 {
status = "okay";
temperature-sensor@4f {
- compatible = "lm75";
+ compatible = "national,lm75";
reg = <0x4f>;
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
index c118d473a76f..cf3f807a38fe 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
@@ -20,10 +20,6 @@
i2c21 = &imux21;
i2c22 = &imux22;
i2c23 = &imux23;
- i2c24 = &imux24;
- i2c25 = &imux25;
- i2c26 = &imux26;
- i2c27 = &imux27;
i2c28 = &imux28;
i2c29 = &imux29;
i2c30 = &imux30;
@@ -70,19 +66,19 @@
};
};
- spi_gpio: spi-gpio {
+ spi_gpio: spi {
status = "okay";
compatible = "spi-gpio";
#address-cells = <1>;
#size-cells = <0>;
- gpio-sck = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
- gpio-mosi = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
- gpio-miso = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>;
+ sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
- tpmdev@0 {
+ tpm@0 {
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
@@ -137,7 +133,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rmii4_default>;
use-ncsi;
- mellanox,multi-host;
};
&rtc {
@@ -198,6 +193,35 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
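
Once the ina238/tmp75 nodes above bind, their readings appear under /sys/class/hwmon with the shunt-resistor scaling already applied. A generic sketch of reading them (the hwmon index is a runtime-dependent placeholder; real code should match on the "name" attribute):

#include <stdio.h>

/* Reads one hwmon attribute; per the hwmon sysfs ABI, values are
 * integers in milli-units (mV, mA, millidegrees C). */
static long hwmon_read(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* hwmon3 is a placeholder index for one of the ina238 devices. */
	long ma = hwmon_read("/sys/class/hwmon/hwmon3/curr1_input");
	long mv = hwmon_read("/sys/class/hwmon/hwmon3/in1_input");

	printf("current=%ld mA bus=%ld mV\n", ma, mv);
	return 0;
}
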
&i2c1 {
@@ -224,6 +248,35 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
&i2c3 {
@@ -276,11 +329,15 @@
reg = <0x49>;
};
- power-monitor@22 {
- compatible = "lltc,ltc4286";
- reg = <0x22>;
- adi,vrange-low-enable;
- shunt-resistor-micro-ohms = <500>;
+ power-monitor@44 {
+ compatible = "lltc,ltc4287";
+ reg = <0x44>;
+ shunt-resistor-micro-ohms = <250>;
+ };
+
+ power-monitor@40 {
+ compatible = "infineon,xdp710";
+ reg = <0x40>;
};
};
@@ -321,6 +378,14 @@
&i2c9 {
status = "okay";
+ mctp-controller;
+ multi-master;
+
+ mctp@10 {
+ compatible = "mctp-i2c-controller";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+
gpio@30 {
compatible = "nxp,pca9555";
reg = <0x30>;
@@ -340,33 +405,6 @@
"","","","";
};
- i2c-mux@71 {
- compatible = "nxp,pca9546";
- reg = <0x71>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- imux24: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
- imux25: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
- imux26: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
- imux27: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
- };
// PTTV FRU
eeprom@52 {
compatible = "atmel,24c64";
@@ -376,6 +414,31 @@
&i2c11 {
status = "okay";
+
+ gpio@30 {
+ compatible = "nxp,pca9555";
+ reg = <0x30>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ gpio@31 {
+ compatible = "nxp,pca9555";
+ reg = <0x31>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "","","","",
+ "","","presence-cmm","",
+ "","","","",
+ "","","","";
+ };
+
+ // Aegis FRU
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
};
&i2c12 {
@@ -399,6 +462,30 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
+ power-monitor@61 {
+ compatible = "isil,isl69260";
+ reg = <0x61>;
+ };
+ power-monitor@62 {
+ compatible = "isil,isl69260";
+ reg = <0x62>;
+ };
+ power-monitor@63 {
+ compatible = "isil,isl69260";
+ reg = <0x63>;
+ };
+ power-monitor@64 {
+ compatible = "infineon,xdpe152c4";
+ reg = <0x64>;
+ };
+ power-monitor@66 {
+ compatible = "infineon,xdpe152c4";
+ reg = <0x66>;
+ };
+ power-monitor@68 {
+ compatible = "infineon,xdpe152c4";
+ reg = <0x68>;
+ };
};
imux29: i2c@1 {
#address-cells = <1>;
@@ -497,13 +584,14 @@
/*O0-O7*/ "","","","","","","","",
/*P0-P7*/ "power-button","power-host-control",
"reset-button","","led-power","","","",
- /*Q0-Q7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","power-chassis-control","","",
/*R0-R7*/ "","","","","","","","",
/*S0-S7*/ "","","","","","","","",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","led-identify-gate","",
/*V0-V7*/ "","","","",
- "rtc-battery-voltage-read-enable","","","",
+ "rtc-battery-voltage-read-enable","",
+ "power-chassis-good","",
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
@@ -521,7 +609,6 @@
&sgpiom0 {
status = "okay";
- max-ngpios = <128>;
ngpios = <128>;
bus-frequency = <2000000>;
gpio-line-names =
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
index 942e53d5c714..41e2246cfbd1 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
@@ -11,7 +11,8 @@
compatible = "facebook,minerva-cmc", "aspeed,ast2600";
aliases {
- serial5 = &uart5;
+ serial4 = &uart5;
+ serial5 = &uart6;
/*
* PCA9548 (2-0077) provides 8 channels connecting to
* 6 pcs of FCB (Fan Controller Board).
@@ -22,6 +23,8 @@
i2c19 = &imux19;
i2c20 = &imux20;
i2c21 = &imux21;
+
+ spi1 = &spi_gpio;
};
chosen {
@@ -43,11 +46,54 @@
leds {
compatible = "gpio-leds";
- led-fan-fault {
- label = "led-fan-fault";
+ led-0 {
+ label = "bmc_heartbeat_amber";
+ gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "fp_id_amber";
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(B, 5) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ label = "power_blue";
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-3 {
+ label = "fan_status_led";
gpios = <&leds_gpio 9 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
+
+ led-4 {
+ label = "fan_fault_led_n";
+ gpios = <&leds_gpio 10 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ spi_gpio: spi {
+ status = "okay";
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
};
};
@@ -77,6 +123,10 @@
};
};
+&mdio3 {
+ status = "okay";
+};
+
&fmc {
status = "okay";
flash@0 {
@@ -94,10 +144,6 @@
};
};
-&rtc {
- status = "okay";
-};
-
&sgpiom0 {
status = "okay";
ngpios = <128>;
@@ -119,14 +165,15 @@
shunt-resistor = <1000>;
};
- power-monitor@67 {
- compatible = "adi,ltc2945";
- reg = <0x67>;
+ power-monitor@44 {
+ compatible = "lltc,ltc4287";
+ reg = <0x44>;
+ shunt-resistor-micro-ohms = <2000>;
};
- power-monitor@68 {
- compatible = "adi,ltc2945";
- reg = <0x68>;
+ power-monitor@43 {
+ compatible = "infineon,xdp710";
+ reg = <0x43>;
};
leds_gpio: gpio@19 {
@@ -145,9 +192,9 @@
reg = <0x4b>;
};
- temperature-sensor@48 {
+ temperature-sensor@4f {
compatible = "ti,tmp75";
- reg = <0x48>;
+ reg = <0x4f>;
};
eeprom@54 {
@@ -182,6 +229,35 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
imux17: i2c@1 {
@@ -200,6 +276,35 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
imux18: i2c@2 {
@@ -218,6 +323,35 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
imux19: i2c@3 {
@@ -236,9 +370,38 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
- imux20: i2c@4 {
+ imux20: i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
@@ -254,9 +417,37 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
- imux21: i2c@5 {
+ imux21: i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
@@ -272,6 +463,34 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ power-sensor@40 {
+ compatible = "ti,ina238";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@41 {
+ compatible = "ti,ina238";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina238";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+
+ power-sensor@45 {
+ compatible = "ti,ina238";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
+ temperature-sensor@4b {
+ compatible = "ti,tmp75";
+ reg = <0x4b>;
+ };
};
};
};
@@ -302,14 +521,16 @@
&i2c9 {
status = "okay";
-};
-&i2c10 {
- status = "okay";
-};
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
-&i2c11 {
- status = "okay";
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
};
&i2c12 {
@@ -338,6 +559,11 @@
compatible = "atmel,24c128";
reg = <0x50>;
};
+
+ eeprom@56 {
+ compatible = "atmel,24c64";
+ reg = <0x56>;
+ };
};
&adc0 {
@@ -355,6 +581,10 @@
pinctrl-0 = <&pinctrl_adc10_default>;
};
+&ehci0 {
+ status = "okay";
+};
+
&ehci1 {
status = "okay";
};
@@ -381,12 +611,12 @@
/*N0-N7*/ "","","","","","","","",
/*O0-O7*/ "","","","","","","","",
/*P0-P7*/ "","","","","","","","",
- /*Q0-Q7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","power-chassis-control","","",
/*R0-R7*/ "","","","","","","","",
- /*S0-S7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","host0-ready",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
- /*V0-V7*/ "","","","","BAT_DETECT","","","",
+ /*V0-V7*/ "","","","","BAT_DETECT","","power-chassis-good","",
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","BLADE_UART_SEL3","","","","","",
/*Y0-Y7*/ "","","","","","","","",
@@ -397,118 +627,118 @@
gpio-line-names =
/*"input pin","output pin"*/
/*A0 - A7*/
- "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN",
- "PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN",
- "PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN",
- "PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN",
- "PRSNT_MTIA_BLADE4_N","PWREN_MTIA_BLADE4_EN",
- "PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN",
- "PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN",
- "PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN",
+ "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN_N",
+ "PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN_N",
+ "PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN_N",
+ "PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN_N",
+ "PRSNT_MTIA_BLADE4_N","PWREN_MTIA_BLADE4_EN_N",
+ "PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN_N",
+ "PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN_N",
+ "PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN_N",
/*B0 - B7*/
- "PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN",
- "PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN",
- "PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN",
- "PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN",
- "PRSNT_MTIA_BLADE12_N","PWREN_MTIA_BLADE12_EN",
- "PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN",
- "PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN",
- "PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN",
+ "PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN_N",
+ "PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN_N",
+ "PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN_N",
+ "PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN_N",
+ "PRSNT_MTIA_BLADE12_N","PWREN_MTIA_BLADE12_EN_N",
+ "PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN_N",
+ "PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN_N",
+ "PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN_N",
/*C0 - C7*/
- "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN",
- "PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN",
- "PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN",
- "PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN",
- "PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN",
- "PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN",
- "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN",
- "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN",
+ "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN_N",
+ "PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN_N",
+ "PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN_N",
+ "PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN_N",
+ "PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN_N",
+ "PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN_N",
+ "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN_N",
+ "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN_N",
/*D0 - D7*/
- "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN",
- "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN",
- "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE4_HSC_EN",
- "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE5_HSC_EN",
- "PWRGD_MTIA_BLADE0_PWROK_L_BUF","PWREN_MTIA_BLADE6_HSC_EN",
- "PWRGD_MTIA_BLADE1_PWROK_L_BUF","PWREN_MTIA_BLADE7_HSC_EN",
- "PWRGD_MTIA_BLADE2_PWROK_L_BUF","PWREN_MTIA_BLADE8_HSC_EN",
- "PWRGD_MTIA_BLADE3_PWROK_L_BUF","PWREN_MTIA_BLADE9_HSC_EN",
+ "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN_N",
+ "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN_N",
+ "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE4_HSC_EN_N",
+ "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE5_HSC_EN_N",
+ "PWRGD_MTIA_BLADE0_PWROK_N","PWREN_MTIA_BLADE6_HSC_EN_N",
+ "PWRGD_MTIA_BLADE1_PWROK_N","PWREN_MTIA_BLADE7_HSC_EN_N",
+ "PWRGD_MTIA_BLADE2_PWROK_N","PWREN_MTIA_BLADE8_HSC_EN_N",
+ "PWRGD_MTIA_BLADE3_PWROK_N","PWREN_MTIA_BLADE9_HSC_EN_N",
/*E0 - E7*/
- "PWRGD_MTIA_BLADE4_PWROK_L_BUF","PWREN_MTIA_BLADE10_HSC_EN",
- "PWRGD_MTIA_BLADE5_PWROK_L_BUF","PWREN_MTIA_BLADE11_HSC_EN",
- "PWRGD_MTIA_BLADE6_PWROK_L_BUF","PWREN_MTIA_BLADE12_HSC_EN",
- "PWRGD_MTIA_BLADE7_PWROK_L_BUF","PWREN_MTIA_BLADE13_HSC_EN",
- "PWRGD_MTIA_BLADE8_PWROK_L_BUF","PWREN_MTIA_BLADE14_HSC_EN",
- "PWRGD_MTIA_BLADE9_PWROK_L_BUF","PWREN_MTIA_BLADE15_HSC_EN",
- "PWRGD_MTIA_BLADE10_PWROK_L_BUF","PWREN_NW_BLADE0_HSC_EN",
- "PWRGD_MTIA_BLADE11_PWROK_L_BUF","PWREN_NW_BLADE1_HSC_EN",
+ "PWRGD_MTIA_BLADE4_PWROK_N","PWREN_MTIA_BLADE10_HSC_EN_N",
+ "PWRGD_MTIA_BLADE5_PWROK_N","PWREN_MTIA_BLADE11_HSC_EN_N",
+ "PWRGD_MTIA_BLADE6_PWROK_N","PWREN_MTIA_BLADE12_HSC_EN_N",
+ "PWRGD_MTIA_BLADE7_PWROK_N","PWREN_MTIA_BLADE13_HSC_EN_N",
+ "PWRGD_MTIA_BLADE8_PWROK_N","PWREN_MTIA_BLADE14_HSC_EN_N",
+ "PWRGD_MTIA_BLADE9_PWROK_N","PWREN_MTIA_BLADE15_HSC_EN_N",
+ "PWRGD_MTIA_BLADE10_PWROK_N","PWREN_NW_BLADE0_HSC_EN_N",
+ "PWRGD_MTIA_BLADE11_PWROK_N","PWREN_NW_BLADE1_HSC_EN_N",
/*F0 - F7*/
- "PWRGD_MTIA_BLADE12_PWROK_L_BUF","PWREN_NW_BLADE2_HSC_EN",
- "PWRGD_MTIA_BLADE13_PWROK_L_BUF","PWREN_NW_BLADE3_HSC_EN",
- "PWRGD_MTIA_BLADE14_PWROK_L_BUF","PWREN_NW_BLADE4_HSC_EN",
- "PWRGD_MTIA_BLADE15_PWROK_L_BUF","PWREN_NW_BLADE5_HSC_EN",
- "PWRGD_NW_BLADE0_PWROK_L_BUF","PWREN_FCB_TOP_L_EN",
- "PWRGD_NW_BLADE1_PWROK_L_BUF","PWREN_FCB_TOP_R_EN",
- "PWRGD_NW_BLADE2_PWROK_L_BUF","PWREN_FCB_MIDDLE_L_EN",
- "PWRGD_NW_BLADE3_PWROK_L_BUF","PWREN_FCB_MIDDLE_R_EN",
+ "PWRGD_MTIA_BLADE12_PWROK_N","PWREN_NW_BLADE2_HSC_EN_N",
+ "PWRGD_MTIA_BLADE13_PWROK_N","PWREN_NW_BLADE3_HSC_EN_N",
+ "PWRGD_MTIA_BLADE14_PWROK_N","PWREN_NW_BLADE4_HSC_EN_N",
+ "PWRGD_MTIA_BLADE15_PWROK_N","PWREN_NW_BLADE5_HSC_EN_N",
+ "PWRGD_NW_BLADE0_PWROK_N","PWREN_FCB_TOP_0_EN_N",
+ "PWRGD_NW_BLADE1_PWROK_N","PWREN_FCB_TOP_1_EN_N",
+ "PWRGD_NW_BLADE2_PWROK_N","PWREN_FCB_MIDDLE_0_EN_N",
+ "PWRGD_NW_BLADE3_PWROK_N","PWREN_FCB_MIDDLE_1_EN_N",
/*G0 - G7*/
- "PWRGD_NW_BLADE4_PWROK_L_BUF","PWREN_FCB_BOTTOM_L_EN",
- "PWRGD_NW_BLADE5_PWROK_L_BUF","PWREN_FCB_BOTTOM_R_EN",
- "PWRGD_FCB_TOP_0_PWROK_L_BUF","FM_CMM_AC_CYCLE_N",
- "PWRGD_FCB_TOP_1_PWROK_L_BUF","MGMT_SFP_TX_DIS",
- "PWRGD_FCB_MIDDLE_0_PWROK_L_BUF","",
- "PWRGD_FCB_MIDDLE_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE0_1_N",
- "PWRGD_FCB_BOTTOM_0_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE2_3_N",
- "PWRGD_FCB_BOTTOM_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE4_5_N",
+ "PWRGD_NW_BLADE4_PWROK_N","PWREN_FCB_BOTTOM_1_EN_N",
+ "PWRGD_NW_BLADE5_PWROK_N","PWREN_FCB_BOTTOM_0_EN_N",
+ "PWRGD_FCB_TOP_0_PWROK_N","FM_CMM_AC_CYCLE_N",
+ "PWRGD_FCB_TOP_1_PWROK_N","MGMT_SFP_TX_DIS",
+ "PWRGD_FCB_MIDDLE_0_PWROK_N","FM_MDIO_SW_SEL",
+ "PWRGD_FCB_MIDDLE_1_PWROK_N","FM_P24V_SMPWR_EN",
+ "PWRGD_FCB_BOTTOM_1_PWROK_N","",
+ "PWRGD_FCB_BOTTOM_0_PWROK_N","",
/*H0 - H7*/
- "LEAK_DETECT_MTIA_BLADE0_N_BUF","RST_I2CRST_MTIA_BLADE6_7_N",
- "LEAK_DETECT_MTIA_BLADE1_N_BUF","RST_I2CRST_MTIA_BLADE8_9_N",
- "LEAK_DETECT_MTIA_BLADE2_N_BUF","RST_I2CRST_MTIA_BLADE10_11_N",
- "LEAK_DETECT_MTIA_BLADE3_N_BUF","RST_I2CRST_MTIA_BLADE12_13_N",
- "LEAK_DETECT_MTIA_BLADE4_N_BUF","RST_I2CRST_MTIA_BLADE14_15_N",
- "LEAK_DETECT_MTIA_BLADE5_N_BUF","RST_I2CRST_NW_BLADE0_1_2_N",
- "LEAK_DETECT_MTIA_BLADE6_N_BUF","RST_I2CRST_NW_BLADE3_4_5_N",
- "LEAK_DETECT_MTIA_BLADE7_N_BUF","RST_I2CRST_FCB_N",
+ "LEAK_DETECT_MTIA_BLADE0_N","",
+ "LEAK_DETECT_MTIA_BLADE1_N","",
+ "LEAK_DETECT_MTIA_BLADE2_N","",
+ "LEAK_DETECT_MTIA_BLADE3_N","",
+ "LEAK_DETECT_MTIA_BLADE4_N","",
+ "LEAK_DETECT_MTIA_BLADE5_N","",
+ "LEAK_DETECT_MTIA_BLADE6_N","",
+ "LEAK_DETECT_MTIA_BLADE7_N","",
/*I0 - I7*/
- "LEAK_DETECT_MTIA_BLADE8_N_BUF","RST_I2CRST_FCB_B_L_N",
- "LEAK_DETECT_MTIA_BLADE9_N_BUF","RST_I2CRST_FCB_B_R_N",
- "LEAK_DETECT_MTIA_BLADE10_N_BUF","RST_I2CRST_FCB_M_L_N",
- "LEAK_DETECT_MTIA_BLADE11_N_BUF","RST_I2CRST_FCB_M_R_N",
- "LEAK_DETECT_MTIA_BLADE12_N_BUF","RST_I2CRST_FCB_T_L_N",
- "LEAK_DETECT_MTIA_BLADE13_N_BUF","RST_I2CRST_FCB_T_R_N",
- "LEAK_DETECT_MTIA_BLADE14_N_BUF","BMC_READY",
- "LEAK_DETECT_MTIA_BLADE15_N_BUF","wFM_88E6393X_BIN_UPDATE_EN_N",
+ "LEAK_DETECT_MTIA_BLADE8_N","RST_I2CRST_FCB_BOTTOM_1_N",
+ "LEAK_DETECT_MTIA_BLADE9_N","RST_I2CRST_FCB_BOTTOM_0_N",
+ "LEAK_DETECT_MTIA_BLADE10_N","RST_I2CRST_FCB_MIDDLE_0_N",
+ "LEAK_DETECT_MTIA_BLADE11_N","RST_I2CRST_FCB_MIDDLE_1_N",
+ "LEAK_DETECT_MTIA_BLADE12_N","RST_I2CRST_FCB_TOP_0_N",
+ "LEAK_DETECT_MTIA_BLADE13_N","RST_I2CRST_FCB_TOP_1_N",
+ "LEAK_DETECT_MTIA_BLADE14_N","BMC_READY",
+ "LEAK_DETECT_MTIA_BLADE15_N","FM_88E6393X_BIN_UPDATE_EN_N",
/*J0 - J7*/
- "LEAK_DETECT_NW_BLADE0_N_BUF","WATER_VALVE_CLOSED_N",
- "LEAK_DETECT_NW_BLADE1_N_BUF","",
- "LEAK_DETECT_NW_BLADE2_N_BUF","",
- "LEAK_DETECT_NW_BLADE3_N_BUF","",
- "LEAK_DETECT_NW_BLADE4_N_BUF","",
- "LEAK_DETECT_NW_BLADE5_N_BUF","",
- "MTIA_BLADE0_STATUS_LED","",
- "MTIA_BLADE1_STATUS_LED","",
+ "LEAK_DETECT_NW_BLADE0_N","WATER_VALVE_CLOSED_N",
+ "LEAK_DETECT_NW_BLADE1_N","",
+ "LEAK_DETECT_NW_BLADE2_N","",
+ "LEAK_DETECT_NW_BLADE3_N","",
+ "LEAK_DETECT_NW_BLADE4_N","",
+ "LEAK_DETECT_NW_BLADE5_N","",
+ "PWRGD_MTIA_BLADE0_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE1_HSC_PWROK_N","",
/*K0 - K7*/
- "MTIA_BLADE2_STATUS_LED","",
- "MTIA_BLADE3_STATUS_LED","",
- "MTIA_BLADE4_STATUS_LED","",
- "MTIA_BLADE5_STATUS_LED","",
- "MTIA_BLADE6_STATUS_LED","",
- "MTIA_BLADE7_STATUS_LED","",
- "MTIA_BLADE8_STATUS_LED","",
- "MTIA_BLADE9_STATUS_LED","",
+ "PWRGD_MTIA_BLADE2_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE3_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE4_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE5_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE6_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE7_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE8_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE9_HSC_PWROK_N","",
/*L0 - L7*/
- "MTIA_BLADE10_STATUS_LED","",
- "MTIA_BLADE11_STATUS_LED","",
- "MTIA_BLADE12_STATUS_LED","",
- "MTIA_BLADE13_STATUS_LED","",
- "MTIA_BLADE14_STATUS_LED","",
- "MTIA_BLADE15_STATUS_LED","",
- "NW_BLADE0_STATUS_LED","",
- "NW_BLADE1_STATUS_LED","",
+ "PWRGD_MTIA_BLADE10_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE11_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE12_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE13_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE14_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE15_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE0_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE1_HSC_PWROK_N","",
/*M0 - M7*/
- "NW_BLADE2_STATUS_LED","",
- "NW_BLADE3_STATUS_LED","",
- "NW_BLADE4_STATUS_LED","",
- "NW_BLADE5_STATUS_LED","",
+ "PWRGD_NW_BLADE2_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE3_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE4_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE5_HSC_PWROK_N","",
"RPU_READY","",
"IT_GEAR_RPU_LINK_N","",
"IT_GEAR_LEAK","",
@@ -516,28 +746,28 @@
/*N0 - N7*/
"VALVE_STS0","",
"VALVE_STS1","",
- "VALVE_STS2","",
- "VALVE_STS3","",
- "CR_TOGGLE_BOOT_BUF_N","",
- "CMM_LC_RDY_LED_N","",
- "CMM_LC_UNRDY_LED_N","",
+ "PCA9555_IRQ0_N","",
+ "PCA9555_IRQ1_N","",
+ "CR_TOGGLE_BOOT_N","",
+ "IRQ_FCB_TOP0_N","",
+ "IRQ_FCB_TOP1_N","",
"CMM_CABLE_CARTRIDGE_PRSNT_BOT_N","",
/*O0 - O7*/
"CMM_CABLE_CARTRIDGE_PRSNT_TOP_N","",
"BOT_BCB_CABLE_PRSNT_N","",
"TOP_BCB_CABLE_PRSNT_N","",
- "CHASSIS0_LEAK_Q_N","",
- "CHASSIS1_LEAK_Q_N","",
- "LEAK0_DETECT","",
- "LEAK1_DETECT","",
- "MGMT_SFP_PRSNT_N","",
+ "IRQ_FCB_MID0_N","",
+ "IRQ_FCB_MID1_N","",
+ "CHASSIS_LEAK0_DETECT_N","",
+ "CHASSIS_LEAK1_DETECT_N","",
+ "VALVE_RMON_A_1","",
/*P0 - P7*/
- "MGMT_SFP_TX_FAULT","",
- "MGMT_SFP_RX_LOS","",
- "","",
- "","",
- "","",
- "","",
- "","",
- "","";
+ "VALVE_RMON_A_2","",
+ "VALVE_RMON_B_1","",
+ "VALVE_RMON_B_2","",
+ "RPU_READY_SPARE","",
+ "IT_GEAR_LEAK_SPARE","",
+ "IT_GEAR_RPU_LINK_SPARE_N","",
+ "IRQ_FCB_BOT0_N","",
+ "IRQ_FCB_BOT0_N","";
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts
new file mode 100644
index 000000000000..839aad4ddd91
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 IBM Corp.
+/dts-v1/;
+
+#include "aspeed-bmc-ibm-blueridge.dts"
+
+/ {
+ model = "Blueridge 4U";
+};
+
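+/* The 4U variant stacks two extra ibm,cffps supplies (0x6a/0x6b) on top of the base Blueridge pair */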
+&i2c3 {
+ power-supply@6a {
+ compatible = "ibm,cffps";
+ reg = <0x6a>;
+ };
+
+ power-supply@6b {
+ compatible = "ibm,cffps";
+ reg = <0x6b>;
+ };
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts
new file mode 100644
index 000000000000..dfe5cc3edb52
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts
@@ -0,0 +1,1686 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 IBM Corp.
+/dts-v1/;
+
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/leds/leds-pca955x.h>
+#include "aspeed-g6.dtsi"
+#include "ibm-power11-quad.dtsi"
+
+/ {
+ model = "Blueridge 2U";
+ compatible = "ibm,blueridge-bmc", "aspeed,ast2600";
+
+ aliases {
+ serial4 = &uart5;
+ i2c16 = &i2c2mux0;
+ i2c17 = &i2c2mux1;
+ i2c18 = &i2c2mux2;
+ i2c19 = &i2c2mux3;
+ i2c20 = &i2c4mux0chn0;
+ i2c21 = &i2c4mux0chn1;
+ i2c22 = &i2c4mux0chn2;
+ i2c23 = &i2c5mux0chn0;
+ i2c24 = &i2c5mux0chn1;
+ i2c25 = &i2c6mux0chn0;
+ i2c26 = &i2c6mux0chn1;
+ i2c27 = &i2c6mux0chn2;
+ i2c28 = &i2c6mux0chn3;
+ i2c29 = &i2c11mux0chn0;
+ i2c30 = &i2c11mux0chn1;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ event_log: region@b3d00000 {
+ reg = <0xb3d00000 0x100000>;
+ no-map;
+ };
+
+ ramoops@b3e00000 {
+ compatible = "ramoops";
+ reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x8000>;
+ pmsg-size = <0x8000>;
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
+ /* LPC FW cycle bridge region requires natural alignment */
+ flash_memory: region@b4000000 {
+ reg = <0xb4000000 0x04000000>; /* 64M */
+ no-map;
+ };
+
+ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ compatible = "shared-dma-pool";
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ no-map;
+ };
+ };
+
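+	/* Four-way mux on i2c2 built from two GPIO select lines; channels surface as buses i2c16-i2c19 */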
+ i2c-mux {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-parent = <&i2c2>;
+ idle-state = <0>;
+ mux-gpios = <&gpio0 ASPEED_GPIO(G, 4) GPIO_ACTIVE_HIGH>,
+ <&gpio0 ASPEED_GPIO(G, 5) GPIO_ACTIVE_HIGH>;
+
+ i2c2mux0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2mux1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2mux2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2mux3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* BMC Card fault LED at the back */
+ led-bmc-ingraham0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure ID LED at the back */
+ led-rear-enc-id0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure fault LED at the back */
+ led-rear-enc-fault0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
+ };
+
+ /* PCIE slot power LED */
+ led-pcieslot-power {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
+ };
+ };
+
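+	/* Fan presence signals, polled once per second through GPIOs on the pca0 LED expander */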
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <1000>;
+
+ event-fan0-presence {
+ gpios = <&pca0 6 GPIO_ACTIVE_LOW>;
+ label = "fan0-presence";
+ linux,code = <6>;
+ };
+
+ event-fan1-presence {
+ gpios = <&pca0 7 GPIO_ACTIVE_LOW>;
+ label = "fan1-presence";
+ linux,code = <7>;
+ };
+
+ event-fan2-presence {
+ gpios = <&pca0 8 GPIO_ACTIVE_LOW>;
+ label = "fan2-presence";
+ linux,code = <8>;
+ };
+
+ event-fan3-presence {
+ gpios = <&pca0 9 GPIO_ACTIVE_LOW>;
+ label = "fan3-presence";
+ linux,code = <9>;
+ };
+
+ event-fan4-presence {
+ gpios = <&pca0 10 GPIO_ACTIVE_LOW>;
+ label = "fan4-presence";
+ linux,code = <10>;
+ };
+
+ event-fan5-presence {
+ gpios = <&pca0 11 GPIO_ACTIVE_LOW>;
+ label = "fan5-presence";
+ linux,code = <11>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc1 7>;
+ };
+};
+
+&adc1 {
+ status = "okay";
+ aspeed,int-vref-microvolt = <2500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+ &pinctrl_adc10_default &pinctrl_adc11_default
+ &pinctrl_adc12_default &pinctrl_adc13_default
+ &pinctrl_adc14_default &pinctrl_adc15_default>;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "bmc-management-ready","","","","","","checkstop","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","",
+ "factory-reset-toggle","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","",
+ /*I0-I7*/ "","","","","","","bmc-secure-boot","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","usb-power","","","","",
+ /*P0-P7*/ "","","","","pcieslot-power","","","",
+ /*Q0-Q7*/ "cfam-reset","","regulator-standby-faulted","","","","","",
+ /*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","",
+ "",
+ /*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
+ "power-ffs-sync-history","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+
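+	/* Boot-time hogs: output-high on an active-low line asserts it, driving the pin physically low */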
+ i2c3-mux-oe-n-hog {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(G, 6) GPIO_ACTIVE_LOW>;
+ line-name = "I2C3_MUX_OE_N";
+ output-high;
+ };
+
+ usb-power-hog {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(O, 3) GPIO_ACTIVE_LOW>;
+ output-high;
+ };
+};
+
+&emmc_controller {
+ status = "okay";
+};
+
+&pinctrl_emmc_default {
+ bias-disable;
+};
+
+&emmc {
+ status = "okay";
+ clk-phase-mmc-hs200 = <180>, <180>;
+};
+
+&ibt {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ gpio@20 {
+ compatible = "ti,tca9554";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names = "",
+ "RUSSEL_FW_I2C_ENABLE_N",
+ "RUSSEL_OPPANEL_PRESENCE_N",
+ "BLYTH_OPPANEL_PRESENCE_N",
+ "CPU_TPM_CARD_PRESENT_N",
+ "DASD_BP2_PRESENT_N",
+ "DASD_BP1_PRESENT_N",
+ "DASD_BP0_PRESENT_N";
+ };
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ power-supply@68 {
+ compatible = "ibm,cffps";
+ reg = <0x68>;
+ };
+
+ power-supply@69 {
+ compatible = "ibm,cffps";
+ reg = <0x69>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLOT0_PRSNT_EN_RSVD", "SLOT1_PRSNT_EN_RSVD",
+ "SLOT2_PRSNT_EN_RSVD", "SLOT3_PRSNT_EN_RSVD",
+ "SLOT4_PRSNT_EN_RSVD", "SLOT0_EXPANDER_PRSNT_N",
+ "SLOT1_EXPANDER_PRSNT_N", "SLOT2_EXPANDER_PRSNT_N",
+ "SLOT3_EXPANDER_PRSNT_N", "SLOT4_EXPANDER_PRSNT_N",
+ "", "", "", "", "", "";
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp275";
+ reg = <0x49>;
+ };
+
+ temperature-sensor@4a {
+ compatible = "ti,tmp275";
+ reg = <0x4a>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c4mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard0-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard0-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c4mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+
+ i2c4mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp275";
+ reg = <0x49>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c5mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard3-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard3-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c5mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard4-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard4-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@4a {
+ compatible = "ti,tmp275";
+ reg = <0x4a>;
+ };
+
+ temperature-sensor@4b {
+ compatible = "ti,tmp275";
+ reg = <0x4b>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c6mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c6mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+
+ i2c6mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c6mux0chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+ };
+};
+
+&i2c7 {
+ multi-master;
+ status = "okay";
+
+ led-controller@30 {
+ compatible = "ibm,pca9552";
+ reg = <0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "pcieslot0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "pcieslot1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "pcieslot2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "pcieslot3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "pcieslot4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "cpu1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "cpu-vrm1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "lcd-russel";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@31 {
+ compatible = "ibm,pca9552";
+ reg = <0x31>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm12";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm13";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm14";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm15";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@32 {
+ compatible = "ibm,pca9552";
+ reg = <0x32>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm16";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm17";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm18";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm19";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm20";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm21";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm22";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm23";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm24";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm25";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm26";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm27";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm28";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm29";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm30";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm31";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@33 {
+ compatible = "ibm,pca9552";
+ reg = <0x33>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "planar";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cpu0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "dasd-pyramid0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "dasd-pyramid1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "dasd-pyramid2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "cpu0-vrm0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "rtc-battery";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "base-blyth";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "pcieslot6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "pcieslot7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "pcieslot8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "pcieslot9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "pcieslot10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "pcieslot11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "tpm-wilson";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ humidity-sensor@40 {
+ compatible = "silabs,si7020";
+ reg = <0x40>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ pwm@52 {
+ compatible = "maxim,max31785a";
+ reg = <0x52>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "front-sys-id0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "front-check-log0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "front-enc-fault1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "front-sys-pwron0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pca0: led-controller@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "fan0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "fan1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "fan2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "fan3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "fan4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "fan5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
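+	/* ibm,op-panel is a slave backend: I2C_OWN_SLAVE_ADDRESS marks 0x62 as an address the BMC controller itself answers on */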
+ lcd-controller@62 {
+ compatible = "ibm,op-panel";
+ reg = <(0x62 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+
+ pressure-sensor@76 {
+ compatible = "infineon,dps310";
+ reg = <0x76>;
+ #io-channel-cells = <0>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ pmic@11 {
+ compatible = "ti,ucd90320";
+ reg = <0x11>;
+ };
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ };
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@4a {
+ compatible = "ti,tmp275";
+ reg = <0x4a>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "power-config-full-load", "";
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLOT6_PRSNT_EN_RSVD", "SLOT7_PRSNT_EN_RSVD",
+ "SLOT8_PRSNT_EN_RSVD", "SLOT9_PRSNT_EN_RSVD",
+ "SLOT10_PRSNT_EN_RSVD", "SLOT11_PRSNT_EN_RSVD",
+ "SLOT6_EXPANDER_PRSNT_N", "SLOT7_EXPANDER_PRSNT_N",
+ "SLOT8_EXPANDER_PRSNT_N", "SLOT9_EXPANDER_PRSNT_N",
+ "SLOT10_EXPANDER_PRSNT_N", "SLOT11_EXPANDER_PRSNT_N",
+ "", "", "", "";
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp423";
+ reg = <0x4c>;
+ };
+
+ temperature-sensor@4d {
+ compatible = "ti,tmp423";
+ reg = <0x4d>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp423";
+ reg = <0x4c>;
+ };
+
+ temperature-sensor@4d {
+ compatible = "ti,tmp423";
+ reg = <0x4d>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp275";
+ reg = <0x48>;
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp275";
+ reg = <0x49>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c11mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard10-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard10-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c11mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+ };
+};
+
+&i2c12 {
+ status = "okay";
+
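+	/* The TPM's firmware event log is handed off through the reserved event_log region above */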
+ tpm@2e {
+ compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ memory-region = <&event_log>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+};
+
+&i2c13 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "nvme0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "nvme1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "nvme2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "nvme3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "nvme4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "nvme5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "nvme6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "nvme7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+};
+
+&i2c14 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "nvme8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "nvme9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "nvme10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "nvme11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "nvme12";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "nvme13";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "nvme14";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "nvme15";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "nvme16";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "nvme17";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "nvme18";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "nvme19";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "nvme20";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "nvme21";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "nvme22";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "nvme23";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&vuart1 {
+ status = "okay";
+};
+
+&vuart2 {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+ memory-region = <&flash_memory>;
+};
+
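+/* Both BMC MACs run RMII with NC-SI sideband to NICs shared with the host */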
+&mac2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii3_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>,
+ <&syscon ASPEED_CLK_MAC3RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>,
+ <&syscon ASPEED_CLK_MAC4RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
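+/* wdt1 only pulses the external WDTRST1 pin (push-pull, active-high); reset-type "none" means it never resets the SoC itself */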
+&wdt1 {
+ aspeed,reset-type = "none";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+ status = "okay";
+};
+
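+/* LPC KCS channels for host-to-BMC IPMI-style traffic at the listed I/O ports */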
+&kcs2 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca8 0xcac>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+ aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
index 6fdda42575df..7364adc6b80d 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
@@ -570,11 +570,6 @@
status = "okay";
};
-&xdma {
- status = "okay";
- memory-region = <&vga_memory>;
-};
-
&kcs2 {
status = "okay";
aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
index 214b2e6a4c6d..513077a1f4be 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
@@ -2486,11 +2486,6 @@
status = "okay";
};
-&xdma {
- status = "okay";
- memory-region = <&vga_memory>;
-};
-
&kcs2 {
status = "okay";
aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
new file mode 100644
index 000000000000..c24e464e5faa
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
@@ -0,0 +1,3876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 IBM Corp.
+/dts-v1/;
+
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/leds/leds-pca955x.h>
+#include "aspeed-g6.dtsi"
+#include "ibm-power11-quad.dtsi"
+
+/ {
+ model = "Fuji";
+ compatible = "ibm,fuji-bmc", "aspeed,ast2600";
+
+ aliases {
+ i2c500 = &cfam4_i2c0;
+ i2c501 = &cfam4_i2c1;
+ i2c510 = &cfam4_i2c10;
+ i2c511 = &cfam4_i2c11;
+ i2c512 = &cfam4_i2c12;
+ i2c513 = &cfam4_i2c13;
+ i2c514 = &cfam4_i2c14;
+ i2c515 = &cfam4_i2c15;
+ i2c602 = &cfam5_i2c2;
+ i2c603 = &cfam5_i2c3;
+ i2c610 = &cfam5_i2c10;
+ i2c611 = &cfam5_i2c11;
+ i2c614 = &cfam5_i2c14;
+ i2c615 = &cfam5_i2c15;
+ i2c616 = &cfam5_i2c16;
+ i2c617 = &cfam5_i2c17;
+ i2c700 = &cfam6_i2c0;
+ i2c701 = &cfam6_i2c1;
+ i2c710 = &cfam6_i2c10;
+ i2c711 = &cfam6_i2c11;
+ i2c712 = &cfam6_i2c12;
+ i2c713 = &cfam6_i2c13;
+ i2c714 = &cfam6_i2c14;
+ i2c715 = &cfam6_i2c15;
+ i2c802 = &cfam7_i2c2;
+ i2c803 = &cfam7_i2c3;
+ i2c810 = &cfam7_i2c10;
+ i2c811 = &cfam7_i2c11;
+ i2c814 = &cfam7_i2c14;
+ i2c815 = &cfam7_i2c15;
+ i2c816 = &cfam7_i2c16;
+ i2c817 = &cfam7_i2c17;
+
+ i2c16 = &i2c4mux0chn0;
+ i2c17 = &i2c4mux0chn1;
+ i2c18 = &i2c4mux0chn2;
+ i2c19 = &i2c5mux0chn0;
+ i2c20 = &i2c5mux0chn1;
+ i2c21 = &i2c5mux0chn2;
+ i2c22 = &i2c5mux0chn3;
+ i2c23 = &i2c6mux0chn0;
+ i2c24 = &i2c6mux0chn1;
+ i2c25 = &i2c6mux0chn2;
+ i2c26 = &i2c6mux0chn3;
+ i2c27 = &i2c14mux0chn0;
+ i2c28 = &i2c14mux0chn1;
+ i2c29 = &i2c14mux0chn2;
+ i2c30 = &i2c14mux0chn3;
+ i2c31 = &i2c14mux1chn0;
+ i2c32 = &i2c14mux1chn1;
+ i2c33 = &i2c14mux1chn2;
+ i2c34 = &i2c14mux1chn3;
+ i2c35 = &i2c15mux0chn0;
+ i2c36 = &i2c15mux0chn1;
+ i2c37 = &i2c15mux0chn2;
+ i2c38 = &i2c15mux0chn3;
+ i2c39 = &i2c15mux1chn0;
+ i2c40 = &i2c15mux1chn1;
+ i2c41 = &i2c15mux1chn2;
+ i2c42 = &i2c15mux1chn3;
+ i2c43 = &i2c15mux2chn0;
+ i2c44 = &i2c15mux2chn1;
+ i2c45 = &i2c15mux2chn2;
+ i2c46 = &i2c15mux2chn3;
+ i2c47 = &i2c8mux0chn0;
+ i2c48 = &i2c8mux0chn1;
+
+ serial4 = &uart5;
+
+ sbefifo500 = &sbefifo500;
+ sbefifo501 = &sbefifo501;
+ sbefifo510 = &sbefifo510;
+ sbefifo511 = &sbefifo511;
+ sbefifo512 = &sbefifo512;
+ sbefifo513 = &sbefifo513;
+ sbefifo514 = &sbefifo514;
+ sbefifo515 = &sbefifo515;
+ sbefifo602 = &sbefifo602;
+ sbefifo603 = &sbefifo603;
+ sbefifo610 = &sbefifo610;
+ sbefifo611 = &sbefifo611;
+ sbefifo614 = &sbefifo614;
+ sbefifo615 = &sbefifo615;
+ sbefifo616 = &sbefifo616;
+ sbefifo617 = &sbefifo617;
+ sbefifo700 = &sbefifo700;
+ sbefifo701 = &sbefifo701;
+ sbefifo710 = &sbefifo710;
+ sbefifo711 = &sbefifo711;
+ sbefifo712 = &sbefifo712;
+ sbefifo713 = &sbefifo713;
+ sbefifo714 = &sbefifo714;
+ sbefifo715 = &sbefifo715;
+ sbefifo802 = &sbefifo802;
+ sbefifo803 = &sbefifo803;
+ sbefifo810 = &sbefifo810;
+ sbefifo811 = &sbefifo811;
+ sbefifo814 = &sbefifo814;
+ sbefifo815 = &sbefifo815;
+ sbefifo816 = &sbefifo816;
+ sbefifo817 = &sbefifo817;
+
+ scom500 = &scom500;
+ scom501 = &scom501;
+ scom510 = &scom510;
+ scom511 = &scom511;
+ scom512 = &scom512;
+ scom513 = &scom513;
+ scom514 = &scom514;
+ scom515 = &scom515;
+ scom602 = &scom602;
+ scom603 = &scom603;
+ scom610 = &scom610;
+ scom611 = &scom611;
+ scom614 = &scom614;
+ scom615 = &scom615;
+ scom616 = &scom616;
+ scom617 = &scom617;
+ scom700 = &scom700;
+ scom701 = &scom701;
+ scom710 = &scom710;
+ scom711 = &scom711;
+ scom712 = &scom712;
+ scom713 = &scom713;
+ scom714 = &scom714;
+ scom715 = &scom715;
+ scom802 = &scom802;
+ scom803 = &scom803;
+ scom810 = &scom810;
+ scom811 = &scom811;
+ scom814 = &scom814;
+ scom815 = &scom815;
+ scom816 = &scom816;
+ scom817 = &scom817;
+
+ spi50 = &cfam4_spi0;
+ spi51 = &cfam4_spi1;
+ spi52 = &cfam4_spi2;
+ spi53 = &cfam4_spi3;
+ spi60 = &cfam5_spi0;
+ spi61 = &cfam5_spi1;
+ spi62 = &cfam5_spi2;
+ spi63 = &cfam5_spi3;
+ spi70 = &cfam6_spi0;
+ spi71 = &cfam6_spi1;
+ spi72 = &cfam6_spi2;
+ spi73 = &cfam6_spi3;
+ spi80 = &cfam7_spi0;
+ spi81 = &cfam7_spi1;
+ spi82 = &cfam7_spi2;
+ spi83 = &cfam7_spi3;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ event_log: region@b3d00000 {
+ reg = <0xb3d00000 0x100000>;
+ no-map;
+ };
+
+ ramoops@b3e00000 {
+ compatible = "ramoops";
+ reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x8000>;
+ pmsg-size = <0x8000>;
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
+ /* LPC FW cycle bridge region requires natural alignment */
+ flash_memory: region@b4000000 {
+ reg = <0xb4000000 0x04000000>; /* 64M */
+ no-map;
+ };
+
+ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ compatible = "shared-dma-pool";
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ no-map;
+ };
+ };
+
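+	/* Fan presence via pca0; unlike Blueridge, the line numbering runs in reverse (fan0 on line 15) */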
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <1000>;
+
+ event-fan0-presence {
+ gpios = <&pca0 15 GPIO_ACTIVE_LOW>;
+ label = "fan0-presence";
+ linux,code = <15>;
+ };
+
+ event-fan1-presence {
+ gpios = <&pca0 14 GPIO_ACTIVE_LOW>;
+ label = "fan1-presence";
+ linux,code = <14>;
+ };
+
+ event-fan2-presence {
+ gpios = <&pca0 13 GPIO_ACTIVE_LOW>;
+ label = "fan2-presence";
+ linux,code = <13>;
+ };
+
+ event-fan3-presence {
+ gpios = <&pca0 12 GPIO_ACTIVE_LOW>;
+ label = "fan3-presence";
+ linux,code = <12>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* RTC battery fault LED at the back */
+ led-rtc-battery {
+ gpios = <&gpio0 ASPEED_GPIO(H, 0) GPIO_ACTIVE_LOW>;
+ };
+
+ /* BMC Card fault LED at the back */
+ led-bmc {
+ gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure Identify LED at the back */
+ led-rear-enc-id0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure fault LED at the back */
+ led-rear-enc-fault0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
+ };
+
+ /* PCIE slot power LED */
+ led-pcieslot-power {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc1 7>;
+ };
+};
+
+&adc1 {
+ status = "okay";
+ aspeed,int-vref-microvolt = <2500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+ &pinctrl_adc10_default &pinctrl_adc11_default
+ &pinctrl_adc12_default &pinctrl_adc13_default
+ &pinctrl_adc14_default &pinctrl_adc15_default>;
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "bmc-management-ready","","","","","","checkstop","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","",
+ "factory-reset-toggle","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "led-rtc-battery","led-bmc","led-rear-enc-id0","led-rear-enc-fault0","","",
+ "","",
+ /*I0-I7*/ "","","","","","","bmc-secure-boot","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","usb-power","","","","",
+ /*P0-P7*/ "","","","","led-pcieslot-power","","","",
+ /*Q0-Q7*/ "","","regulator-standby-faulted","","","","","",
+ /*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","",
+ "I2C_FLASH_MICRO_N","","",
+ /*S0-S7*/ "","","","","power-ffs-sync-history","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","BMC_3RESTART_ATTEMPT_P","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+
+ usb-power-hog {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(O, 3) GPIO_ACTIVE_LOW>;
+ output-high;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@62 {
+ compatible = "nxp,pca9552";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-ps0",
+ "presence-ps1",
+ "presence-ps2",
+ "presence-ps3",
+ "presence-pdb",
+ "presence-tpm",
+ "", "",
+ "presence-cp0",
+ "presence-cp1",
+ "presence-cp2",
+ "presence-cp3",
+ "presence-dasd",
+ "presence-lcd-op",
+ "presence-base-op",
+ "";
+ };
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+
+ power-supply@68 {
+ compatible = "ibm,cffps";
+ reg = <0x68>;
+ };
+
+ power-supply@69 {
+ compatible = "ibm,cffps";
+ reg = <0x69>;
+ };
+
+ power-supply@6b {
+ compatible = "ibm,cffps";
+ reg = <0x6b>;
+ };
+
+ power-supply@6d {
+ compatible = "ibm,cffps";
+ reg = <0x6d>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ led-controller@65 {
+ compatible = "nxp,pca9552";
+ reg = <0x65>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-cable-card1",
+ "presence-cable-card2",
+ "presence-cable-card3",
+ "presence-cable-card4",
+ "presence-cable-card5",
+ "expander-cable-card1",
+ "expander-cable-card2",
+ "expander-cable-card3",
+ "expander-cable-card4",
+ "expander-cable-card5";
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c4mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+
+ led-controller@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c01-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c01-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c4mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c02-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c02-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c4mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c03-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c03-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ led-controller@66 {
+ compatible = "nxp,pca9552";
+ reg = <0x66>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-cable-card6",
+ "presence-cable-card7",
+ "presence-cable-card8",
+ "presence-cable-card9",
+ "presence-cable-card10",
+ "presence-cable-card11",
+ "expander-cable-card6",
+ "expander-cable-card7",
+ "expander-cable-card8",
+ "expander-cable-card9",
+ "expander-cable-card10",
+ "expander-cable-card11";
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c5mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c04-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c04-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c5mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c05-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c05-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c5mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+
+ led-controller@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c06-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c06-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c5mux0chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+
+ led-controller@63 {
+ compatible = "nxp,pca9551";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c07-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c07-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c6mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c08-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c08-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c6mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+
+ led-controller@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c09-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c09-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c6mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+
+ led-controller@63 {
+ compatible = "nxp,pca9551";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c10-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c10-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c6mux0chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ led-controller@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "cablecard-c11-cxp-top";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "cablecard-c11-cxp-bot";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+ };
+
+ led-controller@65 {
+ compatible = "nxp,pca9552";
+ reg = <0x65>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "pcieslot-c01";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "pcieslot-c02";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "pcieslot-c03";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "pcieslot-c04";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "pcieslot-c05";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "pcieslot-c06";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "pcieslot-c07";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "pcieslot-c08";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "pcieslot-c09";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "pcieslot-c10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "pcieslot-c11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ led-controller@31 {
+ compatible = "ibm,pca9552";
+ reg = <0x31>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm12";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm13";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm14";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm15";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@32 {
+ compatible = "ibm,pca9552";
+ reg = <0x32>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm16";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm17";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm18";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm19";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm20";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm21";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm22";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm23";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm24";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm25";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm26";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm27";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm28";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm29";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm30";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm31";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@33 {
+ compatible = "ibm,pca9552";
+ reg = <0x33>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm32";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm33";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm34";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm35";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm36";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm37";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm38";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm39";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm40";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm41";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm42";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm43";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm44";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm45";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm46";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm47";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@30 {
+ compatible = "ibm,pca9552";
+ reg = <0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "ddimm48";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "ddimm49";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "ddimm50";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "ddimm51";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "ddimm52";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "ddimm53";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "ddimm54";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "ddimm55";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "ddimm56";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "ddimm57";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "ddimm58";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "ddimm59";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "ddimm60";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "ddimm61";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "ddimm62";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "ddimm63";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@34 {
+ compatible = "ibm,pca9552";
+ reg = <0x34>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "planar";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "tpm";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "cpu3-c61";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "cpu0-c14";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "opencapi-connector3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "opencapi-connector4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "opencapi-connector5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "vrm4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "vrm5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "vrm6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "vrm7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "vrm12";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "vrm13";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "vrm14";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "vrm15";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ led-controller@35 {
+ compatible = "ibm,pca9552";
+ reg = <0x35>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "dasd-backplane";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "power-distribution";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "cpu1-c19";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "cpu2-c56";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "opencapi-connector0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "opencapi-connector1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "opencapi-connector2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "vrm0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "vrm1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "vrm2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "vrm3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "vrm8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "vrm9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ reg = <14>;
+ default-state = "keep";
+ label = "vrm10";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ default-state = "keep";
+ label = "vrm11";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ pmic@11 {
+ compatible = "ti,ucd90320";
+ reg = <0x11>;
+ };
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+ reset-gpios = <&gpio0 ASPEED_GPIO(S, 5) GPIO_ACTIVE_LOW>;
+
+ i2c8mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c8mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+
+ tpm@2e {
+ compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ memory-region = <&event_log>;
+ };
+};
+
+&i2c13 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c14 {
+ multi-master;
+ status = "okay";
+
+ lcd-controller@62 {
+ compatible = "ibm,op-panel";
+ reg = <(0x62 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ idle-state = <1>;
+
+ i2c14mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ };
+ };
+
+ i2c14mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "front-sys-id0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "front-check-log0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "front-enc-fault1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "front-sys-pwron0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+ };
+
+ i2c14mux0chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwm@52 {
+ compatible = "maxim,max31785a";
+ reg = <0x52>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ led-controller@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ reg = <0>;
+ default-state = "keep";
+ label = "nvme0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-state = "keep";
+ label = "nvme1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ default-state = "keep";
+ label = "nvme2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ reg = <3>;
+ default-state = "keep";
+ label = "nvme3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ reg = <4>;
+ default-state = "keep";
+ label = "nvme4";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ reg = <5>;
+ default-state = "keep";
+ label = "nvme5";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ reg = <6>;
+ default-state = "keep";
+ label = "nvme6";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ reg = <7>;
+ default-state = "keep";
+ label = "nvme7";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ reg = <8>;
+ default-state = "keep";
+ label = "nvme8";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ reg = <9>;
+ default-state = "keep";
+ label = "nvme9";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ reg = <10>;
+ default-state = "keep";
+ label = "fan0";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ reg = <11>;
+ default-state = "keep";
+ label = "fan1";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ reg = <12>;
+ default-state = "keep";
+ label = "fan2";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ reg = <13>;
+ default-state = "keep";
+ label = "fan3";
+ retain-state-shutdown;
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pca0: led-controller@61 {
+ compatible = "nxp,pca9552";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x61>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "","","","",
+ "","","","",
+ "","","","",
+ "presence-fan3",
+ "presence-fan2",
+ "presence-fan1",
+ "presence-fan0";
+ };
+ };
+ };
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9546";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c14mux1chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c15mux0chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux0chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux0chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux0chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+ };
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9546";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c15mux1chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux1chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux1chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux1chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+ };
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9546";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c15mux2chn0: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux2chn1: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c15mux2chn2: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c15mux2chn3: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
+
+&emmc_controller {
+ status = "okay";
+};
+
+&pinctrl_emmc_default {
+ bias-disable;
+};
+
+&emmc {
+ status = "okay";
+ clk-phase-mmc-hs200 = <210>, <228>;
+};
+
+&ibt {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&vuart1 {
+ status = "okay";
+};
+
+&vuart2 {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+ memory-region = <&flash_memory>;
+};
+
+&mac2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii3_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>,
+ <&syscon ASPEED_CLK_MAC3RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>,
+ <&syscon ASPEED_CLK_MAC4RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&wdt1 {
+ aspeed,reset-type = "none";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+ status = "okay";
+};
+
+&kcs2 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca8 0xcac>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+ aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&fsi_hub0 {
+ cfam@4,0 { /* DCM2_C0 */
+ reg = <4 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <4>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam4_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom500: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo500: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom501: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo501: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom510: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo510: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom511: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo511: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom512: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo512: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom513: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo513: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom514: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo514: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam4_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom515: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo515: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam4_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam4_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam4_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam4_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+
+ cfam@5,0 { /* DCM2_C1 */
+ reg = <5 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <5>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam5_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom602: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo602: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom603: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo603: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom610: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo610: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom611: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo611: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom614: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo614: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom615: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo615: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom616: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo616: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam5_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom617: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo617: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam5_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam5_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam5_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam5_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+
+ cfam@6,0 { /* DCM3_C0 */
+ reg = <6 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <6>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam6_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom700: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo700: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom701: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo701: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom710: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo710: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom711: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo711: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom712: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo712: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom713: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo713: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom714: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo714: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam6_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom715: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo715: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam6_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam6_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam6_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam6_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+
+ cfam@7,0 { /* DCM3_C1 */
+ reg = <7 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <7>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam7_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom802: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo802: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom803: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo803: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom810: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo810: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom811: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo811: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom814: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo814: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom815: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo815: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom816: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo816: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam7_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom817: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo817: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam7_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam7_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam7_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam7_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
index 5cb0094e21e0..0776b72c2199 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
@@ -1722,11 +1722,6 @@
status = "okay";
};
-&xdma {
- status = "okay";
- memory-region = <&vga_memory>;
-};
-
&kcs2 {
status = "okay";
aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
index dcbc16308ab5..f3efecc7eb8d 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
@@ -1138,7 +1138,7 @@
reg = <6>;
temperature-sensor@4c {
- compatible = "ti,tmp423";
+ compatible = "ti,tmp432";
reg = <0x4c>;
};
};
@@ -1599,7 +1599,7 @@
reg = <6>;
temperature-sensor@4c {
- compatible = "ti,tmp423";
+ compatible = "ti,tmp432";
reg = <0x4c>;
};
};
@@ -1615,7 +1615,7 @@
};
temperature-sensor@4c {
- compatible = "ti,tmp423";
+ compatible = "ti,tmp432";
reg = <0x4c>;
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts
index 0dea014e4f30..78a5656ef75d 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts
@@ -814,10 +814,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&wdt1 {
aspeed,reset-type = "none";
aspeed,external-signal;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts
index 5a98a19f445e..7a78c34cff40 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts
@@ -123,10 +123,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&pwm_tacho {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts
index d5b7d28cda88..da55e7b29fac 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts
@@ -118,10 +118,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&pwm_tacho {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts
index c0847636f20b..370738572a55 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts
@@ -263,10 +263,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&gpio {
pin_gpio_b0 {
gpio-hog;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts
index ac0d666ca10e..b1d0ff85d397 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts
@@ -284,10 +284,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&ibt {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts
index 893e621ecab1..24df24ad9c80 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts
@@ -289,10 +289,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&pwm_tacho {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts
index bbf864f84d37..a0e8c97e944a 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts
@@ -938,10 +938,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&wdt1 {
aspeed,reset-type = "none";
aspeed,external-signal;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
index 213023bc5aec..b31eb8e58c6b 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
@@ -870,11 +870,6 @@
<&pinctrl_lsirq_default>;
};
-&xdma {
- status = "okay";
- memory-region = <&vga_memory>;
-};
-
&kcs2 {
status = "okay";
aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts
index a20a532fc280..8b1e82c8cdfe 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts
@@ -661,10 +661,6 @@
memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&wdt1 {
aspeed,reset-type = "none";
aspeed,external-signal;
@@ -696,9 +692,4 @@
memory-region = <&video_engine_memory>;
};
-&xdma {
- status = "okay";
- memory-region = <&vga_memory>;
-};
-
#include "ibm-power9-dual.dtsi"
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts
index 3d2d8db73ca6..9904f0a58cfa 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts
@@ -466,8 +466,6 @@
};
&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-
pinctrl_gpioh_unbiased: gpioi_unbiased {
pins = "A8", "C7", "B7", "A7", "D7", "B6", "A6", "E7";
bias-disable;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts
index 50f3c6a5c0c8..b961dff388d1 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts
@@ -123,10 +123,6 @@
status = "okay";
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
-};
-
&pwm_tacho {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
index c669ec202085..78c967812492 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
@@ -122,8 +122,8 @@
reg = <0x1e6c0080 0x80>;
};
- cvic: copro-interrupt-controller@1e6c2000 {
- compatible = "aspeed,ast2400-cvic", "aspeed-cvic";
+ cvic: interrupt-controller@1e6c2000 {
+ compatible = "aspeed,ast2400-cvic", "aspeed,cvic";
valid-sources = <0x7fffffff>;
reg = <0x1e6c2000 0x80>;
};
@@ -230,6 +230,9 @@
sram: sram@1e720000 {
compatible = "mmio-sram";
reg = <0x1e720000 0x8000>; // 32K
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
video: video@1e700000 {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
index 6e05cbcce49c..57a699a7c149 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
@@ -139,8 +139,8 @@
reg = <0x1e6c0080 0x80>;
};
- cvic: copro-interrupt-controller@1e6c2000 {
- compatible = "aspeed,ast2500-cvic", "aspeed-cvic";
+ cvic: interrupt-controller@1e6c2000 {
+ compatible = "aspeed,ast2500-cvic", "aspeed,cvic";
valid-sources = <0xffffffff>;
copro-sw-interrupts = <1>;
reg = <0x1e6c2000 0x80>;
@@ -281,17 +281,6 @@
interrupts = <0x19>;
};
- xdma: xdma@1e6e7000 {
- compatible = "aspeed,ast2500-xdma";
- reg = <0x1e6e7000 0x100>;
- clocks = <&syscon ASPEED_CLK_GATE_BCLK>;
- resets = <&syscon ASPEED_RESET_XDMA>;
- interrupts-extended = <&vic 6>, <&scu_ic ASPEED_AST2500_SCU_IC_PCIE_RESET_LO_TO_HI>;
- aspeed,pcie-device = "bmc";
- aspeed,scu = <&syscon>;
- status = "disabled";
- };
-
adc: adc@1e6e9000 {
compatible = "aspeed,ast2500-adc";
reg = <0x1e6e9000 0xb0>;
@@ -314,6 +303,9 @@
sram: sram@1e720000 {
compatible = "mmio-sram";
reg = <0x1e720000 0x9000>; // 36K
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
sdmmc: sd-controller@1e740000 {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
index 0c00882f111a..8ed715bd53aa 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
@@ -231,41 +231,33 @@
resets = <&syscon ASPEED_RESET_MII>;
};
- mac0: ftgmac@1e660000 {
+ mac0: ethernet@1e660000 {
compatible = "aspeed,ast2600-mac", "faraday,ftgmac100";
reg = <0x1e660000 0x180>;
- #address-cells = <1>;
- #size-cells = <0>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>;
status = "disabled";
};
- mac1: ftgmac@1e680000 {
+ mac1: ethernet@1e680000 {
compatible = "aspeed,ast2600-mac", "faraday,ftgmac100";
reg = <0x1e680000 0x180>;
- #address-cells = <1>;
- #size-cells = <0>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_GATE_MAC2CLK>;
status = "disabled";
};
- mac2: ftgmac@1e670000 {
+ mac2: ethernet@1e670000 {
compatible = "aspeed,ast2600-mac", "faraday,ftgmac100";
reg = <0x1e670000 0x180>;
- #address-cells = <1>;
- #size-cells = <0>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>;
status = "disabled";
};
- mac3: ftgmac@1e690000 {
+ mac3: ethernet@1e690000 {
compatible = "aspeed,ast2600-mac", "faraday,ftgmac100";
reg = <0x1e690000 0x180>;
- #address-cells = <1>;
- #size-cells = <0>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>;
status = "disabled";
@@ -398,19 +390,6 @@
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
};
- xdma: xdma@1e6e7000 {
- compatible = "aspeed,ast2600-xdma";
- reg = <0x1e6e7000 0x100>;
- clocks = <&syscon ASPEED_CLK_GATE_BCLK>;
- resets = <&syscon ASPEED_RESET_DEV_XDMA>, <&syscon ASPEED_RESET_RC_XDMA>;
- reset-names = "device", "root-complex";
- interrupts-extended = <&gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
- <&scu_ic0 ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI>;
- aspeed,pcie-device = "bmc";
- aspeed,scu = <&syscon>;
- status = "disabled";
- };
-
adc0: adc@1e6e9000 {
compatible = "aspeed,ast2600-adc0";
reg = <0x1e6e9000 0x100>;
diff --git a/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi b/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi
new file mode 100644
index 000000000000..68c941a194b6
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi
@@ -0,0 +1,1539 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 IBM Corp.
+
+/ {
+ aliases {
+ i2c100 = &cfam0_i2c0;
+ i2c101 = &cfam0_i2c1;
+ i2c110 = &cfam0_i2c10;
+ i2c111 = &cfam0_i2c11;
+ i2c112 = &cfam0_i2c12;
+ i2c113 = &cfam0_i2c13;
+ i2c114 = &cfam0_i2c14;
+ i2c115 = &cfam0_i2c15;
+ i2c202 = &cfam1_i2c2;
+ i2c203 = &cfam1_i2c3;
+ i2c210 = &cfam1_i2c10;
+ i2c211 = &cfam1_i2c11;
+ i2c214 = &cfam1_i2c14;
+ i2c215 = &cfam1_i2c15;
+ i2c216 = &cfam1_i2c16;
+ i2c217 = &cfam1_i2c17;
+ i2c300 = &cfam2_i2c0;
+ i2c301 = &cfam2_i2c1;
+ i2c310 = &cfam2_i2c10;
+ i2c311 = &cfam2_i2c11;
+ i2c312 = &cfam2_i2c12;
+ i2c313 = &cfam2_i2c13;
+ i2c314 = &cfam2_i2c14;
+ i2c315 = &cfam2_i2c15;
+ i2c402 = &cfam3_i2c2;
+ i2c403 = &cfam3_i2c3;
+ i2c410 = &cfam3_i2c10;
+ i2c411 = &cfam3_i2c11;
+ i2c414 = &cfam3_i2c14;
+ i2c415 = &cfam3_i2c15;
+ i2c416 = &cfam3_i2c16;
+ i2c417 = &cfam3_i2c17;
+
+ sbefifo100 = &sbefifo100;
+ sbefifo101 = &sbefifo101;
+ sbefifo110 = &sbefifo110;
+ sbefifo111 = &sbefifo111;
+ sbefifo112 = &sbefifo112;
+ sbefifo113 = &sbefifo113;
+ sbefifo114 = &sbefifo114;
+ sbefifo115 = &sbefifo115;
+ sbefifo202 = &sbefifo202;
+ sbefifo203 = &sbefifo203;
+ sbefifo210 = &sbefifo210;
+ sbefifo211 = &sbefifo211;
+ sbefifo214 = &sbefifo214;
+ sbefifo215 = &sbefifo215;
+ sbefifo216 = &sbefifo216;
+ sbefifo217 = &sbefifo217;
+ sbefifo300 = &sbefifo300;
+ sbefifo301 = &sbefifo301;
+ sbefifo310 = &sbefifo310;
+ sbefifo311 = &sbefifo311;
+ sbefifo312 = &sbefifo312;
+ sbefifo313 = &sbefifo313;
+ sbefifo314 = &sbefifo314;
+ sbefifo315 = &sbefifo315;
+ sbefifo402 = &sbefifo402;
+ sbefifo403 = &sbefifo403;
+ sbefifo410 = &sbefifo410;
+ sbefifo411 = &sbefifo411;
+ sbefifo414 = &sbefifo414;
+ sbefifo415 = &sbefifo415;
+ sbefifo416 = &sbefifo416;
+ sbefifo417 = &sbefifo417;
+
+ scom100 = &scom100;
+ scom101 = &scom101;
+ scom110 = &scom110;
+ scom111 = &scom111;
+ scom112 = &scom112;
+ scom113 = &scom113;
+ scom114 = &scom114;
+ scom115 = &scom115;
+ scom202 = &scom202;
+ scom203 = &scom203;
+ scom210 = &scom210;
+ scom211 = &scom211;
+ scom214 = &scom214;
+ scom215 = &scom215;
+ scom216 = &scom216;
+ scom217 = &scom217;
+ scom300 = &scom300;
+ scom301 = &scom301;
+ scom310 = &scom310;
+ scom311 = &scom311;
+ scom312 = &scom312;
+ scom313 = &scom313;
+ scom314 = &scom314;
+ scom315 = &scom315;
+ scom402 = &scom402;
+ scom403 = &scom403;
+ scom410 = &scom410;
+ scom411 = &scom411;
+ scom414 = &scom414;
+ scom415 = &scom415;
+ scom416 = &scom416;
+ scom417 = &scom417;
+
+ spi10 = &cfam0_spi0;
+ spi11 = &cfam0_spi1;
+ spi12 = &cfam0_spi2;
+ spi13 = &cfam0_spi3;
+ spi20 = &cfam1_spi0;
+ spi21 = &cfam1_spi1;
+ spi22 = &cfam1_spi2;
+ spi23 = &cfam1_spi3;
+ spi30 = &cfam2_spi0;
+ spi31 = &cfam2_spi1;
+ spi32 = &cfam2_spi2;
+ spi33 = &cfam2_spi3;
+ spi40 = &cfam3_spi0;
+ spi41 = &cfam3_spi1;
+ spi42 = &cfam3_spi2;
+ spi43 = &cfam3_spi3;
+ };
+};
+
+&fsim0 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ status = "okay";
+ bus-frequency = <100000000>;
+ cfam-reset-gpios = <&gpio0 ASPEED_GPIO(Q, 0) GPIO_ACTIVE_HIGH>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam0_i2c0: i2c-bus@0 {
+ reg = <0>; /* OMI01 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom100: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo100: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c1: i2c-bus@1 {
+ reg = <1>; /* OMI23 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom101: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo101: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom110: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo110: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom111: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo111: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom112: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo112: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom113: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo113: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom114: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo114: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam0_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom115: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo115: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam0_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam0_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam0_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam0_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi_hub0: fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&fsi_hub0 {
+ cfam@1,0 {
+ reg = <1 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <1>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam1_i2c2: i2c-bus@2 {
+ reg = <2>; /* OMI45 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom202: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo202: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c3: i2c-bus@3 {
+ reg = <3>; /* OMI67 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom203: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo203: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom210: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo210: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom211: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo211: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom214: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo214: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom215: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo215: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom216: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo216: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam1_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom217: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo217: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam1_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam1_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam1_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam1_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+
+ cfam@2,0 {
+ reg = <2 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <2>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam2_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom300: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo300: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom301: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo301: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom310: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo310: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom311: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo311: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom312: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo312: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom313: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo313: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom314: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo314: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam2_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom315: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo315: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam2_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam2_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam2_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam2_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+
+ cfam@3,0 {
+ reg = <3 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <3>;
+
+ scom@1000 {
+ compatible = "ibm,p9-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom402: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo402: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom403: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo403: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom410: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo410: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom411: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo411: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom414: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo414: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom415: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo415: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom416: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo416: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+
+ cfam3_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom417: scom@1000 {
+ compatible = "ibm,i2cr-scom";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo417: sbefifo@2400 {
+ compatible = "ibm,odyssey-sbefifo";
+ reg = <0x2400 0x400>;
+ };
+ };
+ };
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_spi0: spi@0 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam3_spi1: spi@20 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam3_spi2: spi@40 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+
+ cfam3_spi3: spi@60 {
+ compatible = "ibm,spi-fsi";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ compatible = "atmel,at25";
+ reg = <0>;
+ address-width = <24>;
+ pagesize = <256>;
+ size = <0x80000>;
+ spi-max-frequency = <10000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+
+ occ {
+ compatible = "ibm,p10-occ";
+
+ hwmon {
+ compatible = "ibm,p10-occ-hwmon";
+ ibm,no-poll-on-init;
+ };
+ };
+ };
+
+ fsi@3400 {
+ compatible = "ibm,p9-fsi-controller";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-scan-on-init;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts b/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts
index 8789fae178bf..4f8ddc1b3ab7 100644
--- a/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts
+++ b/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts
@@ -11,6 +11,10 @@
model = "BCM21664 Garnet board";
compatible = "brcm,bcm21664-garnet", "brcm,bcm21664";
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ };
+
memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
diff --git a/arch/arm/boot/dts/broadcom/bcm21664.dtsi b/arch/arm/boot/dts/broadcom/bcm21664.dtsi
index fa73600e883e..f0d0300079b6 100644
--- a/arch/arm/boot/dts/broadcom/bcm21664.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm21664.dtsi
@@ -1,21 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation
-#include <dt-bindings/clock/bcm21664.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "bcm2166x-common.dtsi"
/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "BCM21664 SoC";
- compatible = "brcm,bcm21664";
interrupt-parent = <&gic>;
- chosen {
- bootargs = "console=ttyS0,115200n8";
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -34,312 +24,46 @@
reg = <1>;
};
};
+};
- gic: interrupt-controller@3ff00100 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <0>;
- interrupt-controller;
- reg = <0x3ff01000 0x1000>,
- <0x3ff00100 0x100>;
- };
-
- smc@3404e000 {
- compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
- reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */
- };
-
- uartb: serial@3e000000 {
- compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart";
- reg = <0x3e000000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- uartb2: serial@3e001000 {
- compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart";
- reg = <0x3e001000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- uartb3: serial@3e002000 {
- compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart";
- reg = <0x3e002000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- L2: cache-controller@3ff20000 {
- compatible = "arm,pl310-cache";
- reg = <0x3ff20000 0x1000>;
- cache-unified;
- cache-level = <2>;
- };
-
- brcm,resetmgr@35001f00 {
- compatible = "brcm,bcm21664-resetmgr";
- reg = <0x35001f00 0x24>;
- };
-
- timer@35006000 {
- compatible = "brcm,kona-timer";
- reg = <0x35006000 0x1c>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>;
- };
-
- gpio: gpio@35003000 {
- compatible = "brcm,bcm21664-gpio", "brcm,kona-gpio";
- reg = <0x35003000 0x524>;
- interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- gpio-controller;
- interrupt-controller;
- };
-
- sdio1: mmc@3f180000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x3f180000 0x801c>;
- interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>;
- status = "disabled";
- };
-
- sdio2: mmc@3f190000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x3f190000 0x801c>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>;
- status = "disabled";
- };
-
- sdio3: mmc@3f1a0000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x3f1a0000 0x801c>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>;
- status = "disabled";
- };
-
- sdio4: mmc@3f1b0000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x3f1b0000 0x801c>;
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>;
- status = "disabled";
- };
-
- bsc1: i2c@3e016000 {
- compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
- reg = <0x3e016000 0x70>;
- interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>;
- status = "disabled";
- };
-
- bsc2: i2c@3e017000 {
- compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
- reg = <0x3e017000 0x70>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>;
- status = "disabled";
- };
-
- bsc3: i2c@3e018000 {
- compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
- reg = <0x3e018000 0x70>;
- interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>;
- status = "disabled";
- };
-
- bsc4: i2c@3e01c000 {
- compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
- reg = <0x3e01c000 0x70>;
- interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>;
- status = "disabled";
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- /*
- * Fixed clocks are defined before CCUs whose
- * clocks may depend on them.
- */
-
- ref_32k_clk: ref_32k {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- };
-
- bbl_32k_clk: bbl_32k {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- };
-
- ref_13m_clk: ref_13m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <13000000>;
- };
-
- var_13m_clk: var_13m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <13000000>;
- };
-
- dft_19_5m_clk: dft_19_5m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <19500000>;
- };
-
- ref_crystal_clk: ref_crystal {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <26000000>;
- };
-
- ref_52m_clk: ref_52m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- var_52m_clk: var_52m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- usb_otg_ahb_clk: usb_otg_ahb {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- ref_96m_clk: ref_96m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <96000000>;
- };
-
- var_96m_clk: var_96m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <96000000>;
- };
-
- ref_104m_clk: ref_104m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <104000000>;
- };
-
- var_104m_clk: var_104m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <104000000>;
- };
-
- ref_156m_clk: ref_156m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <156000000>;
+&apps {
+ gic: interrupt-controller@1c01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x01c01000 0x1000>,
+ <0x01c00100 0x100>;
};
- var_156m_clk: var_156m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <156000000>;
+ L2: cache-controller@1c20000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x01c20000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
};
+};
- root_ccu: root_ccu@35001000 {
- compatible = "brcm,bcm21664-root-ccu";
- reg = <0x35001000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "frac_1m";
- };
+&bsc1 {
+ compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
+};
- aon_ccu: aon_ccu@35002000 {
- compatible = "brcm,bcm21664-aon-ccu";
- reg = <0x35002000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "hub_timer";
- };
+&bsc2 {
+ compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
+};
- master_ccu: master_ccu@3f001000 {
- compatible = "brcm,bcm21664-master-ccu";
- reg = <0x3f001000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "sdio1",
- "sdio2",
- "sdio3",
- "sdio4",
- "sdio1_sleep",
- "sdio2_sleep",
- "sdio3_sleep",
- "sdio4_sleep";
- };
+&bsc3 {
+ compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
+};
- slave_ccu: slave_ccu@3e011000 {
- compatible = "brcm,bcm21664-slave-ccu";
- reg = <0x3e011000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "uartb",
- "uartb2",
- "uartb3",
- "bsc1",
- "bsc2",
- "bsc3",
- "bsc4";
- };
- };
+&bsc4 {
+ compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c";
+};
- usbotg: usb@3f120000 {
- compatible = "snps,dwc2";
- reg = <0x3f120000 0x10000>;
- interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&usb_otg_ahb_clk>;
- clock-names = "otg";
- phys = <&usbphy>;
- phy-names = "usb2-phy";
- status = "disabled";
- };
+&gpio {
+ compatible = "brcm,bcm21664-gpio", "brcm,kona-gpio";
+};
- usbphy: usb-phy@3f130000 {
- compatible = "brcm,kona-usb2-phy";
- reg = <0x3f130000 0x28>;
- #phy-cells = <0>;
- status = "disabled";
- };
+&smc {
+ compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
};
diff --git a/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi
new file mode 100644
index 000000000000..87180b7fd695
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Common device tree for components shared between the BCM21664 and BCM23550
+ * SoCs.
+ *
+ * Copyright (C) 2016 Broadcom
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/clock/bcm21664.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Hub bus */
+ hub: hub-bus@34000000 {
+ compatible = "simple-bus";
+ ranges = <0 0x34000000 0x102f83ac>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ smc: smc@4e000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x0004e000 0x400>; /* 1 KiB in SRAM */
+ };
+
+ resetmgr: reset-controller@1001f00 {
+ compatible = "brcm,bcm21664-resetmgr";
+ reg = <0x01001f00 0x24>;
+ };
+
+ gpio: gpio@1003000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x01003000 0x524>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ };
+
+ timer@1006000 {
+ compatible = "brcm,kona-timer";
+ reg = <0x01006000 0x1c>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>;
+ };
+ };
+
+ /* Slaves bus */
+ slaves: slaves-bus@3e000000 {
+ compatible = "simple-bus";
+ ranges = <0 0x3e000000 0x0001c070>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ uartb: serial@0 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x00000000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uartb2: serial@1000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x00001000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uartb3: serial@2000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x00002000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ bsc1: i2c@16000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x00016000 0x70>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>;
+ status = "disabled";
+ };
+
+ bsc2: i2c@17000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x00017000 0x70>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>;
+ status = "disabled";
+ };
+
+ bsc3: i2c@18000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x00018000 0x70>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>;
+ status = "disabled";
+ };
+
+ bsc4: i2c@1c000 {
+ /* Compatible filled by SoC DTSI */
+ reg = <0x0001c000 0x70>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>;
+ status = "disabled";
+ };
+ };
+
+ /* Apps bus */
+ apps: apps-bus@3e300000 {
+ compatible = "simple-bus";
+ ranges = <0 0x3e300000 0x01c02000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usbotg: usb@e20000 {
+ compatible = "snps,dwc2";
+ reg = <0x00e20000 0x10000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&usb_otg_ahb_clk>;
+ clock-names = "otg";
+ phys = <&usbphy>;
+ phy-names = "usb2-phy";
+ status = "disabled";
+ };
+
+ usbphy: usb-phy@e30000 {
+ compatible = "brcm,kona-usb2-phy";
+ reg = <0x00e30000 0x28>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ sdio1: mmc@e80000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00e80000 0x801c>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>;
+ status = "disabled";
+ };
+
+ sdio2: mmc@e90000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00e90000 0x801c>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>;
+ status = "disabled";
+ };
+
+ sdio3: mmc@ea0000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00ea0000 0x801c>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>;
+ status = "disabled";
+ };
+
+ sdio4: mmc@eb0000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00eb0000 0x801c>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>;
+ status = "disabled";
+ };
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * Fixed clocks are defined before CCUs whose
+ * clocks may depend on them.
+ */
+
+ ref_32k_clk: ref_32k {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ bbl_32k_clk: bbl_32k {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ ref_13m_clk: ref_13m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <13000000>;
+ };
+
+ var_13m_clk: var_13m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <13000000>;
+ };
+
+ dft_19_5m_clk: dft_19_5m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <19500000>;
+ };
+
+ ref_crystal_clk: ref_crystal {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ };
+
+ ref_52m_clk: ref_52m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ var_52m_clk: var_52m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ usb_otg_ahb_clk: usb_otg_ahb {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ ref_96m_clk: ref_96m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <96000000>;
+ };
+
+ var_96m_clk: var_96m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <96000000>;
+ };
+
+ ref_104m_clk: ref_104m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <104000000>;
+ };
+
+ var_104m_clk: var_104m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <104000000>;
+ };
+
+ ref_156m_clk: ref_156m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <156000000>;
+ };
+
+ var_156m_clk: var_156m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <156000000>;
+ };
+
+ root_ccu: root_ccu@35001000 {
+ compatible = "brcm,bcm21664-root-ccu";
+ reg = <0x35001000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "frac_1m";
+ };
+
+ aon_ccu: aon_ccu@35002000 {
+ compatible = "brcm,bcm21664-aon-ccu";
+ reg = <0x35002000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "hub_timer";
+ };
+
+ slave_ccu: slave_ccu@3e011000 {
+ compatible = "brcm,bcm21664-slave-ccu";
+ reg = <0x3e011000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "uartb",
+ "uartb2",
+ "uartb3",
+ "bsc1",
+ "bsc2",
+ "bsc3",
+ "bsc4";
+ };
+
+ master_ccu: master_ccu@3f001000 {
+ compatible = "brcm,bcm21664-master-ccu";
+ reg = <0x3f001000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "sdio1",
+ "sdio2",
+ "sdio3",
+ "sdio4",
+ "sdio1_sleep",
+ "sdio2_sleep",
+ "sdio3_sleep",
+ "sdio4_sleep";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm23550.dtsi b/arch/arm/boot/dts/broadcom/bcm23550.dtsi
index 50ebe93d6bd0..c1c69381286b 100644
--- a/arch/arm/boot/dts/broadcom/bcm23550.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm23550.dtsi
@@ -1,45 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
- * BSD LICENSE
+ * Device tree for the BCM23550 SoC.
*
- * Copyright(c) 2016 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016 Broadcom
*/
-/* BCM23550 and BCM21664 have almost identical clocks */
-#include <dt-bindings/clock/bcm21664.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "bcm2166x-common.dtsi"
/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "BCM23550 SoC";
- compatible = "brcm,bcm23550";
interrupt-parent = <&gic>;
cpus {
@@ -80,180 +48,9 @@
clock-frequency = <1000000000>;
};
};
+};
- /* Hub bus */
- hub@34000000 {
- compatible = "simple-bus";
- ranges = <0 0x34000000 0x102f83ac>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- smc@4e000 {
- compatible = "brcm,bcm23550-smc", "brcm,kona-smc";
- reg = <0x0004e000 0x400>; /* 1 KiB in SRAM */
- };
-
- resetmgr: reset-controller@1001f00 {
- compatible = "brcm,bcm21664-resetmgr";
- reg = <0x01001f00 0x24>;
- };
-
- gpio: gpio@1003000 {
- compatible = "brcm,bcm23550-gpio", "brcm,kona-gpio";
- reg = <0x01003000 0x524>;
- interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- gpio-controller;
- interrupt-controller;
- };
-
- timer@1006000 {
- compatible = "brcm,kona-timer";
- reg = <0x01006000 0x1c>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>;
- };
- };
-
- /* Slaves bus */
- slaves@3e000000 {
- compatible = "simple-bus";
- ranges = <0 0x3e000000 0x0001c070>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- uartb: serial@0 {
- compatible = "snps,dw-apb-uart";
- reg = <0x00000000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- uartb2: serial@1000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x00001000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- uartb3: serial@2000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x00002000 0x118>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- status = "disabled";
- };
-
- bsc1: i2c@16000 {
- compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
- reg = <0x00016000 0x70>;
- interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>;
- status = "disabled";
- };
-
- bsc2: i2c@17000 {
- compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
- reg = <0x00017000 0x70>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>;
- status = "disabled";
- };
-
- bsc3: i2c@18000 {
- compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
- reg = <0x00018000 0x70>;
- interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>;
- status = "disabled";
- };
-
- bsc4: i2c@1c000 {
- compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
- reg = <0x0001c000 0x70>;
- interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>;
- status = "disabled";
- };
- };
-
- /* Apps bus */
- apps@3e300000 {
- compatible = "simple-bus";
- ranges = <0 0x3e300000 0x01b77000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- usbotg: usb@e20000 {
- compatible = "snps,dwc2";
- reg = <0x00e20000 0x10000>;
- interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&usb_otg_ahb_clk>;
- clock-names = "otg";
- phys = <&usbphy>;
- phy-names = "usb2-phy";
- status = "disabled";
- };
-
- usbphy: usb-phy@e30000 {
- compatible = "brcm,kona-usb2-phy";
- reg = <0x00e30000 0x28>;
- #phy-cells = <0>;
- status = "disabled";
- };
-
- sdio1: mmc@e80000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x00e80000 0x801c>;
- interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>;
- status = "disabled";
- };
-
- sdio2: mmc@e90000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x00e90000 0x801c>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>;
- status = "disabled";
- };
-
- sdio3: mmc@ea0000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x00ea0000 0x801c>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>;
- status = "disabled";
- };
-
- sdio4: mmc@eb0000 {
- compatible = "brcm,kona-sdhci";
- reg = <0x00eb0000 0x801c>;
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>;
- status = "disabled";
- };
-
+&apps {
cdc: cdc@1b0e000 {
compatible = "brcm,bcm23550-cdc";
reg = <0x01b0e000 0x78>;
@@ -267,147 +64,28 @@
reg = <0x01b21000 0x1000>,
<0x01b22000 0x1000>;
};
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- /*
- * Fixed clocks are defined before CCUs whose
- * clocks may depend on them.
- */
-
- ref_32k_clk: ref_32k {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- };
-
- bbl_32k_clk: bbl_32k {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- };
-
- ref_13m_clk: ref_13m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <13000000>;
- };
-
- var_13m_clk: var_13m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <13000000>;
- };
-
- dft_19_5m_clk: dft_19_5m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <19500000>;
- };
-
- ref_crystal_clk: ref_crystal {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <26000000>;
- };
-
- ref_52m_clk: ref_52m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- var_52m_clk: var_52m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- usb_otg_ahb_clk: usb_otg_ahb {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <52000000>;
- };
-
- ref_96m_clk: ref_96m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <96000000>;
- };
-
- var_96m_clk: var_96m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <96000000>;
- };
-
- ref_104m_clk: ref_104m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <104000000>;
- };
-
- var_104m_clk: var_104m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <104000000>;
- };
+};
- ref_156m_clk: ref_156m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <156000000>;
- };
+&bsc1 {
+ compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
+};
- var_156m_clk: var_156m {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <156000000>;
- };
+&bsc2 {
+ compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
+};
- root_ccu: root_ccu@35001000 {
- compatible = "brcm,bcm21664-root-ccu";
- reg = <0x35001000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "frac_1m";
- };
+&bsc3 {
+ compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
+};
- aon_ccu: aon_ccu@35002000 {
- compatible = "brcm,bcm21664-aon-ccu";
- reg = <0x35002000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "hub_timer";
- };
+&bsc4 {
+ compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c";
+};
- slave_ccu: slave_ccu@3e011000 {
- compatible = "brcm,bcm21664-slave-ccu";
- reg = <0x3e011000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "uartb",
- "uartb2",
- "uartb3",
- "bsc1",
- "bsc2",
- "bsc3",
- "bsc4";
- };
+&gpio {
+ compatible = "brcm,bcm23550-gpio", "brcm,kona-gpio";
+};
- master_ccu: master_ccu@3f001000 {
- compatible = "brcm,bcm21664-master-ccu";
- reg = <0x3f001000 0x0f00>;
- #clock-cells = <1>;
- clock-output-names = "sdio1",
- "sdio2",
- "sdio3",
- "sdio4",
- "sdio1_sleep",
- "sdio2_sleep",
- "sdio3_sleep",
- "sdio4_sleep";
- };
- };
+&smc {
+ compatible = "brcm,bcm23550-smc", "brcm,kona-smc";
};
diff --git a/arch/arm/boot/dts/broadcom/bcm2837.dtsi b/arch/arm/boot/dts/broadcom/bcm2837.dtsi
index 84c08b46519d..c281697142b1 100644
--- a/arch/arm/boot/dts/broadcom/bcm2837.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2837.dtsi
@@ -9,7 +9,7 @@
<0x40000000 0x40000000 0x00001000>;
dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
- local_intc: local_intc@40000000 {
+ local_intc: interrupt-controller@40000000 {
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
index 25eeacf6a248..45bd27906f29 100644
--- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
@@ -215,11 +215,15 @@
reg = <0x50>;
pagesize = <32>;
read-only;
- #address-cells = <1>;
- #size-cells = <1>;
- mac_address: mac-address@66 {
- reg = <0x66 0x6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mac_address: mac-address@66 {
+ reg = <0x66 0x6>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi b/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi
index b0854d881ac6..71a8b77b46f4 100644
--- a/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi
@@ -55,11 +55,15 @@
reg = <0x50>;
pagesize = <32>;
read-only;
- #address-cells = <1>;
- #size-cells = <1>;
- mac_address: mac-address@66 {
- reg = <0x66 0x6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mac_address: mac-address@66 {
+ reg = <0x66 0x6>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts
index 65f390bf8975..84f39dec3c42 100644
--- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts
@@ -130,8 +130,8 @@
#gpio-cells = <2>;
};
- temp: lm75@48 {
- compatible = "lm75";
+ temp: temperature-sensor@48 {
+ compatible = "national,lm75";
reg = <0x48>;
};
diff --git a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
index f3a3cb6ac311..8208c6a9627a 100644
--- a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
@@ -423,14 +423,14 @@
status = "okay";
/* U26 temperature sensor placed near SoC */
- temp1: nct75@4c {
- compatible = "lm75";
+ temp1: temperature-sensor@4c {
+ compatible = "ti,tmp75c";
reg = <0x4c>;
};
/* U27 temperature sensor placed near RTC battery */
- temp2: nct75@4d {
- compatible = "lm75";
+ temp2: temperature-sensor@4d {
+ compatible = "ti,tmp75c";
reg = <0x4d>;
};
diff --git a/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts
index c6fbdd29019f..b9ffd9e5faac 100644
--- a/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts
@@ -198,8 +198,6 @@
dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx0_default>;
- #address-cells = <1>;
- #size-cells = <0>;
i2c-analog-filter;
i2c-digital-filter;
i2c-digital-filter-width-ns = <35>;
diff --git a/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts b/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts
index f3cbb675cea4..3b38707d736e 100644
--- a/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts
@@ -207,8 +207,6 @@
status = "okay";
i2c0: i2c@600 {
- #address-cells = <1>;
- #size-cells = <0>;
dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx0_default>;
@@ -254,8 +252,6 @@
status = "okay";
i2c6: i2c@600 {
- #address-cells = <1>;
- #size-cells = <0>;
dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx6_default>;
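The #address-cells/#size-cells pairs can be dropped from these board files because the sam9x60.dtsi hunks further down add the same properties to the SoC's flexcom i2c nodes, where they belong. A sketch of the resulting split (simplified; the real nodes also carry clocks, dmas, and pinctrl):

	/* SoC .dtsi -- the controller declares its own bus geometry */
	i2c0: i2c@600 {
		compatible = "microchip,sam9x60-i2c";
		reg = <0x600 0x200>;
		#address-cells = <1>;
		#size-cells = <0>;
		status = "disabled";
	};

	/* board .dts -- only board-level wiring remains */
	&i2c0 {
		status = "okay";
	};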
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi
index 4617805c7748..c173f49cb910 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi
@@ -31,6 +31,14 @@
};
};
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-wilc1000";
reset-gpios = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
@@ -70,6 +78,11 @@
mcp16502@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
status = "okay";
lpm-gpios = <&pioBU 0 GPIO_ACTIVE_LOW>;
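The same two-part change repeats on several boards below: a fixed 5 V board rail is described once, then fed to every MCP16502 input so the regulator framework can walk the full supply chain (per the mcp16502 binding, lvin supplies the LDOs and pvin1..pvin4 supply bucks 1..4). A condensed sketch of the pattern, mirroring the names in the hunk above:

	/* Sketch: one upstream rail feeding all PMIC inputs */
	pmic@5b {
		compatible = "microchip,mcp16502";
		reg = <0x5b>;
		lvin-supply = <&reg_5v>;	/* LDO input */
		pvin1-supply = <&reg_5v>;	/* buck 1 input */
		pvin2-supply = <&reg_5v>;
		pvin3-supply = <&reg_5v>;
		pvin4-supply = <&reg_5v>;
	};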
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
index 6b02b7bcfd49..951a0c97d3c6 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
@@ -84,6 +84,14 @@
device_type = "memory";
reg = <0x20000000 0x20000000>;
};
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
};
&adc {
@@ -144,6 +152,11 @@
mcp16502@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
status = "okay";
lpm-gpios = <&pioBU 0 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts b/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts
index 999adeca6f33..5e2bb517a480 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts
@@ -78,6 +78,14 @@
linux,default-trigger = "heartbeat";
};
};
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_MAIN_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
};
&adc {
@@ -190,6 +198,11 @@
mcp16502@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
status = "okay";
lpm-gpios = <&pioBU 7 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
index 009d2c832421..645e49fdb7fe 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
@@ -72,6 +72,14 @@
device_type = "memory";
reg = <0x60000000 0x10000000>; /* 256 MiB DDR3L-1066 16-bit */
};
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
};
&adc {
@@ -189,6 +197,11 @@
pmic@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
regulators {
vdd_3v3: VDD_IO {
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
index 20b2497657ae..ed75d491a246 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
@@ -88,6 +88,14 @@
reg = <0x60000000 0x20000000>;
};
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
sound: sound {
compatible = "simple-audio-card";
simple-audio-card,name = "sama7g5ek audio";
@@ -239,6 +247,11 @@
mcp16502@5b {
compatible = "microchip,mcp16502";
reg = <0x5b>;
+ lvin-supply = <&reg_5v>;
+ pvin1-supply = <&reg_5v>;
+ pvin2-supply = <&reg_5v>;
+ pvin3-supply = <&reg_5v>;
+ pvin4-supply = <&reg_5v>;
status = "okay";
regulators {
@@ -403,6 +416,42 @@
i2c-digital-filter;
i2c-digital-filter-width-ns = <35>;
status = "okay";
+
+ eeprom0: eeprom@52 {
+ compatible = "microchip,24aa025e48";
+ reg = <0x52>;
+ size = <256>;
+ pagesize = <16>;
+ vcc-supply = <&vdd_3v3>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eeprom0_eui48: eui48@fa {
+ reg = <0xfa 0x6>;
+ };
+ };
+ };
+
+ eeprom1: eeprom@53 {
+ compatible = "microchip,24aa025e48";
+ reg = <0x53>;
+ size = <256>;
+ pagesize = <16>;
+ vcc-supply = <&vdd_3v3>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eeprom1_eui48: eui48@fa {
+ reg = <0xfa 0x6>;
+ };
+ };
+ };
};
};
@@ -440,6 +489,8 @@
&pinctrl_gmac0_txck_default
&pinctrl_gmac0_phy_irq>;
phy-mode = "rgmii-id";
+ nvmem-cells = <&eeprom0_eui48>;
+ nvmem-cell-names = "mac-address";
status = "okay";
ethernet-phy@7 {
@@ -457,6 +508,8 @@
&pinctrl_gmac1_mdio_default
&pinctrl_gmac1_phy_irq>;
phy-mode = "rmii";
+ nvmem-cells = <&eeprom1_eui48>;
+ nvmem-cell-names = "mac-address";
status = "okay"; /* Conflict with pdmc0. */
ethernet-phy@0 {
diff --git a/arch/arm/boot/dts/microchip/at91rm9200.dtsi b/arch/arm/boot/dts/microchip/at91rm9200.dtsi
index 16c675e3a890..02a838541dc3 100644
--- a/arch/arm/boot/dts/microchip/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/microchip/at91rm9200.dtsi
@@ -225,7 +225,7 @@
pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x800>;
atmel,mux-mask = <
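"simple-bus" asserts that the children are independent memory-mapped devices on a transparent bus; these pinctrl nodes are really one controller whose gpio banks share its register window, which is what "simple-mfd" expresses: the children are still populated, but as sub-devices of the parent. A trimmed sketch of the resulting shape (interrupt wiring omitted):

	pinctrl@fffff400 {
		compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0xfffff400 0xfffff400 0x800>;

		pioA: gpio@fffff400 {
			compatible = "atmel,at91rm9200-gpio";
			reg = <0xfffff400 0x200>;
			gpio-controller;
			#gpio-cells = <2>;
		};
	};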
diff --git a/arch/arm/boot/dts/microchip/at91sam9260.dtsi b/arch/arm/boot/dts/microchip/at91sam9260.dtsi
index e56d5546554c..0038183e9a53 100644
--- a/arch/arm/boot/dts/microchip/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9260.dtsi
@@ -170,7 +170,7 @@
pinctrl: pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x600>;
atmel,mux-mask = <
diff --git a/arch/arm/boot/dts/microchip/at91sam9261.dtsi b/arch/arm/boot/dts/microchip/at91sam9261.dtsi
index 307b60658014..b57a7fd67197 100644
--- a/arch/arm/boot/dts/microchip/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9261.dtsi
@@ -317,7 +317,7 @@
pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x600>;
atmel,mux-mask =
diff --git a/arch/arm/boot/dts/microchip/at91sam9263.dtsi b/arch/arm/boot/dts/microchip/at91sam9263.dtsi
index 75d8ff2d12c8..b95d4016ae9f 100644
--- a/arch/arm/boot/dts/microchip/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9263.dtsi
@@ -167,7 +167,7 @@
pinctrl@fffff200 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff200 0xfffff200 0xa00>;
atmel,mux-mask = <
diff --git a/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts b/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts
index 172af6ff4b18..3e5eab57d1a5 100644
--- a/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts
@@ -40,13 +40,13 @@
leds {
compatible = "gpio-leds";
- ds1 {
+ led-ds1 {
label = "ds1";
gpios = <&pioB 9 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- ds5 {
+ led-ds5 {
label = "ds5";
gpios = <&pioB 8 GPIO_ACTIVE_LOW>;
};
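The gpio-leds children gain a led- prefix so the node names match what the gpio-leds binding expects; the human-readable identity stays in "label" (newer boards would typically use color/function properties instead). The shape after the rename, condensed:

	leds {
		compatible = "gpio-leds";

		led-ds1 {
			label = "ds1";
			gpios = <&pioB 9 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "heartbeat";
		};
	};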
diff --git a/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts b/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts
index af70eb8a3a02..e0c1e8df81b1 100644
--- a/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts
@@ -37,71 +37,71 @@
leds {
compatible = "gpio-leds";
- power_blue {
+ led-power-blue {
label = "smartgw:power:blue";
gpios = <&pioC 21 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- power_green {
+ led-power-green {
label = "smartgw:power:green";
gpios = <&pioC 20 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- power_red {
+ led-power-red {
label = "smartgw:power:red";
gpios = <&pioC 19 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- radio_blue {
+ led-radio-blue {
label = "smartgw:radio:blue";
gpios = <&pioC 18 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- radio_green {
+ led-radio-green {
label = "smartgw:radio:green";
gpios = <&pioC 17 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- radio_red {
+ led-radio-red {
label = "smartgw:radio:red";
gpios = <&pioC 16 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- internet_blue {
+ led-internet-blue {
label = "smartgw:internet:blue";
gpios = <&pioC 15 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- internet_green {
+ led-internet-green {
label = "smartgw:internet:green";
gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- internet_red {
+ led-internet-red {
label = "smartgw:internet:red";
gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- heartbeat {
+ led-heartbeat {
label = "smartgw:heartbeat";
gpios = <&pioB 8 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- pb18 {
+ led-pb18 {
status = "disabled";
};
- pd21 {
+ led-pd21 {
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
index 325c63a53118..c54eb21d5cba 100644
--- a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
@@ -190,7 +190,7 @@
pinctrl@fffff200 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff200 0xfffff200 0xa00>;
atmel,mux-mask = <
diff --git a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
index 8dc04e9031a6..844bd50943fc 100644
--- a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
@@ -226,7 +226,7 @@
pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91sam9x5-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x800>;
atmel,mux-mask = <
diff --git a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
index 4c644d4c6be7..643c3b2ab97e 100644
--- a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
@@ -207,19 +207,19 @@
leds {
compatible = "gpio-leds";
- d8 {
+ led-d8 {
label = "d8";
gpios = <&pioB 4 GPIO_ACTIVE_LOW>;
linux,default-trigger = "mmc0";
};
- d9 {
+ led-d9 {
label = "d9";
gpios = <&pioB 5 GPIO_ACTIVE_LOW>;
linux,default-trigger = "nand-disk";
};
- d10 {
+ led-d10 {
label = "d10";
gpios = <&pioB 6 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/microchip/at91sam9rl.dtsi b/arch/arm/boot/dts/microchip/at91sam9rl.dtsi
index 7436b5c862b1..1fec9fcc7cd1 100644
--- a/arch/arm/boot/dts/microchip/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9rl.dtsi
@@ -339,7 +339,7 @@
pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x800>;
atmel,mux-mask =
diff --git a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
index a7456c2191fa..27c1f2861cc3 100644
--- a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
@@ -202,7 +202,7 @@
pinctrl: pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,at91sam9x5-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x800>;
/* shared pinctrl settings */
diff --git a/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi b/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi
index cdd37f67280b..fb3c19bdfcb6 100644
--- a/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi
@@ -120,13 +120,13 @@
leds {
compatible = "gpio-leds";
- pb18 {
+ led-pb18 {
label = "pb18";
gpios = <&pioB 18 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
};
- pd21 {
+ led-pd21 {
label = "pd21";
gpios = <&pioD 21 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi
index 291540e5d81e..04a6d716ecaf 100644
--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi
+++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi
@@ -215,6 +215,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -284,6 +286,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -394,6 +398,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -443,6 +449,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -600,6 +608,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -649,6 +659,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -698,6 +710,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 11>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -766,6 +780,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -834,6 +850,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -902,6 +920,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -970,6 +990,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -1074,6 +1096,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <15 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -1123,6 +1147,8 @@
compatible = "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -1223,7 +1249,7 @@
pinctrl: pinctrl@fffff400 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "microchip,sam9x60-pinctrl", "simple-mfd";
ranges = <0xfffff400 0xfffff400 0x800>;
/* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
@@ -1236,7 +1262,7 @@
>;
pioA: gpio@fffff400 {
- compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
#gpio-cells = <2>;
@@ -1247,7 +1273,7 @@
};
pioB: gpio@fffff600 {
- compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x200>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH 1>;
#gpio-cells = <2>;
@@ -1259,7 +1285,7 @@
};
pioC: gpio@fffff800 {
- compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x200>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH 1>;
#gpio-cells = <2>;
@@ -1270,7 +1296,7 @@
};
pioD: gpio@fffffa00 {
- compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffffa00 0x200>;
interrupts = <44 IRQ_TYPE_LEVEL_HIGH 1>;
#gpio-cells = <2>;
@@ -1312,7 +1338,7 @@
compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xfffffe20 0x20>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&clk32k 0>;
+ clocks = <&clk32k 1>;
};
pit: timer@fffffe40 {
@@ -1338,7 +1364,7 @@
compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
reg = <0xfffffea8 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&clk32k 0>;
+ clocks = <&clk32k 1>;
};
watchdog: watchdog@ffffff80 {
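The <&clk32k N> cell selects one of the SCKC's two slow-clock outputs; moving the RTT and RTC from cell 0 to cell 1 appears to switch them from md_slck to td_slck, the timing-domain slow clock these blocks are meant to run from (the same change is applied to sama7g5 below). Condensed sketch:

	/* Sketch: consumer picking the second SCKC output */
	rtt@fffffe20 {
		compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
		reg = <0xfffffe20 0x20>;
		clocks = <&clk32k 1>;	/* td_slck */
	};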
diff --git a/arch/arm/boot/dts/microchip/sama5d3.dtsi b/arch/arm/boot/dts/microchip/sama5d3.dtsi
index d4fc0c1dfc10..39865133aa56 100644
--- a/arch/arm/boot/dts/microchip/sama5d3.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d3.dtsi
@@ -493,7 +493,7 @@
pinctrl: pinctrl@fffff200 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
+ compatible = "atmel,sama5d3-pinctrl", "simple-mfd";
ranges = <0xfffff200 0xfffff200 0xa00>;
atmel,mux-mask = <
/* A B C */
diff --git a/arch/arm/boot/dts/microchip/sama5d4.dtsi b/arch/arm/boot/dts/microchip/sama5d4.dtsi
index 58ceed997889..b253ba33fc38 100644
--- a/arch/arm/boot/dts/microchip/sama5d4.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d4.dtsi
@@ -791,7 +791,7 @@
pinctrl: pinctrl@fc06a000 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
+ compatible = "atmel,sama5d3-pinctrl", "simple-mfd";
ranges = <0xfc068000 0xfc068000 0x100
0xfc06a000 0xfc06a000 0x4000>;
/* WARNING: revisit as pin spec has changed */
diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi
index 75778be126a3..17bcdcf0cf4a 100644
--- a/arch/arm/boot/dts/microchip/sama7g5.dtsi
+++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi
@@ -272,7 +272,7 @@
compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xe001d020 0x30>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk32k 0>;
+ clocks = <&clk32k 1>;
};
clk32k: clock-controller@e001d050 {
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
index 1f07ba382910..886a87dfcd0d 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts
@@ -531,8 +531,8 @@
reg = <4>;
// INLET1_T
- lm75@5c {
- compatible = "ti,lm75";
+ temperature-sensor@5c {
+ compatible = "national,lm75";
reg = <0x5c>;
};
};
@@ -543,8 +543,8 @@
reg = <5>;
// OUTLET1_T
- lm75@5c {
- compatible = "ti,lm75";
+ temperature-sensor@5c {
+ compatible = "national,lm75";
reg = <0x5c>;
};
};
@@ -555,8 +555,8 @@
reg = <6>;
// OUTLET2_T
- lm75@5c {
- compatible = "ti,lm75";
+ temperature-sensor@5c {
+ compatible = "national,lm75";
reg = <0x5c>;
};
};
@@ -567,8 +567,8 @@
reg = <7>;
// OUTLET3_T
- lm75@5c {
- compatible = "ti,lm75";
+ temperature-sensor@5c {
+ compatible = "national,lm75";
reg = <0x5c>;
};
};
@@ -697,8 +697,8 @@
reg = <3>;
// M2_ZONE_T
- lm75@28 {
- compatible = "ti,lm75";
+ temperature-sensor@28 {
+ compatible = "national,lm75";
reg = <0x28>;
};
};
@@ -709,8 +709,8 @@
reg = <4>;
// BATT_ZONE_T
- lm75@29 {
- compatible = "ti,lm75";
+ temperature-sensor@29 {
+ compatible = "national,lm75";
reg = <0x29>;
};
};
@@ -721,8 +721,8 @@
reg = <5>;
// NBM1_ZONE_T
- lm75@28 {
- compatible = "ti,lm75";
+ temperature-sensor@28 {
+ compatible = "national,lm75";
reg = <0x28>;
};
};
@@ -732,8 +732,8 @@
reg = <6>;
// NBM2_ZONE_T
- lm75@29 {
- compatible = "ti,lm75";
+ temperature-sensor@29 {
+ compatible = "national,lm75";
reg = <0x29>;
};
};
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts
index f53d45fa1de8..bcdcb30c7bf6 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts
@@ -198,7 +198,7 @@
clock-frequency = <100000>;
status = "okay";
lm75@48 {
- compatible = "lm75";
+ compatible = "national,lm75";
reg = <0x48>;
status = "okay";
};
@@ -208,8 +208,8 @@
&i2c1 {
clock-frequency = <100000>;
status = "okay";
- lm75@48 {
- compatible = "lm75";
+ temperature-sensor@48 {
+ compatible = "national,lm75";
reg = <0x48>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts
index b78c116cbc18..edb907f740bf 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts
@@ -34,7 +34,7 @@
pinctrl-names = "default";
pinctrl-0 = <&key_pins>;
- uid {
+ button-uid {
label = "UID button";
linux,code = <KEY_HOME>;
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
@@ -46,12 +46,12 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins>;
- uid {
+ led-uid {
label = "UID";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
};
- heartbeat {
+ led-heartbeat {
label = "heartbeat";
gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts b/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts
index 763ab812eb87..f02e2cf65fe8 100644
--- a/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts
+++ b/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts
@@ -57,10 +57,24 @@
};
host1x@50000000 {
+ hdmi@54280000 {
+ status = "okay";
+
+ hdmi-supply = <&hdmi_5v0_sys>;
+ pll-supply = <&avdd_hdmi_pll>;
+ vdd-supply = <&avdd_hdmi>;
+
+ port {
+ hdmi_out: endpoint {
+ remote-endpoint = <&connector_in>;
+ };
+ };
+ };
+
dsi@54300000 {
status = "okay";
- avdd-dsi-csi-supply = <&tps65913_ldo2>;
+ avdd-dsi-csi-supply = <&avdd_dsi_csi>;
nvidia,ganged-mode = <&dsib>;
@@ -70,7 +84,7 @@
link2 = <&panel_secondary>;
- power-supply = <&vdd_lcd>;
+ power-supply = <&dvdd_1v8_lcd>;
backlight = <&backlight>;
};
};
@@ -78,7 +92,7 @@
dsi@54400000 {
status = "okay";
- avdd-dsi-csi-supply = <&tps65913_ldo2>;
+ avdd-dsi-csi-supply = <&avdd_dsi_csi>;
panel_secondary: panel@0 {
compatible = "sharp,lq101r1sx01";
@@ -87,66 +101,377 @@
};
};
+ vde@6001a000 {
+ assigned-clocks = <&tegra_car TEGRA114_CLK_VDE>;
+ assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_P>;
+ assigned-clock-rates = <408000000>;
+ };
+
pinmux@70000868 {
- asus_pad_ec_default: pinmux-asus-pad-ec-default {
- ec-interrupt {
- nvidia,pins = "kb_col5_pq5";
- nvidia,function = "kbc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ /* WLAN SDIO pinmux */
+ sdmmc1-clk {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ sdmmc1-cmd {
+ nvidia,pins = "sdmmc1_cmd_pz1",
+ "sdmmc1_dat0_py7",
+ "sdmmc1_dat1_py6",
+ "sdmmc1_dat2_py5",
+ "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- ec-request {
- nvidia,pins = "kb_col2_pq2";
- nvidia,function = "kbc";
+ wlan-power {
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- backlight_default: pinmux-backlight-default {
- backlight-enable {
- nvidia,pins = "gmi_ad10_ph2";
+ wlan-reset {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ wlan-host-wake {
+ nvidia,pins = "pu5";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ wlan-3v3-com {
+ nvidia,pins = "pu1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* UART-A pinmux */
+ uarta-cts {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ uarta-rts {
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GNSS UART-B pinmux */
+ uartb-cts {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ uartb-rts {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ uartb-rxd {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ uartb-txd {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Bluetooth UART-C pinmux */
+ uartc-cts-rxd {
+ nvidia,pins = "uart3_cts_n_pa1",
+ "uart3_rxd_pw7";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ uartc-rts-txd {
+ nvidia,pins = "uart3_rts_n_pc0",
+ "uart3_txd_pw6";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ bt-shutdown {
+ nvidia,pins = "kb_col6_pq6",
+ "kb_col7_pq7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ bt-dev-wake {
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ bt-host-wake {
+ nvidia,pins = "pu6";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ bt-pcm-dap4-out {
+ nvidia,pins = "dap4_fs_pp4",
+ "dap4_dout_pp6",
+ "dap4_sclk_pp7";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ bt-pcm-dap4-in {
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "i2s3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* UART-D pinmux */
+ uartd-cts {
+ nvidia,pins = "gmi_a17_pb0";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ uartd-rts {
+ nvidia,pins = "gmi_a16_pj7",
+ "gmi_a19_pk7";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* MicroSD pinmux */
+ sdmmc3-clk {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ sdmmc3-data {
+ nvidia,pins = "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4",
+ "kb_col4_pq4",
+ "sdmmc3_cd_n_pv2",
+ "sdmmc3_clk_lb_out_pee4",
+ "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ microsd-pwr {
+ nvidia,pins = "gmi_clk_pk1";
nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- codec_default: pinmux-codec-default {
- interrupt {
- nvidia,pins = "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3";
- nvidia,function = "spi6";
+ /* EMMC pinmux */
+ sdmmc4-clk-cmd {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ sdmmc4-data {
+ nvidia,pins = "sdmmc4_cmd_pt7",
+ "sdmmc4_dat0_paa0",
+ "sdmmc4_dat1_paa1",
+ "sdmmc4_dat2_paa2",
+ "sdmmc4_dat3_paa3",
+ "sdmmc4_dat4_paa4",
+ "sdmmc4_dat5_paa5",
+ "sdmmc4_dat6_paa6",
+ "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- ldo1-en {
- nvidia,pins = "sdmmc1_wp_n_pv3";
- nvidia,function = "sdmmc1";
+ /* I2C pinmux */
+ gen1-i2c {
+ nvidia,pins = "gen1_i2c_scl_pc4",
+ "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,lock = <TEGRA_PIN_DISABLE>;
+ };
+
+ gen2-i2c {
+ nvidia,pins = "gen2_i2c_scl_pt5",
+ "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,lock = <TEGRA_PIN_DISABLE>;
+ };
+
+ cam-i2c {
+ nvidia,pins = "cam_i2c_scl_pbb1",
+ "cam_i2c_sda_pbb2";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,lock = <TEGRA_PIN_DISABLE>;
+ };
+
+ ddc-i2c {
+ nvidia,pins = "ddc_scl_pv4",
+ "ddc_sda_pv5";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,lock = <TEGRA_PIN_DISABLE>;
+ };
+
+ pwr-i2c {
+ nvidia,pins = "pwr_i2c_scl_pz6",
+ "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ nvidia,lock = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SPI pinmux */
+ spi1-out {
+ nvidia,pins = "ulpi_clk_py0",
+ "ulpi_nxt_py2",
+ "ulpi_stp_py3";
+ nvidia,function = "spi1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- gpio_hall_sensor_default: pinmux-gpio-hall-sensor-default {
- ulpi_data4_po5 {
+ spi1-in {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ spi2 {
+ nvidia,pins = "ulpi_data4_po5",
+ "ulpi_data7_po0";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ spi4-out {
+ nvidia,pins = "gmi_ad6_pg6",
+ "gmi_wr_n_pi0";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ spi4-in {
+ nvidia,pins = "gmi_ad5_pg5",
+ "gmi_ad7_pg7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* GPIO keys pinmux */
+ hall-switch {
nvidia,pins = "ulpi_data4_po5";
nvidia,function = "spi2";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- gpio_keys_default: pinmux-gpio-keys-default {
- power {
+ lineout-switch {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ power-key {
nvidia,pins = "kb_col0_pq0";
nvidia,function = "kbc";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
@@ -154,111 +479,722 @@
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- volume {
+ volume-keys {
nvidia,pins = "kb_row1_pr1",
- "kb_row2_pr2";
+ "kb_row2_pr2";
nvidia,function = "rsvd2";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- hp_det_default: pinmux-hp-det-default {
- gmi_iordy_pi5 {
- nvidia,pins = "kb_row7_pr7";
- nvidia,function = "rsvd2";
+ /* Sensors pinmux */
+ nct-irq {
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- imu_default: pinmux-imu-default {
- kb_row3_pr3 {
+ mpu-irq {
nvidia,pins = "kb_row3_pr3";
nvidia,function = "rsvd3";
nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- pwm_default: pinmux-pwm-default {
- gmi_ad9_ph1 {
+ /* HDMI pinmux */
+ hdmi-hpd {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ hdmi-en {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ hdmi-cec {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* LED pinmux */
+ backlight-pwm {
nvidia,pins = "gmi_ad9_ph1";
nvidia,function = "pwm1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- /* XXX make this something more sensible */
- pwm_sleep: pinmux-pwm-sleep {
- gmi_ad9_ph1 {
- nvidia,pins = "gmi_ad9_ph1";
+ backlight-en {
+ nvidia,pins = "gmi_ad10_ph2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Touchscreen pinmux */
+ touch-irq {
+ nvidia,pins = "gmi_cs4_n_pk2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ touch-rst {
+ nvidia,pins = "gmi_cs3_n_pk4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ touch-pwr {
+ nvidia,pins = "gmi_ad8_ph0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ touch-vio {
+ nvidia,pins = "gmi_ad12_ph4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* AUDIO pinmux */
+ audio-ldo1 {
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ hp-detect {
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap-i2s0-in {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap-i2s0-out {
+ nvidia,pins = "dap1_dout_pn2",
+ "dap1_fs_pn0",
+ "dap1_sclk_pn3";
+ nvidia,function = "i2s0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ dap-i2s1-in {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap-i2s1-out {
+ nvidia,pins = "dap2_dout_pa5",
+ "dap2_fs_pa2",
+ "dap2_sclk_pa3";
+ nvidia,function = "i2s1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ dap-i2s2-in {
+ nvidia,pins = "dap3_fs_pp0",
+ "dap3_sclk_pp3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dap-i2s2-out {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ spdif-in {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ spdif-out {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* AsusEC pinmux */
+ ec-irq {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ ec-req {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ hotplug-i2c {
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ ps2-irq {
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "spi6";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kbd-irq {
+ nvidia,pins = "gmi_cs0_n_pj0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ dvfs-pin {
+ nvidia,pins = "dvfs_pwm_px0",
+ "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Core pinmux */
+ clk-32k-out {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ sys-clk-req {
+ nvidia,pins = "sys_clk_req_pz5";
+ nvidia,function = "sysclk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ core-pwr-req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ cpu-pwr-req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pwr-int-n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ clk-32k-in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ owr {
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ reset-out-n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* ULPI pinmux */
+ ulpi-data0-6 {
+ nvidia,pins = "ulpi_data0_po1",
+ "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ ulpi-data1-5 {
+ nvidia,pins = "ulpi_data1_po2",
+ "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ ulpi-data2-3 {
+ nvidia,pins = "ulpi_data2_po3",
+ "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PORT V */
+ pv0-gpio {
+ nvidia,pins = "pv0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pv1-gpio {
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PORT U */
+ pu0-gpio {
+ nvidia,pins = "pu0";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pu2-gpio {
+ nvidia,pins = "pu2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWM pinmux */
+ pwm0 {
+ nvidia,pins = "pu3";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pwm1 {
+ nvidia,pins = "pu4";
nvidia,function = "pwm1";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- sdmmc3_default: pinmux-sdmmc3-default {
- drive_sdio3 {
- nvidia,pins = "drive_sdio3";
- nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
- nvidia,schmitt = <TEGRA_PIN_DISABLE>;
- nvidia,pull-down-strength = <22>;
- nvidia,pull-up-strength = <36>;
- nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
- nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ /* EXTPERIPH pinmux */
+ clk1-out {
+ nvidia,pins = "clk1_out_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- sdmmc3_clk_pa6 {
- nvidia,pins = "sdmmc3_clk_pa6";
- nvidia,function = "sdmmc3";
+ clk2-out {
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "extperiph2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ clk3-out {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ clk1-req {
+ nvidia,pins = "clk1_req_pee2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GMI pinmux */
+ gmi-wp-n {
+ nvidia,pins = "gmi_wp_n_pc7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- sdmmc3_cmd_pa7 {
- nvidia,pins = "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "kb_col4_pq4",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_cd_n_pv2";
- nvidia,function = "sdmmc3";
+ gmi-adv {
+ nvidia,pins = "gmi_adv_n_pk0";
+ nvidia,function = "rsvd1";
nvidia,pull = <TEGRA_PIN_PULL_UP>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- };
- sdmmc3_vdd_default: pinmux-sdmmc3-vdd-default {
- gmi_clk_pk1 {
- nvidia,pins = "gmi_clk_pk1";
+ gmi-ad0-ad1 {
+ nvidia,pins = "gmi_ad0_pg0",
+ "gmi_ad1_pg1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ gmi-ad2-ad3 {
+ nvidia,pins = "gmi_ad2_pg2",
+ "gmi_ad3_pg3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-iordy {
+ nvidia,pins = "gmi_iordy_pi5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-a18 {
+ nvidia,pins = "gmi_a18_pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-wait {
+ nvidia,pins = "gmi_wait_pi7";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ gmi-cs6-n {
+ nvidia,pins = "gmi_cs6_n_pi3";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ gmi-cs7-n {
+ nvidia,pins = "gmi_cs7_n_pi6";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-dqs-p {
+ nvidia,pins = "gmi_dqs_p_pj3";
+ nvidia,function = "nand";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-cs2-ad {
+ nvidia,pins = "gmi_cs2_n_pk3",
+ "gmi_ad14_ph6",
+ "gmi_ad15_ph7";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-cs4-clk {
+ nvidia,pins = "gmi_cs4_n_pk2",
+ "gmi_clk_lb";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-ad11 {
+ nvidia,pins = "gmi_ad11_ph3";
nvidia,function = "gmi";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
- };
- vdd_lcd_default: pinmux-vdd-lcd-default {
- sdmmc4_clk_pcc4 {
- nvidia,pins = "sdmmc4_clk_pcc4";
- nvidia,function = "sdmmc4";
+ gmi-cs1-oe {
+ nvidia,pins = "gmi_cs1_n_pj2",
+ "gmi_oe_n_pi1";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-ad4 {
+ nvidia,pins = "gmi_ad4_pg4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-ad13 {
+ nvidia,pins = "gmi_ad13_ph5";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gmi-rst-n {
+ nvidia,pins = "gmi_rst_n_pi4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* PORT CC */
+ pcc-gpio {
+ nvidia,pins = "pcc1", "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PORT BB */
+ pbb3-gpio {
+ nvidia,pins = "pbb3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pbb4-5-6-gpio {
+ nvidia,pins = "pbb4", "pbb5", "pbb6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ pbb7-gpio {
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* KBC pinmux */
+ kb-r0-c1 {
+ nvidia,pins = "kb_row0_pr0",
+ "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kb-row4 {
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kb-row5 {
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kb-row6 {
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ kb-r8-c3 {
+ nvidia,pins = "kb_row8_ps0",
+ "kb_col3_pq3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* VI pinmux */
+ cam-mclk {
+ nvidia,pins = "cam_mclk_pcc0",
+ "pbb0";
+ nvidia,function = "vi_alt3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
+
+ /* AUD pinmux */
+ gpio-x4-aud {
+ nvidia,pins = "gpio_x4_aud_px4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ gpio-x1-aud {
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gpio-x3-aud {
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ gpio-x6-aud {
+ nvidia,pins = "gpio_x6_aud_px6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ usb-vbus {
+ nvidia,pins = "usb_vbus_en0_pn4",
+ "usb_vbus_en1_pn5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GPIO power/drive control */
+ drive-sdio1 {
+ nvidia,pins = "drive_sdio1";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <36>;
+ nvidia,pull-up-strength = <20>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_SLOW>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_SLOW>;
+ };
+
+ drive-sdio3 {
+ nvidia,pins = "drive_sdio3";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <22>;
+ nvidia,pull-up-strength = <36>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
+
+ drive-gma {
+ nvidia,pins = "drive_gma";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <2>;
+ nvidia,pull-up-strength = <2>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
};
};
@@ -267,7 +1203,33 @@
};
serial@70006200 {
- /* Bluetooth */
+ compatible = "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart";
+ reset-names = "serial";
+ /delete-property/ reg-shift;
+ status = "okay";
+
+ nvidia,adjust-baud-rates = <0 9600 100>,
+ <9600 115200 200>,
+ <1000000 4000000 136>;
+
+ bluetooth {
+ compatible = "brcm,bcm4334-bt";
+ max-speed = <4000000>;
+
+ clocks = <&tegra_pmc TEGRA_PMC_CLK_BLINK>;
+ clock-names = "txco";
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(U, 6) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "host-wakeup";
+
+ device-wakeup-gpios = <&gpio TEGRA_GPIO(EE, 1) GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio TEGRA_GPIO(Q, 6) GPIO_ACTIVE_LOW>;
+
+ vbat-supply = <&vdd_3v3_com>;
+ vddio-supply = <&vdd_1v8_vio>;
+ };
};
serial@70006300 {
@@ -278,10 +1240,6 @@
pwm@7000a000 {
status = "okay";
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&pwm_default>;
- pinctrl-1 = <&pwm_sleep>;
};
i2c@7000c000 {
@@ -292,27 +1250,35 @@
compatible = "asahi-kasei,ak09911";
reg = <0xc>;
- vdd-supply = <&vdd_3v3_sys>;
+ /* no DRDY (polling) */
+
+ vdd-supply = <&vdd_2v85_sen>;
+ vid-supply = <&vdd_1v8_vio>;
+
+ mount-matrix = "0", "1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
};
rt5639: audio-codec@1c {
compatible = "realtek,rt5639";
reg = <0x1c>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(W, 3) IRQ_TYPE_EDGE_FALLING>;
-
- realtek,ldo1-en-gpios = <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&codec_default>;
+ clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+ clock-names = "mclk";
};
temp_sensor: temperature-sensor@4c {
compatible = "onnn,nct1008";
reg = <0x4c>;
- vcc-supply = <&vdd_3v3_sys>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(O, 4) IRQ_TYPE_EDGE_FALLING>;
+
+ vcc-supply = <&vdd_1v8_vio>;
#thermal-sensor-cells = <1>;
};
@@ -323,12 +1289,12 @@
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(R, 3) IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&vdd_2v85_sen>;
+ vddio-supply = <&vdd_1v8_vio>;
+
mount-matrix = "0", "-1", "0",
"1", "0", "0",
"0", "0", "1";
-
- pinctrl-names = "default";
- pinctrl-0 = <&imu_default>;
};
};
@@ -339,6 +1305,8 @@
power-sensor@44 {
compatible = "ti,ina230";
reg = <0x44>;
+
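+ /* ina230 shunt-resistor is given in micro-ohms, i.e. a 5 mOhm shunt */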
+ shunt-resistor = <5000>;
};
};
@@ -350,12 +1318,13 @@
compatible = "dynaimage,al3320a";
reg = <0x1c>;
- vdd-supply = <&vdd_3v3_sys>;
+ vdd-supply = <&vdd_1v8_vio>;
};
};
- i2c@7000c700 {
- /* HDMI DDC */
+ hdmi_ddc: i2c@7000c700 {
+ status = "okay";
+ clock-frequency = <10000>;
};
i2c@7000d000 {
@@ -372,12 +1341,36 @@
ti,system-power-controller;
+ palmas_gpadc: adc {
+ compatible = "ti,palmas-gpadc";
+ interrupts = <18 IRQ_TYPE_NONE>,
+ <16 IRQ_TYPE_NONE>,
+ <17 IRQ_TYPE_NONE>;
+
+ ti,channel0-current-microamp = <5>;
+ ti,channel3-current-microamp = <400>;
+ ti,enable-extended-delay;
+
+ #io-channel-cells = <1>;
+ };
+
+ palmas_extcon: extcon {
+ compatible = "ti,palmas-usb-vid";
+ ti,enable-vbus-detection;
+ ti,enable-id-detection;
+ };
+
palmas_gpio: gpio {
compatible = "ti,palmas-gpio";
gpio-controller;
#gpio-cells = <2>;
};
+ palmas_clk32kg@0 {
+ compatible = "ti,palmas-clk32kg";
+ #clock-cells = <0>;
+ };
+
pinmux {
compatible = "ti,tps65913-pinctrl";
ti,palmas-enable-dvfs1;
@@ -441,17 +1434,18 @@
pmic {
compatible = "ti,tps65913-pmic", "ti,palmas-pmic";
- ldo1-in-supply = <&tps65913_smps7>;
- ldo2-in-supply = <&tps65913_smps7>;
- ldo4-in-supply = <&tps65913_smps8>;
- ldo5-in-supply = <&tps65913_smps9>;
- ldo6-in-supply = <&tps65913_smps9>;
- ldo7-in-supply = <&tps65913_smps9>;
- ldo9-in-supply = <&tps65913_smps9>;
+ ldo1-in-supply = <&vddio_ddr>;
+ ldo2-in-supply = <&vddio_ddr>;
+ ldo4-in-supply = <&vdd_1v8_vio>;
+ ldo5-in-supply = <&vcore_emmc>;
+ ldo6-in-supply = <&vcore_emmc>;
+ ldo7-in-supply = <&vcore_emmc>;
+ ldo9-in-supply = <&vcore_emmc>;
+ ldoln-in-supply = <&vdd_smps10_out2>;
regulators {
- tps65913_smps123: smps123 {
- regulator-name = "vdd-cpu";
+ vdd_cpu: smps123 {
+ regulator-name = "vdd_cpu";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
@@ -460,8 +1454,8 @@
ti,mode-sleep = <3>;
};
- tps65913_smps45: smps45 {
- regulator-name = "vdd-core";
+ vdd_core: smps45 {
+ regulator-name = "vdd_core";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1400000>;
regulator-always-on;
@@ -469,101 +1463,95 @@
ti,roof-floor = <3>;
};
- smps6 {
- regulator-name = "va-lcd-hv";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-always-on;
- regulator-boot-on;
- };
+ /* smps6 disabled */
- tps65913_smps7: smps7 {
- regulator-name = "vdd-ddr";
+ vddio_ddr: smps7 {
+ regulator-name = "vddio_ddr";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
regulator-boot-on;
};
- tps65913_smps8: smps8 {
- regulator-name = "vdd-1v8";
+ vdd_1v8_vio: smps8 {
+ regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
};
- tps65913_smps9: smps9 {
- regulator-name = "vdd-sd";
+ vcore_emmc: smps9 {
+ regulator-name = "vdd_emmc";
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
- regulator-always-on;
+ regulator-boot-on;
};
- tps65913_smps10_out1: smps10_out1 {
- regulator-name = "vd-smps10-out1";
+ smps10_out1 {
+ regulator-name = "vd_smps10_out1";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
regulator-boot-on;
};
- tps65913_smps10_out2: smps10_out2 {
- regulator-name = "vd-smps10-out2";
+ vdd_smps10_out2: smps10_out2 {
+ regulator-name = "vd_smps10_out2";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
regulator-boot-on;
};
- tps65913_ldo1: ldo1 {
- regulator-name = "vdd-hdmi-pll";
+ avdd_hdmi_pll: ldo1 {
+ regulator-name = "avdd_hdmi_pll";
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
regulator-always-on;
+ regulator-boot-on;
ti,roof-floor = <3>;
};
- tps65913_ldo2: ldo2 {
- regulator-name = "vdd-2v8-dsi-csi";
+ avdd_dsi_csi: ldo2 {
+ regulator-name = "avdd_dsi_csi";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-boot-on;
};
ldo3 {
- regulator-name = "vpp-fuse";
+ regulator-name = "vpp_fuse";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
- ldo4 {
- regulator-name = "vdd-1v2-cam";
+ vdd_1v2_cam: ldo4 {
+ regulator-name = "vdd_1v2_cam";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
};
- ldo5 {
- regulator-name = "vdd-cam";
+ avdd_2v8_cam: ldo5 {
+ regulator-name = "avdd_cam2";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
};
- ldo6 {
- regulator-name = "vdd-dev";
+ vdd_2v85_sen: ldo6 {
+ regulator-name = "vdd_dev";
regulator-min-microvolt = <2850000>;
regulator-max-microvolt = <2850000>;
- regulator-boot-on;
};
- ldo7 {
- regulator-name = "vdd-2v8-cam";
+ avdd_2v8_af: ldo7 {
+ regulator-name = "avdd_2v8_cam";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
};
- tps65913_ldo8: ldo8 {
- regulator-name = "vdd-rtc";
+ ldo8 {
+ regulator-name = "vdd_rtc";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <950000>;
regulator-always-on;
@@ -571,23 +1559,24 @@
ti,enable-ldo8-tracking;
};
- tps65913_ldo9: ldo9 {
- regulator-name = "vdd-sdmmc";
- regulator-min-microvolt = <1800000>;
+ vddio_usd: ldo9 {
+ regulator-name = "vddio_usd";
+ /* the minimum voltage of 1.8 V is not stable */
+ regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
};
- tps65913_ldoln: ldoln {
- regulator-name = "vdd-hdmi";
+ avdd_hdmi: ldoln {
+ regulator-name = "avdd_hdmi";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
};
- ldousb {
- regulator-name = "vdd-usb";
+ avdd_usb: ldousb {
+ regulator-name = "avdd_usb";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
regulator-boot-on;
};
};
@@ -596,19 +1585,89 @@
rtc {
compatible = "ti,palmas-rtc";
interrupt-parent = <&palmas>;
- interrupts = <8 0>;
+ interrupts = <8 IRQ_TYPE_NONE>;
};
};
};
+ pmc@7000e400 {
+ status = "okay";
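+ /* suspend-mode 2 selects LP2: CPU rail off, core rail and DRAM stay powered */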
+ nvidia,suspend-mode = <2>;
+ nvidia,cpu-pwr-good-time = <300>;
+ nvidia,cpu-pwr-off-time = <300>;
+ nvidia,core-pwr-good-time = <641 3845>;
+ nvidia,core-pwr-off-time = <2000>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+
+ /* Clear DEV_ON bit in DEV_CTRL register of TPS65913 PMIC */
+ i2c-thermtrip {
+ nvidia,i2c-controller-id = <4>;
+ nvidia,bus-addr = <0x58>;
+ nvidia,reg-addr = <0xa0>;
+ nvidia,reg-data = <0x00>;
+ };
+ };
+
ahub@70080000 {
- i2s@70080300 {
+ /* HIFI CODEC (i2s1) */
+ i2s@70080400 {
status = "okay";
};
+
+ /* BT SCO (i2s3) */
+ i2s@70080600 {
+ status = "okay";
+ };
+ };
+
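+ /* WLAN power-up sequencing: enable GPIO plus the PMC blink clock */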
+ brcm_wifi_pwrseq: pwrseq-wifi {
+ compatible = "mmc-pwrseq-simple";
+
+ clocks = <&tegra_pmc TEGRA_PMC_CLK_BLINK>;
+ clock-names = "ext_clock";
+
+ reset-gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
+ post-power-on-delay-ms = <300>;
+ power-off-delay-us = <300>;
};
+ /* WiFi */
mmc@78000000 {
- /* WiFi */
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ assigned-clocks = <&tegra_car TEGRA114_CLK_SDMMC1>;
+ assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_P>;
+ assigned-clock-rates = <82000000>;
+
+ max-frequency = <82000000>;
+ keep-power-in-suspend;
+ bus-width = <4>;
+ non-removable;
+
+ sd-uhs-ddr50;
+ mmc-ddr-1_8v;
+
+ power-gpios = <&gpio TEGRA_GPIO(CC, 5) GPIO_ACTIVE_HIGH>;
+
+ nvidia,default-tap = <0x2>;
+ nvidia,default-trim = <0x2>;
+
+ mmc-pwrseq = <&brcm_wifi_pwrseq>;
+ vmmc-supply = <&vdd_3v3_com>;
+ vqmmc-supply = <&vdd_1v8_vio>;
+
+ wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(U, 5) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
};
/* MicroSD card */
@@ -621,33 +1680,38 @@
nvidia,default-tap = <0x3>;
nvidia,default-trim = <0x3>;
- vmmc-supply = <&vdd_usd>;
- vqmmc-supply = <&tps65913_ldo9>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&sdmmc3_default>;
+ vmmc-supply = <&vdd_2v9_usd>;
+ vqmmc-supply = <&vddio_usd>;
};
+ /* eMMC */
mmc@78000600 {
- /* eMMC */
+ status = "okay";
+ bus-width = <8>;
+
+ non-removable;
+ mmc-ddr-1_8v;
+
+ vmmc-supply = <&vcore_emmc>;
+ vqmmc-supply = <&vdd_1v8_vio>;
};
+ /* Peripheral USB via ASUS connector */
usb@7d000000 {
compatible = "nvidia,tegra114-udc";
status = "okay";
dr_mode = "peripheral";
-
- /* Peripheral USB via ASUS connector */
};
usb-phy@7d000000 {
status = "okay";
+ dr_mode = "peripheral";
+ vbus-supply = <&avdd_usb>;
};
+ /* Host USB via dock */
usb@7d008000 {
status = "okay";
-
- /* Host USB via dock */
};
usb-phy@7d008000 {
@@ -658,16 +1722,12 @@
backlight: backlight {
compatible = "pwm-backlight";
- enable-gpios = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>;
- power-supply = <&vdd_5v0_sys>;
+ power-supply = <&vdd_3v7_bl>;
pwms = <&pwm 1 1000000>;
brightness-levels = <1 255>;
num-interpolated-steps = <254>;
default-brightness-level = <224>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&backlight_default>;
};
/* PMIC has a built-in 32KHz oscillator which is used by PMC */
@@ -678,13 +1738,22 @@
clock-output-names = "pmic-oscillator";
};
- gpio-hall-sensor {
- compatible = "gpio-keys";
+ connector {
+ compatible = "hdmi-connector";
+ type = "d";
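+ /* type "d" is a micro-HDMI receptacle */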
- label = "GPIO Hall Effect Sensor";
+ hpd-gpios = <&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
+ ddc-i2c-bus = <&hdmi_ddc>;
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_hall_sensor_default>;
+ port {
+ connector_in: endpoint {
+ remote-endpoint = <&hdmi_out>;
+ };
+ };
+ };
+
+ extcon-keys {
+ compatible = "gpio-keys";
switch-hall-sensor {
label = "Hall Effect Sensor";
@@ -694,17 +1763,20 @@
linux,can-disable;
wakeup-source;
};
+
+ switch-lineout-detect {
+ label = "Audio dock line-out detect";
+ gpios = <&gpio TEGRA_GPIO(X, 5) GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LINEOUT_INSERT>;
+ debounce-interval = <10>;
+ };
};
gpio-keys {
compatible = "gpio-keys";
- label = "GPIO Buttons";
-
- pinctrl-names = "default";
- pinctrl-0 = <&gpio_keys_default>;
-
- button-power {
+ key-power {
label = "Power";
gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
linux,code = <KEY_POWER>;
@@ -712,14 +1784,14 @@
wakeup-source;
};
- button-volume-down {
+ key-volume-down {
label = "Volume Down";
gpios = <&gpio TEGRA_GPIO(R, 1) GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEDOWN>;
debounce-interval = <10>;
};
- button-volume-up {
+ key-volume-up {
label = "Volume Up";
gpios = <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEUP>;
@@ -739,13 +1811,16 @@
"Speakers", "SPORN",
"Speakers", "SPOLP",
"Speakers", "SPOLN",
- "Mic Jack", "MICBIAS1",
- "IN2P", "Mic Jack";
+ "IN1P", "Mic Jack",
+ "IN1N", "Mic Jack",
+ "DMIC1", "Int Mic",
+ "DMIC2", "Int Mic";
- nvidia,i2s-controller = <&tegra_i2s0>;
+ nvidia,i2s-controller = <&tegra_i2s1>;
nvidia,audio-codec = <&rt5639>;
nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_LOW>;
+ nvidia,int-mic-en-gpios = <&gpio TEGRA_GPIO(K, 3) GPIO_ACTIVE_HIGH>;
clocks = <&tegra_car TEGRA114_CLK_PLL_A>,
<&tegra_car TEGRA114_CLK_PLL_A_OUT0>,
@@ -757,14 +1832,11 @@
assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_A_OUT0>,
<&tegra_car TEGRA114_CLK_EXTERN1>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&hp_det_default>;
};
vdd_5v0_sys: regulator-5v0-sys {
compatible = "regulator-fixed";
- regulator-name = "vdd_5v0";
+ regulator-name = "vdd_5v0_sys";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
@@ -773,37 +1845,119 @@
vdd_3v3_sys: regulator-3v3-sys {
compatible = "regulator-fixed";
- regulator-name = "vdd_3v3";
+ regulator-name = "vdd_3v3_sys";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
};
- vdd_lcd: regulator-vdd-lcd {
+ dvdd_1v8_lcd: regulator-vdd-lcd {
compatible = "regulator-fixed";
- regulator-name = "vdd_lcd_1v8";
+ regulator-name = "dvdd_1v8_lcd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- vin-supply = <&tps65913_smps8>;
+ regulator-boot-on;
+ gpio = <&palmas_gpio 4 GPIO_ACTIVE_HIGH>;
enable-active-high;
- gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vdd_1v8_vio>;
+ };
+
+ vdd_3v7_bl: regulator-bl-en {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v7_bl";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
- pinctrl-names = "default";
- pinctrl-0 = <&vdd_lcd_default>;
+ hdmi_5v0_sys: regulator-hdmi {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v0_hdmi";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(P, 2) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_smps10_out2>;
};
- vdd_usd: regulator-vdd-usd {
+ vdd_2v9_usd: regulator-vdd-usd {
compatible = "regulator-fixed";
regulator-name = "vdd_sd_slot";
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
- vin-supply = <&tps65913_smps9>;
- enable-active-high;
+ regulator-boot-on;
gpio = <&gpio TEGRA_GPIO(K, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vcore_emmc>;
+ };
- pinctrl-names = "default";
- pinctrl-0 = <&sdmmc3_vdd_default>;
+ vdd_1v8_cam: regulator-cam-vio {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_1v8_cam";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ gpio = <&palmas_gpio 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_1v8_vio>;
+ };
+
+ vdd_1v2_xusb: regulator-xusb-vio {
+ compatible = "regulator-fixed";
+ regulator-name = "avddio_1v2_xusb";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ gpio = <&palmas_gpio 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_3v3_xusb: regulator-xusb-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "hvdd_3v3_xusb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ gpio = <&palmas_gpio 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_3v3_com: regulator-com {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3_com";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(U, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_3v3_touch: regulator-touch-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3_touch";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_1v8_touch: regulator-touch-vio {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_1v8_touch";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ gpio = <&gpio TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
};
};
diff --git a/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts b/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts
index 7cae6ad57544..4caeeb9f1e1d 100644
--- a/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts
@@ -2,6 +2,7 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include "tegra20.dtsi"
#include "tegra20-cpu-opp.dtsi"
@@ -201,16 +202,17 @@
conf_ata {
nvidia,pins = "ata", "atc", "atd", "ate",
"crtp", "dap2", "dap3", "dap4", "dta",
- "dtb", "dtc", "dtd", "dte", "gmb",
- "gme", "i2cp", "pta", "slxc", "slxd",
- "spdi", "spdo", "uda";
+ "dtb", "dtc", "dtd", "gmb", "gme",
+ "i2cp", "pta", "slxc", "slxd", "spdi",
+ "spdo", "uda";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_ENABLE>;
};
conf_atb {
nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
- "gma", "gmc", "gmd", "gpu", "gpu7",
- "gpv", "sdio1", "slxa", "slxk", "uac";
+ "dte", "gma", "gmc", "gmd", "gpu",
+ "gpu7", "gpv", "sdio1", "slxa", "slxk",
+ "uac";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
};
@@ -408,6 +410,24 @@
};
};
+ leds {
+ compatible = "gpio-leds";
+
+ led-ds2 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ gpios = <&gpio TEGRA_GPIO(D, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ led-ds3 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <3>;
+ gpios = <&gpio TEGRA_GPIO(BB, 5) GPIO_ACTIVE_LOW>;
+ };
+ };
+
poweroff {
compatible = "gpio-poweroff";
gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx1.dtsi b/arch/arm/boot/dts/nxp/imx/imx1.dtsi
index 389ecb1ebf8f..a1a89ccacf05 100644
--- a/arch/arm/boot/dts/nxp/imx/imx1.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx1.dtsi
@@ -134,7 +134,7 @@
clock-names = "ipg", "per";
};
- dma: dma@209000 {
+ dma: dma-controller@209000 {
compatible = "fsl,imx1-dma";
reg = <0x00209000 0x1000>;
interrupts = <61 60>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx27.dtsi b/arch/arm/boot/dts/nxp/imx/imx27.dtsi
index ec3ccc8f4095..989b7659b669 100644
--- a/arch/arm/boot/dts/nxp/imx/imx27.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx27.dtsi
@@ -88,7 +88,7 @@
reg = <0x10000000 0x20000>;
ranges;
- dma: dma@10001000 {
+ dma: dma-controller@10001000 {
compatible = "fsl,imx27-dma";
reg = <0x10001000 0x1000>;
interrupts = <32>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
index 2117de872703..0d336cbdb451 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
@@ -175,8 +175,8 @@
gpio-controller;
};
- sensor2: lm75@49 {
- compatible = "lm75";
+ sensor2: temperature-sensor@49 {
+ compatible = "national,lm75b";
reg = <0x49>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
index 151e9cee3c87..2527bfe13145 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
@@ -34,9 +34,7 @@
&display0 {
status = "okay";
-};
-&display0 {
port@1 {
display0_out: endpoint {
remote-endpoint = <&sii9022_in>;
@@ -83,7 +81,3 @@
&panel_dpi {
status = "disabled";
};
-
-&tve {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
index b2d7271d1d24..c34ee84bd716 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi
@@ -254,8 +254,8 @@
interrupts = <6 4>; /* PATA_DATA6, active high */
};
- sensor1: lm75@48 {
- compatible = "lm75";
+ sensor1: temperature-sensor@48 {
+ compatible = "national,lm75b";
reg = <0x48>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts b/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts
index 95b49fc83f7b..299106fbe51c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts
@@ -127,12 +127,21 @@
};
};
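+ /* dummy S/PDIF transmitter/receiver codecs; they replace the deprecated "spdif-out"/"spdif-in" boolean properties */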
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-out;
- spdif-in;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
index a7d5693c5ab7..8d2b608e0b90 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts
@@ -111,12 +111,21 @@
};
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-in;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts b/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts
index 7c298d9aa21e..5353a0c24420 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts
@@ -90,11 +90,16 @@
ssi-controller = <&ssi1>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "On-board SPDIF";
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
index ea40623d12e5..edf55760a5c1 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
@@ -197,11 +197,20 @@
ssi-controller = <&ssi1>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound_spdif: sound-spdif {
compatible = "fsl,imx-audio-spdif";
- spdif-controller = <&spdif>;
- spdif-in;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
model = "imx-spdif";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
index 3a46ade3b6bd..9e97ef5e43f2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi
@@ -121,11 +121,16 @@
mux-ext-port = <3>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
index 758eaf9d93d2..f7fac86f0a6b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi
@@ -506,7 +506,7 @@
>;
};
- pinctrl_gpmi_nand: gpmi-nand {
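+ /* group node names must end in "grp" to match the i.MX pinctrl DT schema */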
+ pinctrl_gpmi_nand: gpminandgrp {
fsl,pins = <
MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
index d3a7a6eeb8e0..b01670cdd52c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
@@ -142,12 +142,21 @@
ssi-controller = <&ssi1>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
/* Optional S/PDIF in on SODIMM 88 and out on SODIMM 90, 137 or 168 */
sound_spdif: sound-spdif {
compatible = "fsl,imx-audio-spdif";
- spdif-controller = <&spdif>;
- spdif-in;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
model = "imx-spdif";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
index 761566ae3cf5..bd66430c1d78 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi
@@ -100,12 +100,17 @@
vin-supply = <&v_5v0>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "Integrated SPDIF";
/* IMX6 doesn't implement this yet */
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
gpio-keys {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
index 082a2e3a391f..b57f4073f881 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi
@@ -761,7 +761,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x170b9
@@ -774,7 +774,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
index 8ec442038ea0..090c0057d117 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi
@@ -750,7 +750,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -763,7 +763,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
index 9df9f79affae..0ed6d25024a2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi
@@ -833,7 +833,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -846,7 +846,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi
index 7f16c602cc07..c6e231de674a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi
@@ -704,7 +704,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -717,7 +717,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
index 7693f92195d5..d0f648938cae 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi
@@ -896,7 +896,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -909,7 +909,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
index 9d0836df0fed..71911df881cc 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi
@@ -680,7 +680,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x4001b0b0 /* EMMY_EN */
MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x4001b0b0 /* EMMY_CFG1# */
@@ -710,7 +710,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
@@ -723,7 +723,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
@@ -752,7 +752,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -768,7 +768,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
index f4cb9e1d34a9..716c324a7458 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi
@@ -817,7 +817,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -833,7 +833,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi
index 424dc7fcd533..453dee4d9227 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi
@@ -629,7 +629,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x170b9
@@ -642,7 +642,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi
index 49ea25c71967..add700bc11cc 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi
@@ -569,7 +569,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -582,7 +582,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi
index a955c77cd499..d1ad65ab6b72 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi
@@ -140,12 +140,17 @@
};
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "On-board SPDIF";
/* IMX6 doesn't implement this yet */
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi
index d339957cc097..dff184a119f3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi
@@ -397,7 +397,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170B1
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100B1
@@ -408,7 +408,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170F9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100F9
@@ -434,7 +434,7 @@
>;
};
- pinctrl_usdhc4_100mhz: usdhc4grp_100mhz {
+ pinctrl_usdhc4_100mhz: usdhc4-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD4_CMD__SD4_CMD 0x170B1
MX6QDL_PAD_SD4_CLK__SD4_CLK 0x100B1
@@ -449,7 +449,7 @@
>;
};
- pinctrl_usdhc4_200mhz: usdhc4grp_200mhz {
+ pinctrl_usdhc4_200mhz: usdhc4-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD4_CMD__SD4_CMD 0x170F9
MX6QDL_PAD_SD4_CLK__SD4_CLK 0x100F9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
index 807f3c95e3ce..aca320ee8f47 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi
@@ -13,7 +13,7 @@
&i2c1 {
lm75: temperature-sensor@49 {
- compatible = "national,lm75";
+ compatible = "national,lm75a";
reg = <0x49>;
vs-supply = <&reg_mba6_3p3v>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
index 789733a45b95..c7bbd6195fef 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi
@@ -23,7 +23,7 @@
&i2c3 {
lm75: temperature-sensor@49 {
- compatible = "national,lm75";
+ compatible = "national,lm75a";
reg = <0x49>;
vs-supply = <&reg_mba6_3p3v>;
};
@@ -50,12 +50,3 @@
reg = <0x68>;
};
};
-
-&iomuxc {
- pinctrl_i2c1: i2c1grp {
- fsl,pins = <
- MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b899
- MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b899
- >;
- };
-};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
index 0a3deaf92eea..35b6bec7a3fa 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi
@@ -143,12 +143,17 @@
"AIN2R", "Line In Jack";
};
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-sabreauto-spdif",
"fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-in;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_in>;
};
backlight {
@@ -690,7 +695,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -705,7 +710,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
index 344ea935c7da..6152a9ed4768 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
@@ -59,20 +59,6 @@
>;
};
- pinctrl_i2c3: i2c3grp {
- fsl,pins = <
- MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b899
- MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899
- >;
- };
-
- pinctrl_i2c3_recovery: i2c3recoverygrp {
- fsl,pins = <
- MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x4001b899
- MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b899
- >;
- };
-
pinctrl_pmic: pmicgrp {
fsl,pins = <
MX6QDL_PAD_NANDF_RB0__GPIO6_IO10 0x1b099 /* PMIC irq */
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
index 68525f0205d3..828996382f24 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
@@ -27,8 +27,8 @@
reg = <0x08>;
};
- sensor@48 {
- compatible = "national,lm75";
+ temperature-sensor@48 {
+ compatible = "national,lm75a";
reg = <0x48>;
vs-supply = <&reg_3p3v>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
index aeba0a273600..1d0966b8d99e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
@@ -20,8 +20,8 @@
reg = <0x08>;
};
- sensor@48 {
- compatible = "national,lm75";
+ temperature-sensor@48 {
+ compatible = "national,lm75a";
reg = <0x48>;
vs-supply = <&reg_3p3v>;
};
@@ -33,3 +33,19 @@
vcc-supply = <&reg_3p3v>;
};
};
+
+&iomuxc {
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b899
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899
+ >;
+ };
+
+ pinctrl_i2c3_recovery: i2c3recoverygrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x4001b899
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b899
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi
index e2fe337f7d9e..5a194f4c0cb9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi
@@ -373,7 +373,7 @@
>;
};
- pinctrl_disp0_1: disp0grp-1 {
+ pinctrl_disp0_1: disp0-1-grp {
fsl,pins = <
MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
@@ -406,7 +406,7 @@
>;
};
- pinctrl_disp0_2: disp0grp-2 {
+ pinctrl_disp0_2: disp0-2-grp {
fsl,pins = <
MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi
index 200559d7158d..d8283eade43e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi
@@ -346,7 +346,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170B9
MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100B9
@@ -357,7 +357,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170F9
MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100F9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi
index a1ea33c4eeb7..59833e8d11d8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi
@@ -436,7 +436,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhzgrp {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170B9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100B9
@@ -451,7 +451,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhzgrp {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170F9
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100F9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi
index 38abb6b50f6c..7130b9c3b3aa 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi
@@ -26,11 +26,16 @@
mux-ext-port = <3>;
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
reg_1p5v: regulator-1p5v {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
index 31eee0419af7..7c899291ab0d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
@@ -457,7 +457,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
@@ -472,7 +472,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
@@ -498,7 +498,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
@@ -509,7 +509,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
@@ -531,7 +531,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
@@ -542,7 +542,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts
index 9d7c8884892a..2545c0fe47c8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts
@@ -166,7 +166,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170b9
MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100b9
@@ -182,7 +182,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170f9
MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100f9
@@ -209,7 +209,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170b9
MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100b9
@@ -220,7 +220,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170f9
MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts b/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts
index b0c27b9b0244..dfbfb8119bf3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts
@@ -97,11 +97,16 @@
"AIN2R", "Line In Jack";
};
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-in;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_in>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
index 7d4170c27732..277a6e039045 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
@@ -183,12 +183,17 @@
};
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx6sx-sdb-spdif",
"fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif>;
- spdif-out;
+ audio-cpu = <&spdif>;
+ audio-codec = <&spdif_out>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi
index 725d0b5cb55f..bbf792ac4896 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi
@@ -72,6 +72,11 @@
};
};
+&clks {
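+ /* ENET_REF provides the 50 MHz RMII reference clock for the Ethernet PHY */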
+ assigned-clocks = <&clks IMX6SX_CLK_ENET_REF>;
+ assigned-clock-rates = <50000000>;
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
index 9cfb99ac9e9d..b74ee8948a78 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
@@ -608,7 +608,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -620,7 +620,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts
index ad7f63ca521a..0d3b1ab82eab 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts
@@ -112,7 +112,7 @@
>;
};
- pinctrl_ecspi3_master: ecspi3grp1 {
+ pinctrl_ecspi3_master: ecspi3-1-grp {
fsl,pins = <
MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0
MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0
@@ -121,7 +121,7 @@
>;
};
- pinctrl_ecspi3_slave: ecspi3grp2 {
+ pinctrl_ecspi3_slave: ecspi3-2-grp {
fsl,pins = <
MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0
MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
index ed61ae8524fa..8aea8c99e2af 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts
@@ -248,7 +248,7 @@
>;
};
- pinctrl_ecspi1_master: ecspi1grp1 {
+ pinctrl_ecspi1_master: ecspi1-1-grp {
fsl,pins = <
MX6UL_PAD_LCD_DATA20__ECSPI1_SCLK 0x10b0
MX6UL_PAD_LCD_DATA22__ECSPI1_MOSI 0x10b0
@@ -309,7 +309,7 @@
>;
};
- pinctrl_lcdif_dat0_17: lcdifdatgrp0-17 {
+ pinctrl_lcdif_dat0_17: lcdifdat0-17-grp {
fsl,pins = <
MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79
MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79
@@ -332,14 +332,14 @@
>;
};
- pinctrl_lcdif_clken: lcdifctrlgrp1 {
+ pinctrl_lcdif_clken: lcdifctrl-1-grp {
fsl,pins = <
MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x17050
MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79
>;
};
- pinctrl_lcdif_hvsync: lcdifctrlgrp2 {
+ pinctrl_lcdif_hvsync: lcdifctrl-2-grp {
fsl,pins = <
MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79
MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79
@@ -370,7 +370,7 @@
>;
};
- pinctrl_sai2_sleep: sai2grp-sleep {
+ pinctrl_sai2_sleep: sai2-sleep-grp {
fsl,pins = <
MX6UL_PAD_JTAG_TRST_B__GPIO1_IO15 0x3000
MX6UL_PAD_JTAG_TCK__GPIO1_IO14 0x3000
@@ -381,7 +381,7 @@
>;
};
- pinctrl_uart2_4wires: uart2grp-4wires {
+ pinctrl_uart2_4wires: uart2-4wires-grp {
fsl,pins = <
MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1
MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1
@@ -390,7 +390,7 @@
>;
};
- pinctrl_uart3_2wires: uart3grp-2wires {
+ pinctrl_uart3_2wires: uart3-2wires-grp {
fsl,pins = <
MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1
MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi
index 4a03ea6d24dc..9cc3eebb6b05 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi
@@ -232,7 +232,7 @@
>;
};
- pinctrl_usdhc1_sleep: usdhc1grp-sleep {
+ pinctrl_usdhc1_sleep: usdhc1-sleep-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__GPIO2_IO16 0x3000
MX6UL_PAD_SD1_CLK__GPIO2_IO17 0x3000
@@ -250,7 +250,7 @@
>;
};
- pinctrl_wifibt_ctrl_sleep: wifibt-ctrl-grp-sleep {
+ pinctrl_wifibt_ctrl_sleep: wifibt-ctrl-sleep-grp {
fsl,pins = <
MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x3000
MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x3000
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
index cdbb8c435cd6..2a6bb5ff808a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
@@ -365,7 +365,7 @@
};
pinctrl_tsc: tscgrp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
@@ -410,7 +410,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -421,7 +421,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
index ee86c36205f9..118df2a457c9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
@@ -346,7 +346,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -357,7 +357,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
index d8f7877349c9..29d2f86d5e34 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
@@ -351,7 +351,7 @@
>;
};
- pinctrl_usbotg1: usbotg1 {
+ pinctrl_usbotg1: usbotg1grp {
fsl,pins = <
MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0x1b0b0
>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts
index 1d863a16bcf0..5e62272acfba 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts
@@ -100,7 +100,7 @@
>;
};
- pinctrl_usb_otg1_vbus: usb-otg1-vbus {
+ pinctrl_usb_otg1_vbus: usb-otg1-vbus-grp {
fsl,pins = <
MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08 0x79
>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi
index 04477fd4b9a9..4a45fb784ff7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi
@@ -31,7 +31,7 @@
>;
};
- pinctrl_uart2_bt: uart2grp-bt {
+ pinctrl_uart2_bt: uart2-bt-grp {
fsl,pins = <
MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x17059
MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x17059
@@ -40,7 +40,7 @@
>;
};
- pinctrl_usdhc2_wl: usdhc2grp-wl {
+ pinctrl_usdhc2_wl: usdhc2-wl-grp {
fsl,pins = <
MX6UL_PAD_LCD_DATA18__USDHC2_CMD 0x10051
MX6UL_PAD_LCD_DATA19__USDHC2_CLK 0x10061
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi
index 38ea4dcfa228..bef5eb38a90d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi
@@ -219,7 +219,7 @@
>;
};
- pinctrl_flexcan1: flexcan1 {
+ pinctrl_flexcan1: flexcan1grp {
fsl,pins = <
MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x0b0b0
MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x0b0b0
@@ -275,7 +275,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -286,7 +286,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
index 57e647fc3237..c9c0794f01a2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
@@ -202,7 +202,7 @@
>;
};
- pinctrl_pmic: pmic {
+ pinctrl_pmic: pmicgrp {
fsl,pins = <
/* PMIC irq */
MX6UL_PAD_CSI_DATA03__GPIO4_IO24 0x1b099
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts
index ef76ece21010..20c810a81403 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts
@@ -198,7 +198,7 @@
>;
};
- pinctrl_disp0_3: disp0grp-3 {
+ pinctrl_disp0_3: disp0-3-grp {
fsl,pins = <
MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */
MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
index 864173e30709..278120404d31 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
@@ -578,19 +578,13 @@
};
&iomuxc {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
- pinctrl_hog: hoggrp {
- };
-
pinctrl_led: ledgrp {
fsl,pins = <
MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x0b0b0 /* LED */
>;
};
- pinctrl_disp0_1: disp0grp-1 {
+ pinctrl_disp0_1: disp0-1-grp {
fsl,pins = <
MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */
MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */
@@ -623,7 +617,7 @@
>;
};
- pinctrl_disp0_2: disp0grp-2 {
+ pinctrl_disp0_2: disp0-2-grp {
fsl,pins = <
MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */
MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */
@@ -713,25 +707,25 @@
>;
};
- pinctrl_etnphy0_int: etnphy-intgrp-0 {
+ pinctrl_etnphy0_int: etnphy-int-0-grp {
fsl,pins = <
MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05 0x0b0b0 /* ETN PHY INT */
>;
};
- pinctrl_etnphy0_rst: etnphy-rstgrp-0 {
+ pinctrl_etnphy0_rst: etnphy-rst-0-grp {
fsl,pins = <
MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06 0x0b0b0 /* ETN PHY RESET */
>;
};
- pinctrl_etnphy1_int: etnphy-intgrp-1 {
+ pinctrl_etnphy1_int: etnphy-int-1-grp {
fsl,pins = <
MX6UL_PAD_CSI_DATA06__GPIO4_IO27 0x0b0b0 /* ETN PHY INT */
>;
};
- pinctrl_etnphy1_rst: etnphy-rstgrp-1 {
+ pinctrl_etnphy1_rst: etnphy-rst-1-grp {
fsl,pins = <
MX6UL_PAD_CSI_DATA07__GPIO4_IO28 0x0b0b0 /* ETN PHY RESET */
>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi
index d03694feaf5c..83b9de17cee2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi
@@ -169,7 +169,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -180,7 +180,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
@@ -206,7 +206,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9
MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9
@@ -221,7 +221,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9
MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi
index 6bb12e0bbc7e..28fddbcdc55e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi
@@ -323,7 +323,7 @@
>;
};
- pinctrl_reg_vmmc: usdhc1regvmmc {
+ pinctrl_reg_vmmc: usdhc1regvmmc-grp {
fsl,pins = <
MX6UL_PAD_GPIO1_IO09__GPIO1_IO09 0x17059
>;
@@ -339,14 +339,14 @@
};
pinctrl_uart1: uart1grp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
>;
};
pinctrl_uart2: uart2grp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1
MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1
MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS 0x1b0b1
@@ -355,7 +355,7 @@
};
pinctrl_uart3: uart3grp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1
MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1
MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS 0x1b0b1
@@ -364,21 +364,21 @@
};
pinctrl_uart4: uart4grp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX 0x1b0b1
MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX 0x1b0b1
>;
};
pinctrl_uart5: uart5grp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1
MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1
>;
};
pinctrl_usb_otg1_id: usbotg1idgrp {
- fsl,pin = <
+ fsl,pins = <
MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059
>;
};
@@ -394,7 +394,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
@@ -405,7 +405,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
@@ -416,7 +416,7 @@
>;
};
- pinctrl_usdhc1_cd: usdhc1cd {
+ pinctrl_usdhc1_cd: usdhc1cd-grp {
fsl,pins = <
MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059
>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi
index f5ad6b5c1ad0..278152875f8e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi
@@ -102,7 +102,7 @@
>;
};
- pinctrl_reg_vqmmc: usdhc1regvqmmc {
+ pinctrl_reg_vqmmc: usdhc1regvqmmcgrp {
fsl,pins = <
MX6UL_PAD_GPIO1_IO05__GPIO1_IO05 0x17059
>;
@@ -123,7 +123,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9
MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9
@@ -138,7 +138,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9
MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts b/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts
index c92e4e2f6ab9..6159ed70d966 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts
@@ -94,7 +94,7 @@
};
&iomuxc {
- pinctrl_gpmi_nand: gpmi-nand {
+ pinctrl_gpmi_nand: gpminandgrp {
fsl,pins = <
MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0xb0b1
MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0xb0b1
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi
index 9fe51884af79..62e41edcaf1d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi
@@ -903,7 +903,7 @@
>;
};
- pinctrl_lvds_transceiver: lvdstx {
+ pinctrl_lvds_transceiver: lvdstxgrp {
fsl,pins = <
MX7D_PAD_ENET1_RGMII_RD2__GPIO7_IO2 0x14 /* SODIMM 63 */
MX7D_PAD_ENET1_RGMII_RD3__GPIO7_IO3 0x74 /* SODIMM 55 */
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
index 52869e68f833..e1c401f468e1 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
@@ -81,6 +81,12 @@
};
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>,
+ <&adc2 0>, <&adc2 1>, <&adc2 2>, <&adc2 3>;
+ };
+
reg_sd1_vmmc: regulator-sd1-vmmc {
compatible = "regulator-fixed";
regulator-name = "VCC3V3_SD1";
@@ -310,7 +316,7 @@
&i2c1 {
lm75: temperature-sensor@49 {
- compatible = "national,lm75";
+ compatible = "national,lm75a";
reg = <0x49>;
vs-supply = <&reg_vcc_3v3>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts b/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts
index 9c6476bda4a0..7ee66be8bccb 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts
@@ -419,7 +419,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog_1 &pinctrl_j2>;
- pinctrl_hog_1: hoggrp-1 {
+ pinctrl_hog_1: hoggrp {
fsl,pins = <
MX7D_PAD_SD3_RESET_B__GPIO6_IO11 0x5d
MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x7d
@@ -665,7 +665,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog_2>;
- pinctrl_hog_2: hoggrp-2 {
+ pinctrl_hog_2: hoggrp {
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x7d
MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi b/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi
index 8d5037ac03c7..a1574ccec89c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi
@@ -444,14 +444,14 @@
>;
};
- pinctrl_can1: can1frp {
+ pinctrl_can1: can1frpgrp {
fsl,pins = <
MX7D_PAD_SAI1_RX_DATA__FLEXCAN1_RX 0x59
MX7D_PAD_SAI1_TX_BCLK__FLEXCAN1_TX 0x59
>;
};
- pinctrl_can2: can2frp {
+ pinctrl_can2: can2frpgrp {
fsl,pins = <
MX7D_PAD_SAI1_TX_SYNC__FLEXCAN2_RX 0x59
MX7D_PAD_SAI1_TX_DATA__FLEXCAN2_TX 0x59
@@ -499,19 +499,19 @@
>;
};
- pinctrl_pwm1: pwm1 {
+ pinctrl_pwm1: pwm1grp {
fsl,pins = <
MX7D_PAD_GPIO1_IO08__PWM1_OUT 0x7f
>;
};
- pinctrl_pwm2: pwm2 {
+ pinctrl_pwm2: pwm2grp {
fsl,pins = <
MX7D_PAD_GPIO1_IO09__PWM2_OUT 0x7f
>;
};
- pinctrl_pwm3: pwm3 {
+ pinctrl_pwm3: pwm3grp {
fsl,pins = <
MX7D_PAD_GPIO1_IO10__PWM3_OUT 0x7f
>;
@@ -563,7 +563,7 @@
>;
};
- pinctrl_usbotg1_pwr: usbotg_pwr {
+ pinctrl_usbotg1_pwr: usbotgpwrgrp {
fsl,pins = <
MX7D_PAD_UART3_TX_DATA__GPIO4_IO5 0x14
>;
@@ -581,7 +581,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp_100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX7D_PAD_SD1_CMD__SD1_CMD 0x5a
MX7D_PAD_SD1_CLK__SD1_CLK 0x1a
@@ -593,7 +593,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp_200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX7D_PAD_SD1_CMD__SD1_CMD 0x5b
MX7D_PAD_SD1_CLK__SD1_CLK 0x1b
@@ -631,7 +631,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
@@ -646,7 +646,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
index 92cb45dacda6..eec526a96311 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts
@@ -508,7 +508,7 @@
>;
};
- pinctrl_usdhc2_100mhz: usdhc2grp_100mhz {
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
fsl,pins = <
MX7D_PAD_SD2_CMD__SD2_CMD 0x5a
MX7D_PAD_SD2_CLK__SD2_CLK 0x1a
@@ -519,7 +519,7 @@
>;
};
- pinctrl_usdhc2_200mhz: usdhc2grp_200mhz {
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
fsl,pins = <
MX7D_PAD_SD2_CMD__SD2_CMD 0x5b
MX7D_PAD_SD2_CLK__SD2_CLK 0x1b
@@ -546,7 +546,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
@@ -562,7 +562,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts
index cabdaa6dc518..40156cd9195f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts
@@ -21,23 +21,21 @@
};
&iomuxc {
- imx7d-sdb {
- pinctrl_tsc2046_pendown: tsc2046_pendown {
- fsl,pins = <
- MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x59
- >;
- };
-
- pinctrl_hog: hoggrp {
- fsl,pins = <
- MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */
- >;
- };
-
- pinctrl_usb_otg2_vbus_reg_reva: usbotg2vbusregrevagrp {
- fsl,pins = <
- MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14
- >;
- };
+ pinctrl_tsc2046_pendown: tsc2046-pendowngrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x59
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */
+ >;
+ };
+
+ pinctrl_usb_otg2_vbus_reg_reva: usbotg2vbusregrevagrp {
+ fsl,pins = <
+ MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14
+ >;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
index 0462e43ec09b..f712537fca16 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
@@ -537,342 +537,340 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
- imx7d-sdb {
- pinctrl_brcm_reg: brcmreggrp {
- fsl,pins = <
- MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21 0x14
- >;
- };
+ pinctrl_brcm_reg: brcmreggrp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21 0x14
+ >;
+ };
- pinctrl_ecspi3: ecspi3grp {
- fsl,pins = <
- MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO 0x2
- MX7D_PAD_SAI2_TX_BCLK__ECSPI3_MOSI 0x2
- MX7D_PAD_SAI2_RX_DATA__ECSPI3_SCLK 0x2
- MX7D_PAD_SD2_CD_B__GPIO5_IO9 0x59
- >;
- };
+ pinctrl_ecspi3: ecspi3grp {
+ fsl,pins = <
+ MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO 0x2
+ MX7D_PAD_SAI2_TX_BCLK__ECSPI3_MOSI 0x2
+ MX7D_PAD_SAI2_RX_DATA__ECSPI3_SCLK 0x2
+ MX7D_PAD_SD2_CD_B__GPIO5_IO9 0x59
+ >;
+ };
- pinctrl_enet1: enet1grp {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x3
- MX7D_PAD_GPIO1_IO11__ENET1_MDC 0x3
- MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
- MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
- MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
- MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
- MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
- MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
- MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
- MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
- MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
- MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
- MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
- MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
- >;
- };
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x3
+ MX7D_PAD_GPIO1_IO11__ENET1_MDC 0x3
+ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
+ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
+ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
+ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
+ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
+ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
+ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
+ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
+ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
+ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
+ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
+ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
+ >;
+ };
- pinctrl_enet2: enet2grp {
- fsl,pins = <
- MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
- MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
- MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
- MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
- MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
- MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
- MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
- MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
- MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
- MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
- MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
- MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
- >;
- };
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
+ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
+ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
+ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
+ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
+ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
+ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
+ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
+ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
+ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
+ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
+ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
+ >;
+ };
- pinctrl_enet2_reg: enet2reggrp {
- fsl,pins = <
- MX7D_PAD_LPSR_GPIO1_IO04__GPIO1_IO4 0x14
- >;
- };
+ pinctrl_enet2_reg: enet2reggrp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO04__GPIO1_IO4 0x14
+ >;
+ };
- pinctrl_flexcan2: flexcan2grp {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59
- MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59
- >;
- };
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59
+ MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59
+ >;
+ };
- pinctrl_flexcan2_reg: flexcan2reggrp {
- fsl,pins = <
- MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x59 /* CAN_STBY */
- >;
- };
+ pinctrl_flexcan2_reg: flexcan2reggrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x59 /* CAN_STBY */
+ >;
+ };
- pinctrl_gpio_keys: gpio_keysgrp {
- fsl,pins = <
- MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x59
- MX7D_PAD_SD2_WP__GPIO5_IO10 0x59
- >;
- };
+ pinctrl_gpio_keys: gpio-keysgrp {
+ fsl,pins = <
+ MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x59
+ MX7D_PAD_SD2_WP__GPIO5_IO10 0x59
+ >;
+ };
- pinctrl_hog: hoggrp {
- fsl,pins = <
- MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */
- MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x59 /* headphone detect */
- >;
- };
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */
+ MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x59 /* headphone detect */
+ >;
+ };
- pinctrl_i2c1: i2c1grp {
- fsl,pins = <
- MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
- MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f
- >;
- };
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
+ MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f
+ >;
+ };
- pinctrl_i2c2: i2c2grp {
- fsl,pins = <
- MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f
- MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f
- >;
- };
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f
+ MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f
+ >;
+ };
- pinctrl_i2c3: i2c3grp {
- fsl,pins = <
- MX7D_PAD_I2C3_SDA__I2C3_SDA 0x4000007f
- MX7D_PAD_I2C3_SCL__I2C3_SCL 0x4000007f
- >;
- };
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX7D_PAD_I2C3_SDA__I2C3_SDA 0x4000007f
+ MX7D_PAD_I2C3_SCL__I2C3_SCL 0x4000007f
+ >;
+ };
- pinctrl_i2c4: i2c4grp {
- fsl,pins = <
- MX7D_PAD_SAI1_RX_BCLK__I2C4_SDA 0x4000007f
- MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL 0x4000007f
- >;
- };
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_RX_BCLK__I2C4_SDA 0x4000007f
+ MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL 0x4000007f
+ >;
+ };
- pinctrl_lcdif: lcdifgrp {
- fsl,pins = <
- MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79
- MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79
- MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79
- MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79
- MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79
- MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79
- MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79
- MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79
- MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79
- MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79
- MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79
- MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79
- MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79
- MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79
- MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79
- MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79
- MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79
- MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79
- MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79
- MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79
- MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79
- MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79
- MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79
- MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79
- MX7D_PAD_LCD_CLK__LCD_CLK 0x79
- MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79
- MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79
- MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79
- MX7D_PAD_LCD_RESET__LCD_RESET 0x79
- >;
- };
+ pinctrl_lcdif: lcdifgrp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79
+ MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79
+ MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79
+ MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79
+ MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79
+ MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79
+ MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79
+ MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79
+ MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79
+ MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79
+ MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79
+ MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79
+ MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79
+ MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79
+ MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79
+ MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79
+ MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79
+ MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79
+ MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79
+ MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79
+ MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79
+ MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79
+ MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79
+ MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79
+ MX7D_PAD_LCD_CLK__LCD_CLK 0x79
+ MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79
+ MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79
+ MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79
+ MX7D_PAD_LCD_RESET__LCD_RESET 0x79
+ >;
+ };
- pinctrl_sai1: sai1grp {
- fsl,pins = <
- MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
- MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f
- MX7D_PAD_ENET1_CRS__SAI1_TX_SYNC 0x1f
- MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30
- MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f
- >;
- };
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
+ MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f
+ MX7D_PAD_ENET1_CRS__SAI1_TX_SYNC 0x1f
+ MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30
+ MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f
+ >;
+ };
- pinctrl_sai2: sai2grp {
- fsl,pins = <
- MX7D_PAD_SAI2_TX_BCLK__SAI2_TX_BCLK 0x1f
- MX7D_PAD_SAI2_TX_SYNC__SAI2_TX_SYNC 0x1f
- MX7D_PAD_SAI2_TX_DATA__SAI2_TX_DATA0 0x30
- MX7D_PAD_SAI2_RX_DATA__SAI2_RX_DATA0 0x1f
- >;
- };
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX7D_PAD_SAI2_TX_BCLK__SAI2_TX_BCLK 0x1f
+ MX7D_PAD_SAI2_TX_SYNC__SAI2_TX_SYNC 0x1f
+ MX7D_PAD_SAI2_TX_DATA__SAI2_TX_DATA0 0x30
+ MX7D_PAD_SAI2_RX_DATA__SAI2_RX_DATA0 0x1f
+ >;
+ };
- pinctrl_sai3: sai3grp {
- fsl,pins = <
- MX7D_PAD_UART3_TX_DATA__SAI3_TX_BCLK 0x1f
- MX7D_PAD_UART3_CTS_B__SAI3_TX_SYNC 0x1f
- MX7D_PAD_UART3_RTS_B__SAI3_TX_DATA0 0x30
- >;
- };
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX7D_PAD_UART3_TX_DATA__SAI3_TX_BCLK 0x1f
+ MX7D_PAD_UART3_CTS_B__SAI3_TX_SYNC 0x1f
+ MX7D_PAD_UART3_RTS_B__SAI3_TX_DATA0 0x30
+ >;
+ };
- pinctrl_spi4: spi4grp {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
- MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
- MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
- >;
- };
+ pinctrl_spi4: spi4grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
+ MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
+ MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
+ >;
+ };
- pinctrl_tsc2046_pendown: tsc2046_pendown {
- fsl,pins = <
- MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
- >;
- };
+ pinctrl_tsc2046_pendown: tsc2046-pendowngrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
+ >;
+ };
- pinctrl_uart1: uart1grp {
- fsl,pins = <
- MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79
- MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79
- >;
- };
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79
+ MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79
+ >;
+ };
- pinctrl_uart5: uart5grp {
- fsl,pins = <
- MX7D_PAD_SAI1_TX_BCLK__UART5_DCE_TX 0x79
- MX7D_PAD_SAI1_RX_DATA__UART5_DCE_RX 0x79
- MX7D_PAD_SAI1_TX_SYNC__UART5_DCE_CTS 0x79
- MX7D_PAD_SAI1_TX_DATA__UART5_DCE_RTS 0x79
- >;
- };
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_TX_BCLK__UART5_DCE_TX 0x79
+ MX7D_PAD_SAI1_RX_DATA__UART5_DCE_RX 0x79
+ MX7D_PAD_SAI1_TX_SYNC__UART5_DCE_CTS 0x79
+ MX7D_PAD_SAI1_TX_DATA__UART5_DCE_RTS 0x79
+ >;
+ };
- pinctrl_uart6: uart6grp {
- fsl,pins = <
- MX7D_PAD_ECSPI1_MOSI__UART6_DCE_TX 0x79
- MX7D_PAD_ECSPI1_SCLK__UART6_DCE_RX 0x79
- MX7D_PAD_ECSPI1_SS0__UART6_DCE_CTS 0x79
- MX7D_PAD_ECSPI1_MISO__UART6_DCE_RTS 0x79
- >;
- };
+ pinctrl_uart6: uart6grp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI1_MOSI__UART6_DCE_TX 0x79
+ MX7D_PAD_ECSPI1_SCLK__UART6_DCE_RX 0x79
+ MX7D_PAD_ECSPI1_SS0__UART6_DCE_CTS 0x79
+ MX7D_PAD_ECSPI1_MISO__UART6_DCE_RTS 0x79
+ >;
+ };
- pinctrl_usdhc1_gpio: usdhc1_gpiogrp {
- fsl,pins = <
- MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x59 /* CD */
- MX7D_PAD_SD1_WP__GPIO5_IO1 0x59 /* WP */
- MX7D_PAD_SD1_RESET_B__GPIO5_IO2 0x59 /* vmmc */
- MX7D_PAD_GPIO1_IO08__SD1_VSELECT 0x59 /* VSELECT */
- >;
- };
+ pinctrl_usdhc1_gpio: usdhc1-gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x59 /* CD */
+ MX7D_PAD_SD1_WP__GPIO5_IO1 0x59 /* WP */
+ MX7D_PAD_SD1_RESET_B__GPIO5_IO2 0x59 /* vmmc */
+ MX7D_PAD_GPIO1_IO08__SD1_VSELECT 0x59 /* VSELECT */
+ >;
+ };
- pinctrl_usdhc1: usdhc1grp {
- fsl,pins = <
- MX7D_PAD_SD1_CMD__SD1_CMD 0x59
- MX7D_PAD_SD1_CLK__SD1_CLK 0x19
- MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59
- MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59
- MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59
- MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59
- >;
- };
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x59
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x19
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59
+ >;
+ };
- pinctrl_usdhc1_100mhz: usdhc1grp_100mhz {
- fsl,pins = <
- MX7D_PAD_SD1_CMD__SD1_CMD 0x5a
- MX7D_PAD_SD1_CLK__SD1_CLK 0x1a
- MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5a
- MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5a
- MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5a
- MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5a
- >;
- };
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x5a
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x1a
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5a
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5a
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5a
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5a
+ >;
+ };
- pinctrl_usdhc1_200mhz: usdhc1grp_200mhz {
- fsl,pins = <
- MX7D_PAD_SD1_CMD__SD1_CMD 0x5b
- MX7D_PAD_SD1_CLK__SD1_CLK 0x1b
- MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5b
- MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5b
- MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5b
- MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5b
- >;
- };
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x5b
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x1b
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5b
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5b
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5b
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5b
+ >;
+ };
- pinctrl_usdhc2: usdhc2grp {
- fsl,pins = <
- MX7D_PAD_SD2_CMD__SD2_CMD 0x59
- MX7D_PAD_SD2_CLK__SD2_CLK 0x19
- MX7D_PAD_SD2_DATA0__SD2_DATA0 0x59
- MX7D_PAD_SD2_DATA1__SD2_DATA1 0x59
- MX7D_PAD_SD2_DATA2__SD2_DATA2 0x59
- MX7D_PAD_SD2_DATA3__SD2_DATA3 0x59
- >;
- };
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_CMD__SD2_CMD 0x59
+ MX7D_PAD_SD2_CLK__SD2_CLK 0x19
+ MX7D_PAD_SD2_DATA0__SD2_DATA0 0x59
+ MX7D_PAD_SD2_DATA1__SD2_DATA1 0x59
+ MX7D_PAD_SD2_DATA2__SD2_DATA2 0x59
+ MX7D_PAD_SD2_DATA3__SD2_DATA3 0x59
+ >;
+ };
- pinctrl_usdhc2_100mhz: usdhc2grp_100mhz {
- fsl,pins = <
- MX7D_PAD_SD2_CMD__SD2_CMD 0x5a
- MX7D_PAD_SD2_CLK__SD2_CLK 0x1a
- MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5a
- MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5a
- MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5a
- MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5a
- >;
- };
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_CMD__SD2_CMD 0x5a
+ MX7D_PAD_SD2_CLK__SD2_CLK 0x1a
+ MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5a
+ MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5a
+ MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5a
+ MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5a
+ >;
+ };
- pinctrl_usdhc2_200mhz: usdhc2grp_200mhz {
- fsl,pins = <
- MX7D_PAD_SD2_CMD__SD2_CMD 0x5b
- MX7D_PAD_SD2_CLK__SD2_CLK 0x1b
- MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5b
- MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5b
- MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5b
- MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5b
- >;
- };
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_CMD__SD2_CMD 0x5b
+ MX7D_PAD_SD2_CLK__SD2_CLK 0x1b
+ MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5b
+ MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5b
+ MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5b
+ MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5b
+ >;
+ };
- pinctrl_usdhc3: usdhc3grp {
- fsl,pins = <
- MX7D_PAD_SD3_CMD__SD3_CMD 0x59
- MX7D_PAD_SD3_CLK__SD3_CLK 0x19
- MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
- MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
- MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
- MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
- MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
- MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
- MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
- MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
- MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19
- >;
- };
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x59
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x19
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19
+ >;
+ };
- pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
- fsl,pins = <
- MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
- MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
- MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a
- MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a
- MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a
- MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a
- MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a
- MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a
- MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a
- MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a
- MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a
- >;
- };
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a
+ >;
+ };
- pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
- fsl,pins = <
- MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
- MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
- MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b
- MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b
- MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b
- MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b
- MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b
- MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b
- MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b
- MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b
- MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b
- >;
- };
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b
+ >;
};
};
@@ -901,7 +899,7 @@
>;
};
- pinctrl_sai3_mclk: sai3grp_mclk {
+ pinctrl_sai3_mclk: sai3-mclk-grp {
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO03__SAI3_MCLK 0x1f
>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
index 521493342fe9..8f5566027c25 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
@@ -350,7 +350,7 @@
&iomuxc_lpsr {
pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
- fsl,phy = <
+ fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
index 7bab113ca6da..af4acc311572 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
@@ -459,7 +459,7 @@
>;
};
- pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
@@ -475,7 +475,7 @@
>;
};
- pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
fsl,pins = <
MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
diff --git a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
index e78d0a7d8cd2..941d9860218e 100644
--- a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
@@ -505,7 +505,7 @@
>;
};
- pinctrl_uart6dte: uart6dte {
+ pinctrl_uart6dte: uart6dtegrp {
fsl,pins = <
MX6UL_PAD_CSI_PIXCLK__UART6_DTE_TX 0x1b0b1
MX6UL_PAD_CSI_MCLK__UART6_DTE_RX 0x1b0b1
@@ -537,7 +537,7 @@
>;
};
- pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x00017069
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x000170b9
@@ -552,7 +552,7 @@
>;
};
- pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
fsl,pins = <
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x00017069
MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x000170f9
diff --git a/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts
index 224f80a4a31d..4aefbc01dfc0 100644
--- a/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts
+++ b/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts
@@ -482,8 +482,8 @@
reg = <0x1d>;
};
- lm75@48 {
- compatible = "nxp,lm75";
+ temperature-sensor@48 {
+ compatible = "national,lm75b";
reg = <0x48>;
};
diff --git a/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts b/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts
index 1f84654df50c..846afb8ccbf1 100644
--- a/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts
+++ b/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts
@@ -511,7 +511,7 @@
clock-frequency = <400000>;
sensor@49 {
- compatible = "lm75";
+ compatible = "national,lm75";
reg = <0x49>;
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts b/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts
index 7365fe4581a3..33b36af1656f 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts
@@ -52,7 +52,7 @@
};
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
nand-controller@8000c000 {
pinctrl-names = "default";
pinctrl-0 = <&gpmi_pins_a &gpmi_pins_fixup>;
@@ -99,7 +99,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
lradc@80050000 {
status = "okay";
fsl,lradc-touchscreen-wires = <4>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts b/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts
index 229e727b222e..e372e9327a47 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts
@@ -19,7 +19,7 @@
};
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
ssp0: spi@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
@@ -64,7 +64,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
lradc@80050000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
index b23e7ada9c80..cb661bf2d157 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
@@ -55,7 +55,7 @@
};
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
ssp0: spi@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
@@ -100,7 +100,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
pwm: pwm@80064000 {
pinctrl-names = "default";
pinctrl-0 = <&pwm2_pins_a>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts b/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts
index 69124ba6a666..b2b6f8514999 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts
@@ -16,7 +16,7 @@
};
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
ssp0: spi@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
@@ -44,7 +44,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
auart0: serial@8006c000 {
pinctrl-names = "default";
pinctrl-0 = <&auart0_pins_a>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
index 28341d8315c2..0b088c8ab6b6 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts
@@ -54,7 +54,7 @@
};
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
ssp0: spi@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
@@ -101,7 +101,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
i2c: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c_pins_a>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
index 0309592af1e1..5e21252fb7c9 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
+++ b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
@@ -45,7 +45,7 @@
reg = <0x80000000 0x80000>;
ranges;
- apbh@80000000 {
+ apbh-bus@80000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -476,7 +476,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts b/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts
index f9bf40d96568..4c4ea91c286f 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts
@@ -11,19 +11,13 @@
reg = <0x40000000 0x04000000>;
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_3p3v: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "3P3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
sound {
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts
index c72fe2d392f1..fd177daa6385 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts
@@ -14,7 +14,7 @@
compatible = "crystalfontz,cfa10037", "crystalfontz,cfa10036", "fsl,imx28";
apb@80000000 {
- apbh@80000000 {
+ apbh-bus@80000000 {
pinctrl@80018000 {
usb_pins_cfa10037: usb-10037@0 {
reg = <0>;
@@ -38,7 +38,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
usbphy1: usbphy@8007e000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi
index 69fcb0dde940..410dfe17f8ca 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi
@@ -55,23 +55,6 @@
status = "okay";
};
-&saif0 {
- pinctrl-names = "default";
- pinctrl-0 = <&saif0_pins_a>;
- #sound-dai-cells = <0>;
- assigned-clocks = <&clks 53>;
- assigned-clock-rates = <12000000>;
- status = "okay";
-};
-
-&saif1 {
- pinctrl-names = "default";
- pinctrl-0 = <&saif1_pins_a>;
- fsl,saif-master = <&saif0>;
- #sound-dai-cells = <0>;
- status = "okay";
-};
-
&spi3_pins_a {
fsl,pinmux-ids = <
MX28_PAD_AUART2_RX__SSP3_D4
@@ -109,7 +92,7 @@
flash@0 {
compatible = "jedec,spi-nor";
- spi-max-frequency = <40000000>;
+ spi-max-frequency = <20000000>;
reg = <0>;
partitions {
@@ -133,14 +116,21 @@
reg = <0x90000 0x10000>;
};
- partition@100000 {
- label = "kernel";
- reg = <0x100000 0x400000>;
+ partition@a0000 {
+ label = "rescue";
+ reg = <0xa0000 0xf40000>;
+ };
+
+ partition@fe0000 {
+ /* 1st sector for SPL boot img source data */
+ label = "spl-boot-data1";
+ reg = <0xfe0000 0x10000>;
};
- partition@500000 {
- label = "swupdate";
- reg = <0x500000 0x800000>;
+ partition@ff0000 {
+ /* 2nd sector for SPL boot img source data */
+ label = "spl-boot-data2";
+ reg = <0xff0000 0x10000>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
index d38183edf0fd..9290635352f1 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts
@@ -615,13 +615,13 @@
&saif0 {
pinctrl-names = "default";
pinctrl-0 = <&saif0_pins_b>;
- fsl,saif-master;
status = "okay";
};
&saif1 {
pinctrl-names = "default";
pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
index 4817fba2d938..bbea8b77386f 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
+++ b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
@@ -56,7 +56,7 @@
reg = <0x80000000 0x80000>;
ranges;
- apbh@80000000 {
+ apbh-bus@80000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -1092,7 +1092,7 @@
};
};
- apbx@80040000 {
+ apbx-bus@80040000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/pma8084.dtsi b/arch/arm/boot/dts/qcom/pma8084.dtsi
index 2985f4805b93..309f5256754b 100644
--- a/arch/arm/boot/dts/qcom/pma8084.dtsi
+++ b/arch/arm/boot/dts/qcom/pma8084.dtsi
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
@@ -19,12 +20,17 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
};
- pwrkey@800 {
- compatible = "qcom,pm8941-pwrkey";
+ pon@800 {
+ compatible = "qcom,pm8941-pon";
reg = <0x800>;
- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
+
+ pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_POWER>;
+ };
};
pma8084_gpios: gpio@c000 {
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts
index d460743fbb94..947183992850 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts
@@ -125,8 +125,6 @@
&gsbi1_i2c {
status = "okay";
clock-frequency = <200000>;
- pinctrl-0 = <&i2c1_pins>;
- pinctrl-names = "default";
eeprom@52 {
compatible = "atmel,24c128";
@@ -148,8 +146,6 @@
&gsbi3_i2c {
clock-frequency = <200000>;
- pinctrl-0 = <&i2c3_pins>;
- pinctrl-names = "default";
status = "okay";
trackpad@10 {
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts
index 671d58cc2741..178c55c1efeb 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts
@@ -188,24 +188,17 @@
};
&tlmm_pinmux {
- card_detect: card_detect {
- mux {
- pins = "gpio26";
- function = "gpio";
- bias-disable;
- };
+ card_detect: card-detect-state {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
};
- pcie_pins: pcie_pinmux {
- mux {
- pins = "gpio27";
- function = "gpio";
- };
- conf {
- pins = "gpio27";
- drive-strength = <12>;
- bias-disable;
- };
+ pcie_pins: pcie-state {
+ pins = "gpio27";
+ function = "gpio";
+ drive-strength = <12>;
+ bias-disable;
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
index ed86b24119c9..b3ff8010b149 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts
@@ -321,24 +321,17 @@
};
&tlmm_pinmux {
- card_detect: card_detect {
- mux {
- pins = "gpio26";
- function = "gpio";
- bias-disable;
- };
+ card_detect: card-detect-state {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
};
- pcie_pins: pcie_pinmux {
- mux {
- pins = "gpio27";
- function = "gpio";
- };
- conf {
- pins = "gpio27";
- drive-strength = <12>;
- bias-disable;
- };
+ pcie_pins: pcie-state {
+ pins = "gpio27";
+ function = "gpio";
+ drive-strength = <12>;
+ bias-disable;
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi
index 7c545c50847b..e53de709e9d1 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi
@@ -1,318 +1,218 @@
// SPDX-License-Identifier: GPL-2.0
&tlmm_pinmux {
- sdc4_gpios: sdc4-gpios {
- pios {
- pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68";
- function = "sdc4";
- };
- };
-
- sdcc1_pins: sdcc1-pin-active {
- clk {
+ sdcc1_default_state: sdcc1-default-state {
+ clk-pins {
pins = "sdc1_clk";
- drive-strengh = <16>;
+ drive-strength = <16>;
bias-disable;
};
- cmd {
+ cmd-pins {
pins = "sdc1_cmd";
- drive-strengh = <10>;
+ drive-strength = <10>;
bias-pull-up;
};
- data {
+ data-pins {
pins = "sdc1_data";
- drive-strengh = <10>;
+ drive-strength = <10>;
bias-pull-up;
};
};
- sdcc3_pins: sdcc3-pin-active {
- clk {
+ sdcc3_default_state: sdcc3-default-state {
+ clk-pins {
pins = "sdc3_clk";
- drive-strengh = <8>;
+ drive-strength = <8>;
bias-disable;
};
- cmd {
+ cmd-pins {
pins = "sdc3_cmd";
- drive-strengh = <8>;
+ drive-strength = <8>;
bias-pull-up;
};
- data {
+ data-pins {
pins = "sdc3_data";
- drive-strengh = <8>;
+ drive-strength = <8>;
bias-pull-up;
};
};
- ps_hold: ps_hold {
- mux {
- pins = "gpio78";
- function = "ps_hold";
- };
+ sdc4_default_state: sdc4-default-state {
+ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68";
+ function = "sdc4";
};
- i2c1_pins: i2c1 {
- mux {
- pins = "gpio20", "gpio21";
- function = "gsbi1";
- };
+ gsbi1_uart_2pins: gsbi1-uart-2pins-state {
+ pins = "gpio18", "gpio19";
+ function = "gsbi1";
+ };
- pinconf {
- pins = "gpio20", "gpio21";
- drive-strength = <16>;
- bias-disable;
- };
+ gsbi1_uart_4pins: gsbi1-uart-4pins-state {
+ pins = "gpio18", "gpio19", "gpio20", "gpio21";
+ function = "gsbi1";
};
- i2c1_pins_sleep: i2c1_pins_sleep {
- mux {
- pins = "gpio20", "gpio21";
- function = "gpio";
- };
- pinconf {
- pins = "gpio20", "gpio21";
+ gsbi4_uart_pin_a: gsbi4-uart-pin-active-state {
+ rx-pins {
+ pins = "gpio11";
+ function = "gsbi4";
drive-strength = <2>;
bias-disable;
};
- };
- gsbi1_uart_2pins: gsbi1_uart_2pins {
- mux {
- pins = "gpio18", "gpio19";
- function = "gsbi1";
+ tx-pins {
+ pins = "gpio10";
+ function = "gsbi4";
+ drive-strength = <4>;
+ bias-disable;
};
};
- gsbi1_uart_4pins: gsbi1_uart_4pins {
- mux {
- pins = "gpio18", "gpio19", "gpio20", "gpio21";
- function = "gsbi1";
- };
+ gsbi6_uart_2pins: gsbi6-uart-2pins-state {
+ pins = "gpio14", "gpio15";
+ function = "gsbi6";
};
- i2c2_pins: i2c2 {
- mux {
- pins = "gpio24", "gpio25";
- function = "gsbi2";
- };
-
- pinconf {
- pins = "gpio24", "gpio25";
- drive-strength = <16>;
- bias-disable;
- };
+ gsbi6_uart_4pins: gsbi6-uart-4pins-state {
+ pins = "gpio14", "gpio15", "gpio16", "gpio17";
+ function = "gsbi6";
};
- i2c2_pins_sleep: i2c2_pins_sleep {
- mux {
- pins = "gpio24", "gpio25";
- function = "gpio";
- };
-
- pinconf {
- pins = "gpio24", "gpio25";
- drive-strength = <2>;
- bias-disable;
- };
+ gsbi7_uart_2pins: gsbi7-uart-2pins-state {
+ pins = "gpio82", "gpio83";
+ function = "gsbi7";
};
- i2c3_pins: i2c3 {
- mux {
- pins = "gpio8", "gpio9";
- function = "gsbi3";
- };
-
- pinconf {
- pins = "gpio8", "gpio9";
- drive-strength = <16>;
- bias-disable;
- };
+ gsbi7_uart_4pins: gsbi7-uart-4pins-state {
+ pins = "gpio82", "gpio83", "gpio84", "gpio85";
+ function = "gsbi7";
};
- i2c3_pins_sleep: i2c3_pins_sleep {
- mux {
- pins = "gpio8", "gpio9";
- function = "gpio";
- };
- pinconf {
- pins = "gpio8", "gpio9";
- drive-strength = <2>;
- bias-disable;
- };
+ i2c1_default_state: i2c1-default-state {
+ pins = "gpio20", "gpio21";
+ function = "gsbi1";
+ drive-strength = <16>;
+ bias-disable;
};
- i2c4_pins: i2c4 {
- mux {
- pins = "gpio12", "gpio13";
- function = "gsbi4";
- };
-
- pinconf {
- pins = "gpio12", "gpio13";
- drive-strength = <16>;
- bias-disable;
- };
+ i2c1_sleep_state: i2c1-sleep-state {
+ pins = "gpio20", "gpio21";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- i2c4_pins_sleep: i2c4_pins_sleep {
- mux {
- pins = "gpio12", "gpio13";
- function = "gpio";
- };
- pinconf {
- pins = "gpio12", "gpio13";
- drive-strength = <2>;
- bias-disable;
- };
+ i2c2_default_state: i2c2-default-state {
+ pins = "gpio24", "gpio25";
+ function = "gsbi2";
+ drive-strength = <16>;
+ bias-disable;
};
- spi5_default: spi5_default {
- pinmux {
- pins = "gpio51", "gpio52", "gpio54";
- function = "gsbi5";
- };
-
- pinmux_cs {
- function = "gpio";
- pins = "gpio53";
- };
-
- pinconf {
- pins = "gpio51", "gpio52", "gpio54";
- drive-strength = <16>;
- bias-disable;
- };
-
- pinconf_cs {
- pins = "gpio53";
- drive-strength = <16>;
- bias-disable;
- output-high;
- };
+ i2c2_sleep_state: i2c2-sleep-state {
+ pins = "gpio24", "gpio25";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- spi5_sleep: spi5_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio51", "gpio52", "gpio53", "gpio54";
- };
-
- pinconf {
- pins = "gpio51", "gpio52", "gpio53", "gpio54";
- drive-strength = <2>;
- bias-pull-down;
- };
+ i2c3_default_state: i2c3-default-state {
+ pins = "gpio8", "gpio9";
+ function = "gsbi3";
+ drive-strength = <16>;
+ bias-disable;
};
- i2c6_pins: i2c6 {
- mux {
- pins = "gpio16", "gpio17";
- function = "gsbi6";
- };
-
- pinconf {
- pins = "gpio16", "gpio17";
- drive-strength = <16>;
- bias-disable;
- };
+ i2c3_sleep_state: i2c3-sleep-state {
+ pins = "gpio8", "gpio9";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- i2c6_pins_sleep: i2c6_pins_sleep {
- mux {
- pins = "gpio16", "gpio17";
- function = "gpio";
- };
- pinconf {
- pins = "gpio16", "gpio17";
- drive-strength = <2>;
- bias-disable;
- };
+ i2c4_default_state: i2c4-default-state {
+ pins = "gpio12", "gpio13";
+ function = "gsbi4";
+ drive-strength = <16>;
+ bias-disable;
};
- gsbi4_uart_pin_a: gsbi4-uart-pin-active-state {
- rx-pins {
- pins = "gpio11";
- function = "gsbi4";
- drive-strength = <2>;
- bias-disable;
- };
-
- tx-pins {
- pins = "gpio10";
- function = "gsbi4";
- drive-strength = <4>;
- bias-disable;
- };
+ i2c4_sleep_state: i2c4-sleep-state {
+ pins = "gpio12", "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- gsbi6_uart_2pins: gsbi6_uart_2pins {
- mux {
- pins = "gpio14", "gpio15";
- function = "gsbi6";
- };
+ i2c6_default_state: i2c6-default-state {
+ pins = "gpio16", "gpio17";
+ function = "gsbi6";
+ drive-strength = <16>;
+ bias-disable;
};
- gsbi6_uart_4pins: gsbi6_uart_4pins {
- mux {
- pins = "gpio14", "gpio15", "gpio16", "gpio17";
- function = "gsbi6";
- };
+ i2c6_sleep_state: i2c6-sleep-state {
+ pins = "gpio16", "gpio17";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- gsbi7_uart_2pins: gsbi7_uart_2pins {
- mux {
- pins = "gpio82", "gpio83";
- function = "gsbi7";
- };
+ i2c7_default_state: i2c7-default-state {
+ pins = "gpio84", "gpio85";
+ function = "gsbi7";
+ drive-strength = <16>;
+ bias-disable;
};
- gsbi7_uart_4pins: gsbi7_uart_4pins {
- mux {
- pins = "gpio82", "gpio83", "gpio84", "gpio85";
- function = "gsbi7";
- };
+ i2c7_sleep_state: i2c7-sleep-state {
+ pins = "gpio84", "gpio85";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
};
- i2c7_pins: i2c7 {
- mux {
- pins = "gpio84", "gpio85";
- function = "gsbi7";
+ spi5_default_state: spi5-default-state {
+ spi5-pins {
+ pins = "gpio51", "gpio52", "gpio54";
+ function = "gsbi5";
+ drive-strength = <16>;
+ bias-disable;
};
- pinconf {
- pins = "gpio84", "gpio85";
+ spi5-cs-pins {
+ pins = "gpio53";
+ function = "gpio";
drive-strength = <16>;
bias-disable;
+ output-high;
};
};
- i2c7_pins_sleep: i2c7_pins_sleep {
- mux {
- pins = "gpio84", "gpio85";
+ spi5_sleep_state: spi5-sleep-state {
+ spi5-pins {
+ pins = "gpio51", "gpio52", "gpio53", "gpio54";
function = "gpio";
- };
- pinconf {
- pins = "gpio84", "gpio85";
drive-strength = <2>;
- bias-disable;
+ bias-pull-down;
};
};
- riva_fm_pin_a: riva-fm-active {
+ riva_fm_pin_a: riva-fm-active-state {
pins = "gpio14", "gpio15";
function = "riva_fm";
};
- riva_bt_pin_a: riva-bt-active {
+ riva_bt_pin_a: riva-bt-active-state {
pins = "gpio16", "gpio17";
function = "riva_bt";
};
- riva_wlan_pin_a: riva-wlan-active {
+ riva_wlan_pin_a: riva-wlan-active-state {
pins = "gpio64", "gpio65", "gpio66", "gpio67", "gpio68";
function = "riva_wlan";
@@ -320,22 +220,24 @@
bias-pull-down;
};
- hdmi_pinctrl: hdmi-pinctrl {
- mux {
- pins = "gpio70", "gpio71", "gpio72";
- function = "hdmi";
- };
-
- pinconf_ddc {
+ hdmi_pinctrl: hdmi-pinctrl-state {
+ ddc-pins {
pins = "gpio70", "gpio71";
+ function = "hdmi";
bias-pull-up;
drive-strength = <2>;
};
- pinconf_hpd {
+ hpd-pins {
pins = "gpio72";
+ function = "hdmi";
bias-pull-down;
drive-strength = <16>;
};
};
+
+ ps_hold_default_state: ps-hold-default-state {
+ pins = "gpio78";
+ function = "ps_hold";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts
index 2412aa3e3e8d..7752f07973f9 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts
@@ -373,21 +373,21 @@
cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&sdcc3_pins>, <&sdcc3_cd_pin_a>;
+ pinctrl-0 = <&sdcc3_default_state>, <&sdcc3_cd_pin_a>;
status = "okay";
};
&tlmm_pinmux {
- gsbi5_uart_pin_a: gsbi5-uart-pin-active {
- rx {
+ gsbi5_uart_pin_a: gsbi5-uart-pin-active-state {
+ rx-pins {
pins = "gpio52";
function = "gsbi5";
drive-strength = <2>;
bias-pull-up;
};
- tx {
+ tx-pins {
pins = "gpio51";
function = "gsbi5";
drive-strength = <4>;
@@ -396,7 +396,7 @@
};
- sdcc3_cd_pin_a: sdcc3-cd-pin-active {
+ sdcc3_cd_pin_a: sdcc3-cd-pin-active-state {
pins = "gpio26";
function = "gpio";
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
index 769e151747c3..ac7494ed633e 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
@@ -302,7 +302,7 @@
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
- pinctrl-0 = <&ps_hold>;
+ pinctrl-0 = <&ps_hold_default_state>;
};
sfpb_wrapper_mutex: syscon@1200000 {
@@ -435,8 +435,8 @@
gsbi1_i2c: i2c@12460000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c1_pins>;
- pinctrl-1 = <&i2c1_pins_sleep>;
+ pinctrl-0 = <&i2c1_default_state>;
+ pinctrl-1 = <&i2c1_sleep_state>;
pinctrl-names = "default", "sleep";
reg = <0x12460000 0x1000>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
@@ -465,8 +465,8 @@
gsbi2_i2c: i2c@124a0000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x124a0000 0x1000>;
- pinctrl-0 = <&i2c2_pins>;
- pinctrl-1 = <&i2c2_pins_sleep>;
+ pinctrl-0 = <&i2c2_default_state>;
+ pinctrl-1 = <&i2c2_sleep_state>;
pinctrl-names = "default", "sleep";
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>;
@@ -489,8 +489,8 @@
ranges;
gsbi3_i2c: i2c@16280000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c3_pins>;
- pinctrl-1 = <&i2c3_pins_sleep>;
+ pinctrl-0 = <&i2c3_default_state>;
+ pinctrl-1 = <&i2c3_sleep_state>;
pinctrl-names = "default", "sleep";
reg = <0x16280000 0x1000>;
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
@@ -528,8 +528,8 @@
gsbi4_i2c: i2c@16380000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c4_pins>;
- pinctrl-1 = <&i2c4_pins_sleep>;
+ pinctrl-0 = <&i2c4_default_state>;
+ pinctrl-1 = <&i2c4_sleep_state>;
pinctrl-names = "default", "sleep";
reg = <0x16380000 0x1000>;
interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
@@ -565,8 +565,8 @@
compatible = "qcom,spi-qup-v1.1.1";
reg = <0x1a280000 0x1000>;
interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
- pinctrl-0 = <&spi5_default>;
- pinctrl-1 = <&spi5_sleep>;
+ pinctrl-0 = <&spi5_default_state>;
+ pinctrl-1 = <&spi5_sleep_state>;
pinctrl-names = "default", "sleep";
clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>;
clock-names = "core", "iface";
@@ -599,8 +599,8 @@
gsbi6_i2c: i2c@16580000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c6_pins>;
- pinctrl-1 = <&i2c6_pins_sleep>;
+ pinctrl-0 = <&i2c6_default_state>;
+ pinctrl-1 = <&i2c6_sleep_state>;
pinctrl-names = "default", "sleep";
reg = <0x16580000 0x1000>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
@@ -635,8 +635,8 @@
gsbi7_i2c: i2c@16680000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c7_pins>;
- pinctrl-1 = <&i2c7_pins_sleep>;
+ pinctrl-0 = <&i2c7_default_state>;
+ pinctrl-1 = <&i2c7_sleep_state>;
pinctrl-names = "default", "sleep";
reg = <0x16680000 0x1000>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
@@ -871,7 +871,6 @@
compatible = "qcom,apq8064-sata-phy";
status = "disabled";
reg = <0x1b400000 0x200>;
- reg-names = "phy_mem";
clocks = <&gcc SATA_PHY_CFG_CLK>;
clock-names = "cfg";
#phy-cells = <0>;
@@ -890,9 +889,9 @@
<&gcc SATA_PMALIVE_CLK>;
clock-names = "slave_iface",
"iface",
- "bus",
+ "core",
"rxoob",
- "core_pmalive";
+ "pmalive";
assigned-clocks = <&gcc SATA_RXOOB_CLK>,
<&gcc SATA_PMALIVE_CLK>;
@@ -945,7 +944,7 @@
dmas = <&sdcc4bam 2>, <&sdcc4bam 1>;
dma-names = "tx", "rx";
pinctrl-names = "default";
- pinctrl-0 = <&sdc4_gpios>;
+ pinctrl-0 = <&sdc4_default_state>;
};
sdcc4bam: dma-controller@121c2000 {
@@ -962,7 +961,7 @@
status = "disabled";
compatible = "arm,pl18x", "arm,primecell";
pinctrl-names = "default";
- pinctrl-0 = <&sdcc1_pins>;
+ pinctrl-0 = <&sdcc1_default_state>;
arm,primecell-periphid = <0x00051180>;
reg = <0x12400000 0x2000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
index 2b52e5d5eb51..014e6c5ee889 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi
@@ -792,7 +792,7 @@
qcom,smd-edge = <15>;
rpm-requests {
- compatible = "qcom,rpm-apq8084";
+ compatible = "qcom,rpm-apq8084", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
regulators-0 {
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
index da67d55fa557..0d23c03fae33 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi
@@ -28,46 +28,42 @@
};
&tlmm {
- i2c0_pins: i2c0_pinmux {
- mux_i2c {
- function = "blsp_i2c0";
- pins = "gpio58", "gpio59";
- drive-strength = <16>;
- bias-disable;
- };
+ i2c0_pins: i2c0-state {
+ function = "blsp_i2c0";
+ pins = "gpio58", "gpio59";
+ drive-strength = <16>;
+ bias-disable;
};
- mdio_pins: mdio_pinmux {
- mux_mdio {
+ mdio_pins: mdio-state {
+ mdio-pins {
pins = "gpio53";
function = "mdio";
bias-pull-up;
};
- mux_mdc {
+ mdc-pins {
pins = "gpio52";
function = "mdc";
bias-pull-up;
};
};
- serial0_pins: serial0_pinmux {
- mux_uart {
- pins = "gpio60", "gpio61";
- function = "blsp_uart0";
- bias-disable;
- };
+ serial0_pins: serial0-state {
+ pins = "gpio60", "gpio61";
+ function = "blsp_uart0";
+ bias-disable;
};
- spi0_pins: spi0_pinmux {
- mux_spi {
+ spi0_pins: spi0-state {
+ spi0-pins {
function = "blsp_spi0";
pins = "gpio55", "gpio56", "gpio57";
drive-strength = <12>;
bias-disable;
};
- mux_cs {
+ spi0-cs-pins {
function = "gpio";
pins = "gpio54", "gpio4";
drive-strength = <2>;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
index 365fbac417fd..ac3b30072a22 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts
@@ -11,40 +11,35 @@
};
&tlmm {
- mdio_pins: mdio_pinmux {
- pinmux_1 {
+ mdio_pins: mdio-state {
+ mdio-pins {
pins = "gpio53";
function = "mdio";
+ bias-pull-up;
};
- pinmux_2 {
+ mdc-pins {
pins = "gpio52";
function = "mdc";
- };
-
- pinconf {
- pins = "gpio52", "gpio53";
bias-pull-up;
};
};
- serial_pins: serial_pinmux {
- mux {
- pins = "gpio60", "gpio61";
- function = "blsp_uart0";
- bias-disable;
- };
+ serial_pins: serial-state {
+ pins = "gpio60", "gpio61";
+ function = "blsp_uart0";
+ bias-disable;
};
- spi_0_pins: spi_0_pinmux {
- pin {
+ spi_0_pins: spi-0-state {
+ spi0-pins {
function = "blsp_spi0";
pins = "gpio55", "gpio56", "gpio57";
drive-strength = <2>;
bias-disable;
};
- pin_cs {
+ spi0-cs-pins {
function = "gpio";
pins = "gpio54", "gpio59";
drive-strength = <2>;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi
index f7ac8f9d0b6f..efbe89dd4793 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi
@@ -34,30 +34,22 @@
};
&tlmm {
- serial_pins: serial_pinmux {
- mux {
- pins = "gpio60", "gpio61";
- function = "blsp_uart0";
- bias-disable;
- };
+ serial_pins: serial-state {
+ pins = "gpio60", "gpio61";
+ function = "blsp_uart0";
+ bias-disable;
};
- spi_0_pins: spi_0_pinmux {
- pinmux {
- function = "blsp_spi0";
- pins = "gpio55", "gpio56", "gpio57";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio54";
- };
- pinconf {
+ spi_0_pins: spi-0-state {
+ spi0-pins {
pins = "gpio55", "gpio56", "gpio57";
+ function = "blsp_spi0";
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ spi0-cs-pins {
pins = "gpio54";
+ function = "gpio";
drive-strength = <2>;
bias-disable;
output-high;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi
index 374af6dd360a..91e296d2ea82 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi
@@ -24,26 +24,26 @@
soc {
pinctrl@1000000 {
- serial_0_pins: serial0-pinmux {
+ serial_0_pins: serial0-state {
pins = "gpio16", "gpio17";
function = "blsp_uart0";
bias-disable;
};
- serial_1_pins: serial1-pinmux {
+ serial_1_pins: serial1-state {
pins = "gpio8", "gpio9",
"gpio10", "gpio11";
function = "blsp_uart1";
bias-disable;
};
- spi_0_pins: spi-0-pinmux {
- pinmux {
+ spi_0_pins: spi-0-state {
+ spi0-pins {
function = "blsp_spi0";
pins = "gpio13", "gpio14", "gpio15";
bias-disable;
};
- pinmux_cs {
+ spi0-cs-pins {
function = "gpio";
pins = "gpio12";
bias-disable;
@@ -51,13 +51,13 @@
};
};
- i2c_0_pins: i2c-0-pinmux {
+ i2c_0_pins: i2c-0-state {
pins = "gpio20", "gpio21";
function = "blsp_i2c0";
bias-disable;
};
- nand_pins: nand-pins {
+ nand_pins: nand-state {
pins = "gpio53", "gpio55", "gpio56",
"gpio57", "gpio58", "gpio59",
"gpio60", "gpio62", "gpio63",
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts
index ea2987fcbff8..41c5874f6f97 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts
@@ -19,20 +19,20 @@
};
pinctrl@1000000 {
- serial_1_pins: serial1-pinmux {
+ serial_1_pins: serial1-state {
pins = "gpio8", "gpio9",
"gpio10", "gpio11";
function = "blsp_uart1";
bias-disable;
};
- spi_0_pins: spi-0-pinmux {
- pinmux {
+ spi_0_pins: spi-0-state {
+ spi0-pins {
function = "blsp_spi0";
pins = "gpio13", "gpio14", "gpio15";
bias-disable;
};
- pinmux_cs {
+ spi0-cs-pins {
function = "gpio";
pins = "gpio12";
bias-disable;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts
index bd3553dd2070..67ee99d69757 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts
@@ -9,7 +9,7 @@
soc {
pinctrl@1000000 {
- serial_1_pins: serial1-pinmux {
+ serial_1_pins: serial1-state {
pins = "gpio8", "gpio9";
function = "blsp_uart1";
bias-disable;
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
index 7ef635997efa..cc88cf5f0d9b 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi
@@ -24,19 +24,19 @@
soc {
pinctrl@1000000 {
- serial_0_pins: serial0-pinmux {
+ serial_0_pins: serial0-state {
pins = "gpio16", "gpio17";
function = "blsp_uart0";
bias-disable;
};
- i2c_0_pins: i2c-0-pinmux {
+ i2c_0_pins: i2c-0-state {
pins = "gpio20", "gpio21";
function = "blsp_i2c0";
bias-disable;
};
- nand_pins: nand-pins {
+ nand_pins: nand-state {
pins = "gpio53", "gpio55", "gpio56",
"gpio57", "gpio58", "gpio59",
"gpio60", "gpio62", "gpio63",
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts b/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts
index a654d3c22c4f..5a8bf1a6f559 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts
@@ -7,12 +7,11 @@
soc {
pinmux@800000 {
- buttons_pins: buttons_pins {
- mux {
- pins = "gpio54", "gpio65";
- drive-strength = <2>;
- bias-pull-up;
- };
+ buttons_pins: buttons-state {
+ pins = "gpio54", "gpio65";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts b/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts
index 12e806adcda8..f09da9460c86 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts
+++ b/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts
@@ -404,59 +404,49 @@
};
&qcom_pinmux {
- buttons_pins: buttons_pins {
- mux {
- pins = "gpio66";
- drive-strength = <16>;
- bias-disable;
- };
+ buttons_pins: buttons-state {
+ pins = "gpio66";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
};
- leds_pins: leds_pins {
- mux {
- pins = "gpio33";
- drive-strength = <16>;
- bias-disable;
- };
+ leds_pins: leds-state {
+ pins = "gpio33";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
};
- mdio1_pins: mdio1_pins {
- mux {
- pins = "gpio10", "gpio11";
- function = "gpio";
- drive-strength = <8>;
- bias-disable;
- };
+ mdio1_pins: mdio1-state {
+ pins = "gpio10", "gpio11";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
};
- sw0_reset_pin: sw0_reset_pin {
- mux {
- pins = "gpio16";
- drive-strength = <16>;
- function = "gpio";
- bias-disable;
- input-disable;
- };
+ sw0_reset_pin: sw0-reset-state {
+ pins = "gpio16";
+ drive-strength = <16>;
+ function = "gpio";
+ bias-disable;
+ input-disable;
};
- sw1_reset_pin: sw1_reset_pin {
- mux {
- pins = "gpio17";
- drive-strength = <16>;
- function = "gpio";
- bias-disable;
- input-disable;
- };
+ sw1_reset_pin: sw1-reset-state {
+ pins = "gpio17";
+ drive-strength = <16>;
+ function = "gpio";
+ bias-disable;
+ input-disable;
};
- usb1_pwr_en_pins: usb1_pwr_en_pins {
- mux {
- pins = "gpio4";
- function = "gpio";
- drive-strength = <16>;
- bias-disable;
- output-high;
- };
+ usb1_pwr_en_pins: usb1-pwr-en-state {
+ pins = "gpio4";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
};
};
diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
index da0fd75f4711..759a59c2bdbc 100644
--- a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi
@@ -399,70 +399,58 @@
#interrupt-cells = <2>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- pcie0_pins: pcie0_pinmux {
- mux {
- pins = "gpio3";
- function = "pcie1_rst";
- drive-strength = <12>;
- bias-disable;
- };
+ pcie0_pins: pcie0-state {
+ pins = "gpio3";
+ function = "pcie1_rst";
+ drive-strength = <12>;
+ bias-disable;
};
- pcie1_pins: pcie1_pinmux {
- mux {
- pins = "gpio48";
- function = "pcie2_rst";
- drive-strength = <12>;
- bias-disable;
- };
+ pcie1_pins: pcie1-state {
+ pins = "gpio48";
+ function = "pcie2_rst";
+ drive-strength = <12>;
+ bias-disable;
};
- pcie2_pins: pcie2_pinmux {
- mux {
- pins = "gpio63";
- function = "pcie3_rst";
- drive-strength = <12>;
- bias-disable;
- };
+ pcie2_pins: pcie2-state {
+ pins = "gpio63";
+ function = "pcie3_rst";
+ drive-strength = <12>;
+ bias-disable;
};
- i2c4_pins: i2c4-default {
+ i2c4_pins: i2c4-state {
pins = "gpio12", "gpio13";
function = "gsbi4";
drive-strength = <12>;
bias-disable;
};
- spi_pins: spi_pins {
- mux {
- pins = "gpio18", "gpio19", "gpio21";
- function = "gsbi5";
- drive-strength = <10>;
- bias-none;
- };
+ spi_pins: spi-state {
+ pins = "gpio18", "gpio19", "gpio21";
+ function = "gsbi5";
+ drive-strength = <10>;
+ bias-disable;
};
- leds_pins: leds_pins {
- mux {
- pins = "gpio7", "gpio8", "gpio9",
- "gpio26", "gpio53";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-down;
- output-low;
- };
+ leds_pins: leds-state {
+ pins = "gpio7", "gpio8", "gpio9",
+ "gpio26", "gpio53";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
};
- buttons_pins: buttons_pins {
- mux {
- pins = "gpio54";
- drive-strength = <2>;
- bias-pull-up;
- };
+ buttons_pins: buttons-state {
+ pins = "gpio54";
+ drive-strength = <2>;
+ bias-pull-up;
};
- nand_pins: nand_pins {
- mux {
+ nand_pins: nand-state {
+ nand-pins {
pins = "gpio34", "gpio35", "gpio36",
"gpio37", "gpio38", "gpio39",
"gpio40", "gpio41", "gpio42",
@@ -473,14 +461,14 @@
bias-disable;
};
- pullups {
+ nand-pullup-pins {
pins = "gpio39";
function = "nand";
drive-strength = <10>;
bias-pull-up;
};
- hold {
+ nand-hold-pins {
pins = "gpio40", "gpio41", "gpio42",
"gpio43", "gpio44", "gpio45",
"gpio46", "gpio47";
@@ -490,25 +478,21 @@
};
};
- mdio0_pins: mdio0-pins {
- mux {
- pins = "gpio0", "gpio1";
- function = "mdio";
- drive-strength = <8>;
- bias-disable;
- };
+ mdio0_pins: mdio0-state {
+ pins = "gpio0", "gpio1";
+ function = "mdio";
+ drive-strength = <8>;
+ bias-disable;
};
- rgmii2_pins: rgmii2-pins {
- mux {
- pins = "gpio27", "gpio28", "gpio29",
- "gpio30", "gpio31", "gpio32",
- "gpio51", "gpio52", "gpio59",
- "gpio60", "gpio61", "gpio62";
- function = "rgmii2";
- drive-strength = <8>;
- bias-disable;
- };
+ rgmii2_pins: rgmii2-state {
+ pins = "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32",
+ "gpio51", "gpio52", "gpio59",
+ "gpio60", "gpio61", "gpio62";
+ function = "rgmii2";
+ drive-strength = <8>;
+ bias-disable;
};
};
@@ -1292,7 +1276,7 @@
<&gcc SATA_A_CLK>,
<&gcc SATA_RXOOB_CLK>,
<&gcc SATA_PMALIVE_CLK>;
- clock-names = "slave_face", "iface", "core",
+ clock-names = "slave_iface", "iface", "core",
"rxoob", "pmalive";
assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
index 8839b23fc693..ca76bf8af75e 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi
@@ -84,6 +84,32 @@
};
};
+&blsp1_i2c2 {
+ status = "okay";
+
+ magnetometer: magnetometer@c {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0c>;
+
+ vdd-supply = <&pm8226_l15>;
+ vid-supply = <&pm8226_l6>;
+ };
+
+ accelerometer: accelerometer@1e {
+ compatible = "kionix,kx022-1020";
+ reg = <0x1e>;
+
+ interrupts-extended = <&tlmm 63 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8226_l15>;
+ vddio-supply = <&pm8226_l6>;
+
+ mount-matrix = "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1";
+ };
+};
+
&blsp1_i2c5 {
status = "okay";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
index 992b7115b5f8..a28a83cb5340 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts
@@ -10,6 +10,9 @@
#include "qcom-msm8226-microsoft-common.dtsi"
+/* This device has no magnetometer */
+/delete-node/ &magnetometer;
+
/ {
model = "Nokia Lumia 630";
compatible = "microsoft,moneypenny", "qcom,msm8226";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
index b2f92ad6499a..3a685ff7e8cc 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,gcc-msm8974.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
#address-cells = <1>;
@@ -44,8 +45,11 @@
device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
+ #cooling-cells = <2>;
};
CPU1: cpu@1 {
@@ -54,8 +58,11 @@
device_type = "cpu";
reg = <1>;
next-level-cache = <&L2>;
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
+ #cooling-cells = <2>;
};
CPU2: cpu@2 {
@@ -64,8 +71,11 @@
device_type = "cpu";
reg = <2>;
next-level-cache = <&L2>;
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
+ #cooling-cells = <2>;
};
CPU3: cpu@3 {
@@ -74,8 +84,11 @@
device_type = "cpu";
reg = <3>;
next-level-cache = <&L2>;
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
+ #cooling-cells = <2>;
};
L2: l2-cache {
@@ -98,6 +111,29 @@
reg = <0x0 0x0>;
};
+ cpu_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ };
+
+ opp-384000000 {
+ opp-hz = /bits/ 64 <384000000>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ };
+
+ opp-787200000 {
+ opp-hz = /bits/ 64 <787200000>;
+ };
+
+ /* Higher CPU frequencies need speedbin support */
+ };
+
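
A note on two dtc constructs used in the table above: opp-hz is defined as a 64-bit quantity, and /bits/ 64 tells dtc to emit the following value as a single u64 rather than a 32-bit cell, while opp-shared marks the table as one shared clock/voltage domain for all four CPUs. A minimal sketch of the encoding, using the 300 MHz entry above:

    opp-300000000 {
            /* /bits/ 64 <300000000> emits the single u64 0x11e1a300 */
            opp-hz = /bits/ 64 <300000000>;
    };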
pmu {
compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
@@ -121,11 +157,11 @@
smd-edge {
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
+ mboxes = <&apcs 0>;
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8226";
+ compatible = "qcom,rpm-msm8226", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
@@ -199,7 +235,7 @@
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 10>;
+ mboxes = <&apcs 10>;
qcom,local-pid = <0>;
qcom,remote-pid = <2>;
@@ -231,9 +267,75 @@
#interrupt-cells = <3>;
};
- apcs: syscon@f9011000 {
- compatible = "syscon";
+ apcs: mailbox@f9011000 {
+ compatible = "qcom,msm8226-apcs-kpss-global",
+ "qcom,msm8916-apcs-kpss-global", "syscon";
reg = <0xf9011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&a7pll>, <&gcc GPLL0_VOTE>;
+ clock-names = "pll", "aux";
+ #clock-cells = <0>;
+ };
+
+ a7pll: clock@f9016000 {
+ compatible = "qcom,msm8226-a7pll";
+ reg = <0xf9016000 0x40>;
+ #clock-cells = <0>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ operating-points-v2 = <&a7pll_opp_table>;
+
+ a7pll_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-768000000 {
+ opp-hz = /bits/ 64 <768000000>;
+ };
+
+ opp-787200000 {
+ opp-hz = /bits/ 64 <787200000>;
+ };
+
+ opp-998400000 {
+ opp-hz = /bits/ 64 <998400000>;
+ };
+
+ opp-1094400000 {
+ opp-hz = /bits/ 64 <1094400000>;
+ };
+
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ };
+
+ opp-1305600000 {
+ opp-hz = /bits/ 64 <1305600000>;
+ };
+
+ opp-1344000000 {
+ opp-hz = /bits/ 64 <1344000000>;
+ };
+
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ };
+
+ opp-1497600000 {
+ opp-hz = /bits/ 64 <1497600000>;
+ };
+
+ opp-1593600000 {
+ opp-hz = /bits/ 64 <1593600000>;
+ };
+
+ opp-1689600000 {
+ opp-hz = /bits/ 64 <1689600000>;
+ };
+
+ opp-1785600000 {
+ opp-hz = /bits/ 64 <1785600000>;
+ };
+ };
};
saw_l2: power-manager@f9012000 {
@@ -571,7 +673,7 @@
#reset-cells = <1>;
#power-domain-cells = <1>;
- clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ clocks = <&xo_board>,
<&sleep_clk>;
clock-names = "xo",
"sleep_clk";
@@ -1130,7 +1232,7 @@
smd-edge {
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 8>;
+ mboxes = <&apcs 8>;
qcom,smd-edge = <1>;
label = "lpass";
@@ -1159,6 +1261,16 @@
thermal-sensors = <&tsens 5>;
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
cpu_alert0: trip0 {
temperature = <75000>;
@@ -1180,6 +1292,16 @@
thermal-sensors = <&tsens 2>;
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
trips {
cpu_alert1: trip0 {
temperature = <75000>;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
index 53a6d4e85959..55077a5f2e34 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
+++ b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts
@@ -13,6 +13,9 @@
/* This device has touchscreen on i2c1 instead */
/delete-node/ &touchscreen;
+/* The magnetometer used on this device is currently unknown */
+/delete-node/ &magnetometer;
+
/ {
model = "Nokia Lumia 830";
compatible = "microsoft,tesla", "qcom,msm8926", "qcom,msm8226";
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
index 15568579459a..1bd87170252d 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
@@ -136,7 +136,7 @@
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8974";
+ compatible = "qcom,rpm-msm8974", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
@@ -149,7 +149,7 @@
};
};
- reserved-memory {
+ reserved_memory: reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi
index b5443fd5b425..d3959741d2ea 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi
@@ -438,6 +438,19 @@
};
};
+&reserved_memory {
+ ramoops@3e8e0000 {
+ compatible = "ramoops";
+ reg = <0x3e8e0000 0x200000>;
+
+ console-size = <0x100000>;
+ record-size = <0x10000>;
+ ftrace-size = <0x10000>;
+ pmsg-size = <0x80000>;
+ ecc-size = <8>;
+ };
+};
+
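
A quick sanity check on the ramoops carve-up above, assuming the usual ramoops zone layout (the console, ftrace and pmsg zones are carved out of the region and the remainder is split into dump records of record-size): 0x200000 - 0x100000 - 0x10000 - 0x80000 = 0x70000, i.e. seven 0x10000 dump slots, with ecc-size = <8> reserving ECC bytes inside each zone.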
&remoteproc_adsp {
status = "okay";
cx-supply = <&pma8084_s2>;
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index 23e633387c24..d4572146d135 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -254,6 +254,30 @@
};
};
+ vpu: video-codec@10106000 {
+ compatible = "rockchip,rk3128-vpu", "rockchip,rk3066-vpu";
+ reg = <0x10106000 0x800>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VDPU>, <&cru HCLK_VDPU>,
+ <&cru ACLK_VEPU>, <&cru HCLK_VEPU>;
+ clock-names = "aclk_vdpu", "hclk_vdpu",
+ "aclk_vepu", "hclk_vepu";
+ iommus = <&vpu_mmu>;
+ power-domains = <&power RK3128_PD_VIDEO>;
+ };
+
+ vpu_mmu: iommu@10106800 {
+ compatible = "rockchip,iommu";
+ reg = <0x10106800 0x100>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VEPU>, <&cru HCLK_VDPU>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3128_PD_VIDEO>;
+ #iommu-cells = <0>;
+ };
+
vop: vop@1010e000 {
compatible = "rockchip,rk3126-vop";
reg = <0x1010e000 0x300>;
@@ -429,7 +453,7 @@
compatible = "rockchip,sfc";
reg = <0x1020c000 0x8000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_SFC>, <&cru 479>;
+ clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
clock-names = "clk_sfc", "hclk_sfc";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts
index 2d9994379eb2..89ca2f8d3809 100644
--- a/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts
+++ b/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts
@@ -168,8 +168,8 @@
pinctrl-0 = <&spim1_clk &spim1_cs0 &spim1_tx &spim1_rx>;
status = "okay";
- dh2228fv: dac@0 {
- compatible = "rohm,dh2228fv";
+ display: display@0 {
+ compatible = "elgin,jg10309-01";
reg = <0>;
spi-max-frequency = <24000000>;
spi-cpha;
diff --git a/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi b/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi
index 06b1d7f2d858..35ef6732281f 100644
--- a/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi
@@ -97,6 +97,156 @@
<0 RK_PC3 1 &pcfg_pull_none_drv_level_0_smt>;
};
};
+ i2c3 {
+ /omit-if-no-ref/
+ i2c3m0_xfer: i2c3m0-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m0 */
+ <3 RK_PA4 5 &pcfg_pull_none>,
+ /* i2c3_sda_m0 */
+ <3 RK_PA5 5 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ i2c3m1_xfer: i2c3m1-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m1 */
+ <2 RK_PD4 7 &pcfg_pull_none>,
+ /* i2c3_sda_m1 */
+ <2 RK_PD5 7 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ i2c3m2_xfer: i2c3m2-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m2 */
+ <1 RK_PD6 3 &pcfg_pull_none>,
+ /* i2c3_sda_m2 */
+ <1 RK_PD7 3 &pcfg_pull_none>;
+ };
+ };
+ i2s0 {
+ i2s0m0_lrck_tx: i2s0m0-lrck-tx {
+ rockchip,pins =
+ /* i2s0_lrck_tx_m0 */
+ <3 RK_PD3 1 &pcfg_pull_none>;
+ };
+ i2s0m0_lrck_rx: i2s0m0-lrck-rx {
+ rockchip,pins =
+ /* i2s0_lrck_rx_m0 */
+ <3 RK_PD4 1 &pcfg_pull_none>;
+ };
+ i2s0m0_mclk: i2s0m0-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m0 */
+ <3 RK_PD2 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sclk_rx: i2s0m0-sclk-rx {
+ rockchip,pins =
+ /* i2s0_sclk_rx_m0 */
+ <3 RK_PD1 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sclk_tx: i2s0m0-sclk-tx {
+ rockchip,pins =
+ /* i2s0_sclk_tx_m0 */
+ <3 RK_PD0 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sdi0: i2s0m0-sdi0 {
+ rockchip,pins =
+ /* i2s0_sdi0_m0 */
+ <3 RK_PD6 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sdo0: i2s0m0-sdo0 {
+ rockchip,pins =
+ /* i2s0_sdo0_m0 */
+ <3 RK_PD5 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sdo1_sdi3: i2s0m0-sdo1-sdi3 {
+ rockchip,pins =
+ /* i2s0_sdo1_sdi3_m0 */
+ <3 RK_PD7 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sdo2_sdi2: i2s0m0-sdo2-sdi2 {
+ rockchip,pins =
+ /* i2s0_sdo2_sdi2_m0 */
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+ i2s0m0_sdo3_sdi1: i2s0m0-sdo3-sdi1 {
+ rockchip,pins =
+ /* i2s0_sdo3_sdi1_m0 */
+ <4 RK_PA1 1 &pcfg_pull_none>;
+ };
+ i2s0m1_lrck_tx: i2s0m1-lrck-tx {
+ rockchip,pins =
+ /* i2s0_lrck_tx_m1 */
+ <3 RK_PA5 3 &pcfg_pull_none>;
+ };
+ i2s0m1_lrck_rx: i2s0m1-lrck-rx {
+ rockchip,pins =
+ /* i2s0_lrck_rx_m1 */
+ <3 RK_PB2 3 &pcfg_pull_none>;
+ };
+ i2s0m1_mclk: i2s0m1-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m1 */
+ <3 RK_PB0 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sclk_rx: i2s0m1-sclk-rx {
+ rockchip,pins =
+ /* i2s0_sclk_rx_m1 */
+ <3 RK_PB1 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sclk_tx: i2s0m1-sclk-tx {
+ rockchip,pins =
+ /* i2s0_sclk_tx_m1 */
+ <3 RK_PA4 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sdi0: i2s0m1-sdi0 {
+ rockchip,pins =
+ /* i2s0_sdi0_m1 */
+ <3 RK_PA7 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sdo0: i2s0m1-sdo0 {
+ rockchip,pins =
+ /* i2s0_sdo0_m1 */
+ <3 RK_PA6 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sdo1_sdi3: i2s0m1-sdo1-sdi3 {
+ rockchip,pins =
+ /* i2s0_sdo1_sdi3_m1 */
+ <3 RK_PB3 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sdo2_sdi2: i2s0m1-sdo2-sdi2 {
+ rockchip,pins =
+ /* i2s0_sdo2_sdi2_m1 */
+ <3 RK_PB4 3 &pcfg_pull_none>;
+ };
+ i2s0m1_sdo3_sdi1: i2s0m1-sdo3-sdi1 {
+ rockchip,pins =
+ /* i2s0_sdo3_sdi1_m1 */
+ <3 RK_PB5 3 &pcfg_pull_none>;
+ };
+ };
+ pwm0 {
+ /omit-if-no-ref/
+ pwm0m0_pins: pwm0m0-pins {
+ rockchip,pins =
+ /* pwm0_pin_m0 */
+ <0 RK_PB6 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm0m1_pins: pwm0m1-pins {
+ rockchip,pins =
+ /* pwm0_pin_m1 */
+ <2 RK_PB3 5 &pcfg_pull_none>;
+ };
+ };
+ pwm1 {
+ /omit-if-no-ref/
+ pwm1m0_pins: pwm1m0-pins {
+ rockchip,pins =
+ /* pwm1_pin_m0 */
+ <0 RK_PB7 3 &pcfg_pull_none>;
+ };
+ };
pwm2 {
/omit-if-no-ref/
pwm2m0_pins: pwm2m0-pins {
@@ -104,6 +254,106 @@
/* pwm2_pin_m0 */
<0 RK_PC0 3 &pcfg_pull_none>;
};
+ /omit-if-no-ref/
+ pwm2m1_pins: pwm2m1-pins {
+ rockchip,pins =
+ /* pwm2_pin_m1 */
+ <2 RK_PB1 5 &pcfg_pull_none>;
+ };
+ };
+ pwm3 {
+ /omit-if-no-ref/
+ pwm3m0_pins: pwm3m0-pins {
+ rockchip,pins =
+ /* pwm3_pin_m0 */
+ <0 RK_PC1 3 &pcfg_pull_none>;
+ };
+ };
+ pwm4 {
+ /omit-if-no-ref/
+ pwm4m0_pins: pwm4m0-pins {
+ rockchip,pins =
+ /* pwm4_pin_m0 */
+ <0 RK_PC2 3 &pcfg_pull_none>;
+ };
+ };
+ pwm5 {
+ /omit-if-no-ref/
+ pwm5m0_pins: pwm5m0-pins {
+ rockchip,pins =
+ /* pwm5_pin_m0 */
+ <0 RK_PC3 3 &pcfg_pull_none>;
+ };
+ };
+ pwm6 {
+ /omit-if-no-ref/
+ pwm6m0_pins: pwm6m0-pins {
+ rockchip,pins =
+ /* pwm6_pin_m0 */
+ <0 RK_PB2 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm6m1_pins: pwm6m1-pins {
+ rockchip,pins =
+ /* pwm6_pin_m1 */
+ <2 RK_PD4 5 &pcfg_pull_none>;
+ };
+ };
+ pwm7 {
+ /omit-if-no-ref/
+ pwm7m0_pins: pwm7m0-pins {
+ rockchip,pins =
+ /* pwm7_pin_m0 */
+ <0 RK_PB1 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm7m1_pins: pwm7m1-pins {
+ rockchip,pins =
+ /* pwm7_pin_m1 */
+ <3 RK_PA0 5 &pcfg_pull_none>;
+ };
+ };
+ pwm8 {
+ /omit-if-no-ref/
+ pwm8m0_pins: pwm8m0-pins {
+ rockchip,pins =
+ /* pwm8_pin_m0 */
+ <3 RK_PA4 6 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm8m1_pins: pwm8m1-pins {
+ rockchip,pins =
+ /* pwm8_pin_m1 */
+ <2 RK_PD7 5 &pcfg_pull_none>;
+ };
+ };
+ pwm9 {
+ /omit-if-no-ref/
+ pwm9m0_pins: pwm9m0-pins {
+ rockchip,pins =
+ /* pwm9_pin_m0 */
+ <3 RK_PA5 6 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm9m1_pins: pwm9m1-pins {
+ rockchip,pins =
+ /* pwm9_pin_m1 */
+ <2 RK_PD6 5 &pcfg_pull_none>;
+ };
+ };
+ pwm10 {
+ /omit-if-no-ref/
+ pwm10m0_pins: pwm10m0-pins {
+ rockchip,pins =
+ /* pwm10_pin_m0 */
+ <3 RK_PA6 6 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ pwm10m1_pins: pwm10m1-pins {
+ rockchip,pins =
+ /* pwm10_pin_m1 */
+ <2 RK_PD5 5 &pcfg_pull_none>;
+ };
};
pwm11 {
/omit-if-no-ref/
@@ -112,6 +362,12 @@
/* pwm11_pin_m0 */
<3 RK_PA7 6 &pcfg_pull_none>;
};
+ /omit-if-no-ref/
+ pwm11m1_pins: pwm11m1-pins {
+ rockchip,pins =
+ /* pwm11_pin_m1 */
+ <3 RK_PA1 5 &pcfg_pull_none>;
+ };
};
rgmii {
/omit-if-no-ref/
diff --git a/arch/arm/boot/dts/rockchip/rv1126.dtsi b/arch/arm/boot/dts/rockchip/rv1126.dtsi
index bb603cae13df..434846b85c95 100644
--- a/arch/arm/boot/dts/rockchip/rv1126.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1126.dtsi
@@ -22,6 +22,7 @@
aliases {
i2c0 = &i2c0;
i2c2 = &i2c2;
+ i2c3 = &i2c3;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -268,6 +269,28 @@
status = "disabled";
};
+ pwm0: pwm@ff430000 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff430000 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@ff430010 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff430010 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pwm2: pwm@ff430020 {
compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
reg = <0xff430020 0x10>;
@@ -279,6 +302,61 @@
status = "disabled";
};
+ pwm3: pwm@ff430030 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff430030 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@ff440000 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff440000 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm4m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@ff440010 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff440010 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm5m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@ff440020 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff440020 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm6m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm7: pwm@ff440030 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff440030 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm7m0_pins>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pmucru: clock-controller@ff480000 {
compatible = "rockchip,rv1126-pmucru";
reg = <0xff480000 0x1000>;
@@ -308,6 +386,53 @@
clock-names = "apb_pclk";
};
+ i2c3: i2c@ff520000 {
+ compatible = "rockchip,rv1126-i2c", "rockchip,rk3399-i2c";
+ reg = <0xff520000 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3m0_xfer>;
+ rockchip,grf = <&pmugrf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pwm8: pwm@ff550000 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff550000 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ pinctrl-0 = <&pwm8m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm9: pwm@ff550010 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff550010 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ pinctrl-0 = <&pwm9m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm10: pwm@ff550020 {
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
+ reg = <0xff550020 0x10>;
+ clock-names = "pwm", "pclk";
+ clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>;
+ pinctrl-0 = <&pwm10m0_pins>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pwm11: pwm@ff550030 {
compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm";
reg = <0xff550030 0x10>;
@@ -419,6 +544,32 @@
clock-names = "pclk", "timer";
};
+ i2s0: i2s@ff800000 {
+ compatible = "rockchip,rv1126-i2s-tdm";
+ reg = <0xff800000 0x1000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru MCLK_I2S0_TX>, <&cru MCLK_I2S0_RX>, <&cru HCLK_I2S0>;
+ clock-names = "mclk_tx", "mclk_rx", "hclk";
+ dmas = <&dmac 20>, <&dmac 19>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0m0_sclk_tx>,
+ <&i2s0m0_sclk_rx>,
+ <&i2s0m0_mclk>,
+ <&i2s0m0_lrck_tx>,
+ <&i2s0m0_lrck_rx>,
+ <&i2s0m0_sdi0>,
+ <&i2s0m0_sdo0>,
+ <&i2s0m0_sdo1_sdi3>,
+ <&i2s0m0_sdo2_sdi2>,
+ <&i2s0m0_sdo3_sdi1>;
+ resets = <&cru SRST_I2S0_TX_M>, <&cru SRST_I2S0_RX_M>;
+ reset-names = "tx-m", "rx-m";
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
vop: vop@ffb00000 {
compatible = "rockchip,rv1126-vop";
reg = <0xffb00000 0x200>, <0xffb00a00 0x400>;
diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile
index 015903d09323..eab3a9bd435f 100644
--- a/arch/arm/boot/dts/st/Makefile
+++ b/arch/arm/boot/dts/st/Makefile
@@ -35,8 +35,11 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32mp151a-prtt1c.dtb \
stm32mp151a-prtt1s.dtb \
stm32mp151a-dhcor-testbench.dtb \
+ stm32mp151c-mecio1r0.dtb \
+ stm32mp151c-mect1s.dtb \
stm32mp153c-dhcom-drc02.dtb \
stm32mp153c-dhcor-drc-compact.dtb \
+ stm32mp153c-mecio1r1.dtb \
stm32mp157a-avenger96.dtb \
stm32mp157a-dhcor-avenger96.dtb \
stm32mp157a-dk1.dtb \
diff --git a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
index c9f588a65094..8db1ec4a3b26 100644
--- a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi
@@ -94,14 +94,20 @@
/omit-if-no-ref/
eth1_rgmii_sleep_pins_a: eth1-rgmii-sleep-0 {
pins1 {
+ pinmux = <STM32_PINMUX('A', 2, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 2, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+
+ pins2 {
pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
<STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
<STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
<STM32_PINMUX('E', 5, ANALOG)>, /* ETH_RGMII_TXD3 */
<STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
<STM32_PINMUX('C', 1, ANALOG)>, /* ETH_RGMII_GTX_CLK */
- <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
- <STM32_PINMUX('G', 2, ANALOG)>, /* ETH_MDC */
<STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
<STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
<STM32_PINMUX('B', 0, ANALOG)>, /* ETH_RGMII_RXD1 */
@@ -178,14 +184,20 @@
/omit-if-no-ref/
eth2_rgmii_sleep_pins_a: eth2-rgmii-sleep-0 {
pins1 {
+ pinmux = <STM32_PINMUX('B', 6, AF11)>, /* ETH_MDIO */
+ <STM32_PINMUX('G', 5, AF10)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+
+ pins2 {
pinmux = <STM32_PINMUX('F', 7, ANALOG)>, /* ETH_RGMII_TXD0 */
<STM32_PINMUX('G', 11, ANALOG)>, /* ETH_RGMII_TXD1 */
<STM32_PINMUX('G', 1, ANALOG)>, /* ETH_RGMII_TXD2 */
<STM32_PINMUX('E', 6, ANALOG)>, /* ETH_RGMII_TXD3 */
<STM32_PINMUX('F', 6, ANALOG)>, /* ETH_RGMII_TX_CTL */
<STM32_PINMUX('G', 3, ANALOG)>, /* ETH_RGMII_GTX_CLK */
- <STM32_PINMUX('B', 6, ANALOG)>, /* ETH_MDIO */
- <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_MDC */
<STM32_PINMUX('F', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
<STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_RXD1 */
<STM32_PINMUX('H', 6, ANALOG)>, /* ETH_RGMII_RXD2 */
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
index bacb70b4256b..853dc21449d9 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
@@ -75,6 +75,8 @@
};
&ethernet1 {
+ nvmem-cell-names = "mac-address";
+ nvmem-cells = <&ethernet_mac1_address>;
phy-handle = <&ethphy1>;
phy-mode = "rgmii-id";
pinctrl-0 = <&eth1_rgmii_pins_a>;
@@ -94,14 +96,36 @@
interrupt-parent = <&gpiog>;
interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
reg = <1>;
+ realtek,clkout-disable;
reset-assert-us = <15000>;
reset-deassert-us = <55000>;
reset-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ linux,default-trigger = "netdev";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_WAN;
+ linux,default-trigger = "netdev";
+ };
+ };
};
};
};
&ethernet2 {
+ nvmem-cell-names = "mac-address";
+ nvmem-cells = <&ethernet_mac2_address>;
phy-handle = <&ethphy2>;
phy-mode = "rgmii-id";
pinctrl-0 = <&eth2_rgmii_pins_a>;
@@ -121,9 +145,29 @@
interrupt-parent = <&gpiog>;
interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
reg = <1>;
+ realtek,clkout-disable;
reset-assert-us = <15000>;
reset-deassert-us = <55000>;
reset-gpios = <&gpiog 8 GPIO_ACTIVE_LOW>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
index ae83e7b10232..70e132dc6147 100644
--- a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
@@ -2229,6 +2229,9 @@
<STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
<STM32_PINMUX('E', 5, AF9)>, /* SDMMC2_D6 */
<STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
};
};
diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts
index 75874eafde11..8e1dd84e0c0a 100644
--- a/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts
+++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts
@@ -28,16 +28,12 @@
};
};
-&pwm5_pins_a {
- pins {
- pinmux = <STM32_PINMUX('A', 0, AF2)>; /* TIM5_CH1 */
- };
+&{pwm5_pins_a/pins} {
+ pinmux = <STM32_PINMUX('A', 0, AF2)>; /* TIM5_CH1 */
};
-&pwm5_sleep_pins_a {
- pins {
- pinmux = <STM32_PINMUX('A', 0, ANALOG)>; /* TIM5_CH1 */
- };
+&{pwm5_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('A', 0, ANALOG)>; /* TIM5_CH1 */
};
&timers5 {
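
For readers new to the syntax adopted in this and the following board files: &{label/child-path} resolves a label and then descends by path, so an overlay can target an unlabeled child node (here the pins subnode) directly instead of re-opening the parent and nesting a second copy of the hierarchy. A minimal sketch, with hypothetical node names:

    /* base .dtsi: the child node carries no label of its own */
    foo_pins: foo-0 {
            pins {
                    bias-disable;
            };
    };

    /* board .dts: override the child in place */
    &{foo_pins/pins} {
            /delete-property/ bias-disable;
            bias-pull-up;
    };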
diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts
index c90d815f906b..3b33b7093b68 100644
--- a/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts
+++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts
@@ -168,52 +168,42 @@
status = "okay";
};
-&sdmmc2_b4_od_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
- <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
- <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
- <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
- };
+&{sdmmc2_b4_od_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
};
-&sdmmc2_b4_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
- <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
- <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
- <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
- <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
- };
+&{sdmmc2_b4_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
};
-&sdmmc2_b4_sleep_pins_a {
- pins {
- pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
- <STM32_PINMUX('B', 7, ANALOG)>, /* SDMMC2_D1 */
- <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
- <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
- <STM32_PINMUX('E', 3, ANALOG)>, /* SDMMC2_CK */
- <STM32_PINMUX('G', 6, ANALOG)>; /* SDMMC2_CMD */
- };
+&{sdmmc2_b4_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, ANALOG)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('E', 3, ANALOG)>, /* SDMMC2_CK */
+ <STM32_PINMUX('G', 6, ANALOG)>; /* SDMMC2_CMD */
};
-&sdmmc2_d47_pins_a {
- pins {
- pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
- <STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
- <STM32_PINMUX('C', 6, AF10)>, /* SDMMC2_D6 */
- <STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
- };
+&{sdmmc2_d47_pins_a/pins} {
+ pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, AF10)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
};
-&sdmmc2_d47_sleep_pins_a {
- pins {
- pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
- <STM32_PINMUX('A', 9, ANALOG)>, /* SDMMC2_D5 */
- <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC2_D6 */
- <STM32_PINMUX('D', 3, ANALOG)>; /* SDMMC2_D7 */
- };
+&{sdmmc2_d47_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, ANALOG)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('D', 3, ANALOG)>; /* SDMMC2_D7 */
};
&sdmmc3 {
@@ -238,34 +228,28 @@
};
};
-&sdmmc3_b4_od_pins_b {
- pins1 {
- pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
- <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
- <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
- <STM32_PINMUX('D', 7, AF10)>; /* SDMMC3_D3 */
- };
+&{sdmmc3_b4_od_pins_b/pins1} {
+ pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>; /* SDMMC3_D3 */
};
-&sdmmc3_b4_pins_b {
- pins1 {
- pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
- <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
- <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
- <STM32_PINMUX('D', 7, AF10)>, /* SDMMC3_D3 */
- <STM32_PINMUX('D', 0, AF10)>; /* SDMMC3_CMD */
- };
+&{sdmmc3_b4_pins_b/pins1} {
+ pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('D', 0, AF10)>; /* SDMMC3_CMD */
};
-&sdmmc3_b4_sleep_pins_b {
- pins {
- pinmux = <STM32_PINMUX('D', 1, ANALOG)>, /* SDMMC3_D0 */
- <STM32_PINMUX('D', 4, ANALOG)>, /* SDMMC3_D1 */
- <STM32_PINMUX('D', 5, ANALOG)>, /* SDMMC3_D2 */
- <STM32_PINMUX('D', 7, ANALOG)>, /* SDMMC3_D3 */
- <STM32_PINMUX('G', 15, ANALOG)>, /* SDMMC3_CK */
- <STM32_PINMUX('D', 0, ANALOG)>; /* SDMMC3_CMD */
- };
+&{sdmmc3_b4_sleep_pins_b/pins} {
+ pinmux = <STM32_PINMUX('D', 1, ANALOG)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, ANALOG)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, ANALOG)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, ANALOG)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('G', 15, ANALOG)>, /* SDMMC3_CK */
+ <STM32_PINMUX('D', 0, ANALOG)>; /* SDMMC3_CMD */
};
&spi1 {
diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi b/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi
index 3938d357e198..98a31c2b5d45 100644
--- a/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi
@@ -69,30 +69,27 @@
status = "okay";
};
-&ethernet0_rmii_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 12, AF11)>, /* ETH1_RMII_TXD0 */
- <STM32_PINMUX('B', 13, AF11)>, /* ETH1_RMII_TXD1 */
- <STM32_PINMUX('B', 11, AF11)>; /* ETH1_RMII_TX_EN */
- };
- pins2 {
- pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
- <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
- <STM32_PINMUX('A', 1, AF11)>, /* ETH1_RMII_REF_CLK input */
- <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
- };
+&{ethernet0_rmii_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 12, AF11)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, AF11)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>; /* ETH1_RMII_TX_EN */
};
-&ethernet0_rmii_sleep_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 12, ANALOG)>, /* ETH1_RMII_TXD0 */
- <STM32_PINMUX('B', 13, ANALOG)>, /* ETH1_RMII_TXD1 */
- <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
- <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
- <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
- <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
- <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
- };
+&{ethernet0_rmii_pins_a/pins2} {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH1_RMII_REF_CLK input */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
+};
+
+&{ethernet0_rmii_sleep_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 12, ANALOG)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, ANALOG)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
};
&iwdg2 {
@@ -122,12 +119,11 @@
};
};
-&qspi_bk1_pins_a {
- pins1 {
- bias-pull-up;
- drive-push-pull;
- slew-rate = <1>;
- };
+&{qspi_bk1_pins_a/pins} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <1>;
};
&rng1 {
@@ -147,22 +143,24 @@
status = "okay";
};
-&sdmmc1_b4_od_pins_a {
- pins1 {
- bias-pull-up;
- };
- pins2 {
- bias-pull-up;
- };
+&{sdmmc1_b4_od_pins_a/pins1} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
};
-&sdmmc1_b4_pins_a {
- pins1 {
- bias-pull-up;
- };
- pins2 {
- bias-pull-up;
- };
+&{sdmmc1_b4_od_pins_a/pins2} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+};
+
+&{sdmmc1_b4_pins_a/pins1} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+};
+
+&{sdmmc1_b4_pins_a/pins2} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
};
&uart4 {
@@ -175,34 +173,30 @@
status = "okay";
};
-&uart4_idle_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 9, ANALOG)>; /* UART4_TX */
- };
- pins2 {
- pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
- bias-pull-up;
- };
+&{uart4_idle_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 9, ANALOG)>; /* UART4_TX */
};
-&uart4_pins_a {
- pins1 {
- pinmux = <STM32_PINMUX('B', 9, AF8)>; /* UART4_TX */
- bias-disable;
- drive-push-pull;
- slew-rate = <0>;
- };
- pins2 {
- pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
- bias-pull-up;
- };
+&{uart4_idle_pins_a/pins2} {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ /delete-property/ bias-disable;
+ bias-pull-up;
};
-&uart4_sleep_pins_a {
- pins {
- pinmux = <STM32_PINMUX('B', 9, ANALOG)>, /* UART4_TX */
- <STM32_PINMUX('B', 2, ANALOG)>; /* UART4_RX */
- };
+&{uart4_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 9, AF8)>; /* UART4_TX */
+ slew-rate = <0>;
+};
+
+&{uart4_pins_a/pins2} {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ /delete-property/ bias-disable;
+ bias-pull-up;
+};
+
+&{uart4_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('B', 9, ANALOG)>, /* UART4_TX */
+ <STM32_PINMUX('B', 2, ANALOG)>; /* UART4_RX */
};
&usbh_ehci {
diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts
index ad25929e64e6..b6be61b159e7 100644
--- a/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts
+++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts
@@ -36,18 +36,14 @@
};
};
-&i2c1_pins_a {
- pins {
- pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
- <STM32_PINMUX('D', 13, AF5)>; /* I2C1_SDA */
- };
+&{i2c1_pins_a/pins} {
+ pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
+ <STM32_PINMUX('D', 13, AF5)>; /* I2C1_SDA */
};
-&i2c1_sleep_pins_a {
- pins {
- pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
- <STM32_PINMUX('D', 13, ANALOG)>; /* I2C1_SDA */
- };
+&{i2c1_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
+ <STM32_PINMUX('D', 13, ANALOG)>; /* I2C1_SDA */
};
&mdio0 {
diff --git a/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts b/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts
new file mode 100644
index 000000000000..a5ea1431c399
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) Protonic Holland
+ * Author: David Jander <david@protonic.nl>
+ */
+/dts-v1/;
+
+#include "stm32mp151.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include "stm32mp15x-mecio1-io.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Protonic MECIO1r0";
+ compatible = "prt,mecio1r0", "st,stm32mp151";
+
+ led {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 13 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 14 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&clk_hse {
+ clock-frequency = <25000000>;
+};
+
+&ethernet0 {
+ assigned-clocks = <&rcc ETHCK_K>, <&rcc PLL3_Q>;
+ assigned-clock-parents = <&rcc PLL3_Q>;
+ assigned-clock-rates = <125000000>; /* Clock PLL3 to 625 MHz in TF-A. */
+ st,eth-clk-sel;
+};
diff --git a/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts b/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts
new file mode 100644
index 000000000000..a1b8c3646e98
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) Protonic Holland
+ * Author: David Jander <david@protonic.nl>
+ */
+/dts-v1/;
+
+#include "stm32mp151.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Protonic MECT1S";
+ compatible = "prt,mect1s", "st,stm32mp151";
+
+ chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ aliases {
+ serial0 = &uart4;
+ ethernet0 = &ethernet0;
+ ethernet1 = &ethernet1;
+ ethernet2 = &ethernet2;
+ ethernet3 = &ethernet3;
+ ethernet4 = &ethernet4;
+ };
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ v5v: regulator-v5v {
+ compatible = "regulator-fixed";
+ regulator-name = "v5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ led {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 14 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&clk_hse {
+ clock-frequency = <24000000>;
+};
+
+&clk_lse {
+ status = "disabled";
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rmii_pins_a>;
+ pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rmii";
+ max-speed = <100>;
+ st,eth-clk-sel;
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+
+ mdio0: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ };
+};
+
+&{ethernet0_rmii_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 12, AF11)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, AF11)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 2, AF11)>, /* ETH1_MDIO */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH1_MDC */
+};
+
+&{ethernet0_rmii_pins_a/pins2} {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH1_RMII_REF_CLK input */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
+};
+
+&{ethernet0_rmii_sleep_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 12, ANALOG)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, ANALOG)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
+};
+
+&mdio0 {
+ /* None of these DP83TG720R PHYs can be probed before switch@0 is
+ * probed, so we need to use compatibles with the PHY ID.
+ */
+ /* TI DP83TG720R */
+ t1_phy0: ethernet-phy@8 {
+ compatible = "ethernet-phy-id2000.a284";
+ reg = <8>;
+ interrupts-extended = <&gpioi 5 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioh 13 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <35>;
+ };
+
+ /* TI DP83TG720R */
+ t1_phy1: ethernet-phy@c {
+ compatible = "ethernet-phy-id2000.a284";
+ reg = <12>;
+ interrupts-extended = <&gpioj 0 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioh 14 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <35>;
+ };
+
+ /* TI DP83TG720R */
+ t1_phy2: ethernet-phy@4 {
+ compatible = "ethernet-phy-id2000.a284";
+ reg = <4>;
+ interrupts-extended = <&gpioi 7 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <35>;
+ };
+
+ /* TI DP83TG720R */
+ t1_phy3: ethernet-phy@d {
+ compatible = "ethernet-phy-id2000.a284";
+ reg = <13>;
+ interrupts-extended = <&gpioi 15 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioi 13 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <1000>;
+ };
+};
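
The workaround described in the comment above relies on the generic ethernet-phy-idAAAA.BBBB compatible: the two hex words mirror the PHY's MII identifier registers 2 and 3, so the MDIO core can bind the right driver without reading the ID off the bus. For the DP83TG720R those registers read 0x2000 and 0xa284, hence the string used in the nodes above; the generic shape is:

    ethernet-phy@1 {
            /* 2000.a284 = MII ID registers 2 and 3 of the DP83TG720R */
            compatible = "ethernet-phy-id2000.a284";
            reg = <1>;
    };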
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&{qspi_bk1_pins_a/pins} {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <1>;
+};
+
+&spi2 {
+ pinctrl-0 = <&spi2_pins_b>;
+ pinctrl-names = "default";
+ cs-gpios = <&gpioj 3 GPIO_ACTIVE_LOW>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ switch@0 {
+ compatible = "nxp,sja1105q";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-rx-delay-us = <1>;
+ spi-tx-delay-us = <1>;
+ spi-cpha;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet1: port@0 {
+ reg = <0>;
+ label = "t10";
+ phy-mode = "rgmii-id";
+ phy-handle = <&t1_phy0>;
+ };
+
+ ethernet2: port@1 {
+ reg = <1>;
+ label = "t11";
+ phy-mode = "rgmii-id";
+ phy-handle = <&t1_phy1>;
+ };
+
+ ethernet3: port@2 {
+ reg = <2>;
+ label = "t12";
+ phy-mode = "rgmii-id";
+ phy-handle = <&t1_phy2>;
+ };
+
+ ethernet4: port@3 {
+ reg = <3>;
+ label = "t13";
+ phy-mode = "rgmii-id";
+ phy-handle = <&t1_phy3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "cpu";
+ ethernet = <&ethernet0>;
+ phy-mode = "rmii";
+
+ /* RGMII mode is not working properly, using RMII instead. */
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&usbh_ehci {
+ status = "okay";
+};
+
+&usbotg_hs {
+ dr_mode = "host";
+ pinctrl-0 = <&usbotg_hs_pins_a>;
+ pinctrl-names = "default";
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ vbus-supply = <&v5v>;
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&v3v3>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&v3v3>;
+};
diff --git a/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts b/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts
new file mode 100644
index 000000000000..16b814c19350
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) Protonic Holland
+ * Author: David Jander <david@protonic.nl>
+ */
+/dts-v1/;
+
+#include "stm32mp153.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include "stm32mp15x-mecio1-io.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Protonic MECIO1r1";
+ compatible = "prt,mecio1r1", "st,stm32mp153";
+
+ led {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DEBUG;
+ gpios = <&gpioa 14 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&clk_hse {
+ clock-frequency = <24000000>;
+};
+
+&m_can1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can1_pins_b>;
+ pinctrl-1 = <&m_can1_sleep_pins_b>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi b/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi
new file mode 100644
index 000000000000..915ba2526f45
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) Protonic Holland
+ * Author: David Jander <david@protonic.nl>
+ */
+
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ aliases {
+ serial0 = &uart4;
+ ethernet0 = &ethernet0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ spi6 = &spi6;
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xC0000000 0x10000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ v5v: regulator-v5v {
+ compatible = "regulator-fixed";
+ regulator-name = "v5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+};
+
+&adc {
+ /* ANA0, ANA1 are dedicated pins and don't need pinctrl: only in6. */
+ pinctrl-0 = <&adc12_pins_mecsbc>;
+ pinctrl-names = "default";
+ vdd-supply = <&v3v3>;
+ vdda-supply = <&v3v3>;
+ vref-supply = <&v3v3>;
+ status = "okay";
+};
+
+&adc1 {
+ status = "okay";
+
+ channel@0 {
+ reg = <0>;
+ /* 16.5 ck_cycles sampling time */
+ st,min-sample-time-ns = <5000>;
+ label = "p24v_stp";
+ };
+
+ channel@1 {
+ reg = <1>;
+ st,min-sample-time-ns = <5000>;
+ label = "p24v_hpdcm";
+ };
+
+ channel@2 {
+ reg = <2>;
+ st,min-sample-time-ns = <5000>;
+ label = "ain0";
+ };
+
+ channel@3 {
+ reg = <3>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpdcm1_i2";
+ };
+
+ channel@5 {
+ reg = <5>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpout1_i";
+ };
+
+ channel@6 {
+ reg = <6>;
+ st,min-sample-time-ns = <5000>;
+ label = "ain1";
+ };
+
+ channel@9 {
+ reg = <9>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpout0_i";
+ };
+
+ channel@10 {
+ reg = <10>;
+ st,min-sample-time-ns = <5000>;
+ label = "phint0_ain";
+ };
+
+ channel@13 {
+ reg = <13>;
+ st,min-sample-time-ns = <5000>;
+ label = "phint1_ain";
+ };
+
+ channel@15 {
+ reg = <15>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpdcm0_i1";
+ };
+
+ channel@16 {
+ reg = <16>;
+ st,min-sample-time-ns = <5000>;
+ label = "lsin";
+ };
+
+ channel@18 {
+ reg = <18>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpdcm0_i2";
+ };
+
+ channel@19 {
+ reg = <19>;
+ st,min-sample-time-ns = <5000>;
+ label = "hpdcm1_i1";
+ };
+};
+
+&adc2 {
+ status = "okay";
+
+ channel@2 {
+ reg = <2>;
+ /* 16.5 ck_cycles sampling time */
+ st,min-sample-time-ns = <5000>;
+ label = "ain2";
+ };
+
+ channel@6 {
+ reg = <6>;
+ st,min-sample-time-ns = <5000>;
+ label = "ain3";
+ };
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rgmii_pins_x>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_x>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rgmii-id";
+ max-speed = <1000>;
+ phy-handle = <&phy0>;
+ st,eth-clk-sel;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@8 {
+ reg = <8>;
+ interrupts-extended = <&gpiog 7 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpiog 10 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <35>;
+ };
+ };
+};
+
+&gpiod {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "STP_RESETN", "STP_ENABLEN", "HPOUT0", "HPOUT0_ALERTN";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog_d_mecsbc>;
+};
+
+&gpioe {
+ gpio-line-names = "HPOUT0_RESETN", "HPOUT1", "HPOUT1_ALERTN", "",
+ "", "", "HPOUT1_RESETN",
+ "LPOUT0", "LPOUT0_ALERTN", "GPOUT0_RESETN",
+ "LPOUT1", "LPOUT1_ALERTN", "GPOUT1_RESETN",
+ "LPOUT2", "LPOUT2_ALERTN", "GPOUT2_RESETN";
+};
+
+&gpiof {
+ gpio-line-names = "LPOUT3", "LPOUT3_ALERTN", "GPOUT3_RESETN",
+ "LPOUT4", "LPOUT4_ALERTN", "GPOUT4_RESETN",
+ "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiog {
+ gpio-line-names = "LPOUT5", "LPOUT5_ALERTN", "", "LPOUT5_RESETN",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpioh {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "GPIO0_RESETN", "", "", "",
+ "", "", "", "";
+};
+
+&gpioi {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "HPDCM0_SLEEPN", "HPDCM1_SLEEPN", "GPIO1_RESETN", "",
+ "", "", "", "";
+};
+
+&gpioj {
+ gpio-line-names = "HSIN10", "HSIN11", "HSIN12", "HSIN13",
+ "HSIN14", "HSIN15", "", "",
+ "", "", "", "",
+ "", "RTD_RESETN", "", "";
+};
+
+&gpiok {
+ gpio-line-names = "", "", "HSIN0", "HSIN1",
+ "HSIN2", "HSIN3", "HSIN4", "HSIN5";
+};
+
+&gpioz {
+ gpio-line-names = "", "", "", "HSIN6",
+ "HSIN7", "HSIN8", "HSIN9", "";
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ pinctrl-1 = <&i2c2_sleep_pins_a>;
+ status = "okay";
+
+ gpio0: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "HSIN0_BIAS", "HSIN1_BIAS", "HSIN2_BIAS", "HSIN3_BIAS",
+ "", "", "HSIN_VREF0_LVL", "HSIN_VREF1_LVL",
+ "HSIN4_BIAS", "HSIN5_BIAS", "HSIN6_BIAS", "HSIN9_BIAS",
+ "", "", "", "";
+ };
+
+ gpio1: gpio@21 {
+ compatible = "ti,tca6416";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "HSIN8_BIAS", "HSIN9_BIAS", "HSIN10_BIAS", "HSIN11_BIAS",
+ "", "", "HSIN_VREF2_LVL", "HSIN_VREF3_LVL",
+ "HSIN12_BIAS", "HSIN13_BIAS", "HSIN14_BIAS", "HSIN15_BIAS",
+ "", "", "LSIN8_BIAS", "LSIN9_BIAS";
+ };
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a
+ &qspi_bk1_pins_a
+ &qspi_cs1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a
+ &qspi_bk1_sleep_pins_a
+ &qspi_cs1_sleep_pins_a>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <104000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&{qspi_bk1_pins_a/pins} {
+ pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
+ <STM32_PINMUX('F', 9, AF10)>, /* QSPI_BK1_IO1 */
+ <STM32_PINMUX('F', 7, AF9)>, /* QSPI_BK1_IO2 */
+ <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
+ /delete-property/ bias-disable;
+ bias-pull-up;
+};
+
+&timers1 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ hpdcm0_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm1_pins_mecio1>;
+ pinctrl-1 = <&pwm1_sleep_pins_mecio1>;
+ status = "okay";
+ };
+};
+
+&timers8 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ hpdcm1_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm8_pins_mecio1>;
+ pinctrl-1 = <&pwm8_sleep_pins_mecio1>;
+ status = "okay";
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&{uart4_pins_a/pins1} {
+ pinmux = <STM32_PINMUX('B', 9, AF8)>; /* UART4_TX */
+};
+
+&{uart4_pins_a/pins2} {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ /delete-property/ bias-disable;
+ bias-pull-up;
+};
+
+&usbotg_hs {
+ dr_mode = "host";
+ pinctrl-0 = <&usbotg_hs_pins_a>;
+ pinctrl-names = "default";
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ vbus-supply = <&v5v>;
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&v3v3>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&v3v3>;
+};
+
+&pinctrl {
+ adc12_pins_mecsbc: adc12-ain-mecsbc-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* ADC1_INP2 */
+ <STM32_PINMUX('F', 12, ANALOG)>, /* ADC1_INP6 */
+ <STM32_PINMUX('F', 13, ANALOG)>, /* ADC2_INP2 */
+ <STM32_PINMUX('F', 14, ANALOG)>, /* ADC2_INP6 */
+ <STM32_PINMUX('A', 0, ANALOG)>, /* ADC1_INP16 */
+ <STM32_PINMUX('A', 3, ANALOG)>, /* ADC1_INP15 */
+ <STM32_PINMUX('A', 4, ANALOG)>, /* ADC1_INP18 */
+ <STM32_PINMUX('A', 5, ANALOG)>, /* ADC1_INP19 */
+ <STM32_PINMUX('A', 6, ANALOG)>, /* ADC1_INP3 */
+ <STM32_PINMUX('B', 0, ANALOG)>, /* ADC1_INP9 */
+ <STM32_PINMUX('B', 1, ANALOG)>, /* ADC1_INP5 */
+ <STM32_PINMUX('C', 0, ANALOG)>, /* ADC1_INP10 */
+ <STM32_PINMUX('C', 3, ANALOG)>; /* ADC1_INP13 */
+ };
+ };
+
+ pinctrl_hog_d_mecsbc: hog-d-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, GPIO)>; /* STP_RESETn */
+ bias-pull-up;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm1_pins_mecio1: pwm1-mecio1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, AF1)>, /* TIM1_CH1 */
+ <STM32_PINMUX('A', 9, AF1)>; /* TIM1_CH2 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm1_sleep_pins_mecio1: pwm1-sleep-mecio1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* TIM1_CH1 */
+ <STM32_PINMUX('A', 9, ANALOG)>; /* TIM1_CH2 */
+ };
+ };
+
+ pwm8_pins_mecio1: pwm8-mecio1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 5, AF3)>, /* TIM8_CH1 */
+ <STM32_PINMUX('I', 6, AF3)>; /* TIM8_CH2 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm8_sleep_pins_mecio1: pwm8-sleep-mecio1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 5, ANALOG)>, /* TIM8_CH1 */
+ <STM32_PINMUX('I', 6, ANALOG)>; /* TIM8_CH2 */
+ };
+ };
+
+ ethernet0_rgmii_pins_x: rgmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 12, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('B', 13, AF11)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('B', 8, AF11)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 2, AF11)>; /* ETH_MDIO */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, AF11)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('H', 7, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
+ bias-disable;
+ };
+ };
+
+ ethernet0_rgmii_sleep_pins_x: rgmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 12, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('B', 13, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('B', 8, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('H', 7, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
index 466d9701add0..171d7c7658fa 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
@@ -192,15 +192,11 @@
sgtl5000_tx_endpoint: endpoint@0 {
reg = <0>;
remote-endpoint = <&sai2a_endpoint>;
- frame-master = <&sgtl5000_tx_endpoint>;
- bitclock-master = <&sgtl5000_tx_endpoint>;
};
sgtl5000_rx_endpoint: endpoint@1 {
reg = <1>;
remote-endpoint = <&sai2b_endpoint>;
- frame-master = <&sgtl5000_rx_endpoint>;
- bitclock-master = <&sgtl5000_rx_endpoint>;
};
};
@@ -245,10 +241,12 @@
sai2a_port: port {
sai2a_endpoint: endpoint {
remote-endpoint = <&sgtl5000_tx_endpoint>;
+ bitclock-master;
dai-format = "i2s";
- mclk-fs = <512>;
dai-tdm-slot-num = <2>;
dai-tdm-slot-width = <16>;
+ frame-master;
+ mclk-fs = <256>;
};
};
};
@@ -263,10 +261,12 @@
sai2b_port: port {
sai2b_endpoint: endpoint {
remote-endpoint = <&sgtl5000_rx_endpoint>;
+ bitclock-master;
dai-format = "i2s";
- mclk-fs = <512>;
dai-tdm-slot-num = <2>;
dai-tdm-slot-width = <16>;
+ frame-master;
+ mclk-fs = <256>;
};
};
};
diff --git a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi
index 2d0216840ff5..a0fb431aec84 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi
@@ -221,10 +221,14 @@
reg = <0x50>;
vcc-supply = <&ldo4_reg>;
- #address-cells = <1>;
- #size-cells = <1>;
- baseboard_data: baseboard_data@0 {
- reg = <0 0x100>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ baseboard_data: baseboard_data@0 {
+ reg = <0 0x100>;
+ };
};
};
};
@@ -239,40 +243,60 @@
cape_eeprom0: cape_eeprom0@54 {
compatible = "atmel,24c256";
reg = <0x54>;
- #address-cells = <1>;
- #size-cells = <1>;
- cape0_data: cape_data@0 {
- reg = <0 0x100>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cape0_data: cape_data@0 {
+ reg = <0 0x100>;
+ };
};
};
cape_eeprom1: cape_eeprom1@55 {
compatible = "atmel,24c256";
reg = <0x55>;
- #address-cells = <1>;
- #size-cells = <1>;
- cape1_data: cape_data@0 {
- reg = <0 0x100>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cape1_data: cape_data@0 {
+ reg = <0 0x100>;
+ };
};
};
cape_eeprom2: cape_eeprom2@56 {
compatible = "atmel,24c256";
reg = <0x56>;
- #address-cells = <1>;
- #size-cells = <1>;
- cape2_data: cape_data@0 {
- reg = <0 0x100>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cape2_data: cape_data@0 {
+ reg = <0 0x100>;
+ };
};
};
cape_eeprom3: cape_eeprom3@57 {
compatible = "atmel,24c256";
reg = <0x57>;
- #address-cells = <1>;
- #size-cells = <1>;
- cape3_data: cape_data@0 {
- reg = <0 0x100>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cape3_data: cape_data@0 {
+ reg = <0 0x100>;
+ };
};
};
};
@@ -385,7 +409,7 @@
/* Support GPIO reset on revision C3 boards */
reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
reset-assert-us = <300>;
- reset-deassert-us = <6500>;
+ reset-deassert-us = <13000>;
};
};
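The hunks above convert the BeagleBone EEPROMs from ad-hoc child cells to the generic fixed-layout nvmem binding. For context, a consumer driver reads such a cell through the nvmem consumer API; below is a minimal sketch, assuming a consumer node that maps the cell under the nvmem-cell-names entry "baseboard-data" (that con_id and the function name are illustrative, not part of this patch).

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Sketch of reading the baseboard_data cell defined above; assumes the
 * consumer's DT node references it as "baseboard-data". */
static int read_baseboard_header(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *buf;

	cell = nvmem_cell_get(dev, "baseboard-data");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... parse the 0x100-byte board header here ... */

	kfree(buf);
	return 0;
}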
diff --git a/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts b/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts
index 801399702547..8878da773d67 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts
@@ -317,10 +317,14 @@
compatible = "atmel,24c256";
reg = <0x50>;
- #address-cells = <1>;
- #size-cells = <1>;
- baseboard_data: baseboard_data@0 {
- reg = <0 0x100>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ baseboard_data: baseboard_data@0 {
+ reg = <0 0x100>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/ti/omap/am335x-nano.dts b/arch/arm/boot/dts/ti/omap/am335x-nano.dts
index 26b5510cb3d1..56929059f5af 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-nano.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-nano.dts
@@ -231,7 +231,7 @@
};
temperature-sensor@48 {
- compatible = "lm75";
+ compatible = "national,lm75";
reg = <0x48>;
};
diff --git a/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi b/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi
index 625db3bcd365..287d209a0ea9 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi
@@ -5,6 +5,9 @@
*
*/
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/am33xx.h>
+
/ {
model = "Phytec AM335x phyBOARD-REGOR";
compatible = "phytec,am335x-regor", "phytec,am335x-phycore-som", "ti,am33xx";
@@ -188,7 +191,7 @@
pinctrl-single,pins = <
AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
};
@@ -198,4 +201,9 @@
pinctrl-0 = <&uart1_rs485_pins>;
status = "okay";
linux,rs485-enabled-at-boot-time;
+ /*
+ * Counter-intuitively, with the default (active-high) polarity,
+ * the am335x RTS line is high when idle and goes low when active!
+ */
+ rs485-rts-active-low;
};
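The rs485-rts-active-low property added above asks the serial core for an inverted RTS (driver-enable) polarity at boot. As a hedged illustration, the same request can be made at run time from user space via the TIOCSRS485 ioctl; the tty device path below is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485 = { 0 };
	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY); /* assumed port */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* RTS_AFTER_SEND set and RTS_ON_SEND clear: RTS is driven low
	 * while transmitting and high when idle, i.e. active-low. */
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND;
	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");
	close(fd);
	return 0;
}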
diff --git a/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi b/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi
index cb27ff464dbe..d0c290d7d062 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi
@@ -14,7 +14,7 @@
simple-audio-card,format = "i2s";
simple-audio-card,bitclock-master = <&sound_iface_main>;
simple-audio-card,frame-master = <&sound_iface_main>;
- simple-audio-card,mclk-fs = <32>;
+ simple-audio-card,mclk-fs = <512>;
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out",
@@ -27,13 +27,12 @@
"LINE1L", "Line In",
"LINE1R", "Line In";
- simple-audio-card,cpu {
+ sound_iface_main: simple-audio-card,cpu {
sound-dai = <&mcasp0>;
};
- sound_iface_main: simple-audio-card,codec {
+ simple-audio-card,codec {
sound-dai = <&tlv320aic3007>;
- clocks = <&mcasp0_fck>;
};
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
index dfb1fbafe3aa..33b02e05ce82 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
+++ b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
@@ -97,9 +97,9 @@
status = "okay";
clock-frequency = <400000>;
- stlm75@49 {
+ temperature-sensor@49 {
status = "okay";
- compatible = "lm75";
+ compatible = "st,stlm75";
reg = <0x49>;
};
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 6eabe2313c9a..2022a7fca0f9 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -16,6 +16,7 @@ CONFIG_ARCH_AT91=y
CONFIG_SOC_AT91RM9200=y
CONFIG_SOC_AT91SAM9=y
CONFIG_SOC_SAM9X60=y
+CONFIG_SOC_SAM9X7=y
# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 0376a65e8bc1..e19c1039fb93 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -43,6 +43,7 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 62734530a3d6..9a5f5c439b87 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -413,6 +413,7 @@ CONFIG_I2C_AT91=m
CONFIG_I2C_BCM2835=y
CONFIG_I2C_CADENCE=y
CONFIG_I2C_DAVINCI=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_DIGICOLOR=m
CONFIG_I2C_EMEV2=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index f2ca5c9131b5..e1cb170c2bf0 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -277,6 +277,7 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_MUX_PCA954x=m
CONFIG_I2C_MUX_PINCTRL=m
+CONFIG_I2C_DESIGNWARE_CORE=m
CONFIG_I2C_DESIGNWARE_PLATFORM=m
CONFIG_I2C_GPIO=y
CONFIG_I2C_PXA_SLAVE=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 56925adfe842..0e380e450a62 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -24,6 +24,7 @@ CONFIG_CPUFREQ_DT=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index e82c3866b810..294906c8f16e 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -83,6 +83,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_8250_DW=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
index c8128a6180e7..a8f992fdb30d 100644
--- a/arch/arm/configs/spear13xx_defconfig
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -62,6 +62,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 97ea2e9a6f07..8dc5a388759c 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -42,6 +42,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index a7a3413ac968..4e9e1a6ff381 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -33,6 +33,7 @@ CONFIG_STMMAC_ETH=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 847b7a003356..5ff49a5e9afc 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -166,10 +166,9 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
+ select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_AES
- select CRYPTO_CBC
select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
@@ -183,8 +182,15 @@ config CRYPTO_AES_ARM_BS
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption; CBC and XTS mode decryption speedup is
around 25%. (CBC encryption speed is not affected by this driver.)
- This implementation does not rely on any lookup tables so it is
- believed to be invulnerable to cache timing attacks.
+
+ The bit sliced AES code does not use lookup tables, so it is believed
+ to be invulnerable to cache timing attacks. However, since the bit
+ sliced AES code cannot process single blocks efficiently, in certain
+ cases table-based code with some countermeasures against cache timing
+ attacks will still be used as a fallback method; specifically CBC
+ encryption (not CBC decryption), the encryption of XTS tweaks, XTS
+ ciphertext stealing when the message isn't a multiple of 16 bytes, and
+ CTR when invoked in a context in which NEON instructions are unusable.
config CRYPTO_AES_ARM_CE
tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b668c97663ec..f5b66f4cf45d 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -711,7 +711,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 6dfaef2d8f91..29efb7833960 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -9,9 +9,10 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
+#include "aes-cipher.h"
-asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
-asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
+EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
+EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h
new file mode 100644
index 000000000000..d5db2b87eb69
--- /dev/null
+++ b/arch/arm/crypto/aes-cipher.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CRYPTO_AES_CIPHER_H
+#define ARM_CRYPTO_AES_CIPHER_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+
+#endif /* ARM_CRYPTO_AES_CIPHER_H */
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 201eb35dde37..f6be80b5938b 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -9,24 +9,22 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
+#include "aes-cipher.h"
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)-all");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
-
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
@@ -52,13 +50,13 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
- struct crypto_skcipher *enc_tfm;
+ struct crypto_aes_ctx fallback;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
- struct crypto_cipher *cts_tfm;
- struct crypto_cipher *tweak_tfm;
+ struct crypto_aes_ctx fallback;
+ struct crypto_aes_ctx tweak_key;
};
struct aesbs_ctr_ctx {
@@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_aes_ctx rk;
int err;
- err = aes_expandkey(&rk, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
+ aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
kernel_neon_end();
- memzero_explicit(&rk, sizeof(rk));
- return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
+ return 0;
}
static int cbc_encrypt(struct skcipher_request *req)
{
- struct skcipher_request *subreq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
- skcipher_request_set_tfm(subreq, ctx->enc_tfm);
- skcipher_request_set_callback(subreq,
- skcipher_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ err = skcipher_walk_virt(&walk, req, false);
- return crypto_skcipher_encrypt(subreq);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *prev = walk.iv;
+
+ do {
+ crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
+ __aes_arm_encrypt(ctx->fallback.key_enc,
+ ctx->key.rounds, dst, dst);
+ prev = dst;
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ nbytes -= AES_BLOCK_SIZE;
+ } while (nbytes >= AES_BLOCK_SIZE);
+ memcpy(walk.iv, prev, AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+ return err;
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int cbc_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned int reqsize;
-
- ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->enc_tfm))
- return PTR_ERR(ctx->enc_tfm);
-
- reqsize = sizeof(struct skcipher_request);
- reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
- crypto_skcipher_set_reqsize(tfm, reqsize);
-
- return 0;
-}
-
-static void cbc_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->enc_tfm);
-}
-
static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req)
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(&ctx->fallback, dst, src);
- local_irq_restore(flags);
+
+ __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
}
static int ctr_encrypt_sync(struct skcipher_request *req)
@@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
- err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
- err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
+ err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
if (err)
return err;
return aesbs_setkey(tfm, in_key, key_len);
}
-static int xts_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->cts_tfm))
- return PTR_ERR(ctx->cts_tfm);
-
- ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tweak_tfm))
- crypto_free_cipher(ctx->cts_tfm);
-
- return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
-}
-
-static void xts_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->tweak_tfm);
- crypto_free_cipher(ctx->cts_tfm);
-}
-
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[], int))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const int rounds = ctx->key.rounds;
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
u8 buf[2 * AES_BLOCK_SIZE];
@@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
- crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+ __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
- ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
+ rounds, blocks, walk.iv, reorder_last_tweak);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
@@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
if (encrypt)
- crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
else
- crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
@@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_cbc_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- .init = cbc_init,
- .exit = cbc_exit,
}, {
.base.cra_name = "__ctr(aes)",
.base.cra_driver_name = "__ctr-aes-neonbs",
@@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
- .init = xts_init,
- .exit = xts_exit,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@@ -540,7 +491,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index a41b503b7dcd..f63ba8986b24 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -127,6 +127,12 @@ static inline u32 read_pmuver(void)
return (dfr0 >> 24) & 0xf;
}
+static inline bool pmuv3_has_icntr(void)
+{
+ /* FEAT_PMUv3_ICNTR not accessible for 32-bit */
+ return false;
+}
+
static inline void write_pmcr(u32 val)
{
write_sysreg(val, PMCR);
@@ -152,6 +158,13 @@ static inline u64 read_pmccntr(void)
return read_sysreg(PMCCNTR);
}
+static inline void write_pmicntr(u64 val) {}
+
+static inline u64 read_pmicntr(void)
+{
+ return 0;
+}
+
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
@@ -177,6 +190,13 @@ static inline void write_pmccfiltr(u32 val)
write_sysreg(val, PMCCFILTR);
}
+static inline void write_pmicfiltr(u64 val) {}
+
+static inline u64 read_pmicfiltr(void)
+{
+ return 0;
+}
+
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, PMOVSR);
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index bd6fdb4b922d..9d8863537aa5 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,7 +11,6 @@
#include <linux/cpu.h>
struct cpuinfo_arm {
- struct cpu cpu;
u32 cpuid;
#ifdef CONFIG_SMP
unsigned int loops_per_jiffy;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 82ec1ccf1fee..2ce4c5683e6d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -24,7 +24,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size);
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index bd61502b9715..8a648e506540 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -7,4 +7,6 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+static inline void kvm_arch_init_hyp_services(void) { }
+
#endif
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index dfab3e982cbf..944fc9955528 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -106,6 +106,11 @@
/*
* TTBCR register bits.
+ *
+ * The ORGN0 and IRGN0 bits enable different forms of caching when
+ * walking the translation table. Clearing these bits (which is claimed
+ * to be the reset default) means "normal memory, [outer|inner]
+ * non-cacheable".
*/
#define TTBCR_EAE (1 << 31)
#define TTBCR_IMP (1 << 30)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7b33b157fca0..e6a857bf0ce6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1201,20 +1201,10 @@ void __init setup_arch(char **cmdline_p)
mdesc->init_early();
}
-
-static int __init topology_init(void)
+bool arch_cpu_is_hotpluggable(int num)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
- register_cpu(&cpuinfo->cpu, cpu);
- }
-
- return 0;
+ return platform_can_hotplug_cpu(num);
}
-subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
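The hunk above works because the driver core now registers CPU devices itself and queries the architecture through an overridable hook. A rough sketch of that generic-side contract follows; the demo_* names are illustrative, not the exact driver-core source.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>

/* Weak default: architectures such as 32-bit ARM override this to
 * report per-CPU hotplug capability, as the hunk above now does. */
bool __weak arch_cpu_is_hotpluggable(int cpu)
{
	return false;
}

static DEFINE_PER_CPU(struct cpu, demo_cpu_devices); /* illustrative */

static int __init demo_register_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(demo_cpu_devices, cpu);

		c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
		register_cpu(c, cpu); /* errors ignored for brevity */
	}
	return 0;
}
subsys_initcall(demo_register_cpus);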
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index d00f4040a9f5..f5781ff54a5c 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -239,19 +239,19 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
struct flock64 flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
switch (cmd) {
case F_GETLK64:
case F_OFD_GETLK:
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
break;
err = get_oabi_flock(&flock, argp);
if (err)
break;
- err = fcntl_getlk64(f.file, cmd, &flock);
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
if (!err)
err = put_oabi_flock(&flock, argp);
break;
@@ -259,13 +259,13 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
case F_SETLKW64:
case F_OFD_SETLK:
case F_OFD_SETLKW:
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
break;
err = get_oabi_flock(&flock, argp);
if (err)
break;
- err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
break;
default:
err = sys_fcntl64(fd, cmd, arg);
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 522510baed49..cf57fca97908 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -8,6 +8,7 @@
#include <linux/raid/xor.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("NEON accelerated XOR implementation");
MODULE_LICENSE("GPL");
#ifndef __ARM_NEON__
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index a8c022b4c053..344f5305f69a 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -141,11 +141,27 @@ config SOC_SAM9X60
help
Select this if you are using Microchip's SAM9X60 SoC
+config SOC_SAM9X7
+ bool "SAM9X7"
+ depends on ARCH_MULTI_V5
+ select ATMEL_AIC5_IRQ
+ select ATMEL_PM if PM
+ select CPU_ARM926T
+ select HAVE_AT91_USB_CLK
+ select HAVE_AT91_GENERATED_CLK
+ select HAVE_AT91_SAM9X60_PLL
+ select MEMORY
+ select PINCTRL_AT91
+ select SOC_SAM_V4_V5
+ select SRAM if PM
+ help
+ Select this if you are using Microchip's SAM9X7 SoC
+
comment "Clocksource driver selection"
config ATMEL_CLOCKSOURCE_PIT
bool "Periodic Interval Timer (PIT) support"
- depends on SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
+ depends on SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
default SOC_AT91SAM9 || SOC_SAMA5
select ATMEL_PIT
help
@@ -155,7 +171,7 @@ config ATMEL_CLOCKSOURCE_PIT
config ATMEL_CLOCKSOURCE_TCB
bool "Timer Counter Blocks (TCB) support"
- default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
select ATMEL_TCB_CLKSRC
help
Select this to get a high precision clocksource based on a
@@ -166,7 +182,7 @@ config ATMEL_CLOCKSOURCE_TCB
config MICROCHIP_CLOCKSOURCE_PIT64B
bool "64-bit Periodic Interval Timer (PIT64B) support"
- default SOC_SAM9X60 || SOC_SAMA7
+ default SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA7
select MICROCHIP_PIT64B
help
Select this to get a high resolution clockevent (SAM9X60) or
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 794bd12ab0a8..7d8a7bc44e65 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5) += sama5.o sam_secure.o
obj-$(CONFIG_SOC_SAMA7) += sama7.o
obj-$(CONFIG_SOC_SAMV7) += samv7.o
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 0c3960a8b3eb..acf0b3c82a30 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -12,6 +12,7 @@
extern void __init at91rm9200_pm_init(void);
extern void __init at91sam9_pm_init(void);
extern void __init sam9x60_pm_init(void);
+extern void __init sam9x7_pm_init(void);
extern void __init sama5_pm_init(void);
extern void __init sama5d2_pm_init(void);
extern void __init sama7_pm_init(void);
@@ -19,6 +20,7 @@ extern void __init sama7_pm_init(void);
static inline void __init at91rm9200_pm_init(void) { }
static inline void __init at91sam9_pm_init(void) { }
static inline void __init sam9x60_pm_init(void) { }
+static inline void __init sam9x7_pm_init(void) { }
static inline void __init sama5_pm_init(void) { }
static inline void __init sama5d2_pm_init(void) { }
static inline void __init sama7_pm_init(void) { }
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 345b91dc6627..b9b995f8a36e 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -233,6 +233,13 @@ static const struct of_device_id sama7g5_ws_ids[] = {
{ /* sentinel */ }
};
+static const struct of_device_id sam9x7_ws_ids[] = {
+ { .compatible = "microchip,sam9x7-rtc", .data = &ws_info[1] },
+ { .compatible = "microchip,sam9x7-rtt", .data = &ws_info[4] },
+ { .compatible = "microchip,sam9x7-gem", .data = &ws_info[5] },
+ { /* sentinel */ }
+};
+
static int at91_pm_config_ws(unsigned int pm_mode, bool set)
{
const struct wakeup_source_info *wsi;
@@ -1361,6 +1368,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
+ { .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
{ /* sentinel */ },
};
@@ -1499,6 +1507,27 @@ void __init sam9x60_pm_init(void)
soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
}
+void __init sam9x7_pm_init(void)
+{
+ static const int modes[] __initconst = {
+ AT91_PM_STANDBY, AT91_PM_ULP0,
+ };
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
+ return;
+
+ at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
+
+ at91_pm_init(NULL);
+
+ soc_pm.ws_ids = sam9x7_ws_ids;
+ soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+}
+
void __init at91sam9_pm_init(void)
{
int ret;
diff --git a/arch/arm/mach-at91/sam9x7.c b/arch/arm/mach-at91/sam9x7.c
new file mode 100644
index 000000000000..e1ff30b5b09b
--- /dev/null
+++ b/arch/arm/mach-at91/sam9x7.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Setup code for SAM9X7.
+ *
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Varshini Rajendran <varshini.rajendran@microchip.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+
+#include "generic.h"
+
+static void __init sam9x7_init(void)
+{
+ of_platform_default_populate(NULL, NULL, NULL);
+
+ sam9x7_pm_init();
+}
+
+static const char * const sam9x7_dt_board_compat[] __initconst = {
+ "microchip,sam9x7",
+ NULL
+};
+
+DT_MACHINE_START(sam9x7_dt, "Microchip SAM9X7")
+ /* Maintainer: Microchip */
+ .init_machine = sam9x7_init,
+ .dt_compat = sam9x7_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 7318d8789e24..24bc6e18d806 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -185,6 +185,7 @@ config ARCH_BRCMSTB
select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GIC
+ select ARM_GIC_V3
select ARM_ERRATA_798181 if SMP
select HAVE_ARM_ARCH_TIMER
select ZONE_DMA if ARM_LPAE
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 450883ea0e73..31d22a5d8e1e 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_ARCH_DAVINCI_DA850) += da850.o pdata-quirks.o
obj-y += da8xx-dt.o
# Power Management
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_HAVE_CLK) += pm_domain.o
ifeq ($(CONFIG_SUSPEND),y)
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pm.o sleep.o
diff --git a/arch/arm/mach-davinci/common.h b/arch/arm/mach-davinci/common.h
index 8aa6d4fc3f6f..81a2b06b46e9 100644
--- a/arch/arm/mach-davinci/common.h
+++ b/arch/arm/mach-davinci/common.h
@@ -52,7 +52,6 @@ struct davinci_soc_info {
extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(const struct davinci_soc_info *soc_info);
-extern void davinci_init_ide(void);
void davinci_init_late(void);
#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
deleted file mode 100644
index 78a1575c387d..000000000000
--- a/arch/arm/mach-davinci/cpuidle.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CPU idle for DaVinci SoCs
- *
- * Copyright (C) 2009 Texas Instruments Incorporated. https://www.ti.com/
- *
- * Derived from Marvell Kirkwood CPU idle code
- * (arch/arm/mach-kirkwood/cpuidle.c)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/cpuidle.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cpuidle.h>
-
-#include "cpuidle.h"
-#include "ddr2.h"
-
-#define DAVINCI_CPUIDLE_MAX_STATES 2
-
-static void __iomem *ddr2_reg_base;
-static bool ddr2_pdown;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
- u32 val;
-
- val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
- if (enter) {
- if (pdown)
- val |= DDR2_SRPD_BIT;
- else
- val &= ~DDR2_SRPD_BIT;
- val |= DDR2_LPMODEN_BIT;
- } else {
- val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
- }
-
- __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-/* Actual code that puts the SoC in different idle states */
-static __cpuidle int davinci_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- davinci_save_ddr_power(1, ddr2_pdown);
- cpu_do_idle();
- davinci_save_ddr_power(0, ddr2_pdown);
-
- return index;
-}
-
-static struct cpuidle_driver davinci_idle_driver = {
- .name = "cpuidle-davinci",
- .owner = THIS_MODULE,
- .states[0] = ARM_CPUIDLE_WFI_STATE,
- .states[1] = {
- .enter = davinci_enter_idle,
- .exit_latency = 10,
- .target_residency = 10000,
- .name = "DDR SR",
- .desc = "WFI and DDR Self Refresh",
- },
- .state_count = DAVINCI_CPUIDLE_MAX_STATES,
-};
-
-static int __init davinci_cpuidle_probe(struct platform_device *pdev)
-{
- struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
-
- if (!pdata) {
- dev_err(&pdev->dev, "cannot get platform data\n");
- return -ENOENT;
- }
-
- ddr2_reg_base = pdata->ddr2_ctlr_base;
-
- ddr2_pdown = pdata->ddr2_pdown;
-
- return cpuidle_register(&davinci_idle_driver, NULL);
-}
-
-static struct platform_driver davinci_cpuidle_driver = {
- .driver = {
- .name = "cpuidle-davinci",
- },
-};
-
-static int __init davinci_cpuidle_init(void)
-{
- return platform_driver_probe(&davinci_cpuidle_driver,
- davinci_cpuidle_probe);
-}
-device_initcall(davinci_cpuidle_init);
-
diff --git a/arch/arm/mach-davinci/cpuidle.h b/arch/arm/mach-davinci/cpuidle.h
deleted file mode 100644
index 976d43073597..000000000000
--- a/arch/arm/mach-davinci/cpuidle.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI DaVinci cpuidle platform support
- *
- * 2009 (C) Texas Instruments, Inc. https://www.ti.com/
- */
-#ifndef _MACH_DAVINCI_CPUIDLE_H
-#define _MACH_DAVINCI_CPUIDLE_H
-
-struct davinci_cpuidle_config {
- u32 ddr2_pdown;
- void __iomem *ddr2_ctlr_base;
-};
-
-#endif
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 6939166c33c2..5e73a725d5da 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -21,7 +21,6 @@
#include "common.h"
#include "cputype.h"
#include "da8xx.h"
-#include "cpuidle.h"
#include "irqs.h"
#include "sram.h"
diff --git a/arch/arm/mach-davinci/mux.h b/arch/arm/mach-davinci/mux.h
index 38f0e427291e..05fd3902df65 100644
--- a/arch/arm/mach-davinci/mux.h
+++ b/arch/arm/mach-davinci/mux.h
@@ -654,14 +654,9 @@ enum davinci_da850_index {
#ifdef CONFIG_DAVINCI_MUX
/* setup pin muxing */
extern int davinci_cfg_reg(unsigned long reg_cfg);
-extern int davinci_cfg_reg_list(const short pins[]);
#else
/* boot loader does it all (no warnings from CONFIG_DAVINCI_MUX_WARNINGS) */
static inline int davinci_cfg_reg(unsigned long reg_cfg) { return 0; }
-static inline int davinci_cfg_reg_list(const short pins[])
-{
- return 0;
-}
#endif
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 3aea90bbb41a..35e978514591 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -82,7 +82,7 @@ static void __init dove_clk_init(void)
{
struct clk *usb0, *usb1, *sata, *pex0, *pex1, *sdio0, *sdio1;
struct clk *nand, *camera, *i2s0, *i2s1, *crypto, *ac97, *pdma;
- struct clk *xor0, *xor1, *ge, *gephy;
+ struct clk *xor0, *xor1, *ge;
tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, dove_tclk);
@@ -102,7 +102,7 @@ static void __init dove_clk_init(void)
pdma = dove_register_gate("pdma", "tclk", CLOCK_GATING_BIT_PDMA);
xor0 = dove_register_gate("xor0", "tclk", CLOCK_GATING_BIT_XOR0);
xor1 = dove_register_gate("xor1", "tclk", CLOCK_GATING_BIT_XOR1);
- gephy = dove_register_gate("gephy", "tclk", CLOCK_GATING_BIT_GIGA_PHY);
+ dove_register_gate("gephy", "tclk", CLOCK_GATING_BIT_GIGA_PHY);
ge = dove_register_gate("ge", "gephy", CLOCK_GATING_BIT_GBE);
orion_clkdev_add(NULL, "orion_spi.0", tclk);
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 9471938df64c..85f0dd7255a9 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -76,8 +76,7 @@ static int vision_lcd_setup(struct platform_device *pdev)
{
int err;
- err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_INIT_HIGH,
- dev_name(&pdev->dev));
+ err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_OUT_INIT_HIGH, dev_name(&pdev->dev));
if (err)
return err;
@@ -293,8 +292,7 @@ static void __init vision_init_machine(void)
* Request the gpio expander's interrupt gpio line now to prevent
* the kernel from doing a WARN in gpiolib:gpio_ensure_requested().
*/
- if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_DIR_IN,
- "pca9539:74"))
+ if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_IN, "pca9539:74"))
pr_warn("cannot request interrupt gpio for pca9539:74\n");
vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index 9ababf4ac210..3feb31ab556e 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -7,37 +7,15 @@
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <asm/mach/arch.h>
#include "common.h"
#include "cpuidle.h"
-static void __init imx6sx_enet_clk_sel(void)
-{
- struct regmap *gpr;
-
- gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr");
- if (!IS_ERR(gpr)) {
- regmap_update_bits(gpr, IOMUXC_GPR1,
- IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK, 0);
- regmap_update_bits(gpr, IOMUXC_GPR1,
- IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK, 0);
- } else {
- pr_err("failed to find fsl,imx6sx-iomux-gpr regmap\n");
- }
-}
-
-static inline void imx6sx_enet_init(void)
-{
- imx6sx_enet_clk_sel();
-}
-
static void __init imx6sx_init_machine(void)
{
of_platform_default_populate(NULL, NULL, NULL);
- imx6sx_enet_init();
imx_anatop_init();
imx6sx_pm_init();
}
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index 9587885fb1ac..87632ae0201c 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -48,7 +48,7 @@ static void __init imx7d_enet_clk_sel(void)
}
}
-static inline void imx7d_enet_init(void)
+static void __init imx7d_enet_init(void)
{
imx7d_enet_phy_init();
imx7d_enet_clk_sel();
diff --git a/arch/arm/mach-lpc32xx/Kconfig b/arch/arm/mach-lpc32xx/Kconfig
index 35730d3696d0..138599545c24 100644
--- a/arch/arm/mach-lpc32xx/Kconfig
+++ b/arch/arm/mach-lpc32xx/Kconfig
@@ -8,5 +8,6 @@ config ARCH_LPC32XX
select CLKSRC_LPC32XX
select CPU_ARM926T
select GPIOLIB
+ select LPC32XX_DMAMUX if AMBA_PL08X
help
Support for the NXP LPC32XX family of processors
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index fd5d0c8ff695..04ad651d13a0 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -86,6 +86,9 @@ static int __init mvebu_scan_mem(unsigned long node, const char *uname,
base = dt_mem_next_cell(dt_root_addr_cells, &reg);
size = dt_mem_next_cell(dt_root_size_cells, &reg);
+ if (size < MVEBU_DDR_TRAINING_AREA_SZ)
+ pr_warn("Too little memory to reserve for DDR training\n");
+
memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ);
}
diff --git a/arch/arm/mach-omap1/omap-dma.c b/arch/arm/mach-omap1/omap-dma.c
index 9ee472f8ead1..f091f78631d0 100644
--- a/arch/arm/mach-omap1/omap-dma.c
+++ b/arch/arm/mach-omap1/omap-dma.c
@@ -59,19 +59,6 @@ static struct omap_dma_dev_attr *d;
static int enable_1510_mode;
static u32 errata;
-struct dma_link_info {
- int *linked_dmach_q;
- int no_of_lchs_linked;
-
- int q_count;
- int q_tail;
- int q_head;
-
- int chain_state;
- int chain_mode;
-
-};
-
static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;
diff --git a/arch/arm/mach-omap1/pm.h b/arch/arm/mach-omap1/pm.h
index d4373a5c4697..b2763fb097ea 100644
--- a/arch/arm/mach-omap1/pm.h
+++ b/arch/arm/mach-omap1/pm.h
@@ -114,13 +114,9 @@ extern void omap1_pm_suspend(void);
extern void omap1510_cpu_suspend(unsigned long, unsigned long);
extern void omap1610_cpu_suspend(unsigned long, unsigned long);
-extern void omap1510_idle_loop_suspend(void);
-extern void omap1610_idle_loop_suspend(void);
extern unsigned int omap1510_cpu_suspend_sz;
extern unsigned int omap1610_cpu_suspend_sz;
-extern unsigned int omap1510_idle_loop_suspend_sz;
-extern unsigned int omap1610_idle_loop_suspend_sz;
#ifdef CONFIG_OMAP_SERIAL_WAKE
extern void omap_serial_wake_trigger(int enable);
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 7ad74db951f6..f18ef45e2fe1 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -333,7 +333,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
omap_pm_ops.scu_prepare(cpu, power_state);
/*
- * CPU never retuns back if targeted power state is OFF mode.
+ * CPU never returns if the targeted power state is OFF mode.
* CPU ONLINE follows the normal CPU ONLINE path via
* omap4_secondary_startup().
*/
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index fca7869c8075..800980057373 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -315,7 +315,7 @@ static struct omap_device *omap_device_alloc(struct platform_device *pdev,
od->hwmods_cnt = oh_cnt;
- hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
+ hwmods = kmemdup_array(ohs, oh_cnt, sizeof(*hwmods), GFP_KERNEL);
if (!hwmods)
goto oda_exit2;
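kmemdup_array() differs from the open-coded kmemdup(src, n * size) it replaces in that the element-count multiplication is overflow-checked. A sketch of the idea, under the assumption that this is the helper's essential behavior (the real helper lives in the kernel's string/slab code):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Overflow-safe array duplication: returns NULL instead of allocating
 * a truncated buffer when count * element_size would wrap. */
static inline void *kmemdup_array_sketch(const void *src, size_t count,
					 size_t element_size, gfp_t gfp)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(count, element_size, &bytes)))
		return NULL;

	return kmemdup(src, bytes, gfp);
}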
diff --git a/arch/arm/mach-orion5x/board-mss2.c b/arch/arm/mach-orion5x/board-mss2.c
index b0f16d223adf..9e3d69891d2f 100644
--- a/arch/arm/mach-orion5x/board-mss2.c
+++ b/arch/arm/mach-orion5x/board-mss2.c
@@ -82,5 +82,5 @@ static void mss2_power_off(void)
void __init mss2_init(void)
{
/* register mss2 specific power-off method */
- pm_power_off = mss2_power_off;
+ register_platform_power_off(mss2_power_off);
}
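This and the following hunks move the Orion5x boards from overwriting the single global pm_power_off pointer to the sys-off handler framework, which chains handlers by priority instead of letting the last registrant win. A minimal board-side sketch, with the GPIO details elided:

#include <linux/init.h>
#include <linux/reboot.h>

static void board_power_off(void)
{
	/* assert the board's power-off GPIO here */
}

static int __init board_poweroff_init(void)
{
	/* Registers at platform priority; unlike assigning pm_power_off
	 * directly, multiple handlers can now coexist. */
	return register_platform_power_off(board_power_off);
}
device_initcall(board_poweroff_init);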
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 062109efa0ec..fcd38ff7ca45 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -700,7 +700,7 @@ static void __init dns323_init(void)
if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 ||
gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0)
pr_err("DNS-323: failed to setup power-off GPIO\n");
- pm_power_off = dns323a_power_off;
+ register_platform_power_off(dns323a_power_off);
break;
case DNS323_REV_B1:
/* 5182 built-in SATA init */
@@ -717,7 +717,7 @@ static void __init dns323_init(void)
if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 ||
gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0)
pr_err("DNS-323: failed to setup power-off GPIO\n");
- pm_power_off = dns323b_power_off;
+ register_platform_power_off(dns323b_power_off);
break;
case DNS323_REV_C1:
/* 5182 built-in SATA init */
@@ -727,7 +727,7 @@ static void __init dns323_init(void)
if (gpio_request(DNS323C_GPIO_POWER_OFF, "POWEROFF") != 0 ||
gpio_direction_output(DNS323C_GPIO_POWER_OFF, 0) != 0)
pr_err("DNS-323: failed to setup power-off GPIO\n");
- pm_power_off = dns323c_power_off;
+ register_platform_power_off(dns323c_power_off);
/* Now, -this- should theoretically be done by the sata_mv driver
* once I figure out what's going on there. Maybe the behaviour
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index acba06618080..339b10891808 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -373,7 +373,7 @@ static void __init kurobox_pro_init(void)
i2c_register_board_info(0, &kurobox_pro_i2c_rtc, 1);
/* register Kurobox Pro specific power-off method */
- pm_power_off = kurobox_pro_power_off;
+ register_platform_power_off(kurobox_pro_power_off);
}
#ifdef CONFIG_MACH_KUROBOX_PRO
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index b7327a612835..5b0249f109cd 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -238,7 +238,7 @@ static void __init mv2120_init(void)
if (gpio_request(MV2120_GPIO_POWER_OFF, "POWEROFF") != 0 ||
gpio_direction_output(MV2120_GPIO_POWER_OFF, 1) != 0)
pr_err("mv2120: failed to setup power-off GPIO\n");
- pm_power_off = mv2120_power_off;
+ register_platform_power_off(mv2120_power_off);
}
/* Warning: HP uses a wrong mach-type (=526) in their bootloader */
diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c
index 6ad9740b426b..4afd9b4c71a9 100644
--- a/arch/arm/mach-orion5x/net2big-setup.c
+++ b/arch/arm/mach-orion5x/net2big-setup.c
@@ -423,7 +423,7 @@ static void __init net2big_init(void)
if (gpio_request(NET2BIG_GPIO_POWER_OFF, "power-off") == 0 &&
gpio_direction_output(NET2BIG_GPIO_POWER_OFF, 0) == 0)
- pm_power_off = net2big_power_off;
+ register_platform_power_off(net2big_power_off);
else
pr_err("net2big: failed to configure power-off GPIO\n");
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c
index 23a5521c6833..a9f01859d101 100644
--- a/arch/arm/mach-orion5x/terastation_pro2-setup.c
+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c
@@ -349,7 +349,7 @@ static void __init tsp2_init(void)
i2c_register_board_info(0, &tsp2_i2c_rtc, 1);
/* register Terastation Pro II specific power-off method */
- pm_power_off = tsp2_power_off;
+ register_platform_power_off(tsp2_power_off);
}
MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live")
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index bab8ba0e01ab..de9092e992c5 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -314,7 +314,7 @@ static void __init qnap_ts209_init(void)
i2c_register_board_info(0, &qnap_ts209_i2c_rtc, 1);
/* register tsx09 specific power-off method */
- pm_power_off = qnap_tsx09_power_off;
+ register_platform_power_off(qnap_tsx09_power_off);
}
MACHINE_START(TS209, "QNAP TS-109/TS-209")
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 8131982c10d9..725688aa5cba 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -312,7 +312,7 @@ static void __init qnap_ts409_init(void)
platform_device_register(&ts409_leds);
/* register tsx09 specific power-off method */
- pm_power_off = qnap_tsx09_power_off;
+ register_platform_power_off(qnap_tsx09_power_off);
}
MACHINE_START(TS409, "QNAP TS-409")
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 452bf7aac1fa..33533e35720f 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -378,38 +378,56 @@ static const uint32_t spitz_keymap[] = {
KEY(6, 8, KEY_RIGHT),
};
-static const struct matrix_keymap_data spitz_keymap_data = {
- .keymap = spitz_keymap,
- .keymap_size = ARRAY_SIZE(spitz_keymap),
+static const struct software_node_ref_args spitz_mkp_row_gpios[] = {
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 12, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 17, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 91, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 34, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 36, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 38, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 39, GPIO_ACTIVE_HIGH),
};
-static const uint32_t spitz_row_gpios[] =
- { 12, 17, 91, 34, 36, 38, 39 };
-static const uint32_t spitz_col_gpios[] =
- { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 };
-
-static struct matrix_keypad_platform_data spitz_mkp_pdata = {
- .keymap_data = &spitz_keymap_data,
- .row_gpios = spitz_row_gpios,
- .col_gpios = spitz_col_gpios,
- .num_row_gpios = ARRAY_SIZE(spitz_row_gpios),
- .num_col_gpios = ARRAY_SIZE(spitz_col_gpios),
- .col_scan_delay_us = 10,
- .debounce_ms = 10,
- .wakeup = 1,
+static const struct software_node_ref_args spitz_mkp_col_gpios[] = {
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 88, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 23, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 24, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 25, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 26, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 27, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 52, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 103, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 107, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 108, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 114, GPIO_ACTIVE_HIGH),
};
-static struct platform_device spitz_mkp_device = {
+static const struct property_entry spitz_mkp_properties[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,keymap", spitz_keymap),
+ PROPERTY_ENTRY_REF_ARRAY("row-gpios", spitz_mkp_row_gpios),
+ PROPERTY_ENTRY_REF_ARRAY("col-gpios", spitz_mkp_col_gpios),
+ PROPERTY_ENTRY_U32("col-scan-delay-us", 10),
+ PROPERTY_ENTRY_U32("debounce-delay-ms", 10),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
+};
+
+static const struct platform_device_info spitz_mkp_info __initconst = {
.name = "matrix-keypad",
- .id = -1,
- .dev = {
- .platform_data = &spitz_mkp_pdata,
- },
+ .id = PLATFORM_DEVID_NONE,
+ .properties = spitz_mkp_properties,
};
+
static void __init spitz_mkp_init(void)
{
- platform_device_register(&spitz_mkp_device);
+ struct platform_device *pd;
+ int err;
+
+ pd = platform_device_register_full(&spitz_mkp_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("failed to create keypad device: %d\n", err);
}
#else
static inline void spitz_mkp_init(void) {}
@@ -419,45 +437,82 @@ static inline void spitz_mkp_init(void) {}
* GPIO keys
******************************************************************************/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
-static struct gpio_keys_button spitz_gpio_keys[] = {
- {
- .type = EV_PWR,
- .code = KEY_SUSPEND,
- .gpio = SPITZ_GPIO_ON_KEY,
- .desc = "On Off",
- .wakeup = 1,
- },
- /* Two buttons detecting the lid state */
- {
- .type = EV_SW,
- .code = 0,
- .gpio = SPITZ_GPIO_SWA,
- .desc = "Display Down",
- },
- {
- .type = EV_SW,
- .code = 1,
- .gpio = SPITZ_GPIO_SWB,
- .desc = "Lid Closed",
- },
+static const struct software_node spitz_gpio_keys_node = {
+ .name = "spitz-gpio-keys",
};
-static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = {
- .buttons = spitz_gpio_keys,
- .nbuttons = ARRAY_SIZE(spitz_gpio_keys),
+static const struct property_entry spitz_suspend_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_PWR),
+ PROPERTY_ENTRY_U32("linux,code", KEY_SUSPEND),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_ON_KEY, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "On Off"),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
};
-static struct platform_device spitz_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &spitz_gpio_keys_platform_data,
- },
+static const struct software_node spitz_suspend_key_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_suspend_key_props,
+};
+
+static const struct property_entry spitz_sw1_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", 0),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_SWA, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "Display Down"),
+ { }
+};
+
+static const struct software_node spitz_sw1_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_sw1_props,
+};
+
+static const struct property_entry spitz_sw2_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", 1),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_SWB, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "Lid Closed"),
+ { }
+};
+
+static const struct software_node spitz_sw2_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_sw2_props,
+};
+
+static const struct software_node *spitz_gpio_keys_swnodes[] = {
+ &spitz_gpio_keys_node,
+ &spitz_suspend_key_node,
+ &spitz_sw1_node,
+ &spitz_sw2_node,
+ NULL
};
static void __init spitz_keys_init(void)
{
- platform_device_register(&spitz_gpio_keys_device);
+ struct platform_device_info keys_info = {
+ .name = "gpio-keys",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ struct platform_device *pd;
+ int err;
+
+ err = software_node_register_node_group(spitz_gpio_keys_swnodes);
+ if (err) {
+ pr_err("failed to register gpio-keys software nodes: %d\n", err);
+ return;
+ }
+
+ keys_info.fwnode = software_node_fwnode(&spitz_gpio_keys_node);
+
+ pd = platform_device_register_full(&keys_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("failed to create gpio-keys device: %d\n", err);
}
#else
static inline void spitz_keys_init(void) {}
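The spitz hunks above convert legacy platform_data tables into software nodes and property entries, so the matrix-keypad and gpio-keys drivers parse the same generic device properties they would receive from devicetree. A condensed sketch of the lifecycle, with illustrative node and property names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("debounce-delay-ms", 10),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }
};

static const struct software_node demo_node = {
	.name = "demo-keys",
	.properties = demo_props,
};

static const struct software_node *demo_swnodes[] = { &demo_node, NULL };

static int __init demo_register(void)
{
	struct platform_device_info info = {
		.name = "gpio-keys",
		.id = PLATFORM_DEVID_NONE,
	};
	struct platform_device *pd;
	int err;

	/* register the node group first, then attach it as the fwnode */
	err = software_node_register_node_group(demo_swnodes);
	if (err)
		return err;

	info.fwnode = software_node_fwnode(&demo_node);
	pd = platform_device_register_full(&info);
	return PTR_ERR_OR_ZERO(pd);
}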
diff --git a/arch/arm/mach-s3c/irq-uart-s3c64xx.h b/arch/arm/mach-s3c/irq-uart-s3c64xx.h
index 78eccdce95a7..e754b0359c8a 100644
--- a/arch/arm/mach-s3c/irq-uart-s3c64xx.h
+++ b/arch/arm/mach-s3c/irq-uart-s3c64xx.h
@@ -12,5 +12,3 @@ struct s3c_uart_irq {
unsigned int parent_irq;
};
-extern void s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs);
-
diff --git a/arch/arm/mach-s3c/mach-crag6410-module.c b/arch/arm/mach-s3c/mach-crag6410-module.c
index 2de1a89f6e99..4ffcf024b09d 100644
--- a/arch/arm/mach-s3c/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c/mach-crag6410-module.c
@@ -446,7 +446,7 @@ static int wlf_gf_module_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id wlf_gf_module_id[] = {
- { "wlf-gf-module", 0 },
+ { "wlf-gf-module" },
{ }
};
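The wlf-gf-module hunk drops the explicit 0 from the i2c_device_id entry; the trailing .driver_data is implicitly zero-initialized, so the shorter spelling is equivalent. A sketch with a hypothetical device name:

#include <linux/mod_devicetable.h>

static const struct i2c_device_id demo_id[] = {
	{ "demo-chip" },	/* same as { "demo-chip", 0 } */
	{ }
};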
diff --git a/arch/arm/mach-s3c/pm.h b/arch/arm/mach-s3c/pm.h
index 35d266ab6958..d48d15088525 100644
--- a/arch/arm/mach-s3c/pm.h
+++ b/arch/arm/mach-s3c/pm.h
@@ -48,8 +48,6 @@ extern unsigned long s3c_pm_flags;
/* from sleep.S */
-extern int s3c2410_cpu_suspend(unsigned long);
-
#ifdef CONFIG_PM_SLEEP
extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
extern void s3c_cpu_resume(void);
diff --git a/arch/arm/mach-s3c/s3c64xx.h b/arch/arm/mach-s3c/s3c64xx.h
index 92258e4f60f6..0505728f3f7b 100644
--- a/arch/arm/mach-s3c/s3c64xx.h
+++ b/arch/arm/mach-s3c/s3c64xx.h
@@ -23,17 +23,6 @@ struct device_node;
void s3c64xx_set_xtal_freq(unsigned long freq);
void s3c64xx_set_xusbxti_freq(unsigned long freq);
-#ifdef CONFIG_CPU_S3C6400
-
-extern int s3c6400_init(void);
-extern void s3c6400_init_irq(void);
-extern void s3c6400_map_io(void);
-
-#else
-#define s3c6400_map_io NULL
-#define s3c6400_init NULL
-#endif
-
#ifdef CONFIG_CPU_S3C6410
extern int s3c6410_init(void);
diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c
index 6965a1de727b..d38b2e174257 100644
--- a/arch/arm/mach-versatile/platsmp-realview.c
+++ b/arch/arm/mach-versatile/platsmp-realview.c
@@ -70,6 +70,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
return;
}
map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(map)) {
pr_err("PLATSMP: No syscon regmap\n");
return;
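The platsmp-realview.c hunk adds of_node_put() right after syscon_node_to_regmap(): the lookup that produced np took a reference, and it must be dropped once the regmap is in hand, whether or not the lookup succeeded. A sketch of the rule, with an illustrative compatible string:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *demo_get_syscon(void)
{
	struct device_node *np;
	struct regmap *map;

	np = of_find_compatible_node(NULL, NULL, "demo,syscon");
	if (!np)
		return ERR_PTR(-ENODEV);

	map = syscon_node_to_regmap(np);
	of_node_put(np);	/* drop the reference taken by of_find_compatible_node() */

	return map;
}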
diff --git a/arch/arm/mach-versatile/spc.c b/arch/arm/mach-versatile/spc.c
index 5e44170e1a9a..790092734cf6 100644
--- a/arch/arm/mach-versatile/spc.c
+++ b/arch/arm/mach-versatile/spc.c
@@ -73,7 +73,7 @@
/*
* Even though the SPC takes max 3-5 ms to complete any OPP/COMMS
- * operation, the operation could start just before jiffie is about
+ * operation, the operation could start just before jiffy is about
* to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz
*/
#define TIMEOUT_US 20000
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5adf1769eee4..88c2d68a69c9 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1532,7 +1532,7 @@ static const struct dma_map_ops iommu_ops = {
/**
* arm_iommu_create_mapping
- * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @dev: pointer to the client device (for IOMMU calls)
* @base: start address of the valid IO address space
* @size: maximum size of the valid IO address space
*
@@ -1544,7 +1544,7 @@ static const struct dma_map_ops iommu_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
-arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1585,9 +1585,11 @@ arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
spin_lock_init(&mapping->lock);
- mapping->domain = iommu_domain_alloc(bus);
- if (!mapping->domain)
+ mapping->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(mapping->domain)) {
+ err = PTR_ERR(mapping->domain);
goto err4;
+ }
kref_init(&mapping->kref);
return mapping;
@@ -1718,7 +1720,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev)
dma_base = dma_range_map_min(dev->dma_range_map);
size = dma_range_map_max(dev->dma_range_map) - dma_base;
}
- mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+ mapping = arm_iommu_create_mapping(dev, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
size, dev_name(dev));
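The dma-mapping.c conversion also changes the failure convention: iommu_domain_alloc(bus) returned NULL on error, while iommu_paging_domain_alloc(dev) returns an ERR_PTR carrying the reason, so callers propagate the encoded errno instead of inventing one. A sketch of the caller side:

#include <linux/err.h>
#include <linux/iommu.h>

static int demo_alloc_domain(struct device *dev, struct iommu_domain **out)
{
	struct iommu_domain *domain;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return PTR_ERR(domain);	/* e.g. -ENODEV or -ENOMEM */

	*out = domain;
	return 0;
}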
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2286c2ea60ec..831793cd6ff9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -61,7 +61,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
/*
* If we are using split PTE locks, then we need to take the page
* lock here. Otherwise we are using shared mm->page_table_lock
@@ -80,10 +80,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
{
spin_unlock(ptl);
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
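The fault-armv.c hunk keys the split-PTE-lock helpers off the new Kconfig symbol with the preprocessor. A sketch of the equivalent IS_ENABLED() spelling, which keeps both branches visible to the compiler; this is an alternative form, not what the patch itself does:

#include <linux/kconfig.h>
#include <linux/spinlock.h>

static inline void demo_pte_lock(spinlock_t *ptl)
{
	if (IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
		spin_lock(ptl);	/* per-page-table lock */
	/* otherwise the shared mm->page_table_lock is already held */
}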
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d65d0e6ed10a..3dbb383c26d5 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -28,7 +28,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -78,8 +79,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3f774856ca67..f85c177cdf8d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1638,7 +1638,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
- unsigned int cr, ttbcr;
+ u32 cr, ttbcr, tmp;
long long offset;
if (!mdesc->pv_fixup)
@@ -1688,7 +1688,9 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
ttbcr = cpu_get_ttbcr();
- cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
+ /* Disable all kinds of caching of the translation table */
+ tmp = ttbcr & ~(TTBCR_ORGN0_MASK | TTBCR_IRGN0_MASK);
+ cpu_set_ttbcr(tmp);
flush_cache_all();
/*
diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h
index 3c7938fd40aa..32090b0fb250 100644
--- a/arch/arm/vfp/vfpinstr.h
+++ b/arch/arm/vfp/vfpinstr.h
@@ -64,33 +64,37 @@
#ifdef CONFIG_AS_VFP_VMRS_FPINST
-#define fmrx(_vfp_) ({ \
- u32 __v; \
- asm(".fpu vfpv2\n" \
- "vmrs %0, " #_vfp_ \
- : "=r" (__v) : : "cc"); \
- __v; \
- })
-
-#define fmxr(_vfp_,_var_) \
- asm(".fpu vfpv2\n" \
- "vmsr " #_vfp_ ", %0" \
- : : "r" (_var_) : "cc")
+#define fmrx(_vfp_) ({ \
+ u32 __v; \
+ asm volatile (".fpu vfpv2\n" \
+ "vmrs %0, " #_vfp_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+})
+
+#define fmxr(_vfp_, _var_) ({ \
+ asm volatile (".fpu vfpv2\n" \
+ "vmsr " #_vfp_ ", %0" \
+ : : "r" (_var_) : "cc"); \
+})
#else
#define vfpreg(_vfp_) #_vfp_
-#define fmrx(_vfp_) ({ \
- u32 __v; \
- asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
- : "=r" (__v) : : "cc"); \
- __v; \
- })
-
-#define fmxr(_vfp_,_var_) \
- asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
- : : "r" (_var_) : "cc")
+#define fmrx(_vfp_) ({ \
+ u32 __v; \
+ asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
+ "cr0, 0 @ fmrx %0, " #_vfp_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+})
+
+#define fmxr(_vfp_, _var_) ({ \
+ asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
+ "cr0, 0 @ fmxr " #_vfp_ ", %0" \
+ : : "r" (_var_) : "cc"); \
+})
#endif
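The vfpinstr.h rewrite above adds "volatile" to the inline asm so the compiler can neither reorder nor elide accesses to the VFP system registers. A sketch of why that matters, assuming the fmrx()/fmxr() macros and the FPSCR_FLUSHTOZERO bit from the kernel's VFP headers:

#include <linux/types.h>
#include <asm/vfp.h>
#include "vfpinstr.h"

static inline void demo_enable_flush_to_zero(void)
{
	u32 fpscr = fmrx(FPSCR);	/* volatile: the read is always emitted */

	fpscr |= FPSCR_FLUSHTOZERO;	/* set the FZ bit */
	fmxr(FPSCR, fpscr);		/* volatile: not reordered past the read */
}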
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a2f8ff354ca6..3e29b44d2d7b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_DMA_OPS if XEN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER
@@ -34,6 +35,7 @@ config ARM64
select ARCH_HAS_KERNEL_FPU_SUPPORT if KERNEL_MODE_NEON
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
+ select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
@@ -99,6 +101,8 @@ config ARM64
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_RT
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -231,7 +235,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
- select HAVE_RUST if CPU_LITTLE_ENDIAN
+ select HAVE_RUST if RUSTC_SUPPORTS_ARM64
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -262,9 +266,22 @@ config ARM64
select TRACE_IRQFLAGS_NMI_SUPPORT
select HAVE_SOFTIRQ_ON_OWN_STACK
select USER_STACKTRACE_SUPPORT
+ select VDSO_GETRANDOM
help
ARM 64-bit (AArch64) Linux support.
+config RUSTC_SUPPORTS_ARM64
+ def_bool y
+ depends on CPU_LITTLE_ENDIAN
+ # Shadow call stack is only supported on certain rustc versions.
+ #
+ # When using the UNWIND_PATCH_PAC_INTO_SCS option, rustc version 1.80+ is
+ # required due to use of the -Zfixed-x18 flag.
+ #
+ # Otherwise, rustc version 1.82+ is required due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 || RUSTC_VERSION >= 108000 && UNWIND_PATCH_PAC_INTO_SCS
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
@@ -423,7 +440,7 @@ config AMPERE_ERRATUM_AC03_CPU_38
default y
help
This option adds an alternative code sequence to work around Ampere
- erratum AC03_CPU_38 on AmpereOne.
+ errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
The affected design reports FEAT_HAFDBS as not implemented in
ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
@@ -2100,7 +2117,8 @@ config ARM64_MTE
depends on ARM64_PAN
select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
- select ARCH_USES_PG_ARCH_X
+ select ARCH_USES_PG_ARCH_2
+ select ARCH_USES_PG_ARCH_3
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
architectural support for run-time, always-on detection of
@@ -2137,6 +2155,29 @@ config ARM64_EPAN
if the cpu does not implement the feature.
endmenu # "ARMv8.7 architectural features"
+menu "ARMv8.9 architectural features"
+
+config ARM64_POE
+ prompt "Permission Overlay Extension"
+ def_bool y
+ select ARCH_USES_HIGH_VMA_FLAGS
+ select ARCH_HAS_PKEYS
+ help
+ The Permission Overlay Extension is used to implement Memory
+ Protection Keys. Memory Protection Keys provides a mechanism for
+ enforcing page-based protections, but without requiring modification
+ of the page tables when an application changes protection domains.
+
+ For details, see Documentation/core-api/protection-keys.rst
+
+ If unsure, say y.
+
+config ARCH_PKEY_BITS
+ int
+ default 3
+
+endmenu # "ARMv8.9 architectural features"
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
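The ARM64_POE help text above describes the key property of Memory Protection Keys: access rights are switched per key, per thread, without editing page tables. A userspace sketch using the generic glibc pkey wrappers (not arm64-specific; the same API is shared with x86 protection keys):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, 0);	/* key with no initial restriction */

	if (buf == MAP_FAILED || pkey < 0) {
		perror("setup");
		return 1;
	}

	pkey_mprotect(buf, page, PROT_READ | PROT_WRITE, pkey);	/* tag the page once */
	pkey_set(pkey, PKEY_DISABLE_WRITE);	/* revoke write; no page-table update */
	/* buf[0] = 1; would now raise SIGSEGV until the key is re-enabled */
	pkey_set(pkey, 0);	/* restore access for this thread only */
	buf[0] = 1;		/* fine again */
	return 0;
}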
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f6bc3da1ef11..b058c4803efb 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -57,9 +57,11 @@ KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=n
else
KBUILD_CFLAGS += -fasynchronous-unwind-tables
KBUILD_AFLAGS += -fasynchronous-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=y -Zuse-sync-unwind=n
endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
@@ -114,6 +116,7 @@ endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS += -ffixed-x18
+KBUILD_RUSTFLAGS += -Zfixed-x18
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 607a67a649c4..b5a08333bc57 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -17,7 +17,7 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo \
- Image.zst image.fit
+ Image.zst Image.xz image.fit
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
@@ -40,6 +40,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.zst: $(obj)/Image FORCE
$(call if_changed,zstd)
+$(obj)/Image.xz: $(obj)/Image FORCE
+ $(call if_changed,xzkern)
+
$(obj)/image.fit: $(obj)/Image $(obj)/dts/dtbs-list FORCE
$(call if_changed,fit)
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 0db7b60b49a1..00bed412ee31 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -49,5 +49,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-2024.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-h.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-sp.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index e868ca5ae753..a5c3920e0f04 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -263,6 +263,14 @@
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 1>;
+
+ trips {
+ gpu0_crit: gpu0-crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
gpu1_thermal: gpu1-thermal {
@@ -270,6 +278,14 @@
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 2>;
+
+ trips {
+ gpu1_crit: gpu1-crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index b69032c44557..526443bb736c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -45,16 +45,40 @@
startup-delay-us = <100000>;
enable-active-high;
gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&reg_vcc3v3>;
+ };
+
+ reg_gmac_2v5: gmac-2v5 {
+ /* 2V5 supply for GMAC PHY IO */
+ compatible = "regulator-fixed";
+ regulator-name = "gmac-2v5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ vin-supply = <&reg_vcc3v3>;
+ };
+
+ reg_vcc5v: regulator-vcc5v {
+ /* board 5V supply from micro USB or pin headers */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
};
reg_vcc3v3: vcc3v3 {
+ /* board 3V3 supply by SY8089A */
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&reg_vcc5v>;
};
vdd_cpux: gpio-regulator {
+ /* cpu voltage regulator MP2143DJ */
compatible = "regulator-gpio";
regulator-name = "vdd-cpux";
regulator-type = "voltage";
@@ -66,6 +90,7 @@
gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>;
gpios-states = <0x1>;
states = <1100000 0>, <1300000 1>;
+ vin-supply = <&reg_vcc5v>;
};
wifi_pwrseq: pwrseq {
@@ -146,6 +171,18 @@
status = "okay";
};
+&pio {
+ vcc-pa-supply = <&reg_vcc3v3>;
+ vcc-pc-supply = <&reg_vcc3v3>;
+ vcc-pd-supply = <&reg_gmac_2v5>;
+ vcc-pf-supply = <&reg_vcc3v3>;
+ vcc-pg-supply = <&reg_vcc3v3>;
+};
+
+&r_pio {
+ vcc-pl-supply = <&reg_vcc3v3>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pa_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
index b29ce7321317..e88c1fbac6ac 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi
@@ -914,6 +914,8 @@
dmas = <&dma 48>, <&dma 48>;
dma-names = "rx", "tx";
resets = <&r_ccu RST_R_APB2_I2C>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_i2c_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
index afb49e65859f..80ccab7b5ba7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
@@ -21,6 +21,12 @@
serial0 = &uart0;
};
+ battery: battery {
+ compatible = "simple-battery";
+ constant-charge-current-max-microamp = <1024000>;
+ voltage-max-design-microvolt = <4200000>;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -201,12 +207,12 @@
vcc-pi-supply = <&reg_cldo3>;
};
-&r_rsb {
+&r_i2c {
status = "okay";
- axp717: pmic@3a3 {
+ axp717: pmic@34 {
compatible = "x-powers,axp717";
- reg = <0x3a3>;
+ reg = <0x34>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&nmi_intc>;
@@ -217,6 +223,16 @@
vin3-supply = <&reg_vcc5v>;
vin4-supply = <&reg_vcc5v>;
+ axp_adc: adc {
+ compatible = "x-powers,axp717-adc";
+ #io-channel-cells = <1>;
+ };
+
+ battery_power: battery-power {
+ compatible = "x-powers,axp717-battery-power-supply";
+ monitored-battery = <&battery>;
+ };
+
regulators {
reg_dcdc1: dcdc1 {
regulator-always-on;
@@ -307,6 +323,11 @@
/* unused */
};
};
+
+ usb_power: usb-power {
+ compatible = "x-powers,axp717-usb-power-supply";
+ input-current-limit-microamp = <1500000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts
new file mode 100644
index 000000000000..0cf16dc903cd
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2024 Ryan Walklin <ryan@testtoast.com>.
+ * Copyright (C) 2024 Chris Morgan <macroalpha82@gmail.com>.
+ */
+
+#include <dt-bindings/input/gpio-keys.h>
+#include "sun50i-h700-anbernic-rg35xx-plus.dts"
+
+/ {
+ model = "Anbernic RG35XX SP";
+ compatible = "anbernic,rg35xx-sp", "allwinner,sun50i-h700";
+
+ gpio-keys-lid {
+ compatible = "gpio-keys";
+
+ lid-switch {
+ label = "Lid Switch";
+ gpios = <&pio 4 7 GPIO_ACTIVE_LOW>; /* PE7 */
+ linux,can-disable;
+ linux,code = <SW_LID>;
+ linux,input-type = <EV_SW>;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ wakeup-source;
+ };
+ };
+};
+
+&r_i2c {
+ rtc_ext: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 29417f04f886..2fbda8419c65 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_MESON) += amlogic-a4-a113l2-ba400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-a5-a113x2-av400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c302x-aw409.dtb
+dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c308l-aw419.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-an400.dtb
dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-khadas-vim4.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi
index b6106ad4a072..54d7a2d56ef6 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi
@@ -52,6 +52,12 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+ watchdog@2100 {
+ compatible = "amlogic,a4-wdt", "amlogic,t7-wdt";
+ reg = <0x0 0x2100 0x0 0x10>;
+ clocks = <&xtal>;
+ };
+
uart_b: serial@7a000 {
compatible = "amlogic,a4-uart",
"amlogic,meson-s4-uart";
@@ -61,6 +67,14 @@
clock-names = "xtal", "pclk", "baud";
status = "disabled";
};
+
+ sec_ao: ao-secure@10220 {
+ compatible = "amlogic,a4-ao-secure",
+ "amlogic,meson-gx-ao-secure",
+ "syscon";
+ reg = <0x0 0x10220 0x0 0x140>;
+ amlogic,has-chip-id;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
index 43f68a7da2f7..17a6316de891 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
@@ -4,6 +4,7 @@
*/
#include "amlogic-a4-common.dtsi"
+#include <dt-bindings/power/amlogic,a5-pwrc.h>
/ {
cpus {
#address-cells = <2>;
@@ -37,4 +38,13 @@
enable-method = "psci";
};
};
+
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,a5-pwrc";
+ #power-domain-cells = <1>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts b/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts
index edce8850b338..a6736ad2a648 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts
@@ -16,14 +16,245 @@
aliases {
serial0 = &uart_b;
+ spi0 = &spifc;
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x10000000>;
};
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 9 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@7f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x07f00000 0x0 0x900000>;
+ no-map;
+ };
+ };
+
+ main_12v: regulator-main-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_5v: regulator-vcc-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddq: regulator-vddq {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDQ";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddao_1v8: regulator-vddao-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ddr4_2v5: regulator-ddr4-2v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "DDR4_2V5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_1v8: regulator-vdd-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD1V8_BOOT";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddio_b: regulator-vddio-3v3-b {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_B";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sdcard: regulator-sdcard {
+ compatible = "regulator-fixed";
+ regulator-name = "SDCARD_POWER";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ gpio = <&gpio GPIOA_4 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
};
&uart_b {
status = "okay";
};
+
+&nand {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-on-flash-bbt;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x00200000>;
+ };
+ partition@200000 {
+ label = "env";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "system";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "rootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "media";
+ reg = <0x04000000 0x8000000>;
+ };
+ };
+};
+
+&ethmac {
+ status = "okay";
+ phy-handle = <&internal_ephy>;
+ phy-mode = "rmii";
+};
+
+&spifc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-0 = <&spifc_pins>;
+ pinctrl-names = "default";
+
+ nand@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <83000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ status = "disabled";
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0 0x200000>;
+ };
+
+ partition@200000 {
+ label = "env";
+ reg = <0x200000 0x400000>;
+ };
+
+ partition@600000 {
+ label = "system";
+ reg = <0x600000 0xa00000>;
+ };
+
+ partition@1000000 {
+ label = "rootfs";
+ reg = <0x1000000 0x3000000>;
+ };
+
+ partition@4000000 {
+ label = "data";
+ reg = <0x4000000 0x8000000>;
+ };
+ };
+ };
+};
+
+&sd {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <50000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&sdcard>;
+ vqmmc-supply = <&sdcard>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts b/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts
new file mode 100644
index 000000000000..45f8631f9feb
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "amlogic-c3.dtsi"
+
+/ {
+ model = "Amlogic C308l aw419 Development Board";
+ compatible = "amlogic,aw419", "amlogic,c3";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart_b;
+ spi0 = &spifc;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 9 MiB reserved for ARM Trusted Firmware */
+ secmon_reserved: secmon@7f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x07f00000 0x0 0x900000>;
+ no-map;
+ };
+ };
+
+ main_12v: regulator-main-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_5v: regulator-vcc-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddq: regulator-vddq {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDQ";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddao_1v8: regulator-vddao-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ddr4_2v5: regulator-ddr4-2v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "DDR4_2V5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_1v8: regulator-vdd-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD1V8_BOOT";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddio_b: regulator-vddio-3v3-b {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_B";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sdcard: regulator-sdcard {
+ compatible = "regulator-fixed";
+ regulator-name = "SDCARD_POWER";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ gpio = <&gpio GPIOA_4 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&uart_b {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-on-flash-bbt;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x00200000>;
+ };
+ partition@200000 {
+ label = "env";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "system";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "rootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "media";
+ reg = <0x04000000 0x8000000>;
+ };
+ };
+};
+
+&ethmac {
+ status = "okay";
+ phy-handle = <&internal_ephy>;
+ phy-mode = "rmii";
+};
+
+&spifc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-0 = <&spifc_pins>;
+ pinctrl-names = "default";
+
+ nand@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <83000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ status = "disabled";
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0 0x200000>;
+ };
+
+ partition@200000 {
+ label = "env";
+ reg = <0x200000 0x400000>;
+ };
+
+ partition@600000 {
+ label = "system";
+ reg = <0x600000 0xa00000>;
+ };
+
+ partition@1000000 {
+ label = "rootfs";
+ reg = <0x1000000 0x3000000>;
+ };
+
+ partition@4000000 {
+ label = "data";
+ reg = <0x4000000 0x8000000>;
+ };
+ };
+ };
+};
+
+&sd {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <50000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&sdcard>;
+ vqmmc-supply = <&sdcard>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
index f8fb060c49ae..d0cda759c25d 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
@@ -7,6 +7,11 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/reset/amlogic,c3-reset.h>
+#include <dt-bindings/clock/amlogic,c3-pll-clkc.h>
+#include <dt-bindings/clock/amlogic,c3-scmi-clkc.h>
+#include <dt-bindings/clock/amlogic,c3-peripherals-clkc.h>
+#include <dt-bindings/power/amlogic,c3-pwrc.h>
+#include <dt-bindings/gpio/amlogic-c3-gpio.h>
/ {
cpus {
@@ -57,6 +62,34 @@
};
};
+ sram@7f50e00 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x07f50e00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x07f50e00 0x100>;
+
+ scmi_shmem: sram@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x100>;
+ };
+ };
+
+ firmware {
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0x820000C1>;
+ shmem = <&scmi_shmem>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -82,6 +115,44 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>;
+ clkc_periphs: clock-controller@0 {
+ compatible = "amlogic,c3-peripherals-clkc";
+ reg = <0x0 0x0 0x0 0x49c>;
+ #clock-cells = <1>;
+ clocks = <&xtal>,
+ <&scmi_clk CLKID_OSC>,
+ <&scmi_clk CLKID_FIXED_PLL_OSC>,
+ <&clkc_pll CLKID_FCLK_DIV2>,
+ <&clkc_pll CLKID_FCLK_DIV2P5>,
+ <&clkc_pll CLKID_FCLK_DIV3>,
+ <&clkc_pll CLKID_FCLK_DIV4>,
+ <&clkc_pll CLKID_FCLK_DIV5>,
+ <&clkc_pll CLKID_FCLK_DIV7>,
+ <&clkc_pll CLKID_GP0_PLL>,
+ <&scmi_clk CLKID_GP1_PLL_OSC>,
+ <&clkc_pll CLKID_HIFI_PLL>,
+ <&scmi_clk CLKID_SYS_CLK>,
+ <&scmi_clk CLKID_AXI_CLK>,
+ <&scmi_clk CLKID_SYS_PLL_DIV16>,
+ <&scmi_clk CLKID_CPU_CLK_DIV16>;
+ clock-names = "xtal_24m",
+ "oscin",
+ "fix",
+ "fdiv2",
+ "fdiv2p5",
+ "fdiv3",
+ "fdiv4",
+ "fdiv5",
+ "fdiv7",
+ "gp0",
+ "gp1",
+ "hifi",
+ "sysclk",
+ "axiclk",
+ "sysplldiv16",
+ "cpudiv16";
+ };
+
reset: reset-controller@2000 {
compatible = "amlogic,c3-reset";
reg = <0x0 0x2000 0x0 0x98>;
@@ -98,16 +169,247 @@
compatible = "amlogic,c3-periphs-pinctrl";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x0 0x0 0x4000 0x0 0x02de>;
- gpio: bank@4000 {
- reg = <0x0 0x4000 0x0 0x004c>,
- <0x0 0x4100 0x0 0x01de>;
+ gpio: bank@0 {
+ reg = <0x0 0x0 0x0 0x004c>,
+ <0x0 0x100 0x0 0x01de>;
reg-names = "mux", "gpio";
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&periphs_pinctrl 0 0 55>;
};
+
+ i2c0_pins1: i2c0-pins1 {
+ mux {
+ groups = "i2c0_sda_e",
+ "i2c0_scl_e";
+ function = "i2c0";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c0_pins2: i2c0-pins2 {
+ mux {
+ groups = "i2c0_sda_d",
+ "i2c0_scl_d";
+ function = "i2c0";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c1_pins1: i2c1-pins1 {
+ mux {
+ groups = "i2c1_sda_x",
+ "i2c1_scl_x";
+ function = "i2c1";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c1_pins2: i2c1-pins2 {
+ mux {
+ groups = "i2c1_sda_d",
+ "i2c1_scl_d";
+ function = "i2c1";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c1_pins3: i2c1-pins3 {
+ mux {
+ groups = "i2c1_sda_a",
+ "i2c1_scl_a";
+ function = "i2c1";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c1_pins4: i2c1-pins4 {
+ mux {
+ groups = "i2c1_sda_b",
+ "i2c1_scl_b";
+ function = "i2c1";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c2_pins1: i2c2-pins1 {
+ mux {
+ groups = "i2c2_sda",
+ "i2c2_scl";
+ function = "i2c2";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c3_pins1: i2c3-pins1 {
+ mux {
+ groups = "i2c3_sda_c",
+ "i2c3_scl_c";
+ function = "i2c3";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c3_pins2: i2c3-pins2 {
+ mux {
+ groups = "i2c3_sda_x",
+ "i2c3_scl_x";
+ function = "i2c3";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ i2c3_pins3: i2c3-pins3 {
+ mux {
+ groups = "i2c3_sda_d",
+ "i2c3_scl_d";
+ function = "i2c3";
+ bias-disable;
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ nand_pins: nand-pins {
+ mux {
+ groups = "emmc_nand_d0",
+ "emmc_nand_d1",
+ "emmc_nand_d2",
+ "emmc_nand_d3",
+ "emmc_nand_d4",
+ "emmc_nand_d5",
+ "emmc_nand_d6",
+ "emmc_nand_d7",
+ "nand_ce0",
+ "nand_ale",
+ "nand_cle",
+ "nand_wen_clk",
+ "nand_ren_wr";
+ function = "nand";
+ input-enable;
+ };
+ };
+
+ sdcard_pins: sdcard-pins {
+ mux {
+ groups = "sdcard_d0",
+ "sdcard_d1",
+ "sdcard_d2",
+ "sdcard_d3",
+ "sdcard_clk",
+ "sdcard_cmd";
+ function = "sdcard";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdcard_clk_gate_pins: sdcard-clk-cmd-pins {
+ mux {
+ groups = "GPIOC_4";
+ function = "gpio_periphs";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdio_m_clk_gate_pins: sdio-m-clk-cmd-pins {
+ mux {
+ groups = "sdio_clk";
+ function = "sdio";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdio_m_pins: sdio-m-all-pins {
+ mux {
+ groups = "sdio_d0",
+ "sdio_d1",
+ "sdio_d2",
+ "sdio_d3",
+ "sdio_clk",
+ "sdio_cmd";
+ function = "sdio";
+ input-enable;
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ spicc0_pins1: spicc0-pins1 {
+ mux {
+ groups = "spi_a_mosi_b",
+ "spi_a_miso_b",
+ "spi_a_clk_b";
+ function = "spi_a";
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ spicc0_pins2: spicc0-pins2 {
+ mux {
+ groups = "spi_a_mosi_c",
+ "spi_a_miso_c",
+ "spi_a_clk_c";
+ function = "spi_a";
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ spicc0_pins3: spicc0-pins3 {
+ mux {
+ groups = "spi_a_mosi_x",
+ "spi_a_miso_x",
+ "spi_a_clk_x";
+ function = "spi_a";
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ spicc1_pins1: spicc1-pins1 {
+ mux {
+ groups = "spi_b_mosi_d",
+ "spi_b_miso_d",
+ "spi_b_clk_d";
+ function = "spi_b";
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ spicc1_pins2: spicc1-pins2 {
+ mux {
+ groups = "spi_b_mosi_x",
+ "spi_b_miso_x",
+ "spi_b_clk_x";
+ function = "spi_b";
+ drive-strength-microamp = <3000>;
+ };
+ };
+
+ spifc_pins: spifc-pins {
+ mux {
+ groups = "spif_mo",
+ "spif_mi",
+ "spif_clk",
+ "spif_cs",
+ "spif_hold",
+ "spif_wp",
+ "spif_clk_loop";
+ function = "spif";
+ drive-strength-microamp = <4000>;
+ };
+ };
};
gpio_intc: interrupt-controller@4080 {
@@ -119,16 +421,207 @@
<10 11 12 13 14 15 16 17 18 19 20 21>;
};
+ clkc_pll: clock-controller@8000 {
+ compatible = "amlogic,c3-pll-clkc";
+ reg = <0x0 0x8000 0x0 0x1a4>;
+ #clock-cells = <1>;
+ clocks = <&scmi_clk CLKID_TOP_PLL_OSC>,
+ <&scmi_clk CLKID_MCLK_PLL_OSC>,
+ <&scmi_clk CLKID_FIXED_PLL_OSC>;
+ clock-names = "top",
+ "mclk",
+ "fix";
+ };
+
+ eth_phy: mdio-multiplexer@28000 {
+ compatible = "amlogic,g12a-mdio-mux";
+ reg = <0x0 0x28000 0x0 0xa4>;
+
+ clocks = <&clkc_periphs CLKID_SYS_ETH_PHY>,
+ <&xtal>,
+ <&clkc_pll CLKID_FCLK_50M>;
+ clock-names = "pclk", "clkin0", "clkin1";
+ mdio-parent-bus = <&mdio0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ext_mdio: mdio@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ int_mdio: mdio@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ internal_ephy: ethernet_phy@8 {
+ compatible = "ethernet-phy-id0180.3301",
+ "ethernet-phy-ieee802.3-c22";
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <8>;
+ max-speed = <100>;
+ };
+ };
+ };
+
+ spicc0: spi@50000 {
+ compatible = "amlogic,meson-g12a-spicc";
+ reg = <0x0 0x50000 0x0 0x44>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc_periphs CLKID_SYS_SPICC_0>,
+ <&clkc_periphs CLKID_SPICC_A>;
+ clock-names = "core", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spicc1: spi@52000 {
+ compatible = "amlogic,meson-g12a-spicc";
+ reg = <0x0 0x52000 0x0 0x44>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc_periphs CLKID_SYS_SPICC_1>,
+ <&clkc_periphs CLKID_SPICC_B>;
+ clock-names = "core", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spifc: spi@56000 {
+ compatible = "amlogic,a1-spifc";
+ reg = <0x0 0x56000 0x0 0x290>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_periphs CLKID_SPIFC>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
+ i2c0: i2c@66000 {
+ compatible = "amlogic,meson-axg-i2c";
+ reg = <0x0 0x66000 0x0 0x24>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc_periphs CLKID_SYS_I2C_M_A>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@68000 {
+ compatible = "amlogic,meson-axg-i2c";
+ reg = <0x0 0x68000 0x0 0x24>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc_periphs CLKID_SYS_I2C_M_B>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@6a000 {
+ compatible = "amlogic,meson-axg-i2c";
+ reg = <0x0 0x6a000 0x0 0x24>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc_periphs CLKID_SYS_I2C_M_C>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@6c000 {
+ compatible = "amlogic,meson-axg-i2c";
+ reg = <0x0 0x6c000 0x0 0x24>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc_periphs CLKID_SYS_I2C_M_D>;
+ status = "disabled";
+ };
+
uart_b: serial@7a000 {
compatible = "amlogic,meson-s4-uart",
"amlogic,meson-ao-uart";
reg = <0x0 0x7a000 0x0 0x18>;
interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
- clocks = <&xtal>, <&xtal>, <&xtal>;
+ clocks = <&xtal>, <&clkc_periphs CLKID_SYS_UART_B>, <&xtal>;
clock-names = "xtal", "pclk", "baud";
};
+ sec_ao: ao-secure@10220 {
+ compatible = "amlogic,c3-ao-secure",
+ "amlogic,meson-gx-ao-secure",
+ "syscon";
+ reg = <0x0 0x10220 0x0 0x140>;
+ amlogic,has-chip-id;
+ };
+
+ sdio: mmc@88000 {
+ compatible = "amlogic,meson-axg-mmc";
+ reg = <0x0 0x88000 0x0 0x800>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&pwrc PWRC_C3_SDIOA_ID>;
+ clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_A>,
+ <&clkc_periphs CLKID_SD_EMMC_A>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ no-mmc;
+ no-sd;
+ resets = <&reset RESET_SD_EMMC_A>;
+ status = "disabled";
+ };
+
+ sd: mmc@8a000 {
+ compatible = "amlogic,meson-axg-mmc";
+ reg = <0x0 0x8a000 0x0 0x800>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&pwrc PWRC_C3_SDCARD_ID>;
+ clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_B>,
+ <&clkc_periphs CLKID_SD_EMMC_B>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ no-mmc;
+ no-sdio;
+ resets = <&reset RESET_SD_EMMC_B>;
+ status = "disabled";
+ };
+
+ nand: nand-controller@8d000 {
+ compatible = "amlogic,meson-axg-nfc";
+ reg = <0x0 0x8d000 0x0 0x200>,
+ <0x0 0x8C000 0x0 0x4>;
+ reg-names = "nfc", "emmc";
+ interrupts = <GIC_SPI 87 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_C>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "device";
+ status = "disabled";
+ };
+ };
+
+ ethmac: ethernet@fdc00000 {
+ compatible = "amlogic,meson-g12a-dwmac",
+ "snps,dwmac-3.70a",
+ "snps,dwmac";
+ reg = <0x0 0xfdc00000 0x0 0x10000>,
+ <0x0 0xfe024000 0x0 0x8>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ power-domains = <&pwrc PWRC_C3_ETH_ID>;
+ clocks = <&clkc_periphs CLKID_SYS_ETH_MAC>,
+ <&clkc_pll CLKID_FCLK_DIV2>,
+ <&clkc_pll CLKID_FCLK_50M>;
+ clock-names = "stmmaceth", "clkin0", "clkin1";
+ rx-fifo-depth = <4096>;
+ tx-fifo-depth = <2048>;
+ status = "disabled";
+
+ mdio0: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
index c23efc6c7ac0..ec743cad57db 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi
@@ -194,6 +194,14 @@
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
+
+ sec_ao: ao-secure@10220 {
+ compatible = "amlogic,t7-ao-secure",
+ "amlogic,meson-gx-ao-secure",
+ "syscon";
+ reg = <0x0 0x10220 0x0 0x140>;
+ amlogic,has-chip-id;
+ };
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
index 7ed526f45175..9611775b81ee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
@@ -268,6 +268,10 @@
"Speaker1 Right", "SPK1 OUT_D",
"Linein AINL", "Linein",
"Linein AINR", "Linein";
+ clocks = <&clkc CLKID_HIFI_PLL>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_HIFI_PLL>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
index af211d8f3952..a457b3f4397b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
@@ -176,6 +176,10 @@
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
index 15b9bc280706..c779a5da7d1e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
@@ -138,6 +138,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index 61cb8135a392..ea51341f031b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -201,6 +201,10 @@
"TODDR_B IN 1", "TDMIN_B OUT",
"TODDR_C IN 1", "TDMIN_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index 0e239939ade6..f70a46967e2b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -238,6 +238,10 @@
"Lineout", "10U2 OUTL",
"Lineout", "10U2 OUTR";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index 05c7a1e3f1b7..32f98a192494 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -158,6 +158,10 @@
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts
index 13d478f9c891..2d74456e685d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts
@@ -70,6 +70,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts
index 003efed529ba..0f48c32bec97 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts
@@ -79,6 +79,10 @@
"LINPUT1", "Mic Jack",
"Mic Jack", "MICB";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
index 6a346cb86a53..d4e1990b5f26 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
@@ -194,6 +194,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
index 3a24c2411552..de35fa2d7a6d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi
@@ -38,6 +38,12 @@
"SPDIFOUT_A IN 0", "FRDDR_A OUT 3",
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
index bb73e10b5e74..369c5cf889b6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
@@ -48,6 +48,10 @@
"TDMOUT_A IN 2", "FRDDR_C OUT 1",
"TDM_A Playback", "TDMOUT_A OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
index 6eeedd54ab91..654449afd3a4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
@@ -49,6 +49,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
index 0da386cabe1a..e20311386745 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
@@ -37,6 +37,10 @@
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
index eed2a23047ca..e21831dfceee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
@@ -234,6 +234,10 @@
"Internal Speakers", "Speaker Amplifier OUTL",
"Internal Speakers", "Speaker Amplifier OUTR";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
index 86eb81112232..3bca8023638d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
@@ -95,6 +95,10 @@
"Lineout", "U19 OUTL",
"Lineout", "U19 OUTR";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts
index e26f3e3258e1..1b9097a30251 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts
@@ -39,6 +39,10 @@
"TODDR_B IN 6", "TDMIN_LB OUT",
"TODDR_C IN 6", "TDMIN_LB OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
index 8445701100d0..39feba7f2d08 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
@@ -176,6 +176,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
index 6396f190d703..4c1a75b926ee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
@@ -32,6 +32,10 @@
"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
index efd662a452e8..d38c3a224fbe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
@@ -194,6 +194,10 @@
"AU2 INR", "ACODEC LORN",
"7J4-14 LEFT", "AU2 OUTL",
"7J4-11 RIGHT", "AU2 OUTR";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 08d6b69ba469..45ccddd1aaf0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -129,6 +129,10 @@
"AU2 INR", "ACODEC LORN",
"Lineout", "AU2 OUTL",
"Lineout", "AU2 OUTR";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
index f28452b9f00f..073b47ce8c3c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
@@ -45,6 +45,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "KII-PRO";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 1fd2e56e6b08..cf2e2ef81680 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -135,6 +135,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "NANOPI-K2";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index cca129ce2c58..7d7dde93fff3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -142,6 +142,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "NEXBOX-A95X";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index c37cc6b036cd..959bd8d77a82 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -177,6 +177,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "ODROID-C2";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
index 7f94716876d3..bfac00e76ba3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
@@ -68,6 +68,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "P200";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
index 6f81eed83bec..c10f66031ecd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
@@ -17,6 +17,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "P201";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 255e93a0b36d..3807a184810b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -108,6 +108,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "VEGA-S95";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
index af9ea32a2876..ec281a9e9e77 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
@@ -16,6 +16,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "WETEK-HUB";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
index 376760d86766..924414861b72 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
@@ -48,6 +48,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "WETEK-PLAY2";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
index 90ef9c17d80b..c6132fb71dfc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
@@ -123,6 +123,10 @@
"Speaker", "9J5-2 RIGHT";
audio-routing = "9J5-3 LEFT", "ACODEC LOLN",
"9J5-2 RIGHT", "ACODEC LORN";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
index 08a4718219b1..c5e2306ad7a4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
@@ -128,6 +128,10 @@
"AU2 INR", "ACODEC LORN",
"Lineout", "AU2 OUTL",
"Lineout", "AU2 OUTR";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index fea65f20523a..a80f0ea2773b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -67,6 +67,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "KHADAS-VIM";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
index 63b20860067c..6cbdfde00e12 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
@@ -160,6 +160,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "LIBRETECH-CC-V2";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 8b26c9661be1..401064b0428d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -142,6 +142,10 @@
"AU2 INR", "ACODEC LORN",
"Lineout", "AU2 OUTL",
"Lineout", "AU2 OUTR";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
index 9b4ea6a49398..8b41e340f919 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
@@ -50,6 +50,10 @@
"AU2 INR", "ACODEC LORN",
"Lineout", "AU2 OUTL",
"Lineout", "AU2 OUTR";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts
index de996e930b82..a9c5881c9783 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts
@@ -90,6 +90,11 @@
"AU2 INR", "ACODEC LORN",
"Lineout", "AU2 OUTL",
"Lineout", "AU2 OUTR";
+
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
index 1221f4545130..942df754a0ed 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts
@@ -38,10 +38,6 @@
};
};
-&saradc {
- compatible = "amlogic,meson-gxlx-saradc", "amlogic,meson-saradc";
-};
-
&usb {
dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 07e7c3bedea0..96a3dd2d8a99 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -150,6 +150,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "KHADAS-VIM2";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index ad2dd4ad0a31..773107cc47dd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -86,6 +86,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "NEXBOX-A1";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index d05dde8da5c5..7356d3b628b1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -101,6 +101,10 @@
sound {
compatible = "amlogic,gx-sound-card";
model = "RBOX-PRO";
+ clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index e78cc9b577a0..7daa9b122d5c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -182,6 +182,10 @@
"TODDR_B IN 0", "TDMIN_A OUT",
"TODDR_C IN 0", "TDMIN_A OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
index 082b72703cdf..929e4720ae76 100644
--- a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
@@ -200,6 +200,10 @@
<&tdmin_a>, <&tdmin_b>, <&tdmin_c>,
<&dioo2133>;
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
index 983caddc409c..6730c44642d2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
@@ -34,6 +34,111 @@
no-map;
};
};
+
+ sdio_32k: sdio-32k {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz (period = 10^9 / 32768 ≈ 30518 ns) */
+ };
+
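+ /*
+ * Simple MMC power sequence for the SDIO device (presumably a
+ * WiFi module): the 32.768 kHz PWM clock above is enabled and
+ * the active-low GPIOX_6 reset is released before the bus is
+ * powered up.
+ */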
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+ clocks = <&sdio_32k>;
+ clock-names = "ext_clock";
+ };
+
+ main_12v: regulator-main-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-always-on;
+ };
+
+ vddio_ao1v8: regulator-vddio-ao1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ /* SY8120B1ABC DC/DC Regulator. */
+ vddcpu: regulator-vddcpu {
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU";
+ regulator-min-microvolt = <689000>;
+ regulator-max-microvolt = <1049000>;
+
+ vin-supply = <&main_12v>;
+
+ pwms = <&pwm_ij 1 1500 0>;
+ pwm-dutycycle-range = <100 0>;
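+ /*
+ * The duty-cycle range is inverted: 0% duty selects the
+ * maximum voltage (1.049 V) and 100% duty the minimum
+ * (0.689 V), as the voltage table below spells out.
+ */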
+
+ regulator-boot-on;
+ regulator-always-on;
+ /* <voltage (µV)> <duty cycle (%)> */
+ voltage-table = <1049000 0>,
+ <1039000 3>,
+ <1029000 6>,
+ <1019000 9>,
+ <1009000 12>,
+ <999000 14>,
+ <989000 17>,
+ <979000 20>,
+ <969000 23>,
+ <959000 26>,
+ <949000 29>,
+ <939000 31>,
+ <929000 34>,
+ <919000 37>,
+ <909000 40>,
+ <899000 43>,
+ <889000 45>,
+ <879000 48>,
+ <869000 51>,
+ <859000 54>,
+ <849000 56>,
+ <839000 59>,
+ <829000 62>,
+ <819000 65>,
+ <809000 68>,
+ <799000 70>,
+ <789000 73>,
+ <779000 76>,
+ <769000 79>,
+ <759000 81>,
+ <749000 84>,
+ <739000 87>,
+ <729000 89>,
+ <719000 92>,
+ <709000 95>,
+ <699000 98>,
+ <689000 100>;
+ };
+};
+
+&pwm_ef {
+ status = "okay";
+ pinctrl-0 = <&pwm_e_pins1>;
+ pinctrl-names = "default";
+};
+
+&pwm_ij {
+ status = "okay";
};
&uart_b {
@@ -46,6 +151,40 @@
pinctrl-names = "default";
};
+&sdio {
+ pinctrl-0 = <&sdio_pins>;
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ max-frequency = <200000000>;
+ non-removable;
+ disable-wp;
+ no-sd;
+ no-mmc;
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddio_ao1v8>;
+};
+
+&sd {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <200000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddao_3v3>;
+};
+
&nand {
status = "okay";
#address-cells = <1>;
@@ -90,3 +229,9 @@
pinctrl-0 = <&spicc0_pins_x>;
cs-gpios = <&gpio GPIOX_10 GPIO_ACTIVE_LOW>;
};
+
+&ethmac {
+ status = "okay";
+ phy-handle = <&internal_ephy>;
+ phy-mode = "rmii";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index b686eacb9662..957577d986c0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -10,6 +10,7 @@
#include <dt-bindings/clock/amlogic,s4-pll-clkc.h>
#include <dt-bindings/clock/amlogic,s4-peripherals-clkc.h>
#include <dt-bindings/power/meson-s4-power.h>
+#include <dt-bindings/reset/amlogic,meson-s4-reset.h>
/ {
cpus {
@@ -466,6 +467,93 @@
};
};
+ sdcard_pins: sdcard-pins {
+ mux {
+ groups = "sdcard_d0_c",
+ "sdcard_d1_c",
+ "sdcard_d2_c",
+ "sdcard_d3_c",
+ "sdcard_clk_c",
+ "sdcard_cmd_c";
+ function = "sdcard";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdcard_clk_gate_pins: sdcard-clk-gate-pins {
+ mux {
+ groups = "GPIOC_4";
+ function = "gpio_periphs";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ emmc_pins: emmc-pins {
+ mux-0 {
+ groups = "emmc_nand_d0",
+ "emmc_nand_d1",
+ "emmc_nand_d2",
+ "emmc_nand_d3",
+ "emmc_nand_d4",
+ "emmc_nand_d5",
+ "emmc_nand_d6",
+ "emmc_nand_d7",
+ "emmc_cmd";
+ function = "emmc";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ mux-1 {
+ groups = "emmc_clk";
+ function = "emmc";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ emmc_ds_pins: emmc-ds-pins {
+ mux {
+ groups = "emmc_nand_ds";
+ function = "emmc";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ emmc_clk_gate_pins: emmc-clk-gate-pins {
+ mux {
+ groups = "GPIOB_8";
+ function = "gpio_periphs";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdio_pins: sdio-pins {
+ mux {
+ groups = "sdio_d0",
+ "sdio_d1",
+ "sdio_d2",
+ "sdio_d3",
+ "sdio_clk",
+ "sdio_cmd";
+ function = "sdio";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ sdio_clk_gate_pins: sdio-clk-gate-pins {
+ mux {
+ groups = "GPIOX_4";
+ function = "gpio_periphs";
+ bias-pull-down;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
spicc0_pins_x: spicc0-pins_x {
mux {
groups = "spi_a_mosi_x",
@@ -675,6 +763,14 @@
#reset-cells = <1>;
};
+ sec_ao: ao-secure@10220 {
+ compatible = "amlogic,s4-ao-secure",
+ "amlogic,meson-gx-ao-secure",
+ "syscon";
+ reg = <0x0 0x10220 0x0 0x140>;
+ amlogic,has-chip-id;
+ };
+
ir: ir@84040 {
compatible = "amlogic,meson-s4-ir";
reg = <0x0 0x84040 0x0 0x30>;
@@ -712,5 +808,45 @@
compatible = "snps,dwmac-mdio";
};
};
+
+ sdio: mmc@fe088000 {
+ compatible = "amlogic,meson-axg-mmc";
+ reg = <0x0 0xfe088000 0x0 0x800>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc_periphs CLKID_SDEMMC_A>,
+ <&xtal>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ resets = <&reset RESET_SD_EMMC_A>;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ status = "disabled";
+ };
+
+ sd: mmc@fe08a000 {
+ compatible = "amlogic,meson-axg-mmc";
+ reg = <0x0 0xfe08a000 0x0 0x800>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_periphs CLKID_SDEMMC_B>,
+ <&clkc_periphs CLKID_SD_EMMC_B>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ resets = <&reset RESET_SD_EMMC_B>;
+ status = "disabled";
+ };
+
+ emmc: mmc@fe08c000 {
+ compatible = "amlogic,meson-axg-mmc";
+ reg = <0x0 0xfe08c000 0x0 0x800>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_periphs CLKID_NAND>,
+ <&xtal>,
+ <&clkc_pll CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ resets = <&reset RESET_NAND_EMMC>;
+ no-sdio;
+ no-sd;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts
index 9b2eb6e42651..3c43d3490e14 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts
index 6e34fd80ed71..445c1671ede7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts
index 586034316ec3..eeaff22edade 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
index f045bf851638..697855fec476 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -57,6 +57,10 @@
"Lineout", "ACODEC LOLP",
"Lineout", "ACODEC LORP";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts
index e6e9410d40cb..7b3a014d4cde 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index 951eb8e3f0c0..7b0e9817a615 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -174,6 +174,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 3581e14cbf18..2e3397e55da2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -239,6 +239,10 @@
"TODDR_B IN 1", "TDMIN_B OUT",
"TODDR_C IN 1", "TDMIN_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts
index fc9b961133cd..e4a3a2a8ad06 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts
index 9ea969255b4f..fff92e0d6dd5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts
@@ -22,6 +22,10 @@
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT";
+ clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 532401bc9c66..6ad4703925dc 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -997,7 +997,7 @@
compatible = "apm,xgene-mdio";
#address-cells = <1>;
#size-cells = <0>;
- menetphy: menetphy@3 {
+ menetphy: ethernet-phy@3 {
compatible = "ethernet-phy-id001c.c915";
reg = <0x3>;
};
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 93f1e7c026b8..083be35495b3 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -18,7 +18,9 @@
#address-cells = <2>;
#size-cells = <2>;
- chosen { };
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
aliases {
serial0 = &v2m_serial0;
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 85f1c15cc65d..19973ab4ea6b 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -24,7 +24,9 @@
#address-cells = <2>;
#size-cells = <2>;
- chosen { };
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
aliases {
serial0 = &v2m_serial0;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index afdf954206f1..7f7226711d4b 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -23,7 +23,9 @@
#address-cells = <2>;
#size-cells = <2>;
- chosen { };
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
aliases {
serial0 = &v2m_serial0;
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 8b4591ddd27c..92565e9781ad 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -6,6 +6,7 @@ DTC_FLAGS := -@
dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \
bcm2711-rpi-4-b.dtb \
bcm2711-rpi-cm4-io.dtb \
+ bcm2712-rpi-5-b.dtb \
bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
new file mode 100644
index 000000000000..2bdbb6780242
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "bcm2712.dtsi"
+
+/ {
+ compatible = "raspberrypi,5-model-b", "brcm,bcm2712";
+ model = "Raspberry Pi 5";
+
+ aliases {
+ serial10 = &uart10;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial10:115200n8";
+ };
+
+ /* Will be filled by the bootloader */
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0 0 0x28000000>;
+ };
+
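+ /*
+ * GPIO-switched regulator that toggles the SD card I/O voltage
+ * between 3.3 V (default) and 1.8 V, as required for UHS-I
+ * signalling modes.
+ */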
+ sd_io_1v8_reg: sd-io-1v8-reg {
+ compatible = "regulator-gpio";
+ regulator-name = "vdd-sd-io";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-settling-time-us = <5000>;
+ gpios = <&gio_aon 3 GPIO_ACTIVE_HIGH>;
+ states = <1800000 1>,
+ <3300000 0>;
+ };
+
+ sd_vcc_reg: sd-vcc-reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ gpios = <&gio_aon 4 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+/*
+ * The debug UART. On the RPi 5 it is exposed on the JST-SH 1.0 mm
+ * 3-pin connector labeled "UART", i.e. the system console interface.
+ */
+&uart10 {
+ status = "okay";
+};
+
+/* SDIO1 is used to drive the SD card */
+&sdio1 {
+ vqmmc-supply = <&sd_io_1v8_reg>;
+ vmmc-supply = <&sd_vcc_reg>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-ddr50;
+ sd-uhs-sdr104;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
new file mode 100644
index 000000000000..6e5a984c1d4e
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "brcm,bcm2712";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ interrupt-parent = <&gicv2>;
+
+ clocks {
+ /* The oscillator is the root of the clock tree. */
+ clk_osc: clk-osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "osc";
+ clock-frequency = <54000000>;
+ };
+
+ clk_vpu: clk-vpu {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <750000000>;
+ clock-output-names = "vpu-clock";
+ };
+
+ clk_uart: clk-uart {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <9216000>;
+ clock-output-names = "uart-clock";
+ };
+
+ clk_emmc2: clk-emmc2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ clock-output-names = "emmc2-clock";
+ };
+ };
+
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Source for L1 d/i cache-line-size, cache-sets, cache-size
+ * https://developer.arm.com/documentation/100798/0401/L1-memory-system/About-the-L1-memory-system?lang=en
+ * Source for L2 cache-line-size and cache-sets:
+ * https://developer.arm.com/documentation/100798/0401/L2-memory-system/About-the-L2-memory-system?lang=en
+ * and for cache-size:
+ * https://www.raspberrypi.com/documentation/computers/processors.html#bcm2712
+ */
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x000>;
+ enable-method = "psci";
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ next-level-cache = <&l2_cache_l0>;
+
+ l2_cache_l0: l2-cache-l0 {
+ compatible = "cache";
+ cache-size = <0x80000>;
+ cache-line-size = <128>;
+ cache-sets = <1024>; // 512 KiB / 64 B line = 8192 lines, 8-way -> 1024 sets
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x100>;
+ enable-method = "psci";
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ next-level-cache = <&l2_cache_l1>;
+
+ l2_cache_l1: l2-cache-l1 {
+ compatible = "cache";
+ cache-size = <0x80000>;
+ cache-line-size = <128>;
+ cache-sets = <1024>; // 512 KiB / 64 B line = 8192 lines, 8-way -> 1024 sets
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x200>;
+ enable-method = "psci";
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ next-level-cache = <&l2_cache_l2>;
+
+ l2_cache_l2: l2-cache-l2 {
+ compatible = "cache";
+ cache-size = <0x80000>;
+ cache-line-size = <128>;
+ cache-sets = <1024>; // 512 KiB / 64 B line = 8192 lines, 8-way -> 1024 sets
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x300>;
+ enable-method = "psci";
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>; // 64 KiB / 64 B line = 1024 lines, 4-way -> 256 sets
+ next-level-cache = <&l2_cache_l3>;
+
+ l2_cache_l3: l2-cache-l3 {
+ compatible = "cache";
+ cache-size = <0x80000>;
+ cache-line-size = <128>;
+ cache-sets = <1024>; // 512 KiB / 64 B line = 8192 lines, 8-way -> 1024 sets
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+ };
+
+ /* Source for cache-line-size and cache-sets:
+ * https://developer.arm.com/documentation/100453/0401/L3-cache?lang=en
+ * Source for cache-size:
+ * https://www.raspberrypi.com/documentation/computers/processors.html#bcm2712
+ */
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-size = <0x200000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>; // 2 MiB / 64 B line = 32768 lines, 16-way -> 2048 sets
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+
+ psci {
+ method = "smc";
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ };
+
+ rmem: reserved-memory {
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ atf@0 {
+ reg = <0x0 0x0 0x0 0x80000>;
+ no-map;
+ };
+
+ cma: linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x4000000>; /* 64MB */
+ reusable;
+ linux,cma-default;
+ alloc-ranges = <0x0 0x00000000 0x0 0x40000000>;
+ };
+ };
+
+ soc: soc@107c000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0x10 0x00000000 0x80000000>;
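+ /* 32-bit peripheral bus: child 0x0-0x7fffffff maps to CPU 0x10_0000_0000 (2 GiB window) */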
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sdio1: mmc@fff000 {
+ compatible = "brcm,bcm2712-sdhci",
+ "brcm,sdhci-brcmstb";
+ reg = <0x00fff000 0x260>,
+ <0x00fff400 0x200>;
+ reg-names = "host", "cfg";
+ interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_emmc2>;
+ clock-names = "sw_sdio";
+ mmc-ddr-3_3v;
+ };
+
+ system_timer: timer@7c003000 {
+ compatible = "brcm,bcm2835-system-timer";
+ reg = <0x7c003000 0x1000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <1000000>;
+ };
+
+ mailbox: mailbox@7c013880 {
+ compatible = "brcm,bcm2835-mbox";
+ reg = <0x7c013880 0x40>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <0>;
+ };
+
+ local_intc: interrupt-controller@7cd00000 {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x7cd00000 0x100>;
+ };
+
+ uart10: serial@7d001000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x7d001000 0x200>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_uart>, <&clk_vpu>;
+ clock-names = "uartclk", "apb_pclk";
+ arm,primecell-periphid = <0x00241011>;
+ status = "disabled";
+ };
+
+ interrupt-controller@7d517000 {
+ compatible = "brcm,bcm7271-l2-intc";
+ reg = <0x7d517000 0x10>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gio_aon: gpio@7d517c00 {
+ compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+ reg = <0x7d517c00 0x40>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ brcm,gpio-bank-widths = <17 6>;
+ /* The lack of an 'interrupt-controller' property here is intentional:
+ * don't use GIO_AON as an interrupt controller because it will
+ * clash with the firmware monitoring the PMIC interrupt via the VPU.
+ */
+ };
+
+ gicv2: interrupt-controller@7fff9000 {
+ compatible = "arm,gic-400";
+ reg = <0x7fff9000 0x1000>,
+ <0x7fffa000 0x2000>,
+ <0x7fffc000 0x2000>,
+ <0x7fffe000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
index 47a389d9ff7d..9d74fa6bfed9 100644
--- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
@@ -32,7 +32,7 @@
device_type = "memory";
reg = <0x0 0x80000000 0x3da00000>,
<0x0 0xc0000000 0x40000000>,
- <0x8 0x80000000 0x40000000>;
+ <0x8 0x80000000 0x80000000>;
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
index 0248329da49a..b36292a7db64 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
@@ -251,6 +251,52 @@
"dout_fsys2_clkcmu_ethernet";
};
+ cmu_dpum: clock-controller@18c00000 {
+ compatible = "samsung,exynosautov9-cmu-dpum";
+ reg = <0x18c00000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_DPUM_BUS>;
+ clock-names = "oscclk", "bus";
+ };
+
+ sysmmu_dpum_0: sysmmu@18c80000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x18c80000 0x10000>;
+ interrupts = <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D0_CLK>;
+ clock-names = "sysmmu";
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_dpum_1: sysmmu@18c90000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x18c90000 0x10000>;
+ interrupts = <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D1_CLK>;
+ clock-names = "sysmmu";
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_dpum_2: sysmmu@18ca0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x18ca0000 0x10000>;
+ interrupts = <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D2_CLK>;
+ clock-names = "sysmmu";
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_dpum_3: sysmmu@18cb0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x18cb0000 0x10000>;
+ interrupts = <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D3_CLK>;
+ clock-names = "sysmmu";
+ #iommu-cells = <0>;
+ };
+
cmu_core: clock-controller@1b030000 {
compatible = "samsung,exynosautov9-cmu-core";
reg = <0x1b030000 0x8000>;
diff --git a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
index c1c8566d74f5..91882b37fdb3 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
@@ -6,6 +6,7 @@
*
*/
+#include <dt-bindings/clock/samsung,exynosautov920.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/samsung,exynos-usi.h>
@@ -38,17 +39,6 @@
clock-output-names = "oscclk";
};
- /*
- * FIXME: Keep the stub clock for serial driver, until proper clock
- * driver is implemented.
- */
- clock_usi: clock-usi {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <200000000>;
- clock-output-names = "usi";
- };
-
cpus: cpus {
#address-cells = <2>;
#size-cells = <0>;
@@ -192,6 +182,19 @@
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
+ cmu_peric0: clock-controller@10800000 {
+ compatible = "samsung,exynosautov920-cmu-peric0";
+ reg = <0x10800000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>,
+ <&cmu_top DOUT_CLKCMU_PERIC0_NOC>,
+ <&cmu_top DOUT_CLKCMU_PERIC0_IP>;
+ clock-names = "oscclk",
+ "noc",
+ "ip";
+ };
+
syscon_peric0: syscon@10820000 {
compatible = "samsung,exynosautov920-peric0-sysreg",
"syscon";
@@ -213,7 +216,8 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
- clocks = <&clock_usi>, <&clock_usi>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI00_USI>;
clock-names = "pclk", "ipclk";
status = "disabled";
@@ -224,7 +228,8 @@
interrupts = <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart0_bus>;
- clocks = <&clock_usi>, <&clock_usi>;
+ clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>,
+ <&cmu_peric0 CLK_DOUT_PERIC0_USI00_USI>;
clock-names = "uart", "clk_uart_baud0";
samsung,uart-fifosize = <256>;
status = "disabled";
@@ -254,6 +259,15 @@
interrupts = <GIC_SPI 781 IRQ_TYPE_LEVEL_HIGH>;
};
+ cmu_top: clock-controller@11000000 {
+ compatible = "samsung,exynosautov920-cmu-top";
+ reg = <0x11000000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&xtcxo>;
+ clock-names = "oscclk";
+ };
+
pinctrl_alive: pinctrl@11850000 {
compatible = "samsung,exynosautov920-pinctrl";
reg = <0x11850000 0x10000>;
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index eadb8822e6d4..302c5beb224a 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -1394,6 +1394,21 @@
pmu_system_controller: system-controller@17460000 {
compatible = "google,gs101-pmu", "syscon";
reg = <0x17460000 0x10000>;
+
+ poweroff: syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmu_system_controller>;
+ offset = <0x3e9c>; /* PAD_CTRL_PWR_HOLD */
+ mask = <0x100>; /* reset value */
+ };
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&pmu_system_controller>;
+ offset = <0x3a00>; /* SYSTEM_CONFIGURATION */
+ mask = <0x2>; /* SWRESET_SYSTEM */
+ value = <0x2>; /* reset value */
+ };
};
pinctrl_gpio_alive: pinctrl@174d0000 {
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index f04c22b7de72..9d3df8b218a2 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -129,11 +129,11 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw72xx-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw73xx-0x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw75xx-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7901.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7902.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7903.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7904.dtb
-dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7905-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-dahlia.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-dev.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-mallow.dtb
@@ -174,16 +174,19 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-icore-mx8mp-edimm2.2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-msc-sm2s-ep1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-navqp.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
+imx8mp-phyboard-pollux-rdk-no-eth-dtbs += imx8mp-phyboard-pollux-rdk.dtb imx8mp-phycore-no-eth.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk-no-eth.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-mi1010ait-1cp1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw71xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw72xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw73xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw74xx.dtb
-dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw7905-2x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw75xx-2x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-dahlia.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-dev.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-mallow.dtb
@@ -238,6 +241,8 @@ dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqp-mba8xx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8ulp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-11x11-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx93-14x14-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx93-kontron-bl-osm-s.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-segin.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxca.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxla.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
index 2517528f684f..75081ce3e9a6 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
@@ -20,6 +20,12 @@
clock-frequency = <25000000>;
};
+ sc16is7xx_clk: clock-sc16is7xx {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "1P8V";
@@ -69,12 +75,6 @@
clocks = <&sc16is7xx_clk>;
interrupt-parent = <&gpio1>;
interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
-
- sc16is7xx_clk: clock-sc16is7xx {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index e61ea7e0737e..dd479889658d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -164,7 +164,6 @@
QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
- big-endian;
bus-width = <4>;
status = "disabled";
};
@@ -183,7 +182,6 @@
QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
- big-endian;
broken-cd;
bus-width = <4>;
status = "disabled";
@@ -541,7 +539,6 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- num-viewport = <2>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -556,7 +553,7 @@
status = "disabled";
};
- rcpm: power-controller@1ee2140 {
+ rcpm: wakeup-controller@1ee2140 {
compatible = "fsl,ls1012a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1ee2140 0x0 0x4>;
#fsl,rcpm-wakeup-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
index 195bdbafdf7c..d9fac647f432 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
@@ -26,6 +26,13 @@
cooling-levels = <1 128 192 255>;
};
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
sound {
#address-cells = <1>;
#size-cells = <0>;
@@ -107,6 +114,11 @@
clock-names = "mclk";
assigned-clocks = <&mclk>;
assigned-clock-rates = <1250000>;
+ AVDD-supply = <&reg_3p3v>;
+ CPVDD-supply = <&reg_3p3v>;
+ DBVDD-supply = <&reg_3p3v>;
+ DCVDD-supply = <&reg_3p3v>;
+ MICVDD-supply = <&reg_3p3v>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index acf293310f7a..7d172d7e5737 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -112,13 +112,6 @@
};
};
- reboot {
- compatible = "syscon-reboot";
- regmap = <&rst>;
- offset = <0>;
- mask = <0x02>;
- };
-
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
@@ -150,6 +143,7 @@
its: msi-controller@6020000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x06020000 0 0x20000>;/* GIC Translater */
};
};
@@ -235,10 +229,16 @@
};
};
- rst: syscon@1e60000 {
- compatible = "syscon";
+ syscon@1e60000 {
+ compatible = "fsl,ls1028a-reset", "syscon", "simple-mfd";
reg = <0x0 0x1e60000 0x0 0x10000>;
little-endian;
+
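+ /*
+ * As a child of the syscon node, syscon-reboot inherits its
+ * regmap from the parent, so no explicit regmap is needed.
+ */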
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0>;
+ mask = <0x02>;
+ };
};
sfp: efuse@1e80000 {
@@ -381,7 +381,6 @@
dmas = <&edma0 0 62>, <&edma0 0 60>;
dma-names = "tx", "rx";
spi-num-chipselects = <4>;
- little-endian;
status = "disabled";
};
@@ -397,7 +396,6 @@
dmas = <&edma0 0 58>, <&edma0 0 56>;
dma-names = "tx", "rx";
spi-num-chipselects = <4>;
- little-endian;
status = "disabled";
};
@@ -413,7 +411,6 @@
dmas = <&edma0 0 54>, <&edma0 0 2>;
dma-names = "tx", "rx";
spi-num-chipselects = <3>;
- little-endian;
status = "disabled";
};
@@ -662,7 +659,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x80 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
@@ -701,7 +698,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x88 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
@@ -1080,7 +1077,7 @@
reg = <0x01 0xf0000000 0x0 0x100000>;
#address-cells = <3>;
#size-cells = <2>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
device_type = "pci";
bus-range = <0x0 0x0>;
dma-coherent;
@@ -1319,7 +1316,7 @@
status = "disabled";
};
- rcpm: power-controller@1e34040 {
+ rcpm: wakeup-controller@1e34040 {
compatible = "fsl,ls1028a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x1c>;
#fsl,rcpm-wakeup-cells = <7>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
index 5c4d7eef8b61..ca7cd7a33c01 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
@@ -29,6 +29,7 @@
enet1: ethernet@e2000 {
pcsphy-handle = <&pcsphy1>, <&qsgmiib_pcs1>;
+ pcs-handle = <&pcsphy1>, <&qsgmiib_pcs1>;
pcs-handle-names = "sgmii", "qsgmii";
};
@@ -40,11 +41,13 @@
enet4: ethernet@e8000 {
pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs2>;
+ pcs-handle = <&pcsphy4>, <&qsgmiib_pcs2>;
pcs-handle-names = "sgmii", "qsgmii";
};
enet5: ethernet@ea000 {
pcsphy-handle = <&pcsphy5>, <&qsgmiib_pcs3>;
+ pcs-handle = <&pcsphy5>, <&qsgmiib_pcs3>;
pcs-handle-names = "sgmii", "qsgmii";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index 11b1356e95d5..e850551b16ac 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -211,7 +211,7 @@
};
&fpga {
- mdio-mux-emi1@54 {
+ mdio-mux@54 {
compatible = "mdio-mux-mmioreg", "mdio-mux";
mdio-parent-bus = <&mdio0>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index ab4c919e3e16..c0e3e8fa1e79 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -431,7 +431,6 @@
clock-frequency = <0>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
- big-endian;
bus-width = <4>;
};
@@ -439,7 +438,6 @@
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
};
tmu: tmu@1f00000 {
@@ -653,7 +651,7 @@
#interrupt-cells = <2>;
};
- uqe: uqe@2400000 {
+ uqe: uqe-bus@2400000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,qe", "simple-bus";
@@ -667,7 +665,6 @@
qeic: qeic@80 {
compatible = "fsl,qe-ic";
reg = <0x80 0x80>;
- #address-cells = <0>;
interrupt-controller;
#interrupt-cells = <1>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
@@ -675,16 +672,12 @@
};
si1: si@700 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "fsl,ls1043-qe-si",
"fsl,t1040-qe-si";
reg = <0x700 0x80>;
};
siram1: siram@1000 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "fsl,ls1043-qe-siram",
"fsl,t1040-qe-siram";
reg = <0x1000 0x800>;
@@ -804,7 +797,7 @@
QORIQ_CLK_PLL_DIV(1)>;
};
- aux_bus: aux-bus {
+ aux_bus: bus {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
@@ -962,7 +955,7 @@
};
qdma: dma-controller@8380000 {
- compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
+ compatible = "fsl,ls1043a-qdma", "fsl,ls1021a-qdma";
reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
<0x0 0x8390000 0x0 0x10000>, /* Status regs */
<0x0 0x83a0000 0x0 0x40000>; /* Block regs */
@@ -983,7 +976,7 @@
big-endian;
};
- rcpm: power-controller@1ee2140 {
+ rcpm: wakeup-controller@1ee2140 {
compatible = "fsl,ls1043a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1ee2140 0x0 0x4>;
#fsl,rcpm-wakeup-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
index 4e3345093943..15ff7c569d28 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
@@ -24,6 +24,7 @@
/* these aliases provide the FMan ports mapping */
enet0: ethernet@e0000 {
pcsphy-handle = <&qsgmiib_pcs3>;
+ pcs-handle = <&qsgmiib_pcs3>;
pcs-handle-names = "qsgmii";
};
@@ -38,11 +39,13 @@
enet4: ethernet@e8000 {
pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs1>;
+ pcs-handle = <&pcsphy4>, <&qsgmiib_pcs1>;
pcs-handle-names = "sgmii", "qsgmii";
};
enet5: ethernet@ea000 {
pcsphy-handle = <&pcsphy5>, <&pcsphy5>;
+ pcs-handle = <&pcsphy5>, <&pcsphy5>;
pcs-handle-names = "sgmii", "qsgmii";
};
@@ -51,6 +54,7 @@
enet7: ethernet@f2000 {
pcsphy-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>;
+ pcs-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>;
pcs-handle-names = "sgmii", "qsgmii", "xfi";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
index e5296e51f656..a1d9102ff32b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -237,7 +237,7 @@
#address-cells = <1>;
#size-cells = <1>;
- mdio-mux-emi1 {
+ mdio-mux@54 {
compatible = "mdio-mux-mmioreg", "mdio-mux";
mdio-parent-bus = <&mdio0>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 55019866d6a2..0baf256b4400 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -282,7 +282,6 @@
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
};
ifc: memory-controller@1530000 {
@@ -315,7 +314,6 @@
clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
- big-endian;
bus-width = <4>;
};
@@ -694,7 +692,6 @@
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(2)>;
- big-endian;
};
edma0: dma-controller@2c00000 {
@@ -715,7 +712,7 @@
QORIQ_CLK_PLL_DIV(2)>;
};
- aux_bus: aux-bus {
+ aux_bus: bus {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
@@ -823,7 +820,7 @@
};
pcie_ep1: pcie_ep@3400000 {
- compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
+ compatible = "fsl,ls1046a-pcie-ep";
reg = <0x00 0x03400000 0x0 0x00100000>,
<0x40 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -862,7 +859,7 @@
};
pcie_ep2: pcie_ep@3500000 {
- compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
+ compatible = "fsl,ls1046a-pcie-ep";
reg = <0x00 0x03500000 0x0 0x00100000>,
<0x48 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -901,7 +898,7 @@
};
pcie_ep3: pcie_ep@3600000 {
- compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep";
+ compatible = "fsl,ls1046a-pcie-ep";
reg = <0x00 0x03600000 0x0 0x00100000>,
<0x50 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -935,7 +932,7 @@
big-endian;
};
- rcpm: power-controller@1ee2140 {
+ rcpm: wakeup-controller@1ee2140 {
compatible = "fsl,ls1046a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1ee2140 0x0 0x4>;
#fsl,rcpm-wakeup-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index ee8e932628d1..2df16bfb901c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -170,6 +170,13 @@
/* IRQ_RTC_B -> IRQ0_B(CPLD) -> IRQ00(CPU), active low */
interrupts-extended = <&extirq 0 IRQ_TYPE_LEVEL_LOW>;
};
+
+ rtc@53 {
+ compatible = "nxp,pcf2131";
+ reg = <0x53>;
+ /* IRQ_RTC_B -> IRQ0_B(CPLD) -> IRQ00(CPU), active low */
+ interrupts-extended = <&extirq 0 IRQ_TYPE_LEVEL_LOW>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
index d4867d6cf47c..bc0d89427fbe 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
@@ -220,7 +220,7 @@
#gpio-cells = <2>;
gpio-controller;
- admin_led_lower {
+ admin-led-lower-hog {
gpio-hog;
gpios = <13 GPIO_ACTIVE_HIGH>;
output-low;
@@ -323,9 +323,9 @@
reg = <0x580000 0x40000>;
};
- partition@5C0000 {
+ partition@5c0000 {
label = "dpc";
- reg = <0x5C0000 0x40000>;
+ reg = <0x5c0000 0x40000>;
};
partition@600000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index e3a7db21fe29..9d5726378aa0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -126,6 +126,7 @@
its: msi-controller@6020000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x6020000 0 0x20000>;
};
};
@@ -575,7 +576,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>,
@@ -587,7 +588,7 @@
};
pcie_ep1: pcie-ep@3400000 {
- compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
+ compatible = "fsl,ls1088a-pcie-ep";
reg = <0x00 0x03400000 0x0 0x00100000>,
<0x20 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -614,7 +615,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 114 IRQ_TYPE_LEVEL_HIGH>,
@@ -626,7 +627,7 @@
};
pcie_ep2: pcie-ep@3500000 {
- compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
+ compatible = "fsl,ls1088a-pcie-ep";
reg = <0x00 0x03500000 0x0 0x00100000>,
<0x28 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -652,7 +653,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
@@ -664,7 +665,7 @@
};
pcie_ep3: pcie-ep@3600000 {
- compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
+ compatible = "fsl,ls1088a-pcie-ep";
reg = <0x00 0x03600000 0x0 0x00100000>,
<0x30 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
@@ -964,7 +965,7 @@
compatible = "fsl,qoriq-mc";
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
dma-coherent;
#address-cells = <3>;
@@ -1033,7 +1034,7 @@
};
};
- rcpm: power-controller@1e34040 {
+ rcpm: wakeup-controller@1e34040 {
compatible = "fsl,ls1088a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x18>;
#fsl,rcpm-wakeup-cells = <6>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
index 9178cd61c786..556d8c5f3180 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
@@ -64,7 +64,7 @@
reg = <3 0 0x1000>;
ranges = <0 3 0 0x1000>;
- mdio-mux-emi1@54 {
+ mdio-mux@54 {
compatible = "mdio-mux-mmioreg", "mdio-mux";
mdio-parent-bus = <&emdio1>;
reg = <0x54 1>; /* BRDCFG4 */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 1b306d6802ce..9421fdd7e30e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -63,20 +63,20 @@
its: msi-controller@6020000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x6020000 0 0x20000>;
};
};
rstcr: syscon@1e60000 {
- compatible = "fsl,ls2080a-rstcr", "syscon";
+ compatible = "fsl,ls1028a-reset", "syscon", "simple-mfd";
reg = <0x0 0x1e60000 0x0 0x4>;
- };
- reboot {
- compatible = "syscon-reboot";
- regmap = <&rstcr>;
- offset = <0x0>;
- mask = <0x2>;
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x2>;
+ };
};
thermal-zones {
@@ -758,7 +758,7 @@
compatible = "fsl,qoriq-mc";
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
dma-coherent;
#address-cells = <3>;
@@ -1075,7 +1075,7 @@
};
pcie1: pcie@3400000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
+ compatible = "fsl,ls2080a-pcie";
reg-names = "regs", "config";
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
@@ -1085,7 +1085,7 @@
dma-coherent;
num-viewport = <6>;
bus-range = <0x0 0xff>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
@@ -1097,7 +1097,7 @@
};
pcie2: pcie@3500000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
+ compatible = "fsl,ls2080a-pcie";
reg-names = "regs", "config";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
@@ -1107,7 +1107,7 @@
dma-coherent;
num-viewport = <6>;
bus-range = <0x0 0xff>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
@@ -1119,7 +1119,7 @@
};
pcie3: pcie@3600000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
+ compatible = "fsl,ls2080a-pcie";
reg-names = "regs", "config";
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
@@ -1129,7 +1129,7 @@
dma-coherent;
num-viewport = <256>;
bus-range = <0x0 0xff>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
@@ -1141,7 +1141,7 @@
};
pcie4: pcie@3700000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie";
+ compatible = "fsl,ls2080a-pcie";
reg-names = "regs", "config";
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr";
@@ -1151,7 +1151,7 @@
dma-coherent;
num-viewport = <6>;
bus-range = <0x0 0xff>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
@@ -1218,7 +1218,7 @@
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
};
- rcpm: power-controller@1e34040 {
+ rcpm: wakeup-controller@1e34040 {
compatible = "fsl,ls208xa-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x18>;
#fsl,rcpm-wakeup-cells = <6>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts
index da0f58e26b9a..f6a4f8d54301 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts
@@ -320,7 +320,7 @@
reg = <1>;
peer-hub = <&hub_3_0>;
reset-gpios = <&gpioex1 0 GPIO_ACTIVE_LOW>;
- vcc-supply = <&reg_vcc3v3>;
+ vdd-supply = <&reg_vcc3v3>;
};
hub_3_0: hub@2 {
@@ -328,7 +328,7 @@
reg = <2>;
peer-hub = <&hub_2_0>;
reset-gpios = <&gpioex1 0 GPIO_ACTIVE_LOW>;
- vcc-supply = <&reg_vcc3v3>;
+ vdd-supply = <&reg_vcc3v3>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index bd75a658767d..927ecf66a740 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -398,6 +398,7 @@
its: msi-controller@6020000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x6020000 0 0x20000>;
};
};
@@ -1078,7 +1079,7 @@
timeout-sec = <30>;
};
- rcpm: power-controller@1e34040 {
+ rcpm: wakeup-controller@1e34040 {
compatible = "fsl,lx2160a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x1c>;
#fsl,rcpm-wakeup-cells = <7>;
@@ -1181,7 +1182,7 @@
ppio-wins = <8>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
@@ -1209,7 +1210,7 @@
ppio-wins = <8>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
@@ -1237,7 +1238,7 @@
ppio-wins = <24>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0x90 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
@@ -1265,7 +1266,7 @@
ppio-wins = <8>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0x98 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
@@ -1293,7 +1294,7 @@
ppio-wins = <24>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0xa0 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
@@ -1321,7 +1322,7 @@
ppio-wins = <8>;
bus-range = <0x0 0xff>;
ranges = <0x82000000 0x0 0x40000000 0xa8 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
@@ -1777,7 +1778,7 @@
compatible = "fsl,qoriq-mc";
reg = <0x00000008 0x0c000000 0 0x40>,
<0x00000000 0x08340000 0 0x40000>;
- msi-parent = <&its>;
+ msi-parent = <&its 0>;
/* iommu-map property is fixed up by u-boot */
iommu-map = <0 &smmu 0 0>;
dma-coherent;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
index f7a91d43a0ff..575be8115e42 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
@@ -34,6 +34,8 @@ dma_subsys: bus@5a000000 {
assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
power-domains = <&pd IMX_SC_R_SPI_0>;
+ dmas = <&edma2 1 0 0>, <&edma2 0 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -50,6 +52,8 @@ dma_subsys: bus@5a000000 {
assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
power-domains = <&pd IMX_SC_R_SPI_1>;
+ dmas = <&edma2 3 0 0>, <&edma2 2 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -66,6 +70,8 @@ dma_subsys: bus@5a000000 {
assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
power-domains = <&pd IMX_SC_R_SPI_2>;
+ dmas = <&edma2 5 0 0>, <&edma2 4 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -82,6 +88,8 @@ dma_subsys: bus@5a000000 {
assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
power-domains = <&pd IMX_SC_R_SPI_3>;
+ dmas = <&edma2 7 0 0>, <&edma2 6 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -303,6 +311,8 @@ dma_subsys: bus@5a000000 {
i2c0: i2c@5a800000 {
reg = <0x5a800000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&i2c0_lpcg IMX_LPCG_CLK_0>,
<&i2c0_lpcg IMX_LPCG_CLK_4>;
@@ -315,6 +325,8 @@ dma_subsys: bus@5a000000 {
i2c1: i2c@5a810000 {
reg = <0x5a810000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&i2c1_lpcg IMX_LPCG_CLK_0>,
<&i2c1_lpcg IMX_LPCG_CLK_4>;
@@ -327,6 +339,8 @@ dma_subsys: bus@5a000000 {
i2c2: i2c@5a820000 {
reg = <0x5a820000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&i2c2_lpcg IMX_LPCG_CLK_0>,
<&i2c2_lpcg IMX_LPCG_CLK_4>;
@@ -339,6 +353,8 @@ dma_subsys: bus@5a000000 {
i2c3: i2c@5a830000 {
reg = <0x5a830000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&i2c3_lpcg IMX_LPCG_CLK_0>,
<&i2c3_lpcg IMX_LPCG_CLK_4>;
@@ -362,7 +378,7 @@ dma_subsys: bus@5a000000 {
assigned-clock-rates = <24000000>;
power-domains = <&pd IMX_SC_R_ADC_0>;
status = "disabled";
- };
+ };
adc1: adc@5a890000 {
compatible = "nxp,imx8qxp-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
index 77d2928997b4..d39242c1b9f7 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
@@ -26,7 +26,6 @@ img_subsys: bus@58000000 {
assigned-clock-rates = <200000000>, <200000000>;
power-domains = <&pd IMX_SC_R_MJPEG_DEC_MP>,
<&pd IMX_SC_R_MJPEG_DEC_S0>;
- slot = <0>;
};
jpegenc: jpegenc@58450000 {
@@ -39,7 +38,6 @@ img_subsys: bus@58000000 {
assigned-clock-rates = <200000000>, <200000000>;
power-domains = <&pd IMX_SC_R_MJPEG_ENC_MP>,
<&pd IMX_SC_R_MJPEG_ENC_S0>;
- slot = <0>;
};
img_jpeg_dec_lpcg: clock-controller@585d0000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi
new file mode 100644
index 000000000000..d00036204a8c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/*
+ * Copyright 2024 NXP
+ */
+
+lvds0_subsys: bus@56240000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x56240000 0x0 0x56240000 0x10000>;
+
+ qm_lvds0_lis_lpcg: qxp_mipi1_lis_lpcg: clock-controller@56243000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56243000 0x4>;
+ #clock-cells = <1>;
+ clock-output-names = "mipi1_lis_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1>;
+ };
+
+ qm_lvds0_pwm_lpcg: qxp_mipi1_pwm_lpcg: clock-controller@5624300c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5624300c 0x4>;
+ #clock-cells = <1>;
+ clock-output-names = "mipi1_pwm_lpcg_clk",
+ "mipi1_pwm_lpcg_ipg_clk",
+ "mipi1_pwm_lpcg_32k_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>;
+ };
+
+ qm_lvds0_i2c0_lpcg: qxp_mipi1_i2c0_lpcg: clock-controller@56243010 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56243010 0x4>;
+ #clock-cells = <1>;
+ clock-output-names = "mipi1_i2c0_lpcg_clk",
+ "mipi1_i2c0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ };
+
+ qm_pwm_lvds0: qxp_pwm_mipi_lvds1: pwm@56244000 {
+ compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
+ reg = <0x56244000 0x1000>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+ power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>;
+ status = "disabled";
+ };
+
+ qm_i2c0_lvds0: qxp_i2c0_mipi_lvds1: i2c@56246000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x56246000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_MIPI_1_I2C_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi
new file mode 100644
index 000000000000..12ae4f48e1e1
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/*
+ * Copyright 2024 NXP
+ */
+
+lvds1_subsys: bus@57240000 {
+ compatible = "simple-bus";
+ interrupt-parent = <&irqsteer_lvds1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x57240000 0x0 0x57240000 0x10000>;
+
+ irqsteer_lvds1: interrupt-controller@57240000 {
+ compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x57240000 0x1000>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ clocks = <&lvds1_lis_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_LVDS_1>;
+ fsl,channel = <0>;
+ fsl,num-irqs = <32>;
+ };
+
+ lvds1_lis_lpcg: clock-controller@57243000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57243000 0x4>;
+ #clock-cells = <1>;
+ clocks = <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+ clock-output-names = "lvds1_lis_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_LVDS_1>;
+ };
+
+ lvds1_pwm_lpcg: clock-controller@5724300c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5724300c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_LVDS_1_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "lvds1_pwm_lpcg_clk",
+ "lvds1_pwm_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_LVDS_1_PWM_0>;
+ };
+
+ lvds1_i2c0_lpcg: clock-controller@57243010 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57243010 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "lvds1_i2c0_lpcg_clk",
+ "lvds1_i2c0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>;
+ };
+
+ lvds1_i2c1_lpcg: clock-controller@57243014 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57243014 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "lvds1_i2c1_lpcg_clk",
+ "lvds1_i2c1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>;
+ };
+
+ pwm_lvds1: pwm@57244000 {
+ compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
+ reg = <0x57244000 0x1000>;
+ clocks = <&lvds1_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&lvds1_pwm_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_LVDS_1_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+ power-domains = <&pd IMX_SC_R_LVDS_1_PWM_0>;
+ status = "disabled";
+ };
+
+ i2c0_lvds1: i2c@57246000 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x57246000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ clocks = <&lvds1_i2c0_lpcg IMX_LPCG_CLK_0>,
+ <&lvds1_i2c0_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>;
+ status = "disabled";
+ };
+
+ i2c1_lvds1: i2c@57247000 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x57247000 0x1000>;
+ interrupts = <9>;
+ clocks = <&lvds1_i2c1_lpcg IMX_LPCG_CLK_0>,
+ <&lvds1_i2c1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi
new file mode 100644
index 000000000000..9c5b0cbdfcbd
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/*
+ * Copyright 2024 NXP
+ */
+
+mipi0_subsys: bus@56220000 {
+ compatible = "simple-bus";
+ interrupt-parent = <&irqsteer_mipi0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x56220000 0x0 0x56220000 0x10000>;
+
+ irqsteer_mipi0: interrupt-controller@56220000 {
+ compatible = "fsl,imx8qxp-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x56220000 0x1000>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ clocks = <&mipi0_lis_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_MIPI_0>;
+ fsl,channel = <0>;
+ fsl,num-irqs = <32>;
+ };
+
+ mipi0_lis_lpcg: clock-controller@56223000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56223000 0x4>;
+ #clock-cells = <1>;
+ power-domains = <&pd IMX_SC_R_MIPI_0>;
+ };
+
+ mipi0_pwm_lpcg: clock-controller@5622300c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5622300c 0x4>;
+ #clock-cells = <1>;
+ power-domains = <&pd IMX_SC_R_MIPI_0_PWM_0>;
+ };
+
+ mipi0_i2c0_lpcg_ipg_clk: clock-controller@56223014 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56223014 0x4>;
+ #clock-cells = <1>;
+ clocks = <&mipi0_i2c0_lpcg_ipg_s_clk IMX_LPCG_CLK_0>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>;
+ };
+
+ mipi0_i2c0_lpcg_ipg_s_clk: clock-controller@56223018 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56223018 0x4>;
+ #clock-cells = <1>;
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c0_lpcg_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>;
+ };
+
+ mipi0_i2c0_lpcg_clk: clock-controller@5622301c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5622301c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_MIPI_0_I2C_0 IMX_SC_PM_CLK_MISC2>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c0_lpcg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>;
+ };
+
+ mipi0_i2c1_lpcg_ipg_clk: clock-controller@56223024 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56223024 0x4>;
+ #clock-cells = <1>;
+ clocks = <&mipi0_i2c1_lpcg_ipg_s_clk IMX_LPCG_CLK_0>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>;
+ };
+
+ mipi0_i2c1_lpcg_ipg_s_clk: clock-controller@56223028 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56223028 0x4>;
+ #clock-cells = <1>;
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c1_lpcg_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>;
+ };
+
+ mipi0_i2c1_lpcg_clk: clock-controller@5622302c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5622302c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_MIPI_0_I2C_1 IMX_SC_PM_CLK_MISC2>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_i2c1_lpcg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>;
+ };
+
+ pwm_mipi0: pwm@56224000 {
+ compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
+ reg = <0x56224000 0x1000>;
+ clocks = <&mipi0_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&mipi0_pwm_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_MIPI_0_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+ power-domains = <&pd IMX_SC_R_MIPI_0_PWM_0>;
+ status = "disabled";
+ };
+
+ i2c0_mipi0: i2c@56226000 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x56226000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ clocks = <&mipi0_i2c0_lpcg_clk IMX_LPCG_CLK_0>,
+ <&mipi0_i2c0_lpcg_ipg_clk IMX_LPCG_CLK_0>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&mipi0_i2c0_lpcg_clk IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi
new file mode 100644
index 000000000000..5b1f08e412b2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+/*
+ * Copyright 2024 NXP
+ */
+
+mipi1_subsys: bus@57220000 {
+ compatible = "simple-bus";
+ interrupt-parent = <&irqsteer_mipi1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x57220000 0x0 0x57220000 0x10000>;
+
+ irqsteer_mipi1: interrupt-controller@57220000 {
+ compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x57220000 0x1000>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ clocks = <&mipi1_lis_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_MIPI_1>;
+ fsl,channel = <0>;
+ fsl,num-irqs = <32>;
+ };
+
+ mipi1_lis_lpcg: clock-controller@57223000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57223000 0x4>;
+ #clock-cells = <1>;
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_lis_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1>;
+ };
+
+ mipi1_pwm_lpcg: clock-controller@5722300c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5722300c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "mipi1_pwm_lpcg_clk",
+ "mipi1_pwm_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>;
+ };
+
+ mipi1_i2c0_lpcg_clk: clock-controller@5722301c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5722301c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_MIPI_1_I2C_0 IMX_SC_PM_CLK_MISC2>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c0_lpcg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ };
+
+ mipi1_i2c0_lpcg_ipg_clk: clock-controller@57223014 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57223014 0x4>;
+ #clock-cells = <1>;
+ clocks = <&mipi1_i2c0_lpcg_ipg_s_clk IMX_LPCG_CLK_0>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ };
+
+ mipi1_i2c0_lpcg_ipg_s_clk: clock-controller@57223018 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57223018 0x4>;
+ #clock-cells = <1>;
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c0_lpcg_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ };
+
+ mipi1_i2c1_lpcg_ipg_clk: clock-controller@57223024 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57223024 0x4>;
+ #clock-cells = <1>;
+ clocks = <&mipi1_i2c1_lpcg_ipg_s_clk IMX_LPCG_CLK_0>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>;
+ };
+
+ mipi1_i2c1_lpcg_ipg_s_clk: clock-controller@57223028 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x57223028 0x4>;
+ #clock-cells = <1>;
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c1_lpcg_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>;
+ };
+
+ mipi1_i2c1_lpcg_clk: clock-controller@5722302c {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5722302c 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_MIPI_1_I2C_1 IMX_SC_PM_CLK_MISC2>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi1_i2c1_lpcg_clk";
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>;
+ };
+
+ pwm_mipi1: pwm@57224000 {
+ compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
+ reg = <0x57224000 0x1000>;
+ clocks = <&mipi1_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&mipi1_pwm_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ #pwm-cells = <3>;
+ power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>;
+ status = "disabled";
+ };
+
+ i2c0_mipi1: i2c@57226000 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x57226000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ interrupt-parent = <&irqsteer_mipi1>;
+ clocks = <&mipi1_i2c0_lpcg_clk IMX_LPCG_CLK_0>,
+ <&mipi1_i2c0_lpcg_ipg_clk IMX_LPCG_CLK_0>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&mipi1_i2c0_lpcg_clk IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi
index 66b0fcc6687d..4d1ad052c5b6 100644
--- a/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi
@@ -9,3 +9,14 @@
/ {
model = "Toradex Colibri iMX8DX Module";
};
+
+&thermal_zones {
+ pmic-thermal {
+ cooling-maps {
+ map0 {
+ cooling-device = <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 1a74ac3ee4ee..4caaecc19227 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -722,12 +722,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpspi3>;
status = "okay";
-
- spidev0: spi@0 {
- reg = <0>;
- compatible = "rohm,dh2228fv";
- spi-max-frequency = <30000000>;
- };
};
&iomuxc {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
index 6086dae2e5fb..ea1d5b9c6bae 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
@@ -56,6 +56,20 @@
enable-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
};
+ reg_1v5: regulator-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
reg_audio: regulator-audio {
compatible = "regulator-fixed";
regulator-name = "3v3_aud";
@@ -187,6 +201,8 @@
assigned-clock-parents = <&clk IMX8MM_CLK_24M>;
assigned-clock-rates = <24000000>;
AVDD-supply = <&reg_camera>; /* 2.8v */
+ DVDD-supply = <&reg_1v5>;
+ DOVDD-supply = <&reg_1v8>;
powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
index 905c98cb080d..97ff1ddd6318 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
@@ -62,8 +62,8 @@
compatible = "adi,adv7535";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hdmi_bridge>;
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>, <0x3e>, <0x3c>, <0x3f>;
+ reg-names = "main", "edid", "cec", "packet";
adi,dsi-lanes = <4>;
avdd-supply = <&reg_hdmi>;
a2vdd-supply = <&reg_hdmi>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
index b1f2beb40a98..472c584fb3bd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
@@ -168,7 +168,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
phy-mode = "rgmii-id";
- phy-handle = <&fec1_phy>;
+ phy-handle = <&fec1_phy_bcm>;
phy-supply = <&buck4_reg>;
fsl,magic-packet;
status = "okay";
@@ -178,7 +178,7 @@
#size-cells = <0>;
/* Atheros AR8031 PHY */
- fec1_phy: ethernet-phy@0 {
+ fec1_phy_ath: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
/*
@@ -191,6 +191,7 @@
reset-deassert-us = <10000>;
qca,keep-pll-enabled;
vddio-supply = <&vddio>;
+ status = "disabled";
vddio: vddio-regulator {
regulator-name = "VDDIO";
@@ -202,6 +203,20 @@
regulator-name = "VDDH";
};
};
+
+ /* Broadcom BCM54213PE PHY */
+ fec1_phy_bcm: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ /*
+ * Dedicated ENET_INT# and ENET_WOL# signals are
+ * unused, the PHY does not provide cable detect
+ * interrupt.
+ */
+ reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <10000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts b/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts
index 1c4e4d175989..7d2cb74c64ee 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts
@@ -11,5 +11,53 @@
model = "Emtop Embedded Solutions i.MX8M Mini Baseboard V1";
compatible = "ees,imx8mm-emtop-baseboard", "ees,imx8mm-emtop-som",
"fsl,imx8mm";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ vddio-supply = <&vddio>;
+
+ vddio: vddio-regulator {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index 930e14fec423..5f8336217bb8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -180,12 +180,21 @@
};
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif1>;
- spdif-out;
- spdif-in;
+ audio-cpu = <&spdif1>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
index 92e62fe31929..5eacbd9611ee 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
@@ -220,6 +220,7 @@
};
&rv3028 {
+ aux-voltage-chargeable = <1>;
trickle-resistor-ohms = <3000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
index 353ace3601dc..78f4e8d5814d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
@@ -14,16 +14,11 @@
/dts-v1/;
/plugin/;
-&{/} {
- compatible = "phytec,imx8mm-phygate-tauri-l";
-
-};
-
&gpio3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio3_hog>;
- uart4_rs485_en {
+ uart4-rs485-en-hog {
gpio-hog;
gpios = <20 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
index 8a75d6783ad2..66288948bdd3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
@@ -15,16 +15,11 @@
/dts-v1/;
/plugin/;
-&{/} {
- compatible = "phytec,imx8mm-phygate-tauri-l";
-
-};
-
&gpio3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio3_hog>;
- uart4_rs485_en {
+ uart4-rs485-en-hog {
gpio-hog;
gpios = <20 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso
index 107f743fbb1c..4719f5fbad03 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso
@@ -14,12 +14,6 @@
/dts-v1/;
/plugin/;
-
-&{/} {
- compatible = "phytec,imx8mm-phygate-tauri-l";
-
-};
-
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
index ba6ce3c7f477..c3835b2d860a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
@@ -215,6 +215,7 @@
/* RTC */
&rv3028 {
+ aux-voltage-chargeable = <1>;
trickle-resistor-ohms = <3000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
index ca0205b9019e..8f58c84e14c8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
@@ -83,7 +83,6 @@
};
&i2c1 {
- clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
index d7830df5b6f9..cdfacbc35db5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
@@ -8,7 +8,6 @@
/ {
model = "Variscite VAR-SOM-MX8MM module";
- compatible = "variscite,var-som-mx8mm", "fsl,imx8mm";
chosen {
stdout-path = &uart4;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso
index 4eaf8aabcbff..c09aa80d2ba2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso
@@ -13,6 +13,20 @@
&{/} {
compatible = "gw,imx8mm-gw72xx-0x", "fsl,imx8mm";
+ reg_vana: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ reg_vddl: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
reg_cam: regulator-cam {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_cam>;
@@ -45,6 +59,8 @@
reg = <0x10>;
clocks = <&cam24m>;
VDIG-supply = <&reg_cam>;
+ VANA-supply = <&reg_vana>;
+ VDDL-supply = <&reg_vddl>;
port {
/* MIPI CSI-2 bus endpoint */
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
index f6ad1a4b8b66..bb2056746f8c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
@@ -15,12 +15,8 @@
/dts-v1/;
/plugin/;
-&{/} {
- compatible = "gw,imx8mm-gw72xx-0x";
-};
-
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso
index c3cd9f2b0db3..45ac8bdce869 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso
@@ -18,19 +18,15 @@
/dts-v1/;
/plugin/;
-&{/} {
- compatible = "gw,imx8mm-gw72xx-0x";
-};
-
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-high;
line-name = "rs485_en";
};
- rs485_hd {
+ rs485-hd-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso
index cc0a287226ab..30aa620d7004 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso
@@ -18,19 +18,15 @@
/dts-v1/;
/plugin/;
-&{/} {
- compatible = "gw,imx8mm-gw72xx-0x";
-};
-
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-high;
line-name = "rs485_en";
};
- rs485_hd {
+ rs485-hd-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso
index f3ece4b7fbbd..cfc014eb038d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso
@@ -13,6 +13,20 @@
&{/} {
compatible = "gw,imx8mm-gw73xx-0x", "fsl,imx8mm";
+ reg_vana: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ reg_vddl: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
reg_cam: regulator-cam {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_cam>;
@@ -45,6 +59,8 @@
reg = <0x10>;
clocks = <&cam24m>;
VDIG-supply = <&reg_cam>;
+ VANA-supply = <&reg_vana>;
+ VDDL-supply = <&reg_vddl>;
port {
/* MIPI CSI-2 bus endpoint */
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
index 1f8ea20dfafc..9bee7159a67b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
@@ -20,7 +20,7 @@
};
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso
index 3e6404340d52..e98f50bcec57 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso
@@ -23,14 +23,14 @@
};
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-high;
line-name = "rs485_en";
};
- rs485_hd {
+ rs485-hd-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso
index 2c71ab9854cb..e875ff4637bd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso
@@ -23,14 +23,14 @@
};
&gpio4 {
- rs485_en {
+ rs485-en-hog {
gpio-hog;
gpios = <0 GPIO_ACTIVE_HIGH>;
output-high;
line-name = "rs485_en";
};
- rs485_hd {
+ rs485-hd-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts
index 914753f062cd..04f06a55da5c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts
@@ -7,11 +7,11 @@
#include "imx8mm.dtsi"
#include "imx8mm-venice-gw700x.dtsi"
-#include "imx8mm-venice-gw7905.dtsi"
+#include "imx8mm-venice-gw75xx.dtsi"
/ {
- model = "Gateworks Venice GW7905-0x i.MX8MM Development Kit";
- compatible = "gateworks,imx8mm-gw7905-0x", "fsl,imx8mm";
+ model = "Gateworks Venice GW75xx-0x i.MX8MM Development Kit";
+ compatible = "gateworks,imx8mm-gw75xx-0x", "fsl,imx8mm";
chosen {
stdout-path = &uart2;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi
index 5eb92005195c..5eb92005195c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 136cb30df03a..35ae0faa815b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -364,6 +364,8 @@
interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
index 1d56f2a6c06a..c11260c26d0b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
@@ -314,6 +314,8 @@
interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
index 45470160f98f..db1737bf637d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
@@ -280,6 +280,8 @@
interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
index ef951bc9f0dd..05489a31e7fd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
@@ -330,6 +330,8 @@
interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
index 20018ee2c803..77d14ea459e5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
@@ -40,6 +40,20 @@
};
};
+ reg_1v5: regulator-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
reg_audio: regulator-audio {
compatible = "regulator-fixed";
regulator-name = "3v3_aud";
@@ -158,6 +172,8 @@
assigned-clock-parents = <&clk IMX8MN_CLK_24M>;
assigned-clock-rates = <24000000>;
AVDD-supply = <&reg_camera>; /* 2.8v */
+ DVDD-supply = <&reg_1v5>;
+ DOVDD-supply = <&reg_1v8>;
powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
index bbd80896db96..1df5ceb11387 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
@@ -62,8 +62,8 @@
compatible = "adi,adv7535";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hdmi_bridge>;
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>, <0x3e>, <0x3c>, <0x3f>;
+ reg-names = "main", "edid", "cec", "packet";
adi,dsi-lanes = <4>;
avdd-supply = <&reg_hdmi>;
a2vdd-supply = <&reg_hdmi>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index 9e0259ddf4bc..33d73f3dc187 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -124,12 +124,21 @@
"Line Out Jack", "LINEVOUTR";
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif1>;
- spdif-out;
- spdif-in;
+ audio-cpu = <&spdif1>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
};
sound-micfil {
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
index 72004ab6bda5..0b1fa04f1d67 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
@@ -312,6 +312,8 @@
interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc {
compatible = "gw,gsc-adc";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index cc9b81d46188..31c33acb560c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -105,6 +105,17 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector: endpoint {
+ remote-endpoint = <&hdmi_to_connector>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -282,6 +293,26 @@
};
};
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ hdmi_to_connector: endpoint {
+ remote-endpoint = <&hdmi_connector>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
&i2c2 {
clock-frequency = <384000>;
pinctrl-names = "default";
@@ -344,6 +375,10 @@
};
};
+&hdmi_pvi {
+ status = "okay";
+};
+
&i2c3 {
/* Connected to USB Hub */
usb-typec@52 {
@@ -464,6 +499,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
&micfil {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pdm>;
@@ -646,6 +685,15 @@
>;
};
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c2
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c2
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000010
+ >;
+ };
+
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
index 7e1b58dbe23a..d0fc5977258f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
@@ -59,6 +59,18 @@
pwms = <&pwm4 0 83 0>;
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "J17";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
panel: panel {
/* Compatible string is filled in by panel board DT Overlay. */
backlight = <&backlight>;
@@ -311,6 +323,33 @@
"", "SPI3_CS#", "", "", "", "", "", "";
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ ddc-i2c-bus = <&i2c5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
@@ -499,7 +538,6 @@
};
&sai3 {
- #clock-cells = <0>;
#sound-dai-cells = <0>;
assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
@@ -682,6 +720,13 @@
>;
};
+ pinctrl_hdmi: hdmi-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x154
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x154
+ >;
+ };
+
pinctrl_hog_feature: hog-feature-grp {
fsl,pins = <
/* GPIO5_IO03 */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 938347704136..d26930f1a9e9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -56,6 +56,18 @@
<0x1 0x00000000 0 0xc0000000>;
};
+ native-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "HDMI OUT";
+ type = "a";
+
+ port {
+ hdmi_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+ };
+
pcie0_refclk: pcie0-refclk {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -408,6 +420,28 @@
status = "disabled";/* can2 pin conflict with pdm */
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default";
@@ -604,6 +638,10 @@
status = "okay";
};
+&lcdif3 {
+ status = "okay";
+};
+
&micfil {
#sound-dai-cells = <0>;
pinctrl-names = "default";
@@ -858,6 +896,14 @@
>;
};
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x1c2
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x1c2
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x10
+ >;
+ };
+
pinctrl_hog: hoggrp {
fsl,pins = <
MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
index 00a240484c25..50debe821c42 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -6,6 +6,7 @@
/dts-v1/;
+#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/leds/leds-pca9532.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mp-phycore-som.dtsi"
@@ -43,6 +44,15 @@
};
};
+ reg_vcc_5v_sw: regulator-vcc-5v-sw {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "VCC_5V_SW";
+ };
+
reg_can1_stby: regulator-can1-stby {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -103,6 +113,22 @@
};
};
+/* TPM */
+&ecspi1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ tpm: tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <38000000>;
+ };
+};
+
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>;
@@ -155,6 +181,7 @@
compatible = "atmel,24c02";
reg = <0x51>;
pagesize = <16>;
+ vcc-supply = <&reg_vcc_3v3_sw>;
};
leds@62 {
@@ -195,6 +222,23 @@
status = "okay";
};
+&pcie_phy {
+ clocks = <&hsio_blk_ctrl>;
+ clock-names = "ref";
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_OUTPUT>;
+ fsl,clkreq-unsupported;
+ status = "okay";
+};
+
+/* Mini PCIe */
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ reset-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_vcc_3v3_sw>;
+ status = "okay";
+};
+
&pwm3 {
status = "okay";
pinctrl-names = "default";
@@ -206,6 +250,7 @@
pinctrl-0 = <&pinctrl_rtc>;
interrupt-parent = <&gpio4>;
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ aux-voltage-chargeable = <1>;
wakeup-source;
trickle-resistor-ohms = <3000>;
};
@@ -234,6 +279,7 @@
/* USB2 4-port USB3.0 HUB */
&usb3_phy1 {
+ vbus-supply = <&reg_vcc_5v_sw>;
status = "okay";
};
@@ -267,7 +313,9 @@
pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_pins>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_pins>;
cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ disable-wp;
vmmc-supply = <&reg_usdhc2_vmmc>;
+ vqmmc-supply = <&ldo5>;
bus-width = <4>;
status = "okay";
};
@@ -300,6 +348,15 @@
};
&iomuxc {
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x80
+ MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x80
+ MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x80
+ MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x00
+ >;
+ };
+
pinctrl_eqos: eqosgrp {
fsl,pins = <
MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2
@@ -366,6 +423,15 @@
>;
};
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x40
+ MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x60
+ MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11 0x60 /* open drain, pull up */
+ MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x40
+ >;
+ };
+
pinctrl_pwm3: pwm3grp {
fsl,pins = <
MX8MP_IOMUXC_SPDIF_TX__PWM3_OUT 0x12
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso b/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso
new file mode 100644
index 000000000000..5f0278bf61ee
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 PHYTEC Messtechnik GmbH
+ * Author: Cem Tenruh <c.tenruh@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&ethphy1 {
+ status = "disabled";
+};
+
+&fec {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index e6ffa6a6b68b..a5ecdca8bc0e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -20,6 +20,15 @@
device_type = "memory";
reg = <0x0 0x40000000 0 0x80000000>;
};
+
+ reg_vdd_io: regulator-vdd-io {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_IO";
+ };
};
&A53_0 {
@@ -170,6 +179,7 @@
compatible = "atmel,24c32";
reg = <0x51>;
pagesize = <32>;
+ vcc-supply = <&reg_vdd_io>;
};
rv3028: rtc@52 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts b/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts
new file mode 100644
index 000000000000..36d3eb865202
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 Variscite Ltd.
+ */
+
+#include "imx8mp-var-som.dtsi"
+
+/ {
+ model = "Variscite VAR-SOM-MX8M-PLUS on Symphony-Board";
+ compatible = "variscite,var-som-mx8mp-symphony", "variscite,var-som-mx8mp", "fsl,imx8mp";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
new file mode 100644
index 000000000000..b2ac2583a592
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 Variscite Ltd.
+ *
+ * Author: Tarang Raval <tarang.raval@siliconsignals.io>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/usb/pd.h>
+#include "imx8mp.dtsi"
+
+/ {
+ model = "Variscite VAR-SOM-MX8M Plus module";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_POWER;
+ gpios = <&pca9534 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0xc0000000>,
+ <0x1 0x00000000 0 0xc0000000>;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ off-on-delay-us = <12000>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2: LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: LDO3 {
+ regulator-name = "LDO3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ /* GPIO expander */
+ pca9534: gpio@20 {
+ compatible = "nxp,pca9534";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pca9534>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+
+ usb3-sata-sel-hog {
+ gpio-hog;
+ gpios = <4 0>;
+ output-low;
+ line-name = "usb3_sata_sel";
+ };
+ };
+};
+
+/* Console */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* SD-card */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_DATA4__I2C1_SCL 0x400001c2
+ MX8MP_IOMUXC_SD1_DATA5__I2C1_SDA 0x400001c2
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2
+ >;
+ };
+
+ pinctrl_pca9534: pca9534grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0xc0
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x1c0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x40
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x40
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x1c4
+ MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x10
+ MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0xc0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
index edf22ff549a4..7d9fcdee58a7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso
@@ -11,7 +11,19 @@
/plugin/;
&{/} {
- compatible = "gw,imx8mp-gw74xx", "fsl,imx8mp";
+ reg_vana: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ reg_vddl: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
reg_cam: regulator-cam {
pinctrl-names = "default";
@@ -41,6 +53,8 @@
reg = <0x10>;
clocks = <&cam24m>;
VDIG-supply = <&reg_cam>;
+ VANA-supply = <&reg_vana>;
+ VDDL-supply = <&reg_vddl>;
port {
/* MIPI CSI-2 bus endpoint */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts
index 4a1bbbbe19e6..7ca68df9e516 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts
@@ -7,11 +7,11 @@
#include "imx8mp.dtsi"
#include "imx8mp-venice-gw702x.dtsi"
-#include "imx8mp-venice-gw7905.dtsi"
+#include "imx8mp-venice-gw75xx.dtsi"
/ {
- model = "Gateworks Venice GW7905-2x i.MX8MP Development Kit";
- compatible = "gateworks,imx8mp-gw7905-2x", "fsl,imx8mp";
+ model = "Gateworks Venice GW75xx-2x i.MX8MP Development Kit";
+ compatible = "gateworks,imx8mp-gw75xx-2x", "fsl,imx8mp";
chosen {
stdout-path = &uart2;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi
index 0d40cb0f05f6..0d40cb0f05f6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
index fbcd93e33aea..da8902c5f7e5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
@@ -65,6 +65,11 @@
};
};
+/* Verdin HDMI_1 Audio */
+&aud2htx {
+ status = "okay";
+};
+
&backlight {
power-supply = <&reg_3p3v>;
};
@@ -219,6 +224,11 @@
status = "okay";
};
+/* Verdin HDMI_1 Audio */
+&sound_hdmi {
+ status = "okay";
+};
+
/* Verdin UART_1 */
&uart1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
index 09733fea036d..a38e7c947a42 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
@@ -64,6 +64,11 @@
};
};
+/* Verdin HDMI_1 Audio */
+&aud2htx {
+ status = "okay";
+};
+
&backlight {
power-supply = <&reg_3p3v>;
};
@@ -215,6 +220,11 @@
status = "okay";
};
+/* Verdin HDMI_1 Audio */
+&sound_hdmi {
+ status = "okay";
+};
+
/* Verdin UART_1, connector X50 through RS485 transceiver */
&uart1 {
linux,rs485-enabled-at-boot-time;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
index 3a40338cf2d8..11cf3bdc95c4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi
@@ -62,6 +62,11 @@
};
};
+/* Verdin HDMI_1 Audio */
+&aud2htx {
+ status = "okay";
+};
+
&backlight {
power-supply = <&reg_3p3v>;
};
@@ -182,6 +187,11 @@
vin-supply = <&reg_3p3v>;
};
+/* Verdin HDMI_1 Audio */
+&sound_hdmi {
+ status = "okay";
+};
+
/* Verdin UART_1 */
&uart1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
index efcab00c0142..cae06cb67cd3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi
@@ -75,7 +75,6 @@
&usdhc1 {
bus-width = <4>;
keep-power-in-suspend;
- max-frequency = <100000000>;
non-removable;
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_wifi_ctrl>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
index 533b7fe218ce..cc389cda2af2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi
@@ -85,6 +85,11 @@
};
};
+/* Verdin HDMI_1 Audio */
+&aud2htx {
+ status = "okay";
+};
+
&backlight {
power-supply = <&reg_3p3v>;
};
@@ -192,6 +197,11 @@
vin-supply = <&reg_3p3v>;
};
+/* Verdin HDMI_1 Audio */
+&sound_hdmi {
+ status = "okay";
+};
+
/* Verdin UART_1 */
&uart1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index d23a3942174d..a19ad5ee7f79 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -77,6 +77,14 @@
};
};
+ sound_hdmi: sound-hdmi {
+ compatible = "fsl,imx-audio-hdmi";
+ model = "audio-hdmi";
+ audio-cpu = <&aud2htx>;
+ hdmi-out;
+ status = "disabled";
+ };
+
/* Carrier Board Supplies */
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 603dfe80216f..f3531cfb0d79 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -1673,6 +1673,50 @@
};
};
+ isp_0: isp@32e10000 {
+ compatible = "fsl,imx8mp-isp";
+ reg = <0x32e10000 0x10000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>,
+ <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
+ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+ clock-names = "isp", "aclk", "hclk";
+ power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_ISP>;
+ fsl,blk-ctrl = <&media_blk_ctrl 0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ isp_1: isp@32e20000 {
+ compatible = "fsl,imx8mp-isp";
+ reg = <0x32e20000 0x10000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>,
+ <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
+ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+ clock-names = "isp", "aclk", "hclk";
+ power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_ISP>;
+ fsl,blk-ctrl = <&media_blk_ctrl 1>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
dewarp: dwe@32e30000 {
compatible = "nxp,imx8mp-dw100";
reg = <0x32e30000 0x10000>;
@@ -1687,7 +1731,7 @@
compatible = "fsl,imx8mp-mipi-csi2", "fsl,imx8mm-mipi-csi2";
reg = <0x32e40000 0x10000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <266000000>;
+ clock-frequency = <250000000>;
clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
<&clk IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
@@ -1695,9 +1739,8 @@
clock-names = "pclk", "wrap", "phy", "axi";
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM1_PIX>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_250M>,
<&clk IMX8MP_CLK_24M>;
- assigned-clock-rates = <266000000>;
power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_MIPI_CSI2_1>;
status = "disabled";
@@ -1723,7 +1766,7 @@
compatible = "fsl,imx8mp-mipi-csi2", "fsl,imx8mm-mipi-csi2";
reg = <0x32e50000 0x10000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <266000000>;
+ clock-frequency = <250000000>;
clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
<&clk IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
@@ -1731,9 +1774,8 @@
clock-names = "pclk", "wrap", "phy", "axi";
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM2_PIX>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_250M>,
<&clk IMX8MP_CLK_24M>;
- assigned-clock-rates = <266000000>;
power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_MIPI_CSI2_2>;
status = "disabled";
@@ -1871,17 +1913,26 @@
clock-names = "apb", "axi", "cam1", "cam2",
"disp1", "disp2", "isp", "phy";
+ /*
+ * The ISP maximum frequency is 400MHz in normal mode
+ * and 500MHz in overdrive mode. The 400MHz operating
+ * point hasn't been successfully tested yet, so set
+ * IMX8MP_CLK_MEDIA_ISP to 500MHz for the time being.
+ */
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
<&clk IMX8MP_CLK_MEDIA_APB>,
<&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
<&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
+ <&clk IMX8MP_CLK_MEDIA_ISP>,
<&clk IMX8MP_VIDEO_PLL1>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_VIDEO_PLL1_OUT>,
- <&clk IMX8MP_VIDEO_PLL1_OUT>;
+ <&clk IMX8MP_VIDEO_PLL1_OUT>,
+ <&clk IMX8MP_SYS_PLL2_500M>;
assigned-clock-rates = <500000000>, <200000000>,
- <0>, <0>, <1039500000>;
+ <0>, <0>, <500000000>,
+ <1039500000>;
#power-domain-cells = <1>;
lvds_bridge: bridge@5c {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 7507548cdb16..a87d0692c3bb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -125,19 +125,33 @@
};
};
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
- spdif-controller = <&spdif1>;
- spdif-out;
- spdif-in;
+ audio-cpu = <&spdif1>;
+ audio-codec = <&spdif_out>, <&spdif_in>;
+ };
+
+ hdmi_arc_in: hdmi-arc-in {
+ compatible = "linux,spdif-dir";
+ #sound-dai-cells = <0>;
};
sound-hdmi-arc {
compatible = "fsl,imx-audio-spdif";
model = "imx-hdmi-arc";
- spdif-controller = <&spdif2>;
- spdif-in;
+ audio-cpu = <&spdif2>;
+ audio-codec = <&hdmi_arc_in>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 778741dbbb33..62203eed6a6c 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -6,6 +6,7 @@
/dts-v1/;
+#include <dt-bindings/usb/pd.h>
#include "imx8qm.dtsi"
/ {
@@ -31,6 +32,99 @@
reg = <0x00000000 0x80000000 0 0x40000000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ vdev0vring0: memory@90000000 {
+ reg = <0 0x90000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: memory@90008000 {
+ reg = <0 0x90008000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring0: memory@90010000 {
+ reg = <0 0x90010000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring1: memory@90018000 {
+ reg = <0 0x90018000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table0: memory@900ff000 {
+ reg = <0 0x900ff000 0 0x1000>;
+ no-map;
+ };
+
+ vdev2vring0: memory@90100000 {
+ reg = <0 0x90100000 0 0x8000>;
+ no-map;
+ };
+
+ vdev2vring1: memory@90108000 {
+ reg = <0 0x90108000 0 0x8000>;
+ no-map;
+ };
+
+ vdev3vring0: memory@90110000 {
+ reg = <0 0x90110000 0 0x8000>;
+ no-map;
+ };
+
+ vdev3vring1: memory@90118000 {
+ reg = <0 0x90118000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table1: memory@901ff000 {
+ reg = <0 0x901ff000 0 0x1000>;
+ no-map;
+ };
+
+ vdevbuffer: memory@90400000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x90400000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ lvds_backlight0: backlight-lvds0 {
+ compatible = "pwm-backlight";
+ pwms = <&qm_pwm_lvds0 0 100000 0>;
+ brightness-levels = <0 100>;
+ num-interpolated-steps = <100>;
+ default-brightness-level = <80>;
+ };
+
+ lvds_backlight1: backlight-lvds1 {
+ compatible = "pwm-backlight";
+ pwms = <&pwm_lvds1 0 100000 0>;
+ brightness-levels = <0 100>;
+ num-interpolated-steps = <100>;
+ default-brightness-level = <80>;
+ };
+
+ mux-controller {
+ compatible = "nxp,cbdtu02043", "gpio-sbu-mux";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec_mux>;
+ select-gpios = <&lsio_gpio4 6 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ orientation-switch;
+
+ port {
+ usb3_data_ss: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+ };
+
reg_usdhc2_vmmc: usdhc2-vmmc {
compatible = "regulator-fixed";
regulator-name = "SD1_SPWR";
@@ -133,6 +227,37 @@
"LINPUT1", "Mic Jack",
"Mic Jack", "MICB";
};
+
+ imx8qm-cm4-0 {
+ compatible = "fsl,imx8qm-cm4";
+ clocks = <&clk_dummy>;
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&lsio_mu5 0 1
+ &lsio_mu5 1 1
+ &lsio_mu5 3 1>;
+ memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>,
+ <&vdev1vring0>, <&vdev1vring1>, <&rsc_table0>;
+ power-domains = <&pd IMX_SC_R_M4_0_PID0>, <&pd IMX_SC_R_M4_0_MU_1A>;
+
+ fsl,resource-id = <IMX_SC_R_M4_0_PID0>;
+ fsl,entry-address = <0x34fe0000>;
+ };
+
+ imx8qm-cm4-1 {
+ compatible = "fsl,imx8qm-cm4";
+ clocks = <&clk_dummy>;
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&lsio_mu6 0 1
+ &lsio_mu6 1 1
+ &lsio_mu6 3 1>;
+ memory-region = <&vdevbuffer>, <&vdev2vring0>, <&vdev2vring1>,
+ <&vdev3vring0>, <&vdev3vring1>, <&rsc_table1>;
+ power-domains = <&pd IMX_SC_R_M4_1_PID0>, <&pd IMX_SC_R_M4_1_MU_1A>;
+
+ fsl,resource-id = <IMX_SC_R_M4_1_PID0>;
+ fsl,entry-address = <0x38fe0000>;
+ };
};
&adc0 {
@@ -212,6 +337,44 @@
compatible = "st,l3g4200d-gyro";
reg = <0x69>;
};
+
+ ptn5110: tcpc@51 {
+ compatible = "nxp,ptn5110", "tcpci";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec>;
+ reg = <0x51>;
+ interrupt-parent = <&lsio_gpio4>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ status = "okay";
+
+ usb_con1: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "source";
+ data-role = "dual";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec_dr_sw: endpoint {
+ remote-endpoint = <&usb3_drd_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ typec_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
+ };
};
&i2c1 {
@@ -241,6 +404,34 @@
};
};
+&i2c1_lvds0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds0_lpi2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c1_lvds1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds1_lpi2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c0_mipi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mipi0_lpi2c0>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c0_mipi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mipi1_lpi2c0>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
&flexcan1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan1>;
@@ -287,12 +478,6 @@
pinctrl-0 = <&pinctrl_lpspi2 &pinctrl_lpspi2_cs>;
cs-gpios = <&lsio_gpio3 10 GPIO_ACTIVE_LOW>;
status = "okay";
-
- spidev0: spi@0 {
- reg = <0>;
- compatible = "rohm,dh2228fv";
- spi-max-frequency = <30000000>;
- };
};
&lsio_mu5 {
@@ -356,6 +541,18 @@
status = "okay";
};
+&qm_pwm_lvds0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm_lvds0>;
+ status = "okay";
+};
+
+&pwm_lvds1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm_lvds1>;
+ status = "okay";
+};
+
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -376,6 +573,26 @@
status = "okay";
};
+&usb3_phy {
+ status = "okay";
+};
+
+&usbotg3 {
+ status = "okay";
+};
+
+&usbotg3_cdns3 {
+ dr_mode = "otg";
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ usb3_drd_sw: endpoint {
+ remote-endpoint = <&typec_dr_sw>;
+ };
+ };
+};
+
&sai0 {
#sound-dai-cells = <0>;
assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
@@ -501,6 +718,22 @@
>;
};
+ pinctrl_mipi0_lpi2c0: mipi0_lpi2c0grp {
+ fsl,pins = <
+ IMX8QM_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL 0xc6000020
+ IMX8QM_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA 0xc6000020
+ IMX8QM_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO19 0x00000020
+ >;
+ };
+
+ pinctrl_mipi1_lpi2c0: mipi1_lpi2c0grp {
+ fsl,pins = <
+ IMX8QM_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL 0xc6000020
+ IMX8QM_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA 0xc6000020
+ IMX8QM_MIPI_DSI1_GPIO0_01_LSIO_GPIO1_IO23 0x00000020
+ >;
+ };
+
pinctrl_flexspi0: flexspi0grp {
fsl,pins = <
IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 0x06000021
@@ -582,6 +815,32 @@
>;
};
+ pinctrl_lvds0_lpi2c1: lvds0lpi2c1grp {
+ fsl,pins = <
+ IMX8QM_LVDS0_I2C1_SCL_LVDS0_I2C1_SCL 0xc600004c
+ IMX8QM_LVDS0_I2C1_SDA_LVDS0_I2C1_SDA 0xc600004c
+ >;
+ };
+
+ pinctrl_lvds1_lpi2c1: lvds1lpi2c1grp {
+ fsl,pins = <
+ IMX8QM_LVDS1_I2C1_SCL_LVDS1_I2C1_SCL 0xc600004c
+ IMX8QM_LVDS1_I2C1_SDA_LVDS1_I2C1_SDA 0xc600004c
+ >;
+ };
+
+ pinctrl_pwm_lvds0: pwmlvds0grp {
+ fsl,pins = <
+ IMX8QM_LVDS0_GPIO00_LVDS0_PWM0_OUT 0x00000020
+ >;
+ };
+
+ pinctrl_pwm_lvds1: pwmlvds1grp {
+ fsl,pins = <
+ IMX8QM_LVDS1_GPIO00_LVDS1_PWM0_OUT 0x00000020
+ >;
+ };
+
pinctrl_sai0: sai0grp {
fsl,pins = <
IMX8QM_SPI0_CS1_AUD_SAI0_TXC 0x0600004c
@@ -600,6 +859,19 @@
>;
};
+ pinctrl_typec: typecgrp {
+ fsl,pins = <
+ IMX8QM_QSPI1A_DATA0_LSIO_GPIO4_IO26 0x00000021
+ >;
+ };
+
+ pinctrl_typec_mux: typecmuxgrp {
+ fsl,pins = <
+ IMX8QM_QSPI1A_SS0_B_LSIO_GPIO4_IO19 0x60
+ IMX8QM_USB_SS3_TC3_LSIO_GPIO4_IO06 0x60
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi
new file mode 100644
index 000000000000..0514d8b2af75
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2024 NXP
+ */
+
+&qm_lvds0_lis_lpcg {
+ clocks = <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+};
+
+&qm_lvds0_pwm_lpcg {
+ clocks = <&clk IMX_SC_R_LVDS_0_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+};
+
+&qm_lvds0_i2c0_lpcg {
+ clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+};
+
+&qm_pwm_lvds0 {
+ clocks = <&qm_lvds0_pwm_lpcg IMX_LPCG_CLK_4>,
+ <&qm_lvds0_pwm_lpcg IMX_LPCG_CLK_0>;
+};
+
+&qm_i2c0_lvds0 {
+ clocks = <&qm_lvds0_i2c0_lpcg IMX_LPCG_CLK_0>,
+ <&qm_lvds0_i2c0_lpcg IMX_LPCG_CLK_4>;
+};
+
+&lvds0_subsys {
+ interrupt-parent = <&irqsteer_lvds0>;
+
+ irqsteer_lvds0: interrupt-controller@56240000 {
+ compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x56240000 0x1000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ clocks = <&qm_lvds0_lis_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_LVDS_0>;
+
+ fsl,channel = <0>;
+ fsl,num-irqs = <32>;
+ };
+
+ lvds0_i2c1_lpcg: clock-controller@56243014 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x56243014 0x4>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>,
+ <&lvds_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "lvds0_i2c1_lpcg_clk",
+ "lvds0_i2c1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_LVDS_0_I2C_0>;
+ };
+
+ i2c1_lvds0: i2c@56247000 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x56247000 0x1000>;
+ interrupts = <9>;
+ clocks = <&lvds0_i2c1_lpcg IMX_LPCG_CLK_0>,
+ <&lvds0_i2c1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_LVDS_0_I2C_0>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi
new file mode 100644
index 000000000000..f4c393fe7204
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2024 NXP
+ */
+
+&mipi0_lis_lpcg {
+ clocks = <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>;
+ clock-output-names = "mipi0_lis_lpcg_ipg_clk";
+};
+
+&mipi0_pwm_lpcg {
+ clocks = <&clk IMX_SC_R_MIPI_0_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&dsi_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "mipi0_pwm_lpcg_clk",
+ "mipi0_pwm_lpcg_ipg_clk";
+};
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
index 61986e0639e5..3ee6e2869e3c 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
@@ -560,11 +560,36 @@
clock-output-names = "spdif1_rx";
};
+ lvds_ipg_clk: clock-controller-lvds-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "lvds0_ipg_clk";
+ };
+
+ dsi_ipg_clk: clock-controller-dsi-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <120000000>;
+ clock-output-names = "dsi_ipg_clk";
+ };
+
+ mipi_pll_div2_clk: clock-controller-mipi-div2-pll {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <432000000>;
+ clock-output-names = "mipi_pll_div2_clk";
+ };
+
/* sorted in register address */
#include "imx8-ss-cm41.dtsi"
#include "imx8-ss-audio.dtsi"
#include "imx8-ss-vpu.dtsi"
#include "imx8-ss-gpu0.dtsi"
+ #include "imx8-ss-mipi0.dtsi"
+ #include "imx8-ss-lvds0.dtsi"
+ #include "imx8-ss-mipi1.dtsi"
+ #include "imx8-ss-lvds1.dtsi"
#include "imx8-ss-img.dtsi"
#include "imx8-ss-dma.dtsi"
#include "imx8-ss-conn.dtsi"
@@ -576,3 +601,5 @@
#include "imx8qm-ss-conn.dtsi"
#include "imx8qm-ss-lsio.dtsi"
#include "imx8qm-ss-audio.dtsi"
+#include "imx8qm-ss-lvds.dtsi"
+#include "imx8qm-ss-mipi.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi
index bc659066e19a..f7bbb2153ae0 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi
@@ -3,10 +3,24 @@
* Copyright 2018-2021 Toradex
*/
+/* Colibri Analogue Inputs */
+&adc0 {
+ status = "okay";
+};
+
+/* Colibri PWM_A */
+&adma_pwm {
+ status = "okay";
+};
+
&colibri_gpio_keys {
status = "okay";
};
+&extcon_usbc_det {
+ status = "okay";
+};
+
/* Colibri Ethernet */
&fec1 {
status = "okay";
@@ -38,6 +52,28 @@
status = "okay";
};
+/* USB PHY for usbotg3 */
+&usb3_phy {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usbotg3 {
+ status = "okay";
+};
+
+&usbotg3_cdns3 {
+ status = "okay";
+};
+
+/* USB PHY for usbotg1 */
+&usbphy1 {
+ status = "okay";
+};
+
/* Colibri SDCard */
&usdhc2 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi
index 9af769ab8ceb..f75499765d85 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi
@@ -19,10 +19,24 @@
};
};
+/* Colibri Analogue Inputs */
+&adc0 {
+ status = "okay";
+};
+
+/* Colibri PWM_A */
+&adma_pwm {
+ status = "okay";
+};
+
&colibri_gpio_keys {
status = "okay";
};
+&extcon_usbc_det {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
@@ -90,6 +104,28 @@
status = "okay";
};
+/* USB PHY for usbotg3 */
+&usb3_phy {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usbotg3 {
+ status = "okay";
+};
+
+&usbotg3_cdns3 {
+ status = "okay";
+};
+
+/* USB PHY for usbotg1 */
+&usbphy1 {
+ status = "okay";
+};
+
/* Colibri SD/MMC Card */
&usdhc2 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi
index 8d06925a8ebd..54393a0c5cbf 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi
@@ -17,10 +17,24 @@
};
};
+/* Colibri Analogue Inputs */
+&adc0 {
+ status = "okay";
+};
+
+/* Colibri PWM_A */
+&adma_pwm {
+ status = "okay";
+};
+
&colibri_gpio_keys {
status = "okay";
};
+&extcon_usbc_det {
+ status = "okay";
+};
+
/* Colibri FastEthernet */
&fec1 {
status = "okay";
@@ -108,6 +122,28 @@
status = "okay";
};
+/* USB PHY for usbotg3 */
+&usb3_phy {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usbotg3 {
+ status = "okay";
+};
+
+&usbotg3_cdns3 {
+ status = "okay";
+};
+
+/* USB PHY for usbotg1 */
+&usbphy1 {
+ status = "okay";
+};
+
/* Colibri SD/MMC Card */
&usdhc2 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
index 49d105eb4769..edba5b582414 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
@@ -23,17 +23,76 @@
};
};
+ extcon_usbc_det: usbc-det {
+ compatible = "linux,extcon-usb-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbc_det>;
+ id-gpios = <&lsio_gpio5 9 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
reg_module_3v3: regulator-module-3v3 {
compatible = "regulator-fixed";
regulator-name = "+V3.3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ reg_module_3v3_avdd: regulator-module-3v3-avdd {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_AVDD_AUDIO";
+ };
+
+ reg_module_vref_1v8: regulator-module-vref-1v8 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vref-1v8";
+ };
+
+ reg_usbh_vbus: regulator-usbh-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1_reg>;
+ gpio = <&lsio_gpio4 3 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "usbh_vbus";
+ };
+
+ sound-card {
+ compatible = "simple-audio-card";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,name = "colibri-imx8x";
+
+ dailink_master: simple-audio-card,codec {
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ sound-dai = <&sgtl5000_a>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai0>;
+ };
+ };
};
-/* TODO Analogue Inputs */
+/* Colibri Analogue Inputs */
+&adc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0>;
+ vref-supply = <&reg_module_vref_1v8>;
+};
-/* TODO Cooling maps for DX */
+/* Colibri PWM_A */
+&adma_pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm_a>;
+};
&cpu_alert0 {
hysteresis = <2000>;
@@ -47,9 +106,20 @@
type = "critical";
};
-/* TODO flexcan1 - 3 */
-
-/* TODO GPU */
+&enet0_lpcg {
+ clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
+ <&conn_axi_clk>,
+ <&clk IMX_SC_R_ENET_0 IMX_SC_C_DISABLE_50>,
+ <&conn_ipg_clk>,
+ <&conn_ipg_clk>;
+ clock-output-names = "enet0_lpcg_timer_clk",
+ "enet0_lpcg_txc_sampling_clk",
+ "enet0_lpcg_ahb_clk",
+ "enet0_lpcg_ref_50mhz_clk",
+ "enet0_lpcg_ipg_clk",
+ "enet0_lpcg_ipg_s_clk";
+};
/* On-module I2C */
&i2c0 {
@@ -60,6 +130,41 @@
pinctrl-0 = <&pinctrl_i2c0>, <&pinctrl_sgtl5000_usb_clk>;
status = "okay";
+ /* USB HUB USB3803 */
+ usb-hub@8 {
+ compatible = "smsc,usb3803";
+ reg = <0x8>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12000000>, <12000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb3503a>;
+ bypass-gpios = <&gpio_expander_43 5 GPIO_ACTIVE_LOW>;
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "refclk";
+ disabled-ports = <2>;
+ initial-mode = <1>;
+ intn-gpios = <&lsio_gpio3 4 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio_expander_43 4 GPIO_ACTIVE_LOW>;
+ };
+
+ sgtl5000_a: audio-codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12000000>, <12000000>;
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ VDDA-supply = <&reg_module_3v3_avdd>;
+ VDDD-supply = <&reg_module_vref_1v8>;
+ VDDIO-supply = <&reg_module_3v3>;
+ };
+
/* Touch controller */
touchscreen@2c {
compatible = "adi,ad7879-1";
@@ -77,6 +182,21 @@
adi,conversion-interval = /bits/ 8 <255>;
status = "disabled";
};
+
+ gpio_expander_43: gpio@43 {
+ compatible = "fcs,fxl6408";
+ reg = <0x43>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "Wi-Fi_W_DISABLE",
+ "Wi-Fi_WKUP_WLAN",
+ "PWR_EN_+V3.3_WiFi_N",
+ "PCIe_REF_CLK_EN",
+ "USB_RESET_N",
+ "USB_BYPASS_N",
+ "Wi-Fi_PDn",
+ "Wi-Fi_WKUP_BT";
+ };
};
/* TODO i2c lvds0 accessible on FFC (X2) */
@@ -321,13 +441,74 @@
pinctrl-names = "default";
};
+/* VPU Mailboxes */
+&mu_m0 {
+ status = "okay";
+};
+
+&mu1_m0 {
+ status = "okay";
+};
+
/* TODO MIPI CSI */
/* TODO MIPI DSI with DSI-to-HDMI bridge lt8912 */
/* TODO on-module PCIe for Wi-Fi */
-/* TODO On-module i2s / Audio */
+/* On-module I2S */
+&sai0 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai0>;
+ status = "okay";
+};
+
+&thermal_zones {
+ pmic-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tsens IMX_SC_R_PMIC_0>;
+
+ trips {
+ pmic_alert0: trip0 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ pmic_crit0: trip1 {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ pmic_cooling_map0: map0 {
+ trip = <&pmic_alert0>;
+ cooling-device = <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
+
+&usbotg1 {
+ adp-disable;
+ disable-over-current;
+ extcon = <&extcon_usbc_det &extcon_usbc_det>;
+ hnp-disable;
+ power-active-high;
+ srp-disable;
+ vbus-supply = <&reg_usbh_vbus>;
+};
+
+&usbotg3_cdns3 {
+ dr_mode = "host";
+};
/* On-module eMMC */
&usdhc1 {
@@ -356,11 +537,24 @@
no-1-8-v;
};
-/* TODO USB Client/Host */
+&vpu {
+ compatible = "nxp,imx8qxp-vpu";
+ status = "okay";
+};
-/* TODO USB Host */
+/* VPU Decoder */
+&vpu_core0 {
+ reg = <0x2d040000 0x10000>;
+ memory-region = <&decoder_boot>, <&decoder_rpc>;
+ status = "okay";
+};
-/* TODO VPU Encoder/Decoder */
+/* VPU Encoder */
+&vpu_core1 {
+ reg = <0x2d050000 0x10000>;
+ memory-region = <&encoder_boot>, <&encoder_rpc>;
+ status = "okay";
+};
&iomuxc {
/* On-module touch pen-down interrupt */
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index a15987f49e8d..8d036b3962e9 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -62,6 +62,15 @@
};
+ reg_vdd_12v: regulator-vdd-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&pcal6524 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
reg_vref_1v8: regulator-adc-vref {
compatible = "regulator-fixed";
regulator-name = "vref_1v8";
@@ -80,6 +89,68 @@
off-on-delay-us = <12000>;
enable-active-high;
};
+
+ backlight_lvds: backlight-lvds {
+ compatible = "pwm-backlight";
+ pwms = <&adp5585 0 100000 0>;
+ brightness-levels = <0 100>;
+ num-interpolated-steps = <100>;
+ default-brightness-level = <100>;
+ power-supply = <&reg_vdd_12v>;
+ enable-gpios = <&adp5585 9 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+ };
+
+ bt_sco_codec: bt-sco-codec {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <1>;
+ };
+
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "bt-sco-audio";
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,bitclock-master = <&btcpu>;
+
+ btcpu: simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&bt_sco_codec 1>;
+ };
+ };
+
+ sound-micfil {
+ compatible = "fsl,imx-audio-card";
+ model = "micfil-audio";
+
+ pri-dai-link {
+ link-name = "micfil hifi";
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&micfil>;
+ };
+ };
+ };
+
+ sound-xcvr {
+ compatible = "fsl,imx-audio-card";
+ model = "imx-audio-xcvr";
+
+ pri-dai-link {
+ link-name = "XCVR PCM";
+
+ cpu {
+ sound-dai = <&xcvr>;
+ };
+ };
+ };
};
&adc1 {
@@ -145,9 +216,19 @@
};
};
+&lpi2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c1>;
+ status = "okay";
+
+ inertial-meter@6a {
+ compatible = "st,lsm6dso";
+ reg = <0x6a>;
+ };
+};
+
&lpi2c2 {
- #address-cells = <1>;
- #size-cells = <0>;
clock-frequency = <400000>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&pinctrl_lpi2c2>;
@@ -241,11 +322,19 @@
};
};
};
+
+ adp5585: io-expander@34 {
+ compatible = "adi,adp5585-00", "adi,adp5585";
+ reg = <0x34>;
+ vdd-supply = <&buck4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-reserved-ranges = <5 1>;
+ #pwm-cells = <3>;
+ };
};
&lpi2c3 {
- #address-cells = <1>;
- #size-cells = <0>;
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpi2c3>;
@@ -337,6 +426,16 @@
status = "okay";
};
+&micfil {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_pdm>;
+ pinctrl-1 = <&pinctrl_pdm_sleep>;
+ assigned-clocks = <&clk IMX93_CLK_PDM>;
+ assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>;
+ assigned-clock-rates = <49152000>;
+ status = "okay";
+};
+
&mu1 {
status = "okay";
};
@@ -345,6 +444,17 @@
status = "okay";
};
+&sai1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_sai1>;
+ pinctrl-1 = <&pinctrl_sai1_sleep>;
+ assigned-clocks = <&clk IMX93_CLK_SAI1>;
+ assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>;
+ assigned-clock-rates = <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
&usbotg1 {
dr_mode = "otg";
hnp-disable;
@@ -408,6 +518,18 @@
status = "okay";
};
+&xcvr {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pinctrl_spdif>;
+ pinctrl-1 = <&pinctrl_spdif_sleep>;
+ assigned-clocks = <&clk IMX93_CLK_SPDIF>,
+ <&clk IMX93_CLK_AUDIO_XCVR>;
+ assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>,
+ <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
+ assigned-clock-rates = <12288000>, <200000000>;
+ status = "okay";
+};
+
&iomuxc {
pinctrl_eqos: eqosgrp {
fsl,pins = <
@@ -508,6 +630,13 @@
>;
};
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e
+ MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e
+ >;
+ };
+
pinctrl_lpi2c2: lpi2c2grp {
fsl,pins = <
MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
@@ -528,6 +657,40 @@
>;
};
+ pinctrl_pdm: pdmgrp {
+ fsl,pins = <
+ MX93_PAD_PDM_CLK__PDM_CLK 0x31e
+ MX93_PAD_PDM_BIT_STREAM0__PDM_BIT_STREAM00 0x31e
+ MX93_PAD_PDM_BIT_STREAM1__PDM_BIT_STREAM01 0x31e
+ >;
+ };
+
+ pinctrl_pdm_sleep: pdmsleepgrp {
+ fsl,pins = <
+ MX93_PAD_PDM_CLK__GPIO1_IO08 0x31e
+ MX93_PAD_PDM_BIT_STREAM0__GPIO1_IO09 0x31e
+ MX93_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x31e
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ MX93_PAD_SAI1_TXC__SAI1_TX_BCLK 0x31e
+ MX93_PAD_SAI1_TXFS__SAI1_TX_SYNC 0x31e
+ MX93_PAD_SAI1_TXD0__SAI1_TX_DATA00 0x31e
+ MX93_PAD_SAI1_RXD0__SAI1_RX_DATA00 0x31e
+ >;
+ };
+
+ pinctrl_sai1_sleep: sai1sleepgrp {
+ fsl,pins = <
+ MX93_PAD_SAI1_TXC__GPIO1_IO12 0x51e
+ MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x51e
+ MX93_PAD_SAI1_TXD0__GPIO1_IO13 0x51e
+ MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x51e
+ >;
+ };
+
/* need to configure SION for the data and cmd pads, refer to ERR052021 */
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
@@ -585,6 +748,20 @@
>;
};
+ pinctrl_spdif: spdifgrp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO22__SPDIF_IN 0x31e
+ MX93_PAD_GPIO_IO23__SPDIF_OUT 0x31e
+ >;
+ };
+
+ pinctrl_spdif_sleep: spdifsleepgrp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO22__GPIO2_IO22 0x31e
+ MX93_PAD_GPIO_IO23__GPIO2_IO23 0x31e
+ >;
+ };
+
pinctrl_usdhc2_gpio: usdhc2gpiogrp {
fsl,pins = <
MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
diff --git a/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts
new file mode 100644
index 000000000000..236a44c1782a
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/usb/pd.h>
+#include "imx93.dtsi"
+
+/ {
+ model = "NXP i.MX93 14X14 EVK board";
+ compatible = "fsl,imx93-14x14-evk", "fsl,imx93";
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ alloc-ranges = <0 0x80000000 0 0x40000000>;
+ size = <0 0x10000000>;
+ linux,cma-default;
+ };
+
+ vdev0vring0: vdev0vring0@a4000000 {
+ reg = <0 0xa4000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@a4008000 {
+ reg = <0 0xa4008000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring0: vdev1vring0@a4010000 {
+ reg = <0 0xa4010000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring1: vdev1vring1@a4018000 {
+ reg = <0 0xa4018000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table: rsc-table@2021e000 {
+ reg = <0 0x2021e000 0 0x1000>;
+ no-map;
+ };
+
+ vdevbuffer: vdevbuffer@a4020000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa4020000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ reg_can1_stby: regulator-can1-stby {
+ compatible = "regulator-fixed";
+ regulator-name = "can1-stby";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pcal6524_2 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_can1_en>;
+ };
+
+ reg_can1_en: regulator-can1-en {
+ compatible = "regulator-fixed";
+ regulator-name = "can1-en";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pcal6524_2 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ regulator-name = "can2-stby";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pcal6524_2 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_can2_en>;
+ };
+
+ reg_can2_en: regulator-can2-en {
+ compatible = "regulator-fixed";
+ regulator-name = "can2-en";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pcal6524_2 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ off-on-delay-us = <12000>;
+ };
+
+ reg_vdd_12v: regulator-vdd-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "reg_vdd_12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&pcal6524 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_vref_1v8: regulator-adc-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "vref_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&cm33 {
+ mbox-names = "tx", "rx", "rxdb";
+ mboxes = <&mu1 0 1>,
+ <&mu1 1 1>,
+ <&mu1 3 1>;
+ memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>,
+ <&vdev1vring0>, <&vdev1vring1>, <&rsc_table>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy2>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <5000000>;
+
+ ethphy2: ethernet-phy@2 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <2>;
+ eee-broken-1000t;
+ reset-gpios = <&pcal6524 16 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ realtek,clkout-disable;
+ };
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_can1_stby>;
+ status = "okay";
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ xceiver-supply = <&reg_can2_stby>;
+ status = "okay";
+};
+
+&lpi2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c1>;
+ status = "okay";
+
+ lsm6dsm@6a {
+ compatible = "st,lsm6dso";
+ reg = <0x6a>;
+ };
+};
+
+&lpi2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ status = "okay";
+
+ pcal6524_2: gpio@20 {
+ compatible = "nxp,pcal6524";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pcal6524: gpio@22 {
+ compatible = "nxp,pcal6524";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6524>;
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&lpi2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ status = "okay";
+};
+
+&lpuart1 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&mu1 {
+ status = "okay";
+};
+
+&mu2 {
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ disable-over-current;
+ samsung,picophy-pre-emp-curr-control = <3>;
+ samsung,picophy-dc-vol-level-adjust = <7>;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ no-mmc;
+ status = "okay";
+};
+
+&wdog3 {
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX93_PAD_PDM_CLK__CAN1_TX 0x139e
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO25__CAN2_TX 0x139e
+ MX93_PAD_GPIO_IO27__CAN2_RX 0x139e
+ >;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e
+ MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_pcal6524: pcal6524grp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e
+ >;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x57e
+ MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e
+ MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e
+ MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e
+ MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e
+ MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x58e
+ MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x57e
+ MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x57e
+ MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x57e
+ MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x57e
+ MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x58e
+ MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x57e
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x31e
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX93_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x31e
+ MX93_PAD_DAP_TDI__LPUART5_RX 0x31e
+ MX93_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x31e
+ MX93_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x31e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013fe
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013fe
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013fe
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013fe
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013fe
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013fe
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013fe
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
index 950dece83c24..f8a73612fa05 100644
--- a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
@@ -178,8 +178,6 @@
};
&lpi2c2 {
- #address-cells = <1>;
- #size-cells = <0>;
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpi2c2>;
diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
new file mode 100644
index 000000000000..89e97c604bd3
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright (C) 2024 Kontron Electronics GmbH
+ */
+
+/dts-v1/;
+
+#include "imx93-kontron-osm-s.dtsi"
+
+/ {
+ model = "Kontron BL i.MX93 OSM-S";
+ compatible = "kontron,imx93-bl-osm-s", "kontron,imx93-osm-s", "fsl,imx93";
+
+ aliases {
+ ethernet0 = &fec;
+ ethernet1 = &eqos;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led1 {
+ label = "led1";
+ gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ pwm-beeper {
+ compatible = "pwm-beeper";
+ pwms = <&tpm6 1 5000 0>;
+ };
+
+ reg_vcc_panel: regulator-vcc-panel {
+ compatible = "regulator-fixed";
+ gpio = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VCC_PANEL";
+ };
+};
+
+&eqos { /* Second ethernet (OSM-S ETH_B) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos_rgmii>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id4f51.e91b";
+ reg = <1>;
+ reset-assert-us = <10000>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&fec { /* First ethernet (OSM-S ETH_A) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_rgmii>;
+ phy-connection-type = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-id4f51.e91b";
+ reg = <1>;
+ reset-assert-us = <10000>;
+ reset-gpios = <&gpio2 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&flexcan1 {
+ status = "okay";
+};
+
+&lpi2c2 {
+ status = "okay";
+
+ gpio_expander_dio: gpio@20 {
+ compatible = "ti,tca6408";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+		gpio-line-names = "DIO1_OUT", "DIO1_IN", "DIO2_OUT", "DIO2_IN",
+				  "DIO3_OUT", "DIO3_IN", "DIO4_OUT", "DIO4_IN";
+ interrupt-parent = <&gpio4>;
+ interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&lpspi8 {
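+	/* run the LPSPI8 root at 100 MHz from SYS_PLL_PFD0_DIV2, presumably so the EERAM's 20 MHz SCK divides down evenly */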
+ assigned-clocks = <&clk IMX93_CLK_LPSPI8>;
+ assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
+ assigned-clock-rates = <100000000>;
+ status = "okay";
+
+ eeram@0 {
+ compatible = "microchip,48l640";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&lpuart1 {
+ status = "okay";
+};
+
+&lpuart7 {
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&lpuart6 {
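+	/* RS-485 half-duplex from boot; the RTS pad is assumed to drive the transceiver DE line */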
+ linux,rs485-enabled-at-boot-time;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&tpm6 {
+ status = "okay";
+};
+
+&usbotg1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+
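+	/* onboard hub: "usb424,2514" encodes USB VID 0x0424 (SMSC/Microchip) and PID 0x2514, a 4-port USB 2.0 hub */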
+	usb-hub@1 {
+ compatible = "usb424,2514";
+ reg = <1>;
+ };
+};
+
+&usbotg2 {
+ adp-disable;
+ hnp-disable;
+ srp-disable;
+ disable-over-current;
+ dr_mode = "otg";
+ usb-role-switch;
+ status = "okay";
+};
+
+&usdhc2 {
+ vmmc-supply = <&reg_vdd_3v3>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi
new file mode 100644
index 000000000000..47c1363a2f99
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright (C) 2024 Kontron Electronics GmbH
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "imx93.dtsi"
+
+/ {
+ model = "Kontron OSM-S i.MX93";
+ compatible = "kontron,imx93-osm-s", "fsl,imx93";
+
+ aliases {
+ rtc0 = &rv3028;
+ rtc1 = &bbnsm_rtc;
+ };
+
+ memory@40000000 {
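+		/* 2 GiB (0x80000000 bytes) at 0x40000000; the bootloader typically fixes this up to the fitted size */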
+ device_type = "memory";
+		reg = <0x0 0x40000000 0x0 0x80000000>;
+ };
+
+ chosen {
+ stdout-path = &lpuart1;
+ };
+
+ reg_usdhc2_vcc: regulator-usdhc2-vcc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vcc>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "VCC_SDIO_A";
+ };
+
+ reg_vdd_carrier: regulator-vdd-carrier {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_vdd_carrier>;
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "VDD_CARRIER";
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+
+ regulator-state-disk {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&flexcan1 { /* OSM-S CAN_A */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+};
+
+&flexcan2 { /* OSM-S CAN_B */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1>;
+ gpio-line-names = "", "", "I2C_A_SCL", "I2C_A_SDA",
+ "UART_CON_RX", "UART_CON_TX", "UART_C_RX", "UART_C_TX",
+ "CAN_A_TX", "CAN_A_RX", "GPIO_A_0", "SPI_A_CS0",
+ "SPI_A_SDI", "SPI_A_SCK","SPI_A_SDO";
+};
+
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+ gpio-line-names = "I2C_B_SDA", "I2C_B_SCL", "GPIO_B_1", "GPIO_A_2",
+ "UART_B_TX", "UART_B_RX", "UART_B_RTS", "UART_B_CTS",
+ "UART_A_TX", "UART_A_RX", "UART_A_RTS", "UART_A_CTS",
+ "SPI_B_CS0", "SPI_B_SDI", "SPI_B_SDO", "SPI_B_SCK",
+ "I2S_BITCLK", "I2S_MCLK", "GPIO_A_1", "I2S_A_DATA_OUT",
+ "I2S_A_DATA_IN", "PWM_2", "GPIO_A_3", "PWM_1",
+ "PWM_0", "CAN_B_TX", "I2S_LRCLK", "CAN_B_RX", "GPIO_A_4",
+ "GPIO_A_5";
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3>;
+ gpio-line-names = "SDIO_A_CD", "SDIO_A_CLK", "SDIO_A_CMD", "SDIO_A_D0",
+ "SDIO_A_D1", "SDIO_A_D2", "SDIO_A_D3", "SDIO_A_PWR_EN",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "SDIO_B_CLK", "SDIO_B_CMD", "SDIO_B_D0", "SDIO_B_D1",
+ "SDIO_B_D2", "SDIO_B_D3", "GPIO_A_6", "GPIO_A_7";
+};
+
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio4>;
+ gpio-line-names = "ETH_B_MDC", "ETH_B_MDIO", "ETH_B_TXD4", "ETH_B_TXD3",
+ "ETH_B_TXD2", "ETH_B_TXD1", "ETH_B_TX_EN", "ETH_B_TX_CLK",
+ "ETH_B_RX_CTL", "ETH_B_RX_CLK", "ETH_B_RXD0", "ETH_B_RXD1",
+ "ETH_B_RXD2", "ETH_B_RXD3", "ETH_MDC", "ETH_MDIO",
+ "ETH_A_TXD3", "ETH_A_TXD2", "ETH_A_TXD1", "ETH_A_TXD0",
+ "ETH_A_TX_EN", "ETH_A_TX_CLK", "ETH_A_RX_CTL", "ETH_A_RX_CLK",
+ "ETH_A_RXD0", "ETH_A_RXD1", "ETH_A_RXD2", "ETH_A_RXD3",
+ "GPIO_B_0", "CARRIER_PWR_EN";
+};
+
+&lpi2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c1>;
+ status = "okay";
+
+ pca9451: pmic@25 {
+ compatible = "nxp,pca9451a";
+ reg = <0x25>;
+ nxp,i2c-lt-enable;
+
+ regulators {
+ reg_vdd_soc: BUCK1 { /* dual phase with BUCK3 */
+ regulator-name = "+0V8_VDD_SOC (BUCK1)";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ reg_vddq_ddr: BUCK2 {
+ regulator-name = "+0V6_VDDQ_DDR (BUCK2)";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <600000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ reg_vdd_3v3: BUCK4 {
+ regulator-name = "+3V3 (BUCK4)";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_vdd_1v8: BUCK5 {
+ regulator-name = "+1V8 (BUCK5)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_nvcc_dram: BUCK6 {
+ regulator-name = "+1V1_NVCC_DRAM (BUCK6)";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_nvcc_snvs: LDO1 {
+ regulator-name = "+1V8_NVCC_SNVS (LDO1)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_vdd_ana: LDO4 {
+ regulator-name = "+0V8_VDD_ANA (LDO4)";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_nvcc_sd: LDO5 {
+ regulator-name = "NVCC_SD (LDO5)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+
+ eeprom@50 {
+ compatible = "onnn,n24s64b", "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ size = <8192>;
+ num-addresses = <1>;
+ };
+
+ rv3028: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ };
+};
+
+&lpi2c2 { /* OSM-S I2C_A */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+};
+
+&lpi2c3 { /* OSM-S I2C_B */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+};
+
+&lpspi1 { /* OSM-S SPI_A */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi1>;
+ cs-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+};
+
+&lpspi8 { /* OSM-S SPI_B */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi8>;
+ cs-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+};
+
+&lpuart1 { /* OSM-S UART_CON */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart1>;
+};
+
+&lpuart2 { /* OSM-S UART_C */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart2>;
+};
+
+&lpuart6 { /* OSM-S UART_B */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart6>;
+};
+
+&lpuart7 { /* OSM-S UART_A */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart7>;
+};
+
+&tpm3 { /* OSM-S PWM_0 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm3>;
+};
+
+&tpm4 { /* OSM-S PWM_2 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm4>;
+};
+
+&tpm6 { /* OSM-S PWM_1 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm6>;
+};
+
+&usdhc1 { /* eMMC */
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vmmc-supply = <&reg_vdd_3v3>;
+ vqmmc-supply = <&reg_vdd_1v8>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 { /* OSM-S SDIO_A */
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ vmmc-supply = <&reg_usdhc2_vcc>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+};
+
+&usdhc3 { /* OSM-S SDIO_B */
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ vqmmc-supply = <&reg_vdd_1v8>;
+};
+
+&wdog3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_enet_rgmii: enetrgmiigrp {
+ fsl,pins = <
+ MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e /* ETH_MDC */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x57e /* ETH_MDIO */
+ MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e /* ETH_A_(S)(R)(G)MII_RXD0 */
+ MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e /* ETH_A_(S)(R)(G)MII_RXD1 */
+ MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e /* ETH_A_(R)(G)MII_RXD2 */
+ MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e /* ETH_A_(R)(G)MII_RXD3 */
+ MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe /* ETH_A_(R)(G)MII_RX_CLK */
+ MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e /* ETH_A_(R)(G)MII_RX_DV(_ER) */
+ MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x57e /* ETH_A_(S)(R)(G)MII_TXD0 */
+ MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x57e /* ETH_A_(S)(R)(G)MII_TXD1 */
+ MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x57e /* ETH_A_(S)(R)(G)MII_TXD2 */
+ MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x57e /* ETH_A_(S)(R)(G)MII_TXD3 */
+ MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x5fe /* ETH_A_(R)(G)MII_TX_CLK */
+ MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x57e /* ETH_A_(R)(G)MII_TX_EN(_ER) */
+ >;
+ };
+
+ pinctrl_eqos_rgmii: eqosrgmiigrp {
+ fsl,pins = <
+ MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x57e /* ETH_B_MDC */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x57e /* ETH_B_MDIO */
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e /* ETH_B_(S)(R)(G)MII_RXD0 */
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e /* ETH_B_(S)(R)(G)MII_RXD1 */
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e /* ETH_B_(R)(G)MII_RXD2 */
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e /* ETH_B_(R)(G)MII_RXD3 */
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x57e /* ETH_B_(R)(G)MII_RX_CLK */
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e /* ETH_B_(R)(G)MII_RX_DV(_ER) */
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x57e /* ETH_B_(S)(R)(G)MII_TXD0 */
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x57e /* ETH_B_(S)(R)(G)MII_TXD1 */
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x57e /* ETH_B_(S)(R)(G)MII_TXD2 */
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x57e /* ETH_B_(S)(R)(G)MII_TXD3 */
+ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x57e /* ETH_B_(R)(G)MII_TX_CLK */
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x57e /* ETH_B_(R)(G)MII_TX_EN(_ER) */
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX93_PAD_PDM_CLK__CAN1_TX 0x139e /* CAN_A_TX */
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e /* CAN_A_RX */
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO25__CAN2_TX 0x139e /* CAN_B_TX */
+ MX93_PAD_GPIO_IO27__CAN2_RX 0x139e /* CAN_B_RX */
+ >;
+ };
+
+ pinctrl_gpio1: gpio1grp {
+ fsl,pins = <
+ MX93_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x31e /* GPIO_A_0 */
+ >;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO18__GPIO2_IO18 0x31e /* GPIO_A_1 */
+ MX93_PAD_GPIO_IO03__GPIO2_IO03 0x31e /* GPIO_A_2 */
+ MX93_PAD_GPIO_IO22__GPIO2_IO22 0x31e /* GPIO_A_3 */
+ MX93_PAD_GPIO_IO28__GPIO2_IO28 0x31e /* GPIO_A_4 */
+ MX93_PAD_GPIO_IO29__GPIO2_IO29 0x31e /* GPIO_A_5 */
+ MX93_PAD_GPIO_IO02__GPIO2_IO02 0x31e /* GPIO_B_1 */
+ >;
+ };
+
+ pinctrl_gpio3: gpio3grp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x31e /* GPIO_A_6 */
+ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e /* GPIO_A_7 */
+ >;
+ };
+
+ pinctrl_gpio4: gpio4grp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO3__GPIO4_IO28 0x31e /* GPIO_B_0 */
+ >;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e
+ MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e /* I2C_A_SCL */
+ MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e /* I2C_A_SDA */
+ >;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO01__LPI2C3_SCL 0x40000b9e /* I2C_B_SCL */
+ MX93_PAD_GPIO_IO00__LPI2C3_SDA 0x40000b9e /* I2C_B_SDA */
+ >;
+ };
+
+ pinctrl_lpspi1: lpspi1grp {
+ fsl,pins = <
+ MX93_PAD_SAI1_TXC__LPSPI1_SIN 0x3fe /* SPI_A_SDI_(IO0) */
+ MX93_PAD_SAI1_RXD0__LPSPI1_SOUT 0x3fe /* SPI_A_SDO_(IO1) */
+ MX93_PAD_SAI1_TXD0__LPSPI1_SCK 0x3fe /* SPI_A_SCK */
+ MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x3fe /* SPI_A_CS0# */
+ >;
+ };
+
+ pinctrl_lpspi8: lpspi8grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO13__LPSPI8_SIN 0x3fe /* SPI_B_SDI */
+ MX93_PAD_GPIO_IO14__LPSPI8_SOUT 0x3fe /* SPI_B_SDO */
+ MX93_PAD_GPIO_IO15__LPSPI8_SCK 0x3fe /* SPI_B_SCK */
+ MX93_PAD_GPIO_IO12__GPIO2_IO12 0x3fe /* SPI_B_CS0# */
+ >;
+ };
+
+ pinctrl_lpuart1: lpuart1grp {
+ fsl,pins = <
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x31e /* UART_CON_RX */
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x31e /* UART_CON_TX */
+ >;
+ };
+
+ pinctrl_lpuart2: lpuart2grp {
+ fsl,pins = <
+ MX93_PAD_UART2_RXD__LPUART2_RX 0x31e /* UART_C_RX */
+ MX93_PAD_UART2_TXD__LPUART2_TX 0x31e /* UART_C_TX */
+ >;
+ };
+
+ pinctrl_lpuart6: lpuart6grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e /* UART_B_RX */
+ MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e /* UART_B_TX */
+ MX93_PAD_GPIO_IO07__LPUART6_RTS_B 0x31e /* UART_B_CTS */
+ MX93_PAD_GPIO_IO06__LPUART6_CTS_B 0x31e /* UART_B_RTS */
+ >;
+ };
+
+ pinctrl_lpuart7: lpuart7grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO09__LPUART7_RX 0x31e /* UART_A_RX */
+ MX93_PAD_GPIO_IO08__LPUART7_TX 0x31e /* UART_A_TX */
+ MX93_PAD_GPIO_IO11__LPUART7_RTS_B 0x31e /* UART_A_CTS */
+ MX93_PAD_GPIO_IO10__LPUART7_CTS_B 0x31e /* UART_A_RTS */
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vcc: regusdhc2vccgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e /* SDIO_A_PWR_EN */
+ >;
+ };
+
+ pinctrl_reg_vdd_carrier: regvddcarriergrp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x31e /* CARRIER_PWR_EN */
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO20__SAI3_RX_DATA00 0x31e /* I2S_A_DATA_IN */
+ MX93_PAD_GPIO_IO19__SAI3_TX_DATA00 0x31e /* I2S_A_DATA_OUT */
+ MX93_PAD_GPIO_IO17__SAI3_MCLK 0x31e /* I2S_MCLK */
+ MX93_PAD_GPIO_IO26__SAI3_TX_SYNC 0x31e /* I2S_LRCLK */
+ MX93_PAD_GPIO_IO16__SAI3_TX_BCLK 0x31e /* I2S_BITCLK */
+ >;
+ };
+
+ pinctrl_tpm3: tpm3grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO24__TPM3_CH3 0x57e /* PWM_0 */
+ >;
+ };
+
+ pinctrl_tpm4: tpm4grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO21__TPM4_CH1 0x57e /* PWM_2 */
+ >;
+ };
+
+ pinctrl_tpm6: tpm6grp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO23__TPM6_CH1 0x57e /* PWM_1 */
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+	/* SION must be set on the CMD and data pads, see errata ERR052021 */
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013fe
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013fe
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013fe
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013fe
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013fe
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013fe
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013fe
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582 /* SDIO_A_CLK */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382 /* SDIO_A_CMD */
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382 /* SDIO_A_D0 */
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382 /* SDIO_A_D1 */
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382 /* SDIO_A_D2 */
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382 /* SDIO_A_D3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e /* SDIO_A_CLK */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e /* SDIO_A_CMD */
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e /* SDIO_A_D0 */
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e /* SDIO_A_D1 */
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e /* SDIO_A_D2 */
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e /* SDIO_A_D3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe /* SDIO_A_CLK */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe /* SDIO_A_CMD */
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe /* SDIO_A_D0 */
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe /* SDIO_A_D1 */
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe /* SDIO_A_D2 */
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe /* SDIO_A_D3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e /* SDIO_A_CD# */
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX93_PAD_SD3_CLK__USDHC3_CLK 0x1582 /* SDIO_B_CLK */
+ MX93_PAD_SD3_CMD__USDHC3_CMD 0x40001382 /* SDIO_B_CMD */
+ MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x40001382 /* SDIO_B_D0 */
+ MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x40001382 /* SDIO_B_D1 */
+ MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x40001382 /* SDIO_B_D2 */
+ MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x40001382 /* SDIO_B_D3 */
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD3_CLK__USDHC3_CLK 0x158e /* SDIO_B_CLK */
+ MX93_PAD_SD3_CMD__USDHC3_CMD 0x4000138e /* SDIO_B_CMD */
+ MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x4000138e /* SDIO_B_D0 */
+ MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x4000138e /* SDIO_B_D1 */
+ MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x4000138e /* SDIO_B_D2 */
+ MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x4000138e /* SDIO_B_D3 */
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX93_PAD_SD3_CLK__USDHC3_CLK 0x15fe /* SDIO_B_CLK */
+ MX93_PAD_SD3_CMD__USDHC3_CMD 0x400013fe /* SDIO_B_CMD */
+ MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x400013fe /* SDIO_B_D0 */
+ MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x400013fe /* SDIO_B_D1 */
+ MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x400013fe /* SDIO_B_D2 */
+ MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x400013fe /* SDIO_B_D3 */
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
index 852dd3d2eac7..599df32976e2 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
@@ -26,6 +26,8 @@
aliases {
eeprom0 = &eeprom0;
+ ethernet0 = &fec;
+ ethernet1 = &eqos;
rtc0 = &pcf85063;
rtc1 = &bbnsm_rtc;
};
@@ -274,6 +276,16 @@
};
&gpio1 {
+ gpio-line-names =
+ /* 00 */ "", "", "USB_C_ALERT#", "PMIC_IRQ#",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "BM2_TEMP_EVENT_MOD#",
+ /* 12 */ "PEX_INT#", "", "RTC_EVENT#", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "", "", "";
+
expander-irq-hog {
gpio-hog;
gpios = <12 GPIO_ACTIVE_LOW>;
@@ -289,6 +301,45 @@
};
};
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+
+ gpio-line-names =
+ /* 00 */ "SPI6_PCS0#", "", "", "",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "FAN_RPM", "MIPI_CSI_TRIGGER", "MIPI_CSI_SYNC",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "X1_11", "X1_21", "X1_17", "X1_13",
+ /* 20 */ "X1_15", "X1_9", "", "",
+ /* 24 */ "", "", "X1_7", "",
+ /* 28 */ "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ /* 00 */ "SD2_CD#", "", "", "",
+ /* 04 */ "", "", "", "SD2_RST#",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "ENET1_INT#", "ENET2_INT#",
+ /* 28 */ "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ /* 00 */ "", "", "", "",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "DP_INT", "", "";
+};
+
&lpi2c3 {
#address-cells = <1>;
#size-cells = <0>;
@@ -495,6 +546,22 @@
status = "okay";
};
+&pcf85063 {
+	/* RTC_EVENT# from the SoM is connected on the mainboard */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf85063>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&se97_som {
+	/* TEMP_EVENT# from the SoM is connected on the mainboard */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_temp_sensor_som>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
+
&tpm5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tpm5>;
@@ -533,7 +600,7 @@
samsung,picophy-dc-vol-level-adjust = <7>;
status = "okay";
- hub_2_0: hub@1 {
+ hub_2_0: usb-hub@1 {
compatible = "usb424,2517";
reg = <1>;
reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>;
@@ -559,22 +626,23 @@
pinctrl_eqos: eqosgrp {
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000051e
- /* PD | FSEL_2 | DSE X6 */
- MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
- MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
- MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e
- MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e
- /* PD | FSEL_3 | DSE X6 */
- MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe
- MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x1000
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x1000
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x1000
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x1400
/* PD | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e
- MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e
- MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e
- MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e
- MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e
/* PD | FSEL_3 | DSE X3 */
MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e
>;
@@ -582,7 +650,8 @@
pinctrl_eqos_phy: eqosphygrp {
fsl,pins = <
- MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1306
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1000
>;
};
@@ -590,15 +659,16 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000051e
- /* PD | FSEL_2 | DSE X6 */
- MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e
- MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e
- MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e
- MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e
- /* PD | FSEL_3 | DSE X6 */
- MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe
- MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
+ MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
+ MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x1000
+ MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x1000
+ MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x1000
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x1400
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x51e
MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x51e
@@ -612,147 +682,224 @@
pinctrl_fec_phy: fecphygrp {
fsl,pins = <
- MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1306
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1000
>;
};
pinctrl_flexcan1: flexcan1grp {
fsl,pins = <
- MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e
- MX93_PAD_PDM_CLK__CAN1_TX 0x139e
+ /* HYS | PU | FSEL_0 | DSE no drive */
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x1200
+ /* PU | FSEL_3 | DSE X4 */
+ MX93_PAD_PDM_CLK__CAN1_TX 0x039e
>;
};
pinctrl_flexcan2: flexcan2grp {
fsl,pins = <
- MX93_PAD_GPIO_IO25__CAN2_TX 0x139e
- MX93_PAD_GPIO_IO27__CAN2_RX 0x139e
+ /* HYS | PU | FSEL_0 | DSE no drive */
+ MX93_PAD_GPIO_IO27__CAN2_RX 0x1200
+ /* PU | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO25__CAN2_TX 0x039e
+ >;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <
+ /* HYS | PD | FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO16__GPIO2_IO16 0x151e
+ MX93_PAD_GPIO_IO17__GPIO2_IO17 0x151e
+ MX93_PAD_GPIO_IO18__GPIO2_IO18 0x151e
+ MX93_PAD_GPIO_IO19__GPIO2_IO19 0x151e
+ MX93_PAD_GPIO_IO20__GPIO2_IO20 0x151e
+ MX93_PAD_GPIO_IO21__GPIO2_IO21 0x151e
+ MX93_PAD_GPIO_IO26__GPIO2_IO26 0x151e
+ >;
+ };
+
+ pinctrl_jtag: jtaggrp {
+ fsl,pins = <
+ MX93_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x051e
+ MX93_PAD_DAP_TDI__JTAG_MUX_TDI 0x1200
+ MX93_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x031e
+ MX93_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x1200
>;
};
pinctrl_lpi2c3: lpi2c3grp {
fsl,pins = <
- MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
- MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ /* SION | HYS | OD | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x4000199e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x4000199e
>;
};
pinctrl_lpi2c5: lpi2c5grp {
fsl,pins = <
- MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x40000b9e
- MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x40000b9e
+ /* SION | HYS | OD | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x4000199e
+ MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x4000199e
>;
};
pinctrl_lpspi6: lpspi6grp {
fsl,pins = <
- MX93_PAD_GPIO_IO00__LPSPI6_PCS0 0x3fe
- MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x3fe
- MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x3fe
- MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x3fe
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO00__LPSPI6_PCS0 0x011e
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x1400
+ /* PD | FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x051e
+ MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x051e
+ >;
+ };
+
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <
+ MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000
+ >;
+ };
+
+ pinctrl_mipi_csi: mipicsigrp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO3__CCMSRCGPCMIX_CLKO3 0x051e /* MCLK */
+ MX93_PAD_GPIO_IO10__GPIO2_IO10 0x051e /* TRIGGER */
+ MX93_PAD_GPIO_IO11__GPIO2_IO11 0x1400 /* SYNC */
>;
};
pinctrl_pexp_irq: pexpirqgrp {
fsl,pins = <
- MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1306
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1000
>;
};
pinctrl_pwmfan: pwmfangrp {
fsl,pins = <
- MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1306
+ /* HYS | PU | FSEL_0 | no DSE */
+ MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1200
+ >;
+ };
+
+ pinctrl_temp_sensor_som: tempsensorsomgrp {
+ fsl,pins = <
+ /* HYS | FSEL_0 | no DSE */
+ MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000
+ >;
+ };
+
+ pinctrl_tc9595: tc9595-grp {
+ fsl,pins = <
+ /* HYS | PD | FSEL_0 | no DSE */
+ MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400
>;
};
pinctrl_tpm5: tpm5grp {
fsl,pins = <
- MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e
+ MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e
>;
};
pinctrl_tpm6: tpm6grp {
fsl,pins = <
- MX93_PAD_GPIO_IO08__TPM6_CH0 0x57e
+ MX93_PAD_GPIO_IO08__TPM6_CH0 0x57e
>;
};
pinctrl_typec: typecgrp {
fsl,pins = <
- MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1306
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1000
>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
- MX93_PAD_UART1_TXD__LPUART1_TX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x011e
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX93_PAD_UART2_TXD__LPUART2_TX 0x31e
- MX93_PAD_UART2_RXD__LPUART2_RX 0x31e
- MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_UART2_RXD__LPUART2_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_UART2_TXD__LPUART2_TX 0x011e
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x011e
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
- MX93_PAD_GPIO_IO14__LPUART3_TX 0x31e
- MX93_PAD_GPIO_IO15__LPUART3_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO15__LPUART3_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO14__LPUART3_TX 0x011e
>;
};
pinctrl_uart6: uart6grp {
fsl,pins = <
- MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e
- MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO05__LPUART6_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO04__LPUART6_TX 0x011e
>;
};
pinctrl_uart8: uart8grp {
fsl,pins = <
- MX93_PAD_GPIO_IO12__LPUART8_TX 0x31e
- MX93_PAD_GPIO_IO13__LPUART8_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO13__LPUART8_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO12__LPUART8_TX 0x011e
>;
};
pinctrl_usdhc2_gpio: usdhc2gpiogrp {
fsl,pins = <
- MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x1000
>;
};
+ /* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc2_hs: usdhc2hsgrp {
fsl,pins = <
- /* HYS | PD | PU | FSEL_3 | DSE X5 */
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17be
- /* HYS | PD | PU | FSEL_3 | DSE X4 */
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- /* HYS | PD | PU | FSEL_3 | DSE X3 */
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
- /* PD | PU | FSEL_2 | DSE X3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e
+ /* PD | FSEL_3 | DSE X5 */
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x05be
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ /* HYS | PU | FSEL_3 | DSE X3 */
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ /* FSEL_2 | DSE X3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e
>;
};
+ /* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc2_uhs: usdhc2uhsgrp {
fsl,pins = <
- /* HYS | PD | PU | FSEL_3 | DSE X6 */
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
- /* HYS | PD | PU | FSEL_3 | DSE X4 */
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x139e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x139e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e
- /* PD | PU | FSEL_2 | DSE X3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e
+ /* PD | FSEL_3 | DSE X6 */
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x05fe
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
+ /* FSEL_2 | DSE X3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index e2ee9f5a042c..0b4b3bb866d0 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -26,6 +26,8 @@
aliases {
eeprom0 = &eeprom0;
+ ethernet0 = &fec;
+ ethernet1 = &eqos;
rtc0 = &pcf85063;
rtc1 = &bbnsm_rtc;
};
@@ -207,6 +209,16 @@
};
&gpio1 {
+ gpio-line-names =
+ /* 00 */ "", "", "USB_C_ALERT#", "PMIC_IRQ#",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "BM2_TEMP_EVENT_MOD#",
+ /* 12 */ "PEX_INT#", "", "RTC_EVENT#", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "", "", "";
+
expander-irq-hog {
gpio-hog;
gpios = <12 GPIO_ACTIVE_LOW>;
@@ -222,19 +234,63 @@
};
};
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+
+ gpio-line-names =
+ /* 00 */ "", "", "", "",
+ /* 04 */ "", "", "", "AFE_RESET#",
+ /* 08 */ "AFE_SYNC", "AFE_DRDY", "MIPI_CSI_TRIGGER", "MIPI_CSI_SYNC",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "X1_19", "X1_29", "X1_25", "X1_21",
+ /* 20 */ "X1_23", "X1_17", "", "",
+ /* 24 */ "AFE_INT#", "", "X1_15", "",
+ /* 28 */ "", "", "", "";
+};
+
&gpio3 {
+ gpio-line-names =
+ /* 00 */ "SD2_CD#", "", "", "",
+ /* 04 */ "", "", "", "SD2_RST#",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "ENET1_INT#", "ENET2_INT#",
+ /* 28 */ "", "", "", "";
+
ethphy-eqos-irq-hog {
gpio-hog;
gpios = <26 GPIO_ACTIVE_LOW>;
input;
- line-name = "ENET0_IRQ#";
+ line-name = "ENET1_INT#";
};
ethphy-fec-irq-hog {
gpio-hog;
gpios = <27 GPIO_ACTIVE_LOW>;
input;
- line-name = "ENET1_IRQ#";
+ line-name = "ENET2_INT#";
+ };
+};
+
+&gpio4 {
+ gpio-line-names =
+ /* 00 */ "", "", "", "",
+ /* 04 */ "", "", "", "",
+ /* 08 */ "", "", "", "",
+ /* 12 */ "", "", "", "",
+ /* 16 */ "", "", "", "",
+ /* 20 */ "", "", "", "",
+ /* 24 */ "", "", "", "",
+ /* 28 */ "", "DP_INT", "", "";
+
+ dp-int-hog {
+ gpio-hog;
+ gpios = <29 GPIO_ACTIVE_LOW>;
+ input;
+ line-name = "DP_INT";
};
};
@@ -371,7 +427,7 @@
#gpio-cells = <2>;
vcc-supply = <&reg_3v3>;
gpio-line-names = "LCD_RESET#", "LCD_PWR_EN",
- "LCD_BL_EN", "DP_EN",
+ "LCD_BLT_EN", "DP_EN",
"MIPI_CSI_EN", "MIPI_CSI_RST#",
"USER_LED1", "USER_LED2";
};
@@ -414,6 +470,13 @@
};
};
+&lpspi6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpspi6>, <&pinctrl_lpspi6_cs>;
+ cs-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
&lpuart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -447,13 +510,21 @@
};
&pcf85063 {
- /* RTC_EVENT# is connected on MBa93xxLA */
+	/* RTC_EVENT# from the SoM is connected on the mainboard */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcf85063>;
interrupt-parent = <&gpio1>;
interrupts = <14 IRQ_TYPE_EDGE_FALLING>;
};
+&se97_som {
+	/* TEMP_EVENT# from the SoM is connected on the mainboard */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_temp_sensor_som>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
+
&tpm5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tpm5>;
@@ -486,7 +557,7 @@
samsung,picophy-dc-vol-level-adjust = <7>;
status = "okay";
- hub_2_0: hub@1 {
+ hub_2_0: usb-hub@1 {
compatible = "usb424,2517";
reg = <1>;
reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>;
@@ -509,25 +580,39 @@
};
&iomuxc {
+ pinctrl_afe: afegrp {
+ fsl,pins = <
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO07__GPIO2_IO07 0x011e
+ /* PD | FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO08__GPIO2_IO08 0x051e
+ /* HYS | PD */
+ MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1400
+ /* HYS */
+ MX93_PAD_GPIO_IO24__GPIO2_IO24 0x1000
+ >;
+ };
+
pinctrl_eqos: eqosgrp {
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000051e
- /* PD | FSEL_2 | DSE X6 */
- MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e
- MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e
- MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e
- MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e
- /* PD | FSEL_3 | DSE X6 */
- MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe
- MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e
+ MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
+ MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
+ MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x1000
+ MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x1000
+ MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x1000
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x1400
/* PD | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e
- MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e
- MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e
- MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e
- MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e
+ MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e
+ MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e
+ MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e
+ MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e
+ MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e
/* PD | FSEL_3 | DSE X3 */
MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e
>;
@@ -535,7 +620,8 @@
pinctrl_eqos_phy: eqosphygrp {
fsl,pins = <
- MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1306
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1000
>;
};
@@ -543,15 +629,16 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000051e
- /* PD | FSEL_2 | DSE X6 */
- MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e
- MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e
- MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e
- MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e
- /* PD | FSEL_3 | DSE X6 */
- MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe
- MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e
+ /* SION | HYS | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
+ MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
+ MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x1000
+ MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x1000
+ MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x1000
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x1400
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x51e
MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x51e
@@ -565,139 +652,216 @@
pinctrl_fec_phy: fecphygrp {
fsl,pins = <
- MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1306
+ /* HYS | FSEL_0 | DSE no drive */
+ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1000
>;
};
pinctrl_flexcan1: flexcan1grp {
fsl,pins = <
- MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e
- MX93_PAD_PDM_CLK__CAN1_TX 0x139e
+ /* HYS | PU | FSEL_0 | DSE no drive */
+ MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x1200
+ /* PU | FSEL_3 | DSE X4 */
+ MX93_PAD_PDM_CLK__CAN1_TX 0x039e
>;
};
pinctrl_flexcan2: flexcan2grp {
fsl,pins = <
- MX93_PAD_GPIO_IO25__CAN2_TX 0x139e
- MX93_PAD_GPIO_IO27__CAN2_RX 0x139e
+ /* HYS | PU | FSEL_0 | DSE no drive */
+ MX93_PAD_GPIO_IO27__CAN2_RX 0x1200
+ /* PU | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO25__CAN2_TX 0x039e
+ >;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <
+ /* HYS | PD | FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO16__GPIO2_IO16 0x151e
+ MX93_PAD_GPIO_IO17__GPIO2_IO17 0x151e
+ MX93_PAD_GPIO_IO18__GPIO2_IO18 0x151e
+ MX93_PAD_GPIO_IO19__GPIO2_IO19 0x151e
+ MX93_PAD_GPIO_IO20__GPIO2_IO20 0x151e
+ MX93_PAD_GPIO_IO21__GPIO2_IO21 0x151e
+ MX93_PAD_GPIO_IO26__GPIO2_IO26 0x151e
+ >;
+ };
+
+ pinctrl_jtag: jtaggrp {
+ fsl,pins = <
+ MX93_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x051e
+ MX93_PAD_DAP_TDI__JTAG_MUX_TDI 0x1200
+ MX93_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x031e
+ MX93_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x1200
>;
};
pinctrl_lpi2c3: lpi2c3grp {
fsl,pins = <
- MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
- MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ /* SION | HYS | OD | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x4000199e
+ MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x4000199e
>;
};
pinctrl_lpi2c5: lpi2c5grp {
fsl,pins = <
- MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x40000b9e
- MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x40000b9e
+ /* SION | HYS | OD | FSEL_3 | DSE X4 */
+ MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x4000199e
+ MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x4000199e
+ >;
+ };
+
+ pinctrl_lpspi6: lpspi6grp {
+ fsl,pins = <
+ /* HYS | PD | FSEL_0 | DSE no drive */
+ MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x1400
+ /* PD | FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x051e
+ MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x051e
+ >;
+ };
+
+ pinctrl_lpspi6_cs: lpspi6csgrp {
+ fsl,pins = <
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO00__GPIO2_IO00 0x011e
+ >;
+ };
+
+ pinctrl_mipi_csi: mipicsigrp {
+ fsl,pins = <
+ MX93_PAD_CCM_CLKO3__CCMSRCGPCMIX_CLKO3 0x051e /* MCLK */
+ MX93_PAD_GPIO_IO10__GPIO2_IO10 0x051e /* TRIGGER */
+ MX93_PAD_GPIO_IO11__GPIO2_IO11 0x1400 /* SYNC */
>;
};
pinctrl_pcf85063: pcf85063grp {
fsl,pins = <
- MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1306
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000
>;
};
pinctrl_pexp_irq: pexpirqgrp {
fsl,pins = <
- MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1306
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1000
>;
};
pinctrl_tc9595: tc9595-grp {
fsl,pins = <
- /* DP_IRQ */
- MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1306
+ /* HYS | PD | FSEL_0 | no DSE */
+ MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400
+ >;
+ };
+
+ pinctrl_temp_sensor_som: tempsensorsomgrp {
+ fsl,pins = <
+ /* HYS | FSEL_0 | no DSE */
+ MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000
>;
};
pinctrl_tpm5: tpm5grp {
fsl,pins = <
- MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e
+ MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e
>;
};
pinctrl_typec: typecgrp {
fsl,pins = <
- MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1306
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1000
>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX93_PAD_UART1_RXD__LPUART1_RX 0x31e
- MX93_PAD_UART1_TXD__LPUART1_TX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_UART1_RXD__LPUART1_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_UART1_TXD__LPUART1_TX 0x011e
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX93_PAD_UART2_TXD__LPUART2_TX 0x31e
- MX93_PAD_UART2_RXD__LPUART2_RX 0x31e
- MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_UART2_RXD__LPUART2_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_UART2_TXD__LPUART2_TX 0x011e
+ MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x011e
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
- MX93_PAD_GPIO_IO14__LPUART3_TX 0x31e
- MX93_PAD_GPIO_IO15__LPUART3_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO15__LPUART3_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO14__LPUART3_TX 0x011e
>;
};
pinctrl_uart6: uart6grp {
fsl,pins = <
- MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e
- MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO05__LPUART6_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO04__LPUART6_TX 0x011e
>;
};
pinctrl_uart8: uart8grp {
fsl,pins = <
- MX93_PAD_GPIO_IO12__LPUART8_TX 0x31e
- MX93_PAD_GPIO_IO13__LPUART8_RX 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_GPIO_IO13__LPUART8_RX 0x1000
+ /* FSEL_2 | DSE X4 */
+ MX93_PAD_GPIO_IO12__LPUART8_TX 0x011e
>;
};
pinctrl_usdhc2_gpio: usdhc2gpiogrp {
fsl,pins = <
- MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e
+ /* HYS | FSEL_0 | No DSE */
+ MX93_PAD_SD2_CD_B__GPIO3_IO00 0x1000
>;
};
+ /* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc2_hs: usdhc2hsgrp {
fsl,pins = <
- /* HYS | PD | PU | FSEL_3 | DSE X5 */
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17be
- /* HYS | PD | PU | FSEL_3 | DSE X4 */
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- /* HYS | PD | PU | FSEL_3 | DSE X3 */
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
- /* PD | PU | FSEL_2 | DSE X3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e
+ /* PD | FSEL_3 | DSE X5 */
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x05be
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ /* HYS | PU | FSEL_3 | DSE X3 */
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e
+ /* FSEL_2 | DSE X3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e
>;
};
+ /* enable SION for data and cmd pad due to ERR052021 */
pinctrl_usdhc2_uhs: usdhc2uhsgrp {
fsl,pins = <
- /* HYS | PD | PU | FSEL_3 | DSE X6 */
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
- /* HYS | PD | PU | FSEL_3 | DSE X4 */
- MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e
- MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x139e
- MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x139e
- MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e
- MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e
- /* PD | PU | FSEL_2 | DSE X3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e
+ /* PD | FSEL_3 | DSE X6 */
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x05fe
+ /* HYS | PU | FSEL_3 | DSE X4 */
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e
+ MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e
+ MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e
+ /* FSEL_2 | DSE X3 */
+ MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
index 72a9a5d4e27a..2cabdae24227 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
@@ -25,20 +25,6 @@
};
};
- reg_v1v8: regulator-v1v8 {
- compatible = "regulator-fixed";
- regulator-name = "V_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- reg_v3v3: regulator-v3v3 {
- compatible = "regulator-fixed";
- regulator-name = "V_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
/* SD2 RST# via PMIC SW_EN */
reg_usdhc2_vmmc: regulator-usdhc2 {
compatible = "regulator-fixed";
@@ -47,14 +33,14 @@
regulator-name = "VSD_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- vin-supply = <&reg_v3v3>;
+ vin-supply = <&buck4>;
gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
};
&adc1 {
- vref-supply = <&reg_v1v8>;
+ vref-supply = <&buck5>;
};
&flexspi1 {
@@ -105,6 +91,91 @@
reg = <0x1b>;
};
+ pca9451a: pmic@25 {
+ compatible = "nxp,pca9451a";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pca9451>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+			/* V_0V8_SOC - 0.75 V .. 0.9 V per the hw developer guide */
+ buck1: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+			/* V_DDRQ - 1.1 V for LPDDR4 or 0.6 V for LPDDR4X */
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ /* V_3V3 - EEPROM, RTC, ... */
+ buck4: BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* V_1V8 - SPI NOR, eMMC, RAM VDD1... */
+ buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+			/* V_1V1 - RAM VDD2 */
+ buck6: BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+			/* V_1V8_BBSM, fixed 1.8 V */
+ ldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* V_0V8_ANA */
+ ldo4: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+			/* V_SD2 - 3.3 V / 1.8 V USDHC2 I/O voltage */
+ ldo5: LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
pcf85063: rtc@51 {
compatible = "nxp,pcf85063a";
reg = <0x51>;
@@ -116,28 +187,28 @@
reg = <0x53>;
pagesize = <16>;
read-only;
- vcc-supply = <&reg_v3v3>;
+ vcc-supply = <&buck4>;
};
eeprom1: eeprom@57 {
compatible = "atmel,24c64";
reg = <0x57>;
pagesize = <32>;
- vcc-supply = <&reg_v3v3>;
+ vcc-supply = <&buck4>;
};
/* protectable identification memory (part of M24C64-D @57) */
eeprom@5f {
compatible = "atmel,24c64d-wl";
reg = <0x5f>;
- vcc-supply = <&reg_v3v3>;
+ vcc-supply = <&buck4>;
};
imu@6a {
compatible = "st,ism330dhcx";
reg = <0x6a>;
- vdd-supply = <&reg_v3v3>;
- vddio-supply = <&reg_v3v3>;
+ vdd-supply = <&buck4>;
+ vddio-supply = <&buck4>;
};
};
@@ -146,6 +217,8 @@
pinctrl-0 = <&pinctrl_usdhc1>;
pinctrl-1 = <&pinctrl_usdhc1>;
pinctrl-2 = <&pinctrl_usdhc1>;
+ vmmc-supply = <&buck4>;
+ vqmmc-supply = <&buck5>;
bus-width = <8>;
non-removable;
no-sdio;
@@ -163,55 +236,64 @@
&iomuxc {
pinctrl_flexspi1: flexspi1grp {
fsl,pins = <
- MX93_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x3fe
- MX93_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x3fe
- MX93_PAD_SD3_DATA0__FLEXSPI1_A_DATA00 0x3fe
- MX93_PAD_SD3_DATA1__FLEXSPI1_A_DATA01 0x3fe
- MX93_PAD_SD3_DATA2__FLEXSPI1_A_DATA02 0x3fe
- MX93_PAD_SD3_DATA3__FLEXSPI1_A_DATA03 0x3fe
+ /* FSEL 3 | DSE X6 */
+ MX93_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x01fe
+ MX93_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x01fe
+ /* HYS | PU | FSEL 3 | DSE X6 */
+ MX93_PAD_SD3_DATA0__FLEXSPI1_A_DATA00 0x13fe
+ MX93_PAD_SD3_DATA1__FLEXSPI1_A_DATA01 0x13fe
+ /* HYS | FSEL 3 | DSE X6 (external PU) */
+ MX93_PAD_SD3_DATA2__FLEXSPI1_A_DATA02 0x11fe
+ MX93_PAD_SD3_DATA3__FLEXSPI1_A_DATA03 0x11fe
>;
};
pinctrl_lpi2c1: lpi2c1grp {
fsl,pins = <
- MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e
- MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e
+ /* SION | OD | FSEL 3 | DSE X4 */
+ MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x4000199e
+ MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x4000199e
>;
};
pinctrl_pca9451: pca9451grp {
fsl,pins = <
- MX93_PAD_I2C2_SDA__GPIO1_IO03 0x1306
+ /* HYS | PU */
+ MX93_PAD_I2C2_SDA__GPIO1_IO03 0x1200
>;
};
pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
fsl,pins = <
- MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x1306
+ /* FSEL 2 | DSE X2 */
+ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x106
>;
};
+ /* Enable SION on the data and cmd pads due to erratum ERR052021 */
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
- /* HYS | PU | PD | FSEL_3 | X5 */
- MX93_PAD_SD1_CLK__USDHC1_CLK 0x17be
- MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17be
- /* HYS | PU | FSEL_3 | X5 */
- MX93_PAD_SD1_CMD__USDHC1_CMD 0x13be
- /* HYS | PU | FSEL_3 | X4 */
- MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x139e
- MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x139e
- MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x139e
- MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x139e
- MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x139e
- MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x139e
- MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x139e
- MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x139e
+ /* PD | FSEL 3 | DSE X5 */
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x5be
+ /* HYS | FSEL 0 | no drive */
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1000
+ /* HYS | FSEL 3 | X5 */
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x400011be
+ /* HYS | FSEL 3 | X4 */
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000119e
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000119e
+ MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000119e
+ MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000119e
+ MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000119e
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000119e
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000119e
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000119e
>;
};
pinctrl_wdog: wdoggrp {
fsl,pins = <
+ /* PU | FSEL 2 | DSE X4 */
MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0x31e
>;
};
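
The pad-control comments added in this hunk all use one packing. A worked
decode of a single entry, inferred only from the comments themselves (the
i.MX93 reference manual remains authoritative for the exact bit positions):

	fsl,pins = <
		/* 0x13fe = HYS (bit 12) | PU (bit 9) | FSEL 3 (bits 8:7)
		 *        | DSE X6 (bits 6:1)
		 *        = 0x1000 | 0x0200 | 0x0180 | 0x007e
		 * PD is bit 10; SION, where present (e.g. 0x4000119e),
		 * is bit 30 of the same word.
		 */
		MX93_PAD_SD3_DATA0__FLEXSPI1_A_DATA00	0x13fe
	>;
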
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index a0993022c102..04b9b3d31f4f 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -69,6 +69,13 @@
enable-method = "psci";
#cooling-cells = <2>;
cpu-idle-states = <&cpu_pd_wait>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l0>;
};
A55_1: cpu@100 {
@@ -78,8 +85,43 @@
enable-method = "psci";
#cooling-cells = <2>;
cpu-idle-states = <&cpu_pd_wait>;
+ i-cache-size = <32768>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <128>;
+ d-cache-size = <32768>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l1>;
};
+ l2_cache_l0: l2-cache-l0 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l2_cache_l1: l2-cache-l1 {
+ compatible = "cache";
+ cache-size = <65536>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_cache>;
+ };
+
+ l3_cache: l3-cache {
+ compatible = "cache";
+ cache-size = <262144>;
+ cache-line-size = <64>;
+ cache-sets = <256>;
+ cache-level = <3>;
+ cache-unified;
+ };
};
osc_32k: clock-osc-32k {
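
The cache geometry added here is internally consistent: the 32 KiB L1
caches with 64-byte lines and 128 sets imply 4 ways (32768 = 128 x 4 x 64),
the 64 KiB per-core L2s with 256 sets are likewise 4-way
(65536 = 256 x 4 x 64), and the shared 256 KiB L3 with 256 sets works out
to 16 ways (262144 = 256 x 16 x 64).
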
@@ -425,6 +467,7 @@
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
dmas = <&edma1 22 0 FSL_EDMA_RX>, <&edma1 21 0 0>;
dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -524,6 +567,7 @@
clock-names = "ipg_clk", "ipg_clk_app", "pll8k";
dmas = <&edma1 29 0 5>;
dma-names = "rx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -846,6 +890,7 @@
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
dmas = <&edma2 59 0 FSL_EDMA_RX>, <&edma2 58 0 0>;
dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -859,6 +904,7 @@
clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
dmas = <&edma2 61 0 FSL_EDMA_RX>, <&edma2 60 0 0>;
dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -878,6 +924,7 @@
clock-names = "ipg", "phy", "spba", "pll_ipg";
dmas = <&edma2 65 0 FSL_EDMA_RX>, <&edma2 66 0 0>;
dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
index d14a54ab4fd4..37a1d4ca1b20 100644
--- a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include <dt-bindings/pwm/pwm.h>
#include "imx95.dtsi"
/ {
@@ -17,6 +18,11 @@
serial0 = &lpuart1;
};
+ bt_sco_codec: audio-codec-bt-sco {
+ #sound-dai-cells = <1>;
+ compatible = "linux,bt-sco";
+ };
+
chosen {
stdout-path = &lpuart1;
};
@@ -26,6 +32,13 @@
reg = <0x0 0x80000000 0 0x80000000>;
};
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ pwms = <&tpm6 2 4000000 PWM_POLARITY_INVERTED>;
+ cooling-levels = <64 128 192 255>;
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -40,6 +53,34 @@
};
};
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_SW";
+ };
+
+ reg_audio_pwr: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-pwr";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&i2c4_gpio_expander_21 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_audio_slot: regulator-audio-slot {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-wm8962";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&i2c4_gpio_expander_21 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ status = "disabled";
+ };
+
reg_m2_pwr: regulator-m2-pwr {
compatible = "regulator-fixed";
regulator-name = "M.2-power";
@@ -79,6 +120,116 @@
enable-active-high;
off-on-delay-us = <12000>;
};
+
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "bt-sco-audio";
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,bitclock-master = <&btcpu>;
+
+ btcpu: simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&bt_sco_codec 1>;
+ };
+ };
+
+ sound-micfil {
+ compatible = "fsl,imx-audio-card";
+ model = "micfil-audio";
+
+ pri-dai-link {
+ link-name = "micfil hifi";
+ format = "i2s";
+ cpu {
+ sound-dai = <&micfil>;
+ };
+ };
+ };
+
+ sound-wm8962 {
+ compatible = "fsl,imx-audio-wm8962";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hp>;
+ model = "wm8962-audio";
+ audio-cpu = <&sai3>;
+ audio-codec = <&wm8962>;
+ hp-det-gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>;
+ audio-routing = "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL",
+ "Ext Spk", "SPKOUTR",
+ "AMIC", "MICBIAS",
+ "IN3R", "AMIC",
+ "IN1R", "AMIC";
+ };
+};
+
+&flexspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi1_reset>;
+ reset-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <200000000>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ };
+};
+
+&lpi2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c4>;
+ status = "okay";
+
+ wm8962: audio-codec@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&scmi_clk IMX95_CLK_SAI3>;
+ DCVDD-supply = <&reg_audio_pwr>;
+ DBVDD-supply = <&reg_audio_pwr>;
+ AVDD-supply = <&reg_audio_pwr>;
+ CPVDD-supply = <&reg_audio_pwr>;
+ MICVDD-supply = <&reg_audio_pwr>;
+ PLLVDD-supply = <&reg_audio_pwr>;
+ SPKVDD1-supply = <&reg_audio_pwr>;
+ SPKVDD2-supply = <&reg_audio_pwr>;
+ gpio-cfg = < 0x0000 /* 0:Default */
+ 0x0000 /* 1:Default */
+ 0x0000 /* 2:FN_DMICCLK */
+ 0x0000 /* 3:Default */
+ 0x0000 /* 4:FN_DMICCDAT */
+ 0x0000 /* 5:Default */
+ >;
+ };
+
+ i2c4_gpio_expander_21: gpio@21 {
+ compatible = "nxp,pcal6408";
+ reg = <0x21>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4_pcal6408>;
+ vcc-supply = <&reg_3p3v>;
+ };
};
&lpi2c7 {
@@ -108,6 +259,23 @@
status = "okay";
};
+&micfil {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pdm>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_PDM>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>,
+ <3612672000>, <393216000>,
+ <361267200>, <49152000>;
+ status = "okay";
+};
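
The rates assigned here follow the usual i.MX audio clock ladder: AUDIOPLL1
(3932.16 MHz VCO, divided down to 393.216 MHz) serves the 48 kHz
sample-rate family and AUDIOPLL2 (3612.672 MHz VCO, 361.2672 MHz) the
44.1 kHz family. The PDM clock is 393216000 / 8 = 49152000, and the SAI
nodes below take 12288000 = 256 x 48000, the conventional 256*Fs master
clock.
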
+
&mu7 {
status = "okay";
};
@@ -128,6 +296,42 @@
status = "okay";
};
+&sai1 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI1>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>,
+ <3612672000>, <393216000>,
+ <361267200>, <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&sai3 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI3>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>,
+ <3612672000>, <393216000>,
+ <361267200>, <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
&usdhc1 {
pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -159,12 +363,53 @@
};
&scmi_iomuxc {
+ pinctrl_flexspi1: flexspi1grp {
+ fsl,pins = <
+ IMX95_PAD_XSPI1_SS0_B__FLEXSPI1_A_SS0_B 0x3fe
+ IMX95_PAD_XSPI1_SCLK__FLEXSPI1_A_SCLK 0x3fe
+ IMX95_PAD_XSPI1_DQS__FLEXSPI1_A_DQS 0x3fe
+ IMX95_PAD_XSPI1_DATA0__FLEXSPI1_A_DATA_BIT0 0x3fe
+ IMX95_PAD_XSPI1_DATA1__FLEXSPI1_A_DATA_BIT1 0x3fe
+ IMX95_PAD_XSPI1_DATA2__FLEXSPI1_A_DATA_BIT2 0x3fe
+ IMX95_PAD_XSPI1_DATA3__FLEXSPI1_A_DATA_BIT3 0x3fe
+ IMX95_PAD_XSPI1_DATA4__FLEXSPI1_A_DATA_BIT4 0x3fe
+ IMX95_PAD_XSPI1_DATA5__FLEXSPI1_A_DATA_BIT5 0x3fe
+ IMX95_PAD_XSPI1_DATA6__FLEXSPI1_A_DATA_BIT6 0x3fe
+ IMX95_PAD_XSPI1_DATA7__FLEXSPI1_A_DATA_BIT7 0x3fe
+ >;
+ };
+
+ pinctrl_flexspi1_reset: flexspi1-reset-grp {
+ fsl,pins = <
+ IMX95_PAD_XSPI1_SS1_B__GPIO5_IO_BIT11 0x3fe
+ >;
+ };
+
+ pinctrl_hp: hpgrp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO11__GPIO2_IO_BIT11 0x31e
+ >;
+ };
+
+ pinctrl_i2c4_pcal6408: i2c4pcal6408grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO18__GPIO2_IO_BIT18 0x31e
+ >;
+ };
+
pinctrl_i2c7_pcal6524: i2c7pcal6524grp {
fsl,pins = <
IMX95_PAD_GPIO_IO36__GPIO5_IO_BIT16 0x31e
>;
};
+ pinctrl_lpi2c4: lpi2c4grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO30__LPI2C4_SDA 0x40000b9e
+ IMX95_PAD_GPIO_IO31__LPI2C4_SCL 0x40000b9e
+ >;
+ };
+
pinctrl_lpi2c7: lpi2c7grp {
fsl,pins = <
IMX95_PAD_GPIO_IO08__LPI2C7_SDA 0x40000b9e
@@ -184,6 +429,54 @@
>;
};
+ pinctrl_pdm: pdmgrp {
+ fsl,pins = <
+ IMX95_PAD_PDM_CLK__AONMIX_TOP_PDM_CLK 0x31e
+ IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ IMX95_PAD_SAI1_RXD0__AONMIX_TOP_SAI1_RX_DATA_BIT0 0x31e
+ IMX95_PAD_SAI1_TXC__AONMIX_TOP_SAI1_TX_BCLK 0x31e
+ IMX95_PAD_SAI1_TXFS__AONMIX_TOP_SAI1_TX_SYNC 0x31e
+ IMX95_PAD_SAI1_TXD0__AONMIX_TOP_SAI1_TX_DATA_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_SAI2_RX_BCLK 0x31e
+ IMX95_PAD_ENET2_MDC__NETCMIX_TOP_SAI2_RX_SYNC 0x31e
+ IMX95_PAD_ENET2_TD3__NETCMIX_TOP_SAI2_RX_DATA_BIT0 0x31e
+ IMX95_PAD_ENET2_TD2__NETCMIX_TOP_SAI2_RX_DATA_BIT1 0x31e
+ IMX95_PAD_ENET2_TXC__NETCMIX_TOP_SAI2_TX_BCLK 0x31e
+ IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_SAI2_TX_SYNC 0x31e
+ IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_SAI2_TX_DATA_BIT0 0x31e
+ IMX95_PAD_ENET2_RXC__NETCMIX_TOP_SAI2_TX_DATA_BIT1 0x31e
+ IMX95_PAD_ENET2_RD0__NETCMIX_TOP_SAI2_TX_DATA_BIT2 0x31e
+ IMX95_PAD_ENET2_RD1__NETCMIX_TOP_SAI2_TX_DATA_BIT3 0x31e
+ IMX95_PAD_ENET2_RD2__NETCMIX_TOP_SAI2_MCLK 0x31e
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO17__SAI3_MCLK 0x31e
+ IMX95_PAD_GPIO_IO16__SAI3_TX_BCLK 0x31e
+ IMX95_PAD_GPIO_IO26__SAI3_TX_SYNC 0x31e
+ IMX95_PAD_GPIO_IO20__SAI3_RX_DATA_BIT0 0x31e
+ IMX95_PAD_GPIO_IO21__SAI3_TX_DATA_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_tpm6: tpm6grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO19__TPM6_CH2 0x51e
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x31e
@@ -287,3 +580,50 @@
>;
};
};
+
+&thermal_zones {
+ a55-thermal {
+ trips {
+ atrip2: trip2 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ atrip3: trip3 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ atrip4: trip4 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map1 {
+ trip = <&atrip2>;
+ cooling-device = <&fan0 0 1>;
+ };
+
+ map2 {
+ trip = <&atrip3>;
+ cooling-device = <&fan0 1 2>;
+ };
+
+ map3 {
+ trip = <&atrip4>;
+ cooling-device = <&fan0 2 3>;
+ };
+ };
+ };
+};
+
+&tpm6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm6>;
+ status = "okay";
+};
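
Read together with the fan0 node above, the cooling maps are easy to
sanity-check: pwm-fan exposes one cooling state per cooling-levels entry
(PWM duty on a 0-255 scale, so <64 128 192 255> is roughly 25/50/75/100 %),
and each map binds a trip to a state range - my reading of the pwm-fan
binding, shown on map1:

	map1 {
		trip = <&atrip2>;		/* active trip at 55 °C */
		cooling-device = <&fan0 0 1>;	/* states 0..1, ~25-50 % duty */
	};

The fan then steps to ~50-75 % duty at 65 °C and ~75-100 % at 75 °C.
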
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 425272aa5a81..03661e76550f 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -3,6 +3,7 @@
* Copyright 2024 NXP
*/
+#include <dt-bindings/dma/fsl-edma.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -221,6 +222,13 @@
};
};
+ dummy: clock-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ clock-output-names = "dummy";
+ };
+
clk_ext1: clock-ext1 {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -281,7 +289,7 @@
firmware {
scmi {
compatible = "arm,scmi";
- mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>;
+ mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>, <&mu2 5 1>;
shmem = <&scmi_buf0>, <&scmi_buf1>;
#address-cells = <1>;
#size-cells = <0>;
@@ -318,7 +326,7 @@
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
};
- thermal-zones {
+ thermal_zones: thermal-zones {
a55-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
@@ -405,6 +413,152 @@
#address-cells = <1>;
#size-cells = <1>;
+ edma2: dma-controller@42000000 {
+ compatible = "fsl,imx95-edma5";
+ reg = <0x42000000 0x210000>;
+ #dma-cells = <3>;
+ dma-channels = <64>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "dma";
+ };
+
+ edma3: dma-controller@42210000 {
+ compatible = "fsl,imx95-edma5";
+ reg = <0x42210000 0x210000>;
+ #dma-cells = <3>;
+ dma-channels = <64>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>;
+ clock-names = "dma";
+ };
+
mu7: mailbox@42430000 {
compatible = "fsl,imx95-mu";
reg = <0x42430000 0x10000>;
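
With #dma-cells = <3>, a consumer specifier reads <&ctrl request priority
flags>; the flags cell takes the FSL_EDMA_* bits from
dt-bindings/dma/fsl-edma.h, with FSL_EDMA_RX marking the receive direction.
That is my reading of the fsl,edma binding, so treat the cell names as
illustrative. Each GIC SPI also appears twice above because the 64 channels
share 32 interrupt lines in adjacent pairs and the binding lists one
interrupt per channel. A typical consumer, as used throughout this file:

	dmas = <&edma2 61 0 FSL_EDMA_RX>,	/* request 61, RX */
	       <&edma2 60 0 0>;			/* request 60, TX */
	dma-names = "rx", "tx";
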
@@ -464,6 +618,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 8 0 0>, <&edma2 9 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -476,6 +632,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 10 0 0>, <&edma2 11 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -488,6 +646,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI3>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 12 0 0>, <&edma2 13 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -500,6 +660,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI4>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 14 0 0>, <&edma2 15 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -510,6 +672,8 @@
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART3>;
clock-names = "ipg";
+ dmas = <&edma2 18 0 FSL_EDMA_RX>, <&edma2 17 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -520,6 +684,8 @@
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART4>;
clock-names = "ipg";
+ dmas = <&edma2 20 0 FSL_EDMA_RX>, <&edma2 19 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -530,6 +696,8 @@
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART5>;
clock-names = "ipg";
+ dmas = <&edma2 22 0 FSL_EDMA_RX>, <&edma2 21 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -540,6 +708,110 @@
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART6>;
clock-names = "ipg";
+ dmas = <&edma2 24 0 FSL_EDMA_RX>, <&edma2 23 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ flexcan2: can@425b0000 {
+ compatible = "fsl,imx95-flexcan";
+ reg = <0x425b0000 0x10000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_CAN2>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_CAN2>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <40000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexcan3: can@42600000 {
+ compatible = "fsl,imx95-flexcan";
+ reg = <0x42600000 0x10000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_CAN3>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_CAN3>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <40000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexspi1: spi@425e0000 {
+ compatible = "nxp,imx8mm-fspi";
+ reg = <0x425e0000 0x10000>, <0x28000000 0x8000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_FLEXSPI1>,
+ <&scmi_clk IMX95_CLK_FLEXSPI1>;
+ clock-names = "fspi_en", "fspi";
+ assigned-clocks = <&scmi_clk IMX95_CLK_FLEXSPI1>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1>;
+ assigned-clock-rates = <200000000>;
+ status = "disabled";
+ };
+
+ sai3: sai@42650000 {
+ compatible = "fsl,imx95-sai";
+ reg = <0x42650000 0x10000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX95_CLK_SAI3>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 61 0 FSL_EDMA_RX>, <&edma2 60 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ sai4: sai@42660000 {
+ compatible = "fsl,imx95-sai";
+ reg = <0x42660000 0x10000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX95_CLK_SAI4>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 68 0 FSL_EDMA_RX>, <&edma2 67 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ sai5: sai@42670000 {
+ compatible = "fsl,imx95-sai";
+ reg = <0x42670000 0x10000>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>,
+ <&scmi_clk IMX95_CLK_SAI5>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma2 70 0 FSL_EDMA_RX>, <&edma2 69 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ xcvr: xcvr@42680000 {
+ compatible = "fsl,imx95-xcvr";
+ reg = <0x42680000 0x800>, <0x42680800 0x400>,
+ <0x42680c00 0x080>, <0x42680e00 0x080>;
+ reg-names = "ram", "regs", "rxfifo", "txfifo";
+ interrupts = /* XCVR IRQ 0 */
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ /* XCVR IRQ 1 */
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_SPDIF>,
+ <&dummy>,
+ <&scmi_clk IMX95_CLK_AUDIOXCVR>;
+ clock-names = "ipg", "phy", "spba", "pll_ipg";
+ dmas = <&edma2 65 0 FSL_EDMA_RX>, <&edma2 66 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -550,6 +822,8 @@
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART7>;
clock-names = "ipg";
+ dmas = <&edma2 26 0 FSL_EDMA_RX>, <&edma2 25 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -560,6 +834,8 @@
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART8>;
clock-names = "ipg";
+ dmas = <&edma2 28 0 FSL_EDMA_RX>, <&edma2 27 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -572,6 +848,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 71 0 0>, <&edma2 72 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -584,6 +862,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 73 0 0>, <&edma2 74 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -596,6 +876,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 75 0 0>, <&edma2 76 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -608,6 +890,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma2 77 0 0>, <&edma2 78 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -620,6 +904,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI5>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 79 0 0>, <&edma2 80 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -632,6 +918,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI6>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 81 0 0>, <&edma2 82 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -644,6 +932,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI7>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 83 0 0>, <&edma2 84 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -656,6 +946,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI8>,
<&scmi_clk IMX95_CLK_BUSWAKEUP>;
clock-names = "per", "ipg";
+ dmas = <&edma2 85 0 0>, <&edma2 86 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -667,6 +959,34 @@
#mbox-cells = <2>;
status = "disabled";
};
+
+ flexcan4: can@427c0000 {
+ compatible = "fsl,imx95-flexcan";
+ reg = <0x427c0000 0x10000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_CAN4>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_CAN4>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <40000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ flexcan5: can@427d0000 {
+ compatible = "fsl,imx95-flexcan";
+ reg = <0x427d0000 0x10000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_CAN5>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_CAN5>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <40000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
};
aips3: bus@42800000 {
@@ -796,6 +1116,46 @@
#address-cells = <1>;
#size-cells = <1>;
+ edma1: dma-controller@44000000 {
+ compatible = "fsl,imx93-edma3";
+ reg = <0x44000000 0x200000>;
+ #dma-cells = <3>;
+ dma-channels = <31>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>;
+ clock-names = "dma";
+ };
+
mu1: mailbox@44220000 {
compatible = "fsl,imx95-mu";
reg = <0x44220000 0x10000>;
@@ -830,6 +1190,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma1 12 0 0>, <&edma1 13 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -842,6 +1204,8 @@
clock-names = "per", "ipg";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&edma1 14 0 0>, <&edma1 15 0 FSL_EDMA_RX>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -854,6 +1218,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI1>,
<&scmi_clk IMX95_CLK_BUSAON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 16 0 FSL_EDMA_RX>, <&edma1 17 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -866,6 +1232,8 @@
clocks = <&scmi_clk IMX95_CLK_LPSPI2>,
<&scmi_clk IMX95_CLK_BUSAON>;
clock-names = "per", "ipg";
+ dmas = <&edma1 18 0 FSL_EDMA_RX>, <&edma1 19 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -876,6 +1244,8 @@
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART1>;
clock-names = "ipg";
+ dmas = <&edma1 21 0 FSL_EDMA_RX>, <&edma1 20 0 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -886,6 +1256,54 @@
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART2>;
clock-names = "ipg";
+ dmas = <&edma1 23 0 FSL_EDMA_RX>, <&edma1 22 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ flexcan1: can@443a0000 {
+ compatible = "fsl,imx95-flexcan";
+ reg = <0x443a0000 0x10000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>,
+ <&scmi_clk IMX95_CLK_CAN1>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&scmi_clk IMX95_CLK_CAN1>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <40000000>;
+ fsl,clk-source = /bits/ 8 <0>;
+ status = "disabled";
+ };
+
+ sai1: sai@443b0000 {
+ compatible = "fsl,imx95-sai";
+ reg = <0x443b0000 0x10000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>, <&dummy>,
+ <&scmi_clk IMX95_CLK_SAI1>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ dmas = <&edma1 25 0 FSL_EDMA_RX>, <&edma1 24 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ micfil: micfil@44520000 {
+ compatible = "fsl,imx95-micfil", "fsl,imx93-micfil";
+ reg = <0x44520000 0x10000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>,
+ <&scmi_clk IMX95_CLK_PDM>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&dummy>;
+ clock-names = "ipg_clk", "ipg_clk_app",
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&edma1 6 0 5>;
+ dma-names = "rx";
status = "disabled";
};
@@ -1188,5 +1606,37 @@
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
status = "disabled";
};
+
+ netcmix_blk_ctrl: syscon@4c810000 {
+ compatible = "nxp,imx95-netcmix-blk-ctrl", "syscon";
+ reg = <0x0 0x4c810000 0x0 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ assigned-clock-rates = <133333333>;
+ power-domains = <&scmi_devpd IMX95_PD_NETC>;
+ status = "disabled";
+ };
+
+ sai2: sai@4c880000 {
+ compatible = "fsl,imx95-sai";
+ reg = <0x0 0x4c880000 0x0 0x10000>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>, <&dummy>,
+ <&scmi_clk IMX95_CLK_SAI2>, <&dummy>,
+ <&dummy>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3";
+ power-domains = <&scmi_devpd IMX95_PD_NETC>;
+ dmas = <&edma2 59 0 FSL_EDMA_RX>, <&edma2 58 0 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ddr-pmu@4e090dc0 {
+ compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu";
+ reg = <0x0 0x4e090dc0 0x0 0x200>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
index 815241526a0d..c60c7a9e54af 100644
--- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
@@ -185,6 +185,8 @@
reset-gpios = <&expander2 7 GPIO_ACTIVE_LOW>;
reset-assert-us = <500000>;
reset-deassert-us = <500>;
+ interrupt-parent = <&expander2>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
};
};
};
@@ -237,7 +239,6 @@
};
&i2c2 {
- clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
@@ -258,6 +259,11 @@
reg = <0x1f>;
};
+ /*
+ * The TUSB8041 hub is at address 0x41 but is not connected by default.
+ * Note: the TUSB8041 only supports 100 kHz!
+ */
+
eeprom3: eeprom@57 {
compatible = "nxp,se97b", "atmel,24c02";
reg = <0x57>;
@@ -274,7 +280,6 @@
};
&i2c3 {
- clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c3>;
pinctrl-1 = <&pinctrl_i2c3_gpio>;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
index 65f7b5a50eb5..1b2b20c6126d 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
@@ -27,6 +27,7 @@ fman@1a00000 {
reg = <0xf0000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>;
pcsphy-handle = <&pcsphy6>;
+ pcs-handle = <&pcsphy6>;
};
mdio@f1000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
index 3f70482c98c3..55d78f6f7c6c 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
@@ -27,6 +27,7 @@ fman@1a00000 {
reg = <0xf2000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>;
pcsphy-handle = <&pcsphy7>;
+ pcs-handle = <&pcsphy7>;
};
mdio@f3000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
index 78841c1f3252..18916a860c2e 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
@@ -26,6 +26,7 @@ fman@1a00000 {
fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy0>;
+ pcs-handle = <&pcsphy0>;
};
mdio@e1000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
index 1f43fa666222..e90af445a293 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
@@ -26,6 +26,7 @@ fman@1a00000 {
fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy1>;
+ pcs-handle = <&pcsphy1>;
};
mdio@e3000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
index de0aa017701d..fec93905bc81 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
@@ -26,6 +26,7 @@ fman@1a00000 {
fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy2>;
+ pcs-handle = <&pcsphy2>;
};
mdio@e5000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
index 6904aa5d8e54..2aa953faa62b 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
@@ -26,6 +26,7 @@ fman@1a00000 {
fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy3>;
+ pcs-handle = <&pcsphy3>;
};
mdio@e7000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
index a3d29d470297..948e39411415 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
@@ -26,6 +26,7 @@ fman@1a00000 {
fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy4>;
+ pcs-handle = <&pcsphy4>;
};
mdio@e9000 {
diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi
index fc19ae2e8d3b..fa054bfe7d5c 100644
--- a/arch/arm64/boot/dts/freescale/s32g2.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi
@@ -114,6 +114,56 @@
#size-cells = <1>;
ranges = <0 0 0 0x80000000>;
+ pinctrl: pinctrl@4009c240 {
+ compatible = "nxp,s32g2-siul2-pinctrl";
+ /* MSCR0-MSCR101 registers on siul2_0 */
+ reg = <0x4009c240 0x198>,
+ /* MSCR112-MSCR122 registers on siul2_1 */
+ <0x44010400 0x2c>,
+ /* MSCR144-MSCR190 registers on siul2_1 */
+ <0x44010480 0xbc>,
+ /* IMCR0-IMCR83 registers on siul2_0 */
+ <0x4009ca40 0x150>,
+ /* IMCR119-IMCR397 registers on siul2_1 */
+ <0x44010c1c 0x45c>,
+ /* IMCR430-IMCR495 registers on siul2_1 */
+ <0x440110f8 0x108>;
+
+ jtag_pins: jtag-pins {
+ jtag-grp0 {
+ pinmux = <0x0>;
+ input-enable;
+ bias-pull-up;
+ slew-rate = <166>;
+ };
+
+ jtag-grp1 {
+ pinmux = <0x11>;
+ slew-rate = <166>;
+ };
+
+ jtag-grp2 {
+ pinmux = <0x40>;
+ input-enable;
+ bias-pull-down;
+ slew-rate = <166>;
+ };
+
+ jtag-grp3 {
+ pinmux = <0x23c0>,
+ <0x23d0>,
+ <0x2320>;
+ };
+
+ jtag-grp4 {
+ pinmux = <0x51>;
+ input-enable;
+ bias-pull-up;
+ slew-rate = <166>;
+ };
+ };
+ };
+
uart0: serial@401c8000 {
compatible = "nxp,s32g2-linflexuart",
"fsl,s32v234-linflexuart";
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
index 00070c949e2a..dbe498798bd9 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
@@ -34,5 +34,6 @@
};
&usdhc0 {
+ disable-wp;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
index b3fc12899cae..ab1e5caaeae7 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
@@ -40,5 +40,6 @@
};
&usdhc0 {
+ disable-wp;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/s32g3.dtsi b/arch/arm64/boot/dts/freescale/s32g3.dtsi
index c1b08992754b..b4226a9143c8 100644
--- a/arch/arm64/boot/dts/freescale/s32g3.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g3.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2024 NXP
*
* Authors: Ghennadi Procopciuc <ghennadi.procopciuc@nxp.com>
* Ciprian Costea <ciprianmarian.costea@nxp.com>
@@ -171,6 +171,56 @@
#size-cells = <1>;
ranges = <0 0 0 0x80000000>;
+ pinctrl: pinctrl@4009c240 {
+ compatible = "nxp,s32g2-siul2-pinctrl";
+ /* MSCR0-MSCR101 registers on siul2_0 */
+ reg = <0x4009c240 0x198>,
+ /* MSCR112-MSCR122 registers on siul2_1 */
+ <0x44010400 0x2c>,
+ /* MSCR144-MSCR190 registers on siul2_1 */
+ <0x44010480 0xbc>,
+ /* IMCR0-IMCR83 registers on siul2_0 */
+ <0x4009ca40 0x150>,
+ /* IMCR119-IMCR397 registers on siul2_1 */
+ <0x44010c1c 0x45c>,
+ /* IMCR430-IMCR495 registers on siul2_1 */
+ <0x440110f8 0x108>;
+
+ jtag_pins: jtag-pins {
+ jtag-grp0 {
+ pinmux = <0x0>;
+ input-enable;
+ bias-pull-up;
+ slew-rate = <166>;
+ };
+
+ jtag-grp1 {
+ pinmux = <0x11>;
+ slew-rate = <166>;
+ };
+
+ jtag-grp2 {
+ pinmux = <0x40>;
+ input-enable;
+ bias-pull-down;
+ slew-rate = <166>;
+ };
+
+ jtag-grp3 {
+ pinmux = <0x23c0>,
+ <0x23d0>,
+ <0x2320>;
+ };
+
+ jtag-grp4 {
+ pinmux = <0x51>;
+ input-enable;
+ bias-pull-up;
+ slew-rate = <166>;
+ };
+ };
+ };
+
uart0: serial@401c8000 {
compatible = "nxp,s32g3-linflexuart",
"fsl,s32v234-linflexuart";
diff --git a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
index 9d674819876e..176e5af191c8 100644
--- a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
+++ b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2024 NXP
*
* NXP S32G3 Reference Design Board 3 (S32G-VNP-RDB3)
*/
@@ -41,5 +41,6 @@
&usdhc0 {
bus-width = <8>;
+ disable-wp;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/s32v234.dtsi b/arch/arm64/boot/dts/freescale/s32v234.dtsi
index 42409ec56792..bf608ded5dda 100644
--- a/arch/arm64/boot/dts/freescale/s32v234.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32v234.dtsi
@@ -89,7 +89,7 @@
};
gic: interrupt-controller@7d001000 {
- compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ compatible = "arm,cortex-a15-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/mediatek/mt6357.dtsi b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
index 3330a03c2f74..5fafa842d312 100644
--- a/arch/arm64/boot/dts/mediatek/mt6357.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6357.dtsi
@@ -10,6 +10,11 @@
mt6357_pmic: pmic {
compatible = "mediatek,mt6357";
+ pmic_adc: adc {
+ compatible = "mediatek,mt6357-auxadc";
+ #io-channel-cells = <1>;
+ };
+
regulators {
mt6357_vproc_reg: buck-vproc {
regulator-name = "vproc";
diff --git a/arch/arm64/boot/dts/mediatek/mt6358.dtsi b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
index a1b96013f814..641d452fbc08 100644
--- a/arch/arm64/boot/dts/mediatek/mt6358.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
@@ -10,6 +10,11 @@
interrupt-controller;
#interrupt-cells = <2>;
+ pmic_adc: adc {
+ compatible = "mediatek,mt6358-auxadc";
+ #io-channel-cells = <1>;
+ };
+
mt6358codec: mt6358codec {
compatible = "mediatek,mt6358-sound";
mediatek,dmic-mode = <0>; /* two-wires */
diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
index df3e822232d3..8e1b8c85c6ed 100644
--- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
@@ -9,6 +9,11 @@
interrupt-controller;
#interrupt-cells = <2>;
+ pmic_adc: adc {
+ compatible = "mediatek,mt6359-auxadc";
+ #io-channel-cells = <1>;
+ };
+
mt6359codec: mt6359codec {
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
index 64aeeb24efac..b096009ef99c 100644
--- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
@@ -109,6 +109,48 @@
status = "disabled";
};
+ spi@11009000 {
+ compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm";
+ reg = <0 0x11009000 0 0x1000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_CB_M_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_SPI2_CK>,
+ <&infracfg CLK_INFRA_SPI2_HCK_CK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@1100a000 {
+ compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm";
+ reg = <0 0x1100a000 0 0x1000>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_CB_M_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_SPI0_CK>,
+ <&infracfg CLK_INFRA_SPI0_HCK_CK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@1100b000 {
+ compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm";
+ reg = <0 0x1100b000 0 0x1000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_CB_M_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_SPI1_CK>,
+ <&infracfg CLK_INFRA_SPI1_HCK_CK>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pio: pinctrl@11d00000 {
compatible = "mediatek,mt7981-pinctrl";
reg = <0 0x11d00000 0 0x1000>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
index fa4ab4d2899f..783c333107bc 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -91,16 +91,11 @@
&dsi0 {
status = "okay";
- /delete-property/#size-cells;
- /delete-property/#address-cells;
/delete-node/panel@0;
- ports {
- port {
- dsi_out: endpoint {
- remote-endpoint = <&anx7625_in>;
- };
- };
- };
+};
+
+&dsi_out {
+ remote-endpoint = <&anx7625_in>;
};
&i2c0 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 6345e969efae..22924f61ec9e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -24,7 +24,7 @@
backlight_lcd0: backlight_lcd0 {
compatible = "pwm-backlight";
pwms = <&pwm0 0 500000>;
- power-supply = <&bl_pp5000>;
+ power-supply = <&reg_vsys>;
enable-gpios = <&pio 176 0>;
brightness-levels = <0 1023>;
num-interpolated-steps = <1023>;
@@ -47,10 +47,9 @@
it6505_pp18_reg: regulator0 {
compatible = "regulator-fixed";
regulator-name = "it6505_pp18";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
gpio = <&pio 178 0>;
enable-active-high;
+ vin-supply = <&pp1800_alw>;
};
lcd_pp3300: regulator1 {
@@ -62,27 +61,16 @@
regulator-boot-on;
};
- bl_pp5000: regulator2 {
- compatible = "regulator-fixed";
- regulator-name = "bl_pp5000";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
mmc1_fixed_power: regulator3 {
compatible = "regulator-fixed";
regulator-name = "mmc1_power";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+ vin-supply = <&pp3300_alw>;
};
mmc1_fixed_io: regulator4 {
compatible = "regulator-fixed";
regulator-name = "mmc1_io";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ vin-supply = <&pp1800_alw>;
};
pp1800_alw: regulator5 {
@@ -92,6 +80,7 @@
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ vin-supply = <&reg_vsys>;
};
pp3300_alw: regulator6 {
@@ -101,6 +90,7 @@
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_vsys>;
};
/* system wide semi-regulated power rail from charger */
@@ -868,10 +858,6 @@
domain-supply = <&mt6358_vgpu_reg>;
};
-&soc_data {
- status = "okay";
-};
-
&spi0 {
pinctrl-names = "default";
pinctrl-0 = <&spi0_pins>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index fbf145639b8c..266441e999f2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -872,8 +872,6 @@
mfg_async: power-domain@MT8183_POWER_DOMAIN_MFG_ASYNC {
reg = <MT8183_POWER_DOMAIN_MFG_ASYNC>;
- clocks = <&topckgen CLK_TOP_MUX_MFG>;
- clock-names = "mfg";
#address-cells = <1>;
#size-cells = <0>;
#power-domain-cells = <1>;
@@ -1838,6 +1836,17 @@
phy-names = "dphy";
};
+ dpi0: dpi@14015000 {
+ compatible = "mediatek,mt8183-dpi";
+ reg = <0 0x14015000 0 0x1000>;
+ interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_DPI_IF>,
+ <&mmsys CLK_MM_DPI_MM>,
+ <&apmixedsys CLK_APMIXED_TVDPLL>;
+ clock-names = "pixel", "engine", "pll";
+ };
+
mutex: mutex@14016000 {
compatible = "mediatek,mt8183-disp-mutex";
reg = <0 0x14016000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index afdab5724eaa..682c6ad2574d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -169,7 +169,7 @@
adsp_mem: memory@60000000 {
compatible = "shared-dma-pool";
- reg = <0 0x60000000 0 0xA00000>;
+ reg = <0 0x60000000 0 0x1000000>;
no-map;
};
@@ -353,7 +353,8 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dpi_pins_default>;
pinctrl-1 = <&dpi_pins_sleep>;
- status = "okay";
+ /* TODO Re-enable after DP to Type-C port muxing can be described */
+ status = "disabled";
};
&dpi_out {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
index 4763ed5dc86c..148c332018b0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
@@ -13,6 +13,8 @@
#include <dt-bindings/power/mt8186-power.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/reset/mt8186-resets.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/thermal/mediatek,lvts-thermal.h>
/ {
compatible = "mediatek,mt8186";
@@ -731,7 +733,7 @@
opp-900000000-3 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <850000>;
- opp-supported-hw = <0x8>;
+ opp-supported-hw = <0xcf>;
};
opp-900000000-4 {
@@ -743,13 +745,13 @@
opp-900000000-5 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <825000>;
- opp-supported-hw = <0x30>;
+ opp-supported-hw = <0x20>;
};
opp-950000000-3 {
opp-hz = /bits/ 64 <950000000>;
opp-microvolt = <900000>;
- opp-supported-hw = <0x8>;
+ opp-supported-hw = <0xcf>;
};
opp-950000000-4 {
@@ -761,13 +763,13 @@
opp-950000000-5 {
opp-hz = /bits/ 64 <950000000>;
opp-microvolt = <850000>;
- opp-supported-hw = <0x30>;
+ opp-supported-hw = <0x20>;
};
opp-1000000000-3 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <950000>;
- opp-supported-hw = <0x8>;
+ opp-supported-hw = <0xcf>;
};
opp-1000000000-4 {
@@ -779,7 +781,7 @@
opp-1000000000-5 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <875000>;
- opp-supported-hw = <0x30>;
+ opp-supported-hw = <0x20>;
};
};
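
The opp-supported-hw masks are matched bitwise against the chip's speed-bin
efuse (the gpu_speedbin cell later in this file): an OPP stays available
only if the bit for the detected bin is set, so widening 0x8 to 0xcf makes
these rows valid for bins 0-3 and 6-7 instead of bin 3 alone, while
0x30 -> 0x20 restricts the lower-voltage rows to bin 5. That is my reading
of the generic OPP binding; the exact bin numbering belongs to the driver.
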
@@ -1361,6 +1363,29 @@
status = "disabled";
};
+ lvts: thermal-sensor@1100b000 {
+ compatible = "mediatek,mt8186-lvts";
+ reg = <0 0x1100b000 0 0x1000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_THERM>;
+ resets = <&infracfg_ao MT8186_INFRA_THERMAL_CTRL_RST>;
+ nvmem-cells = <&lvts_efuse_data1 &lvts_efuse_data2>;
+ nvmem-cell-names = "lvts-calib-data-1", "lvts-calib-data-2";
+ #thermal-sensor-cells = <1>;
+ };
+
+ svs: svs@1100bc00 {
+ compatible = "mediatek,mt8186-svs";
+ reg = <0 0x1100bc00 0 0x400>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_THERM>;
+ clock-names = "main";
+ nvmem-cells = <&svs_calibration>, <&lvts_efuse_data1>;
+ nvmem-cell-names = "svs-calibration-data", "t-calibration-data";
+ resets = <&infracfg_ao MT8186_INFRA_PTP_CTRL_RST>;
+ reset-names = "svs_rst";
+ };
+
pwm0: pwm@1100e000 {
compatible = "mediatek,mt8186-disp-pwm", "mediatek,mt8183-disp-pwm";
reg = <0 0x1100e000 0 0x1000>;
@@ -1676,6 +1701,18 @@
#address-cells = <1>;
#size-cells = <1>;
+ lvts_efuse_data1: lvts1-calib@1cc {
+ reg = <0x1cc 0x14>;
+ };
+
+ lvts_efuse_data2: lvts2-calib@2f8 {
+ reg = <0x2f8 0x14>;
+ };
+
+ svs_calibration: calib@550 {
+ reg = <0x550 0x50>;
+ };
+
gpu_speedbin: gpu-speedbin@59c {
reg = <0x59c 0x4>;
bits = <0 3>;
@@ -1824,6 +1861,7 @@
assigned-clocks = <&topckgen CLK_TOP_DPI>;
assigned-clock-parents = <&topckgen CLK_TOP_TVDPLL_D2>;
interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_LOW 0>;
+ power-domains = <&spm MT8186_POWER_DOMAIN_DIS>;
status = "disabled";
port {
@@ -2178,4 +2216,299 @@
power-domains = <&spm MT8186_POWER_DOMAIN_IPE>;
};
};
+
+ thermal_zones: thermal-zones {
+ cpu-little0-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts MT8186_LITTLE_CPU0>;
+
+ trips {
+ cpu_little0_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little0_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little0_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little0_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-little1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts MT8186_LITTLE_CPU1>;
+
+ trips {
+ cpu_little1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little1_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-little2-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts MT8186_LITTLE_CPU2>;
+
+ trips {
+ cpu_little2_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little2_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little2_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little2_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cam-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts MT8186_CAM>;
+
+ trips {
+ cam_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cam_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cam_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nna-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts MT8186_NNA>;
+
+ trips {
+ nna_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ nna_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nna_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ adsp-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts MT8186_ADSP>;
+
+ trips {
+ adsp_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ adsp_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ adsp_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts MT8186_GPU>;
+
+ trips {
+ gpu_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpu_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-big0-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&lvts MT8186_BIG_CPU0>;
+
+ trips {
+ cpu_big0_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_big0_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_big0_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_big0_alert0>;
+ cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-big1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&lvts MT8186_BIG_CPU1>;
+
+ trips {
+ cpu_big1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_big1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_big1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_big1_alert0>;
+ cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index 29d012d28edb..cd27966d2e3c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -12,6 +12,9 @@
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>
#include <dt-bindings/power/mediatek,mt8188-power.h>
+#include <dt-bindings/reset/mt8188-resets.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/thermal/mediatek,lvts-thermal.h>
/ {
compatible = "mediatek,mt8188";
@@ -417,6 +420,450 @@
method = "smc";
};
+ thermal_zones: thermal-zones {
+ cpu-little0-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU0>;
+
+ trips {
+ cpu_little0_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little0_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little0_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little0_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-little1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU1>;
+
+ trips {
+ cpu_little1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little1_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-little2-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU2>;
+
+ trips {
+ cpu_little2_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little2_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little2_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little2_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-little3-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <150>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU3>;
+
+ trips {
+ cpu_little3_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_little3_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_little3_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_little3_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-big0-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_BIG_CPU0>;
+
+ trips {
+ cpu_big0_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_big0_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_big0_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_big0_alert0>;
+ cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-big1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&lvts_mcu MT8188_MCU_BIG_CPU1>;
+
+ trips {
+ cpu_big1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_big1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_big1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_big1_alert0>;
+ cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ apu-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_APU>;
+
+ trips {
+ apu_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ apu_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ apu_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_GPU0>;
+
+ trips {
+ gpu_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpu_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpu1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_GPU1>;
+
+ trips {
+ gpu1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpu1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu1_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ adsp-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_ADSP>;
+
+ trips {
+ soc_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ soc_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ vdo-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_VDO>;
+
+ trips {
+ soc1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ soc1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ infra-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_INFRA>;
+
+ trips {
+ soc2_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc2_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ soc2_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cam1-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_CAM1>;
+
+ trips {
+ cam1_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cam1_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cam1_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cam2-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8188_AP_CAM2>;
+
+ trips {
+ cam2_alert0: trip-alert0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cam2_alert1: trip-alert1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cam2_crit: trip-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer: timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
@@ -464,6 +911,7 @@
compatible = "mediatek,mt8188-infracfg-ao", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pericfg: syscon@10003000 {
@@ -937,6 +1385,17 @@
status = "disabled";
};
+ lvts_ap: thermal-sensor@1100b000 {
+ compatible = "mediatek,mt8188-lvts-ap";
+ reg = <0 0x1100b000 0 0xc00>;
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_THERM>;
+ resets = <&infracfg_ao MT8188_INFRA_RST1_THERMAL_CTRL_RST>;
+ nvmem-cells = <&lvts_efuse_data1>;
+ nvmem-cell-names = "lvts-calib-data-1";
+ #thermal-sensor-cells = <1>;
+ };
+
spi1: spi@11010000 {
compatible = "mediatek,mt8188-spi-ipm", "mediatek,spi-ipm";
#address-cells = <1>;
@@ -1050,6 +1509,17 @@
status = "disabled";
};
+ lvts_mcu: thermal-sensor@11278000 {
+ compatible = "mediatek,mt8188-lvts-mcu";
+ reg = <0 0x11278000 0 0x1000>;
+ interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_THERM>;
+ resets = <&infracfg_ao MT8188_INFRA_RST1_THERMAL_MCU_RST>;
+ nvmem-cells = <&lvts_efuse_data1>;
+ nvmem-cell-names = "lvts-calib-data-1";
+ #thermal-sensor-cells = <1>;
+ };
+
i2c0: i2c@11280000 {
compatible = "mediatek,mt8188-i2c";
reg = <0 0x11280000 0 0x1000>,
@@ -1273,6 +1743,17 @@
#clock-cells = <1>;
};
+ efuse: efuse@11f20000 {
+ compatible = "mediatek,mt8188-efuse", "mediatek,efuse";
+ reg = <0 0x11f20000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ lvts_efuse_data1: lvts1-calib@1ac {
+ reg = <0x1ac 0x40>;
+ };
+ };
+
gpu: gpu@13000000 {
compatible = "mediatek,mt8188-mali", "arm,mali-valhall-jm";
reg = <0 0x13000000 0 0x4000>;
@@ -1287,6 +1768,7 @@
<&spm MT8188_POWER_DOMAIN_MFG3>,
<&spm MT8188_POWER_DOMAIN_MFG4>;
power-domain-names = "core0", "core1", "core2";
+ #cooling-cells = <2>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts
index 88123842c818..49664de99b88 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts
@@ -82,12 +82,17 @@
pins-low-power-hdmi-disable {
pinmux = <PINMUX_GPIO31__FUNC_GPIO31>,
<PINMUX_GPIO32__FUNC_GPIO32>,
- <PINMUX_GPIO33__FUNC_GPIO33>,
- <PINMUX_GPIO34__FUNC_GPIO34>,
- <PINMUX_GPIO35__FUNC_GPIO35>;
+ <PINMUX_GPIO33__FUNC_GPIO33>;
input-enable;
bias-pull-down;
};
+
+ pins-low-power-hdmi-rsel-disable {
+ pinmux = <PINMUX_GPIO34__FUNC_GPIO34>,
+ <PINMUX_GPIO35__FUNC_GPIO35>;
+ input-enable;
+ bias-pull-down = <75000>;
+ };
};
&sound {
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
index 2fe20e0dad83..2d6522c144b7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
@@ -19,13 +19,18 @@
pins-low-power-hdmi-disable {
pinmux = <PINMUX_GPIO31__FUNC_GPIO31>,
<PINMUX_GPIO32__FUNC_GPIO32>,
- <PINMUX_GPIO33__FUNC_GPIO33>,
- <PINMUX_GPIO34__FUNC_GPIO34>,
- <PINMUX_GPIO35__FUNC_GPIO35>;
+ <PINMUX_GPIO33__FUNC_GPIO33>;
input-enable;
bias-pull-down;
};
+ pins-low-power-hdmi-rsel-disable {
+ pinmux = <PINMUX_GPIO34__FUNC_GPIO34>,
+ <PINMUX_GPIO35__FUNC_GPIO35>;
+ input-enable;
+ bias-pull-down = <75000>;
+ };
+
pins-low-power-pcie0-disable {
pinmux = <PINMUX_GPIO19__FUNC_GPIO19>,
<PINMUX_GPIO20__FUNC_GPIO20>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
index dd294ca98194..9049d362a5e0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
@@ -20,13 +20,18 @@
pins-low-power-hdmi-disable {
pinmux = <PINMUX_GPIO31__FUNC_GPIO31>,
<PINMUX_GPIO32__FUNC_GPIO32>,
- <PINMUX_GPIO33__FUNC_GPIO33>,
- <PINMUX_GPIO34__FUNC_GPIO34>,
- <PINMUX_GPIO35__FUNC_GPIO35>;
+ <PINMUX_GPIO33__FUNC_GPIO33>;
input-enable;
bias-pull-down;
};
+ pins-low-power-hdmi-rsel-disable {
+ pinmux = <PINMUX_GPIO34__FUNC_GPIO34>,
+ <PINMUX_GPIO35__FUNC_GPIO35>;
+ input-enable;
+ bias-pull-down = <75000>;
+ };
+
pins-low-power-pcie0-disable {
pinmux = <PINMUX_GPIO19__FUNC_GPIO19>,
<PINMUX_GPIO20__FUNC_GPIO20>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index fe5400e17b0f..75d56b2d5a3d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -1228,10 +1228,6 @@
spi-max-frequency = <3000000>;
wakeup-source;
- keyboard-backlight {
- compatible = "google,cros-kbd-led-backlight";
- };
-
i2c_tunnel: i2c-tunnel {
compatible = "google,cros-ec-i2c-tunnel";
google,remote-bus = <0>;
@@ -1401,9 +1397,11 @@
&xhci1 {
status = "okay";
+ phys = <&u2port1 PHY_TYPE_USB2>;
rx-fifo-depth = <3072>;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
vbus-supply = <&usb_vbus>;
+ mediatek,u3p-dis-msk = <1>;
};
&xhci2 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 2ee45752583c..e89ba384c4aa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -1444,7 +1444,7 @@
<0 0x11293e00 0 0x0100>;
reg-names = "mac", "ippc";
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH 0>;
- phys = <&u2port1 PHY_TYPE_USB2>;
+ phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>;
assigned-clocks = <&topckgen CLK_TOP_USB_TOP_1P>,
<&topckgen CLK_TOP_SSUSB_XHCI_1P>;
assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>,
@@ -2037,6 +2037,7 @@
mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VPP0_MDP_RDMA_SOF>,
<CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE>;
+ mediatek,scp = <&scp>;
power-domains = <&spm MT8195_POWER_DOMAIN_VPPSYS0>;
iommus = <&iommu_vpp M4U_PORT_L4_MDP_RDMA>;
clocks = <&vppsys0 CLK_VPP0_MDP_RDMA>;
@@ -3251,10 +3252,10 @@
compatible = "mediatek,mt8195-dp-intf";
reg = <0 0x1c015000 0 0x1000>;
interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&vdosys0 CLK_VDO0_DP_INTF0>,
- <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+ clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+ <&vdosys0 CLK_VDO0_DP_INTF0>,
<&apmixedsys CLK_APMIXED_TVDPLL1>;
- clock-names = "engine", "pixel", "pll";
+ clock-names = "pixel", "engine", "pll";
status = "disabled";
};
@@ -3521,10 +3522,10 @@
reg = <0 0x1c113000 0 0x1000>;
interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
- clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
- <&vdosys1 CLK_VDO1_DPINTF>,
+ clocks = <&vdosys1 CLK_VDO1_DPINTF>,
+ <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
<&apmixedsys CLK_APMIXED_TVDPLL2>;
- clock-names = "engine", "pixel", "pll";
+ clock-names = "pixel", "engine", "pll";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
index 4211a992dd9d..7d90112a7e27 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
@@ -4,6 +4,7 @@
* Authors:
* Fabien Parent <fparent@baylibre.com>
* Bernhard Rosenkränzer <bero@baylibre.com>
+ * Alexandre Mergnat <amergnat@baylibre.com>
*/
/dts-v1/;
@@ -86,6 +87,28 @@
reg = <0 0x43200000 0 0x00c00000>;
};
};
+
+ sound: sound {
+ compatible = "mediatek,mt8365-mt6357";
+ pinctrl-names = "default",
+ "dmic",
+ "miso_off",
+ "miso_on",
+ "mosi_off",
+ "mosi_on";
+ pinctrl-0 = <&aud_default_pins>;
+ pinctrl-1 = <&aud_dmic_pins>;
+ pinctrl-2 = <&aud_miso_off_pins>;
+ pinctrl-3 = <&aud_miso_on_pins>;
+ pinctrl-4 = <&aud_mosi_off_pins>;
+ pinctrl-5 = <&aud_mosi_on_pins>;
+ mediatek,platform = <&afe>;
+ };
+};
+
+&afe {
+ mediatek,dmic-mode = <1>;
+ status = "okay";
};
&cpu0 {
@@ -178,9 +201,72 @@
interrupts-extended = <&pio 145 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
+ mediatek,micbias0-microvolt = <1900000>;
+ mediatek,micbias1-microvolt = <1700000>;
};
&pio {
+ aud_default_pins: audiodefault-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK>,
+ <MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK>,
+ <MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK>,
+ <MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO>;
+ };
+ };
+
+ aud_dmic_pins: audiodmic-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK>,
+ <MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0>,
+ <MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1>;
+ };
+ };
+
+ aud_miso_off_pins: misooff-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53>,
+ <MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54>,
+ <MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55>,
+ <MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56>;
+ input-enable;
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ aud_miso_on_pins: misoon-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO>,
+ <MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO>,
+ <MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0>,
+ <MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1>;
+ drive-strength = <6>;
+ };
+ };
+
+ aud_mosi_off_pins: mosioff-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49>,
+ <MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50>,
+ <MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51>,
+ <MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52>;
+ input-enable;
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ aud_mosi_on_pins: mosion-pins {
+ clk-dat-pins {
+ pinmux = <MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI>,
+ <MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI>,
+ <MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0>,
+ <MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1>;
+ drive-strength = <6>;
+ };
+ };
+
ethernet_pins: ethernet-pins {
phy_reset_pins {
pinmux = <MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
index eb449bfa8803..9c91fe8ea0f9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
@@ -2,9 +2,11 @@
/*
* (C) 2018 MediaTek Inc.
* Copyright (C) 2022 BayLibre SAS
- * Fabien Parent <fparent@baylibre.com>
- * Bernhard Rosenkränzer <bero@baylibre.com>
+ * Authors: Fabien Parent <fparent@baylibre.com>
+ * Bernhard Rosenkränzer <bero@baylibre.com>
+ * Alexandre Mergnat <amergnat@baylibre.com>
*/
+
#include <dt-bindings/clock/mediatek,mt8365-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -812,6 +814,43 @@
reg = <0 0x19020000 0 0x1000>;
#clock-cells = <1>;
};
+
+ afe: audio-controller@11220000 {
+ compatible = "mediatek,mt8365-afe-pcm";
+ reg = <0 0x11220000 0 0x1000>;
+ #sound-dai-cells = <0>;
+ clocks = <&clk26m>,
+ <&topckgen CLK_TOP_AUDIO_SEL>,
+ <&topckgen CLK_TOP_AUD_I2S0_M>,
+ <&topckgen CLK_TOP_AUD_I2S1_M>,
+ <&topckgen CLK_TOP_AUD_I2S2_M>,
+ <&topckgen CLK_TOP_AUD_I2S3_M>,
+ <&topckgen CLK_TOP_AUD_ENGEN1_SEL>,
+ <&topckgen CLK_TOP_AUD_ENGEN2_SEL>,
+ <&topckgen CLK_TOP_AUD_1_SEL>,
+ <&topckgen CLK_TOP_AUD_2_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S0_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S1_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S2_SEL>,
+ <&topckgen CLK_TOP_APLL_I2S3_SEL>;
+ clock-names = "top_clk26m_clk",
+ "top_audio_sel",
+ "audio_i2s0_m",
+ "audio_i2s1_m",
+ "audio_i2s2_m",
+ "audio_i2s3_m",
+ "engen1",
+ "engen2",
+ "aud1",
+ "aud2",
+ "i2s0_m_sel",
+ "i2s1_m_sel",
+ "i2s2_m_sel",
+ "i2s3_m_sel";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_AUDIO>;
+ status = "disabled";
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index a06610fff8ad..1ef6262b65c9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -904,8 +904,6 @@
};
&xhci1 {
- phys = <&u2port1 PHY_TYPE_USB2>,
- <&u3port1 PHY_TYPE_USB3>;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
index e4b2af9489a8..e2e75b8ff918 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
@@ -1111,6 +1111,7 @@
/* USB2.0 M.2 Key-B */
&xhci1 {
+ phys = <&u2port1 PHY_TYPE_USB2>;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
mediatek,u3p-dis-msk = <0x01>;
status = "okay";
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
index 4b5f6cf16f70..14ec970c4e49 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
@@ -894,10 +894,12 @@
};
&xhci1 {
+ phys = <&u2port1 PHY_TYPE_USB2>;
/* MT7921's USB Bluetooth has issues with USB2 LPM */
usb2-lpm-disable;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
vbus-supply = <&vsys>;
+ mediatek,u3p-dis-msk = <1>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts b/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts
index b89e2be6abae..9482bec1aa57 100644
--- a/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts
+++ b/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts
@@ -14,6 +14,10 @@
aliases {
serial0 = &uart0;
+ serial10 = &uart10;
+ serial12 = &uart12;
+ serial13 = &uart13;
+ serial14 = &uart14;
};
chosen {
@@ -33,10 +37,6 @@
};
};
-&uart0 {
- status = "okay";
-};
-
&clk {
assigned-clocks = <&clk CAPLL>,
<&clk DDRPLL>,
@@ -54,3 +54,75 @@
"integer",
"integer";
};
+
+&pinctrl {
+ uart-grp {
+ pinctrl_uart0: uart0-pins {
+ nuvoton,pins = <4 14 1>,
+ <4 15 1>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart10: uart10-pins {
+ nuvoton,pins = <7 4 2>,
+ <7 5 2>,
+ <7 6 2>,
+ <7 7 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart12: uart12-pins {
+ nuvoton,pins = <2 13 2>,
+ <2 14 2>,
+ <2 15 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart13: uart13-pins {
+ nuvoton,pins = <7 12 3>,
+ <7 13 3>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart14: uart14-pins {
+ nuvoton,pins = <7 14 2>,
+ <7 15 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "okay";
+};
+
+&uart10 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart10>;
+ status = "okay";
+};
+
+&uart12 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart12>;
+ status = "okay";
+};
+
+&uart13 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart13>;
+ status = "okay";
+};
+
+&uart14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart14>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts b/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts
index a1ebddecb7f8..f6f20a17e501 100644
--- a/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts
+++ b/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts
@@ -14,6 +14,10 @@
aliases {
serial0 = &uart0;
+ serial11 = &uart11;
+ serial12 = &uart12;
+ serial14 = &uart14;
+ serial16 = &uart16;
};
chosen {
@@ -33,10 +37,6 @@
};
};
-&uart0 {
- status = "okay";
-};
-
&clk {
assigned-clocks = <&clk CAPLL>,
<&clk DDRPLL>,
@@ -54,3 +54,78 @@
"integer",
"integer";
};
+
+&pinctrl {
+ uart-grp {
+ pinctrl_uart0: uart0-pins {
+ nuvoton,pins = <4 14 1>,
+ <4 15 1>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart11: uart11-pins {
+ nuvoton,pins = <11 0 2>,
+ <11 1 2>,
+ <11 2 2>,
+ <11 3 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart12: uart12-pins {
+ nuvoton,pins = <8 1 2>,
+ <8 2 2>,
+ <8 3 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart14: uart14-pins {
+ nuvoton,pins = <8 5 2>,
+ <8 6 2>,
+ <8 7 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+
+ pinctrl_uart16: uart16-pins {
+ nuvoton,pins = <10 0 2>,
+ <10 1 2>,
+ <10 2 2>,
+ <10 3 2>;
+ bias-disable;
+ power-source = <1>;
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "okay";
+};
+
+&uart11 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart11>;
+ status = "okay";
+};
+
+&uart12 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart12>;
+ status = "okay";
+};
+
+&uart14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart14>;
+ status = "okay";
+};
+
+&uart16 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart16>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi b/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi
index 781cdae566a0..e51b98f5bdce 100644
--- a/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi
+++ b/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi
@@ -83,7 +83,7 @@
ranges;
sys: system-management@40460000 {
- compatible = "nuvoton,ma35d1-reset";
+ compatible = "nuvoton,ma35d1-reset", "syscon";
reg = <0x0 0x40460000 0x0 0x200>;
#reset-cells = <1>;
};
@@ -95,6 +95,155 @@
clocks = <&clk_hxt>;
};
+ pinctrl: pinctrl@40040000 {
+ compatible = "nuvoton,ma35d1-pinctrl";
+ reg = <0x0 0x40040000 0x0 0xc00>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nuvoton,sys = <&sys>;
+ ranges = <0x0 0x0 0x40040000 0x400>;
+
+ gpioa: gpio@0 {
+ reg = <0x0 0x40>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPA_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiob: gpio@40 {
+ reg = <0x40 0x40>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPB_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpioc: gpio@80 {
+ reg = <0x80 0x40>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPC_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiod: gpio@c0 {
+ reg = <0xc0 0x40>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPD_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpioe: gpio@100 {
+ reg = <0x100 0x40>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPE_GATE>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiof: gpio@140 {
+ reg = <0x140 0x40>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPF_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiog: gpio@180 {
+ reg = <0x180 0x40>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPG_GATE>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpioh: gpio@1c0 {
+ reg = <0x1c0 0x40>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPH_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpioi: gpio@200 {
+ reg = <0x200 0x40>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPI_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpioj: gpio@240 {
+ reg = <0x240 0x40>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPJ_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiok: gpio@280 {
+ reg = <0x280 0x40>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPK_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiol: gpio@2c0 {
+ reg = <0x2c0 0x40>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPL_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpiom: gpio@300 {
+ reg = <0x300 0x40>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPM_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpion: gpio@340 {
+ reg = <0x340 0x40>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk GPN_GATE>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
uart0: serial@40700000 {
compatible = "nuvoton,ma35d1-uart";
reg = <0x0 0x40700000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 0ae5a44f7d07..c00db75e3910 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -33,6 +33,51 @@
status = "okay";
};
+ serial@70006300 {
+ /delete-property/ reg-shift;
+ status = "okay";
+ compatible = "nvidia,tegra30-hsuart";
+ reset-names = "serial";
+
+ bluetooth {
+ compatible = "brcm,bcm43540-bt";
+ device-wakeup-gpios = <&gpio TEGRA_GPIO(H, 3) GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(H, 5) IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "host-wakeup";
+ };
+ };
+
+ i2c@7000c400 {
+ status = "okay";
+
+ power-sensor@40 {
+ compatible = "ti,ina3221";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ input@0 {
+ reg = <0x0>;
+ label = "VDD_IN";
+ shunt-resistor-micro-ohms = <20000>;
+ };
+
+ input@1 {
+ reg = <0x1>;
+ label = "VDD_GPU";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+
+ input@2 {
+ reg = <0x2>;
+ label = "VDD_CPU";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+ };
+ };
+
i2c@7000c500 {
status = "okay";
@@ -295,6 +340,25 @@
nvidia,sys-clock-req-active-high;
};
+ mmc@700b0200 {
+ status = "okay";
+ bus-width = <4>;
+ non-removable;
+ power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
+ vqmmc-supply = <&vdd_1v8>;
+ vmmc-supply = <&vdd_3v3_sys>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wifi@1 {
+ compatible = "brcm,bcm4354-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(H, 2) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+ };
+
/* eMMC */
mmc@700b0600 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index b4a1108c2dd7..63b94a04308e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1319,6 +1319,56 @@
status = "okay";
clock-frequency = <100000>;
+ power-sensor@42 {
+ compatible = "ti,ina3221";
+ reg = <0x42>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ input@0 {
+ reg = <0x0>;
+ label = "VDD_MUX";
+ shunt-resistor-micro-ohms = <20000>;
+ };
+
+ input@1 {
+ reg = <0x1>;
+ label = "VDD_5V_IO_SYS";
+ shunt-resistor-micro-ohms = <5000>;
+ };
+
+ input@2 {
+ reg = <0x2>;
+ label = "VDD_3V3_SYS";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+ };
+
+ power-sensor@43 {
+ compatible = "ti,ina3221";
+ reg = <0x43>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ input@0 {
+ reg = <0x0>;
+ label = "VDD_3V3_IO";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+
+ input@1 {
+ reg = <0x1>;
+ label = "VDD_1V8_IO";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+
+ input@2 {
+ reg = <0x2>;
+ label = "VDD_M2_IN";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+ };
+
exp1: gpio@74 {
compatible = "ti,tca9539";
reg = <0x74>;
@@ -1517,6 +1567,7 @@
bus-width = <4>;
cd-gpios = <&gpio TEGRA_GPIO(Z, 1) GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
vqmmc-supply = <&vddio_sdmmc>;
vmmc-supply = <&vdd_3v3_sd>;
@@ -1603,7 +1654,7 @@
regulator-name = "VDD_3V3_SD";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio TEGRA_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_3v3_sys>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi
index cb792041fc62..d977f4901c09 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi
@@ -1,146 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-#include "tegra234.dtsi"
#include "tegra234-p3701.dtsi"
/ {
model = "NVIDIA Jetson AGX Orin";
compatible = "nvidia,p3701-0000", "nvidia,tegra234";
- bus@0 {
- i2c@3160000 {
- status = "okay";
-
- eeprom@50 {
- compatible = "atmel,24c02";
- reg = <0x50>;
-
- label = "module";
- vcc-supply = <&vdd_1v8_hs>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
- spi@3270000 {
- status = "okay";
-
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <102000000>;
- spi-tx-bus-width = <4>;
- spi-rx-bus-width = <4>;
- };
- };
-
- mmc@3400000 {
- status = "okay";
- bus-width = <4>;
- cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
- disable-wp;
- };
-
- mmc@3460000 {
- status = "okay";
- bus-width = <8>;
- non-removable;
- };
-
- padctl@3520000 {
- vclamp-usb-supply = <&vdd_1v8_ao>;
- avdd-usb-supply = <&vdd_3v3_ao>;
-
- ports {
- usb2-0 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-1 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-2 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-3 {
- vbus-supply = <&vdd_5v0_sys>;
- };
- };
- };
-
- rtc@c2a0000 {
- status = "okay";
- };
-
- pmc@c360000 {
- nvidia,invert-interrupt;
- };
- };
-
- vdd_5v0_sys: regulator-vdd-5v0-sys {
- compatible = "regulator-fixed";
- regulator-name = "VIN_SYS_5V0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v8_ls: regulator-vdd-1v8-ls {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_LS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_1v8_hs: regulator-vdd-1v8-hs {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_HS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_1v8_ao: regulator-vdd-1v8-ao {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_AO";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_3v3_ao: regulator-vdd-3v3-ao {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_AO";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- vdd_3v3_pcie: regulator-vdd-3v3-pcie {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_PCIE";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio TEGRA234_MAIN_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
- regulator-boot-on;
- enable-active-high;
- };
-
- vdd_12v_pcie: regulator-vdd-12v-pcie {
- compatible = "regulator-fixed";
- regulator-name = "VDD_12V_PCIE";
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- gpio = <&gpio TEGRA234_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>;
- regulator-boot-on;
- };
-
thermal-zones {
tj-thermal {
polling-delay = <1000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi
index 553fa4ba1cd4..0809634e5732 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi
@@ -1,145 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
-#include "tegra234.dtsi"
#include "tegra234-p3701.dtsi"
/ {
compatible = "nvidia,p3701-0008", "nvidia,tegra234";
- bus@0 {
- i2c@3160000 {
+ thermal-zones {
+ tj-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <1000>;
status = "okay";
- eeprom@50 {
- compatible = "atmel,24c02";
- reg = <0x50>;
- label = "module";
- vcc-supply = <&vdd_1v8_hs>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
- spi@3270000 {
- status = "okay";
-
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <102000000>;
- spi-tx-bus-width = <4>;
- spi-rx-bus-width = <4>;
- };
- };
-
- mmc@3460000 {
- status = "okay";
- bus-width = <8>;
- non-removable;
- };
-
- i2c@c240000 {
- status = "okay";
- };
-
- i2c@c250000 {
- power-sensor@41 {
- compatible = "ti,ina3221";
- reg = <0x41>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- input@0 {
- reg = <0x0>;
- label = "CVB_ATX_12V";
- shunt-resistor-micro-ohms = <2000>;
- };
-
- input@1 {
- reg = <0x1>;
- label = "CVB_ATX_3V3";
- shunt-resistor-micro-ohms = <2000>;
+ trips {
+ tj_trip_active0: active-0 {
+ temperature = <85000>;
+ hysteresis = <4000>;
+ type = "active";
};
- input@2 {
- reg = <0x2>;
- label = "CVB_ATX_5V";
- shunt-resistor-micro-ohms = <2000>;
+ tj_trip_active1: active-1 {
+ temperature = <105000>;
+ hysteresis = <4000>;
+ type = "active";
};
};
-
- power-sensor@44 {
- compatible = "ti,ina219";
- reg = <0x44>;
- shunt-resistor = <2000>;
- };
- };
-
- rtc@c2a0000 {
- status = "okay";
};
-
- pmc@c360000 {
- nvidia,invert-interrupt;
- };
- };
-
- bpmp {
- i2c {
- status = "okay";
-
- thermal-sensor@4c {
- status = "okay";
- reg = <0x4c>;
- vcc-supply = <&vdd_1v8_ao>;
- };
- };
-
- thermal {
- status = "okay";
- };
- };
-
- vdd_1v8_ao: regulator-vdd-1v8-ao {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_AO";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_1v8_hs: regulator-vdd-1v8-hs {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_HS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_1v8_ls: regulator-vdd-1v8-ls {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_LS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-
- vdd_3v3_ao: regulator-vdd-3v3-ao {
- compatible = "regulator-fixed";
- regulator-name = "vdd-AO-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- vdd_5v0_sys: regulator-vdd-5v0-sys {
- compatible = "regulator-fixed";
- regulator-name = "VIN_SYS_5V0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi
index 320c8e9b06b4..9086a0d010e5 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#include "tegra234.dtsi"
+
/ {
compatible = "nvidia,p3701", "nvidia,tegra234";
@@ -45,6 +47,63 @@
};
};
+ i2c@3160000 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ label = "module";
+ vcc-supply = <&vdd_1v8_hs>;
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ read-only;
+ };
+ };
+
+ spi@3270000 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <102000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+ };
+
+ mmc@3460000 {
+ status = "okay";
+ bus-width = <8>;
+ non-removable;
+ };
+
+ padctl@3520000 {
+ vclamp-usb-supply = <&vdd_1v8_ao>;
+ avdd-usb-supply = <&vdd_3v3_ao>;
+
+ ports {
+ usb2-0 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb2-1 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb2-2 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb2-3 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+ };
+ };
+
i2c@c240000 {
status = "okay";
@@ -97,5 +156,71 @@
};
};
};
+
+ rtc@c2a0000 {
+ status = "okay";
+ };
+
+ pmc@c360000 {
+ nvidia,invert-interrupt;
+ };
+ };
+
+ bpmp {
+ i2c {
+ status = "okay";
+
+ thermal-sensor@4c {
+ compatible = "ti,tmp451";
+ status = "okay";
+ reg = <0x4c>;
+ vcc-supply = <&vdd_1v8_ao>;
+ };
+ };
+
+ thermal {
+ status = "okay";
+ };
+ };
+
+ vdd_1v8_ao: regulator-vdd-1v8-ao {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8_AO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vdd_1v8_hs: regulator-vdd-1v8-hs {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8_HS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vdd_1v8_ls: regulator-vdd-1v8-ls {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8_LS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vdd_3v3_ao: regulator-vdd-3v3-ao {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_AO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdd_5v0_sys: regulator-vdd-5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "VIN_SYS_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
index 69db584253da..90f12277aede 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
@@ -3,9 +3,9 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/sound/rt5640.h>
#include "tegra234-p3701-0000.dtsi"
-#include "tegra234-p3737-0000.dtsi"
/ {
model = "NVIDIA Jetson AGX Orin Developer Kit";
@@ -22,23 +22,97 @@
};
bus@0 {
+ aconnect@2900000 {
+ ahub@2900800 {
+ i2s@2901000 {
+ ports {
+ port@1 {
+ endpoint {
+ dai-format = "i2s";
+ remote-endpoint = <&rt5640_ep>;
+ };
+ };
+ };
+ };
+ };
+ };
+
serial@3100000 {
compatible = "nvidia,tegra194-hsuart";
reset-names = "serial";
status = "okay";
};
+ i2c@3160000 {
+ status = "okay";
+
+ eeprom@56 {
+ compatible = "atmel,24c02";
+ reg = <0x56>;
+
+ label = "system";
+ vcc-supply = <&vdd_1v8_sys>;
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ read-only;
+ };
+ };
+
serial@31d0000 {
current-speed = <115200>;
status = "okay";
};
+ i2c@31e0000 {
+ status = "okay";
+
+ audio-codec@1c {
+ compatible = "realtek,rt5640";
+ reg = <0x1c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA234_MAIN_GPIO(AC, 5) GPIO_ACTIVE_HIGH>;
+ clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>;
+ clock-names = "mclk";
+ realtek,dmic1-data-pin = <RT5640_DMIC1_DATA_PIN_NONE>;
+ realtek,dmic2-data-pin = <RT5640_DMIC2_DATA_PIN_NONE>;
+ realtek,jack-detect-source = <RT5640_JD_SRC_HDA_HEADER>;
+ sound-name-prefix = "CVB-RT";
+
+ port {
+ rt5640_ep: endpoint {
+ remote-endpoint = <&i2s1_dap>;
+ mclk-fs = <256>;
+ };
+ };
+ };
+ };
+
+ pwm@3280000 {
+ status = "okay";
+ };
+
pwm@32a0000 {
assigned-clocks = <&bpmp TEGRA234_CLK_PWM3>;
assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
status = "okay";
};
+ pwm@32c0000 {
+ status = "okay";
+ };
+
+ pwm@32f0000 {
+ status = "okay";
+ };
+
+ mmc@3400000 {
+ status = "okay";
+ bus-width = <4>;
+ cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
+ disable-wp;
+ };
+
hda@3510000 {
nvidia,model = "NVIDIA Jetson AGX Orin HDA";
status = "okay";
@@ -341,8 +415,11 @@
};
};
- pwm-fan {
+ fan: pwm-fan {
+ compatible = "pwm-fan";
cooling-levels = <66 215 255>;
+ pwms = <&pwm3 0 45334>;
+ #cooling-cells = <2>;
};
serial {
@@ -444,4 +521,31 @@
};
};
};
+
+ vdd_1v8_sys: regulator-vdd-1v8-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8_SYS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vdd_3v3_pcie: regulator-vdd-3v3-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_PCIE";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio TEGRA234_MAIN_GPIO(H, 4) GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ enable-active-high;
+ };
+
+ vdd_12v_pcie: regulator-vdd-12v-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_12V_PCIE";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&gpio TEGRA234_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi
deleted file mode 100644
index eb79e80a9852..000000000000
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <dt-bindings/sound/rt5640.h>
-
-/ {
- compatible = "nvidia,p3737-0000";
-
- bus@0 {
- aconnect@2900000 {
- ahub@2900800 {
- i2s@2901000 {
- ports {
- port@1 {
- endpoint {
- dai-format = "i2s";
- remote-endpoint = <&rt5640_ep>;
- };
- };
- };
- };
- };
- };
-
- i2c@3160000 {
- status = "okay";
-
- eeprom@56 {
- compatible = "atmel,24c02";
- reg = <0x56>;
-
- label = "system";
- vcc-supply = <&vdd_1v8_sys>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
- i2c@31e0000 {
- status = "okay";
-
- audio-codec@1c {
- compatible = "realtek,rt5640";
- reg = <0x1c>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA234_MAIN_GPIO(AC, 5) GPIO_ACTIVE_HIGH>;
- clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>;
- clock-names = "mclk";
- realtek,dmic1-data-pin = <RT5640_DMIC1_DATA_PIN_NONE>;
- realtek,dmic2-data-pin = <RT5640_DMIC2_DATA_PIN_NONE>;
- realtek,jack-detect-source = <RT5640_JD_SRC_HDA_HEADER>;
- sound-name-prefix = "CVB-RT";
-
- port {
- rt5640_ep: endpoint {
- remote-endpoint = <&i2s1_dap>;
- mclk-fs = <256>;
- };
- };
- };
- };
-
- pwm@3280000 {
- status = "okay";
- };
-
- pwm@32c0000 {
- status = "okay";
- };
-
- pwm@32f0000 {
- status = "okay";
- };
- };
-
- fan: pwm-fan {
- compatible = "pwm-fan";
- pwms = <&pwm3 0 45334>;
- #cooling-cells = <2>;
- };
-
- vdd_1v8_sys: regulator-vdd-1v8-sys {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8_SYS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
-};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
index bac611d735c5..36e888053746 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
@@ -3,8 +3,8 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/sound/rt5640.h>
#include "tegra234-p3701-0008.dtsi"
-#include "tegra234-p3740-0002.dtsi"
/ {
model = "NVIDIA IGX Orin Development Kit";
@@ -20,6 +20,32 @@
};
bus@0 {
+ aconnect@2900000 {
+ ahub@2900800 {
+ i2s@2901300 {
+ ports {
+ port@1 {
+ endpoint {
+ dai-format = "i2s";
+ remote-endpoint = <&rt5640_ep>;
+ };
+ };
+ };
+ };
+
+ i2s@2901500 {
+ ports {
+ port@1 {
+ endpoint {
+ bitclock-master;
+ frame-master;
+ };
+ };
+ };
+ };
+ };
+ };
+
serial@3100000 {
compatible = "nvidia,tegra194-hsuart";
reset-names = "serial";
@@ -45,6 +71,40 @@
i2c@31c0000 {
status = "okay";
+ rt5640: audio-codec@1c {
+ compatible = "realtek,rt5640";
+ reg = <0x1c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA234_MAIN_GPIO(F, 3) GPIO_ACTIVE_HIGH>;
+ clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>;
+ clock-names = "mclk";
+
+ realtek,dmic1-data-pin = <RT5640_DMIC1_DATA_PIN_NONE>;
+ realtek,dmic2-data-pin = <RT5640_DMIC2_DATA_PIN_NONE>;
+ realtek,jack-detect-source = <RT5640_JD_SRC_HDA_HEADER>;
+
+ sound-name-prefix = "CVB-RT";
+
+ port {
+ rt5640_ep: endpoint {
+ remote-endpoint = <&i2s4_dap>;
+ mclk-fs = <256>;
+ };
+ };
+ };
+
+ /* carrier board ID EEPROM */
+ eeprom@55 {
+ compatible = "atmel,24c02";
+ reg = <0x55>;
+
+ label = "system";
+ vcc-supply = <&vdd_1v8_ls>;
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ read-only;
+ };
};
i2c@31e0000 {
@@ -60,6 +120,115 @@
status = "okay";
};
+ padctl@3520000 {
+ status = "okay";
+
+ pads {
+ usb2 {
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-3 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ usb3 {
+ lanes {
+ usb3-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb3-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb3-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ mode = "otg";
+ usb-role-switch;
+ status = "okay";
+ };
+
+ usb2-1 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb2-2 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb2-3 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <2>;
+ status = "okay";
+ };
+
+ usb3-1 {
+ nvidia,usb2-companion = <0>;
+ status = "okay";
+ };
+
+ usb3-2 {
+ nvidia,usb2-companion = <1>;
+ status = "okay";
+ };
+ };
+ };
+
+ usb@3550000 {
+ status = "okay";
+
+ phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>;
+ phy-names = "usb2-0", "usb3-0";
+ };
+
+ usb@3610000 {
+ status = "okay";
+
+ phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>,
+ <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
+ <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>,
+ <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>;
+ phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3",
+ "usb3-0", "usb3-1", "usb3-2";
+ };
+
fuse@3810000 {
status = "okay";
};
@@ -70,6 +239,37 @@
i2c@c250000 {
status = "okay";
+
+ power-sensor@41 {
+ compatible = "ti,ina3221";
+ reg = <0x41>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ input@0 {
+ reg = <0x0>;
+ label = "CVB_ATX_12V";
+ shunt-resistor-micro-ohms = <2000>;
+ };
+
+ input@1 {
+ reg = <0x1>;
+ label = "CVB_ATX_3V3";
+ shunt-resistor-micro-ohms = <2000>;
+ };
+
+ input@2 {
+ reg = <0x2>;
+ label = "CVB_ATX_5V";
+ shunt-resistor-micro-ohms = <2000>;
+ };
+ };
+
+ power-sensor@44 {
+ compatible = "ti,ina219";
+ reg = <0x44>;
+ shunt-resistor = <2000>;
+ };
};
host1x@13e00000 {
@@ -235,4 +435,32 @@
"CVB-RT DMIC1", "CVB-RT MIC",
"CVB-RT DMIC2", "CVB-RT MIC";
};
+
+ vdd_3v3_dp: regulator-vdd-3v3-dp {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_DP";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_3v3_sys>;
+ gpio = <&gpio TEGRA234_MAIN_GPIO(H, 6) 0>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ vdd_3v3_sys: regulator-vdd-3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd_3v3_wifi: regulator-vdd-3v3-wifi {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_WIFI";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio TEGRA234_MAIN_GPIO(G, 3) GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ enable-active-high;
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi
deleted file mode 100644
index 527f2f3aee3a..000000000000
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi
+++ /dev/null
@@ -1,215 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <dt-bindings/sound/rt5640.h>
-
-/ {
- compatible = "nvidia,p3740-0002";
-
- bus@0 {
- aconnect@2900000 {
- ahub@2900800 {
- i2s@2901300 {
- ports {
- port@1 {
- endpoint {
- dai-format = "i2s";
- remote-endpoint = <&rt5640_ep>;
- };
- };
- };
- };
-
- i2s@2901500 {
- ports {
- port@1 {
- endpoint {
- bitclock-master;
- frame-master;
- };
- };
- };
- };
- };
- };
-
- i2c@31c0000 {
- rt5640: audio-codec@1c {
- compatible = "realtek,rt5640";
- reg = <0x1c>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA234_MAIN_GPIO(F, 3) GPIO_ACTIVE_HIGH>;
- clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>;
- clock-names = "mclk";
-
- realtek,dmic1-data-pin = <RT5640_DMIC1_DATA_PIN_NONE>;
- realtek,dmic2-data-pin = <RT5640_DMIC2_DATA_PIN_NONE>;
- realtek,jack-detect-source = <RT5640_JD_SRC_HDA_HEADER>;
-
- sound-name-prefix = "CVB-RT";
-
- port {
- rt5640_ep: endpoint {
- remote-endpoint = <&i2s4_dap>;
- mclk-fs = <256>;
- };
- };
- };
-
- /* carrier board ID EEPROM */
- eeprom@55 {
- compatible = "atmel,24c02";
- reg = <0x55>;
-
- label = "system";
- vcc-supply = <&vdd_1v8_ls>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
- padctl@3520000 {
- vclamp-usb-supply = <&vdd_1v8_ao>;
- avdd-usb-supply = <&vdd_3v3_ao>;
- status = "okay";
-
- pads {
- usb2 {
- lanes {
- usb2-0 {
- nvidia,function = "xusb";
- status = "okay";
- };
-
- usb2-1 {
- nvidia,function = "xusb";
- status = "okay";
- };
-
- usb2-2 {
- nvidia,function = "xusb";
- status = "okay";
- };
-
- usb2-3 {
- nvidia,function = "xusb";
- status = "okay";
- };
- };
- };
-
- usb3 {
- lanes {
- usb3-0 {
- nvidia,function = "xusb";
- status = "okay";
- };
-
- usb3-1 {
- nvidia,function = "xusb";
- status = "okay";
- };
-
- usb3-2 {
- nvidia,function = "xusb";
- status = "okay";
- };
- };
- };
- };
-
- ports {
- usb2-0 {
- mode = "otg";
- usb-role-switch;
- status = "okay";
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-1 {
- mode = "host";
- status = "okay";
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-2 {
- mode = "host";
- status = "okay";
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-3 {
- mode = "host";
- status = "okay";
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb3-0 {
- nvidia,usb2-companion = <2>;
- status = "okay";
- };
-
- usb3-1 {
- nvidia,usb2-companion = <0>;
- status = "okay";
- };
-
- usb3-2 {
- nvidia,usb2-companion = <1>;
- status = "okay";
- };
- };
- };
-
- usb@3550000 {
- status = "okay";
-
- phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>,
- <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>;
- phy-names = "usb2-0", "usb3-0";
- };
-
- usb@3610000 {
- status = "okay";
-
- phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>,
- <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
- <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>,
- <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>,
- <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>,
- <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>,
- <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>;
- phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3",
- "usb3-0", "usb3-1", "usb3-2";
- };
- };
-
- vdd_3v3_dp: regulator-vdd-3v3-dp {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_DP";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vdd_3v3_sys>;
- gpio = <&gpio TEGRA234_MAIN_GPIO(H, 6) 0>;
- enable-active-high;
- regulator-always-on;
- };
-
- vdd_3v3_sys: regulator-vdd-3v3-sys {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_SYS";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- vdd_3v3_wifi: regulator-vdd-3v3-wifi {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_WIFI";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio TEGRA234_MAIN_GPIO(G, 3) GPIO_ACTIVE_HIGH>;
- regulator-boot-on;
- enable-active-high;
- };
-};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
index 82a59e33c46c..5dc974bb360c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts
@@ -7,24 +7,7 @@
compatible = "nvidia,p3768-0000+p3767-0000", "nvidia,p3767-0000", "nvidia,tegra234";
model = "NVIDIA Jetson Orin NX Engineering Reference Developer Kit";
- aliases {
- serial1 = &uarta;
- serial2 = &uarte;
- };
-
bus@0 {
- serial@3100000 {
- compatible = "nvidia,tegra194-hsuart";
- reset-names = "serial";
- status = "okay";
- };
-
- serial@3140000 {
- compatible = "nvidia,tegra194-hsuart";
- reset-names = "serial";
- status = "okay";
- };
-
hda@3510000 {
nvidia,model = "NVIDIA Jetson Orin NX HDA";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
index 6d64a24fa251..19340d13f789 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
@@ -9,6 +9,8 @@
aliases {
serial0 = &tcu;
+ serial1 = &uarta;
+ serial2 = &uarte;
};
chosen {
@@ -16,6 +18,18 @@
};
bus@0 {
+ serial@3100000 {
+ compatible = "nvidia,tegra194-hsuart";
+ reset-names = "serial";
+ status = "okay";
+ };
+
+ serial@3140000 {
+ compatible = "nvidia,tegra194-hsuart";
+ reset-names = "serial";
+ status = "okay";
+ };
+
i2c@3160000 {
status = "okay";
@@ -172,6 +186,18 @@
phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
};
+ pcie-ep@14160000 { /* C4 - End Point */
+ phys = <&p2u_hsio_4>, <&p2u_hsio_5>, <&p2u_hsio_6>,
+ <&p2u_hsio_7>;
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
+ reset-gpios = <&gpio
+ TEGRA234_MAIN_GPIO(L, 1)
+ GPIO_ACTIVE_LOW>;
+ nvidia,refclk-select-gpios = <&gpio_aon
+ TEGRA234_AON_GPIO(AA, 4)
+ GPIO_ACTIVE_HIGH>;
+ };
+
/* C7 - M.2 Key-M */
pcie@141e0000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index f2e2d8d6845b..984c85eab41a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -2763,6 +2763,8 @@
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA234_CLK_UARTA>;
resets = <&bpmp TEGRA234_RESET_UARTA>;
+ dmas = <&gpcdma 8>, <&gpcdma 8>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -4840,6 +4842,37 @@
status = "disabled";
};
+ pcie-ep@14160000 {
+ compatible = "nvidia,tegra234-pcie-ep";
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BB>;
+ reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x36080000 0x0 0x00040000 /* DBI space (256K) */
+ 0x21 0x40000000 0x3 0x00000000>; /* Address Space (12G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+ num-lanes = <4>;
+ clocks = <&bpmp TEGRA234_CLK_PEX0_C4_CORE>;
+ clock-names = "core";
+ resets = <&bpmp TEGRA234_RESET_PEX0_CORE_4_APB>,
+ <&bpmp TEGRA234_RESET_PEX0_CORE_4>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+ nvidia,bpmp = <&bpmp 4>;
+ nvidia,enable-ext-refclk;
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ interconnects = <&mc TEGRA234_MEMORY_CLIENT_PCIE4R &emc>,
+ <&mc TEGRA234_MEMORY_CLIENT_PCIE4W &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu_niso0 TEGRA234_SID_PCIE4>;
+ dma-coherent;
+ status = "disabled";
+ };
+
pcie@14180000 {
compatible = "nvidia,tegra234-pcie";
power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BA>;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 0e5c810304fb..ae002c7cf126 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -48,18 +48,24 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-grandmax.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-grandprimelte.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-gt510.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-gt58.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j3ltetw.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j5.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j5x.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-rossa.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-serranove.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-thwc-uf896.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-thwc-ufi001c.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86518.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86528.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt88047.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8929-wingtech-wt82918hd.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-huawei-kiwi.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-longcheer-l9100.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-samsung-a7.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-sony-xperia-kanuti-tulip.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8939-wingtech-wt82918.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8939-wingtech-wt82918hd.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8953-motorola-potter.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8953-xiaomi-daisy.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8953-xiaomi-mido.dtb
@@ -69,6 +75,7 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8956-sony-xperia-loire-kugo.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8956-sony-xperia-loire-suzu.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-bullhead-rev-10.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-h815.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-msft-lumia-octagon-talkman.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-xiaomi-libra.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8994-huawei-angler-rev-101.dtb
@@ -261,7 +268,10 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk-display-card.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus13.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus15.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-qcp.dtb
diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
index 0a74ed4f72cc..71328b223531 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/clock/qcom,apss-ipq.h>
#include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5332.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
@@ -208,6 +209,7 @@
reg = <0x01800000 0x80000>;
#clock-cells = <1>;
#reset-cells = <1>;
+ #interconnect-cells = <1>;
clocks = <&xo_board>,
<&sleep_clk>,
<0>,
@@ -327,11 +329,9 @@
"dm_hs_phy_irq";
clocks = <&gcc GCC_USB0_MASTER_CLK>,
- <&gcc GCC_SNOC_USB_CLK>,
<&gcc GCC_USB0_SLEEP_CLK>,
<&gcc GCC_USB0_MOCK_UTMI_CLK>;
clock-names = "core",
- "iface",
"sleep",
"mock_utmi";
@@ -342,6 +342,9 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ interconnects = <&gcc MASTER_SNOC_USB &gcc SLAVE_SNOC_USB>,
+ <&gcc MASTER_SNOC_USB &gcc SLAVE_SNOC_USB>;
+ interconnect-names = "usb-ddr", "apps-usb";
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index e1e45da7f787..8edd535a188f 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -168,7 +168,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-ipq6018";
+ compatible = "qcom,rpm-ipq6018", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
regulators {
diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
index 48dfafea46a7..08a82a5cf667 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -181,7 +181,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-ipq9574";
+ compatible = "qcom,rpm-ipq9574", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
index 3b7fdb6797a9..2cc54eaf7202 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
@@ -125,6 +125,26 @@
};
};
};
+
+ flash-led-controller@53 {
+ compatible = "silergy,sy7802";
+ reg = <0x53>;
+
+ enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&camera_rear_flash_default>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <0>, <1>;
+ };
+ };
};
&blsp_i2c3 {
@@ -278,6 +298,13 @@
bias-disable;
};
+ camera_rear_flash_default: camera-rear-flash-default-state {
+ pins = "gpio9", "gpio16", "gpio117";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
gpio_keys_default: gpio-keys-default-state {
pins = "gpio107";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
index 81b3e0760154..7a7e99b015d9 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi
@@ -262,6 +262,8 @@
pinctrl-0 = <&tsp_int_default>;
pinctrl-names = "default";
+
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
index 135df1739dbd..5ddb69bf8e78 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts
@@ -47,12 +47,34 @@
constant-charge-voltage-max-microvolt = <4400000>;
};
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen@50 {
+ compatible = "imagis,ist3038";
+ reg = <0x50>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+
+ vdd-supply = <&reg_vdd_tsp_a>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&ts_int_default>;
+ pinctrl-names = "default";
+
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
+ };
+};
+
&reg_motor_vdd {
gpio = <&tlmm 72 GPIO_ACTIVE_HIGH>;
};
&reg_touch_key {
- status = "disabled";
+ status = "disabled"; /* Using Imagis touch key */
};
&sound {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts
index 579312ed53ce..3d6d9dd3b8a8 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts
@@ -75,6 +75,7 @@
touchscreen-size-x = <768>;
touchscreen-size-y = <1024>;
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
vcca-supply = <&reg_vdd_tsp>;
vdd-supply = <&pm8916_l6>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi
new file mode 100644
index 000000000000..1d74cccc438a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "msm8916-samsung-j5-common.dtsi"
+
+/ {
+ reserved-memory {
+ /delete-node/ tz-apps@85500000;
+
+ /* Additional memory used by Samsung firmware modifications */
+ tz-apps@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+ };
+
+ reg_vdd_tsp_a: regulator-vdd-tsp-a {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_tsp_a";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ gpio = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&tsp_ldo_en_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&accelerometer {
+ vdd-supply = <&pm8916_l5>;
+ vddio-supply = <&pm8916_l5>;
+
+ mount-matrix = "0", "-1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
+};
+
+&gpio_hall_sensor {
+ status = "disabled";
+};
+
+&i2c_muic {
+ /* GPIO pins vary depending on model variant */
+};
+
+&i2c_sensors {
+ /* GPIO pins vary depending on model variant */
+};
+
+&touchscreen {
+ vdd-supply = <&reg_vdd_tsp_a>;
+};
+
+&tlmm {
+ tsp_ldo_en_default: tsp-ldo-en-default-state {
+ pins = "gpio16";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts
new file mode 100644
index 000000000000..a26d2fd13c94
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-samsung-j3-common.dtsi"
+
+/ {
+ model = "Samsung Galaxy J3 (2016) (SM-J320YZ)";
+ compatible = "samsung,j3ltetw", "qcom,msm8916";
+ chassis-type = "handset";
+};
+
+&i2c_muic {
+ sda-gpios = <&tlmm 0 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&tlmm 1 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+};
+
+&i2c_sensors {
+ /* I2C2 */
+ sda-gpios = <&tlmm 6 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&tlmm 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+};
+
+&muic_i2c_default {
+ pins = "gpio0", "gpio1";
+};
+
+&sensors_i2c_default {
+ /* I2C2 */
+ pins = "gpio6", "gpio7";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
index 1981bb71f6a9..3413b0970c4a 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts
@@ -16,6 +16,26 @@
constant-charge-voltage-max-microvolt = <4400000>;
};
+&blsp_i2c5 {
+ touchscreen@50 {
+ compatible = "imagis,ist3038";
+ reg = <0x50>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+
+ vdd-supply = <&reg_vdd_tsp_a>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&tsp_int_default>;
+ pinctrl-names = "default";
+
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
+ };
+};
+
&mpss_mem {
/* Firmware for rossa needs more space */
reg = <0x0 0x86800000 0x0 0x5800000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts
new file mode 100644
index 000000000000..3cfa80e38a9e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-wingtech-wt865x8.dtsi"
+
+/ {
+ model = "Lenovo A6000 (Wingtech WT86518)";
+ compatible = "wingtech,wt86518", "qcom,msm8916";
+ chassis-type = "handset";
+
+ speaker_amp: audio-amplifier {
+ compatible = "awinic,aw8738";
+
+ pinctrl-0 = <&spk_ext_pa_default>;
+ pinctrl-names = "default";
+
+ mode-gpios = <&tlmm 119 GPIO_ACTIVE_HIGH>;
+ sound-name-prefix = "Speaker Amp";
+ awinic,mode = <1>;
+ };
+};
+
+&blsp_i2c2 {
+ accelerometer@e {
+ compatible = "kionix,kxcj91008";
+ reg = <0xe>;
+
+ vdd-supply = <&pm8916_l6>;
+ vddio-supply = <&pm8916_l6>;
+
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+ };
+};
+
+&headphones_switch {
+ VCC-supply = <&pm8916_l17>;
+};
+
+&pm8916_bms {
+ power-supplies = <&pm8916_charger>;
+};
+
+&pm8916_charger {
+ qcom,fast-charge-safe-current = <900000>;
+ qcom,fast-charge-safe-voltage = <4300000>;
+
+ monitored-battery = <&battery>;
+
+ status = "okay";
+};
+
+&sound {
+ model = "wt88047";
+ widgets = "Speaker", "Speaker",
+ "Headphone", "Headphones";
+ pin-switches = "Speaker", "Headphones";
+ audio-routing = "Speaker", "Speaker Amp OUT",
+ "Speaker Amp IN", "HPH_R",
+ "Headphones", "Headphones Switch OUTL",
+ "Headphones", "Headphones Switch OUTR",
+ "Headphones Switch INL", "HPH_L",
+ "Headphones Switch INR", "HPH_R",
+ "AMIC1", "MIC BIAS Internal1",
+ "AMIC2", "MIC BIAS Internal2";
+ aux-devs = <&speaker_amp>, <&headphones_switch>;
+};
+
+&usb {
+ dr_mode = "peripheral";
+ extcon = <&pm8916_charger>;
+};
+
+&usb_hs_phy {
+ extcon = <&pm8916_charger>;
+};
+
+&tlmm {
+ spk_ext_pa_default: spk-ext-pa-default-state {
+ pins = "gpio119";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts
new file mode 100644
index 000000000000..ec2c4dcd3ead
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-wingtech-wt865x8.dtsi"
+
+/ {
+ model = "Lenovo A6010 (Wingtech WT86528)";
+ compatible = "wingtech,wt86528", "qcom,msm8916";
+ chassis-type = "handset";
+
+ /* left AW8736 */
+ speaker_amp_left: audio-amplifier-left {
+ compatible = "awinic,aw8738";
+
+ pinctrl-0 = <&spk_ext_pa_left_default>;
+ pinctrl-names = "default";
+
+ mode-gpios = <&tlmm 119 GPIO_ACTIVE_HIGH>;
+ sound-name-prefix = "Speaker Amp L";
+ awinic,mode = <3>;
+ };
+
+ /* right AW8736 */
+ speaker_amp_right: audio-amplifier-right {
+ compatible = "awinic,aw8738";
+
+ pinctrl-0 = <&spk_ext_pa_right_default>;
+ pinctrl-names = "default";
+
+ mode-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>;
+ sound-name-prefix = "Speaker Amp R";
+ awinic,mode = <3>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ pinctrl-0 = <&gpio_leds_default>;
+ pinctrl-names = "default";
+
+ led-0 {
+ gpios = <&tlmm 16 GPIO_ACTIVE_LOW>;
+ label = "red";
+ default-state = "off";
+ retain-state-suspended;
+ };
+
+ led-1 {
+ gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
+ label = "green";
+ default-state = "off";
+ retain-state-suspended;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb_id_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_i2c2 {
+ magnetometer@c {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0c>;
+
+ vdd-supply = <&pm8916_l17>;
+ vid-supply = <&pm8916_l6>;
+ };
+
+ imu@68 {
+ compatible = "invensense,mpu6880";
+ reg = <0x68>;
+
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-0 = <&imu_default>;
+ pinctrl-names = "default";
+
+ mount-matrix = "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1";
+ };
+};
+
+&pm8916_codec {
+ qcom,micbias1-ext-cap;
+};
+
+&sound {
+ model = "wt86528";
+ widgets = "Speaker", "Speaker",
+ "Headphone", "Headphones";
+ pin-switches = "Speaker", "Headphones";
+ audio-routing = "Speaker", "Speaker Amp L OUT",
+ "Speaker", "Speaker Amp R OUT",
+ "Speaker Amp L IN", "HPH_L",
+ "Speaker Amp R IN", "HPH_R",
+ "Headphones", "Headphones Switch OUTL",
+ "Headphones", "Headphones Switch OUTR",
+ "Headphones Switch INL", "HPH_L",
+ "Headphones Switch INR", "HPH_R",
+ "AMIC1", "MIC BIAS External1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+ aux-devs = <&speaker_amp_left>, <&speaker_amp_right>, <&headphones_switch>;
+};
+
+&usb {
+ extcon = <&usb_id>, <&usb_id>;
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&tlmm {
+ gpio_leds_default: gpio-leds-default-state {
+ pins = "gpio16", "gpio17";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ imu_default: imu-default-state {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ spk_ext_pa_left_default: spk-ext-pa-left-default-state {
+ pins = "gpio119";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ spk_ext_pa_right_default: spk-ext-pa-right-default-state {
+ pins = "gpio121";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb_id_default: usb-id-default-state {
+ pins = "gpio110";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi
new file mode 100644
index 000000000000..1a7c347dc3f0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "msm8916-pm8916.dtsi"
+#include "msm8916-modem-qdsp6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+/ {
+ aliases {
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ headphones_switch: audio-switch {
+ compatible = "simple-audio-amplifier";
+
+ pinctrl-0 = <&headphones_switch_default>;
+ pinctrl-names = "default";
+
+ enable-gpios = <&tlmm 120 GPIO_ACTIVE_HIGH>;
+ sound-name-prefix = "Headphones Switch";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pm8916_pwm 0 100000>;
+
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <255>;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3400000>;
+ voltage-max-design-microvolt = <4350000>;
+ energy-full-design-microwatt-hours = <8740000>;
+ charge-full-design-microamp-hours = <2300000>;
+
+ ocv-capacity-celsius = <25>;
+ ocv-capacity-table-0 = <4328000 100>, <4266000 95>, <4208000 90>,
+ <4154000 85>, <4102000 80>, <4062000 75>, <3992000 70>,
+ <3960000 65>, <3914000 60>, <3870000 55>, <3840000 50>,
+ <3818000 45>, <3800000 40>, <3784000 35>, <3770000 30>,
+ <3756000 25>, <3736000 20>, <3714000 16>, <3696000 13>,
+ <3690000 11>, <3689000 10>, <3688000 9>, <3686000 8>,
+ <3682000 7>, <3670000 6>, <3639000 5>, <3592000 4>,
+ <3530000 3>, <3448000 2>, <3320000 1>, <3000000 0>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ volume-up-button {
+ label = "Volume Up";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+};
+
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5306";
+ reg = <0x38>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>;
+
+ vcc-supply = <&pm8916_l17>;
+ iovcc-supply = <&pm8916_l6>;
+
+ reset-gpios = <&tlmm 12 GPIO_ACTIVE_LOW>;
+
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+
+ pinctrl-0 = <&touchscreen_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_uart2 {
+ status = "okay";
+};
+
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x5500000>;
+};
+
+&pm8916_bms {
+ monitored-battery = <&battery>;
+ status = "okay";
+};
+
+&pm8916_codec {
+ qcom,micbias-lvl = <2800>;
+ qcom,mbhc-vthreshold-low = <75 150 237 450 500>;
+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
+ qcom,hphl-jack-type-normally-open;
+};
+
+&pm8916_pwm {
+ pinctrl-0 = <&pwm_out>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&pm8916_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default>;
+ pinctrl-1 = <&sdc2_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ non-removable;
+
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+};
+
+&venus {
+ status = "okay";
+};
+
+&venus_mem {
+ status = "okay";
+};
+
+&wcnss {
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+&tlmm {
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio107";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ headphones_switch_default: headphones-switch-default-state {
+ pins = "gpio120";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ touchscreen_default: touchscreen-default-state {
+ touchscreen-pins {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ reset-pins {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+};
+
+&pm8916_mpps {
+ pwm_out: mpp4-state {
+ pins = "mpp4";
+ function = "digital";
+ power-source = <PM8916_MPP_VPH>;
+ output-low;
+ qcom,dtest = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 7383bcc603ab..0ee44706b70b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -312,7 +312,7 @@
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8916";
+ compatible = "qcom,rpm-msm8916", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi
new file mode 100644
index 000000000000..c2bf25997e9b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * msm8929-pm8916.dtsi describes common properties (e.g. regulator connections)
+ * that apply to most devices that make use of the MSM8929 SoC and PM8916 PMIC.
+ * Many regulators have a fixed purpose in the original reference design and
+ * were rarely re-used for different purposes. Devices that deviate from the
+ * typical reference design should not make use of this include and instead add
+ * the necessary properties in the board-specific device tree.
+ */
+
+#include "msm8929.dtsi"
+#include "pm8916.dtsi"
+
+&mdss_dsi0 {
+ vdda-supply = <&pm8916_l2>;
+ vddio-supply = <&pm8916_l6>;
+};
+
+&mdss_dsi0_phy {
+ vddio-supply = <&pm8916_l6>;
+};
+
+&mdss_dsi1 {
+ vdda-supply = <&pm8916_l2>;
+ vddio-supply = <&pm8916_l6>;
+};
+
+&mdss_dsi1_phy {
+ vddio-supply = <&pm8916_l6>;
+};
+
+&mpss {
+ pll-supply = <&pm8916_l7>;
+};
+
+&pm8916_codec {
+ vdd-cdc-io-supply = <&pm8916_l5>;
+ vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+ vdd-micbias-supply = <&pm8916_l13>;
+};
+
+&rpm_requests {
+ pm8916_rpm_regulators: regulators {
+ compatible = "qcom,rpm-pm8916-regulators";
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ /* pm8916_s1 is managed by rpmpd (MSM8939_VDDMDCX) */
+ /* pm8916_s2 is managed by rpmpd (MSM8939_VDDCX) */
+ pm8916_s3: s3 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on; /* Needed for L2 */
+ };
+ pm8916_s4: s4 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-always-on; /* Needed for L5/L7 */
+ };
+
+ /*
+ * Some of the regulators are unused or managed by another
+ * processor (e.g. the modem). We should still define nodes for
+ * them to ensure the vote from the application processor can be
+ * dropped in case the regulators are already on during boot.
+ *
+ * The labels for these nodes are omitted on purpose because
+ * boards should configure a proper voltage before using them.
+ */
+ l1 {};
+
+ pm8916_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on; /* Needed for LPDDR RAM */
+ };
+
+ /* pm8916_l3 is managed by rpmpd (MSM8939_VDDMX) */
+
+ l4 {};
+
+ pm8916_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on; /* Needed for most digital I/O */
+ };
+
+ pm8916_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8916_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on; /* Needed for CPU PLL */
+ };
+
+ pm8916_l8: l8 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ pm8916_l9: l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {};
+
+ pm8916_l11: l11 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ pm8916_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8916_l13: l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {};
+ l15 {};
+ l16 {};
+ l17 {};
+ l18 {};
+ };
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8916_l8>;
+ vqmmc-supply = <&pm8916_l5>;
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8916_l11>;
+ vqmmc-supply = <&pm8916_l12>;
+};
+
+&usb_hs_phy {
+ v1p8-supply = <&pm8916_l7>;
+ v3p3-supply = <&pm8916_l13>;
+};
+
+&wcnss {
+ vddpx-supply = <&pm8916_l7>;
+};
+
+&wcnss_iris {
+ vddxo-supply = <&pm8916_l7>;
+ vddrfa-supply = <&pm8916_s3>;
+ vddpa-supply = <&pm8916_l9>;
+ vdddig-supply = <&pm8916_l5>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts b/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts
new file mode 100644
index 000000000000..8feecffb16bf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8929-pm8916.dtsi"
+#include "msm8939-wingtech-wt82918.dtsi"
+
+/ {
+ model = "Lenovo Vibe K5 (HD) (Wingtech WT82918)";
+ compatible = "wingtech,wt82918hd", "qcom,msm8929";
+ chassis-type = "handset";
+};
+
+&touchscreen {
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8929.dtsi b/arch/arm64/boot/dts/qcom/msm8929.dtsi
new file mode 100644
index 000000000000..ef7bb1ced954
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8929.dtsi
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "msm8939.dtsi"
+
+&opp_table {
+ /delete-node/ opp-550000000;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
index e3404c4455cf..b845da4fa23e 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts
@@ -159,6 +159,26 @@
};
};
};
+
+ flash-led-controller@53 {
+ compatible = "silergy,sy7802";
+ reg = <0x53>;
+
+ enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&camera_rear_flash_default>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <0>, <1>;
+ };
+ };
};
&blsp_i2c3 {
@@ -318,6 +338,13 @@
bias-disable;
};
+ camera_rear_flash_default: camera-rear-flash-default-state {
+ pins = "gpio9", "gpio16", "gpio51";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
gpio_hall_sensor_default: gpio-hall-sensor-default-state {
pins = "gpio20";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
index 91acdb160227..ceba6e73b211 100644
--- a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
+++ b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts
@@ -198,7 +198,7 @@
};
};
- pwm_vibrator: pwm-vibrator {
+ pwm_vibrator: pwm {
compatible = "clk-pwm";
#pwm-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts
new file mode 100644
index 000000000000..aa6b699aa2a1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8939-pm8916.dtsi"
+#include "msm8939-wingtech-wt82918.dtsi"
+
+/ {
+ model = "Lenovo Vibe K5 (Wingtech WT82918)";
+ compatible = "wingtech,wt82918", "qcom,msm8939";
+ chassis-type = "handset";
+};
+
+&touchscreen {
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <1920>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi
new file mode 100644
index 000000000000..800e0747a2f7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "msm8916-modem-qdsp6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+/ {
+ aliases {
+ mmc0 = &sdhc_1; /* eMMC */
+ mmc1 = &sdhc_2; /* SD card */
+ serial0 = &blsp_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pm8916_pwm 0 100000>;
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <128>;
+ };
+
+ flash-led-controller {
+ compatible = "sgmicro,sgm3140";
+ enable-gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>;
+ flash-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&camera_front_flash_default>;
+ pinctrl-names = "default";
+
+ flash_led: led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ label = "GPIO Buttons";
+
+ button-volume-up {
+ label = "Volume Up";
+ gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ pinctrl-0 = <&gpio_leds_default>;
+ pinctrl-names = "default";
+
+ led-0 {
+ gpios = <&tlmm 69 GPIO_ACTIVE_LOW>;
+ function = LED_FUNCTION_CHARGING;
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ retain-state-suspended;
+ };
+
+ led-1 {
+ gpios = <&tlmm 36 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ retain-state-suspended;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb_id_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&blsp_i2c2 {
+ status = "okay";
+
+ accelerometer@68 {
+ compatible = "invensense,icm20608";
+ reg = <0x68>;
+
+ interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&accelerometer_default>;
+ pinctrl-names = "default";
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1";
+ };
+};
+
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen: touchscreen@38 {
+ compatible = "edt,edt-ft5306";
+ reg = <0x38>;
+
+ interrupts-extended = <&tlmm 13 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&touchscreen_default>;
+ pinctrl-names = "default";
+
+ vcc-supply = <&pm8916_l17>;
+ iovcc-supply = <&pm8916_l6>;
+
+ reset-gpios = <&tlmm 12 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&blsp_uart2 {
+ status = "okay";
+};
+
+&mpss_mem {
+ reg = <0x0 0x86800000 0x0 0x5500000>;
+};
+
+&pm8916_pwm {
+ pinctrl-0 = <&pwm_out>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&pm8916_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_default>;
+ pinctrl-1 = <&sdc2_sleep>;
+ pinctrl-names = "default", "sleep";
+ non-removable;
+ status = "okay";
+};
+
+&usb {
+ extcon = <&usb_id>, <&usb_id>;
+ status = "okay";
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&wcnss {
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+&tlmm {
+ accelerometer_default: accelerometer-default-state {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ camera_front_flash_default: camera-front-flash-default-state {
+ pins = "gpio31", "gpio32";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio107";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ gpio_leds_default: gpio-leds-default-state {
+ pins = "gpio36", "gpio69";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ touchscreen_default: touchscreen-default-state {
+ reset-pins {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ touchscreen-pins {
+ pins = "gpio13";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ usb_id_default: usb-id-default-state {
+ pins = "gpio110";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
+
+&pm8916_mpps {
+ pwm_out: mpp4-state {
+ pins = "mpp4";
+ function = "digital";
+ power-source = <PM8916_MPP_VPH>;
+ output-low;
+ qcom,dtest = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts
new file mode 100644
index 000000000000..59414db42508
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8939-pm8916.dtsi"
+#include "msm8939-wingtech-wt82918.dtsi"
+
+/ {
+ model = "Lenovo Vibe K5 (HD) (Wingtech WT82918)";
+ compatible = "wingtech,wt82918hdhw39", "qcom,msm8939";
+ chassis-type = "handset";
+};
+
+&touchscreen {
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
index 46d9480cd464..28634789a8a9 100644
--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
@@ -252,7 +252,7 @@
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8936";
+ compatible = "qcom,rpm-msm8936", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index a4bfb624fb8a..d20fd3d7c46e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -199,7 +199,7 @@
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8953";
+ compatible = "qcom,rpm-msm8953", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
index d62dcb76fa48..06af6e5ec578 100644
--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
@@ -247,7 +247,7 @@
qcom,smd-edge = <15>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8976";
+ compatible = "qcom,rpm-msm8976", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
@@ -663,6 +663,11 @@
#thermal-sensor-cells = <1>;
};
+ restart@4ab000 {
+ compatible = "qcom,pshold";
+ reg = <0x004ab000 0x4>;
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8976-pinctrl";
reg = <0x01000000 0x300000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts
new file mode 100644
index 000000000000..38b305816d2f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * MSM8992 LG G4 (h815) device tree.
+ *
+ * Copyright (c) 2024, Alexander Reimelt <alexander.reimelt@posteo.de>
+ */
+
+/dts-v1/;
+
+#include "msm8992.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include <dt-bindings/leds/common.h>
+
+/* different mapping */
+/delete-node/ &cont_splash_mem;
+
+/* disabled downstream */
+/delete-node/ &dfps_data_mem;
+
+/ {
+ model = "LG G4 (H815)";
+ compatible = "lg,h815", "qcom,msm8992";
+ chassis-type = "handset";
+
+ qcom,msm-id = <0xfb 0x0>;
+ qcom,pmic-id = <0x10009 0x1000a 0x0 0x0>;
+ qcom,board-id = <0xb64 0x0>;
+
+ /* psci is broken */
+ /delete-node/ psci;
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ spin-table@6000000 {
+ reg = <0x0 0x06000000 0x0 0x00001000>;
+ no-map;
+ };
+
+ ramoops@ff00000 {
+ compatible = "ramoops";
+ reg = <0x0 0x0ff00000 0x0 0x00100000>;
+ console-size = <0x20000>;
+ pmsg-size = <0x20000>;
+ record-size = <0x10000>;
+ ecc-size = <0x10>;
+ };
+
+ cont_splash_mem: fb@3400000 {
+ reg = <0x0 0x03400000 0x0 0x00c00000>;
+ no-map;
+ };
+
+ crash_fb_mem: crash-fb@4000000 {
+ reg = <0x0 0x04000000 0x0 0x00c00000>;
+ no-map;
+ };
+ };
+
+ gpio-hall-sensor {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_sensor_default>;
+ pinctrl-names = "default";
+
+ label = "Hall Effect Sensor";
+
+ event-hall-sensor {
+ gpios = <&tlmm 75 GPIO_ACTIVE_LOW>;
+ label = "hall effect sensor";
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key-vol-up {
+ label = "volume up";
+ gpios = <&pm8994_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&CPU0 {
+ enable-method = "spin-table";
+};
+
+&CPU1 {
+ enable-method = "spin-table";
+};
+
+&CPU2 {
+ enable-method = "spin-table";
+};
+
+&CPU3 {
+ enable-method = "spin-table";
+};
+
+&CPU4 {
+ enable-method = "spin-table";
+};
+
+&CPU5 {
+ enable-method = "spin-table";
+};
+
+&pm8994_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_s7-supply = <&vph_pwr>;
+ vdd_l1-supply = <&pmi8994_s1>;
+ vdd_l2_26_28-supply = <&pm8994_s3>;
+ vdd_l3_11-supply = <&pm8994_s3>;
+ vdd_l4_27_31-supply = <&pm8994_s3>;
+ vdd_l5_7-supply = <&pm8994_s5>;
+ vdd_l6_12_32-supply = <&pm8994_s5>;
+ vdd_l8_16_30-supply = <&vph_pwr>;
+ vdd_l9_10_18_22-supply = <&pmi8994_bby>;
+ vdd_l13_19_23_24-supply = <&pmi8994_bby>;
+ vdd_l14_15-supply = <&pm8994_s5>;
+ vdd_l17_29-supply = <&pmi8994_bby>;
+ vdd_l20_21-supply = <&pmi8994_bby>;
+ vdd_l25-supply = <&pm8994_s5>;
+ vdd_lvs1_2-supply = <&pm8994_s4>;
+
+ pm8994_s3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ /* sdhc1 vqmmc and bcm */
+ pm8994_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-system-load = <325000>;
+ regulator-allow-set-load;
+ };
+
+ pm8994_s5: s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+
+ /* sdhc2 vqmmc */
+ pm8994_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <22000>;
+ regulator-allow-set-load;
+ };
+
+ /* sdhc1 vmmc */
+ pm8994_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <570000>;
+ regulator-allow-set-load;
+ };
+
+ /* sdhc2 vmmc */
+ pm8994_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-system-load = <800000>;
+ regulator-allow-set-load;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,rpm-pmi8994-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ pmi8994_s1: s1 {
+ regulator-min-microvolt = <1025000>;
+ regulator-max-microvolt = <1025000>;
+ };
+
+ /* S2 & S3 - VDD_GFX */
+
+ pmi8994_bby: boost-bypass {
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3600000>;
+ };
+ };
+};
+
+&sdhc1 {
+ mmc-hs400-1_8v;
+ vmmc-supply = <&pm8994_l20>;
+ vqmmc-supply = <&pm8994_s4>;
+ non-removable;
+ status = "okay";
+};
+
+&sdhc2 {
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ cd-gpios = <&pm8994_gpios 8 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&tlmm {
+ hall_sensor_default: hall-sensor-default-state {
+ pins = "gpio75";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 917fa246857d..fc2a7f13f690 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -188,7 +188,7 @@
qcom,remote-pid = <6>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8994";
+ compatible = "qcom,rpm-msm8994", "qcom,smd-rpm";
qcom,smd-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 0fd2b1b944a5..e5966724f37c 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -472,7 +472,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8996";
+ compatible = "qcom,rpm-msm8996", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 7f44807b1b97..9aa9c5cee355 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -352,7 +352,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8998";
+ compatible = "qcom,rpm-msm8998", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
@@ -1586,6 +1586,33 @@
"gpll0";
};
+ lpass_q6_smmu: iommu@5100000 {
+ compatible = "qcom,msm8998-smmu-v2", "qcom,smmu-v2";
+ reg = <0x05100000 0x40000>;
+ clocks = <&gcc HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "bus";
+
+ #global-interrupts = <0>;
+ #iommu-cells = <1>;
+ interrupts =
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+
+ power-domains = <&gcc LPASS_ADSP_GDSC>;
+ status = "disabled";
+ };
+
remoteproc_slpi: remoteproc@5800000 {
compatible = "qcom,msm8998-slpi-pas";
reg = <0x05800000 0x4040>;
diff --git a/arch/arm64/boot/dts/qcom/pm8950.dtsi b/arch/arm64/boot/dts/qcom/pm8950.dtsi
index f03095779de0..ed72c6101813 100644
--- a/arch/arm64/boot/dts/qcom/pm8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8950.dtsi
@@ -18,7 +18,7 @@
#address-cells = <1>;
#size-cells = <0>;
- pon@800 {
+ pm8950_pon: pon@800 {
compatible = "qcom,pm8916-pon";
reg = <0x0800>;
mode-bootloader = <0x2>;
@@ -31,6 +31,14 @@
bias-pull-up;
linux,code = <KEY_POWER>;
};
+
+ pm8950_resin: resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ status = "disabled";
+ };
};
pm8950_temp: temp-alarm@2400 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index b4822cb17a37..4aff437263a2 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -84,9 +84,8 @@
#address-cells = <1>;
#size-cells = <0>;
- pmi8950_pwm: pwm@b000 {
+ pmi8950_pwm: pwm {
compatible = "qcom,pmi8950-pwm";
- reg = <0xb000 0x100>;
#pwm-cells = <2>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index 36d6a1fb553a..9ee59e6d2cdb 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -57,8 +57,11 @@
interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
<0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "ovp", "short";
+ label = "backlight";
+
qcom,cabc;
qcom,external-pfet;
+
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index 8f3be4c75db3..79bc42ffb6a1 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -215,7 +215,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-qcm2290";
+ compatible = "qcom,rpm-qcm2290", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
index a0668f767e4b..84c45419cb8d 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
@@ -641,6 +641,21 @@
status = "okay";
};
+&sdc2_clk {
+ bias-disable;
+ drive-strength = <16>;
+};
+
+&sdc2_cmd {
+ bias-pull-up;
+ drive-strength = <10>;
+};
+
+&sdc2_data {
+ bias-pull-up;
+ drive-strength = <10>;
+};
+
&sdhc_1 {
non-removable;
no-sd;
@@ -652,9 +667,27 @@
status = "okay";
};
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-0 = <&sdc2_clk>, <&sdc2_cmd>, <&sdc2_data>, <&sd_cd>;
+ pinctrl-1 = <&sdc2_clk_sleep>, <&sdc2_cmd_sleep>, <&sdc2_data_sleep>, <&sd_cd>;
+
+ vmmc-supply = <&vreg_l9c_2p96>;
+ vqmmc-supply = <&vreg_l6c_2p96>;
+
+ cd-gpios = <&tlmm 91 GPIO_ACTIVE_LOW>;
+};
+
&tlmm {
gpio-reserved-ranges = <32 2>, /* ADSP */
<48 4>; /* NFC */
+
+ sd_cd: sd-cd-state {
+ pins = "gpio91";
+ function = "gpio";
+ bias-pull-up;
+ };
};
&uart5 {
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index c291bbed6073..cddc16bac0ce 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -177,7 +177,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-qcs404";
+ compatible = "qcom,rpm-qcs404", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/sa8155p.dtsi b/arch/arm64/boot/dts/qcom/sa8155p.dtsi
index 9e70effc72e1..d678ed822378 100644
--- a/arch/arm64/boot/dts/qcom/sa8155p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8155p.dtsi
@@ -9,6 +9,10 @@
#include "sm8150.dtsi"
+&camcc {
+ power-domains = <&rpmhpd SA8155P_CX>;
+};
+
&dispcc {
power-domains = <&rpmhpd SA8155P_CX>;
};
diff --git a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
index 78e933c42c31..2fd1dafe63ce 100644
--- a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
+++ b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include "sa8540p.dtsi"
#include "sa8540p-pmics.dtsi"
@@ -109,6 +110,46 @@
};
};
+ regulator-usb2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB2_VBUS";
+ gpio = <&pmm8540c_gpios 9 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb2_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-usb3-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB3_VBUS";
+ gpio = <&pmm8540e_gpios 5 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb3_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-usb4-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB4_VBUS";
+ gpio = <&pmm8540g_gpios 5 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb4_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ regulator-usb5-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB5_VBUS";
+ gpio = <&pmm8540g_gpios 9 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb5_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
+
reserved-memory {
gpu_mem: gpu-mem@8bf00000 {
reg = <0 0x8bf00000 0 0x2000>;
@@ -637,6 +678,10 @@
status = "okay";
};
+&usb_2 {
+ status = "okay";
+};
+
&usb_2_hsphy0 {
vdda-pll-supply = <&vreg_l5a>;
vdda18-supply = <&vreg_l7g>;
@@ -697,6 +742,44 @@
};
};
+&pmm8540c_gpios {
+ usb2_en: usb2-en-state {
+ pins = "gpio9";
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ output-enable;
+ power-source = <0>;
+ };
+};
+
+&pmm8540e_gpios {
+ usb3_en: usb3-en-state {
+ pins = "gpio5";
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ output-enable;
+ power-source = <0>;
+ };
+};
+
+&pmm8540g_gpios {
+ usb4_en: usb4-en-state {
+ pins = "gpio5";
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ output-enable;
+ power-source = <0>;
+ };
+
+ usb5_en: usb5-en-state {
+ pins = "gpio9";
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ output-enable;
+ power-source = <0>;
+ };
+};
+
&tlmm {
pcie2a_default: pcie2a-default-state {
clkreq-n-pins {
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
index 2a6170623ea9..0c1b21def4b6 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
@@ -702,6 +702,31 @@
status = "okay";
};
+&remoteproc_adsp {
+ firmware-name = "qcom/sa8775p/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp0 {
+ firmware-name = "qcom/sa8775p/cdsp0.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp1 {
+ firmware-name = "qcom/sa8775p/cdsp1.mbn";
+ status = "okay";
+};
+
+&remoteproc_gpdsp0 {
+ firmware-name = "qcom/sa8775p/gpdsp0.mbn";
+ status = "okay";
+};
+
+&remoteproc_gpdsp1 {
+ firmware-name = "qcom/sa8775p/gpdsp1.mbn";
+ status = "okay";
+};
+
&uart10 {
compatible = "qcom,geni-debug-uart";
pinctrl-0 = <&qup_uart10_default>;
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 23f1b2e5e624..e8dbc8d820a6 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -10,6 +10,8 @@
#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/firmware/qcom,scm.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -42,6 +44,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&L2_0>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -62,6 +66,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&L2_1>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -77,6 +83,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&L2_2>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_2: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -92,6 +100,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&L2_3>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_3: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -107,6 +117,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&L2_4>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_4: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -128,6 +140,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&L2_5>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_5: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -143,6 +157,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&L2_6>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_6: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -158,6 +174,8 @@
enable-method = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&L2_7>;
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
L2_7: l2-cache {
compatible = "cache";
cache-level = <2>;
@@ -203,6 +221,48 @@
};
};
};
+
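+		/*
+		 * Per the generic arm,idle-state binding: entry/exit-latency-us
+		 * bound the cost of entering and leaving the state, and
+		 * min-residency-us is the break-even time the cpuidle governor
+		 * requires before the state is considered worthwhile. The
+		 * arm,psci-suspend-param values are SoC-specific power states.
+		 */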
+ idle-states {
+ entry-method = "psci";
+
+ GOLD_CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ GOLD_RAIL_CPU_SLEEP_0: cpu-sleep-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <1061>;
+ min-residency-us = <4488>;
+ local-timer-stop;
+ };
+ };
+
+ domain-idle-states {
+ CLUSTER_SLEEP_GOLD: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <2752>;
+ exit-latency-us = <3048>;
+ min-residency-us = <6118>;
+ };
+
+ CLUSTER_SLEEP_APSS_RSC_PC: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x42000144>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9987>;
+ };
+ };
};
dummy-sink {
@@ -332,6 +392,79 @@
psci {
compatible = "arm,psci-1.0";
method = "smc";
+
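+		/*
+		 * PSCI OS-initiated hierarchy: each CPU power domain is a
+		 * child of its cluster domain, and both cluster domains hang
+		 * off CLUSTER_2_PD, so the deepest state (APSS RSC power
+		 * collapse) is only entered once every CPU and cluster has
+		 * voted for its own idle state.
+		 */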
+ CPU_PD0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_0_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_0_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_0_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_0_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD4: power-domain-cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_1_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD5: power-domain-cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_1_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD6: power-domain-cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_1_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CPU_PD7: power-domain-cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_1_PD>;
+ domain-idle-states = <&GOLD_CPU_SLEEP_0>,
+ <&GOLD_RAIL_CPU_SLEEP_0>;
+ };
+
+ CLUSTER_0_PD: power-domain-cluster0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_2_PD>;
+ domain-idle-states = <&CLUSTER_SLEEP_GOLD>;
+ };
+
+ CLUSTER_1_PD: power-domain-cluster1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_2_PD>;
+ domain-idle-states = <&CLUSTER_SLEEP_GOLD>;
+ };
+
+ CLUSTER_2_PD: power-domain-cluster2 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_SLEEP_APSS_RSC_PC>;
+ };
};
reserved-memory {
@@ -564,6 +697,121 @@
};
};
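+	/*
+	 * SMP2P edges: each node carries one outbound ("master-kernel") and
+	 * one inbound ("slave-kernel") entry over SMEM. The two qcom,smem
+	 * cells are the SMEM item ids for the outbound and inbound halves,
+	 * and qcom,remote-pid identifies the remote processor on the edge.
+	 */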
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ smp2p_adsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_adsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp0 {
+ compatible = "qcom,smp2p";
+ qcom,smem = <94>, <432>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ smp2p_cdsp0_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_cdsp0_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp1 {
+ compatible = "qcom,smp2p";
+ qcom,smem = <617>, <616>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_NSP1
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_NSP1 IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <12>;
+
+ smp2p_cdsp1_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_cdsp1_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-gpdsp0 {
+ compatible = "qcom,smp2p";
+ qcom,smem = <617>, <616>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP0 IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <17>;
+
+ smp2p_gpdsp0_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_gpdsp0_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-gpdsp1 {
+ compatible = "qcom,smp2p";
+ qcom,smem = <617>, <616>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP1
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP1 IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <18>;
+
+ smp2p_gpdsp1_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_gpdsp1_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc@0 {
compatible = "simple-bus";
#address-cells = <2>;
@@ -2892,6 +3140,101 @@
status = "disabled";
};
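+		/*
+		 * BWMON instances: each PMU counts traffic on the listed
+		 * interconnect path and raises its interrupt when a threshold
+		 * is crossed; the OPP tables enumerate the bandwidth levels
+		 * the driver can vote for in response.
+		 */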
+ pmu@9091000 {
+ compatible = "qcom,sa8775p-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
+ reg = <0x0 0x9091000 0x0 0x1000>;
+ interrupts = <GIC_SPI 620 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&llcc_bwmon_opp_table>;
+
+ llcc_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <762000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <1720000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <2086000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <2601000>;
+ };
+
+ opp-4 {
+ opp-peak-kBps = <2929000>;
+ };
+
+ opp-5 {
+ opp-peak-kBps = <5931000>;
+ };
+
+ opp-6 {
+ opp-peak-kBps = <6515000>;
+ };
+
+ opp-7 {
+ opp-peak-kBps = <7984000>;
+ };
+
+ opp-8 {
+ opp-peak-kBps = <10437000>;
+ };
+
+ opp-9 {
+ opp-peak-kBps = <12195000>;
+ };
+ };
+ };
+
+ pmu@90b5400 {
+ compatible = "qcom,sa8775p-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0x0 0x90b5400 0x0 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+
+ cpu_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <9155000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <12298000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <14236000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <16265000>;
+ };
+ };
+			};
+
+ pmu@90b6400 {
+ compatible = "qcom,sa8775p-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0x0 0x90b6400 0x0 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+ };
+
llcc: system-cache-controller@9200000 {
compatible = "qcom,sa8775p-llcc";
reg = <0x0 0x09200000 0x0 0x80000>,
@@ -3070,6 +3413,7 @@
reg = <0x0 0x15000000 0x0 0x100000>;
#iommu-cells = <2>;
#global-interrupts = <2>;
+ dma-coherent;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
@@ -3208,6 +3552,7 @@
reg = <0x0 0x15200000 0x0 0x80000>;
#iommu-cells = <2>;
#global-interrupts = <2>;
+ dma-coherent;
interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,
@@ -3445,6 +3790,92 @@
#freq-domain-cells = <1>;
};
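+		/*
+		 * PAS remoteprocs: the first interrupt is the watchdog from
+		 * the GIC; the remaining four are the fatal, ready, handover
+		 * and stop-ack SMP2P signals from the matching smp2p-*-in
+		 * node, and the "stop" smem-state is the outbound shutdown
+		 * request.
+		 */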
+ remoteproc_gpdsp0: remoteproc@20c00000 {
+ compatible = "qcom,sa8775p-gpdsp0-pas";
+ reg = <0x0 0x20c00000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 768 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_gpdsp0_in 0 0>,
+ <&smp2p_gpdsp0_in 2 0>,
+ <&smp2p_gpdsp0_in 1 0>,
+ <&smp2p_gpdsp0_in 3 0>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>;
+ power-domain-names = "cx", "mxc";
+
+ interconnects = <&gpdsp_anoc MASTER_DSP0 0
+ &config_noc SLAVE_CLK_CTL 0>;
+
+ memory-region = <&pil_gdsp0_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_gpdsp0_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "gpdsp0";
+ qcom,remote-pid = <17>;
+ };
+ };
+
+ remoteproc_gpdsp1: remoteproc@21c00000 {
+ compatible = "qcom,sa8775p-gpdsp1-pas";
+ reg = <0x0 0x21c00000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 624 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_gpdsp1_in 0 0>,
+ <&smp2p_gpdsp1_in 2 0>,
+ <&smp2p_gpdsp1_in 1 0>,
+ <&smp2p_gpdsp1_in 3 0>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>;
+ power-domain-names = "cx", "mxc";
+
+ interconnects = <&gpdsp_anoc MASTER_DSP1 0
+ &config_noc SLAVE_CLK_CTL 0>;
+
+ memory-region = <&pil_gdsp1_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_gpdsp1_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP1
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP1
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "gpdsp1";
+ qcom,remote-pid = <18>;
+ };
+ };
+
ethernet1: ethernet@23000000 {
compatible = "qcom,sa8775p-ethqos";
reg = <0x0 0x23000000 0x0 0x10000>,
@@ -3464,6 +3895,12 @@
"ptp_ref",
"phyaux";
+ interconnects = <&aggre1_noc MASTER_EMAC_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_EMAC1_CFG QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "mac-mem", "cpu-mac";
+
power-domains = <&gcc EMAC1_GDSC>;
phys = <&serdes1>;
@@ -3499,6 +3936,12 @@
"ptp_ref",
"phyaux";
+ interconnects = <&aggre1_noc MASTER_EMAC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_EMAC_CFG QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "mac-mem", "cpu-mac";
+
power-domains = <&gcc EMAC0_GDSC>;
phys = <&serdes0>;
@@ -3514,6 +3957,569 @@
status = "disabled";
};
+
+ remoteproc_cdsp0: remoteproc@26300000 {
+ compatible = "qcom,sa8775p-cdsp0-pas";
+ reg = <0x0 0x26300000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp0_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp0_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp0_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp0_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_NSP0>;
+ power-domain-names = "cx", "mxc", "nsp";
+
+ interconnects = <&nspa_noc MASTER_CDSP_PROC 0
+ &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&pil_cdsp0_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_cdsp0_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "cdsp";
+ qcom,remote-pid = <5>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
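+				/*
+				 * Each compute-cb is a FastRPC context bank;
+				 * every iommus entry is an SMMU (stream-id,
+				 * mask) pair, so a single line covers a whole
+				 * range of stream ids for that bank.
+				 */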
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x2141 0x04a0>,
+ <&apps_smmu 0x2161 0x04a0>,
+ <&apps_smmu 0x2181 0x0400>,
+ <&apps_smmu 0x21c1 0x04a0>,
+ <&apps_smmu 0x21e1 0x04a0>,
+ <&apps_smmu 0x2541 0x04a0>,
+ <&apps_smmu 0x2561 0x04a0>,
+ <&apps_smmu 0x2581 0x0400>,
+ <&apps_smmu 0x25c1 0x04a0>,
+ <&apps_smmu 0x25e1 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x2142 0x04a0>,
+ <&apps_smmu 0x2162 0x04a0>,
+ <&apps_smmu 0x2182 0x0400>,
+ <&apps_smmu 0x21c2 0x04a0>,
+ <&apps_smmu 0x21e2 0x04a0>,
+ <&apps_smmu 0x2542 0x04a0>,
+ <&apps_smmu 0x2562 0x04a0>,
+ <&apps_smmu 0x2582 0x0400>,
+ <&apps_smmu 0x25c2 0x04a0>,
+ <&apps_smmu 0x25e2 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x2143 0x04a0>,
+ <&apps_smmu 0x2163 0x04a0>,
+ <&apps_smmu 0x2183 0x0400>,
+ <&apps_smmu 0x21c3 0x04a0>,
+ <&apps_smmu 0x21e3 0x04a0>,
+ <&apps_smmu 0x2543 0x04a0>,
+ <&apps_smmu 0x2563 0x04a0>,
+ <&apps_smmu 0x2583 0x0400>,
+ <&apps_smmu 0x25c3 0x04a0>,
+ <&apps_smmu 0x25e3 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x2144 0x04a0>,
+ <&apps_smmu 0x2164 0x04a0>,
+ <&apps_smmu 0x2184 0x0400>,
+ <&apps_smmu 0x21c4 0x04a0>,
+ <&apps_smmu 0x21e4 0x04a0>,
+ <&apps_smmu 0x2544 0x04a0>,
+ <&apps_smmu 0x2564 0x04a0>,
+ <&apps_smmu 0x2584 0x0400>,
+ <&apps_smmu 0x25c4 0x04a0>,
+ <&apps_smmu 0x25e4 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x2145 0x04a0>,
+ <&apps_smmu 0x2165 0x04a0>,
+ <&apps_smmu 0x2185 0x0400>,
+ <&apps_smmu 0x21c5 0x04a0>,
+ <&apps_smmu 0x21e5 0x04a0>,
+ <&apps_smmu 0x2545 0x04a0>,
+ <&apps_smmu 0x2565 0x04a0>,
+ <&apps_smmu 0x2585 0x0400>,
+ <&apps_smmu 0x25c5 0x04a0>,
+ <&apps_smmu 0x25e5 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x2146 0x04a0>,
+ <&apps_smmu 0x2166 0x04a0>,
+ <&apps_smmu 0x2186 0x0400>,
+ <&apps_smmu 0x21c6 0x04a0>,
+ <&apps_smmu 0x21e6 0x04a0>,
+ <&apps_smmu 0x2546 0x04a0>,
+ <&apps_smmu 0x2566 0x04a0>,
+ <&apps_smmu 0x2586 0x0400>,
+ <&apps_smmu 0x25c6 0x04a0>,
+ <&apps_smmu 0x25e6 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x2147 0x04a0>,
+ <&apps_smmu 0x2167 0x04a0>,
+ <&apps_smmu 0x2187 0x0400>,
+ <&apps_smmu 0x21c7 0x04a0>,
+ <&apps_smmu 0x21e7 0x04a0>,
+ <&apps_smmu 0x2547 0x04a0>,
+ <&apps_smmu 0x2567 0x04a0>,
+ <&apps_smmu 0x2587 0x0400>,
+ <&apps_smmu 0x25c7 0x04a0>,
+ <&apps_smmu 0x25e7 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&apps_smmu 0x2148 0x04a0>,
+ <&apps_smmu 0x2168 0x04a0>,
+ <&apps_smmu 0x2188 0x0400>,
+ <&apps_smmu 0x21c8 0x04a0>,
+ <&apps_smmu 0x21e8 0x04a0>,
+ <&apps_smmu 0x2548 0x04a0>,
+ <&apps_smmu 0x2568 0x04a0>,
+ <&apps_smmu 0x2588 0x0400>,
+ <&apps_smmu 0x25c8 0x04a0>,
+ <&apps_smmu 0x25e8 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@9 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <9>;
+ iommus = <&apps_smmu 0x2149 0x04a0>,
+ <&apps_smmu 0x2169 0x04a0>,
+ <&apps_smmu 0x2189 0x0400>,
+ <&apps_smmu 0x21c9 0x04a0>,
+ <&apps_smmu 0x21e9 0x04a0>,
+ <&apps_smmu 0x2549 0x04a0>,
+ <&apps_smmu 0x2569 0x04a0>,
+ <&apps_smmu 0x2589 0x0400>,
+ <&apps_smmu 0x25c9 0x04a0>,
+ <&apps_smmu 0x25e9 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@10 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <10>;
+ iommus = <&apps_smmu 0x214a 0x04a0>,
+ <&apps_smmu 0x216a 0x04a0>,
+ <&apps_smmu 0x218a 0x0400>,
+ <&apps_smmu 0x21ca 0x04a0>,
+ <&apps_smmu 0x21ea 0x04a0>,
+ <&apps_smmu 0x254a 0x04a0>,
+ <&apps_smmu 0x256a 0x04a0>,
+ <&apps_smmu 0x258a 0x0400>,
+ <&apps_smmu 0x25ca 0x04a0>,
+ <&apps_smmu 0x25ea 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@11 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <11>;
+ iommus = <&apps_smmu 0x214b 0x04a0>,
+ <&apps_smmu 0x216b 0x04a0>,
+ <&apps_smmu 0x218b 0x0400>,
+ <&apps_smmu 0x21cb 0x04a0>,
+ <&apps_smmu 0x21eb 0x04a0>,
+ <&apps_smmu 0x254b 0x04a0>,
+ <&apps_smmu 0x256b 0x04a0>,
+ <&apps_smmu 0x258b 0x0400>,
+ <&apps_smmu 0x25cb 0x04a0>,
+ <&apps_smmu 0x25eb 0x04a0>;
+ dma-coherent;
+ };
+ };
+ };
+ };
+
+ remoteproc_cdsp1: remoteproc@2a300000 {
+ compatible = "qcom,sa8775p-cdsp1-pas";
+			reg = <0x0 0x2a300000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 798 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp1_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp1_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp1_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp1_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_NSP1>;
+ power-domain-names = "cx", "mxc", "nsp";
+
+ interconnects = <&nspb_noc MASTER_CDSP_PROC_B 0
+ &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&pil_cdsp1_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_cdsp1_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_NSP1
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_NSP1
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "cdsp";
+ qcom,remote-pid = <12>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x2941 0x04a0>,
+ <&apps_smmu 0x2961 0x04a0>,
+ <&apps_smmu 0x2981 0x0400>,
+ <&apps_smmu 0x29c1 0x04a0>,
+ <&apps_smmu 0x29e1 0x04a0>,
+ <&apps_smmu 0x2d41 0x04a0>,
+ <&apps_smmu 0x2d61 0x04a0>,
+ <&apps_smmu 0x2d81 0x0400>,
+ <&apps_smmu 0x2dc1 0x04a0>,
+ <&apps_smmu 0x2de1 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x2942 0x04a0>,
+ <&apps_smmu 0x2962 0x04a0>,
+ <&apps_smmu 0x2982 0x0400>,
+ <&apps_smmu 0x29c2 0x04a0>,
+ <&apps_smmu 0x29e2 0x04a0>,
+ <&apps_smmu 0x2d42 0x04a0>,
+ <&apps_smmu 0x2d62 0x04a0>,
+ <&apps_smmu 0x2d82 0x0400>,
+ <&apps_smmu 0x2dc2 0x04a0>,
+ <&apps_smmu 0x2de2 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x2943 0x04a0>,
+ <&apps_smmu 0x2963 0x04a0>,
+ <&apps_smmu 0x2983 0x0400>,
+ <&apps_smmu 0x29c3 0x04a0>,
+ <&apps_smmu 0x29e3 0x04a0>,
+ <&apps_smmu 0x2d43 0x04a0>,
+ <&apps_smmu 0x2d63 0x04a0>,
+ <&apps_smmu 0x2d83 0x0400>,
+ <&apps_smmu 0x2dc3 0x04a0>,
+ <&apps_smmu 0x2de3 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x2944 0x04a0>,
+ <&apps_smmu 0x2964 0x04a0>,
+ <&apps_smmu 0x2984 0x0400>,
+ <&apps_smmu 0x29c4 0x04a0>,
+ <&apps_smmu 0x29e4 0x04a0>,
+ <&apps_smmu 0x2d44 0x04a0>,
+ <&apps_smmu 0x2d64 0x04a0>,
+ <&apps_smmu 0x2d84 0x0400>,
+ <&apps_smmu 0x2dc4 0x04a0>,
+ <&apps_smmu 0x2de4 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x2945 0x04a0>,
+ <&apps_smmu 0x2965 0x04a0>,
+ <&apps_smmu 0x2985 0x0400>,
+ <&apps_smmu 0x29c5 0x04a0>,
+ <&apps_smmu 0x29e5 0x04a0>,
+ <&apps_smmu 0x2d45 0x04a0>,
+ <&apps_smmu 0x2d65 0x04a0>,
+ <&apps_smmu 0x2d85 0x0400>,
+ <&apps_smmu 0x2dc5 0x04a0>,
+ <&apps_smmu 0x2de5 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x2946 0x04a0>,
+ <&apps_smmu 0x2966 0x04a0>,
+ <&apps_smmu 0x2986 0x0400>,
+ <&apps_smmu 0x29c6 0x04a0>,
+ <&apps_smmu 0x29e6 0x04a0>,
+ <&apps_smmu 0x2d46 0x04a0>,
+ <&apps_smmu 0x2d66 0x04a0>,
+ <&apps_smmu 0x2d86 0x0400>,
+ <&apps_smmu 0x2dc6 0x04a0>,
+ <&apps_smmu 0x2de6 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x2947 0x04a0>,
+ <&apps_smmu 0x2967 0x04a0>,
+ <&apps_smmu 0x2987 0x0400>,
+ <&apps_smmu 0x29c7 0x04a0>,
+ <&apps_smmu 0x29e7 0x04a0>,
+ <&apps_smmu 0x2d47 0x04a0>,
+ <&apps_smmu 0x2d67 0x04a0>,
+ <&apps_smmu 0x2d87 0x0400>,
+ <&apps_smmu 0x2dc7 0x04a0>,
+ <&apps_smmu 0x2de7 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&apps_smmu 0x2948 0x04a0>,
+ <&apps_smmu 0x2968 0x04a0>,
+ <&apps_smmu 0x2988 0x0400>,
+ <&apps_smmu 0x29c8 0x04a0>,
+ <&apps_smmu 0x29e8 0x04a0>,
+ <&apps_smmu 0x2d48 0x04a0>,
+ <&apps_smmu 0x2d68 0x04a0>,
+ <&apps_smmu 0x2d88 0x0400>,
+ <&apps_smmu 0x2dc8 0x04a0>,
+ <&apps_smmu 0x2de8 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@9 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <9>;
+ iommus = <&apps_smmu 0x2949 0x04a0>,
+ <&apps_smmu 0x2969 0x04a0>,
+ <&apps_smmu 0x2989 0x0400>,
+ <&apps_smmu 0x29c9 0x04a0>,
+ <&apps_smmu 0x29e9 0x04a0>,
+ <&apps_smmu 0x2d49 0x04a0>,
+ <&apps_smmu 0x2d69 0x04a0>,
+ <&apps_smmu 0x2d89 0x0400>,
+ <&apps_smmu 0x2dc9 0x04a0>,
+ <&apps_smmu 0x2de9 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@10 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <10>;
+ iommus = <&apps_smmu 0x294a 0x04a0>,
+ <&apps_smmu 0x296a 0x04a0>,
+ <&apps_smmu 0x298a 0x0400>,
+ <&apps_smmu 0x29ca 0x04a0>,
+ <&apps_smmu 0x29ea 0x04a0>,
+ <&apps_smmu 0x2d4a 0x04a0>,
+ <&apps_smmu 0x2d6a 0x04a0>,
+ <&apps_smmu 0x2d8a 0x0400>,
+ <&apps_smmu 0x2dca 0x04a0>,
+ <&apps_smmu 0x2dea 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@11 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <11>;
+ iommus = <&apps_smmu 0x294b 0x04a0>,
+ <&apps_smmu 0x296b 0x04a0>,
+ <&apps_smmu 0x298b 0x0400>,
+ <&apps_smmu 0x29cb 0x04a0>,
+ <&apps_smmu 0x29eb 0x04a0>,
+ <&apps_smmu 0x2d4b 0x04a0>,
+ <&apps_smmu 0x2d6b 0x04a0>,
+ <&apps_smmu 0x2d8b 0x0400>,
+ <&apps_smmu 0x2dcb 0x04a0>,
+ <&apps_smmu 0x2deb 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@12 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <12>;
+ iommus = <&apps_smmu 0x294c 0x04a0>,
+ <&apps_smmu 0x296c 0x04a0>,
+ <&apps_smmu 0x298c 0x0400>,
+ <&apps_smmu 0x29cc 0x04a0>,
+ <&apps_smmu 0x29ec 0x04a0>,
+ <&apps_smmu 0x2d4c 0x04a0>,
+ <&apps_smmu 0x2d6c 0x04a0>,
+ <&apps_smmu 0x2d8c 0x0400>,
+ <&apps_smmu 0x2dcc 0x04a0>,
+ <&apps_smmu 0x2dec 0x04a0>;
+ dma-coherent;
+ };
+
+ compute-cb@13 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <13>;
+ iommus = <&apps_smmu 0x294d 0x04a0>,
+ <&apps_smmu 0x296d 0x04a0>,
+ <&apps_smmu 0x298d 0x0400>,
+							<&apps_smmu 0x29cd 0x04a0>,
+ <&apps_smmu 0x29ed 0x04a0>,
+ <&apps_smmu 0x2d4d 0x04a0>,
+ <&apps_smmu 0x2d6d 0x04a0>,
+ <&apps_smmu 0x2d8d 0x0400>,
+ <&apps_smmu 0x2dcd 0x04a0>,
+ <&apps_smmu 0x2ded 0x04a0>;
+ dma-coherent;
+ };
+ };
+ };
+ };
+
+ remoteproc_adsp: remoteproc@30000000 {
+ compatible = "qcom,sa8775p-adsp-pas";
+ reg = <0x0 0x30000000 0x0 0x100>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ interconnects = <&lpass_ag_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ memory-region = <&adsp_rpc_remote_heap_mem>;
+ qcom,vmids = <QCOM_SCM_VMID_LPASS
+ QCOM_SCM_VMID_ADSP_HEAP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x3003 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x3004 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x3005 0x0>;
+ qcom,nsessions = <5>;
+ dma-coherent;
+ };
+ };
+ };
+ };
};
thermal-zones {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
index 5b226577f9d8..62de4774c556 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
@@ -484,6 +484,10 @@
status = "okay";
};
+&pmc8180_pwrkey {
+ status = "okay";
+};
+
&pmc8180c_lpg {
status = "okay";
};
@@ -557,6 +561,40 @@
status = "okay";
};
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l16e_3p0>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l16e_3p0>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+
+ status = "okay";
+};
+
&usb_prim_hsphy {
vdda-pll-supply = <&vreg_l5e_0p88>;
vdda18-supply = <&vreg_l12a_1p8>;
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
index 1c6f12fafe1d..451c9b984f1f 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi
@@ -75,7 +75,7 @@
pon: pon@800 {
compatible = "qcom,pm8916-pon";
reg = <0x0800>;
- pwrkey {
+ pmc8180_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
interrupts = <0x0 0x8 0x0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
@@ -139,11 +139,11 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
};
- pmc8180_gpios: gpio@c000 {
+ pmc8180_1_gpios: gpio@c000 {
compatible = "qcom,pmc8180-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
- gpio-ranges = <&pmc8180_gpios 0 0 10>;
+ gpio-ranges = <&pmc8180_1_gpios 0 0 10>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -198,11 +198,21 @@
#size-cells = <0>;
};
- pmic@8 {
+ pmc8180_2: pmic@8 {
compatible = "qcom,pm8150", "qcom,spmi-pmic";
reg = <0x8 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ pmc8180_2_gpios: gpio@c000 {
+ compatible = "qcom,pmc8180-gpio", "qcom,spmi-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ gpio-ranges = <&pmc8180_2_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
pmic@a {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
index 65d923497a05..79b4d293ea1e 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
@@ -223,6 +223,32 @@
vin-supply = <&vph_pwr>;
};
+ vreg_usb2_host_en: regulator-usb2-host-en {
+ compatible = "regulator-fixed";
+ regulator-name = "usb2_host_en";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&pmc8180_1_gpios 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ regulator-always-on;
+ };
+
+ vreg_usb3_host_en: regulator-usb3-host-en {
+ compatible = "regulator-fixed";
+ regulator-name = "usb3_host_en";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&pmc8180_2_gpios 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ regulator-always-on;
+ };
+
usbprim-sbu-mux {
compatible = "pericom,pi3usb102", "gpio-sbu-mux";
@@ -552,6 +578,10 @@
status = "okay";
};
+&pmc8180_pwrkey {
+ status = "okay";
+};
+
&pmc8180c_lpg {
status = "okay";
};
@@ -623,6 +653,40 @@
status = "okay";
};
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l16e_3p0>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l16e_3p0>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vreg_l5e_0p88>;
+
+ status = "okay";
+};
+
&usb_prim_hsphy {
vdda-pll-supply = <&vreg_l5e_0p88>;
vdda18-supply = <&vreg_l12a_1p8>;
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 6e707d993aeb..0e9429684dd9 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -2507,6 +2507,34 @@
status = "disabled";
};
+ usb_mp_hsphy0: phy@88e4000 {
+ compatible = "qcom,sc8180x-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0 0x088e4000 0 0x400>;
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_MP0_BCR>;
+
+ status = "disabled";
+ };
+
+ usb_mp_hsphy1: phy@88e5000 {
+ compatible = "qcom,sc8180x-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0 0x088e5000 0 0x400>;
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_MP1_BCR>;
+
+ status = "disabled";
+ };
+
usb_prim_qmpphy: phy@88e8000 {
compatible = "qcom,sc8180x-qmp-usb3-dp-phy";
reg = <0 0x088e8000 0 0x3000>;
@@ -2555,6 +2583,60 @@
};
};
+ usb_mp_qmpphy0: phy@88eb000 {
+ compatible = "qcom,sc8180x-qmp-usb3-uni-phy";
+ reg = <0 0x088eb000 0 0x1000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb2_phy0_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_mp_qmpphy1: phy@88ec000 {
+ compatible = "qcom,sc8180x-qmp-usb3-uni-phy";
+ reg = <0 0x088ec000 0 0x1000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb2_phy1_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
usb_sec_qmpphy: phy@88ee000 {
compatible = "qcom,sc8180x-qmp-usb3-dp-phy";
reg = <0 0x088ee000 0 0x3000>;
@@ -2622,17 +2704,89 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
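+		/*
+		 * Multiport USB controller: the embedded dwc3 is wired to two
+		 * HS/SS PHY pairs (usb2-0/usb3-0 and usb2-1/usb3-1) and runs
+		 * host-only (dr_mode = "host").
+		 */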
+ usb_mp: usb@a4f8800 {
+ compatible = "qcom,sc8180x-dwc3-mp", "qcom,dwc3";
+ reg = <0 0x0a4f8800 0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ dma-ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_SLEEP_CLK>,
+ <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB3_SEC_CLKREF_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
+
+ interconnects = <&aggre1_noc MASTER_USB3_2 0 &mc_virt SLAVE_EBI_CH0 0>,
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_USB3_2 0>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
+ assigned-clocks = <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 59 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 46 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 71 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 68 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event_1", "pwr_event_2",
+ "hs_phy_1", "hs_phy_2",
+ "dp_hs_phy_1", "dm_hs_phy_1",
+ "dp_hs_phy_2", "dm_hs_phy_2",
+ "ss_phy_1", "ss_phy_2";
+
+ power-domains = <&gcc USB30_MP_GDSC>;
+
+ resets = <&gcc GCC_USB30_MP_BCR>;
+
+ status = "disabled";
+
+ usb_mp_dwc3: usb@a400000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a400000 0 0xcd00>;
+ interrupts = <GIC_SPI 654 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x60 0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_mp_hsphy0>,
+ <&usb_mp_qmpphy0>,
+ <&usb_mp_hsphy1>,
+ <&usb_mp_qmpphy1>;
+ phy-names = "usb2-0",
+ "usb3-0",
+ "usb2-1",
+ "usb3-1";
+ dr_mode = "host";
+ };
+ };
+
usb_prim: usb@a6f8800 {
compatible = "qcom,sc8180x-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
- interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 9 IRQ_TYPE_EDGE_BOTH>,
<&pdc 8 IRQ_TYPE_EDGE_BOTH>,
- <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
- interrupt-names = "hs_phy_irq",
- "ss_phy_irq",
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
"dm_hs_phy_irq",
- "dp_hs_phy_irq";
+ "ss_phy_irq";
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
@@ -2714,12 +2868,17 @@
"xo";
resets = <&gcc GCC_USB30_SEC_BCR>;
power-domains = <&gcc USB30_SEC_GDSC>;
- interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
+
+ interrupts-extended = <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
<&pdc 10 IRQ_TYPE_EDGE_BOTH>,
- <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
- interrupt-names = "hs_phy_irq", "ss_phy_irq",
- "dm_hs_phy_irq", "dp_hs_phy_irq";
+ <&pdc 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+ "ss_phy_irq";
assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index b98b2f7752b5..6020582b0a59 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -848,15 +848,15 @@
pins = "gpio143";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
- pins = "gpio145";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ pins = "gpio145";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
pcie3a_default: pcie3a-default-state {
@@ -871,7 +871,7 @@
pins = "gpio151";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
@@ -894,7 +894,7 @@
pins = "gpio141";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index b27143f81867..6a28cab97189 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -592,6 +592,57 @@
};
};
+&camss {
+ vdda-phy-supply = <&vreg_l6d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+
+ ports {
+ port@0 {
+ csiphy0_lanes01_ep: endpoint@0 {
+ reg = <0>;
+ clock-lanes = <7>;
+ data-lanes = <0 1>;
+ remote-endpoint = <&ov5675_ep>;
+ };
+ };
+ };
+};
+
+&cci2 {
+ status = "okay";
+};
+
+&cci2_i2c1 {
+ camera@10 {
+ compatible = "ovti,ov5675";
+ reg = <0x10>;
+
+ reset-gpios = <&tlmm 15 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_rgb_default>;
+
+ clocks = <&camcc CAMCC_MCLK3_CLK>;
+
+ orientation = <0>; /* Front facing */
+
+ avdd-supply = <&vreg_l6q>;
+ dvdd-supply = <&vreg_l2q>;
+ dovdd-supply = <&vreg_l7q>;
+
+ port {
+ ov5675_ep: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <450000000>;
+ remote-endpoint = <&csiphy0_lanes01_ep>;
+ };
+ };
+	};
+};
+
&dispcc0 {
status = "okay";
};
@@ -1436,6 +1487,22 @@
bias-disable;
};
+ cam_rgb_default: cam-rgb-default-state {
+ mclk-pins {
+ pins = "gpio17";
+ function = "cam_mclk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ sc-rgb-xshut-n-pins {
+ pins = "gpio15";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
edp_reg_en: edp-reg-en-state {
pins = "gpio25";
function = "gpio";
@@ -1509,15 +1576,15 @@
pins = "gpio143";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
- pins = "gpio145";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ pins = "gpio145";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
pcie3a_default: pcie3a-default-state {
@@ -1532,7 +1599,7 @@
pins = "gpio151";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
@@ -1555,7 +1622,7 @@
pins = "gpio141";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index c7e3764a8cf3..c8da5cb8d04e 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -372,7 +372,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-sdm660";
+ compatible = "qcom,rpm-sdm660", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
index fde16308c7e2..f1bbe7ab01ab 100644
--- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
@@ -282,6 +282,12 @@
status = "okay";
};
+&remoteproc_mpss {
+ firmware-name = "qcom/sdx75/modem.mbn",
+ "qcom/sdx75/modem_dtb.mbn";
+ status = "okay";
+};
+
&sdhc {
cd-gpios = <&tlmm 103 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_2v95_vdd>;
diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
index 9b93f6501d55..7cf3fcb469a8 100644
--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
@@ -366,7 +366,12 @@
no-map;
};
- qdss_mem: qdss@88800000 {
+ qdss_mem: qdss@88500000 {
+ reg = <0x0 0x88500000 0x0 0x300000>;
+ no-map;
+ };
+
+ qlink_logging_mem: qlink-logging@88800000 {
reg = <0x0 0x88800000 0x0 0x300000>;
no-map;
};
@@ -377,8 +382,13 @@
no-map;
};
- mpss_dsmharq_mem: mpss-dsmharq@88f00000 {
- reg = <0x0 0x88f00000 0x0 0x5080000>;
+ mpss_dsm_mem_2: mpss-dsm-2@88f00000 {
+ reg = <0x0 0x88f00000 0x0 0x2500000>;
+ no-map;
+ };
+
+ mpss_dsm_mem: mpss-dsm@8b400000 {
+ reg = <0x0 0x8b400000 0x0 0x2b80000>;
no-map;
};
@@ -388,7 +398,7 @@
};
mpssadsp_mem: mpssadsp@8e000000 {
- reg = <0x0 0x8e000000 0x0 0xf400000>;
+ reg = <0x0 0x8e000000 0x0 0xf100000>;
no-map;
};
@@ -881,6 +891,53 @@
reg = <0x0 0x01fc0000 0x0 0x30000>;
};
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sdx75-mpss-pas";
+ reg = <0 0x04080000 0 0x4040>;
+
+ interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack",
+ "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MSS>;
+ power-domain-names = "cx",
+ "mss";
+
+ memory-region = <&mpssadsp_mem>, <&q6_mpss_dtb_mem>,
+ <&mpss_dsm_mem>, <&mpss_dsm_mem_2>,
+ <&qlink_logging_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_modem_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_PING
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_PING>;
+ label = "mpss";
+ qcom,remote-pid = <1>;
+ };
+ };
+
sdhc: mmc@8804000 {
compatible = "qcom,sdx75-sdhci", "qcom,sdhci-msm-v5";
reg = <0x0 0x08804000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
index 9c9919e78fbd..1e05cd00b635 100644
--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
@@ -4,7 +4,10 @@
*/
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sm4450-camcc.h>
+#include <dt-bindings/clock/qcom,sm4450-dispcc.h>
#include <dt-bindings/clock/qcom,sm4450-gcc.h>
+#include <dt-bindings/clock/qcom,sm4450-gpucc.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -422,6 +425,41 @@
#hwlock-cells = <1>;
};
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,sm4450-gpucc";
+ reg = <0x0 0x03d90000 0x0 0xa000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ camcc: clock-controller@ade0000 {
+ compatible = "qcom,sm4450-camcc";
+ reg = <0x0 0x0ade0000 0x0 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sm4450-dispcc";
+ reg = <0x0 0x0af00000 0x0 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&gcc GCC_DISP_AHB_CLK>,
+ <&sleep_clk>,
+ <0>,
+ <0>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm4450-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>, <0 0x174000f0 0 0x64>;
diff --git a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
index 4a30024aa48f..f60d36c03b9b 100644
--- a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
+++ b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
- * Copyright (c) 2023, Dang Huynh <danct12@riseup.net>
+ * Copyright (c) 2023 - 2024, Dang Huynh <danct12@riseup.net>
*/
/dts-v1/;
#include "sm6115.dtsi"
#include "pm6125.dtsi"
+#include "pmi632.dtsi"
#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/usb/pd.h>
/ {
model = "F(x)tec Pro1X (QX1050)";
@@ -32,12 +35,48 @@
};
};
+ disp_elvdd_supply: disp-elvdd-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "disp_elvdd_supply";
+ };
+
+ disp_elvss_supply: disp-elvss-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "disp_elvss_supply";
+ };
+
+ disp_vcc_supply: disp-vcc-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "disp_vcc_supply";
+ };
+
+ disp_vci_supply: disp-vci-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "disp_vci_supply";
+ };
+
gpio-keys {
compatible = "gpio-keys";
- pinctrl-0 = <&vol_up_n>;
+ pinctrl-0 = <&hall_sensor_n>, <&key_camera_n>, <&vol_up_n>;
pinctrl-names = "default";
+ hall-switch {
+ label = "Hall Switch";
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_KEYPAD_SLIDE>;
+ gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+ debounce-interval = <90>;
+ wakeup-source;
+ };
+
+ key-camera {
+ label = "Camera Button";
+ linux,code = <KEY_CAMERA>;
+ gpios = <&tlmm 18 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ };
+
key-volume-up {
label = "Volume Up";
linux,code = <KEY_VOLUMEUP>;
@@ -47,11 +86,119 @@
wakeup-source;
};
};
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ capslock-led {
+ label = "green:capslock";
+ function = LED_FUNCTION_CAPSLOCK;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pca9534 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "kbd-capslock";
+ default-state = "off";
+ };
+ };
+
+ ts_vdd_supply: ts-vdd-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "ts_vdd_supply";
+ gpio = <&pca9534 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ ts_vddio_supply: ts-vddio-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "ts_vddio_supply";
+ gpio = <&pca9534 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
-&dispcc {
- /* HACK: disable until a panel driver is ready to retain simplefb */
- status = "disabled";
+&gpi_dma0 {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/sm6115/Fxtec/QX1050/a610_zap.mbn";
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+
+ status = "okay";
+
+ pca9534: gpio@21 {
+ compatible = "nxp,pca9534";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+	/* Clock frequency was not specified downstream, so default it to 100 kHz */
+ clock-frequency = <100000>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt9286";
+ reg = <0x14>;
+
+ interrupts-extended = <&tlmm 80 IRQ_TYPE_LEVEL_LOW>;
+
+		irq-gpios = <&tlmm 80 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&tlmm 71 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&ts_vdd_supply>;
+ VDDIO-supply = <&ts_vddio_supply>;
+
+ pinctrl-0 = <&ts_int_n>, <&ts_rst_n>;
+ pinctrl-names = "default";
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&pm6125_l18a>;
+ status = "okay";
+
+ panel: panel@0 {
+ compatible = "boe,bf060y8m-aj0";
+ reg = <0>;
+
+ reset-gpios = <&tlmm 82 GPIO_ACTIVE_LOW>;
+
+ elvdd-supply = <&disp_elvdd_supply>;
+ elvss-supply = <&disp_elvss_supply>;
+ vcc-supply = <&disp_vcc_supply>;
+ vci-supply = <&disp_vci_supply>;
+ vddio-supply = <&pm6125_l9a>;
+
+		pinctrl-0 = <&mdss_dsi_n>, <&panel_en_n>;
+ pinctrl-names = "default";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&panel_in>;
+};
+
+&mdss_dsi0_phy {
+ status = "okay";
};
&pm6125_gpios {
@@ -64,6 +211,73 @@
};
};
+&pmi632_lpg {
+ status = "okay";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+};
+
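+/*
+ * Type-C connector graph: port@0 routes the HS data lines to the dwc3
+ * controller and port@1 routes the SS lanes to the QMP PHY, letting the
+ * PMI632 drive role switching for the dual-role port.
+ */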
+&pmi632_typec {
+ status = "okay";
+
+ connector {
+ compatible = "usb-c-connector";
+
+ power-role = "dual";
+ data-role = "dual";
+ self-powered;
+
+ typec-power-opmode = "default";
+ pd-disable;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ pmi632_hs_in: endpoint {
+ remote-endpoint = <&usb_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ pmi632_ss_in: endpoint {
+ remote-endpoint = <&usb_qmpphy_out>;
+ };
+ };
+ };
+ };
+};
+
+&pmi632_vbus {
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <1000000>;
+ status = "okay";
+};
+
&pon_pwrkey {
status = "okay";
};
@@ -73,6 +287,25 @@
status = "okay";
};
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sm6115/Fxtec/QX1050/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/sm6115/Fxtec/QX1050/cdsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_mpss {
+ firmware-name = "qcom/sm6115/Fxtec/QX1050/modem.mbn";
+ status = "okay";
+};
+
&rpm_requests {
regulators-0 {
compatible = "qcom,rpm-pm6125-regulators";
@@ -105,6 +338,7 @@
pm6125_l5a: l5 {
regulator-min-microvolt = <1648000>;
regulator-max-microvolt = <3056000>;
+ regulator-allow-set-load;
};
pm6125_l6a: l6 {
@@ -206,12 +440,84 @@
};
};
+&sdc2_state_off {
+ cd-pins {
+ pins = "gpio88";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&sdc2_state_on {
+ cd-pins {
+ pins = "gpio88";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_state_on>;
+ pinctrl-1 = <&sdc2_state_off>;
+ pinctrl-names = "default", "sleep";
+
+ cd-gpios = <&tlmm 88 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&pm6125_l22a>;
+ vqmmc-supply = <&pm6125_l5a>;
+
+ status = "okay";
+};
+
&sleep_clk {
clock-frequency = <32764>;
};
&tlmm {
gpio-reserved-ranges = <0 4>, <14 4>;
+
+ key_camera_n: key-camera-n-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ panel_en_n: panel-en-n-state {
+ pins = "gpio65";
+ function = "gpio";
+ bias-disable;
+ };
+
+ ts_rst_n: ts-rst-n-state {
+ pins = "gpio71";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ ts_int_n: ts-int-n-state {
+ pins = "gpio80";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ mdss_dsi_n: mdss-dsi-n-state {
+ pins = "gpio82";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ hall_sensor_n: hall-sensor-n-state {
+ pins = "gpio96";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
&ufs_mem_hc {
@@ -233,10 +539,8 @@
status = "okay";
};
-&usb_dwc3 {
- /delete-property/ usb-role-switch;
- maximum-speed = "high-speed";
- dr_mode = "peripheral";
+&usb_dwc3_hs {
+ remote-endpoint = <&pmi632_hs_in>;
};
&usb_hsphy {
@@ -246,6 +550,27 @@
status = "okay";
};
+&usb_qmpphy {
+ vdda-phy-supply = <&pm6125_l4a>;
+ vdda-pll-supply = <&pm6125_l12a>;
+ status = "okay";
+};
+
+&usb_qmpphy_out {
+ remote-endpoint = <&pmi632_ss_in>;
+};
+
+&wifi {
+ vdd-0.8-cx-mx-supply = <&pm6125_l8a>;
+ vdd-1.8-xo-supply = <&pm6125_l16a>;
+ vdd-1.3-rfa-supply = <&pm6125_l17a>;
+ vdd-3.3-ch0-supply = <&pm6125_l23a>;
+
+ qcom,ath10k-calibration-variant = "Fxtec_QX1050";
+
+ status = "okay";
+};
+
&xo_board {
clock-frequency = <19200000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index e374733f3b85..41216cc319d6 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -376,7 +376,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-sm6115";
+ compatible = "qcom,rpm-sm6115", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index 777c380c2fa0..133610d14fc4 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -192,7 +192,7 @@
mboxes = <&apcs_glb 0>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-sm6125";
+ compatible = "qcom,rpm-sm6125", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index ddea681b536d..4d519dd6e7ef 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -653,7 +653,7 @@
mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
rpm_requests: rpm-requests {
- compatible = "qcom,rpm-sm6375";
+ compatible = "qcom,rpm-sm6375", "qcom,glink-smd-rpm";
qcom,glink-channels = "rpm_requests";
rpmcc: clock-controller {
diff --git a/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi
index 29289fa41b13..b9cff60efe6f 100644
--- a/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi
@@ -411,6 +411,8 @@
};
&ufs_mem_hc {
+ reset-gpios = <&tlmm 119 GPIO_ACTIVE_LOW>;
+
vcc-supply = <&vreg_l19a_3p0>;
vcc-max-microamp = <600000>;
vccq2-supply = <&vreg_l12a_1p8>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
index 286350ac7751..256a1ba94945 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
@@ -355,11 +355,6 @@
};
&gpu {
- /*
- * NOTE: "amd,imageon" makes Adreno start in headless mode, remove it
- * after display support is added on this board.
- */
- compatible = "qcom,adreno-640.1", "qcom,adreno", "amd,imageon";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 3e236adb9397..27f87835bc55 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -17,6 +17,7 @@
#include <dt-bindings/clock/qcom,videocc-sm8150.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sm8150.h>
+#include <dt-bindings/clock/qcom,sm8150-camcc.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -3759,6 +3760,18 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ camcc: clock-controller@ad00000 {
+ compatible = "qcom,sm8150-camcc";
+ reg = <0 0x0ad00000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>;
+ power-domains = <&rpmhpd SM8150_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
mdss: display-subsystem@ae00000 {
compatible = "qcom,sm8150-mdss";
reg = <0 0x0ae00000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 9d6c97d1fd9d..630f4eff20bf 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -8,8 +8,6 @@
#include <dt-bindings/clock/qcom,gcc-sm8250.h>
#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
-#include <dt-bindings/clock/qcom,sm8250-lpass-audiocc.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
@@ -2633,14 +2631,13 @@
wsamacro: codec@3240000 {
compatible = "qcom,sm8250-lpass-wsa-macro";
reg = <0 0x03240000 0 0x1000>;
- clocks = <&audiocc LPASS_CDC_WSA_MCLK>,
- <&audiocc LPASS_CDC_WSA_NPL>,
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&aoncc LPASS_CDC_VA_MCLK>,
<&vamacro>;
- clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
+ clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
#clock-cells = <0>;
clock-output-names = "mclk";
@@ -2674,20 +2671,10 @@
status = "disabled";
};
- audiocc: clock-controller@3300000 {
- compatible = "qcom,sm8250-lpass-audiocc";
- reg = <0 0x03300000 0 0x30000>;
- #clock-cells = <1>;
- clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
- clock-names = "core", "audio", "bus";
- };
-
vamacro: codec@3370000 {
compatible = "qcom,sm8250-lpass-va-macro";
reg = <0 0x03370000 0 0x1000>;
- clocks = <&aoncc LPASS_CDC_VA_MCLK>,
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
@@ -2792,16 +2779,6 @@
#size-cells = <0>;
};
- aoncc: clock-controller@3380000 {
- compatible = "qcom,sm8250-lpass-aoncc";
- reg = <0 0x03380000 0 0x40000>;
- #clock-cells = <1>;
- clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
- <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
- clock-names = "core", "audio", "bus";
- };
-
lpass_tlmm: pinctrl@33c0000 {
compatible = "qcom,sm8250-lpass-lpi-pinctrl";
reg = <0 0x033c0000 0x0 0x20000>,
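
The net effect of the sm8250 hunks above is that the LPASS macro codecs (wsamacro, vamacro) now take their clocks straight from the ADSP clock provider (&q6afecc), using a two-cell <clock-id coupling-attribute> specifier, instead of from the removed audiocc/aoncc controllers. Stripped down to the bare pattern, reusing only labels and defines that appear in the hunks (the node itself is a placeholder):

	/* distilled sketch of the new clock wiring */
	examplemacro: codec@0 {
		clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
			 <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
			 <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
		clock-names = "mclk", "macro", "dcodec";
	};
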
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 38ee0850c335..37a2aba0d4ca 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -2251,6 +2251,12 @@
resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
};
+ refgen: regulator@88e7000 {
+ compatible = "qcom,sm8350-refgen-regulator",
+ "qcom,sm8250-refgen-regulator";
+ reg = <0x0 0x088e7000 0x0 0x84>;
+ };
+
usb_1_qmpphy: phy@88e8000 {
compatible = "qcom,sm8350-qmp-usb3-dp-phy";
reg = <0 0x088e8000 0 0x3000>;
@@ -2490,8 +2496,12 @@
reg-names = "mdss";
interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>,
- <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem",
+ "mdp1-mem",
+ "cpu-cfg";
power-domains = <&dispcc MDSS_GDSC>;
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
@@ -2706,6 +2716,7 @@
operating-points-v2 = <&dsi0_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
+ refgen-supply = <&refgen>;
phys = <&mdss_dsi0_phy>;
@@ -2804,6 +2815,7 @@
operating-points-v2 = <&dsi1_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
+ refgen-supply = <&refgen>;
phys = <&mdss_dsi1_phy>;
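
The refgen node is a plain MMIO-mapped regulator, so consumers attach to it with an ordinary *-supply property, as the two DSI hunks above do with refgen-supply. Any further display block needing the reference-voltage generator would follow the same one-line pattern (sketch only; the consumer node is a placeholder):

	/* hypothetical additional consumer */
	some-display-phy {
		refgen-supply = <&refgen>;
	};
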
diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
index 2e12219006c9..01c921602605 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
@@ -279,6 +279,65 @@
};
};
};
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en>, <&bt_default>, <&pmk8550_sleep_clk>;
+
+ wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+
+ vdd-supply = <&vreg_s5g_0p85>;
+ vddio-supply = <&vreg_l15b_1p8>;
+ vddaon-supply = <&vreg_s2g_0p85>;
+ vdddig-supply = <&vreg_s4e_0p95>;
+ vddrfa1p2-supply = <&vreg_s4g_1p25>;
+ vddrfa1p8-supply = <&vreg_s6g_1p86>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
};
&apps_rsc {
@@ -953,6 +1012,23 @@
status = "okay";
};
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
&pcie0_phy {
vdda-phy-supply = <&vreg_l1e_0p88>;
vdda-pll-supply = <&vreg_l3e_1p2>;
@@ -1041,6 +1117,17 @@
status = "okay";
};
+&pmk8550_gpios {
+ pmk8550_sleep_clk: sleep-clk-state {
+ pins = "gpio3";
+ function = "func1";
+ input-disable;
+ output-enable;
+ bias-disable;
+ power-source = <0>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -1203,6 +1290,13 @@
bias-disable;
output-low;
};
+
+ wlan_en: wlan-en-state {
+ pins = "gpio80";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
};
&uart7 {
@@ -1215,20 +1309,15 @@
bluetooth {
compatible = "qcom,wcn7850-bt";
- vddio-supply = <&vreg_l15b_1p8>;
- vddaon-supply = <&vreg_s4e_0p95>;
- vdddig-supply = <&vreg_s4e_0p95>;
- vddrfa0p8-supply = <&vreg_s4e_0p95>;
- vddrfa1p2-supply = <&vreg_s4g_1p25>;
- vddrfa1p9-supply = <&vreg_s6g_1p86>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
max-speed = <3200000>;
-
- enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
- swctrl-gpios = <&tlmm 82 GPIO_ACTIVE_HIGH>;
-
- pinctrl-0 = <&bt_default>;
- pinctrl-names = "default";
};
};
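
Taken together, the hdk hunks above move WCN7850 power sequencing into a dedicated PMU node: board rails feed the wcn7850-pmu block, the PMU exposes named LDO outputs, and both the wifi@0 node under &pcieport0 and the rewritten bluetooth node consume those outputs by supply name. Reduced to a single rail, the chain looks like this (a sketch reusing only labels from the hunks above):

	wcn7850-pmu {
		compatible = "qcom,wcn7850-pmu";
		vdd-supply = <&vreg_s5g_0p85>;	/* board rail in */

		regulators {
			vreg_pmu_rfa_cmn: ldo0 {
				regulator-name = "vreg_pmu_rfa_cmn";	/* sequenced rail out */
			};
		};
	};

	/* ...which both radios then consume by name: */
	wifi@0 {
		vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
	};

	bluetooth {
		vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
	};
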
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 774bdfcffec3..6052dd922ec5 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -219,13 +219,10 @@
compatible = "qcom,wcn7850-pmu";
pinctrl-names = "default";
- pinctrl-0 = <&wlan_en>, <&pmk8550_sleep_clk>;
+ pinctrl-0 = <&wlan_en>, <&bt_default>, <&pmk8550_sleep_clk>;
wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>;
- /*
- * TODO Add bt-enable-gpios once the Bluetooth driver is
- * converted to using the power sequencer.
- */
+ bt-enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
vdd-supply = <&vreg_s5g_0p85>;
vddio-supply = <&vreg_l15b_1p8>;
@@ -1175,20 +1172,15 @@
bluetooth {
compatible = "qcom,wcn7850-bt";
- vddio-supply = <&vreg_l15b_1p8>;
- vddaon-supply = <&vreg_s4e_0p95>;
- vdddig-supply = <&vreg_s4e_0p95>;
- vddrfa0p8-supply = <&vreg_s4e_0p95>;
- vddrfa1p2-supply = <&vreg_s4g_1p25>;
- vddrfa1p9-supply = <&vreg_s6g_1p86>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
max-speed = <3200000>;
-
- enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
- swctrl-gpios = <&tlmm 82 GPIO_ACTIVE_HIGH>;
-
- pinctrl-0 = <&bt_default>;
- pinctrl-names = "default";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index 4c9820adcf52..9dc0ee3eb98f 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -2747,6 +2747,98 @@
#power-domain-cells = <1>;
};
+ cci0: cci@ac15000 {
+ compatible = "qcom,sm8550-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac15000 0 0x1000>;
+ interrupts = <GIC_SPI 426 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_0_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci0_0_default &cci0_1_default>;
+ pinctrl-1 = <&cci0_0_sleep &cci0_1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci0_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci0_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ cci1: cci@ac16000 {
+ compatible = "qcom,sm8550-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac16000 0 0x1000>;
+ interrupts = <GIC_SPI 427 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_1_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci1_0_default>;
+ pinctrl-1 = <&cci1_0_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci1_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ cci2: cci@ac17000 {
+ compatible = "qcom,sm8550-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac17000 0 0x1000>;
+ interrupts = <GIC_SPI 428 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_2_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci2_0_default &cci2_1_default>;
+ pinctrl-1 = <&cci2_0_sleep &cci2_1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci2_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci2_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
camcc: clock-controller@ade0000 {
compatible = "qcom,sm8550-camcc";
reg = <0 0x0ade0000 0 0x20000>;
@@ -3393,6 +3485,166 @@
gpio-ranges = <&tlmm 0 0 211>;
wakeup-parent = <&pdc>;
+ cci0_0_default: cci0-0-default-state {
+ sda-pins {
+ pins = "gpio110";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio111";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci0_0_sleep: cci0-0-sleep-state {
+ sda-pins {
+ pins = "gpio110";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio111";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci0_1_default: cci0-1-default-state {
+ sda-pins {
+ pins = "gpio112";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio113";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci0_1_sleep: cci0-1-sleep-state {
+ sda-pins {
+ pins = "gpio112";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio113";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci1_0_default: cci1-0-default-state {
+ sda-pins {
+ pins = "gpio114";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio115";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci1_0_sleep: cci1-0-sleep-state {
+ sda-pins {
+ pins = "gpio114";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio115";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci2_0_default: cci2-0-default-state {
+ sda-pins {
+ pins = "gpio74";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio75";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci2_0_sleep: cci2-0-sleep-state {
+ sda-pins {
+ pins = "gpio74";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio75";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci2_1_default: cci2-1-default-state {
+ sda-pins {
+ pins = "gpio0";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio1";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci2_1_sleep: cci2-1-sleep-state {
+ sda-pins {
+ pins = "gpio0";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio1";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
hub_i2c0_data_clk: hub-i2c0-data-clk-state {
/* SDA, SCL */
pins = "gpio16", "gpio17";
diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
index 591e6ab9bf5b..127c7aacd4fc 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
@@ -271,13 +271,10 @@
compatible = "qcom,wcn7850-pmu";
pinctrl-names = "default";
- pinctrl-0 = <&wlan_en>;
+ pinctrl-0 = <&wlan_en>, <&bt_default>;
wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
- /*
- * TODO Add bt-enable-gpios once the Bluetooth driver is
- * converted to using the power sequencer.
- */
+ bt-enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
vdd-supply = <&vreg_s4i_0p85>;
vddio-supply = <&vreg_l15b_1p8>;
@@ -1272,20 +1269,15 @@
bluetooth {
compatible = "qcom,wcn7850-bt";
- vddio-supply = <&vreg_l3c_1p2>;
- vddaon-supply = <&vreg_l15b_1p8>;
- vdddig-supply = <&vreg_s3c_0p9>;
- vddrfa0p8-supply = <&vreg_s3c_0p9>;
- vddrfa1p2-supply = <&vreg_s1c_1p2>;
- vddrfa1p9-supply = <&vreg_s6c_1p8>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
max-speed = <3200000>;
-
- enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
- swctrl-gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>;
-
- pinctrl-0 = <&bt_default>;
- pinctrl-names = "default";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
index b0d7927b708f..8ca0d28eba9b 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
@@ -208,13 +208,10 @@
compatible = "qcom,wcn7850-pmu";
pinctrl-names = "default";
- pinctrl-0 = <&wlan_en>;
+ pinctrl-0 = <&wlan_en>, <&bt_default>;
wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
- /*
- * TODO Add bt-enable-gpios once the Bluetooth driver is
- * converted to using the power sequencer.
- */
+ bt-enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
vdd-supply = <&vreg_s4i_0p85>;
vddio-supply = <&vreg_l15b_1p8>;
@@ -1255,22 +1252,15 @@
bluetooth {
compatible = "qcom,wcn7850-bt";
- clocks = <&rpmhcc RPMH_RF_CLK1>;
-
- vddio-supply = <&vreg_l3c_1p2>;
- vddaon-supply = <&vreg_l15b_1p8>;
- vdddig-supply = <&vreg_s3c_0p9>;
- vddrfa0p8-supply = <&vreg_s3c_0p9>;
- vddrfa1p2-supply = <&vreg_s1c_1p2>;
- vddrfa1p9-supply = <&vreg_s6c_1p8>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
max-speed = <3200000>;
-
- enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>;
- swctrl-gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>;
-
- pinctrl-0 = <&bt_default>;
- pinctrl-names = "default";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index 9d9bbb9aca64..01ac3769ffa6 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -3329,6 +3329,105 @@
#power-domain-cells = <1>;
};
+ cci0: cci@ac15000 {
+ compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac15000 0 0x1000>;
+ interrupts = <GIC_SPI 426 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_0_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci0_0_default &cci0_1_default>;
+ pinctrl-1 = <&cci0_0_sleep &cci0_1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci0_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci0_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ cci1: cci@ac16000 {
+ compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac16000 0 0x1000>;
+ interrupts = <GIC_SPI 427 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_1_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci1_0_default &cci1_1_default>;
+ pinctrl-1 = <&cci1_0_sleep &cci1_1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci1_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci1_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ cci2: cci@ac17000 {
+ compatible = "qcom,sm8650-cci", "qcom,msm8996-cci";
+ reg = <0 0x0ac17000 0 0x1000>;
+ interrupts = <GIC_SPI 428 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CCI_2_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cci";
+ pinctrl-0 = <&cci2_0_default &cci2_1_default>;
+ pinctrl-1 = <&cci2_0_sleep &cci2_1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cci2_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci2_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
camcc: clock-controller@ade0000 {
compatible = "qcom,sm8650-camcc";
reg = <0 0x0ade0000 0 0x20000>;
@@ -4029,6 +4128,198 @@
wakeup-parent = <&pdc>;
+ cci0_0_default: cci0-0-default-state {
+ sda-pins {
+ pins = "gpio113";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio114";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci0_0_sleep: cci0-0-sleep-state {
+ sda-pins {
+ pins = "gpio113";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio114";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci0_1_default: cci0-1-default-state {
+ sda-pins {
+ pins = "gpio115";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio116";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci0_1_sleep: cci0-1-sleep-state {
+ sda-pins {
+ pins = "gpio115";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio116";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci1_0_default: cci1-0-default-state {
+ sda-pins {
+ pins = "gpio117";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio118";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci1_0_sleep: cci1-0-sleep-state {
+ sda-pins {
+ pins = "gpio117";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio118";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci1_1_default: cci1-1-default-state {
+ sda-pins {
+ pins = "gpio12";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio13";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci1_1_sleep: cci1-1-sleep-state {
+ sda-pins {
+ pins = "gpio12";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio13";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci2_0_default: cci2-0-default-state {
+ sda-pins {
+ pins = "gpio112";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio153";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci2_0_sleep: cci2-0-sleep-state {
+ sda-pins {
+ pins = "gpio112";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio153";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ cci2_1_default: cci2-1-default-state {
+ sda-pins {
+ pins = "gpio119";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+
+ scl-pins {
+ pins = "gpio120";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-up = <2200>;
+ };
+ };
+
+ cci2_1_sleep: cci2-1-sleep-state {
+ sda-pins {
+ pins = "gpio119";
+ function = "cci_i2c_sda";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ scl-pins {
+ pins = "gpio120";
+ function = "cci_i2c_scl";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
hub_i2c0_data_clk: hub-i2c0-data-clk-state {
/* SDA, SCL */
pins = "gpio64", "gpio65";
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
new file mode 100644
index 000000000000..941dfddd6713
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "Lenovo ThinkPad T14s Gen 6";
+ compatible = "lenovo,thinkpad-t14s", "qcom,x1e78100", "qcom,x1e80100";
+ chassis-type = "laptop";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Display-adjacent port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* User-adjacent port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* ELAN06E2 or ELAN06E3 */
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&tpad_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+
+ /* TODO: second-sourced SYNA8022 or SYNA8024 touchpad @ 0x2c */
+
+ /* ELAN06F1 or SYNA06F2 */
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* ILIT2911 or GTCH1563 */
+ touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+
+ /* TODO: second-sourced touchscreen @ 0x41 */
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp3 {
+ compatible = "qcom,x1e80100-dp";
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vreg_edp_3p3>;
+
+ pinctrl-0 = <&edp_bl_en>;
+ pinctrl-names = "default";
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>;
+ input-disable;
+ output-enable;
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qcadsp8380.mbn",
+ "qcom/x1e80100/LENOVO/21N1/adsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/LENOVO/21N1/qccdsp8380.mbn",
+ "qcom/x1e80100/LENOVO/21N1/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <72 2>, /* Secure EC I2C connection (?) */
+ <238 1>; /* UFS Reset */
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts0_default: ts0-default-state {
+ reset-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ output-high;
+ drive-strength = <16>;
+ };
+
+ int-n-pins {
+ pins = "gpio51";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-disable;
+ };
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
index 9caa14dda585..20616bd4aa6c 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
@@ -501,10 +501,6 @@
vdd3-supply = <&vreg_l14b_3p0>;
};
-&smb2360_2 {
- status = "disabled";
-};
-
&tlmm {
gpio-reserved-ranges = <34 2>, /* Unused */
<44 4>, /* SPI (TPM) */
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index e17ab8251e2a..10b28d870f08 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -6,6 +6,8 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "x1e80100.dtsi"
@@ -49,6 +51,21 @@
stdout-path = "serial0:115200n8";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
pmic-glink {
compatible = "qcom,x1e80100-pmic-glink",
"qcom,sm8550-pmic-glink",
@@ -284,6 +301,22 @@
pinctrl-names = "default";
pinctrl-0 = <&nvme_reg_en>;
};
+
+ vreg_wwan: regulator-wwan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDX_VPH_PWR";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wwan_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
};
&apps_rsc {
@@ -783,6 +816,25 @@
status = "okay";
};
+&pcie5 {
+ perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wwan>;
+
+ pinctrl-0 = <&pcie5_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie5_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
&pcie6a {
perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
@@ -848,6 +900,10 @@
vdd3-supply = <&vreg_l14b_3p0>;
};
+&smb2360_2 {
+ status = "okay";
+};
+
&smb2360_2_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l8b_3p0>;
@@ -868,6 +924,7 @@
sound-name-prefix = "WooferLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Left Tweeter */
@@ -879,6 +936,7 @@
sound-name-prefix = "TwitterLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -919,6 +977,7 @@
sound-name-prefix = "WooferRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Right Tweeter */
@@ -930,6 +989,7 @@
sound-name-prefix = "TwitterRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
@@ -945,6 +1005,12 @@
bias-disable;
};
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
kybd_default: kybd-default-state {
pins = "gpio67";
function = "gpio";
@@ -981,6 +1047,29 @@
};
};
+ pcie5_default: pcie5-default-state {
+ clkreq-n-pins {
+ pins = "gpio150";
+ function = "pcie5_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio149";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio151";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
pcie6a_default: pcie6a-default-state {
clkreq-n-pins {
pins = "gpio153";
@@ -1032,6 +1121,13 @@
bias-disable;
output-low;
};
+
+ wwan_sw_en: wwan-sw-en-state {
+ pins = "gpio221";
+ function = "gpio";
+ drive-strength = <4>;
+ bias-disable;
+ };
};
&uart21 {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
index 1943bdbfb8c0..3c13331a9ef4 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
@@ -190,7 +190,6 @@
pinctrl-0 = <&edp_reg_en>;
pinctrl-names = "default";
- regulator-always-on;
regulator-boot-on;
};
@@ -592,9 +591,13 @@
aux-bus {
panel {
- compatible = "edp-panel";
+ compatible = "samsung,atna45dc02", "samsung,atna33xc20";
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
power-supply = <&vreg_edp_3p3>;
+ pinctrl-0 = <&edp_bl_en>;
+ pinctrl-names = "default";
+
port {
edp_panel_in: endpoint {
remote-endpoint = <&mdss_dp3_out>;
@@ -669,6 +672,16 @@
status = "okay";
};
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <0>;
+ input-disable;
+ output-enable;
+ };
+};
+
&qupv3_0 {
status = "okay";
};
@@ -704,6 +717,10 @@
vdd3-supply = <&vreg_l14b_3p0>;
};
+&smb2360_2 {
+ status = "okay";
+};
+
&smb2360_2_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l8b_3p0>;
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
new file mode 100644
index 000000000000..42e02ad6a9c3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uart2;
+ i2c0 = &i2c0;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c7 = &i2c7;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pmk8550_pwm 0 5000000>;
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
+ /* TODO: power-supply? */
+
+ pinctrl-0 = <&edp_bl_en>, <&edp_bl_pwm>;
+ pinctrl-names = "default";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_indicator_en>;
+
+ led-camera-indicator {
+ label = "white:camera-indicator";
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&tlmm 225 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ /* Reuse as a panic indicator until we get a "camera on" trigger */
+ panic-indicator;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>;
+
+ /* Left-side rear port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side front port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j>;
+ vdd-l15-supply = <&vreg_s4c>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b: ldo2 {
+ regulator-name = "vreg_l2b";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b: ldo4 {
+ regulator-name = "vreg_l4b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b: ldo5 {
+ regulator-name = "vreg_l5b";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b: ldo6 {
+ regulator-name = "vreg_l6b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b: ldo7 {
+ regulator-name = "vreg_l7b";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b: ldo8 {
+ regulator-name = "vreg_l8b";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b: ldo9 {
+ regulator-name = "vreg_l9b";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b: ldo10 {
+ regulator-name = "vreg_l10b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b: ldo12 {
+ regulator-name = "vreg_l12b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b: ldo13 {
+ regulator-name = "vreg_l13b";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b: ldo14 {
+ regulator-name = "vreg_l14b";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b: ldo15 {
+ regulator-name = "vreg_l15b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b: ldo16 {
+ regulator-name = "vreg_l16b";
+ regulator-min-microvolt = <2912000>;
+ regulator-max-microvolt = <2912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b: ldo17 {
+ regulator-name = "vreg_l17b";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j>;
+ vdd-l2-supply = <&vreg_s1f>;
+ vdd-l3-supply = <&vreg_s1f>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c: smps4 {
+ regulator-name = "vreg_s4c";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c: ldo3 {
+ regulator-name = "vreg_l3c";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f>;
+ vdd-l2-supply = <&vreg_s1f>;
+ vdd-l3-supply = <&vreg_s4c>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d: ldo1 {
+ regulator-name = "vreg_l1d";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d: ldo2 {
+ regulator-name = "vreg_l2d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d: ldo3 {
+ regulator-name = "vreg_l3d";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f>;
+ vdd-l3-supply = <&vreg_s5j>;
+
+ vreg_l2e: ldo2 {
+ regulator-name = "vreg_l2e";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e: ldo3 {
+ regulator-name = "vreg_l3e";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j>;
+ vdd-l2-supply = <&vreg_s5j>;
+ vdd-l3-supply = <&vreg_s5j>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f: smps1 {
+ regulator-name = "vreg_s1f";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f: ldo1 {
+ regulator-name = "vreg_l1f";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f: ldo2 {
+ regulator-name = "vreg_l2f";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f: ldo3 {
+ regulator-name = "vreg_l3f";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c>;
+ vdd-l2-supply = <&vreg_s5j>;
+ vdd-l3-supply = <&vreg_s1f>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_s1i: smps1 {
+ regulator-name = "vreg_s1i";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2i: smps2 {
+ regulator-name = "vreg_s2i";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i: ldo1 {
+ regulator-name = "vreg_l1i";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i: ldo2 {
+ regulator-name = "vreg_l2i";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i: ldo3 {
+ regulator-name = "vreg_l3i";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f>;
+ vdd-l2-supply = <&vreg_s5j>;
+ vdd-l3-supply = <&vreg_s1f>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j: smps5 {
+ regulator-name = "vreg_s5j";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j: ldo1 {
+ regulator-name = "vreg_l1j";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j: ldo2 {
+ regulator-name = "vreg_l2j";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j: ldo3 {
+ regulator-name = "vreg_l3j";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_microcode_mem>;
+ firmware-name = "qcom/x1e80100/microsoft/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+
+ status = "okay";
+
+ /* Something @39, @3e, @44 */
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* PS8830 USB retimer @8 */
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* Something @18, @2c, @2e */
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* Something @4f */
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* PS8830 USB retimer @8 */
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp3 {
+ compatible = "qcom,x1e80100-dp";
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+
+ backlight = <&backlight>;
+ power-supply = <&vreg_edp_3p3>;
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j>;
+ vdda-pll-supply = <&vreg_l2j>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i>;
+ vdda-pll-supply = <&vreg_l3e>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d>;
+ vdda-pll-supply = <&vreg_l2j>;
+
+ status = "okay";
+};
+
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmk8550_pwm {
+ status = "okay";
+};
+
+&pmk8550_gpios {
+ edp_bl_pwm: edp-bl-pwm-state {
+ pins = "gpio5";
+ function = "func3";
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/microsoft/Romulus/qcadsp8380.mbn",
+ "qcom/x1e80100/microsoft/Romulus/adsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/microsoft/Romulus/qccdsp8380.mbn",
+ "qcom/x1e80100/microsoft/Romulus/cdsp_dtb.mbn";
+
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d>;
+ vdd3-supply = <&vreg_l2b>;
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d>;
+ vdd3-supply = <&vreg_l14b>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <44 4>, /* SPI (TPM) */
+ <238 1>; /* UFS Reset */
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ ssam_state: ssam-state-state {
+ pins = "gpio91";
+ function = "gpio";
+ bias-disable;
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ cam_indicator_en: cam-indicator-en-state {
+ pins = "gpio225";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&uart2 {
+ status = "okay";
+
+ embedded-controller {
+ compatible = "microsoft,surface-sam";
+
+ interrupts-extended = <&tlmm 91 IRQ_TYPE_EDGE_RISING>;
+
+ current-speed = <4000000>;
+
+ pinctrl-0 = <&ssam_state>;
+ pinctrl-names = "default";
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j>;
+ vdda12-supply = <&vreg_l2j>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e>;
+ vdda-pll-supply = <&vreg_l1j>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j>;
+ vdda12-supply = <&vreg_l2j>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e>;
+ vdda-pll-supply = <&vreg_l2d>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts
new file mode 100644
index 000000000000..eb7580dd9684
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "x1e80100-microsoft-romulus.dtsi"
+
+/ {
+ model = "Microsoft Surface Laptop 7 (13.8 inch)";
+ compatible = "microsoft,romulus13", "qcom,x1e80100";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts
new file mode 100644
index 000000000000..4751ad9b510b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "x1e80100-microsoft-romulus.dtsi"
+
+/ {
+ model = "Microsoft Surface Laptop 7 (15 inch)";
+ compatible = "microsoft,romulus15", "qcom,x1e80100";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
index e34e70922cd3..5b54ee79f048 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
@@ -249,6 +249,14 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ pmk8550_pwm: pwm {
+ compatible = "qcom,pmk8550-pwm";
+
+ #pwm-cells = <2>;
+
+ status = "disabled";
+ };
};
/* PMC8380C */
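The pmk8550_pwm node added above exposes a two-cell PWM specifier (channel index, then period in nanoseconds). As a sketch only — the backlight node, channel number, period and supply wiring below are illustrative assumptions, not part of this patch — a board could consume it roughly like this:

	backlight: backlight {
		compatible = "pwm-backlight";
		/* channel 5, ~4.27 ms period -- illustrative values */
		pwms = <&pmk8550_pwm 5 4266537>;
		/* hypothetical supply/enable wiring */
		power-supply = <&vreg_edp_bl>;
		enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
	};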
@@ -509,6 +517,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ status = "disabled";
+
smb2360_2_eusb2_repeater: phy@fd00 {
compatible = "qcom,smb2360-eusb2-repeater";
reg = <0xfd00>;
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index 8098e6730ae5..1c3a6a7b3ed6 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -729,10 +729,6 @@
status = "okay";
};
-&smb2360_3 {
- status = "okay";
-};
-
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
@@ -743,11 +739,19 @@
vdd3-supply = <&vreg_l14b_3p0>;
};
+&smb2360_2 {
+ status = "okay";
+};
+
&smb2360_2_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l8b_3p0>;
};
+&smb2360_3 {
+ status = "okay";
+};
+
&swr0 {
pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
pinctrl-names = "default";
@@ -763,6 +767,7 @@
sound-name-prefix = "SpkrLeft";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
};
/* WSA8845, Right Speaker */
@@ -774,6 +779,7 @@
sound-name-prefix = "SpkrRight";
vdd-1p8-supply = <&vreg_l15b_1p8>;
vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index cd732ef88cd8..a36076e3c56b 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sc8280xp-lpasscc.h>
#include <dt-bindings/clock/qcom,x1e80100-dispcc.h>
#include <dt-bindings/clock/qcom,x1e80100-gcc.h>
#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
@@ -745,7 +746,7 @@
<&sleep_clk>,
<0>,
<&pcie4_phy>,
- <0>,
+ <&pcie5_phy>,
<&pcie6a_phy>,
<0>,
<&usb_1_ss0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
@@ -1979,7 +1980,7 @@
i2c0: i2c@b80000 {
compatible = "qcom,geni-i2c";
- reg = <0 0xb80000 0 0x4000>;
+ reg = <0 0x00b80000 0 0x4000>;
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
@@ -2142,9 +2143,31 @@
status = "disabled";
};
+ uart2: serial@b88000 {
+ compatible = "qcom,geni-uart";
+ reg = <0 0x00b88000 0 0x4000>;
+
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&qup_uart2_default>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+ };
+
spi2: spi@b88000 {
compatible = "qcom,geni-spi";
- reg = <0 0xb88000 0 0x4000>;
+ reg = <0 0x00b88000 0 0x4000>;
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
@@ -2243,7 +2266,7 @@
i2c4: i2c@b90000 {
compatible = "qcom,geni-i2c";
- reg = <0 0xb90000 0 0x4000>;
+ reg = <0 0x00b90000 0 0x4000>;
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
@@ -2603,6 +2626,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -2671,6 +2696,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -2739,6 +2766,8 @@
#clock-cells = <1>;
#phy-cells = <1>;
+ orientation-switch;
+
status = "disabled";
ports {
@@ -2772,7 +2801,7 @@
cnoc_main: interconnect@1500000 {
compatible = "qcom,x1e80100-cnoc-main";
- reg = <0 0x1500000 0 0x14400>;
+ reg = <0 0x01500000 0 0x14400>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2781,7 +2810,7 @@
config_noc: interconnect@1600000 {
compatible = "qcom,x1e80100-cnoc-cfg";
- reg = <0 0x1600000 0 0x6600>;
+ reg = <0 0x01600000 0 0x6600>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2790,7 +2819,7 @@
system_noc: interconnect@1680000 {
compatible = "qcom,x1e80100-system-noc";
- reg = <0 0x1680000 0 0x1c080>;
+ reg = <0 0x01680000 0 0x1c080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2799,7 +2828,7 @@
pcie_south_anoc: interconnect@16c0000 {
compatible = "qcom,x1e80100-pcie-south-anoc";
- reg = <0 0x16c0000 0 0xd080>;
+ reg = <0 0x016c0000 0 0xd080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2808,7 +2837,7 @@
pcie_center_anoc: interconnect@16d0000 {
compatible = "qcom,x1e80100-pcie-center-anoc";
- reg = <0 0x16d0000 0 0x7000>;
+ reg = <0 0x016d0000 0 0x7000>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2817,7 +2846,7 @@
aggre1_noc: interconnect@16e0000 {
compatible = "qcom,x1e80100-aggre1-noc";
- reg = <0 0x16E0000 0 0x14400>;
+ reg = <0 0x016e0000 0 0x14400>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2826,7 +2855,7 @@
aggre2_noc: interconnect@1700000 {
compatible = "qcom,x1e80100-aggre2-noc";
- reg = <0 0x1700000 0 0x1c400>;
+ reg = <0 0x01700000 0 0x1c400>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2835,7 +2864,7 @@
pcie_north_anoc: interconnect@1740000 {
compatible = "qcom,x1e80100-pcie-north-anoc";
- reg = <0 0x1740000 0 0x9080>;
+ reg = <0 0x01740000 0 0x9080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2844,7 +2873,7 @@
usb_center_anoc: interconnect@1750000 {
compatible = "qcom,x1e80100-usb-center-anoc";
- reg = <0 0x1750000 0 0x8800>;
+ reg = <0 0x01750000 0 0x8800>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2853,7 +2882,7 @@
usb_north_anoc: interconnect@1760000 {
compatible = "qcom,x1e80100-usb-north-anoc";
- reg = <0 0x1760000 0 0x7080>;
+ reg = <0 0x01760000 0 0x7080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2862,7 +2891,7 @@
usb_south_anoc: interconnect@1770000 {
compatible = "qcom,x1e80100-usb-south-anoc";
- reg = <0 0x1770000 0 0xf080>;
+ reg = <0 0x01770000 0 0xf080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -2871,7 +2900,7 @@
mmss_noc: interconnect@1780000 {
compatible = "qcom,x1e80100-mmss-noc";
- reg = <0 0x1780000 0 0x5B800>;
+ reg = <0 0x01780000 0 0x5b800>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -3000,6 +3029,126 @@
status = "disabled";
};
+ pcie5: pci@1c00000 {
+ device_type = "pci";
+ compatible = "qcom,pcie-x1e80100";
+ reg = <0 0x01c00000 0 0x3000>,
+ <0 0x7e000000 0 0xf1d>,
+ <0 0x7e000f40 0 0xa8>,
+ <0 0x7e001000 0 0x1000>,
+ <0 0x7e100000 0 0x100000>,
+ <0 0x01c03000 0 0x1000>;
+ reg-names = "parf",
+ "dbi",
+ "elbi",
+ "atu",
+ "config",
+ "mhi";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x7e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x7e300000 0x0 0x7e300000 0x0 0x1d00000>;
+ bus-range = <0x00 0xff>;
+
+ dma-coherent;
+
+ linux,pci-domain = <5>;
+ num-lanes = <2>;
+
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 70 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 0 71 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 0 72 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 0 73 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE_5_AUX_CLK>,
+ <&gcc GCC_PCIE_5_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_5_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_5_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_5_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK>,
+ <&gcc GCC_CNOC_PCIE_NORTH_SF_AXI_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "noc_aggr",
+ "cnoc_sf_axi";
+
+ assigned-clocks = <&gcc GCC_PCIE_5_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ interconnects = <&pcie_south_anoc MASTER_PCIE_5 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &cnoc_main SLAVE_PCIE_5 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem",
+ "cpu-pcie";
+
+ resets = <&gcc GCC_PCIE_5_BCR>,
+ <&gcc GCC_PCIE_5_LINK_DOWN_BCR>;
+ reset-names = "pci",
+ "link_down";
+
+ power-domains = <&gcc GCC_PCIE_5_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie5_phy>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie5_phy: phy@1c06000 {
+ compatible = "qcom,x1e80100-qmp-gen3x2-pcie-phy";
+ reg = <0 0x01c06000 0 0x2000>;
+
+ clocks = <&gcc GCC_PCIE_5_AUX_CLK>,
+ <&gcc GCC_PCIE_5_CFG_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>,
+ <&gcc GCC_PCIE_5_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "rchng",
+ "pipe";
+
+ resets = <&gcc GCC_PCIE_5_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc GCC_PCIE_5_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "pcie5_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
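Both pcie5 nodes above are left disabled in the SoC include, so a board that actually routes this controller has to enable them and feed the PHY rails. A minimal board-side sketch — the GPIO number and the vreg_* labels here are placeholders, not taken from this patch:

	&pcie5 {
		perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;	/* board-specific reset line */
		status = "okay";
	};

	&pcie5_phy {
		vdda-phy-supply = <&vreg_l3i>;	/* placeholder rails */
		vdda-pll-supply = <&vreg_l3e>;
		status = "okay";
	};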
pcie4: pci@1c08000 {
device_type = "pci";
compatible = "qcom,pcie-x1e80100";
@@ -3350,7 +3499,7 @@
nsp_noc: interconnect@320c0000 {
compatible = "qcom,x1e80100-nsp-noc";
- reg = <0 0x320C0000 0 0xE080>;
+ reg = <0 0x320c0000 0 0xe080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -3385,6 +3534,8 @@
pinctrl-0 = <&wsa2_swr_active>;
pinctrl-names = "default";
+ resets = <&lpass_audiocc LPASS_AUDIO_SWR_WSA2_CGCR>;
+ reset-names = "swr_audio_cgcr";
qcom,din-ports = <4>;
qcom,dout-ports = <9>;
@@ -3433,6 +3584,8 @@
pinctrl-0 = <&rx_swr_active>;
pinctrl-names = "default";
+ resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>;
+ reset-names = "swr_audio_cgcr";
qcom,din-ports = <1>;
qcom,dout-ports = <11>;
@@ -3497,6 +3650,8 @@
pinctrl-0 = <&wsa_swr_active>;
pinctrl-names = "default";
+ resets = <&lpass_audiocc LPASS_AUDIO_SWR_WSA_CGCR>;
+ reset-names = "swr_audio_cgcr";
qcom,din-ports = <4>;
qcom,dout-ports = <9>;
@@ -3517,6 +3672,13 @@
status = "disabled";
};
+ lpass_audiocc: clock-controller@6b6c000 {
+ compatible = "qcom,x1e80100-lpassaudiocc", "qcom,sc8280xp-lpassaudiocc";
+ reg = <0 0x06b6c000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
swr2: soundwire@6d30000 {
compatible = "qcom,soundwire-v2.0.0";
reg = <0 0x06d30000 0 0x10000>;
@@ -3526,6 +3688,8 @@
<GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core", "wakeup";
label = "TX";
+ resets = <&lpasscc LPASS_AUDIO_SWR_TX_CGCR>;
+ reset-names = "swr_audio_cgcr";
pinctrl-0 = <&tx_swr_active>;
pinctrl-names = "default";
@@ -3682,9 +3846,16 @@
};
};
+ lpasscc: clock-controller@6ea0000 {
+ compatible = "qcom,x1e80100-lpasscc", "qcom,sc8280xp-lpasscc";
+ reg = <0 0x06ea0000 0 0x12000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
lpass_ag_noc: interconnect@7e40000 {
compatible = "qcom,x1e80100-lpass-ag-noc";
- reg = <0 0x7e40000 0 0xE080>;
+ reg = <0 0x07e40000 0 0xe080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -3693,7 +3864,7 @@
lpass_lpiaon_noc: interconnect@7400000 {
compatible = "qcom,x1e80100-lpass-lpiaon-noc";
- reg = <0 0x7400000 0 0x19080>;
+ reg = <0 0x07400000 0 0x19080>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -3702,7 +3873,7 @@
lpass_lpicx_noc: interconnect@7430000 {
compatible = "qcom,x1e80100-lpass-lpicx-noc";
- reg = <0 0x7430000 0 0x3A200>;
+ reg = <0 0x07430000 0 0x3a200>;
qcom,bcm-voters = <&apps_bcm_voter>;
@@ -3723,6 +3894,90 @@
status = "disabled";
};
+ usb_mp_hsphy0: phy@88e1000 {
+ compatible = "qcom,x1e80100-snps-eusb2-phy",
+ "qcom,sm8550-snps-eusb2-phy";
+ reg = <0 0x088e1000 0 0x154>;
+ #phy-cells = <0>;
+
+ clocks = <&tcsr TCSR_USB3_MP0_CLKREF_EN>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_HS0_MP_BCR>;
+
+ status = "disabled";
+ };
+
+ usb_mp_hsphy1: phy@88e2000 {
+ compatible = "qcom,x1e80100-snps-eusb2-phy",
+ "qcom,sm8550-snps-eusb2-phy";
+ reg = <0 0x088e2000 0 0x154>;
+ #phy-cells = <0>;
+
+ clocks = <&tcsr TCSR_USB3_MP1_CLKREF_EN>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_HS1_MP_BCR>;
+
+ status = "disabled";
+ };
+
+ usb_mp_qmpphy0: phy@88e3000 {
+ compatible = "qcom,x1e80100-qmp-usb3-uni-phy";
+ reg = <0 0x088e3000 0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>;
+ reset-names = "phy",
+ "phy_phy";
+
+ power-domains = <&gcc GCC_USB3_MP_SS0_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb_mp_phy0_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_mp_qmpphy1: phy@88e5000 {
+ compatible = "qcom,x1e80100-qmp-usb3-uni-phy";
+ reg = <0 0x088e5000 0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>,
+ <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>;
+ reset-names = "phy",
+ "phy_phy";
+
+ power-domains = <&gcc GCC_USB3_MP_SS1_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb_mp_phy1_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
usb_1_ss2: usb@a0f8800 {
compatible = "qcom,x1e80100-dwc3", "qcom,dwc3";
reg = <0 0x0a0f8800 0 0x400>;
@@ -3897,6 +4152,92 @@
};
};
+ usb_mp: usb@a4f8800 {
+ compatible = "qcom,x1e80100-dwc3-mp", "qcom,dwc3";
+ reg = <0 0x0a4f8800 0 0x400>;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_MP_AXI_CLK>,
+ <&gcc GCC_USB30_MP_SLEEP_CLK>,
+ <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_AGGRE_USB_NOC_AXI_CLK>,
+ <&gcc GCC_AGGRE_NOC_USB_NORTH_AXI_CLK>,
+ <&gcc GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_USB_AXI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "noc_aggr",
+ "noc_aggr_north",
+ "noc_aggr_south",
+ "noc_sys";
+
+ assigned-clocks = <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MP_MASTER_CLK>;
+ assigned-clock-rates = <19200000>,
+ <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 52 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 51 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 54 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 53 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 55 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event_1", "pwr_event_2",
+ "hs_phy_1", "hs_phy_2",
+ "dp_hs_phy_1", "dm_hs_phy_1",
+ "dp_hs_phy_2", "dm_hs_phy_2",
+ "ss_phy_1", "ss_phy_2";
+
+ power-domains = <&gcc GCC_USB30_MP_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_MP_BCR>;
+
+ interconnects = <&usb_north_anoc MASTER_USB3_MP QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_USB3_MP QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "usb-ddr",
+ "apps-usb";
+
+ wakeup-source;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ usb_mp_dwc3: usb@a400000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a400000 0 0xcd00>;
+
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&apps_smmu 0x1400 0x0>;
+
+ phys = <&usb_mp_hsphy0>, <&usb_mp_qmpphy0>,
+ <&usb_mp_hsphy1>, <&usb_mp_qmpphy1>;
+ phy-names = "usb2-0", "usb3-0",
+ "usb2-1", "usb3-1";
+ dr_mode = "host";
+
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ snps,usb3_lpm_capable;
+
+ dma-coherent;
+ };
+ };
+
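Like the single-port controllers, usb_mp and its four PHYs default to "disabled"; a board enables the wrapper plus whichever PHY pair it routes. A hedged sketch, with placeholder regulator labels that each board would define for itself:

	&usb_mp {
		status = "okay";
	};

	&usb_mp_hsphy0 {
		vdd-supply = <&vreg_ldo_0p88>;		/* placeholder supplies */
		vdda12-supply = <&vreg_ldo_1p2>;
		status = "okay";
	};

	&usb_mp_qmpphy0 {
		vdda-phy-supply = <&vreg_ldo_1p2>;
		vdda-pll-supply = <&vreg_ldo_0p88>;
		status = "okay";
	};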
usb_1_ss0: usb@a6f8800 {
compatible = "qcom,x1e80100-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
@@ -4215,11 +4556,11 @@
mdss_dp0: displayport-controller@ae90000 {
compatible = "qcom,x1e80100-dp";
- reg = <0 0xae90000 0 0x200>,
- <0 0xae90200 0 0x200>,
- <0 0xae90400 0 0x600>,
- <0 0xae91000 0 0x400>,
- <0 0xae91400 0 0x400>;
+ reg = <0 0x0ae90000 0 0x200>,
+ <0 0x0ae90200 0 0x200>,
+ <0 0x0ae90400 0 0x600>,
+ <0 0x0ae91000 0 0x400>,
+ <0 0x0ae91400 0 0x400>;
interrupts-extended = <&mdss 12>;
@@ -4298,11 +4639,11 @@
mdss_dp1: displayport-controller@ae98000 {
compatible = "qcom,x1e80100-dp";
- reg = <0 0xae98000 0 0x200>,
- <0 0xae98200 0 0x200>,
- <0 0xae98400 0 0x600>,
- <0 0xae99000 0 0x400>,
- <0 0xae99400 0 0x400>;
+ reg = <0 0x0ae98000 0 0x200>,
+ <0 0x0ae98200 0 0x200>,
+ <0 0x0ae98400 0 0x600>,
+ <0 0x0ae99000 0 0x400>,
+ <0 0x0ae99400 0 0x400>;
interrupts-extended = <&mdss 13>;
@@ -4381,11 +4722,11 @@
mdss_dp2: displayport-controller@ae9a000 {
compatible = "qcom,x1e80100-dp";
- reg = <0 0xae9a000 0 0x200>,
- <0 0xae9a200 0 0x200>,
- <0 0xae9a400 0 0x600>,
- <0 0xae9b000 0 0x400>,
- <0 0xae9b400 0 0x400>;
+ reg = <0 0x0ae9a000 0 0x200>,
+ <0 0x0ae9a200 0 0x200>,
+ <0 0x0ae9a400 0 0x600>,
+ <0 0x0ae9b000 0 0x400>,
+ <0 0x0ae9b400 0 0x400>;
interrupts-extended = <&mdss 14>;
@@ -4402,14 +4743,14 @@
assigned-clocks = <&dispcc DISP_CC_MDSS_DPTX2_LINK_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC>;
- assigned-clock-parents = <&mdss_dp2_phy 0>,
- <&mdss_dp2_phy 1>;
+ assigned-clock-parents = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>,
+ <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
operating-points-v2 = <&mdss_dp2_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
- phys = <&mdss_dp2_phy>;
+ phys = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";
#sound-dai-cells = <0>;
@@ -4463,11 +4804,11 @@
mdss_dp3: displayport-controller@aea0000 {
compatible = "qcom,x1e80100-dp";
- reg = <0 0xaea0000 0 0x200>,
- <0 0xaea0200 0 0x200>,
- <0 0xaea0400 0 0x600>,
- <0 0xaea1000 0 0x400>,
- <0 0xaea1400 0 0x400>;
+ reg = <0 0x0aea0000 0 0x200>,
+ <0 0x0aea0200 0 0x200>,
+ <0 0x0aea0400 0 0x600>,
+ <0 0x0aea1000 0 0x400>,
+ <0 0x0aea1400 0 0x400>;
interrupts-extended = <&mdss 15>;
@@ -4597,8 +4938,8 @@
<&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&usb_1_ss1_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp1 */
<&usb_1_ss1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
- <&mdss_dp2_phy 0>, /* dp2 */
- <&mdss_dp2_phy 1>,
+ <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp2 */
+ <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&mdss_dp3_phy 0>, /* dp3 */
<&mdss_dp3_phy 1>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
@@ -4631,6 +4972,11 @@
#clock-cells = <0>;
};
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0 0x0c3f0000 0 0x400>;
+ };
+
spmi: arbiter@c400000 {
compatible = "qcom,x1e80100-spmi-pmic-arb";
reg = <0 0x0c400000 0 0x3000>,
@@ -5241,12 +5587,50 @@
bias-disable;
};
+ qup_uart2_default: qup-uart2-default-state {
+ cts-pins {
+ pins = "gpio8";
+ function = "qup0_se2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rts-pins {
+ pins = "gpio9";
+ function = "qup0_se2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tx-pins {
+ pins = "gpio10";
+ function = "qup0_se2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio11";
+ function = "qup0_se2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
qup_uart21_default: qup-uart21-default-state {
- /* TX, RX */
- pins = "gpio86", "gpio87";
- function = "qup2_se5";
- drive-strength = <2>;
- bias-disable;
+ tx-pins {
+ pins = "gpio86";
+ function = "qup2_se5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio87";
+ function = "qup2_se5";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index fbd214a1a638..97228a3cb99c 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -112,9 +112,12 @@ dtb-$(CONFIG_ARCH_R8A77965) += r8a779m5-salvator-xs-panel-aa104xd12.dtb
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc.dtb
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-cru-csi-ov5645.dtbo
+dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-du-adv7513.dtbo
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043-smarc-pmod.dtbo
r9a07g043u11-smarc-cru-csi-ov5645-dtbs := r9a07g043u11-smarc.dtb r9a07g043u11-smarc-cru-csi-ov5645.dtbo
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-cru-csi-ov5645.dtb
+r9a07g043u11-smarc-du-adv7513-dtbs := r9a07g043u11-smarc.dtb r9a07g043u11-smarc-du-adv7513.dtbo
+dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-du-adv7513.dtb
r9a07g043u11-smarc-pmod-dtbs := r9a07g043u11-smarc.dtb r9a07g043-smarc-pmod.dtbo
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-pmod.dtb
@@ -137,5 +140,7 @@ dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc.dtb
dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb
+dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h44-rzv2h-evk.dtb
+
dtb-$(CONFIG_ARCH_RCAR_GEN3) += draak-ebisu-panel-aa104xd12.dtbo
dtb-$(CONFIG_ARCH_RCAR_GEN3) += salvator-panel-aa104xd12.dtbo
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 1dbf9d56c68d..f065ee90649a 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2277,6 +2277,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 314>;
+ iommus = <&ipmmu_ds1 32>;
status = "disabled";
};
@@ -2290,6 +2291,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 313>;
+ iommus = <&ipmmu_ds1 33>;
status = "disabled";
};
@@ -2303,6 +2305,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 312>;
+ iommus = <&ipmmu_ds1 34>;
status = "disabled";
};
@@ -2316,6 +2319,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 311>;
+ iommus = <&ipmmu_ds1 35>;
status = "disabled";
};
@@ -2464,6 +2468,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A774A1_PD_A3VC>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vc0 16>;
};
fcpvb0: fcp@fe96f000 {
@@ -2472,6 +2477,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A774A1_PD_A3VC>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vi0 5>;
};
fcpvd0: fcp@fea27000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 10f22c52e79e..117cb6950f91 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2004,6 +2004,14 @@
resets = <&cpg 502>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>,
+ <&ipmmu_mp 2>, <&ipmmu_mp 3>,
+ <&ipmmu_mp 4>, <&ipmmu_mp 5>,
+ <&ipmmu_mp 6>, <&ipmmu_mp 7>,
+ <&ipmmu_mp 8>, <&ipmmu_mp 9>,
+ <&ipmmu_mp 10>, <&ipmmu_mp 11>,
+ <&ipmmu_mp 12>, <&ipmmu_mp 13>,
+ <&ipmmu_mp 14>, <&ipmmu_mp 15>;
};
audma1: dma-controller@ec720000 {
@@ -2038,6 +2046,14 @@
resets = <&cpg 501>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>,
+ <&ipmmu_mp 18>, <&ipmmu_mp 19>,
+ <&ipmmu_mp 20>, <&ipmmu_mp 21>,
+ <&ipmmu_mp 22>, <&ipmmu_mp 23>,
+ <&ipmmu_mp 24>, <&ipmmu_mp 25>,
+ <&ipmmu_mp 26>, <&ipmmu_mp 27>,
+ <&ipmmu_mp 28>, <&ipmmu_mp 29>,
+ <&ipmmu_mp 30>, <&ipmmu_mp 31>;
};
xhci0: usb@ee000000 {
@@ -2145,6 +2161,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 314>;
+ iommus = <&ipmmu_ds1 32>;
status = "disabled";
};
@@ -2158,6 +2175,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 313>;
+ iommus = <&ipmmu_ds1 33>;
status = "disabled";
};
@@ -2171,6 +2189,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 312>;
+ iommus = <&ipmmu_ds1 34>;
status = "disabled";
};
@@ -2184,6 +2203,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 311>;
+ iommus = <&ipmmu_ds1 35>;
status = "disabled";
};
@@ -2211,6 +2231,7 @@
clocks = <&cpg CPG_MOD 815>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 815>;
+ iommus = <&ipmmu_hc 2>;
status = "disabled";
};
@@ -2343,6 +2364,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A774B1_PD_A3VP>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vp0 0>;
};
vspb: vsp@fe960000 {
@@ -2395,6 +2417,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A774B1_PD_A3VP>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vp0 5>;
};
fcpvd0: fcp@fea27000 {
@@ -2403,6 +2426,7 @@
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
};
fcpvd1: fcp@fea2f000 {
@@ -2411,6 +2435,7 @@
clocks = <&cpg CPG_MOD 602>;
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 602>;
+ iommus = <&ipmmu_vi0 9>;
};
fcpvi0: fcp@fe9af000 {
@@ -2419,6 +2444,7 @@
clocks = <&cpg CPG_MOD 611>;
power-domains = <&sysc R8A774B1_PD_A3VP>;
resets = <&cpg 611>;
+ iommus = <&ipmmu_vp0 8>;
};
csi20: csi2@fea80000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 3e2af50ce7c6..7655d5e3a034 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1637,6 +1637,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 314>;
+ iommus = <&ipmmu_ds1 32>;
status = "disabled";
};
@@ -1650,6 +1651,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 313>;
+ iommus = <&ipmmu_ds1 33>;
status = "disabled";
};
@@ -1663,6 +1665,7 @@
max-frequency = <200000000>;
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 311>;
+ iommus = <&ipmmu_ds1 35>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 1eeb4c7b4c4b..f845ca604de0 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2652,6 +2652,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vp0 0>;
};
fcpf1: fcp@fe951000 {
@@ -2660,6 +2661,7 @@
clocks = <&cpg CPG_MOD 614>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 614>;
+ iommus = <&ipmmu_vp1 1>;
};
fcpvb0: fcp@fe96f000 {
@@ -2668,6 +2670,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vp0 5>;
};
fcpvb1: fcp@fe92f000 {
@@ -2676,6 +2679,7 @@
clocks = <&cpg CPG_MOD 606>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 606>;
+ iommus = <&ipmmu_vp1 7>;
};
fcpvi0: fcp@fe9af000 {
@@ -2684,6 +2688,7 @@
clocks = <&cpg CPG_MOD 611>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 611>;
+ iommus = <&ipmmu_vp0 8>;
};
fcpvi1: fcp@fe9bf000 {
@@ -2692,6 +2697,7 @@
clocks = <&cpg CPG_MOD 610>;
power-domains = <&sysc R8A774E1_PD_A3VP>;
resets = <&cpg 610>;
+ iommus = <&ipmmu_vp1 9>;
};
fcpvd0: fcp@fea27000 {
@@ -2700,6 +2706,7 @@
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>;
resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
};
fcpvd1: fcp@fea2f000 {
@@ -2708,6 +2715,7 @@
clocks = <&cpg CPG_MOD 602>;
power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>;
resets = <&cpg 602>;
+ iommus = <&ipmmu_vi0 9>;
};
csi20: csi2@fea80000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 1122c470b72f..ee80f52dc7cf 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -2652,6 +2652,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A7796_PD_A3VC>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vc0 16>;
};
fcpvb0: fcp@fe96f000 {
@@ -2660,6 +2661,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A7796_PD_A3VC>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vi0 5>;
};
fcpvi0: fcp@fe9af000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index bf1130af7de3..3b9066043a71 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -2502,6 +2502,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A77961_PD_A3VC>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vc0 16>;
};
fcpvb0: fcp@fe96f000 {
@@ -2510,6 +2511,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A77961_PD_A3VC>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vi0 5>;
};
fcpvi0: fcp@fe9af000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index f02d1547b881..557bdf8fab17 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2185,6 +2185,14 @@
resets = <&cpg 502>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>,
+ <&ipmmu_mp 2>, <&ipmmu_mp 3>,
+ <&ipmmu_mp 4>, <&ipmmu_mp 5>,
+ <&ipmmu_mp 6>, <&ipmmu_mp 7>,
+ <&ipmmu_mp 8>, <&ipmmu_mp 9>,
+ <&ipmmu_mp 10>, <&ipmmu_mp 11>,
+ <&ipmmu_mp 12>, <&ipmmu_mp 13>,
+ <&ipmmu_mp 14>, <&ipmmu_mp 15>;
};
audma1: dma-controller@ec720000 {
@@ -2219,6 +2227,14 @@
resets = <&cpg 501>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>,
+ <&ipmmu_mp 18>, <&ipmmu_mp 19>,
+ <&ipmmu_mp 20>, <&ipmmu_mp 21>,
+ <&ipmmu_mp 22>, <&ipmmu_mp 23>,
+ <&ipmmu_mp 24>, <&ipmmu_mp 25>,
+ <&ipmmu_mp 26>, <&ipmmu_mp 27>,
+ <&ipmmu_mp 28>, <&ipmmu_mp 29>,
+ <&ipmmu_mp 30>, <&ipmmu_mp 31>;
};
xhci0: usb@ee000000 {
@@ -2396,6 +2412,7 @@
clocks = <&cpg CPG_MOD 815>;
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 815>;
+ iommus = <&ipmmu_hc 2>;
status = "disabled";
};
@@ -2490,6 +2507,7 @@
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A77965_PD_A3VP>;
resets = <&cpg 615>;
+ iommus = <&ipmmu_vp0 0>;
};
vspb: vsp@fe960000 {
@@ -2542,6 +2560,7 @@
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A77965_PD_A3VP>;
resets = <&cpg 607>;
+ iommus = <&ipmmu_vp0 5>;
};
fcpvd0: fcp@fea27000 {
@@ -2550,6 +2569,7 @@
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
};
fcpvd1: fcp@fea2f000 {
@@ -2558,6 +2578,7 @@
clocks = <&cpg CPG_MOD 602>;
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 602>;
+ iommus = <&ipmmu_vi0 9>;
};
fcpvi0: fcp@fe9af000 {
@@ -2566,6 +2587,7 @@
clocks = <&cpg CPG_MOD 611>;
power-domains = <&sysc R8A77965_PD_A3VP>;
resets = <&cpg 611>;
+ iommus = <&ipmmu_vp0 8>;
};
cmm0: cmm@fea40000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 64fb95b1c89a..38145fd6acf0 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1092,6 +1092,7 @@
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
};
csi40: csi2@feaa0000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 0c2b157036e7..55a6c622f873 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1266,6 +1266,7 @@
clocks = <&cpg CPG_MOD 813>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 813>;
+ iommus = <&ipmmu_ds1 34>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1430,6 +1431,7 @@
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
};
csi40: csi2@feaa0000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index d76347001cc1..1f4ab27acc33 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -707,6 +707,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 0>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -755,6 +756,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 1>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -803,6 +805,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 2>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -851,6 +854,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 3>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -899,6 +903,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 4>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -947,6 +952,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_ds1 11>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1168,7 +1174,8 @@
};
vin00: video@e6ef0000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef0000 0 0x1000>;
interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 730>;
@@ -1196,7 +1203,8 @@
};
vin01: video@e6ef1000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef1000 0 0x1000>;
interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 731>;
@@ -1224,7 +1232,8 @@
};
vin02: video@e6ef2000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef2000 0 0x1000>;
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 800>;
@@ -1252,7 +1261,8 @@
};
vin03: video@e6ef3000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef3000 0 0x1000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 801>;
@@ -1280,7 +1290,8 @@
};
vin04: video@e6ef4000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef4000 0 0x1000>;
interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 802>;
@@ -1308,7 +1319,8 @@
};
vin05: video@e6ef5000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef5000 0 0x1000>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 803>;
@@ -1336,7 +1348,8 @@
};
vin06: video@e6ef6000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef6000 0 0x1000>;
interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 804>;
@@ -1364,7 +1377,8 @@
};
vin07: video@e6ef7000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef7000 0 0x1000>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 805>;
@@ -1392,7 +1406,8 @@
};
vin08: video@e6ef8000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef8000 0 0x1000>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 806>;
@@ -1420,7 +1435,8 @@
};
vin09: video@e6ef9000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef9000 0 0x1000>;
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 807>;
@@ -1448,7 +1464,8 @@
};
vin10: video@e6efa000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efa000 0 0x1000>;
interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 808>;
@@ -1476,7 +1493,8 @@
};
vin11: video@e6efb000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efb000 0 0x1000>;
interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 809>;
@@ -1504,7 +1522,8 @@
};
vin12: video@e6efc000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efc000 0 0x1000>;
interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 810>;
@@ -1532,7 +1551,8 @@
};
vin13: video@e6efd000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efd000 0 0x1000>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 811>;
@@ -1560,7 +1580,8 @@
};
vin14: video@e6efe000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efe000 0 0x1000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
@@ -1588,7 +1609,8 @@
};
vin15: video@e6eff000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6eff000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 813>;
@@ -1616,7 +1638,8 @@
};
vin16: video@e6ed0000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed0000 0 0x1000>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 814>;
@@ -1644,7 +1667,8 @@
};
vin17: video@e6ed1000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed1000 0 0x1000>;
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 815>;
@@ -1672,7 +1696,8 @@
};
vin18: video@e6ed2000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed2000 0 0x1000>;
interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 816>;
@@ -1700,7 +1725,8 @@
};
vin19: video@e6ed3000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed3000 0 0x1000>;
interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 817>;
@@ -1728,7 +1754,8 @@
};
vin20: video@e6ed4000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed4000 0 0x1000>;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 818>;
@@ -1756,7 +1783,8 @@
};
vin21: video@e6ed5000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed5000 0 0x1000>;
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 819>;
@@ -1784,7 +1812,8 @@
};
vin22: video@e6ed6000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed6000 0 0x1000>;
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 820>;
@@ -1812,7 +1841,8 @@
};
vin23: video@e6ed7000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed7000 0 0x1000>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 821>;
@@ -1840,7 +1870,8 @@
};
vin24: video@e6ed8000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed8000 0 0x1000>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 822>;
@@ -1868,7 +1899,8 @@
};
vin25: video@e6ed9000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ed9000 0 0x1000>;
interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 823>;
@@ -1896,7 +1928,8 @@
};
vin26: video@e6eda000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6eda000 0 0x1000>;
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 824>;
@@ -1924,7 +1957,8 @@
};
vin27: video@e6edb000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6edb000 0 0x1000>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 825>;
@@ -1952,7 +1986,8 @@
};
vin28: video@e6edc000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6edc000 0 0x1000>;
interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 826>;
@@ -1980,7 +2015,8 @@
};
vin29: video@e6edd000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6edd000 0 0x1000>;
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 827>;
@@ -2008,7 +2044,8 @@
};
vin30: video@e6ede000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ede000 0 0x1000>;
interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 828>;
@@ -2036,7 +2073,8 @@
};
vin31: video@e6edf000 {
- compatible = "renesas,vin-r8a779a0";
+ compatible = "renesas,vin-r8a779a0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6edf000 0 0x1000>;
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 829>;
@@ -2096,6 +2134,14 @@
resets = <&cpg 709>;
#dma-cells = <1>;
dma-channels = <16>;
+ iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
+ <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
+ <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
+ <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
+ <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
+ <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
+ <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
+ <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
};
dmac2: dma-controller@e7351000 {
@@ -2121,6 +2167,10 @@
resets = <&cpg 710>;
#dma-cells = <1>;
dma-channels = <8>;
+ iommus = <&ipmmu_ds0 16>, <&ipmmu_ds0 17>,
+ <&ipmmu_ds0 18>, <&ipmmu_ds0 19>,
+ <&ipmmu_ds0 20>, <&ipmmu_ds0 21>,
+ <&ipmmu_ds0 22>, <&ipmmu_ds0 23>;
};
mmc0: mmc@ee140000 {
@@ -2278,6 +2328,7 @@
clocks = <&cpg CPG_MOD 508>;
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 508>;
+ iommus = <&ipmmu_vi1 6>;
};
fcpvd1: fcp@fea11000 {
@@ -2286,6 +2337,7 @@
clocks = <&cpg CPG_MOD 509>;
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 509>;
+ iommus = <&ipmmu_vi1 7>;
};
vspd0: vsp@fea20000 {
@@ -2449,7 +2501,8 @@
};
isp0: isp@fed00000 {
- compatible = "renesas,r8a779a0-isp";
+ compatible = "renesas,r8a779a0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed00000 0 0x10000>;
interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 612>;
@@ -2532,7 +2585,8 @@
};
isp1: isp@fed20000 {
- compatible = "renesas,r8a779a0-isp";
+ compatible = "renesas,r8a779a0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed20000 0 0x10000>;
interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 613>;
@@ -2615,7 +2669,8 @@
};
isp2: isp@fed30000 {
- compatible = "renesas,r8a779a0-isp";
+ compatible = "renesas,r8a779a0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed30000 0 0x10000>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 614>;
@@ -2698,7 +2753,8 @@
};
isp3: isp@fed40000 {
- compatible = "renesas,r8a779a0-isp";
+ compatible = "renesas,r8a779a0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed40000 0 0x10000>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 615>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 53d1d4d8197a..12900ebd098b 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -175,6 +175,20 @@
clock-frequency = <0>;
};
+ pcie0_clkref: pcie0-clkref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ pcie1_clkref: pcie1-clkref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
pmu_a76 {
compatible = "arm,cortex-a76-pmu";
interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
@@ -553,6 +567,20 @@
status = "disabled";
};
+ tsn0: ethernet@e6460000 {
+ compatible = "renesas,r8a779g0-ethertsn", "renesas,rcar-gen4-ethertsn";
+ reg = <0 0xe6460000 0 0x7000>,
+ <0 0xe6449000 0 0x500>;
+ reg-names = "tsnes", "gptp";
+ interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+ clocks = <&cpg CPG_MOD 2723>;
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 2723>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
compatible = "renesas,i2c-r8a779g0",
"renesas,rcar-gen4-i2c";
@@ -723,6 +751,126 @@
status = "disabled";
};
+ pciec0: pcie@e65d0000 {
+ compatible = "renesas,r8a779g0-pcie",
+ "renesas,rcar-gen4-pcie";
+ reg = <0 0xe65d0000 0 0x1000>, <0 0xe65d2000 0 0x0800>,
+ <0 0xe65d3000 0 0x2000>, <0 0xe65d5000 0 0x1200>,
+ <0 0xe65d6200 0 0x0e00>, <0 0xe65d7000 0 0x0400>,
+ <0 0xfe000000 0 0x400000>;
+ reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "config";
+ interrupts = <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi", "dma", "sft_ce", "app";
+ clocks = <&cpg CPG_MOD 624>, <&pcie0_clkref>;
+ clock-names = "core", "ref";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 624>;
+ reset-names = "pwr";
+ max-link-speed = <4>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xfe000000 0 0x00400000>,
+ <0x02000000 0 0x30000000 0 0x30000000 0 0x10000000>;
+ dma-ranges = <0x42000000 0 0x00000000 0 0x00000000 1 0x00000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>;
+ snps,enable-cdm-check;
+ status = "disabled";
+ };
+
+ pciec1: pcie@e65d8000 {
+ compatible = "renesas,r8a779g0-pcie",
+ "renesas,rcar-gen4-pcie";
+ reg = <0 0xe65d8000 0 0x1000>, <0 0xe65da000 0 0x0800>,
+ <0 0xe65db000 0 0x2000>, <0 0xe65dd000 0 0x1200>,
+ <0 0xe65de200 0 0x0e00>, <0 0xe65df000 0 0x0400>,
+ <0 0xee900000 0 0x400000>;
+ reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "config";
+ interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi", "dma", "sft_ce", "app";
+ clocks = <&cpg CPG_MOD 625>, <&pcie1_clkref>;
+ clock-names = "core", "ref";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 625>;
+ reset-names = "pwr";
+ max-link-speed = <4>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00400000>,
+ <0x02000000 0 0xc0000000 0 0xc0000000 0 0x10000000>;
+ dma-ranges = <0x42000000 0 0x00000000 0 0x00000000 1 0x00000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>;
+ snps,enable-cdm-check;
+ status = "disabled";
+ };
+
+ pciec0_ep: pcie-ep@e65d0000 {
+ compatible = "renesas,r8a779g0-pcie-ep",
+ "renesas,rcar-gen4-pcie-ep";
+ reg = <0 0xe65d0000 0 0x2000>, <0 0xe65d2000 0 0x1000>,
+ <0 0xe65d3000 0 0x2000>, <0 0xe65d5000 0 0x1200>,
+ <0 0xe65d6200 0 0x0e00>, <0 0xe65d7000 0 0x0400>,
+ <0 0xfe000000 0 0x400000>;
+ reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "addr_space";
+ interrupts = <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma", "sft_ce", "app";
+ clocks = <&cpg CPG_MOD 624>, <&pcie0_clkref>;
+ clock-names = "core", "ref";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 624>;
+ reset-names = "pwr";
+ max-link-speed = <4>;
+ num-lanes = <2>;
+ max-functions = /bits/ 8 <2>;
+ status = "disabled";
+ };
+
+ pciec1_ep: pcie-ep@e65d8000 {
+ compatible = "renesas,r8a779g0-pcie-ep",
+ "renesas,rcar-gen4-pcie-ep";
+ reg = <0 0xe65d8000 0 0x2000>, <0 0xe65da000 0 0x1000>,
+ <0 0xe65db000 0 0x2000>, <0 0xe65dd000 0 0x1200>,
+ <0 0xe65de200 0 0x0e00>, <0 0xe65df000 0 0x0400>,
+ <0 0xee900000 0 0x400000>;
+ reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "addr_space";
+ interrupts = <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma", "sft_ce", "app";
+ clocks = <&cpg CPG_MOD 625>, <&pcie1_clkref>;
+ clock-names = "core", "ref";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 625>;
+ reset-names = "pwr";
+ max-link-speed = <4>;
+ num-lanes = <2>;
+ max-functions = /bits/ 8 <2>;
+ status = "disabled";
+ };
+
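The pcie0_clkref/pcie1_clkref fixed clocks these controllers consume are declared at 0 Hz on purpose, so a board must override the rate before enabling anything; note also that pciec0/pciec0_ep (and pciec1/pciec1_ep) map the same register ranges, so only one mode per controller can be active at a time. A board-side sketch — the 100 MHz rate is the usual PCIe reference-clock frequency, assumed here rather than taken from this patch:

	&pcie0_clkref {
		clock-frequency = <100000000>;
	};

	&pciec0 {
		status = "okay";
	};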
canfd: can@e6660000 {
compatible = "renesas,r8a779g0-canfd",
"renesas,rcar-gen4-canfd";
@@ -815,6 +963,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 0>;
status = "disabled";
};
@@ -860,6 +1009,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 1>;
status = "disabled";
};
@@ -905,6 +1055,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 2>;
status = "disabled";
};
@@ -1184,7 +1335,8 @@
};
vin00: video@e6ef0000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef0000 0 0x1000>;
interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 730>;
@@ -1212,7 +1364,8 @@
};
vin01: video@e6ef1000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef1000 0 0x1000>;
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 731>;
@@ -1240,7 +1393,8 @@
};
vin02: video@e6ef2000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef2000 0 0x1000>;
interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 800>;
@@ -1268,7 +1422,8 @@
};
vin03: video@e6ef3000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef3000 0 0x1000>;
interrupts = <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 801>;
@@ -1296,7 +1451,8 @@
};
vin04: video@e6ef4000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef4000 0 0x1000>;
interrupts = <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 802>;
@@ -1324,7 +1480,8 @@
};
vin05: video@e6ef5000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef5000 0 0x1000>;
interrupts = <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 803>;
@@ -1352,7 +1509,8 @@
};
vin06: video@e6ef6000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef6000 0 0x1000>;
interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 804>;
@@ -1380,7 +1538,8 @@
};
vin07: video@e6ef7000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef7000 0 0x1000>;
interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 805>;
@@ -1408,7 +1567,8 @@
};
vin08: video@e6ef8000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef8000 0 0x1000>;
interrupts = <GIC_SPI 537 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 806>;
@@ -1436,7 +1596,8 @@
};
vin09: video@e6ef9000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef9000 0 0x1000>;
interrupts = <GIC_SPI 538 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 807>;
@@ -1464,7 +1625,8 @@
};
vin10: video@e6efa000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efa000 0 0x1000>;
interrupts = <GIC_SPI 539 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 808>;
@@ -1492,7 +1654,8 @@
};
vin11: video@e6efb000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efb000 0 0x1000>;
interrupts = <GIC_SPI 540 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 809>;
@@ -1520,7 +1683,8 @@
};
vin12: video@e6efc000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efc000 0 0x1000>;
interrupts = <GIC_SPI 541 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 810>;
@@ -1548,7 +1712,8 @@
};
vin13: video@e6efd000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efd000 0 0x1000>;
interrupts = <GIC_SPI 542 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 811>;
@@ -1576,7 +1741,8 @@
};
vin14: video@e6efe000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efe000 0 0x1000>;
interrupts = <GIC_SPI 543 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
@@ -1604,7 +1770,8 @@
};
vin15: video@e6eff000 {
- compatible = "renesas,vin-r8a779g0";
+ compatible = "renesas,vin-r8a779g0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6eff000 0 0x1000>;
interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 813>;
@@ -1987,6 +2154,7 @@
clocks = <&cpg CPG_MOD 508>;
power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
resets = <&cpg 508>;
+ iommus = <&ipmmu_vi1 6>;
};
fcpvd1: fcp@fea11000 {
@@ -1995,6 +2163,7 @@
clocks = <&cpg CPG_MOD 509>;
power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
resets = <&cpg 509>;
+ iommus = <&ipmmu_vi1 7>;
};
vspd0: vsp@fea20000 {
@@ -2054,7 +2223,8 @@
};
isp0: isp@fed00000 {
- compatible = "renesas,r8a779g0-isp";
+ compatible = "renesas,r8a779g0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed00000 0 0x10000>;
interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_LOW>;
clocks = <&cpg CPG_MOD 612>;
@@ -2137,7 +2307,8 @@
};
isp1: isp@fed20000 {
- compatible = "renesas,r8a779g0-isp";
+ compatible = "renesas,r8a779g0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed20000 0 0x10000>;
interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_LOW>;
clocks = <&cpg CPG_MOD 613>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
index 2f79e5a61248..50a428572d9b 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
@@ -24,3 +24,54 @@
groups = "hscif0_data", "hscif0_ctrl";
function = "hscif0";
};
+
+&pfc {
+ tsn0_pins: tsn0 {
+ mux {
+ groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii",
+ "tsn0_txcrefclk";
+ function = "tsn0";
+ };
+
+ link {
+ groups = "tsn0_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "tsn0_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "tsn0_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+};
+
+&tsn0 {
+ pinctrl-0 = <&tsn0_pins>;
+ pinctrl-names = "default";
+ phy-mode = "rgmii";
+ phy-handle = <&phy3>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ phy3: ethernet-phy@0 {
+ compatible = "ethernet-phy-id002b.0980",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
index 2b9a19bb1c5d..9a1917b87f61 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
@@ -5,10 +5,31 @@
* Copyright (C) 2023 Renesas Electronics Corp.
* Copyright (C) 2024 Glider bv
*/
+/*
+ * [How to use Sound]
+ *
+ * Because R-Car V4M has only one SSI, it cannot handle Playback and Capture
+ * at the same time. You need to switch the direction, which is controlled
+ * by the GP0_01 pin, via amixer.
+ *
+ * Playback (CN9500)
+ * > amixer set "MUX" "Playback" // for GP0_01
+ * > amixer set "DAC 1" 85%
+ * > aplay xxx.wav
+ *
+ * Capture (CN9501)
+ * > amixer set "MUX" "Capture" // for GP0_01
+ * > amixer set "Mic 1" 80%
+ * > amixer set "ADC 1" on
+ * > amixer set "ADC 1" 80%
+ * > arecord xxx.wav
+ */
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include "r8a779h0.dtsi"
@@ -26,11 +47,74 @@
ethernet0 = &avb0;
};
+ can_transceiver0: can-phy0 {
+ compatible = "nxp,tjr1443";
+ #phy-cells = <0>;
+ enable-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ max-bitrate = <5000000>;
+ };
+
chosen {
bootargs = "ignore_loglevel rw root=/dev/nfs ip=on";
stdout-path = "serial0:921600n8";
};
+ keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&keys_pins>;
+ pinctrl-names = "default";
+
+ key-1 {
+ gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_1>;
+ label = "SW47";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+
+ key-2 {
+ gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_2>;
+ label = "SW48";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+
+ key-3 {
+ gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_3>;
+ label = "SW49";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-1 {
+ gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <1>;
+ };
+
+ led-2 {
+ gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ };
+
+ led-3 {
+ gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <3>;
+ };
+ };
+
memory@48000000 {
device_type = "memory";
/* first 128MB is reserved for secure area. */
@@ -59,6 +143,24 @@
regulator-boot-on;
regulator-always-on;
};
+
+ sound_mux: sound-mux {
+ compatible = "simple-audio-mux";
+ mux-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ state-labels = "Playback", "Capture";
+ };
+
+ sound_card: sound {
+ compatible = "audio-graph-card2";
+ label = "rcar-sound";
+ aux-devs = <&sound_mux>; // for GP0_01
+
+ links = <&rsnd_port>; // AK4619 Audio Codec
+ };
+};
+
+&audio_clkin {
+ clock-frequency = <24576000>;
};
&avb0 {
@@ -79,6 +181,25 @@
};
};
+&can_clk {
+ clock-frequency = <40000000>;
+};
+
+&canfd {
+ pinctrl-0 = <&canfd0_pins>, <&canfd1_pins>, <&can_clk_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ channel0 {
+ status = "okay";
+ phys = <&can_transceiver0>;
+ };
+
+ channel1 {
+ status = "okay";
+ };
+};
+
&extal_clk {
clock-frequency = <16666666>;
};
@@ -87,6 +208,15 @@
clock-frequency = <32768>;
};
+&gpio1 {
+ audio-power-hog {
+ gpio-hog;
+ gpios = <8 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "Audio-Power";
+ };
+};
+
&hscif0 {
pinctrl-0 = <&hscif0_pins>;
pinctrl-names = "default";
@@ -139,6 +269,29 @@
};
};
+&i2c3 {
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ codec@10 {
+ compatible = "asahi-kasei,ak4619";
+ reg = <0x10>;
+
+ clocks = <&rcar_sound>;
+ clock-names = "mclk";
+
+ #sound-dai-cells = <0>;
+ port {
+ ak4619_endpoint: endpoint {
+ remote-endpoint = <&rsnd_endpoint>;
+ };
+ };
+ };
+};
+
&mmc0 {
pinctrl-0 = <&mmc_pins>;
pinctrl-1 = <&mmc_pins>;
@@ -178,6 +331,21 @@
};
};
+ can_clk_pins: can-clk {
+ groups = "can_clk";
+ function = "can_clk";
+ };
+
+ canfd0_pins: canfd0 {
+ groups = "canfd0_data";
+ function = "canfd0";
+ };
+
+ canfd1_pins: canfd1 {
+ groups = "canfd1_data";
+ function = "canfd1";
+ };
+
hscif0_pins: hscif0 {
groups = "hscif0_data", "hscif0_ctrl";
function = "hscif0";
@@ -193,6 +361,16 @@
function = "i2c0";
};
+ i2c3_pins: i2c3 {
+ groups = "i2c3";
+ function = "i2c3";
+ };
+
+ keys_pins: keys {
+ pins = "GP_5_0", "GP_5_1", "GP_5_2";
+ bias-pull-up;
+ };
+
mmc_pins: mmc {
groups = "mmc_data8", "mmc_ctrl", "mmc_ds";
function = "mmc";
@@ -213,6 +391,40 @@
groups = "scif_clk2";
function = "scif_clk2";
};
+
+ sound_clk_pins: sound_clk {
+ groups = "audio_clkin", "audio_clkout";
+ function = "audio_clk";
+ };
+
+ sound_pins: sound {
+ groups = "ssi_ctrl", "ssi_data";
+ function = "ssi";
+ };
+};
+
+&rcar_sound {
+ pinctrl-0 = <&sound_clk_pins>, <&sound_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* audio_clkout */
+ clock-frequency = <12288000>;
+
+ ports {
+ rsnd_port: port {
+ rsnd_endpoint: endpoint {
+ remote-endpoint = <&ak4619_endpoint>;
+ bitclock-master;
+ frame-master;
+
+ /* see above [How to use Sound] */
+ playback = <&ssi0>;
+ capture = <&ssi0>;
+ };
+ };
+ };
};
&rpc {
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index a03ab2b6a859..12d8be3fd579 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -21,6 +21,13 @@
clock-frequency = <0>;
};
+ /* External CAN clock - to be overridden by boards that provide it */
+ can_clk: can-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
cluster0_opp: opp-table-0 {
compatible = "operating-points-v2";
@@ -636,6 +643,40 @@
status = "disabled";
};
+ canfd: can@e6660000 {
+ compatible = "renesas,r8a779h0-canfd",
+ "renesas,rcar-gen4-canfd";
+ reg = <0 0xe6660000 0 0x8500>;
+ interrupts = <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch_int", "g_int";
+ clocks = <&cpg CPG_MOD 328>,
+ <&cpg CPG_CORE R8A779H0_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "fck", "canfd", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A779H0_CLK_CANFD>;
+ assigned-clock-rates = <80000000>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
+ status = "disabled";
+
+ channel0 {
+ status = "disabled";
+ };
+
+ channel1 {
+ status = "disabled";
+ };
+
+ channel2 {
+ status = "disabled";
+ };
+
+ channel3 {
+ status = "disabled";
+ };
+ };
+
avb0: ethernet@e6800000 {
compatible = "renesas,etheravb-r8a779h0",
"renesas,etheravb-rcar-gen4";
@@ -728,6 +769,7 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 1>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -776,11 +818,62 @@
phy-mode = "rgmii";
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
+ iommus = <&ipmmu_hc 2>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar";
+ reg = <0 0xe6e31000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
scif0: serial@e6e60000 {
compatible = "renesas,scif-r8a779h0",
"renesas,rcar-gen4-scif", "renesas,scif";
@@ -946,7 +1039,8 @@
};
vin00: video@e6ef0000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef0000 0 0x1000>;
interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 730>;
@@ -974,7 +1068,8 @@
};
vin01: video@e6ef1000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef1000 0 0x1000>;
interrupts = <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 731>;
@@ -1002,7 +1097,8 @@
};
vin02: video@e6ef2000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef2000 0 0x1000>;
interrupts = <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 800>;
@@ -1030,7 +1126,8 @@
};
vin03: video@e6ef3000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef3000 0 0x1000>;
interrupts = <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 801>;
@@ -1058,7 +1155,8 @@
};
vin04: video@e6ef4000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef4000 0 0x1000>;
interrupts = <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 802>;
@@ -1086,7 +1184,8 @@
};
vin05: video@e6ef5000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef5000 0 0x1000>;
interrupts = <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 803>;
@@ -1114,7 +1213,8 @@
};
vin06: video@e6ef6000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef6000 0 0x1000>;
interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 804>;
@@ -1142,7 +1242,8 @@
};
vin07: video@e6ef7000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef7000 0 0x1000>;
interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 805>;
@@ -1170,7 +1271,8 @@
};
vin08: video@e6ef8000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef8000 0 0x1000>;
interrupts = <GIC_SPI 537 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 806>;
@@ -1198,7 +1300,8 @@
};
vin09: video@e6ef9000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6ef9000 0 0x1000>;
interrupts = <GIC_SPI 538 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 807>;
@@ -1226,7 +1329,8 @@
};
vin10: video@e6efa000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efa000 0 0x1000>;
interrupts = <GIC_SPI 539 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 808>;
@@ -1254,7 +1358,8 @@
};
vin11: video@e6efb000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efb000 0 0x1000>;
interrupts = <GIC_SPI 540 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 809>;
@@ -1282,7 +1387,8 @@
};
vin12: video@e6efc000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efc000 0 0x1000>;
interrupts = <GIC_SPI 541 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 810>;
@@ -1310,7 +1416,8 @@
};
vin13: video@e6efd000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efd000 0 0x1000>;
interrupts = <GIC_SPI 542 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 811>;
@@ -1338,7 +1445,8 @@
};
vin14: video@e6efe000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6efe000 0 0x1000>;
interrupts = <GIC_SPI 543 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
@@ -1366,7 +1474,8 @@
};
vin15: video@e6eff000 {
- compatible = "renesas,vin-r8a779h0";
+ compatible = "renesas,vin-r8a779h0",
+ "renesas,rcar-gen4-vin";
reg = <0 0xe6eff000 0 0x1000>;
interrupts = <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 813>;
@@ -1720,7 +1829,8 @@
};
isp0: isp@fed00000 {
- compatible = "renesas,r8a779h0-isp";
+ compatible = "renesas,r8a779h0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed00000 0 0x10000>;
interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_LOW>;
clocks = <&cpg CPG_MOD 612>;
@@ -1803,7 +1913,8 @@
};
isp1: isp@fed20000 {
- compatible = "renesas,r8a779h0-isp";
+ compatible = "renesas,r8a779h0-isp",
+ "renesas,rcar-gen4-isp";
reg = <0 0xfed20000 0 0x10000>;
interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_LOW>;
clocks = <&cpg CPG_MOD 613>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
index 2eccab9c8962..593c66b27ad1 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
@@ -725,6 +725,10 @@
power-domains = <&cpg>;
#reset-cells = <1>;
status = "disabled";
+
+ usb0_vbus_otg: regulator-vbus {
+ regulator-name = "vbus";
+ };
};
ohci0: usb@11c50000 {
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
index 18ef297db933..a3998e5928f7 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
@@ -129,6 +129,55 @@
};
};
+ vspd: vsp@10870000 {
+ compatible = "renesas,r9a07g043u-vsp2", "renesas,r9a07g044-vsp2";
+ reg = <0 0x10870000 0 0x10000>;
+ interrupts = <SOC_PERIPHERAL_IRQ(149) IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G043_LCDC_RESET_N>;
+ renesas,fcp = <&fcpvd>;
+ };
+
+ fcpvd: fcp@10880000 {
+ compatible = "renesas,r9a07g043u-fcpvd", "renesas,fcpv";
+ reg = <0 0x10880000 0 0x10000>;
+ clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G043_LCDC_RESET_N>;
+ };
+
+ du: display@10890000 {
+ compatible = "renesas,r9a07g043u-du";
+ reg = <0 0x10890000 0 0x10000>;
+ interrupts = <SOC_PERIPHERAL_IRQ(152) IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G043_LCDC_RESET_N>;
+ renesas,vsps = <&vspd 0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_rgb: endpoint {
+ };
+ };
+ };
+ };
+
irqc: interrupt-controller@110a0000 {
compatible = "renesas,r9a07g043u-irqc",
"renesas,rzg2l-irqc";
@@ -210,8 +259,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
- reg = <0x0 0x11900000 0 0x40000>,
- <0x0 0x11940000 0 0x60000>;
+ reg = <0x0 0x11900000 0 0x20000>,
+ <0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso
new file mode 100644
index 000000000000..ecd43a671000
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree overlay for the RZ/G2UL SMARC EVK with the DU enabled and
+ * the ADV7513 transmitter connected to it.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+#define ADV7513_PARENT_I2C i2c1
+#include "rz-smarc-du-adv7513.dtsi"
+
+&pinctrl {
+ du_pins: du {
+ data {
+ pinmux = <RZG2L_PORT_PINMUX(11, 2, 6)>,
+ <RZG2L_PORT_PINMUX(13, 1, 6)>,
+ <RZG2L_PORT_PINMUX(13, 0, 6)>,
+ <RZG2L_PORT_PINMUX(13, 4, 6)>,
+ <RZG2L_PORT_PINMUX(13, 3, 6)>,
+ <RZG2L_PORT_PINMUX(12, 1, 6)>,
+ <RZG2L_PORT_PINMUX(13, 2, 6)>,
+ <RZG2L_PORT_PINMUX(14, 0, 6)>,
+ <RZG2L_PORT_PINMUX(14, 2, 6)>,
+ <RZG2L_PORT_PINMUX(14, 1, 6)>,
+ <RZG2L_PORT_PINMUX(16, 0, 6)>,
+ <RZG2L_PORT_PINMUX(15, 0, 6)>,
+ <RZG2L_PORT_PINMUX(16, 1, 6)>,
+ <RZG2L_PORT_PINMUX(15, 1, 6)>,
+ <RZG2L_PORT_PINMUX(15, 3, 6)>,
+ <RZG2L_PORT_PINMUX(18, 0, 6)>,
+ <RZG2L_PORT_PINMUX(15, 2, 6)>,
+ <RZG2L_PORT_PINMUX(17, 0, 6)>,
+ <RZG2L_PORT_PINMUX(17, 2, 6)>,
+ <RZG2L_PORT_PINMUX(17, 1, 6)>,
+ <RZG2L_PORT_PINMUX(18, 1, 6)>,
+ <RZG2L_PORT_PINMUX(18, 2, 6)>,
+ <RZG2L_PORT_PINMUX(17, 3, 6)>,
+ <RZG2L_PORT_PINMUX(18, 3, 6)>;
+ drive-strength = <2>;
+ };
+
+ sync {
+ pinmux = <RZG2L_PORT_PINMUX(11, 0, 6)>, /* HSYNC */
+ <RZG2L_PORT_PINMUX(12, 0, 6)>; /* VSYNC */
+ drive-strength = <2>;
+ };
+
+ de {
+ pinmux = <RZG2L_PORT_PINMUX(11, 1, 6)>; /* DE */
+ drive-strength = <2>;
+ };
+
+ clk {
+ pinmux = <RZG2L_PORT_PINMUX(11, 3, 6)>; /* CLK */
+ };
+ };
+};
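
The overlay above parameterizes the shared ADV7513 description through the
preprocessor: the "#define ADV7513_PARENT_I2C i2c1" must precede the
"#include", because rz-smarc-du-adv7513.dtsi (added later in this patch)
dereferences the macro as a label, &ADV7513_PARENT_I2C. A minimal sketch of
reusing the same dtsi from a board whose ADV7513 sits on a different bus
(i2c0 here is purely a hypothetical example; the includer must also provide
the du_pins group and the reg_1p8v/reg_3p3v supplies that the dtsi
references):

	/dts-v1/;
	/plugin/;

	#define ADV7513_PARENT_I2C i2c0	/* hypothetical parent bus */
	#include "rz-smarc-du-adv7513.dtsi"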
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index d3838e5820fc..6b1c77cd8261 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -1043,8 +1043,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
- reg = <0x0 0x11900000 0 0x40000>,
- <0x0 0x11940000 0 0x60000>;
+ reg = <0x0 0x11900000 0 0x20000>,
+ <0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
@@ -1129,6 +1129,10 @@
power-domains = <&cpg>;
#reset-cells = <1>;
status = "disabled";
+
+ usb0_vbus_otg: regulator-vbus {
+ regulator-name = "vbus";
+ };
};
ohci0: usb@11c50000 {
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts
index 0b90367b6d1e..ee5bf2c58051 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts
@@ -47,6 +47,9 @@
#error "Cannot set as both PMOD_MTU3 and SW_RSPI_CAN are mutually exclusive"
#endif
+/* Please set SW_I2S0_I2S1: 0 (default) selects SSI0/I2S0, 1 selects SSI1/I2S1 */
+#define SW_I2S0_I2S1 0
+
#include "r9a07g044c2.dtsi"
#include "rzg2lc-smarc-som.dtsi"
#include "rzg2lc-smarc.dtsi"
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index 1de2e5f0917d..01f59914dd09 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -1051,8 +1051,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
- reg = <0x0 0x11900000 0 0x40000>,
- <0x0 0x11940000 0 0x60000>;
+ reg = <0x0 0x11900000 0 0x20000>,
+ <0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
@@ -1137,6 +1137,10 @@
power-domains = <&cpg>;
#reset-cells = <1>;
status = "disabled";
+
+ usb0_vbus_otg: regulator-vbus {
+ regulator-name = "vbus";
+ };
};
ohci0: usb@11c50000 {
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
index 0d5c47a65e46..067a26a66c24 100644
--- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
@@ -72,6 +72,94 @@
status = "disabled";
};
+ i2c0: i2c@10090000 {
+ compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057";
+ reg = <0 0x10090000 0 0x400>;
+ interrupts = <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD R9A08G045_I2C0_PCLK>;
+ clock-frequency = <100000>;
+ resets = <&cpg R9A08G045_I2C0_MRST>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@10090400 {
+ compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057";
+ reg = <0 0x10090400 0 0x400>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 272 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD R9A08G045_I2C1_PCLK>;
+ clock-frequency = <100000>;
+ resets = <&cpg R9A08G045_I2C1_MRST>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@10090800 {
+ compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057";
+ reg = <0 0x10090800 0 0x400>;
+ interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 280 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD R9A08G045_I2C2_PCLK>;
+ clock-frequency = <100000>;
+ resets = <&cpg R9A08G045_I2C2_MRST>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@10090c00 {
+ compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057";
+ reg = <0 0x10090c00 0 0x400>;
+ interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 287 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 288 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD R9A08G045_I2C3_PCLK>;
+ clock-frequency = <100000>;
+ resets = <&cpg R9A08G045_I2C3_MRST>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
cpg: clock-controller@11010000 {
compatible = "renesas,r9a08g045-cpg";
reg = <0 0x11010000 0 0x10000>;
@@ -181,6 +269,44 @@
resets = <&cpg R9A08G045_IA55_RESETN>;
};
+ dmac: dma-controller@11820000 {
+ compatible = "renesas,r9a08g045-dmac",
+ "renesas,rz-dmac";
+ reg = <0 0x11820000 0 0x10000>,
+ <0 0x11830000 0 0x10000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 112 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 113 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 114 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 115 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 116 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 117 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 118 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 119 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 120 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 121 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 123 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 124 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD R9A08G045_DMAC_ACLK>,
+ <&cpg CPG_MOD R9A08G045_DMAC_PCLK>;
+ clock-names = "main", "register";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_DMAC_ARESETN>,
+ <&cpg R9A08G045_DMAC_RST_ASYNC>;
+ reset-names = "arst", "rst_async";
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ };
+
sdhi0: mmc@11c00000 {
compatible = "renesas,sdhi-r9a08g045", "renesas,rzg2l-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
@@ -269,8 +395,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
- reg = <0x0 0x12400000 0 0x40000>,
- <0x0 0x12440000 0 0x60000>;
+ reg = <0x0 0x12400000 0 0x20000>,
+ <0x0 0x12440000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
new file mode 100644
index 000000000000..1ad5a1b6917f
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/V2H(P) SoC
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/renesas,r9a09g057-cpg.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "renesas,r9a09g057";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ audio_extal_clk: audio-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ reg = <0>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@200 {
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@300 {
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ L3_CA55: cache-controller-0 {
+ compatible = "cache";
+ cache-unified;
+ cache-size = <0x100000>;
+ cache-level = <3>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ qextal_clk: qextal-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ rtxin_clk: rtxin-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ pinctrl: pinctrl@10410000 {
+ compatible = "renesas,r9a09g057-pinctrl";
+ reg = <0 0x10410000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G057_IOTOP_0_SHCLK>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 96>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ power-domains = <&cpg>;
+ resets = <&cpg 0xa5>, <&cpg 0xa6>;
+ };
+
+ cpg: clock-controller@10420000 {
+ compatible = "renesas,r9a09g057-cpg";
+ reg = <0 0x10420000 0 0x10000>;
+ clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>;
+ clock-names = "audio_extal", "rtxin", "qextal";
+ #clock-cells = <2>;
+ #reset-cells = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ sys: system-controller@10430000 {
+ compatible = "renesas,r9a09g057-sys";
+ reg = <0 0x10430000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G057_SYS_0_PCLK>;
+ resets = <&cpg 0x30>;
+ status = "disabled";
+ };
+
+ ostm0: timer@11800000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x11800000 0x0 0x1000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x43>;
+ resets = <&cpg 0x6d>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm1: timer@11801000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x11801000 0x0 0x1000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x44>;
+ resets = <&cpg 0x6e>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm2: timer@14000000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x14000000 0x0 0x1000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x45>;
+ resets = <&cpg 0x6f>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm3: timer@14001000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x14001000 0x0 0x1000>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x46>;
+ resets = <&cpg 0x70>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm4: timer@12c00000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x12c00000 0x0 0x1000>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x47>;
+ resets = <&cpg 0x71>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm5: timer@12c01000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x12c01000 0x0 0x1000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x48>;
+ resets = <&cpg 0x72>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm6: timer@12c02000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x12c02000 0x0 0x1000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x49>;
+ resets = <&cpg 0x73>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm7: timer@12c03000 {
+ compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
+ reg = <0x0 0x12c03000 0x0 0x1000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 0x4a>;
+ resets = <&cpg 0x74>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt0: watchdog@11c00400 {
+ compatible = "renesas,r9a09g057-wdt";
+ reg = <0 0x11c00400 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x75>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt1: watchdog@14400000 {
+ compatible = "renesas,r9a09g057-wdt";
+ reg = <0 0x14400000 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x4d>, <&cpg CPG_MOD 0x4e>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x76>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt2: watchdog@13000000 {
+ compatible = "renesas,r9a09g057-wdt";
+ reg = <0 0x13000000 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x77>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt3: watchdog@13000400 {
+ compatible = "renesas,r9a09g057-wdt";
+ reg = <0 0x13000400 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x78>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ scif: serial@11c01400 {
+ compatible = "renesas,scif-r9a09g057";
+ reg = <0 0x11c01400 0 0x400>;
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 536 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 537 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eri", "rxi", "txi", "bri", "dri",
+ "tei", "tei-dri", "rxi-edge", "txi-edge";
+ clocks = <&cpg CPG_MOD 0x8f>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg 0x95>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@14400400 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14400400 0 0x400>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 507 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 506 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x94>;
+ resets = <&cpg 0x98>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@14400800 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14400800 0 0x400>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 509 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 508 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x95>;
+ resets = <&cpg 0x99>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@14400c00 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14400c00 0 0x400>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 511 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 510 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x96>;
+ resets = <&cpg 0x9a>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@14401000 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14401000 0 0x400>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 513 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 512 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x97>;
+ resets = <&cpg 0x9b>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@14401400 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14401400 0 0x400>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 515 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 514 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x98>;
+ resets = <&cpg 0x9c>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@14401800 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14401800 0 0x400>;
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 517 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 516 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x99>;
+ resets = <&cpg 0x9d>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@14401c00 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14401c00 0 0x400>;
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 519 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 518 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x9a>;
+ resets = <&cpg 0x9e>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@14402000 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x14402000 0 0x400>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 521 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 520 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x9b>;
+ resets = <&cpg 0x9f>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@11c01000 {
+ compatible = "renesas,riic-r9a09g057";
+ reg = <0 0x11c01000 0 0x400>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 523 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 522 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x93>;
+ resets = <&cpg 0xa0>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@14900000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x14900000 0 0x20000>,
+ <0x0 0x14940000 0 0x80000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ sdhi0: mmc@15c00000 {
+ compatible = "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c00000 0 0x10000>;
+ interrupts = <GIC_SPI 735 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 736 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa3>, <&cpg CPG_MOD 0xa5>,
+ <&cpg CPG_MOD 0xa4>, <&cpg CPG_MOD 0xa6>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa7>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ sdhi1: mmc@15c10000 {
+ compatible = "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c10000 0 0x10000>;
+ interrupts = <GIC_SPI 737 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xa7>, <&cpg CPG_MOD 0xa9>,
+ <&cpg CPG_MOD 0xa8>, <&cpg CPG_MOD 0xaa>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa8>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ sdhi2: mmc@15c20000 {
+ compatible = "renesas,sdhi-r9a09g057";
+ reg = <0x0 0x15c20000 0 0x10000>;
+ interrupts = <GIC_SPI 739 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 740 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 0xab>, <&cpg CPG_MOD 0xad>,
+ <&cpg CPG_MOD 0xac>, <&cpg CPG_MOD 0xae>;
+ clock-names = "core", "clkh", "cd", "aclk";
+ resets = <&cpg 0xa9>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt";
+ };
+};
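
All three external clocks in this SoC dtsi are placeholders with
clock-frequency = <0> that boards must override. The RZ/V2H EVK DTS below
does exactly that; collected in one place, the pattern looks as follows
(the frequencies are the EVK values from this patch):

	&audio_extal_clk {
		clock-frequency = <22579200>;
	};

	&qextal_clk {
		clock-frequency = <24000000>;
	};

	&rtxin_clk {
		clock-frequency = <32768>;
	};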
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
new file mode 100644
index 000000000000..4703da8e9cff
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/V2H EVK board
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "r9a09g057.dtsi"
+
+/ {
+ model = "Renesas RZ/V2H EVK Board based on r9a09g057h44";
+ compatible = "renesas,rzv2h-evk", "renesas,r9a09g057h44", "renesas,r9a09g057";
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ mmc1 = &sdhi1;
+ serial0 = &scif;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x1 0xF8000000>;
+ };
+
+ memory@240000000 {
+ device_type = "memory";
+ reg = <0x2 0x40000000 0x2 0x00000000>;
+ };
+
+ reg_3p3v: regulator1 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vqmmc_sdhi1: regulator-vccq-sdhi1 {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHI1 VccQ";
+ gpios = <&pinctrl RZG2L_GPIO(10, 2) GPIO_ACTIVE_HIGH>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios-states = <0>;
+ states = <3300000 0>, <1800000 1>;
+ };
+};
+
+&audio_extal_clk {
+ clock-frequency = <22579200>;
+};
+
+&i2c0 {
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c6 {
+ pinctrl-0 = <&i2c6_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c7 {
+ pinctrl-0 = <&i2c7_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c8 {
+ pinctrl-0 = <&i2c8_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&ostm0 {
+ status = "okay";
+};
+
+&ostm1 {
+ status = "okay";
+};
+
+&ostm2 {
+ status = "okay";
+};
+
+&ostm3 {
+ status = "okay";
+};
+
+&ostm4 {
+ status = "okay";
+};
+
+&ostm5 {
+ status = "okay";
+};
+
+&ostm6 {
+ status = "okay";
+};
+
+&ostm7 {
+ status = "okay";
+};
+
+&pinctrl {
+ i2c0_pins: i2c0 {
+ pinmux = <RZG2L_PORT_PINMUX(3, 0, 1)>, /* I2C0_SDA */
+ <RZG2L_PORT_PINMUX(3, 1, 1)>; /* I2C0_SCL */
+ };
+
+ i2c1_pins: i2c1 {
+ pinmux = <RZG2L_PORT_PINMUX(3, 2, 1)>, /* I2C1_SDA */
+ <RZG2L_PORT_PINMUX(3, 3, 1)>; /* I2C1_SCL */
+ };
+
+ i2c2_pins: i2c2 {
+ pinmux = <RZG2L_PORT_PINMUX(2, 0, 4)>, /* I2C2_SDA */
+ <RZG2L_PORT_PINMUX(2, 1, 4)>; /* I2C2_SCL */
+ };
+
+ i2c3_pins: i2c3 {
+ pinmux = <RZG2L_PORT_PINMUX(3, 6, 1)>, /* I2C3_SDA */
+ <RZG2L_PORT_PINMUX(3, 7, 1)>; /* I2C3_SCL */
+ };
+
+ i2c6_pins: i2c6 {
+ pinmux = <RZG2L_PORT_PINMUX(4, 4, 1)>, /* I2C6_SDA */
+ <RZG2L_PORT_PINMUX(4, 5, 1)>; /* I2C6_SCL */
+ };
+
+ i2c7_pins: i2c7 {
+ pinmux = <RZG2L_PORT_PINMUX(4, 6, 1)>, /* I2C7_SDA */
+ <RZG2L_PORT_PINMUX(4, 7, 1)>; /* I2C7_SCL */
+ };
+
+ i2c8_pins: i2c8 {
+ pinmux = <RZG2L_PORT_PINMUX(0, 6, 1)>, /* I2C8_SDA */
+ <RZG2L_PORT_PINMUX(0, 7, 1)>; /* I2C8_SCL */
+ };
+
+ scif_pins: scif {
+ pins = "SCIF_TXD", "SCIF_RXD";
+ renesas,output-impedance = <1>;
+ };
+
+ sd1-pwr-en-hog {
+ gpio-hog;
+ gpios = <RZG2L_GPIO(10, 3) GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "sd1_pwr_en";
+ };
+
+ sdhi1_pins: sd1 {
+ sd1_dat_cmd {
+ pins = "SD1DAT0", "SD1DAT1", "SD1DAT2", "SD1DAT3", "SD1CMD";
+ input-enable;
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+
+ sd1_clk {
+ pins = "SD1CLK";
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+
+ sd1_cd {
+ pinmux = <RZG2L_PORT_PINMUX(9, 4, 14)>; /* SD1_CD */
+ };
+ };
+};
+
+&qextal_clk {
+ clock-frequency = <24000000>;
+};
+
+&rtxin_clk {
+ clock-frequency = <32768>;
+};
+
+&scif {
+ pinctrl-0 = <&scif_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi1 {
+ pinctrl-0 = <&sdhi1_pins>;
+ pinctrl-1 = <&sdhi1_pins>;
+ pinctrl-names = "default", "state_uhs";
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&vqmmc_sdhi1>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&wdt1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
index b34855956ae0..63fa5cf1061b 100644
--- a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi
@@ -131,9 +131,6 @@
&phyrst {
status = "okay";
- usb0_vbus_otg: regulator-vbus {
- regulator-name = "vbus";
- };
};
&scif0 {
diff --git a/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi b/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi
new file mode 100644
index 000000000000..36707576030d
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Device Tree for the RZ/G2UL SMARC EVK (and similar EVKs) with
+ * the DU enabled and the ADV7513 transmitter connected to it.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+&{/} {
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "d";
+
+ port {
+ hdmi_con_out: endpoint {
+ remote-endpoint = <&adv7513_out>;
+ };
+ };
+ };
+};
+
+&du {
+ pinctrl-0 = <&du_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ ports {
+ port@0 {
+ du_out_rgb: endpoint {
+ remote-endpoint = <&adv7513_in>;
+ };
+ };
+ };
+};
+
+&ADV7513_PARENT_I2C {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adv7513: adv7513@39 {
+ compatible = "adi,adv7513";
+ reg = <0x39>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+
+ avdd-supply = <&reg_1p8v>;
+ dvdd-supply = <&reg_1p8v>;
+ pvdd-supply = <&reg_1p8v>;
+ dvdd-3v-supply = <&reg_3p3v>;
+ bgvdd-supply = <&reg_1p8v>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ adv7513_in: endpoint {
+ remote-endpoint = <&du_out_rgb>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ adv7513_out: endpoint {
+ remote-endpoint = <&hdmi_con_out>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
index 18c526c7a4cf..e9f244c33d55 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi
@@ -143,6 +143,12 @@
<RZG2L_PORT_PINMUX(45, 3, 1)>; /* RXD */
};
+ ssi1_pins: ssi1 {
+ pinmux = <RZG2L_PORT_PINMUX(46, 0, 1)>, /* BCK */
+ <RZG2L_PORT_PINMUX(46, 1, 1)>, /* RCK */
+ <RZG2L_PORT_PINMUX(46, 2, 1)>; /* TXD */
+ };
+
usb0_pins: usb0 {
pinmux = <RZG2L_PORT_PINMUX(4, 0, 1)>, /* VBUS */
<RZG2L_PORT_PINMUX(5, 0, 1)>, /* OVC */
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
index 4409c47239b9..83f5642d0d35 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
@@ -180,41 +180,63 @@
};
eth0_pins: eth0 {
- pinmux = <RZG2L_PORT_PINMUX(28, 1, 1)>, /* ET0_LINKSTA */
- <RZG2L_PORT_PINMUX(27, 1, 1)>, /* ET0_MDC */
- <RZG2L_PORT_PINMUX(28, 0, 1)>, /* ET0_MDIO */
- <RZG2L_PORT_PINMUX(20, 0, 1)>, /* ET0_TXC */
- <RZG2L_PORT_PINMUX(20, 1, 1)>, /* ET0_TX_CTL */
- <RZG2L_PORT_PINMUX(20, 2, 1)>, /* ET0_TXD0 */
- <RZG2L_PORT_PINMUX(21, 0, 1)>, /* ET0_TXD1 */
- <RZG2L_PORT_PINMUX(21, 1, 1)>, /* ET0_TXD2 */
- <RZG2L_PORT_PINMUX(22, 0, 1)>, /* ET0_TXD3 */
- <RZG2L_PORT_PINMUX(24, 0, 1)>, /* ET0_RXC */
- <RZG2L_PORT_PINMUX(24, 1, 1)>, /* ET0_RX_CTL */
- <RZG2L_PORT_PINMUX(25, 0, 1)>, /* ET0_RXD0 */
- <RZG2L_PORT_PINMUX(25, 1, 1)>, /* ET0_RXD1 */
- <RZG2L_PORT_PINMUX(26, 0, 1)>, /* ET0_RXD2 */
- <RZG2L_PORT_PINMUX(26, 1, 1)>, /* ET0_RXD3 */
- <RZG2L_PORT_PINMUX(1, 0, 1)>; /* IRQ2 */
+ txc {
+ pinmux = <RZG2L_PORT_PINMUX(20, 0, 1)>; /* ET0_TXC */
+ power-source = <1800>;
+ output-enable;
+ };
+
+ mux {
+ pinmux = <RZG2L_PORT_PINMUX(28, 1, 1)>, /* ET0_LINKSTA */
+ <RZG2L_PORT_PINMUX(27, 1, 1)>, /* ET0_MDC */
+ <RZG2L_PORT_PINMUX(28, 0, 1)>, /* ET0_MDIO */
+ <RZG2L_PORT_PINMUX(20, 1, 1)>, /* ET0_TX_CTL */
+ <RZG2L_PORT_PINMUX(20, 2, 1)>, /* ET0_TXD0 */
+ <RZG2L_PORT_PINMUX(21, 0, 1)>, /* ET0_TXD1 */
+ <RZG2L_PORT_PINMUX(21, 1, 1)>, /* ET0_TXD2 */
+ <RZG2L_PORT_PINMUX(22, 0, 1)>, /* ET0_TXD3 */
+ <RZG2L_PORT_PINMUX(24, 0, 1)>, /* ET0_RXC */
+ <RZG2L_PORT_PINMUX(24, 1, 1)>, /* ET0_RX_CTL */
+ <RZG2L_PORT_PINMUX(25, 0, 1)>, /* ET0_RXD0 */
+ <RZG2L_PORT_PINMUX(25, 1, 1)>, /* ET0_RXD1 */
+ <RZG2L_PORT_PINMUX(26, 0, 1)>, /* ET0_RXD2 */
+ <RZG2L_PORT_PINMUX(26, 1, 1)>; /* ET0_RXD3 */
+ power-source = <1800>;
+ };
+
+ irq {
+ pinmux = <RZG2L_PORT_PINMUX(1, 0, 1)>; /* IRQ2 */
+ };
};
eth1_pins: eth1 {
- pinmux = <RZG2L_PORT_PINMUX(37, 2, 1)>, /* ET1_LINKSTA */
- <RZG2L_PORT_PINMUX(37, 0, 1)>, /* ET1_MDC */
- <RZG2L_PORT_PINMUX(37, 1, 1)>, /* ET1_MDIO */
- <RZG2L_PORT_PINMUX(29, 0, 1)>, /* ET1_TXC */
- <RZG2L_PORT_PINMUX(29, 1, 1)>, /* ET1_TX_CTL */
- <RZG2L_PORT_PINMUX(30, 0, 1)>, /* ET1_TXD0 */
- <RZG2L_PORT_PINMUX(30, 1, 1)>, /* ET1_TXD1 */
- <RZG2L_PORT_PINMUX(31, 0, 1)>, /* ET1_TXD2 */
- <RZG2L_PORT_PINMUX(31, 1, 1)>, /* ET1_TXD3 */
- <RZG2L_PORT_PINMUX(33, 1, 1)>, /* ET1_RXC */
- <RZG2L_PORT_PINMUX(34, 0, 1)>, /* ET1_RX_CTL */
- <RZG2L_PORT_PINMUX(34, 1, 1)>, /* ET1_RXD0 */
- <RZG2L_PORT_PINMUX(35, 0, 1)>, /* ET1_RXD1 */
- <RZG2L_PORT_PINMUX(35, 1, 1)>, /* ET1_RXD2 */
- <RZG2L_PORT_PINMUX(36, 0, 1)>, /* ET1_RXD3 */
- <RZG2L_PORT_PINMUX(1, 1, 1)>; /* IRQ3 */
+ txc {
+ pinmux = <RZG2L_PORT_PINMUX(29, 0, 1)>; /* ET1_TXC */
+ power-source = <1800>;
+ output-enable;
+ };
+
+ mux {
+ pinmux = <RZG2L_PORT_PINMUX(37, 2, 1)>, /* ET1_LINKSTA */
+ <RZG2L_PORT_PINMUX(37, 0, 1)>, /* ET1_MDC */
+ <RZG2L_PORT_PINMUX(37, 1, 1)>, /* ET1_MDIO */
+ <RZG2L_PORT_PINMUX(29, 1, 1)>, /* ET1_TX_CTL */
+ <RZG2L_PORT_PINMUX(30, 0, 1)>, /* ET1_TXD0 */
+ <RZG2L_PORT_PINMUX(30, 1, 1)>, /* ET1_TXD1 */
+ <RZG2L_PORT_PINMUX(31, 0, 1)>, /* ET1_TXD2 */
+ <RZG2L_PORT_PINMUX(31, 1, 1)>, /* ET1_TXD3 */
+ <RZG2L_PORT_PINMUX(33, 1, 1)>, /* ET1_RXC */
+ <RZG2L_PORT_PINMUX(34, 0, 1)>, /* ET1_RX_CTL */
+ <RZG2L_PORT_PINMUX(34, 1, 1)>, /* ET1_RXD0 */
+ <RZG2L_PORT_PINMUX(35, 0, 1)>, /* ET1_RXD1 */
+ <RZG2L_PORT_PINMUX(35, 1, 1)>, /* ET1_RXD2 */
+ <RZG2L_PORT_PINMUX(36, 0, 1)>; /* ET1_RXD3 */
+ power-source = <1800>;
+ };
+
+ irq {
+ pinmux = <RZG2L_PORT_PINMUX(1, 1, 1)>; /* IRQ3 */
+ };
};
gpio-sd0-pwr-en-hog {
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
index 887dffe14910..ee3d96fdb616 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
@@ -30,6 +30,12 @@
};
};
};
+
+ sound_card {
+ compatible = "audio-graph-card";
+ label = "HDMI-Audio";
+ dais = <&i2s2_port>;
+ };
};
&cpu_dai {
@@ -88,6 +94,13 @@
remote-endpoint = <&hdmi_con_out>;
};
};
+
+ port@2 {
+ reg = <2>;
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s2_cpu_endpoint>;
+ };
+ };
};
};
};
@@ -170,6 +183,23 @@
status = "okay";
};
+&ssi1 {
+ pinctrl-0 = <&ssi1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ i2s2_port: port {
+ i2s2_cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+ dai-format = "i2s";
+
+ bitclock-master = <&i2s2_cpu_endpoint>;
+ frame-master = <&i2s2_cpu_endpoint>;
+ };
+ };
+};
+
&vccq_sdhi1 {
gpios = <&pinctrl RZG2L_GPIO(39, 1) GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
index 5e4209d6fb42..b4ef5ea8a9e3 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
@@ -128,22 +128,33 @@
&pinctrl {
eth0_pins: eth0 {
- pinmux = <RZG2L_PORT_PINMUX(28, 1, 1)>, /* ET0_LINKSTA */
- <RZG2L_PORT_PINMUX(27, 1, 1)>, /* ET0_MDC */
- <RZG2L_PORT_PINMUX(28, 0, 1)>, /* ET0_MDIO */
- <RZG2L_PORT_PINMUX(20, 0, 1)>, /* ET0_TXC */
- <RZG2L_PORT_PINMUX(20, 1, 1)>, /* ET0_TX_CTL */
- <RZG2L_PORT_PINMUX(20, 2, 1)>, /* ET0_TXD0 */
- <RZG2L_PORT_PINMUX(21, 0, 1)>, /* ET0_TXD1 */
- <RZG2L_PORT_PINMUX(21, 1, 1)>, /* ET0_TXD2 */
- <RZG2L_PORT_PINMUX(22, 0, 1)>, /* ET0_TXD3 */
- <RZG2L_PORT_PINMUX(24, 0, 1)>, /* ET0_RXC */
- <RZG2L_PORT_PINMUX(24, 1, 1)>, /* ET0_RX_CTL */
- <RZG2L_PORT_PINMUX(25, 0, 1)>, /* ET0_RXD0 */
- <RZG2L_PORT_PINMUX(25, 1, 1)>, /* ET0_RXD1 */
- <RZG2L_PORT_PINMUX(26, 0, 1)>, /* ET0_RXD2 */
- <RZG2L_PORT_PINMUX(26, 1, 1)>, /* ET0_RXD3 */
- <RZG2L_PORT_PINMUX(0, 0, 1)>; /* IRQ0 */
+ txc {
+ pinmux = <RZG2L_PORT_PINMUX(20, 0, 1)>; /* ET0_TXC */
+ power-source = <1800>;
+ output-enable;
+ };
+
+ mux {
+ pinmux = <RZG2L_PORT_PINMUX(28, 1, 1)>, /* ET0_LINKSTA */
+ <RZG2L_PORT_PINMUX(27, 1, 1)>, /* ET0_MDC */
+ <RZG2L_PORT_PINMUX(28, 0, 1)>, /* ET0_MDIO */
+ <RZG2L_PORT_PINMUX(20, 1, 1)>, /* ET0_TX_CTL */
+ <RZG2L_PORT_PINMUX(20, 2, 1)>, /* ET0_TXD0 */
+ <RZG2L_PORT_PINMUX(21, 0, 1)>, /* ET0_TXD1 */
+ <RZG2L_PORT_PINMUX(21, 1, 1)>, /* ET0_TXD2 */
+ <RZG2L_PORT_PINMUX(22, 0, 1)>, /* ET0_TXD3 */
+ <RZG2L_PORT_PINMUX(24, 0, 1)>, /* ET0_RXC */
+ <RZG2L_PORT_PINMUX(24, 1, 1)>, /* ET0_RX_CTL */
+ <RZG2L_PORT_PINMUX(25, 0, 1)>, /* ET0_RXD0 */
+ <RZG2L_PORT_PINMUX(25, 1, 1)>, /* ET0_RXD1 */
+ <RZG2L_PORT_PINMUX(26, 0, 1)>, /* ET0_RXD2 */
+ <RZG2L_PORT_PINMUX(26, 1, 1)>; /* ET0_RXD3 */
+ power-source = <1800>;
+ };
+
+ irq {
+ pinmux = <RZG2L_PORT_PINMUX(0, 0, 1)>; /* IRQ0 */
+ };
};
gpio-sd0-pwr-en-hog {
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
index f21508640b6e..377849cbb462 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
@@ -33,6 +33,16 @@
};
};
};
+
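+/* When SW_I2S0_I2S1 is set, replace the default sound card with the HDMI audio graph card */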
+#if (SW_I2S0_I2S1)
+ /delete-node/ sound;
+
+ sound_card {
+ compatible = "audio-graph-card";
+ label = "HDMI-Audio";
+ dais = <&i2s2_port>;
+ };
+#endif
};
#if (SW_SCIF_CAN || SW_RSPI_CAN)
@@ -48,9 +58,11 @@
};
#endif
+#if (!SW_I2S0_I2S1)
&cpu_dai {
sound-dai = <&ssi0>;
};
+#endif
&dsi {
status = "okay";
@@ -104,6 +116,15 @@
remote-endpoint = <&hdmi_con_out>;
};
};
+
+#if (SW_I2S0_I2S1)
+ port@2 {
+ reg = <2>;
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s2_cpu_endpoint>;
+ };
+ };
+#endif
};
};
};
@@ -177,6 +198,18 @@
pinctrl-names = "default";
status = "okay";
+
+#if (SW_I2S0_I2S1)
+ i2s2_port: port {
+ i2s2_cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+ dai-format = "i2s";
+
+ bitclock-master = <&i2s2_cpu_endpoint>;
+ frame-master = <&i2s2_cpu_endpoint>;
+ };
+ };
+#endif
};
#if (SW_RSPI_CAN)
diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
index 97cdad2a12e2..79443fb3f581 100644
--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
@@ -142,41 +142,63 @@
};
eth0_pins: eth0 {
- pinmux = <RZG2L_PORT_PINMUX(4, 5, 1)>, /* ET0_LINKSTA */
- <RZG2L_PORT_PINMUX(4, 3, 1)>, /* ET0_MDC */
- <RZG2L_PORT_PINMUX(4, 4, 1)>, /* ET0_MDIO */
- <RZG2L_PORT_PINMUX(1, 0, 1)>, /* ET0_TXC */
- <RZG2L_PORT_PINMUX(1, 1, 1)>, /* ET0_TX_CTL */
- <RZG2L_PORT_PINMUX(1, 2, 1)>, /* ET0_TXD0 */
- <RZG2L_PORT_PINMUX(1, 3, 1)>, /* ET0_TXD1 */
- <RZG2L_PORT_PINMUX(1, 4, 1)>, /* ET0_TXD2 */
- <RZG2L_PORT_PINMUX(2, 0, 1)>, /* ET0_TXD3 */
- <RZG2L_PORT_PINMUX(3, 0, 1)>, /* ET0_RXC */
- <RZG2L_PORT_PINMUX(3, 1, 1)>, /* ET0_RX_CTL */
- <RZG2L_PORT_PINMUX(3, 2, 1)>, /* ET0_RXD0 */
- <RZG2L_PORT_PINMUX(3, 3, 1)>, /* ET0_RXD1 */
- <RZG2L_PORT_PINMUX(4, 0, 1)>, /* ET0_RXD2 */
- <RZG2L_PORT_PINMUX(4, 1, 1)>, /* ET0_RXD3 */
- <RZG2L_PORT_PINMUX(5, 1, 7)>; /* IRQ2 */
+ txc {
+ pinmux = <RZG2L_PORT_PINMUX(1, 0, 1)>; /* ET0_TXC */
+ power-source = <1800>;
+ output-enable;
+ };
+
+ mux {
+ pinmux = <RZG2L_PORT_PINMUX(4, 5, 1)>, /* ET0_LINKSTA */
+ <RZG2L_PORT_PINMUX(4, 3, 1)>, /* ET0_MDC */
+ <RZG2L_PORT_PINMUX(4, 4, 1)>, /* ET0_MDIO */
+ <RZG2L_PORT_PINMUX(1, 1, 1)>, /* ET0_TX_CTL */
+ <RZG2L_PORT_PINMUX(1, 2, 1)>, /* ET0_TXD0 */
+ <RZG2L_PORT_PINMUX(1, 3, 1)>, /* ET0_TXD1 */
+ <RZG2L_PORT_PINMUX(1, 4, 1)>, /* ET0_TXD2 */
+ <RZG2L_PORT_PINMUX(2, 0, 1)>, /* ET0_TXD3 */
+ <RZG2L_PORT_PINMUX(3, 0, 1)>, /* ET0_RXC */
+ <RZG2L_PORT_PINMUX(3, 1, 1)>, /* ET0_RX_CTL */
+ <RZG2L_PORT_PINMUX(3, 2, 1)>, /* ET0_RXD0 */
+ <RZG2L_PORT_PINMUX(3, 3, 1)>, /* ET0_RXD1 */
+ <RZG2L_PORT_PINMUX(4, 0, 1)>, /* ET0_RXD2 */
+ <RZG2L_PORT_PINMUX(4, 1, 1)>; /* ET0_RXD3 */
+ power-source = <1800>;
+ };
+
+ irq {
+ pinmux = <RZG2L_PORT_PINMUX(5, 1, 7)>; /* IRQ2 */
+ };
};
eth1_pins: eth1 {
- pinmux = <RZG2L_PORT_PINMUX(10, 4, 1)>, /* ET1_LINKSTA */
- <RZG2L_PORT_PINMUX(10, 2, 1)>, /* ET1_MDC */
- <RZG2L_PORT_PINMUX(10, 3, 1)>, /* ET1_MDIO */
- <RZG2L_PORT_PINMUX(7, 0, 1)>, /* ET1_TXC */
- <RZG2L_PORT_PINMUX(7, 1, 1)>, /* ET1_TX_CTL */
- <RZG2L_PORT_PINMUX(7, 2, 1)>, /* ET1_TXD0 */
- <RZG2L_PORT_PINMUX(7, 3, 1)>, /* ET1_TXD1 */
- <RZG2L_PORT_PINMUX(7, 4, 1)>, /* ET1_TXD2 */
- <RZG2L_PORT_PINMUX(8, 0, 1)>, /* ET1_TXD3 */
- <RZG2L_PORT_PINMUX(8, 4, 1)>, /* ET1_RXC */
- <RZG2L_PORT_PINMUX(9, 0, 1)>, /* ET1_RX_CTL */
- <RZG2L_PORT_PINMUX(9, 1, 1)>, /* ET1_RXD0 */
- <RZG2L_PORT_PINMUX(9, 2, 1)>, /* ET1_RXD1 */
- <RZG2L_PORT_PINMUX(9, 3, 1)>, /* ET1_RXD2 */
- <RZG2L_PORT_PINMUX(10, 0, 1)>, /* ET1_RXD3 */
- <RZG2L_PORT_PINMUX(18, 5, 1)>; /* IRQ7 */
+ txc {
+ pinmux = <RZG2L_PORT_PINMUX(7, 0, 1)>; /* ET1_TXC */
+ power-source = <1800>;
+ output-enable;
+ };
+
+ mux {
+ pinmux = <RZG2L_PORT_PINMUX(10, 4, 1)>, /* ET1_LINKSTA */
+ <RZG2L_PORT_PINMUX(10, 2, 1)>, /* ET1_MDC */
+ <RZG2L_PORT_PINMUX(10, 3, 1)>, /* ET1_MDIO */
+ <RZG2L_PORT_PINMUX(7, 1, 1)>, /* ET1_TX_CTL */
+ <RZG2L_PORT_PINMUX(7, 2, 1)>, /* ET1_TXD0 */
+ <RZG2L_PORT_PINMUX(7, 3, 1)>, /* ET1_TXD1 */
+ <RZG2L_PORT_PINMUX(7, 4, 1)>, /* ET1_TXD2 */
+ <RZG2L_PORT_PINMUX(8, 0, 1)>, /* ET1_TXD3 */
+ <RZG2L_PORT_PINMUX(8, 4, 1)>, /* ET1_RXC */
+ <RZG2L_PORT_PINMUX(9, 0, 1)>, /* ET1_RX_CTL */
+ <RZG2L_PORT_PINMUX(9, 1, 1)>, /* ET1_RXD0 */
+ <RZG2L_PORT_PINMUX(9, 2, 1)>, /* ET1_RXD1 */
+ <RZG2L_PORT_PINMUX(9, 3, 1)>, /* ET1_RXD2 */
+ <RZG2L_PORT_PINMUX(10, 0, 1)>; /* ET1_RXD3 */
+ power-source = <1800>;
+ };
+
+ irq {
+ pinmux = <RZG2L_PORT_PINMUX(18, 5, 1)>; /* IRQ7 */
+ };
};
sdhi0_emmc_pins: sd0emmc {
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
index 8a3d302f1535..21bfa4e03972 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
@@ -32,6 +32,7 @@
compatible = "renesas,rzg3s-smarcm", "renesas,r9a08g045s33", "renesas,r9a08g045";
aliases {
+ i2c1 = &i2c1;
mmc0 = &sdhi0;
#if SW_CONFIG3 == SW_OFF
mmc2 = &sdhi2;
@@ -150,6 +151,10 @@
clock-frequency = <24000000>;
};
+&i2c1 {
+ status = "okay";
+};
+
#if SW_CONFIG2 == SW_ON
/* SD0 slot */
&sdhi0 {
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
index deb2ad37bb2e..7945d44e6ee1 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
@@ -11,6 +11,7 @@
/ {
aliases {
+ i2c0 = &i2c0;
serial0 = &scif0;
mmc1 = &sdhi1;
};
@@ -66,6 +67,12 @@
};
};
+&i2c0 {
+ status = "okay";
+
+ clock-frequency = <1000000>;
+};
+
&pinctrl {
key-1-gpio-hog {
gpio-hog;
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
index 80496fb3d476..3845b413bd24 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
@@ -117,6 +117,12 @@
};
};
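+ /* Output of the 9FGV0841 PCIe clock generator, modelled as a fixed 100 MHz clock */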
+ pcie_clk: clk-9fgv0841-pci {
+ compatible = "fixed-clock";
+ clock-frequency = <100000000>;
+ #clock-cells = <0>;
+ };
+
reg_1p2v: regulator-1p2v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.2V";
@@ -288,6 +294,18 @@
status = "okay";
};
+&pcie0_clkref {
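+ /* Turn the fixed reference clock into a GPIO-gated clock fed by pcie_clk */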
+ compatible = "gpio-gate-clock";
+ clocks = <&pcie_clk>;
+ enable-gpios = <&gpio4 21 GPIO_ACTIVE_LOW>;
+ /delete-property/ clock-frequency;
+};
+
+&pciec0 {
+ reset-gpio = <&io_expander_a 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
&pfc {
pinctrl-0 = <&scif_clk_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index fda1b980eb4b..09423070c992 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -3,6 +3,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2-of10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-edimm2.2.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-firefly-jd4-core-mb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-roc-cc.dtb
@@ -20,6 +21,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2c-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2s.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2s-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-orangepi-r1-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-orangepi-r1-plus-lts.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb
@@ -81,6 +83,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353ps.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353v.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353vs.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg503.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-odroid-m1s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v1.1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v2.1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-pinenote-v1.1.dtb
@@ -102,6 +105,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-blade.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-cm4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-model-a.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-box-demo.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lckfb-tspi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lubancat-1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-bpi-r2-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
@@ -118,9 +122,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3a.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-display-vz.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-evb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-genbook.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
@@ -128,6 +134,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6-lts.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
@@ -139,9 +146,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-toybrick-x0.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-turing-rk1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-coolpi-4b.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-gameforce-ace.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-indiedroid-nova.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-khadas-edge2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-nanopi-r6s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-nanopi-r6c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-rock-5a.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-odroid-m2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-orangepi-5.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts
new file mode 100644
index 000000000000..d03e6aef54dc
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include "px30-firefly-jd4-core.dtsi"
+
+/ {
+ compatible = "firefly,px30-jd4-core-mb", "firefly,px30-jd4-core",
+ "rockchip,px30";
+ model = "Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard";
+
+ aliases {
+ ethernet0 = &gmac;
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ dc_12v: dc-12v-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 2>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1500000>;
+ poll-interval = <100>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <18000>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blue_led>, <&green_led>;
+
+ blue-led {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+ label = "px30-mb-jd4:blue:work";
+ linux,default-trigger = "heartbeat";
+ };
+
+ green-led {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_HIGH>;
+ label = "px30-mb-jd4:green:diy";
+ linux,default-trigger = "default-on";
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+
+ /*
+ * On the module itself this is one of these (depending
+ * on the actual card populated):
+ * - SDIO_RESET_L_WL_REG_ON
+ * - PDN (power down when low)
+ */
+ reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; /* GPIO0_A2 */
+ };
+
+ vcc5v0_baseboard: vcc5v0-baseboard-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_baseboard";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+};
+
+&gmac {
+ clock_in_out = "output";
+ phy-supply = <&vcc_rmii>;
+ snps,reset-gpio = <&gpio2 13 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 50000 50000>;
+ status = "okay";
+};
+
+&pinctrl {
+ leds {
+ blue_led: blue-led {
+ rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ green_led: green-led {
+ rockchip,pins = <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins =
+ <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <800>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdio {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ keep-power-in-suspend;
+ non-removable;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+
+ u2phy_host: host-port {
+ status = "okay";
+ };
+
+ u2phy_otg: otg-port {
+ status = "okay";
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m1_xfer>;
+ status = "okay";
+};
+
+&usb20_otg {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi
new file mode 100644
index 000000000000..f18d7eb9a9c7
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "px30.dtsi"
+
+/ {
+ compatible = "firefly,px30-jd4-core", "rockchip,px30";
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ pinctrl-0 = <&emmc_reset>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_HIGH>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_baseboard>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ non-removable;
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v0>;
+ vqmmc-supply = <&vccio_flash>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_log>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <0>;
+ clock-output-names = "xin32k";
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v0: vcc_rmii: DCDC_REG4 {
+ regulator-name = "vcc_3v0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_sys: DCDC_REG5 {
+ regulator-name = "vcc3v3_sys";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v0: LDO_REG1 {
+ regulator-name = "vcc_1v0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_1v8: vccio_flash: vccio_sdio: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc3v0_pmu: LDO_REG4 {
+ regulator-name = "vcc3v0_pmu";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_sd: LDO_REG6 {
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc2v8_dvp: LDO_REG7 {
+ regulator-name = "vcc2v8_dvp";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vcc1v8_dvp: LDO_REG8 {
+ regulator-name = "vcc1v8_dvp";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v5_dvp: LDO_REG9 {
+ regulator-name = "vcc1v5_dvp";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcc3v3_lcd: SWITCH_REG1 {
+ regulator-name = "vcc3v3_lcd";
+ regulator-boot-on;
+ };
+
+ vcc5v0_host: SWITCH_REG2 {
+ regulator-name = "vcc5v0_host";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+};
+
+&io_domains {
+ vccio1-supply = <&vccio_sdio>;
+ vccio2-supply = <&vccio_sd>;
+ vccio3-supply = <&vcc_3v0>;
+ vccio4-supply = <&vcc3v0_pmu>;
+ vccio5-supply = <&vcc_3v0>;
+ vccio6-supply = <&vccio_flash>;
+ status = "okay";
+};
+
+&pinctrl {
+ emmc {
+ emmc_reset: emmc-reset {
+ rockchip,pins = <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic_int {
+ rockchip,pins =
+ <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v0_pmu>;
+ pmuio2-supply = <&vcc3v0_pmu>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts
new file mode 100644
index 000000000000..cb81ba3f23ff
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * (C) Copyright 2018 FriendlyElec Computer Tech. Co., Ltd.
+ * (http://www.friendlyarm.com)
+ *
+ * (C) Copyright 2016 Rockchip Electronics Co., Ltd
+ */
+
+/dts-v1/;
+#include "rk3328-nanopi-r2s.dts"
+
+/ {
+ compatible = "friendlyarm,nanopi-r2s-plus", "rockchip,rk3328";
+ model = "FriendlyElec NanoPi R2S Plus";
+
+ aliases {
+ mmc1 = &emmc;
+ };
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ mmc-hs200-1_8v;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ supports-emmc;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index a608a219543e..3e08e2fd0a78 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -387,7 +387,7 @@
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index b01efd6d042c..16b4faa22e4f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -910,6 +910,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
+ resets = <&cru SRST_MMC0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -922,6 +924,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
+ resets = <&cru SRST_SDIO>;
+ reset-names = "reset";
status = "disabled";
};
@@ -934,6 +938,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
max-frequency = <150000000>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
@@ -1036,6 +1042,20 @@
status = "disabled";
};
+ sdmmc_ext: mmc@ff5f0000 {
+ compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff5f0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC_EXT>, <&cru SCLK_SDMMC_EXT>,
+ <&cru SCLK_SDMMC_EXT_DRV>, <&cru SCLK_SDMMC_EXT_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <150000000>;
+ resets = <&cru SRST_SDMMCEXT>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
usbdrd3: usb@ff600000 {
compatible = "rockchip,rk3328-dwc3", "snps,dwc3";
reg = <0x0 0xff600000 0x0 0x100000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi
new file mode 100644
index 000000000000..9d5f5b083e3c
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi
@@ -0,0 +1,3019 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <dt-bindings/clock/rk3399-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/power/rk3399-power.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "rockchip,rk3399";
+
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 { /* Cortex-A53 */
+ core0 {
+ cpu = <&cpu_l0>;
+ };
+ core1 {
+ cpu = <&cpu_l1>;
+ };
+ core2 {
+ cpu = <&cpu_l2>;
+ };
+ core3 {
+ cpu = <&cpu_l3>;
+ };
+ };
+
+ cluster1 { /* Cortex-A72 */
+ core0 {
+ cpu = <&cpu_b0>;
+ };
+ core1 {
+ cpu = <&cpu_b1>;
+ };
+ };
+ };
+
+ cpu_l0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <485>;
+ clocks = <&cru ARMCLKL>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <100>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l>;
+ };
+
+ cpu_l1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <485>;
+ clocks = <&cru ARMCLKL>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <100>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l>;
+ };
+
+ cpu_l2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <485>;
+ clocks = <&cru ARMCLKL>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <100>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l>;
+ };
+
+ cpu_l3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <485>;
+ clocks = <&cru ARMCLKL>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <100>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_cache_l>;
+ };
+
+ cpu_b0: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&cru ARMCLKB>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <436>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b>;
+
+ thermal-idle {
+ #cooling-cells = <2>;
+ duration-us = <10000>;
+ exit-latency-us = <500>;
+ };
+ };
+
+ cpu_b1: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ clocks = <&cru ARMCLKB>;
+ #cooling-cells = <2>; /* min followed by max */
+ dynamic-power-coefficient = <436>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ i-cache-size = <0xC000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_b>;
+
+ thermal-idle {
+ #cooling-cells = <2>;
+ duration-us = <10000>;
+ exit-latency-us = <500>;
+ };
+ };
+
+ l2_cache_l: l2-cache-cluster0 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x80000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ };
+
+ l2_cache_b: l2-cache-cluster1 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <120>;
+ exit-latency-us = <250>;
+ min-residency-us = <900>;
+ };
+
+ CLUSTER_SLEEP: cluster-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <400>;
+ exit-latency-us = <500>;
+ min-residency-us = <2000>;
+ };
+ };
+ };
+
+ display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+ };
+
+ dmc: memory-controller {
+ compatible = "rockchip,rk3399-dmc";
+ rockchip,pmu = <&pmugrf>;
+ devfreq-events = <&dfi>;
+ clocks = <&cru SCLK_DDRC>;
+ clock-names = "dmc_clk";
+ status = "disabled";
+ };
+
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster0>;
+ };
+
+ pmu_a72 {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster1>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
+ arm,no-tick-in-suspend;
+ };
+
+ xin24m: xin24m {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ #clock-cells = <0>;
+ };
+
+ pcie0: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie";
+ reg = <0x0 0xf8000000 0x0 0x2000000>,
+ <0x0 0xfd000000 0x0 0x1000000>;
+ reg-names = "axi-base", "apb-base";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ aspm-no-l0s;
+ bus-range = <0x0 0x1f>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "legacy", "client";
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie0_intc 0>,
+ <0 0 0 2 &pcie0_intc 1>,
+ <0 0 0 3 &pcie0_intc 2>,
+ <0 0 0 4 &pcie0_intc 3>;
+ max-link-speed = <1>;
+ msi-map = <0x0 &its 0x0 0x1000>;
+ phys = <&pcie_phy 0>, <&pcie_phy 1>,
+ <&pcie_phy 2>, <&pcie_phy 3>;
+ phy-names = "pcie-phy-0", "pcie-phy-1",
+ "pcie-phy-2", "pcie-phy-3";
+ ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>,
+ <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
+ status = "disabled";
+
+ pcie0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pcie0_ep: pcie-ep@f8000000 {
+ compatible = "rockchip,rk3399-pcie-ep";
+ reg = <0x0 0xfd000000 0x0 0x1000000>,
+ <0x0 0xfa000000 0x0 0x2000000>;
+ reg-names = "apb-base", "mem-base";
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ max-functions = /bits/ 8 <8>;
+ num-lanes = <4>;
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
+ phys = <&pcie_phy 0>, <&pcie_phy 1>,
+ <&pcie_phy 2>, <&pcie_phy 3>;
+ phy-names = "pcie-phy-0", "pcie-phy-1",
+ "pcie-phy-2", "pcie-phy-3";
+ rockchip,max-outbound-regions = <32>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqnb_cpm>;
+ status = "disabled";
+ };
+
+ gmac: ethernet@fe300000 {
+ compatible = "rockchip,rk3399-gmac";
+ reg = <0x0 0xfe300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>,
+ <&cru SCLK_MAC_TX>, <&cru SCLK_MACREF>,
+ <&cru SCLK_MACREF_OUT>, <&cru ACLK_GMAC>,
+ <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac";
+ power-domains = <&power RK3399_PD_GMAC>;
+ resets = <&cru SRST_A_GMAC>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ snps,txpbl = <0x4>;
+ status = "disabled";
+ };
+
+ sdio0: mmc@fe310000 {
+ compatible = "rockchip,rk3399-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe310000 0x0 0x4000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH 0>;
+ max-frequency = <150000000>;
+ clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ resets = <&cru SRST_SDIO0>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdmmc: mmc@fe320000 {
+ compatible = "rockchip,rk3399-dw-mshc",
+ "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe320000 0x0 0x4000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH 0>;
+ max-frequency = <150000000>;
+ assigned-clocks = <&cru HCLK_SD>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ power-domains = <&power RK3399_PD_SD>;
+ resets = <&cru SRST_SDMMC>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdhci: mmc@fe330000 {
+ compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1";
+ reg = <0x0 0xfe330000 0x0 0x10000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH 0>;
+ arasan,soc-ctl-syscon = <&grf>;
+ assigned-clocks = <&cru SCLK_EMMC>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>;
+ clock-names = "clk_xin", "clk_ahb";
+ clock-output-names = "emmc_cardclock";
+ #clock-cells = <0>;
+ phys = <&emmc_phy>;
+ phy-names = "phy_arasan";
+ power-domains = <&power RK3399_PD_EMMC>;
+ disable-cqe-dcmd;
+ status = "disabled";
+ };
+
+ usb_host0_ehci: usb@fe380000 {
+ compatible = "generic-ehci";
+ reg = <0x0 0xfe380000 0x0 0x20000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
+ <&u2phy0>;
+ phys = <&u2phy0_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ usb_host0_ohci: usb@fe3a0000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0xfe3a0000 0x0 0x20000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
+ <&u2phy0>;
+ phys = <&u2phy0_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ usb_host1_ehci: usb@fe3c0000 {
+ compatible = "generic-ehci";
+ reg = <0x0 0xfe3c0000 0x0 0x20000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
+ <&u2phy1>;
+ phys = <&u2phy1_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ usb_host1_ohci: usb@fe3e0000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0xfe3e0000 0x0 0x20000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
+ <&u2phy1>;
+ phys = <&u2phy1_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ debug@fe430000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe430000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_L>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_l0>;
+ };
+
+ debug@fe432000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe432000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_L>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_l1>;
+ };
+
+ debug@fe434000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe434000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_L>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_l2>;
+ };
+
+ debug@fe436000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe436000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_L>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_l3>;
+ };
+
+ debug@fe610000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe610000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_B>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_b0>;
+ };
+
+ debug@fe710000 {
+ compatible = "arm,coresight-cpu-debug", "arm,primecell";
+ reg = <0 0xfe710000 0 0x1000>;
+ clocks = <&cru PCLK_COREDBG_B>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu_b1>;
+ };
+
+ usbdrd3_0: usb@fe800000 {
+ compatible = "rockchip,rk3399-dwc3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
+ <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_RKSOC_AXI_PERF>,
+ <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "aclk_usb3_rksoc_axi_perf",
+ "aclk_usb3", "grf_clk";
+ resets = <&cru SRST_A_USB3_OTG0>;
+ reset-names = "usb3-otg";
+ status = "disabled";
+
+ usbdrd_dwc3_0: usb@fe800000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe800000 0x0 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_USB3OTG0_REF>, <&cru ACLK_USB3OTG0>,
+ <&cru SCLK_USB3OTG0_SUSPEND>;
+ clock-names = "ref", "bus_early", "suspend";
+ dr_mode = "otg";
+ phys = <&u2phy0_otg>, <&tcphy0_usb3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ power-domains = <&power RK3399_PD_USB3>;
+ status = "disabled";
+ };
+ };
+
+ usbdrd3_1: usb@fe900000 {
+ compatible = "rockchip,rk3399-dwc3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>,
+ <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_RKSOC_AXI_PERF>,
+ <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "aclk_usb3_rksoc_axi_perf",
+ "aclk_usb3", "grf_clk";
+ resets = <&cru SRST_A_USB3_OTG1>;
+ reset-names = "usb3-otg";
+ status = "disabled";
+
+ usbdrd_dwc3_1: usb@fe900000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe900000 0x0 0x100000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_USB3OTG1_REF>, <&cru ACLK_USB3OTG1>,
+ <&cru SCLK_USB3OTG1_SUSPEND>;
+ clock-names = "ref", "bus_early", "suspend";
+ dr_mode = "otg";
+ phys = <&u2phy1_otg>, <&tcphy1_usb3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ power-domains = <&power RK3399_PD_USB3>;
+ status = "disabled";
+ };
+ };
+
+ cdn_dp: dp@fec00000 {
+ compatible = "rockchip,rk3399-cdn-dp";
+ reg = <0x0 0xfec00000 0x0 0x100000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&cru SCLK_DP_CORE>, <&cru SCLK_SPDIF_REC_DPTX>;
+ assigned-clock-rates = <100000000>, <200000000>;
+ clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>,
+ <&cru SCLK_SPDIF_REC_DPTX>, <&cru PCLK_VIO_GRF>;
+ clock-names = "core-clk", "pclk", "spdif", "grf";
+ phys = <&tcphy0_dp>, <&tcphy1_dp>;
+ power-domains = <&power RK3399_PD_HDCP>;
+ resets = <&cru SRST_DPTX_SPDIF_REC>, <&cru SRST_P_UPHY0_DPTX>,
+ <&cru SRST_P_UPHY0_APB>, <&cru SRST_DP_CORE>;
+ reset-names = "spdif", "dptx", "apb", "core";
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <1>;
+ status = "disabled";
+
+ ports {
+ dp_in: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dp_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_dp>;
+ };
+
+ dp_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_dp>;
+ };
+ };
+ };
+ };
+
+ gic: interrupt-controller@fee00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+
+ reg = <0x0 0xfee00000 0 0x10000>, /* GICD */
+ <0x0 0xfef00000 0 0xc0000>, /* GICR */
+ <0x0 0xfff00000 0 0x10000>, /* GICC */
+ <0x0 0xfff10000 0 0x10000>, /* GICH */
+ <0x0 0xfff20000 0 0x10000>; /* GICV */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
+ its: msi-controller@fee20000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x0 0xfee20000 0x0 0x20000>;
+ };
+
+ ppi-partitions {
+ ppi_cluster0: interrupt-partition-0 {
+ affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>;
+ };
+
+ ppi_cluster1: interrupt-partition-1 {
+ affinity = <&cpu_b0 &cpu_b1>;
+ };
+ };
+ };
+
+ saradc: saradc@ff100000 {
+ compatible = "rockchip,rk3399-saradc";
+ reg = <0x0 0xff100000 0x0 0x100>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH 0>;
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_P_SARADC>;
+ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+ crypto0: crypto@ff8b0000 {
+ compatible = "rockchip,rk3399-crypto";
+ reg = <0x0 0xff8b0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
+ clock-names = "hclk_master", "hclk_slave", "sclk";
+ resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
+ reset-names = "master", "slave", "crypto-rst";
+ };
+
+ crypto1: crypto@ff8b8000 {
+ compatible = "rockchip,rk3399-crypto";
+ reg = <0x0 0xff8b8000 0x0 0x4000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
+ clock-names = "hclk_master", "hclk_slave", "sclk";
+ resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
+ reset-names = "master", "slave", "crypto-rst";
+ };
+
+ i2c1: i2c@ff110000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff110000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C1>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ff120000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff120000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C2>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ff130000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff130000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C3>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@ff140000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff140000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C5>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@ff150000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff150000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C6>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@ff160000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C7>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart0: serial@ff180000 {
+ compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff180000 0x0 0x100>;
+ clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH 0>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>;
+ status = "disabled";
+ };
+
+ uart1: serial@ff190000 {
+ compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff190000 0x0 0x100>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH 0>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_xfer>;
+ status = "disabled";
+ };
+
+ uart2: serial@ff1a0000 {
+ compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff1a0000 0x0 0x100>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH 0>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2c_xfer>;
+ status = "disabled";
+ };
+
+ uart3: serial@ff1b0000 {
+ compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff1b0000 0x0 0x100>;
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH 0>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_xfer>;
+ status = "disabled";
+ };
+
+ spi0: spi@ff1c0000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff1c0000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_peri 10>, <&dmac_peri 11>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@ff1d0000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff1d0000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_peri 12>, <&dmac_peri 13>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@ff1e0000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff1e0000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_peri 14>, <&dmac_peri 15>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi4: spi@ff1f0000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff1f0000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_peri 18>, <&dmac_peri 19>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi5: spi@ff200000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff200000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_bus 8>, <&dmac_bus 9>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ thermal_zones: thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsadc 0>;
+
+ trips {
+ cpu_alert0: cpu_alert0 {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_alert1: cpu_alert1 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit: cpu_crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device =
+ <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_l3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpu_thermal: gpu-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsadc 1>;
+
+ trips {
+ gpu_alert0: gpu_alert0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ gpu_crit: gpu_crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device =
+ <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ tsadc: tsadc@ff260000 {
+ compatible = "rockchip,rk3399-tsadc";
+ reg = <0x0 0xff260000 0x0 0x100>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&cru SCLK_TSADC>;
+ assigned-clock-rates = <750000>;
+ clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ resets = <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb";
+ rockchip,grf = <&grf>;
+ rockchip,hw-tshut-temp = <95000>;
+ pinctrl-names = "init", "default", "sleep";
+ pinctrl-0 = <&otp_pin>;
+ pinctrl-1 = <&otp_out>;
+ pinctrl-2 = <&otp_pin>;
+ #thermal-sensor-cells = <1>;
+ status = "disabled";
+ };
+
+ qos_emmc: qos@ffa58000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa58000 0x0 0x20>;
+ };
+
+ qos_gmac: qos@ffa5c000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa5c000 0x0 0x20>;
+ };
+
+ qos_pcie: qos@ffa60080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa60080 0x0 0x20>;
+ };
+
+ qos_usb_host0: qos@ffa60100 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa60100 0x0 0x20>;
+ };
+
+ qos_usb_host1: qos@ffa60180 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa60180 0x0 0x20>;
+ };
+
+ qos_usb_otg0: qos@ffa70000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa70000 0x0 0x20>;
+ };
+
+ qos_usb_otg1: qos@ffa70080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa70080 0x0 0x20>;
+ };
+
+ qos_sd: qos@ffa74000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa74000 0x0 0x20>;
+ };
+
+ qos_sdioaudio: qos@ffa76000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa76000 0x0 0x20>;
+ };
+
+ qos_hdcp: qos@ffa90000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa90000 0x0 0x20>;
+ };
+
+ qos_iep: qos@ffa98000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa98000 0x0 0x20>;
+ };
+
+ qos_isp0_m0: qos@ffaa0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffaa0000 0x0 0x20>;
+ };
+
+ qos_isp0_m1: qos@ffaa0080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffaa0080 0x0 0x20>;
+ };
+
+ qos_isp1_m0: qos@ffaa8000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffaa8000 0x0 0x20>;
+ };
+
+ qos_isp1_m1: qos@ffaa8080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffaa8080 0x0 0x20>;
+ };
+
+ qos_rga_r: qos@ffab0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab0000 0x0 0x20>;
+ };
+
+ qos_rga_w: qos@ffab0080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab0080 0x0 0x20>;
+ };
+
+ qos_video_m0: qos@ffab8000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab8000 0x0 0x20>;
+ };
+
+ qos_video_m1_r: qos@ffac0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac0000 0x0 0x20>;
+ };
+
+ qos_video_m1_w: qos@ffac0080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac0080 0x0 0x20>;
+ };
+
+ qos_vop_big_r: qos@ffac8000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac8000 0x0 0x20>;
+ };
+
+ qos_vop_big_w: qos@ffac8080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac8080 0x0 0x20>;
+ };
+
+ qos_vop_little: qos@ffad0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffad0000 0x0 0x20>;
+ };
+
+ qos_perihp: qos@ffad8080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffad8080 0x0 0x20>;
+ };
+
+ qos_gpu: qos@ffae0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffae0000 0x0 0x20>;
+ };
+
+ pmu: power-management@ff310000 {
+ compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd";
+ reg = <0x0 0xff310000 0x0 0x1000>;
+
+ /*
+ * Note: RK3399 has six voltage domains: VD_CORE_L, VD_CORE_B,
+ * VD_CENTER, VD_GPU, VD_LOGIC and VD_PMU. Within each voltage
+ * domain, several power domains are grouped together, as
+ * detailed below.
+ */
+ power: power-controller {
+ compatible = "rockchip,rk3399-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* These power domains are grouped by VD_CENTER */
+ power-domain@RK3399_PD_IEP {
+ reg = <RK3399_PD_IEP>;
+ clocks = <&cru ACLK_IEP>,
+ <&cru HCLK_IEP>;
+ pm_qos = <&qos_iep>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_RGA {
+ reg = <RK3399_PD_RGA>;
+ clocks = <&cru ACLK_RGA>,
+ <&cru HCLK_RGA>;
+ pm_qos = <&qos_rga_r>,
+ <&qos_rga_w>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VCODEC {
+ reg = <RK3399_PD_VCODEC>;
+ clocks = <&cru ACLK_VCODEC>,
+ <&cru HCLK_VCODEC>;
+ pm_qos = <&qos_video_m0>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+ <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>,
+ <&cru SCLK_VDU_CORE>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ #power-domain-cells = <0>;
+ };
+
+ /* These power domains are grouped by VD_GPU */
+ power-domain@RK3399_PD_GPU {
+ reg = <RK3399_PD_GPU>;
+ clocks = <&cru ACLK_GPU>;
+ pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
+ };
+
+ /* These power domains are grouped by VD_LOGIC */
+ power-domain@RK3399_PD_EDP {
+ reg = <RK3399_PD_EDP>;
+ clocks = <&cru PCLK_EDP_CTRL>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_EMMC {
+ reg = <RK3399_PD_EMMC>;
+ clocks = <&cru ACLK_EMMC>;
+ pm_qos = <&qos_emmc>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_GMAC {
+ reg = <RK3399_PD_GMAC>;
+ clocks = <&cru ACLK_GMAC>,
+ <&cru PCLK_GMAC>;
+ pm_qos = <&qos_gmac>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_SD {
+ reg = <RK3399_PD_SD>;
+ clocks = <&cru HCLK_SDMMC>,
+ <&cru SCLK_SDMMC>;
+ pm_qos = <&qos_sd>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_SDIOAUDIO {
+ reg = <RK3399_PD_SDIOAUDIO>;
+ clocks = <&cru HCLK_SDIO>;
+ pm_qos = <&qos_sdioaudio>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_TCPD0 {
+ reg = <RK3399_PD_TCPD0>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_TCPD1 {
+ reg = <RK3399_PD_TCPD1>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_USB3 {
+ reg = <RK3399_PD_USB3>;
+ clocks = <&cru ACLK_USB3>;
+ pm_qos = <&qos_usb_otg0>,
+ <&qos_usb_otg1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VIO {
+ reg = <RK3399_PD_VIO>;
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3399_PD_HDCP {
+ reg = <RK3399_PD_HDCP>;
+ clocks = <&cru ACLK_HDCP>,
+ <&cru HCLK_HDCP>,
+ <&cru PCLK_HDCP>;
+ pm_qos = <&qos_hdcp>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_ISP0 {
+ reg = <RK3399_PD_ISP0>;
+ clocks = <&cru ACLK_ISP0>,
+ <&cru HCLK_ISP0>;
+ pm_qos = <&qos_isp0_m0>,
+ <&qos_isp0_m1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_ISP1 {
+ reg = <RK3399_PD_ISP1>;
+ clocks = <&cru ACLK_ISP1>,
+ <&cru HCLK_ISP1>;
+ pm_qos = <&qos_isp1_m0>,
+ <&qos_isp1_m1>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VO {
+ reg = <RK3399_PD_VO>;
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3399_PD_VOPB {
+ reg = <RK3399_PD_VOPB>;
+ clocks = <&cru ACLK_VOP0>,
+ <&cru HCLK_VOP0>;
+ pm_qos = <&qos_vop_big_r>,
+ <&qos_vop_big_w>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VOPL {
+ reg = <RK3399_PD_VOPL>;
+ clocks = <&cru ACLK_VOP1>,
+ <&cru HCLK_VOP1>;
+ pm_qos = <&qos_vop_little>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ };
+ };
+
+ pmugrf: syscon@ff320000 {
+ compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd";
+ reg = <0x0 0xff320000 0x0 0x1000>;
+
+ pmu_io_domains: io-domains {
+ compatible = "rockchip,rk3399-pmu-io-voltage-domain";
+ status = "disabled";
+ };
+ };
+
+ spi3: spi@ff350000 {
+ compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff350000 0x0 0x1000>;
+ clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart4: serial@ff370000 {
+ compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff370000 0x0 0x100>;
+ clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH 0>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_xfer>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@ff3c0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3c0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C0_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ff3d0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3d0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C4_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@ff3e0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3e0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C8_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@ff420000 {
+ compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+ reg = <0x0 0xff420000 0x0 0x10>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pin>;
+ clocks = <&pmucru PCLK_RKPWM_PMU>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@ff420010 {
+ compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+ reg = <0x0 0xff420010 0x0 0x10>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1_pin>;
+ clocks = <&pmucru PCLK_RKPWM_PMU>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@ff420020 {
+ compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+ reg = <0x0 0xff420020 0x0 0x10>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2_pin>;
+ clocks = <&pmucru PCLK_RKPWM_PMU>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@ff420030 {
+ compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+ reg = <0x0 0xff420030 0x0 0x10>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3a_pin>;
+ clocks = <&pmucru PCLK_RKPWM_PMU>;
+ status = "disabled";
+ };
+
+ dfi: dfi@ff630000 {
+ reg = <0x00 0xff630000 0x00 0x4000>;
+ compatible = "rockchip,rk3399-dfi";
+ rockchip,pmu = <&pmugrf>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_DDR_MON>;
+ clock-names = "pclk_ddr_mon";
+ };
+
+ vpu: video-codec@ff650000 {
+ compatible = "rockchip,rk3399-vpu";
+ reg = <0x0 0xff650000 0x0 0x800>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vpu_mmu>;
+ power-domains = <&power RK3399_PD_VCODEC>;
+ };
+
+ vpu_mmu: iommu@ff650800 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff650800 0x0 0x40>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3399_PD_VCODEC>;
+ };
+
+ vdec: video-codec@ff660000 {
+ compatible = "rockchip,rk3399-vdec";
+ reg = <0x0 0xff660000 0x0 0x480>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+ clock-names = "axi", "ahb", "cabac", "core";
+ iommus = <&vdec_mmu>;
+ power-domains = <&power RK3399_PD_VDU>;
+ };
+
+ vdec_mmu: iommu@ff660480 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3399_PD_VDU>;
+ #iommu-cells = <0>;
+ };
+
+ iep_mmu: iommu@ff670800 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff670800 0x0 0x40>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_IEP>, <&cru HCLK_IEP>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ status = "disabled";
+ };
+
+ rga: rga@ff680000 {
+ compatible = "rockchip,rk3399-rga";
+ reg = <0x0 0xff680000 0x0 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA_CORE>;
+ clock-names = "aclk", "hclk", "sclk";
+ resets = <&cru SRST_RGA_CORE>, <&cru SRST_A_RGA>, <&cru SRST_H_RGA>;
+ reset-names = "core", "axi", "ahb";
+ power-domains = <&power RK3399_PD_RGA>;
+ };
+
+ efuse0: efuse@ff690000 {
+ compatible = "rockchip,rk3399-efuse";
+ reg = <0x0 0xff690000 0x0 0x80>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru PCLK_EFUSE1024NS>;
+ clock-names = "pclk_efuse";
+
+ /* Data cells */
+ cpu_id: cpu-id@7 {
+ reg = <0x07 0x10>;
+ };
+ cpub_leakage: cpu-leakage@17 {
+ reg = <0x17 0x1>;
+ };
+ gpu_leakage: gpu-leakage@18 {
+ reg = <0x18 0x1>;
+ };
+ center_leakage: center-leakage@19 {
+ reg = <0x19 0x1>;
+ };
+ cpul_leakage: cpu-leakage@1a {
+ reg = <0x1a 0x1>;
+ };
+ logic_leakage: logic-leakage@1b {
+ reg = <0x1b 0x1>;
+ };
+ wafer_info: wafer-info@1c {
+ reg = <0x1c 0x1>;
+ };
+ };
+
+ dmac_bus: dma-controller@ff6d0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff6d0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC0_PERILP>;
+ clock-names = "apb_pclk";
+ };
+
+ dmac_peri: dma-controller@ff6e0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff6e0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1_PERILP>;
+ clock-names = "apb_pclk";
+ };
+
+ pmucru: clock-controller@ff750000 {
+ compatible = "rockchip,rk3399-pmucru";
+ reg = <0x0 0xff750000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
+ rockchip,grf = <&pmugrf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ assigned-clocks = <&pmucru PLL_PPLL>;
+ assigned-clock-rates = <676000000>;
+ };
+
+ cru: clock-controller@ff760000 {
+ compatible = "rockchip,rk3399-cru";
+ reg = <0x0 0xff760000 0x0 0x1000>;
+ clocks = <&xin24m>;
+ clock-names = "xin24m";
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ assigned-clocks =
+ <&cru PLL_GPLL>, <&cru PLL_CPLL>,
+ <&cru PLL_NPLL>,
+ <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>,
+ <&cru PCLK_PERIHP>,
+ <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>,
+ <&cru PCLK_PERILP0>, <&cru ACLK_CCI>,
+ <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>,
+ <&cru ACLK_VIO>, <&cru ACLK_HDCP>,
+ <&cru ACLK_GIC_PRE>,
+ <&cru PCLK_DDR>,
+ <&cru ACLK_VDU>;
+ assigned-clock-rates =
+ <594000000>, <800000000>,
+ <1000000000>,
+ <150000000>, <75000000>,
+ <37500000>,
+ <100000000>, <100000000>,
+ <50000000>, <600000000>,
+ <100000000>, <50000000>,
+ <400000000>, <400000000>,
+ <200000000>,
+ <200000000>,
+ <400000000>;
+ };
+
+ grf: syscon@ff770000 {
+ compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xff770000 0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ io_domains: io-domains {
+ compatible = "rockchip,rk3399-io-voltage-domain";
+ status = "disabled";
+ };
+
+ mipi_dphy_rx0: mipi-dphy-rx0 {
+ compatible = "rockchip,rk3399-mipi-dphy-rx0";
+ clocks = <&cru SCLK_MIPIDPHY_REF>,
+ <&cru SCLK_DPHY_RX0_CFG>,
+ <&cru PCLK_VIO_GRF>;
+ clock-names = "dphy-ref", "dphy-cfg", "grf";
+ power-domains = <&power RK3399_PD_VIO>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ u2phy0: usb2phy@e450 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe450 0x10>;
+ clocks = <&cru SCLK_USB2PHY0_REF>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ clock-output-names = "clk_usbphy0_480m";
+ status = "disabled";
+
+ u2phy0_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ status = "disabled";
+ };
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "otg-bvalid", "otg-id",
+ "linestate";
+ status = "disabled";
+ };
+ };
+
+ u2phy1: usb2phy@e460 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe460 0x10>;
+ clocks = <&cru SCLK_USB2PHY1_REF>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ clock-output-names = "clk_usbphy1_480m";
+ status = "disabled";
+
+ u2phy1_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ status = "disabled";
+ };
+
+ u2phy1_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "otg-bvalid", "otg-id",
+ "linestate";
+ status = "disabled";
+ };
+ };
+
+ emmc_phy: phy@f780 {
+ compatible = "rockchip,rk3399-emmc-phy";
+ reg = <0xf780 0x24>;
+ clocks = <&sdhci>;
+ clock-names = "emmcclk";
+ drive-impedance-ohm = <50>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie_phy: pcie-phy {
+ compatible = "rockchip,rk3399-pcie-phy";
+ clocks = <&cru SCLK_PCIEPHY_REF>;
+ clock-names = "refclk";
+ #phy-cells = <1>;
+ resets = <&cru SRST_PCIEPHY>;
+ reset-names = "phy";
+ status = "disabled";
+ };
+ };
+
+ tcphy0: phy@ff7c0000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff7c0000 0x0 0x40000>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ power-domains = <&power RK3399_PD_TCPD0>;
+ resets = <&cru SRST_UPHY0>,
+ <&cru SRST_UPHY0_PIPE_L00>,
+ <&cru SRST_P_UPHY0_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+
+ tcphy0_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy0_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
+
+ tcphy1: phy@ff800000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff800000 0x0 0x40000>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ power-domains = <&power RK3399_PD_TCPD1>;
+ resets = <&cru SRST_UPHY1>,
+ <&cru SRST_UPHY1_PIPE_L00>,
+ <&cru SRST_P_UPHY1_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+
+ tcphy1_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy1_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
+
+ watchdog@ff848000 {
+ compatible = "rockchip,rk3399-wdt", "snps,dw-wdt";
+ reg = <0x0 0xff848000 0x0 0x100>;
+ clocks = <&cru PCLK_WDT>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH 0>;
+ };
+
+ rktimer: rktimer@ff850000 {
+ compatible = "rockchip,rk3399-timer";
+ reg = <0x0 0xff850000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+ clock-names = "pclk", "timer";
+ };
+
+ spdif: spdif@ff870000 {
+ compatible = "rockchip,rk3399-spdif";
+ reg = <0x0 0xff870000 0x0 0x1000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_bus 7>;
+ dma-names = "tx";
+ clock-names = "mclk", "hclk";
+ clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_bus>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s0: i2s@ff880000 {
+ compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff880000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_bus 0>, <&dmac_bus 1>;
+ dma-names = "tx", "rx";
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>;
+ pinctrl-names = "bclk_on", "bclk_off";
+ pinctrl-0 = <&i2s0_8ch_bus>;
+ pinctrl-1 = <&i2s0_8ch_bus_bclk_off>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s1: i2s@ff890000 {
+ compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff890000 0x0 0x1000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_bus 2>, <&dmac_bus 3>;
+ dma-names = "tx", "rx";
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1_2ch_bus>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ i2s2: i2s@ff8a0000 {
+ compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff8a0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dmac_bus 4>, <&dmac_bus 5>;
+ dma-names = "tx", "rx";
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>;
+ power-domains = <&power RK3399_PD_SDIOAUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ vopl: vop@ff8f0000 {
+ compatible = "rockchip,rk3399-vop-lit";
+ reg = <0x0 0xff8f0000 0x0 0x2000>, <0x0 0xff8f2000 0x0 0x400>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
+ assigned-clock-rates = <400000000>, <100000000>;
+ clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ iommus = <&vopl_mmu>;
+ power-domains = <&power RK3399_PD_VOPL>;
+ resets = <&cru SRST_A_VOP1>, <&cru SRST_H_VOP1>, <&cru SRST_D_VOP1>;
+ reset-names = "axi", "ahb", "dclk";
+ status = "disabled";
+
+ vopl_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vopl_out_mipi: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&mipi_in_vopl>;
+ };
+
+ vopl_out_edp: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&edp_in_vopl>;
+ };
+
+ vopl_out_hdmi: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&hdmi_in_vopl>;
+ };
+
+ vopl_out_mipi1: endpoint@3 {
+ reg = <3>;
+ remote-endpoint = <&mipi1_in_vopl>;
+ };
+
+ vopl_out_dp: endpoint@4 {
+ reg = <4>;
+ remote-endpoint = <&dp_in_vopl>;
+ };
+ };
+ };
+
+ vopl_mmu: iommu@ff8f3f00 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff8f3f00 0x0 0x100>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3399_PD_VOPL>;
+ #iommu-cells = <0>;
+ status = "disabled";
+ };
+
+ vopb: vop@ff900000 {
+ compatible = "rockchip,rk3399-vop-big";
+ reg = <0x0 0xff900000 0x0 0x2000>, <0x0 0xff902000 0x0 0x1000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>;
+ assigned-clock-rates = <400000000>, <100000000>;
+ clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ iommus = <&vopb_mmu>;
+ power-domains = <&power RK3399_PD_VOPB>;
+ resets = <&cru SRST_A_VOP0>, <&cru SRST_H_VOP0>, <&cru SRST_D_VOP0>;
+ reset-names = "axi", "ahb", "dclk";
+ status = "disabled";
+
+ vopb_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vopb_out_edp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&edp_in_vopb>;
+ };
+
+ vopb_out_mipi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&mipi_in_vopb>;
+ };
+
+ vopb_out_hdmi: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&hdmi_in_vopb>;
+ };
+
+ vopb_out_mipi1: endpoint@3 {
+ reg = <3>;
+ remote-endpoint = <&mipi1_in_vopb>;
+ };
+
+ vopb_out_dp: endpoint@4 {
+ reg = <4>;
+ remote-endpoint = <&dp_in_vopb>;
+ };
+ };
+ };
+
+ vopb_mmu: iommu@ff903f00 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff903f00 0x0 0x100>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3399_PD_VOPB>;
+ #iommu-cells = <0>;
+ status = "disabled";
+ };
+
+ isp0: isp0@ff910000 {
+ compatible = "rockchip,rk3399-cif-isp";
+ reg = <0x0 0xff910000 0x0 0x4000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_ISP0>,
+ <&cru ACLK_ISP0_WRAPPER>,
+ <&cru HCLK_ISP0_WRAPPER>;
+ clock-names = "isp", "aclk", "hclk";
+ iommus = <&isp0_mmu>;
+ phys = <&mipi_dphy_rx0>;
+ phy-names = "dphy";
+ power-domains = <&power RK3399_PD_ISP0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
+ isp0_mmu: iommu@ff914000 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3399_PD_ISP0>;
+ rockchip,disable-mmu-reset;
+ };
+
+ isp1: isp1@ff920000 {
+ compatible = "rockchip,rk3399-cif-isp";
+ reg = <0x0 0xff920000 0x0 0x4000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_ISP1>,
+ <&cru ACLK_ISP1_WRAPPER>,
+ <&cru HCLK_ISP1_WRAPPER>;
+ clock-names = "isp", "aclk", "hclk";
+ iommus = <&isp1_mmu>;
+ phys = <&mipi_dsi1>;
+ phy-names = "dphy";
+ power-domains = <&power RK3399_PD_ISP1>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
+ isp1_mmu: iommu@ff924000 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3399_PD_ISP1>;
+ rockchip,disable-mmu-reset;
+ };
+
+ hdmi_sound: hdmi-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "hdmi-sound";
+ status = "disabled";
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s2>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&hdmi>;
+ };
+ };
+
+ hdmi: hdmi@ff940000 {
+ compatible = "rockchip,rk3399-dw-hdmi";
+ reg = <0x0 0xff940000 0x0 0x20000>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_HDMI_CTRL>,
+ <&cru SCLK_HDMI_SFR>,
+ <&cru SCLK_HDMI_CEC>,
+ <&cru PCLK_VIO_GRF>,
+ <&cru PLL_VPLL>;
+ clock-names = "iahb", "isfr", "cec", "grf", "ref";
+ power-domains = <&power RK3399_PD_HDCP>;
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_hdmi>;
+ };
+ hdmi_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_hdmi>;
+ };
+ };
+
+ hdmi_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ mipi_dsi: dsi@ff960000 {
+ compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
+ reg = <0x0 0xff960000 0x0 0x8000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
+ <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
+ clock-names = "ref", "pclk", "phy_cfg", "grf";
+ power-domains = <&power RK3399_PD_VIO>;
+ resets = <&cru SRST_P_MIPI_DSI0>;
+ reset-names = "apb";
+ rockchip,grf = <&grf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_mipi>;
+ };
+
+ mipi_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_mipi>;
+ };
+ };
+
+ mipi_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ mipi_dsi1: dsi@ff968000 {
+ compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
+ reg = <0x0 0xff968000 0x0 0x8000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI1>,
+ <&cru SCLK_DPHY_TX1RX1_CFG>, <&cru PCLK_VIO_GRF>;
+ clock-names = "ref", "pclk", "phy_cfg", "grf";
+ power-domains = <&power RK3399_PD_VIO>;
+ resets = <&cru SRST_P_MIPI_DSI1>;
+ reset-names = "apb";
+ rockchip,grf = <&grf>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi1_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi1_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_mipi1>;
+ };
+
+ mipi1_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_mipi1>;
+ };
+ };
+
+ mipi1_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ edp: dp@ff970000 {
+ compatible = "rockchip,rk3399-edp";
+ reg = <0x0 0xff970000 0x0 0x8000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>;
+ clock-names = "dp", "pclk", "grf";
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_hpd>;
+ power-domains = <&power RK3399_PD_EDP>;
+ resets = <&cru SRST_P_EDP_CTRL>;
+ reset-names = "dp";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_edp>;
+ };
+
+ edp_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_edp>;
+ };
+ };
+
+ edp_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ gpu: gpu@ff9a0000 {
+ compatible = "rockchip,rk3399-mali", "arm,mali-t860";
+ reg = <0x0 0xff9a0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
+ clocks = <&cru ACLK_GPU>;
+ #cooling-cells = <2>;
+ dynamic-power-coefficient = <2640>;
+ power-domains = <&power RK3399_PD_GPU>;
+ status = "disabled";
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3399-pinctrl";
+ rockchip,grf = <&grf>;
+ rockchip,pmu = <&pmugrf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@ff720000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff720000 0x0 0x100>;
+ clocks = <&pmucru PCLK_GPIO0_PMU>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH 0>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio1: gpio@ff730000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff730000 0x0 0x100>;
+ clocks = <&pmucru PCLK_GPIO1_PMU>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH 0>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio2: gpio@ff780000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff780000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO2>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH 0>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio3: gpio@ff788000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff788000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO3>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio4: gpio@ff790000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff790000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO4>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ pcfg_pull_up: pcfg-pull-up {
+ bias-pull-up;
+ };
+
+ pcfg_pull_down: pcfg-pull-down {
+ bias-pull-down;
+ };
+
+ pcfg_pull_none: pcfg-pull-none {
+ bias-disable;
+ };
+
+ pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ pcfg_pull_none_13ma: pcfg-pull-none-13ma {
+ bias-disable;
+ drive-strength = <13>;
+ };
+
+ pcfg_pull_none_18ma: pcfg-pull-none-18ma {
+ bias-disable;
+ drive-strength = <18>;
+ };
+
+ pcfg_pull_none_20ma: pcfg-pull-none-20ma {
+ bias-disable;
+ drive-strength = <20>;
+ };
+
+ pcfg_pull_up_2ma: pcfg-pull-up-2ma {
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pcfg_pull_up_8ma: pcfg-pull-up-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_18ma: pcfg-pull-up-18ma {
+ bias-pull-up;
+ drive-strength = <18>;
+ };
+
+ pcfg_pull_up_20ma: pcfg-pull-up-20ma {
+ bias-pull-up;
+ drive-strength = <20>;
+ };
+
+ pcfg_pull_down_4ma: pcfg-pull-down-4ma {
+ bias-pull-down;
+ drive-strength = <4>;
+ };
+
+ pcfg_pull_down_8ma: pcfg-pull-down-8ma {
+ bias-pull-down;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_down_12ma: pcfg-pull-down-12ma {
+ bias-pull-down;
+ drive-strength = <12>;
+ };
+
+ pcfg_pull_down_18ma: pcfg-pull-down-18ma {
+ bias-pull-down;
+ drive-strength = <18>;
+ };
+
+ pcfg_pull_down_20ma: pcfg-pull-down-20ma {
+ bias-pull-down;
+ drive-strength = <20>;
+ };
+
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+
+ pcfg_input_enable: pcfg-input-enable {
+ input-enable;
+ };
+
+ pcfg_input_pull_up: pcfg-input-pull-up {
+ input-enable;
+ bias-pull-up;
+ };
+
+ pcfg_input_pull_down: pcfg-input-pull-down {
+ input-enable;
+ bias-pull-down;
+ };
+
+ clock {
+ clk_32k: clk-32k {
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
+ };
+ };
+
+ cif {
+ cif_clkin: cif-clkin {
+ rockchip,pins =
+ <2 RK_PB2 3 &pcfg_pull_none>;
+ };
+
+ cif_clkouta: cif-clkouta {
+ rockchip,pins =
+ <2 RK_PB3 3 &pcfg_pull_none>;
+ };
+ };
+
+ edp {
+ edp_hpd: edp-hpd {
+ rockchip,pins =
+ <4 RK_PC7 2 &pcfg_pull_none>;
+ };
+ };
+
+ gmac {
+ rgmii_pins: rgmii-pins {
+ rockchip,pins =
+ /* mac_txclk */
+ <3 RK_PC1 1 &pcfg_pull_none_13ma>,
+ /* mac_rxclk */
+ <3 RK_PB6 1 &pcfg_pull_none>,
+ /* mac_mdio */
+ <3 RK_PB5 1 &pcfg_pull_none>,
+ /* mac_txen */
+ <3 RK_PB4 1 &pcfg_pull_none_13ma>,
+ /* mac_clk */
+ <3 RK_PB3 1 &pcfg_pull_none>,
+ /* mac_rxdv */
+ <3 RK_PB1 1 &pcfg_pull_none>,
+ /* mac_mdc */
+ <3 RK_PB0 1 &pcfg_pull_none>,
+ /* mac_rxd1 */
+ <3 RK_PA7 1 &pcfg_pull_none>,
+ /* mac_rxd0 */
+ <3 RK_PA6 1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <3 RK_PA5 1 &pcfg_pull_none_13ma>,
+ /* mac_txd0 */
+ <3 RK_PA4 1 &pcfg_pull_none_13ma>,
+ /* mac_rxd3 */
+ <3 RK_PA3 1 &pcfg_pull_none>,
+ /* mac_rxd2 */
+ <3 RK_PA2 1 &pcfg_pull_none>,
+ /* mac_txd3 */
+ <3 RK_PA1 1 &pcfg_pull_none_13ma>,
+ /* mac_txd2 */
+ <3 RK_PA0 1 &pcfg_pull_none_13ma>;
+ };
+
+ rmii_pins: rmii-pins {
+ rockchip,pins =
+ /* mac_mdio */
+ <3 RK_PB5 1 &pcfg_pull_none>,
+ /* mac_txen */
+ <3 RK_PB4 1 &pcfg_pull_none_13ma>,
+ /* mac_clk */
+ <3 RK_PB3 1 &pcfg_pull_none>,
+ /* mac_rxer */
+ <3 RK_PB2 1 &pcfg_pull_none>,
+ /* mac_rxdv */
+ <3 RK_PB1 1 &pcfg_pull_none>,
+ /* mac_mdc */
+ <3 RK_PB0 1 &pcfg_pull_none>,
+ /* mac_rxd1 */
+ <3 RK_PA7 1 &pcfg_pull_none>,
+ /* mac_rxd0 */
+ <3 RK_PA6 1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <3 RK_PA5 1 &pcfg_pull_none_13ma>,
+ /* mac_txd0 */
+ <3 RK_PA4 1 &pcfg_pull_none_13ma>;
+ };
+ };
+
+ i2c0 {
+ i2c0_xfer: i2c0-xfer {
+ rockchip,pins =
+ <1 RK_PB7 2 &pcfg_pull_none>,
+ <1 RK_PC0 2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c1 {
+ i2c1_xfer: i2c1-xfer {
+ rockchip,pins =
+ <4 RK_PA2 1 &pcfg_pull_none>,
+ <4 RK_PA1 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c2 {
+ i2c2_xfer: i2c2-xfer {
+ rockchip,pins =
+ <2 RK_PA1 2 &pcfg_pull_none_12ma>,
+ <2 RK_PA0 2 &pcfg_pull_none_12ma>;
+ };
+ };
+
+ i2c3 {
+ i2c3_xfer: i2c3-xfer {
+ rockchip,pins =
+ <4 RK_PC1 1 &pcfg_pull_none>,
+ <4 RK_PC0 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c4 {
+ i2c4_xfer: i2c4-xfer {
+ rockchip,pins =
+ <1 RK_PB4 1 &pcfg_pull_none>,
+ <1 RK_PB3 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c5 {
+ i2c5_xfer: i2c5-xfer {
+ rockchip,pins =
+ <3 RK_PB3 2 &pcfg_pull_none>,
+ <3 RK_PB2 2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c6 {
+ i2c6_xfer: i2c6-xfer {
+ rockchip,pins =
+ <2 RK_PB2 2 &pcfg_pull_none>,
+ <2 RK_PB1 2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c7 {
+ i2c7_xfer: i2c7-xfer {
+ rockchip,pins =
+ <2 RK_PB0 2 &pcfg_pull_none>,
+ <2 RK_PA7 2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c8 {
+ i2c8_xfer: i2c8-xfer {
+ rockchip,pins =
+ <1 RK_PC5 1 &pcfg_pull_none>,
+ <1 RK_PC4 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2s0 {
+ i2s0_2ch_bus: i2s0-2ch-bus {
+ rockchip,pins =
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ i2s0_8ch_bus: i2s0-8ch-bus {
+ rockchip,pins =
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD4 1 &pcfg_pull_none>,
+ <3 RK_PD5 1 &pcfg_pull_none>,
+ <3 RK_PD6 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ i2s0_8ch_bus_bclk_off: i2s0-8ch-bus-bclk-off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD4 1 &pcfg_pull_none>,
+ <3 RK_PD5 1 &pcfg_pull_none>,
+ <3 RK_PD6 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2s1 {
+ i2s1_2ch_bus: i2s1-2ch-bus {
+ rockchip,pins =
+ <4 RK_PA3 1 &pcfg_pull_none>,
+ <4 RK_PA4 1 &pcfg_pull_none>,
+ <4 RK_PA5 1 &pcfg_pull_none>,
+ <4 RK_PA6 1 &pcfg_pull_none>,
+ <4 RK_PA7 1 &pcfg_pull_none>;
+ };
+
+ i2s1_2ch_bus_bclk_off: i2s1-2ch-bus-bclk-off {
+ rockchip,pins =
+ <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>,
+ <4 RK_PA4 1 &pcfg_pull_none>,
+ <4 RK_PA5 1 &pcfg_pull_none>,
+ <4 RK_PA6 1 &pcfg_pull_none>,
+ <4 RK_PA7 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdio0 {
+ sdio0_bus1: sdio0-bus1 {
+ rockchip,pins =
+ <2 RK_PC4 1 &pcfg_pull_up>;
+ };
+
+ sdio0_bus4: sdio0-bus4 {
+ rockchip,pins =
+ <2 RK_PC4 1 &pcfg_pull_up>,
+ <2 RK_PC5 1 &pcfg_pull_up>,
+ <2 RK_PC6 1 &pcfg_pull_up>,
+ <2 RK_PC7 1 &pcfg_pull_up>;
+ };
+
+ sdio0_cmd: sdio0-cmd {
+ rockchip,pins =
+ <2 RK_PD0 1 &pcfg_pull_up>;
+ };
+
+ sdio0_clk: sdio0-clk {
+ rockchip,pins =
+ <2 RK_PD1 1 &pcfg_pull_none>;
+ };
+
+ sdio0_cd: sdio0-cd {
+ rockchip,pins =
+ <2 RK_PD2 1 &pcfg_pull_up>;
+ };
+
+ sdio0_pwr: sdio0-pwr {
+ rockchip,pins =
+ <2 RK_PD3 1 &pcfg_pull_up>;
+ };
+
+ sdio0_bkpwr: sdio0-bkpwr {
+ rockchip,pins =
+ <2 RK_PD4 1 &pcfg_pull_up>;
+ };
+
+ sdio0_wp: sdio0-wp {
+ rockchip,pins =
+ <0 RK_PA3 1 &pcfg_pull_up>;
+ };
+
+ sdio0_int: sdio0-int {
+ rockchip,pins =
+ <0 RK_PA4 1 &pcfg_pull_up>;
+ };
+ };
+
+ sdmmc {
+ sdmmc_bus1: sdmmc-bus1 {
+ rockchip,pins =
+ <4 RK_PB0 1 &pcfg_pull_up>;
+ };
+
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins =
+ <4 RK_PB0 1 &pcfg_pull_up>,
+ <4 RK_PB1 1 &pcfg_pull_up>,
+ <4 RK_PB2 1 &pcfg_pull_up>,
+ <4 RK_PB3 1 &pcfg_pull_up>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins =
+ <4 RK_PB4 1 &pcfg_pull_none>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins =
+ <4 RK_PB5 1 &pcfg_pull_up>;
+ };
+
+ sdmmc_cd: sdmmc-cd {
+ rockchip,pins =
+ <0 RK_PA7 1 &pcfg_pull_up>;
+ };
+
+ sdmmc_wp: sdmmc-wp {
+ rockchip,pins =
+ <0 RK_PB0 1 &pcfg_pull_up>;
+ };
+ };
+
+ suspend {
+ ap_pwroff: ap-pwroff {
+ rockchip,pins = <1 RK_PA5 1 &pcfg_pull_none>;
+ };
+
+ ddrio_pwroff: ddrio-pwroff {
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>;
+ };
+ };
+
+ spdif {
+ spdif_bus: spdif-bus {
+ rockchip,pins =
+ <4 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ spdif_bus_1: spdif-bus-1 {
+ rockchip,pins =
+ <3 RK_PC0 3 &pcfg_pull_none>;
+ };
+ };
+
+ spi0 {
+ spi0_clk: spi0-clk {
+ rockchip,pins =
+ <3 RK_PA6 2 &pcfg_pull_up>;
+ };
+ spi0_cs0: spi0-cs0 {
+ rockchip,pins =
+ <3 RK_PA7 2 &pcfg_pull_up>;
+ };
+ spi0_cs1: spi0-cs1 {
+ rockchip,pins =
+ <3 RK_PB0 2 &pcfg_pull_up>;
+ };
+ spi0_tx: spi0-tx {
+ rockchip,pins =
+ <3 RK_PA5 2 &pcfg_pull_up>;
+ };
+ spi0_rx: spi0-rx {
+ rockchip,pins =
+ <3 RK_PA4 2 &pcfg_pull_up>;
+ };
+ };
+
+ spi1 {
+ spi1_clk: spi1-clk {
+ rockchip,pins =
+ <1 RK_PB1 2 &pcfg_pull_up>;
+ };
+ spi1_cs0: spi1-cs0 {
+ rockchip,pins =
+ <1 RK_PB2 2 &pcfg_pull_up>;
+ };
+ spi1_rx: spi1-rx {
+ rockchip,pins =
+ <1 RK_PA7 2 &pcfg_pull_up>;
+ };
+ spi1_tx: spi1-tx {
+ rockchip,pins =
+ <1 RK_PB0 2 &pcfg_pull_up>;
+ };
+ };
+
+ spi2 {
+ spi2_clk: spi2-clk {
+ rockchip,pins =
+ <2 RK_PB3 1 &pcfg_pull_up>;
+ };
+ spi2_cs0: spi2-cs0 {
+ rockchip,pins =
+ <2 RK_PB4 1 &pcfg_pull_up>;
+ };
+ spi2_rx: spi2-rx {
+ rockchip,pins =
+ <2 RK_PB1 1 &pcfg_pull_up>;
+ };
+ spi2_tx: spi2-tx {
+ rockchip,pins =
+ <2 RK_PB2 1 &pcfg_pull_up>;
+ };
+ };
+
+ spi3 {
+ spi3_clk: spi3-clk {
+ rockchip,pins =
+ <1 RK_PC1 1 &pcfg_pull_up>;
+ };
+ spi3_cs0: spi3-cs0 {
+ rockchip,pins =
+ <1 RK_PC2 1 &pcfg_pull_up>;
+ };
+ spi3_rx: spi3-rx {
+ rockchip,pins =
+ <1 RK_PB7 1 &pcfg_pull_up>;
+ };
+ spi3_tx: spi3-tx {
+ rockchip,pins =
+ <1 RK_PC0 1 &pcfg_pull_up>;
+ };
+ };
+
+ spi4 {
+ spi4_clk: spi4-clk {
+ rockchip,pins =
+ <3 RK_PA2 2 &pcfg_pull_up>;
+ };
+ spi4_cs0: spi4-cs0 {
+ rockchip,pins =
+ <3 RK_PA3 2 &pcfg_pull_up>;
+ };
+ spi4_rx: spi4-rx {
+ rockchip,pins =
+ <3 RK_PA0 2 &pcfg_pull_up>;
+ };
+ spi4_tx: spi4-tx {
+ rockchip,pins =
+ <3 RK_PA1 2 &pcfg_pull_up>;
+ };
+ };
+
+ spi5 {
+ spi5_clk: spi5-clk {
+ rockchip,pins =
+ <2 RK_PC6 2 &pcfg_pull_up>;
+ };
+ spi5_cs0: spi5-cs0 {
+ rockchip,pins =
+ <2 RK_PC7 2 &pcfg_pull_up>;
+ };
+ spi5_rx: spi5-rx {
+ rockchip,pins =
+ <2 RK_PC4 2 &pcfg_pull_up>;
+ };
+ spi5_tx: spi5-tx {
+ rockchip,pins =
+ <2 RK_PC5 2 &pcfg_pull_up>;
+ };
+ };
+
+ testclk {
+ test_clkout0: test-clkout0 {
+ rockchip,pins =
+ <0 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ test_clkout1: test-clkout1 {
+ rockchip,pins =
+ <2 RK_PD1 2 &pcfg_pull_none>;
+ };
+
+ test_clkout2: test-clkout2 {
+ rockchip,pins =
+ <0 RK_PB0 3 &pcfg_pull_none>;
+ };
+ };
+
+ tsadc {
+ otp_pin: otp-pin {
+ rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ otp_out: otp-out {
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>;
+ };
+ };
+
+ uart0 {
+ uart0_xfer: uart0-xfer {
+ rockchip,pins =
+ <2 RK_PC0 1 &pcfg_pull_up>,
+ <2 RK_PC1 1 &pcfg_pull_none>;
+ };
+
+ uart0_cts: uart0-cts {
+ rockchip,pins =
+ <2 RK_PC2 1 &pcfg_pull_none>;
+ };
+
+ uart0_rts: uart0-rts {
+ rockchip,pins =
+ <2 RK_PC3 1 &pcfg_pull_none>;
+ };
+ };
+
+ uart1 {
+ uart1_xfer: uart1-xfer {
+ rockchip,pins =
+ <3 RK_PB4 2 &pcfg_pull_up>,
+ <3 RK_PB5 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart2a {
+ uart2a_xfer: uart2a-xfer {
+ rockchip,pins =
+ <4 RK_PB0 2 &pcfg_pull_up>,
+ <4 RK_PB1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart2b {
+ uart2b_xfer: uart2b-xfer {
+ rockchip,pins =
+ <4 RK_PC0 2 &pcfg_pull_up>,
+ <4 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart2c {
+ uart2c_xfer: uart2c-xfer {
+ rockchip,pins =
+ <4 RK_PC3 1 &pcfg_pull_up>,
+ <4 RK_PC4 1 &pcfg_pull_none>;
+ };
+ };
+
+ uart3 {
+ uart3_xfer: uart3-xfer {
+ rockchip,pins =
+ <3 RK_PB6 2 &pcfg_pull_up>,
+ <3 RK_PB7 2 &pcfg_pull_none>;
+ };
+
+ uart3_cts: uart3-cts {
+ rockchip,pins =
+ <3 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ uart3_rts: uart3-rts {
+ rockchip,pins =
+ <3 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart4 {
+ uart4_xfer: uart4-xfer {
+ rockchip,pins =
+ <1 RK_PA7 1 &pcfg_pull_up>,
+ <1 RK_PB0 1 &pcfg_pull_none>;
+ };
+ };
+
+ uarthdcp {
+ uarthdcp_xfer: uarthdcp-xfer {
+ rockchip,pins =
+ <4 RK_PC5 2 &pcfg_pull_up>,
+ <4 RK_PC6 2 &pcfg_pull_none>;
+ };
+ };
+
+ pwm0 {
+ pwm0_pin: pwm0-pin {
+ rockchip,pins =
+ <4 RK_PC2 1 &pcfg_pull_none>;
+ };
+
+ pwm0_pin_pull_down: pwm0-pin-pull-down {
+ rockchip,pins =
+ <4 RK_PC2 1 &pcfg_pull_down>;
+ };
+
+ vop0_pwm_pin: vop0-pwm-pin {
+ rockchip,pins =
+ <4 RK_PC2 2 &pcfg_pull_none>;
+ };
+
+ vop1_pwm_pin: vop1-pwm-pin {
+ rockchip,pins =
+ <4 RK_PC2 3 &pcfg_pull_none>;
+ };
+ };
+
+ pwm1 {
+ pwm1_pin: pwm1-pin {
+ rockchip,pins =
+ <4 RK_PC6 1 &pcfg_pull_none>;
+ };
+
+ pwm1_pin_pull_down: pwm1-pin-pull-down {
+ rockchip,pins =
+ <4 RK_PC6 1 &pcfg_pull_down>;
+ };
+ };
+
+ pwm2 {
+ pwm2_pin: pwm2-pin {
+ rockchip,pins =
+ <1 RK_PC3 1 &pcfg_pull_none>;
+ };
+
+ pwm2_pin_pull_down: pwm2-pin-pull-down {
+ rockchip,pins =
+ <1 RK_PC3 1 &pcfg_pull_down>;
+ };
+ };
+
+ pwm3a {
+ pwm3a_pin: pwm3a-pin {
+ rockchip,pins =
+ <0 RK_PA6 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm3b {
+ pwm3b_pin: pwm3b-pin {
+ rockchip,pins =
+ <1 RK_PB6 1 &pcfg_pull_none>;
+ };
+ };
+
+ hdmi {
+ hdmi_i2c_xfer: hdmi-i2c-xfer {
+ rockchip,pins =
+ <4 RK_PC1 3 &pcfg_pull_none>,
+ <4 RK_PC0 3 &pcfg_pull_none>;
+ };
+
+ hdmi_cec: hdmi-cec {
+ rockchip,pins =
+ <4 RK_PC7 1 &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_clkreqn_cpm: pci-clkreqn-cpm {
+ rockchip,pins =
+ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_clkreqnb_cpm: pci-clkreqnb-cpm {
+ rockchip,pins =
+ <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts
index 173da81fc231..1489eb32e266 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts
@@ -8,7 +8,6 @@
#include <dt-bindings/pwm/pwm.h>
#include <dt-bindings/usb/pd.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "OPEN AI LAB EAIDK-610";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 55eca7a50a1f..54e67d2dac09 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -5,7 +5,7 @@
/dts-v1/;
#include <dt-bindings/pwm/pwm.h>
-#include "rk3399.dtsi"
+#include "rk3399-base.dtsi"
/ {
model = "Rockchip RK3399 Evaluation Board";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 260415d99aeb..f4491317a1b0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -9,7 +9,6 @@
#include <dt-bindings/pwm/pwm.h>
#include <dt-bindings/usb/pd.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Firefly-RK3399 Board";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 3cd63d1e8f15..776c0eec04d7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -6,8 +6,7 @@
*/
#include <dt-bindings/input/input.h>
-#include "rk3399.dtsi"
-#include "rk3399-op1-opp.dtsi"
+#include "rk3399-op1.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
index 4a6ab6c2e24c..5a02502d21cd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
@@ -4,7 +4,6 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Hugsun X99 TV BOX";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index 9d9297bc5f04..c772985ae4e5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -9,7 +9,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pwm/pwm.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 9586bb12a5d8..b0c1fb0b704e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -12,7 +12,6 @@
/dts-v1/;
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Kobol Helios64";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
index cb69e2145fa9..f12b1eb00575 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
@@ -8,7 +8,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pwm/pwm.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Leez RK3399 P710";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index b7f1e47978a6..7debc4a1b5fa 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -14,7 +14,6 @@
/dts-v1/;
#include <dt-bindings/input/linux-event-codes.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi
index 783120e9cebe..b24bff511513 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi
@@ -3,6 +3,8 @@
* Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
*/
+#include "rk3399-base.dtsi"
+
/ {
cluster0_opp: opp-table-0 {
compatible = "operating-points-v2";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
deleted file mode 100644
index fee5e7111279..000000000000
--- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
- */
-
-/ {
- cluster0_opp: opp-table-0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <825000 825000 1250000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <825000 825000 1250000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <850000 850000 1250000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <925000 925000 1250000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1000000 1000000 1250000>;
- };
- opp05 {
- opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <1125000 1125000 1250000>;
- };
- };
-
- cluster1_opp: opp-table-1 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <825000 825000 1250000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <825000 825000 1250000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <825000 825000 1250000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <875000 875000 1250000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <950000 950000 1250000>;
- };
- opp05 {
- opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <1025000 1025000 1250000>;
- };
- opp06 {
- opp-hz = /bits/ 64 <1608000000>;
- opp-microvolt = <1100000 1100000 1250000>;
- };
- opp07 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <1200000 1200000 1250000>;
- };
- };
-
- gpu_opp_table: opp-table-2 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <200000000>;
- opp-microvolt = <825000 825000 1150000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <297000000>;
- opp-microvolt = <825000 825000 1150000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <400000000>;
- opp-microvolt = <825000 825000 1150000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <875000 875000 1150000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <925000 925000 1150000>;
- };
- opp05 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1100000 1100000 1150000>;
- };
- };
-};
-
-&cpu_l0 {
- operating-points-v2 = <&cluster0_opp>;
-};
-
-&cpu_l1 {
- operating-points-v2 = <&cluster0_opp>;
-};
-
-&cpu_l2 {
- operating-points-v2 = <&cluster0_opp>;
-};
-
-&cpu_l3 {
- operating-points-v2 = <&cluster0_opp>;
-};
-
-&cpu_b0 {
- operating-points-v2 = <&cluster1_opp>;
-};
-
-&cpu_b1 {
- operating-points-v2 = <&cluster1_opp>;
-};
-
-&gpu {
- operating-points-v2 = <&gpu_opp_table>;
-};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index e26e2d86279c..07ec33f3f55f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -10,7 +10,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include "dt-bindings/usb/pd.h"
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Orange Pi RK3399 Board";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 294eb2de263d..a5a7e374bc59 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -12,7 +12,6 @@
#include <dt-bindings/usb/pd.h>
#include <dt-bindings/leds/common.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Pine64 Pinebook Pro";
@@ -32,12 +31,12 @@
backlight: edp-backlight {
compatible = "pwm-backlight";
power-supply = <&vcc_12v>;
- pwms = <&pwm0 0 740740 0>;
+ pwms = <&pwm0 0 125000 0>;
};
bat: battery {
compatible = "simple-battery";
- charge-full-design-microamp-hours = <9800000>;
+ charge-full-design-microamp-hours = <10000000>;
voltage-max-design-microvolt = <4350000>;
voltage-min-design-microvolt = <3000000>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index ef754ea30a94..1a44582a49fb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
@@ -14,7 +14,6 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Pine64 PinePhone Pro";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index ccbe3a7a1d2c..650b1ba9c192 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -5,7 +5,6 @@
#include <dt-bindings/pwm/pwm.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
aliases {
@@ -154,6 +153,22 @@
};
};
+&gpio3 {
+ /*
+ * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
+ * eMMC and SPI flash powered-down initially (in fact it keeps the
+	 * reset signal asserted). The BIOS_DISABLE_OVERRIDE pin allows
+	 * overriding that signal so that the eMMC and SPI flash can be used
+	 * regardless of its state.
+ */
+ bios-disable-override-hog {
+ gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
+ gpio-hog;
+ line-name = "bios_disable_override";
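+		/*
+		 * output-high refers to the logical level: as the GPIO is
+		 * flagged GPIO_ACTIVE_LOW above, the pin is driven physically
+		 * low.
+		 */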
+ output-high;
+ };
+};
+
&gmac {
assigned-clocks = <&cru SCLK_RMII_SRC>;
assigned-clock-parents = <&clkin_gmac>;
@@ -409,6 +424,7 @@
&i2s0 {
pinctrl-0 = <&i2s0_2ch_bus>;
+ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
status = "okay";
@@ -417,8 +433,8 @@
/*
 * As Q7 specifies neither a global nor an RX clock for I2S, these
 * signals are not used. Furthermore, I2S0_LRCK_RX is used as a GPIO.
- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
- * conflicts.
+ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
+ * definitions to prevent conflicts.
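+ * (Referencing the nodes by label here replaces the rockchip,pins
+ * property they were given in rk3399-base.dtsi.)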
*/
&i2s0_2ch_bus {
rockchip,pins =
@@ -428,6 +444,14 @@
<3 RK_PD7 1 &pcfg_pull_none>;
};
+&i2s0_2ch_bus_bclk_off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>;
+};
+
&io_domains {
status = "okay";
bt656-supply = <&vcc_1v8>;
@@ -449,9 +473,14 @@
&pinctrl {
pinctrl-names = "default";
- pinctrl-0 = <&q7_thermal_pin>;
+ pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
gpios {
+ bios_disable_override_hog_pin: bios-disable-override-hog-pin {
+ rockchip,pins =
+ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index ca7a446b6568..d95b1cde1fc3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -7,7 +7,6 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/pwm/pwm.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Firefly ROC-RK3399-PC Board";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
index 972aea843afd..475d57f64d58 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
@@ -7,8 +7,7 @@
/dts-v1/;
#include <dt-bindings/leds/common.h>
-#include "rk3399.dtsi"
-#include "rk3399-t-opp.dtsi"
+#include "rk3399-t.dtsi"
/ {
model = "Radxa ROCK 4C+";
@@ -53,6 +52,21 @@
};
};
+ rk809-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rk809 1>;
@@ -201,10 +215,13 @@
interrupt-parent = <&gpio1>;
interrupts = <RK_PC5 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
+ clock-names = "mclk";
+ clocks = <&cru SCLK_I2S_8CH_OUT>;
clock-output-names = "rk808-clkout1", "rk808-clkout2";
pinctrl-names = "default";
- pinctrl-0 = <&pmic_int_l>;
+ pinctrl-0 = <&pmic_int_l>, <&i2s_8ch_mclk>;
rockchip,system-power-controller;
+ #sound-dai-cells = <0>;
wakeup-source;
vcc1-supply = <&vcc5v0_sys>;
@@ -446,6 +463,26 @@
status = "okay";
};
+&i2s0 {
+ status = "okay";
+};
+
+&i2s0_8ch_bus {
+ rockchip,pins =
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>;
+};
+
+&i2s0_8ch_bus_bclk_off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>;
+};
+
&i2s2 {
status = "okay";
};
@@ -473,6 +510,12 @@
};
};
+ i2s0 {
+ i2s_8ch_mclk: i2s-8ch-mclk {
+ rockchip,pins = <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+ };
+
leds {
user_led1: user-led1 {
rockchip,pins = <3 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts
index 7cfc198bbae7..a8b8d4acc337 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts
@@ -5,8 +5,8 @@
*/
/dts-v1/;
+#include "rk3399-t.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-t-opp.dtsi"
/ {
model = "Radxa ROCK 4SE";
@@ -17,14 +17,6 @@
};
};
-&pinctrl {
- usb2 {
- vcc5v0_host_en: vcc5v0-host-en {
- rockchip,pins = <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-};
-
&sdio0 {
status = "okay";
@@ -56,10 +48,3 @@
vddio-supply = <&vcc_1v8>;
};
};
-
-&vcc5v0_host {
- enable-active-high;
- gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&vcc5v0_host_en>;
-};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index b9d6284bb804..9666504cd1c1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -4,11 +4,9 @@
* Copyright (c) 2019 Pragnesh Patel <Pragnesh_Patel@mentor.com>
*/
-/dts-v1/;
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pwm/pwm.h>
-#include "rk3399.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts
index f5a68d8d072d..725ac3c1f6f6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts
@@ -5,8 +5,8 @@
*/
/dts-v1/;
+#include "rk3399-op1.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-op1-opp.dtsi"
/ {
model = "Radxa ROCK Pi 4A+";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts
index c68f45849c44..32d6bce5e3d4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts
@@ -5,8 +5,8 @@
*/
/dts-v1/;
+#include "rk3399.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Radxa ROCK Pi 4A";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts
index 8a17c1eaae15..682e8b7297c1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts
@@ -5,8 +5,8 @@
*/
/dts-v1/;
+#include "rk3399-op1.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-op1-opp.dtsi"
/ {
model = "Radxa ROCK Pi 4B+";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
index 6ea3180e57ca..55285c7c6e54 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
@@ -5,8 +5,8 @@
*/
/dts-v1/;
+#include "rk3399.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Radxa ROCK Pi 4B";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
index 5274938bf1b8..82ad2ca6b5c2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
@@ -6,8 +6,8 @@
*/
/dts-v1/;
+#include "rk3399.dtsi"
#include "rk3399-rock-pi-4.dtsi"
-#include "rk3399-opp.dtsi"
/ {
model = "Radxa ROCK Pi 4C";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index c920ddf44baf..8146f870d2bd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -5,9 +5,8 @@
* Copyright (c) 2018 Linaro Ltd.
*/
-#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
+#include "rk3399.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index f30b82a10ca3..11d99d8b34a2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -7,7 +7,6 @@
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/pwm/pwm.h>
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index b3ef1c85e754..31832aae9ab6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -6,7 +6,6 @@
#include "dt-bindings/pwm/pwm.h"
#include "dt-bindings/input/input.h"
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
/ {
compatible = "rockchip,rk3399-sapphire", "rockchip,rk3399";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-t.dtsi
index 1ababadda9df..72989f03fcb1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-t.dtsi
@@ -4,6 +4,8 @@
* Copyright (c) 2022 Radxa Limited
*/
+#include "rk3399-base.dtsi"
+
/ {
cluster0_opp: opp-table-0 {
compatible = "operating-points-v2";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 9d5f5b083e3c..6bc1249d99e6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1,3019 +1,135 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
- * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <dt-bindings/clock/rk3399-cru.h>
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/rockchip.h>
-#include <dt-bindings/power/rk3399-power.h>
-#include <dt-bindings/thermal/thermal.h>
+#include "rk3399-base.dtsi"
/ {
- compatible = "rockchip,rk3399";
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
- interrupt-parent = <&gic>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- gpio0 = &gpio0;
- gpio1 = &gpio1;
- gpio2 = &gpio2;
- gpio3 = &gpio3;
- gpio4 = &gpio4;
- i2c0 = &i2c0;
- i2c1 = &i2c1;
- i2c2 = &i2c2;
- i2c3 = &i2c3;
- i2c4 = &i2c4;
- i2c5 = &i2c5;
- i2c6 = &i2c6;
- i2c7 = &i2c7;
- i2c8 = &i2c8;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- spi0 = &spi0;
- spi1 = &spi1;
- spi2 = &spi2;
- spi3 = &spi3;
- spi4 = &spi4;
- spi5 = &spi5;
- };
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 { /* Cortex-A53 */
- core0 {
- cpu = <&cpu_l0>;
- };
- core1 {
- cpu = <&cpu_l1>;
- };
- core2 {
- cpu = <&cpu_l2>;
- };
- core3 {
- cpu = <&cpu_l3>;
- };
- };
-
- cluster1 { /* Cortex-A72 */
- core0 {
- cpu = <&cpu_b0>;
- };
- core1 {
- cpu = <&cpu_b1>;
- };
- };
+ opp00 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <825000 825000 1250000>;
+ clock-latency-ns = <40000>;
};
-
- cpu_l0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x0>;
- enable-method = "psci";
- capacity-dmips-mhz = <485>;
- clocks = <&cru ARMCLKL>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <100>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0x8000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l>;
- };
-
- cpu_l1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x1>;
- enable-method = "psci";
- capacity-dmips-mhz = <485>;
- clocks = <&cru ARMCLKL>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <100>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0x8000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l>;
+ opp01 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <825000 825000 1250000>;
};
-
- cpu_l2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x2>;
- enable-method = "psci";
- capacity-dmips-mhz = <485>;
- clocks = <&cru ARMCLKL>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <100>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0x8000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l>;
- };
-
- cpu_l3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x3>;
- enable-method = "psci";
- capacity-dmips-mhz = <485>;
- clocks = <&cru ARMCLKL>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <100>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0x8000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <128>;
- next-level-cache = <&l2_cache_l>;
- };
-
- cpu_b0: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x0 0x100>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&cru ARMCLKB>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <436>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0xC000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b>;
-
- thermal-idle {
- #cooling-cells = <2>;
- duration-us = <10000>;
- exit-latency-us = <500>;
- };
- };
-
- cpu_b1: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a72";
- reg = <0x0 0x101>;
- enable-method = "psci";
- capacity-dmips-mhz = <1024>;
- clocks = <&cru ARMCLKB>;
- #cooling-cells = <2>; /* min followed by max */
- dynamic-power-coefficient = <436>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
- i-cache-size = <0xC000>;
- i-cache-line-size = <64>;
- i-cache-sets = <256>;
- d-cache-size = <0x8000>;
- d-cache-line-size = <64>;
- d-cache-sets = <256>;
- next-level-cache = <&l2_cache_b>;
-
- thermal-idle {
- #cooling-cells = <2>;
- duration-us = <10000>;
- exit-latency-us = <500>;
- };
+ opp02 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <850000 850000 1250000>;
};
-
- l2_cache_l: l2-cache-cluster0 {
- compatible = "cache";
- cache-level = <2>;
- cache-unified;
- cache-size = <0x80000>;
- cache-line-size = <64>;
- cache-sets = <512>;
+ opp03 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <925000 925000 1250000>;
};
-
- l2_cache_b: l2-cache-cluster1 {
- compatible = "cache";
- cache-level = <2>;
- cache-unified;
- cache-size = <0x100000>;
- cache-line-size = <64>;
- cache-sets = <1024>;
+ opp04 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1000000 1000000 1250000>;
};
-
- idle-states {
- entry-method = "psci";
-
- CPU_SLEEP: cpu-sleep {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x0010000>;
- entry-latency-us = <120>;
- exit-latency-us = <250>;
- min-residency-us = <900>;
- };
-
- CLUSTER_SLEEP: cluster-sleep {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x1010000>;
- entry-latency-us = <400>;
- exit-latency-us = <500>;
- min-residency-us = <2000>;
- };
+ opp05 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1125000 1125000 1250000>;
};
};
- display-subsystem {
- compatible = "rockchip,display-subsystem";
- ports = <&vopl_out>, <&vopb_out>;
- };
-
- dmc: memory-controller {
- compatible = "rockchip,rk3399-dmc";
- rockchip,pmu = <&pmugrf>;
- devfreq-events = <&dfi>;
- clocks = <&cru SCLK_DDRC>;
- clock-names = "dmc_clk";
- status = "disabled";
- };
+ cluster1_opp: opp-table-1 {
+ compatible = "operating-points-v2";
+ opp-shared;
- pmu_a53 {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster0>;
- };
-
- pmu_a72 {
- compatible = "arm,cortex-a72-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster1>;
- };
-
- psci {
- compatible = "arm,psci-1.0";
- method = "smc";
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW 0>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
- arm,no-tick-in-suspend;
- };
-
- xin24m: xin24m {
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "xin24m";
- #clock-cells = <0>;
- };
-
- pcie0: pcie@f8000000 {
- compatible = "rockchip,rk3399-pcie";
- reg = <0x0 0xf8000000 0x0 0x2000000>,
- <0x0 0xfd000000 0x0 0x1000000>;
- reg-names = "axi-base", "apb-base";
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- aspm-no-l0s;
- bus-range = <0x0 0x1f>;
- clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
- <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
- clock-names = "aclk", "aclk-perf",
- "hclk", "pm";
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "legacy", "client";
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie0_intc 0>,
- <0 0 0 2 &pcie0_intc 1>,
- <0 0 0 3 &pcie0_intc 2>,
- <0 0 0 4 &pcie0_intc 3>;
- max-link-speed = <1>;
- msi-map = <0x0 &its 0x0 0x1000>;
- phys = <&pcie_phy 0>, <&pcie_phy 1>,
- <&pcie_phy 2>, <&pcie_phy 3>;
- phy-names = "pcie-phy-0", "pcie-phy-1",
- "pcie-phy-2", "pcie-phy-3";
- ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>,
- <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
- resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
- <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
- <&cru SRST_A_PCIE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
- "pm", "pclk", "aclk";
- status = "disabled";
-
- pcie0_intc: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
+ opp00 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <825000 825000 1250000>;
+ clock-latency-ns = <40000>;
};
- };
-
- pcie0_ep: pcie-ep@f8000000 {
- compatible = "rockchip,rk3399-pcie-ep";
- reg = <0x0 0xfd000000 0x0 0x1000000>,
- <0x0 0xfa000000 0x0 0x2000000>;
- reg-names = "apb-base", "mem-base";
- clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
- <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
- clock-names = "aclk", "aclk-perf",
- "hclk", "pm";
- max-functions = /bits/ 8 <8>;
- num-lanes = <4>;
- resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
- <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
- <&cru SRST_A_PCIE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
- "pm", "pclk", "aclk";
- phys = <&pcie_phy 0>, <&pcie_phy 1>,
- <&pcie_phy 2>, <&pcie_phy 3>;
- phy-names = "pcie-phy-0", "pcie-phy-1",
- "pcie-phy-2", "pcie-phy-3";
- rockchip,max-outbound-regions = <32>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_clkreqnb_cpm>;
- status = "disabled";
- };
-
- gmac: ethernet@fe300000 {
- compatible = "rockchip,rk3399-gmac";
- reg = <0x0 0xfe300000 0x0 0x10000>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "macirq";
- clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>,
- <&cru SCLK_MAC_TX>, <&cru SCLK_MACREF>,
- <&cru SCLK_MACREF_OUT>, <&cru ACLK_GMAC>,
- <&cru PCLK_GMAC>;
- clock-names = "stmmaceth", "mac_clk_rx",
- "mac_clk_tx", "clk_mac_ref",
- "clk_mac_refout", "aclk_mac",
- "pclk_mac";
- power-domains = <&power RK3399_PD_GMAC>;
- resets = <&cru SRST_A_GMAC>;
- reset-names = "stmmaceth";
- rockchip,grf = <&grf>;
- snps,txpbl = <0x4>;
- status = "disabled";
- };
-
- sdio0: mmc@fe310000 {
- compatible = "rockchip,rk3399-dw-mshc",
- "rockchip,rk3288-dw-mshc";
- reg = <0x0 0xfe310000 0x0 0x4000>;
- interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH 0>;
- max-frequency = <150000000>;
- clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
- <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
- clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
- fifo-depth = <0x100>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- resets = <&cru SRST_SDIO0>;
- reset-names = "reset";
- status = "disabled";
- };
-
- sdmmc: mmc@fe320000 {
- compatible = "rockchip,rk3399-dw-mshc",
- "rockchip,rk3288-dw-mshc";
- reg = <0x0 0xfe320000 0x0 0x4000>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH 0>;
- max-frequency = <150000000>;
- assigned-clocks = <&cru HCLK_SD>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
- <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
- clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
- fifo-depth = <0x100>;
- power-domains = <&power RK3399_PD_SD>;
- resets = <&cru SRST_SDMMC>;
- reset-names = "reset";
- status = "disabled";
- };
-
- sdhci: mmc@fe330000 {
- compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1";
- reg = <0x0 0xfe330000 0x0 0x10000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH 0>;
- arasan,soc-ctl-syscon = <&grf>;
- assigned-clocks = <&cru SCLK_EMMC>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>;
- clock-names = "clk_xin", "clk_ahb";
- clock-output-names = "emmc_cardclock";
- #clock-cells = <0>;
- phys = <&emmc_phy>;
- phy-names = "phy_arasan";
- power-domains = <&power RK3399_PD_EMMC>;
- disable-cqe-dcmd;
- status = "disabled";
- };
-
- usb_host0_ehci: usb@fe380000 {
- compatible = "generic-ehci";
- reg = <0x0 0xfe380000 0x0 0x20000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
- <&u2phy0>;
- phys = <&u2phy0_host>;
- phy-names = "usb";
- status = "disabled";
- };
-
- usb_host0_ohci: usb@fe3a0000 {
- compatible = "generic-ohci";
- reg = <0x0 0xfe3a0000 0x0 0x20000>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
- <&u2phy0>;
- phys = <&u2phy0_host>;
- phy-names = "usb";
- status = "disabled";
- };
-
- usb_host1_ehci: usb@fe3c0000 {
- compatible = "generic-ehci";
- reg = <0x0 0xfe3c0000 0x0 0x20000>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
- <&u2phy1>;
- phys = <&u2phy1_host>;
- phy-names = "usb";
- status = "disabled";
- };
-
- usb_host1_ohci: usb@fe3e0000 {
- compatible = "generic-ohci";
- reg = <0x0 0xfe3e0000 0x0 0x20000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
- <&u2phy1>;
- phys = <&u2phy1_host>;
- phy-names = "usb";
- status = "disabled";
- };
-
- debug@fe430000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe430000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_L>;
- clock-names = "apb_pclk";
- cpu = <&cpu_l0>;
- };
-
- debug@fe432000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe432000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_L>;
- clock-names = "apb_pclk";
- cpu = <&cpu_l1>;
- };
-
- debug@fe434000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe434000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_L>;
- clock-names = "apb_pclk";
- cpu = <&cpu_l2>;
- };
-
- debug@fe436000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe436000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_L>;
- clock-names = "apb_pclk";
- cpu = <&cpu_l3>;
- };
-
- debug@fe610000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe610000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_B>;
- clock-names = "apb_pclk";
- cpu = <&cpu_b0>;
- };
-
- debug@fe710000 {
- compatible = "arm,coresight-cpu-debug", "arm,primecell";
- reg = <0 0xfe710000 0 0x1000>;
- clocks = <&cru PCLK_COREDBG_B>;
- clock-names = "apb_pclk";
- cpu = <&cpu_b1>;
- };
-
- usbdrd3_0: usb@fe800000 {
- compatible = "rockchip,rk3399-dwc3";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
- <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_RKSOC_AXI_PERF>,
- <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>;
- clock-names = "ref_clk", "suspend_clk",
- "bus_clk", "aclk_usb3_rksoc_axi_perf",
- "aclk_usb3", "grf_clk";
- resets = <&cru SRST_A_USB3_OTG0>;
- reset-names = "usb3-otg";
- status = "disabled";
-
- usbdrd_dwc3_0: usb@fe800000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xfe800000 0x0 0x100000>;
- interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_USB3OTG0_REF>, <&cru ACLK_USB3OTG0>,
- <&cru SCLK_USB3OTG0_SUSPEND>;
- clock-names = "ref", "bus_early", "suspend";
- dr_mode = "otg";
- phys = <&u2phy0_otg>, <&tcphy0_usb3>;
- phy-names = "usb2-phy", "usb3-phy";
- phy_type = "utmi_wide";
- snps,dis_enblslpm_quirk;
- snps,dis-u2-freeclk-exists-quirk;
- snps,dis_u2_susphy_quirk;
- snps,dis-del-phy-power-chg-quirk;
- snps,dis-tx-ipgap-linecheck-quirk;
- power-domains = <&power RK3399_PD_USB3>;
- status = "disabled";
+ opp01 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <825000 825000 1250000>;
};
- };
-
- usbdrd3_1: usb@fe900000 {
- compatible = "rockchip,rk3399-dwc3";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>,
- <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_RKSOC_AXI_PERF>,
- <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>;
- clock-names = "ref_clk", "suspend_clk",
- "bus_clk", "aclk_usb3_rksoc_axi_perf",
- "aclk_usb3", "grf_clk";
- resets = <&cru SRST_A_USB3_OTG1>;
- reset-names = "usb3-otg";
- status = "disabled";
-
- usbdrd_dwc3_1: usb@fe900000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xfe900000 0x0 0x100000>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_USB3OTG1_REF>, <&cru ACLK_USB3OTG1>,
- <&cru SCLK_USB3OTG1_SUSPEND>;
- clock-names = "ref", "bus_early", "suspend";
- dr_mode = "otg";
- phys = <&u2phy1_otg>, <&tcphy1_usb3>;
- phy-names = "usb2-phy", "usb3-phy";
- phy_type = "utmi_wide";
- snps,dis_enblslpm_quirk;
- snps,dis-u2-freeclk-exists-quirk;
- snps,dis_u2_susphy_quirk;
- snps,dis-del-phy-power-chg-quirk;
- snps,dis-tx-ipgap-linecheck-quirk;
- power-domains = <&power RK3399_PD_USB3>;
- status = "disabled";
+ opp02 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <825000 825000 1250000>;
};
- };
-
- cdn_dp: dp@fec00000 {
- compatible = "rockchip,rk3399-cdn-dp";
- reg = <0x0 0xfec00000 0x0 0x100000>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
- assigned-clocks = <&cru SCLK_DP_CORE>, <&cru SCLK_SPDIF_REC_DPTX>;
- assigned-clock-rates = <100000000>, <200000000>;
- clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>,
- <&cru SCLK_SPDIF_REC_DPTX>, <&cru PCLK_VIO_GRF>;
- clock-names = "core-clk", "pclk", "spdif", "grf";
- phys = <&tcphy0_dp>, <&tcphy1_dp>;
- power-domains = <&power RK3399_PD_HDCP>;
- resets = <&cru SRST_DPTX_SPDIF_REC>, <&cru SRST_P_UPHY0_DPTX>,
- <&cru SRST_P_UPHY0_APB>, <&cru SRST_DP_CORE>;
- reset-names = "spdif", "dptx", "apb", "core";
- rockchip,grf = <&grf>;
- #sound-dai-cells = <1>;
- status = "disabled";
-
- ports {
- dp_in: port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- dp_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_dp>;
- };
-
- dp_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_dp>;
- };
- };
+ opp03 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <875000 875000 1250000>;
};
- };
-
- gic: interrupt-controller@fee00000 {
- compatible = "arm,gic-v3";
- #interrupt-cells = <4>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- interrupt-controller;
-
- reg = <0x0 0xfee00000 0 0x10000>, /* GICD */
- <0x0 0xfef00000 0 0xc0000>, /* GICR */
- <0x0 0xfff00000 0 0x10000>, /* GICC */
- <0x0 0xfff10000 0 0x10000>, /* GICH */
- <0x0 0xfff20000 0 0x10000>; /* GICV */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
- its: msi-controller@fee20000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- #msi-cells = <1>;
- reg = <0x0 0xfee20000 0x0 0x20000>;
+ opp04 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <950000 950000 1250000>;
};
-
- ppi-partitions {
- ppi_cluster0: interrupt-partition-0 {
- affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>;
- };
-
- ppi_cluster1: interrupt-partition-1 {
- affinity = <&cpu_b0 &cpu_b1>;
- };
+ opp05 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1025000 1025000 1250000>;
};
- };
-
- saradc: saradc@ff100000 {
- compatible = "rockchip,rk3399-saradc";
- reg = <0x0 0xff100000 0x0 0x100>;
- interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH 0>;
- #io-channel-cells = <1>;
- clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
- clock-names = "saradc", "apb_pclk";
- resets = <&cru SRST_P_SARADC>;
- reset-names = "saradc-apb";
- status = "disabled";
- };
-
- crypto0: crypto@ff8b0000 {
- compatible = "rockchip,rk3399-crypto";
- reg = <0x0 0xff8b0000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
- clock-names = "hclk_master", "hclk_slave", "sclk";
- resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
- reset-names = "master", "slave", "crypto-rst";
- };
-
- crypto1: crypto@ff8b8000 {
- compatible = "rockchip,rk3399-crypto";
- reg = <0x0 0xff8b8000 0x0 0x4000>;
- interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
- clock-names = "hclk_master", "hclk_slave", "sclk";
- resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
- reset-names = "master", "slave", "crypto-rst";
- };
-
- i2c1: i2c@ff110000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff110000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C1>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c2: i2c@ff120000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff120000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C2>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c3: i2c@ff130000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff130000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C3>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c5: i2c@ff140000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff140000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C5>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c6: i2c@ff150000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff150000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C6>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c6_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c7: i2c@ff160000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff160000 0x0 0x1000>;
- assigned-clocks = <&cru SCLK_I2C7>;
- assigned-clock-rates = <200000000>;
- clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c7_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- uart0: serial@ff180000 {
- compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff180000 0x0 0x100>;
- clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH 0>;
- reg-shift = <2>;
- reg-io-width = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_xfer>;
- status = "disabled";
- };
-
- uart1: serial@ff190000 {
- compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff190000 0x0 0x100>;
- clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH 0>;
- reg-shift = <2>;
- reg-io-width = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_xfer>;
- status = "disabled";
- };
-
- uart2: serial@ff1a0000 {
- compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff1a0000 0x0 0x100>;
- clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH 0>;
- reg-shift = <2>;
- reg-io-width = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&uart2c_xfer>;
- status = "disabled";
- };
-
- uart3: serial@ff1b0000 {
- compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff1b0000 0x0 0x100>;
- clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH 0>;
- reg-shift = <2>;
- reg-io-width = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&uart3_xfer>;
- status = "disabled";
- };
-
- spi0: spi@ff1c0000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff1c0000 0x0 0x1000>;
- clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_peri 10>, <&dmac_peri 11>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi1: spi@ff1d0000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff1d0000 0x0 0x1000>;
- clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_peri 12>, <&dmac_peri 13>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi2: spi@ff1e0000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff1e0000 0x0 0x1000>;
- clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_peri 14>, <&dmac_peri 15>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi4: spi@ff1f0000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff1f0000 0x0 0x1000>;
- clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_peri 18>, <&dmac_peri 19>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- spi5: spi@ff200000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff200000 0x0 0x1000>;
- clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_bus 8>, <&dmac_bus 9>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- thermal_zones: thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <100>;
- polling-delay = <1000>;
-
- thermal-sensors = <&tsadc 0>;
-
- trips {
- cpu_alert0: cpu_alert0 {
- temperature = <70000>;
- hysteresis = <2000>;
- type = "passive";
- };
- cpu_alert1: cpu_alert1 {
- temperature = <75000>;
- hysteresis = <2000>;
- type = "passive";
- };
- cpu_crit: cpu_crit {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&cpu_alert0>;
- cooling-device =
- <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- map1 {
- trip = <&cpu_alert1>;
- cooling-device =
- <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_l1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_l2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_l3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
+ opp06 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <1100000 1100000 1250000>;
};
-
- gpu_thermal: gpu-thermal {
- polling-delay-passive = <100>;
- polling-delay = <1000>;
-
- thermal-sensors = <&tsadc 1>;
-
- trips {
- gpu_alert0: gpu_alert0 {
- temperature = <75000>;
- hysteresis = <2000>;
- type = "passive";
- };
- gpu_crit: gpu_crit {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&gpu_alert0>;
- cooling-device =
- <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
+ opp07 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1200000 1200000 1250000>;
};
};
- tsadc: tsadc@ff260000 {
- compatible = "rockchip,rk3399-tsadc";
- reg = <0x0 0xff260000 0x0 0x100>;
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH 0>;
- assigned-clocks = <&cru SCLK_TSADC>;
- assigned-clock-rates = <750000>;
- clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
- clock-names = "tsadc", "apb_pclk";
- resets = <&cru SRST_TSADC>;
- reset-names = "tsadc-apb";
- rockchip,grf = <&grf>;
- rockchip,hw-tshut-temp = <95000>;
- pinctrl-names = "init", "default", "sleep";
- pinctrl-0 = <&otp_pin>;
- pinctrl-1 = <&otp_out>;
- pinctrl-2 = <&otp_pin>;
- #thermal-sensor-cells = <1>;
- status = "disabled";
- };
-
- qos_emmc: qos@ffa58000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa58000 0x0 0x20>;
- };
-
- qos_gmac: qos@ffa5c000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa5c000 0x0 0x20>;
- };
-
- qos_pcie: qos@ffa60080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa60080 0x0 0x20>;
- };
+ gpu_opp_table: opp-table-2 {
+ compatible = "operating-points-v2";
- qos_usb_host0: qos@ffa60100 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa60100 0x0 0x20>;
- };
-
- qos_usb_host1: qos@ffa60180 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa60180 0x0 0x20>;
- };
-
- qos_usb_otg0: qos@ffa70000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa70000 0x0 0x20>;
- };
-
- qos_usb_otg1: qos@ffa70080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa70080 0x0 0x20>;
- };
-
- qos_sd: qos@ffa74000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa74000 0x0 0x20>;
- };
-
- qos_sdioaudio: qos@ffa76000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa76000 0x0 0x20>;
- };
-
- qos_hdcp: qos@ffa90000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa90000 0x0 0x20>;
- };
-
- qos_iep: qos@ffa98000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffa98000 0x0 0x20>;
- };
-
- qos_isp0_m0: qos@ffaa0000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffaa0000 0x0 0x20>;
- };
-
- qos_isp0_m1: qos@ffaa0080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffaa0080 0x0 0x20>;
- };
-
- qos_isp1_m0: qos@ffaa8000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffaa8000 0x0 0x20>;
- };
-
- qos_isp1_m1: qos@ffaa8080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffaa8080 0x0 0x20>;
- };
-
- qos_rga_r: qos@ffab0000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffab0000 0x0 0x20>;
- };
-
- qos_rga_w: qos@ffab0080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffab0080 0x0 0x20>;
- };
-
- qos_video_m0: qos@ffab8000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffab8000 0x0 0x20>;
- };
-
- qos_video_m1_r: qos@ffac0000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffac0000 0x0 0x20>;
- };
-
- qos_video_m1_w: qos@ffac0080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffac0080 0x0 0x20>;
- };
-
- qos_vop_big_r: qos@ffac8000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffac8000 0x0 0x20>;
- };
-
- qos_vop_big_w: qos@ffac8080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffac8080 0x0 0x20>;
- };
-
- qos_vop_little: qos@ffad0000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffad0000 0x0 0x20>;
- };
-
- qos_perihp: qos@ffad8080 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffad8080 0x0 0x20>;
- };
-
- qos_gpu: qos@ffae0000 {
- compatible = "rockchip,rk3399-qos", "syscon";
- reg = <0x0 0xffae0000 0x0 0x20>;
- };
-
- pmu: power-management@ff310000 {
- compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd";
- reg = <0x0 0xff310000 0x0 0x1000>;
-
- /*
- * Note: RK3399 supports 6 voltage domains including VD_CORE_L,
- * VD_CORE_B, VD_CENTER, VD_GPU, VD_LOGIC and VD_PMU.
- * Some of the power domains are grouped together for every
- * voltage domain.
- * The detail contents as below.
- */
- power: power-controller {
- compatible = "rockchip,rk3399-power-controller";
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* These power domains are grouped by VD_CENTER */
- power-domain@RK3399_PD_IEP {
- reg = <RK3399_PD_IEP>;
- clocks = <&cru ACLK_IEP>,
- <&cru HCLK_IEP>;
- pm_qos = <&qos_iep>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_RGA {
- reg = <RK3399_PD_RGA>;
- clocks = <&cru ACLK_RGA>,
- <&cru HCLK_RGA>;
- pm_qos = <&qos_rga_r>,
- <&qos_rga_w>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_VCODEC {
- reg = <RK3399_PD_VCODEC>;
- clocks = <&cru ACLK_VCODEC>,
- <&cru HCLK_VCODEC>;
- pm_qos = <&qos_video_m0>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_VDU {
- reg = <RK3399_PD_VDU>;
- clocks = <&cru ACLK_VDU>,
- <&cru HCLK_VDU>,
- <&cru SCLK_VDU_CA>,
- <&cru SCLK_VDU_CORE>;
- pm_qos = <&qos_video_m1_r>,
- <&qos_video_m1_w>;
- #power-domain-cells = <0>;
- };
-
- /* These power domains are grouped by VD_GPU */
- power-domain@RK3399_PD_GPU {
- reg = <RK3399_PD_GPU>;
- clocks = <&cru ACLK_GPU>;
- pm_qos = <&qos_gpu>;
- #power-domain-cells = <0>;
- };
-
- /* These power domains are grouped by VD_LOGIC */
- power-domain@RK3399_PD_EDP {
- reg = <RK3399_PD_EDP>;
- clocks = <&cru PCLK_EDP_CTRL>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_EMMC {
- reg = <RK3399_PD_EMMC>;
- clocks = <&cru ACLK_EMMC>;
- pm_qos = <&qos_emmc>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_GMAC {
- reg = <RK3399_PD_GMAC>;
- clocks = <&cru ACLK_GMAC>,
- <&cru PCLK_GMAC>;
- pm_qos = <&qos_gmac>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_SD {
- reg = <RK3399_PD_SD>;
- clocks = <&cru HCLK_SDMMC>,
- <&cru SCLK_SDMMC>;
- pm_qos = <&qos_sd>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_SDIOAUDIO {
- reg = <RK3399_PD_SDIOAUDIO>;
- clocks = <&cru HCLK_SDIO>;
- pm_qos = <&qos_sdioaudio>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_TCPD0 {
- reg = <RK3399_PD_TCPD0>;
- clocks = <&cru SCLK_UPHY0_TCPDCORE>,
- <&cru SCLK_UPHY0_TCPDPHY_REF>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_TCPD1 {
- reg = <RK3399_PD_TCPD1>;
- clocks = <&cru SCLK_UPHY1_TCPDCORE>,
- <&cru SCLK_UPHY1_TCPDPHY_REF>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_USB3 {
- reg = <RK3399_PD_USB3>;
- clocks = <&cru ACLK_USB3>;
- pm_qos = <&qos_usb_otg0>,
- <&qos_usb_otg1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_VIO {
- reg = <RK3399_PD_VIO>;
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- power-domain@RK3399_PD_HDCP {
- reg = <RK3399_PD_HDCP>;
- clocks = <&cru ACLK_HDCP>,
- <&cru HCLK_HDCP>,
- <&cru PCLK_HDCP>;
- pm_qos = <&qos_hdcp>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_ISP0 {
- reg = <RK3399_PD_ISP0>;
- clocks = <&cru ACLK_ISP0>,
- <&cru HCLK_ISP0>;
- pm_qos = <&qos_isp0_m0>,
- <&qos_isp0_m1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_ISP1 {
- reg = <RK3399_PD_ISP1>;
- clocks = <&cru ACLK_ISP1>,
- <&cru HCLK_ISP1>;
- pm_qos = <&qos_isp1_m0>,
- <&qos_isp1_m1>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_VO {
- reg = <RK3399_PD_VO>;
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- power-domain@RK3399_PD_VOPB {
- reg = <RK3399_PD_VOPB>;
- clocks = <&cru ACLK_VOP0>,
- <&cru HCLK_VOP0>;
- pm_qos = <&qos_vop_big_r>,
- <&qos_vop_big_w>;
- #power-domain-cells = <0>;
- };
- power-domain@RK3399_PD_VOPL {
- reg = <RK3399_PD_VOPL>;
- clocks = <&cru ACLK_VOP1>,
- <&cru HCLK_VOP1>;
- pm_qos = <&qos_vop_little>;
- #power-domain-cells = <0>;
- };
- };
- };
+ opp00 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-microvolt = <825000 825000 1150000>;
};
- };
-
- pmugrf: syscon@ff320000 {
- compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd";
- reg = <0x0 0xff320000 0x0 0x1000>;
-
- pmu_io_domains: io-domains {
- compatible = "rockchip,rk3399-pmu-io-voltage-domain";
- status = "disabled";
+ opp01 {
+ opp-hz = /bits/ 64 <297000000>;
+ opp-microvolt = <825000 825000 1150000>;
};
- };
-
- spi3: spi@ff350000 {
- compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
- reg = <0x0 0xff350000 0x0 0x1000>;
- clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>;
- clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- uart4: serial@ff370000 {
- compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
- reg = <0x0 0xff370000 0x0 0x100>;
- clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH 0>;
- reg-shift = <2>;
- reg-io-width = <4>;
- pinctrl-names = "default";
- pinctrl-0 = <&uart4_xfer>;
- status = "disabled";
- };
-
- i2c0: i2c@ff3c0000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff3c0000 0x0 0x1000>;
- assigned-clocks = <&pmucru SCLK_I2C0_PMU>;
- assigned-clock-rates = <200000000>;
- clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c4: i2c@ff3d0000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff3d0000 0x0 0x1000>;
- assigned-clocks = <&pmucru SCLK_I2C4_PMU>;
- assigned-clock-rates = <200000000>;
- clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- i2c8: i2c@ff3e0000 {
- compatible = "rockchip,rk3399-i2c";
- reg = <0x0 0xff3e0000 0x0 0x1000>;
- assigned-clocks = <&pmucru SCLK_I2C8_PMU>;
- assigned-clock-rates = <200000000>;
- clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>;
- clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c8_xfer>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- pwm0: pwm@ff420000 {
- compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
- reg = <0x0 0xff420000 0x0 0x10>;
- #pwm-cells = <3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pin>;
- clocks = <&pmucru PCLK_RKPWM_PMU>;
- status = "disabled";
- };
-
- pwm1: pwm@ff420010 {
- compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
- reg = <0x0 0xff420010 0x0 0x10>;
- #pwm-cells = <3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pwm1_pin>;
- clocks = <&pmucru PCLK_RKPWM_PMU>;
- status = "disabled";
- };
-
- pwm2: pwm@ff420020 {
- compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
- reg = <0x0 0xff420020 0x0 0x10>;
- #pwm-cells = <3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pwm2_pin>;
- clocks = <&pmucru PCLK_RKPWM_PMU>;
- status = "disabled";
- };
-
- pwm3: pwm@ff420030 {
- compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
- reg = <0x0 0xff420030 0x0 0x10>;
- #pwm-cells = <3>;
- pinctrl-names = "default";
- pinctrl-0 = <&pwm3a_pin>;
- clocks = <&pmucru PCLK_RKPWM_PMU>;
- status = "disabled";
- };
-
- dfi: dfi@ff630000 {
- reg = <0x00 0xff630000 0x00 0x4000>;
- compatible = "rockchip,rk3399-dfi";
- rockchip,pmu = <&pmugrf>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_DDR_MON>;
- clock-names = "pclk_ddr_mon";
- };
-
- vpu: video-codec@ff650000 {
- compatible = "rockchip,rk3399-vpu";
- reg = <0x0 0xff650000 0x0 0x800>;
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vepu", "vdpu";
- clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
- clock-names = "aclk", "hclk";
- iommus = <&vpu_mmu>;
- power-domains = <&power RK3399_PD_VCODEC>;
- };
-
- vpu_mmu: iommu@ff650800 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff650800 0x0 0x40>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- power-domains = <&power RK3399_PD_VCODEC>;
- };
-
- vdec: video-codec@ff660000 {
- compatible = "rockchip,rk3399-vdec";
- reg = <0x0 0xff660000 0x0 0x480>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
- <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
- clock-names = "axi", "ahb", "cabac", "core";
- iommus = <&vdec_mmu>;
- power-domains = <&power RK3399_PD_VDU>;
- };
-
- vdec_mmu: iommu@ff660480 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>;
- clock-names = "aclk", "iface";
- power-domains = <&power RK3399_PD_VDU>;
- #iommu-cells = <0>;
- };
-
- iep_mmu: iommu@ff670800 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff670800 0x0 0x40>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_IEP>, <&cru HCLK_IEP>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- status = "disabled";
- };
-
- rga: rga@ff680000 {
- compatible = "rockchip,rk3399-rga";
- reg = <0x0 0xff680000 0x0 0x10000>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA_CORE>;
- clock-names = "aclk", "hclk", "sclk";
- resets = <&cru SRST_RGA_CORE>, <&cru SRST_A_RGA>, <&cru SRST_H_RGA>;
- reset-names = "core", "axi", "ahb";
- power-domains = <&power RK3399_PD_RGA>;
- };
-
- efuse0: efuse@ff690000 {
- compatible = "rockchip,rk3399-efuse";
- reg = <0x0 0xff690000 0x0 0x80>;
- #address-cells = <1>;
- #size-cells = <1>;
- clocks = <&cru PCLK_EFUSE1024NS>;
- clock-names = "pclk_efuse";
-
- /* Data cells */
- cpu_id: cpu-id@7 {
- reg = <0x07 0x10>;
- };
- cpub_leakage: cpu-leakage@17 {
- reg = <0x17 0x1>;
- };
- gpu_leakage: gpu-leakage@18 {
- reg = <0x18 0x1>;
- };
- center_leakage: center-leakage@19 {
- reg = <0x19 0x1>;
- };
- cpul_leakage: cpu-leakage@1a {
- reg = <0x1a 0x1>;
- };
- logic_leakage: logic-leakage@1b {
- reg = <0x1b 0x1>;
- };
- wafer_info: wafer-info@1c {
- reg = <0x1c 0x1>;
- };
- };
-
- dmac_bus: dma-controller@ff6d0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff6d0000 0x0 0x4000>;
- interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
- #dma-cells = <1>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC0_PERILP>;
- clock-names = "apb_pclk";
- };
-
- dmac_peri: dma-controller@ff6e0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff6e0000 0x0 0x4000>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>;
- #dma-cells = <1>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1_PERILP>;
- clock-names = "apb_pclk";
- };
-
- pmucru: clock-controller@ff750000 {
- compatible = "rockchip,rk3399-pmucru";
- reg = <0x0 0xff750000 0x0 0x1000>;
- clocks = <&xin24m>;
- clock-names = "xin24m";
- rockchip,grf = <&pmugrf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- assigned-clocks = <&pmucru PLL_PPLL>;
- assigned-clock-rates = <676000000>;
- };
-
- cru: clock-controller@ff760000 {
- compatible = "rockchip,rk3399-cru";
- reg = <0x0 0xff760000 0x0 0x1000>;
- clocks = <&xin24m>;
- clock-names = "xin24m";
- rockchip,grf = <&grf>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- assigned-clocks =
- <&cru PLL_GPLL>, <&cru PLL_CPLL>,
- <&cru PLL_NPLL>,
- <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>,
- <&cru PCLK_PERIHP>,
- <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>,
- <&cru PCLK_PERILP0>, <&cru ACLK_CCI>,
- <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>,
- <&cru ACLK_VIO>, <&cru ACLK_HDCP>,
- <&cru ACLK_GIC_PRE>,
- <&cru PCLK_DDR>,
- <&cru ACLK_VDU>;
- assigned-clock-rates =
- <594000000>, <800000000>,
- <1000000000>,
- <150000000>, <75000000>,
- <37500000>,
- <100000000>, <100000000>,
- <50000000>, <600000000>,
- <100000000>, <50000000>,
- <400000000>, <400000000>,
- <200000000>,
- <200000000>,
- <400000000>;
- };
-
- grf: syscon@ff770000 {
- compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
- reg = <0x0 0xff770000 0x0 0x10000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- io_domains: io-domains {
- compatible = "rockchip,rk3399-io-voltage-domain";
- status = "disabled";
+ opp02 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <825000 825000 1150000>;
};
-
- mipi_dphy_rx0: mipi-dphy-rx0 {
- compatible = "rockchip,rk3399-mipi-dphy-rx0";
- clocks = <&cru SCLK_MIPIDPHY_REF>,
- <&cru SCLK_DPHY_RX0_CFG>,
- <&cru PCLK_VIO_GRF>;
- clock-names = "dphy-ref", "dphy-cfg", "grf";
- power-domains = <&power RK3399_PD_VIO>;
- #phy-cells = <0>;
- status = "disabled";
+ opp03 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <875000 875000 1150000>;
};
-
- u2phy0: usb2phy@e450 {
- compatible = "rockchip,rk3399-usb2phy";
- reg = <0xe450 0x10>;
- clocks = <&cru SCLK_USB2PHY0_REF>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- clock-output-names = "clk_usbphy0_480m";
- status = "disabled";
-
- u2phy0_host: host-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "linestate";
- status = "disabled";
- };
-
- u2phy0_otg: otg-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "otg-bvalid", "otg-id",
- "linestate";
- status = "disabled";
- };
- };
-
- u2phy1: usb2phy@e460 {
- compatible = "rockchip,rk3399-usb2phy";
- reg = <0xe460 0x10>;
- clocks = <&cru SCLK_USB2PHY1_REF>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- clock-output-names = "clk_usbphy1_480m";
- status = "disabled";
-
- u2phy1_host: host-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "linestate";
- status = "disabled";
- };
-
- u2phy1_otg: otg-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "otg-bvalid", "otg-id",
- "linestate";
- status = "disabled";
- };
- };
-
- emmc_phy: phy@f780 {
- compatible = "rockchip,rk3399-emmc-phy";
- reg = <0xf780 0x24>;
- clocks = <&sdhci>;
- clock-names = "emmcclk";
- drive-impedance-ohm = <50>;
- #phy-cells = <0>;
- status = "disabled";
+ opp04 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <925000 925000 1150000>;
};
-
- pcie_phy: pcie-phy {
- compatible = "rockchip,rk3399-pcie-phy";
- clocks = <&cru SCLK_PCIEPHY_REF>;
- clock-names = "refclk";
- #phy-cells = <1>;
- resets = <&cru SRST_PCIEPHY>;
- reset-names = "phy";
- status = "disabled";
+ opp05 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1100000 1100000 1150000>;
};
};
+};
- tcphy0: phy@ff7c0000 {
- compatible = "rockchip,rk3399-typec-phy";
- reg = <0x0 0xff7c0000 0x0 0x40000>;
- clocks = <&cru SCLK_UPHY0_TCPDCORE>,
- <&cru SCLK_UPHY0_TCPDPHY_REF>;
- clock-names = "tcpdcore", "tcpdphy-ref";
- assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>;
- assigned-clock-rates = <50000000>;
- power-domains = <&power RK3399_PD_TCPD0>;
- resets = <&cru SRST_UPHY0>,
- <&cru SRST_UPHY0_PIPE_L00>,
- <&cru SRST_P_UPHY0_TCPHY>;
- reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
- rockchip,grf = <&grf>;
- status = "disabled";
-
- tcphy0_dp: dp-port {
- #phy-cells = <0>;
- };
-
- tcphy0_usb3: usb3-port {
- #phy-cells = <0>;
- };
- };
-
- tcphy1: phy@ff800000 {
- compatible = "rockchip,rk3399-typec-phy";
- reg = <0x0 0xff800000 0x0 0x40000>;
- clocks = <&cru SCLK_UPHY1_TCPDCORE>,
- <&cru SCLK_UPHY1_TCPDPHY_REF>;
- clock-names = "tcpdcore", "tcpdphy-ref";
- assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>;
- assigned-clock-rates = <50000000>;
- power-domains = <&power RK3399_PD_TCPD1>;
- resets = <&cru SRST_UPHY1>,
- <&cru SRST_UPHY1_PIPE_L00>,
- <&cru SRST_P_UPHY1_TCPHY>;
- reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
- rockchip,grf = <&grf>;
- status = "disabled";
-
- tcphy1_dp: dp-port {
- #phy-cells = <0>;
- };
-
- tcphy1_usb3: usb3-port {
- #phy-cells = <0>;
- };
- };
-
- watchdog@ff848000 {
- compatible = "rockchip,rk3399-wdt", "snps,dw-wdt";
- reg = <0x0 0xff848000 0x0 0x100>;
- clocks = <&cru PCLK_WDT>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH 0>;
- };
-
- rktimer: rktimer@ff850000 {
- compatible = "rockchip,rk3399-timer";
- reg = <0x0 0xff850000 0x0 0x1000>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
- clock-names = "pclk", "timer";
- };
-
- spdif: spdif@ff870000 {
- compatible = "rockchip,rk3399-spdif";
- reg = <0x0 0xff870000 0x0 0x1000>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_bus 7>;
- dma-names = "tx";
- clock-names = "mclk", "hclk";
- clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>;
- pinctrl-names = "default";
- pinctrl-0 = <&spdif_bus>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s0: i2s@ff880000 {
- compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
- reg = <0x0 0xff880000 0x0 0x1000>;
- rockchip,grf = <&grf>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_bus 0>, <&dmac_bus 1>;
- dma-names = "tx", "rx";
- clock-names = "i2s_clk", "i2s_hclk";
- clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>;
- pinctrl-names = "bclk_on", "bclk_off";
- pinctrl-0 = <&i2s0_8ch_bus>;
- pinctrl-1 = <&i2s0_8ch_bus_bclk_off>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s1: i2s@ff890000 {
- compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
- reg = <0x0 0xff890000 0x0 0x1000>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_bus 2>, <&dmac_bus 3>;
- dma-names = "tx", "rx";
- clock-names = "i2s_clk", "i2s_hclk";
- clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s1_2ch_bus>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2s2: i2s@ff8a0000 {
- compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
- reg = <0x0 0xff8a0000 0x0 0x1000>;
- interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH 0>;
- dmas = <&dmac_bus 4>, <&dmac_bus 5>;
- dma-names = "tx", "rx";
- clock-names = "i2s_clk", "i2s_hclk";
- clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>;
- power-domains = <&power RK3399_PD_SDIOAUDIO>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- vopl: vop@ff8f0000 {
- compatible = "rockchip,rk3399-vop-lit";
- reg = <0x0 0xff8f0000 0x0 0x2000>, <0x0 0xff8f2000 0x0 0x400>;
- interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH 0>;
- assigned-clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
- assigned-clock-rates = <400000000>, <100000000>;
- clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>;
- clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
- iommus = <&vopl_mmu>;
- power-domains = <&power RK3399_PD_VOPL>;
- resets = <&cru SRST_A_VOP1>, <&cru SRST_H_VOP1>, <&cru SRST_D_VOP1>;
- reset-names = "axi", "ahb", "dclk";
- status = "disabled";
-
- vopl_out: port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- vopl_out_mipi: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&mipi_in_vopl>;
- };
-
- vopl_out_edp: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&edp_in_vopl>;
- };
-
- vopl_out_hdmi: endpoint@2 {
- reg = <2>;
- remote-endpoint = <&hdmi_in_vopl>;
- };
-
- vopl_out_mipi1: endpoint@3 {
- reg = <3>;
- remote-endpoint = <&mipi1_in_vopl>;
- };
-
- vopl_out_dp: endpoint@4 {
- reg = <4>;
- remote-endpoint = <&dp_in_vopl>;
- };
- };
- };
-
- vopl_mmu: iommu@ff8f3f00 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff8f3f00 0x0 0x100>;
- interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
- clock-names = "aclk", "iface";
- power-domains = <&power RK3399_PD_VOPL>;
- #iommu-cells = <0>;
- status = "disabled";
- };
-
- vopb: vop@ff900000 {
- compatible = "rockchip,rk3399-vop-big";
- reg = <0x0 0xff900000 0x0 0x2000>, <0x0 0xff902000 0x0 0x1000>;
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>;
- assigned-clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>;
- assigned-clock-rates = <400000000>, <100000000>;
- clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
- clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
- iommus = <&vopb_mmu>;
- power-domains = <&power RK3399_PD_VOPB>;
- resets = <&cru SRST_A_VOP0>, <&cru SRST_H_VOP0>, <&cru SRST_D_VOP0>;
- reset-names = "axi", "ahb", "dclk";
- status = "disabled";
-
- vopb_out: port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- vopb_out_edp: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&edp_in_vopb>;
- };
-
- vopb_out_mipi: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&mipi_in_vopb>;
- };
-
- vopb_out_hdmi: endpoint@2 {
- reg = <2>;
- remote-endpoint = <&hdmi_in_vopb>;
- };
-
- vopb_out_mipi1: endpoint@3 {
- reg = <3>;
- remote-endpoint = <&mipi1_in_vopb>;
- };
-
- vopb_out_dp: endpoint@4 {
- reg = <4>;
- remote-endpoint = <&dp_in_vopb>;
- };
- };
- };
-
- vopb_mmu: iommu@ff903f00 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff903f00 0x0 0x100>;
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>;
- clock-names = "aclk", "iface";
- power-domains = <&power RK3399_PD_VOPB>;
- #iommu-cells = <0>;
- status = "disabled";
- };
-
- isp0: isp0@ff910000 {
- compatible = "rockchip,rk3399-cif-isp";
- reg = <0x0 0xff910000 0x0 0x4000>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_ISP0>,
- <&cru ACLK_ISP0_WRAPPER>,
- <&cru HCLK_ISP0_WRAPPER>;
- clock-names = "isp", "aclk", "hclk";
- iommus = <&isp0_mmu>;
- phys = <&mipi_dphy_rx0>;
- phy-names = "dphy";
- power-domains = <&power RK3399_PD_ISP0>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
- };
-
- isp0_mmu: iommu@ff914000 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- power-domains = <&power RK3399_PD_ISP0>;
- rockchip,disable-mmu-reset;
- };
-
- isp1: isp1@ff920000 {
- compatible = "rockchip,rk3399-cif-isp";
- reg = <0x0 0xff920000 0x0 0x4000>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_ISP1>,
- <&cru ACLK_ISP1_WRAPPER>,
- <&cru HCLK_ISP1_WRAPPER>;
- clock-names = "isp", "aclk", "hclk";
- iommus = <&isp1_mmu>;
- phys = <&mipi_dsi1>;
- phy-names = "dphy";
- power-domains = <&power RK3399_PD_ISP1>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
- };
-
- isp1_mmu: iommu@ff924000 {
- compatible = "rockchip,iommu";
- reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- power-domains = <&power RK3399_PD_ISP1>;
- rockchip,disable-mmu-reset;
- };
-
- hdmi_sound: hdmi-sound {
- compatible = "simple-audio-card";
- simple-audio-card,format = "i2s";
- simple-audio-card,mclk-fs = <256>;
- simple-audio-card,name = "hdmi-sound";
- status = "disabled";
-
- simple-audio-card,cpu {
- sound-dai = <&i2s2>;
- };
- simple-audio-card,codec {
- sound-dai = <&hdmi>;
- };
- };
-
- hdmi: hdmi@ff940000 {
- compatible = "rockchip,rk3399-dw-hdmi";
- reg = <0x0 0xff940000 0x0 0x20000>;
- reg-io-width = <4>;
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_HDMI_CTRL>,
- <&cru SCLK_HDMI_SFR>,
- <&cru SCLK_HDMI_CEC>,
- <&cru PCLK_VIO_GRF>,
- <&cru PLL_VPLL>;
- clock-names = "iahb", "isfr", "cec", "grf", "ref";
- power-domains = <&power RK3399_PD_HDCP>;
- rockchip,grf = <&grf>;
- #sound-dai-cells = <0>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- hdmi_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- hdmi_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_hdmi>;
- };
- hdmi_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_hdmi>;
- };
- };
-
- hdmi_out: port@1 {
- reg = <1>;
- };
- };
- };
-
- mipi_dsi: dsi@ff960000 {
- compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
- reg = <0x0 0xff960000 0x0 0x8000>;
- interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
- <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
- clock-names = "ref", "pclk", "phy_cfg", "grf";
- power-domains = <&power RK3399_PD_VIO>;
- resets = <&cru SRST_P_MIPI_DSI0>;
- reset-names = "apb";
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_mipi>;
- };
-
- mipi_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_mipi>;
- };
- };
-
- mipi_out: port@1 {
- reg = <1>;
- };
- };
- };
-
- mipi_dsi1: dsi@ff968000 {
- compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
- reg = <0x0 0xff968000 0x0 0x8000>;
- interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI1>,
- <&cru SCLK_DPHY_TX1RX1_CFG>, <&cru PCLK_VIO_GRF>;
- clock-names = "ref", "pclk", "phy_cfg", "grf";
- power-domains = <&power RK3399_PD_VIO>;
- resets = <&cru SRST_P_MIPI_DSI1>;
- reset-names = "apb";
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- #phy-cells = <0>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi1_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi1_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_mipi1>;
- };
-
- mipi1_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_mipi1>;
- };
- };
-
- mipi1_out: port@1 {
- reg = <1>;
- };
- };
- };
-
- edp: dp@ff970000 {
- compatible = "rockchip,rk3399-edp";
- reg = <0x0 0xff970000 0x0 0x8000>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>;
- clock-names = "dp", "pclk", "grf";
- pinctrl-names = "default";
- pinctrl-0 = <&edp_hpd>;
- power-domains = <&power RK3399_PD_EDP>;
- resets = <&cru SRST_P_EDP_CTRL>;
- reset-names = "dp";
- rockchip,grf = <&grf>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- edp_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- edp_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_edp>;
- };
-
- edp_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_edp>;
- };
- };
-
- edp_out: port@1 {
- reg = <1>;
- };
- };
- };
-
- gpu: gpu@ff9a0000 {
- compatible = "rockchip,rk3399-mali", "arm,mali-t860";
- reg = <0x0 0xff9a0000 0x0 0x10000>;
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "job", "mmu", "gpu";
- clocks = <&cru ACLK_GPU>;
- #cooling-cells = <2>;
- dynamic-power-coefficient = <2640>;
- power-domains = <&power RK3399_PD_GPU>;
- status = "disabled";
- };
-
- pinctrl: pinctrl {
- compatible = "rockchip,rk3399-pinctrl";
- rockchip,grf = <&grf>;
- rockchip,pmu = <&pmugrf>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- gpio0: gpio@ff720000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff720000 0x0 0x100>;
- clocks = <&pmucru PCLK_GPIO0_PMU>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH 0>;
-
- gpio-controller;
- #gpio-cells = <0x2>;
-
- interrupt-controller;
- #interrupt-cells = <0x2>;
- };
-
- gpio1: gpio@ff730000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff730000 0x0 0x100>;
- clocks = <&pmucru PCLK_GPIO1_PMU>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH 0>;
-
- gpio-controller;
- #gpio-cells = <0x2>;
-
- interrupt-controller;
- #interrupt-cells = <0x2>;
- };
-
- gpio2: gpio@ff780000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff780000 0x0 0x100>;
- clocks = <&cru PCLK_GPIO2>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH 0>;
-
- gpio-controller;
- #gpio-cells = <0x2>;
-
- interrupt-controller;
- #interrupt-cells = <0x2>;
- };
-
- gpio3: gpio@ff788000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff788000 0x0 0x100>;
- clocks = <&cru PCLK_GPIO3>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
-
- gpio-controller;
- #gpio-cells = <0x2>;
-
- interrupt-controller;
- #interrupt-cells = <0x2>;
- };
-
- gpio4: gpio@ff790000 {
- compatible = "rockchip,gpio-bank";
- reg = <0x0 0xff790000 0x0 0x100>;
- clocks = <&cru PCLK_GPIO4>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
-
- gpio-controller;
- #gpio-cells = <0x2>;
-
- interrupt-controller;
- #interrupt-cells = <0x2>;
- };
-
- pcfg_pull_up: pcfg-pull-up {
- bias-pull-up;
- };
-
- pcfg_pull_down: pcfg-pull-down {
- bias-pull-down;
- };
-
- pcfg_pull_none: pcfg-pull-none {
- bias-disable;
- };
-
- pcfg_pull_none_12ma: pcfg-pull-none-12ma {
- bias-disable;
- drive-strength = <12>;
- };
-
- pcfg_pull_none_13ma: pcfg-pull-none-13ma {
- bias-disable;
- drive-strength = <13>;
- };
-
- pcfg_pull_none_18ma: pcfg-pull-none-18ma {
- bias-disable;
- drive-strength = <18>;
- };
-
- pcfg_pull_none_20ma: pcfg-pull-none-20ma {
- bias-disable;
- drive-strength = <20>;
- };
-
- pcfg_pull_up_2ma: pcfg-pull-up-2ma {
- bias-pull-up;
- drive-strength = <2>;
- };
-
- pcfg_pull_up_8ma: pcfg-pull-up-8ma {
- bias-pull-up;
- drive-strength = <8>;
- };
-
- pcfg_pull_up_18ma: pcfg-pull-up-18ma {
- bias-pull-up;
- drive-strength = <18>;
- };
-
- pcfg_pull_up_20ma: pcfg-pull-up-20ma {
- bias-pull-up;
- drive-strength = <20>;
- };
-
- pcfg_pull_down_4ma: pcfg-pull-down-4ma {
- bias-pull-down;
- drive-strength = <4>;
- };
-
- pcfg_pull_down_8ma: pcfg-pull-down-8ma {
- bias-pull-down;
- drive-strength = <8>;
- };
-
- pcfg_pull_down_12ma: pcfg-pull-down-12ma {
- bias-pull-down;
- drive-strength = <12>;
- };
-
- pcfg_pull_down_18ma: pcfg-pull-down-18ma {
- bias-pull-down;
- drive-strength = <18>;
- };
-
- pcfg_pull_down_20ma: pcfg-pull-down-20ma {
- bias-pull-down;
- drive-strength = <20>;
- };
-
- pcfg_output_high: pcfg-output-high {
- output-high;
- };
-
- pcfg_output_low: pcfg-output-low {
- output-low;
- };
-
- pcfg_input_enable: pcfg-input-enable {
- input-enable;
- };
-
- pcfg_input_pull_up: pcfg-input-pull-up {
- input-enable;
- bias-pull-up;
- };
-
- pcfg_input_pull_down: pcfg-input-pull-down {
- input-enable;
- bias-pull-down;
- };
-
- clock {
- clk_32k: clk-32k {
- rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
- };
- };
-
- cif {
- cif_clkin: cif-clkin {
- rockchip,pins =
- <2 RK_PB2 3 &pcfg_pull_none>;
- };
-
- cif_clkouta: cif-clkouta {
- rockchip,pins =
- <2 RK_PB3 3 &pcfg_pull_none>;
- };
- };
-
- edp {
- edp_hpd: edp-hpd {
- rockchip,pins =
- <4 RK_PC7 2 &pcfg_pull_none>;
- };
- };
-
- gmac {
- rgmii_pins: rgmii-pins {
- rockchip,pins =
- /* mac_txclk */
- <3 RK_PC1 1 &pcfg_pull_none_13ma>,
- /* mac_rxclk */
- <3 RK_PB6 1 &pcfg_pull_none>,
- /* mac_mdio */
- <3 RK_PB5 1 &pcfg_pull_none>,
- /* mac_txen */
- <3 RK_PB4 1 &pcfg_pull_none_13ma>,
- /* mac_clk */
- <3 RK_PB3 1 &pcfg_pull_none>,
- /* mac_rxdv */
- <3 RK_PB1 1 &pcfg_pull_none>,
- /* mac_mdc */
- <3 RK_PB0 1 &pcfg_pull_none>,
- /* mac_rxd1 */
- <3 RK_PA7 1 &pcfg_pull_none>,
- /* mac_rxd0 */
- <3 RK_PA6 1 &pcfg_pull_none>,
- /* mac_txd1 */
- <3 RK_PA5 1 &pcfg_pull_none_13ma>,
- /* mac_txd0 */
- <3 RK_PA4 1 &pcfg_pull_none_13ma>,
- /* mac_rxd3 */
- <3 RK_PA3 1 &pcfg_pull_none>,
- /* mac_rxd2 */
- <3 RK_PA2 1 &pcfg_pull_none>,
- /* mac_txd3 */
- <3 RK_PA1 1 &pcfg_pull_none_13ma>,
- /* mac_txd2 */
- <3 RK_PA0 1 &pcfg_pull_none_13ma>;
- };
-
- rmii_pins: rmii-pins {
- rockchip,pins =
- /* mac_mdio */
- <3 RK_PB5 1 &pcfg_pull_none>,
- /* mac_txen */
- <3 RK_PB4 1 &pcfg_pull_none_13ma>,
- /* mac_clk */
- <3 RK_PB3 1 &pcfg_pull_none>,
- /* mac_rxer */
- <3 RK_PB2 1 &pcfg_pull_none>,
- /* mac_rxdv */
- <3 RK_PB1 1 &pcfg_pull_none>,
- /* mac_mdc */
- <3 RK_PB0 1 &pcfg_pull_none>,
- /* mac_rxd1 */
- <3 RK_PA7 1 &pcfg_pull_none>,
- /* mac_rxd0 */
- <3 RK_PA6 1 &pcfg_pull_none>,
- /* mac_txd1 */
- <3 RK_PA5 1 &pcfg_pull_none_13ma>,
- /* mac_txd0 */
- <3 RK_PA4 1 &pcfg_pull_none_13ma>;
- };
- };
-
- i2c0 {
- i2c0_xfer: i2c0-xfer {
- rockchip,pins =
- <1 RK_PB7 2 &pcfg_pull_none>,
- <1 RK_PC0 2 &pcfg_pull_none>;
- };
- };
-
- i2c1 {
- i2c1_xfer: i2c1-xfer {
- rockchip,pins =
- <4 RK_PA2 1 &pcfg_pull_none>,
- <4 RK_PA1 1 &pcfg_pull_none>;
- };
- };
-
- i2c2 {
- i2c2_xfer: i2c2-xfer {
- rockchip,pins =
- <2 RK_PA1 2 &pcfg_pull_none_12ma>,
- <2 RK_PA0 2 &pcfg_pull_none_12ma>;
- };
- };
-
- i2c3 {
- i2c3_xfer: i2c3-xfer {
- rockchip,pins =
- <4 RK_PC1 1 &pcfg_pull_none>,
- <4 RK_PC0 1 &pcfg_pull_none>;
- };
- };
-
- i2c4 {
- i2c4_xfer: i2c4-xfer {
- rockchip,pins =
- <1 RK_PB4 1 &pcfg_pull_none>,
- <1 RK_PB3 1 &pcfg_pull_none>;
- };
- };
-
- i2c5 {
- i2c5_xfer: i2c5-xfer {
- rockchip,pins =
- <3 RK_PB3 2 &pcfg_pull_none>,
- <3 RK_PB2 2 &pcfg_pull_none>;
- };
- };
-
- i2c6 {
- i2c6_xfer: i2c6-xfer {
- rockchip,pins =
- <2 RK_PB2 2 &pcfg_pull_none>,
- <2 RK_PB1 2 &pcfg_pull_none>;
- };
- };
-
- i2c7 {
- i2c7_xfer: i2c7-xfer {
- rockchip,pins =
- <2 RK_PB0 2 &pcfg_pull_none>,
- <2 RK_PA7 2 &pcfg_pull_none>;
- };
- };
-
- i2c8 {
- i2c8_xfer: i2c8-xfer {
- rockchip,pins =
- <1 RK_PC5 1 &pcfg_pull_none>,
- <1 RK_PC4 1 &pcfg_pull_none>;
- };
- };
-
- i2s0 {
- i2s0_2ch_bus: i2s0-2ch-bus {
- rockchip,pins =
- <3 RK_PD0 1 &pcfg_pull_none>,
- <3 RK_PD1 1 &pcfg_pull_none>,
- <3 RK_PD2 1 &pcfg_pull_none>,
- <3 RK_PD3 1 &pcfg_pull_none>,
- <3 RK_PD7 1 &pcfg_pull_none>,
- <4 RK_PA0 1 &pcfg_pull_none>;
- };
-
- i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
- rockchip,pins =
- <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
- <3 RK_PD1 1 &pcfg_pull_none>,
- <3 RK_PD2 1 &pcfg_pull_none>,
- <3 RK_PD3 1 &pcfg_pull_none>,
- <3 RK_PD7 1 &pcfg_pull_none>,
- <4 RK_PA0 1 &pcfg_pull_none>;
- };
-
- i2s0_8ch_bus: i2s0-8ch-bus {
- rockchip,pins =
- <3 RK_PD0 1 &pcfg_pull_none>,
- <3 RK_PD1 1 &pcfg_pull_none>,
- <3 RK_PD2 1 &pcfg_pull_none>,
- <3 RK_PD3 1 &pcfg_pull_none>,
- <3 RK_PD4 1 &pcfg_pull_none>,
- <3 RK_PD5 1 &pcfg_pull_none>,
- <3 RK_PD6 1 &pcfg_pull_none>,
- <3 RK_PD7 1 &pcfg_pull_none>,
- <4 RK_PA0 1 &pcfg_pull_none>;
- };
-
- i2s0_8ch_bus_bclk_off: i2s0-8ch-bus-bclk-off {
- rockchip,pins =
- <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
- <3 RK_PD1 1 &pcfg_pull_none>,
- <3 RK_PD2 1 &pcfg_pull_none>,
- <3 RK_PD3 1 &pcfg_pull_none>,
- <3 RK_PD4 1 &pcfg_pull_none>,
- <3 RK_PD5 1 &pcfg_pull_none>,
- <3 RK_PD6 1 &pcfg_pull_none>,
- <3 RK_PD7 1 &pcfg_pull_none>,
- <4 RK_PA0 1 &pcfg_pull_none>;
- };
- };
-
- i2s1 {
- i2s1_2ch_bus: i2s1-2ch-bus {
- rockchip,pins =
- <4 RK_PA3 1 &pcfg_pull_none>,
- <4 RK_PA4 1 &pcfg_pull_none>,
- <4 RK_PA5 1 &pcfg_pull_none>,
- <4 RK_PA6 1 &pcfg_pull_none>,
- <4 RK_PA7 1 &pcfg_pull_none>;
- };
-
- i2s1_2ch_bus_bclk_off: i2s1-2ch-bus-bclk-off {
- rockchip,pins =
- <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>,
- <4 RK_PA4 1 &pcfg_pull_none>,
- <4 RK_PA5 1 &pcfg_pull_none>,
- <4 RK_PA6 1 &pcfg_pull_none>,
- <4 RK_PA7 1 &pcfg_pull_none>;
- };
- };
-
- sdio0 {
- sdio0_bus1: sdio0-bus1 {
- rockchip,pins =
- <2 RK_PC4 1 &pcfg_pull_up>;
- };
-
- sdio0_bus4: sdio0-bus4 {
- rockchip,pins =
- <2 RK_PC4 1 &pcfg_pull_up>,
- <2 RK_PC5 1 &pcfg_pull_up>,
- <2 RK_PC6 1 &pcfg_pull_up>,
- <2 RK_PC7 1 &pcfg_pull_up>;
- };
-
- sdio0_cmd: sdio0-cmd {
- rockchip,pins =
- <2 RK_PD0 1 &pcfg_pull_up>;
- };
-
- sdio0_clk: sdio0-clk {
- rockchip,pins =
- <2 RK_PD1 1 &pcfg_pull_none>;
- };
-
- sdio0_cd: sdio0-cd {
- rockchip,pins =
- <2 RK_PD2 1 &pcfg_pull_up>;
- };
-
- sdio0_pwr: sdio0-pwr {
- rockchip,pins =
- <2 RK_PD3 1 &pcfg_pull_up>;
- };
-
- sdio0_bkpwr: sdio0-bkpwr {
- rockchip,pins =
- <2 RK_PD4 1 &pcfg_pull_up>;
- };
-
- sdio0_wp: sdio0-wp {
- rockchip,pins =
- <0 RK_PA3 1 &pcfg_pull_up>;
- };
-
- sdio0_int: sdio0-int {
- rockchip,pins =
- <0 RK_PA4 1 &pcfg_pull_up>;
- };
- };
-
- sdmmc {
- sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins =
- <4 RK_PB0 1 &pcfg_pull_up>;
- };
-
- sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins =
- <4 RK_PB0 1 &pcfg_pull_up>,
- <4 RK_PB1 1 &pcfg_pull_up>,
- <4 RK_PB2 1 &pcfg_pull_up>,
- <4 RK_PB3 1 &pcfg_pull_up>;
- };
-
- sdmmc_clk: sdmmc-clk {
- rockchip,pins =
- <4 RK_PB4 1 &pcfg_pull_none>;
- };
-
- sdmmc_cmd: sdmmc-cmd {
- rockchip,pins =
- <4 RK_PB5 1 &pcfg_pull_up>;
- };
-
- sdmmc_cd: sdmmc-cd {
- rockchip,pins =
- <0 RK_PA7 1 &pcfg_pull_up>;
- };
-
- sdmmc_wp: sdmmc-wp {
- rockchip,pins =
- <0 RK_PB0 1 &pcfg_pull_up>;
- };
- };
-
- suspend {
- ap_pwroff: ap-pwroff {
- rockchip,pins = <1 RK_PA5 1 &pcfg_pull_none>;
- };
-
- ddrio_pwroff: ddrio-pwroff {
- rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>;
- };
- };
-
- spdif {
- spdif_bus: spdif-bus {
- rockchip,pins =
- <4 RK_PC5 1 &pcfg_pull_none>;
- };
-
- spdif_bus_1: spdif-bus-1 {
- rockchip,pins =
- <3 RK_PC0 3 &pcfg_pull_none>;
- };
- };
-
- spi0 {
- spi0_clk: spi0-clk {
- rockchip,pins =
- <3 RK_PA6 2 &pcfg_pull_up>;
- };
- spi0_cs0: spi0-cs0 {
- rockchip,pins =
- <3 RK_PA7 2 &pcfg_pull_up>;
- };
- spi0_cs1: spi0-cs1 {
- rockchip,pins =
- <3 RK_PB0 2 &pcfg_pull_up>;
- };
- spi0_tx: spi0-tx {
- rockchip,pins =
- <3 RK_PA5 2 &pcfg_pull_up>;
- };
- spi0_rx: spi0-rx {
- rockchip,pins =
- <3 RK_PA4 2 &pcfg_pull_up>;
- };
- };
-
- spi1 {
- spi1_clk: spi1-clk {
- rockchip,pins =
- <1 RK_PB1 2 &pcfg_pull_up>;
- };
- spi1_cs0: spi1-cs0 {
- rockchip,pins =
- <1 RK_PB2 2 &pcfg_pull_up>;
- };
- spi1_rx: spi1-rx {
- rockchip,pins =
- <1 RK_PA7 2 &pcfg_pull_up>;
- };
- spi1_tx: spi1-tx {
- rockchip,pins =
- <1 RK_PB0 2 &pcfg_pull_up>;
- };
- };
-
- spi2 {
- spi2_clk: spi2-clk {
- rockchip,pins =
- <2 RK_PB3 1 &pcfg_pull_up>;
- };
- spi2_cs0: spi2-cs0 {
- rockchip,pins =
- <2 RK_PB4 1 &pcfg_pull_up>;
- };
- spi2_rx: spi2-rx {
- rockchip,pins =
- <2 RK_PB1 1 &pcfg_pull_up>;
- };
- spi2_tx: spi2-tx {
- rockchip,pins =
- <2 RK_PB2 1 &pcfg_pull_up>;
- };
- };
-
- spi3 {
- spi3_clk: spi3-clk {
- rockchip,pins =
- <1 RK_PC1 1 &pcfg_pull_up>;
- };
- spi3_cs0: spi3-cs0 {
- rockchip,pins =
- <1 RK_PC2 1 &pcfg_pull_up>;
- };
- spi3_rx: spi3-rx {
- rockchip,pins =
- <1 RK_PB7 1 &pcfg_pull_up>;
- };
- spi3_tx: spi3-tx {
- rockchip,pins =
- <1 RK_PC0 1 &pcfg_pull_up>;
- };
- };
-
- spi4 {
- spi4_clk: spi4-clk {
- rockchip,pins =
- <3 RK_PA2 2 &pcfg_pull_up>;
- };
- spi4_cs0: spi4-cs0 {
- rockchip,pins =
- <3 RK_PA3 2 &pcfg_pull_up>;
- };
- spi4_rx: spi4-rx {
- rockchip,pins =
- <3 RK_PA0 2 &pcfg_pull_up>;
- };
- spi4_tx: spi4-tx {
- rockchip,pins =
- <3 RK_PA1 2 &pcfg_pull_up>;
- };
- };
-
- spi5 {
- spi5_clk: spi5-clk {
- rockchip,pins =
- <2 RK_PC6 2 &pcfg_pull_up>;
- };
- spi5_cs0: spi5-cs0 {
- rockchip,pins =
- <2 RK_PC7 2 &pcfg_pull_up>;
- };
- spi5_rx: spi5-rx {
- rockchip,pins =
- <2 RK_PC4 2 &pcfg_pull_up>;
- };
- spi5_tx: spi5-tx {
- rockchip,pins =
- <2 RK_PC5 2 &pcfg_pull_up>;
- };
- };
-
- testclk {
- test_clkout0: test-clkout0 {
- rockchip,pins =
- <0 RK_PA0 1 &pcfg_pull_none>;
- };
-
- test_clkout1: test-clkout1 {
- rockchip,pins =
- <2 RK_PD1 2 &pcfg_pull_none>;
- };
-
- test_clkout2: test-clkout2 {
- rockchip,pins =
- <0 RK_PB0 3 &pcfg_pull_none>;
- };
- };
-
- tsadc {
- otp_pin: otp-pin {
- rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- otp_out: otp-out {
- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>;
- };
- };
-
- uart0 {
- uart0_xfer: uart0-xfer {
- rockchip,pins =
- <2 RK_PC0 1 &pcfg_pull_up>,
- <2 RK_PC1 1 &pcfg_pull_none>;
- };
-
- uart0_cts: uart0-cts {
- rockchip,pins =
- <2 RK_PC2 1 &pcfg_pull_none>;
- };
-
- uart0_rts: uart0-rts {
- rockchip,pins =
- <2 RK_PC3 1 &pcfg_pull_none>;
- };
- };
-
- uart1 {
- uart1_xfer: uart1-xfer {
- rockchip,pins =
- <3 RK_PB4 2 &pcfg_pull_up>,
- <3 RK_PB5 2 &pcfg_pull_none>;
- };
- };
-
- uart2a {
- uart2a_xfer: uart2a-xfer {
- rockchip,pins =
- <4 RK_PB0 2 &pcfg_pull_up>,
- <4 RK_PB1 2 &pcfg_pull_none>;
- };
- };
-
- uart2b {
- uart2b_xfer: uart2b-xfer {
- rockchip,pins =
- <4 RK_PC0 2 &pcfg_pull_up>,
- <4 RK_PC1 2 &pcfg_pull_none>;
- };
- };
-
- uart2c {
- uart2c_xfer: uart2c-xfer {
- rockchip,pins =
- <4 RK_PC3 1 &pcfg_pull_up>,
- <4 RK_PC4 1 &pcfg_pull_none>;
- };
- };
-
- uart3 {
- uart3_xfer: uart3-xfer {
- rockchip,pins =
- <3 RK_PB6 2 &pcfg_pull_up>,
- <3 RK_PB7 2 &pcfg_pull_none>;
- };
-
- uart3_cts: uart3-cts {
- rockchip,pins =
- <3 RK_PC0 2 &pcfg_pull_none>;
- };
-
- uart3_rts: uart3-rts {
- rockchip,pins =
- <3 RK_PC1 2 &pcfg_pull_none>;
- };
- };
-
- uart4 {
- uart4_xfer: uart4-xfer {
- rockchip,pins =
- <1 RK_PA7 1 &pcfg_pull_up>,
- <1 RK_PB0 1 &pcfg_pull_none>;
- };
- };
-
- uarthdcp {
- uarthdcp_xfer: uarthdcp-xfer {
- rockchip,pins =
- <4 RK_PC5 2 &pcfg_pull_up>,
- <4 RK_PC6 2 &pcfg_pull_none>;
- };
- };
-
- pwm0 {
- pwm0_pin: pwm0-pin {
- rockchip,pins =
- <4 RK_PC2 1 &pcfg_pull_none>;
- };
-
- pwm0_pin_pull_down: pwm0-pin-pull-down {
- rockchip,pins =
- <4 RK_PC2 1 &pcfg_pull_down>;
- };
-
- vop0_pwm_pin: vop0-pwm-pin {
- rockchip,pins =
- <4 RK_PC2 2 &pcfg_pull_none>;
- };
-
- vop1_pwm_pin: vop1-pwm-pin {
- rockchip,pins =
- <4 RK_PC2 3 &pcfg_pull_none>;
- };
- };
-
- pwm1 {
- pwm1_pin: pwm1-pin {
- rockchip,pins =
- <4 RK_PC6 1 &pcfg_pull_none>;
- };
-
- pwm1_pin_pull_down: pwm1-pin-pull-down {
- rockchip,pins =
- <4 RK_PC6 1 &pcfg_pull_down>;
- };
- };
-
- pwm2 {
- pwm2_pin: pwm2-pin {
- rockchip,pins =
- <1 RK_PC3 1 &pcfg_pull_none>;
- };
-
- pwm2_pin_pull_down: pwm2-pin-pull-down {
- rockchip,pins =
- <1 RK_PC3 1 &pcfg_pull_down>;
- };
- };
-
- pwm3a {
- pwm3a_pin: pwm3a-pin {
- rockchip,pins =
- <0 RK_PA6 1 &pcfg_pull_none>;
- };
- };
+&cpu_l0 {
+ operating-points-v2 = <&cluster0_opp>;
+};
- pwm3b {
- pwm3b_pin: pwm3b-pin {
- rockchip,pins =
- <1 RK_PB6 1 &pcfg_pull_none>;
- };
- };
+&cpu_l1 {
+ operating-points-v2 = <&cluster0_opp>;
+};
- hdmi {
- hdmi_i2c_xfer: hdmi-i2c-xfer {
- rockchip,pins =
- <4 RK_PC1 3 &pcfg_pull_none>,
- <4 RK_PC0 3 &pcfg_pull_none>;
- };
+&cpu_l2 {
+ operating-points-v2 = <&cluster0_opp>;
+};
- hdmi_cec: hdmi-cec {
- rockchip,pins =
- <4 RK_PC7 1 &pcfg_pull_none>;
- };
- };
+&cpu_l3 {
+ operating-points-v2 = <&cluster0_opp>;
+};
- pcie {
- pcie_clkreqn_cpm: pci-clkreqn-cpm {
- rockchip,pins =
- <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
+&cpu_b0 {
+ operating-points-v2 = <&cluster1_opp>;
+};
- pcie_clkreqnb_cpm: pci-clkreqnb-cpm {
- rockchip,pins =
- <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
+&cpu_b1 {
+ operating-points-v2 = <&cluster1_opp>;
+};
- };
+&gpu {
+ operating-points-v2 = <&gpu_opp_table>;
};
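
The operating-points-v2 phandles above attach each little-cluster CPU, each
big-cluster CPU, and the GPU to a shared OPP table defined elsewhere in the
dtsi split. For reference, a minimal table consumed by such a phandle looks
like the sketch below, per the generic operating-points-v2 binding; the two
frequency/voltage pairs are illustrative placeholders, not values taken from
this patch.

	cluster0_opp: opp-table-cluster0 {
		compatible = "operating-points-v2";
		opp-shared;	/* all CPUs of the cluster switch frequency together */

		opp-408000000 {
			opp-hz = /bits/ 64 <408000000>;
			opp-microvolt = <825000>;
		};

		opp-1416000000 {
			opp-hz = /bits/ 64 <1416000000>;
			opp-microvolt = <1125000>;
		};
	};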
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
index c58fb7658d7a..d3c628218ce3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
@@ -7,7 +7,6 @@
/dts-v1/;
#include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
#include <arm/rockchip/rockchip-radxa-dalang-carrier.dtsi>
#include "rk3399pro-vmarc-som.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts b/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts
new file mode 100644
index 000000000000..7cd91f8000cb
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
+#include "rk3566.dtsi"
+
+/ {
+ model = "LCKFB Taishan Pi RK3566";
+ compatible = "lckfb,tspi-rk3566", "rockchip,rk3566";
+
+ aliases {
+ mmc0 = &sdmmc0;
+ mmc1 = &sdhci;
+ mmc2 = &sdmmc1;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ramoops: ramoops@110000 {
+ compatible = "ramoops";
+ reg = <0 0x110000 0 0xf0000>;
+ console-size = <0x80000>;
+ ftrace-size = <0x00000>;
+ pmsg-size = <0x50000>;
+ record-size = <0x20000>;
+ };
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-recovery {
+ label = "recovery";
+ linux,code = <KEY_RESTART>;
+ press-threshold-microvolt = <108>;
+ };
+ };
+
+ hdmi_con: hdmi-con {
+ compatible = "hdmi-connector";
+ type = "d";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ rgb_led_r: rgb-led-r {
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_LOW>;
+ label = "status-red";
+ };
+
+ rgb_led_g: rgb-led-g {
+ gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "status-green";
+ };
+
+ rgb_led_b: rgb-led-b {
+ gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_BLUE>;
+ label = "status-blue";
+ };
+ };
+
+ multi_leds: multi-led {
+ compatible = "leds-group-multicolor";
+ color = <LED_COLOR_ID_RGB>;
+ label = "status-rgb";
+ function = LED_FUNCTION_INDICATOR;
+ leds = <&rgb_led_r>, <&rgb_led_g>, <&rgb_led_b>;
+ };
+
+ vcc12v0_dcin: regulator-12v0-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v0_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_sys: regulator-3v3-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v0_dcin>;
+ };
+
+ vcc5v0_host: regulator-5v0-vcc-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_LOW>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
+ assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
+ #clock-cells = <1>;
+ clock-output-names = "rk808-clkout1", "rk808-clkout2";
+ clock-names = "mclk";
+ clocks = <&cru I2S1_MCLKOUT_TX>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>, <&i2s1m0_mclk>;
+ rockchip,system-power-controller;
+ #sound-dai-cells = <0>;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+
+ codec {
+ rockchip,mic-in-differential;
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ /* Touch Screen */
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4m0_xfer>;
+ status = "okay";
+ /* Camera */
+};
+
+&i2s0_8ch {
+ status = "okay";
+ /* HDMI */
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+ /* PMIC */
+};
+
+&i2s2_2ch {
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+ /* AP6212 Bluetooth */
+};
+
+&pinctrl {
+ bt {
+ bt_enable_h: bt-enable-h {
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins = <2 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hp-detect {
+ hp_det: hp-det {
+ rockchip,pins = <4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_host_wake_h: wifi-host-wake-l {
+ rockchip,pins = <2 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb2 {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pmugrf {
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x200>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ };
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+ /* Channel 0: Recovery Button */
+ /* Channel 1: Hardware ID */
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe &emmc_rstnout>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <150000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ keep-power-in-suspend;
+ max-frequency = <150000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vcc_1v8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake_h>;
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth: bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rk809 1>;
+ clock-names = "lpo";
+ max-speed = <3000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
+ shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
+ vbat-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_sys>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_sys>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
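
In the &vp0 block above, the endpoint's unit-address is written with the
ROCKCHIP_VOP2_EP_HDMI0 macro from <dt-bindings/soc/rockchip,vop2.h>, so the
node name and its reg value cannot drift apart; the C preprocessor expands
the macro before dtc ever sees the source. Assuming the mainline header's
value for HDMI0 (2 at the time of writing; treat the literal as
illustrative), the compiled result is equivalent to:

	&vp0 {
		vp0_out_hdmi: endpoint@2 {
			reg = <2>;
			remote-endpoint = <&hdmi_in_vp0>;
		};
	};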
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts b/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts
new file mode 100644
index 000000000000..33bc5249d729
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3566.dtsi"
+
+/ {
+ model = "Hardkernel ODROID-M1S";
+ compatible = "hardkernel,odroid-m1s", "rockchip,rk3566";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_led>, <&sys_led>;
+
+ led_pwr: led-0 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "on";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ led_sys: led-1 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc3v3_lcd: regulator-3v3-vcc-lcd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pwren>;
+ regulator-name = "vcc3v3_lcd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_pcie: regulator-3v3-vcc-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PC2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren>;
+ regulator-name = "vcc3v3_pcie";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_sys: regulator-3v3-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb2_host: regulator-5v0-vcc-usb2-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_host_pwren>;
+ regulator-name = "vcc5v0_usb2_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb2_otg: regulator-5v0-vcc-usb2-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_otg_pwren>;
+ regulator-name = "vcc5v0_usb2_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb3_host: regulator-5v0-vcc-usb3-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_host_pwren>;
+ regulator-name = "vcc5v0_usb3_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus
+ &gmac1m1_clkinout>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1390000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
+ assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
+ #clock-cells = <1>;
+ clocks = <&cru I2S1_MCLKOUT_TX>;
+ clock-names = "mclk";
+ clock-output-names = "rk809-clkout1", "rk809-clkout2";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>, <&i2s1m0_mclk>;
+ #sound-dai-cells = <0>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx
+ &i2s1m0_lrcktx
+ &i2s1m0_sdi0
+ &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20_pins>;
+ reset-gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ lcd {
+ lcd_pwren: lcd-pwren {
+ rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ pwr_led: pwr-led {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ sys_led: sys-led {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie20_pins: pcie20-pins {
+ rockchip,pins =
+ <1 RK_PB0 4 &pcfg_pull_none>,
+ <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>,
+ <1 RK_PB1 4 &pcfg_pull_none>;
+ };
+
+ pcie_pwren: pcie-pwren {
+ rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ usb2_host_pwren: usb2-host-pwren {
+ rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb2_otg_pwren: usb2-otg-pwren {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb3_host_pwren: usb3-host-pwren {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_3v3>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe &emmc_rstnout>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ sd-uhs-sdr50;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_usb3_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb2_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_usb2_host>;
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb2_host>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 13e599a85eb8..c164074ddf54 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -648,6 +648,8 @@
};
&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
status = "okay";
};
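
The two properties added to &tsadc configure the sensor's last-resort
hardware shutdown per the rockchip-thermal binding: rockchip,hw-tshut-mode
selects whether crossing the shutdown temperature resets the chip through
the CRU (0) or asserts a GPIO toward the PMIC (1), and
rockchip,hw-tshut-polarity gives the active level of that signal (0 = active
low). Annotated, the resulting node reads:

	&tsadc {
		rockchip,hw-tshut-mode = <1>;		/* 1: signal the PMIC via GPIO; 0: CRU reset */
		rockchip,hw-tshut-polarity = <0>;	/* tshut output is active low */
		status = "okay";
	};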
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi
index 9cc7aa3298d0..de390d92c35e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi
@@ -493,7 +493,6 @@
};
&usb_host0_xhci {
- dr_mode = "peripheral";
status = "okay";
};
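
Dropping dr_mode = "peripheral" leaves the dual-role controller's mode
unset; per the generic USB DRD binding, a controller without dr_mode is
expected to default to OTG, so the port can negotiate host or device role
at runtime instead of being pinned to device mode. The resulting override
is simply:

	&usb_host0_xhci {
		/* no dr_mode: DRD controllers default to OTG per the binding */
		status = "okay";
	};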
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
index b5e67990dd0f..8e5c182ef76c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
@@ -20,9 +20,9 @@
cap-mmc-highspeed;
cap-sd-highspeed;
disable-wp;
- max-frequency = <150000000>;
no-sdio;
no-mmc;
+ sd-uhs-sdr50;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
vmmc-supply = <&vcc3v3_sd>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
index ce2a5e1ccefc..d27eb37b5b35 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
@@ -39,12 +39,6 @@
&gmac0_rx_bus2
&gmac0_rgmii_clk
&gmac0_rgmii_bus>;
- snps,reset-gpio = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- /* Reset time is 15ms, 50ms for rtl8211f */
- snps,reset-delays-us = <0 15000 50000>;
- tx_delay = <0x3c>;
- rx_delay = <0x2f>;
status = "okay";
};
@@ -61,12 +55,6 @@
&gmac1m1_rx_bus2
&gmac1m1_rgmii_clk
&gmac1m1_rgmii_bus>;
- snps,reset-gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- /* Reset time is 15ms, 50ms for rtl8211f */
- snps,reset-delays-us = <0 15000 50000>;
- tx_delay = <0x4f>;
- rx_delay = <0x26>;
status = "okay";
};
@@ -76,6 +64,9 @@
reg = <0x1>;
pinctrl-0 = <&eth_phy0_reset_pin>;
pinctrl-names = "default";
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
};
};
@@ -85,6 +76,9 @@
reg = <0x1>;
pinctrl-0 = <&eth_phy1_reset_pin>;
pinctrl-names = "default";
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
};
};
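
This hunk moves the PHY reset description from the deprecated stmmac-level
snps,reset-* properties into the PHY nodes themselves, using the generic
ethernet-phy binding. The sketch below shows how the microsecond timings
map onto the deleted comment's figures: reset-assert-us covers the
RTL8211F's 15 ms minimum reset pulse, and reset-deassert-us the 50 ms the
PHY needs before its first MDIO access, with both values leaving headroom
beyond those minima (the compatible line is assumed from the surrounding
context, which this hunk does not show).

	rgmii_phy0: ethernet-phy@1 {
		compatible = "ethernet-phy-ieee802.3-c22";
		reg = <0x1>;
		reset-gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
		reset-assert-us = <20000>;	/* >= 15 ms reset pulse */
		reset-deassert-us = <100000>;	/* settle before MDIO traffic */
	};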
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
index c2dfffc638d1..c491dc4d4947 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
@@ -89,6 +89,20 @@
};
};
+&can0 {
+ compatible = "rockchip,rk3568v3-canfd", "rockchip,rk3568v2-canfd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0m0_pins>;
+ status = "okay";
+};
+
+&can1 {
+ compatible = "rockchip,rk3568v3-canfd", "rockchip,rk3568v2-canfd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1m1_pins>;
+ status = "okay";
+};
+
&combphy0 {
status = "okay";
};
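
The &can0/&can1 compatibles are ordered most-specific first: the
"rockchip,rk3568v3-canfd" string records the exact silicon revision, while
"rockchip,rk3568v2-canfd" is the fallback an existing driver can match; the
usual device-tree pattern for a revised block that stays register
compatible. Annotated:

	&can0 {
		compatible = "rockchip,rk3568v3-canfd",		/* exact revision */
			     "rockchip,rk3568v2-canfd";		/* driver fallback */
		pinctrl-names = "default";
		pinctrl-0 = <&can0m0_pins>;
		status = "okay";
	};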
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
index a337f547caf5..6a02db4f073f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
@@ -13,7 +13,7 @@
/ {
model = "Hardkernel ODROID-M1";
- compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568";
+ compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
aliases {
ethernet0 = &gmac0;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
index 6a998166003c..e601d9271ba8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
@@ -6,12 +6,190 @@
/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/gpio/gpio.h>
#include "rk3568.dtsi"
/ {
model = "Qnap TS-433-4G NAS System 4-Bay";
compatible = "qnap,ts433", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+ mmc0 = &sdhci;
+ rtc0 = &rtc_rv8263;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&copy_button_pin>, <&reset_button_pin>;
+ pinctrl-names = "default";
+
+ key-copy {
+ label = "copy";
+ gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_COPY>;
+ };
+
+ key-reset {
+ label = "reset";
+ gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RESTART>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdd1_led_pin>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio1 RK_PD6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdd2_led_pin>;
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio1 RK_PD7 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdd3_led_pin>;
+ };
+
+ led-3 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio2 RK_PA0 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdd4_led_pin>;
+ };
+ };
+
+ dc_12v: regulator-dc-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_pcie: regulator-vcc3v3-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc3v3_sys: regulator-vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ gpio = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc5v0_host";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+ };
+
+ vcc5v0_otg: regulator-vcc5v0-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_otg_en>;
+ regulator-name = "vcc5v0_otg";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+};
+
+/* connected to usb_host0_xhci */
+&combphy0 {
+ status = "okay";
+};
+
+/* connected to sata1 */
+&combphy1 {
+ status = "okay";
+};
+
+/* connected to sata2 */
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
};
&gmac0 {
@@ -20,35 +198,282 @@
assigned-clock-rates = <0>, <125000000>;
clock_in_out = "output";
phy-handle = <&rgmii_phy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
pinctrl-names = "default";
pinctrl-0 = <&gmac0_miim
&gmac0_tx_bus2
&gmac0_rx_bus2
&gmac0_rgmii_clk
&gmac0_rgmii_bus>;
- rx_delay = <0x2f>;
- tx_delay = <0x3c>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
status = "okay";
};
&i2c0 {
+ status = "okay";
+
pmic@20 {
compatible = "rockchip,rk809";
reg = <0x20>;
interrupt-parent = <&gpio0>;
- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ system-power-controller;
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ wakeup-source;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+			/*
+			 * Turning this off breaks access to both PCIe
+			 * controllers; it perhaps feeds the refclk
+			 * generator.
+			 */
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1390000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
};
};
&i2c1 {
status = "okay";
- rtc@51 {
+ rtc_rv8263: rtc@51 {
compatible = "microcrystal,rv8263";
reg = <0x51>;
wakeup-source;
};
+
+	/* EEPROM for vital product data on the mainboard */
+ eeprom@54 {
+ compatible = "giantec,gt24c04a", "atmel,24c04";
+ reg = <0x54>;
+ label = "VPD_MB";
+ num-addresses = <2>;
+ pagesize = <16>;
+ read-only;
+ };
+
+	/* EEPROM for vital product data on the backplane */
+ eeprom@56 {
+ compatible = "giantec,gt24c04a", "atmel,24c04";
+ reg = <0x56>;
+ label = "VPD_BP";
+ num-addresses = <2>;
+ pagesize = <16>;
+ read-only;
+ };
};
&mdio0 {
@@ -59,12 +484,82 @@
};
&pcie30phy {
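+	/* lane 0 feeds the pcie3x1 controller, lane 1 the pcie3x2 (per data-lanes) */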
+ data-lanes = <1 2>;
status = "okay";
};
+/* Connected to a JMicron AHCI SATA controller */
&pcie3x1 {
- /* The downstream dts has: rockchip,bifurcation, XXX: find out what this is about */
reset-gpios = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+/* Connected to the 2.5G NIC for the upper network jack */
+&pcie3x2 {
+ num-lanes = <1>;
+ reset-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ keys {
+ copy_button_pin: copy-button-pin {
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ reset_button_pin: reset-button-pin {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ hdd1_led_pin: hdd1-led-pin {
+ rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ hdd2_led_pin: hdd2-led-pin {
+ rockchip,pins = <1 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ hdd3_led_pin: hdd3-led-pin {
+ rockchip,pins = <1 RK_PD7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+		hdd4_led_pin: hdd4-led-pin {
+ rockchip,pins = <2 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_otg_en: vcc5v0-otg-en {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ vccio4-supply = <&vcc_1v8>;
+ vccio6-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&sata2 {
status = "okay";
};
@@ -75,6 +570,20 @@
status = "okay";
};
+&tsadc {
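+	/* over-temperature shutdown via GPIO (mode 1), active low (polarity 0) */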
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+/*
+ * Connected to an MCU that provides access to more LEDs,
+ * a buzzer, fan control and more.
+ */
+&uart0 {
+ status = "okay";
+};
+
/*
* Pins available on CN3 connector at TTL voltage level (3V3).
* ,_ _.
@@ -84,3 +593,53 @@
&uart2 {
status = "okay";
};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+/* connected to usb_host0_xhci */
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+/* connected to usb_host1_ehci/ohci */
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+/* connected to usb_host0_ehci/ohci */
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+/* right port backside */
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+/* front port */
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* left port backside */
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi
index 45b03dcbbad4..19d309654bdb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi
@@ -108,10 +108,6 @@
cpu-supply = <&vdd_cpu>;
};
-&display_subsystem {
- status = "disabled";
-};
-
&gpu {
mali-supply = <&vdd_gpu>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
index 72ad74c38a2b..84a0789fad96 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts
@@ -103,6 +103,10 @@
phy-supply = <&vcc3v3_pcie30x1>;
};
+&display_subsystem {
+ status = "disabled";
+};
+
&pcie2x1 {
pinctrl-names = "default";
pinctrl-0 = <&pcie20_reset_h>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso
new file mode 100644
index 000000000000..70c23e1bf14b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Device tree overlay for the WolfVision PF5 Visualizer display.
+ *
+ * Copyright (C) 2024 WolfVision GmbH.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "rk3568-wolfvision-pf5-display.dtsi"
+
+&st7789 {
+ compatible = "jasonic,jt240mhqs-hwt-ek-e3",
+ "sitronix,st7789v";
+ rotation = <270>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi
new file mode 100644
index 000000000000..b22bb543ecbb
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Device tree overlay base for the WolfVision PF5 displays.
+ *
+ * Copyright (C) 2024 WolfVision GmbH.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/rk3568-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+
+&{/} {
+ display_backlight: backlight {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 255>;
+ default-brightness-level = <255>;
+ num-interpolated-steps = <255>;
+ power-supply = <&vcc3v3_sd>;
+ pwms = <&pwm10 0 1000000 0>;
+ };
+
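+	/* display control bus, bit-banged on GPIOs by the generic spi-gpio driver */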
+ display_spi: spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&gpio3 RK_PA2 GPIO_ACTIVE_LOW>;
+ miso-gpios = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio3 RK_PB2 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_spi>;
+ sck-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>;
+
+ st7789: panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ assigned-clocks = <&cru PLL_VPLL>;
+ assigned-clock-rates = <700000000>;
+ backlight = <&display_backlight>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdc_clock &lcdc_data18 &lcd_rstn>;
+ power-supply = <&vcc3v3_sw>;
+ reset-gpios = <&gpio3 RK_PC4 GPIO_ACTIVE_LOW>;
+ spi-max-frequency = <100000>;
+
+ port {
+ panel_in_vp2: endpoint {
+ remote-endpoint = <&vp2_out_rgb>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ st1624: touchscreen@55 {
+ compatible = "sitronix,st1624", "sitronix,st1633";
+ reg = <0x55>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB5 IRQ_TYPE_EDGE_FALLING>;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_int &touch_rstn>;
+ wakeup-source;
+ };
+};
+
+&pinctrl {
+ display: display-pinctrl {
+ lcd_rstn: lcd-rstn-pinctrl {
+ rockchip,pins = <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ lcd_spi: lcd-spi-pinctrl {
+ rockchip,pins =
+ /* lcd_sdo */
+ <3 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>,
+ /* lcd_csn */
+ <3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>,
+ /* lcd_scl */
+ <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>,
+ /* lcd_sdi */
+ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ touchscreen: touchscreen-pinctrl {
+ touch_int: touch-int-pinctrl {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ touch_rstn: touch-rstn-pinctrl {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm10 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm10m1_pins>;
+ status = "okay";
+};
+
+&vp2 {
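+	/* route VOP2 video port 2 to the panel via its parallel RGB output */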
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vp2_out_rgb: endpoint@ROCKCHIP_VOP2_EP_RGB0 {
+ reg = <ROCKCHIP_VOP2_EP_RGB0>;
+ remote-endpoint = <&panel_in_vp2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
index f1be76a54ceb..0946310e8c12 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
@@ -213,6 +213,45 @@
};
};
+ can0: can@fe570000 {
+ compatible = "rockchip,rk3568v2-canfd";
+ reg = <0x0 0xfe570000 0x0 0x1000>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_CAN0>, <&cru PCLK_CAN0>;
+ clock-names = "baud", "pclk";
+ resets = <&cru SRST_CAN0>, <&cru SRST_P_CAN0>;
+ reset-names = "core", "apb";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0m0_pins>;
+ status = "disabled";
+ };
+
+ can1: can@fe580000 {
+ compatible = "rockchip,rk3568v2-canfd";
+ reg = <0x0 0xfe580000 0x0 0x1000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_CAN1>, <&cru PCLK_CAN1>;
+ clock-names = "baud", "pclk";
+ resets = <&cru SRST_CAN1>, <&cru SRST_P_CAN1>;
+ reset-names = "core", "apb";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1m0_pins>;
+ status = "disabled";
+ };
+
+ can2: can@fe590000 {
+ compatible = "rockchip,rk3568v2-canfd";
+ reg = <0x0 0xfe590000 0x0 0x1000>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_CAN2>, <&cru PCLK_CAN2>;
+ clock-names = "baud", "pclk";
+ resets = <&cru SRST_CAN2>, <&cru SRST_P_CAN2>;
+ reset-names = "core", "apb";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can2m0_pins>;
+ status = "disabled";
+ };
+
combphy0: phy@fe820000 {
compatible = "rockchip,rk3568-naneng-combphy";
reg = <0x0 0xfe820000 0x0 0x100>;
@@ -257,6 +296,10 @@
};
};
+&rng {
+ status = "okay";
+};
+
&usb_host0_xhci {
phys = <&usb2phy0_otg>, <&combphy0 PHY_TYPE_USB3>;
phy-names = "usb2-phy", "usb3-phy";
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 4690be841a1c..0ee0ada6f0ab 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -1113,6 +1113,15 @@
status = "disabled";
};
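+	/* true random number generator (non-secure instance, per the _NS clocks) */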
+ rng: rng@fe388000 {
+ compatible = "rockchip,rk3568-rng";
+ reg = <0x0 0xfe388000 0x0 0x4000>;
+ clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>;
+ clock-names = "core", "ahb";
+ resets = <&cru SRST_TRNG_NS>;
+ status = "disabled";
+ };
+
i2s0_8ch: i2s@fe400000 {
compatible = "rockchip,rk3568-i2s-tdm";
reg = <0x0 0xfe400000 0x0 0x1000>;
@@ -1592,10 +1601,9 @@
<&cru SRST_TSADCPHY>;
rockchip,grf = <&grf>;
rockchip,hw-tshut-temp = <95000>;
- pinctrl-names = "init", "default", "sleep";
- pinctrl-0 = <&tsadc_pin>;
- pinctrl-1 = <&tsadc_shutorg>;
- pinctrl-2 = <&tsadc_pin>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&tsadc_shutorg>;
+ pinctrl-1 = <&tsadc_pin>;
#thermal-sensor-cells = <1>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi
index 30db12c4fc82..d1368418502a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi
@@ -2449,15 +2449,15 @@
/* sdio_clk_m1 */
<3 RK_PA5 2 &pcfg_pull_none>,
/* sdio_cmd_m1 */
- <3 RK_PA4 2 &pcfg_pull_none>,
+ <3 RK_PA4 2 &pcfg_pull_up>,
/* sdio_d0_m1 */
- <3 RK_PA0 2 &pcfg_pull_none>,
+ <3 RK_PA0 2 &pcfg_pull_up>,
/* sdio_d1_m1 */
- <3 RK_PA1 2 &pcfg_pull_none>,
+ <3 RK_PA1 2 &pcfg_pull_up>,
/* sdio_d2_m1 */
- <3 RK_PA2 2 &pcfg_pull_none>,
+ <3 RK_PA2 2 &pcfg_pull_up>,
/* sdio_d3_m1 */
- <3 RK_PA3 2 &pcfg_pull_none>;
+ <3 RK_PA3 2 &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index b6e4df180f0b..d97d84b88837 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -582,14 +582,14 @@
};
vo0_grf: syscon@fd5a6000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
+ compatible = "rockchip,rk3588-vo0-grf", "syscon";
reg = <0x0 0xfd5a6000 0x0 0x2000>;
clocks = <&cru PCLK_VO0GRF>;
};
vo1_grf: syscon@fd5a8000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
- reg = <0x0 0xfd5a8000 0x0 0x100>;
+ compatible = "rockchip,rk3588-vo1-grf", "syscon";
+ reg = <0x0 0xfd5a8000 0x0 0x4000>;
clocks = <&cru PCLK_VO1GRF>;
};
@@ -1122,6 +1122,118 @@
};
};
+ vpu121: video-codec@fdb50000 {
+ compatible = "rockchip,rk3588-vpu121", "rockchip,rk3568-vpu";
+ reg = <0x0 0xfdb50000 0x0 0x800>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "vdpu";
+ clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vpu121_mmu>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
+ vpu121_mmu: iommu@fdb50800 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdb50800 0x0 0x40>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH 0>;
+ clock-names = "aclk", "iface";
+ clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ #iommu-cells = <0>;
+ };
+
+ rga: rga@fdb80000 {
+ compatible = "rockchip,rk3588-rga", "rockchip,rk3288-rga";
+ reg = <0x0 0xfdb80000 0x0 0x180>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_RGA2>, <&cru HCLK_RGA2>, <&cru CLK_RGA2_CORE>;
+ clock-names = "aclk", "hclk", "sclk";
+ resets = <&cru SRST_RGA2_CORE>, <&cru SRST_A_RGA2>, <&cru SRST_H_RGA2>;
+ reset-names = "core", "axi", "ahb";
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
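+		/* four JPEG encoder instances (see the JPEG_ENCODERn clocks), each behind its own IOMMU */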
+ vepu121_0: video-codec@fdba0000 {
+ compatible = "rockchip,rk3588-vepu121";
+ reg = <0x0 0xfdba0000 0x0 0x800>;
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER0>, <&cru HCLK_JPEG_ENCODER0>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vepu121_0_mmu>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
+ vepu121_0_mmu: iommu@fdba0800 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdba0800 0x0 0x40>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER0>, <&cru HCLK_JPEG_ENCODER0>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3588_PD_VDPU>;
+ #iommu-cells = <0>;
+ };
+
+ vepu121_1: video-codec@fdba4000 {
+ compatible = "rockchip,rk3588-vepu121";
+ reg = <0x0 0xfdba4000 0x0 0x800>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER1>, <&cru HCLK_JPEG_ENCODER1>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vepu121_1_mmu>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
+ vepu121_1_mmu: iommu@fdba4800 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdba4800 0x0 0x40>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER1>, <&cru HCLK_JPEG_ENCODER1>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3588_PD_VDPU>;
+ #iommu-cells = <0>;
+ };
+
+ vepu121_2: video-codec@fdba8000 {
+ compatible = "rockchip,rk3588-vepu121";
+ reg = <0x0 0xfdba8000 0x0 0x800>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER2>, <&cru HCLK_JPEG_ENCODER2>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vepu121_2_mmu>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
+ vepu121_2_mmu: iommu@fdba8800 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdba8800 0x0 0x40>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER2>, <&cru HCLK_JPEG_ENCODER2>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3588_PD_VDPU>;
+ #iommu-cells = <0>;
+ };
+
+ vepu121_3: video-codec@fdbac000 {
+ compatible = "rockchip,rk3588-vepu121";
+ reg = <0x0 0xfdbac000 0x0 0x800>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER3>, <&cru HCLK_JPEG_ENCODER3>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vepu121_3_mmu>;
+ power-domains = <&power RK3588_PD_VDPU>;
+ };
+
+ vepu121_3_mmu: iommu@fdbac800 {
+ compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0xfdbac800 0x0 0x40>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_JPEG_ENCODER3>, <&cru HCLK_JPEG_ENCODER3>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3588_PD_VDPU>;
+ #iommu-cells = <0>;
+ };
+
av1d: video-codec@fdc70000 {
compatible = "rockchip,rk3588-av1-vpu";
reg = <0x0 0xfdc70000 0x0 0x800>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
new file mode 100644
index 000000000000..6418286efe40
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include "rk3588-coolpi-cm5.dtsi"
+
+/ {
+ model = "CoolPi CM5 GenBook";
+ compatible = "coolpi,pi-cm5-genbook", "coolpi,pi-cm5", "rockchip,rk3588";
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ enable-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bl_en>;
+ power-supply = <&vcc12v_dcin>;
+ pwms = <&pwm6 0 25000 0>;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <9800000>;
+ voltage-max-design-microvolt = <4350000>;
+ voltage-min-design-microvolt = <3000000>;
+ };
+
+ charger: dc-charger {
+ compatible = "gpio-charger";
+ charger-type = "mains";
+ gpios = <&gpio1 RK_PC0 GPIO_ACTIVE_LOW>;
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ heartbeat_led: led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ wlan_led: led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WLAN;
+ gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
+ };
+
+ charging_red: led-2 {
+ function = LED_FUNCTION_CHARGING;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio3 RK_PD2 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ vcc12v_dcin: vcc12v-dcin-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc_sys: vcc-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <7000000>;
+ regulator-max-microvolt = <7000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_lcd: vcc3v3-lcd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lcd";
+ enable-active-high;
+ gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdpwr_en>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc5v0_usb: vcc5v0-usb-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-boot-on;
+ regulator-always-on;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_pwren>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc5v0_usb_host0: vcc5v0_usb30_host: vcc5v0-usb-host-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren>;
+ vin-supply = <&vcc5v0_usb>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4m3_xfer>;
+
+ cw2015@62 {
+ compatible = "cellwise,cw2015";
+ reg = <0x62>;
+
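+		/* 64-byte (8x8) calibration profile for the CW2015 fuel gauge */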
+ cellwise,battery-profile = /bits/ 8 <
+ 0x17 0x67 0x69 0x63 0x63 0x62 0x62 0x5F
+ 0x52 0x73 0x4C 0x5A 0x5B 0x4B 0x42 0x3A
+ 0x33 0x2D 0x29 0x28 0x2E 0x31 0x3C 0x49
+ 0x2C 0x2C 0x0C 0xCD 0x30 0x51 0x50 0x66
+ 0x74 0x74 0x75 0x78 0x41 0x1B 0x84 0x5F
+ 0x0B 0x34 0x1C 0x45 0x89 0x92 0xA0 0x13
+ 0x2C 0x55 0xAB 0xCB 0x80 0x5E 0x7B 0xCB
+ 0x2F 0x00 0x64 0xA5 0xB5 0x10 0x18 0x21
+ >;
+
+ cellwise,monitor-interval-ms = <3000>;
+ monitored-battery = <&battery>;
+ power-supplies = <&charger>;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5m3_xfer>;
+
+ touchpad: touchpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PD6 IRQ_TYPE_LEVEL_LOW>;
+ hid-descr-addr = <0x0020>;
+ };
+};
+
+&gmac0 {
+ status = "disabled";
+};
+
+/* M.2 E-Key */
+&pcie2x1l0 {
+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_sys>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreq &pcie_wake &pcie_rst &wifi_pwron &bt_pwron>;
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ status = "disabled";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+/* M.2 M-Key SSD */
+&pcie3x4 {
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_sys>;
+ status = "okay";
+};
+
+&pinctrl {
+ lcd {
+ lcdpwr_en: lcdpwr-en {
+ rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bl_en: bl-en {
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_pwren: usb-pwren {
+ rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ usb_otg_pwren: usb-otg-pwren {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ usb_host_pwren: usb-host-pwren {
+ rockchip,pins = <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ wifi {
+ bt_pwron: bt-pwron {
+ rockchip,pins = <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ pcie_clkreq: pcie-clkreq {
+ rockchip,pins = <4 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ pcie_rst: pcie-rst {
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ wifi_pwron: wifi-pwron {
+ rockchip,pins = <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ pcie_wake: pcie-wake {
+ rockchip,pins = <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm6 {
+ pinctrl-0 = <&pwm6m1_pins>;
+ status = "okay";
+};
+
+&sdmmc {
+ status = "disabled";
+};
+
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <100000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_usb_host0>;
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_usb>;
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+/* For Keypad */
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+/* USB Type-C port */
+&usb_host0_xhci {
+ dr_mode = "peripheral";
+ maximum-speed = "high-speed";
+ status = "okay";
+};
+
+/* connected to a USB hub for camera and Bluetooth */
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+/* USB A out */
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts
new file mode 100644
index 000000000000..2d92bbb4027d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2023 Thomas McKahan
+ * Copyright (c) 2024 Linaro Ltd.
+ *
+ */
+
+/dts-v1/;
+
+#include "rk3588-nanopc-t6.dtsi"
+
+/ {
+ model = "FriendlyElec NanoPC-T6 LTS";
+ compatible = "friendlyarm,nanopc-t6-lts", "rockchip,rk3588";
+
+	/* provides power to the on-board USB 2.0 hub */
+ vcc5v0_usb20_host: vcc5v0-usb20-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb20_host_pwren>;
+ pinctrl-names = "default";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "vcc5v0_usb20_host";
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&pinctrl {
+ usb {
+ usb20_host_pwren: usb20-host-pwren {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_usb20_host>;
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
index ad8e36a339dc..92321c1d3ff1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
@@ -2,175 +2,18 @@
/*
* Copyright (c) 2021 Rockchip Electronics Co., Ltd.
* Copyright (c) 2023 Thomas McKahan
+ * Copyright (c) 2024 Linaro Ltd.
*
*/
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/rockchip.h>
-#include <dt-bindings/usb/pd.h>
-#include "rk3588.dtsi"
+#include "rk3588-nanopc-t6.dtsi"
/ {
model = "FriendlyElec NanoPC-T6";
compatible = "friendlyarm,nanopc-t6", "rockchip,rk3588";
- aliases {
- mmc0 = &sdhci;
- mmc1 = &sdmmc;
- };
-
- chosen {
- stdout-path = "serial2:1500000n8";
- };
-
- leds {
- compatible = "gpio-leds";
-
- sys_led: led-0 {
- gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
- label = "system-led";
- linux,default-trigger = "heartbeat";
- pinctrl-names = "default";
- pinctrl-0 = <&sys_led_pin>;
- };
-
- usr_led: led-1 {
- gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
- label = "user-led";
- pinctrl-names = "default";
- pinctrl-0 = <&usr_led_pin>;
- };
- };
-
- sound {
- compatible = "simple-audio-card";
- pinctrl-names = "default";
- pinctrl-0 = <&hp_det>;
-
- simple-audio-card,name = "realtek,rt5616-codec";
- simple-audio-card,format = "i2s";
- simple-audio-card,mclk-fs = <256>;
-
- simple-audio-card,hp-det-gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>;
- simple-audio-card,hp-pin-name = "Headphones";
-
- simple-audio-card,widgets =
- "Headphone", "Headphones",
- "Microphone", "Microphone Jack";
- simple-audio-card,routing =
- "Headphones", "HPOL",
- "Headphones", "HPOR",
- "MIC1", "Microphone Jack",
- "Microphone Jack", "micbias1";
-
- simple-audio-card,cpu {
- sound-dai = <&i2s0_8ch>;
- };
- simple-audio-card,codec {
- sound-dai = <&rt5616>;
- };
- };
-
- vcc12v_dcin: vcc12v-dcin-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc12v_dcin";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- };
-
- /* vcc5v0_sys powers peripherals */
- vcc5v0_sys: vcc5v0-sys-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc5v0_sys";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc12v_dcin>;
- };
-
- /* vcc4v0_sys powers the RK806, RK860's */
- vcc4v0_sys: vcc4v0-sys-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc4v0_sys";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <4000000>;
- regulator-max-microvolt = <4000000>;
- vin-supply = <&vcc12v_dcin>;
- };
-
- vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc-1v1-nldo-s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
- vin-supply = <&vcc4v0_sys>;
- };
-
- vcc_3v3_pcie20: vcc3v3-pcie20-regulator {
- compatible = "regulator-fixed";
- regulator-name = "vcc_3v3_pcie20";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vcc_3v3_s3>;
- };
-
- vbus5v0_typec: vbus5v0-typec-regulator {
- compatible = "regulator-fixed";
- enable-active-high;
- gpio = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&typec5v_pwren>;
- regulator-name = "vbus5v0_typec";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
- compatible = "regulator-fixed";
- enable-active-high;
- gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_m2_1_pwren>;
- regulator-name = "vcc3v3_pcie2x1l0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_pcie30: vcc3v3-pcie30-regulator {
- compatible = "regulator-fixed";
- enable-active-high;
- gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_m2_0_pwren>;
- regulator-name = "vcc3v3_pcie30";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_sd_s0: vcc3v3-sd-s0-regulator {
- compatible = "regulator-fixed";
- enable-active-low;
- gpio = <&gpio4 RK_PA5 GPIO_ACTIVE_LOW>;
- regulator-boot-on;
- regulator-max-microvolt = <3300000>;
- regulator-min-microvolt = <3300000>;
- regulator-name = "vcc3v3_sd_s0";
- vin-supply = <&vcc_3v3_s3>;
- };
-
vdd_4g_3v3: vdd-4g-3v3-regulator {
compatible = "regulator-fixed";
enable-active-high;
@@ -184,762 +27,14 @@
};
};
-&combphy0_ps {
- status = "okay";
-};
-
-&combphy1_ps {
- status = "okay";
-};
-
-&combphy2_psu {
- status = "okay";
-};
-
-&cpu_l0 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l1 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l2 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_l3 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&cpu_b0 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b1 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b2 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&cpu_b3 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&gpio0 {
- gpio-line-names = /* GPIO0 A0-A7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO0 B0-B7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO0 C0-C7 */
- "", "", "", "",
- "HEADER_10", "HEADER_08", "HEADER_32", "",
- /* GPIO0 D0-D7 */
- "", "", "", "",
- "", "", "", "";
-};
-
-&gpio1 {
- gpio-line-names = /* GPIO1 A0-A7 */
- "HEADER_27", "HEADER_28", "", "",
- "", "", "", "HEADER_15",
- /* GPIO1 B0-B7 */
- "HEADER_26", "HEADER_21", "HEADER_19", "HEADER_23",
- "HEADER_24", "HEADER_22", "", "",
- /* GPIO1 C0-C7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO1 D0-D7 */
- "", "", "", "",
- "", "", "HEADER_05", "HEADER_03";
-};
-
-&gpio2 {
- gpio-line-names = /* GPIO2 A0-A7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO2 B0-B7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO2 C0-C7 */
- "", "CSI1_11", "CSI1_12", "",
- "", "", "", "",
- /* GPIO2 D0-D7 */
- "", "", "", "",
- "", "", "", "";
-};
-
-&gpio3 {
- gpio-line-names = /* GPIO3 A0-A7 */
- "HEADER_35", "HEADER_38", "HEADER_40", "HEADER_36",
- "HEADER_37", "", "DSI0_12", "",
- /* GPIO3 B0-B7 */
- "HEADER_33", "DSI0_10", "HEADER_07", "HEADER_16",
- "HEADER_18", "HEADER_29", "HEADER_31", "HEADER_12",
- /* GPIO3 C0-C7 */
- "DSI0_08", "DSI0_14", "HEADER_11", "HEADER_13",
- "", "", "", "",
- /* GPIO3 D0-D7 */
- "", "", "", "",
- "", "DSI1_10", "", "";
-};
-
-&gpio4 {
- gpio-line-names = /* GPIO4 A0-A7 */
- "DSI1_08", "DSI1_14", "", "DSI1_12",
- "", "", "", "",
- /* GPIO4 B0-B7 */
- "", "", "", "",
- "", "", "", "",
- /* GPIO4 C0-C7 */
- "", "", "", "",
- "CSI0_11", "CSI0_12", "", "",
- /* GPIO4 D0-D7 */
- "", "", "", "",
- "", "", "", "";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0m2_xfer>;
- status = "okay";
-
- vdd_cpu_big0_s0: regulator@42 {
- compatible = "rockchip,rk8602";
- reg = <0x42>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big0_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc4v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_big1_s0: regulator@43 {
- compatible = "rockchip,rk8603", "rockchip,rk8602";
- reg = <0x43>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big1_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc4v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-};
-
-&i2c2 {
- status = "okay";
-
- vdd_npu_s0: regulator@42 {
- compatible = "rockchip,rk8602";
- reg = <0x42>;
- rockchip,suspend-voltage-selector = <1>;
- regulator-name = "vdd_npu_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc4v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-};
-
-&i2c6 {
- clock-frequency = <200000>;
- status = "okay";
-
- fusb302: typec-portc@22 {
- compatible = "fcs,fusb302";
- reg = <0x22>;
- interrupt-parent = <&gpio0>;
- interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-0 = <&usbc0_int>;
- pinctrl-names = "default";
- vbus-supply = <&vbus5v0_typec>;
-
- connector {
- compatible = "usb-c-connector";
- data-role = "dual";
- label = "USB-C";
- power-role = "dual";
- try-power-role = "sink";
- source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
- sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
- op-sink-microwatt = <1000000>;
- };
- };
-
- hym8563: rtc@51 {
- compatible = "haoyu,hym8563";
- reg = <0x51>;
- #clock-cells = <0>;
- clock-output-names = "hym8563";
- pinctrl-names = "default";
- pinctrl-0 = <&hym8563_int>;
- interrupt-parent = <&gpio0>;
- interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
- wakeup-source;
- };
-};
-
-&i2c7 {
- clock-frequency = <200000>;
- status = "okay";
-
- rt5616: codec@1b {
- compatible = "realtek,rt5616";
- reg = <0x1b>;
- clocks = <&cru I2S0_8CH_MCLKOUT>;
- clock-names = "mclk";
- #sound-dai-cells = <0>;
- assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
- assigned-clock-rates = <12288000>;
-
- port {
- rt5616_p0_0: endpoint {
- remote-endpoint = <&i2s0_8ch_p0_0>;
- };
- };
- };
-
- /* connected with MIPI-CSI1 */
-};
-
-&i2c8 {
- pinctrl-0 = <&i2c8m2_xfer>;
-};
-
-&i2s0_8ch {
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_lrck
- &i2s0_mclk
- &i2s0_sclk
- &i2s0_sdi0
- &i2s0_sdo0>;
- status = "okay";
-
- i2s0_8ch_p0: port {
- i2s0_8ch_p0_0: endpoint {
- dai-format = "i2s";
- mclk-fs = <256>;
- remote-endpoint = <&rt5616_p0_0>;
- };
- };
-};
-
-&pcie2x1l0 {
- reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc_3v3_pcie20>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_0_rst>;
- status = "okay";
-};
-
-&pcie2x1l1 {
- reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_1_rst>;
- status = "okay";
-};
-
-&pcie2x1l2 {
- reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc_3v3_pcie20>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_2_rst>;
- status = "okay";
-};
-
-&pcie30phy {
- status = "okay";
-};
-
-&pcie3x4 {
- reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie30>;
- status = "okay";
-};
-
&pinctrl {
- gpio-leds {
- sys_led_pin: sys-led-pin {
- rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- usr_led_pin: usr-led-pin {
- rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- headphone {
- hp_det: hp-det {
- rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
- hym8563 {
- hym8563_int: hym8563-int {
- rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
- };
- };
-
- pcie {
- pcie2_0_rst: pcie2-0-rst {
- rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie2_1_rst: pcie2-1-rst {
- rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie2_2_rst: pcie2-2-rst {
- rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie_m2_0_pwren: pcie-m20-pwren {
- rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- pcie_m2_1_pwren: pcie-m21-pwren {
- rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
usb {
pin_4g_lte_pwren: 4g-lte-pwren {
rockchip,pins = <4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
};
-
- typec5v_pwren: typec5v-pwren {
- rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- usbc0_int: usbc0-int {
- rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
- };
- };
-};
-
-&pwm1 {
- pinctrl-0 = <&pwm1m1_pins>;
- status = "okay";
-};
-
-&saradc {
- vref-supply = <&avcc_1v8_s0>;
- status = "okay";
-};
-
-&sdhci {
- bus-width = <8>;
- no-sdio;
- no-sd;
- non-removable;
- max-frequency = <200000000>;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
- status = "okay";
-};
-
-&sdmmc {
- bus-width = <4>;
- cap-mmc-highspeed;
- cap-sd-highspeed;
- cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
- disable-wp;
- no-mmc;
- no-sdio;
- sd-uhs-sdr104;
- vmmc-supply = <&vcc3v3_sd_s0>;
- vqmmc-supply = <&vccio_sd_s0>;
- status = "okay";
-};
-
-&spi2 {
- status = "okay";
- assigned-clocks = <&cru CLK_SPI2>;
- assigned-clock-rates = <200000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
- num-cs = <1>;
-
- pmic@0 {
- compatible = "rockchip,rk806";
- spi-max-frequency = <1000000>;
- reg = <0x0>;
-
- interrupt-parent = <&gpio0>;
- interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
- <&rk806_dvs2_null>, <&rk806_dvs3_null>;
-
- system-power-controller;
-
- vcc1-supply = <&vcc4v0_sys>;
- vcc2-supply = <&vcc4v0_sys>;
- vcc3-supply = <&vcc4v0_sys>;
- vcc4-supply = <&vcc4v0_sys>;
- vcc5-supply = <&vcc4v0_sys>;
- vcc6-supply = <&vcc4v0_sys>;
- vcc7-supply = <&vcc4v0_sys>;
- vcc8-supply = <&vcc4v0_sys>;
- vcc9-supply = <&vcc4v0_sys>;
- vcc10-supply = <&vcc4v0_sys>;
- vcc11-supply = <&vcc_2v0_pldo_s3>;
- vcc12-supply = <&vcc4v0_sys>;
- vcc13-supply = <&vcc_1v1_nldo_s3>;
- vcc14-supply = <&vcc_1v1_nldo_s3>;
- vcca-supply = <&vcc4v0_sys>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl1";
- function = "pin_fun0";
- };
-
- rk806_dvs2_null: dvs2-null-pins {
- pins = "gpio_pwrctrl2";
- function = "pin_fun0";
- };
-
- rk806_dvs3_null: dvs3-null-pins {
- pins = "gpio_pwrctrl3";
- function = "pin_fun0";
- };
-
- regulators {
- vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_gpu_s0";
- regulator-enable-ramp-delay = <400>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_cpu_lit_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_log_s0: dcdc-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <750000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_log_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-init-microvolt = <750000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_vdenc_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_ddr_s0: dcdc-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <900000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_ddr_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- vdd2_ddr_s3: dcdc-reg6 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vdd2_ddr_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- };
- };
-
- vcc_2v0_pldo_s3: dcdc-reg7 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <2000000>;
- regulator-max-microvolt = <2000000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vdd_2v0_pldo_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <2000000>;
- };
- };
-
- vcc_3v3_s3: dcdc-reg8 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc_3v3_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vddq_ddr_s0: dcdc-reg9 {
- regulator-always-on;
- regulator-boot-on;
- regulator-name = "vddq_ddr_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v8_s3: dcdc-reg10 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc_1v8_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avcc_1v8_s0: pldo-reg1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "avcc_1v8_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v8_s0: pldo-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vcc_1v8_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avdd_1v2_s0: pldo-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-name = "avdd_1v2_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_3v3_s0: pldo-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vcc_3v3_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vccio_sd_s0: pldo-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
- regulator-name = "vccio_sd_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- pldo6_s3: pldo-reg6 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "pldo6_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vdd_0v75_s3: nldo-reg1 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "vdd_0v75_s3";
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_ddr_pll_s0: nldo-reg2 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
- regulator-name = "vdd_ddr_pll_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- avdd_0v75_s0: nldo-reg3 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "avdd_0v75_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v85_s0: nldo-reg4 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
- regulator-name = "vdd_0v85_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v75_s0: nldo-reg5 {
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
- regulator-name = "vdd_0v75_s0";
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
- };
};
};
-&tsadc {
- status = "okay";
-};
-
-&uart2 {
- pinctrl-0 = <&uart2m0_xfer>;
- status = "okay";
-};
-
&u2phy2_host {
phy-supply = <&vdd_4g_3v3>;
- status = "okay";
-};
-
-&u2phy3_host {
- status = "okay";
-};
-
-&u2phy2 {
- status = "okay";
-};
-
-&u2phy3 {
- status = "okay";
-};
-
-&usb_host0_ehci {
- status = "okay";
-};
-
-&usb_host0_ohci {
- status = "okay";
-};
-
-&usb_host1_ehci {
- status = "okay";
-};
-
-&usb_host1_ohci {
- status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
new file mode 100644
index 000000000000..fc131789b4c3
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2023 Thomas McKahan
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3588.dtsi"
+
+/ {
+ model = "FriendlyElec NanoPC-T6";
+ compatible = "friendlyarm,nanopc-t6", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ adc-keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "Mask Rom";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <2000>;
+ };
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_receiver_pin>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ sys_led: led-0 {
+ gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
+ label = "system-led";
+ linux,default-trigger = "heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sys_led_pin>;
+ };
+
+ usr_led: led-1 {
+ gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+ label = "user-led";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usr_led_pin>;
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_det>;
+
+ simple-audio-card,name = "realtek,rt5616-codec";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,hp-det-gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>;
+
+ simple-audio-card,widgets =
+ "Headphone", "Headphones",
+ "Microphone", "Microphone Jack";
+ simple-audio-card,routing =
+ "Headphones", "HPOL",
+ "Headphones", "HPOR",
+ "MIC1", "Microphone Jack",
+ "Microphone Jack", "micbias1";
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&rt5616>;
+ };
+ };
+
+ vcc12v_dcin: vcc12v-dcin-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ /* vcc5v0_sys powers peripherals */
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+	/* vcc4v0_sys powers the RK806 and the RK860s */
+ vcc4v0_sys: vcc4v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc4v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <4000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-1v1-nldo-s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc4v0_sys>;
+ };
+
+ vcc_3v3_pcie20: vcc3v3-pcie20-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_pcie20";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vbus5v0_typec: vbus5v0-typec-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec5v_pwren>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vbus5v0_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_m2_1_pwren>;
+ regulator-name = "vcc3v3_pcie2x1l0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie30: vcc3v3-pcie30-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_m2_0_pwren>;
+ regulator-name = "vcc3v3_pcie30";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sd_s0: vcc3v3-sd-s0-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio4 RK_PA5 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc3v3_sd_s0";
+ vin-supply = <&vcc_3v3_s3>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&gpio0 {
+ gpio-line-names = /* GPIO0 A0-A7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO0 B0-B7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO0 C0-C7 */
+ "", "", "", "",
+ "HEADER_10", "HEADER_08", "HEADER_32", "",
+ /* GPIO0 D0-D7 */
+ "", "", "", "",
+ "IR receiver [PWM3_IR_M0]", "", "", "";
+};
+
+&gpio1 {
+ gpio-line-names = /* GPIO1 A0-A7 */
+ "HEADER_27", "HEADER_28", "", "",
+ "", "", "", "HEADER_15",
+ /* GPIO1 B0-B7 */
+ "HEADER_26", "HEADER_21", "HEADER_19", "HEADER_23",
+ "HEADER_24", "HEADER_22", "", "",
+ /* GPIO1 C0-C7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO1 D0-D7 */
+ "", "", "", "",
+ "", "", "HEADER_05", "HEADER_03";
+};
+
+&gpio2 {
+ gpio-line-names = /* GPIO2 A0-A7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO2 B0-B7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO2 C0-C7 */
+ "", "CSI1_11", "CSI1_12", "",
+ "", "", "", "",
+ /* GPIO2 D0-D7 */
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names = /* GPIO3 A0-A7 */
+ "HEADER_35", "HEADER_38", "HEADER_40", "HEADER_36",
+ "HEADER_37", "", "DSI0_12", "",
+ /* GPIO3 B0-B7 */
+ "HEADER_33", "DSI0_10", "HEADER_07", "HEADER_16",
+ "HEADER_18", "HEADER_29", "HEADER_31", "HEADER_12",
+ /* GPIO3 C0-C7 */
+ "DSI0_08", "DSI0_14", "HEADER_11", "HEADER_13",
+ "", "", "", "",
+ /* GPIO3 D0-D7 */
+ "", "", "", "",
+ "", "DSI1_10", "", "";
+};
+
+&gpio4 {
+ gpio-line-names = /* GPIO4 A0-A7 */
+ "DSI1_08", "DSI1_14", "", "DSI1_12",
+ "", "", "", "",
+ /* GPIO4 B0-B7 */
+ "", "", "", "",
+ "", "", "", "",
+ /* GPIO4 C0-C7 */
+ "", "", "", "",
+ "CSI0_11", "CSI0_12", "", "",
+ /* GPIO4 D0-D7 */
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <200000>;
+ status = "okay";
+
+ fusb302: typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&usbc0_int>;
+ pinctrl-names = "default";
+ vbus-supply = <&vbus5v0_typec>;
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ power-role = "source";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usbc0_hs: endpoint {
+ remote-endpoint = <&usb_host0_xhci_drd_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ usbc0_ss: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ usbc0_sbu: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_sbu>;
+ };
+ };
+ };
+ };
+ };
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+};
+
+&i2c7 {
+ clock-frequency = <200000>;
+ status = "okay";
+
+ rt5616: codec@1b {
+ compatible = "realtek,rt5616";
+ reg = <0x1b>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+
+ port {
+ rt5616_p0_0: endpoint {
+ remote-endpoint = <&i2s0_8ch_p0_0>;
+ };
+ };
+ };
+
+ /* also connected to MIPI-CSI1 */
+};
+
+&i2c8 {
+ pinctrl-0 = <&i2c8m2_xfer>;
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+
+ i2s0_8ch_p0: port {
+ i2s0_8ch_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&rt5616_p0_0>;
+ };
+ };
+};
+
+&pcie2x1l0 {
+ reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ status = "okay";
+};
+
+&pcie2x1l1 {
+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_1_rst>;
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_2_rst>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ gpio-leds {
+ sys_led_pin: sys-led-pin {
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usr_led_pin: usr-led-pin {
+ rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ headphone {
+ hp_det: hp-det {
+ rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ ir-receiver {
+ ir_receiver_pin: ir-receiver-pin {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_1_rst: pcie2-1-rst {
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_m2_0_pwren: pcie-m20-pwren {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_m2_1_pwren: pcie-m21-pwren {
+ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ typec5v_pwren: typec5v-pwren {
+ rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbc0_int: usbc0-int {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-0 = <&pwm1m1_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ max-frequency = <200000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ no-mmc;
+ no-sdio;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sd_s0>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+/* SPI flash is optional on the non-LTS board and populated on the LTS version */
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim1_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ num-cs = <1>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ spi-max-frequency = <1000000>;
+ reg = <0x0>;
+
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+
+ system-power-controller;
+
+ vcc1-supply = <&vcc4v0_sys>;
+ vcc2-supply = <&vcc4v0_sys>;
+ vcc3-supply = <&vcc4v0_sys>;
+ vcc4-supply = <&vcc4v0_sys>;
+ vcc5-supply = <&vcc4v0_sys>;
+ vcc6-supply = <&vcc4v0_sys>;
+ vcc7-supply = <&vcc4v0_sys>;
+ vcc8-supply = <&vcc4v0_sys>;
+ vcc9-supply = <&vcc4v0_sys>;
+ vcc10-supply = <&vcc4v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc4v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc4v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy2_host {
+ status = "okay";
+};
+
+&u2phy3_host {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_typec_ss: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_ss>;
+ };
+
+ usbdp_phy0_typec_sbu: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc0_sbu>;
+ };
+ };
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+ usb-role-switch;
+
+ port {
+ usb_host0_xhci_drd_sw: endpoint {
+ remote-endpoint = <&usbc0_hs>;
+ };
+ };
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index e74871491ef5..c3a6812cc93a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -105,6 +105,13 @@
};
};
+ rfkill {
+ compatible = "rfkill-gpio";
+ label = "rfkill-pcie-wlan";
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ };
+
sound {
compatible = "simple-audio-card";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts
new file mode 100644
index 000000000000..467f69594089
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts
@@ -0,0 +1,1237 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3588s.dtsi"
+
+/ {
+ model = "Gameforce Ace";
+ chassis-type = "handset";
+ compatible = "gameforce,ace", "rockchip,rk3588s";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <60>;
+
+ button-vol-up {
+ label = "VOLUMEUP";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <17000>;
+ };
+
+ button-vol-down {
+ label = "VOLUMEDOWN";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <417000>;
+ };
+ };
+
+ /* Joystick range values based on hardware observation. */
+ adc_joystick: adc-joystick {
+ compatible = "adc-joystick";
+ io-channels = <&saradc 2>, <&saradc 3>,
+ <&saradc 4>, <&saradc 5>;
+ poll-interval = <60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ axis@0 {
+ reg = <0>;
+ abs-flat = <40>;
+ abs-fuzz = <30>;
+ abs-range = <0 4095>;
+ linux,code = <ABS_RX>;
+ };
+
+ axis@1 {
+ reg = <1>;
+ abs-flat = <40>;
+ abs-fuzz = <30>;
+ abs-range = <0 4095>;
+ linux,code = <ABS_RY>;
+ };
+
+ axis@2 {
+ reg = <2>;
+ abs-flat = <40>;
+ abs-fuzz = <30>;
+ abs-range = <0 4095>;
+ linux,code = <ABS_Y>;
+ };
+
+ axis@3 {
+ reg = <3>;
+ abs-flat = <40>;
+ abs-fuzz = <30>;
+ abs-range = <0 4095>;
+ linux,code = <ABS_X>;
+ };
+ };
+
+ /* Trigger range values based on hardware observation. */
+ adc_triggers: adc-trigger {
+ compatible = "adc-joystick";
+ io-channels = <&ti_adc 6>,
+ <&ti_adc 7>;
+ poll-interval = <60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ axis@0 {
+ reg = <0>;
+ abs-flat = <15>;
+ abs-fuzz = <15>;
+ abs-range = <890 1530>;
+ linux,code = <ABS_HAT2X>;
+ };
+
+ axis@1 {
+ reg = <1>;
+ abs-flat = <15>;
+ abs-fuzz = <15>;
+ abs-range = <1010 1550>;
+ linux,code = <ABS_HAT2Y>;
+ };
+ };
+
+ analog-sound {
+ compatible = "simple-audio-card";
+ pinctrl-0 = <&hp_detect>;
+ pinctrl-names = "default";
+ simple-audio-card,aux-devs = <&amp_headphone>, <&amp_speaker>;
+ simple-audio-card,bitclock-master = <&masterdai>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&masterdai>;
+ simple-audio-card,hp-det-gpio = <&gpio3 RK_PA6 GPIO_ACTIVE_LOW>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "rockchip,es8388-codec";
+ simple-audio-card,pin-switches = "Headphones", "Speaker";
+ simple-audio-card,routing =
+ "Speaker Amplifier INL", "LOUT2",
+ "Speaker Amplifier INR", "ROUT2",
+ "Speaker", "Speaker Amplifier OUTL",
+ "Speaker", "Speaker Amplifier OUTR",
+ "Headphones Amplifier INL", "LOUT1",
+ "Headphones Amplifier INR", "ROUT1",
+ "Headphones", "Headphones Amplifier OUTL",
+ "Headphones", "Headphones Amplifier OUTR",
+ "LINPUT1", "Microphone Jack",
+ "RINPUT1", "Microphone Jack",
+ "LINPUT2", "Onboard Microphone",
+ "RINPUT2", "Onboard Microphone";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Microphone", "Onboard Microphone",
+ "Headphone", "Headphones",
+ "Speaker", "Speaker";
+
+ masterdai: simple-audio-card,codec {
+ sound-dai = <&es8388>;
+ system-clock-frequency = <12288000>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ };
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ enable-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&lcd_bl_en>;
+ pinctrl-names = "default";
+ pwms = <&pwm13 0 25000 PWM_POLARITY_INVERTED>;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <3700000>;
+ constant-charge-current-max-microamp = <2500000>;
+ constant-charge-voltage-max-microvolt = <8750000>;
+ voltage-min-design-microvolt = <7400000>;
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&btn_pins_ctrl>;
+ pinctrl-names = "default";
+
+ button-a {
+ gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>;
+ label = "EAST";
+ linux,code = <BTN_EAST>;
+ };
+
+ button-b {
+ gpios = <&gpio1 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "SOUTH";
+ linux,code = <BTN_SOUTH>;
+ };
+
+ button-down {
+ gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
+ label = "DPAD-DOWN";
+ linux,code = <BTN_DPAD_DOWN>;
+ };
+
+ button-home {
+ gpios = <&gpio1 RK_PA0 GPIO_ACTIVE_LOW>;
+ label = "FUNCTION";
+ linux,code = <BTN_MODE>;
+ };
+
+ button-l1 {
+ gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_LOW>;
+ label = "L1";
+ linux,code = <BTN_TL>;
+ };
+
+ button-left {
+ gpios = <&gpio1 RK_PD7 GPIO_ACTIVE_LOW>;
+ label = "DPAD-LEFT";
+ linux,code = <BTN_DPAD_LEFT>;
+ };
+
+ button-menu {
+ gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
+ label = "HOME";
+ linux,code = <KEY_HOME>;
+ };
+
+ button-r1 {
+ gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_LOW>;
+ label = "R1";
+ linux,code = <BTN_TR>;
+ };
+
+ button-right {
+ gpios = <&gpio1 RK_PB7 GPIO_ACTIVE_LOW>;
+ label = "DPAD-RIGHT";
+ linux,code = <BTN_DPAD_RIGHT>;
+ };
+
+ button-select {
+ gpios = <&gpio1 RK_PA3 GPIO_ACTIVE_LOW>;
+ label = "SELECT";
+ linux,code = <BTN_SELECT>;
+ };
+
+ button-start {
+ gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_LOW>;
+ label = "START";
+ linux,code = <BTN_START>;
+ };
+
+ button-thumbl {
+ gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_LOW>;
+ label = "THUMBL";
+ linux,code = <BTN_THUMBL>;
+ };
+
+ button-thumbr {
+ gpios = <&gpio1 RK_PD6 GPIO_ACTIVE_LOW>;
+ label = "THUMBR";
+ linux,code = <BTN_THUMBR>;
+ };
+
+ button-up {
+ gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_LOW>;
+ label = "DPAD-UP";
+ linux,code = <BTN_DPAD_UP>;
+ };
+
+ button-x {
+ gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_LOW>;
+ label = "NORTH";
+ linux,code = <BTN_NORTH>;
+ };
+
+ button-y {
+ gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_LOW>;
+ label = "WEST";
+ linux,code = <BTN_WEST>;
+ };
+ };
+
+ gpio_leds: gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ green_led: led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_STATUS;
+ };
+
+ red_led: led-1 {
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_CHARGING;
+ };
+ };
+
+ amp_headphone: headphone-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&headphone_amplifier_en>;
+ pinctrl-names = "default";
+ sound-name-prefix = "Headphones Amplifier";
+ };
+
+ pwm_fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <0 120 150 180 210 240 255>;
+ fan-supply = <&vcc5v0_sys>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PB2 IRQ_TYPE_EDGE_RISING>;
+ pulses-per-revolution = <4>;
+ pwms = <&pwm12 0 50000 PWM_POLARITY_INVERTED>;
+ };
+
+ pwm_gpio33: pwm-33 {
+ compatible = "pwm-gpio";
+ gpios = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vib_right_h>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ };
+
+ pwm_gpio132: pwm-132 {
+ compatible = "pwm-gpio";
+ gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vib_left_h>;
+ pinctrl-names = "default";
+ #pwm-cells = <3>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clock-names = "ext_clock";
+ clocks = <&rtc_hym8563>;
+ pinctrl-0 = <&wifi_enable_h>;
+ pinctrl-names = "default";
+ post-power-on-delay-ms = <200>;
+ power-off-delay-us = <5000000>;
+ reset-gpios = <&gpio3 RK_PB4 GPIO_ACTIVE_LOW>;
+ };
+
+ amp_speaker: speaker-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&speaker_amplifier_en>;
+ pinctrl-names = "default";
+ sound-name-prefix = "Speaker Amplifier";
+ VCC-supply = <&vcc5v0_spk>;
+ };
+
+ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1100000>;
+ regulator-min-microvolt = <1100000>;
+ regulator-name = "vcc_1v1_nldo_s3";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_lcd0_n: vcc3v3-lcd0-n-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vcc_lcd_h>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc3v3_lcd0_n";
+ vin-supply = <&vcc_3v3_s3>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_sd_s0: vcc-3v3-sd-s0-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&sd_s0_pwr>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-name = "vcc_3v3_sd_s0";
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc5v0_spk: vcc5v0-spk-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vcc5v0_spk_pwr>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "vcc5v0_spk";
+ vin-supply = <&vcc5v0_sys>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "vcc5v0_sys";
+ };
+
+ vibrator_l: vibrator-l {
+ compatible = "pwm-vibrator";
+ pwm-names = "enable";
+ pwms = <&pwm_gpio132 0 20000000 0>;
+ };
+
+ vibrator_r: vibrator-r {
+ compatible = "pwm-vibrator";
+ pwm-names = "enable";
+ pwms = <&pwm_gpio33 0 20000000 0>;
+ };
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-0 = <&i2c0m2_xfer>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microvolt = <550000>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microvolt = <550000>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-max-microvolt = <950000>;
+ regulator-min-microvolt = <550000>;
+ regulator-name = "vdd_npu_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c3 {
+ status = "okay";
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PA6 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio1 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&touch_int>, <&touch_rst>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>;
+ touchscreen-inverted-x;
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <1920>;
+ touchscreen-swapped-x-y;
+ };
+};
+
+&i2c4 {
+ pinctrl-0 = <&i2c4m2_xfer>;
+ status = "okay";
+
+ ti_adc: adc@48 {
+ compatible = "ti,ads1015";
+ reg = <0x48>;
+ #address-cells = <1>;
+ #io-channel-cells = <1>;
+ #size-cells = <0>;
+
+ channel@4 {
+ reg = <4>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ };
+ };
+
+ imu@68 {
+ compatible = "invensense,mpu6880";
+ reg = <0x68>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_EDGE_RISING>;
+ };
+};
+
+&i2c6 {
+ pinctrl-0 = <&i2c6m3_xfer>;
+ status = "okay";
+
+ rtc_hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&hym8563_int>, <&clk32k_in>;
+ pinctrl-names = "default";
+ wakeup-source;
+ };
+
+ /* Battery profile from BSP device tree. */
+ battery@62 {
+ compatible = "cellwise,cw2015";
+ reg = <0x62>;
+
+ cellwise,battery-profile = /bits/ 8
+ <0x18 0x0A 0x76 0x6A 0x6A 0x6A 0x68 0x66
+ 0x62 0x5E 0x5A 0x58 0x5F 0x59 0x46 0x3D
+ 0x35 0x2D 0x28 0x21 0x29 0x38 0x44 0x50
+ 0x1A 0x85 0x07 0xAE 0x14 0x28 0x48 0x56
+ 0x66 0x66 0x66 0x6A 0x3E 0x1A 0x6C 0x3D
+ 0x09 0x38 0x1A 0x49 0x7B 0x96 0xA2 0x15
+ 0x3B 0x77 0x9A 0xB1 0x80 0x87 0xB0 0xCB
+ 0x2F 0x00 0x64 0xA5 0xB5 0x1C 0xF0 0x49>;
+ cellwise,monitor-interval-ms = <5000>;
+ monitored-battery = <&battery>;
+ status = "okay";
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ es8388: audio-codec@11 {
+ compatible = "everest,es8388";
+ reg = <0x11>;
+ assigned-clock-rates = <12288000>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ AVDD-supply = <&vcc_3v3_s3>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ DVDD-supply = <&vcc_1v8_s3>;
+ HPVDD-supply = <&vcc_3v3_s3>;
+ PVDD-supply = <&vcc_1v8_s3>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+};
+
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map1 {
+ trip = <&package_fan0>;
+ cooling-device = <&pwm_fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map2 {
+ trip = <&package_fan1>;
+ cooling-device = <&pwm_fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
+/*
+ * Attempts to use an M.2 SATA drive in this slot, with the correct
+ * nodes enabled in the device tree, worked intermittently but
+ * eventually resulted in a destroyed board. Advise caution.
+ */
+&pcie2x1l1 {
+ pinctrl-0 = <&pcie_rst>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pinctrl {
+ audio-amplifier {
+ headphone_amplifier_en: headphone-amplifier-en {
+ rockchip,pins =
+ <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ hp_detect: headphone-detect {
+ rockchip,pins =
+ <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ speaker_amplifier_en: speaker-amplifier-en {
+ rockchip,pins =
+ <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ bt {
+ bt_enable_h: bt-enable-h {
+ rockchip,pins =
+ <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins =
+ <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins =
+ <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ charger {
+ boost_enable_h: boost-enable-h {
+ rockchip,pins =
+ <4 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ charger_int_h: charger-int-h {
+ rockchip,pins =
+ <0 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins =
+ <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-btns {
+ btn_pins_ctrl: btn-pins-ctrl {
+ rockchip,pins =
+ <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PD7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-leds {
+ led_pins: led-pins {
+ rockchip,pins =
+ <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_up>,
+ <3 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ lcd_bl_en {
+ lcd_bl_en: lcd-bl-en {
+ rockchip,pins =
+ <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie-pins {
+ pcie_rst: pcie-rst {
+ rockchip,pins =
+ <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sd-pwr {
+ sd_s0_pwr: sd-s0-pwr {
+ rockchip,pins =
+ <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ spk-pwr {
+ vcc5v0_spk_pwr: vcc5v0-spk-pwr {
+ rockchip,pins =
+ <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ touch {
+ touch_int: touch-int {
+ rockchip,pins =
+ <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ touch_rst: touch-rst {
+ rockchip,pins =
+ <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb-typec {
+ usbc0_int: usbc0-int {
+ rockchip,pins =
+ <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ vcc3v3-lcd {
+ vcc_lcd_h: vcc-lcd-h {
+ rockchip,pins =
+ <4 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ vibrator {
+ vib_left_h: vib-left-h {
+ rockchip,pins =
+ <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ vib_right_h: vib-right-h {
+ rockchip,pins =
+ <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ wifi {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins =
+ <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ wifi_host_wake_irq: wifi-host-wake-irq {
+ rockchip,pins =
+ <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&pwm12 {
+ pinctrl-0 = <&pwm12m1_pins>;
+ status = "okay";
+};
+
+&pwm13 {
+ pinctrl-0 = <&pwm13m1_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sd;
+ no-sdio;
+ non-removable;
+ status = "okay";
+};
+
+&sdio {
+ #address-cells = <1>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ keep-power-in-suspend;
+ max-frequency = <150000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ no-mmc;
+ no-sd;
+ sd-uhs-sdr104;
+ #size-cells = <0>;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm43456-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-0 = <&wifi_host_wake_irq>;
+ pinctrl-names = "default";
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_sd_s0>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&spi2 {
+ #address-cells = <1>;
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-0 = <&spi2m2_pins>, <&spi2m2_cs0>;
+ pinctrl-names = "default";
+ #size-cells = <0>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ pinctrl-names = "default";
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-max-microvolt = <950000>;
+ regulator-min-microvolt = <550000>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <950000>;
+ regulator-min-microvolt = <550000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <750000>;
+ regulator-min-microvolt = <675000>;
+ regulator-name = "vdd_logic_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <950000>;
+ regulator-min-microvolt = <550000>;
+ regulator-name = "vdd_vdenc_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2000000>;
+ regulator-min-microvolt = <2000000>;
+ regulator-name = "vdd_2v0_pldo_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vccio_sd_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3_pldo6: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3_pldo6";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <750000>;
+ regulator-min-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <850000>;
+ regulator-min-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <837500>;
+ regulator-min-microvolt = <837500>;
+ regulator-name = "avdd_0v75_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&uart9 {
+ pinctrl-0 = <&uart9m2_xfer>, <&uart9m2_ctsn>, <&uart9m2_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ clocks = <&rtc_hym8563>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PB0 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-0 = <&bt_enable_h>, <&bt_host_wake_l>, <&bt_wake_l>;
+ pinctrl-names = "default";
+ shutdown-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts
new file mode 100644
index 000000000000..63d91236ba9f
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts
@@ -0,0 +1,903 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3588s.dtsi"
+
+/ {
+ model = "Hardkernel ODROID-M2";
+ compatible = "hardkernel,odroid-m2", "rockchip,rk3588s";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_led>, <&sys_led>;
+
+ led_pwr: led-0 {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "on";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ led_sys: led-1 {
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <0 192 224 255>;
+ fan-supply = <&vcc5v0_sys>;
+ pwms = <&pwm0 0 22222 0>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-1v1-vcc-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc4v0_sys>;
+ };
+
+ vcc3v3_lcd: regulator-3v3-vcc-lcd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pwren>;
+ regulator-name = "vcc3v3_lcd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc3v3_pcie: regulator-3v3-vcc-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren>;
+ regulator-name = "vcc3v3_pcie";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_3v3_s0: regulator-3v3-vcc-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc4v0_sys: regulator-4v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc4v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4800000>;
+ regulator-max-microvolt = <4800000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_pwren>;
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb2_host: regulator-5v0-vcc-usb2-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_host_pwren>;
+ regulator-name = "vcc5v0_usb2_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb3_host: regulator-5v0-vcc-usb3-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_host_pwren>;
+ regulator-name = "vcc5v0_usb3_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb3_typec: regulator-5v0-vcc-usb3-typec {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_typec_pwren>;
+ regulator-name = "vcc5v0_usb3_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcca: regulator-5v0-vcca {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc12v_dcin: regulator-12v0-vcc-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3_s0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1_miim
+ &gmac1_tx_bus2
+ &gmac1_rx_bus2
+ &gmac1_rgmii_clk
+ &gmac1_rgmii_bus
+ &gmac1_clkinout>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc4v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8m2_xfer>;
+ status = "okay";
+
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PA5 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vcc5v0_usb3_typec>;
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos = <PDO_FIXED(5000, 1000, PDO_FIXED_USB_COMM)>;
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_role_switch: endpoint {
+ remote-endpoint = <&usb_host0_xhci_role_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_orientation_switch: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usbc0_dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcf8563_int>;
+ wakeup-source;
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
+ reg = <1>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ hysteresis = <2000>;
+ temperature = <60000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ cooling-device = <&fan 1 THERMAL_NO_LIMIT>;
+ trip = <&package_fan0>;
+ };
+ };
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20x1_pins>;
+ reset-gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ lcd {
+ lcd_pwren: lcd-pwren {
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ pwr_led: pwr-led {
+ rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ sys_led: sys-led {
+ rockchip,pins = <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie20x1_pins: pcie20x1-pins {
+ rockchip,pins =
+ <1 RK_PA0 4 &pcfg_pull_none>,
+ <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>,
+ <1 RK_PA1 4 &pcfg_pull_none>;
+ };
+
+ pcie_pwren: pcie-pwren {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ regulator {
+ vcc5v0_pwren: vcc5v0-pwren {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rtc {
+ pcf8563_int: pcf8563-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb2_host_pwren: usb2-host-pwren {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb3_host_pwren: usb3-host-pwren {
+ rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb3_typec_pwren: usb3-typec-pwren {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbc0_int: usbc0-int {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0m2_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sd;
+ no-sdio;
+ non-removable;
+ vmmc-supply = <&vcc_3v3_s0>;
+ vqmmc-supply = <&vcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-mmc;
+ no-sdio;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0>, <&spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc4v0_sys>;
+ vcc2-supply = <&vcc4v0_sys>;
+ vcc3-supply = <&vcc4v0_sys>;
+ vcc4-supply = <&vcc4v0_sys>;
+ vcc5-supply = <&vcc4v0_sys>;
+ vcc6-supply = <&vcc4v0_sys>;
+ vcc7-supply = <&vcc4v0_sys>;
+ vcc8-supply = <&vcc4v0_sys>;
+ vcc9-supply = <&vcc4v0_sys>;
+ vcc10-supply = <&vcc4v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc4v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcca>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: dcdc-reg1 {
+ regulator-name = "vdd_gpu_s0";
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg2 {
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg3 {
+ regulator-name = "vdd_logic_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: dcdc-reg4 {
+ regulator-name = "vdd_vdenc_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-name = "vdd_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-name = "vdd2_ddr_s3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+				regulator-name = "vcc_2v0_pldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-name = "vcc_3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-name = "vddq_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-name = "vcc_1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg1 {
+ regulator-name = "vcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_1v8_s0: pldo-reg2 {
+ regulator-name = "vcca_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdda_1v2_s0: pldo-reg3 {
+ regulator-name = "vdda_1v2_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_3v3_s0: pldo-reg4 {
+ regulator-name = "vcca_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-name = "vccio_sd_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3_pldo6: pldo-reg6 {
+ regulator-name = "vcc_1v8_s3_pldo6";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-name = "vdd_0v75_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_ddr_pll_s0: nldo-reg2 {
+ regulator-name = "vdda_ddr_pll_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdda_0v75_s0: nldo-reg3 {
+ regulator-name = "vdda_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <837500>;
+ regulator-max-microvolt = <837500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v85_s0: nldo-reg4 {
+ regulator-name = "vdda_0v85_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+			/* Schematics show this regulator as not in use */
+ nldo-reg5 {
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_usb2_host>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_usb3_host>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ usb_host0_xhci_role_switch: endpoint {
+ remote-endpoint = <&usbc0_role_switch>;
+ };
+ };
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host2_xhci {
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA0 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orientation_switch>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc0_dp_altmode_mux>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
index 03ed48246d36..294b99dd50da 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
@@ -65,6 +65,18 @@
regulator-max-microvolt = <12000000>;
};
+ vcc3v3_wf: vcc3v3-wf-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_wf";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&pow_en>;
+ pinctrl-names = "default";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_host";
@@ -114,6 +126,10 @@
};
};
+&combphy0_ps {
+ status = "okay";
+};
+
&combphy2_psu {
status = "okay";
};
@@ -293,6 +309,14 @@
};
};
+&pcie2x1l2 {
+ pinctrl-0 = <&pcie20x1m0_pins>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_wf>;
+ status = "okay";
+};
+
&pinctrl {
leds {
io_led: io-led {
@@ -300,6 +324,12 @@
};
};
+ pcie {
+ pow_en: pow-en {
+ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
power {
vcc_5v0_en: vcc-5v0-en {
rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -317,28 +347,6 @@
rockchip,pins = <4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
-
- wifibt {
- wl_reset: wl-reset {
- rockchip,pins = <0 RK_PD0 RK_FUNC_GPIO &pcfg_pull_up>;
- };
-
- wl_dis: wl-dis {
- rockchip,pins = <0 RK_PD5 RK_FUNC_GPIO &pcfg_output_high>;
- };
-
- wl_wake_host: wl-wake-host {
- rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>;
- };
-
- bt_dis: bt-dis {
- rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_output_high>;
- };
-
- bt_wake_host: bt-wake-host {
- rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
- };
- };
};
&pwm3 {
@@ -754,8 +762,6 @@
&usb_host0_ehci {
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&wl_reset &wl_dis &wl_wake_host &bt_dis &bt_wake_host>;
};
&usb_host0_ohci {
diff --git a/arch/arm64/boot/dts/sprd/sc2731.dtsi b/arch/arm64/boot/dts/sprd/sc2731.dtsi
index e15409f55f43..12136e68dada 100644
--- a/arch/arm64/boot/dts/sprd/sc2731.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc2731.dtsi
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Spreadtrum SC2731 PMIC dts file
*
* Copyright (C) 2018, Spreadtrum Communications Inc.
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
&adi_bus {
@@ -95,7 +94,7 @@
nvmem-cells = <&adc_big_scale>, <&adc_small_scale>;
};
- fgu@a00 {
+ fuel-gauge@a00 {
compatible = "sprd,sc2731-fgu";
reg = <0xa00>;
bat-detect-gpio = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/sprd/sc9836-openphone.dts b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts
index e5657c35cd10..b98589ea5ac2 100644
--- a/arch/arm64/boot/dts/sprd/sc9836-openphone.dts
+++ b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
/*
* Spreadtrum SC9836 openphone board DTS file
*
* Copyright (C) 2014, Spreadtrum Communications Inc.
- *
- * This file is licensed under a dual GPLv2 or X11 license.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi
index 8bb8a70966d2..bc3fc9fc3d90 100644
--- a/arch/arm64/boot/dts/sprd/sc9836.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
/*
* Spreadtrum SC9836 SoC DTS file
*
* Copyright (C) 2014, Spreadtrum Communications Inc.
- *
- * This file is licensed under a dual GPLv2 or X11 license.
*/
#include "sharkl64.dtsi"
diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi
index 31952d361a8a..d2456d633c39 100644
--- a/arch/arm64/boot/dts/sprd/sc9860.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Spreadtrum SC9860 SoC
*
* Copyright (C) 2016, Spreadtrum Communications Inc.
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
index 53e5b77d70b5..e5a2857721e2 100644
--- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
@@ -551,14 +551,14 @@
#size-cells = <2>;
ranges;
- sdio0: sdio@20300000 {
+ sdio0: mmc@20300000 {
compatible = "sprd,sdhci-r11";
reg = <0 0x20300000 0 0x1000>;
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sdio", "enable";
clocks = <&aon_clk CLK_SDIO0_2X>,
<&apahb_gate CLK_SDIO0_EB>;
+ clock-names = "sdio", "enable";
assigned-clocks = <&aon_clk CLK_SDIO0_2X>;
assigned-clock-parents = <&rpll CLK_RPLL_390M>;
@@ -567,14 +567,14 @@
no-mmc;
};
- sdio3: sdio@20600000 {
+ sdio3: mmc@20600000 {
compatible = "sprd,sdhci-r11";
reg = <0 0x20600000 0 0x1000>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sdio", "enable";
clocks = <&aon_clk CLK_EMMC_2X>,
<&apahb_gate CLK_EMMC_EB>;
+ clock-names = "sdio", "enable";
assigned-clocks = <&aon_clk CLK_EMMC_2X>;
assigned-clock-parents = <&rpll CLK_RPLL_390M>;
diff --git a/arch/arm64/boot/dts/sprd/sharkl64.dtsi b/arch/arm64/boot/dts/sprd/sharkl64.dtsi
index 69f64e7fce7c..bf58702c4e07 100644
--- a/arch/arm64/boot/dts/sprd/sharkl64.dtsi
+++ b/arch/arm64/boot/dts/sprd/sharkl64.dtsi
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
/*
* Spreadtrum Sharkl64 platform DTS file
*
* Copyright (C) 2014, Spreadtrum Communications Inc.
- *
- * This file is licensed under a dual GPLv2 or X11 license.
*/
/ {
diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
index 1ce3cbbd9668..095b24a31313 100644
--- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
+++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Spreadtrum SP9860g board
*
* Copyright (C) 2017, Spreadtrum Communications Inc.
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi
index 4c080df48724..efa14309cc4e 100644
--- a/arch/arm64/boot/dts/sprd/ums512.dtsi
+++ b/arch/arm64/boot/dts/sprd/ums512.dtsi
@@ -849,9 +849,9 @@
compatible = "sprd,sdhci-r11";
reg = <0x1100000 0x1000>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sdio", "enable";
clocks = <&ap_clk CLK_SDIO0_2X>,
<&apapb_gate CLK_SDIO0_EB>;
+ clock-names = "sdio", "enable";
assigned-clocks = <&ap_clk CLK_SDIO0_2X>;
assigned-clock-parents = <&pll1 CLK_RPLL>;
status = "disabled";
@@ -861,9 +861,9 @@
compatible = "sprd,sdhci-r11";
reg = <0x1400000 0x1000>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sdio", "enable";
clocks = <&ap_clk CLK_EMMC_2X>,
<&apapb_gate CLK_EMMC_EB>;
+ clock-names = "sdio", "enable";
assigned-clocks = <&ap_clk CLK_EMMC_2X>;
assigned-clock-parents = <&pll1 CLK_RPLL>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi
index 7068bfd2f4c3..a551e14ce826 100644
--- a/arch/arm64/boot/dts/sprd/whale2.dtsi
+++ b/arch/arm64/boot/dts/sprd/whale2.dtsi
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Spreadtrum Whale2 platform peripherals
*
* Copyright (C) 2016, Spreadtrum Communications Inc.
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
#include <dt-bindings/clock/sprd,sc9860-clk.h>
@@ -75,9 +74,10 @@
"sprd,sc9836-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "enable", "uart", "source";
clocks = <&apapb_gate CLK_UART0_EB>,
- <&ap_clk CLK_UART0>, <&ext_26m>;
+ <&ap_clk CLK_UART0>,
+ <&ext_26m>;
+ clock-names = "enable", "uart", "source";
status = "disabled";
};
@@ -86,9 +86,10 @@
"sprd,sc9836-uart";
reg = <0x100000 0x100>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "enable", "uart", "source";
clocks = <&apapb_gate CLK_UART1_EB>,
- <&ap_clk CLK_UART1>, <&ext_26m>;
+ <&ap_clk CLK_UART1>,
+ <&ext_26m>;
+ clock-names = "enable", "uart", "source";
status = "disabled";
};
@@ -97,9 +98,10 @@
"sprd,sc9836-uart";
reg = <0x200000 0x100>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "enable", "uart", "source";
clocks = <&apapb_gate CLK_UART2_EB>,
- <&ap_clk CLK_UART2>, <&ext_26m>;
+ <&ap_clk CLK_UART2>,
+ <&ext_26m>;
+ clock-names = "enable", "uart", "source";
status = "disabled";
};
@@ -108,9 +110,10 @@
"sprd,sc9836-uart";
reg = <0x300000 0x100>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "enable", "uart", "source";
clocks = <&apapb_gate CLK_UART3_EB>,
- <&ap_clk CLK_UART3>, <&ext_26m>;
+ <&ap_clk CLK_UART3>,
+ <&ext_26m>;
+ clock-names = "enable", "uart", "source";
status = "disabled";
};
};
@@ -129,19 +132,19 @@
/* For backwards compatibility: */
#dma-channels = <32>;
dma-channels = <32>;
- clock-names = "enable";
clocks = <&apahb_gate CLK_DMA_EB>;
+ clock-names = "enable";
};
- sdio3: sdio@50430000 {
+ sdio3: mmc@50430000 {
compatible = "sprd,sdhci-r11";
reg = <0 0x50430000 0 0x1000>;
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sdio", "enable", "2x_enable";
clocks = <&aon_prediv CLK_EMMC_2X>,
- <&apahb_gate CLK_EMMC_EB>,
- <&aon_gate CLK_EMMC_2X_EN>;
+ <&apahb_gate CLK_EMMC_EB>,
+ <&aon_gate CLK_EMMC_2X_EN>;
+ clock-names = "sdio", "enable", "2x_enable";
assigned-clocks = <&aon_prediv CLK_EMMC_2X>;
assigned-clock-parents = <&clk_l0_409m6>;
@@ -194,8 +197,8 @@
compatible = "sprd,hwspinlock-r3p0";
reg = <0 0x40500000 0 0x1000>;
#hwlock-cells = <1>;
- clock-names = "enable";
clocks = <&aon_gate CLK_SPLK_EB>;
+ clock-names = "enable";
};
eic_debounce: gpio@40210000 {
@@ -258,9 +261,9 @@
reg = <0 0x40310000 0 0x1000>;
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
timeout-sec = <12>;
- clock-names = "enable", "rtc_enable";
clocks = <&aon_gate CLK_APCPU_WDG_EB>,
- <&aon_gate CLK_AP_WDG_RTC_EB>;
+ <&aon_gate CLK_AP_WDG_RTC_EB>;
+ clock-names = "enable", "rtc_enable";
};
};
@@ -277,9 +280,9 @@
/* For backwards compatibility: */
#dma-channels = <32>;
dma-channels = <32>;
- clock-names = "enable", "ashb_eb";
clocks = <&agcp_gate CLK_AGCP_DMAAP_EB>,
- <&agcp_gate CLK_AGCP_AP_ASHB_EB>;
+ <&agcp_gate CLK_AGCP_AP_ASHB_EB>;
+ clock-names = "enable", "ashb_eb";
};
};
};
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index e20b27ddf901..bcd392c3206e 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -73,10 +73,16 @@ k3-am654-gp-evm-dtbs := k3-am654-base-board.dtb \
k3-am654-pcie-usb3.dtbo
k3-am654-evm-dtbs := k3-am654-base-board.dtb k3-am654-icssg2.dtbo
k3-am654-idk-dtbs := k3-am654-evm.dtb k3-am654-idk.dtbo k3-am654-pcie-usb2.dtbo
+k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie-dtbs := k3-am6548-iot2050-advanced-m2.dtb \
+ k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtbo
+k3-am6548-iot2050-advanced-m2-bkey-usb3-dtbs := k3-am6548-iot2050-advanced-m2.dtb \
+ k3-am6548-iot2050-advanced-m2-bkey-usb3.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic-pg2.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2-bkey-usb3.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-pg2.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-sm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board.dtb
@@ -110,6 +116,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-pcie1-ep.dtbo
# Boards with J722s SoC
+dtb-$(CONFIG_ARCH_K3) += k3-am67a-beagley-ai.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm.dtb
# Boards with J784s4 SoC
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index 328929c740dc..5b92aef5b284 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -241,6 +241,7 @@
bootph-pre-ram;
compatible = "ti,j721e-esm";
reg = <0x00 0x420000 0x00 0x1000>;
+ /* Interrupt sources: rti0, rti1, rti15, wrti0, rti2, rti3 */
ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
index e66d486ef1f2..bb43a411f59b 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
@@ -19,6 +19,7 @@
bootph-pre-ram;
compatible = "ti,j721e-esm";
reg = <0x00 0x4100000 0x00 0x1000>;
+ /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */
ti,esm-pins = <0>, <1>, <2>, <85>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi
index 12ba833002a1..3c6a80aebd9f 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi
@@ -12,12 +12,29 @@ thermal_zones: thermal-zones {
thermal-sensors = <&wkup_vtm0 0>;
trips {
+ main0_alert: main0-alert {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
main0_crit: main0-crit {
temperature = <105000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&main0_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
main1_thermal: main1-thermal {
@@ -26,11 +43,28 @@ thermal_zones: thermal-zones {
thermal-sensors = <&wkup_vtm0 1>;
trips {
+ main1_alert: main1-alert {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
main1_crit: main1-crit {
temperature = <105000>; /* milliCelsius */
hysteresis = <2000>; /* milliCelsius */
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&main1_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi
index 4014add6320d..c3d1db47dc9f 100644
--- a/arch/arm64/boot/dts/ti/k3-am625.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi
@@ -50,6 +50,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 135 0>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -66,6 +67,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 136 0>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -82,6 +84,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 137 0>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -98,6 +101,7 @@
next-level-cache = <&L2_0>;
operating-points-v2 = <&a53_opp_table>;
clocks = <&k3_clks 138 0>;
+ #cooling-cells = <2>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index 916fcf3cc57d..16a578ae2b41 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -265,6 +265,14 @@
pinctrl-single,function-mask = <0xffffffff>;
};
+ main_esm: esm@420000 {
+ compatible = "ti,j721e-esm";
+ reg = <0x0 0x420000 0x0 0x1000>;
+ bootph-pre-ram;
+ /* Interrupt sources: rti0, rti1, wrti0, rti4, rti2, rti3 */
+ ti,esm-pins = <192>, <193>, <195>, <204>, <209>, <210>;
+ };
+
main_timer0: timer@2400000 {
compatible = "ti,am654-timer";
reg = <0x00 0x2400000 0x00 0x400>;
@@ -1088,4 +1096,14 @@
clocks = <&k3_clks 204 2>;
power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>;
};
+
+ e5010: jpeg-encoder@fd20000 {
+ compatible = "ti,am62a-jpeg-enc", "img,e5010-jpeg-enc";
+ reg = <0x00 0xfd20000 0x00 0x100>,
+ <0x00 0xfd20200 0x00 0x200>;
+ reg-names = "core", "mmu";
+ clocks = <&k3_clks 201 0>;
+ power-domains = <&k3_pds 201 TI_SCI_PD_EXCLUSIVE>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
index 8c36e56f4138..0469c766b769 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
@@ -15,6 +15,14 @@
status = "disabled";
};
+ mcu_esm: esm@4100000 {
+ compatible = "ti,j721e-esm";
+ reg = <0x0 0x4100000 0x0 0x1000>;
+ bootph-pre-ram;
+ /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */
+ ti,esm-pins = <0>, <1>, <2>, <85>;
+ };
+
/*
* The MCU domain timer interrupts are routed only to the ESM module,
* and not currently available for Linux. The MCU domain timers are
diff --git a/arch/arm64/boot/dts/ti/k3-am62a.dtsi b/arch/arm64/boot/dts/ti/k3-am62a.dtsi
index b1b884600293..4d79b3e9486a 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a.dtsi
@@ -61,6 +61,8 @@
<0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */
<0x00 0x0e000000 0x00 0x0e000000 0x00 0x01d20000>, /* Second peripheral window */
<0x00 0x0fd00000 0x00 0x0fd00000 0x00 0x00020000>, /* GPU */
+ <0x00 0x0fd20000 0x00 0x0fd20000 0x00 0x00000100>, /* JPEGENC0_CORE */
+ <0x00 0x0fd20200 0x00 0x0fd20200 0x00 0x00000200>, /* JPEGENC0_CORE_MMU */
<0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */
<0x00 0x30040000 0x00 0x30040000 0x00 0x00080000>, /* PRUSS-M */
<0x00 0x30101000 0x00 0x30101000 0x00 0x00010100>, /* CSI window */
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
index 9701fc69aed9..9b6f51379108 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
@@ -260,8 +260,9 @@
main_esm: esm@420000 {
compatible = "ti,j721e-esm";
reg = <0x00 0x420000 0x00 0x1000>;
- ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>;
bootph-pre-ram;
+		/* Interrupt sources: rti0, rti1, wrti0, rti2, rti3, rti15 */
+ ti,esm-pins = <224>, <225>, <227>, <241>, <242>, <248>;
};
main_timer0: timer@2400000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
index df7945156397..b33aff0d65c9 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
@@ -26,9 +26,9 @@
mcu_esm: esm@4100000 {
compatible = "ti,j721e-esm";
reg = <0x00 0x4100000 0x00 0x1000>;
- ti,esm-pins = <0>, <1>, <2>, <85>;
- status = "reserved";
bootph-pre-ram;
+ /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0, wrti0 */
+ ti,esm-pins = <0>, <1>, <2>, <85>, <86>;
};
/*
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
index 0ce9721b4176..420c77c8e9e5 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
@@ -65,5 +65,6 @@
&main_gpio1 {
gpio-ranges = <&main_pmx0 0 94 32>, <&main_pmx0 42 137 5>,
<&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>;
+ gpio-reserved-ranges = <32 10>;
ti,ngpio = <52>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index ff65955551a3..3efa12bb7254 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -645,8 +645,6 @@
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
- AM62PX_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */
- AM62PX_MCU_IOPAD(0x030, PIN_OUTPUT, 0) /* (C6) WKUP_UART0_RTSn */
AM62PX_MCU_IOPAD(0x024, PIN_INPUT, 0) /* (D8) WKUP_UART0_RXD */
AM62PX_MCU_IOPAD(0x028, PIN_OUTPUT, 0) /* (D7) WKUP_UART0_TXD */
>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index f8370dd03350..7eae18399caa 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -389,7 +389,8 @@
bootph-pre-ram;
compatible = "ti,j721e-esm";
reg = <0x00 0x420000 0x00 0x1000>;
- ti,esm-pins = <160>, <161>;
+ /* Interrupt sources: rti0, rti1, rti8, rti9, rti10, rti11 */
+ ti,esm-pins = <160>, <161>, <162>, <163>, <164>, <165>;
};
main_uart0: serial@2800000 {
@@ -677,6 +678,7 @@
assigned-clock-parents = <&k3_clks 13 9>;
clock-names = "fck";
power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
dmas = <&main_pktdma 0xC500 15>,
<&main_pktdma 0xC501 15>,
@@ -701,6 +703,7 @@
phys = <&phy_gmii_sel 1>;
mac-address = [00 00 00 00 00 00];
ti,syscon-efuse = <&main_conf 0x200>;
+ status = "disabled";
};
cpsw_port2: port@2 {
@@ -709,6 +712,7 @@
label = "port2";
phys = <&phy_gmii_sel 2>;
mac-address = [00 00 00 00 00 00];
+ status = "disabled";
};
};
@@ -759,7 +763,7 @@
};
usbss0: cdns-usb@f900000 {
- compatible = "ti,am64-usb";
+ compatible = "ti,am64-usb", "ti,j721e-usb";
reg = <0x00 0xf900000 0x00 0x100>;
power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 161 9>, <&k3_clks 161 1>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
index ec17285869da..ad4bed5d3f9e 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -158,6 +158,7 @@
bootph-pre-ram;
compatible = "ti,j721e-esm";
reg = <0x00 0x4100000 0x00 0x1000>;
- ti,esm-pins = <0>, <1>;
+ /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */
+ ti,esm-pins = <0>, <1>, <2>, <85>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
index ea7c58fb67e2..6bece2fb4e95 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
@@ -185,6 +185,7 @@
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&cpsw_rgmii1_pins_default>;
+ status = "okay";
};
&cpsw3g_mdio {
@@ -208,10 +209,7 @@
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
-};
-
-&cpsw_port2 {
- status = "disabled";
+ status = "okay";
};
&mailbox0_cluster2 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso
index f08c0e272b53..92faf762894c 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso
@@ -12,7 +12,7 @@
#include "k3-pinctrl.h"
&main_pmx0 {
- gpmc0_pins_default: gpmc0-pins-default {
+ gpmc0_default_pins: gpmc0-default-pins {
bootph-all;
pinctrl-single,pins = <
AM64X_IOPAD(0x0094, PIN_INPUT, 7) /* (T19) GPMC0_BE1n.GPIO0_36 */
@@ -50,7 +50,7 @@
};
&main_gpio0 {
- gpio0-36 {
+ gpmc0-hog {
bootph-all;
gpio-hog;
gpios = <36 0>;
@@ -67,7 +67,7 @@
&gpmc0 {
status = "okay";
pinctrl-names = "default";
- pinctrl-0 = <&gpmc0_pins_default>;
+ pinctrl-0 = <&gpmc0_default_pins>;
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index 6bb1ad2e56ec..97ca16f00cd2 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -616,17 +616,20 @@
bootph-all;
pinctrl-names = "default";
pinctrl-0 = <&rgmii1_pins_default>, <&rgmii2_pins_default>;
+ status = "okay";
};
&cpsw_port1 {
bootph-all;
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy0>;
+ status = "okay";
};
&cpsw_port2 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy3>;
+ status = "okay";
};
&cpsw3g_mdio {
@@ -646,6 +649,10 @@
&tscadc0 {
/* ADC is reserved for R5 usage */
status = "reserved";
+
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5 6 7>;
+ };
};
&ospi0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
index 30729b49dd69..60285d736e07 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
@@ -28,6 +28,8 @@
model = "PHYTEC phyBOARD-Electra-AM64x RDK";
aliases {
+ ethernet1 = &icssg0_emac0;
+ ethernet2 = &icssg0_emac1;
mmc1 = &sdhci1;
serial2 = &main_uart0;
serial3 = &main_uart1;
@@ -55,6 +57,73 @@
standby-gpios = <&main_gpio0 35 GPIO_ACTIVE_HIGH>;
};
+ /* Dual Ethernet application node on PRU-ICSSG0 */
+ ethernet {
+ compatible = "ti,am642-icssg-prueth";
+ pinctrl-names = "default";
+ pinctrl-0 = <&icssg0_rgmii1_pins_default>, <&icssg0_rgmii2_pins_default>;
+
+ interrupt-parent = <&icssg0_intc>;
+ interrupts = <24 0 2>, <25 1 3>;
+ interrupt-names = "tx_ts0", "tx_ts1";
+
+ sram = <&oc_sram>;
+		firmware-name = "ti-pruss/am64x-sr2-pru0-prueth-fw.elf",
+				"ti-pruss/am64x-sr2-rtu0-prueth-fw.elf",
+				"ti-pruss/am64x-sr2-txpru0-prueth-fw.elf",
+				"ti-pruss/am64x-sr2-pru1-prueth-fw.elf",
+				"ti-pruss/am64x-sr2-rtu1-prueth-fw.elf",
+				"ti-pruss/am64x-sr2-txpru1-prueth-fw.elf";
+
+ dmas = <&main_pktdma 0xc100 15>, /* egress slice 0 */
+ <&main_pktdma 0xc101 15>, /* egress slice 0 */
+ <&main_pktdma 0xc102 15>, /* egress slice 0 */
+ <&main_pktdma 0xc103 15>, /* egress slice 0 */
+ <&main_pktdma 0xc104 15>, /* egress slice 1 */
+ <&main_pktdma 0xc105 15>, /* egress slice 1 */
+ <&main_pktdma 0xc106 15>, /* egress slice 1 */
+ <&main_pktdma 0xc107 15>, /* egress slice 1 */
+ <&main_pktdma 0x4100 15>, /* ingress slice 0 */
+ <&main_pktdma 0x4101 15>; /* ingress slice 1 */
+ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
+ "tx1-0", "tx1-1", "tx1-2", "tx1-3",
+ "rx0", "rx1";
+
+ ti,prus = <&pru0_0>, <&rtu0_0>, <&tx_pru0_0>, <&pru0_1>, <&rtu0_1>, <&tx_pru0_1>;
+ ti,pruss-gp-mux-sel = <2>, /* MII mode */
+ <2>,
+ <2>,
+ <2>, /* MII mode */
+ <2>,
+ <2>;
+
+ ti,mii-g-rt = <&icssg0_mii_g_rt>;
+ ti,mii-rt = <&icssg0_mii_rt>;
+ ti,iep = <&icssg0_iep0>, <&icssg0_iep1>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ icssg0_emac0: port@0 {
+ reg = <0>;
+ phy-handle = <&icssg0_phy1>;
+ phy-mode = "rgmii-id";
+ /* Filled in by bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ ti,syscon-rgmii-delay = <&main_conf 0x4100>;
+ };
+
+ icssg0_emac1: port@1 {
+ reg = <1>;
+ phy-handle = <&icssg0_phy2>;
+ phy-mode = "rgmii-id";
+ /* Filled in by bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ ti,syscon-rgmii-delay = <&main_conf 0x4104>;
+ };
+ };
+ };
+
keys {
compatible = "gpio-keys";
autorepeat;
@@ -118,6 +187,12 @@
>;
};
+ clkout0_pins_default: clkout0-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0274, PIN_OUTPUT, 5) /* (A19) EXT_REFCLK1.CLKOUT0 */
+ >;
+ };
+
gpio_keys_pins_default: gpio-keys-default-pins {
pinctrl-single,pins = <
AM64X_IOPAD(0x0044, PIN_INPUT, 7) /* (T18) GPMC0_AD2.GPIO0_17 */
@@ -125,6 +200,49 @@
>;
};
+ icssg0_mdio_pins_default: icssg0-mdio-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0200, PIN_INPUT, 0) /* (P2) PRG0_MDIO0_MDIO */
+ AM64X_IOPAD(0x0204, PIN_OUTPUT, 0) /* (P3) PRG0_MDIO0_MDC */
+ AM64X_IOPAD(0x01A8, PIN_OUTPUT, 7) /* (V1) PRG0_PRU0_GPO18.GPIO1_18 */
+ AM64X_IOPAD(0x01AC, PIN_OUTPUT, 7) /* (W1) PRG0_PRU0_GPO19.GPIO1_19 */
+ >;
+ };
+
+ icssg0_rgmii1_pins_default: icssg0-rgmii1-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0160, PIN_INPUT, 2) /* (Y1) PRG0_PRU0_GPO0.PRG0_RGMII1_RD0 */
+ AM64X_IOPAD(0x0164, PIN_INPUT, 2) /* (R4) PRG0_PRU0_GPO1.PRG0_RGMII1_RD1 */
+ AM64X_IOPAD(0x0168, PIN_INPUT, 2) /* (U2) PRG0_PRU0_GPO2.PRG0_RGMII1_RD2 */
+ AM64X_IOPAD(0x016c, PIN_INPUT, 2) /* (V2) PRG0_PRU0_GPO3.PRG0_RGMII1_RD3 */
+ AM64X_IOPAD(0x0170, PIN_INPUT, 2) /* (AA2) PRG0_PRU0_GPO4.PRG0_RGMII1_RX_CTL */
+ AM64X_IOPAD(0x0178, PIN_INPUT, 2) /* (T3) PRG0_PRU0_GPO6.PRG0_RGMII1_RXC */
+ AM64X_IOPAD(0x018c, PIN_OUTPUT, 2) /* (Y3) PRG0_PRU0_GPO11.PRG0_RGMII1_TD0 */
+ AM64X_IOPAD(0x0190, PIN_OUTPUT, 2) /* (AA3) PRG0_PRU0_GPO12.PRG0_RGMII1_TD1 */
+ AM64X_IOPAD(0x0194, PIN_OUTPUT, 2) /* (R6) PRG0_PRU0_GPO13.PRG0_RGMII1_TD2 */
+ AM64X_IOPAD(0x0198, PIN_OUTPUT, 2) /* (V4) PRG0_PRU0_GPO14.PRG0_RGMII1_TD3 */
+ AM64X_IOPAD(0x019c, PIN_OUTPUT, 2) /* (T5) PRG0_PRU0_GPO15.PRG0_RGMII1_TX_CTL */
+ AM64X_IOPAD(0x01a0, PIN_OUTPUT, 2) /* (U4) PRG0_PRU0_GPO16.PRG0_RGMII1_TXC */
+ >;
+ };
+
+ icssg0_rgmii2_pins_default: icssg0-rgmii2-default-pins {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01b0, PIN_INPUT, 2) /* (Y2) PRG0_PRU1_GPO0.PRG0_RGMII2_RD0 */
+ AM64X_IOPAD(0x01b4, PIN_INPUT, 2) /* (W2) PRG0_PRU1_GPO1.PRG0_RGMII2_RD1 */
+ AM64X_IOPAD(0x01b8, PIN_INPUT, 2) /* (V3) PRG0_PRU1_GPO2.PRG0_RGMII2_RD2 */
+ AM64X_IOPAD(0x01bc, PIN_INPUT, 2) /* (T4) PRG0_PRU1_GPO3.PRG0_RGMII2_RD3 */
+ AM64X_IOPAD(0x01c0, PIN_INPUT, 2) /* (W3) PRG0_PRU1_GPO4.PRG0_RGMII2_RX_CTL */
+ AM64X_IOPAD(0x01c8, PIN_INPUT, 2) /* (R5) PRG0_PRU1_GPO6.PRG0_RGMII2_RXC */
+ AM64X_IOPAD(0x01dc, PIN_OUTPUT, 2) /* (W4) PRG0_PRU1_GPO11.PRG0_RGMII2_TD0 */
+ AM64X_IOPAD(0x01e0, PIN_OUTPUT, 2) /* (Y4) PRG0_PRU1_GPO12.PRG0_RGMII2_TD1 */
+ AM64X_IOPAD(0x01e4, PIN_OUTPUT, 2) /* (T6) PRG0_PRU1_GPO13.PRG0_RGMII2_TD2 */
+ AM64X_IOPAD(0x01e8, PIN_OUTPUT, 2) /* (U6) PRG0_PRU1_GPO14.PRG0_RGMII2_TD3 */
+ AM64X_IOPAD(0x01ec, PIN_OUTPUT, 2) /* (U5) PRG0_PRU1_GPO15.PRG0_RGMII2_TX_CTL */
+ AM64X_IOPAD(0x01f0, PIN_OUTPUT, 2) /* (AA4) PRG0_PRU1_GPO16.PRG0_RGMII2_TXC */
+ >;
+ };
+
main_i2c1_pins_default: main-i2c1-default-pins {
pinctrl-single,pins = <
AM64X_IOPAD(0x0268, PIN_INPUT, 0) /* (C18) I2C1_SCL */
@@ -198,6 +316,34 @@
};
};
+&icssg0_mdio {
+ pinctrl-names = "default";
+	pinctrl-0 = <&icssg0_mdio_pins_default>, <&clkout0_pins_default>;
+ status = "okay";
+
+ icssg0_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ reset-gpios = <&main_gpio1 18 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+
+ icssg0_phy2: ethernet-phy@2 {
+ compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
+ reg = <0x2>;
+ tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ reset-gpios = <&main_gpio1 19 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ };
+};
+
&main_i2c1 {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 44ecbcf1c844..86369525259c 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -527,16 +527,19 @@
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&rgmii1_pins_default>, <&rgmii2_pins_default>;
+ status = "okay";
};
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy0>;
+ status = "okay";
};
&cpsw_port2 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
+ status = "okay";
};
&cpsw3g_mdio {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
index c19d0b8bbf0f..a5cec9a07510 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
@@ -177,6 +177,7 @@
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&rgmii1_default_pins>;
+ status = "okay";
};
&cpsw3g_mdio {
@@ -210,10 +211,7 @@
&cpsw_port1 {
phy-mode = "rgmii-id";
phy-handle = <&ethernet_phy0>;
-};
-
-&cpsw_port2 {
- status = "disabled";
+ status = "okay";
};
&icssg1_mdio {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
index c40ad67cee01..e06a3b178b34 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
@@ -24,6 +24,8 @@
aliases {
ethernet0 = &cpsw_port1;
+ ethernet1 = &icssg1_emac0;
+ ethernet2 = &icssg1_emac1;
i2c1 = &mcu_i2c0;
mmc1 = &sdhci1;
serial0 = &mcu_uart0;
@@ -71,6 +73,66 @@
};
};
+ icssg1_eth: icssg1-eth {
+ compatible = "ti,am642-icssg-prueth";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pru_icssg1_rgmii1_pins>, <&pru_icssg1_rgmii2_pins>;
+ interrupt-parent = <&icssg1_intc>;
+ interrupts = <24 0 2>, <25 1 3>;
+ interrupt-names = "tx_ts0", "tx_ts1";
+ dmas = <&main_pktdma 0xc200 15>, /* egress slice 0 */
+ <&main_pktdma 0xc201 15>, /* egress slice 0 */
+ <&main_pktdma 0xc202 15>, /* egress slice 0 */
+ <&main_pktdma 0xc203 15>, /* egress slice 0 */
+ <&main_pktdma 0xc204 15>, /* egress slice 1 */
+ <&main_pktdma 0xc205 15>, /* egress slice 1 */
+ <&main_pktdma 0xc206 15>, /* egress slice 1 */
+ <&main_pktdma 0xc207 15>, /* egress slice 1 */
+ <&main_pktdma 0x4200 15>, /* ingress slice 0 */
+ <&main_pktdma 0x4201 15>; /* ingress slice 1 */
+ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
+ "tx1-0", "tx1-1", "tx1-2", "tx1-3",
+ "rx0", "rx1";
+ sram = <&oc_sram>;
+ firmware-name = "ti-pruss/am64x-sr2-pru0-prueth-fw.elf",
+ "ti-pruss/am64x-sr2-rtu0-prueth-fw.elf",
+ "ti-pruss/am64x-sr2-txpru0-prueth-fw.elf",
+ "ti-pruss/am64x-sr2-pru1-prueth-fw.elf",
+ "ti-pruss/am64x-sr2-rtu1-prueth-fw.elf",
+ "ti-pruss/am64x-sr2-txpru1-prueth-fw.elf";
+ ti,prus = <&pru1_0>, <&rtu1_0>, <&tx_pru1_0>, <&pru1_1>, <&rtu1_1>, <&tx_pru1_1>;
+ ti,pruss-gp-mux-sel = <2>, /* MII mode */
+ <2>,
+ <2>,
+ <2>, /* MII mode */
+ <2>,
+ <2>;
+ ti,mii-g-rt = <&icssg1_mii_g_rt>;
+ ti,mii-rt = <&icssg1_mii_rt>;
+ ti,iep = <&icssg1_iep0>, <&icssg1_iep1>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg1_emac0: port@0 {
+ reg = <0>;
+ phy-handle = <&icssg1_phy0c>;
+ phy-mode = "rgmii-id";
+ /* Filled in by bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ icssg1_emac1: port@1 {
+ reg = <1>;
+ phy-handle = <&icssg1_phy03>;
+ phy-mode = "rgmii-id";
+ /* Filled in by bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ };
+ };
+ };
+
fan0: pwm-fan {
compatible = "pwm-fan";
pinctrl-names = "default";
@@ -119,15 +181,13 @@
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&cpsw_pins>;
+ status = "okay";
};
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy0>;
-};
-
-&cpsw_port2 {
- status = "disabled";
+ status = "okay";
};
&cpsw3g_mdio {
@@ -154,6 +214,42 @@
status = "okay";
};
+&icssg1_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pru_icssg1_mdio_pins>;
+ status = "okay";
+
+	/* phy-mode is fixed up to rgmii-rxid by the prueth driver to account
+	 * for the SoC integration, so only an rx-internal-delay and no
+	 * tx-internal-delay is set for the PHYs.
+	 */
+
+ icssg1_phy03: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x3>;
+ reset-gpios = <&main_gpio1 47 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ ti,rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+
+ icssg1_phy0c: ethernet-phy@c {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0xc>;
+ reset-gpios = <&main_gpio1 51 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ ti,rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ };
+};
+
&main_gpio0 {
pinctrl-names = "default";
pinctrl-0 = <&main_gpio0_digital_pins>,
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi
index e2584a5efe34..b3c4c0eec3dc 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi
@@ -9,11 +9,6 @@
* Common bits of the IOT2050 Basic and Advanced variants, PG2
*/
-&mcu_r5fss0 {
- /* lock-step mode not supported on PG2 boards */
- ti,cluster-mode = <0>;
-};
-
&main_pmx0 {
cp2102n_reset_pin_default: cp2102n-reset-default-pins {
pinctrl-single,pins = <
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
index ef34b851e178..e5136ed94765 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
@@ -627,11 +627,62 @@
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_SPEED_LAN;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_SPEED_LAN;
+ };
+ };
};
icssg0_eth1_phy: ethernet-phy@1 {
reg = <1>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ };
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_SPEED_LAN;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_SPEED_LAN;
+ };
+ };
};
};
+
+&mcu_r5fss0 {
+ /* lock-step mode not supported on iot2050 boards */
+ ti,cluster-mode = <0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 1af3dedde1f6..1f1af7ea2330 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -54,6 +54,14 @@
};
};
+ main_esm: esm@700000 {
+ compatible = "ti,j721e-esm";
+ reg = <0x00 0x700000 0x00 0x1000>;
+ bootph-pre-ram;
+ /* Interrupt sources: rti0, rti1, rti2, rti3 */
+ ti,esm-pins = <224>, <225>, <226>, <227>;
+ };
+
serdes0: serdes@900000 {
compatible = "ti,phy-am654-serdes";
reg = <0x0 0x900000 0x0 0x2000>;
@@ -478,7 +486,7 @@
ranges = <0x0 0x0 0x00100000 0x1c000>;
serdes0_clk: clock@4080 {
- compatible = "ti,am654-serdes-ctrl", "syscon";
+ compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd";
reg = <0x4080 0x4>;
serdes0_mux: mux-controller {
@@ -489,7 +497,7 @@
};
serdes1_clk: clock@4090 {
- compatible = "ti,am654-serdes-ctrl", "syscon";
+ compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd";
reg = <0x4090 0x4>;
serdes1_mux: mux-controller {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 43c6118d2bf0..7cf1f646500a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -292,13 +292,13 @@
ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
<0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
<0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
- <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */
- <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
+ <0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>, /* FSS data region 1 */
+ <0x4 0x00000000 0x4 0x00000000 0x4 0x00000000>; /* FSS data region 0/3 */
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47040000 0x0 0x100>,
- <0x5 0x00000000 0x1 0x0000000>;
+ <0x5 0x00000000 0x1 0x00000000>;
interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
@@ -316,7 +316,7 @@
ospi1: spi@47050000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47050000 0x0 0x100>,
- <0x7 0x00000000 0x1 0x00000000>;
+ <0x7 0x00000000 0x1 0x00000000>;
interrupts = <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
@@ -440,6 +440,14 @@
};
};
+ mcu_esm: esm@40800000 {
+ compatible = "ti,j721e-esm";
+ reg = <0x00 0x40800000 0x00 0x1000>;
+ bootph-pre-ram;
+ /* Interrupt sources: mrti0, mrti1 */
+ ti,esm-pins = <104>, <105>;
+ };
+
mcu_rti1: watchdog@40610000 {
compatible = "ti,j7-rti-wdt";
reg = <0x0 0x40610000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
index c59baebc5a25..c74a0a25832c 100644
--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
@@ -69,11 +69,10 @@
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>,
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
- <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>,
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
<0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */
- <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>,
- <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>,
- <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>;
+ <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>,
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu: bus@28380000 {
compatible = "simple-bus";
@@ -89,9 +88,8 @@
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI space 1 */
- <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, /* FSS OSPI0 data region 1 */
- <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, /* FSS OSPI0 data region 3*/
- <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; /* FSS OSPI1 data region 3*/
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
cbass_wakeup: bus@42040000 {
compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
index 29a31891b3db..4968a47f31ea 100644
--- a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
+++ b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
@@ -22,8 +22,3 @@
compatible = "siemens,iot2050-basic", "ti,am654";
model = "SIMATIC IOT2050 Basic";
};
-
-&mcu_r5fss0 {
- /* lock-step mode not supported on this board */
- ti,cluster-mode = <0>;
-};
diff --git a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso
index 8bdb87fcbde0..b0ce2cb2fdc8 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso
@@ -8,6 +8,7 @@
/dts-v1/;
/plugin/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/net/ti-dp83867.h>
#include "k3-pinctrl.h"
@@ -58,9 +59,7 @@
<&main_udmap 0xc107>, /* egress slice 1 */
<&main_udmap 0x4100>, /* ingress slice 0 */
- <&main_udmap 0x4101>, /* ingress slice 1 */
- <&main_udmap 0x4102>, /* mgmnt rsp slice 0 */
- <&main_udmap 0x4103>; /* mgmnt rsp slice 1 */
+ <&main_udmap 0x4101>; /* ingress slice 1 */
dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
"tx1-0", "tx1-1", "tx1-2", "tx1-3",
"rx0", "rx1";
@@ -126,9 +125,7 @@
<&main_udmap 0xc207>, /* egress slice 1 */
<&main_udmap 0x4200>, /* ingress slice 0 */
- <&main_udmap 0x4201>, /* ingress slice 1 */
- <&main_udmap 0x4202>, /* mgmnt rsp slice 0 */
- <&main_udmap 0x4203>; /* mgmnt rsp slice 1 */
+ <&main_udmap 0x4201>; /* ingress slice 1 */
dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3",
"tx1-0", "tx1-1", "tx1-2", "tx1-3",
"rx0", "rx1";
@@ -154,6 +151,24 @@
};
};
};
+
+ transceiver1: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcan0_gpio_pins_default>;
+ standby-gpios = <&main_gpio1 47 GPIO_ACTIVE_LOW>;
+ };
+
+ transceiver2: can-phy1 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcan1_gpio_pins_default>;
+ standby-gpios = <&main_gpio1 67 GPIO_ACTIVE_LOW>;
+ };
};
&main_pmx0 {
@@ -243,6 +258,34 @@
AM65X_IOPAD(0x012c, PIN_INPUT, 2) /* (AG26) PRG1_PRU0_GPO19.PRG1_IEP0_EDC_SYNC_OUT0 */
>;
};
+
+ mcan0_gpio_pins_default: mcan0-gpio-default-pins {
+ pinctrl-single,pins = <
+			AM65X_IOPAD(0x023c, PIN_INPUT, 7) /* (V25) PRG0_PRU0_GPIO18.GPIO1_47 */
+ >;
+ };
+
+ mcan1_gpio_pins_default: mcan1-gpio-default-pins {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x028c, PIN_INPUT, 7) /* (Y26) PRG0_PRU1_GPIO18.GPIO1_67 */
+ >;
+ };
+};
+
+&wkup_pmx0 {
+ mcu_mcan0_pins_default: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ AM65X_WKUP_IOPAD(0x00ac, PIN_INPUT_PULLUP, 0) /* (W2) MCU_MCAN0_RX */
+ AM65X_WKUP_IOPAD(0x00a8, PIN_OUTPUT_PULLUP, 0) /* (W1) MCU_MCAN0_TX */
+ >;
+ };
+
+ mcu_mcan1_pins_default: mcu-mcan1-default-pins {
+ pinctrl-single,pins = <
+ AM65X_WKUP_IOPAD(0x00c4, PIN_INPUT_PULLUP, 1) /* (AD3) WKUP_GPIO0_5.MCU_MCAN1_RX */
+ AM65X_WKUP_IOPAD(0x00c0, PIN_OUTPUT_PULLUP, 1) /* (AC3) WKUP_GPIO0_4.MCU_MCAN1_TX */
+ >;
+ };
};
&icssg0_mdio {
@@ -294,3 +337,17 @@
pinctrl-names = "default";
pinctrl-0 = <&icssg1_iep0_pins_default>;
};
+
+&m_can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_pins_default>;
+ phys = <&transceiver1>;
+ status = "okay";
+};
+
+&m_can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan1_pins_default>;
+ phys = <&transceiver2>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso
new file mode 100644
index 000000000000..666237f6d79c
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOT2050 M.2 variant, overlay for B-key PCIE0_LANE0 + E-key PCIE1_LANE0
+ * Copyright (c) Siemens AG, 2022-2024
+ *
+ * Authors:
+ * Chao Zeng <chao.zeng@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/gpio/gpio.h>
+
+&pcie0_rc {
+ num-lanes = <1>;
+ phys = <&serdes0 PHY_TYPE_PCIE 1>;
+ phy-names = "pcie-phy0";
+ reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pcie1_rc {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso
new file mode 100644
index 000000000000..0f86235c9771
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOT2050 M.2 variant, overlay for B-key USB3.0 + E-key PCIE1_LANE0
+ * Copyright (c) Siemens AG, 2022-2024
+ *
+ * Authors:
+ * Chao Zeng <chao.zeng@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/gpio/gpio.h>
+
+&serdes0 {
+ assigned-clock-parents = <&k3_clks 153 7>, <&k3_clks 153 4>;
+};
+
+&pcie0_rc {
+ status = "disabled";
+};
+
+&pcie1_rc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&minipcie_pins_default>;
+
+ num-lanes = <1>;
+ phys = <&serdes1 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy0";
+ reset-gpios = <&wkup_gpio0 27 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&dwc3_0 {
+ assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
+ <&k3_clks 151 8>; /* set PIPE3_TXB_CLK to WIZ8B2M4VSB */
+ phys = <&serdes0 PHY_TYPE_USB3 0>;
+ phy-names = "usb3-phy";
+};
+
+&usb0 {
+ maximum-speed = "super-speed";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
new file mode 100644
index 000000000000..44dfbdf89277
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * https://beagley-ai.org/
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2024 Robert Nelson, BeagleBoard.org Foundation
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include "k3-j722s.dtsi"
+
+/ {
+ compatible = "beagle,am67a-beagley-ai", "ti,j722s";
+ model = "BeagleBoard.org BeagleY-AI";
+
+ aliases {
+ serial0 = &wkup_uart0;
+ serial2 = &main_uart0;
+ mmc1 = &sdhci1;
+ rtc0 = &rtc;
+ };
+
+ chosen {
+ stdout-path = &main_uart0;
+ };
+
+ memory@80000000 {
+ /* 4G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000000 0x80000000>;
+ device_type = "memory";
+ bootph-pre-ram;
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure_tfa_ddr: tfa@9e780000 {
+ reg = <0x00 0x9e780000 0x00 0x80000>;
+ no-map;
+ };
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>;
+ no-map;
+ };
+
+ wkup_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0100000 0x00 0xf00000>;
+ no-map;
+ };
+ };
+
+ vsys_5v0: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ bootph-all;
+ };
+
+ vdd_3v3: regulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vsys_5v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_mmc1: regulator-3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_3v3_sd_pins_default>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&main_gpio1 50 GPIO_ACTIVE_HIGH>;
+ bootph-all;
+ };
+
+ vdd_sd_dv: regulator-4 {
+ compatible = "regulator-gpio";
+ regulator-name = "tlv71033";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_dv_pins_default>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&vsys_5v0>;
+ gpios = <&main_gpio1 49 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ bootph-all;
+ };
+
+ vsys_io_1v8: regulator-5 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_io_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vsys_io_1v2: regulator-6 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_io_1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_default>;
+
+ led-0 {
+ gpios = <&main_gpio0 11 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led-1 {
+ gpios = <&main_gpio0 12 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ default-state = "on";
+ };
+ };
+};
+
+&main_pmx0 {
+ main_i2c0_pins_default: main-i2c0-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (D23) I2C0_SCL */
+ J722S_IOPAD(0x01e4, PIN_INPUT_PULLUP, 0) /* (B22) I2C0_SDA */
+ >;
+ bootph-all;
+ };
+
+ main_uart0_pins_default: main-uart0-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x01c8, PIN_INPUT, 0) /* (A22) UART0_RXD */
+ J722S_IOPAD(0x01cc, PIN_OUTPUT, 0) /* (B22) UART0_TXD */
+ >;
+ bootph-all;
+ };
+
+ vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0244, PIN_OUTPUT, 7) /* (A24) MMC1_SDWP.GPIO1_49 */
+ >;
+ bootph-all;
+ };
+
+ main_mmc1_pins_default: main-mmc1-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x023c, PIN_INPUT, 0) /* (H22) MMC1_CMD */
+ J722S_IOPAD(0x0234, PIN_OUTPUT, 0) /* (H24) MMC1_CLK */
+ J722S_IOPAD(0x0230, PIN_INPUT, 0) /* (H23) MMC1_DAT0 */
+ J722S_IOPAD(0x022c, PIN_INPUT_PULLUP, 0) /* (H20) MMC1_DAT1 */
+ J722S_IOPAD(0x0228, PIN_INPUT_PULLUP, 0) /* (J23) MMC1_DAT2 */
+ J722S_IOPAD(0x0224, PIN_INPUT_PULLUP, 0) /* (H25) MMC1_DAT3 */
+ J722S_IOPAD(0x0240, PIN_INPUT, 7) /* (B24) MMC1_SDCD.GPIO1_48 */
+ >;
+ bootph-all;
+ };
+
+ mdio_pins_default: mdio-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0160, PIN_OUTPUT, 0) /* (AC24) MDIO0_MDC */
+ J722S_IOPAD(0x015c, PIN_INPUT, 0) /* (AD25) MDIO0_MDIO */
+ >;
+ };
+
+ rgmii1_pins_default: rgmii1-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x014c, PIN_INPUT, 0) /* (AC25) RGMII1_RD0 */
+ J722S_IOPAD(0x0150, PIN_INPUT, 0) /* (AD27) RGMII1_RD1 */
+ J722S_IOPAD(0x0154, PIN_INPUT, 0) /* (AE24) RGMII1_RD2 */
+ J722S_IOPAD(0x0158, PIN_INPUT, 0) /* (AE26) RGMII1_RD3 */
+ J722S_IOPAD(0x0148, PIN_INPUT, 0) /* (AE27) RGMII1_RXC */
+ J722S_IOPAD(0x0144, PIN_INPUT, 0) /* (AD23) RGMII1_RX_CTL */
+ J722S_IOPAD(0x0134, PIN_OUTPUT, 0) /* (AF27) RGMII1_TD0 */
+ J722S_IOPAD(0x0138, PIN_OUTPUT, 0) /* (AE23) RGMII1_TD1 */
+ J722S_IOPAD(0x013c, PIN_OUTPUT, 0) /* (AG25) RGMII1_TD2 */
+ J722S_IOPAD(0x0140, PIN_OUTPUT, 0) /* (AF24) RGMII1_TD3 */
+ J722S_IOPAD(0x0130, PIN_OUTPUT, 0) /* (AG26) RGMII1_TXC */
+ J722S_IOPAD(0x012c, PIN_OUTPUT, 0) /* (AF25) RGMII1_TX_CTL */
+ >;
+ };
+
+ led_pins_default: led-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x002c, PIN_OUTPUT, 7) /* (K26) OSPI0_CSn0.GPIO0_11 */
+ J722S_IOPAD(0x0030, PIN_OUTPUT, 7) /* (K23) OSPI0_CSn1.GPIO0_12 */
+ >;
+ };
+
+ pmic_irq_pins_default: pmic-irq-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x01f4, PIN_INPUT_PULLUP, 0) /* (B23) EXTINTn */
+ >;
+ };
+
+ vdd_3v3_sd_pins_default: vdd-3v3-sd-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0254, PIN_OUTPUT, 7) /* (E25) USB0_DRVVBUS.GPIO1_50 */
+ >;
+ };
+};
+
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii1_pins_default>;
+ status = "okay";
+};
+
+&cpsw3g_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio_pins_default>;
+ status = "okay";
+
+ cpsw3g_phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ };
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy0>;
+ status = "okay";
+};
+
+&main_gpio1 {
+ status = "okay";
+};
+
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+ bootph-all;
+ status = "okay";
+};
+
+&mcu_pmx0 {
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */
+ J722S_MCU_IOPAD(0x030, PIN_OUTPUT, 0) /* (C6) WKUP_UART0_RTSn */
+ J722S_MCU_IOPAD(0x024, PIN_INPUT, 0) /* (D8) WKUP_UART0_RXD */
+ J722S_MCU_IOPAD(0x028, PIN_OUTPUT, 0) /* (D7) WKUP_UART0_TXD */
+ >;
+ bootph-all;
+ };
+
+ wkup_i2c0_pins_default: wkup-i2c0-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x04c, PIN_INPUT_PULLUP, 0) /* (C7) WKUP_I2C0_SCL */
+ J722S_MCU_IOPAD(0x050, PIN_INPUT_PULLUP, 0) /* (C6) WKUP_I2C0_SDA */
+ >;
+ bootph-all;
+ };
+};
+
+&wkup_uart0 {
+ /* WKUP UART0 is used by Device Manager firmware */
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_uart0_pins_default>;
+ bootph-all;
+ status = "reserved";
+};
+
+&wkup_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_i2c0_pins_default>;
+ clock-frequency = <100000>;
+ bootph-all;
+ status = "okay";
+
+ tps65219: pmic@30 {
+ compatible = "ti,tps65219";
+ reg = <0x30>;
+ buck1-supply = <&vsys_5v0>;
+ buck2-supply = <&vsys_5v0>;
+ buck3-supply = <&vsys_5v0>;
+ ldo1-supply = <&vdd_3v3>;
+ ldo3-supply = <&vdd_3v3>;
+ ldo4-supply = <&vdd_3v3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&gic500>;
+ interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ bootph-all;
+ system-power-controller;
+ ti,power-button;
+
+ regulators {
+ buck1_reg: buck1 {
+ regulator-name = "VDD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck2_reg: buck2 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: ldo1 {
+ regulator-name = "VDDSHV5_SDIO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-allow-bypass;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-name = "VDD_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-name = "VDDA_PHY_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-name = "VDDA_PLL_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+
+ rtc: rtc@68 {
+ compatible = "dallas,ds1340";
+ reg = <0x68>;
+ };
+};
+
+&sdhci1 {
+ /* SD/MMC */
+ vmmc-supply = <&vdd_mmc1>;
+ vqmmc-supply = <&vdd_sd_dv>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ disable-wp;
+ cd-gpios = <&main_gpio1 48 GPIO_ACTIVE_LOW>;
+ cd-debounce-delay-ms = <100>;
+ bootph-all;
+ ti,fails-without-test-cd;
+ status = "okay";
+};
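
The vdd_sd_dv node above is a regulator-gpio instance implementing the 1.8 V/3.3 V I/O rail switch that UHS SD signalling needs. A minimal sketch of how the states property maps output voltages to GPIO levels; the node name and GPIO line here are hypothetical:

	sd_io_switch: regulator-sd-io {
		compatible = "regulator-gpio";
		regulator-name = "sd-io";
		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <3300000>;
		gpios = <&some_gpio 5 GPIO_ACTIVE_HIGH>;	/* hypothetical control line */
		states = <1800000 0x0>,		/* GPIO low:  1.8 V signalling */
			 <3300000 0x1>;		/* GPIO high: 3.3 V signalling */
	};

The MMC core drives the switch through the vqmmc-supply handle, as wired up in the sdhci1 node above.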
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
index 90dbe31c5b81..d5ceab79536c 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts
@@ -204,6 +204,7 @@
pinctrl-single,pins = <
J721S2_IOPAD(0x104, PIN_INPUT, 0) /* (P23) MMC1_CLK */
J721S2_IOPAD(0x108, PIN_INPUT, 0) /* (N24) MMC1_CMD */
+ J721S2_IOPAD(0x100, PIN_INPUT, 0) /* (###) MMC1_CLKLB */
J721S2_IOPAD(0x0fc, PIN_INPUT, 0) /* (M23) MMC1_DAT0 */
J721S2_IOPAD(0x0f8, PIN_INPUT, 0) /* (P24) MMC1_DAT1 */
J721S2_IOPAD(0x0f4, PIN_INPUT, 0) /* (R24) MMC1_DAT2 */
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
index 5c66e0ec6e82..5bc0d2fb4b8f 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
@@ -215,9 +215,9 @@
reg = <0x680000 0x40000>;
};
- partition@740000 {
+ partition@6c0000 {
label = "ospi.env.backup";
- reg = <0x740000 0x40000>;
+ reg = <0x6c0000 0x40000>;
};
partition@800000 {
@@ -302,6 +302,39 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer3 {
+ status = "reserved";
+};
+
+&main_timer4 {
+ status = "reserved";
+};
+
+&main_timer5 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
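
The partition move above changes the node name and the reg offset together: under the fixed-partitions binding the unit address must match the offset in reg, so changing one without the other would be flagged by dtc's unit-address checks. A sketch of the resulting layout, assuming the surrounding partitions container follows the usual pattern (only the reg values and the backup label come from the visible context; the rest is illustrative):

	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		partition@680000 {
			reg = <0x680000 0x40000>;	/* offset, size */
		};

		partition@6c0000 {			/* unit address == reg offset */
			label = "ospi.env.backup";
			reg = <0x6c0000 0x40000>;
		};
	};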
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index 3f655852244e..1e36965a1403 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -979,6 +979,59 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer3 {
+ status = "reserved";
+};
+
+&main_timer4 {
+ status = "reserved";
+};
+
+&main_timer5 {
+ status = "reserved";
+};
+
+&main_timer6 {
+ status = "reserved";
+};
+
+&main_timer7 {
+ status = "reserved";
+};
+
+&main_timer8 {
+ status = "reserved";
+};
+
+&main_timer9 {
+ status = "reserved";
+};
+
+&main_r5fss2 {
+ ti,cluster-mode = <0>;
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
index 21fe194a5766..e78b4622a7d1 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
@@ -84,13 +84,13 @@
};
};
- mux0: mux-controller {
+ mux0: mux-controller-0 {
compatible = "gpio-mux";
#mux-state-cells = <1>;
mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>;
};
- mux1: mux-controller {
+ mux1: mux-controller-1 {
compatible = "gpio-mux";
#mux-state-cells = <1>;
mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>;
@@ -262,6 +262,23 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
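
The mux-controller renames above matter because dtc merges sibling nodes with identical names: two bare mux-controller nodes occupy the same path, so the second definition silently overrides the first and only one mux survives. Schematic illustration, reusing the properties from the hunk:

	/* Before: same node path, the two definitions collapse into one node. */
	mux0: mux-controller { mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>; };
	mux1: mux-controller { mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>; };

	/* After: distinct suffixed names give two independent nodes. */
	mux0: mux-controller-0 { mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>; };
	mux1: mux-controller-1 { mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>; };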
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
index a2925555fe81..fb899c99753e 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
@@ -123,7 +123,7 @@
no-map;
};
- c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+ c66_0_dma_memory_region: c66-dma-memory@a6000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
@@ -135,7 +135,7 @@
no-map;
};
- c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+ c66_1_dma_memory_region: c66-dma-memory@a7000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 6b6ef6a30614..3731ffb4a5c9 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -354,8 +354,8 @@
<0x0 0x47034000 0x0 0x47034000 0x0 0x100>, /* HBMC Control */
<0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
<0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
- <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* HBMC/OSPI0 Memory */
- <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
+ <0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>, /* FSS data region 1 */
+ <0x4 0x00000000 0x4 0x00000000 0x4 0x00000000>; /* FSS data region 0/3 */
hbmc_mux: mux-controller@47000004 {
compatible = "reg-mux";
@@ -367,7 +367,7 @@
hbmc: hyperbus@47034000 {
compatible = "ti,am654-hbmc";
reg = <0x00 0x47034000 0x00 0x100>,
- <0x05 0x00000000 0x01 0x0000000>;
+ <0x05 0x00000000 0x01 0x00000000>;
power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 102 0>;
assigned-clocks = <&k3_clks 102 5>;
@@ -381,7 +381,7 @@
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47040000 0x0 0x100>,
- <0x5 0x00000000 0x1 0x0000000>;
+ <0x5 0x00000000 0x1 0x00000000>;
interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
@@ -399,7 +399,7 @@
ospi1: spi@47050000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47050000 0x0 0x100>,
- <0x7 0x00000000 0x1 0x0000000>;
+ <0x7 0x00000000 0x1 0x00000000>;
interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
index 89fbfb21e5d3..6285e8d94dde 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
@@ -120,7 +120,7 @@
no-map;
};
- c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+ c66_0_dma_memory_region: c66-dma-memory@a6000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
@@ -132,7 +132,7 @@
no-map;
};
- c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+ c66_1_dma_memory_region: c66-dma-memory@a7000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
@@ -1311,6 +1311,43 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer12 {
+ status = "reserved";
+};
+
+&main_timer13 {
+ status = "reserved";
+};
+
+&main_timer14 {
+ status = "reserved";
+};
+
+&main_timer15 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
index 5ba947771b84..cef47c67493f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
@@ -561,6 +561,43 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer12 {
+ status = "reserved";
+};
+
+&main_timer13 {
+ status = "reserved";
+};
+
+&main_timer14 {
+ status = "reserved";
+};
+
+&main_timer15 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index 5a72c518ceb6..a7f2f52f42f7 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -145,8 +145,7 @@
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>,
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>;
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu_wakeup: bus@28380000 {
compatible = "simple-bus";
@@ -162,9 +161,8 @@
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
- <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
};
};
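
The ranges rewrites in this and the following hunks replace the two separate 4 GiB OSPI windows at 0x5_0000_0000 and 0x7_0000_0000 with a single 16 GiB identity mapping. Each ranges entry is a <child-address parent-address size> triple; with two cells per address and per size, the new entry decodes as:

	/*        child (hi lo)    parent (hi lo)     size (hi lo)   */
	ranges = <0x04 0x00000000  0x04 0x00000000    0x04 0x00000000>;
	/* identity map of 16 GiB at 0x4_0000_0000, which spans the  */
	/* old 0x5_ and 0x7_ windows, i.e. FSS data regions 0 to 3.  */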
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso b/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso
index 1be28283c7d9..8583178fa1f3 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso
@@ -48,7 +48,7 @@
};
&exp1 {
- p15 {
+ p15-hog {
/* P15 - EXP_MUX2 */
gpio-hog;
gpios = <13 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
index 8feb42c89e47..9d96b19d0e7c 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
@@ -622,8 +622,8 @@
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>,
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>;
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
index 82aacc01e8fe..89252e4a5f1b 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
@@ -134,13 +134,13 @@
};
};
- mux0: mux-controller {
+ mux0: mux-controller-0 {
compatible = "gpio-mux";
#mux-state-cells = <1>;
mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>;
};
- mux1: mux-controller {
+ mux1: mux-controller-1 {
compatible = "gpio-mux";
#mux-state-cells = <1>;
mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>;
@@ -516,6 +516,39 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer3 {
+ status = "reserved";
+};
+
+&main_timer4 {
+ status = "reserved";
+};
+
+&main_timer5 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
index 568e6a04619d..ea16f82822ae 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
@@ -141,8 +141,7 @@
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>,
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>;
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu_wakeup: bus@28380000 {
compatible = "simple-bus";
@@ -158,9 +157,8 @@
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
- <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index dd3b5f7039d7..a00f4a7d20d9 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -20,6 +20,7 @@
aliases {
serial0 = &wkup_uart0;
serial2 = &main_uart0;
+ serial3 = &main_uart5;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
};
@@ -51,12 +52,71 @@
no-map;
};
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
wkup_r5fss0_core0_memory_region: r5f-memory@a0100000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
+ mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_dma_memory_region: main-r5fss-dma-memory-region@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: main-r5fss-memory-region@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c7x_0_dma_memory_region: c7x-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_0_memory_region: c7x-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c7x_1_dma_memory_region: c7x-dma-memory@a4000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_1_memory_region: c7x-memory@a4100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a5000000 {
+ reg = <0x00 0xa5000000 0x00 0x1c00000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
vmain_pd: regulator-0 {
@@ -162,10 +222,39 @@
clocks = <&audio_refclk1>;
};
};
+
+ transceiver0: can-phy0 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_gpio_pins_default>;
+ standby-gpios = <&mcu_gpio0 12 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver1: can-phy1 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ };
+
+ transceiver2: can-phy2 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ standby-gpios = <&exp1 17 GPIO_ACTIVE_HIGH>;
+ };
};
&main_pmx0 {
+ main_mcan0_pins_default: main-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x1dc, PIN_INPUT, 0) /* (C22) MCAN0_RX */
+ J722S_IOPAD(0x1d8, PIN_OUTPUT, 0) /* (D22) MCAN0_TX */
+ >;
+ };
+
main_i2c0_pins_default: main-i2c0-default-pins {
pinctrl-single,pins = <
J722S_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (D23) I2C0_SCL */
@@ -182,6 +271,13 @@
bootph-all;
};
+ main_uart5_pins_default: main-uart5-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x0108, PIN_INPUT, 3) /* (J27) UART5_RXD */
+ J722S_IOPAD(0x010c, PIN_OUTPUT, 3) /* (H27) UART5_TXD */
+ >;
+ };
+
vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
pinctrl-single,pins = <
J722S_IOPAD(0x0120, PIN_INPUT, 7) /* (F27) MMC2_CMD.GPIO0_70 */
@@ -301,8 +397,35 @@
bootph-all;
};
+&main_uart5 {
+ /* MAIN UART 5 is used by System firmware */
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart5_pins_default>;
+ status = "reserved";
+};
+
&mcu_pmx0 {
+ mcu_mcan0_pins_default: mcu-mcan0-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x038, PIN_INPUT, 0) /* (D8) MCU_MCAN0_RX */
+ J722S_MCU_IOPAD(0x034, PIN_OUTPUT, 0) /* (B2) MCU_MCAN0_TX */
+ >;
+ };
+
+ mcu_mcan1_pins_default: mcu-mcan1-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x040, PIN_INPUT, 0) /* (B1) MCU_MCAN1_RX */
+ J722S_MCU_IOPAD(0x03c, PIN_OUTPUT, 0) /* (C1) MCU_MCAN1_TX */
+ >;
+ };
+
+ mcu_mcan0_gpio_pins_default: mcu-mcan0-gpio-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x0030, PIN_OUTPUT, 7) /* (C3) MCU_GPIO0_12 */
+ >;
+ };
+
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
J722S_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */
@@ -494,6 +617,104 @@
bootph-all;
};
+&mailbox0_cluster0 {
+ status = "okay";
+
+ mbox_wkup_r5_0: mbox-wkup-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ status = "okay";
+
+ mbox_mcu_r5_0: mbox-mcu-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster2 {
+ status = "okay";
+
+ mbox_c7x_0: mbox-c7x-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster3 {
+ status = "okay";
+
+ mbox_main_r5_0: mbox-main-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_c7x_1: mbox-c7x-1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_wkup_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0 {
+ status = "okay";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster1 &mbox_mcu_r5_0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0 {
+ status = "okay";
+};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster3 &mbox_main_r5_0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&c7x_0 {
+ mboxes = <&mailbox0_cluster2 &mbox_c7x_0>;
+ memory-region = <&c7x_0_dma_memory_region>,
+ <&c7x_0_memory_region>;
+ status = "okay";
+};
+
+&c7x_1 {
+ mboxes = <&mailbox0_cluster3 &mbox_c7x_1>;
+ memory-region = <&c7x_1_dma_memory_region>,
+ <&c7x_1_memory_region>;
+ status = "okay";
+};
+
&serdes_ln_ctrl {
idle-states = <J722S_SERDES0_LANE0_USB>,
<J722S_SERDES1_LANE0_PCIE0_LANE0>;
@@ -566,3 +787,28 @@
0 0 0 0
>;
};
+
+&mcu_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan0_pins_default>;
+ phys = <&transceiver0>;
+ status = "okay";
+};
+
+&mcu_mcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_mcan1_pins_default>;
+ phys = <&transceiver1>;
+ status = "okay";
+};
+
+&main_mcan0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mcan0_pins_default>;
+ phys = <&transceiver2>;
+ status = "okay";
+};
+
+&mcu_gpio0 {
+ status = "okay";
+};
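
Each sub-mailbox added above carries an rx and a tx specifier. In the TI K3/OMAP mailbox binding these are, as assumed here, <fifo-id user-id irq-id> triples; a hedged sketch with a hypothetical node name:

	mbox_hypothetical: mbox-remote-core {
		ti,mbox-rx = <0 0 0>;	/* receive on fifo 0, user 0, irq 0 */
		ti,mbox-tx = <1 0 0>;	/* transmit on fifo 1, same user/irq */
	};

The consumer then names the cluster and sub-mailbox as a pair, as in mboxes = <&mailbox0_cluster1 &mbox_mcu_r5_0> above.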
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
index dde4bd5c6645..ed6f4ba08afc 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
@@ -153,6 +153,67 @@
dr_mode = "otg";
};
};
+
+ main_r5fss0: r5fss@78400000 {
+ compatible = "ti,am62-r5fss";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x78400000 0x00 0x78400000 0x8000>,
+ <0x78500000 0x00 0x78500000 0x8000>;
+ power-domains = <&k3_pds 261 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ main_r5fss0_core0: r5f@78400000 {
+ compatible = "ti,am62-r5f";
+ reg = <0x78400000 0x00008000>,
+ <0x78500000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ resets = <&k3_reset 262 1>;
+ firmware-name = "j722s-main-r5f0_0-fw";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <262>;
+ ti,sci-proc-ids = <0x04 0xff>;
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+ };
+
+ c7x_0: dsp@7e000000 {
+ compatible = "ti,am62a-c7xv-dsp";
+ reg = <0x00 0x7e000000 0x00 0x00200000>;
+ reg-names = "l2sram";
+ resets = <&k3_reset 208 1>;
+ firmware-name = "j722s-c71_0-fw";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <208>;
+ ti,sci-proc-ids = <0x30 0xff>;
+ status = "disabled";
+ };
+
+ c7x_1: dsp@7e200000 {
+ compatible = "ti,am62a-c7xv-dsp";
+ reg = <0x00 0x7e200000 0x00 0x00200000>;
+ reg-names = "l2sram";
+ resets = <&k3_reset 268 1>;
+ firmware-name = "j722s-c71_1-fw";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <268>;
+ ti,sci-proc-ids = <0x31 0xff>;
+ status = "disabled";
+ };
+};
+
+/* MCU domain overrides */
+
+&mcu_r5fss0_core0 {
+ firmware-name = "j722s-mcu-r5f0_0-fw";
+};
+
+/* Wakeup domain overrides */
+
+&wkup_r5fss0_core0 {
+ firmware-name = "j722s-wkup-r5f0_0-fw";
};
&main_conf {
@@ -214,5 +275,6 @@
&main_gpio1 {
gpio-ranges = <&main_pmx0 7 101 25>, <&main_pmx0 42 137 5>,
<&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>;
+ gpio-reserved-ranges = <0 7>, <32 10>;
ti,ngpio = <73>;
};
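
gpio-reserved-ranges is part of the generic GPIO controller binding and takes <base count> pairs; the listed lines are masked off so no consumer can request them. Decoding the property added above:

	/* <base count> pairs: lines 0-6 and 32-41 become unavailable */
	gpio-reserved-ranges = <0 7>, <32 10>;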
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
index ffa38f41679d..6695ebbcb4d0 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
@@ -1154,6 +1154,59 @@
<&mcu_r5fss0_core1_memory_region>;
};
+&main_r5fss0 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss1 {
+ ti,cluster-mode = <0>;
+};
+
+&main_r5fss2 {
+ ti,cluster-mode = <0>;
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&main_timer3 {
+ status = "reserved";
+};
+
+&main_timer4 {
+ status = "reserved";
+};
+
+&main_timer5 {
+ status = "reserved";
+};
+
+&main_timer6 {
+ status = "reserved";
+};
+
+&main_timer7 {
+ status = "reserved";
+};
+
+&main_timer8 {
+ status = "reserved";
+};
+
+&main_timer9 {
+ status = "reserved";
+};
+
&main_r5fss0_core0 {
status = "okay";
mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
@@ -1407,10 +1460,11 @@
serdes1_pcie0_link: phy@0 {
reg = <0>;
- cdns,num-lanes = <2>;
+ cdns,num-lanes = <4>;
#phy-cells = <0>;
cdns,phy-type = <PHY_TYPE_PCIE>;
- resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>;
+ resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>,
+ <&serdes_wiz1 3>, <&serdes_wiz1 4>;
};
};
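
Widening serdes1_pcie0_link from two lanes to four also lengthens the resets list, because the SERDES WIZ wrapper hands out one reset per lane. Assuming, per the WIZ binding, that index 0 is the PHY-wide reset and the lanes sit at indices 1..N, the pairing rule looks like this (wrapper label hypothetical):

	link_sketch: phy@0 {				/* hypothetical 4-lane link */
		reg = <0>;
		cdns,num-lanes = <4>;			/* N lanes ...             */
		#phy-cells = <0>;
		cdns,phy-type = <PHY_TYPE_PCIE>;
		resets = <&serdes_wiz_x 1>, <&serdes_wiz_x 2>,	/* ... require N   */
			 <&serdes_wiz_x 3>, <&serdes_wiz_x 4>;	/* per-lane resets */
	};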
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
index d4ac1c9872a5..e73bb750b09a 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi
@@ -2429,7 +2429,7 @@
watchdog0: watchdog@2200000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2200000 0x00 0x100>;
- clocks = <&k3_clks 348 1>;
+ clocks = <&k3_clks 348 0>;
power-domains = <&k3_pds 348 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 348 0>;
assigned-clock-parents = <&k3_clks 348 4>;
@@ -2438,7 +2438,7 @@
watchdog1: watchdog@2210000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2210000 0x00 0x100>;
- clocks = <&k3_clks 349 1>;
+ clocks = <&k3_clks 349 0>;
power-domains = <&k3_pds 349 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 349 0>;
assigned-clock-parents = <&k3_clks 349 4>;
@@ -2447,7 +2447,7 @@
watchdog2: watchdog@2220000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2220000 0x00 0x100>;
- clocks = <&k3_clks 350 1>;
+ clocks = <&k3_clks 350 0>;
power-domains = <&k3_pds 350 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 350 0>;
assigned-clock-parents = <&k3_clks 350 4>;
@@ -2456,7 +2456,7 @@
watchdog3: watchdog@2230000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2230000 0x00 0x100>;
- clocks = <&k3_clks 351 1>;
+ clocks = <&k3_clks 351 0>;
power-domains = <&k3_pds 351 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 351 0>;
assigned-clock-parents = <&k3_clks 351 4>;
@@ -2465,7 +2465,7 @@
watchdog4: watchdog@2240000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2240000 0x00 0x100>;
- clocks = <&k3_clks 352 1>;
+ clocks = <&k3_clks 352 0>;
power-domains = <&k3_pds 352 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 352 0>;
assigned-clock-parents = <&k3_clks 352 4>;
@@ -2474,7 +2474,7 @@
watchdog5: watchdog@2250000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2250000 0x00 0x100>;
- clocks = <&k3_clks 353 1>;
+ clocks = <&k3_clks 353 0>;
power-domains = <&k3_pds 353 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 353 0>;
assigned-clock-parents = <&k3_clks 353 4>;
@@ -2483,7 +2483,7 @@
watchdog6: watchdog@2260000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2260000 0x00 0x100>;
- clocks = <&k3_clks 354 1>;
+ clocks = <&k3_clks 354 0>;
power-domains = <&k3_pds 354 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 354 0>;
assigned-clock-parents = <&k3_clks 354 4>;
@@ -2492,7 +2492,7 @@
watchdog7: watchdog@2270000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2270000 0x00 0x100>;
- clocks = <&k3_clks 355 1>;
+ clocks = <&k3_clks 355 0>;
power-domains = <&k3_pds 355 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 355 0>;
assigned-clock-parents = <&k3_clks 355 4>;
@@ -2506,7 +2506,7 @@
watchdog8: watchdog@22f0000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x22f0000 0x00 0x100>;
- clocks = <&k3_clks 360 1>;
+ clocks = <&k3_clks 360 0>;
power-domains = <&k3_pds 360 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 360 0>;
assigned-clock-parents = <&k3_clks 360 4>;
@@ -2517,7 +2517,7 @@
watchdog9: watchdog@2300000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2300000 0x00 0x100>;
- clocks = <&k3_clks 356 1>;
+ clocks = <&k3_clks 356 0>;
power-domains = <&k3_pds 356 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 356 0>;
assigned-clock-parents = <&k3_clks 356 4>;
@@ -2528,7 +2528,7 @@
watchdog10: watchdog@2310000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2310000 0x00 0x100>;
- clocks = <&k3_clks 357 1>;
+ clocks = <&k3_clks 357 0>;
power-domains = <&k3_pds 357 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 357 0>;
assigned-clock-parents = <&k3_clks 357 4>;
@@ -2539,7 +2539,7 @@
watchdog11: watchdog@2320000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2320000 0x00 0x100>;
- clocks = <&k3_clks 358 1>;
+ clocks = <&k3_clks 358 0>;
power-domains = <&k3_pds 358 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 358 0>;
assigned-clock-parents = <&k3_clks 358 4>;
@@ -2550,7 +2550,7 @@
watchdog12: watchdog@2330000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2330000 0x00 0x100>;
- clocks = <&k3_clks 359 1>;
+ clocks = <&k3_clks 359 0>;
power-domains = <&k3_pds 359 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 359 0>;
assigned-clock-parents = <&k3_clks 359 4>;
@@ -2561,7 +2561,7 @@
watchdog13: watchdog@23c0000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x23c0000 0x00 0x100>;
- clocks = <&k3_clks 361 1>;
+ clocks = <&k3_clks 361 0>;
power-domains = <&k3_pds 361 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 361 0>;
assigned-clock-parents = <&k3_clks 361 4>;
@@ -2572,7 +2572,7 @@
watchdog14: watchdog@23d0000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x23d0000 0x00 0x100>;
- clocks = <&k3_clks 362 1>;
+ clocks = <&k3_clks 362 0>;
power-domains = <&k3_pds 362 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 362 0>;
assigned-clock-parents = <&k3_clks 362 4>;
@@ -2583,7 +2583,7 @@
watchdog15: watchdog@23e0000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x23e0000 0x00 0x100>;
- clocks = <&k3_clks 363 1>;
+ clocks = <&k3_clks 363 0>;
power-domains = <&k3_pds 363 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 363 0>;
assigned-clock-parents = <&k3_clks 363 4>;
@@ -2594,7 +2594,7 @@
watchdog16: watchdog@23f0000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x23f0000 0x00 0x100>;
- clocks = <&k3_clks 364 1>;
+ clocks = <&k3_clks 364 0>;
power-domains = <&k3_pds 364 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 364 0>;
assigned-clock-parents = <&k3_clks 364 4>;
@@ -2605,7 +2605,7 @@
watchdog17: watchdog@2540000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2540000 0x00 0x100>;
- clocks = <&k3_clks 365 1>;
+ clocks = <&k3_clks 365 0>;
power-domains = <&k3_pds 365 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 365 0>;
assigned-clock-parents = <&k3_clks 366 4>;
@@ -2616,7 +2616,7 @@
watchdog18: watchdog@2550000 {
compatible = "ti,j7-rti-wdt";
reg = <0x00 0x2550000 0x00 0x100>;
- clocks = <&k3_clks 366 1>;
+ clocks = <&k3_clks 366 0>;
power-domains = <&k3_pds 366 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 366 0>;
assigned-clock-parents = <&k3_clks 366 4>;
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
index f3a6ed1c979d..f603380fc91c 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
@@ -678,16 +678,16 @@
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */
- <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */
- <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */
- <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */
- <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */
+ ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00000100>, /* FSS Control */
+ <0x00 0x47040000 0x00 0x47040000 0x00 0x00000100>, /* OSPI0 Control */
+ <0x00 0x47050000 0x00 0x47050000 0x00 0x00000100>, /* OSPI1 Control */
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
ospi0: spi@47040000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x00 0x47040000 0x00 0x100>,
- <0x05 0x0000000 0x01 0x0000000>;
+ <0x05 0x00000000 0x01 0x00000000>;
interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
@@ -705,7 +705,7 @@
ospi1: spi@47050000 {
compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x00 0x47050000 0x00 0x100>,
- <0x07 0x0000000 0x01 0x0000000>;
+ <0x07 0x00000000 0x01 0x00000000>;
interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <256>;
cdns,fifo-width = <4>;
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
index 73cc3c1fec08..5e84c6b4f5ad 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi
@@ -271,8 +271,7 @@
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
<0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>,
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>,
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>;
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>;
cbass_mcu_wakeup: bus@28380000 {
bootph-all;
@@ -289,9 +288,8 @@
<0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */
<0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */
<0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */
- <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */
- <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */
- <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */
+ <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */
};
};
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
index b04829b3175d..39806f0ae513 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
@@ -196,8 +196,8 @@
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins>;
- clocks = <&pismu TMPV770X_CLK_PIUART0>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PIUART0>, <&pismu TMPV770X_CLK_PIUART0>;
+ clock-names = "uartclk", "apb_pclk";
status = "disabled";
};
@@ -207,8 +207,8 @@
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
- clocks = <&pismu TMPV770X_CLK_PIUART1>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PIUART1>, <&pismu TMPV770X_CLK_PIUART1>;
+ clock-names = "uartclk", "apb_pclk";
status = "disabled";
};
@@ -218,8 +218,8 @@
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
- clocks = <&pismu TMPV770X_CLK_PIUART2>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PIUART2>, <&pismu TMPV770X_CLK_PIUART2>;
+ clock-names = "uartclk", "apb_pclk";
status = "disabled";
};
@@ -229,8 +229,8 @@
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
- clocks = <&pismu TMPV770X_CLK_PIUART2>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PIUART2>, <&pismu TMPV770X_CLK_PIUART2>;
+ clock-names = "uartclk", "apb_pclk";
status = "disabled";
};
@@ -360,8 +360,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI1>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI1>, <&pismu TMPV770X_CLK_PISPI1>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -374,8 +374,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI1>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI1>, <&pismu TMPV770X_CLK_PISPI1>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -388,8 +388,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI2>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI2>, <&pismu TMPV770X_CLK_PISPI2>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -402,8 +402,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI3>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI3>, <&pismu TMPV770X_CLK_PISPI3>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -416,8 +416,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI4>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI4>, <&pismu TMPV770X_CLK_PISPI4>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -430,8 +430,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI5>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI5>, <&pismu TMPV770X_CLK_PISPI5>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
@@ -444,8 +444,8 @@
num-cs = <1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&pismu TMPV770X_CLK_PISPI6>;
- clock-names = "apb_pclk";
+ clocks = <&pismu TMPV770X_CLK_PISPI6>, <&pismu TMPV770X_CLK_PISPI6>;
+ clock-names = "sspclk", "apb_pclk";
status = "disabled";
};
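
The tmpv7708 changes align the UART and SPI nodes with the ARM PrimeCell bindings, which expect the functional clock first ("uartclk" for PL011, "sspclk" for PL022) and the APB bus clock ("apb_pclk") second; here one PLL output serves both roles, hence the duplicated phandle. A sketch for the PL011 case, with hypothetical clock indices:

	uart_sketch: serial@0 {
		compatible = "arm,pl011", "arm,primecell";
		reg = <0x0 0x1000>;
		clocks = <&clk 1>, <&clk 2>;		/* hypothetical provider indices */
		clock-names = "uartclk", "apb_pclk";	/* baud clock, then bus clock */
	};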
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index ad8f23a0ec67..d2175f3dd099 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -941,6 +941,7 @@
&pcie {
status = "okay";
+ phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
};
&psgtr {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 362df9390263..5fdbfea7a5b2 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -232,6 +232,7 @@ CONFIG_PCIE_KIRIN=y
CONFIG_PCIE_HISI_STB=y
CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCIE_TEGRA194_HOST=m
+CONFIG_PCIE_TEGRA194_EP=m
CONFIG_PCIE_QCOM=y
CONFIG_PCIE_RCAR_GEN4_HOST=m
CONFIG_PCIE_RCAR_GEN4_EP=m
@@ -366,6 +367,7 @@ CONFIG_R8169=m
CONFIG_SH_ETH=y
CONFIG_RAVB=y
CONFIG_RENESAS_ETHER_SWITCH=y
+CONFIG_RTSN=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
CONFIG_SNI_AVE=y
@@ -516,6 +518,7 @@ CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_BCM2835=m
CONFIG_I2C_CADENCE=m
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_GPIO=m
CONFIG_I2C_IMX=y
@@ -655,6 +658,7 @@ CONFIG_GPIO_XGENE_SB=y
CONFIG_GPIO_MAX732X=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_ADP5585=m
CONFIG_GPIO_BD9571MWV=m
CONFIG_GPIO_MAX77620=y
CONFIG_GPIO_SL28CPLD=m
@@ -736,6 +740,7 @@ CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_PM8916_WATCHDOG=m
CONFIG_BCM2835_WDT=y
CONFIG_BCM7038_WDT=m
+CONFIG_MFD_ADP5585=m
CONFIG_MFD_ALTERA_SYSMGR=y
CONFIG_MFD_BD9571MWV=y
CONFIG_MFD_AXP20X_I2C=y
@@ -785,6 +790,7 @@ CONFIG_REGULATOR_PCA9450=y
CONFIG_REGULATOR_PF8X00=y
CONFIG_REGULATOR_PFUZE100=y
CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_REFGEN=m
CONFIG_REGULATOR_QCOM_RPMH=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
@@ -820,6 +826,7 @@ CONFIG_VIDEO_CADENCE_CSI2RX=m
CONFIG_VIDEO_MEDIATEK_JPEG=m
CONFIG_VIDEO_MEDIATEK_VCODEC=m
CONFIG_VIDEO_WAVE_VPU=m
+CONFIG_VIDEO_E5010_JPEG_ENC=m
CONFIG_VIDEO_IMX7_CSI=m
CONFIG_VIDEO_IMX_MIPI_CSIS=m
CONFIG_VIDEO_IMX8_ISI=m
@@ -961,6 +968,8 @@ CONFIG_SND_SOC_MT8192=m
CONFIG_SND_SOC_MT8192_MT6359_RT1015_RT5682=m
CONFIG_SND_SOC_MT8195=m
CONFIG_SND_SOC_MT8195_MT6359=m
+CONFIG_SND_SOC_MT8365=m
+CONFIG_SND_SOC_MT8365_MT6357=m
CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_MESON_GX_SOUND_CARD=m
CONFIG_SND_SOC_QCOM=m
@@ -1006,6 +1015,7 @@ CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD=m
CONFIG_SND_SOC_DAVINCI_MCASP=m
CONFIG_SND_SOC_J721E_EVM=m
CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SOC_AK4619=m
CONFIG_SND_SOC_DA7213=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
@@ -1485,6 +1495,7 @@ CONFIG_IIO_ST_MAGN_3AXIS=m
CONFIG_IIO_CROS_EC_BARO=m
CONFIG_MPL3115=m
CONFIG_PWM=y
+CONFIG_PWM_ADP5585=m
CONFIG_PWM_BCM2835=m
CONFIG_PWM_BRCMSTB=m
CONFIG_PWM_CROS_EC=m
diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl
index cbc980fb02e3..22c9069c0650 100644
--- a/arch/arm64/crypto/poly1305-armv8.pl
+++ b/arch/arm64/crypto/poly1305-armv8.pl
@@ -473,7 +473,8 @@ poly1305_blocks_neon:
subs $len,$len,#64
ldp x9,x13,[$inp,#48]
add $in2,$inp,#96
- adr $zeros,.Lzeros
+ adrp $zeros,.Lzeros
+ add $zeros,$zeros,#:lo12:.Lzeros
lsl $padbit,$padbit,#24
add x15,$ctx,#48
@@ -885,10 +886,13 @@ poly1305_blocks_neon:
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
+.pushsection .rodata
.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
+.popsection
+
.align 2
#if !defined(__KERNEL__) && !defined(_WIN64)
.comm OPENSSL_armcap_P,4,4
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 7d7d97ad3cd5..4e350df9a02d 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -9,6 +9,7 @@ syscall-y += unistd_compat_32.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += parport.h
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index d328f549b1a6..c8c77f9e36d6 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -230,7 +230,11 @@ alternative_has_cap_likely(const unsigned long cpucap)
return false;
asm goto(
+#ifdef BUILD_VDSO
+ ALTERNATIVE("b %l[l_no]", "nop", %[cpucap])
+#else
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
+#endif
:
: [cpucap] "i" (cpucap)
:
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index a4697a0b6835..468a049bc63b 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -33,6 +33,14 @@ static inline void write_pmevtypern(int n, unsigned long val)
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
+#define RETURN_READ_PMEVTYPERN(n) \
+ return read_sysreg(pmevtyper##n##_el0)
+static inline unsigned long read_pmevtypern(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVTYPERN);
+ return 0;
+}
+
static inline unsigned long read_pmmir(void)
{
return read_cpuid(PMMIR_EL1);
@@ -46,6 +54,14 @@ static inline u32 read_pmuver(void)
ID_AA64DFR0_EL1_PMUVer_SHIFT);
}
+static inline bool pmuv3_has_icntr(void)
+{
+ u64 dfr1 = read_sysreg(id_aa64dfr1_el1);
+
+ return !!cpuid_feature_extract_unsigned_field(dfr1,
+ ID_AA64DFR1_EL1_PMICNTR_SHIFT);
+}
+
static inline void write_pmcr(u64 val)
{
write_sysreg(val, pmcr_el0);
@@ -71,22 +87,32 @@ static inline u64 read_pmccntr(void)
return read_sysreg(pmccntr_el0);
}
-static inline void write_pmcntenset(u32 val)
+static inline void write_pmicntr(u64 val)
+{
+ write_sysreg_s(val, SYS_PMICNTR_EL0);
+}
+
+static inline u64 read_pmicntr(void)
+{
+ return read_sysreg_s(SYS_PMICNTR_EL0);
+}
+
+static inline void write_pmcntenset(u64 val)
{
write_sysreg(val, pmcntenset_el0);
}
-static inline void write_pmcntenclr(u32 val)
+static inline void write_pmcntenclr(u64 val)
{
write_sysreg(val, pmcntenclr_el0);
}
-static inline void write_pmintenset(u32 val)
+static inline void write_pmintenset(u64 val)
{
write_sysreg(val, pmintenset_el1);
}
-static inline void write_pmintenclr(u32 val)
+static inline void write_pmintenclr(u64 val)
{
write_sysreg(val, pmintenclr_el1);
}
@@ -96,12 +122,27 @@ static inline void write_pmccfiltr(u64 val)
write_sysreg(val, pmccfiltr_el0);
}
-static inline void write_pmovsclr(u32 val)
+static inline u64 read_pmccfiltr(void)
+{
+ return read_sysreg(pmccfiltr_el0);
+}
+
+static inline void write_pmicfiltr(u64 val)
+{
+ write_sysreg_s(val, SYS_PMICFILTR_EL0);
+}
+
+static inline u64 read_pmicfiltr(void)
+{
+ return read_sysreg_s(SYS_PMICFILTR_EL0);
+}
+
+static inline void write_pmovsclr(u64 val)
{
write_sysreg(val, pmovsclr_el0);
}
-static inline u32 read_pmovsclr(void)
+static inline u64 read_pmovsclr(void)
{
return read_sysreg(pmovsclr_el0);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 558434267271..3d261cc123c1 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -832,6 +832,12 @@ static inline bool system_supports_lpa2(void)
return cpus_have_final_cap(ARM64_HAS_LPA2);
}
+static inline bool system_supports_poe(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_POE) &&
+ alternative_has_cap_unlikely(ARM64_HAS_S1POE);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 5fd7caea4419..5a7dfeb8e8eb 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -143,6 +143,7 @@
#define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
#define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define AMPERE_CPU_PART_AMPERE1A 0xAC4
#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -212,6 +213,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A)
#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index fd87c4b8f984..e0ffdf13a18b 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -165,42 +165,53 @@
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cmp x1, #3
- b.lt .Lset_debug_fgt_\@
+ b.lt .Lskip_spe_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */
orr x0, x0, #(1 << 62)
-.Lset_debug_fgt_\@:
+.Lskip_spe_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
msr_s SYS_HDFGWTR_EL2, x0
mov x0, xzr
mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
- cbz x1, .Lset_pie_fgt_\@
+ cbz x1, .Lskip_debug_fgt_\@
/* Disable nVHE traps of TPIDR2 and SMPRI */
orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
-.Lset_pie_fgt_\@:
+.Lskip_debug_fgt_\@:
mrs_s x1, SYS_ID_AA64MMFR3_EL1
ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
- cbz x1, .Lset_fgt_\@
+ cbz x1, .Lskip_pie_fgt_\@
/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
orr x0, x0, #HFGxTR_EL2_nPIR_EL1
orr x0, x0, #HFGxTR_EL2_nPIRE0_EL1
-.Lset_fgt_\@:
+.Lskip_pie_fgt_\@:
+ mrs_s x1, SYS_ID_AA64MMFR3_EL1
+ ubfx x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4
+ cbz x1, .Lskip_poe_fgt_\@
+
+ /* Disable trapping of POR_EL0 */
+ orr x0, x0, #HFGxTR_EL2_nPOR_EL0
+
+.Lskip_poe_fgt_\@:
msr_s SYS_HFGRTR_EL2, x0
msr_s SYS_HFGWTR_EL2, x0
msr_s SYS_HFGITR_EL2, xzr
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
- cbz x1, .Lskip_fgt_\@
+ cbz x1, .Lskip_amu_fgt_\@
msr_s SYS_HAFGRTR_EL2, xzr
+
+.Lskip_amu_fgt_\@:
+
.Lskip_fgt_\@:
.endm
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 56c148890daf..da6d2c1c0b03 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -10,63 +10,63 @@
#include <asm/memory.h>
#include <asm/sysreg.h>
-#define ESR_ELx_EC_UNKNOWN (0x00)
-#define ESR_ELx_EC_WFx (0x01)
+#define ESR_ELx_EC_UNKNOWN UL(0x00)
+#define ESR_ELx_EC_WFx UL(0x01)
/* Unallocated EC: 0x02 */
-#define ESR_ELx_EC_CP15_32 (0x03)
-#define ESR_ELx_EC_CP15_64 (0x04)
-#define ESR_ELx_EC_CP14_MR (0x05)
-#define ESR_ELx_EC_CP14_LS (0x06)
-#define ESR_ELx_EC_FP_ASIMD (0x07)
-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+#define ESR_ELx_EC_CP15_32 UL(0x03)
+#define ESR_ELx_EC_CP15_64 UL(0x04)
+#define ESR_ELx_EC_CP14_MR UL(0x05)
+#define ESR_ELx_EC_CP14_LS UL(0x06)
+#define ESR_ELx_EC_FP_ASIMD UL(0x07)
+#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
-#define ESR_ELx_EC_CP14_64 (0x0C)
-#define ESR_ELx_EC_BTI (0x0D)
-#define ESR_ELx_EC_ILL (0x0E)
+#define ESR_ELx_EC_CP14_64 UL(0x0C)
+#define ESR_ELx_EC_BTI UL(0x0D)
+#define ESR_ELx_EC_ILL UL(0x0E)
/* Unallocated EC: 0x0F - 0x10 */
-#define ESR_ELx_EC_SVC32 (0x11)
-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+#define ESR_ELx_EC_SVC32 UL(0x11)
+#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
/* Unallocated EC: 0x14 */
-#define ESR_ELx_EC_SVC64 (0x15)
-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
-#define ESR_ELx_EC_SYS64 (0x18)
-#define ESR_ELx_EC_SVE (0x19)
-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+#define ESR_ELx_EC_SVC64 UL(0x15)
+#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
+#define ESR_ELx_EC_SYS64 UL(0x18)
+#define ESR_ELx_EC_SVE UL(0x19)
+#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
/* Unallocated EC: 0x1B */
-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
-#define ESR_ELx_EC_SME (0x1D)
+#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
+#define ESR_ELx_EC_SME UL(0x1D)
/* Unallocated EC: 0x1E */
-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
-#define ESR_ELx_EC_IABT_LOW (0x20)
-#define ESR_ELx_EC_IABT_CUR (0x21)
-#define ESR_ELx_EC_PC_ALIGN (0x22)
+#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
+#define ESR_ELx_EC_IABT_LOW UL(0x20)
+#define ESR_ELx_EC_IABT_CUR UL(0x21)
+#define ESR_ELx_EC_PC_ALIGN UL(0x22)
/* Unallocated EC: 0x23 */
-#define ESR_ELx_EC_DABT_LOW (0x24)
-#define ESR_ELx_EC_DABT_CUR (0x25)
-#define ESR_ELx_EC_SP_ALIGN (0x26)
-#define ESR_ELx_EC_MOPS (0x27)
-#define ESR_ELx_EC_FP_EXC32 (0x28)
+#define ESR_ELx_EC_DABT_LOW UL(0x24)
+#define ESR_ELx_EC_DABT_CUR UL(0x25)
+#define ESR_ELx_EC_SP_ALIGN UL(0x26)
+#define ESR_ELx_EC_MOPS UL(0x27)
+#define ESR_ELx_EC_FP_EXC32 UL(0x28)
/* Unallocated EC: 0x29 - 0x2B */
-#define ESR_ELx_EC_FP_EXC64 (0x2C)
+#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
/* Unallocated EC: 0x2D - 0x2E */
-#define ESR_ELx_EC_SERROR (0x2F)
-#define ESR_ELx_EC_BREAKPT_LOW (0x30)
-#define ESR_ELx_EC_BREAKPT_CUR (0x31)
-#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
-#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
-#define ESR_ELx_EC_WATCHPT_LOW (0x34)
-#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+#define ESR_ELx_EC_SERROR UL(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
/* Unallocated EC: 0x36 - 0x37 */
-#define ESR_ELx_EC_BKPT32 (0x38)
+#define ESR_ELx_EC_BKPT32 UL(0x38)
/* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
/* Unallocated EC: 0x3B */
-#define ESR_ELx_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_BRK64 UL(0x3C)
/* Unallocated EC: 0x3D - 0x3F */
-#define ESR_ELx_EC_MAX (0x3F)
+#define ESR_ELx_EC_MAX UL(0x3F)
#define ESR_ELx_EC_SHIFT (26)
#define ESR_ELx_EC_WIDTH (6)
@@ -122,8 +122,8 @@
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
/* Status codes for individual page table levels */
-#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + n)
-#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + n)
+#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
+#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + (n))
#define ESR_ELx_FSC_FAULT_nL (0x2C)
#define ESR_ELx_FSC_FAULT_L(n)	(((n) < 0 ? ESR_ELx_FSC_FAULT_nL :	\
					    ESR_ELx_FSC_FAULT) + (n))
@@ -161,6 +161,7 @@
/* ISS field definitions for exceptions taken into Hyp */
#define ESR_ELx_FSC_ADDRSZ (0x00)
+#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
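
The UL() conversion matters once an EC value is shifted up to its bit-26 field
position in a 64-bit mask: a plain int constant shifted into bit 31 gets
sign-extended when widened. A minimal userspace sketch of the hazard this
avoids (plain C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	#define UL(x) ((unsigned long)(x))
	#define ESR_ELx_EC_SHIFT 26

	int main(void)
	{
		/* 0x3F is a signed int; shifting it into bit 31 and then
		 * widening to 64 bits sign-extends on common ABIs. */
		uint64_t mask_int = (uint64_t)(0x3F << ESR_ELx_EC_SHIFT);
		/* With UL() the shift happens in 64-bit unsigned long. */
		uint64_t mask_ul = UL(0x3F) << ESR_ELx_EC_SHIFT;

		printf("int shift: %#llx\n", (unsigned long long)mask_int);
		printf("UL shift:  %#llx\n", (unsigned long long)mask_ul);
		return 0;
	}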
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bc69ac368d73..f2a84efc3618 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -155,8 +155,6 @@ extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
-extern u64 read_smcr_features(void);
-
/*
* Helpers to translate bit indices in sve_vq_map to VQ values (and
* vice versa). This allows find_next_bit() to be used to find the
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 4edd3b61df11..a775adddecf2 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -157,6 +157,7 @@
#define KERNEL_HWCAP_SME_SF8FMA __khwcap2_feature(SME_SF8FMA)
#define KERNEL_HWCAP_SME_SF8DP4 __khwcap2_feature(SME_SF8DP4)
#define KERNEL_HWCAP_SME_SF8DP2 __khwcap2_feature(SME_SF8DP2)
+#define KERNEL_HWCAP_POE __khwcap2_feature(POE)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 0ae427f352c8..409e239834d1 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -7,4 +7,15 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+#ifdef CONFIG_ARM_PKVM_GUEST
+void pkvm_init_hyp_services(void);
+#else
+static inline void pkvm_init_hyp_services(void) { }
+#endif
+
+static inline void kvm_arch_init_hyp_services(void)
+{
+ pkvm_init_hyp_services();
+}
+
#endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 41fd90895dfc..1ada23a6ec19 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -271,6 +271,10 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
* I/O memory mapping functions.
*/
+typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
+ pgprot_t *prot);
+int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+
#define ioremap_prot ioremap_prot
#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
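
The new hook lets a single platform-level callback adjust the pgprot used by
ioremap_prot(). A hedged sketch of a registration; the hook body, initcall and
the assumed 0-on-success return semantics are illustrative, not from this
series:

	#include <linux/init.h>
	#include <linux/io.h>

	/* Hypothetical: force an alternative attribute for all MMIO
	 * mappings, e.g. in a confidential guest. */
	static int example_ioremap_hook(phys_addr_t phys, size_t size,
					pgprot_t *prot)
	{
		*prot = pgprot_noncached(*prot);	/* illustrative */
		return 0;				/* assumed: 0 = accept */
	}

	static int __init example_hook_init(void)
	{
		/* Check the return value; registration may fail. */
		return arm64_ioremap_prot_hook_register(example_ioremap_hook);
	}
	early_initcall(example_hook_init);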
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d81cc746e0eb..109a85ee6910 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,6 +107,7 @@
/* TCR_EL2 Registers bits */
#define TCR_EL2_DS (1UL << 32)
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
+#define TCR_EL2_HPD (1 << 24)
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2181a11b9d92..b36a3b6cc011 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -10,6 +10,7 @@
#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
+#include <asm/sysreg.h>
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -235,6 +236,9 @@ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
+extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -259,7 +263,7 @@ extern u64 __kvm_get_mdcr_el2(void);
asm volatile( \
" mrs %1, spsr_el2\n" \
" mrs %2, elr_el2\n" \
- "1: at "at_op", %3\n" \
+ "1: " __msr_s(at_op, "%3") "\n" \
" isb\n" \
" b 9f\n" \
"2: msr spsr_el2, %1\n" \
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a33f5996ca9f..329619c6fa96 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -446,6 +446,12 @@ enum vcpu_sysreg {
GCR_EL1, /* Tag Control Register */
TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
+ POR_EL0, /* Permission Overlay Register 0 (EL0) */
+
+ /* FP/SIMD/SVE */
+ SVCR,
+ FPMR,
+
/* 32bit specific registers. */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -517,6 +523,8 @@ enum vcpu_sysreg {
VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */
VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */
+ VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
+
VNCR(HFGRTR_EL2),
VNCR(HFGWTR_EL2),
VNCR(HFGITR_EL2),
@@ -530,6 +538,8 @@ enum vcpu_sysreg {
VNCR(CNTP_CVAL_EL0),
VNCR(CNTP_CTL_EL0),
+ VNCR(ICH_HCR_EL2),
+
NR_SYS_REGS /* Nothing after this line! */
};
@@ -595,6 +605,16 @@ struct kvm_host_data {
struct cpu_sve_state *sve_state;
};
+ union {
+ /* HYP VA pointer to the host storage for FPMR */
+ u64 *fpmr_ptr;
+ /*
+ * Used by pKVM only, as it needs to provide storage
+ * for the host
+ */
+ u64 fpmr;
+ };
+
/* Ownership of the FP regs */
enum {
FP_STATE_FREE,
@@ -664,8 +684,6 @@ struct kvm_vcpu_arch {
void *sve_state;
enum fp_type fp_type;
unsigned int sve_max_vl;
- u64 svcr;
- u64 fpmr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -1330,12 +1348,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
-void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
-void kvm_clr_pmu_events(u32 clr);
+void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
+void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
#else
-static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
-static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u64 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
return false;
@@ -1473,4 +1491,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
(pa + pi + pa3) == 1; \
})
+#define kvm_has_fpmr(k) \
+ (system_supports_fpmr() && \
+ kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 216ca424bb16..cd4087fbda9a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -352,5 +352,11 @@ static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
return &kvm->arch.mmu != mmu;
}
+#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
+void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
+#else
+static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {}
+#endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 5b06c31035a2..e8bc6d67aba2 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -85,7 +85,7 @@ struct kvm_s2_trans {
bool readable;
int level;
u32 esr;
- u64 upper_attr;
+ u64 desc;
};
static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
@@ -115,7 +115,7 @@ static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
- return !(trans->upper_attr & BIT(54));
+ return !(trans->desc & BIT(54));
}
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
@@ -205,4 +205,40 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}
+/* Adjust alignment for the contiguous bit as per StageOA() */
+#define contiguous_bit_shift(d, wi, l) \
+ ({ \
+ u8 shift = 0; \
+ \
+ if ((d) & PTE_CONT) { \
+ switch (BIT((wi)->pgshift)) { \
+ case SZ_4K: \
+ shift = 4; \
+ break; \
+ case SZ_16K: \
+ shift = (l) == 2 ? 5 : 7; \
+ break; \
+ case SZ_64K: \
+ shift = 5; \
+ break; \
+ } \
+ } \
+ \
+ shift; \
+ })
+
+static inline unsigned int ps_to_output_size(unsigned int ps)
+{
+ switch (ps) {
+ case 0: return 32;
+ case 1: return 36;
+ case 2: return 40;
+ case 3: return 42;
+ case 4: return 44;
+ case 5:
+ default:
+ return 48;
+ }
+}
+
#endif /* __ARM64_KVM_NESTED_H */
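
For reference, the shifts above encode the architected contiguous-hint group
sizes (16 entries for 4K, 32 or 128 for 16K depending on level, 32 for 64K);
a standalone check in plain C, mirroring the macro:

	#include <stdio.h>

	/* Contiguous-hint shifts, as in contiguous_bit_shift() above. */
	static unsigned int cont_shift(unsigned long granule, int level)
	{
		switch (granule) {
		case 4096:	return 4;
		case 16384:	return level == 2 ? 5 : 7;
		case 65536:	return 5;
		}
		return 0;
	}

	int main(void)
	{
		printf("4K:           %u entries\n", 1u << cont_shift(4096, 3));
		printf("16K, level 2: %u entries\n", 1u << cont_shift(16384, 2));
		printf("16K, level 3: %u entries\n", 1u << cont_shift(16384, 3));
		printf("64K:          %u entries\n", 1u << cont_shift(65536, 3));
		return 0;
	}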
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 19278dfe7978..03f4c3d7839c 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -59,6 +59,48 @@ typedef u64 kvm_pte_t;
#define KVM_PHYS_INVALID (-1ULL)
+#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
+
+#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6)
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \
+ ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \
+ ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
+#define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8)
+#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3
+#define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10)
+
+#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
+#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6)
+#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7)
+#define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8)
+#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3
+#define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10)
+
+#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50)
+
+#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
+
+#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
+
+#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+
+#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
+
+#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
+ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
+ KVM_PTE_LEAF_ATTR_HI_S2_XN)
+
+#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
+#define KVM_MAX_OWNER_ID 1
+
+/*
+ * Used to indicate a pte for which a 'break-before-make' sequence is in
+ * progress.
+ */
+#define KVM_INVALID_PTE_LOCKED BIT(10)
+
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
return pte & KVM_PTE_VALID;
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..b0c9a86b13a4
--- /dev/null
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_MEM_ENCRYPT_H
+#define __ASM_MEM_ENCRYPT_H
+
+struct arm64_mem_crypt_ops {
+ int (*encrypt)(unsigned long addr, int numpages);
+ int (*decrypt)(unsigned long addr, int numpages);
+};
+
+int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASM_MEM_ENCRYPT_H */
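
A sketch of the registration pattern this header sets up; the callback bodies
and the initcall level are placeholders (a real user would issue the
share/unshare transitions appropriate to its platform):

	#include <linux/init.h>
	#include <asm/mem_encrypt.h>

	static int example_encrypt(unsigned long addr, int numpages)
	{
		/* Make [addr, addr + numpages * PAGE_SIZE) private again. */
		return 0;	/* placeholder */
	}

	static int example_decrypt(unsigned long addr, int numpages)
	{
		/* Share [addr, addr + numpages * PAGE_SIZE) with the host. */
		return 0;	/* placeholder */
	}

	static const struct arm64_mem_crypt_ops example_ops = {
		.encrypt = example_encrypt,
		.decrypt = example_decrypt,
	};

	static int __init example_mem_crypt_init(void)
	{
		/* set_memory_decrypted()/encrypted() then route here. */
		return arm64_mem_crypt_ops_register(&example_ops);
	}
	early_initcall(example_mem_crypt_init);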
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 54fb014eba05..0480c61dbb4f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -110,6 +110,8 @@
#define PAGE_END (_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
+#define PHYSMEM_END __pa(PAGE_END - 1)
+
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
/*
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..9e39217b4afb 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -2,12 +2,14 @@
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__
+#include <uapi/asm/mman.h>
+
+#ifndef BUILD_VDSO
#include <linux/compiler.h>
#include <linux/types.h>
-#include <uapi/asm/mman.h>
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
- unsigned long pkey __always_unused)
+ unsigned long pkey)
{
unsigned long ret = 0;
@@ -17,6 +19,14 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
if (system_supports_mte() && (prot & PROT_MTE))
ret |= VM_MTE;
+#ifdef CONFIG_ARCH_HAS_PKEYS
+ if (system_supports_poe()) {
+ ret |= pkey & BIT(0) ? VM_PKEY_BIT0 : 0;
+ ret |= pkey & BIT(1) ? VM_PKEY_BIT1 : 0;
+ ret |= pkey & BIT(2) ? VM_PKEY_BIT2 : 0;
+ }
+#endif
+
return ret;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
@@ -60,4 +70,6 @@ static inline bool arch_validate_flags(unsigned long vm_flags)
}
#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+#endif /* !BUILD_VDSO */
+
#endif /* ! __ASM_MMAN_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 65977c7783c5..2ec96d91acc6 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -25,6 +25,7 @@ typedef struct {
refcount_t pinned;
void *vdso;
unsigned long flags;
+ u8 pkey_allocation_map;
} mm_context_t;
/*
@@ -63,7 +64,6 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
-extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index bd19f4c758b7..7c09d47e09cb 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -15,12 +15,12 @@
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
+#include <linux/pkeys.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
@@ -175,9 +175,36 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
atomic64_set(&mm->context.id, 0);
refcount_set(&mm->context.pinned, 0);
+
+ /* pkey 0 is the default, so always reserve it. */
+ mm->context.pkey_allocation_map = BIT(0);
+
+ return 0;
+}
+
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ /* Duplicate the oldmm pkey state in mm: */
+ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+}
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ arch_dup_pkeys(oldmm, mm);
+
return 0;
}
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
struct mm_struct *mm)
@@ -267,6 +294,23 @@ static inline unsigned long mm_untag_mask(struct mm_struct *mm)
return -1UL >> 8;
}
+/*
+ * Only enforce protection keys on the current process, because there is no
+ * user context to access POR_EL0 for another address space.
+ */
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ if (!system_supports_poe())
+ return true;
+
+ /* allow access if the VMA is not one from this process */
+ if (foreign || vma_is_foreign(vma))
+ return true;
+
+ return por_el0_allows_pkey(vma_pkey(vma), write, execute);
+}
+
#include <asm-generic/mmu_context.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h
deleted file mode 100644
index fa17e01d9ab2..000000000000
--- a/arch/arm64/include/asm/mmzone.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* CONFIG_NUMA */
-#endif /* __ASM_MMZONE_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 1f60aa1bc750..fd330c1db289 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -135,7 +135,6 @@
/*
* Section
*/
-#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
@@ -200,11 +199,26 @@
#define PTE_PI_IDX_3 54 /* UXN */
/*
+ * POIndex[2:0] encoding (Permission Overlay Extension)
+ */
+#define PTE_PO_IDX_0 (_AT(pteval_t, 1) << 60)
+#define PTE_PO_IDX_1 (_AT(pteval_t, 1) << 61)
+#define PTE_PO_IDX_2 (_AT(pteval_t, 1) << 62)
+
+#define PTE_PO_IDX_MASK GENMASK_ULL(62, 60)
+
+/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2)
/*
+ * Hierarchical permission for Stage-1 tables
+ */
+#define S1_TABLE_AP (_AT(pmdval_t, 3) << 61)
+
+/*
* Highest possible physical address supported.
*/
#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
@@ -298,6 +312,10 @@
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_HPD0_SHIFT 41
+#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT)
+#define TCR_HPD1_SHIFT 42
+#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT)
#define TCR_TBID0 (UL(1) << 51)
#define TCR_TBID1 (UL(1) << 52)
#define TCR_NFD0 (UL(1) << 53)
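
Extracting the POIndex from a PTE is a plain mask-and-shift over bits 62:60;
a standalone sketch with GENMASK_ULL() reimplemented for userspace:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
	#define PTE_PO_IDX_MASK	GENMASK_ULL(62, 60)

	int main(void)
	{
		/* A fake PTE with POIndex = 5 (i.e. pkey 5). */
		uint64_t pte = (5ULL << 60) | 0x3;
		unsigned int pkey = (pte & PTE_PO_IDX_MASK) >> 60;

		printf("POIndex = %u\n", pkey);	/* 5 */
		return 0;
	}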
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index b11cfb9fdd37..2a11d0c10760 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -154,10 +154,10 @@ static inline bool __pure lpa2_is_enabled(void)
#define PIE_E0 ( \
PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW))
+ PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \
+ PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \
+ PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \
+ PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW_O))
#define PIE_E1 ( \
PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7a4f5604be3f..c329ea061dc9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,6 +34,7 @@
#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
+#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
@@ -149,6 +150,24 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
+{
+ u64 por;
+
+ if (!system_supports_poe())
+ return true;
+
+ por = read_sysreg_s(SYS_POR_EL0);
+
+ if (write)
+ return por_elx_allows_write(por, pkey);
+
+ if (execute)
+ return por_elx_allows_exec(por, pkey);
+
+ return por_elx_allows_read(por, pkey);
+}
+
/*
* p??_access_permitted() is true for valid user mappings (PTE_USER
* bit set, subject to the write permission check). For execute-only
@@ -156,8 +175,11 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
* not set) must return false. PROT_NONE mappings do not have the
* PTE_VALID bit set.
*/
-#define pte_access_permitted(pte, write) \
+#define pte_access_permitted_no_overlay(pte, write) \
(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
+#define pte_access_permitted(pte, write) \
+ (pte_access_permitted_no_overlay(pte, write) && \
+ por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
@@ -373,10 +395,11 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
/*
* If the PTE would provide user space access to the tags associated
* with it then ensure that the MTE tags are synchronised. Although
- * pte_access_permitted() returns false for exec only mappings, they
- * don't expose tags (instruction fetches don't check tags).
+ * pte_access_permitted_no_overlay() returns false for exec only
+ * mappings, they don't expose tags (instruction fetches don't check
+ * tags).
*/
- if (system_supports_mte() && pte_access_permitted(pte, false) &&
+ if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
!pte_special(pte) && pte_tagged(pte))
mte_sync_tags(pte, nr_pages);
}
@@ -384,6 +407,7 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
/*
* Select all bits except the pfn
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
@@ -577,6 +601,14 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+#define pmd_special(pmd)	(!!((pmd_val(pmd) & PTE_SPECIAL)))
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
+}
+#endif
+
#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
@@ -594,6 +626,27 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+#define pud_special(pud)	pte_special(pud_pte(pud))
+#define pud_mkspecial(pud)	pte_pud(pte_mkspecial(pud_pte(pud)))
+#endif
+
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ unsigned long pfn = pud_pfn(pud);
+
+ return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
+}
+
static inline void __set_pte_at(struct mm_struct *mm,
unsigned long __always_unused addr,
pte_t *ptep, pte_t pte, unsigned int nr)
@@ -1103,7 +1156,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
- PTE_GP | PTE_ATTRINDX_MASK;
+ PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;
+
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
diff --git a/arch/arm64/include/asm/pkeys.h b/arch/arm64/include/asm/pkeys.h
new file mode 100644
index 000000000000..0ca5f83ce148
--- /dev/null
+++ b/arch/arm64/include/asm/pkeys.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Arm Ltd.
+ *
+ * Based on arch/x86/include/asm/pkeys.h
+ */
+
+#ifndef _ASM_ARM64_PKEYS_H
+#define _ASM_ARM64_PKEYS_H
+
+#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2)
+
+#define arch_max_pkey() 8
+
+int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ unsigned long init_val);
+
+static inline bool arch_pkeys_enabled(void)
+{
+ return system_supports_poe();
+}
+
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
+}
+
+static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
+ int prot, int pkey)
+{
+ if (pkey != -1)
+ return pkey;
+
+ return vma_pkey(vma);
+}
+
+static inline int execute_only_pkey(struct mm_struct *mm)
+{
+ // Execute-only mappings are handled by EPAN/FEAT_PAN3.
+ return -1;
+}
+
+#define mm_pkey_allocation_map(mm) (mm)->context.pkey_allocation_map
+#define mm_set_pkey_allocated(mm, pkey) do { \
+	mm_pkey_allocation_map(mm) |= (1U << (pkey));	\
+} while (0)
+#define mm_set_pkey_free(mm, pkey) do { \
+	mm_pkey_allocation_map(mm) &= ~(1U << (pkey));	\
+} while (0)
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+ /*
+ * "Allocated" pkeys are those that have been returned
+ * from pkey_alloc() or pkey 0 which is allocated
+ * implicitly when the mm is created.
+ */
+ if (pkey < 0 || pkey >= arch_max_pkey())
+ return false;
+
+ return mm_pkey_allocation_map(mm) & (1U << pkey);
+}
+
+/*
+ * Returns a positive, 3-bit key on success, or -1 on failure.
+ */
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+ /*
+ * Note: this is the one and only place we make sure
+ * that the pkey is valid as far as the hardware is
+ * concerned. The rest of the kernel trusts that
+ * only good, valid pkeys come out of here.
+ */
+ u8 all_pkeys_mask = GENMASK(arch_max_pkey() - 1, 0);
+ int ret;
+
+ if (!arch_pkeys_enabled())
+ return -1;
+
+ /*
+ * Are we out of pkeys? We must handle this specially
+ * because ffz() behavior is undefined if there are no
+ * zeros.
+ */
+ if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+ return -1;
+
+ ret = ffz(mm_pkey_allocation_map(mm));
+
+ mm_set_pkey_allocated(mm, ret);
+
+ return ret;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+ if (!mm_pkey_is_allocated(mm, pkey))
+ return -EINVAL;
+
+ mm_set_pkey_free(mm, pkey);
+
+ return 0;
+}
+
+#endif /* _ASM_ARM64_PKEYS_H */
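
The alloc/free path is easy to exercise outside the kernel; this standalone C
mirror of the bitmap logic (ffz() approximated with a GCC builtin) shows the
expected sequence:

	#include <errno.h>
	#include <stdio.h>

	#define ARCH_MAX_PKEY 8

	static unsigned char map = 1u << 0;	/* pkey 0 reserved at init */

	static int pkey_alloc_sim(void)
	{
		unsigned char all = (1u << ARCH_MAX_PKEY) - 1;

		if (map == all)
			return -1;	/* out of pkeys */

		/* ffz(): index of the first zero bit. */
		int pkey = __builtin_ctz(~(unsigned int)map);

		map |= 1u << pkey;
		return pkey;
	}

	static int pkey_free_sim(int pkey)
	{
		if (pkey < 0 || pkey >= ARCH_MAX_PKEY || !(map & (1u << pkey)))
			return -EINVAL;
		map &= ~(1u << pkey);
		return 0;
	}

	int main(void)
	{
		printf("alloc: %d\n", pkey_alloc_sim());	/* 1 */
		printf("alloc: %d\n", pkey_alloc_sim());	/* 2 */
		printf("free1: %d\n", pkey_free_sim(1));	/* 0 */
		printf("alloc: %d\n", pkey_alloc_sim());	/* 1 again */
		return 0;
	}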
diff --git a/arch/arm64/include/asm/por.h b/arch/arm64/include/asm/por.h
new file mode 100644
index 000000000000..e06e9f473675
--- /dev/null
+++ b/arch/arm64/include/asm/por.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Arm Ltd.
+ */
+
+#ifndef _ASM_ARM64_POR_H
+#define _ASM_ARM64_POR_H
+
+#define POR_BITS_PER_PKEY 4
+#define POR_ELx_IDX(por_elx, idx) (((por_elx) >> ((idx) * POR_BITS_PER_PKEY)) & 0xf)
+
+static inline bool por_elx_allows_read(u64 por, u8 pkey)
+{
+ u8 perm = POR_ELx_IDX(por, pkey);
+
+ return perm & POE_R;
+}
+
+static inline bool por_elx_allows_write(u64 por, u8 pkey)
+{
+ u8 perm = POR_ELx_IDX(por, pkey);
+
+ return perm & POE_W;
+}
+
+static inline bool por_elx_allows_exec(u64 por, u8 pkey)
+{
+ u8 perm = POR_ELx_IDX(por, pkey);
+
+ return perm & POE_X;
+}
+
+#endif /* _ASM_ARM64_POR_H */
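
Each pkey owns a 4-bit slot in POR_EL0, so the checks above reduce to nibble
extraction plus a bit test. A standalone sketch; the POE_R/POE_X/POE_W values
(bits 0, 1 and 2) are taken from the sysreg definitions used alongside this
header:

	#include <stdint.h>
	#include <stdio.h>

	#define POR_BITS_PER_PKEY 4
	#define POE_R 0x1u	/* read */
	#define POE_X 0x2u	/* execute */
	#define POE_W 0x4u	/* write */
	#define POE_RXW 0x7u

	static unsigned int por_idx(uint64_t por, unsigned int pkey)
	{
		return (por >> (pkey * POR_BITS_PER_PKEY)) & 0xf;
	}

	int main(void)
	{
		/* pkey 0 fully open (the POR_EL0_INIT value), pkey 1 RO. */
		uint64_t por = (uint64_t)POE_RXW | ((uint64_t)POE_R << 4);

		printf("pkey0 write: %d\n", !!(por_idx(por, 0) & POE_W)); /* 1 */
		printf("pkey1 write: %d\n", !!(por_idx(por, 1) & POE_W)); /* 0 */
		printf("pkey1 read:  %d\n", !!(por_idx(por, 1) & POE_R)); /* 1 */
		return 0;
	}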
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f77371232d8c..1438424f0064 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -184,6 +184,7 @@ struct thread_struct {
u64 sctlr_user;
u64 svcr;
u64 tpidr2_el0;
+ u64 por_el0;
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
@@ -402,5 +403,10 @@ long get_tagged_addr_ctrl(struct task_struct *task);
#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
#endif
+int get_tsc_mode(unsigned long adr);
+int set_tsc_mode(unsigned int val);
+#define GET_TSC_CTL(adr) get_tsc_mode((adr))
+#define SET_TSC_CTL(val) set_tsc_mode((val))
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 5b1701c76d1c..6cf4aae05219 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -5,6 +5,8 @@
#ifndef __ASM_PTDUMP_H
#define __ASM_PTDUMP_H
+#include <linux/ptdump.h>
+
#ifdef CONFIG_PTDUMP_CORE
#include <linux/mm_types.h>
@@ -21,14 +23,53 @@ struct ptdump_info {
unsigned long base_addr;
};
+struct ptdump_prot_bits {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+};
+
+struct ptdump_pg_level {
+ const struct ptdump_prot_bits *bits;
+ char name[4];
+ int num;
+ u64 mask;
+};
+
+/*
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the pte entries. When the continuity is broken it then
+ * dumps out a description of the range.
+ */
+struct ptdump_pg_state {
+ struct ptdump_state ptdump;
+ struct ptdump_pg_level *pg_level;
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ const struct mm_struct *mm;
+ unsigned long start_address;
+ int level;
+ u64 current_prot;
+ bool check_wx;
+ unsigned long wx_pages;
+ unsigned long uxn_pages;
+};
+
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
+void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+ u64 val);
#ifdef CONFIG_PTDUMP_DEBUGFS
#define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
const char *name) { }
-#endif
+#endif /* CONFIG_PTDUMP_DEBUGFS */
+#else
+static inline void note_page(struct ptdump_state *pt_st, unsigned long addr,
+ int level, u64 val) { }
#endif /* CONFIG_PTDUMP_CORE */
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 0f740b781187..917761feeffd 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -3,6 +3,7 @@
#ifndef _ASM_ARM64_SET_MEMORY_H
#define _ASM_ARM64_SET_MEMORY_H
+#include <asm/mem_encrypt.h>
#include <asm-generic/set_memory.h>
bool can_set_direct_map(void);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 4a9ea103817e..9ea97dddefc4 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -109,6 +109,9 @@
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x))
+/* Register-based PAN access, for save/restore purposes */
+#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3)
+
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -325,7 +328,25 @@
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
#define SYS_PAR_EL1_F BIT(0)
+/* When PAR_EL1.F == 1 */
#define SYS_PAR_EL1_FST GENMASK(6, 1)
+#define SYS_PAR_EL1_PTW BIT(8)
+#define SYS_PAR_EL1_S BIT(9)
+#define SYS_PAR_EL1_AssuredOnly BIT(12)
+#define SYS_PAR_EL1_TopLevel BIT(13)
+#define SYS_PAR_EL1_Overlay BIT(14)
+#define SYS_PAR_EL1_DirtyBit BIT(15)
+#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48)
+#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16))
+#define SYS_PAR_EL1_RES1 BIT(11)
+/* When PAR_EL1.F == 0 */
+#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7)
+#define SYS_PAR_EL1_NS BIT(9)
+#define SYS_PAR_EL1_F0_IMPDEF BIT(10)
+#define SYS_PAR_EL1_NSE BIT(11)
+#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12)
+#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56)
+#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52))
/*** Statistical Profiling Extension ***/
#define PMSEVFR_EL1_RES0_IMP \
@@ -403,7 +424,6 @@
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
-#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
@@ -652,6 +672,7 @@
#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
+#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2)
/* TLBI instructions */
#define TLBI_Op0 1
@@ -1077,6 +1098,9 @@
#define POE_RXW UL(0x7)
#define POE_MASK UL(0xf)
+/* Initial value for Permission Overlay Extension for EL0 */
+#define POR_EL0_INIT POE_RXW
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e72a3bf9e563..1114c1c3300a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -81,6 +81,7 @@ void arch_setup_new_exec(void);
#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */
+#define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -97,6 +98,7 @@ void arch_setup_new_exec(void);
#define _TIF_SVE (1 << TIF_SVE)
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_TSC_SIGSEGV (1 << TIF_TSC_SIGSEGV)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0f6ef432fb84..5fc3af9f8f29 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -5,6 +5,7 @@
#include <linux/cpumask.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
struct pci_bus;
int pcibus_to_node(struct pci_bus *bus);
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index eefe766d6161..d780d1bd2eac 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -25,6 +25,7 @@ try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn)
void force_signal_inject(int signal, int code, unsigned long address, unsigned long err);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str);
+void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey);
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
diff --git a/arch/arm64/include/asm/vdso/getrandom.h b/arch/arm64/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..342f807e2044
--- /dev/null
+++ b/arch/arm64/include/asm/vdso/getrandom.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vsyscall.h>
+#include <vdso/datapage.h>
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
+{
+ register void *buffer asm ("x0") = _buffer;
+ register size_t len asm ("x1") = _len;
+ register unsigned int flags asm ("x2") = _flags;
+ register long ret asm ("x0");
+ register long nr asm ("x8") = __NR_getrandom;
+
+ asm volatile(
+ " svc #0\n"
+ : "=r" (ret)
+ : "r" (buffer), "r" (len), "r" (flags), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ /*
+	 * The RNG data is in the real VVAR data page, but if a task belongs
+	 * to a time namespace then VVAR_DATA_PAGE_OFFSET points to the
+	 * namespace-specific VVAR page and VVAR_TIMENS_PAGE_OFFSET points to
+	 * the real VVAR page.
+ */
+ if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * (1UL << CONFIG_PAGE_SHIFT);
+ return &_vdso_rng_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index f94b1457c117..5b6d0dd3cef5 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -2,11 +2,19 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
+#define __VDSO_RND_DATA_OFFSET 480
+
#ifndef __ASSEMBLY__
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
#define VDSO_PRECISION_MASK ~(0xFF00ULL<<48)
extern struct vdso_data *vdso_data;
@@ -22,6 +30,13 @@ struct vdso_data *__arm64_get_k_vdso_data(void)
#define __arch_get_k_vdso_data __arm64_get_k_vdso_data
static __always_inline
+struct vdso_rng_data *__arm64_get_k_vdso_rnd_data(void)
+{
+ return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
+}
+#define __arch_get_k_vdso_rng_data __arm64_get_k_vdso_rnd_data
+
+static __always_inline
void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
{
vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index df2c47c55972..06f8ec0906a6 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -52,6 +52,7 @@
#define VNCR_PIRE0_EL1 0x290
#define VNCR_PIRE0_EL2 0x298
#define VNCR_PIR_EL1 0x2A0
+#define VNCR_POR_EL1 0x2A8
#define VNCR_ICH_LR0_EL2 0x400
#define VNCR_ICH_LR1_EL2 0x408
#define VNCR_ICH_LR2_EL2 0x410
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 285610e626f5..055381b2c615 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -122,5 +122,6 @@
#define HWCAP2_SME_SF8FMA (1UL << 60)
#define HWCAP2_SME_SF8DP4 (1UL << 61)
#define HWCAP2_SME_SF8DP2 (1UL << 62)
+#define HWCAP2_POE (1UL << 63)
#endif /* _UAPI__ASM_HWCAP_H */
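
Userspace probes the feature through the auxiliary vector as usual; a minimal
sketch (HWCAP2_POE defined locally for older headers):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_POE
	#define HWCAP2_POE (1UL << 63)
	#endif

	int main(void)
	{
		if (getauxval(AT_HWCAP2) & HWCAP2_POE)
			printf("FEAT_S1POE: POR_EL0 and pkeys available\n");
		else
			printf("no permission overlays on this system\n");
		return 0;
	}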
diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h
index 1e6482a838e1..e7e0c8216243 100644
--- a/arch/arm64/include/uapi/asm/mman.h
+++ b/arch/arm64/include/uapi/asm/mman.h
@@ -7,4 +7,13 @@
#define PROT_BTI 0x10 /* BTI guarded page */
#define PROT_MTE 0x20 /* Normal Tagged mapping */
+/* Override any generic PKEY permission defines */
+#define PKEY_DISABLE_EXECUTE 0x4
+#define PKEY_DISABLE_READ 0x8
+#undef PKEY_ACCESS_MASK
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE |\
+ PKEY_DISABLE_READ |\
+ PKEY_DISABLE_EXECUTE)
+
#endif /* ! _UAPI__ASM_MMAN_H */
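
With the extra bits, a key can deny reads and execution as well as writes. A
hedged userspace sketch using the raw syscalls (constants defined locally for
older headers; error handling trimmed):

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef PKEY_DISABLE_WRITE
	#define PKEY_DISABLE_WRITE 0x2
	#endif
	#ifndef PKEY_DISABLE_READ
	#define PKEY_DISABLE_READ 0x8	/* arm64-specific, from above */
	#endif

	int main(void)
	{
		long pkey = syscall(SYS_pkey_alloc, 0UL,
				    (unsigned long)(PKEY_DISABLE_READ |
						    PKEY_DISABLE_WRITE));
		void *p;

		if (pkey < 0) {
			perror("pkey_alloc");
			return 1;
		}

		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		syscall(SYS_pkey_mprotect, p, 4096UL,
			(unsigned long)(PROT_READ | PROT_WRITE), pkey);

		/* A read of *p now faults: the overlay denies even loads. */
		printf("pkey %ld guards %p\n", pkey, p);
		return 0;
	}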
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 8a45b7a411e0..bb7af77a30a7 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -98,6 +98,13 @@ struct esr_context {
__u64 esr;
};
+#define POE_MAGIC 0x504f4530
+
+struct poe_context {
+ struct _aarch64_ctx head;
+ __u64 por_el0;
+};
+
/*
* extra_context: describes extra space in the signal frame for
* additional structures that don't fit in sigcontext.__reserved[].
@@ -320,10 +327,10 @@ struct zt_context {
((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
- (SVE_SIG_ZREG_SIZE(vq) * n))
+ (SVE_SIG_ZREG_SIZE(vq) * (n)))
#define ZA_SIG_CONTEXT_SIZE(vq) \
(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
@@ -334,7 +341,7 @@ struct zt_context {
#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
#define ZT_SIG_CONTEXT_SIZE(n) \
(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
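
A signal handler finds the new record by walking __reserved for POE_MAGIC,
exactly as for the SVE/ZA records. A sketch; the poe_context layout is
mirrored locally so it builds against older uapi headers, and printf is for
illustration only (it is not async-signal-safe):

	#include <signal.h>
	#include <stdio.h>
	#include <ucontext.h>
	#include <asm/sigcontext.h>

	#ifndef POE_MAGIC
	#define POE_MAGIC 0x504f4530
	#endif

	/* Local mirror of struct poe_context, for older uapi headers. */
	struct poe_record {
		struct _aarch64_ctx head;
		unsigned long long por_el0;
	};

	static void handler(int sig, siginfo_t *info, void *ucontext)
	{
		ucontext_t *uc = ucontext;
		struct _aarch64_ctx *head =
			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

		while (head->magic) {
			if (head->magic == POE_MAGIC) {
				struct poe_record *poe = (void *)head;
				printf("POR_EL0 = %#llx\n", poe->por_el0);
			}
			head = (void *)((char *)head + head->size);
		}
	}

	int main(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGUSR1, &sa, NULL);
		raise(SIGUSR1);
		return 0;
	}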
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index f6b6b4507357..dfefbdf4073a 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -456,6 +456,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
};
#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+static const struct midr_range erratum_ac03_cpu_38_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -772,7 +780,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "AmpereOne erratum AC03_CPU_38",
.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
- ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
},
#endif
{
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 646ecd3069fd..718728a85430 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -466,6 +466,8 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE),
+ FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
ARM64_FTR_END,
@@ -2348,6 +2350,14 @@ static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn);
}
+#ifdef CONFIG_ARM64_POE
+static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE);
+ sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE);
+}
+#endif
+
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2870,6 +2880,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_nv1,
ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
},
+#ifdef CONFIG_ARM64_POE
+ {
+ .desc = "Stage-1 Permission Overlay Extension (S1POE)",
+ .capability = ARM64_HAS_S1POE,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_poe,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1POE, IMP)
+ },
+#endif
{},
};
@@ -3034,6 +3054,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2),
+#ifdef CONFIG_ARM64_POE
+ HWCAP_CAP(ID_AA64MMFR3_EL1, S1POE, IMP, CAP_HWCAP, KERNEL_HWCAP_POE),
+#endif
{},
};
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 09eeaa24d456..44718d0482b3 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -143,6 +143,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_SF8FMA] = "smesf8fma",
[KERNEL_HWCAP_SME_SF8DP4] = "smesf8dp4",
[KERNEL_HWCAP_SME_SF8DP2] = "smesf8dp2",
+ [KERNEL_HWCAP_POE] = "poe",
};
#ifdef CONFIG_COMPAT
@@ -280,7 +281,7 @@ const struct seq_operations cpuinfo_op = {
};
-static struct kobj_type cpuregs_kobj_type = {
+static const struct kobj_type cpuregs_kobj_type = {
.sysfs_ops = &kobj_sysfs_ops,
};
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index b77a15955f28..3fcd9d080bf2 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -103,7 +103,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
static __always_inline void __enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ CT_WARN_ON(ct_state() != CT_STATE_USER);
user_exit_irqoff();
trace_hardirqs_off_finish();
mte_disable_tco_entry(current);
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 02870beb271e..7b11d84f533c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -407,7 +407,7 @@ int swsusp_arch_resume(void)
void *, phys_addr_t, phys_addr_t);
struct trans_pgd_info trans_info = {
.trans_alloc_page = hibernate_page_alloc,
- .trans_alloc_arg = (void *)GFP_ATOMIC,
+ .trans_alloc_arg = (__force void *)GFP_ATOMIC,
};
/*
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index f872c57e9909..fd9a7bed83ce 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -6,28 +6,7 @@
* Copyright (C) 2014 ARM Ltd.
*/
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
-#include <linux/slab.h>
-
-#ifdef CONFIG_ACPI
-/*
- * Try to assign the IRQ number when probing a new device
- */
-int pcibios_alloc_irq(struct pci_dev *dev)
-{
- if (!acpi_disabled)
- acpi_pci_irq_enable(dev);
-
- return 0;
-}
-#endif
/*
* raw_pci_read/write - Platform-specific PCI config space access.
@@ -61,173 +40,3 @@ int pcibus_to_node(struct pci_bus *bus)
EXPORT_SYMBOL(pcibus_to_node);
#endif
-
-#ifdef CONFIG_ACPI
-
-struct acpi_pci_generic_root_info {
- struct acpi_pci_root_info common;
- struct pci_config_window *cfg; /* config space mapping */
-};
-
-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct acpi_device *adev = to_acpi_device(cfg->parent);
- struct acpi_pci_root *root = acpi_driver_data(adev);
-
- return root->segment;
-}
-
-int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
-{
- struct pci_config_window *cfg;
- struct acpi_device *adev;
- struct device *bus_dev;
-
- if (acpi_disabled)
- return 0;
-
- cfg = bridge->bus->sysdata;
-
- /*
- * On Hyper-V there is no corresponding ACPI device for a root bridge,
- * therefore ->parent is set as NULL by the driver. And set 'adev' as
- * NULL in this case because there is no proper ACPI device.
- */
- if (!cfg->parent)
- adev = NULL;
- else
- adev = to_acpi_device(cfg->parent);
-
- bus_dev = &bridge->bus->dev;
-
- ACPI_COMPANION_SET(&bridge->dev, adev);
- set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
-
- return 0;
-}
-
-static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
-{
- struct resource_entry *entry, *tmp;
- int status;
-
- status = acpi_pci_probe_root_resources(ci);
- resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
- if (!(entry->res->flags & IORESOURCE_WINDOW))
- resource_list_destroy_entry(entry);
- }
- return status;
-}
-
-/*
- * Lookup the bus range for the domain in MCFG, and set up config space
- * mapping.
- */
-static struct pci_config_window *
-pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
-{
- struct device *dev = &root->device->dev;
- struct resource *bus_res = &root->secondary;
- u16 seg = root->segment;
- const struct pci_ecam_ops *ecam_ops;
- struct resource cfgres;
- struct acpi_device *adev;
- struct pci_config_window *cfg;
- int ret;
-
- ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
- if (ret) {
- dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
- return NULL;
- }
-
- adev = acpi_resource_consumer(&cfgres);
- if (adev)
- dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
- dev_name(&adev->dev));
- else
- dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
- &cfgres);
-
- cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
- if (IS_ERR(cfg)) {
- dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
- PTR_ERR(cfg));
- return NULL;
- }
-
- return cfg;
-}
-
-/* release_info: free resources allocated by init_info */
-static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
-{
- struct acpi_pci_generic_root_info *ri;
-
- ri = container_of(ci, struct acpi_pci_generic_root_info, common);
- pci_ecam_free(ri->cfg);
- kfree(ci->ops);
- kfree(ri);
-}
-
-/* Interface called from ACPI code to setup PCI host controller */
-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
-{
- struct acpi_pci_generic_root_info *ri;
- struct pci_bus *bus, *child;
- struct acpi_pci_root_ops *root_ops;
- struct pci_host_bridge *host;
-
- ri = kzalloc(sizeof(*ri), GFP_KERNEL);
- if (!ri)
- return NULL;
-
- root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
- if (!root_ops) {
- kfree(ri);
- return NULL;
- }
-
- ri->cfg = pci_acpi_setup_ecam_mapping(root);
- if (!ri->cfg) {
- kfree(ri);
- kfree(root_ops);
- return NULL;
- }
-
- root_ops->release_info = pci_acpi_generic_release_info;
- root_ops->prepare_resources = pci_acpi_root_prepare_resources;
- root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
- bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
- if (!bus)
- return NULL;
-
- /* If we must preserve the resource configuration, claim now */
- host = pci_find_host_bridge(bus);
- if (host->preserve_config)
- pci_bus_claim_resources(bus);
-
- /*
- * Assign whatever was left unassigned. If we didn't claim above,
- * this will reassign everything.
- */
- pci_assign_unassigned_root_bus_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- return bus;
-}
-
-void pcibios_add_bus(struct pci_bus *bus)
-{
- acpi_pci_add_bus(bus);
-}
-
-void pcibios_remove_bus(struct pci_bus *bus)
-{
- acpi_pci_remove_bus(bus);
-}
-
-#endif
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4ae31b7af6c3..0540653fbf38 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -43,6 +43,7 @@
#include <linux/stacktrace.h>
#include <asm/alternative.h>
+#include <asm/arch_timer.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
@@ -271,12 +272,21 @@ static void flush_tagged_addr_state(void)
clear_thread_flag(TIF_TAGGED_ADDR);
}
+static void flush_poe(void)
+{
+ if (!system_supports_poe())
+ return;
+
+ write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
+ flush_poe();
}
void arch_release_task_struct(struct task_struct *tsk)
@@ -371,6 +381,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (system_supports_tpidr2())
p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+ if (system_supports_poe())
+ p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+
if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
@@ -472,27 +485,63 @@ static void entry_task_switch(struct task_struct *next)
}
/*
- * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
- * Ensure access is disabled when switching to a 32bit task, ensure
- * access is enabled when switching to a 64bit task.
+ * Handle sysreg updates for ARM erratum 1418040 (which affects the 32bit view
+ * of CNTVCT), for other errata that require trapping all CNTVCT{,_EL0}
+ * accesses, and for prctl(PR_SET_TSC). Ensure access is disabled iff a
+ * workaround is required or PR_TSC_SIGSEGV is set.
*/
-static void erratum_1418040_thread_switch(struct task_struct *next)
+static void update_cntkctl_el1(struct task_struct *next)
{
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
- !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
- return;
+ struct thread_info *ti = task_thread_info(next);
- if (is_compat_thread(task_thread_info(next)))
+ if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
+ has_erratum_handler(read_cntvct_el0) ||
+ (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
+ is_compat_thread(ti)))
sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
else
sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}
-static void erratum_1418040_new_exec(void)
+static void cntkctl_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if ((read_ti_thread_flags(task_thread_info(prev)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
+ (read_ti_thread_flags(task_thread_info(next)) &
+ (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
+ update_cntkctl_el1(next);
+}
+
+static int do_set_tsc_mode(unsigned int val)
{
+ bool tsc_sigsegv;
+
+ if (val == PR_TSC_SIGSEGV)
+ tsc_sigsegv = true;
+ else if (val == PR_TSC_ENABLE)
+ tsc_sigsegv = false;
+ else
+ return -EINVAL;
+
preempt_disable();
- erratum_1418040_thread_switch(current);
+ update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
+ update_cntkctl_el1(current);
preempt_enable();
+
+ return 0;
+}
+
+static void permission_overlay_switch(struct task_struct *next)
+{
+ if (!system_supports_poe())
+ return;
+
+ current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+ if (current->thread.por_el0 != next->thread.por_el0) {
+ write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+ }
}
/*
@@ -528,8 +577,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
ssbs_thread_switch(next);
- erratum_1418040_thread_switch(next);
+ cntkctl_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
+ permission_overlay_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -645,7 +695,7 @@ void arch_setup_new_exec(void)
current->mm->context.flags = mmflags;
ptrauth_thread_init_user();
mte_thread_init_user();
- erratum_1418040_new_exec();
+ do_set_tsc_mode(PR_TSC_ENABLE);
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
@@ -754,3 +804,26 @@ int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
return prot;
}
#endif
+
+int get_tsc_mode(unsigned long adr)
+{
+ unsigned int val;
+
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TSC_SIGSEGV))
+ val = PR_TSC_SIGSEGV;
+ else
+ val = PR_TSC_ENABLE;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+ if (is_compat_task())
+ return -EINVAL;
+
+ return do_set_tsc_mode(val);
+}
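
The new mode is reachable through the existing prctl interface; a userspace
sketch (constants guarded for older headers). After the prctl, the counter
read is trapped and delivered as SIGSEGV rather than emulated:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_TSC
	#define PR_SET_TSC 26
	#endif
	#ifndef PR_TSC_SIGSEGV
	#define PR_TSC_SIGSEGV 2
	#endif

	static uint64_t read_cntvct(void)
	{
		uint64_t val;

		asm volatile("mrs %0, cntvct_el0" : "=r"(val));
		return val;
	}

	int main(void)
	{
		printf("cntvct: %" PRIu64 "\n", read_cntvct());

		prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
		/* Trapped now: CNTKCTL_EL1.EL0VCTEN is clear and the kernel
		 * raises SIGSEGV instead of emulating the access. */
		read_cntvct();
		printf("not reached\n");
		return 0;
	}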
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 0d022599eb61..b756578aeaee 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1440,6 +1440,39 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
}
#endif
+#ifdef CONFIG_ARM64_POE
+static int poe_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_poe())
+ return -EINVAL;
+
+ return membuf_write(&to, &target->thread.por_el0,
+ sizeof(target->thread.por_el0));
+}
+
+static int poe_set(struct task_struct *target, const struct
+ user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf, const
+ void __user *ubuf)
+{
+ int ret;
+ long ctrl;
+
+ if (!system_supports_poe())
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
+ if (ret)
+ return ret;
+
+ target->thread.por_el0 = ctrl;
+
+ return 0;
+}
+#endif
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -1469,6 +1502,9 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
REGSET_TAGGED_ADDR_CTRL,
#endif
+#ifdef CONFIG_ARM64_POE
+ REGSET_POE
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -1628,6 +1664,16 @@ static const struct user_regset aarch64_regsets[] = {
.set = tagged_addr_ctrl_set,
},
#endif
+#ifdef CONFIG_ARM64_POE
+ [REGSET_POE] = {
+ .core_note_type = NT_ARM_POE,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = poe_get,
+ .set = poe_set,
+ },
+#endif
};
static const struct user_regset_view user_aarch64_view = {
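
A debugger reads the regset with PTRACE_GETREGSET; a tracer-side helper sketch
(the NT_ARM_POE value is an assumption taken from this series' uapi
<linux/elf.h>, and the tracee must already be attached and stopped):

	#include <elf.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	#ifndef NT_ARM_POE
	#define NT_ARM_POE 0x40f	/* assumed; see uapi <linux/elf.h> */
	#endif

	static int read_por_el0(pid_t pid)
	{
		uint64_t por = 0;
		struct iovec iov = { .iov_base = &por, .iov_len = sizeof(por) };

		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_POE, &iov) != 0)
			return -1;

		printf("POR_EL0 of %d: %#llx\n", (int)pid,
		       (unsigned long long)por);
		return 0;
	}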
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 4a77f4976e11..561986947530 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -61,6 +61,7 @@ struct rt_sigframe_user_layout {
unsigned long za_offset;
unsigned long zt_offset;
unsigned long fpmr_offset;
+ unsigned long poe_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -185,6 +186,8 @@ struct user_ctxs {
u32 zt_size;
struct fpmr_context __user *fpmr;
u32 fpmr_size;
+ struct poe_context __user *poe;
+ u32 poe_size;
};
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
@@ -258,6 +261,32 @@ static int restore_fpmr_context(struct user_ctxs *user)
return err;
}
+static int preserve_poe_context(struct poe_context __user *ctx)
+{
+ int err = 0;
+
+ __put_user_error(POE_MAGIC, &ctx->head.magic, err);
+ __put_user_error(sizeof(*ctx), &ctx->head.size, err);
+ __put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err);
+
+ return err;
+}
+
+static int restore_poe_context(struct user_ctxs *user)
+{
+ u64 por_el0;
+ int err = 0;
+
+ if (user->poe_size != sizeof(*user->poe))
+ return -EINVAL;
+
+ __get_user_error(por_el0, &(user->poe->por_el0), err);
+ if (!err)
+ write_sysreg_s(por_el0, SYS_POR_EL0);
+
+ return err;
+}
+
#ifdef CONFIG_ARM64_SVE
static int preserve_sve_context(struct sve_context __user *ctx)
@@ -621,6 +650,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->za = NULL;
user->zt = NULL;
user->fpmr = NULL;
+ user->poe = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -671,6 +701,17 @@ static int parse_user_sigframe(struct user_ctxs *user,
/* ignore */
break;
+ case POE_MAGIC:
+ if (!system_supports_poe())
+ goto invalid;
+
+ if (user->poe)
+ goto invalid;
+
+ user->poe = (struct poe_context __user *)head;
+ user->poe_size = size;
+ break;
+
case SVE_MAGIC:
if (!system_supports_sve() && !system_supports_sme())
goto invalid;
@@ -857,6 +898,9 @@ static int restore_sigframe(struct pt_regs *regs,
if (err == 0 && system_supports_sme2() && user.zt)
err = restore_zt_context(&user);
+ if (err == 0 && system_supports_poe() && user.poe)
+ err = restore_poe_context(&user);
+
return err;
}
@@ -980,6 +1024,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
+ if (system_supports_poe()) {
+ err = sigframe_alloc(user, &user->poe_offset,
+ sizeof(struct poe_context));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -1042,6 +1093,14 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
err |= preserve_fpmr_context(fpmr_ctx);
}
+ if (system_supports_poe() && err == 0 && user->poe_offset) {
+ struct poe_context __user *poe_ctx =
+ apply_user_offset(user, user->poe_offset);
+
+ err |= preserve_poe_context(poe_ctx);
+ }
+
/* ZA state if present */
if (system_supports_sme() && err == 0 && user->za_offset) {
struct za_context __user *za_ctx =
@@ -1178,6 +1237,9 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
sme_smstop();
}
+ if (system_supports_poe())
+ write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
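
The poe_context record that preserve_poe_context()/restore_poe_context()
marshal above follows the usual sigframe convention: a _aarch64_ctx header
tagged with POE_MAGIC, then the POR_EL0 value. A sketch of the layout (the
authoritative definition is the uapi sigcontext header this series extends):

    struct poe_context {
        struct _aarch64_ctx head; /* .magic = POE_MAGIC, .size = sizeof(*ctx) */
        __u64 por_el0;
    };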
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f01f0fd7b7fe..3b3f6b56e733 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -68,7 +68,7 @@ enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CPU_STOP,
- IPI_CPU_CRASH_STOP,
+ IPI_CPU_STOP_NMI,
IPI_TIMER,
IPI_IRQ_WORK,
NR_IPI,
@@ -85,6 +85,8 @@ static int ipi_irq_base __ro_after_init;
static int nr_ipi __ro_after_init = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+static bool crash_stop;
+
static void ipi_setup(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
@@ -823,7 +825,7 @@ static const char *ipi_types[MAX_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
- [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
+ [IPI_CPU_STOP_NMI] = "CPU stop NMIs",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
@@ -867,9 +869,9 @@ void arch_irq_work_raise(void)
}
#endif
-static void __noreturn local_cpu_stop(void)
+static void __noreturn local_cpu_stop(unsigned int cpu)
{
- set_cpu_online(smp_processor_id(), false);
+ set_cpu_online(cpu, false);
local_daif_mask();
sdei_mask_local_cpu();
@@ -883,21 +885,26 @@ static void __noreturn local_cpu_stop(void)
*/
void __noreturn panic_smp_self_stop(void)
{
- local_cpu_stop();
+ local_cpu_stop(smp_processor_id());
}
-#ifdef CONFIG_KEXEC_CORE
-static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
-#endif
-
static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
+ /*
+ * Use local_daif_mask() instead of local_irq_disable() to make sure
+ * that pseudo-NMIs are disabled. The "crash stop" code starts with
+ * an IRQ and falls back to NMI (which might be pseudo). If the IRQ
+ * finally goes through right as we're timing out then the NMI could
+ * interrupt us. It's better to prevent the NMI and let the IRQ
+ * finish since the pt_regs will be better.
+ */
+ local_daif_mask();
+
crash_save_cpu(regs, cpu);
- atomic_dec(&waiting_for_crash_ipi);
+ set_cpu_online(cpu, false);
- local_irq_disable();
sdei_mask_local_cpu();
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -962,14 +969,12 @@ static void do_handle_IPI(int ipinr)
break;
case IPI_CPU_STOP:
- local_cpu_stop();
- break;
-
- case IPI_CPU_CRASH_STOP:
- if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
+ case IPI_CPU_STOP_NMI:
+ if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) {
ipi_cpu_crash_stop(cpu, get_irq_regs());
-
unreachable();
+ } else {
+ local_cpu_stop(cpu);
}
break;
@@ -1024,8 +1029,7 @@ static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
return false;
switch (ipi) {
- case IPI_CPU_STOP:
- case IPI_CPU_CRASH_STOP:
+ case IPI_CPU_STOP_NMI:
case IPI_CPU_BACKTRACE:
case IPI_KGDB_ROUNDUP:
return true;
@@ -1138,79 +1142,109 @@ static inline unsigned int num_other_online_cpus(void)
void smp_send_stop(void)
{
+ static unsigned long stop_in_progress;
+ cpumask_t mask;
unsigned long timeout;
- if (num_other_online_cpus()) {
- cpumask_t mask;
+ /*
+ * If this cpu is the only one alive at this point in time, online or
+ * not, there are no stop messages to be sent around, so just back out.
+ */
+ if (num_other_online_cpus() == 0)
+ goto skip_ipi;
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
+ /* Only proceed if this is the first CPU to reach this code */
+ if (test_and_set_bit(0, &stop_in_progress))
+ return;
- if (system_state <= SYSTEM_RUNNING)
- pr_crit("SMP: stopping secondary CPUs\n");
- smp_cross_call(&mask, IPI_CPU_STOP);
- }
+ /*
+ * Send an IPI to all currently online CPUs except the CPU running
+ * this code.
+ *
+ * NOTE: we don't do anything here to prevent other CPUs from coming
+ * online after we snapshot `cpu_online_mask`. Ideally, the calling code
+ * should do something to prevent other CPUs from coming up. This code
+ * can be called in the panic path and thus it doesn't seem wise to
+ * grab the CPU hotplug mutex ourselves. Worst case:
+ * - If a CPU comes online as we're running, we'll likely notice it
+ * during the 1 second wait below and then we'll catch it when we try
+ * with an NMI (assuming NMIs are enabled) since we re-snapshot the
+ * mask before sending an NMI.
+ * - If we leave the function and see that CPUs are still online we'll
+ * at least print a warning. Especially without NMIs this function
+ * isn't foolproof anyway so calling code will just have to accept
+ * the fact that there could be cases where a CPU can't be stopped.
+ */
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
- /* Wait up to one second for other CPUs to stop */
+ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+
+ /*
+ * Start with a normal IPI and wait up to one second for other CPUs to
+ * stop. We do this first because it gives other processors a chance
+ * to exit critical sections / drop locks and makes the rest of the
+ * stop process (especially console flush) more robust.
+ */
+ smp_cross_call(&mask, IPI_CPU_STOP);
timeout = USEC_PER_SEC;
while (num_other_online_cpus() && timeout--)
udelay(1);
- if (num_other_online_cpus())
+ /*
+ * If CPUs are still online, try an NMI. There's no excuse for this to
+ * be slow, so we only give them an extra 10 ms to respond.
+ */
+ if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) {
+ smp_rmb();
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ pr_info("SMP: retry stop with NMI for CPUs %*pbl\n",
+ cpumask_pr_args(&mask));
+
+ smp_cross_call(&mask, IPI_CPU_STOP_NMI);
+ timeout = USEC_PER_MSEC * 10;
+ while (num_other_online_cpus() && timeout--)
+ udelay(1);
+ }
+
+ if (num_other_online_cpus()) {
+ smp_rmb();
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
- cpumask_pr_args(cpu_online_mask));
+ cpumask_pr_args(&mask));
+ }
+skip_ipi:
sdei_mask_local_cpu();
}
#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
- static int cpus_stopped;
- cpumask_t mask;
- unsigned long timeout;
-
/*
* This function can be called twice in panic path, but obviously
* we execute this only once.
+ *
+ * We use this same boolean to tell whether the IPI we send was a
+ * stop or a "crash stop".
*/
- if (cpus_stopped)
+ if (crash_stop)
return;
+ crash_stop = 1;
- cpus_stopped = 1;
+ smp_send_stop();
- /*
- * If this cpu is the only one alive at this point in time, online or
- * not, there are no stop messages to be sent around, so just back out.
- */
- if (num_other_online_cpus() == 0)
- goto skip_ipi;
-
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
-
- atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
-
- pr_crit("SMP: stopping secondary CPUs\n");
- smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
-
- /* Wait up to one second for other CPUs to stop */
- timeout = USEC_PER_SEC;
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
- udelay(1);
-
- if (atomic_read(&waiting_for_crash_ipi) > 0)
- pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
- cpumask_pr_args(&mask));
-
-skip_ipi:
- sdei_mask_local_cpu();
sdei_handler_abort();
}
bool smp_crash_stop_failed(void)
{
- return (atomic_read(&waiting_for_crash_ipi) > 0);
+ return num_other_online_cpus() != 0;
}
#endif
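
Taken together, the smp.c hunks above turn the stop path into an escalation
ladder, with crash_smp_send_stop() now reusing smp_send_stop() and the
crash_stop flag selecting the "crash stop" behaviour in the IPI handler. A
sketch of the ladder (hypothetical helper names; budgets as in the code):

    send_ipi(online_cpus, IPI_CPU_STOP);           /* wait up to 1 s */
    if (cpus_still_online() && ipi_should_be_nmi(IPI_CPU_STOP_NMI))
        send_ipi(online_cpus, IPI_CPU_STOP_NMI);   /* plus 10 ms     */
    if (cpus_still_online())
        pr_warn("SMP: failed to stop secondary CPUs\n");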
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 9e22683aa921..563cbce11126 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -273,6 +273,12 @@ void arm64_force_sig_fault(int signo, int code, unsigned long far,
force_sig_fault(signo, code, (void __user *)far);
}
+void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey)
+{
+ arm64_show_signal(SIGSEGV, str);
+ force_sig_pkuerr((void __user *)far, pkey);
+}
+
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
const char *str)
{
@@ -601,18 +607,26 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
- int rt = ESR_ELx_SYS64_ISS_RT(esr);
+ if (test_thread_flag(TIF_TSC_SIGSEGV)) {
+ force_sig(SIGSEGV);
+ } else {
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
- pt_regs_write_reg(regs, rt, arch_timer_read_counter());
- arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ pt_regs_write_reg(regs, rt, arch_timer_read_counter());
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
}
static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
- int rt = ESR_ELx_SYS64_ISS_RT(esr);
+ if (test_thread_flag(TIF_TSC_SIGSEGV)) {
+ force_sig(SIGSEGV);
+ } else {
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
- pt_regs_write_reg(regs, rt, arch_timer_get_rate());
- arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ pt_regs_write_reg(regs, rt, arch_timer_get_rate());
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ }
}
static void mrs_handler(unsigned long esr, struct pt_regs *regs)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 89b6e7840002..706c9c3a7a50 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -34,12 +34,6 @@ enum vdso_abi {
VDSO_ABI_AA32,
};
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
struct vdso_abi_info {
const char *name;
const char *vdso_code_start;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d11da6461278..35685c036044 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -9,7 +9,7 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
-obj-vdso := vgettimeofday.o note.o sigreturn.o
+obj-vdso := vgettimeofday.o note.o sigreturn.o vgetrandom.o vgetrandom-chacha.o
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg
@@ -34,19 +34,28 @@ ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
# -Wmissing-prototypes and -Wmissing-declarations are removed from
-# the CFLAGS of vgettimeofday.c to make possible to build the
-# kernel with CONFIG_WERROR enabled.
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
- $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
- $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \
- -Wmissing-prototypes -Wmissing-declarations
+# the CFLAGS to make it possible to build the kernel with CONFIG_WERROR enabled.
+CC_FLAGS_REMOVE_VDSO := $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \
+ $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \
+ $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \
+ -Wmissing-prototypes -Wmissing-declarations
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
+CC_FLAGS_ADD_VDSO := -O2 -mcmodel=tiny -fasynchronous-unwind-tables
+
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_REMOVE_VDSO)
+CFLAGS_REMOVE_vgetrandom.o = $(CC_FLAGS_REMOVE_VDSO)
+
+CFLAGS_vgettimeofday.o = $(CC_FLAGS_ADD_VDSO)
+CFLAGS_vgetrandom.o = $(CC_FLAGS_ADD_VDSO)
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 45354f2ddf70..f204a9ddc833 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -11,7 +11,9 @@
#include <linux/const.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <asm/vdso/vsyscall.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
@@ -19,6 +21,7 @@ OUTPUT_ARCH(aarch64)
SECTIONS
{
PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+ PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET);
#ifdef CONFIG_TIME_NS
PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
#endif
@@ -102,6 +105,7 @@ VERSION
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
+ __kernel_getrandom;
local: *;
};
}
diff --git a/arch/arm64/kernel/vdso/vgetrandom-chacha.S b/arch/arm64/kernel/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..67890b445309
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgetrandom-chacha.S
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/linkage.h>
+#include <asm/cache.h>
+#include <asm/assembler.h>
+
+ .text
+
+#define state0 v0
+#define state1 v1
+#define state2 v2
+#define state3 v3
+#define copy0 v4
+#define copy0_q q4
+#define copy1 v5
+#define copy2 v6
+#define copy3 v7
+#define copy3_d d7
+#define one_d d16
+#define one_q q16
+#define one_v v16
+#define tmp v17
+#define rot8 v18
+
+/*
+ * ARM64 ChaCha20 implementation meant for vDSO. Produces a given positive
+ * number of blocks of output with nonce 0, taking an input key and an
+ * 8-byte counter. Importantly, it does not spill to the stack.
+ *
+ * This implementation avoids d8-d15 because they are callee-save in user
+ * space.
+ *
+ * void __arch_chacha20_blocks_nostack(uint8_t *dst_bytes,
+ * const uint8_t *key,
+ * uint32_t *counter,
+ * size_t nblocks)
+ *
+ * x0: output bytes
+ * x1: 32-byte key input
+ * x2: 8-byte counter input/output
+ * x3: number of 64-byte blocks to write to the output
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+
+ /* copy0 = "expand 32-byte k" */
+ mov_q x8, 0x3320646e61707865
+ mov_q x9, 0x6b20657479622d32
+ mov copy0.d[0], x8
+ mov copy0.d[1], x9
+
+ /* copy1,copy2 = key */
+ ld1 { copy1.4s, copy2.4s }, [x1]
+ /* copy3 = counter || zero nonce */
+ ld1 { copy3.2s }, [x2]
+
+ movi one_v.2s, #1
+ uzp1 one_v.4s, one_v.4s, one_v.4s
+
+.Lblock:
+ /* copy state to auxiliary vectors for the final add after the permute. */
+ mov state0.16b, copy0.16b
+ mov state1.16b, copy1.16b
+ mov state2.16b, copy2.16b
+ mov state3.16b, copy3.16b
+
+ mov w4, 20
+.Lpermute:
+ /*
+ * Permute one 64-byte block where the state matrix is stored in the four NEON
+ * registers state0-state3. It performs matrix operations on four words in parallel,
+ * but requires shuffling to rearrange the words after each round.
+ */
+
+.Ldoubleround:
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */
+ add state0.4s, state0.4s, state1.4s
+ eor state3.16b, state3.16b, state0.16b
+ rev32 state3.8h, state3.8h
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */
+ add state2.4s, state2.4s, state3.4s
+ eor tmp.16b, state1.16b, state2.16b
+ shl state1.4s, tmp.4s, #12
+ sri state1.4s, tmp.4s, #20
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */
+ add state0.4s, state0.4s, state1.4s
+ eor tmp.16b, state3.16b, state0.16b
+ shl state3.4s, tmp.4s, #8
+ sri state3.4s, tmp.4s, #24
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */
+ add state2.4s, state2.4s, state3.4s
+ eor tmp.16b, state1.16b, state2.16b
+ shl state1.4s, tmp.4s, #7
+ sri state1.4s, tmp.4s, #25
+
+ /* state1[0,1,2,3] = state1[1,2,3,0] */
+ ext state1.16b, state1.16b, state1.16b, #4
+ /* state2[0,1,2,3] = state2[2,3,0,1] */
+ ext state2.16b, state2.16b, state2.16b, #8
+ /* state3[0,1,2,3] = state3[1,2,3,0] */
+ ext state3.16b, state3.16b, state3.16b, #12
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */
+ add state0.4s, state0.4s, state1.4s
+ eor state3.16b, state3.16b, state0.16b
+ rev32 state3.8h, state3.8h
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */
+ add state2.4s, state2.4s, state3.4s
+ eor tmp.16b, state1.16b, state2.16b
+ shl state1.4s, tmp.4s, #12
+ sri state1.4s, tmp.4s, #20
+
+ /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */
+ add state0.4s, state0.4s, state1.4s
+ eor tmp.16b, state3.16b, state0.16b
+ shl state3.4s, tmp.4s, #8
+ sri state3.4s, tmp.4s, #24
+
+ /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */
+ add state2.4s, state2.4s, state3.4s
+ eor tmp.16b, state1.16b, state2.16b
+ shl state1.4s, tmp.4s, #7
+ sri state1.4s, tmp.4s, #25
+
+ /* state1[0,1,2,3] = state1[3,0,1,2] */
+ ext state1.16b, state1.16b, state1.16b, #12
+ /* state2[0,1,2,3] = state2[2,3,0,1] */
+ ext state2.16b, state2.16b, state2.16b, #8
+ /* state3[0,1,2,3] = state3[1,2,3,0] */
+ ext state3.16b, state3.16b, state3.16b, #4
+
+ subs w4, w4, #2
+ b.ne .Ldoubleround
+
+ /* output0 = state0 + copy0 */
+ add state0.4s, state0.4s, copy0.4s
+ /* output1 = state1 + copy1 */
+ add state1.4s, state1.4s, copy1.4s
+ /* output2 = state2 + copy2 */
+ add state2.4s, state2.4s, copy2.4s
+ /* output3 = state3 + copy3 */
+ add state3.4s, state3.4s, copy3.4s
+ st1 { state0.16b - state3.16b }, [x0]
+
+ /*
+ * ++copy3.counter; the 'add' clears the upper half of the SIMD register,
+ * which is the expected behaviour here.
+ */
+ add copy3_d, copy3_d, one_d
+
+ /* output += 64, --nblocks */
+ add x0, x0, 64
+ subs x3, x3, #1
+ b.ne .Lblock
+
+ /* counter = copy3.counter */
+ st1 { copy3.2s }, [x2]
+
+ /* Zero out the potentially sensitive regs, in case nothing uses these again. */
+ movi state0.16b, #0
+ movi state1.16b, #0
+ movi state2.16b, #0
+ movi state3.16b, #0
+ movi copy1.16b, #0
+ movi copy2.16b, #0
+ ret
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
+
+emit_aarch64_feature_1_and
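
For readers cross-checking the NEON sequence, a plain-C reference of one
ChaCha20 quarter-round (standard RFC 7539 rotations; in the assembly above,
the rotation by 16 is the rev32 shortcut and the others are shl+sri pairs):

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t v, int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d = rotl32(*d ^ *a, 16); /* rev32            */
        *c += *d; *b = rotl32(*b ^ *c, 12); /* shl #12, sri #20 */
        *a += *b; *d = rotl32(*d ^ *a, 8);  /* shl #8, sri #24  */
        *c += *d; *b = rotl32(*b ^ *c, 7);  /* shl #7, sri #25  */
    }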
diff --git a/arch/arm64/kernel/vdso/vgetrandom.c b/arch/arm64/kernel/vdso/vgetrandom.c
new file mode 100644
index 000000000000..832fe195292b
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vgetrandom.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <uapi/asm-generic/errno.h>
+
+typeof(__cvdso_getrandom) __kernel_getrandom;
+
+ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ if (alternative_has_cap_likely(ARM64_HAS_FPSIMD))
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+
+ if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags))
+ return -ENOSYS;
+ return getrandom_syscall(buffer, len, flags);
+}
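
Userspace is not expected to call __kernel_getrandom directly (libc wires it
up), but the shape of a call is worth sketching. Whether dlopen() can hand
back the vDSO is libc-specific (the kernel's selftests parse the vDSO out of
the auxiliary vector instead), so treat this strictly as an illustration:

    #include <dlfcn.h>
    #include <stddef.h>
    #include <sys/types.h>

    typedef ssize_t (*vgr_fn)(void *, size_t, unsigned int, void *, size_t);

    static ssize_t vdso_getrandom(void *buf, size_t len,
                                  void *opaque_state, size_t opaque_len)
    {
        static vgr_fn fn;

        if (!fn)
            fn = (vgr_fn)dlsym(dlopen("linux-vdso.so.1", RTLD_NOW),
                               "__kernel_getrandom");
        return fn(buf, len, 0, opaque_state, opaque_len);
    }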
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8304eb342be9..ead632ad01b4 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -66,4 +66,21 @@ config PROTECTED_NVHE_STACKTRACE
If unsure, or not using protected nVHE (pKVM), say N.
+config PTDUMP_STAGE2_DEBUGFS
+ bool "Present the stage-2 pagetables to debugfs"
+ depends on KVM
+ depends on DEBUG_KERNEL
+ depends on DEBUG_FS
+ depends on GENERIC_PTDUMP
+ select PTDUMP_CORE
+ default n
+ help
+ Say Y here if you want to show the stage-2 kernel pagetables
+ layout in a debugfs file. This information is only useful for
+ kernel developers who are working in architecture-specific
+ areas of the kernel. It is probably not a good idea to enable
+ this feature in a production kernel.
+
+ If in doubt, say N.
+
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 86a629aaf0a1..3cf7adb2b503 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o stacktrace.o \
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
- arch_timer.o trng.o vmid.o emulate-nested.o nested.o \
+ arch_timer.o trng.o vmid.o emulate-nested.o nested.o at.o \
vgic/vgic.o vgic/vgic-init.o \
vgic/vgic-irqfd.o vgic/vgic-v2.o \
vgic/vgic-v3.o vgic/vgic-v4.o \
@@ -27,6 +27,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
+kvm-$(CONFIG_PTDUMP_STAGE2_DEBUGFS) += ptdump.o
always-y := hyp_constants.h hyp-constants.s
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9bef7638342e..fe0764173cd0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -46,6 +46,8 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>
+#include "sys_regs.h"
+
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
enum kvm_wfx_trap_policy {
@@ -228,6 +230,7 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
kvm_sys_regs_create_debugfs(kvm);
+ kvm_s2_ptdump_create_debugfs(kvm);
}
static void kvm_destroy_mpidr_data(struct kvm *kvm)
@@ -821,15 +824,13 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
}
- if (vcpu_has_nv(vcpu)) {
- ret = kvm_init_nv_sysregs(vcpu->kvm);
- if (ret)
- return ret;
- }
+ ret = kvm_finalize_sys_regs(vcpu);
+ if (ret)
+ return ret;
/*
- * This needs to happen after NV has imposed its own restrictions on
- * the feature set
+ * This needs to happen after any restriction has been applied
+ * to the feature set.
*/
kvm_calculate_traps(vcpu);
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
new file mode 100644
index 000000000000..39f0e87a340e
--- /dev/null
+++ b/arch/arm64/kvm/at.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 - Linaro Ltd
+ * Author: Jintack Lim <jintack.lim@linaro.org>
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/esr.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+enum trans_regime {
+ TR_EL10,
+ TR_EL20,
+ TR_EL2,
+};
+
+struct s1_walk_info {
+ u64 baddr;
+ enum trans_regime regime;
+ unsigned int max_oa_bits;
+ unsigned int pgshift;
+ unsigned int txsz;
+ int sl;
+ bool hpd;
+ bool be;
+ bool s2;
+};
+
+struct s1_walk_result {
+ union {
+ struct {
+ u64 desc;
+ u64 pa;
+ s8 level;
+ u8 APTable;
+ bool UXNTable;
+ bool PXNTable;
+ };
+ struct {
+ u8 fst;
+ bool ptw;
+ bool s2;
+ };
+ };
+ bool failed;
+};
+
+static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2)
+{
+ wr->fst = fst;
+ wr->ptw = ptw;
+ wr->s2 = s2;
+ wr->failed = true;
+}
+
+#define S1_MMU_DISABLED (-127)
+
+static int get_ia_size(struct s1_walk_info *wi)
+{
+ return 64 - wi->txsz;
+}
+
+/* Return true if the IPA is out of the OA range */
+static bool check_output_size(u64 ipa, struct s1_walk_info *wi)
+{
+ return wi->max_oa_bits < 48 && (ipa & GENMASK_ULL(47, wi->max_oa_bits));
+}
+
+/* Return the translation regime that applies to an AT instruction */
+static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 op)
+{
+ /*
+ * We only get here from guest EL2, so the translation
+ * regime AT applies to is solely defined by {E2H,TGE}.
+ */
+ switch (op) {
+ case OP_AT_S1E2R:
+ case OP_AT_S1E2W:
+ case OP_AT_S1E2A:
+ return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
+ break;
+ default:
+ return (vcpu_el2_e2h_is_set(vcpu) &&
+ vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10;
+ }
+}
+
+static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
+ struct s1_walk_result *wr, u64 va)
+{
+ u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr;
+ unsigned int stride, x;
+ bool va55, tbi, lva, as_el0;
+
+ hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ wi->regime = compute_translation_regime(vcpu, op);
+ as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W);
+
+ va55 = va & BIT(55);
+
+ if (wi->regime == TR_EL2 && va55)
+ goto addrsz;
+
+ wi->s2 = wi->regime == TR_EL10 && (hcr & (HCR_VM | HCR_DC));
+
+ switch (wi->regime) {
+ case TR_EL10:
+ sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+ tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
+ ttbr = (va55 ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL1));
+ break;
+ case TR_EL2:
+ case TR_EL20:
+ sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
+ tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+ ttbr = (va55 ?
+ vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+ vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+ break;
+ default:
+ BUG();
+ }
+
+ tbi = (wi->regime == TR_EL2 ?
+ FIELD_GET(TCR_EL2_TBI, tcr) :
+ (va55 ?
+ FIELD_GET(TCR_TBI1, tcr) :
+ FIELD_GET(TCR_TBI0, tcr)));
+
+ if (!tbi && (u64)sign_extend64(va, 55) != va)
+ goto addrsz;
+
+ va = (u64)sign_extend64(va, 55);
+
+ /* Let's put the MMU disabled case aside immediately */
+ switch (wi->regime) {
+ case TR_EL10:
+ /*
+ * If dealing with the EL1&0 translation regime, 3 things
+ * can disable the S1 translation:
+ *
+ * - HCR_EL2.DC = 1
+ * - HCR_EL2.{E2H,TGE} = {0,1}
+ * - SCTLR_EL1.M = 0
+ *
+ * The TGE part is interesting. If we have decided that this
+ * is EL1&0, then it means that either {E2H,TGE} == {1,0} or
+ * {0,x}, and we only need to test for TGE == 1.
+ */
+ if (hcr & (HCR_DC | HCR_TGE)) {
+ wr->level = S1_MMU_DISABLED;
+ break;
+ }
+ fallthrough;
+ case TR_EL2:
+ case TR_EL20:
+ if (!(sctlr & SCTLR_ELx_M))
+ wr->level = S1_MMU_DISABLED;
+ break;
+ }
+
+ if (wr->level == S1_MMU_DISABLED) {
+ if (va >= BIT(kvm_get_pa_bits(vcpu->kvm)))
+ goto addrsz;
+
+ wr->pa = va;
+ return 0;
+ }
+
+ wi->be = sctlr & SCTLR_ELx_EE;
+
+ wi->hpd = kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, HPDS, IMP);
+ wi->hpd &= (wi->regime == TR_EL2 ?
+ FIELD_GET(TCR_EL2_HPD, tcr) :
+ (va55 ?
+ FIELD_GET(TCR_HPD1, tcr) :
+ FIELD_GET(TCR_HPD0, tcr)));
+
+ /* Someone was silly enough to encode TG0/TG1 differently */
+ if (va55) {
+ wi->txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
+ tg = FIELD_GET(TCR_TG1_MASK, tcr);
+
+ switch (tg << TCR_TG1_SHIFT) {
+ case TCR_TG1_4K:
+ wi->pgshift = 12; break;
+ case TCR_TG1_16K:
+ wi->pgshift = 14; break;
+ case TCR_TG1_64K:
+ default: /* IMPDEF: treat any other value as 64k */
+ wi->pgshift = 16; break;
+ }
+ } else {
+ wi->txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
+ tg = FIELD_GET(TCR_TG0_MASK, tcr);
+
+ switch (tg << TCR_TG0_SHIFT) {
+ case TCR_TG0_4K:
+ wi->pgshift = 12; break;
+ case TCR_TG0_16K:
+ wi->pgshift = 14; break;
+ case TCR_TG0_64K:
+ default: /* IMPDEF: treat any other value as 64k */
+ wi->pgshift = 16; break;
+ }
+ }
+
+ /* R_PLCGL, R_YXNYW */
+ if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR2_EL1, ST, 48_47)) {
+ if (wi->txsz > 39)
+ goto transfault_l0;
+ } else {
+ if (wi->txsz > 48 || (BIT(wi->pgshift) == SZ_64K && wi->txsz > 47))
+ goto transfault_l0;
+ }
+
+ /* R_GTJBY, R_SXWGM */
+ switch (BIT(wi->pgshift)) {
+ case SZ_4K:
+ lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN4, 52_BIT);
+ lva &= tcr & (wi->regime == TR_EL2 ? TCR_EL2_DS : TCR_DS);
+ break;
+ case SZ_16K:
+ lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN16, 52_BIT);
+ lva &= tcr & (wi->regime == TR_EL2 ? TCR_EL2_DS : TCR_DS);
+ break;
+ case SZ_64K:
+ lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, VARange, 52);
+ break;
+ }
+
+ if ((lva && wi->txsz < 12) || (!lva && wi->txsz < 16))
+ goto transfault_l0;
+
+ ia_bits = get_ia_size(wi);
+
+ /* R_YYVYV, I_THCZK */
+ if ((!va55 && va > GENMASK(ia_bits - 1, 0)) ||
+ (va55 && va < GENMASK(63, ia_bits)))
+ goto transfault_l0;
+
+ /* I_ZFSYQ */
+ if (wi->regime != TR_EL2 &&
+ (tcr & (va55 ? TCR_EPD1_MASK : TCR_EPD0_MASK)))
+ goto transfault_l0;
+
+ /* R_BNDVG and following statements */
+ if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) &&
+ as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0)))
+ goto transfault_l0;
+
+ /* AArch64.S1StartLevel() */
+ stride = wi->pgshift - 3;
+ wi->sl = 3 - (((ia_bits - 1) - wi->pgshift) / stride);
+
+ ps = (wi->regime == TR_EL2 ?
+ FIELD_GET(TCR_EL2_PS_MASK, tcr) : FIELD_GET(TCR_IPS_MASK, tcr));
+
+ wi->max_oa_bits = min(get_kvm_ipa_limit(), ps_to_output_size(ps));
+
+ /* Compute minimal alignment */
+ x = 3 + ia_bits - ((3 - wi->sl) * stride + wi->pgshift);
+
+ wi->baddr = ttbr & TTBRx_EL1_BADDR;
+
+ /* R_VPBBF */
+ if (check_output_size(wi->baddr, wi))
+ goto addrsz;
+
+ wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);
+
+ return 0;
+
+addrsz: /* Address Size Fault level 0 */
+ fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false);
+ return -EFAULT;
+
+transfault_l0: /* Translation Fault level 0 */
+ fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false);
+ return -EFAULT;
+}
+
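
A quick worked example of the start-level computation above (illustration,
not part of the original patch): with a 4K granule (pgshift = 12, so
stride = 9) and txsz = 16, the input address space spans
ia_bits = 64 - 16 = 48 bits, giving

    sl = 3 - ((48 - 1 - 12) / 9) = 3 - 3 = 0

i.e. a full four-level walk starting at level 0.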
+static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
+ struct s1_walk_result *wr, u64 va)
+{
+ u64 va_top, va_bottom, baddr, desc;
+ int level, stride, ret;
+
+ level = wi->sl;
+ stride = wi->pgshift - 3;
+ baddr = wi->baddr;
+
+ va_top = get_ia_size(wi) - 1;
+
+ while (1) {
+ u64 index, ipa;
+
+ va_bottom = (3 - level) * stride + wi->pgshift;
+ index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
+
+ ipa = baddr | index;
+
+ if (wi->s2) {
+ struct kvm_s2_trans s2_trans = {};
+
+ ret = kvm_walk_nested_s2(vcpu, ipa, &s2_trans);
+ if (ret) {
+ fail_s1_walk(wr,
+ (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level,
+ true, true);
+ return ret;
+ }
+
+ if (!kvm_s2_trans_readable(&s2_trans)) {
+ fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level),
+ true, true);
+
+ return -EPERM;
+ }
+
+ ipa = kvm_s2_trans_output(&s2_trans);
+ }
+
+ ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc));
+ if (ret) {
+ fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level),
+ true, false);
+ return ret;
+ }
+
+ if (wi->be)
+ desc = be64_to_cpu((__force __be64)desc);
+ else
+ desc = le64_to_cpu((__force __le64)desc);
+
+ /* Invalid descriptor */
+ if (!(desc & BIT(0)))
+ goto transfault;
+
+ /* Block mapping, check validity down the line */
+ if (!(desc & BIT(1)))
+ break;
+
+ /* Page mapping */
+ if (level == 3)
+ break;
+
+ /* Table handling */
+ if (!wi->hpd) {
+ wr->APTable |= FIELD_GET(S1_TABLE_AP, desc);
+ wr->UXNTable |= FIELD_GET(PMD_TABLE_UXN, desc);
+ wr->PXNTable |= FIELD_GET(PMD_TABLE_PXN, desc);
+ }
+
+ baddr = desc & GENMASK_ULL(47, wi->pgshift);
+
+ /* Check for out-of-range OA */
+ if (check_output_size(baddr, wi))
+ goto addrsz;
+
+ /* Prepare for next round */
+ va_top = va_bottom - 1;
+ level++;
+ }
+
+ /* Block mapping, check the validity of the level */
+ if (!(desc & BIT(1))) {
+ bool valid_block = false;
+
+ switch (BIT(wi->pgshift)) {
+ case SZ_4K:
+ valid_block = level == 1 || level == 2;
+ break;
+ case SZ_16K:
+ case SZ_64K:
+ valid_block = level == 2;
+ break;
+ }
+
+ if (!valid_block)
+ goto transfault;
+ }
+
+ if (check_output_size(desc & GENMASK(47, va_bottom), wi))
+ goto addrsz;
+
+ va_bottom += contiguous_bit_shift(desc, wi, level);
+
+ wr->failed = false;
+ wr->level = level;
+ wr->desc = desc;
+ wr->pa = desc & GENMASK(47, va_bottom);
+ wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
+
+ return 0;
+
+addrsz:
+ fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false);
+ return -EINVAL;
+transfault:
+ fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false);
+ return -ENOENT;
+}
+
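
For orientation, the descriptor decoding that drives the loop in walk_s1()
boils down to two bits: bit[0] is the valid bit and bit[1] selects the
descriptor kind:

    desc[1:0] = 0bx0 -> invalid -> translation fault
    desc[1:0] = 0b01 -> block mapping -> exit the loop, validate the level
    desc[1:0] = 0b11 -> table (level < 3) or page (level == 3)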
+struct mmu_config {
+ u64 ttbr0;
+ u64 ttbr1;
+ u64 tcr;
+ u64 mair;
+ u64 sctlr;
+ u64 vttbr;
+ u64 vtcr;
+ u64 hcr;
+};
+
+static void __mmu_config_save(struct mmu_config *config)
+{
+ config->ttbr0 = read_sysreg_el1(SYS_TTBR0);
+ config->ttbr1 = read_sysreg_el1(SYS_TTBR1);
+ config->tcr = read_sysreg_el1(SYS_TCR);
+ config->mair = read_sysreg_el1(SYS_MAIR);
+ config->sctlr = read_sysreg_el1(SYS_SCTLR);
+ config->vttbr = read_sysreg(vttbr_el2);
+ config->vtcr = read_sysreg(vtcr_el2);
+ config->hcr = read_sysreg(hcr_el2);
+}
+
+static void __mmu_config_restore(struct mmu_config *config)
+{
+ write_sysreg(config->hcr, hcr_el2);
+
+ /*
+ * ARM errata 1165522 and 1530923 require TGE to be 1 before
+ * we update the guest state.
+ */
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+
+ write_sysreg_el1(config->ttbr0, SYS_TTBR0);
+ write_sysreg_el1(config->ttbr1, SYS_TTBR1);
+ write_sysreg_el1(config->tcr, SYS_TCR);
+ write_sysreg_el1(config->mair, SYS_MAIR);
+ write_sysreg_el1(config->sctlr, SYS_SCTLR);
+ write_sysreg(config->vttbr, vttbr_el2);
+ write_sysreg(config->vtcr, vtcr_el2);
+}
+
+static bool at_s1e1p_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ u64 host_pan;
+ bool fail;
+
+ host_pan = read_sysreg_s(SYS_PSTATE_PAN);
+ write_sysreg_s(*vcpu_cpsr(vcpu) & PSTATE_PAN, SYS_PSTATE_PAN);
+
+ switch (op) {
+ case OP_AT_S1E1RP:
+ fail = __kvm_at(OP_AT_S1E1RP, vaddr);
+ break;
+ case OP_AT_S1E1WP:
+ fail = __kvm_at(OP_AT_S1E1WP, vaddr);
+ break;
+ }
+
+ write_sysreg_s(host_pan, SYS_PSTATE_PAN);
+
+ return fail;
+}
+
+#define MEMATTR(ic, oc) (MEMATTR_##oc << 4 | MEMATTR_##ic)
+#define MEMATTR_NC 0b0100
+#define MEMATTR_Wt 0b1000
+#define MEMATTR_Wb 0b1100
+#define MEMATTR_WbRaWa 0b1111
+
+#define MEMATTR_IS_DEVICE(m) (((m) & GENMASK(7, 4)) == 0)
+
+static u8 s2_memattr_to_attr(u8 memattr)
+{
+ memattr &= 0b1111;
+
+ switch (memattr) {
+ case 0b0000:
+ case 0b0001:
+ case 0b0010:
+ case 0b0011:
+ return memattr << 2;
+ case 0b0100:
+ return MEMATTR(Wb, Wb);
+ case 0b0101:
+ return MEMATTR(NC, NC);
+ case 0b0110:
+ return MEMATTR(Wt, NC);
+ case 0b0111:
+ return MEMATTR(Wb, NC);
+ case 0b1000:
+ /* Reserved, assume NC */
+ return MEMATTR(NC, NC);
+ case 0b1001:
+ return MEMATTR(NC, Wt);
+ case 0b1010:
+ return MEMATTR(Wt, Wt);
+ case 0b1011:
+ return MEMATTR(Wb, Wt);
+ case 0b1100:
+ /* Reserved, assume NC */
+ return MEMATTR(NC, NC);
+ case 0b1101:
+ return MEMATTR(NC, Wb);
+ case 0b1110:
+ return MEMATTR(Wt, Wb);
+ case 0b1111:
+ return MEMATTR(Wb, Wb);
+ default:
+ unreachable();
+ }
+}
+
+static u8 combine_s1_s2_attr(u8 s1, u8 s2)
+{
+ bool transient;
+ u8 final = 0;
+
+ /* Upgrade transient s1 to non-transient to simplify things */
+ switch (s1) {
+ case 0b0001 ... 0b0011: /* Normal, Write-Through Transient */
+ transient = true;
+ s1 = MEMATTR_Wt | (s1 & GENMASK(1,0));
+ break;
+ case 0b0101 ... 0b0111: /* Normal, Write-Back Transient */
+ transient = true;
+ s1 = MEMATTR_Wb | (s1 & GENMASK(1,0));
+ break;
+ default:
+ transient = false;
+ }
+
+ /* S2CombineS1AttrHints() */
+ if ((s1 & GENMASK(3, 2)) == MEMATTR_NC ||
+ (s2 & GENMASK(3, 2)) == MEMATTR_NC)
+ final = MEMATTR_NC;
+ else if ((s1 & GENMASK(3, 2)) == MEMATTR_Wt ||
+ (s2 & GENMASK(3, 2)) == MEMATTR_Wt)
+ final = MEMATTR_Wt;
+ else
+ final = MEMATTR_Wb;
+
+ if (final != MEMATTR_NC) {
+ /* Inherit RaWa hints from S1 */
+ if (transient) {
+ switch (s1 & GENMASK(3, 2)) {
+ case MEMATTR_Wt:
+ final = 0;
+ break;
+ case MEMATTR_Wb:
+ final = MEMATTR_NC;
+ break;
+ }
+ }
+
+ final |= s1 & GENMASK(1, 0);
+ }
+
+ return final;
+}
+
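
A worked example of combine_s1_s2_attr() (illustration): for S1 = Normal
Write-Back Read/Write-Allocate (0b1111) and S2 = Normal Write-Through
(0b1010), neither attribute is NC and one of them is Wt, so the combined
cacheability is Write-Through; the RaWa hints are then inherited from S1,
yielding 0b1011.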
+#define ATTR_NSH 0b00
+#define ATTR_RSV 0b01
+#define ATTR_OSH 0b10
+#define ATTR_ISH 0b11
+
+static u8 compute_sh(u8 attr, u64 desc)
+{
+ u8 sh;
+
+ /* Any form of device, as well as NC has SH[1:0]=0b10 */
+ if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC))
+ return ATTR_OSH;
+
+ sh = FIELD_GET(PTE_SHARED, desc);
+ if (sh == ATTR_RSV) /* Reserved, mapped to NSH */
+ sh = ATTR_NSH;
+
+ return sh;
+}
+
+static u8 combine_sh(u8 s1_sh, u8 s2_sh)
+{
+ if (s1_sh == ATTR_OSH || s2_sh == ATTR_OSH)
+ return ATTR_OSH;
+ if (s1_sh == ATTR_ISH || s2_sh == ATTR_ISH)
+ return ATTR_ISH;
+
+ return ATTR_NSH;
+}
+
+static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
+ struct kvm_s2_trans *tr)
+{
+ u8 s1_parattr, s2_memattr, final_attr;
+ u64 par;
+
+ /* If S2 has failed to translate, report the damage */
+ if (tr->esr) {
+ par = SYS_PAR_EL1_RES1;
+ par |= SYS_PAR_EL1_F;
+ par |= SYS_PAR_EL1_S;
+ par |= FIELD_PREP(SYS_PAR_EL1_FST, tr->esr);
+ return par;
+ }
+
+ s1_parattr = FIELD_GET(SYS_PAR_EL1_ATTR, s1_par);
+ s2_memattr = FIELD_GET(GENMASK(5, 2), tr->desc);
+
+ if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_FWB) {
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64PFR2_EL1, MTEPERM, IMP))
+ s2_memattr &= ~BIT(3);
+
+ /* Combination of R_VRJSW and R_RHWZM */
+ switch (s2_memattr) {
+ case 0b0101:
+ if (MEMATTR_IS_DEVICE(s1_parattr))
+ final_attr = s1_parattr;
+ else
+ final_attr = MEMATTR(NC, NC);
+ break;
+ case 0b0110:
+ case 0b1110:
+ final_attr = MEMATTR(WbRaWa, WbRaWa);
+ break;
+ case 0b0111:
+ case 0b1111:
+ /* Preserve S1 attribute */
+ final_attr = s1_parattr;
+ break;
+ case 0b0100:
+ case 0b1100:
+ case 0b1101:
+ /* Reserved, do something non-silly */
+ final_attr = s1_parattr;
+ break;
+ default:
+ /* MemAttr[2]=0, Device from S2 */
+ final_attr = (s2_memattr & GENMASK(1, 0)) << 2;
+ }
+ } else {
+ /* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
+ u8 s2_parattr = s2_memattr_to_attr(s2_memattr);
+
+ if (MEMATTR_IS_DEVICE(s1_parattr) ||
+ MEMATTR_IS_DEVICE(s2_parattr)) {
+ final_attr = min(s1_parattr, s2_parattr);
+ } else {
+ /* At this stage, this is memory vs memory */
+ final_attr = combine_s1_s2_attr(s1_parattr & 0xf,
+ s2_parattr & 0xf);
+ final_attr |= combine_s1_s2_attr(s1_parattr >> 4,
+ s2_parattr >> 4) << 4;
+ }
+ }
+
+ if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_CD) &&
+ !MEMATTR_IS_DEVICE(final_attr))
+ final_attr = MEMATTR(NC, NC);
+
+ par = FIELD_PREP(SYS_PAR_EL1_ATTR, final_attr);
+ par |= tr->output & GENMASK(47, 12);
+ par |= FIELD_PREP(SYS_PAR_EL1_SH,
+ combine_sh(FIELD_GET(SYS_PAR_EL1_SH, s1_par),
+ compute_sh(final_attr, tr->desc)));
+
+ return par;
+}
+
+static u64 compute_par_s1(struct kvm_vcpu *vcpu, struct s1_walk_result *wr,
+ enum trans_regime regime)
+{
+ u64 par;
+
+ if (wr->failed) {
+ par = SYS_PAR_EL1_RES1;
+ par |= SYS_PAR_EL1_F;
+ par |= FIELD_PREP(SYS_PAR_EL1_FST, wr->fst);
+ par |= wr->ptw ? SYS_PAR_EL1_PTW : 0;
+ par |= wr->s2 ? SYS_PAR_EL1_S : 0;
+ } else if (wr->level == S1_MMU_DISABLED) {
+ /* MMU off or HCR_EL2.DC == 1 */
+ par = SYS_PAR_EL1_NSE;
+ par |= wr->pa & GENMASK_ULL(47, 12);
+
+ if (regime == TR_EL10 &&
+ (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_DC)) {
+ par |= FIELD_PREP(SYS_PAR_EL1_ATTR,
+ MEMATTR(WbRaWa, WbRaWa));
+ par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_NSH);
+ } else {
+ par |= FIELD_PREP(SYS_PAR_EL1_ATTR, 0); /* nGnRnE */
+ par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_OSH);
+ }
+ } else {
+ u64 mair, sctlr;
+ u8 sh;
+
+ par = SYS_PAR_EL1_NSE;
+
+ mair = (regime == TR_EL10 ?
+ vcpu_read_sys_reg(vcpu, MAIR_EL1) :
+ vcpu_read_sys_reg(vcpu, MAIR_EL2));
+
+ mair >>= FIELD_GET(PTE_ATTRINDX_MASK, wr->desc) * 8;
+ mair &= 0xff;
+
+ sctlr = (regime == TR_EL10 ?
+ vcpu_read_sys_reg(vcpu, SCTLR_EL1) :
+ vcpu_read_sys_reg(vcpu, SCTLR_EL2));
+
+ /* Force NC for memory if SCTLR_ELx.C is clear */
+ if (!(sctlr & SCTLR_EL1_C) && !MEMATTR_IS_DEVICE(mair))
+ mair = MEMATTR(NC, NC);
+
+ par |= FIELD_PREP(SYS_PAR_EL1_ATTR, mair);
+ par |= wr->pa & GENMASK_ULL(47, 12);
+
+ sh = compute_sh(mair, wr->desc);
+ par |= FIELD_PREP(SYS_PAR_EL1_SH, sh);
+ }
+
+ return par;
+}
+
+static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
+{
+ u64 sctlr;
+
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
+ return false;
+
+ if (regime == TR_EL10)
+ sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+ else
+ sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
+
+ return sctlr & SCTLR_EL1_EPAN;
+}
+
+static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ bool perm_fail, ur, uw, ux, pr, pw, px;
+ struct s1_walk_result wr = {};
+ struct s1_walk_info wi = {};
+ int ret, idx;
+
+ ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr);
+ if (ret)
+ goto compute_par;
+
+ if (wr.level == S1_MMU_DISABLED)
+ goto compute_par;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ ret = walk_s1(vcpu, &wi, &wr, vaddr);
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ if (ret)
+ goto compute_par;
+
+ /* FIXME: revisit when adding indirect permission support */
+ /* AArch64.S1DirectBasePermissions() */
+ if (wi.regime != TR_EL2) {
+ switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr.desc)) {
+ case 0b00:
+ pr = pw = true;
+ ur = uw = false;
+ break;
+ case 0b01:
+ pr = pw = ur = uw = true;
+ break;
+ case 0b10:
+ pr = true;
+ pw = ur = uw = false;
+ break;
+ case 0b11:
+ pr = ur = true;
+ pw = uw = false;
+ break;
+ }
+
+ switch (wr.APTable) {
+ case 0b00:
+ break;
+ case 0b01:
+ ur = uw = false;
+ break;
+ case 0b10:
+ pw = uw = false;
+ break;
+ case 0b11:
+ pw = ur = uw = false;
+ break;
+ }
+
+ /* We don't use px for anything yet, but hey... */
+ px = !((wr.desc & PTE_PXN) || wr.PXNTable || uw);
+ ux = !((wr.desc & PTE_UXN) || wr.UXNTable);
+
+ if (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) {
+ bool pan;
+
+ pan = *vcpu_cpsr(vcpu) & PSR_PAN_BIT;
+ pan &= ur || uw || (pan3_enabled(vcpu, wi.regime) && ux);
+ pw &= !pan;
+ pr &= !pan;
+ }
+ } else {
+ ur = uw = ux = false;
+
+ if (!(wr.desc & PTE_RDONLY)) {
+ pr = pw = true;
+ } else {
+ pr = true;
+ pw = false;
+ }
+
+ if (wr.APTable & BIT(1))
+ pw = false;
+
+ /* XN maps to UXN */
+ px = !((wr.desc & PTE_UXN) || wr.UXNTable);
+ }
+
+ perm_fail = false;
+
+ switch (op) {
+ case OP_AT_S1E1RP:
+ case OP_AT_S1E1R:
+ case OP_AT_S1E2R:
+ perm_fail = !pr;
+ break;
+ case OP_AT_S1E1WP:
+ case OP_AT_S1E1W:
+ case OP_AT_S1E2W:
+ perm_fail = !pw;
+ break;
+ case OP_AT_S1E0R:
+ perm_fail = !ur;
+ break;
+ case OP_AT_S1E0W:
+ perm_fail = !uw;
+ break;
+ case OP_AT_S1E1A:
+ case OP_AT_S1E2A:
+ break;
+ default:
+ BUG();
+ }
+
+ if (perm_fail)
+ fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false);
+
+compute_par:
+ return compute_par_s1(vcpu, &wr, wi.regime);
+}
+
+/*
+ * Return the PAR_EL1 value as the result of a valid translation.
+ *
+ * If the translation is unsuccessful, the value may only contain
+ * PAR_EL1.F, and cannot be taken at face value. It isn't an
+ * indication of the translation having failed, only that the fast
+ * path did not succeed, *unless* it indicates an S1 permission fault.
+ */
+static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ struct mmu_config config;
+ struct kvm_s2_mmu *mmu;
+ bool fail;
+ u64 par;
+
+ par = SYS_PAR_EL1_F;
+
+ /*
+ * We've trapped, so everything is live on the CPU. As we will
+ * be switching contexts behind everybody's back, disable
+ * interrupts while holding the mmu lock.
+ */
+ guard(write_lock_irqsave)(&vcpu->kvm->mmu_lock);
+
+ /*
+ * If HCR_EL2.{E2H,TGE} == {1,1}, the MMU context is already
+ * the right one (as we trapped from vEL2). If not, save the
+ * full MMU context.
+ */
+ if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu))
+ goto skip_mmu_switch;
+
+ /*
+ * Obtaining the S2 MMU for an L2 is horribly racy, and we may not
+ * find it (recycled by another vcpu, for example). When this
+ * happens, admit defeat immediately and use the SW (slow) path.
+ */
+ mmu = lookup_s2_mmu(vcpu);
+ if (!mmu)
+ return par;
+
+ __mmu_config_save(&config);
+
+ write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR0_EL1), SYS_TTBR0);
+ write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1), SYS_TTBR1);
+ write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1), SYS_TCR);
+ write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1), SYS_MAIR);
+ write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR);
+ __load_stage2(mmu, mmu->arch);
+
+skip_mmu_switch:
+ /* Clear TGE, enable S2 translation, we're rolling */
+ write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
+ isb();
+
+ switch (op) {
+ case OP_AT_S1E1RP:
+ case OP_AT_S1E1WP:
+ fail = at_s1e1p_fast(vcpu, op, vaddr);
+ break;
+ case OP_AT_S1E1R:
+ fail = __kvm_at(OP_AT_S1E1R, vaddr);
+ break;
+ case OP_AT_S1E1W:
+ fail = __kvm_at(OP_AT_S1E1W, vaddr);
+ break;
+ case OP_AT_S1E0R:
+ fail = __kvm_at(OP_AT_S1E0R, vaddr);
+ break;
+ case OP_AT_S1E0W:
+ fail = __kvm_at(OP_AT_S1E0W, vaddr);
+ break;
+ case OP_AT_S1E1A:
+ fail = __kvm_at(OP_AT_S1E1A, vaddr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fail = true;
+ break;
+ }
+
+ if (!fail)
+ par = read_sysreg_par();
+
+ if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
+ __mmu_config_restore(&config);
+
+ return par;
+}
+
+static bool par_check_s1_perm_fault(u64 par)
+{
+ u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
+
+ return ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM &&
+ !(par & SYS_PAR_EL1_S));
+}
+
+void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
+
+ /*
+ * If PAR_EL1 reports that AT failed on a S1 permission fault, we
+ * know for sure that the PTW was able to walk the S1 tables and
+ * there's nothing else to do.
+ *
+ * If AT failed for any other reason, then we must walk the guest S1
+ * to emulate the instruction.
+ */
+ if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
+ par = handle_at_slow(vcpu, op, vaddr);
+
+ vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+}
+
+void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ u64 par;
+
+ /*
+ * We've trapped, so everything is live on the CPU. As we will be
+ * switching context behind everybody's back, disable interrupts...
+ */
+ scoped_guard(write_lock_irqsave, &vcpu->kvm->mmu_lock) {
+ struct kvm_s2_mmu *mmu;
+ u64 val, hcr;
+ bool fail;
+
+ mmu = &vcpu->kvm->arch.mmu;
+
+ val = hcr = read_sysreg(hcr_el2);
+ val &= ~HCR_TGE;
+ val |= HCR_VM;
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val |= HCR_NV | HCR_NV1;
+
+ write_sysreg(val, hcr_el2);
+ isb();
+
+ par = SYS_PAR_EL1_F;
+
+ switch (op) {
+ case OP_AT_S1E2R:
+ fail = __kvm_at(OP_AT_S1E1R, vaddr);
+ break;
+ case OP_AT_S1E2W:
+ fail = __kvm_at(OP_AT_S1E1W, vaddr);
+ break;
+ case OP_AT_S1E2A:
+ fail = __kvm_at(OP_AT_S1E1A, vaddr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fail = true;
+ }
+
+ isb();
+
+ if (!fail)
+ par = read_sysreg_par();
+
+ write_sysreg(hcr, hcr_el2);
+ isb();
+ }
+
+ /* We failed the translation, let's replay it in slow motion */
+ if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
+ par = handle_at_slow(vcpu, op, vaddr);
+
+ vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+}
+
+void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+ struct kvm_s2_trans out = {};
+ u64 ipa, par;
+ bool write;
+ int ret;
+
+ /* Do the stage-1 translation */
+ switch (op) {
+ case OP_AT_S12E1R:
+ op = OP_AT_S1E1R;
+ write = false;
+ break;
+ case OP_AT_S12E1W:
+ op = OP_AT_S1E1W;
+ write = true;
+ break;
+ case OP_AT_S12E0R:
+ op = OP_AT_S1E0R;
+ write = false;
+ break;
+ case OP_AT_S12E0W:
+ op = OP_AT_S1E0W;
+ write = true;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ __kvm_at_s1e01(vcpu, op, vaddr);
+ par = vcpu_read_sys_reg(vcpu, PAR_EL1);
+ if (par & SYS_PAR_EL1_F)
+ return;
+
+ /*
+ * If we only have a single stage of translation (E2H=0 or
+ * TGE=1), exit early. Same thing if {VM,DC}=={0,0}.
+ */
+ if (!vcpu_el2_e2h_is_set(vcpu) || vcpu_el2_tge_is_set(vcpu) ||
+ !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
+ return;
+
+ /* Do the stage-2 translation */
+ ipa = (par & GENMASK_ULL(47, 12)) | (vaddr & GENMASK_ULL(11, 0));
+ out.esr = 0;
+ ret = kvm_walk_nested_s2(vcpu, ipa, &out);
+ if (ret < 0)
+ return;
+
+ /* Check the access permission */
+ if (!out.esr &&
+ ((!write && !out.readable) || (write && !out.writable)))
+ out.esr = ESR_ELx_FSC_PERM_L(out.level & 0x3);
+
+ par = compute_par_s12(vcpu, par, &out);
+ vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+}
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 05166eccea0a..05b6435d02a9 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -83,14 +83,20 @@ enum cgt_group_id {
CGT_CPTR_TAM,
CGT_CPTR_TCPAC,
+ CGT_HCRX_EnFPM,
CGT_HCRX_TCR2En,
+ CGT_ICH_HCR_TC,
+ CGT_ICH_HCR_TALL0,
+ CGT_ICH_HCR_TALL1,
+ CGT_ICH_HCR_TDIR,
+
/*
* Anything after this point is a combination of coarse trap
* controls, which must all be evaluated to decide what to do.
*/
__MULTIPLE_CONTROL_BITS__,
- CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_IMO_FMO_ICH_HCR_TC = __MULTIPLE_CONTROL_BITS__,
CGT_HCR_TID2_TID4,
CGT_HCR_TTLB_TTLBIS,
CGT_HCR_TTLB_TTLBOS,
@@ -105,6 +111,8 @@ enum cgt_group_id {
CGT_MDCR_TDE_TDRA,
CGT_MDCR_TDCC_TDE_TDA,
+ CGT_ICH_HCR_TC_TDIR,
+
/*
* Anything after this point requires a callback evaluating a
* complex trap condition. Ugly stuff.
@@ -372,12 +380,42 @@ static const struct trap_bits coarse_trap_bits[] = {
.mask = CPTR_EL2_TCPAC,
.behaviour = BEHAVE_FORWARD_ANY,
},
+ [CGT_HCRX_EnFPM] = {
+ .index = HCRX_EL2,
+ .value = 0,
+ .mask = HCRX_EL2_EnFPM,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
[CGT_HCRX_TCR2En] = {
.index = HCRX_EL2,
.value = 0,
.mask = HCRX_EL2_TCR2En,
.behaviour = BEHAVE_FORWARD_ANY,
},
+ [CGT_ICH_HCR_TC] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_TC,
+ .mask = ICH_HCR_TC,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_ICH_HCR_TALL0] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_TALL0,
+ .mask = ICH_HCR_TALL0,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_ICH_HCR_TALL1] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_TALL1,
+ .mask = ICH_HCR_TALL1,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_ICH_HCR_TDIR] = {
+ .index = ICH_HCR_EL2,
+ .value = ICH_HCR_TDIR,
+ .mask = ICH_HCR_TDIR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
};
#define MCB(id, ...) \
@@ -387,7 +425,6 @@ static const struct trap_bits coarse_trap_bits[] = {
}
static const enum cgt_group_id *coarse_control_combo[] = {
- MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO),
MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4),
MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
@@ -402,6 +439,9 @@ static const enum cgt_group_id *coarse_control_combo[] = {
MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
+
+ MCB(CGT_HCR_IMO_FMO_ICH_HCR_TC, CGT_HCR_IMO, CGT_HCR_FMO, CGT_ICH_HCR_TC),
+ MCB(CGT_ICH_HCR_TC_TDIR, CGT_ICH_HCR_TC, CGT_ICH_HCR_TDIR),
};
typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
@@ -536,9 +576,9 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4),
SR_RANGE_TRAP(SYS_ID_PFR0_EL1,
sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3),
- SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO),
- SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO),
- SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0),
sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP),
SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0),
@@ -786,6 +826,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2A, CGT_HCR_NV),
SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV),
SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV),
SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV),
@@ -867,6 +908,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT),
SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT),
SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1A, CGT_HCR_AT),
SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN),
SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN),
SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN),
@@ -1108,6 +1150,35 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
+ SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM),
+ /*
+ * IMPDEF choice:
+ * We treat ICC_SRE_EL2.{SRE,Enable} and ICV_SRE_EL1.SRE as
+ * RAO/WI. We therefore never consider ICC_SRE_EL2.Enable for
+ * ICC_SRE_EL1 access, and always handle it locally.
+ */
+ SR_TRAP(SYS_ICC_AP0R0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R1_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R2_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP0R3_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_AP1R0_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R2_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_AP1R3_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_BPR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_BPR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_CTLR_EL1, CGT_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_DIR_EL1, CGT_ICH_HCR_TC_TDIR),
+ SR_TRAP(SYS_ICC_EOIR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_EOIR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_HPPIR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_HPPIR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_IAR0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_IAR1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_IGRPEN0_EL1, CGT_ICH_HCR_TALL0),
+ SR_TRAP(SYS_ICC_IGRPEN1_EL1, CGT_ICH_HCR_TALL1),
+ SR_TRAP(SYS_ICC_PMR_EL1, CGT_ICH_HCR_TC),
+ SR_TRAP(SYS_ICC_RPR_EL1, CGT_ICH_HCR_TC),
};
static DEFINE_XARRAY(sr_forward_xa);
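
In effect, the new multiple-control groups mean that a guest-EL1 access to
the SGI registers is forwarded to the virtual EL2 when any of
HCR_EL2.{IMO,FMO} or ICH_HCR_EL2.TC is set by the L1 hypervisor. A sketch of
the predicate these tables encode (illustrative only; the real lookup goes
through sr_forward_xa and the trap_bits descriptors above):

    static bool l1_traps_sgi(struct kvm_vcpu *vcpu)
    {
        return (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO)) ||
               (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_TC);
    }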
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index c53e5b14038d..ea5484ce1f3b 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -63,6 +63,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
*/
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
+ *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
@@ -134,8 +135,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.sve_state = vcpu->arch.sve_state;
fp_state.sve_vl = vcpu->arch.sve_max_vl;
fp_state.sme_state = NULL;
- fp_state.svcr = &vcpu->arch.svcr;
- fp_state.fpmr = &vcpu->arch.fpmr;
+ fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
+ fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
fp_state.fp_type = &vcpu->arch.fp_type;
if (vcpu_has_sve(vcpu))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 11098eb7eb44..962f985977c2 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1045,6 +1045,11 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
+ if (write && atomic_read(&kvm->nr_memslots_dirty_logging)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
while (length > 0) {
kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
void *maddr;
@@ -1059,6 +1064,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
page = pfn_to_online_page(pfn);
if (!page) {
/* Reject ZONE_DEVICE memory */
+ kvm_release_pfn_clean(pfn);
ret = -EFAULT;
goto out;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
index 9e13c1bc2ad5..17df94570f03 100644
--- a/arch/arm64/kvm/hyp/include/hyp/fault.h
+++ b/arch/arm64/kvm/hyp/include/hyp/fault.h
@@ -14,6 +14,7 @@
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
+ int ret;
u64 par, tmp;
/*
@@ -27,7 +28,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg_par();
- if (!__kvm_at("s1e1r", far))
+ ret = system_supports_poe() ? __kvm_at(OP_AT_S1E1A, far) :
+ __kvm_at(OP_AT_S1E1R, far);
+ if (!ret)
tmp = read_sysreg_par();
else
tmp = SYS_PAR_EL1_F; /* back to the guest */
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 37ff87d782b6..46d52e8a3df3 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -403,6 +403,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
+ if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
+ write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
+
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 4c0fdabaf8ae..1579a3c08a36 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,9 +16,15 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
+
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
+
+ // POR_EL0 can affect uaccess, so must be saved/restored early.
+ if (ctxt_has_s1poe(ctxt))
+ ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
}
static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -66,6 +72,17 @@ static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);
}
+static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!system_supports_poe())
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -80,6 +97,9 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
}
+
+ if (ctxt_has_s1poe(ctxt))
+ ctxt_sys_reg(ctxt, POR_EL1) = read_sysreg_el1(SYS_POR);
}
ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
@@ -120,6 +140,10 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
+
+ // POR_EL0 can affect uaccess, so must be saved/restored early.
+ if (ctxt_has_s1poe(ctxt))
+ write_sysreg_s(ctxt_sys_reg(ctxt, POR_EL0), SYS_POR_EL0);
}
static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
@@ -158,6 +182,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
}
+
+ if (ctxt_has_s1poe(ctxt))
+ write_sysreg_el1(ctxt_sys_reg(ctxt, POR_EL1), SYS_POR);
}
write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
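The split above is deliberate: POR_EL0 takes part in the permission checks behind the kernel's uaccess routines, so it is switched with the early "common" state on both directions of the world switch, while POR_EL1 travels with the lazily switched EL1 bank. The pattern, restated as a guarded save/restore pair (helper names hypothetical):

/*
 * Sketch: gate both sides on the same per-VM predicate so the
 * register is never touched on hardware or VMs without FEAT_S1POE.
 */
static inline void ctxt_save_por_el0(struct kvm_cpu_context *ctxt)
{
	if (ctxt_has_s1poe(ctxt))
		ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
}

static inline void ctxt_restore_por_el0(struct kvm_cpu_context *ctxt)
{
	if (ctxt_has_s1poe(ctxt))
		write_sysreg_s(ctxt_sys_reg(ctxt, POR_EL0), SYS_POR_EL0);
}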
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index e715c157c2c4..e433dfab882a 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -426,9 +426,9 @@ out:
return;
}
-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
- struct arm_smccc_res *res,
- struct kvm_cpu_context *ctxt)
+static void __do_ffa_mem_xfer(const u64 func_id,
+ struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, len, ctxt, 1);
DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
u32 offset, nr_ranges;
int ret = 0;
- BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
- func_id != FFA_FN64_MEM_LEND);
-
if (addr_mbz || npages_mbz || fraglen > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
@@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
goto out_unlock;
}
+ if (len > ffa_desc_buf.len) {
+ ret = FFA_RET_NO_MEMORY;
+ goto out_unlock;
+ }
+
buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen);
@@ -512,6 +514,13 @@ err_unshare:
goto out_unlock;
}
+#define do_ffa_mem_xfer(fid, res, ctxt) \
+ do { \
+ BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \
+ (fid) != FFA_FN64_MEM_LEND); \
+ __do_ffa_mem_xfer((fid), (res), (ctxt)); \
+ } while (0)
+
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
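Note on the refactor above: BUILD_BUG_ON() needs func_id to be a compile-time constant, which held only while do_ffa_mem_xfer() was __always_inline. Un-inlining the function (so it exists once instead of once per caller) moves the constant check into a macro wrapper, keeping every call site vetted at build time. Usage sketch, assuming res and ctxt are in scope:

	do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, ctxt);	/* builds */
	do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, ctxt);		/* builds */
	/* any other func_id constant trips the BUILD_BUG_ON() */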
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 07120b37da35..401af1835be6 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -130,7 +130,7 @@ alternative_else_nop_endif
/* Invalidate the stale TLBs from the bootloader */
tlbi alle2
- tlbi vmalls12e1
+ tlbi alle1
dsb sy
mov_q x0, INIT_SCTLR_EL2_MMU_ON
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f43d845f3c4e..87692b566d90 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -62,6 +62,8 @@ static void fpsimd_sve_flush(void)
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
{
+ bool has_fpmr;
+
if (!guest_owns_fp_regs())
return;
@@ -73,11 +75,18 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
else
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+ has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
+ if (has_fpmr)
+ __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+ if (has_fpmr)
+ write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
+
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 8f5c56d5b1cd..cc69106734ca 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -197,6 +197,15 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
+
+ if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
+ u64 val = read_sysreg_s(SYS_FPMR);
+
+ if (unlikely(is_protected_kvm_enabled()))
+ *host_data_ptr(fpmr) = val;
+ else
+ **host_data_ptr(fpmr_ptr) = val;
+ }
}
static const exit_handler_fn hyp_exit_handlers[] = {
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index ca3c09df8d7c..48da9ca9763f 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -132,10 +132,10 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
else
__load_host_stage2();
- if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
- /* Ensure write of the old VMID */
- isb();
+ /* Ensure write of the old VMID */
+ isb();
+ if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
if (!(cxt->sctlr & SCTLR_ELx_M)) {
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
isb();
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 9e2bbee77491..b11bcebac908 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -17,48 +17,6 @@
#define KVM_PTE_TYPE_PAGE 1
#define KVM_PTE_TYPE_TABLE 1
-#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
-
-#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6)
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \
- ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \
- ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
-#define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8)
-#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3
-#define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10)
-
-#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
-#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6)
-#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7)
-#define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8)
-#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3
-#define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10)
-
-#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50)
-
-#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
-
-#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
-
-#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
-
-#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
-
-#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
- KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
- KVM_PTE_LEAF_ATTR_HI_S2_XN)
-
-#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
-#define KVM_MAX_OWNER_ID 1
-
-/*
- * Used to indicate a pte for which a 'break-before-make' sequence is in
- * progress.
- */
-#define KVM_INVALID_PTE_LOCKED BIT(10)
-
struct kvm_pgtable_walk_data {
struct kvm_pgtable_walker *walker;
@@ -1547,7 +1505,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
*/
new = kvm_init_table_pte(childp, mm_ops);
stage2_make_pte(ctx, new);
- dsb(ishst);
return 0;
}
@@ -1559,8 +1516,11 @@ int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
.flags = KVM_PGTABLE_WALK_LEAF,
.arg = mc,
};
+ int ret;
- return kvm_pgtable_walk(pgt, addr, size, &walker);
+ ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+ dsb(ishst);
+ return ret;
}
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
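The barrier movement above is a batching optimisation: the split walker previously issued one dsb(ishst) per new table PTE; hoisting a single barrier to after the walk still orders every PTE store before kvm_pgtable_stage2_split() returns, but pays the synchronisation cost once per call instead of once per entry. Condensed sketch:

	/* Illustrative: plain stores during the walk... */
	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	/* ...then one barrier publishes all the new table entries. */
	dsb(ishst);
	return ret;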
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 7b397fad26f2..18d4677002b1 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -268,8 +268,16 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
* starting to mess with the rest of the GIC, and VMCR_EL2 in
* particular. This logic must be called before
* __vgic_v3_restore_state().
+ *
+ * However, if the vgic is disabled (ICH_HCR_EL2.EN==0), no GIC is
+ * provisioned at all. In order to prevent illegal accesses to the
+ * system registers from trapping to EL1 (duh), force ICC_SRE_EL1.SRE
+ * to 1 so that the trap bits can take effect. Yes, we *loves* the GIC.
*/
- if (!cpu_if->vgic_sre) {
+ if (!(cpu_if->vgic_hcr & ICH_HCR_EN)) {
+ write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1);
+ isb();
+ } else if (!cpu_if->vgic_sre) {
write_gicreg(0, ICC_SRE_EL1);
isb();
write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
@@ -288,8 +296,9 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
}
/*
- * Prevent the guest from touching the GIC system registers if
- * SRE isn't enabled for GICv3 emulation.
+ * Prevent the guest from touching the ICC_SRE_EL1 system
+ * register. Note that this may not have any effect, as
+ * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
*/
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
@@ -297,10 +306,11 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
/*
* If we need to trap system registers, we must write
* ICH_HCR_EL2 anyway, even if no interrupts are being
- * injected,
+ * injected. Note that this also applies if we don't expect
+ * any system register access (no vgic at all).
*/
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
- cpu_if->its_vpe.its_vm)
+ cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
@@ -326,7 +336,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
* no interrupts were being injected, and we disable it again here.
*/
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
- cpu_if->its_vpe.its_vm)
+ cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
write_gicreg(0, ICH_HCR_EL2);
}
@@ -1032,6 +1042,75 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
write_gicreg(vmcr, ICH_VMCR_EL2);
}
+static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
+ u32 sysreg, bool is_read)
+{
+ u64 ich_hcr;
+
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return false;
+
+ ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
+
+ switch (sysreg) {
+ case SYS_ICC_IGRPEN0_EL1:
+ if (is_read &&
+ (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ return true;
+
+ if (!is_read &&
+ (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ return true;
+
+ fallthrough;
+
+ case SYS_ICC_AP0Rn_EL1(0):
+ case SYS_ICC_AP0Rn_EL1(1):
+ case SYS_ICC_AP0Rn_EL1(2):
+ case SYS_ICC_AP0Rn_EL1(3):
+ case SYS_ICC_BPR0_EL1:
+ case SYS_ICC_EOIR0_EL1:
+ case SYS_ICC_HPPIR0_EL1:
+ case SYS_ICC_IAR0_EL1:
+ return ich_hcr & ICH_HCR_TALL0;
+
+ case SYS_ICC_IGRPEN1_EL1:
+ if (is_read &&
+ (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ return true;
+
+ if (!is_read &&
+ (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
+ return true;
+
+ fallthrough;
+
+ case SYS_ICC_AP1Rn_EL1(0):
+ case SYS_ICC_AP1Rn_EL1(1):
+ case SYS_ICC_AP1Rn_EL1(2):
+ case SYS_ICC_AP1Rn_EL1(3):
+ case SYS_ICC_BPR1_EL1:
+ case SYS_ICC_EOIR1_EL1:
+ case SYS_ICC_HPPIR1_EL1:
+ case SYS_ICC_IAR1_EL1:
+ return ich_hcr & ICH_HCR_TALL1;
+
+ case SYS_ICC_DIR_EL1:
+ if (ich_hcr & ICH_HCR_TDIR)
+ return true;
+
+ fallthrough;
+
+ case SYS_ICC_RPR_EL1:
+ case SYS_ICC_CTLR_EL1:
+ case SYS_ICC_PMR_EL1:
+ return ich_hcr & ICH_HCR_TC;
+
+ default:
+ return false;
+ }
+}
+
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
int rt;
@@ -1041,6 +1120,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
bool is_read;
u32 sysreg;
+ if (kern_hyp_va(vcpu->kvm)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+ return 0;
+
esr = kvm_vcpu_get_esr(vcpu);
if (vcpu_mode_is_32bit(vcpu)) {
if (!kvm_condition_valid(vcpu)) {
@@ -1055,6 +1137,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
+ if (__vgic_v3_check_trap_forwarding(vcpu, sysreg, is_read))
+ return 0;
+
switch (sysreg) {
case SYS_ICC_IAR0_EL1:
case SYS_ICC_IAR1_EL1:
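The forwarding helper above triages a nested guest's GICv3 sysreg trap in a fixed order: accesses from a guest-hypervisor context are handled locally; IGRPEN0/IGRPEN1 are first checked against the guest's fine-grained trap registers (read and write directions separately); everything else falls through to the coarse ICH_HCR_EL2 groups (TALL0, TALL1, TC, TDIR). The FGT test condenses to something like this hypothetical helper, built from the checks above:

/* Sketch: does the vEL2's FGT configuration claim this access? */
static bool fgt_traps_igrpen(struct kvm_vcpu *vcpu, bool is_read)
{
	u64 fgt = __vcpu_sys_reg(vcpu, is_read ? HFGRTR_EL2 : HFGWTR_EL2);

	return fgt & HFGxTR_EL2_ICC_IGRPENn_EL1;
}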
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 77010b76c150..80581b1c3995 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -312,6 +312,9 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
+
+ if (kvm_has_fpmr(vcpu->kvm))
+ **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index bab27f9d8cc6..f9e30dd34c7a 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -62,7 +62,6 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
*/
num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
tmp = kvrealloc(kvm->arch.nested_mmus,
- size_mul(sizeof(*kvm->arch.nested_mmus), kvm->arch.nested_mmus_size),
size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!tmp)
@@ -103,20 +102,6 @@ struct s2_walk_info {
bool be;
};
-static unsigned int ps_to_output_size(unsigned int ps)
-{
- switch (ps) {
- case 0: return 32;
- case 1: return 36;
- case 2: return 40;
- case 3: return 42;
- case 4: return 44;
- case 5:
- default:
- return 48;
- }
-}
-
static u32 compute_fsc(int level, u32 fsc)
{
return fsc | (level & 0x3);
@@ -256,7 +241,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
/* Check for valid descriptor at this point */
if (!(desc & 1) || ((desc & 3) == 1 && level == 3)) {
out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
- out->upper_attr = desc;
+ out->desc = desc;
return 1;
}
@@ -266,7 +251,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
if (check_output_size(wi, desc)) {
out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
- out->upper_attr = desc;
+ out->desc = desc;
return 1;
}
@@ -278,27 +263,24 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
if (level < first_block_level) {
out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
- out->upper_attr = desc;
+ out->desc = desc;
return 1;
}
- /*
- * We don't use the contiguous bit in the stage-2 ptes, so skip check
- * for misprogramming of the contiguous bit.
- */
-
if (check_output_size(wi, desc)) {
out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
- out->upper_attr = desc;
+ out->desc = desc;
return 1;
}
if (!(desc & BIT(10))) {
out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
- out->upper_attr = desc;
+ out->desc = desc;
return 1;
}
+ addr_bottom += contiguous_bit_shift(desc, wi, level);
+
/* Calculate and return the result */
paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
(ipa & GENMASK_ULL(addr_bottom - 1, 0));
@@ -307,7 +289,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
out->readable = desc & (0b01 << 6);
out->writable = desc & (0b10 << 6);
out->level = level;
- out->upper_attr = desc & GENMASK_ULL(63, 52);
+ out->desc = desc;
return 0;
}
@@ -954,19 +936,16 @@ static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
int kvm_init_nv_sysregs(struct kvm *kvm)
{
u64 res0, res1;
- int ret = 0;
- mutex_lock(&kvm->arch.config_lock);
+ lockdep_assert_held(&kvm->arch.config_lock);
if (kvm->arch.sysreg_masks)
- goto out;
+ return 0;
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
GFP_KERNEL_ACCOUNT);
- if (!kvm->arch.sysreg_masks) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!kvm->arch.sysreg_masks)
+ return -ENOMEM;
limit_nv_id_regs(kvm);
@@ -1195,8 +1174,13 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
res0 |= ~(res0 | res1);
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
-out:
- mutex_unlock(&kvm->arch.config_lock);
- return ret;
+ /* SCTLR_EL1 */
+ res0 = SCTLR_EL1_RES0;
+ res1 = SCTLR_EL1_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
+ res0 |= SCTLR_EL1_EPAN;
+ set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
+
+ return 0;
}
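Two locking/API adjustments above are worth spelling out. First, kvrealloc() no longer takes the old size; the allocator tracks it internally, so the extra size_mul() argument disappears. Second, kvm_init_nv_sysregs() stops taking the config lock itself and merely asserts it, matching its new caller, kvm_finalize_sys_regs() (see the sys_regs.c hunks below), which holds the lock across the whole finalisation. Caller-side sketch:

	/* Scope-held lock; released automatically at end of scope. */
	guard(mutex)(&kvm->arch.config_lock);

	if (vcpu_has_nv(vcpu)) {
		int ret = kvm_init_nv_sysregs(kvm);	/* lockdep-checked */
		if (ret)
			return ret;
	}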
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 82a2a003259c..ac36c438b8c1 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -233,7 +233,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
int i;
struct kvm_pmu *pmu = &vcpu->arch.pmu;
- for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+ for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
pmu->pmc[i].idx = i;
}
@@ -260,7 +260,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int i;
- for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+ for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
@@ -291,7 +291,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
return;
- for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
struct kvm_pmc *pmc;
if (!(val & BIT(i)))
@@ -323,7 +323,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_vcpu_has_pmu(vcpu) || !val)
return;
- for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
struct kvm_pmc *pmc;
if (!(val & BIT(i)))
@@ -910,10 +910,10 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
/*
- * The arm_pmu->num_events considers the cycle counter as well.
- * Ignore that and return only the general-purpose counters.
+ * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
+ * Ignore those and return only the general-purpose counters.
*/
- return arm_pmu->num_events - 1;
+ return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
}
static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
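On the counter-count change: num_events - 1 assumed exactly one fixed counter (the cycle counter). Once an instruction counter can also be present (bit 32 of cntr_mask, see the pmu.c hunks below), the robust way to count the guest-programmable counters is the population count of the general-purpose bits. Sketch of the assumed layout:

	/* Assumed cntr_mask layout, matching the pmu.c changes below:
	 *   bits 0..30 general-purpose, bit 31 cycle, bit 32 instruction.
	 * Only the low 31 bits are programmable event counters.
	 */
	u8 nr_gp = bitmap_weight(arm_pmu->cntr_mask,
				 ARMV8_PMU_MAX_GENERAL_COUNTERS);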
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 329819806096..0b3adf3e17b4 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -5,6 +5,8 @@
*/
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
@@ -35,7 +37,7 @@ struct kvm_pmu_events *kvm_get_pmu_events(void)
* Add events to track that we may want to switch at guest entry/exit
* time.
*/
-void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
+void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
@@ -51,7 +53,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
/*
* Stop tracking events
*/
-void kvm_clr_pmu_events(u32 clr)
+void kvm_clr_pmu_events(u64 clr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
@@ -62,79 +64,32 @@ void kvm_clr_pmu_events(u32 clr)
pmu->events_guest &= ~clr;
}
-#define PMEVTYPER_READ_CASE(idx) \
- case idx: \
- return read_sysreg(pmevtyper##idx##_el0)
-
-#define PMEVTYPER_WRITE_CASE(idx) \
- case idx: \
- write_sysreg(val, pmevtyper##idx##_el0); \
- break
-
-#define PMEVTYPER_CASES(readwrite) \
- PMEVTYPER_##readwrite##_CASE(0); \
- PMEVTYPER_##readwrite##_CASE(1); \
- PMEVTYPER_##readwrite##_CASE(2); \
- PMEVTYPER_##readwrite##_CASE(3); \
- PMEVTYPER_##readwrite##_CASE(4); \
- PMEVTYPER_##readwrite##_CASE(5); \
- PMEVTYPER_##readwrite##_CASE(6); \
- PMEVTYPER_##readwrite##_CASE(7); \
- PMEVTYPER_##readwrite##_CASE(8); \
- PMEVTYPER_##readwrite##_CASE(9); \
- PMEVTYPER_##readwrite##_CASE(10); \
- PMEVTYPER_##readwrite##_CASE(11); \
- PMEVTYPER_##readwrite##_CASE(12); \
- PMEVTYPER_##readwrite##_CASE(13); \
- PMEVTYPER_##readwrite##_CASE(14); \
- PMEVTYPER_##readwrite##_CASE(15); \
- PMEVTYPER_##readwrite##_CASE(16); \
- PMEVTYPER_##readwrite##_CASE(17); \
- PMEVTYPER_##readwrite##_CASE(18); \
- PMEVTYPER_##readwrite##_CASE(19); \
- PMEVTYPER_##readwrite##_CASE(20); \
- PMEVTYPER_##readwrite##_CASE(21); \
- PMEVTYPER_##readwrite##_CASE(22); \
- PMEVTYPER_##readwrite##_CASE(23); \
- PMEVTYPER_##readwrite##_CASE(24); \
- PMEVTYPER_##readwrite##_CASE(25); \
- PMEVTYPER_##readwrite##_CASE(26); \
- PMEVTYPER_##readwrite##_CASE(27); \
- PMEVTYPER_##readwrite##_CASE(28); \
- PMEVTYPER_##readwrite##_CASE(29); \
- PMEVTYPER_##readwrite##_CASE(30)
-
/*
* Read a value direct from PMEVTYPER<idx> where idx is 0-30
- * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ * or PMxCFILTR_EL0 where idx is 31-32.
*/
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
- switch (idx) {
- PMEVTYPER_CASES(READ);
- case ARMV8_PMU_CYCLE_IDX:
- return read_sysreg(pmccfiltr_el0);
- default:
- WARN_ON(1);
- }
+ if (idx == ARMV8_PMU_CYCLE_IDX)
+ return read_pmccfiltr();
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ return read_pmicfiltr();
- return 0;
+ return read_pmevtypern(idx);
}
/*
* Write a value direct to PMEVTYPER<idx> where idx is 0-30
- * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ * or PMxCFILTR_EL0 where idx is 31-32.
*/
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
- switch (idx) {
- PMEVTYPER_CASES(WRITE);
- case ARMV8_PMU_CYCLE_IDX:
- write_sysreg(val, pmccfiltr_el0);
- break;
- default:
- WARN_ON(1);
- }
+ if (idx == ARMV8_PMU_CYCLE_IDX)
+ write_pmccfiltr(val);
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ write_pmicfiltr(val);
+ else
+ write_pmevtypern(idx, val);
}
/*
@@ -145,7 +100,7 @@ static void kvm_vcpu_pmu_enable_el0(unsigned long events)
u64 typer;
u32 counter;
- for_each_set_bit(counter, &events, 32) {
+ for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
typer = kvm_vcpu_pmu_read_evtype_direct(counter);
typer &= ~ARMV8_PMU_EXCLUDE_EL0;
kvm_vcpu_pmu_write_evtype_direct(counter, typer);
@@ -160,7 +115,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
u64 typer;
u32 counter;
- for_each_set_bit(counter, &events, 32) {
+ for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
typer = kvm_vcpu_pmu_read_evtype_direct(counter);
typer |= ARMV8_PMU_EXCLUDE_EL0;
kvm_vcpu_pmu_write_evtype_direct(counter, typer);
@@ -176,7 +131,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_events *pmu;
- u32 events_guest, events_host;
+ u64 events_guest, events_host;
if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
@@ -197,7 +152,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_events *pmu;
- u32 events_guest, events_host;
+ u64 events_guest, events_host;
if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
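The u32 -> u64 widening of the event masks in this file is not cosmetic: the instruction counter sits at bit index 32, which a 32-bit mask silently truncates. Minimal illustration:

	/* BIT() expands to unsigned long (64-bit on arm64), so bit 32
	 * is representable; storing this set in a u32 would drop the
	 * instruction counter entirely.
	 */
	u64 set = BIT(ARMV8_PMU_CYCLE_IDX) |	/* bit 31 */
		  BIT(ARMV8_PMU_INSTR_IDX);	/* bit 32 */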
diff --git a/arch/arm64/kvm/ptdump.c b/arch/arm64/kvm/ptdump.c
new file mode 100644
index 000000000000..e4a342e903e2
--- /dev/null
+++ b/arch/arm64/kvm/ptdump.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debug helper used to dump the stage-2 pagetables of the system and their
+ * associated permissions.
+ *
+ * Copyright (C) Google, 2024
+ * Author: Sebastian Ene <sebastianene@google.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/kvm_host.h>
+#include <linux/seq_file.h>
+
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/ptdump.h>
+
+#define MARKERS_LEN 2
+#define KVM_PGTABLE_MAX_LEVELS (KVM_PGTABLE_LAST_LEVEL + 1)
+
+struct kvm_ptdump_guest_state {
+ struct kvm *kvm;
+ struct ptdump_pg_state parser_state;
+ struct addr_marker ipa_marker[MARKERS_LEN];
+ struct ptdump_pg_level level[KVM_PGTABLE_MAX_LEVELS];
+ struct ptdump_range range[MARKERS_LEN];
+};
+
+static const struct ptdump_prot_bits stage2_pte_bits[] = {
+ {
+ .mask = PTE_VALID,
+ .val = PTE_VALID,
+ .set = " ",
+ .clear = "F",
+ }, {
+ .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
+ .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
+ .set = "R",
+ .clear = " ",
+ }, {
+ .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
+ .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
+ .set = "W",
+ .clear = " ",
+ }, {
+ .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN | PTE_VALID,
+ .val = PTE_VALID,
+ .set = " ",
+ .clear = "X",
+ }, {
+ .mask = KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
+ .val = KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
+ .set = "AF",
+ .clear = " ",
+ }, {
+ .mask = PTE_TABLE_BIT | PTE_VALID,
+ .val = PTE_VALID,
+ .set = "BLK",
+ .clear = " ",
+ },
+};
+
+static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ struct ptdump_pg_state *st = ctx->arg;
+ struct ptdump_state *pt_st = &st->ptdump;
+
+ note_page(pt_st, ctx->addr, ctx->level, ctx->old);
+
+ return 0;
+}
+
+static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl)
+{
+ u32 i;
+ u64 mask;
+
+ if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL))
+ return -EINVAL;
+
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++)
+ mask |= stage2_pte_bits[i].mask;
+
+ for (i = start_lvl; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+ snprintf(level[i].name, sizeof(level[i].name), "%u", i);
+
+ level[i].num = ARRAY_SIZE(stage2_pte_bits);
+ level[i].bits = stage2_pte_bits;
+ level[i].mask = mask;
+ }
+
+ return 0;
+}
+
+static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm *kvm)
+{
+ struct kvm_ptdump_guest_state *st;
+ struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
+ struct kvm_pgtable *pgtable = mmu->pgt;
+ int ret;
+
+ st = kzalloc(sizeof(struct kvm_ptdump_guest_state), GFP_KERNEL_ACCOUNT);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kvm_ptdump_build_levels(&st->level[0], pgtable->start_level);
+ if (ret) {
+ kfree(st);
+ return ERR_PTR(ret);
+ }
+
+ st->ipa_marker[0].name = "Guest IPA";
+ st->ipa_marker[1].start_address = BIT(pgtable->ia_bits);
+ st->range[0].end = BIT(pgtable->ia_bits);
+
+ st->kvm = kvm;
+ st->parser_state = (struct ptdump_pg_state) {
+ .marker = &st->ipa_marker[0],
+ .level = -1,
+ .pg_level = &st->level[0],
+ .ptdump.range = &st->range[0],
+ .start_address = 0,
+ };
+
+ return st;
+}
+
+static int kvm_ptdump_guest_show(struct seq_file *m, void *unused)
+{
+ int ret;
+ struct kvm_ptdump_guest_state *st = m->private;
+ struct kvm *kvm = st->kvm;
+ struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
+ struct ptdump_pg_state *parser_state = &st->parser_state;
+ struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) {
+ .cb = kvm_ptdump_visitor,
+ .arg = parser_state,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ };
+
+ parser_state->seq = m;
+
+ write_lock(&kvm->mmu_lock);
+ ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);
+ write_unlock(&kvm->mmu_lock);
+
+ return ret;
+}
+
+static int kvm_ptdump_guest_open(struct inode *m, struct file *file)
+{
+ struct kvm *kvm = m->i_private;
+ struct kvm_ptdump_guest_state *st;
+ int ret;
+
+ if (!kvm_get_kvm_safe(kvm))
+ return -ENOENT;
+
+ st = kvm_ptdump_parser_create(kvm);
+ if (IS_ERR(st)) {
+ ret = PTR_ERR(st);
+ goto err_with_kvm_ref;
+ }
+
+ ret = single_open(file, kvm_ptdump_guest_show, st);
+ if (!ret)
+ return 0;
+
+ kfree(st);
+err_with_kvm_ref:
+ kvm_put_kvm(kvm);
+ return ret;
+}
+
+static int kvm_ptdump_guest_close(struct inode *m, struct file *file)
+{
+ struct kvm *kvm = m->i_private;
+ void *st = ((struct seq_file *)file->private_data)->private;
+
+ kfree(st);
+ kvm_put_kvm(kvm);
+
+ return single_release(m, file);
+}
+
+static const struct file_operations kvm_ptdump_guest_fops = {
+ .open = kvm_ptdump_guest_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = kvm_ptdump_guest_close,
+};
+
+static int kvm_pgtable_range_show(struct seq_file *m, void *unused)
+{
+ struct kvm_pgtable *pgtable = m->private;
+
+ seq_printf(m, "%2u\n", pgtable->ia_bits);
+ return 0;
+}
+
+static int kvm_pgtable_levels_show(struct seq_file *m, void *unused)
+{
+ struct kvm_pgtable *pgtable = m->private;
+
+ seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - pgtable->start_level);
+ return 0;
+}
+
+static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file,
+ int (*show)(struct seq_file *, void *))
+{
+ struct kvm *kvm = m->i_private;
+ struct kvm_pgtable *pgtable;
+ int ret;
+
+ if (!kvm_get_kvm_safe(kvm))
+ return -ENOENT;
+
+ pgtable = kvm->arch.mmu.pgt;
+
+ ret = single_open(file, show, pgtable);
+ if (ret < 0)
+ kvm_put_kvm(kvm);
+ return ret;
+}
+
+static int kvm_pgtable_range_open(struct inode *m, struct file *file)
+{
+ return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show);
+}
+
+static int kvm_pgtable_levels_open(struct inode *m, struct file *file)
+{
+ return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show);
+}
+
+static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file)
+{
+ struct kvm *kvm = m->i_private;
+
+ kvm_put_kvm(kvm);
+ return single_release(m, file);
+}
+
+static const struct file_operations kvm_pgtable_range_fops = {
+ .open = kvm_pgtable_range_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = kvm_pgtable_debugfs_close,
+};
+
+static const struct file_operations kvm_pgtable_levels_fops = {
+ .open = kvm_pgtable_levels_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = kvm_pgtable_debugfs_close,
+};
+
+void kvm_s2_ptdump_create_debugfs(struct kvm *kvm)
+{
+ debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry,
+ kvm, &kvm_ptdump_guest_fops);
+ debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry, kvm,
+ &kvm_pgtable_range_fops);
+ debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry,
+ kvm, &kvm_pgtable_levels_fops);
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 31e49da867ff..dad88e31f953 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -18,6 +18,7 @@
#include <linux/printk.h>
#include <linux/uaccess.h>
+#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -47,6 +48,13 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 val);
+static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
static bool bad_trap(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r,
@@ -54,8 +62,7 @@ static bool bad_trap(struct kvm_vcpu *vcpu,
{
WARN_ONCE(1, "Unexpected %s\n", msg);
print_sys_reg_instr(params);
- kvm_inject_undefined(vcpu);
- return false;
+ return undef_access(vcpu, params, r);
}
static bool read_from_write_only(struct kvm_vcpu *vcpu,
@@ -346,10 +353,8 @@ static bool access_dcgsw(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_has_mte(vcpu->kvm)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_has_mte(vcpu->kvm))
+ return undef_access(vcpu, p, r);
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
return access_dcsw(vcpu, p, r);
@@ -386,10 +391,8 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
u64 val, mask, shift;
if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
- !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
+ return undef_access(vcpu, p, r);
BUG_ON(!p->is_write);
@@ -436,10 +439,8 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
{
bool g1;
- if (!kvm_has_gicv3(vcpu->kvm)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_has_gicv3(vcpu->kvm))
+ return undef_access(vcpu, p, r);
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
@@ -484,6 +485,9 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ if (!kvm_has_gicv3(vcpu->kvm))
+ return undef_access(vcpu, p, r);
+
if (p->is_write)
return ignore_write(vcpu, p);
@@ -501,14 +505,6 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
-static bool trap_undef(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- kvm_inject_undefined(vcpu);
- return false;
-}
-
/*
* ARMv8.1 mandates at least a trivial LORegion implementation, where all the
* RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
@@ -521,10 +517,8 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
{
u32 sr = reg_to_encoding(r);
- if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ return undef_access(vcpu, p, r);
if (p->is_write && sr == SYS_LORID_EL1)
return write_to_read_only(vcpu, p, r);
@@ -893,7 +887,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
- __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+ __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
return __vcpu_sys_reg(vcpu, r->reg);
}
@@ -985,7 +979,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
else
/* return PMSELR.SEL field */
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
- & ARMV8_PMU_COUNTER_MASK;
+ & PMSELR_EL0_SEL_MASK;
return true;
}
@@ -1053,8 +1047,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
if (pmu_access_event_counter_el0_disabled(vcpu))
return false;
- idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
- & ARMV8_PMU_COUNTER_MASK;
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+ __vcpu_sys_reg(vcpu, PMSELR_EL0));
} else if (r->Op2 == 0) {
/* PMCCNTR_EL0 */
if (pmu_access_cycle_counter_el0_disabled(vcpu))
@@ -1104,7 +1098,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
/* PMXEVTYPER_EL0 */
- idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
reg = PMEVTYPER0_EL0 + idx;
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -1257,10 +1251,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- if (!vcpu_mode_priv(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!vcpu_mode_priv(vcpu))
+ return undef_access(vcpu, p, r);
__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
p->regval & ARMV8_PMU_USERENR_MASK;
@@ -1344,14 +1336,6 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
.reset = reset_pmevtyper, \
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
-static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- kvm_inject_undefined(vcpu);
-
- return false;
-}
-
/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
@@ -1410,8 +1394,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
break;
default:
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
- kvm_inject_undefined(vcpu);
- return false;
+ return undef_access(vcpu, p, r);
}
if (p->is_write)
@@ -1545,6 +1528,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break;
+ case SYS_ID_AA64PFR2_EL1:
+ /* We only expose FPMR */
+ val &= ID_AA64PFR2_EL1_FPMR;
+ break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
@@ -1562,6 +1549,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
break;
+ case SYS_ID_AA64MMFR3_EL1:
+ val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE;
+ break;
case SYS_ID_MMFR4_EL1:
val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
break;
@@ -1675,6 +1665,24 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
+static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_fpmr(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
@@ -2091,26 +2099,6 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
/*
- * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
- * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
- * handling traps. Given that, they are always hidden from userspace.
- */
-static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- return REG_HIDDEN_USER;
-}
-
-#define EL12_REG(name, acc, rst, v) { \
- SYS_DESC(SYS_##name##_EL12), \
- .access = acc, \
- .reset = rst, \
- .reg = name##_EL1, \
- .val = v, \
- .visibility = hidden_user_visibility, \
-}
-
-/*
 * Since the reset() callback and the val field are not otherwise used for
 * idregs, they are repurposed for idreg-specific duties.
* The reset() would return KVM sanitised register value. The value would be the
@@ -2217,6 +2205,18 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
return true;
}
+static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
+ else
+ p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
+
+ return true;
+}
+
static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 val = r->val;
@@ -2261,6 +2261,15 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
return true;
}
+static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2307,7 +2316,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
// DBGDTR[TR]X_EL0 share the same encoding
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
- { SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },
+ { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
@@ -2365,16 +2374,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
- ID_AA64PFR0_EL1_GIC |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
ID_SANITISED(ID_AA64PFR1_EL1),
- ID_UNALLOCATED(4,2),
+ ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
ID_HIDDEN(ID_AA64SMFR0_EL1),
ID_UNALLOCATED(4,6),
- ID_UNALLOCATED(4,7),
+ ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
/* CRm=5 */
{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
@@ -2424,7 +2432,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR2_EL1_IDS |
ID_AA64MMFR2_EL1_NV |
ID_AA64MMFR2_EL1_CCIDX)),
- ID_SANITISED(ID_AA64MMFR3_EL1),
+ ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
+ ID_AA64MMFR3_EL1_S1POE)),
ID_SANITISED(ID_AA64MMFR4_EL1),
ID_UNALLOCATED(7,5),
ID_UNALLOCATED(7,6),
@@ -2455,6 +2464,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
{ SYS_DESC(SYS_ELR_EL1), access_elr},
+ { SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
+
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
@@ -2498,6 +2509,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
+ { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
+ .visibility = s1poe_visibility },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
@@ -2509,18 +2522,31 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
- { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
- { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
- { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
- { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+ { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
+ { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
@@ -2541,7 +2567,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
CTR_EL0_IDC_MASK |
CTR_EL0_DminLine_MASK |
CTR_EL0_IminLine_MASK),
- { SYS_DESC(SYS_SVCR), undef_access },
+ { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
+ { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
@@ -2584,6 +2611,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.access = access_pmovs, .reg = PMOVSSET_EL0,
.get_user = get_pmreg, .set_user = set_pmreg },
+ { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
+ .visibility = s1poe_visibility },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
@@ -2764,7 +2793,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
- { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
+ { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
@@ -2773,20 +2802,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
- { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
- .visibility = hidden_user_visibility },
- { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
- .visibility = hidden_user_visibility },
- { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
- .visibility = hidden_user_visibility },
- { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
- .visibility = hidden_user_visibility },
-
- { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
+ { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
+ { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
+
+ { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
- { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },
+ { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
@@ -2796,7 +2821,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
- { SYS_DESC(SYS_RMR_EL2), trap_undef },
+ { SYS_DESC(SYS_RMR_EL2), undef_access },
+
+ EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0),
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
@@ -2804,11 +2831,48 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
- EL12_REG(CNTKCTL, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
+static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ __kvm_at_s1e01(vcpu, op, p->regval);
+
+ return true;
+}
+
+static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ /* There is no FGT associated with AT S1E2A :-( */
+ if (op == OP_AT_S1E2A &&
+ !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+
+ __kvm_at_s1e2(vcpu, op, p->regval);
+
+ return true;
+}
+
+static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+
+ __kvm_at_s12(vcpu, op, p->regval);
+
+ return true;
+}
+
static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr)
{
struct kvm *kvm = vpcu->kvm;
@@ -2830,10 +2894,8 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
- if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
write_lock(&vcpu->kvm->mmu_lock);
@@ -2902,10 +2964,8 @@ static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
u64 limit, vttbr;
- if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
@@ -2930,10 +2990,8 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
u64 base, range, tg, num, scale;
int shift;
- if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
/*
* Because the shadow S2 structure doesn't necessarily reflect that
@@ -3001,10 +3059,8 @@ static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
- if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
&(union tlbi_info) {
@@ -3044,10 +3100,8 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
WARN_ON(!vcpu_is_el2(vcpu));
- if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
+ if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
+ return undef_access(vcpu, p, r);
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
&(union tlbi_info) {
@@ -3071,6 +3125,14 @@ static struct sys_reg_desc sys_insn_descs[] = {
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
+
+ SYS_INSN(AT_S1E1R, handle_at_s1e01),
+ SYS_INSN(AT_S1E1W, handle_at_s1e01),
+ SYS_INSN(AT_S1E0R, handle_at_s1e01),
+ SYS_INSN(AT_S1E0W, handle_at_s1e01),
+ SYS_INSN(AT_S1E1RP, handle_at_s1e01),
+ SYS_INSN(AT_S1E1WP, handle_at_s1e01),
+
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
@@ -3150,19 +3212,27 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
+ SYS_INSN(AT_S1E2R, handle_at_s1e2),
+ SYS_INSN(AT_S1E2W, handle_at_s1e2),
+ SYS_INSN(AT_S12E1R, handle_at_s12),
+ SYS_INSN(AT_S12E1W, handle_at_s12),
+ SYS_INSN(AT_S12E0R, handle_at_s12),
+ SYS_INSN(AT_S12E0W, handle_at_s12),
+ SYS_INSN(AT_S1E2A, handle_at_s1e2),
+
SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
- SYS_INSN(TLBI_ALLE2OS, trap_undef),
- SYS_INSN(TLBI_VAE2OS, trap_undef),
+ SYS_INSN(TLBI_ALLE2OS, undef_access),
+ SYS_INSN(TLBI_VAE2OS, undef_access),
SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
- SYS_INSN(TLBI_VALE2OS, trap_undef),
+ SYS_INSN(TLBI_VALE2OS, undef_access),
SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
- SYS_INSN(TLBI_RVAE2IS, trap_undef),
- SYS_INSN(TLBI_RVALE2IS, trap_undef),
+ SYS_INSN(TLBI_RVAE2IS, undef_access),
+ SYS_INSN(TLBI_RVALE2IS, undef_access),
SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
@@ -3174,10 +3244,10 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
- SYS_INSN(TLBI_RVAE2OS, trap_undef),
- SYS_INSN(TLBI_RVALE2OS, trap_undef),
- SYS_INSN(TLBI_RVAE2, trap_undef),
- SYS_INSN(TLBI_RVALE2, trap_undef),
+ SYS_INSN(TLBI_RVAE2OS, undef_access),
+ SYS_INSN(TLBI_RVALE2OS, undef_access),
+ SYS_INSN(TLBI_RVAE2, undef_access),
+ SYS_INSN(TLBI_RVALE2, undef_access),
SYS_INSN(TLBI_ALLE1, handle_alle1is),
SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
@@ -3186,19 +3256,19 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
- SYS_INSN(TLBI_ALLE2OSNXS, trap_undef),
- SYS_INSN(TLBI_VAE2OSNXS, trap_undef),
+ SYS_INSN(TLBI_ALLE2OSNXS, undef_access),
+ SYS_INSN(TLBI_VAE2OSNXS, undef_access),
SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2OSNXS, trap_undef),
+ SYS_INSN(TLBI_VALE2OSNXS, undef_access),
SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
- SYS_INSN(TLBI_RVAE2ISNXS, trap_undef),
- SYS_INSN(TLBI_RVALE2ISNXS, trap_undef),
- SYS_INSN(TLBI_ALLE2ISNXS, trap_undef),
- SYS_INSN(TLBI_VAE2ISNXS, trap_undef),
+ SYS_INSN(TLBI_RVAE2ISNXS, undef_access),
+ SYS_INSN(TLBI_RVALE2ISNXS, undef_access),
+ SYS_INSN(TLBI_ALLE2ISNXS, undef_access),
+ SYS_INSN(TLBI_VAE2ISNXS, undef_access),
SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2ISNXS, trap_undef),
+ SYS_INSN(TLBI_VALE2ISNXS, undef_access),
SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
@@ -3208,14 +3278,14 @@ static struct sys_reg_desc sys_insn_descs[] = {
SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
- SYS_INSN(TLBI_RVAE2OSNXS, trap_undef),
- SYS_INSN(TLBI_RVALE2OSNXS, trap_undef),
- SYS_INSN(TLBI_RVAE2NXS, trap_undef),
- SYS_INSN(TLBI_RVALE2NXS, trap_undef),
- SYS_INSN(TLBI_ALLE2NXS, trap_undef),
- SYS_INSN(TLBI_VAE2NXS, trap_undef),
+ SYS_INSN(TLBI_RVAE2OSNXS, undef_access),
+ SYS_INSN(TLBI_RVALE2OSNXS, undef_access),
+ SYS_INSN(TLBI_RVAE2NXS, undef_access),
+ SYS_INSN(TLBI_RVALE2NXS, undef_access),
+ SYS_INSN(TLBI_ALLE2NXS, undef_access),
+ SYS_INSN(TLBI_VAE2NXS, undef_access),
SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
- SYS_INSN(TLBI_VALE2NXS, trap_undef),
+ SYS_INSN(TLBI_VALE2NXS, undef_access),
SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
};
@@ -3393,6 +3463,7 @@ static const struct sys_reg_desc cp15_regs[] = {
/* TTBCR2 */
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
+ { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
/* DFSR */
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
@@ -3442,8 +3513,28 @@ static const struct sys_reg_desc cp15_regs[] = {
/* AMAIR1 */
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
- /* ICC_SRE */
- { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
+ { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+ { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
+ { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
@@ -4280,7 +4371,7 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int ret;
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
- if (!r || sysreg_hidden_user(vcpu, r))
+ if (!r || sysreg_hidden(vcpu, r))
return -ENOENT;
if (r->get_user) {
@@ -4324,7 +4415,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
return -EFAULT;
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
- if (!r || sysreg_hidden_user(vcpu, r))
+ if (!r || sysreg_hidden(vcpu, r))
return -ENOENT;
if (sysreg_user_write_ignore(vcpu, r))
@@ -4410,7 +4501,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
if (!(rd->reg || rd->get_user))
return 0;
- if (sysreg_hidden_user(vcpu, rd))
+ if (sysreg_hidden(vcpu, rd))
return 0;
if (!copy_reg_to_user(rd, uind))
@@ -4551,6 +4642,7 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
mutex_lock(&kvm->arch.config_lock);
vcpu_set_hcr(vcpu);
+ vcpu_set_ich_hcr(vcpu);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
/*
@@ -4566,6 +4658,9 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+ if (kvm_has_fpmr(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
}
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
@@ -4574,8 +4669,6 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
HFGxTR_EL2_nMAIR2_EL1 |
HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nPOR_EL1 |
- HFGxTR_EL2_nPOR_EL0 |
HFGxTR_EL2_nACCDATA_EL1 |
HFGxTR_EL2_nSMPRI_EL1_MASK |
HFGxTR_EL2_nTPIDR2_EL0_MASK);
@@ -4606,10 +4699,21 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
HFGITR_EL2_TLBIRVAAE1OS |
HFGITR_EL2_TLBIRVAE1OS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
+ kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A;
+
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP |
+ HFGITR_EL2_ATS1E1WP);
+
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
HFGxTR_EL2_nPIR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+ kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
+ HFGxTR_EL2_nPOR_EL0);
+
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
HAFGRTR_EL2_RES1);
@@ -4619,6 +4723,36 @@ out:
mutex_unlock(&kvm->arch.config_lock);
}
+/*
+ * Perform last adjustments to the ID registers that are implied by the
+ * configuration outside of the ID regs themselves, as well as any
+ * initialisation that directly depends on these ID registers (such as
+ * RES0/RES1 behaviours). This is not the place to configure traps though.
+ *
+ * Because this can be called once per CPU, changes must be idempotent.
+ */
+int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ guard(mutex)(&kvm->arch.config_lock);
+
+ if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+ irqchip_in_kernel(kvm) &&
+ kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
+ kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
+ kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
+ }
+
+ if (vcpu_has_nv(vcpu)) {
+ int ret = kvm_init_nv_sysregs(kvm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
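For readers unfamiliar with guard(mutex) above: it is the kernel's scope-based lock helper, built on the compiler's cleanup attribute, so every return path in kvm_finalize_sys_regs() drops kvm->arch.config_lock automatically. A minimal userspace sketch of the same idiom (the pthread mutex and the names here are stand-ins, not the kernel API):

    /* Userspace sketch of scope-based locking, as used by guard(mutex)
     * above. Assumes GCC/Clang __attribute__((cleanup)); the pthread
     * mutex stands in for the kernel's struct mutex. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    static void unlock_cleanup(pthread_mutex_t **m)
    {
            pthread_mutex_unlock(*m);
    }

    /* Hypothetical analogue: every return path drops the lock automatically. */
    static int finalize(int has_nv)
    {
            pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &cfg_lock;

            pthread_mutex_lock(guard);
            if (has_nv)
                    return -1;      /* early return: cleanup still unlocks */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", finalize(0));
            printf("%d\n", finalize(1));
            return 0;
    }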
int __init kvm_sys_reg_table_init(void)
{
bool valid = true;
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 997eea21ba2a..1d94ed6efad2 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -95,9 +95,8 @@ struct sys_reg_desc {
};
#define REG_HIDDEN (1 << 0) /* hidden from userspace and guest */
-#define REG_HIDDEN_USER (1 << 1) /* hidden from userspace only */
-#define REG_RAZ (1 << 2) /* RAZ from userspace and guest */
-#define REG_USER_WI (1 << 3) /* WI from userspace only */
+#define REG_RAZ (1 << 1) /* RAZ from userspace and guest */
+#define REG_USER_WI (1 << 2) /* WI from userspace only */
static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,
@@ -165,15 +164,6 @@ static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
return sysreg_visibility(vcpu, r) & REG_HIDDEN;
}
-static inline bool sysreg_hidden_user(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *r)
-{
- if (likely(!r->visibility))
- return false;
-
- return r->visibility(vcpu, r) & (REG_HIDDEN | REG_HIDDEN_USER);
-}
-
static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
@@ -235,6 +225,8 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
+int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
+
#define AA32(_x) .aarch32_map = AA32_##_x
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
@@ -248,4 +240,11 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
Op2(sys_reg_Op2(reg))
+#define CP15_SYS_DESC(reg) \
+ .name = #reg, \
+ .aarch32_map = AA32_DIRECT, \
+ Op0(0), Op1(sys_reg_Op1(reg)), \
+ CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
+ Op2(sys_reg_Op2(reg))
+
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
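CP15_SYS_DESC() above splits a packed sysreg encoding into its Op1/CRn/CRm/Op2 fields so the AArch32 CP15 table can reuse the AArch64 register definitions. A standalone sketch of that unpacking, assuming the usual arm64 sys_reg() bit layout (Op0 at bit 19, Op1 at 16, CRn at 12, CRm at 8, Op2 at 5):

    /* Standalone sketch of unpacking a sysreg encoding the way
     * CP15_SYS_DESC() does; the bit positions are an assumption of
     * this sketch, mirroring the arm64 sys_reg() packing. */
    #include <stdio.h>

    #define sys_reg(op0, op1, crn, crm, op2) \
            (((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5))

    #define sys_reg_Op1(id) (((id) >> 16) & 0x7)
    #define sys_reg_CRn(id) (((id) >> 12) & 0xf)
    #define sys_reg_CRm(id) (((id) >>  8) & 0xf)
    #define sys_reg_Op2(id) (((id) >>  5) & 0x7)

    struct desc { unsigned Op1, CRn, CRm, Op2; };

    /* Mirrors the designated-initializer trick in CP15_SYS_DESC(). */
    #define CP15_DESC(reg) { .Op1 = sys_reg_Op1(reg), .CRn = sys_reg_CRn(reg), \
                             .CRm = sys_reg_CRm(reg), .Op2 = sys_reg_Op2(reg) }

    int main(void)
    {
            const unsigned icc_sre = sys_reg(3, 0, 12, 12, 5); /* SYS_ICC_SRE_EL1 */
            struct desc d = CP15_DESC(icc_sre);

            /* Matches the removed Op1(0) CRn(12) CRm(12) Op2(5) entry. */
            printf("Op1=%u CRn=%u CRm=%u Op2=%u\n", d.Op1, d.CRn, d.CRm, d.Op2);
            return 0;
    }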
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 3eecdd2f4b8f..b217b256853c 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -292,6 +292,18 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
+}
+
+void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	/* Hide the GICv3 sysregs if necessary */
+ if (!kvm_has_gicv3(vcpu->kvm)) {
+ vgic_v3->vgic_hcr |= ICH_HCR_TALL0 | ICH_HCR_TALL1 | ICH_HCR_TC;
+ return;
+ }
+
if (group0_trap)
vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
if (group1_trap)
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index abe29c7d85d0..f50274fd5581 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -922,10 +922,13 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
+ }
- if (kvm_vgic_global_state.type == VGIC_V2)
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_load(vcpu);
else
vgic_v3_load(vcpu);
@@ -933,10 +936,13 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
+ if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
+ }
- if (kvm_vgic_global_state.type == VGIC_V2)
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_put(vcpu);
else
vgic_v3_put(vcpu);
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 8532bfe3fed4..f2486b4d9f95 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -346,11 +346,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu);
+
static inline bool kvm_has_gicv3(struct kvm *kvm)
{
- return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
- irqchip_in_kernel(kvm) &&
- kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+ return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP);
}
#endif
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 60454256945b..2fc8c6dd0407 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
- ioremap.o mmap.o pgd.o mmu.o \
+ ioremap.o mmap.o pgd.o mem_encrypt.o mmu.o \
context.o proc.o pageattr.o fixmap.o
obj-$(CONFIG_ARM64_CONTPTE) += contpte.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index a3edced29ac1..55107d27d3f8 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -421,6 +421,12 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+	/*
+	 * We are not advancing entry because __ptep_set_access_flags()
+	 * only consumes access flags from entry. And since we have already
+	 * checked the whole contpte block and would have returned early,
+	 * pte_same() within __ptep_set_access_flags() is likely to be false.
+	 */
for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
__ptep_set_access_flags(vma, addr, ptep, entry, 0);
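The loop above walks every entry in the contiguous range after aligning the pointer and address down to the block. A small sketch of the address math, assuming 4K pages and 16-entry contpte blocks (the common arm64 configuration; both values are assumptions here):

    /* Address math behind contpte_align_down()/ALIGN_DOWN() above,
     * assuming 4K pages and CONT_PTES = 16 (64K contpte blocks). */
    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define CONT_PTES       16UL
    #define CONT_PTE_SIZE   (CONT_PTES * PAGE_SIZE)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
            unsigned long addr = 0x40007000UL;  /* hypothetical faulting address */
            unsigned long start = ALIGN_DOWN(addr, CONT_PTE_SIZE);
            unsigned long i;

            /* The loop then touches every pte in the contiguous block. */
            for (i = 0; i < CONT_PTES; i++)
                    printf("pte %2lu -> %#lx\n", i, start + i * PAGE_SIZE);
            return 0;
    }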
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 451ba7cbd5ad..8b281cf308b3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/pkeys.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>
@@ -486,6 +487,23 @@ static void do_bad_area(unsigned long far, unsigned long esr,
}
}
+static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma,
+ unsigned int mm_flags)
+{
+ unsigned long iss2 = ESR_ELx_ISS2(esr);
+
+ if (!system_supports_poe())
+ return false;
+
+ if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay))
+ return true;
+
+ return !arch_vma_access_permitted(vma,
+ mm_flags & FAULT_FLAG_WRITE,
+ mm_flags & FAULT_FLAG_INSTRUCTION,
+ false);
+}
+
static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -511,6 +529,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
int si_code;
+ int pkey = -1;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -575,6 +594,16 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto bad_area;
}
+
+ if (fault_from_pkey(esr, vma, mm_flags)) {
+ pkey = vma_pkey(vma);
+ vma_end_read(vma);
+ fault = 0;
+ si_code = SEGV_PKUERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area;
+ }
+
fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma);
@@ -610,7 +639,16 @@ retry:
goto bad_area;
}
+ if (fault_from_pkey(esr, vma, mm_flags)) {
+ pkey = vma_pkey(vma);
+ mmap_read_unlock(mm);
+ fault = 0;
+ si_code = SEGV_PKUERR;
+ goto bad_area;
+ }
+
fault = handle_mm_fault(vma, addr, mm_flags, regs);
+
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
@@ -669,8 +707,23 @@ bad_area:
arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
} else {
+ /*
+ * The pkey value that we return to userspace can be different
+ * from the pkey that caused the fault.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+	 * 2. T1 : set POR_EL0 to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
		/* Something tried to access memory outside the memory map */
- arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
+ if (si_code == SEGV_PKUERR)
+ arm64_force_sig_fault_pkey(far, inf->name, pkey);
+ else
+ arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
}
return 0;
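For context, the fault path above is what delivers SEGV_PKUERR to a process that locked itself out of a key. A hedged userspace sketch of that flow using the generic pkey syscalls; it needs pkey support in the kernel and CPU (on arm64, the POE support added here), and error handling is trimmed for brevity:

    /* Minimal sketch of the userspace side: deny access via a protection
     * key, then re-enable it. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            int pkey = pkey_alloc(0, 0);

            if (p == MAP_FAILED || pkey < 0) {
                    perror("pkey_alloc");   /* kernel/CPU lacks pkey support */
                    return 1;
            }
            pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);

            strcpy(p, "ok");                        /* permitted */
            pkey_set(pkey, PKEY_DISABLE_ACCESS);    /* POR_EL0 update on arm64 */
            /* Touching *p now faults; the handler above reports SIGSEGV
             * with si_code == SEGV_PKUERR and si_pkey set to this key. */
            pkey_set(pkey, 0);                      /* restore access */
            printf("%s\n", p);
            return 0;
    }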
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9b5ab6818f7f..27a32ff15412 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -114,36 +114,33 @@ static void __init arch_reserve_crashkernel(void)
low_size, high);
}
-/*
- * Return the maximum physical address for a zone accessible by the given bits
- * limit. If DRAM starts above 32-bit, expand the zone to the maximum
- * available memory, otherwise cap it at 32-bit.
- */
-static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
{
- phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
- phys_addr_t phys_start = memblock_start_of_DRAM();
-
- if (phys_start > U32_MAX)
- zone_mask = PHYS_ADDR_MAX;
- else if (phys_start > zone_mask)
- zone_mask = U32_MAX;
+	/*
+ * Information we get from firmware (e.g. DT dma-ranges) describe DMA
+ * bus constraints. Devices using DMA might have their own limitations.
+ * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
+ * DMA zone on platforms that have RAM there.
+ */
+ if (memblock_start_of_DRAM() < U32_MAX)
+ zone_limit = min(zone_limit, U32_MAX);
- return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
+ return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
}
static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
- unsigned int __maybe_unused acpi_zone_dma_bits;
- unsigned int __maybe_unused dt_zone_dma_bits;
- phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
+ phys_addr_t __maybe_unused acpi_zone_dma_limit;
+ phys_addr_t __maybe_unused dt_zone_dma_limit;
+ phys_addr_t __maybe_unused dma32_phys_limit =
+ max_zone_phys(DMA_BIT_MASK(32));
#ifdef CONFIG_ZONE_DMA
- acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
- dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
- zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
- arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+ acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
+ dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
+ zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
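The reworked max_zone_phys() clamps a firmware-provided DMA limit rather than converting a bit count. A standalone replay of the arithmetic with two hypothetical memory layouts:

    /* Standalone version of the max_zone_phys() clamping above. */
    #include <stdio.h>
    #include <stdint.h>

    #define U32_MAX 0xffffffffULL

    /* Stand-ins for memblock_start_of_DRAM()/memblock_end_of_DRAM(). */
    static uint64_t dram_start, dram_end;

    static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    static uint64_t max_zone_phys(uint64_t zone_limit)
    {
            /* Keep a low 32-bit DMA zone only when RAM actually starts there. */
            if (dram_start < U32_MAX)
                    zone_limit = min64(zone_limit, U32_MAX);

            return min64(zone_limit, dram_end - 1) + 1;
    }

    int main(void)
    {
            dram_start = 0x80000000ULL;   dram_end = 0x200000000ULL;  /* RAM below 4G */
            printf("%#llx\n", (unsigned long long)max_zone_phys(~0ULL)); /* 0x100000000 */

            dram_start = 0x8000000000ULL; dram_end = 0x8100000000ULL; /* all above 4G */
            printf("%#llx\n", (unsigned long long)max_zone_phys(~0ULL)); /* 0x8100000000 */
            return 0;
    }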
@@ -414,8 +411,16 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_reserved_area(lm_alias(__init_begin),
- lm_alias(__init_end),
+ void *lm_init_begin = lm_alias(__init_begin);
+ void *lm_init_end = lm_alias(__init_end);
+
+ WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
+ WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
+
+ /* Delete __init region from memblock.reserved. */
+ memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
+
+ free_reserved_area(lm_init_begin, lm_init_end,
POISON_FREE_INITMEM, "unused kernel");
/*
* Unmap the __init region but leave the VM area in place. This
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 269f2f63ab7d..6cc0b7e7eb03 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -3,10 +3,22 @@
#include <linux/mm.h>
#include <linux/io.h>
+static ioremap_prot_hook_t ioremap_prot_hook;
+
+int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook)
+{
+ if (WARN_ON(ioremap_prot_hook))
+ return -EBUSY;
+
+ ioremap_prot_hook = hook;
+ return 0;
+}
+
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{
unsigned long last_addr = phys_addr + size - 1;
+ pgprot_t pgprot = __pgprot(prot);
/* Don't allow outside PHYS_MASK */
if (last_addr & ~PHYS_MASK)
@@ -16,7 +28,16 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
return NULL;
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ /*
+ * If a hook is registered (e.g. for confidential computing
+ * purposes), call that now and barf if it fails.
+ */
+ if (unlikely(ioremap_prot_hook) &&
+ WARN_ON(ioremap_prot_hook(phys_addr, size, &pgprot))) {
+ return NULL;
+ }
+
+ return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/arm64/mm/mem_encrypt.c b/arch/arm64/mm/mem_encrypt.c
new file mode 100644
index 000000000000..ee3c0ab04384
--- /dev/null
+++ b/arch/arm64/mm/mem_encrypt.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the memory encryption/decryption API.
+ *
+ * Since the low-level details of the operation depend on the
+ * Confidential Computing environment (e.g. pKVM, CCA, ...), this just
+ * acts as a top-level dispatcher to whatever hooks may have been
+ * registered.
+ *
+ * Author: Will Deacon <will@kernel.org>
+ * Copyright (C) 2024 Google LLC
+ *
+ * "Hello, boils and ghouls!"
+ */
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+
+#include <asm/mem_encrypt.h>
+
+static const struct arm64_mem_crypt_ops *crypt_ops;
+
+int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops)
+{
+ if (WARN_ON(crypt_ops))
+ return -EBUSY;
+
+ crypt_ops = ops;
+ return 0;
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+ if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr)))
+ return 0;
+
+ return crypt_ops->encrypt(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+ if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr)))
+ return 0;
+
+ return crypt_ops->decrypt(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(set_memory_decrypted);
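The file implements a register-once dispatcher: a single backend may attach, and the entry points are no-ops until it does. A userspace sketch of the same pattern (the names and the demo backend are illustrative, not kernel API):

    /* Register-once dispatcher, as in arm64_mem_crypt_ops_register(). */
    #include <stdio.h>
    #include <errno.h>

    struct mem_crypt_ops {
            int (*encrypt)(unsigned long addr, int numpages);
            int (*decrypt)(unsigned long addr, int numpages);
    };

    static const struct mem_crypt_ops *crypt_ops;

    static int crypt_ops_register(const struct mem_crypt_ops *ops)
    {
            if (crypt_ops)
                    return -EBUSY;  /* the kernel version also WARNs here */
            crypt_ops = ops;
            return 0;
    }

    static int set_memory_encrypted_demo(unsigned long addr, int numpages)
    {
            if (!crypt_ops)
                    return 0;       /* no CoCo backend: nothing to do */
            return crypt_ops->encrypt(addr, numpages);
    }

    /* Hypothetical backend, e.g. what a pKVM or CCA guest might hook up. */
    static int demo_encrypt(unsigned long addr, int numpages)
    {
            printf("encrypt %#lx (%d pages)\n", addr, numpages);
            return 0;
    }

    int main(void)
    {
            static const struct mem_crypt_ops demo = { .encrypt = demo_encrypt };

            set_memory_encrypted_demo(0x1000, 1);   /* no-op */
            crypt_ops_register(&demo);
            set_memory_encrypted_demo(0x1000, 1);   /* dispatched */
            return 0;
    }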
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 642bdf908b22..7e3ad97e27d8 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -102,6 +102,17 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
if (vm_flags & VM_MTE)
prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+#ifdef CONFIG_ARCH_HAS_PKEYS
+ if (system_supports_poe()) {
+ if (vm_flags & VM_PKEY_BIT0)
+ prot |= PTE_PO_IDX_0;
+ if (vm_flags & VM_PKEY_BIT1)
+ prot |= PTE_PO_IDX_1;
+ if (vm_flags & VM_PKEY_BIT2)
+ prot |= PTE_PO_IDX_2;
+ }
+#endif
+
return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
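The three VM_PKEY bits above simply encode a 3-bit permission-overlay index into the PTE. A sketch of that mapping with stand-in bit values (the real VM_PKEY_BIT*/PTE_PO_IDX_* definitions differ):

    /* How the VM_PKEY bits select a permission-overlay index, as in
     * vm_get_page_prot() above. Bit values are illustrative stand-ins. */
    #include <stdio.h>

    #define VM_PKEY_BIT0    0x1UL
    #define VM_PKEY_BIT1    0x2UL
    #define VM_PKEY_BIT2    0x4UL

    static unsigned po_index(unsigned long vm_flags)
    {
            unsigned idx = 0;

            if (vm_flags & VM_PKEY_BIT0)
                    idx |= 1;
            if (vm_flags & VM_PKEY_BIT1)
                    idx |= 2;
            if (vm_flags & VM_PKEY_BIT2)
                    idx |= 4;
            return idx;     /* pkey 0..7 -> POR_EL0 field 0..7 */
    }

    int main(void)
    {
            printf("%u\n", po_index(VM_PKEY_BIT2 | VM_PKEY_BIT0)); /* pkey 5 */
            return 0;
    }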
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 353ea5dc32b8..e55b02fbddc8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -25,6 +25,7 @@
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>
+#include <linux/pkeys.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -1549,3 +1550,47 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
cpu_uninstall_idmap();
}
+
+#ifdef CONFIG_ARCH_HAS_PKEYS
+int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val)
+{
+	u64 new_por;
+ u64 old_por;
+ u64 pkey_shift;
+
+ if (!system_supports_poe())
+ return -ENOSPC;
+
+ /*
+ * This code should only be called with valid 'pkey'
+ * values originating from in-kernel users. Complain
+ * if a bad value is observed.
+ */
+ if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
+ return -EINVAL;
+
+ /* Set the bits we need in POR: */
+ new_por = POE_RXW;
+ if (init_val & PKEY_DISABLE_WRITE)
+ new_por &= ~POE_W;
+ if (init_val & PKEY_DISABLE_ACCESS)
+ new_por &= ~POE_RW;
+ if (init_val & PKEY_DISABLE_READ)
+ new_por &= ~POE_R;
+ if (init_val & PKEY_DISABLE_EXECUTE)
+ new_por &= ~POE_X;
+
+ /* Shift the bits in to the correct place in POR for pkey: */
+ pkey_shift = pkey * POR_BITS_PER_PKEY;
+ new_por <<= pkey_shift;
+
+ /* Get old POR and mask off any old bits in place: */
+ old_por = read_sysreg_s(SYS_POR_EL0);
+ old_por &= ~(POE_MASK << pkey_shift);
+
+ /* Write old part along with new part: */
+ write_sysreg_s(old_por | new_por, SYS_POR_EL0);
+
+ return 0;
+}
+#endif
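A standalone replay of the POR_EL0 read-modify-write above. The POE_* encodings (R=1, X=2, W=4, four bits per key) and the PKEY_DISABLE_READ/EXECUTE values are assumptions of this sketch:

    /* Replays the bit math of arch_set_user_pkey_access(); constants
     * here are assumed, not taken from kernel headers. */
    #include <stdio.h>
    #include <stdint.h>

    #define POE_R   0x1ULL
    #define POE_X   0x2ULL
    #define POE_W   0x4ULL
    #define POE_RW  (POE_R | POE_W)
    #define POE_RXW (POE_R | POE_X | POE_W)
    #define POE_MASK 0xfULL
    #define POR_BITS_PER_PKEY 4

    #define PKEY_DISABLE_ACCESS     0x1
    #define PKEY_DISABLE_WRITE      0x2
    #define PKEY_DISABLE_READ       0x4     /* assumed value */
    #define PKEY_DISABLE_EXECUTE    0x8     /* assumed value */

    static uint64_t update_por(uint64_t por, int pkey, unsigned init_val)
    {
            uint64_t new_por = POE_RXW;
            unsigned shift = pkey * POR_BITS_PER_PKEY;

            if (init_val & PKEY_DISABLE_WRITE)
                    new_por &= ~POE_W;
            if (init_val & PKEY_DISABLE_ACCESS)
                    new_por &= ~POE_RW;
            if (init_val & PKEY_DISABLE_READ)
                    new_por &= ~POE_R;
            if (init_val & PKEY_DISABLE_EXECUTE)
                    new_por &= ~POE_X;

            por &= ~(POE_MASK << shift);    /* clear the old 4-bit field */
            return por | (new_por << shift);
    }

    int main(void)
    {
            uint64_t por = 0x7777777777777777ULL;   /* all keys RWX */

            /* Key 4 becomes read+execute only: 0x7777777777737777. */
            printf("%#llx\n", (unsigned long long)update_por(por, 4, PKEY_DISABLE_WRITE));
            return 0;
    }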
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f4bc6c5bac06..8abdc7fed321 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -36,8 +36,6 @@
#define TCR_KASLR_FLAGS 0
#endif
-#define TCR_SMP_FLAGS TCR_SHARED
-
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -469,7 +467,7 @@ SYM_FUNC_START(__cpu_setup)
tcr .req x16
mov_q mair, MAIR_EL1_SET
mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
- TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+ TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
tcr_clear_errata_bits tcr, x9, x5
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 6986827e0d64..264c5f9b97d8 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -38,33 +38,7 @@
seq_printf(m, fmt); \
})
-/*
- * The page dumper groups page table entries of the same type into a single
- * description. It uses pg_state to track the range information while
- * iterating over the pte entries. When the continuity is broken it then
- * dumps out a description of the range.
- */
-struct pg_state {
- struct ptdump_state ptdump;
- struct seq_file *seq;
- const struct addr_marker *marker;
- const struct mm_struct *mm;
- unsigned long start_address;
- int level;
- u64 current_prot;
- bool check_wx;
- unsigned long wx_pages;
- unsigned long uxn_pages;
-};
-
-struct prot_bits {
- u64 mask;
- u64 val;
- const char *set;
- const char *clear;
-};
-
-static const struct prot_bits pte_bits[] = {
+static const struct ptdump_prot_bits pte_bits[] = {
{
.mask = PTE_VALID,
.val = PTE_VALID,
@@ -143,14 +117,7 @@ static const struct prot_bits pte_bits[] = {
}
};
-struct pg_level {
- const struct prot_bits *bits;
- char name[4];
- int num;
- u64 mask;
-};
-
-static struct pg_level pg_level[] __ro_after_init = {
+static struct ptdump_pg_level kernel_pg_levels[] __ro_after_init = {
{ /* pgd */
.name = "PGD",
.bits = pte_bits,
@@ -174,7 +141,7 @@ static struct pg_level pg_level[] __ro_after_init = {
},
};
-static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
+static void dump_prot(struct ptdump_pg_state *st, const struct ptdump_prot_bits *bits,
size_t num)
{
unsigned i;
@@ -192,7 +159,7 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
}
}
-static void note_prot_uxn(struct pg_state *st, unsigned long addr)
+static void note_prot_uxn(struct ptdump_pg_state *st, unsigned long addr)
{
if (!st->check_wx)
return;
@@ -206,7 +173,7 @@ static void note_prot_uxn(struct pg_state *st, unsigned long addr)
st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}
-static void note_prot_wx(struct pg_state *st, unsigned long addr)
+static void note_prot_wx(struct ptdump_pg_state *st, unsigned long addr)
{
if (!st->check_wx)
return;
@@ -221,16 +188,17 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
-static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
- u64 val)
+void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+ u64 val)
{
- struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
+ struct ptdump_pg_state *st = container_of(pt_st, struct ptdump_pg_state, ptdump);
+ struct ptdump_pg_level *pg_level = st->pg_level;
static const char units[] = "KMGTPE";
u64 prot = 0;
/* check if the current level has been folded dynamically */
- if ((level == 1 && mm_p4d_folded(st->mm)) ||
- (level == 2 && mm_pud_folded(st->mm)))
+ if (st->mm && ((level == 1 && mm_p4d_folded(st->mm)) ||
+ (level == 2 && mm_pud_folded(st->mm))))
level = 0;
if (level >= 0)
@@ -286,15 +254,16 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
{
unsigned long end = ~0UL;
- struct pg_state st;
+ struct ptdump_pg_state st;
if (info->base_addr < TASK_SIZE_64)
end = TASK_SIZE_64;
- st = (struct pg_state){
+ st = (struct ptdump_pg_state){
.seq = s,
.marker = info->markers,
.mm = info->mm,
+ .pg_level = &kernel_pg_levels[0],
.level = -1,
.ptdump = {
.note_page = note_page,
@@ -312,10 +281,10 @@ static void __init ptdump_initialize(void)
{
unsigned i, j;
- for (i = 0; i < ARRAY_SIZE(pg_level); i++)
- if (pg_level[i].bits)
- for (j = 0; j < pg_level[i].num; j++)
- pg_level[i].mask |= pg_level[i].bits[j].mask;
+ for (i = 0; i < ARRAY_SIZE(kernel_pg_levels); i++)
+ if (kernel_pg_levels[i].bits)
+ for (j = 0; j < kernel_pg_levels[i].num; j++)
+ kernel_pg_levels[i].mask |= kernel_pg_levels[i].bits[j].mask;
}
static struct ptdump_info kernel_ptdump_info __ro_after_init = {
@@ -324,12 +293,13 @@ static struct ptdump_info kernel_ptdump_info __ro_after_init = {
bool ptdump_check_wx(void)
{
- struct pg_state st = {
+ struct ptdump_pg_state st = {
.seq = NULL,
.marker = (struct addr_marker[]) {
{ 0, NULL},
{ -1, NULL},
},
+ .pg_level = &kernel_pg_levels[0],
.level = -1,
.check_wx = true,
.ptdump = {
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 5139a28130c0..0f7b484cb2ff 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -42,14 +42,16 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
* the temporary mappings we use during restore.
*/
__set_pte(dst_ptep, pte_mkwrite_novma(pte));
- } else if ((debug_pagealloc_enabled() ||
- is_kfence_address((void *)addr)) && !pte_none(pte)) {
+ } else if (!pte_none(pte)) {
/*
		 * debug_pagealloc will remove the PTE_VALID bit if
* the page isn't in use by the resume kernel. It may have
* been in use by the original kernel, in which case we need
* to put it back in our copy to do the restore.
*
+ * Other cases include kfence / vmalloc / memfd_secret which
+	 * may call set_direct_map_invalid_noflush().
+ *
* Before marking this entry valid, check the pfn should
* be mapped.
*/
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index dd0bb069df4b..8bbd0b20136a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -26,9 +26,8 @@
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
-#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
-#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
@@ -63,11 +62,10 @@ static const int bpf2a64[] = {
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
[TMP_REG_3] = A64_R(12),
- /* tail_call_cnt */
- [TCALL_CNT] = A64_R(26),
+ /* tail_call_cnt_ptr */
+ [TCCNT_PTR] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
- [FP_BOTTOM] = A64_R(27),
/* callee saved register for kern_vm_start address */
[ARENA_VM_START] = A64_R(28),
};
@@ -78,11 +76,15 @@ struct jit_ctx {
int epilogue_offset;
int *offset;
int exentry_idx;
+ int nr_used_callee_reg;
+ u8 used_callee_reg[8]; /* r6~r9, fp, arena_vm_start */
__le32 *image;
__le32 *ro_image;
u32 stack_size;
- int fpb_offset;
u64 user_vm_start;
+ u64 arena_vm_start;
+ bool fp_used;
+ bool write;
};
struct bpf_plt {
@@ -96,7 +98,7 @@ struct bpf_plt {
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
- if (ctx->image != NULL)
+ if (ctx->image != NULL && ctx->write)
ctx->image[ctx->idx] = cpu_to_le32(insn);
ctx->idx++;
@@ -181,14 +183,47 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline void emit_call(u64 target, struct jit_ctx *ctx)
+static bool should_emit_indirect_call(long target, const struct jit_ctx *ctx)
+{
+ long offset;
+
+	/*
+	 * When ctx->ro_image is not allocated or the target is unknown,
+	 * emit an indirect call.
+	 */
+ if (!ctx->ro_image || !target)
+ return true;
+
+ offset = target - (long)&ctx->ro_image[ctx->idx];
+ return offset < -SZ_128M || offset >= SZ_128M;
+}
+
+static void emit_direct_call(u64 target, struct jit_ctx *ctx)
{
- u8 tmp = bpf2a64[TMP_REG_1];
+ u32 insn;
+ unsigned long pc;
+
+ pc = (unsigned long)&ctx->ro_image[ctx->idx];
+ insn = aarch64_insn_gen_branch_imm(pc, target, AARCH64_INSN_BRANCH_LINK);
+ emit(insn, ctx);
+}
+
+static void emit_indirect_call(u64 target, struct jit_ctx *ctx)
+{
+	u8 tmp = bpf2a64[TMP_REG_1];
+
emit_addr_mov_i64(tmp, target, ctx);
emit(A64_BLR(tmp), ctx);
}
+static void emit_call(u64 target, struct jit_ctx *ctx)
+{
+ if (should_emit_indirect_call((long)target, ctx))
+ emit_indirect_call(target, ctx);
+ else
+ emit_direct_call(target, ctx);
+}
+
static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
@@ -273,21 +308,143 @@ static bool is_lsi_offset(int offset, int scale)
return true;
}
-/* generated prologue:
+/* generated main prog prologue:
* bti c // if CONFIG_ARM64_BTI_KERNEL
* mov x9, lr
* nop // POKE_OFFSET
* paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL
* stp x29, lr, [sp, #-16]!
* mov x29, sp
- * stp x19, x20, [sp, #-16]!
- * stp x21, x22, [sp, #-16]!
- * stp x25, x26, [sp, #-16]!
- * stp x27, x28, [sp, #-16]!
- * mov x25, sp
- * mov tcc, #0
+ * stp xzr, x26, [sp, #-16]!
+ * mov x26, sp
* // PROLOGUE_OFFSET
+ * // save callee-saved registers
*/
+static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx)
+{
+ const bool is_main_prog = !bpf_is_subprog(ctx->prog);
+ const u8 ptr = bpf2a64[TCCNT_PTR];
+
+ if (is_main_prog) {
+ /* Initialize tail_call_cnt. */
+ emit(A64_PUSH(A64_ZR, ptr, A64_SP), ctx);
+ emit(A64_MOV(1, ptr, A64_SP), ctx);
+	} else {
+		emit(A64_PUSH(ptr, ptr, A64_SP), ctx);
+	}
+}
+
+static void find_used_callee_regs(struct jit_ctx *ctx)
+{
+ int i;
+ const struct bpf_prog *prog = ctx->prog;
+ const struct bpf_insn *insn = &prog->insnsi[0];
+ int reg_used = 0;
+
+ for (i = 0; i < prog->len; i++, insn++) {
+ if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
+ reg_used |= 1;
+
+ if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
+ reg_used |= 2;
+
+ if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
+ reg_used |= 4;
+
+ if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
+ reg_used |= 8;
+
+ if (insn->dst_reg == BPF_REG_FP || insn->src_reg == BPF_REG_FP) {
+ ctx->fp_used = true;
+ reg_used |= 16;
+ }
+ }
+
+ i = 0;
+ if (reg_used & 1)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_6];
+
+ if (reg_used & 2)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_7];
+
+ if (reg_used & 4)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_8];
+
+ if (reg_used & 8)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
+
+ if (reg_used & 16)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+
+ if (ctx->arena_vm_start)
+ ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
+
+ ctx->nr_used_callee_reg = i;
+}
+
+/* Save callee-saved registers */
+static void push_callee_regs(struct jit_ctx *ctx)
+{
+ int reg1, reg2, i;
+
+	/*
+	 * A program acting as an exception boundary should save all ARM64
+	 * callee-saved registers, as the exception callback needs to
+	 * recover them all in its epilogue.
+	 */
+ if (ctx->prog->aux->exception_boundary) {
+ emit(A64_PUSH(A64_R(19), A64_R(20), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(21), A64_R(22), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
+ } else {
+ find_used_callee_regs(ctx);
+ for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {
+ reg1 = ctx->used_callee_reg[i];
+ reg2 = ctx->used_callee_reg[i + 1];
+ emit(A64_PUSH(reg1, reg2, A64_SP), ctx);
+ }
+ if (i < ctx->nr_used_callee_reg) {
+ reg1 = ctx->used_callee_reg[i];
+ /* keep SP 16-byte aligned */
+ emit(A64_PUSH(reg1, A64_ZR, A64_SP), ctx);
+ }
+ }
+}
+
+/* Restore callee-saved registers */
+static void pop_callee_regs(struct jit_ctx *ctx)
+{
+ struct bpf_prog_aux *aux = ctx->prog->aux;
+ int reg1, reg2, i;
+
+	/*
+	 * A program acting as an exception boundary pushes R23 and R24 in
+	 * addition to the BPF callee-saved registers. The exception callback
+	 * uses the boundary program's stack frame, so recover these extra
+	 * registers in both cases.
+	 */
+ if (aux->exception_boundary || aux->exception_cb) {
+ emit(A64_POP(A64_R(27), A64_R(28), A64_SP), ctx);
+ emit(A64_POP(A64_R(25), A64_R(26), A64_SP), ctx);
+ emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+ emit(A64_POP(A64_R(21), A64_R(22), A64_SP), ctx);
+ emit(A64_POP(A64_R(19), A64_R(20), A64_SP), ctx);
+ } else {
+ i = ctx->nr_used_callee_reg - 1;
+ if (ctx->nr_used_callee_reg % 2 != 0) {
+ reg1 = ctx->used_callee_reg[i];
+ emit(A64_POP(reg1, A64_ZR, A64_SP), ctx);
+ i--;
+ }
+ while (i > 0) {
+ reg1 = ctx->used_callee_reg[i - 1];
+ reg2 = ctx->used_callee_reg[i];
+ emit(A64_POP(reg1, reg2, A64_SP), ctx);
+ i -= 2;
+ }
+ }
+}
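push_callee_regs()/pop_callee_regs() above save the used registers two at a time and pad with xzr when the count is odd, keeping SP 16-byte aligned. A sketch that prints the store sequence for a hypothetical register set:

    /* Prints the pair-wise save order produced by push_callee_regs();
     * the register numbers are a hypothetical bpf2a64[] selection. */
    #include <stdio.h>

    int main(void)
    {
            int used[8] = { 19, 20, 21, 25, 28 };   /* e.g. r6, r7, r8, fp, arena */
            int n = 5, i;

            for (i = 0; i + 1 < n; i += 2)
                    printf("stp x%d, x%d, [sp, #-16]!\n", used[i], used[i + 1]);
            if (i < n)  /* odd count: pad with xzr to keep SP aligned */
                    printf("stp x%d, xzr, [sp, #-16]!\n", used[i]);
            return 0;
    }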
#define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
#define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
@@ -296,20 +453,13 @@ static bool is_lsi_offset(int offset, int scale)
#define POKE_OFFSET (BTI_INSNS + 1)
/* Tail call offset to jump into */
-#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
+#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 4)
-static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb, u64 arena_vm_start)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
- const u8 r6 = bpf2a64[BPF_REG_6];
- const u8 r7 = bpf2a64[BPF_REG_7];
- const u8 r8 = bpf2a64[BPF_REG_8];
- const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 tcc = bpf2a64[TCALL_CNT];
- const u8 fpb = bpf2a64[FP_BOTTOM];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const int idx0 = ctx->idx;
int cur_offset;
@@ -348,19 +498,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
- if (!is_exception_cb) {
+ if (!prog->aux->exception_cb) {
/* Sign lr */
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
emit(A64_PACIASP, ctx);
+
/* Save FP and LR registers to stay align with ARM64 AAPCS */
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
- /* Save callee-saved registers */
- emit(A64_PUSH(r6, r7, A64_SP), ctx);
- emit(A64_PUSH(r8, r9, A64_SP), ctx);
- emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ prepare_bpf_tail_call_cnt(ctx);
+
+ if (!ebpf_from_cbpf && is_main_prog) {
+ cur_offset = ctx->idx - idx0;
+ if (cur_offset != PROLOGUE_OFFSET) {
+ pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
+ cur_offset, PROLOGUE_OFFSET);
+ return -1;
+ }
+ /* BTI landing pad for the tail call, done with a BR */
+ emit_bti(A64_BTI_J, ctx);
+ }
+ push_callee_regs(ctx);
} else {
/*
* Exception callback receives FP of Main Program as third
@@ -372,58 +531,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
* callee-saved registers. The exception callback will not push
* anything and re-use the main program's stack.
*
- * 10 registers are on the stack
+ * 12 registers are on the stack
*/
- emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx);
+ emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
}
- /* Set up BPF prog stack base register */
- emit(A64_MOV(1, fp, A64_SP), ctx);
-
- if (!ebpf_from_cbpf && is_main_prog) {
- /* Initialize tail_call_cnt */
- emit(A64_MOVZ(1, tcc, 0, 0), ctx);
-
- cur_offset = ctx->idx - idx0;
- if (cur_offset != PROLOGUE_OFFSET) {
- pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
- cur_offset, PROLOGUE_OFFSET);
- return -1;
- }
-
- /* BTI landing pad for the tail call, done with a BR */
- emit_bti(A64_BTI_J, ctx);
- }
-
- /*
- * Program acting as exception boundary should save all ARM64
- * Callee-saved registers as the exception callback needs to recover
- * all ARM64 Callee-saved registers in its epilogue.
- */
- if (prog->aux->exception_boundary) {
- /*
- * As we are pushing two more registers, BPF_FP should be moved
- * 16 bytes
- */
- emit(A64_SUB_I(1, fp, fp, 16), ctx);
- emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
- }
-
- emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
+ if (ctx->fp_used)
+ /* Set up BPF prog stack base register */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ if (ctx->stack_size)
+ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
- if (arena_vm_start)
- emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+ if (ctx->arena_vm_start)
+ emit_a64_mov_i64(arena_vm_base, ctx->arena_vm_start, ctx);
return 0;
}
-static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
@@ -432,11 +561,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 prg = bpf2a64[TMP_REG_2];
- const u8 tcc = bpf2a64[TCALL_CNT];
- const int idx0 = ctx->idx;
-#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+ const u8 tcc = bpf2a64[TMP_REG_3];
+ const u8 ptr = bpf2a64[TCCNT_PTR];
size_t off;
+ __le32 *branch1 = NULL;
+ __le32 *branch2 = NULL;
+ __le32 *branch3 = NULL;
/* if (index >= array->map.max_entries)
* goto out;
@@ -446,16 +576,20 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_LDR32(tmp, r2, tmp), ctx);
emit(A64_MOV(0, r3, r3), ctx);
emit(A64_CMP(0, r3, tmp), ctx);
- emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
+ branch1 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
/*
- * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+ * if ((*tail_call_cnt_ptr) >= MAX_TAIL_CALL_CNT)
* goto out;
- * tail_call_cnt++;
*/
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
+ emit(A64_LDR64I(tcc, ptr, 0), ctx);
emit(A64_CMP(1, tcc, tmp), ctx);
- emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
+ branch2 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
+
+ /* (*tail_call_cnt_ptr)++; */
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
/* prog = array->ptrs[index];
@@ -467,27 +601,37 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_ADD(1, tmp, r2, tmp), ctx);
emit(A64_LSL(1, prg, r3, 3), ctx);
emit(A64_LDR64(prg, tmp, prg), ctx);
- emit(A64_CBZ(1, prg, jmp_offset), ctx);
+ branch3 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
+
+ /* Update tail_call_cnt if the slot is populated. */
+ emit(A64_STR64I(tcc, ptr, 0), ctx);
+
+ /* restore SP */
+ if (ctx->stack_size)
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+ pop_callee_regs(ctx);
/* goto *(prog->bpf_func + prologue_offset); */
off = offsetof(struct bpf_prog, bpf_func);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR64(tmp, prg, tmp), ctx);
emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
- emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
emit(A64_BR(tmp), ctx);
- /* out: */
- if (out_offset == -1)
- out_offset = cur_offset;
- if (cur_offset != out_offset) {
- pr_err_once("tail_call out_offset = %d, expected %d!\n",
- cur_offset, out_offset);
- return -1;
+ if (ctx->image) {
+ off = &ctx->image[ctx->idx] - branch1;
+ *branch1 = cpu_to_le32(A64_B_(A64_COND_CS, off));
+
+ off = &ctx->image[ctx->idx] - branch2;
+ *branch2 = cpu_to_le32(A64_B_(A64_COND_CS, off));
+
+ off = &ctx->image[ctx->idx] - branch3;
+ *branch3 = cpu_to_le32(A64_CBZ(1, prg, off));
}
+
return 0;
-#undef cur_offset
-#undef jmp_offset
}
#ifdef CONFIG_ARM64_LSE_ATOMICS
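The change above replaces a dedicated counter register with a pointer to a single counter that lives in the main program's frame, so tail calls made from subprograms count against the same MAX_TAIL_CALL_CNT budget. A C model of that scheme:

    /* C model of the TCCNT_PTR scheme: the main program owns one counter
     * on its stack and every (sub)program works through a pointer to it. */
    #include <stdio.h>

    #define MAX_TAIL_CALL_CNT 33

    static int tail_call(int *tcc_ptr)
    {
            if (*tcc_ptr >= MAX_TAIL_CALL_CNT)
                    return -1;      /* "goto out" in the JITed code */
            (*tcc_ptr)++;           /* (*tail_call_cnt_ptr)++ */
            return 0;
    }

    static int subprog(int *tcc_ptr)
    {
            return tail_call(tcc_ptr);      /* shares the caller's counter */
    }

    int main(void)
    {
            int tail_call_cnt = 0;  /* lives in the main prog's frame */
            int ok = 0, i;

            for (i = 0; i < 40; i++)
                    if (tail_call(&tail_call_cnt) == 0)
                            ok++;
            printf("%d tail calls allowed\n", ok);          /* 33 */
            printf("subprog: %d\n", subprog(&tail_call_cnt)); /* -1: limit shared */
            return 0;
    }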
@@ -713,36 +857,18 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
+static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
- const u8 r6 = bpf2a64[BPF_REG_6];
- const u8 r7 = bpf2a64[BPF_REG_7];
- const u8 r8 = bpf2a64[BPF_REG_8];
- const u8 r9 = bpf2a64[BPF_REG_9];
- const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 ptr = bpf2a64[TCCNT_PTR];
/* We're done with BPF stack */
- emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ if (ctx->stack_size)
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
- /*
- * Program acting as exception boundary pushes R23 and R24 in addition
- * to BPF callee-saved registers. Exception callback uses the boundary
- * program's stack frame, so recover these extra registers in the above
- * two cases.
- */
- if (ctx->prog->aux->exception_boundary || is_exception_cb)
- emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+ pop_callee_regs(ctx);
- /* Restore x27 and x28 */
- emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
- /* Restore fs (x25) and x26 */
- emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
-
- /* Restore callee-saved register */
- emit(A64_POP(r8, r9, A64_SP), ctx);
- emit(A64_POP(r6, r7, A64_SP), ctx);
+ emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
/* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
@@ -862,7 +988,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 fpb = bpf2a64[FP_BOTTOM];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const s16 off = insn->off;
const s32 imm = insn->imm;
@@ -1314,9 +1439,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
src = tmp2;
}
- if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- src_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (src == fp) {
+ src_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
src_adj = src;
off_adj = off;
@@ -1407,9 +1532,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
}
- if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- dst_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (dst == fp) {
+ dst_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
off_adj = off;
@@ -1469,9 +1594,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
}
- if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- dst_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (dst == fp) {
+ dst_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
off_adj = off;
@@ -1540,79 +1665,6 @@ emit_cond_jmp:
return 0;
}
-/*
- * Return 0 if FP may change at runtime, otherwise find the minimum negative
- * offset to FP, converts it to positive number, and align down to 8 bytes.
- */
-static int find_fpb_offset(struct bpf_prog *prog)
-{
- int i;
- int offset = 0;
-
- for (i = 0; i < prog->len; i++) {
- const struct bpf_insn *insn = &prog->insnsi[i];
- const u8 class = BPF_CLASS(insn->code);
- const u8 mode = BPF_MODE(insn->code);
- const u8 src = insn->src_reg;
- const u8 dst = insn->dst_reg;
- const s32 imm = insn->imm;
- const s16 off = insn->off;
-
- switch (class) {
- case BPF_STX:
- case BPF_ST:
- /* fp holds atomic operation result */
- if (class == BPF_STX && mode == BPF_ATOMIC &&
- ((imm == BPF_XCHG ||
- imm == (BPF_FETCH | BPF_ADD) ||
- imm == (BPF_FETCH | BPF_AND) ||
- imm == (BPF_FETCH | BPF_XOR) ||
- imm == (BPF_FETCH | BPF_OR)) &&
- src == BPF_REG_FP))
- return 0;
-
- if (mode == BPF_MEM && dst == BPF_REG_FP &&
- off < offset)
- offset = insn->off;
- break;
-
- case BPF_JMP32:
- case BPF_JMP:
- break;
-
- case BPF_LDX:
- case BPF_LD:
- /* fp holds load result */
- if (dst == BPF_REG_FP)
- return 0;
-
- if (class == BPF_LDX && mode == BPF_MEM &&
- src == BPF_REG_FP && off < offset)
- offset = off;
- break;
-
- case BPF_ALU:
- case BPF_ALU64:
- default:
- /* fp holds ALU result */
- if (dst == BPF_REG_FP)
- return 0;
- }
- }
-
- if (offset < 0) {
- /*
- * safely be converted to a positive 'int', since insn->off
- * is 's16'
- */
- offset = -offset;
- /* align down to 8 bytes */
- offset = ALIGN_DOWN(offset, 8);
- }
-
- return offset;
-}
-
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
@@ -1631,13 +1683,11 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
continue;
}
if (ret)
@@ -1648,8 +1698,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
* the last element with the offset after the last
* instruction (end of program)
*/
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
return 0;
}
@@ -1701,9 +1750,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
- u64 arena_vm_start;
u8 *image_ptr;
u8 *ro_image_ptr;
+ int body_idx;
+ int exentry_idx;
if (!prog->jit_requested)
return orig_prog;
@@ -1719,7 +1769,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
- arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1749,17 +1798,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- ctx.fpb_offset = find_fpb_offset(prog);
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
+ ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
- /*
- * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+ /* Pass 1: Estimate the maximum image size.
*
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
- arena_vm_start)) {
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
@@ -1770,14 +1817,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx, prog->aux->exception_cb);
+ build_epilogue(&ctx);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
extable_size = prog->aux->num_exentries *
sizeof(struct exception_table_entry);
- /* Now we know the actual image size. */
+ /* Now we know the maximum image size. */
prog_size = sizeof(u32) * ctx.idx;
/* also allocate space for plt target */
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
@@ -1790,7 +1837,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- /* 2. Now, the actual pass. */
+ /* Pass 2: Determine jited position and result for each instruction */
/*
* Use the image(RW) for writing the JITed instructions. But also save
@@ -1806,30 +1853,56 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
+ ctx.write = true;
+
+ build_prologue(&ctx, was_classic);
- build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
+ /* Record exentry_idx and body_idx before first build_body */
+ exentry_idx = ctx.exentry_idx;
+ body_idx = ctx.idx;
+	/* Don't write body instructions to memory for now */
+ ctx.write = false;
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_free_hdr;
}
- build_epilogue(&ctx, prog->aux->exception_cb);
+ ctx.epilogue_offset = ctx.idx;
+ ctx.exentry_idx = exentry_idx;
+ ctx.idx = body_idx;
+ ctx.write = true;
+
+ /* Pass 3: Adjust jump offset and write final image */
+ if (build_body(&ctx, extra_pass) ||
+ WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) {
+ prog = orig_prog;
+ goto out_free_hdr;
+ }
+
+ build_epilogue(&ctx);
build_plt(&ctx);
- /* 3. Extra pass to validate JITed code. */
+ /* Extra pass to validate JITed code. */
if (validate_ctx(&ctx)) {
prog = orig_prog;
goto out_free_hdr;
}
+ /* update the real prog size */
+ prog_size = sizeof(u32) * ctx.idx;
+
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
if (!prog->is_func || extra_pass) {
- if (extra_pass && ctx.idx != jit_data->ctx.idx) {
- pr_err_once("multi-func JIT bug %d != %d\n",
+ /* The jited image may shrink since the jited result for
+ * BPF_CALL to subprog may be changed from indirect call
+ * to direct call.
+ */
+ if (extra_pass && ctx.idx > jit_data->ctx.idx) {
+ pr_err_once("multi-func JIT bug %d > %d\n",
ctx.idx, jit_data->ctx.idx);
prog->bpf_func = NULL;
prog->jited = 0;
@@ -2300,6 +2373,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
.image = image,
.ro_image = ro_image,
.idx = 0,
+ .write = true,
};
nregs = btf_func_model_nregs(m);
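The JIT now sizes the image in pass 1, dry-runs the body in pass 2 so branch offsets settle, and writes the final image in pass 3, all through an emit() gated by ctx->write. A toy version of that layout (the instruction encodings are just example constants):

    /* Toy multi-pass emitter: emit() always advances idx but only
     * stores when ctx.write is set. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct jit_ctx {
            uint32_t *image;
            int idx;
            bool write;
    };

    static void emit(struct jit_ctx *ctx, uint32_t insn)
    {
            if (ctx->image && ctx->write)
                    ctx->image[ctx->idx] = insn;
            ctx->idx++;
    }

    static void build_body(struct jit_ctx *ctx)
    {
            emit(ctx, 0xd503201f);  /* nop */
            emit(ctx, 0xd65f03c0);  /* ret */
    }

    int main(void)
    {
            struct jit_ctx ctx = { 0 };

            build_body(&ctx);                       /* pass 1: size only */
            ctx.image = calloc(ctx.idx, sizeof(uint32_t));

            ctx.idx = 0; ctx.write = false;
            build_body(&ctx);                       /* pass 2: dry run */

            ctx.idx = 0; ctx.write = true;
            build_body(&ctx);                       /* pass 3: final image */

            printf("%d insns, first %#x\n", ctx.idx, ctx.image[0]);
            free(ctx.image);
            return 0;
    }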
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index ac3429d892b9..eedb5acc21ed 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -45,6 +45,7 @@ HAS_MOPS
HAS_NESTED_VIRT
HAS_PAN
HAS_S1PIE
+HAS_S1POE
HAS_RAS_EXTN
HAS_RNG
HAS_SB
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 7ceaa1e0b4bc..8d637ac4b7c6 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2029,6 +2029,31 @@ Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg
+Sysreg PMICNTR_EL0 3 3 9 4 0
+Field 63:0 ICNT
+EndSysreg
+
+Sysreg PMICFILTR_EL0 3 3 9 6 0
+Res0 63:59
+Field 58 SYNC
+Field 57:56 VS
+Res0 55:32
+Field 31 P
+Field 30 U
+Field 29 NSK
+Field 28 NSU
+Field 27 NSH
+Field 26 M
+Res0 25
+Field 24 SH
+Field 23 T
+Field 22 RLK
+Field 21 RLU
+Field 20 RLH
+Res0 19:16
+Field 15:0 evtCount
+EndSysreg
+
Sysreg PMSCR_EL1 3 0 9 9 0
Res0 63:8
Field 7:6 PCT
@@ -2153,6 +2178,11 @@ Field 4 P
Field 3:0 ALIGN
EndSysreg
+Sysreg PMSELR_EL0 3 3 9 12 5
+Res0 63:5
+Field 4:0 SEL
+EndSysreg
+
SysregFields CONTEXTIDR_ELx
Res0 63:32
Field 31:0 PROCID
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
index 7f826331d409..1047865e82a9 100644
--- a/arch/csky/abiv1/mmap.c
+++ b/arch/csky/abiv1/mmap.c
@@ -23,7 +23,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 2ca886e4a458..5c9ef63c29f1 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -45,9 +45,16 @@ arch_initcall(vdso_init);
int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_len;
int ret;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
+ static struct vm_special_mapping vvar_mapping = {
+ .name = "[vvar]",
+ };
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
@@ -65,22 +72,29 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret =
- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ vdso_mapping.pages = vdso_pagelist;
+ vma =
+ _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
- vdso_pagelist);
+ &vdso_mapping);
- if (unlikely(ret)) {
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
mm->context.vdso = NULL;
goto end;
}
vdso_base += (vdso_pages << PAGE_SHIFT);
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ vvar_mapping.pages = &vdso_pagelist[vdso_pages];
+ vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vvar_mapping);
- if (unlikely(ret))
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
mm->context.vdso = NULL;
+ goto end;
+ }
+ ret = 0;
end:
mmap_write_unlock(mm);
return ret;
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 2e4872d62124..8119084dc519 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -51,7 +51,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long vdso_base;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
if (mmap_write_lock_killable(mm))
return -EINTR;
@@ -66,16 +70,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}
/* MAYWRITE to allow gdb to COW and set breakpoints. */
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ vdso_mapping.pages = &vdso_page;
+ vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_page);
+ &vdso_mapping);
- if (ret)
+ ret = PTR_ERR(vma);
+ if (IS_ERR(vma))
goto up_fail;
mm->context.vdso = (void *)vdso_base;
-
+ ret = 0;
up_fail:
mmap_write_unlock(mm);
return ret;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 70f169210b52..0eb0436ad4ce 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -85,6 +85,7 @@ config LOONGARCH
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -190,6 +191,7 @@ config LOONGARCH
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
select USER_STACKTRACE_SUPPORT
+ select VDSO_GETRANDOM
select ZONE_DMA32
config 32BIT
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index b4252c357c8e..75b366407a60 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -96,7 +96,6 @@ CONFIG_ZPOOL=y
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=m
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2bb3676429c0..5b5a6c90e6e2 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -6,8 +6,8 @@ generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
-generic-y += qspinlock.h
generic-y += user.h
generic-y += ioctl.h
+generic-y += mmzone.h
generic-y += statfs.h
generic-y += param.h
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index 2eafe6a6aca8..16a716f88a5c 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -65,5 +65,6 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 48b9f7168bcc..843f9c4ec980 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -99,6 +99,7 @@ enum cpu_type_enum {
#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
+#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -127,5 +128,6 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index 1d7feb719515..10da8d6961cb 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -12,12 +12,13 @@
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
-#define NR_IPI 3
+#define NR_IPI 4
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNCTION,
IPI_IRQ_WORK,
+ IPI_CLEAR_VECTOR,
};
typedef struct {
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 480418bc5071..9c2ca785faa9 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -39,11 +39,22 @@ void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16
+/*
+ * 256-vector mapping for AVECINTC:
+ *
+ * 0 - 15: mapped to the classic IPs, e.g. IP0-12.
+ * 16 - 255: vectors for external IRQs.
+ */
+#define NR_VECTORS 256
+#define NR_LEGACY_VECTORS 16
+#define IRQ_MATRIX_BITS NR_VECTORS
+
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
#define MAX_IO_PICS 2
-#define NR_IRQS (64 + (256 * MAX_IO_PICS))
+#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
int node;
@@ -65,7 +76,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
#define LOONGSON_CPU_IRQ_BASE 16
-#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
+#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15)
#define LOONGSON_PCH_IRQ_BASE 64
#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
@@ -88,20 +99,8 @@ struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic;
-int liointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lio_pic *acpi_liointc);
-int eiointc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_eio_pic *acpi_eiointc);
-
-int htvec_acpi_init(struct irq_domain *parent,
- struct acpi_madt_ht_pic *acpi_htvec);
-int pch_lpc_acpi_init(struct irq_domain *parent,
- struct acpi_madt_lpc_pic *acpi_pchlpc);
-int pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi);
-int pch_pic_acpi_init(struct irq_domain *parent,
- struct acpi_madt_bio_pic *acpi_pchpic);
-int find_pch_pic(u32 gsi);
+void complete_irq_moving(void);
+
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc;
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
index 724ca8b7b401..4a76ce796f1f 100644
--- a/arch/loongarch/include/asm/kvm_csr.h
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
+ __v; \
})
#define gcsr_xchg(v, m, csr) \
@@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
+
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
@@ -208,4 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
csr->csrs[gid] |= val & _mask;
}
+#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
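A note on the `__v;` line added to the gcsr accessor macro above: it makes the GNU C statement expression evaluate to the CSR's previous value, so a write doubles as an exchange. That is what lets the new kvm_read_clear_hw_gcsr() save and zero a guest CSR in one shot. A minimal sketch of the same pattern in plain C, with hypothetical names:

#define write_ret_old(var, new)                 \
({                                              \
        unsigned long __v = (var);              \
        (var) = (new);                          \
        __v;    /* value of the whole expression */ \
})

/* Save-and-clear in one expression, like kvm_read_clear_hw_gcsr(): */
/* saved = write_ret_old(reg, 0); */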
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 5f0677e03817..d6bb72424027 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -30,6 +30,7 @@
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
+#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -60,9 +61,13 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
+#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
+ /* Host PMU CSR */
+ u64 perf_ctrl[HOST_MAX_PMNUM];
+ u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
@@ -107,6 +112,8 @@ struct kvm_arch {
unsigned int root_level;
spinlock_t phyid_map_lock;
struct kvm_phyid_map *phyid_map;
+ /* Enabled PV features */
+ unsigned long pv_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
@@ -133,8 +140,15 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 3)
+#define KVM_LARCH_PMU (0x1 << 4)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
+
+#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
+#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
+ BIT(KVM_FEATURE_STEAL_TIME) | \
+ BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
/*
@@ -168,10 +182,14 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
+ /* Guest max PMU CSR id */
+ int max_pmu_csrid;
+
/* GPR used as IO source/target */
u32 io_gpr;
@@ -239,6 +257,21 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
+}
+
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+ return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 43ec61589e6c..c4e84227280d 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -2,6 +2,8 @@
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
+#include <uapi/asm/kvm_para.h>
+
/*
* Hypercall code field
*/
@@ -154,10 +156,20 @@ static __always_inline long kvm_hypercall5(u64 fid,
return ret;
}
+#ifdef CONFIG_PARAVIRT
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+#else
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
+#endif
static inline unsigned int kvm_arch_para_hints(void)
{
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 86570084e05a..d7e8f7d50ee0 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -75,6 +75,12 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+#endif
+
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
@@ -124,4 +130,9 @@ static inline bool kvm_pvtime_supported(void)
return !!sched_info_on();
}
+static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
+{
+ return vcpu->kvm->arch.pv_features & BIT(feature);
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 04a78010fc72..04bf1a7f903a 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -119,6 +119,7 @@
#define CPUCFG6_PMP BIT(0)
#define CPUCFG6_PAMVER GENMASK(3, 1)
#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
#define CPUCFG6_UPM BIT(14)
@@ -160,16 +161,8 @@
/*
* CPUCFG index area: 0x40000000 -- 0x400000ff
- * SW emulation for KVM hypervirsor
+ * SW emulation for KVM hypervisor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
-#define CPUCFG_KVM_BASE 0x40000000
-#define CPUCFG_KVM_SIZE 0x100
-
-#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
-#define KVM_SIGNATURE "KVM\0"
-#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
-#define KVM_FEATURE_IPI BIT(1)
-#define KVM_FEATURE_STEAL_TIME BIT(2)
#ifndef __ASSEMBLY__
@@ -253,8 +246,8 @@
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
#define CSR_ESTAT_IS_SHIFT 0
-#define CSR_ESTAT_IS_WIDTH 14
-#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
@@ -649,6 +642,13 @@
#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
+#define LOONGARCH_CSR_ISR0 0xa0
+#define LOONGARCH_CSR_ISR1 0xa1
+#define LOONGARCH_CSR_ISR2 0xa2
+#define LOONGARCH_CSR_ISR3 0xa3
+
+#define LOONGARCH_CSR_IRR 0xa4
+
#define LOONGARCH_CSR_PRID 0xc0
/* Shadow MCSR : 0xc0 ~ 0xff */
@@ -1011,7 +1011,7 @@
/*
* CSR_ECFG IM
*/
-#define ECFG0_IM 0x00001fff
+#define ECFG0_IM 0x00005fff
#define ECFGB_SIP0 0
#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1 1
@@ -1054,6 +1054,7 @@
#define IOCSRF_EIODECODE BIT_ULL(9)
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
+#define IOCSRF_AVEC BIT_ULL(15)
#define LOONGARCH_IOCSR_VENDOR 0x10
@@ -1065,6 +1066,7 @@
#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
@@ -1387,9 +1389,10 @@ __BUILD_CSR_OP(tlbidx)
#define INT_TI 11 /* Timer */
#define INT_IPI 12
#define INT_NMI 13
+#define INT_AVEC 14
/* ExcCodes corresponding to interrupts */
-#define EXCCODE_INT_NUM (INT_NMI + 1)
+#define EXCCODE_INT_NUM (INT_AVEC + 1)
#define EXCCODE_INT_START 64
#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
deleted file mode 100644
index 2b9a90727e19..000000000000
--- a/arch/loongarch/include/asm/mmzone.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Author: Huacai Chen (chenhuacai@loongson.cn)
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/page.h>
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index dddec49671ae..3f4323603e6a 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -19,6 +19,7 @@ static inline u64 paravirt_steal_clock(int cpu)
int __init pv_ipi_init(void);
int __init pv_time_init(void);
+int __init pv_spinlock_init(void);
#else
@@ -31,5 +32,11 @@ static inline int pv_time_init(void)
{
return 0;
}
+
+static inline int pv_spinlock_init(void)
+{
+ return 0;
+}
+
#endif // CONFIG_PARAVIRT
#endif
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..e76d3aa1e1eb
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <linux/jump_label.h>
+
+#ifdef CONFIG_PARAVIRT
+
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ int val;
+
+ if (!static_branch_unlikely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+__retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
+
+ return true;
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
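The fallback above intentionally avoids queued (fair) spinlocks under a hypervisor: if the vCPU holding the lock is preempted, every queued waiter behind it stalls, while an unfair test-and-set lock lets any running vCPU take the lock as soon as it is released. As a rough user-space analogue of the same loop (a sketch using C11 atomics, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock_val;     /* 0 = unlocked, 1 = locked */

static bool ts_spin_lock(void)
{
        int val;

        for (;;) {
                val = atomic_load(&lock_val);
                /* Attempt the exchange only when the lock looks free. */
                if (val == 0 &&
                    atomic_compare_exchange_weak(&lock_val, &val, 1))
                        return true;
                /* stand-in for cpu_relax() before retrying */
        }
}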
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 50db503f44e3..3383c9d24e94 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -70,10 +70,12 @@ extern int __cpu_logical_map[NR_CPUS];
#define ACTION_RESCHEDULE 1
#define ACTION_CALL_FUNCTION 2
#define ACTION_IRQ_WORK 3
+#define ACTION_CLEAR_VECTOR 4
#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
+#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
struct secondary_data {
unsigned long stack;
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 66128dec0bf6..50273c9187d0 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
extern cpumask_t cpus_on_node[];
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..02f36772541b
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
+{
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_getrandom;
+ register void *buffer asm("a0") = _buffer;
+ register size_t len asm("a1") = _len;
+ register unsigned int flags asm("a2") = _flags;
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (buffer), "r" (len), "r" (flags)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+
+ return ret;
+}
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ return (const struct vdso_rng_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START *
+ PAGE_SIZE + offsetof(struct loongarch_vdso_data, rng_data));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 5a12309d9fb5..e31ac7474513 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -4,6 +4,9 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#ifndef _ASM_VDSO_VDSO_H
+#define _ASM_VDSO_VDSO_H
+
#ifndef __ASSEMBLY__
#include <asm/asm.h>
@@ -16,6 +19,7 @@ struct vdso_pcpu_data {
struct loongarch_vdso_data {
struct vdso_pcpu_data pdata[NR_CPUS];
+ struct vdso_rng_data rng_data;
};
/*
@@ -63,3 +67,5 @@ static inline unsigned long get_vdso_data(void)
}
#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 5de615383a22..b1273ce6f140 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -8,6 +8,7 @@
#include <vdso/datapage.h>
extern struct vdso_data *vdso_data;
+extern struct vdso_rng_data *vdso_rng_data;
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
@@ -19,6 +20,13 @@ struct vdso_data *__loongarch_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
+static __always_inline
+struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void)
+{
+ return vdso_rng_data;
+}
+#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild
index c6d141d7b7d7..517761419999 100644
--- a/arch/loongarch/include/uapi/asm/Kbuild
+++ b/arch/loongarch/include/uapi/asm/Kbuild
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
syscall-y += unistd_64.h
-
-generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index ddc5cab0ffd0..70d89070bfeb 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -64,6 +64,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
#define KVM_CSR_IDX_MASK 0x7fff
#define KVM_CPUCFG_IDX_MASK 0x7fff
@@ -77,11 +78,30 @@ struct kvm_fpu {
/* Debugging: Special instruction for software breakpoint */
#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+/* LBT registers */
+#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
+
#define LOONGARCH_REG_SHIFT 3
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+/* Device Control API on vm fd */
+#define KVM_LOONGARCH_VM_FEAT_CTRL 0
+#define KVM_LOONGARCH_VM_FEAT_LSX 0
+#define KVM_LOONGARCH_VM_FEAT_LASX 1
+#define KVM_LOONGARCH_VM_FEAT_X86BT 2
+#define KVM_LOONGARCH_VM_FEAT_ARMBT 3
+#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4
+#define KVM_LOONGARCH_VM_FEAT_PMU 5
+#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
+#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
+
/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000000..b0604aa9b4bb
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_KVM_PARA_H
+#define _UAPI_ASM_KVM_PARA_H
+
+#include <linux/types.h>
+
+/*
+ * CPUCFG index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE 0x40000000
+#define CPUCFG_KVM_SIZE 0x100
+#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
+#define KVM_SIGNATURE "KVM\0"
+#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_IPI 1
+#define KVM_FEATURE_STEAL_TIME 2
+/* Bits 24 - 31 are features configurable by the userspace VMM */
+#define KVM_FEATURE_VIRT_EXTIOI 24
+
+#endif /* _UAPI_ASM_KVM_PARA_H */
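Note that KVM_FEATURE_IPI and KVM_FEATURE_STEAL_TIME are now bit numbers rather than BIT() masks, which is why the paravirt.c hunks below test them via kvm_para_has_feature() and pass BIT(id) to the notify hypercall. A minimal guest-side sketch (the generic helper does the shift itself):

#include <linux/kvm_para.h>

static bool guest_has_steal_time(void)
{
        /* expands to kvm_arch_para_features() & (1UL << KVM_FEATURE_STEAL_TIME) */
        return kvm_para_has_feature(KVM_FEATURE_STEAL_TIME);
}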
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 55320813ee08..14f0449f5452 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -106,7 +106,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
}
-
config = read_cpucfg(LOONGARCH_CPUCFG2);
if (config & CPUCFG2_LAM) {
c->options |= LOONGARCH_CPU_LAM;
@@ -174,6 +173,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_FLATMODE;
if (config & IOCSRF_EIODECODE)
c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_AVEC)
+ c->options |= LOONGARCH_CPU_AVECINT;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index adac8fcbb2ac..d129039b368b 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -87,6 +87,18 @@ static void __init init_vec_parent_group(void)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
+int __init arch_probe_nr_irqs(void)
+{
+ int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
+
+ if (!cpu_has_avecint)
+ nr_irqs = (64 + NR_VECTORS * nr_io_pics);
+ else
+ nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
+
+ return NR_IRQS_LEGACY;
+}
+
void __init init_IRQ(void)
{
int i;
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 8fe21f868f72..84fe7f854820 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -27,10 +27,7 @@
#include <asm/time.h>
int numa_off;
-struct pglist_data *node_data[MAX_NUMNODES];
unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(node_distances);
static struct numa_meminfo numa_meminfo;
@@ -190,24 +187,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
-static void __init alloc_node_data(int nid)
-{
- void *nd;
- unsigned long nd_pa;
- size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
-
- nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
- return;
- }
-
- nd = __va(nd_pa);
-
- node_data[nid] = nd;
- memset(nd, 0, sizeof(pg_data_t));
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 9c9b75b76f62..a5fc61f8b348 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -13,6 +13,7 @@ static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
static u64 native_steal_clock(int cpu)
{
@@ -134,6 +135,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
info->ipi_irqs[IPI_IRQ_WORK]++;
}
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ info->ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
@@ -151,11 +157,14 @@ static void pv_init_ipi(void)
}
#endif
-static bool kvm_para_available(void)
+bool kvm_para_available(void)
{
int config;
static int hypervisor_type;
+ if (!cpu_has_hypervisor)
+ return false;
+
if (!hypervisor_type) {
config = read_cpucfg(CPUCFG_KVM_SIG);
if (!memcmp(&config, KVM_SIGNATURE, 4))
@@ -165,17 +174,22 @@ static bool kvm_para_available(void)
return hypervisor_type == HYPERVISOR_KVM;
}
-int __init pv_ipi_init(void)
+unsigned int kvm_arch_para_features(void)
{
- int feature;
+ static unsigned int feature;
- if (!cpu_has_hypervisor)
- return 0;
if (!kvm_para_available())
return 0;
- feature = read_cpucfg(CPUCFG_KVM_FEATURE);
- if (!(feature & KVM_FEATURE_IPI))
+ if (!feature)
+ feature = read_cpucfg(CPUCFG_KVM_FEATURE);
+
+ return feature;
+}
+
+int __init pv_ipi_init(void)
+{
+ if (!kvm_para_has_feature(KVM_FEATURE_IPI))
return 0;
#ifdef CONFIG_SMP
@@ -206,7 +220,7 @@ static int pv_enable_steal_time(void)
}
addr |= KVM_STEAL_PHYS_VALID;
- kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);
return 0;
}
@@ -214,7 +228,7 @@ static int pv_enable_steal_time(void)
static void pv_disable_steal_time(void)
{
if (has_steal_clock)
- kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
+ kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}
#ifdef CONFIG_SMP
@@ -258,15 +272,9 @@ static struct notifier_block pv_reboot_nb = {
int __init pv_time_init(void)
{
- int r, feature;
+ int r;
- if (!cpu_has_hypervisor)
- return 0;
- if (!kvm_para_available())
- return 0;
-
- feature = read_cpucfg(CPUCFG_KVM_FEATURE);
- if (!(feature & KVM_FEATURE_STEAL_TIME))
+ if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
return 0;
has_steal_clock = 1;
@@ -300,3 +308,13 @@ int __init pv_time_init(void)
return 0;
}
+
+int __init pv_spinlock_init(void)
+{
+ if (!cpu_has_hypervisor)
+ return 0;
+
+ static_branch_enable(&virt_spin_lock_key);
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 0f0740f0be27..00e307203ddb 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -603,6 +603,8 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);
resource_init();
+ jump_label_init(); /* Initialise the static keys for paravirtualization */
+
#ifdef CONFIG_SMP
plat_smp_setup();
prefill_possible_map();
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index ca405ab86aae..9afc2d8b3414 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -72,6 +72,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -248,6 +249,11 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
}
+ if (action & SMP_CLEAR_VECTOR) {
+ complete_irq_moving();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
+ }
+
return IRQ_HANDLED;
}
@@ -476,7 +482,7 @@ core_initcall(ipi_pm_init);
#endif
/* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
@@ -509,6 +515,8 @@ void smp_prepare_boot_cpu(void)
rr_node = next_node_in(rr_node, node_online_map);
}
}
+
+ pv_spinlock_init();
}
/* called from main before smp_init() */
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index 90dfccb41c14..f6fcc52aefae 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -37,6 +37,7 @@ static union {
static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
+struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data;
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ea73f9dc2cc6..90894f70ff4a 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -50,9 +50,7 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
break;
case CPUCFG_KVM_FEATURE:
- ret = KVM_FEATURE_IPI;
- if (kvm_pvtime_supported())
- ret |= KVM_FEATURE_STEAL_TIME;
+ ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
vcpu->arch.gprs[rd] = ret;
break;
default:
@@ -127,6 +125,14 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
+ if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
+ if (kvm_guest_has_pmu(&vcpu->arch)) {
+ vcpu->arch.pc -= 4;
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ return EMULATE_DONE;
+ }
+ }
+
/* Process CSR ops */
switch (rj) {
case 0: /* process csrrd */
@@ -697,25 +703,22 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
switch (id) {
- case KVM_FEATURE_STEAL_TIME:
- if (!kvm_pvtime_supported())
- return KVM_HCALL_INVALID_CODE;
-
+ case BIT(KVM_FEATURE_STEAL_TIME):
if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
return KVM_HCALL_INVALID_PARAMETER;
vcpu->arch.st.guest_addr = data;
if (!(data & KVM_STEAL_PHYS_VALID))
- break;
+ return 0;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
- break;
+ return 0;
default:
- break;
+ return KVM_HCALL_INVALID_CODE;
};
- return 0;
+ return KVM_HCALL_INVALID_CODE;
};
/*
@@ -748,6 +751,14 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+{
+ if (kvm_own_lbt(vcpu))
+ kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+ return RESUME_GUEST;
+}
+
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
unsigned int min, cpu, i;
@@ -781,19 +792,21 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
*/
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
+ long ret = KVM_HCALL_INVALID_CODE;
unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
- long ret;
switch (func) {
case KVM_HCALL_FUNC_IPI:
- kvm_send_pv_ipi(vcpu);
- ret = KVM_HCALL_SUCCESS;
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
+ kvm_send_pv_ipi(vcpu);
+ ret = KVM_HCALL_SUCCESS;
+ }
break;
case KVM_HCALL_FUNC_NOTIFY:
- ret = kvm_save_notify(vcpu);
+ if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
+ ret = kvm_save_notify(vcpu);
break;
default:
- ret = KVM_HCALL_INVALID_CODE;
break;
}
@@ -865,6 +878,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
+ [EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
[EXCCODE_GSPR] = kvm_handle_gspr,
[EXCCODE_HVC] = kvm_handle_hypercall,
};
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 6905283f535b..0697b1064251 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
@@ -31,6 +32,126 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
+static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ context->perf_cntr[0] = read_csr_perfcntr0();
+ context->perf_cntr[1] = read_csr_perfcntr1();
+ context->perf_cntr[2] = read_csr_perfcntr2();
+ context->perf_cntr[3] = read_csr_perfcntr3();
+ context->perf_ctrl[0] = write_csr_perfctrl0(0);
+ context->perf_ctrl[1] = write_csr_perfctrl1(0);
+ context->perf_ctrl[2] = write_csr_perfctrl2(0);
+ context->perf_ctrl[3] = write_csr_perfctrl3(0);
+}
+
+static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_context *context;
+
+ context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
+ write_csr_perfcntr0(context->perf_cntr[0]);
+ write_csr_perfcntr1(context->perf_cntr[1]);
+ write_csr_perfcntr2(context->perf_cntr[2]);
+ write_csr_perfcntr3(context->perf_cntr[3]);
+ write_csr_perfctrl0(context->perf_ctrl[0]);
+ write_csr_perfctrl1(context->perf_ctrl[1]);
+ write_csr_perfctrl2(context->perf_ctrl[2]);
+ write_csr_perfctrl3(context->perf_ctrl[3]);
+}
+
+
+static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+}
+
+static int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ if (!kvm_guest_has_pmu(&vcpu->arch))
+ return -EINVAL;
+
+ kvm_save_host_pmu(vcpu);
+
+ /* Assign PM0-PM(num) to the guest */
+ val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+ val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+ write_csr_gcfg(val);
+
+ kvm_restore_guest_pmu(vcpu);
+
+ return 0;
+}
+
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ return;
+
+ kvm_save_guest_pmu(vcpu);
+
+ /* Disable PMU access from the guest */
+ write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+ /*
+ * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
+ * exiting the guest, so that on the next trap into the guest we
+ * don't need to deal with the PMU CSR context.
+ */
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+ val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+ if (!(val & KVM_PMU_EVENT_ENABLED))
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
+
+ kvm_restore_host_pmu(vcpu);
+}
+
+static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+}
+
+static void kvm_check_pmu(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
+ kvm_own_pmu(vcpu);
+ vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
+ }
+}
+
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
u32 version;
@@ -158,6 +279,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
+ kvm_check_pmu(vcpu);
/*
* Called after function kvm_check_vpid()
@@ -195,6 +317,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_lose_pmu(vcpu);
+
guest_timing_exit_irqoff();
guest_state_exit_irqoff();
local_irq_enable();
@@ -468,6 +592,22 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
kvm_write_sw_gcsr(csr, id, val);
+ /*
+ * After modifying the vCPU's PMU CSR values, request KVM_REQ_PMU
+ * if any of the PMU CSRs are in use.
+ */
+ if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
+ unsigned long val;
+
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+ kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+
+ if (val & KVM_PMU_EVENT_ENABLED)
+ kvm_make_request(KVM_REQ_PMU, vcpu);
+ }
+
return ret;
}
@@ -497,6 +637,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v |= CPUCFG2_LSX;
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
+ if (cpu_has_lbt_x86)
+ *v |= CPUCFG2_X86BT;
+ if (cpu_has_lbt_arm)
+ *v |= CPUCFG2_ARMBT;
+ if (cpu_has_lbt_mips)
+ *v |= CPUCFG2_MIPSBT;
return 0;
case LOONGARCH_CPUCFG3:
@@ -506,6 +652,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
case LOONGARCH_CPUCFG5:
*v = GENMASK(31, 0);
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (cpu_has_pmp)
+ *v = GENMASK(14, 0);
+ else
+ *v = 0;
+ return 0;
case LOONGARCH_CPUCFG16:
*v = GENMASK(16, 0);
return 0;
@@ -550,6 +702,17 @@ static int kvm_check_cpucfg(int id, u64 val)
/* LASX architecturally implies LSX and FP but val does not satisfy that */
return -EINVAL;
return 0;
+ case LOONGARCH_CPUCFG6:
+ if (val & CPUCFG6_PMP) {
+ u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
+ if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+ return -EINVAL;
+ if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+ return -EINVAL;
+ if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+ return -EINVAL;
+ }
+ return 0;
default:
/*
* Values for the other CPUCFG IDs are not being further validated
@@ -577,6 +740,34 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
else
ret = -EINVAL;
break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ *v = vcpu->arch.lbt.scr0;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ *v = vcpu->arch.lbt.scr1;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ *v = vcpu->arch.lbt.scr2;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ *v = vcpu->arch.lbt.scr3;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ *v = vcpu->arch.lbt.eflags;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ *v = vcpu->arch.fpu.ftop;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
@@ -635,6 +826,37 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
if (ret)
break;
vcpu->arch.cpucfg[id] = (u32)v;
+ if (id == LOONGARCH_CPUCFG6)
+ vcpu->arch.max_pmu_csrid =
+ LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
+ break;
+ case KVM_REG_LOONGARCH_LBT:
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -ENXIO;
+
+ switch (reg->id) {
+ case KVM_REG_LOONGARCH_LBT_SCR0:
+ vcpu->arch.lbt.scr0 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR1:
+ vcpu->arch.lbt.scr1 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR2:
+ vcpu->arch.lbt.scr2 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_SCR3:
+ vcpu->arch.lbt.scr3 = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_EFLAGS:
+ vcpu->arch.lbt.eflags = v;
+ break;
+ case KVM_REG_LOONGARCH_LBT_FTOP:
+ vcpu->arch.fpu.ftop = v;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
@@ -728,7 +950,10 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
switch (attr->attr) {
- case 2:
+ case LOONGARCH_CPUCFG2:
+ case LOONGARCH_CPUCFG6:
+ return 0;
+ case CPUCFG_KVM_FEATURE:
return 0;
default:
return -ENXIO;
@@ -740,8 +965,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- if (!kvm_pvtime_supported() ||
- attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
return -ENXIO;
return 0;
@@ -773,9 +998,18 @@ static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
- ret = _kvm_get_cpucfg_mask(attr->attr, &val);
- if (ret)
- return ret;
+ switch (attr->attr) {
+ case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
+ ret = _kvm_get_cpucfg_mask(attr->attr, &val);
+ if (ret)
+ return ret;
+ break;
+ case CPUCFG_KVM_FEATURE:
+ val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
+ break;
+ default:
+ return -ENXIO;
+ }
put_user(val, uaddr);
@@ -788,8 +1022,8 @@ static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
u64 gpa;
u64 __user *user = (u64 __user *)attr->addr;
- if (!kvm_pvtime_supported() ||
- attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
return -ENXIO;
gpa = vcpu->arch.st.guest_addr;
@@ -821,7 +1055,28 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- return -ENXIO;
+ u64 val, valid;
+ u64 __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+
+ switch (attr->attr) {
+ case CPUCFG_KVM_FEATURE:
+ if (get_user(val, user))
+ return -EFAULT;
+
+ valid = LOONGARCH_PV_FEAT_MASK;
+ if (val & ~valid)
+ return -EINVAL;
+
+ /* All vCPUs need to set the same PV features */
+ if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
+ && ((kvm->arch.pv_features & valid) != val))
+ return -EINVAL;
+ kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
+ return 0;
+ default:
+ return -ENXIO;
+ }
}
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
@@ -831,8 +1086,8 @@ static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
u64 gpa, __user *user = (u64 __user *)attr->addr;
struct kvm *kvm = vcpu->kvm;
- if (!kvm_pvtime_supported() ||
- attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
+ || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
return -ENXIO;
if (get_user(gpa, user))
@@ -977,12 +1232,66 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return 0;
}
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_guest_has_lbt(&vcpu->arch))
+ return -EINVAL;
+
+ preempt_disable();
+ set_csr_euen(CSR_EUEN_LBTEN);
+ _restore_lbt(&vcpu->arch.lbt);
+ vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+ preempt_enable();
+
+ return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+ _save_lbt(&vcpu->arch.lbt);
+ clear_csr_euen(CSR_EUEN_LBTEN);
+ vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+ }
+ preempt_enable();
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+ /*
+ * If TM is enabled, top register save/restore will
+ * cause lbt exception, here enable lbt in advance
+ */
+ if (fcsr & FPU_CSR_TM)
+ kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
+ return;
+ kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
+ }
+}
+#else
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
+static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
+#endif
+
/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- /* Enable FPU */
+ /*
+ * Enable FPU for the guest. Check the guest FCSR first so that
+ * LBT is taken over in advance when the TOP mode (TM) bit is set.
+ */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN);
kvm_restore_fpu(&vcpu->arch.fpu);
@@ -1002,6 +1311,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
preempt_disable();
/* Enable LSX for guest */
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
case KVM_LARCH_FPU:
@@ -1036,6 +1346,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
preempt_disable();
+ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
case KVM_LARCH_LSX:
@@ -1067,6 +1378,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
+ kvm_check_fcsr_alive(vcpu);
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
kvm_save_lasx(&vcpu->arch.fpu);
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -1089,6 +1401,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable FPU */
clear_csr_euen(CSR_EUEN_FPEN);
}
+ kvm_lose_lbt(vcpu);
preempt_enable();
}
@@ -1235,6 +1548,9 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ /* Restore hardware PMU CSRs */
+ kvm_restore_pmu(vcpu);
+
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
return 0;
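With the KVM_REG_LOONGARCH_LBT group defined, a VMM can save and restore the binary-translation scratch state through the generic one-reg API, e.g. for live migration. A user-space sketch (vcpu_fd is assumed to be an open vCPU file descriptor; error handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_lbt_scr0(int vcpu_fd, __u64 *val)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_LOONGARCH_LBT_SCR0,
                .addr = (__u64)(unsigned long)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}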
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index 6b2e4f66ad26..4ba734aaef87 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -5,6 +5,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_vcpu.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
@@ -39,6 +40,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.phyid_map_lock);
kvm_init_vmcs(kvm);
+
+ /* Enable all PV features by default */
+ kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
+ if (kvm_pvtime_supported())
+ kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+
kvm->arch.gpa_size = BIT(cpu_vabits - 1);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
@@ -99,7 +106,67 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_LOONGARCH_VM_FEAT_LSX:
+ if (cpu_has_lsx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_LASX:
+ if (cpu_has_lasx)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_X86BT:
+ if (cpu_has_lbt_x86)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_ARMBT:
+ if (cpu_has_lbt_arm)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_MIPSBT:
+ if (cpu_has_lbt_mips)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PMU:
+ if (cpu_has_pmp)
+ return 0;
+ return -ENXIO;
+ case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+ return 0;
+ case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
+ if (kvm_pvtime_supported())
+ return 0;
+ return -ENXIO;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_LOONGARCH_VM_FEAT_CTRL:
+ return kvm_vm_feature_has_attr(kvm, attr);
+ default:
+ return -ENXIO;
+ }
+}
+
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ void __user *argp = (void __user *)arg;
+ struct kvm *kvm = filp->private_data;
+ struct kvm_device_attr attr;
+
+ switch (ioctl) {
+ case KVM_HAS_DEVICE_ATTR:
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+
+ return kvm_vm_has_attr(kvm, &attr);
+ default:
+ return -ENOIOCTLCMD;
+ }
}
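The new KVM_HAS_DEVICE_ATTR handler on the VM fd lets a VMM probe optional capabilities before it configures vCPUs. A user-space sketch (vm_fd is assumed to be an open VM file descriptor):

#include <linux/kvm.h>
#include <stdbool.h>
#include <sys/ioctl.h>

static bool vm_has_pmu(int vm_fd)
{
        struct kvm_device_attr attr = {
                .group = KVM_LOONGARCH_VM_FEAT_CTRL,
                .attr  = KVM_LOONGARCH_VM_FEAT_PMU,
        };

        /* Returns 0 when the attribute exists, -ENXIO otherwise. */
        return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}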
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 889030985135..914e82ff3f65 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -89,7 +89,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -101,7 +102,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index d724d46b07c8..40c1175823d6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -4,7 +4,8 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
-obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o
+obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
+ vgetrandom-chacha.o sigreturn.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@@ -29,6 +30,10 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 56ad855896de..6b441bde4026 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -62,6 +62,7 @@ VERSION
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
+ __vdso_getrandom;
__vdso_rt_sigreturn;
local: *;
};
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..7e86a50f6e85
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <linux/linkage.h>
+
+.text
+
+/* ChaCha quarter-round */
+.macro QR a b c d
+ add.w \a, \a, \b
+ xor \d, \d, \a
+ rotri.w \d, \d, 16
+
+ add.w \c, \c, \d
+ xor \b, \b, \c
+ rotri.w \b, \b, 20
+
+ add.w \a, \a, \b
+ xor \d, \d, \a
+ rotri.w \d, \d, 24
+
+ add.w \c, \c, \d
+ xor \b, \b, \c
+ rotri.w \b, \b, 25
+.endm
+
+/*
+ * Very basic LoongArch implementation of ChaCha20. Produces a given positive
+ * number of blocks of output with a nonce of 0, taking an input key and
+ * 8-byte counter. Importantly does not spill to the stack. Its arguments
+ * are:
+ *
+ * a0: output bytes
+ * a1: 32-byte key input
+ * a2: 8-byte counter input/output
+ * a3: number of 64-byte blocks to write to output
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+
+/* We don't need a frame pointer */
+#define s9 fp
+
+#define output a0
+#define key a1
+#define counter a2
+#define nblocks a3
+#define i a4
+#define state0 s0
+#define state1 s1
+#define state2 s2
+#define state3 s3
+#define state4 s4
+#define state5 s5
+#define state6 s6
+#define state7 s7
+#define state8 s8
+#define state9 s9
+#define state10 a5
+#define state11 a6
+#define state12 a7
+#define state13 t0
+#define state14 t1
+#define state15 t2
+#define cnt_lo t3
+#define cnt_hi t4
+#define copy0 t5
+#define copy1 t6
+#define copy2 t7
+
+/* Reuse i as copy3 */
+#define copy3 i
+
+ /*
+ * The ABI requires s0-s9 to be saved and sp to be 16-byte aligned.
+ * This does not violate the stack-less requirement: no sensitive data
+ * is spilled onto the stack.
+ */
+ PTR_ADDI sp, sp, (-SZREG * 10) & STACK_ALIGN
+ REG_S s0, sp, 0
+ REG_S s1, sp, SZREG
+ REG_S s2, sp, SZREG * 2
+ REG_S s3, sp, SZREG * 3
+ REG_S s4, sp, SZREG * 4
+ REG_S s5, sp, SZREG * 5
+ REG_S s6, sp, SZREG * 6
+ REG_S s7, sp, SZREG * 7
+ REG_S s8, sp, SZREG * 8
+ REG_S s9, sp, SZREG * 9
+
+ li.w copy0, 0x61707865
+ li.w copy1, 0x3320646e
+ li.w copy2, 0x79622d32
+
+ ld.w cnt_lo, counter, 0
+ ld.w cnt_hi, counter, 4
+
+.Lblock:
+ /* state[0,1,2,3] = "expand 32-byte k" */
+ move state0, copy0
+ move state1, copy1
+ move state2, copy2
+ li.w state3, 0x6b206574
+
+ /* state[4,5,..,11] = key */
+ ld.w state4, key, 0
+ ld.w state5, key, 4
+ ld.w state6, key, 8
+ ld.w state7, key, 12
+ ld.w state8, key, 16
+ ld.w state9, key, 20
+ ld.w state10, key, 24
+ ld.w state11, key, 28
+
+ /* state[12,13] = counter */
+ move state12, cnt_lo
+ move state13, cnt_hi
+
+ /* state[14,15] = 0 */
+ move state14, zero
+ move state15, zero
+
+ li.w i, 10
+.Lpermute:
+ /* odd round */
+ QR state0, state4, state8, state12
+ QR state1, state5, state9, state13
+ QR state2, state6, state10, state14
+ QR state3, state7, state11, state15
+
+ /* even round */
+ QR state0, state5, state10, state15
+ QR state1, state6, state11, state12
+ QR state2, state7, state8, state13
+ QR state3, state4, state9, state14
+
+ addi.w i, i, -1
+ bnez i, .Lpermute
+
+ /*
+ * copy[3] = "te k", materialize it here because copy[3] shares the
+ * same register with i, which just became dead.
+ */
+ li.w copy3, 0x6b206574
+
+ /* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
+ add.w state0, state0, copy0
+ add.w state1, state1, copy1
+ add.w state2, state2, copy2
+ add.w state3, state3, copy3
+ st.w state0, output, 0
+ st.w state1, output, 4
+ st.w state2, output, 8
+ st.w state3, output, 12
+
+ /* from now on state[0,1,2,3] are scratch registers */
+
+ /* state[0,1,2,3] = lo32(key) */
+ ld.w state0, key, 0
+ ld.w state1, key, 4
+ ld.w state2, key, 8
+ ld.w state3, key, 12
+
+ /* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */
+ add.w state4, state4, state0
+ add.w state5, state5, state1
+ add.w state6, state6, state2
+ add.w state7, state7, state3
+ st.w state4, output, 16
+ st.w state5, output, 20
+ st.w state6, output, 24
+ st.w state7, output, 28
+
+ /* state[0,1,2,3] = hi32(key) */
+ ld.w state0, key, 16
+ ld.w state1, key, 20
+ ld.w state2, key, 24
+ ld.w state3, key, 28
+
+ /* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */
+ add.w state8, state8, state0
+ add.w state9, state9, state1
+ add.w state10, state10, state2
+ add.w state11, state11, state3
+ st.w state8, output, 32
+ st.w state9, output, 36
+ st.w state10, output, 40
+ st.w state11, output, 44
+
+ /* output[12,13,14,15] = state[12,13,14,15] + [cnt_lo, cnt_hi, 0, 0] */
+ add.w state12, state12, cnt_lo
+ add.w state13, state13, cnt_hi
+ st.w state12, output, 48
+ st.w state13, output, 52
+ st.w state14, output, 56
+ st.w state15, output, 60
+
+ /* ++counter */
+ addi.w cnt_lo, cnt_lo, 1
+ sltui state0, cnt_lo, 1
+ add.w cnt_hi, cnt_hi, state0
+
+ /* output += 64 */
+ PTR_ADDI output, output, 64
+ /* --nblocks */
+ PTR_ADDI nblocks, nblocks, -1
+ bnez nblocks, .Lblock
+
+ /* counter = [cnt_lo, cnt_hi] */
+ st.w cnt_lo, counter, 0
+ st.w cnt_hi, counter, 4
+
+ /*
+ * Zero out the potentially sensitive regs, in case nothing uses them
+ * again. As of now copy[0,1,2,3] just contain "expand 32-byte k" and
+ * state[0,...,9] live in s0-s9, which we'll restore in the epilogue,
+ * so we only need to zero state[10,...,15].
+ */
+ move state10, zero
+ move state11, zero
+ move state12, zero
+ move state13, zero
+ move state14, zero
+ move state15, zero
+
+ REG_L s0, sp, 0
+ REG_L s1, sp, SZREG
+ REG_L s2, sp, SZREG * 2
+ REG_L s3, sp, SZREG * 3
+ REG_L s4, sp, SZREG * 4
+ REG_L s5, sp, SZREG * 5
+ REG_L s6, sp, SZREG * 6
+ REG_L s7, sp, SZREG * 7
+ REG_L s8, sp, SZREG * 8
+ REG_L s9, sp, SZREG * 9
+ PTR_ADDI sp, sp, -((-SZREG * 10) & STACK_ALIGN)
+
+ jr ra
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
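Since LoongArch only provides a rotate-right immediate (rotri.w), the QR macro above encodes ChaCha's left rotations by 16/12/8/7 as right rotations by 16/20/24/25 (rotl(x, n) == rotr(x, 32 - n) on 32-bit words). For comparison, a reference C quarter-round (a sketch, not the kernel code):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned int n)
{
        return (x << n) | (x >> (32 - n));      /* == rotr32(x, 32 - n) */
}

/* One ChaCha quarter-round over four 32-bit state words. */
static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);        /* rotri.w d, d, 16 */
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);        /* rotri.w b, b, 20 */
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);         /* rotri.w d, d, 24 */
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);         /* rotri.w b, b, 25 */
}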
diff --git a/arch/loongarch/vdso/vgetrandom.c b/arch/loongarch/vdso/vgetrandom.c
new file mode 100644
index 000000000000..d5f258ac4a36
--- /dev/null
+++ b/arch/loongarch/vdso/vgetrandom.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#include <linux/types.h>
+
+ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+}
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 9a084943a68a..d01dc47d52ea 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -559,7 +559,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -636,7 +635,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 58ec725bf392..46808e581d7b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -516,7 +516,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -593,7 +592,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 63ab7e892e59..4469a7839c9d 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -536,7 +536,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -613,7 +612,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index ca40744eec60..c0719322c028 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -585,7 +584,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 77bcc6faf468..8d429e63f8f2 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -518,7 +518,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -595,7 +594,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index e73d4032b659..bafd33da27c1 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -535,7 +535,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -612,7 +611,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 638df8442c98..6f5ca3f85ea1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -698,7 +697,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 2248db426081..d16b328c7136 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -507,7 +507,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -584,7 +583,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2975b66521f6..80f6c15a5ed5 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -585,7 +584,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 0a0e32344033..0e81589f0ee2 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -525,7 +525,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -602,7 +601,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index f16f1a99c2ba..8cd785290339 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -582,7 +581,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 667bd61ae9b3..78035369f60f 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -583,7 +582,6 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
CONFIG_TEST_VMALLOC=m
-CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 4ba14f3535fc..71fbe5c5c564 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -3,6 +3,7 @@
#define __ARCH_M68K_CMPXCHG__
#include <linux/irqflags.h>
+#include <linux/minmax.h>
#define __xg(type, x) ((volatile type *)(x))
@@ -11,25 +12,19 @@ extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size)
{
- unsigned long flags, tmp;
+ unsigned long flags;
local_irq_save(flags);
switch (size) {
case 1:
- tmp = *(u8 *)ptr;
- *(u8 *)ptr = x;
- x = tmp;
+ swap(*(u8 *)ptr, x);
break;
case 2:
- tmp = *(u16 *)ptr;
- *(u16 *)ptr = x;
- x = tmp;
+ swap(*(u16 *)ptr, x);
break;
case 4:
- tmp = *(u32 *)ptr;
- *(u32 *)ptr = x;
- x = tmp;
+ swap(*(u32 *)ptr, x);
break;
default:
x = __invalid_xchg_size(x, ptr, size);
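
The hunk above collapses each three-step load/store/assign exchange into swap() from <linux/minmax.h>, which is why the include is added at the top of the file. A minimal host-side sketch of the idiom, using a simplified swap macro rather than the kernel's exact definition:

#include <stdio.h>

/* Simplified stand-in for the kernel's swap() from <linux/minmax.h>. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned char byte = 0x11;	/* plays the role of *(u8 *)ptr */
	unsigned long x = 0xff;		/* the incoming xchg value */

	swap(byte, x);	/* replaces: tmp = byte; byte = x; x = tmp; */
	printf("byte=%#x x=%#lx\n", byte, x);	/* byte=0xff x=0x11 */
	return 0;
}
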
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 2584e94e2134..fda7eac23f87 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -117,7 +117,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
{
/* regs will be equal to current_pt_regs() */
struct kernel_clone_args args = {
- .flags = regs->d1 & ~CSIGNAL,
+ .flags = (u32)(regs->d1) & ~CSIGNAL,
.pidfd = (int __user *)regs->d3,
.child_tid = (int __user *)regs->d4,
.parent_tid = (int __user *)regs->d3,
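
The new (u32) cast matters because kernel_clone_args.flags is a 64-bit field while d1 carries a 32-bit register value; without the cast, sign extension could plant bogus high CLONE_* bits in flags. A host-side sketch of the effect (not m68k code; CSIGNAL is the generic 0xff from <linux/sched.h>):

#include <stdio.h>

#define CSIGNAL 0x000000ff

int main(void)
{
	int d1 = (int)0x80000011;	/* 32-bit register with the sign bit set */
	unsigned long long without_cast = (long long)d1 & ~CSIGNAL;
	unsigned long long with_cast = (unsigned int)d1 & ~CSIGNAL;

	/* Sign extension smears 0xffffffff into the upper word. */
	printf("%#llx vs %#llx\n", without_cast, with_cast);
	return 0;
}
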
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 37fb663559b4..c926da9d5ec2 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p)
pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
_stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
- pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+ pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n",
__bss_stop, memory_start, memory_start, memory_end);
memblock_add(_rambase, memory_end - _rambase);
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 10f1f294e91f..14b774b9d308 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -106,7 +106,7 @@ void __init q40_init_IRQ(void)
* this stuff doesn't really belong here..
*/
-int ql_ticks; /* 200Hz ticks since last jiffie */
+int ql_ticks; /* 200Hz ticks since last jiffy */
static int sound_ticks;
#define SVOL 45
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 60077e576935..397edf05dd72 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -8,6 +8,7 @@ config MIPS
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000
select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
+ select ARCH_HAS_DMA_OPS if MACH_JAZZ
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
@@ -393,7 +394,6 @@ config MACH_JAZZ
select ARC_PROMLIB
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select DMA_OPS
select FW_ARC
select FW_ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -502,7 +502,6 @@ config MACH_LOONGSON64
select USE_OF
select BUILTIN_DTB
select PCI_HOST_GENERIC
- select HAVE_ARCH_NODEDATA_EXTENSION if NUMA
help
This enables the support of Loongson-2/3 family of machines.
@@ -735,7 +734,6 @@ config SGI_IP27
select WAR_R10000_LLSC
select MIPS_L1_CACHE_SHIFT_7
select NUMA
- select HAVE_ARCH_NODEDATA_EXTENSION
help
	  These are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
workstations. To compile a Linux kernel that runs on these, say Y
@@ -2613,9 +2611,6 @@ config NUMA
config SYS_SUPPORTS_NUMA
bool
-config HAVE_ARCH_NODEDATA_EXTENSION
- bool
-
config RELOCATABLE
bool "Relocatable kernel"
depends on SYS_SUPPORTS_RELOCATABLE
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
index 973049b5bd61..44d8433b1f45 100644
--- a/arch/mips/alchemy/common/dma.c
+++ b/arch/mips/alchemy/common/dma.c
@@ -131,29 +131,6 @@ static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
};
-void dump_au1000_dma_channel(unsigned int dmanr)
-{
- struct dma_chan *chan;
-
- if (dmanr >= NUM_AU1000_DMA_CHANNELS)
- return;
- chan = &au1000_dma_table[dmanr];
-
- printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
- printk(KERN_INFO " mode = 0x%08x\n",
- __raw_readl(chan->io + DMA_MODE_SET));
- printk(KERN_INFO " addr = 0x%08x\n",
- __raw_readl(chan->io + DMA_PERIPHERAL_ADDR));
- printk(KERN_INFO " start0 = 0x%08x\n",
- __raw_readl(chan->io + DMA_BUFFER0_START));
- printk(KERN_INFO " start1 = 0x%08x\n",
- __raw_readl(chan->io + DMA_BUFFER1_START));
- printk(KERN_INFO " count0 = 0x%08x\n",
- __raw_readl(chan->io + DMA_BUFFER0_COUNT));
- printk(KERN_INFO " count1 = 0x%08x\n",
- __raw_readl(chan->io + DMA_BUFFER1_COUNT));
-}
-
/*
* Finds a free channel, and binds the requested device to it.
* Returns the allocated channel number, or negative on error.
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 99f321b6e417..9cc8fbf218a5 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -42,7 +42,7 @@ static struct board_info __initdata board_cvg834g = {
.expected_cpu_id = 0x3368,
.ephy_reset_gpio = 36,
- .ephy_reset_gpio_flags = GPIOF_INIT_HIGH,
+ .ephy_reset_gpio_flags = GPIOF_OUT_INIT_HIGH,
.has_pci = 1,
.has_uart0 = 1,
.has_uart1 = 1,
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index 8cfbafa532e0..a5b5b5102472 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -31,6 +31,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index ec6d58008f8e..2a59b85f88aa 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -77,24 +77,26 @@ static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
-#ifdef CONFIG_64BIT
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
-#else /* !CONFIG_64BIT */
- while (len >= sizeof(u32)) {
-#endif
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- len -= sizeof(u32);
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32(crc, value, d);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
+ } else {
+ for (; len >= sizeof(u32); len -= sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
}
if (len & sizeof(u16)) {
@@ -117,24 +119,26 @@ static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
-#ifdef CONFIG_64BIT
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
- CRC32C(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
+			CRC32C(crc, value, d);
+ }
- if (len & sizeof(u32)) {
-#else /* !CONFIG_64BIT */
- while (len >= sizeof(u32)) {
-#endif
- u32 value = get_unaligned_le32(p);
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+			CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
+ } else {
+ for (; len >= sizeof(u32); len -= sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
- CRC32C(crc, value, w);
- p += sizeof(u32);
- len -= sizeof(u32);
+			CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
}
if (len & sizeof(u16)) {
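
The rewrite above trades #ifdef CONFIG_64BIT for IS_ENABLED(), so both code paths stay visible to the compiler: each arm is always parsed and type-checked, and the dead one is folded away as a constant. A minimal sketch of the idiom (the real kernel macro expands Kconfig symbols via __is_defined(); the direct stand-in below is a simplification):

#include <stdio.h>

#define CONFIG_64BIT 1			/* pretend Kconfig set this */
#define IS_ENABLED(option) (option)	/* simplified stand-in */

int main(void)
{
	if (IS_ENABLED(CONFIG_64BIT))	/* both arms compile; one folds away */
		printf("64-bit path\n");
	else
		printf("32-bit path\n");
	return 0;
}
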
diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h
index e9e87504bb0c..71e20e6cd38d 100644
--- a/arch/mips/include/asm/cmp.h
+++ b/arch/mips/include/asm/cmp.h
@@ -7,12 +7,4 @@
*/
struct task_struct;
-extern void cmp_smp_setup(void);
-extern void cmp_smp_finish(void);
-extern void cmp_boot_secondary(int cpu, struct task_struct *t);
-extern void cmp_init_secondary(void);
-extern void cmp_prepare_cpus(unsigned int max_cpus);
-
-/* This is platform specific */
-extern void cmp_send_ipi(int cpu, unsigned int action);
#endif /* _ASM_CMP_H */
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index 908e96e3a311..8fcad6984389 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -160,6 +160,5 @@ extern void prom_identify_arch(u32);
extern void prom_init_cmdline(s32, s32 *, u32);
extern void register_prom_console(void);
-extern void unregister_prom_console(void);
#endif /* _ASM_DEC_PROM_H */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index b82e513c8523..18c24051a1f2 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -124,7 +124,6 @@ extern int request_au1000_dma(int dev_id,
extern void free_au1000_dma(unsigned int dmanr);
extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
int length, int *eof, void *data);
-extern void dump_au1000_dma_channel(unsigned int dmanr);
extern spinlock_t au1000_dma_spin_lock;
static inline struct dma_chan *get_dma_chan(unsigned int dmanr)
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 08c36e50a860..56959eb9cb26 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -22,7 +22,6 @@ struct node_data {
extern struct node_data *__node_data[];
-#define NODE_DATA(n) (&__node_data[(n)]->pglist)
#define hub_data(n) (&__node_data[(n)]->hub)
#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index a3d65d37b8b5..8fb70fd3c9c4 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -14,10 +14,6 @@
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) ((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT)
-extern struct pglist_data *__node_data[];
-
-#define NODE_DATA(n) (__node_data[n])
-
extern void __init prom_init_numa_memory(void);
#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index c904c24550f6..5befba569c9f 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -73,7 +73,4 @@ extern void mips_pcibios_init(void);
#define mips_pcibios_init() do { } while (0)
#endif
-extern void mips_scroll_message(void);
-extern void mips_display_message(const char *str);
-
#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index 28917f1582b3..6ea02af29876 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -17,8 +17,6 @@ extern int vpelimit;
extern cpumask_t mt_fpu_cpumask;
extern unsigned long mt_fpemul_threshold;
-extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
-
#ifdef CONFIG_MIPS_MT
extern void mips_mt_set_cpuoptions(void);
#else
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index d0a540e88bb4..d10afd13ee5b 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -56,7 +56,6 @@ struct sigcontext {
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
-#include <linux/posix_types.h>
/*
* Keep this struct definition in sync with the sigcontext fragment
* in arch/mips/kernel/asm-offsets.c
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 60ebaed28a4c..8ab7582291ab 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -151,6 +151,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index e318ea11c858..d21e5d441f53 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -23,8 +23,6 @@
#include <asm/reboot.h>
#include <asm/tlbmisc.h>
-extern asmlinkage void jazz_handle_int(void);
-
extern void jazz_machine_restart(char *command);
static struct resource jazz_io_resources[] = {
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c938ba208fc0..37676a44fefb 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -43,83 +43,6 @@ static int __init maxtcs(char *str)
__setup("maxtcs=", maxtcs);
-/*
- * Dump new MIPS MT state for the core. Does not leave TCs halted.
- * Takes an argument which taken to be a pre-call MVPControl value.
- */
-
-void mips_mt_regdump(unsigned long mvpctl)
-{
- unsigned long flags;
- unsigned long vpflags;
- unsigned long mvpconf0;
- int nvpe;
- int ntc;
- int i;
- int tc;
- unsigned long haltval;
- unsigned long tcstatval;
-
- local_irq_save(flags);
- vpflags = dvpe();
- printk("=== MIPS MT State Dump ===\n");
- printk("-- Global State --\n");
- printk(" MVPControl Passed: %08lx\n", mvpctl);
- printk(" MVPControl Read: %08lx\n", vpflags);
- printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
- nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
- ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- printk("-- per-VPE State --\n");
- for (i = 0; i < nvpe; i++) {
- for (tc = 0; tc < ntc; tc++) {
- settc(tc);
- if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
- printk(" VPE %d\n", i);
- printk(" VPEControl : %08lx\n",
- read_vpe_c0_vpecontrol());
- printk(" VPEConf0 : %08lx\n",
- read_vpe_c0_vpeconf0());
- printk(" VPE%d.Status : %08lx\n",
- i, read_vpe_c0_status());
- printk(" VPE%d.EPC : %08lx %pS\n",
- i, read_vpe_c0_epc(),
- (void *) read_vpe_c0_epc());
- printk(" VPE%d.Cause : %08lx\n",
- i, read_vpe_c0_cause());
- printk(" VPE%d.Config7 : %08lx\n",
- i, read_vpe_c0_config7());
- break; /* Next VPE */
- }
- }
- }
- printk("-- per-TC State --\n");
- for (tc = 0; tc < ntc; tc++) {
- settc(tc);
- if (read_tc_c0_tcbind() == read_c0_tcbind()) {
- /* Are we dumping ourself? */
- haltval = 0; /* Then we're not halted, and mustn't be */
- tcstatval = flags; /* And pre-dump TCStatus is flags */
- printk(" TC %d (current TC with VPE EPC above)\n", tc);
- } else {
- haltval = read_tc_c0_tchalt();
- write_tc_c0_tchalt(1);
- tcstatval = read_tc_c0_tcstatus();
- printk(" TC %d\n", tc);
- }
- printk(" TCStatus : %08lx\n", tcstatval);
- printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
- printk(" TCRestart : %08lx %pS\n",
- read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
- printk(" TCHalt : %08lx\n", haltval);
- printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
- if (!haltval)
- write_tc_c0_tchalt(0);
- }
- printk("===========================\n");
- evpe(vpflags);
- local_irq_restore(flags);
-}
-
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 0362fc5df7b0..39e193cad2b9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -439,7 +439,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
/* preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
if (mp_ops->prepare_boot_cpu)
mp_ops->prepare_boot_cpu();
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 68dafd6d3e25..8388400d052f 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -29,8 +29,6 @@
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
-struct pglist_data *__node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(__node_data);
cpumask_t __node_cpumask[MAX_NUMNODES];
EXPORT_SYMBOL(__node_cpumask);
@@ -83,12 +81,8 @@ static void __init init_topology_matrix(void)
static void __init node_mem_init(unsigned int node)
{
- struct pglist_data *nd;
unsigned long node_addrspace_offset;
unsigned long start_pfn, end_pfn;
- unsigned long nd_pa;
- int tnid;
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
node_addrspace_offset = nid_to_addrbase(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
@@ -98,16 +92,8 @@ static void __init node_mem_init(unsigned int node)
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, node);
- nd = __va(nd_pa);
- memset(nd, 0, sizeof(struct pglist_data));
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != node)
- pr_info("NODE_DATA(%d) on node %d\n", node, tnid);
- __node_data[node] = nd;
+ alloc_node_data(node);
+
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -198,13 +184,3 @@ void __init prom_init_numa_memory(void)
pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
prom_meminit();
}
-
-pg_data_t * __init arch_alloc_nodedata(int nid)
-{
- return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
-}
-
-void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- __node_data[nid] = pgdat;
-}
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7e11d7b58761..5d2a1225785b 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -98,7 +98,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -110,7 +111,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c
index 3bab51a5fb4c..8bc566ea00e5 100644
--- a/arch/mips/ralink/irq-gic.c
+++ b/arch/mips/ralink/irq-gic.c
@@ -10,6 +10,7 @@
#include <linux/of.h>
#include <linux/irqchip.h>
#include <asm/mips-cps.h>
+#include <asm/time.h>
int get_c0_perfcount_int(void)
{
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
index dcf2a44ac51e..926082655a78 100644
--- a/arch/mips/ralink/timer-gic.c
+++ b/arch/mips/ralink/timer-gic.c
@@ -11,6 +11,8 @@
#include <linux/of_clk.h>
#include <linux/clocksource.h>
+#include <asm/time.h>
+
#include "common.h"
void __init plat_time_init(void)
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index b8ca94cfb4fe..1963313f55d8 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -35,7 +35,6 @@
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_NUMNODES];
-
EXPORT_SYMBOL(__node_data);
static u64 gen_region_mask(void)
@@ -361,6 +360,7 @@ static void __init node_mem_init(nasid_t node)
*/
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
memset(__node_data[node], 0, PAGE_SIZE);
+ node_data[node] = &__node_data[node]->pglist;
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -423,13 +423,3 @@ void __init mem_init(void)
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
}
-
-pg_data_t * __init arch_alloc_nodedata(int nid)
-{
- return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
-}
-
-void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- __node_data[nid] = (struct node_data *)pgdat;
-}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 5d2652a1d35a..62733e049570 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -70,11 +70,13 @@ void cpu_node_probe(void)
gda_t *gdap = GDA;
nodes_clear(node_online_map);
+ nodes_clear(node_possible_map);
for (i = 0; i < MAX_NUMNODES; i++) {
nasid_t nasid = gdap->g_nasidtable[i];
if (nasid == INVALID_NASID)
break;
node_set_online(nasid);
+ node_set(nasid, node_possible_map);
highest = node_scan_cpus(nasid, highest);
}
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 3459df28afee..a2278485de19 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -82,6 +82,10 @@ void __init mmu_init(void)
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
+static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .pages = kuser_page,
+};
static int alloc_kuser_page(void)
{
@@ -106,18 +110,18 @@ arch_initcall(alloc_kuser_page);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- int ret;
+ struct vm_area_struct *vma;
mmap_write_lock(mm);
/* Map kuser helpers to user space address */
- ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+ vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYEXEC, kuser_page);
+ VM_MAYEXEC, &vdso_mapping);
mmap_write_unlock(mm);
- return ret;
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
const char *arch_vma_name(struct vm_area_struct *vma)
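
_install_special_mapping() returns either the new VMA or an errno encoded in the pointer, which is why the caller above converts with IS_ERR()/PTR_ERR() instead of checking an int. A host-side sketch of that error-pointer convention, with the macros simplified from the kernel's <linux/err.h>:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr) ((long)(ptr))

int main(void)
{
	void *vma = (void *)(long)-12;	/* an ENOMEM failure, encoded */

	printf("ret = %ld\n", IS_ERR(vma) ? PTR_ERR(vma) : 0L);	/* ret = -12 */
	return 0;
}
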
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b0a2ac3ba916..aa6a3cad275d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,6 +10,7 @@ config PARISC
select ARCH_WANT_FRAME_POINTERS
select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_DMA_ALLOC if PA11
+ select ARCH_HAS_DMA_OPS
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
@@ -23,7 +24,6 @@ config PARISC
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
- select DMA_OPS
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
@@ -72,7 +72,7 @@ config PARISC
select GENERIC_SCHED_CLOCK
select GENERIC_IRQ_MIGRATION if SMP
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
- select LEGACY_TIMER_TICK
+ select GENERIC_CLOCKEVENTS
select CPU_NO_EFFICIENT_FFS
select THREAD_INFO_IN_TASK
select NEED_DMA_MAP_STATE
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
index 47c5a1991d10..89b6beeda0b8 100644
--- a/arch/parisc/include/asm/mman.h
+++ b/arch/parisc/include/asm/mman.h
@@ -11,4 +11,18 @@ static inline bool arch_memory_deny_write_exec_supported(void)
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+{
+ /*
+ * The stack on parisc grows upwards, so if userspace requests memory
+ * for a stack, mark it with VM_GROWSUP so that the stack expansion in
+ * the fault handler will work.
+ */
+ if (flags & MAP_STACK)
+ return VM_GROWSUP;
+
+ return 0;
+}
+#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+
#endif /* __ASM_MMAN_H__ */
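
As the comment in the hunk says, the parisc stack grows upwards, so a MAP_STACK request has to produce a VM_GROWSUP vma for the fault handler to expand. A host-side illustration of the hook's effect; the flag values below are placeholders, not the parisc ABI numbers:

#include <stdio.h>

#define MAP_STACK  0x20000	/* placeholder value */
#define VM_GROWSUP 0x0200	/* placeholder value */

/* Mirrors the shape of the new arch hook. */
static unsigned long calc_vm_flag_bits(unsigned long flags)
{
	return (flags & MAP_STACK) ? VM_GROWSUP : 0;
}

int main(void)
{
	printf("%#lx\n", calc_vm_flag_bits(MAP_STACK));	/* 0x200 */
	return 0;
}
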
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 982aca20f56f..77fac02188e1 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -298,7 +298,7 @@ extern unsigned int toc_handler_csum;
extern void do_cpu_irq_mask(struct pt_regs *);
extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);
-extern void start_cpu_itimer(void);
+extern void parisc_clockevent_init(void);
extern void handle_interruption(int, struct pt_regs *);
/* called from assembly code: */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index be264c2b1a11..38fc0b188e08 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -132,6 +132,12 @@
#define SO_PASSPIDFD 0x404A
#define SO_PEERPIDFD 0x404B
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ab23e61a6f01..ea57bcc21dc5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1051,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
-#if 0 && defined(CONFIG_64BIT)
- /* Revisit when we have 64-bit code above 4Gb */
+#if defined(CONFIG_64BIT)
b,n intr_save2
skip_save_ior:
@@ -1060,8 +1059,7 @@ skip_save_ior:
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
- extrd,u,* %r8,PSW_W_BIT,1,%r1
- cmpib,COND(=),n 1,%r1,intr_save2
+ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 800eb64e91ad..b2d12ab728b1 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -297,7 +297,7 @@ smp_cpu_init(int cpunum)
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
- start_cpu_itimer();
+ parisc_clockevent_init();
}
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index f7722451276e..f852fe274abe 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -167,7 +167,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, UP);
@@ -175,7 +176,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, DOWN);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 1f51aa9c8230..0fa81bf1466b 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -243,10 +243,10 @@ linux_gateway_entry:
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -379,10 +379,10 @@ tracesys_next:
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -1327,6 +1327,8 @@ ENTRY(sys_call_table)
END(sys_call_table)
#ifdef CONFIG_64BIT
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.align 8
ENTRY(sys_call_table64)
#include <asm/syscall_table_64.h> /* 64-bit syscalls */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9714fbd7c42d..c17e2249115f 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -1,166 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/arch/parisc/kernel/time.c
+ * Common time service routines for parisc machines.
+ * Based on arch/loongarch/kernel/time.c.
*
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
- * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
- *
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * Copyright (C) 2024 Helge Deller <deller@gmx.de>
*/
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/rtc.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/sched_clock.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/profile.h>
-#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <linux/ftrace.h>
+#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/param.h>
-#include <asm/pdc.h>
-#include <asm/led.h>
+static u64 cr16_clock_freq;
+static unsigned long clocktick;
-#include <linux/timex.h>
+int time_keeper_id; /* CPU used for timekeeping */
-int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
+static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device);
-static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
+static void parisc_event_handler(struct clock_event_device *dev)
+{
+}
-/*
- * We keep time on PA-RISC Linux by using the Interval Timer which is
- * a pair of registers; one is read-only and one is write-only; both
- * accessed through CR16. The read-only register is 32 or 64 bits wide,
- * and increments by 1 every CPU clock tick. The architecture only
- * guarantees us a rate between 0.5 and 2, but all implementations use a
- * rate of 1. The write-only register is 32-bits wide. When the lowest
- * 32 bits of the read-only register compare equal to the write-only
- * register, it raises a maskable external interrupt. Each processor has
- * an Interval Timer of its own and they are not synchronised.
- *
- * We want to generate an interrupt every 1/HZ seconds. So we program
- * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
- * is programmed with the intended time of the next tick. We can be
- * held off for an arbitrarily long period of time by interrupts being
- * disabled, so we may miss one or more ticks.
- */
-irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
- unsigned long now;
- unsigned long next_tick;
- unsigned long ticks_elapsed = 0;
- unsigned int cpu = smp_processor_id();
- struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
-
- /* gcc can optimize for "read-only" case with a local clocktick */
- unsigned long cpt = clocktick;
-
- /* Initialize next_tick to the old expected tick time. */
- next_tick = cpuinfo->it_value;
-
- /* Calculate how many ticks have elapsed. */
- now = mfctl(16);
- do {
- ++ticks_elapsed;
- next_tick += cpt;
- } while (next_tick - now > cpt);
-
- /* Store (in CR16 cycles) up to when we are accounting right now. */
- cpuinfo->it_value = next_tick;
-
- /* Go do system house keeping. */
- if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
- ticks_elapsed = 0;
- legacy_timer_tick(ticks_elapsed);
-
- /* Skip clockticks on purpose if we know we would miss those.
- * The new CR16 must be "later" than current CR16 otherwise
- * itimer would not fire until CR16 wrapped - e.g 4 seconds
- * later on a 1Ghz processor. We'll account for the missed
- * ticks on the next timer interrupt.
- * We want IT to fire modulo clocktick even if we miss/skip some.
- * But those interrupts don't in fact get delivered that regularly.
- *
- * "next_tick - now" will always give the difference regardless
- * if one or the other wrapped. If "now" is "bigger" we'll end up
- * with a very large unsigned number.
- */
- now = mfctl(16);
- while (next_tick - now > cpt)
- next_tick += cpt;
-
- /* Program the IT when to deliver the next interrupt.
- * Only bottom 32-bits of next_tick are writable in CR16!
- * Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 8000 cycles) to the
- * next cycle, simply skip it.
- */
- if (next_tick - now <= 8000)
- next_tick += cpt;
- mtctl(next_tick, 16);
+ unsigned long new_cr16;
- return IRQ_HANDLED;
-}
+ new_cr16 = mfctl(16) + delta;
+ mtctl(new_cr16, 16);
+ return 0;
+}
-unsigned long profile_pc(struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *data)
{
- unsigned long pc = instruction_pointer(regs);
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
- if (regs->gr[0] & PSW_N)
- pc -= 4;
+ cd = &per_cpu(parisc_clockevent_device, cpu);
-#ifdef CONFIG_SMP
- if (in_lock_functions(pc))
- pc = regs->gr[2];
-#endif
+ if (clockevent_state_periodic(cd))
+ parisc_timer_next_event(clocktick, cd);
- return pc;
+ if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd))
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
}
-EXPORT_SYMBOL(profile_pc);
+static int parisc_set_state_oneshot(struct clock_event_device *evt)
+{
+ parisc_timer_next_event(clocktick, evt);
-/* clock source code */
+ return 0;
+}
-static u64 notrace read_cr16(struct clocksource *cs)
+static int parisc_set_state_periodic(struct clock_event_device *evt)
{
- return get_cycles();
+ parisc_timer_next_event(clocktick, evt);
+
+ return 0;
}
-static struct clocksource clocksource_cr16 = {
- .name = "cr16",
- .rating = 300,
- .read = read_cr16,
- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
+static int parisc_set_state_shutdown(struct clock_event_device *evt)
+{
+ return 0;
+}
-void start_cpu_itimer(void)
+void parisc_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
- unsigned long next_tick = mfctl(16) + clocktick;
+ unsigned long min_delta = 0x600; /* XXX */
+ unsigned long max_delta = (1UL << (BITS_PER_LONG - 1));
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(parisc_clockevent_device, cpu);
+
+ cd->name = "cr16_clockevent";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = TIMER_IRQ;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = parisc_set_state_oneshot;
+ cd->set_state_oneshot_stopped = parisc_set_state_shutdown;
+ cd->set_state_periodic = parisc_set_state_periodic;
+ cd->set_state_shutdown = parisc_set_state_shutdown;
+ cd->set_next_event = parisc_timer_next_event;
+ cd->event_handler = parisc_event_handler;
+
+ clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta);
+}
+
+unsigned long notrace profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
- mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+ if (regs->gr[0] & PSW_N)
+ pc -= 4;
+
+#ifdef CONFIG_SMP
+ if (in_lock_functions(pc))
+ pc = regs->gr[2];
+#endif
- per_cpu(cpu_data, cpu).it_value = next_tick;
+ return pc;
}
+EXPORT_SYMBOL(profile_pc);
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
@@ -224,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts)
}
}
-
static u64 notrace read_cr16_sched_clock(void)
{
return get_cycles();
}
+static u64 notrace read_cr16(struct clocksource *cs)
+{
+ return get_cycles();
+}
+
+static struct clocksource clocksource_cr16 = {
+ .name = "cr16",
+ .rating = 300,
+ .read = read_cr16,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_VALID_FOR_HRES |
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
+};
+
/*
* timer interrupt and sched_clock() initialization
@@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void)
void __init time_init(void)
{
- unsigned long cr16_hz;
-
- clocktick = (100 * PAGE0->mem_10msec) / HZ;
- start_cpu_itimer(); /* get CPU 0 started */
-
- cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+ cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */
+ clocktick = cr16_clock_freq / HZ;
/* register as sched_clock source */
- sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
-}
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq);
-static int __init init_cr16_clocksource(void)
-{
- /*
- * The cr16 interval timers are not synchronized across CPUs.
- */
- if (num_online_cpus() > 1 && !running_on_qemu) {
- clocksource_cr16.name = "cr16_unstable";
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
- }
+ parisc_clockevent_init();
/* register at clocksource framework */
- clocksource_register_hz(&clocksource_cr16,
- 100 * PAGE0->mem_10msec);
-
- return 0;
+ clocksource_register_hz(&clocksource_cr16, cr16_clock_freq);
}
-
-device_initcall(init_cr16_clocksource);
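
The heart of the conversion is parisc_timer_next_event(): arming the interval timer is just writing "now + delta" back to CR16, whose writable compare half is 32 bits wide, so unsigned wraparound keeps the comparison correct across overflow. A host-side model of that arithmetic, using uint32_t for the writable register width:

#include <inttypes.h>
#include <stdio.h>

/* Models mtctl(mfctl(16) + delta, 16) from the hunk above. */
static int timer_next_event(uint32_t now, uint32_t delta, uint32_t *cr16)
{
	*cr16 = now + delta;	/* may wrap; that is fine */
	return 0;
}

int main(void)
{
	uint32_t cmp;

	timer_next_event(0xfffffff0u, 0x600, &cmp);
	printf("next compare = %#" PRIx32 "\n", cmp);	/* wraps to 0x5f0 */
	return 0;
}
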
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 1107ca819ac8..294b0e026c9a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -504,7 +504,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
- regs->iaoq[0] = 0 | 3;
+ regs->iaoq[0] = 0 | PRIV_USER;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index 0356199bd9e7..aa664f7ddb63 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -40,7 +40,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
/* we need to make sure the colouring is OK */
- return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d7b09b064a8a..8094a01974cc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -133,6 +133,7 @@ config PPC
select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
+ select ARCH_HAS_DMA_OPS if PPC64
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
@@ -185,7 +186,6 @@ config PPC
select CPUMASK_OFFSTACK if NR_CPUS >= 8192
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select DMA_OPS_BYPASS if PPC64
- select DMA_OPS if PPC64
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -269,6 +269,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RETHOOK if KPROBES
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
@@ -311,6 +312,7 @@ config PPC
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
+ select VDSO_GETRANDOM
#
# Please keep this list sorted alphabetically.
#
@@ -853,8 +855,8 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
- depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
- PPC_85xx
+ depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \
+ PPC_BOOK3S_32 || PPC_85xx
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -870,9 +872,9 @@ config DATA_SHIFT
range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
- default 23 if STRICT_KERNEL_RWX && PPC_8xx
- default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
- default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \
+ (PIN_TLB_DATA || PIN_TLB_TEXT)
+ default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 24 if STRICT_KERNEL_RWX && PPC_85xx
default PAGE_SHIFT
help
@@ -1026,6 +1028,10 @@ config PPC_MEM_KEYS
If unsure, say y.
+config ARCH_PKEY_BITS
+ int
+ default 5
+
config PPC_SECURE_BOOT
prompt "Enable secure boot support"
bool
@@ -1269,8 +1275,27 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
- default "0xb0000000" if PPC_BOOK3S_32
+ default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM
default "0xc0000000"
+
+config MODULES_SIZE_BOOL
+ bool "Set custom size for modules/execmem area"
+ depends on EXECMEM && ADVANCED_OPTIONS
+ help
+	  This option allows you to set the size of the kernel virtual
+	  address space dedicated to modules/execmem.
+	  For the time being this applies only to 8xx and book3s/32; other
+	  platforms share this area with vmalloc space.
+
+ Say N here unless you know what you are doing.
+
+config MODULES_SIZE
+ int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL
+ range 1 256 if EXECMEM
+ default 64 if EXECMEM && PPC_BOOK3S_32
+ default 32 if EXECMEM && PPC_8xx
+ default 0
+
endmenu
if PPC64
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 3799ceceb04a..0bbec4afc0d5 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -379,12 +379,6 @@ config FAIL_IOMMU
If you are unsure, say N.
-config PPC_FAST_ENDIAN_SWITCH
- bool "Deprecated fast endian-switch syscall"
- depends on DEBUG_KERNEL && PPC_BOOK3S_64
- help
- If you're unsure what this is, say N.
-
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
index ebfadd39e192..9506a96ebbcc 100644
--- a/arch/powerpc/boot/xz_config.h
+++ b/arch/powerpc/boot/xz_config.h
@@ -50,11 +50,8 @@ static inline void put_unaligned_be32(u32 val, void *p)
/* prevent the inclusion of the xz-preboot MM headers */
#define DECOMPR_MM_H
#define memmove memmove
-#define XZ_EXTERN static
/* xz.h needs to be included directly since we need enum xz_mode */
#include "../../../include/linux/xz.h"
-#undef XZ_EXTERN
-
#endif
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 544a65fda77b..a5e3e7f97f4d 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_SLAB_FREELIST_RANDOM=y
@@ -93,6 +92,7 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_ZONE_DEVICE=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 09ebcbdfb34f..46a4c85e85e2 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -107,6 +107,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10
tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+ depends on BROKEN
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
diff --git a/arch/powerpc/crypto/curve25519-ppc64le-core.c b/arch/powerpc/crypto/curve25519-ppc64le-core.c
index 4e3e44ea4484..f7810be0b292 100644
--- a/arch/powerpc/crypto/curve25519-ppc64le-core.c
+++ b/arch/powerpc/crypto/curve25519-ppc64le-core.c
@@ -295,5 +295,6 @@ module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-ppc64le");
+MODULE_DESCRIPTION("PPC64le Curve25519 scalar multiplication with 51 bits limbs");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Danny Tsen <dtsen@us.ibm.com>");
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 2bc53c646ccd..f48e644900a2 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -26,19 +26,23 @@
#define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
-#define LHZX_BE stringify_in_c(lhzx)
#define LWZX_BE stringify_in_c(lwzx)
#define LDX_BE stringify_in_c(ldx)
#define STWX_BE stringify_in_c(stwx)
#define STDX_BE stringify_in_c(stdx)
#else
-#define LHZX_BE stringify_in_c(lhbrx)
#define LWZX_BE stringify_in_c(lwbrx)
#define LDX_BE stringify_in_c(ldbrx)
#define STWX_BE stringify_in_c(stwbrx)
#define STDX_BE stringify_in_c(stdbrx)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#else /* 32-bit */
/* operations for longs and pointers */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 5bf6a4d49268..d1ea554c33ed 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,6 +11,7 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
+#include <asm/asm-compat.h>
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
else
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
return t;
}
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
else
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 52971ee30717..42c3af90d1f0 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va);
#endif
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
-#define MODULES_VADDR (MODULES_END - SZ_256M)
+#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
+#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
#ifndef __ASSEMBLY__
#include <linux/sched.h>
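
With the new MODULES_SIZE knob, the module area is simply MODULES_END minus a configurable number of megabytes instead of a hard-coded 256 MB. A quick arithmetic check, assuming the book3s/32 defaults from the Kconfig hunk above (PAGE_OFFSET 0xc0000000, CONFIG_MODULES_SIZE 64):

#include <stdio.h>

#define SZ_1M   0x00100000UL
#define SZ_256M 0x10000000UL
#define PAGE_OFFSET 0xc0000000UL
#define CONFIG_MODULES_SIZE 64		/* new book3s/32 default */

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define MODULES_END   ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_SIZE  (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)

int main(void)
{
	/* prints: modules: 0xbc000000 - 0xc0000000 */
	printf("modules: %#lx - %#lx\n", MODULES_VADDR, MODULES_END);
	return 0;
}
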
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index c654c376ef8b..c3efacab4b94 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -75,6 +75,26 @@
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
/*
+ * With 4K page size the real_pte machinery is all nops.
+ */
+#define __real_pte(e, p, o) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ index = 0; \
+ shift = mmu_psize_defs[psize].shift; \
+
+#define pte_iterate_hashed_end() } while(0)
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+
+/*
* 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
* a matter of returning the PTE bits that need to be modified. On 64K PTE,
* things are a little more involved and hence needs many more parameters to
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 519b1743a0f4..6d98e6f08d4d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud)
#ifndef __ASSEMBLY__
-/*
- * This is the default implementation of various PTE accessors, it's
- * used in all cases except Book3S with 64K pages where we have a
- * concept of sub-pages
- */
-#ifndef __real_pte
-
-#define __real_pte(e, p, o) ((real_pte_t){(e)})
-#define __rpte_to_pte(r) ((r).pte)
-#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * We expect this to be called only for user addresses or kernel virtual
- * addresses other than the linear mapping.
- */
-#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
-
-#endif /* __real_pte */
-
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long clr,
unsigned long set, int huge)
@@ -1124,6 +1098,7 @@ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
extern void set_pud_at(struct mm_struct *mm, unsigned long addr,
@@ -1384,6 +1359,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
+extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp);
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 0e29ccf903d0..e7f14720f630 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -76,6 +76,43 @@ int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
+/*
+ * The data patching functions patch_uint() and patch_ulong(), etc., must be
+ * called on aligned addresses.
+ *
+ * The instruction patching functions patch_instruction() and similar must be
+ * called on addresses satisfying instruction alignment requirements.
+ */
+
+#ifdef CONFIG_PPC64
+
+int patch_uint(void *addr, unsigned int val);
+int patch_ulong(void *addr, unsigned long val);
+
+#define patch_u64 patch_ulong
+
+#else
+
+static inline int patch_uint(void *addr, unsigned int val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
+ return -EINVAL;
+
+ return patch_instruction(addr, ppc_inst(val));
+}
+
+static inline int patch_ulong(void *addr, unsigned long val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
+ return -EINVAL;
+
+ return patch_instruction(addr, ppc_inst(val));
+}
+
+#endif
+
+#define patch_u32 patch_uint
+
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
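
The new patch_uint()/patch_ulong() helpers reject misaligned targets up front, so a data patch can never tear across a word boundary. A small sketch of the IS_ALIGNED() test they rely on (macro simplified from the kernel's <linux/align.h>):

#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long addr = 0x1002;

	/* patch_uint() would return -EINVAL here: 0x1002 is not
	 * 4-byte aligned. */
	printf("aligned: %d\n", (int)IS_ALIGNED(addr, sizeof(unsigned int)));
	return 0;
}
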
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 91a9fd53254f..5e34611de9ef 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -308,6 +308,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed);
int eeh_pe_configure(struct eeh_pe *pe);
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
+int eeh_pe_inject_mmio_error(struct pci_dev *pdev);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 2d6c886b40f4..23638d4e73ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -177,7 +177,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
if (user_mode(regs)) {
kuap_lock();
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ CT_WARN_ON(ct_state() != CT_STATE_USER);
user_exit_irqoff();
account_cpu_user_entry();
@@ -189,8 +189,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
* so avoid recursion.
*/
if (TRAP(regs) != INTERRUPT_PROGRAM)
- CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
- ct_state() != CONTEXT_IDLE);
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
+ ct_state() != CT_STATE_IDLE);
INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
search_kernel_restart_table(regs->nip));
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 17a77d47ed6d..42a51a993d94 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -6,7 +6,7 @@
#include <uapi/asm/mman.h>
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && !defined(BUILD_VDSO)
#include <asm/cputable.h>
#include <linux/mm.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 37bffa0f7918..a157ab513347 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -116,9 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-extern int use_cop(unsigned long acop, struct mm_struct *mm);
-extern void drop_cop(unsigned long acop, struct mm_struct *mm);
-
#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
@@ -260,15 +257,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
extern void arch_exit_mmap(struct mm_struct *mm);
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- unsigned long vdso_base = (unsigned long)mm->context.vdso;
-
- if (start <= vdso_base && vdso_base < end)
- mm->context.vdso = NULL;
-}
-
#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index da827d2d0866..d99863cd6cde 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -20,12 +20,6 @@
#ifdef CONFIG_NUMA
-extern struct pglist_data *node_data[];
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid) (node_data[nid])
-
/*
* Following are specific to this numa platform.
*/
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index a756a1e59c54..2986f9ba40b8 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -170,8 +170,9 @@
#define mmu_linear_psize MMU_PAGE_8M
-#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
#define MODULES_END PAGE_OFFSET
+#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
+#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index d06efac6d7aa..bb5f3e8ea912 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -19,8 +19,14 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
+
+#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+ (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+#endif
+ return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
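
The memcpy added to pgd_alloc() seeds the kernel half of each new page directory from swapper_pg_dir on 8xx and 603, so kernel mappings resolve without falling back to init_mm. A hedged restatement of the invariant it establishes (the helper below is illustrative, not from the patch):

    /* True when the kernel half of @pgd mirrors swapper_pg_dir, which is
     * exactly what the memcpy in pgd_alloc() guarantees at allocation. */
    static bool pgd_kernel_half_synced(pgd_t *pgd)
    {
            return !memcmp(pgd + USER_PTRS_PER_PGD,
                           swapper_pg_dir + USER_PTRS_PER_PGD,
                           (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    }
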
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 634970ce13c6..ecf5ac70cfae 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -23,7 +23,7 @@ DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
(static_key_enabled(&__percpu_first_chunk_is_paged.key))
#else
#define percpu_first_chunk_is_paged false
-#endif /* CONFIG_PPC64 && CONFIG_SMP */
+#endif
#include <asm-generic/percpu.h>
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 264a6c09517a..2f72ad885332 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -65,6 +65,7 @@ static inline unsigned long pte_pfn(pte_t pte)
/*
* Select all bits except the pfn
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags;
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 065ffd1b2f8a..04406162fc5a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -397,6 +397,7 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
#define PSERIES_HP_ELOG_RESOURCE_PHB 4
#define PSERIES_HP_ELOG_RESOURCE_PMEM 6
+#define PSERIES_HP_ELOG_RESOURCE_DT 7
#define PSERIES_HP_ELOG_ACTION_ADD 1
#define PSERIES_HP_ELOG_ACTION_REMOVE 2
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 15c5691dd218..6ebca2996f18 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -226,6 +226,10 @@ static inline int arch_within_stack_frames(const void * const stack,
return BAD_STACK;
}
+#ifdef CONFIG_PPC32
+extern void *emergency_ctx[];
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index fd594bf6c6a9..4f5a46a77fa2 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -6,6 +6,7 @@
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
+#include <asm/asm-compat.h>
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -92,12 +93,6 @@ __pu_failed: \
: label)
#endif
-#ifdef CONFIG_CC_IS_CLANG
-#define DS_FORM_CONSTRAINT "Z<>"
-#else
-#define DS_FORM_CONSTRAINT "YZ<>"
-#endif
-
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..501d6bb14e8a
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/getrandom.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France
+ */
+#ifndef _ASM_POWERPC_VDSO_GETRANDOM_H
+#define _ASM_POWERPC_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+static __always_inline int do_syscall_3(const unsigned long _r0, const unsigned long _r3,
+ const unsigned long _r4, const unsigned long _r5)
+{
+ register long r0 asm("r0") = _r0;
+ register unsigned long r3 asm("r3") = _r3;
+ register unsigned long r4 asm("r4") = _r4;
+ register unsigned long r5 asm("r5") = _r5;
+ register int ret asm ("r3");
+
+ asm volatile(
+ " sc\n"
+ " bns+ 1f\n"
+ " neg %0, %0\n"
+ "1:\n"
+ : "=r" (ret), "+r" (r4), "+r" (r5), "+r" (r0)
+ : "r" (r3)
+ : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");
+
+ return ret;
+}
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+ return do_syscall_3(__NR_getrandom, (unsigned long)buffer,
+ (unsigned long)len, (unsigned long)flags);
+}
+
+static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ return NULL;
+}
+
+ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
+ size_t opaque_len, const struct vdso_rng_data *vd);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_GETRANDOM_H */
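
The "bns+ 1f / neg" pair in do_syscall_3() converts the powerpc syscall error convention (CR0.SO set, positive errno in r3) into the Linux in-kernel convention of returning -errno. A hedged C restatement of that normalisation:

    /* What the branch-and-negate does, in C: if the summary-overflow bit
     * was set by 'sc', r3 holds a positive errno and is negated. */
    static long normalise_sc_return(long r3, bool so_flag)
    {
            return so_flag ? -r3 : r3;
    }
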
diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h
index 48cf23f1e273..92f480d8cc6d 100644
--- a/arch/powerpc/include/asm/vdso/vsyscall.h
+++ b/arch/powerpc/include/asm/vdso/vsyscall.h
@@ -17,6 +17,12 @@ struct vdso_data *__arch_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __arch_get_k_vdso_data
+static __always_inline
+struct vdso_rng_data *__arch_get_k_vdso_rng_data(void)
+{
+ return &vdso_data->rng_data;
+}
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index a585c8e538ff..248dee138f7b 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -83,6 +83,7 @@ struct vdso_arch_data {
__u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
struct vdso_data data[CS_BASES];
+ struct vdso_rng_data rng_data;
};
#else /* CONFIG_PPC64 */
@@ -95,6 +96,7 @@ struct vdso_arch_data {
__u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
__u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
struct vdso_data data[CS_BASES];
+ struct vdso_rng_data rng_data;
};
#endif /* CONFIG_PPC64 */
@@ -111,6 +113,21 @@ extern struct vdso_arch_data *vdso_data;
addi \ptr, \ptr, (_vdso_datapage - 999b)@l
.endm
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+.macro get_realdatapage ptr scratch
+ get_datapage \ptr
+#ifdef CONFIG_TIME_NS
+ lwz \scratch, VDSO_CLOCKMODE_OFFSET(\ptr)
+ xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h
+ xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l
+ cntlzw \scratch, \scratch
+ rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT
+ add \ptr, \ptr, \scratch
+#endif
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
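
The get_realdatapage macro resolves the real vDSO data page without a branch: when data[0].clock_mode equals VDSO_CLOCKMODE_TIMENS, the mapped page is the time-namespace page and the real data sits one page further on. A hedged C model of the cntlzw/rlwinm trick, assuming that layout:

    /* cntlzw returns 32 only when its input is 0, i.e. only when
     * clock_mode == VDSO_CLOCKMODE_TIMENS; bit 5 of that result, moved
     * up to bit PAGE_SHIFT, yields PAGE_SIZE or 0 with no branch. */
    static unsigned long real_datapage(unsigned long page, u32 clock_mode)
    {
            u32 x = clock_mode ^ VDSO_CLOCKMODE_TIMENS;
            u32 clz = x ? __builtin_clz(x) : 32;    /* cntlzw semantics */

            return page + (((unsigned long)clz << (PAGE_SHIFT - 5)) & PAGE_SIZE);
    }
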
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1784b6a6ca1d..f43c1198768c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23733282de4d..131a8cc10dbe 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -335,6 +335,7 @@ int main(void)
/* datapage offsets for use by vdso */
OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
+ OFFSET(VDSO_RNG_DATA_OFFSET, vdso_arch_data, rng_data);
OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
@@ -346,6 +347,8 @@ int main(void)
#else
OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
#endif
+ OFFSET(VDSO_CLOCKMODE_OFFSET, vdso_arch_data, data[0].clock_mode);
+ DEFINE(VDSO_CLOCKMODE_TIMENS, VDSO_CLOCKMODE_TIMENS);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index f502337dd37d..0fcc463b02e2 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -735,7 +735,7 @@ static const struct sysfs_ops cache_index_ops = {
.show = cache_index_show,
};
-static struct kobj_type cache_index_type = {
+static const struct kobj_type cache_index_type = {
.release = cache_index_release,
.sysfs_ops = &cache_index_ops,
.default_groups = cache_index_default_groups,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index d03f17987fca..83fe99861eb1 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1537,10 +1537,6 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
if (!eeh_ops || !eeh_ops->err_inject)
return -ENOENT;
- /* Check on PCI error type */
- if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
- return -EINVAL;
-
/* Check on PCI error function */
if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
return -EINVAL;
@@ -1578,6 +1574,104 @@ static int proc_eeh_show(struct seq_file *m, void *v)
}
#endif /* CONFIG_PROC_FS */
+static int eeh_break_device(struct pci_dev *pdev)
+{
+ struct resource *bar = NULL;
+ void __iomem *mapped;
+ u16 old, bit;
+ int i, pos;
+
+ /* Do we have an MMIO BAR to disable? */
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if (!r->flags || !r->start)
+ continue;
+ if (r->flags & IORESOURCE_IO)
+ continue;
+ if (r->flags & IORESOURCE_UNSET)
+ continue;
+
+ bar = r;
+ break;
+ }
+
+ if (!bar) {
+ pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
+ return -ENXIO;
+ }
+
+ pci_err(pdev, "Going to break: %pR\n", bar);
+
+ if (pdev->is_virtfn) {
+#ifndef CONFIG_PCI_IOV
+ return -ENXIO;
+#else
+ /*
+ * VFs don't have a per-function COMMAND register, so the best
+ * we can do is clear the Memory Space Enable bit in the PF's
+ * SRIOV control reg.
+ *
+ * Unfortunately, this requires that we have a PF (i.e. doesn't
+ * work for a passed-through VF) and it has the potential side
+ * effect of also causing an EEH on every other VF under the
+ * PF. Oh well.
+ */
+ pdev = pdev->physfn;
+ if (!pdev)
+ return -ENXIO; /* passed through VFs have no PF */
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pos += PCI_SRIOV_CTRL;
+ bit = PCI_SRIOV_CTRL_MSE;
+#endif /* !CONFIG_PCI_IOV */
+ } else {
+ bit = PCI_COMMAND_MEMORY;
+ pos = PCI_COMMAND;
+ }
+
+ /*
+ * Process here is:
+ *
+ * 1. Disable Memory space.
+ *
+ * 2. Perform an MMIO to the device. This should result in an error
+ * (CA / UR) being raised by the device, which results in an EEH
+ * PE freeze. Using the in_8() accessor skips the EEH detection hook,
+ * so the EEH detection machinery won't be triggered here. This is to
+ * match the usual behaviour of EEH, where the HW will asynchronously
+ * freeze a PE and it's up to the kernel to notice and deal with it.
+ *
+ * 3. Turn Memory space back on. This is more important for VFs,
+ * since recovery will probably fail if we don't. For normal
+ * devices the COMMAND register is reset as a part of
+ * re-initialising the device.
+ *
+ * Breaking stuff is the point so who cares if it's racy ;)
+ */
+ pci_read_config_word(pdev, pos, &old);
+
+ mapped = ioremap(bar->start, PAGE_SIZE);
+ if (!mapped) {
+ pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
+ return -ENXIO;
+ }
+
+ pci_write_config_word(pdev, pos, old & ~bit);
+ in_8(mapped);
+ pci_write_config_word(pdev, pos, old);
+
+ iounmap(mapped);
+
+ return 0;
+}
+
+int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
+{
+ return eeh_break_device(pdev);
+}
+
#ifdef CONFIG_DEBUG_FS
@@ -1682,7 +1776,6 @@ static ssize_t eeh_force_recover_write(struct file *filp,
static const struct file_operations eeh_force_recover_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = eeh_force_recover_write,
};
@@ -1726,104 +1819,10 @@ static ssize_t eeh_dev_check_write(struct file *filp,
static const struct file_operations eeh_dev_check_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = eeh_dev_check_write,
.read = eeh_debugfs_dev_usage,
};
-static int eeh_debugfs_break_device(struct pci_dev *pdev)
-{
- struct resource *bar = NULL;
- void __iomem *mapped;
- u16 old, bit;
- int i, pos;
-
- /* Do we have an MMIO BAR to disable? */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
- struct resource *r = &pdev->resource[i];
-
- if (!r->flags || !r->start)
- continue;
- if (r->flags & IORESOURCE_IO)
- continue;
- if (r->flags & IORESOURCE_UNSET)
- continue;
-
- bar = r;
- break;
- }
-
- if (!bar) {
- pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
- return -ENXIO;
- }
-
- pci_err(pdev, "Going to break: %pR\n", bar);
-
- if (pdev->is_virtfn) {
-#ifndef CONFIG_PCI_IOV
- return -ENXIO;
-#else
- /*
- * VFs don't have a per-function COMMAND register, so the best
- * we can do is clear the Memory Space Enable bit in the PF's
- * SRIOV control reg.
- *
- * Unfortunately, this requires that we have a PF (i.e. doesn't
- * work for a passed-through VF) and it has the potential side
- * effect of also causing an EEH on every other VF under the
- * PF. Oh well.
- */
- pdev = pdev->physfn;
- if (!pdev)
- return -ENXIO; /* passed through VFs have no PF */
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- pos += PCI_SRIOV_CTRL;
- bit = PCI_SRIOV_CTRL_MSE;
-#endif /* !CONFIG_PCI_IOV */
- } else {
- bit = PCI_COMMAND_MEMORY;
- pos = PCI_COMMAND;
- }
-
- /*
- * Process here is:
- *
- * 1. Disable Memory space.
- *
- * 2. Perform an MMIO to the device. This should result in an error
- * (CA / UR) being raised by the device, which results in an EEH
- * PE freeze. Using the in_8() accessor skips the EEH detection hook,
- * so the EEH detection machinery won't be triggered here. This is to
- * match the usual behaviour of EEH, where the HW will asynchronously
- * freeze a PE and it's up to the kernel to notice and deal with it.
- *
- * 3. Turn Memory space back on. This is more important for VFs,
- * since recovery will probably fail if we don't. For normal
- * devices the COMMAND register is reset as a part of
- * re-initialising the device.
- *
- * Breaking stuff is the point so who cares if it's racy ;)
- */
- pci_read_config_word(pdev, pos, &old);
-
- mapped = ioremap(bar->start, PAGE_SIZE);
- if (!mapped) {
- pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
- return -ENXIO;
- }
-
- pci_write_config_word(pdev, pos, old & ~bit);
- in_8(mapped);
- pci_write_config_word(pdev, pos, old);
-
- iounmap(mapped);
-
- return 0;
-}
-
static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1835,7 +1834,7 @@ static ssize_t eeh_dev_break_write(struct file *filp,
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- ret = eeh_debugfs_break_device(pdev);
+ ret = eeh_break_device(pdev);
pci_dev_put(pdev);
if (ret < 0)
@@ -1846,7 +1845,6 @@ static ssize_t eeh_dev_break_write(struct file *filp,
static const struct file_operations eeh_dev_break_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = eeh_dev_break_write,
.read = eeh_debugfs_dev_usage,
};
@@ -1893,7 +1891,6 @@ static ssize_t eeh_dev_can_recover(struct file *filp,
static const struct file_operations eeh_dev_can_recover_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = eeh_dev_can_recover,
.read = eeh_debugfs_dev_usage,
};
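
With eeh_break_device() hoisted out of the debugfs-only section, the sequence it implements (clear Memory Space Enable, poke an MMIO BAR, restore) is now reachable from kernel code through the new eeh_pe_inject_mmio_error() export as well as the existing eeh_dev_break debugfs knob. A hedged caller sketch — the self-test wrapper below is hypothetical:

    /* Force a PE freeze on the device under test; 0 on success,
     * -ENXIO if no usable memory BAR (or PF) could be found. */
    static int selftest_force_freeze(struct pci_dev *pdev)
    {
            int ret = eeh_pe_inject_mmio_error(pdev);

            if (ret)
                    pci_err(pdev, "EEH MMIO error injection failed: %d\n", ret);
            return ret;
    }
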
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eaf2f167c342..195b075d116c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1989,13 +1989,6 @@ INT_DEFINE_END(system_call)
INTERRUPT_TO_KERNEL
#endif
-#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
-BEGIN_FTR_SECTION
- cmpdi r0,0x1ebe
- beq- 1f
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
-#endif
-
/* We reach here with PACA in r13, r13 in r9. */
mfspr r11,SPRN_SRR0
mfspr r12,SPRN_SRR1
@@ -2015,16 +2008,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b system_call_common
#endif
.endif
-
-#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
- /* Fast LE/BE switch system call */
-1: mfspr r12,SPRN_SRR1
- xori r12,r12,MSR_LE
- mtspr SPRN_SRR1,r12
- mr r13,r9
- RFI_TO_USER /* return to userspace */
- b . /* prevent speculative execution */
-#endif
.endm
EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index ac74321b1192..811a7130505c 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -40,16 +40,6 @@
#include "head_32.h"
-.macro compare_to_kernel_boundary scratch, addr
-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
-/* By simply checking Address >= 0x80000000, we know if its a kernel address */
- not. \scratch, \addr
-#else
- rlwinm \scratch, \addr, 16, 0xfff8
- cmpli cr0, \scratch, PAGE_OFFSET@h
-#endif
-.endm
-
#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23
@@ -199,18 +189,7 @@ instruction_counter:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
mtspr SPRN_MD_EPN, r10
-#ifdef CONFIG_EXECMEM
- mfcr r11
- compare_to_kernel_boundary r10, r10
-#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
-#ifdef CONFIG_EXECMEM
- blt+ 3f
- rlwinm r10, r10, 0, 20, 31
- oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
-3:
- mtcr r11
-#endif
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MD_TWC, r11
mfspr r10, SPRN_MD_TWC
@@ -248,19 +227,12 @@ instruction_counter:
START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
- mfcr r11
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
- compare_to_kernel_boundary r10, r10
mfspr r10, SPRN_M_TWB /* Get level 1 table */
- blt+ 3f
- rlwinm r10, r10, 0, 20, 31
- oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
-3:
- mtcr r11
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MD_TWC, r11
@@ -332,15 +304,19 @@ instruction_counter:
cmpwi cr1, r11, RPN_PATTERN
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
+ mfspr r11, SPRN_DSISR
+ rlwinm r11, r11, 0, DSISR_NOHPTE
+ cmpwi cr1, r11, 0
+ beq+ cr1, .Ldtlbie
+ mfspr r11, SPRN_DAR
+ tlbie r11
+ rlwinm r11, r11, 16, 0xffff
+ cmplwi cr1, r11, TASK_SIZE@h
+ bge- cr1, FixupPGD
+.Ldtlbie:
EXCEPTION_PROLOG_1
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
- lwz r4, _DAR(r11)
- lwz r5, _DSISR(r11)
- andis. r10,r5,DSISR_NOHPTE@h
- beq+ .Ldtlbie
- tlbie r4
-.Ldtlbie:
prepare_transfer_to_handler
bl do_page_fault
b interrupt_return
@@ -394,6 +370,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */
__HEAD
. = 0x2000
+FixupPGD:
+ mtspr SPRN_M_TW, r10
+ mfspr r10, SPRN_DAR
+ mtspr SPRN_MD_EPN, r10
+ mfspr r11, SPRN_M_TWB /* Get level 1 table */
+ lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ cmpwi cr1, r10, 0
+ bne cr1, 1f
+
+ rlwinm r10, r11, 0, 20, 31
+ oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
+ lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r10) /* Get the level 1 entry */
+ cmpwi cr1, r10, 0
+ beq cr1, 1f
+ stw r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Set the level 1 entry */
+ mfspr r10, SPRN_M_TW
+ mtcr r10
+ mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ rfi
+1:
+ mfspr r10, SPRN_M_TW
+ b .Ldtlbie
+
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
* DAR is set to the calculated address.
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r10, SPRN_SRR0
mtspr SPRN_MD_EPN, r10
rlwinm r11, r10, 16, 0xfff8
- cmpli cr1, r11, PAGE_OFFSET@h
+ cmpli cr1, r11, TASK_SIZE@h
mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ cr1, 3f
@@ -587,6 +587,10 @@ start_here:
lis r0, (MD_TWAM | MD_RSV4I)@h
mtspr SPRN_MD_CTR, r0
#endif
+#ifndef CONFIG_PIN_TLB_TEXT
+ li r0, 0
+ mtspr SPRN_MI_CTR, r0
+#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
lis r0, MD_TWAM@h
mtspr SPRN_MD_CTR, r0
@@ -683,6 +687,7 @@ SYM_FUNC_START_LOCAL(initial_mmu)
blr
SYM_FUNC_END(initial_mmu)
+#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h
ori r9, r9, (1f - PAGE_OFFSET)@l
@@ -704,6 +709,7 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_MD_CTR, r6
tlbia
+#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
@@ -724,6 +730,7 @@ _GLOBAL(mmu_pin_tlb)
bdnzt lt, 2b
lis r0, MI_RSV4I@h
mtspr SPRN_MI_CTR, r0
+#endif
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
@@ -783,3 +790,4 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
+#endif
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 57196883a00e..cb2bca76be53 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -411,39 +411,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
*/
. = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
-/*
- * r0: userspace flag (later scratch)
- * r1: linux style pte ( later becomes ppc hardware pte )
- * r2: ptr to linux-style pte
- * r3: fault address
- */
/* Get PTE (linux-style) and check access */
- mfspr r3,SPRN_IMISS
-#ifdef CONFIG_EXECMEM
- lis r1, TASK_SIZE@h /* check if kernel address */
- cmplw 0,r1,r3
-#endif
+ mfspr r0,SPRN_IMISS
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
rlwinm r2, r2, 28, 0xfffff000
+ rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
+ lwz r2,0(r2) /* get pmd entry */
#ifdef CONFIG_EXECMEM
- li r0, 3
- bgt- 112f
- lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r0, 0
- addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ rlwinm r3, r0, 4, 0xf
+ subi r3, r3, (TASK_SIZE >> 28) & 0xf
#endif
-112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
- lwz r2,0(r2) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- InstructionAddressInvalid /* return if no mapping */
- rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
+ rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
lwz r2,0(r2) /* get linux-style pte */
andc. r1,r1,r2 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
#ifdef CONFIG_EXECMEM
- rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
+ rlwimi r2, r3, 1, 31, 31 /* userspace ? -> PP lsb */
#endif
ori r1, r1, 0xe06 /* clear out reserved bits */
andc r1, r2, r1 /* PP = user? 1 : 0 */
@@ -451,7 +438,7 @@ BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
- tlbli r3
+ tlbli r0
mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
rfi
@@ -480,35 +467,24 @@ InstructionAddressInvalid:
*/
. = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
-/*
- * r0: userspace flag (later scratch)
- * r1: linux style pte ( later becomes ppc hardware pte )
- * r2: ptr to linux-style pte
- * r3: fault address
- */
/* Get PTE (linux-style) and check access */
- mfspr r3,SPRN_DMISS
- lis r1, TASK_SIZE@h /* check if kernel address */
- cmplw 0,r1,r3
+ mfspr r0,SPRN_DMISS
mfspr r2, SPRN_SDR1
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
- rlwinm r2, r2, 28, 0xfffff000
- li r0, 3
- bgt- 112f
- lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r0, 0
- addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
-112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
- lwz r2,0(r2) /* get pmd entry */
+ rlwinm r1, r2, 28, 0xfffff000
+ rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
+ lwz r2,0(r1) /* get pmd entry */
+ rlwinm r3, r0, 4, 0xf
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
- beq- DataAddressInvalid /* return if no mapping */
- rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
+ subi r3, r3, (TASK_SIZE >> 28) & 0xf
+ beq- 2f /* bail if no mapping */
+1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
lwz r2,0(r2) /* get linux-style pte */
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
- rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
+ rlwimi r2,r3,2,30,31 /* userspace ? -> PP */
rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
ori r1,r1,0xe04 /* clear out reserved bits */
@@ -518,25 +494,35 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
BEGIN_MMU_FTR_SECTION
- li r0,1
+ li r3,1
mfspr r1,SPRN_SPRG_603_LRU
- rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
- slw r0,r0,r2
- xor r1,r0,r1
- srw r0,r1,r2
+ rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
+ slw r3,r3,r2
+ xor r1,r3,r1
+ srw r3,r1,r2
mtspr SPRN_SPRG_603_LRU,r1
mfspr r2,SPRN_SRR1
- rlwimi r2,r0,31-14,14,14
+ rlwimi r2,r3,31-14,14,14
mtspr SPRN_SRR1,r2
mtcrf 0x80,r2
- tlbld r3
+ tlbld r0
rfi
MMU_FTR_SECTION_ELSE
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
- tlbld r3
+ tlbld r0
rfi
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+
+2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
+ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
+ lwz r2,0(r2) /* get pmd entry */
+ cmpwi cr0,r2,0
+ beq- DataAddressInvalid /* return if no mapping */
+ stw r2,0(r1)
+ rlwinm. r2,r2,0,0,19 /* extract address of pte page */
+ b 1b
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -560,34 +546,24 @@ DataAddressInvalid:
*/
. = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
-/*
- * r0: userspace flag (later scratch)
- * r1: linux style pte ( later becomes ppc hardware pte )
- * r2: ptr to linux-style pte
- * r3: fault address
- */
/* Get PTE (linux-style) and check access */
- mfspr r3,SPRN_DMISS
- lis r1, TASK_SIZE@h /* check if kernel address */
- cmplw 0,r1,r3
+ mfspr r0,SPRN_DMISS
mfspr r2, SPRN_SDR1
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
- rlwinm r2, r2, 28, 0xfffff000
- li r0, 3
- bgt- 112f
- lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r0, 0
- addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
-112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
- lwz r2,0(r2) /* get pmd entry */
+ rlwinm r1, r2, 28, 0xfffff000
+ rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
+ lwz r2,0(r1) /* get pmd entry */
+ rlwinm r3, r0, 4, 0xf
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
- beq- DataAddressInvalid /* return if no mapping */
- rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
+ subi r3, r3, (TASK_SIZE >> 28) & 0xf
+ beq- 2f /* bail if no mapping */
+1:
+ rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
lwz r2,0(r2) /* get linux-style pte */
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
+ rlwimi r2,r3,1,31,31 /* userspace ? -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
andc r1,r2,r1 /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
@@ -597,26 +573,36 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
- li r0,1
+ li r3,1
mfspr r1,SPRN_SPRG_603_LRU
- rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
- slw r0,r0,r2
- xor r1,r0,r1
- srw r0,r1,r2
+ rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
+ slw r3,r3,r2
+ xor r1,r3,r1
+ srw r3,r1,r2
mtspr SPRN_SPRG_603_LRU,r1
mfspr r2,SPRN_SRR1
- rlwimi r2,r0,31-14,14,14
+ rlwimi r2,r3,31-14,14,14
mtspr SPRN_SRR1,r2
mtcrf 0x80,r2
- tlbld r3
+ tlbld r0
rfi
MMU_FTR_SECTION_ELSE
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
- tlbld r3
+ tlbld r0
rfi
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
+ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
+ lwz r2,0(r2) /* get pmd entry */
+ cmpwi cr0,r2,0
+ beq- DataAddressInvalid /* return if no mapping */
+ stw r2,0(r1)
+ rlwinm r2,r2,0,0,19 /* extract address of pte page */
+ b 1b
+
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
#endif
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index eca293794a1e..af62ec974b97 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -266,7 +266,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
unsigned long ret = 0;
bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
kuap_assert_locked();
@@ -344,7 +344,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
BUG_ON(regs_is_unrecoverable(regs));
BUG_ON(arch_irq_disabled_regs(regs));
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
@@ -386,7 +386,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
TRAP(regs) != INTERRUPT_PROGRAM &&
TRAP(regs) != INTERRUPT_PERFMON)
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ CT_WARN_ON(ct_state() == CT_STATE_USER);
kuap = kuap_get_and_assert_locked();
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 14c5ddec3056..f8aa91bc3b17 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -228,16 +228,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
kcb->kprobe_saved_msr = regs->msr;
}
-void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *)regs->link;
- ri->fp = NULL;
-
- /* Replace the return addr with trampoline addr */
- regs->link = (unsigned long)__kretprobe_trampoline;
-}
-NOKPROBE_SYMBOL(arch_prepare_kretprobe);
-
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
@@ -395,49 +385,6 @@ no_kprobe:
NOKPROBE_SYMBOL(kprobe_handler);
/*
- * Function return probe trampoline:
- * - init_kprobes() establishes a probepoint here
- * - When the probed function returns, this probe
- * causes the handlers to fire
- */
-asm(".global __kretprobe_trampoline\n"
- ".type __kretprobe_trampoline, @function\n"
- "__kretprobe_trampoline:\n"
- "nop\n"
- "blr\n"
- ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
-
-/*
- * Called when the probe at kretprobe trampoline is hit
- */
-static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-{
- unsigned long orig_ret_address;
-
- orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
- /*
- * We get here through one of two paths:
- * 1. by taking a trap -> kprobe_handler() -> here
- * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
- *
- * When going back through (1), we need regs->nip to be setup properly
- * as it is used to determine the return address from the trap.
- * For (2), since nip is not honoured with optprobes, we instead setup
- * the link register properly so that the subsequent 'blr' in
- * __kretprobe_trampoline jumps back to the right instruction.
- *
- * For nip, we should set the address to the previous instruction since
- * we end up emulating it in kprobe_handler(), which increments the nip
- * again.
- */
- regs_set_return_ip(regs, orig_ret_address - 4);
- regs->link = orig_ret_address;
-
- return 0;
-}
-NOKPROBE_SYMBOL(trampoline_probe_handler);
-
-/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "breakpoint"
* instruction. To avoid the SMP problems that can occur when we
@@ -539,19 +486,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
- return register_kprobe(&trampoline_p);
-}
-
int arch_trampoline_kprobe(struct kprobe *p)
{
- if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
return 1;
return 0;
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 7112adc597a8..e9bab599d0c2 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -651,12 +651,11 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
// func_desc_t is 8 bytes if ABIv2, else 16 bytes
desc = func_desc(addr);
for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
- if (patch_instruction(((u32 *)&entry->funcdata) + i,
- ppc_inst(((u32 *)(&desc))[i])))
+ if (patch_u32(((u32 *)&entry->funcdata) + i, ((u32 *)&desc)[i]))
return 0;
}
- if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+ if (patch_u32(&entry->magic, STUB_MAGIC))
return 0;
return 1;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index e385d3164648..f9c6568a9137 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = {
};
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
+ struct kmsg_dump_detail *detail);
static struct kmsg_dumper nvram_kmsg_dumper = {
.dump = oops_to_nvram
@@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists)
* partition. If that's too much, go back and capture uncompressed text.
*/
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
@@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
int rc = -1;
- switch (reason) {
+ switch (detail->reason) {
case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
@@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
+ __func__, (int) detail->reason);
return;
}
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 004fae2044a3..c0b351d61058 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p)
* has a 'nop' instruction, which can be emulated.
* So further checks can be skipped.
*/
- if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
return addr + sizeof(kprobe_opcode_t);
/*
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3b506d4c55f3..ff61a3e7984c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -72,8 +72,6 @@
#define TM_DEBUG(x...) do { } while(0)
#endif
-extern unsigned long _get_SP(void);
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Are we running in "Suspend disabled" mode? If so we have to block any
@@ -2177,10 +2175,10 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
return 0;
}
+#ifdef CONFIG_PPC64
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
-#ifdef CONFIG_PPC64
unsigned long stack_page;
unsigned long cpu = task_cpu(p);
@@ -2208,10 +2206,26 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
# endif
-#endif
return 0;
}
+#else
+static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
+{
+ unsigned long stack_page;
+ unsigned long cpu = task_cpu(p);
+
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return 0;
+
+ stack_page = (unsigned long)emergency_ctx[cpu] - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+ return 0;
+}
+#endif
/*
* validate the stack frame of a particular minimum size, used for when we are
diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c
new file mode 100644
index 000000000000..5f5f47ae82cf
--- /dev/null
+++ b/arch/powerpc/kernel/rethook.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PowerPC implementation of rethook. This depends on kprobes.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+
+/*
+ * Function return trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe
+ * causes the handlers to fire
+ */
+asm(".global arch_rethook_trampoline\n"
+ ".type arch_rethook_trampoline, @function\n"
+ "arch_rethook_trampoline:\n"
+ "nop\n"
+ "blr\n"
+ ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n");
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ return !rethook_trampoline_handler(regs, regs->gpr[1]);
+}
+NOKPROBE_SYMBOL(trampoline_rethook_handler);
+
+void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount)
+{
+ rh->ret_addr = regs->link;
+ rh->frame = regs->gpr[1];
+
+ /* Replace the return addr with trampoline addr */
+ regs->link = (unsigned long)arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
+
+/* This is called from rethook_trampoline_handler(). */
+void arch_rethook_fixup_return(struct pt_regs *regs, unsigned long orig_ret_address)
+{
+ /*
+ * We get here through one of two paths:
+ * 1. by taking a trap -> kprobe_handler() -> here
+ * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
+ *
+ * When going back through (1), we need regs->nip to be set up properly
+ * as it is used to determine the return address from the trap.
+ * For (2), since nip is not honoured with optprobes, we instead set up
+ * the link register properly so that the subsequent 'blr' in
+ * arch_rethook_trampoline jumps back to the right instruction.
+ *
+ * For nip, we should set the address to the previous instruction since
+ * we end up emulating it in kprobe_handler(), which increments the nip
+ * again.
+ */
+ regs_set_return_ip(regs, orig_ret_address - 4);
+ regs->link = orig_ret_address;
+}
+NOKPROBE_SYMBOL(arch_rethook_fixup_return);
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &arch_rethook_trampoline,
+ .pre_handler = trampoline_rethook_handler
+};
+
+/* rethook initializer */
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
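
Since rethook.c now owns the return trampoline, existing kretprobe users are unaffected: the generic kretprobe layer rides on rethook when CONFIG_RETHOOK is enabled. A hedged reminder of that unchanged user-facing API (the probed symbol is illustrative):

    #include <linux/kprobes.h>

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            /* Runs on return from the probed function, via the
             * arch_rethook_trampoline defined above. */
            pr_info("kernel_clone returned %ld\n", regs_return_value(regs));
            return 0;
    }

    static struct kretprobe clone_rp = {
            .handler        = ret_handler,
            .kp.symbol_name = "kernel_clone",
    };
    /* register_kretprobe(&clone_rp) in module init,
     * unregister_kretprobe(&clone_rp) on exit. */
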
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
index eb3c053f323f..fbeb1cbac01b 100644
--- a/arch/powerpc/kernel/secvar-sysfs.c
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -125,7 +125,7 @@ static const struct attribute_group secvar_attr_group = {
};
__ATTRIBUTE_GROUPS(secvar_attr);
-static struct kobj_type secvar_ktype = {
+static const struct kobj_type secvar_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = secvar_attr_groups,
};
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 46e6d2cd7a2d..4ab9b8cee77a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1166,7 +1166,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpu_smt_set_num_threads(num_threads, threads_per_core);
}
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e6a958a5da27..90882b5175cd 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -21,6 +21,7 @@
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
+#include <linux/rethook.h>
#include <asm/paca.h>
@@ -133,12 +134,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
* arch-dependent code, they are generic.
*/
ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
-#ifdef CONFIG_KPROBES
+
/*
* Mark stacktraces with kretprobed functions on them
* as unreliable.
*/
- if (ip == (unsigned long)__kretprobe_trampoline)
+#ifdef CONFIG_RETHOOK
+ if (ip == (unsigned long)arch_rethook_trampoline)
return -EINVAL;
#endif
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
index 863a7aa24650..1502b7e439ca 100644
--- a/arch/powerpc/kernel/static_call.c
+++ b/arch/powerpc/kernel/static_call.c
@@ -17,7 +17,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
mutex_lock(&text_mutex);
if (func && !is_short) {
- err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
+ err = patch_ulong(tramp + PPC_SCT_DATA, target);
if (err)
goto out;
}
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index f6f868e817e6..be159ad4b77b 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -27,7 +27,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
trace_hardirqs_off(); /* finish reconciling */
- CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ CT_WARN_ON(ct_state() == CT_STATE_KERNEL);
user_exit_irqoff();
BUG_ON(regs_is_unrecoverable(regs));
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 28d6472c380a..edf5cabe5dfd 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -121,7 +121,7 @@ static void pmac_backlight_unblank(void)
props = &pmac_backlight->props;
props->brightness = props->max_brightness;
- props->power = FB_BLANK_UNBLANK;
+ props->power = BACKLIGHT_POWER_ON;
backlight_update_status(pmac_backlight);
}
mutex_unlock(&pmac_backlight_mutex);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7a2ff9010f17..ee4b9d676cff 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -81,6 +81,21 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
+static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ /*
+ * close() is called for munmap() but also for mremap(). In the mremap()
+ * case the vdso pointer has already been updated by the mremap() hook
+ * above, so it must not be set to NULL here.
+ */
+ if (vma->vm_start != (unsigned long)mm->context.vdso)
+ return;
+
+ mm->context.vdso = NULL;
+}
+
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -92,11 +107,13 @@ static struct vm_special_mapping vvar_spec __ro_after_init = {
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
+ .close = vdso_close,
};
static struct vm_special_mapping vdso64_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso64_mremap,
+ .close = vdso_close,
};
#ifdef CONFIG_TIME_NS
@@ -197,13 +214,6 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
/* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO.
- */
- mm->context.vdso = (void __user *)vdso_base + vvar_size;
-
vma = _install_special_mapping(mm, vdso_base, vvar_size,
VM_READ | VM_MAYREAD | VM_IO |
VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
@@ -223,10 +233,15 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
do_munmap(mm, vdso_base, vvar_size, NULL);
+ return PTR_ERR(vma);
+ }
+
+ // Now that the mappings are in place, set the mm VDSO pointer
+ mm->context.vdso = (void __user *)vdso_base + vvar_size;
- return PTR_ERR_OR_ZERO(vma);
+ return 0;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
@@ -240,8 +255,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return -EINTR;
rc = __arch_setup_additional_pages(bprm, uses_interp);
- if (rc)
- mm->context.vdso = NULL;
mmap_write_unlock(mm);
return rc;
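
The vdso.c changes tighten the lifetime of mm->context.vdso: it is published only after both the vvar and vdso mappings are installed, and vdso_close() clears it only when the vma being torn down still starts at the published address. A hedged distillation of that rule:

    /* Model of the guard in vdso_close(): after mremap() has already
     * moved the pointer, an unmap of the old range must not NULL it. */
    static void model_vdso_close(struct mm_struct *mm, unsigned long vm_start)
    {
            if (vm_start == (unsigned long)mm->context.vdso)
                    mm->context.vdso = NULL;
    }
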
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 1425b6edc66b..56fb1633529a 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -8,30 +8,21 @@ include $(srctree)/lib/vdso/Makefile
obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o
obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o
+obj-vdso32 += getrandom-32.o vgetrandom-chacha-32.o
+obj-vdso64 += getrandom-64.o vgetrandom-chacha-64.o
+
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday-32.o += -include $(c-gettimeofday-y)
- CFLAGS_vgettimeofday-32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
- CFLAGS_vgettimeofday-32.o += $(call cc-option, -fno-stack-protector)
- CFLAGS_vgettimeofday-32.o += -DDISABLE_BRANCH_PROFILING
- CFLAGS_vgettimeofday-32.o += -ffreestanding -fasynchronous-unwind-tables
- CFLAGS_REMOVE_vgettimeofday-32.o = $(CC_FLAGS_FTRACE)
- CFLAGS_REMOVE_vgettimeofday-32.o += -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
- # This flag is supported by clang for 64-bit but not 32-bit so it will cause
- # an unused command line flag warning for this file.
- ifdef CONFIG_CC_IS_CLANG
- CFLAGS_REMOVE_vgettimeofday-32.o += -fno-stack-clash-protection
- endif
- CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y)
- CFLAGS_vgettimeofday-64.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
- CFLAGS_vgettimeofday-64.o += $(call cc-option, -fno-stack-protector)
- CFLAGS_vgettimeofday-64.o += -DDISABLE_BRANCH_PROFILING
- CFLAGS_vgettimeofday-64.o += -ffreestanding -fasynchronous-unwind-tables
- CFLAGS_REMOVE_vgettimeofday-64.o = $(CC_FLAGS_FTRACE)
# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
# generation is minimal, it will just use r29 instead.
- CFLAGS_vgettimeofday-64.o += $(call cc-option, -ffixed-r30)
+ CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) $(call cc-option, -ffixed-r30)
+endif
+
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom-32.o += -include $(c-getrandom-y)
+ CFLAGS_vgetrandom-64.o += -include $(c-getrandom-y) $(call cc-option, -ffixed-r30)
endif
# Build rules
@@ -42,12 +33,18 @@ else
VDSOCC := $(CC)
endif
-targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o vgetrandom-32.o
+targets += crtsavres-32.o
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o
+targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o vgetrandom-64.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
-ccflags-y := -fno-common -fno-builtin
+ccflags-y := -fno-common -fno-builtin -DBUILD_VDSO
+ccflags-y += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ccflags-y += $(call cc-option, -fno-stack-protector)
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+ccflags-y += -ffreestanding -fasynchronous-unwind-tables
+ccflags-remove-y := $(CC_FLAGS_FTRACE)
ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS)
ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
@@ -56,6 +53,12 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WAR
ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS))
CC32FLAGS := -m32
+CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
+ # This flag is supported by clang for 64-bit but not 32-bit so it will cause
+ # an unused command line flag warning for this file.
+ifdef CONFIG_CC_IS_CLANG
+CC32FLAGSREMOVE += -fno-stack-clash-protection
+endif
LD32FLAGS := -Wl,-soname=linux-vdso32.so.1
AS32FLAGS := -D__VDSO32__
@@ -68,20 +71,26 @@ targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
+$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o $(obj)/vgetrandom-32.o $(obj)/crtsavres-32.o FORCE
$(call if_changed,vdso32ld_and_check)
-$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o $(obj)/vgetrandom-64.o FORCE
$(call if_changed,vdso64ld_and_check)
# assembly rules for the .S files
$(obj-vdso32): %-32.o: %.S FORCE
$(call if_changed_dep,vdso32as)
+$(obj)/crtsavres-32.o: %-32.o: $(srctree)/arch/powerpc/lib/crtsavres.S FORCE
+ $(call if_changed_dep,vdso32as)
$(obj)/vgettimeofday-32.o: %-32.o: %.c FORCE
$(call if_changed_dep,vdso32cc)
+$(obj)/vgetrandom-32.o: %-32.o: %.c FORCE
+ $(call if_changed_dep,vdso32cc)
$(obj-vdso64): %-64.o: %.S FORCE
$(call if_changed_dep,vdso64as)
$(obj)/vgettimeofday-64.o: %-64.o: %.c FORCE
$(call if_changed_dep,cc_o_c)
+$(obj)/vgetrandom-64.o: %-64.o: %.c FORCE
+ $(call if_changed_dep,cc_o_c)
# Generate VDSO offsets using helper script
gen-vdso32sym := $(src)/gen_vdso32_offsets.sh
@@ -102,7 +111,7 @@ quiet_cmd_vdso32ld_and_check = VDSO32L $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $<
quiet_cmd_vdso32cc = VDSO32C $@
- cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $<
+ cmd_vdso32cc = $(VDSOCC) $(filter-out $(CC32FLAGSREMOVE), $(c_flags)) $(CC32FLAGS) -c -o $@ $<
quiet_cmd_vdso64ld_and_check = VDSO64L $@
cmd_vdso64ld_and_check = $(VDSOCC) $(ldflags-y) $(LD64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check)
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index 0085ae464dac..3b2479bd2f9a 100644
--- a/arch/powerpc/kernel/vdso/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
mflr r12
.cfi_register lr,r12
- get_datapage r10
+ get_realdatapage r10, r11
mtlr r12
.cfi_restore lr
#endif
diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S
index db8e167f0166..2b19b6201a33 100644
--- a/arch/powerpc/kernel/vdso/datapage.S
+++ b/arch/powerpc/kernel/vdso/datapage.S
@@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
mflr r12
.cfi_register lr,r12
mr. r4,r3
- get_datapage r3
+ get_realdatapage r3, r11
mtlr r12
#ifdef __powerpc64__
addi r3,r3,CFG_SYSCALL_MAP64
@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
.cfi_startproc
mflr r12
.cfi_register lr,r12
- get_datapage r3
+ get_realdatapage r3, r11
#ifndef __powerpc64__
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
#endif
diff --git a/arch/powerpc/kernel/vdso/getrandom.S b/arch/powerpc/kernel/vdso/getrandom.S
new file mode 100644
index 000000000000..f3bbf931931c
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/getrandom.S
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Userland implementation of getrandom() for processes
+ * for use in the vDSO
+ *
+ * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+/*
+ * The macro sets up two stack frames, one for the caller and one for the
+ * callee, because there is no requirement for the caller to set up a stack
+ * frame when calling the VDSO, so it may have omitted one, especially on PPC64.
+ */
+
+.macro cvdso_call funct
+ .cfi_startproc
+ PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
+ mflr r0
+ PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
+ PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+ .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF
+#ifdef __powerpc64__
+ PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
+#endif
+ get_realdatapage r8, r11
+ addi r8, r8, VDSO_RNG_DATA_OFFSET
+ bl CFUNC(DOTSYM(\funct))
+ PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+#ifdef __powerpc64__
+ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_restore r2
+#endif
+ cmpwi r3, 0
+ mtlr r0
+ addi r1, r1, 2 * PPC_MIN_STKFRM
+ .cfi_restore lr
+ .cfi_def_cfa_offset 0
+ crclr so
+ bgelr+
+ crset so
+ neg r3, r3
+ blr
+ .cfi_endproc
+.endm
+
+ .text
+V_FUNCTION_BEGIN(__kernel_getrandom)
+ cvdso_call __c_kernel_getrandom
+V_FUNCTION_END(__kernel_getrandom)
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index 48fc6658053a..5540d7021fa2 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -38,11 +38,7 @@
.else
addi r4, r5, VDSO_DATA_OFFSET
.endif
-#ifdef __powerpc64__
bl CFUNC(DOTSYM(\funct))
-#else
- bl \funct
-#endif
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
@@ -118,16 +114,3 @@ V_FUNCTION_END(__kernel_clock_getres)
V_FUNCTION_BEGIN(__kernel_time)
cvdso_call __c_kernel_time call_time=1
V_FUNCTION_END(__kernel_time)
-
-/* Routines for restoring integer registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the integer restore area. */
-#ifndef __powerpc64__
-_GLOBAL(_restgpr_31_x)
-_GLOBAL(_rest32gpr_31_x)
- lwz r0,4(r11)
- lwz r31,-4(r11)
- mtlr r0
- mr r1,r11
- blr
-#endif
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 8f57107000a2..7b41d5d256e8 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -130,6 +130,7 @@ VERSION
#if defined(CONFIG_PPC64) || !defined(CONFIG_SMP)
__kernel_getcpu;
#endif
+ __kernel_getrandom;
local: *;
};
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index 400819258c06..9481e4b892ed 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -123,6 +123,7 @@ VERSION
__kernel_sigtramp_rt64;
__kernel_getcpu;
__kernel_time;
+ __kernel_getrandom;
local: *;
};
diff --git a/arch/powerpc/kernel/vdso/vgetrandom-chacha.S b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S
new file mode 100644
index 000000000000..7f9061a9e8b4
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/ppc_asm.h>
+
+#define dst_bytes r3
+#define key r4
+#define counter r5
+#define nblocks r6
+
+#define idx_r0 r0
+#define val4 r4
+
+#define const0 0x61707865
+#define const1 0x3320646e
+#define const2 0x79622d32
+#define const3 0x6b206574
+
+#define key0 r5
+#define key1 r6
+#define key2 r7
+#define key3 r8
+#define key4 r9
+#define key5 r10
+#define key6 r11
+#define key7 r12
+
+#define counter0 r14
+#define counter1 r15
+
+#define state0 r16
+#define state1 r17
+#define state2 r18
+#define state3 r19
+#define state4 r20
+#define state5 r21
+#define state6 r22
+#define state7 r23
+#define state8 r24
+#define state9 r25
+#define state10 r26
+#define state11 r27
+#define state12 r28
+#define state13 r29
+#define state14 r30
+#define state15 r31
+
+.macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4
+ add \a1, \a1, \b1
+ add \a2, \a2, \b2
+ add \a3, \a3, \b3
+ add \a4, \a4, \b4
+ xor \d1, \d1, \a1
+ xor \d2, \d2, \a2
+ xor \d3, \d3, \a3
+ xor \d4, \d4, \a4
+ rotlwi \d1, \d1, 16
+ rotlwi \d2, \d2, 16
+ rotlwi \d3, \d3, 16
+ rotlwi \d4, \d4, 16
+ add \c1, \c1, \d1
+ add \c2, \c2, \d2
+ add \c3, \c3, \d3
+ add \c4, \c4, \d4
+ xor \b1, \b1, \c1
+ xor \b2, \b2, \c2
+ xor \b3, \b3, \c3
+ xor \b4, \b4, \c4
+ rotlwi \b1, \b1, 12
+ rotlwi \b2, \b2, 12
+ rotlwi \b3, \b3, 12
+ rotlwi \b4, \b4, 12
+ add \a1, \a1, \b1
+ add \a2, \a2, \b2
+ add \a3, \a3, \b3
+ add \a4, \a4, \b4
+ xor \d1, \d1, \a1
+ xor \d2, \d2, \a2
+ xor \d3, \d3, \a3
+ xor \d4, \d4, \a4
+ rotlwi \d1, \d1, 8
+ rotlwi \d2, \d2, 8
+ rotlwi \d3, \d3, 8
+ rotlwi \d4, \d4, 8
+ add \c1, \c1, \d1
+ add \c2, \c2, \d2
+ add \c3, \c3, \d3
+ add \c4, \c4, \d4
+ xor \b1, \b1, \c1
+ xor \b2, \b2, \c2
+ xor \b3, \b3, \c3
+ xor \b4, \b4, \c4
+ rotlwi \b1, \b1, 7
+ rotlwi \b2, \b2, 7
+ rotlwi \b3, \b3, 7
+ rotlwi \b4, \b4, 7
+.endm
+
+#define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \
+ quarterround4 state##a1 state##b1 state##c1 state##d1 \
+ state##a2 state##b2 state##c2 state##d2 \
+ state##a3 state##b3 state##c3 state##d3 \
+ state##a4 state##b4 state##c4 state##d4
+
+/*
+ * Very basic 32-bit implementation of ChaCha20. Produces a given positive number
+ * of blocks of output with a nonce of 0, taking an input key and an 8-byte
+ * counter. Importantly, it does not spill secrets to the stack. Its arguments are:
+ *
+ * r3: output bytes
+ * r4: 32-byte key input
+ * r5: 8-byte counter input/output (saved on stack)
+ * r6: number of 64-byte blocks to write to output
+ *
+ * r0: counter of blocks (initialised with r6)
+ * r4: Value '4' after key has been read.
+ * r5-r12: key
+ * r14-r15: counter
+ * r16-r31: state
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+#ifdef __powerpc64__
+ std counter, -216(r1)
+
+ std r14, -144(r1)
+ std r15, -136(r1)
+ std r16, -128(r1)
+ std r17, -120(r1)
+ std r18, -112(r1)
+ std r19, -104(r1)
+ std r20, -96(r1)
+ std r21, -88(r1)
+ std r22, -80(r1)
+ std r23, -72(r1)
+ std r24, -64(r1)
+ std r25, -56(r1)
+ std r26, -48(r1)
+ std r27, -40(r1)
+ std r28, -32(r1)
+ std r29, -24(r1)
+ std r30, -16(r1)
+ std r31, -8(r1)
+#else
+ stwu r1, -96(r1)
+ stw counter, 20(r1)
+#ifdef __BIG_ENDIAN__
+ stmw r14, 24(r1)
+#else
+ stw r14, 24(r1)
+ stw r15, 28(r1)
+ stw r16, 32(r1)
+ stw r17, 36(r1)
+ stw r18, 40(r1)
+ stw r19, 44(r1)
+ stw r20, 48(r1)
+ stw r21, 52(r1)
+ stw r22, 56(r1)
+ stw r23, 60(r1)
+ stw r24, 64(r1)
+ stw r25, 68(r1)
+ stw r26, 72(r1)
+ stw r27, 76(r1)
+ stw r28, 80(r1)
+ stw r29, 84(r1)
+ stw r30, 88(r1)
+ stw r31, 92(r1)
+#endif
+#endif /* __powerpc64__ */
+
+ lwz counter0, 0(counter)
+ lwz counter1, 4(counter)
+#ifdef __powerpc64__
+ rldimi counter0, counter1, 32, 0
+#endif
+ mr idx_r0, nblocks
+ subi dst_bytes, dst_bytes, 4
+
+ lwz key0, 0(key)
+ lwz key1, 4(key)
+ lwz key2, 8(key)
+ lwz key3, 12(key)
+ lwz key4, 16(key)
+ lwz key5, 20(key)
+ lwz key6, 24(key)
+ lwz key7, 28(key)
+
+ li val4, 4
+.Lblock:
+ li r31, 10
+
+ lis state0, const0@ha
+ lis state1, const1@ha
+ lis state2, const2@ha
+ lis state3, const3@ha
+ addi state0, state0, const0@l
+ addi state1, state1, const1@l
+ addi state2, state2, const2@l
+ addi state3, state3, const3@l
+
+ mtctr r31
+
+ mr state4, key0
+ mr state5, key1
+ mr state6, key2
+ mr state7, key3
+ mr state8, key4
+ mr state9, key5
+ mr state10, key6
+ mr state11, key7
+
+ mr state12, counter0
+ mr state13, counter1
+
+ li state14, 0
+ li state15, 0
+
+.Lpermute:
+ QUARTERROUND4( 0, 4, 8,12, 1, 5, 9,13, 2, 6,10,14, 3, 7,11,15)
+ QUARTERROUND4( 0, 5,10,15, 1, 6,11,12, 2, 7, 8,13, 3, 4, 9,14)
+
+ bdnz .Lpermute
+
+ addis state0, state0, const0@ha
+ addis state1, state1, const1@ha
+ addis state2, state2, const2@ha
+ addis state3, state3, const3@ha
+ addi state0, state0, const0@l
+ addi state1, state1, const1@l
+ addi state2, state2, const2@l
+ addi state3, state3, const3@l
+
+ add state4, state4, key0
+ add state5, state5, key1
+ add state6, state6, key2
+ add state7, state7, key3
+ add state8, state8, key4
+ add state9, state9, key5
+ add state10, state10, key6
+ add state11, state11, key7
+
+ add state12, state12, counter0
+ add state13, state13, counter1
+
+#ifdef __BIG_ENDIAN__
+ stwbrx state0, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state1, 0, dst_bytes
+ stwbrx state2, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state3, 0, dst_bytes
+ stwbrx state4, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state5, 0, dst_bytes
+ stwbrx state6, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state7, 0, dst_bytes
+ stwbrx state8, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state9, 0, dst_bytes
+ stwbrx state10, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state11, 0, dst_bytes
+ stwbrx state12, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state13, 0, dst_bytes
+ stwbrx state14, val4, dst_bytes
+ addi dst_bytes, dst_bytes, 8
+ stwbrx state15, 0, dst_bytes
+#else
+ stw state0, 4(dst_bytes)
+ stw state1, 8(dst_bytes)
+ stw state2, 12(dst_bytes)
+ stw state3, 16(dst_bytes)
+ stw state4, 20(dst_bytes)
+ stw state5, 24(dst_bytes)
+ stw state6, 28(dst_bytes)
+ stw state7, 32(dst_bytes)
+ stw state8, 36(dst_bytes)
+ stw state9, 40(dst_bytes)
+ stw state10, 44(dst_bytes)
+ stw state11, 48(dst_bytes)
+ stw state12, 52(dst_bytes)
+ stw state13, 56(dst_bytes)
+ stw state14, 60(dst_bytes)
+ stwu state15, 64(dst_bytes)
+#endif
+
+ subic. idx_r0, idx_r0, 1 /* subi. can't use r0 as source */
+
+#ifdef __powerpc64__
+ addi counter0, counter0, 1
+ srdi counter1, counter0, 32
+#else
+ addic counter0, counter0, 1
+ addze counter1, counter1
+#endif
+
+ bne .Lblock
+
+#ifdef __powerpc64__
+ ld counter, -216(r1)
+#else
+ lwz counter, 20(r1)
+#endif
+ stw counter0, 0(counter)
+ stw counter1, 4(counter)
+
+ li r6, 0
+ li r7, 0
+ li r8, 0
+ li r9, 0
+ li r10, 0
+ li r11, 0
+ li r12, 0
+
+#ifdef __powerpc64__
+ ld r14, -144(r1)
+ ld r15, -136(r1)
+ ld r16, -128(r1)
+ ld r17, -120(r1)
+ ld r18, -112(r1)
+ ld r19, -104(r1)
+ ld r20, -96(r1)
+ ld r21, -88(r1)
+ ld r22, -80(r1)
+ ld r23, -72(r1)
+ ld r24, -64(r1)
+ ld r25, -56(r1)
+ ld r26, -48(r1)
+ ld r27, -40(r1)
+ ld r28, -32(r1)
+ ld r29, -24(r1)
+ ld r30, -16(r1)
+ ld r31, -8(r1)
+#else
+#ifdef __BIG_ENDIAN__
+ lmw r14, 24(r1)
+#else
+ lwz r14, 24(r1)
+ lwz r15, 28(r1)
+ lwz r16, 32(r1)
+ lwz r17, 36(r1)
+ lwz r18, 40(r1)
+ lwz r19, 44(r1)
+ lwz r20, 48(r1)
+ lwz r21, 52(r1)
+ lwz r22, 56(r1)
+ lwz r23, 60(r1)
+ lwz r24, 64(r1)
+ lwz r25, 68(r1)
+ lwz r26, 72(r1)
+ lwz r27, 76(r1)
+ lwz r28, 80(r1)
+ lwz r29, 84(r1)
+ lwz r30, 88(r1)
+ lwz r31, 92(r1)
+#endif
+ addi r1, r1, 96
+#endif /* __powerpc64__ */
+ blr
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
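For readers following quarterround4, a hedged C rendering of one ChaCha20 quarter round; the macro above interleaves four of these across the 16-word state, with rotlwi performing the rotates:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha20 quarter round; quarterround4 runs four in parallel. */
static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}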
diff --git a/arch/powerpc/kernel/vdso/vgetrandom.c b/arch/powerpc/kernel/vdso/vgetrandom.c
new file mode 100644
index 000000000000..5f855d45fb7b
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/vgetrandom.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Powerpc userspace implementation of getrandom()
+ *
+ * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France
+ */
+#include <linux/time.h>
+#include <linux/types.h>
+
+ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
+ size_t opaque_len, const struct vdso_rng_data *vd)
+{
+ return __cvdso_getrandom_data(vd, buffer, len, flags, opaque_state, opaque_len);
+}
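A hedged sketch of how a libc-style consumer might prefer this entry point over the syscall. Resolving __kernel_getrandom from the vDSO image (found via AT_SYSINFO_EHDR) and allocating the opaque state are elided; the function pointer is assumed to have been filled in at startup:

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef ssize_t (*vgetrandom_fn)(void *buf, size_t len, unsigned int flags,
				 void *opaque_state, size_t opaque_len);

static vgetrandom_fn vdso_getrandom;	/* assumed: resolved from the vDSO */

static ssize_t getrandom_fast(void *buf, size_t len, unsigned int flags,
			      void *state, size_t state_len)
{
	if (vdso_getrandom)
		return vdso_getrandom(buf, len, flags, state, state_len);
	/* Fall back to the syscall on kernels without the vDSO symbol. */
	return syscall(SYS_getrandom, buf, len, flags);
}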
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 3ff3de9a52ac..34c0adb9fdbf 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -118,12 +118,12 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
struct fd f;
f = fdget(tablefd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt == f.file->private_data) {
+ if (stt == fd_file(f)->private_data) {
found = true;
break;
}
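These fd_file() conversions (also in powerpc.c and spu_syscalls.c below) move callers off direct f.file access so the struct fd representation can change behind the accessor. A hedged sketch of the resulting pattern; do_lookup() is a hypothetical consumer:

static int use_table_fd(int tablefd)
{
	struct fd f = fdget(tablefd);
	int ret = -EBADF;

	if (fd_file(f)) {
		/* always fd_file(f), never f.file, between fdget() and fdput() */
		ret = do_lookup(fd_file(f)->private_data);
		fdput(f);
	}
	return ret;
}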
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8f7d7e37bc8c..ba0492f9de65 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1922,14 +1922,22 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
r = EMULATE_FAIL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (cause == FSCR_MSGP_LG)
+ switch (cause) {
+ case FSCR_MSGP_LG:
r = kvmppc_emulate_doorbell_instr(vcpu);
- if (cause == FSCR_PM_LG)
+ break;
+ case FSCR_PM_LG:
r = kvmppc_pmu_unavailable(vcpu);
- if (cause == FSCR_EBB_LG)
+ break;
+ case FSCR_EBB_LG:
r = kvmppc_ebb_unavailable(vcpu);
- if (cause == FSCR_TM_LG)
+ break;
+ case FSCR_TM_LG:
r = kvmppc_tm_unavailable(vcpu);
+ break;
+ default:
+ break;
+ }
}
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
@@ -4049,7 +4057,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/* Return to whole-core mode if we split the core earlier */
if (cmd_bit) {
unsigned long hid0 = mfspr(SPRN_HID0);
- unsigned long loops = 0;
hid0 &= ~HID0_POWER8_DYNLPARDIS;
stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
@@ -4061,7 +4068,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
if (!(hid0 & stat_bit))
break;
cpu_relax();
- ++loops;
}
split_info.do_nap = 0;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 5e6c7b527677..f14329989e9a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1938,11 +1938,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev)
r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1957,11 +1957,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev) {
if (xics_on_xive())
r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1980,7 +1980,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -ENXIO;
@@ -1990,7 +1990,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
}
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev)
r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
cap->args[1]);
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 0d1f3ee91115..acdab294b340 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,15 +20,14 @@
#include <asm/code-patching.h>
#include <asm/inst.h>
-static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
+static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
- if (!ppc_inst_prefixed(instr)) {
- u32 val = ppc_inst_val(instr);
+ if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
+		/* For big-endian correctness: a plain long store would write the wrong half */
+ u32 val32 = val;
- __put_kernel_nofault(patch_addr, &val, u32, failed);
+ __put_kernel_nofault(patch_addr, &val32, u32, failed);
} else {
- u64 val = ppc_inst_as_ulong(instr);
-
__put_kernel_nofault(patch_addr, &val, u64, failed);
}
@@ -44,7 +43,10 @@ failed:
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
- return __patch_instruction(addr, instr, addr);
+ if (ppc_inst_prefixed(instr))
+ return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
+ else
+ return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}
struct patch_context {
@@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr)
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
-static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
{
int err;
u32 *patch_addr;
@@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
orig_mm = start_using_temp_mm(patching_mm);
- err = __patch_instruction(addr, instr, patch_addr);
+ err = __patch_mem(addr, val, patch_addr, is_dword);
/* context synchronisation performed by __patch_instruction (isync or exception) */
stop_using_temp_mm(patching_mm, orig_mm);
@@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
return err;
}
-static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
int err;
u32 *patch_addr;
@@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
if (radix_enabled())
asm volatile("ptesync": : :"memory");
- err = __patch_instruction(addr, instr, patch_addr);
+ err = __patch_mem(addr, val, patch_addr, is_dword);
pte_clear(&init_mm, text_poke_addr, pte);
flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
@@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
return err;
}
-int patch_instruction(u32 *addr, ppc_inst_t instr)
+static int patch_mem(void *addr, unsigned long val, bool is_dword)
{
int err;
unsigned long flags;
@@ -359,19 +361,57 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
*/
if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
!static_branch_likely(&poking_init_done))
- return raw_patch_instruction(addr, instr);
+ return __patch_mem(addr, val, addr, is_dword);
local_irq_save(flags);
if (mm_patch_enabled())
- err = __do_patch_instruction_mm(addr, instr);
+ err = __do_patch_mem_mm(addr, val, is_dword);
else
- err = __do_patch_instruction(addr, instr);
+ err = __do_patch_mem(addr, val, is_dword);
local_irq_restore(flags);
return err;
}
+
+#ifdef CONFIG_PPC64
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ if (ppc_inst_prefixed(instr))
+ return patch_mem(addr, ppc_inst_as_ulong(instr), true);
+ else
+ return patch_mem(addr, ppc_inst_val(instr), false);
+}
NOKPROBE_SYMBOL(patch_instruction);
+int patch_uint(void *addr, unsigned int val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
+ return -EINVAL;
+
+ return patch_mem(addr, val, false);
+}
+NOKPROBE_SYMBOL(patch_uint);
+
+int patch_ulong(void *addr, unsigned long val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
+ return -EINVAL;
+
+ return patch_mem(addr, val, true);
+}
+NOKPROBE_SYMBOL(patch_ulong);
+
+#else
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ return patch_mem(addr, ppc_inst_val(instr), false);
+}
+NOKPROBE_SYMBOL(patch_instruction);
+
+#endif
+
static int patch_memset64(u64 *addr, u64 val, size_t count)
{
for (u64 *end = addr + count; addr < end; addr++)
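The new patch_uint()/patch_ulong() helpers give callers a data-patching API with the same safety guarantees as patch_instruction(), rejecting misaligned addresses with -EINVAL. A hedged usage sketch, modelled on the powermac smp.c conversion further down:

static int update_saved_vector(u32 *vector, unsigned int save_vector)
{
	int err = patch_uint(vector, save_vector);

	if (err)	/* -EINVAL if vector is not 4-byte aligned */
		pr_err("vector patching failed: %d\n", err);
	return err;
}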
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 7e5e1c28e56a..8967903c15e9 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -46,7 +46,7 @@
.section ".text"
-#ifndef CONFIG_PPC64
+#ifndef __powerpc64__
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
index f76030087f98..8cd3b32f805b 100644
--- a/arch/powerpc/lib/test-code-patching.c
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -438,6 +438,46 @@ static void __init test_multi_instruction_patching(void)
vfree(buf);
}
+static void __init test_data_patching(void)
+{
+ void *buf;
+ u32 *addr32;
+
+ buf = vzalloc(PAGE_SIZE);
+ check(buf);
+ if (!buf)
+ return;
+
+ addr32 = buf + 128;
+
+ addr32[1] = 0xA0A1A2A3;
+ addr32[2] = 0xB0B1B2B3;
+
+ check(!patch_uint(&addr32[1], 0xC0C1C2C3));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == 0xC0C1C2C3);
+ check(addr32[2] == 0xB0B1B2B3);
+ check(addr32[3] == 0);
+
+ /* Unaligned patch_ulong() should fail */
+ if (IS_ENABLED(CONFIG_PPC64))
+ check(patch_ulong(&addr32[1], 0xD0D1D2D3) == -EINVAL);
+
+ check(!patch_ulong(&addr32[2], 0xD0D1D2D3));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == 0xC0C1C2C3);
+ check(*(unsigned long *)(&addr32[2]) == 0xD0D1D2D3);
+
+ if (!IS_ENABLED(CONFIG_PPC64))
+ check(addr32[3] == 0);
+
+ check(addr32[4] == 0);
+
+ vfree(buf);
+}
+
static int __init test_code_patching(void)
{
pr_info("Running code patching self-tests ...\n");
@@ -448,6 +488,7 @@ static int __init test_code_patching(void)
test_translate_branch();
test_prefixed_patching();
test_multi_instruction_patching();
+ test_data_patching();
return 0;
}
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 625fe7d08e06..2db167f4233f 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -223,6 +223,8 @@ int mmu_mark_initmem_nx(void)
update_bats();
+ BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE);
+
for (i = TASK_SIZE >> 28; i < 16; i++) {
/* Do not set NX on VM space for modules */
if (is_module_segment(i << 28))
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6727a15ab94f..e1eadd03f133 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -125,7 +125,7 @@ int mmu_ci_restrictions;
#endif
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
-struct mmu_hash_ops mmu_hash_ops;
+struct mmu_hash_ops mmu_hash_ops __ro_after_init;
EXPORT_SYMBOL(mmu_hash_ops);
/*
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index f4d8d3c40e5c..5a4a75369043 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -176,6 +176,17 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
return __pmd(old_pmd);
}
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp)
+{
+ unsigned long old_pud;
+
+ VM_WARN_ON_ONCE(!pud_present(*pudp));
+ old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);
+ flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+ return __pud(old_pud);
+}
+
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp, int full)
{
@@ -259,6 +270,15 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmdv &= _HPAGE_CHG_MASK;
return pmd_set_protbits(__pmd(pmdv), newprot);
}
+
+pud_t pud_modify(pud_t pud, pgprot_t newprot)
+{
+ unsigned long pudv;
+
+ pudv = pud_val(pud);
+ pudv &= _HPAGE_CHG_MASK;
+ return pud_set_protbits(__pud(pudv), newprot);
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* For use by kexec, called with MMU off */
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index ef3ce37f1bb3..87307d0fc3b8 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -637,10 +637,11 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags,
+ vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr, len, flags,
mm_ctx_user_psize(&current->mm->context), 0);
@@ -650,10 +651,11 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long addr0,
const unsigned long len,
const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags,
+ vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr0, len, flags,
mm_ctx_user_psize(&current->mm->context), 1);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index da21cb018984..1221c561b43a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,7 +216,7 @@ static int __init mark_nonram_nosave(void)
* everything else. GFP_DMA32 page allocations automatically fall back to
* ZONE_DMA.
*
- * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
+ * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the
* generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
* anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
* ZONE_DMA.
@@ -230,6 +230,7 @@ void __init paging_init(void)
{
unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
+ int zone_dma_bits;
#ifdef CONFIG_HIGHMEM
unsigned long v = __fix_to_virt(FIX_KMAP_END);
@@ -256,6 +257,8 @@ void __init paging_init(void)
else
zone_dma_bits = 31;
+ zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);
+
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
1UL << (zone_dma_bits - PAGE_SHIFT));
@@ -410,6 +413,18 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;
+#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
+static void prealloc_execmem_pgtable(void)
+{
+ unsigned long va;
+
+ for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
+ pte_alloc_kernel(pmd_off_k(va), va);
+}
+#else
+static void prealloc_execmem_pgtable(void) { }
+#endif
+
struct execmem_info __init *execmem_arch_setup(void)
{
pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
@@ -441,6 +456,8 @@ struct execmem_info __init *execmem_arch_setup(void)
end = VMALLOC_END;
#endif
+ prealloc_execmem_pgtable();
+
execmem_info = (struct execmem_info){
.ranges = {
[EXECMEM_DEFAULT] = {
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 388bba0ab3e7..8b54f12d1889 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -150,11 +150,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
} else {
- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}
@@ -177,7 +177,8 @@ int mmu_mark_initmem_nx(void)
if (!debug_pagealloc_enabled_or_kfence())
err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
- mmu_pin_tlb(block_mapped_ram, false);
+ if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+ mmu_pin_tlb(block_mapped_ram, false);
return err;
}
@@ -206,6 +207,8 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
/* 8xx can only access 32MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
+
+ BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE);
}
int pud_clear_huge(pud_t *pud)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa89899f0c1a..3c1da08304d0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -43,11 +43,9 @@ static char *cmdline __initdata;
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
-EXPORT_SYMBOL(node_data);
static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;
@@ -1095,27 +1093,9 @@ void __init dump_numa_cpu_topology(void)
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
u64 spanned_pages = end_pfn - start_pfn;
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
-
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+
+ alloc_node_data(nid);
+
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = spanned_pages;
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 8c31802f97e8..e89f64a0f24a 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -136,10 +136,10 @@ void pte_fragment_free(unsigned long *table, int kernel)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
- struct page *page;
+ struct folio *folio;
- page = virt_to_page(pgtable);
- SetPageActive(page);
+ folio = virt_to_folio(pgtable);
+ folio_set_active(folio);
pte_fragment_free((unsigned long *)pgtable, 0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ab0656115424..7316396e452d 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -297,6 +297,12 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
}
#if defined(CONFIG_PPC_8xx)
+
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS)
+/* We need the same lock to protect the PMD table and the two PTE tables. */
+#error "8M hugetlb folios are incompatible with split page table locks"
+#endif
+
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
pte_basic_t *entry = (pte_basic_t *)ptep;
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 164cbcd4588e..e7b7bdaad341 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client)
}
static const struct i2c_device_id avr_id[] = {
- { "akebono-avr", 0 },
+ { "akebono-avr" },
{ }
};
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 4a25b6b48615..9668b052cd4b 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -504,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);
static struct platform_driver mpc512x_lpbfifo_driver = {
.probe = mpc512x_lpbfifo_probe,
- .remove_new = mpc512x_lpbfifo_remove,
+ .remove = mpc512x_lpbfifo_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc512x_lpbfifo_match,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 2bd6abcdc113..1ea591ec6083 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -644,7 +644,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations mpc52xx_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = mpc52xx_wdt_write,
.unlocked_ioctl = mpc52xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 34ce21f42623..e635b27ee718 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -143,7 +143,7 @@ static struct platform_driver gpio_halt_driver = {
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
- .remove_new = gpio_halt_remove,
+ .remove = gpio_halt_remove,
};
module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index a14d9d8997a4..8623aebfac48 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -195,6 +195,13 @@ config PIN_TLB_IMMR
CONFIG_PIN_TLB_DATA is also selected, it will reduce
CONFIG_PIN_TLB_DATA to 24 Mbytes.
+config PIN_TLB_TEXT
+ bool "Pinned TLB for TEXT"
+ depends on PIN_TLB
+ default y
+ help
+ This pins kernel text with 8M pages.
+
endmenu
endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 4b0d7d4f88f6..1453ccc900c4 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -84,11 +84,8 @@ config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_SPLIT_PMD_PTLOCK
- select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select HAVE_MOVE_PMD
@@ -108,6 +105,14 @@ config PPC_BOOK3E_64
endchoice
+config PPC_THP
+ def_bool y
+ depends on PPC_BOOK3S_64
+ depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB)
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+
choice
prompt "CPU selection"
help
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 87ad7d563cfa..cd7d42fc12a6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -66,8 +66,8 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
if (flags & SPU_CREATE_AFFINITY_SPU) {
struct fd neighbor = fdget(neighbor_fd);
ret = -EBADF;
- if (neighbor.file) {
- ret = calls->create_thread(name, flags, mode, neighbor.file);
+ if (fd_file(neighbor)) {
+ ret = calls->create_thread(name, flags, mode, fd_file(neighbor));
fdput(neighbor);
}
} else
@@ -89,8 +89,8 @@ SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus)
ret = -EBADF;
arg = fdget(fd);
- if (arg.file) {
- ret = calls->spu_run(arg.file, unpc, ustatus);
+ if (fd_file(arg)) {
+ ret = calls->spu_run(fd_file(arg), unpc, ustatus);
fdput(arg);
}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 7f4e0db8eb08..d5a2c77bc908 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = {
.release = spufs_cntl_release,
.read = simple_attr_read,
.write = simple_attr_write,
- .llseek = no_llseek,
.mmap = spufs_cntl_mmap,
};
@@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_read,
- .llseek = no_llseek,
};
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
@@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_stat_read,
- .llseek = no_llseek,
};
/* low-level ibox access function */
@@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_read,
.poll = spufs_ibox_poll,
- .llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
@@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_ibox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_stat_read,
- .llseek = no_llseek,
};
/* low-level mailbox write */
@@ -901,7 +896,6 @@ static const struct file_operations spufs_wbox_fops = {
.open = spufs_pipe_open,
.write = spufs_wbox_write,
.poll = spufs_wbox_poll,
- .llseek = no_llseek,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
@@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_wbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_wbox_stat_read,
- .llseek = no_llseek,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
@@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = {
.read = spufs_signal1_read,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
- .llseek = no_llseek,
};
static const struct file_operations spufs_signal1_nosched_fops = {
@@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = {
.release = spufs_signal1_release,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
- .llseek = no_llseek,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
@@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = {
.read = spufs_signal2_read,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
- .llseek = no_llseek,
};
static const struct file_operations spufs_signal2_nosched_fops = {
@@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = {
.release = spufs_signal2_release,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
- .llseek = no_llseek,
};
/*
@@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = {
.open = spufs_mss_open,
.release = spufs_mss_release,
.mmap = spufs_mss_mmap,
- .llseek = no_llseek,
};
static vm_fault_t
@@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = {
.open = spufs_psmap_open,
.release = spufs_psmap_release,
.mmap = spufs_psmap_mmap,
- .llseek = no_llseek,
};
@@ -1732,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = {
.flush = spufs_mfc_flush,
.fsync = spufs_mfc_fsync,
.mmap = spufs_mfc_mmap,
- .llseek = no_llseek,
};
static int spufs_npc_set(void *data, u64 val)
@@ -2102,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_dma_info_fops = {
.open = spufs_info_open,
.read = spufs_dma_info_read,
- .llseek = no_llseek,
};
static void spufs_get_proxydma_info(struct spu_context *ctx,
@@ -2159,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_proxydma_info_fops = {
.open = spufs_info_open,
.read = spufs_proxydma_info_read,
- .llseek = no_llseek,
};
static int spufs_show_tid(struct seq_file *s, void *private)
@@ -2442,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = {
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
.release = spufs_switch_log_release,
- .llseek = no_llseek,
};
/**
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 5c4f1a9ca154..6f4a41a9352a 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -14,7 +14,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/mv643xx.h>
+#include <linux/mv643xx_eth.h>
#include <linux/pci.h>
#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
@@ -25,12 +25,15 @@
#define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE)
#define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) )
-
#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
#undef BE_VERBOSE
+#define MV64340_BASE_ADDR_ENABLE 0x278
+#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268
+#define MV64340_SRAM_CONFIG 0x380
+
static struct resource mv643xx_eth_shared_resources[] = {
[0] = {
.name = "ethernet shared base",
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index 4f358b55c341..8ddbaa4ebd0b 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -7,7 +7,6 @@
extern int maple_set_rtc_time(struct rtc_time *tm);
extern void maple_get_rtc_time(struct rtc_time *tm);
extern time64_t maple_get_boot_time(void);
-extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 4e983af32949..e4538d471256 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match);
static struct platform_driver gpio_mdio_driver =
{
.probe = gpio_mdio_probe,
- .remove_new = gpio_mdio_remove,
+ .remove = gpio_mdio_remove,
.driver = {
.name = "gpio-mdio-bitbang",
.of_match_table = gpio_mdio_match,
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index 018c30665e1b..6f6743b8e48d 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -5,7 +5,6 @@
extern time64_t pas_get_boot_time(void);
extern void pas_pci_init(void);
struct pci_dev;
-extern void pas_pci_irq_fixup(struct pci_dev *dev);
extern void pas_pci_dma_dev_setup(struct pci_dev *dev);
void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index 085e0ad20eba..8253de737373 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -313,7 +313,7 @@ static void __init uninorth_install_pfunc(void)
/*
* Install handlers for the hwclock child if any
*/
- for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
+ for_each_child_of_node(uninorth_node, np)
if (of_node_name_eq(np, "hw-clock")) {
unin_hwclock = np;
break;
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 15644be31990..d21b681f52fb 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction(vector, ppc_inst(save_vector));
+ patch_uint(vector, save_vector);
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index af3a5d37a149..db3370d1673c 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp,
static const struct file_operations pnv_eeh_ei_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = pnv_eeh_ei_write,
};
@@ -860,7 +859,7 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
int64_t rc;
/* Hot reset to the bus if firmware cannot handle */
- if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
+ if (!dn || !of_property_present(dn, "ibm,reset-by-firmware"))
return __pnv_eeh_bridge_reset(pdev, option);
pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n",
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 16c5860f1372..608e4b68c5ea 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = {
};
ATTRIBUTE_GROUPS(dump_default);
-static struct kobj_type dump_ktype = {
+static const struct kobj_type dump_ktype = {
.sysfs_ops = &dump_sysfs_ops,
.release = &dump_release,
.default_groups = dump_default_groups,
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 554fdd7f88b8..5db1e733143b 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = {
};
ATTRIBUTE_GROUPS(elog_default);
-static struct kobj_type elog_ktype = {
+static const struct kobj_type elog_ktype = {
.sysfs_ops = &elog_sysfs_ops,
.release = &elog_release,
.default_groups = elog_default_groups,
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
index 6c3bc4b4da98..bb4218fa796e 100644
--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -20,13 +20,13 @@
* message, it just ensures that OPAL completely flushes the console buffer.
*/
static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
/*
* Outside of a panic context the pollers will continue to run,
* so we don't need to do any special flushing.
*/
- if (reason != KMSG_DUMP_PANIC)
+ if (detail->reason != KMSG_DUMP_PANIC)
return;
opal_flush_console(0);
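The dumper callback now receives a struct kmsg_dump_detail rather than a bare reason, part of a tree-wide API change. A hedged sketch of a dumper written against the new signature; registration is shown as a comment and the surrounding details are assumptions:

static void my_console_flush(struct kmsg_dumper *dumper,
			     struct kmsg_dump_detail *detail)
{
	if (detail->reason != KMSG_DUMP_PANIC)
		return;
	/* flush the hardware console here */
}

static struct kmsg_dumper my_dumper = {
	.dump = my_console_flush,
};
/* kmsg_dump_register(&my_dumper); at init time */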
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index a16f07cdab26..8a7f39e106bd 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -393,7 +393,7 @@ void __init opal_lpc_init(void)
for_each_compatible_node(np, NULL, "ibm,power8-lpc") {
if (!of_device_is_available(np))
continue;
- if (!of_get_property(np, "primary", NULL))
+ if (!of_property_present(np, "primary"))
continue;
opal_lpc_chip_id = of_get_ibm_chip_id(np);
of_node_put(np);
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 24f04f20d3e8..dc246ed4b7b4 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -443,7 +443,7 @@ static struct platform_driver opal_prd_driver = {
.of_match_table = opal_prd_match,
},
.probe = opal_prd_probe,
- .remove_new = opal_prd_remove,
+ .remove = opal_prd_remove,
};
module_platform_driver(opal_prd_driver);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 957f2b47a3c0..93fba1f8661f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -274,7 +274,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);
-extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 47f8eabd1bee..213aa26dc8b3 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -23,6 +23,7 @@
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>
+#include <asm/prom.h>
static struct workqueue_struct *pseries_hp_wq;
@@ -250,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn)
struct device_node *child;
int rc;
- child = of_get_next_child(dn, NULL);
- while (child) {
+ for_each_child_of_node(dn, child)
dlpar_detach_node(child);
- child = of_get_next_child(dn, child);
- }
rc = of_detach_node(dn);
if (rc)
@@ -264,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn)
return 0;
}
+static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs,
+ struct device_node *dn)
+{
+ int rc;
+
+ rc = of_changeset_attach_node(ocs, dn);
+
+ if (!rc && dn->child)
+ rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child);
+ if (!rc && dn->sibling)
+ rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling);
+
+ return rc;
+}
#define DR_ENTITY_SENSE 9003
#define DR_ENTITY_PRESENT 1
@@ -330,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index)
return 0;
}
-int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+static struct device_node *
+get_device_node_with_drc_index(u32 index)
+{
+ struct device_node *np = NULL;
+ u32 node_index;
+ int rc;
+
+ for_each_node_with_property(np, "ibm,my-drc-index") {
+ rc = of_property_read_u32(np, "ibm,my-drc-index",
+ &node_index);
+ if (rc) {
+ pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
+ __func__, np, "ibm,my-drc-index", rc);
+ of_node_put(np);
+ return NULL;
+ }
+
+ if (index == node_index)
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *
+get_device_node_with_drc_info(u32 index)
+{
+ struct device_node *np = NULL;
+ struct of_drc_info drc;
+ struct property *info;
+ const __be32 *value;
+ u32 node_index;
+ int i, j, count;
+
+ for_each_node_with_property(np, "ibm,drc-info") {
+ info = of_find_property(np, "ibm,drc-info", NULL);
+ if (info == NULL) {
+ /* XXX can this happen? */
+ of_node_put(np);
+ return NULL;
+ }
+ value = of_prop_next_u32(info, NULL, &count);
+ if (value == NULL)
+ continue;
+ value++;
+ for (i = 0; i < count; i++) {
+ if (of_read_drc_info_cell(&info, &value, &drc))
+ break;
+ if (index > drc.last_drc_index)
+ continue;
+ node_index = drc.drc_index_start;
+ for (j = 0; j < drc.num_sequential_elems; j++) {
+ if (index == node_index)
+ return np;
+ node_index += drc.sequential_inc;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int dlpar_hp_dt_add(u32 index)
+{
+ struct device_node *np, *nodes;
+ struct of_changeset ocs;
+ int rc;
+
+	/*
+	 * Do not add device node(s) if they already exist in the
+	 * device tree.
+	 */
+ np = get_device_node_with_drc_index(index);
+ if (np) {
+		pr_err("%s: Adding device node for index (%d), but it "
+		       "already exists in the device tree\n",
+ __func__, index);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ np = get_device_node_with_drc_info(index);
+
+ if (!np)
+ return -EIO;
+
+ /* Next, configure the connector. */
+ nodes = dlpar_configure_connector(cpu_to_be32(index), np);
+ if (!nodes) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /*
+ * Add the new nodes from dlpar_configure_connector() onto
+ * the device-tree.
+ */
+ of_changeset_init(&ocs);
+ rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes);
+
+ if (!rc)
+ rc = of_changeset_apply(&ocs);
+ else
+ dlpar_free_cc_nodes(nodes);
+
+ of_changeset_destroy(&ocs);
+
+out:
+ of_node_put(np);
+ return rc;
+}
+
+static int changeset_detach_node_recursive(struct of_changeset *ocs,
+ struct device_node *node)
{
+ struct device_node *child;
int rc;
- /* pseries error logs are in BE format, convert to cpu type */
- switch (hp_elog->id_type) {
- case PSERIES_HP_ELOG_ID_DRC_COUNT:
- hp_elog->_drc_u.drc_count =
- be32_to_cpu(hp_elog->_drc_u.drc_count);
+ for_each_child_of_node(node, child) {
+ rc = changeset_detach_node_recursive(ocs, child);
+ if (rc) {
+ of_node_put(child);
+ return rc;
+ }
+ }
+
+ return of_changeset_detach_node(ocs, node);
+}
+
+static int dlpar_hp_dt_remove(u32 drc_index)
+{
+ struct device_node *np;
+ struct of_changeset ocs;
+ u32 index;
+ int rc = 0;
+
+ /*
+ * Prune all nodes with a matching index.
+ */
+ of_changeset_init(&ocs);
+
+ for_each_node_with_property(np, "ibm,my-drc-index") {
+ rc = of_property_read_u32(np, "ibm,my-drc-index", &index);
+ if (rc) {
+ pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
+ __func__, np, "ibm,my-drc-index", rc);
+ of_node_put(np);
+ goto out;
+ }
+
+ if (index == drc_index) {
+ rc = changeset_detach_node_recursive(&ocs, np);
+ if (rc) {
+ of_node_put(np);
+ goto out;
+ }
+ }
+ }
+
+ rc = of_changeset_apply(&ocs);
+
+out:
+ of_changeset_destroy(&ocs);
+ return rc;
+}
+
+static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe)
+{
+ u32 drc_index;
+ int rc;
+
+ if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX)
+ return -EINVAL;
+
+ drc_index = be32_to_cpu(phpe->_drc_u.drc_index);
+
+ lock_device_hotplug();
+
+ switch (phpe->action) {
+ case PSERIES_HP_ELOG_ACTION_ADD:
+ rc = dlpar_hp_dt_add(drc_index);
break;
- case PSERIES_HP_ELOG_ID_DRC_INDEX:
- hp_elog->_drc_u.drc_index =
- be32_to_cpu(hp_elog->_drc_u.drc_index);
+ case PSERIES_HP_ELOG_ACTION_REMOVE:
+ rc = dlpar_hp_dt_remove(drc_index);
+ break;
+ default:
+ pr_err("Invalid action (%d) specified\n", phpe->action);
+ rc = -EINVAL;
break;
- case PSERIES_HP_ELOG_ID_DRC_IC:
- hp_elog->_drc_u.ic.count =
- be32_to_cpu(hp_elog->_drc_u.ic.count);
- hp_elog->_drc_u.ic.index =
- be32_to_cpu(hp_elog->_drc_u.ic.index);
}
+ unlock_device_hotplug();
+
+ return rc;
+}
+
+int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+{
+ int rc;
+
switch (hp_elog->resource) {
case PSERIES_HP_ELOG_RESOURCE_MEM:
rc = dlpar_memory(hp_elog);
@@ -361,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_RESOURCE_PMEM:
rc = dlpar_hp_pmem(hp_elog);
break;
+ case PSERIES_HP_ELOG_RESOURCE_DT:
+ rc = dlpar_hp_dt(hp_elog);
+ break;
default:
pr_warn_ratelimited("Invalid resource (%d) specified\n",
@@ -413,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
} else if (sysfs_streq(arg, "cpu")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
+ } else if (sysfs_streq(arg, "dt")) {
+ hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT;
} else {
pr_err("Invalid resource specified.\n");
return -EINVAL;
@@ -554,7 +750,7 @@ dlpar_store_out:
static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", "memory,cpu");
+ return sprintf(buf, "%s\n", "memory,cpu,dt");
}
static CLASS_ATTR_RW(dlpar);
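With the "dt" resource wired into dlpar_parse_resource() and advertised by dlpar_show(), device-tree add/remove can be driven through the existing /sys/kernel/dlpar interface. A hedged userspace sketch; the command grammar ("dt add index 0x...") is assumed to mirror the cpu and memory resources:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int dlpar_dt_add(unsigned int drc_index)
{
	char cmd[64];
	ssize_t n;
	int fd = open("/sys/kernel/dlpar", O_WRONLY);

	if (fd < 0)
		return -1;
	/* assumed grammar, mirroring "memory add index 0x..." */
	snprintf(cmd, sizeof(cmd), "dt add index 0x%x", drc_index);
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}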
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 3f1cdccebc9c..8cb9d36ea491 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -325,7 +325,6 @@ static const struct file_operations dtl_fops = {
.open = dtl_file_open,
.release = dtl_file_release,
.read = dtl_file_read,
- .llseek = no_llseek,
};
static struct dentry *dtl_dir;
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b1ae0c0d1187..1893f66371fa 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -784,6 +784,43 @@ static int pseries_notify_resume(struct eeh_dev *edev)
}
#endif
+/**
+ * pseries_eeh_err_inject - Inject the specified error into the indicated PE
+ * @pe: the indicated PE
+ * @type: error type
+ * @func: specific error type
+ * @addr: address
+ * @mask: address mask
+ *
+ * The routine is called to inject the specified error, which is
+ * determined by @type and @func, into the indicated PE.
+ */
+static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
+{
+ struct eeh_dev *pdev;
+
+ /* Check on PCI error type */
+ if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
+ return -EINVAL;
+
+ switch (func) {
+ case EEH_ERR_FUNC_LD_MEM_ADDR:
+ case EEH_ERR_FUNC_LD_MEM_DATA:
+ case EEH_ERR_FUNC_ST_MEM_ADDR:
+ case EEH_ERR_FUNC_ST_MEM_DATA:
+		/* inject an MMIO error for all pdevs belonging to the PE */
+ pci_lock_rescan_remove();
+ list_for_each_entry(pdev, &pe->edevs, entry)
+ eeh_pe_inject_mmio_error(pdev->pdev);
+ pci_unlock_rescan_remove();
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.probe = pseries_eeh_probe,
@@ -792,7 +829,7 @@ static struct eeh_ops pseries_eeh_ops = {
.reset = pseries_eeh_reset,
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
- .err_inject = NULL,
+ .err_inject = pseries_eeh_err_inject,
.read_config = pseries_eeh_read_config,
.write_config = pseries_eeh_write_config,
.next_error = NULL,
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index e62835a12d73..6838a0fcda29 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -757,7 +757,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
u32 drc_index;
int rc;
- drc_index = hp_elog->_drc_u.drc_index;
+ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
lock_device_hotplug();
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 3fe3ddb30c04..38dc4f7c9296 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -817,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ACTION_ADD:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
- count = hp_elog->_drc_u.drc_count;
+ count = be32_to_cpu(hp_elog->_drc_u.drc_count);
rc = dlpar_memory_add_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
- drc_index = hp_elog->_drc_u.drc_index;
+ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
rc = dlpar_memory_add_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
- count = hp_elog->_drc_u.ic.count;
- drc_index = hp_elog->_drc_u.ic.index;
+ count = be32_to_cpu(hp_elog->_drc_u.ic.count);
+ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
rc = dlpar_memory_add_by_ic(count, drc_index);
break;
default:
@@ -838,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ACTION_REMOVE:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
- count = hp_elog->_drc_u.drc_count;
+ count = be32_to_cpu(hp_elog->_drc_u.drc_count);
rc = dlpar_memory_remove_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
- drc_index = hp_elog->_drc_u.drc_index;
+ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
rc = dlpar_memory_remove_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
- count = hp_elog->_drc_u.ic.count;
- drc_index = hp_elog->_drc_u.ic.index;
+ count = be32_to_cpu(hp_elog->_drc_u.ic.count);
+ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
rc = dlpar_memory_remove_by_ic(count, drc_index);
break;
default:
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
index c29e85db5f35..1574176e3ffc 100644
--- a/arch/powerpc/platforms/pseries/papr-vpd.c
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -156,10 +156,7 @@ static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
const char *old_ptr = blob->data;
char *new_ptr;
- new_ptr = old_ptr ?
- kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) :
- kvmalloc(len, GFP_KERNEL_ACCOUNT);
-
+ new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
if (!new_ptr)
return -ENOMEM;
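
The papr-vpd.c hunk leans on kvrealloc() accepting a NULL pointer — with old_ptr == NULL it degenerates to a plain allocation, so the old ternary is unnecessary — and on the reworked signature that drops the old-size argument (visible in the hunk itself). A hedged sketch of the resulting grow-a-blob pattern:

    static int blob_extend_sketch(char **data, size_t old_len, size_t extra,
                                  const char *src)
    {
            /* kvrealloc(NULL, ...) behaves like kvmalloc(), so no NULL
             * check is needed before the call. */
            char *p = kvrealloc(*data, old_len + extra, GFP_KERNEL_ACCOUNT);

            if (!p)
                    return -ENOMEM;
            memcpy(p + old_len, src, extra);
            *data = p;
            return 0;
    }
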
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f6a70bc92e83..d95e03b3d3e3 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -1509,7 +1509,7 @@ static const struct of_device_id papr_scm_match[] = {
static struct platform_driver papr_scm_driver = {
.probe = papr_scm_probe,
- .remove_new = papr_scm_remove,
+ .remove = papr_scm_remove,
.driver = {
.name = "papr_scm",
.of_match_table = papr_scm_match,
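
This .remove_new to .remove rename, like the fsl_msi.c and pmi.c hunks below, tracks the platform-driver rework in which .remove itself now takes the void-returning callback that .remove_new was introduced to stage; the callback bodies are unchanged. The resulting driver shape, with hypothetical names:

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            /* void return: remove callbacks no longer report failure */
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,   /* spelled .remove_new during the transition */
            .driver = {
                    .name = "foo",
            },
    };
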
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index 3c290b9ed01b..0f1d45f32e4a 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
return -EINVAL;
}
- drc_index = hp_elog->_drc_u.drc_index;
+ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
lock_device_hotplug();
diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c
index f9f682724e77..9e05a0e99cad 100644
--- a/arch/powerpc/platforms/pseries/vas-sysfs.c
+++ b/arch/powerpc/platforms/pseries/vas-sysfs.c
@@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = {
.store = vas_type_store,
};
-static struct kobj_type vas_def_attr_type = {
+static const struct kobj_type vas_def_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_def_capab_groups,
};
-static struct kobj_type vas_qos_attr_type = {
+static const struct kobj_type vas_qos_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_qos_capab_groups,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e205135ae1fe..1aa0cb097c9c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -603,7 +603,7 @@ static struct platform_driver fsl_of_msi_driver = {
.of_match_table = fsl_of_msi_ids,
},
.probe = fsl_of_msi_probe,
- .remove_new = fsl_of_msi_remove,
+ .remove = fsl_of_msi_remove,
};
static __init int fsl_of_msi_init(void)
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 737f97fd67d7..2511e586fe31 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -193,7 +193,7 @@ static void pmi_of_remove(struct platform_device *dev)
static struct platform_driver pmi_of_platform_driver = {
.probe = pmi_of_probe,
- .remove_new = pmi_of_remove,
+ .remove = pmi_of_remove,
.driver = {
.name = "pmi",
.of_match_table = pmi_match,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index bd4813bad317..e6cddbb2305f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3543,7 +3543,7 @@ scanhex(unsigned long *vp)
}
} else if (c == '$') {
int i;
- for (i=0; i<63; i++) {
+ for (i = 0; i < (KSYM_NAME_LEN - 1); i++) {
c = inchar();
if (isspace(c) || c == '\0') {
termch = c;
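
The xmon change replaces a bare 63 with KSYM_NAME_LEN - 1, so the symbol-name scan accepts names up to the kernel's symbol-length limit while still reserving one byte for the terminating NUL. The idiom, sketched outside its surrounding function:

    char tmpstr[KSYM_NAME_LEN];
    int i, c;

    /* Read at most KSYM_NAME_LEN - 1 characters; the last slot is
     * reserved for the trailing '\0'. */
    for (i = 0; i < KSYM_NAME_LEN - 1; i++) {
            c = inchar();
            if (isspace(c) || c == '\0')
                    break;
            tmpstr[i] = c;
    }
    tmpstr[i] = '\0';
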
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 939ea7f6a228..22dc5ea4196c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -13,6 +13,7 @@ config 32BIT
config RISCV
def_bool y
select ACPI_GENERIC_GSI if ACPI
+ select ACPI_MCFG if (ACPI && PCI)
select ACPI_PPTT if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ACPI_SPCR_TABLE if ACPI
@@ -64,10 +65,12 @@ config RISCV
select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_SYM_ANNOTATIONS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -92,6 +95,7 @@ config RISCV
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
@@ -156,6 +160,7 @@ config RISCV
select HAVE_KERNEL_LZO if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_UNCOMPRESSED if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_ZSTD if !XIP_KERNEL && !EFI_ZBOOT
+ select HAVE_KERNEL_XZ if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
# https://github.com/ClangBuiltLinux/linux/issues/1881
@@ -172,7 +177,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
- select HAVE_RUST if 64BIT
+ select HAVE_RUST if RUSTC_SUPPORTS_RISCV
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
@@ -188,6 +193,7 @@ config RISCV
select OF_EARLY_FLATTREE
select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
+ select PCI_ECAM if (ACPI && PCI)
select PCI_MSI if PCI
select RISCV_ALTERNATIVE if !XIP_KERNEL
select RISCV_APLIC
@@ -200,8 +206,16 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
+ select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
+config RUSTC_SUPPORTS_RISCV
+ def_bool y
+ depends on 64BIT
+ # Shadow call stack requires rustc version 1.82+ due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1817
@@ -319,6 +333,11 @@ config GENERIC_HWEIGHT
config FIX_EARLYCON_MEM
def_bool MMU
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if 32BIT
+ default 0xdead000000000000 if 64BIT
+
config PGTABLE_LEVELS
int
default 5 if 64BIT
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6fe682139d2e..d469db9f46f4 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -159,6 +159,7 @@ boot-image-$(CONFIG_KERNEL_LZ4) := Image.lz4
boot-image-$(CONFIG_KERNEL_LZMA) := Image.lzma
boot-image-$(CONFIG_KERNEL_LZO) := Image.lzo
boot-image-$(CONFIG_KERNEL_ZSTD) := Image.zst
+boot-image-$(CONFIG_KERNEL_XZ) := Image.xz
ifdef CONFIG_RISCV_M_MODE
boot-image-$(CONFIG_ARCH_CANAAN) := loader.bin
endif
@@ -183,12 +184,12 @@ endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
-BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader loader.bin xipImage vmlinuz.efi
+BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE))
loader.bin: loader
-Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader xipImage vmlinuz.efi: Image
+Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader xipImage vmlinuz.efi: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -225,6 +226,7 @@ define archhelp
echo ' Image.lzma - Compressed kernel image (arch/riscv/boot/Image.lzma)'
echo ' Image.lzo - Compressed kernel image (arch/riscv/boot/Image.lzo)'
echo ' Image.zst - Compressed kernel image (arch/riscv/boot/Image.zst)'
+ echo ' Image.xz - Compressed kernel image (arch/riscv/boot/Image.xz)'
echo ' vmlinuz.efi - Compressed EFI kernel image (arch/riscv/boot/vmlinuz.efi)'
echo ' Default when CONFIG_EFI_ZBOOT=y'
echo ' xipImage - Execute-in-place kernel image (arch/riscv/boot/xipImage)'
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 4e9e7a28bf9b..b25d524ce5eb 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -64,6 +64,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.zst: $(obj)/Image FORCE
$(call if_changed,zstd)
+$(obj)/Image.xz: $(obj)/Image FORCE
+ $(call if_changed,xzkern)
+
$(obj)/loader.bin: $(obj)/loader FORCE
$(call if_changed,objcopy)
diff --git a/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts
index aa361f3a86bb..7b5f57853690 100644
--- a/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts
+++ b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts
@@ -43,6 +43,15 @@
clock-frequency = <25000000>;
};
+&sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ no-1-8-v;
+ no-mmc;
+ no-sdio;
+ disable-wp;
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
index 891932ae470f..b724fb6d9689 100644
--- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi
@@ -297,6 +297,22 @@
status = "disabled";
};
+ dmac: dma-controller@4330000 {
+ compatible = "snps,axi-dma-1.01a";
+ reg = <0x04330000 0x1000>;
+ interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk CLK_SDMA_AXI>, <&clk CLK_SDMA_AXI>;
+ clock-names = "core-clk", "cfgr-clk";
+ #dma-cells = <1>;
+ dma-channels = <8>;
+ snps,block-size = <1024 1024 1024 1024
+ 1024 1024 1024 1024>;
+ snps,priority = <0 1 2 3 4 5 6 7>;
+ snps,dma-masters = <2>;
+ snps,data-width = <4>;
+ status = "disabled";
+ };
+
plic: interrupt-controller@70000000 {
reg = <0x70000000 0x4000000>;
interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
diff --git a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
index 80cb017974d8..a3f9d6f22566 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
+++ b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
@@ -26,6 +26,83 @@
clock-frequency = <25000000>;
};
+&emmc {
+ bus-width = <4>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ wp-inverted;
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ mcu: syscon@17 {
+ compatible = "sophgo,sg2042-hwmon-mcu";
+ reg = <0x17>;
+ #thermal-sensor-cells = <1>;
+ };
+};
+
+&sd {
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ wp-inverted;
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
+
+/ {
+ thermal-zones {
+ soc-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+ thermal-sensors = <&mcu 0>;
+
+ trips {
+ soc_active1: soc-active1 {
+ temperature = <30000>;
+ hysteresis = <8000>;
+ type = "active";
+ };
+
+ soc_active2: soc-active2 {
+ temperature = <58000>;
+ hysteresis = <12000>;
+ type = "active";
+ };
+
+ soc_active3: soc-active3 {
+ temperature = <70000>;
+ hysteresis = <10000>;
+ type = "active";
+ };
+
+ soc_hot: soc-hot {
+ temperature = <80000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+ };
+ };
+
+ board-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+ thermal-sensors = <&mcu 1>;
+
+ trips {
+ board_active: board-active {
+ temperature = <75000>;
+ hysteresis = <8000>;
+ type = "active";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
index 34c802bd3f9b..4e5fa6591623 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
@@ -44,8 +44,127 @@
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
+ interrupt-parent = <&intc>;
ranges;
+ i2c0: i2c@7030005000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x70 0x30005000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_I2C>;
+ clock-names = "ref";
+ clock-frequency = <100000>;
+ interrupts = <101 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstgen RST_I2C0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@7030006000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x70 0x30006000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_I2C>;
+ clock-names = "ref";
+ clock-frequency = <100000>;
+ interrupts = <102 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstgen RST_I2C1>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@7030007000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x70 0x30007000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_I2C>;
+ clock-names = "ref";
+ clock-frequency = <100000>;
+ interrupts = <103 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstgen RST_I2C2>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@7030008000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x70 0x30008000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_I2C>;
+ clock-names = "ref";
+ clock-frequency = <100000>;
+ interrupts = <104 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstgen RST_I2C3>;
+ status = "disabled";
+ };
+
+ gpio0: gpio@7030009000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x70 0x30009000 0x0 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_GPIO>,
+ <&clkgen GATE_CLK_GPIO_DB>;
+ clock-names = "bus", "db";
+
+ port0a: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <96 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpio1: gpio@703000a000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x70 0x3000a000 0x0 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_GPIO>,
+ <&clkgen GATE_CLK_GPIO_DB>;
+ clock-names = "bus", "db";
+
+ port1a: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpio2: gpio@703000b000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x70 0x3000b000 0x0 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkgen GATE_CLK_APB_GPIO>,
+ <&clkgen GATE_CLK_GPIO_DB>;
+ clock-names = "bus", "db";
+
+ port2a: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <98 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
pllclk: clock-controller@70300100c0 {
compatible = "sophgo,sg2042-pll";
reg = <0x70 0x300100c0 0x0 0x40>;
@@ -388,7 +507,6 @@
uart0: serial@7040000000 {
compatible = "snps,dw-apb-uart";
reg = <0x00000070 0x40000000 0x00000000 0x00001000>;
- interrupt-parent = <&intc>;
interrupts = <112 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <500000000>;
clocks = <&clkgen GATE_CLK_UART_500M>,
@@ -399,5 +517,33 @@
resets = <&rstgen RST_UART0>;
status = "disabled";
};
+
+ emmc: mmc@704002a000 {
+ compatible = "sophgo,sg2042-dwcmshc";
+ reg = <0x70 0x4002a000 0x0 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <134 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkgen GATE_CLK_EMMC_100M>,
+ <&clkgen GATE_CLK_AXI_EMMC>,
+ <&clkgen GATE_CLK_100K_EMMC>;
+ clock-names = "core",
+ "bus",
+ "timer";
+ status = "disabled";
+ };
+
+ sd: mmc@704002b000 {
+ compatible = "sophgo,sg2042-dwcmshc";
+ reg = <0x70 0x4002b000 0x0 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkgen GATE_CLK_SD_100M>,
+ <&clkgen GATE_CLK_AXI_SD>,
+ <&clkgen GATE_CLK_100K_SD>;
+ clock-names = "core",
+ "bus",
+ "timer";
+ status = "disabled";
+ };
};
};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
index ca2d44d59d48..c7771b3b6475 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -365,6 +365,12 @@
};
};
+&syscrg {
+ assigned-clocks = <&syscrg JH7110_SYSCLK_CPU_CORE>,
+ <&pllclk JH7110_PLLCLK_PLL0_OUT>;
+ assigned-clock-rates = <500000000>, <1500000000>;
+};
+
&sysgpio {
i2c0_pins: i2c0-0 {
i2c-pins {
diff --git a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts
index d9b4de9e4757..497d961456f3 100644
--- a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts
+++ b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts
@@ -23,6 +23,7 @@
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
+ spi0 = &spi0;
};
chosen {
@@ -44,18 +45,6 @@
clock-frequency = <32768>;
};
-&apb_clk {
- clock-frequency = <62500000>;
-};
-
-&sdhci_clk {
- clock-frequency = <198000000>;
-};
-
-&uart_sclk {
- clock-frequency = <100000000>;
-};
-
&dmac0 {
status = "okay";
};
@@ -79,3 +68,7 @@
&uart0 {
status = "okay";
};
+
+&spi0 {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi
index 1365d3a512a3..78977bdbbe3d 100644
--- a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi
@@ -25,18 +25,6 @@
clock-frequency = <32768>;
};
-&apb_clk {
- clock-frequency = <62500000>;
-};
-
-&sdhci_clk {
- clock-frequency = <198000000>;
-};
-
-&uart_sclk {
- clock-frequency = <100000000>;
-};
-
&dmac0 {
status = "okay";
};
diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts
index 9a3884a73e13..7738d2895c5a 100644
--- a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts
+++ b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts
@@ -20,6 +20,7 @@
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
+ spi0 = &spi0;
};
chosen {
@@ -30,3 +31,7 @@
&uart0 {
status = "okay";
};
+
+&spi0 {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index 3c9974062c20..6992060e6a54 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -5,6 +5,7 @@
*/
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/thead,th1520-clk-ap.h>
/ {
compatible = "thead,th1520";
@@ -215,25 +216,6 @@
#clock-cells = <0>;
};
- apb_clk: apb-clk-clock {
- compatible = "fixed-clock";
- clock-output-names = "apb_clk";
- #clock-cells = <0>;
- };
-
- uart_sclk: uart-sclk-clock {
- compatible = "fixed-clock";
- clock-output-names = "uart_sclk";
- #clock-cells = <0>;
- };
-
- sdhci_clk: sdhci-clock {
- compatible = "fixed-clock";
- clock-frequency = <198000000>;
- clock-output-names = "sdhci_clk";
- #clock-cells = <0>;
- };
-
soc {
compatible = "simple-bus";
interrupt-parent = <&plic>;
@@ -264,11 +246,22 @@
<&cpu3_intc 3>, <&cpu3_intc 7>;
};
+ spi0: spi@ffe700c000 {
+ compatible = "thead,th1520-spi", "snps,dw-apb-ssi";
+ reg = <0xff 0xe700c000 0x0 0x1000>;
+ interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk CLK_SPI>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
uart0: serial@ffe7014000 {
compatible = "snps,dw-apb-uart";
reg = <0xff 0xe7014000 0x0 0x100>;
interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART0_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -278,7 +271,7 @@
compatible = "thead,th1520-dwcmshc";
reg = <0xff 0xe7080000 0x0 0x10000>;
interrupts = <62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
+ clocks = <&clk CLK_EMMC_SDIO>;
clock-names = "core";
status = "disabled";
};
@@ -287,7 +280,7 @@
compatible = "thead,th1520-dwcmshc";
reg = <0xff 0xe7090000 0x0 0x10000>;
interrupts = <64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
+ clocks = <&clk CLK_EMMC_SDIO>;
clock-names = "core";
status = "disabled";
};
@@ -296,7 +289,7 @@
compatible = "thead,th1520-dwcmshc";
reg = <0xff 0xe70a0000 0x0 0x10000>;
interrupts = <71 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sdhci_clk>;
+ clocks = <&clk CLK_EMMC_SDIO>;
clock-names = "core";
status = "disabled";
};
@@ -305,7 +298,8 @@
compatible = "snps,dw-apb-uart";
reg = <0xff 0xe7f00000 0x0 0x100>;
interrupts = <37 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART1_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -315,7 +309,8 @@
compatible = "snps,dw-apb-uart";
reg = <0xff 0xe7f04000 0x0 0x100>;
interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART3_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -326,6 +321,7 @@
reg = <0xff 0xe7f34000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clk CLK_GPIO2>;
portc: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
@@ -344,6 +340,7 @@
reg = <0xff 0xe7f38000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clk CLK_GPIO3>;
portd: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
@@ -362,6 +359,7 @@
reg = <0xff 0xec005000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clk CLK_GPIO0>;
porta: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
@@ -380,6 +378,7 @@
reg = <0xff 0xec006000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clk CLK_GPIO1>;
portb: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
@@ -397,17 +396,25 @@
compatible = "snps,dw-apb-uart";
reg = <0xff 0xec010000 0x0 0x4000>;
interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART2_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
};
+ clk: clock-controller@ffef010000 {
+ compatible = "thead,th1520-clk-ap";
+ reg = <0xff 0xef010000 0x0 0x1000>;
+ clocks = <&osc>;
+ #clock-cells = <1>;
+ };
+
dmac0: dma-controller@ffefc00000 {
compatible = "snps,axi-dma-1.01a";
reg = <0xff 0xefc00000 0x0 0x1000>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb_clk>, <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>, <&clk CLK_PERI_APB_PCLK>;
clock-names = "core-clk", "cfgr-clk";
#dma-cells = <1>;
dma-channels = <4>;
@@ -422,7 +429,7 @@
timer0: timer@ffefc32000 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xefc32000 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -431,7 +438,7 @@
timer1: timer@ffefc32014 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xefc32014 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -440,7 +447,7 @@
timer2: timer@ffefc32028 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xefc32028 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -449,7 +456,7 @@
timer3: timer@ffefc3203c {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xefc3203c 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -459,7 +466,8 @@
compatible = "snps,dw-apb-uart";
reg = <0xff 0xf7f08000 0x0 0x4000>;
interrupts = <40 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART4_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -469,7 +477,8 @@
compatible = "snps,dw-apb-uart";
reg = <0xff 0xf7f0c000 0x0 0x4000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&uart_sclk>;
+ clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART5_PCLK>;
+ clock-names = "baudclk", "apb_pclk";
reg-shift = <2>;
reg-io-width = <4>;
status = "disabled";
@@ -478,7 +487,7 @@
timer4: timer@ffffc33000 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xffc33000 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -487,7 +496,7 @@
timer5: timer@ffffc33014 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xffc33014 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -496,7 +505,7 @@
timer6: timer@ffffc33028 {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xffc33028 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -505,7 +514,7 @@
timer7: timer@ffffc3303c {
compatible = "snps,dw-apb-timer";
reg = <0xff 0xffc3303c 0x0 0x14>;
- clocks = <&apb_clk>;
+ clocks = <&clk CLK_PERI_APB_PCLK>;
clock-names = "timer";
interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 0d678325444f..2341393cfac1 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -137,12 +137,10 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
-CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_MOTORCOMM_PHY=y
-CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@@ -150,29 +148,30 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HW_RANDOM_JH7110=m
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_MV64XXX=m
-CONFIG_I2C_RIIC=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=m
CONFIG_SPI_PL022=m
-CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_PINCTRL_SOPHGO_CV1800B=y
+CONFIG_PINCTRL_SOPHGO_CV1812H=y
+CONFIG_PINCTRL_SOPHGO_SG2000=y
+CONFIG_PINCTRL_SOPHGO_SG2002=y
CONFIG_GPIO_SIFIVE=y
CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
-CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AXP20X_I2C=y
@@ -201,11 +200,11 @@ CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_RCAR is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_CDNS_SUPPORT=m
@@ -217,7 +216,6 @@ CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
@@ -235,7 +233,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
-CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
CONFIG_MMC_SUNXI=y
@@ -249,8 +246,10 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_CLK_SOPHGO_CV1800=y
+CONFIG_CLK_SOPHGO_SG2042_PLL=y
+CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y
+CONFIG_CLK_SOPHGO_SG2042_RPGATE=y
CONFIG_SUN8I_DE2_CCU=m
-CONFIG_RENESAS_OSTM=y
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
@@ -258,7 +257,6 @@ CONFIG_RPMSG_VIRTIO=y
CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
CONFIG_PHY_SUN4I_USB=m
-CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
CONFIG_PHY_STARFIVE_JH7110_PCIE=m
CONFIG_PHY_STARFIVE_JH7110_USB=m
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index af9601da4643..87ff5a1233af 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -58,6 +58,7 @@ CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
# CONFIG_SPI_MEM is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index dd460c649152..95cbd574f291 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -50,6 +50,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
# CONFIG_SPI_MEM is not set
diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S
index f1b9623fe1de..b1f7b636fe9a 100644
--- a/arch/riscv/errata/sifive/errata_cip_453.S
+++ b/arch/riscv/errata/sifive/errata_cip_453.S
@@ -21,7 +21,7 @@
1:
.endm
-ENTRY(sifive_cip_453_page_fault_trp)
+SYM_FUNC_START(sifive_cip_453_page_fault_trp)
ADD_SIGN_EXT a0, t0, t1
#ifdef CONFIG_MMU
la t0, do_page_fault
@@ -29,10 +29,10 @@ ENTRY(sifive_cip_453_page_fault_trp)
la t0, do_trap_unknown
#endif
jr t0
-END(sifive_cip_453_page_fault_trp)
+SYM_FUNC_END(sifive_cip_453_page_fault_trp)
-ENTRY(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_START(sifive_cip_453_insn_fault_trp)
ADD_SIGN_EXT a0, t0, t1
la t0, do_trap_insn_fault
jr t0
-END(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_END(sifive_cip_453_insn_fault_trp)
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 5c589770f2a8..1461af12da6e 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -5,6 +5,7 @@ syscall-y += syscall_table_64.h
generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
+generic-y += mmzone.h
generic-y += parport.h
generic-y += spinlock.h
generic-y += spinlock_types.h
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index e0a1f84404f3..6e13695120bc 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_NUMA
-int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 71af9ecfcfcb..fae152ea0508 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -222,44 +222,44 @@ legacy:
#define __NOT(x) (~(x))
/**
- * test_and_set_bit - Set a bit and return its old value
+ * arch_test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on architectures other than x86.
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
- * test_and_clear_bit - Clear a bit and return its old value
+ * arch_test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation can be reordered on architectures other than x86.
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
- * test_and_change_bit - Change a bit and return its old value
+ * arch_test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
/**
- * set_bit - Atomically set a bit in memory
+ * arch_set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/**
- * clear_bit - Clears a bit in memory
+ * arch_clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
@@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/**
- * change_bit - Toggle a bit in memory
+ * arch_change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
@@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(
+static inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(
+static inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -345,13 +345,13 @@ static inline void clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void __clear_bit_unlock(
+static inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
- clear_bit_unlock(nr, addr);
+ arch_clear_bit_unlock(nr, addr);
}
-static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
@@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
#undef __NOT
#undef __AMO
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
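
Renaming the RISC-V helpers to arch_* and then pulling in asm-generic's instrumented-atomic.h and instrumented-lock.h lets the generic wrappers provide the unprefixed names with sanitizer instrumentation layered on top, which is what makes KASAN/KCSAN see these bit operations. The wrapper shape is roughly (a sketch of the asm-generic pattern, not a verbatim copy):

    static __always_inline void set_bit(long nr, volatile unsigned long *addr)
    {
            /* Tell the sanitizers about the atomic RMW access... */
            instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
            /* ...then defer to the architecture's real implementation. */
            arch_set_bit(nr, addr);
    }
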
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index ce79c558a4c8..8de73f91bfa3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -46,7 +46,23 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern char _end[];
+#define flush_cache_vmap flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ int i;
+
+ /*
+ * We don't care if a cpu concurrently resets this value since
+ * the only place it can be reset is in handle_exception(), where
+ * an sfence.vma is emitted.
+ */
+ for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
+ new_vmalloc[i] = -1ULL;
+ }
+}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
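
This is the writer side of the lazy-vmalloc-fault scheme: a new vmalloc or module mapping sets every bit of new_vmalloc, and the reader side (the new_vmalloc_check macro added to entry.S further down) tests and clears only the faulting CPU's bit. The per-CPU bookkeeping, restated in C as a sketch:

    /* One bit per possible CPU, packed 64 to a word -- the same layout the
     * entry.S assembly indexes with (cpu >> 6) and (cpu % 64). */
    u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];

    static void mark_new_vmalloc_mapping(void)   /* writer: flush_cache_vmap() */
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(new_vmalloc); i++)
                    new_vmalloc[i] = -1ULL;      /* every CPU must recheck */
    }

    static bool test_and_clear_my_bit(int cpu)   /* reader: trap entry */
    {
            u64 *word = &new_vmalloc[cpu / 64];
            u64 mask = 1ULL << (cpu % 64);

            if (!(*word & mask))
                    return false;
            *word &= ~mask;   /* done atomically (amoxor.d) in the real code */
            return true;
    }
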
diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h
new file mode 100644
index 000000000000..07d9942682e0
--- /dev/null
+++ b/arch/riscv/include/asm/exec.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6bcd80325dfc..182db7930edc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5a0bd27fd11a..46d9de54179e 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -92,6 +92,7 @@
#define RISCV_ISA_EXT_ZCF 83
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
+#define RISCV_ISA_EXT_SVVPTC 86
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 8e10a94430a2..7b038f3b7cb0 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -12,8 +12,68 @@
#include <asm-generic/irq.h>
+#define INVALID_CONTEXT UINT_MAX
+
+#ifdef CONFIG_SMP
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
+#ifdef CONFIG_ACPI
+
+enum riscv_irqchip_type {
+ ACPI_RISCV_IRQCHIP_INTC = 0x00,
+ ACPI_RISCV_IRQCHIP_IMSIC = 0x01,
+ ACPI_RISCV_IRQCHIP_PLIC = 0x02,
+ ACPI_RISCV_IRQCHIP_APLIC = 0x03,
+};
+
+int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
+ u32 *id, u32 *nr_irqs, u32 *nr_idcs);
+struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi);
+unsigned long acpi_rintc_index_to_hartid(u32 index);
+unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx);
+unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id);
+unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx);
+int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res);
+
+#else
+static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
+ u32 *id, u32 *nr_irqs, u32 *nr_idcs)
+{
+ return 0;
+}
+
+static inline unsigned long acpi_rintc_index_to_hartid(u32 index)
+{
+ return INVALID_HARTID;
+}
+
+static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id,
+ unsigned int ctxt_idx)
+{
+ return INVALID_HARTID;
+}
+
+static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id)
+{
+ return INVALID_CONTEXT;
+}
+
+static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ return INVALID_CONTEXT;
+}
+
+static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */
+
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
index fa0f535bbbf0..1d85b6617508 100644
--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -10,6 +10,7 @@
#define __KVM_VCPU_RISCV_PMU_H
#include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>
#ifdef CONFIG_RISCV_PMU_SBI
@@ -64,11 +65,11 @@ struct kvm_pmu {
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
@@ -104,8 +105,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
struct kvm_pmu {
};
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask)
+{
+ if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+ *val = 0;
+ return KVM_INSN_CONTINUE_NEXT_SEPC;
+ } else {
+ return KVM_INSN_ILLEGAL_TRAP;
+ }
+}
+
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0, .count = 0, .func = NULL },
+{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h
deleted file mode 100644
index fa17e01d9ab2..000000000000
--- a/arch/riscv/include/asm/mmzone.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* CONFIG_NUMA */
-#endif /* __ASM_MMZONE_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 7ede2111c591..32d308a3355f 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -112,11 +112,13 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
- unsigned long va_kernel_pa_offset;
- unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
+ unsigned long va_kernel_xip_text_pa_offset;
+ unsigned long va_kernel_xip_data_pa_offset;
uintptr_t xiprom;
uintptr_t xiprom_sz;
+#else
+ unsigned long va_kernel_pa_offset;
#endif
};
@@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base;
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ (_y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
+#endif
+
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#ifndef CONFIG_DEBUG_VIRTUAL
@@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x);
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- (_y - kernel_map.va_kernel_xip_pa_offset) : \
- (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
+ (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
+ (_y - kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
+#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 089f3c9f56a3..e79f15293492 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -107,13 +107,6 @@
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_32M
-#define XIP_OFFSET_MASK (SZ_32M - 1)
-#else
-#define XIP_OFFSET 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -142,11 +135,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
+ extern char _sdata[], _start[], _end[]; \
+ uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_sdata - (uintptr_t)&_start; \
+ uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && \
- __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
- __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
- __a; \
+ (__a >= __rom_start_data && __a < __rom_end_data) ? \
+ __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
@@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
+
+svvptc:;
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
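
The asm goto + ALTERNATIVE pairing above is the zero-cost feature-branch idiom: at boot-time patching, the nop is replaced by a jump to the local label on hardware that advertises Svvptc, so the TLB-flush loop is skipped entirely there. The same idiom as a standalone predicate (sketch):

    static __always_inline bool uarch_has_svvptc(void)
    {
            asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
                     : : : : svvptc);
            return false;   /* alternative not applied: no Svvptc */
    svvptc:
            return true;    /* patched to jump here when Svvptc is present */
    }
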
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 7bd3746028c9..98f631b051db 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+#define RISCV_PLAT_FW_EVENT 0xFFFF
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index ec11001c3fe0..ab92fc84e1fc 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index 63acaecc3374..2f901a410586 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_64BIT
#define MAX_PHYSMEM_BITS 56
#else
-#define MAX_PHYSMEM_BITS 34
+#define MAX_PHYSMEM_BITS 32
#endif /* CONFIG_64BIT */
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index a96b1fea24fe..5ba77f60bf0b 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t);
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRCMP
extern asmlinkage int strcmp(const char *cs, const char *ct);
@@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+#endif
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index fca5c6be2b81..ebe52f96da34 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -61,6 +61,13 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+#ifdef CONFIG_64BIT
+ /*
+ * Used in handle_exception() to save a0, a1 and a2 before knowing if we
+ * can access the kernel stack.
+ */
+ unsigned long a0, a1, a2;
+#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
@@ -112,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
-#define _TIF_WORK_MASK \
- (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
-
#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
index 61183688bdd5..fe1a8bf6902d 100644
--- a/arch/riscv/include/asm/topology.h
+++ b/arch/riscv/include/asm/topology.h
@@ -4,6 +4,10 @@
#include <linux/arch_topology.h>
+#ifdef CONFIG_NUMA
+#include <asm/numa.h>
+#endif
+
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_tick topology_scale_freq_tick
#define arch_set_freq_scale topology_set_freq_scale
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 51f6dfe19745..fefe94dc98e2 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index b65bf6306f69..f3d56299bc22 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -9,18 +9,36 @@
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
+ /* Fix up an address in Flash into an address in RAM, early during boot
+ * before the MMU is up: the generated code "thinks" the data is in
+ * Flash, but it must be accessed through its RAM copy (the Flash copy
+ * also exists, but is read-only).
+ *
+ * The start of data in Flash is _sdata and the start of data in RAM is
+ * CONFIG_PHYS_RAM_BASE, so this fix-up essentially does:
+ * reg += CONFIG_PHYS_RAM_BASE - _sdata
+ */
+ li t0, CONFIG_PHYS_RAM_BASE
add \reg, \reg, t0
+ la t0, _sdata
+ sub \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
+ /* In the linker script, at the transition from the read-only section
+ * to the writable section, the VMA is increased while the LMA remains
+ * the same. (See how _sdata, __data_loc and LOAD_OFFSET are handled
+ * in the linker script.)
+ *
+ * Consequently, early during boot before the MMU is up, the generated
+ * code reads the "writable" section at the wrong addresses: the
+ * compiler generates code using the VMA, but the data is located in
+ * Flash at the LMA.
+ */
+ la t0, _sdata
+ sub \reg, \reg, t0
la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
add \reg, \reg, t0
.endm
-
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index ba957aaca5cb..6e0d333f57e5 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -311,29 +311,26 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
#ifdef CONFIG_PCI
/*
- * These interfaces are defined just to enable building ACPI core.
- * TODO: Update it with actual implementation when external interrupt
- * controller support is added in RISC-V ACPI.
+ * raw_pci_read/write - Platform-specific PCI config space access.
*/
-int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 *val)
+int raw_pci_read(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *val)
{
- return PCIBIOS_DEVICE_NOT_FOUND;
-}
+ struct pci_bus *b = pci_find_bus(domain, bus);
-int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
- int reg, int len, u32 val)
-{
- return PCIBIOS_DEVICE_NOT_FOUND;
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->read(b, devfn, reg, len, val);
}
-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+int raw_pci_write(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 val)
{
- return -1;
-}
+ struct pci_bus *b = pci_find_bus(domain, bus);
-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
-{
- return NULL;
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->write(b, devfn, reg, len, val);
}
+
#endif /* CONFIG_PCI */
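
With real implementations in place, ACPI config-space requests resolve through the ops of the bus found via pci_find_bus(). A hypothetical caller, using standard PCI core constants:

    u32 vendor;

    /* Read the 16-bit vendor ID of device 0000:00:00.0 through the raw
     * accessor; this dispatches to the ops registered for the bus. */
    if (raw_pci_read(0 /* domain */, 0 /* bus */, PCI_DEVFN(0, 0),
                     PCI_VENDOR_ID, 2, &vendor) == PCIBIOS_SUCCESSFUL)
            pr_info("vendor id: 0x%04x\n", vendor);
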
diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c
index ff95aeebee3e..130769e3a99c 100644
--- a/arch/riscv/kernel/acpi_numa.c
+++ b/arch/riscv/kernel/acpi_numa.c
@@ -30,7 +30,7 @@
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
-int __init acpi_numa_get_nid(unsigned int cpu)
+static int __init acpi_numa_get_nid(unsigned int cpu)
{
return acpi_early_node_map[cpu];
}
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index b09ca5f944f7..e94180ba432f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -36,6 +36,8 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+
+ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
@@ -43,6 +45,11 @@ void asm_offsets(void)
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
+#ifdef CONFIG_64BIT
+ OFFSET(TASK_TI_A0, task_struct, thread_info.a0);
+ OFFSET(TASK_TI_A1, task_struct, thread_info.a1);
+ OFFSET(TASK_TI_A2, task_struct, thread_info.a2);
+#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
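
TASK_TI_A0/A1/A2 and TASK_TI_CPU become assembler-visible constants through the usual asm-offsets machinery: OFFSET() emits the structure offset as a magic string that the build turns into asm-offsets.h. Roughly, per include/linux/kbuild.h (from memory -- treat as a sketch):

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /* So OFFSET(TASK_TI_A0, task_struct, thread_info.a0) is what lets
     * entry.S write:
     *         REG_S a0, TASK_TI_A0(tp)
     * with tp holding the current task_struct pointer. */
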
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index d6c108c50cba..b320b1d9aa01 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -71,6 +71,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->type = type;
}
+int init_cache_level(unsigned int cpu)
+{
+ return init_of_cache_level(cpu);
+}
+
int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b427188b28fc..3a8eeaa9310c 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -381,6 +381,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 11c0d2e0becf..3c37661801f9 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -451,6 +451,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
ENCODE_CJTYPE_IMM(val - addr);
break;
+ case R_RISCV_ADD16:
+ *(u16 *)loc += val;
+ break;
+ case R_RISCV_SUB16:
+ *(u16 *)loc -= val;
+ break;
case R_RISCV_ADD32:
*(u32 *)loc += val;
break;
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index ac2e908d4418..c200d329d4bd 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -19,6 +19,79 @@
.section .irqentry.text, "ax"
+.macro new_vmalloc_check
+ REG_S a0, TASK_TI_A0(tp)
+ csrr a0, CSR_CAUSE
+ /* Exclude IRQs */
+ blt a0, zero, _new_vmalloc_restore_context_a0
+
+ REG_S a1, TASK_TI_A1(tp)
+ /* Only check new_vmalloc if we are in page/protection fault */
+ li a1, EXC_LOAD_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_STORE_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_INST_PAGE_FAULT
+ bne a0, a1, _new_vmalloc_restore_context_a1
+
+_new_vmalloc_kernel_address:
+ /* Is it a kernel address? */
+ csrr a0, CSR_TVAL
+ bge a0, zero, _new_vmalloc_restore_context_a1
+
+ /* Check if a new vmalloc mapping appeared that could explain the trap */
+ REG_S a2, TASK_TI_A2(tp)
+ /*
+ * Computes:
+ * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ * a1 = BIT_MASK(cpu)
+ */
+ REG_L a2, TASK_TI_CPU(tp)
+ /*
+ * Compute the new_vmalloc element position:
+ * (cpu / 64) * 8 = (cpu >> 6) << 3
+ */
+ srli a1, a2, 6
+ slli a1, a1, 3
+ la a0, new_vmalloc
+ add a0, a0, a1
+ /*
+ * Compute the bit position in the new_vmalloc element:
+ * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - ((cpu >> 6) << 6)
+ *         = cpu - (((cpu >> 6) << 3) << 3)
+ */
+ slli a1, a1, 3
+ sub a1, a2, a1
+ /* Compute the bit mask: 1 << bit_pos */
+ li a2, 1
+ sll a1, a2, a1
+
+ /* Check the value of new_vmalloc for this cpu */
+ REG_L a2, 0(a0)
+ and a2, a2, a1
+ beq a2, zero, _new_vmalloc_restore_context
+
+ /* Atomically reset the current cpu bit in new_vmalloc */
+ amoxor.d a0, a1, (a0)
+
+ /* Only emit a sfence.vma if the uarch caches invalid entries */
+ ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)
+
+ REG_L a0, TASK_TI_A0(tp)
+ REG_L a1, TASK_TI_A1(tp)
+ REG_L a2, TASK_TI_A2(tp)
+ csrw CSR_SCRATCH, x0
+ sret
+
+_new_vmalloc_restore_context:
+ REG_L a2, TASK_TI_A2(tp)
+_new_vmalloc_restore_context_a1:
+ REG_L a1, TASK_TI_A1(tp)
+_new_vmalloc_restore_context_a0:
+ REG_L a0, TASK_TI_A0(tp)
+.endm
+
+
SYM_CODE_START(handle_exception)
/*
* If coming from userspace, preserve the user thread pointer and load
@@ -30,6 +103,20 @@ SYM_CODE_START(handle_exception)
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
+
+#ifdef CONFIG_64BIT
+ /*
+ * The RISC-V kernel does not eagerly emit an sfence.vma after each
+ * new vmalloc mapping, which may result in exceptions:
+ * - if the uarch caches invalid entries, the new mapping is not
+ *   observed by the page table walker and an invalidation is needed.
+ * - if the uarch does not cache invalid entries, a reordered access
+ *   could "miss" the new mapping and trap: in that case, we only need
+ *   to retry the access; no sfence.vma is required.
+ */
+ new_vmalloc_check
+#endif
+
REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
@@ -239,8 +326,8 @@ SYM_CODE_START(ret_from_fork)
jalr s0
1:
move a0, sp /* pt_regs */
- la ra, ret_from_exception
- tail syscall_exit_to_user_mode
+ call syscall_exit_to_user_mode
+ j ret_from_exception
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
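
The shift arithmetic in new_vmalloc_check indexes a bitmap with one bit per CPU: a word index of cpu / 64 (the srli/slli pair turns it into the byte offset (cpu >> 6) << 3) and a bit mask of 1 << (cpu % 64). A rough C equivalent of the test, as a sketch only (new_vmalloc is the array added in the mm/init.c hunk further down):

	#include <stdint.h>

	static int new_vmalloc_bit_set(const uint64_t *new_vmalloc, unsigned int cpu)
	{
		uint64_t word = new_vmalloc[cpu / 64];	/* srli/slli: (cpu >> 6) << 3 byte offset */
		uint64_t mask = 1ULL << (cpu % 64);	/* sub/sll: 1 << (cpu - ((cpu >> 6) << 6)) */

		return (word & mask) != 0;
	}

On top of the test, the macro clears the bit atomically with amoxor.d (the bit is known to be set at that point) before deciding whether an sfence.vma is needed.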
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 906f9a3a5d65..1cd461f3d872 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -787,8 +787,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
struct hlist_head *relocation_hashtable;
- struct list_head used_buckets_list;
unsigned int hashtable_bits;
+ LIST_HEAD(used_buckets_list);
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
@@ -796,8 +796,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
if (!relocation_hashtable)
return -ENOMEM;
- INIT_LIST_HEAD(&used_buckets_list);
-
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
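
LIST_HEAD() declares and initializes the list head in a single statement, which is what makes the separate INIT_LIST_HEAD() call below redundant. The two forms are equivalent (sketch):

	#include <linux/list.h>

	LIST_HEAD(used_buckets_list);		/* declare + initialize */

	/* ... expands to roughly: */
	struct list_head used_buckets_list = {
		.next = &used_buckets_list,
		.prev = &used_buckets_list,
	};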
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 3348a61de7d9..c7468af77c66 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
- unsigned long fp, unsigned long reg_ra)
+static bool fill_callchain(void *entry, unsigned long pc)
{
- struct stackframe buftail;
- unsigned long ra = 0;
- unsigned long __user *user_frame_tail =
- (unsigned long __user *)(fp - sizeof(struct stackframe));
-
- /* Check accessibility of one struct frame_tail beyond */
- if (!access_ok(user_frame_tail, sizeof(buftail)))
- return 0;
- if (__copy_from_user_inatomic(&buftail, user_frame_tail,
- sizeof(buftail)))
- return 0;
-
- if (reg_ra != 0)
- ra = reg_ra;
- else
- ra = buftail.ra;
-
- fp = buftail.fp;
- if (ra != 0)
- perf_callchain_store(entry, ra);
- else
- return 0;
-
- return fp;
+ return perf_callchain_store(entry, pc) == 0;
}
/*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- unsigned long fp = 0;
-
- fp = regs->s0;
- perf_callchain_store(entry, regs->epc);
-
- fp = user_backtrace(entry, fp, regs->ra);
- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
- fp = user_backtrace(entry, fp, 0);
-}
-
-static bool fill_callchain(void *entry, unsigned long pc)
-{
- return perf_callchain_store(entry, pc) == 0;
+ arch_stack_walk_user(fill_callchain, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 50bc5ef7dd2f..d5bf1bc7de62 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -5,6 +5,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
+ -include $(srctree)/include/linux/hidden.h \
-D__DISABLE_EXPORTS -ffreestanding \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
@@ -16,6 +17,7 @@ KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
+CFLAGS_fdt_early.o += -D__NO_FORTIFY
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
@@ -32,5 +34,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/archrandom_early.c b/arch/riscv/kernel/pi/archrandom_early.c
new file mode 100644
index 000000000000..3f05d3cf3b7b
--- /dev/null
+++ b/arch/riscv/kernel/pi/archrandom_early.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/csr.h>
+#include <linux/processor.h>
+
+#include "pi.h"
+
+/*
+ * To avoid rewriting code, include asm/archrandom.h and override, via
+ * macros, the helpers that must not be pulled into this early code.
+ */
+#undef riscv_has_extension_likely
+#define riscv_has_extension_likely(...) false
+#undef pr_err_once
+#define pr_err_once(...)
+
+#include <asm/archrandom.h>
+
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa)
+{
+ unsigned long seed = 0;
+
+ if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr"))
+ return 0;
+
+ if (!csr_seed_long(&seed))
+ return 0;
+
+ return seed;
+}
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index f6d4dedffb84..fbcdc9e4e143 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -6,15 +6,9 @@
#include <asm/pgtable.h>
#include <asm/setup.h>
-static char early_cmdline[COMMAND_LINE_SIZE];
+#include "pi.h"
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
-bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+static char early_cmdline[COMMAND_LINE_SIZE];
static char *get_early_cmdline(uintptr_t dtb_pa)
{
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
index 899610e042ab..9bdee2fafe47 100644
--- a/arch/riscv/kernel/pi/fdt_early.c
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -2,13 +2,9 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/libfdt.h>
+#include <linux/ctype.h>
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 get_kaslr_seed(uintptr_t dtb_pa);
+#include "pi.h"
u64 get_kaslr_seed(uintptr_t dtb_pa)
{
@@ -28,3 +24,162 @@ u64 get_kaslr_seed(uintptr_t dtb_pa)
*prop = 0;
return ret;
}
+
+/**
+ * fdt_device_is_available - check if a device is available for use
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the node whose status to check
+ *
+ * Returns true if the status property is absent or set to "okay" or "ok",
+ * false otherwise
+ */
+static bool fdt_device_is_available(const void *fdt, int node)
+{
+ const char *status;
+ int statlen;
+
+ status = fdt_getprop(fdt, node, "status", &statlen);
+ if (!status)
+ return true;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return true;
+ }
+
+ return false;
+}
+
+/* Copy of fdt_nodename_eq_ */
+static int fdt_node_name_eq(const void *fdt, int offset,
+ const char *s)
+{
+ int olen;
+ int len = strlen(s);
+ const char *p = fdt_get_name(fdt, offset, &olen);
+
+ if (!p || olen < len)
+ /* short match */
+ return 0;
+
+ if (memcmp(p, s, len) != 0)
+ return 0;
+
+ if (p[len] == '\0')
+ return 1;
+ else if (!memchr(s, '@', len) && (p[len] == '@'))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * isa_string_contains - check if isa string contains an extension
+ *
+ * @isa_str: isa string to search
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the extension is in the given isa string,
+ * false otherwise
+ */
+static bool isa_string_contains(const char *isa_str, const char *ext_name)
+{
+ size_t i, single_end, len = strlen(ext_name);
+ char ext_end;
+
+	/* Too short to be a valid ISA string: must at least contain rv32/rv64 */
+ if (strlen(isa_str) < 4)
+ return false;
+
+ if (len == 1) {
+ single_end = strcspn(isa_str, "sSxXzZ");
+ /* Search for single chars between rv32/64 and multi-letter extensions */
+ for (i = 4; i < single_end; i++) {
+ if (tolower(isa_str[i]) == ext_name[0])
+ return true;
+ }
+ return false;
+ }
+
+ /* Skip to start of multi-letter extensions */
+ isa_str = strpbrk(isa_str, "sSxXzZ");
+ while (isa_str) {
+ if (strncasecmp(isa_str, ext_name, len) == 0) {
+ ext_end = isa_str[len];
+ /* Check if matches the whole extension. */
+ if (ext_end == '\0' || ext_end == '_')
+ return true;
+ }
+ /* Multi-letter extensions must be separated from other multi-letter
+ * extensions by an "_"; the end of a multi-letter extension is
+ * therefore either the null character or the "_" at the start of the
+ * next multi-letter extension.
+ */
+ isa_str = strchr(isa_str, '_');
+ if (isa_str)
+ isa_str++;
+ }
+
+ return false;
+}
+
+/**
+ * early_cpu_isa_ext_available - check if cpu node has an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the cpu node
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the cpu node has the extension,
+ * false otherwise
+ */
+static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len);
+ if (prop && fdt_stringlist_contains(prop, len, ext_name))
+ return true;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa", &len);
+ if (prop && isa_string_contains(prop, ext_name))
+ return true;
+
+ return false;
+}
+
+/**
+ * fdt_early_match_extension_isa - check if all cpu nodes have an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @ext_name: the extension to search for
+ *
+ * Returns true if all available cpu nodes have the extension,
+ * false otherwise
+ */
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name)
+{
+ int node, parent;
+ bool ret = false;
+
+ parent = fdt_path_offset(fdt, "/cpus");
+ if (parent < 0)
+ return false;
+
+ fdt_for_each_subnode(node, fdt, parent) {
+ if (!fdt_node_name_eq(fdt, node, "cpu"))
+ continue;
+
+ if (!fdt_device_is_available(fdt, node))
+ continue;
+
+ if (!early_cpu_isa_ext_available(fdt, node, ext_name))
+ return false;
+
+ ret = true;
+ }
+
+ return ret;
+}
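
Taken together, these helpers handle both devicetree encodings: "riscv,isa-extensions" string lists and legacy "riscv,isa" strings. A few illustrative isa_string_contains() results, inferred from the code above (sketch):

	isa_string_contains("rv64imafdc_zicbom_zkr", "c");	/* true: single-letter extension */
	isa_string_contains("rv64imafdc_zicbom_zkr", "zkr");	/* true: multi-letter extension */
	isa_string_contains("rv64imafdc_zicbom_zkr", "zk");	/* false: "zk" is only a prefix of "zkr" */
	isa_string_contains("rv32i", "m");			/* false: not in the single-letter range */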
diff --git a/arch/riscv/kernel/pi/pi.h b/arch/riscv/kernel/pi/pi.h
new file mode 100644
index 000000000000..21141d84fea6
--- /dev/null
+++ b/arch/riscv/kernel/pi/pi.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RISCV_PI_H_
+#define _RISCV_PI_H_
+
+#include <linux/types.h>
+
+/*
+ * The following functions are exported (but prefixed). Declare them here so
+ * that LLVM does not complain it lacks the 'static' keyword (which, if
+ * added, makes LLVM complain because the function is unused).
+ */
+
+u64 get_kaslr_seed(uintptr_t dtb_pa);
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa);
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
+
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name);
+
+#endif /* _RISCV_PI_H_ */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e4bc61c4e58a..e3142d8a6e28 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/personality.h>
#include <asm/unistd.h>
#include <asm/processor.h>
@@ -26,6 +27,7 @@
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
+#include <asm/exec.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -99,6 +101,13 @@ void show_regs(struct pt_regs *regs)
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_u32_below(PAGE_SIZE);
+ return sp & ~0xf;
+}
+
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;
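
arch_align_stack() follows the pattern used on other architectures: subtract a random offset below PAGE_SIZE, then round down to 16 bytes as the RISC-V psABI requires. A quick userspace check of the arithmetic (sketch; rand() stands in for get_random_u32_below()):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	int main(void)
	{
		uintptr_t sp = 0x3ffffff000UL;

		sp -= (uintptr_t)(rand() % 4096);	/* get_random_u32_below(PAGE_SIZE) */
		sp &= ~(uintptr_t)0xf;			/* keep 16-byte alignment */
		assert((sp & 0xf) == 0);
		return 0;
	}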
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index a72879b4249a..5ab1c7e1a6ed 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -12,9 +12,6 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 8e6eb64459af..c180a647a30e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
+#include <linux/kgdb.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
@@ -21,6 +22,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
+#include <linux/nmi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,6 +35,8 @@ enum ipi_message_type {
IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK,
IPI_TIMER,
+ IPI_CPU_BACKTRACE,
+ IPI_KGDB_ROUNDUP,
IPI_MAX
};
@@ -113,6 +117,7 @@ void arch_irq_work_raise(void)
static irqreturn_t handle_IPI(int irq, void *data)
{
+ unsigned int cpu = smp_processor_id();
int ipi = irq - ipi_virq_base;
switch (ipi) {
@@ -126,7 +131,7 @@ static irqreturn_t handle_IPI(int irq, void *data)
ipi_stop();
break;
case IPI_CPU_CRASH_STOP:
- ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
break;
case IPI_IRQ_WORK:
irq_work_run();
@@ -136,8 +141,14 @@ static irqreturn_t handle_IPI(int irq, void *data)
tick_receive_broadcast();
break;
#endif
+ case IPI_CPU_BACKTRACE:
+ nmi_cpu_backtrace(get_irq_regs());
+ break;
+ case IPI_KGDB_ROUNDUP:
+ kgdb_nmicallback(cpu, get_irq_regs());
+ break;
default:
- pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
+ pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);
break;
}
@@ -203,6 +214,8 @@ static const char * const ipi_names[] = {
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -323,3 +336,29 @@ void arch_smp_send_reschedule(int cpu)
send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+
+static void riscv_backtrace_ipi(cpumask_t *mask)
+{
+ send_ipi_mask(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+ int this_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ /* No need to roundup ourselves */
+ if (cpu == this_cpu)
+ continue;
+
+ send_ipi_single(cpu, IPI_KGDB_ROUNDUP);
+ }
+}
+#endif
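
With arch_trigger_cpumask_backtrace() wired up, generic users such as the hung-task watchdog and the RCU stall detector can now dump remote CPU stacks on RISC-V through the usual helpers. A hypothetical caller (sketch):

	#include <linux/nmi.h>

	/* Asks every other CPU to print its stack via IPI_CPU_BACKTRACE */
	if (!trigger_all_cpu_backtrace())
		pr_info("remote backtraces not supported\n");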
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index c6d5de22463f..153a2db4c5fa 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
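
The walk relies on the RISC-V frame-record convention: the frame pointer points just past a two-word {fp, ra} record. A simplified, self-contained model of the loop (sketch; the real code reads the record with __copy_from_user_inatomic() instead of dereferencing user memory directly):

	#include <stdint.h>
	#include <stdio.h>

	struct stackframe { uintptr_t fp, ra; };	/* record stored at fp - 16 */

	static void walk_user_model(uintptr_t fp)
	{
		while (fp && !(fp & 0x7)) {		/* stop on a misaligned fp */
			struct stackframe *f = (struct stackframe *)(fp - sizeof(*f));

			if (!f->ra)
				break;
			printf("ra = %#lx\n", (unsigned long)f->ra);
			fp = f->fp;			/* next frame tail */
		}
	}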
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f7ef8ad9b550..960feb1526ca 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
+LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
# strip rule for the .so file
diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c
index ec688c88456a..51f302b6d503 100644
--- a/arch/riscv/kernel/vendor_extensions/andes.c
+++ b/arch/riscv/kernel/vendor_extensions/andes.c
@@ -8,7 +8,7 @@
#include <linux/types.h>
/* All Andes vendor extensions supported in Linux */
-const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
__RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU),
};
diff --git a/arch/riscv/kernel/vmcore_info.c b/arch/riscv/kernel/vmcore_info.c
index 6d7a22522d63..d5e448aa90e7 100644
--- a/arch/riscv/kernel/vmcore_info.c
+++ b/arch/riscv/kernel/vmcore_info.c
@@ -19,6 +19,13 @@ void arch_crash_save_vmcoreinfo(void)
#endif
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+#ifdef CONFIG_XIP_KERNEL
+	/*
+	 * TODO: Communicate with crash-utility developers on the information
+	 * to export. The XIP case is more complicated, because the
+	 * virtual-physical address offset depends on whether the address is
+	 * in ROM or in RAM.
+	 */
+#else
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
kernel_map.va_kernel_pa_offset);
+#endif
}
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 8c3daa1b0531..a7611789bad5 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -14,6 +14,7 @@
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
+#include <asm/set_memory.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
@@ -65,10 +66,10 @@ SECTIONS
* From this point, stuff is considered writable and will be copied to RAM
*/
__data_loc = ALIGN(PAGE_SIZE); /* location in file */
- . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */
+ . = ALIGN(SECTION_ALIGN); /* location in memory */
#undef LOAD_OFFSET
-#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))
+#define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc)
_sdata = .; /* Start of data section */
_data = .;
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index bcf41d6e0df0..2707a51b082c 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -391,19 +391,9 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
- int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
- if (kvpmu->sdata) {
- if (kvpmu->snapshot_addr != INVALID_GPA) {
- memset(kvpmu->sdata, 0, snapshot_area_size);
- kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr,
- kvpmu->sdata, snapshot_area_size);
- } else {
- pr_warn("snapshot address invalid\n");
- }
- kfree(kvpmu->sdata);
- kvpmu->sdata = NULL;
- }
+ kfree(kvpmu->sdata);
+ kvpmu->sdata = NULL;
kvpmu->snapshot_addr = INVALID_GPA;
}
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 62f409d4176e..7de128be8db9 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -127,8 +127,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->riscv_sbi.args[3] = cp->a3;
run->riscv_sbi.args[4] = cp->a4;
run->riscv_sbi.args[5] = cp->a5;
- run->riscv_sbi.ret[0] = cp->a0;
- run->riscv_sbi.ret[1] = cp->a1;
+ run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
+ run->riscv_sbi.ret[1] = 0;
}
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 2b369f51b0a5..8eec6b69a875 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -3,9 +3,11 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += memmove.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
lib-y += strcmp.o
lib-y += strlen.o
lib-y += strncmp.o
+endif
lib-y += csum.o
ifeq ($(CONFIG_MMU), y)
lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 35f358e70bdb..da23b8347e2d 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -111,3 +111,5 @@ SYM_FUNC_START(__memset)
ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(__pi_memset, __memset)
+SYM_FUNC_ALIAS(__pi___memset, __memset)
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
index 687b2bea5c43..57a5c0066231 100644
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -120,3 +120,5 @@ strcmp_zbb:
.option pop
#endif
SYM_FUNC_END(strcmp)
+SYM_FUNC_ALIAS(__pi_strcmp, strcmp)
+EXPORT_SYMBOL(strcmp)
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 8ae3064e45ff..962983b73251 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -131,3 +131,4 @@ strlen_zbb:
#endif
SYM_FUNC_END(strlen)
SYM_FUNC_ALIAS(__pi_strlen, strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
index aba5b3148621..7b2d0ff9ed6c 100644
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -136,3 +136,5 @@ strncmp_zbb:
.option pop
#endif
SYM_FUNC_END(strncmp)
+SYM_FUNC_ALIAS(__pi_strncmp, strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index a03c994eed3b..b81672729887 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -158,6 +158,7 @@ void __init riscv_init_cbo_blocksizes(void)
#ifdef CONFIG_SMP
static void set_icache_stale_mask(void)
{
+ int cpu = get_cpu();
cpumask_t *mask;
bool stale_cpu;
@@ -168,10 +169,11 @@ static void set_icache_stale_mask(void)
* concurrently on different harts.
*/
mask = &current->mm->context.icache_stale_mask;
- stale_cpu = cpumask_test_cpu(smp_processor_id(), mask);
+ stale_cpu = cpumask_test_cpu(cpu, mask);
cpumask_setall(mask);
- cpumask_assign_cpu(smp_processor_id(), mask, stale_cpu);
+ cpumask_assign_cpu(cpu, mask, stale_cpu);
+ put_cpu();
}
#endif
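
get_cpu() pairs the smp_processor_id() read with preempt_disable(), so the CPU number cannot go stale between the cpumask_test_cpu() and cpumask_assign_cpu() calls. The pattern in brief (sketch):

	int cpu = get_cpu();	/* disables preemption, returns smp_processor_id() */

	/* ... per-cpu work that must stay on this CPU ... */

	put_cpu();		/* re-enables preemption */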
@@ -239,14 +241,12 @@ int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
case PR_RISCV_CTX_SW_FENCEI_OFF:
switch (scope) {
case PR_RISCV_SCOPE_PER_PROCESS:
- current->mm->context.force_icache_flush = false;
-
set_icache_stale_mask();
+ current->mm->context.force_icache_flush = false;
break;
case PR_RISCV_SCOPE_PER_THREAD:
- current->thread.force_icache_flush = false;
-
set_icache_stale_mask();
+ current->thread.force_icache_flush = false;
break;
default:
return -EINVAL;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1785782c2e55..0e8c20adcd98 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,6 +37,8 @@
#include "../kernel/head.h"
+u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
@@ -917,7 +919,7 @@ static void __init relocate_kernel(void)
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
- uintptr_t va, end_va;
+ uintptr_t va, start_va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
@@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
+ start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
+ for (va = start_va; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
- kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
+ kernel_map.phys_addr + (va - start_va),
PMD_SIZE, PAGE_KERNEL);
}
#else
@@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void)
#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa);
static int __init print_nokaslr(char *p)
{
@@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
- u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+ u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos;
+ if (kaslr_seed == 0)
+ kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
/*
* Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must
@@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
- kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
+ + (uintptr_t)&_sdata - (uintptr_t)&_start;
#else
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
+ kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
@@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
*/
kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
0UL : PAGE_OFFSET - kernel_map.phys_addr;
- kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
- /*
- * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
- * kernel, whereas for 64-bit kernel, the end of the virtual address
- * space is occupied by the modules/BPF/kernel mappings which reduces
- * the available size of the linear mapping.
- */
- memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
+ memory_limit = KERN_VIRT_SIZE;
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 533ec9055fa0..4ae67324f992 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry);
/*
@@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* the case that the PTE changed and the spurious fault case.
*/
return true;
+
+svvptc:
+ if (!pte_same(ptep_get(ptep), entry)) {
+ __set_pte_at(vma->vm_mm, ptep, entry);
+		/* Only uarchs without Svadu are impacted here */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,
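
On Svvptc hardware the function now distinguishes a spurious fault (PTE already up to date: return false, nothing to flush) from a real update (set the PTE, then flush the page for uarchs without Svadu). A condensed C model of the patched control flow, with the boot-time asm goto ALTERNATIVE() replaced by a plain flag (sketch):

	#include <stdbool.h>

	static bool has_svvptc;	/* stands in for the boot-patched ALTERNATIVE() */

	static bool set_access_flags_model(bool pte_differs)
	{
		if (!has_svvptc) {
			/* legacy path: update the PTE if needed, always claim success */
			return true;
		}
		if (pte_differs) {
			/* __set_pte_at() followed by flush_tlb_page() */
			return true;
		}
		return false;	/* spurious fault: nothing to do */
	}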
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index f11945ee2490..fb9c917c9b45 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
purgatory-y += strcmp.o strlen.o strncmp.o
+endif
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c60e699e99f5..d339fe4fdedf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -70,6 +70,7 @@ config S390
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_OPS if PCI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select ARCH_HAS_FORTIFY_SOURCE
@@ -137,7 +138,6 @@ config S390
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
select DCACHE_WORD_ACCESS if !KMSAN
- select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
@@ -243,6 +243,7 @@ config S390
select TRACE_IRQFLAGS_SUPPORT
select TTY
select USER_STACKTRACE_SUPPORT
+ select VDSO_GETRANDOM
select VIRT_CPU_ACCOUNTING
select ZONE_DMA
# Note: keep the above list sorted alphabetically
@@ -513,6 +514,26 @@ config SCHED_TOPOLOGY
making when dealing with machines that have multi-threading,
multiple cores or multiple books.
+config SCHED_TOPOLOGY_VERTICAL
+ def_bool y
+ bool "Use vertical CPU polarization by default"
+ depends on SCHED_TOPOLOGY
+ help
+	  Use vertical CPU polarization by default if available.
+	  Without this option, the default CPU polarization is horizontal.
+
+config HIPERDISPATCH_ON
+ def_bool y
+ bool "Use hiperdispatch on vertical polarization by default"
+ depends on SCHED_TOPOLOGY
+ depends on PROC_SYSCTL
+ help
+ Hiperdispatch aims to improve the CPU scheduler's decision
+ making when using vertical polarization by adjusting CPU
+ capacities dynamically. Set this option to use hiperdispatch
+	  on vertical polarization by default. This can be overridden
+	  later via the s390.hiperdispatch sysctl attribute.
+
source "kernel/Kconfig.hz"
config CERT_STORE
@@ -557,17 +578,13 @@ config EXPOLINE
If unsure, say N.
config EXPOLINE_EXTERN
- def_bool y if EXPOLINE
- depends on EXPOLINE
- depends on CC_IS_GCC && GCC_VERSION >= 110200
- depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
- prompt "Generate expolines as extern functions."
+ def_bool EXPOLINE && CC_IS_GCC && GCC_VERSION >= 110200 && \
+ $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
help
- This option is required for some tooling like kpatch. The kernel is
- compiled with -mindirect-branch=thunk-extern and requires a newer
- compiler.
-
- If unsure, say N.
+ Generate expolines as external functions if the compiler supports it.
+ This option is required for some tooling like kpatch, if expolines
+ are enabled. The kernel is compiled with
+ -mindirect-branch=thunk-extern, which requires a newer compiler.
choice
prompt "Expoline default"
diff --git a/arch/s390/Makefile.postlink b/arch/s390/Makefile.postlink
new file mode 100644
index 000000000000..df82f5410769
--- /dev/null
+++ b/arch/s390/Makefile.postlink
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+# ===========================================================================
+# Post-link s390 pass
+# ===========================================================================
+#
+# 1. Separate relocations from vmlinux into relocs.S.
+# 2. Strip relocations from vmlinux.
+
+PHONY := __archpost
+__archpost:
+
+-include include/config/auto.conf
+include $(srctree)/scripts/Kbuild.include
+
+CMD_RELOCS=arch/s390/tools/relocs
+OUT_RELOCS = arch/s390/boot
+quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S
+ cmd_relocs = \
+ mkdir -p $(OUT_RELOCS); \
+ $(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S
+
+quiet_cmd_strip_relocs = RSTRIP $@
+ cmd_strip_relocs = \
+ $(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \
+ --remove-section='.rela.*' --remove-section='.rela__*' $@
+
+vmlinux: FORCE
+ $(call cmd,relocs)
+ $(call cmd,strip_relocs)
+
+clean:
+ @rm -f $(OUT_RELOCS)/relocs.S
+
+PHONY += FORCE clean
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 4f476884d340..8bc1308ac892 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -11,35 +11,23 @@ KASAN_SANITIZE := n
KCSAN_SANITIZE := n
KMSAN_SANITIZE := n
-KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
-KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-
#
-# Use minimum architecture for als.c to be able to print an error
+# Use minimum architecture level so it is possible to print an error
# message if the kernel is started on a machine which is too old
#
-ifndef CONFIG_CC_IS_CLANG
-CC_FLAGS_MARCH_MINIMUM := -march=z900
-else
CC_FLAGS_MARCH_MINIMUM := -march=z10
-endif
-
-ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
-AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
-AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
-CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
-endif
+
+KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR))
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR))
+KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
+KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o
+obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o
+obj-y += uv.o printk.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
@@ -109,11 +97,9 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-CMD_RELOCS=arch/s390/tools/relocs
-quiet_cmd_relocs = RELOCS $@
- cmd_relocs = $(CMD_RELOCS) $< > $@
-$(obj)/relocs.S: vmlinux FORCE
- $(call if_changed,relocs)
+# relocs.S is created by the vmlinux postlink step.
+$(obj)/relocs.S: vmlinux
+ @true
suffix-$(CONFIG_KERNEL_GZIP) := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index 47c48fbfb563..11e0c3d5dbc8 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -9,42 +9,8 @@
#include <asm/sclp.h>
#include "boot.h"
-/*
- * The code within this file will be called very early. It may _not_
- * access anything within the bss section, since that is not cleared
- * yet and may contain data (e.g. initrd) that must be saved by other
- * code.
- * For temporary objects the stack (16k) should be used.
- */
-
static unsigned long als[] = { FACILITIES_ALS };
-static void u16_to_hex(char *str, u16 val)
-{
- int i, num;
-
- for (i = 1; i <= 4; i++) {
- num = (val >> (16 - 4 * i)) & 0xf;
- if (num >= 10)
- num += 7;
- *str++ = '0' + num;
- }
- *str = '\0';
-}
-
-static void print_machine_type(void)
-{
- static char mach_str[80] = "Detected machine-type number: ";
- char type_str[5];
- struct cpuid id;
-
- get_cpu_id(&id);
- u16_to_hex(type_str, id.machine);
- strcat(mach_str, type_str);
- strcat(mach_str, "\n");
- sclp_early_printk(mach_str);
-}
-
static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -80,8 +46,7 @@ void print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
- strcat(als_str, "\n");
- sclp_early_printk(als_str);
+ boot_printk("%s\n", als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@@ -89,16 +54,18 @@ void print_missing_facilities(void)
first = 0;
}
}
- strcat(als_str, "\n");
- sclp_early_printk(als_str);
+ boot_printk("%s\n", als_str);
}
static void facility_mismatch(void)
{
- sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
- print_machine_type();
+ struct cpuid id;
+
+ get_cpu_id(&id);
+ boot_printk("The Linux kernel requires more recent processor hardware\n");
+ boot_printk("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities();
- sclp_early_printk("See Principles of Operations for facility bits\n");
+ boot_printk("See Principles of Operations for facility bits\n");
disabled_wait();
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 83e2ce050b6c..7521a9d75fa2 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -70,7 +70,7 @@ void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
-void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 637c29c3f6e3..0a47b16f6412 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -299,11 +299,11 @@ SYM_CODE_END(startup_normal)
# the save area and does disabled wait with a faulty address.
#
SYM_CODE_START_LOCAL(startup_pgm_check_handler)
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ stmg %r8,%r15,__LC_SAVE_AREA
la %r8,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
- mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
+ mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA
mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
ni __LC_RETURN_PSW,0xfc # remove IO and EX bits
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 1773b72a6a7b..557462e62cd7 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -215,7 +215,7 @@ static void check_cleared_facilities(void)
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
- sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
+ boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
}
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index bd3bf5ef472d..f864d2bff775 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -32,7 +32,7 @@ struct prng_parm {
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
- sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
+ boot_printk("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index 5352b3d356da..5abe59fb3bc0 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -11,131 +11,19 @@
#include <asm/uv.h>
#include "boot.h"
-const char hex_asc[] = "0123456789abcdef";
-
-static char *as_hex(char *dst, unsigned long val, int pad)
-{
- char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
-
- for (*p-- = 0; p >= dst; val >>= 4)
- *p-- = hex_asc[val & 0x0f];
- return end;
-}
-
-static char *symstart(char *p)
-{
- while (*p)
- p--;
- return p + 1;
-}
-
-static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
-{
- /* symbol entries are in a form "10000 c4 startup\0" */
- char *a = _decompressor_syms_start;
- char *b = _decompressor_syms_end;
- unsigned long start;
- unsigned long size;
- char *pivot;
- char *endp;
-
- while (a < b) {
- pivot = symstart(a + (b - a) / 2);
- start = simple_strtoull(pivot, &endp, 16);
- size = simple_strtoull(endp + 1, &endp, 16);
- if (ip < start) {
- b = pivot;
- continue;
- }
- if (ip > start + size) {
- a = pivot + strlen(pivot) + 1;
- continue;
- }
- *off = ip - start;
- *len = size;
- return endp + 1;
- }
- return NULL;
-}
-
-static noinline char *strsym(void *ip)
-{
- static char buf[64];
- unsigned short off;
- unsigned short len;
- char *p;
-
- p = findsym((unsigned long)ip, &off, &len);
- if (p) {
- strncpy(buf, p, sizeof(buf));
- /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
- p = buf + strnlen(buf, sizeof(buf) - 15);
- strcpy(p, "+0x");
- p = as_hex(p + 3, off, 0);
- strcpy(p, "/0x");
- as_hex(p + 3, len, 0);
- } else {
- as_hex(buf, (unsigned long)ip, 16);
- }
- return buf;
-}
-
-void decompressor_printk(const char *fmt, ...)
-{
- char buf[1024] = { 0 };
- char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
- unsigned long pad;
- char *p = buf;
- va_list args;
-
- va_start(args, fmt);
- for (; p < end && *fmt; fmt++) {
- if (*fmt != '%') {
- *p++ = *fmt;
- continue;
- }
- pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
- switch (*fmt) {
- case 's':
- p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
- break;
- case 'p':
- if (*++fmt != 'S')
- goto out;
- p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
- break;
- case 'l':
- if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned long), pad);
- break;
- case 'x':
- if (end - p <= max(sizeof(int) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned int), pad);
- break;
- default:
- goto out;
- }
- }
-out:
- va_end(args);
- sclp_early_printk(buf);
-}
-
void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
bool first = true;
- decompressor_printk("Call Trace:\n");
+ boot_printk("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp;
- decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
- " sp:%016lx [<%016lx>] %pS\n",
- sp, sf->gprs[8], (void *)sf->gprs[8]);
+ boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
+ " sp:%016lx [<%016lx>] %pS\n",
+ sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp)
break;
sp = sf->back_chain;
@@ -148,34 +36,30 @@ void print_pgm_check_info(void)
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
- decompressor_printk("Linux version %s\n", kernel_version);
+ boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- decompressor_printk("Kernel command line: %s\n", early_command_line);
- decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
- get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
+ boot_printk("Kernel command line: %s\n", early_command_line);
+ boot_printk("Kernel fault: interruption code %04x ilc:%x\n",
+ get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
if (kaslr_enabled()) {
- decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
- decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
+ boot_printk("Kernel random base: %lx\n", __kaslr_offset);
+ boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
}
- decompressor_printk("PSW : %016lx %016lx (%pS)\n",
- get_lowcore()->psw_save_area.mask,
- get_lowcore()->psw_save_area.addr,
- (void *)get_lowcore()->psw_save_area.addr);
- decompressor_printk(
+ boot_printk("PSW : %016lx %016lx (%pS)\n",
+ get_lowcore()->psw_save_area.mask,
+ get_lowcore()->psw_save_area.addr,
+ (void *)get_lowcore()->psw_save_area.addr);
+ boot_printk(
" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
psw->eaba);
- decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n",
- gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
+ boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]);
- decompressor_printk("Last Breaking-Event-Address:\n");
- decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
- (void *)get_lowcore()->pgm_last_break);
+ boot_printk("Last Breaking-Event-Address:\n");
+ boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
+ (void *)get_lowcore()->pgm_last_break);
}
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
index 4c9ad8258f7e..1d131a81cb8b 100644
--- a/arch/s390/boot/physmem_info.c
+++ b/arch/s390/boot/physmem_info.c
@@ -190,27 +190,27 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
enum reserved_range_type t;
int i;
- decompressor_printk("Linux version %s\n", kernel_version);
+ boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- decompressor_printk("Kernel command line: %s\n", early_command_line);
- decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
- size, align, min, max);
- decompressor_printk("Reserved memory ranges:\n");
+ boot_printk("Kernel command line: %s\n", early_command_line);
+ boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+ size, align, min, max);
+ boot_printk("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
- decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+ boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start;
}
- decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
- get_physmem_info_source(), physmem_info.info_source);
+ boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
+ get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) {
- decompressor_printk("%016lx %016lx\n", start, end);
+ boot_printk("%016lx %016lx\n", start, end);
total_mem += end - start;
}
- decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
- total_mem, total_reserved_mem,
- total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+ boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+ total_mem, total_reserved_mem,
+ total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address());
- sclp_early_printk("\n\n -- System halted\n");
+ boot_printk("\n\n -- System halted\n");
disabled_wait();
}
diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c
new file mode 100644
index 000000000000..35f18f2b936e
--- /dev/null
+++ b/arch/s390/boot/printk.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/stdarg.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/lowcore.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include <asm/uv.h>
+#include "boot.h"
+
+const char hex_asc[] = "0123456789abcdef";
+
+static char *as_hex(char *dst, unsigned long val, int pad)
+{
+ char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
+
+ for (*p-- = 0; p >= dst; val >>= 4)
+ *p-- = hex_asc[val & 0x0f];
+ return end;
+}
+
+static char *symstart(char *p)
+{
+ while (*p)
+ p--;
+ return p + 1;
+}
+
+static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
+{
+ /* symbol entries are in a form "10000 c4 startup\0" */
+ char *a = _decompressor_syms_start;
+ char *b = _decompressor_syms_end;
+ unsigned long start;
+ unsigned long size;
+ char *pivot;
+ char *endp;
+
+ while (a < b) {
+ pivot = symstart(a + (b - a) / 2);
+ start = simple_strtoull(pivot, &endp, 16);
+ size = simple_strtoull(endp + 1, &endp, 16);
+ if (ip < start) {
+ b = pivot;
+ continue;
+ }
+ if (ip > start + size) {
+ a = pivot + strlen(pivot) + 1;
+ continue;
+ }
+ *off = ip - start;
+ *len = size;
+ return endp + 1;
+ }
+ return NULL;
+}
+
+static noinline char *strsym(void *ip)
+{
+ static char buf[64];
+ unsigned short off;
+ unsigned short len;
+ char *p;
+
+ p = findsym((unsigned long)ip, &off, &len);
+ if (p) {
+ strncpy(buf, p, sizeof(buf));
+ /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
+ p = buf + strnlen(buf, sizeof(buf) - 15);
+ strcpy(p, "+0x");
+ p = as_hex(p + 3, off, 0);
+ strcpy(p, "/0x");
+ as_hex(p + 3, len, 0);
+ } else {
+ as_hex(buf, (unsigned long)ip, 16);
+ }
+ return buf;
+}
+
+void boot_printk(const char *fmt, ...)
+{
+ char buf[1024] = { 0 };
+ char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
+ unsigned long pad;
+ char *p = buf;
+ va_list args;
+
+ va_start(args, fmt);
+ for (; p < end && *fmt; fmt++) {
+ if (*fmt != '%') {
+ *p++ = *fmt;
+ continue;
+ }
+ pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
+ switch (*fmt) {
+ case 's':
+ p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
+ break;
+ case 'p':
+ if (*++fmt != 'S')
+ goto out;
+ p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
+ break;
+ case 'l':
+ if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned long), pad);
+ break;
+ case 'x':
+ if (end - p <= max(sizeof(int) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned int), pad);
+ break;
+ default:
+ goto out;
+ }
+ }
+out:
+ va_end(args);
+ sclp_early_printk(buf);
+}
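
boot_printk() deliberately supports only the conversions the boot code needs: %s, %pS, %lx and %x, each with an optional decimal field width; any other conversion truncates the output at that point. Typical calls, modeled on the hunks above (mask, addr and pgm_code stand in for the lowcore fields; sketch):

	boot_printk("Linux version %s\n", kernel_version);
	boot_printk("PSW : %016lx %016lx (%pS)\n", mask, addr, (void *)addr);
	boot_printk("interruption code %04x\n", pgm_code);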
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index c73b5118ad42..c8f149ad77e5 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -39,10 +39,7 @@ struct machine_info machine;
void error(char *x)
{
- sclp_early_printk("\n\n");
- sclp_early_printk(x);
- sclp_early_printk("\n\n -- System halted");
-
+ boot_printk("\n\n%s\n\n -- System halted", x);
disabled_wait();
}
@@ -296,7 +293,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
- decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
+ boot_printk("The kernel base address is forced to %lx\n", kernel_start);
} else {
kernel_start = __NO_KASLR_START_KERNEL;
}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index ea63a7342f5f..7ec1b8cd0de9 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -794,8 +794,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
+CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_PKEY_CCA=m
+CONFIG_PKEY_EP11=m
+CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index d8b28ff8ff45..df4addd1834a 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -781,8 +781,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
+CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_PKEY_CCA=m
+CONFIG_PKEY_EP11=m
+CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 06ee706b0d78..d3eb3a233693 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -132,4 +132,14 @@ config CRYPTO_CHACHA_S390
It is available as of z13.
+config CRYPTO_HMAC_S390
+ tristate "Keyed-hash message authentication code: HMAC"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and
+ SHA512.
+
+ Architecture: s390
+
endmenu
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1b1cc478fa94..a0cb96937c3d 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
+obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
obj-y += arch_random.o
crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c6fe5405de4a..8cc02d6e0d0f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -51,8 +51,13 @@ struct s390_aes_ctx {
};
struct s390_xts_ctx {
- u8 key[32];
- u8 pcc_key[32];
+ union {
+ u8 keys[64];
+ struct {
+ u8 key[32];
+ u8 pcc_key[32];
+ };
+ };
int key_len;
unsigned long fc;
struct crypto_skcipher *fallback;
@@ -526,6 +531,108 @@ static struct skcipher_alg xts_aes_alg = {
.decrypt = xts_aes_decrypt,
};
+static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ unsigned long fc;
+ int err;
+
+ err = xts_fallback_setkey(tfm, in_key, key_len);
+ if (err)
+ return err;
+
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
+ (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
+
+ /* Check if the function code is available */
+ xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!xts_ctx->fc)
+ return 0;
+
+ /* Store double-key */
+ memcpy(xts_ctx->keys, in_key, key_len);
+ xts_ctx->key_len = key_len;
+ return 0;
+}
+
+static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ unsigned int offset, nbytes, n;
+ struct skcipher_walk walk;
+ int ret;
+ struct {
+ __u8 key[64];
+ __u8 tweak[16];
+ __u8 nap[16];
+ } fxts_param = {
+ .nap = {0},
+ };
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, xts_ctx->fallback);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ offset = xts_ctx->key_len & 0x20;
+ memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
+ memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
+ fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+
+ while ((nbytes = walk.nbytes) != 0) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
+ }
+ memzero_explicit(&fxts_param, sizeof(fxts_param));
+ return ret;
+}
+
+static int fullxts_aes_encrypt(struct skcipher_request *req)
+{
+ return fullxts_aes_crypt(req, 0);
+}
+
+static int fullxts_aes_decrypt(struct skcipher_request *req)
+{
+ return fullxts_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct skcipher_alg fullxts_aes_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "full-xts-aes-s390",
+ .base.cra_priority = 403, /* xts-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = xts_fallback_init,
+ .exit = xts_fallback_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fullxts_aes_set_key,
+ .encrypt = fullxts_aes_encrypt,
+ .decrypt = fullxts_aes_decrypt,
+};
+
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -955,7 +1062,7 @@ static struct aead_alg gcm_aes_aead = {
};
static struct crypto_alg *aes_s390_alg;
-static struct skcipher_alg *aes_s390_skcipher_algs[4];
+static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
@@ -1012,6 +1119,13 @@ static int __init aes_s390_init(void)
goto out_err;
}
+ if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
+ cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
+ ret = aes_s390_register_skcipher(&fullxts_aes_alg);
+ if (ret)
+ goto out_err;
+ }
+
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
ret = aes_s390_register_skcipher(&xts_aes_alg);
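
The offset arithmetic in fullxts_aes_set_key()/fullxts_aes_crypt() above is compact but worth spelling out: "offset = key_len & 0x20" yields 32 for a double AES-128 key and 0 for a double AES-256 key, so a 32-byte key is stored in the upper half of the 64-byte key field and the parameter block handed to cpacf_km() starts at key + offset. Either way the tweak and alpha-power fields line up directly behind the key. A minimal userspace sketch of that layout (illustrative only, not kernel code):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* mirrors the on-stack fxts_param block in fullxts_aes_crypt() */
    struct fxts_param {
            unsigned char key[64];
            unsigned char tweak[16];
            unsigned char nap[16];
    };

    int main(void)
    {
            struct fxts_param p;
            unsigned int key_len;

            for (key_len = 32; key_len <= 64; key_len += 32) {
                    unsigned int offset = key_len & 0x20;

                    memset(&p, 0, sizeof(p));
                    memset(p.key + offset, 0xaa, key_len);  /* fake double-key */
                    p.nap[0] = 0x01;                        /* initial alpha power */
                    /* cpacf_km() would be passed p.key + offset as param base */
                    printf("key_len %2u: param base key+%u, tweak at +%zu\n",
                           key_len, offset,
                           offsetof(struct fxts_param, tweak) - offset);
            }
            return 0;
    }

This prints "tweak at +32" for the 32-byte key and "tweak at +64" for the 64-byte key, i.e. the tweak always follows the key immediately from the instruction's point of view.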
diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
new file mode 100644
index 000000000000..bba9a818dfdc
--- /dev/null
+++ b/arch/s390/crypto/hmac_s390.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * s390 specific HMAC support.
+ */
+
+#define KMSG_COMPONENT "hmac_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <asm/cpacf.h>
+#include <crypto/sha2.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+
+/*
+ * KMAC param block layout for sha2 function codes:
+ * The layout of the param block for the KMAC instruction depends on the
+ * block size of the sha2 hash algorithm selected by the function code. The
+ * param block contains the hash chaining value (cv), the input message
+ * bit-length (imbl) and the hmac secret (key). To prevent code duplication,
+ * the sizes of all of these are calculated from the block size.
+ *
+ * param-block:
+ * +-------+
+ * | cv |
+ * +-------+
+ * | imbl |
+ * +-------+
+ * | key |
+ * +-------+
+ *
+ * sizes:
+ * part | sha2-alg | calculation | size | type
+ * -----+---------+-------------+------+--------
+ * cv | 224/256 | blocksize/2 | 32 | u32[8]
+ * | 384/512 | | 64 | u64[8]
+ * imbl | 224/256 | blocksize/8 | 8 | u64
+ * | 384/512 | | 16 | u128
+ * key | 224/256 | blocksize | 64 | u8[64]
+ * | 384/512 | | 128 | u8[128]
+ */
+
+#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define MAX_IMBL_SIZE sizeof(u128)
+#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+#define SHA2_CV_SIZE(bs) ((bs) >> 1)
+#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
+
+#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
+#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
+
+struct s390_hmac_ctx {
+ u8 key[MAX_BLOCK_SIZE];
+};
+
+union s390_kmac_gr0 {
+ unsigned long reg;
+ struct {
+ unsigned long : 48;
+ unsigned long ikp : 1;
+ unsigned long iimp : 1;
+ unsigned long ccup : 1;
+ unsigned long : 6;
+ unsigned long fc : 7;
+ };
+};
+
+struct s390_kmac_sha2_ctx {
+ u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
+ union s390_kmac_gr0 gr0;
+ u8 buf[MAX_BLOCK_SIZE];
+ unsigned int buflen;
+};
+
+/*
+ * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
+ */
+static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen,
+ unsigned int blocksize)
+{
+ u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
+
+ switch (blocksize) {
+ case SHA256_BLOCK_SIZE:
+ *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE;
+ break;
+ case SHA512_BLOCK_SIZE:
+ *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hash_key(const u8 *in, unsigned int inlen,
+ u8 *digest, unsigned int digestsize)
+{
+ unsigned long func;
+ union {
+ struct sha256_paramblock {
+ u32 h[8];
+ u64 mbl;
+ } sha256;
+ struct sha512_paramblock {
+ u64 h[8];
+ u128 mbl;
+ } sha512;
+ } __packed param;
+
+#define PARAM_INIT(x, y, z) \
+ param.sha##x.h[0] = SHA##y ## _H0; \
+ param.sha##x.h[1] = SHA##y ## _H1; \
+ param.sha##x.h[2] = SHA##y ## _H2; \
+ param.sha##x.h[3] = SHA##y ## _H3; \
+ param.sha##x.h[4] = SHA##y ## _H4; \
+ param.sha##x.h[5] = SHA##y ## _H5; \
+ param.sha##x.h[6] = SHA##y ## _H6; \
+ param.sha##x.h[7] = SHA##y ## _H7; \
+ param.sha##x.mbl = (z)
+
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 224, inlen * 8);
+ break;
+ case SHA256_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 256, inlen * 8);
+ break;
+ case SHA384_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 384, inlen * 8);
+ break;
+ case SHA512_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 512, inlen * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+#undef PARAM_INIT
+
+ cpacf_klmd(func, &param, in, inlen);
+
+ memcpy(digest, &param, digestsize);
+
+ return 0;
+}
+
+static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm);
+ unsigned int ds = crypto_shash_digestsize(tfm);
+ unsigned int bs = crypto_shash_blocksize(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ if (keylen > bs)
+ return hash_key(key, keylen, tfm_ctx->key, ds);
+
+ memcpy(tfm_ctx->key, key, keylen);
+ return 0;
+}
+
+static int s390_hmac_sha2_init(struct shash_desc *desc)
+{
+ struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm);
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+ memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->key, bs);
+
+ ctx->buflen = 0;
+ ctx->gr0.reg = 0;
+ switch (crypto_shash_digestsize(desc->tfm)) {
+ case SHA224_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int s390_hmac_sha2_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int offset, n;
+
+ /* check current buffer */
+ offset = ctx->buflen % bs;
+ ctx->buflen += len;
+ if (offset + len < bs)
+ goto store;
+
+ /* process one stored block */
+ if (offset) {
+ n = bs - offset;
+ memcpy(ctx->buf + offset, data, n);
+ ctx->gr0.iimp = 1;
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
+ data += n;
+ len -= n;
+ offset = 0;
+ }
+ /* process as many blocks as possible */
+ if (len >= bs) {
+ n = (len / bs) * bs;
+ ctx->gr0.iimp = 1;
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
+ data += n;
+ len -= n;
+ }
+store:
+ /* store incomplete block in buffer */
+ if (len)
+ memcpy(ctx->buf + offset, data, len);
+
+ return 0;
+}
+
+static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+ ctx->gr0.iimp = 0;
+ kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs);
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs);
+ memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
+
+ return 0;
+}
+
+static int s390_hmac_sha2_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int ds = crypto_shash_digestsize(desc->tfm);
+ int rc;
+
+ rc = s390_hmac_sha2_init(desc);
+ if (rc)
+ return rc;
+
+ ctx->gr0.iimp = 0;
+ kmac_sha2_set_imbl(ctx->param, len,
+ crypto_shash_blocksize(desc->tfm));
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len);
+ memcpy(out, ctx->param, ds);
+
+ return 0;
+}
+
+#define S390_HMAC_SHA2_ALG(x) { \
+ .fc = CPACF_KMAC_HMAC_SHA_##x, \
+ .alg = { \
+ .init = s390_hmac_sha2_init, \
+ .update = s390_hmac_sha2_update, \
+ .final = s390_hmac_sha2_final, \
+ .digest = s390_hmac_sha2_digest, \
+ .setkey = s390_hmac_sha2_setkey, \
+ .descsize = sizeof(struct s390_kmac_sha2_ctx), \
+ .halg = { \
+ .digestsize = SHA##x##_DIGEST_SIZE, \
+ .base = { \
+ .cra_name = "hmac(sha" #x ")", \
+ .cra_driver_name = "hmac_s390_sha" #x, \
+ .cra_blocksize = SHA##x##_BLOCK_SIZE, \
+ .cra_priority = 400, \
+ .cra_ctxsize = sizeof(struct s390_hmac_ctx), \
+ .cra_module = THIS_MODULE, \
+ }, \
+ }, \
+ }, \
+}
+
+static struct s390_hmac_alg {
+ bool registered;
+ unsigned int fc;
+ struct shash_alg alg;
+} s390_hmac_algs[] = {
+ S390_HMAC_SHA2_ALG(224),
+ S390_HMAC_SHA2_ALG(256),
+ S390_HMAC_SHA2_ALG(384),
+ S390_HMAC_SHA2_ALG(512),
+};
+
+static __always_inline void _s390_hmac_algs_unregister(void)
+{
+ struct s390_hmac_alg *hmac;
+ int i;
+
+ for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) {
+ hmac = &s390_hmac_algs[i];
+ if (!hmac->registered)
+ continue;
+ crypto_unregister_shash(&hmac->alg);
+ }
+}
+
+static int __init hmac_s390_init(void)
+{
+ struct s390_hmac_alg *hmac;
+ int i, rc = -ENODEV;
+
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
+ return -ENODEV;
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) {
+ hmac = &s390_hmac_algs[i];
+ if (!cpacf_query_func(CPACF_KMAC, hmac->fc))
+ continue;
+
+ rc = crypto_register_shash(&hmac->alg);
+ if (rc) {
+ pr_err("unable to register %s\n",
+ hmac->alg.halg.base.cra_name);
+ goto out;
+ }
+ hmac->registered = true;
+ pr_debug("registered %s\n", hmac->alg.halg.base.cra_name);
+ }
+ return rc;
+out:
+ _s390_hmac_algs_unregister();
+ return rc;
+}
+
+static void __exit hmac_s390_exit(void)
+{
+ _s390_hmac_algs_unregister();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, hmac_s390_init);
+module_exit(hmac_s390_exit);
+
+MODULE_DESCRIPTION("S390 HMAC driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 99ea3f12c5d2..ef4491ccbbf8 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -133,8 +133,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
if (msleep_interruptible(1000))
return -EINTR;
}
- ret = pkey_keyblob2pkey(kb->key, kb->keylen,
- pk->protkey, &pk->len, &pk->type);
+ ret = pkey_key2protkey(kb->key, kb->keylen,
+ pk->protkey, &pk->len, &pk->type);
}
return ret;
@@ -802,7 +802,10 @@ out_err:
module_init(paes_s390_init);
module_exit(paes_s390_fini);
-MODULE_ALIAS_CRYPTO("paes");
+MODULE_ALIAS_CRYPTO("ecb(paes)");
+MODULE_ALIAS_CRYPTO("cbc(paes)");
+MODULE_ALIAS_CRYPTO("ctr(paes)");
+MODULE_ALIAS_CRYPTO("xts(paes)");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 65ea12fc87a1..2bb22db54c31 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -25,6 +25,7 @@ struct s390_sha_ctx {
u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
u8 buf[SHA_MAX_BLOCK_SIZE];
int func; /* KIMD function to use */
+ int first_message_part;
};
struct shash_desc;
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index e1350e033a32..a84ef692f572 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -21,9 +21,11 @@ static int sha3_256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_256;
+ sctx->first_message_part = 1;
return 0;
}
@@ -36,6 +38,7 @@ static int sha3_256_export(struct shash_desc *desc, void *out)
octx->rsiz = sctx->count;
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ octx->partial = sctx->first_message_part;
return 0;
}
@@ -48,6 +51,7 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
@@ -61,6 +65,7 @@ static int sha3_224_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_224;
return 0;
@@ -88,9 +93,11 @@ static int sha3_224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_224;
+ sctx->first_message_part = 1;
return 0;
}
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 06c142ed9bb1..07528fc98ff7 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -20,9 +20,11 @@ static int sha3_512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_512;
+ sctx->first_message_part = 1;
return 0;
}
@@ -37,6 +39,7 @@ static int sha3_512_export(struct shash_desc *desc, void *out)
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ octx->partial = sctx->first_message_part;
return 0;
}
@@ -52,6 +55,7 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
@@ -68,6 +72,7 @@ static int sha3_384_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_384;
return 0;
@@ -97,9 +102,11 @@ static int sha3_384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_384;
+ sctx->first_message_part = 1;
return 0;
}
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 686fe7aa192f..961d7d522af1 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -18,6 +18,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
unsigned int index, n;
+ int fc;
/* how much is already in the buffer? */
index = ctx->count % bsize;
@@ -26,10 +27,16 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if ((index + len) < bsize)
goto store;
+ fc = ctx->func;
+ if (ctx->first_message_part)
+ fc |= test_facility(86) ? CPACF_KIMD_NIP : 0;
+
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
- cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+ cpacf_kimd(fc, ctx->state, ctx->buf, bsize);
+ ctx->first_message_part = 0;
+ fc &= ~CPACF_KIMD_NIP;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -38,7 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
n = (len / bsize) * bsize;
- cpacf_kimd(ctx->func, ctx->state, data, n);
+ cpacf_kimd(fc, ctx->state, data, n);
+ ctx->first_message_part = 0;
data += n;
len -= n;
}
@@ -75,7 +83,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int n;
- int mbl_offset;
+ int mbl_offset, fc;
n = ctx->count % bsize;
bits = ctx->count * 8;
@@ -109,7 +117,11 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
return -EINVAL;
}
- cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
+ fc = ctx->func;
+ fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0;
+ if (ctx->first_message_part)
+ fc |= CPACF_KLMD_NIP;
+ cpacf_klmd(fc, ctx->state, ctx->buf, n);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
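
The flag handling added to s390_sha_update()/s390_sha_final() is easier to see in isolation. A minimal plain-C model follows; the flag values are taken from the cpacf.h hunk later in this series, no CPACF instruction is issued, and CPACF_KIMD_SHA3_256 (0x21 in cpacf.h) is used purely as a sample function code:

    #include <stdbool.h>
    #include <stdio.h>

    #define CPACF_KIMD_NIP      0x8000
    #define CPACF_KLMD_NIP      0x8000
    #define CPACF_KLMD_DUFOP    0x4000
    #define CPACF_KIMD_SHA3_256 0x21

    static bool msa12 = true;  /* stands in for test_facility(86) */

    /* update path: NIP is set only for the first block of a message */
    static int kimd_fc(int func, bool first_message_part)
    {
            return func | (first_message_part && msa12 ? CPACF_KIMD_NIP : 0);
    }

    /* final path: DUFOP on MSA 12 machines, NIP if nothing was hashed yet */
    static int klmd_fc(int func, bool first_message_part)
    {
            int fc = func | (msa12 ? CPACF_KLMD_DUFOP : 0);

            if (first_message_part)
                    fc |= CPACF_KLMD_NIP;
            return fc;
    }

    int main(void)
    {
            printf("kimd: first %#x, later %#x\n",
                   kimd_fc(CPACF_KIMD_SHA3_256, true),
                   kimd_fc(CPACF_KIMD_SHA3_256, false));
            printf("klmd: first %#x, later %#x\n",
                   klmd_fc(CPACF_KIMD_SHA3_256, true),
                   klmd_fc(CPACF_KIMD_SHA3_256, false));
            return 0;
    }

The first KIMD call thus carries 0x8021, subsequent calls plain 0x21; KLMD carries 0xc021 for a single-part message and 0x4021 otherwise.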
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index 65f4036fd541..83ebf54cca6b 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -78,7 +78,6 @@ struct hypfs_dbfs_file {
struct dentry *dentry;
};
-extern void hypfs_dbfs_exit(void);
extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 26a009f9c49e..c8af67d20994 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -29,8 +29,6 @@ static enum diag204_format diag204_info_type; /* used diag 204 data format */
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
-static struct dentry *dbfs_d204_file;
-
enum diag204_format diag204_get_info_type(void)
{
return diag204_info_type;
@@ -214,16 +212,13 @@ __init int hypfs_diag_init(void)
hypfs_dbfs_create_file(&dbfs_file_d204);
rc = hypfs_diag_fs_init();
- if (rc) {
+ if (rc)
pr_err("The hardware system does not provide all functions required by hypfs\n");
- debugfs_remove(dbfs_d204_file);
- }
return rc;
}
void hypfs_diag_exit(void)
{
- debugfs_remove(dbfs_d204_file);
hypfs_diag_fs_exit();
diag204_free_buffer();
hypfs_dbfs_remove_file(&dbfs_file_d204);
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 4b904110d27c..297bf7157968 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,3 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index de980c938a3e..73e781b56bfe 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -39,11 +39,7 @@
#define ALT_TYPE_SHIFT 20
#define ALT_CTX_SHIFT 28
-#define ALT_FACILITY_EARLY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
- ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
- (facility) << ALT_DATA_SHIFT)
-
-#define ALT_FACILITY(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
+#define ALT_FACILITY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h
index 50e23ce854e5..aca08b0acbc1 100644
--- a/arch/s390/include/asm/arch_hweight.h
+++ b/arch/s390/include/asm/arch_hweight.h
@@ -4,6 +4,7 @@
#define _ASM_S390_ARCH_HWEIGHT_H
#include <linux/types.h>
+#include <asm/march.h>
static __always_inline unsigned long popcnt_z196(unsigned long w)
{
@@ -29,9 +30,9 @@ static __always_inline unsigned long popcnt_z15(unsigned long w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 32;
w += w >> 16;
@@ -43,9 +44,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 16;
w += w >> 8;
@@ -56,9 +57,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
static __always_inline unsigned int __arch_hweight16(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15((unsigned short)w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 8;
return w & 0xff;
@@ -68,7 +69,7 @@ static __always_inline unsigned int __arch_hweight16(unsigned int w)
static __always_inline unsigned int __arch_hweight8(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES))
+ if (__is_defined(MARCH_HAS_Z196_FEATURES))
return popcnt_z196((unsigned char)w);
return __sw_hweight8(w);
}
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 742c7919cbcd..65380da9e75f 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -9,6 +9,7 @@
#define __ARCH_S390_ATOMIC_OPS__
#include <linux/limits.h>
+#include <asm/march.h>
static __always_inline int __atomic_read(const atomic_t *v)
{
@@ -56,7 +57,7 @@ static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
}
}
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline op_type op_name(op_type val, op_type *ptr) \
@@ -107,7 +108,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
static __always_inline int op_name(int val, int *ptr) \
@@ -166,7 +167,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 82de2a7c4160..d82130d7f2b6 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -8,13 +8,15 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
+#include <asm/march.h>
+
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index dae8843b164f..1d3a4b0c650f 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -54,6 +54,8 @@
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
+#define CPACF_KM_XTS_128_FULL 0x52
+#define CPACF_KM_XTS_256_FULL 0x54
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -121,23 +123,31 @@
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
+#define CPACF_KMAC_HMAC_SHA_224 0x70
+#define CPACF_KMAC_HMAC_SHA_256 0x71
+#define CPACF_KMAC_HMAC_SHA_384 0x72
+#define CPACF_KMAC_HMAC_SHA_512 0x73
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
-#define CPACF_PCKMO_QUERY 0x00
-#define CPACF_PCKMO_ENC_DES_KEY 0x01
-#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
-#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
-#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
-#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
-#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
-#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
-#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
-#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
-#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
-#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_QUERY 0x00
+#define CPACF_PCKMO_ENC_DES_KEY 0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15
+#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16
+#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76
+#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -165,7 +175,40 @@
#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+/*
+ * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST)
+ * instructions
+ */
+#define CPACF_KIMD_NIP 0x8000
+#define CPACF_KLMD_DUFOP 0x4000
+#define CPACF_KLMD_NIP 0x8000
+
+/*
+ * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KDSA_QUERY 0x00
+#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01
+#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02
+#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03
+#define CPACF_KDSA_ECDSA_SIGN_P256 0x09
+#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a
+#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13
+#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20
+#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24
+#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28
+#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34
+
+#define CPACF_FC_QUERY 0x00
+#define CPACF_FC_QUERY_AUTH_INFO 0x7F
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+typedef struct { unsigned char bytes[256]; } cpacf_qai_t;
/*
* Prototype for a not existing function to produce a link
@@ -175,80 +218,85 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
void __cpacf_bad_opcode(void);
static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
- cpacf_mask_t *mask)
+ u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc),
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)
- : "cc", "r0", "r1");
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query_rrf(u32 opc,
- u8 r1, u8 r2, u8 r3, u8 m4,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
+ u8 m4, u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
- [r3] "i" (r3), [m4] "i" (m4)
- : "cc", "r0", "r1");
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
+ [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query(unsigned int opcode,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb,
+ u8 fc)
{
switch (opcode) {
case CPACF_KDSA:
- __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc);
break;
case CPACF_KIMD:
- __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc);
break;
case CPACF_KLMD:
- __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc);
break;
case CPACF_KM:
- __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc);
break;
case CPACF_KMA:
- __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMAC:
- __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc);
break;
case CPACF_KMC:
- __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc);
break;
case CPACF_KMCTR:
- __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMF:
- __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc);
break;
case CPACF_KMO:
- __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc);
break;
case CPACF_PCC:
- __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc);
break;
case CPACF_PCKMO:
- __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc);
break;
case CPACF_PRNO:
- __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc);
break;
default:
__cpacf_bad_opcode();
}
}
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY);
+}
+
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
{
switch (opcode) {
@@ -269,6 +317,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(57); /* check for MSA5 */
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
+ case CPACF_KDSA:
+ return test_facility(155); /* check for MSA9 */
default:
__cpacf_bad_opcode();
return 0;
@@ -276,14 +326,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
}
/**
- * cpacf_query() - check if a specific CPACF function is available
+ * cpacf_query() - Query the function code mask for this CPACF opcode
* @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
+ * @mask: ptr to struct cpacf_mask_t
*
* Executes the query function for the given crypto instruction @opcode
* and checks whether the instruction itself is available
*
- * Returns 1 if @func is available for @opcode, 0 otherwise
+ * On success 1 is returned and @mask is filled with the function
+ * code mask for this CPACF opcode, otherwise 0 is returned.
*/
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
@@ -300,7 +351,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
-static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode,
+ unsigned int func)
{
cpacf_mask_t mask;
@@ -309,6 +361,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
return 0;
}
+static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO);
+}
+
+/**
+ * cpacf_qai() - Get the query authentication information for a CPACF opcode
+ * @opcode: the opcode of the crypto instruction
+ * @qai: ptr to struct cpacf_qai_t
+ *
+ * Executes the query authentication information function for the given crypto
+ * instruction @opcode, if that function code is available
+ *
+ * On success 1 is returned and @qai is filled with the query authentication
+ * information for this CPACF opcode, otherwise 0 is returned.
+ */
+static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) {
+ __cpacf_qai(opcode, qai);
+ return 1;
+ }
+ memset(qai, 0, sizeof(*qai));
+ return 0;
+}
+
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
@@ -391,7 +469,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
@@ -416,7 +494,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -425,29 +503,30 @@ static inline void cpacf_klmd(unsigned long func, void *param,
}
/**
- * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction
- * @func: the function code passed to KM; see CPACF_KMAC_xxx defines
+ * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction and updates flags in gr0
+ * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(unsigned long func, void *param,
- const u8 *src, long src_len)
+static inline int _cpacf_kmac(unsigned long *gr0, void *param,
+ const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
- " lgr 0,%[fc]\n"
+ " lgr 0,%[r0]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
- : [src] "+&d" (s.pair)
- : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+ " lgr %[r0],0\n"
+ : [r0] "+d" (*gr0), [src] "+&d" (s.pair)
+ : [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
@@ -455,6 +534,22 @@ static inline int cpacf_kmac(unsigned long func, void *param,
}
/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kmac(unsigned long func, void *param,
+ const u8 *src, long src_len)
+{
+ return _cpacf_kmac(&func, param, src, src_len);
+}
+
+/**
* cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
* @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
* @param: address of parameter block; see POP for details on each func
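
cpacf_test_func(), unchanged by this patch but shown in the hunk context above, uses MSB-first bit numbering: the new KMAC HMAC function codes 0x70-0x73 all fall into byte 14 of the 16-byte query mask. A standalone worked example of that bit math (plain C, not kernel code):

    #include <stdio.h>

    typedef struct { unsigned char bytes[16]; } cpacf_mask_t;

    /* same bit test as cpacf_test_func() in cpacf.h */
    static int test_func(const cpacf_mask_t *mask, unsigned int func)
    {
            return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
    }

    int main(void)
    {
            cpacf_mask_t mask = { .bytes = { 0 } };
            unsigned int fc = 0x70;  /* CPACF_KMAC_HMAC_SHA_224 */

            /* pretend the facility reported HMAC-SHA-224 as installed */
            mask.bytes[fc >> 3] |= 0x80 >> (fc & 7);
            printf("fc %#x: byte %u, bit mask %#x, available=%d\n",
                   fc, fc >> 3, 0x80 >> (fc & 7), test_func(&mask, fc));
            return 0;
    }

For fc 0x70 this reports byte 14 and bit mask 0x80, i.e. the top bit of byte 14.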
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index 72a9556d04f3..e6527f51ad0b 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -202,8 +202,9 @@ union ctlreg0 {
unsigned long : 3;
unsigned long ccc : 1; /* Cryptography counter control */
unsigned long pec : 1; /* PAI extension control */
- unsigned long : 17;
- unsigned long : 3;
+ unsigned long : 15;
+ unsigned long wti : 1; /* Warning-track */
+ unsigned long : 4;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index c0d43512f4fc..e1316e181230 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -38,6 +38,7 @@ enum diag_stat_enum {
DIAG_STAT_X308,
DIAG_STAT_X318,
DIAG_STAT_X320,
+ DIAG_STAT_X49C,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -363,4 +364,12 @@ void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+/* diag 49c subcodes */
+enum diag49c_sc {
+ DIAG49C_SUBC_ACK = 0,
+ DIAG49C_SUBC_REG = 1
+};
+
+int diag49c(unsigned long subcode);
+
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index b7d234838a36..715bcf8fb69a 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -14,7 +14,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/preempt.h>
-
+#include <asm/alternative.h>
#include <asm/lowcore.h>
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
@@ -39,28 +39,51 @@ static inline void __clear_facility(unsigned long nr, void *facilities)
ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
}
-static inline int __test_facility(unsigned long nr, void *facilities)
+static __always_inline bool __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
if (nr >= MAX_FACILITY_BIT)
- return 0;
+ return false;
ptr = (unsigned char *) facilities + (nr >> 3);
return (*ptr & (0x80 >> (nr & 7))) != 0;
}
/*
+ * __test_facility_constant() generates a single branch instruction. If the
+ * tested facility is available (the likely case) the branch is patched into a nop.
+ *
+ * Do not use this function unless you know what you are doing. All users are
+ * supposed to use test_facility() which will do the right thing.
+ */
+static __always_inline bool __test_facility_constant(unsigned long nr)
+{
+ asm goto(
+ ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FACILITY(%[nr]))
+ :
+ : [nr] "i" (nr)
+ :
+ : l_no);
+ return true;
+l_no:
+ return false;
+}
+
+/*
* The test_facility function uses the bit ordering where the MSB is bit 0.
* That makes it easier to query facility bits with the bit number as
* documented in the Principles of Operation.
*/
-static inline int test_facility(unsigned long nr)
+static __always_inline bool test_facility(unsigned long nr)
{
unsigned long facilities_als[] = { FACILITIES_ALS };
- if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
- if (__test_facility(nr, &facilities_als))
- return 1;
+ if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(nr)) {
+ if (nr < sizeof(facilities_als) * 8) {
+ if (__test_facility(nr, &facilities_als))
+ return true;
+ }
+ return __test_facility_constant(nr);
}
return __test_facility(nr, &stfle_fac_list);
}
diff --git a/arch/s390/include/asm/fpu-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h
index 02ccfe46050a..d296322be4bc 100644
--- a/arch/s390/include/asm/fpu-insn-asm.h
+++ b/arch/s390/include/asm/fpu-insn-asm.h
@@ -407,6 +407,28 @@
MRXBOPC 0, 0x0E, v1
.endm
+/* VECTOR STORE BYTE REVERSED ELEMENTS */
+ .macro VSTBR vr1, disp, index="%r0", base, m
+ VX_NUM v1, \vr1
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE600 | ((v1&15) << 4) | (x2&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m, 0x0E, v1
+.endm
+.macro VSTBRH vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 1
+.endm
+.macro VSTBRF vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 2
+.endm
+.macro VSTBRG vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 3
+.endm
+.macro VSTBRQ vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 4
+.endm
+
/* VECTOR STORE MULTIPLE */
.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index fbadca645af7..406746666eb7 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -6,8 +6,23 @@
#define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__
+#include <asm/stacktrace.h>
-unsigned long return_address(unsigned int n);
+static __always_inline unsigned long return_address(unsigned int n)
+{
+ struct stack_frame *sf;
+
+ if (!n)
+ return (unsigned long)__builtin_return_address(0);
+
+ sf = (struct stack_frame *)current_frame_address();
+ do {
+ sf = (struct stack_frame *)sf->back_chain;
+ if (!sf)
+ return 0;
+ } while (--n);
+ return sf->gprs[8];
+}
#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h
new file mode 100644
index 000000000000..27e23aa27a24
--- /dev/null
+++ b/arch/s390/include/asm/hiperdispatch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#ifndef _ASM_HIPERDISPATCH_H
+#define _ASM_HIPERDISPATCH_H
+
+void hd_reset_state(void);
+void hd_add_core(int cpu);
+void hd_disable_hiperdispatch(void);
+int hd_enable_hiperdispatch(void);
+
+#endif /* _ASM_HIPERDISPATCH_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 54b42817f70a..d9e705f4a697 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,6 +47,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_FTP,
+ IRQEXT_WTI,
IRQIO_CIO,
IRQIO_DAS,
IRQIO_C15,
@@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+ IRQ_SUBCLASS_WARNING_TRACK = 33,
};
#define CR0_IRQ_SUBCLASS_MASK \
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 183ac29afaf8..48c64716d1f2 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -98,8 +98,8 @@ struct lowcore {
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
- __u64 save_area_sync[8]; /* 0x0200 */
- __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area[8]; /* 0x0200 */
+ __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
__u64 pcpu; /* 0x0288 */
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
new file mode 100644
index 000000000000..fd9eef3be44c
--- /dev/null
+++ b/arch/s390/include/asm/march.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_S390_MARCH_H
+#define __ASM_S390_MARCH_H
+
+#include <linux/kconfig.h>
+
+#define MARCH_HAS_Z10_FEATURES 1
+
+#ifndef __DECOMPRESSOR
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#define MARCH_HAS_Z196_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+#define MARCH_HAS_ZEC12_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+#define MARCH_HAS_Z13_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
+#define MARCH_HAS_Z14_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
+#define MARCH_HAS_Z15_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES
+#define MARCH_HAS_Z16_FEATURES 1
+#endif
+
+#endif /* __DECOMPRESSOR */
+
+#endif /* __ASM_S390_MARCH_H */
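
The MARCH_HAS_* macros are deliberately either defined to 1 or left undefined, so they work both with plain #ifdef (atomic_ops.h, percpu.h above) and with __is_defined() from <linux/kconfig.h> (arch_hweight.h above). A standalone demo of the __is_defined() preprocessor trick, with the helper macro bodies copied from kconfig.h for illustration:

    #include <stdio.h>

    /* helpers copied from <linux/kconfig.h> for illustration */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x)              ___is_defined(x)
    #define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define MARCH_HAS_Z196_FEATURES 1    /* comment out to flip the result */

    int main(void)
    {
            /* evaluates to a compile-time 1 or 0, usable in plain C code */
            printf("z196 features: %d\n", __is_defined(MARCH_HAS_Z196_FEATURES));
            return 0;
    }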
diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h
deleted file mode 100644
index 73e3e7c6976c..000000000000
--- a/arch/s390/include/asm/mmzone.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * Copyright IBM Corp. 2015
- */
-
-#ifndef _ASM_S390_MMZONE_H
-#define _ASM_S390_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif /* CONFIG_NUMA */
-#endif /* _ASM_S390_MMZONE_H */
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 9f1eea15872c..916ab59e458a 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -38,4 +38,18 @@ struct mod_arch_specific {
#endif /* CONFIG_FUNCTION_TRACER */
};
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ const Elf_Shdr *s, *se;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+ return NULL;
+}
+
#endif /* _ASM_S390_MODULE_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 16e4caa931f1..73e1e03317b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,8 +176,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
-int arch_make_page_accessible(struct page *page);
-#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
struct vm_layout {
unsigned long kaslr_offset;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 30820a649e6e..9d920ced6047 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -191,7 +191,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
return (zdev->fh & (1UL << 31)) ? true : false;
}
-extern const struct attribute_group *zpci_attr_groups[];
+extern const struct attribute_group zpci_attr_group;
+extern const struct attribute_group pfip_attr_group;
+extern const struct attribute_group zpci_ident_attr_group;
+
+#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \
+ &pfip_attr_group, \
+ &zpci_ident_attr_group,
+
extern unsigned int s390_pci_force_floating __initdata;
extern unsigned int s390_pci_no_rid;
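
ARCH_PCI_DEV_GROUPS deliberately expands to a comma-terminated list so that common PCI code can splice the s390 groups into a NULL-terminated attribute group array. A hedged sketch of such a consumer; the array name and its other entry are illustrative, not the actual common-code definition:

    static const struct attribute_group *pci_dev_attr_groups[] = {
            &pci_dev_group,         /* hypothetical common group */
    #ifdef ARCH_PCI_DEV_GROUPS
            ARCH_PCI_DEV_GROUPS     /* expands to "&zpci_attr_group, ..." */
    #endif
            NULL,
    };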
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 89a28740b6ab..84f6b8357b45 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
+#include <asm/march.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
@@ -50,7 +51,7 @@
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifndef MARCH_HAS_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
@@ -61,7 +62,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
@@ -129,7 +130,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 9917e2717b2b..66200d4a2134 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -48,30 +48,6 @@ struct perf_sf_sde_regs {
unsigned long reserved:63; /* reserved */
};
-/* Perf PMU definitions for the counter facility */
-#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
-
-/* Perf PMU definitions for the sampling facility */
-#define PERF_CPUM_SF_MAX_CTR 2
-#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
-#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
-#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
-#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
-#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
-#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
- PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
-
-#define REG_NONE 0
-#define REG_OVERFLOW 1
-#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
-#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define TEAR_REG(hwc) ((hwc)->last_tag)
-#define SAMPL_RATE(hwc) ((hwc)->event_base)
-#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
-#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
-
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3fa280d0672a..0ffbaf741955 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -955,6 +955,7 @@ static inline int pte_unused(pte_t pte)
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
* must not be set.
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 47d80a7451a6..5dca1a46a9f6 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -22,7 +22,7 @@
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 3ae5f31c665d..deca3f221836 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,8 +5,9 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
+#include <asm/march.h>
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -75,7 +76,7 @@ static __always_inline bool should_resched(int preempt_offset)
preempt_offset);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define PREEMPT_ENABLED (0)
@@ -123,7 +124,7 @@ static __always_inline bool should_resched(int preempt_offset)
tif_need_resched());
}
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define init_task_preempt_count(p) do { } while (0)
/* Deferred to CPU bringup time */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5ecd442535b9..9a5236acc0a8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -44,6 +44,7 @@ struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
unsigned long flags; /* per CPU flags */
+ unsigned long capacity; /* cpu capacity for scheduler */
signed char state; /* physical cpu state */
signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index da3dad18fe50..eb00fa1771da 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -72,6 +72,7 @@ struct sclp_info {
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
+ unsigned char has_wti : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned char has_64bscao : 1;
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 8505737712ee..70b920b32827 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -34,6 +34,7 @@
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
+#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@@ -95,6 +96,7 @@ extern unsigned long mio_wb_bit_mask;
#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
@@ -115,6 +117,8 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
+void register_early_console(void);
+
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index cd835f4fb11a..7feca96c48c6 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -12,6 +12,7 @@
#include <asm/processor.h>
#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
+#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
@@ -34,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_cpu_set_capacity(int cpu, unsigned long val);
+extern void smp_set_core_capacity(int cpu, unsigned long val);
+extern unsigned long smp_cpu_get_capacity(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 3a0ac0c7a9a3..cef06bffad80 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
+#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
+#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
+
#define SD_BOOK_INIT SD_CPU_INIT
#ifdef CONFIG_NUMA
diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h
new file mode 100644
index 000000000000..46462ee645b0
--- /dev/null
+++ b/arch/s390/include/asm/trace/hiperdispatch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for hiperdispatch
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_HIPERDISPATCH_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE hiperdispatch
+
+TRACE_EVENT(s390_hd_work_fn,
+ TP_PROTO(int steal_time_percentage,
+ int entitled_core_count,
+ int highcap_core_count),
+ TP_ARGS(steal_time_percentage,
+ entitled_core_count,
+ highcap_core_count),
+ TP_STRUCT__entry(__field(int, steal_time_percentage)
+ __field(int, entitled_core_count)
+ __field(int, highcap_core_count)),
+ TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage;
+ __entry->entitled_core_count = entitled_core_count;
+ __entry->highcap_core_count = highcap_core_count;),
+ TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d",
+ __entry->steal_time_percentage,
+ __entry->entitled_core_count,
+ __entry->highcap_core_count)
+);
+
+TRACE_EVENT(s390_hd_rebuild_domains,
+ TP_PROTO(int current_highcap_core_count,
+ int new_highcap_core_count),
+ TP_ARGS(current_highcap_core_count,
+ new_highcap_core_count),
+ TP_STRUCT__entry(__field(int, current_highcap_core_count)
+ __field(int, new_highcap_core_count)),
+ TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count;
+ __entry->new_highcap_core_count = new_highcap_core_count),
+ TP_printk("change highcap_core_count: %u -> %u",
+ __entry->current_highcap_core_count,
+ __entry->new_highcap_core_count)
+);
+
+#endif /* _TRACE_S390_HIPERDISPATCH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/s390/include/asm/vdso-symbols.h b/arch/s390/include/asm/vdso-symbols.h
new file mode 100644
index 000000000000..0df17574d788
--- /dev/null
+++ b/arch/s390/include/asm/vdso-symbols.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __S390_VDSO_SYMBOLS_H__
+#define __S390_VDSO_SYMBOLS_H__
+
+#include <generated/vdso64-offsets.h>
+#ifdef CONFIG_COMPAT
+#include <generated/vdso32-offsets.h>
+#endif
+
+#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
+#ifdef CONFIG_COMPAT
+#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
+#else
+#define VDSO32_SYMBOL(tsk, name) (-1UL)
+#endif
+
+#endif /* __S390_VDSO_SYMBOLS_H__ */
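As a usage illustration (the symbol name is purely illustrative, only to show the expansion), signal setup code can resolve a trampoline address with VDSO64_SYMBOL(current, sigreturn), which evaluates to the task's mm->context.vdso_base plus the generated vdso64_offset_sigreturn constant.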
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 53165aa7813a..91061f0279be 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -6,18 +6,6 @@
#ifndef __ASSEMBLY__
-#include <generated/vdso64-offsets.h>
-#ifdef CONFIG_COMPAT
-#include <generated/vdso32-offsets.h>
-#endif
-
-#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
-#ifdef CONFIG_COMPAT
-#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-#else
-#define VDSO32_SYMBOL(tsk, name) (-1UL)
-#endif
-
extern struct vdso_data *vdso_data;
int vdso_getcpu_init(void);
diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..36355af7160b
--- /dev/null
+++ b/arch/s390/include/asm/vdso/getrandom.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+ return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags);
+}
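A minimal caller sketch (hypothetical, not part of this patch) showing the calling convention:

	/* Hypothetical caller: request 32 random bytes, no flags. */
	unsigned char buf[32];
	ssize_t n = getrandom_syscall(buf, sizeof(buf), 0); /* < 0 means -errno */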
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ /*
+ * The RNG data is in the real VVAR data page, but if a task belongs to a time
+ * namespace, then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR
+ * page and VVAR_TIMENS_PAGE_OFFSET points to the real VVAR page.
+ */
+ if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ return &_vdso_rng_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index 6c67c08cefdd..3c5d5e47814e 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -2,12 +2,21 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
+#define __VDSO_RND_DATA_OFFSET 768
+
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
+
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES
+};
+
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
@@ -18,6 +27,12 @@ static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __s390_get_k_vdso_data
+static __always_inline struct vdso_rng_data *__s390_get_k_vdso_rnd_data(void)
+{
+ return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
+}
+#define __arch_get_k_vdso_rng_data __s390_get_k_vdso_rnd_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 5ad76471e73f..60431d00e6bd 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -41,6 +41,10 @@
#define PKEY_KEYTYPE_ECC_P521 7
#define PKEY_KEYTYPE_ECC_ED25519 8
#define PKEY_KEYTYPE_ECC_ED448 9
+#define PKEY_KEYTYPE_AES_XTS_128 10
+#define PKEY_KEYTYPE_AES_XTS_256 11
+#define PKEY_KEYTYPE_HMAC_512 12
+#define PKEY_KEYTYPE_HMAC_1024 13
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
@@ -50,6 +54,7 @@ enum pkey_key_type {
PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
PKEY_TYPE_EP11_AES = (__u32) 6,
PKEY_TYPE_EP11_ECC = (__u32) 7,
+ PKEY_TYPE_PROTKEY = (__u32) 8,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index e47a4be54ff8..48caae8c7e10 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -36,22 +36,23 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
+obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o
+obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o
extra-y += vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+obj-$(CONFIG_SYSFS) += cpacf.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
+obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ffa0dd2dbaac..5529248d84fb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -112,8 +112,7 @@ int main(void)
OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
/* software defined lowcore locations 0x200 - 0xdff*/
- OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
- OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
+ OFFSET(__LC_SAVE_AREA, lowcore, save_area);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_PCPU, lowcore, pcpu);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 1942e2a9f8db..5a86b9d1da71 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -24,11 +24,11 @@
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
+#include <asm/vdso-symbols.h>
#include <asm/access-regs.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
-#include <asm/vdso.h>
#include <asm/fpu.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
diff --git a/arch/s390/kernel/cpacf.c b/arch/s390/kernel/cpacf.c
new file mode 100644
index 000000000000..c8575dbc890d
--- /dev/null
+++ b/arch/s390/kernel/cpacf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "cpacf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <asm/cpacf.h>
+
+#define CPACF_QUERY(name, instruction) \
+static ssize_t name##_query_raw_read(struct file *fp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t offs, \
+ size_t count) \
+{ \
+ cpacf_mask_t mask; \
+ \
+ if (!cpacf_query(CPACF_##instruction, &mask)) \
+ return -EOPNOTSUPP; \
+ return memory_read_from_buffer(buf, count, &offs, &mask, sizeof(mask)); \
+} \
+static BIN_ATTR_RO(name##_query_raw, sizeof(cpacf_mask_t))
+
+CPACF_QUERY(km, KM);
+CPACF_QUERY(kmc, KMC);
+CPACF_QUERY(kimd, KIMD);
+CPACF_QUERY(klmd, KLMD);
+CPACF_QUERY(kmac, KMAC);
+CPACF_QUERY(pckmo, PCKMO);
+CPACF_QUERY(kmf, KMF);
+CPACF_QUERY(kmctr, KMCTR);
+CPACF_QUERY(kmo, KMO);
+CPACF_QUERY(pcc, PCC);
+CPACF_QUERY(prno, PRNO);
+CPACF_QUERY(kma, KMA);
+CPACF_QUERY(kdsa, KDSA);
+
+#define CPACF_QAI(name, instruction) \
+static ssize_t name##_query_auth_info_raw_read( \
+ struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, loff_t offs, \
+ size_t count) \
+{ \
+ cpacf_qai_t qai; \
+ \
+ if (!cpacf_qai(CPACF_##instruction, &qai)) \
+ return -EOPNOTSUPP; \
+ return memory_read_from_buffer(buf, count, &offs, &qai, \
+ sizeof(qai)); \
+} \
+static BIN_ATTR_RO(name##_query_auth_info_raw, sizeof(cpacf_qai_t))
+
+CPACF_QAI(km, KM);
+CPACF_QAI(kmc, KMC);
+CPACF_QAI(kimd, KIMD);
+CPACF_QAI(klmd, KLMD);
+CPACF_QAI(kmac, KMAC);
+CPACF_QAI(pckmo, PCKMO);
+CPACF_QAI(kmf, KMF);
+CPACF_QAI(kmctr, KMCTR);
+CPACF_QAI(kmo, KMO);
+CPACF_QAI(pcc, PCC);
+CPACF_QAI(prno, PRNO);
+CPACF_QAI(kma, KMA);
+CPACF_QAI(kdsa, KDSA);
+
+static struct bin_attribute *cpacf_attrs[] = {
+ &bin_attr_km_query_raw,
+ &bin_attr_kmc_query_raw,
+ &bin_attr_kimd_query_raw,
+ &bin_attr_klmd_query_raw,
+ &bin_attr_kmac_query_raw,
+ &bin_attr_pckmo_query_raw,
+ &bin_attr_kmf_query_raw,
+ &bin_attr_kmctr_query_raw,
+ &bin_attr_kmo_query_raw,
+ &bin_attr_pcc_query_raw,
+ &bin_attr_prno_query_raw,
+ &bin_attr_kma_query_raw,
+ &bin_attr_kdsa_query_raw,
+ &bin_attr_km_query_auth_info_raw,
+ &bin_attr_kmc_query_auth_info_raw,
+ &bin_attr_kimd_query_auth_info_raw,
+ &bin_attr_klmd_query_auth_info_raw,
+ &bin_attr_kmac_query_auth_info_raw,
+ &bin_attr_pckmo_query_auth_info_raw,
+ &bin_attr_kmf_query_auth_info_raw,
+ &bin_attr_kmctr_query_auth_info_raw,
+ &bin_attr_kmo_query_auth_info_raw,
+ &bin_attr_pcc_query_auth_info_raw,
+ &bin_attr_prno_query_auth_info_raw,
+ &bin_attr_kma_query_auth_info_raw,
+ &bin_attr_kdsa_query_auth_info_raw,
+ NULL,
+};
+
+static const struct attribute_group cpacf_attr_grp = {
+ .name = "cpacf",
+ .bin_attrs = cpacf_attrs,
+};
+
+static int __init cpacf_init(void)
+{
+ struct device *cpu_root;
+ int rc = 0;
+
+ cpu_root = bus_get_dev_root(&cpu_subsys);
+ if (cpu_root) {
+ rc = sysfs_create_group(&cpu_root->kobj, &cpacf_attr_grp);
+ put_device(cpu_root);
+ }
+ return rc;
+}
+device_initcall(cpacf_init);
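With this group registered, the raw query masks become readable from userspace, e.g. via /sys/devices/system/cpu/cpacf/km_query_raw (path derived from the cpu subsystem root device and the group name above).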
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index ac7b8c8e3133..007e1795670e 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -52,6 +52,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
[DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
+ [DIAG_STAT_X49C] = { .code = 0x49c, .name = "Warning-Track Interruption" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
@@ -303,3 +304,19 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode)
return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode);
}
EXPORT_SYMBOL(diag26c);
+
+int diag49c(unsigned long subcode)
+{
+ int rc;
+
+ diag_stat_inc(DIAG_STAT_X49C);
+ asm volatile(
+ " diag %[subcode],0,0x49c\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ : [rc] "=d" (rc)
+ : [subcode] "d" (subcode)
+ : "cc");
+ return rc;
+}
+EXPORT_SYMBOL(diag49c);
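In the inline assembly above, the ipm/srl pair is the usual s390 idiom for turning a condition code into a C return value: ipm inserts the condition code into the upper nibble of the 32-bit register and srl %[rc],28 shifts it down, so diag49c() returns the raw condition code (0-3) set by the diagnose instruction.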
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 89dc826a8d2e..94eb8168ea44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -122,6 +122,7 @@ enum {
U8_32, /* 8 bit unsigned value starting at 32 */
U12_16, /* 12 bit unsigned value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
+ U16_20, /* 16 bit unsigned value starting at 20 */
U16_32, /* 16 bit unsigned value starting at 32 */
U32_16, /* 32 bit unsigned value starting at 16 */
VX_12, /* Vector index register starting at position 12 */
@@ -184,6 +185,7 @@ static const struct s390_operand operands[] = {
[U8_32] = { 8, 32, 0 },
[U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
+ [U16_20] = { 16, 20, 0 },
[U16_32] = { 16, 32, 0 },
[U32_16] = { 32, 16, 0 },
[VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
@@ -257,7 +259,6 @@ static const unsigned char formats[][6] = {
[INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
[INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
- [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
[INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
@@ -300,14 +301,17 @@ static const unsigned char formats[][6] = {
[INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
[INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
[INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
+ [INSTR_VRI_VV0UU] = { V_8, V_12, U8_28, U4_24, 0, 0 },
[INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
[INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
[INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
[INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
[INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
[INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
- [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
+ [INSTR_VRI_VVV0UV] = { V_8, V_12, V_16, V_32, U8_24, 0 },
+ [INSTR_VRR_0V0U] = { V_12, U16_20, 0, 0, 0, 0 },
[INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
+ [INSTR_VRR_0VVU] = { V_12, V_16, U16_20, 0, 0, 0 },
[INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 },
[INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
[INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
@@ -455,21 +459,21 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
if (separator)
ptr += sprintf(ptr, "%c", separator);
if (operand->flags & OPERAND_GPR)
- ptr += sprintf(ptr, "%%r%i", value);
+ ptr += sprintf(ptr, "%%r%u", value);
else if (operand->flags & OPERAND_FPR)
- ptr += sprintf(ptr, "%%f%i", value);
+ ptr += sprintf(ptr, "%%f%u", value);
else if (operand->flags & OPERAND_AR)
- ptr += sprintf(ptr, "%%a%i", value);
+ ptr += sprintf(ptr, "%%a%u", value);
else if (operand->flags & OPERAND_CR)
- ptr += sprintf(ptr, "%%c%i", value);
+ ptr += sprintf(ptr, "%%c%u", value);
else if (operand->flags & OPERAND_VR)
- ptr += sprintf(ptr, "%%v%i", value);
+ ptr += sprintf(ptr, "%%v%u", value);
else if (operand->flags & OPERAND_PCREL) {
void *pcrel = (void *)((int)value + addr);
ptr += sprintf(ptr, "%px", pcrel);
} else if (operand->flags & OPERAND_SIGNED)
- ptr += sprintf(ptr, "%i", value);
+ ptr += sprintf(ptr, "%i", (int)value);
else
ptr += sprintf(ptr, "%u", value);
if (operand->flags & OPERAND_DISP)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 14d324865e33..62f8f5a750a3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -7,6 +7,7 @@
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/sched/debug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -175,20 +176,45 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
-void __do_early_pgm_check(struct pt_regs *regs)
+void __init __do_early_pgm_check(struct pt_regs *regs)
{
- if (!fixup_exception(regs))
- disabled_wait();
+ struct lowcore *lc = get_lowcore();
+ unsigned long ip;
+
+ regs->int_code = lc->pgm_int_code;
+ regs->int_parm_long = lc->trans_exc_code;
+ ip = __rewind_psw(regs->psw, regs->int_code >> 16);
+
+ /* Monitor Event? Might be a warning */
+ if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) {
+ if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN)
+ return;
+ }
+ if (fixup_exception(regs))
+ return;
+ /*
+ * Unhandled exception - system cannot continue but try to get some
+ * helpful messages to the console. Use early_printk() to print
+ * some basic information in case it is too early for printk().
+ */
+ register_early_console();
+ early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n",
+ regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
+ show_regs(regs);
+ disabled_wait();
}
static noinline __init void setup_lowcore_early(void)
{
+ struct lowcore *lc = get_lowcore();
psw_t psw;
psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_KERNEL_BITS;
- get_lowcore()->program_new_psw = psw;
- get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
+ lc->program_new_psw = psw;
+ lc->preempt_count = INIT_PREEMPT_COUNT;
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
}
static __init void detect_diag9c(void)
@@ -242,6 +268,8 @@ static __init void detect_machine_facilities(void)
}
if (test_facility(194))
get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
+ if (test_facility(85))
+ get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
}
static inline void save_vector_registers(void)
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index d9d53f44008a..cefe020a3be3 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -6,6 +6,7 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <asm/setup.h>
#include <asm/sclp.h>
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
@@ -20,6 +21,16 @@ static struct console sclp_early_console = {
.index = -1,
};
+void __init register_early_console(void)
+{
+ if (early_console)
+ return;
+ if (!sclp.has_linemode && !sclp.has_vt220)
+ return;
+ early_console = &sclp_early_console;
+ register_console(early_console);
+}
+
static int __init setup_early_printk(char *buf)
{
if (early_console)
@@ -27,10 +38,7 @@ static int __init setup_early_printk(char *buf)
/* Accept only "earlyprintk" and "earlyprintk=sclp" */
if (buf && !str_has_prefix(buf, "sclp"))
return 0;
- if (!sclp.has_linemode && !sclp.has_vt220)
- return 0;
- early_console = &sclp_early_console;
- register_console(early_console);
+ register_early_console();
return 0;
}
early_param("earlyprintk", setup_early_printk);
diff --git a/arch/s390/kernel/earlypgm.S b/arch/s390/kernel/earlypgm.S
deleted file mode 100644
index c634871f0d90..000000000000
--- a/arch/s390/kernel/earlypgm.S
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2006, 2007
- * Author(s): Michael Holzheu <holzheu@de.ibm.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-
-SYM_CODE_START(early_pgm_check_handler)
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- lgr %r2,%r11
- brasl %r14,__do_early_pgm_check
- mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- lpswe __LC_RETURN_PSW
-SYM_CODE_END(early_pgm_check_handler)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 749410cfdbc0..d6d5317f768e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@ _LPP_OFFSET = __LC_LPP
.macro LPSWEY address, lpswe
ALTERNATIVE_2 "b \lpswe;nopr", \
- ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY_EARLY(193), \
+ ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \
__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
ALT_LOWCORE
.endm
@@ -264,7 +264,7 @@ EXPORT_SYMBOL(sie_exit)
*/
SYM_CODE_START(system_call)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@@ -287,7 +287,7 @@ SYM_CODE_START(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
- mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13)
+ mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
MBEAR %r2,%r13
lgr %r3,%r14
brasl %r14,__do_syscall
@@ -323,7 +323,7 @@ SYM_CODE_END(ret_from_fork)
*/
SYM_CODE_START(pgm_check_handler)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@@ -338,16 +338,16 @@ SYM_CODE_START(pgm_check_handler)
jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13
+2: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f
+ CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3: lg %r15,__LC_KERNEL_STACK(%r13)
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
@@ -398,7 +398,7 @@ SYM_CODE_END(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stckf __LC_INT_CLOCK(%r13)
stpt __LC_SYS_ENTER_TIMER(%r13)
@@ -414,7 +414,7 @@ SYM_CODE_START(\name)
BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
-0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13
+0: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
@@ -432,7 +432,7 @@ SYM_CODE_START(\name)
xgr %r7,%r7
xgr %r10,%r10
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
MBEAR %r11,%r13
stmg %r8,%r9,__PT_PSW(%r11)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -599,6 +599,24 @@ SYM_CODE_START(restart_int_handler)
3: j 3b
SYM_CODE_END(restart_int_handler)
+ __INIT
+SYM_CODE_START(early_pgm_check_handler)
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
+ GET_LC %r13
+ aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
+ lgr %r2,%r11
+ brasl %r14,__do_early_pgm_check
+ mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
+SYM_CODE_END(early_pgm_check_handler)
+ __FINIT
+
.section .kprobes.text, "ax"
#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 0bd6adc40a34..0b6e62d1d8b8 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -50,10 +50,6 @@ struct ftrace_insn {
s32 disp;
} __packed;
-#ifdef CONFIG_MODULES
-static char *ftrace_plt;
-#endif /* CONFIG_MODULES */
-
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
const char *tstart, *tend;
@@ -73,19 +69,20 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
bool ftrace_need_init_nop(void)
{
- return true;
+ return !MACHINE_HAS_SEQ_INSN;
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
__ftrace_hotpatch_trampolines_start;
- static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
+ static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 };
static struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_hotpatch_trampoline **next_trampoline;
struct ftrace_hotpatch_trampoline *trampolines_end;
struct ftrace_hotpatch_trampoline tmp;
struct ftrace_insn *insn;
+ struct ftrace_insn old;
const char *shared;
s32 disp;
@@ -99,7 +96,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
if (mod) {
next_trampoline = &mod->arch.next_trampoline;
trampolines_end = mod->arch.trampolines_end;
- shared = ftrace_plt;
}
#endif
@@ -107,8 +103,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return -ENOMEM;
trampoline = (*next_trampoline)++;
+ if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
+ return -EFAULT;
/* Check for the compiler-generated fentry nop (brcl 0, .). */
- if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
+ if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old))))
return -EINVAL;
/* Generate the trampoline. */
@@ -144,8 +142,35 @@ static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrac
return trampoline;
}
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
- unsigned long addr)
+static inline struct ftrace_insn
+ftrace_generate_branch_insn(unsigned long ip, unsigned long target)
+{
+ /* brasl r0,target or brcl 0,0 */
+ return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004,
+ .disp = target ? (target - ip) / 2 : 0 };
+}
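A worked example of the encoding (addresses hypothetical): with ip = 0x1000 and target = 0x2000 the helper produces a brasl %r0,target with opcode 0xc005 and disp = 0x800, since s390 relative-branch displacements count halfwords; a zero target yields the brcl 0,0 nop (opcode 0xc004, disp 0) instead.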
+
+static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target,
+ unsigned long target)
+{
+ struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target);
+ struct ftrace_insn new = ftrace_generate_branch_insn(ip, target);
+ struct ftrace_insn old;
+
+ if (!IS_ALIGNED(ip, 8))
+ return -EINVAL;
+ if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old)))
+ return -EFAULT;
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ s390_kernel_write((void *)ip, &new, sizeof(new));
+ return 0;
+}
+
+static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
u64 old;
@@ -161,6 +186,15 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
+ else
+ return ftrace_modify_trampoline_call(rec, old_addr, addr);
+}
+
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
u16 old;
@@ -179,11 +213,14 @@ static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- /* Expect brcl 0xf,... */
- return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
+ /* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, addr, 0);
+ else
+ return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
@@ -195,6 +232,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, 0, addr);
+ else
+ return ftrace_make_trampoline_call(rec, addr);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
@@ -215,25 +260,6 @@ void ftrace_arch_code_modify_post_process(void)
text_poke_sync_lock();
}
-#ifdef CONFIG_MODULES
-
-static int __init ftrace_plt_init(void)
-{
- const char *start, *end;
-
- ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
- if (!ftrace_plt)
- panic("cannot allocate ftrace plt\n");
-
- start = ftrace_shared_hotpatch_trampoline(&end);
- memcpy(ftrace_plt, start, end - start);
- set_memory_rox((unsigned long)ftrace_plt, 1);
- return 0;
-}
-device_initcall(ftrace_plt_init);
-
-#endif /* CONFIG_MODULES */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
@@ -264,26 +290,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
- int rc;
-
/* Expect brc 0xf,... */
- rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
- if (rc)
- return rc;
- text_poke_sync_lock();
- return 0;
+ return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- int rc;
-
/* Expect brc 0x0,... */
- rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
- if (rc)
- return rc;
- text_poke_sync_lock();
- return 0;
+ return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
index 7f75a9616406..23337065f402 100644
--- a/arch/s390/kernel/ftrace.h
+++ b/arch/s390/kernel/ftrace.h
@@ -18,7 +18,5 @@ extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
-extern const char ftrace_plt_template[];
-extern const char ftrace_plt_template_end[];
#endif /* _FTRACE_H */
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
new file mode 100644
index 000000000000..2a99a216ab62
--- /dev/null
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "hd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+/*
+ * Hiperdispatch:
+ * Dynamically calculates the optimum number of high capacity COREs
+ * by considering the state the system is in. When hiperdispatch decides
+ * that a capacity update is necessary, it schedules a topology update.
+ * During topology updates the CPU capacities are always re-adjusted.
+ *
+ * There are two places where CPU capacities are accessed within
+ * hiperdispatch:
+ * -> hiperdispatch's recurring work function reads CPU capacities to
+ * determine high capacity CPU count.
+ * -> during a topology update hiperdispatch's adjustment function
+ * updates CPU capacities.
+ * These two can run on different CPUs in parallel, which can cause
+ * hiperdispatch to make wrong decisions. This can potentially cause
+ * some overhead by leading to extra rebuild_sched_domains() calls
+ * for correction. Access to capacities within hiperdispatch has to be
+ * serialized to prevent the overhead.
+ *
+ * Hiperdispatch decision making revolves around steal time.
+ * The HD_STEAL_THRESHOLD value is taken as the reference. Whenever steal
+ * time crosses the threshold, hiperdispatch falls back to giving high
+ * capacities to entitled CPUs. When steal time drops below the
+ * threshold boundary, hiperdispatch utilizes all CPUs by giving all
+ * of them high capacity.
+ *
+ * The theory behind HD_STEAL_THRESHOLD is related to SMP thread
+ * performance. Comparing the throughput of:
+ * - a single CORE, with N threads, running N tasks
+ * - N separate COREs running N tasks,
+ * using individual COREs for individual tasks yields better
+ * performance. This performance difference is roughly ~30% (it can vary
+ * between machine generations).
+ *
+ * Hiperdispatch tries to hint the scheduler to use individual COREs for
+ * each task, as long as steal time on those COREs is less than 30%,
+ * thereby delaying the throughput loss caused by using SMP threads.
+ */
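Condensed, the policy described above amounts to a one-line decision rule. A minimal sketch (hypothetical helper, not the in-tree code; the literal 30 stands in for HD_STEAL_THRESHOLD defined further down):

	/* Hypothetical condensation of the policy described above. */
	static int hd_target_high_capacity_cores(unsigned long avg_steal_pct,
						 int entitled_cores, int online_cores)
	{
		/* Low steal: use every CORE; high steal: entitled COREs only. */
		return avg_steal_pct < 30 ? online_cores : entitled_cores;
	}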
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kstrtox.h>
+#include <linux/ktime.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <asm/hiperdispatch.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/topology.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/hiperdispatch.h>
+
+#define HD_DELAY_FACTOR (4)
+#define HD_DELAY_INTERVAL (HZ / 4)
+#define HD_STEAL_THRESHOLD 30
+#define HD_STEAL_AVG_WEIGHT 16
+
+static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */
+static cpumask_t hd_vmvl_cpumask; /* Mask containing vertical medium and low CPUs */
+static int hd_high_capacity_cores; /* Current CORE count with high capacity */
+static int hd_entitled_cores; /* Total vertical high and medium CORE count */
+static int hd_online_cores; /* Current online CORE count */
+
+static unsigned long hd_previous_steal; /* Previous iteration's CPU steal timer total */
+static unsigned long hd_high_time; /* Total time spent while all cpus have high capacity */
+static unsigned long hd_low_time; /* Total time spent while vl cpus have low capacity */
+static atomic64_t hd_adjustments; /* Total occurrence count of hiperdispatch adjustments */
+
+static unsigned int hd_steal_threshold = HD_STEAL_THRESHOLD;
+static unsigned int hd_delay_factor = HD_DELAY_FACTOR;
+static int hd_enabled;
+
+static void hd_capacity_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn);
+
+static int hd_set_hiperdispatch_mode(int enable)
+{
+ if (!MACHINE_HAS_TOPOLOGY)
+ enable = 0;
+ if (hd_enabled == enable)
+ return 0;
+ hd_enabled = enable;
+ return 1;
+}
+
+void hd_reset_state(void)
+{
+ cpumask_clear(&hd_vl_coremask);
+ cpumask_clear(&hd_vmvl_cpumask);
+ hd_entitled_cores = 0;
+ hd_online_cores = 0;
+}
+
+void hd_add_core(int cpu)
+{
+ const struct cpumask *siblings;
+ int polarization;
+
+ hd_online_cores++;
+ polarization = smp_cpu_get_polarization(cpu);
+ siblings = topology_sibling_cpumask(cpu);
+ switch (polarization) {
+ case POLARIZATION_VH:
+ hd_entitled_cores++;
+ break;
+ case POLARIZATION_VM:
+ hd_entitled_cores++;
+ cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
+ break;
+ case POLARIZATION_VL:
+ cpumask_set_cpu(cpu, &hd_vl_coremask);
+ cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
+ break;
+ }
+}
+
+/* Serialize update and read operations of debug counters. */
+static DEFINE_MUTEX(hd_counter_mutex);
+
+static void hd_update_times(void)
+{
+ static ktime_t prev;
+ ktime_t now;
+
+ /*
+ * Check if hiperdispatch is active; if not, set prev to 0.
+ * This way it is possible to differentiate the first update iteration after
+ * enabling hiperdispatch.
+ */
+ if (hd_entitled_cores == 0 || hd_enabled == 0) {
+ prev = ktime_set(0, 0);
+ return;
+ }
+ now = ktime_get();
+ if (ktime_after(prev, 0)) {
+ if (hd_high_capacity_cores == hd_online_cores)
+ hd_high_time += ktime_ms_delta(now, prev);
+ else
+ hd_low_time += ktime_ms_delta(now, prev);
+ }
+ prev = now;
+}
+
+static void hd_update_capacities(void)
+{
+ int cpu, upscaling_cores;
+ unsigned long capacity;
+
+ upscaling_cores = hd_high_capacity_cores - hd_entitled_cores;
+ capacity = upscaling_cores > 0 ? CPU_CAPACITY_HIGH : CPU_CAPACITY_LOW;
+ hd_high_capacity_cores = hd_entitled_cores;
+ for_each_cpu(cpu, &hd_vl_coremask) {
+ smp_set_core_capacity(cpu, capacity);
+ if (capacity != CPU_CAPACITY_HIGH)
+ continue;
+ hd_high_capacity_cores++;
+ upscaling_cores--;
+ if (upscaling_cores == 0)
+ capacity = CPU_CAPACITY_LOW;
+ }
+}
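A worked example of the loop above: with hd_entitled_cores = 4, two vertical low COREs and a requested hd_high_capacity_cores of 6, upscaling_cores starts at 2, both VL COREs receive CPU_CAPACITY_HIGH, and the counter ends back at 6; with a requested value of 4, upscaling_cores is 0 and every VL CORE drops to CPU_CAPACITY_LOW.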
+
+void hd_disable_hiperdispatch(void)
+{
+ cancel_delayed_work_sync(&hd_capacity_work);
+ hd_high_capacity_cores = hd_online_cores;
+ hd_previous_steal = 0;
+}
+
+int hd_enable_hiperdispatch(void)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ mutex_unlock(&hd_counter_mutex);
+ if (hd_enabled == 0)
+ return 0;
+ if (hd_entitled_cores == 0)
+ return 0;
+ if (hd_online_cores <= hd_entitled_cores)
+ return 0;
+ mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
+ hd_update_capacities();
+ return 1;
+}
+
+static unsigned long hd_steal_avg(unsigned long new)
+{
+ static unsigned long steal;
+
+ steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT;
+ return steal;
+}
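hd_steal_avg() keeps an exponentially weighted moving average with weight 1/16: avg = (15 * avg + new) / 16. Starting from 0, a sustained reading of 100 yields (with integer division) 6, 11, 16, 21, 25, 29, 33, ..., so roughly seven work intervals pass before the default threshold of 30 is crossed; short steal spikes are filtered out.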
+
+static unsigned long hd_calculate_steal_percentage(void)
+{
+ unsigned long time_delta, steal_delta, steal, percentage;
+ static ktime_t prev;
+ int cpus, cpu;
+ ktime_t now;
+
+ cpus = 0;
+ steal = 0;
+ percentage = 0;
+ for_each_cpu(cpu, &hd_vmvl_cpumask) {
+ steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ cpus++;
+ }
+ /*
+ * If there are no vertical medium or low CPUs, steal time
+ * is 0, as vertical high CPUs shouldn't experience steal time.
+ */
+ if (cpus == 0)
+ return percentage;
+ now = ktime_get();
+ time_delta = ktime_to_ns(ktime_sub(now, prev));
+ if (steal > hd_previous_steal && hd_previous_steal != 0) {
+ steal_delta = (steal - hd_previous_steal) * 100 / time_delta;
+ percentage = steal_delta / cpus;
+ }
+ hd_previous_steal = steal;
+ prev = now;
+ return percentage;
+}
+
+static void hd_capacity_work_fn(struct work_struct *work)
+{
+ unsigned long steal_percentage, new_cores;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ /*
+ * If online cores are less than or equal to entitled cores, hiperdispatch
+ * does not need to make any adjustments; call a topology update to
+ * disable hiperdispatch.
+ * Normally this check is handled on topology update, but during cpu
+ * hot-unplug, topology and cpu mask updates are done in reverse
+ * order, causing hd_enable_hiperdispatch() to get stale data.
+ */
+ if (hd_online_cores <= hd_entitled_cores) {
+ topology_schedule_update();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return;
+ }
+ steal_percentage = hd_steal_avg(hd_calculate_steal_percentage());
+ if (steal_percentage < hd_steal_threshold)
+ new_cores = hd_online_cores;
+ else
+ new_cores = hd_entitled_cores;
+ if (hd_high_capacity_cores != new_cores) {
+ trace_s390_hd_rebuild_domains(hd_high_capacity_cores, new_cores);
+ hd_high_capacity_cores = new_cores;
+ atomic64_inc(&hd_adjustments);
+ topology_schedule_update();
+ }
+ trace_s390_hd_work_fn(steal_percentage, hd_entitled_cores, hd_high_capacity_cores);
+ mutex_unlock(&smp_cpu_state_mutex);
+ schedule_delayed_work(&hd_capacity_work, HD_DELAY_INTERVAL);
+}
+
+static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int hiperdispatch;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &hiperdispatch,
+ .maxlen = sizeof(int),
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ };
+
+ hiperdispatch = hd_enabled;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+ mutex_lock(&smp_cpu_state_mutex);
+ if (hd_set_hiperdispatch_mode(hiperdispatch))
+ topology_schedule_update();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return 0;
+}
+
+static struct ctl_table hiperdispatch_ctl_table[] = {
+ {
+ .procname = "hiperdispatch",
+ .mode = 0644,
+ .proc_handler = hiperdispatch_ctl_handler,
+ },
+};
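After registration the switch is reachable as /proc/sys/s390/hiperdispatch and accepts 0 or 1; the path follows from the "s390" table directory and the procname above.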
+
+static ssize_t hd_steal_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", hd_steal_threshold);
+}
+
+static ssize_t hd_steal_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (val > 100)
+ return -ERANGE;
+ hd_steal_threshold = val;
+ return count;
+}
+
+static DEVICE_ATTR_RW(hd_steal_threshold);
+
+static ssize_t hd_delay_factor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", hd_delay_factor);
+}
+
+static ssize_t hd_delay_factor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (!val)
+ return -ERANGE;
+ hd_delay_factor = val;
+ return count;
+}
+
+static DEVICE_ATTR_RW(hd_delay_factor);
+
+static struct attribute *hd_attrs[] = {
+ &dev_attr_hd_steal_threshold.attr,
+ &dev_attr_hd_delay_factor.attr,
+ NULL,
+};
+
+static const struct attribute_group hd_attr_group = {
+ .name = "hiperdispatch",
+ .attrs = hd_attrs,
+};
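These two attributes surface as /sys/devices/system/cpu/hiperdispatch/hd_steal_threshold and /sys/devices/system/cpu/hiperdispatch/hd_delay_factor (paths derived from the cpu subsystem root device and the group name), so the steal threshold and the initial delay multiplier can be tuned at runtime.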
+
+static int hd_greedy_time_get(void *unused, u64 *val)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ *val = hd_high_time;
+ mutex_unlock(&hd_counter_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_greedy_time_fops, hd_greedy_time_get, NULL, "%llu\n");
+
+static int hd_conservative_time_get(void *unused, u64 *val)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ *val = hd_low_time;
+ mutex_unlock(&hd_counter_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_conservative_time_fops, hd_conservative_time_get, NULL, "%llu\n");
+
+static int hd_adjustment_count_get(void *unused, u64 *val)
+{
+ *val = atomic64_read(&hd_adjustments);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_adjustments_fops, hd_adjustment_count_get, NULL, "%llu\n");
+
+static void __init hd_create_debugfs_counters(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("hiperdispatch", arch_debugfs_dir);
+ debugfs_create_file("conservative_time_ms", 0400, dir, NULL, &hd_conservative_time_fops);
+ debugfs_create_file("greedy_time_ms", 0400, dir, NULL, &hd_greedy_time_fops);
+ debugfs_create_file("adjustment_count", 0400, dir, NULL, &hd_adjustments_fops);
+}
+
+static void __init hd_create_attributes(void)
+{
+ struct device *dev;
+
+ dev = bus_get_dev_root(&cpu_subsys);
+ if (!dev)
+ return;
+ if (sysfs_create_group(&dev->kobj, &hd_attr_group))
+ pr_warn("Unable to create hiperdispatch attribute group\n");
+ put_device(dev);
+}
+
+static int __init hd_init(void)
+{
+ if (IS_ENABLED(CONFIG_HIPERDISPATCH_ON)) {
+ hd_set_hiperdispatch_mode(1);
+ topology_schedule_update();
+ }
+ if (!register_sysctl("s390", hiperdispatch_ctl_table))
+ pr_warn("Failed to register s390.hiperdispatch sysctl attribute\n");
+ hd_create_debugfs_counters();
+ hd_create_attributes();
+ return 0;
+}
+late_initcall(hd_init);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1af5a08d72ab..2639a3d12736 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -76,6 +76,7 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
+ {.irq = IRQEXT_WTI, .name = "WTI", .desc = "[EXT] Warning Track"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 05c83505e979..6295faf0987d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/execmem.h>
+#include <asm/text-patching.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
@@ -152,7 +153,12 @@ void arch_arm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
- stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ if (MACHINE_HAS_SEQ_INSN) {
+ swap_instruction(&args);
+ text_poke_sync();
+ } else {
+ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
@@ -160,7 +166,12 @@ void arch_disarm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
- stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ if (MACHINE_HAS_SEQ_INSN) {
+ swap_instruction(&args);
+ text_poke_sync();
+ } else {
+ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index ae4d4fd9afcd..7e267ef63a7f 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,6 +9,7 @@
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
+#include <asm/march.h>
#define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -88,7 +89,7 @@ SYM_CODE_START(ftrace_caller)
SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_common)
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_func
@@ -115,7 +116,7 @@ SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
.Lftrace_graph_caller_end:
#endif
lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
locgrz %r1,%r0
#else
diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c
index 23ab9f02f278..ddc1448ea2e1 100644
--- a/arch/s390/kernel/numa.c
+++ b/arch/s390/kernel/numa.c
@@ -14,9 +14,6 @@
#include <linux/node.h>
#include <asm/numa.h>
-struct pglist_data *node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
void __init numa_setup(void)
{
int nid;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 6968be98af11..18b0d025f3a2 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -22,6 +22,10 @@
#include <asm/hwctrset.h>
#include <asm/debug.h>
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
+
enum cpumf_ctr_set {
CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 736c1d9632dd..5b765e3ccf0c 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -24,6 +24,22 @@
#include <asm/timex.h>
#include <linux/io.h>
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR 2
+#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
+
+#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
+#define TEAR_REG(hwc) ((hwc)->last_tag)
+#define SAMPL_RATE(hwc) ((hwc)->event_base)
+#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
+
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
* A single table contains up to 511 pointers to sample-data-blocks.
@@ -113,17 +129,6 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
-/* Return TOD timestamp contained in an trailer entry */
-static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
-{
- /* TOD in STCKE format */
- if (te->header.t)
- return *((unsigned long long *)&te->timestamp[1]);
-
- /* TOD in STCK format */
- return *((unsigned long long *)&te->timestamp[0]);
-}
-
/* Return pointer to trailer entry of an sample data block */
static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v)
{
@@ -154,12 +159,12 @@ static inline unsigned long *get_next_sdbt(unsigned long *s)
/*
* sf_disable() - Switch off sampling facility
*/
-static int sf_disable(void)
+static void sf_disable(void)
{
struct hws_lsctl_request_block sreq;
memset(&sreq, 0, sizeof(sreq));
- return lsctl(&sreq);
+ lsctl(&sreq);
}
/*
@@ -208,8 +213,6 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
- debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
- (unsigned long)sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@@ -265,10 +268,8 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
- debug_sprintf_event(sfdbg, 3, "%s: "
- "sampling buffer is not linked: origin %#lx"
- " tail %#lx\n", __func__,
- (unsigned long)sfb->sdbt,
+ debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n",
+ __func__, (unsigned long)sfb->sdbt,
(unsigned long)tail);
return -EINVAL;
}
@@ -318,9 +319,6 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*tail = virt_to_phys(sfb->sdbt) + 1;
sfb->tail = tail;
- debug_sprintf_event(sfdbg, 4, "%s: new buffer"
- " settings: sdbt %lu sdb %lu\n", __func__,
- sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@@ -357,15 +355,8 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
/* Allocate requested number of sample-data-blocks */
rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
- if (rc) {
+ if (rc)
free_sampling_buffer(sfb);
- debug_sprintf_event(sfdbg, 4, "%s: "
- "realloc_sampling_buffer failed with rc %i\n",
- __func__, rc);
- } else
- debug_sprintf_event(sfdbg, 4,
- "%s: tear %#lx dear %#lx\n", __func__,
- (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
return rc;
}
@@ -377,8 +368,8 @@ static void sfb_set_limits(unsigned long min, unsigned long max)
CPUM_SF_MAX_SDB = max;
memset(&si, 0, sizeof(si));
- if (!qsi(&si))
- CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
+ qsi(&si);
+ CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}
static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
@@ -397,12 +388,6 @@ static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
return 0;
}
-static int sfb_has_pending_allocs(struct sf_buffer *sfb,
- struct hw_perf_event *hwc)
-{
- return sfb_pending_allocs(sfb, hwc) > 0;
-}
-
static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
/* Limit the number of SDBs to not exceed the maximum */
@@ -426,7 +411,6 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq;
- size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
@@ -457,7 +441,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
- sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
@@ -473,12 +456,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
if (sf_buffer_available(cpuhw))
return 0;
- debug_sprintf_event(sfdbg, 3,
- "%s: rate %lu f %lu sdb %lu/%lu"
- " sample_size %lu cpuhw %p\n", __func__,
- SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
- sample_size, cpuhw);
-
return alloc_sampling_buffer(&cpuhw->sfb,
sfb_pending_allocs(&cpuhw->sfb, hwc));
}
@@ -535,8 +512,6 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
- debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
- __func__, OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@@ -554,13 +529,11 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
static void extend_sampling_buffer(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
- unsigned long num, num_old;
- int rc;
+ unsigned long num;
num = sfb_pending_allocs(sfb, hwc);
if (!num)
return;
- num_old = sfb->num_sdb;
/* Disable the sampling facility to reset any states and also
* clear pending measurement alerts.
@@ -572,51 +545,33 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
* called by perf. Because this is a reallocation, it is fine if the
* new SDB-request cannot be satisfied immediately.
*/
- rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
- if (rc)
- debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
- __func__, rc);
-
- if (sfb_has_pending_allocs(sfb, hwc))
- debug_sprintf_event(sfdbg, 5, "%s: "
- "req %lu alloc %lu remaining %lu\n",
- __func__, num, sfb->num_sdb - num_old,
- sfb_pending_allocs(sfb, hwc));
+ realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
}
/* Number of perf events counting hardware events */
-static atomic_t num_events;
+static refcount_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_INIT 0
#define PMC_RELEASE 1
-#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
- struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
- int err = 0;
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
switch (*((int *)flags)) {
case PMC_INIT:
- memset(cpusf, 0, sizeof(*cpusf));
- err = qsi(&cpusf->qsi);
- if (err)
- break;
- cpusf->flags |= PMU_F_RESERVED;
- err = sf_disable();
+ memset(cpuhw, 0, sizeof(*cpuhw));
+ qsi(&cpuhw->qsi);
+ cpuhw->flags |= PMU_F_RESERVED;
+ sf_disable();
break;
case PMC_RELEASE:
- cpusf->flags &= ~PMU_F_RESERVED;
- err = sf_disable();
- if (!err)
- deallocate_buffers(cpusf);
+ cpuhw->flags &= ~PMU_F_RESERVED;
+ sf_disable();
+ deallocate_buffers(cpuhw);
break;
}
- if (err) {
- *((int *)flags) |= PMC_FAILURE;
- pr_err("Switching off the sampling facility failed with rc %i\n", err);
- }
}
static void release_pmc_hardware(void)
@@ -627,27 +582,19 @@ static void release_pmc_hardware(void)
on_each_cpu(setup_pmc_cpu, &flags, 1);
}
-static int reserve_pmc_hardware(void)
+static void reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
on_each_cpu(setup_pmc_cpu, &flags, 1);
- if (flags & PMC_FAILURE) {
- release_pmc_hardware();
- return -ENODEV;
- }
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- return 0;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Release PMC if this is the last perf event */
- if (!atomic_add_unless(&num_events, -1, 1)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_dec_return(&num_events) == 0)
- release_pmc_hardware();
+ if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) {
+ release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -751,9 +698,6 @@ static unsigned long getrate(bool freq, unsigned long sample,
*/
if (sample_rate_to_freq(si, rate) >
sysctl_perf_event_sample_rate) {
- debug_sprintf_event(sfdbg, 1, "%s: "
- "Sampling rate exceeds maximum "
- "perf sample rate\n", __func__);
rate = 0;
}
}
@@ -798,9 +742,6 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
attr->sample_period = rate;
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
- debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
- __func__, event->cpu, event->attr.sample_period,
- event->attr.freq, SAMPLE_FREQ_MODE(hwc));
return 0;
}
@@ -810,23 +751,17 @@ static int __hw_perf_event_init(struct perf_event *event)
struct hws_qsi_info_block si;
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
- int cpu, err;
+ int cpu, err = 0;
/* Reserve CPU-measurement sampling facility */
- err = 0;
- if (!atomic_inc_not_zero(&num_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
- err = -EBUSY;
- else
- atomic_inc(&num_events);
- mutex_unlock(&pmc_reserve_mutex);
+ mutex_lock(&pmc_reserve_mutex);
+ if (!refcount_inc_not_zero(&num_events)) {
+ reserve_pmc_hardware();
+ refcount_set(&num_events, 1);
}
+ mutex_unlock(&pmc_reserve_mutex);
event->destroy = hw_perf_event_destroy;
- if (err)
- goto out;
-
/* Access per-CPU sampling information (query sampling info) */
/*
* The event->cpu value can be -1 to count on every CPU, for example,
@@ -838,9 +773,9 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
memset(&si, 0, sizeof(si));
cpuhw = NULL;
- if (event->cpu == -1)
+ if (event->cpu == -1) {
qsi(&si);
- else {
+ } else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
*/
@@ -881,10 +816,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (err)
goto out;
- /* Initialize sample data overflow accounting */
- hwc->extra_reg.reg = REG_OVERFLOW;
- OVERFLOW_REG(hwc) = 0;
-
/* Use AUX buffer. No need to allocate it by ourself */
if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
return 0;
@@ -1007,7 +938,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
extend_sampling_buffer(&cpuhw->sfb, hwc);
}
/* Rate may be adjusted with ioctl() */
- cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
+ cpuhw->lsctl.interval = SAMPL_RATE(hwc);
}
/* (Re)enable the PMU and sampling facility */
@@ -1023,12 +954,6 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&get_lowcore()->lpp);
-
- debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
- "interval %#lx tear %#lx dear %#lx\n", __func__,
- cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
- cpuhw->lsctl.cd, cpuhw->lsctl.interval,
- cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}
static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -1055,21 +980,18 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
return;
}
- /* Save state of TEAR and DEAR register contents */
- err = qsi(&si);
- if (!err) {
- /* TEAR/DEAR values are valid only if the sampling facility is
- * enabled. Note that cpumsf_pmu_disable() might be called even
- * for a disabled sampling facility because cpumsf_pmu_enable()
- * controls the enable/disable state.
- */
- if (si.es) {
- cpuhw->lsctl.tear = si.tear;
- cpuhw->lsctl.dear = si.dear;
- }
- } else
- debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
- __func__, err);
+ /*
+ * Save state of TEAR and DEAR register contents.
+ * TEAR/DEAR values are valid only if the sampling facility is
+ * enabled. Note that cpumsf_pmu_disable() might be called even
+ * for a disabled sampling facility because cpumsf_pmu_enable()
+ * controls the enable/disable state.
+ */
+ qsi(&si);
+ if (si.es) {
+ cpuhw->lsctl.tear = si.tear;
+ cpuhw->lsctl.dear = si.dear;
+ }
cpuhw->flags &= ~PMU_F_ENABLED;
}
@@ -1235,11 +1157,6 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sprintf_event(sfdbg, 4,
- "%s: Found unknown"
- " sampling data entry: te->f %i"
- " basic.def %#4x (%p)\n", __func__,
- te->header.f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1284,7 +1201,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* AUX buffer is used when in diagnostic sampling mode.
* No perf events/samples are created.
*/
- if (SAMPL_DIAG_MODE(&event->hw))
+ if (SAMPL_DIAG_MODE(hwc))
return;
sdbt = (unsigned long *)TEAR_REG(hwc);
@@ -1309,13 +1226,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
sampl_overflow += te->header.overflow;
- /* Timestamps are valid for full sample-data-blocks only */
- debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx "
- "overflow %llu timestamp %#llx\n",
- __func__, sdb, (unsigned long)sdbt,
- te->header.overflow,
- (te->header.f) ? trailer_timestamp(te) : 0ULL);
-
/* Collect all samples from a single sample-data-block and
* flag if an (perf) event overflow happened. If so, the PMU
* is stopped and remaining samples will be discarded.
@@ -1340,7 +1250,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sdbt = get_next_sdbt(sdbt);
/* Update event hardware registers */
- TEAR_REG(hwc) = (unsigned long) sdbt;
+ TEAR_REG(hwc) = (unsigned long)sdbt;
/* Stop processing sample-data if all samples of the current
* sample-data-block were flushed even if it was not full.
@@ -1362,19 +1272,8 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* are dropped.
* Slightly increase the interval to avoid hitting this limit.
*/
- if (event_overflow) {
+ if (event_overflow)
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
- debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
- __func__,
- DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
- }
-
- if (sampl_overflow || event_overflow)
- debug_sprintf_event(sfdbg, 4, "%s: "
- "overflows: sample %llu event %llu"
- " total %llu num_sdb %llu\n",
- __func__, sampl_overflow, event_overflow,
- OVERFLOW_REG(hwc), num_sdb);
}
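On event overflow the code above widens the sampling interval by about 10%; DIV_ROUND_UP() rounds the increment up, so any non-zero rate grows by at least 1 and cannot get stuck. A worked sketch of the arithmetic (the helper name is illustrative):

    #include <linux/math.h>

    /* Widen a sampling interval by ~10%, never by less than 1. */
    static unsigned long widen_interval(unsigned long rate)
    {
            /* e.g. 4127 -> 4127 + DIV_ROUND_UP(4127, 10) = 4127 + 413 = 4540 */
            return rate + DIV_ROUND_UP(rate, 10);
    }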
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
@@ -1442,9 +1341,6 @@ static void aux_output_end(struct perf_output_handle *handle)
/* Remove alert indicators in the buffer */
te = aux_sdb_trailer(aux, aux->alert_mark);
te->header.a = 0;
-
- debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
- __func__, i, range_scan, aux->head);
}
/*
@@ -1463,7 +1359,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
unsigned long range, i, range_scan, idx, head, base, offset;
struct hws_trailer_entry *te;
- if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
+ if (handle->head & ~PAGE_MASK)
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
@@ -1475,10 +1371,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
* SDBs between aux->head and aux->empty_mark are already ready
* for new data. range_scan is num of SDBs not within them.
*/
- debug_sprintf_event(sfdbg, 6,
- "%s: range %ld head %ld alert %ld empty %ld\n",
- __func__, range, aux->head, aux->alert_mark,
- aux->empty_mark);
if (range > aux_sdb_num_empty(aux)) {
range_scan = range - aux_sdb_num_empty(aux);
idx = aux->empty_mark + 1;
@@ -1504,12 +1396,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long);
cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]);
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
- "index %ld tear %#lx dear %#lx\n", __func__,
- aux->head, aux->alert_mark, aux->empty_mark,
- head / CPUM_SF_SDB_PER_TABLE,
- cpuhw->lsctl.tear, cpuhw->lsctl.dear);
-
return 0;
}
@@ -1571,14 +1457,11 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
unsigned long long *overflow)
{
- unsigned long i, range_scan, idx, idx_old;
union hws_trailer_header old, prev, new;
+ unsigned long i, range_scan, idx;
unsigned long long orig_overflow;
struct hws_trailer_entry *te;
- debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
- "empty %ld\n", __func__, range, aux->head,
- aux->alert_mark, aux->empty_mark);
if (range <= aux_sdb_num_empty(aux))
/*
* No need to scan. All SDBs in range are marked as empty.
@@ -1601,7 +1484,7 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
* indicator fall into this range, set it.
*/
range_scan = range - aux_sdb_num_empty(aux);
- idx_old = idx = aux->empty_mark + 1;
+ idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
@@ -1623,9 +1506,6 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
/* Update empty_mark to new position */
aux->empty_mark = aux->head + range - 1;
- debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
- "empty %ld\n", __func__, range_scan, idx_old,
- idx - 1, aux->empty_mark);
return true;
}
@@ -1642,12 +1522,12 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long num_sdb;
aux = perf_get_aux(handle);
- if (WARN_ON_ONCE(!aux))
+ if (!aux)
return;
/* Inform user space new data arrived */
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
- debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
+ debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__,
size >> PAGE_SHIFT);
perf_aux_output_end(handle, size);
@@ -1661,7 +1541,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
num_sdb);
break;
}
- if (WARN_ON_ONCE(!aux))
+ if (!aux)
return;
/* Update head and alert_mark to new position */
@@ -1681,23 +1561,11 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", aux->sfb.num_sdb);
- debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
- "overflow %lld\n", __func__,
- aux->head, range, overflow);
} else {
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
- "already full, try another\n",
- __func__,
- aux->head, aux->alert_mark);
}
}
-
- if (done)
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
- "empty %ld\n", __func__, aux->head,
- aux->alert_mark, aux->empty_mark);
}
/*
@@ -1719,8 +1587,6 @@ static void aux_buffer_free(void *data)
kfree(aux->sdbt_index);
kfree(aux->sdb_index);
kfree(aux);
-
- debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
}
static void aux_sdb_init(unsigned long sdb)
@@ -1828,9 +1694,6 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
aux->empty_mark = sfb->num_sdb - 1;
- debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
- sfb->num_sdbt, sfb->num_sdb);
-
return aux;
no_sdbt:
@@ -1863,8 +1726,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
memset(&si, 0, sizeof(si));
if (event->cpu == -1) {
- if (qsi(&si))
- return -ENODEV;
+ qsi(&si);
} else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
@@ -1874,7 +1736,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
si = cpuhw->qsi;
}
- do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
+ do_freq = !!SAMPL_FREQ_MODE(&event->hw);
rate = getrate(do_freq, value, &si);
if (!rate)
return -EINVAL;
@@ -1882,10 +1744,6 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
event->attr.sample_period = rate;
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
- debug_sprintf_event(sfdbg, 4, "%s:"
- " cpu %d value %#llx period %#llx freq %d\n",
- __func__, event->cpu, value,
- event->attr.sample_period, do_freq);
return 0;
}
@@ -1896,12 +1754,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ if (!(event->hw.state & PERF_HES_STOPPED))
return;
-
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;
@@ -1936,7 +1790,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct aux_buffer *aux;
- int err;
+ int err = 0;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
@@ -1944,7 +1798,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
- err = 0;
perf_pmu_disable(event->pmu);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -2115,7 +1968,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Report measurement alerts only for non-PRA codes */
if (alert != CPU_MF_INT_SF_PRA)
- debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
+ debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__,
alert);
/* Sampling authorization change request */
@@ -2143,7 +1996,7 @@ static int cpusf_pmu_setup(unsigned int cpu, int flags)
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
- if (!atomic_read(&num_events))
+ if (!refcount_read(&num_events))
return 0;
local_irq_disable();
@@ -2205,10 +2058,12 @@ static const struct kernel_param_ops param_ops_sfb_size = {
.get = param_get_sfb_size,
};
-#define RS_INIT_FAILURE_QSI 0x0001
-#define RS_INIT_FAILURE_BSDES 0x0002
-#define RS_INIT_FAILURE_ALRT 0x0003
-#define RS_INIT_FAILURE_PERF 0x0004
+enum {
+ RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */
+ RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */
+ RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */
+};
+
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
@@ -2224,11 +2079,7 @@ static int __init init_cpum_sampling_pmu(void)
return -ENODEV;
memset(&si, 0, sizeof(si));
- if (qsi(&si)) {
- pr_cpumsf_err(RS_INIT_FAILURE_QSI);
- return -ENODEV;
- }
-
+ qsi(&si);
if (!si.as && !si.ad)
return -ENODEV;
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 2f5a20e300f6..fa7325454266 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -738,6 +738,22 @@ static const char * const paicrypt_ctrnames[] = {
[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
[155] = "IBM_RESERVED_155",
[156] = "IBM_RESERVED_156",
+ [157] = "KM_FULL_XTS_AES_128",
+ [158] = "KM_FULL_XTS_AES_256",
+ [159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
+ [160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
+ [161] = "KMAC_HMAC_SHA_224",
+ [162] = "KMAC_HMAC_SHA_256",
+ [163] = "KMAC_HMAC_SHA_384",
+ [164] = "KMAC_HMAC_SHA_512",
+ [165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
+ [166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
+ [167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
+ [168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
+ [169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
+ [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
+ [171] = "PCKMO_ENCRYPT_AES_XTS_128",
+ [172] = "PCKMO_ENCRYPT_AES_XTS_256",
};
static void __init attr_event_free(struct attribute **attrs, int num)
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 6295531b39a2..7f462bef1fc0 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -635,6 +635,15 @@ static const char * const paiext_ctrnames[] = {
[25] = "NNPA_1MFRAME",
[26] = "NNPA_2GFRAME",
[27] = "NNPA_ACCESSEXCEPT",
+ [28] = "NNPA_TRANSFORM",
+ [29] = "NNPA_GELU",
+ [30] = "NNPA_MOMENTS",
+ [31] = "NNPA_LAYERNORM",
+ [32] = "NNPA_MATMUL_OP_BCAST1",
+ [33] = "NNPA_SQRT",
+ [34] = "NNPA_INVSQRT",
+ [35] = "NNPA_NORM",
+ [36] = "NNPA_REDUCE",
};
static void __init attr_event_free(struct attribute **attrs, int num)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6c2cb345402f..e48013cd832c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -30,9 +30,9 @@
#include <linux/compat.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
+#include <asm/vdso-symbols.h>
#include <asm/access-regs.h>
#include <asm/lowcore.h>
-#include <asm/vdso.h>
#include "entry.h"
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fbba37ec53cf..4df56fdb2488 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -671,6 +671,25 @@ int smp_cpu_get_polarization(int cpu)
return per_cpu(pcpu_devices, cpu).polarization;
}
+void smp_cpu_set_capacity(int cpu, unsigned long val)
+{
+ per_cpu(pcpu_devices, cpu).capacity = val;
+}
+
+unsigned long smp_cpu_get_capacity(int cpu)
+{
+ return per_cpu(pcpu_devices, cpu).capacity;
+}
+
+void smp_set_core_capacity(int cpu, unsigned long val)
+{
+ int i;
+
+ cpu = smp_get_base_cpu(cpu);
+ for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
+ smp_cpu_set_capacity(i, val);
+}
+
int smp_cpu_get_cpu_address(int cpu)
{
return per_cpu(pcpu_devices, cpu).address;
@@ -719,6 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
else
pcpu->state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
set_cpu_present(cpu, true);
if (!early && arch_register_cpu(cpu))
set_cpu_present(cpu, false);
@@ -961,6 +981,7 @@ void __init smp_prepare_boot_cpu(void)
ipl_pcpu->state = CPU_STATE_CONFIGURED;
lc->pcpu = (unsigned long)ipl_pcpu;
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
+ smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}
void __init smp_setup_processor_id(void)
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 640363b2a105..9f59837d159e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
{
arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
-
-unsigned long return_address(unsigned int n)
-{
- struct unwind_state state;
- unsigned long addr;
-
- /* Increment to skip current stack entry */
- n++;
-
- unwind_for_each_frame(&state, NULL, NULL, 0) {
- addr = unwind_get_return_address(&state);
- if (!addr)
- break;
- if (!n--)
- return addr;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 22029ecae1c5..813e5da9a973 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
+#include <asm/hiperdispatch.h>
#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
@@ -47,6 +48,7 @@ static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
+static int cpu_management;
static DECLARE_WORK(topology_work, topology_work_fn);
@@ -144,6 +146,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(cpu, &book->mask);
cpumask_set_cpu(cpu, &socket->mask);
smp_cpu_set_polarization(cpu, tl_core->pp);
+ smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
}
}
}
@@ -270,6 +273,7 @@ void update_cpu_masks(void)
topo->drawer_id = id;
}
}
+ hd_reset_state();
for_each_online_cpu(cpu) {
topo = &cpu_topology[cpu];
pkg_first = cpumask_first(&topo->core_mask);
@@ -278,8 +282,10 @@ void update_cpu_masks(void)
for_each_cpu(sibling, &topo->core_mask) {
topo_sibling = &cpu_topology[sibling];
smt_first = cpumask_first(&topo_sibling->thread_mask);
- if (sibling == smt_first)
+ if (sibling == smt_first) {
topo_package->booted_cores++;
+ hd_add_core(sibling);
+ }
}
} else {
topo->booted_cores = topo_package->booted_cores;
@@ -303,8 +309,10 @@ static void __arch_update_dedicated_flag(void *arg)
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- int rc = 0;
+ int rc, hd_status;
+ hd_status = 0;
+ rc = 0;
mutex_lock(&smp_cpu_state_mutex);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
@@ -314,7 +322,11 @@ static int __arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ if (cpu_management == 1)
+ hd_status = hd_enable_hiperdispatch();
mutex_unlock(&smp_cpu_state_mutex);
+ if (hd_status == 0)
+ hd_disable_hiperdispatch();
return rc;
}
@@ -374,7 +386,24 @@ void topology_expect_change(void)
set_topology_timer();
}
-static int cpu_management;
+static int set_polarization(int polarization)
+{
+ int rc = 0;
+
+ cpus_read_lock();
+ mutex_lock(&smp_cpu_state_mutex);
+ if (cpu_management == polarization)
+ goto out;
+ rc = topology_set_cpu_management(polarization);
+ if (rc)
+ goto out;
+ cpu_management = polarization;
+ topology_expect_change();
+out:
+ mutex_unlock(&smp_cpu_state_mutex);
+ cpus_read_unlock();
+ return rc;
+}
static ssize_t dispatching_show(struct device *dev,
struct device_attribute *attr,
@@ -400,19 +429,7 @@ static ssize_t dispatching_store(struct device *dev,
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
- rc = 0;
- cpus_read_lock();
- mutex_lock(&smp_cpu_state_mutex);
- if (cpu_management == val)
- goto out;
- rc = topology_set_cpu_management(val);
- if (rc)
- goto out;
- cpu_management = val;
- topology_expect_change();
-out:
- mutex_unlock(&smp_cpu_state_mutex);
- cpus_read_unlock();
+ rc = set_polarization(val);
return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
@@ -624,12 +641,37 @@ static int topology_ctl_handler(const struct ctl_table *ctl, int write,
return rc;
}
+static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int polarization;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &polarization,
+ .maxlen = sizeof(int),
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ };
+
+ polarization = cpu_management;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+ return set_polarization(polarization);
+}
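Registered under the existing "s390" sysctl directory, the new handler surfaces as /proc/sys/s390/polarization and accepts only 0 (horizontal) or 1 (vertical), mirroring the dispatching sysfs attribute. A hedged userspace sketch using plain POSIX I/O (the path is inferred from the registration below):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/sys/s390/polarization", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* "1" requests vertical polarization, "0" horizontal */
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }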
+
static struct ctl_table topology_ctl_table[] = {
{
.procname = "topology",
.mode = 0644,
.proc_handler = topology_ctl_handler,
},
+ {
+ .procname = "polarization",
+ .mode = 0644,
+ .proc_handler = polarization_ctl_handler,
+ },
};
static int __init topology_init(void)
@@ -642,6 +684,8 @@ static int __init topology_init(void)
set_topology_timer();
else
topology_update_polarization_simple();
+ if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL))
+ set_polarization(1);
register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 36db065c7cf7..9646f773208a 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
@@ -462,9 +463,9 @@ EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
struct vm_area_struct *vma;
+ struct folio_walk fw;
unsigned long uaddr;
struct folio *folio;
- struct page *page;
int rc;
rc = -EFAULT;
@@ -483,11 +484,15 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
goto out;
rc = 0;
- /* we take an extra reference here */
- page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
- if (IS_ERR_OR_NULL(page))
+ folio = folio_walk_start(&fw, vma, uaddr, 0);
+ if (!folio)
goto out;
- folio = page_folio(page);
+ /*
+ * See gmap_make_secure(): large folios cannot be secure. Small
+ * folio implies FW_LEVEL_PTE.
+ */
+ if (folio_test_large(folio) || !pte_write(fw.pte))
+ goto out_walk_end;
rc = uv_destroy_folio(folio);
/*
* Fault handlers can race; it is possible that two CPUs will fault
@@ -500,7 +505,8 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
*/
if (rc)
rc = uv_convert_from_secure_folio(folio);
- folio_put(folio);
+out_walk_end:
+ folio_walk_end(&fw, vma);
out:
mmap_read_unlock(gmap->mm);
return rc;
@@ -548,11 +554,6 @@ int arch_make_folio_accessible(struct folio *folio)
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
-int arch_make_page_accessible(struct page *page)
-{
- return arch_make_folio_accessible(page_folio(page));
-}
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 2f967ac2b8e3..598b512cde01 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -12,12 +12,15 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/alternative.h>
#include <asm/vdso.h>
extern char vdso64_start[], vdso64_end[];
@@ -29,12 +32,6 @@ static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
@@ -250,8 +247,25 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
return pagelist;
}
+static void vdso_apply_alternatives(void)
+{
+ const struct elf64_shdr *alt, *shdr;
+ struct alt_instr *start, *end;
+ const struct elf64_hdr *hdr;
+
+ hdr = (struct elf64_hdr *)vdso64_start;
+ shdr = (void *)hdr + hdr->e_shoff;
+ alt = find_section(hdr, shdr, ".altinstructions");
+ if (!alt)
+ return;
+ start = (void *)hdr + alt->sh_offset;
+ end = (void *)hdr + alt->sh_offset + alt->sh_size;
+ apply_alternatives(start, end);
+}
+
static int __init vdso_init(void)
{
+ vdso_apply_alternatives();
vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
if (IS_ENABLED(CONFIG_COMPAT))
vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index ba19c0ca7c87..37bb4b761229 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -3,12 +3,17 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
-obj-vdso64 = vdso_user_wrapper.o note.o
-obj-cvdso64 = vdso64_generic.o getcpu.o
+obj-vdso64 = vdso_user_wrapper.o note.o vgetrandom-chacha.o
+obj-cvdso64 = vdso64_generic.o getcpu.o vgetrandom.o
VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK)
CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE)
CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
# Build rules
targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg
diff --git a/arch/s390/kernel/vdso64/vdso.h b/arch/s390/kernel/vdso64/vdso.h
index 34c7a2312f9d..9e5397e7b590 100644
--- a/arch/s390/kernel/vdso64/vdso.h
+++ b/arch/s390/kernel/vdso64/vdso.h
@@ -10,5 +10,6 @@ int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unuse
int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts);
+ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
#endif /* __ARCH_S390_KERNEL_VDSO64_VDSO_H */
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 37e2a505e81d..753040a4b5ab 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -4,6 +4,7 @@
* library
*/
+#include <asm/vdso/vsyscall.h>
#include <asm/page.h>
#include <asm/vdso.h>
@@ -13,6 +14,7 @@ OUTPUT_ARCH(s390:64-bit)
SECTIONS
{
PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+ PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET);
#ifdef CONFIG_TIME_NS
PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
#endif
@@ -42,6 +44,10 @@ SECTIONS
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
+ . = ALIGN(8);
+ .altinstructions : { *(.altinstructions) }
+ .altinstr_replacement : { *(.altinstr_replacement) }
+
.dynamic : { *(.dynamic) } :text :dynamic
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
@@ -140,6 +146,7 @@ VERSION
__kernel_restart_syscall;
__kernel_rt_sigreturn;
__kernel_sigreturn;
+ __kernel_getrandom;
local: *;
};
}
diff --git a/arch/s390/kernel/vdso64/vgetrandom-chacha.S b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
new file mode 100644
index 000000000000..d802b0a96f41
--- /dev/null
+++ b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/alternative.h>
+#include <asm/fpu-insn.h>
+
+#define STATE0 %v0
+#define STATE1 %v1
+#define STATE2 %v2
+#define STATE3 %v3
+#define COPY0 %v4
+#define COPY1 %v5
+#define COPY2 %v6
+#define COPY3 %v7
+#define PERM4 %v16
+#define PERM8 %v17
+#define PERM12 %v18
+#define BEPERM %v19
+#define TMP0 %v20
+#define TMP1 %v21
+#define TMP2 %v22
+#define TMP3 %v23
+
+ .section .rodata
+
+ .balign 128
+.Lconstants:
+ .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
+ .long 0x04050607,0x08090a0b,0x0c0d0e0f,0x00010203 # rotl 4 bytes
+ .long 0x08090a0b,0x0c0d0e0f,0x00010203,0x04050607 # rotl 8 bytes
+ .long 0x0c0d0e0f,0x00010203,0x04050607,0x08090a0b # rotl 12 bytes
+ .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap
+
+ .text
+/*
+ * s390 ChaCha20 implementation meant for vDSO. Produces a given positive
+ * number of blocks of output with nonce 0, taking an input key and an
+ * 8-byte counter. Does not spill to the stack.
+ *
+ * void __arch_chacha20_blocks_nostack(uint8_t *dst_bytes,
+ * const uint8_t *key,
+ * uint32_t *counter,
+ * size_t nblocks)
+ */
+SYM_FUNC_START(__arch_chacha20_blocks_nostack)
+ larl %r1,.Lconstants
+
+ /* COPY0 = "expand 32-byte k" */
+ VL COPY0,0,,%r1
+
+ /* PERM4-PERM12,BEPERM = byte selectors for VPERM */
+ VLM PERM4,BEPERM,16,%r1
+
+ /* COPY1,COPY2 = key */
+ VLM COPY1,COPY2,0,%r3
+
+ /* COPY3 = counter || zero nonce */
+ lg %r3,0(%r4)
+ VZERO COPY3
+ VLVGG COPY3,%r3,0
+
+ lghi %r1,0
+.Lblock:
+ VLR STATE0,COPY0
+ VLR STATE1,COPY1
+ VLR STATE2,COPY2
+ VLR STATE3,COPY3
+
+ lghi %r0,10
+.Ldoubleround:
+ /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */
+ VAF STATE0,STATE0,STATE1
+ VX STATE3,STATE3,STATE0
+ VERLLF STATE3,STATE3,16
+
+ /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 12) */
+ VAF STATE2,STATE2,STATE3
+ VX STATE1,STATE1,STATE2
+ VERLLF STATE1,STATE1,12
+
+ /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 8) */
+ VAF STATE0,STATE0,STATE1
+ VX STATE3,STATE3,STATE0
+ VERLLF STATE3,STATE3,8
+
+ /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 7) */
+ VAF STATE2,STATE2,STATE3
+ VX STATE1,STATE1,STATE2
+ VERLLF STATE1,STATE1,7
+
+ /* STATE1[0,1,2,3] = STATE1[1,2,3,0] */
+ VPERM STATE1,STATE1,STATE1,PERM4
+ /* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
+ VPERM STATE2,STATE2,STATE2,PERM8
+ /* STATE3[0,1,2,3] = STATE3[3,0,1,2] */
+ VPERM STATE3,STATE3,STATE3,PERM12
+
+ /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */
+ VAF STATE0,STATE0,STATE1
+ VX STATE3,STATE3,STATE0
+ VERLLF STATE3,STATE3,16
+
+ /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 12) */
+ VAF STATE2,STATE2,STATE3
+ VX STATE1,STATE1,STATE2
+ VERLLF STATE1,STATE1,12
+
+ /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 8) */
+ VAF STATE0,STATE0,STATE1
+ VX STATE3,STATE3,STATE0
+ VERLLF STATE3,STATE3,8
+
+ /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 7) */
+ VAF STATE2,STATE2,STATE3
+ VX STATE1,STATE1,STATE2
+ VERLLF STATE1,STATE1,7
+
+ /* STATE1[0,1,2,3] = STATE1[3,0,1,2] */
+ VPERM STATE1,STATE1,STATE1,PERM12
+ /* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
+ VPERM STATE2,STATE2,STATE2,PERM8
+ /* STATE3[0,1,2,3] = STATE3[1,2,3,0] */
+ VPERM STATE3,STATE3,STATE3,PERM4
+ brctg %r0,.Ldoubleround
+
+ /* OUTPUT0 = STATE0 + COPY0 */
+ VAF STATE0,STATE0,COPY0
+ /* OUTPUT1 = STATE1 + COPY1 */
+ VAF STATE1,STATE1,COPY1
+ /* OUTPUT2 = STATE2 + COPY2 */
+ VAF STATE2,STATE2,COPY2
+ /* OUTPUT3 = STATE3 + COPY3 */
+ VAF STATE3,STATE3,COPY3
+
+ /*
+ * 32 bit wise little endian store to OUTPUT. If the vector
+ * enhancement facility 2 is not installed use the slow path.
+ */
+ ALTERNATIVE "brc 0xf,.Lstoreslow", "nop", ALT_FACILITY(148)
+ VSTBRF STATE0,0,,%r2
+ VSTBRF STATE1,16,,%r2
+ VSTBRF STATE2,32,,%r2
+ VSTBRF STATE3,48,,%r2
+.Lstoredone:
+
+ /* ++COPY3.COUNTER */
+ /* alsih %r3,1 */
+ .insn rilu,0xcc0a00000000,%r3,1
+ alcr %r3,%r1
+ VLVGG COPY3,%r3,0
+
+ /* OUTPUT += 64, --NBLOCKS */
+ aghi %r2,64
+ brctg %r5,.Lblock
+
+ /* COUNTER = COPY3.COUNTER */
+ stg %r3,0(%r4)
+
+ /* Zero out potentially sensitive regs */
+ VZERO STATE0
+ VZERO STATE1
+ VZERO STATE2
+ VZERO STATE3
+ VZERO COPY1
+ VZERO COPY2
+
+ /* Early exit if TMP0-TMP3 have not been used */
+ ALTERNATIVE "nopr", "br %r14", ALT_FACILITY(148)
+
+ VZERO TMP0
+ VZERO TMP1
+ VZERO TMP2
+ VZERO TMP3
+
+ br %r14
+
+.Lstoreslow:
+ /* Convert STATE to little endian format and store to OUTPUT */
+ VPERM TMP0,STATE0,STATE0,BEPERM
+ VPERM TMP1,STATE1,STATE1,BEPERM
+ VPERM TMP2,STATE2,STATE2,BEPERM
+ VPERM TMP3,STATE3,STATE3,BEPERM
+ VSTM TMP0,TMP3,0,%r2
+ j .Lstoredone
+SYM_FUNC_END(__arch_chacha20_blocks_nostack)
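For reference, each .Ldoubleround iteration is the standard ChaCha double round: quarter-rounds down the columns, a VPERM-based diagonalisation, quarter-rounds again, and the inverse permutation. A scalar C sketch of one double round over the 4x4 state word array (plain ChaCha20, not the vectorised register layout used above):

    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    #define QR(a, b, c, d) do {			\
            a += b; d ^= a; d = ROTL32(d, 16);	\
            c += d; b ^= c; b = ROTL32(b, 12);	\
            a += b; d ^= a; d = ROTL32(d, 8);	\
            c += d; b ^= c; b = ROTL32(b, 7);	\
    } while (0)

    static void chacha_double_round(uint32_t x[16])
    {
            /* column round */
            QR(x[0], x[4], x[8],  x[12]);
            QR(x[1], x[5], x[9],  x[13]);
            QR(x[2], x[6], x[10], x[14]);
            QR(x[3], x[7], x[11], x[15]);
            /* diagonal round */
            QR(x[0], x[5], x[10], x[15]);
            QR(x[1], x[6], x[11], x[12]);
            QR(x[2], x[7], x[8],  x[13]);
            QR(x[3], x[4], x[9],  x[14]);
    }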
diff --git a/arch/s390/kernel/vdso64/vgetrandom.c b/arch/s390/kernel/vdso64/vgetrandom.c
new file mode 100644
index 000000000000..b5268b507fb5
--- /dev/null
+++ b/arch/s390/kernel/vdso64/vgetrandom.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/facility.h>
+#include <uapi/asm-generic/errno.h>
+#include "vdso.h"
+
+ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ if (test_facility(129))
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
+ if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags))
+ return -ENOSYS;
+ return getrandom_syscall(buffer, len, flags);
+}
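The fallback above only triggers when facility 129 (the MSA extension backing the vDSO's random-state handling) is absent; in that case the special probe call, all-zero arguments with opaque_len == ~0UL, reports -ENOSYS so generic callers can tell that per-thread opaque state is unavailable and must use the syscall. A hedged probe sketch, assuming the symbol has already been resolved from the vDSO (e.g. via AT_SYSINFO_EHDR) and the uapi params struct shipped by kernels with vDSO getrandom:

    #include <linux/random.h>	/* struct vgetrandom_opaque_params (6.11+) */
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>

    typedef ssize_t (*vgetrandom_fn)(void *buf, size_t len, unsigned int flags,
                                     void *opaque_state, size_t opaque_len);

    /* Probe: fills @params and returns 0 when the vDSO path is usable. */
    static bool vgetrandom_usable(vgetrandom_fn fn,
                                  struct vgetrandom_opaque_params *params)
    {
            return fn(NULL, 0, 0, params, ~0UL) == 0;
    }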
diff --git a/arch/s390/kernel/wti.c b/arch/s390/kernel/wti.c
new file mode 100644
index 000000000000..949fdbf0e8b6
--- /dev/null
+++ b/arch/s390/kernel/wti.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for warning track interruption
+ *
+ * Copyright IBM Corp. 2023
+ */
+
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/smpboot.h>
+#include <linux/irq.h>
+#include <uapi/linux/sched/types.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+#include <asm/sclp.h>
+
+#define WTI_DBF_LEN 64
+
+struct wti_debug {
+ unsigned long missed;
+ unsigned long addr;
+ pid_t pid;
+};
+
+struct wti_state {
+ /* debug data for s390dbf */
+ struct wti_debug dbg;
+ /*
+ * Represents the real-time thread responsible for
+ * acknowledging the warning-track interrupt and for taking
+ * precautions before and after the grace period.
+ */
+ struct task_struct *thread;
+ /*
+ * If pending is true, the real-time thread must be scheduled.
+ * If not, a wake-up of that thread remains a no-op.
+ */
+ bool pending;
+};
+
+static DEFINE_PER_CPU(struct wti_state, wti_state);
+
+static debug_info_t *wti_dbg;
+
+/*
+ * During a warning-track grace period, interrupts are disabled
+ * to prevent delays of the warning-track acknowledgment.
+ *
+ * Once the CPU is physically dispatched again, interrupts are
+ * re-enabled.
+ */
+
+static void wti_irq_disable(void)
+{
+ unsigned long flags;
+ struct ctlreg cr6;
+
+ local_irq_save(flags);
+ local_ctl_store(6, &cr6);
+ /* disable all I/O interrupts */
+ cr6.val &= ~0xff000000UL;
+ local_ctl_load(6, &cr6);
+ local_irq_restore(flags);
+}
+
+static void wti_irq_enable(void)
+{
+ unsigned long flags;
+ struct ctlreg cr6;
+
+ local_irq_save(flags);
+ local_ctl_store(6, &cr6);
+ /* enable all I/O interrupts */
+ cr6.val |= 0xff000000UL;
+ local_ctl_load(6, &cr6);
+ local_irq_restore(flags);
+}
+
+static void store_debug_data(struct wti_state *st)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ st->dbg.pid = current->pid;
+ st->dbg.addr = 0;
+ if (!user_mode(regs))
+ st->dbg.addr = regs->psw.addr;
+}
+
+static void wti_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ struct wti_state *st = this_cpu_ptr(&wti_state);
+
+ inc_irq_stat(IRQEXT_WTI);
+ wti_irq_disable();
+ store_debug_data(st);
+ st->pending = true;
+ wake_up_process(st->thread);
+}
+
+static int wti_pending(unsigned int cpu)
+{
+ struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
+
+ return st->pending;
+}
+
+static void wti_dbf_grace_period(struct wti_state *st)
+{
+ struct wti_debug *wdi = &st->dbg;
+ char buf[WTI_DBF_LEN];
+
+ if (wdi->addr)
+ snprintf(buf, sizeof(buf), "%d %pS", wdi->pid, (void *)wdi->addr);
+ else
+ snprintf(buf, sizeof(buf), "%d <user>", wdi->pid);
+ debug_text_event(wti_dbg, 2, buf);
+ wdi->missed++;
+}
+
+static int wti_show(struct seq_file *seq, void *v)
+{
+ struct wti_state *st;
+ int cpu;
+
+ cpus_read_lock();
+ seq_puts(seq, " ");
+ for_each_online_cpu(cpu)
+ seq_printf(seq, "CPU%-8d", cpu);
+ seq_putc(seq, '\n');
+ for_each_online_cpu(cpu) {
+ st = per_cpu_ptr(&wti_state, cpu);
+ seq_printf(seq, " %10lu", st->dbg.missed);
+ }
+ seq_putc(seq, '\n');
+ cpus_read_unlock();
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wti);
+
+static void wti_thread_fn(unsigned int cpu)
+{
+ struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
+
+ st->pending = false;
+ /*
+ * Yield the CPU voluntarily to the hypervisor. Control
+ * resumes when the hypervisor dispatches the CPU
+ * to this LPAR again.
+ */
+ if (diag49c(DIAG49C_SUBC_ACK))
+ wti_dbf_grace_period(st);
+ wti_irq_enable();
+}
+
+static struct smp_hotplug_thread wti_threads = {
+ .store = &wti_state.thread,
+ .thread_should_run = wti_pending,
+ .thread_fn = wti_thread_fn,
+ .thread_comm = "cpuwti/%u",
+ .selfparking = false,
+};
+
+static int __init wti_init(void)
+{
+ struct sched_param wti_sched_param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct dentry *wti_dir;
+ struct wti_state *st;
+ int cpu, rc;
+
+ rc = -EOPNOTSUPP;
+ if (!sclp.has_wti)
+ goto out;
+ rc = smpboot_register_percpu_thread(&wti_threads);
+ if (WARN_ON(rc))
+ goto out;
+ for_each_online_cpu(cpu) {
+ st = per_cpu_ptr(&wti_state, cpu);
+ sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param);
+ }
+ rc = register_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
+ if (rc) {
+ pr_warn("Couldn't request external interrupt 0x1007\n");
+ goto out_thread;
+ }
+ irq_subclass_register(IRQ_SUBCLASS_WARNING_TRACK);
+ rc = diag49c(DIAG49C_SUBC_REG);
+ if (rc) {
+ pr_warn("Failed to register warning track interrupt through DIAG 49C\n");
+ rc = -EOPNOTSUPP;
+ goto out_subclass;
+ }
+ wti_dir = debugfs_create_dir("wti", arch_debugfs_dir);
+ debugfs_create_file("stat", 0400, wti_dir, NULL, &wti_fops);
+ wti_dbg = debug_register("wti", 1, 1, WTI_DBF_LEN);
+ if (!wti_dbg) {
+ rc = -ENOMEM;
+ goto out_debug_register;
+ }
+ rc = debug_register_view(wti_dbg, &debug_hex_ascii_view);
+ if (rc)
+ goto out_debug_register;
+ goto out;
+out_debug_register:
+ debug_unregister(wti_dbg);
+out_subclass:
+ irq_subclass_unregister(IRQ_SUBCLASS_WARNING_TRACK);
+ unregister_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
+out_thread:
+ smpboot_unregister_percpu_thread(&wti_threads);
+out:
+ return rc;
+}
+late_initcall(wti_init);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 75d15bf41d97..d01724a715d0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
+ cond_resched();
}
return nr;
}
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+{
+ long inc = 0;
+
+ while (nr) {
+ inc = min(256L, nr);
+ nr -= inc;
+ inc = __cmm_free_pages(inc, counter, list);
+ if (inc)
+ break;
+ cond_resched();
+ }
+ return nr + inc;
+}
+
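Together with the cond_resched() added to cmm_alloc_pages(), the wrapper above keeps huge balloon resize requests preemptible: frees are chopped into chunks of at most 256 pages with a reschedule point between chunks, and a partial chunk aborts the loop. The chunking pattern in isolation, with a hypothetical per-chunk worker:

    #include <linux/minmax.h>
    #include <linux/sched.h>

    long process_chunk(long n);	/* hypothetical; returns # left unprocessed */

    /* Process @nr items in bounded chunks, yielding in between. */
    static long process_batched(long nr)
    {
            long chunk, left = 0;

            while (nr) {
                    chunk = min(256L, nr);
                    nr -= chunk;
                    left = process_chunk(chunk);
                    if (left)
                            break;		/* stop on partial progress */
                    cond_resched();
            }
            return nr + left;	/* total left undone */
    }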
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 0a67fcee4414..fa54f3bc0c8d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,89 +18,12 @@ static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
+ unsigned long size;
const char *name;
};
-enum address_markers_idx {
- KVA_NR = 0,
- LOWCORE_START_NR,
- LOWCORE_END_NR,
- AMODE31_START_NR,
- AMODE31_END_NR,
- KERNEL_START_NR,
- KERNEL_END_NR,
-#ifdef CONFIG_KFENCE
- KFENCE_START_NR,
- KFENCE_END_NR,
-#endif
- IDENTITY_START_NR,
- IDENTITY_END_NR,
- VMEMMAP_NR,
- VMEMMAP_END_NR,
- VMALLOC_NR,
- VMALLOC_END_NR,
-#ifdef CONFIG_KMSAN
- KMSAN_VMALLOC_SHADOW_START_NR,
- KMSAN_VMALLOC_SHADOW_END_NR,
- KMSAN_VMALLOC_ORIGIN_START_NR,
- KMSAN_VMALLOC_ORIGIN_END_NR,
- KMSAN_MODULES_SHADOW_START_NR,
- KMSAN_MODULES_SHADOW_END_NR,
- KMSAN_MODULES_ORIGIN_START_NR,
- KMSAN_MODULES_ORIGIN_END_NR,
-#endif
- MODULES_NR,
- MODULES_END_NR,
- ABS_LOWCORE_NR,
- ABS_LOWCORE_END_NR,
- MEMCPY_REAL_NR,
- MEMCPY_REAL_END_NR,
-#ifdef CONFIG_KASAN
- KASAN_SHADOW_START_NR,
- KASAN_SHADOW_END_NR,
-#endif
-};
-
-static struct addr_marker address_markers[] = {
- [KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
- [LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
- [LOWCORE_END_NR] = {0, 0, "Lowcore End"},
- [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
- [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
- [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
- [AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
- [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
-#ifdef CONFIG_KFENCE
- [KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
- [KFENCE_END_NR] = {0, 0, "KFence Pool End"},
-#endif
- [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
- [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
- [VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
- [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
-#ifdef CONFIG_KMSAN
- [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
- [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
- [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
- [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
- [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
- [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
- [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
- [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
-#endif
- [MODULES_NR] = {1, 0, "Modules Area Start"},
- [MODULES_END_NR] = {0, 0, "Modules Area End"},
- [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
- [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
- [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
- [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
-#ifdef CONFIG_KASAN
- [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
- [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
-#endif
- {1, -1UL, NULL}
-};
+static struct addr_marker *markers;
+static unsigned int markers_cnt;
struct pg_state {
struct ptdump_state ptdump;
@@ -173,7 +96,8 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr, unsi
while (addr >= st->marker[1].start_address) {
st->marker++;
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
+ st->marker->is_start ? "Start" : "End");
}
st->start_address = addr;
st->current_prot = prot;
@@ -202,7 +126,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
if (level == -1)
addr = max_addr;
if (st->level == -1) {
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
@@ -276,7 +200,7 @@ static int ptdump_show(struct seq_file *m, void *v)
.check_wx = false,
.wx_pages = 0,
.start_address = 0,
- .marker = address_markers,
+ .marker = markers,
};
get_online_mems();
@@ -299,10 +223,23 @@ static int ptdump_cmp(const void *a, const void *b)
if (ama->start_address < amb->start_address)
return -1;
/*
- * If the start addresses of two markers are identical consider the
- * marker which defines the start of an area higher than the one which
- * defines the end of an area. This keeps pairs of markers sorted.
+ * If the start addresses of two markers are identical, sort the markers
+ * so that areas nested within other areas are ordered correctly.
*/
+ if (ama->is_start && amb->is_start) {
+ if (ama->size > amb->size)
+ return -1;
+ if (ama->size < amb->size)
+ return 1;
+ return 0;
+ }
+ if (!ama->is_start && !amb->is_start) {
+ if (ama->size > amb->size)
+ return 1;
+ if (ama->size < amb->size)
+ return -1;
+ return 0;
+ }
if (ama->is_start)
return 1;
if (amb->is_start)
@@ -310,12 +247,41 @@ static int ptdump_cmp(const void *a, const void *b)
return 0;
}
+static int add_marker(unsigned long start, unsigned long end, const char *name)
+{
+ size_t oldsize, newsize;
+
+ oldsize = markers_cnt * sizeof(*markers);
+ newsize = oldsize + 2 * sizeof(*markers);
+ if (!oldsize)
+ markers = kvmalloc(newsize, GFP_KERNEL);
+ else
+ markers = kvrealloc(markers, newsize, GFP_KERNEL);
+ if (!markers)
+ goto error;
+ markers[markers_cnt].is_start = 1;
+ markers[markers_cnt].start_address = start;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ markers[markers_cnt].is_start = 0;
+ markers[markers_cnt].start_address = end;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ return 0;
+error:
+ markers_cnt = 0;
+ return -ENOMEM;
+}
+
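Each add_marker() call appends a start/end marker pair that records the area size, which is the tiebreak ptdump_cmp() uses when two areas share a boundary address. A small illustration with two hypothetical areas that begin at the same address (it assumes the add_marker()/markers definitions above):

    #include <linux/sort.h>

    static int __init marker_demo(void)
    {
            int rc;

            rc = add_marker(0x1000, 0x9000, "Outer");	/* size 0x8000 */
            rc |= add_marker(0x1000, 0x2000, "Inner");	/* size 0x1000 */
            if (rc)
                    return rc;
            sort(markers, markers_cnt, sizeof(*markers), ptdump_cmp, NULL);
            /*
             * At the shared start address 0x1000 the larger area sorts
             * first, so the walk nests correctly:
             *   Outer Start (0x1000), Inner Start (0x1000),
             *   Inner End (0x2000),   Outer End (0x9000)
             */
            return 0;
    }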
static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
+ int rc;
/*
* Figure out the maximum virtual address being accessible with the
@@ -324,41 +290,38 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[LOWCORE_START_NR].start_address = lowcore;
- address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
- address_markers[IDENTITY_START_NR].start_address = __identity_base;
- address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
- address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
- address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
- address_markers[MODULES_NR].start_address = MODULES_VADDR;
- address_markers[MODULES_END_NR].start_address = MODULES_END;
- address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
- address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
- address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
- address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
- address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
- address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
- address_markers[VMALLOC_NR].start_address = VMALLOC_START;
- address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ /* start + end markers - must be added first */
+ rc = add_marker(0, -1UL, NULL);
+ rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image");
+ rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore");
+ rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping");
+ rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area");
+ rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area");
+ rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area");
+ rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area");
+ rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area");
+ rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area");
#ifdef CONFIG_KFENCE
- address_markers[KFENCE_START_NR].start_address = kfence_start;
- address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+ rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool");
#endif
#ifdef CONFIG_KMSAN
- address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
- address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
- address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
- address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
- address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
- address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
- address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
- address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
+ rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow");
+ rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins");
+ rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow");
+ rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins");
+#endif
+#ifdef CONFIG_KASAN
+ rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow");
#endif
- sort(address_markers, ARRAY_SIZE(address_markers) - 1,
- sizeof(address_markers[0]), ptdump_cmp, NULL);
+ if (rc)
+ goto error;
+ sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
return 0;
+error:
+ kvfree(markers);
+ return -ENOMEM;
}
device_initcall(pt_dump_init);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 8e149ef5e89b..ad8b0d6b77ea 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
+#include <linux/pagewalk.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -492,9 +493,9 @@ void do_secure_storage_access(struct pt_regs *regs)
union teid teid = { .val = regs->int_parm_long };
unsigned long addr = get_fault_address(regs);
struct vm_area_struct *vma;
+ struct folio_walk fw;
struct mm_struct *mm;
struct folio *folio;
- struct page *page;
struct gmap *gmap;
int rc;
@@ -536,15 +537,18 @@ void do_secure_storage_access(struct pt_regs *regs)
vma = find_vma(mm, addr);
if (!vma)
return handle_fault_error(regs, SEGV_MAPERR);
- page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
- if (IS_ERR_OR_NULL(page)) {
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (!folio) {
mmap_read_unlock(mm);
break;
}
- folio = page_folio(page);
- if (arch_make_folio_accessible(folio))
- send_sig(SIGSEGV, current, 0);
+ /* arch_make_folio_accessible() needs a raised refcount. */
+ folio_get(folio);
+ rc = arch_make_folio_accessible(folio);
folio_put(folio);
+ folio_walk_end(&fw, vma);
+ if (rc)
+ send_sig(SIGSEGV, current, 0);
mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
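The conversion above swaps follow_page() for the folio_walk API: folio_walk_start() returns the folio mapped at an address with the page-table lock held, folio_walk_end() drops it, and an explicit folio_get()/folio_put() keeps the folio alive across arch_make_folio_accessible(). The bare shape of the pattern, with a hypothetical per-folio operation:

    #include <linux/pagewalk.h>
    #include <linux/mm.h>

    int operate_on_folio(struct folio *folio);	/* hypothetical */

    /* Caller must hold mmap_read_lock(vma->vm_mm). */
    static int with_mapped_folio(struct vm_area_struct *vma, unsigned long addr)
    {
            struct folio_walk fw;
            struct folio *folio;
            int rc;

            folio = folio_walk_start(&fw, vma, addr, 0);
            if (!folio)
                    return -EFAULT;
            folio_get(folio);	/* pin across the operation */
            rc = operate_on_folio(folio);
            folio_put(folio);
            folio_walk_end(&fw, vma);	/* drops the page-table lock */
            return rc;
    }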
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e3d258f9e726..7a96623a9d2e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(zero_page_mask);
static void __init setup_zero_pages(void)
{
- unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long total_pages = memblock_estimated_nr_free_pages();
unsigned int order;
struct page *page;
int i;
@@ -97,7 +97,7 @@ void __init paging_init(void)
vmem_map_init();
sparse_init();
- zone_dma_bits = 31;
+ zone_dma_limit = DMA_BIT_MASK(31);
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 206756946589..96efa061ce01 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -82,7 +82,7 @@ static int get_align_mask(struct file *filp, unsigned long flags)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -117,7 +117,7 @@ check_asce_limit:
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 0547a10406e7..2c21f0394c9a 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,7 +3,8 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
+obj-$(CONFIG_SYSFS) += pci_sysfs.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index cff4838fad21..bd9624c20b80 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -587,7 +587,6 @@ int pcibios_device_add(struct pci_dev *pdev)
if (pdev->is_physfn)
pdev->no_vf_scan = 1;
- pdev->dev.groups = zpci_attr_groups;
zpci_map_resources(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 5398729bfe1b..de5c0b389a3e 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -118,12 +118,11 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
const void __user *, user_buffer, size_t, length)
{
+ struct follow_pfnmap_args args = { };
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
- pte_t *ptep;
- spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -169,11 +168,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
+ args.address = mmio_addr;
+ args.vma = vma;
+ ret = follow_pfnmap_start(&args);
if (ret)
goto out_unlock_mmap;
- io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
+ io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -181,7 +182,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unlock_mmap:
mmap_read_unlock(current->mm);
out_free:
@@ -260,12 +261,11 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
void __user *, user_buffer, size_t, length)
{
+ struct follow_pfnmap_args args = { };
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
- pte_t *ptep;
- spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -308,11 +308,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
+ args.vma = vma;
+ args.address = mmio_addr;
+ ret = follow_pfnmap_start(&args);
if (ret)
goto out_unlock_mmap;
- io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
+ io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
@@ -322,7 +324,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
ret = zpci_memcpy_fromio(buf, io_addr, length);
out_unlock_pt:
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unlock_mmap:
mmap_read_unlock(current->mm);
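Both MMIO syscalls now use the follow_pfnmap API instead of follow_pte(): struct follow_pfnmap_args carries the VMA and address in and the PFN out, while follow_pfnmap_start()/follow_pfnmap_end() bracket the page-table lock that the old ptep/ptl pair exposed directly. A minimal sketch of the start/use/end shape, under the same mmap read lock the syscalls hold:

    #include <linux/mm.h>

    /* Caller must hold mmap_read_lock() on vma->vm_mm. */
    static int pfn_of_addr(struct vm_area_struct *vma, unsigned long addr,
                           unsigned long *pfn)
    {
            struct follow_pfnmap_args args = {
                    .vma = vma,
                    .address = addr,
            };
            int ret;

            ret = follow_pfnmap_start(&args);
            if (ret)
                    return ret;
            *pfn = args.pfn;
            follow_pfnmap_end(&args);	/* mapping may change afterwards */
            return 0;
    }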
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 0f4f1e8fc480..1f81f6ff7b95 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -197,7 +197,7 @@ static struct attribute *zpci_ident_attrs[] = {
NULL,
};
-static struct attribute_group zpci_ident_attr_group = {
+const struct attribute_group zpci_ident_attr_group = {
.attrs = zpci_ident_attrs,
.is_visible = zpci_index_is_visible,
};
@@ -223,7 +223,7 @@ static struct attribute *zpci_dev_attrs[] = {
NULL,
};
-static struct attribute_group zpci_attr_group = {
+const struct attribute_group zpci_attr_group = {
.attrs = zpci_dev_attrs,
.bin_attrs = zpci_bin_attrs,
};
@@ -235,14 +235,8 @@ static struct attribute *pfip_attrs[] = {
&dev_attr_segment3.attr,
NULL,
};
-static struct attribute_group pfip_attr_group = {
+
+const struct attribute_group pfip_attr_group = {
.name = "pfip",
.attrs = pfip_attrs,
};
-
-const struct attribute_group *zpci_attr_groups[] = {
- &zpci_attr_group,
- &pfip_attr_group,
- &zpci_ident_attr_group,
- NULL,
-};
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index 5f008e794898..def2659f6602 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -527,9 +527,9 @@ b938 sortl RRE_RR
b939 dfltcc RRF_R0RR2
b93a kdsa RRE_RR
b93b nnpa RRE_00
-b93c ppno RRE_RR
-b93e kimd RRE_RR
-b93f klmd RRE_RR
+b93c prno RRE_RR
+b93e kimd RRF_U0RR
+b93f klmd RRF_U0RR
b941 cfdtr RRF_UURF
b942 clgdtr RRF_UURF
b943 clfdtr RRF_UURF
@@ -549,6 +549,10 @@ b964 nngrk RRF_R0RR2
b965 ocgrk RRF_R0RR2
b966 nogrk RRF_R0RR2
b967 nxgrk RRF_R0RR2
+b968 clzg RRE_RR
+b969 ctzg RRE_RR
+b96c bextg RRF_R0RR2
+b96d bdepg RRF_R0RR2
b972 crt RRF_U0RR
b973 clrt RRF_U0RR
b974 nnrk RRF_R0RR2
@@ -796,6 +800,16 @@ e35b sy RXY_RRRD
e35c mfy RXY_RRRD
e35e aly RXY_RRRD
e35f sly RXY_RRRD
+e360 lxab RXY_RRRD
+e361 llxab RXY_RRRD
+e362 lxah RXY_RRRD
+e363 llxah RXY_RRRD
+e364 lxaf RXY_RRRD
+e365 llxaf RXY_RRRD
+e366 lxag RXY_RRRD
+e367 llxag RXY_RRRD
+e368 lxaq RXY_RRRD
+e369 llxaq RXY_RRRD
e370 sthy RXY_RRRD
e371 lay RXY_RRRD
e372 stcy RXY_RRRD
@@ -880,6 +894,8 @@ e63c vupkz VSI_URDV
e63d vstrl VSI_URDV
e63f vstrlr VRS_RRDV
e649 vlip VRI_V0UU2
+e64a vcvdq VRI_VV0UU
+e64e vcvbq VRR_VV0U2
e650 vcvb VRR_RV0UU
e651 vclzdp VRR_VV0U2
e652 vcvbg VRR_RV0UU
@@ -893,7 +909,7 @@ e65b vpsop VRI_VVUUU2
e65c vupkzl VRR_VV0U2
e65d vcfn VRR_VV0UU2
e65e vclfnl VRR_VV0UU2
-e65f vtp VRR_0V
+e65f vtp VRR_0V0U
e670 vpkzr VRI_VVV0UU2
e671 vap VRI_VVV0UU2
e672 vsrpr VRI_VVV0UU2
@@ -908,6 +924,7 @@ e67b vrp VRI_VVV0UU2
e67c vscshp VRR_VVV
e67d vcsph VRR_VVV0U0
e67e vsdp VRI_VVV0UU2
+e67f vtz VRR_0VVU
e700 vleb VRX_VRRDU
e701 vleh VRX_VRRDU
e702 vleg VRX_VRRDU
@@ -948,6 +965,7 @@ e74d vrep VRI_VVUU
e750 vpopct VRR_VV0U
e752 vctz VRR_VV0U
e753 vclz VRR_VV0U
+e754 vgem VRR_VV0U
e756 vlr VRX_VV
e75c vistr VRR_VV0U0U
e75f vseg VRR_VV0U
@@ -985,6 +1003,8 @@ e784 vpdi VRR_VVV0U
e785 vbperm VRR_VVV
e786 vsld VRI_VVV0U
e787 vsrd VRI_VVV0U
+e788 veval VRI_VVV0UV
+e789 vblend VRR_VVVU0V
e78a vstrc VRR_VVVUU0V
e78b vstrs VRR_VVVUU0V
e78c vperm VRR_VVV0V
@@ -1010,6 +1030,10 @@ e7ac vmale VRR_VVVU0V
e7ad vmalo VRR_VVVU0V
e7ae vmae VRR_VVVU0V
e7af vmao VRR_VVVU0V
+e7b0 vdl VRR_VVV0UU
+e7b1 vrl VRR_VVV0UU
+e7b2 vd VRR_VVV0UU
+e7b3 vr VRR_VVV0UU
e7b4 vgfm VRR_VVV0U
e7b8 vmsl VRR_VVVUU0V
e7b9 vaccc VRR_VVVU0V
@@ -1017,12 +1041,12 @@ e7bb vac VRR_VVVU0V
e7bc vgfma VRR_VVVU0V
e7bd vsbcbi VRR_VVVU0V
e7bf vsbi VRR_VVVU0V
-e7c0 vclgd VRR_VV0UUU
-e7c1 vcdlg VRR_VV0UUU
-e7c2 vcgd VRR_VV0UUU
-e7c3 vcdg VRR_VV0UUU
-e7c4 vlde VRR_VV0UU2
-e7c5 vled VRR_VV0UUU
+e7c0 vclfp VRR_VV0UUU
+e7c1 vcfpl VRR_VV0UUU
+e7c2 vcsfp VRR_VV0UUU
+e7c3 vcfps VRR_VV0UUU
+e7c4 vfll VRR_VV0UU2
+e7c5 vflr VRR_VV0UUU
e7c7 vfi VRR_VV0UUU
e7ca wfk VRR_VV0UU2
e7cb wfc VRR_VV0UU2
@@ -1094,9 +1118,9 @@ eb54 niy SIY_URD
eb55 cliy SIY_URD
eb56 oiy SIY_URD
eb57 xiy SIY_URD
-eb60 lric RSY_RDRU
-eb61 stric RSY_RDRU
-eb62 mric RSY_RDRU
+eb60 lric RSY_RURD2
+eb61 stric RSY_RURD2
+eb62 mric RSY_RURD2
eb6a asi SIY_IRD
eb6e alsi SIY_IRD
eb71 lpswey SIY_RD
@@ -1104,7 +1128,7 @@ eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD
eb81 icmy RSY_RURD
-eb8a sqbs RSY_RDRU
+eb8a sqbs RSY_RURD2
eb8e mvclu RSY_RRRD
eb8f clclu RSY_RRRD
eb90 stmy RSY_RRRD
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1aa3c4a0c5b2..e9103998cca9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,6 +14,7 @@ config SUPERH
select ARCH_HIBERNATION_POSSIBLE if MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_NEED_CMPXCHG_1_EMU
select CPU_NO_EFFICIENT_FFS
select DMA_DECLARE_COHERENT
select GENERIC_ATOMIC64
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 5d617b3ef78f..1e5dc5ccf7bf 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
#if defined(CONFIG_GUSA_RB)
#include <asm/cmpxchg-grb.h>
@@ -56,6 +57,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
+ case 1:
+ return cmpxchg_emu_u8(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
}
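
For reference, cmpxchg_emu_u8() synthesizes the byte-sized operation from the native
32-bit cmpxchg on the aligned containing word. A sketch of the approach taken by
lib/cmpxchg-emu.c (byte-lane math shown for little-endian; the real helper also has
to handle big-endian and may differ in detail):

	u8 cmpxchg_emu_u8(volatile u8 *p, u8 old, u8 new)
	{
		volatile u32 *p32 = (volatile u32 *)((unsigned long)p & ~3UL);
		unsigned int shift = ((unsigned long)p & 3UL) * BITS_PER_BYTE;
		u32 mask = 0xffU << shift;
		u32 old32, new32, cur = *p32;

		do {
			old32 = cur;
			if (((old32 & mask) >> shift) != old)
				return (old32 & mask) >> shift;	/* failed */
			new32 = (old32 & ~mask) | ((u32)new << shift);
			cur = cmpxchg(p32, old32, new32);	/* native 32-bit op */
		} while (cur != old32);

		return old;	/* success: the old byte matched */
	}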
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 7b8dead2723d..63f88b465e39 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -5,9 +5,6 @@
#ifdef CONFIG_NUMA
#include <linux/numa.h>
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
static inline int pfn_to_nid(unsigned long pfn)
{
int nid;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 1bd85a6949c4..add35c51e017 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -36,6 +36,10 @@ __setup("vdso=", vdso_setup);
*/
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];
+static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .pages = syscall_pages,
+};
int __init vsyscall_init(void)
{
@@ -58,6 +62,7 @@ int __init vsyscall_init(void)
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
unsigned long addr;
int ret;
@@ -70,14 +75,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ vdso_mapping.pages = syscall_pages;
+ vma = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- syscall_pages);
- if (unlikely(ret))
+ &vdso_mapping);
+ ret = PTR_ERR(vma);
+ if (IS_ERR(vma))
goto up_fail;
current->mm->context.vdso = (void *)addr;
+ ret = 0;
up_fail:
mmap_write_unlock(mm);
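
Note the changed error convention: _install_special_mapping() returns the new VMA or
an ERR_PTR() rather than an int, and the backing struct vm_special_mapping is what
names the region "[vdso]" in /proc/<pid>/maps. The resulting calling pattern, as used
above:

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* nothing was installed */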
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d1fe90b2f5ff..2a88b0c9e70f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -212,12 +212,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_try_nid(
- sizeof(struct pglist_data),
- SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
- MEMBLOCK_ALLOC_ACCESSIBLE, nid);
- if (!NODE_DATA(nid))
- panic("Can't allocate pgdat for node %d\n", nid);
+ alloc_node_data(nid);
#endif
NODE_DATA(nid)->node_start_pfn = start_pfn;
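
This hunk (and the matching sparc one further down) drops the per-arch pglist_data
bootstrapping in favour of the generic alloc_node_data() helper, whose core is
roughly the following sketch (see mm/numa.c; exact details may differ):

	void __init alloc_node_data(int nid)
	{
		const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
		phys_addr_t nd_pa;

		/* Prefer placing a node's own metadata on that node. */
		nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
		if (!nd_pa)
			panic("Cannot allocate pgdat for node %d\n", nid);

		node_data[nid] = __va(nd_pa);
		memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	}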
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index bee329d4149a..c442734d9b0c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -52,7 +52,8 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -99,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 50f0dc1744d0..9bc212b5e762 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -14,9 +14,6 @@
#include <linux/pfn.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL_GPL(node_data);
-
/*
* On SH machines the conventional approach is to stash system RAM
* in node 0, and other memory blocks into node 1 and up, ordered by
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 11bf9d312318..dcfdb7f1dae9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -14,9 +14,9 @@ config SPARC
bool
default y
select ARCH_HAS_CPU_CACHE_ALIASING
+ select ARCH_HAS_DMA_OPS
select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
select ARCH_MIGHT_HAVE_PC_SERIO
- select DMA_OPS
select OF
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index a236d8aa893a..74eb2c71d077 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -6,10 +6,6 @@
#include <linux/cpumask.h>
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[nid])
-
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 3fe429d73a65..2b7f358762c1 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -783,6 +783,7 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
return __pmd(pte_val(pte));
}
+#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t entry)
{
unsigned long val = pmd_val(entry);
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 682da3714686..57084ed2f3c4 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -133,6 +133,12 @@
#define SO_PASSPIDFD 0x0055
#define SO_PEERPIDFD 0x0056
+#define SO_DEVMEM_LINEAR 0x0057
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 0x0058
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 0x0059
+
#if !defined(__KERNEL__)
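
These constants wire sparc up to the device-memory TCP interface, where payload
received into a dma-buf is described to userspace via control messages instead of
the linear buffer. A rough receive-side sketch; the MSG_SOCK_DEVMEM flag and the
struct dmabuf_cmsg layout are assumed from that series, not confirmed here:

	char ctrl[CMSG_SPACE(64 * sizeof(struct dmabuf_cmsg))]; /* type assumed */
	struct msghdr msg = {
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	recvmsg(fd, &msg, MSG_SOCK_DEVMEM);	/* flag name assumed */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_type == SCM_DEVMEM_DMABUF) {
			/* CMSG_DATA(cm) describes a fragment in the dma-buf;
			 * tokens are later returned via SO_DEVMEM_DONTNEED. */
		}
	}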
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 08a19727795c..80822f922e76 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -39,7 +39,7 @@ SYSCALL_DEFINE0(getpagesize)
return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct vm_unmapped_area_info info = {};
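
The zero-initialized descriptor is then filled in and passed to the common search
helper; stripped of the cache-colouring logic these arch functions add, the usual
shape is:

	/* Sketch: bottom-up search via the common helper. */
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;				/* or a colour mask */
	info.align_offset = pgoff << PAGE_SHIFT;	/* used with the mask */
	return vm_unmapped_area(&info);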
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index d9c3b34ca744..acade309dc2f 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -87,7 +87,7 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr,
return base + off;
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
@@ -146,7 +146,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 53d7cb5bbffe..21f8cbbd0581 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1075,14 +1075,9 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, nid);
- if (!NODE_DATA(nid)) {
- prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
- prom_halt();
- }
+#ifdef CONFIG_NUMA
+ alloc_node_data(nid);
NODE_DATA(nid)->node_id = nid;
#endif
@@ -1115,11 +1110,9 @@ static void init_node_masks_nonnuma(void)
}
#ifdef CONFIG_NUMA
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
u32 cfg_handle)
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
- unsigned int lvl, pte, paddrbase;
+ unsigned int lvl, pte;
unsigned int ctx;
unsigned int paddr_calc;
- paddrbase = 0;
-
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
- paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
- paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
- paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
- paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 4382cf02a6d1..419021175272 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -8,7 +8,7 @@
#include <os.h>
static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
static struct kmsg_dump_iter iter;
static DEFINE_SPINLOCK(lock);
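
The dumper callback now takes a struct kmsg_dump_detail rather than the bare reason.
As implied by this conversion, the struct bundles the reason with extra context,
roughly (field set assumed; see include/linux/kmsg_dump.h):

	struct kmsg_dump_detail {
		enum kmsg_dump_reason reason;
		const char *description;	/* e.g. panic message, assumed */
	};

	/* A converted dumper that only cares about the reason: */
	if (detail->reason != KMSG_DUMP_PANIC)
		return;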
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 007bab9f2a0e..2852fcd82cbd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,6 +28,7 @@ config X86_64
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
@@ -79,6 +80,7 @@ config X86
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
@@ -107,6 +109,7 @@ config X86
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAVE_EXTRA_ELF_NOTES
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -122,6 +125,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64
select ARCH_USE_MEMTEST
@@ -296,6 +300,7 @@ config X86
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select NEED_SG_DMA_LENGTH
+ select NUMA_MEMBLKS if NUMA
select PCI_DOMAINS if PCI
select PCI_LOCKLESS_CONFIG if PCI
select PERF_EVENTS
@@ -943,7 +948,6 @@ config DMI
config GART_IOMMU
bool "Old AMD GART IOMMU support"
- select DMA_OPS
select IOMMU_HELPER
select SWIOTLB
depends on X86_64 && PCI && AMD_NB
@@ -1599,14 +1603,6 @@ config X86_64_ACPI_NUMA
help
Enable ACPI SRAT based node topology detection.
-config NUMA_EMU
- bool "NUMA emulation"
- depends on NUMA
- help
- Enable NUMA emulation. A flat machine will be split
- into virtual nodes when booted with "numa=fake=N", where N is the
- number of nodes. This is only useful for debugging.
-
config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
range 1 10
@@ -1806,6 +1802,7 @@ config X86_PAT
def_bool y
prompt "x86 PAT support" if EXPERT
depends on MTRR
+ select ARCH_USES_PG_ARCH_2
help
Use PAT attributes to setup page level cache control.
@@ -1817,10 +1814,6 @@ config X86_PAT
If unsure, say Y.
-config ARCH_USES_PG_UNCACHED
- def_bool y
- depends on X86_PAT
-
config X86_UMIP
def_bool y
prompt "User Mode Instruction Prevention" if EXPERT
@@ -1889,6 +1882,10 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
If unsure, say y.
+config ARCH_PKEY_BITS
+ int
+ default 4
+
choice
prompt "TSX enable mode"
depends on CPU_SUP_INTEL
@@ -2610,24 +2607,15 @@ config MITIGATION_SLS
against straight line speculation. The kernel image might be slightly
larger.
-config MITIGATION_GDS_FORCE
- bool "Force GDS Mitigation"
+config MITIGATION_GDS
+ bool "Mitigate Gather Data Sampling"
depends on CPU_SUP_INTEL
- default n
+ default y
help
- Gather Data Sampling (GDS) is a hardware vulnerability which allows
- unprivileged speculative access to data which was previously stored in
- vector registers.
-
- This option is equivalent to setting gather_data_sampling=force on the
- command line. The microcode mitigation is used if present, otherwise
- AVX is disabled as a mitigation. On affected systems that are missing
- the microcode any userspace code that unconditionally uses AVX will
- break with this option set.
-
- Setting this option on systems not vulnerable to GDS has no effect.
-
- If in doubt, say N.
+ Enable mitigation for Gather Data Sampling (GDS). GDS is a hardware
+ vulnerability which allows unprivileged speculative access to data
+ which was previously stored in vector registers. The attacker uses gather
+ instructions to infer the stale vector register data.
config MITIGATION_RFDS
bool "RFDS Mitigation"
@@ -2650,6 +2638,107 @@ config MITIGATION_SPECTRE_BHI
indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+config MITIGATION_MDS
+ bool "Mitigate Microarchitectural Data Sampling (MDS) hardware bug"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable mitigation for Microarchitectural Data Sampling (MDS). MDS is
+ a hardware vulnerability which allows unprivileged speculative access
+ to data which is available in various CPU internal buffers.
+ See also <file:Documentation/admin-guide/hw-vuln/mds.rst>
+
+config MITIGATION_TAA
+ bool "Mitigate TSX Asynchronous Abort (TAA) hardware bug"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable mitigation for TSX Asynchronous Abort (TAA). TAA is a hardware
+ vulnerability that allows unprivileged speculative access to data
+ which is available in various CPU internal buffers by using
+ asynchronous aborts within an Intel TSX transactional region.
+ See also <file:Documentation/admin-guide/hw-vuln/tsx_async_abort.rst>
+
+config MITIGATION_MMIO_STALE_DATA
+ bool "Mitigate MMIO Stale Data hardware bug"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable mitigation for MMIO Stale Data hardware bugs. Processor MMIO
+ Stale Data Vulnerabilities are a class of memory-mapped I/O (MMIO)
+ vulnerabilities that can expose data. The vulnerabilities require the
+ attacker to have access to MMIO.
+ See also
+ <file:Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst>
+
+config MITIGATION_L1TF
+ bool "Mitigate L1 Terminal Fault (L1TF) hardware bug"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Mitigate L1 Terminal Fault (L1TF) hardware bug. L1 Terminal Fault is a
+ hardware vulnerability which allows unprivileged speculative access to data
+ available in the Level 1 Data Cache.
+	  See <file:Documentation/admin-guide/hw-vuln/l1tf.rst>
+
+config MITIGATION_RETBLEED
+ bool "Mitigate RETBleed hardware bug"
+ depends on (CPU_SUP_INTEL && MITIGATION_SPECTRE_V2) || MITIGATION_UNRET_ENTRY || MITIGATION_IBPB_ENTRY
+ default y
+ help
+ Enable mitigation for RETBleed (Arbitrary Speculative Code Execution
+ with Return Instructions) vulnerability. RETBleed is a speculative
+ execution attack which takes advantage of microarchitectural behavior
+ in many modern microprocessors, similar to Spectre v2. An
+ unprivileged attacker can use these flaws to bypass conventional
+ memory security restrictions to gain read access to privileged memory
+ that would otherwise be inaccessible.
+
+config MITIGATION_SPECTRE_V1
+ bool "Mitigate SPECTRE V1 hardware bug"
+ default y
+ help
+ Enable mitigation for Spectre V1 (Bounds Check Bypass). Spectre V1 is a
+	  class of side channel attacks that takes advantage of speculative
+	  execution past the conditional branch instructions used for memory
+	  access bounds checks.
+ See also <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
+config MITIGATION_SPECTRE_V2
+ bool "Mitigate SPECTRE V2 hardware bug"
+ default y
+ help
+ Enable mitigation for Spectre V2 (Branch Target Injection). Spectre
+ V2 is a class of side channel attacks that takes advantage of
+ indirect branch predictors inside the processor. In Spectre variant 2
+ attacks, the attacker can steer speculative indirect branches in the
+ victim to gadget code by poisoning the branch target buffer of a CPU
+ used for predicting indirect branch addresses.
+ See also <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
+config MITIGATION_SRBDS
+ bool "Mitigate Special Register Buffer Data Sampling (SRBDS) hardware bug"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable mitigation for Special Register Buffer Data Sampling (SRBDS).
+ SRBDS is a hardware vulnerability that allows Microarchitectural Data
+ Sampling (MDS) techniques to infer values returned from special
+ register accesses. An unprivileged user can extract values returned
+ from RDRAND and RDSEED executed on another core or sibling thread
+ using MDS techniques.
+ See also
+ <file:Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst>
+
+config MITIGATION_SSB
+ bool "Mitigate Speculative Store Bypass (SSB) hardware bug"
+ default y
+ help
+ Enable mitigation for Speculative Store Bypass (SSB). SSB is a
+ hardware security vulnerability and its exploitation takes advantage
+ of speculative execution in a similar way to the Meltdown and Spectre
+ security vulnerabilities.
+
endif
config ARCH_HAS_ADD_PAGES
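
Each new per-vulnerability symbol feeds the compile-time default of the matching
mitigation selector in arch/x86/kernel/cpu/bugs.c; the pattern there is an
IS_ENABLED() check, along these lines (MDS shown; enum names as used by the
existing mitigation code):

	static enum mds_mitigations mds_mitigation __ro_after_init =
		IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL
						  : MDS_MITIGATION_OFF;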
@@ -2979,9 +3068,13 @@ config OLPC_XO15_SCI
- AC adapter status updates
- Battery status updates
+config GEODE_COMMON
+ bool
+
config ALIX
bool "PCEngines ALIX System Support (LED setup)"
select GPIOLIB
+ select GEODE_COMMON
help
This option enables system support for the PCEngines ALIX.
At present this just sets up LEDs for GPIO control on
@@ -2996,12 +3089,14 @@ config ALIX
config NET5501
bool "Soekris Engineering net5501 System Support (LEDS, GPIO, etc)"
select GPIOLIB
+ select GEODE_COMMON
help
This option enables system support for the Soekris Engineering net5501.
config GEOS
bool "Traverse Technologies GEOS System Support (LEDS, GPIO, etc)"
select GPIOLIB
+ select GEODE_COMMON
depends on DMI
help
This option enables system support for the Traverse Technologies GEOS.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 801fd85c3ef6..cd75e78a06c1 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -24,11 +24,15 @@ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
ifdef CONFIG_MITIGATION_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETHUNK_RUSTFLAGS := -Zfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+RETPOLINE_RUSTFLAGS += $(RETHUNK_RUSTFLAGS)
endif
export RETHUNK_CFLAGS
+export RETHUNK_RUSTFLAGS
export RETPOLINE_CFLAGS
+export RETPOLINE_RUSTFLAGS
export RETPOLINE_VDSO_CFLAGS
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
@@ -218,9 +222,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_MITIGATION_RETPOLINE
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS)
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
- # clang turns off jump table generation by default when under
+ # LLVM turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86. This has
# only been fixed starting from gcc stable version 8.4.0 and
# onwards, but not for older ones. See gcc bug #86952.
@@ -237,6 +242,10 @@ ifdef CONFIG_CALL_PADDING
PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
KBUILD_CFLAGS += $(PADDING_CFLAGS)
export PADDING_CFLAGS
+
+PADDING_RUSTFLAGS := -Zpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
+KBUILD_RUSTFLAGS += $(PADDING_RUSTFLAGS)
+export PADDING_RUSTFLAGS
endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 944454306ef4..04a35b2c26e9 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -511,7 +511,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
if (init_unaccepted_memory()) {
debug_putstr("Accepting memory... ");
- accept_memory(__pa(output), __pa(output) + needed_size);
+ accept_memory(__pa(output), needed_size);
}
entry_offset = decompress_kernel(output, virt_addr, error);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index b353a7be380c..dd8d1a85f671 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -256,6 +256,6 @@ static inline bool init_unaccepted_memory(void) { return false; }
/* Defined in EFI stub */
extern struct efi_unaccepted_memory *unaccepted_table;
-void accept_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, unsigned long size);
#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index be3ee4294903..aabafa3faa6d 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1,6 +1,2 @@
CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-# CONFIG_HIGHMEM64G is not set
-# CONFIG_UNWINDER_ORC is not set
CONFIG_UNWINDER_GUESS=y
-# CONFIG_UNWINDER_FRAME_POINTER is not set
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 24875e6295f2..7b1bebed879d 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -14,7 +14,7 @@ config CRYPTO_CURVE25519_X86
- ADX (large integer arithmetic)
config CRYPTO_AES_NI_INTEL
- tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)"
+ tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)"
depends on X86
select CRYPTO_AEAD
select CRYPTO_LIB_AES
@@ -25,10 +25,14 @@ config CRYPTO_AES_NI_INTEL
help
Block cipher: AES cipher algorithms
AEAD cipher: AES with GCM
- Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTR, XTS
+ Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS
Architecture: x86 (32-bit and 64-bit) using:
- AES-NI (AES new instructions)
+ - VAES (Vector AES)
+
+ Some algorithm implementations are supported only in 64-bit builds,
+ and some have additional prerequisites such as AVX2 or AVX512.
config CRYPTO_BLOWFISH_X86_64
tristate "Ciphers: Blowfish, modes: ECB, CBC"
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index cd37de5ec404..b0dd83555499 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
err = skcipher_walk_aead_encrypt(&walk, req, false);
else
err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (err)
+ return err;
/*
* Since the AES-GCM assembly code requires that at least three assembly
@@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
/* En/decrypt the data and pass the ciphertext through GHASH. */
- while ((nbytes = walk.nbytes) != 0) {
- if (unlikely(nbytes < walk.total)) {
- /*
- * Non-last segment. In this case, the assembly
- * function requires that the length be a multiple of 16
- * (AES_BLOCK_SIZE) bytes. The needed buffering of up
- * to 16 bytes is handled by the skcipher_walk. Here we
- * just need to round down to a multiple of 16.
- */
- nbytes = round_down(nbytes, AES_BLOCK_SIZE);
- aes_gcm_update(key, le_ctr, ghash_acc,
- walk.src.virt.addr, walk.dst.virt.addr,
- nbytes, flags);
- le_ctr[0] += nbytes / AES_BLOCK_SIZE;
- kernel_fpu_end();
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- kernel_fpu_begin();
- } else {
- /* Last segment: process all remaining data. */
- aes_gcm_update(key, le_ctr, ghash_acc,
- walk.src.virt.addr, walk.dst.virt.addr,
- nbytes, flags);
- err = skcipher_walk_done(&walk, 0);
- /*
- * The low word of the counter isn't used by the
- * finalize, so there's no need to increment it here.
- */
- }
+ while (unlikely((nbytes = walk.nbytes) < walk.total)) {
+ /*
+ * Non-last segment. In this case, the assembly function
+ * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
+ * bytes. The needed buffering of up to 16 bytes is handled by
+ * the skcipher_walk. Here we just need to round down to a
+ * multiple of 16.
+ */
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+ aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, flags);
+ le_ctr[0] += nbytes / AES_BLOCK_SIZE;
+ kernel_fpu_end();
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ if (err)
+ return err;
+ kernel_fpu_begin();
}
- if (err)
- goto out;
+ /* Last segment: process all remaining data. */
+ aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes, flags);
+ /*
+ * The low word of the counter isn't used by the finalize, so there's no
+ * need to increment it here.
+ */
/* Finalize */
taglen = crypto_aead_authsize(tfm);
@@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
datalen, tag, taglen, flags))
err = -EBADMSG;
}
-out:
kernel_fpu_end();
+ if (nbytes)
+ skcipher_walk_done(&walk, 0);
return err;
}
@@ -1753,6 +1750,6 @@ static void __exit aesni_exit(void)
late_initcall(aesni_init);
module_exit(aesni_exit);
-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
+MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
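
The restructured loop above is the canonical skcipher_walk consumer shape: every
segment except the last is trimmed to whole AES blocks, and a skcipher_walk_done()
error now ends the walk immediately. Stripped of the FPU sections and GHASH work,
the pattern is:

	err = skcipher_walk_aead_encrypt(&walk, req, false);
	if (err)
		return err;
	while (walk.nbytes) {
		unsigned int n = walk.nbytes;

		if (n < walk.total)	/* non-final: keep whole blocks */
			n = round_down(n, AES_BLOCK_SIZE);
		/* process n bytes: walk.src.virt.addr -> walk.dst.virt.addr */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
		if (err)
			return err;
	}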
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 0ffb072be956..0bbec1c75cd0 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 0*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 0*32)
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 1*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 1*32)
leaq K256+2*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 2*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 2*32)
leaq K256+3*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 3*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 3*32)
add $4*32, SRND
cmp $3*4*32, SRND
@@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
- DO_4ROUNDS _XFER + 0*32
+ DO_4ROUNDS (_XFER + 0*32)
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
- DO_4ROUNDS _XFER + 1*32
+ DO_4ROUNDS (_XFER + 1*32)
add $2*32, SRND
vmovdqa X2, X0
@@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
xor SRND, SRND
.align 16
.Lloop3:
- DO_4ROUNDS _XFER + 0*32 + 16
- DO_4ROUNDS _XFER + 1*32 + 16
+ DO_4ROUNDS (_XFER + 0*32 + 16)
+ DO_4ROUNDS (_XFER + 1*32 + 16)
add $2*32, SRND
cmp $4*4*32, SRND
jb .Lloop3
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 51cc9c7cb9bd..94941c5a10ac 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -150,7 +150,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline);
#endif
/*
- * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
+ * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
*/
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
diff --git a/arch/x86/entry/vdso/vgetrandom.c b/arch/x86/entry/vdso/vgetrandom.c
index 52d3c7faae2e..430862b8977c 100644
--- a/arch/x86/entry/vdso/vgetrandom.c
+++ b/arch/x86/entry/vdso/vgetrandom.c
@@ -6,8 +6,6 @@
#include "../../../../lib/vdso/getrandom.c"
-ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
-
ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
{
return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 6d83ceb7f1ba..b8fed8b8b9cc 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -38,6 +38,9 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
}
#undef EMIT_VVAR
+DEFINE_VVAR(struct vdso_data, _vdso_data);
+DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data);
+
unsigned int vclocks_used __read_mostly;
#if defined(CONFIG_X86_64)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index be01823b1bb4..65ab6460aed4 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -41,6 +41,8 @@
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>
+#include <asm/uprobes.h>
+#include <asm/ibt.h>
#include "perf_event.h"
@@ -2816,6 +2818,46 @@ static unsigned long get_segment_base(unsigned int segment)
return get_desc_base(desc);
}
+#ifdef CONFIG_UPROBES
+/*
+ * Heuristically check whether a uprobe is installed at the function entry.
+ *
+ * Under the assumption that user code is compiled with frame pointers, a
+ * `push %rbp/%ebp` first instruction is a good indicator that we are
+ * indeed at a function entry.
+ *
+ * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
+ * If we get this wrong, the captured stack trace might have one extra
+ * bogus entry, but the rest of the stack trace will still be meaningful.
+ */
+static bool is_uprobe_at_func_entry(struct pt_regs *regs)
+{
+ struct arch_uprobe *auprobe;
+
+ if (!current->utask)
+ return false;
+
+ auprobe = current->utask->auprobe;
+ if (!auprobe)
+ return false;
+
+ /* push %rbp/%ebp */
+ if (auprobe->insn[0] == 0x55)
+ return true;
+
+ /* endbr64 (64-bit only) */
+ if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn))
+ return true;
+
+ return false;
+}
+
+#else
+static bool is_uprobe_at_func_entry(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_UPROBES */
+
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
@@ -2827,6 +2869,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const struct stack_frame_ia32 __user *fp;
+ u32 ret_addr;
if (user_64bit_mode(regs))
return 0;
@@ -2836,6 +2879,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
fp = compat_ptr(ss_base + regs->bp);
pagefault_disable();
+
+ /* see perf_callchain_user() below for why we do this */
+ if (is_uprobe_at_func_entry(regs) &&
+ !get_user(ret_addr, (const u32 __user *)regs->sp))
+ perf_callchain_store(entry, ret_addr);
+
while (entry->nr < entry->max_stack) {
if (!valid_user_frame(fp, sizeof(frame)))
break;
@@ -2864,6 +2913,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
{
struct stack_frame frame;
const struct stack_frame __user *fp;
+ unsigned long ret_addr;
if (perf_guest_state()) {
/* TODO: We don't support guest os callchain now */
@@ -2887,6 +2937,19 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
return;
pagefault_disable();
+
+ /*
+ * If we are called from uprobe handler, and we are indeed at the very
+ * entry to user function (which is normally a `push %rbp` instruction,
+ * under assumption of application being compiled with frame pointers),
+ * we should read return address from *regs->sp before proceeding
+ * to follow frame pointers, otherwise we'll skip immediate caller
+ * as %rbp is not yet setup.
+ */
+ if (is_uprobe_at_func_entry(regs) &&
+ !get_user(ret_addr, (const unsigned long __user *)regs->sp))
+ perf_callchain_store(entry, ret_addr);
+
while (entry->nr < entry->max_stack) {
if (!valid_user_frame(fp, sizeof(frame)))
break;
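
To make the heuristic concrete: when the uprobe sits on a function's first
instruction, the frame-pointer chain does not yet include the current frame, so
without the fixup the unwinder starts one caller too high. A comment-style sketch
of the stack at that moment (addresses hypothetical):

	/*
	 *   regs->sp -> return address into foo()'s caller  <- reachable only
	 *               via regs->sp; "push %rbp" hasn't run yet
	 *   regs->bp -> caller's frame, where the %rbp walk begins
	 *
	 * Hence the explicit get_user(ret_addr, regs->sp) store above, on
	 * both the 32-bit and 64-bit unwind paths.
	 */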
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 974e917e65b2..8f78b0c900ef 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -557,9 +557,6 @@ static int bts_event_init(struct perf_event *event)
* disabled, so disallow intel_bts driver for unprivileged
* users on paranoid systems since it provides trace data
* to the user in a zero-copy fashion.
- *
- * Note that the default paranoia setting permits unprivileged
- * users to profile the kernel.
*/
if (event->attr.exclude_kernel) {
ret = perf_allow_kernel(&event->attr);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9e519d8a810a..d879478db3f5 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
x86_pmu.pebs_aliases(event);
}
- if (needs_branch_stack(event) && is_sampling_event(event))
- event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+ if (needs_branch_stack(event)) {
+ /* Avoid branch stack setup for counting events in SAMPLE READ */
+ if (is_sampling_event(event) ||
+ !(event->attr.sample_type & PERF_SAMPLE_READ))
+ event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+ }
if (branch_sample_counters(event)) {
struct perf_event *leader, *sibling;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9f116dfc4728..ae4ec16156bb 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -128,10 +128,6 @@ static ssize_t __cstate_##_var##_show(struct device *dev, \
static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
/* Model -> events mapping */
struct cstate_model {
unsigned long core_events;
@@ -206,22 +202,9 @@ static struct attribute_group cstate_format_attr_group = {
.attrs = cstate_format_attrs,
};
-static cpumask_t cstate_core_cpu_mask;
-static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
-
-static struct attribute *cstate_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
- .attrs = cstate_cpumask_attrs,
-};
-
static const struct attribute_group *cstate_attr_groups[] = {
&cstate_events_attr_group,
&cstate_format_attr_group,
- &cpumask_attr_group,
NULL,
};
@@ -269,8 +252,6 @@ static struct perf_msr pkg_msr[] = {
[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
};
-static cpumask_t cstate_pkg_cpu_mask;
-
/* cstate_module PMU */
static struct pmu cstate_module_pmu;
static bool has_cstate_module;
@@ -291,28 +272,9 @@ static struct perf_msr module_msr[] = {
[PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr },
};
-static cpumask_t cstate_module_cpu_mask;
-
-static ssize_t cstate_get_attr_cpumask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pmu *pmu = dev_get_drvdata(dev);
-
- if (pmu == &cstate_core_pmu)
- return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
- else if (pmu == &cstate_pkg_pmu)
- return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
- else if (pmu == &cstate_module_pmu)
- return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask);
- else
- return 0;
-}
-
static int cstate_pmu_event_init(struct perf_event *event)
{
u64 cfg = event->attr.config;
- int cpu;
if (event->attr.type != event->pmu->type)
return -ENOENT;
@@ -331,20 +293,13 @@ static int cstate_pmu_event_init(struct perf_event *event)
if (!(core_msr_mask & (1 << cfg)))
return -EINVAL;
event->hw.event_base = core_msr[cfg].msr;
- cpu = cpumask_any_and(&cstate_core_cpu_mask,
- topology_sibling_cpumask(event->cpu));
} else if (event->pmu == &cstate_pkg_pmu) {
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
return -EINVAL;
cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
if (!(pkg_msr_mask & (1 << cfg)))
return -EINVAL;
-
- event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
-
event->hw.event_base = pkg_msr[cfg].msr;
- cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
- topology_die_cpumask(event->cpu));
} else if (event->pmu == &cstate_module_pmu) {
if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX)
return -EINVAL;
@@ -352,16 +307,10 @@ static int cstate_pmu_event_init(struct perf_event *event)
if (!(module_msr_mask & (1 << cfg)))
return -EINVAL;
event->hw.event_base = module_msr[cfg].msr;
- cpu = cpumask_any_and(&cstate_module_cpu_mask,
- topology_cluster_cpumask(event->cpu));
} else {
return -ENOENT;
}
- if (cpu >= nr_cpu_ids)
- return -ENODEV;
-
- event->cpu = cpu;
event->hw.config = cfg;
event->hw.idx = -1;
return 0;
@@ -412,84 +361,6 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
return 0;
}
-/*
- * Check if exiting cpu is the designated reader. If so migrate the
- * events when there is a valid target available
- */
-static int cstate_cpu_exit(unsigned int cpu)
-{
- unsigned int target;
-
- if (has_cstate_core &&
- cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
-
- target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
- /* Migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &cstate_core_cpu_mask);
- perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
- }
- }
-
- if (has_cstate_pkg &&
- cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
-
- target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
- /* Migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
- perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
- }
- }
-
- if (has_cstate_module &&
- cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) {
-
- target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu);
- /* Migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &cstate_module_cpu_mask);
- perf_pmu_migrate_context(&cstate_module_pmu, cpu, target);
- }
- }
- return 0;
-}
-
-static int cstate_cpu_init(unsigned int cpu)
-{
- unsigned int target;
-
- /*
- * If this is the first online thread of that core, set it in
- * the core cpu mask as the designated reader.
- */
- target = cpumask_any_and(&cstate_core_cpu_mask,
- topology_sibling_cpumask(cpu));
-
- if (has_cstate_core && target >= nr_cpu_ids)
- cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
-
- /*
- * If this is the first online thread of that package, set it
- * in the package cpu mask as the designated reader.
- */
- target = cpumask_any_and(&cstate_pkg_cpu_mask,
- topology_die_cpumask(cpu));
- if (has_cstate_pkg && target >= nr_cpu_ids)
- cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-
- /*
- * If this is the first online thread of that cluster, set it
- * in the cluster cpu mask as the designated reader.
- */
- target = cpumask_any_and(&cstate_module_cpu_mask,
- topology_cluster_cpumask(cpu));
- if (has_cstate_module && target >= nr_cpu_ids)
- cpumask_set_cpu(cpu, &cstate_module_cpu_mask);
-
- return 0;
-}
-
static const struct attribute_group *core_attr_update[] = {
&group_cstate_core_c1,
&group_cstate_core_c3,
@@ -526,6 +397,7 @@ static struct pmu cstate_core_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+ .scope = PERF_PMU_SCOPE_CORE,
.module = THIS_MODULE,
};
@@ -541,6 +413,7 @@ static struct pmu cstate_pkg_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+ .scope = PERF_PMU_SCOPE_PKG,
.module = THIS_MODULE,
};
@@ -556,6 +429,7 @@ static struct pmu cstate_module_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
+ .scope = PERF_PMU_SCOPE_CLUSTER,
.module = THIS_MODULE,
};
@@ -810,9 +684,6 @@ static int __init cstate_probe(const struct cstate_model *cm)
static inline void cstate_cleanup(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
-
if (has_cstate_core)
perf_pmu_unregister(&cstate_core_pmu);
@@ -827,11 +698,6 @@ static int __init cstate_init(void)
{
int err;
- cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
- "perf/x86/cstate:starting", cstate_cpu_init, NULL);
- cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- "perf/x86/cstate:online", NULL, cstate_cpu_exit);
-
if (has_cstate_core) {
err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
if (err) {
@@ -844,6 +710,8 @@ static int __init cstate_init(void)
if (has_cstate_pkg) {
if (topology_max_dies_per_package() > 1) {
+			/* CLX-AP is multi-die and its cstate counters are die scope */
+ cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE;
err = perf_pmu_register(&cstate_pkg_pmu,
"cstate_die", -1);
} else {
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index b4aa8daa4773..fd4670a6694e 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -416,7 +416,7 @@ static bool pt_event_valid(struct perf_event *event)
static void pt_config_start(struct perf_event *event)
{
struct pt *pt = this_cpu_ptr(&pt_ctx);
- u64 ctl = event->hw.config;
+ u64 ctl = event->hw.aux_config;
ctl |= RTIT_CTL_TRACEEN;
if (READ_ONCE(pt->vmx_on))
@@ -424,7 +424,7 @@ static void pt_config_start(struct perf_event *event)
else
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
- WRITE_ONCE(event->hw.config, ctl);
+ WRITE_ONCE(event->hw.aux_config, ctl);
}
/* Address ranges and their corresponding msr configuration registers */
@@ -503,7 +503,7 @@ static void pt_config(struct perf_event *event)
u64 reg;
/* First round: clear STATUS, in particular the PSB byte counter. */
- if (!event->hw.config) {
+ if (!event->hw.aux_config) {
perf_event_itrace_started(event);
wrmsrl(MSR_IA32_RTIT_STATUS, 0);
}
@@ -533,14 +533,14 @@ static void pt_config(struct perf_event *event)
reg |= (event->attr.config & PT_CONFIG_MASK);
- event->hw.config = reg;
+ event->hw.aux_config = reg;
pt_config_start(event);
}
static void pt_config_stop(struct perf_event *event)
{
struct pt *pt = this_cpu_ptr(&pt_ctx);
- u64 ctl = READ_ONCE(event->hw.config);
+ u64 ctl = READ_ONCE(event->hw.aux_config);
/* may be already stopped by a PMI */
if (!(ctl & RTIT_CTL_TRACEEN))
@@ -550,7 +550,7 @@ static void pt_config_stop(struct perf_event *event)
if (!READ_ONCE(pt->vmx_on))
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
- WRITE_ONCE(event->hw.config, ctl);
+ WRITE_ONCE(event->hw.aux_config, ctl);
/*
* A wrmsr that disables trace generation serializes other PT
@@ -1557,7 +1557,7 @@ void intel_pt_handle_vmx(int on)
/* Turn PTs back on */
if (!on && event)
- wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);
+ wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config);
local_irq_restore(flags);
}
@@ -1606,6 +1606,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
* see comment in intel_pt_interrupt().
*/
WRITE_ONCE(pt->handle_nmi, 0);
+ barrier();
pt_config_stop(event);
@@ -1657,11 +1658,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
return 0;
/*
- * Here, handle_nmi tells us if the tracing is on
+ * There is no PT interrupt in this mode, so stop the trace and it will
+ * remain stopped while the buffer is copied.
*/
- if (READ_ONCE(pt->handle_nmi))
- pt_config_stop(event);
-
+ pt_config_stop(event);
pt_read_offset(buf);
pt_update_head(pt);
@@ -1673,11 +1673,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
ret = perf_output_copy_aux(&pt->handle, handle, from, to);
/*
- * If the tracing was on when we turned up, restart it.
- * Compiler barrier not needed as we couldn't have been
- * preempted by anything that touches pt->handle_nmi.
+ * Here, handle_nmi tells us if the tracing was on.
+ * If the tracing was on, restart it.
*/
- if (pt->handle_nmi)
+ if (READ_ONCE(pt->handle_nmi))
pt_config_start(event);
return ret;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 64ca8625eb58..d98fac567684 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1816,6 +1816,11 @@ static const struct intel_uncore_init_fun mtl_uncore_init __initconst = {
.mmio_init = adl_uncore_mmio_init,
};
+static const struct intel_uncore_init_fun lnl_uncore_init __initconst = {
+ .cpu_init = lnl_uncore_cpu_init,
+ .mmio_init = lnl_uncore_mmio_init,
+};
+
static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
.cpu_init = icx_uncore_cpu_init,
.pci_init = icx_uncore_pci_init,
@@ -1893,6 +1898,10 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_uncore_init),
X86_MATCH_VFM(INTEL_METEORLAKE, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 027ef292c602..79ff32e13dcc 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -611,10 +611,12 @@ void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
+void lnl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
+void lnl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 9462fd9f3b7a..3934e1e4e3b1 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -252,6 +252,7 @@ DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
+DEFINE_UNCORE_FORMAT_ATTR(threshold2, threshold, "config:24-31");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -746,6 +747,34 @@ void mtl_uncore_cpu_init(void)
uncore_msr_uncores = mtl_msr_uncores;
}
+static struct intel_uncore_type *lnl_msr_uncores[] = {
+ &mtl_uncore_cbox,
+ &mtl_uncore_arb,
+ NULL
+};
+
+#define LNL_UNC_MSR_GLOBAL_CTL 0x240e
+
+static void lnl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+}
+
+static struct intel_uncore_ops lnl_uncore_msr_ops = {
+ .init_box = lnl_uncore_msr_init_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+void lnl_uncore_cpu_init(void)
+{
+ mtl_uncore_cbox.num_boxes = 4;
+ mtl_uncore_cbox.ops = &lnl_uncore_msr_ops;
+ uncore_msr_uncores = lnl_msr_uncores;
+}
+
enum {
SNB_PCI_UNCORE_IMC,
};
@@ -1475,39 +1504,45 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void)
ids++;
}
+	/* Just try to grab the 00:00.0 device */
+ if (!mc_dev)
+ mc_dev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+
return mc_dev;
}
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000
-static void __uncore_imc_init_box(struct intel_uncore_box *box,
- unsigned int base_offset)
+static void
+uncore_get_box_mmio_addr(struct intel_uncore_box *box,
+ unsigned int base_offset,
+ int bar_offset, int step)
{
struct pci_dev *pdev = tgl_uncore_get_mc_dev();
struct intel_uncore_pmu *pmu = box->pmu;
struct intel_uncore_type *type = pmu->type;
resource_size_t addr;
- u32 mch_bar;
+ u32 bar;
if (!pdev) {
pr_warn("perf uncore: Cannot find matched IMC device.\n");
return;
}
- pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
- /* MCHBAR is disabled */
- if (!(mch_bar & BIT(0))) {
- pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
+ pci_read_config_dword(pdev, bar_offset, &bar);
+ if (!(bar & BIT(0))) {
+ pr_warn("perf uncore: BAR 0x%x is disabled. Failed to map %s counters.\n",
+ bar_offset, type->name);
pci_dev_put(pdev);
return;
}
- mch_bar &= ~BIT(0);
- addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
+ bar &= ~BIT(0);
+ addr = (resource_size_t)(bar + step * pmu->pmu_idx);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
- addr |= ((resource_size_t)mch_bar << 32);
+ pci_read_config_dword(pdev, bar_offset + 4, &bar);
+ addr |= ((resource_size_t)bar << 32);
#endif
addr += base_offset;
@@ -1518,6 +1553,14 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
pci_dev_put(pdev);
}
+static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ unsigned int base_offset)
+{
+ uncore_get_box_mmio_addr(box, base_offset,
+ SNB_UNCORE_PCI_IMC_BAR_OFFSET,
+ TGL_UNCORE_MMIO_IMC_MEM_OFFSET);
+}
+
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
__uncore_imc_init_box(box, 0);
@@ -1612,14 +1655,17 @@ static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}
+#define MMIO_UNCORE_COMMON_OPS() \
+ .exit_box = uncore_mmio_exit_box, \
+ .disable_box = adl_uncore_mmio_disable_box, \
+ .enable_box = adl_uncore_mmio_enable_box, \
+ .disable_event = intel_generic_uncore_mmio_disable_event, \
+ .enable_event = intel_generic_uncore_mmio_enable_event, \
+ .read_counter = uncore_mmio_read_counter,
+
static struct intel_uncore_ops adl_uncore_mmio_ops = {
.init_box = adl_uncore_imc_init_box,
- .exit_box = uncore_mmio_exit_box,
- .disable_box = adl_uncore_mmio_disable_box,
- .enable_box = adl_uncore_mmio_enable_box,
- .disable_event = intel_generic_uncore_mmio_disable_event,
- .enable_event = intel_generic_uncore_mmio_enable_event,
- .read_counter = uncore_mmio_read_counter,
+ MMIO_UNCORE_COMMON_OPS()
};
#define ADL_UNC_CTL_CHMASK_MASK 0x00000f00
@@ -1703,3 +1749,108 @@ void adl_uncore_mmio_init(void)
}
/* end of Alder Lake MMIO uncore support */
+
+/* Lunar Lake MMIO uncore support */
+#define LNL_UNCORE_PCI_SAFBAR_OFFSET 0x68
+#define LNL_UNCORE_MAP_SIZE 0x1000
+#define LNL_UNCORE_SNCU_BASE 0xE4B000
+#define LNL_UNCORE_SNCU_CTR 0x390
+#define LNL_UNCORE_SNCU_CTRL 0x398
+#define LNL_UNCORE_SNCU_BOX_CTL 0x380
+#define LNL_UNCORE_GLOBAL_CTL 0x700
+#define LNL_UNCORE_HBO_BASE 0xE54000
+#define LNL_UNCORE_HBO_OFFSET -4096
+#define LNL_UNCORE_HBO_CTR 0x570
+#define LNL_UNCORE_HBO_CTRL 0x550
+#define LNL_UNCORE_HBO_BOX_CTL 0x548
+
+#define LNL_UNC_CTL_THRESHOLD 0xff000000
+#define LNL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ LNL_UNC_CTL_THRESHOLD)
+
+static struct attribute *lnl_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_threshold2.attr,
+ NULL
+};
+
+static const struct attribute_group lnl_uncore_format_group = {
+ .name = "format",
+ .attrs = lnl_uncore_formats_attr,
+};
+
+static void lnl_uncore_hbo_init_box(struct intel_uncore_box *box)
+{
+ uncore_get_box_mmio_addr(box, LNL_UNCORE_HBO_BASE,
+ LNL_UNCORE_PCI_SAFBAR_OFFSET,
+ LNL_UNCORE_HBO_OFFSET);
+}
+
+static struct intel_uncore_ops lnl_uncore_hbo_ops = {
+ .init_box = lnl_uncore_hbo_init_box,
+ MMIO_UNCORE_COMMON_OPS()
+};
+
+static struct intel_uncore_type lnl_uncore_hbo = {
+ .name = "hbo",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 64,
+ .perf_ctr = LNL_UNCORE_HBO_CTR,
+ .event_ctl = LNL_UNCORE_HBO_CTRL,
+ .event_mask = LNL_UNC_RAW_EVENT_MASK,
+ .box_ctl = LNL_UNCORE_HBO_BOX_CTL,
+ .mmio_map_size = LNL_UNCORE_MAP_SIZE,
+ .ops = &lnl_uncore_hbo_ops,
+ .format_group = &lnl_uncore_format_group,
+};
+
+static void lnl_uncore_sncu_init_box(struct intel_uncore_box *box)
+{
+ uncore_get_box_mmio_addr(box, LNL_UNCORE_SNCU_BASE,
+ LNL_UNCORE_PCI_SAFBAR_OFFSET,
+ 0);
+
+ if (box->io_addr)
+ writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + LNL_UNCORE_GLOBAL_CTL);
+}
+
+static struct intel_uncore_ops lnl_uncore_sncu_ops = {
+ .init_box = lnl_uncore_sncu_init_box,
+ MMIO_UNCORE_COMMON_OPS()
+};
+
+static struct intel_uncore_type lnl_uncore_sncu = {
+ .name = "sncu",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 64,
+ .perf_ctr = LNL_UNCORE_SNCU_CTR,
+ .event_ctl = LNL_UNCORE_SNCU_CTRL,
+ .event_mask = LNL_UNC_RAW_EVENT_MASK,
+ .box_ctl = LNL_UNCORE_SNCU_BOX_CTL,
+ .mmio_map_size = LNL_UNCORE_MAP_SIZE,
+ .ops = &lnl_uncore_sncu_ops,
+ .format_group = &lnl_uncore_format_group,
+};
+
+static struct intel_uncore_type *lnl_mmio_uncores[] = {
+ &adl_uncore_imc,
+ &lnl_uncore_hbo,
+ &lnl_uncore_sncu,
+ &adl_uncore_imc_free_running,
+ NULL
+};
+
+void lnl_uncore_mmio_init(void)
+{
+ uncore_mmio_uncores = lnl_mmio_uncores;
+}
+
+/* end of Lunar Lake MMIO uncore support */
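MMIO_UNCORE_COMMON_OPS() factors the six callbacks shared by the Alder Lake and Lunar Lake MMIO ops tables into one macro, so each uncore type only spells out its unit-specific init_box. A minimal userspace sketch of the designated-initializers-from-a-macro pattern (all names hypothetical, not the kernel structures):

    #include <stdio.h>

    struct ops {
    	void (*init)(void);
    	void (*enable)(void);
    	void (*disable)(void);
    };

    /* Shared callbacks expanded into each ops table as designated initializers. */
    #define COMMON_OPS() \
    	.enable  = common_enable, \
    	.disable = common_disable,

    static void common_enable(void)  { puts("enable"); }
    static void common_disable(void) { puts("disable"); }
    static void unit_a_init(void)    { puts("init A"); }
    static void unit_b_init(void)    { puts("init B"); }

    static struct ops unit_a = { .init = unit_a_init, COMMON_OPS() };
    static struct ops unit_b = { .init = unit_b_init, COMMON_OPS() };

    int main(void)
    {
    	unit_a.init();
    	unit_b.init();
    	unit_a.enable();
    	return 0;
    }

Each table gets its own init_box while the shared members stay defined in exactly one place, which is what keeps the ADL and LNL ops tables from drifting apart.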
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index b985ca79cf97..a481a939862e 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = { \
.event_str = str, \
};
+/*
+ * RAPL Package energy counter scope:
+ * 1. AMD/HYGON platforms have a per-PKG package energy counter
+ * 2. For Intel platforms
+ * 2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
+ * 2.2. Other Intel platforms are single-die systems, so the scope can be
+ * considered either pkg-scope or die-scope; we treat them as
+ * die-scope.
+ */
+#define rapl_pmu_is_pkg_scope() \
+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+
struct rapl_pmu {
raw_spinlock_t lock;
int n_active;
@@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static struct perf_msr *rapl_msrs;
+/*
+ * Helper functions to get the correct topology macros according to the
+ * RAPL PMU scope.
+ */
+static inline unsigned int get_rapl_pmu_idx(int cpu)
+{
+ return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
+ topology_logical_die_id(cpu);
+}
+
+static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
+{
+ return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
+ topology_die_cpumask(cpu);
+}
+
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
+ unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
/*
* The unsigned check also catches the '-1' return value for non
@@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu)
pmu->cpu = -1;
/* Find a new cpu to collect rapl events */
- target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+ target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
/* Migrate rapl events to the new target */
if (target < nr_cpu_ids) {
@@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu)
static int rapl_cpu_online(unsigned int cpu)
{
+ s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+ if (rapl_pmu_idx < 0) {
+ pr_err("topology_logical_(package/die)_id() returned a negative value\n");
+ return -EINVAL;
+ }
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
@@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu)
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
rapl_hrtimer_init(pmu);
- rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
+ rapl_pmus->pmus[rapl_pmu_idx] = pmu;
}
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
*/
- target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
+ target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
@@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = {
static int __init init_rapl_pmus(void)
{
- int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
+ int nr_rapl_pmu = topology_max_packages();
+
+ if (!rapl_pmu_is_pkg_scope())
+ nr_rapl_pmu *= topology_max_dies_per_package();
rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
if (!rapl_pmus)
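get_rapl_pmu_idx() and get_rapl_pmu_cpumask() put the pkg-vs-die decision in a single place, so the hotplug paths above no longer hardcode topology_die_*(). A standalone sketch of the dispatch (the topology stand-ins and their values are invented, not the kernel helpers):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's topology accessors (values invented). */
    static int logical_package_id(int cpu) { return cpu / 8; }
    static int logical_die_id(int cpu)     { return cpu / 4; }

    static bool rapl_is_pkg_scope;	/* true on AMD/Hygon in the real code */

    static int rapl_pmu_idx(int cpu)
    {
    	return rapl_is_pkg_scope ? logical_package_id(cpu)
    				 : logical_die_id(cpu);
    }

    int main(void)
    {
    	rapl_is_pkg_scope = true;
    	printf("cpu 5 -> pmu %d (pkg scope)\n", rapl_pmu_idx(5));
    	rapl_is_pkg_scope = false;
    	printf("cpu 5 -> pmu %d (die scope)\n", rapl_pmu_idx(5));
    	return 0;
    }

With the helper in place, online/offline and init_rapl_pmus() all agree on the indexing, which is what the hunks above rely on.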
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 17a71e92a343..95eada2994e1 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -35,7 +35,6 @@
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
-int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
EXPORT_SYMBOL_GPL(hv_current_partition_id);
@@ -607,8 +606,6 @@ skip_hypercall_pg_init:
register_syscore_ops(&hv_syscore_ops);
- hyperv_init_cpuhp = cpuhp;
-
if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
hv_get_partition_id();
@@ -637,7 +634,7 @@ skip_hypercall_pg_init:
clean_guest_os_id:
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
- cpuhp_remove_state(cpuhp);
+ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
free_percpu(hv_ghcb_pg);
free_vp_assist_page:
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index b4a851d27c7c..60fc3ed72830 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -321,9 +321,9 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
vmsa->efer = native_read_msr(MSR_EFER);
- asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
- asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
- asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+ vmsa->cr4 = native_read_cr4();
+ vmsa->cr3 = __native_read_cr3();
+ vmsa->cr0 = native_read_cr0();
vmsa->xcr0 = 1;
vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
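The ivm.c hunk replaces open-coded "movq %%crN, %%rax" sequences, which needlessly pinned the output to %rax, with the native_read_cr*() helpers. A userspace illustration of letting the compiler pick the destination via an "=r" output constraint (reading %rsp here, since control registers fault outside ring 0; x86-64 only):

    #include <stdint.h>
    #include <stdio.h>

    /* The compiler chooses the register for %0 instead of a forced %rax. */
    static inline uint64_t read_rsp(void)
    {
    	uint64_t val;

    	asm volatile("mov %%rsp, %0" : "=r" (val));
    	return val;
    }

    int main(void)
    {
    	printf("rsp = %#jx\n", (uintmax_t)read_rsp());
    	return 0;
    }

Beyond brevity, the helper style keeps paravirt and instrumentation handling for CR reads in one audited place instead of scattered asm.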
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a192bdea69e2..6c23d1661b17 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,3 +11,4 @@ generated-y += xen-hypercalls.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 21bc53f5ed0c..5ab1a4598d00 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -174,6 +174,14 @@ void acpi_generic_reduced_hw_init(void);
void x86_default_set_root_pointer(u64 addr);
u64 x86_default_get_root_pointer(void);
+#ifdef CONFIG_XEN_PV
+/* A Xen PV domain needs special acpi_os_ioremap() handling. */
+extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
+ acpi_size size);
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 9327eb00e96d..f21ff1932699 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -18,6 +18,11 @@
#define ARCH_APICTIMER_STOPS_ON_C3 1
+/* Macros for apic_extnmi which controls external NMI masking */
+#define APIC_EXTNMI_BSP 0 /* Default */
+#define APIC_EXTNMI_ALL 1
+#define APIC_EXTNMI_NONE 2
+
/*
* Debugging macros
*/
@@ -25,22 +30,22 @@
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
-/* Macros for apic_extnmi which controls external NMI masking */
-#define APIC_EXTNMI_BSP 0 /* Default */
-#define APIC_EXTNMI_ALL 1
-#define APIC_EXTNMI_NONE 2
-
/*
- * Define the default level of output to be very little
- * This can be turned up by using apic=verbose for more
- * information and apic=debug for _lots_ of information.
- * apic_verbosity is defined in apic.c
+ * Define the default level of output to be very little. This can be turned
+ * up by using apic=verbose for more information and apic=debug for _lots_
+ * of information. apic_verbosity is defined in apic.c.
*/
-#define apic_printk(v, s, a...) do { \
- if ((v) <= apic_verbosity) \
- printk(s, ##a); \
- } while (0)
-
+#define apic_printk(v, s, a...) \
+do { \
+ if ((v) <= apic_verbosity) \
+ printk(s, ##a); \
+} while (0)
+
+#define apic_pr_verbose(s, a...) apic_printk(APIC_VERBOSE, KERN_INFO s, ##a)
+#define apic_pr_debug(s, a...) apic_printk(APIC_DEBUG, KERN_DEBUG s, ##a)
+#define apic_pr_debug_cont(s, a...) apic_printk(APIC_DEBUG, KERN_CONT s, ##a)
+/* Unconditional debug prints for code which is guarded by apic_verbosity already */
+#define apic_dbg(s, a...) printk(KERN_DEBUG s, ##a)
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void x86_32_probe_apic(void);
@@ -122,8 +127,6 @@ static inline bool apic_is_x2apic_enabled(void)
extern void enable_IR_x2apic(void);
-extern int get_physical_broadcast(void);
-
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
@@ -345,20 +348,12 @@ extern struct apic *apic;
* APIC drivers are probed based on how they are listed in the .apicdrivers
* section. So the order is important and enforced by the ordering
* of different apic driver files in the Makefile.
- *
- * For the files having two apic drivers, we use apic_drivers()
- * to enforce the order with in them.
*/
#define apic_driver(sym) \
static const struct apic *__apicdrivers_##sym __used \
__aligned(sizeof(struct apic *)) \
__section(".apicdrivers") = { &sym }
-#define apic_drivers(sym1, sym2) \
- static struct apic *__apicdrivers_##sym1##sym2[2] __used \
- __aligned(sizeof(struct apic *)) \
- __section(".apicdrivers") = { &sym1, &sym2 }
-
extern struct apic *__apicdrivers[], *__apicdrivers_end[];
/*
@@ -484,7 +479,6 @@ static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
-static inline void apic_set_eoi_cb(void (*eoi)(void)) {}
static inline void apic_native_eoi(void) { WARN_ON_ONCE(1); }
static inline void apic_setup_apic_calls(void) { }
@@ -512,8 +506,6 @@ static inline bool is_vector_pending(unsigned int vector)
#define TRAMPOLINE_PHYS_LOW 0x467
#define TRAMPOLINE_PHYS_HIGH 0x469
-extern void generic_bigsmp_probe(void);
-
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
@@ -536,8 +528,6 @@ static inline int default_acpi_madt_oem_check(char *a, char *b) { return 0; }
static inline void x86_64_probe_apic(void) { }
#endif
-extern int default_apic_id_valid(u32 apicid);
-
extern u32 apic_default_calc_apicid(unsigned int cpu);
extern u32 apic_flat_calc_apicid(unsigned int cpu);
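The new apic_pr_*() wrappers bind the verbosity threshold and the printk level together so call sites cannot mismatch them. A compilable userspace mock of the mechanism (printf stands in for printk; these are not the kernel macros themselves):

    #include <stdio.h>

    #define APIC_QUIET   0
    #define APIC_VERBOSE 1
    #define APIC_DEBUG   2

    static int apic_verbosity = APIC_VERBOSE;

    #define apic_printk(v, s, ...)			\
    do {						\
    	if ((v) <= apic_verbosity)		\
    		printf(s, ##__VA_ARGS__);	\
    } while (0)

    #define apic_pr_verbose(s, ...) apic_printk(APIC_VERBOSE, s, ##__VA_ARGS__)
    #define apic_pr_debug(s, ...)   apic_printk(APIC_DEBUG, s, ##__VA_ARGS__)

    int main(void)
    {
    	apic_pr_verbose("mapped APIC to %#x\n", 0xfee00000);
    	apic_pr_debug("suppressed at this verbosity: %d\n", 42);
    	return 0;
    }

At the default APIC_VERBOSE level the first line prints and the second is compiled-in but suppressed, matching the apic=verbose / apic=debug behavior the comment describes.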
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index a3ec87d198ac..806649c7f23d 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -13,6 +13,18 @@
#define INSN_UD2 0x0b0f
#define LEN_UD2 2
+/*
+ * With clang, UBSAN failures on x86 (both 64-bit and 32-bit) are
+ * reported via UD1 instructions.
+ */
+#define INSN_ASOP 0x67
+#define OPCODE_ESCAPE 0x0f
+#define SECOND_BYTE_OPCODE_UD1 0xb9
+#define SECOND_BYTE_OPCODE_UD2 0x0b
+
+#define BUG_NONE 0xffff
+#define BUG_UD1 0xfffe
+#define BUG_UD2 0xfffd
+
#ifdef CONFIG_GENERIC_BUG
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index 3831f612e89c..e4121d9aa9e1 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -193,26 +193,6 @@
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
/**
- * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
- * @model: The model name without the INTEL_FAM6_ prefix or ANY
- * The model name is expanded to INTEL_FAM6_@model internally
- * @data: Driver specific data or NULL. The internal storage
- * format is unsigned long. The supplied value, pointer
- * etc. is casted to unsigned long internally.
- *
- * The vendor is set to INTEL, the family to 6 and all other missing
- * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
- *
- * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
- */
-#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
- X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
-
-#define X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(model, steppings, data) \
- X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- steppings, X86_FEATURE_ANY, data)
-
-/**
* X86_MATCH_VFM - Match encoded vendor/family/model
* @vfm: Encoded 8-bits each for vendor, family, model
* @data: Driver specific data or NULL. The internal storage
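With the FAM6 spellings removed, match tables key off the VFM-encoded symbols. A small sketch of the 8-bits-per-field packing the header comment describes (the exact bit positions used here are an assumption for illustration, not quoted from the kernel header):

    #include <stdint.h>
    #include <stdio.h>

    /* Model in bits 0-7, family in bits 8-15, vendor in bits 16-23 (assumed). */
    #define VFM_MAKE(vendor, family, model) \
    	(((uint32_t)(vendor) << 16) | ((uint32_t)(family) << 8) | (uint32_t)(model))

    #define X86_VENDOR_INTEL 0
    #define INTEL_SKYLAKE_X  VFM_MAKE(X86_VENDOR_INTEL, 6, 0x55)

    int main(void)
    {
    	printf("INTEL_SKYLAKE_X = %#06x (family %u, model 0x%02x)\n",
    	       INTEL_SKYLAKE_X, (INTEL_SKYLAKE_X >> 8) & 0xff,
    	       INTEL_SKYLAKE_X & 0xff);
    	return 0;
    }

Packing vendor, family, and model into one integer is what lets a single X86_MATCH_VFM(INTEL_SKYLAKE_X, data) entry replace the old vendor/family/model triple.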
diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h
index 6b122a31da06..80cc6386d7b1 100644
--- a/arch/x86/include/asm/cpuid.h
+++ b/arch/x86/include/asm/cpuid.h
@@ -196,7 +196,12 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
for_each_possible_hypervisor_cpuid_base(base) {
cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
- if (!memcmp(sig, signature, 12) &&
+ /*
+ * This must not compile to "call memcmp" because it's called
+ * from PVH early boot code before instrumentation is set up
+ * and memcmp() itself may be instrumented.
+ */
+ if (!__builtin_memcmp(sig, signature, 12) &&
(leaves == 0 || ((eax - base) >= leaves)))
return base;
}
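For a constant 12-byte length, compilers expand __builtin_memcmp() into a few inline loads and compares, so the early PVH boot path never reaches an out-of-line, potentially instrumented memcmp(). A standalone sketch of the same compare:

    #include <stdio.h>
    #include <string.h>

    /* Constant length: typically no "call memcmp" is emitted for this. */
    static int sig_matches(const char *sig, const unsigned int leaf[3])
    {
    	return __builtin_memcmp(sig, leaf, 12) == 0;
    }

    int main(void)
    {
    	unsigned int leaf[3];

    	memcpy(leaf, "KVMKVMKVM\0\0", 12);
    	printf("match: %d\n", sig_matches("KVMKVMKVM\0\0", leaf));
    	return 0;
    }

The semantics are identical to memcmp(); only the codegen guarantee differs, which is exactly what the comment in the hunk is after.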
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index fb2809b20b0a..77d20555e04d 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -8,6 +8,7 @@
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>
+#include <asm/fred.h>
/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
@@ -44,8 +45,7 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
}
#define arch_enter_from_user_mode arch_enter_from_user_mode
-static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
- unsigned long ti_work)
+static inline void arch_exit_work(unsigned long ti_work)
{
if (ti_work & _TIF_USER_RETURN_NOTIFY)
fire_user_return_notifiers();
@@ -56,6 +56,15 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
fpregs_assert_state_consistent();
if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
switch_fpu_return();
+}
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (IS_ENABLED(CONFIG_X86_DEBUG_FPU) || unlikely(ti_work))
+ arch_exit_work(ti_work);
+
+ fred_update_rsp0();
#ifdef CONFIG_COMPAT
/*
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index eeed395c3177..a0e0c6b50155 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -37,7 +37,6 @@ struct pt_regs;
extern int fixup_exception(struct pt_regs *regs, int trapnr,
unsigned long error_code, unsigned long fault_addr);
-extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern int ex_get_fixup_type(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
index 611fa41711af..eccc75bc9c4f 100644
--- a/arch/x86/include/asm/fpu/signal.h
+++ b/arch/x86/include/asm/fpu/signal.h
@@ -29,7 +29,7 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
unsigned long fpu__get_fpstate_size(void);
-extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size, u32 pkru);
extern void fpu__clear_user_states(struct fpu *fpu);
extern bool fpu__restore_sig(void __user *buf, int ia32_frame);
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index e86c7ba32435..25ca00bd70e8 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -36,6 +36,7 @@
#ifdef CONFIG_X86_FRED
#include <linux/kernel.h>
+#include <linux/sched/task_stack.h>
#include <asm/ptrace.h>
@@ -84,13 +85,33 @@ static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int
}
void cpu_init_fred_exceptions(void);
+void cpu_init_fred_rsps(void);
void fred_complete_exception_setup(void);
+DECLARE_PER_CPU(unsigned long, fred_rsp0);
+
+static __always_inline void fred_sync_rsp0(unsigned long rsp0)
+{
+ __this_cpu_write(fred_rsp0, rsp0);
+}
+
+static __always_inline void fred_update_rsp0(void)
+{
+ unsigned long rsp0 = (unsigned long) task_stack_page(current) + THREAD_SIZE;
+
+ if (cpu_feature_enabled(X86_FEATURE_FRED) && (__this_cpu_read(fred_rsp0) != rsp0)) {
+ wrmsrns(MSR_IA32_FRED_RSP0, rsp0);
+ __this_cpu_write(fred_rsp0, rsp0);
+ }
+}
#else /* CONFIG_X86_FRED */
static __always_inline unsigned long fred_event_data(struct pt_regs *regs) { return 0; }
static inline void cpu_init_fred_exceptions(void) { }
+static inline void cpu_init_fred_rsps(void) { }
static inline void fred_complete_exception_setup(void) { }
-static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { }
+static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { }
+static inline void fred_sync_rsp0(unsigned long rsp0) { }
+static inline void fred_update_rsp0(void) { }
#endif /* CONFIG_X86_FRED */
#endif /* !__ASSEMBLY__ */
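fred_update_rsp0() keeps a per-CPU shadow of MSR_IA32_FRED_RSP0 and only issues the WRMSRNS when the task stack top actually changed. A userspace sketch of that write-avoidance pattern (the MSR write is stubbed out; in the kernel the shadow is the fred_rsp0 per-CPU variable):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t shadow_rsp0;	/* per-CPU variable in the kernel */
    static int wrmsr_count;

    static void wrmsr_stub(uint64_t val)	/* stands in for wrmsrns() */
    {
    	wrmsr_count++;
    	(void)val;
    }

    static void update_rsp0(uint64_t rsp0)
    {
    	if (shadow_rsp0 != rsp0) {	/* skip the MSR write if unchanged */
    		wrmsr_stub(rsp0);
    		shadow_rsp0 = rsp0;
    	}
    }

    int main(void)
    {
    	update_rsp0(0x1000);	/* writes */
    	update_rsp0(0x1000);	/* skipped: same task stack */
    	update_rsp0(0x2000);	/* writes */
    	printf("MSR writes: %d\n", wrmsr_count);	/* prints 2 */
    	return 0;
    }

Since context switches between threads of the same process keep the same stack page, the cache elides a large fraction of the MSR writes; this is also why switch_to.h above drops its unconditional FRED wrmsrns().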
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index c67fa6ad098a..6ffa8b75f4cd 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -69,7 +69,11 @@ extern u64 arch_irq_stat(void);
#define local_softirq_pending_ref pcpu_hot.softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL)
-static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+/*
+ * This function is called from noinstr interrupt contexts
+ * and must be inlined to not get instrumentation.
+ */
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}
@@ -84,7 +88,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
-static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
#endif /* _ASM_X86_HARDIRQ_H */
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index d4f24499b256..ad5c68f0509d 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -212,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \
irqentry_state_t state = irqentry_enter(regs); \
u32 vector = (u32)(u8)error_code; \
\
+ kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
- kvm_set_cpu_l1tf_flush_l1d(); \
run_irq_on_irqstack_cond(__##func, regs, vector); \
instrumentation_end(); \
irqentry_exit(regs, state); \
@@ -250,7 +250,6 @@ static void __##func(struct pt_regs *regs); \
\
static __always_inline void instr_##func(struct pt_regs *regs) \
{ \
- kvm_set_cpu_l1tf_flush_l1d(); \
run_sysvec_on_irqstack_cond(__##func, regs); \
} \
\
@@ -258,6 +257,7 @@ __visible noinstr void func(struct pt_regs *regs) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
\
+ kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
instr_##func (regs); \
instrumentation_end(); \
@@ -288,7 +288,6 @@ static __always_inline void __##func(struct pt_regs *regs); \
static __always_inline void instr_##func(struct pt_regs *regs) \
{ \
__irq_enter_raw(); \
- kvm_set_cpu_l1tf_flush_l1d(); \
__##func (regs); \
__irq_exit_raw(); \
} \
@@ -297,6 +296,7 @@ __visible noinstr void func(struct pt_regs *regs) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
\
+ kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
instr_##func (regs); \
instrumentation_end(); \
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index f81a851c46dc..44949f972826 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -10,7 +10,7 @@
* that group keep the CPUID for the variants sorted by model number.
*
* The defined symbol names have the following form:
- * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
+ * INTEL_{OPTFAMILY}_{MICROARCH}{OPTDIFF}
* where:
* OPTFAMILY Describes the family of CPUs that this belongs to. Default
* is assumed to be "_CORE" (and should be omitted). Other values
@@ -42,215 +42,136 @@
#define IFM(_fam, _model) VFM_MAKE(X86_VENDOR_INTEL, _fam, _model)
-/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
-#define INTEL_FAM6_ANY X86_MODEL_ANY
-/* Wildcard match for FAM6 so X86_MATCH_VFM(ANY) works */
+/* Wildcard match so X86_MATCH_VFM(ANY) works */
#define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
-#define INTEL_FAM6_CORE_YONAH 0x0E
+#define INTEL_PENTIUM_PRO IFM(6, 0x01)
+
#define INTEL_CORE_YONAH IFM(6, 0x0E)
-#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_CORE2_MEROM IFM(6, 0x0F)
-#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_CORE2_MEROM_L IFM(6, 0x16)
-#define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_CORE2_PENRYN IFM(6, 0x17)
-#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
#define INTEL_CORE2_DUNNINGTON IFM(6, 0x1D)
-#define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_NEHALEM IFM(6, 0x1E)
-#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
#define INTEL_NEHALEM_G IFM(6, 0x1F) /* Auburndale / Havendale */
-#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_NEHALEM_EP IFM(6, 0x1A)
-#define INTEL_FAM6_NEHALEM_EX 0x2E
#define INTEL_NEHALEM_EX IFM(6, 0x2E)
-#define INTEL_FAM6_WESTMERE 0x25
#define INTEL_WESTMERE IFM(6, 0x25)
-#define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_WESTMERE_EP IFM(6, 0x2C)
-#define INTEL_FAM6_WESTMERE_EX 0x2F
#define INTEL_WESTMERE_EX IFM(6, 0x2F)
-#define INTEL_FAM6_SANDYBRIDGE 0x2A
#define INTEL_SANDYBRIDGE IFM(6, 0x2A)
-#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
#define INTEL_SANDYBRIDGE_X IFM(6, 0x2D)
-#define INTEL_FAM6_IVYBRIDGE 0x3A
#define INTEL_IVYBRIDGE IFM(6, 0x3A)
-#define INTEL_FAM6_IVYBRIDGE_X 0x3E
#define INTEL_IVYBRIDGE_X IFM(6, 0x3E)
-#define INTEL_FAM6_HASWELL 0x3C
#define INTEL_HASWELL IFM(6, 0x3C)
-#define INTEL_FAM6_HASWELL_X 0x3F
#define INTEL_HASWELL_X IFM(6, 0x3F)
-#define INTEL_FAM6_HASWELL_L 0x45
#define INTEL_HASWELL_L IFM(6, 0x45)
-#define INTEL_FAM6_HASWELL_G 0x46
#define INTEL_HASWELL_G IFM(6, 0x46)
-#define INTEL_FAM6_BROADWELL 0x3D
#define INTEL_BROADWELL IFM(6, 0x3D)
-#define INTEL_FAM6_BROADWELL_G 0x47
#define INTEL_BROADWELL_G IFM(6, 0x47)
-#define INTEL_FAM6_BROADWELL_X 0x4F
#define INTEL_BROADWELL_X IFM(6, 0x4F)
-#define INTEL_FAM6_BROADWELL_D 0x56
#define INTEL_BROADWELL_D IFM(6, 0x56)
-#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */
#define INTEL_SKYLAKE_L IFM(6, 0x4E) /* Sky Lake */
-#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */
#define INTEL_SKYLAKE IFM(6, 0x5E) /* Sky Lake */
-#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */
#define INTEL_SKYLAKE_X IFM(6, 0x55) /* Sky Lake */
/* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */
/* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */
-#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */
#define INTEL_KABYLAKE_L IFM(6, 0x8E) /* Sky Lake */
/* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */
/* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */
/* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */
-#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */
#define INTEL_KABYLAKE IFM(6, 0x9E) /* Sky Lake */
/* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */
-#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */
#define INTEL_COMETLAKE IFM(6, 0xA5) /* Sky Lake */
-#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */
#define INTEL_COMETLAKE_L IFM(6, 0xA6) /* Sky Lake */
-#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */
#define INTEL_CANNONLAKE_L IFM(6, 0x66) /* Palm Cove */
-#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */
#define INTEL_ICELAKE_X IFM(6, 0x6A) /* Sunny Cove */
-#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */
#define INTEL_ICELAKE_D IFM(6, 0x6C) /* Sunny Cove */
-#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */
#define INTEL_ICELAKE IFM(6, 0x7D) /* Sunny Cove */
-#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */
#define INTEL_ICELAKE_L IFM(6, 0x7E) /* Sunny Cove */
-#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */
#define INTEL_ICELAKE_NNPI IFM(6, 0x9D) /* Sunny Cove */
-#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */
#define INTEL_ROCKETLAKE IFM(6, 0xA7) /* Cypress Cove */
-#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
#define INTEL_TIGERLAKE_L IFM(6, 0x8C) /* Willow Cove */
-#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
#define INTEL_TIGERLAKE IFM(6, 0x8D) /* Willow Cove */
-#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
#define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */
-#define INTEL_FAM6_EMERALDRAPIDS_X 0xCF
#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF)
-#define INTEL_FAM6_GRANITERAPIDS_X 0xAD
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
-#define INTEL_FAM6_GRANITERAPIDS_D 0xAE
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
/* "Hybrid" Processors (P-Core/E-Core) */
-#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
-#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_ALDERLAKE IFM(6, 0x97) /* Golden Cove / Gracemont */
-#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
#define INTEL_ALDERLAKE_L IFM(6, 0x9A) /* Golden Cove / Gracemont */
-#define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */
#define INTEL_RAPTORLAKE IFM(6, 0xB7) /* Raptor Cove / Enhanced Gracemont */
-#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_RAPTORLAKE_P IFM(6, 0xBA)
-#define INTEL_FAM6_RAPTORLAKE_S 0xBF
#define INTEL_RAPTORLAKE_S IFM(6, 0xBF)
-#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_METEORLAKE IFM(6, 0xAC)
-#define INTEL_FAM6_METEORLAKE_L 0xAA
#define INTEL_METEORLAKE_L IFM(6, 0xAA)
-#define INTEL_FAM6_ARROWLAKE_H 0xC5
#define INTEL_ARROWLAKE_H IFM(6, 0xC5)
-#define INTEL_FAM6_ARROWLAKE 0xC6
#define INTEL_ARROWLAKE IFM(6, 0xC6)
-#define INTEL_FAM6_ARROWLAKE_U 0xB5
#define INTEL_ARROWLAKE_U IFM(6, 0xB5)
-#define INTEL_FAM6_LUNARLAKE_M 0xBD
#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
/* "Small Core" Processors (Atom/E-Core) */
-#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
-#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
#define INTEL_ATOM_BONNELL_MID IFM(6, 0x26) /* Silverthorne, Lincroft */
-#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
#define INTEL_ATOM_SALTWELL IFM(6, 0x36) /* Cedarview */
-#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
#define INTEL_ATOM_SALTWELL_MID IFM(6, 0x27) /* Penwell */
-#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
#define INTEL_ATOM_SALTWELL_TABLET IFM(6, 0x35) /* Cloverview */
-#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
#define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */
#define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */
-#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */
#define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */
-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
#define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */
-#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
#define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */
-#define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */
#define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */
-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */
-#define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */
#define INTEL_ATOM_GOLDMONT_D IFM(6, 0x5F) /* Denverton */
/* Note: the micro-architecture is "Goldmont Plus" */
-#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_ATOM_GOLDMONT_PLUS IFM(6, 0x7A) /* Gemini Lake */
-#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */
#define INTEL_ATOM_TREMONT_D IFM(6, 0x86) /* Jacobsville */
-#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
#define INTEL_ATOM_TREMONT IFM(6, 0x96) /* Elkhart Lake */
-#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
#define INTEL_ATOM_TREMONT_L IFM(6, 0x9C) /* Jasper Lake */
-#define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */
#define INTEL_ATOM_GRACEMONT IFM(6, 0xBE) /* Alderlake N */
-#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */
#define INTEL_ATOM_CRESTMONT_X IFM(6, 0xAF) /* Sierra Forest */
-#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */
#define INTEL_ATOM_CRESTMONT IFM(6, 0xB6) /* Grand Ridge */
-#define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */
#define INTEL_ATOM_DARKMONT_X IFM(6, 0xDD) /* Clearwater Forest */
/* Xeon Phi */
-#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */
-#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
#define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */
/* Family 5 */
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index 8046e70dfd7c..43b7657febca 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -10,7 +10,7 @@
#define TELEM_MAX_EVENTS_SRAM 28
#define TELEM_MAX_OS_ALLOCATED_EVENTS 20
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
enum telemetry_unit {
TELEM_PSS = 0,
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 13aea8fc3d45..47051871b436 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -18,8 +18,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... LOCAL_TIMER_VECTOR-1
- * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts
+ * Vectors 129 ... FIRST_SYSTEM_VECTOR-1 : device interrupts
+ * Vectors FIRST_SYSTEM_VECTOR ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3ad29b128943..3b9970117a0f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,7 +221,7 @@ static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
u64 lapic_id) { return -EINVAL; }
#endif
-void mce_setup(struct mce *m);
+void mce_prep_record(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8dac45a2c7fc..2886cb668d7f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -88,7 +88,13 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
- return mm->context.lam_cr3_mask;
+ /*
+ * When switch_mm_irqs_off() is called for a kthread, it may race with
+ * LAM enablement. switch_mm_irqs_off() uses the LAM mask to do two
+ * things: populate CR3 and populate 'cpu_tlbstate.lam'. Make sure it
+ * reads a single value for both.
+ */
+ return READ_ONCE(mm->context.lam_cr3_mask);
}
static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
@@ -232,11 +238,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
}
#endif
-static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
-}
-
/*
* We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other
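The READ_ONCE() added to mm_lam_cr3_mask() forces a single load, so CR3 and cpu_tlbstate.lam are derived from the same value even if LAM enablement races with the mm switch. A minimal sketch of the one-read-feeds-two-consumers idea (READ_ONCE approximated with a volatile cast; the bit position follows X86_CR3_LAM_U57_BIT = 61):

    #include <stdio.h>

    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long lam_cr3_mask;	/* flipped concurrently in the kernel */

    static void switch_mm_sketch(unsigned long *cr3, unsigned long *tlbstate)
    {
    	/* One load feeds both consumers; two separate loads could disagree. */
    	unsigned long lam = READ_ONCE(lam_cr3_mask);

    	*cr3 = 0x1000UL | lam;
    	*tlbstate = lam;
    }

    int main(void)
    {
    	unsigned long cr3, tlb;

    	lam_cr3_mask = 1UL << 61;
    	switch_mm_sketch(&cr3, &tlb);
    	printf("cr3=%#lx lam=%#lx\n", cr3, tlb);
    	return 0;
    }

Without the forced single read, the compiler is free to reload lam_cr3_mask between the two uses and hand CR3 and the TLB state inconsistent views.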
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
deleted file mode 100644
index c41b41edd691..000000000000
--- a/arch/x86/include/asm/mmzone.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_X86_32
-# include <asm/mmzone_32.h>
-#else
-# include <asm/mmzone_64.h>
-#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
deleted file mode 100644
index 2d4515e8b7df..000000000000
--- a/arch/x86/include/asm/mmzone_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
- *
- */
-
-#ifndef _ASM_X86_MMZONE_32_H
-#define _ASM_X86_MMZONE_32_H
-
-#include <asm/smp.h>
-
-#ifdef CONFIG_NUMA
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-#endif /* CONFIG_NUMA */
-
-#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
deleted file mode 100644
index 0c585046f744..000000000000
--- a/arch/x86/include/asm/mmzone_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* K8 NUMA support */
-/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
-/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
-#ifndef _ASM_X86_MMZONE_64_H
-#define _ASM_X86_MMZONE_64_H
-
-#ifdef CONFIG_NUMA
-
-#include <linux/mmdebug.h>
-#include <asm/smp.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif
-#endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 390c4d13956d..5f0bc6a6d025 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
}
#if IS_ENABLED(CONFIG_HYPERV)
-extern int hyperv_init_cpuhp;
extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 82c6a4d350e0..a7c06a46fb76 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -247,6 +247,8 @@
#define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
+#define MSR_INTEGRITY_CAPS_SBAF_BIT 8
+#define MSR_INTEGRITY_CAPS_SBAF BIT(MSR_INTEGRITY_CAPS_SBAF_BIT)
#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9)
#define MSR_LBR_NHM_FROM 0x00000680
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index d642037f9ed5..001853541f1e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -99,19 +99,6 @@ static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
: : "c" (msr), "a"(low), "d" (high) : "memory");
}
-/*
- * WRMSRNS behaves exactly like WRMSR with the only difference being
- * that it is not a serializing instruction by default.
- */
-static __always_inline void __wrmsrns(u32 msr, u32 low, u32 high)
-{
- /* Instruction opcode for WRMSRNS; supported in binutils >= 2.40. */
- asm volatile("1: .byte 0x0f,0x01,0xc6\n"
- "2:\n"
- _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
- : : "c" (msr), "a"(low), "d" (high));
-}
-
#define native_rdmsr(msr, val1, val2) \
do { \
u64 __val = __rdmsr((msr)); \
@@ -312,9 +299,19 @@ do { \
#endif /* !CONFIG_PARAVIRT_XXL */
+/* Instruction opcode for WRMSRNS; supported in binutils >= 2.40 */
+#define WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)
+
+/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
{
- __wrmsrns(msr, val, val >> 32);
+ /*
+ * WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant
+ * DS prefix to avoid a trailing NOP.
+ */
+ asm volatile("1: " ALTERNATIVE("ds wrmsr", WRMSRNS, X86_FEATURE_WRMSRNS)
+ "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
+ : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
}
/*
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 090d658a85a6..4218248083d9 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -69,7 +69,6 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment);
extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
-extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
void mtrr_disable(void);
@@ -117,7 +116,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
return 0;
}
#define mtrr_bp_init() do {} while (0)
-#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
#define mtrr_enable() do {} while (0)
#define mtrr_generic_set_state() do {} while (0)
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index ef2844d69173..5469d7a7c40f 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -10,8 +10,6 @@
#ifdef CONFIG_NUMA
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
extern int numa_off;
/*
@@ -25,9 +23,6 @@ extern int numa_off;
extern s16 __apicid_to_node[MAX_LOCAL_APIC];
extern nodemask_t numa_nodes_parsed __initdata;
-extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-extern void __init numa_set_distance(int from, int to, int distance);
-
static inline void set_apicid_to_node(int apicid, s16 node)
{
__apicid_to_node[apicid] = node;
@@ -54,31 +49,20 @@ static inline int numa_cpu_node(int cpu)
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
-extern void numa_add_cpu(int cpu);
-extern void numa_remove_cpu(int cpu);
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
extern void init_gi_nodes(void);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void init_cpu_to_node(void) { }
-static inline void numa_add_cpu(int cpu) { }
-static inline void numa_remove_cpu(int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void init_gi_nodes(void) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-void debug_cpumask_set_cpu(int cpu, int node, bool enable);
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
#endif
-#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
-#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
-int numa_emu_cmdline(char *str);
-#else /* CONFIG_NUMA_EMU */
-static inline int numa_emu_cmdline(char *str)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_NUMA_EMU */
-
#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e39311a89bf4..4c2d080d26b4 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -120,6 +120,34 @@ extern pmdval_t early_pmd_flags;
#define arch_end_context_switch(prev) do {} while(0)
#endif /* CONFIG_PARAVIRT_XXL */
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v & ~clear);
+}
+
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return native_make_pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return native_make_pud(v & ~clear);
+}
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -174,6 +202,13 @@ static inline int pud_young(pud_t pud)
return pud_flags(pud) & _PAGE_ACCESSED;
}
+static inline bool pud_shstk(pud_t pud)
+{
+ return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
+ (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
+ (_PAGE_DIRTY | _PAGE_PSE);
+}
+
static inline int pte_write(pte_t pte)
{
/*
@@ -310,6 +345,30 @@ static inline int pud_devmap(pud_t pud)
}
#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_SPECIAL;
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_SPECIAL);
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_SPECIAL;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_SPECIAL);
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
+
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
@@ -480,20 +539,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
-static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
-{
- pmdval_t v = native_pmd_val(pmd);
-
- return native_make_pmd(v | set);
-}
-
-static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
-{
- pmdval_t v = native_pmd_val(pmd);
-
- return native_make_pmd(v & ~clear);
-}
-
/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
{
@@ -588,20 +633,6 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite
-static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
-{
- pudval_t v = native_pud_val(pud);
-
- return native_make_pud(v | set);
-}
-
-static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
-{
- pudval_t v = native_pud_val(pud);
-
- return native_make_pud(v & ~clear);
-}
-
/* See comments above mksaveddirty_shift() */
static inline pud_t pud_mksaveddirty(pud_t pud)
{
@@ -780,6 +811,12 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
+static inline pud_t pud_mkinvalid(pud_t pud)
+{
+ return pfn_pud(pud_pfn(pud),
+ __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -827,14 +864,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd_result = __pmd(val);
/*
- * To avoid creating Write=0,Dirty=1 PMDs, pte_modify() needs to avoid:
- * 1. Marking Write=0 PMDs Dirty=1
- * 2. Marking Dirty=1 PMDs Write=0
- *
- * The first case cannot happen because the _PAGE_CHG_MASK will filter
- * out any Dirty bit passed in newprot. Handle the second case by
- * going through the mksaveddirty exercise. Only do this if the old
- * value was Write=1 to avoid doing this on Shadow Stack PTEs.
+ * Avoid creating shadow stack PMD by accident. See comment in
+ * pte_modify().
*/
if (oldval & _PAGE_RW)
pmd_result = pmd_mksaveddirty(pmd_result);
@@ -844,6 +875,29 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pmd_result;
}
+static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
+{
+ pudval_t val = pud_val(pud), oldval = val;
+ pud_t pud_result;
+
+ val &= _HPAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+ val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);
+
+ pud_result = __pud(val);
+
+ /*
+ * Avoid creating shadow stack PUD by accident. See comment in
+ * pte_modify().
+ */
+ if (oldval & _PAGE_RW)
+ pud_result = pud_mksaveddirty(pud_result);
+ else
+ pud_result = pud_clear_saveddirty(pud_result);
+
+ return pud_result;
+}
+
/*
* mprotect needs to preserve PAT and encryption bits when updating
* vm_page_prot
@@ -1078,8 +1132,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
- return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
- (_PAGE_PSE | _PAGE_PRESENT);
+ return pud_val(pud) & _PAGE_PSE;
}
static inline int pud_bad(pud_t pud)
@@ -1383,10 +1436,28 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pudp_establish(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(vma->vm_mm, pudp, pud);
+ if (IS_ENABLED(CONFIG_SMP)) {
+ return xchg(pudp, pud);
+ } else {
+ pud_t old = *pudp;
+ WRITE_ONCE(*pudp, pud);
+ return old;
+ }
+}
+#endif
+
#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp);
+
/*
* Page table pages are page-aligned. The lower half of the top
* level is used for userspace and the top half for the kernel.
@@ -1668,6 +1739,9 @@ void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
#define arch_check_zapped_pmd arch_check_zapped_pmd
void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
+#define arch_check_zapped_pud arch_check_zapped_pud
+void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud);
+
#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
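pudp_establish() mirrors the PMD version: on SMP the entry must be swapped with xchg() so a concurrent hardware A/D-bit update is not lost, while on UP a plain read-then-write suffices. A userspace sketch of the two flavors (using the GCC/Clang __atomic builtin in place of the kernel's xchg()):

    #include <stdio.h>

    /* UP flavor: nothing else updates the entry concurrently. */
    static unsigned long establish_up(unsigned long *entry, unsigned long val)
    {
    	unsigned long old = *entry;

    	*entry = val;
    	return old;
    }

    /* SMP flavor: atomic swap so a racing hardware A/D update is not lost. */
    static unsigned long establish_smp(unsigned long *entry, unsigned long val)
    {
    	return __atomic_exchange_n(entry, val, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
    	unsigned long pud = 0x1;

    	printf("old=%#lx\n", establish_up(&pud, 0x2));
    	printf("old=%#lx\n", establish_smp(&pud, 0x3));
    	printf("now=%#lx\n", pud);
    	return 0;
    }

Returning the old entry is the point of "establish": pudp_invalidate() needs the previous flags to rebuild the entry it is transiently marking not-present.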
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c4407271d08..7e9db77231ac 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -245,7 +245,6 @@ extern void cleanup_highmap(void);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 2f321137736c..6f82e75b6149 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -517,8 +517,6 @@ typedef struct page *pgtable_t;
extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
-extern void set_nx(void);
-extern int nx_enabled;
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a75a07f4931f..4a686f0e5dbf 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -582,7 +582,8 @@ extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
-extern void cpu_init_exception_handling(void);
+extern void cpu_init_exception_handling(bool boot_cpu);
+extern void cpu_init_replace_early_idt(void);
extern void cr4_init(void);
extern void set_task_blockstep(struct task_struct *task, bool on);
@@ -691,8 +692,6 @@ static inline u32 per_cpu_l2c_id(unsigned int cpu)
}
#ifdef CONFIG_CPU_SUP_AMD
-extern u32 amd_get_highest_perf(void);
-
/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
@@ -705,7 +704,6 @@ static __always_inline void amd_clear_divider(void)
extern void amd_check_microcode(void);
#else
-static inline u32 amd_get_highest_perf(void) { return 0; }
static inline void amd_clear_divider(void) { }
static inline void amd_check_microcode(void) { }
#endif
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 0c92db84469d..6e4f8fae3ce9 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -5,6 +5,7 @@
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>
+struct timespec64;
/* some helper functions for xen and kvm pv clock sources */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src);
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 79bbe2be900e..ee34ab00a8d6 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -164,7 +164,7 @@ struct snp_guest_msg_hdr {
struct snp_guest_msg {
struct snp_guest_msg_hdr hdr;
- u8 payload[4000];
+ u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
} __packed;
struct sev_guest_platform_data {
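Deriving the payload length from PAGE_SIZE keeps struct snp_guest_msg exactly one page even if the header ever changes size. A standalone sketch with the invariant spelled out as an assertion (the 96-byte header is an invented stand-in, not the real snp_guest_msg_hdr size):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct msg_hdr {			/* stand-in for snp_guest_msg_hdr */
    	unsigned char bytes[96];	/* size invented for the sketch */
    };

    struct msg {
    	struct msg_hdr hdr;
    	unsigned char payload[PAGE_SIZE - sizeof(struct msg_hdr)];
    };

    /* The invariant the patch encodes: header plus payload fill one page. */
    _Static_assert(sizeof(struct msg) == PAGE_SIZE, "msg must be one page");

    int main(void)
    {
    	struct msg m;

    	printf("payload bytes: %zu\n", sizeof(m.payload));
    	return 0;
    }

The old hardcoded 4000 only happened to add up to a page with the current header; the derived form makes the relationship explicit and self-correcting.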
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 64df897c0ee3..3918c7a434f5 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -31,13 +31,4 @@
#endif /* CONFIG_SPARSEMEM */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_NUMA_KEEP_MEMINFO
-extern int phys_to_target_node(phys_addr_t start);
-#define phys_to_target_node phys_to_target_node
-extern int memory_add_physaddr_to_nid(u64 start);
-#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-#endif
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_SPARSEMEM_H */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index c3bd0c0758c9..75248546403d 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -70,13 +70,9 @@ static inline void update_task_stack(struct task_struct *task)
#ifdef CONFIG_X86_32
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
- if (cpu_feature_enabled(X86_FEATURE_FRED)) {
- /* WRMSRNS is a baseline feature for FRED. */
- wrmsrns(MSR_IA32_FRED_RSP0, (unsigned long)task_stack_page(task) + THREAD_SIZE);
- } else if (cpu_feature_enabled(X86_FEATURE_XENPV)) {
+ if (!cpu_feature_enabled(X86_FEATURE_FRED) && cpu_feature_enabled(X86_FEATURE_XENPV))
/* Xen PV enters the kernel on the thread stack. */
load_sp0(task_top_of_stack(task));
- }
#endif
}
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 2fc7bc3863ff..7c488ff0c764 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -82,7 +82,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
{
- memcpy(args, &regs->bx, 6 * sizeof(args[0]));
+ args[0] = regs->bx;
+ args[1] = regs->cx;
+ args[2] = regs->dx;
+ args[3] = regs->si;
+ args[4] = regs->di;
+ args[5] = regs->bp;
}
static inline int syscall_get_arch(struct task_struct *task)
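Copying the six registers one by one removes the assumption that bx..bp are laid out contiguously in pt_regs, which the old memcpy() silently depended on. A compilable illustration of why field-by-field copies survive layout changes (the struct is a simplified stand-in, not pt_regs):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified pt_regs where the argument registers are NOT contiguous. */
    struct regs {
    	unsigned long bx;
    	unsigned long pad;	/* anything between fields breaks a memcpy */
    	unsigned long cx;
    	unsigned long dx;
    	unsigned long si;
    	unsigned long di;
    	unsigned long bp;
    };

    static void get_args(const struct regs *r, unsigned long args[6])
    {
    	/* Field-by-field copies are layout-independent. */
    	args[0] = r->bx; args[1] = r->cx; args[2] = r->dx;
    	args[3] = r->si; args[4] = r->di; args[5] = r->bp;
    }

    int main(void)
    {
    	struct regs r = { .bx = 1, .cx = 2, .dx = 3, .si = 4, .di = 5, .bp = 6 };
    	unsigned long args[6];

    	get_args(&r, args);
    	printf("arg0=%lu arg5=%lu (bx..bp span %zu bytes)\n", args[0], args[5],
    	       offsetof(struct regs, bp) - offsetof(struct regs, bx));
    	return 0;
    }

A memcpy of 6 * sizeof(long) from &r.bx would silently pick up the padding field here; the explicit form cannot.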
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 25726893c6f4..69e79fff41b8 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -399,11 +399,10 @@ static inline u64 tlbstate_lam_cr3_mask(void)
return lam << X86_CR3_LAM_U57_BIT;
}
-static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
+static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
- this_cpu_write(cpu_tlbstate.lam,
- mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
- this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
+ this_cpu_write(cpu_tlbstate.lam, lam >> X86_CR3_LAM_U57_BIT);
+ this_cpu_write(tlbstate_untag_mask, untag_mask);
}
#else
@@ -413,7 +412,7 @@ static inline u64 tlbstate_lam_cr3_mask(void)
return 0;
}
-static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
+static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
}
#endif
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index abe3a8f22cbd..aef70336d624 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -282,9 +282,22 @@ static inline long arch_scale_freq_capacity(int cpu)
}
#define arch_scale_freq_capacity arch_scale_freq_capacity
+bool arch_enable_hybrid_capacity_scale(void);
+void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
+ unsigned long cap_freq, unsigned long base_freq);
+
+unsigned long arch_scale_cpu_capacity(int cpu);
+#define arch_scale_cpu_capacity arch_scale_cpu_capacity
+
extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
#else
+static inline bool arch_enable_hybrid_capacity_scale(void) { return false; }
+static inline void arch_set_cpu_capacity(int cpu, unsigned long cap,
+ unsigned long max_cap,
+ unsigned long cap_freq,
+ unsigned long base_freq) { }
+
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
#endif
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 04789f45ab2b..afce8ee5d7b7 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#define valid_user_address(x) ((__force long)(x) >= 0)
/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+#define masked_user_access_begin(x) ({ \
+ __auto_type __masked_ptr = (x); \
+ __masked_ptr = mask_user_address(__masked_ptr); \
+ __uaccess_begin(); __masked_ptr; })
+
+/*
* User pointers can have tag bits on x86-64. This scheme tolerates
* arbitrary values in those bits rather then masking them off.
*
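mask_user_address() relies on arithmetic right shift of the sign bit: a kernel (negative) address smears to all ones, and the OR turns the pointer into a non-canonical all-ones value that faults safely, while a user address ORs with zero and passes through unchanged. A standalone demonstration of the arithmetic (assuming signed right shift is arithmetic, as GCC and Clang implement on x86-64):

    #include <stdint.h>
    #include <stdio.h>

    /* (int64_t)x >> 63 is all ones for kernel (negative) addresses, else 0. */
    static uint64_t mask_user_address(uint64_t x)
    {
    	return x | (uint64_t)((int64_t)x >> 63);
    }

    int main(void)
    {
    	uint64_t user = 0x00007fffdeadbeefULL;
    	uint64_t kern = 0xffff8000deadbeefULL;

    	printf("user   %#018jx -> %#018jx\n", (uintmax_t)user,
    	       (uintmax_t)mask_user_address(user));
    	printf("kernel %#018jx -> %#018jx\n", (uintmax_t)kern,
    	       (uintmax_t)mask_user_address(kern));
    	return 0;
    }

Because the transformation is branch-free, it needs no speculation barrier, which is the fencing the comment says this scheme avoids.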
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h
index d6b17c760622..1876b5edd142 100644
--- a/arch/x86/include/asm/uv/uv_irq.h
+++ b/arch/x86/include/asm/uv/uv_irq.h
@@ -31,7 +31,6 @@ enum {
UV_AFFINITY_CPU
};
-extern int uv_irq_2_mmr_info(int, unsigned long *, int *);
extern int uv_setup_irq(char *, int, int, unsigned long, int);
extern void uv_teardown_irq(unsigned int);
diff --git a/arch/x86/include/asm/vdso/getrandom.h b/arch/x86/include/asm/vdso/getrandom.h
index b96e674cafde..ff5334ad32a0 100644
--- a/arch/x86/include/asm/vdso/getrandom.h
+++ b/arch/x86/include/asm/vdso/getrandom.h
@@ -37,19 +37,6 @@ static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void
return &__vdso_rng_data;
}
-/**
- * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack.
- * @dst_bytes: Destination buffer to hold @nblocks * 64 bytes of output.
- * @key: 32-byte input key.
- * @counter: 8-byte counter, read on input and updated on return.
- * @nblocks: Number of blocks to generate.
- *
- * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write
- * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data
- * leaking into forked child processes.
- */
-extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks);
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 972415a8be31..67fedf1698b5 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -9,9 +9,6 @@
#include <asm/vgtod.h>
#include <asm/vvar.h>
-DEFINE_VVAR(struct vdso_data, _vdso_data);
-DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data);
-
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
@@ -22,6 +19,13 @@ struct vdso_data *__x86_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __x86_get_k_vdso_data
+static __always_inline
+struct vdso_rng_data *__x86_get_k_vdso_rng_data(void)
+{
+ return &_vdso_rng_data;
+}
+#define __arch_get_k_vdso_rng_data __x86_get_k_vdso_rng_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/x86/include/uapi/asm/elf.h b/arch/x86/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..468e135fa285
--- /dev/null
+++ b/arch/x86/include/uapi/asm/elf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_X86_ELF_H
+#define _UAPI_ASM_X86_ELF_H
+
+#include <linux/types.h>
+
+struct x86_xfeat_component {
+ __u32 type;
+ __u32 size;
+ __u32 offset;
+ __u32 flags;
+} __packed;
+
+_Static_assert(sizeof(struct x86_xfeat_component) % 4 == 0, "x86_xfeat_component is not aligned");
+
+#endif /* _UAPI_ASM_X86_ELF_H */
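The packed x86_xfeat_component records are meant to be read back out of a dump by userspace tooling. A hypothetical parser sketch (the buffer contents are fabricated; only the record layout mirrors the new UAPI header):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct x86_xfeat_component {	/* mirrors the new UAPI record layout */
    	uint32_t type;
    	uint32_t size;
    	uint32_t offset;
    	uint32_t flags;
    } __attribute__((packed));

    int main(void)
    {
    	/* Fabricated records, as a dump parser might find in a note blob. */
    	const struct x86_xfeat_component recs[2] = {
    		{ .type = 2, .size = 256, .offset = 576 },
    		{ .type = 9, .size = 8,   .offset = 832 },
    	};
    	unsigned char buf[sizeof(recs)];
    	size_t i;

    	memcpy(buf, recs, sizeof(recs));
    	for (i = 0; i + sizeof(struct x86_xfeat_component) <= sizeof(buf);
    	     i += sizeof(struct x86_xfeat_component)) {
    		struct x86_xfeat_component c;

    		/* memcpy out: the blob offers no alignment guarantees. */
    		memcpy(&c, buf + i, sizeof(c));
    		printf("xfeature %u: %u bytes at offset %u\n",
    		       c.type, c.size, c.offset);
    	}
    	return 0;
    }

The _Static_assert in the header pins the record to a 4-byte multiple so consumers can walk a blob of them with simple pointer arithmetic, as above.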
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a847180836e4..f7918980667a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,6 +35,14 @@ KMSAN_SANITIZE_nmi.o := n
# If instrumentation of the following files is enabled, boot hangs during
# first second.
KCOV_INSTRUMENT_head$(BITS).o := n
+# These are called from save_stack_trace() on debug paths,
+# and produce large amounts of uninteresting coverage.
+KCOV_INSTRUMENT_stacktrace.o := n
+KCOV_INSTRUMENT_dumpstack.o := n
+KCOV_INSTRUMENT_dumpstack_$(BITS).o := n
+KCOV_INSTRUMENT_unwind_orc.o := n
+KCOV_INSTRUMENT_unwind_frame.o := n
+KCOV_INSTRUMENT_unwind_guess.o := n
CFLAGS_irq.o := -I $(src)/../include/asm/trace
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 9f4618dcd704..4efecac49863 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1778,3 +1778,14 @@ u64 x86_default_get_root_pointer(void)
{
return boot_params.acpi_rsdp_addr;
}
+
+#ifdef CONFIG_XEN_PV
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+ return ioremap_cache(phys, size);
+}
+
+void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
+ x86_acpi_os_ioremap;
+EXPORT_SYMBOL_GPL(acpi_os_ioremap);
+#endif
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index ff8f25faca3d..956984054bf3 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -9,6 +9,17 @@
#include <asm/processor.h>
#include <asm/topology.h>
+#define CPPC_HIGHEST_PERF_PERFORMANCE 196
+#define CPPC_HIGHEST_PERF_PREFCORE 166
+
+enum amd_pref_core {
+ AMD_PREF_CORE_UNKNOWN = 0,
+ AMD_PREF_CORE_SUPPORTED,
+ AMD_PREF_CORE_UNSUPPORTED,
+};
+static enum amd_pref_core amd_pref_core_detected;
+static u64 boost_numerator;
+
/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
bool cpc_supported_by_cpu(void)
@@ -69,31 +80,30 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static void amd_set_max_freq_ratio(void)
{
struct cppc_perf_caps perf_caps;
- u64 highest_perf, nominal_perf;
+ u64 numerator, nominal_perf;
u64 perf_ratio;
int rc;
rc = cppc_get_perf_caps(0, &perf_caps);
if (rc) {
- pr_debug("Could not retrieve perf counters (%d)\n", rc);
+ pr_warn("Could not retrieve perf counters (%d)\n", rc);
return;
}
- highest_perf = amd_get_highest_perf();
+ rc = amd_get_boost_ratio_numerator(0, &numerator);
+ if (rc) {
+ pr_warn("Could not retrieve highest performance (%d)\n", rc);
+ return;
+ }
nominal_perf = perf_caps.nominal_perf;
- if (!highest_perf || !nominal_perf) {
- pr_debug("Could not retrieve highest or nominal performance\n");
+ if (!nominal_perf) {
+ pr_warn("Could not retrieve nominal performance\n");
return;
}
- perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
/* midpoint between max_boost and max_P */
- perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
- if (!perf_ratio) {
- pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
- return;
- }
+ perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;
freq_invariance_set_perf_ratio(perf_ratio, false);
}
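Note: to make the midpoint formula concrete, a small stand-alone example with sample numbers (196 and 166 are illustrative values, not measurements; SCHED_CAPACITY_SCALE is 1024 in the kernel):

/* Worked example of the boost-ratio midpoint computation. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024ULL

int main(void)
{
	uint64_t numerator = 196;	/* illustrative highest perf */
	uint64_t nominal_perf = 166;	/* illustrative nominal perf */
	uint64_t perf_ratio;

	/* (196 * 1024) / 166 = 1209; (1209 + 1024) >> 1 = 1116 */
	perf_ratio = (numerator * SCHED_CAPACITY_SCALE / nominal_perf +
		      SCHED_CAPACITY_SCALE) >> 1;
	printf("perf_ratio = %llu\n", (unsigned long long)perf_ratio);
	return 0;
}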
@@ -116,3 +126,143 @@ void init_freq_invariance_cppc(void)
init_done = true;
mutex_unlock(&freq_invariance_lock);
}
+
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address for highest performance value.
+ *
+ * Return: 0 for success, negative error code otherwise.
+ */
+int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ u64 val;
+ int ret;
+
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
+ if (ret)
+ goto out;
+
+ val = AMD_CPPC_HIGHEST_PERF(val);
+ } else {
+ ret = cppc_get_highest_perf(cpu, &val);
+ if (ret)
+ goto out;
+ }
+
+ WRITE_ONCE(*highest_perf, (u32)val);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(amd_get_highest_perf);
+
+/**
+ * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
+ * @detected: Output variable for the result of the detection.
+ *
+ * Determine whether CPUs in the system support preferred cores. On systems
+ * that support preferred cores, different highest perf values will be found
+ * on different cores. On other systems, the highest perf value will be the
+ * same on all cores.
+ *
+ * The result of the detection will be stored in the 'detected' parameter.
+ *
+ * Return: 0 for success, negative error code otherwise
+ */
+int amd_detect_prefcore(bool *detected)
+{
+ int cpu, count = 0;
+ u64 highest_perf[2] = {0};
+
+ if (WARN_ON(!detected))
+ return -EINVAL;
+
+ switch (amd_pref_core_detected) {
+ case AMD_PREF_CORE_SUPPORTED:
+ *detected = true;
+ return 0;
+ case AMD_PREF_CORE_UNSUPPORTED:
+ *detected = false;
+ return 0;
+ default:
+ break;
+ }
+
+ for_each_present_cpu(cpu) {
+ u32 tmp;
+ int ret;
+
+ ret = amd_get_highest_perf(cpu, &tmp);
+ if (ret)
+ return ret;
+
+ if (!count || (count == 1 && tmp != highest_perf[0]))
+ highest_perf[count++] = tmp;
+
+ if (count == 2)
+ break;
+ }
+
+ *detected = (count == 2);
+ boost_numerator = highest_perf[0];
+
+ amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
+ AMD_PREF_CORE_UNSUPPORTED;
+
+ pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
+ *detected ? "" : "un", highest_perf[0]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_detect_prefcore);
+
+/**
+ * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
+ * @cpu: CPU to get numerator for.
+ * @numerator: Output variable for numerator.
+ *
+ * Determine the numerator to use for calculating the boost ratio on
+ * a CPU. On systems that support preferred cores, this will be a hardcoded
+ * value. On other systems this will be the highest performance register value.
+ *
+ * If the system boots with amd-pstate enabled but preferred cores disabled,
+ * the correct boost numerator is still returned to match hardware
+ * capabilities even if the preferred cores scheduling hints are not enabled.
+ *
+ * Return: 0 for success, negative error code otherwise.
+ */
+int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ bool prefcore;
+ int ret;
+
+ ret = amd_detect_prefcore(&prefcore);
+ if (ret)
+ return ret;
+
+ /* without preferred cores, return the highest perf register value */
+ if (!prefcore) {
+ *numerator = boost_numerator;
+ return 0;
+ }
+
+ /*
+ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+ * the highest performance level is set to 196.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+ */
+ if (cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x70 ... 0x7f:
+ *numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
+ return 0;
+ default:
+ break;
+ }
+ }
+ *numerator = CPPC_HIGHEST_PERF_PREFCORE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
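Note: a sketch of how a hypothetical consumer (e.g. a cpufreq driver) might chain the two exported helpers; the function name below is made up for illustration:

/* Hypothetical caller built only on the helpers added above. */
static int example_setup_boost(unsigned int cpu)
{
	bool prefcore;
	u64 numerator;
	int ret;

	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	ret = amd_get_boost_ratio_numerator(cpu, &numerator);
	if (ret)
		return ret;

	pr_debug("CPU%u: prefcore %ssupported, boost numerator %llu\n",
		 cpu, prefcore ? "" : "un", numerator);
	return 0;
}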
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 059e5c16af05..dc5d3216af24 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -26,6 +26,7 @@
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT 0x14f8
@@ -43,6 +44,8 @@
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4
+#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4 0x124c
+#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4 0x12bc
#define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4 0x152c
@@ -63,6 +66,7 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
{}
@@ -95,6 +99,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
@@ -122,6 +127,8 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
{}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 373638691cd4..6513c53c9459 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -677,7 +677,7 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc)
return -1;
#endif
- apic_printk(APIC_VERBOSE, "... PM-Timer delta = %u\n", deltapm);
+ apic_pr_verbose("... PM-Timer delta = %u\n", deltapm);
/* Check, if the PM timer is available */
if (!deltapm)
@@ -687,14 +687,14 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc)
if (deltapm > (pm_100ms - pm_thresh) &&
deltapm < (pm_100ms + pm_thresh)) {
- apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
+ apic_pr_verbose("... PM-Timer result ok\n");
return 0;
}
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
- pr_warn("APIC calibration not consistent "
- "with PM-Timer: %ldms instead of 100ms\n", (long)res);
+ pr_warn("APIC calibration not consistent with PM-Timer: %ldms instead of 100ms\n",
+ (long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
@@ -707,9 +707,8 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc)
if (boot_cpu_has(X86_FEATURE_TSC)) {
res = (((u64)(*deltatsc)) * pm_100ms);
do_div(res, deltapm);
- apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
- "PM-Timer: %lu (%ld)\n",
- (unsigned long)res, *deltatsc);
+ apic_pr_verbose("TSC delta adjusted to PM-Timer: %lu (%ld)\n",
+ (unsigned long)res, *deltatsc);
*deltatsc = (long)res;
}
@@ -792,8 +791,7 @@ static int __init calibrate_APIC_clock(void)
* in the clockevent structure and return.
*/
if (!lapic_init_clockevent()) {
- apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
- lapic_timer_period);
+ apic_pr_verbose("lapic timer already calibrated %d\n", lapic_timer_period);
/*
* Direct calibration methods must have an always running
* local APIC timer, no need for broadcast timer.
@@ -802,8 +800,7 @@ static int __init calibrate_APIC_clock(void)
return 0;
}
- apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
- "calibrating APIC timer ...\n");
+ apic_pr_verbose("Using local APIC timer interrupts. Calibrating APIC timer ...\n");
/*
* There are platforms w/o global clockevent devices. Instead of
@@ -866,7 +863,7 @@ static int __init calibrate_APIC_clock(void)
/* Build delta t1-t2 as apic timer counts down */
delta = lapic_cal_t1 - lapic_cal_t2;
- apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
+ apic_pr_verbose("... lapic delta = %ld\n", delta);
deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
@@ -877,22 +874,19 @@ static int __init calibrate_APIC_clock(void)
lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
lapic_init_clockevent();
- apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
- apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
- apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
- lapic_timer_period);
+ apic_pr_verbose("..... delta %ld\n", delta);
+ apic_pr_verbose("..... mult: %u\n", lapic_clockevent.mult);
+ apic_pr_verbose("..... calibration result: %u\n", lapic_timer_period);
if (boot_cpu_has(X86_FEATURE_TSC)) {
- apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
- "%ld.%04ld MHz.\n",
- (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
- (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
+ apic_pr_verbose("..... CPU clock speed is %ld.%04ld MHz.\n",
+ (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
+ (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
}
- apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
- "%u.%04u MHz.\n",
- lapic_timer_period / (1000000 / HZ),
- lapic_timer_period % (1000000 / HZ));
+ apic_pr_verbose("..... host bus clock speed is %u.%04u MHz.\n",
+ lapic_timer_period / (1000000 / HZ),
+ lapic_timer_period % (1000000 / HZ));
/*
* Do a sanity check on the APIC calibration result
@@ -911,7 +905,7 @@ static int __init calibrate_APIC_clock(void)
* available.
*/
if (!pm_referenced && global_clock_event) {
- apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
+ apic_pr_verbose("... verify APIC timer\n");
/*
* Setup the apic timer manually
@@ -932,11 +926,11 @@ static int __init calibrate_APIC_clock(void)
/* Jiffies delta */
deltaj = lapic_cal_j2 - lapic_cal_j1;
- apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
+ apic_pr_verbose("... jiffies delta = %lu\n", deltaj);
/* Check, if the jiffies result is consistent */
if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
- apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
+ apic_pr_verbose("... jiffies result ok\n");
else
levt->features |= CLOCK_EVT_FEAT_DUMMY;
}
@@ -1221,9 +1215,8 @@ void __init sync_Arb_IDs(void)
*/
apic_wait_icr_idle();
- apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
- apic_write(APIC_ICR, APIC_DEST_ALLINC |
- APIC_INT_LEVELTRIG | APIC_DM_INIT);
+ apic_pr_debug("Synchronizing Arb IDs.\n");
+ apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
enum apic_intr_mode_id apic_intr_mode __ro_after_init;
@@ -1409,10 +1402,10 @@ static void lapic_setup_esr(void)
if (maxlvt > 3)
apic_write(APIC_ESR, 0);
value = apic_read(APIC_ESR);
- if (value != oldvalue)
- apic_printk(APIC_VERBOSE, "ESR value before enabling "
- "vector: 0x%08x after: 0x%08x\n",
- oldvalue, value);
+ if (value != oldvalue) {
+ apic_pr_verbose("ESR value before enabling vector: 0x%08x after: 0x%08x\n",
+ oldvalue, value);
+ }
}
#define APIC_IR_REGS APIC_ISR_NR
@@ -1599,10 +1592,10 @@ static void setup_local_APIC(void)
value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
if (!cpu && (pic_mode || !value || ioapic_is_disabled)) {
value = APIC_DM_EXTINT;
- apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
+ apic_pr_verbose("Enabled ExtINT on CPU#%d\n", cpu);
} else {
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
- apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
+ apic_pr_verbose("Masked ExtINT on CPU#%d\n", cpu);
}
apic_write(APIC_LVT0, value);
@@ -2067,8 +2060,7 @@ static __init void apic_set_fixmap(bool read_apic)
{
set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
apic_mmio_base = APIC_BASE;
- apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
- apic_mmio_base, mp_lapic_addr);
+ apic_pr_verbose("Mapped APIC to %16lx (%16lx)\n", apic_mmio_base, mp_lapic_addr);
if (read_apic)
apic_read_boot_cpu_id(false);
}
@@ -2171,18 +2163,17 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
apic_eoi();
atomic_inc(&irq_err_count);
- apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
- smp_processor_id(), v);
+ apic_pr_debug("APIC error on CPU%d: %02x", smp_processor_id(), v);
v &= 0xff;
while (v) {
if (v & 0x1)
- apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
+ apic_pr_debug_cont(" : %s", error_interrupt_reason[i]);
i++;
v >>= 1;
}
- apic_printk(APIC_DEBUG, KERN_CONT "\n");
+ apic_pr_debug_cont("\n");
trace_error_apic_exit(ERROR_APIC_VECTOR);
}
@@ -2202,8 +2193,7 @@ static void __init connect_bsp_APIC(void)
* PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
* local APIC to INT and NMI lines.
*/
- apic_printk(APIC_VERBOSE, "leaving PIC mode, "
- "enabling APIC mode.\n");
+ apic_pr_verbose("Leaving PIC mode, enabling APIC mode.\n");
imcr_pic_to_apic();
}
#endif
@@ -2228,8 +2218,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
* IPIs, won't work beyond this point! The only exception are
* INIT IPIs.
*/
- apic_printk(APIC_VERBOSE, "disabling APIC mode, "
- "entering PIC mode.\n");
+ apic_pr_verbose("Disabling APIC mode, entering PIC mode.\n");
imcr_apic_to_pic();
return;
}
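Note: the apic_pr_verbose()/apic_pr_debug() helpers used throughout this hunk are introduced elsewhere in the series; an assumed shape, wrapping the old apic_printk() so call sites no longer repeat the verbosity level and KERN_* prefix:

/* Assumed definitions - sketch only, not the exact upstream macros. */
#define apic_pr_verbose(fmt, ...) \
	apic_printk(APIC_VERBOSE, KERN_INFO fmt, ##__VA_ARGS__)
#define apic_pr_debug(fmt, ...) \
	apic_printk(APIC_DEBUG, KERN_DEBUG fmt, ##__VA_ARGS__)
#define apic_pr_debug_cont(fmt, ...) \
	apic_printk(APIC_DEBUG, KERN_CONT fmt, ##__VA_ARGS__)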
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index f37ad3392fec..e0308d8c4e6c 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -8,129 +8,25 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
-#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/acpi.h>
-#include <asm/jailhouse_para.h>
#include <asm/apic.h>
#include "local.h"
-static struct apic apic_physflat;
-static struct apic apic_flat;
-
-struct apic *apic __ro_after_init = &apic_flat;
-EXPORT_SYMBOL_GPL(apic);
-
-static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- return 1;
-}
-
-static void _flat_send_IPI_mask(unsigned long mask, int vector)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
- local_irq_restore(flags);
-}
-
-static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
-{
- unsigned long mask = cpumask_bits(cpumask)[0];
-
- _flat_send_IPI_mask(mask, vector);
-}
-
-static void
-flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
-{
- unsigned long mask = cpumask_bits(cpumask)[0];
- int cpu = smp_processor_id();
-
- if (cpu < BITS_PER_LONG)
- __clear_bit(cpu, &mask);
-
- _flat_send_IPI_mask(mask, vector);
-}
-
-static u32 flat_get_apic_id(u32 x)
+static u32 physflat_get_apic_id(u32 x)
{
return (x >> 24) & 0xFF;
}
-static int flat_probe(void)
+static int physflat_probe(void)
{
return 1;
}
-static struct apic apic_flat __ro_after_init = {
- .name = "flat",
- .probe = flat_probe,
- .acpi_madt_oem_check = flat_acpi_madt_oem_check,
-
- .dest_mode_logical = true,
-
- .disable_esr = 0,
-
- .init_apic_ldr = default_init_apic_ldr,
- .cpu_present_to_apicid = default_cpu_present_to_apicid,
-
- .max_apic_id = 0xFE,
- .get_apic_id = flat_get_apic_id,
-
- .calc_dest_apicid = apic_flat_calc_apicid,
-
- .send_IPI = default_send_IPI_single,
- .send_IPI_mask = flat_send_IPI_mask,
- .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
- .send_IPI_allbutself = default_send_IPI_allbutself,
- .send_IPI_all = default_send_IPI_all,
- .send_IPI_self = default_send_IPI_self,
- .nmi_to_offline_cpu = true,
-
- .read = native_apic_mem_read,
- .write = native_apic_mem_write,
- .eoi = native_apic_mem_eoi,
- .icr_read = native_apic_icr_read,
- .icr_write = native_apic_icr_write,
- .wait_icr_idle = apic_mem_wait_icr_idle,
- .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout,
-};
-
-/*
- * Physflat mode is used when there are more than 8 CPUs on a system.
- * We cannot use logical delivery in this case because the mask
- * overflows, so use physical mode.
- */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
-#ifdef CONFIG_ACPI
- /*
- * Quirk: some x86_64 machines can only use physical APIC mode
- * regardless of how many processors are present (x86_64 ES7000
- * is an example).
- */
- if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
- (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
- printk(KERN_DEBUG "system APIC only can use physical flat");
- return 1;
- }
-
- if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
- printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
- return 1;
- }
-#endif
-
- return 0;
-}
-
-static int physflat_probe(void)
-{
- return apic == &apic_physflat || num_possible_cpus() > 8 || jailhouse_paravirt();
+ return 1;
}
static struct apic apic_physflat __ro_after_init = {
@@ -146,7 +42,7 @@ static struct apic apic_physflat __ro_after_init = {
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.max_apic_id = 0xFE,
- .get_apic_id = flat_get_apic_id,
+ .get_apic_id = physflat_get_apic_id,
.calc_dest_apicid = apic_default_calc_apicid,
@@ -166,8 +62,7 @@ static struct apic apic_physflat __ro_after_init = {
.wait_icr_idle = apic_mem_wait_icr_idle,
.safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout,
};
+apic_driver(apic_physflat);
-/*
- * We need to check for physflat first, so this order is important.
- */
-apic_drivers(apic_physflat, apic_flat);
+struct apic *apic __ro_after_init = &apic_physflat;
+EXPORT_SYMBOL_GPL(apic);
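Note: for context on apic_driver(), which replaces the apic_drivers() pair above: it registers the driver for boot-time probing. A hedged sketch of the mechanism; the .apicdrivers section name follows the long-standing convention but is an assumption here:

/* Sketch: the macro drops a pointer into a dedicated section that the
 * generic probe loop walks at boot, calling each driver's ->probe(). */
#define apic_driver(sym) \
	static const struct apic *__apicdrivers_##sym __used \
		__aligned(sizeof(struct apic *)) \
		__section(".apicdrivers") = { &sym }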
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 477b740b2f26..1029ea4ac8ba 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -86,8 +86,8 @@ static unsigned int ioapic_dynirq_base;
static int ioapic_initialized;
struct irq_pin_list {
- struct list_head list;
- int apic, pin;
+ struct list_head list;
+ int apic, pin;
};
struct mp_chip_data {
@@ -96,7 +96,7 @@ struct mp_chip_data {
bool is_level;
bool active_low;
bool isa_irq;
- u32 count;
+ u32 count;
};
struct mp_ioapic_gsi {
@@ -105,21 +105,17 @@ struct mp_ioapic_gsi {
};
static struct ioapic {
- /*
- * # of IRQ routing registers
- */
- int nr_registers;
- /*
- * Saved state during suspend/resume, or while enabling intr-remap.
- */
- struct IO_APIC_route_entry *saved_registers;
+ /* # of IRQ routing registers */
+ int nr_registers;
+ /* Saved state during suspend/resume, or while enabling intr-remap. */
+ struct IO_APIC_route_entry *saved_registers;
/* I/O APIC config */
- struct mpc_ioapic mp_config;
+ struct mpc_ioapic mp_config;
/* IO APIC gsi routing info */
- struct mp_ioapic_gsi gsi_config;
- struct ioapic_domain_cfg irqdomain_cfg;
- struct irq_domain *irqdomain;
- struct resource *iomem_res;
+ struct mp_ioapic_gsi gsi_config;
+ struct ioapic_domain_cfg irqdomain_cfg;
+ struct irq_domain *irqdomain;
+ struct resource *iomem_res;
} ioapics[MAX_IO_APICS];
#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
@@ -205,10 +201,9 @@ void mp_save_irq(struct mpc_intsrc *m)
{
int i;
- apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
- " IRQ %02x, APIC ID %x, APIC INT %02x\n",
- m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
- m->srcbusirq, m->dstapic, m->dstirq);
+ apic_pr_verbose("Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
+ m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
+ m->srcbusirq, m->dstapic, m->dstirq);
for (i = 0; i < mp_irq_entries; i++) {
if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
@@ -269,12 +264,14 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
+
writel(vector, &io_apic->eoi);
}
unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
+
writel(reg, &io_apic->index);
return readl(&io_apic->data);
}
@@ -300,14 +297,8 @@ static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
- struct IO_APIC_route_entry entry;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- entry = __ioapic_read_entry(apic, pin);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-
- return entry;
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
+ return __ioapic_read_entry(apic, pin);
}
/*
@@ -324,11 +315,8 @@ static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
__ioapic_write_entry(apic, pin, e);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
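Note: the lock conversions in this file use the scope-based helpers from <linux/cleanup.h>; a minimal before/after sketch of the pattern:

/* Before: explicit flag save/restore, easy to leak on an early return. */
static void old_style(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* After: guard() unlocks automatically when the scope ends, so early
 * returns inside the critical section need no unlock path. */
static void new_style(void)
{
	guard(raw_spinlock_irqsave)(&ioapic_lock);
	/* ... critical section ... */
}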
/*
@@ -339,12 +327,10 @@ static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
static void ioapic_mask_entry(int apic, int pin)
{
struct IO_APIC_route_entry e = { .masked = true };
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
io_apic_write(apic, 0x10 + 2*pin, e.w1);
io_apic_write(apic, 0x11 + 2*pin, e.w2);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -352,68 +338,39 @@ static void ioapic_mask_entry(int apic, int pin)
* shared ISA-space IRQs, so we have to support them. We are super
* fast in the common case, and fast for shared ISA-space IRQs.
*/
-static int __add_pin_to_irq_node(struct mp_chip_data *data,
- int node, int apic, int pin)
+static bool add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin)
{
struct irq_pin_list *entry;
- /* don't allow duplicates */
- for_each_irq_pin(entry, data->irq_2_pin)
+ /* Don't allow duplicates */
+ for_each_irq_pin(entry, data->irq_2_pin) {
if (entry->apic == apic && entry->pin == pin)
- return 0;
+ return true;
+ }
entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
if (!entry) {
- pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
- node, apic, pin);
- return -ENOMEM;
+ pr_err("Cannot allocate irq_pin_list (%d,%d,%d)\n", node, apic, pin);
+ return false;
}
+
entry->apic = apic;
entry->pin = pin;
list_add_tail(&entry->list, &data->irq_2_pin);
-
- return 0;
+ return true;
}
static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
{
struct irq_pin_list *tmp, *entry;
- list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
+ list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list) {
if (entry->apic == apic && entry->pin == pin) {
list_del(&entry->list);
kfree(entry);
return;
}
-}
-
-static void add_pin_to_irq_node(struct mp_chip_data *data,
- int node, int apic, int pin)
-{
- if (__add_pin_to_irq_node(data, node, apic, pin))
- panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
-}
-
-/*
- * Reroute an IRQ to a different pin.
- */
-static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
- int oldapic, int oldpin,
- int newapic, int newpin)
-{
- struct irq_pin_list *entry;
-
- for_each_irq_pin(entry, data->irq_2_pin) {
- if (entry->apic == oldapic && entry->pin == oldpin) {
- entry->apic = newapic;
- entry->pin = newpin;
- /* every one is different, right? */
- return;
- }
}
-
- /* old apic/pin didn't exist, so just add new ones */
- add_pin_to_irq_node(data, node, newapic, newpin);
}
static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
@@ -430,12 +387,12 @@ static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
}
}
+/*
+ * Synchronize the IO-APIC and the CPU by doing a dummy read from the
+ * IO-APIC
+ */
static void io_apic_sync(struct irq_pin_list *entry)
{
- /*
- * Synchronize the IO-APIC and the CPU by doing
- * a dummy read from the IO-APIC
- */
struct io_apic __iomem *io_apic;
io_apic = io_apic_base(entry->apic);
@@ -445,11 +402,9 @@ static void io_apic_sync(struct irq_pin_list *entry)
static void mask_ioapic_irq(struct irq_data *irq_data)
{
struct mp_chip_data *data = irq_data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
io_apic_modify_irq(data, true, &io_apic_sync);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __unmask_ioapic(struct mp_chip_data *data)
@@ -460,11 +415,9 @@ static void __unmask_ioapic(struct mp_chip_data *data)
static void unmask_ioapic_irq(struct irq_data *irq_data)
{
struct mp_chip_data *data = irq_data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
__unmask_ioapic(data);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -492,30 +445,24 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector)
entry = entry1 = __ioapic_read_entry(apic, pin);
- /*
- * Mask the entry and change the trigger mode to edge.
- */
+ /* Mask the entry and change the trigger mode to edge. */
entry1.masked = true;
entry1.is_level = false;
__ioapic_write_entry(apic, pin, entry1);
- /*
- * Restore the previous level triggered entry.
- */
+ /* Restore the previous level triggered entry. */
__ioapic_write_entry(apic, pin, entry);
}
}
static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
{
- unsigned long flags;
struct irq_pin_list *entry;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
for_each_irq_pin(entry, data->irq_2_pin)
__eoi_ioapic_pin(entry->apic, entry->pin, vector);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
@@ -538,8 +485,6 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
}
if (entry.irr) {
- unsigned long flags;
-
/*
* Make sure the trigger mode is set to level. Explicit EOI
* doesn't clear the remote-IRR if the trigger mode is not
@@ -549,9 +494,8 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
entry.is_level = true;
ioapic_write_entry(apic, pin, entry);
}
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
__eoi_ioapic_pin(apic, pin, entry.vector);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -586,28 +530,23 @@ static int pirq_entries[MAX_PIRQS] = {
static int __init ioapic_pirq_setup(char *str)
{
- int i, max;
- int ints[MAX_PIRQS+1];
+ int i, max, ints[MAX_PIRQS+1];
get_options(str, ARRAY_SIZE(ints), ints);
- apic_printk(APIC_VERBOSE, KERN_INFO
- "PIRQ redirection, working around broken MP-BIOS.\n");
+ apic_pr_verbose("PIRQ redirection, working around broken MP-BIOS.\n");
+
max = MAX_PIRQS;
if (ints[0] < MAX_PIRQS)
max = ints[0];
for (i = 0; i < max; i++) {
- apic_printk(APIC_VERBOSE, KERN_DEBUG
- "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
- /*
- * PIRQs are mapped upside down, usually.
- */
+ apic_pr_verbose("... PIRQ%d -> IRQ %d\n", i, ints[i + 1]);
+ /* PIRQs are mapped upside down, usually */
pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
}
return 1;
}
-
__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
@@ -626,8 +565,7 @@ int save_ioapic_entries(void)
}
for_each_pin(apic, pin)
- ioapics[apic].saved_registers[pin] =
- ioapic_read_entry(apic, pin);
+ ioapics[apic].saved_registers[pin] = ioapic_read_entry(apic, pin);
}
return err;
@@ -668,8 +606,7 @@ int restore_ioapic_entries(void)
continue;
for_each_pin(apic, pin)
- ioapic_write_entry(apic, pin,
- ioapics[apic].saved_registers[pin]);
+ ioapic_write_entry(apic, pin, ioapics[apic].saved_registers[pin]);
}
return 0;
}
@@ -681,12 +618,13 @@ static int find_irq_entry(int ioapic_idx, int pin, int type)
{
int i;
- for (i = 0; i < mp_irq_entries; i++)
+ for (i = 0; i < mp_irq_entries; i++) {
if (mp_irqs[i].irqtype == type &&
(mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
mp_irqs[i].dstapic == MP_APIC_ALL) &&
mp_irqs[i].dstirq == pin)
return i;
+ }
return -1;
}
@@ -701,10 +639,8 @@ static int __init find_isa_irq_pin(int irq, int type)
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].srcbus;
- if (test_bit(lbus, mp_bus_not_pci) &&
- (mp_irqs[i].irqtype == type) &&
+ if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) &&
(mp_irqs[i].srcbusirq == irq))
-
return mp_irqs[i].dstirq;
}
return -1;
@@ -717,8 +653,7 @@ static int __init find_isa_irq_apic(int irq, int type)
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].srcbus;
- if (test_bit(lbus, mp_bus_not_pci) &&
- (mp_irqs[i].irqtype == type) &&
+ if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) &&
(mp_irqs[i].srcbusirq == irq))
break;
}
@@ -726,9 +661,10 @@ static int __init find_isa_irq_apic(int irq, int type)
if (i < mp_irq_entries) {
int ioapic_idx;
- for_each_ioapic(ioapic_idx)
+ for_each_ioapic(ioapic_idx) {
if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
return ioapic_idx;
+ }
}
return -1;
@@ -769,8 +705,7 @@ static bool EISA_ELCR(unsigned int irq)
unsigned int port = PIC_ELCR1 + (irq >> 3);
return (inb(port) >> (irq & 7)) & 1;
}
- apic_printk(APIC_VERBOSE, KERN_INFO
- "Broken MPtable reports ISA irq %d\n", irq);
+ apic_pr_verbose("Broken MPtable reports ISA irq %d\n", irq);
return false;
}
@@ -947,9 +882,9 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
struct irq_alloc_info *info)
{
+ int type = ioapics[ioapic].irqdomain_cfg.type;
bool legacy = false;
int irq = -1;
- int type = ioapics[ioapic].irqdomain_cfg.type;
switch (type) {
case IOAPIC_DOMAIN_LEGACY:
@@ -971,8 +906,7 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
return -1;
}
- return __irq_domain_alloc_irqs(domain, irq, 1,
- ioapic_alloc_attr_node(info),
+ return __irq_domain_alloc_irqs(domain, irq, 1, ioapic_alloc_attr_node(info),
info, legacy, NULL);
}
@@ -986,13 +920,12 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
* PIRQs instead of reprogramming the interrupt routing logic. Thus there may be
* multiple pins sharing the same legacy IRQ number when ACPI is disabled.
*/
-static int alloc_isa_irq_from_domain(struct irq_domain *domain,
- int irq, int ioapic, int pin,
+static int alloc_isa_irq_from_domain(struct irq_domain *domain, int irq, int ioapic, int pin,
struct irq_alloc_info *info)
{
- struct mp_chip_data *data;
struct irq_data *irq_data = irq_get_irq_data(irq);
int node = ioapic_alloc_attr_node(info);
+ struct mp_chip_data *data;
/*
* Legacy ISA IRQ has already been allocated, just add pin to
@@ -1002,13 +935,11 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
if (irq_data && irq_data->parent_data) {
if (!mp_check_pin_attr(irq, info))
return -EBUSY;
- if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
- info->ioapic.pin))
+ if (!add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin))
return -ENOMEM;
} else {
info->flags |= X86_IRQ_ALLOC_LEGACY;
- irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
- NULL);
+ irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, NULL);
if (irq >= 0) {
irq_data = irq_domain_get_irq_data(domain, irq);
data = irq_data->chip_data;
@@ -1022,11 +953,11 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
unsigned int flags, struct irq_alloc_info *info)
{
- int irq;
- bool legacy = false;
+ struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
struct irq_alloc_info tmp;
struct mp_chip_data *data;
- struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
+ bool legacy = false;
+ int irq;
if (!domain)
return -ENOSYS;
@@ -1046,7 +977,7 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
return -EINVAL;
}
- mutex_lock(&ioapic_mutex);
+ guard(mutex)(&ioapic_mutex);
if (!(flags & IOAPIC_MAP_ALLOC)) {
if (!legacy) {
irq = irq_find_mapping(domain, pin);
@@ -1067,8 +998,6 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
data->count++;
}
}
- mutex_unlock(&ioapic_mutex);
-
return irq;
}
@@ -1076,26 +1005,20 @@ static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
{
u32 gsi = mp_pin_to_gsi(ioapic, pin);
- /*
- * Debugging check, we are in big trouble if this message pops up!
- */
+ /* Debugging check, we are in big trouble if this message pops up! */
if (mp_irqs[idx].dstirq != pin)
pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");
#ifdef CONFIG_X86_32
- /*
- * PCI IRQ command line redirection. Yes, limits are hardcoded.
- */
+ /* PCI IRQ command line redirection. Yes, limits are hardcoded. */
if ((pin >= 16) && (pin <= 23)) {
- if (pirq_entries[pin-16] != -1) {
- if (!pirq_entries[pin-16]) {
- apic_printk(APIC_VERBOSE, KERN_DEBUG
- "disabling PIRQ%d\n", pin-16);
+ if (pirq_entries[pin - 16] != -1) {
+ if (!pirq_entries[pin - 16]) {
+ apic_pr_verbose("Disabling PIRQ%d\n", pin - 16);
} else {
int irq = pirq_entries[pin-16];
- apic_printk(APIC_VERBOSE, KERN_DEBUG
- "using PIRQ%d -> IRQ %d\n",
- pin-16, irq);
+
+ apic_pr_verbose("Using PIRQ%d -> IRQ %d\n", pin - 16, irq);
return irq;
}
}
@@ -1133,10 +1056,9 @@ void mp_unmap_irq(int irq)
if (!data || data->isa_irq)
return;
- mutex_lock(&ioapic_mutex);
+ guard(mutex)(&ioapic_mutex);
if (--data->count == 0)
irq_domain_free_irqs(irq, 1);
- mutex_unlock(&ioapic_mutex);
}
/*
@@ -1147,12 +1069,10 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
int irq, i, best_ioapic = -1, best_idx = -1;
- apic_printk(APIC_DEBUG,
- "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
- bus, slot, pin);
+ apic_pr_debug("Querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
+ bus, slot, pin);
if (test_bit(bus, mp_bus_not_pci)) {
- apic_printk(APIC_VERBOSE,
- "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
+ apic_pr_verbose("PCI BIOS passed nonexistent PCI bus %d!\n", bus);
return -1;
}
@@ -1197,8 +1117,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
return -1;
out:
- return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
- IOAPIC_MAP_ALLOC);
+ return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, IOAPIC_MAP_ALLOC);
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
@@ -1209,17 +1128,16 @@ static void __init setup_IO_APIC_irqs(void)
unsigned int ioapic, pin;
int idx;
- apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+ apic_pr_verbose("Init IO_APIC IRQs\n");
for_each_ioapic_pin(ioapic, pin) {
idx = find_irq_entry(ioapic, pin, mp_INT);
- if (idx < 0)
- apic_printk(APIC_VERBOSE,
- KERN_DEBUG " apic %d pin %d not connected\n",
- mpc_ioapic_id(ioapic), pin);
- else
- pin_2_irq(idx, ioapic, pin,
- ioapic ? 0 : IOAPIC_MAP_ALLOC);
+ if (idx < 0) {
+ apic_pr_verbose("apic %d pin %d not connected\n",
+ mpc_ioapic_id(ioapic), pin);
+ } else {
+ pin_2_irq(idx, ioapic, pin, ioapic ? 0 : IOAPIC_MAP_ALLOC);
+ }
}
}
@@ -1234,26 +1152,21 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
char buf[256];
int i;
- printk(KERN_DEBUG "IOAPIC %d:\n", apic);
+ apic_dbg("IOAPIC %d:\n", apic);
for (i = 0; i <= nr_entries; i++) {
entry = ioapic_read_entry(apic, i);
- snprintf(buf, sizeof(buf),
- " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
- i,
- entry.masked ? "disabled" : "enabled ",
+ snprintf(buf, sizeof(buf), " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
+ i, entry.masked ? "disabled" : "enabled ",
entry.is_level ? "level" : "edge ",
entry.active_low ? "low " : "high",
entry.vector, entry.irr, entry.delivery_status);
if (entry.ir_format) {
- printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
- buf,
- (entry.ir_index_15 << 15) | entry.ir_index_0_14,
- entry.ir_zero);
+ apic_dbg("%s, remapped, I(%04X), Z(%X)\n", buf,
+ (entry.ir_index_15 << 15) | entry.ir_index_0_14, entry.ir_zero);
} else {
- printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf,
- entry.dest_mode_logical ? "logical " : "physical",
- entry.virt_destid_8_14, entry.destid_0_7,
- entry.delivery_mode);
+ apic_dbg("%s, %s, D(%02X%02X), M(%1d)\n", buf,
+ entry.dest_mode_logical ? "logical " : "physical",
+ entry.virt_destid_8_14, entry.destid_0_7, entry.delivery_mode);
}
}
}
@@ -1264,30 +1177,25 @@ static void __init print_IO_APIC(int ioapic_idx)
union IO_APIC_reg_01 reg_01;
union IO_APIC_reg_02 reg_02;
union IO_APIC_reg_03 reg_03;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(ioapic_idx, 0);
- reg_01.raw = io_apic_read(ioapic_idx, 1);
- if (reg_01.bits.version >= 0x10)
- reg_02.raw = io_apic_read(ioapic_idx, 2);
- if (reg_01.bits.version >= 0x20)
- reg_03.raw = io_apic_read(ioapic_idx, 3);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-
- printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
- printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
- printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
- printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
- printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
-
- printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
- printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
- reg_01.bits.entries);
-
- printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
- printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
- reg_01.bits.version);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock) {
+ reg_00.raw = io_apic_read(ioapic_idx, 0);
+ reg_01.raw = io_apic_read(ioapic_idx, 1);
+ if (reg_01.bits.version >= 0x10)
+ reg_02.raw = io_apic_read(ioapic_idx, 2);
+ if (reg_01.bits.version >= 0x20)
+ reg_03.raw = io_apic_read(ioapic_idx, 3);
+ }
+
+ apic_dbg("IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
+ apic_dbg(".... register #00: %08X\n", reg_00.raw);
+ apic_dbg("....... : physical APIC id: %02X\n", reg_00.bits.ID);
+ apic_dbg("....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
+ apic_dbg("....... : LTS : %X\n", reg_00.bits.LTS);
+ apic_dbg(".... register #01: %08X\n", *(int *)&reg_01);
+ apic_dbg("....... : max redirection entries: %02X\n", reg_01.bits.entries);
+ apic_dbg("....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
+ apic_dbg("....... : IO APIC version: %02X\n", reg_01.bits.version);
/*
* Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
@@ -1295,8 +1203,8 @@ static void __init print_IO_APIC(int ioapic_idx)
* value, so ignore it if reg_02 == reg_01.
*/
if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
- printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
- printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
+ apic_dbg(".... register #02: %08X\n", reg_02.raw);
+ apic_dbg("....... : arbitration: %02X\n", reg_02.bits.arbitration);
}
/*
@@ -1306,11 +1214,11 @@ static void __init print_IO_APIC(int ioapic_idx)
*/
if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
reg_03.raw != reg_01.raw) {
- printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
- printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
+ apic_dbg(".... register #03: %08X\n", reg_03.raw);
+ apic_dbg("....... : Boot DT : %X\n", reg_03.bits.boot_DT);
}
- printk(KERN_DEBUG ".... IRQ redirection table:\n");
+ apic_dbg(".... IRQ redirection table:\n");
io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
}
@@ -1319,11 +1227,11 @@ void __init print_IO_APICs(void)
int ioapic_idx;
unsigned int irq;
- printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
- for_each_ioapic(ioapic_idx)
- printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
- mpc_ioapic_id(ioapic_idx),
- ioapics[ioapic_idx].nr_registers);
+ apic_dbg("number of MP IRQ sources: %d.\n", mp_irq_entries);
+ for_each_ioapic(ioapic_idx) {
+ apic_dbg("number of IO-APIC #%d registers: %d.\n",
+ mpc_ioapic_id(ioapic_idx), ioapics[ioapic_idx].nr_registers);
+ }
/*
* We are a bit conservative about what we expect. We have to
@@ -1334,7 +1242,7 @@ void __init print_IO_APICs(void)
for_each_ioapic(ioapic_idx)
print_IO_APIC(ioapic_idx);
- printk(KERN_DEBUG "IRQ to pin mappings:\n");
+ apic_dbg("IRQ to pin mappings:\n");
for_each_active_irq(irq) {
struct irq_pin_list *entry;
struct irq_chip *chip;
@@ -1349,7 +1257,7 @@ void __init print_IO_APICs(void)
if (list_empty(&data->irq_2_pin))
continue;
- printk(KERN_DEBUG "IRQ%d ", irq);
+ apic_dbg("IRQ%d ", irq);
for_each_irq_pin(entry, data->irq_2_pin)
pr_cont("-> %d:%d", entry->apic, entry->pin);
pr_cont("\n");
@@ -1363,8 +1271,7 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
- int i8259_apic, i8259_pin;
- int apic, pin;
+ int i8259_apic, i8259_pin, apic, pin;
if (ioapic_is_disabled)
nr_ioapics = 0;
@@ -1376,19 +1283,21 @@ void __init enable_IO_APIC(void)
/* See if any of the pins is in ExtINT mode */
struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);
- /* If the interrupt line is enabled and in ExtInt mode
- * I have found the pin where the i8259 is connected.
+ /*
+ * If the interrupt line is enabled and in ExtInt mode, we have
+ * found the pin where the i8259 is connected.
*/
- if (!entry.masked &&
- entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
+ if (!entry.masked && entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
ioapic_i8259.apic = apic;
ioapic_i8259.pin = pin;
- goto found_i8259;
+ break;
}
}
- found_i8259:
- /* Look to see what if the MP table has reported the ExtINT */
- /* If we could not find the appropriate pin by looking at the ioapic
+
+ /*
+ * Look to see if the MP table has reported the ExtINT.
+ *
+ * If we could not find the appropriate pin by looking at the ioapic,
 * the i8259 probably is not connected to the ioapic, but give the
 * mptable a chance anyway.
*/
@@ -1396,29 +1305,24 @@ void __init enable_IO_APIC(void)
i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
/* Trust the MP table if nothing is setup in the hardware */
if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
- printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
+ pr_warn("ExtINT not setup in hardware but reported by MP table\n");
ioapic_i8259.pin = i8259_pin;
ioapic_i8259.apic = i8259_apic;
}
/* Complain if the MP table and the hardware disagree */
if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
- (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
- {
- printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
- }
+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
+ pr_warn("ExtINT in hardware and MP table differ\n");
- /*
- * Do not trust the IO-APIC being empty at bootup
- */
+ /* Do not trust the IO-APIC being empty at bootup */
clear_IO_APIC();
}
void native_restore_boot_irq_mode(void)
{
/*
- * If the i8259 is routed through an IOAPIC
- * Put that IOAPIC in virtual wire mode
- * so legacy interrupts can be delivered.
+ * If the i8259 is routed through an IOAPIC, put that IOAPIC in
+ * virtual wire mode so legacy interrupts can be delivered.
*/
if (ioapic_i8259.pin != -1) {
struct IO_APIC_route_entry entry;
@@ -1433,9 +1337,7 @@ void native_restore_boot_irq_mode(void)
entry.destid_0_7 = apic_id & 0xFF;
entry.virt_destid_8_14 = apic_id >> 8;
- /*
- * Add it to the IO-APIC irq-routing table:
- */
+ /* Add it to the IO-APIC irq-routing table */
ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
}
@@ -1464,7 +1366,6 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void)
const u32 broadcast_id = 0xF;
union IO_APIC_reg_00 reg_00;
unsigned char old_id;
- unsigned long flags;
int ioapic_idx, i;
/*
@@ -1478,9 +1379,8 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void)
*/
for_each_ioapic(ioapic_idx) {
/* Read the register 0 value */
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(ioapic_idx, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock)
+ reg_00.raw = io_apic_read(ioapic_idx, 0);
old_id = mpc_ioapic_id(ioapic_idx);
@@ -1508,47 +1408,42 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void)
set_bit(i, phys_id_present_map);
ioapics[ioapic_idx].mp_config.apicid = i;
} else {
- apic_printk(APIC_VERBOSE, "Setting %d in the phys_id_present_map\n",
- mpc_ioapic_id(ioapic_idx));
+ apic_pr_verbose("Setting %d in the phys_id_present_map\n",
+ mpc_ioapic_id(ioapic_idx));
set_bit(mpc_ioapic_id(ioapic_idx), phys_id_present_map);
}
/*
- * We need to adjust the IRQ routing table
- * if the ID changed.
+ * We need to adjust the IRQ routing table if the ID
+ * changed.
*/
- if (old_id != mpc_ioapic_id(ioapic_idx))
- for (i = 0; i < mp_irq_entries; i++)
+ if (old_id != mpc_ioapic_id(ioapic_idx)) {
+ for (i = 0; i < mp_irq_entries; i++) {
if (mp_irqs[i].dstapic == old_id)
- mp_irqs[i].dstapic
- = mpc_ioapic_id(ioapic_idx);
+ mp_irqs[i].dstapic = mpc_ioapic_id(ioapic_idx);
+ }
+ }
/*
- * Update the ID register according to the right value
- * from the MPC table if they are different.
+ * Update the ID register according to the right value from
+ * the MPC table if they are different.
*/
if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
continue;
- apic_printk(APIC_VERBOSE, KERN_INFO
- "...changing IO-APIC physical APIC ID to %d ...",
- mpc_ioapic_id(ioapic_idx));
+ apic_pr_verbose("...changing IO-APIC physical APIC ID to %d ...",
+ mpc_ioapic_id(ioapic_idx));
reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(ioapic_idx, 0, reg_00.raw);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-
- /*
- * Sanity check
- */
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(ioapic_idx, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock) {
+ io_apic_write(ioapic_idx, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(ioapic_idx, 0);
+ }
+ /* Sanity check */
if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
pr_cont("could not set ID!\n");
else
- apic_printk(APIC_VERBOSE, " ok.\n");
+ apic_pr_verbose(" ok.\n");
}
}
@@ -1593,8 +1488,7 @@ static void __init delay_with_tsc(void)
do {
rep_nop();
now = rdtsc();
- } while ((now - start) < 40000000000ULL / HZ &&
- time_before_eq(jiffies, end));
+ } while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end));
}
static void __init delay_without_tsc(void)
@@ -1655,36 +1549,29 @@ static int __init timer_irq_works(void)
* so we 'resend' these IRQs via IPIs, to the same CPU. It's much
* better to do it this way as thus we do not have to be aware of
* 'pending' interrupts in the IRQ path, except at this point.
- */
-/*
- * Edge triggered needs to resend any interrupt
- * that was delayed but this is now handled in the device
- * independent code.
- */
-
-/*
- * Starting up a edge-triggered IO-APIC interrupt is
- * nasty - we need to make sure that we get the edge.
- * If it is already asserted for some reason, we need
- * return 1 to indicate that is was pending.
*
- * This is not complete - we should be able to fake
- * an edge even if it isn't on the 8259A...
+ *
+ * An edge-triggered interrupt that was delayed needs to be resent, but
+ * this is now handled in the device-independent code.
+ *
+ * Starting up an edge-triggered IO-APIC interrupt is nasty - we need to
+ * make sure that we get the edge. If it is already asserted for some
+ * reason, we need to return 1 to indicate that it was pending.
+ *
+ * This is not complete - we should be able to fake an edge even if it
+ * isn't on the 8259A...
*/
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
int was_pending = 0, irq = data->irq;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
if (irq < nr_legacy_irqs()) {
legacy_pic->mask(irq);
if (legacy_pic->irq_pending(irq))
was_pending = 1;
}
__unmask_ioapic(data->chip_data);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-
return was_pending;
}
@@ -1694,9 +1581,8 @@ atomic_t irq_mis_count;
static bool io_apic_level_ack_pending(struct mp_chip_data *data)
{
struct irq_pin_list *entry;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
for_each_irq_pin(entry, data->irq_2_pin) {
struct IO_APIC_route_entry e;
int pin;
@@ -1704,13 +1590,9 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
pin = entry->pin;
e.w1 = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
- if (e.irr) {
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ if (e.irr)
return true;
- }
}
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-
return false;
}
@@ -1728,7 +1610,8 @@ static inline bool ioapic_prepare_move(struct irq_data *data)
static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
if (unlikely(moveit)) {
- /* Only migrate the irq if the ack has been received.
+ /*
+ * Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
* delayed going to ioapics, and if we reprogram the
@@ -1911,18 +1794,16 @@ static void ioapic_configure_entry(struct irq_data *irqd)
__ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
}
-static int ioapic_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
+static int ioapic_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force)
{
struct irq_data *parent = irq_data->parent_data;
- unsigned long flags;
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
ioapic_configure_entry(irq_data);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
@@ -1941,9 +1822,8 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
*
* Verify that the corresponding Remote-IRR bits are clear.
*/
-static int ioapic_irq_get_chip_state(struct irq_data *irqd,
- enum irqchip_irq_state which,
- bool *state)
+static int ioapic_irq_get_chip_state(struct irq_data *irqd, enum irqchip_irq_state which,
+ bool *state)
{
struct mp_chip_data *mcd = irqd->chip_data;
struct IO_APIC_route_entry rentry;
@@ -1953,7 +1833,8 @@ static int ioapic_irq_get_chip_state(struct irq_data *irqd,
return -EINVAL;
*state = false;
- raw_spin_lock(&ioapic_lock);
+
+ guard(raw_spinlock)(&ioapic_lock);
for_each_irq_pin(p, mcd->irq_2_pin) {
rentry = __ioapic_read_entry(p->apic, p->pin);
/*
@@ -1967,7 +1848,6 @@ static int ioapic_irq_get_chip_state(struct irq_data *irqd,
break;
}
}
- raw_spin_unlock(&ioapic_lock);
return 0;
}
@@ -2008,14 +1888,13 @@ static inline void init_IO_APIC_traps(void)
cfg = irq_cfg(irq);
if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
/*
- * Hmm.. We don't have an entry for this,
- * so default to an old-fashioned 8259
- * interrupt if we can..
+ * Hmm.. We don't have an entry for this, so
+ * default to an old-fashioned 8259 interrupt if we
+ * can. Otherwise set the dummy interrupt chip.
*/
if (irq < nr_legacy_irqs())
legacy_pic->make_irq(irq);
else
- /* Strange. Oh, well.. */
irq_set_chip(irq, &no_irq_chip);
}
}
@@ -2024,20 +1903,17 @@ static inline void init_IO_APIC_traps(void)
/*
* The local APIC irq-chip implementation:
*/
-
static void mask_lapic_irq(struct irq_data *data)
{
- unsigned long v;
+ unsigned long v = apic_read(APIC_LVT0);
- v = apic_read(APIC_LVT0);
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
static void unmask_lapic_irq(struct irq_data *data)
{
- unsigned long v;
+ unsigned long v = apic_read(APIC_LVT0);
- v = apic_read(APIC_LVT0);
apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
@@ -2056,8 +1932,7 @@ static struct irq_chip lapic_chip __read_mostly = {
static void lapic_register_intr(int irq)
{
irq_clear_status_flags(irq, IRQ_LEVEL);
- irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
- "edge");
+ irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge");
}
/*
@@ -2069,9 +1944,9 @@ static void lapic_register_intr(int irq)
*/
static inline void __init unlock_ExtINT_logic(void)
{
- int apic, pin, i;
- struct IO_APIC_route_entry entry0, entry1;
unsigned char save_control, save_freq_select;
+ struct IO_APIC_route_entry entry0, entry1;
+ int apic, pin, i;
u32 apic_id;
pin = find_isa_irq_pin(8, mp_INT);
@@ -2131,10 +2006,10 @@ static int __init disable_timer_pin_setup(char *arg)
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);
-static int mp_alloc_timer_irq(int ioapic, int pin)
+static int __init mp_alloc_timer_irq(int ioapic, int pin)
{
- int irq = -1;
struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
+ int irq = -1;
if (domain) {
struct irq_alloc_info info;
@@ -2142,21 +2017,36 @@ static int mp_alloc_timer_irq(int ioapic, int pin)
ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
info.devid = mpc_ioapic_id(ioapic);
info.ioapic.pin = pin;
- mutex_lock(&ioapic_mutex);
+ guard(mutex)(&ioapic_mutex);
irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
- mutex_unlock(&ioapic_mutex);
}
return irq;
}
+static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
+ int oldapic, int oldpin,
+ int newapic, int newpin)
+{
+ struct irq_pin_list *entry;
+
+ for_each_irq_pin(entry, data->irq_2_pin) {
+ if (entry->apic == oldapic && entry->pin == oldpin) {
+ entry->apic = newapic;
+ entry->pin = newpin;
+ return;
+ }
+ }
+
+ /* Old apic/pin didn't exist, so just add a new one */
+ add_pin_to_irq_node(data, node, newapic, newpin);
+}
+
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
- *
- * FIXME: really need to revamp this for all platforms.
*/
static inline void __init check_timer(void)
{
@@ -2194,9 +2084,8 @@ static inline void __init check_timer(void)
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
- apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
- "apic1=%d pin1=%d apic2=%d pin2=%d\n",
- cfg->vector, apic1, pin1, apic2, pin2);
+ pr_info("..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+ cfg->vector, apic1, pin1, apic2, pin2);
/*
* Some BIOS writers are clueless and report the ExtINTA
@@ -2240,13 +2129,10 @@ static inline void __init check_timer(void)
panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
- apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
- "8254 timer not connected to IO-APIC\n");
+ pr_err("..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
- apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
- "(IRQ0) through the 8259A ...\n");
- apic_printk(APIC_QUIET, KERN_INFO
- "..... (found apic %d pin %d) ...\n", apic2, pin2);
+ pr_info("...trying to set up timer (IRQ0) through the 8259A ...\n");
+ pr_info("..... (found apic %d pin %d) ...\n", apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
@@ -2255,7 +2141,7 @@ static inline void __init check_timer(void)
irq_domain_activate_irq(irq_data, false);
legacy_pic->unmask(0);
if (timer_irq_works()) {
- apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
+ pr_info("....... works.\n");
goto out;
}
/*
@@ -2263,26 +2149,24 @@ static inline void __init check_timer(void)
*/
legacy_pic->mask(0);
clear_IO_APIC_pin(apic2, pin2);
- apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
+ pr_info("....... failed.\n");
}
- apic_printk(APIC_QUIET, KERN_INFO
- "...trying to set up timer as Virtual Wire IRQ...\n");
+ pr_info("...trying to set up timer as Virtual Wire IRQ...\n");
lapic_register_intr(0);
apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
legacy_pic->unmask(0);
if (timer_irq_works()) {
- apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+ pr_info("..... works.\n");
goto out;
}
legacy_pic->mask(0);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
- apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
+ pr_info("..... failed.\n");
- apic_printk(APIC_QUIET, KERN_INFO
- "...trying to set up timer as ExtINT IRQ...\n");
+ pr_info("...trying to set up timer as ExtINT IRQ...\n");
legacy_pic->init(0);
legacy_pic->make_irq(0);
@@ -2292,14 +2176,15 @@ static inline void __init check_timer(void)
unlock_ExtINT_logic();
if (timer_irq_works()) {
- apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+ pr_info("..... works.\n");
goto out;
}
- apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
- if (apic_is_x2apic_enabled())
- apic_printk(APIC_QUIET, KERN_INFO
- "Perhaps problem with the pre-enabled x2apic mode\n"
- "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
+
+ pr_info("..... failed :\n");
+ if (apic_is_x2apic_enabled()) {
+ pr_info("Perhaps problem with the pre-enabled x2apic mode\n"
+ "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
+ }
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
out:
@@ -2327,11 +2212,11 @@ out:
static int mp_irqdomain_create(int ioapic)
{
- struct irq_domain *parent;
+ struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
int hwirqs = mp_ioapic_pin_count(ioapic);
struct ioapic *ip = &ioapics[ioapic];
struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
- struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
+ struct irq_domain *parent;
struct fwnode_handle *fn;
struct irq_fwspec fwspec;
@@ -2367,10 +2252,8 @@ static int mp_irqdomain_create(int ioapic)
return -ENOMEM;
}
- if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
- cfg->type == IOAPIC_DOMAIN_STRICT)
- ioapic_dynirq_base = max(ioapic_dynirq_base,
- gsi_cfg->gsi_end + 1);
+ if (cfg->type == IOAPIC_DOMAIN_LEGACY || cfg->type == IOAPIC_DOMAIN_STRICT)
+ ioapic_dynirq_base = max(ioapic_dynirq_base, gsi_cfg->gsi_end + 1);
return 0;
}
@@ -2397,13 +2280,11 @@ void __init setup_IO_APIC(void)
io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
- apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+ apic_pr_verbose("ENABLING IO-APIC IRQs\n");
for_each_ioapic(ioapic)
BUG_ON(mp_irqdomain_create(ioapic));
- /*
- * Set up IO-APIC IRQ routing.
- */
+ /* Set up IO-APIC IRQ routing. */
x86_init.mpparse.setup_ioapic_ids();
sync_Arb_IDs();
@@ -2417,16 +2298,14 @@ void __init setup_IO_APIC(void)
static void resume_ioapic_id(int ioapic_idx)
{
- unsigned long flags;
union IO_APIC_reg_00 reg_00;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
reg_00.raw = io_apic_read(ioapic_idx, 0);
if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
io_apic_write(ioapic_idx, 0, reg_00.raw);
}
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ioapic_resume(void)
@@ -2440,8 +2319,8 @@ static void ioapic_resume(void)
}
static struct syscore_ops ioapic_syscore_ops = {
- .suspend = save_ioapic_entries,
- .resume = ioapic_resume,
+ .suspend = save_ioapic_entries,
+ .resume = ioapic_resume,
};
static int __init ioapic_init_ops(void)
@@ -2456,15 +2335,13 @@ device_initcall(ioapic_init_ops);
static int io_apic_get_redir_entries(int ioapic)
{
union IO_APIC_reg_01 reg_01;
- unsigned long flags;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
reg_01.raw = io_apic_read(ioapic, 1);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- /* The register returns the maximum index redir index
- * supported, which is one less than the total number of redir
- * entries.
+ /*
+ * The register returns the maximum redir index supported,
+ * which is one less than the total number of redir entries.
*/
return reg_01.bits.entries + 1;
}
@@ -2494,16 +2371,14 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
static DECLARE_BITMAP(apic_id_map, MAX_LOCAL_APIC);
const u32 broadcast_id = 0xF;
union IO_APIC_reg_00 reg_00;
- unsigned long flags;
int i = 0;
/* Initialize the ID map */
if (bitmap_empty(apic_id_map, MAX_LOCAL_APIC))
copy_phys_cpu_present_map(apic_id_map);
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(ioapic, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock)
+ reg_00.raw = io_apic_read(ioapic, 0);
if (apic_id >= broadcast_id) {
pr_warn("IOAPIC[%d]: Invalid apic_id %d, trying %d\n",
@@ -2530,21 +2405,19 @@ static int io_apic_get_unique_id(int ioapic, int apic_id)
if (reg_00.bits.ID != apic_id) {
reg_00.bits.ID = apic_id;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(ioapic, 0, reg_00.raw);
- reg_00.raw = io_apic_read(ioapic, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock) {
+ io_apic_write(ioapic, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(ioapic, 0);
+ }
/* Sanity check */
if (reg_00.bits.ID != apic_id) {
- pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
- ioapic);
+ pr_err("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
return -1;
}
}
- apic_printk(APIC_VERBOSE, KERN_INFO
- "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
+ apic_pr_verbose("IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
return apic_id;
}
@@ -2560,7 +2433,6 @@ static u8 io_apic_unique_id(int idx, u8 id)
{
union IO_APIC_reg_00 reg_00;
DECLARE_BITMAP(used, 256);
- unsigned long flags;
u8 new_id;
int i;
@@ -2576,26 +2448,23 @@ static u8 io_apic_unique_id(int idx, u8 id)
* Read the current id from the ioapic and keep it if
* available.
*/
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(idx, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock)
+ reg_00.raw = io_apic_read(idx, 0);
+
new_id = reg_00.bits.ID;
if (!test_bit(new_id, used)) {
- apic_printk(APIC_VERBOSE, KERN_INFO
- "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
- idx, new_id, id);
+ apic_pr_verbose("IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
+ idx, new_id, id);
return new_id;
}
- /*
- * Get the next free id and write it to the ioapic.
- */
+ /* Get the next free id and write it to the ioapic. */
new_id = find_first_zero_bit(used, 256);
reg_00.bits.ID = new_id;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(idx, 0, reg_00.raw);
- reg_00.raw = io_apic_read(idx, 0);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &ioapic_lock) {
+ io_apic_write(idx, 0, reg_00.raw);
+ reg_00.raw = io_apic_read(idx, 0);
+ }
/* Sanity check */
BUG_ON(reg_00.bits.ID != new_id);
@@ -2605,12 +2474,10 @@ static u8 io_apic_unique_id(int idx, u8 id)
static int io_apic_get_version(int ioapic)
{
- union IO_APIC_reg_01 reg_01;
- unsigned long flags;
+ union IO_APIC_reg_01 reg_01;
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
reg_01.raw = io_apic_read(ioapic, 1);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.version;
}
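
Where guard() covers the remainder of a function, scoped_guard() confines the lock to a single statement or block, as in the conversions above. A sketch with a hypothetical read helper, assuming the same ioapic_lock conventions:

static u32 demo_read_id(int idx)
{
	union IO_APIC_reg_00 reg;

	/* Lock held only for the register read, not for the return path. */
	scoped_guard (raw_spinlock_irqsave, &ioapic_lock)
		reg.raw = io_apic_read(idx, 0);

	return reg.bits.ID;
}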
@@ -2625,8 +2492,8 @@ static struct resource *ioapic_resources;
static struct resource * __init ioapic_setup_resources(void)
{
- unsigned long n;
struct resource *res;
+ unsigned long n;
char *mem;
int i;
@@ -2686,9 +2553,7 @@ void __init io_apic_init_mappings(void)
ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
if (!ioapic_phys) {
- printk(KERN_ERR
- "WARNING: bogus zero IO-APIC "
- "address found in MPTABLE, "
+ pr_err("WARNING: bogus zero IO-APIC address found in MPTABLE, "
"disabling IO/APIC support!\n");
smp_found_config = 0;
ioapic_is_disabled = true;
@@ -2707,9 +2572,8 @@ fake_ioapic_page:
ioapic_phys = __pa(ioapic_phys);
}
io_apic_set_fixmap(idx, ioapic_phys);
- apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
- __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
- ioapic_phys);
+ apic_pr_verbose("mapped IOAPIC to %08lx (%08lx)\n",
+ __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), ioapic_phys);
idx++;
ioapic_res->start = ioapic_phys;
@@ -2720,13 +2584,12 @@ fake_ioapic_page:
void __init ioapic_insert_resources(void)
{
- int i;
struct resource *r = ioapic_resources;
+ int i;
if (!r) {
if (nr_ioapics > 0)
- printk(KERN_ERR
- "IO APIC resources couldn't be allocated.\n");
+ pr_err("IO APIC resources couldn't be allocated.\n");
return;
}
@@ -2746,11 +2609,12 @@ int mp_find_ioapic(u32 gsi)
/* Find the IOAPIC that manages this GSI. */
for_each_ioapic(i) {
struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
+
if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
return i;
}
- printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+ pr_err("ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
return -1;
}
@@ -2789,12 +2653,10 @@ static int bad_ioapic_register(int idx)
static int find_free_ioapic_entry(void)
{
- int idx;
-
- for (idx = 0; idx < MAX_IO_APICS; idx++)
+ for (int idx = 0; idx < MAX_IO_APICS; idx++) {
if (ioapics[idx].nr_registers == 0)
return idx;
-
+ }
return MAX_IO_APICS;
}
@@ -2805,8 +2667,7 @@ static int find_free_ioapic_entry(void)
* @gsi_base: base of GSI associated with the IOAPIC
* @cfg: configuration information for the IOAPIC
*/
-int mp_register_ioapic(int id, u32 address, u32 gsi_base,
- struct ioapic_domain_cfg *cfg)
+int mp_register_ioapic(int id, u32 address, u32 gsi_base, struct ioapic_domain_cfg *cfg)
{
bool hotplug = !!ioapic_initialized;
struct mp_ioapic_gsi *gsi_cfg;
@@ -2817,12 +2678,13 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base,
pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
return -EINVAL;
}
- for_each_ioapic(ioapic)
+
+ for_each_ioapic(ioapic) {
if (ioapics[ioapic].mp_config.apicaddr == address) {
- pr_warn("address 0x%x conflicts with IOAPIC%d\n",
- address, ioapic);
+ pr_warn("address 0x%x conflicts with IOAPIC%d\n", address, ioapic);
return -EEXIST;
}
+ }
idx = find_free_ioapic_entry();
if (idx >= MAX_IO_APICS) {
@@ -2857,8 +2719,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base,
(gsi_end >= gsi_cfg->gsi_base &&
gsi_end <= gsi_cfg->gsi_end)) {
pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
- gsi_base, gsi_end,
- gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+ gsi_base, gsi_end, gsi_cfg->gsi_base, gsi_cfg->gsi_end);
clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
return -ENOSPC;
}
@@ -2892,8 +2753,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base,
ioapics[idx].nr_registers = entries;
pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
- idx, mpc_ioapic_id(idx),
- mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
+ idx, mpc_ioapic_id(idx), mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
gsi_cfg->gsi_base, gsi_cfg->gsi_end);
return 0;
@@ -2904,11 +2764,13 @@ int mp_unregister_ioapic(u32 gsi_base)
int ioapic, pin;
int found = 0;
- for_each_ioapic(ioapic)
+ for_each_ioapic(ioapic) {
if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
found = 1;
break;
}
+ }
+
if (!found) {
pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
return -ENODEV;
@@ -2922,8 +2784,7 @@ int mp_unregister_ioapic(u32 gsi_base)
if (irq >= 0) {
data = irq_get_chip_data(irq);
if (data && data->count) {
- pr_warn("pin%d on IOAPIC%d is still in use.\n",
- pin, ioapic);
+ pr_warn("pin%d on IOAPIC%d is still in use.\n", pin, ioapic);
return -EBUSY;
}
}
@@ -2958,8 +2819,7 @@ static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
if (info && info->ioapic.valid) {
data->is_level = info->ioapic.is_level;
data->active_low = info->ioapic.active_low;
- } else if (__acpi_get_override_irq(gsi, &data->is_level,
- &data->active_low) < 0) {
+ } else if (__acpi_get_override_irq(gsi, &data->is_level, &data->active_low) < 0) {
/* PCI interrupts are always active low level triggered. */
data->is_level = true;
data->active_low = true;
@@ -3017,10 +2877,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOMEM;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
- if (ret < 0) {
- kfree(data);
- return ret;
- }
+ if (ret < 0)
+ goto free_data;
INIT_LIST_HEAD(&data->irq_2_pin);
irq_data->hwirq = info->ioapic.pin;
@@ -3029,7 +2887,10 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
irq_data->chip_data = data;
mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);
- add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+ if (!add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin)) {
+ ret = -ENOMEM;
+ goto free_irqs;
+ }
mp_preconfigure_entry(data);
mp_register_handler(virq, data->is_level);
@@ -3039,11 +2900,15 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
legacy_pic->mask(virq);
local_irq_restore(flags);
- apic_printk(APIC_VERBOSE, KERN_DEBUG
- "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
- ioapic, mpc_ioapic_id(ioapic), pin, virq,
- data->is_level, data->active_low);
+ apic_pr_verbose("IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
+ ioapic, mpc_ioapic_id(ioapic), pin, virq, data->is_level, data->active_low);
return 0;
+
+free_irqs:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+free_data:
+ kfree(data);
+ return ret;
}
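
The free_irqs/free_data labels introduced above follow the usual unwind-in-reverse error pattern. A minimal sketch with hypothetical claim/undo helpers (demo_claim_irqs() mirrors irq_domain_alloc_irqs_parent(), demo_add_pin() mirrors add_pin_to_irq_node()):

static int demo_setup(void)
{
	void *data = kzalloc(64, GFP_KERNEL);
	int ret;

	if (!data)
		return -ENOMEM;

	ret = demo_claim_irqs();	/* hypothetical */
	if (ret < 0)
		goto free_data;

	ret = demo_add_pin();		/* hypothetical */
	if (ret < 0)
		goto free_irqs;

	return 0;

free_irqs:
	demo_free_irqs();		/* undo in reverse order of setup */
free_data:
	kfree(data);
	return ret;
}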
void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
@@ -3056,22 +2921,17 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
irq_data = irq_domain_get_irq_data(domain, virq);
if (irq_data && irq_data->chip_data) {
data = irq_data->chip_data;
- __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
- (int)irq_data->hwirq);
+ __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq);
WARN_ON(!list_empty(&data->irq_2_pin));
kfree(irq_data->chip_data);
}
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
-int mp_irqdomain_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool reserve)
+int mp_irqdomain_activate(struct irq_domain *domain, struct irq_data *irq_data, bool reserve)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ioapic_lock, flags);
+ guard(raw_spinlock_irqsave)(&ioapic_lock);
ioapic_configure_entry(irq_data);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
}
@@ -3079,8 +2939,7 @@ void mp_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irq_data)
{
/* It won't be called for IRQ with multiple IOAPIC pins associated */
- ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
- (int)irq_data->hwirq);
+ ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq);
}
int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1e0fe5f8ab84..015971adadfc 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1190,22 +1190,6 @@ unsigned long amd_get_dr_addr_mask(unsigned int dr)
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
-u32 amd_get_highest_perf(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
- (c->x86_model >= 0x70 && c->x86_model < 0x80)))
- return 166;
-
- if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
- (c->x86_model >= 0x40 && c->x86_model < 0x70)))
- return 166;
-
- return 255;
-}
-EXPORT_SYMBOL_GPL(amd_get_highest_perf);
-
static void zenbleed_check_cpu(void *unused)
{
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 0b69bfbf345d..f642de2ebdac 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -349,9 +349,89 @@ static DECLARE_WORK(disable_freq_invariance_work,
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
+static DEFINE_STATIC_KEY_FALSE(arch_hybrid_cap_scale_key);
+
+struct arch_hybrid_cpu_scale {
+ unsigned long capacity;
+ unsigned long freq_ratio;
+};
+
+static struct arch_hybrid_cpu_scale __percpu *arch_cpu_scale;
+
+/**
+ * arch_enable_hybrid_capacity_scale() - Enable hybrid CPU capacity scaling
+ *
+ * Allocate memory for per-CPU data used by hybrid CPU capacity scaling,
+ * initialize it and set the static key controlling its code paths.
+ *
+ * Must be called before arch_set_cpu_capacity().
+ */
+bool arch_enable_hybrid_capacity_scale(void)
+{
+ int cpu;
+
+ if (static_branch_unlikely(&arch_hybrid_cap_scale_key)) {
+ WARN_ONCE(1, "Hybrid CPU capacity scaling already enabled");
+ return true;
+ }
+
+ arch_cpu_scale = alloc_percpu(struct arch_hybrid_cpu_scale);
+ if (!arch_cpu_scale)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE;
+ per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio = arch_max_freq_ratio;
+ }
+
+ static_branch_enable(&arch_hybrid_cap_scale_key);
+
+ pr_info("Hybrid CPU capacity scaling enabled\n");
+
+ return true;
+}
+
+/**
+ * arch_set_cpu_capacity() - Set scale-invariance parameters for a CPU
+ * @cpu: Target CPU.
+ * @cap: Capacity of @cpu at its maximum frequency, relative to @max_cap.
+ * @max_cap: System-wide maximum CPU capacity.
+ * @cap_freq: Frequency of @cpu corresponding to @cap.
+ * @base_freq: Frequency of @cpu at which MPERF counts.
+ *
+ * The units in which @cap and @max_cap are expressed do not matter, so long
+ * as they are consistent, because the former is effectively divided by the
+ * latter. Analogously for @cap_freq and @base_freq.
+ *
+ * After calling this function for all CPUs, call arch_rebuild_sched_domains()
+ * to let the scheduler know that capacity-aware scheduling can be used going
+ * forward.
+ */
+void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
+ unsigned long cap_freq, unsigned long base_freq)
+{
+ if (static_branch_likely(&arch_hybrid_cap_scale_key)) {
+ WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity,
+ div_u64(cap << SCHED_CAPACITY_SHIFT, max_cap));
+ WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio,
+ div_u64(cap_freq << SCHED_CAPACITY_SHIFT, base_freq));
+ } else {
+ WARN_ONCE(1, "Hybrid CPU capacity scaling not enabled");
+ }
+}
+
+unsigned long arch_scale_cpu_capacity(int cpu)
+{
+ if (static_branch_unlikely(&arch_hybrid_cap_scale_key))
+ return READ_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity);
+
+ return SCHED_CAPACITY_SCALE;
+}
+EXPORT_SYMBOL_GPL(arch_scale_cpu_capacity);
+
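+/*
+ * Worked example (hypothetical numbers): an E-core with cap = 640 and
+ * max_cap = 1024 stores (640 << SCHED_CAPACITY_SHIFT) / 1024 = 640,
+ * i.e. ~62% of SCHED_CAPACITY_SCALE; a core with cap == max_cap
+ * stores exactly SCHED_CAPACITY_SCALE (1024). freq_ratio is the same
+ * fixed-point division applied to cap_freq/base_freq.
+ */
+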
static void scale_freq_tick(u64 acnt, u64 mcnt)
{
- u64 freq_scale;
+ u64 freq_scale, freq_ratio;
if (!arch_scale_freq_invariant())
return;
@@ -359,7 +439,12 @@ static void scale_freq_tick(u64 acnt, u64 mcnt)
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
goto error;
- if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
+ if (static_branch_unlikely(&arch_hybrid_cap_scale_key))
+ freq_ratio = READ_ONCE(this_cpu_ptr(arch_cpu_scale)->freq_ratio);
+ else
+ freq_ratio = arch_max_freq_ratio;
+
+ if (check_mul_overflow(mcnt, freq_ratio, &mcnt) || !mcnt)
goto error;
freq_scale = div64_u64(acnt, mcnt);
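
To make the scale computation concrete, a worked example with hypothetical counter values:

/*
 * Suppose during one tick acnt = 2 * mcnt, while freq_ratio is
 * 4 * SCHED_CAPACITY_SCALE (max turbo is 4x the base frequency). Then:
 *
 *   freq_scale = (acnt << 2*SCHED_CAPACITY_SHIFT) / (mcnt * freq_ratio)
 *              = 2 * SCHED_CAPACITY_SCALE^2 / (4 * SCHED_CAPACITY_SCALE)
 *              = SCHED_CAPACITY_SCALE / 2
 *
 * i.e. the CPU ran at half its maximum frequency during that tick.
 */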
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 45675da354f3..d1915427b4ff 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -233,7 +233,8 @@ static void x86_amd_ssb_disable(void)
#define pr_fmt(fmt) "MDS: " fmt
/* Default mitigation for MDS-affected CPUs */
-static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static enum mds_mitigations mds_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;
static const char * const mds_strings[] = {
@@ -293,7 +294,8 @@ enum taa_mitigations {
};
/* Default mitigation for TAA-affected CPUs */
-static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static enum taa_mitigations taa_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_VERW : TAA_MITIGATION_OFF;
static bool taa_nosmt __ro_after_init;
static const char * const taa_strings[] = {
@@ -391,7 +393,8 @@ enum mmio_mitigations {
};
/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
-static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
+static enum mmio_mitigations mmio_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_VERW : MMIO_MITIGATION_OFF;
static bool mmio_nosmt __ro_after_init = false;
static const char * const mmio_strings[] = {
@@ -605,7 +608,8 @@ enum srbds_mitigations {
SRBDS_MITIGATION_HYPERVISOR,
};
-static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+static enum srbds_mitigations srbds_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF;
static const char * const srbds_strings[] = {
[SRBDS_MITIGATION_OFF] = "Vulnerable",
@@ -731,11 +735,8 @@ enum gds_mitigations {
GDS_MITIGATION_HYPERVISOR,
};
-#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
-static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
-#else
-static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
-#endif
+static enum gds_mitigations gds_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_FULL : GDS_MITIGATION_OFF;
static const char * const gds_strings[] = {
[GDS_MITIGATION_OFF] = "Vulnerable",
@@ -871,7 +872,8 @@ enum spectre_v1_mitigation {
};
static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
- SPECTRE_V1_MITIGATION_AUTO;
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
+ SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
static const char * const spectre_v1_strings[] = {
[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
@@ -986,7 +988,7 @@ static const char * const retbleed_strings[] = {
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
- RETBLEED_CMD_AUTO;
+ IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF;
static int __ro_after_init retbleed_nosmt = false;
@@ -1447,17 +1449,18 @@ static void __init spec_v2_print_cond(const char *reason, bool secure)
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
- enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
+ enum spectre_v2_mitigation_cmd cmd;
char arg[20];
int ret, i;
+ cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
cpu_mitigations_off())
return SPECTRE_V2_CMD_NONE;
ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
if (ret < 0)
- return SPECTRE_V2_CMD_AUTO;
+ return cmd;
for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
if (!match_option(arg, ret, mitigation_options[i].option))
@@ -1467,8 +1470,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
}
if (i >= ARRAY_SIZE(mitigation_options)) {
- pr_err("unknown option (%s). Switching to AUTO select\n", arg);
- return SPECTRE_V2_CMD_AUTO;
+ pr_err("unknown option (%s). Switching to default mode\n", arg);
+ return cmd;
}
if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -2021,10 +2024,12 @@ static const struct {
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
- enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+ enum ssb_mitigation_cmd cmd;
char arg[20];
int ret, i;
+ cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
+ SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
cpu_mitigations_off()) {
return SPEC_STORE_BYPASS_CMD_NONE;
@@ -2032,7 +2037,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
arg, sizeof(arg));
if (ret < 0)
- return SPEC_STORE_BYPASS_CMD_AUTO;
+ return cmd;
for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
if (!match_option(arg, ret, ssb_mitigation_options[i].option))
@@ -2043,8 +2048,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
}
if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
- pr_err("unknown option (%s). Switching to AUTO select\n", arg);
- return SPEC_STORE_BYPASS_CMD_AUTO;
+ pr_err("unknown option (%s). Switching to default mode\n", arg);
+ return cmd;
}
}
@@ -2371,7 +2376,8 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#define pr_fmt(fmt) "L1TF: " fmt
/* Default mitigation for L1TF-affected CPUs */
-enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+enum l1tf_mitigations l1tf_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
@@ -2551,10 +2557,9 @@ static void __init srso_select_mitigation(void)
{
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
- if (cpu_mitigations_off())
- return;
-
- if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
+ cpu_mitigations_off() ||
+ srso_cmd == SRSO_CMD_OFF) {
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
return;
@@ -2585,11 +2590,6 @@ static void __init srso_select_mitigation(void)
}
switch (srso_cmd) {
- case SRSO_CMD_OFF:
- if (boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
- return;
-
case SRSO_CMD_MICROCODE:
if (has_microcode) {
srso_mitigation = SRSO_MITIGATION_MICROCODE;
@@ -2643,6 +2643,8 @@ static void __init srso_select_mitigation(void)
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
}
break;
+ default:
+ break;
}
out:
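
The recurring change in this file is selecting each mitigation's default at compile time via IS_ENABLED(), replacing #ifdef pairs. A minimal sketch of the pattern with a hypothetical CONFIG_MITIGATION_DEMO:

enum demo_mitigations { DEMO_MITIGATION_OFF, DEMO_MITIGATION_FULL };

/* The default resolves at compile time; no #ifdef pair needed. */
static enum demo_mitigations demo_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_DEMO) ? DEMO_MITIGATION_FULL
					   : DEMO_MITIGATION_OFF;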
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d4e539d4e158..07a34d723505 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1165,8 +1165,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB),
- VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
+ VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
@@ -1510,6 +1510,11 @@ static void __init cpu_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
+ /* Minimize the window between FRED being detected as available and being disabled when "fred=on" is not given. */
+ arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
+ if (arglen != 2 || strncmp(arg, "on", 2))
+ setup_clear_cpu_cap(X86_FEATURE_FRED);
+
arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
if (arglen <= 0)
return;
@@ -2171,7 +2176,7 @@ static inline void tss_setup_io_bitmap(struct tss_struct *tss)
* Setup everything needed to handle exceptions from the IDT, including the IST
* exceptions which use paranoid_entry().
*/
-void cpu_init_exception_handling(void)
+void cpu_init_exception_handling(bool boot_cpu)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
int cpu = raw_smp_processor_id();
@@ -2190,10 +2195,23 @@ void cpu_init_exception_handling(void)
/* GHCB needs to be setup to handle #VC. */
setup_ghcb();
+ if (cpu_feature_enabled(X86_FEATURE_FRED)) {
+ /* The boot CPU has enabled FRED during early boot */
+ if (!boot_cpu)
+ cpu_init_fred_exceptions();
+
+ cpu_init_fred_rsps();
+ } else {
+ load_current_idt();
+ }
+}
+
+void __init cpu_init_replace_early_idt(void)
+{
if (cpu_feature_enabled(X86_FEATURE_FRED))
cpu_init_fred_exceptions();
else
- load_current_idt();
+ idt_setup_early_pf();
}
/*
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index b7d9f530ae16..8bd84114c2d9 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -83,7 +83,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
{ X86_FEATURE_SHSTK, X86_FEATURE_XSAVES },
{ X86_FEATURE_FRED, X86_FEATURE_LKGS },
- { X86_FEATURE_FRED, X86_FEATURE_WRMSRNS },
{}
};
diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c
index 1640ae76548f..4a4118784c13 100644
--- a/arch/x86/kernel/cpu/feat_ctl.c
+++ b/arch/x86/kernel/cpu/feat_ctl.c
@@ -188,7 +188,7 @@ update_caps:
update_sgx:
if (!(msr & FEAT_CTL_SGX_ENABLED)) {
if (enable_sgx_kvm || enable_sgx_driver)
- pr_err_once("SGX disabled by BIOS.\n");
+ pr_err_once("SGX disabled or unsupported by BIOS.\n");
clear_cpu_cap(c, X86_FEATURE_SGX);
return;
}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 08b95a35b5cb..e7656cbef68d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -311,16 +311,18 @@ static void early_init_intel(struct cpuinfo_x86 *c)
}
/*
- * There is a known erratum on Pentium III and Core Solo
- * and Core Duo CPUs.
- * " Page with PAT set to WC while associated MTRR is UC
- * may consolidate to UC "
- * Because of this erratum, it is better to stick with
- * setting WC in MTRR rather than using PAT on these CPUs.
+ * PAT is broken on early family 6 CPUs, the last of which
+ * is "Yonah" where the erratum is named "AN7":
*
- * Enable PAT WC only on P4, Core 2 or later CPUs.
+ * Page with PAT (Page Attribute Table) Set to USWC
+ * (Uncacheable Speculative Write Combine) While
+ * Associated MTRR (Memory Type Range Register) Is UC
+ * (Uncacheable) May Consolidate to UC
+ *
+ * Disable PAT and fall back to MTRR on these CPUs.
*/
- if (c->x86 == 6 && c->x86_model < 15)
+ if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
+ c->x86_vfm <= INTEL_CORE_YONAH)
clear_cpu_cap(c, X86_FEATURE_PAT);
/*
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 9a0133ef7e20..14bf8c232e45 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -780,7 +780,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
struct mce m;
- mce_setup(&m);
+ mce_prep_record(&m);
m.status = status;
m.misc = misc;
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index 7f7309ff67d0..3885fe05f01e 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -44,7 +44,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
else
lsb = PAGE_SHIFT;
- mce_setup(&m);
+ mce_prep_record(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
const u64 *i_mce = ((const u64 *) (ctx_info + 1));
+ bool apicid_found = false;
unsigned int cpu;
struct mce m;
@@ -97,20 +98,19 @@ int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
if (ctx_info->reg_arr_size < 48)
return -EINVAL;
- mce_setup(&m);
-
- m.extcpu = -1;
- m.socketid = -1;
-
for_each_possible_cpu(cpu) {
if (cpu_data(cpu).topo.initial_apicid == lapic_id) {
- m.extcpu = cpu;
- m.socketid = cpu_data(m.extcpu).topo.pkg_id;
+ apicid_found = true;
break;
}
}
- m.apicid = lapic_id;
+ if (!apicid_found)
+ return -EINVAL;
+
+ mce_prep_record_common(&m);
+ mce_prep_record_per_cpu(cpu, &m);
+
m.bank = (ctx_info->msr_addr >> 4) & 0xFF;
m.status = *i_mce;
m.addr = *(i_mce + 1);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b85ec7a4ec9e..2a938f429c4d 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -117,20 +117,32 @@ static struct irq_work mce_irq_work;
*/
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
-/* Do initial initialization of a struct mce */
-void mce_setup(struct mce *m)
+void mce_prep_record_common(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
- m->cpu = m->extcpu = smp_processor_id();
+
+ m->cpuid = cpuid_eax(1);
+ m->cpuvendor = boot_cpu_data.x86_vendor;
+ m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
/* need the internal __ version to avoid deadlocks */
- m->time = __ktime_get_real_seconds();
- m->cpuvendor = boot_cpu_data.x86_vendor;
- m->cpuid = cpuid_eax(1);
- m->socketid = cpu_data(m->extcpu).topo.pkg_id;
- m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
- m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
- m->ppin = cpu_data(m->extcpu).ppin;
- m->microcode = boot_cpu_data.microcode;
+ m->time = __ktime_get_real_seconds();
+}
+
+void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m)
+{
+ m->cpu = cpu;
+ m->extcpu = cpu;
+ m->apicid = cpu_data(cpu).topo.initial_apicid;
+ m->microcode = cpu_data(cpu).microcode;
+ m->ppin = topology_ppin(cpu);
+ m->socketid = topology_physical_package_id(cpu);
+}
+
+/* Do initial initialization of a struct mce */
+void mce_prep_record(struct mce *m)
+{
+ mce_prep_record_common(m);
+ mce_prep_record_per_cpu(smp_processor_id(), m);
}
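
The split lets callers that report on behalf of another CPU, such as the APEI path earlier in this patch, pick the target explicitly. A sketch, assuming cpu is a valid possible-CPU index:

	struct mce m;

	mce_prep_record_common(&m);		/* CPU-independent fields */
	mce_prep_record_per_cpu(cpu, &m);	/* topology/microcode of the target CPU */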
DEFINE_PER_CPU(struct mce, injectm);
@@ -436,11 +448,11 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v)
static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
/*
- * Enable instrumentation around mce_setup() which calls external
+ * Enable instrumentation around mce_prep_record() which calls external
* facilities.
*/
instrumentation_begin();
- mce_setup(m);
+ mce_prep_record(m);
instrumentation_end();
m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index a05ac0716ecf..a3aa0199222e 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -314,7 +314,7 @@ static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
/*
* Need to give user space some time to set everything up,
- * so do it a jiffie or two later everywhere.
+ * so do it a jiffy or two later everywhere.
*/
schedule_timeout(2);
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 01f8f03969e6..43c7f3b71df5 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -261,6 +261,8 @@ enum mca_msr {
/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);
+void mce_prep_record_common(struct mce *m);
+void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m);
#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c0d56c02b8da..f63b051f25a0 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -89,6 +89,31 @@ static struct equiv_cpu_table {
struct equiv_cpu_entry *entry;
} equiv_table;
+union zen_patch_rev {
+ struct {
+ __u32 rev : 8,
+ stepping : 4,
+ model : 4,
+ __reserved : 4,
+ ext_model : 4,
+ ext_fam : 8;
+ };
+ __u32 ucode_rev;
+};
+
+union cpuid_1_eax {
+ struct {
+ __u32 stepping : 4,
+ model : 4,
+ family : 4,
+ __reserved0 : 4,
+ ext_model : 4,
+ ext_fam : 8,
+ __reserved1 : 4;
+ };
+ __u32 full;
+};
+
/*
* This points to the current valid container of microcode patches which we will
* save from the initrd/builtin before jettisoning its contents. @mc is the
@@ -96,7 +121,6 @@ static struct equiv_cpu_table {
*/
struct cont_desc {
struct microcode_amd *mc;
- u32 cpuid_1_eax;
u32 psize;
u8 *data;
size_t size;
@@ -109,10 +133,42 @@ struct cont_desc {
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
+/*
+ * This is CPUID(1).EAX on the BSP. It is used in two ways:
+ *
+ * 1. To ignore the equivalence table on Zen1 and newer.
+ *
+ * 2. To match which patches to load, because the patch revision ID
+ *    already contains the f/m/s the microcode is destined for.
+ */
+static u32 bsp_cpuid_1_eax __ro_after_init;
+
+static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
+{
+ union zen_patch_rev p;
+ union cpuid_1_eax c;
+
+ p.ucode_rev = val;
+ c.full = 0;
+
+ c.stepping = p.stepping;
+ c.model = p.model;
+ c.ext_model = p.ext_model;
+ c.family = 0xf;
+ c.ext_fam = p.ext_fam;
+
+ return c;
+}
+
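+/*
+ * Worked decode under the bitfield layouts above (hypothetical but
+ * plausible Zen3 revision): val = 0x0a20120e gives ext_fam = 0x0a,
+ * ext_model = 0x2, model = 0x1, stepping = 0x2 (the rev byte 0x0e is
+ * ignored). The result encodes family 0x19 (0xf + 0x0a), model 0x21,
+ * stepping 2, i.e. c.full == 0x00a20f12:
+ *
+ *	union cpuid_1_eax c = ucode_rev_to_cpuid(0x0a20120e);
+ */
+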
static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
unsigned int i;
+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return 0;
+
if (!et || !et->num_entries)
return 0;
@@ -159,6 +215,10 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
if (!verify_container(buf, buf_size))
return false;
+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return true;
+
cont_type = hdr[1];
if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
pr_debug("Wrong microcode container equivalence table type: %u.\n",
@@ -222,8 +282,9 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
* exceed the per-family maximum). @sh_psize is the size read from the section
* header.
*/
-static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
+static unsigned int __verify_patch_size(u32 sh_psize, size_t buf_size)
{
+ u8 family = x86_family(bsp_cpuid_1_eax);
u32 max_size;
if (family >= 0x15)
@@ -258,9 +319,9 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
* positive: patch is not for this family, skip it
* 0: success
*/
-static int
-verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
+static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
+ u8 family = x86_family(bsp_cpuid_1_eax);
struct microcode_header_amd *mc_hdr;
unsigned int ret;
u32 sh_psize;
@@ -286,7 +347,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
return -1;
}
- ret = __verify_patch_size(family, sh_psize, buf_size);
+ ret = __verify_patch_size(sh_psize, buf_size);
if (!ret) {
pr_debug("Per-family patch size mismatch.\n");
return -1;
@@ -308,6 +369,15 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
return 0;
}
+static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
+{
+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
+ else
+ return eq_id == mc->hdr.processor_rev_id;
+}
+
/*
* This scans the ucode blob for the proper container as we can have multiple
* containers glued together. Returns the equivalence ID from the equivalence
@@ -336,7 +406,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
* doesn't contain a patch for the CPU, scan through the whole container
* so that it can be skipped in case there are other containers appended.
*/
- eq_id = find_equiv_id(&table, desc->cpuid_1_eax);
+ eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);
buf += hdr[2] + CONTAINER_HDR_SZ;
size -= hdr[2] + CONTAINER_HDR_SZ;
@@ -350,7 +420,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u32 patch_size;
int ret;
- ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
+ ret = verify_patch(buf, size, &patch_size);
if (ret < 0) {
/*
* Patch verification failed, skip to the next container, if
@@ -363,7 +433,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
}
mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
- if (eq_id == mc->hdr.processor_rev_id) {
+ if (mc_patch_matches(mc, eq_id)) {
desc->psize = patch_size;
desc->mc = mc;
}
@@ -421,6 +491,7 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
/* verify patch application was successful */
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
if (rev != mc->hdr.patch_id)
return -1;
@@ -438,14 +509,12 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
*
* Returns true if container found (sets @desc), false otherwise.
*/
-static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
+static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
bool ret = false;
- desc.cpuid_1_eax = cpuid_1_eax;
-
scan_containers(ucode, size, &desc);
mc = desc.mc;
@@ -463,9 +532,10 @@ static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, siz
return !__apply_microcode_amd(mc);
}
-static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
+static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ u8 family = x86_family(bsp_cpuid_1_eax);
struct firmware fw;
if (IS_ENABLED(CONFIG_X86_32))
@@ -484,11 +554,11 @@ static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
return false;
}
-static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __init find_blobs_in_containers(struct cpio_data *ret)
{
struct cpio_data cp;
- if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
+ if (!get_builtin_microcode(&cp))
cp = find_microcode_in_initrd(ucode_path);
*ret = cp;
@@ -499,16 +569,18 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
struct cpio_data cp = { };
u32 dummy;
+ bsp_cpuid_1_eax = cpuid_1_eax;
+
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
- find_blobs_in_containers(cpuid_1_eax, &cp);
+ find_blobs_in_containers(&cp);
if (!(cp.data && cp.size))
return;
- if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
+ if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}
@@ -525,12 +597,10 @@ static int __init save_microcode_in_initrd(void)
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
- find_blobs_in_containers(cpuid_1_eax, &cp);
+ find_blobs_in_containers(&cp);
if (!(cp.data && cp.size))
return -EINVAL;
- desc.cpuid_1_eax = cpuid_1_eax;
-
scan_containers(cp.data, cp.size, &desc);
if (!desc.mc)
return -EINVAL;
@@ -543,26 +613,65 @@ static int __init save_microcode_in_initrd(void)
}
early_initcall(save_microcode_in_initrd);
+static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n)
+{
+ /* Zen and newer hardcode the f/m/s in the patch ID */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
+ union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
+ union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);
+
+ /* Zap stepping */
+ p_cid.stepping = 0;
+ n_cid.stepping = 0;
+
+ return p_cid.full == n_cid.full;
+ } else {
+ return p->equiv_cpu == n->equiv_cpu;
+ }
+}
+
/*
* a small, trivial cache of per-family ucode patches
*/
-static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
+static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
struct ucode_patch *p;
+ struct ucode_patch n;
+
+ n.equiv_cpu = equiv_cpu;
+ n.patch_id = uci->cpu_sig.rev;
+
+ WARN_ON_ONCE(!n.patch_id);
list_for_each_entry(p, &microcode_cache, plist)
- if (p->equiv_cpu == equiv_cpu)
+ if (patch_cpus_equivalent(p, &n))
return p;
+
return NULL;
}
+static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
+{
+ /* Zen and newer hardcode the f/m/s in the patch ID */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
+ union zen_patch_rev zp, zn;
+
+ zp.ucode_rev = p->patch_id;
+ zn.ucode_rev = n->patch_id;
+
+ return zn.rev > zp.rev;
+ } else {
+ return n->patch_id > p->patch_id;
+ }
+}
+
static void update_cache(struct ucode_patch *new_patch)
{
struct ucode_patch *p;
list_for_each_entry(p, &microcode_cache, plist) {
- if (p->equiv_cpu == new_patch->equiv_cpu) {
- if (p->patch_id >= new_patch->patch_id) {
+ if (patch_cpus_equivalent(p, new_patch)) {
+ if (!patch_newer(p, new_patch)) {
/* we already have the latest patch */
kfree(new_patch->data);
kfree(new_patch);
@@ -593,13 +702,22 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u16 equiv_id;
+ u32 rev, dummy __always_unused;
+ u16 equiv_id = 0;
- equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
- if (!equiv_id)
- return NULL;
+ /* fetch rev if not populated yet: */
+ if (!uci->cpu_sig.rev) {
+ rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ uci->cpu_sig.rev = rev;
+ }
+
+ if (x86_family(bsp_cpuid_1_eax) < 0x17) {
+ equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
+ if (!equiv_id)
+ return NULL;
+ }
- return cache_find_patch(equiv_id);
+ return cache_find_patch(uci, equiv_id);
}
void reload_ucode_amd(unsigned int cpu)
@@ -649,7 +767,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct ucode_cpu_info *uci;
struct ucode_patch *p;
enum ucode_state ret;
- u32 rev, dummy __always_unused;
+ u32 rev;
BUG_ON(raw_smp_processor_id() != cpu);
@@ -659,11 +777,11 @@ static enum ucode_state apply_microcode_amd(int cpu)
if (!p)
return UCODE_NFOUND;
+ rev = uci->cpu_sig.rev;
+
mc_amd = p->data;
uci->mc = p->data;
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
/* need to apply patch? */
if (rev > mc_amd->hdr.patch_id) {
ret = UCODE_OK;
@@ -709,6 +827,10 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
hdr = (const u32 *)buf;
equiv_tbl_len = hdr[2];
+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ goto out;
+
equiv_table.entry = vmalloc(equiv_tbl_len);
if (!equiv_table.entry) {
pr_err("failed to allocate equivalent CPU table\n");
@@ -718,12 +840,16 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);
+out:
/* add header length */
return equiv_tbl_len + CONTAINER_HDR_SZ;
}
static void free_equiv_cpu_table(void)
{
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return;
+
vfree(equiv_table.entry);
memset(&equiv_table, 0, sizeof(equiv_table));
}
@@ -749,7 +875,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
u16 proc_id;
int ret;
- ret = verify_patch(family, fw, leftover, patch_size);
+ ret = verify_patch(fw, leftover, patch_size);
if (ret)
return ret;
@@ -774,7 +900,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
patch->patch_id = mc_hdr->patch_id;
patch->equiv_cpu = proc_id;
- pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
+ pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
__func__, patch->patch_id, proc_id);
/* ... and add to cache. */
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index e0fd57a8ba84..d18078834ded 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
-#include <linux/i8253.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@@ -199,8 +198,8 @@ static void hv_machine_shutdown(void)
* Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
* corrupts the old VP Assist Pages and can crash the kexec kernel.
*/
- if (kexec_in_progress && hyperv_init_cpuhp > 0)
- cpuhp_remove_state(hyperv_init_cpuhp);
+ if (kexec_in_progress)
+ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
/* The function calls stop_other_cpus(). */
native_machine_shutdown();
@@ -424,6 +423,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.priv_high & HV_ISOLATION) {
@@ -449,9 +449,23 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;
if (!ms_hyperv.paravisor_present) {
- /* To be supported: more work is required. */
+ /*
+ * Mark the Hyper-V TSC page feature as disabled
+ * in a TDX VM without paravisor so that the
+ * Invariant TSC, which is a better clocksource
+ * anyway, is used instead.
+ */
ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;
+ /*
+ * The Invariant TSC is expected to be available
+ * in a TDX VM without paravisor, but if not,
+ * print a warning message. The slower Hyper-V MSR-based
+ * Ref Counter should end up being the clocksource.
+ */
+ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
+ pr_warn("Hyper-V: Invariant TSC is unavailable\n");
+
/* HV_MSR_CRASH_CTL is unsupported. */
ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
@@ -522,16 +536,6 @@ static void __init ms_hyperv_init_platform(void)
if (efi_enabled(EFI_BOOT))
x86_platform.get_nmi_reason = hv_get_nmi_reason;
- /*
- * Hyper-V VMs have a PIT emulation quirk such that zeroing the
- * counter register during PIT shutdown restarts the PIT. So it
- * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
- * to false tells pit_shutdown() not to zero the counter so that
- * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
- * and setting this value has no effect.
- */
- i8253_clear_counter_on_shutdown = false;
-
#if IS_ENABLED(CONFIG_HYPERV)
if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) ||
ms_hyperv.paravisor_present)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 27892e57c4ef..9ace84486499 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -475,24 +475,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
struct sgx_epc_page *page;
int nid_of_current = numa_node_id();
- int nid = nid_of_current;
+ int nid_start, nid;
- if (node_isset(nid_of_current, sgx_numa_mask)) {
- page = __sgx_alloc_epc_page_from_node(nid_of_current);
- if (page)
- return page;
- }
-
- /* Fall back to the non-local NUMA nodes: */
- while (true) {
- nid = next_node_in(nid, sgx_numa_mask);
- if (nid == nid_of_current)
- break;
+ /*
+ * Try local node first. If it doesn't have an EPC section,
+ * fall back to the non-local NUMA nodes.
+ */
+ if (node_isset(nid_of_current, sgx_numa_mask))
+ nid_start = nid_of_current;
+ else
+ nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+ nid = nid_start;
+ do {
page = __sgx_alloc_epc_page_from_node(nid);
if (page)
return page;
- }
+
+ nid = next_node_in(nid, sgx_numa_mask);
+ } while (nid != nid_start);
return ERR_PTR(-ENOMEM);
}
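
The do/while above visits every node in the mask exactly once, starting from the preferred one. The shape of the loop in isolation, with a hypothetical try_alloc_on() per-node allocator:

static struct sgx_epc_page *demo_alloc(int preferred, nodemask_t mask)
{
	struct sgx_epc_page *page;
	int start, nid;

	start = node_isset(preferred, mask) ? preferred
					    : next_node_in(preferred, mask);
	nid = start;
	do {
		page = try_alloc_on(nid);	/* hypothetical */
		if (page)
			return page;
		nid = next_node_in(nid, mask);
	} while (nid != start);

	return NULL;
}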
@@ -732,7 +733,7 @@ out:
return 0;
}
-/**
+/*
* A section metric is concatenated in a way that @low bits 12-31 define the
* bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
* metric.
@@ -847,6 +848,13 @@ static bool __init sgx_page_cache_init(void)
return false;
}
+ for_each_online_node(nid) {
+ if (!node_isset(nid, sgx_numa_mask) &&
+ node_state(nid, N_MEMORY) && node_state(nid, N_CPU))
+ pr_info("node%d has both CPUs and memory but doesn't have an EPC section\n",
+ nid);
+ }
+
return true;
}
@@ -895,10 +903,10 @@ int sgx_set_attribute(unsigned long *allowed_attributes,
{
struct fd f = fdget(attribute_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- if (f.file->f_op != &sgx_provision_fops) {
+ if (fd_file(f)->f_op != &sgx_provision_fops) {
fdput(f);
return -EINVAL;
}
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index 53935b4d62e3..9535a6507db7 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -11,15 +11,15 @@
static __init int eisa_bus_probe(void)
{
- void __iomem *p;
+ u32 *p;
if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return 0;
- p = ioremap(0x0FFFD9, 4);
- if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
+ p = memremap(0x0FFFD9, 4, MEMREMAP_WB);
+ if (p && *p == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
EISA_bus = 1;
- iounmap(p);
+ memunmap(p);
return 0;
}
subsys_initcall(eisa_bus_probe);
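
The 32-bit comparison works because x86 is little-endian; spelling out the constant:

/* 'E'=0x45, 'I'=0x49, 'S'=0x53, 'A'=0x41, so the compared value is
 * 0x41534945 and *p matches when the bytes at 0x0FFFD9 read "EISA". */
u32 sig = 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24);	/* 0x41534945 */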
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 247f2225aa9f..1065ab995305 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -64,6 +64,16 @@ setfx:
}
/*
+ * Update the value of PKRU register that was already pushed onto the signal frame.
+ */
+static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
+{
+ if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+ return 0;
+ return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
+}
+
+/*
* Signal frame handlers.
*/
static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf)
@@ -156,10 +166,17 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
return !err;
}
-static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
{
- if (use_xsave())
- return xsave_to_user_sigframe(buf);
+ int err = 0;
+
+ if (use_xsave()) {
+ err = xsave_to_user_sigframe(buf);
+ if (!err)
+ err = update_pkru_in_sigframe(buf, pkru);
+ return err;
+ }
+
if (use_fxsr())
return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
else
@@ -185,7 +202,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
* For [f]xsave state, update the SW reserved fields in the [f]xsave frame
* indicating the absence/presence of the extended state to the user.
*/
-bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru)
{
struct task_struct *tsk = current;
struct fpstate *fpstate = tsk->thread.fpu.fpstate;
@@ -228,7 +245,7 @@ retry:
fpregs_restore_userregs();
pagefault_disable();
- ret = copy_fpregs_to_sigframe(buf_fx);
+ ret = copy_fpregs_to_sigframe(buf_fx, pkru);
pagefault_enable();
fpregs_unlock();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 1339f8328db5..22abb5ee0cf2 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
+#include <linux/coredump.h>
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
@@ -23,6 +24,8 @@
#include <asm/prctl.h>
#include <asm/elf.h>
+#include <uapi/asm/elf.h>
+
#include "context.h"
#include "internal.h"
#include "legacy.h"
@@ -996,6 +999,19 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
+/*
+ * Given an xstate feature nr, calculate where in the xsave buffer the state is.
+ * The xsave buffer should be in standard format, not compacted (e.g. user mode
+ * signal frames).
+ */
+void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr)
+{
+ if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
+ return NULL;
+
+ return (void __user *)xsave + xstate_offsets[xfeature_nr];
+}
+
#ifdef CONFIG_ARCH_HAS_PKEYS
/*
@@ -1841,3 +1857,89 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+
+#ifdef CONFIG_COREDUMP
+static const char owner_name[] = "LINUX";
+
+/*
+ * Dump type, size, offset and flag values for every xfeature that is present.
+ */
+static int dump_xsave_layout_desc(struct coredump_params *cprm)
+{
+ int num_records = 0;
+ int i;
+
+ for_each_extended_xfeature(i, fpu_user_cfg.max_features) {
+ struct x86_xfeat_component xc = {
+ .type = i,
+ .size = xstate_sizes[i],
+ .offset = xstate_offsets[i],
+ /* reserved for future use */
+ .flags = 0,
+ };
+
+ if (!dump_emit(cprm, &xc, sizeof(xc)))
+ return 0;
+
+ num_records++;
+ }
+ return num_records;
+}
+
+static u32 get_xsave_desc_size(void)
+{
+ u32 cnt = 0;
+ u32 i;
+
+ for_each_extended_xfeature(i, fpu_user_cfg.max_features)
+ cnt++;
+
+ return cnt * (sizeof(struct x86_xfeat_component));
+}
+
+int elf_coredump_extra_notes_write(struct coredump_params *cprm)
+{
+ int num_records = 0;
+ struct elf_note en;
+
+ if (!fpu_user_cfg.max_features)
+ return 0;
+
+ en.n_namesz = sizeof(owner_name);
+ en.n_descsz = get_xsave_desc_size();
+ en.n_type = NT_X86_XSAVE_LAYOUT;
+
+ if (!dump_emit(cprm, &en, sizeof(en)))
+ return 1;
+ if (!dump_emit(cprm, owner_name, en.n_namesz))
+ return 1;
+ if (!dump_align(cprm, 4))
+ return 1;
+
+ num_records = dump_xsave_layout_desc(cprm);
+ if (!num_records)
+ return 1;
+
+ /* Total size must match the number of records times the record size */
+ if ((sizeof(struct x86_xfeat_component) * num_records) != en.n_descsz)
+ return 1;
+
+ return 0;
+}
+
+int elf_coredump_extra_notes_size(void)
+{
+ int size;
+
+ if (!fpu_user_cfg.max_features)
+ return 0;
+
+ /* .note header */
+ size = sizeof(struct elf_note);
+ /* Name plus alignment to 4 bytes */
+ size += roundup(sizeof(owner_name), 4);
+ size += get_xsave_desc_size();
+
+ return size;
+}
+#endif /* CONFIG_COREDUMP */
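
For reference, the NT_X86_XSAVE_LAYOUT payload is nothing more than the
elf_note header, the NUL-terminated "LINUX" owner name padded to 4 bytes,
and an array of x86_xfeat_component records. A minimal sketch of how a
consumer might walk an already-extracted note descriptor; the struct
mirrors <uapi/asm/elf.h>, everything else here is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors struct x86_xfeat_component: four 32-bit fields. */
    struct xfeat_component {
            uint32_t type;
            uint32_t size;
            uint32_t offset;
            uint32_t flags;
    };

    /* desc/descsz: NT_X86_XSAVE_LAYOUT descriptor taken from a core file. */
    static void dump_xsave_layout(const void *desc, uint32_t descsz)
    {
            const struct xfeat_component *xc = desc;

            for (uint32_t i = 0; i < descsz / sizeof(*xc); i++)
                    printf("xfeature %u: %u bytes at offset %u (flags %#x)\n",
                           xc[i].type, xc[i].size, xc[i].offset, xc[i].flags);
    }
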
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index afb404cd2059..0b86a5002c84 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -54,6 +54,8 @@ extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);
+extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr);
+
static inline u64 xfeatures_mask_supervisor(void)
{
return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
index 4bcd8791ad96..8d32c3f48abc 100644
--- a/arch/x86/kernel/fred.c
+++ b/arch/x86/kernel/fred.c
@@ -21,17 +21,53 @@
#define FRED_STKLVL(vector, lvl) ((lvl) << (2 * (vector)))
+DEFINE_PER_CPU(unsigned long, fred_rsp0);
+EXPORT_PER_CPU_SYMBOL(fred_rsp0);
+
void cpu_init_fred_exceptions(void)
{
/* When FRED is enabled by default, remove this log message */
pr_info("Initialize FRED on CPU%d\n", smp_processor_id());
+ /*
+ * If a kernel event is delivered before a CPU goes to user level for
+ * the first time, its SS is NULL thus NULL is pushed into the SS field
+ * of the FRED stack frame. But before ERETS is executed, the CPU may
+ * context switch to another task and go to user level. Then when the
+ * CPU comes back to kernel mode, SS is changed to __KERNEL_DS. Later
+ * when ERETS is executed to return from the kernel event handler, a #GP
+ * fault is generated because SS doesn't match the SS saved in the FRED
+ * stack frame.
+ *
+ * Initialize SS to __KERNEL_DS when enabling FRED to avoid such #GPs.
+ */
+ loadsegment(ss, __KERNEL_DS);
+
wrmsrl(MSR_IA32_FRED_CONFIG,
/* Reserve for CALL emulation */
FRED_CONFIG_REDZONE |
FRED_CONFIG_INT_STKLVL(0) |
FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user));
+ wrmsrl(MSR_IA32_FRED_STKLVLS, 0);
+ wrmsrl(MSR_IA32_FRED_RSP0, 0);
+ wrmsrl(MSR_IA32_FRED_RSP1, 0);
+ wrmsrl(MSR_IA32_FRED_RSP2, 0);
+ wrmsrl(MSR_IA32_FRED_RSP3, 0);
+
+ /* Enable FRED */
+ cr4_set_bits(X86_CR4_FRED);
+ /* Any further IDT use is a bug */
+ idt_invalidate();
+
+ /* Use int $0x80 for 32-bit system calls in FRED mode */
+ setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
+ setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+}
+
+/* Must be called after setup_cpu_entry_areas() */
+void cpu_init_fred_rsps(void)
+{
/*
* The purpose of separate stacks for NMI, #DB and #MC *in the kernel*
* (remember that user space faults are always taken on stack level 0)
@@ -47,13 +83,4 @@ void cpu_init_fred_exceptions(void)
wrmsrl(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB));
wrmsrl(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI));
wrmsrl(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF));
-
- /* Enable FRED */
- cr4_set_bits(X86_CR4_FRED);
- /* Any further IDT use is a bug */
- idt_invalidate();
-
- /* Use int $0x80 for 32-bit system calls in FRED mode */
- setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
- setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a817ed0724d1..4b9d4557fc94 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -559,10 +559,11 @@ void early_setup_idt(void)
*/
void __head startup_64_setup_gdt_idt(void)
{
+ struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
void *handler = NULL;
struct desc_ptr startup_gdt_descr = {
- .address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)),
+ .address = (unsigned long)&RIP_REL_REF(*gdt),
.size = GDT_SIZE - 1,
};
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 2b7999a1a50a..80e262bb627f 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -8,6 +8,7 @@
#include <linux/timex.h>
#include <linux/i8253.h>
+#include <asm/hypervisor.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/time.h>
@@ -39,9 +40,15 @@ static bool __init use_pit(void)
bool __init pit_timer_init(void)
{
- if (!use_pit())
+ if (!use_pit()) {
+ /*
+ * Don't just ignore the PIT. Ensure it's stopped, because
+ * VMMs otherwise steal CPU time just to pointlessly waggle
+ * the (masked) IRQ.
+ */
+ clockevent_i8253_disable();
return false;
-
+ }
clockevent_i8253_init(true);
global_clock_event = &i8253_clockevent;
return true;
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index df337860612d..cd8ed1edbf9e 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/serial_8250.h>
+#include <linux/acpi.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index cc0f7f70b17b..9c9ac606893e 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -28,6 +28,7 @@
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/cpu.h>
+#include <asm/efi.h>
#ifdef CONFIG_ACPI
/*
@@ -87,6 +88,8 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p)
{
#ifdef CONFIG_EFI
unsigned long mstart, mend;
+ void *kaddr;
+ int ret;
if (!efi_enabled(EFI_BOOT))
return 0;
@@ -102,6 +105,30 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p)
if (!mstart)
return 0;
+ ret = kernel_ident_mapping_init(info, level4p, mstart, mend);
+ if (ret)
+ return ret;
+
+ kaddr = memremap(mstart, mend - mstart, MEMREMAP_WB);
+ if (!kaddr) {
+ pr_err("Could not map UEFI system table\n");
+ return -ENOMEM;
+ }
+
+ mstart = efi_config_table;
+
+ if (efi_enabled(EFI_64BIT)) {
+ efi_system_table_64_t *stbl = (efi_system_table_64_t *)kaddr;
+
+ mend = mstart + sizeof(efi_config_table_64_t) * stbl->nr_tables;
+ } else {
+ efi_system_table_32_t *stbl = (efi_system_table_32_t *)kaddr;
+
+ mend = mstart + sizeof(efi_config_table_32_t) * stbl->nr_tables;
+ }
+
+ memunmap(kaddr);
+
return kernel_ident_mapping_init(info, level4p, mstart, mend);
#endif
return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index c94dec6a1834..1f54eedc3015 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/range.h>
+#include <linux/acpi.h>
#include <asm/pci-direct.h>
#include <linux/sort.h>
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e89171b0347a..4a1b1b28abf9 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -68,7 +68,7 @@ static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
memcpy(str, m->bustype, 6);
str[6] = 0;
- apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
+ apic_pr_verbose("Bus #%d is %s\n", m->busid, str);
}
static void __init MP_bus_info(struct mpc_bus *m)
@@ -417,7 +417,7 @@ static unsigned long __init get_mpc_size(unsigned long physptr)
mpc = early_memremap(physptr, PAGE_SIZE);
size = mpc->length;
early_memunmap(mpc, PAGE_SIZE);
- apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
+ apic_pr_verbose(" mpc: %lx-%lx\n", physptr, physptr + size);
return size;
}
@@ -560,8 +560,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
struct mpf_intel *mpf;
int ret = 0;
- apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
- base, base + length - 1);
+ apic_pr_verbose("Scan for SMP in [mem %#010lx-%#010lx]\n", base, base + length - 1);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
@@ -683,13 +682,13 @@ static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
int i;
- apic_printk(APIC_VERBOSE, "OLD ");
+ apic_pr_verbose("OLD ");
print_mp_irq_info(m);
i = get_MP_intsrc_index(m);
if (i > 0) {
memcpy(m, &mp_irqs[i], sizeof(*m));
- apic_printk(APIC_VERBOSE, "NEW ");
+ apic_pr_verbose("NEW ");
print_mp_irq_info(&mp_irqs[i]);
return;
}
@@ -772,7 +771,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
continue;
if (nr_m_spare > 0) {
- apic_printk(APIC_VERBOSE, "*NEW* found\n");
+ apic_pr_verbose("*NEW* found\n");
nr_m_spare--;
memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
m_spare[nr_m_spare] = NULL;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6d3d20e3e43a..226472332a70 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -798,6 +798,32 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
#define LAM_U57_BITS 6
+static void enable_lam_func(void *__mm)
+{
+ struct mm_struct *mm = __mm;
+ unsigned long lam;
+
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) {
+ lam = mm_lam_cr3_mask(mm);
+ write_cr3(__read_cr3() | lam);
+ cpu_tlbstate_update_lam(lam, mm_untag_mask(mm));
+ }
+}
+
+static void mm_enable_lam(struct mm_struct *mm)
+{
+ mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
+ mm->context.untag_mask = ~GENMASK(62, 57);
+
+ /*
+ * Even though the process must still be single-threaded at this
+ * point, kernel threads may be using the mm. IPI those kernel
+ * threads if they exist.
+ */
+ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
+ set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+}
+
static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
{
if (!cpu_feature_enabled(X86_FEATURE_LAM))
@@ -814,25 +840,21 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
if (mmap_write_lock_killable(mm))
return -EINTR;
+ /*
+ * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from
+ * being enabled unless the process is single-threaded:
+ */
if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
mmap_write_unlock(mm);
return -EBUSY;
}
- if (!nr_bits) {
- mmap_write_unlock(mm);
- return -EINVAL;
- } else if (nr_bits <= LAM_U57_BITS) {
- mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
- mm->context.untag_mask = ~GENMASK(62, 57);
- } else {
+ if (!nr_bits || nr_bits > LAM_U57_BITS) {
mmap_write_unlock(mm);
return -EINVAL;
}
- write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
- set_tlbstate_lam_mode(mm);
- set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+ mm_enable_lam(mm);
mmap_write_unlock(mm);
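
For context, the only way into mm_enable_lam() is the ARCH_ENABLE_TAGGED_ADDR
arch_prctl(), issued while the process is still single-threaded. A hedged
user-space sketch (the 0x4002 value comes from <asm/prctl.h>; this assumes
LAM-capable hardware and trims error handling):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef ARCH_ENABLE_TAGGED_ADDR
    #define ARCH_ENABLE_TAGGED_ADDR 0x4002  /* from <asm/prctl.h> */
    #endif

    int main(void)
    {
            /* Request LAM_U57: up to 6 tag bits in address bits 62:57. */
            if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6)) {
                    perror("arch_prctl");  /* e.g. -EBUSY once multi-threaded */
                    return 1;
            }

            char c = 'x';
            /* The tag in bits 62:57 is now ignored on dereference. */
            char *tagged = (char *)((uintptr_t)&c | (0x2fUL << 57));
            return *tagged != 'x';
    }
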
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 042c9a0334e9..e9e88c342f75 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -170,6 +170,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
wbinvd
.Lsme_off:
+ /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
movq %rcx, %r11
call swap_pages
@@ -258,7 +259,7 @@ SYM_CODE_END(virtual_mapped)
/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
UNWIND_HINT_END_OF_STACK
- movq %rdi, %rcx /* Put the page_list in %rcx */
+ movq %rdi, %rcx /* Put the indirection_page in %rcx */
xorl %edi, %edi
xorl %esi, %esi
jmp 1f
@@ -289,18 +290,21 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
movq %rcx, %rsi /* For every source page do a copy */
andq $0xfffffffffffff000, %rsi
- movq %rdi, %rdx
- movq %rsi, %rax
+ movq %rdi, %rdx /* Save destination page to %rdx */
+ movq %rsi, %rax /* Save source page to %rax */
+ /* copy source page to swap page */
movq %r10, %rdi
movl $512, %ecx
rep ; movsq
+ /* copy destination page to source page */
movq %rax, %rdi
movq %rdx, %rsi
movl $512, %ecx
rep ; movsq
+ /* copy swap page to destination page */
movq %rdx, %rdi
movq %r10, %rsi
movl $512, %ecx
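
The three rep-movsq blocks above are a plain three-way exchange through the
scratch page; an equivalent C sketch (page-aligned, PAGE_SIZE-sized buffers
assumed):

    #include <string.h>

    #define PAGE_SIZE 4096

    /* Exchange *dst and *src via a scratch page, as swap_pages does. */
    static void swap_one_page(void *dst, void *src, void *swap)
    {
            memcpy(swap, src, PAGE_SIZE);   /* source page -> swap page   */
            memcpy(src, dst, PAGE_SIZE);    /* dest page   -> source page */
            memcpy(dst, swap, PAGE_SIZE);   /* swap page   -> dest page   */
    }
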
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6129dc2ba784..f1fea506e20f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1039,7 +1039,12 @@ void __init setup_arch(char **cmdline_p)
init_mem_mapping();
- idt_setup_early_pf();
+ /*
+ * init_mem_mapping() relies on the early IDT page fault handling.
+ * Now either enable FRED or install the real page fault handler
+ * for 64-bit in the IDT.
+ */
+ cpu_init_replace_early_idt();
/*
* Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 31b6f5dddfc2..5f441039b572 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -61,6 +61,24 @@ static inline int is_x32_frame(struct ksignal *ksig)
}
/*
+ * Enable all pkeys temporarily, so as to ensure that both the current
+ * execution stack as well as the alternate signal stack are writeable.
+ * The application can use any of the available pkeys to protect the
+ * alternate signal stack, and we don't know which one it is, so enable
+ * all. The PKRU register will be reset to init_pkru later in the flow,
+ * in fpu__clear_user_states(), and it is the application's responsibility
+ * to enable the appropriate pkey as the first step in the signal handler
+ * so that the handler does not segfault.
+ */
+static inline u32 sig_prepare_pkru(void)
+{
+ u32 orig_pkru = read_pkru();
+
+ write_pkru(0);
+ return orig_pkru;
+}
+
+/*
* Set up a signal frame.
*/
@@ -84,6 +102,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
unsigned long math_size = 0;
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
+ u32 pkru;
/* redzone */
if (!ia32_frame)
@@ -138,9 +157,17 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
}
+ /* Update PKRU to enable access to the alternate signal stack. */
+ pkru = sig_prepare_pkru();
/* save i387 and extended state */
- if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
+ if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru)) {
+ /*
+ * Restore PKRU to the original, user-defined value; disable
+ * extra pkeys enabled for the alternate signal stack, if any.
+ */
+ write_pkru(pkru);
return (void __user *)-1L;
+ }
return (void __user *)sp;
}
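
Since PKRU is reset to init_pkru before the handler body runs, a handler
whose alternate stack sits behind a non-default pkey has to restore access
itself before touching that stack. A hedged sketch at the glibc level
(stack_pkey is a hypothetical variable holding the altstack's key; in
practice the compiler must not spill to the stack before pkey_set(), so
real code often does this in a small asm stub):

    #define _GNU_SOURCE
    #include <sys/mman.h>

    extern int stack_pkey;  /* hypothetical: pkey guarding the altstack */

    static void handler(int sig)
    {
            /* First step: regain access, PKRU was reset on handler entry. */
            pkey_set(stack_pkey, 0);

            /* ... normal signal handling ... */
    }
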
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 8a94053c5444..ee9453891901 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -260,13 +260,13 @@ SYSCALL_DEFINE0(rt_sigreturn)
set_current_blocked(&set);
- if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- if (restore_signal_shadow_stack())
+ if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
goto badframe;
- if (restore_altstack(&frame->uc.uc_stack))
+ if (restore_signal_shadow_stack())
goto badframe;
return regs->ax;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0c35207320cb..766f092dab80 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -60,6 +60,7 @@
#include <linux/stackprotector.h>
#include <linux/cpuhotplug.h>
#include <linux/mc146818rtc.h>
+#include <linux/acpi.h>
#include <asm/acpi.h>
#include <asm/cacheinfo.h>
@@ -246,7 +247,7 @@ static void notrace start_secondary(void *unused)
__flush_tlb_all();
}
- cpu_init_exception_handling();
+ cpu_init_exception_handling(false);
/*
* Load the microcode before reaching the AP alive synchronization
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 01d7cd85ef97..87f8c9a71c49 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -121,7 +121,7 @@ static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
}
unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
+arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
@@ -158,7 +158,7 @@ arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned l
}
unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0,
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
@@ -228,20 +228,5 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
- return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-
-unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
-}
-
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, flags, 0);
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags, 0);
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4fa0b17e5043..d05392db5d0f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -42,6 +42,7 @@
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>
+#include <linux/ubsan.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
@@ -91,6 +92,47 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
return *(unsigned short *)addr == INSN_UD2;
}
+/*
+ * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
+ * If it's a UD1, get the ModRM byte to pass along to UBSan.
+ */
+__always_inline int decode_bug(unsigned long addr, u32 *imm)
+{
+ u8 v;
+
+ if (addr < TASK_SIZE_MAX)
+ return BUG_NONE;
+
+ v = *(u8 *)(addr++);
+ if (v == INSN_ASOP)
+ v = *(u8 *)(addr++);
+ if (v != OPCODE_ESCAPE)
+ return BUG_NONE;
+
+ v = *(u8 *)(addr++);
+ if (v == SECOND_BYTE_OPCODE_UD2)
+ return BUG_UD2;
+
+ if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
+ return BUG_NONE;
+
+ /* Retrieve the immediate (type value) for the UBSAN UD1 */
+ v = *(u8 *)(addr++);
+ if (X86_MODRM_RM(v) == 4)
+ addr++;
+
+ *imm = 0;
+ if (X86_MODRM_MOD(v) == 1)
+ *imm = *(u8 *)addr;
+ else if (X86_MODRM_MOD(v) == 2)
+ *imm = *(u32 *)addr;
+ else
+ WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));
+
+ return BUG_UD1;
+}
+
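
For reference, a few hand-assembled UD1 encodings of the shape decode_bug()
parses; ModRM.mod selects the immediate width and rm == 4 means a SIB byte
precedes it (byte values purely illustrative):

    /*
     * 0f b9 40 2a             ud1 eax, [rax + 0x2a]        mod=01 -> imm8
     * 0f b9 80 78 56 34 12    ud1 eax, [rax + 0x12345678]  mod=10 -> imm32
     * 0f b9 44 24 2a          ud1 eax, [rsp + 0x2a]        rm=4 -> SIB, then imm8
     */
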
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
struct pt_regs *regs, long error_code)
@@ -216,6 +258,8 @@ static inline void handle_invalid_op(struct pt_regs *regs)
static noinstr bool handle_bug(struct pt_regs *regs)
{
bool handled = false;
+ int ud_type;
+ u32 imm;
/*
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
@@ -223,7 +267,8 @@ static noinstr bool handle_bug(struct pt_regs *regs)
* irqentry_enter().
*/
kmsan_unpoison_entry_regs(regs);
- if (!is_valid_bugaddr(regs->ip))
+ ud_type = decode_bug(regs->ip, &imm);
+ if (ud_type == BUG_NONE)
return handled;
/*
@@ -236,10 +281,14 @@ static noinstr bool handle_bug(struct pt_regs *regs)
*/
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_enable();
- if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
- handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
- regs->ip += LEN_UD2;
- handled = true;
+ if (ud_type == BUG_UD2) {
+ if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
+ handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
+ regs->ip += LEN_UD2;
+ handled = true;
+ }
+ } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
+ pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
}
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_disable();
@@ -1402,34 +1451,8 @@ DEFINE_IDTENTRY_SW(iret_error)
}
#endif
-/* Do not enable FRED by default yet. */
-static bool enable_fred __ro_after_init = false;
-
-#ifdef CONFIG_X86_FRED
-static int __init fred_setup(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (!cpu_feature_enabled(X86_FEATURE_FRED))
- return 0;
-
- if (!strcmp(str, "on"))
- enable_fred = true;
- else if (!strcmp(str, "off"))
- enable_fred = false;
- else
- pr_warn("invalid FRED option: 'fred=%s'\n", str);
- return 0;
-}
-early_param("fred", fred_setup);
-#endif
-
void __init trap_init(void)
{
- if (cpu_feature_enabled(X86_FEATURE_FRED) && !enable_fred)
- setup_clear_cpu_cap(X86_FEATURE_FRED);
-
/* Init cpu_entry_area before IST entries are set up */
setup_cpu_entry_areas();
@@ -1437,7 +1460,7 @@ void __init trap_init(void)
sev_es_init_vc_handling();
/* Initialize TSS before setting up traps so ISTs work */
- cpu_init_exception_handling();
+ cpu_init_exception_handling(true);
/* Setup traps as cpu_init() might #GP */
if (!cpu_feature_enabled(X86_FEATURE_FRED))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d4462fb26299..dfe6847fd99e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -28,6 +28,7 @@
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
+#include <asm/topology.h>
#include <asm/uv/uv.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
@@ -1253,15 +1254,12 @@ static void __init check_system_tsc_reliable(void)
* - TSC which does not stop in C-States
* - the TSC_ADJUST register which allows to detect even minimal
* modifications
- * - not more than two sockets. As the number of sockets cannot be
- * evaluated at the early boot stage where this has to be
- * invoked, check the number of online memory nodes as a
- * fallback solution which is an reasonable estimate.
+ * - not more than four packages
*/
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
- nr_online_nodes <= 4)
+ topology_max_packages() <= 4)
tsc_disable_clocksource_watchdog();
}
@@ -1290,7 +1288,7 @@ int unsynchronized_tsc(void)
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
/* assume multi socket systems are not synchronized: */
- if (num_possible_cpus() > 1)
+ if (topology_max_packages() > 1)
return 1;
}
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 82b128d3f309..0a2bbd674a6d 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -8,6 +8,7 @@
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <asm/acpi.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index de05a26b0b7d..7813d28b082f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4674,16 +4674,14 @@ out_unlock:
bool kvm_mmu_may_ignore_guest_pat(void)
{
/*
- * When EPT is enabled (shadow_memtype_mask is non-zero), the CPU does
- * not support self-snoop (or is affected by an erratum), and the VM
+ * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
* has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
* honor the memtype from the guest's PAT so that guest accesses to
* memory that is DMA'd aren't cached against the guest's wishes. As a
* result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
- * KVM _always_ ignores or honors guest PAT, i.e. doesn't toggle SPTE
- * bits in response to non-coherent device (un)registration.
+ * KVM _always_ ignores guest PAT (when EPT is enabled).
*/
- return !static_cpu_has(X86_FEATURE_SELFSNOOP) && shadow_memtype_mask;
+ return shadow_memtype_mask;
}
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 714c517dd4b7..0b851ef937f2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -534,10 +534,10 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
int ret;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ret = sev_issue_cmd_external_user(f.file, id, data, error);
+ ret = sev_issue_cmd_external_user(fd_file(f), id, data, error);
fdput(f);
return ret;
@@ -2078,15 +2078,15 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
bool charged = false;
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!file_is_kvm(f.file)) {
+ if (!file_is_kvm(fd_file(f))) {
ret = -EBADF;
goto out_fput;
}
- source_kvm = f.file->private_data;
+ source_kvm = fd_file(f)->private_data;
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto out_fput;
@@ -2803,15 +2803,15 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
struct kvm_sev_info *source_sev, *mirror_sev;
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!file_is_kvm(f.file)) {
+ if (!file_is_kvm(fd_file(f))) {
ret = -EBADF;
goto e_source_fput;
}
- source_kvm = f.file->private_data;
+ source_kvm = fd_file(f)->private_data;
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto e_source_fput;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f18c2d8c7476..733a0c45d1a6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7659,13 +7659,11 @@ u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
/*
* Force WB and ignore guest PAT if the VM does NOT have a non-coherent
- * device attached and the CPU doesn't support self-snoop. Letting the
- * guest control memory types on Intel CPUs without self-snoop may
- * result in unexpected behavior, and so KVM's (historical) ABI is to
- * trust the guest to behave only as a last resort.
+ * device attached. Letting the guest control memory types on Intel
+ * CPUs may result in unexpected behavior, and so KVM's ABI is to trust
+ * the guest to behave only as a last resort.
*/
- if (!static_cpu_has(X86_FEATURE_SELFSNOOP) &&
- !kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 8d3a00e5c528..690fbf48e853 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -57,7 +57,6 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
-obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 9332b36a1091..628833afee37 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
+#include <linux/numa_memblks.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index e91500a80963..575f863f3c75 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -164,7 +164,7 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
}
}
#else
-static inline void percpu_setup_exception_stacks(unsigned int cpu)
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index c45127265f2f..437e96fb4977 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -99,18 +99,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
+ bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
- if (info->direct_gbpages) {
- pud_t pudval;
+ /* if this is already a gbpage, this portion is already mapped */
+ if (pud_leaf(*pud))
+ continue;
+
+ /* Is using a gbpage allowed? */
+ use_gbpage = info->direct_gbpages;
- if (pud_present(*pud))
- continue;
+ /* Don't use gbpage if it maps more than the requested region. */
+ /* at the beginning: */
+ use_gbpage &= ((addr & ~PUD_MASK) == 0);
+ /* ... or at the end: */
+ use_gbpage &= ((next & ~PUD_MASK) == 0);
+
+ /* Never overwrite existing mappings */
+ use_gbpage &= !pud_present(*pud);
+
+ if (use_gbpage) {
+ pud_t pudval;
- addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index aa7d279321ea..70b02fc61d93 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/ioremap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
@@ -457,7 +458,7 @@ void iounmap(volatile void __iomem *addr)
{
struct vm_struct *p, *o;
- if ((void __force *)addr <= high_memory)
+ if (WARN_ON_ONCE(!is_ioremap_addr((void __force *)addr)))
return;
/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 6ce10e3c6228..64e5cdb2460a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>
+#include <linux/numa_memblks.h>
#include <asm/e820/api.h>
#include <asm/proto.h>
@@ -22,16 +23,6 @@
#include "numa_internal.h"
int numa_off;
-nodemask_t numa_nodes_parsed __initdata;
-
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-
-static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
-static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
-
-static int numa_distance_cnt;
-static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
@@ -124,456 +115,27 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
-static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
- struct numa_meminfo *mi)
-{
- /* ignore zero length blks */
- if (start == end)
- return 0;
-
- /* whine about and ignore invalid blks */
- if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- nid, start, end - 1);
- return 0;
- }
-
- if (mi->nr_blks >= NR_NODE_MEMBLKS) {
- pr_err("too many memblk ranges\n");
- return -EINVAL;
- }
-
- mi->blk[mi->nr_blks].start = start;
- mi->blk[mi->nr_blks].end = end;
- mi->blk[mi->nr_blks].nid = nid;
- mi->nr_blks++;
- return 0;
-}
-
-/**
- * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
- * @idx: Index of memblk to remove
- * @mi: numa_meminfo to remove memblk from
- *
- * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
- * decrementing @mi->nr_blks.
- */
-void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
-{
- mi->nr_blks--;
- memmove(&mi->blk[idx], &mi->blk[idx + 1],
- (mi->nr_blks - idx) * sizeof(mi->blk[0]));
-}
-
-/**
- * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
- * @dst: numa_meminfo to append block to
- * @idx: Index of memblk to remove
- * @src: numa_meminfo to remove memblk from
- */
-static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
- struct numa_meminfo *src)
-{
- dst->blk[dst->nr_blks++] = src->blk[idx];
- numa_remove_memblk_from(idx, src);
-}
-
-/**
- * numa_add_memblk - Add one numa_memblk to numa_meminfo
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * Add a new memblk to the default numa_meminfo.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- return numa_add_memblk_to(nid, start, end, &numa_meminfo);
-}
-
-/* Allocate NODE_DATA for a node on the local memory */
-static void __init alloc_node_data(int nid)
+static int __init numa_register_nodes(void)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
- u64 nd_pa;
- void *nd;
- int tnid;
-
- /*
- * Allocate node data. Try node-local memory and then any node.
- * Never allocate in DMA zone.
- */
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
- nd_size, nid);
- return;
- }
- nd = __va(nd_pa);
-
- /* report and initialize */
- printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
-
- node_set_online(nid);
-}
-
-/**
- * numa_cleanup_meminfo - Cleanup a numa_meminfo
- * @mi: numa_meminfo to clean up
- *
- * Sanitize @mi by merging and removing unnecessary memblks. Also check for
- * conflicts and clear unused memblks.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
-{
- const u64 low = 0;
- const u64 high = PFN_PHYS(max_pfn);
- int i, j, k;
-
- /* first, trim all entries */
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- /* move / save reserved memory ranges */
- if (!memblock_overlaps_region(&memblock.memory,
- bi->start, bi->end - bi->start)) {
- numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
- continue;
- }
-
- /* make sure all non-reserved blocks are inside the limits */
- bi->start = max(bi->start, low);
-
- /* preserve info for non-RAM areas above 'max_pfn': */
- if (bi->end > high) {
- numa_add_memblk_to(bi->nid, high, bi->end,
- &numa_reserved_meminfo);
- bi->end = high;
- }
-
- /* and there's no empty block */
- if (bi->start >= bi->end)
- numa_remove_memblk_from(i--, mi);
- }
-
- /* merge neighboring / overlapping entries */
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- for (j = i + 1; j < mi->nr_blks; j++) {
- struct numa_memblk *bj = &mi->blk[j];
- u64 start, end;
-
- /*
- * See whether there are overlapping blocks. Whine
- * about but allow overlaps of the same nid. They
- * will be merged below.
- */
- if (bi->end > bj->start && bi->start < bj->end) {
- if (bi->nid != bj->nid) {
- pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1,
- bj->nid, bj->start, bj->end - 1);
- return -EINVAL;
- }
- pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1,
- bj->start, bj->end - 1);
- }
-
- /*
- * Join together blocks on the same node, holes
- * between which don't overlap with memory on other
- * nodes.
- */
- if (bi->nid != bj->nid)
- continue;
- start = min(bi->start, bj->start);
- end = max(bi->end, bj->end);
- for (k = 0; k < mi->nr_blks; k++) {
- struct numa_memblk *bk = &mi->blk[k];
-
- if (bi->nid == bk->nid)
- continue;
- if (start < bk->end && end > bk->start)
- break;
- }
- if (k < mi->nr_blks)
- continue;
- printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1, bj->start,
- bj->end - 1, start, end - 1);
- bi->start = start;
- bi->end = end;
- numa_remove_memblk_from(j--, mi);
- }
- }
-
- /* clear unused ones */
- for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
- mi->blk[i].start = mi->blk[i].end = 0;
- mi->blk[i].nid = NUMA_NO_NODE;
- }
-
- return 0;
-}
-
-/*
- * Set nodes, which have memory in @mi, in *@nodemask.
- */
-static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
- const struct numa_meminfo *mi)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
- if (mi->blk[i].start != mi->blk[i].end &&
- mi->blk[i].nid != NUMA_NO_NODE)
- node_set(mi->blk[i].nid, *nodemask);
-}
-
-/**
- * numa_reset_distance - Reset NUMA distance table
- *
- * The current table is freed. The next numa_set_distance() call will
- * create a new one.
- */
-void __init numa_reset_distance(void)
-{
- size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
-
- /* numa_distance could be 1LU marking allocation failure, test cnt */
- if (numa_distance_cnt)
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL; /* enable table creation */
-}
-
-static int __init numa_alloc_distance(void)
-{
- nodemask_t nodes_parsed;
- size_t size;
- int i, j, cnt = 0;
- u64 phys;
-
- /* size the new table and allocate it */
- nodes_parsed = numa_nodes_parsed;
- numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
-
- for_each_node_mask(i, nodes_parsed)
- cnt = i;
- cnt++;
- size = cnt * cnt * sizeof(numa_distance[0]);
-
- phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
- PFN_PHYS(max_pfn_mapped));
- if (!phys) {
- pr_warn("Warning: can't allocate distance table!\n");
- /* don't retry until explicitly reset */
- numa_distance = (void *)1LU;
- return -ENOMEM;
- }
-
- numa_distance = __va(phys);
- numa_distance_cnt = cnt;
-
- /* fill with the default distances */
- for (i = 0; i < cnt; i++)
- for (j = 0; j < cnt; j++)
- numa_distance[i * cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
- printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance - Set NUMA distance from one NUMA to another
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance. If distance table
- * doesn't exist, one which is large enough to accommodate all the currently
- * known nodes will be created.
- *
- * If such table cannot be allocated, a warning is printed and further
- * calls are ignored until the distance table is reset with
- * numa_reset_distance().
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * at the time of table creation or @distance doesn't make sense, the call
- * is ignored.
- * This is to allow simplification of specific NUMA config implementations.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance && numa_alloc_distance() < 0)
- return;
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
-/*
- * Mark all currently memblock-reserved physical memory (which covers the
- * kernel's own memory ranges) as hot-unswappable.
- */
-static void __init numa_clear_kernel_node_hotplug(void)
-{
- nodemask_t reserved_nodemask = NODE_MASK_NONE;
- struct memblock_region *mb_region;
- int i;
-
- /*
- * We have to do some preprocessing of memblock regions, to
- * make them suitable for reservation.
- *
- * At this time, all memory regions reserved by memblock are
- * used by the kernel, but those regions are not split up
- * along node boundaries yet, and don't necessarily have their
- * node ID set yet either.
- *
- * So iterate over all memory known to the x86 architecture,
- * and use those ranges to set the nid in memblock.reserved.
- * This will split up the memblock regions along node
- * boundaries and will set the node IDs as well.
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = numa_meminfo.blk + i;
- int ret;
-
- ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
- WARN_ON_ONCE(ret);
- }
-
- /*
- * Now go over all reserved memblock regions, to construct a
- * node mask of all kernel reserved memory areas.
- *
- * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
- * numa_meminfo might not include all memblock.reserved
- * memory ranges, because quirks such as trim_snb_memory()
- * reserve specific pages for Sandy Bridge graphics. ]
- */
- for_each_reserved_mem_region(mb_region) {
- int nid = memblock_get_region_node(mb_region);
-
- if (nid != NUMA_NO_NODE)
- node_set(nid, reserved_nodemask);
- }
-
- /*
- * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
- * belonging to the reserved node mask.
- *
- * Note that this will include memory regions that reside
- * on nodes that contain kernel memory - entire nodes
- * become hot-unpluggable:
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = numa_meminfo.blk + i;
-
- if (!node_isset(mb->nid, reserved_nodemask))
- continue;
-
- memblock_clear_hotplug(mb->start, mb->end - mb->start);
- }
-}
-
-static int __init numa_register_memblks(struct numa_meminfo *mi)
-{
- int i, nid;
-
- /* Account for nodes with cpus and no memory */
- node_possible_map = numa_nodes_parsed;
- numa_nodemask_from_meminfo(&node_possible_map, mi);
- if (WARN_ON(nodes_empty(node_possible_map)))
- return -EINVAL;
-
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *mb = &mi->blk[i];
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.memory, mb->nid);
- }
-
- /*
- * At very early time, the kernel have to use some memory such as
- * loading the kernel image. We cannot prevent this anyway. So any
- * node the kernel resides in should be un-hotpluggable.
- *
- * And when we come here, alloc node data won't fail.
- */
- numa_clear_kernel_node_hotplug();
-
- /*
- * If sections array is gonna be used for pfn -> nid mapping, check
- * whether its granularity is fine enough.
- */
- if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
- unsigned long pfn_align = node_map_pfn_alignment();
-
- if (pfn_align && pfn_align < PAGES_PER_SECTION) {
- pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
- PFN_PHYS(pfn_align) >> 20,
- PFN_PHYS(PAGES_PER_SECTION) >> 20);
- return -EINVAL;
- }
- }
+ int nid;
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
/* Finally register nodes. */
for_each_node_mask(nid, node_possible_map) {
- u64 start = PFN_PHYS(max_pfn);
- u64 end = 0;
-
- for (i = 0; i < mi->nr_blks; i++) {
- if (nid != mi->blk[i].nid)
- continue;
- start = min(mi->blk[i].start, start);
- end = max(mi->blk[i].end, end);
- }
+ unsigned long start_pfn, end_pfn;
- if (start >= end)
+ /*
+ * Note, get_pfn_range_for_nid() depends on
+ * memblock_set_node() having already happened
+ */
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ if (start_pfn >= end_pfn)
continue;
alloc_node_data(nid);
+ node_set_online(nid);
}
/* Dump memblock with node info and return. */
@@ -609,39 +171,11 @@ static int __init numa_init(int (*init_func)(void))
for (i = 0; i < MAX_LOCAL_APIC; i++)
set_apicid_to_node(i, NUMA_NO_NODE);
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
- memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
- NUMA_NO_NODE));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
- NUMA_NO_NODE));
- /* In case that parsing SRAT failed. */
- WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
- numa_reset_distance();
-
- ret = init_func();
- if (ret < 0)
- return ret;
-
- /*
- * We reset memblock back to the top-down direction
- * here because if we configured ACPI_NUMA, we have
- * parsed SRAT in init_func(). It is ok to have the
- * reset here even if we did't configure ACPI_NUMA
- * or acpi numa init fails and fallbacks to dummy
- * numa init.
- */
- memblock_set_bottom_up(false);
-
- ret = numa_cleanup_meminfo(&numa_meminfo);
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
if (ret < 0)
return ret;
- numa_emulation(&numa_meminfo, numa_distance_cnt);
-
- ret = numa_register_memblks(&numa_meminfo);
+ ret = numa_register_nodes();
if (ret < 0)
return ret;
@@ -782,12 +316,12 @@ void __init init_cpu_to_node(void)
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
# ifndef CONFIG_NUMA_EMU
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
@@ -825,7 +359,7 @@ int early_cpu_to_node(int cpu)
return per_cpu(x86_cpu_to_node_map, cpu);
}
-void debug_cpumask_set_cpu(int cpu, int node, bool enable)
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
{
struct cpumask *mask;
@@ -857,12 +391,12 @@ static void numa_set_cpumask(int cpu, bool enable)
debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, true);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, false);
}
@@ -893,113 +427,29 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
-#ifdef CONFIG_NUMA_KEEP_MEMINFO
-static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
{
- int i;
-
- for (i = 0; i < mi->nr_blks; i++)
- if (mi->blk[i].start <= start && mi->blk[i].end > start)
- return mi->blk[i].nid;
- return NUMA_NO_NODE;
-}
-
-int phys_to_target_node(phys_addr_t start)
-{
- int nid = meminfo_to_nid(&numa_meminfo, start);
+ int i, j;
/*
- * Prefer online nodes, but if reserved memory might be
- * hot-added continue the search with reserved ranges.
+ * Transform __apicid_to_node table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
*/
- if (nid != NUMA_NO_NODE)
- return nid;
-
- return meminfo_to_nid(&numa_reserved_meminfo, start);
-}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
-
-int memory_add_physaddr_to_nid(u64 start)
-{
- int nid = meminfo_to_nid(&numa_meminfo, start);
-
- if (nid == NUMA_NO_NODE)
- nid = numa_meminfo.blk[0].nid;
- return nid;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-
-#endif
-
-static int __init cmp_memblk(const void *a, const void *b)
-{
- const struct numa_memblk *ma = *(const struct numa_memblk **)a;
- const struct numa_memblk *mb = *(const struct numa_memblk **)b;
-
- return (ma->start > mb->start) - (ma->start < mb->start);
+ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
+ if (__apicid_to_node[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (__apicid_to_node[i] == emu_nid_to_phys[j])
+ break;
+ __apicid_to_node[i] = j < nr_emu_nids ? j : 0;
+ }
}
-static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
-
-/**
- * numa_fill_memblks - Fill gaps in numa_meminfo memblks
- * @start: address to begin fill
- * @end: address to end fill
- *
- * Find and extend numa_meminfo memblks to cover the physical
- * address range @start-@end
- *
- * RETURNS:
- * 0 : Success
- * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
- */
-
-int __init numa_fill_memblks(u64 start, u64 end)
+u64 __init numa_emu_dma_end(void)
{
- struct numa_memblk **blk = &numa_memblk_list[0];
- struct numa_meminfo *mi = &numa_meminfo;
- int count = 0;
- u64 prev_end;
-
- /*
- * Create a list of pointers to numa_meminfo memblks that
- * overlap start, end. The list is used to make in-place
- * changes that fill out the numa_meminfo memblks.
- */
- for (int i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- if (memblock_addrs_overlap(start, end - start, bi->start,
- bi->end - bi->start)) {
- blk[count] = &mi->blk[i];
- count++;
- }
- }
- if (!count)
- return NUMA_NO_MEMBLK;
-
- /* Sort the list of pointers in memblk->start order */
- sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
-
- /* Make sure the first/last memblks include start/end */
- blk[0]->start = min(blk[0]->start, start);
- blk[count - 1]->end = max(blk[count - 1]->end, end);
-
- /*
- * Fill any gaps by tracking the previous memblks
- * end address and backfilling to it if needed.
- */
- prev_end = blk[0]->end;
- for (int i = 1; i < count; i++) {
- struct numa_memblk *curr = blk[i];
-
- if (prev_end >= curr->start) {
- if (prev_end < curr->end)
- prev_end = curr->end;
- } else {
- curr->start = prev_end;
- prev_end = curr->end;
- }
- }
- return 0;
+ return PFN_PHYS(MAX_DMA32_PFN);
}
+#endif /* CONFIG_NUMA_EMU */
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index 86860f279662..11e1ff370c10 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -5,30 +5,6 @@
#include <linux/types.h>
#include <asm/numa.h>
-struct numa_memblk {
- u64 start;
- u64 end;
- int nid;
-};
-
-struct numa_meminfo {
- int nr_blks;
- struct numa_memblk blk[NR_NODE_MEMBLKS];
-};
-
-void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
-int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
-void __init numa_reset_distance(void);
-
void __init x86_numa_init(void);
-#ifdef CONFIG_NUMA_EMU
-void __init numa_emulation(struct numa_meminfo *numa_meminfo,
- int numa_dist_cnt);
-#else
-static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
- int numa_dist_cnt)
-{ }
-#endif
-
#endif /* __X86_MM_NUMA_INTERNAL_H */
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index bdc2a240c2aa..f73b5ce270b3 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -104,7 +104,7 @@ __setup("debugpat", pat_debug_setup);
#ifdef CONFIG_X86_PAT
/*
- * X86 PAT uses page flags arch_1 and uncached together to keep track of
+ * X86 PAT uses page flags arch_1 and arch_2 together to keep track of
* memory type of pages that have backing page struct.
*
* X86 PAT supports 4 different memory types:
@@ -118,9 +118,9 @@ __setup("debugpat", pat_debug_setup);
#define _PGMT_WB 0
#define _PGMT_WC (1UL << PG_arch_1)
-#define _PGMT_UC_MINUS (1UL << PG_uncached)
-#define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1)
-#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_UC_MINUS (1UL << PG_arch_2)
+#define _PGMT_WT (1UL << PG_arch_2 | 1UL << PG_arch_1)
+#define _PGMT_MASK (1UL << PG_arch_2 | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
static inline enum page_cache_mode get_page_memtype(struct page *pg)
@@ -951,23 +951,20 @@ static void free_pfn_range(u64 paddr, unsigned long size)
static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
resource_size_t *phys)
{
- pte_t *ptep, pte;
- spinlock_t *ptl;
+ struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start };
- if (follow_pte(vma, vma->vm_start, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
return -EINVAL;
- pte = ptep_get(ptep);
-
/* Never return PFNs of anon folios in COW mappings. */
- if (vm_normal_folio(vma, vma->vm_start, pte)) {
- pte_unmap_unlock(ptep, ptl);
+ if (!args.special) {
+ follow_pfnmap_end(&args);
return -EINVAL;
}
- *prot = pgprot_val(pte_pgprot(pte));
- *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
- pte_unmap_unlock(ptep, ptl);
+ *prot = pgprot_val(args.pgprot);
+ *phys = (resource_size_t)args.pfn << PAGE_SHIFT;
+ follow_pfnmap_end(&args);
return 0;
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index f5931499c2d6..5745a354a241 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -641,6 +641,18 @@ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
}
#endif
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp)
+{
+ VM_WARN_ON_ONCE(!pud_present(*pudp));
+ pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));
+ flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+ return old;
+}
+#endif
+
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @reserve - size of hole to reserve
@@ -926,3 +938,9 @@ void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
pmd_shstk(pmd));
}
+
+void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+ /* See note in arch_check_zapped_pte() */
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));
+}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 9c52a95937ad..6f8e0f21c710 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -57,8 +57,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
}
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
- printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
- pxm, apic_id, node);
+ pr_debug("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node);
}
/* Callback for Proximity Domain -> LAPIC mapping */
@@ -98,8 +97,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
set_apicid_to_node(apic_id, node);
node_set(node, numa_nodes_parsed);
- printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
- pxm, apic_id, node);
+ pr_debug("SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node);
}
int __init x86_acpi_numa_init(void)
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index bda73cb7a044..ae295659ca14 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -144,3 +144,4 @@ static void __exit cleanup(void)
module_init(init);
module_exit(cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module for mmiotrace");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 44ac64f3a047..86593d1b787d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -11,6 +11,7 @@
#include <linux/sched/smt.h>
#include <linux/task_work.h>
#include <linux/mmu_notifier.h>
+#include <linux/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -85,9 +86,6 @@
*
*/
-/* There are 12 bits of space for ASIDS in CR3 */
-#define CR3_HW_ASID_BITS 12
-
/*
* When enabled, MITIGATION_PAGE_TABLE_ISOLATION consumes a single bit for
* user/kernel switches
@@ -160,7 +158,6 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam)
unsigned long cr3 = __sme_pa(pgd) | lam;
if (static_cpu_has(X86_FEATURE_PCID)) {
- VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
cr3 |= kern_pcid(asid);
} else {
VM_WARN_ON_ONCE(asid != 0);
@@ -503,9 +500,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
{
struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
- unsigned long new_lam = mm_lam_cr3_mask(next);
bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
unsigned cpu = smp_processor_id();
+ unsigned long new_lam;
u64 next_tlb_gen;
bool need_flush;
u16 new_asid;
@@ -619,9 +616,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
- /*
- * Start remote flushes and then read tlb_gen.
- */
+ /* Start receiving IPIs and then read tlb_gen (and LAM below) */
if (next != &init_mm)
cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
@@ -633,7 +628,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
barrier();
}
- set_tlbstate_lam_mode(next);
+ new_lam = mm_lam_cr3_mask(next);
if (need_flush) {
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -652,6 +647,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.loaded_mm, next);
this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next));
if (next != prev) {
cr4_update_pce_mm(next);
@@ -698,6 +694,7 @@ void initialize_tlbstate_and_flush(void)
int i;
struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
+ unsigned long lam = mm_lam_cr3_mask(mm);
unsigned long cr3 = __read_cr3();
/* Assert that CR3 already references the right mm. */
@@ -705,7 +702,7 @@ void initialize_tlbstate_and_flush(void)
/* LAM expected to be disabled */
WARN_ON(cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57));
- WARN_ON(mm_lam_cr3_mask(mm));
+ WARN_ON(lam);
/*
* Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
@@ -724,7 +721,7 @@ void initialize_tlbstate_and_flush(void)
this_cpu_write(cpu_tlbstate.next_asid, 1);
this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
- set_tlbstate_lam_mode(mm);
+ cpu_tlbstate_update_lam(lam, mm_untag_mask(mm));
for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
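A sketch of the race the reordering in switch_mm_irqs_off() closes, following the updated comment (my reading of the hunk, not text from the patch):

/*
 * CPU A: switch_mm_irqs_off()          CPU B: enable LAM for 'next'
 * ----------------------------         ----------------------------
 * cpumask_set_cpu(cpu, mm_cpumask)     write mm's LAM CR3 mask
 * next_tlb_gen = atomic64_read(...)    notify/flush every CPU that
 * new_lam = mm_lam_cr3_mask(next)        is set in mm_cpumask
 *
 * Because CPU A joins mm_cpumask before sampling the mask, it either
 * reads the new value itself or is reached by CPU B's update. The old
 * code sampled new_lam before joining and could miss both.
 */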
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d25d81c8ecc0..06b080b61aa5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -64,6 +64,56 @@ static bool is_imm8(int value)
return value <= 127 && value >= -128;
}
+/*
+ * Let us limit the positive offset to be <= 123.
+ * This is to ensure eventual jit convergence for the following patterns:
+ * ...
+ * pass4, final_proglen=4391:
+ * ...
+ * 20e: 48 85 ff test rdi,rdi
+ * 211: 74 7d je 0x290
+ * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
+ * ...
+ * 289: 48 85 ff test rdi,rdi
+ * 28c: 74 17 je 0x2a5
+ * 28e: e9 7f ff ff ff jmp 0x212
+ * 293: bf 03 00 00 00 mov edi,0x3
+ * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (+125)
+ * and insn at 0x28e is 5-byte jmp insn with offset -129.
+ *
+ * pass5, final_proglen=4392:
+ * ...
+ * 20e: 48 85 ff test rdi,rdi
+ * 211: 0f 84 80 00 00 00 je 0x297
+ * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
+ * ...
+ * 28d: 48 85 ff test rdi,rdi
+ * 290: 74 1a je 0x2ac
+ * 292: eb 84 jmp 0x218
+ * 294: bf 03 00 00 00 mov edi,0x3
+ * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
+ * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
+ * At the same time, insn at 0x292 is a 2-byte insn since its offset is
+ * -124.
+ *
+ * pass6 will repeat the same code as in pass4 and this will prevent
+ * eventual convergence.
+ *
+ * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
+ * (5->2 bytes) cycle shown above. In the above example, a je offset
+ * <= 0x7c should work.
+ *
+ * For other cases, the je <-> je pattern needs offset <= 0x7b to
+ * avoid non-convergence. For the jmp <-> je and jmp <-> jmp cases,
+ * a jmp offset <= 0x7c avoids non-convergence.
+ *
+ * Overall, let us limit the positive offset for an 8-bit cond/uncond
+ * jmp insn to a maximum of 123 (0x7b). This way, the jit passes can
+ * eventually converge.
+ */
+static bool is_imm8_jmp_offset(int value)
+{
+ return value <= 123 && value >= -128;
+}
+
static bool is_simm32(s64 value)
{
return value == (s64)(s32)value;
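Since the convergence argument above turns entirely on instruction sizes, a compact reference for the four encodings it juggles may help (byte patterns lifted from the pass4/pass5 listings; an annotation, not code from the patch):

/*
 *	74 7d			je  rel8   +125		2 bytes
 *	0f 84 80 00 00 00	je  rel32  +128		6 bytes
 *	eb 84			jmp rel8   -124		2 bytes
 *	e9 7f ff ff ff		jmp rel32  -129		5 bytes
 *
 * A short form growing into its near form shifts every later offset,
 * which can pull a neighbouring jump back under the rel8 limit and
 * re-shrink it; capping the positive rel8 range at 123 breaks that
 * cycle.
 */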
@@ -273,7 +323,7 @@ struct jit_context {
/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE 5
/* Number of bytes that will be skipped on tailcall */
-#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
+#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
static void push_r12(u8 **pprog)
{
@@ -403,6 +453,37 @@ static void emit_cfi(u8 **pprog, u32 hash)
*pprog = prog;
}
+static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
+{
+ u8 *prog = *pprog;
+
+ if (!is_subprog) {
+ /* cmp rax, MAX_TAIL_CALL_CNT */
+ EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
+ EMIT2(X86_JA, 6); /* ja 6 */
+ /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
+ * case1: entry of main prog.
+ * case2: tail callee of main prog.
+ */
+ EMIT1(0x50); /* push rax */
+ /* Make rax as tail_call_cnt_ptr. */
+ EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
+ EMIT2(0xEB, 1); /* jmp 1 */
+ /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
+ * case: tail callee of subprog.
+ */
+ EMIT1(0x50); /* push rax */
+ /* push tail_call_cnt_ptr */
+ EMIT1(0x50); /* push rax */
+ } else { /* is_subprog */
+ /* rax is tail_call_cnt_ptr. */
+ EMIT1(0x50); /* push rax */
+ EMIT1(0x50); /* push rax */
+ }
+
+ *pprog = prog;
+}
+
/*
* Emit x86-64 prologue code for BPF program.
* bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
@@ -424,10 +505,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
/* When it's the entry of the whole tailcall context,
* zeroing rax means initialising tail_call_cnt.
*/
- EMIT2(0x31, 0xC0); /* xor eax, eax */
+ EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
else
/* Keep the same instruction layout. */
- EMIT2(0x66, 0x90); /* nop2 */
+ emit_nops(&prog, 3); /* nop3 */
}
/* Exception callback receives FP as third parameter */
if (is_exception_cb) {
@@ -453,7 +534,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
if (tail_call_reachable)
- EMIT1(0x50); /* push rax */
+ emit_prologue_tail_call(&prog, is_subprog);
*pprog = prog;
}
@@ -589,13 +670,15 @@ static void emit_return(u8 **pprog, u8 *ip)
*pprog = prog;
}
+#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8))
+
/*
* Generate the following code:
*
* ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
* if (index >= array->map.max_entries)
* goto out;
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
* prog = array->ptrs[index];
* if (prog == NULL)
@@ -608,7 +691,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
u32 stack_depth, u8 *ip,
struct jit_context *ctx)
{
- int tcc_off = -4 - round_up(stack_depth, 8);
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
u8 *prog = *pprog, *start = *pprog;
int offset;
@@ -630,16 +713,14 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
EMIT2(X86_JBE, offset); /* jbe out */
/*
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
- EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+ EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
+ EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
- EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
@@ -654,6 +735,9 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JE, offset); /* je out */
+ /* Inc tail_call_cnt if the slot is populated. */
+ EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
+
if (bpf_prog->aux->exception_boundary) {
pop_callee_regs(&prog, all_callee_regs_used);
pop_r12(&prog);
@@ -663,6 +747,11 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
pop_r12(&prog);
}
+ /* Pop tail_call_cnt_ptr. */
+ EMIT1(0x58); /* pop rax */
+	/* Pop tail_call_cnt if it's the main prog,
+	 * or tail_call_cnt_ptr if it's a subprog.
+ */
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
@@ -691,21 +780,19 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
bool *callee_regs_used, u32 stack_depth,
struct jit_context *ctx)
{
- int tcc_off = -4 - round_up(stack_depth, 8);
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
u8 *prog = *pprog, *start = *pprog;
int offset;
/*
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
- EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+ EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
+ EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_direct_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
- EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
poke->tailcall_bypass = ip + (prog - start);
poke->adj_off = X86_TAIL_CALL_OFFSET;
@@ -715,6 +802,9 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
poke->tailcall_bypass);
+ /* Inc tail_call_cnt if the slot is populated. */
+ EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
+
if (bpf_prog->aux->exception_boundary) {
pop_callee_regs(&prog, all_callee_regs_used);
pop_r12(&prog);
@@ -724,6 +814,11 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
pop_r12(&prog);
}
+ /* Pop tail_call_cnt_ptr. */
+ EMIT1(0x58); /* pop rax */
+	/* Pop tail_call_cnt if it's the main prog,
+	 * or tail_call_cnt_ptr if it's a subprog.
+ */
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
@@ -1311,9 +1406,11 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
-/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-#define RESTORE_TAIL_CALL_CNT(stack) \
- EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+#define __LOAD_TCC_PTR(off) \
+ EMIT3_off32(0x48, 0x8B, 0x85, off)
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
+#define LOAD_TAIL_CALL_CNT_PTR(stack) \
+ __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
@@ -2031,7 +2128,7 @@ populate_extable:
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
- RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth);
ip += 7;
}
if (!imm32)
@@ -2184,7 +2281,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
return -EFAULT;
}
jmp_offset = addrs[i + insn->off] - addrs[i];
- if (is_imm8(jmp_offset)) {
+ if (is_imm8_jmp_offset(jmp_offset)) {
if (jmp_padding) {
/* To keep the jmp_offset valid, the extra bytes are
* padded before the jump insn, so we subtract the
@@ -2266,7 +2363,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
break;
}
emit_jmp:
- if (is_imm8(jmp_offset)) {
+ if (is_imm8_jmp_offset(jmp_offset)) {
if (jmp_padding) {
/* To avoid breaking jmp_offset, the extra bytes
* are padded before the actual jmp insn, so
@@ -2706,6 +2803,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
return 0;
}
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
+ __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
+
/* Example:
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
* its 'struct btf_func_model' will be nr_args=2
@@ -2826,7 +2927,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* [ ... ]
* [ stack_arg2 ]
* RBP - arg_stack_off [ stack_arg1 ]
- * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
*/
/* room for return value of orig_call or fentry prog */
@@ -2955,10 +3056,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
save_args(m, &prog, arg_stack_off, true);
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
- /* Before calling the original function, restore the
- * tail_call_cnt from stack to rax.
+ /* Before calling the original function, load the
+ * tail_call_cnt_ptr from stack to rax.
*/
- RESTORE_TAIL_CALL_CNT(stack_size);
+ LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
}
if (flags & BPF_TRAMP_F_ORIG_STACK) {
@@ -3017,10 +3118,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
goto cleanup;
}
} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
- /* Before running the original function, restore the
- * tail_call_cnt from stack to rax.
+ /* Before running the original function, load the
+ * tail_call_cnt_ptr from stack to rax.
*/
- RESTORE_TAIL_CALL_CNT(stack_size);
+ LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
}
/* restore return value of orig_call or fentry prog back into RAX */
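Pulling the hunks above together, a sketch of the stack discipline that emit_prologue_tail_call() and the tail-call emitters establish (a paraphrase of the emitted assembly, not code from the tree):

/*
 * Main prog entry (rax <= MAX_TAIL_CALL_CNT, rax is the count):
 *	push rax		; the counter itself lives in this slot
 *	mov  rax, rsp		; rax becomes tail_call_cnt_ptr
 *	push rax		; publish the pointer for this frame
 *
 * Main prog tail-called from a subprog, or subprog entry
 * (rax is already tail_call_cnt_ptr):
 *	push rax		; filler, keeps the slot layout identical
 *	push rax		; the pointer again
 *
 * Every frame then reaches the one shared counter with a single load:
 *	mov  rax, [rbp - round_up(stack_depth, 8) - 16]	; tcc_ptr
 *	cmp  qword ptr [rax], MAX_TAIL_CALL_CNT
 */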
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b33afb240601..98a9bb92d75c 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
return;
rp = pcie_find_root_port(dev);
- if (!rp->pm_cap)
+ if (!rp || !rp->pm_cap)
return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
@@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev)
u16 pmc;
rp = pcie_find_root_port(dev);
- if (!rp->pm_cap)
+ if (!rp || !rp->pm_cap)
return;
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
diff --git a/arch/x86/platform/geode/Makefile b/arch/x86/platform/geode/Makefile
index a8a6b1dedb01..34b53e97a0ad 100644
--- a/arch/x86/platform/geode/Makefile
+++ b/arch/x86/platform/geode/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_GEODE_COMMON) += geode-common.o
obj-$(CONFIG_ALIX) += alix.o
obj-$(CONFIG_NET5501) += net5501.o
obj-$(CONFIG_GEOS) += geos.o
diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c
index b39bf3b5e108..be65cd704e21 100644
--- a/arch/x86/platform/geode/alix.c
+++ b/arch/x86/platform/geode/alix.c
@@ -18,15 +18,12 @@
#include <linux/io.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
+#include "geode-common.h"
+
#define BIOS_SIGNATURE_TINYBIOS 0xf0000
#define BIOS_SIGNATURE_COREBOOT 0x500
#define BIOS_REGION_SIZE 0x10000
@@ -41,79 +38,16 @@ module_param(force, bool, 0444);
/* FIXME: Award bios is not automatically detected as Alix platform */
MODULE_PARM_DESC(force, "Force detection as ALIX.2/ALIX.3 platform");
-static struct gpio_keys_button alix_gpio_buttons[] = {
- {
- .code = KEY_RESTART,
- .gpio = 24,
- .active_low = 1,
- .desc = "Reset button",
- .type = EV_KEY,
- .wakeup = 0,
- .debounce_interval = 100,
- .can_disable = 0,
- }
-};
-static struct gpio_keys_platform_data alix_buttons_data = {
- .buttons = alix_gpio_buttons,
- .nbuttons = ARRAY_SIZE(alix_gpio_buttons),
- .poll_interval = 20,
-};
-
-static struct platform_device alix_buttons_dev = {
- .name = "gpio-keys-polled",
- .id = 1,
- .dev = {
- .platform_data = &alix_buttons_data,
- }
-};
-
-static struct gpio_led alix_leds[] = {
- {
- .name = "alix:1",
- .default_trigger = "default-on",
- },
- {
- .name = "alix:2",
- .default_trigger = "default-off",
- },
- {
- .name = "alix:3",
- .default_trigger = "default-off",
- },
-};
-
-static struct gpio_led_platform_data alix_leds_data = {
- .num_leds = ARRAY_SIZE(alix_leds),
- .leds = alix_leds,
-};
-
-static struct gpiod_lookup_table alix_leds_gpio_table = {
- .dev_id = "leds-gpio",
- .table = {
- /* The Geode GPIOs should be on the CS5535 companion chip */
- GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct platform_device alix_leds_dev = {
- .name = "leds-gpio",
- .id = -1,
- .dev.platform_data = &alix_leds_data,
-};
-
-static struct platform_device *alix_devs[] __initdata = {
- &alix_buttons_dev,
- &alix_leds_dev,
+static const struct geode_led alix_leds[] __initconst = {
+ { 6, true },
+ { 25, false },
+ { 27, false },
};
static void __init register_alix(void)
{
- /* Setup LED control through leds-gpio driver */
- gpiod_add_lookup_table(&alix_leds_gpio_table);
- platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs));
+ geode_create_restart_key(24);
+ geode_create_leds("alix", alix_leds, ARRAY_SIZE(alix_leds));
}
static bool __init alix_present(unsigned long bios_phys,
diff --git a/arch/x86/platform/geode/geode-common.c b/arch/x86/platform/geode/geode-common.c
new file mode 100644
index 000000000000..8fd78e60bf15
--- /dev/null
+++ b/arch/x86/platform/geode/geode-common.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared helpers to register GPIO-connected buttons and LEDs
+ * on AMD Geode boards.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "geode-common.h"
+
+static const struct software_node geode_gpiochip_node = {
+ .name = "cs5535-gpio",
+};
+
+static const struct property_entry geode_gpio_keys_props[] = {
+ PROPERTY_ENTRY_U32("poll-interval", 20),
+ { }
+};
+
+static const struct software_node geode_gpio_keys_node = {
+ .name = "geode-gpio-keys",
+ .properties = geode_gpio_keys_props,
+};
+
+static struct property_entry geode_restart_key_props[] = {
+ { /* Placeholder for GPIO property */ },
+ PROPERTY_ENTRY_U32("linux,code", KEY_RESTART),
+ PROPERTY_ENTRY_STRING("label", "Reset button"),
+ PROPERTY_ENTRY_U32("debounce-interval", 100),
+ { }
+};
+
+static const struct software_node geode_restart_key_node = {
+ .parent = &geode_gpio_keys_node,
+ .properties = geode_restart_key_props,
+};
+
+static const struct software_node *geode_gpio_keys_swnodes[] __initconst = {
+ &geode_gpiochip_node,
+ &geode_gpio_keys_node,
+ &geode_restart_key_node,
+ NULL
+};
+
+/*
+ * Creates the gpio-keys-polled device for the restart key.
+ *
+ * Note that it needs to be called first, before geode_create_leds(),
+ * because it registers the gpiochip software node used by both the
+ * gpio-keys and leds-gpio devices.
+ */
+int __init geode_create_restart_key(unsigned int pin)
+{
+ struct platform_device_info keys_info = {
+ .name = "gpio-keys-polled",
+ .id = 1,
+ };
+ struct platform_device *pd;
+ int err;
+
+ geode_restart_key_props[0] = PROPERTY_ENTRY_GPIO("gpios",
+ &geode_gpiochip_node,
+ pin, GPIO_ACTIVE_LOW);
+
+ err = software_node_register_node_group(geode_gpio_keys_swnodes);
+ if (err) {
+ pr_err("failed to register gpio-keys software nodes: %d\n", err);
+ return err;
+ }
+
+ keys_info.fwnode = software_node_fwnode(&geode_gpio_keys_node);
+
+ pd = platform_device_register_full(&keys_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err) {
+ pr_err("failed to create gpio-keys device: %d\n", err);
+ software_node_unregister_node_group(geode_gpio_keys_swnodes);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct software_node geode_gpio_leds_node = {
+ .name = "geode-leds",
+};
+
+#define MAX_LEDS 3
+
+int __init geode_create_leds(const char *label, const struct geode_led *leds,
+ unsigned int n_leds)
+{
+ const struct software_node *group[MAX_LEDS + 2] = { 0 };
+ struct software_node *swnodes;
+ struct property_entry *props;
+ struct platform_device_info led_info = {
+ .name = "leds-gpio",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ struct platform_device *led_dev;
+ const char *node_name;
+ int err;
+ int i;
+
+ if (n_leds > MAX_LEDS) {
+ pr_err("%s: too many LEDs\n", __func__);
+ return -EINVAL;
+ }
+
+ swnodes = kcalloc(n_leds, sizeof(*swnodes), GFP_KERNEL);
+ if (!swnodes)
+ return -ENOMEM;
+
+ /*
+ * Each LED is represented by 3 properties: "gpios",
+ * "linux,default-trigger", and am empty terminator.
+ */
+ props = kcalloc(n_leds * 3, sizeof(*props), GFP_KERNEL);
+ if (!props) {
+ err = -ENOMEM;
+ goto err_free_swnodes;
+ }
+
+ group[0] = &geode_gpio_leds_node;
+ for (i = 0; i < n_leds; i++) {
+ node_name = kasprintf(GFP_KERNEL, "%s:%d", label, i);
+ if (!node_name) {
+ err = -ENOMEM;
+ goto err_free_names;
+ }
+
+ props[i * 3 + 0] =
+ PROPERTY_ENTRY_GPIO("gpios", &geode_gpiochip_node,
+ leds[i].pin, GPIO_ACTIVE_LOW);
+ props[i * 3 + 1] =
+ PROPERTY_ENTRY_STRING("linux,default-trigger",
+ leds[i].default_on ?
+ "default-on" : "default-off");
+ /* props[i * 3 + 2] is an empty terminator */
+
+ swnodes[i] = SOFTWARE_NODE(node_name, &props[i * 3],
+ &geode_gpio_leds_node);
+ group[i + 1] = &swnodes[i];
+ }
+
+ err = software_node_register_node_group(group);
+ if (err) {
+ pr_err("failed to register LED software nodes: %d\n", err);
+ goto err_free_names;
+ }
+
+ led_info.fwnode = software_node_fwnode(&geode_gpio_leds_node);
+
+ led_dev = platform_device_register_full(&led_info);
+ err = PTR_ERR_OR_ZERO(led_dev);
+ if (err) {
+ pr_err("failed to create LED device: %d\n", err);
+ goto err_unregister_group;
+ }
+
+ return 0;
+
+err_unregister_group:
+ software_node_unregister_node_group(group);
+err_free_names:
+ while (--i >= 0)
+ kfree(swnodes[i].name);
+ kfree(props);
+err_free_swnodes:
+ kfree(swnodes);
+ return err;
+}
diff --git a/arch/x86/platform/geode/geode-common.h b/arch/x86/platform/geode/geode-common.h
new file mode 100644
index 000000000000..9e0afd34bfad
--- /dev/null
+++ b/arch/x86/platform/geode/geode-common.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Shared helpers to register GPIO-connected buttons and LEDs
+ * on AMD Geode boards.
+ */
+
+#ifndef __PLATFORM_GEODE_COMMON_H
+#define __PLATFORM_GEODE_COMMON_H
+
+#include <linux/property.h>
+
+struct geode_led {
+ unsigned int pin;
+ bool default_on;
+};
+
+int geode_create_restart_key(unsigned int pin);
+int geode_create_leds(const char *label, const struct geode_led *leds,
+ unsigned int n_leds);
+
+#endif /* __PLATFORM_GEODE_COMMON_H */
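A usage sketch of the new helpers, mirroring the board conversions below (the example_* names are hypothetical):

static const struct geode_led example_leds[] __initconst = {
	{ 6, true },	/* GPIO 6, default-on */
	{ 25, false },	/* GPIO 25, default-off */
};

static void __init register_example_board(void)
{
	/* Must come first: registers the shared gpiochip swnode. */
	geode_create_restart_key(24);
	geode_create_leds("example", example_leds, ARRAY_SIZE(example_leds));
}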
diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c
index d263528c90bb..98027fb1ec32 100644
--- a/arch/x86/platform/geode/geos.c
+++ b/arch/x86/platform/geode/geos.c
@@ -16,88 +16,22 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
-static struct gpio_keys_button geos_gpio_buttons[] = {
- {
- .code = KEY_RESTART,
- .gpio = 3,
- .active_low = 1,
- .desc = "Reset button",
- .type = EV_KEY,
- .wakeup = 0,
- .debounce_interval = 100,
- .can_disable = 0,
- }
-};
-static struct gpio_keys_platform_data geos_buttons_data = {
- .buttons = geos_gpio_buttons,
- .nbuttons = ARRAY_SIZE(geos_gpio_buttons),
- .poll_interval = 20,
-};
-
-static struct platform_device geos_buttons_dev = {
- .name = "gpio-keys-polled",
- .id = 1,
- .dev = {
- .platform_data = &geos_buttons_data,
- }
-};
-
-static struct gpio_led geos_leds[] = {
- {
- .name = "geos:1",
- .default_trigger = "default-on",
- },
- {
- .name = "geos:2",
- .default_trigger = "default-off",
- },
- {
- .name = "geos:3",
- .default_trigger = "default-off",
- },
-};
-
-static struct gpio_led_platform_data geos_leds_data = {
- .num_leds = ARRAY_SIZE(geos_leds),
- .leds = geos_leds,
-};
-
-static struct gpiod_lookup_table geos_leds_gpio_table = {
- .dev_id = "leds-gpio",
- .table = {
- /* The Geode GPIOs should be on the CS5535 companion chip */
- GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct platform_device geos_leds_dev = {
- .name = "leds-gpio",
- .id = -1,
- .dev.platform_data = &geos_leds_data,
-};
+#include "geode-common.h"
-static struct platform_device *geos_devs[] __initdata = {
- &geos_buttons_dev,
- &geos_leds_dev,
+static const struct geode_led geos_leds[] __initconst = {
+ { 6, true },
+ { 25, false },
+ { 27, false },
};
static void __init register_geos(void)
{
- /* Setup LED control through leds-gpio driver */
- gpiod_add_lookup_table(&geos_leds_gpio_table);
- platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs));
+ geode_create_restart_key(3);
+ geode_create_leds("geos", geos_leds, ARRAY_SIZE(geos_leds));
}
static int __init geos_init(void)
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 558384acd777..c9cee7dea99b 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -16,80 +16,25 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
#include <linux/input.h>
-#include <linux/gpio_keys.h>
#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
#include <asm/geode.h>
+#include "geode-common.h"
+
#define BIOS_REGION_BASE 0xffff0000
#define BIOS_REGION_SIZE 0x00010000
-static struct gpio_keys_button net5501_gpio_buttons[] = {
- {
- .code = KEY_RESTART,
- .gpio = 24,
- .active_low = 1,
- .desc = "Reset button",
- .type = EV_KEY,
- .wakeup = 0,
- .debounce_interval = 100,
- .can_disable = 0,
- }
-};
-static struct gpio_keys_platform_data net5501_buttons_data = {
- .buttons = net5501_gpio_buttons,
- .nbuttons = ARRAY_SIZE(net5501_gpio_buttons),
- .poll_interval = 20,
-};
-
-static struct platform_device net5501_buttons_dev = {
- .name = "gpio-keys-polled",
- .id = 1,
- .dev = {
- .platform_data = &net5501_buttons_data,
- }
-};
-
-static struct gpio_led net5501_leds[] = {
- {
- .name = "net5501:1",
- .default_trigger = "default-on",
- },
-};
-
-static struct gpio_led_platform_data net5501_leds_data = {
- .num_leds = ARRAY_SIZE(net5501_leds),
- .leds = net5501_leds,
-};
-
-static struct gpiod_lookup_table net5501_leds_gpio_table = {
- .dev_id = "leds-gpio",
- .table = {
- /* The Geode GPIOs should be on the CS5535 companion chip */
- GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct platform_device net5501_leds_dev = {
- .name = "leds-gpio",
- .id = -1,
- .dev.platform_data = &net5501_leds_data,
-};
-
-static struct platform_device *net5501_devs[] __initdata = {
- &net5501_buttons_dev,
- &net5501_leds_dev,
+static const struct geode_led net5501_leds[] __initconst = {
+ { 6, true },
};
static void __init register_net5501(void)
{
- /* Setup LED control through leds-gpio driver */
- gpiod_add_lookup_table(&net5501_leds_gpio_table);
- platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs));
+ geode_create_restart_key(24);
+ geode_create_leds("net5501", net5501_leds, ARRAY_SIZE(net5501_leds));
}
struct net5501_board {
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index f83bbe0acd4a..a8e75f8c14fd 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -27,9 +27,10 @@
#include <asm/intel-mid.h>
#include <asm/io.h>
#include <asm/i8259.h>
-#include <asm/intel_scu_ipc.h>
#include <asm/reboot.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
+
#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
#define IPCMSG_COLD_RESET 0xF1
diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile
index 5dec5067c9fb..c43fb7964dc4 100644
--- a/arch/x86/platform/pvh/Makefile
+++ b/arch/x86/platform/pvh/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_head.o := y
+KASAN_SANITIZE := n
obj-$(CONFIG_PVH) += enlighten.o
obj-$(CONFIG_PVH) += head.o
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 944e0290f2c0..2263885d16ba 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -130,7 +130,11 @@ void __init xen_prepare_pvh(void)
BUG();
}
- memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+ /*
+ * This must not compile to "call memset" because memset() may be
+ * instrumented.
+ */
+ __builtin_memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
hypervisor_specific_init(xen_guest);
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 76d9f6ce7a3d..f238f7b33cdd 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -52,8 +52,11 @@ subsys_initcall(init_vdso);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- int err;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
if (!vdso_enabled)
return 0;
@@ -61,12 +64,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
+ vdso_mapping.pages = vdsop;
+ vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdsop);
+ &vdso_mapping);
mmap_write_unlock(mm);
- return err;
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index f1ce39d6d32c..55a4996d0c04 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -665,7 +665,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
ptl = ptlock_ptr(page_ptdesc(page));
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
@@ -1553,7 +1553,8 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
+ !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(XEN_LAZY_MMU);
@@ -1581,7 +1582,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
if (pinned) {
xen_mc_batch();
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
__set_pfn_prot(pfn, PAGE_KERNEL);
@@ -2018,10 +2019,7 @@ void __init xen_reserve_special_pages(void)
void __init xen_pt_check_e820(void)
{
- if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
- xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
- BUG();
- }
+ xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
}
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 7c735b730acd..b52d3e17e2c1 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -70,6 +70,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/acpi.h>
#include <asm/cache.h>
#include <asm/setup.h>
@@ -80,6 +81,7 @@
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
+#include <xen/hvc-console.h>
#include "xen-ops.h"
@@ -792,6 +794,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
+/* Remapped non-RAM areas */
+#define NR_NONRAM_REMAP 4
+static struct nonram_remap {
+ phys_addr_t maddr;
+ phys_addr_t paddr;
+ size_t size;
+} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
+static unsigned int nr_nonram_remap __ro_after_init;
+
+/*
+ * Do the real remapping of non-RAM regions as specified in the
+ * xen_nonram_remap[] array.
+ * In case of an error just crash the system.
+ */
+void __init xen_do_remap_nonram(void)
+{
+ unsigned int i;
+ unsigned int remapped = 0;
+ const struct nonram_remap *remap = xen_nonram_remap;
+ unsigned long pfn, mfn, end_pfn;
+
+ for (i = 0; i < nr_nonram_remap; i++) {
+ end_pfn = PFN_UP(remap->paddr + remap->size);
+ pfn = PFN_DOWN(remap->paddr);
+ mfn = PFN_DOWN(remap->maddr);
+ while (pfn < end_pfn) {
+ if (!set_phys_to_machine(pfn, mfn))
+ panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
+ pfn, mfn);
+
+ pfn++;
+ mfn++;
+ remapped++;
+ }
+
+ remap++;
+ }
+
+ pr_info("Remapped %u non-RAM page(s)\n", remapped);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
+ * regions into account.
+ * Any attempt to map an area crossing a remap boundary will produce a
+ * WARN() splat.
+ * phys is related to remap->maddr on input and will be rebased to remap->paddr.
+ */
+static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ unsigned int i;
+ const struct nonram_remap *remap = xen_nonram_remap;
+
+ for (i = 0; i < nr_nonram_remap; i++) {
+ if (phys + size > remap->maddr &&
+ phys < remap->maddr + remap->size) {
+ WARN_ON(phys < remap->maddr ||
+ phys + size > remap->maddr + remap->size);
+ phys += remap->paddr - remap->maddr;
+ break;
+ }
+ }
+
+ return x86_acpi_os_ioremap(phys, size);
+}
+#endif /* CONFIG_ACPI */
+
+/*
+ * Add a new non-RAM remap entry.
+ * In case of no free entry found, just crash the system.
+ */
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+ unsigned long size)
+{
+ BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
+
+ if (nr_nonram_remap == NR_NONRAM_REMAP) {
+ xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
+ BUG();
+ }
+
+#ifdef CONFIG_ACPI
+ /* Switch to the Xen acpi_os_ioremap() variant. */
+ if (nr_nonram_remap == 0)
+ acpi_os_ioremap = xen_acpi_os_ioremap;
+#endif
+
+ xen_nonram_remap[nr_nonram_remap].maddr = maddr;
+ xen_nonram_remap[nr_nonram_remap].paddr = paddr;
+ xen_nonram_remap[nr_nonram_remap].size = size;
+
+ nr_nonram_remap++;
+}
+
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
static int p2m_dump_show(struct seq_file *m, void *v)
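A worked example of the address rebase in xen_acpi_os_ioremap() above (all numbers hypothetical):

/*
 * Remap entry: maddr = 0x100000, paddr = 0xfff00000, size = 0x10000.
 * An ACPI table at machine address 0x104000 then resolves as
 *
 *	phys += remap->paddr - remap->maddr;
 *	// 0x104000 + (0xfff00000 - 0x100000) = 0xfff04000
 *
 * before being handed to x86_acpi_os_ioremap().
 */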
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 806ddb2391d9..c3db71d96c43 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -15,12 +15,12 @@
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
+#include <linux/acpi.h>
#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
-#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
@@ -46,6 +46,9 @@ bool xen_pv_pci_possible;
/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;
+/* Number of initially usable memory pages. */
+static unsigned long ini_nr_pages __initdata;
+
/*
* Buffer used to remap identity mapped pages. We only need the virtual space.
* The physical page behind this address is remapped as needed to different
@@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages)
+ unsigned long end_pfn)
{
unsigned long pfn, end;
int ret;
@@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN_ON(start_pfn > end_pfn);
/* Release pages first. */
- end = min(end_pfn, nr_pages);
+ end = min(end_pfn, ini_nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
* to Xen and not remapped.
*/
static unsigned long __init xen_set_identity_and_remap_chunk(
- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
- unsigned long remap_pfn)
+ unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
unsigned long pfn;
unsigned long i = 0;
unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0)
- remap_pfn = nr_pages;
+ remap_pfn = ini_nr_pages;
while (i < n) {
unsigned long cur_pfn = start_pfn + i;
@@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */
- if (cur_pfn >= nr_pages) {
+ if (cur_pfn >= ini_nr_pages) {
/* Identity map remaining pages */
set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
- if (cur_pfn + size > nr_pages)
- size = nr_pages - cur_pfn;
+ if (cur_pfn + size > ini_nr_pages)
+ size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) {
pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages);
+ cur_pfn + left);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
}
static unsigned long __init xen_count_remap_pages(
- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+ unsigned long start_pfn, unsigned long end_pfn,
unsigned long remap_pages)
{
- if (start_pfn >= nr_pages)
+ if (start_pfn >= ini_nr_pages)
return remap_pages;
- return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+ return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+static unsigned long __init xen_foreach_remap_area(
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long nr_pages, unsigned long last_val))
+ unsigned long last_val))
{
phys_addr_t start = 0;
unsigned long ret_val = 0;
@@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn)
- ret_val = func(start_pfn, end_pfn, nr_pages,
- ret_val);
+ ret_val = func(start_pfn, end_pfn, ret_val);
start = end;
}
}
@@ -494,6 +495,8 @@ void __init xen_remap_memory(void)
set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped);
+
+ xen_do_remap_nonram();
}
static unsigned long __init xen_get_pages_limit(void)
@@ -567,7 +570,7 @@ static void __init xen_ignore_unusable(void)
}
}
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
struct e820_entry *entry;
unsigned mapcnt;
@@ -625,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
}
/*
+ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
+ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
+ * The adaptation of the P2M must be deferred until page allocation is possible.
+ */
+static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
+{
+ struct e820_entry *entry;
+ unsigned int mapcnt;
+ phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
+ phys_addr_t swap_addr, swap_size, entry_end;
+
+ swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
+ swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
+ entry = xen_e820_table.entries;
+
+ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+ entry_end = entry->addr + entry->size;
+ if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
+ entry_end - swap_size >= mem_end) {
+ /* Reduce RAM entry by needed space (whole pages). */
+ entry->size -= swap_size;
+
+ /* Add new entry at the end of E820 map. */
+ entry = xen_e820_table.entries +
+ xen_e820_table.nr_entries;
+ xen_e820_table.nr_entries++;
+
+ /* Fill new entry (keep size and page offset). */
+ entry->type = swap_entry->type;
+ entry->addr = entry_end - swap_size +
+ swap_addr - swap_entry->addr;
+ entry->size = swap_entry->size;
+
+ /* Convert old entry to RAM, align to pages. */
+ swap_entry->type = E820_TYPE_RAM;
+ swap_entry->addr = swap_addr;
+ swap_entry->size = swap_size;
+
+ /* Remember PFN<->MFN relation for P2M update. */
+ xen_add_remap_nonram(swap_addr, entry_end - swap_size,
+ swap_size);
+
+ /* Order E820 table and merge entries. */
+ e820__update_table(&xen_e820_table);
+
+ return;
+ }
+
+ entry++;
+ }
+
+ xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
+ BUG();
+}
+
+/*
+ * Look for non-RAM memory types in a specific guest physical area and move
+ * those away if possible (ACPI NVS only for now).
+ */
+static void __init xen_e820_resolve_conflicts(phys_addr_t start,
+ phys_addr_t size)
+{
+ struct e820_entry *entry;
+ unsigned int mapcnt;
+ phys_addr_t end;
+
+ if (!size)
+ return;
+
+ end = start + size;
+ entry = xen_e820_table.entries;
+
+ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+ if (entry->addr >= end)
+ return;
+
+ if (entry->addr + entry->size > start &&
+ entry->type == E820_TYPE_NVS)
+ xen_e820_swap_entry_with_ram(entry);
+
+ entry++;
+ }
+}
+
+/*
+ * Check for an area in physical memory to be usable for non-movable purposes.
+ * An area is considered to usable if the used E820 map lists it to be RAM or
+ * some other type which can be moved to higher PFNs while keeping the MFNs.
+ * In case the area is not usable, crash the system with an error message.
+ */
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+ const char *component)
+{
+ xen_e820_resolve_conflicts(start, size);
+
+ if (!xen_is_e820_reserved(start, size))
+ return;
+
+ xen_raw_console_write("Xen hypervisor allocated ");
+ xen_raw_console_write(component);
+ xen_raw_console_write(" memory conflicts with E820 map\n");
+ BUG();
+}
+
+/*
* Like memcpy, but with physical addresses for dest and src.
*/
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
@@ -683,7 +791,7 @@ static void __init xen_reserve_xen_mfnlist(void)
**/
char * __init xen_memory_setup(void)
{
- unsigned long max_pfn, pfn_s, n_pfns;
+ unsigned long pfn_s, n_pfns;
phys_addr_t mem_end, addr, size, chunk_size;
u32 type;
int rc;
@@ -695,9 +803,8 @@ char * __init xen_memory_setup(void)
int op;
xen_parse_512gb();
- max_pfn = xen_get_pages_limit();
- max_pfn = min(max_pfn, xen_start_info->nr_pages);
- mem_end = PFN_PHYS(max_pfn);
+ ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
+ mem_end = PFN_PHYS(ini_nr_pages);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
@@ -747,13 +854,35 @@ char * __init xen_memory_setup(void)
/* Make sure the Xen-supplied memory map is well-ordered. */
e820__update_table(&xen_e820_table);
+ /*
+ * Check whether the kernel itself conflicts with the target E820 map.
+ * Failing now is better than running into weird problems later due
+ * to relocating (and even reusing) pages with kernel text or data.
+ */
+ xen_chk_is_e820_usable(__pa_symbol(_text),
+ __pa_symbol(_end) - __pa_symbol(_text),
+ "kernel");
+
+ /*
+ * Check for a conflict of the xen_start_info memory with the target
+ * E820 map.
+ */
+ xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
+ "xen_start_info");
+
+ /*
+ * Check for a conflict of the hypervisor supplied page tables with
+ * the target E820 map.
+ */
+ xen_pt_check_e820();
+
max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */
- max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+ max_pages += xen_foreach_remap_area(xen_count_remap_pages);
- if (max_pages > max_pfn)
- extra_pages += max_pages - max_pfn;
+ if (max_pages > ini_nr_pages)
+ extra_pages += max_pages - ini_nr_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -762,8 +891,8 @@ char * __init xen_memory_setup(void)
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
- maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM));
- extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn);
+ maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
+ extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
i = 0;
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
@@ -819,23 +948,6 @@ char * __init xen_memory_setup(void)
e820__update_table(e820_table);
- /*
- * Check whether the kernel itself conflicts with the target E820 map.
- * Failing now is better than running into weird problems later due
- * to relocating (and even reusing) pages with kernel text or data.
- */
- if (xen_is_e820_reserved(__pa_symbol(_text),
- __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
- xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
- BUG();
- }
-
- /*
- * Check for a conflict of the hypervisor supplied page tables with
- * the target E820 map.
- */
- xen_pt_check_e820();
-
xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */
@@ -863,7 +975,7 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the
* underlying RAM.
*/
- xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+ xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0cf16fc79e0b..e1b782e823e6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -47,8 +47,12 @@ void xen_mm_unpin_all(void);
#ifdef CONFIG_X86_64
void __init xen_relocate_p2m(void);
#endif
+void __init xen_do_remap_nonram(void);
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+ unsigned long size);
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+ const char *component);
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
void __init xen_inv_extra_mem(void);
void __init xen_remap_memory(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f200a4ec044e..d3db28f2f811 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -14,6 +14,7 @@ config XTENSA
select ARCH_HAS_DMA_SET_UNCACHED if MMU
select ARCH_HAS_STRNCPY_FROM_USER if !KASAN
select ARCH_HAS_STRNLEN_USER
+ select ARCH_NEED_CMPXCHG_1_EMU
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 675a11ea8de7..95e33a913962 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -15,6 +15,7 @@
#include <linux/bits.h>
#include <linux/stringify.h>
+#include <linux/cmpxchg-emu.h>
/*
* cmpxchg
@@ -74,6 +75,7 @@ static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
+ case 1: return cmpxchg_emu_u8(ptr, old, new);
case 4: return __cmpxchg_u32(ptr, old, new);
default: __cmpxchg_called_with_bad_pointer();
return old;
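For context on the new case 1, a sketch of the emulation technique behind cmpxchg_emu_u8(). The in-tree helper lives in lib/cmpxchg-emu.c and may differ in detail; little-endian byte placement and the kernel's cmpxchg() on the containing word are assumed:

static inline u8 cmpxchg_emu_u8_sketch(volatile u8 *p, u8 old, u8 new)
{
	/* Widen to the aligned 32-bit word containing *p. */
	volatile u32 *word = (volatile u32 *)((unsigned long)p & ~3UL);
	unsigned int shift = ((unsigned long)p & 3UL) * 8;
	u32 mask = 0xffu << shift;
	u32 cur, next, prev;

	do {
		cur = *word;
		/* Fail like cmpxchg: return the byte actually found. */
		if (((cur & mask) >> shift) != old)
			return (cur & mask) >> shift;
		/* Splice the new byte in; retry if a neighbour moved. */
		next = (cur & ~mask) | ((u32)new << shift);
		prev = cmpxchg(word, cur, next);
	} while (prev != cur);

	return old;
}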
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index b3c2450d6f23..dc54f854c2f5 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -55,7 +55,8 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
#ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
struct vm_area_struct *vmm;
struct vma_iterator vmi;
diff --git a/block/bdev.c b/block/bdev.c
index c5507b6f63b8..738e3c8457e7 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -24,6 +24,7 @@
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
+#include <linux/security.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
@@ -324,6 +325,11 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
memset(&ei->bdev, 0, sizeof(ei->bdev));
+
+ if (security_bdev_alloc(&ei->bdev)) {
+ kmem_cache_free(bdev_cachep, ei);
+ return NULL;
+ }
return &ei->vfs_inode;
}
@@ -333,6 +339,7 @@ static void bdev_free_inode(struct inode *inode)
free_percpu(bdev->bd_stats);
kfree(bdev->bd_meta_info);
+ security_bdev_free(bdev);
if (!bdev_is_partition(bdev)) {
if (bdev->bd_disk && bdev->bd_disk->bdi)
@@ -548,7 +555,7 @@ retry:
/* if claiming is already in progress, wait for it to finish */
if (whole->bd_claiming) {
- wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+ wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
DEFINE_WAIT(wait);
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
@@ -571,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
/* tell others that we're done */
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
+ wake_up_var(&whole->bd_claiming);
}
/**
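The claim wait/wake sides now pair through the variable-keyed waitqueue helpers; a condensed sketch of the pairing (waiter side abbreviated from the hunk above):

/*
 * Waiter:                              Releaser (bd_clear_claiming):
 *	wq = __var_waitqueue(&whole->bd_claiming);
 *	prepare_to_wait(wq, &wait, ...);	whole->bd_claiming = NULL;
 *	if (whole->bd_claiming)			wake_up_var(&whole->bd_claiming);
 *		schedule();
 *	finish_wait(wq, &wait);
 *
 * wake_up_var() hashes the address onto the same queue that
 * __var_waitqueue() returns, so the two sides stay matched.
 */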
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index b758693697c0..e831aedb4643 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -679,12 +679,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
bfqg_and_blkg_put(old_parent);
- if (entity->parent &&
- entity->parent->last_bfqq_created == bfqq)
- entity->parent->last_bfqq_created = NULL;
- else if (bfqd->last_bfqq_created == bfqq)
- bfqd->last_bfqq_created = NULL;
-
+ bfq_reassign_last_bfqq(bfqq, NULL);
entity->parent = bfqg->my_entity;
entity->sched_data = &bfqg->sched_data;
/* pin down bfqg and its associated blkg */
@@ -741,7 +736,6 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
*/
bfq_put_cooperator(sync_bfqq);
bic_set_bfqq(bic, NULL, true, act_idx);
- bfq_release_process_ref(bfqd, sync_bfqq);
}
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 36a4998c4b37..0747d9d0e48c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
/* if a merge has already been setup, then proceed with that first */
- if (bfqq->new_bfqq)
- return bfqq->new_bfqq;
+ new_bfqq = bfqq->new_bfqq;
+ if (new_bfqq) {
+ while (new_bfqq->new_bfqq)
+ new_bfqq = new_bfqq->new_bfqq;
+ return new_bfqq;
+ }
/*
* Check delayed stable merge for rotational or non-queueing
@@ -3093,8 +3097,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
}
-static void
-bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
+void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq,
+ struct bfq_queue *new_bfqq)
{
if (cur_bfqq->entity.parent &&
cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_queue(bfqq);
}
-static void
-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
- struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
+ struct bfq_io_cq *bic,
+ struct bfq_queue *bfqq)
{
+ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+
bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
(unsigned long)new_bfqq->pid);
/* Save weight raising and idle window of the merged queues */
@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
bfq_reassign_last_bfqq(bfqq, new_bfqq);
bfq_release_process_ref(bfqd, bfqq);
+
+ return new_bfqq;
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
* fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
- bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
- new_bfqq);
- /*
- * If we get here, bio will be queued into new_queue,
- * so use new_bfqq to decide whether bio and rq can be
- * merged.
- */
- bfqq = new_bfqq;
+ while (bfqq != new_bfqq)
+ bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/*
* Change also bqfd->bio_bfqq, as
@@ -5432,6 +5434,8 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
bfq_put_queue(__bfqq);
__bfqq = next;
}
+
+ bfq_release_process_ref(bfqq->bfqd, bfqq);
}
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -5444,8 +5448,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
bfq_put_cooperator(bfqq);
-
- bfq_release_process_ref(bfqd, bfqq);
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
@@ -5701,9 +5703,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* state before killing it.
*/
bfqq->bic = bic;
- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
-
- return new_bfqq;
+ return bfq_merge_bfqqs(bfqd, bic, bfqq);
}
/*
@@ -6158,6 +6158,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
+ struct bfq_queue *old_bfqq = bfqq;
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@@ -6174,18 +6175,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
* new_bfqq.
*/
if (bic_to_bfqq(RQ_BIC(rq), true,
- bfq_actuator_index(bfqd, rq->bio)) == bfqq)
- bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
- bfqq, new_bfqq);
+ bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
+ while (bfqq != new_bfqq)
+ bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
+ }
- bfq_clear_bfqq_just_created(bfqq);
+ bfq_clear_bfqq_just_created(old_bfqq);
/*
* rq is about to be enqueued into new_bfqq,
* release rq reference on bfqq
*/
- bfq_put_queue(bfqq);
+ bfq_put_queue(old_bfqq);
rq->elv.priv[1] = new_bfqq;
- bfqq = new_bfqq;
}
bfq_update_io_thinktime(bfqd, bfqq);
@@ -6723,7 +6724,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
- if (bfqq_process_refs(bfqq) == 1) {
+ if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
@@ -6733,16 +6734,13 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
bfq_put_cooperator(bfqq);
-
- bfq_release_process_ref(bfqq->bfqd, bfqq);
return NULL;
}
-static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
- struct bfq_io_cq *bic,
- struct bio *bio,
- bool split, bool is_sync,
- bool *new_queue)
+static struct bfq_queue *
+__bfq_get_bfqq_handle_split(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ struct bio *bio, bool split, bool is_sync,
+ bool *new_queue)
{
unsigned int act_idx = bfq_actuator_index(bfqd, bio);
struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
@@ -6821,6 +6819,84 @@ static void bfq_prepare_request(struct request *rq)
rq->elv.priv[0] = rq->elv.priv[1] = NULL;
}
+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+ struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+ if (!waker_bfqq)
+ return NULL;
+
+ while (new_bfqq) {
+ if (new_bfqq == waker_bfqq) {
+ /*
+			 * If waker_bfqq is in the merge chain, and the
+			 * current process is its only user, there is no
+			 * separate waker left to track.
+ */
+ if (bfqq_process_refs(waker_bfqq) == 1)
+ return NULL;
+ break;
+ }
+
+ new_bfqq = new_bfqq->new_bfqq;
+ }
+
+ return waker_bfqq;
+}
+
+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ struct bfq_io_cq *bic,
+ struct bio *bio,
+ unsigned int idx,
+ bool is_sync)
+{
+ struct bfq_queue *waker_bfqq;
+ struct bfq_queue *bfqq;
+ bool new_queue = false;
+
+ bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
+ if (unlikely(new_queue))
+ return bfqq;
+
+ /* If the queue was seeky for too long, break it apart. */
+ if (!bfq_bfqq_coop(bfqq) || !bfq_bfqq_split_coop(bfqq) ||
+ bic->bfqq_data[idx].stably_merged)
+ return bfqq;
+
+ waker_bfqq = bfq_waker_bfqq(bfqq);
+
+ /* Update bic before losing reference to bfqq */
+ if (bfq_bfqq_in_large_burst(bfqq))
+ bic->bfqq_data[idx].saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
+ if (bfqq) {
+ bfq_bfqq_resume_state(bfqq, bfqd, bic, true);
+ return bfqq;
+ }
+
+ bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL);
+ if (unlikely(bfqq == &bfqd->oom_bfqq))
+ return bfqq;
+
+ bfq_bfqq_resume_state(bfqq, bfqd, bic, false);
+ bfqq->waker_bfqq = waker_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+ /*
+	 * If the waker queue disappears, then bfqq->waker_bfqq must be
+	 * reset. So insert bfqq into the woken_list of the waker. See
+	 * bfq_check_waker for details.
+ */
+ if (waker_bfqq)
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqq->waker_bfqq->woken_list);
+
+ return bfqq;
+}
+
/*
* If needed, init rq, allocate bfq data structures associated with
* rq, and increment reference counters in the destination bfq_queue
@@ -6852,8 +6928,6 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
struct bfq_io_cq *bic;
const int is_sync = rq_is_sync(rq);
struct bfq_queue *bfqq;
- bool new_queue = false;
- bool bfqq_already_existing = false, split = false;
unsigned int a_idx = bfq_actuator_index(bfqd, bio);
if (unlikely(!rq->elv.icq))
@@ -6870,54 +6944,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
return RQ_BFQQ(rq);
bic = icq_to_bic(rq->elv.icq);
-
bfq_check_ioprio_change(bic, bio);
-
bfq_bic_update_cgroup(bic, bio);
-
- bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
- &new_queue);
-
- if (likely(!new_queue)) {
- /* If the queue was seeky for too long, break it apart. */
- if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
- !bic->bfqq_data[a_idx].stably_merged) {
- struct bfq_queue *old_bfqq = bfqq;
-
- /* Update bic before losing reference to bfqq */
- if (bfq_bfqq_in_large_burst(bfqq))
- bic->bfqq_data[a_idx].saved_in_large_burst =
- true;
-
- bfqq = bfq_split_bfqq(bic, bfqq);
- split = true;
-
- if (!bfqq) {
- bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
- true, is_sync,
- NULL);
- if (unlikely(bfqq == &bfqd->oom_bfqq))
- bfqq_already_existing = true;
- } else
- bfqq_already_existing = true;
-
- if (!bfqq_already_existing) {
- bfqq->waker_bfqq = old_bfqq->waker_bfqq;
- bfqq->tentative_waker_bfqq = NULL;
-
- /*
- * If the waker queue disappears, then
- * new_bfqq->waker_bfqq must be
- * reset. So insert new_bfqq into the
- * woken_list of the waker. See
- * bfq_check_waker for details.
- */
- if (bfqq->waker_bfqq)
- hlist_add_head(&bfqq->woken_list_node,
- &bfqq->waker_bfqq->woken_list);
- }
- }
- }
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync);
bfqq_request_allocated(bfqq);
bfqq->ref++;
@@ -6934,18 +6963,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* addition, if the queue has also just been split, we have to
* resume its state.
*/
- if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+ if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
+ bfqq_process_refs(bfqq) == 1)
bfqq->bic = bic;
- if (split) {
- /*
- * The queue has just been split from a shared
- * queue: restore the idle window and the
- * possible weight raising period.
- */
- bfq_bfqq_resume_state(bfqq, bfqd, bic,
- bfqq_already_existing);
- }
- }
/*
* Consider bfqq as possibly belonging to a burst of newly
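
The new bfq_waker_bfqq() helper above guards against handing back a waker queue that sits in bfqq's own merge chain with a single process reference left, i.e. a queue the split is about to release. Below is a standalone model of that walk; struct q and its fields are simplified stand-ins for illustration, not BFQ's real types.

struct q {
	struct q *new_q;		/* models bfqq->new_bfqq */
	struct q *waker;		/* models bfqq->waker_bfqq */
	int process_refs;
};

static struct q *waker_of(struct q *bfqq)
{
	struct q *nq = bfqq->new_q;
	struct q *waker = bfqq->waker;

	if (!waker)
		return NULL;

	for (; nq; nq = nq->new_q) {
		if (nq == waker) {
			/* waker is in the merge chain ... */
			if (waker->process_refs == 1)
				return NULL;	/* ... and about to go away */
			break;
		}
	}
	return waker;
}
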
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 08ddf2cfae5b..687a3a7ba784 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1156,6 +1156,8 @@ void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration);
void bfq_add_bfqq_busy(struct bfq_queue *bfqq);
void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq);
void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq);
+void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq,
+ struct bfq_queue *new_bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */
@@ -1183,11 +1185,6 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
"%s " fmt, pid_str, ##args); \
} while (0)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
- blk_add_cgroup_trace_msg((bfqd)->queue, \
- &bfqg_to_blkg(bfqg)->blkcg->css, fmt, ##args); \
-} while (0)
-
#else /* CONFIG_BFQ_GROUP_IOSCHED */
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
@@ -1197,7 +1194,6 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH); \
blk_add_trace_msg((bfqd)->queue, "%s " fmt, pid_str, ##args); \
} while (0)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 96a2653905ae..88e3ad73c385 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -367,7 +367,6 @@ free_bvec:
kfree(bvec);
return ret;
}
-EXPORT_SYMBOL_GPL(bio_integrity_map_user);
/**
* bio_integrity_prep - Prepare bio for integrity I/O
diff --git a/block/bio.c b/block/bio.c
index c4053d49679a..ac4d77c88932 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -931,7 +931,8 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
return false;
- *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+ *same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) &
+ PAGE_MASK));
if (!*same_page) {
if (IS_ENABLED(CONFIG_KMSAN))
return false;
@@ -1017,6 +1018,29 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
}
/**
+ * bio_add_hw_folio - attempt to add a folio to a bio with hw constraints
+ * @q: the target queue
+ * @bio: destination bio
+ * @folio: folio to add
+ * @len: vec entry length
+ * @offset: vec entry offset in the folio
+ * @max_sectors: maximum number of sectors that can be added
+ * @same_page: return if the segment has been merged inside the same folio
+ *
+ * Add a folio to a bio while respecting the hardware max_sectors, max_segment
+ * and gap limitations.
+ */
+int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
+ struct folio *folio, size_t len, size_t offset,
+ unsigned int max_sectors, bool *same_page)
+{
+ if (len > UINT_MAX || offset > UINT_MAX)
+ return 0;
+ return bio_add_hw_page(q, bio, folio_page(folio, 0), len, offset,
+ max_sectors, same_page);
+}
+
+/**
* bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
@@ -1166,7 +1190,6 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
struct folio_iter fi;
bio_for_each_folio_all(fi, bio) {
- struct page *page;
size_t nr_pages;
if (mark_dirty) {
@@ -1174,12 +1197,9 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
folio_mark_dirty(fi.folio);
folio_unlock(fi.folio);
}
- page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
fi.offset / PAGE_SIZE + 1;
- do {
- bio_release_page(bio, page++);
- } while (--nr_pages != 0);
+ unpin_user_folio(fi.folio, nr_pages);
}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
@@ -1204,8 +1224,8 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
bio_set_flag(bio, BIO_CLONED);
}
-static int bio_iov_add_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
+static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
+ size_t offset)
{
bool same_page = false;
@@ -1214,30 +1234,61 @@ static int bio_iov_add_page(struct bio *bio, struct page *page,
if (bio->bi_vcnt > 0 &&
bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
- page, len, offset, &same_page)) {
+ folio_page(folio, 0), len, offset,
+ &same_page)) {
bio->bi_iter.bi_size += len;
- if (same_page)
- bio_release_page(bio, page);
+ if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
+ unpin_user_folio(folio, 1);
return 0;
}
- __bio_add_page(bio, page, len, offset);
+ bio_add_folio_nofail(bio, folio, len, offset);
return 0;
}
-static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
+static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
+ size_t len, size_t offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
bool same_page = false;
- if (bio_add_hw_page(q, bio, page, len, offset,
+ if (bio_add_hw_folio(q, bio, folio, len, offset,
queue_max_zone_append_sectors(q), &same_page) != len)
return -EINVAL;
- if (same_page)
- bio_release_page(bio, page);
+ if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
+ unpin_user_folio(folio, 1);
return 0;
}
+static unsigned int get_contig_folio_len(unsigned int *num_pages,
+ struct page **pages, unsigned int i,
+ struct folio *folio, size_t left,
+ size_t offset)
+{
+ size_t bytes = left;
+ size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
+ unsigned int j;
+
+ /*
+	 * We might COW a single page in the middle of a large folio, so we
+	 * have to check that all pages belong to the same folio.
+ */
+ bytes -= contig_sz;
+ for (j = i + 1; j < i + *num_pages; j++) {
+ size_t next = min_t(size_t, PAGE_SIZE, bytes);
+
+ if (page_folio(pages[j]) != folio ||
+ pages[j] != pages[j - 1] + 1) {
+ break;
+ }
+ contig_sz += next;
+ bytes -= next;
+ }
+ *num_pages = j - i;
+
+ return contig_sz;
+}
+
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@@ -1257,9 +1308,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
- ssize_t size, left;
- unsigned len, i = 0;
- size_t offset;
+ ssize_t size;
+ unsigned int num_pages, i = 0;
+ size_t offset, folio_offset, left, len;
int ret = 0;
/*
@@ -1299,17 +1350,28 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
goto out;
}
- for (left = size, i = 0; left > 0; left -= len, i++) {
+ for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
struct page *page = pages[i];
+ struct folio *folio = page_folio(page);
+
+ folio_offset = ((size_t)folio_page_idx(folio, page) <<
+ PAGE_SHIFT) + offset;
+
+ len = min(folio_size(folio) - folio_offset, left);
+
+ num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+ if (num_pages > 1)
+ len = get_contig_folio_len(&num_pages, pages, i,
+ folio, left, offset);
- len = min_t(size_t, PAGE_SIZE - offset, left);
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- ret = bio_iov_add_zone_append_page(bio, page, len,
- offset);
+ ret = bio_iov_add_zone_append_folio(bio, folio, len,
+ folio_offset);
if (ret)
break;
} else
- bio_iov_add_page(bio, page, len, offset);
+ bio_iov_add_folio(bio, folio, len, folio_offset);
offset = 0;
}
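
The rewritten loop in __bio_iov_iter_get_pages() above advances folio-at-a-time instead of page-at-a-time. A self-contained model of the per-iteration arithmetic, with assumed example values (4 KiB pages, one 64 KiB folio, pinning from 512 bytes into its third page with 10 KiB left to map):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long folio_size = 16 * PAGE_SIZE;	/* one 64 KiB folio */
	unsigned long page_idx = 2;	/* pin starts in the folio's third page */
	unsigned long offset = 512;	/* ...512 bytes into that page */
	unsigned long left = 10240;	/* bytes still to map */
	unsigned long folio_offset = page_idx * PAGE_SIZE + offset;
	unsigned long len = folio_size - folio_offset;
	unsigned long num_pages;

	if (len > left)
		len = left;
	num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

	assert(len == 10240);	/* the whole remainder fits in this folio */
	assert(num_pages == 3);	/* 512 + 10240 bytes span pages 2..4 */
	printf("len=%lu num_pages=%lu\n", len, num_pages);
	return 0;
}
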
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 69e70964398c..e68c725cf8d9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1458,7 +1458,6 @@ int blkcg_init_disk(struct gendisk *disk)
struct request_queue *q = disk->queue;
struct blkcg_gq *new_blkg, *blkg;
bool preloaded;
- int ret;
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
@@ -1478,15 +1477,8 @@ int blkcg_init_disk(struct gendisk *disk)
if (preloaded)
radix_tree_preload_end();
- ret = blk_ioprio_init(disk);
- if (ret)
- goto err_destroy_all;
-
return 0;
-err_destroy_all:
- blkg_destroy_all(disk);
- return ret;
err_unlock:
spin_unlock_irq(&q->queue_lock);
if (preloaded)
@@ -1554,6 +1546,14 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
if (blkcg_policy_enabled(q, pol))
return 0;
+ /*
+	 * A policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
+	 * for example ioprio. Such a policy works at the blkcg level, not the
+	 * disk level, and doesn't need to be activated.
+ */
+ if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
+ return -EINVAL;
+
if (queue_is_mq(q))
blk_mq_freeze_queue(q);
retry:
@@ -1733,9 +1733,12 @@ int blkcg_policy_register(struct blkcg_policy *pol)
goto err_unlock;
}
- /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
+ /*
+	 * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs, and that
+	 * a policy without pd_alloc_fn/pd_free_fn can't be activated.
+ */
if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
- (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
+ (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
goto err_unlock;
/* register @pol */
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 864fad4a850b..b9e3265c1eb3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -485,7 +485,6 @@ static inline void blkcg_deactivate_policy(struct gendisk *disk,
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
diff --git a/block/blk-core.c b/block/blk-core.c
index 1217c2cd66dd..bc5e8c5eaac9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -799,6 +799,7 @@ void submit_bio_noacct(struct bio *bio)
switch (bio_op(bio)) {
case REQ_OP_READ:
+ break;
case REQ_OP_WRITE:
if (bio->bi_opf & REQ_ATOMIC) {
status = blk_validate_atomic_write_op_size(q, bio);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 010decc892ea..0a2b1c5d0ebf 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -53,7 +53,6 @@ new_segment:
return segments;
}
-EXPORT_SYMBOL(blk_rq_count_integrity_sg);
/**
* blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
@@ -63,19 +62,20 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
*
* Description: Map the integrity vectors in request into a
* scatterlist. The scatterlist must be big enough to hold all
- * elements. I.e. sized using blk_rq_count_integrity_sg().
+ * elements. I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
*/
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
struct bio_vec iv, ivprv = { NULL };
+ struct request_queue *q = rq->q;
struct scatterlist *sg = NULL;
+ struct bio *bio = rq->bio;
unsigned int segments = 0;
struct bvec_iter iter;
int prev = 0;
bio_for_each_integrity_vec(iv, bio, iter) {
-
if (prev) {
if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
@@ -103,10 +103,30 @@ new_segment:
if (sg)
sg_mark_end(sg);
+ /*
+	 * Something must have gone wrong if the computed number of segments
+	 * is bigger than the number of the request's physical integrity segments.
+ */
+ BUG_ON(segments > rq->nr_integrity_segments);
+ BUG_ON(segments > queue_max_integrity_segments(q));
return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed)
+{
+ int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
+
+ if (ret)
+ return ret;
+
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
+ rq->cmd_flags |= REQ_INTEGRITY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
+
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -134,7 +154,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
struct bio *bio)
{
int nr_integrity_segs;
- struct bio *next = bio->bi_next;
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
return true;
@@ -145,16 +164,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
return false;
- bio->bi_next = NULL;
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
- bio->bi_next = next;
-
if (req->nr_integrity_segments + nr_integrity_segs >
q->limits.max_integrity_segments)
return false;
- req->nr_integrity_segments += nr_integrity_segs;
-
return true;
}
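
blk_rq_integrity_map_user() is exported above for passthrough users. A hedged sketch of how a driver's ioctl path might use it; attach_user_integrity() and its parameters are hypothetical, only the exported helper and its signature come from this patch.

static int attach_user_integrity(struct request *req, void __user *meta_buf,
				 ssize_t meta_len, u32 seed)
{
	int ret;

	ret = blk_rq_integrity_map_user(req, meta_buf, meta_len, seed);
	if (ret)
		return ret;	/* bio_integrity_map_user() failed */

	/* REQ_INTEGRITY and rq->nr_integrity_segments are now set */
	return 0;
}
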
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 690ca99dfaca..9dc9323f84ac 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -648,7 +648,7 @@ static const struct ioc_params autop[] = {
* vrate adjust percentages indexed by ioc->busy_level. We adjust up on
* vtime credit shortage and down on device saturation.
*/
-static u32 vrate_adj_pct[] =
+static const u32 vrate_adj_pct[] =
{ 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -2076,7 +2076,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
struct ioc_now *now)
{
struct ioc_gq *iocg;
- u64 dur, usage_pct, nr_cycles;
+ u64 dur, usage_pct, nr_cycles, nr_cycles_shift;
/* if no debtor, reset the cycle */
if (!nr_debtors) {
@@ -2138,10 +2138,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
old_debt = iocg->abs_vdebt;
old_delay = iocg->delay;
+ nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
if (iocg->abs_vdebt)
- iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
+ iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;
+
if (iocg->delay)
- iocg->delay = iocg->delay >> nr_cycles ?: 1;
+ iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;
iocg_kick_waitq(iocg, true, now);
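
The nr_cycles_shift clamp above exists because shifting a 64-bit value by BITS_PER_LONG or more is undefined behaviour in C, and nr_cycles is unbounded after a long forgiveness period. A standalone userspace model of the clamped halving; forgive() is illustrative, with 63 standing in for BITS_PER_LONG - 1 on a 64-bit kernel.

#include <stdint.h>
#include <stdio.h>

static uint64_t forgive(uint64_t debt, uint64_t nr_cycles)
{
	uint64_t shift = nr_cycles < 63 ? nr_cycles : 63;
	uint64_t v = debt >> shift;

	return v ? v : 1;	/* mirrors "x >> s ?: 1": never drop to zero */
}

int main(void)
{
	/* without the clamp, (1 << 20) >> 100 would be undefined behaviour */
	printf("%llu\n", (unsigned long long)forgive(1 << 20, 100));
	return 0;
}
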
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 4051fada01f1..8fff7ccc0ac7 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -50,14 +50,6 @@ static const char *policy_name[] = {
static struct blkcg_policy ioprio_policy;
/**
- * struct ioprio_blkg - Per (cgroup, request queue) data.
- * @pd: blkg_policy_data structure.
- */
-struct ioprio_blkg {
- struct blkg_policy_data pd;
-};
-
-/**
* struct ioprio_blkcg - Per cgroup data.
* @cpd: blkcg_policy_data structure.
* @prio_policy: One of the IOPRIO_CLASS_* values. See also <linux/ioprio.h>.
@@ -67,11 +59,6 @@ struct ioprio_blkcg {
enum prio_policy prio_policy;
};
-static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd)
-{
- return pd ? container_of(pd, struct ioprio_blkg, pd) : NULL;
-}
-
static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg)
{
return container_of(blkcg_to_cpd(blkcg, &ioprio_policy),
@@ -84,16 +71,6 @@ ioprio_blkcg_from_css(struct cgroup_subsys_state *css)
return blkcg_to_ioprio_blkcg(css_to_blkcg(css));
}
-static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio)
-{
- struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy);
-
- if (!pd)
- return NULL;
-
- return blkcg_to_ioprio_blkcg(pd->blkg->blkcg);
-}
-
static int ioprio_show_prio_policy(struct seq_file *sf, void *v)
{
struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf));
@@ -118,25 +95,6 @@ static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf,
return nbytes;
}
-static struct blkg_policy_data *
-ioprio_alloc_pd(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp)
-{
- struct ioprio_blkg *ioprio_blkg;
-
- ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp);
- if (!ioprio_blkg)
- return NULL;
-
- return &ioprio_blkg->pd;
-}
-
-static void ioprio_free_pd(struct blkg_policy_data *pd)
-{
- struct ioprio_blkg *ioprio_blkg = pd_to_ioprio(pd);
-
- kfree(ioprio_blkg);
-}
-
static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp)
{
struct ioprio_blkcg *blkcg;
@@ -179,14 +137,11 @@ static struct blkcg_policy ioprio_policy = {
.cpd_alloc_fn = ioprio_alloc_cpd,
.cpd_free_fn = ioprio_free_cpd,
-
- .pd_alloc_fn = ioprio_alloc_pd,
- .pd_free_fn = ioprio_free_pd,
};
void blkcg_set_ioprio(struct bio *bio)
{
- struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);
+ struct ioprio_blkcg *blkcg = blkcg_to_ioprio_blkcg(bio->bi_blkg->blkcg);
u16 prio;
if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE)
@@ -219,16 +174,6 @@ void blkcg_set_ioprio(struct bio *bio)
bio->bi_ioprio = prio;
}
-void blk_ioprio_exit(struct gendisk *disk)
-{
- blkcg_deactivate_policy(disk, &ioprio_policy);
-}
-
-int blk_ioprio_init(struct gendisk *disk)
-{
- return blkcg_activate_policy(disk, &ioprio_policy);
-}
-
static int __init ioprio_init(void)
{
return blkcg_policy_register(&ioprio_policy);
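
After this change ioprio is a cgroup-level-only policy: it keeps the cpd hooks (per-cgroup data) and drops the pd hooks (per-disk data), which is exactly the shape blkcg_activate_policy() now rejects. A hedged sketch of declaring such a policy; the example_* names are hypothetical and the hook bodies are elided.

static struct blkcg_policy_data *example_alloc_cpd(gfp_t gfp);
static void example_free_cpd(struct blkcg_policy_data *cpd);

static struct blkcg_policy example_cgroup_only_policy = {
	.cpd_alloc_fn	= example_alloc_cpd,	/* kzalloc a cpd wrapper */
	.cpd_free_fn	= example_free_cpd,	/* kfree it */
	/*
	 * No .pd_alloc_fn/.pd_free_fn: the policy carries no per-disk
	 * state and must never be passed to blkcg_activate_policy().
	 */
};

Registration is the only step needed: blkcg_policy_register(&example_cgroup_only_policy) from an initcall, as ioprio_init() does above.
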
diff --git a/block/blk-ioprio.h b/block/blk-ioprio.h
index b6afb8e80de0..9265143f9bc9 100644
--- a/block/blk-ioprio.h
+++ b/block/blk-ioprio.h
@@ -9,17 +9,8 @@ struct request_queue;
struct bio;
#ifdef CONFIG_BLK_CGROUP_IOPRIO
-int blk_ioprio_init(struct gendisk *disk);
-void blk_ioprio_exit(struct gendisk *disk);
void blkcg_set_ioprio(struct bio *bio);
#else
-static inline int blk_ioprio_init(struct gendisk *disk)
-{
- return 0;
-}
-static inline void blk_ioprio_exit(struct gendisk *disk)
-{
-}
static inline void blkcg_set_ioprio(struct bio *bio)
{
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index de5281bcadc5..ad763ec313b6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -105,9 +105,33 @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
-static struct bio *bio_split_discard(struct bio *bio,
- const struct queue_limits *lim,
- unsigned *nsegs, struct bio_set *bs)
+static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
+{
+ if (unlikely(split_sectors < 0)) {
+ bio->bi_status = errno_to_blk_status(split_sectors);
+ bio_endio(bio);
+ return NULL;
+ }
+
+ if (split_sectors) {
+ struct bio *split;
+
+ split = bio_split(bio, split_sectors, GFP_NOIO,
+ &bio->bi_bdev->bd_disk->bio_split);
+ split->bi_opf |= REQ_NOMERGE;
+ blkcg_bio_issue_init(split);
+ bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
+ WARN_ON_ONCE(bio_zone_write_plugging(bio));
+ submit_bio_noacct(bio);
+ return split;
+ }
+
+ return bio;
+}
+
+struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
+ unsigned *nsegs)
{
unsigned int max_discard_sectors, granularity;
sector_t tmp;
@@ -121,10 +145,10 @@ static struct bio *bio_split_discard(struct bio *bio,
min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors))
- return NULL;
+ return bio;
if (bio_sectors(bio) <= max_discard_sectors)
- return NULL;
+ return bio;
split_sectors = max_discard_sectors;
@@ -139,19 +163,18 @@ static struct bio *bio_split_discard(struct bio *bio,
if (split_sectors > tmp)
split_sectors -= tmp;
- return bio_split(bio, split_sectors, GFP_NOIO, bs);
+ return bio_submit_split(bio, split_sectors);
}
-static struct bio *bio_split_write_zeroes(struct bio *bio,
- const struct queue_limits *lim,
- unsigned *nsegs, struct bio_set *bs)
+struct bio *bio_split_write_zeroes(struct bio *bio,
+ const struct queue_limits *lim, unsigned *nsegs)
{
*nsegs = 0;
if (!lim->max_write_zeroes_sectors)
- return NULL;
+ return bio;
if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
- return NULL;
- return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
+ return bio;
+ return bio_submit_split(bio, lim->max_write_zeroes_sectors);
}
static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
@@ -274,27 +297,19 @@ static bool bvec_split_segs(const struct queue_limits *lim,
}
/**
- * bio_split_rw - split a bio in two bios
+ * bio_split_rw_at - check if and where to split a read/write bio
* @bio: [in] bio to be split
* @lim: [in] queue limits to split based on
* @segs: [out] number of segments in the bio with the first half of the sectors
- * @bs: [in] bio set to allocate the clone from
* @max_bytes: [in] maximum number of bytes per bio
*
- * Clone @bio, update the bi_iter of the clone to represent the first sectors
- * of @bio and update @bio->bi_iter to represent the remaining sectors. The
- * following is guaranteed for the cloned bio:
- * - That it has at most @max_bytes worth of data
- * - That it has at most queue_max_segments(@q) segments.
- *
- * Except for discard requests the cloned bio will point at the bi_io_vec of
- * the original bio. It is the responsibility of the caller to ensure that the
- * original bio is not freed before the cloned bio. The caller is also
- * responsible for ensuring that @bs is only destroyed after processing of the
- * split bio has finished.
+ * Find out if @bio needs to be split to fit the queue limits in @lim and a
+ * maximum size of @max_bytes. Returns a negative error number if @bio can't be
+ * split, 0 if the bio doesn't have to be split, or a positive sector offset if
+ * @bio needs to be split.
*/
-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
- unsigned *segs, struct bio_set *bs, unsigned max_bytes)
+int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
@@ -324,22 +339,17 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
}
*segs = nsegs;
- return NULL;
+ return 0;
split:
- if (bio->bi_opf & REQ_ATOMIC) {
- bio->bi_status = BLK_STS_INVAL;
- bio_endio(bio);
- return ERR_PTR(-EINVAL);
- }
+ if (bio->bi_opf & REQ_ATOMIC)
+ return -EINVAL;
+
/*
* We can't sanely support splitting for a REQ_NOWAIT bio. End it
* with EAGAIN if splitting is required and return an error pointer.
*/
- if (bio->bi_opf & REQ_NOWAIT) {
- bio->bi_status = BLK_STS_AGAIN;
- bio_endio(bio);
- return ERR_PTR(-EAGAIN);
- }
+ if (bio->bi_opf & REQ_NOWAIT)
+ return -EAGAIN;
*segs = nsegs;
@@ -356,58 +366,36 @@ split:
	 * big IO can be trivial, disable iopoll when split needed.
*/
bio_clear_polled(bio);
- return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
+ return bytes >> SECTOR_SHIFT;
}
-EXPORT_SYMBOL_GPL(bio_split_rw);
+EXPORT_SYMBOL_GPL(bio_split_rw_at);
-/**
- * __bio_split_to_limits - split a bio to fit the queue limits
- * @bio: bio to be split
- * @lim: queue limits to split based on
- * @nr_segs: returns the number of segments in the returned bio
- *
- * Check if @bio needs splitting based on the queue limits, and if so split off
- * a bio fitting the limits from the beginning of @bio and return it. @bio is
- * shortened to the remainder and re-submitted.
+struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ unsigned *nr_segs)
+{
+ return bio_submit_split(bio,
+ bio_split_rw_at(bio, lim, nr_segs,
+ get_max_io_size(bio, lim) << SECTOR_SHIFT));
+}
+
+/*
+ * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
*
- * The split bio is allocated from @q->bio_split, which is provided by the
- * block layer.
+ * But we want the nr_segs calculation provided by bio_split_rw_at, and having
+ * a good sanity check that the submitter built the bio correctly is nice to
+ * have as well.
*/
-struct bio *__bio_split_to_limits(struct bio *bio,
- const struct queue_limits *lim,
- unsigned int *nr_segs)
+struct bio *bio_split_zone_append(struct bio *bio,
+ const struct queue_limits *lim, unsigned *nr_segs)
{
- struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
- struct bio *split;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- split = bio_split_discard(bio, lim, nr_segs, bs);
- break;
- case REQ_OP_WRITE_ZEROES:
- split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
- break;
- default:
- split = bio_split_rw(bio, lim, nr_segs, bs,
- get_max_io_size(bio, lim) << SECTOR_SHIFT);
- if (IS_ERR(split))
- return NULL;
- break;
- }
-
- if (split) {
- /* there isn't chance to merge the split bio */
- split->bi_opf |= REQ_NOMERGE;
-
- blkcg_bio_issue_init(split);
- bio_chain(split, bio);
- trace_block_split(split, bio->bi_iter.bi_sector);
- WARN_ON_ONCE(bio_zone_write_plugging(bio));
- submit_bio_noacct(bio);
- return split;
- }
- return bio;
+ unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
+ int split_sectors;
+
+ split_sectors = bio_split_rw_at(bio, lim, nr_segs,
+ max_sectors << SECTOR_SHIFT);
+ if (WARN_ON_ONCE(split_sectors > 0))
+ split_sectors = -EINVAL;
+ return bio_submit_split(bio, split_sectors);
}
/**
@@ -426,9 +414,7 @@ struct bio *bio_split_to_limits(struct bio *bio)
const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
unsigned int nr_segs;
- if (bio_may_exceed_limits(bio, lim))
- return __bio_split_to_limits(bio, lim, &nr_segs);
- return bio;
+ return __bio_split_to_limits(bio, lim, &nr_segs);
}
EXPORT_SYMBOL(bio_split_to_limits);
@@ -653,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
* counters.
*/
req->nr_phys_segments += nr_phys_segs;
+ if (bio_integrity(bio))
+ req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
+ bio);
return 1;
no_merge:
@@ -745,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
+ req->nr_integrity_segments += next->nr_integrity_segments;
return 1;
}
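
The refactor above gives every split helper one three-way contract: compute a split point, then hand it to bio_submit_split(), which ends the bio on a negative errno, splits off the first half and resubmits the remainder on a positive sector count, and passes the bio through untouched on zero. A sketch restating bio_split_rw() with that contract spelled out (illustrative, not a replacement):

static struct bio *bio_split_rw_sketch(struct bio *bio,
				       const struct queue_limits *lim,
				       unsigned *nr_segs)
{
	int split_sectors = bio_split_rw_at(bio, lim, nr_segs,
			get_max_io_size(bio, lim) << SECTOR_SHIFT);

	/*
	 * split_sectors < 0:  can't split (REQ_ATOMIC/REQ_NOWAIT);
	 *                     bio_submit_split() ends the bio, returns NULL
	 * split_sectors == 0: no split needed, bio returned as-is
	 * split_sectors > 0:  first half split off and returned, remainder
	 *                     resubmitted via submit_bio_noacct()
	 */
	return bio_submit_split(bio, split_sectors);
}
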
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e3c3c0c21b55..4b2c8e940f59 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->io_start_time_ns = 0;
rq->stats_sectors = 0;
rq->nr_phys_segments = 0;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
-#endif
rq->end_io = NULL;
rq->end_io_data = NULL;
@@ -1128,7 +1126,7 @@ static void blk_complete_reqs(struct llist_head *list)
rq->q->mq_ops->complete(rq);
}
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(void)
{
blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}
@@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
rq->__sector = bio->bi_iter.bi_sector;
rq->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(rq, bio, nr_segs);
+ if (bio_integrity(bio))
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
+ bio);
/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
@@ -2753,6 +2754,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request *rq;
+ unsigned int depth;
/*
* We may have been called recursively midway through handling
@@ -2763,6 +2765,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (plug->rq_count == 0)
return;
+ depth = plug->rq_count;
plug->rq_count = 0;
if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
@@ -2770,6 +2773,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
rq = rq_list_peek(&plug->mq_list);
q = rq->q;
+ trace_block_unplug(q, depth, true);
/*
* Peek first request and see if we have a ->queue_rqs() hook.
@@ -2939,7 +2943,7 @@ void blk_mq_submit_bio(struct bio *bio)
struct blk_plug *plug = current->plug;
const int is_sync = op_is_sync(bio->bi_opf);
struct blk_mq_hw_ctx *hctx;
- unsigned int nr_segs = 1;
+ unsigned int nr_segs;
struct request *rq;
blk_status_t ret;
@@ -2981,11 +2985,10 @@ void blk_mq_submit_bio(struct bio *bio)
goto queue_exit;
}
- if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
- if (!bio)
- goto queue_exit;
- }
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ goto queue_exit;
+
if (!bio_integrity_prep(bio))
goto queue_exit;
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index dd7310c94713..2cfb297d9a62 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -263,7 +263,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
TASK_UNINTERRUPTIBLE);
do {
- /* The memory barrier in set_task_state saves us here. */
+ /* The memory barrier in set_current_state saves us here. */
if (data.got_token)
break;
if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cd8a8eabc9a5..a446654ddee5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);
-/**
- * blk_limits_io_min - set minimum request size for a device
- * @limits: the queue limits
- * @min: smallest I/O size in bytes
- *
- * Description:
- * Some devices have an internal block size bigger than the reported
- * hardware sector size. This function can be used to signal the
- * smallest I/O the device can perform without incurring a performance
- * penalty.
- */
-void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
-{
- limits->io_min = min;
-
- if (limits->io_min < limits->logical_block_size)
- limits->io_min = limits->logical_block_size;
-
- if (limits->io_min < limits->physical_block_size)
- limits->io_min = limits->physical_block_size;
-}
-EXPORT_SYMBOL(blk_limits_io_min);
-
-/**
- * blk_limits_io_opt - set optimal request size for a device
- * @limits: the queue limits
- * @opt: smallest I/O size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
-{
- limits->io_opt = opt;
-}
-EXPORT_SYMBOL(blk_limits_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 60116d13cb80..e85941bec857 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -23,6 +23,7 @@
struct queue_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct gendisk *disk, char *page);
+ int (*load_module)(struct gendisk *disk, const char *page, size_t count);
ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
};
@@ -413,6 +414,14 @@ static struct queue_sysfs_entry _prefix##_entry = { \
.store = _prefix##_store, \
};
+#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+ .attr = { .name = _name, .mode = 0644 }, \
+ .show = _prefix##_show, \
+ .load_module = _prefix##_load_module, \
+ .store = _prefix##_store, \
+}
+
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
@@ -420,7 +429,7 @@ QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
-QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");
QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
@@ -670,6 +679,17 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EIO;
+ /*
+ * If the attribute needs to load a module, do it before freezing the
+ * queue to ensure that the module file can be read when the request
+ * queue is the one for the device storing the module file.
+ */
+ if (entry->load_module) {
+ res = entry->load_module(disk, page, length);
+ if (res)
+ return res;
+ }
+
blk_mq_freeze_queue(q);
mutex_lock(&q->sysfs_lock);
res = entry->store(disk, page, length);
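
Any attribute whose store handler may need a module can now opt in via QUEUE_RW_LOAD_MODULE_ENTRY: the load_module callback runs before blk_mq_freeze_queue(), so reading the module file from the very device being configured cannot deadlock against the frozen queue. A hedged sketch of declaring such an attribute; the example_* handlers are hypothetical.

static ssize_t example_show(struct gendisk *disk, char *page);
static int example_load_module(struct gendisk *disk, const char *page,
			       size_t count);
static ssize_t example_store(struct gendisk *disk, const char *page,
			     size_t count);

/* expands to a queue_sysfs_entry wiring up all three hooks */
QUEUE_RW_LOAD_MODULE_ENTRY(example, "example_attr");
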
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 6943ec720f39..2c4192e12efa 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1584,6 +1584,22 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
spin_unlock_irq(&q->queue_lock);
}
+static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
+{
+ /* throtl is FIFO - if bios are already queued, should queue */
+ if (tg->service_queue.nr_queued[rw])
+ return false;
+
+ return tg_may_dispatch(tg, bio, NULL);
+}
+
+static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
+{
+ if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+ tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
+ tg->carryover_ios[rw]--;
+}
+
bool __blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -1600,34 +1616,35 @@ bool __blk_throtl_bio(struct bio *bio)
sq = &tg->service_queue;
while (true) {
- if (tg->last_low_overflow_time[rw] == 0)
- tg->last_low_overflow_time[rw] = jiffies;
- /* throtl is FIFO - if bios are already queued, should queue */
- if (sq->nr_queued[rw])
- break;
-
- /* if above limits, break to queue */
- if (!tg_may_dispatch(tg, bio, NULL)) {
- tg->last_low_overflow_time[rw] = jiffies;
+ if (tg_within_limit(tg, bio, rw)) {
+ /* within limits, let's charge and dispatch directly */
+ throtl_charge_bio(tg, bio);
+
+ /*
+ * We need to trim slice even when bios are not being
+ * queued otherwise it might happen that a bio is not
+ * queued for a long time and slice keeps on extending
+ * and trim is not called for a long time. Now if limits
+ * are reduced suddenly we take into account all the IO
+			 * dispatched so far at the new low rate and newly queued
+ * IO gets a really long dispatch time.
+ *
+ * So keep on trimming slice even if bio is not queued.
+ */
+ throtl_trim_slice(tg, rw);
+ } else if (bio_issue_as_root_blkg(bio)) {
+ /*
+ * IOs which may cause priority inversions are
+ * dispatched directly, even if they're over limit.
+ * Debts are handled by carryover_bytes/ios while
+ * calculating wait time.
+ */
+ tg_dispatch_in_debt(tg, bio, rw);
+ } else {
+ /* if above limits, break to queue */
break;
}
- /* within limits, let's charge and dispatch directly */
- throtl_charge_bio(tg, bio);
-
- /*
- * We need to trim slice even when bios are not being queued
- * otherwise it might happen that a bio is not queued for
- * a long time and slice keeps on extending and trim is not
- * called for a long time. Now if limits are reduced suddenly
- * we take into account all the IO dispatched so far at new
- * low rate and * newly queued IO gets a really long dispatch
- * time.
- *
- * So keep on trimming slice even if bio is not queued.
- */
- throtl_trim_slice(tg, rw);
-
/*
* @bio passed through this layer without being throttled.
* Climb up the ladder. If we're already at the top, it
@@ -1650,8 +1667,6 @@ bool __blk_throtl_bio(struct bio *bio)
tg->io_disp[rw], tg_iops_limit(tg, rw),
sq->nr_queued[READ], sq->nr_queued[WRITE]);
- tg->last_low_overflow_time[rw] = jiffies;
-
td->nr_queued[rw]++;
throtl_add_bio_tg(bio, qn, tg);
throttled = true;
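
tg_dispatch_in_debt() above lets priority-inversion-prone root bios through immediately while recording their cost as negative carryover, so later wait-time calculations repay the debt. A trivial standalone model of that bookkeeping, with assumed values:

#include <stdio.h>

int main(void)
{
	long long carryover_bytes = 0, carryover_ios = 0;
	long long bio_bytes = 128 * 1024;	/* a 128 KiB bio, not yet BPS-throttled */

	/* mirrors tg_dispatch_in_debt(): charge the bypassing bio as debt */
	carryover_bytes -= bio_bytes;
	carryover_ios -= 1;

	printf("carryover: %lld bytes, %lld ios\n",
	       carryover_bytes, carryover_ios);
	return 0;
}
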
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 4d9ef5abdf21..1a36d1278eea 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -106,8 +106,6 @@ struct throtl_grp {
/* Number of bio's dispatched in current slice */
unsigned int io_disp[2];
- unsigned long last_low_overflow_time[2];
-
uint64_t last_bytes_disp[2];
unsigned int last_io_disp[2];
diff --git a/block/blk.h b/block/blk.h
index e180863f918b..c718e4291db0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -331,33 +331,67 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
const char *, size_t);
-static inline bool bio_may_exceed_limits(struct bio *bio,
- const struct queue_limits *lim)
+struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
+ unsigned *nsegs);
+struct bio *bio_split_write_zeroes(struct bio *bio,
+ const struct queue_limits *lim, unsigned *nsegs);
+struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ unsigned *nr_segs);
+struct bio *bio_split_zone_append(struct bio *bio,
+ const struct queue_limits *lim, unsigned *nr_segs);
+
+/*
+ * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
+ *
+ * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
+ * always valid if a bio has data. The check might lead to occasional false
+ * positives when bios are cloned, but compared to the performance impact of
+ * cloned bios themselves the loop below doesn't matter anyway.
+ */
+static inline bool bio_may_need_split(struct bio *bio,
+ const struct queue_limits *lim)
+{
+ return lim->chunk_sectors || bio->bi_vcnt != 1 ||
+ bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+}
+
+/**
+ * __bio_split_to_limits - split a bio to fit the queue limits
+ * @bio: bio to be split
+ * @lim: queue limits to split based on
+ * @nr_segs: returns the number of segments in the returned bio
+ *
+ * Check if @bio needs splitting based on the queue limits, and if so split off
+ * a bio fitting the limits from the beginning of @bio and return it. @bio is
+ * shortened to the remainder and re-submitted.
+ *
+ * The split bio is allocated from @q->bio_split, which is provided by the
+ * block layer.
+ */
+static inline struct bio *__bio_split_to_limits(struct bio *bio,
+ const struct queue_limits *lim, unsigned int *nr_segs)
{
switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (bio_may_need_split(bio, lim))
+ return bio_split_rw(bio, lim, nr_segs);
+ *nr_segs = 1;
+ return bio;
+ case REQ_OP_ZONE_APPEND:
+ return bio_split_zone_append(bio, lim, nr_segs);
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
+ return bio_split_discard(bio, lim, nr_segs);
case REQ_OP_WRITE_ZEROES:
- return true; /* non-trivial splitting decisions */
+ return bio_split_write_zeroes(bio, lim, nr_segs);
default:
- break;
+ /* other operations can't be split */
+ *nr_segs = 0;
+ return bio;
}
-
- /*
- * All drivers must accept single-segments bios that are <= PAGE_SIZE.
- * This is a quick and dirty check that relies on the fact that
- * bi_io_vec[0] is always valid if a bio has data. The check might
- * lead to occasional false negatives when bios are cloned, but compared
- * to the performance impact of cloned bios themselves the loop below
- * doesn't matter anyway.
- */
- return lim->chunk_sectors || bio->bi_vcnt != 1 ||
- bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
-struct bio *__bio_split_to_limits(struct bio *bio,
- const struct queue_limits *lim,
- unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -540,6 +574,10 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
+int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
+ struct folio *folio, size_t len, size_t offset,
+ unsigned int max_sectors, bool *same_page);
+
/*
* Clean up a page appropriately, where the page may be pinned, may have a
* ref taken on it or neither.
@@ -571,6 +609,7 @@ blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
+int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
extern const struct address_space_operations def_blk_aops;
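
With the dispatch switch inlined above, submitters no longer pre-check anything: they call __bio_split_to_limits() unconditionally and the READ/WRITE case performs its own cheap bio_may_need_split() test. A hedged caller sketch mirroring the blk-mq.c hunk earlier; prep_and_split() is illustrative only.

static struct bio *prep_and_split(struct bio *bio, struct request_queue *q,
				  unsigned int *nr_segs)
{
	/* NULL means the bio was ended (or failed) inside the split path */
	bio = __bio_split_to_limits(bio, &q->limits, nr_segs);
	if (!bio)
		return NULL;

	/* *nr_segs is now valid for initializing the request */
	return bio;
}
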
diff --git a/block/elevator.c b/block/elevator.c
index f13d552a32c8..4122026b11f1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -698,17 +698,28 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
return 0;
e = elevator_find_get(q, elevator_name);
- if (!e) {
- request_module("%s-iosched", elevator_name);
- e = elevator_find_get(q, elevator_name);
- if (!e)
- return -EINVAL;
- }
+ if (!e)
+ return -EINVAL;
ret = elevator_switch(q, e);
elevator_put(e);
return ret;
}
+int elv_iosched_load_module(struct gendisk *disk, const char *buf,
+ size_t count)
+{
+ char elevator_name[ELV_NAME_MAX];
+
+ if (!elv_support_iosched(disk->queue))
+ return -EOPNOTSUPP;
+
+ strscpy(elevator_name, buf, sizeof(elevator_name));
+
+ request_module("%s-iosched", strstrip(elevator_name));
+
+ return 0;
+}
+
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
size_t count)
{
diff --git a/block/elevator.h b/block/elevator.h
index 3fe18e1a8692..2a78544bf201 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -148,6 +148,8 @@ extern void elv_unregister(struct elevator_type *);
* io scheduler sysfs switching
*/
ssize_t elv_iosched_show(struct gendisk *disk, char *page);
+int elv_iosched_load_module(struct gendisk *disk, const char *page,
+ size_t count);
ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
diff --git a/block/fops.c b/block/fops.c
index 9825c1713a49..e696ae53bf1e 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
+#include <linux/io_uring/cmd.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)
@@ -451,20 +452,20 @@ static void blkdev_readahead(struct readahead_control *rac)
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
- return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
+ return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, struct page *page,
+ loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata)
{
int ret;
- ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return ret;
}
@@ -665,7 +666,7 @@ blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
- return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
+ return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}
/*
@@ -771,7 +772,7 @@ reexpand:
#define BLKDEV_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+ FALLOC_FL_ZERO_RANGE)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
loff_t len)
@@ -830,14 +831,6 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOFALLBACK);
break;
- case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
- error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
- if (error)
- goto fail;
-
- error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
- len >> SECTOR_SHIFT, GFP_KERNEL);
- break;
default:
error = -EOPNOTSUPP;
}
@@ -873,6 +866,7 @@ const struct file_operations def_blk_fops = {
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = blkdev_fallocate,
+ .uring_cmd = blkdev_uring_cmd,
.fop_flags = FOP_BUFFER_RASYNC,
};
diff --git a/block/ioctl.c b/block/ioctl.c
index e8e4a4190f18..6554b728bae6 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -11,6 +11,9 @@
#include <linux/blktrace_api.h>
#include <linux/pr.h>
#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <linux/io_uring/cmd.h>
+#include <uapi/linux/blkdev.h>
#include "blk.h"
static int blkpg_do_ioctl(struct block_device *bdev,
@@ -92,38 +95,51 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif
+/*
+ * Check that [start, start + len) is a valid range from the block device's
+ * perspective, including verifying that it can be correctly translated into
+ * logical block addresses.
+ */
+static int blk_validate_byte_range(struct block_device *bdev,
+ uint64_t start, uint64_t len)
+{
+ unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
+ uint64_t end;
+
+ if ((start | len) & bs_mask)
+ return -EINVAL;
+ if (!len)
+ return -EINVAL;
+ if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev))
+ return -EINVAL;
+
+ return 0;
+}
+
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
- unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
- uint64_t range[2], start, len, end;
+ uint64_t range[2], start, len;
struct bio *prev = NULL, *bio;
sector_t sector, nr_sects;
struct blk_plug plug;
int err;
- if (!(mode & BLK_OPEN_WRITE))
- return -EBADF;
-
- if (!bdev_max_discard_sectors(bdev))
- return -EOPNOTSUPP;
- if (bdev_read_only(bdev))
- return -EPERM;
-
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
-
start = range[0];
len = range[1];
- if (!len)
- return -EINVAL;
- if ((start | len) & bs_mask)
- return -EINVAL;
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
- if (check_add_overflow(start, len, &end) ||
- end > bdev_nr_bytes(bdev))
- return -EINVAL;
+ if (!(mode & BLK_OPEN_WRITE))
+ return -EBADF;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+ err = blk_validate_byte_range(bdev, start, len);
+ if (err)
+ return err;
filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
@@ -163,7 +179,7 @@ fail:
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
void __user *argp)
{
- uint64_t start, len;
+ uint64_t start, len, end;
uint64_t range[2];
int err;
@@ -178,11 +194,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
len = range[1];
if ((start & 511) || (len & 511))
return -EINVAL;
- if (start + len > bdev_nr_bytes(bdev))
+ if (check_add_overflow(start, len, &end) ||
+ end > bdev_nr_bytes(bdev))
return -EINVAL;
filemap_invalidate_lock(bdev->bd_mapping);
- err = truncate_bdev_range(bdev, mode, start, start + len - 1);
+ err = truncate_bdev_range(bdev, mode, start, end - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
@@ -734,3 +751,112 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return ret;
}
#endif
+
+struct blk_iou_cmd {
+ int res;
+ bool nowait;
+};
+
+static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
+
+ if (bic->res == -EAGAIN && bic->nowait)
+ io_uring_cmd_issue_blocking(cmd);
+ else
+ io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
+}
+
+static void bio_cmd_bio_end_io(struct bio *bio)
+{
+ struct io_uring_cmd *cmd = bio->bi_private;
+ struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
+
+ if (unlikely(bio->bi_status) && !bic->res)
+ bic->res = blk_status_to_errno(bio->bi_status);
+
+ io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete);
+ bio_put(bio);
+}
+
+static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
+ struct block_device *bdev,
+ uint64_t start, uint64_t len, bool nowait)
+{
+ struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
+ gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
+ sector_t sector = start >> SECTOR_SHIFT;
+ sector_t nr_sects = len >> SECTOR_SHIFT;
+ struct bio *prev = NULL, *bio;
+ int err;
+
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
+ if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
+ return -EBADF;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+ err = blk_validate_byte_range(bdev, start, len);
+ if (err)
+ return err;
+
+ err = filemap_invalidate_pages(bdev->bd_mapping, start,
+ start + len - 1, nowait);
+ if (err)
+ return err;
+
+ while (true) {
+ bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp);
+ if (!bio)
+ break;
+ if (nowait) {
+ /*
+ * Don't allow multi-bio non-blocking submissions as
+ * subsequent bios may fail but we won't get a direct
+ * indication of that. Normally, the caller should
+ * retry from a blocking context.
+ */
+ if (unlikely(nr_sects)) {
+ bio_put(bio);
+ return -EAGAIN;
+ }
+ bio->bi_opf |= REQ_NOWAIT;
+ }
+
+ prev = bio_chain_and_submit(prev, bio);
+ }
+ if (unlikely(!prev))
+ return -EAGAIN;
+ if (unlikely(nr_sects))
+ bic->res = -EAGAIN;
+
+ prev->bi_private = cmd;
+ prev->bi_end_io = bio_cmd_bio_end_io;
+ submit_bio(prev);
+ return -EIOCBQUEUED;
+}
+
+int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
+ struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ u32 cmd_op = cmd->cmd_op;
+ uint64_t start, len;
+
+ if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
+ sqe->rw_flags || sqe->file_index))
+ return -EINVAL;
+
+ bic->res = 0;
+ bic->nowait = issue_flags & IO_URING_F_NONBLOCK;
+
+ start = READ_ONCE(sqe->addr);
+ len = READ_ONCE(sqe->addr3);
+
+ switch (cmd_op) {
+ case BLOCK_URING_CMD_DISCARD:
+ return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
+ }
+ return -EINVAL;
+}
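
A hedged userspace sketch of driving the new command, assuming liburing and that BLOCK_URING_CMD_DISCARD is visible through the installed <linux/blkdev.h> uapi header: issue an asynchronous discard of len bytes at byte offset start on an open block device fd. Both values must be logical-block aligned, matching blk_validate_byte_range() above.

#include <liburing.h>
#include <linux/blkdev.h>
#include <string.h>

static int uring_discard(struct io_uring *ring, int bdev_fd,
			 __u64 start, __u64 len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	memset(sqe, 0, sizeof(*sqe));	/* ioprio/len/rw_flags must stay 0 */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = bdev_fd;
	sqe->cmd_op = BLOCK_URING_CMD_DISCARD;
	sqe->addr = start;	/* read by the kernel via READ_ONCE(sqe->addr) */
	sqe->addr3 = len;	/* read via READ_ONCE(sqe->addr3) */

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;		/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
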
diff --git a/block/partitions/core.c b/block/partitions/core.c
index ab76e64f0f6c..5bd7a603092e 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -555,9 +555,11 @@ static bool blk_add_partition(struct gendisk *disk,
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
- if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
- printk(KERN_ERR " %s: p%d could not be added: %pe\n",
- disk->disk_name, p, part);
+ if (IS_ERR(part)) {
+ if (PTR_ERR(part) != -ENXIO) {
+ printk(KERN_ERR " %s: p%d could not be added: %pe\n",
+ disk->disk_name, p, part);
+ }
return true;
}
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 425e2836f3e1..e7052a728966 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -8,7 +8,6 @@
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
-#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
#include "blk.h"
@@ -240,9 +239,9 @@ static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
}
}
-static bool ext_pi_ref_escape(u8 *ref_tag)
+static bool ext_pi_ref_escape(const u8 ref_tag[6])
{
- static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}
@@ -472,6 +471,3 @@ void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
else
t10_pi_type1_complete(rq, nr_bytes);
}
-
-MODULE_DESCRIPTION("T10 Protection Information module");
-MODULE_LICENSE("GPL");
diff --git a/certs/Makefile b/certs/Makefile
index 1094e3860c2a..f6fa4d8d75e0 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -84,5 +84,5 @@ targets += x509_revocation_list
hostprogs := extract-cert
-HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null)
+HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts
HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
diff --git a/certs/extract-cert.c b/certs/extract-cert.c
index 70e9ec89d87d..7d6d468ed612 100644
--- a/certs/extract-cert.c
+++ b/certs/extract-cert.c
@@ -21,14 +21,17 @@
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/err.h>
-#include <openssl/engine.h>
-
-/*
- * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
- *
- * Remove this if/when that API is no longer used
- */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#if OPENSSL_VERSION_MAJOR >= 3
+# define USE_PKCS11_PROVIDER
+# include <openssl/provider.h>
+# include <openssl/store.h>
+#else
+# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0)
+# define USE_PKCS11_ENGINE
+# include <openssl/engine.h>
+# endif
+#endif
+#include "ssl-common.h"
#define PKEY_ID_PKCS7 2
@@ -40,41 +43,6 @@ void format(void)
exit(2);
}
-static void display_openssl_errors(int l)
-{
- const char *file;
- char buf[120];
- int e, line;
-
- if (ERR_peek_error() == 0)
- return;
- fprintf(stderr, "At main.c:%d:\n", l);
-
- while ((e = ERR_get_error_line(&file, &line))) {
- ERR_error_string(e, buf);
- fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
- }
-}
-
-static void drain_openssl_errors(void)
-{
- const char *file;
- int line;
-
- if (ERR_peek_error() == 0)
- return;
- while (ERR_get_error_line(&file, &line)) {}
-}
-
-#define ERR(cond, fmt, ...) \
- do { \
- bool __cond = (cond); \
- display_openssl_errors(__LINE__); \
- if (__cond) { \
- err(1, fmt, ## __VA_ARGS__); \
- } \
- } while(0)
-
static const char *key_pass;
static BIO *wb;
static char *cert_dst;
@@ -94,6 +62,66 @@ static void write_cert(X509 *x509)
fprintf(stderr, "Extracted cert: %s\n", buf);
}
+static X509 *load_cert_pkcs11(const char *cert_src)
+{
+ X509 *cert = NULL;
+#ifdef USE_PKCS11_PROVIDER
+ OSSL_STORE_CTX *store;
+
+ if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true))
+ ERR(1, "OSSL_PROVIDER_try_load(pkcs11)");
+ if (!OSSL_PROVIDER_try_load(NULL, "default", true))
+ ERR(1, "OSSL_PROVIDER_try_load(default)");
+
+ store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL);
+ ERR(!store, "OSSL_STORE_open");
+
+ while (!OSSL_STORE_eof(store)) {
+ OSSL_STORE_INFO *info = OSSL_STORE_load(store);
+
+ if (!info) {
+ drain_openssl_errors(__LINE__, 0);
+ continue;
+ }
+ if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) {
+ cert = OSSL_STORE_INFO_get1_CERT(info);
+ ERR(!cert, "OSSL_STORE_INFO_get1_CERT");
+ }
+ OSSL_STORE_INFO_free(info);
+ if (cert)
+ break;
+ }
+ OSSL_STORE_close(store);
+#elif defined(USE_PKCS11_ENGINE)
+ ENGINE *e;
+ struct {
+ const char *cert_id;
+ X509 *cert;
+ } parms;
+
+ parms.cert_id = cert_src;
+ parms.cert = NULL;
+
+ ENGINE_load_builtin_engines();
+ drain_openssl_errors(__LINE__, 1);
+ e = ENGINE_by_id("pkcs11");
+ ERR(!e, "Load PKCS#11 ENGINE");
+ if (ENGINE_init(e))
+ drain_openssl_errors(__LINE__, 1);
+ else
+ ERR(1, "ENGINE_init");
+ if (key_pass)
+ ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
+ ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
+ ERR(!parms.cert, "Get X.509 from PKCS#11");
+ cert = parms.cert;
+#else
+ fprintf(stderr, "no pkcs11 engine/provider available\n");
+ exit(1);
+#endif
+ return cert;
+}
+
int main(int argc, char **argv)
{
char *cert_src;
@@ -122,28 +150,10 @@ int main(int argc, char **argv)
fclose(f);
exit(0);
} else if (!strncmp(cert_src, "pkcs11:", 7)) {
- ENGINE *e;
- struct {
- const char *cert_id;
- X509 *cert;
- } parms;
-
- parms.cert_id = cert_src;
- parms.cert = NULL;
+ X509 *cert = load_cert_pkcs11(cert_src);
- ENGINE_load_builtin_engines();
- drain_openssl_errors();
- e = ENGINE_by_id("pkcs11");
- ERR(!e, "Load PKCS#11 ENGINE");
- if (ENGINE_init(e))
- drain_openssl_errors();
- else
- ERR(1, "ENGINE_init");
- if (key_pass)
- ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
- ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
- ERR(!parms.cert, "Get X.509 from PKCS#11");
- write_cert(parms.cert);
+ ERR(!cert, "load_cert_pkcs11 failed");
+ write_cert(cert);
} else {
BIO *b;
X509 *x509;
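
The extract-cert rework above swaps OpenSSL's deprecated ENGINE API for the 3.x provider/OSSL_STORE interface. A minimal userspace sketch of the same loading pattern, assuming an OpenSSL 3 build with a pkcs11 provider installed (build with -lcrypto; first_cert_from_uri is a made-up name):

	/* Load the first certificate behind a pkcs11: URI via OSSL_STORE
	 * instead of the deprecated ENGINE API. */
	#include <openssl/provider.h>
	#include <openssl/store.h>
	#include <openssl/x509.h>

	static X509 *first_cert_from_uri(const char *uri)
	{
		X509 *cert = NULL;
		OSSL_STORE_CTX *store;

		/* pkcs11 handles the URI; default supplies everything else */
		if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", 1) ||
		    !OSSL_PROVIDER_try_load(NULL, "default", 1))
			return NULL;

		store = OSSL_STORE_open(uri, NULL, NULL, NULL, NULL);
		if (!store)
			return NULL;

		/* a store can hold keys, params and certs; keep the first cert */
		while (!cert && !OSSL_STORE_eof(store)) {
			OSSL_STORE_INFO *info = OSSL_STORE_load(store);

			if (!info)
				continue;
			if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT)
				cert = OSSL_STORE_INFO_get1_CERT(info); /* +1 ref */
			OSSL_STORE_INFO_free(info);
		}
		OSSL_STORE_close(store);
		return cert;
	}

As in the patch, the loop tolerates non-certificate store entries and stops at the first certificate it finds.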
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 72e2decb8c6a..a779cab668c2 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1305,7 +1305,7 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
config CRYPTO_JITTERENTROPY_OSR
int "CPU Jitter RNG Oversampling Rate"
range 1 15
- default 1
+ default 3
help
The Jitter RNG allows the specification of an oversampling rate (OSR).
The Jitter RNG operation requires a fixed amount of timing
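
For a hedged sense of what the new default costs: the Jitter RNG collects raw timing samples in proportion to the OSR, so if a 256-bit seed needs on the order of (256 + 64) * OSR samples (using the JENT_ENTROPY_SAFETY_FACTOR of 64 visible in the jitterentropy.c hunk further down — the exact collection formula is an assumption here), osr=1 consumes about 320 samples per seed while the new default of 3 consumes about 960: roughly triple the seeding latency in exchange for a threefold margin of collected noise per output bit.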
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index c4f1bfa1d04f..4fdb53435827 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -323,8 +323,9 @@ static __always_inline
int crypto_aegis128_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk,
void (*crypt)(struct aegis_state *state,
- u8 *dst, const u8 *src,
- unsigned int size))
+ u8 *dst,
+ const u8 *src,
+ unsigned int size))
{
int err = 0;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 122cd910c4e1..74e2261c184c 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -235,7 +235,6 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static void crypto_alg_finish_registration(struct crypto_alg *alg,
- bool fulfill_requests,
struct list_head *algs_to_put)
{
struct crypto_alg *q;
@@ -247,30 +246,8 @@ static void crypto_alg_finish_registration(struct crypto_alg *alg,
if (crypto_is_moribund(q))
continue;
- if (crypto_is_larval(q)) {
- struct crypto_larval *larval = (void *)q;
-
- /*
- * Check to see if either our generic name or
- * specific name can satisfy the name requested
- * by the larval entry q.
- */
- if (strcmp(alg->cra_name, q->cra_name) &&
- strcmp(alg->cra_driver_name, q->cra_name))
- continue;
-
- if (larval->adult)
- continue;
- if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
- continue;
-
- if (fulfill_requests && crypto_mod_get(alg))
- larval->adult = alg;
- else
- larval->adult = ERR_PTR(-EAGAIN);
-
+ if (crypto_is_larval(q))
continue;
- }
if (strcmp(alg->cra_name, q->cra_name))
continue;
@@ -359,7 +336,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&larval->alg.cra_list, &crypto_alg_list);
} else {
alg->cra_flags |= CRYPTO_ALG_TESTED;
- crypto_alg_finish_registration(alg, true, algs_to_put);
+ crypto_alg_finish_registration(alg, algs_to_put);
}
out:
@@ -376,7 +353,6 @@ void crypto_alg_tested(const char *name, int err)
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
- bool best;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
@@ -390,7 +366,8 @@ void crypto_alg_tested(const char *name, int err)
}
pr_err("alg: Unexpected test result for %s: %d\n", name, err);
- goto unlock;
+ up_write(&crypto_alg_sem);
+ return;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
@@ -408,32 +385,15 @@ found:
alg->cra_flags |= CRYPTO_ALG_TESTED;
- /*
- * If a higher-priority implementation of the same algorithm is
- * currently being tested, then don't fulfill request larvals.
- */
- best = true;
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (crypto_is_moribund(q) || !crypto_is_larval(q))
- continue;
-
- if (strcmp(alg->cra_name, q->cra_name))
- continue;
-
- if (q->cra_priority > alg->cra_priority) {
- best = false;
- break;
- }
- }
-
- crypto_alg_finish_registration(alg, best, &list);
+ crypto_alg_finish_registration(alg, &list);
complete:
+ list_del_init(&test->alg.cra_list);
complete_all(&test->completion);
-unlock:
up_write(&crypto_alg_sem);
+ crypto_alg_put(&test->alg);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
@@ -454,7 +414,6 @@ int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
LIST_HEAD(algs_to_put);
- bool test_started = false;
int err;
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
@@ -465,15 +424,16 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
- test_started = crypto_boot_test_finished();
+ bool test_started = crypto_boot_test_finished();
+
larval->test_started = test_started;
+ if (test_started)
+ crypto_schedule_test(larval);
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
- if (test_started)
- crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
@@ -688,8 +648,10 @@ int crypto_register_instance(struct crypto_template *tmpl,
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
- else if (larval)
+ else if (larval) {
larval->test_started = true;
+ crypto_schedule_test(larval);
+ }
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
@@ -699,8 +661,6 @@ unlock:
if (IS_ERR(larval))
return PTR_ERR(larval);
- if (larval)
- crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
@@ -1084,6 +1044,7 @@ static void __init crypto_start_tests(void)
l->test_started = true;
larval = l;
+ crypto_schedule_test(larval);
break;
}
@@ -1091,8 +1052,6 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
-
- crypto_wait_for_test(larval);
}
set_crypto_boot_test_finished();
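
The algapi.c changes above replace the old block-at-registration flow (crypto_wait_for_test) with fire-and-forget scheduling (crypto_schedule_test): registration publishes a larval placeholder immediately, and the tester completes it later while lookups wait on the completion. A hypothetical pthreads sketch of that shape, outside the kernel:

	#include <pthread.h>
	#include <stdbool.h>

	/* Hypothetical "larval" entry: a placeholder published at
	 * registration time; waiters block until testing marks it adult. */
	struct larval {
		pthread_mutex_t lock;
		pthread_cond_t done;
		bool completed;
		void *adult;	/* real object once tests pass, else NULL */
	};

	static struct larval entry = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};

	static void larval_complete(struct larval *l, void *adult)
	{
		pthread_mutex_lock(&l->lock);
		l->adult = adult;
		l->completed = true;
		pthread_cond_broadcast(&l->done);  /* wake all waiters */
		pthread_mutex_unlock(&l->lock);
	}

	static void *larval_wait(struct larval *l)
	{
		pthread_mutex_lock(&l->lock);
		while (!l->completed)
			pthread_cond_wait(&l->done, &l->lock);
		pthread_mutex_unlock(&l->lock);
		return l->adult;   /* NULL means the algorithm failed tests */
	}

The registering thread calls larval_complete() from wherever testing finishes, which is what moving from crypto_wait_for_test to crypto_schedule_test buys: the registrant no longer sleeps on its own registration.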
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 1aa5f306998a..a20926bfd34e 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
- int err;
+ int err = -ENOENT;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
@@ -64,6 +64,8 @@ static int cryptomgr_probe(void *data)
crypto_tmpl_put(tmpl);
out:
+ param->larval->adult = ERR_PTR(err);
+ param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD;
complete_all(&param->larval->completion);
crypto_alg_put(&param->larval->alg);
kfree(param);
diff --git a/crypto/api.c b/crypto/api.c
index 22556907b3bc..bfd177a4313a 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -37,6 +37,8 @@ DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ u32 mask);
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
@@ -68,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
if ((q->cra_flags ^ type) & mask)
continue;
- if (crypto_is_larval(q) &&
- !crypto_is_test_larval((struct crypto_larval *)q) &&
- ((struct crypto_larval *)q)->mask != mask)
- continue;
-
exact = !strcmp(q->cra_driver_name, name);
fuzzy = !strcmp(q->cra_name, name);
if (!exact && !(fuzzy && q->cra_priority > best))
@@ -111,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
if (!larval)
return ERR_PTR(-ENOMEM);
+ type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);
+
larval->mask = mask;
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
larval->alg.cra_priority = -1;
@@ -152,32 +151,31 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
return alg;
}
-void crypto_larval_kill(struct crypto_alg *alg)
+static void crypto_larval_kill(struct crypto_larval *larval)
{
- struct crypto_larval *larval = (void *)alg;
+ bool unlinked;
down_write(&crypto_alg_sem);
- list_del(&alg->cra_list);
+ unlinked = list_empty(&larval->alg.cra_list);
+ if (!unlinked)
+ list_del_init(&larval->alg.cra_list);
up_write(&crypto_alg_sem);
+
+ if (unlinked)
+ return;
+
complete_all(&larval->completion);
- crypto_alg_put(alg);
+ crypto_alg_put(&larval->alg);
}
-EXPORT_SYMBOL_GPL(crypto_larval_kill);
-void crypto_wait_for_test(struct crypto_larval *larval)
+void crypto_schedule_test(struct crypto_larval *larval)
{
int err;
err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
- if (WARN_ON_ONCE(err != NOTIFY_STOP))
- goto out;
-
- err = wait_for_completion_killable(&larval->completion);
- WARN_ON(err);
-out:
- crypto_larval_kill(&larval->alg);
+ WARN_ON_ONCE(err != NOTIFY_STOP);
}
-EXPORT_SYMBOL_GPL(crypto_wait_for_test);
+EXPORT_SYMBOL_GPL(crypto_schedule_test);
static void crypto_start_test(struct crypto_larval *larval)
{
@@ -196,14 +194,17 @@ static void crypto_start_test(struct crypto_larval *larval)
larval->test_started = true;
up_write(&crypto_alg_sem);
- crypto_wait_for_test(larval);
+ crypto_schedule_test(larval);
}
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
- struct crypto_larval *larval = (void *)alg;
+ struct crypto_larval *larval;
long time_left;
+again:
+ larval = container_of(alg, struct crypto_larval, alg);
+
if (!crypto_boot_test_finished())
crypto_start_test(larval);
@@ -213,11 +214,20 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = larval->adult;
if (time_left < 0)
alg = ERR_PTR(-EINTR);
- else if (!time_left)
+ else if (!time_left) {
+ if (crypto_is_test_larval(larval))
+ crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
- else if (!alg)
- alg = ERR_PTR(-ENOENT);
- else if (IS_ERR(alg))
+ } else if (!alg) {
+ u32 type;
+ u32 mask;
+
+ alg = &larval->alg;
+ type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
+ mask = larval->mask;
+ alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
+ ERR_PTR(-EAGAIN);
+ } else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
@@ -228,6 +238,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = ERR_PTR(-EAGAIN);
crypto_mod_put(&larval->alg);
+ if (!IS_ERR(alg) && crypto_is_larval(alg))
+ goto again;
+
return alg;
}
@@ -292,8 +305,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
- else if (!alg)
+ else if (alg)
+ ;
+ else if (!(mask & CRYPTO_ALG_TESTED))
alg = crypto_larval_add(name, type, mask);
+ else
+ alg = ERR_PTR(-ENOENT);
return alg;
}
@@ -340,7 +357,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
- crypto_larval_kill(larval);
+ crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
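
crypto_larval_wait above now recovers the larval from its embedded struct crypto_alg with container_of rather than a raw cast. A minimal standalone version of that macro (without the kernel's extra type checking; the structs are illustrative):

	#include <stddef.h>

	/* Recover the enclosing struct from a pointer to one of its
	 * members by subtracting the member's offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct alg    { int flags; };
	struct larval { int mask; struct alg alg; };

	static struct larval *to_larval(struct alg *a)
	{
		return container_of(a, struct larval, alg);
	}

Given only the &larval->alg pointer that the API hands around, to_larval() gets the owning larval back without any extra back-pointer field.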
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index a5da8ccd353e..43af5fa510c0 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
- WARN_ON(!id_0 && !id_1 && !id_2);
-
if (id_0) {
lookup = id_0->data;
len = id_0->len;
} else if (id_1) {
lookup = id_1->data;
len = id_1->len;
- } else {
+ } else if (id_2) {
lookup = id_2->data;
len = id_2->len;
+ } else {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
}
/* Construct an identifier "id:<keyid>". */
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 9e4651330852..d740849f1c19 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -27,7 +27,7 @@ struct chachapoly_ctx {
struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
- u8 salt[];
+ u8 salt[] __counted_by(saltlen);
};
struct poly_req {
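
__counted_by, added to the chachapoly salt above, annotates a flexible array with the field that holds its element count so newer compilers and the kernel's FORTIFY machinery can bounds-check accesses. A self-contained sketch with a feature-test fallback (the kernel defines the macro centrally; struct salt_blob is illustrative):

	#include <stdlib.h>
	#include <string.h>

	#ifndef __has_attribute
	# define __has_attribute(x) 0
	#endif
	#if __has_attribute(__counted_by__)
	# define __counted_by(member) __attribute__((__counted_by__(member)))
	#else
	# define __counted_by(member)	/* older compilers: no-op */
	#endif

	struct salt_blob {
		unsigned int saltlen;
		unsigned char salt[] __counted_by(saltlen);
	};

	static struct salt_blob *salt_blob_new(const unsigned char *src,
					       unsigned int len)
	{
		struct salt_blob *b = malloc(sizeof(*b) + len);

		if (!b)
			return NULL;
		b->saltlen = len;	/* count must be set before salt[] use */
		memcpy(b->salt, src, len);
		return b;
	}

The ordering in salt_blob_new() matters: with the attribute active, the count field has to be valid before the array is touched.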
diff --git a/crypto/dh.c b/crypto/dh.c
index 68d11d66c0b5..afc0fd847761 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -145,9 +145,9 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
* ->p is odd, so no need to explicitly subtract one
* from it before shifting to the right.
*/
- mpi_rshift(q, ctx->p, 1);
+ ret = mpi_rshift(q, ctx->p, 1) ?:
+ mpi_powm(val, y, q, ctx->p);
- ret = mpi_powm(val, y, q, ctx->p);
mpi_free(q);
if (ret) {
mpi_free(val);
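
The dh.c hunk (and the rsa.c one below) chain MPI calls with GNU C's a ?: b so that later steps run only while earlier ones returned 0, and the first error is what survives. A tiny sketch of the idiom:

	/* GNU C "elvis" chaining for 0-on-success APIs: each call runs
	 * only if everything before it returned 0; ret keeps the first
	 * nonzero (error) value. */
	static int step_a(void) { return 0; }
	static int step_b(void) { return -1; }	/* fails: step_c never runs */
	static int step_c(void) { return 0; }

	static int run(void)
	{
		int ret;

		ret = step_a() ?: step_b() ?: step_c();
		return ret;	/* -1 here */
	}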
diff --git a/crypto/internal.h b/crypto/internal.h
index aee31319be2e..711a6a5bfa2b 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -113,8 +113,7 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
-void crypto_larval_kill(struct crypto_alg *alg);
-void crypto_wait_for_test(struct crypto_larval *larval);
+void crypto_schedule_test(struct crypto_larval *larval);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index d7056de8c0d7..3b390bd6c119 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -146,6 +146,7 @@ struct rand_data {
#define JENT_ENTROPY_SAFETY_FACTOR 64
#include <linux/fips.h>
+#include <linux/minmax.h>
#include "jitterentropy.h"
/***************************************************************************
@@ -638,10 +639,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
return -2;
}
- if ((DATA_SIZE_BITS / 8) < len)
- tocopy = (DATA_SIZE_BITS / 8);
- else
- tocopy = len;
+ tocopy = min(DATA_SIZE_BITS / 8, len);
if (jent_read_random_block(ec->hash_state, p, tocopy))
return -1;
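
The jitterentropy change swaps an open-coded comparison for the kernel's min(), which also refuses operands of mismatched types. A rough userspace approximation of that type check, using GNU C statement expressions:

	/* Approximation of the kernel's type-checked min(): comparing
	 * &_a and &_b warns if a and b have different types. */
	#define MIN(a, b) ({				\
		__typeof__(a) _a = (a);			\
		__typeof__(b) _b = (b);			\
		(void)(&_a == &_b);	/* type check */	\
		_a < _b ? _a : _b;			\
	})

With mixed types the pointer comparison triggers a "comparison of distinct pointer types" warning, which is the point: silent mixed-signedness bugs in hand-rolled comparisons are exactly what min() guards against.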
diff --git a/crypto/rsa.c b/crypto/rsa.c
index d9be9e86097e..78b28d14ced3 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -98,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
goto err_free_mpi;
/* (2iii) h = (m_1 - m_2) * qInv mod p */
- mpi_sub(m12_or_qh, m_or_m1_or_h, m2);
- mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
+ ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?:
+ mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
/* (2iv) m = m_2 + q * h */
- mpi_mul(m12_or_qh, key->q, m_or_m1_or_h);
- mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
-
- ret = 0;
+ ret = ret ?:
+ mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?:
+ mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
err_free_mpi:
mpi_free(m12_or_qh);
@@ -236,6 +235,7 @@ static int rsa_check_key_length(unsigned int len)
static int rsa_check_exponent_fips(MPI e)
{
MPI e_max = NULL;
+ int err;
/* check if odd */
if (!mpi_test_bit(e, 0)) {
@@ -250,7 +250,12 @@ static int rsa_check_exponent_fips(MPI e)
e_max = mpi_alloc(0);
if (!e_max)
return -ENOMEM;
- mpi_set_bit(e_max, 256);
+
+ err = mpi_set_bit(e_max, 256);
+ if (err) {
+ mpi_free(e_max);
+ return err;
+ }
if (mpi_cmp(e, e_max) >= 0) {
mpi_free(e_max);
diff --git a/crypto/simd.c b/crypto/simd.c
index 2aa4f72e224f..b07721d1f3f6 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename)
{
struct simd_skcipher_alg *salg;
- struct crypto_skcipher *tfm;
- struct skcipher_alg *ialg;
struct skcipher_alg *alg;
int err;
- tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
-
- ialg = crypto_skcipher_alg(tfm);
-
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
- goto out_put_tfm;
+ goto out;
}
salg->ialg_name = basename;
@@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
if (err)
goto out_free_salg;
-out_put_tfm:
- crypto_free_skcipher(tfm);
+out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
- goto out_put_tfm;
+ goto out;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename)
-{
- char drvname[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
- CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
-
- return simd_skcipher_create_compat(algname, drvname, basename);
-}
-EXPORT_SYMBOL_GPL(simd_skcipher_create);
-
void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
crypto_unregister_skcipher(&salg->alg);
@@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
@@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm)
return 0;
}
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename)
+static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
+ const char *algname,
+ const char *drvname,
+ const char *basename)
{
struct simd_aead_alg *salg;
- struct crypto_aead *tfm;
- struct aead_alg *ialg;
struct aead_alg *alg;
int err;
- tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
-
- ialg = crypto_aead_alg(tfm);
-
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
- goto out_put_tfm;
+ goto out;
}
salg->ialg_name = basename;
@@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
if (err)
goto out_free_salg;
-out_put_tfm:
- crypto_free_aead(tfm);
+out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
- goto out_put_tfm;
-}
-EXPORT_SYMBOL_GPL(simd_aead_create_compat);
-
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename)
-{
- char drvname[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
- CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
-
- return simd_aead_create_compat(algname, drvname, basename);
+ goto out;
}
-EXPORT_SYMBOL_GPL(simd_aead_create);
-void simd_aead_free(struct simd_aead_alg *salg)
+static void simd_aead_free(struct simd_aead_alg *salg)
{
crypto_unregister_aead(&salg->alg);
kfree(salg);
}
-EXPORT_SYMBOL_GPL(simd_aead_free);
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs)
@@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
- simd = simd_aead_create_compat(algname, drvname, basename);
+ simd = simd_aead_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f02cb075bd68..ee8da628e9da 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1939,6 +1939,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
atfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(atfm)) {
+ if (PTR_ERR(atfm) == -ENOENT)
+ return -ENOENT;
pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(atfm));
return PTR_ERR(atfm);
@@ -2703,6 +2705,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return -ENOENT;
pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -3280,6 +3284,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return -ENOENT;
pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -3693,6 +3699,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return -ENOENT;
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -3717,6 +3725,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
acomp = crypto_alloc_acomp(driver, type, mask);
if (IS_ERR(acomp)) {
+ if (PTR_ERR(acomp) == -ENOENT)
+ return -ENOENT;
pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(acomp));
return PTR_ERR(acomp);
@@ -3729,6 +3739,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
} else {
comp = crypto_alloc_comp(driver, type, mask);
if (IS_ERR(comp)) {
+ if (PTR_ERR(comp) == -ENOENT)
+ return -ENOENT;
pr_err("alg: comp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(comp));
return PTR_ERR(comp);
@@ -3805,6 +3817,8 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
+ if (PTR_ERR(rng) == -ENOENT)
+ return -ENOENT;
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
return PTR_ERR(rng);
@@ -3832,10 +3846,13 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
+ if (PTR_ERR(drng) == -ENOENT)
+ goto out_no_rng;
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
+out_no_rng:
kfree_sensitive(buf);
- return -ENOMEM;
+ return PTR_ERR(drng);
}
test_data.testentropy = &testentropy;
@@ -4077,6 +4094,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return -ENOENT;
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@@ -4305,6 +4324,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT)
+ return -ENOENT;
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
diff --git a/crypto/xor.c b/crypto/xor.c
index a1363162978c..f39621a57bb3 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -83,33 +83,30 @@ static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
- int i, j;
- ktime_t min, start, diff;
+ unsigned long reps;
+ ktime_t min, start, t0;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
- min = (ktime_t)S64_MAX;
- for (i = 0; i < 3; i++) {
- start = ktime_get();
- for (j = 0; j < REPS; j++) {
- mb(); /* prevent loop optimization */
- tmpl->do_2(BENCH_SIZE, b1, b2);
- mb();
- }
- diff = ktime_sub(ktime_get(), start);
- if (diff < min)
- min = diff;
- }
+ reps = 0;
+ t0 = ktime_get();
+ /* delay start until time has advanced */
+ while ((start = ktime_get()) == t0)
+ cpu_relax();
+ do {
+ mb(); /* prevent loop optimization */
+ tmpl->do_2(BENCH_SIZE, b1, b2);
+ mb();
+ } while (reps++ < REPS || (t0 = ktime_get()) == start);
+ min = ktime_sub(t0, start);
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
- if (!min)
- min = 1;
- speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+ speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
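
The rewritten do_xor_speed above times one whole run instead of taking the minimum of three, and spins until the clock ticks so the measured interval starts on an edge and can never read as zero. A standalone sketch of the same timing scheme (work() and the constants are placeholders):

	#include <stdio.h>
	#include <time.h>

	#define REPS		800UL
	#define BENCH_SIZE	4096UL	/* bytes processed per work() call */

	static long long now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	static void work(void) { /* stands in for tmpl->do_2() */ }

	int main(void)
	{
		long long t0 = now_ns(), start, end = 0;
		unsigned long reps = 0;

		while ((start = now_ns()) == t0)	/* wait for a clock edge */
			;
		do {
			work();
		} while (reps++ < REPS || (end = now_ns()) == start);

		/* bytes/ns == GB/s; scale by 1000 for MB/s (not MiB/s) */
		printf("%llu MB/s\n",
		       1000ULL * reps * BENCH_SIZE /
		       (unsigned long long)(end - start));
		return 0;
	}

The final division mirrors the patch's comment: bytes per nanosecond equals GB/s, so multiplying by 1000 yields MB/s.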
diff --git a/drivers/Makefile b/drivers/Makefile
index fe9ceb0d2288..45d1c3e630f7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
+# LEDs must come before PCI, it is needed by NPEM driver
+obj-y += leds/
+
obj-y += pci/
obj-$(CONFIG_PARISC) += parisc/
@@ -130,7 +133,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 16c3edb8c46e..aa826033b0ce 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -8,7 +8,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <drm/drm_accel.h>
#include <drm/drm_auth.h>
@@ -18,8 +18,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
-static DEFINE_SPINLOCK(accel_minor_lock);
-static struct idr accel_minors_idr;
+DEFINE_XARRAY_ALLOC(accel_minors_xa);
static struct dentry *accel_debugfs_root;
@@ -118,99 +117,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
}
/**
- * accel_minor_alloc() - Allocates a new accel minor
- *
- * This function access the accel minors idr and allocates from it
- * a new id to represent a new accel minor
- *
- * Return: A new id on success or error code in case idr_alloc failed
- */
-int accel_minor_alloc(void)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- return r;
-}
-
-/**
- * accel_minor_remove() - Remove an accel minor
- * @index: The minor id to remove.
- *
- * This function access the accel minors idr and removes from
- * it the member with the id that is passed to this function.
- */
-void accel_minor_remove(int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_remove(&accel_minors_idr, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/**
- * accel_minor_replace() - Replace minor pointer in accel minors idr.
- * @minor: Pointer to the new minor.
- * @index: The minor id to replace.
- *
- * This function access the accel minors idr structure and replaces the pointer
- * that is associated with an existing id. Because the minor pointer can be
- * NULL, we need to explicitly pass the index.
- *
- * Return: 0 for success, negative value for error
- */
-void accel_minor_replace(struct drm_minor *minor, int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_replace(&accel_minors_idr, minor, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/*
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with accel_minor_release().
- *
- * The object can be only a drm_minor that represents an accel device.
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- */
-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- minor = idr_find(&accel_minors_idr, minor_id);
- if (minor)
- drm_dev_get(minor->dev);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_dev_is_unplugged(minor->dev)) {
- drm_dev_put(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-static void accel_minor_release(struct drm_minor *minor)
-{
- drm_dev_put(minor->dev);
-}
-
-/**
* accel_open - open method for ACCEL file
* @inode: device inode
* @filp: file pointer.
@@ -227,7 +133,7 @@ int accel_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -246,7 +152,7 @@ int accel_open(struct inode *inode, struct file *filp)
err_undo:
atomic_dec(&dev->open_count);
- accel_minor_release(minor);
+ drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL_GPL(accel_open);
@@ -257,7 +163,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int err;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -274,7 +180,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
err = 0;
out:
- accel_minor_release(minor);
+ drm_minor_release(minor);
return err;
}
@@ -290,15 +196,13 @@ void accel_core_exit(void)
unregister_chrdev(ACCEL_MAJOR, "accel");
debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
- idr_destroy(&accel_minors_idr);
+ WARN_ON(!xa_empty(&accel_minors_xa));
}
int __init accel_core_init(void)
{
int ret;
- idr_init(&accel_minors_idr);
-
ret = accel_sysfs_init();
if (ret < 0) {
DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
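
The drm_accel conversion above replaces the idr-plus-spinlock pair with an XArray, which embeds its own lock and can allocate IDs directly. A kernel-style sketch of the resulting shape (names are made up):

	#include <linux/xarray.h>

	/* XArray takes its own internal lock, so the old idr + external
	 * spinlock pair collapses into a single object. */
	static DEFINE_XARRAY_ALLOC(minors_xa);

	static int minor_alloc(void *minor)
	{
		u32 id;
		int err;

		/* allocate an unused index in [0, 255], bound to @minor */
		err = xa_alloc(&minors_xa, &id, minor, XA_LIMIT(0, 255),
			       GFP_KERNEL);
		return err ? err : (int)id;
	}

	static void minor_remove(int id)
	{
		xa_erase(&minors_xa, id);
	}

This is also why accel_core_exit() above can drop idr_destroy(): an empty XArray needs no teardown, so a WARN_ON(!xa_empty()) sanity check suffices.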
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index de3d66116375..ede6165e09d9 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -60,6 +60,10 @@ static struct {
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
+/* Production fw_names from the table above */
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
int ret = -ENOENT;
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 580b29ed1902..bf10156c334e 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
- ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (ret)
- return ret;
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
if (IS_ERR(qdev->bar_0))
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index eaa70b23dd0b..7c5b040a83e8 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -213,8 +213,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
return -ENOMEM;
ac->device = adev;
- strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+ strscpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+ strscpy(acpi_device_class(adev), ACPI_AC_CLASS);
platform_set_drvdata(pdev, ac);
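
This and several later ACPI hunks move from strcpy() to the kernel's two-argument strscpy(), which infers the destination size from the array type, always NUL-terminates, and reports truncation instead of overflowing. A userspace approximation, assuming dst is a true array (sizeof would be wrong for a pointer):

	#include <stddef.h>
	#include <string.h>

	/* Copy at most size-1 bytes, always NUL-terminate, and return
	 * -1 on truncation (the kernel returns -E2BIG). */
	static long scpy(char *dst, const char *src, size_t size)
	{
		size_t len = strnlen(src, size);

		if (len == size) {		/* src does not fit */
			if (size) {
				memcpy(dst, src, size - 1);
				dst[size - 1] = '\0';
			}
			return -1;
		}
		memcpy(dst, src, len + 1);
		return (long)len;
	}

	/* The kernel's 2-argument form infers size from the array type. */
	#define scpy2(dst, src)	scpy(dst, src, sizeof(dst))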
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 80f945cbec8a..800f97868448 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -118,6 +118,11 @@ static const struct apd_device_desc wt_i2c_desc = {
.fixed_clk_rate = 150000000,
};
+static const struct apd_device_desc wt_i3c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 125000000,
+};
+
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
@@ -231,6 +236,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "AMD0030", },
{ "AMD0040", APD_ADDR(fch_misc_desc)},
{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
+ { "AMDI0015", APD_ADDR(wt_i3c_desc) },
{ "AMDI0019", APD_ADDR(wt_i2c_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0022", APD_ADDR(cz_uart_desc) },
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 350d3a892889..42b7220d4cfd 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -136,8 +136,10 @@ static void exit_round_robin(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
- cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
- tsk_in_cpu[tsk_index] = -1;
+ if (tsk_in_cpu[tsk_index] != -1) {
+ cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+ tsk_in_cpu[tsk_index] = -1;
+ }
}
static unsigned int idle_pct = 5; /* percentage */
@@ -428,8 +430,8 @@ static int acpi_pad_probe(struct platform_device *pdev)
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
acpi_status status;
- strcpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
- strcpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+ strscpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+ strscpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);
status = acpi_install_notify_handler(adev->handle,
ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev);
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 9916cc7ced39..7cf6101cb4c7 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -436,8 +436,8 @@ static int acpi_processor_add(struct acpi_device *device,
}
pr->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
result = acpi_processor_get_info(device);
@@ -985,7 +985,7 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
memcpy(&info->states[++last_index], &cx, sizeof(cx));
}
- acpi_handle_info(handle, "Found %d idle states\n", last_index);
+ acpi_handle_debug(handle, "Found %d idle states\n", last_index);
info->count = last_index;
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f4c90fc99be2..309ce8efb4f6 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -29,11 +29,7 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
-
-#if (!ACPI_REDUCED_HARDWARE)
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
-
-#endif /* !ACPI_REDUCED_HARDWARE */
+ACPI_INIT_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS, NULL);
/* These addresses are calculated from the FADT Event Block addresses */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 02012168a087..6f4fe47c955b 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1090,6 +1090,8 @@ struct acpi_port_info {
#define ACPI_ADDRESS_TYPE_IO_RANGE 1
#define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE 2
+#define ACPI_ADDRESS_TYPE_PCC_NUMBER 0xA
+
/* Resource descriptor types and masks */
#define ACPI_RESOURCE_NAME_LARGE 0x80
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 2e442f5a3123..ef068f4c864a 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -450,7 +450,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_PACKAGE),
+ ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 2b84ac093698..8dbab6932049 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
elements =
ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
sizeof(union acpi_object));
+ if (!elements)
+ return (AE_NO_MEMORY);
this = string;
for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 3729bf3b74f7..bb1be42daee1 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -17,7 +17,8 @@ ACPI_MODULE_NAME("exconvrt")
/* Local prototypes */
static u32
-acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
+acpi_ex_convert_to_ascii(u64 integer,
+ u16 base, u8 *string, u8 max_length, u8 leading_zeros);
/*******************************************************************************
*
@@ -249,6 +250,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
* base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
* string - Where the string is returned
* data_width - Size of data item to be converted, in bytes
+ * leading_zeros - Allow leading zeros
*
* RETURN: Actual string length
*
@@ -257,7 +259,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
******************************************************************************/
static u32
-acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
+acpi_ex_convert_to_ascii(u64 integer,
+ u16 base, u8 *string, u8 data_width, u8 leading_zeros)
{
u64 digit;
u32 i;
@@ -266,7 +269,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
u32 hex_length;
u32 decimal_length;
u32 remainder;
- u8 supress_zeros;
+ u8 supress_zeros = !leading_zeros;
+ u8 hex_char;
ACPI_FUNCTION_ENTRY();
@@ -293,7 +297,6 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
break;
}
- supress_zeros = TRUE; /* No leading zeros */
remainder = 0;
for (i = decimal_length; i > 0; i--) {
@@ -328,8 +331,17 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
/* Get one hex digit, most significant digits first */
- string[k] = (u8)
+ hex_char = (u8)
acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j));
+
+ /* Supress leading zeros until the first non-zero character */
+
+ if (hex_char == ACPI_ASCII_ZERO && supress_zeros) {
+ continue;
+ }
+
+ supress_zeros = FALSE;
+ string[k] = hex_char;
k++;
}
break;
@@ -379,6 +391,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
u32 string_length = 0;
u16 base = 16;
u8 separator = ',';
+ u8 leading_zeros;
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
@@ -400,14 +413,26 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Make room for the maximum decimal number size
*/
string_length = ACPI_MAX_DECIMAL_DIGITS;
+ leading_zeros = FALSE;
base = 10;
break;
+ case ACPI_EXPLICIT_CONVERT_HEX:
+ /*
+ * From to_hex_string.
+ *
+ * Supress leading zeros and append "0x"
+ */
+ string_length =
+ ACPI_MUL_2(acpi_gbl_integer_byte_width) + 2;
+ leading_zeros = FALSE;
+ break;
default:
/* Two hex string characters for each integer byte */
string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
+ leading_zeros = TRUE;
break;
}
@@ -422,17 +447,32 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
new_buf = return_desc->buffer.pointer;
+ if (type == ACPI_EXPLICIT_CONVERT_HEX) {
+
+ /* Append "0x" prefix for explicit hex conversion */
+
+ *new_buf++ = '0';
+ *new_buf++ = 'x';
+ }
/* Convert integer to string */
string_length =
acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
new_buf,
- acpi_gbl_integer_byte_width);
+ acpi_gbl_integer_byte_width,
+ leading_zeros);
/* Null terminate at the correct place */
return_desc->string.length = string_length;
+ if (type == ACPI_EXPLICIT_CONVERT_HEX) {
+
+ /* Take "0x" prefix into account */
+
+ return_desc->string.length += 2;
+ }
+
new_buf[string_length] = 0;
break;
@@ -448,6 +488,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* From ACPI: "If the input is a buffer, it is converted to a
* a string of decimal values separated by commas."
*/
+ leading_zeros = FALSE;
base = 10;
/*
@@ -475,6 +516,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
*
* Each hex number is prefixed with 0x (11/2018)
*/
+ leading_zeros = TRUE;
separator = ' ';
string_length = (obj_desc->buffer.length * 5);
break;
@@ -488,6 +530,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
*
* Each hex number is prefixed with 0x (11/2018)
*/
+ leading_zeros = TRUE;
separator = ',';
string_length = (obj_desc->buffer.length * 5);
break;
@@ -528,7 +571,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
new_buf += acpi_ex_convert_to_ascii((u64) obj_desc->
buffer.pointer[i],
- base, new_buf, 1);
+ base, new_buf, 1,
+ leading_zeros);
/* Each digit is separated by either a comma or space */
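
The exconvrt.c change threads a leading_zeros flag through the integer-to-ASCII helper so that to_hex_string output drops leading zeros and gains a "0x" prefix. A standalone sketch of the hex path (the single-'0' fallback for a zero value is an addition of the sketch, not taken from the patch):

	#include <stdint.h>

	/* Emit nibbles most-significant first, skipping zeros until the
	 * first set nibble unless fixed-width output is requested. */
	static unsigned int to_hex(uint64_t v, char *out, unsigned int bytes,
				   int leading_zeros)
	{
		int suppress = !leading_zeros;
		unsigned int i, k = 0;

		for (i = 0; i < bytes * 2; i++) {
			unsigned int shift = (bytes * 2 - 1 - i) * 4;
			char c = "0123456789ABCDEF"[(v >> shift) & 0xF];

			if (c == '0' && suppress)
				continue;
			suppress = 0;
			out[k++] = c;
		}
		if (!k)			/* value was zero: keep one digit */
			out[k++] = '0';
		out[k] = '\0';
		return k;
	}

Writing "0x" first and then converting with leading_zeros=0 — e.g. char buf[19] = "0x"; to_hex(0xABC, buf + 2, 8, 0); — yields "0xABC", matching the explicit-hex branch added in the hunk above.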
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 08196fa17080..82b1fa2d201f 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
if (info->connection_node) {
second_desc = info->connection_node->object;
+ if (second_desc == NULL) {
+ break;
+ }
if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
status =
acpi_ds_get_buffer_arguments(second_desc);
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f665ffd9a396..2c384bd52b9c 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -133,14 +133,15 @@ acpi_status acpi_ex_system_do_stall(u32 how_long_us)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO,
- "Time parameter is too large (%u)", how_long_us));
+ ACPI_ERROR_ONCE((AE_INFO,
+ "Time parameter is too large (%u)",
+ how_long_us));
status = AE_AML_OPERAND_VALUE;
} else {
if (how_long_us > 100) {
- ACPI_WARNING((AE_INFO,
- "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.",
- how_long_us));
+ ACPI_WARNING_ONCE((AE_INFO,
+ "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.",
+ how_long_us));
}
acpi_os_stall(how_long_us);
}
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 36ea48f64110..8dbf83aeb455 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -16,20 +16,11 @@
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
-#if (!ACPI_REDUCED_HARDWARE)
static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
acpi_physical_address physical_address,
acpi_physical_address physical_address64);
-#endif
-
-/*
- * These functions are removed for the ACPI_REDUCED_HARDWARE case:
- * acpi_set_firmware_waking_vector
- * acpi_enter_sleep_state_s4bios
- */
-#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_hw_set_firmware_waking_vector
@@ -115,6 +106,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address,
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+/*
+ * These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ * acpi_enter_sleep_state_s4bios
+ */
+
+#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_s4bios
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 422c074ed289..28582adfc0ac 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state);
+static void acpi_ps_free_field_list(union acpi_parse_object *start);
+
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_package_length
@@ -685,6 +687,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/*******************************************************************************
*
+ * FUNCTION: acpi_ps_free_field_list
+ *
+ * PARAMETERS: start - First Op in field list
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Free all Op objects inside a field list.
+ *
+ ******************************************************************************/
+
+static void acpi_ps_free_field_list(union acpi_parse_object *start)
+{
+ union acpi_parse_object *cur = start;
+ union acpi_parse_object *next;
+ union acpi_parse_object *arg;
+
+ while (cur) {
+ next = cur->common.next;
+
+ /* AML_INT_CONNECTION_OP can have a single argument */
+
+ arg = acpi_ps_get_arg(cur, 0);
+ if (arg) {
+ acpi_ps_free_op(arg);
+ }
+
+ acpi_ps_free_op(cur);
+ cur = next;
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ps_get_next_arg
*
* PARAMETERS: walk_state - Current state
@@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
while (parser_state->aml < parser_state->pkg_end) {
field = acpi_ps_get_next_field(parser_state);
if (!field) {
+ if (arg) {
+ acpi_ps_free_field_list(arg);
+ }
+
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_NOT_METHOD_CALL);
+ if (ACPI_FAILURE(status)) {
+ acpi_ps_free_op(arg);
+ return_ACPI_STATUS(status);
+ }
} else {
/* Single complex argument, nothing returned */
@@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_POSSIBLE_METHOD_CALL);
+ if (ACPI_FAILURE(status)) {
+ acpi_ps_free_op(arg);
+ return_ACPI_STATUS(status);
+ }
if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index fff48001d7ef..27384ee245f0 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -282,7 +282,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Validate the Resource Type */
- if ((address.resource_type > 2) && (address.resource_type < 0xC0)) {
+ if ((address.resource_type > 2) &&
+ (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
return (FALSE);
}
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 611bc71c193f..5b7d7074ce4f 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -48,6 +48,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
+#ifdef ACPI_DEBUGGER
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_resource_list
@@ -160,6 +161,7 @@ void acpi_rs_dump_irq_list(u8 *route_table)
prt_element, prt_element->length);
}
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 15fa68a5ea6e..dad7425fce3f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -18,7 +18,6 @@ ACPI_MODULE_NAME("tbutils")
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
-#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_tb_initialize_facs
@@ -56,7 +55,6 @@ acpi_status acpi_tb_initialize_facs(void)
return (AE_OK);
}
-#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 8d7736d2d269..c85bfa13ac1e 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -140,7 +140,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
(void)
acpi_os_delete_semaphore
(acpi_gbl_global_lock_semaphore);
- acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL;
acpi_os_delete_mutex(object->mutex.os_mutex);
acpi_gbl_global_lock_mutex = NULL;
@@ -157,7 +157,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
object, object->event.os_semaphore));
(void)acpi_os_delete_semaphore(object->event.os_semaphore);
- object->event.os_semaphore = NULL;
+ object->event.os_semaphore = ACPI_SEMAPHORE_NULL;
break;
case ACPI_TYPE_METHOD:
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 92fbaef161a7..6d78504e9fbc 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -154,7 +154,7 @@ acpi_status acpi_ut_init_globals(void)
/* Global Lock support */
- acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL;
acpi_gbl_global_lock_mutex = NULL;
acpi_gbl_global_lock_acquired = FALSE;
acpi_gbl_global_lock_handle = 0;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 251bd396c6fd..99b85fd6eccf 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -75,6 +75,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
{"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */
{"Windows 2021", NULL, 0, ACPI_OSI_WIN_11}, /* Windows 11 - Added 01/2022 */
+ {"Windows 2022", NULL, 0, ACPI_OSI_WIN_11_22H2}, /* Windows 11 version 22H2 - Added 04/2024 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 1915bec2b279..70ae0afa7939 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -120,6 +120,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags)
*/
acpi_gbl_early_initialization = FALSE;
+ /*
+ * Obtain a permanent mapping for the FACS. This is required for the
+ * Global Lock and the Firmware Waking Vector
+ */
+ if (!(flags & ACPI_NO_FACS_INIT)) {
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
+ }
+
#if (!ACPI_REDUCED_HARDWARE)
/* Enable ACPI mode */
@@ -138,18 +150,6 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags)
}
/*
- * Obtain a permanent mapping for the FACS. This is required for the
- * Global Lock and the Firmware Waking Vector
- */
- if (!(flags & ACPI_NO_FACS_INIT)) {
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
- }
- }
-
- /*
* Initialize ACPI Event handling (Fixed and General Purpose)
*
* Note1: We must have the hardware and events initialized before we can
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 1b39e9ae7ac1..4c745a26226b 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -822,7 +822,7 @@ static struct iommu_iort_rmr_data *iort_rmr_alloc(
return NULL;
/* Create a copy of SIDs array to associate with this rmr_data */
- sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
if (!sids_copy) {
kfree(rmr_data);
return NULL;
@@ -1703,6 +1703,13 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
+ {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index da3a879d638a..f4599261cfc3 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) "ACPI: battery: " fmt
-#include <linux/async.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/jiffies.h>
@@ -50,8 +49,6 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
-static async_cookie_t async_cookie;
-static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
@@ -1207,7 +1204,7 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
- struct acpi_battery *battery = NULL;
+ struct acpi_battery *battery;
if (!device)
return -EINVAL;
@@ -1219,8 +1216,8 @@ static int acpi_battery_add(struct acpi_device *device)
if (!battery)
return -ENOMEM;
battery->device = device;
- strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
+ strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
mutex_init(&battery->sysfs_lock);
@@ -1260,7 +1257,7 @@ fail:
static void acpi_battery_remove(struct acpi_device *device)
{
- struct acpi_battery *battery = NULL;
+ struct acpi_battery *battery;
if (!device || !acpi_driver_data(device))
return;
@@ -1311,37 +1308,23 @@ static struct acpi_driver acpi_battery_driver = {
.remove = acpi_battery_remove,
},
.drv.pm = &acpi_battery_pm,
+ .drv.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
-static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
-{
- int result;
-
- if (acpi_quirk_skip_acpi_ac_and_battery())
- return;
-
- dmi_check_system(bat_dmi_table);
-
- result = acpi_bus_register_driver(&acpi_battery_driver);
- battery_driver_registered = (result == 0);
-}
-
static int __init acpi_battery_init(void)
{
- if (acpi_disabled)
+ if (acpi_disabled || acpi_quirk_skip_acpi_ac_and_battery())
return -ENODEV;
- async_cookie = async_schedule(acpi_battery_init_async, NULL);
- return 0;
+ dmi_check_system(bat_dmi_table);
+
+ return acpi_bus_register_driver(&acpi_battery_driver);
}
static void __exit acpi_battery_exit(void)
{
- async_synchronize_cookie(async_cookie + 1);
- if (battery_driver_registered) {
- acpi_bus_unregister_driver(&acpi_battery_driver);
- battery_hook_exit();
- }
+ acpi_bus_unregister_driver(&acpi_battery_driver);
+ battery_hook_exit();
}
module_init(acpi_battery_init);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 284bc2e03580..16917dc3ad60 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1203,6 +1203,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_LPIC:
message = "LPIC";
break;
+ case ACPI_IRQ_MODEL_RINTC:
+ message = "RINTC";
+ break;
default:
pr_info("Unknown interrupt routing model\n");
return -ENODEV;
@@ -1459,6 +1462,7 @@ static int __init acpi_init(void)
acpi_hest_init();
acpi_ghes_init();
acpi_arm_init();
+ acpi_riscv_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index cc61020756be..51470208e6da 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -547,20 +547,20 @@ static int acpi_button_add(struct acpi_device *device)
!strcmp(hid, ACPI_BUTTON_HID_POWERF)) {
button->type = ACPI_BUTTON_TYPE_POWER;
handler = acpi_button_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_POWER, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
} else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) ||
!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) {
button->type = ACPI_BUTTON_TYPE_SLEEP;
handler = acpi_button_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
} else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) {
button->type = ACPI_BUTTON_TYPE_LID;
handler = acpi_lid_notify;
- strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
+ strscpy(name, ACPI_BUTTON_DEVICE_NAME_LID, MAX_ACPI_DEVICE_NAME_LEN);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
input->open = acpi_lid_input_open;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index dd3d3082c8c7..5b06e236aabe 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -103,6 +103,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
+/* Check if a CPC register is in FFH */
+#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+
/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
@@ -171,8 +176,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
GENMASK(((reg)->bit_width) - 1, 0))
+#define MASK_VAL_WRITE(reg, prev_val, val) \
+ ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
+ ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -859,6 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
+ spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -1064,7 +1073,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- *val = MASK_VAL(reg, *val);
+ *val = MASK_VAL_READ(reg, *val);
return 0;
}
@@ -1073,9 +1082,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
int size;
+ u64 prev_val;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ struct cpc_desc *cpc_desc;
size = GET_BIT_WIDTH(reg);
@@ -1108,8 +1119,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, size);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- val = MASK_VAL(reg, val);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ spin_lock(&cpc_desc->rmw_lock);
+ switch (size) {
+ case 8:
+ prev_val = readb_relaxed(vaddr);
+ break;
+ case 16:
+ prev_val = readw_relaxed(vaddr);
+ break;
+ case 32:
+ prev_val = readl_relaxed(vaddr);
+ break;
+ case 64:
+ prev_val = readq_relaxed(vaddr);
+ break;
+ default:
+ spin_unlock(&cpc_desc->rmw_lock);
+ return -EFAULT;
+ }
+ val = MASK_VAL_WRITE(reg, prev_val, val);
+ val |= prev_val;
+ }
switch (size) {
case 8:
@@ -1136,6 +1173,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
break;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ spin_unlock(&cpc_desc->rmw_lock);
+
return ret_val;
}
@@ -1486,9 +1526,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
+ } else if (osc_cpc_flexible_adr_space_confirmed &&
+ CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
} else {
ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
+ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
}
return ret;
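To see what the MASK_VAL_READ()/MASK_VAL_WRITE() pair above computes, take a field with bit_offset = 8 and bit_width = 8: a write must splice the new value into bits 15:8 while every other bit of the previously read raw value survives. A standalone userspace sketch of the same arithmetic (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t mask_read(int off, int width, uint64_t raw)
{
	return (raw >> off) & GENMASK64(width - 1, 0);
}

static uint64_t mask_write(int off, int width, uint64_t prev, uint64_t val)
{
	return ((val & GENMASK64(width - 1, 0)) << off) |
	       (prev & ~(GENMASK64(width - 1, 0) << off));
}

int main(void)
{
	uint64_t reg = 0x1122334455667788ULL;
	uint64_t out = mask_write(8, 8, reg, 0xAB);	/* replace bits 15:8 */

	printf("%#llx -> %#llx, field now %#llx\n",
	       (unsigned long long)reg, (unsigned long long)out,
	       (unsigned long long)mask_read(8, 8, out));	/* 0xab */
	return 0;
}

This is also why cpc_write() now reads prev_val under rmw_lock before writing: two concurrent writers updating different fields of the same SystemMemory register would otherwise lose each other's bits.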
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 23373faa35ec..3961fc47152c 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -439,23 +439,33 @@ static ssize_t description_show(struct device *dev,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *str_obj;
+ acpi_status status;
int result;
- if (acpi_dev->pnp.str_obj == NULL)
- return 0;
+ status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR",
+ NULL, &buffer,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ str_obj = buffer.pointer;
/*
* The _STR object contains a Unicode identifier for a device.
* We need to convert to utf-8 so it can be displayed.
*/
result = utf16s_to_utf8s(
- (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
- acpi_dev->pnp.str_obj->buffer.length,
+ (wchar_t *)str_obj->buffer.pointer,
+ str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE - 1);
buf[result++] = '\n';
+ kfree(str_obj);
+
return result;
}
static DEVICE_ATTR_RO(description);
@@ -507,96 +517,97 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(status);
-/**
- * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
- * @dev: ACPI device object.
- */
-int acpi_device_setup_files(struct acpi_device *dev)
-{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- int result = 0;
+static struct attribute *acpi_attrs[] = {
+ &dev_attr_path.attr,
+ &dev_attr_hid.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_description.attr,
+ &dev_attr_adr.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_sun.attr,
+ &dev_attr_hrv.attr,
+ &dev_attr_status.attr,
+ &dev_attr_eject.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_real_power_state.attr,
+ NULL
+};
+static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr)
+{
/*
* Devices gotten from FADT don't have a "path" attribute
*/
- if (dev->handle) {
- result = device_create_file(&dev->dev, &dev_attr_path);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_path)
+ return dev->handle;
- if (!list_empty(&dev->pnp.ids)) {
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
+ if (attr == &dev_attr_hid || attr == &dev_attr_modalias)
+ return !list_empty(&dev->pnp.ids);
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_description)
+ return acpi_has_method(dev->handle, "_STR");
- /*
- * If device has _STR, 'description' file is created
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- status = acpi_evaluate_object(dev->handle, "_STR",
- NULL, &buffer);
- if (ACPI_FAILURE(status))
- buffer.pointer = NULL;
- dev->pnp.str_obj = buffer.pointer;
- result = device_create_file(&dev->dev, &dev_attr_description);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_adr)
+ return dev->pnp.type.bus_address;
- if (dev->pnp.type.bus_address)
- result = device_create_file(&dev->dev, &dev_attr_adr);
- if (acpi_device_uid(dev))
- result = device_create_file(&dev->dev, &dev_attr_uid);
+ if (attr == &dev_attr_uid)
+ return acpi_device_uid(dev);
- if (acpi_has_method(dev->handle, "_SUN")) {
- result = device_create_file(&dev->dev, &dev_attr_sun);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_sun)
+ return acpi_has_method(dev->handle, "_SUN");
- if (acpi_has_method(dev->handle, "_HRV")) {
- result = device_create_file(&dev->dev, &dev_attr_hrv);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_hrv)
+ return acpi_has_method(dev->handle, "_HRV");
- if (acpi_has_method(dev->handle, "_STA")) {
- result = device_create_file(&dev->dev, &dev_attr_status);
- if (result)
- goto end;
- }
+ if (attr == &dev_attr_status)
+ return acpi_has_method(dev->handle, "_STA");
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
- if (acpi_has_method(dev->handle, "_EJ0")) {
- result = device_create_file(&dev->dev, &dev_attr_eject);
- if (result)
- return result;
- }
+ if (attr == &dev_attr_eject)
+ return acpi_has_method(dev->handle, "_EJ0");
- if (dev->flags.power_manageable) {
- result = device_create_file(&dev->dev, &dev_attr_power_state);
- if (result)
- return result;
+ if (attr == &dev_attr_power_state)
+ return dev->flags.power_manageable;
- if (dev->power.flags.power_resources)
- result = device_create_file(&dev->dev,
- &dev_attr_real_power_state);
- }
+ if (attr == &dev_attr_real_power_state)
+ return dev->flags.power_manageable && dev->power.flags.power_resources;
- acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data);
+ dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name);
+ return false;
+}
-end:
- return result;
+static umode_t acpi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int attrno)
+{
+ struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj));
+
+ if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr)))
+ return attr->mode;
+ else
+ return 0;
+}
+
+static const struct attribute_group acpi_group = {
+ .attrs = acpi_attrs,
+ .is_visible = acpi_attr_is_visible,
+};
+
+const struct attribute_group *acpi_groups[] = {
+ &acpi_group,
+ NULL
+};
+
+/**
+ * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+void acpi_device_setup_files(struct acpi_device *dev)
+{
+ acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data);
}
/**
@@ -606,41 +617,4 @@ end:
void acpi_device_remove_files(struct acpi_device *dev)
{
acpi_hide_nondev_subnodes(&dev->data);
-
- if (dev->flags.power_manageable) {
- device_remove_file(&dev->dev, &dev_attr_power_state);
- if (dev->power.flags.power_resources)
- device_remove_file(&dev->dev,
- &dev_attr_real_power_state);
- }
-
- /*
- * If device has _STR, remove 'description' file
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- kfree(dev->pnp.str_obj);
- device_remove_file(&dev->dev, &dev_attr_description);
- }
- /*
- * If device has _EJ0, remove 'eject' file.
- */
- if (acpi_has_method(dev->handle, "_EJ0"))
- device_remove_file(&dev->dev, &dev_attr_eject);
-
- if (acpi_has_method(dev->handle, "_SUN"))
- device_remove_file(&dev->dev, &dev_attr_sun);
-
- if (acpi_has_method(dev->handle, "_HRV"))
- device_remove_file(&dev->dev, &dev_attr_hrv);
-
- if (acpi_device_uid(dev))
- device_remove_file(&dev->dev, &dev_attr_uid);
- if (dev->pnp.type.bus_address)
- device_remove_file(&dev->dev, &dev_attr_adr);
- device_remove_file(&dev->dev, &dev_attr_modalias);
- device_remove_file(&dev->dev, &dev_attr_hid);
- if (acpi_has_method(dev->handle, "_STA"))
- device_remove_file(&dev->dev, &dev_attr_status);
- if (dev->handle)
- device_remove_file(&dev->dev, &dev_attr_path);
}
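The rewrite above replaces a long chain of device_create_file()/device_remove_file() calls (and their mirror-image removal path) with a static attribute list whose visibility is decided by an is_visible() callback: returning 0 hides an attribute, returning attr->mode exposes it. A minimal sketch of the convention, with illustrative names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "42\n");
}
static DEVICE_ATTR_RO(demo_value);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_value.attr,
	NULL
};

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* Expose the attribute only when the device can back it. */
	return dev_get_drvdata(dev) ? attr->mode : 0;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_is_visible,
};

Assigning such groups to dev->groups before device registration (as the scan.c hunk further down does with acpi_groups) lets the driver core create and remove everything, which is why acpi_device_setup_files() no longer needs any error handling.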
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 38d2f6e6b12b..25399f6dde7e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -783,6 +783,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
unsigned long tmp;
int ret = 0;
+ if (t->rdata)
+ memset(t->rdata, 0, t->rlen);
+
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
@@ -819,8 +822,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
- if (t->rdata)
- memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
if (ec->global_lock) {
@@ -847,7 +848,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
- return acpi_ec_transaction(ec, &t);
+ return acpi_ec_transaction_unlocked(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
@@ -857,7 +858,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
- acpi_ec_transaction(ec, &t) : 0;
+ acpi_ec_transaction_unlocked(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
@@ -873,6 +874,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
return result;
}
+static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
+{
+ int result;
+ u8 d;
+ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
+ .wdata = &address, .rdata = &d,
+ .wlen = 1, .rlen = 1};
+
+ result = acpi_ec_transaction_unlocked(ec, &t);
+ *data = d;
+ return result;
+}
+
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
@@ -883,6 +897,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
+static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
+{
+ u8 wdata[2] = { address, data };
+ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
+ .wdata = wdata, .rdata = NULL,
+ .wlen = 2, .rlen = 0};
+
+ return acpi_ec_transaction_unlocked(ec, &t);
+}
+
int ec_read(u8 addr, u8 *val)
{
int err;
@@ -1323,6 +1347,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
struct acpi_ec *ec = handler_context;
int result = 0, i, bytes = bits / 8;
u8 *value = (u8 *)value64;
+ u32 glk;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@@ -1330,13 +1355,25 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
+ mutex_lock(&ec->mutex);
+
+ if (ec->global_lock) {
+ acpi_status status;
+
+ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ goto unlock;
+ }
+ }
+
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
- acpi_ec_read(ec, address, value) :
- acpi_ec_write(ec, address, *value);
+ acpi_ec_read_unlocked(ec, address, value) :
+ acpi_ec_write_unlocked(ec, address, *value);
if (result < 0)
break;
}
@@ -1344,6 +1381,12 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
+
+unlock:
+ mutex_unlock(&ec->mutex);
+
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
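The reshuffling above has one purpose: a multi-byte operation region access should take ec->mutex (and the ACPI global lock, when requested) once around the whole transfer, calling _unlocked per-byte helpers inside the loop, rather than re-acquiring both locks for every byte as acpi_ec_read()/acpi_ec_write() did. The shape of that lock-once/loop-unlocked pattern, reduced to a self-contained sketch with hypothetical names:

#include <linux/mutex.h>
#include <linux/types.h>

struct demo_ctrl {
	struct mutex mutex;
	u8 regs[256];
};

static int demo_read_unlocked(struct demo_ctrl *c, u8 addr, u8 *val)
{
	lockdep_assert_held(&c->mutex);
	*val = c->regs[addr];		/* stand-in for a real transaction */
	return 0;
}

static int demo_read_range(struct demo_ctrl *c, u8 addr, u8 *buf, int len)
{
	int i, ret = 0;

	mutex_lock(&c->mutex);		/* taken once for the whole range */
	for (i = 0; i < len && !ret; i++)
		ret = demo_read_unlocked(c, addr + i, &buf[i]);
	mutex_unlock(&c->mutex);

	return ret;
}

Beyond the overhead win, holding the mutex across the loop makes the multi-byte access atomic with respect to other EC users, which per-byte locking never guaranteed.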
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index aadd4c218b32..ced7dff9a5db 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -118,8 +118,9 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, void (*release)(struct device *));
int acpi_tie_acpi_dev(struct acpi_device *adev);
int acpi_device_add(struct acpi_device *device);
-int acpi_device_setup_files(struct acpi_device *dev);
+void acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
+extern const struct attribute_group *acpi_groups[];
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
bool acpi_device_is_enabled(const struct acpi_device *adev);
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 44f91f2c6c5d..bec0dcd1f9c3 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -17,6 +17,7 @@
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
+#include <linux/numa_memblks.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index aa1038b8aec4..b727db968f33 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device,
if (result)
kfree(link);
+ acpi_dev_clear_dependencies(device);
+
return result < 0 ? result : 1;
}
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 860014b89b8e..58e10a980114 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -181,6 +181,18 @@ static struct mcfg_fixup mcfg_quirks[] = {
LOONGSON_ECAM_MCFG("LOONGSON", 0),
LOONGSON_ECAM_MCFG("\0", 1),
LOONGSON_ECAM_MCFG("LOONGSON", 1),
+ LOONGSON_ECAM_MCFG("\0", 2),
+ LOONGSON_ECAM_MCFG("LOONGSON", 2),
+ LOONGSON_ECAM_MCFG("\0", 3),
+ LOONGSON_ECAM_MCFG("LOONGSON", 3),
+ LOONGSON_ECAM_MCFG("\0", 4),
+ LOONGSON_ECAM_MCFG("LOONGSON", 4),
+ LOONGSON_ECAM_MCFG("\0", 5),
+ LOONGSON_ECAM_MCFG("LOONGSON", 5),
+ LOONGSON_ECAM_MCFG("\0", 6),
+ LOONGSON_ECAM_MCFG("LOONGSON", 6),
+ LOONGSON_ECAM_MCFG("\0", 7),
+ LOONGSON_ECAM_MCFG("LOONGSON", 7),
#endif /* LOONGARCH */
};
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
index ebd03e472955..0d1a82eeb4b0 100644
--- a/drivers/acpi/pmic/tps68470_pmic.c
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
- if (!dev || !tps68470_regmap) {
- dev_warn(dev, "dev or regmap is NULL\n");
- return -EINVAL;
- }
+ if (!tps68470_regmap)
+ return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index c78453c74ef5..1cfaa5957ac4 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -214,6 +214,30 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid)
#define UPDATE_LOCK_ALREADY_HELD 4
#define UPDATE_UNLOCK_WITHOUT_LOCK 5
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ struct prm_handler_info *handler = find_prm_handler(&handler_guid);
+ struct prm_module_info *module = find_prm_module(&handler_guid);
+ struct prm_context_buffer context;
+ efi_status_t status;
+
+ if (!module || !handler)
+ return -ENODEV;
+
+ memset(&context, 0, sizeof(context));
+ ACPI_COPY_NAMESEG(context.signature, "PRMC");
+ context.identifier = handler->guid;
+ context.static_data_buffer = handler->static_data_buffer_addr;
+ context.mmio_ranges = module->mmio_info;
+
+ status = efi_call_acpi_prm_handler(handler->handler_addr,
+ (u64)param_buffer,
+ &context);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(acpi_call_prm_handler);
+
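acpi_call_prm_handler() gives kernel code a direct way to invoke a PRM handler by GUID, outside the PlatformRtMechanism opregion path. A hedged usage sketch — the GUID and parameter layout here are made up, since a real caller must use whatever GUID and buffer ABI its platform firmware publishes, and the declaration is assumed to come from linux/prmt.h:

#include <linux/prmt.h>
#include <linux/uuid.h>

static int demo_invoke_prm(void)
{
	/* Illustrative GUID; each platform defines its own handlers. */
	guid_t guid = GUID_INIT(0x12345678, 0x1234, 0x1234, 0x12, 0x34,
				0x12, 0x34, 0x12, 0x34, 0x12, 0x34);
	u64 param = 0;	/* handler-defined parameter buffer */

	return acpi_call_prm_handler(guid, &param);
}

A return of -ENODEV means no PRM module or handler matched the GUID; other errors are the EFI status mapped through efi_status_to_err(), as in the function above.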
/*
* This is the PlatformRtMechanism opregion space handler.
* @function: indicates the read/write. In fact as the PlatformRtMechanism
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index df5d5a554b38..8a4726e2eb69 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -504,6 +504,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Go E1404GAB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "E1404GAB"),
+ },
+ },
+ {
/* Asus Vivobook E1504GA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -555,6 +562,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
+ /* MECHREV Jiaolong17KS Series GM7XG0M */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
+ },
+ },
+ {
/* XMG APEX 17 (M23) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
@@ -573,6 +586,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
+ },
+ },
+ {
/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index 86b0925f612d..a96fdf1e2cb8 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += rhct.o
+obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
new file mode 100644
index 000000000000..5ef97905a727
--- /dev/null
+++ b/drivers/acpi/riscv/init.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#include <linux/acpi.h>
+#include "init.h"
+
+void __init acpi_riscv_init(void)
+{
+ riscv_acpi_init_gsi_mapping();
+}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
new file mode 100644
index 000000000000..0b9a07e4031f
--- /dev/null
+++ b/drivers/acpi/riscv/init.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/init.h>
+
+void __init riscv_acpi_init_gsi_mapping(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
new file mode 100644
index 000000000000..cced960c2aef
--- /dev/null
+++ b/drivers/acpi/riscv/irq.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sort.h>
+#include <linux/irq.h>
+
+#include "init.h"
+
+struct riscv_ext_intc_list {
+ acpi_handle handle;
+ u32 gsi_base;
+ u32 nr_irqs;
+ u32 nr_idcs;
+ u32 id;
+ u32 type;
+ struct list_head list;
+};
+
+struct acpi_irq_dep_ctx {
+ int rc;
+ unsigned int index;
+ acpi_handle handle;
+};
+
+LIST_HEAD(ext_intc_list);
+
+static int irqchip_cmp_func(const void *in0, const void *in1)
+{
+ struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0;
+ struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1;
+
+ return (elem0->type > elem1->type) - (elem0->type < elem1->type);
+}
+
+/*
+ * On RISC-V, RINTC structures in the MADT must be probed before any other
+ * interrupt controller structures, and IMSIC before APLIC. The ACPI spec
+ * defines the RISC-V interrupt controller subtypes in the MADT in
+ * increasing order: RINTC(24) -> IMSIC(25) -> APLIC(26) -> PLIC(27).
+ * Hence, simply sorting the subtypes in ascending order establishes the
+ * required probe order.
+ */
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr)
+{
+ struct acpi_probe_entry *ape = ap_head;
+
+ if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id))
+ return;
+ sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL);
+}
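As a quick sanity check of the ordering argument in the comment above, sorting the subtype IDs in ascending order does reproduce RINTC, IMSIC, APLIC, PLIC. A standalone userspace illustration (the kernel sorts struct acpi_probe_entry records with sort(), not bare integers with qsort()):

#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* same trick as irqchip_cmp_func() */
}

int main(void)
{
	int types[] = { 27, 25, 26, 24 };	/* PLIC, IMSIC, APLIC, RINTC */

	qsort(types, 4, sizeof(types[0]), cmp);
	for (int i = 0; i < 4; i++)
		printf("%d ", types[i]);	/* prints: 24 25 26 27 */
	printf("\n");
	return 0;
}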
+
+static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i, *tmp;
+
+ list_for_each_safe(i, tmp, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi_base == ext_intc_element->gsi_base) {
+ ext_intc_element->handle = handle;
+ return AE_OK;
+ }
+ }
+
+ return AE_NOT_FOUND;
+}
+
+int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
+ u32 *id, u32 *nr_irqs, u32 *nr_idcs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) {
+ *gsi_base = ext_intc_element->gsi_base;
+ *id = ext_intc_element->id;
+ *nr_irqs = ext_intc_element->nr_irqs;
+ if (nr_idcs)
+ *nr_idcs = ext_intc_element->nr_idcs;
+
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct acpi_device *adev;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi >= ext_intc_element->gsi_base &&
+ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) {
+ adev = acpi_fetch_acpi_dev(ext_intc_element->handle);
+ if (!adev)
+ return NULL;
+
+ return acpi_fwnode_handle(adev);
+ }
+ }
+
+ return NULL;
+}
+
+static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
+ u32 id, u32 type)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
+ if (!ext_intc_element)
+ return -ENOMEM;
+
+ ext_intc_element->gsi_base = gsi_base;
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->nr_idcs = nr_idcs;
+ ext_intc_element->id = id;
+ list_add_tail(&ext_intc_element->list, &ext_intc_list);
+ return 0;
+}
+
+static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
+static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header;
+
+ return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs,
+ aplic->id, ACPI_RISCV_IRQCHIP_APLIC);
+}
+
+static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header;
+
+ return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0,
+ plic->id, ACPI_RISCV_IRQCHIP_PLIC);
+}
+
+void __init riscv_acpi_init_gsi_mapping(void)
+{
+ /* There can be either PLIC or APLIC */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) {
+ acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL);
+ return;
+ }
+
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
+ acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+}
+
+static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+ struct list_head *i;
+
+ list_for_each(i, &ext_intc_list) {
+ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list);
+ if (gsi >= ext_intc_element->gsi_base &&
+ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs))
+ return ext_intc_element->handle;
+ }
+
+ return NULL;
+}
+
+static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context)
+{
+ struct acpi_irq_dep_ctx *ctx = context;
+ struct acpi_resource_irq *irq;
+ struct acpi_resource_extended_irq *eirq;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = &ares->data.irq;
+ if (ctx->index >= irq->interrupt_count) {
+ ctx->index -= irq->interrupt_count;
+ return AE_OK;
+ }
+ ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]);
+ return AE_CTRL_TERMINATE;
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ eirq = &ares->data.extended_irq;
+ if (eirq->producer_consumer == ACPI_PRODUCER)
+ return AE_OK;
+
+ if (ctx->index >= eirq->interrupt_count) {
+ ctx->index -= eirq->interrupt_count;
+ return AE_OK;
+ }
+
+ /* Support GSIs only */
+ if (eirq->resource_source.string_length)
+ return AE_OK;
+
+ ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]);
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle)
+{
+ struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL};
+
+ if (!gsi_handle)
+ return 0;
+
+ acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx);
+ *gsi_handle = ctx.handle;
+ if (*gsi_handle)
+ return 1;
+
+ return 0;
+}
+
+static u32 riscv_acpi_add_prt_dep(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_pci_routing_table *entry;
+ struct acpi_handle_list dep_devices;
+ acpi_handle gsi_handle;
+ acpi_handle link_handle;
+ acpi_status status;
+ u32 count = 0;
+
+ status = acpi_get_irq_routing_table(handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to get IRQ routing table\n");
+ kfree(buffer.pointer);
+ return 0;
+ }
+
+ entry = buffer.pointer;
+ while (entry && (entry->length > 0)) {
+ if (entry->source[0]) {
+ acpi_get_handle(handle, entry->source, &link_handle);
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = link_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ } else {
+ gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index);
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = gsi_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ }
+
+ entry = (struct acpi_pci_routing_table *)
+ ((unsigned long)entry + entry->length);
+ }
+
+ kfree(buffer.pointer);
+ return count;
+}
+
+static u32 riscv_acpi_add_irq_dep(acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_handle gsi_handle;
+ u32 count = 0;
+ int i;
+
+ for (i = 0;
+ riscv_acpi_irq_get_dep(handle, i, &gsi_handle);
+ i++) {
+ dep_devices.count = 1;
+ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL);
+ if (!dep_devices.handles) {
+ acpi_handle_err(handle, "failed to allocate memory\n");
+ continue;
+ }
+
+ dep_devices.handles[0] = gsi_handle;
+ count += acpi_scan_add_dep(handle, &dep_devices);
+ }
+
+ return count;
+}
+
+u32 arch_acpi_add_auto_dep(acpi_handle handle)
+{
+ if (acpi_has_method(handle, "_PRT"))
+ return riscv_acpi_add_prt_dep(handle);
+
+ return riscv_acpi_add_irq_dep(handle);
+}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 22ae7829a915..7ecc401fb97f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -795,10 +795,7 @@ int acpi_device_add(struct acpi_device *device)
goto err;
}
- result = acpi_device_setup_files(device);
- if (result)
- pr_err("Error creating sysfs interface for device %s\n",
- dev_name(&device->dev));
+ acpi_device_setup_files(device);
return 0;
@@ -861,6 +858,9 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
"INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
+ "RSCV0001", /* RISC-V PLIC */
+ "RSCV0002", /* RISC-V APLIC */
+ "PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1822,6 +1822,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->dev.parent = parent ? &parent->dev : NULL;
device->dev.release = release;
device->dev.bus = &acpi_bus_type;
+ device->dev.groups = acpi_groups;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device);
@@ -2013,6 +2014,49 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
mutex_unlock(&acpi_scan_lock);
}
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices)
+{
+ u32 count;
+ int i;
+
+ for (count = 0, i = 0; i < dep_devices->count; i++) {
+ struct acpi_device_info *info;
+ struct acpi_dep_data *dep;
+ bool skip, honor_dep;
+ acpi_status status;
+
+ status = acpi_get_object_info(dep_devices->handles[i], &info);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Error reading _DEP device info\n");
+ continue;
+ }
+
+ skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
+ honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
+ kfree(info);
+
+ if (skip)
+ continue;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ continue;
+
+ count++;
+
+ dep->supplier = dep_devices->handles[i];
+ dep->consumer = handle;
+ dep->honor_dep = honor_dep;
+
+ mutex_lock(&acpi_dep_list_lock);
+ list_add_tail(&dep->node, &acpi_dep_list);
+ mutex_unlock(&acpi_dep_list_lock);
+ }
+
+ acpi_handle_list_free(dep_devices);
+ return count;
+}
+
static void acpi_scan_init_hotplug(struct acpi_device *adev)
{
struct acpi_hardware_id *hwid;
@@ -2032,11 +2076,21 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
+u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; }
+
static u32 acpi_scan_check_dep(acpi_handle handle)
{
struct acpi_handle_list dep_devices;
- u32 count;
- int i;
+ u32 count = 0;
+
+ /*
+	 * Some architectures, such as RISC-V, need to add a dependency on the
+	 * interrupt controller for every device that uses a GSI, so that the
+	 * interrupt controller is probed before any of those devices. Instead
+	 * of mandating _DEP on all such devices, detect the dependency and
+	 * add it automatically.
+ */
+ count += arch_acpi_add_auto_dep(handle);
/*
* Check for _HID here to avoid deferring the enumeration of:
@@ -2045,48 +2099,14 @@ static u32 acpi_scan_check_dep(acpi_handle handle)
	 * Still, checking for _HID catches more than just these cases ...
*/
if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
- return 0;
+ return count;
if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) {
acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
- return 0;
- }
-
- for (count = 0, i = 0; i < dep_devices.count; i++) {
- struct acpi_device_info *info;
- struct acpi_dep_data *dep;
- bool skip, honor_dep;
- acpi_status status;
-
- status = acpi_get_object_info(dep_devices.handles[i], &info);
- if (ACPI_FAILURE(status)) {
- acpi_handle_debug(handle, "Error reading _DEP device info\n");
- continue;
- }
-
- skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
- honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
- kfree(info);
-
- if (skip)
- continue;
-
- dep = kzalloc(sizeof(*dep), GFP_KERNEL);
- if (!dep)
- continue;
-
- count++;
-
- dep->supplier = dep_devices.handles[i];
- dep->consumer = handle;
- dep->honor_dep = honor_dep;
-
- mutex_lock(&acpi_dep_list_lock);
- list_add_tail(&dep->node , &acpi_dep_list);
- mutex_unlock(&acpi_dep_list_lock);
+ return count;
}
- acpi_handle_list_free(&dep_devices);
+ count += acpi_scan_add_dep(handle, &dep_devices);
return count;
}
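The arch_acpi_add_auto_dep() pair — the __weak stub introduced above and the strong RISC-V definition in riscv/irq.c earlier in this series — is the kernel's standard weak-linkage arch hook: generic code supplies a no-op default, and any architecture that defines a strong symbol with the same signature overrides it at link time. Reduced to a sketch with illustrative names (the two definitions live in separate objects):

/* core.c: generic code supplies an overridable no-op default. */
#include <linux/compiler.h>
#include <linux/types.h>

u32 __weak arch_demo_hook(void)
{
	return 0;		/* nothing to add by default */
}

/* arch/foo/hook.c: an architecture overrides it with a plain strong symbol. */
u32 arch_demo_hook(void)
{
	return 1;		/* arch-specific dependencies were added */
}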
@@ -2757,6 +2777,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header,
return 0;
}
+void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { }
+
int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
{
int count = 0;
@@ -2765,6 +2787,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return 0;
mutex_lock(&acpi_probe_mutex);
+ arch_sort_irqchip_probe(ap_head, nr);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 889f1c1a1fa9..c8ee8e42b0f6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -352,6 +352,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
},
},
/*
+ * The ASUS ROG M16 from 2023 has many events which wake it from s2idle,
+ * resulting in excessive battery drain and a risk of overheating; these
+ * events can be caused by the MMC or the AniMe display, if installed.
+ * The match is valid for all of the GU604V<x> range.
+ */
+ {
+ .callback = init_default_s3,
+ .ident = "ASUS ROG Zephyrus M16 (2023)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"),
+ },
+ },
+ /*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
* saving during S3.
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index a0cfc857fb55..78db38c7076e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -558,77 +558,31 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma
thermal_zone_device_critical(thermal);
}
-struct acpi_thermal_bind_data {
- struct thermal_zone_device *thermal;
- struct thermal_cooling_device *cdev;
- bool bind;
-};
-
-static int bind_unbind_cdev_cb(struct thermal_trip *trip, void *arg)
+static bool acpi_thermal_should_bind_cdev(struct thermal_zone_device *thermal,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct acpi_thermal_trip *acpi_trip = trip->priv;
- struct acpi_thermal_bind_data *bd = arg;
- struct thermal_zone_device *thermal = bd->thermal;
- struct thermal_cooling_device *cdev = bd->cdev;
struct acpi_device *cdev_adev = cdev->devdata;
int i;
/* Skip critical and hot trips. */
if (!acpi_trip)
- return 0;
+ return false;
for (i = 0; i < acpi_trip->devices.count; i++) {
acpi_handle handle = acpi_trip->devices.handles[i];
- struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
-
- if (adev != cdev_adev)
- continue;
-
- if (bd->bind) {
- int ret;
-
- ret = thermal_bind_cdev_to_trip(thermal, trip, cdev,
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- if (ret)
- return ret;
- } else {
- thermal_unbind_cdev_from_trip(thermal, trip, cdev);
- }
- }
-
- return 0;
-}
-static int acpi_thermal_bind_unbind_cdev(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev,
- bool bind)
-{
- struct acpi_thermal_bind_data bd = {
- .thermal = thermal, .cdev = cdev, .bind = bind
- };
-
- return for_each_thermal_trip(thermal, bind_unbind_cdev_cb, &bd);
-}
-
-static int
-acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- return acpi_thermal_bind_unbind_cdev(thermal, cdev, true);
-}
+ if (acpi_fetch_acpi_dev(handle) == cdev_adev)
+ return true;
+ }
-static int
-acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- return acpi_thermal_bind_unbind_cdev(thermal, cdev, false);
+ return false;
}
static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
- .bind = acpi_thermal_bind_cooling_device,
- .unbind = acpi_thermal_unbind_cooling_device,
+ .should_bind = acpi_thermal_should_bind_cdev,
.get_temp = thermal_get_temp,
.get_trend = thermal_get_trend,
.hot = acpi_thermal_zone_device_hot,
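The thermal conversion above swaps the imperative .bind()/.unbind() callbacks for a declarative .should_bind() predicate: the core now walks every trip/cooling-device pair itself and merely asks the zone driver whether they belong together. A hedged sketch of such a predicate — it assumes struct cooling_spec carries the upper/lower/weight constraints that the removed thermal_bind_cdev_to_trip() arguments suggest, and all names are illustrative:

#include <linux/thermal.h>

static bool demo_should_bind(struct thermal_zone_device *tz,
			     const struct thermal_trip *trip,
			     struct thermal_cooling_device *cdev,
			     struct cooling_spec *c)
{
	/* Tie cooling devices to passive trips only, in this sketch. */
	if (trip->type != THERMAL_TRIP_PASSIVE)
		return false;

	c->upper = THERMAL_NO_LIMIT;
	c->lower = THERMAL_NO_LIMIT;
	c->weight = THERMAL_WEIGHT_DEFAULT;
	return true;
}

Returning false is also how the ACPI version skips critical and hot trips, whose trip->priv is NULL.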
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index ae9384282273..6de542d99518 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -801,7 +801,8 @@ acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
if (ret != AE_NOT_FOUND)
acpi_handle_warn(handle,
- "failed to evaluate _DSM %pUb (0x%x)\n", guid, ret);
+ "failed to evaluate _DSM %pUb rev:%lld func:%lld (0x%x)\n",
+ guid, rev, func, ret);
return NULL;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 674b9db7a1ef..b70e84e8049a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -254,6 +254,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ /* Panasonic Toughbook CF-18 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"),
+ },
+ },
/*
* Toshiba models with Transflective display, these need to use
@@ -550,6 +558,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 9,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
@@ -896,7 +912,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
/* Lenovo Yoga Tab 3 Pro YT3-X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
},
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ab2b5fa83e1f..6af546b21574 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -355,7 +355,6 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/* Lenovo Yoga Tab 3 Pro X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 34bc880ca20b..0230c43377c1 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -435,7 +435,7 @@ static const struct dev_pm_ops amba_pm = {
* DMA configuration for platform and AMBA bus is same. So here we reuse
* platform's DMA config routine.
*/
-struct bus_type amba_bustype = {
+const struct bus_type amba_bustype = {
.name = "amba",
.dev_groups = amba_dev_groups,
.match = amba_match,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a05c17249448..45f63b09828a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1370,7 +1370,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
* V1.03 is known to be broken. V3.04 is known to
* work. Between, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
- * blacklist anything older than V3.04.
+ * assume that anything older than V3.04 is broken.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=15104
*/
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 70c3a33eee6f..2f16524c2526 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -437,7 +437,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -451,8 +450,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->version = (unsigned long)of_id->data;
priv->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
- priv->top_ctrl = devm_ioremap_resource(dev, res);
+ priv->top_ctrl = devm_platform_ioremap_resource_byname(pdev, "top-ctrl");
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index cb768f66f0a7..6f955e9105e8 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -19,6 +19,7 @@
#include <linux/libata.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/phy/phy.h>
#include <linux/thermal.h>
#include "ahci.h"
@@ -44,42 +45,10 @@ enum {
/* Clock Reset Register */
IMX_CLOCK_RESET = 0x7f3f,
IMX_CLOCK_RESET_RESET = 1 << 0,
- /* IMX8QM HSIO AHCI definitions */
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET = 0x03,
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET = 0x09,
- IMX8QM_SATA_PHY_IMPED_RATIO_85OHM = 0x6c,
- IMX8QM_LPCG_PHYX2_OFFSET = 0x00000,
- IMX8QM_CSR_PHYX2_OFFSET = 0x90000,
- IMX8QM_CSR_PHYX1_OFFSET = 0xa0000,
- IMX8QM_CSR_PHYX_STTS0_OFFSET = 0x4,
- IMX8QM_CSR_PCIEA_OFFSET = 0xb0000,
- IMX8QM_CSR_PCIEB_OFFSET = 0xc0000,
- IMX8QM_CSR_SATA_OFFSET = 0xd0000,
- IMX8QM_CSR_PCIE_CTRL2_OFFSET = 0x8,
- IMX8QM_CSR_MISC_OFFSET = 0xe0000,
-
- IMX8QM_LPCG_PHYX2_PCLK0_MASK = (0x3 << 16),
- IMX8QM_LPCG_PHYX2_PCLK1_MASK = (0x3 << 20),
- IMX8QM_PHY_APB_RSTN_0 = BIT(0),
- IMX8QM_PHY_MODE_SATA = BIT(19),
- IMX8QM_PHY_MODE_MASK = (0xf << 17),
- IMX8QM_PHY_PIPE_RSTN_0 = BIT(24),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0 = BIT(25),
- IMX8QM_PHY_PIPE_RSTN_1 = BIT(26),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1 = BIT(27),
- IMX8QM_STTS0_LANE0_TX_PLL_LOCK = BIT(4),
- IMX8QM_MISC_IOB_RXENA = BIT(0),
- IMX8QM_MISC_IOB_TXENA = BIT(1),
- IMX8QM_MISC_PHYX1_EPCS_SEL = BIT(12),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 = BIT(24),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 = BIT(25),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 = BIT(28),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0 = BIT(29),
- IMX8QM_SATA_CTRL_RESET_N = BIT(12),
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N = BIT(7),
- IMX8QM_CTRL_BUTTON_RST_N = BIT(21),
- IMX8QM_CTRL_POWER_UP_RST_N = BIT(23),
- IMX8QM_CTRL_LTSSM_ENABLE = BIT(4),
+ /* IMX8QM SATA specific control registers */
+ IMX8QM_SATA_AHCI_PTC = 0xc8,
+ IMX8QM_SATA_AHCI_PTC_RXWM_MASK = GENMASK(6, 0),
+ IMX8QM_SATA_AHCI_PTC_RXWM = 0x29,
};
enum ahci_imx_type {
@@ -95,14 +64,10 @@ struct imx_ahci_priv {
struct clk *sata_clk;
struct clk *sata_ref_clk;
struct clk *ahb_clk;
- struct clk *epcs_tx_clk;
- struct clk *epcs_rx_clk;
- struct clk *phy_apbclk;
- struct clk *phy_pclk0;
- struct clk *phy_pclk1;
- void __iomem *phy_base;
- struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
+ struct phy *sata_phy;
+ struct phy *cali_phy0;
+ struct phy *cali_phy1;
bool no_device;
bool first_time;
u32 phy_params;
@@ -450,201 +415,79 @@ ATTRIBUTE_GROUPS(fsl_sata_ahci);
static int imx8_sata_enable(struct ahci_host_priv *hpriv)
{
- u32 val, reg;
- int i, ret;
+ u32 val;
+ int ret;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
struct device *dev = &imxpriv->ahci_pdev->dev;
- /* configure the hsio for sata */
- ret = clk_prepare_enable(imxpriv->phy_pclk0);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk0.\n");
+ /*
+	 * Since the "REXT" pin is only present on the first lane of the
+	 * i.MX8QM PHY, its calibration results are stored, passed through
+	 * to the second lane PHY, and shared with all three lane PHYs.
+ *
+ * Initialize the first two lane PHYs here, although only the
+ * third lane PHY is used by SATA.
+ */
+ ret = phy_init(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY init failed\n");
return ret;
}
- ret = clk_prepare_enable(imxpriv->phy_pclk1);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk1.\n");
- goto disable_phy_pclk0;
+ ret = phy_power_on(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY power on failed\n");
+ goto err_cali_phy0_exit;
}
- ret = clk_prepare_enable(imxpriv->epcs_tx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_tx_clk.\n");
- goto disable_phy_pclk1;
+ ret = phy_init(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 init failed\n");
+ goto err_cali_phy0_off;
}
- ret = clk_prepare_enable(imxpriv->epcs_rx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_rx_clk.\n");
- goto disable_epcs_tx_clk;
+ ret = phy_power_on(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 power on failed\n");
+ goto err_cali_phy1_exit;
}
- ret = clk_prepare_enable(imxpriv->phy_apbclk);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_apbclk.\n");
- goto disable_epcs_rx_clk;
+ ret = phy_init(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY init failed\n");
+ goto err_cali_phy1_off;
}
- /* Configure PHYx2 PIPE_RSTN */
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val);
- if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEA of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0);
+ ret = phy_set_mode(imxpriv->sata_phy, PHY_MODE_SATA);
+ if (ret) {
+ dev_err(dev, "unable to set SATA PHY mode\n");
+ goto err_sata_phy_exit;
}
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &reg);
- if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEB of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1);
+ ret = phy_power_on(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY power up failed\n");
+ goto err_sata_phy_exit;
}
- if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The links of both PCIA and PCIEB of HSIO are down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_LPCG_PHYX2_OFFSET,
- IMX8QM_LPCG_PHYX2_PCLK0_MASK |
- IMX8QM_LPCG_PHYX2_PCLK1_MASK,
- 0);
- }
-
- /* set PWR_RST and BT_RST of csr_pciea */
- val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET;
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_BUTTON_RST_N,
- IMX8QM_CTRL_BUTTON_RST_N);
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_POWER_UP_RST_N,
- IMX8QM_CTRL_POWER_UP_RST_N);
-
- /* PHYX1_MODE to SATA */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_MODE_MASK,
- IMX8QM_PHY_MODE_SATA);
- /*
- * BIT0 RXENA 1, BIT1 TXENA 0
- * BIT12 PHY_X1_EPCS_SEL 1.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_RXENA,
- IMX8QM_MISC_IOB_RXENA);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_TXENA,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_PHYX1_EPCS_SEL,
- IMX8QM_MISC_PHYX1_EPCS_SEL);
- /*
- * It is possible, for PCIe and SATA are sharing
- * the same clock source, HPLL or external oscillator.
- * When PCIe is in low power modes (L1.X or L2 etc),
- * the clock source can be turned off. In this case,
- * if this clock source is required to be toggling by
- * SATA, then SATA functions will be abnormal.
- * Set the override here to avoid it.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0);
-
- /* clear PHY RST, then set it */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- 0);
-
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N);
-
- /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
- udelay(1);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
-
- /* APB reset */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_APB_RSTN_0,
- IMX8QM_PHY_APB_RSTN_0);
-
- for (i = 0; i < 100; i++) {
- reg = IMX8QM_CSR_PHYX1_OFFSET +
- IMX8QM_CSR_PHYX_STTS0_OFFSET;
- regmap_read(imxpriv->gpr, reg, &val);
- val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
- if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK)
- break;
- udelay(1);
- }
+	/* The cali_phy# pair can be turned off once the SATA PHY is initialized. */
+ phy_power_off(imxpriv->cali_phy1);
+ phy_exit(imxpriv->cali_phy1);
+ phy_power_off(imxpriv->cali_phy0);
+ phy_exit(imxpriv->cali_phy0);
- if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) {
- dev_err(dev, "TX PLL of the PHY is not locked\n");
- ret = -ENODEV;
- } else {
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY RX impedance ratio.\n");
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY TX impedance ratio.\n");
- usleep_range(50, 100);
+ /* RxWaterMark setting */
+ val = readl(hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
+ val &= ~IMX8QM_SATA_AHCI_PTC_RXWM_MASK;
+ val |= IMX8QM_SATA_AHCI_PTC_RXWM;
+ writel(val, hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
- /*
- * To reduce the power consumption, gate off
- * the PHY clks
- */
- clk_disable_unprepare(imxpriv->phy_apbclk);
- clk_disable_unprepare(imxpriv->phy_pclk1);
- clk_disable_unprepare(imxpriv->phy_pclk0);
- return ret;
- }
+ return 0;
- clk_disable_unprepare(imxpriv->phy_apbclk);
-disable_epcs_rx_clk:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
-disable_epcs_tx_clk:
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
-disable_phy_pclk1:
- clk_disable_unprepare(imxpriv->phy_pclk1);
-disable_phy_pclk0:
- clk_disable_unprepare(imxpriv->phy_pclk0);
+err_sata_phy_exit:
+ phy_exit(imxpriv->sata_phy);
+err_cali_phy1_off:
+ phy_power_off(imxpriv->cali_phy1);
+err_cali_phy1_exit:
+ phy_exit(imxpriv->cali_phy1);
+err_cali_phy0_off:
+ phy_power_off(imxpriv->cali_phy0);
+err_cali_phy0_exit:
+ phy_exit(imxpriv->cali_phy0);
return ret;
}
@@ -698,6 +541,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
}
} else if (imxpriv->type == AHCI_IMX8QM) {
ret = imx8_sata_enable(hpriv);
+ if (ret)
+ goto disable_clk;
+
}
usleep_range(1000, 2000);
@@ -736,8 +582,10 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
break;
case AHCI_IMX8QM:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
+ if (imxpriv->sata_phy) {
+ phy_power_off(imxpriv->sata_phy);
+ phy_exit(imxpriv->sata_phy);
+ }
break;
default:
@@ -760,6 +608,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
ahci_error_handler(ap);
+ if (imxpriv->type == AHCI_IMX8QM)
+ return;
+
if (!(imxpriv->first_time) || ahci_imx_hotplug)
return;
@@ -986,65 +837,19 @@ static const struct scsi_host_template ahci_platform_sht = {
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
- struct resource *phy_res;
- struct platform_device *pdev = imxpriv->ahci_pdev;
- struct device_node *np = dev->of_node;
-
- if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio))
- imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM;
- phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (phy_res) {
- imxpriv->phy_base = devm_ioremap(dev, phy_res->start,
- resource_size(phy_res));
- if (!imxpriv->phy_base) {
- dev_err(dev, "error with ioremap\n");
- return -ENOMEM;
- }
- } else {
- dev_err(dev, "missing *phy* reg region.\n");
- return -ENOMEM;
- }
- imxpriv->gpr =
- syscon_regmap_lookup_by_phandle(np, "hsio");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev, "unable to find gpr registers\n");
- return PTR_ERR(imxpriv->gpr);
- }
-
- imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx");
- if (IS_ERR(imxpriv->epcs_tx_clk)) {
- dev_err(dev, "can't get epcs_tx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_tx_clk);
- }
- imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx");
- if (IS_ERR(imxpriv->epcs_rx_clk)) {
- dev_err(dev, "can't get epcs_rx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_rx_clk);
- }
- imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0");
- if (IS_ERR(imxpriv->phy_pclk0)) {
- dev_err(dev, "can't get phy_pclk0 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk0);
- }
- imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1");
- if (IS_ERR(imxpriv->phy_pclk1)) {
- dev_err(dev, "can't get phy_pclk1 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk1);
- }
- imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk");
- if (IS_ERR(imxpriv->phy_apbclk)) {
- dev_err(dev, "can't get phy_apbclk clock.\n");
- return PTR_ERR(imxpriv->phy_apbclk);
- }
-
- /* Fetch GPIO, then enable the external OSC */
- imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
- GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
- if (IS_ERR(imxpriv->clkreq_gpiod))
- return PTR_ERR(imxpriv->clkreq_gpiod);
- if (imxpriv->clkreq_gpiod)
- gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
-
+ imxpriv->sata_phy = devm_phy_get(dev, "sata-phy");
+ if (IS_ERR(imxpriv->sata_phy))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->sata_phy),
+ "Failed to get sata_phy\n");
+
+ imxpriv->cali_phy0 = devm_phy_get(dev, "cali-phy0");
+ if (IS_ERR(imxpriv->cali_phy0))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy0),
+ "Failed to get cali_phy0\n");
+ imxpriv->cali_phy1 = devm_phy_get(dev, "cali-phy1");
+ if (IS_ERR(imxpriv->cali_phy1))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy1),
+ "Failed to get cali_phy1\n");
return 0;
}
@@ -1077,12 +882,6 @@ static int imx_ahci_probe(struct platform_device *pdev)
return PTR_ERR(imxpriv->sata_ref_clk);
}
- imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
- if (IS_ERR(imxpriv->ahb_clk)) {
- dev_err(dev, "can't get ahb clock.\n");
- return PTR_ERR(imxpriv->ahb_clk);
- }
-
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
u32 reg_value;
@@ -1142,11 +941,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
goto disable_clk;
/*
- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register IMX_TIMER1MS.
- * Configure CAP_SSS (support stagered spin up).
- * Implement the port0.
- * Get the ahb clock rate, and configure the TIMER1MS register.
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL.
+	 * Set CAP_SSS (support staggered spin-up) and implement port0.
*/
reg_val = readl(hpriv->mmio + HOST_CAP);
if (!(reg_val & HOST_CAP_SSS)) {
@@ -1159,8 +955,20 @@ static int imx_ahci_probe(struct platform_device *pdev)
writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ if (imxpriv->type != AHCI_IMX8QM) {
+ /*
+		 * Get the AHB clock rate and configure the vendor-specific
+		 * TIMER1MS register on i.MX53, i.MX6Q and i.MX6QP only.
+ */
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "Failed to get ahb clock\n");
+ ret = PTR_ERR(imxpriv->ahb_clk);
+ goto disable_sata;
+ }
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ }
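
TIMER1MS tells the AHCI core how many AHB clock cycles make up one millisecond, so the value written is simply the clock rate divided by 1000. A worked check, assuming a 132 MHz AHB clock (a typical i.MX6 rate, used here purely for illustration):

#include <linux/types.h>

/* Illustration only: TIMER1MS value for an assumed 132 MHz AHB clock. */
static u32 example_timer1ms_value(void)
{
	unsigned long ahb_hz = 132000000UL;	/* assumed clock rate */

	return ahb_hz / 1000;			/* 132000 cycles per ms */
}
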
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
&ahci_platform_sht);
@@ -1229,6 +1037,6 @@ static struct platform_driver imx_ahci_driver = {
module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
-MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_AUTHOR("Richard Zhu <hongxing.zhu@nxp.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ec3c5bd1f813..093b940bc953 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1446,7 +1446,6 @@ static int piix_init_sidpr(struct ata_host *host)
if (hpriv->map[i] == IDE)
return 0;
- /* is it blacklisted? */
if (piix_no_sidpr(host))
return 0;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 581704e61f28..7a8064520a35 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -410,7 +410,6 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
struct device *dev)
{
- struct device_node *child;
u32 port;
if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap))
@@ -419,14 +418,12 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
of_property_read_u32(dev->of_node,
"ports-implemented", &hpriv->saved_port_map);
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
if (!of_device_is_available(child))
continue;
- if (of_property_read_u32(child, "reg", &port)) {
- of_node_put(child);
+ if (of_property_read_u32(child, "reg", &port))
return -EINVAL;
- }
if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port]))
hpriv->saved_port_cap[port] &= PORT_CMD_CAP;
@@ -460,7 +457,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
int child_nodes, rc = -ENOMEM, enabled_ports = 0;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
- struct device_node *child;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -579,7 +575,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
if (child_nodes) {
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
u32 port;
struct platform_device *port_dev __maybe_unused;
@@ -588,7 +584,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
- of_node_put(child);
goto err_out;
}
@@ -606,18 +601,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev);
- if (rc == -EPROBE_DEFER) {
- of_node_put(child);
+ if (rc == -EPROBE_DEFER)
goto err_out;
- }
}
#endif
rc = ahci_platform_get_phy(hpriv, port, dev, child);
- if (rc) {
- of_node_put(child);
+ if (rc)
goto err_out;
- }
enabled_ports++;
}
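
Both loops above now use for_each_child_of_node_scoped(), which declares the child pointer with an automatic-cleanup attribute so the node reference is dropped on every exit path; that is what makes the explicit of_node_put() calls on the early returns removable. A minimal sketch of the pattern, assuming a generic parent node:

#include <linux/of.h>

/* Sketch only: count addressable children without manual of_node_put(). */
static int example_count_children(struct device_node *parent)
{
	int n = 0;

	for_each_child_of_node_scoped(parent, child) {
		u32 reg;

		if (of_property_read_u32(child, "reg", &reg))
			return -EINVAL;	/* reference dropped automatically */
		n++;
	}

	return n;
}
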
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 30932552437a..cdb20a700b55 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -72,19 +72,11 @@ const struct ata_port_operations ata_base_port_ops = {
.end_eh = ata_std_end_eh,
};
-const struct ata_port_operations sata_port_ops = {
- .inherits = &ata_base_port_ops,
-
- .qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
-};
-EXPORT_SYMBOL_GPL(sata_port_ops);
-
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+static unsigned int ata_dev_quirks(const struct ata_device *dev);
static DEFINE_IDA(ata_ida);
@@ -94,8 +86,8 @@ struct ata_force_param {
u8 cbl;
u8 spd_limit;
unsigned int xfer_mask;
- unsigned int horkage_on;
- unsigned int horkage_off;
+ unsigned int quirk_on;
+ unsigned int quirk_off;
u16 lflags_on;
u16 lflags_off;
};
@@ -160,18 +152,13 @@ MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static inline bool ata_dev_print_info(struct ata_device *dev)
+static inline bool ata_dev_print_info(const struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.flags & ATA_EHI_PRINTINFO;
}
-static bool ata_sstatus_online(u32 sstatus)
-{
- return (sstatus & 0xf) == 0x3;
-}
-
/**
* ata_link_next - link iteration helper
* @link: the previous link, NULL to start
@@ -457,17 +444,17 @@ static void ata_force_xfermask(struct ata_device *dev)
}
/**
- * ata_force_horkage - force horkage according to libata.force
+ * ata_force_quirks - force quirks according to libata.force
* @dev: ATA device of interest
*
- * Force horkage according to libata.force and whine about it.
+ * Force quirks according to libata.force and whine about it.
* For consistency with link selection, device number 15 selects
* the first device connected to the host link.
*
* LOCKING:
* EH context.
*/
-static void ata_force_horkage(struct ata_device *dev)
+static void ata_force_quirks(struct ata_device *dev)
{
int devno = dev->link->pmp + dev->devno;
int alt_devno = devno;
@@ -487,21 +474,21 @@ static void ata_force_horkage(struct ata_device *dev)
fe->device != alt_devno)
continue;
- if (!(~dev->horkage & fe->param.horkage_on) &&
- !(dev->horkage & fe->param.horkage_off))
+ if (!(~dev->quirks & fe->param.quirk_on) &&
+ !(dev->quirks & fe->param.quirk_off))
continue;
- dev->horkage |= fe->param.horkage_on;
- dev->horkage &= ~fe->param.horkage_off;
+ dev->quirks |= fe->param.quirk_on;
+ dev->quirks &= ~fe->param.quirk_off;
- ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+		ata_dev_notice(dev, "FORCE: quirks modified (%s)\n",
fe->param.name);
}
}
#else
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
-static inline void ata_force_horkage(struct ata_device *dev) { }
+static inline void ata_force_quirks(struct ata_device *dev) { }
#endif
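
The skip test in ata_force_quirks() is dense: an entry is ignored when every bit in quirk_on is already set and no bit in quirk_off remains set, i.e. when applying it would change nothing. A worked check with hypothetical masks, using the ATA_QUIRK_NONCQ constant introduced by this series:

#include <linux/libata.h>

/* Illustration only: the "entry would be a no-op" test from above. */
static bool example_force_is_noop(unsigned int quirks)
{
	unsigned int quirk_on = ATA_QUIRK_NONCQ;	/* entry sets this */
	unsigned int quirk_off = 0;			/* entry clears nothing */

	/*
	 * !(~quirks & quirk_on): every bit to be set is already set.
	 * !(quirks & quirk_off): no bit to be cleared is currently set.
	 */
	return !(~quirks & quirk_on) && !(quirks & quirk_off);
}
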
/**
@@ -1221,7 +1208,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
*max_sectors = ata_tf_to_lba48(&tf) + 1;
else
*max_sectors = ata_tf_to_lba(&tf) + 1;
- if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+ if (dev->quirks & ATA_QUIRK_HPA_SIZE)
(*max_sectors)--;
return 0;
}
@@ -1306,7 +1293,7 @@ static int ata_hpa_resize(struct ata_device *dev)
/* do we need to do it? */
if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
!ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
- (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+ (dev->quirks & ATA_QUIRK_BROKEN_HPA))
return 0;
/* read native max address */
@@ -1318,7 +1305,7 @@ static int ata_hpa_resize(struct ata_device *dev)
if (rc == -EACCES || !unlock_hpa) {
ata_dev_warn(dev,
"HPA support seems broken, skipping HPA handling\n");
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
/* we can continue if device aborted the command */
if (rc == -EACCES)
@@ -1355,7 +1342,7 @@ static int ata_hpa_resize(struct ata_device *dev)
"device aborted resize (%llu -> %llu), skipping HPA handling\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
return 0;
} else if (rc)
return rc;
@@ -1835,7 +1822,7 @@ retry:
goto err_out;
}
- if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ if (dev->quirks & ATA_QUIRK_DUMP_ID) {
ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
@@ -2104,7 +2091,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
+ !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2124,7 +2111,7 @@ retry:
if (err_mask) {
if (dma) {
- dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ dev->quirks |= ATA_QUIRK_NO_DMA_LOG;
if (!ata_port_is_frozen(dev->link->ap))
goto retry;
}
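
The retry above is a degrade-once fallback: the first DMA failure sets ATA_QUIRK_NO_DMA_LOG, and the same read is immediately reissued (and from then on always issued) as a PIO read log. A minimal sketch of the idiom, with hypothetical helpers:

#include <linux/types.h>

struct example_ctx {
	bool dma_ok;
};

int do_dma_read(struct example_ctx *ctx);	/* hypothetical backend */
int do_pio_read(struct example_ctx *ctx);	/* hypothetical backend */

/* Sketch only: fail once, remember it, retry on the slower path. */
static int example_read_with_fallback(struct example_ctx *ctx)
{
retry:
	if (ctx->dma_ok) {
		if (do_dma_read(ctx) == 0)
			return 0;
		ctx->dma_ok = false;	/* degrade once, permanently */
		goto retry;
	}

	return do_pio_read(ctx);
}
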
@@ -2138,22 +2125,19 @@ retry:
static int ata_log_supported(struct ata_device *dev, u8 log)
{
- struct ata_port *ap = dev->link->ap;
-
- if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
return 0;
- return get_unaligned_le16(&ap->sector_buf[log * 2]);
+ return get_unaligned_le16(&dev->sector_buf[log * 2]);
}
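
The lookup above works because the General Purpose Log Directory (log address 0) stores one little-endian 16-bit page count per log address at byte offset log * 2, with zero meaning the log is unsupported. A minimal sketch over a 512-byte directory buffer (get_unaligned_le16() is the kernel's unaligned little-endian load helper):

#include <linux/types.h>

/* Sketch only: pages a device advertises for one log address. */
static unsigned int example_log_page_count(const u8 *log_dir, u8 log)
{
	/* Two bytes per log address; 0 means "log not supported". */
	return get_unaligned_le16(&log_dir[log * 2]);
}
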
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err, i;
- if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
+ if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG)
return false;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
@@ -2165,7 +2149,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
if (ata_id_major_version(dev->id) >= 10)
ata_dev_warn(dev,
"ATA Identify Device Log not supported\n");
- dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
+ dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG;
return false;
}
@@ -2173,20 +2157,20 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
* Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
* supported.
*/
- err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
- 1);
+ err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0,
+ dev->sector_buf, 1);
if (err)
return false;
- for (i = 0; i < ap->sector_buf[8]; i++) {
- if (ap->sector_buf[9 + i] == page)
+ for (i = 0; i < dev->sector_buf[8]; i++) {
+ if (dev->sector_buf[9 + i] == page)
return true;
}
return false;
}
-static int ata_do_link_spd_horkage(struct ata_device *dev)
+static int ata_do_link_spd_quirk(struct ata_device *dev)
{
struct ata_link *plink = ata_dev_phys_link(dev);
u32 target, target_limit;
@@ -2194,7 +2178,7 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
if (!sata_scr_valid(plink))
return 0;
- if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
+ if (dev->quirks & ATA_QUIRK_1_5_GBPS)
target = 1;
else
return 0;
@@ -2212,26 +2196,25 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
- ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+ ata_dev_info(dev, "applying link speed limit quirk to %s\n",
sata_spd_string(target));
return -EAGAIN;
}
return 0;
}
-static inline u8 ata_dev_knobble(struct ata_device *dev)
+static inline bool ata_dev_knobble(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
- if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
- return 0;
+ if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK)
+ return false;
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
@@ -2239,14 +2222,14 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
- 0, ap->sector_buf, 1);
+ 0, dev->sector_buf, 1);
if (!err_mask) {
u8 *cmds = dev->ncq_send_recv_cmds;
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) {
ata_dev_dbg(dev, "disabling queued TRIM support\n");
cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
@@ -2256,7 +2239,6 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
@@ -2265,17 +2247,14 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
- 0, ap->sector_buf, 1);
- if (!err_mask) {
- u8 *cmds = dev->ncq_non_data_cmds;
-
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
- }
+ 0, dev->sector_buf, 1);
+ if (!err_mask)
+ memcpy(dev->ncq_non_data_cmds, dev->sector_buf,
+ ATA_LOG_NCQ_NON_DATA_SIZE);
}
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
@@ -2284,12 +2263,11 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
- ap->sector_buf,
- 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
+ if (!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
goto not_supported;
dev->flags |= ATA_DFLAG_NCQ_PRIO;
@@ -2334,12 +2312,12 @@ static int ata_dev_config_ncq(struct ata_device *dev,
}
if (!IS_ENABLED(CONFIG_SATA_HOST))
return 0;
- if (dev->horkage & ATA_HORKAGE_NONCQ) {
+ if (dev->quirks & ATA_QUIRK_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI &&
ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
@@ -2350,7 +2328,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
dev->flags |= ATA_DFLAG_NCQ;
}
- if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+ if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) &&
(ap->flags & ATA_FLAG_FPDMA_AA) &&
ata_id_has_fpdma_aa(dev->id)) {
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
@@ -2360,7 +2338,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to enable AA (error_mask=0x%x)\n",
err_mask);
if (err_mask != AC_ERR_DEV) {
- dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+ dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA;
return -EIO;
}
} else
@@ -2405,9 +2383,8 @@ static void ata_dev_config_sense_reporting(struct ata_device *dev)
static void ata_dev_config_zac(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
- u8 *identify_buf = ap->sector_buf;
+ u8 *identify_buf = dev->sector_buf;
dev->zac_zones_optimal_open = U32_MAX;
dev->zac_zones_optimal_nonseq = U32_MAX;
@@ -2459,7 +2436,6 @@ static void ata_dev_config_zac(struct ata_device *dev)
static void ata_dev_config_trusted(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
u64 trusted_cap;
unsigned int err;
@@ -2473,11 +2449,11 @@ static void ata_dev_config_trusted(struct ata_device *dev)
}
err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err)
return;
- trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
+ trusted_cap = get_unaligned_le64(&dev->sector_buf[40]);
if (!(trusted_cap & (1ULL << 63))) {
ata_dev_dbg(dev,
"Trusted Computing capability qword not valid!\n");
@@ -2488,12 +2464,41 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
+void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+{
+ kfree(dev->cdl);
+ dev->cdl = NULL;
+}
+
+static int ata_dev_init_cdl_resources(struct ata_device *dev)
+{
+ struct ata_cdl *cdl = dev->cdl;
+ unsigned int err_mask;
+
+ if (!cdl) {
+ cdl = kzalloc(sizeof(*cdl), GFP_KERNEL);
+ if (!cdl)
+ return -ENOMEM;
+ dev->cdl = cdl;
+ }
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf,
+ ATA_LOG_CDL_SIZE / ATA_SECT_SIZE);
+ if (err_mask) {
+		ata_dev_warn(dev, "Failed to read Command Duration Limits log\n");
+ ata_dev_cleanup_cdl_resources(dev);
+ return -EIO;
+ }
+
+ return 0;
+}
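
The helper allocates the per-device CDL structure only on first use, re-reads the descriptor log into it on every later revalidation, and frees it again when the read fails. A generic sketch of that allocate-once-then-refresh idiom (names hypothetical):

#include <linux/slab.h>

struct example_log {
	u8 buf[512];
};

/* Sketch only: allocate on first call, refresh contents on every call. */
static int example_refresh_log(struct example_log **slot)
{
	struct example_log *log = *slot;

	if (!log) {
		log = kzalloc(sizeof(*log), GFP_KERNEL);
		if (!log)
			return -ENOMEM;
		*slot = log;
	}

	/* ... re-read the log page into log->buf here ... */
	return 0;
}
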
+
static void ata_dev_config_cdl(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
bool cdl_enabled;
u64 val;
+ int ret;
if (ata_id_major_version(dev->id) < 11)
goto not_supported;
@@ -2505,12 +2510,12 @@ static void ata_dev_config_cdl(struct ata_device *dev)
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SUPPORTED_CAPABILITIES,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
/* Check Command Duration Limit Supported bits */
- val = get_unaligned_le64(&ap->sector_buf[168]);
+ val = get_unaligned_le64(&dev->sector_buf[168]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
goto not_supported;
@@ -2523,7 +2528,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
* We must have support for the sense data for successful NCQ commands
* log indicated by the successful NCQ command sense data supported bit.
*/
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
ata_dev_warn(dev,
"CDL supported but Successful NCQ Command Sense Data is not supported\n");
@@ -2543,11 +2548,11 @@ static void ata_dev_config_cdl(struct ata_device *dev)
*/
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_CURRENT_SETTINGS,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
if (!cdl_enabled) {
@@ -2588,37 +2593,20 @@ static void ata_dev_config_cdl(struct ata_device *dev)
}
}
- /*
- * Allocate a buffer to handle reading the sense data for successful
- * NCQ Commands log page for commands using a CDL with one of the limit
- * policy set to 0xD (successful completion with sense data available
- * bit set).
- */
- if (!ap->ncq_sense_buf) {
- ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
- if (!ap->ncq_sense_buf)
- goto not_supported;
- }
-
- /*
- * Command duration limits is supported: cache the CDL log page 18h
- * (command duration descriptors).
- */
- err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+ /* CDL is supported: allocate and initialize needed resources. */
+ ret = ata_dev_init_cdl_resources(dev);
+ if (ret) {
+		ata_dev_warn(dev, "Failed to initialize CDL resources\n");
goto not_supported;
}
- memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
dev->flags |= ATA_DFLAG_CDL;
return;
not_supported:
dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
- kfree(ap->ncq_sense_buf);
- ap->ncq_sense_buf = NULL;
+ ata_dev_cleanup_cdl_resources(dev);
}
static int ata_dev_config_lba(struct ata_device *dev)
@@ -2689,7 +2677,7 @@ static void ata_dev_config_fua(struct ata_device *dev)
goto nofua;
/* Ignore known bad devices and devices that lack NCQ support */
- if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
+ if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA))
goto nofua;
dev->flags |= ATA_DFLAG_FUA;
@@ -2702,7 +2690,7 @@ nofua:
static void ata_dev_config_devslp(struct ata_device *dev)
{
- u8 *sata_setting = dev->link->ap->sector_buf;
+ u8 *sata_setting = dev->sector_buf;
unsigned int err_mask;
int i, j;
@@ -2829,11 +2817,11 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- /* set horkage */
- dev->horkage |= ata_dev_blacklisted(dev);
- ata_force_horkage(dev);
+ /* Set quirks */
+ dev->quirks |= ata_dev_quirks(dev);
+ ata_force_quirks(dev);
- if (dev->horkage & ATA_HORKAGE_DISABLE) {
+ if (dev->quirks & ATA_QUIRK_DISABLE) {
ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
@@ -2848,19 +2836,19 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- rc = ata_do_link_spd_horkage(dev);
+ rc = ata_do_link_spd_quirk(dev);
if (rc)
return rc;
/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->horkage |= ATA_HORKAGE_NOLPM;
+ dev->quirks |= ATA_QUIRK_NOLPM;
if (ap->flags & ATA_FLAG_NO_LPM)
- dev->horkage |= ATA_HORKAGE_NOLPM;
+ dev->quirks |= ATA_QUIRK_NOLPM;
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ if (dev->quirks & ATA_QUIRK_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
}
@@ -3006,7 +2994,8 @@ int ata_dev_configure(struct ata_device *dev)
cdb_intr_string = ", CDB intr";
}
- if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
+ if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) ||
+ atapi_id_dmadir(dev->id)) {
dev->flags |= ATA_DFLAG_DMADIR;
dma_dir_string = ", DMADIR";
}
@@ -3043,24 +3032,24 @@ int ata_dev_configure(struct ata_device *dev)
if ((dev->class == ATA_DEV_ATAPI) &&
(atapi_command_packet_set(id) == TYPE_TAPE)) {
dev->max_sectors = ATA_MAX_SECTORS_TAPE;
- dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+ dev->quirks |= ATA_QUIRK_STUCK_ERR;
}
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_128)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_1024)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
- if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) {
/* Let the user know. We don't want to disallow opens for
rescue purposes, or in case the vendor is just a blithering
idiot. Do this after the dev_config call as some controllers
@@ -3075,7 +3064,7 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
+ if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) {
ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
@@ -3199,86 +3188,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
EXPORT_SYMBOL_GPL(ata_dev_pair);
-/**
- * sata_down_spd_limit - adjust SATA spd limit downward
- * @link: Link to adjust SATA spd limit for
- * @spd_limit: Additional limit
- *
- * Adjust SATA spd limit of @link downward. Note that this
- * function only adjusts the limit. The change must be applied
- * using sata_set_spd().
- *
- * If @spd_limit is non-zero, the speed is limited to equal to or
- * lower than @spd_limit if such speed is supported. If
- * @spd_limit is slower than any supported speed, only the lowest
- * supported speed is allowed.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * 0 on success, negative errno on failure
- */
-int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
-{
- u32 sstatus, spd, mask;
- int rc, bit;
-
- if (!sata_scr_valid(link))
- return -EOPNOTSUPP;
-
- /* If SCR can be read, use it to determine the current SPD.
- * If not, use cached value in link->sata_spd.
- */
- rc = sata_scr_read(link, SCR_STATUS, &sstatus);
- if (rc == 0 && ata_sstatus_online(sstatus))
- spd = (sstatus >> 4) & 0xf;
- else
- spd = link->sata_spd;
-
- mask = link->sata_spd_limit;
- if (mask <= 1)
- return -EINVAL;
-
- /* unconditionally mask off the highest bit */
- bit = fls(mask) - 1;
- mask &= ~(1 << bit);
-
- /*
- * Mask off all speeds higher than or equal to the current one. At
- * this point, if current SPD is not available and we previously
- * recorded the link speed from SStatus, the driver has already
- * masked off the highest bit so mask should already be 1 or 0.
- * Otherwise, we should not force 1.5Gbps on a link where we have
- * not previously recorded speed from SStatus. Just return in this
- * case.
- */
- if (spd > 1)
- mask &= (1 << (spd - 1)) - 1;
- else if (link->sata_spd)
- return -EINVAL;
-
- /* were we already at the bottom? */
- if (!mask)
- return -EINVAL;
-
- if (spd_limit) {
- if (mask & ((1 << spd_limit) - 1))
- mask &= (1 << spd_limit) - 1;
- else {
- bit = ffs(mask) - 1;
- mask = 1 << bit;
- }
- }
-
- link->sata_spd_limit = mask;
-
- ata_link_warn(link, "limiting SATA link speed to %s\n",
- sata_spd_string(fls(mask)));
-
- return 0;
-}
-
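
The removed helper (presumably relocated rather than dropped, given the other SATA-only code leaving this file in the same series) treats sata_spd_limit as a bitmask with one bit per link generation. A worked pass through the masking, assuming a link currently at 3.0 Gbps (spd == 2) with all three generations initially allowed:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustration only: one pass of the removed speed-mask arithmetic. */
static u32 example_down_spd_mask(void)
{
	u32 mask = 0x7;			/* bits 0..2 = 1.5 / 3.0 / 6.0 Gbps */
	int spd = 2;			/* current speed: 3.0 Gbps */
	int bit = fls(mask) - 1;	/* 2: highest allowed generation */

	mask &= ~(1 << bit);		/* drop 6.0 Gbps   -> mask == 0x3 */
	mask &= (1 << (spd - 1)) - 1;	/* drop >= current -> mask == 0x1 */

	return mask;			/* only 1.5 Gbps remains allowed */
}
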
#ifdef CONFIG_ATA_ACPI
/**
* ata_timing_cycle2mode - find xfer mode for the specified cycle duration
@@ -3425,7 +3334,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct ata_eh_context *ehc = &dev->link->eh_context;
- const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
+ const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER;
const char *dev_err_whine = "";
int ign_dev_err = 0;
unsigned int err_mask = 0;
@@ -3761,33 +3670,6 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_std_prereset);
/**
- * sata_std_hardreset - COMRESET w/o waiting or classification
- * @link: link to reset
- * @class: resulting class of attached device
- * @deadline: deadline jiffies for the operation
- *
- * Standard SATA COMRESET w/o waiting or classification.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 if link offline, -EAGAIN if link online, -errno on errors.
- */
-int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
- bool online;
- int rc;
-
- /* do hardreset */
- rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
- return online ? -EAGAIN : rc;
-}
-EXPORT_SYMBOL_GPL(sata_std_hardreset);
-
-/**
* ata_std_postreset - standard postreset callback
* @link: the target ata_link
* @classes: classes of attached devices
@@ -3878,7 +3760,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
- u16 *id = (void *)dev->link->ap->sector_buf;
+ u16 *id = (void *)dev->sector_buf;
int rc;
/* read ID data */
@@ -3969,7 +3851,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
- !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
+ !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) {
ata_dev_warn(dev,
"old n_sectors matches native, probably "
"late HPA lock, will try to unlock HPA\n");
@@ -3987,223 +3869,292 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
return rc;
}
-struct ata_blacklist_entry {
+static const char * const ata_quirk_names[] = {
+ [__ATA_QUIRK_DIAGNOSTIC] = "diagnostic",
+ [__ATA_QUIRK_NODMA] = "nodma",
+ [__ATA_QUIRK_NONCQ] = "noncq",
+ [__ATA_QUIRK_MAX_SEC_128] = "maxsec128",
+ [__ATA_QUIRK_BROKEN_HPA] = "brokenhpa",
+ [__ATA_QUIRK_DISABLE] = "disable",
+ [__ATA_QUIRK_HPA_SIZE] = "hpasize",
+ [__ATA_QUIRK_IVB] = "ivb",
+ [__ATA_QUIRK_STUCK_ERR] = "stuckerr",
+ [__ATA_QUIRK_BRIDGE_OK] = "bridgeok",
+ [__ATA_QUIRK_ATAPI_MOD16_DMA] = "atapimod16dma",
+ [__ATA_QUIRK_FIRMWARE_WARN] = "firmwarewarn",
+ [__ATA_QUIRK_1_5_GBPS] = "1.5gbps",
+ [__ATA_QUIRK_NOSETXFER] = "nosetxfer",
+ [__ATA_QUIRK_BROKEN_FPDMA_AA] = "brokenfpdmaaa",
+ [__ATA_QUIRK_DUMP_ID] = "dumpid",
+ [__ATA_QUIRK_MAX_SEC_LBA48] = "maxseclba48",
+ [__ATA_QUIRK_ATAPI_DMADIR] = "atapidmadir",
+ [__ATA_QUIRK_NO_NCQ_TRIM] = "noncqtrim",
+ [__ATA_QUIRK_NOLPM] = "nolpm",
+ [__ATA_QUIRK_WD_BROKEN_LPM] = "wdbrokenlpm",
+ [__ATA_QUIRK_ZERO_AFTER_TRIM] = "zeroaftertrim",
+ [__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
+ [__ATA_QUIRK_NOTRIM] = "notrim",
+ [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
+ [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
+ [__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
+ [__ATA_QUIRK_NO_FUA] = "nofua",
+};
+
+static void ata_dev_print_quirks(const struct ata_device *dev,
+ const char *model, const char *rev,
+ unsigned int quirks)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int n = 0, i;
+ size_t sz;
+ char *str;
+
+ if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS)
+ return;
+
+ ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS;
+
+ if (!quirks)
+ return;
+
+ sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16;
+ str = kmalloc(sz, GFP_KERNEL);
+ if (!str)
+ return;
+
+ n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:",
+ model, rev);
+
+ for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) {
+ if (quirks & (1U << i))
+ n += snprintf(str + n, sz - n,
+ " %s", ata_quirk_names[i]);
+ }
+
+ ata_dev_warn(dev, "%s\n", str);
+
+ kfree(str);
+}
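
The report is assembled with the usual bounded-snprintf accumulation idiom, the buffer sized generously up front (64 bytes of prefix plus 16 per possible quirk name). A generic sketch of the idiom, with an explicit exhaustion guard spelled out:

#include <linux/kernel.h>

/* Sketch only: append the name of each set bit into one bounded string. */
static void example_format_bits(char *str, size_t sz, unsigned int bits,
				const char * const *names, size_t nr_names)
{
	int n = snprintf(str, sz, "applying:");
	size_t i;

	for (i = 0; i < nr_names; i++) {
		if ((size_t)n >= sz)
			break;		/* buffer exhausted; stop appending */
		if (bits & (1U << i))
			n += snprintf(str + n, sz - n, " %s", names[i]);
	}
}
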
+
+struct ata_dev_quirks_entry {
const char *model_num;
const char *model_rev;
- unsigned long horkage;
+ unsigned int quirks;
};
-static const struct ata_blacklist_entry ata_device_blacklist [] = {
+static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Devices with DMA related problems under Linux */
- { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
- { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
- { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-84", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
- { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
- { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
- { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
- { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
- { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
- { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
- { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
- { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
- { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
- { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
- { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
- { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC11000H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC22100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32500H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC33100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC31600H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32100H", "24.09P07", ATA_QUIRK_NODMA },
+ { "WDC AC23200L", "21.10N21", ATA_QUIRK_NODMA },
+ { "Compaq CRD-8241B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-8400B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-848[02]B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-84", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B-64", NULL, ATA_QUIRK_NODMA },
+ { "SANYO CD-ROM CRD", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8[34]35", NULL, ATA_QUIRK_NODMA },
+ { "Toshiba CD-ROM XM-6202B", NULL, ATA_QUIRK_NODMA },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_QUIRK_NODMA },
+ { "CD-532E-A", NULL, ATA_QUIRK_NODMA },
+ { "E-IDE CD-ROM CR-840", NULL, ATA_QUIRK_NODMA },
+ { "CD-ROM Drive/F5A", NULL, ATA_QUIRK_NODMA },
+ { "WPI CDD-820", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC-148C", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC", NULL, ATA_QUIRK_NODMA },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA },
+ { "_NEC DV5800A", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SN-124", "N001", ATA_QUIRK_NODMA },
+ { "Seagate STT20000A", NULL, ATA_QUIRK_NODMA },
+ { " 2GB ATA Flash Disk", "ADMA428M", ATA_QUIRK_NODMA },
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_QUIRK_NODMA },
/* Odd clown on sil3726/4726 PMPs */
- { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+ { "Config Disk", NULL, ATA_QUIRK_DISABLE },
/* Similar story with ASMedia 1092 */
- { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
+ { "ASMT109x- Config", NULL, ATA_QUIRK_DISABLE },
/* Weird ATAPI devices */
- { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
- { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
- { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
- { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_QUIRK_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_QUIRK_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
/*
* Causes silent data corruption with higher max sects.
* http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
*/
- { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ { "ST380013AS", "3.20", ATA_QUIRK_MAX_SEC_1024 },
/*
* These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
- { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
- { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
/* NCQ is slow */
- { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00", NULL, ATA_QUIRK_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_QUIRK_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
- { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ { "FUJITSU MHT2060BH", NULL, ATA_QUIRK_NONCQ },
/* NCQ is broken */
- { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
- { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
- { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
- { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
+ { "Maxtor *", "BANC*", ATA_QUIRK_NONCQ },
+ { "Maxtor 7V300F0", "VA111630", ATA_QUIRK_NONCQ },
+ { "ST380817AS", "3.42", ATA_QUIRK_NONCQ },
+ { "ST3160023AS", "3.42", ATA_QUIRK_NONCQ },
+ { "OCZ CORE_SSD", "02.10104", ATA_QUIRK_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
- { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640[36]23AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
- { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
- ATA_HORKAGE_NOLPM },
- { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", NULL, ATA_QUIRK_BROKEN_FPDMA_AA |
+ ATA_QUIRK_NOLPM },
+ { "VB0250EAVER", "HPG7", ATA_QUIRK_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
- { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
+ { "HTS541060G9SA00", "MB3OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_QUIRK_NONCQ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
- { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
+ { "C300-CTFDDAC128MAG", "0001", ATA_QUIRK_NONCQ },
/* Sandisk SD7/8/9s lock up hard on large trims */
- { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
+ { "SanDisk SD[789]*", NULL, ATA_QUIRK_MAX_TRIM_128M },
/* devices which puke on READ_NATIVE_MAX */
- { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
- { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+ { "HDS724040KLSA80", "KFAOA20N", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_QUIRK_BROKEN_HPA },
/* this one allows HPA unlocking but fails IOs on the area */
- { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
+ { "OCZ-VERTEX", "1.30", ATA_QUIRK_BROKEN_HPA },
/* Devices which report 1 sector over size HPA */
- { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
+ { "ST340823A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST320413A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST310211A", NULL, ATA_QUIRK_HPA_SIZE },
/* Devices which get the IVB wrong */
- { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
- /* Maybe we should just blacklist TSSTcorp... */
- { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB },
+ /* Maybe we should just add all TSSTcorp devices... */
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_QUIRK_IVB },
/* Devices that do not need bridging limits applied */
- { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
- { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
+ { "MTRON MSP-SATA*", NULL, ATA_QUIRK_BRIDGE_OK },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_QUIRK_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
- { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
- { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
+ { "WD My Book", NULL, ATA_QUIRK_1_5_GBPS },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_QUIRK_1_5_GBPS },
/*
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_QUIRK_NOSETXFER },
/* These specific Pioneer models have LPM issues */
- { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
- { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_QUIRK_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_QUIRK_NOLPM },
/* Crucial devices with broken LPM support */
- { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
+ { "CT*0BX*00SSD1", NULL, ATA_QUIRK_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
- { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 512GB MX100 with newer firmware has only LPM issues */
- { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
- { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
- { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT480M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
+ { "Crucial_CT960M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* AMD Radeon devices with broken LPM support */
- { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+ { "R3SL240G", NULL, ATA_QUIRK_NOLPM },
/* Apacer models with LPM issues */
- { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+ { "Apacer AS340*", NULL, ATA_QUIRK_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
- { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_NO_DMA_LOG |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI, },
- { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron_M500IT_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M500_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M5[15]0_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_1100_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*M550*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840 EVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_NO_DMA_LOG |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 850*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI, },
+ { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
/* devices that don't properly handle TRIM commands */
- { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
- { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
+ { "SuperSSpeed S238*", NULL, ATA_QUIRK_NOTRIM },
+ { "M88V29*", NULL, ATA_QUIRK_NOTRIM },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4223,14 +4174,14 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0 },
- { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "INTEL*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SSD*INTEL*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4241,62 +4192,66 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*
* https://bugzilla.kernel.org/show_bug.cgi?id=57211
*/
- { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD800JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
/*
* This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
* log page is accessed. Ensure we never ask for this log page with
* these devices.
*/
- { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+ { "SATADOM-ML 3ME", NULL, ATA_QUIRK_NO_LOG_DIR },
/* Buggy FUA */
- { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
- { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
- { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
- { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
+ { "Maxtor", "BANC1G10", ATA_QUIRK_NO_FUA },
+ { "WDC*WD2500J*", NULL, ATA_QUIRK_NO_FUA },
+ { "OCZ-VERTEX*", NULL, ATA_QUIRK_NO_FUA },
+ { "INTEL*SSDSC2CT*", NULL, ATA_QUIRK_NO_FUA },
/* End Marker */
{ }
};
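
With the rename in place, extending the table is unchanged in spirit: one initializer with a model glob, an optional firmware-revision glob, and a quirk mask, added before the empty terminator. A purely hypothetical example entry:

/* Hypothetical device, shown only to illustrate the entry format. */
static const struct ata_dev_quirks_entry example_entry =
	{ "ACME SSD 9000*", NULL, ATA_QUIRK_NONCQ | ATA_QUIRK_NOLPM };
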
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
+static unsigned int ata_dev_quirks(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
- const struct ata_blacklist_entry *ad = ata_device_blacklist;
+ const struct ata_dev_quirks_entry *ad = __ata_dev_quirks;
+
+ /* dev->quirks is an unsigned int. */
+ BUILD_BUG_ON(__ATA_QUIRK_MAX > 32);
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (glob_match(ad->model_num, model_num)) {
- if (ad->model_rev == NULL)
- return ad->horkage;
- if (glob_match(ad->model_rev, model_rev))
- return ad->horkage;
+ if (glob_match(ad->model_num, model_num) &&
+ (!ad->model_rev || glob_match(ad->model_rev, model_rev))) {
+ ata_dev_print_quirks(dev, model_num, model_rev,
+ ad->quirks);
+ return ad->quirks;
}
ad++;
}
return 0;
}
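
Matching uses the kernel's glob_match() from lib/glob.c, so table patterns may contain '*', '?' and '[...]' classes, as in "CRD-848[02]B" above. A few illustrative checks (the model strings are made up):

#include <linux/glob.h>

/* Sketch only: how quirk-table globs match IDENTIFY model strings. */
static bool example_glob_checks(void)
{
	bool a = glob_match("Samsung SSD 840*", "Samsung SSD 840 EVO 250G");
	bool b = glob_match("CRD-848[02]B", "CRD-8482B");	/* true */
	bool c = glob_match("CRD-848[02]B", "CRD-8481B");	/* false */

	return a && b && !c;	/* all three behave as annotated */
}
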
-static int ata_dma_blacklisted(const struct ata_device *dev)
+static bool ata_dev_nodma(const struct ata_device *dev)
{
- /* We don't support polling DMA.
- * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
- * if the LLDD handles only interrupts in the HSM_ST_LAST state.
+ /*
+ * We do not support polling DMA. Deny DMA for those ATAPI devices
+ * with CDB-intr (and use PIO) if the LLDD handles only interrupts in
+ * the HSM_ST_LAST state.
*/
if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
- return 1;
- return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+ return true;
+ return dev->quirks & ATA_QUIRK_NODMA;
}
/**
@@ -4309,7 +4264,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
static int ata_is_40wire(struct ata_device *dev)
{
- if (dev->horkage & ATA_HORKAGE_IVB)
+ if (dev->quirks & ATA_QUIRK_IVB)
return ata_drive_40wire_relaxed(dev->id);
return ata_drive_40wire(dev->id);
}
@@ -4371,8 +4326,7 @@ static int cable_is_40wire(struct ata_port *ap)
*
* Compute supported xfermask of @dev and store it in
* dev->*_mask. This function is responsible for applying all
- * known limits including host controller limits, device
- * blacklist, etc...
+ * known limits including host controller limits, device quirks, etc...
*
* LOCKING:
* None.
@@ -4404,10 +4358,10 @@ static void ata_dev_xfermask(struct ata_device *dev)
xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
}
- if (ata_dma_blacklisted(dev)) {
+ if (ata_dev_nodma(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_warn(dev,
- "device is on DMA blacklist, disabling DMA\n");
+ "device does not support DMA, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
@@ -4588,7 +4542,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
/* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
* few ATAPI devices choke on such DMA requests.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
+ if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
return 1;
@@ -4629,12 +4583,6 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
-{
- return AC_ERR_OK;
-}
-EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-
/**
* ata_sg_init - Associate command with scatter-gather table.
* @qc: Command to be associated
@@ -4762,8 +4710,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)))
+ return;
+
ap = qc->ap;
link = qc->dev->link;
@@ -4785,9 +4734,10 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
ap->excl_link == link))
ap->excl_link = NULL;
- /* atapi: mark qc as inactive to prevent the interrupt handler
- * from completing the command twice later, before the error handler
- * is called. (when rc != 0 and atapi request sense is needed)
+ /*
+ * Mark qc as inactive to prevent the port interrupt handler from
+ * completing the command twice later, before the error handler is
+ * called.
*/
qc->flags &= ~ATA_QCFLAG_ACTIVE;
ap->qc_active &= ~(1ULL << qc->tag);
@@ -5021,10 +4971,13 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- trace_ata_qc_prep(qc);
- qc->err_mask |= ap->ops->qc_prep(qc);
- if (unlikely(qc->err_mask))
- goto err;
+ if (ap->ops->qc_prep) {
+ trace_ata_qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
+ }
+
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
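
Making qc_prep optional is what allows the no-op ata_noop_qc_prep() (removed earlier in this patch) and its users to go away: a driver with nothing to prepare now simply leaves the pointer NULL. A sketch of an ops table relying on that, with a hypothetical issue handler:

static unsigned int example_qc_issue(struct ata_queued_cmd *qc);	/* hypothetical */

/* Sketch only: .qc_prep left NULL now that ata_qc_issue() checks it. */
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_base_port_ops,
	.qc_issue	= example_qc_issue,
};
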
@@ -5368,7 +5321,7 @@ void ata_dev_init(struct ata_device *dev)
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
- dev->horkage = 0;
+ dev->quirks = 0;
spin_unlock_irqrestore(ap->lock, flags);
memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
@@ -5510,7 +5463,6 @@ void ata_port_free(struct ata_port *ap)
kfree(ap->pmp_link);
kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
ida_free(&ata_ida, ap->print_id);
kfree(ap);
}
@@ -6038,6 +5990,23 @@ int ata_host_activate(struct ata_host *host, int irq,
EXPORT_SYMBOL_GPL(ata_host_activate);
/**
+ * ata_dev_free_resources - Free a device's resources
+ * @dev: Target ATA device
+ *
+ * Free the resources allocated to support a device's features.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_free_resources(struct ata_device *dev)
+{
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
+
+ ata_dev_cleanup_cdl_resources(dev);
+}
+
+/**
* ata_port_detach - Detach ATA port in preparation of device removal
* @ap: ATA port to be detached
*
@@ -6091,19 +6060,15 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
- /* clean up zpodd on port removal */
- ata_for_each_link(link, ap, HOST_FIRST) {
- ata_for_each_dev(dev, link, ALL) {
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
- }
- }
+ /* Delete port multiplier link transport devices */
if (ap->pmp_link) {
int i;
+
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_tlink_delete(&ap->pmp_link[i]);
}
- /* remove the associated SCSI host */
+
+ /* Remove the associated SCSI host */
scsi_remove_host(ap->scsi_host);
ata_tport_delete(ap);
}
@@ -6299,12 +6264,12 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
-#define force_horkage_on(name, flag) \
- { #name, .horkage_on = (flag) }
+#define force_quirk_on(name, flag) \
+ { #name, .quirk_on = (flag) }
-#define force_horkage_onoff(name, flag) \
- { "no" #name, .horkage_on = (flag) }, \
- { #name, .horkage_off = (flag) }
+#define force_quirk_onoff(name, flag) \
+ { "no" #name, .quirk_on = (flag) }, \
+ { #name, .quirk_off = (flag) }
static const struct ata_force_param force_tbl[] __initconst = {
force_cbl(40c, ATA_CBL_PATA40),
@@ -6358,32 +6323,32 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
- force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
- force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
- force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
+ force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
+ force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
+ force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
- force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
- force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
- force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
+ force_quirk_onoff(trim, ATA_QUIRK_NOTRIM),
+ force_quirk_on(trim_zero, ATA_QUIRK_ZERO_AFTER_TRIM),
+ force_quirk_on(max_trim_128m, ATA_QUIRK_MAX_TRIM_128M),
- force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
- force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
- force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
+ force_quirk_onoff(dma, ATA_QUIRK_NODMA),
+ force_quirk_on(atapi_dmadir, ATA_QUIRK_ATAPI_DMADIR),
+ force_quirk_on(atapi_mod16_dma, ATA_QUIRK_ATAPI_MOD16_DMA),
- force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
- force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
- force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
+ force_quirk_onoff(dmalog, ATA_QUIRK_NO_DMA_LOG),
+ force_quirk_onoff(iddevlog, ATA_QUIRK_NO_ID_DEV_LOG),
+ force_quirk_onoff(logdir, ATA_QUIRK_NO_LOG_DIR),
- force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
- force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
- force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
+ force_quirk_on(max_sec_128, ATA_QUIRK_MAX_SEC_128),
+ force_quirk_on(max_sec_1024, ATA_QUIRK_MAX_SEC_1024),
+ force_quirk_on(max_sec_lba48, ATA_QUIRK_MAX_SEC_LBA48),
- force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
- force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
- force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
- force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
+ force_quirk_onoff(lpm, ATA_QUIRK_NOLPM),
+ force_quirk_onoff(setxfer, ATA_QUIRK_NOSETXFER),
+ force_quirk_on(dump_id, ATA_QUIRK_DUMP_ID),
+ force_quirk_onoff(fua, ATA_QUIRK_NO_FUA),
- force_horkage_on(disable, ATA_HORKAGE_DISABLE),
+ force_quirk_on(disable, ATA_QUIRK_DISABLE),
};
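
The rename keeps the user-visible libata.force option names ("noncq", "nolpm", "dump_id", ...) stable, so existing boot parameters continue to work. Following the kernel-parameters documentation, each option may carry an optional port prefix; a few illustrative invocations (ports chosen arbitrarily):

	libata.force=noncq		apply ATA_QUIRK_NONCQ to all devices
	libata.force=2:nolpm		apply ATA_QUIRK_NOLPM on port 2 only
	libata.force=3:noncq,3:dump_id	combine several options on port 3
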
static int __init ata_parse_force_one(char **cur,
@@ -6659,7 +6624,6 @@ static void ata_dummy_error_handler(struct ata_port *ap)
}
struct ata_port_operations ata_dummy_port_ops = {
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
.error_handler = ata_dummy_error_handler,
.sched_eh = ata_std_sched_eh,
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 214b935c2ced..3f0144e7dc80 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -500,10 +500,13 @@ static void ata_eh_dev_disable(struct ata_device *dev)
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
- /* From now till the next successful probe, ering is used to
+ /*
+ * From now till the next successful probe, ering is used to
* track probe failures. Clear accumulated device error info.
*/
ata_ering_clear(&dev->ering);
+
+ ata_dev_free_resources(dev);
}
static void ata_eh_unload(struct ata_port *ap)
@@ -630,6 +633,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
+ /*
+ * If the scmd was added to EH, via ata_qc_schedule_eh() ->
+ * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
+ * have set DID_TIME_OUT (since libata does not have an abort
+ * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
+ */
+ set_host_byte(scmd, DID_OK);
+
ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
@@ -1402,6 +1413,43 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
+ * ata_eh_decide_disposition - Decide the disposition of a qc based on sense data
+ * @qc: qc to examine
+ *
+ * For a regular SCSI command, the SCSI completion callback (scsi_done())
+ * will call scsi_complete(), which will call scsi_decide_disposition(),
+ * which will call scsi_check_sense(). scsi_complete() finally calls
+ * scsi_finish_command(). This is fine for SCSI, since any eventual sense
+ * data is usually returned in the completion itself (without invoking SCSI
+ * EH). However, for a QC, we always need to fetch the sense data
+ * explicitly using SCSI EH.
+ *
+ * A command that is completed via SCSI EH will instead be completed using
+ * scsi_eh_flush_done_q(), which will call scsi_finish_command() directly
+ * (without ever calling scsi_check_sense()).
+ *
+ * For a command that went through SCSI EH, it is the responsibility of the
+ * SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how
+ * scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that
+ * do not get the sense data as part of the completion.
+ *
+ * Thus, for QC commands that went via SCSI EH, we need to call
+ * scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls
+ * scsi_decide_disposition(), which calls scsi_check_sense(), in order to
+ * set the correct SCSI ML byte (if any).
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
+ */
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc)
+{
+ return scsi_check_sense(qc->scsicmd);
+}
+
+/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 * @qc: qc to perform REQUEST_SENSE_DATA_EXT to
*
@@ -1627,7 +1675,8 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
}
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
+ enum scsi_disposition ret = ata_eh_decide_disposition(qc);
+
/*
* SUCCESS here means that the sense code could be
* evaluated and should be passed to the upper layers
@@ -1924,7 +1973,7 @@ static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
return qc->flags & ATA_QCFLAG_QUIET;
}
-static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
+static int ata_eh_get_non_ncq_success_sense(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
@@ -1942,11 +1991,10 @@ static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
return -EIO;
/*
- * If we have sense data, call scsi_check_sense() in order to set the
- * correct SCSI ML byte (if any). No point in checking the return value,
- * since the command has already completed successfully.
+ * No point in checking the return value, since the command has already
+ * completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
return 0;
}
@@ -1976,9 +2024,9 @@ static void ata_eh_get_success_sense(struct ata_link *link)
* request sense ext command to retrieve the sense data.
*/
if (link->sactive)
- ret = ata_eh_read_sense_success_ncq_log(link);
+ ret = ata_eh_get_ncq_success_sense(link);
else
- ret = ata_eh_read_sense_success_non_ncq(link);
+ ret = ata_eh_get_non_ncq_success_sense(link);
if (ret)
goto out;
@@ -3247,7 +3295,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
int i;
for (i = 0; i < ATA_EH_UA_TRIES; i++) {
- u8 *sense_buffer = dev->link->ap->sector_buf;
+ u8 *sense_buffer = dev->sector_buf;
u8 sense_key = 0;
unsigned int err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index e2e9cbd405fa..d5d189328ae6 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -648,8 +648,7 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
{
struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- u32 *gscr = (void *)ap->sector_buf;
+ u32 *gscr = (void *)dev->sector_buf;
int rc;
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 48660d445602..c8b119a06bb2 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -518,6 +518,86 @@ int sata_set_spd(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_set_spd);
/**
+ * sata_down_spd_limit - adjust SATA spd limit downward
+ * @link: Link to adjust SATA spd limit for
+ * @spd_limit: Additional limit
+ *
+ * Adjust SATA spd limit of @link downward. Note that this
+ * function only adjusts the limit. The change must be applied
+ * using sata_set_spd().
+ *
+ * If @spd_limit is non-zero, the speed is limited to a speed equal to
+ * or lower than @spd_limit, if such a speed is supported. If
+ * @spd_limit is slower than any supported speed, only the lowest
+ * supported speed is allowed.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ u32 sstatus, spd, mask;
+ int rc, bit;
+
+ if (!sata_scr_valid(link))
+ return -EOPNOTSUPP;
+
+ /*
+ * If SCR can be read, use it to determine the current SPD.
+ * If not, use cached value in link->sata_spd.
+ */
+ rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (rc == 0 && ata_sstatus_online(sstatus))
+ spd = (sstatus >> 4) & 0xf;
+ else
+ spd = link->sata_spd;
+
+ mask = link->sata_spd_limit;
+ if (mask <= 1)
+ return -EINVAL;
+
+ /* unconditionally mask off the highest bit */
+ bit = fls(mask) - 1;
+ mask &= ~(1 << bit);
+
+ /*
+ * Mask off all speeds higher than or equal to the current one. At
+ * this point, if current SPD is not available and we previously
+ * recorded the link speed from SStatus, the driver has already
+ * masked off the highest bit so mask should already be 1 or 0.
+ * Otherwise, we should not force 1.5Gbps on a link where we have
+ * not previously recorded speed from SStatus. Just return in this
+ * case.
+ */
+ if (spd > 1)
+ mask &= (1 << (spd - 1)) - 1;
+ else if (link->sata_spd)
+ return -EINVAL;
+
+ /* were we already at the bottom? */
+ if (!mask)
+ return -EINVAL;
+
+ if (spd_limit) {
+ if (mask & ((1 << spd_limit) - 1)) {
+ mask &= (1 << spd_limit) - 1;
+ } else {
+ bit = ffs(mask) - 1;
+ mask = 1 << bit;
+ }
+ }
+ }
+
+ link->sata_spd_limit = mask;
+
+ ata_link_warn(link, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
+
+ return 0;
+}
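/*
 * Editor's note: a worked example (not part of this patch) of the mask
 * arithmetic above. In the limit mask, bit 0 is 1.5 Gbps, bit 1 is
 * 3.0 Gbps and bit 2 is 6.0 Gbps; the SStatus SPD field uses 1, 2 and 3
 * for the same speeds.
 */
static void example_spd_mask_math(void)
{
	u32 mask = 0x7;	/* link->sata_spd_limit: 1.5, 3.0 and 6.0 allowed */
	u32 spd = 3;	/* current speed from SStatus: 6.0 Gbps */

	/* unconditionally drop the highest bit: mask becomes 0x3 */
	mask &= ~(1 << (fls(mask) - 1));

	/* drop speeds >= the current one: mask stays 0x3 */
	mask &= (1 << (spd - 1)) - 1;

	/* fls(mask) == 2, so the new limit is reported as "3.0 Gbps" */
}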
+
+/**
* sata_link_hardreset - reset link via SATA phy reset
* @link: link to reset
* @timing: timing parameters { interval, duration, timeout } in msec
@@ -627,6 +707,34 @@ int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
EXPORT_SYMBOL_GPL(sata_link_hardreset);
/**
+ * sata_std_hardreset - COMRESET w/o waiting or classification
+ * @link: link to reset
+ * @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Standard SATA COMRESET w/o waiting or classification.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 if link offline, -EAGAIN if link online, -errno on errors.
+ */
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
+ int rc;
+
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+ if (online)
+ return -EAGAIN;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
@@ -818,7 +926,7 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ if (dev->quirks & ATA_QUIRK_NOLPM) {
count = -EOPNOTSUPP;
goto out_unlock;
}
@@ -1340,7 +1448,7 @@ EXPORT_SYMBOL_GPL(sata_async_notification);
static int ata_eh_read_log_10h(struct ata_device *dev,
int *tag, struct ata_taskfile *tf)
{
- u8 *buf = dev->link->ap->sector_buf;
+ u8 *buf = dev->sector_buf;
unsigned int err_mask;
u8 csum;
int i;
@@ -1379,8 +1487,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
}
/**
- * ata_eh_read_sense_success_ncq_log - Read the sense data for successful
- * NCQ commands log
+ * ata_eh_get_ncq_success_sense - Read and process the sense data for
+ * successful NCQ commands log page
* @link: ATA link to get sense data for
*
* Read the sense data for successful NCQ commands log page to obtain
@@ -1393,11 +1501,11 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+int ata_eh_get_ncq_success_sense(struct ata_link *link)
{
struct ata_device *dev = link->device;
struct ata_port *ap = dev->link->ap;
- u8 *buf = ap->ncq_sense_buf;
+ u8 *buf = dev->cdl->ncq_sense_log_buf;
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
@@ -1455,17 +1563,14 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
/*
- * If we have sense data, call scsi_check_sense() in order to
- * set the correct SCSI ML byte (if any). No point in checking
- * the return value, since the command has already completed
- * successfully.
+ * No point in checking the return value, since the command has
+ * already completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
}
return ret;
}
-EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);
/**
* ata_eh_analyze_ncq_error - analyze NCQ error
@@ -1576,3 +1681,11 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
+
+const struct ata_port_operations sata_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .hardreset = sata_std_hardreset,
+};
+EXPORT_SYMBOL_GPL(sata_port_ops);
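/*
 * Editor's note: a hypothetical sketch (not part of this patch) of how a
 * SATA LLD can now inherit the defaults bundled in sata_port_ops rather
 * than open-coding .qc_defer and .hardreset; example_lld_qc_issue is a
 * made-up driver callback.
 */
static unsigned int example_lld_qc_issue(struct ata_queued_cmd *qc);

static struct ata_port_operations example_lld_port_ops = {
	/* pulls in ata_std_qc_defer and sata_std_hardreset */
	.inherits	= &sata_port_ops,
	.qc_issue	= example_lld_qc_issue,
};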
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 473e00a58a8b..3328a6febc13 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1691,9 +1691,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
} else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
- } else {
- /* Keep the SCSI ML and status byte, clear host byte. */
- cmd->result &= 0x0000ffff;
}
ata_qc_done(qc);
@@ -2094,7 +2091,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
if (ata_id_has_trim(args->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
- if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
max_blocks = 128 << (20 - SECTOR_SHIFT);
put_unaligned_be64(max_blocks, &rbuf[36]);
@@ -2259,7 +2256,7 @@ static inline u16 ata_xlat_cdl_limit(u8 *buf)
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
u8 spg)
{
- u8 *b, *cdl = dev->cdl, *desc;
+ u8 *b, *cdl = dev->cdl->desc_log_buf, *desc;
u32 policy;
int i;
@@ -2572,11 +2569,11 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id) &&
- !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+ !(dev->quirks & ATA_QUIRK_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
- dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
ata_dev_info(dev, "Enabling discard_zeroes_data\n");
rbuf[14] |= 0x40; /* LBPRZ */
}
@@ -3240,8 +3237,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- if (!unmap ||
- (dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) ||
!ata_id_has_trim(dev->id)) {
fp = 1;
bp = 3;
@@ -4617,16 +4613,15 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
ata_for_each_dev(dev, link, ALL) {
unsigned long flags;
- if (!(dev->flags & ATA_DFLAG_DETACHED))
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(dev->flags & ATA_DFLAG_DETACHED)) {
+ spin_unlock_irqrestore(ap->lock, flags);
continue;
+ }
- spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
-
ata_scsi_remove_dev(dev);
}
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 250f7dae05fd..67f277e1c3bf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -26,7 +26,6 @@ static struct workqueue_struct *ata_sff_wq;
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
@@ -970,7 +969,7 @@ fsm_start:
* We ignore ERR here to workaround and proceed sending
* the CDB.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+ if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
ata_ehi_push_desc(ehi, "ST_FIRST: "
"DRQ=1 with device error, "
"dev_stat 0x%X", status);
@@ -1045,8 +1044,8 @@ fsm_start:
* IDENTIFY, it's likely a phantom
* device. Mark hint.
*/
- if (qc->dev->horkage &
- ATA_HORKAGE_DIAGNOSTIC)
+ if (qc->dev->quirks &
+ ATA_QUIRK_DIAGNOSTIC)
qc->err_mask |=
AC_ERR_NODEV_HINT;
} else {
@@ -1762,7 +1761,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
/* see if device passed diags: continue and warn later */
if (err == 0)
/* diagnostic fail : do nothing _YET_ */
- dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
else if (err == 1)
/* do nothing */ ;
else if ((dev->devno == 0) && (err == 0x81))
@@ -1781,7 +1780,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
* device signature is invalid with diagnostic
* failure.
*/
- if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+ if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 9e24c33388f9..e898be49df6b 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -80,12 +80,6 @@ struct ata_internal {
#define transport_class_to_port(dev) \
tdev_to_port((dev)->parent)
-
-/* Device objects are always created whit link objects */
-static int ata_tdev_add(struct ata_device *dev);
-static void ata_tdev_delete(struct ata_device *dev);
-
-
/*
* Hack to allow attributes of the same name in different objects.
*/
@@ -365,135 +359,6 @@ unsigned int ata_port_classify(struct ata_port *ap,
EXPORT_SYMBOL_GPL(ata_port_classify);
/*
- * ATA link attributes
- */
-static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field, format) \
-static ssize_t \
-show_ata_link_##field(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ata_link *link = transport_class_to_link(dev); \
- \
- return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
-}
-
-#define ata_link_linkspeed_attr(field, format) \
- ata_link_show_linkspeed(field, format) \
-static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-
-ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd, noop);
-
-
-static DECLARE_TRANSPORT_CLASS(ata_link_class,
- "ata_link", NULL, NULL, NULL);
-
-static void ata_tlink_release(struct device *dev)
-{
-}
-
-/**
- * ata_is_link -- check if a struct device represents a ATA link
- * @dev: device to check
- *
- * Returns:
- * %1 if the device represents a ATA link, %0 else
- */
-static int ata_is_link(const struct device *dev)
-{
- return dev->release == ata_tlink_release;
-}
-
-static int ata_tlink_match(struct attribute_container *cont,
- struct device *dev)
-{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
- if (!ata_is_link(dev))
- return 0;
- return &i->link_attr_cont.ac == cont;
-}
-
-/**
- * ata_tlink_delete -- remove ATA LINK
- * @link: ATA LINK to remove
- *
- * Removes the specified ATA LINK. remove associated ATA device(s) as well.
- */
-void ata_tlink_delete(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_device *ata_dev;
-
- ata_for_each_dev(ata_dev, link, ALL) {
- ata_tdev_delete(ata_dev);
- }
-
- transport_remove_device(dev);
- device_del(dev);
- transport_destroy_device(dev);
- put_device(dev);
-}
-
-/**
- * ata_tlink_add -- initialize a transport ATA link structure
- * @link: allocated ata_link structure.
- *
- * Initialize an ATA LINK structure for sysfs. It will be added in the
- * device tree below the ATA PORT it belongs to.
- *
- * Returns %0 on success
- */
-int ata_tlink_add(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_port *ap = link->ap;
- struct ata_device *ata_dev;
- int error;
-
- device_initialize(dev);
- dev->parent = &ap->tdev;
- dev->release = ata_tlink_release;
- if (ata_is_host_link(link))
- dev_set_name(dev, "link%d", ap->print_id);
- else
- dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
-
- transport_setup_device(dev);
-
- error = device_add(dev);
- if (error) {
- goto tlink_err;
- }
-
- error = transport_add_device(dev);
- if (error)
- goto tlink_transport_err;
- transport_configure_device(dev);
-
- ata_for_each_dev(ata_dev, link, ALL) {
- error = ata_tdev_add(ata_dev);
- if (error) {
- goto tlink_dev_err;
- }
- }
- return 0;
- tlink_dev_err:
- while (--ata_dev >= link->device) {
- ata_tdev_delete(ata_dev);
- }
- transport_remove_device(dev);
- tlink_transport_err:
- device_del(dev);
- tlink_err:
- transport_destroy_device(dev);
- put_device(dev);
- return error;
-}
-
-/*
* ATA device attributes
*/
@@ -617,10 +482,10 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ else if (ata_dev->quirks & ATA_QUIRK_NOTRIM)
mode = "forced_unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
- mode = "forced_unqueued";
+ else if (ata_dev->quirks & ATA_QUIRK_NO_NCQ_TRIM)
+ mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
mode = "queued";
else
@@ -643,9 +508,9 @@ static void ata_tdev_release(struct device *dev)
* @dev: device to check
*
* Returns:
- * %1 if the device represents a ATA device, %0 else
+ * true if the device represents an ATA device, false otherwise
*/
-static int ata_is_ata_dev(const struct device *dev)
+static bool ata_is_ata_dev(const struct device *dev)
{
return dev->release == ata_tdev_release;
}
@@ -653,21 +518,22 @@ static int ata_is_ata_dev(const struct device *dev)
static int ata_tdev_match(struct attribute_container *cont,
struct device *dev)
{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
if (!ata_is_ata_dev(dev))
return 0;
return &i->dev_attr_cont.ac == cont;
}
/**
- * ata_tdev_free -- free a ATA LINK
- * @dev: ATA PHY to free
+ * ata_tdev_free -- free an ATA transport device
+ * @dev: struct ata_device owning the transport device to free
*
- * Frees the specified ATA PHY.
+ * Free the ATA transport device for the specified ATA device.
*
* Note:
- * This function must only be called on a PHY that has not
- * successfully been added using ata_tdev_add().
+ * This function must only be called for an ATA transport device that has not
+ * yet successfully been added using ata_tdev_add().
*/
static void ata_tdev_free(struct ata_device *dev)
{
@@ -676,10 +542,10 @@ static void ata_tdev_free(struct ata_device *dev)
}
/**
- * ata_tdev_delete -- remove ATA device
- * @ata_dev: ATA device to remove
+ * ata_tdev_delete -- remove an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to delete
*
- * Removes the specified ATA device.
+ * Removes the ATA transport device for the specified ATA device.
*/
static void ata_tdev_delete(struct ata_device *ata_dev)
{
@@ -690,15 +556,14 @@ static void ata_tdev_delete(struct ata_device *ata_dev)
ata_tdev_free(ata_dev);
}
-
/**
- * ata_tdev_add -- initialize a transport ATA device structure.
- * @ata_dev: ata_dev structure.
+ * ata_tdev_add -- initialize an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to add
*
- * Initialize an ATA device structure for sysfs. It will be added in the
- * device tree below the ATA LINK device it belongs to.
+ * Initialize an ATA transport device for sysfs. It will be added in the
+ * device tree below the ATA link device it belongs to.
*
- * Returns %0 on success
+ * Returns %0 on success and a negative error code on error.
*/
static int ata_tdev_add(struct ata_device *ata_dev)
{
@@ -734,6 +599,136 @@ static int ata_tdev_add(struct ata_device *ata_dev)
return 0;
}
+/*
+ * ATA link attributes
+ */
+static int noop(int x)
+{
+ return x;
+}
+
+#define ata_link_show_linkspeed(field, format) \
+static ssize_t \
+show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+ return sprintf(buf, "%s\n", \
+ sata_spd_string(format(link->field))); \
+}
+
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
+static DEVICE_ATTR(field, 0444, show_ata_link_##field, NULL)
+
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
+
+static DECLARE_TRANSPORT_CLASS(ata_link_class,
+ "ata_link", NULL, NULL, NULL);
+
+static void ata_tlink_release(struct device *dev)
+{
+}
+
+/**
+ * ata_is_link -- check if a struct device represents an ATA link
+ * @dev: device to check
+ *
+ * Returns:
+ * true if the device represents an ATA link, false otherwise
+ */
+static bool ata_is_link(const struct device *dev)
+{
+ return dev->release == ata_tlink_release;
+}
+
+static int ata_tlink_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
+ if (!ata_is_link(dev))
+ return 0;
+ return &i->link_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tlink_delete -- remove an ATA link transport device
+ * @link: struct ata_link owning the link transport device to remove
+ *
+ * Removes the link transport device of the specified ATA link. This also
+ * removes the ATA device(s) associated with the link.
+ */
+void ata_tlink_delete(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_device *ata_dev;
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ ata_tdev_delete(ata_dev);
+ }
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(dev);
+}
+
+/**
+ * ata_tlink_add -- initialize an ATA link transport device
+ * @link: struct ata_link owning the link transport device to initialize
+ *
+ * Initialize an ATA link transport device for sysfs. It will be added in the
+ * device tree below the ATA port it belongs to.
+ *
+ * Returns %0 on success and a negative error code on error.
+ */
+int ata_tlink_add(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_port *ap = link->ap;
+ struct ata_device *ata_dev;
+ int error;
+
+ device_initialize(dev);
+ dev->parent = &ap->tdev;
+ dev->release = ata_tlink_release;
+ if (ata_is_host_link(link))
+ dev_set_name(dev, "link%d", ap->print_id);
+ else
+ dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
+
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error)
+ goto tlink_err;
+
+ error = transport_add_device(dev);
+ if (error)
+ goto tlink_transport_err;
+ transport_configure_device(dev);
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ error = ata_tdev_add(ata_dev);
+ if (error)
+ goto tlink_dev_err;
+ }
+ return 0;
+ tlink_dev_err:
+ while (--ata_dev >= link->device)
+ ata_tdev_delete(ata_dev);
+ transport_remove_device(dev);
+ tlink_transport_err:
+ device_del(dev);
+ tlink_err:
+ transport_destroy_device(dev);
+ put_device(dev);
+ return error;
+}
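/*
 * Editor's note (not part of this patch): the tlink_dev_err unwinding
 * above relies on ata_dev pointing into the link->device[] array, so
 * "while (--ata_dev >= link->device)" walks back over exactly the
 * transport devices that were already added, deleting them in reverse
 * order of creation.
 */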
/*
* Setup / Teardown code
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index eefda51f97d3..4b83b517caec 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -112,7 +112,7 @@ static bool zpready(struct ata_device *dev)
if (!ret || sense_key != NOT_READY)
return false;
- sense_buf = dev->link->ap->sector_buf;
+ sense_buf = dev->sector_buf;
ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
if (ret)
return false;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6abf265f626e..0337be4faec7 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,6 +38,12 @@ extern int libata_noacpi;
extern int libata_allow_tpm;
extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+
+static inline bool ata_sstatus_online(u32 sstatus)
+{
+ return (sstatus & 0xf) == 0x3;
+}
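/*
 * Editor's note (not part of this patch): the low nibble of SStatus is
 * the DET field, and 0x3 means "device present, phy communication
 * established". For example, ata_sstatus_online(0x123) is true (DET = 3,
 * SPD = 2, i.e. 3.0 Gbps), while ata_sstatus_online(0x4) is false (phy
 * in offline mode).
 */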
+
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -65,7 +71,7 @@ extern bool ata_dev_power_init_tf(struct ata_device *dev,
struct ata_taskfile *tf, bool set_active);
extern void ata_dev_power_set_standby(struct ata_device *dev);
extern void ata_dev_power_set_active(struct ata_device *dev);
-extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+void ata_dev_free_resources(struct ata_device *dev);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
u8 subcmd, u8 action);
@@ -84,9 +90,25 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
+void ata_dev_cleanup_cdl_resources(struct ata_device *dev);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+/* libata-sata.c */
+#ifdef CONFIG_SATA_HOST
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+int ata_eh_get_ncq_success_sense(struct ata_link *link);
+#else
+static inline int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ return -EOPNOTSUPP;
+}
+static inline int ata_eh_get_ncq_success_sense(struct ata_link *link)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
@@ -124,7 +146,6 @@ extern void ata_scsi_set_sense_information(struct ata_device *dev,
const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
-extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
@@ -162,6 +183,7 @@ extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 027cf67101ef..3163c8d9cef5 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -8,9 +8,9 @@
* PIO mode and smarter silicon.
*
* The practical upshot of this is that we must always tune the
- * drive for the right PIO mode. We must also ignore all the blacklists
- * and the drive bus mastering DMA information. Also to confuse matters
- * further we can do DMA on PIO only drives.
+ * drive for the right PIO mode and ignore the drive bus mastering DMA
+ * information. Also, to confuse matters further, we can do DMA on
+ * PIO-only drives.
*
* DMA on the 5510 also requires we disable_hlt() during DMA on early
* revisions.
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c84a20892f1b..a34e56a9d535 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -884,8 +884,6 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_noop_qc_prep,
-
.softreset = ep93xx_pata_softreset,
.hardreset = ATA_OP_NULL,
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 4d6ef90ccc77..73a9a5109238 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -549,6 +549,7 @@ static const struct of_device_id pata_ftide010_of_match[] = {
{ .compatible = "faraday,ftide010", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, pata_ftide010_of_match);
static struct platform_driver pata_ftide010_driver = {
.driver = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index bdccd1ba1524..5280e9960025 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -170,8 +170,8 @@ static const char * const bad_ata66_3[] = {
NULL
};
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -197,11 +197,11 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+ if (hpt_dma_broken(adev, "UDMA3", bad_ata66_3))
mask &= ~(0xF8 << ATA_SHIFT_UDMA);
- if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+ if (hpt_dma_broken(adev, "UDMA4", bad_ata66_4))
mask &= ~(0xF0 << ATA_SHIFT_UDMA);
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index c0329cf01135..4af22b819416 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -218,8 +218,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
return 0xffffffffU; /* silence compiler warning */
}
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -281,9 +281,9 @@ static const char * const bad_ata100_5[] = {
static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
@@ -300,7 +300,7 @@ static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 9cfb064782c3..61d8760f09d9 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -328,8 +328,6 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_bmdma_port_ops,
- /* no need to build any PRD tables for DMA */
- .qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer32,
.bmdma_setup = pata_icside_bmdma_setup,
.bmdma_start = pata_icside_bmdma_start,
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2fe3fb6102ce..042f6ad1f7c6 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -519,9 +519,9 @@ static void it821x_dev_config(struct ata_device *adev)
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
- adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+ adev->quirks &= ~ATA_QUIRK_DIAGNOSTIC;
/* No HPA in 'smart' mode */
- adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ adev->quirks |= ATA_QUIRK_BROKEN_HPA;
}
/**
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 246bb4f8f1f7..8a9ee828478f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -290,6 +290,7 @@ static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, ixp4xx_pata_of_match);
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 6c317a461a1f..3f9258677915 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -620,7 +620,6 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.bmdma_start = mpc52xx_bmdma_start,
.bmdma_stop = mpc52xx_bmdma_stop,
.bmdma_status = mpc52xx_bmdma_status,
- .qc_prep = ata_noop_qc_prep,
};
static int mpc52xx_ata_init_one(struct device *dev,
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2884acfc4863..0bb9607e7348 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -789,7 +789,6 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
static struct ata_port_operations octeon_cf_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = octeon_cf_check_atapi_dma,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_ata_port_noaction,
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 549ff24a9823..4edddf6bcc15 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -46,10 +46,11 @@
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
- * can overrun their FIFOs when used with the CSB5 */
-
-static const char *csb_bad_ata100[] = {
+/*
+ * Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5.
+ */
+static const char * const csb_bad_ata100[] = {
"ST320011A",
"ST340016A",
"ST360021A",
@@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in
* @adev: ATA device
* @mask: Mask of proposed modes
*
- * Check the blacklist and disable UDMA5 if matched
+ * Check the list of devices with broken UDMA5 and
+ * disable UDMA5 if matched.
*/
-
-static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
+static unsigned int serverworks_csb_filter(struct ata_device *adev,
+ unsigned int mask)
{
const char *p;
char model_num[ATA_ID_PROD_LEN + 1];
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 4c270999ba3c..f574e3c3f5b4 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -417,6 +417,7 @@ static const struct of_device_id gemini_sata_of_match[] = {
{ .compatible = "cortina,gemini-sata-bridge", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, gemini_sata_of_match);
static struct platform_driver gemini_sata_driver = {
.driver = {
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index cc77c0248284..3a99f66198a9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
-} sil_blacklist [] = {
+} sil_quirks[] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
@@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap)
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
- * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
- * The Maxtor quirk is in the blacklist, but I'm keeping the original
+ * 20040111 - Seagate drives affected by the Mod15Write bug are quirked.
+ * The Maxtor quirk is in sil_quirks, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
@@ -616,13 +616,13 @@ static void sil_dev_config(struct ata_device *dev)
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
- dev->horkage |= ATA_HORKAGE_NOTRIM;
+ dev->quirks |= ATA_QUIRK_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
- for (n = 0; sil_blacklist[n].product; n++)
- if (!strcmp(sil_blacklist[n].product, model_num)) {
- quirks = sil_blacklist[n].quirk;
+ for (n = 0; sil_quirks[n].product; n++)
+ if (!strcmp(sil_quirks[n].product, model_num)) {
+ quirks = sil_quirks[n].quirk;
break;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2b8fd6bb7da0..064eb52ff7e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -226,6 +226,7 @@ config GENERIC_ARCH_TOPOLOGY
config GENERIC_ARCH_NUMA
bool
+ select NUMA_MEMBLKS
help
Enable support for generic NUMA implementation. Currently, RISC-V
and ARM64 use it.
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 555aee3ee8e7..e18701676426 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -12,16 +12,12 @@
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/numa_memblks.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-static int numa_distance_cnt;
-static u8 *numa_distance;
bool numa_off;
static __init int numa_parse_early_param(char *opt)
@@ -30,6 +26,8 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (str_has_prefix(opt, "off"))
numa_off = true;
+ if (!strncmp(opt, "fake=", 5))
+ return numa_emu_cmdline(opt + 5);
return 0;
}
@@ -61,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
+#ifndef CONFIG_NUMA_EMU
static void numa_update_cpu(unsigned int cpu, bool remove)
{
int nid = cpu_to_node(cpu);
@@ -83,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, true);
}
+#endif
void numa_clear_node(unsigned int cpu)
{
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-int __init early_cpu_to_node(int cpu)
+int early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
@@ -189,174 +189,24 @@ void __init setup_per_cpu_areas(void)
}
#endif
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- int ret;
-
- ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
- if (ret < 0) {
- pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (end - 1), nid);
- return ret;
- }
-
- node_set(nid, numa_nodes_parsed);
- return ret;
-}
-
/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
+ alloc_node_data(nid);
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
- size_t size;
-
- if (!numa_distance)
- return;
-
- size = numa_distance_cnt * numa_distance_cnt *
- sizeof(numa_distance[0]);
-
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
- size_t size;
- int i, j;
-
- size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- numa_distance = memblock_alloc(size, PAGE_SIZE);
- if (WARN_ON(!numa_distance))
- return -ENOMEM;
-
- numa_distance_cnt = nr_node_ids;
-
- /* fill with the default distances */
- for (i = 0; i < numa_distance_cnt; i++)
- for (j = 0; j < numa_distance_cnt; j++)
- numa_distance[i * numa_distance_cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
-
- pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance) {
- pr_warn_once("Warning: distance table not allocated yet\n");
- return;
- }
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
static int __init numa_register_nodes(void)
{
int nid;
- struct memblock_region *mblk;
-
- /* Check that valid nid is set to memblks */
- for_each_mem_region(mblk) {
- int mblk_nid = memblock_get_region_node(mblk);
- phys_addr_t start = mblk->base;
- phys_addr_t end = mblk->base + mblk->size - 1;
-
- if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
- mblk_nid, &start, &end);
- return -EINVAL;
- }
- }
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
@@ -381,11 +231,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- ret = numa_alloc_distance();
- if (ret < 0)
- return ret;
-
- ret = init_func();
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false);
if (ret < 0)
goto out_free_distance;
@@ -403,7 +249,7 @@ static int __init numa_init(int (*init_func)(void))
return 0;
out_free_distance:
- numa_free_distance();
+ numa_reset_distance();
return ret;
}
@@ -433,6 +279,7 @@ static int __init dummy_numa_init(void)
pr_err("NUMA init failed\n");
return ret;
}
+ node_set(0, numa_nodes_parsed);
numa_off = true;
return 0;
@@ -475,3 +322,54 @@ void __init arch_numa_init(void)
numa_init(dummy_numa_init);
}
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
+{
+ int i, j;
+
+ /*
+ * Transform cpu_to_node_map table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
+ */
+ for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
+ if (cpu_to_node_map[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (cpu_to_node_map[i] == emu_nid_to_phys[j])
+ break;
+ cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
+ }
+}
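/*
 * Editor's note: a worked example (not part of this patch) of the
 * reverse mapping above. With emu_nid_to_phys[] = { 0, 0, 1, 1 } (four
 * emulated nodes split across two physical nodes), a CPU recorded on
 * physical nid 1 is remapped to the first emulated nid backed by it:
 *
 *	cpu_to_node_map[cpu] == 1 before the loop
 *	j scans: emu_nid_to_phys[0] == 0, [1] == 0, [2] == 1 -> break
 *	cpu_to_node_map[cpu] == 2 after the loop
 */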
+
+u64 __init numa_emu_dma_end(void)
+{
+ return memblock_start_of_DRAM() + SZ_4G;
+}
+
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
+{
+ struct cpumask *mask;
+
+ if (node == NUMA_NO_NODE)
+ return;
+
+ mask = node_to_cpumask_map[node];
+ if (!cpumask_available(mask)) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return;
+ }
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
+ pr_debug("%s cpu %d node %d: mask now %*pbl\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, cpumask_pr_args(mask));
+}
+#endif /* CONFIG_NUMA_EMU */
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9b745ba54de1..964111361497 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -393,6 +393,7 @@ bool device_is_bound(struct device *dev)
{
return dev->p && klist_node_attached(&dev->p->knode_driver);
}
+EXPORT_SYMBOL_GPL(device_is_bound);
static void driver_bound(struct device *dev)
{
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 327d168dd37a..8c34ae1cd8d5 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -277,6 +277,51 @@ err_attach:
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
/**
+ * devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list().
+ * @_list: The list of PM domains to detach.
+ *
+ * This function reverses the actions from devm_pm_domain_attach_list(). It
+ * will be invoked implicitly during the remove phase if the driver used
+ * devm_pm_domain_attach_list() to attach the PM domains.
+ */
+static void devm_pm_domain_detach_list(void *_list)
+{
+ struct dev_pm_domain_list *list = _list;
+
+ dev_pm_domain_detach_list(list);
+}
+
+/**
+ * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list()
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * NOTE: this will also handle calling devm_pm_domain_detach_list() for
+ * you during the remove phase.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure.
+ */
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ int ret, num_pds;
+
+ num_pds = dev_pm_domain_attach_list(dev, data, list);
+ if (num_pds <= 0)
+ return num_pds;
+
+ ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
+ if (ret)
+ return ret;
+
+ return num_pds;
+}
+EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
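/*
 * Editor's note: a hypothetical usage sketch (not part of this patch).
 * With the devres-managed variant, a driver's probe path no longer needs
 * a matching detach call in its error paths or remove callback; the
 * domain names below are made up for illustration.
 */
static int example_probe(struct platform_device *pdev)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	ret = devm_pm_domain_attach_list(&pdev->dev, &pd_data, &pd_list);
	if (ret < 0)
		return ret;

	/* domains are detached automatically when the driver unbinds */
	return 0;
}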
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 9b17c77dec9d..f36d3618b67c 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -27,7 +27,7 @@ static int regcache_flat_init(struct regmap *map)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
- + 1, sizeof(unsigned int), GFP_KERNEL);
+ + 1, sizeof(unsigned int), map->alloc_flags);
if (!map->cache)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 2dea9d259c49..8d27d3653ea3 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -348,7 +348,7 @@ static int regcache_maple_init(struct regmap *map)
int ret;
int range_start;
- mt = kmalloc(sizeof(*mt), GFP_KERNEL);
+ mt = kmalloc(sizeof(*mt), map->alloc_flags);
if (!mt)
return -ENOMEM;
map->cache = mt;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 3db88bbcae0f..188438186589 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -187,7 +187,7 @@ static int regcache_rbtree_init(struct regmap *map)
int i;
int ret;
- map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+ map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
if (!map->cache)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7ec1ec605335..d3659ba3cc11 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -195,7 +195,9 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
ret = map->cache_ops->init(map);
+ map->unlock(map->lock_arg);
if (ret)
goto err_free;
}
@@ -223,7 +225,9 @@ void regcache_exit(struct regmap *map)
if (map->cache_ops->exit) {
dev_dbg(map->dev, "Destroying %s cache\n",
map->cache_ops->name);
+ map->lock(map->lock_arg);
map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
}
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d3ec1345b5b5..a750e48a26b8 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -608,6 +608,30 @@ int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
+static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data *d)
+{
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = chip->num_irqs,
+ .hwirq_max = chip->num_irqs,
+ .virq_base = irq_base,
+ .ops = &regmap_domain_ops,
+ .host_data = d,
+ .name_suffix = chip->domain_suffix,
+ };
+
+ d->domain = irq_domain_instantiate(&info);
+ if (IS_ERR(d->domain)) {
+ dev_err(d->map->dev, "Failed to create IRQ domain\n");
+ return PTR_ERR(d->domain);
+ }
+
+ return 0;
+}
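/*
 * Editor's note (not part of this patch): irq_domain_instantiate()
 * performs the legacy association itself when info.virq_base is
 * non-zero, so passing irq_base through as .virq_base preserves the
 * behaviour of the removed irq_domain_create_legacy() /
 * irq_domain_create_linear() split.
 */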
+
/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
@@ -856,18 +880,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
}
- if (irq_base)
- d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
- irq_base, 0,
- &regmap_domain_ops, d);
- else
- d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
- &regmap_domain_ops, d);
- if (!d->domain) {
- dev_err(map->dev, "Failed to create IRQ domain\n");
- ret = -ENOMEM;
+ ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
+ if (ret)
goto err_alloc;
- }
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index d790c7df5cac..4bf3f1e59ed7 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -22,6 +22,7 @@ struct regmap_test_param {
enum regmap_endian val_endian;
unsigned int from_reg;
+ bool fast_io;
};
static void get_changed_bytes(void *orig, void *new, size_t size)
@@ -80,41 +81,52 @@ static const char *regmap_endian_name(enum regmap_endian endian)
static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
regcache_type_name(param->cache),
regmap_endian_name(param->val_endian),
+ param->fast_io ? " fast I/O" : "",
param->from_reg);
}
static const struct regmap_test_param regcache_types_list[] = {
{ .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_NONE, .fast_io = true },
{ .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
static const struct regmap_test_param real_cache_types_only_list[] = {
{ .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_FLAT, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
+ { .cache = REGCACHE_MAPLE, .fast_io = true },
};
KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -125,11 +137,13 @@ KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
static const struct regmap_test_param sparse_cache_types_list[] = {
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -151,8 +165,7 @@ static struct regmap *gen_regmap(struct kunit *test,
struct reg_default *defaults;
config->cache_type = param->cache;
- config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
- config->cache_type == REGCACHE_MAPLE;
+ config->fast_io = param->fast_io;
if (config->max_register == 0) {
config->max_register = param->from_reg;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bfc6bc1eb3a4..9ed842d17642 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1445,6 +1445,7 @@ void regmap_exit(struct regmap *map)
struct regmap_async *async;
regcache_exit(map);
+
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index ed3be52ab63d..8540052d37c5 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
}
/* If the root port is capable of returning Config Request
- * Retry Status (CRS) Completion Status to software then
+ * Retry Status (RRS) Completion Status to software then
* enable the feature.
*/
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
@@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
NULL);
root_cap = cap_ptr + PCI_EXP_RTCAP;
bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
- if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
- /* Enable CRS software visibility */
+ if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) {
+ /* Enable Configuration RRS Software Visibility */
root_ctrl = cap_ptr + PCI_EXP_RTCTL;
- val16 = PCI_EXP_RTCTL_CRSSVE;
+ val16 = PCI_EXP_RTCTL_RRS_SVE;
bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
sizeof(u16));
@@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
* 100 ms wait time from the end of Reset. If the device is
* not done with its internal initialization, it must at
* least return a completion TLP, with a completion status
- * of "Configuration Request Retry Status (CRS)". The root
+ * of "Configuration Request Retry Status (RRS)". The root
* complex must complete the request to the host by returning
* a read-data value of 0001h for the Vendor ID field and
* all 1s for any additional bytes included in the request.
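These bcma hunks are part of the tree-wide rename of PCIe "CRS" (Configuration Request Retry Status) to "RRS", following the PCIe r6.0 terminology change; the behavior is unchanged, only the macro and comment spellings move. A hedged sketch of the same enable sequence through the generic PCI helpers, assuming the renamed pci_regs.h macros from this series (PCI_EXP_RTCAP_RRS_SV, formerly PCI_EXP_RTCAP_CRSVIS, and PCI_EXP_RTCTL_RRS_SVE, formerly PCI_EXP_RTCTL_CRSSVE):

    #include <linux/pci.h>

    static void demo_enable_rrs_sv(struct pci_dev *root)
    {
        u16 cap;

        /* Advertisement bit in Root Capabilities... */
        pcie_capability_read_word(root, PCI_EXP_RTCAP, &cap);
        if (cap & PCI_EXP_RTCAP_RRS_SV)
            /* ...enable bit in Root Control. */
            pcie_capability_set_word(root, PCI_EXP_RTCTL,
                                     PCI_EXP_RTCTL_RRS_SVE);
    }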
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 94dc0a235919..2a05d955e30b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -297,10 +297,6 @@ struct drbd_epoch {
unsigned long flags;
};
-/* Prototype declaration of function defined in drbd_receiver.c */
-int drbdd_init(struct drbd_thread *);
-int drbd_asender(struct drbd_thread *);
-
/* drbd_epoch flag bits */
enum {
DE_HAVE_BARRIER_NUMBER,
@@ -864,7 +860,6 @@ struct drbd_device {
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
struct list_head net_ee; /* zero-copy network send in progress */
- int next_barrier_nr;
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
@@ -1390,9 +1385,6 @@ extern void conn_free_crypto(struct drbd_connection *connection);
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
void drbd_submit_bio(struct bio *bio);
-extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int is_valid_ar_handle(struct drbd_request *, sector_t);
-
/* drbd_nl.c */
@@ -1474,7 +1466,6 @@ extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
-extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
@@ -1488,7 +1479,6 @@ extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
-extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
@@ -1504,7 +1494,6 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
-extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a9e49b212341..0d74d75260ef 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1550,7 +1550,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (!drbd_disable_sendpage && sendpage_ok(page))
+ if (!drbd_disable_sendpage && sendpages_ok(page, len, offset))
msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;
drbd_update_congested(peer_device->connection);
@@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
+ }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
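The drbd_uuid_set_bm() fix is a classic check-then-act race repair: the early-return test read md.uuid[UI_BITMAP] before taking md.uuid_lock, so a concurrent writer could change the value between the unlocked check and the locked update. Moving the lock acquisition above the check closes the window. The pattern in miniature, with demo_* as illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned long demo_val;

    static void demo_set(unsigned long val)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        if (demo_val == 0 && val == 0) {   /* decided under the lock */
            spin_unlock_irqrestore(&demo_lock, flags);
            return;
        }
        demo_val = val;                    /* updated under the same lock */
        spin_unlock_irqrestore(&demo_lock, flags);
    }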
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index e858e7e0383f..c2b6c4d9729d 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c6ef0546ffc9..11901f2812ad 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2269,25 +2269,12 @@ static const struct file_operations mtip_flags_fops = {
.llseek = no_llseek,
};
-static int mtip_hw_debugfs_init(struct driver_data *dd)
+static void mtip_hw_debugfs_init(struct driver_data *dd)
{
- if (!dfs_parent)
- return -1;
-
dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
- if (IS_ERR_OR_NULL(dd->dfs_node)) {
- dev_warn(&dd->pdev->dev,
- "Error creating node %s under debugfs\n",
- dd->disk->disk_name);
- dd->dfs_node = NULL;
- return -1;
- }
-
debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
debugfs_create_file("registers", 0444, dd->dfs_node, dd,
&mtip_regs_fops);
-
- return 0;
}
static void mtip_hw_debugfs_exit(struct driver_data *dd)
@@ -4043,10 +4030,6 @@ static int __init mtip_init(void)
mtip_major = error;
dfs_parent = debugfs_create_dir("rssd", NULL);
- if (IS_ERR_OR_NULL(dfs_parent)) {
- pr_warn("Error creating debugfs parent\n");
- dfs_parent = NULL;
- }
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
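The mtip32xx and pktcdvd deletions follow the debugfs design rule: creation failures are deliberately ignorable. debugfs_create_dir() returns an ERR_PTR on error rather than NULL, and the debugfs_create_*() calls tolerate an ERR_PTR parent, so checking (let alone warning about) the result only adds dead code. The intended idiom, sketched with hypothetical demo_* names:

    #include <linux/debugfs.h>

    static struct dentry *demo_dir;
    static u32 demo_counter;

    static void demo_debugfs_init(void)
    {
        demo_dir = debugfs_create_dir("demo", NULL);
        /* No error check: an ERR_PTR parent is handled gracefully
         * and the file creation silently becomes a no-op. */
        debugfs_create_u32("counter", 0444, demo_dir, &demo_counter);
    }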
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 41a90150b501..b852050d8a96 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
+ lockdep_assert_held(&cmd->lock);
+
+ /*
+ * Clear the INFLIGHT flag so that this cmd won't be completed in
+ * the normal completion path.
+ *
+ * INFLIGHT will be set again when the cmd is queued to nbd the
+ * next time.
+ */
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
blk_mq_requeue_request(req, true);
}
@@ -339,7 +350,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim = queue_limits_start_update(nbd->disk->queue);
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
- lim.max_hw_discard_sectors = UINT_MAX;
+ lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT;
else
lim.max_hw_discard_sectors = 0;
if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
@@ -350,6 +361,11 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.features |= BLK_FEAT_WRITE_CACHE;
lim.features &= ~BLK_FEAT_FUA;
}
+ if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+ if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
+ lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT;
+
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
error = queue_limits_commit_update(nbd->disk->queue, &lim);
@@ -418,6 +434,8 @@ static u32 req_to_nbd_cmd_type(struct request *req)
return NBD_CMD_WRITE;
case REQ_OP_READ:
return NBD_CMD_READ;
+ case REQ_OP_WRITE_ZEROES:
+ return NBD_CMD_WRITE_ZEROES;
default:
return U32_MAX;
}
@@ -488,8 +506,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
- mutex_unlock(&cmd->lock);
nbd_requeue_cmd(cmd);
+ mutex_unlock(&cmd->lock);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@@ -634,6 +652,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
if (req->cmd_flags & REQ_FUA)
nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+ if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
+ nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE;
/* We did a partial send previously, and we at least sent the whole
* request struct, so just go and send the rest of the pages in the
@@ -1703,6 +1723,10 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
seq_puts(s, "NBD_FLAG_SEND_FUA\n");
if (flags & NBD_FLAG_SEND_TRIM)
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
+ if (flags & NBD_FLAG_SEND_WRITE_ZEROES)
+ seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n");
+ if (flags & NBD_FLAG_ROTATIONAL)
+ seq_puts(s, "NBD_FLAG_ROTATIONAL\n");
return 0;
}
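Two details in the nbd series deserve a note. First, the timeout path now requeues while still holding cmd->lock, and nbd_requeue_cmd() clears NBD_CMD_INFLIGHT under that same lock, so the normal completion path can no longer race with the requeue. Second, the discard and write-zeroes limits are capped at UINT_MAX >> SECTOR_SHIFT rather than UINT_MAX because queue limits count 512-byte sectors while the NBD wire header carries a 32-bit byte length; the arithmetic, assuming the block layer's SECTOR_SHIFT of 9:

    /*
     * UINT_MAX >> 9 == 8388607 sectors == 4294966784 bytes, which still
     * fits the protocol's 32-bit length field.  A cap of UINT_MAX
     * *sectors* would allow requests of roughly 2 TiB, whose byte
     * length overflows that field.
     */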
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7cece5884b9c..3edb37a41312 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -498,8 +498,6 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
if (!pkt_debugfs_root)
return;
pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
- if (!pd->dfs_d_root)
- return;
pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
pd, &pkt_seq_fops);
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index f6e3a3c4b76c..08ce6d96d04c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -149,15 +149,22 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
- rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
+ rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n");
err = -EINVAL;
goto bio_put;
}
+ bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+ if (bio_has_data(bio) &&
+ bio->bi_iter.bi_size != le32_to_cpu(msg->bi_size)) {
+ rnbd_srv_err_rl(sess_dev, "Datalen mismatch: bio bi_size (%u), bi_size (%u)\n",
+ bio->bi_iter.bi_size, msg->bi_size);
+ err = -EINVAL;
+ goto bio_put;
+ }
bio->bi_end_io = rnbd_dev_bi_end_io;
bio->bi_private = priv;
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
- bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
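The rnbd-srv hunk replaces trust with verification: previously the server overwrote bi_iter.bi_size with the client-supplied msg->bi_size after mapping the payload, so a malformed request could make the bio claim more data than was actually attached. Now the wire value is only compared against what bio_add_page() mapped, and a mismatch is rejected. The validation shape on its own, with demo_msg as a hypothetical header:

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <asm/byteorder.h>

    struct demo_msg {
        __le32 bi_size;    /* length claimed on the wire */
    };

    static int demo_validate(const struct demo_msg *msg, u32 mapped_len)
    {
        /* Convert from wire endianness before comparing or printing. */
        u32 claimed = le32_to_cpu(msg->bi_size);

        return claimed == mapped_len ? 0 : -EINVAL;
    }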
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 1d53a3f48a0e..bca06bfb4bc3 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -71,9 +71,6 @@ struct ublk_rq_data {
struct llist_node node;
struct kref ref;
- __u64 sector;
- __u32 operation;
- __u32 nr_zones;
};
struct ublk_uring_cmd_pdu {
@@ -214,6 +211,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
#ifdef CONFIG_BLK_DEV_ZONED
+struct ublk_zoned_report_desc {
+ __u64 sector;
+ __u32 operation;
+ __u32 nr_zones;
+};
+
+static DEFINE_XARRAY(ublk_zoned_report_descs);
+
+static int ublk_zoned_insert_report_desc(const struct request *req,
+ struct ublk_zoned_report_desc *desc)
+{
+ return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
+ desc, GFP_KERNEL);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
+ const struct request *req)
+{
+ return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
+}
+
+static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
+ const struct request *req)
+{
+ return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
+}
+
static int ublk_get_nr_zones(const struct ublk_device *ub)
{
const struct ublk_param_basic *p = &ub->params.basic;
@@ -308,7 +332,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int zones_in_request =
min_t(unsigned int, remaining_zones, max_zones_per_request);
struct request *req;
- struct ublk_rq_data *pdu;
+ struct ublk_zoned_report_desc desc;
blk_status_t status;
memset(buffer, 0, buffer_length);
@@ -319,20 +343,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
goto out;
}
- pdu = blk_mq_rq_to_pdu(req);
- pdu->operation = UBLK_IO_OP_REPORT_ZONES;
- pdu->sector = sector;
- pdu->nr_zones = zones_in_request;
+ desc.operation = UBLK_IO_OP_REPORT_ZONES;
+ desc.sector = sector;
+ desc.nr_zones = zones_in_request;
+ ret = ublk_zoned_insert_report_desc(req, &desc);
+ if (ret)
+ goto free_req;
ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
GFP_KERNEL);
- if (ret) {
- blk_mq_free_request(req);
- goto out;
- }
+ if (ret)
+ goto erase_desc;
status = blk_execute_rq(req, 0);
ret = blk_status_to_errno(status);
+erase_desc:
+ ublk_zoned_erase_report_desc(req);
+free_req:
blk_mq_free_request(req);
if (ret)
goto out;
@@ -366,7 +393,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
+ struct ublk_zoned_report_desc *desc;
u32 ublk_op;
switch (req_op(req)) {
@@ -389,12 +416,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
- ublk_op = pdu->operation;
+ desc = ublk_zoned_get_report_desc(req);
+ if (!desc)
+ return BLK_STS_IOERR;
+ ublk_op = desc->operation;
switch (ublk_op) {
case UBLK_IO_OP_REPORT_ZONES:
iod->op_flags = ublk_op | ublk_req_build_flags(req);
- iod->nr_zones = pdu->nr_zones;
- iod->start_sector = pdu->sector;
+ iod->nr_zones = desc->nr_zones;
+ iod->start_sector = desc->sector;
return BLK_STS_OK;
default:
return BLK_STS_IOERR;
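The ublk conversion moves the report-zones metadata out of the request pdu into a global xarray keyed by the request pointer, so the data no longer has to share lifetime or layout with the pdu. Note that the descriptor in ublk_report_zones() lives on the stack and is always erased before the function returns, so storing its address is safe. The side-band storage pattern on its own, with demo_* names:

    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_descs);

    /* Any pointer works as a key once cast to unsigned long. */
    static int demo_attach(const void *key, void *desc)
    {
        return xa_insert(&demo_descs, (unsigned long)key, desc,
                         GFP_KERNEL);
    }

    static void *demo_lookup(const void *key)
    {
        return xa_load(&demo_descs, (unsigned long)key);
    }

    static void *demo_detach(const void *key)
    {
        return xa_erase(&demo_descs, (unsigned long)key);
    }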
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index eacf1cba7bf4..6aea609b795c 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,8 +2,6 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
- depends on HAVE_ZSMALLOC
- depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
select ZSMALLOC
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -16,6 +14,49 @@ config ZRAM
See Documentation/admin-guide/blockdev/zram.rst for more information.
+config ZRAM_BACKEND_LZ4
+ bool "lz4 compression support"
+ depends on ZRAM
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_LZ4HC
+ bool "lz4hc compression support"
+ depends on ZRAM
+ select LZ4HC_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_ZSTD
+ bool "zstd compression support"
+ depends on ZRAM
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
+
+config ZRAM_BACKEND_DEFLATE
+ bool "deflate compression support"
+ depends on ZRAM
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+
+config ZRAM_BACKEND_842
+ bool "842 compression support"
+ depends on ZRAM
+ select 842_COMPRESS
+ select 842_DECOMPRESS
+
+config ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ def_bool !ZRAM_BACKEND_LZ4 && !ZRAM_BACKEND_LZ4HC && \
+ !ZRAM_BACKEND_ZSTD && !ZRAM_BACKEND_DEFLATE && \
+ !ZRAM_BACKEND_842
+
+config ZRAM_BACKEND_LZO
+ bool "lzo and lzo-rle compression support" if !ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ default ZRAM_BACKEND_FORCE_LZO
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+
choice
prompt "Default zram compressor"
default ZRAM_DEF_COMP_LZORLE
@@ -23,38 +64,44 @@ choice
config ZRAM_DEF_COMP_LZORLE
bool "lzo-rle"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZO
-config ZRAM_DEF_COMP_ZSTD
- bool "zstd"
- depends on CRYPTO_ZSTD
+config ZRAM_DEF_COMP_LZO
+ bool "lzo"
+ depends on ZRAM_BACKEND_LZO
config ZRAM_DEF_COMP_LZ4
bool "lz4"
- depends on CRYPTO_LZ4
-
-config ZRAM_DEF_COMP_LZO
- bool "lzo"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZ4
config ZRAM_DEF_COMP_LZ4HC
bool "lz4hc"
- depends on CRYPTO_LZ4HC
+ depends on ZRAM_BACKEND_LZ4HC
+
+config ZRAM_DEF_COMP_ZSTD
+ bool "zstd"
+ depends on ZRAM_BACKEND_ZSTD
+
+config ZRAM_DEF_COMP_DEFLATE
+ bool "deflate"
+ depends on ZRAM_BACKEND_DEFLATE
config ZRAM_DEF_COMP_842
bool "842"
- depends on CRYPTO_842
+ depends on ZRAM_BACKEND_842
endchoice
config ZRAM_DEF_COMP
string
default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
- default "zstd" if ZRAM_DEF_COMP_ZSTD
- default "lz4" if ZRAM_DEF_COMP_LZ4
default "lzo" if ZRAM_DEF_COMP_LZO
+ default "lz4" if ZRAM_DEF_COMP_LZ4
default "lz4hc" if ZRAM_DEF_COMP_LZ4HC
+ default "zstd" if ZRAM_DEF_COMP_ZSTD
+ default "deflate" if ZRAM_DEF_COMP_DEFLATE
default "842" if ZRAM_DEF_COMP_842
+ default "unset-value"
config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index de9e457907b1..0fdefd576691 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+
zram-y := zcomp.o zram_drv.o
+zram-$(CONFIG_ZRAM_BACKEND_LZO) += backend_lzorle.o backend_lzo.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4) += backend_lz4.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4HC) += backend_lz4hc.o
+zram-$(CONFIG_ZRAM_BACKEND_ZSTD) += backend_zstd.o
+zram-$(CONFIG_ZRAM_BACKEND_DEFLATE) += backend_deflate.o
+zram-$(CONFIG_ZRAM_BACKEND_842) += backend_842.o
+
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/backend_842.c b/drivers/block/zram/backend_842.c
new file mode 100644
index 000000000000..10d9d5c60f53
--- /dev/null
+++ b/drivers/block/zram/backend_842.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sw842.h>
+#include <linux/vmalloc.h>
+
+#include "backend_842.h"
+
+static void release_params_842(struct zcomp_params *params)
+{
+}
+
+static int setup_params_842(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static void destroy_842(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int create_842(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static int compress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+ int ret;
+
+ ret = sw842_compress(req->src, req->src_len, req->dst, &dlen,
+ ctx->context);
+ if (ret == 0)
+ req->dst_len = dlen;
+ return ret;
+}
+
+static int decompress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+
+ return sw842_decompress(req->src, req->src_len, req->dst, &dlen);
+}
+
+const struct zcomp_ops backend_842 = {
+ .compress = compress_842,
+ .decompress = decompress_842,
+ .create_ctx = create_842,
+ .destroy_ctx = destroy_842,
+ .setup_params = setup_params_842,
+ .release_params = release_params_842,
+ .name = "842",
+};
diff --git a/drivers/block/zram/backend_842.h b/drivers/block/zram/backend_842.h
new file mode 100644
index 000000000000..4dc85c188799
--- /dev/null
+++ b/drivers/block/zram/backend_842.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_842_H__
+#define __BACKEND_842_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_842;
+
+#endif /* __BACKEND_842_H__ */
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
new file mode 100644
index 000000000000..0f7f252c12f4
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+#include "backend_deflate.h"
+
+/* Use the same value as the crypto API */
+#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
+
+struct deflate_ctx {
+ struct z_stream_s cctx;
+ struct z_stream_s dctx;
+};
+
+static void deflate_release_params(struct zcomp_params *params)
+{
+}
+
+static int deflate_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = Z_DEFAULT_COMPRESSION;
+
+ return 0;
+}
+
+static void deflate_destroy(struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ if (zctx->cctx.workspace) {
+ zlib_deflateEnd(&zctx->cctx);
+ vfree(zctx->cctx.workspace);
+ }
+ if (zctx->dctx.workspace) {
+ zlib_inflateEnd(&zctx->dctx);
+ vfree(zctx->dctx.workspace);
+ }
+ kfree(zctx);
+}
+
+static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx;
+ size_t sz;
+ int ret;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ zctx->cctx.workspace = vzalloc(sz);
+ if (!zctx->cctx.workspace)
+ goto error;
+
+ ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ goto error;
+
+ sz = zlib_inflate_workspacesize();
+ zctx->dctx.workspace = vzalloc(sz);
+ if (!zctx->dctx.workspace)
+ goto error;
+
+ ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ if (ret != Z_OK)
+ goto error;
+
+ return 0;
+
+error:
+ deflate_destroy(ctx);
+ return -EINVAL;
+}
+
+static int deflate_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *deflate;
+ int ret;
+
+ deflate = &zctx->cctx;
+ ret = zlib_deflateReset(deflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ deflate->next_in = (u8 *)req->src;
+ deflate->avail_in = req->src_len;
+ deflate->next_out = (u8 *)req->dst;
+ deflate->avail_out = req->dst_len;
+
+ ret = zlib_deflate(deflate, Z_FINISH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dst_len = deflate->total_out;
+ return 0;
+}
+
+static int deflate_decompress(struct zcomp_params *params,
+ struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *inflate;
+ int ret;
+
+ inflate = &zctx->dctx;
+
+ ret = zlib_inflateReset(inflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ inflate->next_in = (u8 *)req->src;
+ inflate->avail_in = req->src_len;
+ inflate->next_out = (u8 *)req->dst;
+ inflate->avail_out = req->dst_len;
+
+ ret = zlib_inflate(inflate, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+const struct zcomp_ops backend_deflate = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .create_ctx = deflate_create,
+ .destroy_ctx = deflate_destroy,
+ .setup_params = deflate_setup_params,
+ .release_params = deflate_release_params,
+ .name = "deflate",
+};
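A note on the deflate parameters above: passing a negative windowBits to zlib_deflateInit2()/zlib_inflateInit2() selects a raw deflate stream, with no zlib wrapper around each compressed page. That wrapper would cost six bytes per stored object (2-byte header plus 4-byte Adler-32 trailer) for nothing, since zram records the compressed length itself. As a quick reference:

    /*
     * zlib windowBits convention (upstream zlib; the kernel wrapper
     * follows it for the cases it supports):
     *   windowBits > 0 : zlib stream, 2-byte header + 4-byte Adler-32
     *   windowBits < 0 : raw deflate, no wrapper -- the choice here
     */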
diff --git a/drivers/block/zram/backend_deflate.h b/drivers/block/zram/backend_deflate.h
new file mode 100644
index 000000000000..a39ac12b114c
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_DEFLATE_H__
+#define __BACKEND_DEFLATE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_deflate;
+
+#endif /* __BACKEND_DEFLATE_H__ */
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
new file mode 100644
index 000000000000..847f3334eb38
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.c
@@ -0,0 +1,127 @@
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4.h"
+
+struct lz4_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_stream_t *cstrm;
+};
+
+static void lz4_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = LZ4_ACCELERATION_DEFAULT;
+
+ return 0;
+}
+
+static void lz4_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ vfree(zctx->mem);
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ kfree(zctx);
+}
+
+static int lz4_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4_destroy(ctx);
+ return -ENOMEM;
+}
+
+static int lz4_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_fast(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ ret = LZ4_loadDict(zctx->cstrm, params->dict, params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_fast_continue(zctx->cstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len, params->level);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4 = {
+ .compress = lz4_compress,
+ .decompress = lz4_decompress,
+ .create_ctx = lz4_create,
+ .destroy_ctx = lz4_destroy,
+ .setup_params = lz4_setup_params,
+ .release_params = lz4_release_params,
+ .name = "lz4",
+};
diff --git a/drivers/block/zram/backend_lz4.h b/drivers/block/zram/backend_lz4.h
new file mode 100644
index 000000000000..c11fa602a703
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4_H__
+#define __BACKEND_LZ4_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4;
+
+#endif /* __BACKEND_LZ4_H__ */
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
new file mode 100644
index 000000000000..5f37d5abcaeb
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -0,0 +1,128 @@
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4hc.h"
+
+struct lz4hc_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_streamHC_t *cstrm;
+};
+
+static void lz4hc_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4hc_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = LZ4HC_DEFAULT_CLEVEL;
+
+ return 0;
+}
+
+static void lz4hc_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ vfree(zctx->mem);
+ kfree(zctx);
+}
+
+static int lz4hc_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4HC_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4hc_destroy(ctx);
+ return -EINVAL;
+}
+
+static int lz4hc_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_HC(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ LZ4_resetStreamHC(zctx->cstrm, params->level);
+ ret = LZ4_loadDictHC(zctx->cstrm, params->dict,
+ params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_HC_continue(zctx->cstrm, req->src, req->dst,
+ req->src_len, req->dst_len);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4hc_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4hc = {
+ .compress = lz4hc_compress,
+ .decompress = lz4hc_decompress,
+ .create_ctx = lz4hc_create,
+ .destroy_ctx = lz4hc_destroy,
+ .setup_params = lz4hc_setup_params,
+ .release_params = lz4hc_release_params,
+ .name = "lz4hc",
+};
diff --git a/drivers/block/zram/backend_lz4hc.h b/drivers/block/zram/backend_lz4hc.h
new file mode 100644
index 000000000000..6de03551ed4d
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4HC_H__
+#define __BACKEND_LZ4HC_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4hc;
+
+#endif /* __BACKEND_LZ4HC_H__ */
diff --git a/drivers/block/zram/backend_lzo.c b/drivers/block/zram/backend_lzo.c
new file mode 100644
index 000000000000..4c906beaae6b
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzo.h"
+
+static void lzo_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzo_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzo_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzo_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzo_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzo_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzo = {
+ .compress = lzo_compress,
+ .decompress = lzo_decompress,
+ .create_ctx = lzo_create,
+ .destroy_ctx = lzo_destroy,
+ .setup_params = lzo_setup_params,
+ .release_params = lzo_release_params,
+ .name = "lzo",
+};
diff --git a/drivers/block/zram/backend_lzo.h b/drivers/block/zram/backend_lzo.h
new file mode 100644
index 000000000000..93d54749e63c
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZO_H__
+#define __BACKEND_LZO_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzo;
+
+#endif /* __BACKEND_LZO_H__ */
diff --git a/drivers/block/zram/backend_lzorle.c b/drivers/block/zram/backend_lzorle.c
new file mode 100644
index 000000000000..10640c96cbfc
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzorle.h"
+
+static void lzorle_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzorle_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzorle_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzorle_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzorle_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzorle1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzorle_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzorle = {
+ .compress = lzorle_compress,
+ .decompress = lzorle_decompress,
+ .create_ctx = lzorle_create,
+ .destroy_ctx = lzorle_destroy,
+ .setup_params = lzorle_setup_params,
+ .release_params = lzorle_release_params,
+ .name = "lzo-rle",
+};
diff --git a/drivers/block/zram/backend_lzorle.h b/drivers/block/zram/backend_lzorle.h
new file mode 100644
index 000000000000..6ecb163b09f1
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZORLE_H__
+#define __BACKEND_LZORLE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzorle;
+
+#endif /* __BACKEND_LZORLE_H__ */
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
new file mode 100644
index 000000000000..1184c0036f44
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zstd.h>
+
+#include "backend_zstd.h"
+
+struct zstd_ctx {
+ zstd_cctx *cctx;
+ zstd_dctx *dctx;
+ void *cctx_mem;
+ void *dctx_mem;
+};
+
+struct zstd_params {
+ zstd_custom_mem custom_mem;
+ zstd_cdict *cdict;
+ zstd_ddict *ddict;
+ zstd_parameters cprm;
+};
+
+/*
+ * For C/D dictionaries we need to provide zstd with zstd_custom_mem,
+ * which zstd uses internally to allocate/free memory when needed.
+ *
+ * This means that allocator.customAlloc() can be called from zcomp_compress()
+ * under local-lock (per-CPU compression stream), in which case we must use
+ * GFP_ATOMIC.
+ *
+ * Another complication is that we can be configured as a swap device,
+ * so allocations must not recurse into direct reclaim; hence the
+ * __GFP_KSWAPD_RECLAIM | __GFP_NOWARN mask on the preemptible path.
+ */
+static void *zstd_custom_alloc(void *opaque, size_t size)
+{
+ if (!preemptible())
+ return kvzalloc(size, GFP_ATOMIC);
+
+ return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN);
+}
+
+static void zstd_custom_free(void *opaque, void *address)
+{
+ kvfree(address);
+}
+
+static void zstd_release_params(struct zcomp_params *params)
+{
+ struct zstd_params *zp = params->drv_data;
+
+ params->drv_data = NULL;
+ if (!zp)
+ return;
+
+ zstd_free_cdict(zp->cdict);
+ zstd_free_ddict(zp->ddict);
+ kfree(zp);
+}
+
+static int zstd_setup_params(struct zcomp_params *params)
+{
+ zstd_compression_parameters prm;
+ struct zstd_params *zp;
+
+ zp = kzalloc(sizeof(*zp), GFP_KERNEL);
+ if (!zp)
+ return -ENOMEM;
+
+ params->drv_data = zp;
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = zstd_default_clevel();
+
+ zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
+
+ zp->custom_mem.customAlloc = zstd_custom_alloc;
+ zp->custom_mem.customFree = zstd_custom_free;
+
+ prm = zstd_get_cparams(params->level, PAGE_SIZE,
+ params->dict_sz);
+
+ zp->cdict = zstd_create_cdict_byreference(params->dict,
+ params->dict_sz,
+ prm,
+ zp->custom_mem);
+ if (!zp->cdict)
+ goto error;
+
+ zp->ddict = zstd_create_ddict_byreference(params->dict,
+ params->dict_sz,
+ zp->custom_mem);
+ if (!zp->ddict)
+ goto error;
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ return -EINVAL;
+}
+
+static void zstd_destroy(struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ /*
+ * If ->cctx_mem and ->dctx_mem were allocated then we didn't use a
+ * C/D dictionary and ->cctx / ->dctx were "embedded" into these
+ * buffers.
+ *
+ * Otherwise we need to release ->cctx / ->dctx explicitly.
+ */
+ if (zctx->cctx_mem)
+ vfree(zctx->cctx_mem);
+ else
+ zstd_free_cctx(zctx->cctx);
+
+ if (zctx->dctx_mem)
+ vfree(zctx->dctx_mem);
+ else
+ zstd_free_dctx(zctx->dctx);
+
+ kfree(zctx);
+}
+
+static int zstd_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx;
+ zstd_parameters prm;
+ size_t sz;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ prm = zstd_get_params(params->level, PAGE_SIZE);
+ sz = zstd_cctx_workspace_bound(&prm.cParams);
+ zctx->cctx_mem = vzalloc(sz);
+ if (!zctx->cctx_mem)
+ goto error;
+
+ zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz);
+ if (!zctx->cctx)
+ goto error;
+
+ sz = zstd_dctx_workspace_bound();
+ zctx->dctx_mem = vzalloc(sz);
+ if (!zctx->dctx_mem)
+ goto error;
+
+ zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz);
+ if (!zctx->dctx)
+ goto error;
+ } else {
+ struct zstd_params *zp = params->drv_data;
+
+ zctx->cctx = zstd_create_cctx_advanced(zp->custom_mem);
+ if (!zctx->cctx)
+ goto error;
+
+ zctx->dctx = zstd_create_dctx_advanced(zp->custom_mem);
+ if (!zctx->dctx)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ zstd_destroy(ctx);
+ return -EINVAL;
+}
+
+static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len,
+ req->src, req->src_len, &zp->cprm);
+ else
+ ret = zstd_compress_using_cdict(zctx->cctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len,
+ zp->cdict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int zstd_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len,
+ req->src, req->src_len);
+ else
+ ret = zstd_decompress_using_ddict(zctx->dctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len, zp->ddict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_zstd = {
+ .compress = zstd_compress,
+ .decompress = zstd_decompress,
+ .create_ctx = zstd_create,
+ .destroy_ctx = zstd_destroy,
+ .setup_params = zstd_setup_params,
+ .release_params = zstd_release_params,
+ .name = "zstd",
+};
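One lifetime subtlety in the zstd backend: the *_byreference dictionary constructors do not copy the buffer, they keep pointing into params->dict, so the dictionary must stay allocated from setup_params() until release_params(). zram honors this by freeing the dictionary only in comp_params_reset(). The caller-side contract, sketched with hypothetical dict_buf and dict_len that are assumed to outlive the zcomp:

    struct zcomp_params demo_params = {
        .dict    = dict_buf,              /* not copied by the backend */
        .dict_sz = dict_len,
        .level   = ZCOMP_PARAM_NO_LEVEL,  /* backend falls back to
                                             zstd_default_clevel() */
    };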
diff --git a/drivers/block/zram/backend_zstd.h b/drivers/block/zram/backend_zstd.h
new file mode 100644
index 000000000000..10fdfff1ec1c
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_ZSTD_H__
+#define __BACKEND_ZSTD_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_zstd;
+
+#endif /* __BACKEND_ZSTD_H__ */
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 8237b08c49d8..bb514403e305 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -1,7 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#include <linux/kernel.h>
#include <linux/string.h>
@@ -15,91 +12,97 @@
#include "zcomp.h"
-static const char * const backends[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_LZO)
- "lzo",
- "lzo-rle",
+#include "backend_lzo.h"
+#include "backend_lzorle.h"
+#include "backend_lz4.h"
+#include "backend_lz4hc.h"
+#include "backend_zstd.h"
+#include "backend_deflate.h"
+#include "backend_842.h"
+
+static const struct zcomp_ops *backends[] = {
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
+ &backend_lzorle,
+ &backend_lzo,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
- "lz4",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
+ &backend_lz4,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
- "lz4hc",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
+ &backend_lz4hc,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_842)
- "842",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
+ &backend_zstd,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
- "zstd",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
+ &backend_deflate,
#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
+ &backend_842,
+#endif
+ NULL
};
-static void zcomp_strm_free(struct zcomp_strm *zstrm)
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- if (!IS_ERR_OR_NULL(zstrm->tfm))
- crypto_free_comp(zstrm->tfm);
+ comp->ops->destroy_ctx(&zstrm->ctx);
vfree(zstrm->buffer);
- zstrm->tfm = NULL;
zstrm->buffer = NULL;
}
-/*
- * Initialize zcomp_strm structure with ->tfm initialized by backend, and
- * ->buffer. Return a negative value on error.
- */
-static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
+ int ret;
+
+ ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
+ if (ret)
+ return ret;
+
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
- if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
- zcomp_strm_free(zstrm);
+ if (!zstrm->buffer) {
+ zcomp_strm_free(comp, zstrm);
return -ENOMEM;
}
return 0;
}
+static const struct zcomp_ops *lookup_backend_ops(const char *comp)
+{
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
bool zcomp_available_algorithm(const char *comp)
{
- /*
- * Crypto does not ignore a trailing new line symbol,
- * so make sure you don't supply a string containing
- * one.
- * This also means that we permit zcomp initialisation
- * with any compressing algorithm known to crypto api.
- */
- return crypto_has_comp(comp, 0, 0) == 1;
+ return lookup_backend_ops(comp) != NULL;
}
/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
- bool known_algorithm = false;
ssize_t sz = 0;
int i;
- for (i = 0; i < ARRAY_SIZE(backends); i++) {
- if (!strcmp(comp, backends[i])) {
- known_algorithm = true;
+ for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
+ if (!strcmp(comp, backends[i]->name)) {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]);
+ "[%s] ", backends[i]->name);
} else {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]);
+ "%s ", backends[i]->name);
}
}
- /*
- * Out-of-tree module known to crypto api or a missing
- * entry in `backends'.
- */
- if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", comp);
-
sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
return sz;
}
@@ -115,38 +118,34 @@ void zcomp_stream_put(struct zcomp *comp)
local_unlock(&comp->stream->lock);
}
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len)
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
{
- /*
- * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
- * because sometimes we can endup having a bigger compressed data
- * due to various reasons: for example compression algorithms tend
- * to add some padding to the compressed buffer. Speaking of padding,
- * comp algorithm `842' pads the compressed length to multiple of 8
- * and returns -ENOSP when the dst memory is not big enough, which
- * is not something that ZRAM wants to see. We can handle the
- * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
- * receive -ERRNO from the compressing backend we can't help it
- * anymore. To make `842' happy we need to tell the exact size of
- * the dst buffer, zram_drv will take care of the fact that
- * compressed buffer is too big.
- */
- *dst_len = PAGE_SIZE * 2;
+ struct zcomp_req req = {
+ .src = src,
+ .dst = zstrm->buffer,
+ .src_len = PAGE_SIZE,
+ .dst_len = 2 * PAGE_SIZE,
+ };
+ int ret;
- return crypto_comp_compress(zstrm->tfm,
- src, PAGE_SIZE,
- zstrm->buffer, dst_len);
+ ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
+ if (!ret)
+ *dst_len = req.dst_len;
+ return ret;
}
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst)
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
{
- unsigned int dst_len = PAGE_SIZE;
-
- return crypto_comp_decompress(zstrm->tfm,
- src, src_len,
- dst, &dst_len);
+ struct zcomp_req req = {
+ .src = src,
+ .dst = dst,
+ .src_len = src_len,
+ .dst_len = PAGE_SIZE,
+ };
+
+ return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
@@ -158,7 +157,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
zstrm = per_cpu_ptr(comp->stream, cpu);
local_lock_init(&zstrm->lock);
- ret = zcomp_strm_init(zstrm, comp);
+ ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
return ret;
@@ -170,11 +169,11 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
struct zcomp_strm *zstrm;
zstrm = per_cpu_ptr(comp->stream, cpu);
- zcomp_strm_free(zstrm);
+ zcomp_strm_free(comp, zstrm);
return 0;
}
-static int zcomp_init(struct zcomp *comp)
+static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
int ret;
@@ -182,12 +181,19 @@ static int zcomp_init(struct zcomp *comp)
if (!comp->stream)
return -ENOMEM;
+ comp->params = params;
+ ret = comp->ops->setup_params(comp->params);
+ if (ret)
+ goto cleanup;
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
+
return 0;
cleanup:
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
return ret;
}
@@ -195,37 +201,35 @@ cleanup:
void zcomp_destroy(struct zcomp *comp)
{
cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
kfree(comp);
}
-/*
- * search available compressors for requested algorithm.
- * allocate new zcomp and initialize it. return compressing
- * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
- * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error, or any other error potentially
- * returned by zcomp_init().
- */
-struct zcomp *zcomp_create(const char *alg)
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
struct zcomp *comp;
int error;
/*
- * Crypto API will execute /sbin/modprobe if the compression module
- * is not loaded yet. We must do it here, otherwise we are about to
- * call /sbin/modprobe under CPU hot-plug lock.
+ * The backends array ends with a sentinel NULL entry, so its
+ * minimum size is 1. To be valid, the array must contain at
+ * least one compression backend in addition to the sentinel
+ * NULL element.
*/
- if (!zcomp_available_algorithm(alg))
- return ERR_PTR(-EINVAL);
+ BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
- comp->name = alg;
- error = zcomp_init(comp);
+ comp->ops = lookup_backend_ops(alg);
+ if (!comp->ops) {
+ kfree(comp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ error = zcomp_init(comp, params);
if (error) {
kfree(comp);
return ERR_PTR(error);
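With the crypto-comp indirection gone, the frontend is a plain function-pointer dispatch through zcomp_ops. A hedged sketch of one compress round trip through the new API, with demo-only error handling (zram itself creates the zcomp once per device, not per page):

    #include <linux/err.h>
    #include "zcomp.h"

    static int demo_compress_once(const void *src, unsigned int *dst_len,
                                  struct zcomp_params *params)
    {
        struct zcomp_strm *zstrm;
        struct zcomp *comp;
        int ret;

        comp = zcomp_create("zstd", params);   /* must be a built-in backend */
        if (IS_ERR(comp))
            return PTR_ERR(comp);

        zstrm = zcomp_stream_get(comp);        /* per-CPU stream, local-locked */
        ret = zcomp_compress(comp, zstrm, src, dst_len);
        zcomp_stream_put(comp);

        zcomp_destroy(comp);
        return ret;
    }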
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index e9fe63da0e9b..ad5762813842 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -1,24 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
+
#include <linux/local_lock.h>
+#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+
+/*
+ * Immutable driver (backend) parameters. The driver may attach private
+ * data to it (e.g. driver representation of the dictionary, etc.).
+ *
+ * This data is kept per-comp and is shared among execution contexts.
+ */
+struct zcomp_params {
+ void *dict;
+ size_t dict_sz;
+ s32 level;
+
+ void *drv_data;
+};
+
+/*
+ * Run-time driver context - scratch buffers, etc. It is modified during
+ * request execution (compression/decompression), cannot be shared, so
+ * it lives in a per-CPU area.
+ */
+struct zcomp_ctx {
+ void *context;
+};
+
struct zcomp_strm {
- /* The members ->buffer and ->tfm are protected by ->lock. */
local_lock_t lock;
- /* compression/decompression buffer */
+ /* compression buffer */
void *buffer;
- struct crypto_comp *tfm;
+ struct zcomp_ctx ctx;
+};
+
+struct zcomp_req {
+ const unsigned char *src;
+ const size_t src_len;
+
+ unsigned char *dst;
+ size_t dst_len;
+};
+
+struct zcomp_ops {
+ int (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+ int (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+
+ int (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx);
+ void (*destroy_ctx)(struct zcomp_ctx *ctx);
+
+ int (*setup_params)(struct zcomp_params *params);
+ void (*release_params)(struct zcomp_params *params);
+
+ const char *name;
};
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm __percpu *stream;
- const char *name;
+ const struct zcomp_ops *ops;
+ struct zcomp_params *params;
struct hlist_node node;
};
@@ -27,16 +73,15 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
-struct zcomp *zcomp_create(const char *alg);
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
void zcomp_stream_put(struct zcomp *comp);
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len);
-
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst);
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len);
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst);
#endif /* _ZCOMP_H_ */
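The zcomp_ops table above is the whole contract a new algorithm has to satisfy. A hypothetical "null" backend, not part of the patch, shows the minimal shape; it only memcpy()s and reports the produced length the way the real backends do:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include "zcomp.h"

    static int null_compress(struct zcomp_params *params,
                             struct zcomp_ctx *ctx, struct zcomp_req *req)
    {
        if (req->dst_len < req->src_len)
            return -ENOSPC;
        memcpy(req->dst, req->src, req->src_len);
        req->dst_len = req->src_len;    /* report the output size */
        return 0;
    }

    static int null_decompress(struct zcomp_params *params,
                               struct zcomp_ctx *ctx, struct zcomp_req *req)
    {
        memcpy(req->dst, req->src, req->src_len);
        return 0;
    }

    /* No scratch memory and no tunable parameters. */
    static int null_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
    {
        return 0;
    }
    static void null_destroy(struct zcomp_ctx *ctx) { }
    static int null_setup_params(struct zcomp_params *params) { return 0; }
    static void null_release_params(struct zcomp_params *params) { }

    const struct zcomp_ops backend_null = {
        .compress       = null_compress,
        .decompress     = null_decompress,
        .create_ctx     = null_create,
        .destroy_ctx    = null_destroy,
        .setup_params   = null_setup_params,
        .release_params = null_release_params,
        .name           = "null",
    };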
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index efcb8d9d274c..c3d245617083 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>
+#include <linux/kernel_read_file.h>
#include "zram_drv.h"
@@ -59,17 +60,17 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
static int zram_slot_trylock(struct zram *zram, u32 index)
{
- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+ return spin_trylock(&zram->table[index].lock);
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+ spin_lock(&zram->table[index].lock);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
}
static inline bool init_done(struct zram *zram)
@@ -998,6 +999,103 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
return 0;
}
+static void comp_params_reset(struct zram *zram, u32 prio)
+{
+ struct zcomp_params *params = &zram->params[prio];
+
+ vfree(params->dict);
+ params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->dict_sz = 0;
+ params->dict = NULL;
+}
+
+static int comp_params_store(struct zram *zram, u32 prio, s32 level,
+ const char *dict_path)
+{
+ ssize_t sz = 0;
+
+ comp_params_reset(zram, prio);
+
+ if (dict_path) {
+ sz = kernel_read_file_from_path(dict_path, 0,
+ &zram->params[prio].dict,
+ INT_MAX,
+ NULL,
+ READING_POLICY);
+ if (sz < 0)
+ return -EINVAL;
+ }
+
+ zram->params[prio].dict_sz = sz;
+ zram->params[prio].level = level;
+ return 0;
+}
+
+static ssize_t algorithm_params_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct zram *zram = dev_to_zram(dev);
+ int ret;
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ if (!val || !*val)
+ return -EINVAL;
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtoint(val, 10, &prio);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "level")) {
+ ret = kstrtoint(val, 10, &level);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "algo")) {
+ algo = val;
+ continue;
+ }
+
+ if (!strcmp(param, "dict")) {
+ dict_path = val;
+ continue;
+ }
+ }
+
+ /* Lookup priority by algorithm name */
+ if (algo) {
+ s32 p;
+
+ prio = -EINVAL;
+ for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
+ if (!zram->comp_algs[p])
+ continue;
+
+ if (!strcmp(zram->comp_algs[p], algo)) {
+ prio = p;
+ break;
+ }
+ }
+ }
+
+ if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
+ return -EINVAL;
+
+ ret = comp_params_store(zram, prio, level, dict_path);
+ return ret ? ret : len;
+}
+
static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1211,7 +1309,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
- size_t num_pages;
+ size_t num_pages, index;
num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
@@ -1226,6 +1324,9 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+
+ for (index = 0; index < num_pages; index++)
+ spin_lock_init(&zram->table[index].lock);
return true;
}
@@ -1283,7 +1384,7 @@ out:
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
WARN_ON_ONCE(zram->table[index].flags &
- ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
+ ~(1UL << ZRAM_UNDER_WB));
}
/*
@@ -1327,7 +1428,8 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
ret = 0;
} else {
dst = kmap_local_page(page);
- ret = zcomp_decompress(zstrm, src, size, dst);
+ ret = zcomp_decompress(zram->comps[prio], zstrm,
+ src, size, dst);
kunmap_local(dst);
zcomp_stream_put(zram->comps[prio]);
}
@@ -1414,7 +1516,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len);
+ ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
+ src, &comp_len);
kunmap_local(src);
if (unlikely(ret)) {
@@ -1601,7 +1704,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len_new);
+ ret = zcomp_compress(zram->comps[prio], zstrm,
+ src, &comp_len_new);
kunmap_local(src);
if (ret) {
@@ -1754,6 +1858,18 @@ static ssize_t recompress_store(struct device *dev,
algo = val;
continue;
}
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtouint(val, 10, &prio);
+ if (ret)
+ return ret;
+
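+ /* The primary slot is never a recompression target; bump to secondary */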
+ if (prio == ZRAM_PRIMARY_COMP)
+ prio = ZRAM_SECONDARY_COMP;
+
+ prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ continue;
+ }
}
if (threshold >= huge_class_size)
@@ -1976,6 +2092,15 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
+static void zram_comp_params_reset(struct zram *zram)
+{
+ u32 prio;
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ comp_params_reset(zram, prio);
+ }
+}
+
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
@@ -1989,6 +2114,13 @@ static void zram_destroy_comps(struct zram *zram)
zcomp_destroy(comp);
zram->num_active_comps--;
}
+
+ for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ kfree(zram->comp_algs[prio]);
+ zram->comp_algs[prio] = NULL;
+ }
+
+ zram_comp_params_reset(zram);
}
static void zram_reset_device(struct zram *zram)
@@ -2046,7 +2178,8 @@ static ssize_t disksize_store(struct device *dev,
if (!zram->comp_algs[prio])
continue;
- comp = zcomp_create(zram->comp_algs[prio]);
+ comp = zcomp_create(zram->comp_algs[prio],
+ &zram->params[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
@@ -2149,6 +2282,7 @@ static DEVICE_ATTR_RW(writeback_limit_enable);
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif
+static DEVICE_ATTR_WO(algorithm_params);
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -2176,6 +2310,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_recomp_algorithm.attr,
&dev_attr_recompress.attr,
#endif
+ &dev_attr_algorithm_params.attr,
NULL,
};
@@ -2251,6 +2386,7 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
+ zram_comp_params_reset(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
zram_debugfs_register(zram);
@@ -2401,9 +2537,10 @@ static void destroy_devices(void)
static int __init zram_init(void)
{
+ struct zram_table_entry zram_te;
int ret;
- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
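
For orientation, a minimal userspace sketch of driving the new algorithm_params attribute added above. The zram0 node path and the dictionary file are assumptions; the key=value pairs map onto the next_arg() loop in algorithm_params_store():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed sysfs path; set level and dictionary for zstd */
	const char *node = "/sys/block/zram0/algorithm_params";
	const char *params = "algo=zstd level=3 dict=/etc/zram/zstd.dict";
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, params, strlen(params)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Either priority= or algo= selects the slot; if the algorithm name matches no configured slot, prio stays at -EINVAL and the write fails.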
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 35e322144629..cfc8c059db63 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -45,9 +45,7 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
- /* zram slot is locked */
- ZRAM_LOCK = ZRAM_FLAG_SHIFT,
- ZRAM_SAME, /* Page consists the same element */
+ ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists of the same element */
ZRAM_WB, /* page is stored on backing_device */
ZRAM_UNDER_WB, /* page is under writeback */
ZRAM_HUGE, /* Incompressible page */
@@ -68,7 +66,8 @@ struct zram_table_entry {
unsigned long handle;
unsigned long element;
};
- unsigned long flags;
+ unsigned int flags;
+ spinlock_t lock;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
@@ -107,6 +106,7 @@ struct zram {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
struct zcomp *comps[ZRAM_MAX_COMPS];
+ struct zcomp_params params[ZRAM_MAX_COMPS];
struct gendisk *disk;
/* Prevent concurrent execution of device init */
struct rw_semaphore init_lock;
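
Taken together, the zram hunks above replace the ZRAM_LOCK bit spinlock with a real per-entry spinlock_t, which is what lets flags shrink to unsigned int. A standalone sketch of the resulting shape, with illustrative field and flag names:

#include <linux/spinlock.h>
#include <linux/bug.h>

struct table_entry {
	unsigned long handle;
	unsigned int flags;	/* was unsigned long behind a bit lock */
	spinlock_t lock;	/* real lock, one per entry */
};

#define NR_TABLE_FLAGS 12	/* illustrative flag count */

static void table_entry_init(struct table_entry *e)
{
	/* The assertion tracks the field width instead of BITS_PER_LONG */
	BUILD_BUG_ON(NR_TABLE_FLAGS > sizeof(e->flags) * 8);
	spin_lock_init(&e->lock);
}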
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 769fa288179d..18767b54df35 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -274,6 +274,18 @@ config BT_HCIUART_MRVL
Say Y here to compile support for HCI MRVL protocol.
+config BT_HCIUART_AML
+ bool "Amlogic protocol support"
+ depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
+ select BT_HCIUART_H4
+ select FW_LOADER
+ help
+ The Amlogic protocol support enables Bluetooth HCI over a serial
+ port interface for Amlogic Bluetooth controllers.
+
+ Say Y here to compile support for HCI AML protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 0730d6684d1a..81856512ddd0 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -51,4 +51,5 @@ hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
+hci_uart-$(CONFIG_BT_HCIUART_AML) += hci_aml.o
hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 1c7631f22c52..fda47948c35d 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
#define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
+#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
@@ -423,6 +424,18 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
goto exit_error;
}
break;
+
+ case BTINTEL_PCIE_HCI_ISO_PKT:
+ if (skb->len >= HCI_ISO_HDR_SIZE) {
+ plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
+ pkt_type = HCI_ISODATA_PKT;
+ } else {
+ bt_dev_err(hdev, "ISO packet is too short");
+ ret = -EILSEQ;
+ goto exit_error;
+ }
+ break;
+
default:
bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
pcie_pkt_type);
@@ -1082,6 +1095,9 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
type = BTINTEL_PCIE_HCI_SCO_PKT;
hdev->stat.sco_tx++;
break;
+ case HCI_ISODATA_PKT:
+ type = BTINTEL_PCIE_HCI_ISO_PKT;
+ break;
default:
bt_dev_err(hdev, "Unknown HCI packet type");
return -EILSEQ;
@@ -1208,7 +1224,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
int err;
struct hci_dev *hdev;
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
if (!hdev)
return -ENOMEM;
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index ad1ec6f3685a..7c2030cec10e 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1412,6 +1412,7 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 },
{ NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 },
{ NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 },
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index fd7991ea7672..2d95b3ea046d 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -30,6 +30,7 @@
#define RTL_ROM_LMP_8822B 0x8822
#define RTL_ROM_LMP_8852A 0x8852
#define RTL_ROM_LMP_8851B 0x8851
+#define RTL_ROM_LMP_8922A 0x8922
#define RTL_CONFIG_MAGIC 0x8723ab55
#define RTL_VSC_OP_COREDUMP 0xfcff
@@ -69,6 +70,7 @@ enum btrtl_chip_id {
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
CHIP_ID_8851B = 36,
+ CHIP_ID_8922A = 44,
CHIP_ID_8852BT = 47,
};
@@ -309,6 +311,15 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8851bu_config",
.hw_info = "rtl8851bu" },
+ /* 8922A */
+ { IC_INFO(RTL_ROM_LMP_8922A, 0xa, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8922au_fw",
+ .cfg_name = "rtl_bt/rtl8922au_config",
+ .hw_info = "rtl8922au" },
+
/* 8852BT/8852BE-VT */
{ IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB),
.config_needed = false,
@@ -655,6 +666,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
+ { RTL_ROM_LMP_8922A, 44 }, /* 8922A */
{ RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
};
@@ -878,10 +890,8 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kvmalloc(fw->size, GFP_KERNEL);
- if (*buff)
- memcpy(*buff, fw->data, ret);
- else
+ *buff = kvmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!*buff)
ret = -ENOMEM;
release_firmware(fw);
@@ -1255,6 +1265,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
case RTL_ROM_LMP_8852A:
case RTL_ROM_LMP_8703B:
case RTL_ROM_LMP_8851B:
+ case RTL_ROM_LMP_8922A:
err = btrtl_setup_rtl8723b(hdev, btrtl_dev);
break;
default:
@@ -1286,6 +1297,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852B:
case CHIP_ID_8852C:
case CHIP_ID_8851B:
+ case CHIP_ID_8922A:
case CHIP_ID_8852BT:
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
@@ -1296,6 +1308,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP);
if (btrtl_dev->project_id == CHIP_ID_8852A ||
+ btrtl_dev->project_id == CHIP_ID_8852B ||
btrtl_dev->project_id == CHIP_ID_8852C)
set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
@@ -1528,3 +1541,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8922au_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8922au_config.bin");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fdcfe9c50313..a69feb08486a 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -295,6 +295,7 @@ static int btsdio_probe(struct sdio_func *func,
case SDIO_DEVICE_ID_BROADCOM_4345:
case SDIO_DEVICE_ID_BROADCOM_43455:
case SDIO_DEVICE_ID_BROADCOM_4356:
+ case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
return -ENODEV;
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 51d9d4532dda..6c9c761d5b93 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,7 +59,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_CW6622 BIT(19)
#define BTUSB_MEDIATEK BIT(20)
#define BTUSB_WIDEBAND_SPEECH BIT(21)
-#define BTUSB_VALID_LE_STATES BIT(22)
+#define BTUSB_INVALID_LE_STATES BIT(22)
#define BTUSB_QCA_WCN6855 BIT(23)
#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
@@ -298,115 +298,79 @@ static const struct usb_device_id quirks_table[] = {
/* QCA WCN6855 chipset */
{ USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* QCA WCN785x chipset */
{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -540,6 +504,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
@@ -564,6 +530,17 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek 8922AE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3617), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3616), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe130), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -571,131 +548,102 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7615E Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
/* Additional MediaTek MT7663 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7668 Bluetooth devices */
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
- BTUSB_WIDEBAND_SPEECH |
- BTUSB_VALID_LE_STATES },
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3604), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -1397,7 +1345,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
- size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+ /* Use the maximum HCI event size so the USB stack handles
+ * ZLPs (zero-length packets) and short transfers automatically.
+ */
+ size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {
@@ -3956,7 +3907,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
- if (!(id->driver_info & BTUSB_VALID_LE_STATES))
+ if (id->driver_info & BTUSB_INVALID_LE_STATES)
set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
if (id->driver_info & BTUSB_DIGIANSWER) {
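
The polarity flip behind all of the quirks-table churn above, as a sketch: devices previously had to opt in with BTUSB_VALID_LE_STATES or they were marked broken, whereas bit 22 now flags only the broken devices, so the common case needs no flag at all (wrapper name hypothetical):

#include <stdbool.h>

#define BTUSB_INVALID_LE_STATES	(1u << 22)

static bool le_states_broken(unsigned long driver_info)
{
	/* old: !(driver_info & BTUSB_VALID_LE_STATES) — every good
	 * device had to carry the flag; new: only broken devices do. */
	return driver_info & BTUSB_INVALID_LE_STATES;
}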
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
index 4f2c89742245..647d37ca4cdd 100644
--- a/drivers/bluetooth/h4_recv.h
+++ b/drivers/bluetooth/h4_recv.h
@@ -38,6 +38,13 @@ struct h4_recv_pkt {
.lsize = 1, \
.maxlen = HCI_MAX_EVENT_SIZE
+#define H4_RECV_ISO \
+ .type = HCI_ISODATA_PKT, \
+ .hlen = HCI_ISO_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 2, \
+ .maxlen = HCI_MAX_FRAME_SIZE
+
static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
struct sk_buff *skb,
const unsigned char *buffer,
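
A sketch of what the H4_RECV_ISO descriptor above encodes: the 4-byte ISO header carries a 2-byte little-endian payload length at offset 2, which is exactly what .loff = 2 / .lsize = 2 tell the reassembler (helper name hypothetical):

#include <stddef.h>
#include <stdint.h>

#define HCI_ISO_HDR_SIZE 4

static size_t iso_expected_frame_len(const uint8_t *hdr)
{
	/* .loff = 2, .lsize = 2: length bytes live at hdr[2..3], LE */
	uint16_t dlen = hdr[2] | ((uint16_t)hdr[3] << 8);

	return HCI_ISO_HDR_SIZE + dlen;
}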
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
new file mode 100644
index 000000000000..dc9541e76d81
--- /dev/null
+++ b/drivers/bluetooth/hci_aml.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#include "hci_uart.h"
+
+#define AML_EVT_HEAD_SIZE 4
+#define AML_BDADDR_DEFAULT (&(bdaddr_t) {{ 0x00, 0xff, 0x00, 0x22, 0x2d, 0xae }})
+
+#define AML_FIRMWARE_OPERATION_SIZE (248)
+#define AML_FIRMWARE_MAX_SIZE (512 * 1024)
+
+/* TCI command */
+#define AML_TCI_CMD_READ 0xFEF0
+#define AML_TCI_CMD_WRITE 0xFEF1
+#define AML_TCI_CMD_UPDATE_BAUDRATE 0xFEF2
+#define AML_TCI_CMD_HARDWARE_RESET 0xFEF2
+#define AML_TCI_CMD_DOWNLOAD_BT_FW 0xFEF3
+
+/* Vendor command */
+#define AML_BT_HCI_VENDOR_CMD 0xFC1A
+
+/* TCI operation parameter in controller chip */
+#define AML_OP_UART_MODE 0x00A30128
+#define AML_OP_EVT_ENABLE 0x00A70014
+#define AML_OP_MEM_HARD_TRANS_EN 0x00A7000C
+#define AML_OP_RF_CFG 0x00F03040
+#define AML_OP_RAM_POWER_CTR 0x00F03050
+#define AML_OP_HARDWARE_RST 0x00F03058
+#define AML_OP_ICCM_RAM_BASE 0x00000000
+#define AML_OP_DCCM_RAM_BASE 0x00D00000
+
+/* UART configuration */
+#define AML_UART_XMIT_EN BIT(12)
+#define AML_UART_RECV_EN BIT(13)
+#define AML_UART_TIMEOUT_INT_EN BIT(14)
+#define AML_UART_CLK_SOURCE 40000000
+
+/* Controller event */
+#define AML_EVT_EN BIT(24)
+
+/* RAM power control */
+#define AML_RAM_POWER_ON (0)
+#define AML_RAM_POWER_OFF (1)
+
+/* RF configuration */
+#define AML_RF_ANT_SINGLE BIT(28)
+#define AML_RF_ANT_DOUBLE BIT(29)
+
+/* Memory transaction */
+#define AML_MM_CTR_HARD_TRAS_EN BIT(27)
+
+/* Controller reset */
+#define AML_CTR_CPU_RESET BIT(8)
+#define AML_CTR_MAC_RESET BIT(9)
+#define AML_CTR_PHY_RESET BIT(10)
+
+enum {
+ FW_ICCM,
+ FW_DCCM
+};
+
+struct aml_fw_len {
+ u32 iccm_len;
+ u32 dccm_len;
+};
+
+struct aml_tci_rsp {
+ u8 num_cmd_packet;
+ u16 opcode;
+ u8 status;
+} __packed;
+
+struct aml_device_data {
+ int iccm_offset;
+ int dccm_offset;
+ bool is_coex;
+};
+
+struct aml_serdev {
+ struct hci_uart serdev_hu;
+ struct device *dev;
+ struct gpio_desc *bt_en_gpio;
+ struct regulator *bt_supply;
+ struct clk *lpo_clk;
+ const struct aml_device_data *aml_dev_data;
+ const char *firmware_name;
+};
+
+struct aml_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+static const struct h4_recv_pkt aml_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
+};
+
+/* The TCI command is a vendor-private command used for setting the baud
+ * rate, downloading firmware and initializing RAM.
+ *
+ * op_code | op_len | op_addr | parameter |
+ * --------|-----------------------|---------|-------------|
+ * 2B | 1B len(addr+param) | 4B | len(param) |
+ */
+static int aml_send_tci_cmd(struct hci_dev *hdev, u16 op_code, u32 op_addr,
+ u32 *param, u32 param_len)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb = NULL;
+ size_t buf_len = 0;
+ u8 *buf = NULL;
+ int err = 0;
+
+ buf_len = sizeof(op_addr) + param_len;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, &op_addr, sizeof(op_addr));
+ if (param && param_len > 0)
+ memcpy(buf + sizeof(op_addr), param, param_len);
+
+ skb = __hci_cmd_sync_ev(hdev, op_code, buf_len, buf,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to send TCI cmd (error: %d)", err);
+ goto exit;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto skb_free;
+
+ if (rsp->opcode != op_code || rsp->status != 0x00) {
+ bt_dev_err(hdev, "send TCI cmd (0x%04X), response (0x%04X):(%d)",
+ op_code, rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto skb_free;
+ }
+
+skb_free:
+ kfree_skb(skb);
+
+exit:
+ kfree(buf);
+ return err;
+}
+
+static int aml_update_chip_baudrate(struct hci_dev *hdev, u32 baud)
+{
+ u32 value;
+
+ value = ((AML_UART_CLK_SOURCE / baud) - 1) & 0x0FFF;
+ value |= AML_UART_XMIT_EN | AML_UART_RECV_EN | AML_UART_TIMEOUT_INT_EN;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_UPDATE_BAUDRATE,
+ AML_OP_UART_MODE, &value, sizeof(value));
+}
+
+static int aml_start_chip(struct hci_dev *hdev)
+{
+ u32 value = 0;
+ int ret;
+
+ value = AML_MM_CTR_HARD_TRAS_EN;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_MEM_HARD_TRANS_EN,
+ &value, sizeof(value));
+ if (ret)
+ return ret;
+
+ /* controller hardware reset */
+ value = AML_CTR_CPU_RESET | AML_CTR_MAC_RESET | AML_CTR_PHY_RESET;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_HARDWARE_RESET,
+ AML_OP_HARDWARE_RST,
+ &value, sizeof(value));
+ return ret;
+}
+
+static int aml_send_firmware_segment(struct hci_dev *hdev,
+ u8 fw_type,
+ u8 *seg,
+ u32 seg_size,
+ u32 offset)
+{
+ u32 op_addr = 0;
+
+ if (fw_type == FW_ICCM)
+ op_addr = AML_OP_ICCM_RAM_BASE + offset;
+ else if (fw_type == FW_DCCM)
+ op_addr = AML_OP_DCCM_RAM_BASE + offset;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_DOWNLOAD_BT_FW,
+ op_addr, (u32 *)seg, seg_size);
+}
+
+static int aml_send_firmware(struct hci_dev *hdev, u8 fw_type,
+ u8 *fw, u32 fw_size, u32 offset)
+{
+ u32 seg_size = 0;
+ u32 seg_off = 0;
+
+ if (fw_size > AML_FIRMWARE_MAX_SIZE) {
+ bt_dev_err(hdev,
+ "Firmware size %d kB is larger than the maximum of 512 kB. Aborting.",
+ fw_size);
+ return -EINVAL;
+ }
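+ /* Push the image in AML_FIRMWARE_OPERATION_SIZE-byte TCI writes */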
+ while (fw_size > 0) {
+ seg_size = (fw_size > AML_FIRMWARE_OPERATION_SIZE) ?
+ AML_FIRMWARE_OPERATION_SIZE : fw_size;
+ if (aml_send_firmware_segment(hdev, fw_type, (fw + seg_off),
+ seg_size, offset)) {
+ bt_dev_err(hdev, "Failed send firmware, type: %d, offset: 0x%x",
+ fw_type, offset);
+ return -EINVAL;
+ }
+ seg_off += seg_size;
+ fw_size -= seg_size;
+ offset += seg_size;
+ }
+ return 0;
+}
+
+static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ const struct firmware *firmware = NULL;
+ struct aml_fw_len *fw_len = NULL;
+ u8 *iccm_start = NULL, *dccm_start = NULL;
+ u32 iccm_len, dccm_len;
+ u32 value = 0;
+ int ret = 0;
+
+ /* Enable firmware download event */
+ value = AML_EVT_EN;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_EVT_ENABLE,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+ /* RAM power on */
+ value = AML_RAM_POWER_ON;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_RAM_POWER_CTR,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+ /* Check RAM power status */
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_READ,
+ AML_OP_RAM_POWER_CTR, NULL, 0);
+ if (ret)
+ goto exit;
+
+ ret = request_firmware(&firmware, fw_name, &hdev->dev);
+ if (ret < 0) {
+ bt_dev_err(hdev, "Failed to load <%s>:(%d)", fw_name, ret);
+ goto exit;
+ }
+
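+ /* Blob layout: struct aml_fw_len header, then ICCM image, then DCCM image */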
+ fw_len = (struct aml_fw_len *)firmware->data;
+
+ /* Download ICCM */
+ iccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len)
+ + amldev->aml_dev_data->iccm_offset;
+ iccm_len = fw_len->iccm_len - amldev->aml_dev_data->iccm_offset;
+ ret = aml_send_firmware(hdev, FW_ICCM, iccm_start, iccm_len,
+ amldev->aml_dev_data->iccm_offset);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to send FW_ICCM (%d)", ret);
+ goto exit;
+ }
+
+ /* Download DCCM */
+ dccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len) + fw_len->iccm_len;
+ dccm_len = fw_len->dccm_len;
+ ret = aml_send_firmware(hdev, FW_DCCM, dccm_start, dccm_len,
+ amldev->aml_dev_data->dccm_offset);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to send FW_DCCM (%d)", ret);
+ goto exit;
+ }
+
+ /* Disable firmware download event */
+ value = 0;
+ ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_EVT_ENABLE,
+ &value, sizeof(value));
+ if (ret)
+ goto exit;
+
+exit:
+ if (firmware)
+ release_firmware(firmware);
+ return ret;
+}
+
+static int aml_send_reset(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync_ev(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to send hci reset cmd (%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int aml_dump_fw_version(struct hci_dev *hdev)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb;
+ u8 value[6] = {0};
+ u8 *fw_ver = NULL;
+ int err = 0;
+
+ skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD, sizeof(value), value,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to get fw version (error: %d)", err);
+ return err;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto exit;
+
+ if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) {
+ bt_dev_err(hdev, "dump version, error response (0x%04X):(%d)",
+ rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ fw_ver = (u8 *)rsp + AML_EVT_HEAD_SIZE;
+ bt_dev_info(hdev, "fw_version: date = %02x.%02x, number = 0x%02x%02x",
+ *(fw_ver + 1), *fw_ver, *(fw_ver + 3), *(fw_ver + 2));
+
+exit:
+ kfree_skb(skb);
+ return err;
+}
+
+static int aml_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct aml_tci_rsp *rsp = NULL;
+ struct sk_buff *skb;
+ int err = 0;
+
+ bt_dev_info(hdev, "set bdaddr (%pM)", bdaddr);
+ skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD,
+ sizeof(bdaddr_t), bdaddr,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to set bdaddr (error: %d)", err);
+ return err;
+ }
+
+ rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp));
+ if (!rsp)
+ goto exit;
+
+ if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) {
+ bt_dev_err(hdev, "error response (0x%x):(%d)", rsp->opcode, rsp->status);
+ err = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ kfree_skb(skb);
+ return err;
+}
+
+static int aml_check_bdaddr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_bd_addr *paddr;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read bdaddr (error: %d)", err);
+ return err;
+ }
+
+ paddr = skb_pull_data(skb, sizeof(struct hci_rp_read_bd_addr));
+ if (!paddr)
+ goto exit;
+
+ if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) {
+ bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+exit:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int aml_config_rf(struct hci_dev *hdev, bool is_coex)
+{
+ u32 value = AML_RF_ANT_DOUBLE;
+
+ /* Use a single antenna when coexisting with Wi-Fi */
+ if (is_coex)
+ value = AML_RF_ANT_SINGLE;
+
+ return aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE,
+ AML_OP_RF_CFG,
+ &value, sizeof(value));
+}
+
+static int aml_parse_dt(struct aml_serdev *amldev)
+{
+ struct device *pdev = amldev->dev;
+
+ amldev->bt_en_gpio = devm_gpiod_get(pdev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(amldev->bt_en_gpio)) {
+ dev_err(pdev, "Failed to acquire enable gpios");
+ return PTR_ERR(amldev->bt_en_gpio);
+ }
+
+ if (device_property_read_string(pdev, "firmware-name",
+ &amldev->firmware_name)) {
+ dev_err(pdev, "Failed to acquire firmware path");
+ return -ENODEV;
+ }
+
+ amldev->bt_supply = devm_regulator_get(pdev, "vddio");
+ if (IS_ERR(amldev->bt_supply)) {
+ dev_err(pdev, "Failed to acquire regulator");
+ return PTR_ERR(amldev->bt_supply);
+ }
+
+ amldev->lpo_clk = devm_clk_get(pdev, NULL);
+ if (IS_ERR(amldev->lpo_clk)) {
+ dev_err(pdev, "Failed to acquire clock source");
+ return PTR_ERR(amldev->lpo_clk);
+ }
+
+ return 0;
+}
+
+static int aml_power_on(struct aml_serdev *amldev)
+{
+ int err;
+
+ err = regulator_enable(amldev->bt_supply);
+ if (err) {
+ dev_err(amldev->dev, "Failed to enable regulator: (%d)", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(amldev->lpo_clk);
+ if (err) {
+ dev_err(amldev->dev, "Failed to enable lpo clock: (%d)", err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(amldev->bt_en_gpio, 1);
+
+ /* Wait 20 ms for the Bluetooth controller to power on */
+ msleep(20);
+ return 0;
+}
+
+static int aml_power_off(struct aml_serdev *amldev)
+{
+ gpiod_set_value_cansleep(amldev->bt_en_gpio, 0);
+
+ clk_disable_unprepare(amldev->lpo_clk);
+
+ regulator_disable(amldev->bt_supply);
+
+ return 0;
+}
+
+static int aml_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ /* update controller baudrate */
+ if (aml_update_chip_baudrate(hu->hdev, speed) != 0) {
+ bt_dev_err(hu->hdev, "Failed to update baud rate");
+ return -EINVAL;
+ }
+
+ /* update local baudrate */
+ serdev_device_set_baudrate(hu->serdev, speed);
+
+ return 0;
+}
+
+/* Initialize protocol */
+static int aml_open(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct aml_data *aml_data;
+ int err;
+
+ err = aml_parse_dt(amldev);
+ if (err)
+ return err;
+
+ if (!hci_uart_has_flow_control(hu)) {
+ bt_dev_err(hu->hdev, "no flow control");
+ return -EOPNOTSUPP;
+ }
+
+ aml_data = kzalloc(sizeof(*aml_data), GFP_KERNEL);
+ if (!aml_data)
+ return -ENOMEM;
+
+ skb_queue_head_init(&aml_data->txq);
+
+ hu->priv = aml_data;
+
+ return 0;
+}
+
+static int aml_close(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_purge(&aml_data->txq);
+ kfree_skb(aml_data->rx_skb);
+ kfree(aml_data);
+
+ hu->priv = NULL;
+
+ return aml_power_off(amldev);
+}
+
+static int aml_flush(struct hci_uart *hu)
+{
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_purge(&aml_data->txq);
+
+ return 0;
+}
+
+static int aml_setup(struct hci_uart *hu)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev);
+ struct hci_dev *hdev = amldev->serdev_hu.hdev;
+ int err;
+
+ /* Setup bdaddr */
+ hdev->set_bdaddr = aml_set_bdaddr;
+
+ err = aml_power_on(amldev);
+ if (err)
+ return err;
+
+ err = aml_set_baudrate(hu, amldev->serdev_hu.proto->oper_speed);
+ if (err)
+ return err;
+
+ err = aml_download_firmware(hdev, amldev->firmware_name);
+ if (err)
+ return err;
+
+ err = aml_config_rf(hdev, amldev->aml_dev_data->is_coex);
+ if (err)
+ return err;
+
+ err = aml_start_chip(hdev);
+ if (err)
+ return err;
+
+ /* Wait 60ms for controller startup */
+ msleep(60);
+
+ err = aml_dump_fw_version(hdev);
+ if (err)
+ return err;
+
+ err = aml_send_reset(hdev);
+ if (err)
+ return err;
+
+ err = aml_check_bdaddr(hdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int aml_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct aml_data *aml_data = hu->priv;
+
+ skb_queue_tail(&aml_data->txq, skb);
+
+ return 0;
+}
+
+static struct sk_buff *aml_dequeue(struct hci_uart *hu)
+{
+ struct aml_data *aml_data = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&aml_data->txq);
+
+ /* Prepend skb with frame type */
+ if (skb)
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ return skb;
+}
+
+static int aml_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct aml_data *aml_data = hu->priv;
+ int err;
+
+ aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_recv_pkts,
+ ARRAY_SIZE(aml_recv_pkts));
+ if (IS_ERR(aml_data->rx_skb)) {
+ err = PTR_ERR(aml_data->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ aml_data->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static const struct hci_uart_proto aml_hci_proto = {
+ .id = HCI_UART_AML,
+ .name = "AML",
+ .init_speed = 115200,
+ .oper_speed = 4000000,
+ .open = aml_open,
+ .close = aml_close,
+ .setup = aml_setup,
+ .flush = aml_flush,
+ .recv = aml_recv,
+ .enqueue = aml_enqueue,
+ .dequeue = aml_dequeue,
+};
+
+static void aml_device_driver_shutdown(struct device *dev)
+{
+ struct aml_serdev *amldev = dev_get_drvdata(dev);
+
+ aml_power_off(amldev);
+}
+
+static int aml_serdev_probe(struct serdev_device *serdev)
+{
+ struct aml_serdev *amldev;
+ int err;
+
+ amldev = devm_kzalloc(&serdev->dev, sizeof(*amldev), GFP_KERNEL);
+ if (!amldev)
+ return -ENOMEM;
+
+ amldev->serdev_hu.serdev = serdev;
+ amldev->dev = &serdev->dev;
+ serdev_device_set_drvdata(serdev, amldev);
+
+ err = hci_uart_register_device(&amldev->serdev_hu, &aml_hci_proto);
+ if (err)
+ return dev_err_probe(amldev->dev, err,
+ "Failed to register hci uart device");
+
+ amldev->aml_dev_data = device_get_match_data(&serdev->dev);
+
+ return 0;
+}
+
+static void aml_serdev_remove(struct serdev_device *serdev)
+{
+ struct aml_serdev *amldev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&amldev->serdev_hu);
+}
+
+static const struct aml_device_data data_w155s2 = {
+ .iccm_offset = 256 * 1024,
+};
+
+static const struct aml_device_data data_w265s2 = {
+ .iccm_offset = 384 * 1024,
+};
+
+static const struct of_device_id aml_bluetooth_of_match[] = {
+ { .compatible = "amlogic,w155s2-bt", .data = &data_w155s2 },
+ { .compatible = "amlogic,w265s2-bt", .data = &data_w265s2 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, aml_bluetooth_of_match);
+
+static struct serdev_device_driver aml_serdev_driver = {
+ .probe = aml_serdev_probe,
+ .remove = aml_serdev_remove,
+ .driver = {
+ .name = "hci_uart_aml",
+ .of_match_table = aml_bluetooth_of_match,
+ .shutdown = aml_device_driver_shutdown,
+ },
+};
+
+int __init aml_init(void)
+{
+ serdev_device_driver_register(&aml_serdev_driver);
+
+ return hci_uart_register_proto(&aml_hci_proto);
+}
+
+int __exit aml_deinit(void)
+{
+ serdev_device_driver_unregister(&aml_serdev_driver);
+
+ return hci_uart_unregister_proto(&aml_hci_proto);
+}
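
As a worked check of the divisor math in aml_update_chip_baudrate(), a standalone sketch reusing the macros defined above — at the 4 Mbaud operating speed the low 12 bits come out to 40000000 / 4000000 - 1 = 9:

#include <stdint.h>
#include <stdio.h>

#define AML_UART_XMIT_EN	(1u << 12)
#define AML_UART_RECV_EN	(1u << 13)
#define AML_UART_TIMEOUT_INT_EN	(1u << 14)
#define AML_UART_CLK_SOURCE	40000000u

static uint32_t aml_uart_mode(uint32_t baud)
{
	uint32_t value = ((AML_UART_CLK_SOURCE / baud) - 1) & 0x0FFF;

	return value | AML_UART_XMIT_EN | AML_UART_RECV_EN |
	       AML_UART_TIMEOUT_INT_EN;
}

int main(void)
{
	printf("0x%08x\n", aml_uart_mode(4000000));	/* 0x00007009 */
	return 0;
}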
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 30192bb08354..395d66e32a2e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -507,6 +507,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
hu->alignment = 1;
hu->padding = 0;
+ /* Use serial port speed as oper_speed */
+ hu->oper_speed = tty->termios.c_ospeed;
+
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
@@ -870,7 +873,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_MRVL
mrvl_init();
#endif
-
+#ifdef CONFIG_BT_HCIUART_AML
+ aml_init();
+#endif
return 0;
}
@@ -906,7 +911,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_MRVL
mrvl_deinit();
#endif
-
+#ifdef CONFIG_BT_HCIUART_AML
+ aml_deinit();
+#endif
tty_unregister_ldisc(&hci_uart_ldisc);
}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 00bf7ae82c5b..fbf3079b92a5 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -20,7 +20,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 12
+#define HCI_UART_MAX_PROTO 13
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -34,6 +34,7 @@
#define HCI_UART_AG6XX 9
#define HCI_UART_NOKIA 10
#define HCI_UART_MRVL 11
+#define HCI_UART_AML 12
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -209,3 +210,8 @@ int ag6xx_deinit(void);
int mrvl_init(void);
int mrvl_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_AML
+int aml_init(void);
+int aml_deinit(void);
+#endif
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 64cd2ee03aa3..ff669a8ccad9 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -89,7 +89,7 @@ config HISILICON_LPC
config IMX_WEIM
bool "Freescale EIM DRIVER"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Driver for i.MX WEIM controller.
The WEIM(Wireless External Interface Module) works like a bus.
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index b715c8ab36e8..a65c79b08804 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
return -ENODEV;
}
map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(map)) {
dev_err(dev,
"could not find Integrator/AP system controller\n");
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index 595fb22b73e0..7463124b6dd9 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -185,34 +185,13 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
return ret;
}
-static void bt1_apb_disable_clk(void *data)
-{
- struct bt1_apb *apb = data;
-
- clk_disable_unprepare(apb->pclk);
-}
-
static int bt1_apb_request_clk(struct bt1_apb *apb)
{
- int ret;
-
- apb->pclk = devm_clk_get(apb->dev, "pclk");
+ apb->pclk = devm_clk_get_enabled(apb->dev, "pclk");
if (IS_ERR(apb->pclk))
return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
"Couldn't get APB clock descriptor\n");
- ret = clk_prepare_enable(apb->pclk);
- if (ret) {
- dev_err(apb->dev, "Couldn't enable the APB clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
- if (ret) {
- dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
- return ret;
- }
-
apb->rate = clk_get_rate(apb->pclk);
if (!apb->rate) {
dev_err(apb->dev, "Invalid clock rate\n");
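
The same conversion repeats in the bt1-axi, imx-weim and mxc-rnga hunks below: devm_clk_get() plus clk_prepare_enable() plus a hand-rolled devm disable action collapse into a single devm_clk_get_enabled() call. A minimal sketch of the resulting probe shape, with illustrative driver and clock names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Get, prepare and enable in one call; disable/unprepare and
	 * put are undone automatically when the device detaches. */
	clk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Couldn't get the bus clock\n");

	return 0;
}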
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 4007e7322cf2..a5254c73bf43 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -146,33 +146,14 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
return ret;
}
-static void bt1_axi_disable_clk(void *data)
-{
- struct bt1_axi *axi = data;
-
- clk_disable_unprepare(axi->aclk);
-}
-
static int bt1_axi_request_clk(struct bt1_axi *axi)
{
- int ret;
-
- axi->aclk = devm_clk_get(axi->dev, "aclk");
+ axi->aclk = devm_clk_get_enabled(axi->dev, "aclk");
if (IS_ERR(axi->aclk))
return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
"Couldn't get AXI Interconnect clock\n");
- ret = clk_prepare_enable(axi->aclk);
- if (ret) {
- dev_err(axi->dev, "Couldn't enable the AXI clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
- if (ret)
- dev_err(axi->dev, "Can't add AXI clock disable action\n");
-
- return ret;
+ return 0;
}
static int bt1_axi_request_irq(struct bt1_axi *axi)
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 837bf9d51c6e..83d623d97f5f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -282,22 +282,18 @@ static int weim_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
/* get the clock */
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
/* parse the device node */
ret = weim_parse_dt(pdev);
if (ret)
- clk_disable_unprepare(clk);
- else
- dev_info(&pdev->dev, "Driver registered.\n");
+ return ret;
- return ret;
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return 0;
}
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index eee41fb798a1..a89d78925637 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -751,12 +751,10 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
- if (clk_freq > RSB_MAX_FREQ) {
- dev_err(dev,
- "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
- clk_freq);
- return -EINVAL;
- }
+ if (clk_freq > RSB_MAX_FREQ)
+ return dev_err_probe(dev, -EINVAL,
+ "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+ clk_freq);
rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
if (!rsb)
@@ -774,28 +772,22 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return irq;
rsb->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(rsb->clk)) {
- ret = PTR_ERR(rsb->clk);
- dev_err(dev, "failed to retrieve clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->clk))
+ return dev_err_probe(dev, PTR_ERR(rsb->clk),
+ "failed to retrieve clk\n");
rsb->rstc = devm_reset_control_get(dev, NULL);
- if (IS_ERR(rsb->rstc)) {
- ret = PTR_ERR(rsb->rstc);
- dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->rstc))
+ return dev_err_probe(dev, PTR_ERR(rsb->rstc),
+ "failed to retrieve reset controller\n");
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
- if (ret) {
- dev_err(dev, "can't register interrupt handler irq %d: %d\n",
- irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "can't register interrupt handler irq %d\n", irq);
ret = sunxi_rsb_hw_init(rsb);
if (ret)
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 2b59ef61dda2..270a94a06e05 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -126,7 +126,6 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
- * @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
* @pre_reset_quirk: module specific pre-reset quirk
* @post_reset_quirk: module specific post-reset quirk
@@ -2569,14 +2568,12 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child)
{
- const struct property *prop;
- int i, len;
+ int i;
for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
const char *name = sysc_dts_quirks[i].name;
- prop = of_get_property(np, name, &len);
- if (!prop)
+ if (!of_property_present(np, name))
continue;
ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index 751d7cc0da1b..f9bec10a6064 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -14,12 +14,6 @@
#define MAX_BUF_SZ PAGE_SIZE
-static int adi_open(struct inode *inode, struct file *file)
-{
- file->f_mode |= FMODE_UNSIGNED_OFFSET;
- return 0;
-}
-
static int read_mcd_tag(unsigned long addr)
{
long err;
@@ -196,7 +190,6 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
if (offset != file->f_pos) {
file->f_pos = offset;
- file->f_version = 0;
ret = offset;
}
@@ -206,9 +199,9 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
static const struct file_operations adi_fops = {
.owner = THIS_MODULE,
.llseek = adi_llseek,
- .open = adi_open,
.read = adi_read,
.write = adi_write,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static struct miscdevice adi_miscdev = {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 01e2e1ef82cf..b51d9e243f35 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -555,6 +555,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG
config HW_RANDOM_CN10K
tristate "Marvell CN10K Random Number Generator support"
depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM if ARCH_THUNDER
help
This driver provides support for the True Random Number
generator available in Marvell CN10K SoCs.
@@ -572,6 +573,20 @@ config HW_RANDOM_JH7110
To compile this driver as a module, choose M here.
The module will be called jh7110-trng.
+config HW_RANDOM_ROCKCHIP
+ tristate "Rockchip True Random Number Generator"
+ depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on HAS_IOMEM
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on some Rockchip SoCs, such as the RK3566 or RK3568.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rockchip-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 32549a1186dc..01f012eab440 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -48,4 +48,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o
obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index b03e80300627..aa2b135e3ee2 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
return ret;
ret = reset_control_reset(priv->reset);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
return ret;
+ }
if (priv->mask_interrupts) {
/* mask the interrupt */
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index c0d2f824769f..4c50efc46483 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
/* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
+ clk_disable_unprepare(drvdata->clk);
return -EBUSY;
}
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index aa993753ab12..1e3048f2bb38 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 94ee18a1120a..f01eb95bee31 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -147,33 +147,25 @@ static int mxc_rnga_probe(struct platform_device *pdev)
mxc_rng->rng.data_present = mxc_rnga_data_present;
mxc_rng->rng.data_read = mxc_rnga_data_read;
- mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
+ mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(mxc_rng->clk)) {
dev_err(&pdev->dev, "Could not get rng_clk!\n");
return PTR_ERR(mxc_rng->clk);
}
- err = clk_prepare_enable(mxc_rng->clk);
- if (err)
- return err;
-
mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxc_rng->mem)) {
err = PTR_ERR(mxc_rng->mem);
- goto err_ioremap;
+ return err;
}
err = hwrng_register(&mxc_rng->rng);
if (err) {
dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
- goto err_ioremap;
+ return err;
}
return 0;
-
-err_ioremap:
- clk_disable_unprepare(mxc_rng->clk);
- return err;
}
static void mxc_rnga_remove(struct platform_device *pdev)
@@ -181,8 +173,6 @@ static void mxc_rnga_remove(struct platform_device *pdev)
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rng->rng);
-
- clk_disable_unprepare(mxc_rng->clk);
}
static const struct of_device_id mxc_rnga_of_match[] = {
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
new file mode 100644
index 000000000000..289b385bbf05
--- /dev/null
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC
+ *
+ * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2022, Aurelien Jarno
+ * Authors:
+ * Lin Jinhan <troy.lin@rock-chips.com>
+ * Aurelien Jarno <aurelien@aurel32.net>
+ */
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define RK_RNG_AUTOSUSPEND_DELAY 100
+#define RK_RNG_MAX_BYTE 32
+#define RK_RNG_POLL_PERIOD_US 100
+#define RK_RNG_POLL_TIMEOUT_US 10000
+
+/*
+ * The TRNG samples one bit of the osc ring output every RK_RNG_SAMPLE_CNT
+ * cycles. The value is a tradeoff between speed and quality and has been
+ * adjusted to get a quality of ~900 (~87.5% of FIPS 140-2 successes).
+ */
+#define RK_RNG_SAMPLE_CNT 1000
+
+/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
+#define TRNG_RST_CTL 0x0004
+#define TRNG_RNG_CTL 0x0400
+#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4)
+#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4)
+#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4)
+#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2)
+#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2)
+#define TRNG_RNG_CTL_MASK GENMASK(15, 0)
+#define TRNG_RNG_CTL_ENABLE BIT(1)
+#define TRNG_RNG_CTL_START BIT(0)
+#define TRNG_RNG_SAMPLE_CNT 0x0404
+#define TRNG_RNG_DOUT 0x0410
+
+struct rk_rng {
+ struct hwrng rng;
+ void __iomem *base;
+ int clk_num;
+ struct clk_bulk_data *clk_bulks;
+};
+
+/* The mask in the upper 16 bits determines the bits that are updated */
+static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
+{
+ writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
+}
+
+static int rk_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ int ret;
+
+ /* start clocks */
+ ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
+ if (ret < 0) {
+ dev_err((struct device *) rk_rng->rng.priv,
+ "Failed to enable clks %d\n", ret);
+ return ret;
+ }
+
+ /* set the sample period */
+ writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
+
+ /* set osc ring speed and enable it */
+ rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
+ TRNG_RNG_CTL_OSC_RING_SPEED_0 |
+ TRNG_RNG_CTL_ENABLE,
+ TRNG_RNG_CTL_MASK);
+
+ return 0;
+}
+
+static void rk_rng_cleanup(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ /* stop TRNG */
+ rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);
+
+ /* stop clocks */
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+}
+
+static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
+ u32 reg;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv);
+ if (ret < 0)
+ return ret;
+
+ /* Start collecting random data */
+ rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);
+
+ ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
+ !(reg & TRNG_RNG_CTL_START),
+ RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ goto out;
+
+ /* Read random data stored in the registers */
+ memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
+out:
+ pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static int rk_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ struct rk_rng *rk_rng;
+ int ret;
+
+ rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
+ if (!rk_rng)
+ return -ENOMEM;
+
+ rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rk_rng->base))
+ return PTR_ERR(rk_rng->base);
+
+ rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
+ if (rk_rng->clk_num < 0)
+ return dev_err_probe(dev, rk_rng->clk_num,
+ "Failed to get clks property\n");
+
+ rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
+
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+
+ platform_set_drvdata(pdev, rk_rng);
+
+ rk_rng->rng.name = dev_driver_string(dev);
+ if (!IS_ENABLED(CONFIG_PM)) {
+ rk_rng->rng.init = rk_rng_init;
+ rk_rng->rng.cleanup = rk_rng_cleanup;
+ }
+ rk_rng->rng.read = rk_rng_read;
+ rk_rng->rng.priv = (unsigned long) dev;
+ rk_rng->rng.quality = 900;
+
+ pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n");
+
+ ret = devm_hwrng_register(dev, &rk_rng->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n");
+
+ return 0;
+}
+
+static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
+{
+ struct rk_rng *rk_rng = dev_get_drvdata(dev);
+
+ rk_rng_cleanup(&rk_rng->rng);
+
+ return 0;
+}
+
+static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
+{
+ struct rk_rng *rk_rng = dev_get_drvdata(dev);
+
+ return rk_rng_init(&rk_rng->rng);
+}
+
+static const struct dev_pm_ops rk_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
+ rk_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id rk_rng_dt_match[] = {
+ { .compatible = "rockchip,rk3568-rng", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, rk_rng_dt_match);
+
+static struct platform_driver rk_rng_driver = {
+ .driver = {
+ .name = "rockchip-rng",
+ .pm = &rk_rng_pm_ops,
+ .of_match_table = rk_rng_dt_match,
+ },
+ .probe = rk_rng_probe,
+};
+
+module_platform_driver(rk_rng_driver);
+
+MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver");
+MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
+MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_LICENSE("GPL");
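rk_rng_write_ctl() above depends on a write-mask register layout: the upper 16 bits of each 32-bit write act as a per-bit write enable for the lower 16 bits, so individual control bits can be flipped without a read-modify-write cycle. An illustrative sketch under that assumption (the rk3568_set_bits() helper is hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Update only @bits of a write-masked control register: bits set in
 * the upper half-word select which of the low 16 bits the hardware
 * latches; all other bits keep their current value.
 */
static void rk3568_set_bits(void __iomem *reg, u16 mask, u16 bits)
{
	writel(((u32)mask << 16) | bits, reg);
}

With that helper, enabling the TRNG without touching the length and speed fields would be a single rk3568_set_bits(base + TRNG_RNG_CTL, TRNG_RNG_CTL_ENABLE, TRNG_RNG_CTL_ENABLE) call.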
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 96ad571d041a..d04b391048fb 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -980,7 +980,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ipmi_ssif_unlock_cond(ssif_info, flags);
start_get(ssif_info);
} else {
- /* Wait a jiffie then request the next message */
+ /* Wait a jiffy then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
if (!ssif_info->stopping)
@@ -1368,8 +1368,20 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
rv = do_cmd(client, 2, msg, &len, resp);
if (rv)
rv = -ENODEV;
- else
+ else {
+ if (len < 3) {
+ rv = -ENODEV;
+ } else {
+ struct ipmi_device_id id;
+
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, len - 2, &id);
+ if (rv)
+ rv = -ENODEV; /* Error means a BMC probably isn't there. */
+ }
+ if (!rv && info)
strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ }
kfree(resp);
return rv;
}
@@ -1704,6 +1716,16 @@ static int ssif_probe(struct i2c_client *client)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
+ /*
+ * Send a get device id command and validate its response to
+ * make sure a valid BMC is there.
+ */
+ rv = ssif_detect(client, NULL);
+ if (rv) {
+ dev_err(&client->dev, "Not present\n");
+ goto out;
+ }
+
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -2085,6 +2107,7 @@ static const struct platform_device_id ssif_plat_ids[] = {
{ "dmi-ipmi-ssif", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, ssif_plat_ids);
static struct platform_driver ipmi_driver = {
.driver = {
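The MODULE_DEVICE_TABLE(platform, ssif_plat_ids) line added above exports the ID table into the module's alias list, so udev/modprobe can autoload the module when the dmi-ipmi-ssif platform device appears instead of requiring a manual load. The general shape, with a hypothetical foo driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct platform_device_id foo_ids[] = {
	{ "foo-device", 0 },
	{ }	/* sentinel */
};
/* Emits a MODALIAS=platform:foo-device alias for autoloading. */
MODULE_DEVICE_TABLE(platform, foo_ids);

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
	.probe = foo_probe,
	.id_table = foo_ids,
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Hypothetical example driver");
MODULE_LICENSE("GPL");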
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7c359cc406d5..169eed162a7f 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -643,6 +643,7 @@ static const struct file_operations __maybe_unused mem_fops = {
.get_unmapped_area = get_unmapped_area_mem,
.mmap_capabilities = memory_mmap_capabilities,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct file_operations null_fops = {
@@ -693,7 +694,7 @@ static const struct memdev {
umode_t mode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
- [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
+ [DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 },
#endif
[3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_DEVPORT
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 87fe61295ea1..23ee76bbb4aa 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -59,6 +59,7 @@
#ifdef CONFIG_VDSO_GETRANDOM
#include <vdso/getrandom.h>
#include <vdso/datapage.h>
+#include <vdso/vsyscall.h>
#endif
#include <asm/archrandom.h>
#include <asm/processor.h>
@@ -281,8 +282,15 @@ static void crng_reseed(struct work_struct *work)
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
+ *
+ * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
+ * operations are not supported on those architectures. This is safe
+ * because base_crng.generation is a 32-bit value. On big-endian
+ * architectures it will be stored in the upper 32 bits, but that's okay
+ * because the vDSO side only checks whether the value changed, without
+ * actually using or interpreting the value.
*/
- smp_store_release(&_vdso_rng_data.generation, next_gen + 1);
+ smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -735,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(_vdso_rng_data.is_ready, true);
+ WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
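The comment block above describes the handshake between crng_reseed() and the vDSO: the kernel publishes a new generation with release semantics after the key material is updated, and the reader orders its accesses with smp_rmb() before deciding whether to fall back to the syscall. A simplified sketch of that generation-counter pattern — not the actual vDSO code, and eliding the shared-page plumbing:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

static u64 generation;

/*
 * Writer side: update the shared state first, then bump the
 * generation with release semantics so a reader that observes the
 * new generation also observes the new state.  On 32-bit the store
 * only covers half of the u64, which is tolerable because readers
 * merely test the value for change.
 */
static void publish_state(void)
{
	smp_store_release((unsigned long *)&generation,
			  (unsigned long)generation + 1);
}

/*
 * Reader side: consume the state, then re-check the generation.
 * The smp_rmb() pairs with the release store above.
 */
static bool state_still_current(unsigned long seen)
{
	smp_rmb();
	return READ_ONCE(*(unsigned long *)&generation) == seen;
}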
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 45ca33b3dcb2..81348487c125 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -133,7 +133,7 @@ static void st33zp24_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id st33zp24_i2c_id[] = {
- {TPM_ST33_I2C, 0},
+ { TPM_ST33_I2C },
{}
};
MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 30b4c288c1bb..c3fbbf4d3db7 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret)
ret = tpm2_commit_space(chip, space, buf, &len);
+ else
+ tpm2_flush_space(chip);
out_rc:
return ret ? ret : len;
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index d3521aadd43e..44f60730cff4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -1362,4 +1362,5 @@ int tpm2_sessions_init(struct tpm_chip *chip)
return rc;
}
+EXPORT_SYMBOL(tpm2_sessions_init);
#endif /* CONFIG_TCG_TPM2_HMAC */
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 4892d491da8d..25a66870c165 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -169,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
struct tpm_space *space = &chip->work_space;
int i;
+ if (!space)
+ return;
+
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context(chip, space->context_tbl[i]);
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 301a95b3734f..d1d27fdfe523 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -186,7 +186,7 @@ static void i2c_atmel_remove(struct i2c_client *client)
}
static const struct i2c_device_id i2c_atmel_id[] = {
- {I2C_DRIVER_NAME, 0},
+ { I2C_DRIVER_NAME },
{}
};
MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index 9511c0d50185..6cd07dd34507 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -375,7 +375,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tpm_tis_i2c_id[] = {
- { "tpm_tis_i2c", 0 },
+ { "tpm_tis_i2c" },
{}
};
MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index efa12ac2b3f2..54ece9207055 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -1,6 +1,8 @@
CONFIG_KUNIT=y
+CONFIG_OF=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
+CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
CONFIG_CLK_GATE_KUNIT_TEST=y
CONFIG_CLK_FD_KUNIT_TEST=y
CONFIG_UML_PCI_OVER_VIRTIO=n
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 983ef4f36d8c..260961668e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -509,9 +509,20 @@ config CLK_KUNIT_TEST
tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select OF_OVERLAY if OF
+ select DTC
help
Kunit tests for the common clock framework.
+config CLK_FIXED_RATE_KUNIT_TEST
+ tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select OF_OVERLAY if OF
+ select DTC
+ help
+ KUnit tests for the basic fixed rate clk type.
+
config CLK_GATE_KUNIT_TEST
tristate "Basic gate type Kunit test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index f793a16cad40..9b783c3e5d2f 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -2,10 +2,14 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
-obj-$(CONFIG_CLK_KUNIT_TEST) += clk_test.o
+obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o
+clk-test-y := clk_test.o \
+ kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_CLK_FIXED_RATE_KUNIT_TEST) += clk-fixed-rate-test.o
+clk-fixed-rate-test-y := clk-fixed-rate_test.o kunit_clk_fixed_rate_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_CLK_GATE_KUNIT_TEST) += clk-gate_test.o
obj-$(CONFIG_COMMON_CLK) += clk-multiplier.o
@@ -18,6 +22,11 @@ ifeq ($(CONFIG_OF), y)
obj-$(CONFIG_COMMON_CLK) += clk-conf.o
endif
+# KUnit specific helpers
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_KUNIT) += clk_kunit_helpers.o
+endif
+
# hardware specific clock types
# please keep this section sorted lexicographically by file path name
obj-$(CONFIG_COMMON_CLK_APPLE_NCO) += clk-apple-nco.o
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 89061b85e7d2..8e3684ba2c74 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o dt-compat.
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o dt-compat.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o dt-compat.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index ff65f7b916f0..fda041102224 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,9 +23,6 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define FCORE_MIN (600000000)
-#define FCORE_MAX (1200000000)
-
#define PLL_MAX_ID 7
struct sam9x60_pll_core {
@@ -76,9 +73,15 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ unsigned long freq;
- return parent_rate * (frac->mul + 1) +
+ freq = parent_rate * (frac->mul + 1) +
DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22));
+
+ if (core->layout->div2)
+ freq >>= 1;
+
+ return freq;
}
static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
@@ -194,7 +197,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
unsigned long nmul = 0;
unsigned long nfrac = 0;
- if (rate < FCORE_MIN || rate > FCORE_MAX)
+ if (rate < core->characteristics->core_output[0].min ||
+ rate > core->characteristics->core_output[0].max)
return -ERANGE;
/*
@@ -214,7 +218,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
}
	/* Check if the resulting rate is valid. */
- if (tmprate < FCORE_MIN || tmprate > FCORE_MAX)
+ if (tmprate < core->characteristics->core_output[0].min ||
+ tmprate > core->characteristics->core_output[0].max)
return -ERANGE;
if (update) {
@@ -433,6 +438,12 @@ static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1));
}
+static unsigned long sam9x60_fixed_div_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate >> 1;
+}
+
static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
unsigned long *parent_rate,
unsigned long rate)
@@ -607,6 +618,16 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.restore_context = sam9x60_div_pll_restore_context,
};
+static const struct clk_ops sam9x60_fixed_div_pll_ops = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
+ .round_rate = sam9x60_div_pll_round_rate,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
+};
+
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
@@ -669,7 +690,8 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
goto free;
}
- ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ ret = sam9x60_frac_pll_compute_mul_frac(&frac->core,
+ characteristics->core_output[0].min,
parent_rate, true);
if (ret < 0) {
hw = ERR_PTR(ret);
@@ -725,10 +747,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
else
init.parent_names = &parent_name;
init.num_parents = 1;
- if (flags & CLK_SET_RATE_GATE)
+
+ if (layout->div2)
+ init.ops = &sam9x60_fixed_div_pll_ops;
+ else if (flags & CLK_SET_RATE_GATE)
init.ops = &sam9x60_div_pll_ops;
else
init.ops = &sam9x60_div_pll_ops_chg;
+
init.flags = flags;
div->core.id = id;
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a32dc2111b90..f5a5f9ba7634 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -563,9 +563,10 @@ of_at91_clk_pll_get_characteristics(struct device_node *np)
if (num_cells < 2 || num_cells > 4)
return NULL;
- if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ num_output = of_property_count_u32_elems(np, "atmel,pll-clk-output-ranges");
+ if (num_output <= 0)
return NULL;
- num_output = tmp / (sizeof(u32) * num_cells);
+ num_output /= num_cells;
characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
if (!characteristics)
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 0f52e80bcd49..4fb29ca111f7 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -64,6 +64,7 @@ struct clk_pll_layout {
u8 frac_shift;
u8 div_shift;
u8 endiv_shift;
+ u8 div2;
};
extern const struct clk_pll_layout at91rm9200_pll_layout;
@@ -75,6 +76,7 @@ struct clk_pll_characteristics {
struct clk_range input;
int num_output;
const struct clk_range *output;
+ const struct clk_range *core_output;
u16 *icpll;
u8 *out;
u8 upll : 1;
@@ -119,6 +121,22 @@ struct at91_clk_pms {
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
+
+#define PMC_INIT_TABLE(_table, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) \
+ (_table)[_i] = _i; \
+ } while (0)
+
+#define PMC_FILL_TABLE(_to, _from, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) { \
+ (_to)[_i] = (_from)[_i]; \
+ } \
+ } while (0)
+
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
unsigned int nperiph, unsigned int ngck,
unsigned int npck);
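The PMC_INIT_TABLE()/PMC_FILL_TABLE() helpers added above (hoisted from the sama7g5 driver, as the later hunks show) build the mux tables passed to at91_clk_register_generated(): the first writes an identity mapping for the common parents, the second appends the per-clock PLL parent positions. A sketch of the result for a generated clock with four common parents plus one PLL parent at mux position 8, mirroring the sam9x7 setup code:

#include <linux/types.h>

#include "pmc.h"	/* kernel-internal header providing the two macros */

static void build_example_mux_table(void)
{
	static const char pp_mux_table[] = { 8 };
	u32 mux_table[5];

	PMC_INIT_TABLE(mux_table, 4);			/* { 0, 1, 2, 3, ? } */
	PMC_FILL_TABLE(&mux_table[4], pp_mux_table, 1);	/* { 0, 1, 2, 3, 8 } */
}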
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index e309cbf3cb9a..db6db9e2073e 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -26,10 +26,16 @@ static const struct clk_range plla_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
static const struct clk_pll_characteristics plla_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
+ .core_output = core_outputs,
};
static const struct clk_range upll_outputs[] = {
@@ -40,6 +46,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
+ .core_output = core_outputs,
.upll = true,
};
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
new file mode 100644
index 000000000000..cbb8b220f16b
--- /dev/null
+++ b/drivers/clk/at91/sam9x7.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAM9X7 PMC code.
+ *
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Varshini Rajendran <varshini.rajendran@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(mck_lock);
+
+/**
+ * enum pll_ids - PLL clocks identifiers
+ * @PLL_ID_PLLA: PLLA identifier
+ * @PLL_ID_UPLL: UPLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_PLLA_DIV2: PLLA DIV2 identifier
+ * @PLL_ID_MAX: Max PLL Identifier
+ */
+enum pll_ids {
+ PLL_ID_PLLA,
+ PLL_ID_UPLL,
+ PLL_ID_AUDIO,
+ PLL_ID_LVDS,
+ PLL_ID_PLLA_DIV2,
+ PLL_ID_MAX,
+};
+
+/**
+ * enum pll_type - PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV,
+};
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 32000000, .max = 266666667 },
+ .divisors = { 1, 2, 4, 3, 5},
+ .have_div3_pres = 1,
+};
+
+static const struct clk_master_layout sam9x7_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range plla_core_outputs[] = {
+ { .min = 375000000, .max = 1600000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 400000000, .max = 800000000 },
+};
+
+static const struct clk_range audiopll_core_outputs[] = {
+ { .min = 400000000, .max = 800000000 },
+};
+
+static const struct clk_range plladiv2_core_outputs[] = {
+ { .min = 375000000, .max = 1600000000 },
+};
+
+/* Fractional PLL output range. */
+static const struct clk_range plla_outputs[] = {
+ { .min = 732421, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 300000000, .max = 600000000 },
+};
+
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 10000000, .max = 800000000 },
+};
+
+static const struct clk_range audiopll_outputs[] = {
+ { .min = 10000000, .max = 800000000 },
+};
+
+static const struct clk_range plladiv2_outputs[] = {
+ { .min = 366210, .max = 400000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .core_output = plla_core_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+};
+
+static const struct clk_pll_characteristics audiopll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(audiopll_outputs),
+ .output = audiopll_outputs,
+ .core_output = audiopll_core_outputs,
+};
+
+static const struct clk_pll_characteristics plladiv2_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plladiv2_outputs),
+ .output = plladiv2_outputs,
+ .core_output = plladiv2_core_outputs,
+};
+
+/* Layout for fractional PLL ID PLLA. */
+static const struct clk_pll_layout plla_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+ .div2 = 1,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIV PLLs. */
+static const struct clk_pll_layout pll_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIV PLL ID PLLADIV2. */
+static const struct clk_pll_layout plladiv2_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+ .div2 = 1,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_divio_layout = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @p: clock parent
+ * @l: clock layout
+ * @t: clock type
+ * @c: pll characteristics
+ * @f: clock flags
+ * @eid: export index in sam9x7->chws[] array
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ const struct clk_pll_layout *l;
+ u8 t;
+ const struct clk_pll_characteristics *c;
+ unsigned long f;
+ u8 eid;
+} sam9x7_plls[][3] = {
+ [PLL_ID_PLLA] = {
+ {
+ .n = "plla_fracck",
+ .p = "mainck",
+ .l = &plla_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ /*
+			 * This feeds plla_divpmcck, which feeds the CPU. It
+			 * should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plla_characteristics,
+ },
+
+ {
+ .n = "plla_divpmcck",
+ .p = "plla_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+			/* This feeds the CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_PLLACK,
+ .c = &plla_characteristics,
+ },
+ },
+
+ [PLL_ID_UPLL] = {
+ {
+ .n = "upll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ .c = &upll_characteristics,
+ },
+
+ {
+ .n = "upll_divpmcck",
+ .p = "upll_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ .c = &upll_characteristics,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ {
+ .n = "audiopll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &audiopll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "audiopll_divpmcck",
+ .p = "audiopll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOPMCPLL,
+ .t = PLL_TYPE_DIV,
+ },
+
+ {
+ .n = "audiopll_diviock",
+ .p = "audiopll_fracck",
+ .l = &pll_divio_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOIOPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ {
+ .n = "lvdspll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "lvdspll_divpmcck",
+ .p = "lvdspll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &lvdspll_characteristics,
+ .eid = PMC_LVDSPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_PLLA_DIV2] = {
+ {
+ .n = "plla_div2pmcck",
+ .p = "plla_fracck",
+ .l = &plladiv2_divpmc_layout,
+ /*
+ * This may feed critical parts of the system like timers.
+ * It should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plladiv2_characteristics,
+ .eid = PMC_PLLADIV2,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+};
+
+static const struct clk_programmable_layout sam9x7_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+static const struct clk_pcr_layout sam9x7_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+ unsigned long flags;
+} sam9x7_systemck[] = {
+ /*
+	 * ddrck feeds the DDR controller and is enabled by the bootloader, so
+	 * we need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+/*
+ * Peripheral clocks description
+ * @n: clock name
+ * @f: clock flags
+ * @id: peripheral id
+ */
+static const struct {
+ char *n;
+ unsigned long f;
+ u8 id;
+} sam9x7_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "flex0_clk", .id = 5, },
+ { .n = "flex1_clk", .id = 6, },
+ { .n = "flex2_clk", .id = 7, },
+ { .n = "flex3_clk", .id = 8, },
+ { .n = "flex6_clk", .id = 9, },
+ { .n = "flex7_clk", .id = 10, },
+ { .n = "flex8_clk", .id = 11, },
+ { .n = "sdmmc0_clk", .id = 12, },
+ { .n = "flex4_clk", .id = 13, },
+ { .n = "flex5_clk", .id = 14, },
+ { .n = "flex9_clk", .id = 15, },
+ { .n = "flex10_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcd_clk", .id = 25, },
+ { .n = "sdmmc1_clk", .id = 26, },
+ { .n = "ssc_clk", .id = 28, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { .n = "flex11_clk", .id = 32, },
+ { .n = "flex12_clk", .id = 33, },
+ { .n = "i2s_clk", .id = 34, },
+ { .n = "qspi_clk", .id = 35, },
+ { .n = "gfx2d_clk", .id = 36, },
+ { .n = "pit64b0_clk", .id = 37, },
+ { .n = "trng_clk", .id = 38, },
+ { .n = "aes_clk", .id = 39, },
+ { .n = "tdes_clk", .id = 40, },
+ { .n = "sha_clk", .id = 41, },
+ { .n = "classd_clk", .id = 42, },
+ { .n = "isi_clk", .id = 43, },
+ { .n = "pioD_clk", .id = 44, },
+ { .n = "tcb1_clk", .id = 45, },
+ { .n = "dbgu_clk", .id = 47, },
+ /*
+	 * mpddr_clk feeds the DDR controller and is enabled by the bootloader,
+	 * so we need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 49, .f = CLK_IS_CRITICAL },
+ { .n = "csi2dc_clk", .id = 52, },
+ { .n = "csi4l_clk", .id = 53, },
+ { .n = "dsi4l_clk", .id = 54, },
+ { .n = "lvdsc_clk", .id = 56, },
+ { .n = "pit64b1_clk", .id = 58, },
+ { .n = "puf_clk", .id = 59, },
+ { .n = "gmactsu_clk", .id = 67, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sam9x7_gck[] = {
+ {
+ .n = "flex0_gclk",
+ .id = 5,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex1_gclk",
+ .id = 6,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex2_gclk",
+ .id = 7,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex3_gclk",
+ .id = 8,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex6_gclk",
+ .id = 9,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex7_gclk",
+ .id = 10,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex8_gclk",
+ .id = 11,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc0_gclk",
+ .id = 12,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex4_gclk",
+ .id = 13,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex5_gclk",
+ .id = 14,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex9_gclk",
+ .id = 15,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex10_gclk",
+ .id = 16,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb0_gclk",
+ .id = 17,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "adc_gclk",
+ .id = 19,
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "lcd_gclk",
+ .id = 25,
+ .r = { .max = 75000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc1_gclk",
+ .id = 26,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan0_gclk",
+ .id = 29,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan1_gclk",
+ .id = 30,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex11_gclk",
+ .id = 32,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex12_gclk",
+ .id = 33,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "i2s_gclk",
+ .id = 34,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "qspi_gclk",
+ .id = 35,
+ .r = { .max = 200000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b0_gclk",
+ .id = 37,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "classd_gclk",
+ .id = 42,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb1_gclk",
+ .id = 45,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "dbgu_gclk",
+ .id = 47,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mipiphy_gclk",
+ .id = 55,
+ .r = { .max = 27000000 },
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b1_gclk",
+ .id = 58,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "gmac_gclk",
+ .id = 67,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+};
+
+static void __init sam9x7_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sam9x7_pmc;
+ const char *parent_names[9];
+ void **clk_mux_buffer = NULL;
+ int clk_mux_buffer_size = 0;
+ struct clk_hw *main_osc_hw;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i, j;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sam9x7_pmc = pmc_data_allocate(PMC_LVDSPLL + 1,
+ nck(sam9x7_systemck),
+ nck(sam9x7_periphck),
+ nck(sam9x7_gck), 8);
+ if (!sam9x7_pmc)
+ return;
+
+ clk_mux_buffer = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sam9x7_gck)),
+ GFP_KERNEL);
+ if (!clk_mux_buffer)
+ goto err_free;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+ main_osc_hw = hw;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < 3; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sam9x7_plls[i][j].n)
+ continue;
+
+ switch (sam9x7_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ if (!strcmp(sam9x7_plls[i][j].p, "mainck"))
+ parent_hw = sam9x7_pmc->chws[PMC_MAIN];
+ else if (!strcmp(sam9x7_plls[i][j].p, "main_osc"))
+ parent_hw = main_osc_hw;
+ else
+ parent_hw = __clk_get_hw(of_clk_get_by_name
+ (np, sam9x7_plls[i][j].p));
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p,
+ parent_hw, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p, NULL, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f, 0);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ if (sam9x7_plls[i][j].eid)
+ sam9x7_pmc->chws[sam9x7_plls[i][j].eid] = hw;
+ }
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plla_divpmcck";
+ parent_names[3] = "upll_divpmcck";
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names, NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres", NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plla_divpmcck";
+ parent_names[1] = "upll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ parent_names[4] = "plla_divpmcck";
+ parent_names[5] = "upll_divpmcck";
+ parent_names[6] = "audiopll_divpmcck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, NULL, 7, i,
+ &sam9x7_programmable_layout,
+ NULL);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sam9x7_systemck[i].n,
+ sam9x7_systemck[i].p, NULL,
+ sam9x7_systemck[i].id,
+ sam9x7_systemck[i].flags);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->shws[sam9x7_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_periphck[i].n,
+ "masterck_div", NULL,
+ sam9x7_periphck[i].id,
+ &range, INT_MIN,
+ sam9x7_periphck[i].f);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->phws[sam9x7_periphck[i].id] = hw;
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ for (i = 0; i < ARRAY_SIZE(sam9x7_gck); i++) {
+ u8 num_parents = 4 + sam9x7_gck[i].pp_count;
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sam9x7_gck[i].pp_mux_table,
+ sam9x7_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_names[4], sam9x7_gck[i].pp,
+ sam9x7_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_gck[i].n,
+ parent_names, NULL, mux_table,
+ num_parents,
+ sam9x7_gck[i].id,
+ &sam9x7_gck[i].r,
+ sam9x7_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->ghws[sam9x7_gck[i].id] = hw;
+ clk_mux_buffer[clk_mux_buffer_size++] = mux_table;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x7_pmc);
+ kfree(clk_mux_buffer);
+
+ return;
+
+err_free:
+ if (clk_mux_buffer) {
+ for (i = 0; i < clk_mux_buffer_size; i++)
+ kfree(clk_mux_buffer[i]);
+ kfree(clk_mux_buffer);
+ }
+ kfree(sam9x7_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sam9x7_pmc, "microchip,sam9x7-pmc", sam9x7_pmc_setup);
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 91b5c6f14819..8385badc1c70 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -16,21 +16,6 @@
#include "pmc.h"
-#define SAMA7G5_INIT_TABLE(_table, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) \
- (_table)[_i] = _i; \
- } while (0)
-
-#define SAMA7G5_FILL_TABLE(_to, _from, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) { \
- (_to)[_i] = (_from)[_i]; \
- } \
- } while (0)
-
static DEFINE_SPINLOCK(pmc_pll_lock);
static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
@@ -66,6 +51,7 @@ enum pll_component_id {
PLL_COMPID_FRAC,
PLL_COMPID_DIV0,
PLL_COMPID_DIV1,
+ PLL_COMPID_MAX,
};
/*
@@ -116,11 +102,17 @@ static const struct clk_range pll_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
/* CPU PLL characteristics. */
static const struct clk_pll_characteristics cpu_pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
+ .core_output = core_outputs,
};
/* PLL characteristics. */
@@ -128,6 +120,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
+ .core_output = core_outputs,
};
/*
@@ -165,7 +158,7 @@ static struct sama7g5_pll {
u8 t;
u8 eid;
u8 safe_div;
-} sama7g5_plls[][PLL_ID_MAX] = {
+} sama7g5_plls[][PLL_COMPID_MAX] = {
[PLL_ID_CPU] = {
[PLL_COMPID_FRAC] = {
.n = "cpupll_fracck",
@@ -1038,7 +1031,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) {
- for (j = 0; j < 3; j++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n)
@@ -1112,17 +1105,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
- sama7g5_mckx[i].ep_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
+ sama7g5_mckx[i].ep_count);
for (j = 0; j < sama7g5_mckx[i].ep_count; j++) {
u8 pll_id = sama7g5_mckx[i].ep[j].pll_id;
u8 pll_compid = sama7g5_mckx[i].ep[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_mckx[i].ep_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
num_parents, NULL, parent_hws, mux_table,
@@ -1208,17 +1201,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
- sama7g5_gck[i].pp_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
+ sama7g5_gck[i].pp_count);
for (j = 0; j < sama7g5_gck[i].pp_count; j++) {
u8 pll_id = sama7g5_gck[i].pp[j].pll_id;
u8 pll_compid = sama7g5_gck[i].pp[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama7g5_pcr_layout,
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 2334e6c334cf..9667ce898428 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -215,7 +215,7 @@ static struct platform_driver i2s_pll_clk_driver = {
.of_match_table = i2s_pll_clk_id,
},
.probe = i2s_pll_clk_probe,
- .remove_new = i2s_pll_clk_remove,
+ .remove = i2s_pll_clk_remove,
};
module_platform_driver(i2s_pll_clk_driver);
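This and the following .remove_new → .remove hunks are mechanical renames: .remove_new was the transitional field name for the void-returning remove callback, and with the tree fully converted the field reverts to its old name. The callback shape, sketched for a hypothetical driver:

#include <linux/platform_device.h>

/*
 * Remove callbacks now return void; an error at remove time cannot
 * be handled meaningfully anyway, so nothing is reported.
 */
static void foo_remove(struct platform_device *pdev)
{
	/* release resources not already handled by devres */
}

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
	.remove = foo_remove,
};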
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index 3cb235df9d37..e79720e85685 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
- .remove_new = clk_dvp_remove,
+ .remove = clk_dvp_remove,
.driver = {
.name = "brcm2711-dvp",
.of_match_table = clk_dvp_dt_ids,
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 84f2af736ee8..83ef41d618be 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
goto err_free_ilp;
}
- ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+ ilp->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(ilp->regmap)) {
err = PTR_ERR(ilp->regmap);
goto err_free_ilp;
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
index 36c7b302e396..d6d857474436 100644
--- a/drivers/clk/bcm/clk-bcm63xx-gate.c
+++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
@@ -567,7 +567,7 @@ static const struct of_device_id clk_bcm63xx_dt_ids[] = {
static struct platform_driver clk_bcm63xx = {
.probe = clk_bcm63xx_probe,
- .remove_new = clk_bcm63xx_remove,
+ .remove = clk_bcm63xx_remove,
.driver = {
.name = "bcm63xx-clock",
.of_match_table = clk_bcm63xx_dt_ids,
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 4d411408e4af..a18a8768feb4 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -458,7 +458,7 @@ static struct platform_driver raspberrypi_clk_driver = {
.of_match_table = raspberrypi_clk_match,
},
.probe = raspberrypi_clk_probe,
- .remove_new = raspberrypi_clk_remove,
+ .remove = raspberrypi_clk_remove,
};
module_platform_driver(raspberrypi_clk_driver);
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 058420562020..303a0bb26e54 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/printk.h>
+#include <linux/slab.h>
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
{
@@ -81,11 +82,44 @@ err:
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
- int rc, index = 0;
+ int rc, count, count_64, index;
struct clk *clk;
- u32 rate;
+ u64 *rates_64 __free(kfree) = NULL;
+ u32 *rates __free(kfree) = NULL;
+
+ count = of_property_count_u32_elems(node, "assigned-clock-rates");
+ count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64");
+ if (count_64 > 0) {
+ count = count_64;
+ rates_64 = kcalloc(count, sizeof(*rates_64), GFP_KERNEL);
+ if (!rates_64)
+ return -ENOMEM;
+
+ rc = of_property_read_u64_array(node,
+ "assigned-clock-rates-u64",
+ rates_64, count);
+ } else if (count > 0) {
+ rates = kcalloc(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node, "assigned-clock-rates",
+ rates, count);
+ } else {
+ return 0;
+ }
+
+ if (rc)
+ return rc;
+
+ for (index = 0; index < count; index++) {
+ unsigned long rate;
+
+ if (rates_64)
+ rate = rates_64[index];
+ else
+ rate = rates[index];
- of_property_for_each_u32(node, "assigned-clock-rates", rate) {
if (rate) {
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
@@ -112,12 +146,11 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
+ pr_err("clk: couldn't set %s clk rate to %lu (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
}
- index++;
}
return 0;
}
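Besides switching to whole-array reads (which lets assigned-clock-rates-u64 express rates that do not fit in 32 bits), the rework leans on scope-based cleanup: declaring the buffers with __free(kfree) makes kfree() run automatically when they go out of scope, which is why none of the early returns above needs an explicit free. A minimal sketch of that idiom, with a made-up parse_rates() helper:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/types.h>

static int parse_rates(int count)
{
	/* Freed automatically on every return path below. */
	u32 *rates __free(kfree) = kcalloc(count, sizeof(*rates), GFP_KERNEL);

	if (!rates)
		return -ENOMEM;

	/* ... read the property into rates[] and apply each one ... */

	return 0;
}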
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 90e6078fb6e1..82ae1f26e634 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -99,6 +99,34 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = __devm_clk_get(dev, id, clk_get_optional, NULL,
+ clk_disable_unprepare);
+ if (IS_ERR(clk))
+ return ERR_CAST(clk);
+
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ goto out_put_clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_put_clk;
+
+ return clk;
+
+out_put_clk:
+ devm_clk_put(dev, clk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
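The new helper above wraps four steps — clk_get_optional(), clk_set_rate(), clk_prepare_enable(), and the devres teardown that disables, unprepares and puts the clock on detach. A probe might use it like this (the "ref" clock name and 25 MHz rate are invented for illustration; like the other optional getters, it returns NULL when the clock is absent):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *refclk;

	/* NULL (not an error) if no "ref" clock is described. */
	refclk = devm_clk_get_optional_enabled_with_rate(&pdev->dev,
							 "ref", 25000000);
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	return 0;
}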
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fe0500a1af3e..8fba63fc70c5 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -405,7 +405,7 @@ static struct platform_driver of_fixed_factor_clk_driver = {
.of_match_table = of_fixed_factor_clk_ids,
},
.probe = of_fixed_factor_clk_probe,
- .remove_new = of_fixed_factor_clk_remove,
+ .remove = of_fixed_factor_clk_remove,
};
builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 0e08cb22c196..3bfcf4cd98a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -91,7 +91,7 @@ static struct platform_driver of_fixed_mmio_clk_driver = {
.of_match_table = of_fixed_mmio_clk_ids,
},
.probe = of_fixed_mmio_clk_probe,
- .remove_new = of_fixed_mmio_clk_remove,
+ .remove = of_fixed_mmio_clk_remove,
};
module_platform_driver(of_fixed_mmio_clk_driver);
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 3481eb8cdeb3..6b4f76b9c4da 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -232,7 +232,7 @@ static struct platform_driver of_fixed_clk_driver = {
.of_match_table = of_fixed_clk_ids,
},
.probe = of_fixed_clk_probe,
- .remove_new = of_fixed_clk_remove,
+ .remove = of_fixed_clk_remove,
};
builtin_platform_driver(of_fixed_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-rate_test.c b/drivers/clk/clk-fixed-rate_test.c
new file mode 100644
index 000000000000..0e04c10a21aa
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for clk fixed rate basic type
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+
+#include "clk-fixed-rate_test.h"
+
+/**
+ * struct clk_hw_fixed_rate_kunit_params - Parameters to pass to __clk_hw_register_fixed_rate()
+ * @dev: device registering clk
+ * @np: device_node of device registering clk
+ * @name: name of clk
+ * @parent_name: parent name of clk
+ * @parent_hw: clk_hw pointer to parent of clk
+ * @parent_data: parent_data describing parent of clk
+ * @flags: clk framework flags
+ * @fixed_rate: frequency of clk
+ * @fixed_accuracy: accuracy of clk
+ * @clk_fixed_flags: fixed rate specific clk flags
+ */
+struct clk_hw_fixed_rate_kunit_params {
+ struct device *dev;
+ struct device_node *np;
+ const char *name;
+ const char *parent_name;
+ const struct clk_hw *parent_hw;
+ const struct clk_parent_data *parent_data;
+ unsigned long flags;
+ unsigned long fixed_rate;
+ unsigned long fixed_accuracy;
+ unsigned long clk_fixed_flags;
+};
+
+static int
+clk_hw_register_fixed_rate_kunit_init(struct kunit_resource *res, void *context)
+{
+ struct clk_hw_fixed_rate_kunit_params *params = context;
+ struct clk_hw *hw;
+
+ hw = __clk_hw_register_fixed_rate(params->dev, params->np,
+ params->name,
+ params->parent_name,
+ params->parent_hw,
+ params->parent_data,
+ params->flags,
+ params->fixed_rate,
+ params->fixed_accuracy,
+ params->clk_fixed_flags,
+ false);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ res->data = hw;
+
+ return 0;
+}
+
+static void clk_hw_register_fixed_rate_kunit_exit(struct kunit_resource *res)
+{
+ struct clk_hw *hw = res->data;
+
+ clk_hw_unregister_fixed_rate(hw);
+}
+
+/**
+ * clk_hw_register_fixed_rate_kunit() - Test managed __clk_hw_register_fixed_rate()
+ * @test: The test context
+ * @params: Arguments to __clk_hw_register_fixed_rate()
+ *
+ * Return: Registered fixed rate clk_hw or ERR_PTR on failure
+ */
+static struct clk_hw *
+clk_hw_register_fixed_rate_kunit(struct kunit *test,
+ struct clk_hw_fixed_rate_kunit_params *params)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_alloc_resource(test,
+ clk_hw_register_fixed_rate_kunit_init,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, params);
+ if (!hw)
+ return ERR_PTR(-EINVAL);
+
+ return hw;
+}
+
+/**
+ * clk_hw_unregister_fixed_rate_kunit() - Test managed clk_hw_unregister_fixed_rate()
+ * @test: The test context
+ * @hw: fixed rate clk to unregister upon test completion
+ *
+ * Automatically unregister @hw when @test is complete via
+ * clk_hw_unregister_fixed_rate().
+ *
+ * Return: 0 on success or negative errno on failure
+ */
+static int clk_hw_unregister_fixed_rate_kunit(struct kunit *test, struct clk_hw *hw)
+{
+ if (!kunit_alloc_resource(test, NULL,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, hw))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk registered with
+ * clk_hw_register_fixed_rate() gets the proper frequency.
+ */
+static void clk_fixed_rate_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_rate = 230000;
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", NULL, 0, fixed_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk registered via
+ * clk_hw_register_fixed_rate_with_accuracy() gets the proper accuracy.
+ */
+static void clk_fixed_rate_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_accuracy = 5000;
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ NULL, 0, 0,
+ fixed_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk without any parent */
+static struct kunit_case clk_fixed_rate_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_rate_test),
+ KUNIT_CASE(clk_fixed_rate_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_suite = {
+ .name = "clk_fixed_rate",
+ .test_cases = clk_fixed_rate_test_cases,
+};
+
+/*
+ * Test that clk_get_parent() on a fixed rate clk gets the proper parent.
+ */
+static void clk_fixed_rate_parent_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *expected_parent, *actual_parent;
+ struct clk *clk;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ expected_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ actual_parent = clk_get_parent(clk);
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk ignores the parent rate.
+ */
+static void clk_fixed_rate_parent_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_rate = 1405;
+ const unsigned long parent_rate = 90402;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_rate = parent_rate,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0,
+ expected_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk ignores the parent accuracy.
+ */
+static void clk_fixed_rate_parent_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_accuracy = 900;
+ const unsigned long parent_accuracy = 24000;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_accuracy = parent_accuracy,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ parent_name, 0, 0,
+ expected_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk with a parent */
+static struct kunit_case clk_fixed_rate_parent_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_parent_test),
+ KUNIT_CASE(clk_fixed_rate_parent_rate_test),
+ KUNIT_CASE(clk_fixed_rate_parent_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_parent_suite = {
+ .name = "clk_fixed_rate_parent",
+ .test_cases = clk_fixed_rate_parent_test_cases,
+};
+
+struct clk_fixed_rate_of_test_context {
+ struct device *dev;
+ struct platform_driver pdrv;
+ struct completion probed;
+};
+
+static inline struct clk_fixed_rate_of_test_context *
+pdev_to_clk_fixed_rate_of_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct clk_fixed_rate_of_test_context,
+ pdrv);
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * rate.
+ */
+static void clk_fixed_rate_of_probe_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_FREQUENCY, clk_get_rate(clk));
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * accuracy.
+ */
+static void clk_fixed_rate_of_accuracy_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_ACCURACY, clk_get_accuracy(clk));
+}
+
+static struct kunit_case clk_fixed_rate_of_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_of_probe_test),
+ KUNIT_CASE(clk_fixed_rate_of_accuracy_test),
+ {}
+};
+
+static int clk_fixed_rate_of_test_probe(struct platform_device *pdev)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+
+ ctx = pdev_to_clk_fixed_rate_of_test_context(pdev);
+ ctx->dev = &pdev->dev;
+ complete(&ctx->probed);
+
+ return 0;
+}
+
+static int clk_fixed_rate_of_init(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,single-clk-consumer" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_fixed_rate_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->pdrv.probe = clk_fixed_rate_of_test_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+ init_completion(&ctx->probed);
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&ctx->probed, HZ));
+
+ return 0;
+}
+
+static struct kunit_suite clk_fixed_rate_of_suite = {
+ .name = "clk_fixed_rate_of",
+ .init = clk_fixed_rate_of_init,
+ .test_cases = clk_fixed_rate_of_cases,
+};
+
+kunit_test_suites(
+ &clk_fixed_rate_suite,
+ &clk_fixed_rate_of_suite,
+ &clk_fixed_rate_parent_suite,
+);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for clk fixed rate basic type");
diff --git a/drivers/clk/clk-fixed-rate_test.h b/drivers/clk/clk-fixed-rate_test.h
new file mode 100644
index 000000000000..e0d28e5b6081
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_FIXED_RATE_TEST_H
+#define _CLK_FIXED_RATE_TEST_H
+
+#define TEST_FIXED_FREQUENCY 50000000
+#define TEST_FIXED_ACCURACY 300
+
+#endif
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 99b271c1278a..c997e7491996 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -1405,16 +1405,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->dev = &spi->dev;
- lmk->oscin = devm_clk_get(lmk->dev, "oscin");
+ lmk->oscin = devm_clk_get_enabled(lmk->dev, "oscin");
if (IS_ERR(lmk->oscin)) {
dev_err(lmk->dev, "failed to get oscin clock\n");
return PTR_ERR(lmk->oscin);
}
- ret = clk_prepare_enable(lmk->oscin);
- if (ret)
- return ret;
-
lmk->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
@@ -1422,14 +1418,14 @@ static int lmk04832_probe(struct spi_device *spi)
sizeof(struct lmk_dclk), GFP_KERNEL);
if (!lmk->dclk) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels,
sizeof(*lmk->clkout), GFP_KERNEL);
if (!lmk->clkout) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws,
@@ -1437,7 +1433,7 @@ static int lmk04832_probe(struct spi_device *spi)
GFP_KERNEL);
if (!lmk->clk_data) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
device_property_read_u32(lmk->dev, "ti,vco-hz", &lmk->vco_rate);
@@ -1465,7 +1461,7 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "missing reg property in child: %s\n",
child->full_name);
of_node_put(child);
- goto err_disable_oscin;
+ return ret;
}
of_property_read_u32(child, "ti,clkout-fmt",
@@ -1486,7 +1482,7 @@ static int lmk04832_probe(struct spi_device *spi)
__func__, PTR_ERR(lmk->regmap));
ret = PTR_ERR(lmk->regmap);
- goto err_disable_oscin;
+ return ret;
}
regmap_write(lmk->regmap, LMK04832_REG_RST3W, LMK04832_BIT_RESET);
@@ -1496,7 +1492,7 @@ static int lmk04832_probe(struct spi_device *spi)
&rdbk_pin);
ret = lmk04832_set_spi_rdbk(lmk, rdbk_pin);
if (ret)
- goto err_disable_oscin;
+ return ret;
}
regmap_bulk_read(lmk->regmap, LMK04832_REG_ID_PROD_MSB, &tmp, 3);
@@ -1504,13 +1500,13 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "unsupported device type: pid 0x%04x, maskrev 0x%02x\n",
tmp[0] << 8 | tmp[1], tmp[2]);
ret = -EINVAL;
- goto err_disable_oscin;
+ return ret;
}
ret = lmk04832_register_vco(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init device clock path\n");
- goto err_disable_oscin;
+ return ret;
}
if (lmk->vco_rate) {
@@ -1518,21 +1514,21 @@ static int lmk04832_probe(struct spi_device *spi)
ret = clk_set_rate(lmk->vco.clk, lmk->vco_rate);
if (ret) {
dev_err(lmk->dev, "failed to set VCO rate\n");
- goto err_disable_oscin;
+ return ret;
}
}
ret = lmk04832_register_sclk(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init SYNC/SYSREF clock path\n");
- goto err_disable_oscin;
+ return ret;
}
for (i = 0; i < info->num_channels; i++) {
ret = lmk04832_register_clkout(lmk, i);
if (ret) {
dev_err(lmk->dev, "failed to register clk %d\n", i);
- goto err_disable_oscin;
+ return ret;
}
}
@@ -1541,24 +1537,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->clk_data);
if (ret) {
dev_err(lmk->dev, "failed to add provider (%d)\n", ret);
- goto err_disable_oscin;
+ return ret;
}
spi_set_drvdata(spi, lmk);
return 0;
-
-err_disable_oscin:
- clk_disable_unprepare(lmk->oscin);
-
- return ret;
-}
-
-static void lmk04832_remove(struct spi_device *spi)
-{
- struct lmk04832 *lmk = spi_get_drvdata(spi);
-
- clk_disable_unprepare(lmk->oscin);
}
static const struct spi_device_id lmk04832_id[] = {
@@ -1579,7 +1563,6 @@ static struct spi_driver lmk04832_driver = {
.of_match_table = lmk04832_of_id,
},
.probe = lmk04832_probe,
- .remove = lmk04832_remove,
.id_table = lmk04832_id,
};
module_spi_driver(lmk04832_driver);
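The lmk04832 rework is an instance of a broader devres pattern: devm_clk_get_enabled() gets, prepares and enables the clock and queues the matching disable/unprepare as a device-managed action, so the hand-rolled err_disable_oscin unwinding (and the remove callback that existed only to mirror it) collapses into plain returns. A condensed sketch (hypothetical driver, assuming the clock is the only resource needing cleanup):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/spi/spi.h>

static int bar_probe(struct spi_device *spi)
{
	struct clk *osc;

	osc = devm_clk_get_enabled(&spi->dev, "oscin");
	if (IS_ERR(osc))
		return PTR_ERR(osc);

	/*
	 * Later failures can simply return: devres disables, unprepares
	 * and puts the clock when probe fails or the device unbinds.
	 */
	return 0;
}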
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 5efb10776ae5..39049f62dbbb 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -281,7 +281,7 @@ static struct platform_driver palmas_clks_driver = {
.of_match_table = palmas_clks_of_match,
},
.probe = palmas_clks_probe,
- .remove_new = palmas_clks_remove,
+ .remove = palmas_clks_remove,
};
module_platform_driver(palmas_clks_driver);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 3dd2b83d0404..bd4f21c22004 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
static struct platform_driver clk_pwm_driver = {
.probe = clk_pwm_probe,
- .remove_new = clk_pwm_remove,
+ .remove = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
.of_match_table = clk_pwm_dt_ids,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 38c456540d1b..014db6386624 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -263,7 +263,7 @@ static struct platform_driver s2mps11_clk_driver = {
.name = "s2mps11-clk",
},
.probe = s2mps11_clk_probe,
- .remove_new = s2mps11_clk_remove,
+ .remove = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
module_platform_driver(s2mps11_clk_driver);
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index d86a02563f6c..15510c2ff21c 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -156,13 +156,13 @@ static void scmi_clk_atomic_disable(struct clk_hw *hw)
scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}
-static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic)
{
int ret;
bool enabled = false;
struct scmi_clk *clk = to_scmi_clk(hw);
- ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
+ ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic);
if (ret)
dev_warn(clk->dev,
"Failed to get state for clock ID %d\n", clk->id);
@@ -170,6 +170,16 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
return !!enabled;
}
+static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, ATOMIC);
+}
+
+static int scmi_clk_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, NOT_ATOMIC);
+}
+
static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
int ret;
@@ -285,6 +295,8 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
ops->is_enabled = scmi_clk_atomic_is_enabled;
+ else
+ ops->is_prepared = scmi_clk_is_enabled;
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 108b697bd317..19d530d52e64 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -303,7 +303,7 @@ static struct platform_driver scpi_clocks_driver = {
.of_match_table = scpi_clocks_ids,
},
.probe = scpi_clocks_probe,
- .remove_new = scpi_clocks_remove,
+ .remove = scpi_clocks_remove,
};
module_platform_driver(scpi_clocks_driver);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 285ed1ad8a37..d02451f951cf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4762,7 +4762,7 @@ void __clk_put(struct clk *clk)
clk->exclusive_count = 0;
}
- hlist_del(&clk->clks_node);
+ clk_core_unlink_consumer(clk);
/* If we had any boundaries on that clock, let's drop them. */
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
@@ -5232,7 +5232,7 @@ static int of_parse_clkspec(const struct device_node *np, int index,
* clocks.
*/
np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
+ if (np && !of_property_present(np, "clock-ranges"))
break;
index = 0;
}
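of_get_property() hands back a pointer to the raw property value (and optionally its length), which is more than a presence check needs; of_property_present() returns a bool and states the intent directly. A side-by-side sketch (illustrative helpers, not from the patch):

#include <linux/of.h>

static bool has_clock_ranges_old(const struct device_node *np)
{
	/* old idiom: fetch the value pointer just to test it for NULL */
	return of_get_property(np, "clock-ranges", NULL) != NULL;
}

static bool has_clock_ranges_new(const struct device_node *np)
{
	/* new idiom: boolean presence test, no value access implied */
	return of_property_present(np, "clock-ranges");
}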
diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c
new file mode 100644
index 000000000000..52fd25594c96
--- /dev/null
+++ b/drivers/clk/clk_kunit_helpers.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit helpers for clk providers and consumers
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <kunit/clk.h>
+#include <kunit/resource.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_disable_unprepare_wrapper,
+ clk_disable_unprepare, struct clk *);
+/**
+ * clk_prepare_enable_kunit() - Test managed clk_prepare_enable()
+ * @test: The test context
+ * @clk: clk to prepare and enable
+ *
+ * Return: 0 on success, or negative errno on failure.
+ */
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_disable_unprepare_wrapper,
+ clk);
+}
+EXPORT_SYMBOL_GPL(clk_prepare_enable_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_put_wrapper, clk_put, struct clk *);
+
+static struct clk *__clk_get_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = kunit_add_action_or_reset(test, clk_put_wrapper, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+/**
+ * clk_get_kunit() - Test managed clk_get()
+ * @test: The test context
+ * @dev: device for clock "consumer"
+ * @con_id: clock consumer ID
+ *
+ * Just like clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_get_kunit);
+
+/**
+ * of_clk_get_kunit() - Test managed of_clk_get()
+ * @test: The test context
+ * @np: device_node for clock "consumer"
+ * @index: index in 'clocks' property of @np
+ *
+ * Just like of_clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(np, index);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(of_clk_get_kunit);
+
+/**
+ * clk_hw_get_clk_kunit() - Test managed clk_hw_get_clk()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like clk_hw_get_clk(), except the clk is managed by the test case and
+ * is automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_hw_get_clk(hw, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_kunit);
+
+/**
+ * clk_hw_get_clk_prepared_enabled_kunit() - Test managed clk_hw_get_clk() + clk_prepare_enable()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like
+ *
+ * .. code-block:: c
+ *
+ * struct clk *clk = clk_hw_get_clk(...);
+ * clk_prepare_enable(clk);
+ *
+ * except the clk is managed by the test case and is automatically disabled and
+ * unprepared with clk_disable_unprepare() and put with clk_put() after the
+ * test case concludes.
+ *
+ * Return: new clk consumer that is prepared and enabled or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = clk_hw_get_clk_kunit(test, hw, con_id);
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable_kunit(test, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_prepared_enabled_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_hw_unregister_wrapper,
+ clk_hw_unregister, struct clk_hw *);
+
+/**
+ * clk_hw_register_kunit() - Test managed clk_hw_register()
+ * @test: The test context
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like clk_hw_register(), except the clk registration is managed by the
+ * test case and is automatically unregistered after the test case concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_kunit);
+
+/**
+ * of_clk_hw_register_kunit() - Test managed of_clk_hw_register()
+ * @test: The test context
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like of_clk_hw_register(), except the clk registration is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = of_clk_hw_register(node, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
diff --git a/drivers/clk/clk_parent_data_test.h b/drivers/clk/clk_parent_data_test.h
new file mode 100644
index 000000000000..eedd53ae910d
--- /dev/null
+++ b/drivers/clk/clk_parent_data_test.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_PARENT_DATA_TEST_H
+#define _CLK_PARENT_DATA_TEST_H
+
+#define CLK_PARENT_DATA_1MHZ_NAME "1mhz_fixed_legacy"
+#define CLK_PARENT_DATA_PARENT1 "parent_fwname"
+#define CLK_PARENT_DATA_PARENT2 "50"
+#define CLK_PARENT_DATA_50MHZ_NAME "50_clk"
+
+#endif
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index fbbea66d9cba..41fc8eba3418 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -4,12 +4,19 @@
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* Needed for clk_hw_get_clk() */
#include "clk.h"
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
#include <kunit/test.h>
+#include "clk_parent_data_test.h"
+
static const struct clk_ops empty_clk_ops = { };
#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
@@ -2659,6 +2666,448 @@ static struct kunit_suite clk_mux_no_reparent_test_suite = {
.test_cases = clk_mux_no_reparent_test_cases,
};
+struct clk_register_clk_parent_data_test_case {
+ const char *desc;
+ struct clk_parent_data pdata;
+};
+
+static void
+clk_register_clk_parent_data_test_case_to_desc(
+ const struct clk_register_clk_parent_data_test_case *t, char *desc)
+{
+ strcpy(desc, t->desc);
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_of_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_of_index_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::fwname.
+		 * find a parent based on struct clk_parent_data::fw_name.
+ .desc = "clk_parent_data_of_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_of_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+ * clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_of_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::{index,name}.
+ * Index takes priority.
+ */
+ .desc = "clk_parent_data_of_index_name_priority_test",
+ .pdata.index = 0,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+		 * clk_parent_data::{index,fw_name,name}. The fw_name takes
+ * priority over index and name.
+ */
+ .desc = "clk_parent_data_of_index_fwname_name_priority_test",
+ .pdata.index = 1,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/**
+ * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
+ * @np: device node of clk under test
+ * @hw: clk_hw for clk under test
+ */
+struct clk_register_clk_parent_data_of_ctx {
+ struct device_node *np;
+ struct clk_hw hw;
+};
+
+static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
+ if (!ctx->np)
+ return -ENODEV;
+
+ of_node_put_kunit(test, ctx->np);
+
+ return 0;
+}
+
+/*
+ * Test that a clk registered with a struct device_node can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_of_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
+ struct clk_hw *parent_hw;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
+
+ expected_parent = of_clk_get_kunit(test, ctx->np, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_of_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
+
+ parent_hw = clk_hw_get_parent(&ctx->hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
+ clk_register_clk_parent_data_of_test_gen_params),
+ {}
+};
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device_node.
+ */
+static struct kunit_suite clk_register_clk_parent_data_of_suite = {
+ .name = "clk_register_clk_parent_data_of",
+ .init = clk_register_clk_parent_data_of_test_init,
+ .test_cases = clk_register_clk_parent_data_of_test_cases,
+};
+
+/**
+ * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
+ * @dev: device of clk under test
+ * @hw: clk_hw for clk under test
+ * @pdrv: driver to attach to find @dev
+ */
+struct clk_register_clk_parent_data_device_ctx {
+ struct device *dev;
+ struct clk_hw hw;
+ struct platform_driver pdrv;
+};
+
+static inline struct clk_register_clk_parent_data_device_ctx *
+clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct clk_register_clk_parent_data_device_ctx, pdrv);
+}
+
+static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+
+ ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
+ ctx->dev = &pdev->dev;
+
+ return 0;
+}
+
+static void clk_register_clk_parent_data_device_driver(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
+
+ ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_device_index_test",
+ .pdata.index = 1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+		 * parent based on struct clk_parent_data::fw_name.
+ */
+ .desc = "clk_parent_data_device_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_device_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_device_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{index,name}. Index
+ * takes priority.
+ */
+ .desc = "clk_parent_data_device_index_name_priority_test",
+ .pdata.index = 1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+		 * parent based on struct clk_parent_data::{index,fw_name,name}.
+ * The fw_name takes priority over index and name.
+ */
+ .desc = "clk_parent_data_device_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_device_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_hw *parent_hw;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ clk_register_clk_parent_data_device_driver(test);
+
+ expected_parent = clk_get_kunit(test, ctx->dev, "50");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_device_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+
+ parent_hw = clk_hw_get_parent(&ctx->hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_hw_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+ .desc = "clk_parent_data_device_hw_index_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+ * struct clk_parent_data::fw_name is set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::name is set.
+ */
+ .desc = "clk_parent_data_device_hw_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::{fw_name,name} are set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::index is set. The hw pointer takes
+ * priority.
+ */
+ .desc = "clk_parent_data_device_hw_index_priority_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+		 * struct clk_parent_data::{index,fw_name,name} are set.
+ * The hw pointer takes priority over everything else.
+ */
+ .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_dummy_context *parent;
+ struct clk_hw *parent_hw;
+ struct clk_parent_data pdata = { };
+ struct clk_init_data init = { };
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ clk_register_clk_parent_data_device_driver(test);
+
+ parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_hw = &parent->hw;
+ parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
+ &clk_dummy_rate_ops, 0);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
+
+ test_param = test->param_value;
+ memcpy(&pdata, &test_param->pdata, sizeof(pdata));
+ pdata.hw = parent_hw;
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_dummy_single_parent_ops;
+ init.name = "parent_data_device_hw_test_clk";
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
+}
+
+static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_test_gen_params),
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_test_gen_params),
+ {}
+};
+
+static int clk_register_clk_parent_data_device_init(struct kunit *test)
+{
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ return 0;
+}
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device.
+ */
+static struct kunit_suite clk_register_clk_parent_data_device_suite = {
+ .name = "clk_register_clk_parent_data_device",
+ .init = clk_register_clk_parent_data_device_init,
+ .test_cases = clk_register_clk_parent_data_device_test_cases,
+};
+
kunit_test_suites(
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
@@ -2671,8 +3120,10 @@ kunit_test_suites(
&clk_range_test_suite,
&clk_range_maximize_test_suite,
&clk_range_minimize_test_suite,
+ &clk_register_clk_parent_data_of_suite,
+ &clk_register_clk_parent_data_device_suite,
&clk_single_parent_mux_test_suite,
- &clk_uncached_test_suite
+ &clk_uncached_test_suite,
);
MODULE_DESCRIPTION("Kunit tests for clk framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index ec60ecb517f1..a5109fe8b16e 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -513,8 +513,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
fck_clk = devm_clk_get(dev, "fck");
if (IS_ERR(fck_clk)) {
- dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n");
- return ERR_CAST(fck_clk);
+ return dev_err_cast_probe(dev, fck_clk, "Missing fck clock\n");
}
usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
@@ -749,11 +748,9 @@ static int da8xx_cfgchip_probe(struct platform_device *pdev)
clk_init = device_get_match_data(dev);
if (clk_init) {
- struct device_node *parent;
+ struct device_node *parent __free(device_node) = of_get_parent(dev->of_node);
- parent = of_get_parent(dev->of_node);
regmap = syscon_node_to_regmap(parent);
- of_node_put(parent);
} else if (pdev->id_entry && pdata) {
clk_init = (void *)pdev->id_entry->driver_data;
regmap = pdata->cfgchip;
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index 141b727ff60d..0c50acd8543a 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -179,7 +179,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
static struct platform_driver hi3519_clk_driver = {
.probe = hi3519_clk_probe,
- .remove_new = hi3519_clk_remove,
+ .remove = hi3519_clk_remove,
.driver = {
.name = "hi3519-clk",
.of_match_table = hi3519_clk_match_table,
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index c79a94f6d9d2..f297fb25c512 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -407,7 +407,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hi3559av100_clk_pll *clk = to_pll_clk(hw);
- u64 frac_val, fbdiv_val, refdiv_val;
+ u64 frac_val, fbdiv_val;
u32 postdiv1_val, postdiv2_val;
u32 val;
u64 tmp, rate;
@@ -435,14 +435,13 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
val = readl_relaxed(clk->ctrl_reg2);
val = val >> clk->refdiv_shift;
val &= ((1 << clk->refdiv_width) - 1);
- refdiv_val = val;
/* rate = 24000000 * (fbdiv + frac / (1<<24) ) / refdiv */
rate = 0;
tmp = 24000000 * fbdiv_val + (24000000 * frac_val) / (1 << 24);
rate += tmp;
- do_div(rate, refdiv_val);
- do_div(rate, postdiv1_val * postdiv2_val);
+ rate = div_u64(rate, val);
+ rate = div_u64(rate, postdiv1_val * postdiv2_val);
return rate;
}
@@ -818,7 +817,7 @@ static void hi3559av100_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3559av100_crg_driver = {
.probe = hi3559av100_crg_probe,
- .remove_new = hi3559av100_crg_remove,
+ .remove = hi3559av100_crg_remove,
.driver = {
.name = "hi3559av100-clock",
.of_match_table = hi3559av100_crg_match_table,
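The refdiv change in this file is more than a cleanup: do_div() divides a 64-bit dividend in place by a divisor that must be 32-bit, so the old u64 refdiv_val was truncated to 32 bits inside the macro, while div_u64() makes the u64-by-u32 contract explicit in its prototype and returns the quotient. A sketch of the two idioms (illustrative function):

#include <linux/math64.h>

static u64 example_scale_rate(u64 rate, u32 refdiv, u32 postdiv)
{
	/* in-place form: do_div() modifies rate and returns the remainder */
	do_div(rate, refdiv);

	/* value form: quotient returned, dividend left untouched */
	return div_u64(rate, postdiv);
}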
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index e602e65fbc38..b66140f74c51 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -294,7 +294,7 @@ static void hi3516cv300_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3516cv300_crg_driver = {
.probe = hi3516cv300_crg_probe,
- .remove_new = hi3516cv300_crg_remove,
+ .remove = hi3516cv300_crg_remove,
.driver = {
.name = "hi3516cv300-crg",
.of_match_table = hi3516cv300_crg_match_table,
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index f651b197e45a..8eabd1cc229f 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -377,7 +377,7 @@ static void hi3798cv200_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3798cv200_crg_driver = {
.probe = hi3798cv200_crg_probe,
- .remove_new = hi3798cv200_crg_remove,
+ .remove = hi3798cv200_crg_remove,
.driver = {
.name = "hi3798cv200-crg",
.of_match_table = hi3798cv200_crg_match_table,
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 6da0fba68225..6ff6d934848a 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -81,6 +81,7 @@ config CLK_IMX8MP
tristate "IMX8MP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS if RESET_CONTROLLER
help
Build the driver for i.MX8MP CCM Clock Driver
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index e208ddc51133..8ed2e0ad2769 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -14,6 +14,7 @@
#include "../clk-fractional-divider.h"
#include "clk.h"
+#define PCG_PR_MASK BIT(31)
#define PCG_PCS_SHIFT 24
#define PCG_PCS_MASK 0x7
#define PCG_CGC_SHIFT 30
@@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
struct clk_hw *hw;
u32 val;
+ val = readl(reg);
+ if (!(val & PCG_PR_MASK)) {
+ pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
+ return NULL;
+ }
+
if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 8cc07d056a83..f187582ba491 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ val = readl(gate->reg);
+ val |= BIT(gate->bit_idx);
+ writel(val, gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
+{
+ /* composite clk requires the disable hook */
+}
+
+static const struct clk_ops imx8m_clk_composite_gate_ops = {
+ .enable = imx8m_clk_composite_gate_enable,
+ .disable = imx8m_clk_composite_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
@@ -217,6 +245,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
struct clk_mux *mux;
const struct clk_ops *divider_ops;
const struct clk_ops *mux_ops;
+ const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -257,20 +286,22 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
-	/* skip registering the gate ops if M4 is enabled */
+	/* always register the gate; pick no-op gate ops below if the M core booted */
- if (!mcore_booted) {
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- goto free_div;
-
- gate_hw = &gate->hw;
- gate->reg = reg;
- gate->bit_idx = PCG_CGC_SHIFT;
- gate->lock = &imx_ccm_lock;
- }
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_div;
+
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+ gate->lock = &imx_ccm_lock;
+ if (!mcore_booted)
+ gate_ops = &clk_gate_ops;
+ else
+ gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw,
- divider_ops, gate_hw, &clk_gate_ops, flags);
+ divider_ops, gate_hw, gate_ops, flags);
if (IS_ERR(hw))
goto free_gate;
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 81164bdcd6cc..6c6c5a30f328 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
+ /*
+	 * Skip disabling the root clock gate if the mcore is booted:
+	 * the root clock may still be in use by the mcore.
+ */
+ if (mcore_booted)
+ return;
+
imx93_clk_composite_gate_endisable(hw, 0);
}
@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ro_ops, div_hw,
&clk_divider_ro_ops, NULL, NULL, flags);
- } else if (!mcore_booted) {
+ } else {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
goto fail;
@@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
&imx93_clk_composite_divider_ops, gate_hw,
&imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT);
- } else {
- hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &imx93_clk_composite_mux_ops, div_hw,
- &imx93_clk_composite_divider_ops, NULL,
- &imx93_clk_composite_gate_ops,
- flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 44462ab50e51..591e0364ee5c 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -78,6 +78,7 @@ struct clk_fracn_gppll {
* The Fvco should be in range 2.5Ghz to 5Ghz
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+ PLL_FRACN_GP(1039500000U, 173, 25, 100, 1, 4),
PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
@@ -106,6 +107,7 @@ static const struct imx_fracn_gppll_rate_table int_tbl[] = {
PLL_FRACN_GP_INTEGER(1700000000U, 141, 1, 2),
PLL_FRACN_GP_INTEGER(1400000000U, 175, 1, 3),
PLL_FRACN_GP_INTEGER(900000000U, 150, 1, 4),
+ PLL_FRACN_GP_INTEGER(800000000U, 200, 1, 6),
};
struct imx_fracn_gppll_clk imx_fracn_gppll_integer = {
@@ -291,6 +293,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
if (val & POWERUP_MASK)
return 0;
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN)
+ writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
+ pll->base + PLL_NUMERATOR);
+
val |= CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index f9394e94f69d..05c7a82b751f 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks();
}
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 2b77d1fc7bb9..99adc55e3f5d 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -498,14 +498,14 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT);
hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE);
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index 1bdb480cc96c..6c351050b82a 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -54,10 +54,12 @@ struct clk_imx8_acm_sel {
* struct imx8_acm_soc_data - soc specific data
* @sels: pointer to struct clk_imx8_acm_sel
 * @num_sels: number of items
+ * @mclk_sels: pointer to imx8qm/qxp/dxl_mclk_sels
*/
struct imx8_acm_soc_data {
struct clk_imx8_acm_sel *sels;
unsigned int num_sels;
+ struct clk_parent_data *mclk_sels;
};
/**
@@ -111,11 +113,14 @@ static const struct clk_parent_data imx8qm_mclk_out_sels[] = {
{ .fw_name = "sai6_rx_bclk" },
};
-static const struct clk_parent_data imx8qm_mclk_sels[] = {
+#define ACM_AUD_CLK0_SEL_INDEX 2
+#define ACM_AUD_CLK1_SEL_INDEX 3
+
+static struct clk_parent_data imx8qm_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static const struct clk_parent_data imx8qm_asrc_mux_clk_sels[] = {
@@ -176,11 +181,11 @@ static const struct clk_parent_data imx8qxp_mclk_out_sels[] = {
{ .fw_name = "sai4_rx_bclk" },
};
-static const struct clk_parent_data imx8qxp_mclk_sels[] = {
+static struct clk_parent_data imx8qxp_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8qxp_sels[] = {
@@ -228,11 +233,11 @@ static const struct clk_parent_data imx8dxl_mclk_out_sels[] = {
{ .index = -1 },
};
-static const struct clk_parent_data imx8dxl_mclk_sels[] = {
+static struct clk_parent_data imx8dxl_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8dxl_sels[] = {
@@ -375,6 +380,18 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX_ADMA_ACM_CLK_END);
goto err_clk_register;
}
+
+ /*
+ * The IMX_ADMA_ACM_AUD_CLK0_SEL and IMX_ADMA_ACM_AUD_CLK1_SEL are
+	 * registered first. Once they exist, fill in the clk_hw pointers
+	 * in the imx8qm/qxp/dxl_mclk_sels tables so later parent lookups
+	 * resolve directly through clk_parent_data::hw.
+ */
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK0_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK0_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK0_SEL];
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK1_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK1_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK1_SEL];
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
@@ -406,16 +423,19 @@ static void imx8_acm_clk_remove(struct platform_device *pdev)
static const struct imx8_acm_soc_data imx8qm_acm_data = {
.sels = imx8qm_sels,
.num_sels = ARRAY_SIZE(imx8qm_sels),
+ .mclk_sels = imx8qm_mclk_sels,
};
static const struct imx8_acm_soc_data imx8qxp_acm_data = {
.sels = imx8qxp_sels,
.num_sels = ARRAY_SIZE(imx8qxp_sels),
+ .mclk_sels = imx8qxp_mclk_sels,
};
static const struct imx8_acm_soc_data imx8dxl_acm_data = {
.sels = imx8dxl_sels,
.num_sels = ARRAY_SIZE(imx8dxl_sels),
+ .mclk_sels = imx8dxl_mclk_sels,
};
static const struct of_device_id imx8_acm_match[] = {
@@ -468,7 +488,7 @@ static struct platform_driver imx8_acm_clk_driver = {
.pm = &imx8_acm_pm_ops,
},
.probe = imx8_acm_clk_probe,
- .remove_new = imx8_acm_clk_remove,
+ .remove = imx8_acm_clk_remove,
};
module_platform_driver(imx8_acm_clk_driver);
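The ACM rework leans on clk_parent_data's lookup precedence: a non-NULL .hw pointer wins over .fw_name, .name and .index, so once the aud_clk{0,1}_sel muxes are registered the driver can point its mclk parent tables straight at them instead of resolving its own clocks by string. A reduced sketch of the pattern (hypothetical names):

#include <linux/clk-provider.h>

static struct clk_parent_data example_mclk_sels[] = {
	{ .fw_name = "pll_div_clk0" },
	{ },	/* .hw filled in at probe time, see below */
};

static void example_link_mclk_parent(struct clk_hw *aud_clk_sel_hw)
{
	/* .hw takes precedence over .fw_name/.name/.index during lookup */
	example_mclk_sels[1].hw = aud_clk_sel_hw;
}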
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 075f643e3f35..342049b847b9 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -432,7 +432,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
- hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 4bd1ed11353b..ab77e148e70c 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -583,6 +583,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+ hws[IMX8MN_CLK_SAI7_IPG] = imx_clk_hw_gate2_shared2("sai7_ipg_clk", "ipg_audio_root", base + 0x4650, 0, &share_count_sai7);
hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b381d6f784c8..b2cb157703c5 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -13,6 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/imx8mp-clock.h>
@@ -154,6 +156,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
PDM_SEL, 2, 0 \
}
+#define CLK_GATE_PARENT(gname, cname, pname) \
+ { \
+ gname"_cg", \
+ IMX8MP_CLK_AUDIOMIX_##cname, \
+ { .fw_name = pname, .name = pname }, NULL, 1, \
+ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
+ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
+ }
+
struct clk_imx8mp_audiomix_sel {
const char *name;
int clkid;
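The CLK_GATE_PARENT offset math packs 32 enable bits per register: clock IDs 0..31 land in CLKEN0 and IDs 32..63 in the next 32-bit word. A worked example with a made-up clock ID:

	/* Suppose a clock ID of 34 (hypothetical value):                 */
	/* reg = CLKEN0 + 4 * !!(34 / 32)  ->  CLKEN0 + 4 (second word)  */
	/* bit = 34 % 32                   ->  2                         */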
@@ -171,14 +182,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE("aud2htx", AUD2HTX_IPG),
- CLK_GATE("earc_phy", EARC_PHY),
+ CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE("edma", EDMA_ROOT),
- CLK_GATE("audpll", AUDPLL_ROOT),
+ CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),
CLK_GATE("mu3", MU3_ROOT),
CLK_PDM,
@@ -217,6 +228,63 @@ struct clk_imx8mp_audiomix_priv {
struct clk_hw_onecell_data clk_data;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
+
+static void clk_imx8mp_audiomix_reset_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void clk_imx8mp_audiomix_reset_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
+{
+ struct auxiliary_device *adev __free(kfree) = NULL;
+ int ret;
+
+ if (!of_property_present(dev->of_node, "#reset-cells"))
+ return 0;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = "reset";
+ adev->dev.parent = dev;
+ adev->dev.release = clk_imx8mp_audiomix_reset_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, clk_imx8mp_audiomix_reset_unregister_adev,
+ no_free_ptr(adev));
+}
+
+#else /* !CONFIG_RESET_CONTROLLER */
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+							 struct clk_imx8mp_audiomix_priv *priv)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_RESET_CONTROLLER */
+
static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save)
{
struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev);
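The registration helper above leans on linux/cleanup.h: the __free(kfree) annotation frees adev on every early-error return, and no_free_ptr() disarms that once the devm action takes ownership. A self-contained sketch of the same dance, with a hypothetical setup callback:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static void *alloc_or_null(int (*setup)(void *))
	{
		void *buf __free(kfree) = kzalloc(16, GFP_KERNEL);

		if (!buf || setup(buf) < 0)
			return NULL;		/* buf, if allocated, is kfree()d here */

		return no_free_ptr(buf);	/* success: caller now owns buf */
	}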
@@ -269,12 +337,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(sels); i++) {
if (sels[i].num_parents == 1) {
hw = devm_clk_hw_register_gate_parent_data(dev,
- sels[i].name, &sels[i].parent, 0,
+ sels[i].name, &sels[i].parent, CLK_SET_RATE_PARENT,
base + sels[i].reg, sels[i].shift, 0, NULL);
} else {
hw = devm_clk_hw_register_mux_parent_data_table(dev,
sels[i].name, sels[i].parents,
- sels[i].num_parents, 0,
+ sels[i].num_parents, CLK_SET_RATE_PARENT,
base + sels[i].reg,
sels[i].shift, sels[i].width,
0, NULL, NULL);
@@ -317,7 +385,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
- 0, base + SAI_PLL_GNRL_CTL, 13,
+ CLK_SET_RATE_PARENT,
+ base + SAI_PLL_GNRL_CTL, 13,
0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
@@ -326,7 +395,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
- "sai_pll_out", 0, 1, 2);
+ "sai_pll_out",
+ CLK_SET_RATE_PARENT, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_clk_register;
@@ -337,6 +407,10 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
if (ret)
goto err_clk_register;
+ ret = clk_imx8mp_audiomix_reset_controller_register(dev, priv);
+ if (ret)
+ goto err_clk_register;
+
pm_runtime_put_sync(dev);
return 0;
@@ -380,7 +454,7 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
static struct platform_driver clk_imx8mp_audiomix_driver = {
.probe = clk_imx8mp_audiomix_probe,
- .remove_new = clk_imx8mp_audiomix_remove,
+ .remove = clk_imx8mp_audiomix_remove,
.driver = {
.name = "imx8mp-audio-blk-ctrl",
.of_match_table = clk_imx8mp_audiomix_of_match,
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 670aa2bab301..516dbd170c8a 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -547,12 +547,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
- hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
- hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
@@ -609,7 +609,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
- hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00);
hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80);
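The dram_alt/dram_apb hunk swaps plain composites for the fw-managed variants; as I read the helpers (an assumption worth checking against clk.h), these register with CLK_GET_RATE_NOCACHE so the framework re-reads dividers that DRAM-scaling firmware may retune behind the kernel's back. Fragment:

	/* Assumed effect of the fw-managed variant: the rate is recalculated
	 * from hardware on each query rather than served from a cache. */
	rate = clk_hw_get_rate(hws[IMX8MP_CLK_DRAM_APB]);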
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 7d8883916cac..3ae162625bb1 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -71,7 +71,7 @@ static const char *const lvds0_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi0_lvds_bypass_clk",
+ "lvds0_bypass_clk",
};
static const char *const lvds1_sels[] = {
@@ -79,7 +79,7 @@ static const char *const lvds1_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi1_lvds_bypass_clk",
+ "lvds1_bypass_clk",
};
static const char * const mipi_sels[] = {
@@ -90,6 +90,22 @@ static const char * const mipi_sels[] = {
"clk_dummy",
};
+static const char * const mipi0_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi0_bypass_clk",
+};
+
+static const char * const mipi1_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi1_bypass_clk",
+};
+
static const char * const lcd_sels[] = {
"clk_dummy",
"clk_dummy",
@@ -170,8 +186,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */
imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
@@ -206,42 +222,41 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */
- imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */
imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
+ imx_clk_scu2("mipi0_pixel_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds0_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds0_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds0_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi0_dsi_phy_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
-
+ imx_clk_scu2("mipi1_pixel_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds1_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds1_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds1_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi1_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi1_dsi_phy_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER);
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 74f595f9e5e3..19a62da74be4 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -248,6 +248,35 @@ static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data netcmix_clk_dev_data[] = {
+ [IMX95_CLK_NETCMIX_ENETC0_RMII] = {
+ .name = "enetc0_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 5,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+ [IMX95_CLK_NETCMIX_ENETC1_RMII] = {
+ .name = "enetc1_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 10,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data netcmix_dev_data = {
+ .num_clks = ARRAY_SIZE(netcmix_clk_dev_data),
+ .clk_dev_data = netcmix_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
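Each netcmix entry above describes a one-bit mux, and the CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT combination forwards rate requests to whichever parent is currently selected without ever switching parents to satisfy them. A roughly equivalent raw registration for the first entry (fragment, assuming dev, base, and hw are in scope):

	static const char * const rmii_parents[] = { "ext_enetref", "enetref" };

	hw = clk_hw_register_mux(dev, "enetc0_rmii_sel", rmii_parents,
				 ARRAY_SIZE(rmii_parents),
				 CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
				 base + 4, 5, 1, 0, NULL);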
@@ -419,6 +448,7 @@ static const struct of_device_id imx95_bc_of_match[] = {
{ .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
{ .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
+ { .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx95_bc_of_match);
diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
index 08d155feb035..efd1ac9d8eeb 100644
--- a/drivers/clk/imx/clk-imxrt1050.c
+++ b/drivers/clk/imx/clk-imxrt1050.c
@@ -176,6 +176,7 @@ static struct platform_driver imxrt1050_clk_driver = {
};
module_platform_driver(imxrt1050_clk_driver);
+MODULE_DESCRIPTION("NXP i.MX RT1050 clock driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jesse Taube <Mr.Bossman075@gmail.com>");
MODULE_AUTHOR("Giulio Benetti <giulio.benetti@benettiengineering.com>");
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index e35496af5ceb..df83bd939492 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -226,4 +226,5 @@ static int __init imx_clk_disable_uart(void)
late_initcall_sync(imx_clk_disable_uart);
#endif
+MODULE_DESCRIPTION("Common clock support for NXP i.MX SoC family");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index adb7ad649a0d..aa5202f284f3 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -442,6 +442,10 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+#define imx8m_clk_hw_composite_bus_flags(name, parent_names, reg, flags) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT | flags)
+
#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
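The new imx8m_clk_hw_composite_bus_flags() variant simply ORs caller-supplied flags into the bus-composite defaults; the imx8mp hunks earlier use it to add CLK_SET_RATE_PARENT to the display pixel clocks:

	hws[IMX8MP_CLK_MEDIA_DISP2_PIX] =
		imx8m_clk_hw_composite_bus_flags("media_disp2_pix",
						 imx8mp_media_disp_pix_sels,
						 ccm_base + 0x9300,
						 CLK_SET_RATE_PARENT);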
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 5cefc30a843e..c5894fc9395e 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -707,7 +707,7 @@ static void ti_sci_clk_remove(struct platform_device *pdev)
static struct platform_driver ti_sci_clk_driver = {
.probe = ti_sci_clk_probe,
- .remove_new = ti_sci_clk_remove,
+ .remove = ti_sci_clk_remove,
.driver = {
.name = "ti-sci-clk",
.of_match_table = of_match_ptr(ti_sci_clk_of_match),
diff --git a/drivers/clk/kunit_clk_fixed_rate_test.dtso b/drivers/clk/kunit_clk_fixed_rate_test.dtso
new file mode 100644
index 000000000000..d838ce766fa2
--- /dev/null
+++ b/drivers/clk/kunit_clk_fixed_rate_test.dtso
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk-fixed-rate_test.h"
+
+&{/} {
+ fixed_50MHz: kunit-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <TEST_FIXED_FREQUENCY>;
+ clock-accuracy = <TEST_FIXED_ACCURACY>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,single-clk-consumer";
+ clocks = <&fixed_50MHz>;
+ };
+};
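The overlay gives the fixed-rate KUnit test a producer node plus a consumer to hang the lookup off. A sketch of how a test might read the clock back once the harness has applied the overlay (assumed flow; clk_get() with a NULL con_id resolves the consumer's first clocks entry):

	#include <linux/clk.h>
	#include <linux/err.h>

	static unsigned long read_overlay_rate(struct device *consumer)
	{
		struct clk *clk = clk_get(consumer, NULL);
		unsigned long rate;

		if (IS_ERR(clk))
			return 0;
		rate = clk_get_rate(clk);	/* TEST_FIXED_FREQUENCY here */
		clk_put(clk);
		return rate;
	}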
diff --git a/drivers/clk/kunit_clk_parent_data_test.dtso b/drivers/clk/kunit_clk_parent_data_test.dtso
new file mode 100644
index 000000000000..7d3ed9a5a2e8
--- /dev/null
+++ b/drivers/clk/kunit_clk_parent_data_test.dtso
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk_parent_data_test.h"
+
+&{/} {
+ fixed_50: kunit-clock-50MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = CLK_PARENT_DATA_50MHZ_NAME;
+ };
+
+ fixed_parent: kunit-clock-1MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = CLK_PARENT_DATA_1MHZ_NAME;
+ };
+
+ kunit-clock-controller {
+ compatible = "test,clk-parent-data";
+ clocks = <&fixed_parent>, <&fixed_50>;
+ clock-names = CLK_PARENT_DATA_PARENT1, CLK_PARENT_DATA_PARENT2;
+ #clock-cells = <1>;
+ };
+};
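This second overlay exercises clk_parent_data lookups: the controller node's clock-names entries let a provider match its parents by fw_name rather than by global clock name. Sketch (the strings are placeholders; the real ones come from clk_parent_data_test.h):

	static const struct clk_parent_data test_parents[] = {
		{ .fw_name = "parent1", .index = 0 },
		{ .fw_name = "parent2", .index = 1 },
	};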
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 15859132c769..425c69cfb105 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -158,7 +158,7 @@ static void clk_mt2701_aud_remove(struct platform_device *pdev)
static struct platform_driver clk_mt2701_aud_drv = {
.probe = clk_mt2701_aud_probe,
- .remove_new = clk_mt2701_aud_remove,
+ .remove = clk_mt2701_aud_remove,
.driver = {
.name = "clk-mt2701-aud",
.of_match_table = of_match_clk_mt2701_aud,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index e203dca70786..5da3eabffd3e 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_bdp);
static struct platform_driver clk_mt2701_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-bdp",
.of_match_table = of_match_clk_mt2701_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index f6e1fdc9ee0a..608252e73f24 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_eth);
static struct platform_driver clk_mt2701_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-eth",
.of_match_table = of_match_clk_mt2701_eth,
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 5e04975433ea..b3e18b6db75d 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_g3d);
static struct platform_driver clk_mt2701_g3d_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-g3d",
.of_match_table = of_match_clk_mt2701_g3d,
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index c7b38d066403..000e00576052 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_hif);
static struct platform_driver clk_mt2701_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-hif",
.of_match_table = of_match_clk_mt2701_hif,
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index ce13b79a7994..875594bc9dcb 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_img);
static struct platform_driver clk_mt2701_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-img",
.of_match_table = of_match_clk_mt2701_img,
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 903592be56b5..bc68fa718878 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2701_mm_id_table);
static struct platform_driver clk_mt2701_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2701-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 591091fb2151..94db86f8d0a4 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -52,7 +52,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_vdec);
static struct platform_driver clk_mt2701_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-vdec",
.of_match_table = of_match_clk_mt2701_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
index 66987d205eee..a60622d251ff 100644
--- a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
@@ -156,7 +156,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_apmixed);
static struct platform_driver clk_mt2712_apmixed_drv = {
.probe = clk_mt2712_apmixed_probe,
- .remove_new = clk_mt2712_apmixed_remove,
+ .remove = clk_mt2712_apmixed_remove,
.driver = {
.name = "clk-mt2712-apmixed",
.of_match_table = of_match_clk_mt2712_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 93c5453e4392..c838311a0c51 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_bdp);
static struct platform_driver clk_mt2712_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-bdp",
.of_match_table = of_match_clk_mt2712_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 84abd0515fd2..bedebf86b0b5 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_img);
static struct platform_driver clk_mt2712_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-img",
.of_match_table = of_match_clk_mt2712_img,
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 89be9082adba..1a73474b2f99 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_jpgdec);
static struct platform_driver clk_mt2712_jpgdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-jpgdec",
.of_match_table = of_match_clk_mt2712_jpgdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index f7e0d0ebf665..c1bb45c7469e 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_mfg);
static struct platform_driver clk_mt2712_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-mfg",
.of_match_table = of_match_clk_mt2712_mfg,
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 248529d3134d..32ecb949f7eb 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -121,7 +121,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2712_mm_id_table);
static struct platform_driver clk_mt2712_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2712-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index a063f1f0aa52..a766342fbafa 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_vdec);
static struct platform_driver clk_mt2712_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-vdec",
.of_match_table = of_match_clk_mt2712_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index 5b15df0a26f5..fc193dc8e8f6 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_venc);
static struct platform_driver clk_mt2712_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-venc",
.of_match_table = of_match_clk_mt2712_venc,
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 91af45160aa4..964c92130e3c 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -993,7 +993,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712);
static struct platform_driver clk_mt2712_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712",
.of_match_table = of_match_clk_mt2712,
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 3e481c697eff..2be1458087e6 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_audio);
static struct platform_driver clk_mt6765_audio_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-audio",
.of_match_table = of_match_clk_mt6765_audio,
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index fed9c789d9fa..2a7f30dc85bb 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_cam);
static struct platform_driver clk_mt6765_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-cam",
.of_match_table = of_match_clk_mt6765_cam,
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 34bb89ffd2dd..ff857852cfb0 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_img);
static struct platform_driver clk_mt6765_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-img",
.of_match_table = of_match_clk_mt6765_img,
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index 957eb494fee5..8261dfd12a9a 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mipi0a);
static struct platform_driver clk_mt6765_mipi0a_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mipi0a",
.of_match_table = of_match_clk_mt6765_mipi0a,
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index 099540fcfc76..e525919f9e81 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mm);
static struct platform_driver clk_mt6765_mm_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mm",
.of_match_table = of_match_clk_mt6765_mm,
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index 64f3451d0aee..f309d1090cda 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec);
static struct platform_driver clk_mt6765_vcodec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-vcodec",
.of_match_table = of_match_clk_mt6765_vcodec,
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 3d23b8e29af6..8ed318bd7765 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_aud);
static struct platform_driver clk_mt6779_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-aud",
.of_match_table = of_match_clk_mt6779_aud,
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index e76b2c4f548e..f397b55606de 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_cam);
static struct platform_driver clk_mt6779_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-cam",
.of_match_table = of_match_clk_mt6779_cam,
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 0c5971f3966a..474a59a4ca9e 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_img);
static struct platform_driver clk_mt6779_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-img",
.of_match_table = of_match_clk_mt6779_img,
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index 9c1a9f1b0f3e..c2314654f43a 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_ipe);
static struct platform_driver clk_mt6779_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-ipe",
.of_match_table = of_match_clk_mt6779_ipe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index 3cc82b59117f..21793cb6e6e3 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_mfg);
static struct platform_driver clk_mt6779_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-mfg",
.of_match_table = of_match_clk_mt6779_mfg,
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 97d437a6f98f..30bbab308388 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6779_mm_id_table);
static struct platform_driver clk_mt6779_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6779-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index a9122e627aa5..458d012f023c 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_vdec);
static struct platform_driver clk_mt6779_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-vdec",
.of_match_table = of_match_clk_mt6779_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index 2cd032648eb1..70cebc274031 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_venc);
static struct platform_driver clk_mt6779_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-venc",
.of_match_table = of_match_clk_mt6779_venc,
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 819253b97a02..86732f5acf93 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1305,7 +1305,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779);
static struct platform_driver clk_mt6779_infra_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-infra",
.of_match_table = of_match_clk_mt6779_infra,
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
index 8c65974ed9b8..91665d7f125e 100644
--- a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -201,7 +201,7 @@ static void clk_mt6795_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt6795_apmixed_drv = {
.probe = clk_mt6795_apmixed_probe,
- .remove_new = clk_mt6795_apmixed_remove,
+ .remove = clk_mt6795_apmixed_remove,
.driver = {
.name = "clk-mt6795-apmixed",
.of_match_table = of_match_clk_mt6795_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
index 06d7fdf3098b..e4559569f5b0 100644
--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -144,7 +144,7 @@ static struct platform_driver clk_mt6795_infracfg_drv = {
.of_match_table = of_match_clk_mt6795_infracfg,
},
.probe = clk_mt6795_infracfg_probe,
- .remove_new = clk_mt6795_infracfg_remove,
+ .remove = clk_mt6795_infracfg_remove,
};
module_platform_driver(clk_mt6795_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
index dff6a6ded837..1d658bb19e82 100644
--- a/drivers/clk/mediatek/clk-mt6795-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_mfg_drv = {
.of_match_table = of_match_clk_mt6795_mfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
index dd1708d689dc..733d0e2021fc 100644
--- a/drivers/clk/mediatek/clk-mt6795-mm.c
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -93,7 +93,7 @@ static struct platform_driver clk_mt6795_mm_drv = {
},
.id_table = clk_mt6795_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt6795_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
index 3f6bea418a5a..d48240eb2a67 100644
--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -153,7 +153,7 @@ static struct platform_driver clk_mt6795_pericfg_drv = {
.of_match_table = of_match_clk_mt6795_pericfg,
},
.probe = clk_mt6795_pericfg_probe,
- .remove_new = clk_mt6795_pericfg_remove,
+ .remove = clk_mt6795_pericfg_remove,
};
module_platform_driver(clk_mt6795_pericfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
index be595853a925..9c6d63a80b19 100644
--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -547,7 +547,7 @@ static struct platform_driver clk_mt6795_topckgen_drv = {
.of_match_table = of_match_clk_mt6795_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_topckgen_drv);
diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
index 9e91d6f7f5bf..f2968f859dca 100644
--- a/drivers/clk/mediatek/clk-mt6795-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vdecsys);
static struct platform_driver clk_mt6795_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6795-vdecsys",
.of_match_table = of_match_clk_mt6795_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
index bd81e80b744f..2f8d48da1a85 100644
--- a/drivers/clk/mediatek/clk-mt6795-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_vencsys_drv = {
.of_match_table = of_match_clk_mt6795_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_vencsys_drv);
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 0ec0cf2154dc..338c69234f24 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_img);
static struct platform_driver clk_mt6797_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-img",
.of_match_table = of_match_clk_mt6797_img,
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index f5701e965792..ddb40b8a1a7d 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6797_mm_id_table);
static struct platform_driver clk_mt6797_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6797-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index c967d5e25c7d..d832f48123f5 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_vdec);
static struct platform_driver clk_mt6797_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-vdec",
.of_match_table = of_match_clk_mt6797_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index f6fac5db65b0..fd4446f4a9d7 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_venc);
static struct platform_driver clk_mt6797_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-venc",
.of_match_table = of_match_clk_mt6797_venc,
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
index 1b8f859b6b6c..2350592d9a93 100644
--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -137,7 +137,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_apmixed);
static struct platform_driver clk_mt7622_apmixed_drv = {
.probe = clk_mt7622_apmixed_probe,
- .remove_new = clk_mt7622_apmixed_remove,
+ .remove = clk_mt7622_apmixed_remove,
.driver = {
.name = "clk-mt7622-apmixed",
.of_match_table = of_match_clk_mt7622_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index b7bf626e4d14..931a0598e598 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -149,7 +149,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_aud);
static struct platform_driver clk_mt7622_aud_drv = {
.probe = clk_mt7622_aud_probe,
- .remove_new = clk_mt7622_aud_remove,
+ .remove = clk_mt7622_aud_remove,
.driver = {
.name = "clk-mt7622-aud",
.of_match_table = of_match_clk_mt7622_aud,
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index fa4876317a8d..1c1033a92c46 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_eth);
static struct platform_driver clk_mt7622_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-eth",
.of_match_table = of_match_clk_mt7622_eth,
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 8e57582454c2..5bcfe12c4fd0 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_hif);
static struct platform_driver clk_mt7622_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-hif",
.of_match_table = of_match_clk_mt7622_hif,
diff --git a/drivers/clk/mediatek/clk-mt7622-infracfg.c b/drivers/clk/mediatek/clk-mt7622-infracfg.c
index 6bc911cb29a6..cfdf3b07c3e0 100644
--- a/drivers/clk/mediatek/clk-mt7622-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7622-infracfg.c
@@ -118,7 +118,7 @@ static struct platform_driver clk_mt7622_infracfg_drv = {
.of_match_table = of_match_clk_mt7622_infracfg,
},
.probe = clk_mt7622_infracfg_probe,
- .remove_new = clk_mt7622_infracfg_remove,
+ .remove = clk_mt7622_infracfg_remove,
};
module_platform_driver(clk_mt7622_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 27781a62a131..f62b03abab4f 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -524,7 +524,7 @@ static struct platform_driver clk_mt7622_drv = {
.of_match_table = of_match_clk_mt7622,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7622_drv)
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 96d1a82ad75f..3fdc2d7d4274 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_hif);
static struct platform_driver clk_mt7629_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7629-hif",
.of_match_table = of_match_clk_mt7629_hif,
diff --git a/drivers/clk/mediatek/clk-mt7981-eth.c b/drivers/clk/mediatek/clk-mt7981-eth.c
index e8cb247db0ce..906aec9ddff5 100644
--- a/drivers/clk/mediatek/clk-mt7981-eth.c
+++ b/drivers/clk/mediatek/clk-mt7981-eth.c
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_eth);
static struct platform_driver clk_mt7981_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-eth",
.of_match_table = of_match_clk_mt7981_eth,
diff --git a/drivers/clk/mediatek/clk-mt7981-infracfg.c b/drivers/clk/mediatek/clk-mt7981-infracfg.c
index b2b055151297..0487b6bb80ae 100644
--- a/drivers/clk/mediatek/clk-mt7981-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7981-infracfg.c
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_infracfg);
static struct platform_driver clk_mt7981_infracfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-infracfg",
.of_match_table = of_match_clk_mt7981_infracfg,
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
index 72f2f4f30e85..1943f11e47c1 100644
--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -413,7 +413,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_topckgen);
static struct platform_driver clk_mt7981_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-topckgen",
.of_match_table = of_match_clk_mt7981_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 7ab78e0f49a1..4514d42c0829 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -92,7 +92,7 @@ static struct platform_driver clk_mt7986_eth_drv = {
.of_match_table = of_match_clk_mt7986_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index cb8ab3e53abf..732c65e616de 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -177,7 +177,7 @@ static struct platform_driver clk_mt7986_infracfg_drv = {
.of_match_table = of_match_clk_mt7986_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index b644b4ca4710..2dd30da306d9 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -306,7 +306,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_topckgen);
static struct platform_driver clk_mt7986_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7986-topckgen",
.of_match_table = of_match_clk_mt7986_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7988-eth.c b/drivers/clk/mediatek/clk-mt7988-eth.c
index adf4a9d39b38..7d9463688be2 100644
--- a/drivers/clk/mediatek/clk-mt7988-eth.c
+++ b/drivers/clk/mediatek/clk-mt7988-eth.c
@@ -142,7 +142,7 @@ static struct platform_driver clk_mt7988_eth_drv = {
.of_match_table = of_match_clk_mt7988_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 6c2bebabb4de..ef8267319d91 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -292,7 +292,7 @@ static struct platform_driver clk_mt7988_infracfg_drv = {
.of_match_table = of_match_clk_mt7988_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7988-topckgen.c b/drivers/clk/mediatek/clk-mt7988-topckgen.c
index 7300e9694582..50e02cc7a214 100644
--- a/drivers/clk/mediatek/clk-mt7988-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7988-topckgen.c
@@ -315,7 +315,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_topckgen);
static struct platform_driver clk_mt7988_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7988-topckgen",
.of_match_table = of_match_clk_mt7988_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7988-xfipll.c b/drivers/clk/mediatek/clk-mt7988-xfipll.c
index 9b9ca5471158..f941e4d3ef28 100644
--- a/drivers/clk/mediatek/clk-mt7988-xfipll.c
+++ b/drivers/clk/mediatek/clk-mt7988-xfipll.c
@@ -74,7 +74,7 @@ static struct platform_driver clk_mt7988_xfipll_drv = {
.of_match_table = of_match_clk_mt7988_xfipll,
},
.probe = clk_mt7988_xfipll_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_xfipll_drv);
diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
index 41bb2d2e2ea7..bdadc35c64cb 100644
--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed);
static struct platform_driver clk_mt8135_apmixed_drv = {
.probe = clk_mt8135_apmixed_probe,
- .remove_new = clk_mt8135_apmixed_remove,
+ .remove = clk_mt8135_apmixed_remove,
.driver = {
.name = "clk-mt8135-apmixed",
.of_match_table = of_match_clk_mt8135_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 019af88d7f9c..084e48a554c2 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -558,7 +558,7 @@ static struct platform_driver clk_mt8135_drv = {
.of_match_table = of_match_clk_mt8135,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8135_drv);
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index d1a42ff549c1..d6cff4bdf4cb 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_audsys);
static struct platform_driver clk_mt8167_audsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-audsys",
.of_match_table = of_match_clk_mt8167_audsys,
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index 888ac3bdeacb..42d38ae94b69 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_imgsys);
static struct platform_driver clk_mt8167_imgsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-imgsys",
.of_match_table = of_match_clk_mt8167_imgsys,
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index e873766f130c..1ef37a3e6851 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_mfgcfg);
static struct platform_driver clk_mt8167_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-mfgcfg",
.of_match_table = of_match_clk_mt8167_mfgcfg,
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 38deedffaacf..cef66ee836f3 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8167_mm_id_table);
static struct platform_driver clk_mt8167_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8167-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index c3c892bb8334..e3769bc556a9 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_vdec);
static struct platform_driver clk_mt8167_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-vdecsys",
.of_match_table = of_match_clk_mt8167_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index 5c94995f859c..c64d918c37de 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -887,7 +887,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167);
static struct platform_driver clk_mt8167_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167",
.of_match_table = of_match_clk_mt8167,
diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
index 6cab483b8e1e..95385bb67d55 100644
--- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
@@ -207,7 +207,7 @@ static void clk_mt8173_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8173_apmixed_drv = {
.probe = clk_mt8173_apmixed_probe,
- .remove_new = clk_mt8173_apmixed_remove,
+ .remove = clk_mt8173_apmixed_remove,
.driver = {
.name = "clk-mt8173-apmixed",
.of_match_table = of_match_clk_mt8173_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8173-img.c b/drivers/clk/mediatek/clk-mt8173-img.c
index 1011b9ab3dad..6db2b9ab2bc9 100644
--- a/drivers/clk/mediatek/clk-mt8173-img.c
+++ b/drivers/clk/mediatek/clk-mt8173-img.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_imgsys);
static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-imgsys",
.of_match_table = of_match_clk_mt8173_imgsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-infracfg.c b/drivers/clk/mediatek/clk-mt8173-infracfg.c
index ecc8b0063ea5..fa2d1d557e04 100644
--- a/drivers/clk/mediatek/clk-mt8173-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-infracfg.c
@@ -156,7 +156,7 @@ static struct platform_driver clk_mt8173_infracfg_drv = {
.of_match_table = of_match_clk_mt8173_infracfg,
},
.probe = clk_mt8173_infracfg_probe,
- .remove_new = clk_mt8173_infracfg_remove,
+ .remove = clk_mt8173_infracfg_remove,
};
module_platform_driver(clk_mt8173_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index fd903bee328f..26d27250b914 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -106,7 +106,7 @@ static struct platform_driver clk_mt8173_mm_drv = {
},
.id_table = clk_mt8173_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt8173_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-pericfg.c b/drivers/clk/mediatek/clk-mt8173-pericfg.c
index 783efed3f254..bebda74d0f43 100644
--- a/drivers/clk/mediatek/clk-mt8173-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-pericfg.c
@@ -115,7 +115,7 @@ static struct platform_driver clk_mt8173_pericfg_drv = {
.of_match_table = of_match_clk_mt8173_pericfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_pericfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-topckgen.c b/drivers/clk/mediatek/clk-mt8173-topckgen.c
index 6bb7ffd74487..42c37541cebb 100644
--- a/drivers/clk/mediatek/clk-mt8173-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8173-topckgen.c
@@ -646,7 +646,7 @@ static struct platform_driver clk_mt8173_topckgen_drv = {
.of_match_table = of_match_clk_mt8173_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_topckgen_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173-vdecsys.c b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
index 011e3812156f..625ca0b09cc2 100644
--- a/drivers/clk/mediatek/clk-mt8173-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vdecsys);
static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-vdecsys",
.of_match_table = of_match_clk_mt8173_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-vencsys.c b/drivers/clk/mediatek/clk-mt8173-vencsys.c
index 1bf84ae6a0bc..87755dd1a337 100644
--- a/drivers/clk/mediatek/clk-mt8173-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vencsys.c
@@ -57,7 +57,7 @@ static struct platform_driver clk_mt8173_vencsys_drv = {
.of_match_table = of_match_clk_mt8173_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_vencsys_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index 30a20e8ba84b..011d329ad30e 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_audio);
static struct platform_driver clk_mt8183_audio_drv = {
.probe = clk_mt8183_audio_probe,
- .remove_new = clk_mt8183_audio_remove,
+ .remove = clk_mt8183_audio_remove,
.driver = {
.name = "clk-mt8183-audio",
.of_match_table = of_match_clk_mt8183_audio,
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index f16c3aa3c911..c7642085f8de 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_cam);
static struct platform_driver clk_mt8183_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-cam",
.of_match_table = of_match_clk_mt8183_cam,
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index 32ee6a1867fc..ee92459c74ca 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_img);
static struct platform_driver clk_mt8183_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-img",
.of_match_table = of_match_clk_mt8183_img,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index dc2916c4e0dc..6831747f123b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core0);
static struct platform_driver clk_mt8183_ipu_core0_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core0",
.of_match_table = of_match_clk_mt8183_ipu_core0,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index 9c63e4c592d0..ecf434432e7b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core1);
static struct platform_driver clk_mt8183_ipu_core1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core1",
.of_match_table = of_match_clk_mt8183_ipu_core1,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 54a50eda1719..c1a770ba3245 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_adl);
static struct platform_driver clk_mt8183_ipu_adl_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_adl",
.of_match_table = of_match_clk_mt8183_ipu_adl,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index 99a817d3be6c..f0e72e6edb7a 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_conn);
static struct platform_driver clk_mt8183_ipu_conn_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_conn",
.of_match_table = of_match_clk_mt8183_ipu_conn,
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index b1e802bbfaef..be44889783ff 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_mfg);
static struct platform_driver clk_mt8183_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-mfg",
.of_match_table = of_match_clk_mt8183_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 59acf1e2951b..0f132f05fa8b 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -95,7 +95,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8183_mm_id_table);
static struct platform_driver clk_mt8183_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8183-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 48a8ef3f69aa..43bf34077b16 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_vdec);
static struct platform_driver clk_mt8183_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-vdec",
.of_match_table = of_match_clk_mt8183_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index 8f36688dfa14..c3d99b3b8ff7 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_venc);
static struct platform_driver clk_mt8183_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-venc",
.of_match_table = of_match_clk_mt8183_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 27eee4ef2c0f..aa7cc7709b2d 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -899,7 +899,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183);
static struct platform_driver clk_mt8183_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183",
.of_match_table = of_match_clk_mt8183,
diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
index 6f7127003e4f..4b2b16578232 100644
--- a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
@@ -185,7 +185,7 @@ static void clk_mt8186_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8186_apmixed_drv = {
.probe = clk_mt8186_apmixed_probe,
- .remove_new = clk_mt8186_apmixed_remove,
+ .remove = clk_mt8186_apmixed_remove,
.driver = {
.name = "clk-mt8186-apmixed",
.of_match_table = of_match_clk_mt8186_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c
index 0082f0d9286b..2ddd5f90377f 100644
--- a/drivers/clk/mediatek/clk-mt8186-cam.c
+++ b/drivers/clk/mediatek/clk-mt8186-cam.c
@@ -82,7 +82,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_cam);
static struct platform_driver clk_mt8186_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-cam",
.of_match_table = of_match_clk_mt8186_cam,
diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c
index 0583a18805ce..5e466e1f5f44 100644
--- a/drivers/clk/mediatek/clk-mt8186-img.c
+++ b/drivers/clk/mediatek/clk-mt8186-img.c
@@ -60,7 +60,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_img);
static struct platform_driver clk_mt8186_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-img",
.of_match_table = of_match_clk_mt8186_img,
diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
index 2a2a6bb23205..75abb871044c 100644
--- a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
@@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_imp_iic_wrap);
static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-imp_iic_wrap",
.of_match_table = of_match_clk_mt8186_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index d7239875fb15..8d9d86a510ff 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -231,7 +231,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_infra_ao);
static struct platform_driver clk_mt8186_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-infra-ao",
.of_match_table = of_match_clk_mt8186_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c
index 77bdd2806517..f66a0aeaa6b3 100644
--- a/drivers/clk/mediatek/clk-mt8186-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8186-ipe.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_ipe);
static struct platform_driver clk_mt8186_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-ipe",
.of_match_table = of_match_clk_mt8186_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
index eb54ccb77b74..d1640e4dc2ad 100644
--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -60,7 +60,7 @@ static struct platform_driver clk_mt8186_mcu_drv = {
.of_match_table = of_match_clk_mt8186_mcu,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8186_mcu_drv);
diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c
index fb47d6bacf7f..01561cf902c4 100644
--- a/drivers/clk/mediatek/clk-mt8186-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8186-mdp.c
@@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mdp);
static struct platform_driver clk_mt8186_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mdp",
.of_match_table = of_match_clk_mt8186_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c
index 64cdee1fddd4..3f21b1f222e1 100644
--- a/drivers/clk/mediatek/clk-mt8186-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8186-mfg.c
@@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mfg);
static struct platform_driver clk_mt8186_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mfg",
.of_match_table = of_match_clk_mt8186_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
index 403566187e64..fc8488c44866 100644
--- a/drivers/clk/mediatek/clk-mt8186-mm.c
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8186_mm_id_table);
static struct platform_driver clk_mt8186_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8186-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
index eb9f51e77ca8..14f1cbdbbd13 100644
--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -725,7 +725,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_topck);
static struct platform_driver clk_mt8186_topck_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-topck",
.of_match_table = of_match_clk_mt8186_topck,
diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c
index 25465704ddfb..522b8c952969 100644
--- a/drivers/clk/mediatek/clk-mt8186-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8186-vdec.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_vdec);
static struct platform_driver clk_mt8186_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-vdec",
.of_match_table = of_match_clk_mt8186_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c
index 647dd66a3ce0..c0c98bc75112 100644
--- a/drivers/clk/mediatek/clk-mt8186-venc.c
+++ b/drivers/clk/mediatek/clk-mt8186-venc.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_venc);
static struct platform_driver clk_mt8186_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-venc",
.of_match_table = of_match_clk_mt8186_venc,
diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c
index 47f96e088361..babd7b2778c2 100644
--- a/drivers/clk/mediatek/clk-mt8186-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8186-wpe.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_wpe);
static struct platform_driver clk_mt8186_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-wpe",
.of_match_table = of_match_clk_mt8186_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
index 5ac035bbe684..dcde2187d24a 100644
--- a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
+++ b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
@@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_adsp_audio26m);
static struct platform_driver clk_mt8188_adsp_audio26m_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-adsp_audio26m",
.of_match_table = of_match_clk_mt8188_adsp_audio26m,
diff --git a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
index 85d573d96081..21d7a9a2ab1a 100644
--- a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
@@ -145,7 +145,7 @@ static void clk_mt8188_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8188_apmixed_drv = {
.probe = clk_mt8188_apmixed_probe,
- .remove_new = clk_mt8188_apmixed_remove,
+ .remove = clk_mt8188_apmixed_remove,
.driver = {
.name = "clk-mt8188-apmixed",
.of_match_table = of_match_clk_mt8188_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
index a6a6581f0461..7500bd25387f 100644
--- a/drivers/clk/mediatek/clk-mt8188-cam.c
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -109,7 +109,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_cam);
static struct platform_driver clk_mt8188_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-cam",
.of_match_table = of_match_clk_mt8188_cam,
diff --git a/drivers/clk/mediatek/clk-mt8188-ccu.c b/drivers/clk/mediatek/clk-mt8188-ccu.c
index 9532fc652f01..1566fc437ea3 100644
--- a/drivers/clk/mediatek/clk-mt8188-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8188-ccu.c
@@ -39,7 +39,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ccu);
static struct platform_driver clk_mt8188_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ccu",
.of_match_table = of_match_clk_mt8188_ccu,
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
index 00ad6d7884ae..cb2fbd4136b9 100644
--- a/drivers/clk/mediatek/clk-mt8188-img.c
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imgsys_main);
static struct platform_driver clk_mt8188_imgsys_main_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imgsys_main",
.of_match_table = of_match_clk_mt8188_imgsys_main,
diff --git a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
index 7b713f4cd662..14a4b575b583 100644
--- a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
@@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imp_iic_wrap);
static struct platform_driver clk_mt8188_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imp_iic_wrap",
.of_match_table = of_match_clk_mt8188_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8188-infra_ao.c b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
index face3e191464..b9bc8fcc2ade 100644
--- a/drivers/clk/mediatek/clk-mt8188-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
@@ -213,7 +213,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_infra_ao);
static struct platform_driver clk_mt8188_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-infra_ao",
.of_match_table = of_match_clk_mt8188_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
index fa439af34359..8f1933b71e28 100644
--- a/drivers/clk/mediatek/clk-mt8188-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ipe);
static struct platform_driver clk_mt8188_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ipe",
.of_match_table = of_match_clk_mt8188_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8188-mfg.c b/drivers/clk/mediatek/clk-mt8188-mfg.c
index ec562e7d459d..2ddfb1a3de47 100644
--- a/drivers/clk/mediatek/clk-mt8188-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8188-mfg.c
@@ -38,7 +38,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_mfgcfg);
static struct platform_driver clk_mt8188_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-mfgcfg",
.of_match_table = of_match_clk_mt8188_mfgcfg,
diff --git a/drivers/clk/mediatek/clk-mt8188-peri_ao.c b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
index e4339885b062..639865335fc8 100644
--- a/drivers/clk/mediatek/clk-mt8188-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_peri_ao);
static struct platform_driver clk_mt8188_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-peri_ao",
.of_match_table = of_match_clk_mt8188_peri_ao,
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index 2ccc8a1c98f9..c4baf4076ed6 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -1347,7 +1347,7 @@ static void clk_mt8188_topck_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8188_topck_drv = {
.probe = clk_mt8188_topck_probe,
- .remove_new = clk_mt8188_topck_remove,
+ .remove = clk_mt8188_topck_remove,
.driver = {
.name = "clk-mt8188-topck",
.of_match_table = of_match_clk_mt8188_topck,
diff --git a/drivers/clk/mediatek/clk-mt8188-vdec.c b/drivers/clk/mediatek/clk-mt8188-vdec.c
index bf388997c3f8..f48f0716d7c2 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdec.c
@@ -81,7 +81,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_vdec);
static struct platform_driver clk_mt8188_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-vdec",
.of_match_table = of_match_clk_mt8188_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo0.c b/drivers/clk/mediatek/clk-mt8188-vdo0.c
index 935371fbf1d2..017d6662589b 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo0.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo0_id_table);
static struct platform_driver clk_mt8188_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo0",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
index fb24c9026fd8..4fa355f8f0c2 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo1_id_table);
static struct platform_driver clk_mt8188_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo1",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-venc.c b/drivers/clk/mediatek/clk-mt8188-venc.c
index 4df8d4e05159..01e971545506 100644
--- a/drivers/clk/mediatek/clk-mt8188-venc.c
+++ b/drivers/clk/mediatek/clk-mt8188-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1);
static struct platform_driver clk_mt8188_venc1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-venc1",
.of_match_table = of_match_clk_mt8188_venc1,
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp0.c b/drivers/clk/mediatek/clk-mt8188-vpp0.c
index 310792108793..cd2579b7b9c3 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp0.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp0_id_table);
static struct platform_driver clk_mt8188_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp0",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp1.c b/drivers/clk/mediatek/clk-mt8188-vpp1.c
index 0aa10aaa0292..0e1bd8306e8a 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp1.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp1_id_table);
static struct platform_driver clk_mt8188_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp1",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-wpe.c b/drivers/clk/mediatek/clk-mt8188-wpe.c
index fbac440363cc..d709bb1ee1d6 100644
--- a/drivers/clk/mediatek/clk-mt8188-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8188-wpe.c
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_wpe);
static struct platform_driver clk_mt8188_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-wpe",
.of_match_table = of_match_clk_mt8188_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
index 3590932acc63..0b66a27e4d5a 100644
--- a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
@@ -206,7 +206,7 @@ static struct platform_driver clk_mt8192_apmixed_drv = {
.of_match_table = of_match_clk_mt8192_apmixed,
},
.probe = clk_mt8192_apmixed_probe,
- .remove_new = clk_mt8192_apmixed_remove,
+ .remove = clk_mt8192_apmixed_remove,
};
module_platform_driver(clk_mt8192_apmixed_drv);
MODULE_DESCRIPTION("MediaTek MT8192 apmixed clocks driver");
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index b438ebad998d..f3ebf8713fbb 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_aud);
static struct platform_driver clk_mt8192_aud_drv = {
.probe = clk_mt8192_aud_probe,
- .remove_new = clk_mt8192_aud_remove,
+ .remove = clk_mt8192_aud_remove,
.driver = {
.name = "clk-mt8192-aud",
.of_match_table = of_match_clk_mt8192_aud,
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index 3eed4a7b6d8e..891d2f88d9cf 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_cam);
static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-cam",
.of_match_table = of_match_clk_mt8192_cam,
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index 13a435332752..c08e831125a5 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_img);
static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-img",
.of_match_table = of_match_clk_mt8192_img,
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index 45585f2edd50..0f9530d9263c 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_imp_iic_wrap);
static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-imp_iic_wrap",
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index da2e2d83cd25..c932b8b20edc 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_ipe);
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-ipe",
.of_match_table = of_match_clk_mt8192_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index be674d6c31d7..30334ebca864 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -74,7 +74,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mdp);
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mdp",
.of_match_table = of_match_clk_mt8192_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index 2da969f4ca6b..9d176659e8a2 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mfg);
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mfg",
.of_match_table = of_match_clk_mt8192_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index 2b9c1c4524c2..bda4406e1304 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8192_mm_id_table);
static struct platform_driver clk_mt8192_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8192-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index bc5ce987b76c..04a66220f269 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_msdc);
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-msdc",
.of_match_table = of_match_clk_mt8192_msdc,
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index e017d30a8832..f9e4c16573e2 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_scp_adsp);
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-scp_adsp",
.of_match_table = of_match_clk_mt8192_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index fcb34b1dcdab..9c10161807b2 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec);
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index 98d58a9397cd..0b01e2b7f036 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_venc);
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-venc",
.of_match_table = of_match_clk_mt8192_venc,
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index bce2298ebc8d..50b43807c60c 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -1026,7 +1026,7 @@ static struct platform_driver clk_mt8192_drv = {
.of_match_table = of_match_clk_mt8192,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8192_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index 049ae8123e34..282a3137dc89 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -223,7 +223,7 @@ static void clk_mt8195_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_apmixed_drv = {
.probe = clk_mt8195_apmixed_probe,
- .remove_new = clk_mt8195_apmixed_remove,
+ .remove = clk_mt8195_apmixed_remove,
.driver = {
.name = "clk-mt8195-apmixed",
.of_match_table = of_match_clk_mt8195_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
index b1b562e44cb4..8b45a3fad02f 100644
--- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apusys_pll);
static struct platform_driver clk_mt8195_apusys_pll_drv = {
.probe = clk_mt8195_apusys_pll_probe,
- .remove_new = clk_mt8195_apusys_pll_remove,
+ .remove = clk_mt8195_apusys_pll_remove,
.driver = {
.name = "clk-mt8195-apusys_pll",
.of_match_table = of_match_clk_mt8195_apusys_pll,
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
index 7c8f77817616..02cb20c2948b 100644
--- a/drivers/clk/mediatek/clk-mt8195-cam.c
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_cam);
static struct platform_driver clk_mt8195_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-cam",
.of_match_table = of_match_clk_mt8195_cam,
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
index f78afd7b6ade..22cd1cb070f1 100644
--- a/drivers/clk/mediatek/clk-mt8195-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ccu);
static struct platform_driver clk_mt8195_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ccu",
.of_match_table = of_match_clk_mt8195_ccu,
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
index a59c082ef522..11beba4b2ac2 100644
--- a/drivers/clk/mediatek/clk-mt8195-img.c
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_img);
static struct platform_driver clk_mt8195_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-img",
.of_match_table = of_match_clk_mt8195_img,
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
index 54557f1b0681..8711b18b1576 100644
--- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_imp_iic_wrap);
static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-imp_iic_wrap",
.of_match_table = of_match_clk_mt8195_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 165fe92c6f61..bb648a88e43a 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -233,7 +233,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_infra_ao);
static struct platform_driver clk_mt8195_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-infra_ao",
.of_match_table = of_match_clk_mt8195_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
index 38a23d88370b..b1af00348a86 100644
--- a/drivers/clk/mediatek/clk-mt8195-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ipe);
static struct platform_driver clk_mt8195_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ipe",
.of_match_table = of_match_clk_mt8195_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index e19968eeb346..07c358db1af9 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_mfg);
static struct platform_driver clk_mt8195_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-mfg",
.of_match_table = of_match_clk_mt8195_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
index fc341030f10b..b743eb60a30b 100644
--- a/drivers/clk/mediatek/clk-mt8195-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_peri_ao);
static struct platform_driver clk_mt8195_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-peri_ao",
.of_match_table = of_match_clk_mt8195_peri_ao,
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
index 1f37bde97d90..bc73fccd0515 100644
--- a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_scp_adsp);
static struct platform_driver clk_mt8195_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-scp_adsp",
.of_match_table = of_match_clk_mt8195_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 704498c40349..b1f44b873354 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1354,7 +1354,7 @@ static void clk_mt8195_topck_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_topck_drv = {
.probe = clk_mt8195_topck_probe,
- .remove_new = clk_mt8195_topck_remove,
+ .remove = clk_mt8195_topck_remove,
.driver = {
.name = "clk-mt8195-topck",
.of_match_table = of_match_clk_mt8195_topck,
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
index 9e4cc1a82cbe..0bad706047c9 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_vdec);
static struct platform_driver clk_mt8195_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-vdec",
.of_match_table = of_match_clk_mt8195_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 6e9c3ef19502..581d99f8c254 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -106,7 +106,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo0_id_table);
static struct platform_driver clk_mt8195_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo0",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 422e5729386c..7f8b1a8967bd 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -133,7 +133,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo1_id_table);
static struct platform_driver clk_mt8195_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo1",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
index db7a6ce97ed0..3b52ff025d5e 100644
--- a/drivers/clk/mediatek/clk-mt8195-venc.c
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_venc);
static struct platform_driver clk_mt8195_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-venc",
.of_match_table = of_match_clk_mt8195_venc,
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
index 77d9aaf47a25..0e3e1dd7977c 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp0_id_table);
static struct platform_driver clk_mt8195_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp0",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
index 18ca8f1d9538..fb7b7aef0bba 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp1_id_table);
static struct platform_driver clk_mt8195_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp1",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
index 9c45a2fed0ce..315b93bbfcdc 100644
--- a/drivers/clk/mediatek/clk-mt8195-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_wpe);
static struct platform_driver clk_mt8195_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-wpe",
.of_match_table = of_match_clk_mt8195_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
index 934060e6d9e9..2583c4704ffa 100644
--- a/drivers/clk/mediatek/clk-mt8365-apu.c
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apu);
static struct platform_driver clk_mt8365_apu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-apu",
.of_match_table = of_match_clk_mt8365_apu,
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
index c8fe5f5bb06c..89d2bd50263b 100644
--- a/drivers/clk/mediatek/clk-mt8365-cam.c
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_cam);
static struct platform_driver clk_mt8365_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-cam",
.of_match_table = of_match_clk_mt8365_cam,
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
index 5355f725363d..41bcd389119c 100644
--- a/drivers/clk/mediatek/clk-mt8365-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg);
static struct platform_driver clk_mt8365_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-mfg",
.of_match_table = of_match_clk_mt8365_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 8201949bfdae..56fb2a43ecd0 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8365_mm_id_table);
static struct platform_driver clk_mt8365_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8365-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
index 1be0b3faa2c3..f5d0518bc2e0 100644
--- a/drivers/clk/mediatek/clk-mt8365-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_vdec);
static struct platform_driver clk_mt8365_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-vdec",
.of_match_table = of_match_clk_mt8365_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
index 4228ddec5657..35abd908537c 100644
--- a/drivers/clk/mediatek/clk-mt8365-venc.c
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_venc);
static struct platform_driver clk_mt8365_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-venc",
.of_match_table = of_match_clk_mt8365_venc,
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index 485b525b8acd..e7952121112e 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -809,7 +809,7 @@ static struct platform_driver clk_mt8365_drv = {
.of_match_table = of_match_clk_mt8365,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8365_drv);
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 53e1866fb8e2..6227635fd5a1 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_aud);
static struct platform_driver clk_mt8516_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516-aud",
.of_match_table = of_match_clk_mt8516_aud,
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index b8ae837c59dc..21eb052b0a53 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516);
static struct platform_driver clk_mt8516_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516",
.of_match_table = of_match_clk_mt8516,
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 290ceda84ce4..2e3303975096 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -110,65 +110,6 @@ static int reset_xlate(struct reset_controller_dev *rcdev,
return data->desc->rst_idx_map[reset_spec->args[0]];
}
-int mtk_register_reset_controller(struct device_node *np,
- const struct mtk_clk_rst_desc *desc)
-{
- struct regmap *regmap;
- const struct reset_control_ops *rcops = NULL;
- struct mtk_clk_rst_data *data;
- int ret;
-
- if (!desc) {
- pr_err("mtk clock reset desc is NULL\n");
- return -EINVAL;
- }
-
- switch (desc->version) {
- case MTK_RST_SIMPLE:
- rcops = &mtk_reset_ops;
- break;
- case MTK_RST_SET_CLR:
- rcops = &mtk_reset_ops_set_clr;
- break;
- default:
- pr_err("Unknown reset version %d\n", desc->version);
- return -EINVAL;
- }
-
- regmap = device_node_to_regmap(np);
- if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->desc = desc;
- data->regmap = regmap;
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.ops = rcops;
- data->rcdev.of_node = np;
-
- if (data->desc->rst_idx_map_nr > 0) {
- data->rcdev.of_reset_n_cells = 1;
- data->rcdev.nr_resets = desc->rst_idx_map_nr;
- data->rcdev.of_xlate = reset_xlate;
- } else {
- data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
- }
-
- ret = reset_controller_register(&data->rcdev);
- if (ret) {
- pr_err("could not register reset controller: %d\n", ret);
- kfree(data);
- return ret;
- }
-
- return 0;
-}
-
int mtk_register_reset_controller_with_dev(struct device *dev,
const struct mtk_clk_rst_desc *desc)
{
@@ -198,7 +139,7 @@ int mtk_register_reset_controller_with_dev(struct device *dev,
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
dev_err(dev, "Cannot find regmap %pe\n", regmap);
- return -EINVAL;
+ return PTR_ERR(regmap);
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
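
Two separate cleanups land in reset.c: the device_node-based mtk_register_reset_controller() is deleted now that every caller goes through the device-managed *_with_dev variant, and the regmap lookup in the surviving function now propagates the encoded error instead of flattening everything to -EINVAL. A minimal sketch of that propagation pattern, with a hypothetical example_ helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_get_regmap(struct device *dev, struct regmap **map)
{
	*map = device_node_to_regmap(dev->of_node);
	if (IS_ERR(*map)) {
		dev_err(dev, "Cannot find regmap %pe\n", *map);
		/*
		 * PTR_ERR() hands the real errno back to the caller.
		 * Returning a fixed -EINVAL would hide it and, if the
		 * lookup ever returned -EPROBE_DEFER, would also defeat
		 * probe deferral.
		 */
		return PTR_ERR(*map);
	}
	return 0;
}
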
diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h
index 6a58a3d59165..562ffd290a22 100644
--- a/drivers/clk/mediatek/reset.h
+++ b/drivers/clk/mediatek/reset.h
@@ -60,16 +60,6 @@ struct mtk_clk_rst_data {
};
/**
- * mtk_register_reset_controller - Register MediaTek clock reset controller
- * @np: Pointer to device node.
- * @desc: Constant pointer to description of clock reset.
- *
- * Return: 0 on success and errorno otherwise.
- */
-int mtk_register_reset_controller(struct device_node *np,
- const struct mtk_clk_rst_desc *desc);
-
-/**
* mtk_register_reset_controller - Register mediatek clock reset controller with device
* @np: Pointer to device.
* @desc: Constant pointer to description of clock reset.
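
The surviving kernel-doc block above still carries the name of the function that was just deleted and documents @np where the remaining function takes a struct device. A corrected block would look roughly like this (a sketch; upstream may word it differently):

/**
 * mtk_register_reset_controller_with_dev - Register MediaTek clock reset
 *					    controller with a device
 * @dev: Pointer to the device.
 * @desc: Constant pointer to description of clock reset.
 *
 * Return: 0 on success and errno otherwise.
 */
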
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 99b5bc450446..7aa6abb2eb1f 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -2183,7 +2183,7 @@ static struct clk_regmap *const a1_periphs_regmaps[] = {
&dmc_sel2,
};
-static struct regmap_config a1_periphs_regmap_cfg = {
+static const struct regmap_config a1_periphs_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -2246,3 +2246,4 @@ MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
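
The MODULE_IMPORT_NS(CLK_MESON) lines added across the meson drivers pair with the shared meson clock helpers being exported into a module symbol namespace rather than the global one; a module that uses those symbols without importing the namespace is flagged by modpost at build time and refused at load time unless the kernel is configured to tolerate missing namespace imports. A minimal sketch of both sides, with a hypothetical example_helper symbol:

/* provider module */
#include <linux/export.h>
#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
/* export into the CLK_MESON namespace instead of the global one */
EXPORT_SYMBOL_NS_GPL(example_helper, CLK_MESON);

/* consumer module: declare the dependency explicitly */
MODULE_IMPORT_NS(CLK_MESON);
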
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index a16e537d139a..8e5a42d1afbb 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -295,7 +295,7 @@ static struct clk_regmap *const a1_pll_regmaps[] = {
&hifi_pll,
};
-static struct regmap_config a1_pll_regmap_cfg = {
+static const struct regmap_config a1_pll_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -360,3 +360,4 @@ MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index fa1dcb7f91e4..1dabc81535a6 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -342,3 +342,4 @@ module_platform_driver(axg_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index e03a5bf899c0..beda86349389 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -753,6 +753,9 @@ static struct clk_regmap toddr_d =
AUD_PCLK_GATE(toddr_d, AUDIO_CLK_GATE_EN1, 1);
static struct clk_regmap loopback_b =
AUD_PCLK_GATE(loopback_b, AUDIO_CLK_GATE_EN1, 2);
+static struct clk_regmap earcrx =
+ AUD_PCLK_GATE(earcrx, AUDIO_CLK_GATE_EN1, 6);
+
static struct clk_regmap sm1_mst_a_mclk_sel =
AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -766,6 +769,10 @@ static struct clk_regmap sm1_mst_e_mclk_sel =
AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk_sel =
AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk_sel =
+ AUD_MST_MCLK_MUX(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk_sel =
+ AUD_MST_MCLK_MUX(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
static struct clk_regmap sm1_mst_a_mclk_div =
AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -779,6 +786,11 @@ static struct clk_regmap sm1_mst_e_mclk_div =
AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk_div =
AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk_div =
+ AUD_MST_MCLK_DIV(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk_div =
+ AUD_MST_MCLK_DIV(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
+
static struct clk_regmap sm1_mst_a_mclk =
AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -792,6 +804,10 @@ static struct clk_regmap sm1_mst_e_mclk =
AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk =
AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk =
+ AUD_MST_MCLK_GATE(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk =
+ AUD_MST_MCLK_GATE(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
static struct clk_regmap sm1_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
tdm_mclk_pad_0, AUDIO_SM1_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
@@ -1232,6 +1248,13 @@ static struct clk_hw *sm1_audio_hw_clks[] = {
[AUD_CLKID_SYSCLK_A_EN] = &sm1_sysclk_a_en.hw,
[AUD_CLKID_SYSCLK_B_DIV] = &sm1_sysclk_b_div.hw,
[AUD_CLKID_SYSCLK_B_EN] = &sm1_sysclk_b_en.hw,
+ [AUD_CLKID_EARCRX] = &earcrx.hw,
+ [AUD_CLKID_EARCRX_CMDC_SEL] = &sm1_earcrx_cmdc_clk_sel.hw,
+ [AUD_CLKID_EARCRX_CMDC_DIV] = &sm1_earcrx_cmdc_clk_div.hw,
+ [AUD_CLKID_EARCRX_CMDC] = &sm1_earcrx_cmdc_clk.hw,
+ [AUD_CLKID_EARCRX_DMAC_SEL] = &sm1_earcrx_dmac_clk_sel.hw,
+ [AUD_CLKID_EARCRX_DMAC_DIV] = &sm1_earcrx_dmac_clk_div.hw,
+ [AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw,
};
@@ -1646,6 +1669,13 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_sysclk_a_en,
&sm1_sysclk_b_div,
&sm1_sysclk_b_en,
+ &earcrx,
+ &sm1_earcrx_cmdc_clk_sel,
+ &sm1_earcrx_cmdc_clk_div,
+ &sm1_earcrx_cmdc_clk,
+ &sm1_earcrx_dmac_clk_sel,
+ &sm1_earcrx_dmac_clk_div,
+ &sm1_earcrx_dmac_clk,
};
struct axg_audio_reset_data {
@@ -1726,11 +1756,10 @@ static const struct reset_control_ops axg_audio_rstc_ops = {
.status = axg_audio_reset_status,
};
-static const struct regmap_config axg_audio_regmap_cfg = {
+static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
struct audioclk_data {
@@ -1739,6 +1768,7 @@ struct audioclk_data {
struct meson_clk_hw_data hw_clks;
unsigned int reset_offset;
unsigned int reset_num;
+ unsigned int max_register;
};
static int axg_audio_clkc_probe(struct platform_device *pdev)
@@ -1760,6 +1790,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ axg_audio_regmap_cfg.max_register = data->max_register;
map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
if (IS_ERR(map)) {
dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
@@ -1828,6 +1859,7 @@ static const struct audioclk_data axg_audioclk_data = {
.hws = axg_audio_hw_clks,
.num = ARRAY_SIZE(axg_audio_hw_clks),
},
+ .max_register = AUDIO_CLK_PDMIN_CTRL1,
};
static const struct audioclk_data g12a_audioclk_data = {
@@ -1839,6 +1871,7 @@ static const struct audioclk_data g12a_audioclk_data = {
},
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
+ .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
static const struct audioclk_data sm1_audioclk_data = {
@@ -1850,6 +1883,7 @@ static const struct audioclk_data sm1_audioclk_data = {
},
.reset_offset = AUDIO_SM1_SW_RESET0,
.reset_num = 39,
+ .max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
};
static const struct of_device_id clkc_match_table[] = {
@@ -1878,3 +1912,4 @@ module_platform_driver(axg_audio_driver);
MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
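The axg-audio hunks above drop the hard-coded .max_register from the shared regmap config and instead carry the limit in each variant's audioclk_data, writing it into the config at probe time (AXG, G12A and SM1 each end at a different last register). A condensed sketch of that flow under the same names; example_probe is a pared-down stand-in for axg_audio_clkc_probe and audioclk_data_sketch mirrors only the relevant field:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

struct audioclk_data_sketch {
	unsigned int max_register;	/* last valid register, per SoC */
};

/* non-const on purpose: max_register is patched in before first use */
static struct regmap_config example_regmap_cfg = {
	.reg_bits   = 32,
	.val_bits   = 32,
	.reg_stride = 4,
};

static int example_probe(struct platform_device *pdev)
{
	const struct audioclk_data_sketch *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* each variant reports its own register window */
	example_regmap_cfg.max_register = data->max_register;
	return 0;
}
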
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index 01a3da19933e..9e7765b630c9 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -64,5 +64,7 @@
#define AUDIO_SM1_SW_RESET1 0x02C
#define AUDIO_CLK81_CTRL 0x030
#define AUDIO_CLK81_EN 0x034
+#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
+#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 065b5f198297..757c7a28c53d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -2187,3 +2187,4 @@ module_platform_driver(axg_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 56b33d23c317..7dcbf4ebee07 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -2296,7 +2296,7 @@ static struct clk_regmap *const c3_periphs_clk_regmaps[] = {
&vapb,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -2364,3 +2364,4 @@ module_platform_driver(c3_peripherals_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 6d5271c61d14..32bd2ed9d304 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -678,7 +678,7 @@ static struct clk_regmap *const c3_pll_clk_regmaps[] = {
&mclk1,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -745,3 +745,4 @@ module_platform_driver(c3_pll_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index aa824b030cb8..6c1f58826e24 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -65,8 +65,9 @@ const struct clk_ops meson_clk_cpu_dyndiv_ops = {
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index d46c02b51be5..913bf25d3771 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -130,14 +130,15 @@ const struct clk_ops meson_clk_dualdiv_ops = {
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, CLK_MESON);
const struct clk_ops meson_clk_dualdiv_ro_ops = {
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic dual divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index eae9b7dc5a6c..f639d56f0fd3 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -165,7 +165,7 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, CLK_MESON);
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
@@ -173,8 +173,9 @@ const struct clk_ops meson_clk_mpll_ops = {
.set_rate = mpll_set_rate,
.init = mpll_init,
};
-EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic MPLL driver");
MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index ff3f0b1a3ed1..c1526fbfb6c4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -61,7 +61,7 @@ const struct clk_ops meson_clk_phase_ops = {
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, CLK_MESON);
/*
* This is a special clock for the audio controller.
@@ -123,7 +123,7 @@ const struct clk_ops meson_clk_triphase_ops = {
.get_phase = meson_clk_triphase_get_phase,
.set_phase = meson_clk_triphase_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, CLK_MESON);
/*
* This is a special clock for the audio controller.
@@ -178,9 +178,9 @@ const struct clk_ops meson_sclk_ws_inv_ops = {
.get_phase = meson_sclk_ws_inv_get_phase,
.set_phase = meson_sclk_ws_inv_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops);
-
+EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 467dc8b61a37..bc570a2ff3a3 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -472,7 +472,7 @@ const struct clk_ops meson_clk_pcie_pll_ops = {
.enable = meson_clk_pcie_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, CLK_MESON);
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
@@ -483,15 +483,16 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, CLK_MESON);
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic PLL driver");
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index ad116d24f700..07f7e441b916 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -49,12 +49,12 @@ const struct clk_ops clk_regmap_gate_ops = {
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(clk_regmap_gate_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, CLK_MESON);
const struct clk_ops clk_regmap_gate_ro_ops = {
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(clk_regmap_gate_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, CLK_MESON);
static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
@@ -125,13 +125,13 @@ const struct clk_ops clk_regmap_divider_ops = {
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_divider_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, CLK_MESON);
const struct clk_ops clk_regmap_divider_ro_ops = {
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, CLK_MESON);
static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
{
@@ -174,13 +174,14 @@ const struct clk_ops clk_regmap_mux_ops = {
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, CLK_MESON);
const struct clk_ops clk_regmap_mux_ro_ops = {
.get_parent = clk_regmap_mux_get_parent,
};
-EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index a5f4d15d8396..f0a18d8c9fc2 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -477,3 +477,4 @@ module_platform_driver(g12a_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 4647e84d2502..02dda57105b1 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -5616,3 +5616,4 @@ module_platform_driver(g12a_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 33fafbdf65c4..83b034157b35 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -303,3 +303,4 @@ module_platform_driver(gxbb_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index d3175e4335bb..f071faad1ebb 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -3571,3 +3571,4 @@ module_platform_driver(gxbb_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 2dd064201fae..053940ee8940 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -88,7 +88,8 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_GPL(meson_aoclkc_probe);
+EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Always-ON Clock Controller helpers");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index 4dd5948b7ae4..a8cd2c21fab7 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -20,7 +20,8 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
return data->hws[idx];
}
-EXPORT_SYMBOL_GPL(meson_clk_hw_get);
+EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 570992eece86..66f79e384fe5 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -57,7 +57,8 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_GPL(meson_eeclkc_probe);
+EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index 130c50554290..c930cf0614a0 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -3747,7 +3747,7 @@ static struct clk_regmap *const s4_periphs_clk_regmaps[] = {
&s4_adc_extclk_in_gate,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -3814,3 +3814,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index c2afade24f9f..b0258933fb9d 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -799,7 +799,7 @@ static const struct reg_sequence s4_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -873,3 +873,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 987f5b06587c..ae03b048182f 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -247,8 +247,9 @@ const struct clk_ops meson_sclk_div_ops = {
.set_duty_cycle = sclk_div_set_duty_cycle,
.init = sclk_div_init,
};
-EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index e886df55d6e3..36f637d2d01b 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -49,7 +49,7 @@ const struct clk_ops meson_vclk_gate_ops = {
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_vclk_gate_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, CLK_MESON);
/* The VCLK Divider has supplementary reset & enable bits */
@@ -134,8 +134,9 @@ const struct clk_ops meson_vclk_div_ops = {
.disable = meson_vclk_div_disable,
.is_enabled = meson_vclk_div_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_vclk_div_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic vclk clock driver");
MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index ee129f86794d..486cf68fc97a 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -92,8 +92,9 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
const struct clk_ops meson_vid_pll_div_ro_ops = {
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
-EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic video pll divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index ae521aaf8cdc..88d798d510cd 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -436,7 +436,7 @@ static struct platform_driver mmp2_audio_clk_driver = {
.pm = &mmp2_audio_clk_pm_ops,
},
.probe = mmp2_audio_clk_probe,
- .remove_new = mmp2_audio_clk_remove,
+ .remove = mmp2_audio_clk_remove,
};
module_platform_driver(mmp2_audio_clk_driver);
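The .remove_new to .remove renames in this and the following Marvell and Qualcomm hunks are the mechanical tail of the platform-driver conversion: remove callbacks now return void, so .remove takes them directly and the transitional .remove_new alias can go. A minimal sketch with hypothetical example_* names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* returns void: the core cannot act on an error during removal anyway */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,	/* was .remove_new during the transition */
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("void-returning remove callback sketch");
MODULE_LICENSE("GPL");
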
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 454d131f475e..07ac9e6937e5 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -447,7 +447,6 @@ struct clk *mmp_clk_register_mix(struct device *dev,
struct mmp_clk_mix *mix;
struct clk *clk;
struct clk_init_data init;
- size_t table_bytes;
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
if (!mix)
@@ -461,8 +460,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
if (config->table) {
- table_bytes = sizeof(*config->table) * config->table_size;
- mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
+ mix->table = kmemdup_array(config->table, config->table_size,
+ sizeof(*mix->table), GFP_KERNEL);
if (!mix->table)
goto free_mix;
@@ -470,9 +469,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
}
if (config->mux_table) {
- table_bytes = sizeof(u32) * num_parents;
- mix->mux_table = kmemdup(config->mux_table, table_bytes,
- GFP_KERNEL);
+ mix->mux_table = kmemdup_array(config->mux_table, num_parents,
+ sizeof(*mix->mux_table), GFP_KERNEL);
if (!mix->mux_table) {
kfree(mix->table);
goto free_mix;
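The clk-mix.c hunk above replaces the open-coded "count * size" byte math plus kmemdup() with kmemdup_array(), which performs the multiplication with overflow checking and makes the local table_bytes variable unnecessary. A minimal before/after sketch; dup_table() is a hypothetical helper, not part of the patch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static u32 *dup_table(const u32 *src, size_t count)
{
	/* before: manual sizing, silently wraps if count * size overflows */
	/* return kmemdup(src, sizeof(*src) * count, GFP_KERNEL); */

	/* after: kmemdup_array() checks the count * size multiplication */
	return kmemdup_array(src, count, sizeof(*src), GFP_KERNEL);
}
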
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 8701a58a5804..13906e31bef8 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -792,7 +792,7 @@ static void armada_3700_periph_clock_remove(struct platform_device *pdev)
static struct platform_driver armada_3700_periph_clock_driver = {
.probe = armada_3700_periph_clock_probe,
- .remove_new = armada_3700_periph_clock_remove,
+ .remove = armada_3700_periph_clock_remove,
.driver = {
.name = "marvell-armada-3700-periph-clock",
.of_match_table = armada_3700_periph_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index e94c336e0f1c..1a16f9c0b1d8 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -141,7 +141,7 @@ static const struct of_device_id armada_3700_tbg_clock_of_match[] = {
static struct platform_driver armada_3700_tbg_clock_driver = {
.probe = armada_3700_tbg_clock_probe,
- .remove_new = armada_3700_tbg_clock_remove,
+ .remove = armada_3700_tbg_clock_remove,
.driver = {
.name = "marvell-armada-3700-tbg-clock",
.of_match_table = armada_3700_tbg_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index 0e2e7d00ae11..ca88e5e78b06 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -77,7 +77,7 @@ static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
static struct platform_driver armada_3700_xtal_clock_driver = {
.probe = armada_3700_xtal_clock_probe,
- .remove_new = armada_3700_xtal_clock_remove,
+ .remove = armada_3700_xtal_clock_remove,
.driver = {
.name = "marvell-armada-3700-xtal-clock",
.of_match_table = armada_3700_xtal_clock_of_match,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 11ae28430dad..a3e2a09e2105 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -810,6 +810,14 @@ config SDX_GCC_75
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SM_CAMCC_4450
+ tristate "SM4450 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_4450
+ help
+ Support for the camera clock controller on SM4450 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_6350
tristate "SM6350 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -826,6 +834,16 @@ config SM_CAMCC_7150
Support for the camera clock controller on SM7150 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8150
+ tristate "SM8150 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8150
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SM8150 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SM_CAMCC_8250
tristate "SM8250 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -858,6 +876,16 @@ config SM_CAMCC_8650
Support for the camera clock controller on SM8650 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_DISPCC_4450
+ tristate "SM4450 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_4450
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM4450 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_6115
tristate "SM6115 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -931,20 +959,10 @@ config SM_DISPCC_8450
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550
+ depends on SM_GCC_8550 || SM_GCC_8650
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8550 devices.
- Say Y if you want to support display devices and functionality such as
- splash screen.
-
-config SM_DISPCC_8650
- tristate "SM8650 Display Clock Controller"
- depends on ARM64 || COMPILE_TEST
- select SM_GCC_8650
- help
- Support for the display clock controller on Qualcomm Technologies, Inc
- SM8650 devices.
+ SM8550 or SM8650 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -1054,6 +1072,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GPUCC_4450
+ tristate "SM4450 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_4450
+ help
+ Support for the graphics clock controller on SM4450 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_6115
tristate "SM6115 Graphics Clock Controller"
select SM_GCC_6115
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 0de5fce6113a..2b378667a63f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -107,12 +107,15 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SDX_GCC_75) += gcc-sdx75.o
+obj-$(CONFIG_SM_CAMCC_4450) += camcc-sm4450.o
obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o
obj-$(CONFIG_SM_CAMCC_7150) += camcc-sm7150.o
+obj-$(CONFIG_SM_CAMCC_8150) += camcc-sm8150.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
+obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
@@ -121,7 +124,6 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
-obj-$(CONFIG_SM_DISPCC_8650) += dispcc-sm8650.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -134,6 +136,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f9c5e296dba2..f43d455ab4b8 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -151,6 +151,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_a53pll_match_table[] = {
+ { .compatible = "qcom,msm8226-a7pll" },
{ .compatible = "qcom,msm8916-a53pll" },
{ .compatible = "qcom,msm8939-a53pll" },
{ }
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index ce57b333ec99..ef31386831eb 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -128,7 +128,7 @@ static void qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
static struct platform_driver qcom_apcs_msm8916_clk_driver = {
.probe = qcom_apcs_msm8916_clk_probe,
- .remove_new = qcom_apcs_msm8916_clk_remove,
+ .remove = qcom_apcs_msm8916_clk_remove,
.driver = {
.name = "qcom-apcs-msm8916-clk",
},
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index d644e6e1f8b7..76ece6c4a969 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -131,7 +131,7 @@ static void qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
static struct platform_driver qcom_apcs_sdx55_clk_driver = {
.probe = qcom_apcs_sdx55_clk_probe,
- .remove_new = qcom_apcs_sdx55_clk_remove,
+ .remove = qcom_apcs_sdx55_clk_remove,
.driver = {
.name = "qcom-sdx55-acps-clk",
},
diff --git a/drivers/clk/qcom/camcc-sm4450.c b/drivers/clk/qcom/camcc-sm4450.c
new file mode 100644
index 000000000000..f8503ced3d05
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm4450.c
@@ -0,0 +1,1688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_MAIN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
+/* 1200.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 960.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000247,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll2_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_rivian_evo_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2_out_even.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL4_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+ { .hw = &cam_cc_pll4.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x10004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x11004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xc054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x9028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0xa02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x8024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x8044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x8064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ope_0_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_ope_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0xa048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(548000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0xc02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_clk_src = {
+ .cmd_rcgr = 0xd004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0xd024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0xa060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0xa044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0x13034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x1302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_hf_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_hf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_sf_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x1401c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x9040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x9064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x9044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x9068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xf02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_ahb_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_areg_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ope_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0x14018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xf034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_ahb_clk = {
+ .halt_reg = 0xc070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0xc06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_csid_clk = {
+ .halt_reg = 0xc044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_ahb_clk = {
+ .halt_reg = 0xd048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_clk = {
+ .halt_reg = 0xd01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0xd044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_csid_clk = {
+ .halt_reg = 0xd03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_camss_top_gdsc = {
+ .gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
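+/* Clocks exported to the clock framework, indexed by the qcom,sm4450-camcc binding IDs */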
+static struct clk_regmap *cam_cc_sm4450_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr,
+ [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr,
+ [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr,
+ [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+ [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr,
+ [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr,
+ [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr,
+ [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr,
+ [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr,
+ [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr,
+ [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr,
+ [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sm4450_gdscs[] = {
+ [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc,
+};
+
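+/* Block control registers (BCRs) backing the exported resets */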
+static const struct qcom_reset_map cam_cc_sm4450_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0xa000 },
+ [CAM_CC_CAMNOC_BCR] = { 0x13000 },
+ [CAM_CC_CAMSS_TOP_BCR] = { 0x14000 },
+ [CAM_CC_CCI_0_BCR] = { 0x10000 },
+ [CAM_CC_CCI_1_BCR] = { 0x11000 },
+ [CAM_CC_CPAS_BCR] = { 0x12000 },
+ [CAM_CC_CRE_BCR] = { 0x16000 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x9000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x9024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x9048 },
+ [CAM_CC_ICP_BCR] = { 0xf000 },
+ [CAM_CC_MCLK0_BCR] = { 0x8000 },
+ [CAM_CC_MCLK1_BCR] = { 0x8020 },
+ [CAM_CC_MCLK2_BCR] = { 0x8040 },
+ [CAM_CC_MCLK3_BCR] = { 0x8060 },
+ [CAM_CC_OPE_0_BCR] = { 0xb000 },
+ [CAM_CC_TFE_0_BCR] = { 0xc000 },
+ [CAM_CC_TFE_1_BCR] = { 0xd000 },
+};
+
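+/* 32-bit MMIO regmap covering the CAMCC register region */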
+static const struct regmap_config cam_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16024,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sm4450_desc = {
+ .config = &cam_cc_sm4450_regmap_config,
+ .clks = cam_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm4450_clocks),
+ .resets = cam_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm4450_resets),
+ .gdscs = cam_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm4450_match_table);
+
+static int cam_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
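+ /* Program the PLLs before registering the clocks they feed */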
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sm4450_driver = {
+ .probe = cam_cc_sm4450_probe,
+ .driver = {
+ .name = "camcc-sm4450",
+ .of_match_table = cam_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SM4450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c
new file mode 100644
index 000000000000..bb3009818ad7
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8150.c
@@ -0,0 +1,2159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8150-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
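+/* Indices into the "clocks" property of the device tree node */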
+enum {
+ DT_BI_TCXO,
+ DT_IFACE,
+};
+
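+/* Parent clock sources; the parent_map tables below map these to mux select values */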
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+};
+
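+/* Supported VCO ranges: { min_freq, max_freq, val } */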
+static const struct pll_vco regera_vco[] = {
+ { 600000000, 3300000000, 0 },
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
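+/* 19.2 MHz XO * (62 + 0x8000/0x10000) = 1200 MHz */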
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
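+/* 19.2 MHz XO * (31 + 0x4000/0x10000) = 600 MHz */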
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
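+/* 19.2 MHz XO * 50 = 960 MHz */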
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000807,
+ .config_ctl_hi_val = 0x00000011,
+ .config_ctl_hi1_val = 0x04300142,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000000,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = regera_vco,
+ .num_vco = ARRAY_SIZE(regera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_regera_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
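+/* 19.2 MHz XO * (41 + 0xaaaa/0x10000) = ~800 MHz */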
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
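+/* 19.2 MHz XO * (41 + 0xaaaa/0x10000) = ~800 MHz */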
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
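+/*
+ * Each parent_map entry pairs a P_* source with its hardware mux select
+ * value; the matching clk_parent_data array lists the parents in the same
+ * order.
+ */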
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
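+/*
+ * F(rate, source, pre_div, m, n): pre_div supports half steps (e.g. 1.5),
+ * and a nonzero m/n pair engages the MND divider.
+ */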
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(847000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(950000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(847000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(950000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(12000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 8),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x6088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xc100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
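+/*
+ * titan_top_gdsc is the top-level camera power domain; the GDSCs below all
+ * name it as their parent so it powers up first and collapses last.
+ */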
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *cam_cc_sm8150_clocks[] = {
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sm8150_gdscs[] = {
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sm8150_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x7000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xc16c },
+ [CAM_CC_CCI_BCR] = { 0xc104 },
+ [CAM_CC_CPAS_BCR] = { 0xc164 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x6000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x6024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x6048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x606c },
+ [CAM_CC_FD_BCR] = { 0xc0dc },
+ [CAM_CC_ICP_BCR] = { 0xc0b4 },
+ [CAM_CC_IFE_0_BCR] = { 0xa000 },
+ [CAM_CC_IFE_1_BCR] = { 0xb000 },
+ [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAM_CC_IPE_0_BCR] = { 0x8000 },
+ [CAM_CC_IPE_1_BCR] = { 0x9000 },
+ [CAM_CC_JPEG_BCR] = { 0xc088 },
+ [CAM_CC_LRME_BCR] = { 0xc140 },
+ [CAM_CC_MCLK0_BCR] = { 0x5000 },
+ [CAM_CC_MCLK1_BCR] = { 0x5020 },
+ [CAM_CC_MCLK2_BCR] = { 0x5040 },
+ [CAM_CC_MCLK3_BCR] = { 0x5060 },
+};
+
+static const struct regmap_config cam_cc_sm8150_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe004,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sm8150_desc = {
+ .config = &cam_cc_sm8150_regmap_config,
+ .clks = cam_cc_sm8150_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8150_clocks),
+ .resets = cam_cc_sm8150_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8150_resets),
+ .gdscs = cam_cc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8150_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8150_match_table[] = {
+ { .compatible = "qcom,sm8150-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8150_match_table);
+
+static int cam_cc_sm8150_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
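+	/* Resume the device so the controller registers are accessible in probe */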
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8150_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+ /* Keep the critical clock always-on */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* cam_cc_gdsc_clk */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8150_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sm8150_driver = {
+ .probe = cam_cc_sm8150_probe,
+ .driver = {
+ .name = "camcc-sm8150",
+ .of_match_table = cam_cc_sm8150_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8150_driver);
+
+MODULE_DESCRIPTION("QTI CAM_CC SM8150 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index ad9a84d521fc..f9105443d7db 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -1713,7 +1713,7 @@ static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
if (ret < 0)
return ret;
- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_update_bits(pll->clkr.regmap, PLL_L_VAL(pll), LUCID_EVO_PLL_L_VAL_MASK, l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
/* Latch the PLL input */
@@ -1832,6 +1832,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+/**
+ * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll
+ *
+ * @pll: clk alpha pll
+ * @regmap: register map
+ * @config: configuration to apply for pll
+ */
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ /*
+ * If the bootloader left the PLL enabled it's likely that there are
+ * RCGs that will lock up if we disable the PLL below.
+ */
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n");
+ return;
+ }
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+	/* Set operation mode to STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Place the PLL in STANDBY mode */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure);
+
static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
@@ -2124,10 +2176,8 @@ static void zonda_pll_adjust_l_val(unsigned long rate, unsigned long prate, u32
quotient = rate;
remainder = do_div(quotient, prate);
- *l = quotient;
- if ((remainder * 2) / prate)
- *l = *l + 1;
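+	/* Round the L value up when the remainder is at least half of prate */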
+	*l = quotient + (u32)(remainder * 2 >= prate);
}
static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -2676,3 +2726,33 @@ const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
.set_rate = clk_alpha_pll_stromer_plus_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
+
+void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+
+ /* Set operation mode to STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+EXPORT_SYMBOL_GPL(clk_regera_pll_configure);
+
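+/* Regera shares the Zonda register layout, so the Zonda ops are reused */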
+const struct clk_ops clk_alpha_pll_regera_ops = {
+ .enable = clk_zonda_pll_enable,
+ .disable = clk_zonda_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_zonda_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index df8f0fe15531..55eca04b23a1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -23,6 +23,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
+ CLK_ALPHA_PLL_TYPE_REGERA = CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
@@ -193,6 +194,8 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
+extern const struct clk_ops clk_alpha_pll_regera_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -208,6 +211,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -216,5 +221,7 @@ void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regm
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index f5fd1ff9c6c9..ce4efcd995ea 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
static struct platform_driver qcom_msm8996_cbf_driver = {
.probe = qcom_msm8996_cbf_probe,
- .remove_new = qcom_msm8996_cbf_remove,
+ .remove = qcom_msm8996_cbf_remove,
.driver = {
.name = "qcom-msm8996-cbf",
.of_match_table = qcom_msm8996_cbf_match_table,
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index bb82abeed88f..4acde937114a 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -263,6 +263,8 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd_state = 0;
}
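+	/* Clamp the aggregated state so it cannot overflow the TCS vote field */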
+ cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK);
+
if (c->last_sent_aggr_state != cmd_state) {
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
diff --git a/drivers/clk/qcom/dispcc-sm4450.c b/drivers/clk/qcom/dispcc-sm4450.c
new file mode 100644
index 000000000000..98ba016bc57f
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm4450.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+/* 600.0 MHz Configuration */
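+/* With the 19.2 MHz TCXO: 19.2 * (0x1f + 0x4000 / 2^16) = 19.2 * 31.25 = 600 MHz */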
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
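+/* clk_rcg2_shared_ops parks the RCG at the safe XO source while disabled */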
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x82a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8114,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(380000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(506000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(608000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8098,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x80c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
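+/* Divider between the byte0 RCG and the byte0 interface branch clock */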
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8110,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot1_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
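+/*
+ * Both MDSS core GDSCs are hardware controlled; RETAIN_FF_ENABLE keeps
+ * register contents intact across a power collapse.
+ */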
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm4450_clocks[] = {
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT1_CLK] = &disp_cc_mdss_rot1_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_sm4450_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_sm4450_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static const struct regmap_config disp_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm4450_desc = {
+ .config = &disp_cc_sm4450_regmap_config,
+ .clks = disp_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm4450_clocks),
+ .resets = disp_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm4450_resets),
+ .gdscs = disp_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm4450_match_table);
+
+static int disp_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
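+	/* Both PLLs are initialized from the same 600 MHz configuration */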
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll0_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xe070); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sm4450_driver = {
+ .probe = disp_cc_sm4450_probe,
+ .driver = {
+ .name = "dispcc-sm4450",
+ .of_match_table = disp_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM4450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 5a09009b7289..884bbd3fb305 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -849,6 +849,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
&disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -884,6 +885,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
&disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1009,6 +1011,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1357,8 +1360,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL;
}
- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+ clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ } else {
+ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ }
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, 0x8000, 0x10, 0x10);
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 31ae46f180a5..7f9021ca0ecb 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -71,7 +71,7 @@ enum {
P_SLEEP_CLK,
};
-static const struct pll_vco lucid_ole_vco[] = {
+static struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
@@ -95,7 +95,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_pll0",
.parent_data = &(const struct clk_parent_data) {
.index = DT_BI_TCXO,
@@ -126,7 +126,7 @@ static struct clk_alpha_pll disp_cc_pll1 = {
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_pll1",
.parent_data = &(const struct clk_parent_data) {
.index = DT_BI_TCXO,
@@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
static const struct parent_map disp_cc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
{ P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
{ P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
{ P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
@@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = {
static const struct parent_map disp_cc_parent_map_5[] = {
{ P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
{ P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
};
@@ -286,7 +286,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk_src",
.parent_data = disp_cc_parent_data_6,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
@@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -321,7 +321,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -336,7 +336,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -350,7 +350,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
@@ -365,7 +365,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel0_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -380,7 +380,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel1_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -395,12 +395,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -409,7 +409,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -424,7 +424,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -439,7 +439,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -454,7 +454,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -468,7 +468,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -483,7 +483,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -498,7 +498,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -513,7 +513,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -527,7 +527,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -542,7 +542,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -557,12 +557,12 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -572,12 +572,12 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
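The esc0/esc1 switch from clk_rcg2_ops to clk_rcg2_shared_ops in the two hunks above is a behavioural change, not a cleanup. As I read drivers/clk/qcom/clk-rcg2.c (a paraphrase, not a quote):

	/*
	 * With clk_rcg2_shared_ops, disable() parks the RCG on the safe
	 * XO source and enable() restores the cached CFG/M/N/D settings,
	 * so the RCG never keeps running off a parent that may have been
	 * powered down in the meantime.
	 */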
@@ -594,13 +594,25 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
.cmd_rcgr = 0x80d8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_8,
.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk_src",
.parent_data = disp_cc_parent_data_8,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
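A worked reading of the new SM8650 MDP table: every entry keeps the HID pre-divider at 3 and relies on CLK_SET_RATE_PARENT to retune disp_cc_pll0 itself, so only the PLL moves between operating points. With the F() initializer from drivers/clk/qcom/clk-rcg.h:

	/* From clk-rcg.h: the pre-divider is register-encoded as 2*div-1. */
	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

	/*
	 * So F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0) requests
	 * 514 MHz = pll0 / 3, i.e. disp_cc_pll0 at 1.542 GHz, inside the
	 * 2.1 GHz Lucid OLE VCO ceiling the SM8650 probe path sets below.
	 */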
@@ -615,7 +627,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -630,7 +642,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -645,7 +657,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -665,7 +677,7 @@ static struct clk_rcg2 disp_cc_sleep_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_9,
.freq_tbl = ftbl_disp_cc_sleep_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_sleep_clk_src",
.parent_data = disp_cc_parent_data_9,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
@@ -680,7 +692,7 @@ static struct clk_rcg2 disp_cc_xo_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_xo_clk_src",
.parent_data = disp_cc_parent_data_0_ao,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
@@ -693,7 +705,7 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.reg = 0x8120,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -707,7 +719,7 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
.reg = 0x813c,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -721,7 +733,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
.reg = 0x8188,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -736,7 +748,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
.reg = 0x821c,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -751,7 +763,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
.reg = 0x8250,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -766,7 +778,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
.reg = 0x82cc,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -783,7 +795,7 @@ static struct clk_branch disp_cc_mdss_accu_clk = {
.clkr = {
.enable_reg = 0xe058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_accu_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_xo_clk_src.clkr.hw,
@@ -801,7 +813,7 @@ static struct clk_branch disp_cc_mdss_ahb1_clk = {
.clkr = {
.enable_reg = 0xa020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -819,7 +831,7 @@ static struct clk_branch disp_cc_mdss_ahb_clk = {
.clkr = {
.enable_reg = 0x80a4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -837,7 +849,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
.clkr = {
.enable_reg = 0x8028,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -855,7 +867,7 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
.clkr = {
.enable_reg = 0x802c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_div_clk_src.clkr.hw,
@@ -873,7 +885,7 @@ static struct clk_branch disp_cc_mdss_byte1_clk = {
.clkr = {
.enable_reg = 0x8030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -891,7 +903,7 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
.clkr = {
.enable_reg = 0x8034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_div_clk_src.clkr.hw,
@@ -909,7 +921,7 @@ static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
.clkr = {
.enable_reg = 0x8058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
@@ -927,7 +939,7 @@ static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
.clkr = {
.enable_reg = 0x804c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -945,7 +957,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
.clkr = {
.enable_reg = 0x8040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -963,7 +975,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
.clkr = {
.enable_reg = 0x8048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -981,7 +993,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
.clkr = {
.enable_reg = 0x8050,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
@@ -999,7 +1011,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
.clkr = {
.enable_reg = 0x8054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
@@ -1017,7 +1029,7 @@ static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
.clkr = {
.enable_reg = 0x8044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -1035,7 +1047,7 @@ static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
.clkr = {
.enable_reg = 0x8074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
@@ -1053,7 +1065,7 @@ static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
.clkr = {
.enable_reg = 0x8070,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -1071,7 +1083,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
.clkr = {
.enable_reg = 0x8064,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -1089,7 +1101,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
.clkr = {
.enable_reg = 0x806c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
@@ -1107,7 +1119,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
.clkr = {
.enable_reg = 0x805c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
@@ -1125,7 +1137,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
.clkr = {
.enable_reg = 0x8060,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
@@ -1143,7 +1155,7 @@ static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
.clkr = {
.enable_reg = 0x8068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -1161,7 +1173,7 @@ static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
.clkr = {
.enable_reg = 0x808c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
@@ -1179,7 +1191,7 @@ static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
.clkr = {
.enable_reg = 0x8088,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -1197,7 +1209,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
.clkr = {
.enable_reg = 0x8080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -1215,7 +1227,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
.clkr = {
.enable_reg = 0x8084,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
@@ -1233,7 +1245,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
.clkr = {
.enable_reg = 0x8078,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
@@ -1251,7 +1263,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
.clkr = {
.enable_reg = 0x807c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
@@ -1269,7 +1281,7 @@ static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
.clkr = {
.enable_reg = 0x809c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
@@ -1287,7 +1299,7 @@ static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
.clkr = {
.enable_reg = 0x80a0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -1305,7 +1317,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
.clkr = {
.enable_reg = 0x8094,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -1323,7 +1335,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
.clkr = {
.enable_reg = 0x8098,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
@@ -1341,7 +1353,7 @@ static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
.clkr = {
.enable_reg = 0x8090,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
@@ -1359,7 +1371,7 @@ static struct clk_branch disp_cc_mdss_esc0_clk = {
.clkr = {
.enable_reg = 0x8038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_esc0_clk_src.clkr.hw,
@@ -1377,7 +1389,7 @@ static struct clk_branch disp_cc_mdss_esc1_clk = {
.clkr = {
.enable_reg = 0x803c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_esc1_clk_src.clkr.hw,
@@ -1395,7 +1407,7 @@ static struct clk_branch disp_cc_mdss_mdp1_clk = {
.clkr = {
.enable_reg = 0xa004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1413,7 +1425,7 @@ static struct clk_branch disp_cc_mdss_mdp_clk = {
.clkr = {
.enable_reg = 0x800c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1431,7 +1443,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
.clkr = {
.enable_reg = 0xa010,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1449,7 +1461,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
.clkr = {
.enable_reg = 0x8018,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1467,7 +1479,7 @@ static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
.clkr = {
.enable_reg = 0xc004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_non_gdsc_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1485,7 +1497,7 @@ static struct clk_branch disp_cc_mdss_pclk0_clk = {
.clkr = {
.enable_reg = 0x8004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_pclk0_clk_src.clkr.hw,
@@ -1503,7 +1515,7 @@ static struct clk_branch disp_cc_mdss_pclk1_clk = {
.clkr = {
.enable_reg = 0x8008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_pclk1_clk_src.clkr.hw,
@@ -1521,7 +1533,7 @@ static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
.clkr = {
.enable_reg = 0xc00c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1539,7 +1551,7 @@ static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
.clkr = {
.enable_reg = 0xc008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1557,7 +1569,7 @@ static struct clk_branch disp_cc_mdss_vsync1_clk = {
.clkr = {
.enable_reg = 0xa01c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1575,7 +1587,7 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
.clkr = {
.enable_reg = 0x8024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1593,7 +1605,7 @@ static struct clk_branch disp_cc_sleep_clk = {
.clkr = {
.enable_reg = 0xe074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_sleep_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_sleep_clk_src.clkr.hw,
@@ -1611,7 +1623,7 @@ static struct gdsc mdss_gdsc = {
.name = "mdss_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
};

static struct gdsc mdss_int2_gdsc = {
@@ -1620,7 +1632,7 @@ static struct gdsc mdss_int2_gdsc = {
.name = "mdss_int2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
};
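Both MDSS GDSCs also pick up POLL_CFG_GDSCR here. Going by drivers/clk/qcom/gdsc.c (again a paraphrase, not a quote):

	/*
	 * With POLL_CFG_GDSCR, gdsc.c waits on the POWER_UP_COMPLETE /
	 * POWER_DOWN_COMPLETE bits in the CFG_GDSCR register instead of
	 * polling the PWR_ON bit in the main GDSCR when waiting for the
	 * power switch to settle.
	 */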

static struct clk_regmap *disp_cc_sm8550_clocks[] = {
@@ -1739,6 +1751,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
static const struct of_device_id disp_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-dispcc" },
+ { .compatible = "qcom,sm8650-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8550_match_table);
@@ -1762,6 +1775,13 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
goto err_put_rpm;
}

+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-dispcc")) {
+ lucid_ole_vco[0].max_freq = 2100000000;
+ disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
+ disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
+ }
+
clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
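The probe hunk above is the entire SM8650 delta in this driver: one compatible check patches the shared static tables before any clock is registered. Note that the parent swap writes through a pointer to a const clk_init_data; this is still well-defined C, because only the compound-literal parent_hws array, which is not itself const-qualified, is modified. Condensed to its pattern, with hypothetical names and the real probe's runtime-PM plumbing omitted:

	/* Sketch of the variant-override idiom, hypothetical names. */
	static int example_cc_probe(struct platform_device *pdev)
	{
		struct regmap *regmap;

		regmap = qcom_cc_map(pdev, &example_cc_desc);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* Patch variant-specific fields before registration. */
		if (of_device_is_compatible(pdev->dev.of_node, "vendor,variant-cc")) {
			example_clk_src.freq_tbl = ftbl_example_variant;
			/*
			 * Writes the non-const parent array, not the const
			 * clk_init_data it hangs off.
			 */
			example_clk.clkr.hw.init->parent_hws[0] =
				&other_div_clk_src.clkr.hw;
		}

		return qcom_cc_really_probe(pdev, &example_cc_desc, regmap);
	}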
@@ -1795,5 +1815,5 @@ static struct platform_driver disp_cc_sm8550_driver = {
module_platform_driver(disp_cc_sm8550_driver);
-MODULE_DESCRIPTION("QTI DISPCC SM8550 Driver");
+MODULE_DESCRIPTION("QTI DISPCC SM8550 / SM8650 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
deleted file mode 100644
index c9d2751f5cb8..000000000000
--- a/drivers/clk/qcom/dispcc-sm8650.c
+++ /dev/null
@@ -1,1796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
- * Copyright (c) 2023, Linaro Ltd.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/clock/qcom,sm8650-dispcc.h>
-
-#include "common.h"
-#include "clk-alpha-pll.h"
-#include "clk-branch.h"
-#include "clk-pll.h"
-#include "clk-rcg.h"
-#include "clk-regmap.h"
-#include "clk-regmap-divider.h"
-#include "reset.h"
-#include "gdsc.h"
-
-/* Need to match the order of clocks in DT binding */
-enum {
- DT_BI_TCXO,
- DT_BI_TCXO_AO,
- DT_AHB_CLK,
- DT_SLEEP_CLK,
-
- DT_DSI0_PHY_PLL_OUT_BYTECLK,
- DT_DSI0_PHY_PLL_OUT_DSICLK,
- DT_DSI1_PHY_PLL_OUT_BYTECLK,
- DT_DSI1_PHY_PLL_OUT_DSICLK,
-
- DT_DP0_PHY_PLL_LINK_CLK,
- DT_DP0_PHY_PLL_VCO_DIV_CLK,
- DT_DP1_PHY_PLL_LINK_CLK,
- DT_DP1_PHY_PLL_VCO_DIV_CLK,
- DT_DP2_PHY_PLL_LINK_CLK,
- DT_DP2_PHY_PLL_VCO_DIV_CLK,
- DT_DP3_PHY_PLL_LINK_CLK,
- DT_DP3_PHY_PLL_VCO_DIV_CLK,
-};
-
-#define DISP_CC_MISC_CMD 0xF000
-
-enum {
- P_BI_TCXO,
- P_DISP_CC_PLL0_OUT_MAIN,
- P_DISP_CC_PLL1_OUT_EVEN,
- P_DISP_CC_PLL1_OUT_MAIN,
- P_DP0_PHY_PLL_LINK_CLK,
- P_DP0_PHY_PLL_VCO_DIV_CLK,
- P_DP1_PHY_PLL_LINK_CLK,
- P_DP1_PHY_PLL_VCO_DIV_CLK,
- P_DP2_PHY_PLL_LINK_CLK,
- P_DP2_PHY_PLL_VCO_DIV_CLK,
- P_DP3_PHY_PLL_LINK_CLK,
- P_DP3_PHY_PLL_VCO_DIV_CLK,
- P_DSI0_PHY_PLL_OUT_BYTECLK,
- P_DSI0_PHY_PLL_OUT_DSICLK,
- P_DSI1_PHY_PLL_OUT_BYTECLK,
- P_DSI1_PHY_PLL_OUT_DSICLK,
- P_SLEEP_CLK,
-};
-
-static const struct pll_vco lucid_ole_vco[] = {
- { 249600000, 2100000000, 0 },
-};
-
-static const struct alpha_pll_config disp_cc_pll0_config = {
- .l = 0xd,
- .alpha = 0x6492,
- .config_ctl_val = 0x20485699,
- .config_ctl_hi_val = 0x00182261,
- .config_ctl_hi1_val = 0x82aa299c,
- .test_ctl_val = 0x00000000,
- .test_ctl_hi_val = 0x00000003,
- .test_ctl_hi1_val = 0x00009000,
- .test_ctl_hi2_val = 0x00000034,
- .user_ctl_val = 0x00000000,
- .user_ctl_hi_val = 0x00000005,
-};
-
-static struct clk_alpha_pll disp_cc_pll0 = {
- .offset = 0x0,
- .vco_table = lucid_ole_vco,
- .num_vco = ARRAY_SIZE(lucid_ole_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
- .clkr = {
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_pll0",
- .parent_data = &(const struct clk_parent_data) {
- .index = DT_BI_TCXO,
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_reset_lucid_ole_ops,
- },
- },
-};
-
-static const struct alpha_pll_config disp_cc_pll1_config = {
- .l = 0x1f,
- .alpha = 0x4000,
- .config_ctl_val = 0x20485699,
- .config_ctl_hi_val = 0x00182261,
- .config_ctl_hi1_val = 0x82aa299c,
- .test_ctl_val = 0x00000000,
- .test_ctl_hi_val = 0x00000003,
- .test_ctl_hi1_val = 0x00009000,
- .test_ctl_hi2_val = 0x00000034,
- .user_ctl_val = 0x00000000,
- .user_ctl_hi_val = 0x00000005,
-};
-
-static struct clk_alpha_pll disp_cc_pll1 = {
- .offset = 0x1000,
- .vco_table = lucid_ole_vco,
- .num_vco = ARRAY_SIZE(lucid_ole_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
- .clkr = {
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_pll1",
- .parent_data = &(const struct clk_parent_data) {
- .index = DT_BI_TCXO,
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_reset_lucid_ole_ops,
- },
- },
-};
-
-static const struct parent_map disp_cc_parent_map_0[] = {
- { P_BI_TCXO, 0 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_0[] = {
- { .index = DT_BI_TCXO },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
- { .index = DT_BI_TCXO_AO },
-};
-
-static const struct parent_map disp_cc_parent_map_1[] = {
- { P_BI_TCXO, 0 },
- { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
- { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_1[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_2[] = {
- { P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
- { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_2[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
- { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
- { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
- { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
-};
-
-static const struct parent_map disp_cc_parent_map_3[] = {
- { P_BI_TCXO, 0 },
- { P_DP1_PHY_PLL_LINK_CLK, 2 },
- { P_DP2_PHY_PLL_LINK_CLK, 3 },
- { P_DP3_PHY_PLL_LINK_CLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_3[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP1_PHY_PLL_LINK_CLK },
- { .index = DT_DP2_PHY_PLL_LINK_CLK },
- { .index = DT_DP3_PHY_PLL_LINK_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_4[] = {
- { P_BI_TCXO, 0 },
- { P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
- { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
- { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_4[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP0_PHY_PLL_LINK_CLK },
- { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_5[] = {
- { P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_5[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
- { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
-};
-
-static const struct parent_map disp_cc_parent_map_6[] = {
- { P_BI_TCXO, 0 },
- { P_DISP_CC_PLL1_OUT_MAIN, 4 },
- { P_DISP_CC_PLL1_OUT_EVEN, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_6[] = {
- { .index = DT_BI_TCXO },
- { .hw = &disp_cc_pll1.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
-};
-
-static const struct parent_map disp_cc_parent_map_7[] = {
- { P_BI_TCXO, 0 },
- { P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP1_PHY_PLL_LINK_CLK, 2 },
- { P_DP2_PHY_PLL_LINK_CLK, 3 },
- { P_DP3_PHY_PLL_LINK_CLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_7[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP0_PHY_PLL_LINK_CLK },
- { .index = DT_DP1_PHY_PLL_LINK_CLK },
- { .index = DT_DP2_PHY_PLL_LINK_CLK },
- { .index = DT_DP3_PHY_PLL_LINK_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_8[] = {
- { P_BI_TCXO, 0 },
- { P_DISP_CC_PLL0_OUT_MAIN, 1 },
- { P_DISP_CC_PLL1_OUT_MAIN, 4 },
- { P_DISP_CC_PLL1_OUT_EVEN, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_8[] = {
- { .index = DT_BI_TCXO },
- { .hw = &disp_cc_pll0.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
-};
-
-static const struct parent_map disp_cc_parent_map_9[] = {
- { P_SLEEP_CLK, 0 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_9[] = {
- { .index = DT_SLEEP_CLK },
-};
-
-static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
- F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
- .cmd_rcgr = 0x82e8,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_6,
- .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb_clk_src",
- .parent_data = disp_cc_parent_data_6,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
- .cmd_rcgr = 0x8108,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
- .cmd_rcgr = 0x8124,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
- .cmd_rcgr = 0x81bc,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
- .cmd_rcgr = 0x8170,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_7,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_clk_src",
- .parent_data = disp_cc_parent_data_7,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
- .cmd_rcgr = 0x818c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_4,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_4,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
- .cmd_rcgr = 0x81a4,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_4,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_4,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
- .cmd_rcgr = 0x8220,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
- .cmd_rcgr = 0x8204,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
- .cmd_rcgr = 0x81d4,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
- .cmd_rcgr = 0x81ec,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
- .cmd_rcgr = 0x8284,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
- .cmd_rcgr = 0x8238,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
- .cmd_rcgr = 0x8254,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
- .cmd_rcgr = 0x826c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
- .cmd_rcgr = 0x82d0,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
- .cmd_rcgr = 0x82b4,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
- .cmd_rcgr = 0x829c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
- .cmd_rcgr = 0x8140,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_5,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc0_clk_src",
- .parent_data = disp_cc_parent_data_5,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
- .cmd_rcgr = 0x8158,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_5,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc1_clk_src",
- .parent_data = disp_cc_parent_data_5,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
- .cmd_rcgr = 0x80d8,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_8,
- .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_clk_src",
- .parent_data = disp_cc_parent_data_8,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
- .cmd_rcgr = 0x80a8,
- .mnd_width = 8,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk0_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_pixel_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
- .cmd_rcgr = 0x80c0,
- .mnd_width = 8,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk1_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_pixel_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
- .cmd_rcgr = 0x80f0,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
- F(32000, P_SLEEP_CLK, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_sleep_clk_src = {
- .cmd_rcgr = 0xe05c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_9,
- .freq_tbl = ftbl_disp_cc_sleep_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_sleep_clk_src",
- .parent_data = disp_cc_parent_data_9,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_xo_clk_src = {
- .cmd_rcgr = 0xe03c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_xo_clk_src",
- .parent_data = disp_cc_parent_data_0_ao,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
- .reg = 0x8120,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
- .reg = 0x813c,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
- .reg = 0x8188,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
- .reg = 0x821c,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
- .reg = 0x8250,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
- .reg = 0x82cc,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_branch disp_cc_mdss_accu_clk = {
- .halt_reg = 0xe058,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0xe058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_accu_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_xo_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_ahb1_clk = {
- .halt_reg = 0xa020,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_ahb_clk = {
- .halt_reg = 0x80a4,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x80a4,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte0_clk = {
- .halt_reg = 0x8028,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8028,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
- .halt_reg = 0x802c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x802c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte1_clk = {
- .halt_reg = 0x8030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
- .halt_reg = 0x8034,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8034,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
- .halt_reg = 0x8058,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
- .halt_reg = 0x804c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x804c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
- .halt_reg = 0x8040,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
- .halt_reg = 0x8048,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8048,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
- .halt_reg = 0x8050,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8050,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
- .halt_reg = 0x8054,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8054,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
- .halt_reg = 0x8044,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8044,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
- .halt_reg = 0x8074,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8074,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
- .halt_reg = 0x8070,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8070,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
- .halt_reg = 0x8064,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8064,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
- .halt_reg = 0x806c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x806c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
- .halt_reg = 0x805c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x805c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
- .halt_reg = 0x8060,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8060,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
- .halt_reg = 0x8068,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8068,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
- .halt_reg = 0x808c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x808c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
- .halt_reg = 0x8088,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8088,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
- .halt_reg = 0x8080,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8080,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
- .halt_reg = 0x8084,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8084,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
- .halt_reg = 0x8078,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8078,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
- .halt_reg = 0x807c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x807c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
- .halt_reg = 0x809c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x809c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
- .halt_reg = 0x80a0,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x80a0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
- .halt_reg = 0x8094,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8094,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
- .halt_reg = 0x8098,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8098,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
- .halt_reg = 0x8090,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8090,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_esc0_clk = {
- .halt_reg = 0x8038,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_esc0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_esc1_clk = {
- .halt_reg = 0x803c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x803c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_esc1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp1_clk = {
- .halt_reg = 0xa004,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_clk = {
- .halt_reg = 0x800c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x800c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
- .halt_reg = 0xa010,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_lut1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
- .halt_reg = 0x8018,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x8018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_lut_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
- .halt_reg = 0xc004,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0xc004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_non_gdsc_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_pclk0_clk = {
- .halt_reg = 0x8004,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_pclk0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_pclk1_clk = {
- .halt_reg = 0x8008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8008,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_pclk1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
- .halt_reg = 0xc00c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc00c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_rscc_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
- .halt_reg = 0xc008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc008,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_rscc_vsync_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_vsync1_clk = {
- .halt_reg = 0xa01c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa01c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_vsync_clk = {
- .halt_reg = 0x8024,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_sleep_clk = {
- .halt_reg = 0xe074,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xe074,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_sleep_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_sleep_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct gdsc mdss_gdsc = {
- .gdscr = 0x9000,
- .pd = {
- .name = "mdss_gdsc",
- },
- .pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
-};
-
-static struct gdsc mdss_int2_gdsc = {
- .gdscr = 0xb000,
- .pd = {
- .name = "mdss_int2_gdsc",
- },
- .pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
-};
-
-static struct clk_regmap *disp_cc_sm8650_clocks[] = {
- [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
- [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
- [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
- [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
- [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
- [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
- [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
- [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
- [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
- &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
- &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
- [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
- [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
- [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
- [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
- [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
- [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
- [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
- [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
- [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
- [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
- [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
- [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
- [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
- [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
- [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
- [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
- [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
- [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
- [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
- [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
- [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
- [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
- [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
-};
-
-static const struct qcom_reset_map disp_cc_sm8650_resets[] = {
- [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
- [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
- [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
-};
-
-static struct gdsc *disp_cc_sm8650_gdscs[] = {
- [MDSS_GDSC] = &mdss_gdsc,
- [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
-};
-
-static const struct regmap_config disp_cc_sm8650_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = 0x11008,
- .fast_io = true,
-};
-
-static struct qcom_cc_desc disp_cc_sm8650_desc = {
- .config = &disp_cc_sm8650_regmap_config,
- .clks = disp_cc_sm8650_clocks,
- .num_clks = ARRAY_SIZE(disp_cc_sm8650_clocks),
- .resets = disp_cc_sm8650_resets,
- .num_resets = ARRAY_SIZE(disp_cc_sm8650_resets),
- .gdscs = disp_cc_sm8650_gdscs,
- .num_gdscs = ARRAY_SIZE(disp_cc_sm8650_gdscs),
-};
-
-static const struct of_device_id disp_cc_sm8650_match_table[] = {
- { .compatible = "qcom,sm8650-dispcc" },
- { }
-};
-MODULE_DEVICE_TABLE(of, disp_cc_sm8650_match_table);
-
-static int disp_cc_sm8650_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &disp_cc_sm8650_desc);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_put_rpm;
- }
-
- clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
-
- /* Enable clock gating for MDP clocks */
- regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8650_desc, regmap);
- if (ret)
- goto err_put_rpm;
-
- pm_runtime_put(&pdev->dev);
-
- return 0;
-
-err_put_rpm:
- pm_runtime_put_sync(&pdev->dev);
-
- return ret;
-}
-
-static struct platform_driver disp_cc_sm8650_driver = {
- .probe = disp_cc_sm8650_probe,
- .driver = {
- .name = "disp_cc-sm8650",
- .of_match_table = disp_cc_sm8650_match_table,
- },
-};
-
-module_platform_driver(disp_cc_sm8650_driver);
-
-MODULE_DESCRIPTION("QTI DISPCC SM8650 Driver");
-MODULE_LICENSE("GPL");
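Aside: the probe deleted above ends by forcing DISP_CC_XO_CLK on via qcom_branch_set_clk_en(). If I recall the helper in drivers/clk/qcom/common.c correctly, it does nothing more than set the enable bit of the branch's CBCR register; a rough sketch with a hypothetical wrapper name:

/*
 * Rough equivalent of qcom_branch_set_clk_en() (paraphrased, not a
 * verbatim copy): set bit 0 of a clock branch control register so
 * the branch keeps running even without a Linux consumer.
 */
static void keep_branch_always_on(struct regmap *regmap, u32 cbcr_reg)
{
	regmap_update_bits(regmap, cbcr_reg, BIT(0), BIT(0));
}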
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index f98591148a97..9536b2b7d07c 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -4,12 +4,14 @@
*/
#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5332.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -126,17 +128,6 @@ static struct clk_alpha_pll gpll4_main = {
.parent_data = &gcc_parent_data_xo,
.num_parents = 1,
.ops = &clk_alpha_pll_stromer_ops,
- /*
- * There are no consumers for this GPLL in kernel yet,
- * (will be added soon), so the clock framework
- * disables this source. But some of the clocks
-	 * initialized by boot loaders use this source. So we
- * need to keep this clock ON. Add the
- * CLK_IGNORE_UNUSED flag so the clock will not be
-	 * disabled. Once a consumer is added in the kernel, we
- * can get rid of this flag.
- */
- .flags = CLK_IGNORE_UNUSED,
},
},
};
@@ -3388,6 +3379,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr,
[GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
[GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
+ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
[GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
[GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
@@ -3628,6 +3620,24 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = {
[GCC_UNIPHY1_XPCS_ARES] = { 0x16060 },
};
+#define IPQ_APPS_ID 5332 /* any unique value; used as the first icc node ID */
+
+static struct qcom_icc_hws_data icc_ipq5332_hws[] = {
+ { MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK },
+ { MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK },
+ { MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK },
+ { MASTER_ANOC_PCIE3_2_S, SLAVE_ANOC_PCIE3_2_S, GCC_SNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_SNOC_USB, SLAVE_SNOC_USB, GCC_SNOC_USB_CLK },
+ { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
+ { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
+ { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
+ { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
+ { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
+ { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
+ { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
+ { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
+};
+
static const struct regmap_config gcc_ipq5332_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3656,6 +3666,9 @@ static const struct qcom_cc_desc gcc_ipq5332_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq5332_resets),
.clk_hws = gcc_ipq5332_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq5332_hws),
+ .icc_hws = icc_ipq5332_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5332_hws),
+ .icc_first_node_id = IPQ_APPS_ID,
};
static int gcc_ipq5332_probe(struct platform_device *pdev)
@@ -3674,6 +3687,7 @@ static struct platform_driver gcc_ipq5332_driver = {
.driver = {
.name = "gcc-ipq5332",
.of_match_table = gcc_ipq5332_match_table,
+ .sync_state = icc_sync_state,
},
};
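The icc_hws table and .sync_state hook added above let this GCC double as a clock-based interconnect provider: each row ties a master/slave node pair to the branch clock that gates that path, and icc_sync_state drops votes for paths no consumer claimed once all drivers have probed. A consumer would then keep a path alive through the generic interconnect API; a minimal sketch, with an invented device context and path name:

#include <linux/interconnect.h>

/* Hypothetical consumer of one of the paths above. */
static int example_claim_usb_path(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* "usb-mem" stands in for the consumer's interconnects= DT entry */
	path = of_icc_get(dev, "usb-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* a non-zero vote keeps the backing branch clock enabled */
	ret = icc_set_bw(path, 1000, 1000);
	if (ret)
		icc_put(path);

	return ret;
}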
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index 2e411d874662..ab0f7fc665a9 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -2684,7 +2684,7 @@ static struct clk_rcg2 lpass_q6_axim_clk_src = {
},
};
-static struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 16, 0, 0),
{ }
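This hunk and the many like it below are mechanical constifications: the frequency tables are read-only lookup data, so marking them const moves them into rodata. For orientation, the rows are built with the qcom F() macro; the shape below is recalled from the qcom clk headers rather than quoted, so verify against clk-rcg.h:

/* Shape of one frequency table row (paraphrased from clk-rcg.h): */
struct freq_tbl {
	unsigned long freq;
	u8 src;		/* parent index, resolved via the parent_map */
	u8 pre_div;	/* stored pre-encoded as (2 * divider - 1) */
	u16 m;		/* M/N fractional values, 0 when unused */
	u16 n;
};

#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }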
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 974d01fd4381..9260e2fdb839 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -390,7 +390,7 @@ static const struct clk_parent_data gcc_pxo_pll3_pll0_pll14_pll18_pll11[] = {
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -714,7 +714,7 @@ static struct clk_branch gsbi7_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 32fd01ef469a..7258ba5c0900 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1947,7 +1947,7 @@ static struct clk_regmap_div nss_port6_tx_div_clk_src = {
},
};
-static struct freq_tbl ftbl_crypto_clk_src[] = {
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(40000000, P_GPLL0_DIV2, 10, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
F(100000000, P_GPLL0, 8, 0, 0),
@@ -1968,7 +1968,7 @@ static struct clk_rcg2 crypto_clk_src = {
},
};
-static struct freq_tbl ftbl_gp_clk_src[] = {
+static const struct freq_tbl ftbl_gp_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 33987b957737..37fc5607b2d3 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -164,7 +164,7 @@ static const struct clk_parent_data gcc_cxo_pll14[] = {
{ .hw = &pll14_vote.hw },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -437,7 +437,7 @@ static struct clk_branch gsbi5_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 960000, P_CXO, 4, 1, 5 },
{ 4800000, P_CXO, 4, 0, 1 },
{ 9600000, P_CXO, 2, 0, 1 },
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 67870c899ab9..a6a4477ccdef 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -82,7 +82,7 @@ static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
{ .fw_name = "cxo", .name = "cxo_board" },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -712,7 +712,7 @@ static struct clk_branch gsbi12_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 6236a458e4eb..9ddce11db6df 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -328,7 +328,7 @@ static const struct clk_parent_data gcc_pxo_pll8_pll3[] = {
{ .hw = &pll3.clkr.hw },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -958,7 +958,7 @@ static struct clk_branch gsbi12_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
@@ -2940,7 +2940,7 @@ static struct clk_branch adm0_pbus_clk = {
},
};
-static struct freq_tbl clk_tbl_ce3[] = {
+static const struct freq_tbl clk_tbl_ce3[] = {
{ 48000000, P_PLL8, 8 },
{ 100000000, P_PLL3, 12 },
{ 120000000, P_PLL3, 10 },
@@ -3761,7 +3761,7 @@ static void gcc_msm8960_remove(struct platform_device *pdev)
static struct platform_driver gcc_msm8960_driver = {
.probe = gcc_msm8960_probe,
- .remove_new = gcc_msm8960_remove,
+ .remove = gcc_msm8960_remove,
.driver = {
.name = "gcc-msm8960",
.of_match_table = gcc_msm8960_match_table,
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 80170a805c3b..6a6b7da2b151 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -112,7 +112,7 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
{ .hw = &gpll4.clkr.hw },
};
-static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
@@ -136,7 +136,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_usb30_master_clk_src[] = {
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(125000000, P_GPLL0, 1, 5, 24),
{ }
@@ -156,7 +156,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
{ }
@@ -175,7 +175,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -188,7 +188,7 @@ static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
{ }
};
-static struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -226,7 +226,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -266,7 +266,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -333,7 +333,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -373,7 +373,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -400,7 +400,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 96, 15625),
F(7372800, P_GPLL0, 1, 192, 15625),
F(14745600, P_GPLL0, 1, 384, 15625),
@@ -516,7 +516,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -570,7 +570,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -678,7 +678,7 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -789,7 +789,7 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_gp1_clk_src[] = {
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -810,7 +810,7 @@ static struct clk_rcg2 gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_gp2_clk_src[] = {
+static const struct freq_tbl ftbl_gp2_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -831,7 +831,7 @@ static struct clk_rcg2 gp2_clk_src = {
},
};
-static struct freq_tbl ftbl_gp3_clk_src[] = {
+static const struct freq_tbl ftbl_gp3_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -852,7 +852,7 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
F(1011000, P_XO, 1, 1, 19),
{ }
};
@@ -872,7 +872,7 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
F(125000000, P_XO, 1, 0, 0),
{ }
};
@@ -891,7 +891,7 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
F(1011000, P_XO, 1, 1, 19),
{ }
};
@@ -925,7 +925,7 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = {
},
};
-static struct freq_tbl ftbl_pdm2_clk_src[] = {
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(60000000, P_GPLL0, 10, 0, 0),
{ }
};
@@ -943,7 +943,7 @@ static struct clk_rcg2 pdm2_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -955,7 +955,7 @@ static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
{ }
};
-static struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -981,7 +981,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -1034,7 +1034,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+static const struct freq_tbl ftbl_tsif_ref_clk_src[] = {
F(105500, P_XO, 1, 1, 182),
{ }
};
@@ -1054,7 +1054,7 @@ static struct clk_rcg2 tsif_ref_clk_src = {
},
};
-static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1073,7 +1073,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
},
};
-static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
F(1200000, P_XO, 16, 0, 0),
{ }
};
@@ -1092,7 +1092,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
},
};
-static struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
F(75000000, P_GPLL0, 8, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 4fc667b94cf2..aa3bd2777868 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -359,7 +359,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
F(300000000, P_GPLL0, 2, 0, 0),
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 90b66caba2cd..c9701f7f6e18 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -2242,7 +2242,7 @@ static struct clk_branch gcc_hmss_trig_clk = {
},
};
-static struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
F( 300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
F( 600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
{ }
@@ -2922,6 +2922,43 @@ static struct clk_branch ssc_cnoc_ahbs_clk = {
},
};
+static struct clk_branch hlos1_vote_lpass_core_smmu_clk = {
+	.halt_reg = 0x7d010,
+	.clkr = {
+		.enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+	.halt_reg = 0x7d014,
+	.clkr = {
+		.enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+	.halt_reg = 0x8a040,
+	.clkr = {
+		.enable_reg = 0x8a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.gds_hw_ctrl = 0x0,
@@ -2953,6 +2990,26 @@ static struct gdsc usb_30_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_lpass_adsp = {
+ .gdscr = 0x7d034,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_core = {
+ .gdscr = 0x7d038,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "lpass_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = ALWAYS_ON,
+};
+
static struct clk_regmap *gcc_msm8998_clocks[] = {
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
[BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
@@ -3133,12 +3190,17 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
[GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
[GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+ [HLOS1_VOTE_LPASS_CORE_SMMU_CLK] = &hlos1_vote_lpass_core_smmu_clk.clkr,
+ [HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
[PCIE_0_GDSC] = &pcie_0_gdsc,
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
+ [LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp,
+ [LPASS_CORE_GDSC] = &hlos1_vote_lpass_core,
};
static const struct qcom_reset_map gcc_msm8998_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
index ad135bfa4c76..31e788e22ab4 100644
--- a/drivers/clk/qcom/gcc-sc8180x.c
+++ b/drivers/clk/qcom/gcc-sc8180x.c
@@ -142,6 +142,23 @@ static struct clk_alpha_pll gpll7 = {
},
};
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -241,7 +258,7 @@ static const struct parent_map gcc_parent_map_7[] = {
static const struct clk_parent_data gcc_parents_7[] = {
{ .fw_name = "bi_tcxo", },
{ .hw = &gpll0.clkr.hw },
- { .name = "gppl9" },
+ { .hw = &gpll9.clkr.hw },
{ .hw = &gpll4.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
};
@@ -260,28 +277,6 @@ static const struct clk_parent_data gcc_parents_8[] = {
{ .hw = &gpll0_out_even.clkr.hw },
};
-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
- .cmd_rcgr = 0x48014,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
@@ -609,19 +604,29 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17148,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -630,13 +635,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -645,13 +652,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -660,13 +669,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -675,13 +686,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -690,13 +703,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -705,13 +720,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s6_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -720,13 +737,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s7_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -735,13 +754,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -750,13 +771,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -765,13 +788,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -780,13 +805,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -795,13 +822,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -810,13 +839,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -825,13 +856,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -840,28 +873,33 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
};
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
.cmd_rcgr = 0x1e3a8,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -870,13 +908,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -885,13 +925,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -900,13 +942,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -916,7 +952,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
{ }
};
@@ -939,9 +975,8 @@ static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
{ }
};
@@ -1599,25 +1634,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_cpuss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_cpuss_rbcpr_clk = {
.halt_reg = 0x48008,
.halt_check = BRANCH_HALT,
@@ -3150,25 +3166,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
-/* For CPUSS functionality the SYS NOC clock needs to be left enabled */
-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
- .halt_reg = 0x4819c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_tsif_ahb_clk = {
.halt_reg = 0x36004,
.halt_check = BRANCH_HALT,
@@ -4284,8 +4281,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
@@ -4422,7 +4417,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
[GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
@@ -4511,6 +4505,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GPLL1] = &gpll1.clkr,
[GPLL4] = &gpll4.clkr,
[GPLL7] = &gpll7.clkr,
+ [GPLL9] = &gpll9.clkr,
};
static const struct qcom_reset_map gcc_sc8180x_resets[] = {
@@ -4546,6 +4541,10 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
[GCC_USB3_PHY_SEC_BCR] = { 0x50018 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
[GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x50024 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x50028 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5002c },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x50030 },
[GCC_SDCC2_BCR] = { 0x14000 },
[GCC_SDCC4_BCR] = { 0x16000 },
[GCC_TSIF_BCR] = { 0x36000 },
@@ -4561,6 +4560,29 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
[GCC_VIDEO_AXI1_CLK_BCR] = { .reg = 0xb028, .bit = 2, .udelay = 150 },
};
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
static struct gdsc *gcc_sc8180x_gdscs[] = {
[EMAC_GDSC] = &emac_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
@@ -4602,6 +4624,7 @@ MODULE_DEVICE_TABLE(of, gcc_sc8180x_match_table);
static int gcc_sc8180x_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
regmap = qcom_cc_map(pdev, &gcc_sc8180x_desc);
if (IS_ERR(regmap))
@@ -4623,6 +4646,11 @@ static int gcc_sc8180x_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
return qcom_cc_really_probe(&pdev->dev, &gcc_sc8180x_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 991cd8b8d597..1c59d70e0f96 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -3226,7 +3226,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_1_gdsc = {
@@ -3234,7 +3234,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_2_gdsc = {
@@ -3242,7 +3242,7 @@ static struct gdsc pcie_2_gdsc = {
.pd = {
.name = "pcie_2_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 639a9a955914..c445c271678a 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -2974,7 +2974,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_1_gdsc = {
@@ -2982,7 +2982,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_phy_gdsc = {
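The gdsc hunks in the sm8250 and sm8450 diffs above park the PCIe power domains at retention rather than full power collapse, so the PCIe link and PHY state survive system suspend without a retrain on resume. The power-state masks come from drivers/clk/qcom/gdsc.h; a sketch of the relevant definitions, reproduced from memory and worth verifying against the tree:

	#define PWRSTS_OFF	BIT(0)
	#define PWRSTS_RET	BIT(1)
	#define PWRSTS_ON	BIT(2)
	#define PWRSTS_OFF_ON	(PWRSTS_OFF | PWRSTS_ON)
	#define PWRSTS_RET_ON	(PWRSTS_RET | PWRSTS_ON)

	/* With .pwrsts = PWRSTS_RET_ON, a genpd power-off request resolves
	 * to the retention state, so the GDSC is never fully collapsed. */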
diff --git a/drivers/clk/qcom/gpucc-sm4450.c b/drivers/clk/qcom/gpucc-sm4450.c
new file mode 100644
index 000000000000..a14d0bb031ac
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm4450.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+ P_GPU_CC_PLL1_OUT_EVEN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+/* 680.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x23,
+ .alpha = 0x6aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 500.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 1 },
+ { P_GPU_CC_PLL0_OUT_ODD, 2 },
+ { P_GPU_CC_PLL1_OUT_EVEN, 3 },
+ { P_GPU_CC_PLL1_OUT_ODD, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(340000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(500000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(605000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(765000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(850000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(955000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(1010000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x9070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_4,
+ .freq_tbl = ftbl_gpu_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x9430,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_xo_div_clk_src = {
+ .reg = 0x9050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x919c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x919c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x91a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x91a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x9130,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+ .halt_reg = 0x90b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_ff_clk = {
+ .halt_reg = 0x90c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x90a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_rdvm_clk = {
+ .halt_reg = 0x90c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_rdvm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPU_CC_GX_BCR,
+ GPU_CC_ACD_BCR,
+ GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm4450_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+ [GPU_CC_GX_FF_CLK] = &gpu_cc_gx_ff_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GFX3D_RDVM_CLK] = &gpu_cc_gx_gfx3d_rdvm_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+ [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr,
+};
+
+static struct gdsc *gpu_cc_sm4450_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_sm4450_resets[] = {
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_RBCPR_BCR] = { 0x91e0 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+ [GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+};
+
+static const struct regmap_config gpu_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x95c0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm4450_desc = {
+ .config = &gpu_cc_sm4450_regmap_config,
+ .clks = gpu_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm4450_clocks),
+ .resets = gpu_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm4450_resets),
+ .gdscs = gpu_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm4450_match_table);
+
+static int gpu_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm4450_driver = {
+ .probe = gpu_cc_sm4450_probe,
+ .driver = {
+ .name = "gpucc-sm4450",
+ .of_match_table = gpu_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC SM4450 Driver");
+MODULE_LICENSE("GPL");
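The probe function above leaves three branches (GPU_CC_CB_CLK, GPU_CC_CXO_AON_CLK, GPU_CC_DEMET_CLK) permanently enabled by poking their CBCR registers directly instead of registering them as Linux clocks, since nothing would ever be allowed to gate them. qcom_branch_set_clk_en() is a one-line helper from drivers/clk/qcom/common.c; its effect is equivalent to this sketch:

	static void qcom_branch_set_clk_en(struct regmap *regmap, u32 cbcr)
	{
		/* the branch enable is BIT(0) of every CBCR register */
		regmap_update_bits(regmap, cbcr, BIT(0), BIT(0));
	}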
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index bf5320a43e8c..bbacd7fedb2f 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -70,7 +70,7 @@ static const struct clk_parent_data lcc_pxo_pll4[] = {
{ .fw_name = "pll4_vote", .name = "pll4_vote" },
};
-static struct freq_tbl clk_tbl_aif_mi2s[] = {
+static const struct freq_tbl clk_tbl_aif_mi2s[] = {
{ 1024000, P_PLL4, 4, 1, 96 },
{ 1411200, P_PLL4, 4, 2, 139 },
{ 1536000, P_PLL4, 4, 1, 64 },
@@ -214,7 +214,7 @@ static struct clk_regmap_mux mi2s_bit_clk = {
},
};
-static struct freq_tbl clk_tbl_pcm[] = {
+static const struct freq_tbl clk_tbl_pcm[] = {
{ 64000, P_PLL4, 4, 1, 1536 },
{ 128000, P_PLL4, 4, 1, 768 },
{ 256000, P_PLL4, 4, 1, 384 },
@@ -296,7 +296,7 @@ static struct clk_regmap_mux pcm_clk = {
},
};
-static struct freq_tbl clk_tbl_aif_osr[] = {
+static const struct freq_tbl clk_tbl_aif_osr[] = {
{ 2822400, P_PLL4, 1, 147, 20480 },
{ 4096000, P_PLL4, 1, 1, 96 },
{ 5644800, P_PLL4, 1, 147, 10240 },
@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
},
};
-static struct freq_tbl clk_tbl_ahbix[] = {
+static const struct freq_tbl clk_tbl_ahbix[] = {
{ 131072000, P_PLL4, 1, 1, 3 },
{ },
};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index d53bf315e9c3..7cba2ce3e408 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -57,7 +57,7 @@ static struct clk_parent_data lcc_pxo_pll4[] = {
{ .fw_name = "pll4_vote", .name = "pll4_vote" },
};
-static struct freq_tbl clk_tbl_aif_osr_492[] = {
+static const struct freq_tbl clk_tbl_aif_osr_492[] = {
{ 512000, P_PLL4, 4, 1, 240 },
{ 768000, P_PLL4, 4, 1, 160 },
{ 1024000, P_PLL4, 4, 1, 120 },
@@ -73,7 +73,7 @@ static struct freq_tbl clk_tbl_aif_osr_492[] = {
{ }
};
-static struct freq_tbl clk_tbl_aif_osr_393[] = {
+static const struct freq_tbl clk_tbl_aif_osr_393[] = {
{ 512000, P_PLL4, 4, 1, 192 },
{ 768000, P_PLL4, 4, 1, 128 },
{ 1024000, P_PLL4, 4, 1, 96 },
@@ -218,7 +218,7 @@ CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
-static struct freq_tbl clk_tbl_pcm_492[] = {
+static const struct freq_tbl clk_tbl_pcm_492[] = {
{ 256000, P_PLL4, 4, 1, 480 },
{ 512000, P_PLL4, 4, 1, 240 },
{ 768000, P_PLL4, 4, 1, 160 },
@@ -235,7 +235,7 @@ static struct freq_tbl clk_tbl_pcm_492[] = {
{ }
};
-static struct freq_tbl clk_tbl_pcm_393[] = {
+static const struct freq_tbl clk_tbl_pcm_393[] = {
{ 256000, P_PLL4, 4, 1, 384 },
{ 512000, P_PLL4, 4, 1, 192 },
{ 768000, P_PLL4, 4, 1, 128 },
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index c89700ab93f9..cc03722596a4 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -338,7 +338,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_axi_clk[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -364,7 +364,7 @@ static struct clk_rcg2 mmss_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+static const struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -389,7 +389,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_csi0_3_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -447,7 +447,7 @@ static struct clk_rcg2 csi3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -490,7 +490,7 @@ static struct clk_rcg2 vfe1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -530,7 +530,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -607,7 +607,7 @@ static struct clk_rcg2 pclk1_clk_src = {
},
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -631,7 +631,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
},
};
-static struct freq_tbl ftbl_avsync_vp_clk[] = {
+static const struct freq_tbl ftbl_avsync_vp_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
{ }
@@ -650,7 +650,7 @@ static struct clk_rcg2 vp_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+static const struct freq_tbl ftbl_camss_cci_cci_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -669,7 +669,7 @@ static struct clk_rcg2 cci_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_gp0_1_clk[] = {
F(10000, P_XO, 16, 1, 120),
F(24000, P_XO, 16, 1, 50),
F(6000000, P_GPLL0, 10, 1, 10),
@@ -707,7 +707,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
F(4800000, P_XO, 4, 0, 0),
F(6000000, P_GPLL0, 10, 1, 10),
F(8000000, P_GPLL0, 15, 1, 5),
@@ -777,7 +777,7 @@ static struct clk_rcg2 mclk3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -822,7 +822,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
@@ -871,7 +871,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+static const struct freq_tbl ftbl_mdss_edpaux_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -889,7 +889,7 @@ static struct clk_rcg2 edpaux_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+static const struct freq_tbl ftbl_mdss_edplink_clk[] = {
F(135000000, P_EDPLINK, 2, 0, 0),
F(270000000, P_EDPLINK, 11, 0, 0),
{ }
@@ -909,7 +909,7 @@ static struct clk_rcg2 edplink_clk_src = {
},
};
-static struct freq_tbl edp_pixel_freq_tbl[] = {
+static const struct freq_tbl edp_pixel_freq_tbl[] = {
{ .src = P_EDPVCO },
{ }
};
@@ -928,7 +928,7 @@ static struct clk_rcg2 edppixel_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -959,7 +959,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -978,7 +978,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -996,7 +996,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1014,7 +1014,7 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
+static const struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
{ }
};
@@ -1032,7 +1032,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
},
};
-static struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
+static const struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1050,7 +1050,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_maple_clk[] = {
+static const struct freq_tbl ftbl_vpu_maple_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -1073,7 +1073,7 @@ static struct clk_rcg2 maple_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_vdp_clk[] = {
+static const struct freq_tbl ftbl_vpu_vdp_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
@@ -1095,7 +1095,7 @@ static struct clk_rcg2 vdp_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_bus_clk[] = {
+static const struct freq_tbl ftbl_vpu_bus_clk[] = {
F(40000000, P_GPLL0, 15, 0, 0),
F(80000000, P_MMPLL0, 10, 0, 0),
{ }
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 1061322534c4..3f41249c5ae4 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -155,7 +155,7 @@ static const struct clk_parent_data mmcc_pxo_dsi1_dsi2_byte[] = {
{ .fw_name = "dsi2pllbyte", .name = "dsi2pllbyte" },
};
-static struct freq_tbl clk_tbl_cam[] = {
+static const struct freq_tbl clk_tbl_cam[] = {
{ 6000000, P_PLL8, 4, 1, 16 },
{ 8000000, P_PLL8, 4, 1, 12 },
{ 12000000, P_PLL8, 4, 1, 8 },
@@ -323,7 +323,7 @@ static struct clk_branch camclk2_clk = {
};
-static struct freq_tbl clk_tbl_csi[] = {
+static const struct freq_tbl clk_tbl_csi[] = {
{ 27000000, P_PXO, 1, 0, 0 },
{ 85330000, P_PLL8, 1, 2, 9 },
{ 177780000, P_PLL2, 1, 2, 9 },
@@ -715,7 +715,7 @@ static struct clk_pix_rdi csi_rdi2_clk = {
},
};
-static struct freq_tbl clk_tbl_csiphytimer[] = {
+static const struct freq_tbl clk_tbl_csiphytimer[] = {
{ 85330000, P_PLL8, 1, 2, 9 },
{ 177780000, P_PLL2, 1, 2, 9 },
{ }
@@ -808,7 +808,7 @@ static struct clk_branch csiphy2_timer_clk = {
},
};
-static struct freq_tbl clk_tbl_gfx2d[] = {
+static const struct freq_tbl clk_tbl_gfx2d[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -948,7 +948,7 @@ static struct clk_branch gfx2d1_clk = {
},
};
-static struct freq_tbl clk_tbl_gfx3d[] = {
+static const struct freq_tbl clk_tbl_gfx3d[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -968,7 +968,7 @@ static struct freq_tbl clk_tbl_gfx3d[] = {
{ }
};
-static struct freq_tbl clk_tbl_gfx3d_8064[] = {
+static const struct freq_tbl clk_tbl_gfx3d_8064[] = {
F_MN( 27000000, P_PXO, 0, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -1058,7 +1058,7 @@ static struct clk_branch gfx3d_clk = {
},
};
-static struct freq_tbl clk_tbl_vcap[] = {
+static const struct freq_tbl clk_tbl_vcap[] = {
F_MN( 27000000, P_PXO, 0, 0),
F_MN( 54860000, P_PLL8, 1, 7),
F_MN( 64000000, P_PLL8, 1, 6),
@@ -1149,7 +1149,7 @@ static struct clk_branch vcap_npl_clk = {
},
};
-static struct freq_tbl clk_tbl_ijpeg[] = {
+static const struct freq_tbl clk_tbl_ijpeg[] = {
{ 27000000, P_PXO, 1, 0, 0 },
{ 36570000, P_PLL8, 1, 2, 21 },
{ 54860000, P_PLL8, 7, 0, 0 },
@@ -1214,7 +1214,7 @@ static struct clk_branch ijpeg_clk = {
},
};
-static struct freq_tbl clk_tbl_jpegd[] = {
+static const struct freq_tbl clk_tbl_jpegd[] = {
{ 64000000, P_PLL8, 6 },
{ 76800000, P_PLL8, 5 },
{ 96000000, P_PLL8, 4 },
@@ -1264,7 +1264,7 @@ static struct clk_branch jpegd_clk = {
},
};
-static struct freq_tbl clk_tbl_mdp[] = {
+static const struct freq_tbl clk_tbl_mdp[] = {
{ 9600000, P_PLL8, 1, 1, 40 },
{ 13710000, P_PLL8, 1, 1, 28 },
{ 27000000, P_PXO, 1, 0, 0 },
@@ -1381,7 +1381,7 @@ static struct clk_branch mdp_vsync_clk = {
},
};
-static struct freq_tbl clk_tbl_rot[] = {
+static const struct freq_tbl clk_tbl_rot[] = {
{ 27000000, P_PXO, 1 },
{ 29540000, P_PLL8, 13 },
{ 32000000, P_PLL8, 12 },
@@ -1461,7 +1461,7 @@ static const struct clk_parent_data mmcc_pxo_hdmi[] = {
{ .fw_name = "hdmipll", .name = "hdmi_pll" },
};
-static struct freq_tbl clk_tbl_tv[] = {
+static const struct freq_tbl clk_tbl_tv[] = {
{ .src = P_HDMI_PLL, .pre_div = 1 },
{ }
};
@@ -1624,7 +1624,7 @@ static struct clk_branch hdmi_app_clk = {
},
};
-static struct freq_tbl clk_tbl_vcodec[] = {
+static const struct freq_tbl clk_tbl_vcodec[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 32000000, P_PLL8, 1, 12),
F_MN( 48000000, P_PLL8, 1, 8),
@@ -1699,7 +1699,7 @@ static struct clk_branch vcodec_clk = {
},
};
-static struct freq_tbl clk_tbl_vpe[] = {
+static const struct freq_tbl clk_tbl_vpe[] = {
{ 27000000, P_PXO, 1 },
{ 34909000, P_PLL8, 11 },
{ 38400000, P_PLL8, 10 },
@@ -1752,7 +1752,7 @@ static struct clk_branch vpe_clk = {
},
};
-static struct freq_tbl clk_tbl_vfe[] = {
+static const struct freq_tbl clk_tbl_vfe[] = {
{ 13960000, P_PLL8, 1, 2, 55 },
{ 27000000, P_PXO, 1, 0, 0 },
{ 36570000, P_PLL8, 1, 2, 21 },
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index d5bcb09ebd0c..169e85f60550 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -268,7 +268,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -280,7 +280,7 @@ static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_mmss_axi_clk[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk[] = {
F( 19200000, P_XO, 1, 0, 0),
F( 37500000, P_GPLL0, 16, 0, 0),
F( 50000000, P_GPLL0, 12, 0, 0),
@@ -306,7 +306,7 @@ static struct clk_rcg2 mmss_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+static const struct freq_tbl ftbl_ocmemnoc_clk[] = {
F( 19200000, P_XO, 1, 0, 0),
F( 37500000, P_GPLL0, 16, 0, 0),
F( 50000000, P_GPLL0, 12, 0, 0),
@@ -331,7 +331,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_csi0_3_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -389,7 +389,7 @@ static struct clk_rcg2 csi3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -406,7 +406,7 @@ static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -449,7 +449,7 @@ static struct clk_rcg2 vfe1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -461,7 +461,7 @@ static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -490,7 +490,7 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -567,7 +567,7 @@ static struct clk_rcg2 pclk1_clk_src = {
},
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
F(66700000, P_GPLL0, 9, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_MMPLL0, 6, 0, 0),
@@ -575,7 +575,7 @@ static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_MMPLL0, 6, 0, 0),
@@ -599,7 +599,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+static const struct freq_tbl ftbl_camss_cci_cci_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -617,7 +617,7 @@ static struct clk_rcg2 cci_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_gp0_1_clk[] = {
F(10000, P_XO, 16, 1, 120),
F(24000, P_XO, 16, 1, 50),
F(6000000, P_GPLL0, 10, 1, 10),
@@ -655,14 +655,14 @@ static struct clk_rcg2 camss_gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = {
F(19200000, P_XO, 1, 0, 0),
F(24000000, P_GPLL0, 5, 1, 5),
F(66670000, P_GPLL0, 9, 0, 0),
{ }
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
F(4800000, P_XO, 4, 0, 0),
F(6000000, P_GPLL0, 10, 1, 10),
F(8000000, P_GPLL0, 15, 1, 5),
@@ -729,7 +729,7 @@ static struct clk_rcg2 mclk3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -774,7 +774,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
@@ -783,7 +783,7 @@ static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
@@ -805,7 +805,7 @@ static struct clk_rcg2 cpp_clk_src = {
},
};
-static struct freq_tbl byte_freq_tbl[] = {
+static const struct freq_tbl byte_freq_tbl[] = {
{ .src = P_DSI0PLL_BYTE },
{ }
};
@@ -838,7 +838,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+static const struct freq_tbl ftbl_mdss_edpaux_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -856,7 +856,7 @@ static struct clk_rcg2 edpaux_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+static const struct freq_tbl ftbl_mdss_edplink_clk[] = {
F(135000000, P_EDPLINK, 2, 0, 0),
F(270000000, P_EDPLINK, 11, 0, 0),
{ }
@@ -876,7 +876,7 @@ static struct clk_rcg2 edplink_clk_src = {
},
};
-static struct freq_tbl edp_pixel_freq_tbl[] = {
+static const struct freq_tbl edp_pixel_freq_tbl[] = {
{ .src = P_EDPVCO },
{ }
};
@@ -895,7 +895,7 @@ static struct clk_rcg2 edppixel_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -926,7 +926,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -945,7 +945,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -963,7 +963,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
index 78e5083eaf0f..f70d080bf51c 100644
--- a/drivers/clk/qcom/mmcc-msm8994.c
+++ b/drivers/clk/qcom/mmcc-msm8994.c
@@ -974,7 +974,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1005,7 +1005,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -1024,7 +1024,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_hdmi_clk_src[] = {
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1042,7 +1042,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a32c6eb8217..a742f848e4ee 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -734,7 +734,7 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -753,7 +753,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -771,7 +771,7 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -815,7 +815,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
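The long run of lcc/mmcc hunks above is a mechanical constification: the frequency tables are read-only lookup data, so marking them const lets the linker place them in .rodata. One detail worth calling out in entries such as F(133330000, P_GPLL0, 4.5, 0, 0): the F() macro in drivers/clk/qcom/clk-rcg.h stores the pre-divider as 2 * div - 1, which is how a fractional divider survives as an integer initializer:

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

	/*
	 * F(133330000, P_GPLL0, 4.5, 0, 0)
	 *	=> { 133330000, P_GPLL0, 8, 0, 0 }
	 * since 2 * 4.5 - 1 == 8, matching the hardware's half-integer
	 * divider encoding with no floating point left at runtime.
	 */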
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index 97d150b132a6..7c25a50cfa97 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -449,7 +449,7 @@ static struct gdsc video_cc_mvs0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs0c_gdsc.pd,
- .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
};
static struct gdsc video_cc_mvs1c_gdsc = {
@@ -474,7 +474,7 @@ static struct gdsc video_cc_mvs1_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs1c_gdsc.pd,
- .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
};
static struct clk_regmap *video_cc_sm8550_clocks[] = {
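The two videocc hunks swap HW_CTRL for HW_CTRL_TRIGGER on the MVS0/MVS1 GDSCs. Plain HW_CTRL hands the domain to hardware control unconditionally; HW_CTRL_TRIGGER instead exposes the genpd hwmode callbacks so the video driver can move the domain between software and hardware control on demand. A consumer-side sketch (the surrounding driver code is hypothetical, but dev_pm_genpd_set_hwmode() is the real interface from include/linux/pm_domain.h):

	#include <linux/pm_domain.h>

	/* hand the GDSC over to the video firmware while streaming... */
	ret = dev_pm_genpd_set_hwmode(dev, true);
	if (ret)
		return ret;

	/* ...and reclaim software control before powering down */
	ret = dev_pm_genpd_set_hwmode(dev, false);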
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 4410d16de4e2..76791a1c50ac 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,6 +40,7 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -193,6 +194,10 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G057
+ bool "RZ/V2H(P) clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -228,6 +233,10 @@ config CLK_RZG2L
bool "RZ/{G2L,G2UL,G3S,V2L} family clock support" if COMPILE_TEST
select RESET_CONTROLLER
+config CLK_RZV2H
+ bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ select RESET_CONTROLLER
+
# Generic
config CLK_RENESAS_CPG_MSSR
bool "CPG/MSSR clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index f7e18679c3b8..23d2e26051c8 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
@@ -46,6 +47,7 @@ obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN4_CPG) += rcar-gen4-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o
+obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o
# Generic
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5304c977562f..5bc473c2adb3 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -207,7 +207,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);
- if (of_find_property(np, "clock-indices", &i))
+ if (of_property_present(np, "clock-indices"))
idxname = "clock-indices";
else
idxname = "renesas,clock-indices";
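The clk-mstp hunk above is a small API cleanup: of_find_property() was only being consulted for a yes/no answer, with the length out-parameter (the reused loop counter i) ignored, so of_property_present() states the intent directly:

	/* old: boolean test with a dummy length argument
	 *	of_find_property(np, "clock-indices", &i)
	 * new: plain presence check returning bool */
	if (of_property_present(np, "clock-indices"))
		idxname = "clock-indices";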
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index ff3f85e906fe..4c8e4c69c1bf 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -61,6 +61,11 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define CPG_PLL20CR 0x0834 /* PLL20 Control Register */
+#define CPG_PLL21CR 0x0838 /* PLL21 Control Register */
+#define CPG_PLL30CR 0x083c /* PLL30 Control Register */
+#define CPG_PLL31CR 0x0840 /* PLL31 Control Register */
+
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -70,10 +75,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_PLL(".pll20", CLK_PLL20, 0x0834),
- DEF_PLL(".pll21", CLK_PLL21, 0x0838),
- DEF_PLL(".pll30", CLK_PLL30, 0x083c),
- DEF_PLL(".pll31", CLK_PLL31, 0x0840),
+ DEF_PLL(".pll20", CLK_PLL20, CPG_PLL20CR),
+ DEF_PLL(".pll21", CLK_PLL21, CPG_PLL21CR),
+ DEF_PLL(".pll30", CLK_PLL30, CPG_PLL30CR),
+ DEF_PLL(".pll31", CLK_PLL31, CPG_PLL31CR),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1),
@@ -116,17 +121,17 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1),
- DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
R8A779A0_CLK_RPC),
- DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
- DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
- DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884),
+ DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
+ DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, CPG_CSICKCR),
+ DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
DEF_GEN4_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
DEF_GEN4_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
@@ -253,12 +258,12 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 16, },
- { 1, 106, 1, 0, 0, 0, 0, 120, 1, 160, 1, 0, 0, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 128, 1, 192, 1, 16, },
+ { 1, 106, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 192, 1, 32, },
};
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index cc06127406ab..f33342314b2e 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -57,12 +57,12 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_GEN4_PLL_F9_24(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -115,13 +115,13 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED("sasyncperd2",R8A779F0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1),
DEF_FIXED("sasyncperd4",R8A779F0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1),
- DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
- DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8),
DEF_GEN4_MDSEL("r", R8A779F0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
@@ -187,12 +187,12 @@ static const unsigned int r8a779f0_crit_mod_clks[] __initconst = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 200, 1, 150, 1, 200, 1, 0, 0, 200, 1, 134, 1, 15, },
- { 1, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 38, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 200, 1, 200, 1, 15, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 160, 1, 160, 1, 38, },
};
static int __init r8a779f0_cpg_mssr_init(struct device *dev)
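From r8a779f0 onwards the per-PLL mult/div columns drop out of cpg_pll_configs: PLL2/3/4/6 are now declared through the DEF_GEN4_PLL_F*/DEF_GEN4_PLL_V* macros, which carry the PLL's register description with them (the F variants appear to be fixed-rate PLLs and the V variants variable-rate ones; the numeric suffixes look like register bit-field layouts, so treat that reading as an assumption). What remains per MD-pin setting is roughly this shape, sketched from the comment row in the hunk above rather than from the header file:

	/* approximate post-series layout in rcar-gen4-cpg.h */
	struct rcar_gen4_cpg_pll_config {
		u8 extal_div;
		u8 pll1_mult;
		u8 pll1_div;
		u8 pll5_mult;
		u8 pll5_div;
		u8 osc_prediv;
	};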
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index c4b1938db76b..55c8dd032fc3 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -66,13 +66,13 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2_VAR, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -146,14 +146,14 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1),
- DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR),
DEF_FIXED("dsiref", R8A779G0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
- DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
+ DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
- DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, 0x870),
- DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, CPG_SD0CKCR),
+ DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC),
@@ -258,12 +258,12 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 16, },
- { 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a779g0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 16a2e26abcc7..e20c048bfa9b 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -63,19 +63,19 @@ enum clk_ids {
MOD_CLK_BASE
};
-static const struct cpg_core_clk r8a779h0_core_clks[] = {
+static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
+ DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN),
DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -156,14 +156,14 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = {
DEF_FIXED("viobusd2", R8A779H0_CLK_VIOBUSD2, CLK_VIOSRC, 2, 1),
DEF_FIXED("vcbusd1", R8A779H0_CLK_VCBUSD1, CLK_VCSRC, 1, 1),
DEF_FIXED("vcbusd2", R8A779H0_CLK_VCBUSD2, CLK_VCSRC, 2, 1),
- DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR),
DEF_FIXED("dsiref", R8A779H0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
- DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
- DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
+ DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
- DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779H0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779H0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779H0_CLK_RPC),
@@ -172,10 +172,11 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = {
DEF_GEN4_MDSEL("r", R8A779H0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
-static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
+static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC),
+ DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
@@ -195,6 +196,8 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
DEF_MOD("msi3", 621, R8A779H0_CLK_MSO),
DEF_MOD("msi4", 622, R8A779H0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779H0_CLK_MSO),
+ DEF_MOD("pcie0", 624, R8A779H0_CLK_S0D2_HSC),
+ DEF_MOD("pwm", 628, R8A779H0_CLK_SASYNCPERD4),
DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779H0_CLK_SASYNCPERD4),
DEF_MOD("scif1", 703, R8A779H0_CLK_SASYNCPERD4),
@@ -252,12 +255,12 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 16, },
- { 1, 160, 1, 200, 1, 160, 1, 200, 1, 160, 1, 140, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a779h0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 16acc95f3c62..c3c2b0c43983 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -52,6 +52,8 @@ enum clk_ids {
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
+ CLK_PLL5_FOUTPOSTDIV,
+ CLK_DSI_DIV,
#endif
CLK_PLL6,
CLK_PLL6_250,
@@ -120,6 +122,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+ DEF_PLL5_FOUTPOSTDIV(".pll5_foutpostdiv", CLK_PLL5_FOUTPOSTDIV, CLK_EXTAL),
#endif
DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
@@ -146,6 +149,8 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
#ifdef CONFIG_ARM64
DEF_FIXED("M2", R9A07G043_CLK_M2, CLK_PLL3_533, 1, 2),
DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G043_CLK_M2, 1, 2),
+ DEF_DSI_DIV("DSI_DIV", CLK_DSI_DIV, CLK_PLL5_FOUTPOSTDIV, CLK_SET_RATE_PARENT),
+ DEF_FIXED("M3", R9A07G043_CLK_M3, CLK_DSI_DIV, 1, 1),
#endif
};
@@ -209,6 +214,12 @@ static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
0x564, 2),
DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
0x564, 3),
+ DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
+ 0x56c, 0),
+ DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
+ 0x56c, 0),
+ DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
+ 0x56c, 1),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
0x570, 0),
@@ -309,6 +320,7 @@ static const struct rzg2l_reset r9a07g043_resets[] = {
DEF_RST(R9A07G043_CRU_CMN_RSTB, 0x864, 0),
DEF_RST(R9A07G043_CRU_PRESETN, 0x864, 1),
DEF_RST(R9A07G043_CRU_ARESETN, 0x864, 2),
+ DEF_RST(R9A07G043_LCDC_RESET_N, 0x86c, 0),
#endif
DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1),
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index a891bfc3ab5a..1ce40fb51f13 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -193,6 +193,7 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
+ DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1),
DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
@@ -207,6 +208,10 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
+ DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
+ DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
+ DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3),
DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
@@ -226,10 +231,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A08G045_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A08G045_DMAC_ARESETN, 0x82c, 0),
+ DEF_RST(R9A08G045_DMAC_RST_ASYNC, 0x82c, 1),
DEF_RST(R9A08G045_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
+ DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
+ DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
+ DEF_RST(R9A08G045_USB_PRESETN, 0x878, 3),
DEF_RST(R9A08G045_ETH0_RST_HW_N, 0x87c, 0),
DEF_RST(R9A08G045_ETH1_RST_HW_N, 0x87c, 1),
DEF_RST(R9A08G045_I2C0_MRST, 0x880, 0),
@@ -277,6 +288,15 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
RZG2L_PD_F_NONE),
+ DEF_PD("usb0", R9A08G045_PD_USB0,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("usb1", R9A08G045_PD_USB1,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)),
+ RZG2L_PD_F_NONE),
DEF_PD("eth0", R9A08G045_PD_ETHER0,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
RZG2L_PD_F_NONE),
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
new file mode 100644
index 000000000000..3ee32db5c0af
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g057-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
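+/*
+ * Illustrative reading of the table above: a DIVCTL register value of
+ * 3 selects a /16 divider; the trailing { 0, 0 } entry is the
+ * terminating sentinel expected by the common clk-divider code.
+ */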
+
+static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
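+/*
+ * A reading of the DEF_MOD() columns below (an assumption based on
+ * struct mod_clock in rzv2h-cpg.c): name, parent, CLK_ON register
+ * index, CLK_ON bit, CLK_MON register index, CLK_MON bit.
+ */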
+static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+};
+
+static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+ DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+ DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+ DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+ DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+ DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+ DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g057_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g057_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g057_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g057_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g057_resets,
+ .num_resets = ARRAY_SIZE(r9a09g057_resets),
+};
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 77a4bb3e17f3..31aa790fd003 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -45,7 +45,6 @@ static u32 cpg_mode __initdata;
#define CPG_PLL6CR1 0x8d8
#define CPG_PLLxCR0_KICK BIT(31)
-#define CPG_PLLxCR0_NI GENMASK(27, 20) /* Integer mult. factor */
#define CPG_PLLxCR0_SSMODE GENMASK(18, 16) /* PLL mode */
#define CPG_PLLxCR0_SSMODE_FM BIT(18) /* Fractional Multiplication */
#define CPG_PLLxCR0_SSMODE_DITH BIT(17) /* Frequency Dithering */
@@ -53,35 +52,57 @@ static u32 cpg_mode __initdata;
#define CPG_PLLxCR0_SSFREQ GENMASK(14, 8) /* SSCG Modulation Frequency */
#define CPG_PLLxCR0_SSDEPT GENMASK(6, 0) /* SSCG Modulation Depth */
-#define SSMODE_FM BIT(2) /* Fractional Multiplication */
-#define SSMODE_DITHER BIT(1) /* Frequency Dithering */
-#define SSMODE_CENTER BIT(0) /* Center (vs. Down) Spread Dithering */
+/* Fractional 8.25 PLL */
+#define CPG_PLLxCR0_NI8 GENMASK(27, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF25 GENMASK(24, 0) /* Fractional mult. factor */
+
+/* Fractional 9.24 PLL */
+#define CPG_PLLxCR0_NI9 GENMASK(28, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF24 GENMASK(23, 0) /* Fractional mult. factor */
+
+#define CPG_PLLxCR_STC GENMASK(30, 24) /* R-Car V3U PLLxCR */
+
+#define CPG_RPCCKCR 0x874 /* RPC Clock Freq. Control Register */
+
+#define CPG_SD0CKCR1 0x8a4 /* SD-IF0 Clock Freq. Control Reg. 1 */
+
+#define CPG_SD0CKCR1_SDSRC_SEL GENMASK(30, 29) /* SDSRC clock freq. select */
/* PLL Clocks */
struct cpg_pll_clk {
struct clk_hw hw;
void __iomem *pllcr0_reg;
+ void __iomem *pllcr1_reg;
void __iomem *pllecr_reg;
u32 pllecr_pllst_mask;
};
#define to_pll_clk(_hw) container_of(_hw, struct cpg_pll_clk, hw)
-static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long cpg_pll_8_25_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
- unsigned int mult;
-
- mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
+ unsigned long rate;
+
+ ni = (FIELD_GET(CPG_PLLxCR0_NI8, cr0) + 1) * 2;
+ rate = parent_rate * ni;
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ nf = FIELD_GET(CPG_PLLxCR1_NF25, readl(pll_clk->pllcr1_reg));
+ rate += mul_u64_u32_shr(parent_rate, nf, 24);
+ }
- return parent_rate * mult * 2;
+ return rate;
}
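+
+/*
+ * Sketch of the fractional 8.25 rate computed above: with NI taken
+ * from PLLxCR0 and NF from PLLxCR1,
+ *   rate = parent * 2 * (NI + 1) + parent * NF / 2^24
+ * e.g. NI = 95 and NF = 0 turn a 16.66 MHz EXTAL into ~3.2 GHz,
+ * matching the "mult 192" entries in the cpg_pll_configs tables.
+ */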
-static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
+static int cpg_pll_8_25_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int min_mult, max_mult, mult;
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ unsigned int min_mult, max_mult, ni, nf;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
unsigned long prate;
prate = req->best_parent_rate * 2;
@@ -90,28 +111,58 @@ static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
if (max_mult < min_mult)
return -EINVAL;
- mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
- mult = clamp(mult, min_mult, max_mult);
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ ni = div64_ul(req->rate, prate);
+ if (ni < min_mult) {
+ ni = min_mult;
+ nf = 0;
+ } else {
+ ni = min(ni, max_mult);
+ nf = div64_ul((u64)(req->rate - prate * ni) << 24,
+ req->best_parent_rate);
+ }
+ } else {
+ ni = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
+ ni = clamp(ni, min_mult, max_mult);
+ nf = 0;
+ }
+ req->rate = prate * ni + mul_u64_u32_shr(req->best_parent_rate, nf, 24);
- req->rate = prate * mult;
return 0;
}
-static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int cpg_pll_8_25_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
- unsigned int mult;
+ unsigned long prate = parent_rate * 2;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
u32 val;
- mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2);
- mult = clamp(mult, 1U, 256U);
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ ni = div64_ul(rate, prate);
+ if (ni < 1) {
+ ni = 1;
+ nf = 0;
+ } else {
+ ni = min(ni, 256U);
+ nf = div64_ul((u64)(rate - prate * ni) << 24,
+ parent_rate);
+ }
+ } else {
+ ni = DIV_ROUND_CLOSEST_ULL(rate, prate);
+ ni = clamp(ni, 1U, 256U);
+ }
if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
return -EBUSY;
- cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI,
- FIELD_PREP(CPG_PLLxCR0_NI, mult - 1));
+ cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI8,
+ FIELD_PREP(CPG_PLLxCR0_NI8, ni - 1));
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM)
+ cpg_reg_modify(pll_clk->pllcr1_reg, CPG_PLLxCR1_NF25,
+ FIELD_PREP(CPG_PLLxCR1_NF25, nf));
/*
* Set KICK bit in PLLxCR0 to update hardware setting and wait for
@@ -132,22 +183,55 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val & pll_clk->pllecr_pllst_mask, 0, 1000);
}
-static const struct clk_ops cpg_pll_clk_ops = {
- .recalc_rate = cpg_pll_clk_recalc_rate,
- .determine_rate = cpg_pll_clk_determine_rate,
- .set_rate = cpg_pll_clk_set_rate,
+static const struct clk_ops cpg_pll_f8_25_clk_ops = {
+ .recalc_rate = cpg_pll_8_25_clk_recalc_rate,
+};
+
+static const struct clk_ops cpg_pll_v8_25_clk_ops = {
+ .recalc_rate = cpg_pll_8_25_clk_recalc_rate,
+ .determine_rate = cpg_pll_8_25_clk_determine_rate,
+ .set_rate = cpg_pll_8_25_clk_set_rate,
+};
+
+static unsigned long cpg_pll_9_24_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
+ unsigned long rate;
+
+ ni = FIELD_GET(CPG_PLLxCR0_NI9, cr0) + 1;
+ rate = parent_rate * ni;
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ nf = FIELD_GET(CPG_PLLxCR1_NF24, readl(pll_clk->pllcr1_reg));
+ rate += mul_u64_u32_shr(parent_rate, nf, 24);
+ } else {
+ rate *= 2;
+ }
+
+ return rate;
+}
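+
+/*
+ * Sketch of the 9.24 rate computed above: in fractional mode,
+ * rate = parent * (NI + 1) + parent * NF / 2^24; in integer mode the
+ * PLL output is additionally doubled.
+ */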
+
+static const struct clk_ops cpg_pll_f9_24_clk_ops = {
+ .recalc_rate = cpg_pll_9_24_clk_recalc_rate,
};
static struct clk * __init cpg_pll_clk_register(const char *name,
const char *parent_name,
void __iomem *base,
- unsigned int cr0_offset,
- unsigned int cr1_offset,
- unsigned int index)
-
+ unsigned int index,
+ const struct clk_ops *ops)
{
- struct cpg_pll_clk *pll_clk;
+ static const struct { u16 cr0, cr1; } pll_cr_offsets[] __initconst = {
+ [1 - 1] = { CPG_PLL1CR0, CPG_PLL1CR1 },
+ [2 - 1] = { CPG_PLL2CR0, CPG_PLL2CR1 },
+ [3 - 1] = { CPG_PLL3CR0, CPG_PLL3CR1 },
+ [4 - 1] = { CPG_PLL4CR0, CPG_PLL4CR1 },
+ [6 - 1] = { CPG_PLL6CR0, CPG_PLL6CR1 },
+ };
struct clk_init_data init = {};
+ struct cpg_pll_clk *pll_clk;
struct clk *clk;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
@@ -155,25 +239,23 @@ static struct clk * __init cpg_pll_clk_register(const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &cpg_pll_clk_ops;
+ init.ops = ops;
init.parent_names = &parent_name;
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->pllcr0_reg = base + cr0_offset;
+ pll_clk->pllcr0_reg = base + pll_cr_offsets[index - 1].cr0;
+ pll_clk->pllcr1_reg = base + pll_cr_offsets[index - 1].cr1;
pll_clk->pllecr_reg = base + CPG_PLLECR;
pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);
- /* Disable Fractional Multiplication and Frequency Dithering */
- writel(0, base + cr1_offset);
- cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0);
-
clk = clk_register(NULL, &pll_clk->hw);
if (IS_ERR(clk))
kfree(pll_clk);
return clk;
}
+
/*
* Z0 Clock & Z1 Clock
*/
@@ -358,51 +440,41 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll1_div;
break;
- case CLK_TYPE_GEN4_PLL2_VAR:
- /*
- * PLL2 is implemented as a custom clock, to change the
- * multiplier when cpufreq changes between normal and boost
- * modes.
- */
- return cpg_pll_clk_register(core->name, __clk_get_name(parent),
- base, CPG_PLL2CR0, CPG_PLL2CR1, 2);
-
- case CLK_TYPE_GEN4_PLL2:
- mult = cpg_pll_config->pll2_mult;
- div = cpg_pll_config->pll2_div;
- break;
-
- case CLK_TYPE_GEN4_PLL3:
- mult = cpg_pll_config->pll3_mult;
- div = cpg_pll_config->pll3_div;
- break;
-
- case CLK_TYPE_GEN4_PLL4:
- mult = cpg_pll_config->pll4_mult;
- div = cpg_pll_config->pll4_div;
- break;
-
case CLK_TYPE_GEN4_PLL5:
mult = cpg_pll_config->pll5_mult;
div = cpg_pll_config->pll5_div;
break;
- case CLK_TYPE_GEN4_PLL6:
- mult = cpg_pll_config->pll6_mult;
- div = cpg_pll_config->pll6_div;
- break;
-
case CLK_TYPE_GEN4_PLL2X_3X:
value = readl(base + core->offset);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
+ mult = (FIELD_GET(CPG_PLLxCR_STC, value) + 1) * 2;
break;
+ case CLK_TYPE_GEN4_PLL_F8_25:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_f8_25_clk_ops);
+
+ case CLK_TYPE_GEN4_PLL_V8_25:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_v8_25_clk_ops);
+
+ case CLK_TYPE_GEN4_PLL_V9_24:
+ /* Variable fractional 9.24 is not yet supported; fall back to fixed */
+ fallthrough;
+ case CLK_TYPE_GEN4_PLL_F9_24:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_f9_24_clk_ops);
+
case CLK_TYPE_GEN4_Z:
return cpg_z_clk_register(core->name, __clk_get_name(parent),
base, core->div, core->offset);
case CLK_TYPE_GEN4_SDSRC:
- div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4;
+ value = readl(base + CPG_SD0CKCR1);
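+ /* SDSRC_SEL values 0..3 select a fixed SDSRC divider of 4..7 */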
+ div = FIELD_GET(CPG_SD0CKCR1_SDSRC_SEL, value) + 4;
break;
case CLK_TYPE_GEN4_SDH:
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 006537e29e4e..717fd148464f 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -12,13 +12,12 @@
enum rcar_gen4_clk_types {
CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM,
CLK_TYPE_GEN4_PLL1,
- CLK_TYPE_GEN4_PLL2,
- CLK_TYPE_GEN4_PLL2_VAR,
CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
- CLK_TYPE_GEN4_PLL3,
- CLK_TYPE_GEN4_PLL4,
CLK_TYPE_GEN4_PLL5,
- CLK_TYPE_GEN4_PLL6,
+ CLK_TYPE_GEN4_PLL_F8_25, /* Fixed fractional 8.25 PLL */
+ CLK_TYPE_GEN4_PLL_V8_25, /* Variable fractional 8.25 PLL */
+ CLK_TYPE_GEN4_PLL_F9_24, /* Fixed fractional 9.24 PLL */
+ CLK_TYPE_GEN4_PLL_V9_24, /* Variable fractional 9.24 PLL */
CLK_TYPE_GEN4_SDSRC,
CLK_TYPE_GEN4_SDH,
CLK_TYPE_GEN4_SD,
@@ -47,6 +46,18 @@ enum rcar_gen4_clk_types {
#define DEF_GEN4_OSC(_name, _id, _parent, _div) \
DEF_BASE(_name, _id, CLK_TYPE_GEN4_OSC, _parent, .div = _div)
+#define DEF_GEN4_PLL_F8_25(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F8_25, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_V8_25(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V8_25, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_F9_24(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F9_24, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_V9_24(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V9_24, _parent, .offset = _idx)
+
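+/*
+ * Note: in the DEF_GEN4_PLL_* macros above, .offset carries the PLL
+ * index (1..6) rather than a register offset; cpg_pll_clk_register()
+ * translates the index to the matching PLLnCR0/PLLnCR1 pair.
+ */
+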
#define DEF_GEN4_Z(_name, _id, _type, _parent, _div, _offset) \
DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
@@ -54,21 +65,16 @@ struct rcar_gen4_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
u8 pll1_div;
- u8 pll2_mult;
- u8 pll2_div;
- u8 pll3_mult;
- u8 pll3_div;
- u8 pll4_mult;
- u8 pll4_div;
u8 pll5_mult;
u8 pll5_div;
- u8 pll6_mult;
- u8 pll6_div;
u8 osc_prediv;
};
-#define CPG_RPCCKCR 0x874
-#define SD0CKCR1 0x8a4
+#define CPG_SD0CKCR 0x870 /* SD-IF0 Clock Frequency Control Register */
+#define CPG_CANFDCKCR 0x878 /* CAN-FD Clock Frequency Control Register */
+#define CPG_MSOCKCR 0x87c /* MSIOF Clock Frequency Control Register */
+#define CPG_CSICKCR 0x880 /* CSI Clock Frequency Control Register */
+#define CPG_DSIEXTCKCR 0x884 /* DSI Clock Frequency Control Register */
struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index de4896cf5f40..421ae973ea8e 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -212,7 +212,7 @@ static struct platform_driver rcar_usb2_clock_sel_driver = {
.pm = &rcar_usb2_clock_sel_pm_ops,
},
.probe = rcar_usb2_clock_sel_probe,
- .remove_new = rcar_usb2_clock_sel_remove,
+ .remove = rcar_usb2_clock_sel_remove,
};
builtin_platform_driver(rcar_usb2_clock_sel_driver);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 04b78064d4e0..88bf39e8c79c 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -339,8 +339,7 @@ static const struct clk_ops rzg3s_div_clk_ops = {
};
static struct clk * __init
-rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
- void __iomem *base, struct rzg2l_cpg_priv *priv)
+rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
struct div_hw_data *div_hw_data;
struct clk_init_data init = {};
@@ -351,7 +350,7 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
u32 max = 0;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -400,16 +399,15 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
+ void __iomem *base = priv->base;
struct device *dev = priv->dev;
const struct clk *parent;
const char *parent_name;
struct clk_hw *clk_hw;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -440,7 +438,6 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
const struct clk_hw *clk_hw;
@@ -448,7 +445,7 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
core->parent_names, core->num_parents,
core->flag,
- base + GET_REG_OFFSET(core->conf),
+ priv->base + GET_REG_OFFSET(core->conf),
GET_SHIFT(core->conf),
GET_WIDTH(core->conf),
core->mux_flags, &priv->rmw_lock);
@@ -508,7 +505,6 @@ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
struct sd_mux_hw_data *sd_mux_hw_data;
@@ -652,7 +648,6 @@ static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
struct rzg2l_cpg_priv *priv)
{
struct dsi_div_hw_data *clk_hw_data;
@@ -662,7 +657,7 @@ rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
struct clk_hw *clk_hw;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -900,7 +895,6 @@ static const struct clk_ops rzg2l_cpg_sipll5_ops = {
static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
- struct clk **clks,
struct rzg2l_cpg_priv *priv)
{
const struct clk *parent;
@@ -910,7 +904,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct clk_hw *clk_hw;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -1013,8 +1007,6 @@ static const struct clk_ops rzg3s_cpg_pll_ops = {
static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
- void __iomem *base,
struct rzg2l_cpg_priv *priv,
const struct clk_ops *ops)
{
@@ -1023,8 +1015,9 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct clk_init_data init;
const char *parent_name;
struct pll_clk *pll_clk;
+ int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -1041,11 +1034,15 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->hw.init = &init;
pll_clk->conf = core->conf;
- pll_clk->base = base;
+ pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
- return clk_register(NULL, &pll_clk->hw);
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pll_clk->hw.clk;
}
static struct clk
@@ -1102,6 +1099,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
struct device *dev = priv->dev;
unsigned int id = core->id, div = core->div;
const char *parent_name;
+ struct clk_hw *clk_hw;
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
@@ -1124,39 +1122,40 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
}
parent_name = __clk_get_name(parent);
- clk = clk_register_fixed_factor(NULL, core->name,
- parent_name, CLK_SET_RATE_PARENT,
- core->mult, div);
+ clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
+ CLK_SET_RATE_PARENT,
+ core->mult, div);
+ if (IS_ERR(clk_hw))
+ clk = ERR_CAST(clk_hw);
+ else
+ clk = clk_hw->clk;
break;
case CLK_TYPE_SAM_PLL:
- clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
- &rzg2l_cpg_pll_ops);
+ clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
break;
case CLK_TYPE_G3S_PLL:
- clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
- &rzg3s_cpg_pll_ops);
+ clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
break;
case CLK_TYPE_SIPLL5:
- clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
+ clk = rzg2l_cpg_sipll5_register(core, priv);
break;
case CLK_TYPE_DIV:
- clk = rzg2l_cpg_div_clk_register(core, priv->clks,
- priv->base, priv);
+ clk = rzg2l_cpg_div_clk_register(core, priv);
break;
case CLK_TYPE_G3S_DIV:
- clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
+ clk = rzg3s_cpg_div_clk_register(core, priv);
break;
case CLK_TYPE_MUX:
- clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
+ clk = rzg2l_cpg_mux_clk_register(core, priv);
break;
case CLK_TYPE_SD_MUX:
- clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
+ clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
break;
case CLK_TYPE_PLL5_4_MUX:
clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
break;
case CLK_TYPE_DSI_DIV:
- clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
+ clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
break;
default:
goto fail;
@@ -1337,6 +1336,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
struct clk *parent, *clk;
const char *parent_name;
unsigned int i;
+ int ret;
WARN_DEBUG(id < priv->num_core_clks);
WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
@@ -1380,10 +1380,13 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
clock->priv = priv;
clock->hw.init = &init;
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
+ ret = devm_clk_hw_register(dev, &clock->hw);
+ if (ret) {
+ clk = ERR_PTR(ret);
goto fail;
+ }
+ clk = clock->hw.clk;
dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
new file mode 100644
index 000000000000..b524a9d33610
--- /dev/null
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) Clock Pulse Generator
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ * Based on rzg2l-cpg.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2013 Ideas On Board SPRL
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+#include "rzv2h-cpg.h"
+
+#ifdef DEBUG
+#define WARN_DEBUG(x) WARN_ON(x)
+#else
+#define WARN_DEBUG(x) do { } while (0)
+#endif
+
+#define GET_CLK_ON_OFFSET(x) (0x600 + ((x) * 4))
+#define GET_CLK_MON_OFFSET(x) (0x800 + ((x) * 4))
+#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
+#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
+
+#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
+#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
+#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
+#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+
+#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
+
+#define GET_MOD_CLK_ID(base, index, bit) \
+ ((base) + (index) * 16 + (bit))
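+
+/*
+ * Example: with 16 bits per CLK_ON register, a module clock at
+ * on_index 4, on_bit 3 gets clock ID base + 4 * 16 + 3.
+ */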
+
+#define CPG_CLKSTATUS0 (0x700)
+
+/**
+ * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
+ *
+ * @dev: CPG device
+ * @base: CPG register block base address
+ * @rmw_lock: protects register accesses
+ * @clks: Array containing all Core and Module Clocks
+ * @num_core_clks: Number of Core Clocks in clks[]
+ * @num_mod_clks: Number of Module Clocks in clks[]
+ * @resets: Array of resets
+ * @num_resets: Number of Module Resets in info->resets[]
+ * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @rcdev: Reset controller entity
+ */
+struct rzv2h_cpg_priv {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t rmw_lock;
+
+ struct clk **clks;
+ unsigned int num_core_clks;
+ unsigned int num_mod_clks;
+ struct rzv2h_reset *resets;
+ unsigned int num_resets;
+ unsigned int last_dt_core_clk;
+
+ struct reset_controller_dev rcdev;
+};
+
+#define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev)
+
+struct pll_clk {
+ struct rzv2h_cpg_priv *priv;
+ void __iomem *base;
+ struct clk_hw hw;
+ unsigned int conf;
+ unsigned int type;
+};
+
+#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
+
+/**
+ * struct mod_clock - Module clock
+ *
+ * @priv: CPG private data
+ * @hw: handle between common and hardware-specific interfaces
+ * @on_index: CLK_ON register index
+ * @on_bit: ON/MON bit
+ * @mon_index: CLK_MON register index
+ * @mon_bit: monitor bit
+ */
+struct mod_clock {
+ struct rzv2h_cpg_priv *priv;
+ struct clk_hw hw;
+ u8 on_index;
+ u8 on_bit;
+ s8 mon_index;
+ u8 mon_bit;
+};
+
+#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
+
+/**
+ * struct ddiv_clk - DDIV clock
+ *
+ * @priv: CPG private data
+ * @div: divider clk
+ * @mon: monitor bit in CPG_CLKSTATUS0 register
+ */
+struct ddiv_clk {
+ struct rzv2h_cpg_priv *priv;
+ struct clk_divider div;
+ u8 mon;
+};
+
+#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+
+static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ unsigned int clk1, clk2;
+ u64 rate;
+
+ if (!PLL_CLK_ACCESS(pll_clk->conf))
+ return 0;
+
+ clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
+ clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+
+ rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
+ 16 + SDIV(clk2));
+
+ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+}
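+
+/*
+ * Worked example with illustrative register values (not from a
+ * datasheet): parent_rate = 24 MHz, MDIV = 100, KDIV = 0, PDIV = 2,
+ * SDIV = 0 yields
+ *   rate = parent * (MDIV + KDIV / 2^16) / 2^SDIV / PDIV = 1200 MHz.
+ */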
+
+static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv,
+ const struct clk_ops *ops)
+{
+ void __iomem *base = priv->base;
+ struct device *dev = priv->dev;
+ struct clk_init_data init;
+ const struct clk *parent;
+ const char *parent_name;
+ struct pll_clk *pll_clk;
+ int ret;
+
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return ERR_PTR(-ENOMEM);
+
+ parent_name = __clk_get_name(parent);
+ init.name = core->name;
+ init.ops = ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll_clk->hw.init = &init;
+ pll_clk->conf = core->cfg.conf;
+ pll_clk->base = base;
+ pll_clk->priv = priv;
+ pll_clk->type = core->type;
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pll_clk->hw.clk;
+}
+
+static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+
+ return divider_determine_rate(hw, req, divider->table, divider->width,
+ divider->flags);
+}
+
+static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
+{
+ u32 bitmask = BIT(mon);
+ u32 val;
+
+ return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
+}
+
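+/*
+ * The divider field is guarded by a write-enable bit 16 positions
+ * above it (DDIV_DIVCTL_WEN) and by a monitor bit in CPG_CLKSTATUS0
+ * that is set while an update is in flight; hence the wait before and
+ * after the write below.
+ */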
+static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct ddiv_clk *ddiv = to_ddiv_clock(divider);
+ struct rzv2h_cpg_priv *priv = ddiv->priv;
+ unsigned long flags = 0;
+ int value;
+ u32 val;
+ int ret;
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+ if (ret)
+ goto ddiv_timeout;
+
+ val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+ if (ret)
+ goto ddiv_timeout;
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+
+ddiv_timeout:
+ spin_unlock_irqrestore(divider->lock, flags);
+ return ret;
+}
+
+static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
+ .recalc_rate = rzv2h_ddiv_recalc_rate,
+ .round_rate = rzv2h_ddiv_round_rate,
+ .determine_rate = rzv2h_ddiv_determine_rate,
+ .set_rate = rzv2h_ddiv_set_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct ddiv cfg_ddiv = core->cfg.ddiv;
+ struct clk_init_data init = {};
+ struct device *dev = priv->dev;
+ u8 shift = cfg_ddiv.shift;
+ u8 width = cfg_ddiv.width;
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_divider *div;
+ struct ddiv_clk *ddiv;
+ int ret;
+
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ if ((shift + width) > 16)
+ return ERR_PTR(-EINVAL);
+
+ ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
+ if (!ddiv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ ddiv->priv = priv;
+ ddiv->mon = cfg_ddiv.monbit;
+ div = &ddiv->div;
+ div->reg = priv->base + cfg_ddiv.offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = core->flag;
+ div->lock = &priv->rmw_lock;
+ div->hw.init = &init;
+ div->table = core->dtable;
+
+ ret = devm_clk_hw_register(dev, &div->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return div->hw.clk;
+}
+
+static struct clk
+*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ unsigned int clkidx = clkspec->args[1];
+ struct rzv2h_cpg_priv *priv = data;
+ struct device *dev = priv->dev;
+ const char *type;
+ struct clk *clk;
+
+ switch (clkspec->args[0]) {
+ case CPG_CORE:
+ type = "core";
+ if (clkidx > priv->last_dt_core_clk) {
+ dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
+ return ERR_PTR(-EINVAL);
+ }
+ clk = priv->clks[clkidx];
+ break;
+
+ case CPG_MOD:
+ type = "module";
+ if (clkidx >= priv->num_mod_clks) {
+ dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
+ return ERR_PTR(-EINVAL);
+ }
+ clk = priv->clks[priv->num_core_clks + clkidx];
+ break;
+
+ default:
+ dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR(clk))
+ dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
+ PTR_ERR(clk));
+ else
+ dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
+ clkspec->args[0], clkspec->args[1], clk,
+ clk_get_rate(clk));
+ return clk;
+}
+
+static void __init
+rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
+ unsigned int id = core->id, div = core->div;
+ struct device *dev = priv->dev;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+
+ WARN_DEBUG(id >= priv->num_core_clks);
+ WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+
+ switch (core->type) {
+ case CLK_TYPE_IN:
+ clk = of_clk_get_by_name(priv->dev->of_node, core->name);
+ break;
+ case CLK_TYPE_FF:
+ WARN_DEBUG(core->parent >= priv->num_core_clks);
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent)) {
+ clk = parent;
+ goto fail;
+ }
+
+ parent_name = __clk_get_name(parent);
+ clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
+ parent_name, CLK_SET_RATE_PARENT,
+ core->mult, div);
+ if (IS_ERR(clk_hw))
+ clk = ERR_CAST(clk_hw);
+ else
+ clk = clk_hw->clk;
+ break;
+ case CLK_TYPE_PLL:
+ clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
+ break;
+ case CLK_TYPE_DDIV:
+ clk = rzv2h_cpg_ddiv_clk_register(core, priv);
+ break;
+ default:
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(clk))
+ goto fail;
+
+ dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+ return;
+
+fail:
+ dev_err(dev, "Failed to register core clock %s: %ld\n",
+ core->name, PTR_ERR(clk));
+}
+
+static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask = BIT(clock->on_bit);
+ struct device *dev = priv->dev;
+ u32 value;
+ int error;
+
+ dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
+ enable ? "ON" : "OFF");
+
+ value = bitmask << 16;
+ if (enable)
+ value |= bitmask;
+
+ writel(value, priv->base + reg);
+
+ if (!enable || clock->mon_index < 0)
+ return 0;
+
+ reg = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ error = readl_poll_timeout_atomic(priv->base + reg, value,
+ value & bitmask, 0, 10);
+ if (error)
+ dev_err(dev, "Failed to enable CLK_ON %p\n",
+ priv->base + reg);
+
+ return error;
+}
+
+static int rzv2h_mod_clock_enable(struct clk_hw *hw)
+{
+ return rzv2h_mod_clock_endisable(hw, true);
+}
+
+static void rzv2h_mod_clock_disable(struct clk_hw *hw)
+{
+ rzv2h_mod_clock_endisable(hw, false);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ } else {
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+ }
+
+ return readl(priv->base + offset) & bitmask;
+}
+
+static const struct clk_ops rzv2h_mod_clock_ops = {
+ .enable = rzv2h_mod_clock_enable,
+ .disable = rzv2h_mod_clock_disable,
+ .is_enabled = rzv2h_mod_clock_is_enabled,
+};
+
+static void __init
+rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct mod_clock *clock = NULL;
+ struct device *dev = priv->dev;
+ struct clk_init_data init;
+ struct clk *parent, *clk;
+ const char *parent_name;
+ unsigned int id;
+ int ret;
+
+ id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
+ WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
+ WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
+ WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+
+ parent = priv->clks[mod->parent];
+ if (IS_ERR(parent)) {
+ clk = parent;
+ goto fail;
+ }
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ init.name = mod->name;
+ init.ops = &rzv2h_mod_clock_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ if (mod->critical)
+ init.flags |= CLK_IS_CRITICAL;
+
+ parent_name = __clk_get_name(parent);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->on_index = mod->on_index;
+ clock->on_bit = mod->on_bit;
+ clock->mon_index = mod->mon_index;
+ clock->mon_bit = mod->mon_bit;
+ clock->priv = priv;
+ clock->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &clock->hw);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto fail;
+ }
+
+ priv->clks[id] = clock->hw.clk;
+
+ return;
+
+fail:
+ dev_err(dev, "Failed to register module clock %s: %ld\n",
+ mod->name, PTR_ERR(clk));
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
+ u32 mask = BIT(priv->resets[id].reset_bit);
+ u8 monbit = priv->resets[id].mon_bit;
+ u32 value = mask << 16;
+
+ dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+
+ writel(value, priv->base + reg);
+
+ reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ mask = BIT(monbit);
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ value & mask, 10, 200);
+}
+
+static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
+ u32 mask = BIT(priv->resets[id].reset_bit);
+ u8 monbit = priv->resets[id].mon_bit;
+ u32 value = (mask << 16) | mask;
+
+ dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
+
+ writel(value, priv->base + reg);
+
+ reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ mask = BIT(monbit);
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ !(value & mask), 10, 200);
+}
+
+static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = rzv2h_cpg_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ return rzv2h_cpg_deassert(rcdev, id);
+}
+
+static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ u8 monbit = priv->resets[id].mon_bit;
+
+ return !!(readl(priv->base + reg) & BIT(monbit));
+}
+
+static const struct reset_control_ops rzv2h_cpg_reset_ops = {
+ .reset = rzv2h_cpg_reset,
+ .assert = rzv2h_cpg_assert,
+ .deassert = rzv2h_cpg_deassert,
+ .status = rzv2h_cpg_status,
+};
+
+static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int id = reset_spec->args[0];
+ u8 rst_index = id / 16;
+ u8 rst_bit = id % 16;
+ unsigned int i;
+
+ for (i = 0; i < rcdev->nr_resets; i++) {
+ if (rst_index == priv->resets[i].reset_index &&
+ rst_bit == priv->resets[i].reset_bit)
+ return i;
+ }
+
+ return -EINVAL;
+}
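+
+/*
+ * Example: a DT reset specifier of 117 (= 7 * 16 + 5) matches the
+ * resets[] entry with reset_index 7 and reset_bit 5.
+ */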
+
+static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
+{
+ priv->rcdev.ops = &rzv2h_cpg_reset_ops;
+ priv->rcdev.of_node = priv->dev->of_node;
+ priv->rcdev.dev = priv->dev;
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
+ priv->rcdev.nr_resets = priv->num_resets;
+
+ return devm_reset_controller_register(priv->dev, &priv->rcdev);
+}
+
+/**
+ * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
+ * @priv: pointer to CPG private data structure
+ * @genpd: generic PM domain
+ */
+struct rzv2h_cpg_pd {
+ struct rzv2h_cpg_priv *priv;
+ struct generic_pm_domain genpd;
+};
+
+static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args clkspec;
+ bool once = true;
+ struct clk *clk;
+ int error;
+ int i = 0;
+
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+ &clkspec)) {
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
+ if (error) {
+ of_node_put(clkspec.np);
+ goto err;
+ }
+ }
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n",
+ error);
+ goto fail_put;
+ }
+ i++;
+ }
+
+ return 0;
+
+fail_put:
+ clk_put(clk);
+
+fail_destroy:
+ pm_clk_destroy(dev);
+err:
+ return error;
+}
+
+static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
+{
+ if (!pm_clk_no_clocks(dev))
+ pm_clk_destroy(dev);
+}
+
+static void rzv2h_cpg_genpd_remove_simple(void *data)
+{
+ pm_genpd_remove(data);
+}
+
+static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct device_node *np = dev->of_node;
+ struct rzv2h_cpg_pd *pd;
+ int ret;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->genpd.name = np->name;
+ pd->priv = priv;
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
+ pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
+ ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
+ if (ret)
+ return ret;
+
+ return of_genpd_add_provider_simple(np, &pd->genpd);
+}
+
+static void rzv2h_cpg_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static int __init rzv2h_cpg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rzv2h_cpg_info *info;
+ struct rzv2h_cpg_priv *priv;
+ unsigned int nclks, i;
+ struct clk **clks;
+ int error;
+
+ info = of_device_get_match_data(dev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->rmw_lock);
+
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ nclks = info->num_total_core_clks + info->num_hw_mod_clks;
+ clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
+ info->num_resets, GFP_KERNEL);
+ if (!priv->resets)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->clks = clks;
+ priv->num_core_clks = info->num_total_core_clks;
+ priv->num_mod_clks = info->num_hw_mod_clks;
+ priv->last_dt_core_clk = info->last_dt_core_clk;
+ priv->num_resets = info->num_resets;
+
+ for (i = 0; i < nclks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < info->num_core_clks; i++)
+ rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);
+
+ for (i = 0; i < info->num_mod_clks; i++)
+ rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);
+
+ error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
+ if (error)
+ return error;
+
+ error = rzv2h_cpg_add_pm_domains(priv);
+ if (error)
+ return error;
+
+ error = rzv2h_cpg_reset_controller_register(priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id rzv2h_cpg_match[] = {
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2h_cpg_driver = {
+ .driver = {
+ .name = "rzv2h-cpg",
+ .of_match_table = rzv2h_cpg_match,
+ },
+};
+
+static int __init rzv2h_cpg_init(void)
+{
+ return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
+}
+
+subsys_initcall(rzv2h_cpg_init);
+
+MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
new file mode 100644
index 000000000000..1bd406c69015
--- /dev/null
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/V2H(P) Clock Pulse Generator
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __RENESAS_RZV2H_CPG_H__
+#define __RENESAS_RZV2H_CPG_H__
+
+/**
+ * struct ddiv - Structure for dynamic switching divider
+ *
+ * @offset: register offset
+ * @shift: position of the divider bit
+ * @width: width of the divider
+ * @monbit: monitor bit in CPG_CLKSTATUS0 register
+ */
+struct ddiv {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+ unsigned int monbit:5;
+};
+
+#define DDIV_PACK(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = _offset, \
+ .shift = _shift, \
+ .width = _width, \
+ .monbit = _monbit \
+ })
+
+#define CPG_CDDIV0 (0x400)
+
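+/* Divider control 2 of CPG_CDDIV0: bits [10:8], monitored via CPG_CLKSTATUS0 bit 2 */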
+#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
+
+/**
+ * struct cpg_core_clk - CPG Core Clock definition
+ *
+ * Core Clocks include clock outputs exported to DT, external input
+ * clocks, and internal CPG clocks.
+ *
+ * @name: clock name
+ * @id: clock index
+ * @parent: id of parent clock
+ * @div: fixed divider
+ * @mult: fixed multiplier
+ * @type: clock type (enum clk_types)
+ * @cfg: type-specific configuration (PLL conf word or dynamic divider)
+ * @dtable: pointer to divider table (CLK_TYPE_DDIV only)
+ * @flag: common clock framework divider flags
+ */
+struct cpg_core_clk {
+ const char *name;
+ unsigned int id;
+ unsigned int parent;
+ unsigned int div;
+ unsigned int mult;
+ unsigned int type;
+ union {
+ unsigned int conf;
+ struct ddiv ddiv;
+ } cfg;
+ const struct clk_div_table *dtable;
+ u32 flag;
+};
+
+enum clk_types {
+ /* Generic */
+ CLK_TYPE_IN, /* External Clock Input */
+ CLK_TYPE_FF, /* Fixed Factor Clock */
+ CLK_TYPE_PLL,
+ CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+};
+
+/*
+ * BIT(31) indicates if CLK1/2 are accessible or not; the lower 16 bits
+ * hold the CLK1 register offset, with CLK2 at the next word (+0x4).
+ */
+#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
+#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
+#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
+#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
+
+#define DEF_TYPE(_name, _id, _type...) \
+ { .name = _name, .id = _id, .type = _type }
+#define DEF_BASE(_name, _id, _type, _parent...) \
+ DEF_TYPE(_name, _id, _type, .parent = _parent)
+#define DEF_PLL(_name, _id, _parent, _conf) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_INPUT(_name, _id) \
+ DEF_TYPE(_name, _id, CLK_TYPE_IN)
+#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
+#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \
+ .cfg.ddiv = _ddiv_packed, \
+ .parent = _parent, \
+ .dtable = _dtable, \
+ .flag = CLK_DIVIDER_HIWORD_MASK)
+
+/**
+ * struct rzv2h_mod_clk - Module Clocks definitions
+ *
+ * @name: handle between common and hardware-specific interfaces
+ * @parent: id of parent clock
+ * @critical: flag to indicate the clock is critical
+ * @on_index: control register index
+ * @on_bit: ON bit
+ * @mon_index: monitor register index
+ * @mon_bit: monitor bit
+ */
+struct rzv2h_mod_clk {
+ const char *name;
+ u16 parent;
+ bool critical;
+ u8 on_index;
+ u8 on_bit;
+ s8 mon_index;
+ u8 mon_bit;
+};
+
+#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+ { \
+ .name = (_name), \
+ .parent = (_parent), \
+ .critical = (_critical), \
+ .on_index = (_onindex), \
+ .on_bit = (_onbit), \
+ .mon_index = (_monindex), \
+ .mon_bit = (_monbit), \
+ }
+
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
+ DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
+ DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+
+/**
+ * struct rzv2h_reset - Reset definitions
+ *
+ * @reset_index: reset register index
+ * @reset_bit: reset bit
+ * @mon_index: monitor register index
+ * @mon_bit: monitor bit
+ */
+struct rzv2h_reset {
+ u8 reset_index;
+ u8 reset_bit;
+ u8 mon_index;
+ u8 mon_bit;
+};
+
+#define DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit) \
+ { \
+ .reset_index = (_resindex), \
+ .reset_bit = (_resbit), \
+ .mon_index = (_monindex), \
+ .mon_bit = (_monbit), \
+ }
+
+#define DEF_RST(_resindex, _resbit, _monindex, _monbit) \
+ DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit)
+
+/**
+ * struct rzv2h_cpg_info - SoC-specific CPG Description
+ *
+ * @core_clks: Array of Core Clock definitions
+ * @num_core_clks: Number of entries in core_clks[]
+ * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @num_total_core_clks: Total number of Core Clocks (exported + internal)
+ *
+ * @mod_clks: Array of Module Clock definitions
+ * @num_mod_clks: Number of entries in mod_clks[]
+ * @num_hw_mod_clks: Number of Module Clocks supported by the hardware
+ *
+ * @resets: Array of Module Reset definitions
+ * @num_resets: Number of entries in resets[]
+ */
+struct rzv2h_cpg_info {
+ /* Core Clocks */
+ const struct cpg_core_clk *core_clks;
+ unsigned int num_core_clks;
+ unsigned int last_dt_core_clk;
+ unsigned int num_total_core_clks;
+
+ /* Module Clocks */
+ const struct rzv2h_mod_clk *mod_clks;
+ unsigned int num_mod_clks;
+ unsigned int num_hw_mod_clks;
+
+ /* Resets */
+ const struct rzv2h_reset *resets;
+ unsigned int num_resets;
+};
+
+extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
+
+#endif /* __RENESAS_RZV2H_CPG_H__ */
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 9aad86925cd2..570ad90835d3 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -100,6 +100,13 @@ config CLK_RK3568
help
Build the driver for RK3568 Clock Driver.
+config CLK_RK3576
+ bool "Rockchip RK3576 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3576 Clock Driver.
+
config CLK_RK3588
bool "Rockchip RK3588 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 36894f6a7022..af2ade54a7ef 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -28,4 +28,5 @@ obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
+obj-$(CONFIG_CLK_RK3576) += clk-rk3576.o rst-rk3576.o
obj-$(CONFIG_CLK_RK3588) += clk-rk3588.o rst-rk3588.o
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 606ce5458f54..fe76756e592e 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -914,7 +914,10 @@ static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw, unsigned
}
rate64 = rate64 >> cur.s;
- return (unsigned long)rate64;
+ if (pll->type == pll_rk3588_ddr)
+ return (unsigned long)rate64 * 2;
+ else
+ return (unsigned long)rate64;
}
static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll,
@@ -1167,6 +1170,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
break;
case pll_rk3588:
case pll_rk3588_core:
+ case pll_rk3588_ddr:
if (!pll->rate_table)
init.ops = &rockchip_rk3588_pll_clk_norate_ops;
else
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index b58619eb412b..caf7c0e6e479 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -1002,6 +1002,7 @@ static const char *const px30_cru_critical_clocks[] __initconst = {
static void __init px30_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1010,7 +1011,9 @@ static void __init px30_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
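+ /*
+ * Size the provider from the branch table instead of the hardcoded
+ * CLK_NR_CLKS, allowing that define to be dropped from the binding
+ * headers.
+ */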
+ clk_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_branches,
+ ARRAY_SIZE(px30_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
@@ -1043,6 +1046,7 @@ CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init);
static void __init px30_pmu_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clkpmu_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1051,7 +1055,9 @@ static void __init px30_pmu_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ clkpmu_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_pmu_branches,
+ ARRAY_SIZE(px30_clk_pmu_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
return;
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d644bc155ec6..d341ce0708aa 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -436,6 +436,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
static void __init rk3036_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
struct clk *clk;
@@ -452,7 +453,9 @@ static void __init rk3036_clk_init(struct device_node *np)
writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
reg_base + RK2928_CLKSEL_CON(13));
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3036_clk_branches,
+ ARRAY_SIZE(rk3036_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index a24a35553e13..ed602c27b624 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
- MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
+ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
@@ -683,6 +683,7 @@ static const char *const rk3228_critical_clocks[] __initconst = {
static void __init rk3228_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -691,7 +692,9 @@ static void __init rk3228_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3228_clk_branches,
+ ARRAY_SIZE(rk3228_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index baa5aebd3277..90d329216064 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -932,6 +932,7 @@ static void __init rk3288_common_init(struct device_node *np,
enum rk3288_variant soc)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
rk3288_cru_base = of_iomap(np, 0);
if (!rk3288_cru_base) {
@@ -939,7 +940,9 @@ static void __init rk3288_common_init(struct device_node *np,
return;
}
- ctx = rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3288_clk_branches,
+ ARRAY_SIZE(rk3288_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, rk3288_cru_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(rk3288_cru_base);
diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c
index db3396c3e6e9..95a9512a41a3 100644
--- a/drivers/clk/rockchip/clk-rk3308.c
+++ b/drivers/clk/rockchip/clk-rk3308.c
@@ -917,6 +917,7 @@ static const char *const rk3308_critical_clocks[] __initconst = {
static void __init rk3308_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -925,7 +926,9 @@ static void __init rk3308_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3308_clk_branches,
+ ARRAY_SIZE(rk3308_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 267ab54937d3..3bb87b27b662 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -881,6 +881,7 @@ static const char *const rk3328_critical_clocks[] __initconst = {
static void __init rk3328_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -889,7 +890,9 @@ static void __init rk3328_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3328_clk_branches,
+ ARRAY_SIZE(rk3328_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 2c50cc2cc6db..04391e4e2874 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -866,6 +866,7 @@ static const char *const rk3368_critical_clocks[] __initconst = {
static void __init rk3368_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -874,7 +875,9 @@ static void __init rk3368_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3368_clk_branches,
+ ARRAY_SIZE(rk3368_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 4f1a5782c230..c2b243d7a5e2 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1531,6 +1531,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
static void __init rk3399_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1539,7 +1540,9 @@ static void __init rk3399_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_branches,
+ ARRAY_SIZE(rk3399_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
@@ -1577,6 +1580,7 @@ CLK_OF_DECLARE(rk3399_cru, "rockchip,rk3399-cru", rk3399_clk_init);
static void __init rk3399_pmu_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clkpmu_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1585,7 +1589,9 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ clkpmu_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_pmu_branches,
+ ARRAY_SIZE(rk3399_clk_pmu_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
new file mode 100644
index 000000000000..595e010341f7
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -0,0 +1,1818 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/clock/rockchip,rk3576-cru.h>
+#include "clk.h"
+
+#define RK3576_GRF_SOC_STATUS0 0x600
+#define RK3576_PMU0_GRF_OSC_CON6 0x18
+
+enum rk3576_plls {
+ bpll, lpll, vpll, aupll, cpll, gpll, ppll,
+};
+
+static struct rockchip_pll_rate_table rk3576_pll_rates[] = {
+ /* _mhz, _p, _m, _s, _k */
+ RK3588_PLL_RATE(2520000000, 2, 210, 0, 0),
+ RK3588_PLL_RATE(2496000000, 2, 208, 0, 0),
+ RK3588_PLL_RATE(2472000000, 2, 206, 0, 0),
+ RK3588_PLL_RATE(2448000000, 2, 204, 0, 0),
+ RK3588_PLL_RATE(2424000000, 2, 202, 0, 0),
+ RK3588_PLL_RATE(2400000000, 2, 200, 0, 0),
+ RK3588_PLL_RATE(2376000000, 2, 198, 0, 0),
+ RK3588_PLL_RATE(2352000000, 2, 196, 0, 0),
+ RK3588_PLL_RATE(2328000000, 2, 194, 0, 0),
+ RK3588_PLL_RATE(2304000000, 2, 192, 0, 0),
+ RK3588_PLL_RATE(2280000000, 2, 190, 0, 0),
+ RK3588_PLL_RATE(2256000000, 2, 376, 1, 0),
+ RK3588_PLL_RATE(2232000000, 2, 372, 1, 0),
+ RK3588_PLL_RATE(2208000000, 2, 368, 1, 0),
+ RK3588_PLL_RATE(2184000000, 2, 364, 1, 0),
+ RK3588_PLL_RATE(2160000000, 2, 360, 1, 0),
+ RK3588_PLL_RATE(2136000000, 2, 356, 1, 0),
+ RK3588_PLL_RATE(2112000000, 2, 352, 1, 0),
+ RK3588_PLL_RATE(2088000000, 2, 348, 1, 0),
+ RK3588_PLL_RATE(2064000000, 2, 344, 1, 0),
+ RK3588_PLL_RATE(2040000000, 2, 340, 1, 0),
+ RK3588_PLL_RATE(2016000000, 2, 336, 1, 0),
+ RK3588_PLL_RATE(1992000000, 2, 332, 1, 0),
+ RK3588_PLL_RATE(1968000000, 2, 328, 1, 0),
+ RK3588_PLL_RATE(1944000000, 2, 324, 1, 0),
+ RK3588_PLL_RATE(1920000000, 2, 320, 1, 0),
+ RK3588_PLL_RATE(1896000000, 2, 316, 1, 0),
+ RK3588_PLL_RATE(1872000000, 2, 312, 1, 0),
+ RK3588_PLL_RATE(1848000000, 2, 308, 1, 0),
+ RK3588_PLL_RATE(1824000000, 2, 304, 1, 0),
+ RK3588_PLL_RATE(1800000000, 2, 300, 1, 0),
+ RK3588_PLL_RATE(1776000000, 2, 296, 1, 0),
+ RK3588_PLL_RATE(1752000000, 2, 292, 1, 0),
+ RK3588_PLL_RATE(1728000000, 2, 288, 1, 0),
+ RK3588_PLL_RATE(1704000000, 2, 284, 1, 0),
+ RK3588_PLL_RATE(1680000000, 2, 280, 1, 0),
+ RK3588_PLL_RATE(1656000000, 2, 276, 1, 0),
+ RK3588_PLL_RATE(1632000000, 2, 272, 1, 0),
+ RK3588_PLL_RATE(1608000000, 2, 268, 1, 0),
+ RK3588_PLL_RATE(1584000000, 2, 264, 1, 0),
+ RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
+ RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
+ RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
+ RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
+ RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
+ RK3588_PLL_RATE(1416000000, 2, 236, 1, 0),
+ RK3588_PLL_RATE(1392000000, 2, 232, 1, 0),
+ RK3588_PLL_RATE(1320000000, 2, 220, 1, 0),
+ RK3588_PLL_RATE(1200000000, 2, 200, 1, 0),
+ RK3588_PLL_RATE(1188000000, 2, 198, 1, 0),
+ RK3588_PLL_RATE(1100000000, 3, 550, 2, 0),
+ RK3588_PLL_RATE(1008000000, 2, 336, 2, 0),
+ RK3588_PLL_RATE(1000000000, 3, 500, 2, 0),
+ RK3588_PLL_RATE(983040000, 4, 655, 2, 23592),
+ RK3588_PLL_RATE(955520000, 3, 477, 2, 49806),
+ RK3588_PLL_RATE(903168000, 6, 903, 2, 11009),
+ RK3588_PLL_RATE(900000000, 2, 300, 2, 0),
+ RK3588_PLL_RATE(816000000, 2, 272, 2, 0),
+ RK3588_PLL_RATE(786432000, 2, 262, 2, 9437),
+ RK3588_PLL_RATE(786000000, 1, 131, 2, 0),
+ RK3588_PLL_RATE(785560000, 3, 392, 2, 51117),
+ RK3588_PLL_RATE(722534400, 8, 963, 2, 24850),
+ RK3588_PLL_RATE(600000000, 2, 200, 2, 0),
+ RK3588_PLL_RATE(594000000, 2, 198, 2, 0),
+ RK3588_PLL_RATE(408000000, 2, 272, 3, 0),
+ RK3588_PLL_RATE(312000000, 2, 208, 3, 0),
+ RK3588_PLL_RATE(216000000, 2, 288, 4, 0),
+ RK3588_PLL_RATE(96000000, 2, 256, 5, 0),
+ { /* sentinel */ },
+};
+
+static struct rockchip_pll_rate_table rk3576_ppll_rates[] = {
+ /* _mhz, _p, _m, _s, _k */
+ RK3588_PLL_RATE(1300000000, 3, 325, 2, 0),
+ { /* sentinel */ },
+};
+
+#define RK3576_ACLK_M_BIGCORE_DIV_MASK 0x1f
+#define RK3576_ACLK_M_BIGCORE_DIV_SHIFT 0
+#define RK3576_ACLK_M_LITCORE_DIV_MASK 0x1f
+#define RK3576_ACLK_M_LITCORE_DIV_SHIFT 8
+#define RK3576_PCLK_DBG_LITCORE_DIV_MASK 0x1f
+#define RK3576_PCLK_DBG_LITCORE_DIV_SHIFT 0
+#define RK3576_ACLK_CCI_DIV_MASK 0x1f
+#define RK3576_ACLK_CCI_DIV_SHIFT 7
+#define RK3576_ACLK_CCI_MUX_MASK 0x3
+#define RK3576_ACLK_CCI_MUX_SHIFT 12
+
+#define RK3576_BIGCORE_CLKSEL2(_amcore) \
+{ \
+ .reg = RK3576_BIGCORE_CLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_BIGCORE_DIV_MASK, \
+ RK3576_ACLK_M_BIGCORE_DIV_SHIFT), \
+}
+
+#define RK3576_LITCORE_CLKSEL1(_amcore) \
+{ \
+ .reg = RK3576_LITCORE_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_LITCORE_DIV_MASK, \
+ RK3576_ACLK_M_LITCORE_DIV_SHIFT), \
+}
+
+#define RK3576_LITCORE_CLKSEL2(_pclkdbg) \
+{ \
+ .reg = RK3576_LITCORE_CLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_pclkdbg - 1, RK3576_PCLK_DBG_LITCORE_DIV_MASK, \
+ RK3576_PCLK_DBG_LITCORE_DIV_SHIFT), \
+}
+
+#define RK3576_CCI_CLKSEL4(_ccisel, _div) \
+{ \
+ .reg = RK3576_CCI_CLKSEL_CON(4), \
+ .val = HIWORD_UPDATE(_ccisel, RK3576_ACLK_CCI_MUX_MASK, \
+ RK3576_ACLK_CCI_MUX_SHIFT) | \
+ HIWORD_UPDATE(_div - 1, RK3576_ACLK_CCI_DIV_MASK, \
+ RK3576_ACLK_CCI_DIV_SHIFT), \
+}
+
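+/*
+ * cpuclk rate-table helpers: each entry pairs a parent rate with the
+ * divider writes in .divs; .pre_muxs/.post_muxs are applied before and
+ * after the rate change (here, parking ACLK CCI on a fixed parent
+ * while the core clock switches).
+ */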
+#define RK3576_CPUBCLK_RATE(_prate, _amcore) \
+{ \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3576_BIGCORE_CLKSEL2(_amcore), \
+ }, \
+}
+
+#define RK3576_CPULCLK_RATE(_prate, _amcore, _pclkdbg, _ccisel) \
+{ \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3576_LITCORE_CLKSEL1(_amcore), \
+ RK3576_LITCORE_CLKSEL2(_pclkdbg), \
+ }, \
+ .pre_muxs = { \
+ RK3576_CCI_CLKSEL4(2, 2), \
+ }, \
+ .post_muxs = { \
+ RK3576_CCI_CLKSEL4(_ccisel, 2), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3576_cpubclk_rates[] __initdata = {
+ RK3576_CPUBCLK_RATE(2496000000, 2),
+ RK3576_CPUBCLK_RATE(2400000000, 2),
+ RK3576_CPUBCLK_RATE(2304000000, 2),
+ RK3576_CPUBCLK_RATE(2208000000, 2),
+ RK3576_CPUBCLK_RATE(2184000000, 2),
+ RK3576_CPUBCLK_RATE(2088000000, 2),
+ RK3576_CPUBCLK_RATE(2040000000, 2),
+ RK3576_CPUBCLK_RATE(2016000000, 2),
+ RK3576_CPUBCLK_RATE(1992000000, 2),
+ RK3576_CPUBCLK_RATE(1896000000, 2),
+ RK3576_CPUBCLK_RATE(1800000000, 2),
+ RK3576_CPUBCLK_RATE(1704000000, 2),
+ RK3576_CPUBCLK_RATE(1608000000, 2),
+ RK3576_CPUBCLK_RATE(1584000000, 2),
+ RK3576_CPUBCLK_RATE(1560000000, 2),
+ RK3576_CPUBCLK_RATE(1536000000, 2),
+ RK3576_CPUBCLK_RATE(1512000000, 2),
+ RK3576_CPUBCLK_RATE(1488000000, 2),
+ RK3576_CPUBCLK_RATE(1464000000, 2),
+ RK3576_CPUBCLK_RATE(1440000000, 2),
+ RK3576_CPUBCLK_RATE(1416000000, 2),
+ RK3576_CPUBCLK_RATE(1392000000, 2),
+ RK3576_CPUBCLK_RATE(1368000000, 2),
+ RK3576_CPUBCLK_RATE(1344000000, 2),
+ RK3576_CPUBCLK_RATE(1320000000, 2),
+ RK3576_CPUBCLK_RATE(1296000000, 2),
+ RK3576_CPUBCLK_RATE(1272000000, 2),
+ RK3576_CPUBCLK_RATE(1248000000, 2),
+ RK3576_CPUBCLK_RATE(1224000000, 2),
+ RK3576_CPUBCLK_RATE(1200000000, 2),
+ RK3576_CPUBCLK_RATE(1104000000, 2),
+ RK3576_CPUBCLK_RATE(1008000000, 2),
+ RK3576_CPUBCLK_RATE(912000000, 2),
+ RK3576_CPUBCLK_RATE(816000000, 2),
+ RK3576_CPUBCLK_RATE(696000000, 2),
+ RK3576_CPUBCLK_RATE(600000000, 2),
+ RK3576_CPUBCLK_RATE(408000000, 2),
+ RK3576_CPUBCLK_RATE(312000000, 2),
+ RK3576_CPUBCLK_RATE(216000000, 2),
+ RK3576_CPUBCLK_RATE(96000000, 2),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3576_cpubclk_data = {
+ .core_reg[0] = RK3576_BIGCORE_CLKSEL_CON(1),
+ .div_core_shift[0] = 7,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 12,
+ .mux_core_mask = 0x3,
+};
+
+static struct rockchip_cpuclk_rate_table rk3576_cpulclk_rates[] __initdata = {
+ RK3576_CPULCLK_RATE(2400000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2304000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2208000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2184000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2088000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2040000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2016000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1992000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1896000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1800000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1704000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1608000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1584000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1560000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1536000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1512000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1488000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1464000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1440000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1416000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1392000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1368000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1344000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1320000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1296000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1272000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1248000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1224000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1200000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(1104000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(1008000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(912000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(816000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(696000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(600000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(408000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(312000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(216000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(96000000, 2, 6, 2),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3576_cpulclk_data = {
+ .core_reg[0] = RK3576_LITCORE_CLKSEL_CON(0),
+ .div_core_shift[0] = 7,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 12,
+ .mux_core_mask = 0x3,
+};
+
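+/*
+ * Rockchip CRU registers use the hiword-mask scheme: the upper 16 bits
+ * are a write mask for the lower 16, so fields update without
+ * read-modify-write.
+ */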
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+PNAME(mux_pll_p) = { "xin24m", "xin32k" };
+PNAME(mux_24m_32k_p) = { "xin24m", "xin_osc0_div" };
+PNAME(mux_armclkl_p) = { "xin24m", "pll_lpll", "lpll" };
+PNAME(mux_armclkb_p) = { "xin24m", "pll_bpll", "bpll" };
+PNAME(gpll_24m_p) = { "gpll", "xin24m" };
+PNAME(cpll_24m_p) = { "cpll", "xin24m" };
+PNAME(gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(gpll_spll_p) = { "gpll", "spll" };
+PNAME(gpll_cpll_aupll_p) = { "gpll", "cpll", "aupll" };
+PNAME(gpll_cpll_24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(gpll_cpll_24m_spll_p) = { "gpll", "cpll", "xin24m", "spll" };
+PNAME(gpll_cpll_aupll_24m_p) = { "gpll", "cpll", "aupll", "xin24m" };
+PNAME(gpll_cpll_aupll_spll_p) = { "gpll", "cpll", "aupll", "spll" };
+PNAME(gpll_cpll_aupll_spll_lpll_p) = { "gpll", "cpll", "aupll", "spll", "lpll_dummy" };
+PNAME(gpll_cpll_spll_bpll_p) = { "gpll", "cpll", "spll", "bpll_dummy" };
+PNAME(gpll_cpll_lpll_bpll_p) = { "gpll", "cpll", "lpll_dummy", "bpll_dummy" };
+PNAME(gpll_spll_cpll_bpll_lpll_p) = { "gpll", "spll", "cpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_vpll_aupll_24m_p) = { "gpll", "cpll", "vpll", "aupll", "xin24m" };
+PNAME(gpll_cpll_spll_aupll_bpll_p) = { "gpll", "cpll", "spll", "aupll", "bpll_dummy" };
+PNAME(gpll_cpll_spll_bpll_lpll_p) = { "gpll", "cpll", "spll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_spll_lpll_bpll_p) = { "gpll", "cpll", "spll", "lpll_dummy", "bpll_dummy" };
+PNAME(gpll_cpll_vpll_bpll_lpll_p) = { "gpll", "cpll", "vpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_spll_aupll_bpll_lpll_p) = { "gpll", "spll", "aupll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_spll_isppvtpll_bpll_lpll_p) = { "gpll", "spll", "isp_pvtpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_spll_aupll_lpll_24m_p) = { "gpll", "cpll", "spll", "aupll", "lpll_dummy", "xin24m" };
+PNAME(gpll_cpll_spll_vpll_bpll_lpll_p) = { "gpll", "cpll", "spll", "vpll", "bpll_dummy", "lpll_dummy" };
+PNAME(cpll_vpll_lpll_bpll_p) = { "cpll", "vpll", "lpll_dummy", "bpll_dummy" };
+PNAME(mux_24m_ccipvtpll_gpll_lpll_p) = { "xin24m", "cci_pvtpll", "gpll", "lpll" };
+PNAME(mux_24m_spll_gpll_cpll_p) = { "xin24m", "spll", "gpll", "cpll" };
+PNAME(audio_frac_int_p) = { "xin24m", "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2",
+ "clk_audio_frac_3", "clk_audio_int_0", "clk_audio_int_1", "clk_audio_int_2" };
+PNAME(audio_frac_p) = { "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2", "clk_audio_frac_3" };
+PNAME(mux_100m_24m_p) = { "clk_cpll_div10", "xin24m" };
+PNAME(mux_100m_50m_24m_p) = { "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_100m_24m_lclk0_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_0" };
+PNAME(mux_100m_24m_lclk1_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_1" };
+PNAME(mux_150m_100m_50m_24m_p) = { "clk_gpll_div8", "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_gpll_div6", "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_400m_200m_100m_24m_p) = { "clk_gpll_div3", "clk_gpll_div6", "clk_cpll_div10", "xin24m" };
+PNAME(mux_500m_250m_100m_24m_p) = { "clk_cpll_div2", "clk_cpll_div4", "clk_cpll_div10", "xin24m" };
+PNAME(mux_600m_400m_300m_24m_p) = { "clk_gpll_div2", "clk_gpll_div3", "clk_gpll_div4", "xin24m" };
+PNAME(mux_350m_175m_116m_24m_p) = { "clk_spll_div2", "clk_spll_div4", "clk_spll_div6", "xin24m" };
+PNAME(mux_175m_116m_58m_24m_p) = { "clk_spll_div4", "clk_spll_div6", "clk_spll_div12", "xin24m" };
+PNAME(mux_116m_58m_24m_p) = { "clk_spll_div6", "clk_spll_div12", "xin24m" };
+PNAME(mclk_sai0_8ch_p) = { "mclk_sai0_8ch_src", "sai0_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai1_8ch_p) = { "mclk_sai1_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai2_2ch_p) = { "mclk_sai2_2ch_src", "sai2_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai3_2ch_p) = { "mclk_sai3_2ch_src", "sai3_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai4_2ch_p) = { "mclk_sai4_2ch_src", "sai4_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai5_8ch_p) = { "mclk_sai5_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai6_8ch_p) = { "mclk_sai6_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai7_8ch_p) = { "mclk_sai7_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai8_8ch_p) = { "mclk_sai8_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai9_8ch_p) = { "mclk_sai9_8ch_src", "sai1_mclkin" };
+PNAME(uart1_p) = { "clk_uart1_src_top", "xin24m" };
+PNAME(clk_gmac1_ptp_ref_src_p) = { "gpll", "cpll", "gmac1_ptp_refclk_in" };
+PNAME(clk_gmac0_ptp_ref_src_p) = { "gpll", "cpll", "gmac0_ptp_refclk_in" };
+PNAME(dclk_ebc_p) = { "gpll", "cpll", "vpll", "aupll", "lpll_dummy",
+ "dclk_ebc_frac", "xin24m" };
+PNAME(dclk_vp0_p) = { "dclk_vp0_src", "clk_hdmiphy_pixel0" };
+PNAME(dclk_vp1_p) = { "dclk_vp1_src", "clk_hdmiphy_pixel0" };
+PNAME(dclk_vp2_p) = { "dclk_vp2_src", "clk_hdmiphy_pixel0" };
+PNAME(clk_uart_p) = { "gpll", "cpll", "aupll", "xin24m", "clk_uart_frac_0",
+ "clk_uart_frac_1", "clk_uart_frac_2" };
+PNAME(clk_freq_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin",
+ "sai3_mclkin", "sai4_mclkin", "sai_sclkin_freq" };
+PNAME(clk_counter_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin",
+ "sai3_mclkin", "sai4_mclkin", "sai_sclkin_counter" };
+PNAME(sai_sclkin_freq_p) = { "sai0_sclk_in", "sai1_sclk_in", "sai2_sclk_in",
+ "sai3_sclk_in", "sai4_sclk_in" };
+PNAME(clk_ref_pcie0_phy_p) = { "clk_pcie_100m_src", "clk_pcie_100m_nduty_src",
+ "xin24m" };
+PNAME(hclk_vi_root_p) = { "clk_gpll_div6", "clk_cpll_div10",
+ "aclk_vi_root_inter", "xin24m" };
+PNAME(clk_ref_osc_mphy_p) = { "xin24m", "clk_gpio_mphy_i", "clk_ref_mphy_26m" };
+PNAME(mux_pmu200m_pmu100m_pmu50m_24m_p) = { "clk_200m_pmu_src", "clk_100m_pmu_src",
+ "clk_50m_pmu_src", "xin24m" };
+PNAME(mux_pmu100m_pmu50m_24m_p) = { "clk_100m_pmu_src", "clk_50m_pmu_src", "xin24m" };
+PNAME(mux_pmu100m_24m_32k_p) = { "clk_100m_pmu_src", "xin24m", "xin_osc0_div" };
+PNAME(clk_phy_ref_src_p) = { "xin24m", "clk_pmuphy_ref_src" };
+PNAME(clk_usbphy_ref_src_p) = { "usbphy0_24m", "usbphy1_24m" };
+PNAME(clk_cpll_ref_src_p) = { "xin24m", "clk_usbphy_ref_src" };
+PNAME(clk_aupll_ref_src_p) = { "xin24m", "clk_aupll_ref_io" };
+
+static struct rockchip_pll_clock rk3576_pll_clks[] __initdata = {
+ [bpll] = PLL(pll_rk3588_core, PLL_BPLL, "bpll", mux_pll_p,
+ 0, RK3576_PLL_CON(0),
+ RK3576_BPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates),
+ [lpll] = PLL(pll_rk3588_core, PLL_LPLL, "lpll", mux_pll_p,
+ 0, RK3576_LPLL_CON(16),
+ RK3576_LPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates),
+ [vpll] = PLL(pll_rk3588, PLL_VPLL, "vpll", mux_pll_p,
+ 0, RK3576_PLL_CON(88),
+ RK3576_MODE_CON0, 4, 15, 0, rk3576_pll_rates),
+ [aupll] = PLL(pll_rk3588, PLL_AUPLL, "aupll", mux_pll_p,
+ 0, RK3576_PLL_CON(96),
+ RK3576_MODE_CON0, 6, 15, 0, rk3576_pll_rates),
+ [cpll] = PLL(pll_rk3588, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PLL_CON(104),
+ RK3576_MODE_CON0, 8, 15, 0, rk3576_pll_rates),
+ [gpll] = PLL(pll_rk3588, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PLL_CON(112),
+ RK3576_MODE_CON0, 2, 15, 0, rk3576_pll_rates),
+ [ppll] = PLL(pll_rk3588_ddr, PLL_PPLL, "ppll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PMU_PLL_CON(128),
+ RK3576_MODE_CON0, 10, 15, 0, rk3576_ppll_rates),
+};
+
+static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* fixed */
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE_FRAC(XIN_OSC0_DIV, "xin_osc0_div", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(21), 0,
+ RK3576_PMU_CLKGATE_CON(7), 11, GFLAGS),
+
+ FACTOR(0, "clk_spll_div12", "spll", 0, 1, 12),
+ FACTOR(0, "clk_spll_div6", "spll", 0, 1, 6),
+ FACTOR(0, "clk_spll_div4", "spll", 0, 1, 4),
+ FACTOR(0, "lpll_div2", "lpll", 0, 1, 2),
+ FACTOR(0, "bpll_div4", "bpll", 0, 1, 4),
+
+ /* top */
+ COMPOSITE(CLK_CPLL_DIV20, "clk_cpll_div20", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(0), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV10, "clk_cpll_div10", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(0), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV8, "clk_gpll_div8", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(1), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV6, "clk_gpll_div6", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(1), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV4, "clk_cpll_div4", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(2), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV4, "clk_gpll_div4", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(2), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE(CLK_SPLL_DIV2, "clk_spll_div2", gpll_cpll_spll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(3), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV3, "clk_gpll_div3", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(3), 12, 1, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV2, "clk_cpll_div2", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(4), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV2, "clk_gpll_div2", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(5), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(CLK_SPLL_DIV1, "clk_spll_div1", gpll_cpll_spll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(6), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NODIV(PCLK_TOP_ROOT, "pclk_top_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(8), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(ACLK_TOP, "aclk_top", gpll_cpll_aupll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(9), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(ACLK_TOP_MID, "aclk_top_mid", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(10), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(ACLK_SECURE_HIGH, "aclk_secure_high", gpll_spll_aupll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(10), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE_NODIV(HCLK_TOP, "hclk_top", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO0VOP_CHANNEL, "hclk_vo0vop_channel", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(ACLK_VO0VOP_CHANNEL, "aclk_vo0vop_channel", gpll_cpll_lpll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 12, 2, MFLAGS, 8, 4, DFLAGS,
+ RK3576_CLKGATE_CON(2), 1, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_0_SRC, "clk_audio_frac_0_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(13), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_0, "clk_audio_frac_0", "clk_audio_frac_0_src", 0,
+ RK3576_CLKSEL_CON(12), 0,
+ RK3576_CLKGATE_CON(1), 10, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_1_SRC, "clk_audio_frac_1_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(15), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_1, "clk_audio_frac_1", "clk_audio_frac_1_src", 0,
+ RK3576_CLKSEL_CON(14), 0,
+ RK3576_CLKGATE_CON(1), 11, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_2_SRC, "clk_audio_frac_2_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(17), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_2, "clk_audio_frac_2", "clk_audio_frac_2_src", 0,
+ RK3576_CLKSEL_CON(16), 0,
+ RK3576_CLKGATE_CON(1), 12, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_3_SRC, "clk_audio_frac_3_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(19), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_3, "clk_audio_frac_3", "clk_audio_frac_3_src", 0,
+ RK3576_CLKSEL_CON(18), 0,
+ RK3576_CLKGATE_CON(1), 13, GFLAGS),
+ MUX(0, "clk_uart_frac_0_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(22), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_0, "clk_uart_frac_0", "clk_uart_frac_0_src", 0,
+ RK3576_CLKSEL_CON(21), 0,
+ RK3576_CLKGATE_CON(2), 5, GFLAGS),
+ MUX(0, "clk_uart_frac_1_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(24), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_1, "clk_uart_frac_1", "clk_uart_frac_1_src", 0,
+ RK3576_CLKSEL_CON(23), 0,
+ RK3576_CLKGATE_CON(2), 6, GFLAGS),
+ MUX(0, "clk_uart_frac_2_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(26), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_2, "clk_uart_frac_2", "clk_uart_frac_2_src", 0,
+ RK3576_CLKSEL_CON(25), 0,
+ RK3576_CLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE(CLK_UART1_SRC_TOP, "clk_uart1_src_top", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(27), 13, 3, MFLAGS, 5, 8, DFLAGS,
+ RK3576_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_0, "clk_audio_int_0", "gpll", 0,
+ RK3576_CLKSEL_CON(28), 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(2), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_1, "clk_audio_int_1", "cpll", 0,
+ RK3576_CLKSEL_CON(28), 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(2), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_2, "clk_audio_int_2", "aupll", 0,
+ RK3576_CLKSEL_CON(28), 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE(CLK_PDM0_SRC_TOP, "clk_pdm0_src_top", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(29), 9, 3, MFLAGS, 0, 9, DFLAGS,
+ RK3576_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GMAC0_125M_SRC, "clk_gmac0_125m_src", "cpll", 0,
+ RK3576_CLKSEL_CON(30), 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GMAC1_125M_SRC, "clk_gmac1_125m_src", "cpll", 0,
+ RK3576_CLKSEL_CON(31), 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(LCLK_ASRC_SRC_0, "lclk_asrc_src_0", audio_frac_p, 0,
+ RK3576_CLKSEL_CON(31), 10, 2, MFLAGS, 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 10, GFLAGS),
+ COMPOSITE(LCLK_ASRC_SRC_1, "lclk_asrc_src_1", audio_frac_p, 0,
+ RK3576_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 11, GFLAGS),
+ COMPOSITE(REF_CLK0_OUT_PLL, "ref_clk0_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(33), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 1, GFLAGS),
+ COMPOSITE(REF_CLK1_OUT_PLL, "ref_clk1_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(34), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(REF_CLK2_OUT_PLL, "ref_clk2_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(35), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 3, GFLAGS),
+ COMPOSITE(REFCLKO25M_GMAC0_OUT, "refclko25m_gmac0_out", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(5), 10, GFLAGS),
+ COMPOSITE(REFCLKO25M_GMAC1_OUT, "refclko25m_gmac1_out", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(36), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3576_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
+ RK3576_CLKSEL_CON(37), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(CLK_GMAC0_RMII_CRU, "clk_gmac0_rmii_cru", "clk_cpll_div20", 0,
+ RK3576_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(CLK_GMAC1_RMII_CRU, "clk_gmac1_rmii_cru", "clk_cpll_div20", 0,
+ RK3576_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
+ RK3576_CLKGATE_CON(5), 15, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M1, "clk_mipi_cameraout_m1", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(39), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M2, "clk_mipi_cameraout_m2", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(40), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE(MCLK_PDM0_SRC_TOP, "mclk_pdm0_src_top", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(41), 7, 3, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(6), 8, GFLAGS),
+
+ /* bus */
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NODIV(PCLK_BUS_ROOT, "pclk_bus_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE(ACLK_BUS_ROOT, "aclk_bus_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 9, 1, MFLAGS, 4, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(11), 6, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(56), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(56), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(CLK_KEY_SHIFT, "clk_key_shift", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(PCLK_I2C6, "pclk_i2c6", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(PCLK_I2C7, "pclk_i2c7", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(PCLK_I2C8, "pclk_i2c8", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 7, GFLAGS),
+ GATE(PCLK_I2C9, "pclk_i2c9", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 8, GFLAGS),
+ GATE(PCLK_WDT_BUSMCU, "pclk_wdt_busmcu", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(TCLK_WDT_BUSMCU, "tclk_wdt_busmcu", "xin24m", 0,
+ RK3576_CLKGATE_CON(12), 10, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C1, "clk_i2c1", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C2, "clk_i2c2", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 13, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C3, "clk_i2c3", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 4, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C4, "clk_i2c4", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 15, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C5, "clk_i2c5", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C6, "clk_i2c6", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C7, "clk_i2c7", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C8, "clk_i2c8", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 14, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C9, "clk_i2c9", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(58), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 6, GFLAGS),
+ COMPOSITE(CLK_SARADC, "clk_saradc", gpll_24m_p, 0,
+ RK3576_CLKSEL_CON(58), 12, 1, MFLAGS, 4, 8, DFLAGS,
+ RK3576_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3576_CLKSEL_CON(59), 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(13), 9, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 13, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_UART8, "pclk_uart8", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(PCLK_UART9, "pclk_uart9", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(PCLK_UART10, "pclk_uart10", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(PCLK_UART11, "pclk_uart11", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 4, GFLAGS),
+ COMPOSITE(SCLK_UART0, "sclk_uart0", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(60), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 5, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(61), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 6, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(62), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 9, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(63), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 12, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(64), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 15, GFLAGS),
+ COMPOSITE(SCLK_UART6, "sclk_uart6", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(65), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 2, GFLAGS),
+ COMPOSITE(SCLK_UART7, "sclk_uart7", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(66), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 5, GFLAGS),
+ COMPOSITE(SCLK_UART8, "sclk_uart8", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(67), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 8, GFLAGS),
+ COMPOSITE(SCLK_UART9, "sclk_uart9", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(68), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 9, GFLAGS),
+ COMPOSITE(SCLK_UART10, "sclk_uart10", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(69), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 10, GFLAGS),
+ COMPOSITE(SCLK_UART11, "sclk_uart11", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(70), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 13, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 14, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 15, GFLAGS),
+ GATE(PCLK_SPI3, "pclk_spi3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(PCLK_SPI4, "pclk_spi4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(70), 13, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI2, "clk_spi2", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI3, "clk_spi3", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 4, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI4, "clk_spi4", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_WDT0, "pclk_wdt0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(TCLK_WDT0, "tclk_wdt0", "xin24m", 0,
+ RK3576_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1, "clk_pwm1", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RK3576_CLKGATE_CON(16), 13, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_pvtm_clkout", 0,
+ RK3576_CLKGATE_CON(16), 15, GFLAGS),
+ GATE(PCLK_BUSTIMER0, "pclk_bustimer0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(PCLK_BUSTIMER1, "pclk_bustimer1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_ROOT, "clk_timer0_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 14, 1, MFLAGS,
+ RK3576_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 7, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 9, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 11, GFLAGS),
+ GATE(PCLK_MAILBOX0, "pclk_mailbox0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 13, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 15, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 0, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 3, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 4, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 5, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 6, GFLAGS),
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 7, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(DCLK_DECOM, "dclk_decom", gpll_spll_p, 0,
+ RK3576_CLKSEL_CON(72), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER1_ROOT, "clk_timer1_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(72), 6, 1, MFLAGS,
+ RK3576_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(CLK_TIMER6, "clk_timer6", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 11, GFLAGS),
+ COMPOSITE(CLK_TIMER7, "clk_timer7", mux_100m_24m_lclk0_p, 0,
+ RK3576_CLKSEL_CON(72), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 12, GFLAGS),
+ COMPOSITE(CLK_TIMER8, "clk_timer8", mux_100m_24m_lclk1_p, 0,
+ RK3576_CLKSEL_CON(73), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 13, GFLAGS),
+ GATE(CLK_TIMER9, "clk_timer9", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(CLK_TIMER10, "clk_timer10", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 15, GFLAGS),
+ GATE(CLK_TIMER11, "clk_timer11", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(HCLK_I3C0, "hclk_i3c0", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(HCLK_I3C1, "hclk_i3c1", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 9, GFLAGS),
+ COMPOSITE_NODIV(HCLK_BUS_CM0_ROOT, "hclk_bus_cm0_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(73), 13, 2, MFLAGS,
+ RK3576_CLKGATE_CON(19), 10, GFLAGS),
+ GATE(FCLK_BUS_CM0_CORE, "fclk_bus_cm0_core", "hclk_bus_cm0_root", 0,
+ RK3576_CLKGATE_CON(19), 12, GFLAGS),
+ COMPOSITE(CLK_BUS_CM0_RTC, "clk_bus_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(74), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(19), 14, GFLAGS),
+ GATE(PCLK_PMU2, "pclk_pmu2", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(19), 15, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(20), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2, "clk_pwm2", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(74), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(CLK_OSC_PWM2, "clk_osc_pwm2", "xin24m", 0,
+ RK3576_CLKGATE_CON(20), 7, GFLAGS),
+ GATE(CLK_RC_PWM2, "clk_rc_pwm2", "clk_pvtm_clkout", 0,
+ RK3576_CLKGATE_CON(20), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_FREQ_PWM1, "clk_freq_pwm1", clk_freq_pwm1_p, 0,
+ RK3576_CLKSEL_CON(74), 8, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_COUNTER_PWM1, "clk_counter_pwm1", clk_counter_pwm1_p, 0,
+ RK3576_CLKSEL_CON(74), 11, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 9, GFLAGS),
+ COMPOSITE_NODIV(SAI_SCLKIN_FREQ, "sai_sclkin_freq", sai_sclkin_freq_p, 0,
+ RK3576_CLKSEL_CON(75), 0, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 10, GFLAGS),
+ COMPOSITE_NODIV(SAI_SCLKIN_COUNTER, "sai_sclkin_counter", sai_sclkin_freq_p, 0,
+ RK3576_CLKSEL_CON(75), 3, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 11, GFLAGS),
+ COMPOSITE(CLK_I3C0, "clk_i3c0", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(78), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(20), 12, GFLAGS),
+ COMPOSITE(CLK_I3C1, "clk_i3c1", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(78), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(20), 13, GFLAGS),
+ GATE(PCLK_CSIDPHY1, "pclk_csidphy1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(40), 2, GFLAGS),
+
+ /* cci */
+ COMPOSITE(PCLK_CCI_ROOT, "pclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CCI_CLKSEL_CON(4), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CCI_CLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE(ACLK_CCI_ROOT, "aclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CCI_CLKSEL_CON(4), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CCI_CLKGATE_CON(1), 11, GFLAGS),
+
+ /* center */
+ COMPOSITE_DIV_OFFSET(ACLK_CENTER_ROOT, "aclk_center_root", gpll_cpll_spll_aupll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 5, 3, MFLAGS,
+ RK3576_CLKSEL_CON(167), 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(72), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_CENTER_LOW_ROOT, "aclk_center_low_root", mux_500m_250m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_CENTER_ROOT, "hclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_CENTER_ROOT, "pclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 3, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 5, GFLAGS),
+ GATE(ACLK_DDR_SHAREMEM, "aclk_ddr_sharemem", "aclk_center_low_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 6, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 10, GFLAGS),
+ GATE(PCLK_SHAREMEM, "pclk_sharemem", "pclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 11, GFLAGS),
+
+ /* ddr */
+ COMPOSITE(PCLK_DDR_ROOT, "pclk_ddr_root", gpll_cpll_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(76), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(PCLK_DDR_MON_CH0, "pclk_ddr_mon_ch0", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(21), 1, GFLAGS),
+ COMPOSITE(HCLK_DDR_ROOT, "hclk_ddr_root", gpll_cpll_p, CLK_IGNORE_UNUSED,
+ RK3576_CLKSEL_CON(77), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(22), 11, GFLAGS),
+ GATE(FCLK_DDR_CM0_CORE, "fclk_ddr_cm0_core", "hclk_ddr_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(22), 15, GFLAGS),
+ COMPOSITE_NODIV(CLK_DDR_TIMER_ROOT, "clk_ddr_timer_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(77), 6, 1, MFLAGS,
+ RK3576_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(CLK_DDR_TIMER0, "clk_ddr_timer0", "clk_ddr_timer_root", 0,
+ RK3576_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(CLK_DDR_TIMER1, "clk_ddr_timer1", "clk_ddr_timer_root", 0,
+ RK3576_CLKGATE_CON(23), 5, GFLAGS),
+ GATE(TCLK_WDT_DDR, "tclk_wdt_ddr", "xin24m", 0,
+ RK3576_CLKGATE_CON(23), 6, GFLAGS),
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_ddr_root", 0,
+ RK3576_CLKGATE_CON(23), 7, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_ddr_root", 0,
+ RK3576_CLKGATE_CON(23), 8, GFLAGS),
+ COMPOSITE(CLK_DDR_CM0_RTC, "clk_ddr_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(77), 12, 1, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(23), 10, GFLAGS),
+
+ /* gpu */
+ COMPOSITE(CLK_GPU_SRC_PRE, "clk_gpu_src_pre", gpll_cpll_aupll_spll_lpll_p, 0,
+ RK3576_CLKSEL_CON(165), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(69), 1, GFLAGS),
+ GATE(CLK_GPU, "clk_gpu", "clk_gpu_src_pre", 0,
+ RK3576_CLKGATE_CON(69), 3, GFLAGS),
+ COMPOSITE_NODIV(PCLK_GPU_ROOT, "pclk_gpu_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(166), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(69), 8, GFLAGS),
+
+ /* npu */
+ COMPOSITE_NODIV(HCLK_RKNN_ROOT, "hclk_rknn_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(86), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(31), 4, GFLAGS),
+ COMPOSITE(CLK_RKNN_DSU0, "clk_rknn_dsu0", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(86), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(31), 5, GFLAGS),
+ GATE(ACLK_RKNN0, "aclk_rknn0", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(28), 9, GFLAGS),
+ GATE(ACLK_RKNN1, "aclk_rknn1", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(29), 0, GFLAGS),
+ COMPOSITE_NODIV(PCLK_NPUTOP_ROOT, "pclk_nputop_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(31), 8, GFLAGS),
+ GATE(PCLK_NPU_TIMER, "pclk_npu_timer", "pclk_nputop_root", 0,
+ RK3576_CLKGATE_CON(31), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_NPUTIMER_ROOT, "clk_nputimer_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 2, 1, MFLAGS,
+ RK3576_CLKGATE_CON(31), 11, GFLAGS),
+ GATE(CLK_NPUTIMER0, "clk_nputimer0", "clk_nputimer_root", 0,
+ RK3576_CLKGATE_CON(31), 12, GFLAGS),
+ GATE(CLK_NPUTIMER1, "clk_nputimer1", "clk_nputimer_root", 0,
+ RK3576_CLKGATE_CON(31), 13, GFLAGS),
+ GATE(PCLK_NPU_WDT, "pclk_npu_wdt", "pclk_nputop_root", 0,
+ RK3576_CLKGATE_CON(31), 14, GFLAGS),
+ GATE(TCLK_NPU_WDT, "tclk_npu_wdt", "xin24m", 0,
+ RK3576_CLKGATE_CON(31), 15, GFLAGS),
+ GATE(ACLK_RKNN_CBUF, "aclk_rknn_cbuf", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(32), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_NPU_CM0_ROOT, "hclk_npu_cm0_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 3, 2, MFLAGS,
+ RK3576_CLKGATE_CON(32), 5, GFLAGS),
+ GATE(FCLK_NPU_CM0_CORE, "fclk_npu_cm0_core", "hclk_npu_cm0_root", 0,
+ RK3576_CLKGATE_CON(32), 7, GFLAGS),
+ COMPOSITE(CLK_NPU_CM0_RTC, "clk_npu_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(87), 10, 1, MFLAGS, 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(32), 9, GFLAGS),
+ GATE(HCLK_RKNN_CBUF, "hclk_rknn_cbuf", "hclk_rknn_root", 0,
+ RK3576_CLKGATE_CON(32), 12, GFLAGS),
+
+ /* nvm */
+ COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(88), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(33), 0, GFLAGS),
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(88), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(33), 1, GFLAGS),
+ COMPOSITE(SCLK_FSPI_X2, "sclk_fspi_x2", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(89), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(33), 6, GFLAGS),
+ GATE(HCLK_FSPI, "hclk_fspi", "hclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 7, GFLAGS),
+ COMPOSITE(CCLK_SRC_EMMC, "cclk_src_emmc", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(89), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3576_CLKGATE_CON(33), 8, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 9, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 10, GFLAGS),
+ COMPOSITE_NODIV(BCLK_EMMC, "bclk_emmc", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(90), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(33), 11, GFLAGS),
+ GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 0,
+ RK3576_CLKGATE_CON(33), 12, GFLAGS),
+
+	/* ufs & usb */
+ COMPOSITE(ACLK_UFS_ROOT, "aclk_ufs_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(115), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(47), 0, GFLAGS),
+ COMPOSITE(ACLK_USB_ROOT, "aclk_usb_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(115), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(47), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_USB_ROOT, "pclk_usb_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(115), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(47), 2, GFLAGS),
+ GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 5, GFLAGS),
+ GATE(CLK_REF_USB3OTG0, "clk_ref_usb3otg0", "xin24m", 0,
+ RK3576_CLKGATE_CON(47), 6, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG0, "clk_suspend_usb3otg0", "xin24m", 0,
+ RK3576_CLKGATE_CON(47), 7, GFLAGS),
+ GATE(ACLK_MMU2, "aclk_mmu2", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 12, GFLAGS),
+ GATE(ACLK_SLV_MMU2, "aclk_slv_mmu2", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 13, GFLAGS),
+ GATE(ACLK_UFS_SYS, "aclk_ufs_sys", "aclk_ufs_root", 0,
+ RK3576_CLKGATE_CON(47), 15, GFLAGS),
+
+ /* vdec */
+ COMPOSITE_NODIV(HCLK_RKVDEC_ROOT, "hclk_rkvdec_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(110), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(45), 0, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT, "aclk_rkvdec_root", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(110), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 1, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT_BAK, "aclk_rkvdec_root_bak", cpll_vpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(110), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 2, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_root", 0,
+ RK3576_CLKGATE_CON(45), 3, GFLAGS),
+ COMPOSITE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(111), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 8, GFLAGS),
+ GATE(CLK_RKVDEC_CORE, "clk_rkvdec_core", "aclk_rkvdec_root", 0,
+ RK3576_CLKGATE_CON(45), 9, GFLAGS),
+
+ /* venc */
+ COMPOSITE_NODIV(HCLK_VEPU0_ROOT, "hclk_vepu0_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(124), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(51), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU0_ROOT, "aclk_vepu0_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(124), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(51), 1, GFLAGS),
+ COMPOSITE(CLK_VEPU0_CORE, "clk_vepu0_core", gpll_cpll_spll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(124), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(51), 6, GFLAGS),
+ GATE(HCLK_VEPU0, "hclk_vepu0", "hclk_vepu0_root", 0,
+ RK3576_CLKGATE_CON(51), 4, GFLAGS),
+ GATE(ACLK_VEPU0, "aclk_vepu0", "aclk_vepu0_root", 0,
+ RK3576_CLKGATE_CON(51), 5, GFLAGS),
+
+ /* vi */
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_spll_isppvtpll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(128), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 0, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_VI_ROOT_INTER, "aclk_vi_root_inter", "aclk_vi_root", 0,
+ RK3576_CLKSEL_CON(130), 10, 3, DFLAGS,
+ RK3576_CLKGATE_CON(54), 13, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", hclk_vi_root_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(128), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(53), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(128), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(53), 2, GFLAGS),
+ COMPOSITE(DCLK_VICAP, "dclk_vicap", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(129), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 6, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 7, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 8, GFLAGS),
+ COMPOSITE(CLK_ISP_CORE, "clk_isp_core", gpll_spll_isppvtpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(129), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 9, GFLAGS),
+ GATE(CLK_ISP_CORE_MARVIN, "clk_isp_core_marvin", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(53), 10, GFLAGS),
+ GATE(CLK_ISP_CORE_VICAP, "clk_isp_core_vicap", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(53), 11, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 12, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 13, GFLAGS),
+ GATE(ACLK_VPSS, "aclk_vpss", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 15, GFLAGS),
+ GATE(HCLK_VPSS, "hclk_vpss", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 0, GFLAGS),
+ GATE(CLK_CORE_VPSS, "clk_core_vpss", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(54), 1, GFLAGS),
+ GATE(PCLK_CSI_HOST_0, "pclk_csi_host_0", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 4, GFLAGS),
+ GATE(PCLK_CSI_HOST_1, "pclk_csi_host_1", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 5, GFLAGS),
+ GATE(PCLK_CSI_HOST_2, "pclk_csi_host_2", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 6, GFLAGS),
+ GATE(PCLK_CSI_HOST_3, "pclk_csi_host_3", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 7, GFLAGS),
+ GATE(PCLK_CSI_HOST_4, "pclk_csi_host_4", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 8, GFLAGS),
+ COMPOSITE_NODIV(ICLK_CSIHOST01, "iclk_csihost01", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(130), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(54), 10, GFLAGS),
+ GATE(ICLK_CSIHOST0, "iclk_csihost0", "iclk_csihost01", 0,
+ RK3576_CLKGATE_CON(54), 11, GFLAGS),
+ COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_aupll_spll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(144), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(61), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(144), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(61), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(144), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(61), 3, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vop_root", 0,
+ RK3576_CLKGATE_CON(61), 8, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_root", 0,
+ RK3576_CLKGATE_CON(61), 9, GFLAGS),
+ COMPOSITE(DCLK_VP0_SRC, "dclk_vp0_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(145), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 10, GFLAGS),
+ COMPOSITE(DCLK_VP1_SRC, "dclk_vp1_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(146), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 11, GFLAGS),
+ COMPOSITE(DCLK_VP2_SRC, "dclk_vp2_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 12, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VP0, "dclk_vp0", dclk_vp0_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(61), 13, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VP1, "dclk_vp1", dclk_vp1_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 12, 1, MFLAGS,
+ RK3576_CLKGATE_CON(62), 0, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VP2, "dclk_vp2", dclk_vp2_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 13, 1, MFLAGS,
+ RK3576_CLKGATE_CON(62), 1, GFLAGS),
+
+ /* vo0 */
+ COMPOSITE(ACLK_VO0_ROOT, "aclk_vo0_root", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(149), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(63), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO0_ROOT, "hclk_vo0_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(149), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(63), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VO0_ROOT, "pclk_vo0_root", mux_150m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(149), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(63), 3, GFLAGS),
+ GATE(ACLK_HDCP0, "aclk_hdcp0", "aclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 12, GFLAGS),
+ GATE(HCLK_HDCP0, "hclk_hdcp0", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 13, GFLAGS),
+ GATE(PCLK_HDCP0, "pclk_hdcp0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 14, GFLAGS),
+ GATE(CLK_TRNG0_SKP, "clk_trng0_skp", "aclk_hdcp0", 0,
+ RK3576_CLKGATE_CON(64), 4, GFLAGS),
+ GATE(PCLK_DSIHOST0, "pclk_dsihost0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 5, GFLAGS),
+ COMPOSITE(CLK_DSIHOST0, "clk_dsihost0", gpll_cpll_spll_vpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(151), 7, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(64), 6, GFLAGS),
+ GATE(PCLK_HDMITX0, "pclk_hdmitx0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 7, GFLAGS),
+ COMPOSITE(CLK_HDMITX0_EARC, "clk_hdmitx0_earc", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(151), 15, 1, MFLAGS, 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(64), 8, GFLAGS),
+ GATE(CLK_HDMITX0_REF, "clk_hdmitx0_ref", "aclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 9, GFLAGS),
+ GATE(PCLK_EDP0, "pclk_edp0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 13, GFLAGS),
+ GATE(CLK_EDP0_24M, "clk_edp0_24m", "xin24m", 0,
+ RK3576_CLKGATE_CON(64), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_EDP0_200M, "clk_edp0_200m", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(152), 1, 2, MFLAGS,
+ RK3576_CLKGATE_CON(64), 15, GFLAGS),
+ COMPOSITE(MCLK_SAI5_8CH_SRC, "mclk_sai5_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(154), 10, 3, MFLAGS, 2, 8, DFLAGS,
+ RK3576_CLKGATE_CON(65), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI5_8CH, "mclk_sai5_8ch", mclk_sai5_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(154), 13, 1, MFLAGS,
+ RK3576_CLKGATE_CON(65), 4, GFLAGS),
+ GATE(HCLK_SAI5_8CH, "hclk_sai5_8ch", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 5, GFLAGS),
+ COMPOSITE(MCLK_SAI6_8CH_SRC, "mclk_sai6_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(155), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(65), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI6_8CH, "mclk_sai6_8ch", mclk_sai6_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(155), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(65), 8, GFLAGS),
+ GATE(HCLK_SAI6_8CH, "hclk_sai6_8ch", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 9, GFLAGS),
+ GATE(HCLK_SPDIF_TX2, "hclk_spdif_tx2", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 10, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX2, "mclk_spdif_tx2", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(156), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(65), 13, GFLAGS),
+ GATE(HCLK_SPDIF_RX2, "hclk_spdif_rx2", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 14, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX2, "mclk_spdif_rx2", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(156), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(65), 15, GFLAGS),
+
+ /* vo1 */
+ COMPOSITE(ACLK_VO1_ROOT, "aclk_vo1_root", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(158), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(67), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO1_ROOT, "hclk_vo1_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(158), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(67), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VO1_ROOT, "pclk_vo1_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(158), 9, 2, MFLAGS,
+ RK3576_CLKGATE_CON(67), 3, GFLAGS),
+ COMPOSITE(MCLK_SAI8_8CH_SRC, "mclk_sai8_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(157), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(66), 1, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI8_8CH, "mclk_sai8_8ch", mclk_sai8_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(157), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(66), 2, GFLAGS),
+ GATE(HCLK_SAI8_8CH, "hclk_sai8_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(66), 0, GFLAGS),
+ COMPOSITE(MCLK_SAI7_8CH_SRC, "mclk_sai7_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(159), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI7_8CH, "mclk_sai7_8ch", mclk_sai7_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(159), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(67), 9, GFLAGS),
+ GATE(HCLK_SAI7_8CH, "hclk_sai7_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 10, GFLAGS),
+ GATE(HCLK_SPDIF_TX3, "hclk_spdif_tx3", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 11, GFLAGS),
+ GATE(HCLK_SPDIF_TX4, "hclk_spdif_tx4", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 12, GFLAGS),
+ GATE(HCLK_SPDIF_TX5, "hclk_spdif_tx5", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 13, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX3, "mclk_spdif_tx3", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(160), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUX16MHZ_0, "clk_aux16mhz_0", "gpll", 0,
+ RK3576_CLKSEL_CON(161), 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 15, GFLAGS),
+ GATE(ACLK_DP0, "aclk_dp0", "aclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 0, GFLAGS),
+ GATE(PCLK_DP0, "pclk_dp0", "pclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 1, GFLAGS),
+ GATE(ACLK_HDCP1, "aclk_hdcp1", "aclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 4, GFLAGS),
+ GATE(HCLK_HDCP1, "hclk_hdcp1", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 5, GFLAGS),
+ GATE(PCLK_HDCP1, "pclk_hdcp1", "pclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 6, GFLAGS),
+ GATE(CLK_TRNG1_SKP, "clk_trng1_skp", "aclk_hdcp1", 0,
+ RK3576_CLKGATE_CON(68), 7, GFLAGS),
+ GATE(HCLK_SAI9_8CH, "hclk_sai9_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 9, GFLAGS),
+ COMPOSITE(MCLK_SAI9_8CH_SRC, "mclk_sai9_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(162), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI9_8CH, "mclk_sai9_8ch", mclk_sai9_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(162), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(68), 11, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX4, "mclk_spdif_tx4", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(163), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 12, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX5, "mclk_spdif_tx5", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(164), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 13, GFLAGS),
+
+ /* vpu */
+ COMPOSITE(ACLK_VPU_ROOT, "aclk_vpu_root", gpll_spll_cpll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(118), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VPU_MID_ROOT, "aclk_vpu_mid_root", mux_600m_400m_300m_24m_p, 0,
+ RK3576_CLKSEL_CON(118), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VPU_ROOT, "hclk_vpu_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(118), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 2, GFLAGS),
+ COMPOSITE(ACLK_JPEG_ROOT, "aclk_jpeg_root", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(119), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 3, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VPU_LOW_ROOT, "aclk_vpu_low_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(119), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 4, GFLAGS),
+ GATE(HCLK_RGA2E_0, "hclk_rga2e_0", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(49), 13, GFLAGS),
+ GATE(ACLK_RGA2E_0, "aclk_rga2e_0", "aclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(49), 14, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA2E_0, "clk_core_rga2e_0", gpll_spll_cpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(120), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 15, GFLAGS),
+ GATE(ACLK_JPEG, "aclk_jpeg", "aclk_jpeg_root", 0,
+ RK3576_CLKGATE_CON(50), 0, GFLAGS),
+ GATE(HCLK_JPEG, "hclk_jpeg", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 1, GFLAGS),
+ GATE(HCLK_VDPP, "hclk_vdpp", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 2, GFLAGS),
+ GATE(ACLK_VDPP, "aclk_vdpp", "aclk_vpu_mid_root", 0,
+ RK3576_CLKGATE_CON(50), 3, GFLAGS),
+ COMPOSITE(CLK_CORE_VDPP, "clk_core_vdpp", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(120), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(50), 4, GFLAGS),
+ GATE(HCLK_RGA2E_1, "hclk_rga2e_1", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 5, GFLAGS),
+ GATE(ACLK_RGA2E_1, "aclk_rga2e_1", "aclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 6, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA2E_1, "clk_core_rga2e_1", gpll_spll_cpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(121), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(50), 7, GFLAGS),
+ MUX(0, "dclk_ebc_frac_src_p", gpll_cpll_vpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(123), 0, 3, MFLAGS),
+ COMPOSITE_FRAC(DCLK_EBC_FRAC_SRC, "dclk_ebc_frac_src", "dclk_ebc_frac_src_p", 0,
+ RK3576_CLKSEL_CON(122), 0,
+ RK3576_CLKGATE_CON(50), 9, GFLAGS),
+ GATE(ACLK_EBC, "aclk_ebc", "aclk_vpu_low_root", 0,
+ RK3576_CLKGATE_CON(50), 11, GFLAGS),
+ GATE(HCLK_EBC, "hclk_ebc", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 10, GFLAGS),
+ COMPOSITE(DCLK_EBC, "dclk_ebc", dclk_ebc_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(123), 12, 3, MFLAGS, 3, 9, DFLAGS,
+ RK3576_CLKGATE_CON(50), 12, GFLAGS),
+
+ /* vepu */
+ COMPOSITE_NODIV(HCLK_VEPU1_ROOT, "hclk_vepu1_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(178), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(78), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU1_ROOT, "aclk_vepu1_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(180), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(79), 0, GFLAGS),
+ GATE(HCLK_VEPU1, "hclk_vepu1", "hclk_vepu1_root", 0,
+ RK3576_CLKGATE_CON(79), 3, GFLAGS),
+ GATE(ACLK_VEPU1, "aclk_vepu1", "aclk_vepu1_root", 0,
+ RK3576_CLKGATE_CON(79), 4, GFLAGS),
+ COMPOSITE(CLK_VEPU1_CORE, "clk_vepu1_core", gpll_cpll_spll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(180), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(79), 5, GFLAGS),
+
+ /* php */
+ COMPOSITE_NODIV(PCLK_PHP_ROOT, "pclk_php_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(92), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(34), 0, GFLAGS),
+ COMPOSITE(ACLK_PHP_ROOT, "aclk_php_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(92), 9, 1, MFLAGS, 4, 5, DFLAGS,
+ RK3576_CLKGATE_CON(34), 7, GFLAGS),
+ GATE(PCLK_PCIE0, "pclk_pcie0", "pclk_php_root", 0,
+ RK3576_CLKGATE_CON(34), 13, GFLAGS),
+ GATE(CLK_PCIE0_AUX, "clk_pcie0_aux", "xin24m", 0,
+ RK3576_CLKGATE_CON(34), 14, GFLAGS),
+ GATE(ACLK_PCIE0_MST, "aclk_pcie0_mst", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(34), 15, GFLAGS),
+ GATE(ACLK_PCIE0_SLV, "aclk_pcie0_slv", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 0, GFLAGS),
+ GATE(ACLK_PCIE0_DBI, "aclk_pcie0_dbi", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 1, GFLAGS),
+ GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 3, GFLAGS),
+ GATE(CLK_REF_USB3OTG1, "clk_ref_usb3otg1", "xin24m", 0,
+ RK3576_CLKGATE_CON(35), 4, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG1, "clk_suspend_usb3otg1", "xin24m", 0,
+ RK3576_CLKGATE_CON(35), 5, GFLAGS),
+ GATE(ACLK_MMU0, "aclk_mmu0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 11, GFLAGS),
+ GATE(ACLK_SLV_MMU0, "aclk_slv_mmu0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 13, GFLAGS),
+ GATE(ACLK_MMU1, "aclk_mmu1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 14, GFLAGS),
+ GATE(ACLK_SLV_MMU1, "aclk_slv_mmu1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 0, GFLAGS),
+ GATE(PCLK_PCIE1, "pclk_pcie1", "pclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 7, GFLAGS),
+ GATE(CLK_PCIE1_AUX, "clk_pcie1_aux", "xin24m", 0,
+ RK3576_CLKGATE_CON(36), 8, GFLAGS),
+ GATE(ACLK_PCIE1_MST, "aclk_pcie1_mst", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 9, GFLAGS),
+ GATE(ACLK_PCIE1_SLV, "aclk_pcie1_slv", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 10, GFLAGS),
+ GATE(ACLK_PCIE1_DBI, "aclk_pcie1_dbi", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 11, GFLAGS),
+ COMPOSITE(CLK_RXOOB0, "clk_rxoob0", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(93), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(37), 0, GFLAGS),
+ COMPOSITE(CLK_RXOOB1, "clk_rxoob1", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(93), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3576_CLKGATE_CON(37), 1, GFLAGS),
+ GATE(CLK_PMALIVE0, "clk_pmalive0", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(37), 2, GFLAGS),
+ GATE(CLK_PMALIVE1, "clk_pmalive1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(37), 3, GFLAGS),
+ GATE(ACLK_SATA0, "aclk_sata0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(37), 4, GFLAGS),
+ GATE(ACLK_SATA1, "aclk_sata1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(37), 5, GFLAGS),
+
+ /* audio */
+ COMPOSITE_NODIV(HCLK_AUDIO_ROOT, "hclk_audio_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(42), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(HCLK_ASRC_2CH_0, "hclk_asrc_2ch_0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_ASRC_2CH_1, "hclk_asrc_2ch_1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_ASRC_4CH_0, "hclk_asrc_4ch_0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_ASRC_4CH_1, "hclk_asrc_4ch_1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE(CLK_ASRC_2CH_0, "clk_asrc_2ch_0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(42), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 7, GFLAGS),
+ COMPOSITE(CLK_ASRC_2CH_1, "clk_asrc_2ch_1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(42), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_ASRC_4CH_0, "clk_asrc_4ch_0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(43), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE(CLK_ASRC_4CH_1, "clk_asrc_4ch_1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(43), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE(MCLK_SAI0_8CH_SRC, "mclk_sai0_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(44), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(7), 11, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI0_8CH, "mclk_sai0_8ch", mclk_sai0_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(44), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(7), 12, GFLAGS),
+ GATE(HCLK_SAI0_8CH, "hclk_sai0_8ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 13, GFLAGS),
+ GATE(HCLK_SPDIF_RX0, "hclk_spdif_rx0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 14, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX0, "mclk_spdif_rx0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(45), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(HCLK_SPDIF_RX1, "hclk_spdif_rx1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX1, "mclk_spdif_rx1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(45), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI1_8CH_SRC, "mclk_sai1_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(46), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI1_8CH, "mclk_sai1_8ch", mclk_sai1_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(46), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(8), 5, GFLAGS),
+ GATE(HCLK_SAI1_8CH, "hclk_sai1_8ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE(MCLK_SAI2_2CH_SRC, "mclk_sai2_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(47), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI2_2CH, "mclk_sai2_2ch", mclk_sai2_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(47), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(8), 8, GFLAGS),
+ GATE(HCLK_SAI2_2CH, "hclk_sai2_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(MCLK_SAI3_2CH_SRC, "mclk_sai3_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(48), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 11, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI3_2CH, "mclk_sai3_2ch", mclk_sai3_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(48), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(HCLK_SAI3_2CH, "hclk_sai3_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE(MCLK_SAI4_2CH_SRC, "mclk_sai4_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(49), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 15, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI4_2CH, "mclk_sai4_2ch", mclk_sai4_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(49), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(9), 0, GFLAGS),
+ GATE(HCLK_SAI4_2CH, "hclk_sai4_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(HCLK_ACDCDIG_DSM, "hclk_acdcdig_dsm", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(MCLK_ACDCDIG_DSM, "mclk_acdcdig_dsm", "mclk_sai4_2ch", 0,
+ RK3576_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE(CLK_PDM1, "clk_pdm1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(50), 9, 3, MFLAGS, 0, 9, DFLAGS,
+ RK3576_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(HCLK_PDM1, "hclk_pdm1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(CLK_PDM1_OUT, "clk_pdm1_out", "clk_pdm1", 0,
+ RK3576_CLKGATE_CON(3), 5, GFLAGS),
+ COMPOSITE(MCLK_PDM1, "mclk_pdm1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(51), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(9), 8, GFLAGS),
+ GATE(HCLK_SPDIF_TX0, "hclk_spdif_tx0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX0, "mclk_spdif_tx0", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(52), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(9), 10, GFLAGS),
+ GATE(HCLK_SPDIF_TX1, "hclk_spdif_tx1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 11, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX1, "mclk_spdif_tx1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(53), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(9), 12, GFLAGS),
+ GATE(CLK_SAI1_MCLKOUT, "clk_sai1_mclkout", "mclk_sai1_8ch", 0,
+ RK3576_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(CLK_SAI2_MCLKOUT, "clk_sai2_mclkout", "mclk_sai2_2ch", 0,
+ RK3576_CLKGATE_CON(9), 14, GFLAGS),
+ GATE(CLK_SAI3_MCLKOUT, "clk_sai3_mclkout", "mclk_sai3_2ch", 0,
+ RK3576_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(CLK_SAI4_MCLKOUT, "clk_sai4_mclkout", "mclk_sai4_2ch", 0,
+ RK3576_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
+ RK3576_CLKGATE_CON(10), 1, GFLAGS),
+
+ /* sdgmac */
+ COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(103), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(42), 0, GFLAGS),
+ COMPOSITE(ACLK_SDGMAC_ROOT, "aclk_sdgmac_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(103), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(42), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_SDGMAC_ROOT, "pclk_sdgmac_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(103), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(42), 2, GFLAGS),
+ GATE(ACLK_GMAC0, "aclk_gmac0", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 7, GFLAGS),
+ GATE(ACLK_GMAC1, "aclk_gmac1", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 8, GFLAGS),
+ GATE(PCLK_GMAC0, "pclk_gmac0", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 9, GFLAGS),
+ GATE(PCLK_GMAC1, "pclk_gmac1", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 10, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO, "cclk_src_sdio", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(104), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(42), 11, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 12, GFLAGS),
+ COMPOSITE(CLK_GMAC1_PTP_REF_SRC, "clk_gmac1_ptp_ref_src", clk_gmac1_ptp_ref_src_p, 0,
+ RK3576_CLKSEL_CON(104), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(42), 15, GFLAGS),
+ COMPOSITE(CLK_GMAC0_PTP_REF_SRC, "clk_gmac0_ptp_ref_src", clk_gmac0_ptp_ref_src_p, 0,
+ RK3576_CLKSEL_CON(105), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 0, GFLAGS),
+ GATE(CLK_GMAC1_PTP_REF, "clk_gmac1_ptp_ref", "clk_gmac1_ptp_ref_src", 0,
+ RK3576_CLKGATE_CON(42), 13, GFLAGS),
+ GATE(CLK_GMAC0_PTP_REF, "clk_gmac0_ptp_ref", "clk_gmac0_ptp_ref_src", 0,
+ RK3576_CLKGATE_CON(42), 14, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDMMC0, "cclk_src_sdmmc0", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(105), 13, 2, MFLAGS, 7, 6, DFLAGS,
+ RK3576_CLKGATE_CON(43), 1, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 2, GFLAGS),
+ COMPOSITE(SCLK_FSPI1_X2, "sclk_fspi1_x2", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(106), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(43), 3, GFLAGS),
+ GATE(HCLK_FSPI1, "hclk_fspi1", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 4, GFLAGS),
+ COMPOSITE(ACLK_DSMC_ROOT, "aclk_dsmc_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(106), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 5, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "aclk_dsmc_root", 0,
+ RK3576_CLKGATE_CON(43), 7, GFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 8, GFLAGS),
+ COMPOSITE(CLK_DSMC_SYS, "clk_dsmc_sys", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(107), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 9, GFLAGS),
+ GATE(HCLK_HSGPIO, "hclk_hsgpio", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 10, GFLAGS),
+ COMPOSITE(CLK_HSGPIO_TX, "clk_hsgpio_tx", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(107), 11, 2, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 11, GFLAGS),
+ COMPOSITE(CLK_HSGPIO_RX, "clk_hsgpio_rx", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(108), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 12, GFLAGS),
+ GATE(ACLK_HSGPIO, "aclk_hsgpio", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 13, GFLAGS),
+
+ /* phpphy */
+ GATE(PCLK_PHPPHY_ROOT, "pclk_phpphy_root", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_PHP_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PCIE2_COMBOPHY0, "pclk_pcie2_combophy0", "pclk_phpphy_root", 0,
+ RK3576_PHP_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_PCIE2_COMBOPHY1, "pclk_pcie2_combophy1", "pclk_phpphy_root", 0,
+ RK3576_PHP_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PCIE_100M_SRC, "clk_pcie_100m_src", "ppll", 0,
+ RK3576_PHP_CLKSEL_CON(0), 2, 5, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PCIE_100M_NDUTY_SRC, "clk_pcie_100m_nduty_src", "ppll", 0,
+ RK3576_PHP_CLKSEL_CON(0), 7, 5, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_REF_PCIE0_PHY, "clk_ref_pcie0_phy", clk_ref_pcie0_phy_p, 0,
+ RK3576_PHP_CLKSEL_CON(0), 12, 2, MFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_REF_PCIE1_PHY, "clk_ref_pcie1_phy", clk_ref_pcie0_phy_p, 0,
+ RK3576_PHP_CLKSEL_CON(0), 14, 2, MFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_REF_MPHY_26M, "clk_ref_mphy_26m", "ppll", CLK_IS_CRITICAL,
+ RK3576_PHP_CLKSEL_CON(1), 0, 8, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 9, GFLAGS),
+
+ /* pmu */
+ GATE(CLK_200M_PMU_SRC, "clk_200m_pmu_src", "clk_gpll_div6", 0,
+ RK3576_PMU_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_100M_PMU_SRC, "clk_100m_pmu_src", "cpll", 0,
+ RK3576_PMU_CLKSEL_CON(4), 4, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 3, GFLAGS),
+ FACTOR_GATE(CLK_50M_PMU_SRC, "clk_50m_pmu_src", "clk_100m_pmu_src", 0, 1, 2,
+ RK3576_PMU_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE_NODIV(HCLK_PMU1_ROOT, "hclk_pmu1_root", mux_pmu200m_pmu100m_pmu50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(4), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_PMU_CM0_ROOT, "hclk_pmu_cm0_root", mux_pmu200m_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 2, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_PMU0_ROOT, "pclk_pmu0_root", mux_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(20), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(PCLK_PMU0, "pclk_pmu0", "pclk_pmu0_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_PMU1_ROOT, "pclk_pmu1_root", "pclk_pmu0_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(PCLK_PMU1, "pclk_pmu1", "pclk_pmu1_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(CLK_PMU1, "clk_pmu1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(4), 2, GFLAGS),
+ GATE(PCLK_PMUPHY_ROOT, "pclk_pmuphy_root", "pclk_pmu1_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(PCLK_HDPTX_APB, "pclk_hdptx_apb", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_MIPI_DCPHY, "pclk_mipi_dcphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_CSIDPHY, "pclk_csidphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_USBDPPHY, "pclk_usbdpphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PMUPHY_REF_SRC, "clk_pmuphy_ref_src", "cpll", 0,
+ RK3576_PMU_CLKSEL_CON(0), 0, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(0), 13, GFLAGS),
+ GATE(CLK_USBDP_COMBO_PHY_IMMORTAL, "clk_usbdp_combo_phy_immortal", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ GATE(CLK_HDMITXHDP, "clk_hdmitxhdp", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_MPHY, "pclk_mphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ MUX(CLK_REF_OSC_MPHY, "clk_ref_osc_mphy", clk_ref_osc_mphy_p, 0,
+ RK3576_PMU_CLKSEL_CON(3), 0, 2, MFLAGS),
+ GATE(CLK_REF_UFS_CLKOUT, "clk_ref_ufs_clkout", "clk_ref_osc_mphy", 0,
+ RK3576_PMU_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(FCLK_PMU_CM0_CORE, "fclk_pmu_cm0_core", "hclk_pmu_cm0_root", 0,
+ RK3576_PMU_CLKGATE_CON(3), 12, GFLAGS),
+ COMPOSITE(CLK_PMU_CM0_RTC, "clk_pmu_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 14, 1, MFLAGS, 9, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 14, GFLAGS),
+ GATE(PCLK_PMU1WDT, "pclk_pmu1wdt", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 5, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PMU1WDT, "tclk_pmu1wdt", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 15, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(PCLK_PMUTIMER, "pclk_pmutimer", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_PMUTIMER_ROOT, "clk_pmutimer_root", mux_pmu100m_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(5), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(CLK_PMUTIMER0, "clk_pmutimer0", "clk_pmutimer_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(CLK_PMUTIMER1, "clk_pmutimer1", "clk_pmutimer_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 10, GFLAGS),
+ GATE(PCLK_PMU1PWM, "pclk_pmu1pwm", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 11, GFLAGS),
+ COMPOSITE_NODIV(CLK_PMU1PWM, "clk_pmu1pwm", mux_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(5), 2, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 12, GFLAGS),
+ GATE(CLK_PMU1PWM_OSC, "clk_pmu1pwm_osc", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C0, "clk_i2c0", mux_pmu200m_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(6), 7, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(5), 2, GFLAGS),
+ COMPOSITE_NODIV(SCLK_UART1, "sclk_uart1", uart1_p, 0,
+ RK3576_PMU_CLKSEL_CON(8), 0, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(5), 5, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(CLK_PDM0, "clk_pdm0", "clk_pdm0_src_top", 0,
+ RK3576_PMU_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(HCLK_PDM0, "hclk_pdm0", "hclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(MCLK_PDM0, "mclk_pdm0", "mclk_pdm0_src_top", 0,
+ RK3576_PMU_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(HCLK_VAD, "hclk_vad", "hclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(CLK_PDM0_OUT, "clk_pdm0_out", "clk_pdm0", 0,
+ RK3576_PMU_CLKGATE_CON(6), 8, GFLAGS),
+ COMPOSITE(CLK_HPTIMER_SRC, "clk_hptimer_src", cpll_24m_p, CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(11), 6, 1, MFLAGS, 1, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu0_root", 0,
+ RK3576_PMU_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(20), 2, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(CLK_OSC0_PMU1, "clk_osc0_pmu1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 8, GFLAGS),
+ GATE(CLK_PMU1PWM_RC, "clk_pmu1pwm_rc", "clk_pvtm_clkout", 0,
+ RK3576_PMU_CLKGATE_CON(5), 7, GFLAGS),
+
+ /* phy ref */
+ MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+
+ /* secure ns */
+ COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_SECURE_NS, "hclk_secure_ns", mux_175m_116m_58m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 2, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_SECURE_NS, "pclk_secure_ns", mux_116m_58m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 4, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_secure_ns", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_secure_ns", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_OTPC_NS, "clk_otpc_ns", "xin24m", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(ACLK_CRYPTO_NS, "aclk_crypto_ns", "aclk_secure_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 14, GFLAGS),
+ GATE(HCLK_TRNG_NS, "hclk_trng_ns", "hclk_secure_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 13, GFLAGS),
+ GATE(CLK_PKA_CRYPTO_NS, "clk_pka_crypto_ns", "clk_pka_crypto_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 1, GFLAGS),
+
+ /* io */
+ GATE(CLK_VICAP_I0CLK, "clk_vicap_i0clk", "clk_csihost0_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 1, GFLAGS),
+ GATE(CLK_VICAP_I1CLK, "clk_vicap_i1clk", "clk_csihost1_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 2, GFLAGS),
+ GATE(CLK_VICAP_I2CLK, "clk_vicap_i2clk", "clk_csihost2_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 3, GFLAGS),
+ GATE(CLK_VICAP_I3CLK, "clk_vicap_i3clk", "clk_csihost3_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 4, GFLAGS),
+ GATE(CLK_VICAP_I4CLK, "clk_vicap_i4clk", "clk_csihost4_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 5, GFLAGS),
+};
+
+static void __init rk3576_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+ struct regmap *grf;
+
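+	/* size the provider's clk table from the highest clock id in the branch list */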
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
+ ARRAY_SIZE(rk3576_clk_branches)) + 1;
+
+ grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(grf)) {
+ pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
+ return;
+ }
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
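+	/* the MUXGRF phy-ref muxes below live in the PMU0 GRF, accessed through this regmap */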
+ ctx->grf = grf;
+
+ rockchip_clk_register_plls(ctx, rk3576_pll_clks,
+ ARRAY_SIZE(rk3576_pll_clks),
+ RK3576_GRF_SOC_STATUS0);
+
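+	/* the little and big CPU clusters each have their own armclk */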
+ rockchip_clk_register_armclk(ctx, ARMCLK_L, "armclk_l",
+ mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
+ &rk3576_cpulclk_data, rk3576_cpulclk_rates,
+ ARRAY_SIZE(rk3576_cpulclk_rates));
+ rockchip_clk_register_armclk(ctx, ARMCLK_B, "armclk_b",
+ mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
+ &rk3576_cpubclk_data, rk3576_cpubclk_rates,
+ ARRAY_SIZE(rk3576_cpubclk_rates));
+
+ rockchip_clk_register_branches(ctx, rk3576_clk_branches,
+ ARRAY_SIZE(rk3576_clk_branches));
+
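+	/* register the softreset controller that shares this register space */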
+ rk3576_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
+
+struct clk_rk3576_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3576_inits clk_rk3576_cru_init = {
+ .inits = rk3576_clk_init,
+};
+
+static const struct of_device_id clk_rk3576_match_table[] = {
+ {
+ .compatible = "rockchip,rk3576-cru",
+ .data = &clk_rk3576_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3576_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3576_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3576_driver = {
+ .probe = clk_rk3576_probe,
+ .driver = {
+ .name = "clk-rk3576",
+ .of_match_table = clk_rk3576_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3576_driver, clk_rk3576_probe);
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index b30279a96dc8..0ffaf639f807 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" };
PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" };
PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" };
PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
-PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" };
+PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" };
PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" };
PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" };
@@ -2502,43 +2502,3 @@ static void __init rk3588_clk_init(struct device_node *np)
}
CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
-
-struct clk_rk3588_inits {
- void (*inits)(struct device_node *np);
-};
-
-static const struct clk_rk3588_inits clk_3588_cru_init = {
- .inits = rk3588_clk_init,
-};
-
-static const struct of_device_id clk_rk3588_match_table[] = {
- {
- .compatible = "rockchip,rk3588-cru",
- .data = &clk_3588_cru_init,
- },
- { }
-};
-
-static int __init clk_rk3588_probe(struct platform_device *pdev)
-{
- const struct clk_rk3588_inits *init_data;
- struct device *dev = &pdev->dev;
-
- init_data = device_get_match_data(dev);
- if (!init_data)
- return -EINVAL;
-
- if (init_data->inits)
- init_data->inits(dev->of_node);
-
- return 0;
-}
-
-static struct platform_driver clk_rk3588_driver = {
- .driver = {
- .name = "clk-rk3588",
- .of_match_table = clk_rk3588_match_table,
- .suppress_bind_attrs = true,
- },
-};
-builtin_platform_driver_probe(clk_rk3588_driver, clk_rk3588_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 73d2cbdc716b..2fa7253c73b2 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -450,12 +450,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
- struct clk *clk = NULL;
+ struct clk *clk;
unsigned int idx;
unsigned long flags;
for (idx = 0; idx < nr_clk; idx++, list++) {
flags = list->flags;
+ clk = NULL;
/* catch simple muxes */
switch (list->branch_type) {
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index fd3b476dedda..f1957e1c1178 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -235,6 +235,58 @@ struct clk;
#define RK3568_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180)
#define RK3568_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200)
+#define RK3576_PHP_CRU_BASE 0x8000
+#define RK3576_SECURE_NS_CRU_BASE 0x10000
+#define RK3576_PMU_CRU_BASE 0x20000
+#define RK3576_BIGCORE_CRU_BASE 0x38000
+#define RK3576_LITCORE_CRU_BASE 0x40000
+#define RK3576_CCI_CRU_BASE 0x48000
+
+#define RK3576_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3576_MODE_CON0 0x280
+#define RK3576_BPLL_MODE_CON0 (RK3576_BIGCORE_CRU_BASE + 0x280)
+#define RK3576_LPLL_MODE_CON0 (RK3576_LITCORE_CRU_BASE + 0x280)
+#define RK3576_PPLL_MODE_CON0 (RK3576_PHP_CRU_BASE + 0x280)
+#define RK3576_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3576_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3576_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3576_GLB_CNT_TH 0xc00
+#define RK3576_GLB_SRST_FST 0xc08
+#define RK3576_GLB_SRST_SND 0xc0c
+#define RK3576_GLB_RST_CON 0xc10
+#define RK3576_GLB_RST_ST 0xc04
+#define RK3576_SDIO_CON0 0xC24
+#define RK3576_SDIO_CON1 0xC28
+#define RK3576_SDMMC_CON0 0xC30
+#define RK3576_SDMMC_CON1 0xC34
+
+#define RK3576_PHP_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x300)
+#define RK3576_PHP_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x800)
+#define RK3576_PHP_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0xa00)
+
+#define RK3576_PMU_PLL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE)
+#define RK3576_PMU_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x300)
+#define RK3576_PMU_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x800)
+#define RK3576_PMU_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0xa00)
+
+#define RK3576_SECURE_NS_CLKSEL_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x300)
+#define RK3576_SECURE_NS_CLKGATE_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x800)
+#define RK3576_SECURE_NS_SOFTRST_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0xa00)
+
+#define RK3576_CCI_CLKSEL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x300)
+#define RK3576_CCI_CLKGATE_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x800)
+#define RK3576_CCI_SOFTRST_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0xa00)
+
+#define RK3576_BPLL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE)
+#define RK3576_BIGCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x300)
+#define RK3576_BIGCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x800)
+#define RK3576_BIGCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0xa00)
+#define RK3576_LPLL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE)
+#define RK3576_LITCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x300)
+#define RK3576_LITCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x800)
+#define RK3576_LITCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0xa00)
+#define RK3576_NON_SECURE_GATING_CON00 0xc48
+
#define RK3588_PHP_CRU_BASE 0x8000
#define RK3588_PMU_CRU_BASE 0x30000
#define RK3588_BIGCORE0_CRU_BASE 0x50000
@@ -287,6 +339,7 @@ enum rockchip_pll_type {
pll_rk3399,
pll_rk3588,
pll_rk3588_core,
+ pll_rk3588_ddr,
};
#define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1, \
@@ -1025,6 +1078,7 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3588_rst_init(struct device_node *np, void __iomem *reg_base);
#endif
diff --git a/drivers/clk/rockchip/rst-rk3576.c b/drivers/clk/rockchip/rst-rk3576.c
new file mode 100644
index 000000000000..15cbb9bc0a41
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3576.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ * Based on Sebastien Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3576-cru.h>
+#include "clk.h"
+
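+/*
+ * A reset id encodes its SOFTRST_CON register and bit as (reg * 16 + bit),
+ * since each register holds 16 reset bits. CRU blocks located at an offset
+ * from the main CRU add their byte offset scaled by 4, as one register index
+ * corresponds to 4 bytes (e.g. 0x8000 * 4 for the PHP CRU).
+ */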
+/* 0x27200000 + 0x0A00 */
+#define RK3576_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+/* 0x27208000 + 0x0A00 */
+#define RK3576_PHPCRU_RESET_OFFSET(id, reg, bit) [id] = (0x8000*4 + reg * 16 + bit)
+/* 0x27210000 + 0x0A00 */
+#define RK3576_SECURENSCRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000*4 + reg * 16 + bit)
+/* 0x27220000 + 0x0A00 */
+#define RK3576_PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000*4 + reg * 16 + bit)
+
+/* mapping table for reset ID to register offset */
+static const int rk3576_register_offset[] = {
+ /* SOFTRST_CON01 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_TOP_BIU, 1, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TOP_BIU, 1, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_TOP_MID_BIU, 1, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SECURE_HIGH_BIU, 1, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_TOP_BIU, 1, 14),
+
+ /* SOFTRST_CON02 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO0VOP_CHANNEL_BIU, 2, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VO0VOP_CHANNEL_BIU, 2, 1),
+
+ /* SOFTRST_CON06 */
+ RK3576_CRU_RESET_OFFSET(SRST_BISRINTF, 6, 2),
+
+ /* SOFTRST_CON07 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_AUDIO_BIU, 7, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_0, 7, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_1, 7, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_0, 7, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_1, 7, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_0, 7, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_1, 7, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_0, 7, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_1, 7, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI0_8CH, 7, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI0_8CH, 7, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX0, 7, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX0, 7, 15),
+
+ /* SOFTRST_CON08 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX1, 8, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX1, 8, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI1_8CH, 8, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI1_8CH, 8, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI2_2CH, 8, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI2_2CH, 8, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI3_2CH, 8, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI3_2CH, 8, 14),
+
+ /* SOFTRST_CON09 */
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI4_2CH, 9, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI4_2CH, 9, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ACDCDIG_DSM, 9, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_M_ACDCDIG_DSM, 9, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_PDM1, 9, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_PDM1, 9, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_M_PDM1, 9, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX0, 9, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX0, 9, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX1, 9, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX1, 9, 12),
+
+ /* SOFTRST_CON11 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 11, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 11, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CRU, 11, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CAN0, 11, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_CAN0, 11, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CAN1, 11, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_CAN1, 11, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2BUS, 11, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VCCIO_IOC, 11, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 11, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_KEY_SHIFT, 11, 15),
+
+ /* SOFTRST_CON12 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C1, 12, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C2, 12, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C3, 12, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C4, 12, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C5, 12, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C6, 12, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C7, 12, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C8, 12, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C9, 12, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT_BUSMCU, 12, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT_BUSMCU, 12, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GIC, 12, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C1, 12, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C2, 12, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C3, 12, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C4, 12, 15),
+
+ /* SOFTRST_CON13 */
+ RK3576_CRU_RESET_OFFSET(SRST_I2C5, 13, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C6, 13, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C7, 13, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C8, 13, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C9, 13, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SARADC, 13, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_SARADC, 13, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TSADC, 13, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_TSADC, 13, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART0, 13, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART2, 13, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART3, 13, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART4, 13, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART5, 13, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART6, 13, 15),
+
+ /* SOFTRST_CON14 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART7, 14, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART8, 14, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART9, 14, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART10, 14, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART11, 14, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART0, 14, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART2, 14, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART3, 14, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART4, 14, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART5, 14, 15),
+
+ /* SOFTRST_CON15 */
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART6, 15, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART7, 15, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART8, 15, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART9, 15, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART10, 15, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART11, 15, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI0, 15, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI1, 15, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI2, 15, 15),
+
+ /* SOFTRST_CON16 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI3, 16, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI4, 16, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI0, 16, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI1, 16, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI2, 16, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI3, 16, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI4, 16, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT0, 16, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT0, 16, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SYS_GRF, 16, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PWM1, 16, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_PWM1, 16, 11),
+
+ /* SOFTRST_CON17 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER0, 17, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER1, 17, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER0, 17, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER1, 17, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER2, 17, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER3, 17, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER4, 17, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER5, 17, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSIOC, 17, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_MAILBOX0, 17, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO1, 17, 15),
+
+ /* SOFTRST_CON18 */
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO1, 18, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO2, 18, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO2, 18, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO3, 18, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO3, 18, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO4, 18, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO4, 18, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DECOM, 18, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DECOM, 18, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_D_DECOM, 18, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER6, 18, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER7, 18, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER8, 18, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER9, 18, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER10, 18, 15),
+
+ /* SOFTRST_CON19 */
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER11, 19, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC0, 19, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC1, 19, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC2, 19, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SPINLOCK, 19, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_BUS, 19, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_I3C0, 19, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_I3C1, 19, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_BUS_CM0_BIU, 19, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_F_BUS_CM0_CORE, 19, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_T_BUS_CM0_JTAG, 19, 13),
+
+ /* SOFTRST_CON20 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2PMU, 20, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2DDR, 20, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_BUS, 20, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PWM2, 20, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_PWM2, 20, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_FREQ_PWM1, 20, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_COUNTER_PWM1, 20, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_I3C0, 20, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_I3C1, 20, 13),
+
+ /* SOFTRST_CON21 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH0, 21, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_BIU, 21, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH0, 21, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH0, 21, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_BIU, 21, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_DFI_CH0, 21, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH0, 21, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH0, 21, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH1, 21, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH1, 21, 15),
+
+ /* SOFTRST_CON22 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH1, 22, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH1, 22, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_DFI_CH1, 22, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH0, 22, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH1, 22, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH1, 22, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH0, 22, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH1, 22, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_AHB2APB, 22, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_AHB2APB, 22, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_DDR_BIU, 22, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_F_DDR_CM0_CORE, 22, 15),
+
+ /* SOFTRST_CON23 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH0, 23, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH1, 23, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER0, 23, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER1, 23, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT_DDR, 23, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT, 23, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TIMER, 23, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_T_DDR_CM0_JTAG, 23, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_GRF, 23, 11),
+
+ /* SOFTRST_CON25 */
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH0, 25, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH0, 25, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH0, 25, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH0, 25, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH0, 25, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH0, 25, 6),
+
+ /* SOFTRST_CON26 */
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH1, 26, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH1, 26, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH1, 26, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH1, 26, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH1, 26, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH1, 26, 6),
+
+ /* SOFTRST_CON27 */
+ RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_DDR, 27, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_DDR, 27, 1),
+
+ /* SOFTRST_CON28 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0, 28, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0_BIU, 28, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_L_RKNN0_BIU, 28, 12),
+
+ /* SOFTRST_CON29 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1, 29, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1_BIU, 29, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_L_RKNN1_BIU, 29, 3),
+
+ /* SOFTRST_CON31 */
+ RK3576_CRU_RESET_OFFSET(SRST_NPU_DAP, 31, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_L_NPUSUBSYS_BIU, 31, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPUTOP_BIU, 31, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_TIMER, 31, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER0, 31, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER1, 31, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_WDT, 31, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_T_NPU_WDT, 31, 15),
+
+ /* SOFTRST_CON32 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN_CBUF, 32, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RVCORE0, 32, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_GRF, 32, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_NPU, 32, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_NPU_PVTPLL, 32, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_NPU_CM0_BIU, 32, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_F_NPU_CM0_CORE, 32, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_T_NPU_CM0_JTAG, 32, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNNTOP_BIU, 32, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKNN_CBUF, 32, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKNNTOP_BIU, 32, 13),
+
+ /* SOFTRST_CON33 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_NVM_BIU, 33, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_NVM_BIU, 33, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_S_FSPI, 33, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_H_FSPI, 33, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_C_EMMC, 33, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_EMMC, 33, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EMMC, 33, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_B_EMMC, 33, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_T_EMMC, 33, 12),
+
+ /* SOFTRST_CON34 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_GRF, 34, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PHP_BIU, 34, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_PHP_BIU, 34, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PCIE0, 34, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_PCIE0_POWER_UP, 34, 15),
+
+ /* SOFTRST_CON35 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG1, 35, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU0, 35, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU0, 35, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU1, 35, 14),
+
+ /* SOFTRST_CON36 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU1, 36, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PCIE1, 36, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_PCIE1_POWER_UP, 36, 9),
+
+ /* SOFTRST_CON37 */
+ RK3576_CRU_RESET_OFFSET(SRST_RXOOB0, 37, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_RXOOB1, 37, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_PMALIVE0, 37, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_PMALIVE1, 37, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SATA0, 37, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SATA1, 37, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_ASIC1, 37, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_ASIC0, 37, 7),
+
+ /* SOFTRST_CON40 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSIDPHY1, 40, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_SCAN_CSIDPHY1, 40, 3),
+
+ /* SOFTRST_CON42 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_GRF, 42, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_BIU, 42, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SDGMAC_BIU, 42, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDGMAC_BIU, 42, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GMAC0, 42, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GMAC1, 42, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GMAC0, 42, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GMAC1, 42, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDIO, 42, 12),
+
+ /* SOFTRST_CON43 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDMMC0, 43, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_S_FSPI1, 43, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_FSPI1, 43, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DSMC_BIU, 43, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DSMC, 43, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DSMC, 43, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HSGPIO, 43, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_HSGPIO, 43, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HSGPIO, 43, 13),
+
+ /* SOFTRST_CON45 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC, 45, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC_BIU, 45, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 45, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_HEVC_CA, 45, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_CORE, 45, 9),
+
+ /* SOFTRST_CON47 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB_BIU, 47, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_BIU, 47, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG0, 47, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS_BIU, 47, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU2, 47, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU2, 47, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS_SYS, 47, 15),
+
+ /* SOFTRST_CON48 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS, 48, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_GRF, 48, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UFS_GRF, 48, 2),
+
+ /* SOFTRST_CON49 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VPU_BIU, 49, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_JPEG_BIU, 49, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA_BIU, 49, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VDPP_BIU, 49, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EBC_BIU, 49, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_0, 49, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_0, 49, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_0, 49, 15),
+
+ /* SOFTRST_CON50 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_JPEG, 50, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_H_JPEG, 50, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VDPP, 50, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VDPP, 50, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_VDPP, 50, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_1, 50, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_1, 50, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_1, 50, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_EBC, 50, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EBC, 50, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_D_EBC, 50, 12),
+
+ /* SOFTRST_CON51 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0_BIU, 51, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0_BIU, 51, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0, 51, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0, 51, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_VEPU0_CORE, 51, 6),
+
+ /* SOFTRST_CON53 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_VI_BIU, 53, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VI_BIU, 53, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VI_BIU, 53, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VICAP, 53, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VICAP, 53, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VICAP, 53, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_ISP0, 53, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_ISP0_VICAP, 53, 11),
+
+ /* SOFTRST_CON54 */
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_VPSS, 54, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_0, 54, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_1, 54, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_2, 54, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_3, 54, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_4, 54, 8),
+
+ /* SOFTRST_CON59 */
+ RK3576_CRU_RESET_OFFSET(SRST_CIFIN, 59, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I0CLK, 59, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I1CLK, 59, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I2CLK, 59, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I3CLK, 59, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I4CLK, 59, 5),
+
+ /* SOFTRST_CON61 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP_BIU, 61, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP2_BIU, 61, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VOP_BIU, 61, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOP_BIU, 61, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VOP, 61, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP, 61, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP0, 61, 13),
+
+ /* SOFTRST_CON62 */
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP1, 62, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP2, 62, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOP2_BIU, 62, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOPGRF, 62, 3),
+
+ /* SOFTRST_CON63 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO0_BIU, 63, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO0_BIU, 63, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0_BIU, 63, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO0_GRF, 63, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0, 63, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HDCP0, 63, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_HDCP0, 63, 14),
+
+ /* SOFTRST_CON64 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DSIHOST0, 64, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_DSIHOST0, 64, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_HDMITX0, 64, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_HDMITX0_REF, 64, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_EDP0, 64, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_EDP0_24M, 64, 14),
+
+ /* SOFTRST_CON65 */
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI5_8CH, 65, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI5_8CH, 65, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI6_8CH, 65, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI6_8CH, 65, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX2, 65, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX2, 65, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX2, 65, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX2, 65, 15),
+
+ /* SOFTRST_CON66 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI8_8CH, 66, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI8_8CH, 66, 2),
+
+ /* SOFTRST_CON67 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO1_BIU, 67, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO1_BIU, 67, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI7_8CH, 67, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI7_8CH, 67, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX3, 67, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX4, 67, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX5, 67, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX3, 67, 14),
+
+ /* SOFTRST_CON68 */
+ RK3576_CRU_RESET_OFFSET(SRST_DP0, 68, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO1_GRF, 68, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1_BIU, 68, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1, 68, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HDCP1, 68, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_HDCP1, 68, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI9_8CH, 68, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI9_8CH, 68, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX4, 68, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX5, 68, 13),
+
+ /* SOFTRST_CON69 */
+ RK3576_CRU_RESET_OFFSET(SRST_GPU, 69, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_S_GPU_BIU, 69, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_M0_GPU_BIU, 69, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPU_BIU, 69, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPU_GRF, 69, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_GPU_PVTPLL, 69, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_GPU, 69, 15),
+
+ /* SOFTRST_CON72 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_CENTER_BIU, 72, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 72, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM, 72, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM_BIU, 72, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CENTER_BIU, 72, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_GRF, 72, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 72, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SHAREMEM, 72, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_BIU, 72, 12),
+
+ /* SOFTRST_CON75 */
+ RK3576_CRU_RESET_OFFSET(SRST_LINKSYM_HDMITXPHY0, 75, 1),
+
+ /* SOFTRST_CON78 */
+ RK3576_CRU_RESET_OFFSET(SRST_DP0_PIXELCLK, 78, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_PHY_DP0_TX, 78, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_DP1_PIXELCLK, 78, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_DP2_PIXELCLK, 78, 4),
+
+ /* SOFTRST_CON79 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1_BIU, 79, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1_BIU, 79, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1, 79, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1, 79, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_VEPU1_CORE, 79, 5),
+
+ /* PPLL_SOFTRST_CON00 */
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PHPPHY_CRU, 0, 1),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_APB2ASB_SLV_CHIP_TOP, 0, 3),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0, 0, 5),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0_GRF, 0, 6),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1, 0, 7),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1_GRF, 0, 8),
+
+ /* PPLL_SOFTRST_CON01 */
+ RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE0_PIPE_PHY, 1, 5),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE1_PIPE_PHY, 1, 8),
+
+ /* SECURENS_SOFTRST_CON00 */
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_CRYPTO_NS, 0, 3),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_TRNG_NS, 0, 4),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_P_OTPC_NS, 0, 8),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_OTPC_NS, 0, 9),
+
+ /* PMU1_SOFTRST_CON00 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_GRF, 0, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_APB, 0, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MIPI_DCPHY, 0, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_DCPHY_GRF, 0, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT0_APB2ASB, 0, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT1_APB2ASB, 0, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USB2DEBUG, 0, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY_GRF, 0, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY, 0, 8),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_0, 0, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_1, 0, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDP_GRF, 0, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDPPHY, 0, 12),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_INIT, 0, 15),
+
+ /* PMU1_SOFTRST_CON01 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_CMN, 1, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_LANE, 1, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_PCS, 1, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_M_MIPI_DCPHY, 1, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_S_MIPI_DCPHY, 1, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_SCAN_CSIPHY, 1, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO6_IOC, 1, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_0, 1, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_1, 1, 8),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_INIT, 1, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_CMN, 1, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_LANE, 1, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDMITXHDP, 1, 13),
+
+ /* PMU1_SOFTRST_CON02 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_MPHY_INIT, 2, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MPHY_GRF, 2, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO7_IOC, 2, 3),
+
+ /* PMU1_SOFTRST_CON03 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 3, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_NIU, 3, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU_CM0_BIU, 3, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_CORE, 3, 12),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_JTAG, 3, 13),
+
+ /* PMU1_SOFTRST_CON04 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CRU_PMU1, 4, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_GRF, 4, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_IOC, 4, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1WDT, 4, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_T_PMU1WDT, 4, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMUTIMER, 4, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER0, 4, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER1, 4, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1PWM, 4, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU1PWM, 4, 12),
+
+ /* PMU1_SOFTRST_CON05 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_I2C0, 5, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_I2C0, 5, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_S_UART1, 5, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_UART1, 5, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PDM0, 5, 13),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PDM0, 5, 15),
+
+ /* PMU1_SOFTRST_CON06 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_M_PDM0, 6, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_VAD, 6, 1),
+
+ /* PMU1_SOFTRST_CON07 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0GRF, 7, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0IOC, 7, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_GPIO0, 7, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_DB_GPIO0, 7, 7),
+};
+
+void rk3576_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3576_register_offset,
+ ARRAY_SIZE(rk3576_register_offset),
+ reg_base + RK3576_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
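
A brief aside on the offset encoding used by this table (an illustrative sketch, not part of the patch): each rk3576_register_offset[] entry packs a 16-bit SOFTRST register index and a bit position as reg * 16 + bit, and the PHPCRU/SECURENSCRU/PMU1CRU macro variants add a bank constant sized so that, once decoded back to a byte offset, the entry lands in the 0x8000/0x10000/0x20000 blocks named in the comments above. Assuming the common Rockchip softrst helper decodes entries along these lines (decode_softrst_entry is a hypothetical name, used here only for illustration):

	/*
	 * Hypothetical decode of one LUT entry, assuming 16 resets per
	 * 16-bit register and a 4-byte register stride, matching the
	 * reg * 16 + bit packing used by the macros above.
	 */
	static void decode_softrst_entry(int entry, unsigned int *reg_off, int *bit)
	{
		*reg_off = (entry / 16) * 4;	/* byte offset of SOFTRST register */
		*bit = entry % 16;		/* bit within that register */
	}

Worked through for RK3576_PHPCRU_RESET_OFFSET(id, 1, 5): the entry is 0x8000*4 + 1*16 + 5 = 0x20015, which decodes to byte offset 0x8004 and bit 5, i.e. the second SOFTRST register of the PHPCRU block sitting 0x8000 bytes above the CRU base.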
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 3056944a5a54..f1ba48758c78 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index e44b172d7255..abd49edcf707 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -292,7 +292,7 @@ static struct platform_driver exynos_audss_clk_driver = {
.pm = &exynos_audss_clk_pm_ops,
},
.probe = exynos_audss_clk_probe,
- .remove_new = exynos_audss_clk_remove,
+ .remove = exynos_audss_clk_remove,
};
module_platform_driver(exynos_audss_clk_driver);
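
For readers tracking the platform-driver API here: this is the mechanical rename of .remove_new back to .remove after the core conversion made the remove callback void-returning, so the callback itself keeps the prototype it already had. A minimal sketch of the shape involved (illustrative, mirroring the callback already in this driver):

	/* The callback bound via .remove now has the void-returning form: */
	static void exynos_audss_clk_remove(struct platform_device *pdev);

The same one-line substitution repeats in the clk-exynos-clkout.c hunk below.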
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 89cf2000884f..2ef5748c139b 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -241,7 +241,7 @@ static struct platform_driver exynos_clkout_driver = {
.pm = &exynos_clkout_pm_ops,
},
.probe = exynos_clkout_probe,
- .remove_new = exynos_clkout_remove,
+ .remove = exynos_clkout_remove,
};
module_platform_driver(exynos_clkout_driver);
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index f7d7427a558b..fc42251731ed 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -17,10 +17,10 @@
#include "clk-exynos-arm64.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_GOUT_FSYS_USB30DRD + 1)
+#define CLKS_NR_TOP (CLK_MOUT_SHARED1_PLL + 1)
#define CLKS_NR_CORE (CLK_GOUT_TREX_P_CORE_PCLK_P_CORE + 1)
#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
-#define CLKS_NR_FSYS (CLK_GOUT_MMC_SDIO_SDCLKIN + 1)
+#define CLKS_NR_FSYS (CLK_FSYS_USB30DRD_REF_CLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -162,6 +162,10 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
NULL),
};
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared0_div3" };
@@ -189,6 +193,12 @@ PNAME(mout_fsys_mmc_sdio_p) = { "dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_fsys_usb30drd_p) = { "dout_shared0_div4", "dout_shared1_div4" };
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* TOP */
+ MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -232,17 +242,17 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
static const struct samsung_div_clock top_div_clks[] __initconst = {
/* TOP */
- DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
- DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
- DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
- DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "fout_shared1_pll",
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
- DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
@@ -676,30 +686,56 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
/* ---- CMU_FSYS ------------------------------------------------------------ */
/* Register Offset definitions for CMU_FSYS (0x13400000) */
-#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
-#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
-#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
-#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
-#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
-#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
-#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
-#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+#define PLL_LOCKTIME_PLL_USB 0x0000
+#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
+#define PLL_CON0_PLL_USB 0x01a0
+#define CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE 0x200c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL 0x2068
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0 0x206c
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1 0x2070
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY 0x2074
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK 0x2078
static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_USB,
PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ PLL_CON0_PLL_USB,
+ CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE,
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK,
+};
+
+static const struct samsung_pll_rate_table pll_usb_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 50000000U, 400, 13, 4),
+};
+
+static const struct samsung_pll_clock fsys_pll_clks[] __initconst = {
+ PLL(pll_1418x, CLK_FOUT_USB_PLL, "fout_usb_pll", "oscclk",
+ PLL_LOCKTIME_PLL_USB, PLL_CON0_PLL_USB,
+ pll_usb_rate_table),
};
/* List of parent clocks for Muxes in CMU_FSYS */
@@ -708,6 +744,7 @@ PNAME(mout_fsys_mmc_card_user_p) = { "oscclk", "dout_fsys_mmc_card" };
PNAME(mout_fsys_mmc_embd_user_p) = { "oscclk", "dout_fsys_mmc_embd" };
PNAME(mout_fsys_mmc_sdio_user_p) = { "oscclk", "dout_fsys_mmc_sdio" };
PNAME(mout_fsys_usb30drd_user_p) = { "oscclk", "dout_fsys_usb30drd" };
+PNAME(mout_usb_pll_p) = { "oscclk", "fout_usb_pll" };
static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user", mout_fsys_bus_user_p,
@@ -721,12 +758,16 @@ static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
MUX_F(CLK_MOUT_FSYS_MMC_SDIO_USER, "mout_fsys_mmc_sdio_user",
mout_fsys_mmc_sdio_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
4, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
+ MUX(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
mout_fsys_usb30drd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
- 4, 1, CLK_SET_RATE_PARENT, 0),
+ 4, 1),
+ nMUX_F(CLK_MOUT_USB_PLL, "mout_usb_pll", mout_usb_pll_p,
+ PLL_CON0_PLL_USB, 4, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_FSYS_USB20PHY_CLKCORE, "clk_fsys_usb20phy_clkcore", "mout_usb_pll",
+ CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_fsys_bus_user",
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
@@ -742,9 +783,21 @@ static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
GATE(CLK_GOUT_MMC_SDIO_SDCLKIN, "gout_mmc_sdio_sdclkin",
"mout_fsys_mmc_sdio_user", CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_20PHYCTRL, "clk_fsys_usb30drd_aclk_20phyctrl",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0, "clk_fsys_usb30drd_aclk_30phyctrl_0",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1, "clk_fsys_usb30drd_aclk_30phyctrl_1",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_BUS_CLK_EARLY, "clk_fsys_usb30drd_bus_clk_early",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_REF_CLK, "clk_fsys_usb30drd_ref_clk", "mout_fsys_usb30drd_user",
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK, 21, 0, 0),
};
static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .pll_clks = fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys_pll_clks),
.mux_clks = fsys_mux_clks,
.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
.gate_clks = fsys_gate_clks,
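
As a quick sanity check on the new USB PLL table above (an illustrative calculation, assuming the usual PLL35xx-style relation FOUT = MDIV * FIN / (PDIV * 2^SDIV) implied by the PLL_35XX_RATE macro; this is not code from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long fin = 26000000ULL;	/* oscclk, 26 MHz */
		unsigned int mdiv = 400, pdiv = 13, sdiv = 4;

		/* Assumed relation: FOUT = MDIV * FIN / (PDIV * 2^SDIV) */
		printf("%llu\n", mdiv * fin / ((unsigned long long)pdiv << sdiv));
		return 0;
	}

This prints 50000000, matching the 50000000U target rate in pll_usb_rate_table.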
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 6215471c4ac6..e00e213b1201 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -28,7 +28,7 @@
#define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1)
#define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1)
#define CLKS_NR_MFCMSCL (CLK_GOUT_MFCMSCL_SYSREG_PCLK + 1)
-#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
+#define CLKS_NR_PERI (CLK_GOUT_BUSIF_TMU_PCLK + 1)
#define CLKS_NR_CORE (CLK_GOUT_SPDMA_CORE_ACLK + 1)
#define CLKS_NR_DPU (CLK_GOUT_DPU_SYSREG_PCLK + 1)
@@ -1921,6 +1921,7 @@ static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014
+#define CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK 0x2018
#define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
@@ -1957,6 +1958,7 @@ static const unsigned long peri_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2,
+ CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK,
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
@@ -2068,6 +2070,9 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_BUSIF_TMU_PCLK, "gout_busif_tmu_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info peri_cmu_info __initconst = {
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index f04bacacab2c..5971e680e566 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -20,6 +20,7 @@
#define CLKS_NR_TOP (GOUT_CLKCMU_PERIS_BUS + 1)
#define CLKS_NR_BUSMC (CLK_GOUT_BUSMC_SPDMA_PCLK + 1)
#define CLKS_NR_CORE (CLK_GOUT_CORE_CMU_CORE_PCLK + 1)
+#define CLKS_NR_DPUM (CLK_GOUT_DPUM_SYSMMU_D3_CLK + 1)
#define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK + 1)
#define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_USB30_1_ACLK + 1)
#define CLKS_NR_FSYS2 (CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO + 1)
@@ -1076,6 +1077,85 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_clkcmu_core_bus",
};
+/* ---- CMU_DPUM ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_DPUM (0x18c00000) */
+#define PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER 0x0600
+#define CLK_CON_DIV_DIV_CLK_DPUM_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON 0x202c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA 0x2030
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP 0x2034
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1 0x207c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1 0x2084
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1 0x208c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1 0x2094
+
+static const unsigned long dpum_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER,
+ CLK_CON_DIV_DIV_CLK_DPUM_BUSP,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1,
+};
+
+PNAME(mout_dpum_bus_user_p) = { "oscclk", "dout_clkcmu_dpum_bus" };
+
+static const struct samsung_mux_clock dpum_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DPUM_BUS_USER, "mout_dpum_bus_user",
+ mout_dpum_bus_user_p, PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER, 4, 1),
+};
+
+static const struct samsung_div_clock dpum_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DPUM_BUSP, "dout_dpum_busp", "mout_dpum_bus_user",
+ CLK_CON_DIV_DIV_CLK_DPUM_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock dpum_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DPUM_ACLK_DECON, "gout_dpum_decon_aclk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_ACLK_DMA, "gout_dpum_dma_aclk", "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_ACLK_DPP, "gout_dpum_dpp_aclk", "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D0_CLK, "gout_dpum_sysmmu_d0_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D1_CLK, "gout_dpum_sysmmu_d1_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D2_CLK, "gout_dpum_sysmmu_d2_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D3_CLK, "gout_dpum_sysmmu_d3_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+};
+
+static const struct samsung_cmu_info dpum_cmu_info __initconst = {
+ .mux_clks = dpum_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dpum_mux_clks),
+ .div_clks = dpum_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dpum_div_clks),
+ .gate_clks = dpum_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dpum_gate_clks),
+ .nr_clk_ids = CLKS_NR_DPUM,
+ .clk_regs = dpum_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dpum_clk_regs),
+ .clk_name = "bus",
+};
+
/* ---- CMU_FSYS0 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_FSYS0 (0x17700000) */
@@ -2086,6 +2166,9 @@ static const struct of_device_id exynosautov9_cmu_of_match[] = {
.compatible = "samsung,exynosautov9-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynosautov9-cmu-dpum",
+ .data = &dpum_cmu_info,
+ }, {
.compatible = "samsung,exynosautov9-cmu-fsys0",
.data = &fsys0_cmu_info,
}, {
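
For context on how the new dpum_cmu_info entry is consumed (a hedged sketch of the usual pattern; example_cmu_probe is a hypothetical name, not code from this patch): the of_device_id table maps "samsung,exynosautov9-cmu-dpum" to the CMU descriptor, which the driver's probe can fetch via of_device_get_match_data() before registering the clocks, while the .clk_name = "bus" field names the parent bus clock kept enabled so the CMU registers stay accessible.

	/* Hypothetical probe sketch; names are assumptions, not from the patch */
	static int example_cmu_probe(struct platform_device *pdev)
	{
		const struct samsung_cmu_info *info;

		/* Resolves to &dpum_cmu_info for the cmu-dpum compatible */
		info = of_device_get_match_data(&pdev->dev);
		if (!info)
			return -EINVAL;

		/* ... register info->mux_clks / div_clks / gate_clks ... */
		return 0;
	}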
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
new file mode 100644
index 000000000000..7ba9748c0526
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Common Clock Framework support for ExynosAuto v920 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynosautov920.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_TOP (0x11000000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_LOCKTIME_PLL_SHARED5 0x001c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define PLL_CON0_PLL_SHARED5 0x02c0
+#define PLL_CON3_PLL_SHARED5 0x02cc
+
+/* MUX */
+#define CLK_CON_MUX_MUX_CLKCMU_ACC_NOC 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_NOC 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_NOC 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_NOC 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_NOC 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_ACC_ORB 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_ISP_NOC 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_NOC 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_WFD 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MFD_NOC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MISC_NOC 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SNW_NOC 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_NOC 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_TAA_NOC 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_NOCP 0x10ec
+#define CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
+
+/* DIV */
+#define CLK_CON_DIV_CLKCMU_ACC_NOC 0x1800
+#define CLK_CON_DIV_CLKCMU_APM_NOC 0x1804
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1808
+#define CLK_CON_DIV_CLKCMU_AUD_NOC 0x180c
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK0 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK1 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK2 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK3 0x181c
+#define CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER 0x1820
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER 0x182c
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_DNC_NOC 0x183c
+#define CLK_CON_DIV_CLKCMU_DPTX_DPGTC 0x1840
+#define CLK_CON_DIV_CLKCMU_DPTX_DPOSC 0x1844
+#define CLK_CON_DIV_CLKCMU_DPTX_NOC 0x1848
+#define CLK_CON_DIV_CLKCMU_DPUB_DSIM 0x184c
+#define CLK_CON_DIV_CLKCMU_DPUB_NOC 0x1850
+#define CLK_CON_DIV_CLKCMU_DPUF0_NOC 0x1854
+#define CLK_CON_DIV_CLKCMU_DPUF1_NOC 0x1858
+#define CLK_CON_DIV_CLKCMU_DPUF2_NOC 0x185c
+#define CLK_CON_DIV_CLKCMU_DSP_NOC 0x1860
+#define CLK_CON_DIV_CLKCMU_G3D_NOCP 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_GNPU_NOC 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_NOC 0x1870
+#define CLK_CON_DIV_CLKCMU_ACC_ORB 0x1874
+#define CLK_CON_DIV_CLKCMU_GNPU_XMAA 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_NOC 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_USBDRD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI2_ETHERNET 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI2_NOC 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD 0x1894
+#define CLK_CON_DIV_CLKCMU_ISP_NOC 0x1898
+#define CLK_CON_DIV_CLKCMU_M2M_JPEG 0x189c
+#define CLK_CON_DIV_CLKCMU_M2M_NOC 0x18a0
+#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x18a4
+#define CLK_CON_DIV_CLKCMU_MFC_WFD 0x18a8
+#define CLK_CON_DIV_CLKCMU_MFD_NOC 0x18ac
+#define CLK_CON_DIV_CLKCMU_MIF_NOCP 0x18b0
+#define CLK_CON_DIV_CLKCMU_MISC_NOC 0x18b4
+#define CLK_CON_DIV_CLKCMU_NOCL0_NOC 0x18b8
+#define CLK_CON_DIV_CLKCMU_NOCL1_NOC 0x18bc
+#define CLK_CON_DIV_CLKCMU_NOCL2_NOC 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_NOC 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_NOC 0x18d0
+#define CLK_CON_DIV_CLKCMU_SDMA_NOC 0x18d4
+#define CLK_CON_DIV_CLKCMU_SNW_NOC 0x18d8
+#define CLK_CON_DIV_CLKCMU_SSP_NOC 0x18dc
+#define CLK_CON_DIV_CLKCMU_TAA_NOC 0x18e0
+#define CLK_CON_DIV_CLK_ADD_CH_CLK 0x18e4
+#define CLK_CON_DIV_CLK_CMU_PLLCLKOUT 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_NOCP 0x18f0
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_LOCKTIME_PLL_SHARED5,
+ PLL_CON0_PLL_MMC,
+ PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED4,
+ PLL_CON0_PLL_SHARED5,
+ PLL_CON3_PLL_SHARED5,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_APM_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_ORB,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_ISP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_MFC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MFD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SNW_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_TAA_NOC,
+ CLK_CON_MUX_MUX_CLK_CMU_NOCP,
+ CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_ACC_NOC,
+ CLK_CON_DIV_CLKCMU_APM_NOC,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_AUD_NOC,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK0,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK1,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK2,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK3,
+ CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_DNC_NOC,
+ CLK_CON_DIV_CLKCMU_DPTX_DPGTC,
+ CLK_CON_DIV_CLKCMU_DPTX_DPOSC,
+ CLK_CON_DIV_CLKCMU_DPTX_NOC,
+ CLK_CON_DIV_CLKCMU_DPUB_DSIM,
+ CLK_CON_DIV_CLKCMU_DPUB_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF0_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF1_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF2_NOC,
+ CLK_CON_DIV_CLKCMU_DSP_NOC,
+ CLK_CON_DIV_CLKCMU_G3D_NOCP,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_GNPU_NOC,
+ CLK_CON_DIV_CLKCMU_HSI0_NOC,
+ CLK_CON_DIV_CLKCMU_ACC_ORB,
+ CLK_CON_DIV_CLKCMU_GNPU_XMAA,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_NOC,
+ CLK_CON_DIV_CLKCMU_HSI1_USBDRD,
+ CLK_CON_DIV_CLKCMU_HSI2_ETHERNET,
+ CLK_CON_DIV_CLKCMU_HSI2_NOC,
+ CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS,
+ CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_ISP_NOC,
+ CLK_CON_DIV_CLKCMU_M2M_JPEG,
+ CLK_CON_DIV_CLKCMU_M2M_NOC,
+ CLK_CON_DIV_CLKCMU_MFC_MFC,
+ CLK_CON_DIV_CLKCMU_MFC_WFD,
+ CLK_CON_DIV_CLKCMU_MFD_NOC,
+ CLK_CON_DIV_CLKCMU_MIF_NOCP,
+ CLK_CON_DIV_CLKCMU_MISC_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL0_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL1_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL2_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC0_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_NOC,
+ CLK_CON_DIV_CLKCMU_SDMA_NOC,
+ CLK_CON_DIV_CLKCMU_SNW_NOC,
+ CLK_CON_DIV_CLKCMU_SSP_NOC,
+ CLK_CON_DIV_CLKCMU_TAA_NOC,
+ CLK_CON_DIV_CLK_ADD_CH_CLK,
+ CLK_CON_DIV_CLK_CMU_PLLCLKOUT,
+ CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_DIV_CLK_CMU_NOCP,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_531x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_531x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_531x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_531x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_531x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_531x, FOUT_SHARED5_PLL, "fout_shared5_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED5, PLL_CON3_PLL_SHARED5, NULL),
+ PLL(pll_531x, FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_shared2_pll_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_shared3_pll_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_shared4_pll_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_shared5_pll_p) = { "oscclk", "fout_shared5_pll" };
+PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+
+PNAME(mout_clkcmu_cmu_boost_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_cmu_cmuref_p) = { "oscclk", "dout_cmu_boost" };
+
+PNAME(mout_clkcmu_acc_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared5_div1",
+ "dout_shared3_div1", "oscclk" };
+
+PNAME(mout_clkcmu_acc_orb_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared1_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_apm_noc_p) = { "dout_shared2_div2", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_aud_cpu_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared4_div3" };
+
+PNAME(mout_clkcmu_aud_noc_p) = { "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared1_div2", "dout_shared2_div3" };
+
+PNAME(mout_clkcmu_cpucl0_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl0_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_cpucl0_dbg_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared0_div4" };
+
+PNAME(mout_clkcmu_cpucl1_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl1_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_cpucl2_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl2_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dnc_noc_p) = { "dout_shared1_div2", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dptx_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dptx_dpgtc_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_dptx_dposc_p) = { "oscclk", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dpub_noc_p) = { "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4",
+ "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dpub_dsim_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dpuf_noc_p) = { "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4",
+ "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dsp_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "fout_shared5_pll", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_g3d_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_gnpu_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared2_div3",
+ "fout_shared5_pll", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_hsi0_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_hsi1_noc_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi1_usbdrd_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi1_mmc_card_p) = { "oscclk", "dout_shared2_div2",
+ "dout_shared4_div2", "fout_mmc_pll" };
+
+PNAME(mout_clkcmu_hsi2_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_hsi2_noc_ufs_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div2" };
+
+PNAME(mout_clkcmu_hsi2_ufs_embd_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi2_ethernet_p) = { "oscclk", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+
+PNAME(mout_clkcmu_isp_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_m2m_noc_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_m2m_jpeg_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfc_mfc_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfc_wfd_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfd_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "fout_shared5_pll" };
+
+PNAME(mout_clkcmu_mif_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_misc_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_nocl0_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_nocl1_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_nocl2_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_peric0_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric0_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric1_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric1_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_sdma_noc_p) = { "dout_shared1_div2", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_snw_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_ssp_noc_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div2", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_taa_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ MUX(MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(MOUT_SHARED2_PLL, "mout_shared2_pll", mout_shared2_pll_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(MOUT_SHARED3_PLL, "mout_shared3_pll", mout_shared3_pll_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(MOUT_SHARED4_PLL, "mout_shared4_pll", mout_shared4_pll_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(MOUT_SHARED5_PLL, "mout_shared5_pll", mout_shared5_pll_p,
+ PLL_CON0_PLL_SHARED5, 4, 1),
+ MUX(MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+
+ /* BOOST */
+ MUX(MOUT_CLKCMU_CMU_BOOST, "mout_clkcmu_cmu_boost",
+ mout_clkcmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(MOUT_CLKCMU_CMU_CMUREF, "mout_clkcmu_cmu_cmuref",
+ mout_clkcmu_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+
+ /* ACC */
+ MUX(MOUT_CLKCMU_ACC_NOC, "mout_clkcmu_acc_noc",
+ mout_clkcmu_acc_noc_p, CLK_CON_MUX_MUX_CLKCMU_ACC_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_ACC_ORB, "mout_clkcmu_acc_orb",
+ mout_clkcmu_acc_orb_p, CLK_CON_MUX_MUX_CLKCMU_ACC_ORB, 0, 3),
+
+ /* APM */
+ MUX(MOUT_CLKCMU_APM_NOC, "mout_clkcmu_apm_noc",
+ mout_clkcmu_apm_noc_p, CLK_CON_MUX_MUX_CLKCMU_APM_NOC, 0, 2),
+
+ /* AUD */
+ MUX(MOUT_CLKCMU_AUD_CPU, "mout_clkcmu_aud_cpu",
+ mout_clkcmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(MOUT_CLKCMU_AUD_NOC, "mout_clkcmu_aud_noc",
+ mout_clkcmu_aud_noc_p, CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, 0, 2),
+
+ /* CPUCL0 */
+ MUX(MOUT_CLKCMU_CPUCL0_SWITCH, "mout_clkcmu_cpucl0_switch",
+ mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL0_CLUSTER, "mout_clkcmu_cpucl0_cluster",
+ mout_clkcmu_cpucl0_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER,
+ 0, 3),
+ MUX(MOUT_CLKCMU_CPUCL0_DBG, "mout_clkcmu_cpucl0_dbg",
+ mout_clkcmu_cpucl0_dbg_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ 0, 2),
+
+ /* CPUCL1 */
+ MUX(MOUT_CLKCMU_CPUCL1_SWITCH, "mout_clkcmu_cpucl1_switch",
+ mout_clkcmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL1_CLUSTER, "mout_clkcmu_cpucl1_cluster",
+ mout_clkcmu_cpucl1_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER,
+ 0, 3),
+
+ /* CPUCL2 */
+ MUX(MOUT_CLKCMU_CPUCL2_SWITCH, "mout_clkcmu_cpucl2_switch",
+ mout_clkcmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL2_CLUSTER, "mout_clkcmu_cpucl2_cluster",
+ mout_clkcmu_cpucl2_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER,
+ 0, 3),
+
+ /* DNC */
+ MUX(MOUT_CLKCMU_DNC_NOC, "mout_clkcmu_dnc_noc",
+ mout_clkcmu_dnc_noc_p, CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, 0, 3),
+
+ /* DPTX */
+ MUX(MOUT_CLKCMU_DPTX_NOC, "mout_clkcmu_dptx_noc",
+ mout_clkcmu_dptx_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC, 0, 2),
+ MUX(MOUT_CLKCMU_DPTX_DPGTC, "mout_clkcmu_dptx_dpgtc",
+ mout_clkcmu_dptx_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, 0, 2),
+ MUX(MOUT_CLKCMU_DPTX_DPOSC, "mout_clkcmu_dptx_dposc",
+ mout_clkcmu_dptx_dposc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC, 0, 1),
+
+ /* DPUB */
+ MUX(MOUT_CLKCMU_DPUB_NOC, "mout_clkcmu_dpub_noc",
+ mout_clkcmu_dpub_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUB_DSIM, "mout_clkcmu_dpub_dsim",
+ mout_clkcmu_dpub_dsim_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, 0, 1),
+
+ /* DPUF */
+ MUX(MOUT_CLKCMU_DPUF0_NOC, "mout_clkcmu_dpuf0_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUF1_NOC, "mout_clkcmu_dpuf1_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUF2_NOC, "mout_clkcmu_dpuf2_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC, 0, 3),
+
+ /* DSP */
+ MUX(MOUT_CLKCMU_DSP_NOC, "mout_clkcmu_dsp_noc",
+ mout_clkcmu_dsp_noc_p, CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, 0, 3),
+
+ /* G3D */
+ MUX(MOUT_CLKCMU_G3D_SWITCH, "mout_clkcmu_g3d_switch",
+ mout_clkcmu_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 2),
+ MUX(MOUT_CLKCMU_G3D_NOCP, "mout_clkcmu_g3d_nocp",
+ mout_clkcmu_g3d_nocp_p, CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, 0, 2),
+
+ /* GNPU */
+ MUX(MOUT_CLKCMU_GNPU_NOC, "mout_clkcmu_gnpu_noc",
+ mout_clkcmu_gnpu_noc_p, CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, 0, 3),
+
+ /* HSI0 */
+ MUX(MOUT_CLKCMU_HSI0_NOC, "mout_clkcmu_hsi0_noc",
+ mout_clkcmu_hsi0_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, 0, 2),
+
+ /* HSI1 */
+ MUX(MOUT_CLKCMU_HSI1_NOC, "mout_clkcmu_hsi1_noc",
+ mout_clkcmu_hsi1_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI1_USBDRD, "mout_clkcmu_hsi1_usbdrd",
+ mout_clkcmu_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI1_MMC_CARD, "mout_clkcmu_hsi1_mmc_card",
+ mout_clkcmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+
+ /* HSI2 */
+ MUX(MOUT_CLKCMU_HSI2_NOC, "mout_clkcmu_hsi2_noc",
+ mout_clkcmu_hsi2_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_NOC_UFS, "mout_clkcmu_hsi2_noc_ufs",
+ mout_clkcmu_hsi2_noc_ufs_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_UFS_EMBD, "mout_clkcmu_hsi2_ufs_embd",
+ mout_clkcmu_hsi2_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_ETHERNET, "mout_clkcmu_hsi2_ethernet",
+ mout_clkcmu_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET,
+ 0, 2),
+
+ /* ISP */
+ MUX(MOUT_CLKCMU_ISP_NOC, "mout_clkcmu_isp_noc",
+ mout_clkcmu_isp_noc_p, CLK_CON_MUX_MUX_CLKCMU_ISP_NOC, 0, 3),
+
+ /* M2M */
+ MUX(MOUT_CLKCMU_M2M_NOC, "mout_clkcmu_m2m_noc",
+ mout_clkcmu_m2m_noc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, 0, 2),
+ MUX(MOUT_CLKCMU_M2M_JPEG, "mout_clkcmu_m2m_jpeg",
+ mout_clkcmu_m2m_jpeg_p, CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG, 0, 2),
+
+ /* MFC */
+ MUX(MOUT_CLKCMU_MFC_MFC, "mout_clkcmu_mfc_mfc",
+ mout_clkcmu_mfc_mfc_p, CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 2),
+ MUX(MOUT_CLKCMU_MFC_WFD, "mout_clkcmu_mfc_wfd",
+ mout_clkcmu_mfc_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, 0, 2),
+
+ /* MFD */
+ MUX(MOUT_CLKCMU_MFD_NOC, "mout_clkcmu_mfd_noc",
+ mout_clkcmu_mfd_noc_p, CLK_CON_MUX_MUX_CLKCMU_MFD_NOC, 0, 3),
+
+ /* MIF */
+ MUX(MOUT_CLKCMU_MIF_SWITCH, "mout_clkcmu_mif_switch",
+ mout_clkcmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(MOUT_CLKCMU_MIF_NOCP, "mout_clkcmu_mif_nocp",
+ mout_clkcmu_mif_nocp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, 0, 2),
+
+ /* MISC */
+ MUX(MOUT_CLKCMU_MISC_NOC, "mout_clkcmu_misc_noc",
+ mout_clkcmu_misc_noc_p, CLK_CON_MUX_MUX_CLKCMU_MISC_NOC, 0, 2),
+
+ /* NOCL0 */
+ MUX(MOUT_CLKCMU_NOCL0_NOC, "mout_clkcmu_nocl0_noc",
+ mout_clkcmu_nocl0_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+
+ /* NOCL1 */
+ MUX(MOUT_CLKCMU_NOCL1_NOC, "mout_clkcmu_nocl1_noc",
+ mout_clkcmu_nocl1_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC, 0, 3),
+
+ /* NOCL2 */
+ MUX(MOUT_CLKCMU_NOCL2_NOC, "mout_clkcmu_nocl2_noc",
+ mout_clkcmu_nocl2_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC, 0, 3),
+
+ /* PERIC0 */
+ MUX(MOUT_CLKCMU_PERIC0_NOC, "mout_clkcmu_peric0_noc",
+ mout_clkcmu_peric0_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC0_IP, "mout_clkcmu_peric0_ip",
+ mout_clkcmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+
+ /* PERIC1 */
+ MUX(MOUT_CLKCMU_PERIC1_NOC, "mout_clkcmu_peric1_noc",
+ mout_clkcmu_peric1_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC1_IP, "mout_clkcmu_peric1_ip",
+ mout_clkcmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+
+ /* SDMA */
+ MUX(MOUT_CLKCMU_SDMA_NOC, "mout_clkcmu_sdma_noc",
+ mout_clkcmu_sdma_noc_p, CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, 0, 3),
+
+ /* SNW */
+ MUX(MOUT_CLKCMU_SNW_NOC, "mout_clkcmu_snw_noc",
+ mout_clkcmu_snw_noc_p, CLK_CON_MUX_MUX_CLKCMU_SNW_NOC, 0, 3),
+
+ /* SSP */
+ MUX(MOUT_CLKCMU_SSP_NOC, "mout_clkcmu_ssp_noc",
+ mout_clkcmu_ssp_noc_p, CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, 0, 2),
+
+ /* TAA */
+ MUX(MOUT_CLKCMU_TAA_NOC, "mout_clkcmu_taa_noc",
+ mout_clkcmu_taa_noc_p, CLK_CON_MUX_MUX_CLKCMU_TAA_NOC, 0, 3),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+
+ /* BOOST */
+ DIV(DOUT_CLKCMU_CMU_BOOST, "dout_clkcmu_cmu_boost",
+ "mout_clkcmu_cmu_boost", CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2),
+
+ /* ACC */
+ DIV(DOUT_CLKCMU_ACC_NOC, "dout_clkcmu_acc_noc",
+ "mout_clkcmu_acc_noc", CLK_CON_DIV_CLKCMU_ACC_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_ACC_ORB, "dout_clkcmu_acc_orb",
+ "mout_clkcmu_acc_orb", CLK_CON_DIV_CLKCMU_ACC_ORB, 0, 4),
+
+ /* APM */
+ DIV(DOUT_CLKCMU_APM_NOC, "dout_clkcmu_apm_noc",
+ "mout_clkcmu_apm_noc", CLK_CON_DIV_CLKCMU_APM_NOC, 0, 3),
+
+ /* AUD */
+ DIV(DOUT_CLKCMU_AUD_CPU, "dout_clkcmu_aud_cpu",
+ "mout_clkcmu_aud_cpu", CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(DOUT_CLKCMU_AUD_NOC, "dout_clkcmu_aud_noc",
+ "mout_clkcmu_aud_noc", CLK_CON_DIV_CLKCMU_AUD_NOC, 0, 4),
+
+ /* CPUCL0 */
+ DIV(DOUT_CLKCMU_CPUCL0_SWITCH, "dout_clkcmu_cpucl0_switch",
+ "mout_clkcmu_cpucl0_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL0_CLUSTER, "dout_clkcmu_cpucl0_cluster",
+ "mout_clkcmu_cpucl0_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL0_DBG, "dout_clkcmu_cpucl0_dbg",
+ "mout_clkcmu_cpucl0_dbg",
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 4),
+
+ /* CPUCL1 */
+ DIV(DOUT_CLKCMU_CPUCL1_SWITCH, "dout_clkcmu_cpucl1_switch",
+ "mout_clkcmu_cpucl1_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL1_CLUSTER, "dout_clkcmu_cpucl1_cluster",
+ "mout_clkcmu_cpucl1_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, 0, 3),
+
+ /* CPUCL2 */
+ DIV(DOUT_CLKCMU_CPUCL2_SWITCH, "dout_clkcmu_cpucl2_switch",
+ "mout_clkcmu_cpucl2_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL2_CLUSTER, "dout_clkcmu_cpucl2_cluster",
+ "mout_clkcmu_cpucl2_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER, 0, 3),
+
+ /* DNC */
+ DIV(DOUT_CLKCMU_DNC_NOC, "dout_clkcmu_dnc_noc",
+ "mout_clkcmu_dnc_noc", CLK_CON_DIV_CLKCMU_DNC_NOC, 0, 4),
+
+ /* DPTX */
+ DIV(DOUT_CLKCMU_DPTX_NOC, "dout_clkcmu_dptx_noc",
+ "mout_clkcmu_dptx_noc", CLK_CON_DIV_CLKCMU_DPTX_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPTX_DPGTC, "dout_clkcmu_dptx_dpgtc",
+ "mout_clkcmu_dptx_dpgtc", CLK_CON_DIV_CLKCMU_DPTX_DPGTC, 0, 3),
+ DIV(DOUT_CLKCMU_DPTX_DPOSC, "dout_clkcmu_dptx_dposc",
+ "mout_clkcmu_dptx_dposc", CLK_CON_DIV_CLKCMU_DPTX_DPOSC, 0, 5),
+
+ /* DPUB */
+ DIV(DOUT_CLKCMU_DPUB_NOC, "dout_clkcmu_dpub_noc",
+ "mout_clkcmu_dpub_noc", CLK_CON_DIV_CLKCMU_DPUB_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUB_DSIM, "dout_clkcmu_dpub_dsim",
+ "mout_clkcmu_dpub_dsim", CLK_CON_DIV_CLKCMU_DPUB_DSIM, 0, 4),
+
+ /* DPUF */
+ DIV(DOUT_CLKCMU_DPUF0_NOC, "dout_clkcmu_dpuf0_noc",
+ "mout_clkcmu_dpuf0_noc", CLK_CON_DIV_CLKCMU_DPUF0_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUF1_NOC, "dout_clkcmu_dpuf1_noc",
+ "mout_clkcmu_dpuf1_noc", CLK_CON_DIV_CLKCMU_DPUF1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUF2_NOC, "dout_clkcmu_dpuf2_noc",
+ "mout_clkcmu_dpuf2_noc", CLK_CON_DIV_CLKCMU_DPUF2_NOC, 0, 4),
+
+ /* DSP */
+ DIV(DOUT_CLKCMU_DSP_NOC, "dout_clkcmu_dsp_noc",
+ "mout_clkcmu_dsp_noc", CLK_CON_DIV_CLKCMU_DSP_NOC, 0, 4),
+
+ /* G3D */
+ DIV(DOUT_CLKCMU_G3D_SWITCH, "dout_clkcmu_g3d_switch",
+ "mout_clkcmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_G3D_NOCP, "dout_clkcmu_g3d_nocp",
+ "mout_clkcmu_g3d_nocp", CLK_CON_DIV_CLKCMU_G3D_NOCP, 0, 3),
+
+ /* GNPU */
+ DIV(DOUT_CLKCMU_GNPU_NOC, "dout_clkcmu_gnpu_noc",
+ "mout_clkcmu_gnpu_noc", CLK_CON_DIV_CLKCMU_GNPU_NOC, 0, 4),
+
+ /* HSI0 */
+ DIV(DOUT_CLKCMU_HSI0_NOC, "dout_clkcmu_hsi0_noc",
+ "mout_clkcmu_hsi0_noc", CLK_CON_DIV_CLKCMU_HSI0_NOC, 0, 4),
+
+ /* HSI1 */
+ DIV(DOUT_CLKCMU_HSI1_NOC, "dout_clkcmu_hsi1_noc",
+ "mout_clkcmu_hsi1_noc", CLK_CON_DIV_CLKCMU_HSI1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_HSI1_USBDRD, "dout_clkcmu_hsi1_usbdrd",
+ "mout_clkcmu_hsi1_usbdrd", CLK_CON_DIV_CLKCMU_HSI1_USBDRD, 0, 4),
+ DIV(DOUT_CLKCMU_HSI1_MMC_CARD, "dout_clkcmu_hsi1_mmc_card",
+ "mout_clkcmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD, 0, 9),
+
+ /* HSI2 */
+ DIV(DOUT_CLKCMU_HSI2_NOC, "dout_clkcmu_hsi2_noc",
+ "mout_clkcmu_hsi2_noc", CLK_CON_DIV_CLKCMU_HSI2_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_HSI2_NOC_UFS, "dout_clkcmu_hsi2_noc_ufs",
+ "mout_clkcmu_hsi2_noc_ufs", CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS, 0, 4),
+ DIV(DOUT_CLKCMU_HSI2_UFS_EMBD, "dout_clkcmu_hsi2_ufs_embd",
+ "mout_clkcmu_hsi2_ufs_embd", CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD, 0, 3),
+ DIV(DOUT_CLKCMU_HSI2_ETHERNET, "dout_clkcmu_hsi2_ethernet",
+ "mout_clkcmu_hsi2_ethernet", CLK_CON_DIV_CLKCMU_HSI2_ETHERNET, 0, 3),
+
+ /* ISP */
+ DIV(DOUT_CLKCMU_ISP_NOC, "dout_clkcmu_isp_noc",
+ "mout_clkcmu_isp_noc", CLK_CON_DIV_CLKCMU_ISP_NOC, 0, 4),
+
+ /* M2M */
+ DIV(DOUT_CLKCMU_M2M_NOC, "dout_clkcmu_m2m_noc",
+ "mout_clkcmu_m2m_noc", CLK_CON_DIV_CLKCMU_M2M_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_M2M_JPEG, "dout_clkcmu_m2m_jpeg",
+ "mout_clkcmu_m2m_jpeg", CLK_CON_DIV_CLKCMU_M2M_JPEG, 0, 4),
+
+ /* MFC */
+ DIV(DOUT_CLKCMU_MFC_MFC, "dout_clkcmu_mfc_mfc",
+ "mout_clkcmu_mfc_mfc", CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4),
+ DIV(DOUT_CLKCMU_MFC_WFD, "dout_clkcmu_mfc_wfd",
+ "mout_clkcmu_mfc_wfd", CLK_CON_DIV_CLKCMU_MFC_WFD, 0, 4),
+
+ /* MFD */
+ DIV(DOUT_CLKCMU_MFD_NOC, "dout_clkcmu_mfd_noc",
+ "mout_clkcmu_mfd_noc", CLK_CON_DIV_CLKCMU_MFD_NOC, 0, 4),
+
+ /* MIF */
+ DIV(DOUT_CLKCMU_MIF_NOCP, "dout_clkcmu_mif_nocp",
+ "mout_clkcmu_mif_nocp", CLK_CON_DIV_CLKCMU_MIF_NOCP, 0, 4),
+
+ /* MISC */
+ DIV(DOUT_CLKCMU_MISC_NOC, "dout_clkcmu_misc_noc",
+ "mout_clkcmu_misc_noc", CLK_CON_DIV_CLKCMU_MISC_NOC, 0, 4),
+
+ /* NOCL0 */
+ DIV(DOUT_CLKCMU_NOCL0_NOC, "dout_clkcmu_nocl0_noc",
+ "mout_clkcmu_nocl0_noc", CLK_CON_DIV_CLKCMU_NOCL0_NOC, 0, 4),
+
+ /* NOCL1 */
+ DIV(DOUT_CLKCMU_NOCL1_NOC, "dout_clkcmu_nocl1_noc",
+ "mout_clkcmu_nocl1_noc", CLK_CON_DIV_CLKCMU_NOCL1_NOC, 0, 4),
+
+ /* NOCL2 */
+ DIV(DOUT_CLKCMU_NOCL2_NOC, "dout_clkcmu_nocl2_noc",
+ "mout_clkcmu_nocl2_noc", CLK_CON_DIV_CLKCMU_NOCL2_NOC, 0, 4),
+
+ /* PERIC0 */
+ DIV(DOUT_CLKCMU_PERIC0_NOC, "dout_clkcmu_peric0_noc",
+ "mout_clkcmu_peric0_noc", CLK_CON_DIV_CLKCMU_PERIC0_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC0_IP, "dout_clkcmu_peric0_ip",
+ "mout_clkcmu_peric0_ip", CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+
+ /* PERIC1 */
+ DIV(DOUT_CLKCMU_PERIC1_NOC, "dout_clkcmu_peric1_noc",
+ "mout_clkcmu_peric1_noc", CLK_CON_DIV_CLKCMU_PERIC1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC1_IP, "dout_clkcmu_peric1_ip",
+ "mout_clkcmu_peric1_ip", CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+
+ /* SDMA */
+ DIV(DOUT_CLKCMU_SDMA_NOC, "dout_clkcmu_sdma_noc",
+ "mout_clkcmu_sdma_noc", CLK_CON_DIV_CLKCMU_SDMA_NOC, 0, 4),
+
+ /* SNW */
+ DIV(DOUT_CLKCMU_SNW_NOC, "dout_clkcmu_snw_noc",
+ "mout_clkcmu_snw_noc", CLK_CON_DIV_CLKCMU_SNW_NOC, 0, 4),
+
+ /* SSP */
+ DIV(DOUT_CLKCMU_SSP_NOC, "dout_clkcmu_ssp_noc",
+ "mout_clkcmu_ssp_noc", CLK_CON_DIV_CLKCMU_SSP_NOC, 0, 4),
+
+ /* TAA */
+ DIV(DOUT_CLKCMU_TAA_NOC, "dout_clkcmu_taa_noc",
+ "mout_clkcmu_taa_noc", CLK_CON_DIV_CLKCMU_TAA_NOC, 0, 4),
+};
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(DOUT_SHARED0_DIV1, "dout_shared0_div1",
+ "mout_shared0_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED0_DIV2, "dout_shared0_div2",
+ "mout_shared0_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED0_DIV3, "dout_shared0_div3",
+ "mout_shared0_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED0_DIV4, "dout_shared0_div4",
+ "mout_shared0_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED1_DIV1, "dout_shared1_div1",
+ "mout_shared1_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED1_DIV2, "dout_shared1_div2",
+ "mout_shared1_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED1_DIV3, "dout_shared1_div3",
+ "mout_shared1_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED1_DIV4, "dout_shared1_div4",
+ "mout_shared1_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED2_DIV1, "dout_shared2_div1",
+ "mout_shared2_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED2_DIV2, "dout_shared2_div2",
+ "mout_shared2_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED2_DIV3, "dout_shared2_div3",
+ "mout_shared2_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED2_DIV4, "dout_shared2_div4",
+ "mout_shared2_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED3_DIV1, "dout_shared3_div1",
+ "mout_shared3_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED3_DIV2, "dout_shared3_div2",
+ "mout_shared3_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED3_DIV3, "dout_shared3_div3",
+ "mout_shared3_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED3_DIV4, "dout_shared3_div4",
+ "mout_shared3_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED4_DIV1, "dout_shared4_div1",
+ "mout_shared4_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED4_DIV2, "dout_shared4_div2",
+ "mout_shared4_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED4_DIV3, "dout_shared4_div3",
+ "mout_shared4_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED4_DIV4, "dout_shared4_div4",
+ "mout_shared4_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED5_DIV1, "dout_shared5_div1",
+ "mout_shared5_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED5_DIV2, "dout_shared5_div2",
+ "mout_shared5_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED5_DIV3, "dout_shared5_div3",
+ "mout_shared5_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4",
+ "mout_shared5_pll", 1, 4, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynosautov920_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
+ exynosautov920_cmu_top_init);
+
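Aside: CLK_OF_DECLARE() works by dropping the match entry into a dedicated linker table that of_clk_init() walks during early boot, well before the platform bus exists, which is what lets CMU_TOP come up in time for the early domains mentioned above while CMU_PERIC0 below can be an ordinary platform driver. A rough sketch of the mechanism (simplified; not the literal kernel definition):

    /* Sketch: the entry lands in the __clk_of_table section, which
     * of_clk_init() iterates before the device model is available. */
    #define CLK_OF_DECLARE(name, compat, fn)                           \
            static const struct of_device_id __clk_of_table_##name     \
                    __used __section("__clk_of_table")                 \
                    = { .compatible = compat, .data = fn }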
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_PERIC0_I3C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C 0x1028
+#define CLK_CON_DIV_DIV_CLK_PERIC0_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I3C,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_ip_user_p) = { "oscclk", "dout_clkcmu_peric0_ip" };
+PNAME(mout_peric0_noc_user_p) = { "oscclk", "dout_clkcmu_peric0_noc" };
+PNAME(mout_peric0_usi_p) = { "oscclk", "mout_peric0_ip_user" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_IP_USER, "mout_peric0_ip_user",
+ mout_peric0_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_NOC_USER, "mout_peric0_noc_user",
+ mout_peric0_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, 4, 1),
+ /* USI00 ~ USI08 */
+ MUX(CLK_MOUT_PERIC0_USI00_USI, "mout_peric0_usi00_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI, "mout_peric0_usi01_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI, "mout_peric0_usi02_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI, "mout_peric0_usi03_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI, "mout_peric0_usi04_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI, "mout_peric0_usi05_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI06_USI, "mout_peric0_usi06_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI07_USI, "mout_peric0_usi07_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI08_USI, "mout_peric0_usi08_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC0_USI_I2C, "mout_peric0_usi_i2c",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, 0, 1),
+ /* USI_I3C */
+ MUX(CLK_MOUT_PERIC0_I3C, "mout_peric0_i3c",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ /* USI00 ~ USI08 */
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI06_USI, "dout_peric0_usi06_usi",
+ "mout_peric0_usi06_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI07_USI, "dout_peric0_usi07_usi",
+ "mout_peric0_usi07_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI08_USI, "dout_peric0_usi08_usi",
+ "mout_peric0_usi08_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, 0, 4),
+ /* USI_I3C */
+ DIV(CLK_DOUT_PERIC0_I3C, "dout_peric0_i3c",
+ "mout_peric0_i3c", CLK_CON_DIV_DIV_CLK_PERIC0_I3C, 0, 4),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
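+	/* "noc" is the CMU bus clock, kept enabled for register access */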
+ .clk_name = "noc",
+};
+
+static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynosautov920_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynosautov920-cmu-peric0",
+ .data = &peric0_cmu_info,
+ },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver exynosautov920_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynosautov920-cmu",
+ .of_match_table = exynosautov920_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynosautov920_cmu_probe,
+};
+
+static int __init exynosautov920_cmu_init(void)
+{
+ return platform_driver_register(&exynosautov920_cmu_driver);
+}
+core_initcall(exynosautov920_cmu_init);
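A note on the driver shell above: the probe callback is __init, so the platform_driver structure is marked __refdata to silence the section-mismatch warning, and suppress_bind_attrs blocks re-binding from sysfs once the init sections are gone; core_initcall() then registers the driver early enough for consumers of these clocks to probe against it.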
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 4be879ab917e..cca3e630922c 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -430,6 +430,9 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
+/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+#define PLL1418X_MDIV_MASK (0x1FF)
+
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -438,7 +441,10 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ if (pll->type != pll_1418x)
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ else
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
@@ -456,7 +462,12 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
{
const struct samsung_pll_rate_table *rate;
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con3;
+ u32 mdiv_mask, pll_con3;
+
+ if (pll->type != pll_1418x)
+ mdiv_mask = PLL0822X_MDIV_MASK;
+ else
+ mdiv_mask = PLL1418X_MDIV_MASK;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -468,7 +479,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
/* Change PLL PMS values */
pll_con3 = readl_relaxed(pll->con_reg);
- pll_con3 &= ~((PLL0822X_MDIV_MASK << PLL0822X_MDIV_SHIFT) |
+ pll_con3 &= ~((mdiv_mask << PLL0822X_MDIV_SHIFT) |
(PLL0822X_PDIV_MASK << PLL0822X_PDIV_SHIFT) |
(PLL0822X_SDIV_MASK << PLL0822X_SDIV_SHIFT));
pll_con3 |= (rate->mdiv << PLL0822X_MDIV_SHIFT) |
@@ -1261,6 +1272,47 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
};
+/*
+ * PLL531X Clock Type
+ */
+/* Maximum lock time can be 500 * PDIV cycles */
+#define PLL531X_LOCK_FACTOR (500)
+#define PLL531X_MDIV_MASK (0x3FF)
+#define PLL531X_PDIV_MASK (0x3F)
+#define PLL531X_SDIV_MASK (0x7)
+#define PLL531X_FDIV_MASK (0xFFFFFFFF)
+#define PLL531X_MDIV_SHIFT (16)
+#define PLL531X_PDIV_SHIFT (8)
+#define PLL531X_SDIV_SHIFT (0)
+
+static unsigned long samsung_pll531x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pdiv, sdiv, fdiv, pll_con0, pll_con8;
+ u64 mdiv, fout = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+	pll_con8 = readl_relaxed(pll->con_reg + 0x20);	/* PLL_CON8 */
+ mdiv = (pll_con0 >> PLL531X_MDIV_SHIFT) & PLL531X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL531X_PDIV_SHIFT) & PLL531X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL531X_SDIV_SHIFT) & PLL531X_SDIV_MASK;
+ fdiv = (pll_con8 & PLL531X_FDIV_MASK);
+
+ if (fdiv >> 31)
+ mdiv--;
+
+ fout *= (mdiv << 24) + (fdiv >> 8);
+ do_div(fout, (pdiv << sdiv));
+ fout >>= 24;
+
+ return (unsigned long)fout;
+}
+
+static const struct clk_ops samsung_pll531x_clk_ops = {
+ .recalc_rate = samsung_pll531x_recalc_rate,
+};
+
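In plain terms, the arithmetic above computes FOUT = FIN * (MDIV + FDIV / 2^32) / (PDIV * 2^SDIV), where FDIV is a signed 32-bit fraction (a set sign bit borrows one from MDIV), carried out in 8.24 fixed point. A stand-alone user-space sketch of the same computation, with hypothetical example values:

    /* Sketch of the PLL531X rate math above (not kernel code). */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pll531x_rate(uint64_t fin, uint32_t mdiv, uint32_t pdiv,
                                 uint32_t sdiv, uint32_t fdiv)
    {
            uint64_t fout = fin;

            if (fdiv >> 31)         /* negative fraction: borrow from MDIV */
                    mdiv--;

            fout *= ((uint64_t)mdiv << 24) + (fdiv >> 8); /* 8.24 fixed point */
            fout /= (uint64_t)pdiv << sdiv;               /* PDIV * 2^SDIV */
            return fout >> 24;                            /* drop the fraction */
    }

    int main(void)
    {
            /* hypothetical: 38.4 MHz input, M = 200, P = 3, S = 1, F = 0 */
            printf("%llu\n",
                   (unsigned long long)pll531x_rate(38400000, 200, 3, 1, 0));
            return 0;   /* prints 1280000000, i.e. 1.28 GHz */
    }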
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1317,6 +1369,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
init.ops = &samsung_pll35xx_clk_ops;
break;
case pll_1417x:
+ case pll_1418x:
case pll_0818x:
case pll_0822x:
case pll_0516x:
@@ -1394,6 +1447,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll2650xx_clk_ops;
break;
+ case pll_531x:
+ init.ops = &samsung_pll531x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index ffd3d52c0dec..3481941ba07a 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -30,6 +30,7 @@ enum samsung_pll_type {
pll_2650x,
pll_2650xx,
pll_1417x,
+ pll_1418x,
pll_1450x,
pll_1451x,
pll_1452x,
@@ -41,6 +42,7 @@ enum samsung_pll_type {
pll_0516x,
pll_0517x,
pll_0518x,
+ pll_531x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index d3c85421f948..8c4c3a958a9f 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, jh7110_ispcrg_match);
static struct platform_driver jh7110_ispcrg_driver = {
.probe = jh7110_ispcrg_probe,
- .remove_new = jh7110_ispcrg_remove,
+ .remove = jh7110_ispcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-isp",
.of_match_table = jh7110_ispcrg_match,
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 53f7af234cc2..04eeed199087 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
/* enable power domain and clocks */
pm_runtime_enable(priv->dev);
- ret = pm_runtime_get_sync(priv->dev);
+ ret = pm_runtime_resume_and_get(priv->dev);
if (ret < 0)
return dev_err_probe(priv->dev, ret, "failed to turn on power\n");
@@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, jh7110_voutcrg_match);
static struct platform_driver jh7110_voutcrg_driver = {
.probe = jh7110_voutcrg_probe,
- .remove_new = jh7110_voutcrg_remove,
+ .remove = jh7110_voutcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-vout",
.of_match_table = jh7110_voutcrg_match,
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 7e2337297402..5fcc4c77c11f 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -2354,7 +2354,7 @@ static struct platform_driver stm32mp1_rcc_clocks_driver = {
.of_match_table = stm32mp1_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
- .remove_new = stm32mp1_rcc_clocks_remove,
+ .remove = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp1_clocks_init(void)
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index a9be4b56b2b7..0251618b82c8 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -635,7 +635,7 @@ static const struct dev_pm_ops tegra124_dfll_pm_ops = {
static struct platform_driver tegra124_dfll_fcpu_driver = {
.probe = tegra124_dfll_fcpu_probe,
- .remove_new = tegra124_dfll_fcpu_remove,
+ .remove = tegra124_dfll_fcpu_remove,
.driver = {
.name = "tegra124-dfll",
.of_match_table = tegra124_dfll_fcpu_of_match,
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 6121020b4b38..e305fcbac647 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -934,7 +934,7 @@ static struct platform_driver ti_adpll_driver = {
.of_match_table = ti_adpll_match,
},
.probe = ti_adpll_probe,
- .remove_new = ti_adpll_remove,
+ .remove = ti_adpll_remove,
};
static int __init ti_adpll_init(void)
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index d964e3affd42..0eab7f3e2eab 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
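The added of_node_put() balances the reference taken on clkspec.np earlier in the same loop (via of_parse_phandle_with_args()); once of_clk_get_from_provider() has resolved the clock, the node reference is no longer needed, on the success and failure paths alike.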
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index 45adac1b4630..033d4f78edc8 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -110,7 +110,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
init.parent_names = parent_names;
init.num_parents = num;
- deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
+ deprecated = !of_property_present(node, "assigned-clock-parents");
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index e9cd80e085dc..3f929cf8dd2f 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -262,9 +262,9 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
for (len = 0; rate_table[len].rate != 0; )
len++;
pll->rate_count = len;
- pll->rate_table = kmemdup(rate_table,
- pll->rate_count * sizeof(struct visconti_pll_rate_table),
- GFP_KERNEL);
+ pll->rate_table = kmemdup_array(rate_table,
+ pll->rate_count, sizeof(*pll->rate_table),
+ GFP_KERNEL);
WARN(!pll->rate_table, "%s: could not allocate rate table for %s\n", __func__, name);
init.ops = &visconti_pll_ops;
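The kmemdup_array() conversion above replaces an open-coded count * size multiplication; the helper does that multiplication with overflow protection. Roughly (a sketch of its behaviour, not the exact kernel source):

    /* Sketch: size_mul() saturates instead of wrapping on overflow. */
    static inline void *kmemdup_array_sketch(const void *src, size_t count,
                                             size_t element_size, gfp_t gfp)
    {
            return kmemdup(src, size_mul(element_size, count), gfp);
    }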
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
index aed7d22fae63..cf5cd3ad4647 100644
--- a/drivers/clk/x86/clk-fch.c
+++ b/drivers/clk/x86/clk-fch.c
@@ -115,6 +115,6 @@ static struct platform_driver fch_clk_driver = {
.suppress_bind_attrs = true,
},
.probe = fch_clk_probe,
- .remove_new = fch_clk_remove,
+ .remove = fch_clk_remove,
};
builtin_platform_driver(fch_clk_driver);
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 5ec9255e33fa..99291ba65da7 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -373,6 +373,6 @@ static struct platform_driver plt_clk_driver = {
.name = "clk-pmc-atom",
},
.probe = plt_clk_probe,
- .remove_new = plt_clk_remove,
+ .remove = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 19eb3fb7ae31..7a0269bdfbb3 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -1257,7 +1257,7 @@ static struct platform_driver clk_wzrd_driver = {
.pm = &clk_wzrd_dev_pm_ops,
},
.probe = clk_wzrd_probe,
- .remove_new = clk_wzrd_remove,
+ .remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index d983fab12756..81501b48412e 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -729,7 +729,7 @@ static struct platform_driver xvcu_driver = {
.of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
- .remove_new = xvcu_remove,
+ .remove = xvcu_remove,
};
module_platform_driver(xvcu_driver);
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 82338773602c..b4330a01a566 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -25,6 +25,10 @@
#include <asm/io.h>
#include <asm/time.h>
+static void *suspend_resume_cb_data;
+
+static void (*suspend_resume_callback)(void *data, bool suspend);
+
/*
* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
@@ -58,6 +62,32 @@ u32 acpi_pm_read_verified(void)
return v2;
}
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data)
+{
+ suspend_resume_callback = cb;
+ suspend_resume_cb_data = data;
+}
+EXPORT_SYMBOL_GPL(acpi_pmtmr_register_suspend_resume_callback);
+
+void acpi_pmtmr_unregister_suspend_resume_callback(void)
+{
+ suspend_resume_callback = NULL;
+ suspend_resume_cb_data = NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_pmtmr_unregister_suspend_resume_callback);
+
+static void acpi_pm_suspend(struct clocksource *cs)
+{
+ if (suspend_resume_callback)
+ suspend_resume_callback(suspend_resume_cb_data, true);
+}
+
+static void acpi_pm_resume(struct clocksource *cs)
+{
+ if (suspend_resume_callback)
+ suspend_resume_callback(suspend_resume_cb_data, false);
+}
+
static u64 acpi_pm_read(struct clocksource *cs)
{
return (u64)read_pmtmr();
@@ -69,6 +99,8 @@ static struct clocksource clocksource_acpi_pm = {
.read = acpi_pm_read,
.mask = (u64)ACPI_PM_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .suspend = acpi_pm_suspend,
+ .resume = acpi_pm_resume,
};
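A minimal sketch of a consumer of the registration API added above; the module, its names, and the header location are hypothetical, made up for illustration:

    /* Hypothetical client notified around PM-timer suspend/resume;
     * the declarations are assumed to live in <linux/acpi_pmtmr.h>. */
    #include <linux/module.h>
    #include <linux/acpi_pmtmr.h>

    static void example_pmtmr_notify(void *data, bool suspend)
    {
            pr_info("pmtmr %s\n", suspend ? "suspending" : "resuming");
    }

    static int __init example_init(void)
    {
            acpi_pmtmr_register_suspend_resume_callback(example_pmtmr_notify,
                                                        NULL);
            return 0;
    }

    static void __exit example_exit(void)
    {
            acpi_pmtmr_unregister_suspend_resume_callback();
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");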
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aeafc74181f0..03733101e231 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1594,7 +1594,6 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
{
struct arch_timer_mem *timer_mem;
struct arch_timer_mem_frame *frame;
- struct device_node *frame_node;
struct resource res;
int ret = -EINVAL;
u32 rate;
@@ -1608,33 +1607,29 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
timer_mem->cntctlbase = res.start;
timer_mem->size = resource_size(&res);
- for_each_available_child_of_node(np, frame_node) {
+ for_each_available_child_of_node_scoped(np, frame_node) {
u32 n;
struct arch_timer_mem_frame *frame;
if (of_property_read_u32(frame_node, "frame-number", &n)) {
pr_err(FW_BUG "Missing frame-number.\n");
- of_node_put(frame_node);
goto out;
}
if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
ARCH_TIMER_MEM_MAX_FRAMES - 1);
- of_node_put(frame_node);
goto out;
}
frame = &timer_mem->frame[n];
if (frame->valid) {
pr_err(FW_BUG "Duplicated frame-number.\n");
- of_node_put(frame_node);
goto out;
}
- if (of_address_to_resource(frame_node, 0, &res)) {
- of_node_put(frame_node);
+ if (of_address_to_resource(frame_node, 0, &res))
goto out;
- }
+
frame->cntbase = res.start;
frame->size = resource_size(&res);
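The conversion relies on for_each_available_child_of_node_scoped() declaring the iterator with scope-based cleanup, so the child reference is dropped automatically on every exit path, including the gotos; that is why the explicit of_node_put() calls could be deleted. Roughly:

    /* Sketch: the _scoped iterator expands to approximately
     *
     *   for (struct device_node *frame_node __free(device_node) =
     *                of_get_next_available_child(np, NULL);
     *        frame_node != NULL;
     *        frame_node = of_get_next_available_child(np, frame_node))
     *
     * so 'goto out' no longer leaks a node reference. */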
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 5b39d3701fa3..8f97ab0b01ec 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -210,6 +210,7 @@ static int __init asm9260_timer_init(struct device_node *np)
DRIVER_NAME, &event_dev);
if (ret) {
pr_err("Failed to setup irq!\n");
+ clk_disable_unprepare(clk);
return ret;
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index b2a080647e41..99177835cade 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -137,7 +137,21 @@ static int hv_stimer_init(unsigned int cpu)
ce->name = "Hyper-V clockevent";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->cpumask = cpumask_of(cpu);
- ce->rating = 1000;
+
+ /*
+ * Lower the rating of the Hyper-V timer in a TDX VM without paravisor,
+ * so the local APIC timer (lapic_clockevent) is the default timer in
+ * such a VM. The Hyper-V timer is not preferred in such a VM because
+ * it depends on the slow VM Reference Counter MSR (the Hyper-V TSC
+	 * page is not enabled in such a VM because the VM uses Invariant TSC
+ * as a better clocksource and it's challenging to mark the Hyper-V
+ * TSC page shared in very early boot).
+ */
+ if (!ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+ ce->rating = 90;
+ else
+ ce->rating = 1000;
+
ce->set_state_shutdown = hv_ce_shutdown;
ce->set_state_oneshot = hv_ce_set_oneshot;
ce->set_next_event = hv_ce_set_next_event;
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index d4350bb10b83..39f7c2d736d1 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,13 +20,6 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
-/*
- * Handle PIT quirk in pit_shutdown() where zeroing the counter register
- * restarts the PIT, negating the shutdown. On platforms with the quirk,
- * platform specific code can set this to false.
- */
-bool i8253_clear_counter_on_shutdown __ro_after_init = true;
-
#ifdef CONFIG_CLKSRC_I8253
/*
 * Since the PIT overflows every tick, it's not very useful
@@ -108,21 +101,47 @@ int __init clocksource_i8253_init(void)
#endif
#ifdef CONFIG_CLKEVT_I8253
-static int pit_shutdown(struct clock_event_device *evt)
+void clockevent_i8253_disable(void)
{
- if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
- return 0;
-
raw_spin_lock(&i8253_lock);
+ /*
+ * Writing the MODE register should stop the counter, according to
+ * the datasheet. This appears to work on real hardware (well, on
+ * modern Intel and AMD boxes; I didn't dig the Pegasos out of the
+ * shed).
+ *
+ * However, some virtual implementations differ, and the MODE change
+ * doesn't have any effect until either the counter is written (KVM
+ * in-kernel PIT) or the next interrupt (QEMU). And in those cases,
+ * it may not stop the *count*, only the interrupts. Although in
+ * the virt case, that probably doesn't matter, as the value of the
+ * counter will only be calculated on demand if the guest reads it;
+ * it's the interrupts which cause steal time.
+ *
+ * Hyper-V apparently has a bug where even in mode 0, the IRQ keeps
+ * firing repeatedly if the counter is running. But it *does* do the
+ * right thing when the MODE register is written.
+ *
+ * So: write the MODE and then load the counter, which ensures that
+ * the IRQ is stopped on those buggy virt implementations. And then
+ * write the MODE again, which is the right way to stop it.
+ */
outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
- if (i8253_clear_counter_on_shutdown) {
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- }
+ outb_p(0x30, PIT_MODE);
raw_spin_unlock(&i8253_lock);
+}
+
+static int pit_shutdown(struct clock_event_device *evt)
+{
+ if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
+ return 0;
+
+ clockevent_i8253_disable();
return 0;
}
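For reference, the 0x30 control word written to PIT_MODE above decodes as: channel 0, low-byte/high-byte access, mode 0 (interrupt on terminal count), binary counting. A sketch of the i8253/i8254 control-word fields:

    /* i8253/i8254 control word (sketch):
     *   bits 7:6  channel select
     *   bits 5:4  access mode (3 = low byte, then high byte)
     *   bits 3:1  operating mode (0 = interrupt on terminal count)
     *   bit  0    BCD flag (0 = 16-bit binary)
     */
    #define PIT_CW_CHANNEL(c)       ((c) << 6)
    #define PIT_CW_ACCESS_LOHI      (0x3 << 4)
    #define PIT_CW_MODE(m)          ((m) << 1)

    /* PIT_CW_CHANNEL(0) | PIT_CW_ACCESS_LOHI | PIT_CW_MODE(0) == 0x30 */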
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
index 9f7c280a1336..e0ec33307c84 100644
--- a/drivers/clocksource/ingenic-ost.c
+++ b/drivers/clocksource/ingenic-ost.c
@@ -93,14 +93,10 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- ost->clk = devm_clk_get(dev, "ost");
+ ost->clk = devm_clk_get_enabled(dev, "ost");
if (IS_ERR(ost->clk))
return PTR_ERR(ost->clk);
- err = clk_prepare_enable(ost->clk);
- if (err)
- return err;
-
/* Clear counter high/low registers */
if (soc_info->is64bit)
regmap_write(map, TCU_REG_OST_CNTL, 0);
@@ -129,7 +125,6 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
err = clocksource_register_hz(cs, rate);
if (err) {
dev_err(dev, "clocksource registration failed");
- clk_disable_unprepare(ost->clk);
return err;
}
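devm_clk_get_enabled() bundles devm_clk_get() with clk_prepare_enable() and registers a devres action that disables and unprepares the clock on driver detach, which is why both the manual enable and the error-path clk_disable_unprepare() calls can go.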
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a4a991101fa3..a3fe98cd3838 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -120,7 +120,7 @@ static int jcore_pit_local_init(unsigned cpu)
static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
{
- struct jcore_pit *pit = this_cpu_ptr(dev_id);
+ struct jcore_pit *pit = dev_id;
if (clockevent_state_oneshot(&pit->ced))
jcore_pit_disable(pit);
@@ -168,9 +168,8 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
- err = request_irq(pit_irq, jcore_timer_interrupt,
- IRQF_TIMER | IRQF_PERCPU,
- "jcore_pit", jcore_pit_percpu);
+ err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
+ "jcore_pit", jcore_pit_percpu);
if (err) {
pr_err("pit irq request failed: %d\n", err);
free_percpu(jcore_pit_percpu);
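The handler change above follows from this conversion: for per-CPU interrupts the IRQ core resolves the registered percpu pointer and passes the handler the slot belonging to the CPU taking the interrupt, making the this_cpu_ptr() dereference redundant. Each CPU still has to enable its own copy of the interrupt via enable_percpu_irq(), which happens in the CPU-online path outside this hunk.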
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index ca7a06489c40..b8a1cf59b9d6 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -435,7 +435,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
- goto out_kfree;
+ goto out_clk_unprepare;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
@@ -465,13 +465,15 @@ static int __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
if (err)
- goto out_kfree;
+ goto out_clk_unprepare;
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
+out_clk_unprepare:
+ clk_disable_unprepare(ttcce->ttc.clk);
out_kfree:
kfree(ttcce);
return err;
diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c
index b4afe3a67583..eac4c95c6127 100644
--- a/drivers/clocksource/timer-qcom.c
+++ b/drivers/clocksource/timer-qcom.c
@@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np)
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
+ iounmap(cpu0_base);
pr_err("Unknown frequency\n");
return -EINVAL;
}
@@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- return msm_timer_init(freq, 32, irq, !!percpu_offset);
+ ret = msm_timer_init(freq, 32, irq, !!percpu_offset);
+ if (ret)
+ iounmap(cpu0_base);
+
+ return ret;
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 10cda6f2fe1d..2561b215432a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -231,9 +231,7 @@ if X86
source "drivers/cpufreq/Kconfig.x86"
endif
-if ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
-endif
if PPC32 || PPC64
source "drivers/cpufreq/Kconfig.powerpc"
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 96b404ce829f..5f7e13e60c80 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,7 @@
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
depends on NVMEM_SUNXI_SID
select PM_OPP
help
@@ -26,15 +26,17 @@ config ARM_APPLE_SOC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU && CPUFREQ_DT
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
config ARM_ARMADA_8K_CPUFREQ
tristate "Armada 8K CPUFreq driver"
- depends on ARCH_MVEBU && CPUFREQ_DT
- select ARMADA_AP_CPU_CLK
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
+ select ARMADA_AP_CPU_CLK if COMMON_CLK
help
This enables the CPUFreq driver support for Marvell
Armada8k SOCs.
@@ -56,7 +58,7 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- depends on ARCH_VEXPRESS_SPC
+ depends on ARCH_VEXPRESS_SPC || COMPILE_TEST
select PM_OPP
help
This adds the CPUfreq driver support for Versatile Express
@@ -75,7 +77,8 @@ config ARM_BRCMSTB_AVS_CPUFREQ
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
+ depends on ARCH_HIGHBANK || COMPILE_TEST
+ depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
@@ -96,7 +99,8 @@ config ARM_IMX6Q_CPUFREQ
config ARM_IMX_CPUFREQ_DT
tristate "Freescale i.MX8M cpufreq support"
- depends on ARCH_MXC && CPUFREQ_DT
+ depends on CPUFREQ_DT
+ depends on ARCH_MXC || COMPILE_TEST
help
This adds cpufreq driver support for Freescale i.MX7/i.MX8M
series SoCs, based on cpufreq-dt.
@@ -111,7 +115,8 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MEDIATEK_CPUFREQ
tristate "CPU Frequency scaling support for MediaTek SoCs"
- depends on ARCH_MEDIATEK && REGULATOR
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on REGULATOR
select PM_OPP
help
This adds the CPUFreq driver support for MediaTek SoCs.
@@ -130,12 +135,12 @@ config ARM_MEDIATEK_CPUFREQ_HW
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
default ARCH_OMAP2PLUS
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
depends on NVMEM_QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
@@ -166,7 +171,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
- depends on CPU_S3C6410
+ depends on CPU_S3C6410 || COMPILE_TEST
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -175,7 +180,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
- depends on CPU_S5PV210
+ depends on CPU_S5PV210 || COMPILE_TEST
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -199,14 +204,15 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
config ARM_STI_CPUFREQ
tristate "STi CPUFreq support"
- depends on CPUFREQ_DT && SOC_STIH407
+ depends on CPUFREQ_DT
+ depends on SOC_STIH407 || COMPILE_TEST
help
This driver uses the generic OPP framework to match the running
platform with a predefined set of suitable values. If not provided
@@ -216,34 +222,38 @@ config ARM_STI_CPUFREQ
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
config ARM_TEGRA186_CPUFREQ
tristate "Tegra186 CPUFreq support"
- depends on ARCH_TEGRA && TEGRA_BPMP
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on TEGRA_BPMP
help
This adds the CPUFreq driver support for Tegra186 SOCs.
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP
+ depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on TEGRA_BPMP
default y
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
default y
help
This driver enables valid OPPs on the running platform based on
@@ -255,7 +265,7 @@ config ARM_TI_CPUFREQ
config ARM_PXA2xx_CPUFREQ
tristate "Intel PXA2xx CPUfreq driver"
- depends on PXA27x || PXA25x
+ depends on PXA27x || PXA25x || COMPILE_TEST
help
This adds the CPUFreq driver support for Intel PXA2xx SOCs.
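
All the Kconfig hunks above apply one pattern: the architecture gate moves to its own "depends on" line and is relaxed with "|| COMPILE_TEST", so each driver gains build coverage on foreign architectures without becoming usable there at runtime. A minimal sketch (illustrative only, not part of this patch) of how such a driver keeps a platform-only path compile-safe:

#include <linux/errno.h>
#include <linux/kconfig.h>

/* Under COMPILE_TEST, IS_ENABLED() folds to a compile-time constant,
 * so the platform-only branch is still type-checked but eliminated. */
static int example_probe_check(void)
{
        if (!IS_ENABLED(CONFIG_ARCH_VEXPRESS_SPC))
                return -ENODEV;         /* built for coverage only */
        return 0;                       /* would touch SPC hardware here */
}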
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a8ca625a98b8..0f04feb6cafa 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -642,10 +642,16 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- highest_perf = amd_get_highest_perf();
- else
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+ } else {
highest_perf = perf_caps.highest_perf;
+ }
nominal_perf = perf_caps.nominal_perf;
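
With the CPPC_HIGHEST_PERF_* constants removed further down, the boost numerator now comes from amd_get_boost_ratio_numerator() and feeds the same fixed-point math amd_pstate_init_freq() uses. A sketch of that arithmetic (function and parameter names are illustrative):

#include <linux/math64.h>
#include <linux/sched.h>

/* boost_ratio is a 2^SCHED_CAPACITY_SHIFT (1024) fixed-point fraction,
 * numerator / nominal_perf, as in amd_pstate_init_freq() below. */
static u32 example_max_freq_khz(u64 numerator, u32 nominal_perf,
                                u32 nominal_freq_mhz)
{
        u32 boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT,
                                  nominal_perf);

        return (nominal_freq_mhz * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
}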
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index b7318669485e..f66701514d90 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -54,12 +54,14 @@ static void amd_pstate_ut_acpi_cpc_valid(u32 index);
static void amd_pstate_ut_check_enabled(u32 index);
static void amd_pstate_ut_check_perf(u32 index);
static void amd_pstate_ut_check_freq(u32 index);
+static void amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
- {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq },
+ {"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver }
};
static bool get_shared_mem(void)
@@ -257,6 +259,43 @@ skip_test:
cpufreq_cpu_put(policy);
}
+static int amd_pstate_set_mode(enum amd_pstate_mode mode)
+{
+ const char *mode_str = amd_pstate_get_mode_string(mode);
+
+ pr_debug("->setting mode to %s\n", mode_str);
+
+ return amd_pstate_update_status(mode_str, strlen(mode_str));
+}
+
+static void amd_pstate_ut_check_driver(u32 index)
+{
+ enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ int ret;
+
+ for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
+ ret = amd_pstate_set_mode(mode1);
+ if (ret)
+ goto out;
+ for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
+ if (mode1 == mode2)
+ continue;
+ ret = amd_pstate_set_mode(mode2);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_ut_cases[index].result = ret ?
+ AMD_PSTATE_UT_RESULT_FAIL :
+ AMD_PSTATE_UT_RESULT_PASS;
+}
+
static int __init amd_pstate_ut_init(void)
{
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 259a917da75f..15e201d5e911 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -52,26 +52,12 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
-#define CPPC_HIGHEST_PERF_PERFORMANCE 196
-#define CPPC_HIGHEST_PERF_DEFAULT 166
#define AMD_CPPC_EPP_PERFORMANCE 0x00
#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
#define AMD_CPPC_EPP_POWERSAVE 0xFF
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_UNDEFINED] = "undefined",
[AMD_PSTATE_DISABLE] = "disable",
@@ -81,6 +67,14 @@ static const char * const amd_pstate_mode_string[] = {
NULL,
};
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
+{
+ if (mode < 0 || mode >= AMD_PSTATE_MAX)
+ return NULL;
+ return amd_pstate_mode_string[mode];
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
+
struct quirk_entry {
u32 nominal_freq;
u32 lowest_freq;
@@ -372,43 +366,17 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
-static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- /*
- * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
- * the highest performance level is set to 196.
- * https://bugzilla.kernel.org/show_bug.cgi?id=218759
- */
- if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
- return CPPC_HIGHEST_PERF_PERFORMANCE;
-
- return CPPC_HIGHEST_PERF_DEFAULT;
-}
-
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
- u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- /* For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as
- * the default max perf.
- */
- if (cpudata->hw_prefcore)
- highest_perf = amd_pstate_highest_perf_set(cpudata);
- else
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -420,19 +388,13 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
- u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- if (cpudata->hw_prefcore)
- highest_perf = amd_pstate_highest_perf_set(cpudata);
- else
- highest_perf = cppc_perf.highest_perf;
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
+ WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -554,12 +516,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
}
if (value == prev)
- return;
+ goto cpufreq_policy_put;
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_update_perf(cpudata, min_perf, des_perf,
max_perf, fast_switch);
+
+cpufreq_policy_put:
+ cpufreq_cpu_put(policy);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy)
@@ -656,7 +621,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long max_perf, min_perf, des_perf,
cap_perf, lowest_nonlinear_perf;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
@@ -803,66 +773,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
}
static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-/*
- * Get the highest performance register value.
- * @cpu: CPU from which to get highest performance.
- * @highest_perf: Return address.
- *
- * Return: 0 for success, -EIO otherwise.
- */
-static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
-{
- int ret;
-
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 cap1;
-
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
- if (ret)
- return ret;
- WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- } else {
- u64 cppc_highest_perf;
-
- ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
- if (ret)
- return ret;
- WRITE_ONCE(*highest_perf, cppc_highest_perf);
- }
-
- return (ret);
-}
-
#define CPPC_MAX_PERF U8_MAX
static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
{
- int ret, prio;
- u32 highest_perf;
-
- ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
- if (ret)
+ /* user disabled or not detected */
+ if (!amd_pstate_prefcore)
return;
cpudata->hw_prefcore = true;
- /* check if CPPC preferred core feature is enabled*/
- if (highest_perf < CPPC_MAX_PERF)
- prio = (int)highest_perf;
- else {
- pr_debug("AMD CPPC preferred core is unsupported!\n");
- cpudata->hw_prefcore = false;
- return;
- }
-
- if (!amd_pstate_prefcore)
- return;
/*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
- sched_set_itmt_core_prio(prio, cpudata->cpu);
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
schedule_work(&sched_prefcore_work);
}
@@ -870,22 +796,27 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
- mutex_lock(&amd_pstate_driver_lock);
- if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
- goto free_cpufreq_put;
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
+
+ if (!amd_pstate_prefcore)
+ return;
- ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_get_highest_perf(cpu, &cur_high);
if (ret)
goto free_cpufreq_put;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
- if (prev_high != cur_high) {
- highest_perf_changed = true;
+ highest_perf_changed = (prev_high != cur_high);
+ if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
if (cur_high < CPPC_MAX_PERF)
@@ -949,8 +880,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
- u32 min_freq;
- u32 highest_perf, max_freq;
+ u32 min_freq, max_freq;
+ u64 numerator;
u32 nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
u32 boost_ratio, lowest_nonlinear_ratio;
@@ -972,8 +903,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
nominal_perf = READ_ONCE(cpudata->nominal_perf);
- highest_perf = READ_ONCE(cpudata->highest_perf);
- boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+ boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -1028,12 +961,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
- amd_pstate_init_prefcore(cpudata);
-
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1349,7 +1282,7 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
-static int amd_pstate_update_status(const char *buf, size_t size)
+int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1366,6 +1299,7 @@ static int amd_pstate_update_status(const char *buf, size_t size)
return 0;
}
+EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1483,12 +1417,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
- amd_pstate_init_prefcore(cpudata);
-
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1555,7 +1489,7 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
@@ -1605,7 +1539,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
* This return value can only be negative for shared_memory
* systems where EPP register read/write is not supported.
*/
- return;
+ return epp;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1618,12 +1552,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_set_epp(cpudata, epp);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
@@ -1633,7 +1568,9 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy);
+ if (ret)
+ return ret;
/*
* policy->cur is never updated with the amd_pstate_epp driver, but it
@@ -1947,6 +1884,12 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_update_perf, cppc_update_perf);
}
+ if (amd_pstate_prefcore) {
+ ret = amd_detect_prefcore(&amd_pstate_prefcore);
+ if (ret)
+ return ret;
+ }
+
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cc8bb2bc325a..cd573bc6b6db 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -103,4 +103,18 @@ struct amd_cpudata {
bool boost_state;
};
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_update_status(const char *buf, size_t size);
+
#endif /* _LINUX_AMD_PSTATE_H */
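
The header now carries the amd_pstate_mode enum plus the two helpers made non-static above, so other modules can drive mode changes; the unit test above is the first consumer. A hypothetical caller, mirroring amd_pstate_set_mode() from the test:

#include <linux/errno.h>
#include <linux/string.h>
#include "amd-pstate.h"

static int example_switch_mode(enum amd_pstate_mode mode)
{
        const char *s = amd_pstate_get_mode_string(mode);

        if (!s)
                return -EINVAL;
        /* Same path a write to the sysfs "status" attribute takes. */
        return amd_pstate_update_status(s, strlen(s));
}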
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index af34c22fa273..4dcacab9b4bf 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -85,7 +85,7 @@ static const struct apple_soc_cpufreq_info soc_default_info = {
.cur_pstate_mask = 0, /* fallback */
};
-static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index ce5a5641b6dd..7a979db81f09 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- struct cpumask cpus;
+ static struct cpumask cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bafa32dd375d..1a5ad184d28f 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -224,9 +224,9 @@ static void __init cppc_freq_invariance_init(void)
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
+ .sched_runtime = NSEC_PER_MSEC,
+ .sched_deadline = 10 * NSEC_PER_MSEC,
+ .sched_period = 10 * NSEC_PER_MSEC,
};
int ret;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cac379ba006d..18942bfe9c95 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -166,6 +166,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm6375", },
{ .compatible = "qcom,sm7225", },
+ { .compatible = "qcom,sm7325", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6532c4d71338..983443396f8f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -69,7 +69,6 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
static const char *find_supply_name(struct device *dev)
{
struct device_node *np __free(device_node) = of_node_get(dev->of_node);
- struct property *pp;
int cpu = dev->id;
/* This must be valid for sure */
@@ -77,14 +76,10 @@ static const char *find_supply_name(struct device *dev)
return NULL;
/* Try "cpu0" for older DTs */
- if (!cpu) {
- pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp)
- return "cpu0";
- }
+ if (!cpu && of_property_present(np, "cpu0-supply"))
+ return "cpu0";
- pp = of_find_property(np, "cpu-supply", NULL);
- if (pp)
+ if (of_property_present(np, "cpu-supply"))
return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
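
of_property_present() is the boolean replacement for of_find_property() when only a property's existence matters, which removes the struct property temporary; the pmac64, sti and sun50i hunks below are the same conversion. The resulting lookup, as a sketch:

#include <linux/of.h>

static const char *example_supply_name(struct device_node *np, int cpu)
{
        /* Presence check only -- no property pointer is needed. */
        if (!cpu && of_property_present(np, "cpu0-supply"))
                return "cpu0";
        if (of_property_present(np, "cpu-supply"))
                return "cpu";
        return NULL;
}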
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04fc786dd2c0..f98c9438760c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -575,30 +575,11 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
return policy->transition_delay_us;
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (latency) {
- unsigned int max_delay_us = 2 * MSEC_PER_SEC;
+ if (latency)
+ /* Give a 50% breathing room between updates */
+ return latency + (latency >> 1);
- /*
- * If the platform already has high transition_latency, use it
- * as-is.
- */
- if (latency > max_delay_us)
- return latency;
-
- /*
- * For platforms that can change the frequency very fast (< 2
- * us), the above formula gives a decent transition delay. But
- * for platforms where transition_latency is in milliseconds, it
- * ends up giving unrealistic values.
- *
- * Cap the default transition delay to 2 ms, which seems to be
- * a reasonable amount of time after which we should reevaluate
- * the frequency.
- */
- return min(latency * LATENCY_MULTIPLIER, max_delay_us);
- }
-
- return LATENCY_MULTIPLIER;
+ return USEC_PER_MSEC;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
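
The new default is simply the latency plus half of itself, with a 1 ms fallback when the platform reports no latency; e.g. a transition_latency of 500000 ns gives 500 us + 250 us = 750 us. The arithmetic as a sketch:

#include <linux/time64.h>

static unsigned int example_delay_us(unsigned int latency_ns)
{
        unsigned int latency = latency_ns / NSEC_PER_USEC;

        /* 50% headroom on top of the reported latency. */
        return latency ? latency + (latency >> 1) : USEC_PER_MSEC;
}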
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c0278d023cfc..aaea9a39eced 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -16,6 +16,7 @@
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
+#include <linux/sched/smt.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
@@ -215,6 +216,7 @@ struct global_params {
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
+ * @capacity_perf: Highest perf used for scale invariance
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
@@ -253,6 +255,7 @@ struct cpudata {
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
+ unsigned int capacity_perf;
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
@@ -295,6 +298,7 @@ static int hwp_mode_bdw __ro_after_init;
static bool per_cpu_limits __ro_after_init;
static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
+static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -934,6 +938,139 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
+static struct cpudata *hybrid_max_perf_cpu __read_mostly;
+/*
+ * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
+ * and the x86 arch scale-invariance information from concurrent updates.
+ */
+static DEFINE_MUTEX(hybrid_capacity_lock);
+
+static void hybrid_set_cpu_capacity(struct cpudata *cpu)
+{
+ arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
+ hybrid_max_perf_cpu->capacity_perf,
+ cpu->capacity_perf,
+ cpu->pstate.max_pstate_physical);
+
+ pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
+ cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
+ cpu->pstate.max_pstate_physical);
+}
+
+static void hybrid_clear_cpu_capacity(unsigned int cpunum)
+{
+ arch_set_cpu_capacity(cpunum, 1, 1, 1, 1);
+}
+
+static void hybrid_get_capacity_perf(struct cpudata *cpu)
+{
+ if (READ_ONCE(global.no_turbo)) {
+ cpu->capacity_perf = cpu->pstate.max_pstate_physical;
+ return;
+ }
+
+ cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
+}
+
+static void hybrid_set_capacity_of_cpus(void)
+{
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (cpu)
+ hybrid_set_cpu_capacity(cpu);
+ }
+}
+
+static void hybrid_update_cpu_capacity_scaling(void)
+{
+ struct cpudata *max_perf_cpu = NULL;
+ unsigned int max_cap_perf = 0;
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (!cpu)
+ continue;
+
+ /*
+ * During initialization, CPU performance at full capacity needs
+ * to be determined.
+ */
+ if (!hybrid_max_perf_cpu)
+ hybrid_get_capacity_perf(cpu);
+
+ /*
+ * If hybrid_max_perf_cpu is not NULL at this point, it is
+ * being replaced, so don't take it into account when looking
+ * for the new one.
+ */
+ if (cpu == hybrid_max_perf_cpu)
+ continue;
+
+ if (cpu->capacity_perf > max_cap_perf) {
+ max_cap_perf = cpu->capacity_perf;
+ max_perf_cpu = cpu;
+ }
+ }
+
+ if (max_perf_cpu) {
+ hybrid_max_perf_cpu = max_perf_cpu;
+ hybrid_set_capacity_of_cpus();
+ } else {
+ pr_info("Found no CPUs with nonzero maximum performance\n");
+ /* Revert to the flat CPU capacity structure. */
+ for_each_online_cpu(cpunum)
+ hybrid_clear_cpu_capacity(cpunum);
+ }
+}
+
+static void __hybrid_init_cpu_capacity_scaling(void)
+{
+ hybrid_max_perf_cpu = NULL;
+ hybrid_update_cpu_capacity_scaling();
+}
+
+static void hybrid_init_cpu_capacity_scaling(void)
+{
+ bool disable_itmt = false;
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ /*
+ * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
+ * scaling has been enabled already and the driver is just changing the
+ * operation mode.
+ */
+ if (hybrid_max_perf_cpu) {
+ __hybrid_init_cpu_capacity_scaling();
+ goto unlock;
+ }
+
+ /*
+ * On hybrid systems, use asym capacity instead of ITMT, but because
+ * the capacity of SMT threads is not deterministic even approximately,
+ * do not do that when SMT is in use.
+ */
+ if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
+ __hybrid_init_cpu_capacity_scaling();
+ disable_itmt = true;
+ }
+
+unlock:
+ mutex_unlock(&hybrid_capacity_lock);
+
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity.
+ */
+ if (disable_itmt)
+ sched_clear_itmt_support();
+}
+
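
The capacity handed to the scheduler for each CPU is normalized against hybrid_max_perf_cpu; the actual plumbing sits in arch code behind arch_set_cpu_capacity(), but the effective scaling follows the usual SCHED_CAPACITY_SCALE fixed-point form. A hedged sketch of that normalization (an assumption about the arch side, not taken from this patch):

#include <linux/sched.h>

static unsigned long example_capacity(unsigned int perf,
                                      unsigned int max_perf)
{
        /* The highest-performing CPU ends up at SCHED_CAPACITY_SCALE
         * (1024); every other CPU scales linearly below it. */
        return (unsigned long)perf * SCHED_CAPACITY_SCALE / max_perf;
}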
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
@@ -962,6 +1099,43 @@ static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
}
}
+static void hybrid_update_capacity(struct cpudata *cpu)
+{
+ unsigned int max_cap_perf;
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu)
+ goto unlock;
+
+ /*
+ * The maximum performance of the CPU may have changed, but assume
+ * that the performance of the other CPUs has not changed.
+ */
+ max_cap_perf = hybrid_max_perf_cpu->capacity_perf;
+
+ intel_pstate_get_hwp_cap(cpu);
+
+ hybrid_get_capacity_perf(cpu);
+ /* Should hybrid_max_perf_cpu be replaced by this CPU? */
+ if (cpu->capacity_perf > max_cap_perf) {
+ hybrid_max_perf_cpu = cpu;
+ hybrid_set_capacity_of_cpus();
+ goto unlock;
+ }
+
+ /* If this CPU is hybrid_max_perf_cpu, should it be replaced? */
+ if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) {
+ hybrid_update_cpu_capacity_scaling();
+ goto unlock;
+ }
+
+ hybrid_set_cpu_capacity(cpu);
+
+unlock:
+ mutex_unlock(&hybrid_capacity_lock);
+}
+
static void intel_pstate_hwp_set(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
@@ -1070,6 +1244,22 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu) {
+ mutex_unlock(&hybrid_capacity_lock);
+
+ return;
+ }
+
+ if (hybrid_max_perf_cpu == cpu)
+ hybrid_update_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
+
+ /* Reset the capacity of the CPU going offline to the initial value. */
+ hybrid_clear_cpu_capacity(cpu->cpu);
}
#define POWER_CTL_EE_ENABLE 1
@@ -1165,21 +1355,46 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpudata *cpudata;
if (!policy)
return;
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+ cpudata = all_cpu_data[cpu];
+
+ __intel_pstate_update_max_freq(cpudata, policy);
+
+ /* Prevent the driver from being unregistered now. */
+ mutex_lock(&intel_pstate_driver_lock);
cpufreq_cpu_release(policy);
+
+ hybrid_update_capacity(cpudata);
+
+ mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu)
- intel_pstate_update_limits(cpu);
+ for_each_possible_cpu(cpu) {
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+
+ if (!policy)
+ continue;
+
+ __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+
+ cpufreq_cpu_release(policy);
+ }
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (hybrid_max_perf_cpu)
+ __hybrid_init_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
}
/************************** sysfs begin ************************/
@@ -1618,6 +1833,13 @@ static void intel_pstate_notify_work(struct work_struct *work)
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
+
+ /*
+ * The driver will not be unregistered while this function is
+ * running, so update the capacity without acquiring the driver
+ * lock.
+ */
+ hybrid_update_capacity(cpudata);
}
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
@@ -2034,8 +2256,10 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling)
+ if (cpu->pstate.scaling != perf_ctl_scaling) {
intel_pstate_hybrid_hwp_adjust(cpu);
+ hwp_is_hybrid = true;
+ }
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
@@ -2425,6 +2649,10 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_ICELAKE_X, core_funcs),
X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
{}
};
#endif
@@ -2703,6 +2931,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
*/
intel_pstate_hwp_reenable(cpu);
cpu->suspended = false;
+
+ hybrid_update_capacity(cpu);
}
return 0;
@@ -3143,6 +3373,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
+ hybrid_init_cpu_capacity_scaling();
+
return 0;
}
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 5f79b6de127c..6b5e6798d9a2 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -176,7 +176,7 @@ static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data);
static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra)
{
int retries;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
unsigned int package = cpu_data[cpu].package;
union smc_message msg, last;
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index f9306410a07f..690da85c4865 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -238,4 +238,5 @@ bail_noprops:
module_init(maple_cpufreq_init);
+MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 3a1aadaa723c..663f61565cf7 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -738,7 +738,7 @@ static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
};
/* List of machines supported by this driver */
-static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+static const struct of_device_id mtk_cpufreq_machines[] __initconst __maybe_unused = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 3458d5cc9b7f..de8be0a8932d 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -28,9 +28,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <asm/smp_plat.h>
-#include <asm/cpu.h>
-
/* OPP tolerance in percentage */
#define OPP_TOLERANCE 4
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index ee925b53b6b9..5fc9cb480516 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -269,5 +269,6 @@ static void __exit pas_cpufreq_exit(void)
module_init(pas_cpufreq_init);
module_exit(pas_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 2cd2b06849a2..74ff6c47df29 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -505,7 +505,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
continue;
if (strcmp(loc, "CPU CLOCK"))
continue;
- if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ if (!of_property_present(hwclock, "platform-get-frequency"))
continue;
break;
}
@@ -671,4 +671,5 @@ static int __init g5_cpufreq_init(void)
module_init(g5_cpufreq_init);
+MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 50c62929f7ca..8de759247771 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -692,7 +692,7 @@ static void gpstate_timer_handler(struct timer_list *t)
}
/*
- * If PMCR was last updated was using fast_swtich then
+ * If the PMCR was last updated using fast_switch, then
* gpstate->last_lpstate_idx may hold a stale value.
* Hence, read from PMCR to get correct data.
*/
@@ -1160,5 +1160,6 @@ static void __exit powernv_cpufreq_exit(void)
}
module_exit(powernv_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 5ee4c7bfdcc5..98595b3ea13f 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -168,5 +168,6 @@ static void __exit cbe_cpufreq_exit(void)
module_init(cbe_cpufreq_init);
module_exit(cbe_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 370fe6a0104b..900d6844c43d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 939702dfa73f..703308fb891a 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -611,7 +611,7 @@ static struct platform_driver qcom_cpufreq_driver = {
},
};
-static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
+static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
{ .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 78b875db6b66..d8ab5b01d46d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -171,10 +171,9 @@ static struct cpufreq_driver spear_cpufreq_driver = {
static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- const struct property *prop;
struct cpufreq_frequency_table *freq_tbl;
- const __be32 *val;
- int cnt, i, ret;
+ u32 val;
+ int cnt, ret, i = 0;
np = of_cpu_device_node_get(0);
if (!np) {
@@ -186,26 +185,23 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
&spear_cpufreq.transition_latency))
spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
- prop = of_find_property(np, "cpufreq_tbl", NULL);
- if (!prop || !prop->value) {
+ cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
+ if (cnt <= 0) {
pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
- cnt = prop->length / sizeof(u32);
- val = prop->value;
-
freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < cnt; i++)
- freq_tbl[i].frequency = be32_to_cpup(val++);
+ of_property_for_each_u32(np, "cpufreq_tbl", val)
+ freq_tbl[i++].frequency = val;
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+ freq_tbl[cnt].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
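
The conversion swaps open-coded property walking for the counting and iteration helpers; of_property_for_each_u32() yields each cell already byte-swapped, which is why the be32_to_cpup() bookkeeping disappears. The resulting shape, as a sketch (tbl is assumed to hold cnt + 1 entries):

#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/of.h>

static int example_fill_table(struct device_node *np,
                              struct cpufreq_frequency_table *tbl)
{
        int i = 0, cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
        u32 val;

        if (cnt <= 0)
                return -ENODEV;
        of_property_for_each_u32(np, "cpufreq_tbl", val)
                tbl[i++].frequency = val;       /* no be32_to_cpup() */
        tbl[cnt].frequency = CPUFREQ_TABLE_END;
        return 0;
}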
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 8e2e703c3865..b15b3142b5fe 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -267,7 +267,7 @@ static int __init sti_cpufreq_init(void)
goto skip_voltage_scaling;
}
- if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
+ if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) {
dev_err(ddata.cpu, "OPP-v2 not supported\n");
goto skip_voltage_scaling;
}
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 95ac8d46c156..293921acec93 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -146,7 +146,7 @@ static bool dt_has_supported_hw(void)
return false;
for_each_child_of_node_scoped(np, opp) {
- if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ if (of_property_present(opp, "opp-supported-hw")) {
has_opp_supported_hw = true;
break;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 4d3f27958fbd..ba621ce1cdda 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#define REVISION_MASK 0xF
#define REVISION_SHIFT 28
@@ -90,6 +91,9 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_shift;
unsigned long rev_offset;
bool multi_regulator;
+/* Backward compatibility hack: Might have missing syscon */
+#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+ u8 quirks;
};
struct ti_cpufreq_data {
@@ -254,6 +258,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
.efuse_mask = BIT(3),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -281,6 +286,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.efuse_mask = BIT(9),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = true,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -295,6 +301,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.efuse_mask = 0,
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+};
+
+static const struct soc_device_attribute k3_cpufreq_soc[] = {
+ { .family = "AM62X", .revision = "SR1.0" },
+ { .family = "AM62AX", .revision = "SR1.0" },
+ { .family = "AM62PX", .revision = "SR1.0" },
+ { /* sentinel */ }
};
static struct ti_cpufreq_soc_data am625_soc_data = {
@@ -340,7 +354,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret == -EIO) {
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->efuse_offset, 4);
@@ -378,10 +392,20 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
struct device *dev = opp_data->cpu_dev;
u32 revision;
int ret;
+ if (soc_device_match(k3_cpufreq_soc)) {
+ /*
+ * Since the SR is 1.0, hard-code the revision_value as
+ * 0x1 here. This way we avoid reusing the same register
+ * that already provides the required information through
+ * socinfo.
+ */
+ *revision_value = 0x1;
+ goto done;
+ }
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret == -EIO) {
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->rev_offset, 4);
@@ -400,6 +424,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
*revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+done:
return 0;
}
@@ -419,7 +444,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
return 0;
}
-static const struct of_device_id ti_cpufreq_of_match[] = {
+static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
{ .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },
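
The quirk is a per-SoC bitmask consulted before taking the legacy ioremap() fallback on -EIO, so only the OMAP3-era entries that set it opt in to the workaround. The guard, extracted as a sketch (names as in the patch; the struct definition is local to the driver):

#include <linux/errno.h>
#include <linux/types.h>

static bool example_use_fallback(const struct ti_cpufreq_soc_data *d,
                                 int ret)
{
        return (d->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING) && ret == -EIO;
}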
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index fae958794339..146f97068022 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -67,12 +67,16 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
/*
* Allow power off when OSI has been successfully enabled.
- * PREEMPT_RT is not yet ready to enter domain idle states.
+ * On a PREEMPT_RT based configuration the domain idle states are
+ * supported, but only during system-wide suspend.
*/
- if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
+ if (use_osi) {
pd->power_off = psci_pd_power_off;
- else
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
+ } else {
pd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
/* Use governor for CPU PM domains if it has some states to manage. */
pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;
@@ -138,7 +142,6 @@ static const struct of_device_id psci_of_match[] = {
static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *node;
bool use_osi = psci_has_osi_support();
int ret = 0, pd_count = 0;
@@ -149,15 +152,13 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
* Parse child nodes for the "#power-domain-cells" property and
* initialize a genpd/genpd-of-provider pair when it's found.
*/
- for_each_child_of_node(np, node) {
+ for_each_child_of_node_scoped(np, node) {
if (!of_property_present(node, "#power-domain-cells"))
continue;
ret = psci_pd_init(node, use_osi);
- if (ret) {
- of_node_put(node);
+ if (ret)
goto exit;
- }
pd_count++;
}
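
for_each_child_of_node_scoped() attaches __free(device_node) cleanup to the iterator variable, so the child reference is dropped on every exit path and the manual of_node_put() before early returns goes away; the SBI and dt_idle_genpd hunks below are the same conversion. A small sketch:

#include <linux/of.h>

static int example_count_domains(struct device_node *np)
{
        int count = 0;

        for_each_child_of_node_scoped(np, node) {
                if (!of_property_present(node, "#power-domain-cells"))
                        continue;
                count++;        /* node ref auto-dropped on all paths */
        }
        return count;
}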
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 782030a27703..2562dc001fc1 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -37,6 +37,7 @@ struct psci_cpuidle_data {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(u32 state)
@@ -166,6 +167,12 @@ static struct syscore_ops psci_idle_syscore_ops = {
.resume = psci_idle_syscore_resume,
};
+static void psci_idle_init_syscore(void)
+{
+ if (psci_cpuidle_use_syscore)
+ register_syscore_ops(&psci_idle_syscore_ops);
+}
+
static void psci_idle_init_cpuhp(void)
{
int err;
@@ -173,8 +180,6 @@ static void psci_idle_init_cpuhp(void)
if (!psci_cpuidle_use_cpuhp)
return;
- register_syscore_ops(&psci_idle_syscore_ops);
-
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@@ -222,22 +227,23 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (!psci_has_osi_support())
return 0;
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- return 0;
-
data->dev = dt_idle_attach_cpu(cpu, "psci");
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
+ psci_cpuidle_use_syscore = true;
+
/*
* Using the deepest state for the CPU to trigger a potential selection
* of a shared state for the domain, assumes the domain states are all
- * deeper states.
+ * deeper states. On PREEMPT_RT the hierarchical topology is limited to
+ * s2ram and s2idle.
*/
- drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
- drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+ }
return 0;
}
@@ -313,6 +319,7 @@ static void psci_cpu_deinit_idle(int cpu)
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
dt_idle_detach_cpu(data->dev);
+ psci_cpuidle_use_syscore = false;
psci_cpuidle_use_cpuhp = false;
}
@@ -409,6 +416,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev)
goto out_fail;
}
+ psci_idle_init_syscore();
psci_idle_init_cpuhp();
return 0;
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index a6e123dfe394..d228b4d18d56 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+#include <linux/cleanup.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
@@ -236,19 +237,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
{
struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
struct device_node *state_node;
- struct device_node *cpu_node;
u32 *states;
int i, ret;
- cpu_node = of_cpu_device_node_get(cpu);
+ struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
if (!cpu_node)
return -ENODEV;
states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
- if (!states) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!states)
+ return -ENOMEM;
/* Parse SBI specific details from state DT nodes */
for (i = 1; i < state_count; i++) {
@@ -264,10 +262,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
pr_debug("sbi-state %#x index %d\n", states[i], i);
}
- if (i != state_count) {
- ret = -ENODEV;
- goto fail;
- }
+ if (i != state_count)
+ return -ENODEV;
/* Initialize optional data, used for the hierarchical topology. */
ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
@@ -277,10 +273,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
/* Store states in the per-cpu struct. */
data->states = states;
-fail:
- of_node_put(cpu_node);
-
- return ret;
+ return 0;
}
static void sbi_cpuidle_deinit_cpu(int cpu)
@@ -455,7 +448,6 @@ static void sbi_pd_remove(void)
static int sbi_genpd_probe(struct device_node *np)
{
- struct device_node *node;
int ret = 0, pd_count = 0;
if (!np)
@@ -465,13 +457,13 @@ static int sbi_genpd_probe(struct device_node *np)
* Parse child nodes for the "#power-domain-cells" property and
* initialize a genpd/genpd-of-provider pair when it's found.
*/
- for_each_child_of_node(np, node) {
+ for_each_child_of_node_scoped(np, node) {
if (!of_property_present(node, "#power-domain-cells"))
continue;
ret = sbi_pd_init(node);
if (ret)
- goto put_node;
+ goto remove_pd;
pd_count++;
}
@@ -487,8 +479,6 @@ static int sbi_genpd_probe(struct device_node *np)
return 0;
-put_node:
- of_node_put(node);
remove_pd:
sbi_pd_remove();
pr_err("failed to create CPU PM domains ret=%d\n", ret);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 02e40fd7d948..9e418aec1755 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -228,10 +228,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
if (broadcast && tick_broadcast_enter()) {
index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
CPUIDLE_FLAG_TIMER_STOP, false);
- if (index < 0) {
- default_idle_call();
- return -EBUSY;
- }
+
target_state = &drv->states[index];
broadcast = false;
}
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
index 1af63c189039..203e9b754aea 100644
--- a/drivers/cpuidle/dt_idle_genpd.c
+++ b/drivers/cpuidle/dt_idle_genpd.c
@@ -130,11 +130,10 @@ out:
int dt_idle_pd_init_topology(struct device_node *np)
{
- struct device_node *node;
struct of_phandle_args child, parent;
int ret;
- for_each_child_of_node(np, node) {
+ for_each_child_of_node_scoped(np, node) {
if (of_parse_phandle_with_args(node, "power-domains",
"#power-domain-cells", 0, &parent))
continue;
@@ -143,10 +142,8 @@ int dt_idle_pd_init_topology(struct device_node *np)
child.args_count = 0;
ret = of_genpd_add_subdomain(&parent, &child);
of_node_put(parent.np);
- if (ret) {
- of_node_put(node);
+ if (ret)
return ret;
- }
}
return 0;
@@ -154,11 +151,10 @@ int dt_idle_pd_init_topology(struct device_node *np)
int dt_idle_pd_remove_topology(struct device_node *np)
{
- struct device_node *node;
struct of_phandle_args child, parent;
int ret;
- for_each_child_of_node(np, node) {
+ for_each_child_of_node_scoped(np, node) {
if (of_parse_phandle_with_args(node, "power-domains",
"#power-domain-cells", 0, &parent))
continue;
@@ -167,10 +163,8 @@ int dt_idle_pd_remove_topology(struct device_node *np)
child.args_count = 0;
ret = of_genpd_remove_subdomain(&parent, &child);
of_node_put(parent.np);
- if (ret) {
- of_node_put(node);
+ if (ret)
return ret;
- }
}
return 0;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 94f23c6fc93b..08b1238bcd7b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -21,7 +21,7 @@ config CRYPTO_DEV_PADLOCK
(so called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast cryptographic
operations with supported algorithms.
-
+
The instructions are used only when the CPU supports them.
Otherwise software encryption is used.
@@ -78,18 +78,79 @@ config ZCRYPT
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
- depends on ZCRYPT
help
- With this option enabled the pkey kernel module provides an API
+ With this option enabled the pkey kernel modules provide an API
for creation and handling of protected keys. Other parts of the
kernel or userspace applications may use these functions.
+ The protected key support is distributed into:
+ - A pkey base and API kernel module (pkey.ko) which offers the
+ infrastructure for the pkey handler kernel modules, the ioctl
+ and the sysfs API and the in-kernel API to the crypto cipher
+ implementations using protected key.
+ - A pkey pckmo kernel module (pkey-pckmo.ko) which is automatically
+ loaded when pckmo support (that is generation of protected keys
+ from clear key values) is available.
+ - A pkey CCA kernel module (pkey-cca.ko) which is automatically
+ loaded when a CEX crypto card is available.
+ - A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
+ loaded when a CEX crypto card is available.
+
Select this option if you want to enable the kernel and userspace
- API for proteced key handling.
+ API for protected key handling.
+
+config PKEY_CCA
+ tristate "PKEY CCA support handler"
+ depends on PKEY
+ depends on ZCRYPT
+ help
+ This is the CCA support handler for deriving protected keys
+ from CCA (secure) keys. Also this handler provides an alternate
+ way to make protected keys from clear key values.
+
+ The PKEY CCA support handler needs a Crypto Express card (CEX)
+ in CCA mode.
+
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from CCA key material.
+
+config PKEY_EP11
+ tristate "PKEY EP11 support handler"
+ depends on PKEY
+ depends on ZCRYPT
+ help
+ This is the EP11 support handler for deriving protected keys
+ from EP11 (secure) keys. Also this handler provides an alternate
+ way to make protected keys from clear key values.
+
+ The PKEY EP11 support handler needs a Crypto Express card (CEX)
+ in EP11 mode.
+
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from EP11 key material.
+
+config PKEY_PCKMO
+ tristate "PKEY PCKMO support handler"
+ depends on PKEY
+ help
+ This is the PCKMO support handler for deriving protected keys
+ from clear key values via invoking the PCKMO instruction.
+
+ The PCKMO instruction can be enabled and disabled in the crypto
+ settings at the LPAR profile. This handler checks for availability
+ during initialization and, if built as a kernel module, unloads
+ itself when PCKMO is disabled.
+
+ The PCKMO way of deriving protected keys from clear key material
+ is especially used during the self-test of protected key ciphers
+ like PAES, but the CCA and EP11 handlers provide alternate ways
+ to generate protected keys from clear key values.
- Please note that creation of protected keys from secure keys
- requires to have at least one CEX card in coprocessor mode
- available at runtime.
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from clear key values directly via PCKMO.
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 96355d463b04..3adcc5e65694 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -149,7 +149,6 @@ struct crypto4xx_alg {
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index 1013a666c932..d68094ffb70a 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -150,8 +150,6 @@ struct meson_alg_template {
#endif
};
-int meson_enqueue(struct crypto_async_request *areq, u32 type);
-
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int meson_cipher_init(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 8bd64fc37e75..0dd90785db9a 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2376,33 +2376,29 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
+ aes_dd->iclk = devm_clk_get_prepared(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
goto err_tasklet_kill;
}
- err = clk_prepare(aes_dd->iclk);
- if (err)
- goto err_tasklet_kill;
-
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto err_iclk_unprepare;
+ goto err_tasklet_kill;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto err_iclk_unprepare;
+ goto err_tasklet_kill;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_iclk_unprepare;
+ goto err_tasklet_kill;
err = atmel_aes_dma_init(aes_dd);
if (err)
@@ -2429,8 +2425,6 @@ err_algs:
atmel_aes_dma_cleanup(aes_dd);
err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_iclk_unprepare:
- clk_unprepare(aes_dd->iclk);
err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
@@ -2455,8 +2449,6 @@ static void atmel_aes_remove(struct platform_device *pdev)
atmel_aes_dma_cleanup(aes_dd);
atmel_aes_buff_cleanup(aes_dd);
-
- clk_unprepare(aes_dd->iclk);
}
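
devm_clk_get_prepared() combines devm_clk_get() with clk_prepare() and registers the matching clk_unprepare() as a managed action, which is why the err_iclk_unprepare labels and the unprepare calls in both Atmel remove() paths could be deleted. Probe-side usage, as a minimal sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_init(struct device *dev, struct clk **out)
{
        /* Prepared on success; unprepared automatically on detach. */
        struct clk *clk = devm_clk_get_prepared(dev, "aes_clk");

        if (IS_ERR(clk))
                return PTR_ERR(clk);
        *out = clk;
        return 0;
}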
static struct platform_driver atmel_aes_driver = {
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index f4cd6158a4f7..8cc57df25778 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2623,27 +2623,23 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
+ sha_dd->iclk = devm_clk_get_prepared(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
goto err_tasklet_kill;
}
- err = clk_prepare(sha_dd->iclk);
- if (err)
- goto err_tasklet_kill;
-
err = atmel_sha_hw_version_init(sha_dd);
if (err)
- goto err_iclk_unprepare;
+ goto err_tasklet_kill;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_iclk_unprepare;
+ goto err_tasklet_kill;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2669,8 +2665,6 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_iclk_unprepare:
- clk_unprepare(sha_dd->iclk);
err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
@@ -2693,8 +2687,6 @@ static void atmel_sha_remove(struct platform_device *pdev)
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-
- clk_unprepare(sha_dd->iclk);
}
static struct platform_driver atmel_sha_driver = {
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 743ce50c14f2..13347dfecf7a 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -961,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
- return (struct aead_edesc *)drv_ctx;
+ return ERR_CAST(drv_ctx);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = qi_cache_alloc(flags);
@@ -1271,7 +1271,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
- return (struct skcipher_edesc *)drv_ctx;
+ return ERR_CAST(drv_ctx);
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
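
ERR_CAST() replaces a bare C cast when an ERR_PTR-encoded error is propagated as a different pointer type, keeping the encoded errno while making the intent explicit. In sketch form (foo and bar are placeholder types):

#include <linux/err.h>

struct foo;
struct bar;

static struct bar *example_propagate(struct foo *p)
{
        if (IS_ERR(p))
                return ERR_CAST(p);     /* same errno, new pointer type */
        return NULL;
}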
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 207dc422785a..44e1f8f46967 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -5006,10 +5006,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
- cpumask_t clean_mask;
+ cpumask_var_t clean_mask;
int err, cpu;
u8 i;
+ err = -ENOMEM;
+ if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
+ goto err_cpumask;
+
priv = dev_get_drvdata(dev);
priv->dev = dev;
@@ -5085,7 +5089,6 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
}
}
- cpumask_clear(&clean_mask);
i = 0;
for_each_online_cpu(cpu) {
u8 j;
@@ -5114,7 +5117,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
err = -ENOMEM;
goto err_alloc_netdev;
}
- cpumask_set_cpu(cpu, &clean_mask);
+ cpumask_set_cpu(cpu, clean_mask);
ppriv->net_dev->dev = *dev;
netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
@@ -5122,15 +5125,19 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
DPAA2_CAAM_NAPI_WEIGHT);
}
- return 0;
+ err = 0;
+ goto free_cpumask;
err_alloc_netdev:
- free_dpaa2_pcpu_netdev(priv, &clean_mask);
+ free_dpaa2_pcpu_netdev(priv, clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
+free_cpumask:
+ free_cpumask_var(clean_mask);
+err_cpumask:
return err;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index fdd724228c2f..25c02e267258 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
+ sg_num = pad_sg_nents(sg_num);
edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
if (!edesc)
return NULL;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index ba8fb5d8a7b2..f6111ee9ed34 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -736,7 +736,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- cpumask_t clean_mask;
+ cpumask_var_t clean_mask;
+
+ err = -ENOMEM;
+ if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
+ goto fail_cpumask;
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = ctrldev;
@@ -745,19 +749,16 @@ int caam_qi_init(struct platform_device *caam_pdev)
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
- return err;
+ goto fail_cgr;
}
/* Initialise response FQs */
err = alloc_rsp_fqs(qidev);
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
- free_rsp_fqs();
- return err;
+ goto fail_fqs;
}
- cpumask_clear(&clean_mask);
-
/*
* Enable the NAPI contexts on each of the core which has an affine
* portal.
@@ -773,7 +774,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
err = -ENOMEM;
goto fail;
}
- cpumask_set_cpu(i, &clean_mask);
+ cpumask_set_cpu(i, clean_mask);
priv->net_dev = net_dev;
net_dev->dev = *qidev;
@@ -788,7 +789,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
err = -ENOMEM;
- goto fail2;
+ goto fail;
}
caam_debugfs_qi_init(ctrlpriv);
@@ -798,11 +799,19 @@ int caam_qi_init(struct platform_device *caam_pdev)
goto fail2;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
- return 0;
+ goto free_cpumask;
fail2:
- free_rsp_fqs();
+ kmem_cache_destroy(qi_cache);
fail:
- free_caam_qi_pcpu_netdev(&clean_mask);
+ free_caam_qi_pcpu_netdev(clean_mask);
+fail_fqs:
+ free_rsp_fqs();
+ qman_delete_cgr_safe(&qipriv.cgr);
+ qman_release_cgrid(qipriv.cgr.cgrid);
+fail_cgr:
+free_cpumask:
+ free_cpumask_var(clean_mask);
+fail_cpumask:
return err;
}
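The caamalg_qi2.c and qi.c hunks both move a cpumask_t off the stack. A minimal sketch of the cpumask_var_t pattern they adopt (the function itself is hypothetical):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_collect_online_cpus(void)
{
	cpumask_var_t mask;
	int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK=y this heap-allocates a zeroed
	 * mask (NR_CPUS bits may be far too large for the stack);
	 * otherwise it is an on-stack clear that cannot fail. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	for_each_online_cpu(cpu)
		cpumask_set_cpu(cpu, mask);

	/* ... use mask ... */

	free_cpumask_var(mask);
	return 0;
}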
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9810edbb272d..af018afd9cd7 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -910,7 +910,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
- reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
+ reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
+
+ /*
+ * If invoked during panic handling, local interrupts are disabled so
+ * the PSP command completion interrupt can't be used.
+ * sev_wait_cmd_ioc() already checks whether interrupts are disabled
+ * and falls back to polling for PSP command completion. Ensure we do
+ * not request an interrupt from the PSP if interrupts are disabled.
+ */
+ if (!irqs_disabled())
+ reg |= SEV_CMDRESP_IOC;
+
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@@ -1629,8 +1640,6 @@ static int sev_update_firmware(struct device *dev)
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
__free_pages(p, order);
@@ -2382,6 +2391,7 @@ void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_platform_init_args args = {0};
+ u8 api_major, api_minor, build;
int rc;
if (!sev)
@@ -2392,9 +2402,19 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
+ api_major = sev->api_major;
+ api_minor = sev->api_minor;
+ build = sev->build;
+
if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+ if (api_major != sev->api_major || api_minor != sev->api_minor ||
+ build != sev->build)
+ dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n",
+ api_major, api_minor, build,
+ sev->api_major, sev->api_minor, sev->build);
+
/* Initialize the platform */
args.probe = true;
rc = sev_platform_init(&args);
@@ -2410,6 +2430,8 @@ void sev_pci_init(void)
return;
err:
+ sev_dev_destroy(psp_master);
+
psp_master->sev_data = NULL;
}
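The first sev-dev.c hunk makes the IOC (interrupt-on-completion) request conditional on interrupt state. A sketch of that decision in isolation; the field layout and names below are hypothetical, not the real SEV_CMDRESP encoding:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/irqflags.h>
#include <linux/types.h>

#define DEMO_CMDRESP_CMD	GENMASK(26, 16)	/* hypothetical command field */
#define DEMO_CMDRESP_IOC	BIT(0)		/* hypothetical IOC request bit */

static u32 demo_build_cmdresp(unsigned int cmd)
{
	u32 reg = FIELD_PREP(DEMO_CMDRESP_CMD, cmd);

	/* In panic handling local interrupts are off, so a completion
	 * interrupt could never be serviced; leave IOC clear and let the
	 * wait helper poll for completion instead. */
	if (!irqs_disabled())
		reg |= DEMO_CMDRESP_IOC;

	return reg;
}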
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 0895de823674..6f9d7063257d 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -138,7 +138,6 @@ struct sp_device *sp_alloc_struct(struct device *dev);
int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
-struct sp_device *sp_get_master(void);
int sp_suspend(struct sp_device *sp);
int sp_resume(struct sp_device *sp);
diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h
index 9e1a7e7f8961..56b844d0cd9c 100644
--- a/drivers/crypto/gemini/sl3516-ce.h
+++ b/drivers/crypto/gemini/sl3516-ce.h
@@ -326,8 +326,6 @@ struct sl3516_ce_alg_template {
unsigned long stat_bytes;
};
-int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type);
-
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sl3516_ce_cipher_init(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 764532a6ca82..c167dbd6c7d6 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -575,7 +575,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
+ spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 10aa4da93323..6b536ad2ada5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,9 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define HPRE_COMM_CNT_CLR_CE 0x0
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
@@ -42,7 +40,6 @@
#define HPRE_HAC_INT_SET 0x301500
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
-#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -66,7 +63,6 @@
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
-#define HPRE_TIMEOUT_ABNML_BIT 6
#define HPRE_PASID_EN_BIT 9
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
@@ -203,9 +199,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
- {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
- {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
@@ -358,6 +354,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+static const struct hisi_qm_err_ini hpre_err_ini;
+
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
@@ -654,11 +652,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
- /* HPRE need more time, we close this interrupt */
- val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
- val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
- writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
-
if (qm->ver >= QM_HW_V3)
writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
qm->io_base + HPRE_TYPES_ENB);
@@ -667,9 +660,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
- writel(0x0, qm->io_base + HPRE_INT_MASK);
writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
- writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
@@ -759,7 +750,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
@@ -776,7 +767,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm)
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
+ writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
+ qm->err_ini = &hpre_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
}
@@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
- qm->err_ini = &hpre_err_ini;
- qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
@@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre)
return 0;
}
+static void hpre_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ hpre_cnt_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ hpre_show_last_regs_uninit(qm);
+ hpre_close_sva_prefetch(qm);
+ hisi_qm_dev_err_uninit(qm);
+}
+
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_with_err_init;
+ goto err_with_probe_init;
ret = hpre_debugfs_init(qm);
if (ret)
@@ -1444,9 +1447,8 @@ err_qm_del_list:
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
-err_with_err_init:
- hpre_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+err_with_probe_init:
+ hpre_probe_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -1468,13 +1470,7 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
- if (qm->fun_type == QM_HW_PF) {
- hpre_cnt_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- hpre_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
- }
-
+ hpre_probe_uninit(qm);
hisi_qm_uninit(qm);
}
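The hpre_hw_error_enable() change stops unmasking every interrupt source and instead derives the mask from the enabled CE/NFE/FE sets. A sketch of the computation, with a hypothetical register offset:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_INT_MASK	0x400	/* hypothetical offset; 1 = masked, 0 = enabled */

static void demo_error_irq_enable(void __iomem *base, u32 ce, u32 nfe, u32 fe)
{
	u32 err_en = ce | nfe | fe;

	/* Unmask only the sources read from the capability table; writing
	 * plain 0, as the removed HPRE_CORE_INT_ENABLE path did, would
	 * unmask sources the driver never handles. */
	writel(~err_en, base + DEMO_INT_MASK);
}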
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f614fd228b56..07983af9e3e2 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -450,6 +450,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
};
static void qm_irqs_unregister(struct hisi_qm *qm);
+static int qm_reset_device(struct hisi_qm *qm);
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
@@ -4014,6 +4015,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ /* Kunpeng930 hardware automatically closes master OOO when an NFE occurs */
+ if (qm->ver >= QM_HW_V3)
+ return;
+
+ if (!qm->err_status.is_dev_ecc_mbit &&
+ qm->err_status.is_qm_ecc_mbit &&
+ qm->err_ini->close_axi_master_ooo) {
+ qm->err_ini->close_axi_master_ooo(qm);
+ } else if (qm->err_status.is_dev_ecc_mbit &&
+ !qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_ini->close_axi_master_ooo) {
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ }
+}
+
static int qm_vf_reset_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
@@ -4078,6 +4101,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
+ qm_dev_ecc_mbit_handle(qm);
+
/* PF obtains the information of VF by querying the register. */
qm_cmd_uninit(qm);
@@ -4108,33 +4133,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return 0;
}
-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+static int qm_master_ooo_check(struct hisi_qm *qm)
{
- u32 nfe_enb = 0;
+ u32 val;
+ int ret;
- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */
- if (qm->ver >= QM_HW_V3)
- return;
+ /* Check the device's OOO register before issuing the reset. */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val, (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
- if (!qm->err_status.is_dev_ecc_mbit &&
- qm->err_status.is_qm_ecc_mbit &&
- qm->err_ini->close_axi_master_ooo) {
- qm->err_ini->close_axi_master_ooo(qm);
- } else if (qm->err_status.is_dev_ecc_mbit &&
- !qm->err_status.is_qm_ecc_mbit &&
- !qm->err_ini->close_axi_master_ooo) {
- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
- qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
- }
+ return ret;
}
-static int qm_soft_reset(struct hisi_qm *qm)
+static int qm_soft_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
- u32 val;
/* Ensure all doorbells and mailboxes received by QM */
ret = qm_check_req_recv(qm);
@@ -4155,30 +4173,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
return ret;
}
- qm_dev_ecc_mbit_handle(qm);
-
- /* OOO register set and check */
- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
- val,
- (val == ACC_MASTER_TRANS_RETURN_RW),
- POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
- pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ ret = qm_master_ooo_check(qm);
+ if (ret)
return ret;
- }
if (qm->err_ini->close_sva_prefetch)
qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false);
- if (ret) {
+ if (ret)
pci_err(pdev, "Fails to disable pf MSE bit.\n");
- return ret;
- }
+
+ return ret;
+}
+
+static int qm_reset_device(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */
if (ACPI_HANDLE(&pdev->dev)) {
@@ -4197,12 +4208,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
pci_err(pdev, "Reset step %llu failed!\n", value);
return -EIO;
}
- } else {
- pci_err(pdev, "No reset method!\n");
- return -EINVAL;
+
+ return 0;
}
- return 0;
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_soft_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ return qm_reset_device(qm);
}
static int qm_vf_reset_done(struct hisi_qm *qm)
@@ -5155,6 +5177,35 @@ err_request_mem_regions:
return ret;
}
+static int qm_clear_device(struct hisi_qm *qm)
+{
+ acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ /* Device does not support reset, return */
+ if (!qm->err_ini->err_info_init)
+ return 0;
+ qm->err_ini->err_info_init(qm);
+
+ if (!handle)
+ return 0;
+
+ /* No reset method, return */
+ if (!acpi_has_method(handle, qm->err_info.acpi_rst))
+ return 0;
+
+ ret = qm_master_ooo_check(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+
+ return qm_reset_device(qm);
+}
+
static int hisi_qm_pci_init(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5184,8 +5235,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
}
+ ret = qm_clear_device(qm);
+ if (ret)
+ goto err_free_vectors;
+
return 0;
+err_free_vectors:
+ pci_free_irq_vectors(pdev);
err_get_pci_res:
qm_put_pci_res(qm);
err_disable_pcidev:
@@ -5486,7 +5543,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
- u32 val;
ret = qm->ops->set_msi(qm, false);
if (ret) {
@@ -5494,18 +5550,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
return ret;
}
- /* shutdown OOO register */
- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
- val,
- (val == ACC_MASTER_TRANS_RETURN_RW),
- POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
- pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ ret = qm_master_ooo_check(qm);
+ if (ret)
return ret;
- }
ret = qm_set_pf_mse(qm, false);
if (ret)
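qm_master_ooo_check() factors the shutdown-and-drain sequence out of qm_soft_reset() and the suspend path. A sketch of its shape, with hypothetical offsets standing in for the ACC_MASTER_* constants:

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define DEMO_MASTER_CTRL	0x300000	/* hypothetical offsets */
#define DEMO_MASTER_SHUTDOWN	0x1
#define DEMO_TRANS_RETURN	0x300150
#define DEMO_TRANS_IDLE		0x3
#define DEMO_POLL_PERIOD_US	10
#define DEMO_POLL_TIMEOUT_US	1000

static int demo_master_ooo_check(void __iomem *base)
{
	u32 val;

	/* Request AXI master shutdown, then poll until outstanding
	 * transactions drain; readl_relaxed_poll_timeout() returns
	 * -ETIMEDOUT if the bus is locked up. */
	writel(DEMO_MASTER_SHUTDOWN, base + DEMO_MASTER_CTRL);
	return readl_relaxed_poll_timeout(base + DEMO_TRANS_RETURN, val,
					  val == DEMO_TRANS_IDLE,
					  DEMO_POLL_PERIOD_US,
					  DEMO_POLL_TIMEOUT_US);
}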
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index afdddf87cc34..9bafcc5aa404 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -458,7 +458,7 @@ static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
if (hash_mask & SEC_HASH_IPV4_MASK) {
- dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
+ dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
return -EINVAL;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 75aad04ffe5e..c35533d8930b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- qm->err_ini = &sec_err_ini;
- qm->err_ini->err_info_init(qm);
-
ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &sec_devices;
+ qm->err_ini = &sec_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm)
{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ sec_debug_regs_clear(qm);
+ sec_show_last_regs_uninit(qm);
+ sec_close_sva_prefetch(qm);
hisi_qm_dev_err_uninit(qm);
}
@@ -1274,7 +1278,6 @@ err_qm_del_list:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
- sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev)
sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
-
- if (qm->fun_type == QM_HW_PF)
- sec_debug_regs_clear(qm);
- sec_show_last_regs_uninit(qm);
-
sec_probe_uninit(qm);
sec_qm_uninit(qm);
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 568acd0aee3f..c974f95cd126 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -225,7 +225,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int sg_n;
+ int sg_n, ret;
if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
@@ -240,14 +240,15 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
if (sg_n_mapped > pool->sge_nr) {
dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_unmap;
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
dev_err(dev, "Get SGL error!\n");
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_unmap;
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
@@ -262,6 +263,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
+
+err_unmap:
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 451b167bcc73..66c551ecdee8 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
+#include <crypto/internal/rng.h>
#include <linux/acpi.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -13,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/random.h>
-#include <crypto/internal/rng.h>
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
@@ -121,7 +121,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
u32 i;
if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) {
- pr_err("dlen(%d) exceeds limit(%d)!\n", dlen,
+ pr_err("dlen(%u) exceeds limit(%d)!\n", dlen,
SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES);
return -EINVAL;
}
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 94e2d66b04b6..7327f8f29b01 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -54,7 +54,7 @@ struct hisi_zip_req {
struct hisi_zip_req_q {
struct hisi_zip_req *q;
unsigned long *req_bitmap;
- rwlock_t req_lock;
+ spinlock_t req_lock;
u16 size;
};
@@ -116,17 +116,17 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
struct hisi_zip_req *req_cache;
int req_id;
- write_lock(&req_q->req_lock);
+ spin_lock(&req_q->req_lock);
req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
if (req_id >= req_q->size) {
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
return ERR_PTR(-EAGAIN);
}
set_bit(req_id, req_q->req_bitmap);
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
req_cache = q + req_id;
req_cache->req_id = req_id;
@@ -140,9 +140,9 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- write_lock(&req_q->req_lock);
+ spin_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- write_unlock(&req_q->req_lock);
+ spin_unlock(&req_q->req_lock);
}
static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
@@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
+ spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
+ spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
@@ -456,7 +459,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
goto err_free_comp_q;
}
- rwlock_init(&req_q->req_lock);
+ spin_lock_init(&req_q->req_lock);
req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
GFP_KERNEL);
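The zip_crypto.c hunks replace the request-table rwlock with a spinlock and, crucially, take it around hisi_qp_send(). A sketch of the submission side, with demo_send() standing in for hisi_qp_send():

#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;	/* protects request bitmap and submit path */
};

int demo_send(struct demo_queue *q, void *sqe);	/* stand-in for hisi_qp_send() */

static int demo_submit(struct demo_queue *q, void *sqe)
{
	int ret;

	/* The completion handler runs in softirq context and takes the
	 * same lock, so the _bh variants are required to avoid a
	 * self-deadlock on the local CPU. */
	spin_lock_bh(&q->lock);
	ret = demo_send(q, sqe);
	spin_unlock_bh(&q->lock);

	return ret;
}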
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 7c2d803886fd..d07e47b48be0 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -1141,8 +1141,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
- qm->err_ini = &hisi_zip_err_ini;
- qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm);
if (ret)
@@ -1203,6 +1201,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &zip_devices;
+ qm->err_ini = &hisi_zip_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@@ -1269,6 +1268,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
+static void hisi_zip_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ hisi_zip_show_last_regs_uninit(qm);
+ hisi_zip_close_sva_prefetch(qm);
+ hisi_qm_dev_err_uninit(qm);
+}
+
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
@@ -1295,7 +1304,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_dev_err_uninit;
+ goto err_probe_uninit;
ret = hisi_zip_debugfs_init(qm);
if (ret)
@@ -1334,9 +1343,8 @@ err_qm_del_list:
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
-err_dev_err_uninit:
- hisi_zip_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+err_probe_uninit:
+ hisi_zip_probe_uninit(qm);
err_qm_uninit:
hisi_zip_qm_uninit(qm);
@@ -1358,8 +1366,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
- hisi_zip_show_last_regs_uninit(qm);
- hisi_qm_dev_err_uninit(qm);
+ hisi_zip_probe_uninit(qm);
hisi_zip_qm_uninit(qm);
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index d269036bdaa3..7e93159c3b6b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -987,31 +987,23 @@ static int img_hash_probe(struct platform_device *pdev)
}
dev_dbg(dev, "using IRQ channel %d\n", irq);
- hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
+ hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
if (IS_ERR(hdev->hash_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->hash_clk);
goto res_err;
}
- hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
if (IS_ERR(hdev->sys_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->sys_clk);
goto res_err;
}
- err = clk_prepare_enable(hdev->hash_clk);
- if (err)
- goto res_err;
-
- err = clk_prepare_enable(hdev->sys_clk);
- if (err)
- goto clk_err;
-
err = img_hash_dma_init(hdev);
if (err)
- goto dma_err;
+ goto res_err;
dev_dbg(dev, "using %s for DMA transfers\n",
dma_chan_name(hdev->dma_lch));
@@ -1032,10 +1024,6 @@ err_algs:
list_del(&hdev->list);
spin_unlock(&img_hash.lock);
dma_release_channel(hdev->dma_lch);
-dma_err:
- clk_disable_unprepare(hdev->sys_clk);
-clk_err:
- clk_disable_unprepare(hdev->hash_clk);
res_err:
tasklet_kill(&hdev->done_task);
tasklet_kill(&hdev->dma_task);
@@ -1058,9 +1046,6 @@ static void img_hash_remove(struct platform_device *pdev)
tasklet_kill(&hdev->dma_task);
dma_release_channel(hdev->dma_lch);
-
- clk_disable_unprepare(hdev->hash_clk);
- clk_disable_unprepare(hdev->sys_clk);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index d0059ce954dd..0c79ad78d1c0 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -897,7 +897,6 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring);
-void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index e810d286ee8c..237f87000070 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -495,10 +495,10 @@ static void remove_device_compression_modes(struct iaa_device *iaa_device)
if (!device_mode)
continue;
- free_device_compression_mode(iaa_device, device_mode);
- iaa_device->compression_modes[i] = NULL;
if (iaa_compression_modes[i]->free)
iaa_compression_modes[i]->free(device_mode);
+ free_device_compression_mode(iaa_device, device_mode);
+ iaa_device->compression_modes[i] = NULL;
}
}
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index c2dfca73fe4e..e54c79890d44 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -1150,6 +1150,7 @@ static const struct of_device_id kmb_ocs_hcu_of_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);
static void kmb_ocs_hcu_remove(struct platform_device *pdev)
{
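The one-line keembay addition exports the OF match table. A sketch of what the macro buys, with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-hcu" },	/* hypothetical */
	{ }
};
/* Emits "of:..." modalias strings into the module image so udev/kmod can
 * autoload it when a matching devicetree node is probed; without the
 * export, the driver binds only if the module is loaded by hand. */
MODULE_DEVICE_TABLE(of, demo_of_match);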
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 2a3598409eeb..f49818a13013 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -163,7 +163,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@@ -177,7 +177,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index d26564cebdec..659905e45950 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -165,7 +165,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@@ -179,7 +179,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 956a4c85609a..4d18057745d4 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index a8de9cd09c05..f0023cfb234c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index ad0ca4384998..e6b5de55434e 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 53b8ddb63364..2bd5b0ff00e3 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 04260f61d042..ec7913ab00a2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -44,7 +44,7 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
adf_pf2vf_notify_restarting(accel_dev);
adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_clear_master(pdev);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 2cf102ad4ca8..b0fc453fa3fb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
@@ -100,6 +100,8 @@ void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
}
static void adf_cfg_section_del_all(struct list_head *head);
+static void adf_cfg_section_del_all_except(struct list_head *head,
+ const char *section_name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
{
@@ -111,6 +113,17 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
+void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev,
+ const char *section_name)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ down_write(&dev_cfg_data->lock);
+ adf_cfg_section_del_all_except(&dev_cfg_data->sec_list, section_name);
+ up_write(&dev_cfg_data->lock);
+ clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
/**
* adf_cfg_dev_remove() - Clears acceleration device configuration table.
* @accel_dev: Pointer to acceleration device.
@@ -185,6 +198,22 @@ static void adf_cfg_section_del_all(struct list_head *head)
}
}
+static void adf_cfg_section_del_all_except(struct list_head *head,
+ const char *section_name)
+{
+ struct list_head *list, *tmp;
+ struct adf_cfg_section *ptr;
+
+ list_for_each_prev_safe(list, tmp, head) {
+ ptr = list_entry(list, struct adf_cfg_section, list);
+ if (!strcmp(ptr->name, section_name))
+ continue;
+ adf_cfg_keyval_del_all(&ptr->param_head);
+ list_del(list);
+ kfree(ptr);
+ }
+}
+
static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
const char *key)
{
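adf_cfg_section_del_all_except() walks the section list and skips one entry by name. A sketch of the deletion loop, with struct demo_section standing in for struct adf_cfg_section:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_section {
	char name[32];
	struct list_head list;
};

static void demo_del_all_except(struct list_head *head, const char *keep)
{
	struct list_head *pos, *tmp;
	struct demo_section *sec;

	/* The _safe walker caches the next pointer, so entries can be
	 * unlinked and freed while iterating. */
	list_for_each_prev_safe(pos, tmp, head) {
		sec = list_entry(pos, struct demo_section, list);
		if (!strcmp(sec->name, keep))
			continue;
		list_del(pos);
		kfree(sec);
	}
}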
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
index c0c9052b2213..2afa6f0d15c5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
@@ -35,6 +35,8 @@ void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev);
void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev);
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev,
+ const char *section_name);
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
const char *key, const void *val,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 3bec9e20bad0..f7ecabdf7805 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -56,7 +56,7 @@ int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
-int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev);
int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 26a1662fafbb..70fa0f6497a9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -247,7 +247,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!accel_dev->is_vf)
continue;
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
}
}
@@ -256,7 +256,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!adf_dev_started(accel_dev))
continue;
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
}
}
}
@@ -319,7 +319,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
ctl_data->device_id);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
}
out:
kfree(ctl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 8b10926cedba..e8c53bd76f1b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -83,7 +83,7 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
-#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 74f0818c0703..f189cce7d153 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -323,6 +323,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
if (hw_data->stop_timer)
hw_data->stop_timer(accel_dev);
+ hw_data->disable_iov(accel_dev);
+
if (wait)
msleep(100);
@@ -386,16 +388,14 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_tl_shutdown(accel_dev);
- hw_data->disable_iov(accel_dev);
-
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
}
- /* Delete configuration only if not restarting */
+ /* If not restarting, delete all cfg sections except for GENERAL */
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
- adf_cfg_del_all(accel_dev);
+ adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
if (hw_data->exit_arb)
hw_data->exit_arb(accel_dev);
@@ -445,33 +445,7 @@ void adf_error_notifier(struct adf_accel_dev *accel_dev)
}
}
-static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
-
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
-
- if (!ret) {
- ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
- if (ret)
- return ret;
-
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED,
- services, ADF_STR);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+int adf_dev_down(struct adf_accel_dev *accel_dev)
{
int ret = 0;
@@ -480,15 +454,9 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
mutex_lock(&accel_dev->state_lock);
- if (reconfig) {
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
- goto out;
- }
-
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
-out:
mutex_unlock(&accel_dev->state_lock);
return ret;
}
@@ -535,7 +503,7 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev)
if (!accel_dev)
return -EFAULT;
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
ret = adf_dev_up(accel_dev, false);
/* if device is already up return success*/
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 0e31f4b41844..0cee3b23dee9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -18,14 +18,17 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- vf->restarting = false;
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+ vf->restarting = true;
+ else
+ vf->restarting = false;
+
if (!vf->init)
continue;
+
if (adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
- else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
- vf->restarting = true;
}
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
index 1141258db4b6..10c91e56d6be 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
@@ -48,6 +48,20 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE };
+
+ /* Check compatibility version */
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK)
+ return;
+
+ if (adf_send_vf2pf_msg(accel_dev, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Restarting complete event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete);
+
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
u8 pf_version;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
index 71bc0e3f1d93..d79340ab3134 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
@@ -6,6 +6,7 @@
#if defined(CONFIG_PCI_IOV)
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
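The new restart-complete notification only goes out when the PF speaks a protocol version that understands it. A sketch of the compat gate; the version constant, message type and send helper below are hypothetical, not the real ADF PF/VF protocol:

#include <linux/device.h>
#include <linux/types.h>

#define DEMO_COMPAT_FALLBACK	3	/* hypothetical protocol version */
#define DEMO_MSG_RESTART_DONE	0x10	/* hypothetical message type */

struct demo_vf {
	struct device *dev;
	u8 pf_compat_ver;
};

int demo_send_vf2pf(struct demo_vf *vf, u8 msg_type);	/* hypothetical */

static void demo_notify_restart_complete(struct demo_vf *vf)
{
	/* Older PFs predate this message and would mishandle it, so gate
	 * on the compatibility version negotiated at init time. */
	if (vf->pf_compat_ver < DEMO_COMPAT_FALLBACK)
		return;

	if (demo_send_vf2pf(vf, DEMO_MSG_RESTART_DONE))
		dev_err(vf->dev, "Failed to send restart complete event to PF\n");
}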
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 8d645e7e04aa..c75d0b6cb0ad 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -86,11 +86,133 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
return pci_enable_sriov(pdev, totalvfs);
}
+static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val = 0;
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+}
+
+static int adf_do_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ if (adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Cannot disable SR-IOV, device in use\n");
+ return -EBUSY;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Cannot disable SR-IOV, device in reset\n");
+ return -EBUSY;
+ }
+
+ ret = adf_dev_down(accel_dev);
+ if (ret)
+ goto err_del_cfg;
+ }
+
+ adf_disable_sriov(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto err_del_cfg;
+
+ return 0;
+
+err_del_cfg:
+ adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
+ return ret;
+}
+
+static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ unsigned long val;
+ int ret;
+
+ if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
+ dev_warn(&GET_DEV(accel_dev),
+ "IOMMU should be enabled for SR-IOV to work correctly\n");
+ return -EINVAL;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Device busy\n");
+ return -EBUSY;
+ }
+
+ ret = adf_dev_down(accel_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = adf_add_sriov_configuration(accel_dev);
+ if (ret)
+ goto err_del_cfg;
+
+ /* Allocate memory for VF info structs */
+ accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!accel_dev->pf.vf_info)
+ goto err_del_cfg;
+
+ ret = adf_dev_up(accel_dev, false);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ accel_dev->accel_id);
+ goto err_free_vf_info;
+ }
+
+ ret = adf_enable_sriov(accel_dev);
+ if (ret)
+ goto err_free_vf_info;
+
+ val = 1;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+ &val, ADF_DEC);
+ if (ret)
+ goto err_free_vf_info;
+
+ return totalvfs;
+
+err_free_vf_info:
+ adf_dev_down(accel_dev);
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+ return ret;
+err_del_cfg:
+ adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
+ return ret;
+}
+
void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- unsigned long val = 0;
if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SRIOV_ENABLED, cfg))
@@ -99,15 +221,9 @@ void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
return;
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC))
- return;
-
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC))
+ if (adf_add_sriov_configuration(accel_dev))
return;
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
adf_enable_sriov(accel_dev);
}
@@ -168,70 +284,16 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov);
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
- int totalvfs = pci_sriov_get_totalvfs(pdev);
- unsigned long val;
- int ret;
if (!accel_dev) {
dev_err(&pdev->dev, "Failed to find accel_dev\n");
return -EFAULT;
}
- if (!device_iommu_mapped(&pdev->dev))
- dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
-
- if (accel_dev->pf.vf_info) {
- dev_info(&pdev->dev, "Already enabled for this device\n");
- return -EINVAL;
- }
-
- if (adf_dev_started(accel_dev)) {
- if (adf_devmgr_in_reset(accel_dev) ||
- adf_dev_in_use(accel_dev)) {
- dev_err(&GET_DEV(accel_dev), "Device busy\n");
- return -EBUSY;
- }
-
- ret = adf_dev_down(accel_dev, true);
- if (ret)
- return ret;
- }
-
- if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
- return -EFAULT;
- val = 0;
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- ADF_NUM_CY, (void *)&val, ADF_DEC))
- return -EFAULT;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- return ret;
-
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
- /* Allocate memory for VF info structs */
- accel_dev->pf.vf_info = kcalloc(totalvfs,
- sizeof(struct adf_accel_vf_info),
- GFP_KERNEL);
- if (!accel_dev->pf.vf_info)
- return -ENOMEM;
-
- if (adf_dev_up(accel_dev, false)) {
- dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
- ret = adf_enable_sriov(accel_dev);
- if (ret)
- return ret;
-
- val = 1;
- adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
- &val, ADF_DEC);
-
- return numvfs;
+ if (numvfs)
+ return adf_do_enable_sriov(accel_dev);
+ else
+ return adf_do_disable_sriov(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
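The rewritten adf_sriov_configure() reduces to a dispatch on numvfs. A sketch of the .sriov_configure contract, with hypothetical enable/disable helpers:

#include <linux/pci.h>

int demo_enable_sriov(struct pci_dev *pdev, int numvfs);	/* hypothetical */
int demo_disable_sriov(struct pci_dev *pdev);			/* hypothetical */

/* The PCI core calls .sriov_configure with the VF count written to the
 * sriov_numvfs sysfs attribute; 0 means disable, and the callback must
 * return the number of VFs enabled or a negative errno. */
static int demo_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs)
		return demo_enable_sriov(pdev, numvfs);

	return demo_disable_sriov(pdev);
}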
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 4e7f70d4049d..4fcd61ff70d1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -62,7 +62,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
break;
}
- ret = adf_dev_down(accel_dev, true);
+ ret = adf_dev_down(accel_dev);
if (ret)
return ret;
@@ -76,7 +76,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
} else if (ret) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
- adf_dev_down(accel_dev, true);
+ adf_dev_down(accel_dev);
return ret;
}
break;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index cdbb2d687b1b..a4636ec9f9ca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -13,6 +13,7 @@
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
+#include "adf_pfvf_vf_msg.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
@@ -71,10 +72,11 @@ static void adf_dev_stop_async(struct work_struct *work)
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
+ adf_vf2pf_notify_restart_complete(accel_dev);
kfree(stop_data);
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index 85bc32a9ec0e..3f5b79015400 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -23,6 +23,8 @@ struct qat_alg_buf_list {
);
struct qat_alg_buf buffers[];
} __packed;
+static_assert(offsetof(struct qat_alg_buf_list, buffers) == sizeof(struct qat_alg_buf_list_hdr),
+ "struct member likely outside of __struct_group()");
struct qat_alg_fixed_buf_list {
struct qat_alg_buf_list_hdr sgl_hdr;
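The static_assert added to qat_bl.h pins the struct layout that the hardware-descriptor code depends on. A sketch of the guard on a hypothetical demo struct:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_buf_list {
	__struct_group(demo_buf_list_hdr, hdr, __packed,
		u64 resrvd;
		u32 num_bufs;
	);
	u64 buffers[];
} __packed;

/* If a member is ever added between the group and the flexible array,
 * buffers[] no longer starts at sizeof(header) and this fails at
 * compile time instead of corrupting descriptors at run time. */
static_assert(offsetof(struct demo_buf_list, buffers) ==
	      sizeof(struct demo_buf_list_hdr),
	      "struct member likely outside of __struct_group()");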
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index ad2c64af7427..7ea40b4f6e5b 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -58,7 +58,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
- pr_err("QAT: bad argument, ae_data is NULL\n ");
+ pr_err("QAT: bad argument, ae_data is NULL\n");
return -EINVAL;
}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 40b456b8035b..2a50cce41515 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index d59cb1ba2ad5..7cb015b55122 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
- adf_dev_down(accel_dev, false);
+ adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index a48591af12d0..78217577aa54 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
select CRYPTO_DEV_MARVELL
help
This driver allows you to utilize the Marvell Cryptographic
@@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 3c5d577d8f0d..096be42e9d03 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -17,7 +17,6 @@
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
-#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
@@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = {
.count = ATOMIC_INIT(0)
};
+static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
+
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count, ret = 0;
@@ -509,44 +510,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
+ break;
+
+ case OTX_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
+ break;
+
+ case OTX_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
+ break;
+
+ case OTX_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
+ break;
+ }
+
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
+
+ if (!ctx->hashalg)
+ return 0;
+
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
- switch (ctx->mac_type) {
- case OTX_CPT_SHA1:
- ctx->hashalg = crypto_alloc_shash("sha1", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ int ss = crypto_shash_statesize(ctx->hashalg);
- case OTX_CPT_SHA256:
- ctx->hashalg = crypto_alloc_shash("sha256", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
-
- case OTX_CPT_SHA384:
- ctx->hashalg = crypto_alloc_shash("sha384", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->ipad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->ipad) {
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
- case OTX_CPT_SHA512:
- ctx->hashalg = crypto_alloc_shash("sha512", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->opad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->opad) {
+ kfree(ctx->ipad);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
}
}
- crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc) {
+ kfree(ctx->opad);
+ kfree(ctx->ipad);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
return 0;
}
@@ -602,8 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
- if (ctx->hashalg)
- crypto_free_shash(ctx->hashalg);
+ crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
}
@@ -699,7 +716,7 @@ static inline void swap_data64(void *buf, u32 len)
*dst = cpu_to_be64p(src);
}
-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@@ -707,22 +724,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX_CPT_SHA1:
- sha1 = (struct sha1_state *) in_pad;
+ sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX_CPT_SHA256:
- sha256 = (struct sha256_state *) in_pad;
+ sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX_CPT_SHA384:
case OTX_CPT_SHA512:
- sha512 = (struct sha512_state *) in_pad;
+ sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@@ -732,55 +746,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
-static int aead_hmac_init(struct crypto_aead *cipher)
+static int aead_hmac_init(struct crypto_aead *cipher,
+ struct crypto_authenc_keys *keys)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
- int authkeylen = ctx->auth_key_len;
+ int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
- int ret = 0, icount = 0;
+ int icount = 0;
+ int ret;
- ctx->sdesc = alloc_sdesc(ctx->hashalg);
- if (!ctx->sdesc)
- return -ENOMEM;
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
+ authkeylen, ctx->key);
+ if (ret)
+ return ret;
+ authkeylen = ds;
+ } else
+ memcpy(ctx->key, keys->authkey, authkeylen);
- ctx->ipad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ ctx->enc_key_len = keys->enckeylen;
+ ctx->auth_key_len = authkeylen;
- ctx->opad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
+ return keys->enckeylen ? -EINVAL : 0;
- ipad = kzalloc(state_size, GFP_KERNEL);
- if (!ipad) {
- ret = -ENOMEM;
- goto calc_fail;
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
}
- opad = kzalloc(state_size, GFP_KERNEL);
- if (!opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
- if (authkeylen > bs) {
- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
- authkeylen, ipad);
- if (ret)
- goto calc_fail;
-
- authkeylen = ds;
- } else {
- memcpy(ipad, ctx->key, authkeylen);
- }
+ ipad = ctx->ipad;
+ opad = ctx->opad;
+ memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@@ -798,7 +810,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@@ -806,25 +818,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
- if (ret)
- goto calc_fail;
-
- kfree(ipad);
- kfree(opad);
-
- return 0;
+ ret = swap_pad(ctx->mac_type, opad);
calc_fail:
- kfree(ctx->ipad);
- ctx->ipad = NULL;
- kfree(ctx->opad);
- ctx->opad = NULL;
- kfree(ipad);
- kfree(opad);
- kfree(ctx->sdesc);
- ctx->sdesc = NULL;
-
return ret;
}
@@ -832,57 +828,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- int enckeylen = 0, authkeylen = 0;
- struct rtattr *rta = (void *)key;
- int status = -EINVAL;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- goto badkey;
-
- if (keylen > OTX_CPT_MAX_KEY_SIZE)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
- memcpy(ctx->key, key, keylen);
-
- switch (enckeylen) {
- case AES_KEYSIZE_128:
- ctx->key_type = OTX_CPT_AES_128_BIT;
- break;
- case AES_KEYSIZE_192:
- ctx->key_type = OTX_CPT_AES_192_BIT;
- break;
- case AES_KEYSIZE_256:
- ctx->key_type = OTX_CPT_AES_256_BIT;
- break;
- default:
- /* Invalid key length */
- goto badkey;
- }
-
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = authkeylen;
+ struct crypto_authenc_keys authenc_keys;
+ int status;
- status = aead_hmac_init(cipher);
+ status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
if (status)
goto badkey;
- return 0;
+ status = aead_hmac_init(cipher, &authenc_keys);
+
badkey:
return status;
}
@@ -891,36 +845,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- struct rtattr *rta = (void *)key;
- int enckeylen = 0;
-
- if (!RTA_OK(rta, keylen))
- goto badkey;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (enckeylen != 0)
- goto badkey;
-
- if (keylen > OTX_CPT_MAX_KEY_SIZE)
- goto badkey;
-
- memcpy(ctx->key, key, keylen);
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = keylen;
- return 0;
-badkey:
- return -EINVAL;
+ return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@@ -1613,14 +1538,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
-static void swap_func(void *lptr, void *rptr, int size)
-{
- struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
- struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
-
- swap(*ldesc, *rdesc);
-}
-
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
enum otx_cptpf_type pf_type,
enum otx_cptvf_type engine_type,
@@ -1655,7 +1572,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
break;
case OTX_CPT_AE_TYPES:
@@ -1670,7 +1587,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
ae_devices.desc[count++].dev = pdev;
atomic_inc(&ae_devices.count);
sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
break;
default:
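
The setkey rework above (and its otx2 counterpart further down) replaces open-coded rtattr parsing with crypto_authenc_extractkeys(). A minimal sketch of the helper's contract, with a hypothetical example_setkey():

	#include <crypto/authenc.h>
	#include <linux/types.h>

	static int example_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;
		int err;

		/* Splits the authenc() key blob into its two halves:
		 * keys.authkey/keys.authkeylen (the HMAC key) and
		 * keys.enckey/keys.enckeylen (the cipher key).
		 */
		err = crypto_authenc_extractkeys(&keys, key, keylen);
		if (err)
			return err;

		/* ...program keys.enckey into the engine and derive
		 * ipad/opad from keys.authkey, as aead_hmac_init()
		 * does above...
		 */
		return 0;
	}
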
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
index 4181b5c5c356..a50b5e2f8d00 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -185,6 +185,5 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices);
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
enum otx_cptvf_type engine_type);
-void otx_cpt_callback(int status, void *arg, void *req);
#endif /* __OTX_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 1604fc58dc13..7eb0bc13994d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -11,7 +11,6 @@
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
@@ -55,6 +54,8 @@ static struct cpt_device_table se_devices = {
.count = ATOMIC_INIT(0)
};
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
+
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count;
@@ -598,40 +599,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
+ switch (ctx->mac_type) {
+ case OTX2_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
+ break;
+
+ case OTX2_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
+ break;
+ }
+
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+
+ if (ctx->hashalg) {
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc) {
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
+ }
+
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
- if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
- switch (ctx->mac_type) {
- case OTX2_CPT_SHA1:
- ctx->hashalg = crypto_alloc_shash("sha1", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
-
- case OTX2_CPT_SHA256:
- ctx->hashalg = crypto_alloc_shash("sha256", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
+ int ss = crypto_shash_statesize(ctx->hashalg);
- case OTX2_CPT_SHA384:
- ctx->hashalg = crypto_alloc_shash("sha384", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->ipad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->ipad) {
+ kfree(ctx->sdesc);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
+ }
- case OTX2_CPT_SHA512:
- ctx->hashalg = crypto_alloc_shash("sha512", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->hashalg))
- return PTR_ERR(ctx->hashalg);
- break;
+ ctx->opad = kzalloc(ss, GFP_KERNEL);
+ if (!ctx->opad) {
+ kfree(ctx->ipad);
+ kfree(ctx->sdesc);
+ crypto_free_shash(ctx->hashalg);
+ return -ENOMEM;
}
}
switch (ctx->cipher_type) {
@@ -713,8 +730,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
- if (ctx->hashalg)
- crypto_free_shash(ctx->hashalg);
+ crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
if (ctx->fbk_cipher) {
@@ -788,7 +804,7 @@ static inline void swap_data64(void *buf, u32 len)
cpu_to_be64s(src);
}
-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@@ -796,22 +812,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX2_CPT_SHA1:
- sha1 = (struct sha1_state *) in_pad;
+ sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX2_CPT_SHA256:
- sha256 = (struct sha256_state *) in_pad;
+ sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX2_CPT_SHA384:
case OTX2_CPT_SHA512:
- sha512 = (struct sha512_state *) in_pad;
+ sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@@ -821,55 +834,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
-static int aead_hmac_init(struct crypto_aead *cipher)
+static int aead_hmac_init(struct crypto_aead *cipher,
+ struct crypto_authenc_keys *keys)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
- int authkeylen = ctx->auth_key_len;
+ int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
- int ret = 0, icount = 0;
+ int icount = 0;
+ int ret;
- ctx->sdesc = alloc_sdesc(ctx->hashalg);
- if (!ctx->sdesc)
- return -ENOMEM;
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
+ authkeylen, ctx->key);
+ if (ret)
+ goto calc_fail;
- ctx->ipad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ authkeylen = ds;
+ } else
+ memcpy(ctx->key, keys->authkey, authkeylen);
- ctx->opad = kzalloc(bs, GFP_KERNEL);
- if (!ctx->opad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ ctx->enc_key_len = keys->enckeylen;
+ ctx->auth_key_len = authkeylen;
- ipad = kzalloc(state_size, GFP_KERNEL);
- if (!ipad) {
- ret = -ENOMEM;
- goto calc_fail;
- }
+ if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
+ return keys->enckeylen ? -EINVAL : 0;
- opad = kzalloc(state_size, GFP_KERNEL);
- if (!opad) {
- ret = -ENOMEM;
- goto calc_fail;
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
}
- if (authkeylen > bs) {
- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
- authkeylen, ipad);
- if (ret)
- goto calc_fail;
+ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
- authkeylen = ds;
- } else {
- memcpy(ipad, ctx->key, authkeylen);
- }
+ ipad = ctx->ipad;
+ opad = ctx->opad;
+ memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@@ -887,7 +899,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@@ -895,25 +907,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
- if (ret)
- goto calc_fail;
-
- kfree(ipad);
- kfree(opad);
-
- return 0;
+ ret = swap_pad(ctx->mac_type, opad);
calc_fail:
- kfree(ctx->ipad);
- ctx->ipad = NULL;
- kfree(ctx->opad);
- ctx->opad = NULL;
- kfree(ipad);
- kfree(opad);
- kfree(ctx->sdesc);
- ctx->sdesc = NULL;
-
return ret;
}
@@ -921,87 +917,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- int enckeylen = 0, authkeylen = 0;
- struct rtattr *rta = (void *)key;
-
- if (!RTA_OK(rta, keylen))
- return -EINVAL;
+ struct crypto_authenc_keys authenc_keys;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- return -EINVAL;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- return -EINVAL;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- return -EINVAL;
-
- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
- return -EINVAL;
-
- authkeylen = keylen - enckeylen;
- memcpy(ctx->key, key, keylen);
-
- switch (enckeylen) {
- case AES_KEYSIZE_128:
- ctx->key_type = OTX2_CPT_AES_128_BIT;
- break;
- case AES_KEYSIZE_192:
- ctx->key_type = OTX2_CPT_AES_192_BIT;
- break;
- case AES_KEYSIZE_256:
- ctx->key_type = OTX2_CPT_AES_256_BIT;
- break;
- default:
- /* Invalid key length */
- return -EINVAL;
- }
-
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = authkeylen;
-
- return aead_hmac_init(cipher);
+ return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
+ aead_hmac_init(cipher, &authenc_keys);
}
static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
- struct crypto_authenc_key_param *param;
- struct rtattr *rta = (void *)key;
- int enckeylen = 0;
-
- if (!RTA_OK(rta, keylen))
- return -EINVAL;
-
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- return -EINVAL;
-
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- return -EINVAL;
-
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (enckeylen != 0)
- return -EINVAL;
-
- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
- return -EINVAL;
-
- memcpy(ctx->key, key, keylen);
- ctx->enc_key_len = enckeylen;
- ctx->auth_key_len = keylen;
-
- return 0;
+ return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@@ -1702,14 +1628,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
-static void swap_func(void *lptr, void *rptr, int size)
-{
- struct cpt_device_desc *ldesc = lptr;
- struct cpt_device_desc *rdesc = rptr;
-
- swap(*ldesc, *rdesc);
-}
-
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices)
{
@@ -1739,7 +1657,7 @@ int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
- compare_func, swap_func);
+ compare_func, NULL);
unlock:
mutex_unlock(&mutex);
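
Both swap_func() removals above rely on sort() accepting a NULL swap callback, in which case lib/sort.c falls back to its built-in generic swap. A minimal sketch with hypothetical names:

	#include <linux/sort.h>
	#include <linux/types.h>

	static int cmp_u32(const void *a, const void *b)
	{
		u32 x = *(const u32 *)a, y = *(const u32 *)b;

		return x < y ? -1 : (x > y ? 1 : 0);
	}

	static void example_sort(u32 *vals, size_t n)
	{
		/* NULL swap_func: sort() uses its internal swap, so a
		 * callback that merely swap()s two elements is dead
		 * weight.
		 */
		sort(vals, n, sizeof(*vals), cmp_u32, NULL);
	}
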
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 251e088a53df..b11545cc5cb7 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1353,6 +1353,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
ahash->setkey = n2_hmac_async_setkey;
base = &ahash->halg.base;
+ err = -EINVAL;
if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
p->child_alg) >= CRYPTO_MAX_ALG_NAME)
goto out_free_p;
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 25fa70b2112c..887d4ce3cb49 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -157,6 +157,7 @@ struct nx842_crypto_header_group {
} __packed;
struct nx842_crypto_header {
+ /* New members MUST be added within the struct_group() macro below. */
struct_group_tagged(nx842_crypto_header_hdr, hdr,
__be16 magic; /* NX842_CRYPTO_MAGIC */
__be16 ignore; /* decompressed end bytes to ignore */
@@ -164,6 +165,8 @@ struct nx842_crypto_header {
);
struct nx842_crypto_header_group group[];
} __packed;
+static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
+ "struct member likely outside of struct_group_tagged()");
#define NX842_CRYPTO_GROUP_MAX (0x20)
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index c670d7d0c11e..09419e79e34c 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -36,14 +36,14 @@ struct qcom_rng {
void __iomem *base;
struct clk *clk;
struct hwrng hwrng;
- struct qcom_rng_of_data *of_data;
+ struct qcom_rng_match_data *match_data;
};
struct qcom_rng_ctx {
struct qcom_rng *rng;
};
-struct qcom_rng_of_data {
+struct qcom_rng_match_data {
bool skip_init;
bool hwrng_support;
};
@@ -155,7 +155,7 @@ static int qcom_rng_init(struct crypto_tfm *tfm)
ctx->rng = qcom_rng_dev;
- if (!ctx->rng->of_data->skip_init)
+ if (!ctx->rng->match_data->skip_init)
return qcom_rng_enable(ctx->rng);
return 0;
@@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev)
if (IS_ERR(rng->clk))
return PTR_ERR(rng->clk);
- rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev);
+ rng->match_data = (struct qcom_rng_match_data *)device_get_match_data(&pdev->dev);
qcom_rng_dev = rng;
ret = crypto_register_rng(&qcom_rng_alg);
@@ -206,7 +206,7 @@ static int qcom_rng_probe(struct platform_device *pdev)
return ret;
}
- if (rng->of_data->hwrng_support) {
+ if (rng->match_data->hwrng_support) {
rng->hwrng.name = "qcom_hwrng";
rng->hwrng.read = qcom_hwrng_read;
rng->hwrng.quality = QCOM_TRNG_QUALITY;
@@ -231,31 +231,31 @@ static void qcom_rng_remove(struct platform_device *pdev)
qcom_rng_dev = NULL;
}
-static struct qcom_rng_of_data qcom_prng_of_data = {
+static struct qcom_rng_match_data qcom_prng_match_data = {
.skip_init = false,
.hwrng_support = false,
};
-static struct qcom_rng_of_data qcom_prng_ee_of_data = {
+static struct qcom_rng_match_data qcom_prng_ee_match_data = {
.skip_init = true,
.hwrng_support = false,
};
-static struct qcom_rng_of_data qcom_trng_of_data = {
+static struct qcom_rng_match_data qcom_trng_match_data = {
.skip_init = true,
.hwrng_support = true,
};
static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
- { .id = "QCOM8160", .driver_data = 1 },
+ { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_match_data },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
- { .compatible = "qcom,prng", .data = &qcom_prng_of_data },
- { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_of_data },
- { .compatible = "qcom,trng", .data = &qcom_trng_of_data },
+ { .compatible = "qcom,prng", .data = &qcom_prng_match_data },
+ { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_match_data },
+ { .compatible = "qcom,trng", .data = &qcom_trng_match_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
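
The of_device_get_match_data() to device_get_match_data() switch above is what lets one match-data struct serve both firmware tables: the OF entries carry it in .data and the ACPI entry now carries it in .driver_data. A minimal sketch, assuming a hypothetical example_match_data:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct example_match_data {
		bool skip_init;
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_match_data *data;

		/* Resolves .data from an OF match or .driver_data from
		 * an ACPI match through the same fwnode-agnostic call.
		 */
		data = device_get_match_data(&pdev->dev);
		if (!data)
			return -ENODEV;

		return 0;
	}
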
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 99b5c25be079..29c192f20082 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,7 +6,7 @@ menuconfig CXL_BUS
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
- select NUMA_KEEP_MEMINFO if (NUMA && X86)
+ select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index a88744244149..d656e4c0eb84 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -30,7 +30,7 @@ config DEV_DAX_PMEM
config DEV_DAX_HMEM
tristate "HMEM DAX: direct access to 'specific purpose' memory"
depends on EFI_SOFT_RESERVE
- select NUMA_KEEP_MEMINFO if (NUMA && X86)
+ select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
default DEV_DAX
help
EFI 2.8 platforms, and others, may advertise 'specific purpose'
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 2051e4f73c8a..9c1a729cd77e 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -235,9 +235,9 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
int id;
struct dev_dax *dev_dax = filp->private_data;
- dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) order:%d\n", current->comm,
- (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
- vmf->vma->vm_start, vmf->vma->vm_end, order);
+ dev_dbg(&dev_dax->dev, "%s: op=%s addr=%#lx order=%d\n", current->comm,
+ (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
+ vmf->address & ~((1UL << (order + PAGE_SHIFT)) - 1), order);
id = dax_read_lock();
if (order == 0)
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 00118580905a..7d06c476d8e9 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -160,7 +160,6 @@ static void exynos_bus_exit(struct device *dev)
platform_device_unregister(bus->icc_pdev);
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
dev_pm_opp_put_regulators(bus->opp_token);
}
@@ -171,7 +170,6 @@ static void exynos_bus_passive_exit(struct device *dev)
platform_device_unregister(bus->icc_pdev);
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
}
static int exynos_bus_parent_parse_of(struct device_node *np,
@@ -247,23 +245,16 @@ static int exynos_bus_parse_of(struct device_node *np,
int ret;
/* Get the clock to provide each bus with source clock */
- bus->clk = devm_clk_get(dev, "bus");
- if (IS_ERR(bus->clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(bus->clk);
- }
-
- ret = clk_prepare_enable(bus->clk);
- if (ret < 0) {
- dev_err(dev, "failed to get enable clock\n");
- return ret;
- }
+ bus->clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus->clk))
+ return dev_err_probe(dev, PTR_ERR(bus->clk),
+ "failed to get bus clock\n");
/* Get the freq and voltage from OPP table to scale the bus freq */
ret = dev_pm_opp_of_add_table(dev);
if (ret < 0) {
dev_err(dev, "failed to get OPP table\n");
- goto err_clk;
+ return ret;
}
rate = clk_get_rate(bus->clk);
@@ -281,8 +272,6 @@ static int exynos_bus_parse_of(struct device_node *np,
err_opp:
dev_pm_opp_of_remove_table(dev);
-err_clk:
- clk_disable_unprepare(bus->clk);
return ret;
}
@@ -453,7 +442,6 @@ static int exynos_bus_probe(struct platform_device *pdev)
err:
dev_pm_opp_of_remove_table(dev);
- clk_disable_unprepare(bus->clk);
err_reg:
dev_pm_opp_put_regulators(bus->opp_token);
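
The conversion above leans on two devres helpers: devm_clk_get_enabled(), which combines devm_clk_get() with clk_prepare_enable() and undoes both automatically on driver detach, and dev_err_probe(), which logs (deferral-aware) and returns the error code in one statement. A minimal sketch of the resulting probe idiom (example_parse_clk is hypothetical):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_parse_clk(struct device *dev, struct clk **clk)
	{
		/* Enabled here, disabled/unprepared/put automatically at
		 * detach: error paths and ->exit() callbacks need no
		 * clk_disable_unprepare() of their own, which is exactly
		 * what the hunks above delete.
		 */
		*clk = devm_clk_get_enabled(dev, "bus");
		if (IS_ERR(*clk))
			return dev_err_probe(dev, PTR_ERR(*clk),
					     "failed to get bus clock\n");

		return 0;
	}
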
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 5dbc1e56ec08..2e4e981446fa 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -58,4 +58,5 @@ static void __exit devfreq_performance_exit(void)
return;
}
module_exit(devfreq_performance_exit);
+MODULE_DESCRIPTION("DEVFREQ Performance governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 4746af2435b0..f059e8814804 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -58,4 +58,5 @@ static void __exit devfreq_powersave_exit(void)
return;
}
module_exit(devfreq_powersave_exit);
+MODULE_DESCRIPTION("DEVFREQ Powersave governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index d57b82a2b570..c23435736367 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -140,4 +140,5 @@ static void __exit devfreq_simple_ondemand_exit(void)
return;
}
module_exit(devfreq_simple_ondemand_exit);
+MODULE_DESCRIPTION("DEVFREQ Simple On-demand governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index d69672ccacc4..d1aa6806b683 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -153,4 +153,5 @@ static void __exit devfreq_userspace_exit(void)
return;
}
module_exit(devfreq_userspace_exit);
+MODULE_DESCRIPTION("DEVFREQ Userspace governor");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index 86850b7dea09..49798f542d68 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev)
struct imx_bus *priv = dev_get_drvdata(dev);
const char *icc_driver_name;
- if (!of_get_property(dev->of_node, "#interconnect-cells", NULL))
+ if (!of_property_present(dev->of_node, "#interconnect-cells"))
return 0;
if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
dev_warn(dev, "imx interconnect drivers disabled\n");
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index c74ac197d5fe..8a08ffde31e7 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -144,37 +144,38 @@ const struct dma_fence_ops dma_fence_array_ops = {
EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * dma_fence_array_create - Create a custom fence array
+ * dma_fence_array_alloc - Allocate a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ *
+ * Return: dma fence array on success, NULL on failure
+ */
+struct dma_fence_array *dma_fence_array_alloc(int num_fences)
+{
+ struct dma_fence_array *array;
+
+ return kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
+}
+EXPORT_SYMBOL(dma_fence_array_alloc);
+
+/**
+ * dma_fence_array_init - Init a custom fence array
+ * @array: [in] dma fence array to arm
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a dma_fence_array object and initialize the base fence with
- * dma_fence_init().
- * In case of error it returns NULL.
- *
- * The caller should allocate the fences array with num_fences size
- * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and dma_fence_put() is used on each fence on release.
- *
- * If @signal_on_any is true the fence array signals if any fence in the array
- * signals, otherwise it signals when all fences in the array signal.
+ * Implementation of @dma_fence_array_create without allocation. Useful to init
+ * a preallocated dma fence array in the path of reclaim or dma fence signaling.
*/
-struct dma_fence_array *dma_fence_array_create(int num_fences,
- struct dma_fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct dma_fence_array *array;
-
WARN_ON(!num_fences || !fences);
- array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
- if (!array)
- return NULL;
-
array->num_fences = num_fences;
spin_lock_init(&array->lock);
@@ -200,6 +201,41 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
*/
while (num_fences--)
WARN_ON(dma_fence_is_container(fences[num_fences]));
+}
+EXPORT_SYMBOL(dma_fence_array_init);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_alloc(num_fences);
+ if (!array)
+ return NULL;
+
+ dma_fence_array_init(array, num_fences, fences,
+ context, seqno, signal_on_any);
return array;
}
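
The alloc/init split above lets a caller move the allocation out of the fence-signaling critical path. A minimal sketch (fences and num_fences are assumed to be prepared by the caller; ownership of the fences array passes to the dma_fence_array as documented above):

	#include <linux/dma-fence-array.h>

	static struct dma_fence_array *example_prealloc(int num_fences)
	{
		/* May allocate: call before entering reclaim or a
		 * dma-fence signaling section.
		 */
		return dma_fence_array_alloc(num_fences);
	}

	static void example_arm(struct dma_fence_array *array,
				struct dma_fence **fences, int num_fences)
	{
		/* No allocation here, so this is safe in the path of
		 * reclaim or dma fence signaling.
		 */
		dma_fence_array_init(array, num_fences, fences,
				     dma_fence_context_alloc(1), 1, false);
	}
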
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 2298ca5e112e..3cbe87d4a464 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -7,17 +7,15 @@
*/
#include <linux/cdev.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
#include <linux/err.h>
-#include <linux/xarray.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/nospec.h>
-#include <linux/uaccess.h>
#include <linux/syscalls.h>
-#include <linux/dma-heap.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
#include <uapi/linux/dma-heap.h>
#define DEVNAME "dma_heap"
@@ -28,9 +26,10 @@
* struct dma_heap - represents a dmabuf heap in the system
* @name: used for debugging/device-node name
* @ops: ops struct for this heap
- * @heap_devt heap device node
- * @list list head connecting to list of heaps
- * @heap_cdev heap char device
+ * @priv: private data for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
*
* Represents a heap of memory from which buffers can be made.
*/
@@ -193,11 +192,11 @@ static const struct file_operations dma_heap_fops = {
};
/**
- * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * dma_heap_get_drvdata - get per-heap driver data
* @heap: DMA-Heap to retrieve private data for
*
* Returns:
- * The per-subdriver data for the heap.
+ * The per-heap data for the heap.
*/
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
@@ -205,8 +204,8 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
}
/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
+ * dma_heap_get_name - get heap name
+ * @heap: DMA-Heap to retrieve the name of
*
* Returns:
* The char* for the heap name.
@@ -216,6 +215,10 @@ const char *dma_heap_get_name(struct dma_heap *heap)
return heap->name;
}
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;
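
For context on the kernel-doc cleaned up above, a minimal registration sketch; example_heap_allocate and drvdata are hypothetical, and the ops signature is assumed from this kernel's <linux/dma-heap.h>:

	#include <linux/dma-heap.h>
	#include <linux/err.h>

	static struct dma_buf *example_heap_allocate(struct dma_heap *heap,
						     unsigned long len,
						     u32 fd_flags,
						     u64 heap_flags);

	static const struct dma_heap_ops example_heap_ops = {
		.allocate = example_heap_allocate,
	};

	static int example_register_heap(void *drvdata)
	{
		struct dma_heap_export_info exp_info = {
			.name = "example",
			.ops = &example_heap_ops,
			.priv = drvdata, /* read back later with
					  * dma_heap_get_drvdata() */
		};
		struct dma_heap *heap;

		heap = dma_heap_add(&exp_info);
		if (IS_ERR(heap))
			return PTR_ERR(heap);

		return 0;
	}
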
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index c384004b918e..93be88b805fe 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -165,7 +165,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct cma_heap_buffer *buffer = vma->vm_private_data;
- if (vmf->pgoff > buffer->pagecount)
+ if (vmf->pgoff >= buffer->pagecount)
return VM_FAULT_SIGBUS;
return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index 6a1bfcd0cc21..cf2ce3744ce6 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -402,7 +402,7 @@ static int test_wait_timeout(void *arg)
if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
if (timer_pending(&wt.timer)) {
- pr_notice("Timer did not fire within the jiffie!\n");
+ pr_notice("Timer did not fire within the jiffy!\n");
err = 0; /* not our fault! */
} else {
pr_err("Wait reported incomplete after timeout\n");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cc0a62c34861..d9ec1e69e428 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -369,6 +369,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config LOONGSON1_APB_DMA
+ tristate "Loongson1 APB DMA support"
+ depends on MACH_LOONGSON32 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the APB DMA controller in Loongson1 SoCs,
+ which is required by Loongson1 NAND and audio support.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -378,6 +387,15 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LPC32XX_DMAMUX
+ bool "NXP LPC32xx DMA MUX for PL080"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on OF && AMBA_PL08X
+ select MFD_SYSCON
+ help
+ Support for PL080 multiplexed DMA request lines on
+	  the LPC32XX platform.
+
config LS2X_APB_DMA
tristate "Loongson LS2X APB DMA support"
depends on LOONGARCH || COMPILE_TEST
@@ -716,6 +734,8 @@ config XILINX_ZYNQMP_DPDMA
display driver.
# driver files
+source "drivers/dma/amd/Kconfig"
+
source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 374ea98faf43..ad6a03c052ec 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -49,7 +49,9 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
@@ -83,6 +85,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/
+obj-y += amd/
obj-y += mediatek/
obj-y += qcom/
obj-y += stm32/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5906eae26e2a..a58a1600dd65 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -112,7 +112,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
}
/**
- * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
* @adev: ACPI device to match with
* @adma: struct acpi_dma of the given DMA controller
*
@@ -305,7 +305,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* found.
*
* Return:
- * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
*/
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
struct acpi_dma_spec *dma_spec)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0968176f323d..e6a6566b309e 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -153,7 +153,7 @@ struct msgdma_extended_desc {
/**
* struct msgdma_sw_desc - implements a sw descriptor
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @node: node to move from the free list to the tx list
* @tx_list: transmit list node
*/
@@ -511,7 +511,7 @@ static void msgdma_copy_one(struct msgdma_device *mdev,
* of the DMA controller. The descriptor will get flushed to the
* FIFO, once the last word (control word) is written. Since we
* are not 100% sure that memcpy() writes all word in the "correct"
- * oder (address from low to high) on all architectures, we make
+ * order (address from low to high) on all architectures, we make
* sure this control word is written last by single coding it and
* adding some write-barriers here.
*/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 73a5cfb4da8a..38cdbca59485 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2006 ARM Ltd.
* Copyright (c) 2010 ST-Ericsson SA
- * Copyirght (c) 2017 Linaro Ltd.
+ * Copyright (c) 2017 Linaro Ltd.
*
* Author: Peter Pearse <peter.pearse@arm.com>
* Author: Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
new file mode 100644
index 000000000000..7d1f51d69675
--- /dev/null
+++ b/drivers/dma/amd/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config AMD_QDMA
+ tristate "AMD Queue-based DMA"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for the AMD Queue-based DMA subsystem. The primary
+ mechanism to transfer data using the QDMA is for the QDMA engine to
+ operate on instructions (descriptors) provided by the host operating
+ system. Using the descriptors, the QDMA can move data in either the
+ Host to Card (H2C) direction or the Card to Host (C2H) direction.
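
Since the help text describes descriptor-driven H2C/C2H transfers, a consumer-side sketch may help. The queues are exposed as ordinary dmaengine channels; the channel name "h2c" and all example_* names below are assumptions for illustration, not part of this patch:

	#include <linux/dmaengine.h>

	static int example_h2c_xfer(struct device *dev,
				    dma_addr_t dma_addr, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;

		chan = dma_request_chan(dev, "h2c");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* One descriptor moving an already-mapped buffer in the
		 * Host to Card (H2C) direction.
		 */
		tx = dmaengine_prep_slave_single(chan, dma_addr, len,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -EIO;
		}

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);

		return 0;
	}
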
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
new file mode 100644
index 000000000000..37212be9364f
--- /dev/null
+++ b/drivers/dma/amd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/qdma/Makefile b/drivers/dma/amd/qdma/Makefile
new file mode 100644
index 000000000000..011268fef377
--- /dev/null
+++ b/drivers/dma/amd/qdma/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += amd-qdma.o
+
+amd-qdma-$(CONFIG_AMD_QDMA) := qdma.o qdma-comm-regs.o
diff --git a/drivers/dma/amd/qdma/qdma-comm-regs.c b/drivers/dma/amd/qdma/qdma-comm-regs.c
new file mode 100644
index 000000000000..9162f9d367cc
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma-comm-regs.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_REGS_DEF_H
+#define __QDMA_REGS_DEF_H
+
+#include "qdma.h"
+
+const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX] = {
+ [QDMA_REGO_CTXT_DATA] = QDMA_REGO(0x804, 8),
+ [QDMA_REGO_CTXT_CMD] = QDMA_REGO(0x844, 1),
+ [QDMA_REGO_CTXT_MASK] = QDMA_REGO(0x824, 8),
+ [QDMA_REGO_MM_H2C_CTRL] = QDMA_REGO(0x1004, 1),
+ [QDMA_REGO_MM_C2H_CTRL] = QDMA_REGO(0x1204, 1),
+ [QDMA_REGO_QUEUE_COUNT] = QDMA_REGO(0x120, 1),
+ [QDMA_REGO_RING_SIZE] = QDMA_REGO(0x204, 1),
+ [QDMA_REGO_H2C_PIDX] = QDMA_REGO(0x18004, 1),
+ [QDMA_REGO_C2H_PIDX] = QDMA_REGO(0x18008, 1),
+ [QDMA_REGO_INTR_CIDX] = QDMA_REGO(0x18000, 1),
+ [QDMA_REGO_FUNC_ID] = QDMA_REGO(0x12c, 1),
+ [QDMA_REGO_ERR_INT] = QDMA_REGO(0xb04, 1),
+ [QDMA_REGO_ERR_STAT] = QDMA_REGO(0x248, 1),
+};
+
+const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX] = {
+ /* QDMA_REGO_CTXT_DATA fields */
+ [QDMA_REGF_IRQ_ENABLE] = QDMA_REGF(53, 53),
+ [QDMA_REGF_WBK_ENABLE] = QDMA_REGF(52, 52),
+ [QDMA_REGF_WBI_CHECK] = QDMA_REGF(34, 34),
+ [QDMA_REGF_IRQ_ARM] = QDMA_REGF(16, 16),
+ [QDMA_REGF_IRQ_VEC] = QDMA_REGF(138, 128),
+ [QDMA_REGF_IRQ_AGG] = QDMA_REGF(139, 139),
+ [QDMA_REGF_WBI_INTVL_ENABLE] = QDMA_REGF(35, 35),
+ [QDMA_REGF_MRKR_DISABLE] = QDMA_REGF(62, 62),
+ [QDMA_REGF_QUEUE_ENABLE] = QDMA_REGF(32, 32),
+ [QDMA_REGF_QUEUE_MODE] = QDMA_REGF(63, 63),
+ [QDMA_REGF_DESC_BASE] = QDMA_REGF(127, 64),
+ [QDMA_REGF_DESC_SIZE] = QDMA_REGF(49, 48),
+ [QDMA_REGF_RING_ID] = QDMA_REGF(47, 44),
+ [QDMA_REGF_QUEUE_BASE] = QDMA_REGF(11, 0),
+ [QDMA_REGF_QUEUE_MAX] = QDMA_REGF(44, 32),
+ [QDMA_REGF_FUNCTION_ID] = QDMA_REGF(24, 17),
+ [QDMA_REGF_INTR_AGG_BASE] = QDMA_REGF(66, 15),
+ [QDMA_REGF_INTR_VECTOR] = QDMA_REGF(11, 1),
+ [QDMA_REGF_INTR_SIZE] = QDMA_REGF(69, 67),
+ [QDMA_REGF_INTR_VALID] = QDMA_REGF(0, 0),
+ [QDMA_REGF_INTR_COLOR] = QDMA_REGF(14, 14),
+ [QDMA_REGF_INTR_FUNCTION_ID] = QDMA_REGF(125, 114),
+ /* QDMA_REGO_CTXT_CMD fields */
+ [QDMA_REGF_CMD_INDX] = QDMA_REGF(19, 7),
+ [QDMA_REGF_CMD_CMD] = QDMA_REGF(6, 5),
+ [QDMA_REGF_CMD_TYPE] = QDMA_REGF(4, 1),
+ [QDMA_REGF_CMD_BUSY] = QDMA_REGF(0, 0),
+ /* QDMA_REGO_QUEUE_COUNT fields */
+ [QDMA_REGF_QUEUE_COUNT] = QDMA_REGF(11, 0),
+ /* QDMA_REGO_ERR_INT fields */
+ [QDMA_REGF_ERR_INT_FUNC] = QDMA_REGF(11, 0),
+ [QDMA_REGF_ERR_INT_VEC] = QDMA_REGF(22, 12),
+ [QDMA_REGF_ERR_INT_ARM] = QDMA_REGF(24, 24),
+};
+
+#endif /* __QDMA_REGS_DEF_H */
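
The QDMA_REGF(msb, lsb) entries above name absolute bit positions inside the 256-bit (8 x u32) context image, so a field may straddle word boundaries: QDMA_REGF_INTR_AGG_BASE = QDMA_REGF(66, 15), for instance, spans bits 15..31 of word 0, all of word 1, and bits 0..2 of word 2. That multi-word case is what the qdma_get_field()/qdma_set_field() helpers in qdma.c below exist for. A minimal sketch, written as if those static helpers were visible here:

	static void example_prep(const struct qdma_device *qdev, u64 agg_base)
	{
		u32 ctxt[QDMA_CTXT_REGMAP_LEN] = {};

		/* The 52-bit value lands across three words: bits 15..31
		 * of ctxt[0], all of ctxt[1], and bits 0..2 of ctxt[2].
		 */
		qdma_set_field(qdev, ctxt, QDMA_REGF_INTR_AGG_BASE,
			       agg_base);
	}
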
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
new file mode 100644
index 000000000000..b0a1f3ad851b
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-map-ops.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_qdma.h>
+#include <linux/regmap.h>
+
+#include "qdma.h"
+
+#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
+#define QDMA_REG_OFF(d, r) ((d)->roffs[r].off)
+
+/* MMIO regmap config for all QDMA registers */
+static const struct regmap_config qdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
+{
+ return container_of(chan, struct qdma_queue, vchan.chan);
+}
+
+static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
+}
+
+static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
+{
+ u32 idx;
+
+ idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
+ qdev->qintr_ring_idx %= qdev->qintr_ring_num;
+
+ return idx;
+}
+
+static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
+ enum qdma_reg_fields field)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit, hi_bit;
+ u64 value = 0, mask;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+
+ if (low_pos == hi_pos) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, low_bit);
+ value = (data[low_pos] & mask) >> low_bit;
+ } else if (hi_pos == low_pos + 1) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = low_bit + (f->msb - f->lsb);
+ value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
+ data[low_pos];
+ mask = GENMASK_ULL(hi_bit, low_bit);
+ value = (value & mask) >> low_bit;
+ } else {
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, 0);
+ value = data[hi_pos] & mask;
+ low_bit = f->msb - f->lsb - hi_bit;
+ value <<= low_bit;
+ low_bit -= 32;
+ value |= (u64)data[hi_pos - 1] << low_bit;
+ mask = GENMASK(31, 32 - low_bit);
+ value |= (data[hi_pos - 2] & mask) >> low_bit;
+ }
+
+ return value;
+}
+
+static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
+ enum qdma_reg_fields field, u64 value)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+
+ data[low_pos++] |= value << low_bit;
+ if (low_pos <= hi_pos)
+ data[low_pos++] |= (u32)(value >> (32 - low_bit));
+ if (low_pos <= hi_pos)
+ data[low_pos] |= (u32)(value >> (64 - low_bit));
+}
+
+static inline int qdma_reg_write(const struct qdma_device *qdev,
+ const u32 *data, enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_write(qdev->regmap, r->off, *data);
+
+ return ret;
+}
+
+static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
+ enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_read(qdev->regmap, r->off, data);
+
+ return ret;
+}
+
+static int qdma_context_cmd_execute(const struct qdma_device *qdev,
+ enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index)
+{
+ u32 value = 0;
+ int ret;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);
+
+ ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(qdev->regmap,
+ QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
+ value,
+ !qdma_get_field(qdev, &value,
+ QDMA_REGF_CMD_BUSY),
+ QDMA_POLL_INTRVL_US,
+ QDMA_POLL_TIMEOUT_US);
+ if (ret) {
+ qdma_err(qdev, "Context command execution timed out");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdma_context_write_data(const struct qdma_device *qdev,
+ const u32 *data)
+{
+ u32 mask[QDMA_CTXT_REGMAP_LEN];
+ int ret;
+
+ memset(mask, ~0, sizeof(mask));
+
+ ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
+ if (ret)
+ return ret;
+
+ ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);
+
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
+ qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
+}
+
+static void qdma_prep_intr_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_intr *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
+}
+
+static void qdma_prep_fmap_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_fmap *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
+}
+
+/*
+ * Program the indirect context register space
+ *
+ * Once the queue is enabled, context is dynamically updated by hardware. Any
+ * modification of the context through this API when the queue is enabled can
+ * result in unexpected behavior. Reading the context when the queue is enabled
+ * is not recommended as it can result in reduced performance.
+ */
+static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
+{
+ int ret;
+
+ mutex_lock(&qdev->ctxt_lock);
+ if (cmd == QDMA_CTXT_WRITE) {
+ ret = qdma_context_write_data(qdev, ctxt);
+ if (ret)
+ goto failed;
+ }
+
+ ret = qdma_context_cmd_execute(qdev, type, cmd, index);
+ if (ret)
+ goto failed;
+
+ if (cmd == QDMA_CTXT_READ) {
+ ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&qdev->ctxt_lock);
+
+ return ret;
+}
+
+static int qdma_check_queue_status(struct qdma_device *qdev,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
+ if (ret)
+ return ret;
+
+ status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
+ if (status) {
+ qdma_err(qdev, "queue %d already in use", qid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qdma_clear_queue_context(const struct qdma_queue *queue)
+{
+ enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH, };
+ enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH, };
+ struct qdma_device *qdev = queue->qdev;
+ enum qdma_ctxt_type *type;
+ int ret, num, i;
+
+ if (queue->dir == DMA_MEM_TO_DEV) {
+ type = h2c_types;
+ num = ARRAY_SIZE(h2c_types);
+ } else {
+ type = c2h_types;
+ num = ARRAY_SIZE(c2h_types);
+ }
+ for (i = 0; i < num; i++) {
+ ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
+ queue->qid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_setup_fmap_context(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_ctxt_fmap fmap;
+ int ret;
+
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
+ qdev->fid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clearing context");
+ return ret;
+ }
+
+ fmap.qbase = 0;
+ fmap.qmax = qdev->chan_num * 2;
+ qdma_prep_fmap_context(qdev, &fmap, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
+ qdev->fid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup fmap, ret %d", ret);
+
+ return ret;
+}
+
+static int qdma_setup_queue_context(struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *sw_desc,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+
+ qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
+ /* Setup SW descriptor context */
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);
+
+ return ret;
+}
+
+/*
+ * Enable or disable memory-mapped DMA engines
+ * 1: enable, 0: disable
+ */
+static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
+{
+ int ret;
+
+ ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
+ ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);
+
+ return ret;
+}
+
+static int qdma_get_hw_info(struct qdma_device *qdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ u32 value = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
+ if (ret)
+ return ret;
+
+ value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
+ if (pdata->max_mm_channels * 2 > value) {
+ qdma_err(qdev, "not enough hw queues %d", value);
+ return -EINVAL;
+ }
+ qdev->chan_num = pdata->max_mm_channels;
+
+ ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
+ if (ret)
+ return ret;
+
+ qdma_info(qdev, "max channel %d, function id %d",
+ qdev->chan_num, qdev->fid);
+
+ return 0;
+}
+
+static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->pidx_reg,
+ pidx | QDMA_QUEUE_ARM_BIT);
+}
+
+static inline int qdma_update_cidx(const struct qdma_queue *queue,
+ u16 ridx, u16 cidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
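+ /* upper half-word: interrupt ring index; lower half-word: new CIDX */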
+ return regmap_write(qdev->regmap, queue->cidx_reg,
+ ((u32)ridx << 16) | cidx);
+}
+
+/**
+ * qdma_free_vdesc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
+{
+ struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);
+
+ kfree(vd);
+}
+
+static int qdma_alloc_queues(struct qdma_device *qdev,
+ enum dma_transfer_direction dir)
+{
+ struct qdma_queue *q, **queues;
+ u32 i, pidx_base;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ queues = &qdev->h2c_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
+ } else {
+ queues = &qdev->c2h_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
+ }
+
+ *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
+ GFP_KERNEL);
+ if (!*queues)
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->chan_num; i++) {
+ ret = qdma_check_queue_status(qdev, dir, i);
+ if (ret)
+ return ret;
+
+ q = &(*queues)[i];
+ q->ring_size = QDMA_DEFAULT_RING_SIZE;
+ q->idx_mask = q->ring_size - 2;
+ q->qdev = qdev;
+ q->dir = dir;
+ q->qid = i;
+ q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
+ q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
+ i * QDMA_DMAP_REG_STRIDE;
+ q->vchan.desc_free = qdma_free_vdesc;
+ vchan_init(&q->vchan, &qdev->dma_dev);
+ }
+
+ return 0;
+}
+
+static int qdma_device_verify(struct qdma_device *qdev)
+{
+ u32 value;
+ int ret;
+
+ ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
+ if (ret)
+ return ret;
+
+ value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
+ if (value != QDMA_IDENTIFIER) {
+ qdma_err(qdev, "Invalid identifier");
+ return -ENODEV;
+ }
+ qdev->rfields = qdma_regfs_default;
+ qdev->roffs = qdma_regos_default;
+
+ return 0;
+}
+
+static int qdma_device_setup(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
+ int ret = 0;
+
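+ /*
+ * Walk up the parent chain to the first device that uses the default
+ * direct DMA ops, and let the QDMA platform device use the same ops.
+ */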
+ while (dev && get_dma_ops(dev))
+ dev = dev->parent;
+ if (!dev) {
+ qdma_err(qdev, "dma device not found");
+ return -EINVAL;
+ }
+ set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
+
+ ret = qdma_setup_fmap_context(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed setup fmap context");
+ return ret;
+ }
+
+ /* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
+ ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup ring %d of size %ld",
+ QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
+ return ret;
+ }
+
+ /* Enable memory-mapped DMA engine in both directions */
+ ret = qdma_sgdma_control(qdev, 1);
+ if (ret) {
+ qdma_err(qdev, "Failed to SGDMA with error %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qdma_free_queue_resources() - Free queue resources
+ * @chan: DMA channel
+ */
+static void qdma_free_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct device *dev = qdev->dma_dev.dev;
+
+ qdma_clear_queue_context(queue);
+ vchan_free_chan_resources(&queue->vchan);
+ dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ queue->desc_base, queue->dma_desc_base);
+}
+
+/**
+ * qdma_alloc_queue_resources() - Allocate queue resources
+ * @chan: DMA channel
+ */
+static int qdma_alloc_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct qdma_ctxt_sw_desc desc;
+ size_t size;
+ int ret;
+
+ ret = qdma_clear_queue_context(queue);
+ if (ret)
+ return ret;
+
+ size = queue->ring_size * QDMA_MM_DESC_SIZE;
+ queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ &queue->dma_desc_base,
+ GFP_KERNEL);
+ if (!queue->desc_base) {
+ qdma_err(qdev, "Failed to allocate descriptor ring");
+ return -ENOMEM;
+ }
+
+ /* Setup SW descriptor queue context for DMA memory map */
+ desc.vec = qdma_get_intr_ring_idx(qdev);
+ desc.desc_base = queue->dma_desc_base;
+ ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
+ chan->name);
+ dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ queue->dma_desc_base);
+ return ret;
+ }
+
+ queue->pidx = 0;
+ queue->cidx = 0;
+
+ return 0;
+}
+
+static bool qdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_queue_info *info = param;
+
+ return info->dir == queue->dir;
+}
+
+static int qdma_xfer_start(struct qdma_queue *queue)
+{
+ struct qdma_device *qdev = queue->qdev;
+ int ret;
+
+ if (!vchan_next_desc(&queue->vchan))
+ return 0;
+
+ qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d",
+ queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
+
+ ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
+ if (ret) {
+ qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
+ queue->pidx, CHAN_STR(queue), queue->qid);
+ }
+
+ return ret;
+}
+
+static void qdma_issue_pending(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->vchan.lock, flags);
+ if (vchan_issue_pending(&queue->vchan)) {
+ if (queue->submitted_vdesc) {
+ queue->issued_vdesc = queue->submitted_vdesc;
+ queue->submitted_vdesc = NULL;
+ }
+ qdma_xfer_start(queue);
+ }
+
+ spin_unlock_irqrestore(&queue->vchan.lock, flags);
+}
+
+static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
+{
+ struct qdma_mm_desc *desc;
+
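+ /* the ring is full when advancing pidx would collide with cidx */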
+ if (((q->pidx + 1) & q->idx_mask) == q->cidx)
+ return NULL;
+
+ desc = q->desc_base + q->pidx;
+ q->pidx = (q->pidx + 1) & q->idx_mask;
+
+ return desc;
+}
+
+static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
+{
+ struct qdma_mm_desc *desc;
+ struct scatterlist *sg;
+ u64 addr, *src, *dst;
+ u32 rest, len;
+ int ret = 0;
+ u32 i;
+
+ if (!vdesc->sg_len)
+ return 0;
+
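+ /*
+ * One side of each hardware descriptor is the fixed device address,
+ * the other side walks the scatterlist; point src/dst at the right
+ * one here so the loop below stays direction-agnostic.
+ */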
+ if (q->dir == DMA_MEM_TO_DEV) {
+ dst = &vdesc->dev_addr;
+ src = &addr;
+ } else {
+ dst = &addr;
+ src = &vdesc->dev_addr;
+ }
+
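+ /* split each SG entry into descriptors of at most QDMA_MM_DESC_MAX_LEN bytes */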
+ for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
+ addr = sg_dma_address(sg) + vdesc->sg_off;
+ rest = sg_dma_len(sg) - vdesc->sg_off;
+ while (rest) {
+ len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
+ desc = qdma_get_desc(q);
+ if (!desc) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+ desc->len = cpu_to_le32(len);
+
+ vdesc->dev_addr += len;
+ vdesc->sg_off += len;
+ vdesc->pending_descs++;
+ addr += len;
+ rest -= len;
+ }
+ vdesc->sg_off = 0;
+ }
+out:
+ vdesc->sg_len -= i;
+ vdesc->pidx = q->pidx;
+ return ret;
+}
+
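+/*
+ * Push virtual descriptors from the issued list, then from the submitted
+ * list, into the hardware ring until it fills up. The last descriptor
+ * handled in each list is recorded so the next call resumes from there.
+ */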
+static void qdma_fill_pending_vdesc(struct qdma_queue *q)
+{
+ struct virt_dma_chan *vc = &q->vchan;
+ struct qdma_mm_vdesc *vdesc = NULL;
+ struct virt_dma_desc *vd;
+ int ret;
+
+ if (!list_empty(&vc->desc_issued)) {
+ vd = &q->issued_vdesc->vdesc;
+ list_for_each_entry_from(vd, &vc->desc_issued, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret) {
+ q->issued_vdesc = vdesc;
+ return;
+ }
+ }
+ q->issued_vdesc = vdesc;
+ }
+
+ if (list_empty(&vc->desc_submitted))
+ return;
+
+ if (q->submitted_vdesc)
+ vd = &q->submitted_vdesc->vdesc;
+ else
+ vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
+
+ list_for_each_entry_from(vd, &vc->desc_submitted, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret)
+ break;
+ }
+ q->submitted_vdesc = vdesc;
+}
+
+static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct qdma_queue *q = to_qdma_queue(&vc->chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ vd = container_of(tx, struct virt_dma_desc, tx);
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
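+ /* queue the descriptor and opportunistically push it to the hardware ring */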
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ qdma_fill_pending_vdesc(q);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+ struct dma_async_tx_descriptor *tx;
+ struct qdma_mm_vdesc *vdesc;
+
+ vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
+ if (!vdesc)
+ return NULL;
+ vdesc->sgl = sgl;
+ vdesc->sg_len = sg_len;
+ if (dir == DMA_MEM_TO_DEV)
+ vdesc->dev_addr = q->cfg.dst_addr;
+ else
+ vdesc->dev_addr = q->cfg.src_addr;
+
+ tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
+ tx->tx_submit = qdma_tx_submit;
+
+ return tx;
+}
+
+static int qdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+
+ memcpy(&q->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int qdma_arm_err_intr(const struct qdma_device *qdev)
+{
+ u32 value = 0;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);
+
+ return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
+}
+
+static irqreturn_t qdma_error_isr(int irq, void *data)
+{
+ struct qdma_device *qdev = data;
+ u32 err_stat = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret) {
+ qdma_err(qdev, "read error state failed, ret %d", ret);
+ goto out;
+ }
+
+ qdma_err(qdev, "global error %d", err_stat);
+ ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret)
+ qdma_err(qdev, "clear error state failed, ret %d", ret);
+
+out:
+ qdma_arm_err_intr(qdev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qdma_queue_isr(int irq, void *data)
+{
+ struct qdma_intr_ring *intr = data;
+ struct qdma_queue *q = NULL;
+ struct qdma_device *qdev;
+ u32 index, comp_desc;
+ u64 intr_ent;
+ u8 color;
+ int ret;
+ u16 qid;
+
+ qdev = intr->qdev;
+ index = intr->cidx;
+ while (1) {
+ struct virt_dma_desc *vd;
+ struct qdma_mm_vdesc *vdesc;
+ unsigned long flags;
+ u32 cidx;
+
+ intr_ent = le64_to_cpu(intr->base[index]);
+ color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
+ if (color != intr->color)
+ break;
+
+ qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
+ if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
+ q = qdev->c2h_queues;
+ else
+ q = qdev->h2c_queues;
+ q += qid;
+
+ cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);
+
+ spin_lock_irqsave(&q->vchan.lock, flags);
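+ /* number of descriptors completed by hardware since the last update */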
+ comp_desc = (cidx - q->cidx) & q->idx_mask;
+
+ vd = vchan_next_desc(&q->vchan);
+ if (!vd)
+ goto skip;
+
+ vdesc = to_qdma_vdesc(vd);
+ while (comp_desc > vdesc->pending_descs) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ comp_desc -= vdesc->pending_descs;
+ vd = vchan_next_desc(&q->vchan);
+ vdesc = to_qdma_vdesc(vd);
+ }
+ vdesc->pending_descs -= comp_desc;
+ if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ q->cidx = cidx;
+
+ qdma_fill_pending_vdesc(q);
+ qdma_xfer_start(q);
+
+skip:
+ spin_unlock_irqrestore(&q->vchan.lock, flags);
+
+ /*
+ * Wrap the index value and flip the expected color value if
+ * interrupt aggregation PIDX has wrapped around.
+ */
+ index++;
+ index &= QDMA_INTR_RING_IDX_MASK;
+ if (!index)
+ intr->color = !intr->color;
+ }
+
+ /*
+ * Update the software interrupt aggregation ring CIDX if a valid entry
+ * was found.
+ */
+ if (q) {
+ qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);
+
+ /*
+ * Record the last read index of status descriptor from the
+ * interrupt aggregation ring.
+ */
+ intr->cidx = index;
+
+ ret = qdma_update_cidx(q, intr->ridx, index);
+ if (ret) {
+ qdma_err(qdev, "Failed to update IRQ CIDX");
+ return IRQ_NONE;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qdma_init_error_irq(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ int ret;
+ u32 vec;
+
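+ /* the vector just below the queue vectors is dedicated to error reporting */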
+ vec = qdev->queue_irq_start - 1;
+
+ ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
+ IRQF_ONESHOT, "amd-qdma-error", qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
+ return ret;
+ }
+
+ ret = qdma_arm_err_intr(qdev);
+ if (ret)
+ qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);
+
+ return ret;
+}
+
+static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct device *dev = &qdev->pdev->dev;
+ struct qdma_intr_ring *ring;
+ struct qdma_ctxt_intr intr_ctxt;
+ u32 vector;
+ int ret, i;
+
+ qdev->qintr_ring_num = qdev->queue_irq_num;
+ qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
+ sizeof(*qdev->qintr_rings),
+ GFP_KERNEL);
+ if (!qdev->qintr_rings)
+ return -ENOMEM;
+
+ vector = qdev->queue_irq_start;
+ for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
+ ring = &qdev->qintr_rings[i];
+ ring->qdev = qdev;
+ ring->msix_id = qdev->err_irq_idx + i + 1;
+ ring->ridx = i;
+ ring->color = 1;
+ ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ &ring->dev_base, GFP_KERNEL);
+ if (!ring->base) {
+ qdma_err(qdev, "Failed to alloc intr ring %d", i);
+ return -ENOMEM;
+ }
+ intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
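+ /* the context encodes the ring size as the number of 4 KiB blocks minus one */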
+ intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
+ intr_ctxt.vec = ring->msix_id;
+ intr_ctxt.valid = true;
+ intr_ctxt.color = true;
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_CLEAR, ring->ridx, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clear intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_WRITE, ring->ridx, ctxt);
+ if (ret) {
+ qdma_err(qdev, "Failed setup intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, vector, NULL,
+ qdma_queue_isr, IRQF_ONESHOT,
+ "amd-qdma-queue", ring);
+ if (ret) {
+ qdma_err(qdev, "Failed to request irq %d", vector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_intr_init(struct qdma_device *qdev)
+{
+ int ret;
+
+ ret = qdma_init_error_irq(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdmam_alloc_qintr_rings(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void amd_qdma_remove(struct platform_device *pdev)
+{
+ struct qdma_device *qdev = platform_get_drvdata(pdev);
+
+ qdma_sgdma_control(qdev, 0);
+ dma_async_device_unregister(&qdev->dma_dev);
+
+ mutex_destroy(&qdev->ctxt_lock);
+}
+
+static int amd_qdma_probe(struct platform_device *pdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct qdma_device *qdev;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qdev);
+ qdev->pdev = pdev;
+ mutex_init(&qdev->ctxt_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ qdma_err(qdev, "Failed to get IRQ resource");
+ ret = -ENODEV;
+ goto failed;
+ }
+ qdev->err_irq_idx = pdata->irq_index;
+ qdev->queue_irq_start = res->start + 1;
+ qdev->queue_irq_num = resource_size(res) - 1;
+
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ qdma_err(qdev, "Failed to map IO resource, err %d", ret);
+ goto failed;
+ }
+
+ qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &qdma_regmap_config);
+ if (IS_ERR(qdev->regmap)) {
+ ret = PTR_ERR(qdev->regmap);
+ qdma_err(qdev, "Regmap init failed, err %d", ret);
+ goto failed;
+ }
+
+ ret = qdma_device_verify(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_get_hw_info(qdev);
+ if (ret)
+ goto failed;
+
+ INIT_LIST_HEAD(&qdev->dma_dev.channels);
+
+ ret = qdma_device_setup(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_intr_init(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to initialize IRQs %d", ret);
+ goto failed_disable_engine;
+ }
+
+ dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);
+
+ qdev->dma_dev.dev = &pdev->dev;
+ qdev->dma_dev.filter.map = pdata->device_map;
+ qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
+ qdev->dma_dev.filter.fn = qdma_filter_fn;
+ qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
+ qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
+ qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
+ qdev->dma_dev.device_config = qdma_device_config;
+ qdev->dma_dev.device_issue_pending = qdma_issue_pending;
+ qdev->dma_dev.device_tx_status = dma_cookie_status;
+ qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&qdev->dma_dev);
+ if (ret) {
+ qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
+ goto failed_disable_engine;
+ }
+
+ return 0;
+
+failed_disable_engine:
+ qdma_sgdma_control(qdev, 0);
+failed:
+ mutex_destroy(&qdev->ctxt_lock);
+ qdma_err(qdev, "Failed to probe AMD QDMA driver");
+ return ret;
+}
+
+static struct platform_driver amd_qdma_driver = {
+ .driver = {
+ .name = "amd-qdma",
+ },
+ .probe = amd_qdma_probe,
+ .remove_new = amd_qdma_remove,
+};
+
+module_platform_driver(amd_qdma_driver);
+
+MODULE_DESCRIPTION("AMD QDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/amd/qdma/qdma.h b/drivers/dma/amd/qdma/qdma.h
new file mode 100644
index 000000000000..94089f1f0c11
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * DMA header for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_H
+#define __QDMA_H
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../../virt-dma.h"
+
+#define DISABLE 0
+#define ENABLE 1
+
+#define QDMA_MIN_IRQ 3
+#define QDMA_INTR_NAME_MAX_LEN 30
+#define QDMA_INTR_PREFIX "amd-qdma"
+
+#define QDMA_IDENTIFIER 0x1FD3
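+/* 1024 data descriptors plus one ring entry left to hardware for status writeback */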
+#define QDMA_DEFAULT_RING_SIZE (BIT(10) + 1)
+#define QDMA_DEFAULT_RING_ID 0
+#define QDMA_POLL_INTRVL_US 10 /* 10us */
+#define QDMA_POLL_TIMEOUT_US (500 * 1000) /* 500ms */
+#define QDMA_DMAP_REG_STRIDE 16
+#define QDMA_CTXT_REGMAP_LEN 8 /* 8 regs */
+#define QDMA_MM_DESC_SIZE 32 /* Bytes */
+#define QDMA_MM_DESC_LEN_BITS 28
+#define QDMA_MM_DESC_MAX_LEN (BIT(QDMA_MM_DESC_LEN_BITS) - 1)
+#define QDMA_MIN_DMA_ALLOC_SIZE 4096
+#define QDMA_INTR_RING_SIZE BIT(13)
+#define QDMA_INTR_RING_IDX_MASK GENMASK(9, 0)
+#define QDMA_INTR_RING_BASE(_addr) ((_addr) >> 12)
+
+#define QDMA_IDENTIFIER_REGOFF 0x0
+#define QDMA_IDENTIFIER_MASK GENMASK(31, 16)
+#define QDMA_QUEUE_ARM_BIT BIT(16)
+
+#define qdma_err(qdev, fmt, args...) \
+ dev_err(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_dbg(qdev, fmt, args...) \
+ dev_dbg(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_info(qdev, fmt, args...) \
+ dev_info(&(qdev)->pdev->dev, fmt, ##args)
+
+enum qdma_reg_fields {
+ QDMA_REGF_IRQ_ENABLE,
+ QDMA_REGF_WBK_ENABLE,
+ QDMA_REGF_WBI_CHECK,
+ QDMA_REGF_IRQ_ARM,
+ QDMA_REGF_IRQ_VEC,
+ QDMA_REGF_IRQ_AGG,
+ QDMA_REGF_WBI_INTVL_ENABLE,
+ QDMA_REGF_MRKR_DISABLE,
+ QDMA_REGF_QUEUE_ENABLE,
+ QDMA_REGF_QUEUE_MODE,
+ QDMA_REGF_DESC_BASE,
+ QDMA_REGF_DESC_SIZE,
+ QDMA_REGF_RING_ID,
+ QDMA_REGF_CMD_INDX,
+ QDMA_REGF_CMD_CMD,
+ QDMA_REGF_CMD_TYPE,
+ QDMA_REGF_CMD_BUSY,
+ QDMA_REGF_QUEUE_COUNT,
+ QDMA_REGF_QUEUE_MAX,
+ QDMA_REGF_QUEUE_BASE,
+ QDMA_REGF_FUNCTION_ID,
+ QDMA_REGF_INTR_AGG_BASE,
+ QDMA_REGF_INTR_VECTOR,
+ QDMA_REGF_INTR_SIZE,
+ QDMA_REGF_INTR_VALID,
+ QDMA_REGF_INTR_COLOR,
+ QDMA_REGF_INTR_FUNCTION_ID,
+ QDMA_REGF_ERR_INT_FUNC,
+ QDMA_REGF_ERR_INT_VEC,
+ QDMA_REGF_ERR_INT_ARM,
+ QDMA_REGF_MAX
+};
+
+enum qdma_regs {
+ QDMA_REGO_CTXT_DATA,
+ QDMA_REGO_CTXT_CMD,
+ QDMA_REGO_CTXT_MASK,
+ QDMA_REGO_MM_H2C_CTRL,
+ QDMA_REGO_MM_C2H_CTRL,
+ QDMA_REGO_QUEUE_COUNT,
+ QDMA_REGO_RING_SIZE,
+ QDMA_REGO_H2C_PIDX,
+ QDMA_REGO_C2H_PIDX,
+ QDMA_REGO_INTR_CIDX,
+ QDMA_REGO_FUNC_ID,
+ QDMA_REGO_ERR_INT,
+ QDMA_REGO_ERR_STAT,
+ QDMA_REGO_MAX
+};
+
+struct qdma_reg_field {
+ u16 lsb; /* Least significant bit of field */
+ u16 msb; /* Most significant bit of field */
+};
+
+struct qdma_reg {
+ u32 off;
+ u32 count;
+};
+
+#define QDMA_REGF(_msb, _lsb) { \
+ .lsb = (_lsb), \
+ .msb = (_msb), \
+}
+
+#define QDMA_REGO(_off, _count) { \
+ .off = (_off), \
+ .count = (_count), \
+}
+
+enum qdma_desc_size {
+ QDMA_DESC_SIZE_8B,
+ QDMA_DESC_SIZE_16B,
+ QDMA_DESC_SIZE_32B,
+ QDMA_DESC_SIZE_64B,
+};
+
+enum qdma_queue_op_mode {
+ QDMA_QUEUE_OP_STREAM,
+ QDMA_QUEUE_OP_MM,
+};
+
+enum qdma_ctxt_type {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_WRB,
+ QDMA_CTXT_PFTCH,
+ QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_RSVD,
+ QDMA_CTXT_HOST_PROFILE,
+ QDMA_CTXT_TIMER,
+ QDMA_CTXT_FMAP,
+ QDMA_CTXT_FNC_STS,
+};
+
+enum qdma_ctxt_cmd {
+ QDMA_CTXT_CLEAR,
+ QDMA_CTXT_WRITE,
+ QDMA_CTXT_READ,
+ QDMA_CTXT_INVALIDATE,
+ QDMA_CTXT_MAX
+};
+
+struct qdma_ctxt_sw_desc {
+ u64 desc_base;
+ u16 vec;
+};
+
+struct qdma_ctxt_intr {
+ u64 agg_base;
+ u16 vec;
+ u32 size;
+ bool valid;
+ bool color;
+};
+
+struct qdma_ctxt_fmap {
+ u16 qbase;
+ u16 qmax;
+};
+
+struct qdma_device;
+
+struct qdma_mm_desc {
+ __le64 src_addr;
+ __le32 len;
+ __le32 reserved1;
+ __le64 dst_addr;
+ __le64 reserved2;
+} __packed;
+
+struct qdma_mm_vdesc {
+ struct virt_dma_desc vdesc;
+ struct qdma_queue *queue;
+ struct scatterlist *sgl;
+ u64 sg_off;
+ u32 sg_len;
+ u64 dev_addr;
+ u32 pidx;
+ u32 pending_descs;
+ struct dma_slave_config cfg;
+};
+
+#define QDMA_VDESC_QUEUED(vdesc) (!(vdesc)->sg_len)
+
+struct qdma_queue {
+ struct qdma_device *qdev;
+ struct virt_dma_chan vchan;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ struct qdma_mm_desc *desc_base;
+ struct qdma_mm_vdesc *submitted_vdesc;
+ struct qdma_mm_vdesc *issued_vdesc;
+ dma_addr_t dma_desc_base;
+ u32 pidx_reg;
+ u32 cidx_reg;
+ u32 ring_size;
+ u32 idx_mask;
+ u16 qid;
+ u32 pidx;
+ u32 cidx;
+};
+
+struct qdma_intr_ring {
+ struct qdma_device *qdev;
+ __le64 *base;
+ dma_addr_t dev_base;
+ char msix_name[QDMA_INTR_NAME_MAX_LEN];
+ u32 msix_vector;
+ u16 msix_id;
+ u32 ring_size;
+ u16 ridx;
+ u16 cidx;
+ u8 color;
+};
+
+#define QDMA_INTR_MASK_PIDX GENMASK_ULL(15, 0)
+#define QDMA_INTR_MASK_CIDX GENMASK_ULL(31, 16)
+#define QDMA_INTR_MASK_DESC_COLOR GENMASK_ULL(32, 32)
+#define QDMA_INTR_MASK_STATE GENMASK_ULL(34, 33)
+#define QDMA_INTR_MASK_ERROR GENMASK_ULL(36, 35)
+#define QDMA_INTR_MASK_TYPE GENMASK_ULL(38, 38)
+#define QDMA_INTR_MASK_QID GENMASK_ULL(62, 39)
+#define QDMA_INTR_MASK_COLOR GENMASK_ULL(63, 63)
+
+struct qdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *regmap;
+ struct mutex ctxt_lock; /* protect ctxt registers */
+ const struct qdma_reg_field *rfields;
+ const struct qdma_reg *roffs;
+ struct qdma_queue *h2c_queues;
+ struct qdma_queue *c2h_queues;
+ struct qdma_intr_ring *qintr_rings;
+ u32 qintr_ring_num;
+ u32 qintr_ring_idx;
+ u32 chan_num;
+ u32 queue_irq_start;
+ u32 queue_irq_num;
+ u32 err_irq_idx;
+ u32 fid;
+};
+
+extern const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX];
+extern const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX];
+
+#endif /* __QDMA_H */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 40052d1bd0b5..baebddc740b0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -339,7 +339,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
- * @all_chan_mask: all channels availlable in a mask
+ * @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
@@ -668,7 +668,7 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* CTRLA is read in turn, next the DSCR is read a second time. If the two
* consecutive read values of the DSCR are the same then we assume both refers
* to the very same LLI as well as the CTRLA value read inbetween does. For
- * cyclic tranfers, the assumption is that a full loop is "not so fast". If the
+ * cyclic transfers, the assumption is that a full loop is "not so fast". If the
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
@@ -700,7 +700,7 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
break;
/*
- * DSCR has changed inside the DMA controller, so the previouly
+ * DSCR has changed inside the DMA controller, so the previously
* read value of CTRLA may refer to an already processed
* descriptor hence could be outdated. We need to update ctrla
* to match the current descriptor.
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index fbaacb4c19b2..cfa6e1167a1f 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -15,7 +15,7 @@
* number of hardware rings over one or more SBA hardware devices. By
* design, the internal buffer size of SBA hardware device is limited
* but all offload operations supported by SBA can be broken down into
- * multiple small size requests and executed parallely on multiple SBA
+ * multiple small size requests and executed in parallel on multiple SBA
* hardware devices for achieving high through-put.
*
* The Broadcom SBA RAID driver does not require any register programming
@@ -135,7 +135,7 @@ struct sba_device {
u32 max_xor_srcs;
u32 max_resp_pool_size;
u32 max_cmds_pool_size;
- /* Maibox client and Mailbox channels */
+ /* Mailbox client and Mailbox channels */
struct mbox_client client;
struct mbox_chan *mchan;
struct device *mbox_dev;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 9d74fe97452e..e1b92b4d7b05 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -369,7 +369,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
/* the last frame requires extra flags */
d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
- /* detect a size missmatch */
+ /* detect a size mismatch */
if (buf_len && (d->size != buf_len))
goto error_cb;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c380a4dda77a..c1357d7f3dc6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1070,7 +1070,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
if (!name)
dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
else
- dev_set_name(&chan->dev->device, name);
+ dev_set_name(&chan->dev->device, "%s", name);
rc = device_register(&chan->dev->device);
if (rc)
goto err_out_ida;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 1f201a542b37..91b2fbc0b864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -500,7 +500,7 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
per_sec *= val;
per_sec = INT_TO_FIXPT(per_sec);
- do_div(per_sec, runtime);
+ do_div(per_sec, (u32)runtime);
return per_sec;
}
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d6c60635e90d..4ee337e78c23 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -841,7 +841,7 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
desc = container_of(tx, struct ep93xx_dma_desc, txd);
/*
- * If nothing is currently prosessed, we push this descriptor
+ * If nothing is currently processed, we push this descriptor
* directly to the hardware. Otherwise we put the descriptor
* to the pending queue.
*/
@@ -1025,7 +1025,7 @@ fail:
* @chan: channel
* @sgl: list of buffers to transfer
* @sg_len: number of entries in @sgl
- * @dir: direction of tha DMA transfer
+ * @dir: direction of the DMA transfer
* @flags: flags for the descriptor
* @context: operation context (ignored)
*
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 2c80077cb7c0..36c284a3d184 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -12,8 +12,8 @@ struct dpaa2_qdma_sd_d {
u32 rsv:32;
union {
struct {
- u32 ssd:12; /* souce stride distance */
- u32 sss:12; /* souce stride size */
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
u32 rsv1:8;
} sdf;
struct {
@@ -48,7 +48,7 @@ struct dpaa2_qdma_sd_d {
#define QDMA_SER_DISABLE (8) /* no notification */
#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
-#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index c66185c5a199..f9f1eda79254 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -100,6 +100,22 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
+static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
+ dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!");
+ return true;
+ }
+ }
+ return false;
+}
+
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -117,6 +133,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
+
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1]))
+ return NULL;
+
if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
@@ -153,7 +173,7 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
- mutex_lock(&fsl_edma->fsl_edma_mutex);
+ guard(mutex)(&fsl_edma->fsl_edma_mutex);
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
device_node) {
@@ -161,6 +181,8 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
continue;
fsl_chan = to_fsl_edma_chan(chan);
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0]))
+ return NULL;
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
@@ -177,18 +199,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 4c47bff81064..25a4134be36b 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -677,7 +677,7 @@ static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
writel_relaxed(tmp, addr);
/*
- * 0 - dma should process FLR whith CPU.
+ * 0 - dma should process FLR with CPU.
* 1 - dma not process FLR, only cpu process FLR.
*/
addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e3505e56784b..3c648308a54a 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -290,7 +290,7 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
desc->length += hw->len;
} while (i);
- /* Trigger an interrupt after the last block is transfered */
+ /* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
/* Disable LLP transfer in the last block */
@@ -364,7 +364,7 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
if (!i)
return bytes;
- /* The current chunk is not fully transfered yet */
+ /* The current chunk is not fully transferred yet */
bytes += desc->hw[--i].len;
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
ret = dma_async_device_register(&idma64->dma);
if (ret)
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 868b724a3b75..d84e21daa991 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -124,7 +124,6 @@ struct idxd_pmu {
struct pmu pmu;
char name[IDXD_NAME_SIZE];
- int cpu;
int n_counters;
int counter_width;
@@ -135,8 +134,6 @@ struct idxd_pmu {
unsigned long supported_filters;
int n_filters;
-
- struct hlist_node cpuhp_node;
};
#define IDXD_MAX_PRIORITY 0xf
@@ -803,14 +800,10 @@ void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
-void perfmon_init(void);
-void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
-static inline void perfmon_init(void) {}
-static inline void perfmon_exit(void) {}
#endif
/* debugfs */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 21f6905b554d..234c1c658ec7 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -69,9 +69,15 @@ static struct idxd_driver_data idxd_driver_data[] = {
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on GNR-D platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -878,8 +884,6 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- perfmon_init();
-
err = idxd_driver_register(&idxd_drv);
if (err < 0)
goto err_idxd_driver_register;
@@ -928,7 +932,6 @@ static void __exit idxd_exit_module(void)
idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
- perfmon_exit();
idxd_remove_debugfs();
}
module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index 5e94247e1ea7..4b6af2f15d8a 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -6,29 +6,6 @@
#include "idxd.h"
#include "perfmon.h"
-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-
-static cpumask_t perfmon_dsa_cpu_mask;
-static bool cpuhp_set_up;
-static enum cpuhp_state cpuhp_slot;
-
-/*
- * perf userspace reads this attribute to determine which cpus to open
- * counters on. It's connected to perfmon_dsa_cpu_mask, which is
- * maintained by the cpu hotplug handlers.
- */
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *perfmon_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
- .attrs = perfmon_cpumask_attrs,
-};
-
/*
* These attributes specify the bits in the config word that the perf
* syscall uses to pass the event ids and categories to perfmon.
@@ -67,16 +44,9 @@ static struct attribute_group perfmon_format_attr_group = {
static const struct attribute_group *perfmon_attr_groups[] = {
&perfmon_format_attr_group,
- &cpumask_attr_group,
NULL,
};
-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
-}
-
static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
{
return &idxd_pmu->pmu == event->pmu;
@@ -217,7 +187,6 @@ static int perfmon_pmu_event_init(struct perf_event *event)
return -EINVAL;
event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
- event->cpu = idxd->idxd_pmu->cpu;
event->hw.config = event->attr.config;
if (event->group_leader != event)
@@ -480,14 +449,15 @@ static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
- idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable,
- idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable,
+ idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+ idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
idxd_pmu->pmu.add = perfmon_pmu_event_add;
idxd_pmu->pmu.del = perfmon_pmu_event_del;
idxd_pmu->pmu.start = perfmon_pmu_event_start;
idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
idxd_pmu->pmu.read = perfmon_pmu_event_update;
idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ idxd_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
idxd_pmu->pmu.module = THIS_MODULE;
}
@@ -496,47 +466,11 @@ void perfmon_pmu_remove(struct idxd_device *idxd)
if (!idxd->idxd_pmu)
return;
- cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
perf_pmu_unregister(&idxd->idxd_pmu->pmu);
kfree(idxd->idxd_pmu);
idxd->idxd_pmu = NULL;
}
-static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct idxd_pmu *idxd_pmu;
-
- idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
- /* select the first online CPU as the designated reader */
- if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
- cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
- idxd_pmu->cpu = cpu;
- }
-
- return 0;
-}
-
-static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct idxd_pmu *idxd_pmu;
- unsigned int target;
-
- idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
- if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
- return 0;
-
- target = cpumask_any_but(cpu_online_mask, cpu);
- /* migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
- }
-
- return 0;
-}
-
int perfmon_pmu_init(struct idxd_device *idxd)
{
union idxd_perfcap perfcap;
@@ -544,12 +478,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
int rc = -ENODEV;
/*
- * perfmon module initialization failed, nothing to do
- */
- if (!cpuhp_set_up)
- return -ENODEV;
-
- /*
* If perfmon_offset or num_counters is 0, it means perfmon is
* not supported on this hardware.
*/
@@ -624,11 +552,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
if (rc)
goto free;
- rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
- if (rc) {
- perf_pmu_unregister(&idxd->idxd_pmu->pmu);
- goto free;
- }
out:
return rc;
free:
@@ -637,22 +560,3 @@ free:
goto out;
}
-
-void __init perfmon_init(void)
-{
- int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "driver/dma/idxd/perf:online",
- perf_event_cpu_online,
- perf_event_cpu_offline);
- if (WARN_ON(rc < 0))
- return;
-
- cpuhp_slot = rc;
- cpuhp_set_up = true;
-}
-
-void __exit perfmon_exit(void)
-{
- if (cpuhp_set_up)
- cpuhp_remove_multi_state(cpuhp_slot);
-}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 817a564413b0..94eca25ae9b9 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
* completing the descriptor will return desc to allocator and
* the desc can be acquired by a different process and the
* desc->list can be modified. Delete desc from list so the
- * list trasversing does not get corrupted by the other process.
+ * list traversing does not get corrupted by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ebf7c115d553..e913f0db99da 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -167,7 +167,6 @@ struct imxdma_channel {
enum imx_dma_type {
IMX1_DMA,
- IMX21_DMA,
IMX27_DMA,
};
@@ -195,8 +194,6 @@ static const struct of_device_id imx_dma_of_dev_id[] = {
{
.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
}, {
- .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
- }, {
.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
}, {
/* sentinel */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 7b502b60b38b..cc9ddd6c325b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -905,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR_VAL;
- /* validate the sources with the destintation page */
+ /* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
xor_val_srcs[i] = dest;
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 4117c7b67e9c..8173c3f1075a 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -107,7 +107,7 @@
* If header mode is set in DMA descriptor,
* If bit 30 is disabled, HDR_LEN must be configured according to channel
* requirement.
- * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to
+ * If bit 30 is enabled (checksum with header mode), HDR_LEN does not need to
* be configured. It will enable check sum for switch
* If header mode is not set in DMA descriptor,
* This register setting doesn't matter
diff --git a/drivers/dma/loongson1-apb-dma.c b/drivers/dma/loongson1-apb-dma.c
new file mode 100644
index 000000000000..255fe7eca212
--- /dev/null
+++ b/drivers/dma/loongson1-apb-dma.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Loongson-1 APB DMA Controller
+ *
+ * Copyright (C) 2015-2024 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Loongson-1 DMA Control Register */
+#define LS1X_DMA_CTRL 0x0
+
+/* DMA Control Register Bits */
+#define LS1X_DMA_STOP BIT(4)
+#define LS1X_DMA_START BIT(3)
+#define LS1X_DMA_ASK_VALID BIT(2)
+
+/* DMA Next Field Bits */
+#define LS1X_DMA_NEXT_VALID BIT(0)
+
+/* DMA Command Field Bits */
+#define LS1X_DMA_RAM2DEV BIT(12)
+#define LS1X_DMA_INT BIT(1)
+#define LS1X_DMA_INT_MASK BIT(0)
+
+#define LS1X_DMA_LLI_ALIGNMENT 64
+#define LS1X_DMA_LLI_ADDR_MASK GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT))
+#define LS1X_DMA_MAX_CHANNELS 3
+
+enum ls1x_dmadesc_offsets {
+ LS1X_DMADESC_NEXT = 0,
+ LS1X_DMADESC_SADDR,
+ LS1X_DMADESC_DADDR,
+ LS1X_DMADESC_LENGTH,
+ LS1X_DMADESC_STRIDE,
+ LS1X_DMADESC_CYCLES,
+ LS1X_DMADESC_CMD,
+ LS1X_DMADESC_SIZE
+};
+
+struct ls1x_dma_lli {
+ unsigned int hw[LS1X_DMADESC_SIZE];
+ dma_addr_t phys;
+ struct list_head node;
+} __aligned(LS1X_DMA_LLI_ALIGNMENT);
+
+struct ls1x_dma_desc {
+ struct virt_dma_desc vd;
+ struct list_head lli_list;
+};
+
+struct ls1x_dma_chan {
+ struct virt_dma_chan vc;
+ struct dma_pool *lli_pool;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ unsigned int bus_width;
+ void __iomem *reg_base;
+ int irq;
+ bool is_cyclic;
+ struct ls1x_dma_lli *curr_lli;
+};
+
+struct ls1x_dma {
+ struct dma_device ddev;
+ unsigned int nr_chans;
+ struct ls1x_dma_chan chan[];
+};
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data);
+
+#define to_ls1x_dma_chan(dchan) \
+ container_of(dchan, struct ls1x_dma_chan, vc.chan)
+
+#define to_ls1x_dma_desc(d) \
+ container_of(d, struct ls1x_dma_desc, vd)
+
+static inline struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
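+/*
+ * Ask the controller to write the descriptor it is currently processing
+ * back to the buffer at *lli_phys, then poll until the request completes.
+ */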
+static inline int ls1x_dma_query(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ int val, ret;
+
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_ASK_VALID;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_ASK_VALID), 0, 3000);
+ if (ret)
+ dev_err(chan2dev(dchan), "failed to query DMA\n");
+
+ return ret;
+}
+
+static inline int ls1x_dma_start(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ int val, ret;
+
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_START;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_START), 0, 1000);
+ if (!ret)
+ dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys);
+ else
+ dev_err(dev, "failed to start DMA\n");
+
+ return ret;
+}
+
+static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan)
+{
+ int val = readl(chan->reg_base + LS1X_DMA_CTRL);
+
+ writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL);
+}
+
+static void ls1x_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+
+ dma_free_coherent(dev, sizeof(struct ls1x_dma_lli),
+ chan->curr_lli, chan->curr_lli->phys);
+ dma_pool_destroy(chan->lli_pool);
+ chan->lli_pool = NULL;
+ devm_free_irq(dev, chan->irq, chan);
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+ dma_addr_t phys;
+ int ret;
+
+ ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler,
+ IRQF_SHARED, dma_chan_name(dchan), chan);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", chan->irq);
+ return ret;
+ }
+
+ chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev,
+ sizeof(struct ls1x_dma_lli),
+ __alignof__(struct ls1x_dma_lli), 0);
+ if (!chan->lli_pool)
+ return -ENOMEM;
+
+ /* allocate memory for querying the current lli */
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli),
+ &phys, GFP_KERNEL);
+ if (!chan->curr_lli) {
+ dma_pool_destroy(chan->lli_pool);
+ return -ENOMEM;
+ }
+ chan->curr_lli->phys = phys;
+
+ return 0;
+}
+
+static void ls1x_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan);
+ struct ls1x_dma_lli *lli, *_lli;
+
+ list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
+ list_del(&lli->node);
+ dma_pool_free(chan->lli_pool, lli, lli->phys);
+ }
+
+ kfree(desc);
+}
+
+static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void)
+{
+ struct ls1x_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->lli_list);
+
+ return desc;
+}
+
+static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, bool is_cyclic)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL;
+ struct device *dev = chan2dev(dchan);
+ struct list_head *pos = NULL;
+ struct scatterlist *sg;
+ unsigned int dev_addr, cmd, i;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ dev_addr = chan->dst_addr;
+ chan->bus_width = chan->dst_addr_width;
+ cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
+ break;
+ case DMA_DEV_TO_MEM:
+ dev_addr = chan->src_addr;
+ chan->bus_width = chan->src_addr_width;
+ cmd = LS1X_DMA_INT;
+ break;
+ default:
+ dev_err(dev, "unsupported DMA direction: %s\n",
+ dmaengine_get_direction_text(dir));
+ return -EINVAL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t buf_addr = sg_dma_address(sg);
+ size_t buf_len = sg_dma_len(sg);
+ dma_addr_t phys;
+
+ if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) {
+ dev_err(dev, "buffer is not aligned\n");
+ return -EINVAL;
+ }
+
+ /* allocate HW descriptors */
+ lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys);
+ if (!lli) {
+ dev_err(dev, "failed to alloc lli %u\n", i);
+ return -ENOMEM;
+ }
+
+ /* setup HW descriptors */
+ lli->phys = phys;
+ lli->hw[LS1X_DMADESC_SADDR] = buf_addr;
+ lli->hw[LS1X_DMADESC_DADDR] = dev_addr;
+ lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width;
+ lli->hw[LS1X_DMADESC_STRIDE] = 0;
+ lli->hw[LS1X_DMADESC_CYCLES] = 1;
+ lli->hw[LS1X_DMADESC_CMD] = cmd;
+
+ if (prev)
+ prev->hw[LS1X_DMADESC_NEXT] =
+ lli->phys | LS1X_DMA_NEXT_VALID;
+ prev = lli;
+
+ if (!first)
+ first = lli;
+
+ list_add_tail(&lli->node, &desc->lli_list);
+ }
+
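+ /* for cyclic transfers, link the last descriptor back to the first */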
+ if (is_cyclic) {
+ lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID;
+ chan->is_cyclic = is_cyclic;
+ }
+
+ list_for_each(pos, &desc->lli_list) {
+ lli = list_entry(pos, struct ls1x_dma_lli, node);
+ print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4,
+ lli, sizeof(*lli), false);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct ls1x_dma_desc *desc;
+
+ dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n",
+ sg_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct ls1x_dma_desc *desc;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(chan2dev(dchan),
+ "buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n",
+ buf_len, period_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ /* allocate the scatterlist */
+ sg_len = buf_len / period_len;
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!sgl) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ sg_init_table(sgl, sg_len);
+ for (i = 0; i < sg_len; ++i) {
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)),
+ period_len, offset_in_page(buf_addr));
+ sg_dma_address(&sgl[i]) = buf_addr;
+ sg_dma_len(&sgl[i]) = period_len;
+ buf_addr += period_len;
+ }
+
+ ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true);
+ kfree(sgl);
+ if (ret) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static int ls1x_dma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ chan->src_addr = config->src_addr;
+ chan->src_addr_width = config->src_addr_width;
+ chan->dst_addr = config->dst_addr;
+ chan->dst_addr_width = config->dst_addr_width;
+
+ return 0;
+}
+
+static int ls1x_dma_pause(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ int ret;
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+ /* save the current lli */
+ ret = ls1x_dma_query(chan, &chan->curr_lli->phys);
+ if (!ret)
+ ls1x_dma_stop(chan);
+
+ return ret;
+}
+
+static int ls1x_dma_resume(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ return ls1x_dma_start(chan, &chan->curr_lli->phys);
+}
+
+static int ls1x_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ LIST_HEAD(head);
+
+ ls1x_dma_stop(chan);
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (vd)
+ vchan_terminate_vdesc(vd);
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+ }
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
+ return 0;
+}
+
+static void ls1x_dma_synchronize(struct dma_chan *dchan)
+{
+ vchan_synchronize(to_virt_chan(dchan));
+}
+
+static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ size_t bytes = 0;
+
+ status = dma_cookie_status(dchan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+ dma_addr_t next_phys;
+
+ /* get the current lli */
+ if (ls1x_dma_query(chan, &chan->curr_lli->phys))
+ return status;
+
+ /* locate the current lli */
+ next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT];
+ list_for_each_entry(lli, &desc->lli_list, node)
+ if (lli->hw[LS1X_DMADESC_NEXT] == next_phys)
+ break;
+
+ dev_dbg(chan2dev(dchan), "current lli_phys=%pad",
+ &lli->phys);
+
+ /* count the residues */
+ list_for_each_entry_from(lli, &desc->lli_list, node)
+ bytes += lli->hw[LS1X_DMADESC_LENGTH] *
+ chan->bus_width;
+ }
+ }
+
+ dma_set_residue(state, bytes);
+
+ return status;
+}
+
+static void ls1x_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ if (vchan_issue_pending(&chan->vc)) {
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+
+ lli = list_first_entry(&desc->lli_list,
+ struct ls1x_dma_lli, node);
+ ls1x_dma_start(chan, &lli->phys);
+ }
+ }
+}
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
+{
+ struct ls1x_dma_chan *chan = data;
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ struct virt_dma_desc *vd;
+
+ scoped_guard(spinlock, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_warn(dev,
+ "IRQ %d with no active desc on channel %d\n",
+ irq, dchan->chan_id);
+ return IRQ_NONE;
+ }
+
+ if (chan->is_cyclic) {
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ }
+
+ dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id);
+
+ return IRQ_HANDLED;
+}
+
+static int ls1x_dma_chan_probe(struct platform_device *pdev,
+ struct ls1x_dma *dma)
+{
+ void __iomem *reg_base;
+ int id;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+ char pdev_irqname[16];
+
+ snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id);
+ chan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (chan->irq < 0)
+ return dev_err_probe(&pdev->dev, chan->irq,
+ "failed to get IRQ for ch%d\n",
+ id);
+
+ chan->reg_base = reg_base;
+ chan->vc.desc_free = ls1x_dma_free_desc;
+ vchan_init(&chan->vc, &dma->ddev);
+ }
+
+ return 0;
+}
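+/*
+ * The "ch%d" lookup above keys off interrupt-names in the device tree;
+ * an illustrative node fragment (values are assumptions, not taken from
+ * a shipped dts):
+ *
+ *   interrupt-names = "ch0", "ch1", "ch2";
+ *
+ * platform_get_irq_byname() then resolves one IRQ line per channel.
+ */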
+
+static void ls1x_dma_chan_remove(struct ls1x_dma *dma)
+{
+ int id;
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+
+ if (chan->vc.chan.device == &dma->ddev) {
+ list_del(&chan->vc.chan.device_node);
+ tasklet_kill(&chan->vc.task);
+ }
+ }
+}
+
+static int ls1x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dma_device *ddev;
+ struct ls1x_dma *dma;
+ int ret;
+
+ ret = platform_irq_count(pdev);
+ if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid number of IRQ channels: %d\n",
+ ret);
+
+ dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+ dma->nr_chans = ret;
+
+ /* initialize DMA device */
+ ddev = &dma->ddev;
+ ddev->dev = dev;
+ ddev->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls1x_dma_free_chan_resources;
+ ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic;
+ ddev->device_config = ls1x_dma_slave_config;
+ ddev->device_pause = ls1x_dma_pause;
+ ddev->device_resume = ls1x_dma_resume;
+ ddev->device_terminate_all = ls1x_dma_terminate_all;
+ ddev->device_synchronize = ls1x_dma_synchronize;
+ ddev->device_tx_status = ls1x_dma_tx_status;
+ ddev->device_issue_pending = ls1x_dma_issue_pending;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ INIT_LIST_HEAD(&ddev->channels);
+
+ /* initialize DMA channels */
+ ret = ls1x_dma_chan_probe(pdev, dma);
+ if (ret)
+ goto err;
+
+ ret = dmaenginem_async_device_register(ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA device\n");
+ goto err;
+ }
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
+ ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA controller\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, dma);
+ dev_info(dev, "Loongson1 DMA driver registered\n");
+
+ return 0;
+
+err:
+ ls1x_dma_chan_remove(dma);
+
+ return ret;
+}
+
+static void ls1x_dma_remove(struct platform_device *pdev)
+{
+ struct ls1x_dma *dma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ ls1x_dma_chan_remove(dma);
+}
+
+static const struct of_device_id ls1x_dma_match[] = {
+ { .compatible = "loongson,ls1b-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_dma_match);
+
+static struct platform_driver ls1x_dma_driver = {
+ .probe = ls1x_dma_probe,
+ .remove = ls1x_dma_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_dma_match,
+ },
+};
+
+module_platform_driver(ls1x_dma_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver");
+MODULE_LICENSE("GPL");
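For orientation, a minimal consumer sketch against the generic dmaengine API this driver implements; the channel name, FIFO address, and error handling are illustrative assumptions, not values taken from the driver above.

#include <linux/dmaengine.h>

/* Illustrative consumer: the "rx" name and FIFO address are made up. */
static int example_start_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.src_addr = 0x1fe4c000,	/* hypothetical device FIFO */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* lands in ls1x_dma_issue_pending() */
	return 0;
}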
diff --git a/drivers/dma/lpc32xx-dmamux.c b/drivers/dma/lpc32xx-dmamux.c
new file mode 100644
index 000000000000..351d7e23e615
--- /dev/null
+++ b/drivers/dma/lpc32xx-dmamux.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2024 Timesys Corporation <piotr.wojtaszczyk@timesys.com>
+//
+// Based on TI DMA Crossbar driver by:
+// Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+// Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define LPC32XX_SSP_CLK_CTRL 0x78
+#define LPC32XX_I2S_CLK_CTRL 0x7c
+
+struct lpc32xx_dmamux {
+ int signal;
+ char *name_sel0;
+ char *name_sel1;
+ int muxval;
+ int muxreg;
+ int bit;
+ bool busy;
+};
+
+struct lpc32xx_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *reg;
+ spinlock_t lock; /* protects busy status flag */
+};
+
+/* From LPC32x0 User manual "3.2.1 DMA request signals" */
+static struct lpc32xx_dmamux lpc32xx_muxes[] = {
+ {
+ .signal = 3,
+ .name_sel0 = "spi2-rx-tx",
+ .name_sel1 = "ssp1-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 5,
+ },
+ {
+ .signal = 10,
+ .name_sel0 = "uart7-rx",
+ .name_sel1 = "i2s1-dma1",
+ .muxreg = LPC32XX_I2S_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 11,
+ .name_sel0 = "spi1-rx-tx",
+ .name_sel1 = "ssp1-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 14,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 3,
+ },
+ {
+ .signal = 15,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 2,
+ },
+};
+
+static void lpc32xx_dmamux_release(struct device *dev, void *route_data)
+{
+ struct lpc32xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct lpc32xx_dmamux *mux = route_data;
+
+ dev_dbg(dev, "releasing dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ guard(spinlock)(&dmamux->lock);
+
+ mux->busy = false;
+}
+
+static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct device *dev = &pdev->dev;
+ struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ unsigned long flags;
+ struct lpc32xx_dmamux *mux = NULL;
+ int i;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
+ if (lpc32xx_muxes[i].signal == dma_spec->args[0]) {
+ mux = &lpc32xx_muxes[i];
+ break;
+ }
+ }
+ if (!mux) {
+ dev_err(&pdev->dev, "invalid mux request number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[2] > 1) {
+ dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+ dma_spec->args[2]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The of_node_put() will be done in the core for the node */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (mux->busy) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(dev, "dma request signal %d busy, routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+ of_node_put(dma_spec->np);
+ return ERR_PTR(-EBUSY);
+ }
+
+ mux->busy = true;
+ mux->muxval = dma_spec->args[2] ? BIT(mux->bit) : 0;
+
+ regmap_update_bits(dmamux->reg, mux->muxreg, BIT(mux->bit), mux->muxval);
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[2] = 0;
+ dma_spec->args_count = 2;
+
+ dev_dbg(dev, "dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ return mux;
+}
+
+static int lpc32xx_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct lpc32xx_dmamux_data *dmamux;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmamux->reg = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(dmamux->reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(dmamux->reg);
+ }
+
+ spin_lock_init(&dmamux->lock);
+ platform_set_drvdata(pdev, dmamux);
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = lpc32xx_dmamux_release;
+
+ return of_dma_router_register(np, lpc32xx_dmamux_reserve,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc32xx_dmamux_match[] = {
+ { .compatible = "nxp,lpc3220-dmamux" },
+ {},
+};
+
+static struct platform_driver lpc32xx_dmamux_driver = {
+ .probe = lpc32xx_dmamux_probe,
+ .driver = {
+ .name = "lpc32xx-dmamux",
+ .of_match_table = lpc32xx_dmamux_match,
+ },
+};
+
+static int __init lpc32xx_dmamux_init(void)
+{
+ return platform_driver_register(&lpc32xx_dmamux_driver);
+}
+arch_initcall(lpc32xx_dmamux_init);
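A compact sketch of the select arithmetic the reserve path performs, using signal 3 from the table above; the regmap handle is assumed to come from the parent syscon, as in probe.

/* Route request signal 3: sel1 picks "ssp1-rx", sel0 "spi2-rx-tx". */
static void example_route_signal3(struct regmap *reg, bool sel1)
{
	u32 muxval = sel1 ? BIT(5) : 0;	/* bit 5 per the mux table */

	regmap_update_bits(reg, LPC32XX_SSP_CLK_CTRL, BIT(5), muxval);
}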
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
index a49913f3ed3f..9652e8666722 100644
--- a/drivers/dma/ls2x-apb-dma.c
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -33,11 +33,11 @@
#define LDMA_STOP BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
-/* Bitfields in ndesc_addr field of HW decriptor */
+/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
-/* Bitfields in cmd field of HW decriptor */
+/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT BIT(1) /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 529100c5b9f5..b69eabf12a24 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -518,7 +518,7 @@ mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
/* setup dma channel */
cvd[i]->ch = c;
- /* setup sourece, destination, and length */
+ /* setup source, destination, and length */
tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
cvd[i]->len = tlen;
cvd[i]->src = src;
@@ -617,7 +617,7 @@ static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
u32 i, min_refcnt = U32_MAX, refcnt;
unsigned long flags;
- /* allocate PC with the minimun refcount */
+ /* allocate PC with the minimum refcount */
for (i = 0; i < cqdma->dma_channels; ++i) {
refcnt = refcount_read(&cqdma->pc[i]->refcnt);
if (refcnt < min_refcnt) {
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 36ff11e909ea..58c7961ab9ad 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -226,7 +226,7 @@ struct mtk_hsdma_soc {
* @pc_refcnt: Track how many VCs are using the PC
* @lock: Lock protect agaisting multiple VCs access PC
* @soc: The pointer to area holding differences among
- * vaious platform
+ * various platforms
*/
struct mtk_hsdma_device {
struct dma_device ddev;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcd3b623ac6c..43efce77bb57 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_chan_is_busy(mv_chan)) {
u32 current_desc = mv_chan_get_current_desc(mv_chan);
/*
- * and the curren desc is the end of the chain before
+ * and the current desc is the end of the chain before
* the append, then we need to start the channel
*/
if (current_desc == old_chain_tail->async_tx.phys)
@@ -1074,7 +1074,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
- /* discover transaction capabilites from the platform data */
+ /* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index d86086b05b0e..c87cefd38a07 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -99,7 +99,7 @@ struct mv_xor_device {
* @common: common dmaengine channel object members
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
- * @op_in_desc: new mode of driver, each op is writen to descriptor.
+ * @op_in_desc: new mode of driver, each op is written to descriptor.
*/
struct mv_xor_chan {
int pending;
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 97ebc791a30b..c8c67f4d982c 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -175,7 +175,7 @@ struct mv_xor_v2_device {
* struct mv_xor_v2_sw_desc - implements a xor SW descriptor
* @idx: descriptor index
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @free_list: node of the free SW descriprots list
*/
struct mv_xor_v2_sw_desc {
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index c08916339aa7..3b011a91d48e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -897,7 +897,7 @@ static int nbpf_config(struct dma_chan *dchan,
/*
* We could check config->slave_id to match chan->terminal here,
* but with DT they would be coming from the same source, so
- * such a check would be superflous
+ * such a check would be superfluous
*/
chan->slave_dst_addr = config->dst_addr;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index e588fff9f21d..423442e55d36 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(of_dma_lock);
*
* Finds a DMA controller with matching device node and number for dma cells
* in a list of registered DMA controllers. If a match is found a valid pointer
- * to the DMA data stored is retuned. A NULL pointer is returned if no match is
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
* found.
*/
static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
*
* This function can be used as the of xlate callback for DMA driver which wants
* to match the channel based on the channel id. When using this xlate function
- * the #dma-cells propety of the DMA controller dt node needs to be set to 1.
+ * the #dma-cells property of the DMA controller dt node needs to be set to 1.
* The data parameter of of_dma_controller_register must be a pointer to the
* dma_device struct the function should match upon.
*
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index e001f4f7aa64..aa436f9e3571 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1156,7 +1156,7 @@ static int owl_dma_probe(struct platform_device *pdev)
}
/*
- * Eventhough the DMA controller is capable of generating 4
+ * Even though the DMA controller is capable of generating 4
* IRQ's for DMA priority feature, we only use 1 IRQ for
* simplification.
*/
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 60c4de8dac1d..82a9fe88ad54 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
*/
- ret = dma_set_max_seg_size(&adev->dev, 1900800);
- if (ret)
- dev_err(&adev->dev, "unable to set the seg size\n");
-
+ dma_set_max_seg_size(&adev->dev, 1900800);
init_pl330_debugfs(pl330);
dev_info(&adev->dev,
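This hunk, like the bam_dma, rcar-dmac, and ste_dma40 hunks below, drops the return-value check around dma_set_max_seg_size(); the shared pattern suggests the helper can no longer fail, so callers reduce to a bare call. A sketch, with an illustrative limit:

dma_set_max_seg_size(dev, SZ_64K);	/* pl330 itself uses 1900800 above */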
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index bbb60a970dab..7b78759ac734 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -9,7 +9,7 @@
*/
/*
- * This driver supports the asynchrounous DMA copy and RAID engines available
+ * This driver supports the asynchronous DMA copy and RAID engines available
* on the AMCC PPC440SPe Processors.
* Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
* ADMA driver written by D.Williams.
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
index 1ff4be23db0f..b5725481bfa6 100644
--- a/drivers/dma/ppc4xx/dma.h
+++ b/drivers/dma/ppc4xx/dma.h
@@ -14,7 +14,7 @@
/* Number of elements in the array with statical CDBs */
#define MAX_STAT_DMA_CDBS 16
-/* Number of DMA engines available on the contoller */
+/* Number of DMA engines available on the controller */
#define DMA_ENGINES_NUM 2
/* Maximum h/w supported number of destinations */
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index 21b4bf895200..39bc37268235 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -192,7 +192,7 @@ struct pt_cmd_queue {
/* Queue dma pool */
struct dma_pool *dma_pool;
- /* Queue base address (not neccessarily aligned)*/
+ /* Queue base address (not necessarily aligned)*/
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5e7d332731e0..d43a881e43b9 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -440,7 +440,7 @@ static void bam_reset(struct bam_device *bdev)
val |= BAM_EN;
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
- /* set descriptor threshhold, start with 4 bytes */
+ /* set descriptor threshold, start with 4 bytes */
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
@@ -667,7 +667,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
for_each_sg(sgl, sg, sg_len, i)
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
- /* allocate enough room to accomodate the number of entries */
+ /* allocate enough room to accommodate the number of entries */
async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
GFP_NOWAIT);
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
/* set max dma segment size */
bdev->common.dev = bdev->dev;
- ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
- if (ret) {
- dev_err(bdev->dev, "cannot set maximum segment size\n");
- goto err_bam_channel_exit;
- }
+ dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
platform_set_drvdata(pdev, bdev);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index e6ebd688d746..52a7c8f2498f 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1856,7 +1856,7 @@ static void gpi_issue_pending(struct dma_chan *chan)
read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
- /* move all submitted discriptors to issued list */
+ /* move all submitted descriptors to issued list */
spin_lock_irqsave(&gchan->vc.lock, flags);
if (vchan_issue_pending(&gchan->vc))
vd = list_last_entry(&gchan->vc.desc_issued,
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index 53f4273b657c..c1db398adc84 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -650,7 +650,7 @@ static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
/*
* residue is either the full length if it is in the issued list, or 0
* if it is in progress. We have no reliable way of determining
- * anything inbetween
+ * anything in between
*/
dma_set_residue(txstate, residue);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 40482cb73d79..1094a2f82164 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 7cc9eb2217e8..8ead0a1fd237 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -318,7 +318,7 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
}
/*
- * Find a slave channel configuration from the contoller list by either a slave
+ * Find a slave channel configuration from the controller list by either a slave
* ID in the non-DT case, or by a MID/RID value in the DT case
*/
static const struct sh_dmae_slave_config *dmae_find_slave(
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2c489299148e..d52e1685aed5 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (ret)
goto destroy_cache;
- ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
- if (ret) {
- d40_err(dev, "Failed to set dma max seg size\n");
- goto destroy_cache;
- }
+ dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
d40_hw_init(base);
diff --git a/drivers/dma/ste_dma40.h b/drivers/dma/ste_dma40.h
index c697bfe16a01..a90c786acc1f 100644
--- a/drivers/dma/ste_dma40.h
+++ b/drivers/dma/ste_dma40.h
@@ -4,7 +4,7 @@
#define STE_DMA40_H
/*
- * Maxium size for a single dma descriptor
+ * Maximum size for a single dma descriptor
* Size is limited to 16 bits.
* Size is in the units of addr-widths (1,2,4,8 bytes)
* Larger transfers will be split up to multiple linked desc
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index c504e855eb02..2e30e9a94a1e 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -369,7 +369,7 @@ struct d40_phy_lli_bidir {
* @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
* @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
*
- * This struct must be 8 bytes aligned since it will be accessed directy by
+ * This struct must be 8 bytes aligned since it will be accessed directly by
* the DMA. Never add any none hw mapped registers to this struct.
*/
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index ac69778827f2..7d1acda2d72b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -463,7 +463,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
/*
* If interrupt is pending then do nothing as the ISR will handle
- * the programing for new request.
+ * the programming for new request.
*/
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index d349c6d482ae..9062a237cd16 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
struct device *xudma_get_device(struct udma_dev *ud);
struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
-void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index fd4397adeb79..275848a9c450 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1742,7 +1742,7 @@ static int xgene_dma_probe(struct platform_device *pdev)
/* Initialize DMA channels software state */
xgene_dma_init_channels(pdma);
- /* Configue DMA rings */
+ /* Configure DMA rings */
ret = xgene_dma_init_rings(pdma);
if (ret)
goto err_clk_enable;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 36bd4825d389..be87764af9e8 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -149,7 +149,7 @@ struct xilinx_dpdma_chan;
* @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
* @next_desc: next descriptor 32 bit address
* @src_addr: payload source address (1st page, 32 LSB)
- * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs)
+ * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
* @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
* @src_addr2: payload source address (2nd page, 32 LSB)
* @src_addr3: payload source address (3rd page, 32 LSB)
@@ -210,7 +210,7 @@ struct xilinx_dpdma_tx_desc {
* @vchan: virtual DMA channel
* @reg: register base address
* @id: channel ID
- * @wait_to_stop: queue to wait for outstanding transacitons before stopping
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
@@ -671,6 +671,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
}
/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: number of periods
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, unsigned long flags)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+ &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+ period_len) |
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+ period_len);
+ hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
+ XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
+ XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+ if (chan->xdev->ext_addr)
+ last->hw.addr_ext |=
+ FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+ upper_32_bits(sw_desc->dma_addr));
+
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+ return NULL;
+}
+
+/**
* xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
* descriptor
* @chan: DPDMA channel
@@ -1189,6 +1267,23 @@ out_unlock:
/* -----------------------------------------------------------------------------
* DMA Engine Operations
*/
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+ period_len, flags);
+}
static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1672,6 +1767,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
dma_cap_set(DMA_REPEAT, ddev->cap_mask);
dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
@@ -1679,6 +1775,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
/* TODO: Can we achieve better granularity ? */
ddev->device_tx_status = dma_cookie_status;
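With DMA_CYCLIC now in the capability mask, a consumer can drive repeating scanout-style transfers through the stock dmaengine helper; a sketch where chan, buf, frame_size, and the callback are all illustrative assumptions:

struct dma_async_tx_descriptor *tx;

/* Double-buffered MEM_TO_DEV: buf_len is an exact multiple of
 * period_len, as xilinx_dpdma_prep_dma_cyclic() requires. */
tx = dmaengine_prep_dma_cyclic(chan, buf, 2 * frame_size, frame_size,
			       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (tx) {
	tx->callback = frame_done_cb;	/* hypothetical per-period callback */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
}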
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index f31631bef961..9ae46f1198fe 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -22,10 +22,10 @@
#include "../dmaengine.h"
/* Register Offsets */
-#define ZYNQMP_DMA_ISR 0x100
-#define ZYNQMP_DMA_IMR 0x104
-#define ZYNQMP_DMA_IER 0x108
-#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_ISR (chan->irq_offset + 0x100)
+#define ZYNQMP_DMA_IMR (chan->irq_offset + 0x104)
+#define ZYNQMP_DMA_IER (chan->irq_offset + 0x108)
+#define ZYNQMP_DMA_IDS (chan->irq_offset + 0x10c)
#define ZYNQMP_DMA_CTRL0 0x110
#define ZYNQMP_DMA_CTRL1 0x114
#define ZYNQMP_DMA_DATA_ATTR 0x120
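Since the four interrupt registers are now computed from chan->irq_offset, these macros only expand correctly with a local chan in scope; a sketch of the resulting per-variant addressing, assuming chan->regs is the channel MMIO base as elsewhere in this driver:

static u32 example_read_irq_status(struct zynqmp_dma_chan *chan)
{
	/* ZYNQMP_DMA_ISR expands to chan->irq_offset + 0x100:
	 * 0x100 on ZynqMP, 0x408 on Versal Gen 2 (0x308 + 0x100). */
	return readl(chan->regs + ZYNQMP_DMA_ISR);
}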
@@ -145,6 +145,9 @@
#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
async_tx)
+/* IRQ Register offset for Versal Gen 2 */
+#define IRQ_REG_OFFSET 0x308
+
/**
* struct zynqmp_dma_desc_ll - Hw linked list descriptor
* @addr: Buffer address
@@ -211,6 +214,7 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
+ * @irq_offset: IRQ register offset
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -235,6 +239,7 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
+ u32 irq_offset;
};
/**
@@ -253,6 +258,14 @@ struct zynqmp_dma_device {
struct clk *clk_apb;
};
+struct zynqmp_dma_config {
+ u32 offset;
+};
+
+static const struct zynqmp_dma_config versal2_dma_config = {
+ .offset = IRQ_REG_OFFSET,
+};
+
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
u64 value)
{
@@ -892,6 +905,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
{
struct zynqmp_dma_chan *chan;
struct device_node *node = pdev->dev.of_node;
+ const struct zynqmp_dma_config *match_data;
int err;
chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
@@ -919,6 +933,10 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return -EINVAL;
}
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (match_data)
+ chan->irq_offset = match_data->offset;
+
chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
zdev->chan = chan;
tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
@@ -1161,6 +1179,7 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
}
static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config },
{ .compatible = "xlnx,zynqmp-dma-1.0", },
{}
};
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 98e6ad8528d3..fc0280dcddd1 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -342,6 +342,51 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
return 0;
}
+static int
+dpll_msg_add_pin_esync(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ struct dpll_pin_esync esync;
+ struct nlattr *nest;
+ int ret, i;
+
+ if (!ops->esync_get)
+ return 0;
+ ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &esync, extack);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+ if (nla_put_64bit(msg, DPLL_A_PIN_ESYNC_FREQUENCY, sizeof(esync.freq),
+ &esync.freq, DPLL_A_PIN_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_ESYNC_PULSE, esync.pulse))
+ return -EMSGSIZE;
+ for (i = 0; i < esync.range_num; i++) {
+ nest = nla_nest_start(msg,
+ DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED);
+ if (!nest)
+ return -EMSGSIZE;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN,
+ sizeof(esync.range[i].min),
+ &esync.range[i].min, DPLL_A_PIN_PAD))
+ goto nest_cancel;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX,
+ sizeof(esync.range[i].max),
+ &esync.range[i].max, DPLL_A_PIN_PAD))
+ goto nest_cancel;
+ nla_nest_end(msg, nest);
+ }
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+}
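+/*
+ * Resulting attribute layout (sketch), with one SUPPORTED nest emitted
+ * per advertised range:
+ *
+ *   DPLL_A_PIN_ESYNC_FREQUENCY            (u64)
+ *   DPLL_A_PIN_ESYNC_PULSE                (u32)
+ *   DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED  (nest, repeated)
+ *     DPLL_A_PIN_FREQUENCY_MIN            (u64)
+ *     DPLL_A_PIN_FREQUENCY_MAX            (u64)
+ */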
+
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
@@ -483,6 +528,9 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_ffo(msg, pin, ref, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
+ if (ret)
+ return ret;
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
@@ -739,6 +787,83 @@ rollback:
}
static int
+dpll_pin_esync_set(struct dpll_pin *pin, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *ref, *failed;
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_esync esync;
+ u64 freq = nla_get_u64(a);
+ struct dpll_device *dpll;
+ bool supported = false;
+ unsigned long i;
+ int ret;
+
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ ops = dpll_pin_ops(ref);
+ if (!ops->esync_set || !ops->esync_get) {
+ NL_SET_ERR_MSG(extack,
+ "embedded sync feature is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+ }
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &esync, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current embedded sync frequency value");
+ return ret;
+ }
+ if (freq == esync.freq)
+ return 0;
+ for (i = 0; i < esync.range_num; i++)
+ if (freq <= esync.range[i].max && freq >= esync.range[i].min)
+ supported = true;
+ if (!supported) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "requested embedded sync frequency value is not supported by this device");
+ return -EINVAL;
+ }
+
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ void *pin_dpll_priv;
+
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ ret = ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
+ freq, extack);
+ if (ret) {
+ failed = ref;
+ NL_SET_ERR_MSG_FMT(extack,
+ "embedded sync frequency set failed for dpll_id: %u",
+ dpll->id);
+ goto rollback;
+ }
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+
+rollback:
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ void *pin_dpll_priv;
+
+ if (ref == failed)
+ break;
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ if (ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
+ esync.freq, extack))
+ NL_SET_ERR_MSG(extack, "set embedded sync frequency rollback failed");
+ }
+ return ret;
+}
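+/*
+ * Rollback sketch: with three dpll refs and esync_set() failing on the
+ * third, the loop above records failed = the third ref, and the
+ * rollback loop re-applies the previous esync.freq to the first two,
+ * stopping at the failed ref -- no device keeps a partially applied
+ * value.
+ */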
+
+static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
@@ -1039,6 +1164,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
if (ret)
return ret;
break;
+ case DPLL_A_PIN_ESYNC_FREQUENCY:
+ ret = dpll_pin_esync_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
}
}
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 1e95f5397cfc..fe9b6893d261 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -62,7 +62,7 @@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] =
};
/* DPLL_CMD_PIN_SET - do */
-static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST + 1] = {
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = {
[DPLL_A_PIN_ID] = { .type = NLA_U32, },
[DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
[DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
@@ -71,6 +71,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST +
[DPLL_A_PIN_PARENT_DEVICE] = NLA_POLICY_NESTED(dpll_pin_parent_device_nl_policy),
[DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
[DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, },
+ [DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, },
};
/* Ops table for dpll */
@@ -138,7 +139,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_pin_set_doit,
.post_doit = dpll_pin_post_doit,
.policy = dpll_pin_set_nl_policy,
- .maxattr = DPLL_A_PIN_PHASE_ADJUST,
+ .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
};
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 16c8de5050e5..81af6c344d6b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -311,15 +311,6 @@ config EDAC_CELL
Cell Broadband Engine internal memory controller
on platform without a hypervisor
-config EDAC_PPC4XX
- tristate "PPC4xx IBM DDR2 Memory Controller"
- depends on 4xx
- help
- This enables support for EDAC on the ECC memory used
- with the IBM DDR2 memory controller found in various
- PowerPC 4xx embedded processors such as the 405EX[r],
- 440SP, 440SPe, 460EX, 460GT and 460SX.
-
config EDAC_AMD8131
tristate "AMD8131 HyperTransport PCI-X Tunnel"
depends on PCI && PPC_MAPLE
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 4edfb83ffbee..faf310eec4a6 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -63,7 +63,6 @@ i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 24dd896d9a9d..e2a954de913b 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -47,10 +47,6 @@
readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \
(res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \
(i) * (m)->chan_mmio_sz)
-#define I10NM_GET_AMAP(m, i) \
- readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \
- (res_cfg->type == GNR ? 0xc14 : 0x20814)) + \
- (i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset) \
readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset) \
@@ -971,7 +967,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
{
struct skx_pvt *pvt = mci->pvt_info;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg = 0;
+ u32 mtr, mcddrtcfg = 0;
struct dimm_info *dimm;
int i, j, ndimms;
@@ -980,7 +976,6 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
continue;
ndimms = 0;
- amap = I10NM_GET_AMAP(imc, i);
if (res_cfg->type != GNR)
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
@@ -992,7 +987,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
- ndimms += skx_get_dimm_info(mtr, 0, amap, dimm,
+ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
imc, i, j, cfg);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
@@ -1013,54 +1008,6 @@ static struct notifier_block i10nm_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature.
- * Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/i10nm_test/addr.
- */
-static struct dentry *i10nm_test;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct mce m;
-
- pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
-
- memset(&m, 0, sizeof(m));
- /* ADDRV + MemRd + Unknown channel */
- m.status = MCI_STATUS_ADDRV + 0x90;
- /* One corrected error */
- m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
- m.addr = val;
- skx_mce_check_error(NULL, 0, &m);
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-static void setup_i10nm_debug(void)
-{
- i10nm_test = edac_debugfs_create_dir("i10nm_test");
- if (!i10nm_test)
- return;
-
- if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
- NULL, &fops_u64_wo)) {
- debugfs_remove(i10nm_test);
- i10nm_test = NULL;
- }
-}
-
-static void teardown_i10nm_debug(void)
-{
- debugfs_remove_recursive(i10nm_test);
-}
-#else
-static inline void setup_i10nm_debug(void) {}
-static inline void teardown_i10nm_debug(void) {}
-#endif /*CONFIG_EDAC_DEBUG*/
-
static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
@@ -1159,7 +1106,7 @@ static int __init i10nm_init(void)
opstate_init();
mce_register_decode_chain(&i10nm_mce_dec);
- setup_i10nm_debug();
+ skx_setup_debug("i10nm_test");
if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
@@ -1187,7 +1134,7 @@ static void __exit i10nm_exit(void)
enable_retry_rd_err_log(false);
}
- teardown_i10nm_debug();
+ skx_teardown_debug();
mce_unregister_decode_chain(&i10nm_mce_dec);
skx_adxl_put();
skx_remove();
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 0fe75eed8973..189a2fc29e74 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -316,7 +316,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
if (igen6_tom <= _4GB)
return eaddr + igen6_tolud - _4GB;
- if (eaddr < _4GB)
+ if (eaddr >= igen6_tom)
return eaddr + igen6_tolud - igen6_tom;
return eaddr;
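Spelled out with illustrative values (not from any datasheet): the corrected branch only shifts addresses at or above TOM, and everything below TOM now falls through unchanged.

/* Illustrative: igen6_tolud = 2 GiB, igen6_tom = 6 GiB.
 * eaddr = 0x1a0000000 (6.5 GiB) >= TOM, so the IMC address is
 * 0x1a0000000 + 2 GiB - 6 GiB = 0xa0000000 (2.5 GiB). */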
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
deleted file mode 100644
index 1eea3341a916..000000000000
--- a/drivers/edac/ppc4xx_edac.c
+++ /dev/null
@@ -1,1425 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2008 Nuovation System Designs, LLC
- * Grant Erickson <gerickson@nuovations.com>
- */
-
-#include <linux/edac.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/types.h>
-
-#include <asm/dcr.h>
-
-#include "edac_module.h"
-#include "ppc4xx_edac.h"
-
-/*
- * This file implements a driver for monitoring and handling events
- * associated with the IMB DDR2 ECC controller found in the AMCC/IBM
- * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
- *
- * As realized in the 405EX[r], this controller features:
- *
- * - Support for registered- and non-registered DDR1 and DDR2 memory.
- * - 32-bit or 16-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 4-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Two (2) memory banks/ranks.
- * - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
- * bank/rank in 16-bit mode.
- *
- * As realized in the 440SP and 440SPe, this controller changes/adds:
- *
- * - 64-bit or 32-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 8-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
- * per bank/rank in 32-bit mode.
- *
- * As realized in the 460EX and 460GT, this controller changes/adds:
- *
- * - 64-bit or 32-bit memory interface with optional ECC.
- *
- * o ECC support includes:
- *
- * - 8-bit SEC/DED
- * - Aligned-nibble error detect
- * - Bypass mode
- *
- * - Four (4) memory banks/ranks.
- * - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
- * per bank/rank in 32-bit mode.
- *
- * At present, this driver has ONLY been tested against the controller
- * realization in the 405EX[r] on the AMCC Kilauea and Haleakala
- * boards (256 MiB w/o ECC memory soldered onto the board) and a
- * proprietary board based on those designs (128 MiB ECC memory, also
- * soldered onto the board).
- *
- * Dynamic feature detection and handling needs to be added for the
- * other realizations of this controller listed above.
- *
- * Eventually, this driver will likely be adapted to the above variant
- * realizations of this controller as well as broken apart to handle
- * the other known ECC-capable controllers prevalent in other 4xx
- * processors:
- *
- * - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
- * - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
- * - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
- *
- * For this controller, unfortunately, correctable errors report
- * nothing more than the beat/cycle and byte/lane the correction
- * occurred on and the check bit group that covered the error.
- *
- * In contrast, uncorrectable errors also report the failing address,
- * the bus master and the transaction direction (i.e. read or write)
- *
- * Regardless of whether the error is a CE or a UE, we report the
- * following pieces of information in the driver-unique message to the
- * EDAC subsystem:
- *
- * - Device tree path
- * - Bank(s)
- * - Check bit error group
- * - Beat(s)/lane(s)
- */
-
-/* Preprocessor Definitions */
-
-#define EDAC_OPSTATE_INT_STR "interrupt"
-#define EDAC_OPSTATE_POLL_STR "polled"
-#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
-
-#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
-#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0"
-
-#define PPC4XX_EDAC_MESSAGE_SIZE 256
-
-/*
- * Kernel logging without an EDAC instance
- */
-#define ppc4xx_edac_printk(level, fmt, arg...) \
- edac_printk(level, "PPC4xx MC", fmt, ##arg)
-
-/*
- * Kernel logging with an EDAC instance
- */
-#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
-
-/*
- * Macros to convert bank configuration size enumerations into MiB and
- * page values.
- */
-#define SDRAM_MBCF_SZ_MiB_MIN 4
-#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \
- << (SDRAM_MBCF_SZ_DECODE(n)))
-#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \
- << (20 - PAGE_SHIFT + \
- SDRAM_MBCF_SZ_DECODE(n)))
-
-/*
- * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly accessed and have a base and length defined by the
- * device tree. The base can be anything; however, we expect the
- * length to be precisely two registers, the first for the address
- * window and the second for the data window.
- */
-#define SDRAM_DCR_RESOURCE_LEN 2
-#define SDRAM_DCR_ADDR_OFFSET 0
-#define SDRAM_DCR_DATA_OFFSET 1
-
-/*
- * Device tree interrupt indices
- */
-#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */
-#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */
-
-/* Type Definitions */
-
-/*
- * PPC4xx SDRAM memory controller private instance data
- */
-struct ppc4xx_edac_pdata {
- dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */
- struct {
- int sec; /* Single-bit correctable error IRQ assigned */
- int ded; /* Double-bit detectable error IRQ assigned */
- } irqs;
-};
-
-/*
- * Various status data gathered and manipulated when checking and
- * reporting ECC status.
- */
-struct ppc4xx_ecc_status {
- u32 ecces;
- u32 besr;
- u32 bearh;
- u32 bearl;
- u32 wmirq;
-};
-
-/* Global Variables */
-
-/*
- * Device tree node type and compatible tuples this driver can match
- * on.
- */
-static const struct of_device_id ppc4xx_edac_match[] = {
- {
- .compatible = "ibm,sdram-4xx-ddr2"
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
-
-/*
- * TODO: The row and channel parameters likely need to be dynamically
- * set based on the aforementioned variant controller realizations.
- */
-static const unsigned ppc4xx_edac_nr_csrows = 2;
-static const unsigned ppc4xx_edac_nr_chans = 1;
-
-/*
- * Strings associated with PLB master IDs capable of being posted in
- * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
- */
-static const char * const ppc4xx_plb_masters[9] = {
- [SDRAM_PLB_M0ID_ICU] = "ICU",
- [SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0",
- [SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1",
- [SDRAM_PLB_M0ID_DMA] = "DMA",
- [SDRAM_PLB_M0ID_DCU] = "DCU",
- [SDRAM_PLB_M0ID_OPB] = "OPB",
- [SDRAM_PLB_M0ID_MAL] = "MAL",
- [SDRAM_PLB_M0ID_SEC] = "SEC",
- [SDRAM_PLB_M0ID_AHB] = "AHB"
-};
-
-/**
- * mfsdram - read and return controller register data
- * @dcr_host: A pointer to the DCR mapping.
- * @idcr_n: The indirect DCR register to read.
- *
- * This routine reads and returns the data associated with the
- * controller's specified indirect DCR register.
- *
- * Returns the read data.
- */
-static inline u32
-mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
-{
- return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
- dcr_host->base + SDRAM_DCR_DATA_OFFSET,
- idcr_n);
-}
-
-/**
- * mtsdram - write controller register data
- * @dcr_host: A pointer to the DCR mapping.
- * @idcr_n: The indirect DCR register to write.
- * @value: The data to write.
- *
- * This routine writes the provided data to the controller's specified
- * indirect DCR register.
- */
-static inline void
-mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
-{
- return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
- dcr_host->base + SDRAM_DCR_DATA_OFFSET,
- idcr_n,
- value);
-}
-
-/**
- * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
- * @status: A pointer to the ECC status structure to check for an
- * ECC bank error.
- * @bank: The bank to check for an ECC error.
- *
- * This routine determines whether the specified bank has an ECC
- * error.
- *
- * Returns true if the specified bank has an ECC error; otherwise,
- * false.
- */
-static bool
-ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
- unsigned int bank)
-{
- switch (bank) {
- case 0:
- return status->ecces & SDRAM_ECCES_BK0ER;
- case 1:
- return status->ecces & SDRAM_ECCES_BK1ER;
- default:
- return false;
- }
-}
-
-/**
- * ppc4xx_edac_generate_bank_message - generate interpretted bank status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the bank message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCESS[BKNER]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
- unsigned int row, rows;
-
- n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
- if (ppc4xx_edac_check_bank_error(status, row)) {
- n = snprintf(buffer, size, "%s%u",
- (rows++ ? ", " : ""), row);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
- }
- }
-
- n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
-
-/**
- * ppc4xx_edac_generate_checkbit_message - generate interpretted checkbit message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the checkbit message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCESS[CKBER]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const char *ckber = NULL;
-
- switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
- case SDRAM_ECCES_CKBER_NONE:
- ckber = "None";
- break;
- case SDRAM_ECCES_CKBER_32_ECC_0_3:
- ckber = "ECC0:3";
- break;
- case SDRAM_ECCES_CKBER_32_ECC_4_8:
- switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
- SDRAM_MCOPT1_WDTH_MASK) {
- case SDRAM_MCOPT1_WDTH_16:
- ckber = "ECC0:3";
- break;
- case SDRAM_MCOPT1_WDTH_32:
- ckber = "ECC4:8";
- break;
- default:
- ckber = "Unknown";
- break;
- }
- break;
- case SDRAM_ECCES_CKBER_32_ECC_0_8:
- ckber = "ECC0:8";
- break;
- default:
- ckber = "Unknown";
- break;
- }
-
- return snprintf(buffer, size, "Checkbit Error: %s", ckber);
-}
-
-/**
- * ppc4xx_edac_generate_lane_message - generate interpretted byte lane message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the byte lane message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCESS[BNCE]
- * field of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
- unsigned int lane, lanes;
- const unsigned int first_lane = 0;
- const unsigned int lane_count = 16;
-
- n = snprintf(buffer, size, "; Byte Lane Errors: ");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
- if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
- n = snprintf(buffer, size,
- "%s%u",
- (lanes++ ? ", " : ""), lane);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
- }
- }
-
- n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
-
-/**
- * ppc4xx_edac_generate_ecc_message - generate interpretted ECC status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the ECCES message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the ECCESS register of
- * the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n, total = 0;
-
- n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- goto fail;
-
- buffer += n;
- size -= n;
- total += n;
-
- fail:
- return total;
-}
-
-/**
- * ppc4xx_edac_generate_plb_message - generate interpretted PLB status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the PLB message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the portion of the
- * driver-unique report message associated with the PLB-related BESR
- * and/or WMIRQ registers of the specified ECC status.
- *
- * Returns the number of characters generated on success; otherwise, <
- * 0 on error.
- */
-static int
-ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- unsigned int master;
- bool read;
-
- if ((status->besr & SDRAM_BESR_MASK) == 0)
- return 0;
-
- if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
- return 0;
-
- read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
-
- master = SDRAM_BESR_M0ID_DECODE(status->besr);
-
- return snprintf(buffer, size,
- "%s error w/ PLB master %u \"%s\"; ",
- (read ? "Read" : "Write"),
- master,
- (((master >= SDRAM_PLB_M0ID_FIRST) &&
- (master <= SDRAM_PLB_M0ID_LAST)) ?
- ppc4xx_plb_masters[master] : "UNKNOWN"));
-}
-
-/**
- * ppc4xx_edac_generate_message - generate interpretted status message
- * @mci: A pointer to the EDAC memory controller instance associated
- * with the driver-unique message being generated.
- * @status: A pointer to the ECC status structure to generate the
- * message from.
- * @buffer: A pointer to the buffer in which to generate the
- * message.
- * @size: The size, in bytes, of space available in buffer.
- *
- * This routine generates to the provided buffer the driver-unique
- * EDAC report message from the specified ECC status.
- */
-static void
-ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status,
- char *buffer,
- size_t size)
-{
- int n;
-
- if (buffer == NULL || size == 0)
- return;
-
- n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
-
- if (n < 0 || n >= size)
- return;
-
- buffer += n;
- size -= n;
-
- ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
-}
-
-#ifdef DEBUG
-/**
- * ppc4xx_ecc_dump_status - dump controller ECC status registers
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being dumped.
- * @status: A pointer to the ECC status structure to generate the
- * dump from.
- *
- * This routine dumps the raw and interpreted forms of the specified
- * ECC status to the kernel log buffer.
- */
-static void
-ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- ppc4xx_edac_mc_printk(KERN_INFO, mci,
- "\n"
- "\tECCES: 0x%08x\n"
- "\tWMIRQ: 0x%08x\n"
- "\tBESR: 0x%08x\n"
- "\tBEAR: 0x%08x%08x\n"
- "\t%s\n",
- status->ecces,
- status->wmirq,
- status->besr,
- status->bearh,
- status->bearl,
- message);
-}
-#endif /* DEBUG */
-
-/**
- * ppc4xx_ecc_get_status - get controller ECC status
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being retrieved.
- * @status: A pointer to the ECC status structure to populate the
- * ECC status with.
- *
- * This routine reads and masks, as appropriate, all the relevant
- * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
- * While we read all of them, for correctable errors, we only expect
- * to deal with ECCES. For uncorrectable errors, we expect to deal
- * with all of them.
- */
-static void
-ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
- struct ppc4xx_ecc_status *status)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const dcr_host_t *dcr_host = &pdata->dcr_host;
-
- status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
- status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
- status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK;
- status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
- status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
-}
-
-/**
- * ppc4xx_ecc_clear_status - clear controller ECC status
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the status being cleared.
- * @status: A pointer to the ECC status structure containing the
- * values to write to clear the ECC status.
- *
- * This routine clears the status registers that deal with
- * ibm,sdram-4xx-ddr2 ECC errors by writing the masked (as
- * appropriate) status values back to them.
- */
-static void
-ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- const dcr_host_t *dcr_host = &pdata->dcr_host;
-
- mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK);
- mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK);
- mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK);
- mtsdram(dcr_host, SDRAM_BEARL, 0);
- mtsdram(dcr_host, SDRAM_BEARH, 0);
-}
-
-/**
- * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the correctable error being handled and reported.
- * @status: A pointer to the ECC status structure associated with
- * the correctable error being handled and reported.
- *
- * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
- * correctable error. Per the aforementioned discussion, there's not
- * enough status available to use the full EDAC correctable error
- * interface, so we just pass a driver-unique message to the "no info"
- * interface.
- */
-static void
-ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- int row;
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- for (row = 0; row < mci->nr_csrows; row++)
- if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0, 0,
- row, 0, -1,
- message, "");
-}
-
-/**
- * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the uncorrectable error being handled and
- * reported.
- * @status: A pointer to the ECC status structure associated with
- * the uncorrectable error being handled and reported.
- *
- * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
- * uncorrectable error.
- */
-static void
-ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
- const struct ppc4xx_ecc_status *status)
-{
- const u64 bear = ((u64)status->bearh << 32 | status->bearl);
- const unsigned long page = bear >> PAGE_SHIFT;
- const unsigned long offset = bear & ~PAGE_MASK;
- int row;
- char message[PPC4XX_EDAC_MESSAGE_SIZE];
-
- ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
-
- for (row = 0; row < mci->nr_csrows; row++)
- if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- page, offset, 0,
- row, 0, -1,
- message, "");
-}
-
-/**
- * ppc4xx_edac_check - check controller for ECC errors
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller being
- * checked.
- *
- * This routine is used to check and post ECC errors and is called by
- * both the EDAC polling thread and this driver's CE and UE interrupt
- * handler.
- */
-static void
-ppc4xx_edac_check(struct mem_ctl_info *mci)
-{
-#ifdef DEBUG
- static unsigned int count;
-#endif
- struct ppc4xx_ecc_status status;
-
- ppc4xx_ecc_get_status(mci, &status);
-
-#ifdef DEBUG
- if (count++ % 30 == 0)
- ppc4xx_ecc_dump_status(mci, &status);
-#endif
-
- if (status.ecces & SDRAM_ECCES_UE)
- ppc4xx_edac_handle_ue(mci, &status);
-
- if (status.ecces & SDRAM_ECCES_CE)
- ppc4xx_edac_handle_ce(mci, &status);
-
- ppc4xx_ecc_clear_status(mci, &status);
-}
-
-/**
- * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
- * @irq: The virtual interrupt number being serviced.
- * @dev_id: A pointer to the EDAC memory controller instance
- * associated with the interrupt being handled.
- *
- * This routine implements the interrupt handler for both correctable
- * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
- * controller. It simply calls through to the same routine used during
- * polling to check, report and clear the ECC status.
- *
- * Unconditionally returns IRQ_HANDLED.
- */
-static irqreturn_t
-ppc4xx_edac_isr(int irq, void *dev_id)
-{
- struct mem_ctl_info *mci = dev_id;
-
- ppc4xx_edac_check(mci);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ppc4xx_edac_get_dtype - return the controller memory width
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which the width
- * is derived.
- *
- * This routine returns the EDAC device type width appropriate for the
- * current controller configuration.
- *
- * TODO: This needs to be conditioned dynamically through feature
- * flags or some such when other controller variants are supported,
- * as the 405EX[r] is 16-/32-bit while the others are 32-/64-bit,
- * with the 16- and 64-bit field definition/value/enumeration (b1)
- * overloaded among them.
- *
- * Returns a device type width enumeration.
- */
-static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1)
-{
- switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
- case SDRAM_MCOPT1_WDTH_16:
- return DEV_X2;
- case SDRAM_MCOPT1_WDTH_32:
- return DEV_X4;
- default:
- return DEV_UNKNOWN;
- }
-}
-
-/**
- * ppc4xx_edac_get_mtype - return controller memory type
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which the memory type
- * is derived.
- *
- * This routine returns the EDAC memory type appropriate for the
- * current controller configuration.
- *
- * Returns a memory type enumeration.
- */
-static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1)
-{
- bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
-
- switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
- case SDRAM_MCOPT1_DDR2_TYPE:
- return rden ? MEM_RDDR2 : MEM_DDR2;
- case SDRAM_MCOPT1_DDR1_TYPE:
- return rden ? MEM_RDDR : MEM_DDR;
- default:
- return MEM_UNKNOWN;
- }
-}
-
-/**
- * ppc4xx_edac_init_csrows - initialize driver instance rows
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller for which
- * the csrows (i.e. banks/ranks) are being initialized.
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which bank width
- * and memory type information is derived.
- *
- * This routine initializes the virtual "chip select rows" associated
- * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
- * controller bank/rank is mapped to a row.
- *
- * Returns 0 if OK; otherwise, -EINVAL if the memory bank size
- * configuration cannot be determined.
- */
-static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
-{
- const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- int status = 0;
- enum mem_type mtype;
- enum dev_type dtype;
- enum edac_type edac_mode;
- int row, j;
- u32 mbxcf, size, nr_pages;
-
- /* Establish the memory type and width */
-
- mtype = ppc4xx_edac_get_mtype(mcopt1);
- dtype = ppc4xx_edac_get_dtype(mcopt1);
-
- /* Establish EDAC mode */
-
- if (mci->edac_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
- else if (mci->edac_cap & EDAC_FLAG_EC)
- edac_mode = EDAC_EC;
- else
- edac_mode = EDAC_NONE;
-
- /*
-	 * Initialize each chip select row structure, each of which
-	 * corresponds 1:1 with a controller bank/rank.
- */
-
- for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = mci->csrows[row];
-
- /*
- * Get the configuration settings for this
- * row/bank/rank and skip disabled banks.
- */
-
- mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
-
- if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
- continue;
-
- /* Map the bank configuration size setting to pages. */
-
- size = mbxcf & SDRAM_MBCF_SZ_MASK;
-
- switch (size) {
- case SDRAM_MBCF_SZ_4MB:
- case SDRAM_MBCF_SZ_8MB:
- case SDRAM_MBCF_SZ_16MB:
- case SDRAM_MBCF_SZ_32MB:
- case SDRAM_MBCF_SZ_64MB:
- case SDRAM_MBCF_SZ_128MB:
- case SDRAM_MBCF_SZ_256MB:
- case SDRAM_MBCF_SZ_512MB:
- case SDRAM_MBCF_SZ_1GB:
- case SDRAM_MBCF_SZ_2GB:
- case SDRAM_MBCF_SZ_4GB:
- case SDRAM_MBCF_SZ_8GB:
- nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
- break;
- default:
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unrecognized memory bank %d "
- "size 0x%08x\n",
- row, SDRAM_MBCF_SZ_DECODE(size));
- status = -EINVAL;
- goto done;
- }
-
- /*
- * It's unclear exactly what grain should be set to
- * here. The SDRAM_ECCES register allows resolution of
- * an error down to a nibble which would potentially
- * argue for a grain of '1' byte, even though we only
- * know the associated address for uncorrectable
- * errors. This value is not used at present for
- * anything other than error reporting so getting it
- * wrong should be of little consequence. Other
- * possible values would be the PLB width (16), the
- * page size (PAGE_SIZE) or the memory width (2 or 4).
- */
- for (j = 0; j < csi->nr_channels; j++) {
- struct dimm_info *dimm = csi->channels[j]->dimm;
-
- dimm->nr_pages = nr_pages / csi->nr_channels;
- dimm->grain = 1;
-
- dimm->mtype = mtype;
- dimm->dtype = dtype;
-
- dimm->edac_mode = edac_mode;
- }
- }
-
- done:
- return status;
-}
-
-/**
- * ppc4xx_edac_mc_init - initialize driver instance
- * @mci: A pointer to the EDAC memory controller instance being
- * initialized.
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is bound to.
- * @dcr_host: A pointer to the DCR data containing the DCR mapping
- * for this controller instance.
- * @mcopt1: The 32-bit Memory Controller Option 1 register value
- * currently set for the controller, from which ECC capabilities
- * and scrub mode are derived.
- *
- * This routine performs initialization of the EDAC memory controller
- * instance and related driver-private data associated with the
- * ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
- *
- * Returns 0 if OK; otherwise, < 0 on error.
- */
-static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
- struct platform_device *op,
- const dcr_host_t *dcr_host, u32 mcopt1)
-{
- int status = 0;
- const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
- struct ppc4xx_edac_pdata *pdata = NULL;
- const struct device_node *np = op->dev.of_node;
-
- if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
- return -EINVAL;
-
- /* Initial driver pointers and private data */
-
- mci->pdev = &op->dev;
-
- dev_set_drvdata(mci->pdev, mci);
-
- pdata = mci->pvt_info;
-
- pdata->dcr_host = *dcr_host;
-
- /* Initialize controller capabilities and configuration */
-
- mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR |
- MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
-
- mci->edac_ctl_cap = (EDAC_FLAG_NONE |
- EDAC_FLAG_EC |
- EDAC_FLAG_SECDED);
-
- mci->scrub_cap = SCRUB_NONE;
- mci->scrub_mode = SCRUB_NONE;
-
- /*
-	 * Update the actual capabilities based on the MCOPT1[MCHK]
- * settings. Scrubbing is only useful if reporting is enabled.
- */
-
- switch (memcheck) {
- case SDRAM_MCOPT1_MCHK_CHK:
- mci->edac_cap = EDAC_FLAG_EC;
- break;
- case SDRAM_MCOPT1_MCHK_CHK_REP:
- mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
- mci->scrub_mode = SCRUB_SW_SRC;
- break;
- default:
- mci->edac_cap = EDAC_FLAG_NONE;
- break;
- }
-
- /* Initialize strings */
-
- mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->ctl_name = ppc4xx_edac_match->compatible;
- mci->dev_name = np->full_name;
-
- /* Initialize callbacks */
-
- mci->edac_check = ppc4xx_edac_check;
- mci->ctl_page_to_phys = NULL;
-
- /* Initialize chip select rows */
-
- status = ppc4xx_edac_init_csrows(mci, mcopt1);
-
- if (status)
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to initialize rows!\n");
-
- return status;
-}
-
-/**
- * ppc4xx_edac_register_irq - setup and register controller interrupts
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is bound to.
- * @mci: A pointer to the EDAC memory controller instance
- * associated with the ibm,sdram-4xx-ddr2 controller for which
- * interrupts are being registered.
- *
- * This routine parses the correctable (CE) and uncorrectable error (UE)
- * interrupts from the device tree node and maps and assigns them to
- * the associated EDAC memory controller instance.
- *
- * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
- * mapped and assigned.
- */
-static int ppc4xx_edac_register_irq(struct platform_device *op,
- struct mem_ctl_info *mci)
-{
- int status = 0;
- int ded_irq, sec_irq;
- struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
- struct device_node *np = op->dev.of_node;
-
- ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
- sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
-
- if (!ded_irq || !sec_irq) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to map interrupts.\n");
- status = -ENODEV;
- goto fail;
- }
-
- status = request_irq(ded_irq,
- ppc4xx_edac_isr,
- 0,
- "[EDAC] MC ECCDED",
- mci);
-
- if (status < 0) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to request irq %d for ECC DED",
- ded_irq);
- status = -ENODEV;
- goto fail1;
- }
-
- status = request_irq(sec_irq,
- ppc4xx_edac_isr,
- 0,
- "[EDAC] MC ECCSEC",
- mci);
-
- if (status < 0) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Unable to request irq %d for ECC SEC",
- sec_irq);
- status = -ENODEV;
- goto fail2;
- }
-
- ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
- ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
-
- pdata->irqs.ded = ded_irq;
- pdata->irqs.sec = sec_irq;
-
- return 0;
-
- fail2:
- free_irq(sec_irq, mci);
-
- fail1:
- free_irq(ded_irq, mci);
-
- fail:
- return status;
-}
-
-/**
- * ppc4xx_edac_map_dcrs - locate and map controller registers
- * @np: A pointer to the device tree node containing the DCR
- * resources to map.
- * @dcr_host: A pointer to the DCR data to populate with the
- * DCR mapping.
- *
- * This routine attempts to locate in the device tree and map the DCR
- * register resources associated with the controller's indirect DCR
- * address and data windows.
- *
- * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
- * error.
- */
-static int ppc4xx_edac_map_dcrs(const struct device_node *np,
- dcr_host_t *dcr_host)
-{
- unsigned int dcr_base, dcr_len;
-
- if (np == NULL || dcr_host == NULL)
- return -EINVAL;
-
- /* Get the DCR resource extent and sanity check the values. */
-
- dcr_base = dcr_resource_start(np, 0);
- dcr_len = dcr_resource_len(np, 0);
-
- if (dcr_base == 0 || dcr_len == 0) {
- ppc4xx_edac_printk(KERN_ERR,
- "Failed to obtain DCR property.\n");
- return -ENODEV;
- }
-
- if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
- ppc4xx_edac_printk(KERN_ERR,
- "Unexpected DCR length %d, expected %d.\n",
- dcr_len, SDRAM_DCR_RESOURCE_LEN);
- return -ENODEV;
- }
-
- /* Attempt to map the DCR extent. */
-
- *dcr_host = dcr_map(np, dcr_base, dcr_len);
-
- if (!DCR_MAP_OK(*dcr_host)) {
- ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ppc4xx_edac_probe - check controller and bind driver
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller being probed for driver binding.
- *
- * This routine probes a specific ibm,sdram-4xx-ddr2 controller
- * instance for binding with the driver.
- *
- * Returns 0 if the controller instance was successfully bound to the
- * driver; otherwise, < 0 on error.
- */
-static int ppc4xx_edac_probe(struct platform_device *op)
-{
- int status = 0;
- u32 mcopt1, memcheck;
- dcr_host_t dcr_host;
- const struct device_node *np = op->dev.of_node;
- struct mem_ctl_info *mci = NULL;
- struct edac_mc_layer layers[2];
- static int ppc4xx_edac_instance;
-
- /*
- * At this point, we only support the controller realized on
- * the AMCC PPC 405EX[r]. Reject anything else.
- */
-
- if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
- !of_device_is_compatible(np, "ibm,sdram-405exr")) {
- ppc4xx_edac_printk(KERN_NOTICE,
- "Only the PPC405EX[r] is supported.\n");
- return -ENODEV;
- }
-
- /*
- * Next, get the DCR property and attempt to map it so that we
- * can probe the controller.
- */
-
- status = ppc4xx_edac_map_dcrs(np, &dcr_host);
-
- if (status)
- return status;
-
- /*
- * First determine whether ECC is enabled at all. If not,
- * there is no useful checking or monitoring that can be done
- * for this controller.
- */
-
- mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
- memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
-
- if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
- ppc4xx_edac_printk(KERN_INFO, "%pOF: No ECC memory detected or "
- "ECC is disabled.\n", np);
- status = -ENODEV;
- goto done;
- }
-
- /*
- * At this point, we know ECC is enabled, allocate an EDAC
- * controller instance and perform the appropriate
- * initialization.
- */
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = ppc4xx_edac_nr_csrows;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = ppc4xx_edac_nr_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
- sizeof(struct ppc4xx_edac_pdata));
- if (mci == NULL) {
- ppc4xx_edac_printk(KERN_ERR, "%pOF: "
- "Failed to allocate EDAC MC instance!\n",
- np);
- status = -ENOMEM;
- goto done;
- }
-
- status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1);
-
- if (status) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to initialize instance!\n");
- goto fail;
- }
-
- /*
- * We have a valid, initialized EDAC instance bound to the
- * controller. Attempt to register it with the EDAC subsystem
- * and, if necessary, register interrupts.
- */
-
- if (edac_mc_add_mc(mci)) {
- ppc4xx_edac_mc_printk(KERN_ERR, mci,
- "Failed to add instance!\n");
- status = -ENODEV;
- goto fail;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- status = ppc4xx_edac_register_irq(op, mci);
-
- if (status)
- goto fail1;
- }
-
- ppc4xx_edac_instance++;
-
- return 0;
-
- fail1:
- edac_mc_del_mc(mci->pdev);
-
- fail:
- edac_mc_free(mci);
-
- done:
- return status;
-}
-
-/**
- * ppc4xx_edac_remove - unbind driver from controller
- * @op: A pointer to the OpenFirmware device tree node associated
- * with the controller this EDAC instance is to be unbound/removed
- * from.
- *
- * This routine unbinds the EDAC memory controller instance associated
- * with the specified ibm,sdram-4xx-ddr2 controller described by the
- * OpenFirmware device tree node passed as a parameter.
- */
-static void ppc4xx_edac_remove(struct platform_device *op)
-{
- struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
- struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- free_irq(pdata->irqs.sec, mci);
- free_irq(pdata->irqs.ded, mci);
- }
-
- dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
-
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
-}
-
-/**
- * ppc4xx_edac_opstate_init - initialize EDAC reporting method
- *
- * This routine ensures that the EDAC memory controller reporting
- * method is mapped to a sane value, as the EDAC core sets it
- * to EDAC_OPSTATE_INVAL by default. We don't call the global
- * opstate_init as that defaults to polling and we want interrupt as
- * the default.
- */
-static inline void __init
-ppc4xx_edac_opstate_init(void)
-{
- switch (edac_op_state) {
- case EDAC_OPSTATE_POLL:
- case EDAC_OPSTATE_INT:
- break;
- default:
- edac_op_state = EDAC_OPSTATE_INT;
- break;
- }
-
- ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
- ((edac_op_state == EDAC_OPSTATE_POLL) ?
- EDAC_OPSTATE_POLL_STR :
- ((edac_op_state == EDAC_OPSTATE_INT) ?
- EDAC_OPSTATE_INT_STR :
- EDAC_OPSTATE_UNKNOWN_STR)));
-}
-
-static struct platform_driver ppc4xx_edac_driver = {
- .probe = ppc4xx_edac_probe,
- .remove_new = ppc4xx_edac_remove,
- .driver = {
- .name = PPC4XX_EDAC_MODULE_NAME,
- .of_match_table = ppc4xx_edac_match,
- },
-};
-
-/**
- * ppc4xx_edac_init - driver/module insertion entry point
- *
- * This routine is the driver/module insertion entry point. It
- * initializes the EDAC memory controller reporting state and
- * registers the driver as an OpenFirmware device tree platform
- * driver.
- */
-static int __init
-ppc4xx_edac_init(void)
-{
- ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
-
- ppc4xx_edac_opstate_init();
-
- return platform_driver_register(&ppc4xx_edac_driver);
-}
-
-/**
- * ppc4xx_edac_exit - driver/module removal entry point
- *
- * This routine is the driver/module removal entry point. It
- * unregisters the driver as an OpenFirmware device tree platform
- * driver.
- */
-static void __exit
-ppc4xx_edac_exit(void)
-{
- platform_driver_unregister(&ppc4xx_edac_driver);
-}
-
-module_init(ppc4xx_edac_init);
-module_exit(ppc4xx_edac_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>");
-MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
-module_param(edac_op_state, int, 0444);
-MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
- "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);
diff --git a/drivers/edac/ppc4xx_edac.h b/drivers/edac/ppc4xx_edac.h
deleted file mode 100644
index b38459aa58ee..000000000000
--- a/drivers/edac/ppc4xx_edac.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2008 Nuovation System Designs, LLC
- * Grant Erickson <gerickson@nuovations.com>
- *
- * This file defines processor mnemonics for accessing and managing
- * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP,
- * 440SPe, 460EX, 460GT and 460SX.
- */
-
-#ifndef __PPC4XX_EDAC_H
-#define __PPC4XX_EDAC_H
-
-#include <linux/types.h>
-
-/*
- * Macro for generating register field mnemonics
- */
-#define PPC_REG_BITS 32
-#define PPC_REG_VAL(bit, val) ((val) << ((PPC_REG_BITS - 1) - (bit)))
-#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit)))
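-/*
- * These follow the IBM bit-numbering convention, where bit 0 is the
- * most significant bit of the 32-bit register. For example,
- * PPC_REG_VAL(7, 0x1) is 0x1 << 24 = 0x01000000, and
- * PPC_REG_DECODE(7, 0x01000000) recovers 0x1.
- */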
-
-/*
- * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those
- * relevant to ECC)
- */
-#define SDRAM_BESR 0x00 /* Error status (read/clear) */
-#define SDRAM_BESRT			0x01	/* Error status (test/set) */
-#define SDRAM_BEARL 0x02 /* Error address low */
-#define SDRAM_BEARH 0x03 /* Error address high */
-#define SDRAM_WMIRQ 0x06 /* Write master (read/clear) */
-#define SDRAM_WMIRQT 0x07 /* Write master (test/set) */
-#define SDRAM_MCOPT1 0x20 /* Controller options 1 */
-#define SDRAM_MBXCF_BASE 0x40 /* Bank n configuration base */
-#define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n)))
-#define SDRAM_MB0CF SDRAM_MBXCF(0)
-#define SDRAM_MB1CF SDRAM_MBXCF(1)
-#define SDRAM_MB2CF SDRAM_MBXCF(2)
-#define SDRAM_MB3CF SDRAM_MBXCF(3)
-#define SDRAM_ECCCR 0x98 /* ECC error status */
-#define SDRAM_ECCES SDRAM_ECCCR
-
-/*
- * PLB Master IDs
- */
-#define SDRAM_PLB_M0ID_FIRST 0
-#define SDRAM_PLB_M0ID_ICU SDRAM_PLB_M0ID_FIRST
-#define SDRAM_PLB_M0ID_PCIE0 1
-#define SDRAM_PLB_M0ID_PCIE1 2
-#define SDRAM_PLB_M0ID_DMA 3
-#define SDRAM_PLB_M0ID_DCU 4
-#define SDRAM_PLB_M0ID_OPB 5
-#define SDRAM_PLB_M0ID_MAL 6
-#define SDRAM_PLB_M0ID_SEC 7
-#define SDRAM_PLB_M0ID_AHB 8
-#define SDRAM_PLB_M0ID_LAST SDRAM_PLB_M0ID_AHB
-#define SDRAM_PLB_M0ID_COUNT (SDRAM_PLB_M0ID_LAST - \
- SDRAM_PLB_M0ID_FIRST + 1)
-
-/*
- * Memory Controller Bus Error Status Register
- */
-#define SDRAM_BESR_MASK PPC_REG_VAL(7, 0xFF)
-#define SDRAM_BESR_M0ID_MASK PPC_REG_VAL(3, 0xF)
-#define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n)
-#define SDRAM_BESR_M0ID_ICU PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU)
-#define SDRAM_BESR_M0ID_PCIE0 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0)
-#define SDRAM_BESR_M0ID_PCIE1 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1)
-#define SDRAM_BESR_M0ID_DMA PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA)
-#define SDRAM_BESR_M0ID_DCU PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU)
-#define SDRAM_BESR_M0ID_OPB PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB)
-#define SDRAM_BESR_M0ID_MAL PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL)
-#define SDRAM_BESR_M0ID_SEC PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC)
-#define SDRAM_BESR_M0ID_AHB PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB)
-#define SDRAM_BESR_M0ET_MASK PPC_REG_VAL(6, 0x7)
-#define SDRAM_BESR_M0ET_NONE PPC_REG_VAL(6, 0)
-#define SDRAM_BESR_M0ET_ECC PPC_REG_VAL(6, 1)
-#define SDRAM_BESR_M0RW_MASK PPC_REG_VAL(7, 1)
-#define SDRAM_BESR_M0RW_WRITE PPC_REG_VAL(7, 0)
-#define SDRAM_BESR_M0RW_READ PPC_REG_VAL(7, 1)
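-/*
- * Worked example: the master ID occupies IBM bits 0-3 (the top
- * nibble), so SDRAM_BESR_M0ID_DECODE(0x60000000) yields 6, i.e.
- * SDRAM_PLB_M0ID_MAL.
- */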
-
-/*
- * Memory Controller PLB Write Master Interrupt Register
- */
-#define SDRAM_WMIRQ_MASK PPC_REG_VAL(8, 0x1FF)
-#define SDRAM_WMIRQ_ENCODE(id) PPC_REG_VAL((id % \
- SDRAM_PLB_M0ID_COUNT), 1)
-#define SDRAM_WMIRQ_ICU PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1)
-#define SDRAM_WMIRQ_PCIE0 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1)
-#define SDRAM_WMIRQ_PCIE1 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1)
-#define SDRAM_WMIRQ_DMA PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1)
-#define SDRAM_WMIRQ_DCU PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1)
-#define SDRAM_WMIRQ_OPB PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1)
-#define SDRAM_WMIRQ_MAL PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1)
-#define SDRAM_WMIRQ_SEC PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1)
-#define SDRAM_WMIRQ_AHB PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1)
-
-/*
- * Memory Controller Options 1 Register
- */
-#define SDRAM_MCOPT1_MCHK_MASK PPC_REG_VAL(3, 0x3) /* ECC mask */
-#define SDRAM_MCOPT1_MCHK_NON PPC_REG_VAL(3, 0x0) /* No ECC gen */
-#define SDRAM_MCOPT1_MCHK_GEN PPC_REG_VAL(3, 0x2) /* ECC gen */
-#define SDRAM_MCOPT1_MCHK_CHK PPC_REG_VAL(3, 0x1) /* ECC gen and chk */
-#define SDRAM_MCOPT1_MCHK_CHK_REP PPC_REG_VAL(3, 0x3) /* ECC gen/chk/rpt */
-#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3)
-#define SDRAM_MCOPT1_RDEN_MASK PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM mask */
-#define SDRAM_MCOPT1_RDEN PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM enbl */
-#define SDRAM_MCOPT1_WDTH_MASK PPC_REG_VAL(7, 0x1) /* Width mask */
-#define SDRAM_MCOPT1_WDTH_32 PPC_REG_VAL(7, 0x0) /* 32 bits */
-#define SDRAM_MCOPT1_WDTH_16 PPC_REG_VAL(7, 0x1) /* 16 bits */
-#define SDRAM_MCOPT1_DDR_TYPE_MASK PPC_REG_VAL(11, 0x1) /* DDR type mask */
-#define SDRAM_MCOPT1_DDR1_TYPE PPC_REG_VAL(11, 0x0) /* DDR1 type */
-#define SDRAM_MCOPT1_DDR2_TYPE PPC_REG_VAL(11, 0x1) /* DDR2 type */
-
-/*
- * Memory Bank 0 - n Configuration Register
- */
-#define SDRAM_MBCF_BA_MASK PPC_REG_VAL(12, 0x1FFF)
-#define SDRAM_MBCF_SZ_MASK PPC_REG_VAL(19, 0xF)
-#define SDRAM_MBCF_SZ_DECODE(mbxcf) PPC_REG_DECODE(19, mbxcf)
-#define SDRAM_MBCF_SZ_4MB PPC_REG_VAL(19, 0x0)
-#define SDRAM_MBCF_SZ_8MB PPC_REG_VAL(19, 0x1)
-#define SDRAM_MBCF_SZ_16MB PPC_REG_VAL(19, 0x2)
-#define SDRAM_MBCF_SZ_32MB PPC_REG_VAL(19, 0x3)
-#define SDRAM_MBCF_SZ_64MB PPC_REG_VAL(19, 0x4)
-#define SDRAM_MBCF_SZ_128MB PPC_REG_VAL(19, 0x5)
-#define SDRAM_MBCF_SZ_256MB PPC_REG_VAL(19, 0x6)
-#define SDRAM_MBCF_SZ_512MB PPC_REG_VAL(19, 0x7)
-#define SDRAM_MBCF_SZ_1GB PPC_REG_VAL(19, 0x8)
-#define SDRAM_MBCF_SZ_2GB PPC_REG_VAL(19, 0x9)
-#define SDRAM_MBCF_SZ_4GB PPC_REG_VAL(19, 0xA)
-#define SDRAM_MBCF_SZ_8GB PPC_REG_VAL(19, 0xB)
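-/*
- * The size code n encoded above maps to a bank size of 4 MiB << n,
- * doubling at each step from 0x0 = 4 MiB up to 0xB = 8 GiB.
- */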
-#define SDRAM_MBCF_AM_MASK PPC_REG_VAL(23, 0xF)
-#define SDRAM_MBCF_AM_MODE0 PPC_REG_VAL(23, 0x0)
-#define SDRAM_MBCF_AM_MODE1 PPC_REG_VAL(23, 0x1)
-#define SDRAM_MBCF_AM_MODE2 PPC_REG_VAL(23, 0x2)
-#define SDRAM_MBCF_AM_MODE3 PPC_REG_VAL(23, 0x3)
-#define SDRAM_MBCF_AM_MODE4 PPC_REG_VAL(23, 0x4)
-#define SDRAM_MBCF_AM_MODE5 PPC_REG_VAL(23, 0x5)
-#define SDRAM_MBCF_AM_MODE6 PPC_REG_VAL(23, 0x6)
-#define SDRAM_MBCF_AM_MODE7 PPC_REG_VAL(23, 0x7)
-#define SDRAM_MBCF_AM_MODE8 PPC_REG_VAL(23, 0x8)
-#define SDRAM_MBCF_AM_MODE9 PPC_REG_VAL(23, 0x9)
-#define SDRAM_MBCF_BE_MASK PPC_REG_VAL(31, 0x1)
-#define SDRAM_MBCF_BE_DISABLE PPC_REG_VAL(31, 0x0)
-#define SDRAM_MBCF_BE_ENABLE PPC_REG_VAL(31, 0x1)
-
-/*
- * ECC Error Status
- */
-#define SDRAM_ECCES_MASK PPC_REG_VAL(21, 0x3FFFFF)
-#define SDRAM_ECCES_BNCE_MASK PPC_REG_VAL(15, 0xFFFF)
-#define SDRAM_ECCES_BNCE_ENCODE(lane) PPC_REG_VAL(((lane) & 0xF), 1)
-#define SDRAM_ECCES_CKBER_MASK PPC_REG_VAL(17, 0x3)
-#define SDRAM_ECCES_CKBER_NONE PPC_REG_VAL(17, 0)
-#define SDRAM_ECCES_CKBER_16_ECC_0_3 PPC_REG_VAL(17, 2)
-#define SDRAM_ECCES_CKBER_32_ECC_0_3 PPC_REG_VAL(17, 1)
-#define SDRAM_ECCES_CKBER_32_ECC_4_8 PPC_REG_VAL(17, 2)
-#define SDRAM_ECCES_CKBER_32_ECC_0_8 PPC_REG_VAL(17, 3)
-#define SDRAM_ECCES_CE PPC_REG_VAL(18, 1)
-#define SDRAM_ECCES_UE PPC_REG_VAL(19, 1)
-#define SDRAM_ECCES_BKNER_MASK PPC_REG_VAL(21, 0x3)
-#define SDRAM_ECCES_BK0ER PPC_REG_VAL(20, 1)
-#define SDRAM_ECCES_BK1ER PPC_REG_VAL(21, 1)
-
-#endif /* __PPC4XX_EDAC_H */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e5c05a876947..d5f12219598a 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -29,6 +29,8 @@
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
+static char sb_msg[256];
+static char sb_msg_full[512];
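+/*
+ * The two buffers above replace what used to be ~768 bytes of on-stack
+ * message storage in sbridge_mce_output_error().
+ */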
/*
* Alter this version for the module when modifications are made
@@ -3079,7 +3081,6 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
- char *optype, msg[256], msg_full[512];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -3095,10 +3096,10 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* aligned address reported by patrol scrubber.
*/
u32 lsb = GET_BITFIELD(m->misc, 0, 5);
+ char *optype, *area_type = "DRAM";
long channel_mask, first_channel;
u8 rank = 0xff, socket, ha;
int rc, dimm;
- char *area_type = "DRAM";
if (pvt->info.type != SANDY_BRIDGE)
recoverable = true;
@@ -3168,32 +3169,32 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
channel = knl_channel_remap(m->bank == 16, channel);
channel_mask = 1 << channel;
- snprintf(msg, sizeof(msg),
- "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
- overflow ? " OVERFLOW" : "",
- (uncorrected_error && recoverable)
- ? " recoverable" : " ",
- mscod, errcode, channel, A + channel);
+ snprintf(sb_msg, sizeof(sb_msg),
+ "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable)
+ ? " recoverable" : " ",
+ mscod, errcode, channel, A + channel);
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, 0, -1,
- optype, msg);
+ optype, sb_msg);
}
return;
} else if (lsb < 12) {
rc = get_memory_error_data(mci, m->addr, &socket, &ha,
&channel_mask, &rank,
- &area_type, msg);
+ &area_type, sb_msg);
} else {
rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
- &channel_mask, msg);
+ &channel_mask, sb_msg);
}
if (rc < 0)
goto err_parsing;
new_mci = get_mci_for_node_id(socket, ha);
if (!new_mci) {
- strcpy(msg, "Error: socket got corrupted!");
+ strscpy(sb_msg, "Error: socket got corrupted!");
goto err_parsing;
}
mci = new_mci;
@@ -3218,7 +3219,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
*/
if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
- snprintf(msg_full, sizeof(msg_full),
+ snprintf(sb_msg_full, sizeof(sb_msg_full),
"%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
@@ -3226,9 +3227,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
mscod, errcode,
socket, ha,
channel_mask,
- rank, msg);
+ rank, sb_msg);
- edac_dbg(0, "%s\n", msg_full);
+ edac_dbg(0, "%s\n", sb_msg_full);
/* FIXME: need support for channel mask */
@@ -3239,12 +3240,12 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, dimm, -1,
- optype, msg_full);
+ optype, sb_msg_full);
return;
err_parsing:
edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
-1, -1, -1,
- msg, "");
+ sb_msg, "");
}
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index af3fa807acdb..14cfd394b469 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -587,54 +587,6 @@ static struct notifier_block skx_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature.
- * Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/skx_test/addr.
- */
-static struct dentry *skx_test;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct mce m;
-
- pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
-
- memset(&m, 0, sizeof(m));
- /* ADDRV + MemRd + Unknown channel */
- m.status = MCI_STATUS_ADDRV + 0x90;
- /* One corrected error */
- m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
- m.addr = val;
- skx_mce_check_error(NULL, 0, &m);
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-static void setup_skx_debug(void)
-{
- skx_test = edac_debugfs_create_dir("skx_test");
- if (!skx_test)
- return;
-
- if (!edac_debugfs_create_file("addr", 0200, skx_test,
- NULL, &fops_u64_wo)) {
- debugfs_remove(skx_test);
- skx_test = NULL;
- }
-}
-
-static void teardown_skx_debug(void)
-{
- debugfs_remove_recursive(skx_test);
-}
-#else
-static inline void setup_skx_debug(void) {}
-static inline void teardown_skx_debug(void) {}
-#endif /*CONFIG_EDAC_DEBUG*/
-
/*
* skx_init:
* make sure we are running on the correct cpu model
@@ -728,7 +680,7 @@ static int __init skx_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- setup_skx_debug();
+ skx_setup_debug("skx_test");
mce_register_decode_chain(&skx_mce_dec);
@@ -742,7 +694,7 @@ static void __exit skx_exit(void)
{
edac_dbg(2, "\n");
mce_unregister_decode_chain(&skx_mce_dec);
- teardown_skx_debug();
+ skx_teardown_debug();
if (nvdimm_count)
skx_adxl_put();
skx_remove();
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 8d18099fd528..85713646957b 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -363,7 +363,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
if (imc->hbm_mc) {
banks = 32;
mtype = MEM_HBM2;
- } else if (cfg->support_ddr5 && (amap & 0x8)) {
+ } else if (cfg->support_ddr5) {
banks = 32;
mtype = MEM_DDR5;
} else {
@@ -739,6 +739,53 @@ void skx_remove(void)
}
EXPORT_SYMBOL_GPL(skx_remove);
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/{skx,i10nm}_test/addr.
+ */
+static struct dentry *skx_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+void skx_setup_debug(const char *name)
+{
+ skx_test = edac_debugfs_create_dir(name);
+ if (!skx_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, skx_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(skx_test);
+ skx_test = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(skx_setup_debug);
+
+void skx_teardown_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+EXPORT_SYMBOL_GPL(skx_teardown_debug);
+#endif /*CONFIG_EDAC_DEBUG*/
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel server processors");
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 473421ba7a18..f945c1bf5ca4 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -259,4 +259,12 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void skx_remove(void);
+#ifdef CONFIG_EDAC_DEBUG
+void skx_setup_debug(const char *name);
+void skx_teardown_debug(void);
+#else
+static inline void skx_setup_debug(const char *name) {}
+static inline void skx_teardown_debug(void) {}
+#endif
+
#endif /* _SKX_COMM_EDAC_H */
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index ea7a9a342dd3..d7416166fd8a 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -337,6 +338,7 @@ struct synps_edac_priv {
* @get_mtype: Get mtype.
* @get_dtype: Get dtype.
* @get_ecc_state: Get ECC state.
+ * @get_mem_info: Get EDAC memory info.
* @quirks: To differentiate IPs.
*/
struct synps_platform_data {
@@ -344,6 +346,9 @@ struct synps_platform_data {
enum mem_type (*get_mtype)(const void __iomem *base);
enum dev_type (*get_dtype)(const void __iomem *base);
bool (*get_ecc_state)(void __iomem *base);
+#ifdef CONFIG_EDAC_DEBUG
+ u64 (*get_mem_info)(struct synps_edac_priv *priv);
+#endif
int quirks;
};
@@ -402,6 +407,25 @@ out:
return 0;
}
+#ifdef CONFIG_EDAC_DEBUG
+/**
+ * zynqmp_get_mem_info - Get the current memory info.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Return: host interface address.
+ */
+static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
+{
+ u64 hif_addr = 0, linear_addr;
+
+ linear_addr = priv->poison_addr;
+ if (linear_addr >= SZ_32G)
+ linear_addr = linear_addr - SZ_32G + SZ_2G;
+ hif_addr = linear_addr >> 3;
+ return hif_addr;
+}
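+/*
+ * Example of the conversion above: poison_addr = SZ_32G folds down to
+ * SZ_2G, and the >> 3 scaling gives hif_addr = 0x10000000.
+ */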
+#endif
+
/**
* zynqmp_get_error_info - Get the current ECC error info.
* @priv: DDR memory controller private instance data.
@@ -922,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = {
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
.get_ecc_state = zynqmp_get_ecc_state,
+#ifdef CONFIG_EDAC_DEBUG
+ .get_mem_info = zynqmp_get_mem_info,
+#endif
.quirks = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
@@ -975,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match);
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
+ const struct synps_platform_data *p_data;
int index;
ulong hif_addr = 0;
- hif_addr = priv->poison_addr >> 3;
+ p_data = priv->p_data;
+
+ if (p_data->get_mem_info)
+ hif_addr = p_data->get_mem_info(priv);
+ else
+ hif_addr = priv->poison_addr >> 3;
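+	/*
+	 * Platforms without a ->get_mem_info() hook fall back to the
+	 * legacy flat mapping, where the HIF address is the linear byte
+	 * address scaled down by the (presumed) 8-byte bus word.
+	 */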
for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
if (priv->row_shift[index])
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index a703a8315634..d3bcbe839c09 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -108,7 +108,7 @@ struct axp288_extcon_info {
};
static const struct x86_cpu_id cherry_trail_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL),
{}
};
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 733c470c3102..93552dc3c895 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -461,14 +461,6 @@ static int cht_wc_extcon_psy_get_prop(struct power_supply *psy,
return 0;
}
-static const enum power_supply_usb_type cht_wc_extcon_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_ACA,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
-};
-
static const enum power_supply_property cht_wc_extcon_psy_props[] = {
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_ONLINE,
@@ -477,8 +469,11 @@ static const enum power_supply_property cht_wc_extcon_psy_props[] = {
static const struct power_supply_desc cht_wc_extcon_psy_desc = {
.name = "cht_wcove_pwrsrc",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = cht_wc_extcon_psy_usb_types,
- .num_usb_types = ARRAY_SIZE(cht_wc_extcon_psy_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = cht_wc_extcon_psy_props,
.num_properties = ARRAY_SIZE(cht_wc_extcon_psy_props),
.get_property = cht_wc_extcon_psy_get_prop,
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index f8b99dd6cd82..01354b9de8b2 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -168,7 +168,6 @@ static size_t required_space(struct fw_descriptor *desc)
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
- int ret;
/*
* Check descriptor is valid; the length of all blocks in the
@@ -182,29 +181,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
if (i != desc->length)
return -EINVAL;
- mutex_lock(&card_mutex);
+ guard(mutex)(&card_mutex);
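+	// The mutex is released automatically when the guard goes out of
+	// scope (linux/cleanup.h), so the early returns below need no
+	// explicit unlock.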
- if (config_rom_length + required_space(desc) > 256) {
- ret = -EBUSY;
- } else {
- list_add_tail(&desc->link, &descriptor_list);
- config_rom_length += required_space(desc);
- descriptor_count++;
- if (desc->immediate > 0)
- descriptor_count++;
- update_config_roms();
- ret = 0;
- }
+ if (config_rom_length + required_space(desc) > 256)
+ return -EBUSY;
- mutex_unlock(&card_mutex);
+ list_add_tail(&desc->link, &descriptor_list);
+ config_rom_length += required_space(desc);
+ descriptor_count++;
+ if (desc->immediate > 0)
+ descriptor_count++;
+ update_config_roms();
- return ret;
+ return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
- mutex_lock(&card_mutex);
+ guard(mutex)(&card_mutex);
list_del(&desc->link);
config_rom_length -= required_space(desc);
@@ -212,8 +207,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
if (desc->immediate > 0)
descriptor_count--;
update_config_roms();
-
- mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
@@ -381,11 +374,11 @@ static void bm_work(struct work_struct *work)
bm_id = be32_to_cpu(transaction_data[0]);
- spin_lock_irq(&card->lock);
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- spin_unlock_irq(&card->lock);
+ scoped_guard(spinlock_irq, &card->lock) {
+ if (rcode == RCODE_COMPLETE && generation == card->generation)
+ card->bm_node_id =
+ bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+ }
if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
@@ -578,25 +571,47 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
-int fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+ unsigned int supported_isoc_contexts)
{
+ struct workqueue_struct *isoc_wq;
int ret;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for isoc context. The
+	//				implementation of a unit protocol could consume the
+	//				core for a long time.
+ // * != WQ_MEM_RECLAIM Not used for any backend of block device.
+	// * == WQ_FREEZABLE		Isochronous communication occurs at regular intervals in
+	//				real time, thus should be drained if possible in the freeze phase.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
+ // contexts if they are scheduled to the same cycle.
+ isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!isoc_wq)
+ return -ENOMEM;
+
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- mutex_lock(&card_mutex);
+ guard(mutex)(&card_mutex);
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret == 0)
- list_add_tail(&card->link, &card_list);
+ if (ret < 0) {
+ destroy_workqueue(isoc_wq);
+ return ret;
+ }
- mutex_unlock(&card_mutex);
+ card->isoc_wq = isoc_wq;
+ list_add_tail(&card->link, &card_list);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(fw_card_add);
@@ -714,29 +729,31 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
- unsigned long flags;
+
+ might_sleep();
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_schedule_bus_reset(card, false, true);
- mutex_lock(&card_mutex);
- list_del_init(&card->link);
- mutex_unlock(&card_mutex);
+ scoped_guard(mutex, &card_mutex)
+ list_del_init(&card->link);
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
+ drain_workqueue(card->isoc_wq);
- spin_lock_irqsave(&card->lock, flags);
- fw_destroy_nodes(card);
- spin_unlock_irqrestore(&card->lock, flags);
+ scoped_guard(spinlock_irqsave, &card->lock)
+ fw_destroy_nodes(card);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
wait_for_completion(&card->done);
+ destroy_workqueue(card->isoc_wq);
+
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9a7dc90330a3..518eaa073b2b 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
-#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -37,6 +36,8 @@
#include "core.h"
#include <trace/events/firewire.h>
+#include "packet-header-definitions.h"
+
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
@@ -52,7 +53,7 @@ struct client {
spinlock_t lock;
bool in_shutdown;
- struct idr resource_idr;
+ struct xarray resource_xa;
struct list_head event_list;
wait_queue_head_t wait;
wait_queue_head_t tx_flush_wait;
@@ -137,8 +138,41 @@ struct iso_resource {
struct iso_resource_event *e_alloc, *e_dealloc;
};
+static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct address_handler_resource, resource);
+}
+
+static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct inbound_transaction_resource, resource);
+}
+
+static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct descriptor_resource, resource);
+}
+
+static struct iso_resource *to_iso_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct iso_resource, resource);
+}
+
static void release_iso_resource(struct client *, struct client_resource *);
+static int is_iso_resource(const struct client_resource *resource)
+{
+ return resource->release == release_iso_resource;
+}
+
+static void release_transaction(struct client *client,
+ struct client_resource *resource);
+
+static int is_outbound_transaction_resource(const struct client_resource *resource)
+{
+ return resource->release == release_transaction;
+}
+
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
client_get(r->client);
@@ -146,13 +180,6 @@ static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
client_put(r->client);
}
-static void schedule_if_iso_resource(struct client_resource *resource)
-{
- if (resource->release == release_iso_resource)
- schedule_iso_resource(container_of(resource,
- struct iso_resource, resource), 0);
-}
-
/*
* dequeue_event() just kfree()'s the event, so the event has to be
* the first field in a struct XYZ_event.
@@ -269,7 +296,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
client->device = device;
spin_lock_init(&client->lock);
- idr_init(&client->resource_idr);
+ xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
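+	// XA_FLAGS_ALLOC1 makes xa_alloc() hand out handles starting at 1,
+	// so a resource handle of 0 is never used.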
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
@@ -285,19 +312,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
static void queue_event(struct client *client, struct event *event,
void *data0, size_t size0, void *data1, size_t size1)
{
- unsigned long flags;
-
event->v[0].data = data0;
event->v[0].size = size0;
event->v[1].data = data1;
event->v[1].size = size1;
- spin_lock_irqsave(&client->lock, flags);
- if (client->in_shutdown)
- kfree(event);
- else
- list_add_tail(&event->link, &client->event_list);
- spin_unlock_irqrestore(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ if (client->in_shutdown)
+ kfree(event);
+ else
+ list_add_tail(&event->link, &client->event_list);
+ }
wake_up_interruptible(&client->wait);
}
@@ -319,10 +344,10 @@ static int dequeue_event(struct client *client,
fw_device_is_shutdown(client->device))
return -ENODEV;
- spin_lock_irq(&client->lock);
- event = list_first_entry(&client->event_list, struct event, link);
- list_del(&event->link);
- spin_unlock_irq(&client->lock);
+ scoped_guard(spinlock_irq, &client->lock) {
+ event = list_first_entry(&client->event_list, struct event, link);
+ list_del(&event->link);
+ }
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
@@ -354,7 +379,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
{
struct fw_card *card = client->device->card;
- spin_lock_irq(&card->lock);
+ guard(spinlock_irq)(&card->lock);
event->closure = client->bus_reset_closure;
event->type = FW_CDEV_EVENT_BUS_RESET;
@@ -364,8 +389,6 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->bm_node_id = card->bm_node_id;
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
-
- spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
@@ -373,22 +396,17 @@ static void for_each_client(struct fw_device *device,
{
struct client *c;
- mutex_lock(&device->client_list_mutex);
+ guard(mutex)(&device->client_list_mutex);
+
list_for_each_entry(c, &device->client_list, link)
callback(c);
- mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
- schedule_if_iso_resource(p);
-
- return 0;
}
static void queue_bus_reset_event(struct client *client)
{
struct bus_reset_event *e;
+ struct client_resource *resource;
+ unsigned long index;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
@@ -399,9 +417,12 @@ static void queue_bus_reset_event(struct client *client)
queue_event(client, &e->event,
&e->reset, sizeof(e->reset), NULL, 0);
- spin_lock_irq(&client->lock);
- idr_for_each(&client->resource_idr, schedule_reallocations, client);
- spin_unlock_irq(&client->lock);
+ guard(spinlock_irq)(&client->lock);
+
+ xa_for_each(&client->resource_xa, index, resource) {
+ if (is_iso_resource(resource))
+ schedule_iso_resource(to_iso_resource(resource), 0);
+ }
}
void fw_device_cdev_update(struct fw_device *device)
@@ -452,23 +473,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
a->version = FW_CDEV_KERNEL_VERSION;
a->card = client->device->card->index;
- down_read(&fw_device_rwsem);
-
- if (a->rom != 0) {
- size_t want = a->rom_length;
- size_t have = client->device->config_rom_length * 4;
+ scoped_guard(rwsem_read, &fw_device_rwsem) {
+ if (a->rom != 0) {
+ size_t want = a->rom_length;
+ size_t have = client->device->config_rom_length * 4;
- ret = copy_to_user(u64_to_uptr(a->rom),
- client->device->config_rom, min(want, have));
+ ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
+ min(want, have));
+ if (ret != 0)
+ return -EFAULT;
+ }
+ a->rom_length = client->device->config_rom_length * 4;
}
- a->rom_length = client->device->config_rom_length * 4;
-
- up_read(&fw_device_rwsem);
- if (ret != 0)
- return -EFAULT;
-
- mutex_lock(&client->device->client_list_mutex);
+ guard(mutex)(&client->device->client_list_mutex);
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
@@ -479,37 +497,36 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
if (ret == 0 && list_empty(&client->link))
list_add_tail(&client->link, &client->device->client_list);
- mutex_unlock(&client->device->client_list_mutex);
-
return ret ? -EFAULT : 0;
}
-static int add_client_resource(struct client *client,
- struct client_resource *resource, gfp_t gfp_mask)
+static int add_client_resource(struct client *client, struct client_resource *resource,
+ gfp_t gfp_mask)
{
- bool preload = gfpflags_allow_blocking(gfp_mask);
- unsigned long flags;
int ret;
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ u32 index;
- if (client->in_shutdown)
- ret = -ECANCELED;
- else
- ret = idr_alloc(&client->resource_idr, resource, 0, 0,
- GFP_NOWAIT);
- if (ret >= 0) {
- resource->handle = ret;
- client_get(client);
- schedule_if_iso_resource(resource);
+ if (client->in_shutdown) {
+ ret = -ECANCELED;
+ } else {
+ if (gfpflags_allow_blocking(gfp_mask)) {
+ ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
+ GFP_NOWAIT);
+ } else {
+ ret = xa_alloc_bh(&client->resource_xa, &index, resource,
+ xa_limit_32b, GFP_NOWAIT);
+ }
+ }
+ if (ret >= 0) {
+ resource->handle = index;
+ client_get(client);
+ if (is_iso_resource(resource))
+ schedule_iso_resource(to_iso_resource(resource), 0);
+ }
}
- spin_unlock_irqrestore(&client->lock, flags);
- if (preload)
- idr_preload_end();
-
return ret < 0 ? ret : 0;
}
@@ -517,19 +534,19 @@ static int release_client_resource(struct client *client, u32 handle,
client_resource_release_fn_t release,
struct client_resource **return_resource)
{
+ unsigned long index = handle;
struct client_resource *resource;
- spin_lock_irq(&client->lock);
- if (client->in_shutdown)
- resource = NULL;
- else
- resource = idr_find(&client->resource_idr, handle);
- if (resource && resource->release == release)
- idr_remove(&client->resource_idr, handle);
- spin_unlock_irq(&client->lock);
+ scoped_guard(spinlock_irq, &client->lock) {
+ if (client->in_shutdown)
+ return -EINVAL;
- if (!(resource && resource->release == release))
- return -EINVAL;
+ resource = xa_load(&client->resource_xa, index);
+ if (!resource || resource->release != release)
+ return -EINVAL;
+
+ xa_erase(&client->resource_xa, handle);
+ }
if (return_resource)
*return_resource = resource;
@@ -551,13 +568,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
{
struct outbound_transaction_event *e = data;
struct client *client = e->client;
- unsigned long flags;
+ unsigned long index = e->r.resource.handle;
- spin_lock_irqsave(&client->lock, flags);
- idr_remove(&client->resource_idr, e->r.resource.handle);
- if (client->in_shutdown)
- wake_up(&client->tx_flush_wait);
- spin_unlock_irqrestore(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ xa_erase(&client->resource_xa, index);
+ if (client->in_shutdown)
+ wake_up(&client->tx_flush_wait);
+ }
switch (e->rsp.without_tstamp.type) {
case FW_CDEV_EVENT_RESPONSE:
@@ -599,13 +616,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
break;
+ }
default:
WARN_ON(1);
break;
}
- }
- /* Drop the idr's reference */
+ // Drop the xarray's reference.
client_put(client);
}
@@ -693,8 +710,7 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
static void release_request(struct client *client,
struct client_resource *resource)
{
- struct inbound_transaction_resource *r = container_of(resource,
- struct inbound_transaction_resource, resource);
+ struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource);
if (r->is_fcp)
fw_request_put(r->request);
@@ -804,8 +820,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
static void release_address_handler(struct client *client,
struct client_resource *resource)
{
- struct address_handler_resource *r =
- container_of(resource, struct address_handler_resource, resource);
+ struct address_handler_resource *r = to_address_handler_resource(resource);
fw_core_remove_address_handler(&r->handler);
kfree(r);
@@ -869,8 +884,7 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
release_request, &resource) < 0)
return -EINVAL;
- r = container_of(resource, struct inbound_transaction_resource,
- resource);
+ r = to_inbound_transaction_resource(resource);
if (r->is_fcp) {
fw_request_put(r->request);
goto out;
@@ -904,8 +918,7 @@ static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
static void release_descriptor(struct client *client,
struct client_resource *resource)
{
- struct descriptor_resource *r =
- container_of(resource, struct descriptor_resource, resource);
+ struct descriptor_resource *r = to_descriptor_resource(resource);
fw_core_remove_descriptor(&r->descriptor);
kfree(r);
@@ -969,7 +982,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
struct client *client = data;
struct iso_interrupt_event *e;
- e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL);
if (e == NULL)
return;
@@ -988,7 +1001,7 @@ static void iso_mc_callback(struct fw_iso_context *context,
struct client *client = data;
struct iso_interrupt_mc_event *e;
- e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return;
@@ -1070,10 +1083,10 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
context->drop_overflow_headers = true;
- /* We only support one context at this time. */
- spin_lock_irq(&client->lock);
+ // We only support one context at this time.
+ guard(spinlock_irq)(&client->lock);
+
if (client->iso_context != NULL) {
- spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return -EBUSY;
@@ -1083,7 +1096,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
client->device->card,
iso_dma_direction(context));
if (ret < 0) {
- spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return ret;
@@ -1092,7 +1104,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
client->iso_closure = a->closure;
client->iso_context = context;
- spin_unlock_irq(&client->lock);
a->handle = 0;
@@ -1266,29 +1277,27 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
struct fw_card *card = client->device->card;
struct timespec64 ts = {0, 0};
u32 cycle_time = 0;
- int ret = 0;
+ int ret;
- local_irq_disable();
+ guard(irq)();
ret = fw_card_read_cycle_time(card, &cycle_time);
if (ret < 0)
- goto end;
+ return ret;
switch (a->clk_id) {
case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-end:
- local_irq_enable();
a->tv_sec = ts.tv_sec;
a->tv_nsec = ts.tv_nsec;
a->cycle_timer = cycle_time;
- return ret;
+ return 0;
}
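
guard(irq)() is the lock-less guard that pairs local_irq_disable() with local_irq_enable() at scope exit (assumed here to be the irq guard class declared alongside the spinlock guards in <linux/spinlock.h>). That is what lets the end: label and the explicit re-enable vanish. A sketch with a hypothetical helper:

static u64 snapshot_with_irqs_off(void)
{
	guard(irq)();	// local_irq_disable(); re-enabled when the scope exits

	return read_some_counter();	// hypothetical helper that needs IRQs off
}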
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
@@ -1311,28 +1320,28 @@ static void iso_resource_work(struct work_struct *work)
struct iso_resource *r =
container_of(work, struct iso_resource, work.work);
struct client *client = r->client;
+ unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
bool skip, free, success;
- spin_lock_irq(&client->lock);
- generation = client->device->generation;
- todo = r->todo;
- /* Allow 1000ms grace period for other reallocations. */
- if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(),
- client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
- skip = true;
- } else {
- /* We could be called twice within the same generation. */
- skip = todo == ISO_RES_REALLOC &&
- r->generation == generation;
+ scoped_guard(spinlock_irq, &client->lock) {
+ generation = client->device->generation;
+ todo = r->todo;
+ // Allow 1000ms grace period for other reallocations.
+ if (todo == ISO_RES_ALLOC &&
+ time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
+ schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ skip = true;
+ } else {
+ // We could be called twice within the same generation.
+ skip = todo == ISO_RES_REALLOC &&
+ r->generation == generation;
+ }
+ free = todo == ISO_RES_DEALLOC ||
+ todo == ISO_RES_ALLOC_ONCE ||
+ todo == ISO_RES_DEALLOC_ONCE;
+ r->generation = generation;
}
- free = todo == ISO_RES_DEALLOC ||
- todo == ISO_RES_ALLOC_ONCE ||
- todo == ISO_RES_DEALLOC_ONCE;
- r->generation = generation;
- spin_unlock_irq(&client->lock);
if (skip)
goto out;
@@ -1346,7 +1355,7 @@ static void iso_resource_work(struct work_struct *work)
todo == ISO_RES_ALLOC_ONCE);
/*
* Is this generation outdated already? As long as this resource sticks
- * in the idr, it will be scheduled again for a newer generation or at
+ * in the xarray, it will be scheduled again for a newer generation or at
* shutdown.
*/
if (channel == -EAGAIN &&
@@ -1355,24 +1364,20 @@ static void iso_resource_work(struct work_struct *work)
success = channel >= 0 || bandwidth > 0;
- spin_lock_irq(&client->lock);
- /*
- * Transit from allocation to reallocation, except if the client
- * requested deallocation in the meantime.
- */
- if (r->todo == ISO_RES_ALLOC)
- r->todo = ISO_RES_REALLOC;
- /*
- * Allocation or reallocation failure? Pull this resource out of the
- * idr and prepare for deletion, unless the client is shutting down.
- */
- if (r->todo == ISO_RES_REALLOC && !success &&
- !client->in_shutdown &&
- idr_remove(&client->resource_idr, r->resource.handle)) {
- client_put(client);
- free = true;
+ scoped_guard(spinlock_irq, &client->lock) {
+ // Transition from allocation to reallocation, except if the client

+ // requested deallocation in the meantime.
+ if (r->todo == ISO_RES_ALLOC)
+ r->todo = ISO_RES_REALLOC;
+ // Allocation or reallocation failure? Pull this resource out of the
+ // xarray and prepare for deletion, unless the client is shutting down.
+ if (r->todo == ISO_RES_REALLOC && !success &&
+ !client->in_shutdown &&
+ xa_erase(&client->resource_xa, index)) {
+ client_put(client);
+ free = true;
+ }
}
- spin_unlock_irq(&client->lock);
if (todo == ISO_RES_ALLOC && channel >= 0)
r->channels = 1ULL << channel;
@@ -1407,13 +1412,12 @@ static void iso_resource_work(struct work_struct *work)
static void release_iso_resource(struct client *client,
struct client_resource *resource)
{
- struct iso_resource *r =
- container_of(resource, struct iso_resource, resource);
+ struct iso_resource *r = to_iso_resource(resource);
+
+ guard(spinlock_irq)(&client->lock);
- spin_lock_irq(&client->lock);
r->todo = ISO_RES_DEALLOC;
schedule_iso_resource(r, 0);
- spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
@@ -1635,7 +1639,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
e->client = client;
e->p.speed = SCODE_100;
e->p.generation = a->generation;
- e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+ async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
e->p.header[1] = a->data[0];
e->p.header[2] = a->data[1];
e->p.header_length = 12;
@@ -1676,26 +1680,22 @@ static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg
if (!client->device->is_local)
return -ENOSYS;
- spin_lock_irq(&card->lock);
+ guard(spinlock_irq)(&card->lock);
list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
- spin_unlock_irq(&card->lock);
-
return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- struct inbound_phy_packet_event *e;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1723,8 +1723,6 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
}
}
-
- spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
@@ -1821,16 +1819,15 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
return ret;
- spin_lock_irq(&client->lock);
- if (client->iso_context) {
- ret = fw_iso_buffer_map_dma(&client->buffer,
- client->device->card,
- iso_dma_direction(client->iso_context));
- client->buffer_is_mapped = (ret == 0);
+ scoped_guard(spinlock_irq, &client->lock) {
+ if (client->iso_context) {
+ ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
+ iso_dma_direction(client->iso_context));
+ if (ret < 0)
+ goto fail;
+ client->buffer_is_mapped = true;
+ }
}
- spin_unlock_irq(&client->lock);
- if (ret < 0)
- goto fail;
ret = vm_map_pages_zero(vma, client->buffer.pages,
client->buffer.page_count);
@@ -1843,48 +1840,33 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static int is_outbound_transaction_resource(int id, void *p, void *data)
+static bool has_outbound_transactions(struct client *client)
{
- struct client_resource *resource = p;
-
- return resource->release == release_transaction;
-}
-
-static int has_outbound_transactions(struct client *client)
-{
- int ret;
-
- spin_lock_irq(&client->lock);
- ret = idr_for_each(&client->resource_idr,
- is_outbound_transaction_resource, NULL);
- spin_unlock_irq(&client->lock);
+ struct client_resource *resource;
+ unsigned long index;
- return ret;
-}
+ guard(spinlock_irq)(&client->lock);
-static int shutdown_resource(int id, void *p, void *data)
-{
- struct client_resource *resource = p;
- struct client *client = data;
-
- resource->release(client, resource);
- client_put(client);
+ xa_for_each(&client->resource_xa, index, resource) {
+ if (is_outbound_transaction_resource(resource))
+ return true;
+ }
- return 0;
+ return false;
}
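
A sketch of the iteration idiom that replaces the idr_for_each() callback style here and in queue_bus_reset_event() above (hypothetical names):

static bool any_entry_matches(struct xarray *xa, bool (*match)(void *entry))
{
	unsigned long index;
	void *entry;

	// xa_for_each() visits the present entries in ascending index order.
	xa_for_each(xa, index, entry) {
		if (match(entry))
			return true;
	}
	return false;
}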
static int fw_device_op_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct event *event, *next_event;
+ struct client_resource *resource;
+ unsigned long index;
- spin_lock_irq(&client->device->card->lock);
- list_del(&client->phy_receiver_link);
- spin_unlock_irq(&client->device->card->lock);
+ scoped_guard(spinlock_irq, &client->device->card->lock)
+ list_del(&client->phy_receiver_link);
- mutex_lock(&client->device->client_list_mutex);
- list_del(&client->link);
- mutex_unlock(&client->device->client_list_mutex);
+ scoped_guard(mutex, &client->device->client_list_mutex)
+ list_del(&client->link);
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
@@ -1892,15 +1874,17 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
- /* Freeze client->resource_idr and client->event_list */
- spin_lock_irq(&client->lock);
- client->in_shutdown = true;
- spin_unlock_irq(&client->lock);
+ // Freeze client->resource_xa and client->event_list.
+ scoped_guard(spinlock_irq, &client->lock)
+ client->in_shutdown = true;
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
- idr_for_each(&client->resource_idr, shutdown_resource, client);
- idr_destroy(&client->resource_idr);
+ xa_for_each(&client->resource_xa, index, resource) {
+ resource->release(client, resource);
+ client_put(client);
+ }
+ xa_destroy(&client->resource_xa);
list_for_each_entry_safe(event, next_event, &client->event_list, link)
kfree(event);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 00e9a13e6c45..a99fe35f1f0d 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
-#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -288,7 +287,7 @@ static ssize_t show_immediate(struct device *dev,
const u32 *directories[] = {NULL, NULL};
int i, value = -1;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
if (is_fw_unit(dev)) {
directories[0] = fw_unit(dev)->directory;
@@ -317,8 +316,6 @@ static ssize_t show_immediate(struct device *dev,
}
}
- up_read(&fw_device_rwsem);
-
if (value < 0)
return -ENOENT;
@@ -339,7 +336,7 @@ static ssize_t show_text_leaf(struct device *dev,
char dummy_buf[2];
int i, ret = -ENOENT;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
if (is_fw_unit(dev)) {
directories[0] = fw_unit(dev)->directory;
@@ -382,15 +379,14 @@ static ssize_t show_text_leaf(struct device *dev,
}
}
- if (ret >= 0) {
- /* Strip trailing whitespace and add newline. */
- while (ret > 0 && isspace(buf[ret - 1]))
- ret--;
- strcpy(buf + ret, "\n");
- ret++;
- }
+ if (ret < 0)
+ return ret;
- up_read(&fw_device_rwsem);
+ // Strip trailing whitespace and add newline.
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
return ret;
}
@@ -466,10 +462,10 @@ static ssize_t config_rom_show(struct device *dev,
struct fw_device *device = fw_device(dev);
size_t length;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
+
length = device->config_rom_length * 4;
memcpy(buf, device->config_rom, length);
- up_read(&fw_device_rwsem);
return length;
}
@@ -478,13 +474,10 @@ static ssize_t guid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
- int ret;
- down_read(&fw_device_rwsem);
- ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
- up_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
- return ret;
+ return sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
}
static ssize_t is_local_show(struct device *dev,
@@ -524,7 +517,8 @@ static ssize_t units_show(struct device *dev,
struct fw_csr_iterator ci;
int key, value, i = 0;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
+
fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
@@ -533,7 +527,6 @@ static ssize_t units_show(struct device *dev,
if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
break;
}
- up_read(&fw_device_rwsem);
if (i)
buf[i - 1] = '\n';
@@ -571,7 +564,8 @@ static int read_rom(struct fw_device *device,
return rcode;
}
-#define MAX_CONFIG_ROM_SIZE 256
+// In quadlet units.
+#define MAX_CONFIG_ROM_SIZE ((CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) / sizeof(u32))
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
@@ -729,10 +723,10 @@ static int read_config_rom(struct fw_device *device, int generation)
goto out;
}
- down_write(&fw_device_rwsem);
- device->config_rom = new_rom;
- device->config_rom_length = length;
- up_write(&fw_device_rwsem);
+ scoped_guard(rwsem_write, &fw_device_rwsem) {
+ device->config_rom = new_rom;
+ device->config_rom_length = length;
+ }
kfree(old_rom);
ret = RCODE_COMPLETE;
@@ -813,24 +807,21 @@ static int shutdown_unit(struct device *device, void *data)
/*
* fw_device_rwsem acts as dual purpose mutex:
- * - serializes accesses to fw_device_idr,
* - serializes accesses to fw_device.config_rom/.config_rom_length and
* fw_unit.directory, unless those accesses happen at safe occasions
*/
DECLARE_RWSEM(fw_device_rwsem);
-DEFINE_IDR(fw_device_idr);
+DEFINE_XARRAY_ALLOC(fw_device_xa);
int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
- down_read(&fw_device_rwsem);
- device = idr_find(&fw_device_idr, MINOR(devt));
+ device = xa_load(&fw_device_xa, MINOR(devt));
if (device)
fw_device_get(device);
- up_read(&fw_device_rwsem);
return device;
}
@@ -864,7 +855,6 @@ static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
- int minor = MINOR(device->device.devt);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
@@ -882,9 +872,7 @@ static void fw_device_shutdown(struct work_struct *work)
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
- down_write(&fw_device_rwsem);
- idr_remove(&fw_device_idr, minor);
- up_write(&fw_device_rwsem);
+ xa_erase(&fw_device_xa, MINOR(device->device.devt));
fw_device_put(device);
}
@@ -893,16 +881,14 @@ static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
- unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
- spin_lock_irqsave(&card->lock, flags);
- device->node->data = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
+ scoped_guard(spinlock_irqsave, &card->lock)
+ device->node->data = NULL;
fw_node_put(device->node);
kfree(device->config_rom);
@@ -942,59 +928,6 @@ static void fw_device_update(struct work_struct *work)
device_for_each_child(&device->device, NULL, update_unit);
}
-/*
- * If a device was pending for deletion because its node went away but its
- * bus info block and root directory header matches that of a newly discovered
- * device, revive the existing fw_device.
- * The newly allocated fw_device becomes obsolete instead.
- */
-static int lookup_existing_device(struct device *dev, void *data)
-{
- struct fw_device *old = fw_device(dev);
- struct fw_device *new = data;
- struct fw_card *card = new->card;
- int match = 0;
-
- if (!is_fw_device(dev))
- return 0;
-
- down_read(&fw_device_rwsem); /* serialize config_rom access */
- spin_lock_irq(&card->lock); /* serialize node access */
-
- if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
- atomic_cmpxchg(&old->state,
- FW_DEVICE_GONE,
- FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- struct fw_node *current_node = new->node;
- struct fw_node *obsolete_node = old->node;
-
- new->node = obsolete_node;
- new->node->data = new;
- old->node = current_node;
- old->node->data = old;
-
- old->max_speed = new->max_speed;
- old->node_id = current_node->node_id;
- smp_wmb(); /* update node_id before generation */
- old->generation = card->generation;
- old->config_rom_retries = 0;
- fw_notice(card, "rediscovered device %s\n", dev_name(dev));
-
- old->workfn = fw_device_update;
- fw_schedule_device_work(old, 0);
-
- if (current_node == card->root_node)
- fw_schedule_bm_work(card, 0);
-
- match = 1;
- }
-
- spin_unlock_irq(&card->lock);
- up_read(&fw_device_rwsem);
-
- return match;
-}
-
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
static void set_broadcast_channel(struct fw_device *device, int generation)
@@ -1055,13 +988,26 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
+static int compare_configuration_rom(struct device *dev, void *data)
+{
+ const struct fw_device *old = fw_device(dev);
+ const u32 *config_rom = data;
+
+ if (!is_fw_device(dev))
+ return 0;
+
+ // Compare the bus information block and root_length/root_crc.
+ return !memcmp(old->config_rom, config_rom, 6 * 4);
+}
+
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
- struct device *revived_dev;
- int minor, ret;
+ struct device *found;
+ u32 minor;
+ int ret;
/*
* All failure paths here set node->data to NULL, so that we
@@ -1087,24 +1033,62 @@ static void fw_device_init(struct work_struct *work)
return;
}
- revived_dev = device_find_child(card->device,
- device, lookup_existing_device);
- if (revived_dev) {
- put_device(revived_dev);
- fw_device_release(&device->device);
+ // If a device was pending for deletion because its node went away but its bus info block
+ // and root directory header match those of a newly discovered device, revive the
+ // existing fw_device. The newly allocated fw_device becomes obsolete instead.
+ //
+ // serialize config_rom access.
+ scoped_guard(rwsem_read, &fw_device_rwsem) {
+ found = device_find_child(card->device, (void *)device->config_rom,
+ compare_configuration_rom);
+ }
+ if (found) {
+ struct fw_device *reused = fw_device(found);
+
+ if (atomic_cmpxchg(&reused->state,
+ FW_DEVICE_GONE,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+ // serialize node access
+ scoped_guard(spinlock_irq, &card->lock) {
+ struct fw_node *current_node = device->node;
+ struct fw_node *obsolete_node = reused->node;
+
+ device->node = obsolete_node;
+ device->node->data = device;
+ reused->node = current_node;
+ reused->node->data = reused;
+
+ reused->max_speed = device->max_speed;
+ reused->node_id = current_node->node_id;
+ smp_wmb(); /* update node_id before generation */
+ reused->generation = card->generation;
+ reused->config_rom_retries = 0;
+ fw_notice(card, "rediscovered device %s\n",
+ dev_name(found));
+
+ reused->workfn = fw_device_update;
+ fw_schedule_device_work(reused, 0);
+
+ if (current_node == card->root_node)
+ fw_schedule_bm_work(card, 0);
+ }
- return;
+ put_device(found);
+ fw_device_release(&device->device);
+
+ return;
+ }
+
+ put_device(found);
}
device_initialize(&device->device);
fw_device_get(device);
- down_write(&fw_device_rwsem);
- minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
- GFP_KERNEL);
- up_write(&fw_device_rwsem);
- if (minor < 0)
+ // The index of the allocated entry is used as the minor identifier of the device node.
+ ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL);
+ if (ret < 0)
goto error;
device->device.bus = &fw_bus_type;
@@ -1165,11 +1149,9 @@ static void fw_device_init(struct work_struct *work)
return;
error_with_cdev:
- down_write(&fw_device_rwsem);
- idr_remove(&fw_device_idr, minor);
- up_write(&fw_device_rwsem);
+ xa_erase(&fw_device_xa, minor);
error:
- fw_device_put(device); /* fw_device_idr's reference */
+ fw_device_put(device); // fw_device_xa's reference.
put_device(&device->device); /* our reference */
}
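
The new allocation bound matches the old one exactly: XA_LIMIT() is inclusive on both ends, while the idr_alloc() end argument was exclusive, and MINORMASK is defined as (1 << MINORBITS) - 1. A compile-time statement of that equivalence (sketch, using <linux/kdev_t.h> and <linux/build_bug.h>):

#include <linux/kdev_t.h>
#include <linux/build_bug.h>

// XA_LIMIT(0, MINORMASK) covers the same indices as the former
// idr_alloc(..., 0, 1 << MINORBITS, ...) range.
static_assert(MINORMASK == (1U << MINORBITS) - 1);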
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index b3eda38a36f3..a67493862c85 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -209,23 +209,63 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
+/**
+ * fw_iso_context_flush_completions() - process isochronous context in current process context.
+ * @ctx: the isochronous context
+ *
+ * Process the isochronous context in the current process context. The registered callback function
+ * is called when a queued packet buffer with the interrupt flag is completed, either after
+ * transmission in the IT context or after being filled in the IR context. Additionally, the
+ * callback function is also called for the last completed packet buffer. Furthermore, the
+ * callback function is called as well when the header buffer in the context becomes full. If it is
+ * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is
+ * available instead.
+ *
+ * Context: Process context. May sleep due to disable_work_sync().
+ */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
+ int err;
+
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
- return ctx->card->driver->flush_iso_completions(ctx);
+ might_sleep();
+
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+
+ disable_work_sync(&ctx->work);
+
+ err = ctx->card->driver->flush_iso_completions(ctx);
+
+ enable_work(&ctx->work);
+
+ return err;
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
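
A caller-side sketch matching the kernel-doc above — say, a driver that wants completed buffers handled synchronously before it reads its position counters (my_pointer_update and my_ctx are hypothetical):

#include <linux/firewire.h>
#include <linux/printk.h>

static void my_pointer_update(struct fw_iso_context *my_ctx)
{
	// Process context only: the flush may sleep in disable_work_sync(),
	// and must not be called from the context's own work item.
	if (fw_iso_context_flush_completions(my_ctx) < 0)
		pr_warn("isochronous context flush failed\n");
}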
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
+ int err;
+
trace_isoc_outbound_stop(ctx);
trace_isoc_inbound_single_stop(ctx);
trace_isoc_inbound_multiple_stop(ctx);
- return ctx->card->driver->stop_iso(ctx);
+ might_sleep();
+
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+
+ err = ctx->card->driver->stop_iso(ctx);
+
+ cancel_work_sync(&ctx->work);
+
+ return err;
}
EXPORT_SYMBOL(fw_iso_context_stop);
@@ -375,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
- spin_lock_irq(&card->lock);
- irm_id = card->irm_node->node_id;
- spin_unlock_irq(&card->lock);
+ scoped_guard(spinlock_irq, &card->lock)
+ irm_id = card->irm_node->node_id;
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index b4e637aa6932..6adadb11962e 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -39,7 +39,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count;
- refcount_set(&node->ref_count, 1);
+ kref_init(&node->kref);
INIT_LIST_HEAD(&node->link);
return node;
@@ -455,11 +455,10 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
- unsigned long flags;
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
/*
* If the selfID buffer is not the immediate successor of the
@@ -500,7 +499,5 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
} else {
update_tree(card, local_node);
}
-
- spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 4d2fc1f31fec..e141d24a7644 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -13,7 +13,6 @@
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -49,35 +48,31 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
u32 response_tstamp)
{
struct fw_transaction *t = NULL, *iter;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter == transaction) {
- if (!try_cancel_split_timeout(iter)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter == transaction) {
+ if (try_cancel_split_timeout(iter)) {
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
+ }
+ break;
}
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
- if (t) {
- if (!t->with_tstamp) {
- t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
- } else {
- t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
- NULL, 0, t->callback_data);
- }
- return 0;
+ if (!t)
+ return -ENOENT;
+
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
+ t->callback_data);
}
- timed_out:
- return -ENOENT;
+ return 0;
}
/*
@@ -121,16 +116,13 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
{
struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- if (list_empty(&t->link)) {
- spin_unlock_irqrestore(&card->lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ if (list_empty(&t->link))
+ return;
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
}
- list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
- spin_unlock_irqrestore(&card->lock, flags);
if (!t->with_tstamp) {
t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
@@ -143,20 +135,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- unsigned long flags;
+ guard(spinlock_irqsave)(&card->lock);
- spin_lock_irqsave(&card->lock, flags);
-
- if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
- spin_unlock_irqrestore(&card->lock, flags);
+ if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
- }
t->is_split_transaction = true;
mod_timer(&t->split_timeout_timer,
jiffies + card->split_timeout_jiffies);
-
- spin_unlock_irqrestore(&card->lock, flags);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -464,7 +450,6 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
static struct fw_packet phy_config_packet = {
.header_length = 12,
- .header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
@@ -495,8 +480,9 @@ void fw_send_phy_config(struct fw_card *card,
phy_packet_phy_config_set_gap_count(&data, gap_count);
phy_packet_phy_config_set_gap_count_optimization(&data, true);
- mutex_lock(&phy_config_mutex);
+ guard(mutex)(&phy_config_mutex);
+ async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
phy_config_packet.header[1] = data;
phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
@@ -508,8 +494,6 @@ void fw_send_phy_config(struct fw_card *card,
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
-
- mutex_unlock(&phy_config_mutex);
}
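
The replaced initializer makes the header layout explicit: in the async packet header used here, the transaction code occupies bits 7:4 of header[0], so async_header_set_tcode(header, TCODE_LINK_INTERNAL) computes what "TCODE_LINK_INTERNAL << 4" used to spell out. An open-coded equivalent (illustrative, not the driver's actual accessor):

static inline void set_tcode_open_coded(u32 *header, unsigned int tcode)
{
	header[0] &= ~(0xfu << 4);		// tcode field: bits 7:4 of quadlet 0
	header[0] |= (tcode & 0xfu) << 4;
}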
static struct fw_address_handler *lookup_overlapping_address_handler(
@@ -598,7 +582,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->length == 0)
return -EINVAL;
- spin_lock(&address_handler_list_lock);
+ guard(spinlock)(&address_handler_list_lock);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
@@ -617,8 +601,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
}
}
- spin_unlock(&address_handler_list_lock);
-
return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
@@ -634,9 +616,9 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
- spin_lock(&address_handler_list_lock);
- list_del_rcu(&handler->link);
- spin_unlock(&address_handler_list_lock);
+ scoped_guard(spinlock, &address_handler_list_lock)
+ list_del_rcu(&handler->link);
+
synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
@@ -927,16 +909,14 @@ static void handle_exclusive_region_request(struct fw_card *card,
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + async_header_get_extended_tcode(p->header);
- rcu_read_lock();
- handler = lookup_enclosing_address_handler(&address_handler_list,
- offset, request->length);
- if (handler)
- handler->address_callback(card, request,
- tcode, destination, source,
- p->generation, offset,
- request->data, request->length,
- handler->callback_data);
- rcu_read_unlock();
+ scoped_guard(rcu) {
+ handler = lookup_enclosing_address_handler(&address_handler_list, offset,
+ request->length);
+ if (handler)
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ }
if (!handler)
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
@@ -969,17 +949,14 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
- rcu_read_lock();
- list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode,
- destination, source,
- p->generation, offset,
- request->data,
- request->length,
- handler->callback_data);
+ scoped_guard(rcu) {
+ list_for_each_entry_rcu(handler, &address_handler_list, link) {
+ if (is_enclosing_handler(handler, offset, request->length))
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ }
}
- rcu_read_unlock();
fw_send_response(card, request, RCODE_COMPLETE);
}
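
scoped_guard(rcu) in these two hunks brackets an RCU read-side critical section in place of the manual rcu_read_lock()/rcu_read_unlock() pair, so an early return from inside the block can no longer leak the read lock. The shape, with a hypothetical list and visitor:

	scoped_guard(rcu) {
		struct my_handler *h;	// hypothetical element type

		list_for_each_entry_rcu(h, &my_handler_list, link)
			visit(h);	// runs under rcu_read_lock()
	}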
@@ -1024,7 +1001,6 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t = NULL, *iter;
- unsigned long flags;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
@@ -1063,26 +1039,23 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter->node_id == source && iter->tlabel == tlabel) {
- if (!try_cancel_split_timeout(iter)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (try_cancel_split_timeout(iter)) {
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
+ }
+ break;
}
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
p->timestamp, p->header, data, data_length / 4);
if (!t) {
- timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -1186,7 +1159,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
- unsigned long flags;
switch (reg) {
case CSR_PRIORITY_BUDGET:
@@ -1228,10 +1200,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
+
card->split_timeout_hi = be32_to_cpu(*data) & 7;
update_split_timeout(card);
- spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1241,11 +1213,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- spin_lock_irqsave(&card->lock, flags);
- card->split_timeout_lo =
- be32_to_cpu(*data) & 0xfff80000;
+ guard(spinlock_irqsave)(&card->lock);
+
+ card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
update_split_timeout(card);
- spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1387,7 +1358,7 @@ static void __exit fw_core_cleanup(void)
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
destroy_workqueue(fw_workqueue);
- idr_destroy(&fw_device_idr);
+ xa_destroy(&fw_device_xa);
}
module_init(fw_core_init);
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 7c36d2628e37..0ae2c84ecafe 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -7,7 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
@@ -115,8 +115,8 @@ struct fw_card_driver {
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid);
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+ unsigned int supported_isoc_contexts);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
@@ -133,7 +133,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
+extern struct xarray fw_device_xa;
extern int fw_cdev_major;
static inline struct fw_device *fw_device_get(struct fw_device *device)
@@ -159,6 +159,11 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
+static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
+{
+ INIT_WORK(&ctx->work, func);
+}
+
/* -topology */
@@ -183,7 +188,8 @@ struct fw_node {
* local node to this node. */
u8 max_depth:4; /* Maximum depth to any leaf node */
u8 max_hops:4; /* Max hops in this sub tree */
- refcount_t ref_count;
+
+ struct kref kref;
/* For serializing node topology into a list. */
struct list_head link;
@@ -196,15 +202,21 @@ struct fw_node {
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
- refcount_inc(&node->ref_count);
+ kref_get(&node->kref);
return node;
}
+static void release_node(struct kref *kref)
+{
+ struct fw_node *node = container_of(kref, struct fw_node, kref);
+
+ kfree(node);
+}
+
static inline void fw_node_put(struct fw_node *node)
{
- if (refcount_dec_and_test(&node->ref_count))
- kfree(node);
+ kref_put(&node->kref, release_node);
}
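
The switch from refcount_t to kref keeps the counting semantics but moves the release step into a named callback, which kref_put() invokes exactly once when the count drops to zero. The generic shape (hypothetical struct):

#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref kref;
};

static void my_obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_obj, kref));
}

// Lifecycle: kref_init() at creation, kref_get() per additional reference,
// and kref_put(&obj->kref, my_obj_release) per dropped reference.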
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
diff --git a/drivers/firewire/ohci-serdes-test.c b/drivers/firewire/ohci-serdes-test.c
index 304a09ff528e..258f668619ef 100644
--- a/drivers/firewire/ohci-serdes-test.c
+++ b/drivers/firewire/ohci-serdes-test.c
@@ -40,9 +40,75 @@ static void test_self_id_receive_buffer_deserialization(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
}
+static void test_at_data_serdes(struct kunit *test)
+{
+ static const __le32 expected[] = {
+ cpu_to_le32(0x00020e80),
+ cpu_to_le32(0xffc2ffff),
+ cpu_to_le32(0xe0000000),
+ };
+ __le32 quadlets[] = {0, 0, 0};
+ bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected);
+ unsigned int speed = ohci1394_at_data_get_speed(expected);
+ unsigned int tlabel = ohci1394_at_data_get_tlabel(expected);
+ unsigned int retry = ohci1394_at_data_get_retry(expected);
+ unsigned int tcode = ohci1394_at_data_get_tcode(expected);
+ unsigned int destination_id = ohci1394_at_data_get_destination_id(expected);
+ u64 destination_offset = ohci1394_at_data_get_destination_offset(expected);
+
+ KUNIT_EXPECT_FALSE(test, has_src_bus_id);
+ KUNIT_EXPECT_EQ(test, 0x02, speed);
+ KUNIT_EXPECT_EQ(test, 0x03, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x02, retry);
+ KUNIT_EXPECT_EQ(test, 0x08, tcode);
+
+ ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id);
+ ohci1394_at_data_set_speed(quadlets, speed);
+ ohci1394_at_data_set_tlabel(quadlets, tlabel);
+ ohci1394_at_data_set_retry(quadlets, retry);
+ ohci1394_at_data_set_tcode(quadlets, tcode);
+ ohci1394_at_data_set_destination_id(quadlets, destination_id);
+ ohci1394_at_data_set_destination_offset(quadlets, destination_offset);
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_it_data_serdes(struct kunit *test)
+{
+ static const __le32 expected[] = {
+ cpu_to_le32(0x000349a7),
+ cpu_to_le32(0x02300000),
+ };
+ __le32 quadlets[] = {0, 0};
+ unsigned int scode = ohci1394_it_data_get_speed(expected);
+ unsigned int tag = ohci1394_it_data_get_tag(expected);
+ unsigned int channel = ohci1394_it_data_get_channel(expected);
+ unsigned int tcode = ohci1394_it_data_get_tcode(expected);
+ unsigned int sync = ohci1394_it_data_get_sync(expected);
+ unsigned int data_length = ohci1394_it_data_get_data_length(expected);
+
+ KUNIT_EXPECT_EQ(test, 0x03, scode);
+ KUNIT_EXPECT_EQ(test, 0x01, tag);
+ KUNIT_EXPECT_EQ(test, 0x09, channel);
+ KUNIT_EXPECT_EQ(test, 0x0a, tcode);
+ KUNIT_EXPECT_EQ(test, 0x7, sync);
+ KUNIT_EXPECT_EQ(test, 0x0230, data_length);
+
+ ohci1394_it_data_set_speed(quadlets, scode);
+ ohci1394_it_data_set_tag(quadlets, tag);
+ ohci1394_it_data_set_channel(quadlets, channel);
+ ohci1394_it_data_set_tcode(quadlets, tcode);
+ ohci1394_it_data_set_sync(quadlets, sync);
+ ohci1394_it_data_set_data_length(quadlets, data_length);
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
static struct kunit_case ohci_serdes_test_cases[] = {
KUNIT_CASE(test_self_id_count_register_deserialization),
KUNIT_CASE(test_self_id_receive_buffer_deserialization),
+ KUNIT_CASE(test_at_data_serdes),
+ KUNIT_CASE(test_it_data_serdes),
{}
};
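
As a consistency check on the AT test vector: the getter values above reproduce the expected first quadlet if the fields sit at speed[18:16], tlabel[15:10], retry[9:8], and tcode[7:4] — an inference from the expected data, not the driver's authoritative definition — since 0x2<<16 | 0x3<<10 | 0x2<<8 | 0x8<<4 == 0x00020e80. Expressed with <linux/bitfield.h> (hypothetical mask names; quadlet 0 only):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define AT_Q0_SPEED	GENMASK(18, 16)
#define AT_Q0_TLABEL	GENMASK(15, 10)
#define AT_Q0_RETRY	GENMASK(9, 8)
#define AT_Q0_TCODE	GENMASK(7, 4)

static u32 pack_at_quadlet0(u32 speed, u32 tlabel, u32 retry, u32 tcode)
{
	return FIELD_PREP(AT_Q0_SPEED, speed) | FIELD_PREP(AT_Q0_TLABEL, tlabel) |
	       FIELD_PREP(AT_Q0_RETRY, retry) | FIELD_PREP(AT_Q0_TCODE, tcode);
}

// pack_at_quadlet0(0x02, 0x03, 0x02, 0x08) == 0x00020e80, matching expected[0].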
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 314a29c0fd3e..7ee55c2804de 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -50,7 +50,6 @@ static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>
-#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
@@ -77,7 +76,7 @@ struct descriptor {
__le32 branch_address;
__le16 res_count;
__le16 transfer_status;
-} __attribute__((aligned(16)));
+} __aligned(16);
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
@@ -162,13 +161,6 @@ struct context {
struct tasklet_struct tasklet;
};
-#define IT_HEADER_SY(v) ((v) << 0)
-#define IT_HEADER_TCODE(v) ((v) << 4)
-#define IT_HEADER_CHANNEL(v) ((v) << 8)
-#define IT_HEADER_TAG(v) ((v) << 14)
-#define IT_HEADER_SPEED(v) ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
struct iso_context {
struct fw_iso_context base;
struct context context;
@@ -182,7 +174,7 @@ struct iso_context {
u8 tags;
};
-#define CONFIG_ROM_SIZE 1024
+#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
struct fw_ohci {
struct fw_card card;
@@ -264,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
-#define OHCI_TCODE_PHY_PACKET 0x0e
#define OHCI_VERSION_1_1 0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
@@ -405,7 +396,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+MODULE_PARM_DESC(debug, "Verbose logging, deprecated since kernel v6.11. (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
@@ -532,20 +523,28 @@ static const char *evts[] = {
[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
[0x20] = "pending/cancelled",
};
-static const char *tcodes[] = {
- [0x0] = "QW req", [0x1] = "BW req",
- [0x2] = "W resp", [0x3] = "-reserved-",
- [0x4] = "QR req", [0x5] = "BR req",
- [0x6] = "QR resp", [0x7] = "BR resp",
- [0x8] = "cycle start", [0x9] = "Lk req",
- [0xa] = "async stream packet", [0xb] = "Lk resp",
- [0xc] = "-reserved-", [0xd] = "-reserved-",
- [0xe] = "link internal", [0xf] = "-reserved-",
-};
static void log_ar_at_event(struct fw_ohci *ohci,
char dir, int speed, u32 *header, int evt)
{
+ static const char *const tcodes[] = {
+ [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
+ [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
+ [TCODE_WRITE_RESPONSE] = "W resp",
+ [0x3] = "-reserved-",
+ [TCODE_READ_QUADLET_REQUEST] = "QR req",
+ [TCODE_READ_BLOCK_REQUEST] = "BR req",
+ [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
+ [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
+ [TCODE_CYCLE_START] = "cycle start",
+ [TCODE_LOCK_REQUEST] = "Lk req",
+ [TCODE_STREAM_DATA] = "async stream packet",
+ [TCODE_LOCK_RESPONSE] = "Lk resp",
+ [0xc] = "-reserved-",
+ [0xd] = "-reserved-",
+ [TCODE_LINK_INTERNAL] = "link internal",
+ [0xf] = "-reserved-",
+ };
int tcode = async_header_get_tcode(header);
char specific[12];
@@ -586,7 +585,7 @@ static void log_ar_at_event(struct fw_ohci *ohci,
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
- case 0xe:
+ case TCODE_LINK_INTERNAL:
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]);
break;
@@ -713,26 +712,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = read_phy_reg(ohci, addr);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return read_phy_reg(ohci, addr);
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return update_phy_reg(ohci, addr, clear_bits, set_bits);
}
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
@@ -939,7 +932,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_WRITE_RESPONSE:
case TCODE_READ_QUADLET_REQUEST:
- case OHCI_TCODE_PHY_PACKET:
+ case TCODE_LINK_INTERNAL:
p.header_length = 12;
p.payload_length = 0;
break;
@@ -967,7 +960,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
*/
- if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
+ if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
p.ack = ACK_COMPLETE;
/*
@@ -1148,9 +1141,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
return d + z - 1;
}
-static void context_tasklet(unsigned long data)
+static void context_retire_descriptors(struct context *ctx)
{
- struct context *ctx = (struct context *) data;
struct descriptor *d, *last;
u32 address;
int z;
@@ -1179,18 +1171,31 @@ static void context_tasklet(unsigned long data)
break;
if (old_desc != desc) {
- /* If we've advanced to the next buffer, move the
- * previous buffer to the free list. */
- unsigned long flags;
+ // If we've advanced to the next buffer, move the previous buffer to the
+ // free list.
old_desc->used = 0;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->ohci->lock);
list_move_tail(&old_desc->list, &ctx->buffer_list);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
}
ctx->last = last;
}
}
+static void context_tasklet(unsigned long data)
+{
+ struct context *ctx = (struct context *) data;
+
+ context_retire_descriptors(ctx);
+}
+
+static void ohci_isoc_context_work(struct work_struct *work)
+{
+ struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
+
+ context_retire_descriptors(&isoc_ctx->context);
+}
+
/*
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
@@ -1402,12 +1407,6 @@ static int at_context_queue_packet(struct context *ctx,
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].res_count = cpu_to_le16(packet->timestamp);
- /*
- * The DMA format for asynchronous link packets is different
- * from the IEEE1394 layout, so shift the fields around
- * accordingly.
- */
-
tcode = async_header_get_tcode(packet->header);
header = (__le32 *) &d[1];
switch (tcode) {
@@ -1420,11 +1419,21 @@ static int at_context_queue_packet(struct context *ctx,
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
- (packet->header[0] & 0xffff0000));
- header[2] = cpu_to_le32(packet->header[2]);
+ ohci1394_at_data_set_src_bus_id(header, false);
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
+ ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
+ ohci1394_at_data_set_tcode(header, tcode);
+
+ ohci1394_at_data_set_destination_id(header,
+ async_header_get_destination(packet->header));
+
+ if (ctx == &ctx->ohci->at_response_ctx) {
+ ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
+ } else {
+ ohci1394_at_data_set_destination_offset(header,
+ async_header_get_offset(packet->header));
+ }
if (tcode_is_block_packet(tcode))
header[3] = cpu_to_le32(packet->header[3]);
@@ -1433,10 +1442,10 @@ static int at_context_queue_packet(struct context *ctx,
d[0].req_count = cpu_to_le16(packet->header_length);
break;
-
case TCODE_LINK_INTERNAL:
- header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
- (packet->speed << 16));
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);
+
header[1] = cpu_to_le32(packet->header[1]);
header[2] = cpu_to_le32(packet->header[2]);
d[0].req_count = cpu_to_le16(12);
@@ -1446,9 +1455,14 @@ static int at_context_queue_packet(struct context *ctx,
break;
case TCODE_STREAM_DATA:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+ ohci1394_it_data_set_speed(header, packet->speed);
+ ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
+ ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));
+
+ ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));
+
d[0].req_count = cpu_to_le16(8);
break;
@@ -1873,13 +1887,15 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index,
{
int reg;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, port_index);
- if (reg >= 0)
+ scoped_guard(mutex, &ohci->phy_reg_mutex) {
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg < 0)
+ return reg;
+
reg = read_phy_reg(ohci, 8);
- mutex_unlock(&ohci->phy_reg_mutex);
- if (reg < 0)
- return reg;
+ if (reg < 0)
+ return reg;
+ }
switch (reg & 0x0f) {
case 0x06:
@@ -1917,29 +1933,36 @@ static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
return i;
}
-static bool initiated_reset(struct fw_ohci *ohci)
+static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
int reg;
- int ret = false;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 8);
- reg |= 0x40;
- reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 12); /* read register 12 */
- if (reg >= 0) {
- if ((reg & 0x08) == 0x08) {
- /* bit 3 indicates "initiated reset" */
- ret = true;
- }
- }
- }
- }
- mutex_unlock(&ohci->phy_reg_mutex);
- return ret;
+ guard(mutex)(&ohci->phy_reg_mutex);
+
+ // Select page 7
+ reg = write_phy_reg(ohci, 7, 0xe0);
+ if (reg < 0)
+ return reg;
+
+ reg = read_phy_reg(ohci, 8);
+ if (reg < 0)
+ return reg;
+
+ // set PMODE bit
+ reg |= 0x40;
+ reg = write_phy_reg(ohci, 8, reg);
+ if (reg < 0)
+ return reg;
+
+ // read register 12
+ reg = read_phy_reg(ohci, 12);
+ if (reg < 0)
+ return reg;
+
+ // bit 3 indicates "initiated reset"
+ *is_initiated_reset = !!((reg & 0x08) == 0x08);
+
+ return 0;
}
/*
@@ -1949,7 +1972,8 @@ static bool initiated_reset(struct fw_ohci *ohci)
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
- int reg, i, pos;
+ int reg, i, pos, err;
+ bool is_initiated_reset;
u32 self_id = 0;
// link active 1, speed 3, bridge 0, contender 1, more packets 0.
@@ -1978,7 +2002,6 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
for (i = 0; i < 3; i++) {
enum phy_packet_self_id_port_status status;
- int err;
err = get_status_for_port(ohci, i, &status);
if (err < 0)
@@ -1987,7 +2010,10 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
self_id_sequence_set_port_status(&self_id, 1, i, status);
}
- phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));
+ err = detect_initiated_reset(ohci, &is_initiated_reset);
+ if (err < 0)
+ return err;
+ phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
@@ -2112,14 +2138,12 @@ static void bus_reset_work(struct work_struct *work)
return;
}
- /* FIXME: Document how the locking works. */
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = -1; /* prevent AT packet queueing */
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
-
- spin_unlock_irq(&ohci->lock);
+ // FIXME: Document how the locking works.
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = -1; // prevent AT packet queueing
+ context_stop(&ohci->at_request_ctx);
+ context_stop(&ohci->at_response_ctx);
+ }
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -2129,53 +2153,42 @@ static void bus_reset_work(struct work_struct *work)
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = generation;
- reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
-
- if (ohci->quirks & QUIRK_RESET_PACKET)
- ohci->request_generation = generation;
-
- /*
- * This next bit is unrelated to the AT context stuff but we
- * have to do it under the spinlock also. If a new config rom
- * was set up before this reset, the old one is now no longer
- * in use and we can free it. Update the config rom pointers
- * to point to the current config rom and clear the
- * next_config_rom pointer so a new update can take place.
- */
-
- if (ohci->next_config_rom != NULL) {
- if (ohci->next_config_rom != ohci->config_rom) {
- free_rom = ohci->config_rom;
- free_rom_bus = ohci->config_rom_bus;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = generation;
+ reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+ if (ohci->quirks & QUIRK_RESET_PACKET)
+ ohci->request_generation = generation;
+
+ // This next bit is unrelated to the AT context stuff but we have to do it under the
+ // spinlock also. If a new config rom was set up before this reset, the old one is
+ // now no longer in use and we can free it. Update the config rom pointers to point
+ // to the current config rom and clear the next_config_rom pointer so a new update
+ // can take place.
+ if (ohci->next_config_rom != NULL) {
+ if (ohci->next_config_rom != ohci->config_rom) {
+ free_rom = ohci->config_rom;
+ free_rom_bus = ohci->config_rom_bus;
+ }
+ ohci->config_rom = ohci->next_config_rom;
+ ohci->config_rom_bus = ohci->next_config_rom_bus;
+ ohci->next_config_rom = NULL;
+
+ // Restore config_rom image and manually update config_rom registers.
+ // Writing the header quadlet will indicate that the config rom is ready,
+ // so we do that last.
+ reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
+ ohci->config_rom[0] = ohci->next_header;
+ reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
}
- ohci->config_rom = ohci->next_config_rom;
- ohci->config_rom_bus = ohci->next_config_rom_bus;
- ohci->next_config_rom = NULL;
-
- /*
- * Restore config_rom image and manually update
- * config_rom registers. Writing the header quadlet
- * will indicate that the config rom is ready, so we
- * do that last.
- */
- reg_write(ohci, OHCI1394_BusOptions,
- be32_to_cpu(ohci->config_rom[2]));
- ohci->config_rom[0] = ohci->next_header;
- reg_write(ohci, OHCI1394_ConfigROMhdr,
- be32_to_cpu(ohci->next_header));
- }
- if (param_remote_dma) {
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ if (param_remote_dma) {
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ }
}
- spin_unlock_irq(&ohci->lock);
-
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
@@ -2198,6 +2211,11 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
+ if (unlikely(param_debug > 0)) {
+ dev_notice_ratelimited(ohci->card.device,
+ "The debug parameter is superceded by tracepoints events, and deprecated.");
+ }
+
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2238,8 +2256,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->ir_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2250,8 +2267,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->it_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2264,13 +2280,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- if (printk_ratelimit())
- ohci_err(ohci, "PCI posted write error\n");
+ dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle too long\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
@@ -2282,17 +2296,15 @@ static irqreturn_t irq_handler(int irq, void *data)
* stop active cycleMatch iso contexts now and restart
* them at least two cycles later. (FIXME?)
*/
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle inconsistent\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
handle_dead_contexts(ohci);
if (event & OHCI1394_cycle64Seconds) {
- spin_lock(&ohci->lock);
+ guard(spinlock)(&ohci->lock);
update_bus_time(ohci);
- spin_unlock(&ohci->lock);
} else
flush_writes(ohci);
@@ -2617,33 +2629,26 @@ static int ohci_set_config_rom(struct fw_card *card,
if (next_config_rom == NULL)
return -ENOMEM;
- spin_lock_irq(&ohci->lock);
-
- /*
- * If there is not an already pending config_rom update,
- * push our new allocation into the ohci->next_config_rom
- * and then mark the local variable as null so that we
- * won't deallocate the new buffer.
- *
- * OTOH, if there is a pending config_rom update, just
- * use that buffer with the new config_rom data, and
- * let this routine free the unused DMA allocation.
- */
-
- if (ohci->next_config_rom == NULL) {
- ohci->next_config_rom = next_config_rom;
- ohci->next_config_rom_bus = next_config_rom_bus;
- next_config_rom = NULL;
- }
-
- copy_config_rom(ohci->next_config_rom, config_rom, length);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ // If there is not an already pending config_rom update, push our new allocation
+ // into the ohci->next_config_rom and then mark the local variable as null so that
+ // we won't deallocate the new buffer.
+ //
+ // OTOH, if there is a pending config_rom update, just use that buffer with the new
+ // config_rom data, and let this routine free the unused DMA allocation.
+ if (ohci->next_config_rom == NULL) {
+ ohci->next_config_rom = next_config_rom;
+ ohci->next_config_rom_bus = next_config_rom_bus;
+ next_config_rom = NULL;
+ }
- ohci->next_header = config_rom[0];
- ohci->next_config_rom[0] = 0;
+ copy_config_rom(ohci->next_config_rom, config_rom, length);
- reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ ohci->next_header = config_rom[0];
+ ohci->next_config_rom[0] = 0;
- spin_unlock_irq(&ohci->lock);
+ reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ }
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL) {
@@ -2713,7 +2718,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
int n, ret = 0;
if (param_remote_dma)
@@ -2724,12 +2728,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
*/
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
- if (ohci->generation != generation) {
- ret = -ESTALE;
- goto out;
- }
+ if (ohci->generation != generation)
+ return -ESTALE;
/*
* Note, if the node ID contains a non-local bus ID, physical DMA is
@@ -2743,8 +2745,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
flush_writes(ohci);
- out:
- spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
}
@@ -2752,7 +2752,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
u32 value;
switch (csr_offset) {
@@ -2776,16 +2775,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
return get_cycle_time(ohci);
case CSR_BUS_TIME:
- /*
- * We might be called just after the cycle timer has wrapped
- * around but just before the cycle64Seconds handler, so we
- * better check here, too, if the bus time needs to be updated.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- value = update_bus_time(ohci);
- spin_unlock_irqrestore(&ohci->lock, flags);
- return value;
+ {
+ // We might be called just after the cycle timer has wrapped around but just before
+ // the cycle64Seconds handler, so we better check here, too, if the bus time needs
+ // to be updated.
+ guard(spinlock_irqsave)(&ohci->lock);
+ return update_bus_time(ohci);
+ }
case CSR_BUSY_TIMEOUT:
value = reg_read(ohci, OHCI1394_ATRetries);
return (value >> 4) & 0x0ffff00f;
@@ -2803,7 +2800,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
switch (csr_offset) {
case CSR_STATE_CLEAR:
@@ -2839,12 +2835,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
break;
case CSR_BUS_TIME:
- spin_lock_irqsave(&ohci->lock, flags);
- ohci->bus_time = (update_bus_time(ohci) & 0x40) |
- (value & ~0x7f);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
+ ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
break;
-
+ }
case CSR_BUSY_TIMEOUT:
value = (value & 0xf) | ((value & 0xf) << 4) |
((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
@@ -2932,7 +2927,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -2968,7 +2963,7 @@ static int handle_ir_buffer_fill(struct context *context,
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
trace_isoc_inbound_multiple_completions(&ctx->base, completed,
- FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
@@ -3064,7 +3059,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -3090,55 +3085,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
u32 *mask, regs;
int index, ret = -EBUSY;
- spin_lock_irq(&ohci->lock);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ mask = &ohci->it_context_mask;
+ callback = handle_it_packet;
+ index = ffs(*mask) - 1;
+ if (index >= 0) {
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoXmitContextBase(index);
+ ctx = &ohci->it_context_list[index];
+ }
+ break;
- switch (type) {
- case FW_ISO_CONTEXT_TRANSMIT:
- mask = &ohci->it_context_mask;
- callback = handle_it_packet;
- index = ffs(*mask) - 1;
- if (index >= 0) {
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoXmitContextBase(index);
- ctx = &ohci->it_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE:
+ channels = &ohci->ir_context_channels;
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_packet_per_buffer;
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE:
- channels = &ohci->ir_context_channels;
- mask = &ohci->ir_context_mask;
- callback = handle_ir_packet_per_buffer;
- index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- *channels &= ~(1ULL << channel);
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_buffer_fill;
+ index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ ohci->mc_allocated = true;
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- mask = &ohci->ir_context_mask;
- callback = handle_ir_buffer_fill;
- index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- ohci->mc_allocated = true;
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
+ default:
+ index = -1;
+ ret = -ENOSYS;
}
- break;
- default:
- index = -1;
- ret = -ENOSYS;
+ if (index < 0)
+ return ERR_PTR(ret);
}
- spin_unlock_irq(&ohci->lock);
-
- if (index < 0)
- return ERR_PTR(ret);
-
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
@@ -3149,6 +3142,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
+ fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
@@ -3160,20 +3154,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
out_with_header:
free_page((unsigned long)ctx->header);
out:
- spin_lock_irq(&ohci->lock);
-
- switch (type) {
- case FW_ISO_CONTEXT_RECEIVE:
- *channels |= 1ULL << channel;
- break;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ *channels |= 1ULL << channel;
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ohci->mc_allocated = false;
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ohci->mc_allocated = false;
+ break;
+ }
+ *mask |= 1 << index;
}
- *mask |= 1 << index;
-
- spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
@@ -3248,7 +3240,6 @@ static int ohci_stop_iso(struct fw_iso_context *base)
}
flush_writes(ohci);
context_stop(&ctx->context);
- tasklet_kill(&ctx->context.tasklet);
return 0;
}
@@ -3257,14 +3248,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
{
struct fw_ohci *ohci = fw_ohci(base->card);
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
int index;
ohci_stop_iso(base);
context_release(&ctx->context);
free_page((unsigned long)ctx->header);
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -3286,38 +3276,29 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
ohci->mc_allocated = false;
break;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
}
static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
struct fw_ohci *ohci = fw_ohci(base->card);
- unsigned long flags;
- int ret;
switch (base->type) {
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
- spin_lock_irqsave(&ohci->lock, flags);
-
- /* Don't allow multichannel to grab other contexts' channels. */
+ // Don't allow multichannel to grab other contexts' channels.
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
*channels = ohci->ir_context_channels;
- ret = -EBUSY;
+ return -EBUSY;
} else {
set_multichannel_mask(ohci, *channels);
- ret = 0;
+ return 0;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
-
- break;
+ }
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
}
#ifdef CONFIG_PM
@@ -3392,14 +3373,14 @@ static int queue_iso_transmit(struct iso_context *ctx,
d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
- header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
- IT_HEADER_TAG(p->tag) |
- IT_HEADER_TCODE(TCODE_STREAM_DATA) |
- IT_HEADER_CHANNEL(ctx->base.channel) |
- IT_HEADER_SPEED(ctx->base.speed));
- header[1] =
- cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
- p->payload_length));
+
+ ohci1394_it_data_set_speed(header, ctx->base.speed);
+ ohci1394_it_data_set_tag(header, p->tag);
+ ohci1394_it_data_set_channel(header, ctx->base.channel);
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, p->sy);
+
+ ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
}
if (p->header_length > 0) {
@@ -3587,24 +3568,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
- int ret = -ENOSYS;
- spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->context.ohci->lock);
+
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- ret = queue_iso_transmit(ctx, packet, buffer, payload);
- break;
+ return queue_iso_transmit(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE:
- ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
- break;
+ return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
- break;
+ return queue_iso_buffer_fill(ctx, packet, buffer, payload);
+ default:
+ return -ENOSYS;
}
- spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
- return ret;
}
static void ohci_flush_queue_iso(struct fw_iso_context *base)
@@ -3620,10 +3596,8 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
- tasklet_disable_in_atomic(&ctx->context.tasklet);
-
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
- context_tasklet((unsigned long)&ctx->context);
+ ohci_isoc_context_work(&base->work);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -3643,8 +3617,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
smp_mb__after_atomic();
}
- tasklet_enable(&ctx->context.tasklet);
-
return ret;
}
@@ -3863,7 +3835,7 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+ err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
if (err)
goto fail_irq;
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 71c2ed84cafb..218666cfe14a 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -153,7 +153,205 @@
#define OHCI1394_evt_unknown 0xe
#define OHCI1394_evt_flushed 0xf
-#define OHCI1394_phy_tcode 0xe
+
+// Asynchronous Transmit DMA.
+//
+// The content of the first two quadlets of data for AT DMA differs from the header of an IEEE 1394
+// asynchronous packet.
+
+#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000
+#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23
+#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000
+#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16
+#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00
+#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10
+#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300
+#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8
+#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0
+#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4
+#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000
+#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16
+#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff
+#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0
+#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000
+#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12
+
+static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data)
+{
+ return !!((le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT);
+}
+
+static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_srcBusID_MASK);
+ data[0] |= cpu_to_le32((src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_spd_MASK);
+ data[0] |= cpu_to_le32((scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tLabel_MASK);
+ data[0] |= cpu_to_le32((tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_rt_MASK);
+ data[0] |= cpu_to_le32((retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tCode_MASK);
+ data[0] |= cpu_to_le32((tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationId_MASK);
+ data[1] |= cpu_to_le32((destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK);
+}
+
+static inline u64 ohci1394_at_data_get_destination_offset(const __le32 *data)
+{
+ u64 hi = (u64)((le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT);
+ u64 lo = (u64)le32_to_cpu(data[2]);
+ return (hi << 32) | lo;
+}
+
+static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset)
+{
+ u32 hi = (u32)(offset >> 32);
+ u32 lo = (u32)(offset & 0x00000000ffffffff);
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
+ data[1] |= cpu_to_le32((hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
+ data[2] = cpu_to_le32(lo);
+}
+
+static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_rCode_MASK);
+ data[1] |= cpu_to_le32((rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK);
+}
+
+// Isochronous Transmit DMA.
+//
+// The content of the first two quadlets of data for IT DMA differs from the header of an IEEE 1394
+// isochronous packet.
+
+#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000
+#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16
+#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000
+#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14
+#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00
+#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8
+#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0
+#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4
+#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f
+#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0
+#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000
+#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16
+
+static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_spd_MASK);
+ data[0] |= cpu_to_le32((scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tag_MASK);
+ data[0] |= cpu_to_le32((tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_chanNum_MASK);
+ data[0] |= cpu_to_le32((channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tcode_MASK);
+ data[0] |= cpu_to_le32((tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_sync(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_sy_MASK);
+ data[0] |= cpu_to_le32((sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_IT_DATA_Q1_dataLength_MASK);
+ data[1] |= cpu_to_le32((data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK);
+}
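All of these accessors share one read-modify-write shape: clear the field with the inverted mask, then OR in the shifted, masked value. As a quick illustration of composing the two IT DMA quadlets with them — the values below are arbitrary and purely illustrative (speed code 2 is S400, and 0xa is the stream-data tcode):

	__le32 header[2] = { 0, 0 };

	ohci1394_it_data_set_speed(header, 2);		/* S400 */
	ohci1394_it_data_set_tag(header, 1);
	ohci1394_it_data_set_channel(header, 63);
	ohci1394_it_data_set_tcode(header, 0xa);	/* TCODE_STREAM_DATA */
	ohci1394_it_data_set_sync(header, 0);
	ohci1394_it_data_set_data_length(header, 512);

This mirrors what queue_iso_transmit() in ohci.c does in the hunk above.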
// Self-ID DMA.
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 7ba98c7af2e9..4d231bc375e0 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -26,6 +26,7 @@
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
@@ -53,11 +54,8 @@
#define PACK_TARGET_INFO(s, r) \
(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
-/*
- * Keeping RX TX buffer size as 4K for now
- * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config
- */
-#define RXTX_BUFFER_SIZE SZ_4K
+#define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0)
+#define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK)
#define FFA_MAX_NOTIFICATIONS 64
@@ -75,6 +73,7 @@ static const int ffa_linux_errmap[] = {
-EAGAIN, /* FFA_RET_RETRY */
-ECANCELED, /* FFA_RET_ABORTED */
-ENODATA, /* FFA_RET_NO_DATA */
+ -EAGAIN, /* FFA_RET_NOT_READY */
};
static inline int ffa_to_linux_errno(int errno)
@@ -97,7 +96,9 @@ struct ffa_drv_info {
struct mutex tx_lock; /* lock to protect Tx buffer */
void *rx_buffer;
void *tx_buffer;
+ size_t rxtx_bufsz;
bool mem_ops_native;
+ bool msg_direct_req2_supp;
bool bitmap_created;
bool notif_enabled;
unsigned int sched_recv_irq;
@@ -211,6 +212,32 @@ static int ffa_rxtx_unmap(u16 vm_id)
return 0;
}
+static int ffa_features(u32 func_feat_id, u32 input_props,
+ u32 *if_props_1, u32 *if_props_2)
+{
+ ffa_value_t id;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
+ pr_err("%s: Invalid Parameters: %x, %x", __func__,
+ func_feat_id, input_props);
+ return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ if (if_props_1)
+ *if_props_1 = id.a2;
+ if (if_props_2)
+ *if_props_2 = id.a3;
+
+ return 0;
+}
+
#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
@@ -260,17 +287,75 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
return count;
}
+#define LAST_INDEX_MASK GENMASK(15, 0)
+#define CURRENT_INDEX_MASK GENMASK(31, 16)
+#define UUID_INFO_TAG_MASK GENMASK(47, 32)
+#define PARTITION_INFO_SZ_MASK GENMASK(63, 48)
+#define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
+#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
+#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
+#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+static int
+__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+ struct ffa_partition_info *buffer, int num_parts)
+{
+ u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ ffa_value_t partition_info;
+
+ do {
+ start_idx = prev_idx ? prev_idx + 1 : 0;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_PARTITION_INFO_GET_REGS,
+ .a1 = (u64)uuid1 << 32 | uuid0,
+ .a2 = (u64)uuid3 << 32 | uuid2,
+ .a3 = start_idx | tag << 16,
+ }, &partition_info);
+
+ if (partition_info.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)partition_info.a2);
+
+ if (!count)
+ count = PARTITION_COUNT(partition_info.a2);
+ if (!buffer || !num_parts) /* count only */
+ return count;
+
+ cur_idx = CURRENT_INDEX(partition_info.a2);
+ tag = UUID_INFO_TAG(partition_info.a2);
+ buf_sz = PARTITION_INFO_SZ(partition_info.a2);
+ if (buf_sz > sizeof(*buffer))
+ buf_sz = sizeof(*buffer);
+
+ memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
+ (cur_idx - start_idx + 1) * buf_sz);
+ prev_idx = cur_idx;
+
+ } while (cur_idx < (count - 1));
+
+ return count;
+}
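To make the a2 bit packing concrete, here is a decode of one made-up return value with the helper macros above (the value is purely illustrative, not from any real firmware):

	u64 a2 = 0x0020001000030004ULL;		/* illustrative only */

	u16 count = PARTITION_COUNT(a2);	/* 0x0004 + 1 = 5 partitions */
	u16 cur = CURRENT_INDEX(a2);		/* 0x0003 */
	u16 tag = UUID_INFO_TAG(a2);		/* 0x0010 */
	u16 sz = PARTITION_INFO_SZ(a2);		/* 0x0020 = 32 bytes per entry */

The macros rely on FIELD_GET() from <linux/bitfield.h>, which this file already includes.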
+
/* buffer is allocated and caller must free the same if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
int count;
u32 uuid0_4[4];
+ bool reg_mode = false;
struct ffa_partition_info *pbuf;
+ if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
+ reg_mode = true;
+
export_uuid((u8 *)uuid0_4, uuid);
- count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
- uuid0_4[3], NULL, 0);
+ if (reg_mode)
+ count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
+ uuid0_4[2], uuid0_4[3],
+ NULL, 0);
+ else
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
+ uuid0_4[2], uuid0_4[3],
+ NULL, 0);
if (count <= 0)
return count;
@@ -278,8 +363,14 @@ ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
if (!pbuf)
return -ENOMEM;
- count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
- uuid0_4[3], pbuf, count);
+ if (reg_mode)
+ count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
+ uuid0_4[2], uuid0_4[3],
+ pbuf, count);
+ else
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
+ uuid0_4[2], uuid0_4[3],
+ pbuf, count);
if (count <= 0)
kfree(pbuf);
else
@@ -305,6 +396,18 @@ static int ffa_id_get(u16 *vm_id)
return 0;
}
+static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
+{
+ while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
+ if (ret->a0 == FFA_YIELD)
+ fsleep(1000);
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RUN, .a1 = ret->a1,
+ }, ret);
+ }
+}
+
static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
struct ffa_send_direct_data *data)
{
@@ -325,10 +428,7 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
.a6 = data->data3, .a7 = data->data4,
}, &ret);
- while (ret.a0 == FFA_INTERRUPT)
- invoke_ffa_fn((ffa_value_t){
- .a0 = FFA_RUN, .a1 = ret.a1,
- }, &ret);
+ ffa_msg_send_wait_for_completion(&ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
@@ -352,7 +452,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
ffa_value_t ret;
int retval = 0;
- if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg)))
+ if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
return -ERANGE;
mutex_lock(&drv_info->tx_lock);
@@ -377,6 +477,32 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
return retval;
}
+static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
+ struct ffa_send_direct_data2 *data)
+{
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ ffa_value_t ret, args = {
+ .a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids,
+ };
+
+ export_uuid((u8 *)&args.a2, uuid);
+ memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
+
+ invoke_ffa_fn(args, &ret);
+
+ ffa_msg_send_wait_for_completion(&ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
+ memcpy(data, &ret.a4, sizeof(*data));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
u32 frag_len, u32 len, u64 *handle)
{
@@ -561,9 +687,10 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
int ret;
void *buffer;
+ size_t rxtx_bufsz = drv_info->rxtx_bufsz;
if (!args->use_txbuf) {
- buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
} else {
@@ -571,12 +698,12 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
mutex_lock(&drv_info->tx_lock);
}
- ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+ ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);
if (args->use_txbuf)
mutex_unlock(&drv_info->tx_lock);
else
- free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(buffer, rxtx_bufsz);
return ret < 0 ? ret : 0;
}
@@ -597,32 +724,6 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags)
return 0;
}
-static int ffa_features(u32 func_feat_id, u32 input_props,
- u32 *if_props_1, u32 *if_props_2)
-{
- ffa_value_t id;
-
- if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
- pr_err("%s: Invalid Parameters: %x, %x", __func__,
- func_feat_id, input_props);
- return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
- }
-
- invoke_ffa_fn((ffa_value_t){
- .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
- }, &id);
-
- if (id.a0 == FFA_ERROR)
- return ffa_to_linux_errno((int)id.a2);
-
- if (if_props_1)
- *if_props_1 = id.a2;
- if (if_props_2)
- *if_props_2 = id.a3;
-
- return 0;
-}
-
static int ffa_notification_bitmap_create(void)
{
ffa_value_t ret;
@@ -858,11 +959,15 @@ static int ffa_run(struct ffa_device *dev, u16 vcpu)
return 0;
}
-static void ffa_set_up_mem_ops_native_flag(void)
+static void ffa_drvinfo_flags_init(void)
{
if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
!ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
drv_info->mem_ops_native = true;
+
+ if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
+ !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
+ drv_info->msg_direct_req2_supp = true;
}
static u32 ffa_api_version_get(void)
@@ -908,6 +1013,16 @@ static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
}
+static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+ struct ffa_send_direct_data2 *data)
+{
+ if (!drv_info->msg_direct_req2_supp)
+ return -EOPNOTSUPP;
+
+ return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
+ uuid, data);
+}
+
static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
if (drv_info->mem_ops_native)
@@ -1191,6 +1306,7 @@ static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
.indirect_send = ffa_indirect_msg_send,
+ .sync_send_receive2 = ffa_sync_send_receive2,
};
static const struct ffa_mem_ops ffa_drv_mem_ops = {
@@ -1242,7 +1358,7 @@ ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
if (action == BUS_NOTIFY_BIND_DRIVER) {
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
- const struct ffa_device_id *id_table= ffa_drv->id_table;
+ const struct ffa_device_id *id_table = ffa_drv->id_table;
/*
* FF-A v1.1 provides UUID for each partition as part of the
@@ -1327,8 +1443,6 @@ static int ffa_setup_partitions(void)
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
- __func__, drv_info->vm_id);
/* Already registered devices are freed on bus_exit */
ffa_partitions_cleanup();
return -ENOMEM;
@@ -1603,15 +1717,16 @@ cleanup:
static int __init ffa_init(void)
{
int ret;
+ u32 buf_sz;
+ size_t rxtx_bufsz = SZ_4K;
ret = ffa_transport_init(&invoke_ffa_fn);
if (ret)
return ret;
drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
- if (!drv_info) {
+ if (!drv_info)
return -ENOMEM;
- }
ret = ffa_version_check(&drv_info->version);
if (ret)
@@ -1623,13 +1738,24 @@ static int __init ffa_init(void)
goto free_drv_info;
}
- drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
+ if (!ret) {
+ if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
+ rxtx_bufsz = SZ_64K;
+ else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
+ rxtx_bufsz = SZ_16K;
+ else
+ rxtx_bufsz = SZ_4K;
+ }
+
+ drv_info->rxtx_bufsz = rxtx_bufsz;
+ drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
if (!drv_info->rx_buffer) {
ret = -ENOMEM;
goto free_pages;
}
- drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
if (!drv_info->tx_buffer) {
ret = -ENOMEM;
goto free_pages;
@@ -1637,7 +1763,7 @@ static int __init ffa_init(void)
ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
virt_to_phys(drv_info->rx_buffer),
- RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
+ rxtx_bufsz / FFA_PAGE_SIZE);
if (ret) {
pr_err("failed to register FFA RxTx buffers\n");
goto free_pages;
@@ -1646,7 +1772,7 @@ static int __init ffa_init(void)
mutex_init(&drv_info->rx_lock);
mutex_init(&drv_info->tx_lock);
- ffa_set_up_mem_ops_native_flag();
+ ffa_drvinfo_flags_init();
ffa_notifications_setup();
@@ -1667,8 +1793,8 @@ cleanup_notifs:
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
- free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
- free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
+ free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
free_drv_info:
kfree(drv_info);
return ret;
@@ -1680,8 +1806,8 @@ static void __exit ffa_exit(void)
ffa_notifications_cleanup();
ffa_partitions_cleanup();
ffa_rxtx_unmap(drv_info->vm_id);
- free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
- free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
+ free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
kfree(drv_info);
}
module_exit(ffa_exit);
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index aa5842be19b2..dabd874641d0 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -55,116 +55,22 @@ config ARM_SCMI_RAW_MODE_SUPPORT_COEX
operate normally, thing which could make an SCMI test suite using the
SCMI Raw mode support unreliable. If unsure, say N.
-config ARM_SCMI_HAVE_TRANSPORT
- bool
- help
- This declares whether at least one SCMI transport has been configured.
- Used to trigger a build bug when trying to build SCMI without any
- configured transport.
-
-config ARM_SCMI_HAVE_SHMEM
- bool
- help
- This declares whether a shared memory based transport for SCMI is
- available.
-
-config ARM_SCMI_HAVE_MSG
- bool
- help
- This declares whether a message passing based transport for SCMI is
- available.
-
-config ARM_SCMI_TRANSPORT_MAILBOX
- bool "SCMI transport based on Mailbox"
- depends on MAILBOX
- select ARM_SCMI_HAVE_TRANSPORT
- select ARM_SCMI_HAVE_SHMEM
- default y
- help
- Enable mailbox based transport for SCMI.
-
- If you want the ARM SCMI PROTOCOL stack to include support for a
- transport based on mailboxes, answer Y.
-
-config ARM_SCMI_TRANSPORT_OPTEE
- bool "SCMI transport based on OP-TEE service"
- depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
- select ARM_SCMI_HAVE_TRANSPORT
- select ARM_SCMI_HAVE_SHMEM
- select ARM_SCMI_HAVE_MSG
- default y
- help
- This enables the OP-TEE service based transport for SCMI.
-
- If you want the ARM SCMI PROTOCOL stack to include support for a
- transport based on OP-TEE SCMI service, answer Y.
-
-config ARM_SCMI_TRANSPORT_SMC
- bool "SCMI transport based on SMC"
- depends on HAVE_ARM_SMCCC_DISCOVERY
- select ARM_SCMI_HAVE_TRANSPORT
- select ARM_SCMI_HAVE_SHMEM
- default y
- help
- Enable SMC based transport for SCMI.
-
- If you want the ARM SCMI PROTOCOL stack to include support for a
- transport based on SMC, answer Y.
-
-config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
- bool "Enable atomic mode support for SCMI SMC transport"
- depends on ARM_SCMI_TRANSPORT_SMC
- help
- Enable support of atomic operation for SCMI SMC based transport.
-
- If you want the SCMI SMC based transport to operate in atomic
- mode, avoiding any kind of sleeping behaviour for selected
- transactions on the TX path, answer Y.
- Enabling atomic mode operations allows any SCMI driver using this
- transport to optionally ask for atomic SCMI transactions and operate
- in atomic context too, at the price of using a number of busy-waiting
- primitives all over instead. If unsure say N.
-
-config ARM_SCMI_TRANSPORT_VIRTIO
- bool "SCMI transport based on VirtIO"
- depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
- select ARM_SCMI_HAVE_TRANSPORT
- select ARM_SCMI_HAVE_MSG
- help
- This enables the virtio based transport for SCMI.
-
- If you want the ARM SCMI PROTOCOL stack to include support for a
- transport based on VirtIO, answer Y.
-
-config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
- bool "SCMI VirtIO transport Version 1 compliance"
- depends on ARM_SCMI_TRANSPORT_VIRTIO
- default y
- help
- This enforces strict compliance with VirtIO Version 1 specification.
-
- If you want the ARM SCMI VirtIO transport layer to refuse to work
- with Legacy VirtIO backends and instead support only VirtIO Version 1
- devices (or above), answer Y.
-
- If you want instead to support also old Legacy VirtIO backends (like
- the ones implemented by kvmtool) and let the core Kernel VirtIO layer
- take care of the needed conversions, say N.
-
-config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
- bool "Enable atomic mode for SCMI VirtIO transport"
- depends on ARM_SCMI_TRANSPORT_VIRTIO
+config ARM_SCMI_DEBUG_COUNTERS
+ bool "Enable SCMI communication debug metrics tracking"
+ select ARM_SCMI_NEED_DEBUGFS
+ depends on DEBUG_FS
+ default n
help
- Enable support of atomic operation for SCMI VirtIO based transport.
+ Enables tracking of some key communication metrics for debug
+ purposes. It may track metrics like how many messages were sent
+ or received, whether there were any failures, what kind, etc.
- If you want the SCMI VirtIO based transport to operate in atomic
- mode, avoiding any kind of sleeping behaviour for selected
- transactions on the TX path, answer Y.
+ Enable this option to create a new debugfs directory which contains
+ such useful debug counters. This can be helpful for debugging and
+ SCMI monitoring.
- Enabling atomic mode operations allows any SCMI driver using this
- transport to optionally ask for atomic SCMI transactions and operate
- in atomic context too, at the price of using a number of busy-waiting
- primitives all over instead. If unsure say N.
+source "drivers/firmware/arm_scmi/transports/Kconfig"
+source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
endif #ARM_SCMI_PROTOCOL
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index fd59f58ce8a2..9ac81adff567 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -5,23 +5,15 @@ scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
-scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
-scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
-scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
scmi-protocols-y += pinctrl.o
scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += transports/
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += vendors/imx/
+
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
-
-ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
-# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
-# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
-# hooks are inserted via the -pg switch.
-CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
-endif
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 97254de35ab0..86b376c50a13 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -14,7 +14,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001
#define SCMI_BASE_NUM_SOURCES 1
#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
@@ -42,7 +42,6 @@ struct scmi_msg_resp_base_discover_agent {
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
-
struct scmi_msg_base_error_notify {
__le32 event_control;
#define BASE_TP_NOTIFY_ALL BIT(0)
@@ -105,7 +104,6 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
struct scmi_xfer *t;
struct scmi_revision_info *rev = ph->get_priv(ph);
-
if (sub_vendor) {
cmd = BASE_DISCOVER_SUB_VENDOR;
vendor_id = rev->sub_vendor_id;
@@ -386,7 +384,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
- rev->major_ver = PROTOCOL_REV_MAJOR(version),
+ rev->major_ver = PROTOCOL_REV_MAJOR(version);
rev->minor_ver = PROTOCOL_REV_MINOR(version);
ph->set_priv(ph, rev, version);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 134019297d08..2ed2279388f0 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -365,6 +365,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 latency = 0;
+
attributes = le32_to_cpu(attr->attributes);
strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
/* clock_enable_latency field is present only since SCMI v3.1 */
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 4b8c5250cdb5..6d9227db473f 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -4,7 +4,7 @@
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
- * Copyright (C) 2018-2022 ARM Ltd.
+ * Copyright (C) 2018-2024 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
@@ -183,7 +183,6 @@ struct scmi_chan_info {
/**
* struct scmi_transport_ops - Structure representing a SCMI transport ops
*
- * @link_supplier: Optional callback to add link to a supplier device
* @chan_available: Callback to check if channel is available or not
* @chan_setup: Callback to allocate and setup a channel
* @chan_free: Callback to free a channel
@@ -198,7 +197,6 @@ struct scmi_chan_info {
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
- int (*link_supplier)(struct device *dev);
bool (*chan_available)(struct device_node *of_node, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
@@ -219,12 +217,6 @@ struct scmi_transport_ops {
/**
* struct scmi_desc - Description of SoC integration
*
- * @transport_init: An optional function that a transport can provide to
- * initialize some transport-specific setup during SCMI core
- * initialization, so ahead of SCMI core probing.
- * @transport_exit: An optional function that a transport can provide to
- * de-initialize some transport-specific setup during SCMI core
- * de-initialization, so after SCMI core removal.
* @ops: Pointer to the transport specific ops structure
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msg: Maximum number of messages for a channel type (tx or rx) that can
@@ -245,8 +237,6 @@ struct scmi_transport_ops {
* when requested.
*/
struct scmi_desc {
- int (*transport_init)(void);
- void (*transport_exit)(void);
const struct scmi_transport_ops *ops;
int max_rx_timeout_ms;
int max_msg;
@@ -286,20 +276,30 @@ int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer,
unsigned int timeout_ms);
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
-extern const struct scmi_desc scmi_mailbox_desc;
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
-extern const struct scmi_desc scmi_smc_desc;
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
-extern const struct scmi_desc scmi_virtio_desc;
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
-extern const struct scmi_desc scmi_optee_desc;
-#endif
-
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
+
+enum debug_counters {
+ SENT_OK,
+ SENT_FAIL,
+ SENT_FAIL_POLLING_UNSUPPORTED,
+ SENT_FAIL_CHANNEL_NOT_FOUND,
+ RESPONSE_OK,
+ NOTIFICATION_OK,
+ DELAYED_RESPONSE_OK,
+ XFERS_RESPONSE_TIMEOUT,
+ XFERS_RESPONSE_POLLED_TIMEOUT,
+ RESPONSE_POLLED_OK,
+ ERR_MSG_UNEXPECTED,
+ ERR_MSG_INVALID,
+ ERR_MSG_NOMEM,
+ ERR_PROTOCOL,
+ SCMI_DEBUG_COUNTERS_LAST
+};
+
+static inline void scmi_inc_count(atomic_t *arr, int stat)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
+ atomic_inc(&arr[stat]);
+}
enum scmi_bad_msg {
MSG_UNEXPECTED = -1,
@@ -309,24 +309,44 @@ enum scmi_bad_msg {
MSG_MBOX_SPURIOUS = -5,
};
-void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
- enum scmi_bad_msg err);
-
/* shmem related declarations */
struct scmi_shared_mem;
-void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer, struct scmi_chan_info *cinfo);
-u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
-void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+/**
+ * struct scmi_shared_mem_operations - Transport core operations for
+ * Shared Memory
+ *
+ * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem
+ * @read_header: Read header of the message currently held in @shmem
+ * @fetch_response: Copy the message response from @shmem into @xfer
+ * @fetch_notification: Copy the message notification from @shmem into @xfer
+ * @clear_channel: Clear the @shmem channel busy flag
+ * @poll_done: Check if poll has completed for @xfer on @shmem
+ * @channel_free: Check if @shmem channel is marked as free
+ * @channel_intr_enabled: Check if @shmem channel has requested a completion irq
+ * @setup_iomap: Setup IO shared memory for channel @cinfo
+ */
+struct scmi_shared_mem_operations {
+ void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo);
+ u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);
+
+ void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
+ bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
-void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer);
-void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
-bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer);
-bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
-bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem);
+ bool (*channel_free)(struct scmi_shared_mem __iomem *shmem);
+ bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
+ void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
+ struct device *dev,
+ bool tx, struct resource *res);
+};
+
+const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);
/* declarations for message passing transports */
struct scmi_msg_payld;
@@ -334,14 +354,108 @@ struct scmi_msg_payld;
/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))
-size_t msg_response_size(struct scmi_xfer *xfer);
-size_t msg_command_size(struct scmi_xfer *xfer);
-void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
-u32 msg_read_header(struct scmi_msg_payld *msg);
-void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
- struct scmi_xfer *xfer);
-void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
- size_t max_len, struct scmi_xfer *xfer);
+/**
+ * struct scmi_message_operations - Transport core operations for Message
+ *
+ * @response_size: Get calculated response size for @xfer
+ * @command_size: Get calculated command size for @xfer
+ * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg
+ * @read_header: Read header of the message currently held in @msg
+ * @fetch_response: Copy the message response from @msg into @xfer
+ * @fetch_notification: Copy the message notification from @msg into @xfer
+ */
+struct scmi_message_operations {
+ size_t (*response_size)(struct scmi_xfer *xfer);
+ size_t (*command_size)(struct scmi_xfer *xfer);
+ void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
+ u32 (*read_header)(struct scmi_msg_payld *msg);
+ void (*fetch_response)(struct scmi_msg_payld *msg, size_t len,
+ struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer);
+};
+
+const struct scmi_message_operations *scmi_message_operations_get(void);
+
+/**
+ * struct scmi_transport_core_operations - Transport core operations
+ *
+ * @bad_message_trace: A helper to report a malformed/unexpected message
+ * @rx_callback: Callback to report received messages
+ * @shmem: Datagram operations for shared memory based transports
+ * @msg: Datagram operations for message based transports
+ */
+struct scmi_transport_core_operations {
+ void (*bad_message_trace)(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, enum scmi_bad_msg err);
+ void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ void *priv);
+ const struct scmi_shared_mem_operations *shmem;
+ const struct scmi_message_operations *msg;
+};
+
+/**
+ * struct scmi_transport - A structure representing a configured transport
+ *
+ * @supplier: Device representing the transport and acting as a supplier for
+ * the core SCMI stack
+ * @desc: Transport descriptor
+ * @core_ops: A pointer to a pointer used by the core SCMI stack to make the
+ * core transport operations accessible to the transports.
+ */
+struct scmi_transport {
+ struct device *supplier;
+ struct scmi_desc *desc;
+ struct scmi_transport_core_operations **core_ops;
+};
+
+#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\
+static void __tag##_dev_free(void *data) \
+{ \
+ struct platform_device *spdev = data; \
+ \
+ platform_device_unregister(spdev); \
+} \
+ \
+static int __tag##_probe(struct platform_device *pdev) \
+{ \
+ struct device *dev = &pdev->dev; \
+ struct platform_device *spdev; \
+ struct scmi_transport strans; \
+ int ret; \
+ \
+ spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO); \
+ if (!spdev) \
+ return -ENOMEM; \
+ \
+ device_set_of_node_from_dev(&spdev->dev, dev); \
+ \
+ strans.supplier = dev; \
+ strans.desc = &(__desc); \
+ strans.core_ops = &(__core_ops); \
+ \
+ ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
+ if (ret) \
+ goto err; \
+ \
+ ret = platform_device_add(spdev); \
+ if (ret) \
+ goto err; \
+ \
+ return devm_add_action_or_reset(dev, __tag##_dev_free, spdev); \
+ \
+err: \
+ platform_device_put(spdev); \
+ return ret; \
+} \
+ \
+static struct platform_driver __drv = { \
+ .driver = { \
+ .name = #__tag "_transport", \
+ .of_match_table = __match, \
+ }, \
+ .probe = __tag##_probe, \
+}
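
Each transport module expands the macro above exactly once, handing in its descriptor, its OF match table, and a module-local core-ops pointer; during probe the core writes through that double pointer (*trans->core_ops), so the module's static core variable becomes valid before any channel is set up. A minimal sketch for a hypothetical transport — all "foo" names are illustrative, and the real expansions appear in the mailbox/smc/optee/virtio diffs below:

static struct scmi_transport_core_operations *core;

/* scmi_foo_ops would hold this transport's scmi_transport_ops callbacks */
static struct scmi_desc scmi_foo_desc = {
	.ops = &scmi_foo_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
};

static const struct of_device_id scmi_foo_of_match[] = {
	{ .compatible = "vendor,scmi-foo" },	/* illustrative compatible */
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_foo, scmi_foo_driver, scmi_foo_desc,
			     scmi_foo_of_match, core);
module_platform_driver(scmi_foo_driver);
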
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 6b6957f4743f..69c15135371c 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2024 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -117,12 +117,14 @@ struct scmi_protocol_instance {
* @name: Name of this SCMI instance
* @type: Type of this SCMI instance
* @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t counters used for tracking statistics (if enabled)
*/
struct scmi_debug_info {
struct dentry *top_dentry;
const char *name;
const char *type;
bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};
/**
@@ -194,6 +196,16 @@ struct scmi_info {
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
+static void scmi_rx_callback(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv);
+static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, enum scmi_bad_msg err);
+
+static struct scmi_transport_core_operations scmi_trans_core_ops = {
+ .bad_message_trace = scmi_bad_message_trace,
+ .rx_callback = scmi_rx_callback,
+};
+
static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
char *sub_vendor_id, u32 impl_ver)
@@ -833,8 +845,8 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
* timed-out message that arrives and as such, can be traced only referring to
* the header content, since the payload is missing/unreliable.
*/
-void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
- enum scmi_bad_msg err)
+static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ enum scmi_bad_msg err)
{
char *tag;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -988,6 +1000,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1015,6 +1028,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
+ scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1054,6 +1068,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
+ scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1069,6 +1084,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
+ scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1128,8 +1144,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
+ scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
+ scmi_inc_count(info->dbg->counters, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1160,7 +1178,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
+static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ void *priv)
{
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
@@ -1213,6 +1232,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
struct scmi_xfer *xfer, unsigned int timeout_ms)
{
int ret = 0;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
if (xfer->hdr.poll_completion) {
/*
@@ -1233,13 +1253,12 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
+ scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
if (!ret) {
unsigned long flags;
- struct scmi_info *info =
- handle_to_scmi_info(cinfo->handle);
/*
* Do not fetch_response if an out-of-order delayed
@@ -1259,11 +1278,9 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
+ scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
- struct scmi_info *info =
- handle_to_scmi_info(cinfo->handle);
-
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1276,6 +1293,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
+ scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1359,13 +1377,15 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
+ scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
- if (unlikely(!cinfo))
+ if (unlikely(!cinfo)) {
+ scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
-
+ }
/* True ONLY if also supported by transport. */
if (is_polling_enabled(cinfo, info->desc))
xfer->hdr.poll_completion = true;
@@ -1397,16 +1417,20 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
+ scmi_inc_count(info->dbg->counters, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
+ scmi_inc_count(info->dbg->counters, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
- if (!ret && xfer->hdr.status)
+ if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
+ scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ }
if (info->desc->ops->mark_txdone)
info->desc->ops->mark_txdone(cinfo, ret, xfer);
@@ -2708,14 +2732,14 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
static int scmi_channels_setup(struct scmi_info *info)
{
int ret;
- struct device_node *child, *top_np = info->dev->of_node;
+ struct device_node *top_np = info->dev->of_node;
/* Initialize a common generic channel at first */
ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
- for_each_available_child_of_node(top_np, child) {
+ for_each_available_child_of_node_scoped(top_np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
@@ -2726,10 +2750,8 @@ static int scmi_channels_setup(struct scmi_info *info)
"Out of range protocol %d\n", prot_id);
ret = scmi_txrx_setup(info, child, prot_id);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
@@ -2833,6 +2855,56 @@ static int scmi_device_request_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+static const char * const dbg_counter_strs[] = {
+ "sent_ok",
+ "sent_fail",
+ "sent_fail_polling_unsupported",
+ "sent_fail_channel_not_found",
+ "response_ok",
+ "notification_ok",
+ "delayed_response_ok",
+ "xfers_response_timeout",
+ "xfers_response_polled_timeout",
+ "response_polled_ok",
+ "err_msg_unexpected",
+ "err_msg_invalid",
+ "err_msg_nomem",
+ "err_protocol",
+};
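
The order of these strings must track the counter enum they label; the enum and the scmi_inc_count() helper used throughout the hunks above live in common.h and are not part of this diff. A sketch of the assumed definitions, matching the string order:

enum scmi_debug_counters {
	SENT_OK,
	SENT_FAIL,
	SENT_FAIL_POLLING_UNSUPPORTED,
	SENT_FAIL_CHANNEL_NOT_FOUND,
	RESPONSE_OK,
	NOTIFICATION_OK,
	DELAYED_RESPONSE_OK,
	XFERS_RESPONSE_TIMEOUT,
	XFERS_RESPONSE_POLLED_TIMEOUT,
	RESPONSE_POLLED_OK,
	ERR_MSG_UNEXPECTED,
	ERR_MSG_INVALID,
	ERR_MSG_NOMEM,
	ERR_PROTOCOL,
	SCMI_DEBUG_COUNTERS_LAST
};

/* Assumed shape of the increment helper: it compiles away when the
 * config option is off, so call sites need no ifdeffery.
 */
static inline void scmi_inc_count(atomic_t *arr, int stat)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
		atomic_inc(&arr[stat]);
}
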
+
+static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scmi_debug_info *dbg = filp->private_data;
+
+ for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
+ atomic_set(&dbg->counters[i], 0);
+
+ return count;
+}
+
+static const struct file_operations fops_reset_counts = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = reset_all_on_write,
+};
+
+static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
+ struct dentry *trans)
+{
+ struct dentry *counters;
+ int idx;
+
+ counters = debugfs_create_dir("counters", trans);
+
+ for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
+ debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
+ &dbg->counters[idx]);
+
+ debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
+}
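
Assuming the usual SCMI debugfs layout, each counter then surfaces as a read/write file under /sys/kernel/debug/scmi/<id>/transport/counters/, and writing any string to the sibling reset file zeroes all of them in one go — handy for measuring a single workload window.
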
+
static void scmi_debugfs_common_cleanup(void *d)
{
struct scmi_debug_info *dbg = d;
@@ -2899,6 +2971,9 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
debugfs_create_u32("rx_max_msg", 0400, trans,
(u32 *)&info->rx_minfo.max_msg);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
+ scmi_debugfs_counters_setup(dbg, trans);
+
dbg->top_dentry = top_dentry;
if (devm_add_action_or_reset(info->dev,
@@ -2950,6 +3025,37 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
return ret;
}
+static const struct scmi_desc *scmi_transport_setup(struct device *dev)
+{
+ struct scmi_transport *trans;
+ int ret;
+
+ trans = dev_get_platdata(dev);
+ if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ return NULL;
+
+ if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev,
+ "Adding link to supplier transport device failed\n");
+ return NULL;
+ }
+
+ /* Provide core transport ops */
+ *trans->core_ops = &scmi_trans_core_ops;
+
+ dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
+
+ ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms",
+ &trans->desc->max_rx_timeout_ms);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n");
+
+ dev_info(dev, "SCMI max-rx-timeout: %dms\n",
+ trans->desc->max_rx_timeout_ms);
+
+ return trans->desc;
+}
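
The max-rx-timeout-ms lookup above lets a platform override the transport descriptor's default RX timeout straight from the matched device-tree node (for example a max-rx-timeout-ms = <50>; property on the SCMI node). An absent property returns -EINVAL and silently keeps the descriptor default; any other failure is logged as malformed but stays non-fatal, since the function still returns the descriptor.
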
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -2961,9 +3067,12 @@ static int scmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
+ desc = scmi_transport_setup(dev);
+ if (!desc) {
+ err_str = "transport invalid\n";
+ ret = -EINVAL;
+ goto out_err;
+ }
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -3002,14 +3111,6 @@ static int scmi_probe(struct platform_device *pdev)
info->atomic_threshold);
handle->is_transport_atomic = scmi_is_transport_atomic;
- if (desc->ops->link_supplier) {
- ret = desc->ops->link_supplier(dev);
- if (ret) {
- err_str = "transport not ready\n";
- goto clear_ida;
- }
- }
-
/* Setup all channels described in the DT at first */
ret = scmi_channels_setup(info);
if (ret) {
@@ -3130,6 +3231,7 @@ clear_txrx_setup:
clear_ida:
ida_free(&scmi_id, info->id);
+out_err:
return dev_err_probe(dev, ret, "%s", err_str);
}
@@ -3215,86 +3317,16 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);
-/* Each compatible listed below must have descriptor associated with it */
-static const struct of_device_id scmi_of_match[] = {
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
- { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
- { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
- { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
- { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
- { .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc},
-#endif
-#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
- { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
-#endif
- { /* Sentinel */ },
-};
-
-MODULE_DEVICE_TABLE(of, scmi_of_match);
-
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.suppress_bind_attrs = true,
- .of_match_table = scmi_of_match,
.dev_groups = versions_groups,
},
.probe = scmi_probe,
.remove_new = scmi_remove,
};
-/**
- * __scmi_transports_setup - Common helper to call transport-specific
- * .init/.exit code if provided.
- *
- * @init: A flag to distinguish between init and exit.
- *
- * Note that, if provided, we invoke .init/.exit functions for all the
- * transports currently compiled in.
- *
- * Return: 0 on Success.
- */
-static inline int __scmi_transports_setup(bool init)
-{
- int ret = 0;
- const struct of_device_id *trans;
-
- for (trans = scmi_of_match; trans->data; trans++) {
- const struct scmi_desc *tdesc = trans->data;
-
- if ((init && !tdesc->transport_init) ||
- (!init && !tdesc->transport_exit))
- continue;
-
- if (init)
- ret = tdesc->transport_init();
- else
- tdesc->transport_exit();
-
- if (ret) {
- pr_err("SCMI transport %s FAILED initialization!\n",
- trans->compatible);
- break;
- }
- }
-
- return ret;
-}
-
-static int __init scmi_transports_init(void)
-{
- return __scmi_transports_setup(true);
-}
-
-static void __exit scmi_transports_exit(void)
-{
- __scmi_transports_setup(false);
-}
-
static struct dentry *scmi_debugfs_init(void)
{
struct dentry *d;
@@ -3310,16 +3342,15 @@ static struct dentry *scmi_debugfs_init(void)
static int __init scmi_driver_init(void)
{
- int ret;
-
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
- /* Initialize any compiled-in transport which provided an init/exit */
- ret = scmi_transports_init();
- if (ret)
- return ret;
+ if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
+ scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
+ scmi_trans_core_ops.msg = scmi_message_operations_get();
if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
scmi_top_dentry = scmi_debugfs_init();
@@ -3354,8 +3385,6 @@ static void __exit scmi_driver_exit(void)
scmi_powercap_unregister();
scmi_pinctrl_unregister();
- scmi_transports_exit();
-
platform_driver_unregister(&scmi_driver);
debugfs_remove_recursive(scmi_top_dentry);
diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c
index d33a704e5814..2cc74e6bbd72 100644
--- a/drivers/firmware/arm_scmi/msg.c
+++ b/drivers/firmware/arm_scmi/msg.c
@@ -4,7 +4,7 @@
*
* Derived from shm.c.
*
- * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2019-2024 ARM Ltd.
* Copyright (C) 2020-2021 OpenSynergy GmbH
*/
@@ -30,7 +30,7 @@ struct scmi_msg_payld {
*
* Return: transport SDU size.
*/
-size_t msg_command_size(struct scmi_xfer *xfer)
+static size_t msg_command_size(struct scmi_xfer *xfer)
{
return sizeof(struct scmi_msg_payld) + xfer->tx.len;
}
@@ -42,7 +42,7 @@ size_t msg_command_size(struct scmi_xfer *xfer)
*
* Return: transport SDU size.
*/
-size_t msg_response_size(struct scmi_xfer *xfer)
+static size_t msg_response_size(struct scmi_xfer *xfer)
{
return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len;
}
@@ -53,7 +53,7 @@ size_t msg_response_size(struct scmi_xfer *xfer)
* @msg: transport SDU for command
* @xfer: message which is being sent
*/
-void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
+static void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
{
msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr));
if (xfer->tx.buf)
@@ -67,7 +67,7 @@ void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
*
* Return: SCMI header
*/
-u32 msg_read_header(struct scmi_msg_payld *msg)
+static u32 msg_read_header(struct scmi_msg_payld *msg)
{
return le32_to_cpu(msg->msg_header);
}
@@ -79,8 +79,8 @@ u32 msg_read_header(struct scmi_msg_payld *msg)
* @len: transport SDU size
* @xfer: message being responded to
*/
-void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
- struct scmi_xfer *xfer)
+static void msg_fetch_response(struct scmi_msg_payld *msg,
+ size_t len, struct scmi_xfer *xfer)
{
size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]);
@@ -100,8 +100,8 @@ void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
* @max_len: maximum SCMI payload size to fetch
* @xfer: notification message
*/
-void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
- size_t max_len, struct scmi_xfer *xfer)
+static void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer)
{
xfer->rx.len = min_t(size_t, max_len,
len >= sizeof(*msg) ? len - sizeof(*msg) : 0);
@@ -109,3 +109,17 @@ void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
/* Take a copy to the rx buffer.. */
memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
}
+
+static const struct scmi_message_operations scmi_msg_ops = {
+ .tx_prepare = msg_tx_prepare,
+ .command_size = msg_command_size,
+ .response_size = msg_response_size,
+ .read_header = msg_read_header,
+ .fetch_response = msg_fetch_response,
+ .fetch_notification = msg_fetch_notification,
+};
+
+const struct scmi_message_operations *scmi_message_operations_get(void)
+{
+ return &scmi_msg_ops;
+}
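
Transports selecting ARM_SCMI_HAVE_MSG now reach these helpers only through the ops table returned above, via the module-local core pointer populated at probe time. A minimal sketch of a TX path under that assumption — foo_get_free_sdu() and foo_queue_sdu() are hypothetical transport internals; compare the real optee and virtio conversions later in this patch:

static struct scmi_transport_core_operations *core;

static int foo_send_message(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer)
{
	/* Hypothetical per-channel SDU buffer owned by the transport */
	struct scmi_msg_payld *sdu = foo_get_free_sdu(cinfo);

	if (!sdu)
		return -EBUSY;

	/* Serialize header and payload into the SDU... */
	core->msg->tx_prepare(sdu, xfer);

	/* ...then hand it to the hypothetical lower layer */
	return foo_queue_sdu(cinfo, sdu, core->msg->command_size(xfer));
}
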
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 4b7f1cbb9b04..2d77b5f40ca7 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -310,7 +310,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
}
if (!dom_info->mult_factor)
dev_warn(ph->dev,
- "Wrong sustained perf/frequency(domain %d)\n",
+ "Wrong sustained perf/frequency(domain %d)\n",
dom_info->id);
strscpy(dom_info->info.name, attr->name,
diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c
index a2a7f880d6a3..3855c98caf06 100644
--- a/drivers/firmware/arm_scmi/pinctrl.c
+++ b/drivers/firmware/arm_scmi/pinctrl.c
@@ -913,4 +913,5 @@ static const struct scmi_protocol scmi_pinctrl = {
.ops = &pinctrl_proto_ops,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
+
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 49666bd1d8ac..59aa16444c64 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -14,7 +14,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 1b318316535e..0aa82b96f41b 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -14,7 +14,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7fc5535ca34c..791efd0f82d7 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -15,7 +15,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001
#define SCMI_MAX_NUM_SENSOR_AXIS 63
#define SCMIv2_SENSOR_PROTOCOL 0x10000
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index b74e5a740f2c..01d8a9398fe8 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -2,11 +2,13 @@
/*
* For transport using shared mem structure.
*
- * Copyright (C) 2019 ARM Ltd.
+ * Copyright (C) 2019-2024 ARM Ltd.
*/
#include <linux/ktime.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/processor.h>
#include <linux/types.h>
@@ -32,8 +34,9 @@ struct scmi_shared_mem {
u8 msg_payload[];
};
-void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer, struct scmi_chan_info *cinfo)
+static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo)
{
ktime_t stop;
@@ -73,13 +76,13 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
-u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
return ioread32(&shmem->msg_header);
}
-void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer)
+static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
{
size_t len = ioread32(&shmem->length);
@@ -91,8 +94,8 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
-void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer)
+static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
{
size_t len = ioread32(&shmem->length);
@@ -103,13 +106,13 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
-void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
-bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer)
+static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
{
u16 xfer_id;
@@ -123,13 +126,69 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
-bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
{
return (ioread32(&shmem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
-bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
+static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
{
return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
}
+
+static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
+ struct device *dev, bool tx,
+ struct resource *res)
+{
+ struct device_node *shmem __free(device_node);
+ const char *desc = tx ? "Tx" : "Rx";
+ int ret, idx = tx ? 0 : 1;
+ struct device *cdev = cinfo->dev;
+ struct resource lres = {};
+ resource_size_t size;
+ void __iomem *addr;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ if (!shmem)
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
+ return IOMEM_ERR_PTR(-ENXIO);
+
+ /* Use a local on-stack as a working area when not provided */
+ if (!res)
+ res = &lres;
+
+ ret = of_address_to_resource(shmem, 0, res);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+ return IOMEM_ERR_PTR(ret);
+ }
+
+ size = resource_size(res);
+ addr = devm_ioremap(dev, res->start, size);
+ if (!addr) {
+ dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+ return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
+ }
+
+ return addr;
+}
+
+static const struct scmi_shared_mem_operations scmi_shmem_ops = {
+ .tx_prepare = shmem_tx_prepare,
+ .read_header = shmem_read_header,
+ .fetch_response = shmem_fetch_response,
+ .fetch_notification = shmem_fetch_notification,
+ .clear_channel = shmem_clear_channel,
+ .poll_done = shmem_poll_done,
+ .channel_free = shmem_channel_free,
+ .channel_intr_enabled = shmem_channel_intr_enabled,
+ .setup_iomap = shmem_setup_iomap,
+};
+
+const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void)
+{
+ return &scmi_shmem_ops;
+}
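
shmem_setup_iomap() above relies on the scope-based cleanup support from linux/cleanup.h: tagging the node pointer with __free(device_node) makes of_node_put() run automatically on every exit path, which is why none of the early returns carries an explicit put. A standalone sketch of the same pattern, with a hypothetical caller:

#include <linux/cleanup.h>
#include <linux/of.h>

/* of_node_put() fires automatically when 'np' goes out of scope */
static int foo_count_shmem_children(struct device_node *parent)
{
	struct device_node *np __free(device_node) =
		of_parse_phandle(parent, "shmem", 0);

	if (!np)
		return -ENODEV;

	return of_get_child_count(np);
}
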
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index b6358c155f7f..ec3d355d1772 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -14,7 +14,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001
#define SCMI_SYSTEM_NUM_SOURCES 1
diff --git a/drivers/firmware/arm_scmi/transports/Kconfig b/drivers/firmware/arm_scmi/transports/Kconfig
new file mode 100644
index 000000000000..57eccf316e26
--- /dev/null
+++ b/drivers/firmware/arm_scmi/transports/Kconfig
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "SCMI Transport Drivers"
+
+config ARM_SCMI_HAVE_TRANSPORT
+ bool
+ help
+ This declares whether at least one SCMI transport has been configured.
+ Used to trigger a build bug when trying to build SCMI without any
+ configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+ bool
+ help
+ This declares whether a shared memory based transport for SCMI is
+ available.
+
+config ARM_SCMI_HAVE_MSG
+ bool
+ help
+ This declares whether a message passing based transport for SCMI is
+ available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+ tristate "SCMI transport based on Mailbox"
+ depends on MAILBOX
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable mailbox based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on mailboxes, answer Y.
+ This driver can also be built as a module. If so, the module
+ will be called scmi_transport_mailbox.
+
+config ARM_SCMI_TRANSPORT_SMC
+ tristate "SCMI transport based on SMC"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable SMC based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on SMC, answer Y.
+ This driver can also be built as a module. If so, the module
+ will be called scmi_transport_smc.
+
+config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
+ bool "Enable atomic mode support for SCMI SMC transport"
+ depends on ARM_SCMI_TRANSPORT_SMC
+ help
+ Enable support of atomic operation for SCMI SMC based transport.
+
+ If you want the SCMI SMC based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure say N.
+
+config ARM_SCMI_TRANSPORT_OPTEE
+ tristate "SCMI transport based on OP-TEE service"
+ depends on OPTEE
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ select ARM_SCMI_HAVE_MSG
+ default y
+ help
+ This enables the OP-TEE service based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on OP-TEE SCMI service, answer Y.
+ This driver can also be built as a module. If so, the module
+ will be called scmi_transport_optee.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+ tristate "SCMI transport based on VirtIO"
+ depends on VIRTIO
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_MSG
+ help
+ This enables the virtio based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on VirtIO, answer Y.
+ This driver can also be built as a module. If so, the module
+ will be called scmi_transport_virtio.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+ bool "SCMI VirtIO transport Version 1 compliance"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ default y
+ help
+ This enforces strict compliance with the VirtIO Version 1 specification.
+
+ If you want the ARM SCMI VirtIO transport layer to refuse to work
+ with Legacy VirtIO backends and instead support only VirtIO Version 1
+ devices (or above), answer Y.
+
+ If you want instead to support also old Legacy VirtIO backends (like
+ the ones implemented by kvmtool) and let the core Kernel VirtIO layer
+ take care of the needed conversions, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
+ bool "Enable atomic mode for SCMI VirtIO transport"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ help
+ Enable support of atomic operation for SCMI VirtIO based transport.
+
+ If you want the SCMI VirtIO based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure say N.
+
+endmenu
diff --git a/drivers/firmware/arm_scmi/transports/Makefile b/drivers/firmware/arm_scmi/transports/Makefile
new file mode 100644
index 000000000000..362a406f08e6
--- /dev/null
+++ b/drivers/firmware/arm_scmi/transports/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+scmi_transport_mailbox-objs := mailbox.o
+obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o
+scmi_transport_smc-objs := smc.o
+obj-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += scmi_transport_smc.o
+scmi_transport_optee-objs := optee.o
+obj-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += scmi_transport_optee.o
+scmi_transport_virtio-objs := virtio.o
+obj-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += scmi_transport_virtio.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index 0219a12e3209..1a754dee24f7 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -3,7 +3,7 @@
* System Control and Management Interface (SCMI) Message Mailbox Transport
* driver.
*
- * Copyright (C) 2019 ARM Ltd.
+ * Copyright (C) 2019-2024 ARM Ltd.
*/
#include <linux/err.h>
@@ -11,9 +11,10 @@
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "common.h"
+#include "../common.h"
/**
* struct scmi_mailbox - Structure representing a SCMI mailbox transport
@@ -36,11 +37,13 @@ struct scmi_mailbox {
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+static struct scmi_transport_core_operations *core;
+
static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
- shmem_tx_prepare(smbox->shmem, m, smbox->cinfo);
+ core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo);
}
static void rx_callback(struct mbox_client *cl, void *m)
@@ -56,15 +59,17 @@ static void rx_callback(struct mbox_client *cl, void *m)
* a previous timed-out reply which arrived late could be wrongly
* associated with the next pending transaction.
*/
- if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+ if (cl->knows_txdone &&
+ !core->shmem->channel_free(smbox->shmem)) {
dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
- scmi_bad_message_trace(smbox->cinfo,
- shmem_read_header(smbox->shmem),
- MSG_MBOX_SPURIOUS);
+ core->bad_message_trace(smbox->cinfo,
+ core->shmem->read_header(smbox->shmem),
+ MSG_MBOX_SPURIOUS);
return;
}
- scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
+ core->rx_callback(smbox->cinfo,
+ core->shmem->read_header(smbox->shmem), NULL);
}
static bool mailbox_chan_available(struct device_node *of_node, int idx)
@@ -124,18 +129,16 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
/* Bail out if provided shmem descriptors do not refer distinct areas */
if (num_sh > 1) {
- struct device_node *np_tx, *np_rx;
+ struct device_node *np_tx __free(device_node) =
+ of_parse_phandle(np, "shmem", 0);
+ struct device_node *np_rx __free(device_node) =
+ of_parse_phandle(np, "shmem", 1);
- np_tx = of_parse_phandle(np, "shmem", 0);
- np_rx = of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
of_node_full_name(np));
ret = -EINVAL;
}
-
- of_node_put(np_tx);
- of_node_put(np_rx);
}
/* Calculate channels IDs to use depending on mboxes/shmem layout */
@@ -178,11 +181,8 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
const char *desc = tx ? "Tx" : "Rx";
struct device *cdev = cinfo->dev;
struct scmi_mailbox *smbox;
- struct device_node *shmem;
- int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan, idx = tx ? 0 : 1;
+ int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
struct mbox_client *cl;
- resource_size_t size;
- struct resource res;
ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
if (ret)
@@ -195,25 +195,9 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!smbox)
return -ENOMEM;
- shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
- if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
- of_node_put(shmem);
- return -ENXIO;
- }
-
- ret = of_address_to_resource(shmem, 0, &res);
- of_node_put(shmem);
- if (ret) {
- dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
- return ret;
- }
-
- size = resource_size(&res);
- smbox->shmem = devm_ioremap(dev, res.start, size);
- if (!smbox->shmem) {
- dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
- return -EADDRNOTAVAIL;
- }
+ smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL);
+ if (IS_ERR(smbox->shmem))
+ return PTR_ERR(smbox->shmem);
cl = &smbox->cl;
cl->dev = cdev;
@@ -252,7 +236,6 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
}
}
-
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
@@ -312,7 +295,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- shmem_fetch_response(smbox->shmem, xfer);
+ core->shmem->fetch_response(smbox->shmem, xfer);
}
static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
@@ -320,7 +303,7 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- shmem_fetch_notification(smbox->shmem, max_len, xfer);
+ core->shmem->fetch_notification(smbox->shmem, max_len, xfer);
}
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
@@ -329,9 +312,9 @@ static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
struct mbox_chan *intr_chan;
int ret;
- shmem_clear_channel(smbox->shmem);
+ core->shmem->clear_channel(smbox->shmem);
- if (!shmem_channel_intr_enabled(smbox->shmem))
+ if (!core->shmem->channel_intr_enabled(smbox->shmem))
return;
if (smbox->chan_platform_receiver)
@@ -354,7 +337,7 @@ mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- return shmem_poll_done(smbox->shmem, xfer);
+ return core->shmem->poll_done(smbox->shmem, xfer);
}
static const struct scmi_transport_ops scmi_mailbox_ops = {
@@ -369,9 +352,22 @@ static const struct scmi_transport_ops scmi_mailbox_ops = {
.poll_done = mailbox_poll_done,
};
-const struct scmi_desc scmi_mailbox_desc = {
+static struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
+
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "arm,scmi" },
+ { /* Sentinel */ },
+};
+
+DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
+ scmi_mailbox_desc, scmi_of_match, core);
+module_platform_driver(scmi_mailbox_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("SCMI Mailbox Transport driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 4e7944b91e38..56fc63edf51e 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -9,12 +9,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
#include <uapi/linux/tee.h>
-#include "common.h"
+#include "../common.h"
#define SCMI_OPTEE_MAX_MSG_SIZE 128
@@ -148,12 +149,11 @@ struct scmi_optee_agent {
struct list_head channel_list;
};
+static struct scmi_transport_core_operations *core;
+
/* There can be only 1 SCMI service in OP-TEE we connect to */
static struct scmi_optee_agent *scmi_optee_private;
-/* Forward reference to scmi_optee transport initialization */
-static int scmi_optee_init(void);
-
/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
{
@@ -312,24 +312,6 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
return 0;
}
-static int scmi_optee_link_supplier(struct device *dev)
-{
- if (!scmi_optee_private) {
- if (scmi_optee_init())
- dev_dbg(dev, "Optee bus not yet ready\n");
-
- /* Wait for optee bus */
- return -EPROBE_DEFER;
- }
-
- if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
- dev_err(dev, "Adding link to supplier optee device failed\n");
- return -ECANCELED;
- }
-
- return 0;
-}
-
static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
{
u32 channel_id;
@@ -343,7 +325,7 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
struct scmi_optee_channel *channel = cinfo->transport_info;
if (!channel->tee_shm)
- shmem_clear_channel(channel->req.shmem);
+ core->shmem->clear_channel(channel->req.shmem);
}
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
@@ -368,38 +350,11 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
- struct device_node *np;
- resource_size_t size;
- struct resource res;
- int ret;
+ channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL);
+ if (IS_ERR(channel->req.shmem))
+ return PTR_ERR(channel->req.shmem);
- np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
- if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
- ret = -ENXIO;
- goto out;
- }
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(dev, "Failed to get SCMI Tx shared memory\n");
- goto out;
- }
-
- size = resource_size(&res);
-
- channel->req.shmem = devm_ioremap(dev, res.start, size);
- if (!channel->req.shmem) {
- dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
- ret = -EADDRNOTAVAIL;
- goto out;
- }
-
- ret = 0;
-
-out:
- of_node_put(np);
-
- return ret;
+ return 0;
}
static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
@@ -473,6 +428,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_optee_channel *channel = cinfo->transport_info;
+ /*
+ * Different protocols might share the same chan info, so a previous
+ * call might have already freed the structure.
+ */
+ if (!channel)
+ return 0;
+
mutex_lock(&scmi_optee_private->mu);
list_del(&channel->link);
mutex_unlock(&scmi_optee_private->mu);
@@ -499,10 +461,11 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
mutex_lock(&channel->mu);
if (channel->tee_shm) {
- msg_tx_prepare(channel->req.msg, xfer);
- ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
+ core->msg->tx_prepare(channel->req.msg, xfer);
+ ret = invoke_process_msg_channel(channel,
+ core->msg->command_size(xfer));
} else {
- shmem_tx_prepare(channel->req.shmem, xfer, cinfo);
+ core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo);
ret = invoke_process_smt_channel(channel);
}
@@ -518,9 +481,10 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel = cinfo->transport_info;
if (channel->tee_shm)
- msg_fetch_response(channel->req.msg, channel->rx_len, xfer);
+ core->msg->fetch_response(channel->req.msg,
+ channel->rx_len, xfer);
else
- shmem_fetch_response(channel->req.shmem, xfer);
+ core->shmem->fetch_response(channel->req.shmem, xfer);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -532,7 +496,6 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
}
static struct scmi_transport_ops scmi_optee_ops = {
- .link_supplier = scmi_optee_link_supplier,
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
@@ -547,6 +510,22 @@ static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *
return ver->impl_id == TEE_IMPL_ID_OPTEE;
}
+static struct scmi_desc scmi_optee_desc = {
+ .ops = &scmi_optee_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .sync_cmds_completed_on_ret = true,
+};
+
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "linaro,scmi-optee" },
+ { /* Sentinel */ },
+};
+
+DEFINE_SCMI_TRANSPORT_DRIVER(scmi_optee, scmi_optee_driver, scmi_optee_desc,
+ scmi_of_match, core);
+
static int scmi_optee_service_probe(struct device *dev)
{
struct scmi_optee_agent *agent;
@@ -582,6 +561,12 @@ static int scmi_optee_service_probe(struct device *dev)
smp_mb();
scmi_optee_private = agent;
+ ret = platform_driver_register(&scmi_optee_driver);
+ if (ret) {
+ scmi_optee_private = NULL;
+ goto err;
+ }
+
return 0;
err:
@@ -597,6 +582,8 @@ static int scmi_optee_service_remove(struct device *dev)
if (!scmi_optee_private)
return -EINVAL;
+ platform_driver_unregister(&scmi_optee_driver);
+
if (!list_empty(&scmi_optee_private->channel_list))
return -EBUSY;
@@ -618,7 +605,7 @@ static const struct tee_client_device_id scmi_optee_service_id[] = {
MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
-static struct tee_client_driver scmi_optee_driver = {
+static struct tee_client_driver scmi_optee_service_driver = {
.id_table = scmi_optee_service_id,
.driver = {
.name = "scmi-optee",
@@ -628,22 +615,18 @@ static struct tee_client_driver scmi_optee_driver = {
},
};
-static int scmi_optee_init(void)
+static int __init scmi_transport_optee_init(void)
{
- return driver_register(&scmi_optee_driver.driver);
+ return driver_register(&scmi_optee_service_driver.driver);
}
+module_init(scmi_transport_optee_init);
-static void scmi_optee_exit(void)
+static void __exit scmi_transport_optee_exit(void)
{
- if (scmi_optee_private)
- driver_unregister(&scmi_optee_driver.driver);
+ driver_unregister(&scmi_optee_service_driver.driver);
}
+module_exit(scmi_transport_optee_exit);
-const struct scmi_desc scmi_optee_desc = {
- .transport_exit = scmi_optee_exit,
- .ops = &scmi_optee_ops,
- .max_rx_timeout_ms = 30,
- .max_msg = 20,
- .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
- .sync_cmds_completed_on_ret = true,
-};
+MODULE_AUTHOR("Etienne Carriere <etienne.carriere@foss.st.com>");
+MODULE_DESCRIPTION("SCMI OPTEE Transport driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index 39936e1dd30e..f8dd108777f9 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -16,10 +16,11 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/limits.h>
+#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/slab.h>
-#include "common.h"
+#include "../common.h"
/*
* The shmem address is split into 4K page and offset.
@@ -69,23 +70,25 @@ struct scmi_smc {
unsigned long cap_id;
};
+static struct scmi_transport_core_operations *core;
+
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
struct scmi_smc *scmi_info = data;
- scmi_rx_callback(scmi_info->cinfo,
- shmem_read_header(scmi_info->shmem), NULL);
+ core->rx_callback(scmi_info->cinfo,
+ core->shmem->read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED;
}
static bool smc_chan_available(struct device_node *of_node, int idx)
{
- struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
+ struct device_node *np __free(device_node) =
+ of_parse_phandle(of_node, "shmem", 0);
if (!np)
return false;
- of_node_put(np);
return true;
}
@@ -130,9 +133,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct device *cdev = cinfo->dev;
unsigned long cap_id = ULONG_MAX;
struct scmi_smc *scmi_info;
- resource_size_t size;
- struct resource res;
- struct device_node *np;
+ struct resource res = {};
u32 func_id;
int ret;
@@ -143,31 +144,16 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!scmi_info)
return -ENOMEM;
- np = of_parse_phandle(cdev->of_node, "shmem", 0);
- if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
- of_node_put(np);
- return -ENXIO;
- }
-
- ret = of_address_to_resource(np, 0, &res);
- of_node_put(np);
- if (ret) {
- dev_err(cdev, "failed to get SCMI Tx shared memory\n");
- return ret;
- }
-
- size = resource_size(&res);
- scmi_info->shmem = devm_ioremap(dev, res.start, size);
- if (!scmi_info->shmem) {
- dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
- return -EADDRNOTAVAIL;
- }
+ scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res);
+ if (IS_ERR(scmi_info->shmem))
+ return PTR_ERR(scmi_info->shmem);
ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
if (ret < 0)
return ret;
if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) {
+ resource_size_t size = resource_size(&res);
void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8;
/* The capability-id is kept in last 8 bytes of shmem.
* +-------+ <-- 0
@@ -243,7 +229,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
*/
smc_channel_lock_acquire(scmi_info, xfer);
- shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
+ core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo);
if (scmi_info->cap_id != ULONG_MAX)
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
@@ -267,7 +253,7 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- shmem_fetch_response(scmi_info->shmem, xfer);
+ core->shmem->fetch_response(scmi_info->shmem, xfer);
}
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -287,7 +273,7 @@ static const struct scmi_transport_ops scmi_smc_ops = {
.fetch_response = smc_fetch_response,
};
-const struct scmi_desc scmi_smc_desc = {
+static struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
@@ -303,3 +289,19 @@ const struct scmi_desc scmi_smc_desc = {
.sync_cmds_completed_on_ret = true,
.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
+
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "arm,scmi-smc" },
+ { .compatible = "arm,scmi-smc-param" },
+ { .compatible = "qcom,scmi-smc" },
+ { /* Sentinel */ },
+};
+
+DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
+ scmi_of_match, core);
+module_platform_driver(scmi_smc_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_AUTHOR("Nikunj Kela <quic_nkela@quicinc.com>");
+MODULE_DESCRIPTION("SCMI SMC Transport driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index dd3459bdb9cb..d349766bc0b2 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -4,7 +4,7 @@
* (SCMI).
*
* Copyright (C) 2020-2022 OpenSynergy.
- * Copyright (C) 2021-2022 ARM Ltd.
+ * Copyright (C) 2021-2024 ARM Ltd.
*/
/**
@@ -19,6 +19,7 @@
#include <linux/completion.h>
#include <linux/errno.h>
+#include <linux/platform_device.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
@@ -27,7 +28,7 @@
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>
-#include "common.h"
+#include "../common.h"
#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
@@ -108,6 +109,8 @@ struct scmi_vio_msg {
refcount_t users;
};
+static struct scmi_transport_core_operations *core;
+
/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;
@@ -294,8 +297,9 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
if (msg) {
msg->rx_len = length;
- scmi_rx_callback(vioch->cinfo,
- msg_read_header(msg->input), msg);
+ core->rx_callback(vioch->cinfo,
+ core->msg->read_header(msg->input),
+ msg);
scmi_finalize_message(vioch, msg);
}
@@ -339,8 +343,9 @@ static void scmi_vio_deferred_tx_worker(struct work_struct *work)
* is no more processed elsewhere so no poll_lock needed.
*/
if (msg->poll_status == VIO_MSG_NOT_POLLED)
- scmi_rx_callback(vioch->cinfo,
- msg_read_header(msg->input), msg);
+ core->rx_callback(vioch->cinfo,
+ core->msg->read_header(msg->input),
+ msg);
/* Free the processed message once done */
scmi_vio_msg_release(vioch, msg);
@@ -366,23 +371,6 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
return vioch->max_msg;
}
-static int virtio_link_supplier(struct device *dev)
-{
- if (!scmi_vdev) {
- dev_notice(dev,
- "Deferring probe after not finding a bound scmi-virtio device\n");
- return -EPROBE_DEFER;
- }
-
- if (!device_link_add(dev, &scmi_vdev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER)) {
- dev_err(dev, "Adding link to supplier virtio device failed\n");
- return -ECANCELED;
- }
-
- return 0;
-}
-
static bool virtio_chan_available(struct device_node *of_node, int idx)
{
struct scmi_vio_channel *channels, *vioch = NULL;
@@ -510,10 +498,10 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
return -EBUSY;
}
- msg_tx_prepare(msg->request, xfer);
+ core->msg->tx_prepare(msg->request, xfer);
- sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
- sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+ sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer));
+ sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer));
spin_lock_irqsave(&vioch->lock, flags);
@@ -560,7 +548,7 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_vio_msg *msg = xfer->priv;
if (msg)
- msg_fetch_response(msg->input, msg->rx_len, xfer);
+ core->msg->fetch_response(msg->input, msg->rx_len, xfer);
}
static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
@@ -569,7 +557,8 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
struct scmi_vio_msg *msg = xfer->priv;
if (msg)
- msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
+ core->msg->fetch_notification(msg->input, msg->rx_len,
+ max_len, xfer);
}
/**
@@ -669,7 +658,7 @@ static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
* the message we are polling for could be alternatively delivered via usual
* IRQs callbacks on another core which happened to have IRQs enabled while we
* are actively polling for it here: in such a case it will be handled as such
- * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be
+ * by rx_callback() and the polling loop in the SCMI Core TX path will be
* transparently terminated anyway.
*
* Return: True once polling has successfully completed.
@@ -790,7 +779,6 @@ static bool virtio_poll_done(struct scmi_chan_info *cinfo,
}
static const struct scmi_transport_ops scmi_virtio_ops = {
- .link_supplier = virtio_link_supplier,
.chan_available = virtio_chan_available,
.chan_setup = virtio_chan_setup,
.chan_free = virtio_chan_free,
@@ -802,6 +790,23 @@ static const struct scmi_transport_ops scmi_virtio_ops = {
.poll_done = virtio_poll_done,
};
+static struct scmi_desc scmi_virtio_desc = {
+ .ops = &scmi_virtio_ops,
+ /* for non-realtime virtio devices */
+ .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
+ .max_msg = 0, /* overridden by virtio_get_max_msg() */
+ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
+};
+
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "arm,scmi-virtio" },
+ { /* Sentinel */ },
+};
+
+DEFINE_SCMI_TRANSPORT_DRIVER(scmi_virtio, scmi_virtio_driver, scmi_virtio_desc,
+ scmi_of_match, core);
+
static int scmi_vio_probe(struct virtio_device *vdev)
{
struct device *dev = &vdev->dev;
@@ -861,14 +866,27 @@ static int scmi_vio_probe(struct virtio_device *vdev)
}
vdev->priv = channels;
+
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ ret = platform_driver_register(&scmi_virtio_driver);
+ if (ret) {
+ vdev->priv = NULL;
+ vdev->config->del_vqs(vdev);
+ /* Ensure NULLified scmi_vdev is visible */
+ smp_store_mb(scmi_vdev, NULL);
+
+ return ret;
+ }
+
return 0;
}
static void scmi_vio_remove(struct virtio_device *vdev)
{
+ platform_driver_unregister(&scmi_virtio_driver);
+
/*
* Once we get here, virtio_chan_free() will have already been called by
* the SCMI core for any existing channel and, as a consequence, all the
@@ -913,23 +931,10 @@ static struct virtio_driver virtio_scmi_driver = {
.validate = scmi_vio_validate,
};
-static int __init virtio_scmi_init(void)
-{
- return register_virtio_driver(&virtio_scmi_driver);
-}
-
-static void virtio_scmi_exit(void)
-{
- unregister_virtio_driver(&virtio_scmi_driver);
-}
+module_virtio_driver(virtio_scmi_driver);
-const struct scmi_desc scmi_virtio_desc = {
- .transport_init = virtio_scmi_init,
- .transport_exit = virtio_scmi_exit,
- .ops = &scmi_virtio_ops,
- /* for non-realtime virtio devices */
- .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
- .max_msg = 0, /* overridden by virtio_get_max_msg() */
- .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
- .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
-};
+MODULE_AUTHOR("Igor Skalkin <igor.skalkin@opensynergy.com>");
+MODULE_AUTHOR("Peter Hilber <peter.hilber@opensynergy.com>");
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("SCMI VirtIO Transport driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
new file mode 100644
index 000000000000..2883ed24a84d
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM SCMI NXP i.MX Vendor Protocols"
+
+config IMX_SCMI_BBM_EXT
+ tristate "i.MX SCMI BBM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y if ARCH_MXC
+ help
+ This enables i.MX System BBM control logic which supports RTC
+ and BUTTON.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-bbm.
+
+config IMX_SCMI_MISC_EXT
+ tristate "i.MX SCMI MISC EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y if ARCH_MXC
+ help
+ This enables i.MX System MISC control logic such as GPIO expander
+ wakeup.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-misc.
+endmenu
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
new file mode 100644
index 000000000000..d3ee6d544924
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
new file mode 100644
index 000000000000..17799eacf06c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) NXP BBM Protocol
+ *
+ * Copyright 2024 NXP
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications BBM - " fmt
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_bbm_protocol_cmd {
+ IMX_BBM_GPR_SET = 0x3,
+ IMX_BBM_GPR_GET = 0x4,
+ IMX_BBM_RTC_ATTRIBUTES = 0x5,
+ IMX_BBM_RTC_TIME_SET = 0x6,
+ IMX_BBM_RTC_TIME_GET = 0x7,
+ IMX_BBM_RTC_ALARM_SET = 0x8,
+ IMX_BBM_BUTTON_GET = 0x9,
+ IMX_BBM_RTC_NOTIFY = 0xA,
+ IMX_BBM_BUTTON_NOTIFY = 0xB,
+};
+
+#define GET_RTCS_NR(x) le32_get_bits((x), GENMASK(23, 16))
+#define GET_GPRS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+
+#define SCMI_IMX_BBM_NOTIFY_RTC_UPDATED BIT(2)
+#define SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER BIT(1)
+#define SCMI_IMX_BBM_NOTIFY_RTC_ALARM BIT(0)
+
+#define SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG BIT(0)
+
+#define SCMI_IMX_BBM_NOTIFY_RTC_FLAG \
+ (SCMI_IMX_BBM_NOTIFY_RTC_UPDATED | SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER | \
+ SCMI_IMX_BBM_NOTIFY_RTC_ALARM)
+
+#define SCMI_IMX_BBM_EVENT_RTC_MASK GENMASK(31, 24)
+
+struct scmi_imx_bbm_info {
+ u32 version;
+ int nr_rtc;
+ int nr_gpr;
+};
+
+struct scmi_msg_imx_bbm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_imx_bbm_set_time {
+ __le32 id;
+ __le32 flags;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_imx_bbm_get_time {
+ __le32 id;
+ __le32 flags;
+};
+
+struct scmi_imx_bbm_alarm_time {
+ __le32 id;
+ __le32 flags;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_msg_imx_bbm_rtc_notify {
+ __le32 rtc_id;
+ __le32 flags;
+};
+
+struct scmi_msg_imx_bbm_button_notify {
+ __le32 flags;
+};
+
+struct scmi_imx_bbm_notify_payld {
+ __le32 flags;
+};
+
+static int scmi_imx_bbm_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_bbm_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_imx_bbm_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ pi->nr_rtc = GET_RTCS_NR(attr->attributes);
+ pi->nr_gpr = GET_GPRS_NR(attr->attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_bbm_notify(const struct scmi_protocol_handle *ph,
+ u32 src_id, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ if (message_id == IMX_BBM_RTC_NOTIFY) {
+ struct scmi_msg_imx_bbm_rtc_notify *rtc_notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*rtc_notify), 0, &t);
+ if (ret)
+ return ret;
+
+ rtc_notify = t->tx.buf;
+ rtc_notify->rtc_id = cpu_to_le32(0);
+ rtc_notify->flags =
+ cpu_to_le32(enable ? SCMI_IMX_BBM_NOTIFY_RTC_FLAG : 0);
+ } else if (message_id == IMX_BBM_BUTTON_NOTIFY) {
+ struct scmi_msg_imx_bbm_button_notify *button_notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*button_notify), 0, &t);
+ if (ret)
+ return ret;
+
+ button_notify = t->tx.buf;
+ button_notify->flags = cpu_to_le32(enable ? 1 : 0);
+ } else {
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static enum scmi_imx_bbm_protocol_cmd evt_2_cmd[] = {
+ IMX_BBM_RTC_NOTIFY,
+ IMX_BBM_BUTTON_NOTIFY
+};
+
+static int scmi_imx_bbm_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_imx_bbm_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_imx_bbm_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_imx_bbm_notify_payld *p = payld;
+ struct scmi_imx_bbm_notif_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ return NULL;
+
+ if (evt_id == SCMI_EVENT_IMX_BBM_RTC) {
+ r->is_rtc = true;
+ r->is_button = false;
+ r->timestamp = timestamp;
+ r->rtc_id = le32_get_bits(p->flags, SCMI_IMX_BBM_EVENT_RTC_MASK);
+ r->rtc_evt = le32_get_bits(p->flags, SCMI_IMX_BBM_NOTIFY_RTC_FLAG);
+ dev_dbg(ph->dev, "RTC: %d evt: %x\n", r->rtc_id, r->rtc_evt);
+ *src_id = r->rtc_evt;
+ } else if (evt_id == SCMI_EVENT_IMX_BBM_BUTTON) {
+ r->is_rtc = false;
+ r->is_button = true;
+ r->timestamp = timestamp;
+ dev_dbg(ph->dev, "BBM Button\n");
+ *src_id = 0;
+ } else {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return r;
+}
+
+static const struct scmi_event scmi_imx_bbm_events[] = {
+ {
+ .id = SCMI_EVENT_IMX_BBM_RTC,
+ .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld),
+ .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report),
+ },
+ {
+ .id = SCMI_EVENT_IMX_BBM_BUTTON,
+ .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld),
+ .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report),
+ },
+};
+
+static const struct scmi_event_ops scmi_imx_bbm_event_ops = {
+ .set_notify_enabled = scmi_imx_bbm_set_notify_enabled,
+ .fill_custom_report = scmi_imx_bbm_fill_custom_report,
+};
+
+static const struct scmi_protocol_events scmi_imx_bbm_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &scmi_imx_bbm_event_ops,
+ .evts = scmi_imx_bbm_events,
+ .num_events = ARRAY_SIZE(scmi_imx_bbm_events),
+ .num_sources = 1,
+};
+
+static int scmi_imx_bbm_rtc_time_set(const struct scmi_protocol_handle *ph,
+ u32 rtc_id, u64 sec)
+{
+ struct scmi_imx_bbm_info *pi = ph->get_priv(ph);
+ struct scmi_imx_bbm_set_time *cfg;
+ struct scmi_xfer *t;
+ int ret;
+
+ if (rtc_id >= pi->nr_rtc)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_SET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(rtc_id);
+ cfg->flags = 0;
+ cfg->value_low = cpu_to_le32(lower_32_bits(sec));
+ cfg->value_high = cpu_to_le32(upper_32_bits(sec));
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_bbm_rtc_time_get(const struct scmi_protocol_handle *ph,
+ u32 rtc_id, u64 *value)
+{
+ struct scmi_imx_bbm_info *pi = ph->get_priv(ph);
+ struct scmi_imx_bbm_get_time *cfg;
+ struct scmi_xfer *t;
+ int ret;
+
+ if (rtc_id >= pi->nr_rtc)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_GET, sizeof(*cfg),
+ sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(rtc_id);
+ cfg->flags = 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_bbm_rtc_alarm_set(const struct scmi_protocol_handle *ph,
+ u32 rtc_id, bool enable, u64 sec)
+{
+ struct scmi_imx_bbm_info *pi = ph->get_priv(ph);
+ struct scmi_imx_bbm_alarm_time *cfg;
+ struct scmi_xfer *t;
+ int ret;
+
+ if (rtc_id >= pi->nr_rtc)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_ALARM_SET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(rtc_id);
+ cfg->flags = enable ?
+ cpu_to_le32(SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG) : 0;
+ cfg->value_low = cpu_to_le32(lower_32_bits(sec));
+ cfg->value_high = cpu_to_le32(upper_32_bits(sec));
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_bbm_button_get(const struct scmi_protocol_handle *ph, u32 *state)
+{
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, IMX_BBM_BUTTON_GET, 0, sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *state = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_bbm_proto_ops scmi_imx_bbm_proto_ops = {
+ .rtc_time_get = scmi_imx_bbm_rtc_time_get,
+ .rtc_time_set = scmi_imx_bbm_rtc_time_set,
+ .rtc_alarm_set = scmi_imx_bbm_rtc_alarm_set,
+ .button_get = scmi_imx_bbm_button_get,
+};
+
+static int scmi_imx_bbm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ u32 version;
+ int ret;
+ struct scmi_imx_bbm_info *binfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM BBM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ binfo = devm_kzalloc(ph->dev, sizeof(*binfo), GFP_KERNEL);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = scmi_imx_bbm_attributes_get(ph, binfo);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, binfo, version);
+}
+
+static const struct scmi_protocol scmi_imx_bbm = {
+ .id = SCMI_PROTOCOL_IMX_BBM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_bbm_protocol_init,
+ .ops = &scmi_imx_bbm_proto_ops,
+ .events = &scmi_imx_bbm_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = "NXP",
+ .sub_vendor_id = "IMX",
+};
+module_scmi_protocol(scmi_imx_bbm);
+
+MODULE_DESCRIPTION("i.MX SCMI BBM driver");
+MODULE_LICENSE("GPL");
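
A client of this vendor protocol would obtain the ops table through the SCMI
core. A minimal sketch, assuming the standard devm_protocol_get() client API;
the driver and function names are hypothetical:

	/* Probe of a hypothetical scmi_driver client */
	static int imx_bbm_client_probe(struct scmi_device *sdev)
	{
		const struct scmi_imx_bbm_proto_ops *ops;
		struct scmi_protocol_handle *ph;
		u64 secs;
		int ret;

		if (!sdev->handle)
			return -ENODEV;

		/* Bind to the NXP BBM vendor protocol and get its ops */
		ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		/* The on-chip RTC is ID 0 */
		ret = ops->rtc_time_get(ph, 0, &secs);
		if (ret)
			return ret;

		dev_info(&sdev->dev, "BBM RTC time: %llu s\n", secs);
		return 0;
	}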
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
new file mode 100644
index 000000000000..a86ab9b35953
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) NXP MISC Protocol
+ *
+ * Copyright 2024 NXP
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications MISC - " fmt
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+#define MAX_MISC_CTRL_SOURCES GENMASK(15, 0)
+
+enum scmi_imx_misc_protocol_cmd {
+ SCMI_IMX_MISC_CTRL_SET = 0x3,
+ SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+};
+
+struct scmi_imx_misc_info {
+ u32 version;
+ u32 nr_dev_ctrl;
+ u32 nr_brd_ctrl;
+ u32 nr_reason;
+};
+
+struct scmi_msg_imx_misc_protocol_attributes {
+ __le32 attributes;
+};
+
+#define GET_BRD_CTRLS_NR(x) le32_get_bits((x), GENMASK(31, 24))
+#define GET_REASONS_NR(x) le32_get_bits((x), GENMASK(23, 16))
+#define GET_DEV_CTRLS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+#define BRD_CTRL_START_ID BIT(15)
+
+struct scmi_imx_misc_ctrl_set_in {
+ __le32 id;
+ __le32 num;
+ __le32 value[];
+};
+
+struct scmi_imx_misc_ctrl_notify_in {
+ __le32 ctrl_id;
+ __le32 flags;
+};
+
+struct scmi_imx_misc_ctrl_notify_payld {
+ __le32 ctrl_id;
+ __le32 flags;
+};
+
+struct scmi_imx_misc_ctrl_get_out {
+ __le32 num;
+ __le32 val[];
+};
+
+static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_misc_info *mi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_imx_misc_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ mi->nr_dev_ctrl = GET_DEV_CTRLS_NR(attr->attributes);
+ mi->nr_brd_ctrl = GET_BRD_CTRLS_NR(attr->attributes);
+ mi->nr_reason = GET_REASONS_NR(attr->attributes);
+		dev_info(ph->dev, "i.MX MISC NUM DEV CTRL: %d, NUM BRD CTRL: %d, NUM Reason: %d\n",
+ mi->nr_dev_ctrl, mi->nr_brd_ctrl, mi->nr_reason);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_ctrl_validate_id(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id)
+{
+ struct scmi_imx_misc_info *mi = ph->get_priv(ph);
+
+ /*
+ * [0, BRD_CTRL_START_ID) is for Dev Ctrl which is SOC related
+ * [BRD_CTRL_START_ID, 0xffff) is for Board Ctrl which is board related
+ */
+ if (ctrl_id < BRD_CTRL_START_ID && ctrl_id > mi->nr_dev_ctrl)
+ return -EINVAL;
+ if (ctrl_id >= BRD_CTRL_START_ID + mi->nr_brd_ctrl)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_misc_ctrl_notify(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 evt_id, u32 flags)
+{
+ struct scmi_imx_misc_ctrl_notify_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_NOTIFY,
+ sizeof(*in), 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->ctrl_id = cpu_to_le32(ctrl_id);
+ in->flags = cpu_to_le32(flags);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_misc_ctrl_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+	/* Enabling is done via the misc_ctrl_req_notify() op, with control-specific flags */
+ if (enable)
+ return 0;
+
+ ret = scmi_imx_misc_ctrl_notify(ph, src_id, evt_id, 0);
+ if (ret)
+ dev_err(ph->dev, "FAIL_ENABLED - evt[%X] src[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *
+scmi_imx_misc_ctrl_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_imx_misc_ctrl_notify_payld *p = payld;
+ struct scmi_imx_misc_ctrl_notify_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->ctrl_id = le32_to_cpu(p->ctrl_id);
+ r->flags = le32_to_cpu(p->flags);
+ if (src_id)
+ *src_id = r->ctrl_id;
+ dev_dbg(ph->dev, "%s: ctrl_id: %d flags: %d\n", __func__,
+ r->ctrl_id, r->flags);
+
+ return r;
+}
+
+static const struct scmi_event_ops scmi_imx_misc_event_ops = {
+ .set_notify_enabled = scmi_imx_misc_ctrl_set_notify_enabled,
+ .fill_custom_report = scmi_imx_misc_ctrl_fill_custom_report,
+};
+
+static const struct scmi_event scmi_imx_misc_events[] = {
+ {
+ .id = SCMI_EVENT_IMX_MISC_CONTROL,
+ .max_payld_sz = sizeof(struct scmi_imx_misc_ctrl_notify_payld),
+ .max_report_sz = sizeof(struct scmi_imx_misc_ctrl_notify_report),
+ },
+};
+
+static struct scmi_protocol_events scmi_imx_misc_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &scmi_imx_misc_event_ops,
+ .evts = scmi_imx_misc_events,
+ .num_events = ARRAY_SIZE(scmi_imx_misc_events),
+ .num_sources = MAX_MISC_CTRL_SOURCES,
+};
+
+static int scmi_imx_misc_ctrl_get(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 *num, u32 *val)
+{
+ struct scmi_imx_misc_ctrl_get_out *out;
+ struct scmi_xfer *t;
+ int ret, i;
+ int max_msg_size = ph->hops->get_max_msg_size(ph);
+ int max_num = (max_msg_size - sizeof(*out)) / sizeof(__le32);
+
+ ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(ctrl_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ *num = le32_to_cpu(out->num);
+
+ if (*num >= max_num ||
+ *num * sizeof(__le32) > t->rx.len - sizeof(__le32)) {
+ ph->xops->xfer_put(ph, t);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < *num; i++)
+ val[i] = le32_to_cpu(out->val[i]);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 num, u32 *val)
+{
+ struct scmi_imx_misc_ctrl_set_in *in;
+ struct scmi_xfer *t;
+ int ret, i;
+ int max_msg_size = ph->hops->get_max_msg_size(ph);
+ int max_num = (max_msg_size - sizeof(*in)) / sizeof(__le32);
+
+ ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id);
+ if (ret)
+ return ret;
+
+ if (num > max_num)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->id = cpu_to_le32(ctrl_id);
+ in->num = cpu_to_le32(num);
+ for (i = 0; i < num; i++)
+ in->value[i] = cpu_to_le32(val[i]);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
+ .misc_ctrl_set = scmi_imx_misc_ctrl_set,
+ .misc_ctrl_get = scmi_imx_misc_ctrl_get,
+ .misc_ctrl_req_notify = scmi_imx_misc_ctrl_notify,
+};
+
+static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_info *minfo;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM MISC Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ minfo = devm_kzalloc(ph->dev, sizeof(*minfo), GFP_KERNEL);
+ if (!minfo)
+ return -ENOMEM;
+
+ ret = scmi_imx_misc_attributes_get(ph, minfo);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, minfo, version);
+}
+
+static const struct scmi_protocol scmi_imx_misc = {
+ .id = SCMI_PROTOCOL_IMX_MISC,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_misc_protocol_init,
+ .ops = &scmi_imx_misc_proto_ops,
+ .events = &scmi_imx_misc_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = "NXP",
+ .sub_vendor_id = "IMX",
+};
+module_scmi_protocol(scmi_imx_misc);
+
+MODULE_DESCRIPTION("i.MX SCMI MISC driver");
+MODULE_LICENSE("GPL");
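
Notifications from this protocol reach clients through the SCMI notification
framework. A minimal sketch, assuming the standard
devm_event_notifier_register() client API; the client names are hypothetical:

	static int imx_misc_notifier_cb(struct notifier_block *nb,
					unsigned long event, void *data)
	{
		const struct scmi_imx_misc_ctrl_notify_report *r = data;

		pr_info("MISC ctrl %u event, flags 0x%x\n", r->ctrl_id, r->flags);
		return NOTIFY_OK;
	}

	static struct notifier_block imx_misc_nb = {
		.notifier_call = imx_misc_notifier_cb,
	};

	/* In a hypothetical client's probe(); src_id selects the control */
	static int imx_misc_client_probe(struct scmi_device *sdev)
	{
		static const u32 src_id = 0;

		return sdev->handle->notify_ops->devm_event_notifier_register(sdev,
				SCMI_PROTOCOL_IMX_MISC, SCMI_EVENT_IMX_MISC_CONTROL,
				&src_id, &imx_misc_nb);
	}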
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
new file mode 100644
index 000000000000..b2dfd6c46ca2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -0,0 +1,886 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+================================================================================
+i.MX95 System Control and Management Interface (SCMI) Vendor Protocols Extension
+================================================================================
+
+:Copyright: |copy| 2024 NXP
+
+:Author: Peng Fan <peng.fan@nxp.com>
+
+The System Manager (SM) is a low-level system function which runs on a System
+Control Processor (SCP) to support isolation and management of power domains,
+clocks, resets, sensors, pins, etc. on complex application processors. It often
+runs on a Cortex-M processor and provides an abstraction to many of the
+underlying features of the hardware. The primary purpose of the SM is to allow
+isolation between software running on different cores in the SoC. It does this
+by having exclusive access to critical resources such as those controlling
+power, clocks, reset, PMIC, etc., and then providing an RPC interface to
+clients. This allows the SM to provide access control, arbitration, and
+aggregation policies for those shared critical resources.
+
+SM introduces the concept of a Logic Machine (LM), which is analogous to a VM;
+each LM has its own instance of SCMI, and all normal SCMI calls apply only to
+that LM. That includes boot, shutdown, reset, suspend, wake, etc. Each LM
+(e.g. A55 and M7) is completely isolated from the others, and each LM has its
+own communication channels talking to the same SCMI server.
+
+This document covers all the information necessary to understand, maintain,
+port, and deploy the SM on supported processors.
+
+The SM implements an interface compliant with the Arm SCMI Specification
+with additional vendor specific extensions.
+
+SCMI_BBM: System Control and Management BBM Vendor Protocol
+==============================================================
+
+This protocol is intended to provide access to the battery-backed module
+(BBM), which contains persistent storage (GPR), an RTC, and the ON/OFF
+button. The protocol can also provide access to similar functions implemented
+via external board components. The BBM protocol provides functions to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Read/write GPR.
+- Discover the RTCs available in the system.
+- Read/write the RTC time in seconds and ticks.
+- Set an alarm (per LM) in seconds.
+- Get notifications on RTC update, alarm, or rollover.
+- Get notification on ON/OFF button activity.
+
+For most SoCs, there is one on-chip RTC (e.g. in the BBNSM) and this is RTC
+ID 0. Board code can add additional GPRs and RTCs.
+
+GPRs and the RTC time are not aggregated: setting either takes effect for all
+agents, so exclusive access would normally be granted to one agent for each.
+However, RTC alarms are maintained per LM and the hardware is programmed with
+the next nearest alarm time, so only one agent in an LM should be given
+access rights to set an RTC alarm.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x81
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x81
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes | Bits[23:16] Number of RTCs.                               |
+| | Bits[15:0] Number of persistent storage (GPR) words. |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x81
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific function in the |
+| |protocol. For all functions in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+BBM_GPR_SET
+~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of GPR to write |
++------------------+-----------------------------------------------------------+
+|uint32 value |32-bit value to write to the GPR |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the GPR was successfully written. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to write |
+| |the specified GPR |
++------------------+-----------------------------------------------------------+
+
+BBM_GPR_GET
+~~~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of GPR to read |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the GPR was successfully read. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to read |
+| |the specified GPR. |
++------------------+-----------------------------------------------------------+
+|uint32 value |32-bit value read from the GPR |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: returned the attributes. |
+| |NOT_FOUND: Index is invalid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:24] Bit width of RTC seconds.                      |
+|                  |Bits[23:16] Bit width of RTC ticks.                        |
+| |Bits[15:0] RTC ticks per second |
++------------------+-----------------------------------------------------------+
+|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length |
+| |describing the RTC name |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_TIME_SET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] RTC time format: |
+| |Set to 1 if the time is in ticks. |
+| |Set to 0 if the time is in seconds |
++------------------+-----------------------------------------------------------+
+|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. |
+| |Upper word: Upper 32 bits of the time in seconds/ticks. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: RTC time was successfully set. |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
+| |INVALID_PARAMETERS: time is not valid |
+| |(beyond the range of the RTC). |
+| |DENIED: the agent does not have permission to set the RTC. |
++------------------+-----------------------------------------------------------+
+
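+For reference, the Linux driver (imx-sm-bbm.c) models this message layout
+with a matching little-endian structure::
+
+    struct scmi_imx_bbm_set_time {
+            __le32 id;
+            __le32 flags;
+            __le32 value_low;
+            __le32 value_high;
+    };
+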
+BBM_RTC_TIME_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] RTC time format: |
+| |Set to 1 if the time is in ticks. |
+| |Set to 0 if the time is in seconds |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: RTC time was successfully returned.               |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
++------------------+-----------------------------------------------------------+
+|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. |
+| |Upper word: Upper 32 bits of the time in seconds/ticks. |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_ALARM_SET
+~~~~~~~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] RTC enable flag: |
+| |Set to 1 if the RTC alarm should be enabled. |
+| |Set to 0 if the RTC alarm should be disabled |
++------------------+-----------------------------------------------------------+
+|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds. |
+| |Upper word: Upper 32 bits of the time in seconds. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: RTC alarm was successfully set.                   |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
+| |INVALID_PARAMETERS: time is not valid |
+| |(beyond the range of the RTC). |
+| |DENIED: the agent does not have permission to set the RTC |
+| |alarm |
++------------------+-----------------------------------------------------------+
+
+BBM_BUTTON_GET
+~~~~~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the button status was read. |
+| |Other value: ARM SCMI Specification status code definitions|
++------------------+-----------------------------------------------------------+
+|uint32 state |State of the ON/OFF button. 1: ON, 0: OFF |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_NOTIFY
+~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags |
+| |Bits[31:3] Reserved, must be zero. |
+| |Bit[2] Update enable: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Rollover enable: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Alarm enable: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: notification configuration was successfully |
+| |updated. |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
+| |DENIED: the agent does not have permission to request RTC |
+| |notifications. |
++------------------+-----------------------------------------------------------+
+
+BBM_BUTTON_NOTIFY
+~~~~~~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Enable button: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: notification configuration was successfully |
+| |updated. |
+| |DENIED: the agent does not have permission to request |
+| |button notifications. |
++------------------+-----------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x81
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+BBM_RTC_EVENT
+~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |RTC events: |
+| |Bits[31:2] Reserved, must be zero. |
+| |Bit[1] RTC rollover notification: |
+| |1 RTC rollover detected. |
+| |0 no RTC rollover detected. |
+| |Bit[0] RTC alarm notification: |
+| |1 RTC alarm generated. |
+| |0 no RTC alarm generated. |
++------------------+-----------------------------------------------------------+
+
+BBM_BUTTON_EVENT
+~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags      |Button events:                                             |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Button notification: |
+| |1 button change detected. |
+| |0 no button change detected. |
++------------------+-----------------------------------------------------------+
+
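+In Linux, these BBM notifications surface through the SCMI notification
+framework: a client registers a notifier block whose callback receives a
+struct scmi_imx_bbm_notif_report. A minimal sketch of such a callback (the
+function name is illustrative)::
+
+    static int bbm_notifier_cb(struct notifier_block *nb,
+                               unsigned long event, void *data)
+    {
+            struct scmi_imx_bbm_notif_report *r = data;
+
+            if (r->is_button)
+                    pr_info("ON/OFF button event\n");
+            else if (r->is_rtc)
+                    pr_info("RTC %d event 0x%x\n", r->rtc_id, r->rtc_evt);
+            return NOTIFY_OK;
+    }
+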
+SCMI_MISC: System Control and Management MISC Vendor Protocol
+================================================================
+
+This protocol provides miscellaneous functions, including controls for
+miscellaneous settings/actions that must be exposed from the SM to agents.
+They are device specific and are usually defined to access bit fields in
+various mix block control modules, IOMUX_GPR, and other GPR/CSR owned by the
+SM. This protocol supports the following functions:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Set/Get a control.
+- Initiate an action on a control.
+- Obtain platform (i.e. SM) build information.
+- Obtain ROM passover data.
+- Read boot/shutdown/reset information for the LM or the system.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x84
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:24] Reserved, must be zero. |
+| |Bits[23:16] Number of reset reasons. |
+| |Bits[15:0] Number of controls |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific function in the |
+| |protocol. For all functions in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_SET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the value data in words |
++------------------+-----------------------------------------------------------+
+|uint32 val[8] |value data array |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the control was set successfully. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to set the |
+| |control |
++------------------+-----------------------------------------------------------+
+
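+For reference, the Linux driver (imx-sm-misc.c) models this message layout
+with a matching little-endian structure::
+
+    struct scmi_imx_misc_ctrl_set_in {
+            __le32 id;
+            __le32 num;
+            __le32 value[];
+    };
+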
+MISC_CONTROL_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the control was read successfully.             |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to get the |
+| |control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_ACTION
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|uint32 action |Action for the control |
++------------------+-----------------------------------------------------------+
+|uint32 numarg |Size of the argument data, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|arg[0, numarg -1] |Argument data array |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the action completed successfully.             |
+|                  |NOT_FOUND: if the index is not valid.                      |
+|                  |DENIED: if the agent does not have permission to initiate  |
+|                  |the action on the control                                  |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
+MISC_DISCOVER_BUILD_INFO
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This function is used to obtain the build commit, date, time, and number.
+
+message_id: 0x6
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the build info was returned successfully.      |
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 buildnum |Build number |
++------------------+-----------------------------------------------------------+
+|uint32 buildcommit|Most significant 32 bits of the git commit hash |
++------------------+-----------------------------------------------------------+
+|uint8 date[16] |Date of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+|uint8 time[16] |Time of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+
+MISC_ROM_PASSOVER_GET
+~~~~~~~~~~~~~~~~~~~~~
+
+ROM passover data is information exported by the ROM that can be used by
+others. It includes the boot device, instance, type, mode, etc. This function
+is used to obtain the ROM passover data. The returned block of words is
+structured as defined in the ROM passover section of the SoC RM.
+
+message_id: 0x7
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status      |SUCCESS: if the data was returned successfully.            |
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the passover data in words, max 13 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|data[0, num - 1] |Passover data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_NOTIFY
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of control |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags, varies by control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: notification configuration was successfully |
+| |updated. |
+|                  |NOT_FOUND: if the control id does not exist.               |
+|                  |INVALID_PARAMETERS: if the input attributes flag specifies |
+|                  |unsupported or invalid configurations.                     |
+| |DENIED: if the calling agent is not permitted to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
+
+MISC_RESET_REASON_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 reasonid |Identifier for the reason |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid reason attributes are returned |
+| |NOT_FOUND: if reasonId pertains to a non-existent reason. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Reason attributes. This parameter has the following        |
+|                  |format: Bits[31:0] Reserved, must be zero                  |
++------------------+-----------------------------------------------------------+
+|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length |
+| |describing the reason |
++------------------+-----------------------------------------------------------+
+
+MISC_RESET_REASON_GET
+~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 flags |Reason flags. This parameter has the following format: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] System: |
+| |Set to 1 to return the system reason. |
+| |Set to 0 to return the LM reason |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status        |SUCCESS: reset reason returned                           |
++--------------------+---------------------------------------------------------+
+|uint32 bootflags |Boot reason flags. This parameter has the format: |
+| |Bits[31] Valid. |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Reserved, must be zero. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin. |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID. |
+| |Bit[7:0] Reason |
++--------------------+---------------------------------------------------------+
+|uint32 shutdownflags|Shutdown reason flags. This parameter has the format: |
+| |Bits[31] Valid. |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Number of valid extended info words. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin. |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID. |
+| |Bit[7:0] Reason |
++--------------------+---------------------------------------------------------+
+|uint32 extinfo[8] |Array of extended info words |
++--------------------+---------------------------------------------------------+
+
+MISC_SI_INFO_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status        |SUCCESS: silicon info returned                           |
++--------------------+---------------------------------------------------------+
+|uint32 deviceid |Silicon specific device ID |
++--------------------+---------------------------------------------------------+
+|uint32 sirev |Silicon specific revision |
++--------------------+---------------------------------------------------------+
+|uint32 partnum |Silicon specific part number |
++--------------------+---------------------------------------------------------+
+|uint8 siname[16] |Silicon name/revision. Null terminated ASCII string of up|
+| |to 16 bytes in length |
++--------------------+---------------------------------------------------------+
+
+MISC_CFG_INFO_GET
+~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status        |SUCCESS: config name returned                            |
+| |NOT_SUPPORTED: name not available |
++--------------------+---------------------------------------------------------+
+|uint32 msel |Mode selector value |
++--------------------+---------------------------------------------------------+
+|uint8 cfgname[16] |config file basename. Null terminated ASCII string of up |
+| |to 16 bytes in length |
++--------------------+---------------------------------------------------------+
+
+MISC_SYSLOG_GET
+~~~~~~~~~~~~~~~
+
+message_id: 0xD
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 flags |Device specific flags that might impact the data returned|
+| |or clearing of the data |
++--------------------+---------------------------------------------------------+
+|uint32 logindex |Index to the first log word. Will be the first element in|
+| |the return array |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status        |SUCCESS: system log returned                             |
++--------------------+---------------------------------------------------------+
+|uint32 numLogflags |Descriptor for the log data returned by this call. |
+| |Bits[31:20] Number of remaining log words. |
+|                    |Bits[19:12] Reserved, must be zero.                      |
+| |Bits[11:0] Number of log words that are returned by this |
+| |call |
++--------------------+---------------------------------------------------------+
+|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
++--------------------+---------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+MISC_CONTROL_EVENT
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 ctrlid |Identifier for the control that caused the event. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Event flags, varies by control. |
++------------------+-----------------------------------------------------------+
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index 2175ffd6cef5..fda6a1573609 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -11,7 +11,7 @@
#include "protocols.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001
#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
#define REMAINING_LEVELS_MASK GENMASK(31, 16)
@@ -229,8 +229,10 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
/* Retrieve domain attributes at first ... */
put_unaligned_le32(dom, td->tx.buf);
/* Skip domain on comms error */
- if (ph->xops->do_xfer(ph, td))
+ if (ph->xops->do_xfer(ph, td)) {
+ ph->xops->reset_rx_to_maxsz(ph, td);
continue;
+ }
v = vinfo->domains + dom;
v->id = dom;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index d33ccbc4a2c6..685098f9626f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1229,7 +1229,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
struct efi_boot_memmap *map);
void process_unaccepted_memory(u64 start, u64 end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, unsigned long size);
void arch_accept_memory(phys_addr_t start, phys_addr_t end);
#endif
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
index c295ea3a6efc..757dbe734a47 100644
--- a/drivers/firmware/efi/libstub/unaccepted_memory.c
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -177,9 +177,10 @@ void process_unaccepted_memory(u64 start, u64 end)
start / unit_size, (end - start) / unit_size);
}
-void accept_memory(phys_addr_t start, phys_addr_t end)
+void accept_memory(phys_addr_t start, unsigned long size)
{
unsigned long range_start, range_end;
+ phys_addr_t end = start + size;
unsigned long bitmap_size;
u64 unit_size;
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 50f6503fe49f..c2c067eff634 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -30,11 +30,12 @@ static LIST_HEAD(accepting_list);
* - memory that is below phys_base;
* - memory that is above the memory addressable by the bitmap;
*/
-void accept_memory(phys_addr_t start, phys_addr_t end)
+void accept_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
struct accept_range range, *entry;
+ phys_addr_t end = start + size;
unsigned long flags;
u64 unit_size;
@@ -74,13 +75,13 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
* "guard" page is accepted in addition to the memory that needs to be
* used:
*
- * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
- * checks up to end+unit_size if 'end' is aligned on a unit_size
- * boundary.
+ * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
+ * checks up to the next unit_size if 'start+size' is aligned on a
+ * unit_size boundary.
*
- * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
- * 'end' is aligned on a unit_size boundary. (immediately following
- * this comment)
+ * 2. Implicitly extend accept_memory(start, size) to the next unit_size
+ *    if 'start+size' is aligned on a unit_size boundary. (immediately
+ * following this comment)
*/
if (!(end % unit_size))
end += unit_size;
@@ -156,9 +157,10 @@ retry:
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
+ phys_addr_t end = start + size;
unsigned long flags;
bool ret = false;
u64 unit_size;
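
The two EFI hunks above change accept_memory() and range_contains_unaccepted_memory() from taking an exclusive end address to taking a (start, size) pair. A hedged sketch of the matching call-site conversion (the wrapper is invented; kernel types assumed):

    /* Hypothetical caller: where an exclusive 'end' was passed before,
     * 'end - start' is passed now. */
    static void example_accept(phys_addr_t start, phys_addr_t end)
    {
            if (range_contains_unaccepted_memory(start, end - start))
                    accept_memory(start, end - start);
    }
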
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 183613f82a11..477d3f32d99a 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -22,3 +22,14 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+
+config IMX_SCMI_MISC_DRV
+ tristate "IMX SCMI MISC Protocol driver"
+ depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that could provide misc functions such as board control.
+
+ This driver can also be built as a module.
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8f9f04a513a8..8d046c341be8 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
diff --git a/drivers/firmware/imx/sm-misc.c b/drivers/firmware/imx/sm-misc.c
new file mode 100644
index 000000000000..fc3ee12c2be8
--- /dev/null
+++ b/drivers/firmware/imx/sm-misc.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_misc_proto_ops *imx_misc_ctrl_ops;
+static struct scmi_protocol_handle *ph;
+struct notifier_block scmi_imx_misc_ctrl_nb;
+
+int scmi_imx_misc_ctrl_set(u32 id, u32 val)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_misc_ctrl_ops->misc_ctrl_set(ph, id, 1, &val);
+};
+EXPORT_SYMBOL(scmi_imx_misc_ctrl_set);
+
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_misc_ctrl_ops->misc_ctrl_get(ph, id, num, val);
+}
+EXPORT_SYMBOL(scmi_imx_misc_ctrl_get);
+
+static int scmi_imx_misc_ctrl_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ /*
+ * notifier_chain_register requires a valid notifier_block and
+ * valid notifier_call. SCMI_EVENT_IMX_MISC_CONTROL is needed
+ * to let SCMI firmware enable control events, but the hook here
+ * is just a dummy function to avoid kernel panic as of now.
+ */
+ return 0;
+}
+
+static int scmi_imx_misc_ctrl_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+ struct device_node *np = sdev->dev.of_node;
+ u32 src_id, flags;
+ int ret, i, num;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_misc_ctrl_ops) {
+ dev_err(&sdev->dev, "misc ctrl already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_misc_ctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_MISC, &ph);
+ if (IS_ERR(imx_misc_ctrl_ops))
+ return PTR_ERR(imx_misc_ctrl_ops);
+
+ num = of_property_count_u32_elems(np, "nxp,ctrl-ids");
+ if (num % 2) {
+ dev_err(&sdev->dev, "Invalid wakeup-sources\n");
+ return -EINVAL;
+ }
+
+ scmi_imx_misc_ctrl_nb.notifier_call = &scmi_imx_misc_ctrl_notifier;
+ for (i = 0; i < num; i += 2) {
+ ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i, &src_id);
+ if (ret) {
+ dev_err(&sdev->dev, "Failed to read ctrl-id: %i\n", i);
+ continue;
+ }
+
+ ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i + 1, &flags);
+ if (ret) {
+ dev_err(&sdev->dev, "Failed to read ctrl-id value: %d\n", i + 1);
+ continue;
+ }
+
+ ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_MISC,
+ SCMI_EVENT_IMX_MISC_CONTROL,
+ &src_id,
+ &scmi_imx_misc_ctrl_nb);
+ if (ret) {
+ dev_err(&sdev->dev, "Failed to register scmi misc event: %d\n", src_id);
+ } else {
+ ret = imx_misc_ctrl_ops->misc_ctrl_req_notify(ph, src_id,
+ SCMI_EVENT_IMX_MISC_CONTROL,
+ flags);
+ if (ret)
+ dev_err(&sdev->dev, "Failed to req notify: %d\n", src_id);
+ }
+ }
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_MISC, "imx-misc-ctrl" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_misc_ctrl_driver = {
+ .name = "scmi-imx-misc-ctrl",
+ .probe = scmi_imx_misc_ctrl_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_misc_ctrl_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM MISC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
index 73a1a41bf92d..b477d54b495a 100644
--- a/drivers/firmware/qcom/Kconfig
+++ b/drivers/firmware/qcom/Kconfig
@@ -41,17 +41,6 @@ config QCOM_TZMEM_MODE_SHMBRIDGE
endchoice
-config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
- bool "Qualcomm download mode enabled by default"
- depends on QCOM_SCM
- help
- A device with "download mode" enabled will upon an unexpected
- warm-restart enter a special debug mode that allows the user to
- "download" memory content over USB for offline postmortem analysis.
- The feature can be enabled/disabled on the kernel command line.
-
- Say Y here to enable "download mode" by default.
-
config QCOM_QSEECOM
bool "Qualcomm QSEECOM interface driver"
depends on QCOM_SCM=y
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 6fefa4fe80e8..447246bd04be 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -715,6 +715,10 @@ static int qcuefi_set_reference(struct qcuefi_client *qcuefi)
static struct qcuefi_client *qcuefi_acquire(void)
{
mutex_lock(&__qcuefi_lock);
+ if (!__qcuefi) {
+ mutex_unlock(&__qcuefi_lock);
+ return NULL;
+ }
return __qcuefi;
}
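
With the NULL check added, qcuefi_acquire() returns with the lock held only when a client is registered. A minimal sketch of the expected caller pattern, assuming the matching qcuefi_release() unlock helper from the same file (the middle of the function is a placeholder):

    static efi_status_t example_uefi_op(void)
    {
            struct qcuefi_client *qcuefi = qcuefi_acquire();

            if (!qcuefi)
                    return EFI_NOT_READY;	/* no secure app registered yet */

            /* ... exchange requests with the UEFI secure app ... */

            qcuefi_release();
            return EFI_SUCCESS;
    }
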
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 00c379a3cceb..10986cb11ec0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -32,8 +33,7 @@
#include "qcom_scm.h"
#include "qcom_tzmem.h"
-static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
-module_param(download_mode, bool, 0);
+static u32 download_mode;
struct qcom_scm {
struct device *dev;
@@ -126,6 +126,8 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
#define QCOM_DLOAD_MASK GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP 0
#define QCOM_DLOAD_FULLDUMP 1
+#define QCOM_DLOAD_MINIDUMP 2
+#define QCOM_DLOAD_BOTHDUMP 3
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
@@ -134,6 +136,13 @@ static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_LEGACY] = "smc legacy",
};
+static const char * const download_mode_name[] = {
+ [QCOM_DLOAD_NODUMP] = "off",
+ [QCOM_DLOAD_FULLDUMP] = "full",
+ [QCOM_DLOAD_MINIDUMP] = "mini",
+ [QCOM_DLOAD_BOTHDUMP] = "full,mini",
+};
+
static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
@@ -526,17 +535,16 @@ static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val
return qcom_scm_io_writel(addr, new);
}
-static void qcom_scm_set_download_mode(bool enable)
+static void qcom_scm_set_download_mode(u32 dload_mode)
{
- u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
int ret = 0;
if (__scm->dload_mode_addr) {
ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
- FIELD_PREP(QCOM_DLOAD_MASK, val));
+ FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
- ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
} else {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
@@ -1724,7 +1732,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "microsoft,romulus13", },
+ { .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
@@ -1886,6 +1897,45 @@ out:
return IRQ_HANDLED;
}
+static int get_download_mode(char *buffer, const struct kernel_param *kp)
+{
+ if (download_mode >= ARRAY_SIZE(download_mode_name))
+ return sysfs_emit(buffer, "unknown mode\n");
+
+ return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
+}
+
+static int set_download_mode(const char *val, const struct kernel_param *kp)
+{
+ bool tmp;
+ int ret;
+
+ ret = sysfs_match_string(download_mode_name, val);
+ if (ret < 0) {
+ ret = kstrtobool(val, &tmp);
+ if (ret < 0) {
+ pr_err("qcom_scm: err: %d\n", ret);
+ return ret;
+ }
+
+ ret = tmp ? 1 : 0;
+ }
+
+ download_mode = ret;
+ if (__scm)
+ qcom_scm_set_download_mode(download_mode);
+
+ return 0;
+}
+
+static const struct kernel_param_ops download_mode_param_ops = {
+ .get = get_download_mode,
+ .set = set_download_mode,
+};
+
+module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
+MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values");
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_tzmem_pool_config pool_config;
@@ -1950,18 +2000,16 @@ static int qcom_scm_probe(struct platform_device *pdev)
__get_convention();
/*
- * If requested enable "download mode", from this point on warmboot
+ * If "download mode" is requested, from this point on warmboot
* will cause the boot stages to enter download mode, unless
* disabled below by a clean shutdown/reboot.
*/
- if (download_mode)
- qcom_scm_set_download_mode(true);
-
+ qcom_scm_set_download_mode(download_mode);
/*
* Disable SDI if indicated by DT that it is enabled by default.
*/
- if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev);
@@ -2003,7 +2051,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
- qcom_scm_set_download_mode(false);
+ qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}
static const struct of_device_id qcom_scm_dt_match[] = {
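
The selected mode lands in bits [5:4] of the download-mode cookie via QCOM_DLOAD_MASK; a register-math-only sketch (helper name invented):

    #include <linux/bitfield.h>

    /* Illustrative: fold the chosen mode into bits [5:4], per
     * QCOM_DLOAD_MASK above; e.g. QCOM_DLOAD_MINIDUMP -> 0b10. */
    static u32 dload_cookie(u32 old, u32 mode)
    {
            return (old & ~QCOM_DLOAD_MASK) | FIELD_PREP(QCOM_DLOAD_MASK, mode);
    }

Since the parameter is registered with mode 0644, the mode can presumably also be switched at runtime, e.g. by writing "mini" to /sys/module/qcom_scm/parameters/download_mode.
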
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 5f43dfa22f79..85c525745b31 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -452,7 +452,7 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
}
/* kobj_type: ties together all properties required to register an entry */
-static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
.default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index ac34876a97f8..18cc34987108 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -62,7 +62,6 @@ rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
ret = 0;
} else {
ret = -ETIMEDOUT;
- WARN_ONCE(1, "Firmware transaction timeout");
}
} else {
dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
@@ -125,6 +124,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
buf[2], buf[1]);
ret = -EINVAL;
+ } else if (ret == -ETIMEDOUT) {
+ WARN_ONCE(1, "Firmware transaction 0x%08x timeout", buf[2]);
}
dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index 89a68e7eeaa6..f3319be20b36 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -39,6 +39,8 @@ void __init kvm_init_hyp_services(void)
pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n",
res.a3, res.a2, res.a1, res.a0);
+
+ kvm_arch_init_hyp_services();
}
bool kvm_arm_hyp_service_available(u32 func_id)
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index c1590d3aa9cb..2bee6e918f81 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
@@ -24,12 +25,6 @@
#define MSG_RING BIT(1)
#define TAG_SZ 32
-static inline struct tegra_bpmp *
-mbox_client_to_bpmp(struct mbox_client *client)
-{
- return container_of(client, struct tegra_bpmp, mbox.client);
-}
-
static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{
@@ -40,29 +35,24 @@ channel_to_ops(struct tegra_bpmp_channel *channel)
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
+ struct device_node *np __free(device_node);
struct platform_device *pdev;
struct tegra_bpmp *bpmp;
- struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
if (!np)
return ERR_PTR(-ENOENT);
pdev = of_find_device_by_node(np);
- if (!pdev) {
- bpmp = ERR_PTR(-ENODEV);
- goto put;
- }
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
bpmp = platform_get_drvdata(pdev);
if (!bpmp) {
- bpmp = ERR_PTR(-EPROBE_DEFER);
put_device(&pdev->dev);
- goto put;
+ return ERR_PTR(-EPROBE_DEFER);
}
-put:
- of_node_put(np);
return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
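
tegra_bpmp_get() now leans on the scope-based cleanup helpers from <linux/cleanup.h>: the __free(device_node) annotation schedules of_node_put() for when np leaves scope, on every return path. A minimal sketch of the pattern, with an invented property name:

    #include <linux/cleanup.h>
    #include <linux/of.h>

    static int example_lookup(struct device_node *parent)
    {
            struct device_node *np __free(device_node) =
                    of_parse_phandle(parent, "some-phandle", 0);

            if (!np)
                    return -ENOENT;

            /* use np; no explicit of_node_put() on any exit path */
            return 0;
    }
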
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 3e7f186d239a..525ebdc7ded5 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -5,26 +5,43 @@
* Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
*/
+#include <crypto/sha2.h>
+#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
#include <linux/completion.h>
+#include <linux/container_of.h>
#include <linux/debugfs.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/if_ether.h>
+#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
+#define RWTM_DMA_BUFFER_SIZE SZ_4K
+
/*
* The macros and constants below come from Turris Mox's rWTM firmware code.
* This firmware is open source and its sources can be found at
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
+#define MOX_ECC_NUMBER_WORDS 17
+#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
+
+#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
#define MBOX_STS_BADCMD (2 << 30)
@@ -44,13 +61,9 @@ enum mbox_cmd {
MBOX_CMD_OTP_WRITE = 8,
};
-struct mox_kobject;
-
struct mox_rwtm {
- struct device *dev;
struct mbox_client mbox_client;
struct mbox_chan *mbox;
- struct mox_kobject *kobj;
struct hwrng hwrng;
struct armada_37xx_rwtm_rx_msg reply;
@@ -62,13 +75,13 @@ struct mox_rwtm {
struct completion cmd_done;
/* board information */
- int has_board_info;
+ bool has_board_info;
u64 serial_number;
int board_version, ram_size;
- u8 mac_address1[6], mac_address2[6];
+ u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
/* public key burned in eFuse */
- int has_pubkey;
+ bool has_pubkey;
u8 pubkey[135];
#ifdef CONFIG_DEBUG_FS
@@ -78,65 +91,27 @@ struct mox_rwtm {
* It should be rewritten via crypto API once akcipher API is available
* from userspace.
*/
- struct dentry *debugfs_root;
- u32 last_sig[34];
- int last_sig_done;
+ u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
+ bool last_sig_done;
#endif
};
-struct mox_kobject {
- struct kobject kobj;
- struct mox_rwtm *rwtm;
-};
-
-static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
-{
- return &rwtm->kobj->kobj;
-}
-
-static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
-{
- return container_of(kobj, struct mox_kobject, kobj)->rwtm;
-}
-
-static void mox_kobj_release(struct kobject *kobj)
-{
- kfree(to_rwtm(kobj)->kobj);
-}
-
-static const struct kobj_type mox_kobj_ktype = {
- .release = mox_kobj_release,
- .sysfs_ops = &kobj_sysfs_ops,
-};
-
-static int mox_kobj_create(struct mox_rwtm *rwtm)
+static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
{
- rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
- if (!rwtm->kobj)
- return -ENOMEM;
-
- kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
- if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
- kobject_put(rwtm_to_kobj(rwtm));
- return -ENXIO;
- }
-
- rwtm->kobj->rwtm = rwtm;
-
- return 0;
+ return rwtm->mbox_client.dev;
}
#define MOX_ATTR_RO(name, format, cat) \
static ssize_t \
-name##_show(struct kobject *kobj, struct kobj_attribute *a, \
+name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
- struct mox_rwtm *rwtm = to_rwtm(kobj); \
+ struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
if (!rwtm->has_##cat) \
return -ENODATA; \
- return sprintf(buf, format, rwtm->name); \
+ return sysfs_emit(buf, format, rwtm->name); \
} \
-static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
MOX_ATTR_RO(board_version, "%i\n", board_info);
@@ -145,6 +120,17 @@ MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+static struct attribute *turris_mox_rwtm_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_board_version.attr,
+ &dev_attr_ram_size.attr,
+ &dev_attr_mac_address1.attr,
+ &dev_attr_mac_address2.attr,
+ &dev_attr_pubkey.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(turris_mox_rwtm);
+
static int mox_get_status(enum mbox_cmd cmd, u32 retval)
{
if (MBOX_STS_CMD(retval) != cmd)
@@ -152,23 +138,13 @@ static int mox_get_status(enum mbox_cmd cmd, u32 retval)
else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
return -(int)MBOX_STS_VALUE(retval);
else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
- return -ENOSYS;
+ return -EOPNOTSUPP;
else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
return -EIO;
else
return MBOX_STS_VALUE(retval);
}
-static const struct attribute *mox_rwtm_attrs[] = {
- &mox_attr_serial_number.attr,
- &mox_attr_board_version.attr,
- &mox_attr_ram_size.attr,
- &mox_attr_mac_address1.attr,
- &mox_attr_mac_address2.attr,
- &mox_attr_pubkey.attr,
- NULL
-};
-
static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
{
struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
@@ -181,6 +157,34 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
complete(&rwtm->cmd_done);
}
+static int mox_rwtm_exec(struct mox_rwtm *rwtm, enum mbox_cmd cmd,
+ struct armada_37xx_rwtm_tx_msg *msg,
+ bool interruptible)
+{
+ struct armada_37xx_rwtm_tx_msg _msg = {};
+ int ret;
+
+ if (!msg)
+ msg = &_msg;
+
+ msg->command = cmd;
+
+ ret = mbox_send_message(rwtm->mbox, msg);
+ if (ret < 0)
+ return ret;
+
+ if (interruptible) {
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+ return -ETIMEDOUT;
+ }
+
+ return mox_get_status(cmd, rwtm->reply.retval);
+}
+
static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
{
mac[0] = t1 >> 8;
@@ -193,24 +197,16 @@ static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
static int mox_get_board_info(struct mox_rwtm *rwtm)
{
- struct armada_37xx_rwtm_tx_msg msg;
+ struct device *dev = rwtm_dev(rwtm);
struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
int ret;
- msg.command = MBOX_CMD_BOARD_INFO;
- ret = mbox_send_message(rwtm->mbox, &msg);
- if (ret < 0)
- return ret;
-
- if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
- return -ETIMEDOUT;
-
- ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_BOARD_INFO, NULL, false);
if (ret == -ENODATA) {
- dev_warn(rwtm->dev,
+ dev_warn(dev,
"Board does not have manufacturing information burned!\n");
- } else if (ret == -ENOSYS) {
- dev_notice(rwtm->dev,
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
"Firmware does not support the BOARD_INFO command\n");
} else if (ret < 0) {
return ret;
@@ -224,7 +220,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
reply->status[5]);
reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
reply->status[7]);
- rwtm->has_board_info = 1;
+ rwtm->has_board_info = true;
pr_info("Turris Mox serial number %016llX\n",
rwtm->serial_number);
@@ -232,26 +228,18 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- msg.command = MBOX_CMD_ECDSA_PUB_KEY;
- ret = mbox_send_message(rwtm->mbox, &msg);
- if (ret < 0)
- return ret;
-
- if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
- return -ETIMEDOUT;
-
- ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
if (ret == -ENODATA) {
- dev_warn(rwtm->dev, "Board has no public key burned!\n");
- } else if (ret == -ENOSYS) {
- dev_notice(rwtm->dev,
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
"Firmware does not support the ECDSA_PUB_KEY command\n");
} else if (ret < 0) {
return ret;
} else {
u32 *s = reply->status;
- rwtm->has_pubkey = 1;
+ rwtm->has_pubkey = true;
sprintf(rwtm->pubkey,
"%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
@@ -263,37 +251,22 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
static int check_get_random_support(struct mox_rwtm *rwtm)
{
- struct armada_37xx_rwtm_tx_msg msg;
- int ret;
+ struct armada_37xx_rwtm_tx_msg msg = {
+ .args = { 1, rwtm->buf_phys, 4 },
+ };
- msg.command = MBOX_CMD_GET_RANDOM;
- msg.args[0] = 1;
- msg.args[1] = rwtm->buf_phys;
- msg.args[2] = 4;
-
- ret = mbox_send_message(rwtm->mbox, &msg);
- if (ret < 0)
- return ret;
-
- if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
- return -ETIMEDOUT;
-
- return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ return mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, false);
}
static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
- struct armada_37xx_rwtm_tx_msg msg;
+ struct mox_rwtm *rwtm = container_of(rng, struct mox_rwtm, hwrng);
+ struct armada_37xx_rwtm_tx_msg msg = {
+ .args = { 1, rwtm->buf_phys, ALIGN(max, 4) },
+ };
int ret;
- if (max > 4096)
- max = 4096;
-
- msg.command = MBOX_CMD_GET_RANDOM;
- msg.args[0] = 1;
- msg.args[1] = rwtm->buf_phys;
- msg.args[2] = (max + 3) & ~3;
+ max = min(max, RWTM_DMA_BUFFER_SIZE);
if (!wait) {
if (!mutex_trylock(&rwtm->busy))
@@ -302,15 +275,7 @@ static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
mutex_lock(&rwtm->busy);
}
- ret = mbox_send_message(rwtm->mbox, &msg);
- if (ret < 0)
- goto unlock_mutex;
-
- ret = wait_for_completion_interruptible(&rwtm->cmd_done);
- if (ret < 0)
- goto unlock_mutex;
-
- ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, true);
if (ret < 0)
goto unlock_mutex;
@@ -336,19 +301,19 @@ static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
struct mox_rwtm *rwtm = file->private_data;
ssize_t ret;
- /* only allow one read, of 136 bytes, from position 0 */
+ /* only allow one read, of whole signature, from position 0 */
if (*ppos != 0)
return 0;
- if (len < 136)
+ if (len < sizeof(rwtm->last_sig))
return -EINVAL;
if (!rwtm->last_sig_done)
return -ENODATA;
- /* 2 arrays of 17 32-bit words are 136 bytes */
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
- rwtm->last_sig_done = 0;
+ ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
+ sizeof(rwtm->last_sig));
+ rwtm->last_sig_done = false;
return ret;
}
@@ -357,13 +322,11 @@ static ssize_t do_sign_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
struct armada_37xx_rwtm_tx_msg msg;
loff_t dummy = 0;
ssize_t ret;
- /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
- if (len != 64)
+ if (len != SHA512_DIGEST_SIZE)
return -EINVAL;
/* if last result is not zero user has not read that information yet */
@@ -384,37 +347,32 @@ static ssize_t do_sign_write(struct file *file, const char __user *buf,
* 3. Address of the buffer where ECDSA signature value S shall be
* stored by the rWTM firmware.
*/
- memset(rwtm->buf, 0, 4);
- ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
+ memset(rwtm->buf, 0, sizeof(u32));
+ ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
+ SHA512_DIGEST_SIZE, &dummy, buf, len);
if (ret < 0)
goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
- msg.command = MBOX_CMD_SIGN;
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + 68;
- msg.args[3] = rwtm->buf_phys + 2 * 68;
- ret = mbox_send_message(rwtm->mbox, &msg);
- if (ret < 0)
- goto unlock_mutex;
+ msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
+ msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
- ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
goto unlock_mutex;
- ret = MBOX_STS_VALUE(reply->retval);
- if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
- goto unlock_mutex;
-
/*
* Here we read the R and S values of the ECDSA signature
* computed by the rWTM firmware and convert their words from
* LE to BE.
*/
- memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
- rwtm->last_sig_done = 1;
+ memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
+ sizeof(rwtm->last_sig));
+ cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
+ MOX_ECC_SIGNATURE_WORDS);
+ rwtm->last_sig_done = true;
mutex_unlock(&rwtm->busy);
return len;
@@ -431,42 +389,36 @@ static const struct file_operations do_sign_fops = {
.llseek = no_llseek,
};
-static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static void rwtm_debugfs_release(void *root)
{
- struct dentry *root, *entry;
-
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ debugfs_remove_recursive(root);
+}
- if (IS_ERR(root))
- return PTR_ERR(root);
+static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ struct dentry *root;
- entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
- &do_sign_fops);
- if (IS_ERR(entry))
- goto err_remove;
+ root = debugfs_create_dir("turris-mox-rwtm", NULL);
- rwtm->debugfs_root = root;
+ debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
- return 0;
-err_remove:
- debugfs_remove_recursive(root);
- return PTR_ERR(entry);
+ devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
}
-
-static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+#else
+static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
{
- debugfs_remove_recursive(rwtm->debugfs_root);
}
-#else
-static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+#endif
+
+static void rwtm_devm_mbox_release(void *mbox)
{
- return 0;
+ mbox_free_channel(mbox);
}
-static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+static void rwtm_firmware_symlink_drop(void *parent)
{
+ sysfs_remove_link(parent, DRIVER_NAME);
}
-#endif
static int turris_mox_rwtm_probe(struct platform_device *pdev)
{
@@ -478,40 +430,30 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (!rwtm)
return -ENOMEM;
- rwtm->dev = dev;
- rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
- GFP_KERNEL);
+ rwtm->buf = dmam_alloc_coherent(dev, RWTM_DMA_BUFFER_SIZE,
+ &rwtm->buf_phys, GFP_KERNEL);
if (!rwtm->buf)
return -ENOMEM;
- ret = mox_kobj_create(rwtm);
- if (ret < 0) {
- dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
- return ret;
- }
-
- ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
- if (ret < 0) {
- dev_err(dev, "Cannot create sysfs files!\n");
- goto put_kobj;
- }
-
platform_set_drvdata(pdev, rwtm);
- mutex_init(&rwtm->busy);
+ ret = devm_mutex_init(dev, &rwtm->busy);
+ if (ret)
+ return ret;
+
init_completion(&rwtm->cmd_done);
rwtm->mbox_client.dev = dev;
rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
- if (IS_ERR(rwtm->mbox)) {
- ret = PTR_ERR(rwtm->mbox);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Cannot request mailbox channel: %i\n",
- ret);
- goto remove_files;
- }
+ if (IS_ERR(rwtm->mbox))
+ return dev_err_probe(dev, PTR_ERR(rwtm->mbox),
+ "Cannot request mailbox channel!\n");
+
+ ret = devm_add_action_or_reset(dev, rwtm_devm_mbox_release, rwtm->mbox);
+ if (ret)
+ return ret;
ret = mox_get_board_info(rwtm);
if (ret < 0)
@@ -521,46 +463,30 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0) {
dev_notice(dev,
"Firmware does not support the GET_RANDOM command\n");
- goto free_channel;
+ return ret;
}
rwtm->hwrng.name = DRIVER_NAME "_hwrng";
rwtm->hwrng.read = mox_hwrng_read;
- rwtm->hwrng.priv = (unsigned long) rwtm;
ret = devm_hwrng_register(dev, &rwtm->hwrng);
- if (ret < 0) {
- dev_err(dev, "Cannot register HWRNG: %i\n", ret);
- goto free_channel;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- ret = rwtm_register_debugfs(rwtm);
- if (ret < 0) {
- dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
- goto free_channel;
- }
+ rwtm_register_debugfs(rwtm);
dev_info(dev, "HWRNG successfully registered\n");
- return 0;
-
-free_channel:
- mbox_free_channel(rwtm->mbox);
-remove_files:
- sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
-put_kobj:
- kobject_put(rwtm_to_kobj(rwtm));
- return ret;
-}
-
-static void turris_mox_rwtm_remove(struct platform_device *pdev)
-{
- struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+ /*
+ * For sysfs ABI compatibility, create symlink
+ * /sys/firmware/turris-mox-rwtm to this device's sysfs directory.
+ */
+ ret = sysfs_create_link(firmware_kobj, &dev->kobj, DRIVER_NAME);
+ if (!ret)
+ devm_add_action_or_reset(dev, rwtm_firmware_symlink_drop,
+ firmware_kobj);
- rwtm_unregister_debugfs(rwtm);
- sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
- kobject_put(rwtm_to_kobj(rwtm));
- mbox_free_channel(rwtm->mbox);
+ return 0;
}
static const struct of_device_id turris_mox_rwtm_match[] = {
@@ -573,10 +499,10 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
static struct platform_driver turris_mox_rwtm_driver = {
.probe = turris_mox_rwtm_probe,
- .remove_new = turris_mox_rwtm_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = turris_mox_rwtm_match,
+ .dev_groups = turris_mox_rwtm_groups,
},
};
module_platform_driver(turris_mox_rwtm_driver);
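
Most of the churn above funnels the mailbox round-trips through the new mox_rwtm_exec() helper. A sketch of a one-off command in the new style (wrapper name invented; per the helper, a NULL msg means a zeroed scratch message and 'false' selects the timeout-based wait):

    static int mox_query_board_info(struct mox_rwtm *rwtm)
    {
            /* Send MBOX_CMD_BOARD_INFO and return the decoded status. */
            return mox_rwtm_exec(rwtm, MBOX_CMD_BOARD_INFO, NULL, false);
    }
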
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 58f43bcced7c..d93cd4f722b4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1233,6 +1233,13 @@ config GPIO_ADP5520
This option enables support for on-chip GPIO found
on Analog Devices ADP5520 PMICs.
+config GPIO_ADP5585
+ tristate "GPIO Support for ADP5585"
+ depends on MFD_ADP5585
+ help
+ This option enables support for the GPIO function found in the Analog
+ Devices ADP5585.
+
config GPIO_ALTERA_A10SR
tristate "Altera Arria10 System Resource GPIO"
depends on MFD_ALTERA_A10SR
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 64dd6d9d730d..1429e8c0229b 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
+obj-$(CONFIG_GPIO_ADP5585) += gpio-adp5585.o
obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
new file mode 100644
index 000000000000..000d31f09671
--- /dev/null
+++ b/drivers/gpio/gpio-adp5585.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices ADP5585 GPIO driver
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/adp5585.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define ADP5585_GPIO_MAX 11
+
+struct adp5585_gpio_dev {
+ struct gpio_chip gpio_chip;
+ struct regmap *regmap;
+};
+
+static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+ unsigned int val;
+
+ regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
+
+ return val & bit ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+
+ return regmap_clear_bits(adp5585_gpio->regmap,
+ ADP5585_GPIO_DIRECTION_A + bank, bit);
+}
+
+static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+ int ret;
+
+ ret = regmap_update_bits(adp5585_gpio->regmap,
+ ADP5585_GPO_DATA_OUT_A + bank, bit,
+ val ? bit : 0);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(adp5585_gpio->regmap,
+ ADP5585_GPIO_DIRECTION_A + bank, bit);
+}
+
+static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+ unsigned int reg;
+ unsigned int val;
+
+ /*
+ * The input status register doesn't reflect the pin state when the
+ * GPIO is configured as an output. Check the direction, and read the
+ * input status from GPI_STATUS or output value from GPO_DATA_OUT
+ * accordingly.
+ *
+ * We don't need any locking, as concurrent access to the same GPIO
+ * isn't allowed by the GPIO API, so there's no risk of the
+ * .direction_input(), .direction_output() or .set() operations racing
+ * with this.
+ */
+ regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
+ reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A;
+ regmap_read(adp5585_gpio->regmap, reg + bank, &val);
+
+ return !!(val & bit);
+}
+
+static void adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+
+ regmap_update_bits(adp5585_gpio->regmap, ADP5585_GPO_DATA_OUT_A + bank,
+ bit, val ? bit : 0);
+}
+
+static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
+ unsigned int off, unsigned int bias)
+{
+ unsigned int bit, reg, mask, val;
+
+ /*
+ * The bias configuration fields are 2 bits wide and laid down in
+ * consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits
+ * after R5.
+ */
+ bit = off * 2 + (off > 5 ? 4 : 0);
+ reg = ADP5585_RPULL_CONFIG_A + bit / 8;
+ mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8);
+ val = bias << (bit % 8);
+
+ return regmap_update_bits(adp5585_gpio->regmap, reg, mask, val);
+}
+
+static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio,
+ unsigned int off, enum pin_config_param drive)
+{
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+
+ return regmap_update_bits(adp5585_gpio->regmap,
+ ADP5585_GPO_OUT_MODE_A + bank, bit,
+ drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0);
+}
+
+static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio,
+ unsigned int off, unsigned int debounce)
+{
+ unsigned int bank = ADP5585_BANK(off);
+ unsigned int bit = ADP5585_BIT(off);
+
+ return regmap_update_bits(adp5585_gpio->regmap,
+ ADP5585_DEBOUNCE_DIS_A + bank, bit,
+ debounce ? 0 : bit);
+}
+
+static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off,
+ unsigned long config)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u32 arg = pinconf_to_config_argument(config);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return adp5585_gpio_set_bias(adp5585_gpio, off,
+ ADP5585_Rx_PULL_CFG_DISABLE);
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return adp5585_gpio_set_bias(adp5585_gpio, off, arg ?
+ ADP5585_Rx_PULL_CFG_PD_300K :
+ ADP5585_Rx_PULL_CFG_DISABLE);
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return adp5585_gpio_set_bias(adp5585_gpio, off, arg ?
+ ADP5585_Rx_PULL_CFG_PU_300K :
+ ADP5585_Rx_PULL_CFG_DISABLE);
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return adp5585_gpio_set_drive(adp5585_gpio, off, param);
+
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return adp5585_gpio_set_debounce(adp5585_gpio, off, arg);
+
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+static int adp5585_gpio_probe(struct platform_device *pdev)
+{
+ struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent);
+ struct adp5585_gpio_dev *adp5585_gpio;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ int ret;
+
+ adp5585_gpio = devm_kzalloc(dev, sizeof(*adp5585_gpio), GFP_KERNEL);
+ if (!adp5585_gpio)
+ return -ENOMEM;
+
+ adp5585_gpio->regmap = adp5585->regmap;
+
+ device_set_of_node_from_dev(dev, dev->parent);
+
+ gc = &adp5585_gpio->gpio_chip;
+ gc->parent = dev;
+ gc->get_direction = adp5585_gpio_get_direction;
+ gc->direction_input = adp5585_gpio_direction_input;
+ gc->direction_output = adp5585_gpio_direction_output;
+ gc->get = adp5585_gpio_get_value;
+ gc->set = adp5585_gpio_set_value;
+ gc->set_config = adp5585_gpio_set_config;
+ gc->can_sleep = true;
+
+ gc->base = -1;
+ gc->ngpio = ADP5585_GPIO_MAX;
+ gc->label = pdev->name;
+ gc->owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip,
+ adp5585_gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add GPIO chip\n");
+
+ return 0;
+}
+
+static const struct platform_device_id adp5585_gpio_id_table[] = {
+ { "adp5585-gpio" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table);
+
+static struct platform_driver adp5585_gpio_driver = {
+ .driver = {
+ .name = "adp5585-gpio",
+ },
+ .probe = adp5585_gpio_probe,
+ .id_table = adp5585_gpio_id_table,
+};
+module_platform_driver(adp5585_gpio_driver);
+
+MODULE_AUTHOR("Haibo Chen <haibo.chen@nxp.com>");
+MODULE_DESCRIPTION("GPIO ADP5585 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 6211d99a5770..de4cc12e5e03 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -8,13 +8,13 @@
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*/
+#include <linux/device.h>
#include <linux/gpio/driver.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/gpio-ath79.h>
-#include <linux/of.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#define AR71XX_GPIO_REG_OE 0x00
#define AR71XX_GPIO_REG_IN 0x04
@@ -224,9 +224,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
- struct ath79_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
u32 ath79_gpio_count;
@@ -237,21 +235,14 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (!ctrl)
return -ENOMEM;
- if (np) {
- err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
- if (err) {
- dev_err(dev, "ngpios property is not valid\n");
- return err;
- }
- oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
- } else if (pdata) {
- ath79_gpio_count = pdata->ngpios;
- oe_inverted = pdata->oe_inverted;
- } else {
- dev_err(dev, "No DT node or platform data found\n");
- return -EINVAL;
+ err = device_property_read_u32(dev, "ngpios", &ath79_gpio_count);
+ if (err) {
+ dev_err(dev, "ngpios property is not valid\n");
+ return err;
}
+ oe_inverted = device_is_compatible(dev, "qca,ar9340-gpio");
+
if (ath79_gpio_count >= 32) {
dev_err(dev, "ngpios must be less than 32\n");
return -EINVAL;
@@ -275,7 +266,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
}
/* Optional interrupt setup */
- if (!np || of_property_read_bool(np, "interrupt-controller")) {
+ if (device_property_read_bool(dev, "interrupt-controller")) {
girq = &ctrl->gc.irq;
gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index 6a439cf78459..1b8ffd0ddab6 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -31,7 +31,6 @@
struct cdns_gpio_chip {
struct gpio_chip gc;
- struct clk *pclk;
void __iomem *regs;
u32 bypass_orig;
};
@@ -155,6 +154,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
int ret, irq;
u32 dir_prev;
u32 num_gpios = 32;
+ struct clk *clk;
cgpio = devm_kzalloc(&pdev->dev, sizeof(*cgpio), GFP_KERNEL);
if (!cgpio)
@@ -203,21 +203,14 @@ static int cdns_gpio_probe(struct platform_device *pdev)
cgpio->gc.request = cdns_gpio_request;
cgpio->gc.free = cdns_gpio_free;
- cgpio->pclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(cgpio->pclk)) {
- ret = PTR_ERR(cgpio->pclk);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev,
"Failed to retrieve peripheral clock, %d\n", ret);
goto err_revert_dir;
}
- ret = clk_prepare_enable(cgpio->pclk);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable the peripheral clock, %d\n", ret);
- goto err_revert_dir;
- }
-
/*
* Optional irq_chip support
*/
@@ -234,7 +227,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!girq->parents) {
ret = -ENOMEM;
- goto err_disable_clk;
+ goto err_revert_dir;
}
girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -244,7 +237,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- goto err_disable_clk;
+ goto err_revert_dir;
}
cgpio->bypass_orig = ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE);
@@ -259,9 +252,6 @@ static int cdns_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cgpio);
return 0;
-err_disable_clk:
- clk_disable_unprepare(cgpio->pclk);
-
err_revert_dir:
iowrite32(dir_prev, cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
@@ -273,7 +263,6 @@ static void cdns_gpio_remove(struct platform_device *pdev)
struct cdns_gpio_chip *cgpio = platform_get_drvdata(pdev);
iowrite32(cgpio->bypass_orig, cgpio->regs + CDNS_GPIO_BYPASS_MODE);
- clk_disable_unprepare(cgpio->pclk);
}
static const struct of_device_id cdns_of_ids[] = {
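
This and several of the following GPIO drivers apply the same conversion: devm_clk_get_enabled() subsumes devm_clk_get() plus clk_prepare_enable(), and devres disables and puts the clock on unbind, so the explicit clk_disable_unprepare() calls drop out of the error paths and .remove(). A minimal sketch of the resulting probe pattern (names invented):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_probe(struct device *dev)
    {
            struct clk *clk = devm_clk_get_enabled(dev, NULL);

            if (IS_ERR(clk))
                    return dev_err_probe(dev, PTR_ERR(clk),
                                         "failed to get/enable clock\n");

            /* clock stays on for the device's lifetime */
            return 0;
    }
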
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 1d0175d6350b..b54fef6b1e12 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/gpio-davinci.h>
#include <linux/property.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
@@ -154,74 +153,37 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
value ? &g->set_data : &g->clr_data);
}
-static struct davinci_gpio_platform_data *
-davinci_gpio_get_pdata(struct platform_device *pdev)
-{
- struct device_node *dn = pdev->dev.of_node;
- struct davinci_gpio_platform_data *pdata;
- int ret;
- u32 val;
-
- if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
- return dev_get_platdata(&pdev->dev);
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- ret = of_property_read_u32(dn, "ti,ngpio", &val);
- if (ret)
- goto of_err;
-
- pdata->ngpio = val;
-
- ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", &val);
- if (ret)
- goto of_err;
-
- pdata->gpio_unbanked = val;
-
- return pdata;
-
-of_err:
- dev_err(&pdev->dev, "Populating pdata from DT failed: err %d\n", ret);
- return NULL;
-}
-
static int davinci_gpio_probe(struct platform_device *pdev)
{
int bank, i, ret = 0;
- unsigned int ngpio, nbank, nirq;
+ unsigned int ngpio, nbank, nirq, gpio_unbanked;
struct davinci_gpio_controller *chips;
- struct davinci_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
-
- pdata = davinci_gpio_get_pdata(pdev);
- if (!pdata) {
- dev_err(dev, "No platform data found\n");
- return -EINVAL;
- }
-
- dev->platform_data = pdata;
+ struct device_node *dn = dev_of_node(dev);
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
* bit index that's valid.
*/
- ngpio = pdata->ngpio;
- if (ngpio == 0) {
- dev_err(dev, "How many GPIOs?\n");
- return -EINVAL;
- }
+ ret = of_property_read_u32(dn, "ti,ngpio", &ngpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get the number of GPIOs\n");
+ if (ngpio == 0)
+ return dev_err_probe(dev, -EINVAL, "How many GPIOs?\n");
/*
* If there are unbanked interrupts then the number of
* interrupts is equal to number of gpios else all are banked so
* number of interrupts is equal to number of banks(each with 16 gpios)
*/
- if (pdata->gpio_unbanked)
- nirq = pdata->gpio_unbanked;
+ ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked",
+ &gpio_unbanked);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get the unbanked GPIOs property\n");
+
+ if (gpio_unbanked)
+ nirq = gpio_unbanked;
else
nirq = DIV_ROUND_UP(ngpio, 16);
@@ -252,7 +214,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
- chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
+ chips->chip.base = -1;
#ifdef CONFIG_OF_GPIO
chips->chip.parent = dev;
@@ -261,6 +223,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
#endif
spin_lock_init(&chips->lock);
+ chips->gpio_unbanked = gpio_unbanked;
+
nbank = DIV_ROUND_UP(ngpio, 32);
for (bank = 0; bank < nbank; bank++)
chips->regs[bank] = gpio_base + offset_array[bank];
@@ -482,13 +446,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
unsigned gpio, bank;
int irq;
- int ret;
struct clk *clk;
u32 binten = 0;
unsigned ngpio;
struct device *dev = &pdev->dev;
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
- struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
struct irq_domain *irq_domain = NULL;
struct irq_chip *irq_chip;
@@ -502,23 +464,18 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
if (dev->of_node)
gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)device_get_match_data(dev);
- ngpio = pdata->ngpio;
+ ngpio = chips->chip.ngpio;
- clk = devm_clk_get(dev, "gpio");
+ clk = devm_clk_get_enabled(dev, "gpio");
if (IS_ERR(clk)) {
dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- if (!pdata->gpio_unbanked) {
+ if (chips->gpio_unbanked) {
irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0);
if (irq < 0) {
dev_err(dev, "Couldn't allocate IRQ numbers\n");
- clk_disable_unprepare(clk);
return irq;
}
@@ -527,7 +484,6 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
- clk_disable_unprepare(clk);
return -ENODEV;
}
}
@@ -546,11 +502,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* controller only handling trigger modes. We currently assume no
* IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
*/
- if (pdata->gpio_unbanked) {
+ if (chips->gpio_unbanked) {
/* pass "bank 0" GPIO IRQs to AINTC */
chips->chip.to_irq = gpio_to_irq_unbanked;
- chips->gpio_unbanked = pdata->gpio_unbanked;
- binten = GENMASK(pdata->gpio_unbanked / 16, 0);
+
+ binten = GENMASK(chips->gpio_unbanked / 16, 0);
/* AINTC handles mask/unmask; GPIO handles triggering */
irq = chips->irqs[0];
@@ -564,7 +520,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) {
+ for (gpio = 0; gpio < chips->gpio_unbanked; gpio++) {
irq_set_chip(chips->irqs[gpio], irq_chip);
irq_set_handler_data(chips->irqs[gpio], chips);
irq_set_status_flags(chips->irqs[gpio],
@@ -596,10 +552,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
sizeof(struct
davinci_gpio_irq_data),
GFP_KERNEL);
- if (!irqdata) {
- clk_disable_unprepare(clk);
+ if (!irqdata)
return -ENOMEM;
- }
irqdata->regs = g;
irqdata->bank_num = bank;
@@ -675,8 +629,7 @@ static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips,
static int davinci_gpio_suspend(struct device *dev)
{
struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
- struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
- u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+ u32 nbank = DIV_ROUND_UP(chips->chip.ngpio, 32);
davinci_gpio_save_context(chips, nbank);
@@ -686,8 +639,7 @@ static int davinci_gpio_suspend(struct device *dev)
static int davinci_gpio_resume(struct device *dev)
{
struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
- struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
- u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+ u32 nbank = DIV_ROUND_UP(chips->chip.ngpio, 32);
davinci_gpio_restore_context(chips, nbank);
diff --git a/drivers/gpio/gpio-fxl6408.c b/drivers/gpio/gpio-fxl6408.c
index 991549888904..86ebc66b1104 100644
--- a/drivers/gpio/gpio-fxl6408.c
+++ b/drivers/gpio/gpio-fxl6408.c
@@ -138,7 +138,7 @@ static const __maybe_unused struct of_device_id fxl6408_dt_ids[] = {
MODULE_DEVICE_TABLE(of, fxl6408_dt_ids);
static const struct i2c_device_id fxl6408_id[] = {
- { "fxl6408", 0 },
+ { "fxl6408" },
{ }
};
MODULE_DEVICE_TABLE(i2c, fxl6408_id);
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index c5a9fa640566..28a8a6a8f05f 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -6,6 +6,7 @@
// based on previous work and know-how from:
// Deepak Saxena <dsaxena@plexity.net>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -13,7 +14,7 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
+#include <linux/property.h>
#define IXP4XX_REG_GPOUT 0x00
#define IXP4XX_REG_GPOE 0x04
@@ -53,16 +54,14 @@
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
* @dev: containing device for this instance
- * @fwnode: the fwnode for this GPIO chip
* @gc: gpiochip for this instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level triggered
*/
struct ixp4xx_gpio {
- struct device *dev;
- struct fwnode_handle *fwnode;
struct gpio_chip gc;
+ struct device *dev;
void __iomem *base;
unsigned long long irq_edge;
};
@@ -237,7 +236,6 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
dev_err(dev, "no IRQ parent domain\n");
return -ENODEV;
}
- g->fwnode = of_node_to_fwnode(np);
/*
* If either clock output is enabled explicitly in the device tree
@@ -322,7 +320,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
girq = &g->gc.irq;
gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
- girq->fwnode = g->fwnode;
+ girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
girq->handler = handle_bad_irq;
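This hunk, like the later msc313, tegra, tegra186, thunderx, uniphier and visconti ones, swaps of_node_to_fwnode(dev->of_node) for dev_fwnode(dev), which reads the fwnode directly from the struct device and also covers non-OF (e.g. ACPI) firmware nodes, removing any need to cache it in driver state. A hedged sketch, with example_probe as an invented function:

	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* Works for OF and ACPI alike; no of_node dereference needed. */
		struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);

		return fwnode ? 0 : -ENODEV;
	}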
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 5c6bb57a8c99..e7c0ef6e54fa 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -47,7 +47,6 @@ struct lpc18xx_gpio_pin_ic {
struct lpc18xx_gpio_chip {
struct gpio_chip gpio;
void __iomem *base;
- struct clk *clk;
struct lpc18xx_gpio_pin_ic *pin_ic;
spinlock_t lock;
};
@@ -328,6 +327,7 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct lpc18xx_gpio_chip *gc;
int index, ret;
+ struct clk *clk;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
@@ -352,16 +352,10 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gc->base))
return PTR_ERR(gc->base);
- gc->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(gc->clk)) {
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk)) {
dev_err(dev, "input clock not found\n");
- return PTR_ERR(gc->clk);
- }
-
- ret = clk_prepare_enable(gc->clk);
- if (ret) {
- dev_err(dev, "unable to enable clock\n");
- return ret;
+ return PTR_ERR(clk);
}
spin_lock_init(&gc->lock);
@@ -369,11 +363,8 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
gc->gpio.parent = dev;
ret = devm_gpiochip_add_data(dev, &gc->gpio, gc);
- if (ret) {
- dev_err(dev, "failed to add gpio chip\n");
- clk_disable_unprepare(gc->clk);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add gpio chip\n");
/* On error GPIO pin interrupt controller just won't be registered */
lpc18xx_gpio_pin_ic_probe(gc);
@@ -387,8 +378,6 @@ static void lpc18xx_gpio_remove(struct platform_device *pdev)
if (gc->pin_ic)
irq_domain_remove(gc->pin_ic->domain);
-
- clk_disable_unprepare(gc->clk);
}
static const struct of_device_id lpc18xx_gpio_match[] = {
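The clock handling rewrite seen here recurs throughout the series (mb86s7x, stp-xway, xilinx, zynq): devm_clk_get_enabled() folds devm_clk_get() plus clk_prepare_enable() into one call whose enable is undone automatically on unbind, so the clk_disable_unprepare() calls in error and remove paths can go. Roughly, assuming a dummy example_probe:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct clk *clk;

		/* Prepared and enabled now; disabled, unprepared and put
		 * automatically when the device is unbound. */
		clk = devm_clk_get_enabled(dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "input clock not found\n");

		return 0;
	}

dev_err_probe() also stays quiet on -EPROBE_DEFER and records the deferral reason, which is why the series prefers it over dev_err() in probe paths.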
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index 31c2b95321cc..621d609ece90 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -53,7 +53,7 @@ static void max7300_remove(struct i2c_client *client)
}
static const struct i2c_device_id max7300_id[] = {
- { "max7300", 0 },
+ { "max7300" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max7300_id);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7fb298b4571b..ccbb63c21d6f 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -35,7 +35,6 @@
struct mb86s70_gpio_chip {
struct gpio_chip gc;
void __iomem *base;
- struct clk *clk;
spinlock_t lock;
};
@@ -157,6 +156,7 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
+ struct clk *clk;
int ret;
gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL);
@@ -169,13 +169,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- gchip->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
-
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
spin_lock_init(&gchip->lock);
@@ -193,11 +189,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.base = -1;
ret = gpiochip_add_data(&gchip->gc, gchip);
- if (ret) {
- dev_err(&pdev->dev, "couldn't register gpio driver\n");
- clk_disable_unprepare(gchip->clk);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "couldn't register gpio driver\n");
acpi_gpiochip_request_interrupts(&gchip->gc);
@@ -210,7 +204,6 @@ static void mb86s70_gpio_remove(struct platform_device *pdev)
acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
- clk_disable_unprepare(gchip->clk);
}
static const struct of_device_id mb86s70_gpio_dt_ids[] = {
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index c0125ac73906..685ec31db409 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -7,19 +7,21 @@
*/
#include <linux/acpi.h>
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/gpio/driver.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#define MPC8XXX_GPIO_PINS 32
@@ -413,6 +415,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
goto err;
}
+ device_init_wakeup(&pdev->dev, true);
+
return 0;
err:
irq_domain_remove(mpc8xxx_gc->irq);
@@ -429,6 +433,29 @@ static void mpc8xxx_remove(struct platform_device *pdev)
}
}
+static int mpc8xxx_suspend(struct device *dev)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = dev_get_drvdata(dev);
+
+ if (mpc8xxx_gc->irqn && device_may_wakeup(dev))
+ enable_irq_wake(mpc8xxx_gc->irqn);
+
+ return 0;
+}
+
+static int mpc8xxx_resume(struct device *dev)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = dev_get_drvdata(dev);
+
+ if (mpc8xxx_gc->irqn && device_may_wakeup(dev))
+ disable_irq_wake(mpc8xxx_gc->irqn);
+
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(mpc8xx_pm_ops,
+ mpc8xxx_suspend, mpc8xxx_resume, NULL);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id gpio_acpi_ids[] = {
{"NXP0031",},
@@ -444,6 +471,7 @@ static struct platform_driver mpc8xxx_plat_driver = {
.name = "gpio-mpc8xxx",
.of_match_table = mpc8xxx_gpio_ids,
.acpi_match_table = ACPI_PTR(gpio_acpi_ids),
+ .pm = pm_ptr(&mpc8xx_pm_ops),
},
};
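The new suspend/resume pair arms the controller interrupt as a system wakeup source only when one exists and userspace has enabled wakeup; pm_ptr() then lets the whole ops table compile away when CONFIG_PM is off. A sketch of the arm half, assuming a hypothetical example_chip holding the IRQ number:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/pm_wakeup.h>

	struct example_chip {
		int irqn;	/* 0 when running in no-irq mode */
	};

	static int example_suspend(struct device *dev)
	{
		struct example_chip *chip = dev_get_drvdata(dev);

		/* Arm the wake IRQ only if present and permitted. */
		if (chip->irqn && device_may_wakeup(dev))
			enable_irq_wake(chip->irqn);

		return 0;
	}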
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 2f448eb23abb..6db9e469e0dc 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -3,13 +3,14 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
#include <dt-bindings/gpio/msc313-gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -662,7 +663,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpioirqchip = &gpiochip->irq;
gpio_irq_chip_set_chip(gpioirqchip, &msc313_gpio_irqchip);
- gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node);
+ gpioirqchip->fwnode = dev_fwnode(dev);
gpioirqchip->parent_domain = parent_domain;
gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq;
gpioirqchip->populate_parent_alloc_arg = msc313_gpio_populate_parent_fwspec;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 8baf3edd5274..3f2d33ee20cc 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -498,7 +498,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
ret = regmap_bulk_write(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
- dev_err(&chip->client->dev, "failed writing register\n");
+ dev_err(&chip->client->dev, "failed writing register: %d\n", ret);
return ret;
}
@@ -513,7 +513,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
ret = regmap_bulk_read(chip->regmap, regaddr, value, NBANK(chip));
if (ret < 0) {
- dev_err(&chip->client->dev, "failed reading register\n");
+ dev_err(&chip->client->dev, "failed reading register: %d\n", ret);
return ret;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index ee37ecb615cb..63f25c72eac2 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -84,7 +84,6 @@ struct pch_gpio_reg_data {
* @gpio: Data for GPIO infrastructure.
 * @pch_gpio_reg:		Memory-mapped register data is saved here
 *				on suspend.
- * @lock: Used for register access protection
* @irq_base: Save base of IRQ number for interrupt
* @ioh: IOH ID
* @spinlock: Used for register access protection
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index d89da7300ddd..d770a6f3d846 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -191,15 +191,15 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
piobu->chip.label = pdev->name;
piobu->chip.parent = &pdev->dev;
- piobu->chip.owner = THIS_MODULE,
- piobu->chip.get_direction = sama5d2_piobu_get_direction,
- piobu->chip.direction_input = sama5d2_piobu_direction_input,
- piobu->chip.direction_output = sama5d2_piobu_direction_output,
- piobu->chip.get = sama5d2_piobu_get,
- piobu->chip.set = sama5d2_piobu_set,
- piobu->chip.base = -1,
- piobu->chip.ngpio = PIOBU_NUM,
- piobu->chip.can_sleep = 0,
+ piobu->chip.owner = THIS_MODULE;
+ piobu->chip.get_direction = sama5d2_piobu_get_direction;
+ piobu->chip.direction_input = sama5d2_piobu_direction_input;
+ piobu->chip.direction_output = sama5d2_piobu_direction_output;
+ piobu->chip.get = sama5d2_piobu_get;
+ piobu->chip.set = sama5d2_piobu_set;
+ piobu->chip.base = -1;
+ piobu->chip.ngpio = PIOBU_NUM;
+ piobu->chip.can_sleep = 0;
piobu->regmap = syscon_node_to_regmap(pdev->dev.of_node);
if (IS_ERR(piobu->regmap)) {
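The sama5d2-piobu hunk is purely a punctuation fix: the member assignments were terminated with commas, which C's comma operator silently chains into one long expression statement, so the code compiled and behaved the same, but semicolons are the intended form. A contrived illustration:

	static void comma_demo(void)
	{
		int a, b;

		a = 1, b = 2;	/* one expression statement via the comma operator */
		a = 1; b = 2;	/* two ordinary statements; same effect here */
		(void)a;
		(void)b;
	}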
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 6c5ee81d71b3..75a3633ceddb 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -5,16 +5,16 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/bitops.h>
#include <linux/cleanup.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/gpio/driver.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/of.h>
#include <linux/mfd/stmpe.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -31,7 +31,6 @@ enum { LSB, CSB, MSB };
struct stmpe_gpio {
struct gpio_chip chip;
struct stmpe *stmpe;
- struct device *dev;
struct mutex irq_lock;
u32 norequest_mask;
/* Caches of interrupt control registers for bus_lock */
@@ -464,59 +463,49 @@ static void stmpe_gpio_disable(void *stmpe)
static int stmpe_gpio_probe(struct platform_device *pdev)
{
- struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct stmpe *stmpe = dev_get_drvdata(dev->parent);
struct stmpe_gpio *stmpe_gpio;
int ret, irq;
if (stmpe->num_gpios > MAX_GPIOS) {
- dev_err(&pdev->dev, "Need to increase maximum GPIO number\n");
+ dev_err(dev, "Need to increase maximum GPIO number\n");
return -EINVAL;
}
- stmpe_gpio = devm_kzalloc(&pdev->dev, sizeof(*stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = devm_kzalloc(dev, sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
mutex_init(&stmpe_gpio->irq_lock);
- stmpe_gpio->dev = &pdev->dev;
stmpe_gpio->stmpe = stmpe;
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
- stmpe_gpio->chip.parent = &pdev->dev;
+ stmpe_gpio->chip.parent = dev;
stmpe_gpio->chip.base = -1;
if (IS_ENABLED(CONFIG_DEBUG_FS))
stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
- of_property_read_u32(np, "st,norequest-mask",
- &stmpe_gpio->norequest_mask);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- dev_info(&pdev->dev,
- "device configured in no-irq mode: "
- "irqs are not available\n");
+ device_property_read_u32(dev, "st,norequest-mask", &stmpe_gpio->norequest_mask);
ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&pdev->dev, stmpe_gpio_disable, stmpe);
+ ret = devm_add_action_or_reset(dev, stmpe_gpio_disable, stmpe);
if (ret)
return ret;
+ irq = platform_get_irq(pdev, 0);
if (irq > 0) {
struct gpio_irq_chip *girq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- stmpe_gpio_irq, IRQF_ONESHOT,
- "stmpe-gpio", stmpe_gpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- return ret;
- }
+ ret = devm_request_threaded_irq(dev, irq, NULL, stmpe_gpio_irq,
+ IRQF_ONESHOT, "stmpe-gpio", stmpe_gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to register IRQ handler\n");
girq = &stmpe_gpio->chip.irq;
gpio_irq_chip_set_chip(girq, &stmpe_gpio_irq_chip);
@@ -530,7 +519,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
girq->init_valid_mask = stmpe_init_irq_valid_mask;
}
- return devm_gpiochip_add_data(&pdev->dev, &stmpe_gpio->chip, stmpe_gpio);
+ return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio);
}
static struct platform_driver stmpe_gpio_driver = {
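Beyond caching &pdev->dev in a local, the stmpe rewrite moves from of_property_read_u32() on the of_node to device_property_read_u32() on the device, which transparently resolves OF, ACPI and software-node properties. A minimal sketch (property name taken from the driver, example_probe invented):

	#include <linux/platform_device.h>
	#include <linux/property.h>
	#include <linux/types.h>

	static int example_probe(struct platform_device *pdev)
	{
		u32 mask = 0;

		/* Leaves 'mask' untouched when the property is absent. */
		device_property_read_u32(&pdev->dev, "st,norequest-mask", &mask);

		return 0;
	}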
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 053d616f2e02..5a6406d1f03a 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -296,23 +296,17 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!of_property_read_bool(pdev->dev.of_node, "lantiq,rising"))
chip->edge = XWAY_STP_FALLING;
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
xway_stp_hw_init(chip);
ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
- if (ret) {
- clk_disable_unprepare(clk);
+ if (ret)
return ret;
- }
dev_info(&pdev->dev, "Init done\n");
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 3a90a3a1caea..5ab394ec81e6 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -23,7 +23,6 @@
/**
* struct syscon_gpio_data - Configuration for the device.
- * @compatible: SYSCON driver compatible string.
* @flags: Set of GPIO_SYSCON_FEAT_ flags:
 *	GPIO_SYSCON_FEAT_IN:	GPIOs support input,
 *	GPIO_SYSCON_FEAT_OUT:	GPIOs support output,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index ea5f9cc14bc4..6d3a39a03f58 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -18,11 +18,12 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/seq_file.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/seq_file.h>
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
@@ -755,7 +756,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
}
irq = &tgi->gc.irq;
- irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->fwnode = dev_fwnode(&pdev->dev);
irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq;
irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec;
irq->handler = handle_simple_irq;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 9130c691a2dd..1ecb733a5e88 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
@@ -928,7 +929,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq = &gpio->gpio.irq;
gpio_irq_chip_set_chip(irq, &tegra186_gpio_irq_chip);
- irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->fwnode = dev_fwnode(&pdev->dev);
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 8521c6aacace..5b851e904c11 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#define GPIO_RX_DAT 0x0
@@ -533,7 +534,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
- girq->fwnode = of_node_to_fwnode(dev->of_node);
+ girq->fwnode = dev_fwnode(dev);
girq->parent_domain =
irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
@@ -549,7 +550,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
for (i = 0; i < ngpio; i++) {
struct irq_fwspec fwspec;
- fwspec.fwnode = of_node_to_fwnode(dev->of_node);
+ fwspec.fwnode = dev_fwnode(dev);
fwspec.param_count = 2;
fwspec.param[0] = i;
fwspec.param[1] = IRQ_TYPE_NONE;
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 1f440707f8f4..da99ba13e82d 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include <dt-bindings/gpio/uniphier-gpio.h>
@@ -164,7 +165,7 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
if (offset < UNIPHIER_GPIO_IRQ_OFFSET)
return -ENXIO;
- fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
+ fwspec.fwnode = dev_fwnode(chip->parent);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
/*
@@ -404,7 +405,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_GPIO_IRQ_MAX_NUM,
- of_node_to_fwnode(dev->of_node),
+ dev_fwnode(dev),
&uniphier_gpio_irq_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 07e5e6323e86..27eff741fe9a 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -97,7 +97,7 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
- unsigned long mask = BIT(gpio);
+ u32 mask = BIT(gpio);
unsigned long offset = GPIO_PDIR;
if (port->sdata->have_paddr) {
@@ -112,16 +112,16 @@ static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
- unsigned long mask = BIT(gpio);
+ u32 mask = BIT(gpio);
unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
vf610_gpio_writel(mask, port->gpio_base + offset);
}
-static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
- unsigned long mask = BIT(gpio);
+ u32 mask = BIT(gpio);
u32 val;
if (port->sdata->have_paddr) {
@@ -133,11 +133,11 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
return pinctrl_gpio_direction_input(chip, gpio);
}
-static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
- unsigned long mask = BIT(gpio);
+ u32 mask = BIT(gpio);
u32 val;
vf610_gpio_set(chip, gpio, value);
@@ -151,6 +151,19 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
return pinctrl_gpio_direction_output(chip, gpio);
}
+static int vf610_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ u32 mask = BIT(gpio);
+
+ mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+
+ if (mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
static void vf610_gpio_irq_handler(struct irq_desc *desc)
{
struct vf610_gpio_port *port =
@@ -362,6 +375,12 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->get = vf610_gpio_get;
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
+	/*
+	 * Only IP variants that have the Port Data Direction Register
+	 * (PDDR) can support reading back the GPIO direction.
+	 */
+ if (port->sdata->have_paddr)
+ gc->get_direction = vf610_gpio_get_direction;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
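The vf610 hunks also narrow the line masks from unsigned long to u32, matching the 32-bit MMIO registers they are written to (unsigned long is 8 bytes on 64-bit builds). Trivial sketch:

	#include <linux/bits.h>
	#include <linux/types.h>

	/* BIT() yields an unsigned long; storing it in a u32 documents the
	 * 32-bit register width. Caller guarantees gpio < 32. */
	static u32 example_mask(unsigned int gpio)
	{
		return BIT(gpio);
	}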
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index ccc47ea0b3e1..91b6352c957c 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -1410,7 +1410,6 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
struct gpio_virtuser_lookup_entry *entry;
struct gpio_virtuser_lookup *lookup;
- struct gpiod_lookup *curr;
unsigned int i = 0;
lockdep_assert_held(&dev->lock);
@@ -1426,14 +1425,10 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
list_for_each_entry(entry, &lookup->entry_list, siblings) {
- curr = &table->table[i];
-
- curr->con_id = lookup->con_id;
- curr->idx = i;
- curr->key = entry->key;
- curr->chip_hwnum = entry->offset < 0 ?
- U16_MAX : entry->offset;
- curr->flags = entry->flags;
+ table->table[i] =
+ GPIO_LOOKUP_IDX(entry->key,
+ entry->offset < 0 ? U16_MAX : entry->offset,
+ lookup->con_id, i, entry->flags);
i++;
}
}
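The open-coded field assignments collapse into GPIO_LOOKUP_IDX() from <linux/gpio/machine.h>, which builds a struct gpiod_lookup in a single initializer. A sketch of a static table using the same macro (all names illustrative); it would be registered with gpiod_add_lookup_table():

	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table example_lookup = {
		.dev_id = "example-device",
		.table = {
			/* key, chip_hwnum, con_id, idx, flags */
			GPIO_LOOKUP_IDX("gpiochip0", 17, "reset", 0, GPIO_ACTIVE_LOW),
			{ }	/* sentinel */
		},
	};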
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 6734e7e1e2a4..ebc71ecdb6cf 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -8,6 +8,7 @@
* Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
*/
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -15,8 +16,8 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
-#include <linux/bitops.h>
/* register offset */
#define GPIO_DIR 0x00
@@ -202,7 +203,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq = &priv->gpio_chip.irq;
gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
- girq->fwnode = of_node_to_fwnode(dev->of_node);
+ girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 7348df385198..afcf432a1573 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -333,12 +333,9 @@ static int __maybe_unused xgpio_suspend(struct device *dev)
*/
static void xgpio_remove(struct platform_device *pdev)
{
- struct xgpio_instance *gpio = platform_get_drvdata(pdev);
-
pm_runtime_get_sync(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(gpio->clk);
}
/**
@@ -644,15 +641,10 @@ static int xgpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
}
- chip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ chip->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(chip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n");
- status = clk_prepare_enable(chip->clk);
- if (status < 0) {
- dev_err(&pdev->dev, "Failed to prepare clk\n");
- return status;
- }
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -699,7 +691,6 @@ skip_irq:
err_pm_put:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(chip->clk);
return status;
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 466e23031afc..1a42336dfc1d 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -940,16 +940,10 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->ngpio = gpio->p_data->ngpio;
/* Retrieve GPIO clock */
- gpio->clk = devm_clk_get(&pdev->dev, NULL);
+ gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(gpio->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(gpio->clk), "input clock not found.\n");
- ret = clk_prepare_enable(gpio->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
- return ret;
- }
-
spin_lock_init(&gpio->dirlock);
pm_runtime_set_active(&pdev->dev);
@@ -999,7 +993,6 @@ err_pm_put:
pm_runtime_put(&pdev->dev);
err_pm_dis:
pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(gpio->clk);
return ret;
}
@@ -1019,7 +1012,6 @@ static void zynq_gpio_remove(struct platform_device *pdev)
if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
gpiochip_remove(&gpio->chip);
- clk_disable_unprepare(gpio->clk);
device_set_wakeup_capable(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 69cd2be9c7f3..78ecd56123a3 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -153,8 +153,12 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
* @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
* @pin: ACPI GPIO pin number (0-based, controller-relative)
*
- * Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR
- * error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO
+ * Returns:
+ * GPIO descriptor to use with Linux generic GPIO API.
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
+ * returned.
+ *
+ * Specifically returns %-EPROBE_DEFER if the referenced GPIO
* controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
@@ -224,6 +228,9 @@ EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
* I/O resource or return False if not.
* @ares: Pointer to the ACPI resource to fetch
* @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer
+ *
+ * Returns:
+ * %true if GpioIo resource is found, %false otherwise.
*/
bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio)
@@ -876,7 +883,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
- * If the GPIO cannot be translated or there is an error, an ERR_PTR is
+ * Returns:
+ * GPIO descriptor to use with Linux generic GPIO API.
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
* returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
@@ -924,6 +933,8 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
* resource with the relevant information from a data-only ACPI firmware node
* and uses that to obtain the GPIO descriptor to return.
*
+ * Returns:
+ * GPIO descriptor to use with Linux generic GPIO API.
 * If the GPIO cannot be translated or there is an error, an ERR_PTR is
* returned.
*/
@@ -973,18 +984,9 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
struct acpi_device *adev = to_acpi_device_node(fwnode);
struct gpio_desc *desc;
char propname[32];
- int i;
/* Try first from _DSD */
- for (i = 0; i < gpio_suffix_count; i++) {
- if (con_id) {
- snprintf(propname, sizeof(propname), "%s-%s",
- con_id, gpio_suffixes[i]);
- } else {
- snprintf(propname, sizeof(propname), "%s",
- gpio_suffixes[i]);
- }
-
+ for_each_gpio_property_name(propname, con_id) {
if (adev)
desc = acpi_get_gpiod_by_index(adev,
propname, idx, info);
@@ -1051,7 +1053,8 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
* The GPIO is considered wake capable if the GpioInt resource specifies
* SharedAndWake or ExclusiveAndWake.
*
- * Return: Linux IRQ number (> %0) on success, negative errno on failure.
+ * Returns:
+ * Linux IRQ number (> 0) on success, negative errno on failure.
*/
int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
bool *wake_capable)
@@ -1438,7 +1441,7 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
* @fwnode: firmware node of the GPIO consumer
* @con_id: function within the GPIO consumer
*
- * Return:
+ * Returns:
* The number of GPIOs associated with a firmware node / function or %-ENOENT,
* if no GPIO has been assigned to the requested function.
*/
@@ -1450,17 +1453,9 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
int count = -ENOENT;
int ret;
char propname[32];
- unsigned int i;
/* Try first from _DSD */
- for (i = 0; i < gpio_suffix_count; i++) {
- if (con_id)
- snprintf(propname, sizeof(propname), "%s-%s",
- con_id, gpio_suffixes[i]);
- else
- snprintf(propname, sizeof(propname), "%s",
- gpio_suffixes[i]);
-
+ for_each_gpio_property_name(propname, con_id) {
ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
if (ret == 0) {
if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
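Both suffix loops above become for_each_gpio_property_name(), a helper added alongside the NULL-terminated gpio_suffixes[] array visible near the end of this series in gpiolib.c. Its real definition lives in drivers/gpio/gpiolib.h and is not part of this excerpt; the following is a speculative reconstruction only, assuming it formats "<con_id>-gpios" / "<con_id>-gpio" (or the bare suffix when con_id is NULL) into the caller's propname buffer:

	#include <linux/kernel.h>	/* snprintf() */

	/* Hypothetical sketch -- the in-tree macro may differ in detail. */
	extern const char *const gpio_suffixes[];	/* { "gpios", "gpio", NULL } */

	#define for_each_gpio_property_name(propname, con_id)			\
		for (const char *const *__sfx = gpio_suffixes;			\
		     *__sfx && (snprintf(propname, sizeof(propname), "%s%s%s",	\
					 (con_id) ?: "", (con_id) ? "-" : "",	\
					 *__sfx), 1);				\
		     __sfx++)

Note that sizeof(propname) works here because every caller declares propname as a fixed char[32] array rather than a pointer.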
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index ef08b23a56e2..5aac59de0d76 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2748,7 +2748,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
* @file: file struct for storing private data
- * Returns 0 on success
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
@@ -2814,7 +2816,9 @@ out_free_cdev:
* gpio_chrdev_release() - close chardev after ioctl operations
* @inode: inode for this chardev
* @file: file struct for storing private data
- * Returns 0 on success
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4987e62dcb3d..08205f355ceb 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -6,15 +6,19 @@
* Copyright (c) 2011 John Crispin <john@phrozen.org>
*/
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/types.h>
+
+#include <linux/gpio/consumer.h>
#include "gpiolib.h"
+struct fwnode_handle;
+struct lock_class_key;
+
static void devm_gpiod_release(struct device *dev, void *res)
{
struct gpio_desc **desc = res;
@@ -52,6 +56,11 @@ static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
* Managed gpiod_get(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get() for detailed
* information about behavior and return values.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, %-ENOENT if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -70,6 +79,11 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get);
* Managed gpiod_get_optional(). GPIO descriptors returned from this function
* are automatically disposed on driver detach. See gpiod_get_optional() for
* detailed information about behavior and return values.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, NULL if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
@@ -89,6 +103,11 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_optional);
* Managed gpiod_get_index(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get_index() for detailed
* information about behavior and return values.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, %-ENOENT if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
@@ -141,8 +160,10 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
*
- * On successful request the GPIO pin is configured in accordance with
- * provided @flags.
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, %-ENOENT if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
struct fwnode_handle *fwnode,
@@ -182,6 +203,11 @@ EXPORT_SYMBOL_GPL(devm_fwnode_gpiod_get_index);
* function are automatically disposed on driver detach. See
* gpiod_get_index_optional() for detailed information about behavior and
* return values.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, %NULL if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
const char *con_id,
@@ -207,6 +233,12 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_index_optional);
* Managed gpiod_get_array(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get_array() for detailed
* information about behavior and return values.
+ *
+ * Returns:
+ * The GPIO descriptors corresponding to the function @con_id of device
+ * dev, %-ENOENT if no GPIO has been assigned to the requested function,
+ * or another IS_ERR() code if an error occurred while trying to acquire
+ * the GPIOs.
*/
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
@@ -243,6 +275,12 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array);
* function are automatically disposed on driver detach.
* See gpiod_get_array_optional() for detailed information about behavior and
* return values.
+ *
+ * Returns:
+ * The GPIO descriptors corresponding to the function @con_id of device
+ * dev, %NULL if no GPIO has been assigned to the requested function,
+ * or another IS_ERR() code if an error occurred while trying to acquire
+ * the GPIOs.
*/
struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
@@ -320,76 +358,6 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
-static void devm_gpio_release(struct device *dev, void *res)
-{
- unsigned *gpio = res;
-
- gpio_free(*gpio);
-}
-
-/**
- * devm_gpio_request - request a GPIO for a managed device
- * @dev: device to request the GPIO for
- * @gpio: GPIO to allocate
- * @label: the name of the requested GPIO
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as
- * gpio_request(). GPIOs requested with this function will be
- * automatically freed on driver detach.
- */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
-{
- unsigned *dr;
- int rc;
-
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
- rc = gpio_request(gpio, label);
- if (rc) {
- devres_free(dr);
- return rc;
- }
-
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpio_request);
-
-/**
- * devm_gpio_request_one - request a single GPIO with initial setup
- * @dev: device to request for
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
- */
-int devm_gpio_request_one(struct device *dev, unsigned gpio,
- unsigned long flags, const char *label)
-{
- unsigned *dr;
- int rc;
-
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
- rc = gpio_request_one(gpio, flags, label);
- if (rc) {
- devres_free(dr);
- return rc;
- }
-
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpio_request_one);
-
static void devm_gpio_chip_release(void *data)
{
struct gpio_chip *gc = data;
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 5a9911ae9125..28f1046fb670 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -1,4 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -22,6 +28,9 @@ EXPORT_SYMBOL_GPL(gpio_free);
* @label: a literal description string of this GPIO
*
* **DEPRECATED** This function is deprecated and must not be used in new code.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
@@ -40,11 +49,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (flags & GPIOF_DIR_IN)
+ if (flags & GPIOF_IN)
err = gpiod_direction_input(desc);
else
- err = gpiod_direction_output_raw(desc,
- (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+ err = gpiod_direction_output_raw(desc, !!(flags & GPIOF_OUT_INIT_HIGH));
if (err)
goto free_gpio;
@@ -72,3 +80,83 @@ int gpio_request(unsigned gpio, const char *label)
return gpiod_request(desc, label);
}
EXPORT_SYMBOL_GPL(gpio_request);
+
+static void devm_gpio_release(struct device *dev, void *res)
+{
+ unsigned *gpio = res;
+
+ gpio_free(*gpio);
+}
+
+/**
+ * devm_gpio_request - request a GPIO for a managed device
+ * @dev: device to request the GPIO for
+ * @gpio: GPIO to allocate
+ * @label: the name of the requested GPIO
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as gpio_request().
+ * GPIOs requested with this function will be automatically freed
+ * on driver detach.
+ *
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
+ */
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request(gpio, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpio_request);
+
+/**
+ * devm_gpio_request_one - request a single GPIO with initial setup
+ * @dev: device to request for
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ *
+ * **DEPRECATED** This function is deprecated and must not be used in new code.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
+ */
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request_one(gpio, flags, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpio_request_one);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index f6af5e7be4d1..880f1efcaca5 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,16 +46,19 @@ enum of_gpio_flags {
* @propname: property name containing gpio specifier(s)
*
* The function returns the count of GPIOs specified for a node.
- * Note that the empty GPIO specifiers count too. Returns either
- * Number of gpios defined in property,
- * -EINVAL for an incorrectly formed gpios property, or
- * -ENOENT for a missing gpios property
+ * NOTE: The empty GPIO specifiers count too.
*
- * Example:
- * gpios = <0
- * &gpio1 1 2
- * 0
- * &gpio2 3 4>;
+ * Returns:
+ * Either number of GPIOs defined in the property, or
+ * * %-EINVAL for an incorrectly formed "gpios" property, or
+ * * %-ENOENT for a missing "gpios" property.
+ *
+ * Example::
+ *
+ * gpios = <0
+ * &gpio1 1 2
+ * 0
+ * &gpio2 3 4>;
*
* The above example defines four GPIOs, two of which are not specified.
 * This function will return '4'.
@@ -77,6 +80,11 @@ static int of_gpio_named_count(const struct device_node *np,
* "gpios" for the chip select lines. If we detect this, we redirect
* the counting of "cs-gpios" to count "gpios" transparent to the
* driver.
+ *
+ * Returns:
+ * Either number of GPIOs defined in the property, or
+ * * %-EINVAL for an incorrectly formed "gpios" property, or
+ * * %-ENOENT for a missing "gpios" property.
*/
static int of_gpio_spi_cs_get_count(const struct device_node *np,
const char *con_id)
@@ -97,20 +105,12 @@ int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
const struct device_node *np = to_of_node(fwnode);
int ret;
char propname[32];
- unsigned int i;
ret = of_gpio_spi_cs_get_count(np, con_id);
if (ret > 0)
return ret;
- for (i = 0; i < gpio_suffix_count; i++) {
- if (con_id)
- snprintf(propname, sizeof(propname), "%s-%s",
- con_id, gpio_suffixes[i]);
- else
- snprintf(propname, sizeof(propname), "%s",
- gpio_suffixes[i]);
-
+ for_each_gpio_property_name(propname, con_id) {
ret = of_gpio_named_count(np, propname);
if (ret > 0)
break;
@@ -338,11 +338,10 @@ static void of_gpio_flags_quirks(const struct device_node *np,
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
of_property_read_bool(np, "cs-gpios")) {
- struct device_node *child;
u32 cs;
int ret;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = of_property_read_u32(child, "reg", &cs);
if (ret)
continue;
@@ -363,7 +362,6 @@ static void of_gpio_flags_quirks(const struct device_node *np,
"spi-cs-high");
of_gpio_quirk_polarity(child, active_high,
flags);
- of_node_put(child);
break;
}
}
@@ -383,7 +381,8 @@ static void of_gpio_flags_quirks(const struct device_node *np,
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
- * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
+ * Returns:
+ * GPIO descriptor to use with Linux GPIO API, or one of the errno
 * values on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
@@ -435,7 +434,8 @@ out:
*
* **DEPRECATED** This function is deprecated and must not be used in new code.
*
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
+ * Returns:
+ * GPIO number to use with Linux generic GPIO API, or one of the errno
 * values on the error condition.
*/
int of_get_named_gpio(const struct device_node *np, const char *propname,
@@ -687,23 +687,14 @@ static const of_find_gpio_quirk of_find_gpio_quirks[] = {
struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id,
unsigned int idx, unsigned long *flags)
{
- char prop_name[32]; /* 32 is max size of property name */
+ char propname[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
const of_find_gpio_quirk *q;
struct gpio_desc *desc;
- unsigned int i;
/* Try GPIO property "foo-gpios" and "foo-gpio" */
- for (i = 0; i < gpio_suffix_count; i++) {
- if (con_id)
- snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
- gpio_suffixes[i]);
- else
- snprintf(prop_name, sizeof(prop_name), "%s",
- gpio_suffixes[i]);
-
- desc = of_get_named_gpiod_flags(np, prop_name, idx, &of_flags);
-
+ for_each_gpio_property_name(propname, con_id) {
+ desc = of_get_named_gpiod_flags(np, propname, idx, &of_flags);
if (!gpiod_not_found(desc))
break;
}
@@ -730,7 +721,8 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id,
* of_find_gpio() or of_parse_own_gpio()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
- * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
+ * Returns:
+ * GPIO descriptor to use with Linux GPIO API, or one of the errno
 * values on the error condition.
*/
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
@@ -798,7 +790,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
* @chip: gpio chip to act on
* @hog: device node describing the hogs
*
- * Returns error if it fails otherwise 0 on success.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog)
{
@@ -832,22 +825,21 @@ static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog)
*
* This is only used by of_gpiochip_add to request/set GPIO initial
* configuration.
- * It returns error if it fails otherwise 0 on success.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
- struct device_node *np;
int ret;
- for_each_available_child_of_node(dev_of_node(&chip->gpiodev->dev), np) {
+ for_each_available_child_of_node_scoped(dev_of_node(&chip->gpiodev->dev), np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
ret = of_gpiochip_add_hog(chip, np);
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
return ret;
- }
of_node_set_flag(np, OF_POPULATED);
}
@@ -945,6 +937,9 @@ struct notifier_block gpio_of_notifier = {
 * This is a simple translation function, suitable for most 1:1 mapped
 * GPIO chips. This function performs only one sanity check: whether the
 * GPIO number is less than ngpios (as specified in the gpio_chip).
+ *
+ * Returns:
+ * GPIO number (>= 0) on success, negative errno on failure.
*/
static int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
@@ -994,6 +989,9 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
 * On success, this function will map the bank's memory and will
 * do all necessary work for you. Then you'll be able to use .regs
* to manage GPIOs from the callbacks.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int of_mm_gpiochip_add_data(struct device_node *np,
struct of_mm_gpio_chip *mm_gc,
@@ -1058,13 +1056,13 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
int index = 0, ret, trim;
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
- struct property *group_names;
+ bool has_group_names;
np = dev_of_node(&chip->gpiodev->dev);
if (!np)
return 0;
- group_names = of_find_property(np, group_names_propname, NULL);
+ has_group_names = of_property_present(np, group_names_propname);
for (;; index++) {
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
@@ -1085,7 +1083,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (pinspec.args[2]) {
/* npins != 0: linear range */
- if (group_names) {
+ if (has_group_names) {
of_property_read_string_index(np,
group_names_propname,
index, &name);
@@ -1123,7 +1121,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
break;
}
- if (!group_names) {
+ if (!has_group_names) {
pr_err("%pOF: GPIO group range requested but no %s property.\n",
np, group_names_propname);
break;
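for_each_child_of_node_scoped() and for_each_available_child_of_node_scoped(), used above, declare the child pointer inside the loop and drop its reference automatically on every exit path (including break and return), which is why the explicit of_node_put() calls disappear. A sketch with an invented count_hog_nodes() helper:

	#include <linux/of.h>

	/* Count children marked as GPIO hogs; the scoped iterator puts the
	 * child reference even when the loop exits early. */
	static int count_hog_nodes(struct device_node *np)
	{
		int count = 0;

		for_each_available_child_of_node_scoped(np, child)
			if (of_property_read_bool(child, "gpio-hog"))
				count++;

		return count;
	}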
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index cec1ab878af8..2b2dd7e92211 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -24,20 +24,6 @@
#define GPIOLIB_SWNODE_UNDEFINED_NAME "swnode-gpio-undefined"
-static void swnode_format_propname(const char *con_id, char *propname,
- size_t max_size)
-{
- /*
- * Note we do not need to try both -gpios and -gpio suffixes,
- * as, unlike OF and ACPI, we can fix software nodes to conform
- * to the proper binding.
- */
- if (con_id)
- snprintf(propname, max_size, "%s-gpios", con_id);
- else
- strscpy(propname, "gpios", max_size);
-}
-
static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
{
const struct software_node *gdev_node;
@@ -59,6 +45,17 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}
+static int swnode_gpio_get_reference(const struct fwnode_handle *fwnode,
+ const char *propname, unsigned int idx,
+ struct fwnode_reference_args *args)
+{
+ /*
+	 * We expect all swnode-described GPIOs to have GPIO number and
+ * polarity arguments, hence nargs is set to 2.
+ */
+ return fwnode_property_get_reference_args(fwnode, propname, NULL, 2, idx, args);
+}
+
struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
const char *con_id, unsigned int idx,
unsigned long *flags)
@@ -67,23 +64,21 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
struct fwnode_reference_args args;
struct gpio_desc *desc;
char propname[32]; /* 32 is max size of property name */
- int error;
+ int ret;
swnode = to_software_node(fwnode);
if (!swnode)
return ERR_PTR(-EINVAL);
- swnode_format_propname(con_id, propname, sizeof(propname));
-
- /*
- * We expect all swnode-described GPIOs have GPIO number and
- * polarity arguments, hence nargs is set to 2.
- */
- error = fwnode_property_get_reference_args(fwnode, propname, NULL, 2, idx, &args);
- if (error) {
+ for_each_gpio_property_name(propname, con_id) {
+ ret = swnode_gpio_get_reference(fwnode, propname, idx, &args);
+ if (ret == 0)
+ break;
+ }
+ if (ret) {
pr_debug("%s: can't parse '%s' property of node '%pfwP[%d]'\n",
__func__, propname, fwnode, idx);
- return ERR_PTR(error);
+ return ERR_PTR(ret);
}
struct gpio_device *gdev __free(gpio_device_put) =
@@ -111,7 +106,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
* system-global GPIOs
* @con_id: function within the GPIO consumer
*
- * Return:
+ * Returns:
* The number of GPIOs associated with a device / function or %-ENOENT,
* if no GPIO has been assigned to the requested function.
*/
@@ -121,20 +116,21 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
char propname[32];
int count;
- swnode_format_propname(con_id, propname, sizeof(propname));
-
/*
* This is not very efficient, but GPIO lists usually have only
* 1 or 2 entries.
*/
- count = 0;
- while (fwnode_property_get_reference_args(fwnode, propname, NULL, 0,
- count, &args) == 0) {
- fwnode_handle_put(args.fwnode);
- count++;
+ for_each_gpio_property_name(propname, con_id) {
+ count = 0;
+ while (swnode_gpio_get_reference(fwnode, propname, count, &args) == 0) {
+ fwnode_handle_put(args.fwnode);
+ count++;
+ }
+ if (count)
+ return count;
}
- return count ?: -ENOENT;
+ return -ENOENT;
}
#if IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED)
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 26202586fd39..17ed229412af 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -568,7 +568,8 @@ static struct class gpio_class = {
* will see "direction" sysfs attribute which may be used to change
* the gpio's direction. A "value" attribute will always be provided.
*
- * Returns zero on success, else an error.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
@@ -667,7 +668,8 @@ static int match_export(struct device *dev, const void *desc)
* Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
* node. Caller is responsible for unlinking.
*
- * Returns zero on success, else an error.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3a9668cc100d..c6afbf434366 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -90,8 +90,7 @@ DEFINE_STATIC_SRCU(gpio_devices_srcu);
static DEFINE_MUTEX(gpio_machine_hogs_mutex);
static LIST_HEAD(gpio_machine_hogs);
-const char *const gpio_suffixes[] = { "gpios", "gpio" };
-const size_t gpio_suffix_count = ARRAY_SIZE(gpio_suffixes);
+const char *const gpio_suffixes[] = { "gpios", "gpio", NULL };
static void gpiochip_free_hogs(struct gpio_chip *gc);
static int gpiochip_add_irqchip(struct gpio_chip *gc,
@@ -231,6 +230,9 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
* This function is unsafe and should not be used. Using the chip address
* without taking the SRCU read lock may result in dereferencing a dangling
* pointer.
+ *
+ * Returns:
+ * Address of the GPIO chip backing this device.
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
@@ -343,7 +345,8 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
*
- * Returns 0 for output, 1 for input, or an error code in case of error.
+ * Returns:
+ * 0 for output, 1 for input, or an error code in case of error.
*
* This function may sleep if gpiod_cansleep() is true.
*/
@@ -357,7 +360,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
* We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
* descriptor like we usually do.
*/
- if (!desc || IS_ERR(desc))
+ if (IS_ERR_OR_NULL(desc))
return -EINVAL;
CLASS(gpio_chip_guard, guard)(desc);
@@ -400,8 +403,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction);
* Add a new chip to the global chips list, keeping the list of chips sorted
 * by range (meaning [base, base + ngpio - 1]) order.
*
- * Return -EBUSY if the new chip overlaps with some other chip's integer
- * space.
+ * Returns:
+ * -EBUSY if the new chip overlaps with some other chip's integer space.
*/
static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
{
@@ -1517,6 +1520,9 @@ static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc,
* This function is a wrapper that calls gpiochip_lock_as_irq() and is to be
* used as the activate function for the &struct irq_domain_ops. The host_data
* for the IRQ domain must be the &struct gpio_chip.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int gpiochip_irq_domain_activate(struct irq_domain *domain,
struct irq_data *data, bool reserve)
@@ -1661,6 +1667,9 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
* This function will set up the mapping for a certain IRQ line on a
* gpiochip by assigning the gpiochip as chip data, and using the irqchip
* stored inside the gpiochip.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
@@ -1895,6 +1904,9 @@ static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc,
* @gc: the GPIO chip to add the IRQ chip to
* @lock_key: lockdep class for IRQ lock
* @request_key: lockdep class for IRQ request
+ *
+ * Returns:
+ * 0 on success, or a negative errno on failure.
*/
static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
@@ -2030,6 +2042,9 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
* @domain: the irqdomain to add to the gpiochip
*
* This function adds an IRQ domain to the gpiochip.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain)
@@ -2066,6 +2081,9 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
* gpiochip_generic_request() - request the gpio function for a pin
* @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to request for GPIO function
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
{
@@ -2099,6 +2117,9 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
* @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to apply the configuration
* @config: the configuration to be applied
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
@@ -2125,6 +2146,9 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
* pinctrl driver is DEPRECATED. Please see Section 2.1 of
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
@@ -2176,13 +2200,13 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
*
- * Returns:
- * 0 on success, or a negative error-code on failure.
- *
* Calling this function directly from a DeviceTree-supported
* pinctrl driver is DEPRECATED. Please see Section 2.1 of
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
+ *
+ * Returns:
+ * 0 on success, or a negative errno on failure.
*/
int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
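A hedged sketch of a combined pinctrl/GPIO driver registering such a range; the pinctrl device name and the offsets are hypothetical:

        /* Map GPIOs 0..15 of this chip onto pins 32..47 of "foo-pinctrl". */
        static int foo_add_gpio_range(struct gpio_chip *gc)
        {
                return gpiochip_add_pin_range(gc, "foo-pinctrl",
                                              0,    /* gpio_offset */
                                              32,   /* pin_offset */
                                              16);  /* npins */
        }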
@@ -2586,7 +2610,8 @@ static int gpio_set_bias(struct gpio_desc *desc)
* The function calls the certain GPIO driver to set debounce timeout
* in the hardware.
*
- * Returns 0 on success, or negative error code otherwise.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
@@ -2602,7 +2627,8 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
* Set the direction of the passed GPIO to input, so that gpiod_get_value() can
* be called safely on it.
*
- * Return 0 in case of success, else an error code.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
@@ -2709,7 +2735,8 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* be called safely on it. The initial value of the output must be specified
* as raw value on the physical line without regard for the ACTIVE_LOW status.
*
- * Return 0 in case of success, else an error code.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
@@ -2728,7 +2755,8 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
* as the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
* account.
*
- * Return 0 in case of success, else an error code.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
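The two kernel-doc blocks above draw the logical/physical split: gpiod_direction_output() honours ACTIVE_LOW, gpiod_direction_output_raw() does not. A minimal sketch, assuming the descriptor was requested elsewhere and flagged ACTIVE_LOW:

        /* On an ACTIVE_LOW line, these two calls drive opposite physical levels. */
        static int example_assert_line(struct gpio_desc *desc)
        {
                int ret;

                ret = gpiod_direction_output(desc, 1);      /* logical 1: line low */
                if (ret)
                        return ret;

                return gpiod_direction_output_raw(desc, 1); /* physical 1: line high */
        }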
@@ -2801,7 +2829,8 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
* @desc: GPIO to enable.
* @flags: Flags related to GPIO edge.
*
- * Return 0 in case of success, else negative error code.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
@@ -2833,7 +2862,8 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
* @desc: GPIO to disable.
* @flags: Flags related to GPIO edge, same value as used during enable call.
*
- * Return 0 in case of success, else negative error code.
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
@@ -2925,7 +2955,8 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
* gpiod_is_active_low - test whether a GPIO is active-low or not
* @desc: the gpio descriptor to test
*
- * Returns 1 if the GPIO is active-low, 0 otherwise.
+ * Returns:
+ * 1 if the GPIO is active-low, 0 otherwise.
*/
int gpiod_is_active_low(const struct gpio_desc *desc)
{
@@ -3140,7 +3171,8 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
* gpiod_get_raw_value() - return a gpio's raw value
* @desc: gpio whose value will be returned
*
- * Return the GPIO's raw value, i.e. the value of the physical line disregarding
+ * Returns:
+ * The GPIO's raw value, i.e. the value of the physical line disregarding
* its ACTIVE_LOW status, or negative errno on failure.
*
* This function can be called from contexts where we cannot sleep, and will
@@ -3159,7 +3191,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
* gpiod_get_value() - return a gpio's value
* @desc: gpio whose value will be returned
*
- * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * Returns:
+ * The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
* account, or negative errno on failure.
*
* This function can be called from contexts where we cannot sleep, and will
@@ -3192,11 +3225,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
* @value_bitmap: bitmap to store the read values
*
* Read the raw values of the GPIOs, i.e. the values of the physical lines
- * without regard for their ACTIVE_LOW status. Return 0 in case of success,
- * else an error code.
+ * without regard for their ACTIVE_LOW status.
*
* This function can be called from contexts where we cannot sleep,
* and it will complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3219,10 +3254,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
* @value_bitmap: bitmap to store the read values
*
* Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
- * into account. Return 0 in case of success, else an error code.
+ * into account.
*
* This function can be called from contexts where we cannot sleep,
* and it will complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
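A hedged sketch of the array read path documented above; the bank handle is assumed to come from gpiod_get_array() and to hold at most 8 lines:

        #include <linux/bitmap.h>
        #include <linux/bitops.h>
        #include <linux/gpio/consumer.h>

        static int example_read_bank(struct gpio_descs *bank)
        {
                DECLARE_BITMAP(values, 8);
                int ret;

                ret = gpiod_get_array_value(bank->ndescs, bank->desc,
                                            bank->info, values);
                if (ret)
                        return ret;             /* negative errno on failure */

                return test_bit(0, values);     /* logical value of line 0 */
        }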
@@ -3510,6 +3548,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_value);
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3535,6 +3576,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3553,6 +3597,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value);
* gpiod_cansleep() - report whether gpio value access may sleep
* @desc: gpio to check
*
+ * Returns:
+ * 0 for non-sleepable, 1 for sleepable, or a negative error code on failure.
*/
int gpiod_cansleep(const struct gpio_desc *desc)
{
@@ -3565,6 +3611,9 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
* gpiod_set_consumer_name() - set the consumer name for the descriptor
* @desc: gpio to set the consumer name on
* @name: the new consumer name
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
@@ -3578,8 +3627,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
- * Return the IRQ corresponding to the passed GPIO, or an error code in case of
- * error.
+ * Returns:
+ * The IRQ corresponding to the passed GPIO, or a negative error code on failure.
*/
int gpiod_to_irq(const struct gpio_desc *desc)
{
@@ -3592,7 +3641,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
* requires this function to not return zero on an invalid descriptor
* but rather a negative error number.
*/
- if (!desc || IS_ERR(desc))
+ if (IS_ERR_OR_NULL(desc))
return -EINVAL;
gdev = desc->gdev;
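As with gpiod_get_direction(), the IS_ERR_OR_NULL() form preserves the requirement spelled out in the comment above: an invalid descriptor must produce a negative error number, not zero. A hedged consumer sketch (the handler and name string are hypothetical):

        #include <linux/gpio/consumer.h>
        #include <linux/interrupt.h>

        static irqreturn_t example_handler(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int example_request_gpio_irq(struct gpio_desc *desc)
        {
                int irq = gpiod_to_irq(desc);

                if (irq < 0)
                        return irq;     /* e.g. -EINVAL for a bad descriptor */

                return request_irq(irq, example_handler, IRQF_TRIGGER_FALLING,
                                   "example-gpio", NULL);
        }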
@@ -3633,6 +3682,9 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
*
* This is used directly by GPIO drivers that want to lock down
* a certain GPIO line to be used for IRQs.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
{
@@ -3784,7 +3836,8 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
* gpiod_get_raw_value_cansleep() - return a gpio's raw value
* @desc: gpio whose value will be returned
*
- * Return the GPIO's raw value, i.e. the value of the physical line disregarding
+ * Returns:
+ * The GPIO's raw value, i.e. the value of the physical line disregarding
* its ACTIVE_LOW status, or negative errno on failure.
*
* This function is to be called from contexts that can sleep.
@@ -3801,7 +3854,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
* gpiod_get_value_cansleep() - return a gpio's value
* @desc: gpio whose value will be returned
*
- * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * Returns:
+ * The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
* account, or negative errno on failure.
*
* This function is to be called from contexts that can sleep.
@@ -3831,10 +3885,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
* @value_bitmap: bitmap to store the read values
*
* Read the raw values of the GPIOs, i.e. the values of the physical lines
- * without regard for their ACTIVE_LOW status. Return 0 in case of success,
- * else an error code.
+ * without regard for their ACTIVE_LOW status.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3858,9 +3914,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep);
* @value_bitmap: bitmap to store the read values
*
* Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
- * into account. Return 0 in case of success, else an error code.
+ * into account.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3923,6 +3982,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
* without regard for their ACTIVE_LOW status.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -3965,6 +4027,9 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
* into account.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -4298,9 +4363,12 @@ EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index);
/**
* gpiod_count - return the number of GPIOs associated with a device / function
- * or -ENOENT if no GPIO has been assigned to the requested function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ *
+ * Returns:
+ * The number of GPIOs associated with a device / function or -ENOENT if no
+ * GPIO has been assigned to the requested function.
*/
int gpiod_count(struct device *dev, const char *con_id)
{
@@ -4327,7 +4395,8 @@ EXPORT_SYMBOL_GPL(gpiod_count);
* @con_id: function within the GPIO consumer
* @flags: optional GPIO initialization flags
*
- * Return the GPIO descriptor corresponding to the function con_id of device
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
* dev, -ENOENT if no GPIO has been assigned to the requested function, or
* another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
@@ -4347,6 +4416,11 @@ EXPORT_SYMBOL_GPL(gpiod_get);
* This is equivalent to gpiod_get(), except that when no GPIO was assigned to
* the requested function it will return NULL. This is convenient for drivers
* that need to handle optional GPIOs.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the function @con_id of device
+ * dev, NULL if no GPIO has been assigned to the requested function, or
+ * an IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
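A hedged sketch of the optional pattern the kernel-doc above describes; "reset" is a hypothetical consumer function name and the timing values are placeholders:

        #include <linux/delay.h>
        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/gpio/consumer.h>

        static int example_optional_reset(struct device *dev)
        {
                struct gpio_desc *reset;

                reset = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
                if (IS_ERR(reset))
                        return PTR_ERR(reset);  /* a real lookup/acquire error */
                if (!reset)
                        return 0;               /* line simply not wired up */

                usleep_range(100, 200);         /* hold reset asserted briefly */
                gpiod_set_value(reset, 0);      /* deassert (logical value) */
                gpiod_put(reset);
                return 0;
        }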
@@ -4365,7 +4439,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
- * Return 0 on success, -ENOENT if no GPIO has been assigned to the
+ * Returns:
+ * 0 on success, -ENOENT if no GPIO has been assigned to the
* requested function and/or index, or another IS_ERR() code if an error
* occurred while trying to acquire the GPIO.
*/
@@ -4440,7 +4515,8 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* This variant of gpiod_get() allows to access GPIOs other than the first
* defined one for functions that define several GPIOs.
*
- * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the
+ * Returns:
+ * A valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the
* requested function and/or index, or another IS_ERR() code if an error
* occurred while trying to acquire the GPIO.
*/
@@ -4468,6 +4544,11 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* This is equivalent to gpiod_get_index(), except that when no GPIO with the
* specified index was assigned to the requested function it will return NULL.
* This is convenient for drivers that need to handle optional GPIOs.
+ *
+ * Returns:
+ * A valid GPIO descriptor, NULL if no GPIO has been assigned to the
+ * requested function and/or index, or an IS_ERR() code if an error
+ * occurred while trying to acquire the GPIO.
*/
struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
@@ -4491,6 +4572,9 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
* @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
* of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
+ *
+ * Returns:
+ * 0 on success, or negative errno on failure.
*/
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags)
@@ -4547,9 +4631,11 @@ static void gpiochip_free_hogs(struct gpio_chip *gc)
*
* This function acquires all the GPIOs defined under a given function.
*
- * Return a struct gpio_descs containing an array of descriptors, -ENOENT if
- * no GPIO has been assigned to the requested function, or another IS_ERR()
- * code if an error occurred while trying to acquire the GPIOs.
+ * Returns:
+ * The GPIO descriptors corresponding to the function @con_id of device
+ * dev, -ENOENT if no GPIO has been assigned to the requested function,
+ * or another IS_ERR() code if an error occurred while trying to acquire
+ * the GPIOs.
*/
struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
const char *con_id,
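A hedged acquisition sketch for the array API documented above; "led" is a hypothetical consumer function name:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/gpio/consumer.h>

        static int example_get_leds(struct device *dev)
        {
                struct gpio_descs *leds;

                leds = gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
                if (IS_ERR(leds))
                        return PTR_ERR(leds);   /* e.g. -ENOENT if none assigned */

                dev_info(dev, "acquired %u LED lines\n", leds->ndescs);
                gpiod_put_array(leds);
                return 0;
        }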
@@ -4675,6 +4761,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_array);
*
* This is equivalent to gpiod_get_array(), except that when no GPIO was
* assigned to the requested function it will return NULL.
+ *
+ * Returns:
+ * The GPIO descriptors corresponding to the function @con_id of device
+ * dev, NULL if no GPIO has been assigned to the requested function,
+ * or an IS_ERR() code if an error occurred while trying to acquire
+ * the GPIOs.
*/
struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
const char *con_id,
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 4de0bf1a62d3..067197d61d57 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -89,9 +89,21 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
return container_of(dev, struct gpio_device, dev);
}
-/* gpio suffixes used for ACPI and device tree lookup */
+/* GPIO suffixes used for ACPI and device tree lookup */
extern const char *const gpio_suffixes[];
-extern const size_t gpio_suffix_count;
+
+#define for_each_gpio_property_name(propname, con_id) \
+ for (const char * const *__suffixes = gpio_suffixes; \
+ *__suffixes && ({ \
+ const char *__gs = *__suffixes; \
+ \
+ if (con_id) \
+ snprintf(propname, sizeof(propname), "%s-%s", con_id, __gs); \
+ else \
+ snprintf(propname, sizeof(propname), "%s", __gs); \
+ 1; \
+ }); \
+ __suffixes++)
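A hedged sketch of how a lookup path might drive the new iterator; the 32-byte buffer size and the fwnode/con_id handling are assumptions, not part of this patch. Note that the buffer must be a real array, since the macro applies sizeof() to it:

        #include <linux/property.h>

        static bool example_gpio_prop_exists(struct fwnode_handle *fwnode,
                                             const char *con_id)
        {
                /* Must be an array: the macro uses sizeof(propname). */
                char propname[32];

                for_each_gpio_property_name(propname, con_id) {
                        /* Yields e.g. "reset-gpios", "reset-gpio", ... or the
                         * bare suffixes when con_id is NULL. */
                        if (fwnode_property_present(fwnode, propname))
                                return true;
                }

                return false;
        }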
/**
* struct gpio_array - Opaque descriptor for a structure of GPIO array attributes
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6b2c6b91f962..1cb5a4f19293 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -107,7 +107,7 @@ config DRM_KMS_HELPER
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
- depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
+ depends on DRM
select FONT_SUPPORT
help
Enable a drm panic handler, which will display a user-friendly message
@@ -149,6 +149,37 @@ config DRM_PANIC_SCREEN
or by writing to /sys/module/drm/parameters/panic_screen sysfs entry
Default is "user"
+config DRM_PANIC_SCREEN_QR_CODE
+ bool "Add a panic screen with a QR code"
+ depends on DRM_PANIC && RUST
+ help
+ This option adds a QR code generator, and a panic screen with a QR
+ code. The QR code will contain the last lines of kmsg and other debug
+ information. This should make it easier for the user to report a kernel
+ panic, with all debug information available.
+ To use this panic screen, also set DRM_PANIC_SCREEN to "qr_code".
+
+config DRM_PANIC_SCREEN_QR_CODE_URL
+ string "Base URL of the QR code in the panic screen"
+ depends on DRM_PANIC_SCREEN_QR_CODE
+ help
+ This option sets the base URL to report the kernel panic. If it's set,
+ the QR code will contain the URL and the kmsg compressed with zlib as
+ a URL parameter. If it's empty, the QR code will contain the kmsg as
+ uncompressed text only.
+ Demo code in JavaScript to decode and decompress the kmsg data from
+ the URL parameter is available at https://github.com/kdj0c/panic_report
+
+config DRM_PANIC_SCREEN_QR_VERSION
+ int "Maximum version (size) of the QR code."
+ depends on DRM_PANIC_SCREEN_QR_CODE
+ default 40
+ help
+ This option limits the version (or size) of the QR code. QR code
+ versions range from Version 1 (21x21) to Version 40 (177x177).
+ Smaller QR codes are easier to read, but will contain less debugging
+ data. Default is 40.
+
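For reference, a QR symbol's side length grows linearly with its version: Version v is (17 + 4*v) modules per side, which is where the 21x21 (v=1) and 177x177 (v=40) figures in the help text come from. A one-line sketch:

        /* Modules per side for QR Version v (1..40): 21 at v=1, 177 at v=40. */
        static unsigned int qr_modules_per_side(unsigned int version)
        {
                return 17 + 4 * version;
        }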
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fa432a1ac9e2..784229d4504d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 38408e4e158e..c7b18c52825d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -39,23 +39,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
-subdir-ccflags-y := -Wextra
-subdir-ccflags-y += -Wunused
-subdir-ccflags-y += -Wmissing-prototypes
-subdir-ccflags-y += -Wmissing-declarations
-subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += -Wold-style-definition
-subdir-ccflags-y += -Wmissing-format-attribute
-# Need this to avoid recursive variable evaluation issues
-cond-flags := $(call cc-option, -Wunused-but-set-variable) \
- $(call cc-option, -Wunused-const-variable) \
- $(call cc-option, -Wstringop-truncation) \
- $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(cond-flags)
-subdir-ccflags-y += -Wno-unused-parameter
-subdir-ccflags-y += -Wno-type-limits
-subdir-ccflags-y += -Wno-sign-compare
-subdir-ccflags-y += -Wno-missing-field-initializers
+# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 137a88b8de45..dcd59040c449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -118,6 +118,8 @@
#define MAX_GPU_INSTANCE 64
+#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+
struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
int mgpu_fan_enabled;
@@ -235,6 +237,7 @@ extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
+extern uint amdgpu_svm_default_granularity;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
@@ -347,9 +350,9 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
-#define MAX_KIQ_REG_WAIT (amdgpu_sriov_vf(adev) ? 50000 : 5000) /* in usecs, extend for VF */
-#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
int amdgpu_device_ip_set_clockgating_state(void *dev,
@@ -823,17 +826,6 @@ struct amdgpu_mqd {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
-struct amdgpu_reset_info {
- /* reset dump register */
- u32 *reset_dump_reg_list;
- u32 *reset_dump_reg_value;
- int num_regs;
-
-#ifdef CONFIG_DEV_COREDUMP
- struct amdgpu_coredump_info *coredump_info;
-#endif
-};
-
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1157,8 +1149,6 @@ struct amdgpu_device {
struct mutex benchmark_mutex;
- struct amdgpu_reset_info reset_info;
-
bool scpm_enabled;
uint32_t scpm_status;
@@ -1175,6 +1165,11 @@ struct amdgpu_device {
bool debug_disable_soft_recovery;
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
+ bool debug_exp_resets;
+
+ bool enforce_isolation[MAX_XCP];
+ /* Protects cleaner-shader isolation between GFX and compute processes */
+ struct mutex enforce_isolation_mutex;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1484,7 +1479,6 @@ extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
-void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
@@ -1588,13 +1582,6 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
-#if defined(CONFIG_DRM_AMD_DC)
-int amdgpu_dm_display_resume(struct amdgpu_device *adev );
-#else
-static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
-#endif
-
-
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 19158cc30f31..57bda66e85ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -80,6 +80,9 @@ static void aca_banks_release(struct aca_banks *banks)
{
struct aca_bank_node *node, *tmp;
+ if (list_empty(&banks->list))
+ return;
+
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
@@ -453,13 +456,13 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
switch (type) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_DEFERRED:
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
break;
default:
break;
@@ -562,9 +565,13 @@ static void aca_error_fini(struct aca_error *aerr)
struct aca_bank_error *bank_error, *tmp;
mutex_lock(&aerr->lock);
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
aca_bank_error_remove(aerr, bank_error);
+out_unlock:
mutex_destroy(&aerr->lock);
}
@@ -680,6 +687,9 @@ static void aca_manager_fini(struct aca_handle_manager *mgr)
{
struct aca_handle *handle, *tmp;
+ if (list_empty(&mgr->list))
+ return;
+
list_for_each_entry_safe(handle, tmp, &mgr->list, node)
amdgpu_aca_remove_handle(handle);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 03205e3c3746..4f08b153cb66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -364,15 +364,15 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
- struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
+ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(bo, true);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&(bo));
+ amdgpu_bo_reserve(*bo, true);
+ amdgpu_bo_kunmap(*bo);
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
}
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
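The switch of amdgpu_amdkfd_free_gtt_mem() to a void ** parameter above lets amdgpu_bo_unref() clear the caller's stored pointer, so an accidental second free becomes a harmless no-op instead of a use-after-free. A hedged caller-side sketch (the struct and field names are hypothetical):

        struct example_mqd_ctx {
                void *mqd_mem_obj;              /* hypothetical field */
        };

        static void example_free(struct amdgpu_device *adev,
                                 struct example_mqd_ctx *ctx)
        {
                /* ctx->mqd_mem_obj is NULL on return. */
                amdgpu_amdkfd_free_gtt_mem(adev, &ctx->mqd_mem_obj);
        }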
@@ -783,22 +783,6 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type)
-{
- if (!hub_type) {
- if (adev->gfxhub.funcs->query_utcl2_poison_status)
- return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- } else {
- if (adev->mmhub.funcs->query_utcl2_poison_status)
- return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- }
-}
-
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
return kgd2kfd_check_and_lock_kfd();
@@ -887,3 +871,21 @@ free_ring_funcs:
return r;
}
+
+/* Stop scheduling on KFD */
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
+}
+
+/* Start scheduling on KFD */
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_start_sched(adev->kfd.dev, node_id);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e7bb1ca35801..f9d119448442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
@@ -264,6 +264,8 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -322,7 +324,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence __rcu **ef);
@@ -345,11 +347,9 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad
pasid_notify pasid_fn, void *data, uint32_t reset);
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
@@ -426,6 +426,8 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
#else
static inline int kgd2kfd_init(void)
{
@@ -496,5 +498,15 @@ static inline int kgd2kfd_check_and_lock_kfd(void)
static inline void kgd2kfd_unlock_kfd(void)
{
}
+
+static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index aff08321e976..8dfdb18197c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -191,4 +191,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 3a3f3ce09f00..9435af2e6bdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
-#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"
@@ -300,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched, false);
+ drm_sched_start(&ring->sched);
}
}
@@ -418,5 +417,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
- .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index a5c7259cf2a3..e2ae714a700f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -541,5 +541,7 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
kgd_gfx_v9_4_3_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
- .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
+ .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 3ab6c3aa0ad1..62176d607bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -1070,6 +1070,20 @@ static void program_trap_handler_settings(struct amdgpu_device *adev,
unlock_srbm(adev);
}
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1097,4 +1111,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 67bcaa3d4226..9efd2dd4fdd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -56,3 +56,12 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 8c8437a4383f..c718bedda0ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -680,5 +680,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v10_set_address_watch,
- .clear_address_watch = kgd_gfx_v10_clear_address_watch
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index b61a32d6af4b..a4ba49cb22db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -786,6 +786,20 @@ static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -808,5 +822,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v11_set_address_watch,
- .clear_address_watch = kgd_gfx_v11_clear_address_watch
+ .clear_address_watch = kgd_gfx_v11_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v11_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5a35a8ca8922..1254a43ec96b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1144,6 +1144,109 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
kgd_gfx_v9_unlock_srbm(adev, inst);
}
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ uint32_t low, high;
+ uint64_t queue_addr = 0;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+unlock_out:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
+/* assume queue acquired */
+static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
+ unsigned int utimeout)
+{
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ while (true) {
+ uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ return 0;
+
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+
+ usleep_range(500, 1000);
+ }
+}
+
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ uint32_t low, high, pipe_reset_data = 0;
+ uint64_t queue_addr = 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+ pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
+ inst, pipe_id, queue_id);
+
+ /* assume a previously issued dequeue request will take effect after reset */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ goto unlock_out;
+
+ pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
+
+ pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
+ pipe_reset_data = pipe_reset_data << pipe_id;
+
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
+
+ if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ queue_addr = 0;
+
+unlock_out:
+ pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
+ inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1172,4 +1275,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index ce424615f59b..988c50ac3be0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -101,3 +101,12 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 11672bfe4fad..4afef5b46c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,7 +25,6 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
-#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
@@ -818,18 +817,13 @@ static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
if (!mem->dmabuf) {
struct amdgpu_device *bo_adev;
struct dma_buf *dmabuf;
- int r, fd;
bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+ dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
mem->gem_handle,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0, &fd);
- if (r)
- return r;
- dmabuf = dma_buf_get(fd);
- close_fd(fd);
- if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+ DRM_RDWR : 0);
+ if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
mem->dmabuf = dmabuf;
}
@@ -1252,7 +1246,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
return ret;
}
-static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
@@ -1260,11 +1254,18 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct amdgpu_device *adev = entry->adev;
struct amdgpu_vm *vm = bo_va->base.vm;
+ if (bo_va->queue_refcount) {
+ pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
+ return -EBUSY;
+ }
+
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
+
+ return 0;
}
static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -2191,7 +2192,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
entry->va, entry->va + bo_size, entry);
- unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ if (ret)
+ goto unreserve_out;
+
entry->is_mapped = false;
mem->mapped_to_gpu_memory--;
@@ -2226,11 +2230,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
/**
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
* @bo: Buffer object to be mapped
+ * @bo_gart: Returned reference to the GART-mapped BO
*
* Before return, bo reference count is incremented. To release the reference and unpin/
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
*/
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
{
int ret;
@@ -2257,7 +2262,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
- bo = amdgpu_bo_ref(bo);
+ *bo_gart = amdgpu_bo_ref(bo);
return 0;
@@ -3200,12 +3205,13 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
{
+ struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
struct kfd_mem_attachment *entry;
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->is_mapped && entry->adev == adev)
+ if (entry->is_mapped && entry->bo_va->base.vm == vm)
return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 7dc102f0bc1d..0c8975ac5af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1018,8 +1018,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1039,8 +1040,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1058,8 +1060,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1070,8 +1073,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1113,8 +1117,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1211,8 +1216,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1221,8 +1227,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 618e469e3622..42e64bce661e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -431,6 +431,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
if (amdgpu_read_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
@@ -446,11 +451,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
-
dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c3d89088123d..16153d275d7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cae7479c3ecf..344e0a9ee08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -249,11 +249,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- if (adev->mode_info.bios_hardcoded_edid) {
- return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid,
- adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -442,6 +438,9 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ return;
+
drm_mode_probed_add(connector, mode);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6dfdff58bffd..1e475eb01417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
+ /* Only a single BO list is allowed to simplify handling. */
+ if (p->bo_list) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
+
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
@@ -292,6 +296,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
+ p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -1106,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e1a11b6b989..cbef720de779 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2026,100 +2026,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
-static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
- char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[12];
- int i, ret, len = 0;
-
- if (*pos)
- return 0;
-
- memset(reg_offset, 0, 12);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
- up_read(&adev->reset_domain->sem);
- if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
- return -EFAULT;
-
- len += strlen(reg_offset);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
- }
-
- up_read(&adev->reset_domain->sem);
- *pos += len;
-
- return len;
-}
-
-static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
- const char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[11];
- uint32_t *new = NULL, *tmp = NULL;
- unsigned int len = 0;
- int ret, i = 0;
-
- do {
- memset(reg_offset, 0, 11);
- if (copy_from_user(reg_offset, buf + len,
- min(10, (size-len)))) {
- ret = -EFAULT;
- goto error_free;
- }
-
- new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- tmp = new;
- if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
- ret = -EINVAL;
- goto error_free;
- }
-
- len += ret;
- i++;
- } while (len < size);
-
- new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- ret = down_write_killable(&adev->reset_domain->sem);
- if (ret)
- goto error_free;
-
- swap(adev->reset_info.reset_dump_reg_list, tmp);
- swap(adev->reset_info.reset_dump_reg_value, new);
- adev->reset_info.num_regs = i;
- up_write(&adev->reset_domain->sem);
- ret = size;
-
-error_free:
- if (tmp != new)
- kfree(tmp);
- kfree(new);
- return ret;
-}
-
-static const struct file_operations amdgpu_reset_dump_register_list = {
- .owner = THIS_MODULE,
- .read = amdgpu_reset_dump_register_list_read,
- .write = amdgpu_reset_dump_register_list_write,
- .llseek = default_llseek
-};
-
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -2204,8 +2110,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
&amdgpu_debugfs_vm_info_fops);
debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
&amdgpu_benchmark_fops);
- debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
- &amdgpu_reset_dump_register_list);
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index f0a44d0dec27..5ac59b62020c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -28,8 +28,8 @@
#include "atom.h"
#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
}
#else
@@ -203,7 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
- int i, ver;
+ int ver;
iter.data = buffer;
iter.offset = 0;
@@ -236,7 +236,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "\nSOC Memory Information\n");
drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size);
drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size);
- drm_printf(&p, "visible vram size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
+ drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
/* GDS Config */
drm_printf(&p, "\nGDS Config\n");
@@ -315,16 +315,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
- if (coredump->reset_vram_lost)
+ if (coredump->skip_vram_check)
+ drm_printf(&p, "VRAM lost check is skipped!\n");
+ else if (coredump->reset_vram_lost)
drm_printf(&p, "VRAM is lost due to GPU reset!\n");
- if (coredump->adev->reset_info.num_regs) {
- drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
-
- for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
- drm_printf(&p, "0x%08x: 0x%08x\n",
- coredump->adev->reset_info.reset_dump_reg_list[i],
- coredump->adev->reset_info.reset_dump_reg_value[i]);
- }
return count - iter.remain;
}
@@ -334,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data)
kfree(data);
}
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
- struct amdgpu_coredump_info *coredump;
struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
+ struct amdgpu_coredump_info *coredump;
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -349,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
return;
}
+ coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (reset_context->job && reset_context->job->vm) {
+ if (job && job->vm) {
+ struct amdgpu_vm *vm = job->vm;
struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
index 52459512cb2b..ef9772c6bcc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -26,7 +26,6 @@
#define __AMDGPU_DEV_COREDUMP_H__
#include "amdgpu.h"
-#include "amdgpu_reset.h"
#ifdef CONFIG_DEV_COREDUMP
@@ -36,12 +35,12 @@ struct amdgpu_coredump_info {
struct amdgpu_device *adev;
struct amdgpu_task_info reset_task_info;
struct timespec64 reset_time;
+ bool skip_vram_check;
bool reset_vram_lost;
struct amdgpu_ring *ring;
};
#endif
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bcacf2e35eba..f4628412dac4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1916,6 +1916,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
+ int i;
+
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -1970,6 +1972,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ for (i = 0; i < MAX_XCP; i++)
+ adev->enforce_isolation[i] = !!enforce_isolation;
+
return 0;
}
@@ -2471,6 +2476,7 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
int i, r;
bool total;
@@ -2608,7 +2614,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
- amdgpu_amdkfd_device_probe(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block && ip_block->status.valid)
+ amdgpu_amdkfd_device_probe(adev);
+
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -3948,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
adev->ram_is_direct_mapped = true;
}
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return: true if the device's DMA addresses are remapped by an IOMMU (DMA or DMA-FQ domain)
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+ domain->type == IOMMU_DOMAIN_DMA_FQ))
+ return true;
+
+ return false;
+}
+#endif
+
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_pcie_replay_count.attr,
NULL
@@ -4055,6 +4085,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->notifier_lock);
mutex_init(&adev->pm.stable_pstate_ctx_lock);
mutex_init(&adev->benchmark_mutex);
+ mutex_init(&adev->gfx.reset_sem_mutex);
+ /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+ mutex_init(&adev->enforce_isolation_mutex);
+ mutex_init(&adev->gfx.kfd_sch_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4086,6 +4120,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ /*
+ * Initialize the enforce_isolation work structures for each XCP
+ * partition. This work handler is responsible for enforcing shader
+ * isolation on AMD GPUs. It counts the number of emitted fences for
+ * each GFX and compute ring. If there are any fences, it schedules
+ * the `enforce_isolation_work` to be run after a delay. If there are
+ * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+ * runqueue.
+ */
+ for (i = 0; i < MAX_XCP; i++) {
+ INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+ amdgpu_gfx_enforce_isolation_handler);
+ adev->gfx.enforce_isolation[i].adev = adev;
+ adev->gfx.enforce_isolation[i].xcp_id = i;
+ }
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
@@ -4482,6 +4531,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@@ -4502,9 +4554,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized)
- drain_workqueue(adev->mman.bdev.wq);
-
if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
@@ -5278,16 +5327,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
{
int i, r = 0;
struct amdgpu_job *job = NULL;
+ struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
if (reset_context->reset_req_dev == adev)
job = reset_context->job;
- if (amdgpu_sriov_vf(adev)) {
- /* stop the data exchange thread */
- amdgpu_virt_fini_data_exchange(adev);
- }
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_pre_reset(adev);
amdgpu_fence_driver_isr_toggle(adev, true);
@@ -5336,6 +5384,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
}
}
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ dev_info(tmp_adev->dev, "Dumping IP State\n");
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)tmp_adev);
+ dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+ }
+
if (need_full_reset)
r = amdgpu_device_ip_suspend(adev);
if (need_full_reset)
@@ -5348,47 +5406,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
-{
- int i;
-
- lockdep_assert_held(&adev->reset_domain->sem);
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- adev->reset_info.reset_dump_reg_value[i] =
- RREG32(adev->reset_info.reset_dump_reg_list[i]);
-
- trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
- adev->reset_info.reset_dump_reg_value[i]);
- }
-
- return 0;
-}
-
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
- uint32_t i;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
- if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
- amdgpu_reset_reg_dumps(tmp_adev);
-
- dev_info(tmp_adev->dev, "Dumping IP State\n");
- /* Trigger ip dump before we reset the asic */
- for (i = 0; i < tmp_adev->num_ip_blocks; i++)
- if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
- tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
- dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
- }
-
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
@@ -5461,7 +5489,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5513,7 +5541,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
* bad_page_threshold value to fix this once
* probing driver again.
*/
- if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
+ if (!amdgpu_ras_is_rma(tmp_adev)) {
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
} else {
@@ -5879,7 +5907,7 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -5891,8 +5919,14 @@ skip_hw_reset:
tmp_adev->asic_reset_res = 0;
if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ /* Bad news: how do we tell it to userspace?
+ * For RAS errors, report the GPU's bad status instead of a
+ * reset failure.
+ */
+ if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
+ !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
@@ -6138,18 +6172,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
- uint64_t address_mask = peer_adev->dev->dma_mask ?
- ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
- resource_size_t aper_limit =
- adev->gmc.aper_base + adev->gmc.aper_size - 1;
bool p2p_access =
!adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
- return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
- adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
- !(adev->gmc.aper_base & address_mask ||
- aper_limit & address_mask));
+ bool is_large_bar = adev->gmc.visible_vram_size &&
+ adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+ bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+ if (!p2p_addressable) {
+ uint64_t address_mask = peer_adev->dev->dma_mask ?
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+ resource_size_t aper_limit =
+ adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
+ p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+ aper_limit & address_mask);
+ }
+ return is_large_bar && p2p_access && p2p_addressable;
#else
return false;
#endif
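
A stand-alone sketch (not part of the patch) of how the DMA-mask fallback above classifies an aperture; the 40-bit mask and the aperture placement are assumed values chosen for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_mask = (1ULL << 40) - 1;	/* peer can address 40 bits */
	uint64_t address_mask = ~dma_mask;	/* bits the peer cannot reach */
	uint64_t aper_base = 0x8000000000ULL;	/* 512 GiB, i.e. bit 39 set */
	uint64_t aper_limit = aper_base + (256ULL << 20) - 1;

	/* addressable iff neither base nor limit needs unreachable bits */
	int p2p_addressable = !((aper_base & address_mask) ||
				(aper_limit & address_mask));

	printf("p2p_addressable = %d\n", p2p_addressable);	/* prints 1 */
	return 0;
}
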
@@ -6374,7 +6414,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
amdgpu_device_unset_mp1_state(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 094498a0964b..f57411ed2dc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -131,6 +131,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+ AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -168,6 +169,16 @@ uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
+
+/* Specifies the default granularity for SVM, used in buffer
+ * migration and restoration of backing memory when handling
+ * recoverable page faults.
+ *
+ * The value is log2 of the buffer size in 4 KiB pages; for a
+ * 2 MiB buffer (2^9 pages) it evaluates to 9.
+ */
+uint amdgpu_svm_default_granularity = 9;
+
/*
* OverDrive(bit 14) disabled by default
* GFX DCS(bit 19) disabled by default
@@ -320,6 +331,13 @@ MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
/**
+ * DOC: svm_default_granularity (uint)
+ * Used in buffer migration and handling of recoverable page faults
+ */
+MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity as log2(pages); default 9 = 2^9 pages = 2 MiB");
+module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
+
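
A quick arithmetic check (illustrative only, assuming 4 KiB pages) of why the default granularity of 9 corresponds to 2 MiB:

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 9;	/* module parameter default */
	unsigned long page_size = 4096;	/* assumed 4 KiB pages */
	unsigned long bytes = (1UL << granularity) * page_size;

	/* 512 pages * 4096 bytes = 2097152 bytes = 2 MiB */
	printf("%lu bytes (%lu MiB)\n", bytes, bytes >> 20);
	return 0;
}
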
+/**
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
@@ -2199,6 +2217,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable RAS ACA\n");
adev->debug_enable_ras_aca = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
+ pr_info("debug: enable experimental reset features\n");
+ adev->debug_exp_resets = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2908,6 +2931,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
@@ -2953,7 +2977,6 @@ static const struct drm_driver amdgpu_kms_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
@@ -2980,7 +3003,6 @@ const struct drm_driver amdgpu_partition_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 8283d682f543..7cc980bf4725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -55,8 +55,6 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1849510a308a..83e54697f0ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,10 +24,13 @@
*/
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
@@ -882,8 +885,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
int r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
- if (!amdgpu_persistent_edc_harvesting_supported(adev))
- amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (r)
+ return r;
+ }
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
@@ -1027,7 +1033,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
pr_err("critical bug! too many kiq readers\n");
goto failed_unlock;
}
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1093,7 +1102,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
}
spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_wreg(ring, reg, v);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1129,6 +1141,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
failed_undo:
amdgpu_ring_undo(ring);
+failed_unlock:
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
@@ -1381,6 +1394,217 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
return sysfs_emit(buf, "%s\n", supported_partition);
}
+static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ struct drm_sched_entity entity;
+ struct dma_fence *f;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int i, r;
+
+ /* Initialize the scheduler entity */
+ r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r) {
+ dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
+ goto err;
+ }
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
+ 64, 0,
+ &job);
+ if (r)
+ goto err;
+
+ job->enforce_isolation = true;
+
+ ib = &job->ibs[0];
+ for (i = 0; i <= ring->funcs->align_mask; ++i)
+ ib->ptr[i] = ring->funcs->nop;
+ ib->length_dw = ring->funcs->align_mask + 1;
+
+ f = amdgpu_job_submit(job);
+
+ r = dma_fence_wait(f, false);
+ /* drop the fence reference whether or not the wait succeeded */
+ dma_fence_put(f);
+ if (r)
+ goto err;
+
+ /* Clean up the scheduler entity */
+ drm_sched_entity_destroy(&entity);
+ return 0;
+
+err:
+ return r;
+}
+
+static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
+{
+ int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ struct amdgpu_ring *ring;
+ int num_xcc_to_clear;
+ int i, r, xcc_id;
+
+ if (adev->gfx.num_xcc_per_xcp)
+ num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
+ else
+ num_xcc_to_clear = 1;
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+ if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
+ r = amdgpu_gfx_run_cleaner_shader_job(ring);
+ if (r)
+ return r;
+ num_xcc_to_clear--;
+ break;
+ }
+ }
+ }
+
+ if (num_xcc_to_clear)
+ return -ENOENT;
+
+ return 0;
+}
+
+static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ long value;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret)
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ if (adev->xcp_mgr) {
+ if (value >= adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+ } else {
+ if (value > 1)
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_gfx_run_cleaner_shader(adev, value);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
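
A hypothetical user-space caller of the attribute above; the exact sysfs path depends on the card and is an assumption here:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumed; adjust cardN for the GPU in question */
	const char *path = "/sys/class/drm/card0/device/run_cleaner_shader";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* write the XCP id to clean; "0" on non-partitioned GPUs */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
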
+
+static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int i;
+ ssize_t size = 0;
+
+ if (adev->xcp_mgr) {
+ for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+ size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
+ if (i < (adev->xcp_mgr->num_xcps - 1))
+ size += sysfs_emit_at(buf, size, " ");
+ }
+ buf[size++] = '\n';
+ } else {
+ size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
+ }
+
+ return size;
+}
+
+static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ long partition_values[MAX_XCP] = {0};
+ int ret, i, num_partitions;
+ const char *input_buf = buf;
+
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ ret = sscanf(input_buf, "%ld", &partition_values[i]);
+ if (ret <= 0)
+ break;
+
+ /* Move the pointer to the next value in the string */
+ input_buf = strchr(input_buf, ' ');
+ if (input_buf) {
+ input_buf++;
+ } else {
+ i++;
+ break;
+ }
+ }
+ num_partitions = i;
+
+ if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ if (!adev->xcp_mgr && num_partitions != 1)
+ return -EINVAL;
+
+ for (i = 0; i < num_partitions; i++) {
+ if (partition_values[i] != 0 && partition_values[i] != 1)
+ return -EINVAL;
+ }
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ for (i = 0; i < num_partitions; i++) {
+ if (adev->enforce_isolation[i] && !partition_values[i]) {
+ /* Going from enabled to disabled */
+ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
+ } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ /* Going from disabled to enabled */
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
+ adev->enforce_isolation[i] = partition_values[i];
+ }
+
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ return count;
+}
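
A stand-alone illustration (not driver code) of how the store callback above walks a space-separated input such as "1 0 1 0": sscanf reads one value, strchr advances past the following space:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *input_buf = "1 0 1 0";
	long vals[8] = {0};
	int i;

	for (i = 0; i < 8; i++) {
		if (sscanf(input_buf, "%ld", &vals[i]) != 1)
			break;
		input_buf = strchr(input_buf, ' ');
		if (!input_buf) {
			i++;		/* last value consumed */
			break;
		}
		input_buf++;		/* skip the space */
	}

	printf("parsed %d values\n", i);	/* prints: parsed 4 values */
	return 0;
}
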
+
+static DEVICE_ATTR(run_cleaner_shader, 0200,
+ NULL, amdgpu_gfx_set_run_cleaner_shader);
+
+static DEVICE_ATTR(enforce_isolation, 0644,
+ amdgpu_gfx_get_enforce_isolation,
+ amdgpu_gfx_set_enforce_isolation);
+
static DEVICE_ATTR(current_compute_partition, 0644,
amdgpu_gfx_get_current_compute_partition,
amdgpu_gfx_set_compute_partition);
@@ -1406,3 +1630,229 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}
+
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
+ }
+
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev))
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return -EOPNOTSUPP;
+
+ return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
+ memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
+ cleaner_shader_size);
+}
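
A sketch of how a GFX IP block might wire the three helpers above during software init; the function and array names are hypothetical and the shader contents are elided:

/* hypothetical shader binary; real contents are ASIC-specific */
static const u32 gfx_vX_cleaner_shader_hex[] = { 0 /* dwords elided */ };

static int gfx_vX_cleaner_shader_setup(struct amdgpu_device *adev)
{
	int r;

	adev->gfx.enable_cleaner_shader = true;

	/* allocate the BO that will hold the shader */
	r = amdgpu_gfx_cleaner_shader_sw_init(adev,
					      sizeof(gfx_vX_cleaner_shader_hex));
	if (r)
		return r;

	/* copy the shader into the BO */
	amdgpu_gfx_cleaner_shader_init(adev,
				       sizeof(gfx_vX_cleaner_shader_hex),
				       gfx_vX_cleaner_shader_hex);
	return 0;
}
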
+
+/**
+ * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the scheduler to control
+ * @enable: Whether to enable or disable the KFD scheduler
+ *
+ * This function is used to control the KFD (Kernel Fusion Driver) scheduler
+ * from the KGD. It is part of the cleaner shader feature. This function plays
+ * a key role in enforcing process isolation on the GPU.
+ *
+ * The function uses a reference count mechanism (kfd_sch_req_count) to keep
+ * track of the number of requests to enable the KFD scheduler. When a request
+ * to enable the KFD scheduler is made, the reference count is decremented.
+ * When the reference count reaches zero, a delayed work is scheduled to
+ * enforce isolation after a delay of GFX_SLICE_PERIOD.
+ *
+ * When a request to disable the KFD scheduler is made, the function first
+ * checks if the reference count is zero. If it is, it cancels the delayed work
+ * for enforcing isolation and checks if the KFD scheduler is active. If the
+ * KFD scheduler is active, it sends a request to stop the KFD scheduler and
+ * sets the KFD scheduler state to inactive. Then, it increments the reference
+ * count.
+ *
+ * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
+ * scheduler state and reference count are updated atomically.
+ *
+ * Note: If the reference count is already zero when a request to enable the
+ * KFD scheduler is made, it means there's an imbalance bug somewhere. The
+ * function triggers a warning in this case.
+ */
+static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
+ bool enable)
+{
+ mutex_lock(&adev->gfx.kfd_sch_mutex);
+
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
+ goto unlock;
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]--;
+
+ if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
+ adev->gfx.kfd_sch_inactive[idx]) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ }
+ } else {
+ if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
+ if (!adev->gfx.kfd_sch_inactive[idx]) {
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = true;
+ }
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]++;
+ }
+
+unlock:
+ mutex_unlock(&adev->gfx.kfd_sch_mutex);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
+ *
+ * @work: work_struct.
+ *
+ * This function is the work handler for enforcing shader isolation on AMD GPUs.
+ * It counts the number of emitted fences for each GFX and compute ring. If there
+ * are any fences, it schedules the `enforce_isolation_work` to be run after a
+ * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
+ * Driver (KFD) to resume the runqueue. The function is synchronized using the
+ * `enforce_isolation_mutex`.
+ */
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
+{
+ struct amdgpu_isolation_work *isolation_work =
+ container_of(work, struct amdgpu_isolation_work, work.work);
+ struct amdgpu_device *adev = isolation_work->adev;
+ u32 i, idx, fences = 0;
+
+ if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = isolation_work->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
+ if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ }
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
+ if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ }
+ if (fences) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ } else {
+ /* Tell KFD to resume the runqueue */
+ if (adev->kfd.init_complete) {
+ WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = false;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
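
The begin/end pair above is meant to bracket ring usage; a sketch (assumption: an ASIC's ring-funcs table adopting the hooks, with the other callbacks elided):

static const struct amdgpu_ring_funcs gfx_vX_ring_funcs_sketch = {
	.type = AMDGPU_RING_TYPE_GFX,
	/* ... other callbacks elided ... */
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
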
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 56cc58edbb4e..5644e10a86a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -34,6 +34,7 @@
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
+#include "amdgpu_xcp.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -138,6 +139,10 @@ struct kiq_pm4_funcs {
void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub);
+ void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
+ uint32_t queue_type, uint32_t me_id,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
@@ -345,6 +350,12 @@ struct amdgpu_me {
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};
+struct amdgpu_isolation_work {
+ struct amdgpu_device *adev;
+ u32 xcp_id;
+ struct delayed_work work;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -397,6 +408,7 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src bad_op_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
struct amdgpu_irq_src rlc_gc_fed_irq;
@@ -445,6 +457,21 @@ struct amdgpu_gfx {
uint32_t *ip_dump_core;
uint32_t *ip_dump_compute_queues;
uint32_t *ip_dump_gfx_queues;
+
+ struct mutex reset_sem_mutex;
+
+ /* cleaner shader */
+ struct amdgpu_bo *cleaner_shader_obj;
+ unsigned int cleaner_shader_size;
+ u64 cleaner_shader_gpu_addr;
+ void *cleaner_shader_cpu_ptr;
+ const void *cleaner_shader_ptr;
+ bool enable_cleaner_shader;
+ struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
+ /* Mutex for synchronizing KFD scheduler operations */
+ struct mutex kfd_sch_mutex;
+ u64 kfd_sch_req_count[MAX_XCP];
+ bool kfd_sch_inactive[MAX_XCP];
};
struct amdgpu_gfx_ras_reg_entry {
@@ -546,6 +573,17 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
void *ras_error_status,
void (*func)(struct amdgpu_device *adev, void *ras_error_status,
int xcc_id));
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size);
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr);
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index 103a837ccc71..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -38,8 +38,6 @@ struct amdgpu_gfxhub_funcs {
void (*mode2_save_regs)(struct amdgpu_device *adev);
void (*mode2_restore_regs)(struct amdgpu_device *adev);
void (*halt)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int xcc_id);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index b49b3650fd62..17a19d49d30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -786,7 +786,8 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
goto failed_kiq;
might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b6a8bddada4c..92d27d32de41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -476,15 +476,19 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub)
{
return vm->reserved_vmid[vmhub] ||
- (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+ (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
+ vm->root.bo->xcp_id : 0] &&
+ AMDGPU_IS_GFXHUB(vmhub));
}
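
An equivalent spelling of the index selection above, pulled out as a helper for readability (illustrative only, not part of the patch):

static inline u32 amdgpu_isolation_idx_sketch(struct amdgpu_vm *vm)
{
	u32 xcp_id = vm->root.bo->xcp_id;

	/* VMs not tied to a partition fall back to slot 0 */
	return (xcp_id != AMDGPU_XCP_NO_PARTITION) ? xcp_id : 0;
}
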
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
@@ -600,9 +604,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
}
/* alloc a default reserved vmid to enforce isolation */
- if (enforce_isolation)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
-
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..4012fb2dd08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,7 +78,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 44e2ea8c9728..b03664c66dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -49,6 +49,7 @@ struct amdgpu_isp {
const struct isp_funcs *funcs;
struct mfd_cell *isp_cell;
struct resource *isp_res;
+ struct resource *isp_i2c_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 908e13455152..ad6bf5d4e0a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,6 +30,60 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
+
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
+
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)adev);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
+
+ amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build the list
+ * of devices for the core dump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
@@ -48,9 +102,14 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
-
adev->job_hang = true;
+ /*
+ * Do the coredump immediately after a job timeout so the dump is
+ * as close a snapshot of the GPU's error state as possible
+ */
+ amdgpu_job_core_dump(adev, job);
+
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
@@ -72,6 +131,26 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+ /* attempt a per ring reset */
+ if (amdgpu_gpu_recovery &&
+ ring->funcs->reset) {
+ /* stop the scheduler, but don't mess with the
+ * bad job yet because if ring reset fails
+ * we'll fall back to full GPU reset.
+ */
+ drm_sched_wqueue_stop(&ring->sched);
+ r = amdgpu_ring_reset(ring, job->vmid);
+ if (!r) {
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_stop(&ring->sched, s_job);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ amdgpu_fence_driver_force_completion(ring);
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_start(&ring->sched);
+ goto exit;
+ }
+ }
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -81,6 +160,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.src = AMDGPU_RESET_SRC_JOB;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*
+ * Skip the coredump here: one was already taken right after the
+ * timeout, giving the closest representation of the error state
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a963a25ddd62..ce6b9ba967ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,6 +76,9 @@ struct amdgpu_job {
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
+ /* enforce isolation */
+ bool enforce_isolation;
+
uint32_t num_ibs;
struct amdgpu_ib ibs[];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 66782be5917b..016a6f6c4267 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
@@ -778,6 +779,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
+ int ret = 0;
unsigned int n, alloc_size;
uint32_t *regs;
unsigned int se_num = (info->read_mmr_reg.instance >>
@@ -787,24 +789,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -ENOENT;
+
/* set full masks if the userspace set all bits
* in the bitfields
*/
- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
se_num = 0xffffffff;
- else if (se_num >= AMDGPU_GFX_MAX_SE)
- return -EINVAL;
- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ } else if (se_num >= AMDGPU_GFX_MAX_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
sh_num = 0xffffffff;
- else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
- return -EINVAL;
+ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (info->read_mmr_reg.count > 128)
- return -EINVAL;
+ if (info->read_mmr_reg.count > 128) {
+ ret = -EINVAL;
+ goto out;
+ }
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
amdgpu_gfx_off_ctrl(adev, false);
@@ -816,13 +831,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
info->read_mmr_reg.dword_offset + i);
kfree(regs);
amdgpu_gfx_off_ctrl(adev, true);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
}
amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
- return n ? -EFAULT : 0;
+ ret = (n ? -EFAULT : 0);
+out:
+ up_read(&adev->reset_domain->sem);
+ return ret;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
@@ -1269,23 +1288,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
- drm_fb_helper_lastclose(dev);
- vga_switcheroo_process_delayed_switch();
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 2542bd7aa7c7..18ee60378727 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -396,7 +396,6 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum
static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_set *mca_set, struct ras_err_data *err_data)
{
- struct ras_err_addr err_addr;
struct amdgpu_smuio_mcm_config_info mcm_info;
struct mca_bank_node *node, *tmp;
struct mca_bank_entry *entry;
@@ -421,27 +420,20 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r
continue;
memset(&mcm_info, 0, sizeof(mcm_info));
- memset(&err_addr, 0, sizeof(err_addr));
mcm_info.socket_id = entry->info.socket_id;
mcm_info.die_id = entry->info.aid;
- if (blk == AMDGPU_RAS_BLOCK__UMC) {
- err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
- err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
- err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
- }
-
if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
amdgpu_ras_error_statistic_ue_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
} else {
if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
amdgpu_ras_error_statistic_de_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
else
amdgpu_ras_error_statistic_ce_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
}
amdgpu_mca_bank_set_remove_node(mca_set, node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 1cb1ec7beefe..10b61ff63802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -501,60 +501,50 @@ int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_suspend_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
+ input.suspend_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to suspend pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to suspend all gangs");
+
+ return r;
}
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_resume_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
+ input.resume_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->resume_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to resume pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to resume all gangs");
+
+ return r;
}
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
@@ -793,6 +783,68 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
+{
+ unsigned long flags;
+ struct amdgpu_mes_queue *queue;
+ struct amdgpu_mes_gang *gang;
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ /*
+ * Avoid taking any other locks under MES lock to avoid circular
+ * lock dependencies.
+ */
+ amdgpu_mes_lock(&adev->mes);
+
+ /* look up the queue in the idr list */
+ spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
+
+ queue = idr_find(&adev->mes.queue_id_idr, queue_id);
+ if (!queue) {
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+ amdgpu_mes_unlock(&adev->mes);
+ DRM_ERROR("queue id %d doesn't exist\n", queue_id);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+
+ DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
+ queue->doorbell_off);
+
+ gang = queue->gang;
+
+ /* zero-init so fields such as use_mmio are not stack garbage */
+ memset(&queue_input, 0, sizeof(queue_input));
+ queue_input.doorbell_offset = queue->doorbell_off;
+ queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
+
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
+ queue_id);
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return r;
+}
+
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid)
+{
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ /* zero-init unused fields (doorbell_offset, gang_context_addr, ...) */
+ memset(&queue_input, 0, sizeof(queue_input));
+ queue_input.queue_type = queue_type;
+ queue_input.use_mmio = true;
+ queue_input.me_id = me_id;
+ queue_input.pipe_id = pipe_id;
+ queue_input.queue_id = queue_id;
+ queue_input.vmid = vmid;
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
+ queue_id);
+ return r;
+}
+
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -838,6 +890,33 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio)
+{
+ struct mes_reset_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.me_id = ring->me;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+ queue_input.vmid = vmid;
+ queue_input.use_mmio = use_mmio;
+
+ r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset legacy queue\n");
+
+ return r;
+}
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
@@ -1533,7 +1612,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
@@ -1584,6 +1663,19 @@ out:
return r;
}
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+{
+ uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ bool is_supported = false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63)
+ is_supported = true;
+
+ return is_supported;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index bcce1add4ef6..96788c0f42f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -249,6 +249,18 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
+struct mes_reset_queue_input {
+ uint32_t doorbell_offset;
+ uint64_t gang_context_addr;
+ bool use_mmio;
+ uint32_t queue_type;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t xcc_id;
+ uint32_t vmid;
+};
+
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -280,6 +292,18 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
+struct mes_reset_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ bool use_mmio;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+ uint32_t vmid;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -348,6 +372,12 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
+
+ int (*reset_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input);
+
+ int (*reset_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -375,6 +405,9 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
struct amdgpu_mes_queue_properties *qprops,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid);
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -382,6 +415,10 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
u64 gpu_addr, u64 seq);
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio);
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
@@ -479,4 +516,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
memalloc_noreclaim_restore(mes->saved_flags);
mutex_unlock(&mes->mutex_hidden);
}
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 95d676ee207f..1ca9d4ed8063 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -63,8 +63,6 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int hub_inst);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d002b845d8ac..5e3faefc5510 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -51,6 +51,7 @@ struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
struct edid;
+struct drm_edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -326,8 +327,7 @@ struct amdgpu_mode_info {
/* FMT dithering */
struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
/* firmware flags */
u32 firmware_flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bc42ccbde659..d7e27957013f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -90,6 +90,12 @@ struct amdgpu_bo_va {
bool cleared;
bool is_xgmi;
+
+ /*
+ * protected by vm reservation lock
+ * if non-zero, cannot unmap from GPU because user queues may still access it
+ */
+ unsigned int queue_refcount;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 0bb2466d539a..675aa138ea11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -94,7 +94,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
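For readers unfamiliar with the kernel's clamp() helper, it is equivalent to the min(max(...)) expression it replaces; a stand-alone userspace sketch of the arithmetic (values are illustrative):

	#include <assert.h>

	/* clamp(val, lo, hi) == min(max(val, lo), hi) for lo <= hi */
	static unsigned int clamp_uint(unsigned int val, unsigned int lo, unsigned int hi)
	{
		return val < lo ? lo : (val > hi ? hi : val);
	}

	int main(void)
	{
		/* e.g. DIV_ROUND_CLOSEST(den, post_div) == 200, ref_div_max == 128 */
		assert(clamp_uint(200u, 1u, 128u) == 128u);
		assert(clamp_uint(0u, 1u, 128u) == 1u);
		return 0;
	}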
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index d0307c55da50..61a2f386d9fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1223,11 +1223,11 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
amdgpu_ras_error_statistic_de_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->de_count);
+ &err_info->mcm_info, err_info->de_count);
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ce_count);
+ &err_info->mcm_info, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ue_count);
+ &err_info->mcm_info, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't have error source info */
@@ -2153,7 +2153,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
/* gpu reset is fallback for failed and default cases.
* For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
*/
- if (poison_stat && !con->is_rma) {
+ if (poison_stat && !amdgpu_ras_is_rma(adev)) {
event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id,
"GPU reset for %s RAS poison consumption is issued!\n",
@@ -2881,9 +2881,6 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
{
mutex_init(&ecc_log->lock);
- /* Set any value as siphash key */
- memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
-
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
ecc_log->prev_de_queried_count = 0;
@@ -2948,7 +2945,7 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && con->is_rma)
+ if (err_cnt && amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
amdgpu_ras_schedule_retirement_dwork(con,
@@ -3049,7 +3046,7 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
}
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
- if (reset_flags && !con->is_rma) {
+ if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
@@ -3195,7 +3192,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 * This call fails when is_rma is true or
* ret != 0.
*/
- if (con->is_rma || ret)
+ if (amdgpu_ras_is_rma(adev) || ret)
goto free;
if (con->eeprom_control.ras_num_recs) {
@@ -3244,7 +3241,7 @@ out:
 * Except for the error threshold exceeded case, other failure cases in this
 * function do not fail amdgpu driver init.
*/
- if (!con->is_rma)
+ if (!amdgpu_ras_is_rma(adev))
ret = 0;
else
ret = -EINVAL;
@@ -4287,7 +4284,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
/* mode1 is the only selection for RMA status */
- if (ras->is_rma) {
+ if (amdgpu_ras_is_rma(adev)) {
ras->gpu_reset_flags = 0;
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
@@ -4611,8 +4608,6 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
if (!err_node)
return NULL;
- INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
-
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
err_data->err_list_count++;
@@ -4622,21 +4617,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
return &err_node->err_info;
}
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
-{
- /* This function will be retired. */
- return;
-}
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
-{
- list_del(&mca_err_addr->node);
- kfree(mca_err_addr);
-}
-
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4650,9 +4633,6 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->ue_count += count;
err_data->ue_count += count;
@@ -4660,8 +4640,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4682,8 +4662,8 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4697,9 +4677,6 @@ int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->de_count += count;
err_data->de_count += count;
@@ -4771,6 +4748,16 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
dev_info(adev->dev,
"socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ socket_id, aid_id, fw_status);
}
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
@@ -4837,3 +4824,13 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
va_end(args);
}
+
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->is_rma;
+}
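The helper gives callers a NULL-safe way to test RMA state, which is why the call sites converted above drop their own RAS-context checks; a minimal sketch of the resulting pattern (the error code is illustrative):

	/* callers no longer need to fetch and NULL-check the RAS context */
	if (amdgpu_ras_is_rma(adev))
		return -EHWPOISON;	/* illustrative: refuse work on an RMA'd device */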
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index dcf1f3dbb5c4..669720a9c60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -28,7 +28,6 @@
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/radix-tree.h>
-#include <linux/siphash.h>
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
@@ -47,6 +46,8 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
+#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
+#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -476,16 +477,15 @@ struct ras_err_pages {
};
struct ras_ecc_err {
- u64 hash_index;
uint64_t status;
uint64_t ipid;
uint64_t addr;
+ uint64_t pa_pfn;
struct ras_err_pages err_pages;
};
struct ras_ecc_log_info {
struct mutex lock;
- siphash_key_t ecc_key;
struct radix_tree_root de_page_tree;
uint64_t de_queried_count;
uint64_t prev_de_queried_count;
@@ -572,19 +572,11 @@ struct ras_fs_data {
char debugfs_name[32];
};
-struct ras_err_addr {
- struct list_head node;
- uint64_t err_status;
- uint64_t err_ipid;
- uint64_t err_addr;
-};
-
struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
u64 de_count;
- struct list_head err_addr_list;
};
struct ras_err_node {
@@ -941,14 +933,14 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
const struct aca_info *aca_info, void *data);
@@ -957,12 +949,6 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
struct aca_handle *handle, char *buf, void *data);
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *err_addr);
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *mca_err_addr);
-
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
@@ -982,4 +968,5 @@ __printf(3, 4)
void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4ae581f3fcb5..1cb920abc2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -136,6 +136,12 @@ static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *doma
return queue_work(domain->wq, work);
}
+static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain)
+{
+ lockdep_assert_held(&domain->sem);
+ return rwsem_is_contended(&domain->sem);
+}
+
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
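amdgpu_reset_pending() lets work that holds the reset domain semaphore for read notice a writer (a pending reset) waiting on it; a hypothetical polling loop (the helper names are placeholders, not kernel APIs):

	/* Hypothetical long-running job that holds domain->sem for read */
	while (have_more_work()) {			/* placeholder helper */
		if (amdgpu_reset_pending(adev->reset_domain))
			return -EAGAIN;			/* back off so the reset can proceed */
		process_one_item();			/* placeholder helper */
	}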
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e6344a6b0a9f..690976665cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -144,8 +144,10 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
- count %= ring->funcs->align_mask + 1;
- ring->funcs->insert_nop(ring, count);
+ count &= ring->funcs->align_mask;
+
+ if (count != 0)
+ ring->funcs->insert_nop(ring, count);
mb();
amdgpu_ring_set_wptr(ring);
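The rewritten padding computation is equivalent to the old modulo for power-of-two fetch sizes, but makes the already-aligned case explicit; a quick userspace check of the arithmetic (values are illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned int align_mask = 0x3f;	/* 64-dword fetch size */
		unsigned int count;

		/* wptr mid-fetch-window (0x105): pad with 59 NOPs */
		count = (align_mask + 1 - (0x105 & align_mask)) & align_mask;
		assert(count == 59);

		/* wptr already aligned (0x140): count is 0, insert_nop is skipped */
		count = (align_mask + 1 - (0x140 & align_mask)) & align_mask;
		assert(count == 0);
		return 0;
	}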
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 582053f1cd56..f93f51002201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -235,6 +235,8 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -334,6 +336,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
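The amdgpu_ring_reset() macro added above dispatches to the new per-IP .reset hook (implemented for gfx v10 further down); a hypothetical timeout-handler call site, assumed rather than taken from this series:

	/* Hypothetical recovery attempt before escalating to a full GPU reset */
	if (ring->funcs->reset) {
		r = amdgpu_ring_reset(ring, vmid);
		if (!r)
			return 0;	/* the single queue recovered */
	}
	/* otherwise fall through to whole-GPU recovery */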
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index d234b7ccfaaf..1c66da1c3fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -410,7 +410,7 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
- if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
return;
amdgpu_ring_mux_end_ib(mux, ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 863b2a34b2d6..b0a8abc7a8ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -22,7 +22,6 @@
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/pid.h>
@@ -43,10 +42,10 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
uint32_t id;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
@@ -72,10 +71,10 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d3706a484870..087ce0f6fa07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -115,6 +115,7 @@ struct amdgpu_sdma {
bool has_page_queue;
struct ras_common_if *ras_if;
struct amdgpu_sdma_ras *ras;
+ uint32_t *ip_dump;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 2f84bdb8c594..bb7b9b2eaac1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -196,7 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if ((err_data->ue_count || err_data->de_count) &&
- (reset || (con && con->is_rma))) {
+ (reset || amdgpu_ras_is_rma(adev))) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -204,55 +204,6 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
return AMDGPU_RAS_SUCCESS;
}
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms)
-{
- struct ras_err_data err_data;
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- uint32_t timeout = timeout_ms;
-
- memset(&err_data, 0, sizeof(err_data));
- amdgpu_ras_error_data_init(&err_data);
-
- do {
-
- amdgpu_umc_handle_bad_pages(adev, &err_data);
-
- if (timeout && !err_data.de_count) {
- msleep(1);
- timeout--;
- }
-
- } while (timeout && !err_data.de_count);
-
- if (!timeout)
- dev_warn(adev->dev, "Can't find bad pages\n");
-
- if (err_data.de_count)
- dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
-
- if (obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- obj->err_data.de_count += err_data.de_count;
- }
-
- amdgpu_ras_error_data_fini(&err_data);
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-
- if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
- con->gpu_reset_flags |= reset;
- amdgpu_ras_reset_gpu(adev);
- }
-
- return 0;
-}
-
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset)
@@ -472,43 +423,6 @@ int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
-{
- uint64_t *addr_a = (uint64_t *)a;
- uint64_t *addr_b = (uint64_t *)b;
-
- if (*addr_a > *addr_b)
- return 1;
- else if (*addr_a < *addr_b)
- return -1;
- else
- return 0;
-}
-
-/* Use string hash to avoid logging the same bad pages repeatedly */
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
- int offset = 0, i = 0;
- uint64_t hash_val;
-
- if (!pfns || !len)
- return -EINVAL;
-
- sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
-
- for (i = 0; i < len; i++)
- offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
-
- hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
-
- *val = hash_val;
-
- return 0;
-}
-
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
@@ -519,18 +433,10 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
ecc_log = &con->umc_ecc_log;
mutex_lock(&ecc_log->lock);
- ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
- if (!ret) {
- struct ras_err_pages *err_pages = &ecc_err->err_pages;
- int i;
-
- /* Reserve memory */
- for (i = 0; i < err_pages->count; i++)
- amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
-
+ ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
+ if (!ret)
radix_tree_tag_set(ecc_tree,
- ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
- }
+ ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
mutex_unlock(&ecc_log->lock);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 5f50c69c3cec..ce4179db2a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -127,13 +127,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms);
-
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val);
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index fbc2852278e1..6162582d0aa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 07d930339b07..31fd30dcd593 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
@@ -1088,7 +1088,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
int r;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 968ca2c84ef7..74fdbf71d95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -749,7 +749,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
int i, r = 0;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
for (idx = 0; idx < ib->length_dw;) {
uint32_t len = amdgpu_ib_get_value(ib, idx);
@@ -1044,7 +1043,6 @@ out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
- amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c87d68d4be53..2a1f3dbb14d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -330,6 +330,9 @@ struct amdgpu_vcn {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool using_unified_queue;
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b287a82e6177..b6397d3229e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -33,6 +33,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
@@ -849,6 +850,13 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
+{
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
+}
+
void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b42a8854dca0..b650a2032c42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -376,6 +376,7 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
uint32_t ucode_id);
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
void amdgpu_virt_post_reset(struct amdgpu_device *adev);
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 6415d0d039e1..e5f508d34ed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -549,7 +549,7 @@ static int amdgpu_vkms_sw_fini(void *handle)
adev->mode_info.mode_config_initialized = false;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
kfree(adev->amdgpu_vkms_output);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a060c28f0877..2452dfa6314f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -681,6 +681,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ if (adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader &&
+ job->enforce_isolation)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
@@ -742,6 +747,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
+
amdgpu_ring_ib_end(ring);
return 0;
}
@@ -838,7 +844,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto error;
@@ -902,10 +908,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
{
struct amdgpu_vm *vm = params->vm;
- if (!fence || !*fence)
+ tlb_cb->vm = vm;
+ if (!fence || !*fence) {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
return;
+ }
- tlb_cb->vm = vm;
if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
amdgpu_vm_tlb_seq_cb)) {
dma_fence_put(vm->last_tlb_flush);
@@ -933,7 +941,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
* @unlocked: unlocked invalidation during MM callback
* @flush_tlb: trigger tlb invalidation after update completed
* @allow_override: change MTYPE for local NUMA nodes
- * @resv: fences we need to sync to
+ * @sync: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -949,16 +957,16 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
 * 0 for success, negative error code for failure.
*/
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_tlb_seq_struct *tlb_cb;
struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
- enum amdgpu_sync_mode sync_mode;
int r, idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -991,14 +999,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.allow_override = allow_override;
INIT_LIST_HEAD(&params.tlb_flush_waitlist);
- /* Implicitly sync to command submissions in the same VM before
- * unmapping. Sync to moving fences before mapping.
- */
- if (!(flags & AMDGPU_PTE_VALID))
- sync_mode = AMDGPU_SYNC_EQ_OWNER;
- else
- sync_mode = AMDGPU_SYNC_EXPLICIT;
-
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
@@ -1013,7 +1013,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, resv, sync_mode);
+ r = vm->update_funcs->prepare(&params, sync);
if (r)
goto error_free;
@@ -1155,23 +1155,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
+ struct dma_fence **last_update;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
- struct dma_fence **last_update;
+ struct amdgpu_sync sync;
bool flush_tlb = clear;
- bool uncached;
- struct dma_resv *resv;
uint64_t vram_base;
uint64_t flags;
+ bool uncached;
int r;
+ amdgpu_sync_create(&sync);
if (clear || !bo) {
mem = NULL;
- resv = vm->root.bo->tbo.base.resv;
+
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
@@ -1185,6 +1192,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT))
pages_addr = bo->tbo.ttm->dma_address;
+
+ /* Implicitly sync to moving fences before mapping anything */
+ r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+ AMDGPU_SYNC_EXPLICIT, vm);
+ if (r)
+ goto error_free;
}
if (bo) {
@@ -1234,12 +1247,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_update(mapping);
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
- !uncached, resv, mapping->start, mapping->last,
- update_flags, mapping->offset,
- vram_base, mem, pages_addr,
- last_update);
+ !uncached, &sync, mapping->start,
+ mapping->last, update_flags,
+ mapping->offset, vram_base, mem,
+ pages_addr, last_update);
if (r)
- return r;
+ goto error_free;
}
/* If the BO is not in its preferred location add it back to
@@ -1267,7 +1280,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_mapping(mapping);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
@@ -1414,25 +1429,34 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
- struct dma_resv *resv = vm->root.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
- uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
+ struct amdgpu_sync sync;
int r;
+
+ /*
+ * Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ amdgpu_sync_create(&sync);
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
- resv, mapping->start, mapping->last,
- init_pte_value, 0, 0, NULL, NULL,
- &f);
+ &sync, mapping->start, mapping->last,
+ 0, 0, 0, NULL, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
- return r;
+ goto error_free;
}
}
@@ -1443,7 +1467,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
dma_fence_put(f);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
@@ -2218,7 +2244,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
(1 << 30) - 1) >> 30;
vm_size = roundup_pow_of_two(
- min(max(phys_ram_gb * 3, min_vm_size), max_size));
+ clamp(phys_ram_gb * 3, min_vm_size, max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
@@ -2421,6 +2447,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
vm->is_compute_context = false;
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2485,6 +2513,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
amdgpu_vm_fini_entities(vm);
return r;
@@ -2641,6 +2670,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
}
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
}
/**
@@ -2754,6 +2784,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
+ * @ts: Timestamp of the fault
* @vmid: VMID, only used for GFX 9.4.3.
* @node_id: Node_id received in IH cookie. Only applicable for
* GFX 9.4.3.
@@ -2764,7 +2795,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault)
{
bool is_compute_context = false;
@@ -2790,7 +2821,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
- node_id, addr, write_fault)) {
+ node_id, addr, ts, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 046949c4b695..52dd7cdfdc81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -304,8 +304,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode);
+ int (*prepare)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -505,9 +505,10 @@ int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
@@ -558,7 +559,7 @@ amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 3895bd7d176a..0c1ef5850a5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -39,20 +39,18 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
* amdgpu_vm_cpu_prepare - prepare page table update with the CPU
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: sync obj with fences to wait on
*
* Returns:
 * Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- if (!resv)
+ if (!sync)
return 0;
- return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
+ return amdgpu_sync_wait(sync, true);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index e39d6e7643bf..a076f43097e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -403,7 +403,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto exit;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 9b748d7058b5..4772fba33285 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -77,32 +77,24 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: amdgpu_sync object with fences to wait for
*
* Returns:
 * Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- struct amdgpu_sync sync;
int r;
r = amdgpu_vm_sdma_alloc_job(p, 0);
if (r)
return r;
- if (!resv)
+ if (!sync)
return 0;
- amdgpu_sync_create(&sync);
- r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
- if (!r)
- r = amdgpu_sync_push_to_job(&sync, p->job);
- amdgpu_sync_free(&sync);
-
+ r = amdgpu_sync_push_to_job(sync, p->job);
if (r) {
p->num_dw_left = 0;
amdgpu_job_free(p->job);
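After this change the caller builds the amdgpu_sync container once and prepare() merely consumes it; a condensed sketch of the new calling convention, assembled from the hunks above (control flow simplified):

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	/* collect fences per the desired implicit-sync semantics */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_EQ_OWNER, vm);
	if (!r)
		r = vm->update_funcs->prepare(&params, &sync);
	amdgpu_sync_free(&sync);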
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 821ba2309dec..7de449fae1e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1389,10 +1389,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index fb2b394bb9c5..6e9eeaeb3de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info {
uint32_t gpu_capacity;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
-};
+} __packed;
struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte */
@@ -273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t mes_info_size;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
-};
+} __packed;
/* mailbox message send from guest to host */
enum amd_sriov_mailbox_request_message {
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 228fd4dd32f1..26e2188101e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -75,6 +75,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_mask;
ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
return;
@@ -103,6 +105,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
ring->xcp_id = xcp_id;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 25feab188dfe..a51f3414b65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -215,7 +215,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
dig->bl_dev = bd;
bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("amdgpu atom DIG backlight initialized\n");
@@ -2064,27 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- adev->mode_info.bios_hardcoded_edid = edid;
- adev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ adev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- struct_size(fake_edid_record,
- ucFakeEDIDString,
- fake_edid_record->ucFakeEDIDLength) :
- /* empty fake edid record must be 3 bytes long */
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 55982c0064b5..06088d52d81c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -364,6 +364,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index dddb5fe16f2c..742adbc460c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2846,7 +2846,7 @@ static int dce_v10_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 11780e4d7e9f..8d46ebadfa46 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2973,7 +2973,7 @@ static int dce_v11_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 05c0df97f01d..f08dc6a3886f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2745,7 +2745,7 @@ static int dce_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index dc73e301d937..a6a3adf2ae13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2766,7 +2766,7 @@ static int dce_v8_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e444e621ddaa..45ed97038df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4649,7 +4649,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -4662,7 +4662,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -4675,7 +4675,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -4741,6 +4741,13 @@ static int gfx_v10_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_10_1__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -5213,26 +5220,74 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
}
+static u32 gfx_v10_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v10_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
@@ -6637,13 +6692,13 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6695,7 +6750,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_gfx_init_queue(ring);
+ r = gfx_v10_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -6975,13 +7030,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!restore && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -7043,7 +7098,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring);
+ r = gfx_v10_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -7369,6 +7424,7 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* WA added for Vangogh asic fixing the SMU suspend failure
* It needs to set power gating again during gfxoff control
@@ -7679,6 +7735,10 @@ static int gfx_v10_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -8889,7 +8949,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -9074,12 +9136,39 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9088,17 +9177,75 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9122,8 +9269,8 @@ static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -9150,6 +9297,15 @@ static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v10_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -9244,6 +9400,174 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
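+/*
+ * Illustrative walk-through (not part of the patch): a PACKET3 header
+ * with count n is followed by n + 1 payload dwords, so for num_nop == 8
+ * the header PACKET3(PACKET3_NOP, 6) plus the 7 filler dwords from the
+ * loop decode as one 8-dword packet -- the CP skips the whole pad in a
+ * single step instead of decoding 8 separate NOPs. Only past the 14-bit
+ * count cap (0x3ffe, i.e. pads longer than 0x4000 dwords) do the
+ * surplus filler dwords decode as stand-alone NOPs.
+ */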
+
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ u64 addr;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
+ offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+
+ gfx_v10_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ 0, 1, 0x20);
+ gfx_v10_0_ring_emit_reg_wait(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
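+/*
+ * Reset a hung kernel compute queue (KCQ): unmap it through the KIQ with
+ * RESET_QUEUES, poll CP_HQD_ACTIVE under SRBM select until the dequeue
+ * completes, reinitialize the MQD, then remap the queue and test the ring.
+ */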
+static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+	/* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -9435,7 +9759,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
@@ -9447,6 +9771,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9476,12 +9801,14 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -9536,6 +9863,11 @@ static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
.process = gfx_v10_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v10_0_bad_op_irq_funcs = {
+ .set = gfx_v10_0_set_bad_op_fault_state,
+ .process = gfx_v10_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
.set = gfx_v10_0_set_priv_inst_fault_state,
.process = gfx_v10_0_priv_inst_irq,
@@ -9557,6 +9889,9 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v10_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index dcef39907449..d3e8be82a172 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -481,6 +481,24 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
amdgpu_ring_write(ring, inv); /* poll interval */
}
+static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+	/* Max HW optimization up to 0x3ffe, then emit the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+	/* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -1484,7 +1502,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1497,7 +1515,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1510,7 +1528,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1569,6 +1587,13 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1953,26 +1978,74 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_init_gds_vmid(adev);
}
+static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
@@ -3911,13 +3984,13 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3953,7 +4026,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_gfx_init_queue(ring);
+ r = gfx_v11_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4248,13 +4321,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -4318,7 +4391,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring);
+ r = gfx_v11_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4598,6 +4671,7 @@ static int gfx_v11_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -4668,8 +4742,8 @@ static int gfx_v11_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
- int req)
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
{
u32 i, tmp, val;
@@ -4707,6 +4781,8 @@ static int gfx_v11_0_soft_reset(void *handle)
int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
@@ -4714,8 +4790,6 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_set_safe_mode(adev, 0);
-
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
@@ -4740,8 +4814,10 @@ static int gfx_v11_0_soft_reset(void *handle)
mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, true);
if (r) {
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
return r;
}
@@ -4755,7 +4831,8 @@ static int gfx_v11_0_soft_reset(void *handle)
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
/* release the gfx mutex */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
if (r) {
DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
return r;
@@ -4823,7 +4900,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -4954,6 +5031,9 @@ static int gfx_v11_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
return 0;
}
@@ -5843,6 +5923,9 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -6008,7 +6091,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -6201,15 +6286,42 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6218,17 +6330,75 @@ static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+					    unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6252,8 +6422,8 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -6281,6 +6451,15 @@ static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v11_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6367,6 +6546,99 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
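+/*
+ * On gfx11 the scheduler queues are managed by the MES, so the KGQ reset
+ * goes through amdgpu_mes_reset_legacy_queue() instead of the KIQ; the
+ * MQD is then reinitialized and the queue remapped via the MES.
+ */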
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
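+/*
+ * KCQ reset on gfx11: issue the HQD dequeue request and the SPI compute
+ * queue reset directly under SRBM select, wait for CP_HQD_ACTIVE to
+ * clear, then reinitialize the MQD and remap the queue through the MES.
+ */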
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+	/* make sure dequeue is complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6556,7 +6828,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
@@ -6568,6 +6840,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6598,12 +6871,14 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
@@ -6658,6 +6933,11 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
.process = gfx_v11_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
+ .set = gfx_v11_0_set_bad_op_fault_state,
+ .process = gfx_v11_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.set = gfx_v11_0_set_priv_inst_fault_state,
.process = gfx_v11_0_priv_inst_irq,
@@ -6675,6 +6955,9 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
index 10cfc29c27c9..157a5c812259 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
@@ -26,4 +26,7 @@
extern const struct amdgpu_ip_block_version gfx_v11_0_ip_block;
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 9cd221ed240c..999bb3cc88b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -97,7 +97,7 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
- if (con && !con->is_rma)
+ if (con && !amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index e45d23e82878..d1357c01eb39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1281,7 +1281,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1294,7 +1294,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1307,7 +1307,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1355,6 +1355,13 @@ static int gfx_v12_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1686,26 +1693,68 @@ static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
gfx_v12_0_init_compute_vmid(adev);
}
+static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
@@ -2867,13 +2916,13 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -2909,7 +2958,7 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_gfx_init_queue(ring);
+ r = gfx_v12_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3213,13 +3262,13 @@ static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3283,7 +3332,7 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring);
+ r = gfx_v12_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3553,6 +3602,7 @@ static int gfx_v12_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3672,6 +3722,10 @@ static int gfx_v12_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -4447,6 +4501,9 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -4563,7 +4620,9 @@ static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -4747,15 +4806,42 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4764,17 +4850,75 @@ static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+					    unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4798,8 +4942,8 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -4827,6 +4971,15 @@ static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v12_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -4859,6 +5012,24 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+	/* Max HW optimization up to 0x3ffe, then emit the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+	/* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4989,6 +5160,93 @@ static void gfx_v12_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
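+/* KGQ reset on gfx12 follows the same MES-based flow as gfx11. */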
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r) {
+ dev_err(adev->dev, "reset via MES failed %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
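+/* KCQ reset on gfx12 mirrors gfx11: direct HQD dequeue, MQD reinit, MES remap. */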
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r, i;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
.name = "gfx_v12_0",
.early_init = gfx_v12_0_early_init,
@@ -5040,7 +5298,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
@@ -5051,6 +5309,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5078,12 +5337,14 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
@@ -5138,6 +5399,11 @@ static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = {
.process = gfx_v12_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = {
+ .set = gfx_v12_0_set_bad_op_fault_state,
+ .process = gfx_v12_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = {
.set = gfx_v12_0_set_priv_inst_fault_state,
.process = gfx_v12_0_priv_inst_irq,
@@ -5151,6 +5417,9 @@ static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d84589137df9..f146806c4633 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2114,6 +2114,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
*/
@@ -2133,7 +2135,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -4921,6 +4924,76 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
+static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
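+/*
+ * Legacy KGQ reset: the KIQ writes the per-VMID reset request to
+ * CP_VMID_RESET; the gfx ring itself then emits a fence flagged with
+ * EOP_EXEC, waits for CP_VMID_RESET to read back zero and clears it.
+ */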
+static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -4972,6 +5045,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
+ .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5002,6 +5076,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b4658c7db0e1..bc8295812cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6149,6 +6149,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
@@ -6172,7 +6173,8 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -6380,6 +6382,34 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
+static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6856,6 +6886,48 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
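+/* KGQ reset on gfx8 uses the same CP_VMID_RESET sequence as gfx7. */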
+static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6923,6 +6995,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
+ .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6955,6 +7028,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2929c8972ea7..23f0573ae47b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,6 +50,7 @@
#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
@@ -893,10 +894,18 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -906,8 +915,8 @@ static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
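+	/* the dwords that previously carried the GWS mask now pass the cleaner shader address */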
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -1004,12 +1013,47 @@ static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+
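+/*
+ * KIQ-side hw queue reset for gfx9: only compute queues are supported.
+ * The HQD is dequeued directly under SRBM select while the RLC is held
+ * in safe mode.
+ */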
+static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+	unsigned int i;
+
+	/* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+		/* wait till the dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_0_kiq_set_resources,
.kiq_map_queues = gfx_v9_0_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
.kiq_query_status = gfx_v9_0_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -1301,6 +1345,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
+ /* https://bbs.openkylin.top/t/topic/171497 */
+ { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
+ /* HP 705G4 DM with R5 2400G */
+ { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
{ 0, 0, 0, 0, 0 },
};
@@ -2129,7 +2177,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -2142,7 +2190,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -2174,6 +2222,12 @@ static int gfx_v9_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -2182,6 +2236,13 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -2329,6 +2390,10 @@ static int gfx_v9_0_sw_init(void *handle)
gfx_v9_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2364,6 +2429,8 @@ static int gfx_v9_0_sw_fini(void *handle)
}
gfx_v9_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -2634,7 +2701,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- if(adev->gfx.num_gfx_rings)
+ if (adev->gfx.num_gfx_rings)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
@@ -3735,7 +3802,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -3747,8 +3814,8 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3812,7 +3879,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring);
+ r = gfx_v9_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3908,6 +3975,9 @@ static int gfx_v9_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_0_init_golden_registers(adev);
@@ -3937,6 +4007,7 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
@@ -4747,6 +4818,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
r = gfx_v9_0_ecc_late_init(handle);
if (r)
return r;
@@ -5858,7 +5933,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -5929,17 +6006,95 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
}
}
+static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6121,6 +6276,15 @@ static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_0_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -7001,6 +7165,157 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+	/* Max HW optimization till 0x3ffe, followed by remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+	/* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
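+
+/*
+ * Illustrative accounting (editorial note, not part of the patch): for any
+ * num_nop >= 2 the helper above emits exactly num_nop dwords:
+ *
+ *   1 header dword (PACKET3_NOP, count = min(num_nop - 2, 0x3ffe))
+ *   + (num_nop - 1) trailing single-dword NOPs
+ *   = num_nop dwords
+ *
+ * e.g. num_nop = 16 emits one header with count 14 followed by 15 single
+ * NOP dwords.
+ */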
+
+static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v9_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
+ return -ENOMEM;
+ gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v9_0_ring_emit_reg_wait(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
+ gfx_v9_0_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
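+
+/*
+ * Editorial summary of the KGQ reset above (not part of the patch): the KIQ
+ * writes CP_VMID_RESET with the request bit for the guilty vmid; the gfx
+ * ring then re-signals its last fence (AMDGPU_FENCE_FLAG_EXEC), waits for
+ * the low bits of CP_VMID_RESET to read back zero, clears the register and
+ * finishes with a ring test.
+ */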
+
+static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+	/* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(adev->dev, "failed to reserve mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ DRM_ERROR("fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
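+
+/*
+ * Editorial summary of the KCQ reset above (not part of the patch):
+ *   1. KIQ UNMAP_QUEUES with the RESET_QUEUES action for the target ring
+ *   2. poll CP_HQD_ACTIVE under srbm select until the HQD deactivates
+ *   3. re-init the MQD with restore = true so the saved ring state is kept
+ *   4. KIQ MAP_QUEUES to bring the queue back, then ring tests on both the
+ *      KIQ ring and the reset ring
+ */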
+
static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -7083,6 +7398,13 @@ static void gfx_v9_ip_dump(void *handle)
}
+static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
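+
+/*
+ * Editorial note: this helper emits exactly two dwords, which is why each
+ * emit_frame_size below grows by 2 for gfx_v9_0_ring_emit_cleaner_shader.
+ */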
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7132,7 +7454,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7141,7 +7464,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7153,6 +7476,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .reset = gfx_v9_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
@@ -7185,7 +7512,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7195,7 +7523,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_sw_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7209,6 +7537,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.patch_cntl = gfx_v9_0_ring_patch_cntl,
.patch_de = gfx_v9_0_ring_patch_de_meta,
.patch_ce = gfx_v9_0_ring_patch_ce_meta,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -7229,7 +7560,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7239,13 +7571,18 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
+ .reset = gfx_v9_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -7303,6 +7640,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
.process = gfx_v9_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
+ .set = gfx_v9_0_set_bad_op_fault_state,
+ .process = gfx_v9_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.set = gfx_v9_0_set_priv_inst_fault_state,
.process = gfx_v9_0_priv_inst_irq,
@@ -7322,6 +7664,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
new file mode 100644
index 000000000000..36c0292b5110
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* Define the cleaner shader gfx_9_0 */
+static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
+ /* Add the cleaner shader code here */
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 20ea6cb01edf..408e5600bb61 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -37,6 +37,7 @@
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"
@@ -63,6 +64,98 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
#define NORMALIZE_XCC_REG_OFFSET(offset) \
(offset & 0xFFFF)
+static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
+ /* SE status registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
+};
+
+static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
+ /* compute queue registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+};
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -71,10 +164,18 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
+static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -84,8 +185,8 @@ static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -182,12 +283,46 @@ static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+
+	/* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
+		/* wait till the dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
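+
+/*
+ * Editorial note (not part of the patch): unlike the packet-based resets,
+ * this KIQ callback drains the HQD directly through registers: it requests
+ * a dequeue via CP_HQD_DEQUEUE_REQUEST, raises the per-queue reset in
+ * SPI_COMPUTE_QUEUE_RESET, then polls CP_HQD_ACTIVE for up to
+ * adev->usec_timeout microseconds, all under RLC safe mode and the srbm
+ * mutex.
+ */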
+
static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
.kiq_query_status = gfx_v9_4_3_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -885,11 +1020,59 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
+static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+ uint32_t *ptr, num_xcc, inst;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
+ adev->gfx.ip_dump_core = NULL;
+ } else {
+ adev->gfx.ip_dump_core = ptr;
+ }
+
+ /* Allocate memory for compute queue registers for all the instances */
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
+ adev->gfx.ip_dump_compute_queues = NULL;
+ } else {
+ adev->gfx.ip_dump_compute_queues = ptr;
+ }
+}
+
static int gfx_v9_4_3_sw_init(void *handle)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
+ if (adev->gfx.mec_fw_version >= 153) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -901,6 +1084,13 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -976,10 +1166,19 @@ static int gfx_v9_4_3_sw_init(void *handle)
return r;
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev)) {
r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+ }
- return r;
+ gfx_v9_4_3_alloc_ip_dump(adev);
+
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
+ return 0;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -997,11 +1196,17 @@ static int gfx_v9_4_3_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev, i);
}
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
if (!amdgpu_sriov_vf(adev))
amdgpu_gfx_sysfs_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
+ kfree(adev->gfx.ip_dump_core);
+ kfree(adev->gfx.ip_dump_compute_queues);
return 0;
}
@@ -1910,7 +2115,7 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -1922,8 +2127,8 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -2008,7 +2213,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -2139,6 +2344,9 @@ static int gfx_v9_4_3_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_4_3_init_golden_registers(adev);
@@ -2162,6 +2370,7 @@ static int gfx_v9_4_3_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
@@ -2327,6 +2536,10 @@ static int gfx_v9_4_3_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
if (adev->gfx.ras &&
adev->gfx.ras->enable_watchdog_timer)
adev->gfx.ras->enable_watchdog_timer(adev);
@@ -2833,6 +3046,24 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ if (!adev->debug_exp_resets)
+ return;
+
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
+}
+
static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
struct amdgpu_device *adev, int me, int pipe,
enum amdgpu_interrupt_state state, int xcc_id)
@@ -2886,21 +3117,103 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
}
}
+static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int xcc_id, int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i, num_xcc;
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- for (i = 0; i < num_xcc; i++)
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < num_xcc; i++) {
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
break;
default:
break;
@@ -3061,6 +3374,15 @@ static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -3147,6 +3469,183 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
+ uint32_t pipe, uint32_t queue,
+ uint32_t xcc_id)
+{
+ int i, r;
+	/* make sure dequeue is complete */
+ gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ else
+ r = 0;
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
+
+ return r;
+}
+
+static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
+{
+	/* TODO: check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->gfx.mec_fw_version >= 0x0000009b)
+ return true;
+ else
+		dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
+
+ return false;
+}
+
+static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe, clean_pipe;
+ int r;
+
+ if (!gfx_v9_4_3_pipe_reset_support(adev))
+ return -EINVAL;
+
+ gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+
+ reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (ring->pipe)
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ else
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ }
+
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ return r;
+}
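+
+/*
+ * Editorial summary (not part of the patch): the pipe reset pulses the
+ * per-pipe MEC_MEx_PIPEy_RESET bit in CP_MEC_CNTL (write the value with
+ * the reset bit set, then write back the saved value) and reuses
+ * gfx_v9_4_3_unmap_done() to confirm the HQD went idle afterwards.
+ */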
+
+static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int r;
+
+ if (!adev->debug_exp_resets)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
+ ring->name);
+ goto pipe_reset;
+ }
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ if (r)
+ dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
+
+pipe_reset:
+	if (r) {
+		r = gfx_v9_4_3_reset_hw_pipe(ring);
+		dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
+			 r ? "failed" : "succeeded");
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(adev->dev, "failed to reserve mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
+
enum amdgpu_gfx_cp_ras_mem_id {
AMDGPU_GFX_CP_MEM1 = 1,
AMDGPU_GFX_CP_MEM2,
@@ -3959,8 +4458,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
/* the caller should make sure initialize value of
* err_data->ue_count and err_data->ce_count
*/
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
}
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
@@ -4062,6 +4561,151 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}
+static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+	/* Max HW optimization till 0x3ffe, followed by remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+	/* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ drm_printf(p, "Number of Instances:%d\n", num_xcc);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ drm_printf(p, "\nInstance id:%d\n", xcc_id);
+ for (i = 0; i < reg_count; i++)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_reg_list_9_4_3[i].reg_name,
+ adev->gfx.ip_dump_core[xcc_offset + i]);
+ }
+
+ /* print compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
+ num_xcc,
+ adev->gfx.mec.num_mec,
+ adev->gfx.mec.num_pipe_per_mec,
+ adev->gfx.mec.num_queue_per_pipe);
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ drm_printf(p,
+ "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
+ xcc_id, i, j, k);
+ for (reg = 0; reg < reg_count; reg++) {
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+}
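+
+/*
+ * Illustrative indexing example (editorial, with made-up numbers): both the
+ * print and dump loops address the per-queue snapshot as
+ *
+ *   index = xcc_id * reg_count * num_inst
+ *         + ((mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue)
+ *             * reg_count
+ *         + reg
+ *
+ * so with num_mec = 2, num_pipe_per_mec = 4 and num_queue_per_pipe = 8, the
+ * block for mec 0 / pipe 2 / queue 3 starts at inst_offset = 19 * reg_count.
+ */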
+
+static void gfx_v9_4_3_ip_dump(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ for (i = 0; i < reg_count; i++)
+ adev->gfx.ip_dump_core[xcc_offset + i] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
+ GET_INST(GC, xcc_id)));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+
+ /* dump compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->srbm_mutex);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ /* ME0 is for GFX so start from 1 for CP */
+ soc15_grbm_select(adev, 1 + i, j, k, 0,
+ GET_INST(GC, xcc_id));
+
+ for (reg = 0; reg < reg_count; reg++) {
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.name = "gfx_v9_4_3",
.early_init = gfx_v9_4_3_early_init,
@@ -4078,8 +4722,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
.set_powergating_state = gfx_v9_4_3_set_powergating_state,
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = gfx_v9_4_3_ip_dump,
+ .print_ip_state = gfx_v9_4_3_ip_print,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
@@ -4101,7 +4745,8 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_4_3_emit_mem_sync */
5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
.emit_fence = gfx_v9_4_3_ring_emit_fence,
@@ -4111,13 +4756,18 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
.test_ring = gfx_v9_4_3_ring_test_ring,
.test_ib = gfx_v9_4_3_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_4_3_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_4_3_ring_soft_recovery,
.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
+ .reset = gfx_v9_4_3_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
@@ -4172,6 +4822,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
.process = gfx_v9_4_3_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
+ .set = gfx_v9_4_3_set_bad_op_fault_state,
+ .process = gfx_v9_4_3_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
.set = gfx_v9_4_3_set_priv_inst_fault_state,
.process = gfx_v9_4_3_priv_inst_irq,
@@ -4185,6 +4840,9 @@ static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
new file mode 100644
index 000000000000..d5325ef80ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and rename the lower shader main to main_1.
+
+// MI300 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches one workgroup of 24 waves, yielding 6 waves per SIMD in each CU
+// Waves allocate 96 SGPRs each
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched, CP will send an unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
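+// Editorial sanity check on the coverage above (arithmetic added here, not
+// from the SQ team notes): kernel 1 clears 4 waves/SIMD x 96 SGPRs = physical
+// SGPRs 0-383; kernel 2 clears 6 waves/SIMD x 96 SGPRs = 576 registers
+// starting at 224, i.e. 224-799, so together the two kernels touch all 800
+// physical SGPRs.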
+
+shader main
+ asic(MI300)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+	s_cmp_eq_u32 s0, 1				// If sgpr0 == 1 (FW sets it via COMPUTE_USER_DATA_3), clear VGPRs and LDS
+	s_cbranch_scc0 label_0023			// Clean VGPRs and LDS only when sgpr0 of the wave is set, scc = (s0 == 1)
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+	s_and_b32 s2, s2, s1				// sgpr1 has the tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+	s_cbranch_scc0 label_clean_sgpr_1		// Clean LDS only if this is the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+	s_mov_b32 s2, 0x00000003f			// 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+	s_mov_b64 vcc, 0xee				//vcc written with 0xee per the original SQ source (matches the hex blob)
+
+s_endpgm
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
new file mode 100644
index 000000000000..69aa567c6c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_9_4_3 */
+static const u32 gfx_9_4_3_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d200310d1731..0e3ddea7b8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -443,23 +443,6 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev)
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
-static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
- return false;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
-
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
@@ -468,5 +451,4 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 72109abe7c86..ed8e130c7d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -622,22 +622,6 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
-static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
@@ -646,7 +630,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f0ceab3ce5bf..9784a2892185 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -132,7 +132,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
return 1;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b73136d390cc..c76ac0dfe572 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -595,7 +595,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
cam_index = entry->src_data[2] & 0x3ff;
ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault);
+ addr, entry->timestamp, write_fault);
WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
if (ret)
return 1;
@@ -618,7 +618,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* tables
*/
if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault))
+ addr, entry->timestamp, write_fault))
return 1;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 077c6d920e27..e019249883fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -41,7 +41,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index a9ea23fa0def..ed7facacf2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -32,7 +32,7 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index ab06c2b4b20b..33736d361dd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -35,7 +35,7 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 8d7d0813e331..1c99bb09e2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -32,7 +32,7 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
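All four HDP hunks above make the same substitution: the host-side flush writes the remapped HDP_MEM_FLUSH_CNTL register through plain WREG32 instead of the NO_KIQ variant. The >> 2 in each call turns a byte offset into the dword register index that WREG32 expects; a small sketch of that arithmetic, with made-up offsets:

/* Hypothetical offsets, for illustration only; the real values come
 * from adev->rmmio_remap.reg_offset plus
 * KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL. */
#define EX_REMAP_BASE_BYTES 0x1A000u /* start of the remap window */
#define EX_FLUSH_CNTL_BYTES 0x0u     /* flush reg within the window */

static inline unsigned int ex_hdp_flush_reg_index(void)
{
	/* MMIO registers are 32 bits wide: byte offset / 4 = index */
	return (EX_REMAP_BASE_BYTES + EX_FLUSH_CNTL_BYTES) >> 2;
}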
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index aac107898bae..964c29ef25dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -42,23 +42,23 @@ static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = {
static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource),
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +83,53 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_gpio_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
+ ISP410_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_0_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
+ GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP410_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP410_I2C0_OFFSET + ISP410_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 315f2822410c..7db24c0f1080 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP410_MEM_RES 2
+#define MAX_ISP410_SENSOR_RES 1
#define MAX_ISP410_INT_SRC 8
+#define ISP410_PHY0_OFFSET 0x66700
+#define ISP410_PHY0_SIZE 0xD30
+
+#define ISP410_I2C0_OFFSET 0x66400
+#define ISP410_I2C0_SIZE 0x100
+
+#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
#endif
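With the defines above, the resource table assembled in isp_v4_1_0.c has a fixed layout: the two plain MMIO ranges come first, the sensor GPIO range next, and the eight interrupt lines last; the I2C range lives in the separate isp_i2c_res array backing the second MFD cell. A compact restatement of that indexing, with local copies of the constants for illustration:

/* Layout of isp->isp_res[] after this change (array indices):
 *   [0]        ISP register block
 *   [1]        PHY0 register block
 *   [2]        sensor0 GPIO block
 *   [3]..[10]  the eight interrupt lines
 */
#define EX_MEM_RES    2  /* mirrors MAX_ISP410_MEM_RES */
#define EX_SENSOR_RES 1  /* mirrors MAX_ISP410_SENSOR_RES */
#define EX_INT_SRC    8  /* mirrors MAX_ISP410_INT_SRC */

static inline int ex_first_irq_index(void)
{
	/* IRQ entries start right after the memory + sensor entries */
	return EX_MEM_RES + EX_SENSOR_RES;              /* == 3 */
}

static inline int ex_num_res(void)
{
	return EX_MEM_RES + EX_SENSOR_RES + EX_INT_SRC; /* == 11 */
}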
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4e17fa03f7b5..b56f27295468 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -42,23 +42,24 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource),
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +84,52 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
+ ISP411_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_1_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP411_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP411_I2C0_OFFSET + ISP411_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index dfb9522c9d6a..40887ddeb08c 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP411_MEM_RES 2
+#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
+#define ISP411_PHY0_OFFSET 0x66700
+#define ISP411_PHY0_SIZE 0xD30
+
+#define ISP411_I2C0_OFFSET 0x66400
+#define ISP411_I2C0_SIZE 0x100
+
+#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 71f43a5c7f72..6e0e88076224 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v1_0.h"
@@ -34,6 +35,9 @@
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
@@ -300,7 +304,10 @@ static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
@@ -554,6 +561,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
+ .parse_cs = jpeg_v1_dec_ring_parse_cs,
.emit_frame_size =
6 + 6 + /* hdp invalidate / flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -611,3 +619,69 @@ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}
+
+/**
+ * jpeg_v1_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream; returns -EINVAL on an invalid packet,
+ * 0 otherwise.
+ */
+static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ int ret = 0;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res || cond != PACKETJ_CONDITION_CHECK0) /* only allow 0 for now */
+ return -EINVAL;
+
+ if (reg >= JPEG_V1_REG_RANGE_START && reg <= JPEG_V1_REG_RANGE_END)
+ continue;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH &&
+ reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW &&
+ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH &&
+ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW &&
+ reg != JPEG_V1_REG_CTX_INDEX &&
+ reg != JPEG_V1_REG_CTX_DATA) {
+ ret = -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE1:
+ if (reg != JPEG_V1_REG_CTX_DATA)
+ ret = -EINVAL;
+ break;
+ case PACKETJ_TYPE3:
+ if (reg != JPEG_V1_REG_SOFT_RESET)
+ ret = -EINVAL;
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] != CP_PACKETJ_NOP)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ break;
+ }
+ }
+
+ return ret;
+}
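The parser walks the IB two dwords at a time (a PACKETJ header followed by its payload) and splits each header with the CP_PACKETJ_GET_* macros. The bit layout below restates the usual soc15d.h encoding; it is given here as an assumption for illustration, not quoted from the header:

#include <stdint.h>

/* Assumed PACKETJ header layout (soc15d.h encoding):
 *   bits  0..17  register offset
 *   bits 18..23  reserved field ("res", must be zero here)
 *   bits 24..27  condition code
 *   bits 28..31  packet type
 */
static inline uint32_t ex_packetj_reg(uint32_t hdr)  { return hdr & 0x3ffff; }
static inline uint32_t ex_packetj_res(uint32_t hdr)  { return (hdr >> 18) & 0x3f; }
static inline uint32_t ex_packetj_cond(uint32_t hdr) { return (hdr >> 24) & 0xf; }
static inline uint32_t ex_packetj_type(uint32_t hdr) { return (hdr >> 28) & 0xf; }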
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
index bbf33a6a3972..9654d22e0376 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -29,4 +29,15 @@ int jpeg_v1_0_sw_init(void *handle);
void jpeg_v1_0_sw_fini(void *handle);
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+#define JPEG_V1_REG_RANGE_START 0x8000
+#define JPEG_V1_REG_RANGE_END 0x803f
+
+#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x8238
+#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x8239
+#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH 0x825a
+#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW 0x825b
+#define JPEG_V1_REG_CTX_INDEX 0x8328
+#define JPEG_V1_REG_CTX_DATA 0x8329
+#define JPEG_V1_REG_SOFT_RESET 0x83a0
+
#endif /*__JPEG_V1_0_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 98aa3ccd0d20..41c0f8750dc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -538,7 +539,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -764,6 +769,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -810,3 +816,58 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
+
+/**
+ * jpeg_v2_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream; returns -EINVAL on an invalid packet,
+ * 0 otherwise.
+ */
+int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
+ reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
+ reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 654e43e83e2c..63fadda7a673 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,6 +45,9 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -57,6 +60,9 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index d8ef95c847c2..eedb9a829d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -662,6 +662,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -691,6 +692,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 31cfa3ce6528..b1e7fd25afbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -560,6 +560,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3dac8f259d7f..6c5c1a68a9b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -727,6 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
index 07d36c2abd6b..47638fd4d4e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
@@ -32,5 +32,4 @@ enum amdgpu_jpeg_v4_0_sub_block {
};
extern const struct amdgpu_ip_block_version jpeg_v4_0_ip_block;
-
#endif /* __JPEG_V4_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 6ae5a784e187..86958cb2c2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -23,9 +23,9 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
+#include "jpeg_v2_0.h"
#include "jpeg_v4_0_3.h"
#include "mmsch_v4_0_3.h"
@@ -59,6 +59,12 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_4_0__SRCID__JPEG7_DECODE
};
+static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4));
+}
+
/**
* jpeg_v4_0_3_early_init - set function pointers
*
@@ -734,32 +740,20 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
amdgpu_ring_write(ring, 0);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x4);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x3fbc);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x0);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x1);
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
amdgpu_ring_write(ring, 0);
}
@@ -834,8 +828,8 @@ void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
@@ -881,8 +875,8 @@ void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
@@ -1089,7 +1083,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1254,56 +1248,3 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}
-
-/**
- * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- uint32_t i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 71c54b294e15..747a3e5f6856 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,9 +46,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -65,7 +62,5 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
-int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
+
#endif /* __JPEG_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index f96ac6bce526..44eeed445ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -768,6 +768,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index f4daff90c770..d662aa841f97 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -26,6 +26,7 @@
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
+#include "jpeg_v2_0.h"
#include "jpeg_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
@@ -646,7 +647,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 8aded0a67037..ee91ff9e52a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
+#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
@@ -360,6 +361,100 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v11_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait until the dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait until the dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__REMOVE_QUEUE, api_status));
+}
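Every scheduler call in this file follows the same frame-building pattern: zero a MESAPI union, fill the header's type, opcode and dwsize, set the op-specific members, then hand the frame to mes_v11_0_submit_pkt_and_poll_completion, which polls the embedded api_status until the firmware acknowledges it. A self-contained sketch of that pattern; all ex_* types, the stub helper, and the numeric header ids are hypothetical stand-ins for the real definitions in mes_v11_api_def.h:

#include <stddef.h> /* offsetof */
#include <stdint.h>
#include <string.h> /* memset */

/* Hypothetical stand-ins for the real MESAPI__* frame and helper. */
struct ex_mes_header { uint32_t type, opcode, dwsize; };
struct ex_mes_reset_pkt {
	struct ex_mes_header header;
	uint64_t gang_context_addr;
	uint32_t doorbell_offset;
	uint32_t api_status;          /* fence polled by the helper */
};

/* stub: the real helper writes the frame to the MES ring and polls */
static int ex_submit_and_poll(const void *pkt, size_t size,
			      size_t api_status_offset)
{
	(void)pkt; (void)size; (void)api_status_offset;
	return 0;
}

static int ex_reset_queue(uint64_t gang_ctx, uint32_t doorbell)
{
	struct ex_mes_reset_pkt pkt;

	memset(&pkt, 0, sizeof(pkt));         /* 1. zero the frame */
	pkt.header.type   = 1;                /* scheduler API (assumed id) */
	pkt.header.opcode = 2;                /* reset opcode (assumed id) */
	pkt.header.dwsize = sizeof(pkt) / 4;  /* frame size in dwords */

	pkt.gang_context_addr = gang_ctx;     /* 2. op-specific fields */
	pkt.doorbell_offset   = doorbell;

	return ex_submit_and_poll(&pkt, sizeof(pkt),   /* 3. submit + poll */
				  offsetof(struct ex_mes_reset_pkt, api_status));
}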
+
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -421,13 +516,41 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
@@ -595,6 +718,43 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
+static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -603,6 +763,8 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
+ .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v11_0_reset_hw_queue,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index a79a8adc3aa5..e499b2857a01 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -350,6 +350,32 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__REMOVE_QUEUE, api_status));
+}
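One v12-specific detail: when unified MES is enabled, scheduler packets are routed down the KIQ pipe instead of the dedicated scheduler pipe, and both reset paths repeat the same selection. A one-line restatement (the 0/1 values are placeholders; the driver uses its AMDGPU_MES_*_PIPE enum):

/* Sketch of the pipe choice shared by both v12 reset paths.
 * Numeric values are placeholders for AMDGPU_MES_SCHED_PIPE and
 * AMDGPU_MES_KIQ_PIPE. */
static inline int ex_pick_mes_pipe(int enable_uni_mes)
{
	/* unified MES: scheduler traffic goes down the KIQ pipe */
	return enable_uni_mes ? 1 /* KIQ */ : 0 /* SCHED */;
}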
+
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -676,6 +702,44 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
+static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -684,6 +748,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
+ .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v12_0_reset_hw_queue,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 621761a17ac7..b01bb759d0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -559,22 +559,6 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
}
-static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.get_fb_location = mmhub_v1_8_get_fb_location,
.init = mmhub_v1_8_init,
@@ -584,7 +568,6 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
- .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
@@ -670,8 +653,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index caf616a2c8a6..1d099ffb3a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,7 +25,7 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 15000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define NV_MAILBOX_POLL_MSG_REP_MAX 11
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 772604feb6ac..23ef4eb36b40 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -72,6 +72,53 @@ MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL)
+};
+
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -1750,6 +1797,8 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1870,6 +1919,13 @@ static int sdma_v4_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1890,6 +1946,8 @@ static int sdma_v4_0_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -2292,6 +2350,48 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_0_get_reg_offset(adev, i,
+ sdma_reg_list_4_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
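The dump buffer allocated in sw_init is one flat array of num_instances * reg_count dwords, filled instance-major by dump_ip_state and read back the same way by print_ip_state. A short sketch of that indexing:

#include <stdint.h>

/* The IP-dump buffer holds reg_count dwords per SDMA instance, laid
 * out instance-major; this mirrors the "instance_offset + j"
 * arithmetic used above. */
static inline uint32_t ex_ip_dump_read(const uint32_t *buf,
				       uint32_t reg_count,
				       uint32_t instance, uint32_t reg)
{
	return buf[instance * reg_count + reg];
}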
+
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
@@ -2308,6 +2408,8 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.set_clockgating_state = sdma_v4_0_set_clockgating_state,
.set_powergating_state = sdma_v4_0_set_powergating_state,
.get_clockgating_state = sdma_v4_0_get_clockgating_state,
+ .dump_ip_state = sdma_v4_0_dump_ip_state,
+ .print_ip_state = sdma_v4_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2c55bfd935bb..c77889040760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -46,6 +46,53 @@
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL)
+};
+
#define mmSMNAID_AID0_MCA_SMU 0x03b30400
#define WREG32_SDMA(instance, offset, value) \
@@ -1291,6 +1338,8 @@ static int sdma_v4_4_2_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 aid_id;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
@@ -1386,6 +1435,13 @@ static int sdma_v4_4_2_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1406,6 +1462,8 @@ static int sdma_v4_4_2_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1799,6 +1857,48 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_4_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
+ sdma_reg_list_4_4_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.name = "sdma_v4_4_2",
.early_init = sdma_v4_4_2_early_init,
@@ -1815,6 +1915,8 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
.set_powergating_state = sdma_v4_4_2_set_powergating_state,
.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+ .dump_ip_state = sdma_v4_4_2_dump_ip_state,
+ .print_ip_state = sdma_v4_4_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
@@ -2141,7 +2243,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b7d33d78bce0..3e48ea38385d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -59,6 +59,55 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1341,6 +1390,8 @@ static int sdma_v5_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
@@ -1378,6 +1429,13 @@ static int sdma_v5_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1391,6 +1449,8 @@ static int sdma_v5_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1718,7 +1778,49 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
+static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_0_get_reg_offset(adev, i,
+ sdma_reg_list_5_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
.late_init = NULL,
@@ -1734,6 +1836,8 @@ const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.set_clockgating_state = sdma_v5_0_set_clockgating_state,
.set_powergating_state = sdma_v5_0_set_powergating_state,
.get_clockgating_state = sdma_v5_0_get_clockgating_state,
+ .dump_ip_state = sdma_v5_0_dump_ip_state,
+ .print_ip_state = sdma_v5_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
index d4e3c2e696f6..2ab71f21755a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_0_H__
#define __SDMA_V5_0_H__
-extern const struct amd_ip_funcs sdma_v5_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_0_ip_block;
#endif /* __SDMA_V5_0_H__ */
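With the dump and print hooks registered through amd_ip_funcs, nothing outside sdma_v5_0.c touches the funcs table any longer, so it becomes static and its extern declaration is dropped; only the IP block version stays exported. The table remains reachable indirectly through that version, roughly of this shape (a sketch of the usual amdgpu pattern; the field values are assumptions):

	const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
		.type = AMD_IP_BLOCK_TYPE_SDMA,	/* assumed */
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v5_0_ip_funcs,	/* the now-static table */
	};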
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2e72d445415f..bc9b240a3488 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -60,6 +60,55 @@ MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1224,6 +1273,8 @@ static int sdma_v5_2_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1255,6 +1306,13 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1268,6 +1326,8 @@ static int sdma_v5_2_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1676,7 +1736,49 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_off_ctrl(adev, true);
}
-const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ sdma_reg_list_5_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
.late_init = NULL,
@@ -1692,6 +1794,8 @@ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.set_clockgating_state = sdma_v5_2_set_clockgating_state,
.set_powergating_state = sdma_v5_2_set_powergating_state,
.get_clockgating_state = sdma_v5_2_get_clockgating_state,
+ .dump_ip_state = sdma_v5_2_dump_ip_state,
+ .print_ip_state = sdma_v5_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
index b70414fef2a1..863145b3a77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_2_H__
#define __SDMA_V5_2_H__
-extern const struct amd_ip_funcs sdma_v5_2_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_2_ip_block;
#endif /* __SDMA_V5_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index dab4c2db8c9d..208a1fa9d4e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -57,6 +57,63 @@ MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1239,6 +1296,8 @@ static int sdma_v6_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1274,6 +1333,13 @@ static int sdma_v6_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1287,6 +1353,8 @@ static int sdma_v6_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1488,6 +1556,48 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_6_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v6_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v6_0_get_reg_offset(adev, i,
+ sdma_reg_list_6_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
@@ -1505,6 +1615,8 @@ const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.set_clockgating_state = sdma_v6_0_set_clockgating_state,
.set_powergating_state = sdma_v6_0_set_powergating_state,
.get_clockgating_state = sdma_v6_0_get_clockgating_state,
+ .dump_ip_state = sdma_v6_0_dump_ip_state,
+ .print_ip_state = sdma_v6_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ecee9e7d7e4c..cfd8e183ad50 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,64 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_REV),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1217,6 +1275,8 @@ static int sdma_v7_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1247,6 +1307,13 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1263,6 +1330,8 @@ static int sdma_v7_0_sw_fini(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
sdma_v12_0_free_ucode_buffer(adev);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1466,6 +1535,48 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_7_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v7_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v7_0_get_reg_offset(adev, i,
+ sdma_reg_list_7_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.name = "sdma_v7_0",
.early_init = sdma_v7_0_early_init,
@@ -1483,6 +1594,8 @@ const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.set_clockgating_state = sdma_v7_0_set_clockgating_state,
.set_powergating_state = sdma_v7_0_set_powergating_state,
.get_clockgating_state = sdma_v7_0_get_clockgating_state,
+ .dump_ip_state = sdma_v7_0_dump_ip_state,
+ .print_ip_state = sdma_v7_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 282584a48be0..ef7c603b50ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -93,6 +93,10 @@ struct soc15_ras_field_entry {
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
+/* Override the instance id */
+#define SOC15_REG_ENTRY_OFFSET_INST(entry, inst) \
+ (adev->reg_offset[entry.hwip][inst][entry.seg] + entry.reg_offset)
+
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
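SOC15_REG_ENTRY_OFFSET_INST mirrors SOC15_REG_ENTRY_OFFSET but substitutes a caller-supplied instance for the one recorded in the entry, which lets the VCN dump paths below walk a single instance-0 register list across every instance:

	/* As used in the vcn_v*_dump_ip_state hunks below: read list
	 * entry j on VCN instance i, whatever instance the entry names. */
	adev->vcn.ip_dump[inst_off + j] =
		RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));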
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e74e1983da53..b9cbeb389edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -413,6 +413,10 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER 0xD2
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
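Per the layout comment, PACKET3_RUN_CLEANER_SHADER is a two-dword type-3 packet: the header plus one reserved dword. A hedged emission sketch using the existing PACKET3() and amdgpu_ring_write() helpers (a count of 0 covers the single body dword):

	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
	amdgpu_ring_write(ring, 0);	/* RESERVED [31:0] */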
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 9dbb13adb661..1a8ea834efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -157,9 +157,9 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
&de_count, umc_v12_0_is_deferred_error);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);
return 0;
}
@@ -225,26 +225,16 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
}
}
-static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
- struct ta_ras_query_address_input *addr_in,
- uint64_t *pfns, int len)
+static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
+ struct ta_ras_query_address_output *addr_out,
+ uint64_t err_addr)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
- uint32_t pos = 0;
-
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return 0;
- }
+ uint64_t soc_pa, retired_page, column;
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
+ soc_pa = addr_out->pa.pa;
+ bank = addr_out->pa.bank;
+ channel_index = addr_out->pa.channel_idx;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
@@ -258,11 +248,6 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
@@ -272,19 +257,74 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ }
+}
+
+static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ uint64_t soc_pa, retired_page, column;
+ uint32_t pos = 0;
+
+ soc_pa = pa_addr;
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
if (pos >= len)
return 0;
pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
}
return pos;
}
+static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ uint64_t *addr, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+ addr_in.addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
+ return -EINVAL;
+ }
+
+ if (dump_addr)
+ umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
+
+ *addr = addr_out.pa.pa;
+
+ return 0;
+}
+
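Together these two helpers replace umc_v12_0_convert_err_addr with a clean split: convert_mca_to_addr performs the single PSP MCA-to-PA query (optionally dumping the decoded address), while lookup_bad_pages_in_a_row expands the resulting PA into every retired page of the row, two pfns per [C4 C3 C2] combination (with and without the R13 flip). A condensed sketch of the chained call, mirroring update_ecc_status further down; ch, umc, node and socket stand in for the MCA_IPID_2_* extractors:

	uint64_t pa = 0;
	uint64_t pfns[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL] = {0};
	int n, k;

	if (!umc_v12_0_convert_mca_to_addr(adev, err_addr, ch, umc,
					   node, socket, &pa, false)) {
		n = umc_v12_0_lookup_bad_pages_in_a_row(adev, pa, pfns,
							ARRAY_SIZE(pfns));
		for (k = 0; k < n; k++)
			amdgpu_ras_reserve_page(adev, pfns[k]);
	}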
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
@@ -483,12 +523,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint16_t hwid, mcatype;
- struct ta_ras_query_address_input addr_in;
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
- uint64_t err_addr, hash_val = 0;
+ uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
- int count;
- int ret;
+ int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
@@ -514,46 +552,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- memset(page_pfn, 0, sizeof(page_pfn));
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
- addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
- addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
- addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
-
- count = umc_v12_0_convert_err_addr(adev,
- &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
- if (count <= 0) {
- dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
- return 0;
- }
-
- ret = amdgpu_umc_build_pages_hash(adev,
- page_pfn, count, &hash_val);
- if (ret) {
- dev_err(adev->dev, "Fail to build error pages hash\n");
+ ret = umc_v12_0_convert_mca_to_addr(adev,
+ err_addr, MCA_IPID_2_UMC_CH(ipid),
+ MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ if (ret)
return ret;
- }
ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
if (!ecc_err)
return -ENOMEM;
- ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
- if (!ecc_err->err_pages.pfn) {
- kfree(ecc_err);
- return -ENOMEM;
- }
-
- memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
- ecc_err->err_pages.count = count;
-
- ecc_err->hash_index = hash_val;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
+ ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* If the converted pa_pfn is 0, fall back to the pfn given by the C4 bit. */
+ if (!ecc_err->pa_pfn)
+ ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -562,13 +579,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
else
dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
- kfree(ecc_err->err_pages.pfn);
kfree(ecc_err);
return ret;
}
con->umc_ecc_log.de_queried_count++;
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ pa_addr,
+ page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ /* Reserve every page that shares the bad row */
+ for (i = 0; i < count; i++)
+ amdgpu_ras_reserve_page(adev, page_pfn[i]);
+
/* The problem case is as follows:
* 1. GPU A triggers a gpu ras reset, and GPU A drives
* GPU B to also perform a gpu ras reset.
@@ -593,16 +622,21 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
struct ras_ecc_err *ecc_err, void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t i = 0;
- int ret = 0;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ int ret, i, count;
if (!err_data || !ecc_err)
return -EINVAL;
- for (i = 0; i < ecc_err->err_pages.count; i++) {
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn, ARRAY_SIZE(page_pfn));
+
+ for (i = 0; i < count; i++) {
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
- ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
MCA_IPID_2_UMC_CH(ecc_err->ipid),
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
@@ -636,7 +670,8 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
break;
}
- radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ radix_tree_tag_clear(ecc_tree,
+ entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
}
mutex_unlock(&con->umc_ecc_log.lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index b4974793850b..be5598d76c1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -81,6 +81,11 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
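UMC_V12_ADDR_MASK_BAD_COLS clears exactly the bits the row-expansion loop toggles: the two-bit column field at C2 (covering C2 and C3), the C4 bit, and R13. Masking therefore collapses all retired pages of a row onto one canonical address, which umc_v12_0_update_ecc_status stores as the radix-tree key:

	/* One key per bad row, as in umc_v12_0_update_ecc_status: */
	ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >>
			  AMDGPU_GPU_PAGE_SHIFT;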
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 32517c364cf7..4bfba2931b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -950,7 +950,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 06d787385ad4..0748bf44c880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1102,7 +1102,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index a280b9fecb77..ecdfbfefd66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -45,6 +45,42 @@
#define mmUVD_REG_XX_MASK_1_0 0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -90,6 +126,8 @@ static int vcn_v1_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
@@ -161,6 +199,14 @@ static int vcn_v1_0_sw_init(void *handle)
r = jpeg_v1_0_sw_init(handle);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return r;
}
@@ -184,6 +230,8 @@ static int vcn_v1_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1877,6 +1925,66 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
+static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v1_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
+ }
+}
+
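This guard is why mmUVD_POWER_STATUS sits first in every VCN register list: it remains readable while an instance is power-gated, and the driver treats a status field of 1 as gated, hence the != 1 test. Only a powered instance has the rest of its registers sampled:

	/* Slot 0 is always sampled; the remaining slots only when
	 * the instance is powered. */
	adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
	is_powered = (adev->vcn.ip_dump[inst_off] &
		      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;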
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1895,8 +2003,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v1_0_dump_ip_state,
+ .print_ip_state = vcn_v1_0_print_ip_state,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d3d096909a7f..bfd067e2d2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -53,6 +53,42 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -96,6 +132,8 @@ static int vcn_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
@@ -184,6 +222,15 @@ static int vcn_v2_0_sw_init(void *handle)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -213,6 +260,8 @@ static int vcn_v2_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1985,6 +2034,66 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
+static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2003,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_0_dump_ip_state,
+ .print_ip_state = vcn_v2_0_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 96f60c303161..04e9e806e318 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -55,6 +55,43 @@
#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
@@ -122,6 +159,8 @@ static int vcn_v2_5_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, j, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
@@ -241,6 +280,15 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -277,6 +325,8 @@ static int vcn_v2_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1876,6 +1926,66 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -1894,8 +2004,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -1916,8 +2026,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 24f947751c46..65dd68b32280 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,6 +60,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -126,6 +162,8 @@ static int vcn_v3_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vcn_sw_init(adev);
@@ -246,6 +284,15 @@ static int vcn_v3_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr == NULL) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -284,6 +331,7 @@ static int vcn_v3_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
return r;
}
@@ -2203,6 +2251,67 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v3_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2221,8 +2330,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v3_0_dump_ip_state,
+ .print_ip_state = vcn_v3_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
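Across all five VCN variants touched by this series the dump scheme is identical: sw_init allocates one flat uint32_t buffer with reg_count slots per instance, dump_ip_state fills one slice per powered instance, and print_ip_state replays it. The sketch below models that layout; it is illustrative only — the helper names and the field-mask value are assumptions, not amdgpu API.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Assumed field mask; the real value lives in the UVD register headers. */
#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x3

/* Instances are stored back to back: [inst0 regs][inst1 regs]... */
static inline uint32_t *inst_slice(uint32_t *ip_dump, uint32_t reg_count,
				   uint32_t inst)
{
	return ip_dump + (size_t)inst * reg_count;
}

/* Slot 0 always holds UVD_POWER_STATUS: a field value of 1 means power
 * gated, anything else is treated as powered (and fully dumped).
 */
static inline bool inst_is_powered(const uint32_t *inst_regs)
{
	return (inst_regs[0] & UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
}

This is also why dump_ip_state reads only slot 0 for gated instances: the power-status register is the one register guaranteed readable while the block is off.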
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 776c539bfdda..26c6f10a8c8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -137,6 +173,8 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -200,6 +238,15 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -239,6 +286,8 @@ static int vcn_v4_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -2109,6 +2158,67 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2127,8 +2237,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_dump_ip_state,
+ .print_ip_state = vcn_v4_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 9bae95538b62..0fda70336300 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,42 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
#define NORMALIZE_VCN_REG_OFFSET(offset) \
(offset & 0x1FFFF)
@@ -92,6 +128,8 @@ static int vcn_v4_0_3_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -159,6 +197,15 @@ static int vcn_v4_0_3_sw_init(void *handle)
}
}
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -194,6 +241,8 @@ static int vcn_v4_0_3_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1684,6 +1733,68 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
+static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_3_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off, inst_id;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(VCN, i);
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
+ inst_id));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
@@ -1702,8 +1813,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_3_dump_ip_state,
+ .print_ip_state = vcn_v4_0_3_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
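vcn_v4_0_3 is the one variant that distinguishes logical from physical instances: the dump buffer stays logically indexed (inst_off = i * reg_count) while register reads go through GET_INST(VCN, i) to reach the right physical bank. A minimal sketch of that split, with get_inst() and read_power_status() standing in for the driver's GET_INST and RREG32_SOC15 macros:

#include <stdint.h>

extern int get_inst(int logical);               /* stands in for GET_INST(VCN, i) */
extern uint32_t read_power_status(int inst_id); /* stands in for RREG32_SOC15(...) */

static void dump_one_inst(uint32_t *ip_dump, uint32_t reg_count, int i)
{
	uint32_t inst_off = i * reg_count; /* buffer slot: logical index */
	int inst_id = get_inst(i);         /* register bank: physical index */

	ip_dump[inst_off] = read_power_status(inst_id);
}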
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 8d75061f9f38..b1fd226b7efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -97,6 +133,8 @@ static int vcn_v4_0_5_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -168,6 +206,14 @@ static int vcn_v4_0_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -207,6 +253,8 @@ static int vcn_v4_0_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1733,6 +1781,67 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1751,8 +1860,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_5_dump_ip_state,
+ .print_ip_state = vcn_v4_0_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 68c97fcd539b..c305386358b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -37,6 +37,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -83,6 +117,8 @@ static int vcn_v5_0_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -137,6 +173,14 @@ static int vcn_v5_0_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -173,6 +217,8 @@ static int vcn_v5_0_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1297,6 +1343,66 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1315,8 +1421,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v5_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 80ce42aacc0c..b61f6b838ec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -246,6 +246,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 32e5db509560..9044bdb38cf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -36,7 +36,6 @@
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
@@ -247,14 +246,15 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
- q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
- q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+ q_properties->read_ptr = (void __user *)args->read_pointer_address;
+ q_properties->write_ptr = (void __user *)args->write_pointer_address;
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
q_properties->eop_ring_buffer_size = args->eop_buffer_size;
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
q_properties->ctl_stack_size = args->ctl_stack_size;
+ q_properties->sdma_engine_id = args->sdma_engine_id;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
@@ -262,6 +262,8 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->type = KFD_QUEUE_TYPE_SDMA;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
+ else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID)
+ q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID;
else
return -ENOTSUPP;
@@ -306,7 +308,6 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
struct kfd_process_device *pdd;
struct queue_properties q_properties;
uint32_t doorbell_offset_in_process = 0;
- struct amdgpu_bo *wptr_bo = NULL;
memset(&q_properties, 0, sizeof(struct queue_properties));
@@ -334,6 +335,18 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
+ if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
+ int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) +
+ kfd_get_num_xgmi_sdma_engines(dev) - 1;
+
+ if (q_properties.sdma_engine_id > max_sdma_eng_id) {
+ err = -EINVAL;
+ pr_err("sdma_engine_id %i exceeds maximum id of %i\n",
+ q_properties.sdma_engine_id, max_sdma_eng_id);
+ goto err_sdma_engine_id;
+ }
+ }
+
if (!pdd->qpd.proc_doorbells) {
err = kfd_alloc_process_doorbells(dev->kfd, pdd);
if (err) {
@@ -342,53 +355,17 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
}
}
- /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
- * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
- */
- if (dev->kfd->shared_resources.enable_mes &&
- ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
- struct amdgpu_bo_va_mapping *wptr_mapping;
- struct amdgpu_vm *wptr_vm;
-
- wptr_vm = drm_priv_to_vm(pdd->drm_priv);
- err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
- if (err)
- goto err_wptr_map_gart;
-
- wptr_mapping = amdgpu_vm_bo_lookup_mapping(
- wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
- amdgpu_bo_unreserve(wptr_vm->root.bo);
- if (!wptr_mapping) {
- pr_err("Failed to lookup wptr bo\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
-
- wptr_bo = wptr_mapping->bo_va->base.bo;
- if (wptr_bo->tbo.base.size > PAGE_SIZE) {
- pr_err("Requested GART mapping for wptr bo larger than one page\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
- if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
- pr_err("Queue memory allocated to wrong device\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
-
- err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
- if (err) {
- pr_err("Failed to map wptr bo to GART\n");
- goto err_wptr_map_gart;
- }
+ err = kfd_queue_acquire_buffers(pdd, &q_properties);
+ if (err) {
+ pr_debug("failed to acquire user queue buffers\n");
+ goto err_acquire_queue_buf;
}
pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
NULL, NULL, NULL, &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -422,9 +399,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
return 0;
err_create_queue:
- if (wptr_bo)
- amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
-err_wptr_map_gart:
+ kfd_queue_unref_bo_vas(pdd, &q_properties);
+ kfd_queue_release_buffers(pdd, &q_properties);
+err_acquire_queue_buf:
+err_sdma_engine_id:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -1422,8 +1400,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
- pr_err("Failed to unmap from gpu %d/%d\n",
- i, args->n_devices);
+ pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices);
goto unmap_memory_from_gpu_failed;
}
args->n_success = i+1;
@@ -1857,7 +1834,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
}
static int criu_get_prime_handle(struct kgd_mem *mem,
- int flags, u32 *shared_fd)
+ int flags, u32 *shared_fd,
+ struct file **file)
{
struct dma_buf *dmabuf;
int ret;
@@ -1868,13 +1846,14 @@ static int criu_get_prime_handle(struct kgd_mem *mem,
return ret;
}
- ret = dma_buf_fd(dmabuf, flags);
+ ret = get_unused_fd_flags(flags);
if (ret < 0) {
pr_err("dmabuf create fd failed, ret:%d\n", ret);
goto out_free_dmabuf;
}
*shared_fd = ret;
+ *file = dmabuf->file;
return 0;
out_free_dmabuf:
@@ -1882,6 +1861,25 @@ out_free_dmabuf:
return ret;
}
+static void commit_files(struct file **files,
+ struct kfd_criu_bo_bucket *bo_buckets,
+ unsigned int count,
+ int err)
+{
+ while (count--) {
+ struct file *file = files[count];
+
+ if (!file)
+ continue;
+ if (err) {
+ fput(file);
+ put_unused_fd(bo_buckets[count].dmabuf_fd);
+ } else {
+ fd_install(bo_buckets[count].dmabuf_fd, file);
+ }
+ }
+}
+
static int criu_checkpoint_bos(struct kfd_process *p,
uint32_t num_bos,
uint8_t __user *user_bos,
@@ -1890,6 +1888,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
{
struct kfd_criu_bo_bucket *bo_buckets;
struct kfd_criu_bo_priv_data *bo_privs;
+ struct file **files = NULL;
int ret = 0, pdd_index, bo_index = 0, id;
void *mem;
@@ -1903,6 +1902,12 @@ static int criu_checkpoint_bos(struct kfd_process *p,
goto exit;
}
+ files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL);
+ if (!files) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
struct kfd_process_device *pdd = p->pdds[pdd_index];
struct amdgpu_bo *dumper_bo;
@@ -1945,7 +1950,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
ret = criu_get_prime_handle(kgd_mem,
bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
- &bo_bucket->dmabuf_fd);
+ &bo_bucket->dmabuf_fd, &files[bo_index]);
if (ret)
goto exit;
} else {
@@ -1963,7 +1968,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
for (i = 0; i < p->n_pdds; i++) {
- if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
+ if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem))
bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
}
@@ -1996,12 +2001,8 @@ static int criu_checkpoint_bos(struct kfd_process *p,
*priv_offset += num_bos * sizeof(*bo_privs);
exit:
- while (ret && bo_index--) {
- if (bo_buckets[bo_index].alloc_flags
- & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
- close_fd(bo_buckets[bo_index].dmabuf_fd);
- }
-
+ commit_files(files, bo_buckets, bo_index, ret);
+ kvfree(files);
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
@@ -2353,7 +2354,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
static int criu_restore_bo(struct kfd_process *p,
struct kfd_criu_bo_bucket *bo_bucket,
- struct kfd_criu_bo_priv_data *bo_priv)
+ struct kfd_criu_bo_priv_data *bo_priv,
+ struct file **file)
{
struct kfd_process_device *pdd;
struct kgd_mem *kgd_mem;
@@ -2405,7 +2407,7 @@ static int criu_restore_bo(struct kfd_process *p,
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
- &bo_bucket->dmabuf_fd);
+ &bo_bucket->dmabuf_fd, file);
if (ret)
return ret;
} else {
@@ -2422,6 +2424,7 @@ static int criu_restore_bos(struct kfd_process *p,
{
struct kfd_criu_bo_bucket *bo_buckets = NULL;
struct kfd_criu_bo_priv_data *bo_privs = NULL;
+ struct file **files = NULL;
int ret = 0;
uint32_t i = 0;
@@ -2435,6 +2438,12 @@ static int criu_restore_bos(struct kfd_process *p,
if (!bo_buckets)
return -ENOMEM;
+ files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL);
+ if (!files) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
ret = copy_from_user(bo_buckets, (void __user *)args->bos,
args->num_bos * sizeof(*bo_buckets));
if (ret) {
@@ -2460,7 +2469,7 @@ static int criu_restore_bos(struct kfd_process *p,
/* Create and map new BOs */
for (; i < args->num_bos; i++) {
- ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
+ ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]);
if (ret) {
pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
goto exit;
@@ -2475,11 +2484,8 @@ static int criu_restore_bos(struct kfd_process *p,
ret = -EFAULT;
exit:
- while (ret && i--) {
- if (bo_buckets[i].alloc_flags
- & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
- close_fd(bo_buckets[i].dmabuf_fd);
- }
+ commit_files(files, bo_buckets, i, ret);
+ kvfree(files);
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
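The CRIU rework above replaces dma_buf_fd(), which publishes the descriptor immediately, with get_unused_fd_flags() plus a single commit_files() pass at the end, so a checkpoint or restore that fails midway never leaks a visible fd into the process. A minimal sketch of the reserve-then-commit pattern (kernel-style; the two helpers here are illustrative, not the driver's):

#include <linux/file.h>
#include <linux/types.h>

/* Reserve a descriptor number without making it visible yet. */
static int reserve_fd(int flags, int *out_fd)
{
	int fd = get_unused_fd_flags(flags);

	if (fd < 0)
		return fd;
	*out_fd = fd;
	return 0;
}

/* Commit on success, or unwind both the file and the number on failure. */
static void commit_or_abort(struct file *file, int fd, bool ok)
{
	if (ok) {
		fd_install(fd, file);	/* fd becomes visible atomically */
	} else {
		fput(file);
		put_unused_fd(fd);
	}
}

This also explains the removal of <linux/fdtable.h> earlier in the hunk: close_fd() is no longer needed once nothing is installed before the final commit.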
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index cd7b81b7b939..48caecf7e72e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1434,7 +1434,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
- pcache_info[0].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
+ pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
+ pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
@@ -1446,6 +1447,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
+ pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
i++;
}
/* Scalar L1 Data Cache per SQC */
@@ -1456,6 +1458,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
+ pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
i++;
}
/* GL1 Data Cache per SA */
@@ -1468,6 +1471,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ pcache_info[i].cache_line_size = 0;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
@@ -1478,6 +1482,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
i++;
}
/* L3 Data Cache per GPU */
@@ -1488,6 +1493,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+ pcache_info[i].cache_line_size = 0;
i++;
}
return i;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 34a282540c7e..312dfa84f29f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -365,47 +365,47 @@ static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_i
*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
/* device watchpoint in use so skip */
- if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
+ if ((pdd->dev->alloc_watch_ids >> i) & 0x1)
continue;
pdd->alloc_watch_ids |= 0x1 << i;
- pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
+ pdd->dev->alloc_watch_ids |= 0x1 << i;
*watch_id = i;
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return 0;
}
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return -ENOMEM;
}
static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
/* process owns device watch point so safe to clear */
if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
pdd->alloc_watch_ids &= ~(0x1 << watch_id);
- pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
+ pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id);
}
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
}
static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
bool owns_watch_id = false;
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
((pdd->alloc_watch_ids >> watch_id) & 0x1);
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return owns_watch_id;
}
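With watch_points_lock and alloc_watch_ids moved from kfd_dev into kfd_node, each partition now hands out its MAX_WATCH_ADDRESSES watch IDs independently. The allocator itself is just a first-free-bit scan under the spinlock; a condensed model (the struct and constant here are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/errno.h>

#define MAX_WATCH_ADDRESSES 4	/* assumed pool size */

struct node_watch_pool {
	spinlock_t lock;
	u32 alloc_watch_ids;	/* bit i set => watch point i in use */
};

static int alloc_watch_id(struct node_watch_pool *pool)
{
	int i, id = -ENOMEM;

	spin_lock(&pool->lock);
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		if (pool->alloc_watch_ids & (1u << i))
			continue;
		pool->alloc_watch_ids |= 1u << i;
		id = i;
		break;
	}
	spin_unlock(&pool->lock);
	return id;
}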
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f4d20adaa068..fad1c8f2bc83 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -884,13 +884,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
dev_err(kfd_device, "Error initializing KFD node\n");
goto node_init_error;
}
+
+ spin_lock_init(&node->watch_points_lock);
+
kfd->nodes[i] = node;
}
svm_range_set_max_pages(kfd->adev);
- spin_lock_init(&kfd->watch_points_lock);
-
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
kfd->adev->pdev->device);
@@ -907,7 +908,7 @@ node_alloc_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
alloc_gtt_mem_failure:
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
@@ -925,7 +926,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
}
kfree(kfd);
@@ -1445,6 +1446,45 @@ void kgd2kfd_unlock_kfd(void)
mutex_unlock(&kfd_processes_mutex);
}
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ struct kfd_node *node;
+ int ret;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ if (node_id >= kfd->num_nodes) {
+ dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
+ node_id, kfd->num_nodes - 1);
+ return -EINVAL;
+ }
+ node = kfd->nodes[node_id];
+
+ ret = node->dqm->ops.unhalt(node->dqm);
+ if (ret)
+ dev_err(kfd_device, "Error in starting scheduler\n");
+
+ return ret;
+}
+
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ struct kfd_node *node;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ if (node_id >= kfd->num_nodes) {
+ dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
+ node_id, kfd->num_nodes - 1);
+ return -EINVAL;
+ }
+
+ node = kfd->nodes[node_id];
+ return node->dqm->ops.halt(node->dqm);
+}
+
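kgd2kfd_start_sched() and kgd2kfd_stop_sched() give the amdgpu side a per-node handle on the new dqm halt/unhalt ops. A hypothetical caller that drains one node around a maintenance step might look like this:

/* Usage sketch, not driver code; both calls return 0 if KFD never
 * finished init, so the bracket is safe to use unconditionally.
 */
static int quiesce_node_and_resume(struct kfd_dev *kfd, uint32_t node_id)
{
	int r = kgd2kfd_stop_sched(kfd, node_id);

	if (r)
		return r;

	/* ... hardware maintenance while the node's runlist is drained ... */

	return kgd2kfd_start_sched(kfd, node_id);
}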
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4f48507418d2..71b465f8d83e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -153,6 +153,20 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
static void kfd_hws_hang(struct device_queue_manager *dqm)
{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ /* Mark all device queues as reset. */
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+
+ pdd->has_reset_queue = true;
+ }
+ }
+
/*
* Issue a GPU reset if HWS is unresponsive
*/
@@ -208,10 +222,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.mqd_addr = q->gart_mqd_addr;
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
- if (q->wptr_bo) {
- wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
- queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
- }
+ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+ queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;
queue_input.is_kfd_process = 1;
queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
@@ -307,6 +319,46 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm)
return retval;
}
+static int suspend_all_queues_mes(struct device_queue_manager *dqm)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
+ int r = 0;
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EIO;
+
+ r = amdgpu_mes_suspend(adev);
+ up_read(&adev->reset_domain->sem);
+
+ if (r) {
+ dev_err(adev->dev, "failed to suspend gangs from MES\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
+ kfd_hws_hang(dqm);
+ }
+
+ return r;
+}
+
+static int resume_all_queues_mes(struct device_queue_manager *dqm)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
+ int r = 0;
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EIO;
+
+ r = amdgpu_mes_resume(adev);
+ up_read(&adev->reset_domain->sem);
+
+ if (r) {
+ dev_err(adev->dev, "failed to resume gangs from MES\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
+ kfd_hws_hang(dqm);
+ }
+
+ return r;
+}
+
static void increment_queue_count(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
@@ -880,6 +932,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
+ /* queue is reset so inaccessible */
+ if (pdd->has_reset_queue) {
+ retval = -EACCES;
+ goto out_unlock;
+ }
+
if (retval) {
dev_err(dev, "unmap queue failed\n");
goto out_unlock;
@@ -1534,6 +1592,41 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_xgmi_sdma_engines(dqm->dev);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
+ int i, num_queues, num_engines, eng_offset = 0, start_engine;
+ bool free_bit_found = false, is_xgmi = false;
+
+ if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) {
+ num_queues = get_num_sdma_queues(dqm);
+ num_engines = kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.type = KFD_QUEUE_TYPE_SDMA;
+ } else {
+ num_queues = get_num_xgmi_sdma_queues(dqm);
+ num_engines = kfd_get_num_xgmi_sdma_engines(dqm->dev);
+ eng_offset = kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI;
+ is_xgmi = true;
+ }
+
+ /* Scan for a free bit, strided by engine count, starting at the target engine ID. */
+ start_engine = q->properties.sdma_engine_id - eng_offset;
+ for (i = start_engine; i < num_queues; i += num_engines) {
+
+ if (!test_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap))
+ continue;
+
+ clear_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap);
+ q->sdma_id = i;
+ q->properties.sdma_queue_id = q->sdma_id / num_engines;
+ free_bit_found = true;
+ break;
+ }
+
+ if (!free_bit_found) {
+ dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
+ q->properties.sdma_engine_id);
+ return -ENOMEM;
+ }
}
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
@@ -1626,10 +1719,64 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
return 0;
}
+/* halt_cpsch:
+ * Unmap queues so the scheduler doesn't keep running the jobs left in them.
+ * Then set dqm->sched_halt so queues don't map to runlist until unhalt_cpsch
+ * is called.
+ */
+static int halt_cpsch(struct device_queue_manager *dqm)
+{
+ int ret = 0;
+
+ dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
+ WARN_ONCE(dqm->sched_halt, "Scheduling is already on halt\n");
+
+ if (!dqm->is_hws_hang) {
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ ret = unmap_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ else
+ ret = remove_all_queues_mes(dqm);
+ }
+ dqm->sched_halt = true;
+ dqm_unlock(dqm);
+
+ return ret;
+}
+
+/* unhalt_cpsch
+ * Clear dqm->sched_halt and map queues back to the runlist
+ */
+static int unhalt_cpsch(struct device_queue_manager *dqm)
+{
+ int ret = 0;
+
+ dqm_lock(dqm);
+ if (!dqm->sched_running || !dqm->sched_halt) {
+ WARN_ONCE(!dqm->sched_halt, "Scheduling is not on halt.\n");
+ dqm_unlock(dqm);
+ return 0;
+ }
+ dqm->sched_halt = false;
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ ret = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD);
+ dqm_unlock(dqm);
+
+ return ret;
+}
+
static int start_cpsch(struct device_queue_manager *dqm)
{
struct device *dev = dqm->dev->adev->dev;
- int retval;
+ int retval, num_hw_queue_slots;
retval = 0;
@@ -1682,9 +1829,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
&dqm->wait_times);
}
+ /* set up the per-queue reset detection buffer */
+ num_hw_queue_slots = dqm->dev->kfd->shared_resources.num_queue_per_pipe *
+ dqm->dev->kfd->shared_resources.num_pipe_per_mec *
+ NUM_XCC(dqm->dev->xcc_mask);
+
+ dqm->detect_hang_info_size = num_hw_queue_slots * sizeof(struct dqm_detect_hang_info);
+ dqm->detect_hang_info = kzalloc(dqm->detect_hang_info_size, GFP_KERNEL);
+
+ if (!dqm->detect_hang_info) {
+ retval = -ENOMEM;
+ goto fail_detect_hang_buffer;
+ }
+
dqm_unlock(dqm);
return 0;
+fail_detect_hang_buffer:
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
fail_allocate_vidmem:
fail_set_sched_resources:
if (!dqm->dev->kfd->shared_resources.enable_mes)
@@ -1715,6 +1877,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr);
+ kfree(dqm->detect_hang_info);
+ dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
return 0;
@@ -1786,7 +1950,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
dqm_lock(dqm);
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
dqm_unlock(dqm);
@@ -1913,7 +2078,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
struct device *dev = dqm->dev->adev->dev;
int retval;
- if (!dqm->sched_running)
+ if (!dqm->sched_running || dqm->sched_halt)
return 0;
if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
@@ -1931,6 +2096,135 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
return retval;
}
+static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+
+ dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
+ q->properties.queue_id, q->process->pasid);
+
+ pdd->has_reset_queue = true;
+ if (q->properties.is_active) {
+ q->properties.is_active = false;
+ decrement_queue_count(dqm, qpd, q);
+ }
+}
+
+static int detect_queue_hang(struct device_queue_manager *dqm)
+{
+ int i;
+
+ /* detection must only run from a dqm-locked queue reset */
+ if (WARN_ON(dqm->detect_hang_count > 0))
+ return 0;
+
+ memset(dqm->detect_hang_info, 0, dqm->detect_hang_info_size);
+
+ for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
+ uint32_t mec, pipe, queue;
+ int xcc_id;
+
+ mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
+ / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
+
+ if (mec || !test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
+ continue;
+
+ amdgpu_queue_mask_bit_to_mec_queue(dqm->dev->adev, i, &mec, &pipe, &queue);
+
+ for_each_inst(xcc_id, dqm->dev->xcc_mask) {
+ uint64_t queue_addr = dqm->dev->kfd2kgd->hqd_get_pq_addr(
+ dqm->dev->adev, pipe, queue, xcc_id);
+ struct dqm_detect_hang_info hang_info;
+
+ if (!queue_addr)
+ continue;
+
+ hang_info.pipe_id = pipe;
+ hang_info.queue_id = queue;
+ hang_info.xcc_id = xcc_id;
+ hang_info.queue_address = queue_addr;
+
+ dqm->detect_hang_info[dqm->detect_hang_count] = hang_info;
+ dqm->detect_hang_count++;
+ }
+ }
+
+ return dqm->detect_hang_count;
+}
+
+static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uint64_t queue_address)
+{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (queue_address == q->properties.queue_address)
+ return q;
+ }
+ }
+
+ return NULL;
+}
+
+/* only for compute queue */
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
+{
+ int r = 0, reset_count = 0, i;
+
+ if (!dqm->detect_hang_info || dqm->is_hws_hang)
+ return -EIO;
+
+ /* assumes dqm is locked. */
+ if (!detect_queue_hang(dqm))
+ return -ENOTRECOVERABLE;
+
+ for (i = 0; i < dqm->detect_hang_count; i++) {
+ struct dqm_detect_hang_info hang_info = dqm->detect_hang_info[i];
+ struct queue *q = find_queue_by_address(dqm, hang_info.queue_address);
+ struct kfd_process_device *pdd;
+ uint64_t queue_addr = 0;
+
+ if (!q) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ pdd = kfd_get_process_device_data(dqm->dev, q->process);
+ if (!pdd) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ queue_addr = dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev,
+ hang_info.pipe_id, hang_info.queue_id, hang_info.xcc_id,
+ KFD_UNMAP_LATENCY_MS);
+
+ /* either reset failed or we reset an unexpected queue. */
+ if (queue_addr != q->properties.queue_address) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ set_queue_as_reset(dqm, q, &pdd->qpd);
+ reset_count++;
+ }
+
+ if (reset_count == dqm->detect_hang_count)
+ kfd_signal_reset_event(dqm->dev);
+ else
+ r = -ENOTRECOVERABLE;
+
+reset_fail:
+ dqm->detect_hang_count = 0;
+
+ return r;
+}
+
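Putting the pieces together, reset_queues_on_hws_hang() snapshots every active HQD ring address, resets each one, and trusts the reset only if the address the hardware hands back matches the software queue it found; any mismatch, missing queue, or missing pdd abandons the per-queue path and falls through to the full GPU reset in the caller. Condensed control flow (the helper names are the driver's, the glue is schematic):

static int per_queue_reset(struct device_queue_manager *dqm)
{
	int i;

	if (!detect_queue_hang(dqm))
		return -ENOTRECOVERABLE;	/* nothing captured: full reset */

	for (i = 0; i < dqm->detect_hang_count; i++) {
		struct dqm_detect_hang_info *hi = &dqm->detect_hang_info[i];
		struct queue *q = find_queue_by_address(dqm, hi->queue_address);

		if (!q || dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev,
				hi->pipe_id, hi->queue_id, hi->xcc_id,
				KFD_UNMAP_LATENCY_MS) != hi->queue_address)
			return -ENOTRECOVERABLE;	/* escalate to GPU reset */
	}

	kfd_signal_reset_event(dqm->dev);
	return 0;
}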
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -1981,11 +2275,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
- while (halt_if_hws_hang)
- schedule();
- kfd_hws_hang(dqm);
- retval = -ETIME;
- goto out;
+ if (reset_queues_on_hws_hang(dqm)) {
+ while (halt_if_hws_hang)
+ schedule();
+ dqm->is_hws_hang = true;
+ kfd_hws_hang(dqm);
+ retval = -ETIME;
+ goto out;
+ }
}
/* We need to reset the grace period value for this device */
@@ -2004,8 +2301,7 @@ out:
}
/* only for compute queue */
-static int reset_queues_cpsch(struct device_queue_manager *dqm,
- uint16_t pasid)
+static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid)
{
int retval;
@@ -2111,10 +2407,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pdd->sdma_past_activity_counter += sdma_val;
}
- list_del(&q->list);
- qpd->queue_count--;
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
+ q->properties.is_active = false;
if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
@@ -2125,6 +2420,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = remove_queue_mes(dqm, q, qpd);
}
}
+ list_del(&q->list);
+ qpd->queue_count--;
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -2525,6 +2822,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.halt = halt_cpsch;
+ dqm->ops.unhalt = unhalt_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -2621,7 +2920,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
- amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
@@ -2633,6 +2932,95 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct device_queue_manager *dqm = knode->dqm;
+ struct device *dev = dqm->dev->adev->dev;
+ struct qcm_process_device *qpd;
+ struct queue *q = NULL;
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+
+ dqm_lock(dqm);
+
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd) {
+ qpd = &pdd->qpd;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->doorbell_id == doorbell_id && q->properties.is_active) {
+ ret = suspend_all_queues_mes(dqm);
+ if (ret) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ decrement_queue_count(dqm, qpd, q);
+
+ ret = remove_queue_mes(dqm, q, qpd);
+ if (ret) {
+ dev_err(dev, "Removing bad queue failed");
+ goto out;
+ }
+
+ ret = resume_all_queues_mes(dqm);
+ if (ret)
+ dev_err(dev, "Resuming all queues failed");
+
+ break;
+ }
+ }
+ }
+
+out:
+ dqm_unlock(dqm);
+ return ret;
+}
+
+static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct device *dev = dqm->dev->adev->dev;
+ int ret = 0;
+
+ /* Check if process is already evicted */
+ dqm_lock(dqm);
+ if (qpd->evicted) {
+ /* Increment the evicted count to make sure the
+ * process stays evicted until it is terminated.
+ */
+ qpd->evicted++;
+ dqm_unlock(dqm);
+ goto out;
+ }
+ dqm_unlock(dqm);
+
+ ret = suspend_all_queues_mes(dqm);
+ if (ret) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+
+ ret = dqm->ops.evict_process_queues(dqm, qpd);
+ if (ret) {
+ dev_err(dev, "Evicting process queues failed");
+ goto out;
+ }
+
+ ret = resume_all_queues_mes(dqm);
+ if (ret)
+ dev_err(dev, "Resuming all queues failed");
+
+out:
+ return ret;
+}
+
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{
struct kfd_process_device *pdd;
@@ -2643,8 +3031,13 @@ int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
return -EINVAL;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
pdd = kfd_get_process_device_data(dqm->dev, p);
- if (pdd)
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ if (pdd) {
+ if (dqm->dev->kfd->shared_resources.enable_mes)
+ ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
+ else
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ }
+
kfd_unref_process(p);
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 3b9b8eabaacc..08b40826ad1e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -106,6 +106,12 @@ union GRBM_GFX_INDEX_BITS {
* @uninitialize: Destroys all the device queue manager resources allocated in
* initialize routine.
*
+ * @halt: This routine unmaps queues from the runlist and sets the halt
+ * status to true so no more queues will be mapped to the runlist until
+ * unhalt.
+ *
+ * @unhalt: This routine sets the halt status back to false and maps queues
+ * back to the runlist.
+ *
* @create_kernel_queue: Creates kernel queue. Used for debug queue.
*
* @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
@@ -153,6 +159,8 @@ struct device_queue_manager_ops {
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
+ int (*halt)(struct device_queue_manager *dqm);
+ int (*unhalt)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd);
@@ -210,6 +218,13 @@ struct device_queue_manager_asic_ops {
struct kfd_node *dev);
};
+struct dqm_detect_hang_info {
+ int pipe_id;
+ int queue_id;
+ int xcc_id;
+ uint64_t queue_address;
+};
+
/**
* struct device_queue_manager
*
@@ -257,6 +272,7 @@ struct device_queue_manager {
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
+ bool sched_halt;
/* used for GFX 9.4.3 only */
uint32_t current_logical_xcc_start;
@@ -264,6 +280,11 @@ struct device_queue_manager {
uint32_t wait_times;
wait_queue_head_t destroy_wait;
+
+ /* for per-queue reset support */
+ struct dqm_detect_hang_info *detect_hang_info;
+ size_t detect_hang_info_size;
+ int detect_hang_count;
};
void device_queue_manager_init_cik(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 9b33d9d2c9ad..ea3792249209 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -31,6 +31,7 @@
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
+#include "kfd_device_queue_manager.h"
#include <linux/device.h>
/*
@@ -1244,12 +1245,33 @@ void kfd_signal_reset_event(struct kfd_node *dev)
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
continue;
}
+ if (unlikely(!pdd)) {
+ WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid);
+ continue;
+ }
+
+ if (dev->dqm->detect_hang_count && !pdd->has_reset_queue)
+ continue;
+
+ if (dev->dqm->detect_hang_count) {
+ struct amdgpu_task_info *ti;
+
+ ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid);
+ if (ti) {
+ dev_err(dev->adev->dev,
+ "Queues reset on process %s tid %d thread %s pid %d\n",
+ ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 8e0d0356e810..bb8cbfc39b90 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -129,63 +129,6 @@ enum SQ_INTERRUPT_ERROR_TYPE {
KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
-static void event_interrupt_poison_consumption(struct kfd_node *dev,
- uint16_t pasid, uint16_t client_id)
-{
- enum amdgpu_ras_block block = 0;
- int old_poison, ret = -EINVAL;
- uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
-
- if (!p)
- return;
-
- /* all queues of a process will be unmapped in one time */
- old_poison = atomic_cmpxchg(&p->poison, 0, 1);
- kfd_unref_process(p);
- if (old_poison)
- return;
-
- switch (client_id) {
- case SOC15_IH_CLIENTID_SE0SH:
- case SOC15_IH_CLIENTID_SE1SH:
- case SOC15_IH_CLIENTID_SE2SH:
- case SOC15_IH_CLIENTID_SE3SH:
- case SOC15_IH_CLIENTID_UTCL2:
- ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
- block = AMDGPU_RAS_BLOCK__GFX;
- if (ret)
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- break;
- case SOC15_IH_CLIENTID_SDMA0:
- case SOC15_IH_CLIENTID_SDMA1:
- case SOC15_IH_CLIENTID_SDMA2:
- case SOC15_IH_CLIENTID_SDMA3:
- case SOC15_IH_CLIENTID_SDMA4:
- block = AMDGPU_RAS_BLOCK__SDMA;
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- break;
- default:
- break;
- }
-
- kfd_signal_poison_consumed_event(dev, pasid);
-
- /* resetting queue passes, do page retirement without gpu reset
- * resetting queue fails, fallback to gpu reset solution
- */
- if (!ret)
- dev_warn(dev->adev->dev,
- "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
- client_id);
- else
- dev_warn(dev->adev->dev,
- "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
- client_id);
-
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
-}
-
static bool event_interrupt_isr_v10(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
@@ -332,11 +275,6 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
WGP_ID),
sq_intr_err_type);
- if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
- sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
- return;
- }
break;
default:
break;
@@ -362,9 +300,6 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
- } else if (source_id == SOC15_INTSRC_SDMA_ECC) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
- return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
@@ -388,12 +323,6 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
hub_inst = node_id / 4;
- if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
- hub_inst, vmid_type)) {
- event_interrupt_poison_consumption(dev, pasid, client_id);
- return;
- }
-
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index f524a55eee11..b3f988b275a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -330,11 +330,14 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
- KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
- kfd_set_dbg_ev_from_interrupt(dev, pasid,
- KFD_CTXID0_DOORBELL_ID(context_id0),
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) {
+ u32 doorbell_id = KFD_CTXID0_DOORBELL_ID(context_id0);
+
+ kfd_set_dbg_ev_from_interrupt(dev, pasid, doorbell_id,
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
NULL, 0);
+ kfd_dqm_suspend_bad_queue_mes(dev, pasid, doorbell_id);
+ }
/* SDMA */
else if (source_id == SOC21_INTSRC_SDMA_TRAP)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index a9c3580be8c9..d46a13156ee9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -167,11 +167,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
block = AMDGPU_RAS_BLOCK__GFX;
- if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
- else
+ if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
+ /* driver mode-2 for gfx poison is only supported
+ * from pmfw 0x00557300 onwards */
+ if (dev->adev->pm.fw_version < 0x00557300)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ /* driver mode-2 for gfx poison is only supported
+ * from pmfw 0x05550C00 onwards */
+ if (dev->adev->pm.fw_version < 0x05550C00)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ }
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -184,11 +196,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
- if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
- else
+ if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2)) {
+ /* driver mode-2 for sdma poison is only supported
+ * from pmfw 0x00557300 onwards */
+ if (dev->adev->pm.fw_version < 0x00557300)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
+ /* driver mode-2 for sdma poison is only supported
+ * from pmfw 0x05550C00 onwards */
+ if (dev->adev->pm.fw_version < 0x05550C00)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ }
break;
default:
dev_warn(dev->adev->dev,
@@ -431,25 +455,9 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
- int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- /* gfxhub */
- if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
- hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
- node_id);
- if (hub_inst < 0)
- hub_inst = 0;
- }
-
- /* mmhub */
- if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
- hub_inst = node_id / 4;
-
- if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
- hub_inst, vmid_type)) {
+ if (source_id == SOC15_INTSRC_VMC_UTCL2_POISON) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 50a81da43ce1..d9ae854b6908 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -225,7 +225,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 66c73825c0a0..84e8ea3a8a0c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -321,8 +321,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v9_mqd *m = (struct v9_mqd *)mqd;
+ uint32_t doorbell_id = m->queue_doorbell_id0;
- return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
+ m->queue_doorbell_id0 = 0;
+
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -624,6 +627,7 @@ static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
m = get_mqd(mqd + hiq_mqd_size * inst);
ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
m->queue_doorbell_id0, inst);
+ m->queue_doorbell_id0 = 0;
++inst;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 00776f08351c..1f9f5bfeaf86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -37,11 +37,14 @@ static int pm_map_process_v9(struct packet_manager *pm,
struct kfd_node *kfd = pm->dqm->dev;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
+ struct amdgpu_device *adev = kfd->adev;
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
+ if (adev->enforce_isolation[kfd->node_id])
+ packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -89,14 +92,18 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
struct pm4_mes_map_process_aldebaran *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
struct kfd_dev *kfd = pm->dqm->dev->kfd;
+ struct kfd_node *knode = pm->dqm->dev;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
int i;
+ struct amdgpu_device *adev = kfd->adev;
packet = (struct pm4_mes_map_process_aldebaran *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
+ if (adev->enforce_isolation[knode->node_id])
+ packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -144,17 +151,22 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
int concurrent_proc_cnt = 0;
struct kfd_node *kfd = pm->dqm->dev;
+ struct amdgpu_device *adev = kfd->adev;
/* Determine the number of processes to map together to HW:
* it can not exceed the number of VMIDs available to the
* scheduler, and it is determined by the smaller of the number
* of processes in the runlist and kfd module parameter
* hws_max_conc_proc.
+ * However, if enforce_isolation is set (the cleaner shader wipes
+ * LDS/VGPRs/SGPRs between process switches), enable single-process
+ * mode in HWS.
* Note: the arbitration between the number of VMIDs and
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = min(pm->dqm->processes_count,
+ concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
+ 1 : min(pm->dqm->processes_count,
kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
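The hunk above chooses how many processes the runlist maps concurrently: normally the smaller of the process count and hws_max_conc_proc, forced to one when enforce_isolation requires the cleaner shader between process switches. A minimal standalone sketch of that selection, with hypothetical counts rather than real dqm state:

    #include <stdio.h>

    /* Hypothetical inputs; the driver reads these from dqm/kfd state. */
    static int pick_concurrent_procs(int enforce_isolation, int processes_count,
                                     int max_proc_per_quantum)
    {
            /* Single-process mode when the cleaner shader must run between
             * process switches; otherwise the smaller of the two limits.
             */
            if (enforce_isolation)
                    return 1;
            return processes_count < max_proc_per_quantum ?
                   processes_count : max_proc_per_quantum;
    }

    int main(void)
    {
            printf("%d\n", pick_concurrent_procs(0, 6, 4)); /* prints 4 */
            printf("%d\n", pick_concurrent_procs(1, 6, 4)); /* prints 1 */
            return 0;
    }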
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index 8b6b2bd5c148..cd8611401a66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -145,8 +145,9 @@ struct pm4_mes_map_process {
union {
struct {
- uint32_t pasid:16;
- uint32_t reserved1:2;
+ uint32_t pasid:16; /* 0 - 15 */
+ uint32_t reserved1:1; /* 16 */
+ uint32_t exec_cleaner_shader:1; /* 17 */
uint32_t debug_vmid:4;
uint32_t new_debug:1;
uint32_t reserved2:1;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
index 38f5cb6a222a..e0ed62c4ade0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
@@ -37,7 +37,7 @@ struct pm4_mes_map_process_aldebaran {
struct {
uint32_t pasid:16; /* 0 - 15 */
uint32_t single_memops:1; /* 16 */
- uint32_t reserved1:1; /* 17 */
+ uint32_t exec_cleaner_shader:1; /* 17 */
uint32_t debug_vmid:4; /* 18 - 21 */
uint32_t new_debug:1; /* 22 */
uint32_t tmz:1; /* 23 */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 2b3ec92981e8..d6530febabad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -310,6 +310,10 @@ struct kfd_node {
struct kfd_local_mem_info local_mem_info;
struct kfd_dev *kfd;
+
+ /* Track per device allocated watch points */
+ uint32_t alloc_watch_ids;
+ spinlock_t watch_points_lock;
};
struct kfd_dev {
@@ -362,10 +366,6 @@ struct kfd_dev {
struct kfd_node *nodes[MAX_KFD_NODES];
unsigned int num_nodes;
- /* Track per device allocated watch points */
- uint32_t alloc_watch_ids;
- spinlock_t watch_points_lock;
-
/* Kernel doorbells for KFD device */
struct amdgpu_bo *doorbells;
@@ -414,13 +414,16 @@ enum kfd_unmap_queues_filter {
* @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
*
* @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
+ *
+ * @KFD_QUEUE_TYPE_SDMA_BY_ENG_ID: SDMA user mode queue with target SDMA engine ID.
*/
enum kfd_queue_type {
KFD_QUEUE_TYPE_COMPUTE,
KFD_QUEUE_TYPE_SDMA,
KFD_QUEUE_TYPE_HIQ,
KFD_QUEUE_TYPE_DIQ,
- KFD_QUEUE_TYPE_SDMA_XGMI
+ KFD_QUEUE_TYPE_SDMA_XGMI,
+ KFD_QUEUE_TYPE_SDMA_BY_ENG_ID
};
enum kfd_queue_format {
@@ -494,8 +497,8 @@ struct queue_properties {
uint64_t queue_size;
uint32_t priority;
uint32_t queue_percent;
- uint32_t *read_ptr;
- uint32_t *write_ptr;
+ void __user *read_ptr;
+ void __user *write_ptr;
void __iomem *doorbell_ptr;
uint32_t doorbell_off;
bool is_interop;
@@ -522,6 +525,12 @@ struct queue_properties {
uint64_t tba_addr;
uint64_t tma_addr;
uint64_t exception_status;
+
+ struct amdgpu_bo *wptr_bo;
+ struct amdgpu_bo *rptr_bo;
+ struct amdgpu_bo *ring_bo;
+ struct amdgpu_bo *eop_buf_bo;
+ struct amdgpu_bo *cwsr_bo;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
@@ -604,7 +613,7 @@ struct queue {
uint64_t gang_ctx_gpu_addr;
void *gang_ctx_cpu_ptr;
- struct amdgpu_bo *wptr_bo;
+ struct amdgpu_bo *wptr_bo_gart;
};
enum KFD_MQD_TYPE {
@@ -837,6 +846,9 @@ struct kfd_process_device {
void *proc_ctx_bo;
uint64_t proc_ctx_gpu_addr;
void *proc_ctx_cpu_ptr;
+
+ /* Tracks queue reset status */
+ bool has_reset_queue;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -854,6 +866,14 @@ struct svm_range_list {
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;
+	/* checkpoint ts decides if page fault recovery needs to be dropped */
+ uint64_t checkpoint_ts[MAX_GPU_INSTANCE];
+
+ /* Default granularity to use in buffer migration
+ * and restoration of backing memory while handling
+ * recoverable page faults
+ */
+ uint8_t default_granularity;
};
/* Process data */
@@ -1284,6 +1304,15 @@ int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
+ u64 expected_size);
+void kfd_queue_buffer_put(struct amdgpu_bo **bo);
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
+int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
+void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo);
+int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
+ struct queue_properties *properties);
+void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_node *dev);
@@ -1303,6 +1332,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
+int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
/* Process Queue Manager */
struct process_queue_node {
@@ -1320,7 +1350,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
- struct amdgpu_bo *wptr_bo,
const struct kfd_criu_queue_priv_data *q_data,
const void *restore_mqd,
const void *restore_ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 17e42161b015..a902950cc060 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1048,7 +1048,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
- pdd->proc_ctx_bo);
+ &pdd->proc_ctx_bo);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
@@ -1851,6 +1851,8 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
goto fail;
}
n_evicted++;
+
+ pdd->dev->dqm->is_hws_hang = false;
}
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 21f5a1fb3bf8..b439d4d0bd84 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -204,19 +204,23 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
}
if (dev->kfd->shared_resources.enable_mes) {
- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
- if (pqn->q->wptr_bo)
- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
}
}
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
+ struct kfd_process_device *pdd;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- if (pqn->q)
+ if (pqn->q) {
+ pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
+ kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
pqm_clean_queue_resource(pqm, pqn);
+ }
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
@@ -231,8 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
- struct file *f, struct amdgpu_bo *wptr_bo,
- unsigned int qid)
+ struct file *f, unsigned int qid)
{
int retval;
@@ -263,12 +266,32 @@ static int init_user_queue(struct process_queue_manager *pqm,
goto cleanup;
}
memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
- (*q)->wptr_bo = wptr_bo;
+
+ /* Starting with GFX11, wptr BOs must be mapped to GART so MES can detect
+ * work on unmapped queues under usermode queue oversubscription (no
+ * aggregated doorbell).
+ */
+ if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
+ >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
+ if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ retval = -EINVAL;
+ goto free_gang_ctx_bo;
+ }
+
+ retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
+ &(*q)->wptr_bo_gart);
+ if (retval) {
+ pr_err("Failed to map wptr bo to GART\n");
+ goto free_gang_ctx_bo;
+ }
+ }
}
pr_debug("PQM After init queue");
return 0;
+free_gang_ctx_bo:
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
uninit_queue(*q);
*q = NULL;
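The GART-mapping branch in init_user_queue() gates on the MES API major version packed into mes.sched_version. A small standalone sketch of that unpacking; the mask and shift below are illustrative assumptions, not the real AMDGPU_MES_API_VERSION_MASK/SHIFT constants from the amdgpu headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout for illustration only. */
    #define MES_API_VERSION_SHIFT 12
    #define MES_API_VERSION_MASK  0x00fff000

    static unsigned int mes_api_version(uint32_t sched_version)
    {
            return (sched_version & MES_API_VERSION_MASK) >> MES_API_VERSION_SHIFT;
    }

    int main(void)
    {
            uint32_t sched_version = 0x00002040; /* API version 2 in this layout */

            if (mes_api_version(sched_version) >= 2)
                    printf("wptr BO must be GART-mapped for oversubscription\n");
            return 0;
    }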
@@ -280,7 +303,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
- struct amdgpu_bo *wptr_bo,
const struct kfd_criu_queue_priv_data *q_data,
const void *restore_mqd,
const void *restore_ctl_stack,
@@ -345,13 +367,14 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
+ case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
/* SDMA queues are always allocated statically no matter
* which scheduler mode is used. We also do not need to
* check whether a SDMA queue can be allocated here, because
* allocate_sdma_queue() in create_queue() has the
* corresponding check logic.
*/
- retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -372,7 +395,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -490,6 +513,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
+ retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ if (retval)
+ goto err_destroy_queue;
+
kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
@@ -500,7 +527,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
-
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
}
@@ -524,11 +551,42 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
- if (!pqn) {
+ if (!pqn || !pqn->q) {
pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
+ /*
+	 * An update with a NULL ring address is used to disable the queue
+ */
+ if (p->queue_address && p->queue_size) {
+ struct kfd_process_device *pdd;
+ struct amdgpu_vm *vm;
+ struct queue *q = pqn->q;
+ int err;
+
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd)
+ return -ENODEV;
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
+ p->queue_size)) {
+ pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
+ p->queue_address, p->queue_size);
+			amdgpu_bo_unreserve(vm->root.bo);
+			return -EFAULT;
+ }
+
+ kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
+ kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ pqn->q->properties.ring_bo = p->ring_bo;
+ }
+
pqn->q->properties.queue_address = p->queue_address;
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
@@ -971,7 +1029,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
print_queue_properties(&qp);
- ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
+ ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
@@ -988,6 +1046,7 @@ exit:
pr_debug("Queue id %d was restored successfully\n", queue_id);
kfree(q_data);
+ kfree(q_extra_data);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 0f6992b1895c..ad29634f8b44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include "kfd_priv.h"
+#include "kfd_topology.h"
+#include "kfd_svm.h"
void print_queue_properties(struct queue_properties *q)
{
@@ -82,3 +84,374 @@ void uninit_queue(struct queue *q)
{
kfree(q);
}
+
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
+static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ struct kfd_process *p = pdd->process;
+ struct list_head update_list;
+ struct svm_range *prange;
+ int ret = -EINVAL;
+
+ INIT_LIST_HEAD(&update_list);
+ addr >>= PAGE_SHIFT;
+ size >>= PAGE_SHIFT;
+
+ mutex_lock(&p->svms.lock);
+
+ /*
+	 * The range may be split into multiple svm pranges aligned to granularity boundaries.
+ */
+ while (size) {
+ uint32_t gpuid, gpuidx;
+ int r;
+
+ prange = svm_range_from_addr(&p->svms, addr, NULL);
+ if (!prange)
+ break;
+
+ if (!prange->mapped_to_gpu)
+ break;
+
+ r = kfd_process_gpuid_from_node(p, pdd->dev, &gpuid, &gpuidx);
+ if (r < 0)
+ break;
+ if (!test_bit(gpuidx, prange->bitmap_access) &&
+ !test_bit(gpuidx, prange->bitmap_aip))
+ break;
+
+ if (!(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED))
+ break;
+
+ list_add(&prange->update_list, &update_list);
+
+ if (prange->last - prange->start + 1 >= size) {
+ size = 0;
+ break;
+ }
+
+ size -= prange->last - prange->start + 1;
+ addr += prange->last - prange->start + 1;
+ }
+ if (size) {
+ pr_debug("[0x%llx 0x%llx] not registered\n", addr, addr + size - 1);
+ goto out_unlock;
+ }
+
+ list_for_each_entry(prange, &update_list, update_list)
+ atomic_inc(&prange->queue_refcount);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&p->svms.lock);
+ return ret;
+}
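kfd_queue_buffer_svm_get() succeeds only when registered SVM ranges tile the requested interval with no hole. A standalone sketch of the same coverage walk over plain inclusive page ranges, using hypothetical data and no locking or refcounting:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, last; }; /* inclusive page numbers */

    /* Stand-in for svm_range_from_addr(): the range containing addr, or NULL. */
    static const struct range *find_range(const struct range *r, int n,
                                          unsigned long addr)
    {
            for (int i = 0; i < n; i++)
                    if (r[i].start <= addr && addr <= r[i].last)
                            return &r[i];
            return NULL;
    }

    /* Mirror of the while (size) walk: true only if the ranges cover
     * [addr, addr + size - 1] without a gap.
     */
    static bool ranges_cover(const struct range *r, int n,
                             unsigned long addr, unsigned long size)
    {
            while (size) {
                    const struct range *p = find_range(r, n, addr);
                    unsigned long len;

                    if (!p)
                            return false; /* hole: page not registered */
                    len = p->last - addr + 1;
                    if (len >= size)
                            return true;
                    size -= len;
                    addr += len;
            }
            return true;
    }

    int main(void)
    {
            struct range r[] = { { 0x100, 0x1ff }, { 0x200, 0x2ff } };

            printf("%d\n", ranges_cover(r, 2, 0x100, 0x200)); /* 1: covered */
            printf("%d\n", ranges_cover(r, 2, 0x100, 0x300)); /* 0: hole at 0x300 */
            return 0;
    }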
+
+static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ struct kfd_process *p = pdd->process;
+ struct svm_range *prange, *pchild;
+ struct interval_tree_node *node;
+ unsigned long last;
+
+ addr >>= PAGE_SHIFT;
+ last = addr + (size >> PAGE_SHIFT) - 1;
+
+ mutex_lock(&p->svms.lock);
+
+ node = interval_tree_iter_first(&p->svms.objects, addr, last);
+ while (node) {
+ struct interval_tree_node *next_node;
+ unsigned long next_start;
+
+ prange = container_of(node, struct svm_range, it_node);
+ next_node = interval_tree_iter_next(node, addr, last);
+ next_start = min(node->last, last) + 1;
+
+ if (atomic_add_unless(&prange->queue_refcount, -1, 0)) {
+ list_for_each_entry(pchild, &prange->child_list, child_list)
+ atomic_add_unless(&pchild->queue_refcount, -1, 0);
+ }
+
+ node = next_node;
+ addr = next_start;
+ }
+
+ mutex_unlock(&p->svms.lock);
+}
+#else
+
+static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ return -EINVAL;
+}
+
+static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+}
+
+#endif
+
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ u64 user_addr;
+ u64 size;
+
+ user_addr = (u64)addr >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!mapping)
+ goto out_err;
+
+ if (user_addr != mapping->start ||
+ (size != 0 && user_addr + size - 1 != mapping->last)) {
+ pr_debug("expected size 0x%llx not equal to mapping addr 0x%llx size 0x%llx\n",
+ expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT,
+ (mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT);
+ goto out_err;
+ }
+
+ *pbo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ mapping->bo_va->queue_refcount++;
+ return 0;
+
+out_err:
+ *pbo = NULL;
+ return -EINVAL;
+}
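kfd_queue_buffer_get() accepts a user VA only when it lands exactly on one GPU VM mapping of the expected size. A standalone sketch of that bounds check in GPU-page units, assuming 4K GPU pages and illustrative values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12 /* assumed 4K GPU pages for this sketch */

    struct mapping { uint64_t start, last; }; /* inclusive GPU page numbers */

    /* Valid only when the buffer starts at the mapping start and, if a size
     * is given, ends exactly at the mapping end.
     */
    static bool buffer_matches(const struct mapping *m, uint64_t user_va,
                               uint64_t expected_size)
    {
            uint64_t addr = user_va >> GPU_PAGE_SHIFT;
            uint64_t size = expected_size >> GPU_PAGE_SHIFT;

            if (addr != m->start)
                    return false;
            return size == 0 || addr + size - 1 == m->last;
    }

    int main(void)
    {
            struct mapping m = { 0x1000, 0x1003 }; /* a 4-page mapping */

            printf("%d\n", buffer_matches(&m, 0x1000000, 0x4000)); /* 1 */
            printf("%d\n", buffer_matches(&m, 0x1000000, 0x2000)); /* 0 */
            return 0;
    }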
+
+/* FIXME: remove this function, just call amdgpu_bo_unref directly */
+void kfd_queue_buffer_put(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_unref(bo);
+}
+
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
+{
+ struct kfd_topology_device *topo_dev;
+ struct amdgpu_vm *vm;
+ u32 total_cwsr_size;
+ int err;
+
+ topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+ if (!topo_dev)
+ return -EINVAL;
+
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
+ if (err)
+ goto out_err_unreserve;
+
+ err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
+ if (err)
+ goto out_err_unreserve;
+
+ err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
+ &properties->ring_bo, properties->queue_size);
+ if (err)
+ goto out_err_unreserve;
+
+	/* only compute queues require an EOP buffer and CWSR area */
+ if (properties->type != KFD_QUEUE_TYPE_COMPUTE)
+ goto out_unreserve;
+
+ /* EOP buffer is not required for all ASICs */
+ if (properties->eop_ring_buffer_address) {
+ if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
+ pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
+ properties->eop_buf_bo->tbo.base.size,
+ topo_dev->node_props.eop_buffer_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+ err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
+ &properties->eop_buf_bo,
+ properties->eop_ring_buffer_size);
+ if (err)
+ goto out_err_unreserve;
+ }
+
+ if (properties->ctl_stack_size != topo_dev->node_props.ctl_stack_size) {
+ pr_debug("queue ctl stack size 0x%x not equal to node ctl stack size 0x%x\n",
+ properties->ctl_stack_size,
+ topo_dev->node_props.ctl_stack_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+
+ if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ properties->ctx_save_restore_area_size,
+ topo_dev->node_props.cwsr_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+
+ total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+ * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
+ err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
+ &properties->cwsr_bo, total_cwsr_size);
+ if (!err)
+ goto out_unreserve;
+
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ err = kfd_queue_buffer_svm_get(pdd, properties->ctx_save_restore_area_address,
+ total_cwsr_size);
+ if (err)
+ goto out_err_release;
+
+ return 0;
+
+out_unreserve:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+
+out_err_unreserve:
+ amdgpu_bo_unreserve(vm->root.bo);
+out_err_release:
+ /* FIXME: make a _locked version of this that can be called before
+ * dropping the VM reservation.
+ */
+ kfd_queue_unref_bo_vas(pdd, properties);
+ kfd_queue_release_buffers(pdd, properties);
+ return err;
+}
+
+int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
+{
+ struct kfd_topology_device *topo_dev;
+ u32 total_cwsr_size;
+
+ kfd_queue_buffer_put(&properties->wptr_bo);
+ kfd_queue_buffer_put(&properties->rptr_bo);
+ kfd_queue_buffer_put(&properties->ring_bo);
+ kfd_queue_buffer_put(&properties->eop_buf_bo);
+ kfd_queue_buffer_put(&properties->cwsr_bo);
+
+ topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+ if (!topo_dev)
+ return -EINVAL;
+ total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+ * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
+ kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
+ return 0;
+}
+
+void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo)
+{
+ if (*bo) {
+ struct amdgpu_bo_va *bo_va;
+
+ bo_va = amdgpu_vm_bo_find(vm, *bo);
+ if (bo_va && bo_va->queue_refcount)
+ bo_va->queue_refcount--;
+ }
+}
+
+int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
+ struct queue_properties *properties)
+{
+ struct amdgpu_vm *vm;
+ int err;
+
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ kfd_queue_unref_bo_va(vm, &properties->wptr_bo);
+ kfd_queue_unref_bo_va(vm, &properties->rptr_bo);
+ kfd_queue_unref_bo_va(vm, &properties->ring_bo);
+ kfd_queue_unref_bo_va(vm, &properties->eop_buf_bo);
+ kfd_queue_unref_bo_va(vm, &properties->cwsr_bo);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+}
+
+#define SGPR_SIZE_PER_CU 0x4000
+#define LDS_SIZE_PER_CU 0x10000
+#define HWREG_SIZE_PER_CU 0x1000
+#define DEBUGGER_BYTES_ALIGN 64
+#define DEBUGGER_BYTES_PER_WAVE 32
+
+static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
+{
+ u32 vgpr_size = 0x40000;
+
+ if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
+ gfxv == 90008) /* GFX_VERSION_ARCTURUS */
+ vgpr_size = 0x80000;
+ else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
+ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
+ gfxv == 120000 || /* GFX_VERSION_GFX1200 */
+ gfxv == 120001) /* GFX_VERSION_GFX1201 */
+ vgpr_size = 0x60000;
+
+ return vgpr_size;
+}
+
+#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \
+ (kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
+ LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU)
+
+#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \
+	((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10 */
+
+#define SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER 40
+
+void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
+{
+ struct kfd_node_properties *props = &dev->node_props;
+ u32 gfxv = props->gfx_target_version;
+ u32 ctl_stack_size;
+ u32 wg_data_size;
+ u32 wave_num;
+ u32 cu_num;
+
+ if (gfxv < 80001) /* GFX_VERSION_CARRIZO */
+ return;
+
+ cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask);
+ wave_num = (gfxv < 100100) ? /* GFX_VERSION_NAVI10 */
+ min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
+ : cu_num * 32;
+
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE);
+ ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
+ ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
+ PAGE_SIZE);
+
+ if ((gfxv / 10000 * 10000) == 100000) {
+ /* HW design limits control stack size to 0x7000.
+ * This is insufficient for theoretical PM4 cases
+ * but sufficient for AQL, limited by SPI events.
+ */
+ ctl_stack_size = min(ctl_stack_size, 0x7000);
+ }
+
+ props->ctl_stack_size = ctl_stack_size;
+ props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN);
+ props->cwsr_size = ctl_stack_size + wg_data_size;
+
+ if (gfxv == 80002) /* GFX_VERSION_TONGA */
+ props->eop_buffer_size = 0x8000;
+ else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ props->eop_buffer_size = 4096;
+ else if (gfxv >= 80000)
+ props->eop_buffer_size = 4096;
+}
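kfd_queue_ctx_save_restore_size() sizes the CWSR area as per-CU workgroup context data plus a page-aligned control stack. A standalone rerun of that arithmetic for a hypothetical pre-NAVI10 GFX9 part (VGPR size 0x80000, 8 control-stack bytes per wave); the 64-CU and 40-waves-per-CU figures are assumptions, not values read from any device:

    #include <stdio.h>

    #define ALIGN_UP(x, a)       (((x) + (a) - 1) / (a) * (a))
    #define PAGE_SIZE            4096

    #define SGPR_SIZE_PER_CU     0x4000
    #define LDS_SIZE_PER_CU      0x10000
    #define HWREG_SIZE_PER_CU    0x1000
    #define CTX_SAVE_AREA_HEADER 40 /* SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER */

    int main(void)
    {
            unsigned int vgpr_size = 0x80000; /* assumed GFX9 value */
            unsigned int cu_num = 64, wave_num = cu_num * 40;
            unsigned int wg_per_cu = vgpr_size + SGPR_SIZE_PER_CU +
                                     LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU;

            unsigned int wg_data_size = ALIGN_UP(cu_num * wg_per_cu, PAGE_SIZE);
            unsigned int ctl_stack_size = ALIGN_UP(CTX_SAVE_AREA_HEADER +
                                                   wave_num * 8 + 8, PAGE_SIZE);

            printf("ctl_stack=0x%x wg_data=0x%x cwsr=0x%x\n",
                   ctl_stack_size, wg_data_size, ctl_stack_size + wg_data_size);
            /* prints ctl_stack=0x6000 wg_data=0x2540000 cwsr=0x2546000 */
            return 0;
    }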
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index ea6a8e43bd5b..de8b9abf7afc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -235,17 +235,16 @@ void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset,
amdgpu_reset_get_desc(reset_context, reset_cause,
sizeof(reset_cause));
- kfd_smi_event_add(0, dev, event, "%x %s\n",
- dev->reset_seq_num,
- reset_cause);
+ kfd_smi_event_add(0, dev, event, KFD_EVENT_FMT_UPDATE_GPU_RESET(
+ dev->reset_seq_num, reset_cause));
}
void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask)
{
- kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
+ kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, KFD_EVENT_FMT_THERMAL_THROTTLING(
throttle_bitmask,
- amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
+ amdgpu_dpm_get_thermal_throttling_counter(dev->adev)));
}
void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
@@ -256,8 +255,8 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
if (task_info->pid)
- kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
- task_info->pid, task_info->task_name);
+ kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
+ task_info->pid, task_info->task_name));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -267,16 +266,16 @@ void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
ktime_t ts)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
- "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
- address, node->id, write_fault ? 'W' : 'R');
+ KFD_EVENT_FMT_PAGEFAULT_START(ktime_to_ns(ts), pid,
+ address, node->id, write_fault ? 'W' : 'R'));
}
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
- "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
- pid, address, node->id, migration ? 'M' : 'U');
+ KFD_EVENT_FMT_PAGEFAULT_END(ktime_get_boottime_ns(),
+ pid, address, node->id, migration ? 'M' : 'U'));
}
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
@@ -286,9 +285,9 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
- "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
+ KFD_EVENT_FMT_MIGRATE_START(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, prefetch_loc, preferred_loc, trigger);
+ from, to, prefetch_loc, preferred_loc, trigger));
}
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
@@ -296,24 +295,24 @@ void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
uint32_t from, uint32_t to, uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
- "%lld -%d @%lx(%lx) %x->%x %d\n",
+ KFD_EVENT_FMT_MIGRATE_END(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, trigger);
+ from, to, trigger));
}
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
- "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
- node->id, trigger);
+ KFD_EVENT_FMT_QUEUE_EVICTION(ktime_get_boottime_ns(), pid,
+ node->id, trigger));
}
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
- "%lld -%d %x\n", ktime_get_boottime_ns(), pid,
- node->id);
+ KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), pid,
+ node->id, 0));
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
@@ -330,8 +329,8 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
kfd_smi_event_add(p->lead_thread->pid, pdd->dev,
KFD_SMI_EVENT_QUEUE_RESTORE,
- "%lld -%d %x %c\n", ktime_get_boottime_ns(),
- p->lead_thread->pid, pdd->dev->id, 'R');
+ KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(),
+ p->lead_thread->pid, pdd->dev->id, 'R'));
}
kfd_unref_process(p);
}
@@ -341,8 +340,8 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
- "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
- pid, address, last - address + 1, node->id, trigger);
+ KFD_EVENT_FMT_UNMAP_FROM_GPU(ktime_get_boottime_ns(),
+ pid, address, last - address + 1, node->id, trigger));
}
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd9c2921e0dc..04e746923697 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -309,12 +309,13 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
}
static void
-svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
- uint8_t *granularity, uint32_t *flags)
+svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
+ int32_t *prefetch_loc, uint8_t *granularity,
+ uint32_t *flags)
{
*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
- *granularity = 9;
+ *granularity = svms->default_granularity;
*flags =
KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
@@ -358,7 +359,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
- svm_range_set_default_attributes(&prange->preferred_loc,
+ svm_range_set_default_attributes(svms, &prange->preferred_loc,
&prange->prefetch_loc,
&prange->granularity, &prange->flags);
@@ -1051,6 +1052,7 @@ svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+ atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
return 0;
}
@@ -1992,6 +1994,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+ atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
return new;
}
@@ -2260,16 +2263,10 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
struct kfd_process_device *pdd;
struct kfd_process *p;
- int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
-restart:
- drain = atomic_read(&svms->drain_pagefaults);
- if (!drain)
- return;
-
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
@@ -2289,8 +2286,6 @@ restart:
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
- if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
- goto restart;
}
static void svm_range_deferred_list_work(struct work_struct *work)
@@ -2312,17 +2307,8 @@ static void svm_range_deferred_list_work(struct work_struct *work)
prange->start, prange->last, prange->work_item.op);
mm = prange->work_item.mm;
-retry:
- mmap_write_lock(mm);
- /* Checking for the need to drain retry faults must be inside
- * mmap write lock to serialize with munmap notifiers.
- */
- if (unlikely(atomic_read(&svms->drain_pagefaults))) {
- mmap_write_unlock(mm);
- svm_range_drain_retry_fault(svms);
- goto retry;
- }
+ mmap_write_lock(mm);
/* Remove from deferred_list must be inside mmap write lock, for
* two race cases:
@@ -2443,6 +2429,17 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
struct kfd_process *p;
unsigned long s, l;
bool unmap_parent;
+ uint32_t i;
+
+ if (atomic_read(&prange->queue_refcount)) {
+ int r;
+
+ pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
+ prange->start << PAGE_SHIFT);
+ r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
+ if (r)
+ pr_debug("failed %d to quiesce KFD queues\n", r);
+ }
p = kfd_lookup_process_by_mm(mm);
if (!p)
@@ -2452,11 +2449,38 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
- /* Make sure pending page faults are drained in the deferred worker
- * before the range is freed to avoid straggler interrupts on
- * unmapped memory causing "phantom faults".
+	/* calculate the time stamps used to decide which page faults need to be
+	 * dropped or handled before pages are unmapped from the gpu vm
*/
- atomic_inc(&svms->drain_pagefaults);
+ for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *adev;
+ struct amdgpu_ih_ring *ih;
+ uint32_t checkpoint_wptr;
+
+ pdd = p->pdds[i];
+ if (!pdd)
+ continue;
+
+ adev = pdd->dev->adev;
+
+		/* Check the ih1 ring and record a checkpoint if the CAM is not available */
+ if (adev->irq.ih1.ring_size) {
+ ih = &adev->irq.ih1;
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (ih->rptr != checkpoint_wptr) {
+ svms->checkpoint_ts[i] =
+ amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+ continue;
+ }
+ }
+
+		/* check if adev->irq.ih_soft is not empty */
+ ih = &adev->irq.ih_soft;
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (ih->rptr != checkpoint_wptr)
+ svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+ }
unmap_parent = start <= prange->start && last >= prange->last;
@@ -2680,9 +2704,10 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
start_limit = max(vma->vm_start >> PAGE_SHIFT,
- (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
+ (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
end_limit = min(vma->vm_end >> PAGE_SHIFT,
- (unsigned long)ALIGN(addr + 1, 2UL << 8));
+ (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
+
/* First range that starts after the fault address */
node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
if (node) {
@@ -2897,7 +2922,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
- uint64_t addr, bool write_fault)
+ uint64_t addr, uint64_t ts, bool write_fault)
{
unsigned long start, last, size;
struct mm_struct *mm = NULL;
@@ -2907,7 +2932,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
ktime_t timestamp = ktime_get_boottime();
struct kfd_node *node;
int32_t best_loc;
- int32_t gpuidx = MAX_GPU_INSTANCE;
+ int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
struct vm_area_struct *vma;
bool migration = false;
@@ -2928,11 +2953,38 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
if (atomic_read(&svms->drain_pagefaults)) {
- pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
r = 0;
goto out;
}
+ node = kfd_node_by_irq_ids(adev, node_id, vmid);
+ if (!node) {
+ pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
+ vmid);
+ r = -EFAULT;
+ goto out;
+ }
+
+ if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
+ pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* check if this page fault time stamp is before svms->checkpoint_ts */
+ if (svms->checkpoint_ts[gpuidx] != 0) {
+ if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ r = 0;
+ goto out;
+ } else
+			/* ts is now after svms->checkpoint_ts, so reset svms->checkpoint_ts
+			 * to zero to avoid a later ts wrap-around producing a wrong comparison
+			 */
+ svms->checkpoint_ts[gpuidx] = 0;
+ }
+
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
r = -EFAULT;
@@ -2949,13 +3001,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
- node = kfd_node_by_irq_ids(adev, node_id, vmid);
- if (!node) {
- pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
- vmid);
- r = -EFAULT;
- goto out;
- }
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
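The checkpoint test above relies on a wrap-safe ordering of 48-bit IH timestamps. A minimal sketch of such a comparison, modelled on the amdgpu_ih_ts_after() helper; treat the exact shape as an assumption rather than a copy of the driver code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 48-bit timestamps: shift into the top of a 64-bit word so that a
     * signed comparison of the difference stays correct across a wrap.
     */
    static bool ts_after(uint64_t t1, uint64_t t2)
    {
            return (int64_t)((t2 << 16) - (t1 << 16)) > 0; /* t2 after t1? */
    }

    int main(void)
    {
            printf("%d\n", ts_after(100, 200));             /* 1 */
            printf("%d\n", ts_after(0xffffffffffffULL, 5)); /* 1: wrapped */
            return 0;
    }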
@@ -3170,8 +3215,9 @@ void svm_range_list_fini(struct kfd_process *p)
/*
* Ensure no retry fault comes in afterwards, as page fault handler will
* not find kfd process and take mm lock to recover fault.
+	 * Stop kfd page fault handling, then wait until pending page faults are drained.
*/
- atomic_inc(&p->svms.drain_pagefaults);
+ atomic_set(&p->svms.drain_pagefaults, 1);
svm_range_drain_retry_fault(&p->svms);
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
@@ -3205,6 +3251,12 @@ int svm_range_list_init(struct kfd_process *p)
if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
bitmap_set(svms->bitmap_supported, i, 1);
+ /* Value of default granularity cannot exceed 0x1B, the
+ * number of pages supported by a 4-level paging table
+ */
+ svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
+ pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
+
return 0;
}
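default_granularity is a power-of-two exponent in pages; svm_range_get_range_boundaries() uses it to align the faulting address to a granularity-sized window, as in the earlier hunk. A tiny standalone illustration of that alignment, with made-up numbers:

    #include <stdio.h>

    #define ALIGN_DOWN_POW2(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
            unsigned long addr = 0x12345; /* page number of the faulting address */
            unsigned int granularity = 9; /* 2^9 pages = 2MB of 4K pages */
            unsigned long unit = 1UL << granularity;

            printf("start=0x%lx end=0x%lx\n",
                   ALIGN_DOWN_POW2(addr, unit),
                   ALIGN_DOWN_POW2(addr, unit) + unit);
            /* prints start=0x12200 end=0x12400 */
            return 0;
    }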
@@ -3732,7 +3784,7 @@ svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
node = interval_tree_iter_first(&svms->objects, start, last);
if (!node) {
pr_debug("range attrs not found return default values\n");
- svm_range_set_default_attributes(&location, &prefetch_loc,
+ svm_range_set_default_attributes(svms, &location, &prefetch_loc,
&granularity, &flags_and);
flags_or = flags_and;
if (p->xnack_enabled)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 70c1776611c4..bddd24f04669 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -137,6 +137,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool mapped_to_gpu;
+ atomic_t queue_refcount;
};
static inline void svm_range_lock(struct svm_range *prange)
@@ -173,7 +174,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
- uint32_t vmid, uint32_t node_id, uint64_t addr,
+ uint32_t vmid, uint32_t node_id, uint64_t addr, uint64_t ts,
bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
@@ -224,7 +225,7 @@ static inline void svm_range_list_fini(struct kfd_process *p)
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
unsigned int pasid,
uint32_t client_id, uint32_t node_id,
- uint64_t addr, bool write_fault)
+ uint64_t addr, uint64_t ts, bool write_fault)
{
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 6f89b06f89d3..3871591c9aec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -292,6 +292,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
iolink->max_bandwidth);
sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size",
iolink->rec_transfer_size);
+ sysfs_show_32bit_prop(buffer, offs, "recommended_sdma_engine_id_mask",
+ iolink->rec_sdma_eng_id_mask);
sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags);
return offs;
@@ -1265,6 +1267,54 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
}
}
+#define REC_SDMA_NUM_GPU 8
+static const int rec_sdma_eng_map[REC_SDMA_NUM_GPU][REC_SDMA_NUM_GPU] = {
+ { -1, 14, 12, 2, 4, 8, 10, 6 },
+ { 14, -1, 2, 10, 8, 4, 6, 12 },
+ { 10, 2, -1, 12, 14, 6, 4, 8 },
+ { 2, 12, 10, -1, 6, 14, 8, 4 },
+ { 4, 8, 14, 6, -1, 10, 12, 2 },
+ { 8, 4, 6, 14, 12, -1, 2, 10 },
+ { 10, 6, 4, 8, 12, 2, -1, 14 },
+ { 6, 12, 8, 4, 2, 10, 14, -1 }};
+
+static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
+ struct kfd_iolink_properties *outbound_link,
+ struct kfd_iolink_properties *inbound_link)
+{
+ struct kfd_node *gpu = outbound_link->gpu;
+ struct amdgpu_device *adev = gpu->adev;
+ int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
+ adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
+ kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
+ (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+
+ if (support_rec_eng) {
+ int src_socket_id = adev->gmc.xgmi.physical_node_id;
+ int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+
+ outbound_link->rec_sdma_eng_id_mask =
+ 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ inbound_link->rec_sdma_eng_id_mask =
+ 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
+ } else {
+ int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
+ int i, eng_offset = 0;
+
+ if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
+ eng_offset = num_sdma_eng;
+ num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
+ }
+
+ for (i = 0; i < num_sdma_eng; i++) {
+ outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
+ inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
+ }
+ }
+}
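kfd_set_recommended_sdma_engines() picks one even-numbered XGMI SDMA engine per directed socket pair from the table above and publishes it as a one-bit mask. A standalone sketch computing both directions of a link; the socket IDs are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_GPU 8

    static const int rec_sdma_eng_map[NUM_GPU][NUM_GPU] = {
            { -1, 14, 12,  2,  4,  8, 10,  6 },
            { 14, -1,  2, 10,  8,  4,  6, 12 },
            { 10,  2, -1, 12, 14,  6,  4,  8 },
            {  2, 12, 10, -1,  6, 14,  8,  4 },
            {  4,  8, 14,  6, -1, 10, 12,  2 },
            {  8,  4,  6, 14, 12, -1,  2, 10 },
            { 10,  6,  4,  8, 12,  2, -1, 14 },
            {  6, 12,  8,  4,  2, 10, 14, -1 },
    };

    int main(void)
    {
            int src = 1, dst = 3; /* hypothetical xgmi physical node ids */
            uint32_t outbound = 1u << rec_sdma_eng_map[src][dst];
            uint32_t inbound  = 1u << rec_sdma_eng_map[dst][src];

            printf("outbound mask 0x%x, inbound mask 0x%x\n", outbound, inbound);
            /* engine 10 outbound (0x400), engine 12 inbound (0x1000) */
            return 0;
    }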
+
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
struct kfd_iolink_properties *link, *inbound_link;
@@ -1303,6 +1353,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
+ kfd_set_recommended_sdma_engines(peer_dev, link, inbound_link);
}
}
@@ -2027,7 +2078,7 @@ int kfd_topology_add_device(struct kfd_node *gpu)
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
- if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ if (gpu->kfd->num_nodes > 1)
dev->node_props.location_id |= dev->gpu->node_id;
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
@@ -2120,6 +2171,8 @@ int kfd_topology_add_device(struct kfd_node *gpu)
dev->gpu->adev->gmc.xgmi.connected_to_cpu)
dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
+ kfd_queue_ctx_save_restore_size(dev);
+
kfd_debug_print_topology();
kfd_notify_gpu_change(gpu_id, 1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 2d1c9d771bef..155b5c410af1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -74,6 +74,10 @@ struct kfd_node_properties {
uint32_t num_sdma_xgmi_engines;
uint32_t num_sdma_queues_per_engine;
uint32_t num_cp_queues;
+ uint32_t cwsr_size;
+ uint32_t ctl_stack_size;
+ uint32_t eop_buffer_size;
+ uint32_t debug_memory_size;
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
@@ -121,6 +125,7 @@ struct kfd_iolink_properties {
uint32_t min_bandwidth;
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
+ uint32_t rec_sdma_eng_id_mask;
uint32_t flags;
struct kfd_node *gpu;
struct kobject *kobj;
diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
index 10138676f27f..e5c0205f2618 100644
--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
@@ -29,6 +29,7 @@
#define SOC15_INTSRC_CP_BAD_OPCODE 183
#define SOC15_INTSRC_SQ_INTERRUPT_MSG 239
#define SOC15_INTSRC_VMC_FAULT 0
+#define SOC15_INTSRC_VMC_UTCL2_POISON 1
#define SOC15_INTSRC_SDMA_TRAP 224
#define SOC15_INTSRC_SDMA_ECC 220
#define SOC21_INTSRC_SDMA_TRAP 49
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5942fc4e1c86..0cff66735cfe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -176,6 +176,7 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -877,6 +878,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
"HPD_IRQ",
"SET_CONFIGC_REPLY",
"DPIA_NOTIFICATION",
+ "HPD_SENSE_NOTIFY",
};
do {
@@ -1740,7 +1742,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
/* Send the chunk */
ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
if (ret != DMUB_STATUS_OK)
- /* No need to free bb here since it shall be done unconditionally <elsewhere> */
+ /* No need to free bb here since it shall be done in dm_sw_fini() */
return NULL;
}
@@ -1886,6 +1888,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
+ init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
@@ -2242,7 +2250,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2489,8 +2497,17 @@ static int dm_sw_init(void *handle)
static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dal_allocation *da;
+
+ list_for_each_entry(da, &adev->dm.da_list, list) {
+ if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
+ amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+ list_del(&da->list);
+ kfree(da);
+ break;
+ }
+ }
- kfree(adev->dm.bb_from_dmub);
adev->dm.bb_from_dmub = NULL;
kfree(adev->dm.dmub_fb_info);
@@ -2592,9 +2609,9 @@ static int dm_late_init(void *handle)
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
+ u8 buf[UUID_SIZE];
+ guid_t guid;
int ret;
- u8 guid[16];
- u64 tmp64;
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
@@ -2615,26 +2632,27 @@ static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (ret != 16) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- if (memchr_inv(guid, 0, 16) == NULL) {
- tmp64 = get_jiffies_64();
- memcpy(&guid[0], &tmp64, sizeof(u64));
- memcpy(&guid[8], &tmp64, sizeof(u64));
+ import_guid(&guid, buf);
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+ if (guid_is_null(&guid)) {
+ guid_gen(&guid);
+ export_guid(buf, &guid);
- if (ret != 16) {
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));
+
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
goto out_fail;
}
}
- memcpy(mgr->mst_primary->guid, guid, 16);
+ guid_copy(&mgr->mst_primary->guid, &guid);
out_fail:
mutex_unlock(&mgr->lock);
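Side note (not part of the patch): import_guid(), guid_is_null(), guid_gen(),
export_guid() and guid_copy() are the linux/uuid.h helpers; the rewrite above
replaces the old jiffies-based fake GUID with a properly generated one. A
standalone sketch of the same "forgotten GUID" semantics, with stand-in
helpers:

/* Hypothetical demo, not driver code: an all-zero 16-byte DPCD GUID is
 * treated as "forgotten" and a fresh RFC 4122 v4 value is minted. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int guid16_is_null(const uint8_t g[16])
{
	static const uint8_t zero[16];

	return memcmp(g, zero, sizeof(zero)) == 0;
}

static void guid16_gen(uint8_t g[16])
{
	for (int i = 0; i < 16; i++)
		g[i] = rand() & 0xff;	/* stand-in for guid_gen() */
	g[6] = (g[6] & 0x0f) | 0x40;	/* version 4 */
	g[8] = (g[8] & 0x3f) | 0x80;	/* RFC 4122 variant */
}

int main(void)
{
	uint8_t guid[16] = {0};

	if (guid16_is_null(guid))
		guid16_gen(guid);
	return 0;
}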
@@ -3230,8 +3248,11 @@ static int dm_resume(void *handle)
drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
/*
* atomic_check is expected to create the dc states. We need to release
@@ -4874,18 +4895,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Determine whether to enable Replay support by default. */
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
-/*
- * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
- * case IP_VERSION(3, 1, 4):
- * case IP_VERSION(3, 1, 5):
- * case IP_VERSION(3, 1, 6):
- * case IP_VERSION(3, 2, 0):
- * case IP_VERSION(3, 2, 1):
- * case IP_VERSION(3, 5, 0):
- * case IP_VERSION(3, 5, 1):
- * replay_feature_enabled = true;
- * break;
- */
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
@@ -4972,12 +4989,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
-
- /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
- * PSR is also supported.
- */
- if (link->psr_settings.psr_feature_enabled)
- adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5182,7 +5193,7 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
return r;
}
@@ -6471,7 +6482,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
@@ -6490,7 +6502,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -7233,6 +7245,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
+ if (!dm_state)
+ return NULL;
+
do {
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
@@ -8270,7 +8285,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
- bool enable)
+ struct dm_crtc_state *acrtc_state)
{
/*
* We have no guarantee that the frontend index maps to the same
@@ -8282,9 +8297,31 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
+ struct drm_vblank_crtc_config config = {0};
+ struct dc_crtc_timing *timing;
+ int offdelay;
+
+ if (acrtc_state) {
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ IP_VERSION(3, 5, 0) ||
+ acrtc_state->stream->link->psr_settings.psr_version <
+ DC_PSR_VERSION_UNSUPPORTED) {
+ timing = &acrtc_state->stream->timing;
+
+ /* at least 2 frames */
+ offdelay = DIV64_U64_ROUND_UP((u64)20 *
+ timing->v_total *
+ timing->h_total,
+ timing->pix_clk_100hz);
+
+ config.offdelay_ms = offdelay ?: 30;
+ } else {
+ config.disable_immediate = true;
+ }
+
+ drm_crtc_vblank_on_config(&acrtc->base,
+ &config);
- if (enable) {
- drm_crtc_vblank_on(&acrtc->base);
amdgpu_irq_get(
adev,
&adev->pageflip_irq,
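Worked example (illustrative timing, not from the patch): the expression
reduces two frame times to milliseconds, since
2 * 1000 * v_total * h_total / (pix_clk_100hz * 100)
= 20 * v_total * h_total / pix_clk_100hz. For 1920x1080@60 with
h_total = 2200, v_total = 1125 and a 148.5 MHz pixel clock
(pix_clk_100hz = 1485000), DIV64_U64_ROUND_UP(20 * 1125 * 2200, 1485000)
= DIV64_U64_ROUND_UP(49500000, 1485000) = 34, so vblank interrupts stay on
for roughly two frames (34 ms) after the last consumer.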
@@ -8750,7 +8787,8 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ if (afb)
+ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
@@ -9340,7 +9378,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
if (acrtc)
old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- if (!acrtc->wb_enabled)
+ if (!acrtc || !acrtc->wb_enabled)
continue;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -9358,7 +9396,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
if (old_crtc_state->active &&
(!new_crtc_state->active ||
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
- manage_dm_interrupts(adev, acrtc, false);
+ manage_dm_interrupts(adev, acrtc, NULL);
dc_stream_release(dm_old_crtc_state->stream);
}
}
@@ -9744,9 +9782,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
- hdcp_update_display(
- adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type, enable_encryption);
+ if (aconnector->dc_link)
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
}
}
@@ -9873,7 +9912,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
- manage_dm_interrupts(adev, acrtc, true);
+ manage_dm_interrupts(adev, acrtc, dm_new_crtc_state);
}
/* Handle vrr on->off / off->on transitions */
amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
@@ -10571,7 +10610,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
* TODO: We can likely skip bandwidth validation if the only thing that
* changed about the plane was its z-ordering.
*/
- if (new_crtc_state->zpos_changed)
+ if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos)
return true;
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -11419,6 +11458,17 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg(dev, "Failed to determine cursor mode\n");
goto fail;
}
+
+ /*
+ * If overlay cursor is needed, DC cannot go through the
+ * native cursor update path. All enabled planes on the CRTC
+ * need to be added for DC to not disable a plane by mistake
+ */
+ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
}
/* Remove exiting planes if they are modified */
@@ -11640,7 +11690,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
- drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n");
+ drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
@@ -11661,7 +11711,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
ret = drm_dp_mst_atomic_check(state);
if (ret) {
- drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n");
+ drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
status = dc_validate_global_state(dc, dm_state->context, true);
@@ -11755,25 +11805,6 @@ fail:
return ret;
}
-static bool is_dp_capable_without_timing_msa(struct dc *dc,
- struct amdgpu_dm_connector *amdgpu_dm_connector)
-{
- u8 dpcd_data;
- bool capable = false;
-
- if (amdgpu_dm_connector->dc_link &&
- dm_helpers_dp_read_dpcd(
- NULL,
- amdgpu_dm_connector->dc_link,
- DP_DOWN_STREAM_PORT_COUNT,
- &dpcd_data,
- sizeof(dpcd_data))) {
- capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
- }
-
- return capable;
-}
-
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
@@ -12076,8 +12107,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (is_dp_capable_without_timing_msa(adev->dm.dc,
- amdgpu_dm_connector)) {
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
@@ -12159,7 +12190,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
}
}
- as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+ if (amdgpu_dm_connector->dc_link)
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
@@ -12183,6 +12215,12 @@ update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;
+ if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
+ amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
+ amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
+ amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
+ }
+
if (connector->vrr_capable_property)
drm_connector_set_vrr_capable_property(connector,
freesync_capable);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 99014339aaa3..a2cf2c066a76 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -251,9 +251,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
- dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
-
- DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+ if (dm->active_vblank_irq_count > 0) {
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n");
+ dc_allow_idle_optimizations(dm->dc, false);
+ }
/*
* Control PSR based on vblank requirements from OS
@@ -272,6 +273,11 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->stream->link->replay_settings.replay_feature_enabled);
}
+ if (dm->active_vblank_irq_count == 0) {
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n");
+ dc_allow_idle_optimizations(dm->dc, true);
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -286,11 +292,14 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct amdgpu_display_manager *dm = &adev->dm;
struct vblank_control_work *work;
+ int irq_type;
int rc = 0;
if (acrtc->otg_inst == -1)
goto skip;
+ irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
@@ -303,13 +312,52 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
if (rc)
return rc;
- rc = (enable)
- ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
- : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+ /* crtc vblank or vstartup interrupt */
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->crtc_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get crtc_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->crtc_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put crtc_irq ret=%d\n", rc);
+ }
if (rc)
return rc;
+ /*
+ * hubp surface flip interrupt
+ *
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get pageflip_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put pageflip_irq ret=%d\n", rc);
+ }
+
+ if (rc)
+ return rc;
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* crtc vline0 interrupt, only available on DCN+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) != 0) {
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->vline0_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get vline0_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->vline0_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put vline0_irq ret=%d\n", rc);
+ }
+
+ if (rc)
+ return rc;
+ }
+#endif
skip:
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 62cb59f00929..db56b0aa5454 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3804,9 +3804,12 @@ static int trigger_hpd_mst_set(void *data, u64 val)
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
mutex_lock(&adev->dm.dc_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
+ if (!ret)
+ DRM_ERROR("DM_MST: Failed to detect dc link!");
+
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0)
DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b490ae67b6be..50109d13d967 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -759,7 +759,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
uint8_t ret = 0;
drm_dbg_dp(aux->drm_dev,
- "Configure DSC to non-virtual dpcd synaptics\n");
+ "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");
if (enable) {
/* When DSC was enabled on a previous boot and we reboot with the hub,
@@ -772,7 +772,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
apply_synaptics_fifo_reset_wa(aux);
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC enable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC enable to synaptics\n");
} else {
/* Synaptics hub does not support virtual dpcd,
@@ -781,7 +781,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
*/
if (!stream->link->link_status.link_active) {
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC disable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
}
}
@@ -823,14 +823,14 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
ret);
}
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding enable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -838,7 +838,7 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding disable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -848,7 +848,7 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
ret);
}
}
@@ -858,12 +858,12 @@ bool dm_helpers_dp_write_dsc_enable(
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to SST RX\n",
+ "SST_DSC Send DSC %s to SST RX\n",
enable_dsc ? "enable" : "disable");
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to DP-HDMI PCON\n",
+ "SST_DSC Send DSC %s to DP-HDMI PCON\n",
enable_dsc ? "enable" : "disable");
}
}
@@ -1286,3 +1286,15 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
return as_type;
}
+
+bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
+bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+} \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2e9f6da1acdc..c0c61c03984c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -253,7 +253,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
- if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
@@ -578,6 +578,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
if (!aconnector)
return NULL;
+ DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port);
+
connector = &aconnector->base;
aconnector->mst_output_port = port;
aconnector->mst_root = master;
@@ -872,11 +874,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
if (params[i].sink) {
if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
- DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
+ DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i,
params[i].sink->edid_caps.display_name);
}
- DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
+ DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n",
params[i].timing->flags.DSC,
params[i].timing->dsc_cfg.bits_per_pixel,
vars[i + k].pbn);
@@ -1054,6 +1056,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
@@ -1064,10 +1067,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index);
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1082,6 +1087,15 @@ static int try_disable_dsc(struct drm_atomic_state *state,
return 0;
}
+static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n",
+ i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn);
+}
+
static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
@@ -1104,6 +1118,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return PTR_ERR(mst_state);
/* Set up params */
+ DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -1145,6 +1160,9 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
dc_link_get_highest_encoding_format(dc_link));
+ DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n",
+ count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps,
+ params[count].bw_range.stream_kbps);
count++;
}
@@ -1159,6 +1177,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
*link_vars_start_index += count;
/* Try no compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
@@ -1177,7 +1196,10 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return ret;
}
+ log_dsc_params(count, vars, k);
+
/* Try max compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
@@ -1201,14 +1223,26 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret != 0)
return ret;
+ log_dsc_params(count, vars, k);
+
/* Optimize degree of compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n");
ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n");
return ret;
+ }
+ log_dsc_params(count, vars, k);
+
+ DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n");
ret = try_disable_dsc(state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n");
return ret;
+ }
+
+ log_dsc_params(count, vars, k);
set_dsc_configs_from_fairness_vars(params, vars, count, k);
@@ -1230,17 +1264,19 @@ static bool is_dsc_need_re_compute(
/* only check phy used by dsc mst branch */
if (dc_link->type != dc_connection_mst_branch)
- return false;
+ goto out;
/* add a check for older MST DSC with no virtual DPCDs */
if (needs_dsc_aux_workaround(dc_link) &&
(!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
- return false;
+ goto out;
for (i = 0; i < MAX_PIPES; i++)
stream_on_link[i] = NULL;
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count);
+
/* check if there is mode change in new request */
for (i = 0; i < dc_state->stream_count; i++) {
struct drm_crtc_state *new_crtc_state;
@@ -1250,6 +1286,8 @@ static bool is_dsc_need_re_compute(
if (!stream)
continue;
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream);
+
/* check if stream using the same link for mst */
if (stream->link != dc_link)
continue;
@@ -1262,8 +1300,11 @@ static bool is_dsc_need_re_compute(
new_stream_on_link_num++;
new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
- if (!new_conn_state)
+ if (!new_conn_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_conn_state))
continue;
@@ -1272,21 +1313,36 @@ static bool is_dsc_need_re_compute(
continue;
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
- if (!new_crtc_state)
+ if (!new_crtc_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_crtc_state))
continue;
if (new_crtc_state->enable && new_crtc_state->active) {
if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
- new_crtc_state->connectors_changed)
- return true;
+ new_crtc_state->connectors_changed) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required."
+ "stream 0x%p in new dc_state\n",
+ __func__, __LINE__, stream);
+ is_dsc_need_re_compute = true;
+ goto out;
+ }
}
}
- if (new_stream_on_link_num == 0)
- return false;
+ if (new_stream_on_link_num == 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n",
+ __func__, __LINE__);
+ is_dsc_need_re_compute = false;
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n",
+ __func__, dc->current_state->stream_count);
/* check current_state for streams on this link that are not in
* the new request state
@@ -1310,11 +1366,18 @@ static bool is_dsc_need_re_compute(
if (j == new_stream_on_link_num) {
/* not in new state */
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required."
+ "stream 0x%p in current dc_state but not in new dc_state\n",
+ __func__, __LINE__, stream);
is_dsc_need_re_compute = true;
break;
}
}
+out:
+ DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n",
+ __func__, is_dsc_need_re_compute ? "required" : "not required");
+
return is_dsc_need_re_compute;
}
@@ -1343,6 +1406,9 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n",
+ __func__, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1375,8 +1441,11 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n",
+ __func__, __LINE__, stream);
return -EINVAL;
+ }
}
return ret;
@@ -1405,6 +1474,9 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n",
+ i, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1494,12 +1566,12 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ret = 0;
if (!is_dsc_precompute_needed(state)) {
- DRM_INFO_ONCE("DSC precompute is not needed.\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__);
return 0;
}
ret = dm_atomic_get_state(state, dm_state_ptr);
if (ret != 0) {
- DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__);
return ret;
}
dm_state = *dm_state_ptr;
@@ -1553,7 +1625,8 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
- DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n",
+ __func__, __LINE__);
ret = -EINVAL;
goto clean_exit;
}
@@ -1567,12 +1640,15 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (local_dc_state->streams[i] &&
dc_is_timing_changed(stream, local_dc_state->streams[i])) {
- DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
+ DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i);
} else {
int ind = find_crtc_index_in_state_by_stream(state, stream);
- if (ind >= 0)
+ if (ind >= 0) {
+ DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
+ __func__, __LINE__, stream);
state->crtcs[ind].new_state->mode_changed = 0;
+ }
}
}
clean_exit:
@@ -1697,7 +1773,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (stream_kbps <= end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient.");
+ DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n");
return DC_OK;
}
@@ -1710,7 +1786,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/* capable of dsc passthrough. dsc bitstream along the entire path */
if (aconnector->mst_output_port->passthrough_aux) {
if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint"
+ "Max dsc compression bw can't fit into end-to-end bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
@@ -1721,7 +1798,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/*Get last DP link BW capability*/
if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
if (stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
@@ -1734,7 +1812,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
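Illustrative arithmetic (numbers invented for the example): the effective
budget is the smaller of the root link and the virtual channel. With a root
link of 25,920,000 kbps (4-lane HBR3 after 8b/10b) and a virtual channel of
10,000,000 kbps, min() gives 10,000,000 kbps; a stream needing 15,000,000 kbps
uncompressed then has to fit its maximum-compression rate (bw_range.min_kbps)
under that same 10,000,000 kbps budget to validate.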
@@ -1751,9 +1830,9 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("Require dsc and dsc config found\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n");
} else {
- DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1775,11 +1854,11 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (branch_max_throughput_mps != 0 &&
((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
- DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
- DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 5cb11cc2d063..25f63b2e7a8e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1283,6 +1283,7 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
int x, y;
int xorigin = 0, yorigin = 0;
@@ -1314,12 +1315,14 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
y = 0;
}
position->enable = true;
- position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
position->y_hotspot = yorigin;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
+ position->translate_by_source = true;
+
return 0;
}
@@ -1377,7 +1380,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ if (afb)
+ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 80069651def3..8992e697759f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -35,7 +35,6 @@ DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
-DC_LIBS += dcn314
DC_LIBS += dml
DC_LIBS += dml2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 506f82cd5cc6..88d3f9d7dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -486,3 +486,30 @@ int dc_fixpt_s4d19(struct fixed31_32 arg)
else
return ux_dy(arg.value, 4, 19);
}
+
+struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct fixed31_32 fixpt_value = dc_fixpt_zero;
+ struct fixed31_32 fixpt_int_value = dc_fixpt_zero;
+ long long frac_mask = ((long long)1 << (long long)integer_bits) - 1;
+
+ fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ frac_mask = frac_mask << fractional_bits;
+ fixpt_int_value.value = value & frac_mask;
+ fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ fixpt_value.value |= fixpt_int_value.value;
+ return fixpt_value;
+}
+
+struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct fixed31_32 fixpt_value = dc_fixpt_from_int(int_value);
+
+ fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ return fixpt_value;
+}
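Quick standalone check of the packing above (illustrative, not driver code):
with FIXED31_32_BITS_PER_FRACTIONAL_PART = 32, a register field with 19
fractional bits holding int_value = 1 and frac_value = 1 should decode to
1 + 2^-19:

#include <stdio.h>

int main(void)
{
	const unsigned int frac_bits = 19;	/* e.g. an x.19 register field */
	long long v = (1LL << 32)		/* integer part = 1 */
		    | (1LL << (32 - frac_bits));	/* frac_value = 1 */

	printf("%.9f\n", (double)v / 4294967296.0);	/* prints 1.000001907 */
	return 0;
}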
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 4254bdfefe38..7d18f372ce7a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -227,7 +227,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev = 0;
- if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev))
+ if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) && (bp->base.ctx->dc->ctx->dce_version <= DCN_VERSION_2_0))
BREAK_TO_DEBUGGER();
switch (crev) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 78df96882d6e..f8409453434c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -195,7 +195,7 @@ void dce11_pplib_apply_display_requirements(
* , then change minimum memory clock based on real-time bandwidth
* limitation.
*/
- if ((dc->ctx->asic_id.chip_family == FAMILY_AI) &&
+ if (dc->bw_vbios && (dc->ctx->asic_id.chip_family == FAMILY_AI) &&
ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
(uint32_t) div64_s64(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 70ee0089a20d..97164b5585a8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -120,25 +120,40 @@ static int dcn35_get_active_display_cnt_wa(
return display_count;
}
-
static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
+ if (dc->ctx->dce_environment == DCE_ENV_DIAG)
+ return;
+
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
-
+ bool stream_changed_otg_dig_on = false;
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
+ stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
+ old_pipe->stream != new_pipe->stream &&
+ old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
+ new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
+ new_pipe->stream->link_enc->funcs->is_dig_enabled &&
+ new_pipe->stream->link_enc->funcs->is_dig_enabled(
+ new_pipe->stream->link_enc) &&
+ new_pipe->stream_res.stream_enc &&
+ new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
+ new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc)) {
+ !pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
+ /* This w/a should not trigger when we have a dig active */
if (disable) {
- if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->disable_crtc)
- pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else {
@@ -367,6 +382,9 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
+ new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
@@ -1082,7 +1100,7 @@ void dcn35_clk_mgr_construct(
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(struct dcn35_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
@@ -1094,7 +1112,7 @@ void dcn35_clk_mgr_construct(
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
@@ -1191,7 +1209,7 @@ void dcn35_clk_mgr_construct(
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 45fe17a46890..8cfc5f435937 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -14,6 +14,7 @@
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
+#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn401_smu14_driver_if.h"
@@ -29,6 +30,7 @@
#define mmCLK01_CLK0_CLK2_DFS_CNTL 0x16E6F
#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E72
#define mmCLK01_CLK0_CLK4_DFS_CNTL 0x16E75
+#define mmCLK20_CLK2_CLK2_DFS_CNTL 0x1B051
#define CLK0_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL
#define CLK0_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL
@@ -302,6 +304,197 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dprefclk_did = 0;
+ uint32_t dcfclk_did = 0;
+ uint32_t dtbclk_did = 0;
+ uint32_t dispclk_did = 0;
+ uint32_t dppclk_did = 0;
+ uint32_t fclk_did = 0;
+ uint32_t target_div = 0;
+
+ /* DFS Slice 0 is used for DISPCLK */
+ dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL);
+ /* DFS Slice 1 is used for DPPCLK */
+ dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL);
+ /* DFS Slice 2 is used for DPREFCLK */
+ dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL);
+ /* DFS Slice 3 is used for DCFCLK */
+ dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL);
+ /* DFS Slice 4 is used for DTBCLK */
+ dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL);
+ /* DFS Slice _ is used for FCLK */
+ fclk_did = REG_READ(CLK2_CLK2_DFS_CNTL);
+
+ /* Convert DISPCLK DFS Slice DID to divider*/
+ target_div = dentist_get_divider_from_did(dispclk_did);
+ //Get dispclk in khz
+ regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DPPCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dppclk_did);
+ //Get dppclk in khz
+ regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DPREFCLK DFS Slice DID to divider*/
+ target_div = dentist_get_divider_from_did(dprefclk_did);
+ //Get dprefclk in khz
+ regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DCFCLK DFS Slice DID to divider*/
+ target_div = dentist_get_divider_from_did(dcfclk_did);
+ //Get dcfclk in khz
+ regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DTBCLK DFS Slice DID to divider*/
+ target_div = dentist_get_divider_from_did(dtbclk_did);
+ //Get dtbclk in khz
+ regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert FCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(fclk_did);
+ //Get fclk in khz
+ regs_and_bypass->fclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+}
+
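Illustrative conversion (made-up values): dentist_get_divider_from_did()
returns a divider pre-scaled by DENTIST_DIVIDER_RANGE_SCALE_FACTOR, so each
slice decodes as scale * dentist_vco_freq_khz / scaled_divider. Assuming a
scale factor of 4 and a 3,600,000 kHz DENTIST VCO, a DID that decodes to a
scaled divider of 8 (a true divide-by-2) reads back as
4 * 3600000 / 8 = 1,800,000 kHz.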
+static bool dcn401_check_native_scaling(struct pipe_ctx *pipe)
+{
+ bool is_native_scaling = false;
+ int width = pipe->plane_state->src_rect.width;
+ int height = pipe->plane_state->src_rect.height;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
+
+static void dcn401_auto_dpm_test_log(
+ struct dc_clocks *new_clocks,
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ unsigned int mall_ss_size_bytes;
+ int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
+
+ struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+ int active_pipe_count = 0;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+ pipe_ctx_list[active_pipe_count] = pipe_ctx;
+ active_pipe_count++;
+ }
+ }
+
+ msleep(5);
+
+ mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
+
+ struct clk_log_info log_info = {0};
+ struct clk_state_registers_and_bypass clk_register_dump;
+
+ dcn401_dump_clk_registers(&clk_register_dump, &clk_mgr->base, &log_info);
+
+ // Overrides for these clocks in case there is no p_state change support
+ dramclk_khz_override = new_clocks->dramclk_khz;
+ fclk_khz_override = new_clocks->fclk_khz;
+
+ num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+
+ if (!new_clocks->p_state_change_support)
+ dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
+
+ if (!new_clocks->fclk_p_state_change_support)
+ fclk_khz_override = clk_mgr->base.bw_params->clk_table.entries[num_fclk_levels].fclk_mhz * 1000;
+
+
+ ////////////////////////////////////////////////////////////////////////////
+ // IMPORTANT: When adding more clocks to these logs, do NOT put a newline
+ // anywhere other than at the very end of the string.
+ //
+ // Formatting example (make sure to have " - " between each entry):
+ //
+ // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
+ ////////////////////////////////////////////////////////////////////////////
+ if (active_pipe_count > 0 &&
+ new_clocks->dramclk_khz > 0 &&
+ new_clocks->fclk_khz > 0 &&
+ new_clocks->dcfclk_khz > 0 &&
+ new_clocks->dppclk_khz > 0) {
+
+ uint32_t pix_clk_list[MAX_PIPES] = {0};
+ int p_state_list[MAX_PIPES] = {0};
+ int disp_src_width_list[MAX_PIPES] = {0};
+ int disp_src_height_list[MAX_PIPES] = {0};
+ uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+ bool is_scaled_list[MAX_PIPES] = {0};
+
+ for (int i = 0; i < active_pipe_count; i++) {
+ struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+ uint64_t refresh_rate;
+
+ pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+ p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+ refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ curr_pipe_ctx->stream->timing.v_total
+ * (uint64_t) curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+ disp_src_refresh_list[i] = refresh_rate;
+
+ if (curr_pipe_ctx->plane_state) {
+ is_scaled_list[i] = !(dcn401_check_native_scaling(curr_pipe_ctx));
+ disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+ disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+ }
+ }
+
+ DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
+ "dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
+ "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
+ "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+ "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+ "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+ "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+ "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+ "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+ "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
+ dramclk_khz_override,
+ fclk_khz_override,
+ new_clocks->dcfclk_khz,
+ new_clocks->dppclk_khz,
+ clk_register_dump.dispclk,
+ clk_register_dump.dppclk,
+ clk_register_dump.dprefclk,
+ clk_register_dump.dcfclk,
+ clk_register_dump.dtbclk,
+ clk_register_dump.fclk,
+ pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3],
+ mall_ss_size_bytes,
+ p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+ disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+ disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+ disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+ disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
+ }
+}
+
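Sanity check of the refresh-rate rounding above (example timing, not from a
log): adding v_total * h_total - 1 before the divisions makes the integer
math a ceiling division. For pix_clk_100hz = 1485000, h_total = 2200,
v_total = 1125: (1485000 * 100 + 2475000 - 1) / 2475000 = 60, i.e. a
148.5 MHz pixel clock on a 2200x1125 total raster logs exactly 60 Hz.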
static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context,
int ref_dtbclk_khz)
@@ -324,10 +517,12 @@ static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr
if (!use_hpo_encoder)
continue;
- otg_master->clock_source->funcs->program_pix_clk(
+ if (otg_master->stream_res.pix_clk_params.controller_id > CONTROLLER_ID_UNDEFINED)
+ otg_master->clock_source->funcs->program_pix_clk(
otg_master->clock_source,
&otg_master->stream_res.pix_clk_params,
- dccg->ctx->dc->link_srv->dp_get_encoding_format(&otg_master->link_config.dp_link_settings),
+ dccg->ctx->dc->link_srv->dp_get_encoding_format(
+ &otg_master->link_config.dp_link_settings),
&otg_master->pll_settings);
}
}
@@ -738,12 +933,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned
static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
struct clk_mgr *clk_mgr_base,
struct dc_state *context,
+ struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
struct dc *dc = clk_mgr_base->ctx->dc;
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence;
bool enter_display_off = false;
bool update_active_fclk = false;
@@ -1025,13 +1220,13 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
static unsigned int dcn401_build_update_display_clocks_sequence(
struct clk_mgr *clk_mgr_base,
struct dc_state *context,
+ struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence;
bool force_reset = false;
bool update_dispclk = false;
@@ -1171,9 +1366,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->work_arounds.skip_clock_update)
- return;
-
if (dc->debug.enable_legacy_clock_update) {
dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
return;
@@ -1182,6 +1374,7 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
+ &context->bw_ctx.bw.dcn.clk,
safe_to_lower);
/* execute sequence */
@@ -1190,10 +1383,15 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
/* build display related clocks update sequence */
num_steps = dcn401_build_update_display_clocks_sequence(clk_mgr_base,
context,
+ &context->bw_ctx.bw.dcn.clk,
safe_to_lower);
/* execute sequence */
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
+
+ if (dc->config.enable_auto_dpm_test_logs)
+ dcn401_auto_dpm_test_log(&context->bw_ctx.bw.dcn.clk, TO_CLK_MGR_INTERNAL(clk_mgr_base), context);
+
}
@@ -1218,59 +1416,6 @@ static uint32_t dcn401_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_m
return dc_fixpt_floor(pll_req);
}
-static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
- struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- uint32_t dprefclk_did = 0;
- uint32_t dcfclk_did = 0;
- uint32_t dtbclk_did = 0;
- uint32_t dispclk_did = 0;
- uint32_t dppclk_did = 0;
- uint32_t target_div = 0;
-
- /* DFS Slice 0 is used for DISPCLK */
- dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL);
- /* DFS Slice 1 is used for DPPCLK */
- dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL);
- /* DFS Slice 2 is used for DPREFCLK */
- dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL);
- /* DFS Slice 3 is used for DCFCLK */
- dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL);
- /* DFS Slice 4 is used for DTBCLK */
- dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL);
-
- /* Convert DISPCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dispclk_did);
- //Get dispclk in khz
- regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DISPCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dppclk_did);
- //Get dppclk in khz
- regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DPREFCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dprefclk_did);
- //Get dprefclk in khz
- regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DCFCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dcfclk_did);
- //Get dcfclk in khz
- regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DTBCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dtbclk_did);
- //Get dtbclk in khz
- regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-}
-
static void dcn401_clock_read_ss_info(struct clk_mgr_internal *clk_mgr)
{
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
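
The deleted readback helper above turned each DFS divider ID (DID) into a clock frequency. A minimal standalone sketch of that math, assuming a stand-in scale factor and divider mapping (the driver's real dentist_get_divider_from_did() uses a piecewise HW table):

#include <stdint.h>

/* stand-in for DENTIST_DIVIDER_RANGE_SCALE_FACTOR; assumed value */
#define EXAMPLE_DIVIDER_SCALE_FACTOR 4

/* hypothetical DID-to-divider mapping; the real table is piecewise */
static uint32_t example_divider_from_did(uint32_t did)
{
        return did ? did : 1;
}

/* clock in kHz = scale factor * VCO freq / divider, as in the hunk above */
static uint32_t example_clk_khz_from_did(uint32_t did, uint32_t vco_freq_khz)
{
        return (EXAMPLE_DIVIDER_SCALE_FACTOR * vco_freq_khz) /
               example_divider_from_did(did);
}
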
@@ -1330,33 +1475,34 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ const struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dc_state *context = dc->current_state;
+ struct dc_clocks new_clocks;
+ int num_steps;
if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
return;
+ /* build clock update */
+ memcpy(&new_clocks, &clk_mgr_base->clks, sizeof(struct dc_clocks));
+
if (current_mode) {
- if (clk_mgr_base->clks.p_state_change_support)
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- else
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->max_memclk_mhz);
+ new_clocks.dramclk_khz = context->bw_ctx.bw.dcn.clk.dramclk_khz;
+ new_clocks.idle_dramclk_khz = context->bw_ctx.bw.dcn.clk.idle_dramclk_khz;
+ new_clocks.p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
} else {
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+ new_clocks.dramclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz * 1000;
+ new_clocks.idle_dramclk_khz = new_clocks.dramclk_khz;
+ new_clocks.p_state_change_support = true;
}
-}
-
-/* Set max memclk to highest DPM value */
-static void dcn401_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- return;
+ num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
+ context,
+ &new_clocks,
+ true);
- dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->max_memclk_mhz);
+ /* execute sequence */
+ dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
/* Get current memclk states, update bounding box */
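
The rework above replaces direct SMU hard-min calls with a clock update built in kHz. A small sketch of the MHz/kHz conversions involved, assuming khz_to_mhz_ceil rounds up as its name suggests:

/* round up so a hard minimum never lands below the computed clock */
static unsigned int khz_to_mhz_ceil_example(unsigned int khz)
{
        return (khz + 999) / 1000;
}

/* clk_table entries are stored in MHz; the dc_clocks fields are in kHz */
static unsigned int mhz_to_khz_example(unsigned int mhz)
{
        return mhz * 1000;
}
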
@@ -1487,7 +1633,6 @@ static struct clk_mgr_funcs dcn401_funcs = {
.init_clocks = dcn401_init_clocks,
.notify_wm_ranges = dcn401_notify_wm_ranges,
.set_hard_min_memclk = dcn401_set_hard_min_memclk,
- .set_hard_max_memclk = dcn401_set_hard_max_memclk,
.get_memclk_states_from_smu = dcn401_get_memclk_states_from_smu,
.are_clock_states_equal = dcn401_are_clock_states_equal,
.enable_pme_wa = dcn401_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 85a2ef82afa5..ae788154896c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1254,7 +1254,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
dc_update_visual_confirm_color(dc, context, pipe);
}
@@ -1351,80 +1352,6 @@ static void disable_vbios_mode_if_required(
}
}
-/**
- * wait_for_blank_complete - wait for all active OPPs to finish pending blank
- * pattern updates
- *
- * @dc: [in] dc reference
- * @context: [in] hardware context in use
- */
-static void wait_for_blank_complete(struct dc *dc,
- struct dc_state *context)
-{
- struct pipe_ctx *opp_head;
- struct dce_hwseq *hws = dc->hwseq;
- int i;
-
- if (!hws->funcs.wait_for_blank_complete)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- opp_head = &context->res_ctx.pipe_ctx[i];
-
- if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
- dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
- continue;
-
- hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
- }
-}
-
-static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
-{
- struct pipe_ctx *otg_master;
- struct timing_generator *tg;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- otg_master = &context->res_ctx.pipe_ctx[i];
- if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
- dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
- continue;
- tg = otg_master->stream_res.tg;
- if (tg->funcs->wait_odm_doublebuffer_pending_clear)
- tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
- }
-
- /* ODM update may require to reprogram blank pattern for each OPP */
- wait_for_blank_complete(dc, context);
-}
-
-static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
-{
- int i;
- PERF_TRACE();
- for (i = 0; i < MAX_PIPES; i++) {
- int count = 0;
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
- continue;
-
- /* Timeout 100 ms */
- while (count < 100000) {
- /* Must set to false to start with, due to OR in update function */
- pipe->plane_state->status.is_flip_pending = false;
- dc->hwss.update_pending_status(pipe);
- if (!pipe->plane_state->status.is_flip_pending)
- break;
- udelay(1);
- count++;
- }
- ASSERT(!pipe->plane_state->status.is_flip_pending);
- }
- PERF_TRACE();
-}
-
/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
@@ -1822,10 +1749,18 @@ bool dc_validate_boot_timing(const struct dc *dc,
tg->funcs->get_optc_source(tg,
&numOdmPipes, &id_src[0], &id_src[1]);
- if (numOdmPipes == 2)
+ if (numOdmPipes == 2) {
pix_clk_100hz *= 2;
- if (numOdmPipes == 4)
+ } else if (numOdmPipes == 4) {
pix_clk_100hz *= 4;
+ } else if (se && se->funcs->get_pixels_per_cycle) {
+ uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);
+
+ if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return false;
+
+ pix_clk_100hz *= pixels_per_cycle;
+ }
// Note: In rare cases, HW pixclk may differ from crtc's pixclk
// slightly due to rounding issues in 10 kHz units.
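
A standalone sketch of the normalization applied above (a stand-in rewrite, not the driver function): the CRTC readback reports a per-slice clock, so it is scaled by the ODM split count, or by the encoder's pixels-per-cycle, before being compared against the timing:

#include <stdint.h>

static uint32_t example_normalize_pix_clk_100hz(uint32_t per_slice_100hz,
                                                uint32_t num_odm_pipes,
                                                uint32_t pixels_per_cycle)
{
        if (num_odm_pipes == 2 || num_odm_pipes == 4)
                return per_slice_100hz * num_odm_pipes;
        if (pixels_per_cycle > 1)
                return per_slice_100hz * pixels_per_cycle;
        return per_slice_100hz;
}
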
@@ -2100,12 +2035,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
- wait_for_no_pipes_pending(dc, context);
+ hwss_wait_for_no_pipes_pending(dc, context);
/*
* optimized dispclk depends on ODM setup. Need to wait for ODM
* update pending complete before optimizing bandwidth.
*/
- wait_for_odm_update_pending_complete(dc, context);
+ hwss_wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -2716,6 +2651,10 @@ static enum surface_update_type check_update_surfaces_for_stream(
overall_type = UPDATE_TYPE_FULL;
}
+ if (stream_update && stream_update->hw_cursor_req) {
+ overall_type = UPDATE_TYPE_FULL;
+ }
+
/* some stream updates require passive update */
if (stream_update) {
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
@@ -2751,6 +2690,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
su_flags->bits.fams_changed = 1;
+ if (stream_update->scaler_sharpener_update)
+ su_flags->bits.scaler_sharpener = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -3011,6 +2953,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_infopacket)
stream->vrr_infopacket = *update->vrr_infopacket;
+ if (update->hw_cursor_req)
+ stream->hw_cursor_req = *update->hw_cursor_req;
+
if (update->allow_freesync)
stream->allow_freesync = *update->allow_freesync;
@@ -3080,6 +3025,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
}
+ if (update->scaler_sharpener_update)
+ stream->scaler_sharpener_update = *update->scaler_sharpener_update;
}
static void backup_planes_and_stream_state(
@@ -3704,7 +3651,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
if (dc->debug.visual_confirm)
dc_update_visual_confirm_color(dc, context, pipe);
@@ -3739,7 +3687,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
surface_count,
stream,
context);
- } else {
+ } else if (stream_status) {
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3769,47 +3717,6 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
-{
-/*
- * This function calls HWSS to wait for any potentially double buffered
- * operations to complete. It should be invoked as a pre-amble prior
- * to full update programming before asserting any HW locks.
- */
- int pipe_idx;
- int opp_inst;
- int opp_count = dc->res_pool->res_cap->num_opp;
- struct hubp *hubp;
- int mpcc_inst;
- const struct pipe_ctx *pipe_ctx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
-
- hubp = pipe_ctx->plane_res.hubp;
- if (!hubp)
- continue;
-
- mpcc_inst = hubp->inst;
- // MPCC inst is equal to pipe index in practice
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if ((dc->res_pool->opps[opp_inst] != NULL) &&
- (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
- }
- wait_for_odm_update_pending_complete(dc, dc_context);
-}
-
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3833,13 +3740,14 @@ static void commit_planes_for_stream(struct dc *dc,
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
- wait_for_outstanding_hw_updates(dc, context);
+ hwss_process_outstanding_hw_updates(dc, dc->current_state);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
if (dc->debug.visual_confirm)
dc_update_visual_confirm_color(dc, context, pipe);
@@ -4127,7 +4035,8 @@ static void commit_planes_for_stream(struct dc *dc,
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
@@ -4335,7 +4244,8 @@ static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
dc->debug.force_disable_subvp = true;
for (i = 0; i < context->stream_count; i++) {
policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
- context->streams[i]->debug.force_odm_combine_segments = 0;
+ if (context->streams[i]->debug.allow_transition_for_forced_odm)
+ context->streams[i]->debug.force_odm_combine_segments = 0;
}
}
@@ -4686,7 +4596,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
-static void populate_fast_updates(struct dc_fast_update *fast_update,
+void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update)
@@ -4696,6 +4606,9 @@ static void populate_fast_updates(struct dc_fast_update *fast_update,
if (stream_update) {
fast_update[0].out_transfer_func = stream_update->out_transfer_func;
fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ } else {
+ fast_update[0].out_transfer_func = NULL;
+ fast_update[0].output_csc_transform = NULL;
}
for (i = 0; i < surface_count; i++) {
@@ -4729,6 +4642,26 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c
return false;
}
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].input_csc_color_matrix ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].coeff_reduction_factor ||
+ fast_update[i].cursor_csc_color_matrix)
+ return true;
+ }
+
+ return false;
+}
+
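
A hypothetical caller sketch for the newly exported pair (not part of this patch); it assumes DC kernel context and the declarations added to dc.h further below:

static bool example_needs_nonaddr_work(struct dc_fast_update *fast_update,
                                       struct dc_surface_update *srf_updates,
                                       int surface_count,
                                       struct dc_stream_update *stream_update)
{
        /* fill the per-surface fast-update descriptors first */
        populate_fast_updates(fast_update, srf_updates, surface_count,
                              stream_update);
        /* then test for non-address work (gamma, CSC, ...) */
        return fast_nonaddr_updates_exist(fast_update, surface_count);
}
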
static bool full_update_required(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4785,7 +4718,8 @@ static bool full_update_required(struct dc *dc,
stream_update->func_shaper ||
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->crtc_timing_adjust ||
+ stream_update->scaler_sharpener_update))
return true;
if (stream) {
@@ -5233,6 +5167,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc_z10_restore(dc);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -5244,6 +5180,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
default:
ASSERT(dc->current_state->stream_count == 0);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc_state_destruct(dc->current_state);
break;
@@ -5382,7 +5320,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
if (allow == dc->idle_optimizations_allowed)
return;
- if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
+ dc->hwss.apply_idle_power_optimizations(dc, allow))
dc->idle_optimizations_allowed = allow;
}
@@ -5451,9 +5390,10 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl
hubp->funcs->set_blank_regs(hubp, true);
}
}
-
- dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
- dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+ if (dc->clk_mgr->funcs->set_max_memclk)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ if (dc->clk_mgr->funcs->set_min_memclk)
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -5502,7 +5442,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
if (p_state_change_support) {
- if (funcMin <= softMax)
+ if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
// else: No-Op
} else {
@@ -5512,7 +5452,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
}
} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
if (p_state_change_support) {
- if (funcMin <= softMax)
+ if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
// else: No-Op
} else {
@@ -5563,6 +5503,9 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
+ if (!dc->caps.dmcub_support)
+ return false;
+
switch (dc->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 87e36d51c56d..7ee2be8f82c4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -636,57 +636,59 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_pipe) {
current_mpc_pipe = current_pipe;
while (current_mpc_pipe) {
- if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
- block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
- (*num_steps)++;
- }
- if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc;
- block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips;
- block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
- (*num_steps)++;
- }
- if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
- if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
- stream_status->mall_stream_config.type == SUBVP_MAIN) {
- block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
- block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
- block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
- block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ if (current_mpc_pipe->plane_state) {
+ if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
+ block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*num_steps)++;
+ }
+ if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc;
+ block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips;
+ block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*num_steps)++;
+ }
+ if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
+ if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
+ stream_status->mall_stream_config.type == SUBVP_MAIN) {
+ block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
+ block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*num_steps)++;
+ }
+
+ block_sequence[*num_steps].params.update_plane_addr_params.dc = dc;
+ block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
(*num_steps)++;
}
- block_sequence[*num_steps].params.update_plane_addr_params.dc = dc;
- block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
- (*num_steps)++;
- }
-
- if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
- block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc;
- block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state;
- block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
- (*num_steps)++;
- }
+ if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
+ block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state;
+ block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*num_steps)++;
+ }
- if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) {
- block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
- (*num_steps)++;
- }
- if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) {
- block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_SETUP_DPP;
- (*num_steps)++;
- }
- if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) {
- block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
- (*num_steps)++;
+ if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) {
+ block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) {
+ block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_SETUP_DPP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) {
+ block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*num_steps)++;
+ }
}
if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) {
block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc;
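
The restructured builder above keeps the same pattern throughout: append a (func, params) step and bump *num_steps. A reduced standalone sketch of that pattern with stand-in types:

enum example_step_func { EXAMPLE_FLIP, EXAMPLE_GAMMA };

struct example_step {
        enum example_step_func func;
        void *params;
};

static void example_append_step(struct example_step *seq,
                                unsigned int *num_steps,
                                enum example_step_func func, void *params)
{
        seq[*num_steps].func = func;
        seq[*num_steps].params = params;
        (*num_steps)++;
}
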
@@ -901,12 +903,12 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- struct dc_bias_and_scale bns_params = {0};
+ struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
//TODO :for CNVC set scale and bias registers if necessary
- build_prescale_params(&bns_params, plane_state);
- if (dpp->funcs->dpp_program_bias_and_scale)
+ if (dpp->funcs->dpp_program_bias_and_scale) {
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -976,3 +978,126 @@ void get_surface_tile_visual_confirm_color(
break;
}
}
+
+/**
+ * hwss_wait_for_all_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+void hwss_wait_for_all_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ hwss_wait_for_all_blank_complete(dc, context);
+}
+
+void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must be set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+}
+
+void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double-buffered
+ * operations to complete. It should be invoked as a preamble to full
+ * update programming, before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->res_cap->num_opp;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+ hwss_wait_for_odm_update_pending_complete(dc, dc_context);
+}
+
+void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+ /* wait for outstanding updates */
+ hwss_wait_for_outstanding_hw_updates(dc, dc_context);
+
+ /* perform outstanding post update programming */
+ if (dc->hwss.program_outstanding_updates)
+ dc->hwss.program_outstanding_updates(dc, dc_context);
+}
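
hwss_wait_for_no_pipes_pending() above bounds its wait at roughly 100 ms (100000 iterations of udelay(1)). The same bounded-poll shape in a standalone sketch, with a stubbed status check standing in for the HW query:

#include <stdbool.h>

/* stand-in for dc->hwss.update_pending_status() plus the flag read */
static bool example_flip_pending(void)
{
        return false;
}

static bool example_wait_no_flip_pending(void)
{
        int count = 0;

        /* timeout ~100 ms at 1 us per iteration */
        while (count < 100000) {
                if (!example_flip_pending())
                        return true;
                /* udelay(1) in the kernel; elided here */
                count++;
        }
        return false;
}
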
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index bcb5267b5a6b..c7599c40d4be 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -342,11 +342,6 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
- if (dc->debug.using_dml2)
- if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq)
- res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else
ASSERT_CRITICAL(false);
}
@@ -1511,8 +1506,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
- spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active;
- spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active;
// Convert pipe_ctx to respective input params for SPL
translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);
@@ -3241,6 +3234,8 @@ static bool are_stream_backends_same(
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
+ if (!old_stream || !stream)
+ return false;
if (!are_stream_backends_same(old_stream, stream))
return false;
@@ -3771,8 +3766,10 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
}
}
- if (!stream_status)
+ if (!stream_status) {
ASSERT(0);
+ return false;
+ }
for (i = 0; i < set_count; i++)
if (set[i].stream == stream)
@@ -5164,7 +5161,7 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
#if defined(CONFIG_DRM_AMD_DC_FP)
- dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, sec_pipe->stream_res.opp->inst);
#endif
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
@@ -5271,3 +5268,44 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
}
+
+/* Returns number of DET segments allocated for a given OTG_MASTER pipe */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES];
+
+ int dpp_count = 0;
+ int det_segments = 0;
+
+ if (!otg_master->stream)
+ return 0;
+
+ int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+
+ for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &state->res_ctx,
+ dpp_pipes);
+ for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++)
+ det_segments += dpp_pipes[dpp_idx]->hubp_regs.det_size;
+ }
+ }
+ return det_segments;
+}
+
+bool resource_is_hpo_acquired(struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index cd6570a1e20e..fe9f99f1bdf9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,6 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_AUX_REPLY ||
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e990346e51f6..2597e3fd562b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -211,10 +211,16 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
dml2_opt->use_clock_dc_limits = false;
- dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
+ if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ dc_state_release(state);
+ return NULL;
+ }
dml2_opt->use_clock_dc_limits = true;
- dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
+ if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ dc_state_release(state);
+ return NULL;
+ }
}
#endif
@@ -961,10 +967,10 @@ bool dc_state_is_fams2_in_use(
bool is_fams2_in_use = false;
if (state)
- is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
if (dc->current_state)
- is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
return is_fams2_in_use;
}
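
The dc_state_create() fix above routes both dml2_create() failures through a single release call. The general shape of that pattern, sketched with stand-in allocations:

#include <stdlib.h>

struct example_state {
        void *bw_ctx_a;
        void *bw_ctx_b;
};

static void example_release(struct example_state *s)
{
        /* free(NULL) is a no-op, so partially built states are safe */
        free(s->bw_ctx_b);
        free(s->bw_ctx_a);
        free(s);
}

static struct example_state *example_create(void)
{
        struct example_state *s = calloc(1, sizeof(*s));

        if (!s)
                return NULL;
        s->bw_ctx_a = malloc(64);
        if (!s->bw_ctx_a)
                goto fail;
        s->bw_ctx_b = malloc(64);
        if (!s->bw_ctx_b)
                goto fail;
        return s;
fail:
        example_release(s);
        return NULL;
}
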
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 73cdebcd9f37..4c94dd38be4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.291"
+#define DC_VER "3.2.299"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -261,10 +261,7 @@ struct dc_caps {
bool zstate_support;
bool ips_support;
uint32_t num_of_internal_disp;
- uint32_t max_dwb_htap;
- uint32_t max_dwb_vtap;
enum dp_protocol_version max_dp_protocol_version;
- bool spdif_aud;
unsigned int mall_size_per_mem_channel;
unsigned int mall_size_total;
unsigned int cursor_cache_size;
@@ -309,8 +306,6 @@ struct dc_bug_wa {
uint8_t dcfclk_ds: 1;
} clock_update_disable_mask;
bool skip_psr_ips_crtc_disable;
- //Customer Specific WAs
- uint32_t force_backlight_start_level;
};
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -466,6 +461,7 @@ struct dc_config {
bool use_assr_psp_message;
bool support_edp0_on_dp1;
unsigned int enable_fpo_flicker_detection;
+ bool disable_hbr_audio_dp2;
};
enum visual_confirm {
@@ -513,6 +509,7 @@ enum in_game_fams_config {
INGAME_FAMS_SINGLE_DISP_ENABLE, // enable in-game fams
INGAME_FAMS_DISABLE, // disable in-game fams
INGAME_FAMS_MULTI_DISP_ENABLE, //enable in-game fams for multi-display
+ INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY, //enable in-game fams for multi-display only for clamped RR strategies
};
/**
@@ -764,7 +761,8 @@ union dpia_debug_options {
uint32_t extend_aux_rd_interval:1; /* bit 2 */
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
- uint32_t reserved:27;
+ uint32_t disable_usb4_pm_support:1; /* bit 5 */
+ uint32_t reserved:26;
} bits;
uint32_t raw;
};
@@ -981,6 +979,7 @@ struct dc_debug_options {
bool disable_z10;
bool enable_z9_disable_interface;
bool psr_skip_crtc_disable;
+ uint32_t ips_skip_crtc_disable_mask;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
uint32_t fixed_vs_aux_delay_config_wa;
@@ -1053,8 +1052,10 @@ struct dc_debug_options {
unsigned int disable_spl;
unsigned int force_easf;
unsigned int force_sharpness;
+ unsigned int force_sharpness_level;
unsigned int force_lls;
bool notify_dpia_hr_bw;
+ bool enable_ips_visual_confirm;
};
@@ -1291,7 +1292,7 @@ struct dc_plane_state {
struct dc_gamma gamma_correction;
struct dc_transfer_func in_transfer_func;
- struct dc_bias_and_scale *bias_and_scale;
+ struct dc_bias_and_scale bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
@@ -1348,7 +1349,7 @@ struct dc_plane_state {
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
- unsigned int sharpnessX1000;
+ int sharpness_level;
enum linear_light_scaling linear_light_scaling;
};
@@ -1368,7 +1369,6 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
- bool front_buffer_rendering_active;
enum chroma_cositing cositing;
};
@@ -1585,6 +1585,12 @@ bool dc_acquire_release_mpc_3dlut(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
+void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update);
/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
@@ -1756,6 +1762,7 @@ struct dc_link {
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
+ bool disable_assr_for_uhbr;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index ded13026c8ff..1e7de0f03290 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -979,6 +979,9 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
@@ -1282,7 +1285,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
union dmub_shared_state_ips_driver_signals new_signals;
DC_LOG_IPS(
- "%s wait idle (ips1_commit=%d ips2_commit=%d)",
+ "%s wait idle (ips1_commit=%u ips2_commit=%u)",
__func__,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1328,7 +1331,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
}
DC_LOG_IPS(
- "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
+ "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
__func__,
allow_idle,
ips_fw->signals.bits.ips1_commit,
@@ -1371,7 +1374,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
@@ -1390,7 +1393,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
(!dc->debug.optimize_ips_handshake ||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1399,7 +1402,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1407,7 +1410,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
DC_LOG_IPS(
- "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1415,14 +1418,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
DC_LOG_IPS(
- "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
+ "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
DC_LOG_IPS(
- "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1430,7 +1433,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(1);
DC_LOG_IPS(
- "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
+ "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1438,7 +1441,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ASSERT(0);
DC_LOG_IPS(
- "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
+ "resync inbox1 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1449,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1457,7 +1460,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
}
@@ -1466,14 +1469,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
ips2_exit_count);
}
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
struct dmub_srv *dmub;
@@ -1482,12 +1485,38 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
dmub = dc_dmub_srv->dmub;
- if (powerState == DC_ACPI_CM_POWER_STATE_D0)
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0)
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
else
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
+ cmd.idle_opt_set_dc_power_state.header.payload_bytes =
+ sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
+
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
+ } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
+ } else {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
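
The payload_bytes arithmetic above follows the usual DMUB convention of "command size minus header size". A sketch of that convention with assumed stand-in types:

#include <stdint.h>
#include <string.h>

struct example_cmd_header {
        uint8_t type;
        uint8_t sub_type;
        uint16_t payload_bytes;
};

struct example_cmd {
        struct example_cmd_header header;
        uint32_t power_state;
};

static void example_build_cmd(struct example_cmd *cmd, uint32_t power_state)
{
        memset(cmd, 0, sizeof(*cmd));
        /* payload is everything after the header */
        cmd->header.payload_bytes = sizeof(*cmd) - sizeof(cmd->header);
        cmd->power_state = power_state;
}
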
bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
volatile const struct dmub_shared_state_ips_fw *ips_fw;
@@ -1672,22 +1701,17 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
- /* send global configuration parameters */
- global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms
- global_cmd->config.global.lock_wait_time_us = 5000; //5ms
- global_cmd->config.global.recovery_timeout_us = 5000; //5ms
- global_cmd->config.global.hwfq_flip_programming_delay_us = 100; //100us
-
- /* copy static feature configuration */
- global_cmd->config.global.features.all = dc->debug.fams2_config.all;
+ if (enable) {
+ /* send global configuration parameters */
+ memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
- /* apply feature configuration based on current driver state */
- global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
- global_cmd->config.global.features.bits.enable = enable;
+ /* copy static feature configuration overrides */
+ global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+ global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
- /* construct per-stream configs */
- if (enable) {
- for (i = 0; i < context->bw_ctx.bw.dcn.fams2_stream_count; i++) {
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
/* configure command header */
@@ -1702,12 +1726,15 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
}
}
- if (enable && context->bw_ctx.bw.dcn.fams2_stream_count) {
+ /* apply feature configuration based on current driver state */
+ global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ global_cmd->config.global.features.bits.enable = enable;
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
/* set multi pending for global, and unset for last stream cmd */
- global_cmd->config.global.num_streams = context->bw_ctx.bw.dcn.fams2_stream_count;
global_cmd->header.multi_cmd_pending = 1;
- cmd[context->bw_ctx.bw.dcn.fams2_stream_count].fams2_config.header.multi_cmd_pending = 0;
- num_cmds += context->bw_ctx.bw.dcn.fams2_stream_count;
+ cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 580940222777..42f0cb672d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -109,7 +109,29 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+/**
+ * dc_dmub_srv_set_power_state() - Sets the power state for DMUB service.
+ *
+ * Controls whether messaging the DMCUB or interfacing with it via HW register
+ * interaction is permissible.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state);
+
+/**
+ * dc_dmub_srv_notify_fw_dc_power_state() - Notifies firmware of the DC power state.
+ *
+ * Differs from dc_dmub_srv_set_power_state in that it needs to access HW in
+ * order to notify DMCUB of the state transition. Should be called after D0
+ * exit and before the D3 set power state.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state);
/**
* @dc_dmub_srv_should_detect() - Checks if link detection is required.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 959ae0df1e56..c10567ec1c81 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -763,13 +763,6 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
-enum chroma_cositing {
- CHROMA_COSITING_NONE,
- CHROMA_COSITING_LEFT,
- CHROMA_COSITING_TOPLEFT,
- CHROMA_COSITING_COUNT
-};
-
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 582606319764..cd6de93eb91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -42,26 +42,26 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
{
- scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c;
- scaling_quality->h_taps = spl_scaling_quality->h_taps;
- scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c;
- scaling_quality->v_taps = spl_scaling_quality->v_taps;
+ scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c + 1;
+ scaling_quality->h_taps = spl_scaling_quality->h_taps + 1;
+ scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c + 1;
+ scaling_quality->v_taps = spl_scaling_quality->v_taps + 1;
}
static void populate_ratios_from_splratios(struct scaling_ratios *ratios,
- const struct spl_ratios *spl_ratios)
+ const struct ratio *spl_ratios)
{
- ratios->horz = spl_ratios->horz;
- ratios->vert = spl_ratios->vert;
- ratios->horz_c = spl_ratios->horz_c;
- ratios->vert_c = spl_ratios->vert_c;
+ ratios->horz = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio >> 5, 3, 19);
+ ratios->vert = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio >> 5, 3, 19);
+ ratios->horz_c = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio_c >> 5, 3, 19);
+ ratios->vert_c = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio_c >> 5, 3, 19);
}
static void populate_inits_from_splinits(struct scl_inits *inits,
- const struct spl_inits *spl_inits)
+ const struct init *spl_inits)
{
- inits->h = spl_inits->h;
- inits->v = spl_inits->v;
- inits->h_c = spl_inits->h_c;
- inits->v_c = spl_inits->v_c;
+ inits->h = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int, spl_inits->h_filter_init_frac >> 5, 0, 19);
+ inits->v = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int, spl_inits->v_filter_init_frac >> 5, 0, 19);
+ inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
+ inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
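
The conversions above appear to assume SPL carries ratios in an unsigned 3.24 fixed-point format that is narrowed to 3.19 (the >> 5) before dc_fixpt_from_ux_dy() widens it to the driver's 31.32 representation; that format reading is an inference from the shifts, not stated in the patch. A sketch of the underlying shift math under that assumption:

#include <stdint.h>

/* widen an assumed u3.19 value to s31.32 (32 fractional bits) */
static int64_t example_fixpt_31_32_from_u3_19(uint32_t u3_19)
{
        return (int64_t)u3_19 << (32 - 19);
}
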
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
@@ -128,6 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_out.always_scale = pipe_ctx->stream->ctx->dc->debug.always_scale;
// Make spl input basic output info alpha_en field point to plane res scl_data lb_params alpha_en
spl_in->basic_out.alpha_en = pipe_ctx->plane_res.scl_data.lb_params.alpha_en;
+ spl_in->basic_out.use_two_pixels_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
// Make spl input basic input info scaling quality field point to plane state scaling_quality
populate_spltaps_from_taps(&spl_in->scaling_quality, &plane_state->scaling_quality);
// Translate edge adaptive scaler preference
@@ -138,24 +139,36 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
/* Translate adaptive sharpening preference */
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 0) {
- spl_in->adaptive_sharpness.enable = (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 1) ? true : false;
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 2)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 3)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness >= 4)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
- } else {
- spl_in->adaptive_sharpness.enable = plane_state->adaptive_sharpness_en;
- if (plane_state->sharpnessX1000 == 0)
+ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
+ unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
+ if (sharpness_setting == SHARPNESS_HW_OFF)
+ spl_in->adaptive_sharpness.enable = false;
+ else if (sharpness_setting == SHARPNESS_ZERO) {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else if (sharpness_setting == SHARPNESS_CUSTOM) {
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+
+ if (force_sharpness_level > 0) {
+ if (force_sharpness_level > 10)
+ force_sharpness_level = 10;
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = force_sharpness_level;
+ } else if (!plane_state->adaptive_sharpness_en) {
spl_in->adaptive_sharpness.enable = false;
- else if (plane_state->sharpnessX1000 < 999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (plane_state->sharpnessX1000 < 1999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else // Any other value is high sharpness
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = plane_state->sharpness_level;
+ }
}
// Translate linear light scaling preference
if (pipe_ctx->stream->ctx->dc->debug.force_lls > 0)
@@ -171,6 +184,21 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_in.tf_type = (enum spl_transfer_func_type) plane_state->in_transfer_func.type;
spl_in->basic_in.tf_predefined_type = (enum spl_transfer_func_predefined) plane_state->in_transfer_func.tf;
+ spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
+ spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
+	/* Check if the stream is in fullscreen and if it's HDR.
+ * Use this to determine sharpness levels
+ */
+ spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->hdr_multx100 = 0;
+ if (spl_in->is_hdr_on) {
+ spl_in->hdr_multx100 = (uint32_t)dc_fixpt_floor(dc_fixpt_mul(plane_state->hdr_mult,
+ dc_fixpt_from_int(100)));
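+		/* e.g. an hdr_mult of 2.5 in fixed31_32 format yields
+		 * hdr_multx100 = floor(2.5 * 100) = 250 (illustrative values) */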
+ /* Disable sharpness for HDR Mult > 6.0 */
+ if (spl_in->hdr_multx100 > 600)
+ spl_in->adaptive_sharpness.enable = false;
+ }
}
/// @brief Translate SPL output parameters to pipe context
@@ -179,15 +207,15 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out)
{
// Make scaler data recout point to spl output field recout
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->dscl_prog_data->recout);
// Make scaler data ratios point to spl output field ratios
- populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios);
+ populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->dscl_prog_data->ratios);
// Make scaler data viewport point to spl output field viewport
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->dscl_prog_data->viewport);
// Make scaler data viewport_c point to spl output field viewport_c
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->dscl_prog_data->viewport_c);
// Make scaler data taps point to spl output field scaling taps
- populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps);
+ populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->dscl_prog_data->taps);
// Make scaler data init point to spl output field init
- populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits);
+ populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->dscl_prog_data->init);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
index c73d640c3632..eaa5c5373b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
@@ -6,6 +6,7 @@
#define __DC_SPL_TRANSLATE_H__
#include "dc.h"
#include "resource.h"
+#include "dm_helpers.h"
/* Map SPL input parameters to pipe context
* @pipe_ctx: pipe context
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 8ebd7e9e776e..14ea47eda0c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -142,6 +142,7 @@ union stream_update_flags {
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
+ uint32_t scaler_sharpener : 1;
} bits;
uint32_t raw;
@@ -159,6 +160,12 @@ struct test_pattern {
struct dc_stream_debug_options {
char force_odm_combine_segments;
+ /*
+	 * When force_odm_combine_segments is non-zero, allow dc to
+	 * temporarily transition to ODM bypass when a minimal transition state
+	 * is required to prevent visual glitches from showing on the screen
+ */
+ char allow_transition_for_forced_odm;
};
#define LUMINANCE_DATA_TABLE_SIZE 10
@@ -260,6 +267,8 @@ struct dc_stream_state {
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
+ bool hw_cursor_req;
+
uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* from stream struct */
@@ -300,6 +309,7 @@ struct dc_stream_state {
bool is_phantom;
struct luminance_data lumin_data;
+ bool scaler_sharpener_update;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -344,6 +354,8 @@ struct dc_stream_update {
struct dc_cursor_attributes *cursor_attributes;
struct dc_cursor_position *cursor_position;
+ bool *hw_cursor_req;
+ bool *scaler_sharpener_update;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index c550e8997033..fd6dca735714 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -590,6 +590,7 @@ enum dc_psr_state {
PSR_STATE5c,
PSR_STATE_HWLOCK_MGR,
PSR_STATE_POLLVUPDATE,
+ PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME,
PSR_STATE_INVALID = 0xFF
};
@@ -1049,6 +1050,23 @@ union replay_error_status {
unsigned char raw;
};
+union replay_low_refresh_rate_enable_options {
+ struct {
+ //BIT[0-3]: Replay Low Hz Support control
+ unsigned int ENABLE_LOW_RR_SUPPORT :1;
+ unsigned int RESERVED_1_3 :3;
+ //BIT[4-15]: Replay Low Hz Enable Scenarios
+ unsigned int ENABLE_STATIC_SCREEN :1;
+ unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
+ unsigned int ENABLE_GENERAL_UI :1;
+ unsigned int RESERVED_7_15 :9;
+ //BIT[16-31]: Replay Low Hz Enable Check
+ unsigned int ENABLE_STATIC_FLICKER_CHECK :1;
+ unsigned int RESERVED_17_31 :15;
+ } bits;
+ unsigned int raw;
+};
+
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
@@ -1072,6 +1090,8 @@ struct replay_config {
bool replay_support_fast_resync_in_ultra_sleep_mode;
/* Replay error status */
union replay_error_status replay_error_status;
+ /* Replay Low Hz enable Options */
+ union replay_low_refresh_rate_enable_options low_rr_enable_options;
};
/* Replay feature flags*/
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 1e0292861244..160c299419b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -328,6 +328,17 @@
type DPSTREAMCLK1_GATE_DISABLE;\
type DPSTREAMCLK2_GATE_DISABLE;\
type DPSTREAMCLK3_GATE_DISABLE;\
+ type SYMCLKA_FE_GATE_DISABLE;\
+ type SYMCLKB_FE_GATE_DISABLE;\
+ type SYMCLKC_FE_GATE_DISABLE;\
+ type SYMCLKD_FE_GATE_DISABLE;\
+ type SYMCLKE_FE_GATE_DISABLE;\
+ type SYMCLKA_GATE_DISABLE;\
+ type SYMCLKB_GATE_DISABLE;\
+ type SYMCLKC_GATE_DISABLE;\
+ type SYMCLKD_GATE_DISABLE;\
+ type SYMCLKE_GATE_DISABLE;\
+
#define DCCG401_REG_FIELD_LIST(type) \
type OTG0_TMDS_PIXEL_RATE_DIV;\
@@ -346,11 +357,7 @@
type SYMCLK32_LE3_SRC_SEL;\
type SYMCLK32_LE2_EN;\
type SYMCLK32_LE3_EN;\
- type DP_DTO_ENABLE[MAX_PIPES];\
- type DSCCLK0_DTO_DB_EN;\
- type DSCCLK1_DTO_DB_EN;\
- type DSCCLK2_DTO_DB_EN;\
- type DSCCLK3_DTO_DB_EN;
+ type DP_DTO_ENABLE[MAX_PIPES];
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 68cd3258f4a9..838d72eaa87f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -24,6 +24,7 @@
#include "reg_helper.h"
#include "core_types.h"
+#include "resource.h"
#include "dcn35_dccg.h"
#define TO_DCN_DCCG(dccg)\
@@ -41,13 +42,1069 @@
#define DC_LOGGER \
dccg->ctx->logger
+enum symclk_fe_source {
+ SYMCLK_FE_SYMCLK_A = 0, // Select functional clock from backend symclk A
+ SYMCLK_FE_SYMCLK_B,
+ SYMCLK_FE_SYMCLK_C,
+ SYMCLK_FE_SYMCLK_D,
+ SYMCLK_FE_SYMCLK_E,
+ SYMCLK_FE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk_be_source {
+ SYMCLK_BE_PHYCLK = 0, // Select phy clk when sym_clk_enable = 1
+ SYMCLK_BE_DPIACLK_810 = 4,
+ SYMCLK_BE_DPIACLK_162 = 5,
+ SYMCLK_BE_DPIACLK_540 = 6,
+ SYMCLK_BE_DPIACLK_270 = 7,
+ SYMCLK_BE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum physymclk_source {
+ PHYSYMCLK_PHYCLK = 0, // Select symclk as source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_PHYD18CLK, // Select phyd18clk as the source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_PHYD32CLK, // Select phyd32clk as the source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dtbclk_source {
+ DTBCLK_DPREFCLK = 0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same)
+ DTBCLK_DPREFCLK_0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same)
+ DTBCLK_DTBCLK0, // Selects source for DTBCLK_P# as DTBCLK0
+	DTBCLK_DTBCLK1,    // Selects source for DTBCLK_P# as DTBCLK1
+ DTBCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dppclk_clock_source {
+ DPP_REFCLK = 0, // refclk is selected
+ DPP_DCCG_DTO, // Functional clock selected is DTO tuned DPPCLK
+};
+
+enum dp_stream_clk_source {
+	DP_STREAM_DTBCLK_P0 = 0,    // Selects functional clock for DP_STREAM_CLK as DTBCLK_P#
+ DP_STREAM_DTBCLK_P1,
+ DP_STREAM_DTBCLK_P2,
+ DP_STREAM_DTBCLK_P3,
+ DP_STREAM_DTBCLK_P4,
+ DP_STREAM_DTBCLK_P5,
+ DP_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum hdmi_char_clk {
+	HDMI_CHAR_PHYAD18CLK = 0,    // Selects functional clock for hdmi_char_clk as UNIPHYA PHYD18CLK
+ HDMI_CHAR_PHYBD18CLK,
+ HDMI_CHAR_PHYCD18CLK,
+ HDMI_CHAR_PHYDD18CLK,
+ HDMI_CHAR_PHYED18CLK,
+ HDMI_CHAR_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum hdmi_stream_clk_source {
+	HDMI_STREAM_DTBCLK_P0 = 0,    // Selects functional clock for HDMI_STREAM_CLK as DTBCLK_P#
+ HDMI_STREAM_DTBCLK_P1,
+ HDMI_STREAM_DTBCLK_P2,
+ HDMI_STREAM_DTBCLK_P3,
+ HDMI_STREAM_DTBCLK_P4,
+ HDMI_STREAM_DTBCLK_P5,
+ HDMI_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk32_se_clk_source {
+	SYMCLK32_SE_PHYAD32CLK = 0,    // Selects functional clock for SYMCLK32 as UNIPHYA PHYD32CLK
+ SYMCLK32_SE_PHYBD32CLK,
+ SYMCLK32_SE_PHYCD32CLK,
+ SYMCLK32_SE_PHYDD32CLK,
+ SYMCLK32_SE_PHYED32CLK,
+ SYMCLK32_SE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk32_le_clk_source {
+	SYMCLK32_LE_PHYAD32CLK = 0,    // Selects functional clock for SYMCLK32 as UNIPHYA PHYD32CLK
+ SYMCLK32_LE_PHYBD32CLK,
+ SYMCLK32_LE_PHYCD32CLK,
+ SYMCLK32_LE_PHYDD32CLK,
+ SYMCLK32_LE_PHYED32CLK,
+ SYMCLK32_LE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dsc_clk_source {
+ DSC_CLK_REF_CLK = 0, // Ref clock selected for DSC_CLK
+ DSC_DTO_TUNED_CK_GPU_DISCLK_3, // DTO divided clock selected as functional clock
+};
+
+
+static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk32_se_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
+ return;
+
+ /* SYMCLK32_ROOT_SE#_GATE_DISABLE will clock gate in DCCG */
+ /* SYMCLK32_SE#_GATE_DISABLE will clock gate in HPO only */
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk32_le_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_physymclk_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_fe_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_be_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* TBD add symclk_be in rcg control bits */
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dppclk_rcg(struct dccg *dccg,
+ int inst, bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dpstreamclk_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK0_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK1_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK2_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK3_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_smclk32_se_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dsc_clk_src_new(struct dccg *dccg, int inst, enum dsc_clk_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* DSCCLK#_EN=0 switches to refclock from functional clock */
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, src);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, src);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, src);
+ break;
+ case 3:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk32_se_src_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_se_clk_source src
+ )
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE0_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE1_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE2_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE3_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static int
+dccg35_is_symclk32_se_src_functional_le_new(struct dccg *dccg, int symclk_32_se_inst, int symclk_32_le_inst)
+{
+ uint32_t en;
+ uint32_t src_sel;
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_GET_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, &src_sel, SYMCLK32_SE3_EN, &en);
+
+ if (en == 1 && src_sel == symclk_32_le_inst)
+ return 1;
+
+ return 0;
+}
+
+
+static void dccg35_set_symclk32_le_src_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_le_clk_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src,
+ SYMCLK32_LE0_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src,
+ SYMCLK32_LE1_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dcn35_set_dppclk_src_new(struct dccg *dccg,
+ int inst, enum dppclk_clock_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, src);
+ break;
+ case 1:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, src);
+ break;
+ case 2:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, src);
+ break;
+ case 3:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dtbclk_p_src_new(
+ struct dccg *dccg,
+ enum dtbclk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* If DTBCLK_P#_EN is 0 refclock is selected as functional clock
+ * If DTBCLK_P#_EN is 1 functional clock is selected as DTBCLK_P#_SRC_SEL
+ */
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P0_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P0_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P1_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P1_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P2_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P2_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P3_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P3_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dpstreamclk_src_new(
+ struct dccg *dccg,
+ enum dp_stream_clk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK0_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK1_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+
+ break;
+ case 2:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK2_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+
+ break;
+ case 3:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK3_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_physymclk_src_new(
+ struct dccg *dccg,
+ enum physymclk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYASYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYBSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYCSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYDSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYESYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_be_src_new(
+ struct dccg *dccg,
+ enum symclk_be_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
+ SYMCLKA_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKA_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
+ SYMCLKB_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKB_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
+ SYMCLKC_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKC_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
+ SYMCLKD_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKD_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKE_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ }
+}
+
+static int dccg35_is_symclk_fe_src_functional_be(struct dccg *dccg,
+ int symclk_fe_inst,
+ int symclk_be_inst)
+{
+
+ uint32_t en = 0;
+ uint32_t src_sel = 0;
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (symclk_fe_inst) {
+ case 0:
+ REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_SRC_SEL, &src_sel, SYMCLKA_FE_EN, &en);
+ break;
+ case 1:
+ REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, &src_sel, SYMCLKB_FE_EN, &en);
+ break;
+ case 2:
+ REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, &src_sel, SYMCLKC_FE_EN, &en);
+ break;
+ case 3:
+ REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, &src_sel, SYMCLKD_FE_EN, &en);
+ break;
+ case 4:
+ REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, &src_sel, SYMCLKE_FE_EN, &en);
+ break;
+ }
+
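+	/* the FE is considered driven by this BE when its enable bit is set
+	 * and its source select points at the BE instance */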
+ if (en == 1 && src_sel == symclk_be_inst)
+ return 1;
+
+ return 0;
+}
+
+static void dccg35_set_symclk_fe_src_new(struct dccg *dccg, enum symclk_fe_source src, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
+ SYMCLKA_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKA_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
+ SYMCLKB_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKB_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
+ SYMCLKC_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKC_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
+ SYMCLKD_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKD_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKE_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ }
+}
+
+static uint32_t dccg35_is_fe_rcg(struct dccg *dccg, int inst)
+{
+ uint32_t enable = 0;
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 1:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 2:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 3:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 4:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ return enable;
+}
+
+static uint32_t dccg35_is_symclk32_se_rcg(struct dccg *dccg, int inst)
+{
+ uint32_t disable_l1 = 0;
+ uint32_t disable_l2 = 0;
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, &disable_l2);
+ break;
+ case 1:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, &disable_l2);
+ break;
+ case 2:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, &disable_l2);
+ break;
+ case 3:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, &disable_l2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+
+ /* return true if either block level or DCCG level gating is active */
+ return (disable_l1 | disable_l2);
+}
+
+static void dccg35_enable_symclk_fe_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk_fe_source src)
+{
+ dccg35_set_symclk_fe_rcg(dccg, inst, false);
+ dccg35_set_symclk_fe_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_symclk_fe_new(
+ struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_symclk_fe_src_new(dccg, SYMCLK_FE_REFCLK, inst);
+ dccg35_set_symclk_fe_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_symclk_be_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk_be_source src)
+{
+ dccg35_set_symclk_be_rcg(dccg, inst, false);
+ dccg35_set_symclk_be_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk_be_new(
+ struct dccg *dccg,
+ int inst)
+{
+ int i;
+
+ /* Switch from functional clock to refclock */
+ dccg35_set_symclk_be_src_new(dccg, inst, SYMCLK_BE_REFCLK);
+
+	/* Check if any other SEs are connected to this LE and disable them */
+ for (i = 0; i < 4; i++) {
+ /* Make sure FE is not already in RCG */
+ if (dccg35_is_fe_rcg(dccg, i) == 0) {
+ if (dccg35_is_symclk_fe_src_functional_be(dccg, i, inst))
+ dccg35_disable_symclk_fe_new(dccg, i);
+ }
+ }
+	/* Safe to RCG SYMCLK */
+ dccg35_set_symclk_be_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_symclk32_se_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_se_clk_source src)
+{
+ dccg35_set_symclk32_se_rcg(dccg, inst, false);
+ dccg35_set_symclk32_se_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk32_se_new(
+ struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_symclk32_se_src_new(dccg, SYMCLK32_SE_REFCLK, inst);
+ dccg35_set_symclk32_se_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_symclk32_le_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_le_clk_source src)
+{
+ dccg35_set_symclk32_le_rcg(dccg, inst, false);
+ dccg35_set_symclk32_le_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk32_le_new(
+ struct dccg *dccg,
+ int inst)
+{
+ int i;
+
+ /* Switch from functional clock to refclock */
+ dccg35_set_symclk32_le_src_new(dccg, inst, SYMCLK32_LE_REFCLK);
+
+	/* Check if any SEs are connected and disable them as well */
+ for (i = 0; i < 4; i++) {
+ /* Make sure FE is not already in RCG */
+ if (dccg35_is_symclk32_se_rcg(dccg, i) == 0) {
+			/* Disable any SE connected to this LE before RCG */
+ if (dccg35_is_symclk32_se_src_functional_le_new(dccg, i, inst))
+ dccg35_disable_symclk32_se_new(dccg, i);
+ }
+ }
+	/* Safe to RCG SYM32_LE */
+ dccg35_set_symclk32_le_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_physymclk_new(struct dccg *dccg,
+ int inst,
+ enum physymclk_source src)
+{
+ dccg35_set_physymclk_rcg(dccg, inst, false);
+ dccg35_set_physymclk_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_physymclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_physymclk_src_new(dccg, PHYSYMCLK_REFCLK, inst);
+ dccg35_set_physymclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dpp_clk_new(
+ struct dccg *dccg,
+ int inst,
+ enum dppclk_clock_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* Sanitize inst before use in array de-ref */
+ if (inst < 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dccg35_set_dppclk_rcg(dccg, inst, false);
+ dcn35_set_dppclk_src_new(dccg, inst, src);
+ /* Switch DPP clock to DTO */
+ REG_SET_2(DPPCLK_DTO_PARAM[inst], 0,
+ DPPCLK0_DTO_PHASE, 0xFF,
+ DPPCLK0_DTO_MODULO, 0xFF);
+}
+
+static void dccg35_disable_dpp_clk_new(
+ struct dccg *dccg,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* Sanitize inst before use in array de-ref */
+ if (inst < 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dcn35_set_dppclk_src_new(dccg, inst, DPP_REFCLK);
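+	/* program phase 0 / modulo 1 so the DTO generates no pulses */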
+ REG_SET_2(DPPCLK_DTO_PARAM[inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ dccg35_set_dppclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_disable_dscclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dsc_clk_src_new(dccg, inst, DSC_CLK_REF_CLK);
+ dccg35_set_dsc_clk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dscclk_new(struct dccg *dccg,
+ int inst,
+ enum dsc_clk_source src)
+{
+ dccg35_set_dsc_clk_rcg(dccg, inst, false);
+ dccg35_set_dsc_clk_src_new(dccg, inst, src);
+}
+
+static void dccg35_enable_dtbclk_p_new(struct dccg *dccg,
+ enum dtbclk_source src,
+ int inst)
+{
+ dccg35_set_dtbclk_p_rcg(dccg, inst, false);
+ dccg35_set_dtbclk_p_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_dtbclk_p_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dtbclk_p_src_new(dccg, DTBCLK_REFCLK, inst);
+ dccg35_set_dtbclk_p_rcg(dccg, inst, true);
+}
+
+static void dccg35_disable_dpstreamclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dpstreamclk_src_new(dccg, DP_STREAM_REFCLK, inst);
+ dccg35_set_dpstreamclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dpstreamclk_new(struct dccg *dccg,
+ enum dp_stream_clk_source src,
+ int inst)
+{
+ dccg35_set_dpstreamclk_rcg(dccg, inst, false);
+ dccg35_set_dpstreamclk_src_new(dccg, src, inst);
+}
+
static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
@@ -657,6 +1714,12 @@ static void dccg35_disable_symclk32_se(
}
}
+static void dccg35_init_cb(struct dccg *dccg)
+{
+ (void)dccg;
+	/* Any RCG should be done when the driver enters low power mode */
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -685,10 +1748,6 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
}
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0);
-
/*
dccg35_enable_global_fgcg_rep(
dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -869,47 +1928,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
}
/*get other front end connected to this backend*/
-static uint8_t dccg35_get_other_enabled_symclk_fe(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+static uint8_t dccg35_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst)
{
uint8_t num_enabled_symclk_fe = 0;
- uint32_t be_clk_en = 0, fe_clk_en[5] = {0}, be_clk_sel[5] = {0};
+ uint32_t fe_clk_en[5] = {0}, be_clk_sel[5] = {0};
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- switch (link_enc_inst) {
- case 0:
- REG_GET_3(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, &be_clk_en,
- SYMCLKA_FE_EN, &fe_clk_en[0],
- SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
- break;
- case 1:
- REG_GET_3(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, &be_clk_en,
- SYMCLKB_FE_EN, &fe_clk_en[1],
- SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
- break;
- case 2:
- REG_GET_3(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, &be_clk_en,
- SYMCLKC_FE_EN, &fe_clk_en[2],
- SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
- break;
- case 3:
- REG_GET_3(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, &be_clk_en,
- SYMCLKD_FE_EN, &fe_clk_en[3],
- SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
- break;
- case 4:
- REG_GET_3(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, &be_clk_en,
- SYMCLKE_FE_EN, &fe_clk_en[4],
- SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]);
- break;
- }
- if (be_clk_en) {
- /* for DPMST, this backend could be used by multiple front end.
- only disable the backend if this stream_enc_ins is the last active stream enc connected to this back_end*/
- uint8_t i;
- for (i = 0; i != link_enc_inst && i < ARRAY_SIZE(fe_clk_en); i++) {
- if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
- num_enabled_symclk_fe++;
- }
+ REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0],
+ SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
+
+ REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1],
+ SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
+
+ REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2],
+ SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
+
+ REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3],
+ SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
+
+ REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, &fe_clk_en[4],
+ SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]);
+
+ uint8_t i;
+
+ for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) {
+ if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
+ num_enabled_symclk_fe++;
}
return num_enabled_symclk_fe;
}
@@ -957,9 +2001,9 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
break;
}
- /*check other enabled symclk fe */
- num_enabled_symclk_fe = dccg35_get_other_enabled_symclk_fe(dccg, stream_enc_inst, link_enc_inst);
- /*only turn off backend clk if other front end attachecd to this backend are all off,
+	/* check other enabled symclk fe connected to this BE */
+ num_enabled_symclk_fe = dccg35_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst);
+	/* only turn off backend clk if all front ends attached to this backend are off,
for mst, only turn off the backend if this is the last front end*/
if (num_enabled_symclk_fe == 0) {
switch (link_enc_inst) {
@@ -997,6 +2041,336 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
+static void dccg35_set_dpstreamclk_cb(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst)
+{
+
+ enum dtbclk_source dtb_clk_src;
+ enum dp_stream_clk_source dp_stream_clk_src;
+
+ switch (src) {
+ case REFCLK:
+ dtb_clk_src = DTBCLK_REFCLK;
+ dp_stream_clk_src = DP_STREAM_REFCLK;
+ break;
+ case DPREFCLK:
+ dtb_clk_src = DTBCLK_DPREFCLK;
+ dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst;
+ break;
+ case DTBCLK0:
+ dtb_clk_src = DTBCLK_DTBCLK0;
+ dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
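+	/* REFCLK for both selections means the stream clock is being released,
+	 * so gate both clocks; otherwise ungate and program the requested sources */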
+ if (dtb_clk_src == DTBCLK_REFCLK &&
+ dp_stream_clk_src == DP_STREAM_REFCLK) {
+ dccg35_disable_dtbclk_p_new(dccg, otg_inst);
+ dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst);
+ } else {
+ dccg35_enable_dtbclk_p_new(dccg, dtb_clk_src, otg_inst);
+ dccg35_enable_dpstreamclk_new(dccg,
+ dp_stream_clk_src,
+ dp_hpo_inst);
+ }
+}
+
+static void dccg35_set_dpstreamclk_root_clock_gating_cb(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool power_on)
+{
+	/* power_on set indicates we need to ungate.
+	 * Currently called from optimize_bandwidth and prepare_bandwidth calls.
+	 * Since the clock source is not passed, restore to refclock on ungate.
+	 * Instance 0 is implied here since there is only one stream clock resource.
+	 * Redundant, as gating when enabled is achieved through set_dpstreamclk.
+ */
+ if (power_on)
+ dccg35_enable_dpstreamclk_new(dccg,
+ DP_STREAM_REFCLK,
+ dp_hpo_inst);
+ else
+ dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst);
+}
+
+static void dccg35_update_dpp_dto_cb(struct dccg *dccg, int dpp_inst,
+ int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->dpp_clock_gated[dpp_inst]) {
+ /*
+ * Do not update the DPPCLK DTO if the clock is stopped.
+ */
+ return;
+ }
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
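+		// illustrative: ref_dppclk = 600000 kHz, req_dppclk = 300000 kHz gives
+		// phase = ceil(255 * 300000 / 600000) = 128, so the DTO outputs
+		// 600000 * 128 / 255 ~= 301176 kHz, never below the requested rate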
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ /* Enable DPP CLK DTO output */
+ dccg35_enable_dpp_clk_new(dccg, dpp_inst, DPP_DCCG_DTO);
+
+ /* Program DTO */
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ } else
+ dccg35_disable_dpp_clk_new(dccg, dpp_inst);
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
+
+static void dccg35_dpp_root_clock_control_cb(
+ struct dccg *dccg,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (dccg->dpp_clock_gated[dpp_inst] == power_on)
+ return;
+	/* power_on set indicates we need to ungate.
+	 * Currently called from optimize_bandwidth and prepare_bandwidth calls.
+	 * Since the clock source is not passed, restore to refclock on ungate.
+	 * Redundant, as gating when enabled is achieved through update_dpp_dto.
+ */
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, !power_on);
+
+ dccg->dpp_clock_gated[dpp_inst] = !power_on;
+}
+
+static void dccg35_enable_symclk32_se_cb(
+ struct dccg *dccg,
+ int inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ dccg35_enable_symclk32_se_new(dccg, inst, (enum symclk32_se_clk_source)phyd32clk);
+}
+
+static void dccg35_disable_symclk32_se_cb(struct dccg *dccg, int inst)
+{
+ dccg35_disable_symclk32_se_new(dccg, inst);
+}
+
+static void dccg35_enable_symclk32_le_cb(
+ struct dccg *dccg,
+ int inst,
+ enum phyd32clk_clock_source src)
+{
+ dccg35_enable_symclk32_le_new(dccg, inst, (enum symclk32_le_clk_source) src);
+}
+
+static void dccg35_disable_symclk32_le_cb(struct dccg *dccg, int inst)
+{
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_symclk32_le_root_clock_gating_cb(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+	/* power_on set indicates we need to ungate.
+	 * Currently called from optimize_bandwidth and prepare_bandwidth calls.
+	 * Since the clock source is not passed, restore to refclock on ungate.
+	 * Redundant, as gating when enabled is achieved through disable_symclk32_le.
+ */
+ if (power_on)
+ dccg35_enable_symclk32_le_new(dccg, inst, SYMCLK32_LE_REFCLK);
+ else
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_physymclk_cb(
+ struct dccg *dccg,
+ int inst,
+ enum physymclk_clock_source clk_src,
+ bool force_enable)
+{
+ /* force_enable = 0 indicates we can switch to ref clock */
+ if (force_enable)
+ dccg35_enable_physymclk_new(dccg, inst, (enum physymclk_source)clk_src);
+ else
+ dccg35_disable_physymclk_new(dccg, inst);
+}
+
+static void dccg35_set_physymclk_root_clock_gating_cb(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+	/* Redundant; RCG is already done in disable_physymclk.
+	 * power_on = 1 indicates we need to ungate.
+ */
+ if (power_on)
+ dccg35_enable_physymclk_new(dccg, inst, PHYSYMCLK_REFCLK);
+ else
+ dccg35_disable_physymclk_new(dccg, inst);
+}
+
+static void dccg35_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+	/* power_on set indicates we need to ungate.
+	 * Currently called from optimize_bandwidth and prepare_bandwidth calls.
+	 * Since the clock source is not passed, restore to refclock on ungate.
+	 * Redundant, as gating when enabled is achieved through disable_symclk32_le.
+ */
+ if (power_on)
+ dccg35_enable_symclk32_le_new(dccg, inst, SYMCLK32_LE_REFCLK);
+ else
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_dtbclk_p_src_cb(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ uint32_t inst)
+{
+ if (src == DTBCLK0)
+ dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, inst);
+ else
+ dccg35_disable_dtbclk_p_new(dccg, inst);
+}
+
+static void dccg35_set_dtbclk_dto_cb(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params)
+{
+	/* set_dtbclk_p_src is typically called earlier to switch to DTBCLK;
+	 * if params->ref_dtbclk_khz and req_dtbclk_khz are 0, switch to refclock.
+ */
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* DTO Output Rate / Pixel Rate = 1/4 */
+ int req_dtbclk_khz = params->pixclk_khz / 4;
+
+ if (params->ref_dtbclk_khz && req_dtbclk_khz) {
+ uint32_t modulo, phase;
+
+ dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, params->otg_inst);
+
+ // phase / modulo = dtbclk / dtbclk ref
+ modulo = params->ref_dtbclk_khz * 1000;
+ phase = req_dtbclk_khz * 1000;
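+		/* illustrative: pixclk_khz = 594000 -> req_dtbclk_khz = 148500; with
+		 * ref_dtbclk_khz = 600000 the DTO output is
+		 * 600000 * phase / modulo = 148500 kHz */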
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 1);
+
+ REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
+ 1, 100);
+
+ /* program OTG_PIXEL_RATE_DIV for DIVK1 and DIVK2 fields */
+ dccg35_set_pixel_rate_div(dccg, params->otg_inst, PIXEL_RATE_DIV_BY_1, PIXEL_RATE_DIV_BY_1);
+
+		/* The recommended programming sequence to enable the DTBCLK DTO to generate
+		 * a valid pixel clock for the HPO DP stream encoder specifies that the DTO
+		 * source select should be set only after the DTO is enabled.
+ */
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ PIPE_DTO_SRC_SEL[params->otg_inst], 2);
+ } else {
+ dccg35_disable_dtbclk_p_new(dccg, params->otg_inst);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+ PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+ }
+}
+
+static void dccg35_disable_dscclk_cb(struct dccg *dccg,
+ int inst)
+{
+ dccg35_disable_dscclk_new(dccg, inst);
+}
+
+static void dccg35_enable_dscclk_cb(struct dccg *dccg, int inst)
+{
+ dccg35_enable_dscclk_new(dccg, inst, DSC_DTO_TUNED_CK_GPU_DISCLK_3);
+}
+
+static void dccg35_enable_symclk_se_cb(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+{
+ /* Switch to functional clock if already not selected */
+ dccg35_enable_symclk_be_new(dccg, SYMCLK_BE_PHYCLK, link_enc_inst);
+
+ dccg35_enable_symclk_fe_new(dccg, stream_enc_inst, (enum symclk_fe_source) link_enc_inst);
+
+}
+
+static void dccg35_disable_symclk_se_cb(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst)
+{
+ dccg35_disable_symclk_fe_new(dccg, stream_enc_inst);
+
+ /* DMU PHY sequence switches SYMCLK_BE (link_enc_inst) to ref clock once PHY is turned off */
+}
+
+void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
+{
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+ dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
+ }
+}
+
+static const struct dccg_funcs dccg35_funcs_new = {
+ .update_dpp_dto = dccg35_update_dpp_dto_cb,
+ .dpp_root_clock_control = dccg35_dpp_root_clock_control_cb,
+ .get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
+ .dccg_init = dccg35_init_cb,
+ .set_dpstreamclk = dccg35_set_dpstreamclk_cb,
+ .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating_cb,
+ .enable_symclk32_se = dccg35_enable_symclk32_se_cb,
+ .disable_symclk32_se = dccg35_disable_symclk32_se_cb,
+ .enable_symclk32_le = dccg35_enable_symclk32_le_cb,
+ .disable_symclk32_le = dccg35_disable_symclk32_le_cb,
+ .set_symclk32_le_root_clock_gating = dccg35_set_symclk32_le_root_clock_gating_cb,
+ .set_physymclk = dccg35_set_physymclk_cb,
+ .set_physymclk_root_clock_gating = dccg35_set_physymclk_root_clock_gating_cb,
+ .set_dtbclk_dto = dccg35_set_dtbclk_dto_cb,
+ .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg31_otg_add_pixel,
+ .otg_drop_pixel = dccg31_otg_drop_pixel,
+ .set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg35_disable_dscclk_cb,
+ .enable_dsc = dccg35_enable_dscclk_cb,
+ .set_pixel_rate_div = dccg35_set_pixel_rate_div,
+ .get_pixel_rate_div = dccg35_get_pixel_rate_div,
+ .trigger_dio_fifo_resync = dccg35_trigger_dio_fifo_resync,
+ .set_valid_pixel_rate = dccg35_set_valid_pixel_rate,
+ .enable_symclk_se = dccg35_enable_symclk_se_cb,
+ .disable_symclk_se = dccg35_disable_symclk_se_cb,
+ .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb,
+};
+
static const struct dccg_funcs dccg35_funcs = {
.update_dpp_dto = dccg35_update_dpp_dto,
.dpp_root_clock_control = dccg35_dpp_root_clock_control,
@@ -1026,6 +2400,7 @@ static const struct dccg_funcs dccg35_funcs = {
.enable_symclk_se = dccg35_enable_symclk_se,
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
+ .dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
};
struct dccg *dccg35_create(
@@ -1041,6 +2416,10 @@ struct dccg *dccg35_create(
BREAK_TO_DEBUGGER();
return NULL;
}
+ (void)&dccg35_disable_symclk_be_new;
+ (void)&dccg35_set_symclk32_le_root_clock_gating;
+ (void)&dccg35_set_smclk32_se_rcg;
+ (void)&dccg35_funcs_new;
base = &dccg_dcn->base;
base->ctx = ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 1586a45ca3bd..51f98c5c51c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -241,6 +241,7 @@ struct dccg *dccg35_create(
void dccg35_init(struct dccg *dccg);
void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value);
+void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
#endif //__DCN35_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 07f1f396ba52..0b889004509a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -730,35 +730,35 @@ void dccg401_init(struct dccg *dccg)
}
}
-static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, bool enable)
+static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- uint32_t phase = enable ? 1 : 0;
switch (inst) {
case 0:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1, DSCCLK0_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
- DSCCLK0_DTO_PHASE, phase,
+ DSCCLK0_DTO_PHASE, 1,
DSCCLK0_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
+
break;
case 1:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1, DSCCLK1_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
- DSCCLK1_DTO_PHASE, phase,
+ DSCCLK1_DTO_PHASE, 1,
DSCCLK1_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
break;
case 2:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1, DSCCLK2_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
- DSCCLK2_DTO_PHASE, phase,
+ DSCCLK2_DTO_PHASE, 1,
DSCCLK2_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
break;
case 3:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1, DSCCLK3_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
- DSCCLK3_DTO_PHASE, phase,
+ DSCCLK3_DTO_PHASE, 1,
DSCCLK3_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -774,15 +774,27 @@ static void dccg401_set_ref_dscclk(struct dccg *dccg,
switch (dsc_inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
break;
case 1:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
break;
case 2:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
break;
case 3:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 0);
break;
default:
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 8bcddc836347..a196ce9e8127 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -117,10 +117,6 @@
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_EN, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_EN, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_DTO_DB_EN, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index cf5f84fb9c69..eeed840073fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -630,6 +630,11 @@ void dce_aud_az_enable(struct audio *audio)
audio->inst, value);
}
+void dce_aud_az_disable_hbr_audio(struct audio *audio)
+{
+ set_high_bit_rate_capable(audio, false);
+}
+
void dce_aud_az_disable(struct audio *audio)
{
uint32_t value;
@@ -1293,6 +1298,7 @@ static const struct audio_funcs funcs = {
.az_enable = dce_aud_az_enable,
.az_disable = dce_aud_az_disable,
.az_configure = dce_aud_az_configure,
+ .az_disable_hbr_audio = dce_aud_az_disable_hbr_audio,
.destroy = dce_aud_destroy,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 539f881928d1..1b7b8b079af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -166,6 +166,7 @@ void dce_aud_hw_init(struct audio *audio);
void dce_aud_az_enable(struct audio *audio);
void dce_aud_az_disable(struct audio *audio);
+void dce_aud_az_disable_hbr_audio(struct audio *audio);
void dce_aud_az_configure(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index b8996d285f00..bb4ac5042c80 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -735,7 +735,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
- ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+
+ /* Check whether aux to be processed via dmub or dcn directly */
+ if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc
+ || ddc->ddc_pin == NULL) {
+ ret = dce_aux_transfer_dmub_raw(ddc, payload, &operation_result);
+ } else {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ }
+
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ccf153b7a467..cae18f8c1c9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -94,6 +94,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE_HWLOCK_MGR;
else if (raw_state == 0x61)
state = PSR_STATE_POLLVUPDATE;
+ else if (raw_state == 0x62)
+ state = PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME;
else
state = PSR_STATE_INVALID;
@@ -363,6 +365,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
copy_settings_data->debug.bitfields.force_full_frame_update = 0;
+ copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
if (psr_context->su_granularity_required == 0)
copy_settings_data->su_y_granularity = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 4d960dc5ce89..c31e4f26a305 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -12,6 +12,8 @@
#define MAX_PIPES 6
+#define GPINT_RETRY_NUM 20
+
static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
@@ -167,6 +169,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
+
copy_settings_data->flags.u32All = 0;
copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled);
copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
@@ -220,6 +224,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint32_t *residency, const bool is_start, enum pr_residency_mode mode)
{
uint16_t param = (uint16_t)(panel_inst << 8);
+ uint32_t i = 0;
switch (mode) {
case PR_RESIDENCY_MODE_PHY:
@@ -247,10 +252,17 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
- // Send gpint command and wait for ack
- if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
- residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- *residency = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // Send gpint command and wait for ack
+ if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
+ residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return;
+
+ udelay(100);
+ }
+
+	// all GPINT retries failed; report zero residency
+ *residency = 0;
}
/*
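
The retry added above bounds the GPINT handshake at GPINT_RETRY_NUM attempts with a 100 us back-off between attempts, and only zeroes the residency after every attempt has failed. Reduced to a self-contained sketch, with gpint_exec() and delay_us() as hypothetical stand-ins for dc_wake_and_execute_gpint() and udelay():

    #include <stdbool.h>
    #include <stdint.h>

    #define GPINT_RETRY_NUM 20

    /* Hypothetical stand-ins for the firmware call and the delay primitive. */
    static bool gpint_exec(uint16_t param, uint32_t *reply) { (void)param; (void)reply; return false; }
    static void delay_us(unsigned int us) { (void)us; }

    /* Bounded retry: keep the firmware reply on success, report 0 after all retries. */
    static void read_residency(uint16_t param, uint32_t *residency)
    {
            uint32_t i;

            for (i = 0; i < GPINT_RETRY_NUM; i++) {
                    if (gpint_exec(param, residency))
                            return;
                    delay_us(100); /* brief back-off before the next attempt */
            }
            *residency = 0; /* every attempt failed */
    }
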
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 49bcfe6ec999..fa422a8cbced 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1955,6 +1955,7 @@ void dce110_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 28c58f1dff2d..ee4de740aceb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -261,6 +261,7 @@ void dce110_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index bf35dc65ca29..9837dec837ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -438,6 +438,7 @@ static void dce110_timing_generator_v_program_timing(struct timing_generator *tg
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index eb3557965781..fcf59348eb62 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -697,6 +697,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index c1a85ee374d9..e5fb0e8333e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
- dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios);
}
static void dce60_timing_generator_enable_advanced_request(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 2df4654858be..003a9330c286 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
- dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios);
}
static void dce80_timing_generator_enable_advanced_request(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 9923d0d620d4..e1f6623d4936 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,8 +24,6 @@
DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_opp.o \
- dcn10_mpc.o \
dcn10_cm_common.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 0b49362f71b0..eaed5d1c398a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -591,6 +591,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
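
The guard added above (and repeated twice in dcn30_cm_common.c below) fails the translation instead of reading past the end of the transfer-function point tables when the stepped sample index overruns. The shape of the clamp, as a self-contained sketch with an illustrative table size:

    #include <stdbool.h>

    #define TRANSFER_FUNC_POINTS 1025 /* illustrative size, not necessarily the driver's */

    static bool sample_points(const unsigned short *src, unsigned short *dst,
                              unsigned int hw_points, unsigned int increment)
    {
            unsigned int i = 0, j;

            for (j = 0; j < hw_points; j++, i += increment) {
                    if (i >= TRANSFER_FUNC_POINTS)
                            return false; /* index would overrun the source table */
                    dst[j] = src[i];
            }
            return true;
    }
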
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index b3aeabc4d605..25ba0d310d46 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: MIT
# Copyright © 2019-2024 Advanced Micro Devices, Inc. All rights reserved.
-DCN20 = dcn20_mpc.o dcn20_opp.o dcn20_mmhubbub.o \
- dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
+DCN20 = dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 4c43af867d86..b17277de0340 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -23,15 +23,11 @@
#
#
-DCN30 := dcn30_mpc.o dcn30_vpg.o \
+DCN30 := dcn30_vpg.o \
dcn30_afmt.o \
- dcn30_dwb.o \
- dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
-
-
AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN30)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index b8327237ed44..f31f0e3abfc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -28,7 +28,7 @@
#include "reg_helper.h"
#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#include "custom_float.h"
#define REG(reg) reg
@@ -177,6 +177,8 @@ bool cm3_helper_translate_curve_to_hw_format(
i += increment) {
if (j == hw_points)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
@@ -335,6 +337,8 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index dc37dbf870df..fb4814ab3f05 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -3,7 +3,7 @@
#
# Makefile for dcn30.
-DCN301 = dcn301_dio_link_encoder.o dcn301_panel_cntl.o
+DCN301 = dcn301_panel_cntl.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
deleted file mode 100644
index a954e316aca2..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: MIT
-#
-# Copyright (C) 2021 Advanced Micro Devices, Inc. All the rights reserved
-#
-# Authors: AMD
-#
-# Makefile for dcn303.
-
-DCN3_03 = dcn303_init.o
-
-AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index e2601d0aba41..d510e4652c18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -5,7 +5,7 @@
# Makefile for dcn31.
DCN31 = dcn31_panel_cntl.o \
- dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
+ dcn31_apg.o \
dcn31_afmt.o dcn31_vpg.o
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
deleted file mode 100644
index 15fdcf7c6466..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
-#
-# Makefile for dcn314.
-
-DCN314 = dcn314_dio_stream_encoder.o
-
-AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile b/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
deleted file mode 100644
index ded1f3140beb..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
-
-DCN401 += dcn401_dio_link_encoder.o
-DCN401 += dcn401_dio_stream_encoder.o
-DCN401 += dcn401_mpc.o
-
-AMD_DAL_DCN401 = $(addprefix $(AMDDALPATH)/dc/dcn401/,$(DCN401))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN401)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/Makefile b/drivers/gpu/drm/amd/display/dc/dio/Makefile
index 67840e474d7a..0dfd480976f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dio/Makefile
@@ -52,6 +52,15 @@ AMD_DAL_DIO_DCN30 = $(addprefix $(AMDDALPATH)/dc/dio/dcn30/,$(DIO_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN30)
###############################################################################
+# DCN301
+###############################################################################
+DIO_DCN301 = dcn301_dio_link_encoder.o
+
+AMD_DAL_DIO_DCN301 = $(addprefix $(AMDDALPATH)/dc/dio/dcn301/,$(DIO_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN301)
+
+###############################################################################
# DCN31
###############################################################################
DIO_DCN31 = dcn31_dio_link_encoder.o
@@ -61,6 +70,15 @@ AMD_DAL_DIO_DCN31 = $(addprefix $(AMDDALPATH)/dc/dio/dcn31/,$(DIO_DCN31))
AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN31)
###############################################################################
+# DCN314
+###############################################################################
+DIO_DCN314 = dcn314_dio_stream_encoder.o
+
+AMD_DAL_DIO_DCN314 = $(addprefix $(AMDDALPATH)/dc/dio/dcn314/,$(DIO_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN314)
+
+###############################################################################
# DCN32
###############################################################################
DIO_DCN32 = dcn32_dio_link_encoder.o dcn32_dio_stream_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c
index 1b39a6e8a1ac..1b39a6e8a1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h
index 49f8d91d4951..49f8d91d4951 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 5b343f745cf3..5b343f745cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h
index 86548be591be..86548be591be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
index 05783daa62ac..2ed382a8e79c 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
@@ -23,7 +23,6 @@
*
*/
-
#include "reg_helper.h"
#include "core_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6a179e5ab417..6ab2a218b769 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -22,7 +22,6 @@
*
*/
-
#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn314/dcn314_dio_stream_encoder.h"
@@ -392,6 +391,14 @@ static void enc35_reset_fifo(struct stream_encoder *enc, bool reset)
udelay(10);
}
+static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
+	return reset_val != 0;
+}
void enc35_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -415,6 +422,24 @@ void enc35_enable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}
+static uint32_t enc35_get_pixels_per_cycle(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, &value);
+
+ switch (value) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ default:
+ ASSERT_CRITICAL(false);
+ return 1;
+ }
+}
+
static const struct stream_encoder_funcs dcn35_str_enc_funcs = {
.dp_set_odm_combine =
enc314_dp_set_odm_combine,
@@ -465,7 +490,9 @@ static const struct stream_encoder_funcs dcn35_str_enc_funcs = {
.set_input_mode = enc314_set_dig_input_mode,
.enable_fifo = enc35_enable_fifo,
.disable_fifo = enc35_disable_fifo,
+ .is_fifo_enabled = enc35_is_fifo_enabled,
.map_stream_to_link = enc35_stream_encoder_map_to_link,
+ .get_pixels_per_cycle = enc35_get_pixels_per_cycle,
};
void dcn35_dio_stream_encoder_construct(
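
enc35_get_pixels_per_cycle() above decodes DIG_FIFO_OUTPUT_PIXEL_MODE into a pixel rate and asserts on any encoding it does not recognize, falling back to one pixel per cycle. The same defensive decode as a standalone sketch; read_pixel_mode() is a hypothetical stand-in for the REG_GET of that field:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for reading DIG_FIFO_OUTPUT_PIXEL_MODE. */
    static uint32_t read_pixel_mode(void) { return 0; }

    static uint32_t pixels_per_cycle(void)
    {
            switch (read_pixel_mode()) {
            case 0:
                    return 1; /* one pixel per cycle */
            case 1:
                    return 2; /* two pixels per cycle */
            default:
                    assert(0); /* unexpected hardware encoding */
                    return 1;  /* conservative fallback */
            }
    }
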
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 34adae7ab6e8..2e4a46f1b499 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -210,4 +210,7 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);
enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid);
+bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream);
+bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream);
+
#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8a8efe408a9d..e9fea9c2162e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1132,7 +1132,8 @@ static void dcn20_adjust_freesync_v_startup(
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
- newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+	/* newVstartup is one line before the vsync point */
+ newVstartup = asic_blank_end + 1;
*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}
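
The adjustment above now pins newVstartup to asic_blank_end + 1, one line before the vsync point, rather than adding the full (v_total - asic_blank_start) stretch. The clamp it feeds, in miniature (illustrative sketch only):

    /* Raise vstartup to one line past blank end, never lower it. */
    static int adjust_freesync_vstartup(int vstartup_start, int asic_blank_end)
    {
            int new_vstartup = asic_blank_end + 1; /* one line before the vsync point */

            return new_vstartup > vstartup_start ? new_vstartup : vstartup_start;
    }
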
@@ -1562,6 +1563,8 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
+ pipes[pipe_cnt].pipe.src.cur0_src_width = 0;
+ pipes[pipe_cnt].pipe.src.cur1_src_width = 0;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 7c56ad0f8812..e7019c95ba79 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 3d95bfa5aca2..ae5251041728 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 98502a4f0567..9e1c18b90805 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -53,7 +53,7 @@ static void calculate_ttu_cursor(
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 9d399c4ce957..6f490d8d7038 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -160,8 +160,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
@@ -871,8 +871,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
* for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
* and the max of (VBLANK blanking time, MALL region)).
*/
- if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
- subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
+ if (drr_timing &&
+ stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
+ subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
schedulable = true;
return schedulable;
@@ -937,7 +938,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
- if (found) {
+ if (found && subvp_pipe) {
phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &phantom_stream->timing;
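
Both fixes above are NULL-dereference guards: drr_timing is validated before its refresh rate is read, and subvp_pipe before its stream is paired. The short-circuit shape, as a sketch with the timing struct condensed to the one field used:

    #include <stdbool.h>
    #include <stddef.h>

    struct drr_timing { unsigned int min_refresh_in_uhz; };

    static bool drr_window_fits(const struct drr_timing *t,
                                double stretched_us, double slack_us)
    {
            /* The NULL check short-circuits before any dereference. */
            return t != NULL &&
                   stretched_us < (1.0 / t->min_refresh_in_uhz) * 1000000.0 * 1000000.0 &&
                   slack_us > 0.0;
    }
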
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 4297402bdab3..8839faf42207 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -139,8 +139,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 410e4b671228..641a8cd019cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -523,6 +523,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
+ unsigned int pstate_keepout;
unsigned char interlaced;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index dae13f202220..d8bfc85e5dcd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -39,7 +39,7 @@
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
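
This is the fourth copy of the same fix in this patch: get_bytes_per_element() now falls back to 1 instead of 0 for unrecognized formats, since the result feeds pitch and bandwidth calculations where a zero would mean a divide-by-zero or a zero-sized request. The pattern, with illustrative format codes standing in for source_format_class:

    enum src_fmt { SRC_FMT_444_16, SRC_FMT_444_32, SRC_FMT_UNKNOWN };

    static unsigned int bytes_per_element(enum src_fmt fmt)
    {
            switch (fmt) {
            case SRC_FMT_444_16:
                    return 2;
            case SRC_FMT_444_32:
                    return 4;
            default:
                    return 1; /* never 0: callers divide by this value */
            }
    }
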
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index fea857214c0f..c4378e620cbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -35,8 +35,6 @@ frame_warn_flag := -Wframe-larger-than=2048
endif
endif
-# DRIVER_BUILD is mostly used in DML2.1 source
-subdir-ccflags-y += -DDRIVER_BUILD=1
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
@@ -81,13 +79,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
@@ -104,13 +100,11 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o :
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
@@ -126,13 +120,11 @@ DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_core/dml2_core_shared.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
DML21 += src/dml2_mcg/dml2_mcg_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4.o
DML21 += src/dml2_pmo/dml2_pmo_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
DML21 += src/dml2_standalone_libraries/lib_float_math.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 06387b8b0aee..b0d9aed0f265 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -31,7 +31,7 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
else
soc_bb = &dml2_socbb_dcn401;
- qos_params = &dml_dcn401_soc_qos_params;
+ qos_params = &dml_dcn4_variant_a_soc_qos_params;
}
/* patch soc bb */
@@ -516,7 +516,7 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -725,18 +725,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
const struct scaler_data *scaler_data = get_scaler_data_for_plane(dml_ctx, plane_state, context);
struct dc_stream_state *stream = context->streams[stream_index];
- if (stream->cursor_attributes.color_format == CURSOR_MODE_MONO)
- plane->cursor.cursor_bpp = 2;
- else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_1BIT_AND
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- plane->cursor.cursor_bpp = 32;
- } else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED) {
- plane->cursor.cursor_bpp = 64;
- } else
- plane->cursor.cursor_bpp = 32;
-
+ plane->cursor.cursor_bpp = 32;
plane->cursor.cursor_width = 256;
plane->cursor.num_cursors = 1;
@@ -788,6 +777,14 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
* certain cases. Hence do corrective active and disable scaling.
*/
plane->composition.scaler_info.enabled = false;
+ } else if ((plane_state->ctx->dc->config.use_spl == true) &&
+ (plane->composition.scaler_info.enabled == false)) {
+ /* To enable sharpener for 1:1, scaler must be enabled. If use_spl is set, then
+ * allow case where ratio is 1 but taps > 1
+ */
+ if ((scaler_data->taps.h_taps > 1) || (scaler_data->taps.v_taps > 1) ||
+ (scaler_data->taps.h_taps_c > 1) || (scaler_data->taps.v_taps_c > 1))
+ plane->composition.scaler_info.enabled = true;
}
/* always_scale is only used for debug purposes not used in production but has to be
@@ -827,6 +824,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
plane->tdlut.setup_for_tdlut = true;
+
switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
@@ -836,6 +834,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear;
break;
}
+
switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) {
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
@@ -844,8 +843,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
- } else
- plane->tdlut.setup_for_tdlut = false;
+ }
+ plane->tdlut.setup_for_tdlut |= dml_ctx->config.force_tdlut_enable;
plane->dynamic_meta_data.enable = false;
plane->dynamic_meta_data.lines_before_active_required = 0;
@@ -949,6 +948,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
int stream_index, plane_index;
int disp_cfg_stream_location, disp_cfg_plane_location;
struct dml2_display_cfg *dml_dispcfg = &dml_ctx->v21.display_config;
+ unsigned int plane_count = 0;
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -958,6 +958,11 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->minimize_det_reallocation = true;
dml_dispcfg->overrides.enable_subvp_implicit_pmo = true;
+ if (in_dc->debug.disable_unbounded_requesting) {
+ dml_dispcfg->overrides.hw.force_unbounded_requesting.enable = true;
+ dml_dispcfg->overrides.hw.force_unbounded_requesting.value = false;
+ }
+
for (stream_index = 0; stream_index < context->stream_count; stream_index++) {
disp_cfg_stream_location = map_stream_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]);
@@ -1002,33 +1007,39 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].overrides.uclk_pstate_change_strategy =
dml21_force_pstate_method_to_uclk_state_change_strategy(dml_ctx->config.pmo.force_pstate_method_values[stream_index]);
}
+
+ plane_count++;
}
}
}
+ if (plane_count == 0) {
+ dml_dispcfg->overrides.all_streams_blanked = true;
+ }
+
return true;
}
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context)
{
	/* TODO: these should be the max of active and SVP prefetch; idle should be tracked separately */
- context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dispclk_khz;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.dcfclk_khz;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.uclk_khz;
- context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.fclk_khz;
- context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.uclk_khz;
- context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.fclk_khz;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.deepsleep_dcfclk_khz;
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.dcfclk_khz;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.deepsleep_dcfclk_khz;
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = in_ctx->v21.mode_programming.programming->fclk_pstate_supported;
context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported;
- context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz > 0;
- context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz;
+ context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
+ context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
}
void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
{
struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;;
+ double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
/* invalid register set index */
@@ -1053,16 +1064,16 @@ static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_wat
switch (wm_index) {
case DML2_DCHUB_WATERMARK_SET_A:
- wm_regs = &watermarks->dcn4.a;
+ wm_regs = &watermarks->dcn4x.a;
break;
case DML2_DCHUB_WATERMARK_SET_B:
- wm_regs = &watermarks->dcn4.b;
+ wm_regs = &watermarks->dcn4x.b;
break;
case DML2_DCHUB_WATERMARK_SET_C:
- wm_regs = &watermarks->dcn4.c;
+ wm_regs = &watermarks->dcn4x.c;
break;
case DML2_DCHUB_WATERMARK_SET_D:
- wm_regs = &watermarks->dcn4.d;
+ wm_regs = &watermarks->dcn4x.d;
break;
case DML2_DCHUB_WATERMARK_SET_NUM:
default:
@@ -1110,10 +1121,11 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_
global_sync = &stream_programming->phantom_stream.global_sync;
}
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4.vready_offset_pixels;
+ pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
+ pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
+ pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
+ pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
@@ -1164,3 +1176,37 @@ void dml21_get_pipe_mcache_config(
mcache_pipe_config->plane1_enabled =
dml21_is_plane1_enabled(pln_prog->plane_descriptor->pixel_format);
}
+
+void dml21_set_dc_p_state_type(
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming,
+ bool sub_vp_enabled)
+{
+ switch (stream_programming->uclk_pstate_method) {
+ case dml2_uclk_pstate_support_method_vactive:
+ case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+ break;
+ case dml2_uclk_pstate_support_method_vblank:
+ case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ if (sub_vp_enabled)
+ pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+ else
+ pipe_ctx->p_state_type = P_STATE_V_BLANK;
+ break;
+ case dml2_uclk_pstate_support_method_fw_subvp_phantom:
+ case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ pipe_ctx->p_state_type = P_STATE_SUB_VP;
+ break;
+ case dml2_uclk_pstate_support_method_fw_drr:
+ if (sub_vp_enabled)
+ pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+ else
+ pipe_ctx->p_state_type = P_STATE_FPO;
+ break;
+ default:
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
+ break;
+ }
+}
+
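
dml21_set_dc_p_state_type() centralizes the mapping from DML2's uclk p-state method to DC's p_state_type, with the VBLANK and DRR outcomes depending on whether any SubVP main pipe is active. The same shape as a condensed sketch; the enums are illustrative stand-ins for dml2_uclk_pstate_support_method_* and P_STATE_*:

    #include <stdbool.h>

    enum method { M_VACTIVE, M_VBLANK, M_SUBVP, M_DRR };
    enum ptype  { P_VACT, P_VBLK, P_VBLK_SUBVP, P_SUBVP2, P_DRR_SUBVP2, P_FPO2, P_UNKNOWN2 };

    static enum ptype classify(enum method m, bool sub_vp_enabled)
    {
            switch (m) {
            case M_VACTIVE: return P_VACT;
            case M_VBLANK:  return sub_vp_enabled ? P_VBLK_SUBVP : P_VBLK;
            case M_SUBVP:   return P_SUBVP2;
            case M_DRR:     return sub_vp_enabled ? P_DRR_SUBVP2 : P_FPO2; /* DRR alone maps to FPO */
            default:        return P_UNKNOWN2;
            }
    }
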
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 4cc0a1fbb93d..476a7f6e4875 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -26,4 +26,5 @@ void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_water
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
+void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index d276458e50fd..51d491bffa32 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -11,7 +11,6 @@
#include "dml2_core_dcn4_calcs.h"
-
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id)
{
int i;
@@ -280,6 +279,23 @@ bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
dc_is_dp_signal(pipe_ctx->stream->signal));
}
+
+static bool is_sub_vp_enabled(struct dc *dc, struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_paired_subvp_stream(context, pipe_ctx->stream) &&
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog,
struct dml2_per_stream_programming *stream_prog)
{
@@ -310,12 +326,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
- pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4.dppclk_khz;
+ pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4x.dppclk_khz;
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipe_ctx->plane_res.bw.dppclk_khz)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;
dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+
+ bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context);
+
+ dml21_set_dc_p_state_type(pipe_ctx, stream_prog, sub_vp_enabled);
}
static struct dc_stream_state *dml21_add_phantom_stream(struct dml2_context *dml_ctx,
@@ -459,94 +479,103 @@ void dml21_build_fams2_programming(const struct dc *dc,
struct dml2_context *dml_ctx)
{
int i, j, k;
+ unsigned int num_fams2_streams = 0;
/* reset fams2 data */
- context->bw_ctx.bw.dcn.fams2_stream_count = 0;
memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
- if (!dml_ctx->v21.mode_programming.programming->fams2_required)
- return;
+ if (dml_ctx->v21.mode_programming.programming->fams2_required) {
+ for (i = 0; i < context->stream_count; i++) {
+ int dml_stream_idx;
+ struct dc_stream_state *phantom_stream;
+ struct dc_stream_status *phantom_status;
- for (i = 0; i < context->stream_count; i++) {
- int dml_stream_idx;
- struct dc_stream_state *phantom_stream;
- struct dc_stream_status *phantom_status;
-
- struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[context->bw_ctx.bw.dcn.fams2_stream_count];
-
- struct dc_stream_state *stream = context->streams[i];
-
- if (context->stream_status[i].plane_count == 0 ||
- dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
- /* can ignore blanked or phantom streams */
- continue;
- }
+ struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams];
- dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id);
- if (dml_stream_idx < 0) {
- ASSERT(dml_stream_idx >= 0);
- continue;
- }
+ struct dc_stream_state *stream = context->streams[i];
- /* copy static state from PMO */
- memcpy(static_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
- sizeof(struct dmub_fams2_stream_static_state));
-
- /* get information from context */
- static_state->num_planes = context->stream_status[i].plane_count;
- static_state->otg_inst = context->stream_status[i].primary_otg_inst;
-
- /* populate pipe masks for planes */
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
- static_state->pipe_mask |= (1 << k);
- static_state->plane_pipe_masks[j] |= (1 << k);
- }
+ if (context->stream_status[i].plane_count == 0 ||
+ dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
+ /* can ignore blanked or phantom streams */
+ continue;
}
- }
- /* get per method programming */
- switch (static_state->type) {
- case FAMS2_STREAM_TYPE_VBLANK:
- case FAMS2_STREAM_TYPE_VACTIVE:
- case FAMS2_STREAM_TYPE_DRR:
- break;
- case FAMS2_STREAM_TYPE_SUBVP:
- phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
- if (!phantom_stream)
- break;
+ dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id);
+ if (dml_stream_idx < 0) {
+ ASSERT(dml_stream_idx >= 0);
+ continue;
+ }
- phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);
+ /* copy static state from PMO */
+ memcpy(static_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
+ sizeof(struct dmub_fams2_stream_static_state));
- /* phantom status should always be present */
- ASSERT(phantom_status);
- static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+ /* get information from context */
+ static_state->num_planes = context->stream_status[i].plane_count;
+ static_state->otg_inst = context->stream_status[i].primary_otg_inst;
- /* populate pipe masks for phantom planes */
- for (j = 0; j < phantom_status->plane_count; j++) {
+ /* populate pipe masks for planes */
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
for (k = 0; k < dc->res_pool->pipe_count; k++) {
if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
- static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
- static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
+ static_state->pipe_mask |= (1 << k);
+ static_state->plane_pipe_masks[j] |= (1 << k);
+ }
+ }
+ }
+
+ /* get per method programming */
+ switch (static_state->type) {
+ case FAMS2_STREAM_TYPE_VBLANK:
+ case FAMS2_STREAM_TYPE_VACTIVE:
+ case FAMS2_STREAM_TYPE_DRR:
+ break;
+ case FAMS2_STREAM_TYPE_SUBVP:
+ phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
+ if (!phantom_stream)
+ break;
+
+ phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);
+
+ /* phantom status should always be present */
+ ASSERT(phantom_status);
+ static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+
+ /* populate pipe masks for phantom planes */
+ for (j = 0; j < phantom_status->plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
+ static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
+ static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ }
}
}
+ break;
+ default:
+ ASSERT(false);
+ break;
}
- break;
- default:
- ASSERT(false);
- break;
+
+ num_fams2_streams++;
}
+ }
+
+ if (num_fams2_streams > 0) {
+ /* copy FAMS2 configuration */
+ memcpy(&context->bw_ctx.bw.dcn.fams2_global_config,
+ &dml_ctx->v21.mode_programming.programming->fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
- context->bw_ctx.bw.dcn.fams2_stream_count++;
+ context->bw_ctx.bw.dcn.fams2_global_config.num_streams = num_fams2_streams;
}
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
}
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format)
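
The FAMS2 rework above replaces the incrementally bumped fams2_stream_count with a local counter, publishes the global config only when at least one stream qualifies, and keys fw_based_mclk_switching off the config's enable bit rather than the raw count. In outline, with the types condensed to the fields that matter here:

    #include <stdbool.h>
    #include <string.h>

    struct fams2_cfg { unsigned int num_streams; bool enable; };

    static bool build_fams2(bool required, const bool *qualifies, unsigned int count,
                            const struct fams2_cfg *src, struct fams2_cfg *global)
    {
            unsigned int n = 0, i;

            memset(global, 0, sizeof(*global)); /* reset first, as the hunk does */

            if (required)
                    for (i = 0; i < count; i++)
                            if (qualifies[i])
                                    n++; /* per-stream params are filled here in the driver */

            if (n > 0) {
                    *global = *src;          /* publish config only when streams exist */
                    global->num_streams = n;
            }
            return global->enable;           /* switching keys off the enable bit */
    }
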
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 41ecf00ed196..d35dd507cb9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -66,7 +66,9 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
disable_fams2;
pmo_options->disable_fams2 = disable_fams2;
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming;
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
index 521f77b8ac44..d82c681a5402 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
@@ -72,7 +72,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -128,7 +128,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
};
static const struct dml2_soc_bb dml2_socbb_dcn31 = {
@@ -228,7 +228,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -332,7 +332,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
},
.power_management_parameters = {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index fe07fcc3d0d5..8ef7977841de 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -8,7 +8,7 @@
#include "dml_top_soc_parameter_types.h"
-static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
+static const struct dml2_soc_qos_parameters dml_dcn4_variant_a_soc_qos_params = {
.derate_table = {
.system_active_urgent = {
.dram_derate_percent_pixel = 22,
@@ -52,7 +52,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -78,7 +78,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
};
static const struct dml2_soc_bb dml2_socbb_dcn401 = {
@@ -178,7 +178,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -282,7 +282,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
},
.power_management_parameters = {
@@ -344,6 +344,9 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.compressed_buffer_segment_size_in_kbytes = 64,
+ .max_flip_time_us = 80,
+ .max_flip_time_lines = 32,
+ .hostvm_mode = 0,
.subvp_drr_scheduling_margin_us = 100,
.subvp_prefetch_end_to_mall_start_us = 15,
.subvp_fw_processing_delay = 15,
@@ -351,14 +354,18 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.fams2 = {
.max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
+ .scheduling_delay_us = 125,
+ .vertical_interrupt_ack_delay_us = 40,
.allow_programming_delay_us = 18,
.min_allow_width_us = 20,
.subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
+ .subvp_programming_delay_us = 200,
.subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
+ .drr_programming_delay_us = 35,
+
+ .lock_timeout_us = 5000,
+ .recovery_timeout_us = 5000,
+ .flip_programming_delay_us = 300,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index a25f4e5977cf..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_H__
#define __DML_TOP_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 8247289ce7d3..83fc15bf13cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __dml2_TOP_DCHUB_REGISTERS_H__
#define __dml2_TOP_DCHUB_REGISTERS_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index daae77f2672b..b132f676a68d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_DISPLAY_CFG_TYPES_H__
#define __DML_TOP_DISPLAY_CFG_TYPES_H__
@@ -411,7 +410,6 @@ struct dml2_stream_parameters {
enum dml2_odm_mode odm_mode;
bool disable_dynamic_odm;
bool disable_subvp;
- bool disable_fams2_drr;
int minimum_vblank_idle_requirement_us;
bool minimize_active_latency_hiding;
@@ -478,6 +476,7 @@ struct dml2_display_cfg {
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
unsigned int best_effort_min_active_latency_hiding_us;
+ bool all_streams_blanked;
} overrides;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
index 2f444f448770..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_POLICY_TYPES_H__
#define __DML_TOP_POLICY_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 065b2afab6fb..ebd8abe894a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_SOC_PARAMETER_TYPES_H__
#define __DML_TOP_SOC_PARAMETER_TYPES_H__
@@ -27,7 +26,7 @@ struct dml2_soc_derates {
struct dml2_soc_derate_values system_idle_average;
};
-struct dml2_dcn3_soc_qos_params {
+struct dml2_dcn32x_soc_qos_params {
struct {
unsigned int base_latency_us;
unsigned int base_latency_pixel_vm_us;
@@ -53,7 +52,7 @@ struct dml2_dcn4_uclk_dpm_dependent_qos_params {
unsigned int average_latency_when_non_urgent_uclk_cycles;
};
-struct dml2_dcn4_soc_qos_params {
+struct dml2_dcn4x_soc_qos_params {
unsigned int df_qos_response_time_fclk_cycles;
unsigned int max_round_trip_to_furthest_cs_fclk_cycles;
unsigned int mall_overhead_fclk_cycles;
@@ -69,7 +68,7 @@ struct dml2_dcn4_soc_qos_params {
enum dml2_qos_param_type {
dml2_qos_param_type_dcn3,
- dml2_qos_param_type_dcn4
+ dml2_qos_param_type_dcn4x
};
struct dml2_soc_qos_parameters {
@@ -81,8 +80,8 @@ struct dml2_soc_qos_parameters {
} writeback;
union {
- struct dml2_dcn3_soc_qos_params dcn3;
- struct dml2_dcn4_soc_qos_params dcn4;
+ struct dml2_dcn32x_soc_qos_params dcn32x;
+ struct dml2_dcn4x_soc_qos_params dcn4x;
} qos_params;
enum dml2_qos_param_type qos_type;
@@ -152,6 +151,7 @@ struct dml2_soc_bb {
double phy_downspread_percent;
double dcn_downspread_percent;
double dispclk_dppclk_vco_speed_mhz;
+ bool no_dfs;
bool do_urgent_latency_adjustment;
unsigned int mem_word_bytes;
unsigned int num_dcc_mcaches;
@@ -173,6 +173,7 @@ struct dml2_ip_capabilities {
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
unsigned int max_flip_time_us;
+ unsigned int max_flip_time_lines;
unsigned int hostvm_mode;
unsigned int subvp_drr_scheduling_margin_us;
unsigned int subvp_prefetch_end_to_mall_start_us;
@@ -190,6 +191,10 @@ struct dml2_ip_capabilities {
unsigned int subvp_programming_delay_us;
unsigned int subvp_prefetch_to_mall_delay_us;
unsigned int drr_programming_delay_us;
+
+ unsigned int lock_timeout_us;
+ unsigned int recovery_timeout_us;
+ unsigned int flip_programming_delay_us;
} fams2;
};
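The renames in this header line the union members up with the hardware families they describe (dcn32x for the DCN3.2.x QoS model, dcn4x for DCN4.x) and make the dcn4x enum tag match its union member. A minimal sketch of the tag-plus-union pattern dml2_soc_qos_parameters uses; the payload fields here are stand-ins, only the shape mirrors the header:

    /* Tagged union in miniature: qos_type selects which union member is
     * valid, as in dml2_soc_qos_parameters above. Payload fields are
     * simplified stand-ins. */
    #include <stdio.h>

    enum qos_param_type { qos_dcn3, qos_dcn4x };

    struct soc_qos_parameters {
        union {
            struct { unsigned int base_latency_us; } dcn32x;
            struct { unsigned int df_qos_response_time_fclk_cycles; } dcn4x;
        } qos_params;
        enum qos_param_type qos_type; /* names the active union member */
    };

    static void print_qos(const struct soc_qos_parameters *p)
    {
        if (p->qos_type == qos_dcn4x)
            printf("dcn4x: %u fclk cycles\n",
                   p->qos_params.dcn4x.df_qos_response_time_fclk_cycles);
        else
            printf("dcn32x: %u us\n", p->qos_params.dcn32x.base_latency_us);
    }

    int main(void)
    {
        struct soc_qos_parameters p = {
            .qos_params.dcn4x.df_qos_response_time_fclk_cycles = 300,
            .qos_type = qos_dcn4x,
        };
        print_qos(&p);
        return 0;
    }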
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 8aa77bb190ea..eeb96c455658 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -5,7 +5,6 @@
#ifndef __DML_TOP_TYPES_H__
#define __DML_TOP_TYPES_H__
-#include "dml_top_types.h"
#include "dml_top_display_cfg_types.h"
#include "dml_top_soc_parameter_types.h"
#include "dml_top_policy_types.h"
@@ -74,6 +73,7 @@ struct dml2_pmo_options {
bool disable_drr_var;
bool disable_drr_clamped;
bool disable_drr_var_when_var_active;
+ bool disable_drr_clamped_when_var_active;
bool disable_fams2;
bool disable_vactive_det_fill_bw_pad; /* dml2_project_dcn4x_stage2_auto_drr_svp and above only */
bool disable_dyn_odm;
@@ -228,7 +228,7 @@ struct dml2_per_plane_programming {
union {
struct {
unsigned long dppclk_khz;
- } dcn4;
+ } dcn4x;
} min_clocks;
struct dml2_mcache_surface_allocation mcache_allocation;
@@ -262,7 +262,8 @@ union dml2_global_sync_programming {
unsigned int vupdate_offset_pixels;
unsigned int vupdate_vupdate_width_pixels;
unsigned int vready_offset_pixels;
- } dcn4;
+ unsigned int pstate_keepout_start_lines;
+ } dcn4x;
};
struct dml2_per_stream_programming {
@@ -273,7 +274,7 @@ struct dml2_per_stream_programming {
unsigned long dscclk_khz;
unsigned long dtbclk_khz;
unsigned long phyclk_khz;
- } dcn4;
+ } dcn4x;
} min_clocks;
union dml2_global_sync_programming global_sync;
@@ -374,7 +375,7 @@ struct dml2_display_cfg_programming {
unsigned long dispclk_khz;
unsigned long dcfclk_deepsleep_khz;
unsigned long dpp_ref_khz;
- } dcn3;
+ } dcn32x;
struct {
struct {
unsigned long uclk_khz;
@@ -403,7 +404,7 @@ struct dml2_display_cfg_programming {
uint32_t dpprefclk_did;
uint32_t dtbrefclk_did;
} divider_ids;
- } dcn4;
+ } dcn4x;
} min_clocks;
bool uclk_pstate_supported;
@@ -411,6 +412,7 @@ struct dml2_display_cfg_programming {
/* indicates this configuration requires FW to support */
bool fams2_required;
+ struct dmub_cmd_fams2_global_config fams2_global_config;
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 04edcde423a9..0aa4e4d343b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_internal_shared_types.h"
#include "dml2_core_shared_types.h"
#include "dml2_core_dcn4.h"
@@ -10,7 +9,7 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
-struct dml2_core_ip_params core_dcn4_ip_caps_base = {
+static const struct dml2_core_ip_params core_dcn4_ip_caps_base = {
// Hardcoded values for DCN3x
.vblank_nom_default_us = 668,
.remote_iommu_outstanding_translations = 256,
@@ -70,6 +69,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.max_num_dp2p0_streams = 4,
.imall_supported = 1,
.max_flip_time_us = 80,
+ .max_flip_time_lines = 32,
.words_per_channel = 16,
.subvp_fw_processing_delay_us = 15,
@@ -77,84 +77,6 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.subvp_swath_height_margin_lines = 16,
};
-struct dml2_core_ip_params core_dcn4sw_ip_caps_base = {
- .vblank_nom_default_us = 668,
- .remote_iommu_outstanding_translations = 256,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1280,
- .config_return_buffer_segment_size_in_kbytes = 64,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .dpte_buffer_size_in_pte_reqs_luma = 68,
- .dpte_buffer_size_in_pte_reqs_chroma = 36,
- .pixel_chunk_size_kbytes = 8,
- .alpha_pixel_chunk_size_kbytes = 4,
- .min_pixel_chunk_size_bytes = 1024,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 1171920,
- .max_line_buffer_lines = 32,
- .writeback_interface_buffer_size_kbytes = 90,
-
- //Number of pipes after DCN Pipe harvesting
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .dppclk_delay_subtotal = 47,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 28,
- .dppclk_delay_cnvc_cursor = 6,
- .cursor_buffer_size = 24,
- .cursor_chunk_size = 2,
- .dispclk_delay_subtotal = 125,
- .max_inter_dcn_tile_repeaters = 8,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_buffer_size = 0,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 12,
- .maximum_pixels_per_line_per_dsc_unit = 5760,
- .dsc422_native_support = true,
- .dcc_supported = true,
- .ptoi_supported = false,
-
- .cursor_64bpp_support = true,
- .dynamic_metadata_vm_enabled = false,
-
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .max_num_dp2p0_streams = 4,
- .imall_supported = 1,
- .max_flip_time_us = 80,
- .words_per_channel = 16,
-
- .subvp_fw_processing_delay_us = 15,
- .subvp_pstate_allow_width_us = 20,
- .subvp_swath_height_margin_lines = 16,
-
- .dcn_mrq_present = 1,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_zs = 64,
- .dcc_meta_buffer_size_bytes = 6272,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
-
- .dchub_arb_to_ret_delay = 102,
- .hostvm_mode = 1,
-};
-
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
{
ip_caps->pipe_count = ip_params->max_num_dpp;
@@ -169,6 +91,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i
ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;
ip_caps->max_flip_time_us = ip_params->max_flip_time_us;
+ ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines;
ip_caps->hostvm_mode = ip_params->hostvm_mode;
// FIXME_STAGE2: cleanup after adding all dv override to ip_caps
@@ -192,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params,
ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
ip_params->max_flip_time_us = ip_caps->max_flip_time_us;
+ ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines;
ip_params->hostvm_mode = ip_caps->hostvm_mode;
}
@@ -222,6 +146,7 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
}
memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb));
+ memcpy(&core->clean_me_up.mode_lib.ip_caps, in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
return true;
}
@@ -246,10 +171,12 @@ static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *p
phantom->stream_index = phantom_stream_index;
phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
- phantom->composition.viewport.plane0.height = (long int unsigned) math_ceil2(
- (double)phantom->composition.viewport.plane0.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
- phantom->composition.viewport.plane1.height = (long int unsigned) math_ceil2(
- (double)phantom->composition.viewport.plane1.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
+ phantom->composition.viewport.plane0.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane0.height);
+ phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane1.height);
phantom->immediate_flip = false;
phantom->dynamic_meta_data.enable = false;
phantom->cursor.num_cursors = 0;
@@ -344,6 +271,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
// Check if FAMS2 is required
if (display_cfg->stage3.performed && display_cfg->stage3.success) {
programming->fams2_required = display_cfg->stage3.fams2_required;
+
+ dml2_core_calcs_get_global_fams2_programming(&core->clean_me_up.mode_lib, display_cfg, &programming->fams2_global_config);
}
// Only loop over all the main streams (the implicit svp streams will be packed as part of the main stream)
@@ -621,7 +550,7 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
l->mode_programming_ex_params.min_clk_table = in_out->instance->minimum_clock_table;
l->mode_programming_ex_params.cfg_support_info = in_out->cfg_support_info;
l->mode_programming_ex_params.programming = in_out->programming;
- l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4.active.uclk_khz,
+ l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4x.active.uclk_khz,
&core->clean_me_up.mode_lib.soc);
result = dml2_core_calcs_mode_programming_ex(&l->mode_programming_ex_params);
@@ -641,20 +570,20 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
for (plane_index = 0; plane_index < in_out->programming->display_config.num_planes; plane_index++) {
in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];
- if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else {
- if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
- else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
- else
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
- }
+ if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else {
+ if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
+ else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
+ else
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ }
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
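The replaced block above fixes a classic array-decay bug: plane_descriptors->overrides reads element 0 on every iteration, so all planes inherited plane 0's legacy SubVP override; the fix indexes with [plane_index]. The pitfall reduced to hypothetical types:

    /* The bug pattern in miniature: on an array member, `arr->field` is
     * `arr[0].field`, so the loop below reads plane 0's value every time
     * unless it indexes explicitly. Types are made up for illustration. */
    #include <stdio.h>

    struct plane { int svp_config; };
    struct cfg { struct plane planes[4]; };

    int main(void)
    {
        struct cfg c = { .planes = { {0}, {1}, {2}, {3} } };
        for (int i = 0; i < 4; i++) {
            int wrong = c.planes->svp_config;     /* always planes[0] */
            int right = c.planes[i].svp_config;   /* per-plane value */
            printf("i=%d wrong=%d right=%d\n", i, wrong, right);
        }
        return 0;
    }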
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
index 235280c6dcf5..e62b2d3eeee6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_DCN4_H__
#define __DML2_CORE_DCN4_H__
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 6f4026e396e0..3ea54fd52e46 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -8,35 +8,55 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
#include "dml_top_types.h"
-#include "dml2_core_shared.h"
-#define DML_VM_PTE_ADL_PATCH_EN
-//#define DML_TVM_UPDATE_EN
-#define DML_TDLUT_ROW_BYTES_FIX_EN
-#define DML_REG_LIMIT_CLAMP_EN
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define DML_MAX_NUM_OF_SLICES_PER_DSC 4
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
+const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
+{
+ switch (bw_type) {
+ case (dml2_core_internal_bw_sdp):
+ return("dml2_core_internal_bw_sdp");
+ case (dml2_core_internal_bw_dram):
+ return("dml2_core_internal_bw_dram");
+ case (dml2_core_internal_bw_max):
+ return("dml2_core_internal_bw_max");
+ default:
+ return("dml2_core_internal_bw_unknown");
+ }
+}
+
+const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
+{
+ switch (dml2_core_internal_soc_state_type) {
+ case (dml2_core_internal_soc_state_sys_idle):
+ return("dml2_core_internal_soc_state_sys_idle");
+ case (dml2_core_internal_soc_state_sys_active):
+ return("dml2_core_internal_soc_state_sys_active");
+ case (dml2_core_internal_soc_state_svp_prefetch):
+ return("dml2_core_internal_soc_state_svp_prefetch");
+ case dml2_core_internal_soc_state_max:
+ default:
+ return("dml2_core_internal_soc_state_unknown");
+ }
+}
+
+static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
+{
+ *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
+ return dividend / divisor;
+}
+
+static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
dml2_printf("DML: ===================================== \n");
dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
- if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
- if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
- if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
- if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1)
- dml2_printf("DML: support: DSCOnlyIfNecessaryWithBPP = %d\n", support->DSCOnlyIfNecessaryWithBPP);
- if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
- if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ if (!fail_only || support->ViewportSizeSupport == 0)
+ dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
@@ -45,74 +65,87 @@ static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mod
dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ if (!fail_only || support->ExceededMultistreamSlots == 1)
+ dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
- if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
- if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
- if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
- if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
- if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
- if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
- if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
- if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
- if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
- if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
- if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ if (!fail_only || support->P2IWith420 == 1)
+ dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ if (!fail_only || support->DSC422NativeNotSupported == 1)
+ dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ if (!fail_only || support->DSCSlicesODMModeSupported == 0)
+ dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
- if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
+ dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
+ dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
- if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
- if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
+ dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ if (!fail_only || support->TotalAvailablePipesSupport == 0)
+ dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->NumberOfOTGSupport == 0)
+ dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
+ dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ if (!fail_only || support->NumberOfDP2p0Support == 0)
+ dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ if (!fail_only || support->EnoughWritebackUnits == 0)
+ dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ if (!fail_only || support->WritebackLatencySupport == 0)
+ dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ if (!fail_only || support->CursorSupport == 0)
+ dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ if (!fail_only || support->PitchSupport == 0)
+ dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ if (!fail_only || support->ViewportExceedsSurface == 1)
+ dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
+ dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->AvgBandwidthSupport == 0)
+ dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
- if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ if (!fail_only || support->ExceededMALLSize == 1)
+ dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ if (!fail_only || support->g6_temp_read_support == 0)
+ dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || support->ImmediateFlipSupport == 0)
+ dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ if (!fail_only || support->LinkCapacitySupport == 0)
+ dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+
if (!fail_only || support->ModeSupport == 0)
dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
dml2_printf("DML: ===================================== \n");
}
@@ -235,6 +268,7 @@ dml_get_per_pipe_var_func(vstartup_calculated, unsigned int, mode_lib->mp.VStart
dml_get_per_pipe_var_func(vupdate_offset, unsigned int, mode_lib->mp.VUpdateOffsetPix);
dml_get_per_pipe_var_func(vupdate_width, unsigned int, mode_lib->mp.VUpdateWidthPix);
dml_get_per_pipe_var_func(vready_offset, unsigned int, mode_lib->mp.VReadyOffsetPix);
+dml_get_per_pipe_var_func(pstate_keepout_dst_lines, unsigned int, mode_lib->mp.pstate_keepout_dst_lines);
dml_get_per_pipe_var_func(det_stored_buffer_size_l_bytes, unsigned int, mode_lib->mp.DETBufferSizeY);
dml_get_per_pipe_var_func(det_stored_buffer_size_c_bytes, unsigned int, mode_lib->mp.DETBufferSizeC);
dml_get_per_pipe_var_func(det_buffer_size_kbytes, unsigned int, mode_lib->mp.DETBufferSizeInKByte);
@@ -480,7 +514,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
default:
DML2_ASSERT(0);
return 256;
- };
+ }
}
static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
@@ -2051,7 +2085,11 @@ static void CalculateDCCConfiguration(
unsigned int full_swath_bytes_vert_wc_l;
unsigned int full_swath_bytes_vert_wc_c;
- yuv420 = dml_is_420(SourcePixelFormat);
+ if (dml_is_420(SourcePixelFormat))
+ yuv420 = 1;
+ else
+ yuv420 = 0;
+
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2343,16 +2381,16 @@ static void calculate_mcache_row_bytes(
}
if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
//but using the est_blk_per_vmpg between 2 and 4, to be not as pessimistic
if (p->surf_vert && vmpg_bytes > blk_bytes) {
- meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / (float)256 / p->num_chans;
}
*p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
} else {
- meta_per_mvmpg_per_channel = (float) blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float) blk_bytes / (float)256 / p->num_chans;
if (!p->surf_vert)
*p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
@@ -2519,8 +2557,11 @@ static void calculate_mcache_setting(
l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2;
// The algorithm starts with computing a non-integer, avg_mcache_element_size_l/c:
- l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
- if (l->is_dual_plane) {
+ if (*p->num_mcaches_l) {
+ l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
+ }
+
+ if (l->is_dual_plane && *p->num_mcaches_c) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2649,9 +2690,9 @@ static double dml_get_return_bandwidth_available(
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
- double derate_sdp_factor = 1;
- double derate_fabric_factor = 1;
- double derate_dram_factor = 1;
+ double derate_sdp_factor;
+ double derate_fabric_factor;
+ double derate_dram_factor;
double derate_sdp_bandwidth;
double derate_fabric_bandwidth;
@@ -2851,16 +2892,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->HostVMDynamicLevels = CalculateHostVMDynamicLevels(p->display_cfg->gpuvm_enable, p->display_cfg->hostvm_enable, p->HostVMMinPageSize, p->display_cfg->hostvm_max_non_cached_page_table_levels);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->hostvm_enable == true) {
+ if (p->display_cfg->gpuvm_enable == true) {
p->vm_group_bytes[k] = 512;
p->dpte_group_bytes[k] = 512;
- } else if (p->display_cfg->gpuvm_enable == true) {
- p->vm_group_bytes[k] = 2048;
- if (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes >= 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- p->dpte_group_bytes[k] = 512;
- } else {
- p->dpte_group_bytes[k] = 2048;
- }
} else {
p->vm_group_bytes[k] = 0;
p->dpte_group_bytes[k] = 0;
@@ -3185,7 +3219,7 @@ static double CalculateUrgentLatency(
double fabric_max_transport_latency_margin)
{
double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
+ urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
@@ -3196,7 +3230,7 @@ static double CalculateUrgentLatency(
}
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
@@ -3226,7 +3260,7 @@ static double CalculateTripToMemory(
double fabric_max_transport_latency_margin)
{
double trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
@@ -3235,7 +3269,7 @@ static double CalculateTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
@@ -3265,7 +3299,7 @@ static double CalculateMetaTripToMemory(
double fabric_max_transport_latency_margin)
{
double meta_trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
} else {
@@ -3273,7 +3307,7 @@ static double CalculateMetaTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
@@ -3781,8 +3815,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -3841,7 +3875,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = 2 * p->pixel_chunk_size_kbytes * 1024 / 64;
if (*p->UnboundedRequestEnabled) {
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
- (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / 64)), 1.0);
+ (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
@@ -3852,21 +3886,20 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
#endif
*p->hw_debug5 = false;
- if (!p->mrq_present) {
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!(*p->UnboundedRequestEnabled)
- && p->display_cfg->plane_descriptors[k].surface.dcc.enable
- && ((p->rob_buffer_size_kbytes * 1024 + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
- *p->hw_debug5 = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
+ if (!(p->mrq_present) && (!(*p->UnboundedRequestEnabled)) && (TotalActiveDPP == 1)
+ && p->display_cfg->plane_descriptors[k].surface.dcc.enable
+ && ((p->rob_buffer_size_kbytes * 1024 * (p->mrq_present ? MAXIMUMCOMPRESSION : 1)
+ + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
+ *p->hw_debug5 = true;
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
- }
}
}
@@ -4559,15 +4592,6 @@ static void calculate_tdlut_setting(
return;
}
-
- if (!p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = 0;
- *p->tdlut_opt_time = 0;
- *p->tdlut_drain_time = 0;
- *p->tdlut_bytes_per_group = 0;
- return;
- }
-
if (p->tdlut_mpc_width_flag) {
tdlut_mpc_width = 33;
tdlut_bytes_per_group_simple = 39*256;
@@ -4616,7 +4640,7 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycles are DispClk cycles per line * number of lines * number of slices
tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0;
+ tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
@@ -4627,7 +4651,7 @@ static void calculate_tdlut_setting(
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2(*p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
+ *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
}
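The (double) cast added to the tdlut_groups_per_2row_ub computation matters because both operands are unsigned: without it the quotient truncates before math_ceil2 ever sees the fraction. A worked example of the difference:

    /* Integer division truncates before any ceil can act on it, which is
     * what the (double) cast in the tdlut hunk above avoids. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int bytes_per_frame = 39 * 256 * 5; /* 49920 */
        unsigned int bytes_per_group = 39 * 256 * 2; /* 19968 */

        double truncated = (double)(bytes_per_frame / bytes_per_group); /* 2.0 */
        double exact = (double)bytes_per_frame / bytes_per_group;       /* 2.5 */

        printf("ceil(truncated)=%.0f ceil(exact)=%.0f\n",
               ceil(truncated), ceil(exact)); /* 2 vs 3 groups */
        return 0;
    }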
@@ -4640,7 +4664,7 @@ static void calculate_tdlut_setting(
dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %u\n", __func__, p->tdlut_addressing_mode);
+ dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
@@ -4706,11 +4730,12 @@ static void CalculateTarb(
static double CalculateTWait(
long reserved_vblank_time_ns,
double UrgentLatency,
- double Ttrip)
+ double Ttrip,
+ double g6_temp_read_blackout_us)
{
double TWait;
double t_urg_trip = math_max2(UrgentLatency, Ttrip);
- TWait = reserved_vblank_time_ns/1000.0 + t_urg_trip;
+ TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
@@ -4858,13 +4883,23 @@ static double get_urgent_bandwidth_required(
}
if (!exclude_this_plane) {
- surface_required_bw[k] = math_max4(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur,
- l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre,
- (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]);
+ l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
+ l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
+ l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
+ surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+#endif
} else {
surface_required_bw[k] = 0.0;
}
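The max4 refactor above is behavior-preserving: it names the four candidate bandwidths (VM/row, flip plus active, flip plus prefetch, active plus excess) so each term can be logged before the maximum is taken. The shape of that refactor, with fmax standing in for math_max4 and made-up numbers:

    /* Naming the operands of a max() makes each candidate loggable, as in
     * get_urgent_bandwidth_required above. Values are illustrative. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double vm_row = 120.0, flip_active = 340.5;
        double flip_prefetch = 410.2, active_excess = 298.7;

        double required = fmax(fmax(vm_row, flip_active),
                               fmax(flip_prefetch, active_excess));
        printf("max1 vm_row=%f\n", vm_row);
        printf("max2 flip_and_active=%f\n", flip_active);
        printf("max3 flip_and_prefetch=%f\n", flip_prefetch);
        printf("max4 active_and_excess=%f\n", active_excess);
        printf("required=%f\n", required);
        return 0;
    }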
@@ -4873,6 +4908,8 @@ static double get_urgent_bandwidth_required(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
@@ -4886,6 +4923,8 @@ static double get_urgent_bandwidth_required(
dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
@@ -4964,7 +5003,7 @@ static void CalculateExtraLatency(
max_request_size_bytes = request_size_bytes_chroma[k];
}
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
if (max_oustanding_when_urgent_expected)
@@ -4980,11 +5019,14 @@ static void CalculateExtraLatency(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
+ dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
@@ -5021,6 +5063,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
+ s->Tvm_no_trip_oto = 0.0;
+ s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
@@ -5037,7 +5081,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->bytes_pp = 0.0;
s->dep_bytes = 0.0;
s->min_Lsw_oto = 0.0;
+ s->min_Lsw_equ = 0.0;
s->Tsw_est1 = 0.0;
+ s->Tsw_est2 = 0.0;
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
@@ -5059,7 +5105,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: MaxVStartup = %u\n", __func__, p->MaxVStartup);
dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
@@ -5092,21 +5137,15 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime = p->myPipe->HTotal / p->myPipe->PixelClock;
s->trip_to_mem = p->Ttrip;
-#ifdef DML_TVM_UPDATE_EN
*p->Tvm_trips = p->ExtraLatencyPrefetch + math_max2(s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1)), p->Turg);
if (dcc_mrq_enable)
*p->Tvm_trips_flip = *p->Tvm_trips;
else
*p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
-#else
- *p->Tvm_trips = p->ExtraLatencyPrefetch + s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1));
- *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
-#endif
*p->Tr0_trips_flip = s->trip_to_mem * (s->HostVMDynamicLevelsTrips + 1);
*p->Tr0_trips = math_max2(*p->Tr0_trips_flip, p->tdlut_opt_time / 2);
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataVMEnabled == true) {
*p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
*p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
@@ -5114,15 +5153,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->Tdmdl_vm = 0;
*p->Tdmdl = s->TWait_p + p->ExtraLatencyPrefetch + p->Ttrip; // Tex
}
-#else
- if (p->DynamicMetadataVMEnabled == true) {
- *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
- *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
- } else {
- *p->Tdmdl_vm = 0;
- *p->Tdmdl = p->TWait + p->ExtraLatencyPrefetch; // Tex
- }
-#endif
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
@@ -5186,7 +5216,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
- s->NoTimeToPrefetch = false;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
@@ -5199,14 +5228,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
*p->Tvm_trips_flip_rounded = math_ceil2(4.0 * *p->Tvm_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
} else {
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataEnable || dcc_mrq_enable || p->setup_for_tdlut)
s->Tvm_trips_rounded = math_max2(s->LineTime * math_ceil2(4.0*math_max3(p->ExtraLatencyPrefetch, p->Turg, s->trip_to_mem)/s->LineTime, 1)/4, s->LineTime/4.0);
else
- s->Tvm_trips_rounded = s->LineTime / 4.0;
-#else
- s->Tvm_trips_rounded = s->LineTime / 4.0;
-#endif
+ s->Tvm_trips_rounded = s->LineTime / 4.0;
*p->Tvm_trips_flip_rounded = s->LineTime / 4.0;
}
@@ -5235,16 +5260,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->Tno_bw = 0;
}
-#ifdef DML_TVM_UPDATE_EN
if (p->mrq_present || p->display_cfg->gpuvm_max_page_table_levels >= 3)
*p->Tno_bw_flip = *p->Tno_bw;
else
*p->Tno_bw_flip = 0; //because there is no 3DLUT for iFlip
-#else
- *p->Tno_bw_flip = 0;
- if (p->display_cfg->gpuvm_enable == true)
- *p->Tno_bw_flip = *p->Tno_bw;
-#endif
if (dml_is_420(p->myPipe->SourcePixelFormat)) {
s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC / 4.0;
@@ -5258,64 +5277,63 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
-#ifdef DML_TDLUT_ROW_BYTES_FIX_EN
s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
-#endif
s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+
vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
if (p->setup_for_tdlut)
vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
-#ifdef DML_TDLUT_ROW_BYTES_FIX_EN
tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
-#else
- tdlut_row_bytes = p->tdlut_pte_bytes_per_frame;
-#endif
-#ifdef DML_REG_LIMIT_CLAMP_EN
s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-#endif
s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_oto = math_max3(
- *p->Tvm_trips,
+ s->Tvm_no_trip_oto = math_max2(
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tvm_oto = math_max2(
+ *p->Tvm_trips,
+ s->Tvm_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
-#ifdef DML_TVM_UPDATE_EN
+ s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
-#else
- s->Tvm_oto = s->LineTime / 4.0;
-#endif
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_oto = math_max3(
- *p->Tr0_trips,
+ s->Tr0_no_trip_oto = math_max2(
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tr0_oto = math_max2(
+ *p->Tr0_trips,
+ s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else
- s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ } else {
+ s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ s->Tr0_oto = s->Tr0_no_trip_oto;
+ }
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
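/* Minimal sketch of the no-trip/trip split introduced above (hypothetical
 * standalone form; vm_bytes assumed already HostVM-inefficiency scaled):
 * the bandwidth-only ("no trip") time is tracked separately from the value
 * clamped by the memory trip latency, so the vm/row prefetch bandwidth can
 * later be derived from the no-trip time alone.
 */
static double dml_max2(double a, double b) { return a > b ? a : b; }

static void split_tvm_oto(double tno_bw, double vm_bytes, double bw_oto,
			  double line_time, double tvm_trips,
			  double *tvm_no_trip, double *tvm_oto)
{
	*tvm_no_trip = dml_max2(tno_bw + vm_bytes / bw_oto, line_time / 4.0);
	*tvm_oto = dml_max2(tvm_trips, *tvm_no_trip); /* trip-latency floor */
}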
@@ -5325,19 +5343,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
//Tpre_equ in line time
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable)
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo;
else
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo;
-#else
- s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(s->TWait_p + p->TCalc, *p->Tdmdl - p->Ttrip)) / s->LineTime - Lo;
-#endif
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
@@ -5375,6 +5390,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
+#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
@@ -5395,18 +5411,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
-
- s->dep_bytes = math_max2(vm_bytes * p->HostVMInefficiencyFactor, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- dml2_printf("DML::%s: dep_bytes: %f\n", __func__, s->dep_bytes);
- dml2_printf("DML::%s: prefetch_sw_bytes: %f\n", __func__, s->prefetch_sw_bytes);
dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
-
- if (s->prefetch_sw_bytes < s->dep_bytes) {
- s->prefetch_sw_bytes = 2 * s->dep_bytes;
- dml2_printf("DML::%s: bump prefetch_sw_bytes to %f\n", __func__, s->prefetch_sw_bytes);
- }
+ dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+#endif
*p->dst_y_per_vm_vblank = 0;
*p->dst_y_per_row_vblank = 0;
@@ -5419,7 +5429,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// Tvm_trips_rounded is Tvm_trips rounded up to 1/4 line time
// Tr0_trips_rounded is Tr0_trips rounded up to 1/4 line time
// So that means the calculated prefetch bw can be higher since the total time available for prefetch is less
- if (s->dst_y_prefetch_equ > 1) {
+ bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+
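/* Minimal sketch of the new feasibility gate (hypothetical standalone
 * form): the equ schedule is only attempted when the rounded prefetch
 * budget covers one vm fetch, two row fetches and the minimum sw time.
 */
#include <stdbool.h>

static bool equ_schedule_feasible(double tpre_rounded,
				  double tvm_trips_rounded,
				  double tr0_trips_rounded,
				  double min_lsw_equ, double line_time)
{
	return tpre_rounded >= tvm_trips_rounded + 2.0 * tr0_trips_rounded +
			       min_lsw_equ * line_time;
}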
+ if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) {
s->prefetch_bw1 = 0.;
s->prefetch_bw2 = 0.;
s->prefetch_bw3 = 0.;
@@ -5436,28 +5448,35 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw1 = 0;
dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if ((p->VStartup == p->MaxVStartup) && (s->Tsw_est1 / s->LineTime < s->min_Lsw_oto) && (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
+ if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
+ (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_oto * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
// prefetch_bw2: VM + SW
- if (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded > 0)
+ if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded);
- else
+ (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
+ s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2;
+ } else
s->prefetch_bw2 = 0;
+ dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
+ s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
+ dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ }
+
// prefetch_bw3: 2*R0 + SW
if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
@@ -5467,8 +5486,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw3 = 0;
dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est3 / s->LineTime < s->min_Lsw_oto) && ((s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
+ if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
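/* Minimal sketch of one bandwidth candidate above (hypothetical helper;
 * byte counts assumed already inefficiency-scaled). prefetch_bw2 spreads
 * vm + sw bytes over the budget left after Tno_bw and two rounded row
 * trips; if the implied sw time would dip under the min_Lsw_equ floor,
 * the candidate is re-solved with the sw time pinned to that floor.
 */
static double prefetch_bw2_candidate(double vm_bytes, double sw_bytes,
				     double tpre_rounded, double tno_bw,
				     double tr0_trips_rounded,
				     double min_lsw_equ, double line_time)
{
	double budget = tpre_rounded - tno_bw - 2.0 * tr0_trips_rounded;
	double bw, tsw_est;

	if (budget <= 0.0)
		return 0.0;
	bw = (vm_bytes + sw_bytes) / budget;
	tsw_est = sw_bytes / bw;
	if (tsw_est < min_lsw_equ * line_time &&
	    budget - min_lsw_equ * line_time - 0.25 * line_time > 0.0)
		bw = vm_bytes / (budget - min_lsw_equ * line_time -
				 0.25 * line_time);
	return bw;
}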
@@ -5484,6 +5503,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
@@ -5504,9 +5524,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// here is to make sure the equ bw won't be more aggressive than the latency-based requirement.
// check vm time >= vm_trips
// check r0 time >= r0_trips
+
+ double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
+
+ dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+
if (s->prefetch_bw1 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw1 >= s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw1;
+ dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
}
@@ -5516,8 +5545,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// check vm time >= vm_trips
// check r0 time < r0_trips
if (s->prefetch_bw2 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw2 < s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw2;
+ dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
}
@@ -5526,8 +5558,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// check vm time < vm_trips
// check r0 time >= r0_trips
if (s->prefetch_bw3 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3 < s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw3 >= s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw3;
+ dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
}
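/* Minimal sketch of the case classification above (hypothetical
 * standalone form): a candidate bandwidth is self-consistent only when
 * the vm/row transfer times it implies land on the same side of the
 * rounded trip times that its derivation assumed (Case1: both >=,
 * Case2: vm >= / row <, Case3: vm < / row >=).
 */
#include <stdbool.h>

static bool bw_case_ok(double tno_bw, double vm_bytes, double row_bytes,
		       double bw, double tvm_trips_rounded,
		       double tr0_trips_rounded,
		       bool expect_vm_ge, bool expect_row_ge)
{
	double vm_time = tno_bw + vm_bytes / bw;  /* time to fetch vm   */
	double row_time = row_bytes / bw;         /* time to fetch rows */

	return (vm_time >= tvm_trips_rounded) == expect_vm_ge &&
	       (row_time >= tr0_trips_rounded) == expect_row_ge;
}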
@@ -5542,11 +5577,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw_equ = s->prefetch_bw4;
}
-#ifdef DML_REG_LIMIT_CLAMP_EN
s->prefetch_bw_equ = math_max3(s->prefetch_bw_equ,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-#endif
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
@@ -5578,6 +5611,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
+
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5586,29 +5622,28 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
-
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- if (p->VStartup == p->MaxVStartup) {
- *p->dst_y_per_vm_vblank = math_floor2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_floor2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- } else {
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- }
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
+ s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
+ /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
+ s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
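/* Minimal sketch (hypothetical helper): fewer sw lines mean less time to
 * move the same pixel bytes, so taking the smaller of the two schedules'
 * Lsw yields the worst-case (highest) pixel bandwidth requirement.
 */
static double worst_case_lsw(double lsw_equ, double lsw_oto)
{
	return lsw_equ < lsw_oto ? lsw_equ : lsw_oto;
}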
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
@@ -5645,7 +5680,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
@@ -5668,7 +5703,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
@@ -5690,14 +5725,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: MyErr set, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
-
dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
@@ -5708,7 +5742,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5727,26 +5763,26 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (*p->dst_y_per_vm_vblank > 0) {
+ } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (*p->dst_y_per_row_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
+ } else if (s->dst_y_per_row_no_trip_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -5756,7 +5792,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5773,11 +5809,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
+ *p->prefetch_vmrow_bw = 0;
}
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+
return s->NoTimeToPrefetch;
}
@@ -6169,6 +6210,7 @@ static void CalculateFlipSchedule(
unsigned int dpte_row_height_chroma,
bool use_one_row_for_frame_flip,
unsigned int max_flip_time_us,
+ unsigned int max_flip_time_lines,
unsigned int per_pipe_flip_bytes,
unsigned int meta_row_bytes,
unsigned int meta_row_height,
@@ -6183,12 +6225,13 @@ static void CalculateFlipSchedule(
{
struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals;
- l->dual_plane = dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
+ l->dual_plane = dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
@@ -6239,7 +6282,8 @@ static void CalculateFlipSchedule(
if (use_lb_flip_bw) {
// For mode check, calculate the flip bw requirement with the worst case flip time
- l->max_flip_time = math_min2(l->min_row_time, math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
+ l->max_flip_time = math_min2(math_min2(l->min_row_time, (double)max_flip_time_lines * LineTime / VRatio),
+ math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
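/* Minimal sketch of the worst-case flip time bound above (hypothetical
 * standalone form): the flip must fit within both the row rotation time
 * and the per-frame line budget, but is never squeezed below the
 * latency-imposed floor.
 */
static double min2d(double a, double b) { return a < b ? a : b; }
static double max2d(double a, double b) { return a > b ? a : b; }

static double worst_case_flip_time(double min_row_time,
				   double max_flip_time_lines,
				   double line_time, double vratio,
				   double tvm_trips_flip_rounded,
				   double tr0_trips_flip_rounded,
				   double max_flip_time_us)
{
	double line_budget = max_flip_time_lines * line_time / vratio;

	return min2d(min2d(min_row_time, line_budget),
		     max2d(tvm_trips_flip_rounded +
			   2.0 * tr0_trips_flip_rounded, max_flip_time_us));
}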
// The lower bound on flip bandwidth
// Note: get_urgent_bandwidth_required already considers dpte_row_bw and meta_row_bw in the bandwidth calculation, so leave final_flip_bw = 0 if iflip is not required
@@ -6257,7 +6301,7 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_row_bytes);
+ dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
@@ -6268,6 +6312,7 @@ static void CalculateFlipSchedule(
dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6284,7 +6329,7 @@ static void CalculateFlipSchedule(
*dst_y_per_vm_flip = 1; // not used
*dst_y_per_row_flip = 1; // not used
- *ImmediateFlipSupportedForPipe = true;
+ *ImmediateFlipSupportedForPipe = l->min_row_time >= (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded);
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
@@ -6350,6 +6395,7 @@ static void CalculateFlipSchedule(
dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
@@ -6380,6 +6426,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
p->Watermark->Z8StutterExitWatermark = p->mmSOCParameters.SRExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
p->Watermark->Z8StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
+ if (p->mmSOCParameters.qos_type == dml2_qos_param_type_dcn4x) {
+ p->Watermark->StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ }
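/* Minimal sketch (hypothetical standalone form): on dcn4x QoS every
 * stutter watermark is padded by the same worst-case urgent latency plus
 * DF response time, as computed further below in this file.
 */
static void pad_stutter_watermarks(double *watermarks, int count,
				   double max_urgent_latency_us,
				   double df_response_time_us)
{
	int i;

	for (i = 0; i < count; i++)
		watermarks[i] += max_urgent_latency_us + df_response_time_us;
}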
p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
@@ -6541,7 +6593,8 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
- if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
+ if (p->display_cfg->overrides.all_streams_blanked ||
+ (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency))
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
@@ -6585,13 +6638,13 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->src_y_ahead_c = (unsigned int)(math_floor2(p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k], p->SwathHeightC[k]) + s->LBLatencyHidingSourceLinesC[k]);
s->sub_vp_lines_c = s->src_y_pstate_c + s->src_y_ahead_c + p->meta_row_height_c[k];
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
+ if (dml_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, 2 * s->sub_vp_lines_c));
else
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, p->meta_row_height_c[k]);
+ dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
@@ -6856,7 +6909,8 @@ struct dml2_core_internal_g6_temp_read_blackouts_table {
} entries[DML_MAX_CLK_TABLE_SIZE];
};
-struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = {
+static const struct dml2_core_internal_g6_temp_read_blackouts_table
+ core_dcn4_g6_temp_read_blackout_table = {
.entries = {
{
.uclk_khz = 96000,
@@ -6921,6 +6975,43 @@ static double get_g6_temp_read_blackout_us(
return (double)blackout_us;
}
+static double get_max_urgent_latency_us(
+ struct dml2_dcn4x_soc_qos_params *dcn4x,
+ double uclk_freq_mhz,
+ double FabricClock,
+ unsigned int min_clk_index)
+{
+ double latency;
+ latency = dcn4x->per_uclk_dpm_params[min_clk_index].maximum_latency_when_urgent_uclk_cycles / uclk_freq_mhz
+ * (1 + dcn4x->umc_max_latency_margin / 100.0)
+ + dcn4x->mall_overhead_fclk_cycles / FabricClock
+ + dcn4x->max_round_trip_to_furthest_cs_fclk_cycles / FabricClock
+ * (1 + dcn4x->fabric_max_transport_latency_margin / 100.0);
+ return latency;
+}
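/* Worked example for the helper above, with made-up numbers:
 * maximum_latency_when_urgent_uclk_cycles = 1000, uclk_freq_mhz = 1000,
 * umc_max_latency_margin = 10, mall_overhead_fclk_cycles = 50,
 * max_round_trip_to_furthest_cs_fclk_cycles = 200, FabricClock = 1000,
 * fabric_max_transport_latency_margin = 20:
 *   1000/1000 * 1.10 + 50/1000 + 200/1000 * 1.20
 *   = 1.10 + 0.05 + 0.24 = 1.39 us
 */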
+
+static void calculate_pstate_keepout_dst_lines(
+ const struct dml2_display_cfg *display_cfg,
+ const struct dml2_core_internal_watermarks *watermarks,
+ unsigned int pstate_keepout_dst_lines[])
+{
+ const struct dml2_stream_parameters *stream_descriptor;
+ unsigned int i;
+
+ for (i = 0; i < display_cfg->num_planes; i++) {
+ if (!dml_is_phantom_pipe(&display_cfg->plane_descriptors[i])) {
+ stream_descriptor = &display_cfg->stream_descriptors[display_cfg->plane_descriptors[i].stream_index];
+
+ pstate_keepout_dst_lines[i] =
+ (unsigned int)math_ceil(watermarks->DRAMClockChangeWatermark / ((double)stream_descriptor->timing.h_total * 1000.0 / (double)stream_descriptor->timing.pixel_clock_khz));
+
+ if (pstate_keepout_dst_lines[i] > stream_descriptor->timing.v_total - 1) {
+ pstate_keepout_dst_lines[i] = stream_descriptor->timing.v_total - 1;
+ }
+ }
+ }
+}
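/* Worked example for the keepout calculation above, with made-up timing:
 * h_total = 2200, pixel_clock_khz = 148500 gives a line time of
 * 2200 * 1000 / 148500 ~= 14.81 us; DRAMClockChangeWatermark = 250 us
 * then yields ceil(250 / 14.81) = 17 keepout lines, clamped to
 * v_total - 1 if the watermark spans more than a frame.
 */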
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
@@ -6963,7 +7054,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
+ mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
@@ -6981,7 +7072,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
@@ -7126,7 +7216,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
+ (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -7202,17 +7292,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#if defined(DV_BUILD)
// Assume a memory config setting of 3 in 420 mode or get a new ip parameter that reflects the programming.
if (mode_lib->ms.BytePerPixelC[k] != 0.0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) {
- lb_buffer_size_bits_luma = 34620 * 57;;
+ lb_buffer_size_bits_luma = 34620 * 57;
lb_buffer_size_bits_chroma = 13560 * 57;
}
#endif
*/
- mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
+ mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /
(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, 1.0) - 2, 0.0));
if (mode_lib->ms.BytePerPixelC[k] == 0.0) {
mode_lib->ms.MaximumSwathWidthInLineBufferChroma = 0;
} else {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
+ mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /
(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, 1.0) - 2, 0.0));
}
@@ -7231,10 +7321,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
/* Cursor Support Check */
mode_lib->ms.support.CursorSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_width > 0.0) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) {
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false)
mode_lib->ms.support.CursorSupport = false;
- }
}
}
@@ -7295,7 +7384,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.ViewportExceedsSurface = false;
if (!display_cfg->overrides.hw.surface_viewport_size_check_disable) {
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
+ if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width ||
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
@@ -7304,11 +7394,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
- if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
- }
+ }
+ if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
+ if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
+ mode_lib->ms.support.ViewportExceedsSurface = true;
}
}
}
@@ -7466,6 +7556,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&mode_lib->ms.OutputRate[k],
&mode_lib->ms.RequiredSlots[k]);
+ if (s->OutputBpp[k] == 0.0) {
+ s->OutputBpp[k] = mode_lib->ms.OutputBpp[k];
+ }
+
if (mode_lib->ms.RequiresDSC[k] == false) {
mode_lib->ms.ODMMode[k] = s->ODMModeNoDSC;
mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceNoDSC;
@@ -7580,7 +7674,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ip.writeback_line_buffer_buffer_size));
}
@@ -7665,8 +7759,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && mode_lib->ip.ptoi_supported == true)
mode_lib->ms.support.P2IWith420 = true;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary && s->OutputBpp[k] != 0)
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = true;
if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 && !mode_lib->ip.dsc422_native_support)
mode_lib->ms.support.DSC422NativeNotSupported = true;
@@ -7819,7 +7911,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.DSCDelay[k] = DSCDelayRequirement(mode_lib->ms.RequiresDSC[k],
mode_lib->ms.ODMMode[k],
mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.OutputBpp[k],
+ s->OutputBpp[k],
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8059,59 +8151,63 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.excess_vactive_fill_bw_c);
mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = CalculateTripToMemory(
mode_lib->ms.UrgLatency,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool cursor_not_enough_urgent_latency_hiding = 0;
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ calculate_cursor_req_attributes(
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ display_cfg->plane_descriptors[k].cursor.cursor_bpp,
+
+ // output
+ &s->cursor_lines_per_chunk[k],
+ &s->cursor_bytes_per_line[k],
+ &s->cursor_bytes_per_chunk[k],
+ &s->cursor_bytes[k]);
+
+ calculate_cursor_urgent_burst_factor(
+ mode_lib->ip.cursor_buffer_size,
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ s->cursor_bytes_per_chunk[k],
+ s->cursor_lines_per_chunk[k],
+ line_time_us,
+ mode_lib->ms.UrgLatency,
+
+ // output
+ &mode_lib->ms.UrgentBurstFactorCursor[k],
+ &cursor_not_enough_urgent_latency_hiding);
+ }
- // output
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
@@ -8254,20 +8350,20 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
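/* Worked example for avg_urgent_latency_us above, with made-up numbers:
 * average_latency_when_urgent_uclk_cycles = 800, uclk_freq_mhz = 800,
 * umc_average_latency_margin = 5, average_transport_distance_fclk_cycles
 * = 100, FabricClock = 1000, fabric_average_transport_latency_margin = 10:
 *   (800/800 * 1.05 + 100/1000) * 1.10 = (1.05 + 0.10) * 1.10 = 1.265 us
 */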
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -8287,7 +8383,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -8460,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
{
mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
calculate_hostvm_inefficiency_factor(
&s->HostVMInefficiencyFactor,
&s->HostVMInefficiencyFactorPrefetch,
@@ -8501,10 +8596,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
s->ReorderingBytes,
mode_lib->ms.DCFCLK,
mode_lib->ms.FabricClock,
@@ -8540,7 +8640,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory);
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
@@ -8587,7 +8689,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaximumVStartup[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
@@ -8669,8 +8770,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
}
}
@@ -8683,20 +8784,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.VRatioInPrefetchSupported = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
}
}
- s->AnyLinesForVMOrRowTooLarge = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.LinesForDPTERow[k] >= 16 || mode_lib->ms.LinesForVM[k] >= 32) {
- s->AnyLinesForVMOrRowTooLarge = true;
- }
- }
-
// Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
if (mode_lib->ms.support.PrefetchSupported) {
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
@@ -8845,6 +8941,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dpte_row_height_chroma[k],
mode_lib->ms.use_one_row_for_frame_flip[k],
mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
s->per_pipe_flip_bytes[k],
mode_lib->ms.meta_row_bytes[k],
s->meta_row_height_luma[k],
@@ -8932,6 +9029,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->mSOCParameters.USRRetrainingLatency = 0;
s->mSOCParameters.SMNLatency = 0;
s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
CalculateWatermarks_params->USRRetrainingRequired = false;
@@ -8951,7 +9051,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; // FIXME_STAGE2, need a new ip param?
CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
@@ -8979,29 +9078,24 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
- }
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ }
+ dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
// End of Prefetch Check
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
+ mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
//Re-ordering Buffer Support Check
- mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
+ / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= s->mSOCParameters.max_urgent_latency_us) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
}
} else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
+ if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
@@ -9024,15 +9118,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
+ dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
{
- // s->dram_clock_change_support = 1;
- // s->f_clock_change_support = 1;
-
if (mode_lib->ms.support.ScaleRatioAndTapsSupport
&& mode_lib->ms.support.SourceFormatPixelAndScanSupport
&& mode_lib->ms.support.ViewportSizeSupport
@@ -9043,9 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMultistreamSlots
&& !mode_lib->ms.support.MSOOrODMSplitWithNonDPLink
&& !mode_lib->ms.support.NotEnoughLanesForMSO
- //&& mode_lib->ms.support.LinkCapacitySupport == true // FIXME_STAGE2
&& !mode_lib->ms.support.P2IWith420
- && !mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP
&& !mode_lib->ms.support.DSC422NativeNotSupported
&& mode_lib->ms.support.DSCSlicesODMModeSupported
&& !mode_lib->ms.support.NotEnoughDSCUnits
@@ -9113,7 +9202,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.ModeSupport)
- dml2_print_dml_mode_support_info(&mode_lib->ms.support, true);
+ dml2_print_mode_support_info(&mode_lib->ms.support, true);
dml2_printf("DML::%s: --- DONE --- \n", __func__);
#endif
@@ -9132,6 +9221,10 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+
+ for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
+ dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
return result;
@@ -9373,11 +9466,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
}
-#ifdef DML_VM_PTE_ADL_PATCH_EN
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
-#endif
dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
@@ -9406,11 +9497,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
}
-#ifdef DML_VM_PTE_ADL_PATCH_EN
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
-#endif
dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
@@ -9535,17 +9624,16 @@ static void CalculateVMGroupAndRequestTimes(
line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
-#ifdef DML_VM_PTE_ADL_PATCH_EN
- if (num_group_per_lower_vm_stage_flip <= 2) {
- num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
- }
+ if (num_group_per_lower_vm_stage_pref > 0)
+ TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
+ else
+ TimePerVMGroupVBlank[k] = 0;
+
+ if (num_group_per_lower_vm_stage_flip > 0)
+ TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
+ else
+ TimePerVMGroupFlip[k] = 0;
- if (num_group_per_lower_vm_stage_pref <= 2) {
- num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
- }
-#endif
- TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
- TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
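	/*
	 * Note: the unconditional divisions removed above could divide by zero
	 * when a lower VM stage has no groups; each quotient is now guarded in
	 * the pattern
	 *
	 *	time = (groups > 0) ? lines * line_time / groups : 0.0;
	 *
	 * reporting 0 us for the degenerate case.
	 */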
if (num_req_per_lower_vm_stage_pref > 0)
TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
else
@@ -9599,10 +9687,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
bool FoundCriticalSurface = false;
double LastZ8StutterPeriod = 0;
- unsigned int SwathSizeCriticalSurface;
- unsigned int LastChunkOfSwathSize;
- unsigned int MissingPartOfLastSwathOfDETSize;
-
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
@@ -9777,7 +9861,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
/ (p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
(*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer)
- / math_max2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
+ / math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
@@ -9871,19 +9955,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
- SwathSizeCriticalSurface = (unsigned int)(l->BytePerPixelYCriticalSurface * l->SwathHeightYCriticalSurface * math_ceil2(l->SwathWidthYCriticalSurface, l->BlockWidth256BytesYCriticalSurface));
- LastChunkOfSwathSize = SwathSizeCriticalSurface % (p->PixelChunkSizeInKByte * 1024);
- MissingPartOfLastSwathOfDETSize = (unsigned int)(math_ceil2(l->DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface) - l->DETBufferSizeYCriticalSurface);
-
- *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface && (LastChunkOfSwathSize > 0) &&
- (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0) && (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize));
+ *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SwathSizeCriticalSurface = %u\n", __func__, SwathSizeCriticalSurface);
dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: LastChunkOfSwathSize = %u\n", __func__, LastChunkOfSwathSize);
- dml2_printf("DML::%s: MissingPartOfLastSwathOfDETSize = %u\n", __func__, MissingPartOfLastSwathOfDETSize);
dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
@@ -9928,14 +10004,14 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table);
+ mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
+ mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
+ mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
+ mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
+ mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
+ s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
+ mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
+ mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
@@ -9970,18 +10046,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0;
+ mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0;
+ mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0;
+ mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
+ mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dcfclk > 0);
dml2_assert(mode_lib->mp.FabricClock > 0);
@@ -10462,11 +10538,16 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
- s->ReorderBytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
mode_lib->mp.Dcfclk,
mode_lib->mp.FabricClock,
mode_lib->ip.pixel_chunk_size_kbytes,
@@ -10551,32 +10632,32 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.excess_vactive_fill_bw_c);
mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = CalculateTripToMemory(
mode_lib->mp.UrgentLatency,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
@@ -10585,38 +10666,40 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us;
+ double line_time_us = 0.0;
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
+ line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
+ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ calculate_cursor_req_attributes(
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ display_cfg->plane_descriptors[k].cursor.cursor_bpp,
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
+ // output
+ &s->cursor_lines_per_chunk[k],
+ &s->cursor_bytes_per_line[k],
+ &s->cursor_bytes_per_chunk[k],
+ &s->cursor_bytes[k]);
+
+ calculate_cursor_urgent_burst_factor(
+ mode_lib->ip.cursor_buffer_size,
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ s->cursor_bytes_per_chunk[k],
+ s->cursor_lines_per_chunk[k],
+ line_time_us,
+ mode_lib->mp.UrgentLatency,
- // output
- &mode_lib->mp.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
+ // output
+ &mode_lib->mp.UrgentBurstFactorCursor[k],
+ &cursor_not_enough_urgent_latency_hiding);
+ }
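	/*
	 * Note: the cursor request-attribute and urgent-burst-factor
	 * calculations are now skipped for planes without a cursor
	 * (num_cursors == 0); line_time_us is hoisted out of the guarded
	 * block, and UrgentBurstFactorCursor[k] is left untouched in that case
	 * before being copied to the ...Pre[k] value below.
	 */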
mode_lib->mp.UrgentBurstFactorCursorPre[k] = mode_lib->mp.UrgentBurstFactorCursor[k];
CalculateUrgentBurstFactor(
@@ -10676,7 +10759,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory);
+ mode_lib->mp.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
@@ -10722,7 +10807,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaxVStartupLines[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
@@ -10808,9 +10892,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.dst_y_prefetch[k] < 2)
s->DestinationLineTimesForPrefetchLessThan2 = true;
- if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__)
+ if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
+ dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ }
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
@@ -10994,6 +11082,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.dpte_row_height_chroma[k],
mode_lib->mp.use_one_row_for_frame_flip[k],
mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
s->per_pipe_flip_bytes[k],
mode_lib->mp.meta_row_bytes[k],
mode_lib->mp.meta_row_height[k],
@@ -11143,6 +11232,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->mmSOCParameters.USRRetrainingLatency = 0;
s->mmSOCParameters.SMNLatency = 0;
s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
+	s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->mp.uclk_freq_mhz, mode_lib->mp.FabricClock, in_out_params->min_clk_index);
+	s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->mp.FabricClock;
+ s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
CalculateWatermarks_params->USRRetrainingRequired = false;
@@ -11162,7 +11254,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateWatermarks_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
CalculateWatermarks_params->SwathHeightY = mode_lib->mp.SwathHeightY;
CalculateWatermarks_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; //FIXME_STAGE2
CalculateWatermarks_params->SwathWidthY = mode_lib->mp.SwathWidthY;
CalculateWatermarks_params->SwathWidthC = mode_lib->mp.SwathWidthC;
CalculateWatermarks_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
@@ -11203,6 +11294,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
}
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
+
dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
@@ -11491,9 +11584,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
+ dml2_printf("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
dml2_printf("DML::%s: result = %0d\n", __func__, result);
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
return result;
@@ -12186,10 +12279,11 @@ void dml2_core_calcs_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
void dml2_core_calcs_get_global_sync_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, union dml2_global_sync_programming *out, int pipe_index)
{
- out->dcn4.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index);
- out->dcn4.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index);
- out->dcn4.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index);
- out->dcn4.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index);
+ out->dcn4x.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index);
+ out->dcn4x.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index);
+ out->dcn4x.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index);
+ out->dcn4x.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index);
+ out->dcn4x.pstate_keepout_start_lines = dml_get_pstate_keepout_dst_lines(mode_lib, pipe_index);
}
void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
@@ -12197,6 +12291,25 @@ void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_disp
dml2_core_calcs_get_global_sync_programming(mode_lib, &out->global_sync, pipe_index);
}
+void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct display_configuation_with_meta *display_cfg,
+ struct dmub_cmd_fams2_global_config *fams2_global_config)
+{
+ fams2_global_config->features.bits.enable = display_cfg->stage3.fams2_required;
+
+ if (fams2_global_config->features.bits.enable) {
+ fams2_global_config->features.bits.enable_stall_recovery = true;
+ fams2_global_config->features.bits.allow_delay_check_mode = FAMS2_ALLOW_DELAY_CHECK_FROM_START;
+
+ fams2_global_config->max_allow_delay_us = mode_lib->ip_caps.fams2.max_allow_delay_us;
+ fams2_global_config->lock_wait_time_us = mode_lib->ip_caps.fams2.lock_timeout_us;
+ fams2_global_config->recovery_timeout_us = mode_lib->ip_caps.fams2.recovery_timeout_us;
+ fams2_global_config->hwfq_flip_programming_delay_us = mode_lib->ip_caps.fams2.flip_programming_delay_us;
+
+ fams2_global_config->num_streams = display_cfg->display_config.num_streams;
+ }
+}
+
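+/*
+ * A sketch of the expected call pattern for this new helper; the local names
+ * here are illustrative only:
+ *
+ *	struct dmub_cmd_fams2_global_config cfg = {0};
+ *
+ *	dml2_core_calcs_get_global_fams2_programming(mode_lib, display_cfg, &cfg);
+ *	// cfg.features.bits.enable mirrors display_cfg->stage3.fams2_required;
+ *	// the other fields are filled from ip_caps only when FAMS2 is required,
+ *	// so the caller should zero-initialize the struct as above.
+ */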
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
const struct display_configuation_with_meta *display_cfg,
struct dmub_fams2_stream_static_state *fams2_programming,
@@ -12209,6 +12322,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
unsigned int i;
+ if (display_cfg->display_config.overrides.all_streams_blanked) {
+ /* stream is blanked, so do nothing */
+ return;
+ }
+
/* from display configuration */
fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
@@ -12368,6 +12486,7 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
{
double phantom_processing_delay_pix;
unsigned int phantom_processing_delay_lines;
+ unsigned int phantom_min_v_active_lines;
unsigned int phantom_v_active_lines;
unsigned int phantom_v_startup_lines;
unsigned int phantom_v_blank_lines;
@@ -12377,14 +12496,16 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
phantom_processing_delay_pix = (double)((mode_lib->ip.subvp_fw_processing_delay_us + mode_lib->ip.subvp_pstate_allow_width_us) *
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.pixel_clock_khz / 1000));
phantom_processing_delay_lines = (unsigned int)(phantom_processing_delay_pix / (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total);
- dml2_core_shared_div_rem(phantom_processing_delay_pix,
+ dml2_core_div_rem(phantom_processing_delay_pix,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total,
&rem);
if (rem)
phantom_processing_delay_lines++;
phantom_v_startup_lines = dml_get_plane_max_vstartup_lines(mode_lib, plane_index);
- phantom_v_active_lines = phantom_processing_delay_lines + dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) + mode_lib->ip.subvp_swath_height_margin_lines;
+ phantom_min_v_active_lines = (unsigned int)math_ceil((double)dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) /
+ display_cfg->plane_descriptors[plane_index].composition.scaler_info.plane0.v_ratio);
+ phantom_v_active_lines = phantom_processing_delay_lines + phantom_min_v_active_lines + mode_lib->ip.subvp_swath_height_margin_lines;
// phantom_vblank = max(vbp(vstartup) + vactive + vfp(always 1) + vsync(can be 1), main_vblank)
phantom_v_blank_lines = phantom_v_startup_lines + 1 + 1;
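	/*
	 * Note: phantom_min_v_active is now, in shorthand,
	 *
	 *	ceil(subviewport_lines_needed_in_mall / plane0.v_ratio)
	 *
	 * converting the MALL subviewport requirement into phantom timing
	 * lines by the plane's vertical scale ratio and rounding up.
	 */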
@@ -12396,8 +12517,8 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
// phantom_vtotal = vactive + vblank
out->phantom_v_total = phantom_v_active_lines + phantom_v_blank_lines;
- out->phantom_min_v_active = dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index);
- out->phantom_v_startup = dml_get_plane_max_vstartup_lines(mode_lib, plane_index);
+ out->phantom_min_v_active = phantom_min_v_active_lines;
+ out->phantom_v_startup = phantom_v_startup_lines;
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
@@ -12418,7 +12539,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.ScaleRatioAndTapsSupport = mode_lib->ms.support.ScaleRatioAndTapsSupport;
out->informative.mode_support_info.SourceFormatPixelAndScanSupport = mode_lib->ms.support.SourceFormatPixelAndScanSupport;
out->informative.mode_support_info.P2IWith420 = mode_lib->ms.support.P2IWith420;
- out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP;
+ out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = false;
out->informative.mode_support_info.DSC422NativeNotSupported = mode_lib->ms.support.DSC422NativeNotSupported;
out->informative.mode_support_info.LinkRateDoesNotMatchDPVersion = mode_lib->ms.support.LinkRateDoesNotMatchDPVersion;
out->informative.mode_support_info.LinkRateForMultistreamNotIndicated = mode_lib->ms.support.LinkRateForMultistreamNotIndicated;
@@ -12611,7 +12732,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.cstate_max_cap_mode = dml_get_cstate_max_cap_mode(mode_lib);
- out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib);
+	out->min_clocks.dcn4x.dpprefclk_khz = (unsigned int)dml_get_global_dppclk_khz(mode_lib);
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
@@ -12724,13 +12845,13 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
}
}
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index b280ab573fbb..df2d1550a14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_DCN4_CALCS_H__
#define __DML2_CORE_DCN4_CALCS_H__
@@ -30,6 +29,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
+void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
index f56abe9ab919..28394de02885 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_core_factory.h"
#include "dml2_core_dcn4.h"
#include "dml2_external_lib_deps.h"
@@ -11,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
index 53636a8f52aa..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_FACTORY_H__
#define __DML2_CORE_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
index 81f0a6f19f87..8f3c1c0b1cc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
@@ -779,7 +779,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
+ mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
@@ -1776,32 +1776,32 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
#endif
mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = CalculateTripToMemory(
mode_lib->ms.UrgLatency,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
@@ -1995,21 +1995,21 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
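	/*
	 * Note: both averages share the same shape, in shorthand,
	 *
	 *	avg = (avg_uclk_cycles / uclk * (1 + umc_avg_margin / 100)
	 *	       + avg_transport_fclk_cycles / fclk)
	 *	      * (1 + fabric_avg_margin / 100)
	 *
	 * differing only in whether the urgent or non-urgent uclk cycle count
	 * is used.
	 */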
double outstanding_latency_us = 0;
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -2029,7 +2029,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
#endif
}
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -2242,11 +2242,15 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
}
double min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
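	/*
	 * Note: for dcn3-style QoS the re-ordering allowance is sized per
	 * memory channel, taking the largest of the three per-channel
	 * out-of-order return figures and scaling by channel_count; this
	 * mirrors the same computation added on the mode-programming path in
	 * dml2_core_dcn4_calcs.c.
	 */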
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
s->ReorderingBytes,
mode_lib->ms.DCFCLK,
mode_lib->ms.FabricClock,
@@ -2713,13 +2717,13 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
//Re-ordering Buffer Support Check
mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
mode_lib->ms.support.ROBSupport = true;
@@ -2727,7 +2731,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.support.ROBSupport = false;
}
} else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
+ if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
@@ -5050,7 +5054,7 @@ static void calculate_mcache_row_bytes(
unsigned int meta_per_mvmpg_per_channel_ub = 0;
if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
			//but using the est_blk_per_vmpg between 2 and 4, to be not as pessimistic
if (p->surf_vert && vmpg_bytes > blk_bytes) {
@@ -5059,7 +5063,7 @@ static void calculate_mcache_row_bytes(
*p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
} else {
- meta_per_mvmpg_per_channel = (float)blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)blk_bytes / (float)256 / p->num_chans;
if (!p->surf_vert)
*p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
@@ -5881,7 +5885,7 @@ static double CalculateUrgentLatency(
double fabric_max_transport_latency_margin)
{
double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
+ urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
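	/*
	 * Note: written out, the dcn4x branch computes, in shorthand,
	 *
	 *	urgent_latency = (df_qos_resp + mall_oh) / fclk
	 *	               + rtt_furthest_cs / fclk * (1 + fabric_margin / 100)
	 *	               + urgent_ramp_cycles / uclk * (1 + umc_margin / 100)
	 *
	 * with each cycle count converted to microseconds via its clock in MHz.
	 */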
@@ -5892,7 +5896,7 @@ static double CalculateUrgentLatency(
}
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
@@ -5922,7 +5926,7 @@ static double CalculateTripToMemory(
double fabric_max_transport_latency_margin)
{
double trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
@@ -5931,7 +5935,7 @@ static double CalculateTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
@@ -5961,7 +5965,7 @@ static double CalculateMetaTripToMemory(
double fabric_max_transport_latency_margin)
{
double meta_trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
} else {
@@ -5969,7 +5973,7 @@ static double CalculateMetaTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
@@ -6460,8 +6464,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
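Editor's note: the deduplicated ternary above (the patch only drops a stray second semicolon) picks the 128-byte request size when the two-bytes-per-pixel test and the vertical-rotation test agree, and 64 bytes otherwise. A truth-table sketch of that selection, detached from the surrounding structs:

```c
#include <stdio.h>

int main(void)
{
	/* request size is 128 when (bytes-per-pixel == 2) and "vertically
	 * rotated" are both true or both false, else 64 */
	for (int two_bpp = 0; two_bpp <= 1; two_bpp++)
		for (int vert = 0; vert <= 1; vert++)
			printf("2Bpp=%d vert=%d -> %d bytes\n",
			       two_bpp, vert, (two_bpp == vert) ? 128 : 64);
	return 0;
}
```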
if (p->SwathHeightC[k] == 0)
@@ -7165,7 +7169,7 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycles is DispClk cycles per line * number of lines * number of slices
tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0;
+ tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
@@ -7485,7 +7489,7 @@ static void CalculateExtraLatency(
max_request_size_bytes = request_size_bytes_chroma[k];
}
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
if (max_oustanding_when_urgent_expected)
@@ -7501,11 +7505,14 @@ static void CalculateExtraLatency(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
+ dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
@@ -7739,7 +7746,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
-
s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
@@ -9304,6 +9310,10 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
}
+ if (dpte_groups_per_row_luma_ub <= 2) {
+ dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
+ }
+
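Editor's note: this guard (and its chroma twin in the next hunk) pads very small PTE group counts by one, so the per-group time budget computed later is conservative when a row splits into only one or two groups. A minimal sketch of the guard with hypothetical row geometry:

```c
#include <stdio.h>
#include <math.h>

int main(void)
{
	/* hypothetical PTE row geometry */
	unsigned int dpte_row_width_luma_ub = 512;
	unsigned int dpte_group_width_luma = 256;

	unsigned int groups = (unsigned int)ceil((double)dpte_row_width_luma_ub /
						 (double)dpte_group_width_luma);

	/* guard added by the patch: pad counts of 1 or 2 up by one group */
	if (groups <= 2)
		groups = groups + 1;

	printf("dpte_groups_per_row_luma_ub = %u\n", groups); /* 3 */
	return 0;
}
```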
dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
@@ -9332,6 +9342,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
}
+ if (dpte_groups_per_row_chroma_ub <= 2) {
+ dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
+ }
dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
@@ -9386,8 +9399,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
- unsigned int num_group_per_lower_vm_stage = 0;
- unsigned int num_req_per_lower_vm_stage = 0;
+ unsigned int num_group_per_lower_vm_stage = 1;
+ unsigned int num_req_per_lower_vm_stage = 1;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
@@ -9451,6 +9464,14 @@ static void CalculateVMGroupAndRequestTimes(
double line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
+ if (num_group_per_lower_vm_stage_flip <= 2) {
+ num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
+ }
+
+ if (num_group_per_lower_vm_stage_pref <= 2) {
+ num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
+ }
+
TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
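Editor's note: two related hardening changes meet here: the stage counters now start at 1 instead of 0, and counts of 1 or 2 are padded up, so the divisions that produce the per-group VM times are always defined and slightly conservative. A sketch combining both, under hypothetical timing inputs:

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical timing inputs */
	double dst_y_per_vm_vblank = 2.0;  /* lines of VM fetch during vblank */
	double h_total = 2200.0;
	double pixel_clock_mhz = 594.0;
	double line_time = h_total / pixel_clock_mhz; /* us per line */

	/* initialized to 1 (not 0) so the division below is always defined */
	unsigned int num_group_per_lower_vm_stage_pref = 1;

	/* same <= 2 padding as the dpte group counts earlier in the patch */
	if (num_group_per_lower_vm_stage_pref <= 2)
		num_group_per_lower_vm_stage_pref += 1;

	printf("TimePerVMGroupVBlank = %f us\n",
	       dst_y_per_vm_vblank * line_time / num_group_per_lower_vm_stage_pref);
	return 0;
}
```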
@@ -9814,14 +9835,14 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table);
+ mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
+ mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
+ mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
+ mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
+ mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
+ s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
+ mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
+ mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
@@ -9856,18 +9877,18 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0;
+ mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0;
+ mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0;
+ mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
+ mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dcfclk > 0);
dml2_assert(mode_lib->mp.FabricClock > 0);
@@ -10388,11 +10409,16 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
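Editor's note: the new dcn3-path computation sizes the out-of-order return budget as DRAM channel count times the worst of three per-channel byte limits. A self-contained sketch with hypothetical SoC parameters:

```c
#include <stdio.h>

static double max3(double a, double b, double c)
{
	double m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	/* hypothetical SoC QoS parameters */
	unsigned int channel_count = 16;
	double per_channel_pixel_only = 4096.0;
	double per_channel_pixel_and_vm = 4096.0;
	double per_channel_vm_only = 4096.0;

	/* worst-case out-of-order return bytes across all DRAM channels */
	unsigned int reordering_bytes = (unsigned int)(channel_count *
		max3(per_channel_pixel_only, per_channel_pixel_and_vm, per_channel_vm_only));

	printf("ReorderingBytes = %u\n", reordering_bytes);
	return 0;
}
```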
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
- s->ReorderBytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
mode_lib->mp.Dcfclk,
mode_lib->mp.FabricClock,
mode_lib->ip.pixel_chunk_size_kbytes,
@@ -10465,32 +10491,32 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.WritebackDelay[k] = mode_lib->mp.WritebackDelay[j];
mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = CalculateTripToMemory(
mode_lib->mp.UrgentLatency,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
@@ -10499,10 +10525,10 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
calculate_cursor_req_attributes(
@@ -11945,14 +11971,14 @@ void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
{
- // out->min_clocks.dcn4.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
- // out->min_clocks.dcn4.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
- // out->min_clocks.dcn4.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
-
- out->global_sync.dcn4.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
+ // out->min_clocks.dcn4x.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
+ // out->min_clocks.dcn4x.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
+ // out->min_clocks.dcn4x.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
+
+ out->global_sync.dcn4x.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
}
void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx)
@@ -12255,7 +12281,7 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo
out->informative.misc.cstate_max_cap_mode = mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
- out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0);
+ out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0);
out->informative.qos.max_active_fclk_change_latency_supported = mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
@@ -12368,13 +12394,13 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo
}
}
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h
deleted file mode 100644
index d76bda907ec8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#ifndef __DML2_CORE_SHARED_H__
-#define __DML2_CORE_SHARED_H__
-
-#define __DML_VBA_DEBUG__
-#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
-#define __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ 6.0 //<brief Prefetch schedule max vratio when enhance prefetch schedule acceleration is enabled and vstartup is earliest possible already
-#define __DML2_CALCS_DPP_INVALID__ 0
-#define __DML2_CALCS_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
-#define __DML2_CALCS_PIPE_NO_PLANE__ 99
-
-#include "dml2_core_shared_types.h"
-#include "dml2_internal_shared_types.h"
-
-double dml2_core_shared_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
-
-const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
-const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
-bool dml2_core_shared_is_420(enum dml2_source_format_class source_format);
-
-bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params);
-bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_ex *in_out_params);
-void dml2_core_shared_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out);
-void dml2_core_shared_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out);
-void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg, struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_per_pipe_register_set *out, int pipe_index);
-void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index);
-void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx);
-void dml2_core_shared_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
-void dml2_core_shared_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_idx);
-void dml2_core_shared_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
-void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
-void dml2_core_shared_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 1343b744eeb3..cbdfbd5a0bde 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_SHARED_TYPES_H__
#define __DML2_CORE_SHARED_TYPES_H__
@@ -10,6 +9,15 @@
#include "dml_top_display_cfg_types.h"
#include "dml_top_types.h"
+#define __DML_VBA_DEBUG__
+#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 //<brief max vratio for one-to-one prefetch bw scheduling
+#define __DML2_CALCS_MAX_VRATIO_PRE_EQU__ 6.0 //<brief max vratio for equalized prefetch bw scheduling
+#define __DML2_CALCS_MAX_VRATIO_PRE__ 8.0 //<brief max prefetch vratio register limit
+
+#define __DML2_CALCS_DPP_INVALID__ 0
+#define __DML2_CALCS_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calculation
+#define __DML2_CALCS_PIPE_NO_PLANE__ 99
+
struct dml2_core_ip_params {
unsigned int vblank_nom_default_us;
unsigned int remote_iommu_outstanding_translations;
@@ -70,6 +78,7 @@ struct dml2_core_ip_params {
unsigned int words_per_channel;
bool imall_supported;
unsigned int max_flip_time_us;
+ unsigned int max_flip_time_lines;
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
@@ -782,6 +791,7 @@ struct dml2_core_internal_mode_program {
unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int pstate_keepout_dst_lines[DML2_MAX_PLANES];
// Latency and Support
double MaxActiveFCLKChangeLatencySupported;
@@ -852,6 +862,9 @@ struct dml2_core_internal_SOCParametersList {
double USRRetrainingLatency;
double SMNLatency;
double g6_temp_read_blackout_us;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+ enum dml2_qos_param_type qos_type;
};
struct dml2_core_calcs_mode_support_locals {
@@ -865,7 +878,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
- bool dummy_boolean[2];
+ bool dummy_boolean[3];
unsigned int dummy_integer[3];
unsigned int dummy_integer_array[36][DML2_MAX_PLANES];
enum dml2_odm_mode dummy_odm_mode[DML2_MAX_PLANES];
@@ -913,9 +926,7 @@ struct dml2_core_calcs_mode_support_locals {
double HostVMInefficiencyFactor;
double HostVMInefficiencyFactorPrefetch;
- unsigned int NextMaxVStartup;
unsigned int MaxVStartup;
- bool AnyLinesForVMOrRowTooLarge;
double PixelClockBackEndFactor;
unsigned int NumDSCUnitRequired;
@@ -975,7 +986,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int DSCFormatFactor;
struct dml2_core_internal_DmlPipe SurfaceParameters[DML2_MAX_PLANES];
- unsigned int ReorderBytes;
+ unsigned int ReorderingBytes;
double HostVMInefficiencyFactor;
double HostVMInefficiencyFactorPrefetch;
unsigned int TotalDCCActiveDPP;
@@ -1176,11 +1187,15 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
+ double Tvm_no_trip_oto;
+ double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
+ double dst_y_per_vm_no_trip_vblank;
+ double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1188,6 +1203,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
+ double Lsw_equ;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
@@ -1196,11 +1212,14 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tdmec;
double Tdmsks;
double prefetch_sw_bytes;
+ double total_row_bytes;
double prefetch_bw_pr;
double bytes_pp;
double dep_bytes;
double min_Lsw_oto;
+ double min_Lsw_equ;
double Tsw_est1;
+ double Tsw_est2;
double Tsw_est3;
double prefetch_bw1;
double prefetch_bw2;
@@ -1332,6 +1351,10 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double tmp_nom_adj_factor_p1;
double tmp_pref_adj_factor_p0;
double tmp_pref_adj_factor_p1;
+ double vm_row_bw;
+ double flip_and_active_bw;
+ double flip_and_prefetch_bw;
+ double active_and_excess_bw;
};
struct dml2_core_shared_calculate_peak_bandwidth_required_locals {
@@ -1688,7 +1711,6 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
enum dml2_output_format_class OutputFormat;
unsigned int MaxInterDCNTileRepeaters;
unsigned int VStartup;
- unsigned int MaxVStartup;
unsigned int HostVMMinPageSize;
bool DynamicMetadataEnable;
bool DynamicMetadataVMEnabled;
@@ -2010,6 +2032,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_internal_display_mode_lib {
struct dml2_core_ip_params ip;
struct dml2_soc_bb soc;
+ struct dml2_ip_capabilities ip_caps;
//@brief Mode Support and Mode programming struct
// Used to hold input; intermediate and output of the calculations
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
new file mode 100644
index 000000000000..ab229e1598ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_core_utils.h"
+
+double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
+{
+ *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
+ return dividend / divisor;
+}
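Editor's note: despite the name, this helper returns the raw floating-point quotient and sets *remainder to a 0/1 flag indicating whether the division was inexact, not to the arithmetic remainder. A local usage sketch under that reading (the values are illustrative):

```c
#include <stdio.h>

/* local copy of the helper above, for illustration */
static double div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
{
	*remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
	return dividend / divisor;
}

int main(void)
{
	unsigned int inexact;
	double q = div_rem(10.0, 4, &inexact);

	/* q == 2.5, inexact == 1: the quotient has a fractional part */
	printf("q = %f, inexact = %u\n", q, inexact);
	return 0;
}
```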
+
+const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
+{
+ switch (bw_type) {
+ case (dml2_core_internal_bw_sdp):
+ return("dml2_core_internal_bw_sdp");
+ case (dml2_core_internal_bw_dram):
+ return("dml2_core_internal_bw_dram");
+ case (dml2_core_internal_bw_max):
+ return("dml2_core_internal_bw_max");
+ default:
+ return("dml2_core_internal_bw_unknown");
+ }
+}
+
+bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 1;
+ break;
+ case dml2_420_10:
+ val = 1;
+ break;
+ case dml2_420_12:
+ val = 1;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
+{
+ dml2_printf("DML: ===================================== \n");
+ dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
+ dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ if (!fail_only || support->ViewportSizeSupport == 0)
+ dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
+ dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
+ dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
+ dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
+ dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ if (!fail_only || support->ExceededMultistreamSlots == 1)
+ dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
+ dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ if (!fail_only || support->NotEnoughLanesForMSO == 1)
+ dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ if (!fail_only || support->P2IWith420 == 1)
+ dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ if (!fail_only || support->DSC422NativeNotSupported == 1)
+ dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ if (!fail_only || support->DSCSlicesODMModeSupported == 0)
+ dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ if (!fail_only || support->NotEnoughDSCUnits == 1)
+ dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ if (!fail_only || support->NotEnoughDSCSlices == 1)
+ dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
+ dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
+ dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
+ dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
+ dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ if (!fail_only || support->ROBSupport == 0)
+ dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ if (!fail_only || support->OutstandingRequestsSupport == 0)
+ dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
+ dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
+ dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ if (!fail_only || support->TotalAvailablePipesSupport == 0)
+ dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->NumberOfOTGSupport == 0)
+ dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
+ dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ if (!fail_only || support->NumberOfDP2p0Support == 0)
+ dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ if (!fail_only || support->EnoughWritebackUnits == 0)
+ dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ if (!fail_only || support->WritebackLatencySupport == 0)
+ dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ if (!fail_only || support->CursorSupport == 0)
+ dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ if (!fail_only || support->PitchSupport == 0)
+ dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ if (!fail_only || support->ViewportExceedsSurface == 1)
+ dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ if (!fail_only || support->PrefetchSupported == 0)
+ dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
+ dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->AvgBandwidthSupport == 0)
+ dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ if (!fail_only || support->DynamicMetadataSupported == 0)
+ dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ if (!fail_only || support->VRatioInPrefetchSupported == 0)
+ dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ if (!fail_only || support->ExceededMALLSize == 1)
+ dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ if (!fail_only || support->g6_temp_read_support == 0)
+ dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || support->ImmediateFlipSupport == 0)
+ dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ if (!fail_only || support->LinkCapacitySupport == 0)
+ dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+
+ if (!fail_only || support->ModeSupport == 0)
+ dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ dml2_printf("DML: ===================================== \n");
+}
+
+const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
+{
+ switch (dml2_core_internal_soc_state_type) {
+ case (dml2_core_internal_soc_state_sys_idle):
+ return("dml2_core_internal_soc_state_sys_idle");
+ case (dml2_core_internal_soc_state_sys_active):
+ return("dml2_core_internal_soc_state_sys_active");
+ case (dml2_core_internal_soc_state_svp_prefetch):
+ return("dml2_core_internal_soc_state_svp_prefetch");
+ case dml2_core_internal_soc_state_max:
+ default:
+ return("dml2_core_internal_soc_state_unknown");
+ }
+}
+
+
+void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
+{
+ for (unsigned int k = 0; k < display_cfg->num_planes; k++) {
+ double bpc = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.bpc;
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_disable) {
+ switch (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format) {
+ case dml2_444:
+ out_bpp[k] = bpc * 3;
+ break;
+ case dml2_s422:
+ out_bpp[k] = bpc * 2;
+ break;
+ case dml2_n422:
+ out_bpp[k] = bpc * 2;
+ break;
+ case dml2_420:
+ default:
+ out_bpp[k] = bpc * 1.5;
+ break;
+ }
+ } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable) {
+ out_bpp[k] = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.dsc_compressed_bpp_x16 / 16;
+ } else {
+ out_bpp[k] = 0;
+ }
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+#endif
+ }
+}
+
+unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up)
+{
+ unsigned int remainder;
+
+ if (multiple == 0)
+ return num;
+
+ remainder = num % multiple;
+ if (remainder == 0)
+ return num;
+
+ if (up)
+ return (num + multiple - remainder);
+ else
+ return (num - remainder);
+}
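Editor's note: a quick usage sketch of the rounding helper above, using a hypothetical 256-byte bus granularity:

```c
#include <stdio.h>
#include <stdbool.h>

static unsigned int round_to_multiple(unsigned int num, unsigned int multiple, bool up)
{
	unsigned int remainder;

	if (multiple == 0)
		return num;
	remainder = num % multiple;
	if (remainder == 0)
		return num;
	return up ? num + multiple - remainder : num - remainder;
}

int main(void)
{
	/* round a 1000-byte request to a 256-byte granularity */
	printf("up:   %u\n", round_to_multiple(1000, 256, true));  /* 1024 */
	printf("down: %u\n", round_to_multiple(1000, 256, false)); /* 768 */
	return 0;
}
```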
+
+unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info)
+{
+ unsigned int num_active_pipes = 0;
+
+ for (unsigned int k = 0; k < num_planes; k++) {
+ num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+#endif
+ return num_active_pipes;
+}
+
+void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane)
+{
+ unsigned int pipe_idx = 0;
+
+ for (unsigned int k = 0; k < DML2_MAX_PLANES; ++k) {
+ pipe_plane[k] = __DML2_CALCS_PIPE_NO_PLANE__;
+ }
+
+ for (unsigned int plane_idx = 0; plane_idx < DML2_MAX_PLANES; plane_idx++) {
+ for (int i = 0; i < cfg_support_info->plane_support_info[plane_idx].dpps_used; i++) {
+ pipe_plane[pipe_idx] = plane_idx;
+ pipe_idx++;
+ }
+ }
+}
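Editor's note: the mapping above lays planes out over pipes in order; each plane claims as many consecutive pipe slots as it uses DPPs, and leftover slots keep the NO_PLANE sentinel. A small sketch with a hypothetical two-plane configuration:

```c
#include <stdio.h>

#define MAX_PIPES 6
#define PIPE_NO_PLANE 99 /* mirrors __DML2_CALCS_PIPE_NO_PLANE__ */

int main(void)
{
	/* hypothetical: plane 0 uses 2 DPPs, plane 1 uses 1 */
	int dpps_used[] = { 2, 1 };
	unsigned int pipe_plane[MAX_PIPES];
	unsigned int pipe_idx = 0;

	for (unsigned int k = 0; k < MAX_PIPES; k++)
		pipe_plane[k] = PIPE_NO_PLANE;

	for (unsigned int plane = 0; plane < 2; plane++)
		for (int i = 0; i < dpps_used[plane]; i++)
			pipe_plane[pipe_idx++] = plane;

	/* prints: 0 0 1 99 99 99 */
	for (unsigned int k = 0; k < MAX_PIPES; k++)
		printf("%u ", pipe_plane[k]);
	printf("\n");
	return 0;
}
```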
+
+bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg)
+{
+ bool is_phantom = false;
+
+ if (plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe ||
+ plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) {
+ is_phantom = true;
+ }
+
+ return is_phantom;
+}
+
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
+{
+ switch (sw_mode) {
+ case (dml2_sw_linear):
+ return 256; break;
+ case (dml2_sw_256b_2d):
+ return 256; break;
+ case (dml2_sw_4kb_2d):
+ return 4096; break;
+ case (dml2_sw_64kb_2d):
+ return 65536; break;
+ case (dml2_sw_256kb_2d):
+ return 262144; break;
+ case (dml2_gfx11_sw_linear):
+ return 256; break;
+ case (dml2_gfx11_sw_64kb_d):
+ return 65536; break;
+ case (dml2_gfx11_sw_64kb_d_t):
+ return 65536; break;
+ case (dml2_gfx11_sw_64kb_d_x):
+ return 65536; break;
+ case (dml2_gfx11_sw_64kb_r_x):
+ return 65536; break;
+ case (dml2_gfx11_sw_256kb_d_x):
+ return 262144; break;
+ case (dml2_gfx11_sw_256kb_r_x):
+ return 262144; break;
+ default:
+ DML2_ASSERT(0);
+ return 256;
+ };
+}
+
+
+bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
+{
+ bool is_vert = false;
+ if (Scan == dml2_rotation_90 || Scan == dml2_rotation_270) {
+ is_vert = true;
+ } else {
+ is_vert = false;
+ }
+ return is_vert;
+}
+
+
+int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
+{
+ int unsigned version = 0;
+
+ if (sw_mode == dml2_sw_linear ||
+ sw_mode == dml2_sw_256b_2d ||
+ sw_mode == dml2_sw_4kb_2d ||
+ sw_mode == dml2_sw_64kb_2d ||
+ sw_mode == dml2_sw_256kb_2d) {
+ version = 12;
+ } else if (sw_mode == dml2_gfx11_sw_linear ||
+ sw_mode == dml2_gfx11_sw_64kb_d ||
+ sw_mode == dml2_gfx11_sw_64kb_d_t ||
+ sw_mode == dml2_gfx11_sw_64kb_d_x ||
+ sw_mode == dml2_gfx11_sw_64kb_r_x ||
+ sw_mode == dml2_gfx11_sw_256kb_d_x ||
+ sw_mode == dml2_gfx11_sw_256kb_r_x) {
+ version = 11;
+ } else {
+ dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML2_ASSERT(0);
+ }
+
+ return version;
+}
+
+unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params)
+{
+ unsigned int i;
+ unsigned int index = 0;
+
+ for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
+ dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+
+ if (i == 0)
+ index = 0;
+ else
+ index = i - 1;
+
+ if (uclk_freq_khz < per_uclk_dpm_params[i].minimum_uclk_khz ||
+ per_uclk_dpm_params[i].minimum_uclk_khz == 0) {
+ break;
+ }
+ }
+#if defined(__DML_VBA_DEBUG__)
+ dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
+ dml2_printf("DML::%s: index = %d\n", __func__, index);
+#endif
+ return index;
+}
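Editor's note: the lookup above returns the index of the last DPM level whose minimum_uclk_khz does not exceed the requested frequency, with a zero entry terminating the scan. A self-contained sketch using a hypothetical three-level table:

```c
#include <stdio.h>

#define MAX_TABLE 8

struct dpm_entry { unsigned long minimum_uclk_khz; };

static unsigned int qos_param_index(unsigned long uclk_freq_khz,
				    const struct dpm_entry *t)
{
	unsigned int i, index = 0;

	for (i = 0; i < MAX_TABLE; i++) {
		index = (i == 0) ? 0 : i - 1;
		/* stop at the first level above the target, or at the
		 * zero-filled terminator */
		if (uclk_freq_khz < t[i].minimum_uclk_khz ||
		    t[i].minimum_uclk_khz == 0)
			break;
	}
	return index;
}

int main(void)
{
	/* hypothetical DPM levels, zero-terminated */
	struct dpm_entry table[MAX_TABLE] = {
		{ 400000 }, { 800000 }, { 1200000 }, { 0 },
	};

	printf("index for 900 MHz uclk = %u\n", qos_param_index(900000, table)); /* 1 */
	return 0;
}
```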
+
+unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
+{
+ unsigned int i;
+ bool clk_entry_found = 0;
+
+ for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
+ dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+
+ if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
+ clk_entry_found = 1;
+ break;
+ }
+ }
+
+ dml2_assert(clk_entry_found);
+#if defined(__DML_VBA_DEBUG__)
+ dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ dml2_printf("DML::%s: index = %d\n", __func__, i);
+#endif
+ return i;
+}
+
+bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend)
+{
+ if (a == 0)
+ return 0;
+
+ return (math_log2_approx(a) - subtrahend);
+}
+
+static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct dml2_stream_parameters *main,
+ const struct dml2_implicit_svp_meta *meta)
+{
+ memcpy(phantom, main, sizeof(struct dml2_stream_parameters));
+
+ phantom->timing.v_total = meta->v_total;
+ phantom->timing.v_active = meta->v_active;
+ phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
+ phantom->timing.drr_config.enabled = false;
+}
+
+static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
+ const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
+{
+ memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
+
+ phantom->stream_index = phantom_stream_index;
+ phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
+ phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
+ phantom->composition.viewport.plane0.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane0.height);
+ phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane1.height);
+ phantom->immediate_flip = false;
+ phantom->dynamic_meta_data.enable = false;
+ phantom->cursor.num_cursors = 0;
+ phantom->cursor.cursor_width = 0;
+ phantom->tdlut.setup_for_tdlut = false;
+}
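Editor's note: the phantom plane sizing above clamps each viewport height to the source lines actually needed to fill the phantom stream's active region, rounded up to a 16-line granularity and never exceeding the main viewport. A standalone sketch of that clamp (all numbers hypothetical):

```c
#include <stdio.h>
#include <math.h>

int main(void)
{
	/* hypothetical main plane: 2160-line viewport, 1.5x vertical downscale */
	double main_viewport_height = 2160.0;
	double v_ratio = 1.5;
	/* hypothetical phantom stream: 200 active lines for prefetch */
	double phantom_v_active = 200.0;

	/* source lines needed = v_ratio * phantom active, rounded up to a
	 * multiple of 16, but never more than the main viewport itself */
	double needed = ceil(v_ratio * phantom_v_active / 16.0) * 16.0;
	double phantom_height = fmin(needed, main_viewport_height);

	printf("phantom viewport height = %.0f lines\n", phantom_height); /* 304 */
	return 0;
}
```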
+
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+ struct dml2_core_scratch *scratch)
+{
+ unsigned int stream_index, plane_index;
+ const struct dml2_plane_parameters *main_plane;
+ const struct dml2_stream_parameters *main_stream;
+ const struct dml2_stream_parameters *phantom_stream;
+
+ memcpy(svp_expanded_display_cfg, &display_cfg->display_config, sizeof(struct dml2_display_cfg));
+ memset(scratch->main_stream_index_from_svp_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+ memset(scratch->svp_stream_index_from_main_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+ memset(scratch->main_plane_index_to_phantom_plane_index, 0, sizeof(int) * DML2_MAX_PLANES);
+
+ if (!display_cfg->display_config.overrides.enable_subvp_implicit_pmo)
+ return;
+
+ /* disable unbounded requesting for all planes until stage 3 has been performed */
+ if (!display_cfg->stage3.performed) {
+ svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.enable = true;
+ svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.value = false;
+ }
+ // Create the phantom streams
+ for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
+ main_stream = &display_cfg->display_config.stream_descriptors[stream_index];
+ scratch->main_stream_index_from_svp_stream_index[stream_index] = stream_index;
+ scratch->svp_stream_index_from_main_stream_index[stream_index] = stream_index;
+
+ if (display_cfg->stage3.stream_svp_meta[stream_index].valid) {
+ // Create the phantom stream
+ create_phantom_stream_from_main_stream(&svp_expanded_display_cfg->stream_descriptors[svp_expanded_display_cfg->num_streams],
+ main_stream, &display_cfg->stage3.stream_svp_meta[stream_index]);
+
+ // Associate this phantom stream to the main stream
+ scratch->main_stream_index_from_svp_stream_index[svp_expanded_display_cfg->num_streams] = stream_index;
+ scratch->svp_stream_index_from_main_stream_index[stream_index] = svp_expanded_display_cfg->num_streams;
+
+ // Increment num streams
+ svp_expanded_display_cfg->num_streams++;
+ }
+ }
+
+ // Create the phantom planes
+ for (plane_index = 0; plane_index < display_cfg->display_config.num_planes; plane_index++) {
+ main_plane = &display_cfg->display_config.plane_descriptors[plane_index];
+
+ if (display_cfg->stage3.stream_svp_meta[main_plane->stream_index].valid) {
+ main_stream = &display_cfg->display_config.stream_descriptors[main_plane->stream_index];
+ phantom_stream = &svp_expanded_display_cfg->stream_descriptors[scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index]];
+ create_phantom_plane_from_main_plane(&svp_expanded_display_cfg->plane_descriptors[svp_expanded_display_cfg->num_planes],
+ main_plane, phantom_stream, scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index], main_stream);
+
+ // Associate this phantom plane to the main plane
+ scratch->phantom_plane_index_to_main_plane_index[svp_expanded_display_cfg->num_planes] = plane_index;
+ scratch->main_plane_index_to_phantom_plane_index[plane_index] = svp_expanded_display_cfg->num_planes;
+
+ // Increment num planes
+ svp_expanded_display_cfg->num_planes++;
+
+ // Adjust the main plane settings
+ svp_expanded_display_cfg->plane_descriptors[plane_index].overrides.legacy_svp_config = dml2_svp_mode_override_main_pipe;
+ }
+ }
+}
+
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_dp2p0:
+ case dml2_edp:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ return true;
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_dp2p0:
+ case dml2_edp:
+ case dml2_hdmifrl:
+ return true;
+ case dml2_hdmi:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_edp:
+ return true;
+ case dml2_dp2p0:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp2p0:
+ return true;
+ case dml2_dp:
+ case dml2_edp:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ return dml2_core_utils_is_dio_dp_encoder(stream_descriptor)
+ || dml2_core_utils_is_hpo_dp_encoder(stream_descriptor);
+}
+
+
+bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+ switch (rate) {
+ case dml2_dp_rate_hbr:
+ case dml2_dp_rate_hbr2:
+ case dml2_dp_rate_hbr3:
+ return true;
+ case dml2_dp_rate_na:
+ case dml2_dp_rate_uhbr10:
+ case dml2_dp_rate_uhbr13p5:
+ case dml2_dp_rate_uhbr20:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+ switch (rate) {
+ case dml2_dp_rate_uhbr10:
+ case dml2_dp_rate_uhbr13p5:
+ case dml2_dp_rate_uhbr20:
+ return true;
+ case dml2_dp_rate_hbr:
+ case dml2_dp_rate_hbr2:
+ case dml2_dp_rate_hbr3:
+ case dml2_dp_rate_na:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode)
+{
+ switch (odm_mode) {
+ case dml2_odm_mode_split_1to2:
+ case dml2_odm_mode_mso_1to2:
+ case dml2_odm_mode_mso_1to4:
+ return true;
+ case dml2_odm_mode_auto:
+ case dml2_odm_mode_bypass:
+ case dml2_odm_mode_combine_2to1:
+ case dml2_odm_mode_combine_3to1:
+ case dml2_odm_mode_combine_4to1:
+ default:
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
new file mode 100644
index 000000000000..a5cc6a07167a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_CORE_UTILS_H__
+#define __DML2_CORE_UTILS_H__
+#include "dml2_internal_shared_types.h"
+#include "dml2_debug.h"
+#include "lib_float_math.h"
+
+double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
+const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
+bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
+const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
+void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
+unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up);
+unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
+void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
+bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
+unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
+bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format);
+unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend);
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+ struct dml2_core_scratch *scratch);
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate);
+bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate);
+bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode);
+
+#endif /* __DML2_CORE_UTILS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index c94c4f32c957..8869ea089312 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_dpmm_dcn4.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"
@@ -83,9 +82,9 @@ static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dp
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -123,9 +122,9 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -147,9 +146,9 @@ static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency);
}
static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double margin, unsigned long vco_freq_khz, unsigned long *rounded_khz, uint32_t *divider_id)
@@ -204,6 +203,26 @@ static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double ma
return true;
}
+static bool round_to_non_dfs_granularity(unsigned long dispclk_khz, unsigned long dpprefclk_khz, unsigned long dtbrefclk_khz,
+ unsigned long *rounded_dispclk_khz, unsigned long *rounded_dpprefclk_khz, unsigned long *rounded_dtbrefclk_khz)
+{
+ unsigned long pll_frequency_khz;
+
+ pll_frequency_khz = (unsigned long) math_max2(600000, math_ceil2(math_max3(dispclk_khz, dpprefclk_khz, dtbrefclk_khz), 1000));
+
+ *rounded_dispclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dispclk_khz, 32);
+
+ *rounded_dpprefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dpprefclk_khz, 32);
+
+ if (dtbrefclk_khz > 0) {
+ *rounded_dtbrefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dtbrefclk_khz, 32);
+ } else {
+ *rounded_dtbrefclk_khz = 0;
+ }
+
+ return true;
+}
+
static bool round_up_and_copy_to_next_dpm(unsigned long min_value, unsigned long *rounded_value, const struct dml2_clk_table *clock_table)
{
bool result = false;
@@ -233,25 +252,25 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
{
bool result;
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.uclk_khz, &state_table->uclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz, &state_table->uclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk);
return result;
}
@@ -263,12 +282,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
result = false;
for (index = 0; index < state_table->uclk.num_clk_values; index++) {
- if (display_cfg->min_clocks.dcn4.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.active.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
- display_cfg->min_clocks.dcn4.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.active.fclk_khz = state_table->fclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.active.uclk_khz = state_table->uclk.clk_values_khz[index];
+ if (display_cfg->min_clocks.dcn4x.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.active.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
+ display_cfg->min_clocks.dcn4x.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.active.fclk_khz = state_table->fclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.active.uclk_khz = state_table->uclk.clk_values_khz[index];
result = true;
break;
}
@@ -277,12 +296,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
if (result) {
result = false;
for (index = 0; index < state_table->uclk.num_clk_values; index++) {
- if (display_cfg->min_clocks.dcn4.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
- display_cfg->min_clocks.dcn4.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.idle.fclk_khz = state_table->fclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.idle.uclk_khz = state_table->uclk.clk_values_khz[index];
+ if (display_cfg->min_clocks.dcn4x.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
+ display_cfg->min_clocks.dcn4x.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.idle.fclk_khz = state_table->fclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.idle.uclk_khz = state_table->uclk.clk_values_khz[index];
result = true;
break;
}
@@ -290,9 +309,9 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
}
// SVP is not supported on any coarse grained SoCs
- display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz = 0;
- display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz = 0;
- display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz = 0;
return result;
}
@@ -325,30 +344,30 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
result = map_soc_min_clocks_to_dpm_coarse_grained(display_cfg, state_table);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dispclk_khz, &state_table->dispclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.deepsleep_dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
- result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4.dppclk_khz, &state_table->dppclk);
+ result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
}
for (i = 0; i < display_cfg->display_config.num_streams; i++) {
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dscclk_khz, &state_table->dscclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dscclk_khz, &state_table->dscclk);
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dtbclk_khz, &state_table->dtbclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dtbclk_khz, &state_table->dtbclk);
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.phyclk_khz, &state_table->phyclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.phyclk_khz, &state_table->phyclk);
}
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dpprefclk_khz, &state_table->dppclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dpprefclk_khz, &state_table->dppclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dtbrefclk_khz, &state_table->dtbclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dtbrefclk_khz, &state_table->dtbclk);
return result;
}
@@ -516,15 +535,15 @@ static bool determine_power_management_features_with_fams(struct dml2_dpmm_map_m
static void clamp_uclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
- in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
}
static void clamp_fclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
- in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
}
static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -540,14 +559,14 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o
// In NV4, there's no support for FCLK or DCFCLK DPM change before SVP prefetch starts, therefore
// active minimums must be boosted to prefetch minimums
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4.active.uclk_khz)
- in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4x.active.uclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz;
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4.active.fclk_khz)
- in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4x.active.fclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz;
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4.active.dcfclk_khz)
- in_out->programming->min_clocks.dcn4.active.dcfclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4x.active.dcfclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz;
// need some massaging for the dispclk ramping cases:
dispclk_khz = mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0) * (1.0 + in_out->ip->dispclk_ramp_margin_percent / 100.0);
@@ -556,34 +575,42 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o
// but still the required dispclk can be more than the maximum dispclk speed:
dispclk_khz = math_max2(dispclk_khz, mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dispclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dispclk_did);
-
// DPP Ref is always set to max of all DPP clocks
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- if (in_out->programming->min_clocks.dcn4.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz)
- in_out->programming->min_clocks.dcn4.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz)
+ in_out->programming->min_clocks.dcn4x.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz;
}
+ in_out->programming->min_clocks.dcn4x.dpprefclk_khz = (unsigned long) (in_out->programming->min_clocks.dcn4x.dpprefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dpprefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dpprefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dpprefclk_did);
-
+ // DTB Ref is always set to max of all DTB clocks
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- in_out->programming->plane_programming[i].min_clocks.dcn4.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4.dpprefclk_khz / 255.0
- * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4.dpprefclk_khz, 1.0));
+ if (in_out->programming->min_clocks.dcn4x.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz)
+ in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz;
}
+ in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- // DTB Ref is always set to max of all DTB clocks
- for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- if (in_out->programming->min_clocks.dcn4.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz)
- in_out->programming->min_clocks.dcn4.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz;
+ if (in_out->soc_bb->no_dfs) {
+ round_to_non_dfs_granularity((unsigned long)dispclk_khz, in_out->programming->min_clocks.dcn4x.dpprefclk_khz, in_out->programming->min_clocks.dcn4x.dtbrefclk_khz,
+ &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz);
+ } else {
+ add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dispclk_did);
+
+ add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dpprefclk_did);
+
+ add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dtbrefclk_did);
}
- add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dtbrefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dtbrefclk_did);
- in_out->programming->min_clocks.dcn4.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz;
- in_out->programming->min_clocks.dcn4.socclk_khz = mode_support_result->global.socclk_khz;
+ for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
+ in_out->programming->plane_programming[i].min_clocks.dcn4x.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dpprefclk_khz / 255.0
+ * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 1.0));
+ }
+
+ in_out->programming->min_clocks.dcn4x.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz;
+ in_out->programming->min_clocks.dcn4x.socclk_khz = mode_support_result->global.socclk_khz;
result = map_min_clocks_to_dpm(mode_support_result, in_out->programming, &in_out->soc_bb->clk_table);
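For reference, the new round_to_non_dfs_granularity() above derives every clock from one shared PLL through integer dividers capped at 32, so each rounded clock lands at or above its request. A minimal standalone sketch of that arithmetic, with hypothetical clock values and plain C in place of the math_max2/math_ceil2/math_min2 helpers:

#include <stdio.h>

/* Stand-in for the DFS-less rounding: one shared PLL, integer
 * post-dividers capped at 32. */
static unsigned long round_clk(unsigned long pll_khz, unsigned long clk_khz)
{
	unsigned long div = pll_khz / clk_khz;	/* floored divider */

	if (div > 32)
		div = 32;
	return pll_khz / div;	/* >= clk_khz by construction */
}

int main(void)
{
	/* hypothetical requested clocks, in kHz */
	unsigned long dispclk = 1350000, dpprefclk = 800000, dtbrefclk = 480000;
	unsigned long pll = dispclk;

	if (dpprefclk > pll)
		pll = dpprefclk;
	if (dtbrefclk > pll)
		pll = dtbrefclk;
	/* largest request, ceiled to 1 MHz, floored at 600 MHz */
	pll = ((pll + 999) / 1000) * 1000;
	if (pll < 600000)
		pll = 600000;

	printf("pll %lu: dispclk %lu, dpprefclk %lu, dtbrefclk %lu\n", pll,
	       round_clk(pll, dispclk), round_clk(pll, dpprefclk),
	       round_clk(pll, dtbrefclk));
	return 0;
}

Because the divider is floored, pll / divider can only overshoot the request, which is the safe direction for minimum clocks.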
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index 3afb69dfd040..b165c58dfd11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DPMM_DCN4_H__
#define __DML2_DPMM_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 2c983daf2dad..3861bc6c9621 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_dpmm_factory.h"
#include "dml2_dpmm_dcn4.h"
#include "dml2_external_lib_deps.h"
@@ -21,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 80b44b4c2e68..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DPMM_FACTORY_H__
#define __DML2_DPMM_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index 5d8887ac766d..f4b1a7d02d42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_mcg_dcn4.h"
#include "dml_top_soc_parameter_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 19d178651435..02da6f45cbf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_MCG_DCN4_H__
#define __DML2_MCG_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index 55085b85f8ed..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_mcg_factory.h"
#include "dml2_mcg_dcn4.h"
#include "dml2_external_lib_deps.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
index 5dfdfed04e22..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_MCG_FACTORY_H__
#define __DML2_MCG_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index 671f9ac2627c..a31db5742675 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -2,22 +2,17 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn3.h"
static void sort(double *list_a, int list_a_size)
{
- double temp;
// For each position i in list_a[]
for (int i = 0; i < list_a_size - 1; i++) {
// Bubble the larger of each adjacent pair toward the end of list_a[]
for (int j = i; j < list_a_size - 1; j++) {
- if (list_a[j] > list_a[j + 1]) {
- temp = list_a[j];
- list_a[j] = list_a[j + 1];
- list_a[j + 1] = temp;
- }
+ if (list_a[j] > list_a[j + 1])
+ swap(list_a[j], list_a[j + 1]);
}
}
}
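The swap() now used in sort() is presumably the kernel's generic exchange macro from include/linux/minmax.h; a userspace equivalent, for reference:

#include <stdio.h>

/* Stand-in for the kernel's swap() macro: exchange two lvalues of the
 * same type through a temporary (relies on GNU C typeof). */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	double a = 2.0, b = 1.0;

	if (a > b)
		swap(a, b);
	printf("%g %g\n", a, b);	/* prints: 1 2 */
	return 0;
}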
@@ -502,7 +497,6 @@ bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -671,7 +665,7 @@ bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_su
struct dml2_pmo_instance *pmo = in_out->instance;
unsigned int stream_index;
bool success = false;
- bool reached_end = true;
+ bool reached_end;
memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index cc350f88d4d2..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_DCN3_H__
#define __DML2_PMO_DCN3_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c
deleted file mode 100644
index 8952dd7e36cb..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c
+++ /dev/null
@@ -1,1250 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4.h"
-
-static const int MIN_VACTIVE_MARGIN_US = 100; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
-static const int SUBVP_DRR_MARGIN_US = 100;
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_1_display[][4] = {
- // VActive Preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VBlank
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-};
-
-static const int full_strategy_list_1_display_size = sizeof(full_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_2_display[][4] = {
- // VActive only is preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VActive + VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VBlank only
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP + VBlank
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP + SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR + DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-};
-
-static const int full_strategy_list_2_display_size = sizeof(full_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_3_display[][4] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 3 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR
-};
-
-static const int full_strategy_list_3_display_size = sizeof(full_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_4_display[][4] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 1 VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 2 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
-
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All Vblank
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR
-};
-
-static const int full_strategy_list_4_display_size = sizeof(full_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
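The *_display_size constants in the removed file derive the row count from the flattened table: total bytes divided by (element size x 4 columns). A standalone illustration of that arithmetic with a stand-in enum:

#include <stdio.h>

enum strategy { na, vactive, vblank, fw_svp, fw_drr };

static const enum strategy table[][4] = {
	{ vactive, na, na, na },
	{ fw_svp,  na, na, na },
	{ vblank,  na, na, na },
	{ fw_drr,  na, na, na },
};

int main(void)
{
	/* total bytes / (bytes per entry * 4 entries per row) == rows */
	printf("%zu rows\n", sizeof(table) / (sizeof(enum strategy) * 4));
	return 0;	/* prints: 4 rows */
}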
-static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
-{
- bool result = true;
-
- if (*odm_mode == dml2_odm_mode_auto) {
- switch (odms_calculated) {
- case 1:
- *odm_mode = dml2_odm_mode_bypass;
- break;
- case 2:
- *odm_mode = dml2_odm_mode_combine_2to1;
- break;
- case 3:
- *odm_mode = dml2_odm_mode_combine_3to1;
- break;
- case 4:
- *odm_mode = dml2_odm_mode_combine_4to1;
- break;
- default:
- result = false;
- break;
- }
- }
-
- if (result) {
- if (*odm_mode == dml2_odm_mode_bypass) {
- *odm_mode = dml2_odm_mode_combine_2to1;
- } else if (*odm_mode == dml2_odm_mode_combine_2to1) {
- *odm_mode = dml2_odm_mode_combine_3to1;
- } else if (*odm_mode == dml2_odm_mode_combine_3to1) {
- *odm_mode = dml2_odm_mode_combine_4to1;
- } else {
- result = false;
- }
- }
-
- return result;
-}
-
-static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
-{
- if (*mpc_combine_factor < limit) {
- (*mpc_combine_factor)++;
- return true;
- }
-
- return false;
-}
-
-static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
-{
- unsigned int i;
- int count;
-
- count = 0;
- for (i = 0; i < display_cfg->num_planes; i++) {
- if (display_cfg->plane_descriptors[i].stream_index == stream_index)
- count++;
- }
-
- return count;
-}
-
-static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
- int free_pipes)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- unsigned int i;
- bool result = true;
-
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
- // For pipes that failed dcc mcache check, we want to increase the pipe count.
- // The logic for doing this depends on how many pipes are already being used,
- // and whether it's mpcc or odm combine.
- if (!in_out->dcc_mcache_supported[i]) {
- // For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
- if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
- in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
- in_out->cfg_support_info->plane_support_info[i].dpps_used;
- // For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
- if (free_pipes > 0) {
- if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
- pmo->mpc_combine_limit)) {
- // We've reached max pipes allocatable to a single plane, so we fail.
- result = false;
- break;
- } else {
- // Successfully added another pipe to this failing plane.
- free_pipes--;
- }
- } else {
- // No free pipes to add.
- result = false;
- break;
- }
- } else {
- // If the stream of this plane needs ODM combine, no further optimization can be done.
- result = false;
- break;
- }
- }
- }
-
- return result;
-}
-
-bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- unsigned int i, used_pipes, free_pipes, planes_on_stream;
- bool result;
-
- if (in_out->display_config != in_out->optimized_display_cfg) {
- memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
- }
-
- //Count number of free pipes, and check if any odm combine is in use.
- used_pipes = 0;
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
- used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
- }
- free_pipes = pmo->ip_caps->pipe_count - used_pipes;
-
- // Optimization loop
- // The goal here is to add more pipes to any planes
- // which are failing mcache admissibility
- result = true;
-
- // The optimization logic depends on whether ODM combine is enabled, and the stream count.
- if (in_out->optimized_display_cfg->num_streams > 1) {
- // If there are multiple streams, we can only optimize mcache failures on planes
- // which are not ODM combined.
-
- result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
- } else if (in_out->optimized_display_cfg->num_streams == 1) {
- // In single stream cases, we still optimize mcache failures when there's ODM combine with some
- // additional logic.
-
- if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
- // If ODM combine is enabled, then the logic is to increase ODM combine factor.
-
- // Optimization for streams with > 1 ODM combine factor is only supported for single display.
- planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);
-
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
- // For pipes that failed dcc mcache check, we want to increase the pipe count.
- // The logic for doing this depends on how many pipes are already being used,
- // and whether it's mpcc or odm combine.
- if (!in_out->dcc_mcache_supported[i]) {
- // Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream.
- if (free_pipes >= planes_on_stream) {
- if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
- in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
- result = false;
- } else {
- free_pipes -= planes_on_stream;
- break;
- }
- } else {
- result = false;
- break;
- }
- }
- }
- } else {
- // If ODM combine is not enabled, then we can actually use the same logic as before.
-
- result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
- }
- } else {
- result = true;
- }
-
- return result;
-}
-
-bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- pmo->soc_bb = in_out->soc_bb;
- pmo->ip_caps = in_out->ip_caps;
- pmo->mpc_combine_limit = 2;
- pmo->odm_combine_limit = 4;
- pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;
-
- pmo->fams_params.v1.subvp.fw_processing_delay_us = 10;
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us = 50;
- pmo->fams_params.v1.subvp.refresh_rate_limit_max = 175;
- pmo->fams_params.v1.subvp.refresh_rate_limit_min = 0;
-
- pmo->options = in_out->options;
-
- return true;
-}
-
-static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
-{
- /*
- * Htotal, Hblank start/end, and Hsync start/end all must be divisible
- * in order for the horizontal timing params to be considered divisible
- * by 2. Hsync start is always 0.
- */
- unsigned long h_blank_start = timing->h_total - timing->h_front_porch;
-
- return (timing->h_total % denominator == 0) &&
- (h_blank_start % denominator == 0) &&
- (timing->h_blank_end % denominator == 0) &&
- (timing->h_sync_width % denominator == 0);
-}
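A small standalone check mirroring the removed helper above, using hypothetical nominal 1280x720@60 CEA timing values; it shows why such a mode passes the divisible-by-2 test (ODM 2:1) but fails divisible-by-4 (ODM 3:1/4:1):

#include <stdbool.h>
#include <stdio.h>

struct timing { unsigned long h_total, h_front_porch, h_blank_end, h_sync_width; };

/* Same test as is_h_timing_divisible_by(): every horizontal parameter,
 * including the derived blank start, must divide evenly. */
static bool divisible_by(const struct timing *t, unsigned int d)
{
	unsigned long h_blank_start = t->h_total - t->h_front_porch;

	return t->h_total % d == 0 && h_blank_start % d == 0 &&
	       t->h_blank_end % d == 0 && t->h_sync_width % d == 0;
}

int main(void)
{
	/* hypothetical 720p60 timing: h_total 1650, front porch 110,
	 * blank end 260 (sync + back porch), sync width 40 */
	struct timing t = { 1650, 110, 260, 40 };

	printf("div by 2: %d, div by 4: %d\n",
	       divisible_by(&t, 2), divisible_by(&t, 4));	/* 1, 0 */
	return 0;
}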
-
-static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
-{
- switch (encoder_type) {
- case dml2_dp:
- case dml2_edp:
- case dml2_dp2p0:
- case dml2_none:
- return true;
- case dml2_hdmi:
- case dml2_hdmifrl:
- default:
- return false;
- }
-}
-
-bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
-{
- unsigned int i;
- const struct dml2_display_cfg *display_config =
- &in_out->base_display_config->display_config;
- const struct dml2_core_mode_support_result *mode_support_result =
- &in_out->base_display_config->mode_support_result;
-
- if (in_out->instance->options->disable_dyn_odm ||
- (in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
- return false;
-
- for (i = 0; i < display_config->num_planes; i++)
- /*
- * vmin optimization is required to be seamlessly switched off
- * at any time when the new configuration is no longer
- * supported. However switching from ODM combine to MPC combine
- * is not always seamless. When there are not enough free pipes, we
- * will have to use the same secondary OPP heads as secondary
- * DPP pipes in MPC combine in new state. This transition is
- * expected to cause glitches. To avoid the transition, we only
- * allow vmin optimization if the stream's base configuration
- * doesn't require MPC combine. This condition checks if MPC
- * combine is enabled. If so, do not optimize the stream.
- */
- if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
- mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
- in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
-
- for (i = 0; i < display_config->num_streams; i++) {
- if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
- in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- /*
- * ODM Combine requires horizontal timing divisible by 2 so each
- * ODM segment has the same size.
- */
- else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- /*
- * Our hardware supports seamless ODM transitions for DP encoders
- * only.
- */
- else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- }
-
- return true;
-}
-
-bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
-{
- bool is_vmin = true;
-
- if (in_out->vmin_limits->dispclk_khz > 0 &&
- in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
- is_vmin = false;
-
- return is_vmin;
-}
-
-static int find_highest_odm_load_stream_index(
- const struct dml2_display_cfg *display_config,
- const struct dml2_core_mode_support_result *mode_support_result)
-{
- unsigned int i;
- int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
-
- for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
- / mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
- if (odm_load > highest_odm_load) {
- highest_odm_load_index = i;
- highest_odm_load = odm_load;
- }
- }
-
- return highest_odm_load_index;
-}
-
-bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
-{
- int stream_index;
- const struct dml2_display_cfg *display_config =
- &in_out->base_display_config->display_config;
- const struct dml2_core_mode_support_result *mode_support_result =
- &in_out->base_display_config->mode_support_result;
- unsigned int odms_used;
- struct dml2_stream_parameters *stream_descriptor;
- bool optimizable = false;
-
- /*
- * The highest ODM load stream must be optimizable to continue, as
- * dispclk is bounded by it.
- */
- stream_index = find_highest_odm_load_stream_index(display_config,
- mode_support_result);
-
- if (stream_index < 0 ||
- in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
- return false;
-
- odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
- if ((int)odms_used >= in_out->instance->odm_combine_limit)
- return false;
-
- memcpy(in_out->optimized_display_config,
- in_out->base_display_config,
- sizeof(struct display_configuation_with_meta));
-
- stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
- while (!optimizable && increase_odm_combine_factor(
- &stream_descriptor->overrides.odm_mode,
- odms_used)) {
- switch (stream_descriptor->overrides.odm_mode) {
- case dml2_odm_mode_combine_2to1:
- optimizable = true;
- break;
- case dml2_odm_mode_combine_3to1:
- /*
- * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of
- * actual pixel rate. Therefore horizontal timing must
- * be divisible by 4.
- */
- if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
- /*
- * DSC h slice count must be divisible
- * by 3.
- */
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
- optimizable = true;
- } else {
- optimizable = true;
- }
- }
- break;
- case dml2_odm_mode_combine_4to1:
- /*
- * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of
- * actual pixel rate. Therefore horizontal timing must
- * be divisible by 4.
- */
- if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
- /*
- * DSC h slice count must be divisible
- * by 4.
- */
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
- optimizable = true;
- } else {
- optimizable = true;
- }
- }
- break;
- case dml2_odm_mode_auto:
- case dml2_odm_mode_bypass:
- case dml2_odm_mode_split_1to2:
- case dml2_odm_mode_mso_1to2:
- case dml2_odm_mode_mso_1to4:
- default:
- break;
- }
- }
-
- return optimizable;
-}
-
-static bool are_timings_trivially_synchronizable(const struct display_configuation_with_meta *display_config, int mask)
-{
- unsigned char i;
- bool identical = true;
- bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
-
- // Create a remap array to enable simple iteration through only masked stream indices
- for (i = 0; i < display_config->display_config.num_streams; i++) {
- if (mask & (0x1 << i)) {
- remap_array[remap_array_size++] = i;
- }
- }
-
- // 0 or 1 display is always trivially synchronizable
- if (remap_array_size <= 1)
- return true;
-
- for (i = 1; i < remap_array_size; i++) {
- if (memcmp(&display_config->display_config.stream_descriptors[remap_array[i - 1]].timing,
- &display_config->display_config.stream_descriptors[remap_array[i]].timing,
- sizeof(struct dml2_timing_cfg))) {
- identical = false;
- break;
- }
- }
-
- for (i = 0; i < remap_array_size; i++) {
- if (display_config->display_config.stream_descriptors[remap_array[i]].timing.drr_config.enabled) {
- contains_drr = true;
- break;
- }
- }
-
- return !contains_drr && identical;
-}
-
-static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
-{
- *bit_field = *bit_field | (0x1 << bit_offset);
-}
-
-static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
-{
- if (bit_field & (0x1 << bit_offset))
- return true;
-
- return false;
-}
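These two one-bit helpers back every stream/plane mask used by the validators below; a compact usage sketch of the same pair:

#include <stdbool.h>
#include <stdio.h>

static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
{
	*bit_field |= 0x1 << bit_offset;
}

static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
{
	return (bit_field & (0x1 << bit_offset)) != 0;
}

int main(void)
{
	unsigned int svp_stream_mask = 0;

	set_bit_in_bitfield(&svp_stream_mask, 0);	/* stream 0 uses SVP */
	set_bit_in_bitfield(&svp_stream_mask, 2);	/* stream 2 uses SVP */
	for (unsigned int i = 0; i < 4; i++)
		printf("stream %u: %d\n", i,
		       is_bit_set_in_bitfield(svp_stream_mask, i));
	return 0;
}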
-
-static bool are_all_timings_drr_enabled(const struct display_configuation_with_meta *display_config, int mask)
-{
- unsigned char i;
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(mask, i)) {
- if (!display_config->display_config.stream_descriptors[i].timing.drr_config.enabled)
- return false;
- }
- }
-
- return true;
-}
-
-static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
-{
- int stream_index;
-
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true;
-
- for (stream_index = 0; stream_index < stream_count; stream_index++) {
- scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index];
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank)
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false;
- }
-
- scratch->pmo_dcn4.num_pstate_candidates++;
-}
-
-static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy)
-{
- unsigned char i;
- enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
-
- if (strategy == dml2_pmo_pstate_strategy_vactive)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (strategy == dml2_pmo_pstate_strategy_vblank)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (strategy == dml2_pmo_pstate_strategy_fw_svp)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (strategy == dml2_pmo_pstate_strategy_fw_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
-
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
- display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy)
- return false;
- }
- }
-
- return true;
-}
-
-static bool subvp_subvp_schedulable(struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg,
- unsigned char *svp_stream_indicies, char svp_stream_count)
-{
- struct dml2_pmo_scratch *s = &pmo->scratch;
- int i;
- int microschedule_lines, time_us, refresh_hz;
- int max_microschedule_us = 0;
- int vactive1_us, vactive2_us, vblank1_us, vblank2_us;
-
- const struct dml2_timing_cfg *svp_timing1 = 0;
- const struct dml2_implicit_svp_meta *svp_meta1 = 0;
-
- const struct dml2_timing_cfg *svp_timing2 = 0;
-
- if (svp_stream_count <= 1)
- return true;
- else if (svp_stream_count > 2)
- return false;
-
- /* Loop to calculate the maximum microschedule time between the two SubVP streams,
- * and to check each stream's refresh rate against the FAMS limits.
- */
- for (i = 0; i < svp_stream_count; i++) {
- svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[i]].timing;
- svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[i]];
-
- microschedule_lines = svp_meta1->v_active;
-
- // Round up when calculating microschedule time (+ 1 at the end)
- time_us = (int)((microschedule_lines * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us + pmo->fams_params.v1.subvp.fw_processing_delay_us + 1);
-
- if (time_us > max_microschedule_us)
- max_microschedule_us = time_us;
-
- refresh_hz = (int)((double)(svp_timing1->pixel_clock_khz * 1000) / (svp_timing1->v_total * svp_timing1->h_total));
-
- if (refresh_hz < pmo->fams_params.v1.subvp.refresh_rate_limit_min ||
- refresh_hz > pmo->fams_params.v1.subvp.refresh_rate_limit_max) {
- return false;
- }
- }
-
- svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[0]].timing;
- svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[0]];
-
- vactive1_us = (int)((svp_timing1->v_active * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000);
-
- vblank1_us = (int)(((svp_timing1->v_total - svp_timing1->v_active) * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000);
-
- svp_timing2 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[1]].timing;
-
- vactive2_us = (int)((svp_timing2->v_active * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000);
-
- vblank2_us = (int)(((svp_timing2->v_total - svp_timing2->v_active) * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000);
-
- if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
- (vactive2_us - vblank1_us) / 2 > max_microschedule_us)
- return true;
-
- return false;
-}
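To put numbers on the overlap test above: a standalone calculation with two identical hypothetical 1080p60 streams (h_total 2200, v_total 1125, pixel clock 148500 kHz), a hypothetical phantom v_active of 400 lines, and the 10/50 us FW delay defaults from pmo_dcn4_initialize(); with identical streams the two-sided test is symmetric, so one margin suffices:

#include <stdio.h>

int main(void)
{
	double pix_hz = 148500.0 * 1000;	/* 148500 kHz */
	double h_total = 2200, v_total = 1125, v_active = 1080;
	double phantom_lines = 400, fw_us = 10, p2m_us = 50;

	/* microschedule: phantom active time plus FW overheads, rounded up */
	int sched_us = (int)(phantom_lines * h_total / pix_hz * 1000000 +
			     p2m_us + fw_us + 1);
	int vactive_us = (int)(v_active * h_total / pix_hz * 1000000);
	int vblank_us = (int)((v_total - v_active) * h_total / pix_hz * 1000000);

	/* schedulable when half the vactive/vblank difference exceeds it */
	printf("microschedule %d us, margin %d us -> %s\n", sched_us,
	       (vactive_us - vblank_us) / 2,
	       (vactive_us - vblank_us) / 2 > sched_us ? "OK" : "reject");
	return 0;	/* microschedule 5986 us, margin 7667 us -> OK */
}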
-
-static bool validate_svp_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask)
-{
- bool result = false;
- unsigned char stream_index;
-
- unsigned char svp_stream_indicies[2] = { 0 };
- unsigned char svp_stream_count = 0;
-
- // Find the SVP streams, store only the first 2, but count all of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- if (svp_stream_count < 2)
- svp_stream_indicies[svp_stream_count] = stream_index;
-
- svp_stream_count++;
- }
- }
-
- if (svp_stream_count == 1) {
- result = true; // 1 SVP is always co_functional
- } else if (svp_stream_count == 2) {
- result = subvp_subvp_schedulable(pmo, display_cfg, svp_stream_indicies, svp_stream_count);
- }
-
- return result;
-}
-
-static bool validate_drr_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int drr_stream_mask)
-{
- unsigned char stream_index;
- int drr_stream_count = 0;
-
- // Find the DRR streams and count all of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) {
- drr_stream_count++;
- }
- }
-
- return drr_stream_count <= 4;
-}
-
-static bool validate_svp_drr_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int drr_stream_mask)
-{
- unsigned char stream_index;
- int drr_stream_count = 0;
- int svp_stream_count = 0;
-
- int prefetch_us = 0;
- int mall_region_us = 0;
- int drr_frame_us = 0; // nominal frame time
- int subvp_active_us = 0;
- int stretched_drr_us = 0;
- int drr_stretched_vblank_us = 0;
- int max_vblank_mallregion = 0;
-
- const struct dml2_timing_cfg *svp_timing = 0;
- const struct dml2_timing_cfg *drr_timing = 0;
- const struct dml2_implicit_svp_meta *svp_meta = 0;
-
- bool schedulable = false;
-
- // Find the SVP and DRR streams and count each of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- svp_stream_count++;
- }
- if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) {
- drr_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- drr_stream_count++;
- }
- }
-
- if (svp_stream_count == 1 && drr_stream_count == 1 && svp_timing != drr_timing) {
- prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch)
- * svp_timing->h_total / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us);
-
- subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- drr_frame_us = (int)(drr_timing->v_total * drr_timing->h_total /
- (double)(drr_timing->pixel_clock_khz * 1000) * 1000000);
-
- // P-State allow width and FW delays already included in phantom_timing->v_addressable
- mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-
- drr_stretched_vblank_us = (int)((drr_timing->v_total - drr_timing->v_active) * drr_timing->h_total /
- (double)(drr_timing->pixel_clock_khz * 1000) * 1000000 + (stretched_drr_us - drr_frame_us));
-
- max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
-
- /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
- * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
- * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
- * and the max of (VBLANK blanking time, MALL region)).
- */
- if (stretched_drr_us < (1 / (double)drr_timing->drr_config.min_refresh_uhz) * 1000000 * 1000000 &&
- subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
- schedulable = true;
- }
-
- return schedulable;
-}
-
-static bool validate_svp_vblank_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int vblank_stream_mask)
-{
- unsigned char stream_index;
- int vblank_stream_count = 0;
- int svp_stream_count = 0;
-
- const struct dml2_timing_cfg *svp_timing = 0;
- const struct dml2_timing_cfg *vblank_timing = 0;
- const struct dml2_implicit_svp_meta *svp_meta = 0;
-
- int prefetch_us = 0;
- int mall_region_us = 0;
- int vblank_frame_us = 0;
- int subvp_active_us = 0;
- int vblank_blank_us = 0;
- int max_vblank_mallregion = 0;
-
- bool schedulable = false;
-
-	// Find the SVP and VBlank streams and count all of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- svp_stream_count++;
- }
- if (is_bit_set_in_bitfield(vblank_stream_mask, stream_index)) {
- vblank_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- vblank_stream_count++;
- }
- }
-
- if (svp_stream_count == 1 && vblank_stream_count > 0) {
- // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
-		// Also include the prefetch end to MALL start delay time
- prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch) * svp_timing->h_total
- / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us);
-
-		// P-State allow width and FW delays are already included in phantom_timing->v_addressable
- mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- vblank_frame_us = (int)(vblank_timing->v_total * vblank_timing->h_total /
- (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000);
-
- vblank_blank_us = (int)((vblank_timing->v_total - vblank_timing->v_active) * vblank_timing->h_total /
- (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000);
-
- subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
-
- // Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
- // and the max of (VBLANK blanking time, MALL region)
- // TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
- if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
- schedulable = true;
- }
- return schedulable;
-}
-
-static bool validate_drr_vblank_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int drr_stream_mask, int vblank_stream_mask)
-{
- return false;
-}
-
-static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[4])
-{
- struct dml2_pmo_scratch *s = &pmo->scratch;
-
- unsigned char stream_index = 0;
-
- unsigned int svp_count = 0;
- unsigned int svp_stream_mask = 0;
- unsigned int drr_count = 0;
- unsigned int drr_stream_mask = 0;
- unsigned int vactive_count = 0;
- unsigned int vactive_stream_mask = 0;
- unsigned int vblank_count = 0;
- unsigned int vblank_stream_mask = 0;
-
- bool strategy_matches_forced_requirements = true;
-
- bool admissible = false;
-
- // Tabulate everything
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
-
- if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
- per_stream_pstate_strategy[stream_index])) {
- strategy_matches_forced_requirements = false;
- break;
- }
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- svp_count++;
- set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- drr_count++;
- set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive) {
- vactive_count++;
- set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank) {
- vblank_count++;
- set_bit_in_bitfield(&vblank_stream_mask, stream_index);
- }
- }
-
- if (!strategy_matches_forced_requirements)
- return false;
-
- // Check for trivial synchronization for vblank
- if (vblank_count > 0 && (pmo->options->disable_vblank || !are_timings_trivially_synchronizable(display_cfg, vblank_stream_mask)))
- return false;
-
- if (svp_count > 0 && pmo->options->disable_svp)
- return false;
-
- if (drr_count > 0 && (pmo->options->disable_drr_var || !are_all_timings_drr_enabled(display_cfg, drr_stream_mask)))
- return false;
-
-	// Validate for FAMS admissibility
- if (svp_count == 0 && drr_count == 0) {
- // No FAMS
- admissible = true;
- } else {
- admissible = false;
- if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count == 0) {
- // All SVP
- admissible = validate_svp_cofunctionality(pmo, display_cfg, svp_stream_mask);
- } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) {
- // All DRR
- admissible = validate_drr_cofunctionality(pmo, display_cfg, drr_stream_mask);
- } else if (svp_count > 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) {
- // SVP + DRR
- admissible = validate_svp_drr_cofunctionality(pmo, display_cfg, svp_stream_mask, drr_stream_mask);
- } else if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count > 0) {
- // SVP + VBlank
- admissible = validate_svp_vblank_cofunctionality(pmo, display_cfg, svp_stream_mask, vblank_stream_mask);
- } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count > 0) {
- // DRR + VBlank
- admissible = validate_drr_vblank_cofunctionality(pmo, display_cfg, drr_stream_mask, vblank_stream_mask);
- }
- }
-
- return admissible;
-}
-
-static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
-{
- unsigned char i;
- int min_vactive_margin_us = 0xFFFFFFF;
-
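-	// Return the smallest DRAM change latency hiding margin across the planes in the mask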
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us)
- min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active;
- }
- }
-
- return min_vactive_margin_us;
-}
-
-bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
- struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
- struct dml2_pmo_scratch *s = &pmo->scratch;
-
- struct display_configuation_with_meta *display_config;
- const struct dml2_plane_parameters *plane_descriptor;
- const enum dml2_pmo_pstate_strategy (*strategy_list)[4] = 0;
- unsigned int strategy_list_size = 0;
- unsigned int plane_index, stream_index, i;
-
- state->performed = true;
-
- display_config = in_out->base_display_config;
- display_config->display_config.overrides.enable_subvp_implicit_pmo = true;
-
- memset(s, 0, sizeof(struct dml2_pmo_scratch));
-
- pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
- pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size - 1;
- pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
-
- // First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping)
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- plane_descriptor = &display_config->display_config.plane_descriptors[plane_index];
-
- set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
-
- state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
- }
-
- // Figure out which streams can do vactive, and also build up implicit SVP meta
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >=
- MIN_VACTIVE_MARGIN_US)
- set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
-
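-		// Build the implicit SVP meta from the phantom timing computed during mode support;
-		// the phantom stream is modeled with a 1-line front porch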
- s->pmo_dcn4.stream_svp_meta[stream_index].valid = true;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_active =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_total =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_front_porch = 1;
- }
-
- switch (display_config->display_config.num_streams) {
- case 1:
- strategy_list = full_strategy_list_1_display;
- strategy_list_size = full_strategy_list_1_display_size;
- break;
- case 2:
- strategy_list = full_strategy_list_2_display;
- strategy_list_size = full_strategy_list_2_display_size;
- break;
- case 3:
- strategy_list = full_strategy_list_3_display;
- strategy_list_size = full_strategy_list_3_display_size;
- break;
- case 4:
- strategy_list = full_strategy_list_4_display;
- strategy_list_size = full_strategy_list_4_display_size;
- break;
- default:
- strategy_list_size = 0;
- break;
- }
-
- if (strategy_list_size == 0)
- return false;
-
- s->pmo_dcn4.num_pstate_candidates = 0;
-
- for (i = 0; i < strategy_list_size && i < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
- if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) {
- insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s);
- }
- }
-
- if (s->pmo_dcn4.num_pstate_candidates > 0) {
-		// Special case:
-		// If the first entry in the candidate list is all vactive, it can already be considered "tested", so the current index is 0
-		// Otherwise the current index starts at -1 because the optimization must run at least once
- s->pmo_dcn4.cur_pstate_candidate = 0;
- for (i = 0; i < display_config->display_config.num_streams; i++) {
- if (s->pmo_dcn4.per_stream_pstate_strategy[0][i] != dml2_pmo_pstate_strategy_vactive) {
- s->pmo_dcn4.cur_pstate_candidate = -1;
- break;
- }
- }
- return true;
- } else {
- return false;
- }
-}
-
-static void reset_display_configuration(struct display_configuation_with_meta *display_config)
-{
- unsigned int plane_index;
- unsigned int stream_index;
- struct dml2_plane_parameters *plane;
-
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- display_config->stage3.stream_svp_meta[stream_index].valid = false;
- }
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Unset SubVP
- plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto;
-
- // Remove reserve time
- plane->overrides.reserved_vblank_time_ns = 0;
-
- // Reset strategy to auto
- plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported;
- }
-}
-
-static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- struct dml2_plane_parameters *plane;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Setup DRR
- plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr;
- }
- }
-}
-
-static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- int stream_index = -1;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- }
- }
-
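-	// Mark the owning stream's SVP meta valid, using the phantom timing from mode support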
- if (stream_index >= 0) {
- display_config->stage3.stream_svp_meta[stream_index].valid = true;
- display_config->stage3.stream_svp_meta[stream_index].v_active =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active;
- display_config->stage3.stream_svp_meta[stream_index].v_total =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total;
- display_config->stage3.stream_svp_meta[stream_index].v_front_porch = 1;
- }
-}
-
-static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- struct dml2_plane_parameters *plane;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Setup reserve time
- plane->overrides.reserved_vblank_time_ns = 400 * 1000;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
- }
- }
-}
-
-static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
- }
- }
-}
-
-static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_scratch *scratch, int strategy_index)
-{
- bool success = true;
- unsigned char stream_index;
-
- reset_display_configuration(display_config);
-
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) {
- success = false;
- break;
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) {
- setup_planes_for_vblank_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- setup_planes_for_svp_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- setup_planes_for_drr_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) {
- setup_planes_for_vactive_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- }
- }
-
- return success;
-}
-
-static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000))
- min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
- }
- }
- return min_time_us;
-}
-
-bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
-{
- bool p_state_supported = true;
- unsigned int stream_index;
- struct dml2_pmo_scratch *s = &in_out->instance->scratch;
-
- if (s->pmo_dcn4.cur_pstate_candidate < 0)
- return false;
-
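-	// Check that every stream still satisfies the requirements of its assigned p-state method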
- for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
-
- if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive) {
- if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_US) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank) {
- if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
- in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr)) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) {
- p_state_supported = false;
- break;
- }
- }
-
- return p_state_supported;
-}
-
-bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
-{
- bool success = false;
- struct dml2_pmo_scratch *s = &in_out->instance->scratch;
-
- memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
-
- if (in_out->last_candidate_failed) {
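-		// Retry the same candidate at a higher latency index if the strategy allows state increase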
- if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] &&
- s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) {
- s->pmo_dcn4.cur_latency_index++;
-
- success = true;
- }
- }
-
- if (!success) {
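-		// Otherwise reset the latency index and advance to the next candidate strategy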
- s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index;
- s->pmo_dcn4.cur_pstate_candidate++;
-
- if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) {
- success = true;
- }
- }
-
- if (success) {
- in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index;
- setup_display_config(in_out->optimized_display_config, &in_out->instance->scratch, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate);
- }
-
- return success;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h
deleted file mode 100644
index 09cacc933d21..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#ifndef __DML2_PMO_DCN4_H__
-#define __DML2_PMO_DCN4_H__
-
-#include "dml2_internal_shared_types.h"
-
-bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out);
-
-bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);
-
-bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out);
-bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out);
-bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out);
-
-bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
-bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
-bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);
-
-bool pmo_dcn4_unit_test(void);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 6547cc2c2a77..d63558ee3135 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1,122 +1,181 @@
-/*
-* Copyright 2022 Advanced Micro Devices, Inc.
-*
-* Permission is hereby granted, free of charge, to any person obtaining a
-* copy of this software and associated documentation files (the "Software"),
-* to deal in the Software without restriction, including without limitation
-* the rights to use, copy, modify, merge, publish, distribute, sublicense,
-* and/or sell copies of the Software, and to permit persons to whom the
-* Software is furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-* OTHER DEALINGS IN THE SOFTWARE.
-*
-* Authors: AMD
-*
-*/
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4.h"
#include "dml2_debug.h"
#include "lib_float_math.h"
#include "dml2_pmo_dcn4_fams2.h"
static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
-static const enum dml2_pmo_pstate_strategy base_strategy_list_1_display[][PMO_DCN4_MAX_DISPLAYS] = {
+static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then VBlank
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // Then DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Finally VBlank, but allow base clocks for latency to increase
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy);
-static const enum dml2_pmo_pstate_strategy base_strategy_list_2_display[][PMO_DCN4_MAX_DISPLAYS] = {
+static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
// VActive only is preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then VActive + VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then VBlank only
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then SVP + VBlank
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then SVP + DRR
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then SVP + SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then DRR + VActive
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then DRR + VBlank
- //{ dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR + DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Then DRR + DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Finally VBlank, but allow base clocks for latency to increase
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
-
-static const enum dml2_pmo_pstate_strategy base_strategy_list_3_display[][PMO_DCN4_MAX_DISPLAYS] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive
-
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 1 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 2 DRR
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR
+static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy);
+
+static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
+ // All VActive
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // VActive + 1 VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // All VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // All DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // All VBlank, with state increase allowed
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
-
-static const enum dml2_pmo_pstate_strategy base_strategy_list_4_display[][PMO_DCN4_MAX_DISPLAYS] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive
-
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, // VActive + 1 VBlank
-
- //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 2 VBlank
-
- //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All Vblank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 1 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 2 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 3 DRR
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR
+static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy);
+
+static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
+ // All VActive
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
+ .allow_state_increase = true,
+ },
+
+ // VActive + 1 VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = false,
+ },
+
+	// All VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = false,
+ },
+
+ // All DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr },
+ .allow_state_increase = true,
+ },
+
+ // All VBlank, with state increase allowed
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);
static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
@@ -275,7 +334,6 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -296,9 +354,9 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
return result;
}
-static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_strategy base_strategy)
+static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy)
{
- enum dml2_pmo_pstate_strategy variant_strategy = 0;
+ enum dml2_pmo_pstate_method variant_strategy = 0;
switch (base_strategy) {
case dml2_pmo_pstate_strategy_vactive:
@@ -327,11 +385,9 @@ static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum
return variant_strategy;
}
-static enum dml2_pmo_pstate_strategy(*get_expanded_strategy_list(
- struct dml2_pmo_init_data *init_data,
- int stream_count))[PMO_DCN4_MAX_DISPLAYS]
+static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count)
{
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
switch (stream_count) {
case 1:
@@ -361,23 +417,23 @@ static unsigned int get_num_expanded_strategies(
}
static void insert_strategy_into_expanded_list(
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
int stream_count,
struct dml2_pmo_init_data *init_data)
{
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count);
if (expanded_strategy_list) {
- memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++],
- per_stream_pstate_strategy,
- sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+ memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
+
+ init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++;
}
}
static void expand_base_strategy(struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *base_strategy,
unsigned int stream_count)
{
bool skip_to_next_stream;
@@ -386,19 +442,21 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
unsigned int i, j;
unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 };
- enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 };
/* determine number of displays per method */
for (i = 0; i < stream_count; i++) {
/* increment the count of the earliest index with the same method */
for (j = 0; j < stream_count; j++) {
- if (base_strategy_list[i] == base_strategy_list[j]) {
+ if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
num_streams_per_method[j] = num_streams_per_method[j] + 1;
break;
}
}
}
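+	/* permutations inherit the base strategy's state-increase policy */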
+ cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase;
+
i = 0;
/* uses a while loop instead of recursion to build permutations of base strategy */
while (stream_iteration_indices[0] < stream_count) {
@@ -409,12 +467,12 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
/* determine what to do for this iteration */
if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) {
/* decrement count and assign method */
- cur_strategy_list[i] = base_strategy_list[stream_iteration_indices[i]];
+ cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]];
num_streams_per_method[stream_iteration_indices[i]] -= 1;
if (i >= stream_count - 1) {
/* insert into strategy list */
- insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data);
+ insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data);
expanded_strategy_added = true;
} else {
/* skip to next stream */
@@ -450,55 +508,122 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
}
}
-static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS],
+
+static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
+ const struct dml2_pmo_pstate_strategy *variant_strategy,
+ unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
+ unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
unsigned int stream_count)
{
+ bool valid = true;
unsigned int i;
- bool variant_found = false;
- enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ /* check all restrictions are met */
+ for (i = 0; i < stream_count; i++) {
+ /* vblank + vblank_drr variants are invalid */
+ if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank &&
+ ((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
+ num_streams_per_variant_method[i] > 1)) {
+ valid = false;
+ break;
+ }
+ }
- /* setup variant list as base to start */
- memcpy(cur_strategy_list, base_strategy_list, sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+ return valid;
+}
+
+static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
+ const struct dml2_pmo_pstate_strategy *base_strategy,
+ unsigned int stream_count)
+{
+ bool variant_found;
+ unsigned int i, j;
+ unsigned int method_index;
+ unsigned int stream_index;
+ unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
+ struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
+ /* determine number of displays per method */
for (i = 0; i < stream_count; i++) {
- cur_strategy_list[i] = convert_strategy_to_drr_variant(base_strategy_list[i]);
+ /* increment the count of the earliest index with the same method */
+ for (j = 0; j < stream_count; j++) {
+ if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
+ num_streams_per_method[j] = num_streams_per_method[j] + 1;
+ break;
+ }
+ }
+
+ per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]);
+ }
+ memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS);
+
+ memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy));
+
+ method_index = 0;
+ /* uses a while loop instead of recursion to build permutations of base strategy */
+ while (num_streams_per_base_method[0] > 0 || method_index != 0) {
+ if (method_index == stream_count) {
+ /* construct variant strategy */
+ variant_found = false;
+ stream_index = 0;
+
+ for (i = 0; i < stream_count; i++) {
+ for (j = 0; j < num_streams_per_base_method[i]; j++) {
+ variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i];
+ }
+
+ for (j = 0; j < num_streams_per_variant_method[i]; j++) {
+ variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i];
+ if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) {
+ variant_found = true;
+ }
+ }
+ }
- if (cur_strategy_list[i] != base_strategy_list[i]) {
- variant_found = true;
+ if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
+ expand_base_strategy(pmo, &variant_strategy, stream_count);
+ }
+
+ /* rollback to earliest method with bases remaining */
+ for (method_index = stream_count - 1; method_index > 0; method_index--) {
+ if (num_streams_per_base_method[method_index]) {
+ /* bases remaining */
+ break;
+ } else {
+ /* reset counters */
+ num_streams_per_base_method[method_index] = num_streams_per_method[method_index];
+ num_streams_per_variant_method[method_index] = 0;
+ }
+ }
}
- if (i == stream_count - 1 && variant_found) {
- insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data);
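+	/* convert one more stream of the current method to its DRR variant, then advance */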
+ if (num_streams_per_base_method[method_index]) {
+ num_streams_per_base_method[method_index]--;
+ num_streams_per_variant_method[method_index]++;
+
+ method_index++;
+ } else if (method_index != 0) {
+ method_index++;
}
}
}
static void expand_base_strategies(
struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy(*base_strategies_list)[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
const unsigned int num_base_strategies,
unsigned int stream_count)
{
unsigned int i;
- unsigned int num_pre_variant_strategies;
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS];
/* expand every explicit base strategy (except all DRR) */
- for (i = 0; i < num_base_strategies - 1; i++) {
- expand_base_strategy(pmo, base_strategies_list[i], stream_count);
- }
-
- /* expand base strategies to DRR variants */
- num_pre_variant_strategies = get_num_expanded_strategies(&pmo->init_data, stream_count);
- expanded_strategy_list = get_expanded_strategy_list(&pmo->init_data, stream_count);
- for (i = 0; i < num_pre_variant_strategies; i++) {
- expand_variant_strategy(pmo, expanded_strategy_list[i], stream_count);
+ for (i = 0; i < num_base_strategies; i++) {
+ expand_base_strategy(pmo, &base_strategies_list[i], stream_count);
+ expand_variant_strategy(pmo, &base_strategies_list[i], stream_count);
}
-
- /* add back all DRR */
- insert_strategy_into_expanded_list(base_strategies_list[num_base_strategies - 1], stream_count, &pmo->init_data);
}
bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
@@ -546,8 +671,6 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
/* populate list */
expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
break;
- default:
- break;
}
}
@@ -591,6 +714,8 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
&in_out->base_display_config->display_config;
const struct dml2_core_mode_support_result *mode_support_result =
&in_out->base_display_config->mode_support_result;
+ struct dml2_optimization_stage4_state *state =
+ &in_out->base_display_config->stage4;
if (in_out->instance->options->disable_dyn_odm ||
(in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
@@ -611,28 +736,30 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
*/
if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
- in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
+ state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
for (i = 0; i < display_config->num_streams; i++) {
if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
/*
* ODM Combine requires horizontal timing divisible by 2 so each
* ODM segment has the same size.
*/
else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
/*
		 * Our hardware supports seamless ODM transitions for DP encoders
* only.
*/
else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
}
+ state->performed = true;
+
return true;
}
@@ -783,6 +910,7 @@ static void build_synchronized_timing_groups(
/* clear all group masks */
memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks));
memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled));
+ memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active));
memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us));
s->pmo_dcn4.num_timing_groups = 0;
@@ -804,6 +932,8 @@ static void build_synchronized_timing_groups(
		/* if drr is in use, timing is not synchronizable */
if (master_timing->drr_config.enabled) {
s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true;
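+			/* drr is active only when not disallowed and actually in use (fixed or variable) */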
+ s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed &&
+ (master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable);
continue;
}
@@ -884,7 +1014,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
stream_descriptor = &display_config->display_config.stream_descriptors[i];
stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
- if (!stream_descriptor->timing.drr_config.enabled || stream_descriptor->overrides.disable_fams2_drr)
+ if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
@@ -967,35 +1097,24 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
return true;
}
-static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
+static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
{
- int stream_index;
-
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true;
-
- for (stream_index = 0; stream_index < stream_count; stream_index++) {
- scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index];
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr)
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false;
- }
-
+ scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
scratch->pmo_dcn4.num_pstate_candidates++;
}
-static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy)
+static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method)
{
unsigned char i;
enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
- if (strategy == dml2_pmo_pstate_strategy_vactive || strategy == dml2_pmo_pstate_strategy_fw_vactive_drr)
+ if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (strategy == dml2_pmo_pstate_strategy_vblank || strategy == dml2_pmo_pstate_strategy_fw_vblank_drr)
+ else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (strategy == dml2_pmo_pstate_strategy_fw_svp)
+ else if (method == dml2_pmo_pstate_strategy_fw_svp)
matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (strategy == dml2_pmo_pstate_strategy_fw_drr)
+ else if (method == dml2_pmo_pstate_strategy_fw_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1027,12 +1146,12 @@ static void build_method_scheduling_params(
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
- enum dml2_pmo_pstate_strategy stream_pstate_strategy,
+ enum dml2_pmo_pstate_method stream_pstate_method,
int stream_idx)
{
struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
- switch (stream_pstate_strategy) {
+ switch (stream_pstate_method) {
case dml2_pmo_pstate_strategy_vactive:
case dml2_pmo_pstate_strategy_fw_vactive_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
@@ -1063,7 +1182,7 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
static bool is_timing_group_schedulable(
struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
struct dml2_fams2_per_method_common_meta *group_fams2_meta)
{
@@ -1082,7 +1201,7 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[base_stream_idx], base_stream_idx);
+ stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
if (!stream_method_fams2_meta)
return false;
@@ -1091,9 +1210,9 @@ static bool is_timing_group_schedulable(
group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[i], i);
+ stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
if (!stream_method_fams2_meta)
- continue;
+ return false;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1123,7 +1242,7 @@ static bool is_timing_group_schedulable(
static bool is_config_schedulable(
struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS])
+ const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
unsigned int i, j;
bool schedulable;
@@ -1146,7 +1265,7 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, per_stream_pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
@@ -1248,7 +1367,7 @@ static bool is_config_schedulable(
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
- s->pmo_dcn4.group_is_drr_enabled[sorted_ip1]) {
+ (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
}
@@ -1260,8 +1379,8 @@ static bool is_config_schedulable(
/* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */
if (s->pmo_dcn4.num_timing_groups == 2 &&
- !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[0]) &&
- !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[1])) {
+ !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) &&
+ !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) {
double period_ratio;
double max_shift_us;
double shift_per_period;
@@ -1290,44 +1409,48 @@ static bool is_config_schedulable(
}
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy stream_pstate_strategy,
- unsigned int stream_index)
+ const struct display_configuation_with_meta *display_cfg,
+ const enum dml2_pmo_pstate_method stream_pstate_method,
+ unsigned int stream_index)
{
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
bool strategy_matches_drr_requirements = true;
/* check if strategy is compatible with stream drr capability and strategy */
- if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) &&
+ if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
display_cfg->display_config.num_streams > 1 &&
stream_descriptor->timing.drr_config.enabled &&
(stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) {
/* DRR is active, so config may become unschedulable */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) &&
- is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
+ is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
stream_descriptor->timing.drr_config.enabled &&
stream_descriptor->timing.drr_config.drr_active_variable) {
/* DRR is variable, fw exclusive methods require DRR to be clamped */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
pmo->options->disable_drr_var_when_var_active &&
stream_descriptor->timing.drr_config.enabled &&
stream_descriptor->timing.drr_config.drr_active_variable) {
/* DRR variable is active, but policy blocks DRR for p-state when this happens */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
(pmo->options->disable_drr_var ||
!stream_descriptor->timing.drr_config.enabled ||
stream_descriptor->timing.drr_config.disallowed)) {
/* DRR variable strategies are disallowed due to settings or policy */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_strategy) &&
- (pmo->options->disable_drr_clamped ||
- !stream_descriptor->timing.drr_config.enabled)) {
+ } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) &&
+ (pmo->options->disable_drr_clamped ||
+ (!stream_descriptor->timing.drr_config.enabled ||
+ (!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) ||
+ (pmo->options->disable_drr_clamped_when_var_active &&
+ stream_descriptor->timing.drr_config.enabled &&
+ stream_descriptor->timing.drr_config.drr_active_variable))) {
/* DRR fixed strategies are disallowed due to settings or policy */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
pmo->options->disable_fams2) {
/* FW modes require FAMS2 */
strategy_matches_drr_requirements = false;
@@ -1338,7 +1461,7 @@ static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS])
+ const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1359,28 +1482,28 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
// Tabulate everything
for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
- per_stream_pstate_strategy[stream_index])) {
+ if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
+ pstate_strategy->per_stream_pstate_method[stream_index])) {
strategy_matches_forced_requirements = false;
break;
}
strategy_matches_drr_requirements &=
- stream_matches_drr_policy(pmo, display_cfg, per_stream_pstate_strategy[stream_index], stream_index);
+ stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
svp_count++;
set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
drr_count++;
set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
vactive_count++;
set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
vblank_count++;
set_bit_in_bitfield(&vblank_stream_mask, stream_index);
}
@@ -1389,7 +1512,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements)
return false;
- if (vactive_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask)))
+ if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))
return false;
if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask)))
@@ -1401,7 +1524,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
return false;
- return is_config_schedulable(pmo, display_cfg, per_stream_pstate_strategy);
+ return is_config_schedulable(pmo, display_cfg, pstate_strategy);
}
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
@@ -1457,6 +1580,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(stream_fams2_meta->nom_vtotal * timing->h_total);
stream_fams2_meta->nom_frame_time_us =
(double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
+ stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
@@ -1510,7 +1634,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- timing->v_blank_end + timing->v_active -
+ stream_fams2_meta->vblank_start -
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
} else {
stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
@@ -1520,8 +1644,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline =
- timing->v_blank_end + timing->v_active;
+ stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
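[Editor's note: the new vblank_start field above folds a repeated expression into one place: the OTG vline at which vblank begins is the end of the previous blank plus the active region. A minimal standalone sketch of that derivation, with made-up timing values (field names mirror the diff, nothing here is taken from a real mode):

#include <stdio.h>

/* Illustrative timing struct; values are stand-ins, not real timings. */
struct timing_example {
	unsigned int v_blank_end; /* vline where the previous vblank ends */
	unsigned int v_active;    /* number of active vlines that follow  */
	unsigned int v_total;     /* total vlines per frame               */
};

int main(void)
{
	struct timing_example t = { .v_blank_end = 41, .v_active = 2160, .v_total = 2250 };

	/* vblank starts right after the active region finishes */
	unsigned int vblank_start = t.v_blank_end + t.v_active; /* 2201 */

	printf("vblank_start = %u (front porch spans %u vlines)\n",
	       vblank_start, t.v_total - vblank_start);
	return 0;
}

The same value then serves as the vactive allow end, the vblank allow start, the subvp allow end, and the DRR allow start in the hunks that follow.]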
@@ -1555,8 +1678,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
stream_fams2_meta->allow_to_target_delay_otg_vlines;
stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->nom_vtotal -
- timing->v_front_porch -
+ stream_fams2_meta->vblank_start -
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
@@ -1565,20 +1687,21 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_drr.programming_delay_otg_vlines =
(unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->vblank_start +
stream_fams2_meta->allow_to_target_delay_otg_vlines;
stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->method_drr.common.allow_start_otg_vline +
+ stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->allow_to_target_delay_otg_vlines +
stream_fams2_meta->min_allow_width_otg_vlines +
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->method_drr.common.allow_start_otg_vline +
- stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->nom_vtotal * 2 +
+ stream_fams2_meta->allow_to_target_delay_otg_vlines +
stream_fams2_meta->min_allow_width_otg_vlines +
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
}
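[Editor's note: a hedged sketch of the stretched-vtotal arithmetic above, using stand-in names and values; only the structure follows the diff. A single display stretches the nominal vtotal just enough to cover the allow-to-target delay, the minimum allow width, and the DRAM blackout, while multi-display doubles the nominal vtotal so the allow window stays schedulable against the other streams:

#include <stdio.h>

int main(void)
{
	/* Stand-in values, all in OTG vlines */
	unsigned int nom_vtotal = 2250;
	unsigned int allow_to_target_delay = 10;
	unsigned int min_allow_width = 20;
	unsigned int blackout = 60;
	unsigned int num_streams = 1;

	unsigned int stretched_vtotal;

	if (num_streams <= 1) {
		/* only need to stretch vblank for blackout time */
		stretched_vtotal = nom_vtotal + allow_to_target_delay +
				   min_allow_width + blackout;
	} else {
		/* multi-display needs to always be schedulable */
		stretched_vtotal = nom_vtotal * 2 + allow_to_target_delay +
				   min_allow_width + blackout;
	}

	printf("stretched_vtotal = %u\n", stretched_vtotal);
	return 0;
}
]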
@@ -1611,7 +1734,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
struct display_configuation_with_meta *display_config;
const struct dml2_plane_parameters *plane_descriptor;
- const enum dml2_pmo_pstate_strategy(*strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
unsigned int strategy_list_size = 0;
unsigned char plane_index, stream_index, i;
@@ -1623,6 +1746,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
memset(s, 0, sizeof(struct dml2_pmo_scratch));
+ if (display_config->display_config.overrides.all_streams_blanked) {
+ return true;
+ }
+
pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size;
pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
@@ -1652,6 +1779,9 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
build_synchronized_timing_groups(pmo, display_config);
strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
+ if (!strategy_list)
+ return false;
+
strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
if (strategy_list_size == 0)
@@ -1660,8 +1790,8 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
s->pmo_dcn4.num_pstate_candidates = 0;
for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
- if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) {
- insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s);
+ if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) {
+ insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s);
}
}
@@ -1778,7 +1908,8 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
- plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
+ plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
+ plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
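[Editor's note: the change above stops clobbering a larger reserved vblank time that may already be set; it keeps the maximum of the existing override and the DRAM-blackout requirement, converted from microseconds to nanoseconds. A small sketch of that clamp, with a local helper standing in for math_max2 and invented numbers:

#include <stdio.h>

static double max2(double a, double b)
{
	return a > b ? a : b;
}

int main(void)
{
	long reserved_vblank_time_ns = 500000; /* existing override: 0.5 ms */
	double dram_blackout_us = 400.0;       /* stand-in blackout time    */

	/* keep whichever requirement is larger, in nanoseconds */
	reserved_vblank_time_ns = (long)max2(dram_blackout_us * 1000.0,
					     (double)reserved_vblank_time_ns);

	printf("reserved_vblank_time_ns = %ld\n", reserved_vblank_time_ns);
	return 0;
}
]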
@@ -1857,26 +1988,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (pmo->scratch.pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) {
+ if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
success = false;
break;
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) {
setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) {
setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
fams2_required = true;
setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
fams2_required = true;
setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
fams2_required = true;
setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
fams2_required = true;
setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
fams2_required = true;
setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
}
@@ -1917,6 +2048,10 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
int MIN_VACTIVE_MARGIN_DRR = 0;
int REQUIRED_RESERVED_TIME = 0;
+ if (in_out->base_display_config->display_config.overrides.all_streams_blanked) {
+ return true;
+ }
+
MIN_VACTIVE_MARGIN_VBLANK = INT_MIN;
MIN_VACTIVE_MARGIN_DRR = INT_MIN;
REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
@@ -1927,34 +2062,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
- if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
p_state_supported = false;
break;
}
@@ -1971,8 +2106,8 @@ bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pst
memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
if (in_out->last_candidate_failed) {
- if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] &&
- s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase &&
+ s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) {
s->pmo_dcn4.cur_latency_index++;
success = true;
@@ -2060,15 +2195,15 @@ bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in
unsigned int i;
- for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) {
+ for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
pmo->scratch.pmo_dcn4.z8_vblank_optimizable &&
- in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+ in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) {
success = false;
break;
}
if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
- in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us) {
+ in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) {
success = false;
break;
}
@@ -2087,8 +2222,11 @@ bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in
if (!in_out->last_candidate_failed) {
if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) {
- for (i = 0; i < in_out->optimized_display_config->display_config.num_streams; i++) {
- in_out->optimized_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us = pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate];
+ for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) {
+ /* take the max of the current and the optimal reserved time */
+ in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns =
+ (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000,
+ in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns);
}
success = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 75175d93add4..0c25bd3e9ac0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_FAMS2_DCN4_H__
#define __DML2_PMO_FAMS2_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index e0b9ece7901d..add51d41a515 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -2,10 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn4_fams2.h"
-#include "dml2_pmo_dcn4.h"
#include "dml2_pmo_dcn3.h"
#include "dml2_external_lib_deps.h"
@@ -28,15 +26,15 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
switch (project_id) {
case dml2_project_dcn4x_stage1:
- out->initialize = pmo_dcn4_initialize;
- out->optimize_dcc_mcache = pmo_dcn4_optimize_dcc_mcache;
+ out->initialize = pmo_dcn4_fams2_initialize;
+ out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache;
result = true;
break;
case dml2_project_dcn4x_stage2:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 9d3dc5e94be1..7218de1824cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_FACTORY_H__
#define __DML2_PMO_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e73579f1a88e..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "lib_float_math.h"
#define ASSERT(condition)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
index 537cf6fd4c15..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __LIB_FLOAT_MATH_H__
#define __LIB_FLOAT_MATH_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
index 1b6dbfaa7ae8..d0e026d981b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_top_optimization.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_mcache.h"
@@ -220,7 +219,6 @@ bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization
copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
lowest_state = 0;
- cur_state = 0;
while (highest_state > lowest_state) {
cur_state = (highest_state + lowest_state) / 2;
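[Editor's note: the loop above, with the redundant cur_state pre-initialization dropped, is a standard binary search for the lowest clock state that still passes. A self-contained sketch of the pattern, with a stand-in predicate in place of the real mode-support check:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in: states at index 5 and above "pass"; the real code runs mode support. */
static bool state_supported(int state)
{
	return state >= 5;
}

int main(void)
{
	int lowest_state = 0;
	int highest_state = 12; /* e.g. min_clk_index_for_latency */
	int cur_state;

	while (highest_state > lowest_state) {
		cur_state = (highest_state + lowest_state) / 2;
		if (state_supported(cur_state))
			highest_state = cur_state;    /* passes: try lower    */
		else
			lowest_state = cur_state + 1; /* fails: must go higher */
	}

	printf("lowest passing state = %d\n", lowest_state);
	return 0;
}
]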
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
index 1536afcbf73a..9f22ab33eab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_TOP_OPTIMIZATION_H__
#define __DML2_TOP_OPTIMIZATION_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
index 2fb3e2f45e07..f9f8869cd8b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_internal_shared_types.h"
#include "dml_top.h"
#include "dml2_mcg_factory.h"
@@ -28,6 +27,7 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
bool result = false;
memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
+ memset(dml, 0, sizeof(struct dml2_instance));
memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
@@ -96,14 +96,12 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
{
struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
- /* Borrow the build_mode_programming_locals programming struct for DPMM call. */
- struct dml2_display_cfg_programming *dpmm_programming = dml->scratch.build_mode_programming_locals.mode_programming_params.programming;
+ struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
bool result = false;
bool mcache_success = false;
- if (dpmm_programming)
- memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
@@ -130,7 +128,7 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
/*
* Call DPMM to map all requirements to minimum clock state
*/
- if (result && dpmm_programming) {
+ if (result) {
l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
l->dppm_map_mode_params.programming = dpmm_programming;
@@ -268,9 +266,18 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
- if (vmin_success) {
+ if (l->optimized_display_config_with_meta.stage4.performed) {
+ /*
+ * When performed is true, the optimization has been applied to
+ * optimized_display_config_with_meta and it has passed mode
+ * support. However, it may or may not pass the test function to
+ * reach the actual Vmin. As long as voltage is optimized there is
+ * still a power benefit even if it does not reach the Vmin level,
+ * so in this case we still copy this optimization into the base
+ * display config.
+ */
memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage4.success = true;
+ l->base_display_config_with_meta.stage4.success = vmin_success;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
index 7afd417071a5..a342ebfbe4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_debug.h"
#include "dml_top_mcache.h"
@@ -143,12 +142,12 @@ static unsigned int count_elements_in_span(int *array, unsigned int array_size,
while (span_start_index < array_size) {
for (i = span_start_index; i < array_size; i++) {
- if (array[i] - span_start_value > span) {
+ if (array[i] - span_start_value <= span) {
if (i - span_start_index + 1 > greatest_element_count) {
greatest_element_count = i - span_start_index + 1;
}
+ } else
break;
- }
}
span_start_index++;
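[Editor's note: after the fix, the inner loop counts elements while they remain inside the span and breaks on the first one outside, instead of the previous inverted test. A standalone sketch of the corrected walk; it assumes span_start_value is the element at span_start_index, which the truncated hunk does not actually show:

#include <stdio.h>

static unsigned int count_elements_in_span(const int *array,
					   unsigned int array_size, int span)
{
	unsigned int greatest_element_count = 0;
	unsigned int span_start_index = 0;
	unsigned int i;

	while (span_start_index < array_size) {
		/* assumption: the span is anchored at the current element */
		int span_start_value = array[span_start_index];

		for (i = span_start_index; i < array_size; i++) {
			if (array[i] - span_start_value <= span) {
				if (i - span_start_index + 1 > greatest_element_count)
					greatest_element_count = i - span_start_index + 1;
			} else
				break;
		}
		span_start_index++;
	}
	return greatest_element_count;
}

int main(void)
{
	int offsets[] = { 0, 100, 220, 700, 820 };

	/* longest run of sorted offsets that fits inside a span of 300 -> 3 */
	printf("%u\n", count_elements_in_span(offsets, 5, 300));
	return 0;
}
]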
@@ -208,9 +207,9 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
int temp, p0shift, p1shift;
unsigned int plane_index = 0;
unsigned int i;
- char odm_combine_factor = 1;
- char mpc_combine_factor = 1;
- char num_dpps;
+ unsigned int odm_combine_factor;
+ unsigned int mpc_combine_factor;
+ unsigned int num_dpps;
unsigned int num_boundaries;
enum dml2_scaling_transform scaling_transform;
const struct dml2_plane_parameters *plane;
@@ -227,10 +226,10 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
plane = &params->display_cfg->plane_descriptors[plane_index];
stream = &params->display_cfg->stream_descriptors[plane->stream_index];
- odm_combine_factor = (char)params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
+ num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
if (odm_combine_factor == 1)
- mpc_combine_factor = (char)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
+ num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
else
mpc_combine_factor = 1;
@@ -260,13 +259,13 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
// The last element in the unshifted boundary array will always be the first pixel outside the
// plane, which means theres no mcache associated with it, so -1
num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
- if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- num_boundaries, max_per_pipe_vp_p0) <= 1) {
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
p0pass = true;
}
num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
- if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- num_boundaries, max_per_pipe_vp_p1) <= 1) {
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
p1pass = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
index bb12e4c30690..7b1f6f7143d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_MCACHE_H__
#define __DML_TOP_MCACHE_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index de7d8a6a2d3d..e9b8e10695ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_debug.h"
int dml2_printf(const char *format, ...)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index 0403238df107..d51a1b6c62f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index 5632cdacb7f4..aeac9f159fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_INTERNAL_SHARED_TYPES_H__
#define __DML2_INTERNAL_SHARED_TYPES_H__
@@ -107,10 +106,16 @@ struct dml2_dpmm_map_watermarks_params_in_out {
struct dml2_display_cfg_programming *programming;
};
+struct dml2_dpmm_scratch {
+ struct dml2_display_cfg_programming programming;
+};
+
struct dml2_dpmm_instance {
bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
bool (*unit_test)(void);
+
+ struct dml2_dpmm_scratch dpmm_scratch;
};
/*
@@ -266,6 +271,7 @@ struct dml2_fams2_meta {
unsigned int contention_delay_otg_vlines;
unsigned int min_allow_width_otg_vlines;
unsigned int nom_vtotal;
+ unsigned int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
unsigned int max_vtotal;
@@ -594,7 +600,7 @@ struct dml2_pmo_optimize_for_stutter_in_out {
struct display_configuation_with_meta *optimized_display_config;
};
-enum dml2_pmo_pstate_strategy {
+enum dml2_pmo_pstate_method {
dml2_pmo_pstate_strategy_na = 0,
/* hw exclusive modes */
dml2_pmo_pstate_strategy_vactive = 1,
@@ -612,6 +618,11 @@ enum dml2_pmo_pstate_strategy {
dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
};
+struct dml2_pmo_pstate_strategy {
+ enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
+ bool allow_state_increase;
+};
+
#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
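[Editor's note: these masks all follow one pattern: set a contiguous run of bits covering an inclusive range of enum values, so a method can be classified with a single bit test. A hedged, standalone illustration of that pattern, using a local macro and helper rather than the driver's own:

#include <stdio.h>
#include <stdbool.h>

/* Sets bits lo..hi inclusive: ((1 << (hi - lo + 1)) - 1) << lo */
#define RANGE_MASK(lo, hi) ((((1u << ((hi) - (lo) + 1u)) - 1u)) << (lo))

static bool is_bit_set_in_bitfield(unsigned int bitfield, unsigned int bit)
{
	return (bitfield & (1u << bit)) != 0;
}

int main(void)
{
	/* say methods 6..13 form the "fw DRR" family, mirroring the enum layout */
	unsigned int drr_mask = RANGE_MASK(6u, 13u);

	printf("method 7 is DRR: %d\n", is_bit_set_in_bitfield(drr_mask, 7));
	printf("method 3 is DRR: %d\n", is_bit_set_in_bitfield(drr_mask, 3));
	return 0;
}
]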
@@ -634,8 +645,7 @@ struct dml2_pmo_scratch {
int stream_mask;
} pmo_dcn3;
struct {
- enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[DML2_MAX_PLANES][DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
- bool allow_state_increase_for_strategy[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
+ struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
int num_pstate_candidates;
int cur_pstate_candidate;
@@ -661,6 +671,7 @@ struct dml2_pmo_scratch {
unsigned int num_timing_groups;
unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
@@ -676,10 +687,10 @@ struct dml2_pmo_init_data {
union {
struct {
/* populated once during initialization */
- enum dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2 * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 24 * 2][PMO_DCN4_MAX_DISPLAYS];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 4 * 4];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 6 * 6];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 8 * 8 * 8 * 8];
unsigned int num_expanded_strategies_per_list[PMO_DCN4_MAX_DISPLAYS];
} pmo_dcn4;
};
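[Editor's note: the new capacities appear to scale as the base strategy count times a per-stream variant count raised to the number of displays (2, 4, 6, and 8 variants respectively in the sizes above), replacing the old permutation-based sizing. A quick sketch of that sizing arithmetic under that reading, with a stand-in base count:

#include <stdio.h>

int main(void)
{
	unsigned int base = 10; /* stand-in for PMO_DCN4_MAX_BASE_STRATEGIES */
	unsigned int variants[] = { 2, 4, 6, 8 }; /* per-stream variants by display count */
	unsigned int displays, i;

	for (displays = 1; displays <= 4; displays++) {
		unsigned int capacity = base;

		/* capacity = base * variants^displays */
		for (i = 0; i < displays; i++)
			capacity *= variants[displays - 1];
		printf("%u displays -> capacity %u\n", displays, capacity);
	}
	return 0;
}
]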
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index b566f53608c6..140ec01545db 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -101,6 +101,7 @@ struct dml2_wrapper_scratch {
struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
bool enable_flexible_pipe_mapping;
bool plane_duplicate_exists;
+ int hpo_stream_to_link_encoder_mapping[MAX_HPO_DP2_ENCODERS];
};
struct dml2_helper_det_policy_scratch {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 8b9dcee77266..bde4250853b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -733,7 +733,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
}
static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location,
- const struct dc_stream_state *in, const struct pipe_ctx *pipe)
+ const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2)
{
unsigned int output_bpc;
@@ -746,8 +746,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (is_dp2p0_output_encoder(pipe))
- out->OutputEncoder[location] = dml_dp2p0;
+ if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
out->OutputEncoder[location] = dml_edp;
@@ -953,7 +953,9 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
}
-static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
+static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
+ const struct dc_stream_state *in,
+ const struct soc_bounding_box_st *soc)
{
dml_uint_t width, height;
@@ -970,7 +972,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
- out->GPUVMMinPageSizeKBytes[location] = 256;
+ out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes;
out->ViewportWidth[location] = width;
out->ViewportHeight[location] = height;
@@ -1007,7 +1009,9 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
-static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
+static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location,
+ const struct dc_plane_state *in, struct dc_state *context,
+ const struct soc_bounding_box_st *soc)
{
struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
if (!scaler_data)
@@ -1018,7 +1022,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
- out->GPUVMMinPageSizeKBytes[location] = 256;
+ out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes;
out->ViewportWidth[location] = scaler_data->viewport.width;
out->ViewportHeight[location] = scaler_data->viewport.height;
@@ -1193,6 +1197,7 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
plane_index = 0;
}
}
+
static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
unsigned int location, const struct dc_stream_state *in)
{
@@ -1233,6 +1238,30 @@ static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cf
}
}
}
+
+static void dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(struct dml2_context *dml2, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *current_pipe_context;
+
+ /* Scratch gets reset to zero in dml, but link encoder instance can be zero, so reset to -1 */
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[i] = -1;
+ }
+
+ /* If an HPO stream encoder is allocated to a pipe, get the instance of its allocated HPO link encoder */
+ for (i = 0; i < MAX_PIPES; i++) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[i];
+ if (current_pipe_context->stream &&
+ current_pipe_context->stream_res.hpo_dp_stream_enc &&
+ current_pipe_context->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(current_pipe_context->stream->signal)) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[current_pipe_context->stream_res.hpo_dp_stream_enc->inst] =
+ current_pipe_context->link_res.hpo_dp_link_enc->inst;
+ }
+ }
+}
+
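[Editor's note: the new mapping uses -1 as a sentinel because a valid link encoder instance can legitimately be 0, so a zeroed scratch area would be ambiguous. A minimal sketch of the sentinel-initialized lookup; toy types and a toy assignment, whereas the real code walks the pipe_ctx entries:

#include <stdio.h>

#define MAX_ENCODERS 4

int main(void)
{
	int stream_to_link[MAX_ENCODERS];
	int i;

	/* instance 0 is valid, so 0 cannot mean "unmapped"; use -1 instead */
	for (i = 0; i < MAX_ENCODERS; i++)
		stream_to_link[i] = -1;

	/* toy assignment: stream encoder 2 drives link encoder 0 */
	stream_to_link[2] = 0;

	for (i = 0; i < MAX_ENCODERS; i++) {
		if (stream_to_link[i] != -1)
			printf("stream enc %d -> link enc %d\n", i, stream_to_link[i]);
	}
	return 0;
}
]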
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1256,6 +1285,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
+ dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(dml2, context);
for (i = 0; i < context->stream_count; i++) {
current_pipe_context = NULL;
@@ -1276,7 +1306,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
/*Call site for populate_dml_writeback_cfg_from_stream_state*/
populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
disp_cfg_stream_location, context->streams[i]);
@@ -1299,7 +1329,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
populate_dummy_dml_surface_cfg(&dml_dispcfg->surface, disp_cfg_plane_location, context->streams[i]);
- populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location, context->streams[i]);
+ populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location,
+ context->streams[i], &dml2->v20.dml_core_ctx.soc);
dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location;
@@ -1315,7 +1346,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
- populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
+ populate_dml_plane_cfg_from_plane_state(
+ &dml_dispcfg->plane, disp_cfg_plane_location,
+ context->stream_status[i].plane_states[j], context,
+ &dml2->v20.dml_core_ctx.soc);
if (stream_mall_type == SUBVP_MAIN) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
@@ -1337,7 +1371,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context, dml2);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 92238ff333a4..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -161,14 +161,6 @@ bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
- /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
- if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal) &&
- (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id));
- }
-
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
@@ -421,7 +413,7 @@ unsigned int dml2_calc_max_scaled_time(
void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx)
{
- int i, j = 0;;
+ int i, j = 0;
struct mcif_arb_params *wb_arb_params = NULL;
struct dcn_bw_writeback *bw_writeback = NULL;
enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index d5dcc8b77281..866b0abcff1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -575,7 +575,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
unsigned int lowest_state_idx = 0;
out_clks.p_state_supported = true;
- out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
+ out_clks.dispclk_khz = 0; /* No requirement, and lowest index will generally be maximum dispclk. */
out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 023325e8f6e2..0f944fcfd5a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -236,6 +236,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
+ bool force_tdlut_enable;
struct dml2_soc_bb *bb_from_dmub;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
index f2a2d53e9689..f8f6019d8304 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
@@ -684,9 +684,6 @@ void dpp1_set_degamma(
BREAK_TO_DEBUGGER();
break;
}
-
- REG_SEQ_SUBMIT();
- REG_SEQ_WAIT_DONE();
}
void dpp1_degamma_ram_select(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index e16274fee31d..8473c694bfdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -59,6 +59,31 @@ void dpp35_dppclk_control(
DISPCLK_R_GATE_DISABLE, 0);
}
+void dpp35_program_bias_and_scale_fcnv(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *params)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (!params->bias_and_scale_valid) {
+ REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, 0);
+ REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, 0);
+ REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, 0);
+
+ REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, 0x1F000);
+ REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, 0x1F000);
+ REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, 0x1F000);
+ } else {
+ REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, params->bias_red);
+ REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, params->bias_green);
+ REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, params->bias_blue);
+
+ REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, params->scale_red);
+ REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, params->scale_green);
+ REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, params->scale_blue);
+ }
+}
+
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
@@ -81,7 +106,7 @@ static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
.dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
- .dpp_program_bias_and_scale = NULL,
+ .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp3_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
index 135872d88219..3ca339a16e5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
@@ -61,4 +61,7 @@ bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx,
void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable);
+void dpp35_program_bias_and_scale_fcnv(struct dpp *dpp_base,
+ struct dc_bias_and_scale *bias_and_scale);
+
#endif // __DCN35_DPP_H
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 7cae18fd7be9..97bf26fa3573 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -30,6 +30,7 @@
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_dpp.h"
+#include "dcn35/dcn35_dpp.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -240,7 +241,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
.dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
- .dpp_program_bias_and_scale = NULL,
+ .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp401_set_cursor_attributes,
.set_cursor_position = dpp401_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index d0f8c9ff5232..3b6ca7974e18 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -120,11 +120,10 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
+ // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
- cur_rom_en = 1;
- }
+ cur_rom_en = 1;
}
REG_UPDATE_3(CURSOR0_CONTROL,
@@ -246,16 +245,6 @@ void dpp401_set_cursor_matrix(
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix)
{
- struct dpp_input_csc_matrix cursor_tbl_entry;
- unsigned int i;
-
- if (cursor_csc_color_matrix.enable_adjustment == true) {
- for (i = 0; i < 12; i++)
- cursor_tbl_entry.regval[i] = cursor_csc_color_matrix.matrix[i];
-
- cursor_tbl_entry.color_space = color_space;
- dpp401_program_cursor_csc(dpp_base, color_space, &cursor_tbl_entry);
- } else {
- dpp401_program_cursor_csc(dpp_base, color_space, NULL);
- }
+	// Since we don't have cursor matrix information, force bypass mode by passing in an unknown color space
+ dpp401_program_cursor_csc(dpp_base, COLOR_SPACE_UNKNOWN, NULL);
}
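With the cursor matrix handling removed, dpp401_set_cursor_matrix() now always requests bypass. A standalone sketch of the convention, assuming a hypothetical program_cursor_csc() where a NULL matrix together with an unknown color space selects bypass:

#include <stddef.h>
#include <stdio.h>

enum color_space { COLOR_SPACE_UNKNOWN = 0, COLOR_SPACE_SRGB };

struct csc_matrix { int regval[12]; };

static void program_cursor_csc(enum color_space cs, const struct csc_matrix *m)
{
	if (cs == COLOR_SPACE_UNKNOWN || m == NULL) {
		puts("cursor CSC: bypass");	/* pixels pass through untouched */
		return;
	}
	puts("cursor CSC: programming 3x4 matrix");
}

int main(void)
{
	/* no cursor matrix information available -> force bypass */
	program_cursor_csc(COLOR_SPACE_UNKNOWN, NULL);
	return 0;
}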
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 505929800426..5105fd580017 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -280,7 +280,8 @@ static void dpp401_dscl_set_scaler_filter(
static void dpp401_dscl_set_scl_filter(
struct dcn401_dpp *dpp,
const struct scaler_data *scl_data,
- bool chroma_coef_mode)
+ bool chroma_coef_mode,
+ bool force_coeffs_update)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
@@ -343,7 +344,7 @@ static void dpp401_dscl_set_scl_filter(
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
- if (filter_updated) {
+ if ((filter_updated) || (force_coeffs_update)) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
@@ -656,274 +657,252 @@ static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp,
RECOUT_HEIGHT, recout->height);
}
/**
- * dpp401_dscl_program_easf - Program EASF
+ * dpp401_dscl_program_easf_v - Program EASF_V
*
* @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
*
- * This is the primary function to program EASF
+ * This is the primary function to program vertical EASF registers
*
*/
-static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+static void dpp401_dscl_program_easf_v(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode);
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_V_MODE */
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en);
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor);
- REG_UPDATE(DSCL_EASF_V_MODE,
+ REG_SET_3(DSCL_EASF_V_MODE, 0,
+ SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en,
+ SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor,
SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
+
+ if (!scl_data->dscl_prog_data.easf_v_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_V_BF_CNTL */
+ REG_SET_6(DSCL_EASF_V_BF_CNTL, 0,
+ SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en,
+ SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode,
+ SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode,
+ SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain,
+ SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain,
SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
+ /* DSCL_EASF_V_RINGEST_3TAP_CNTLn */
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL1, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt,
SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL2, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
- SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 0,
+ SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
- SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1,
SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_V_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_V_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa,
+ SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb,
+ SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina,
SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
+ /* DSCL_EASF_V_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG0, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0,
+ SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0,
SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG1, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1,
+ SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1,
SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG2, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2,
+ SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2,
SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG3, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3,
+ SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3,
SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG4, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4,
+ SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4,
SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG5, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5,
+ SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5,
SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG6, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6,
+ SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6,
SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
- SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_V_BF1_PWL_SEG7, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7,
SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
+ /* DSCL_EASF_V_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG0, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0,
+ SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0,
SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG1, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1,
+ SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1,
SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG2, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2,
+ SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2,
SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG3, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3,
+ SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3,
SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG4, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4,
+ SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4,
SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
- SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_V_BF3_PWL_SEG5, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5,
SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf_h - Program EASF_H
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program horizontal EASF registers
+ *
+ */
+static void dpp401_dscl_program_easf_h(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
/* DSCL_EASF_H_MODE */
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en);
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor);
- REG_UPDATE(DSCL_EASF_H_MODE,
+ REG_SET_3(DSCL_EASF_H_MODE, 0,
+ SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en,
+ SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor,
SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
+
+ if (!scl_data->dscl_prog_data.easf_h_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_H_BF_CNTL */
+ REG_SET_6(DSCL_EASF_H_BF_CNTL, 0,
+ SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en,
+ SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode,
+ SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode,
+ SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain,
+ SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain,
SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
- SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1,
SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_H_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_H_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa,
+ SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb,
+ SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina,
SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
+ /* DSCL_EASF_H_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG0, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0,
+ SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0,
SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG1, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1,
+ SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1,
SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG2, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2,
+ SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2,
SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG3, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3,
+ SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3,
SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG4, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4,
+ SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4,
SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG5, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5,
+ SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5,
SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG6, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6,
+ SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6,
SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
- SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_H_BF1_PWL_SEG7, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7,
SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
+ /* DSCL_EASF_H_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG0, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0,
+ SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0,
SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG1, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1,
+ SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1,
SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG2, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2,
+ SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2,
SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG3, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3,
+ SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3,
SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG4, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4,
+ SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4,
SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
- SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_H_BF3_PWL_SEG5, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5,
SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf - Program EASF
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program EASF
+ *
+ */
+static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
+ /* DSCL_SC_MODE */
+ REG_SET_2(DSCL_SC_MODE, 0,
+ SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode,
+ SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_SC_MATRIX_C0C1, DSCL_EASF_SC_MATRIX_C2C3 */
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
- SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0);
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
+ REG_SET_2(DSCL_SC_MATRIX_C0C1, 0,
+ SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0,
SCL_SC_MATRIX_C1, scl_data->dscl_prog_data.easf_matrix_c1);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
- SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
+ REG_SET_2(DSCL_SC_MATRIX_C2C3, 0,
+ SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2,
SCL_SC_MATRIX_C3, scl_data->dscl_prog_data.easf_matrix_c3);
+ dpp401_dscl_program_easf_v(dpp_base, scl_data);
+ dpp401_dscl_program_easf_h(dpp_base, scl_data);
PERF_TRACE();
}
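Two things are happening in the refactor above: scattered read-modify-write REG_UPDATE() calls are folded into single whole-register REG_SET_n() writes, and each direction's programming returns early once its enable bit is known to be clear. A compilable sketch of the shape, with reg_set3() as a hypothetical stand-in for REG_SET_3():

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for REG_SET_3(): one write carrying three fields */
static void reg_set3(const char *reg, unsigned a, unsigned b, unsigned c)
{
	printf("%s <= {%u,%u,%u}\n", reg, a, b, c);
}

static void program_easf_v(bool en, unsigned sharp_factor, unsigned ring)
{
	reg_set3("DSCL_EASF_V_MODE", en, sharp_factor, ring);
	if (!en)
		return;	/* the remaining V registers are don't-care when disabled */
	/* ... group the rest of the vertical EASF fields the same way ... */
}

int main(void)
{
	program_easf_v(false, 0, 0);	/* disabled: a single write, then return */
	program_easf_v(true, 2, 1);	/* enabled: full programming */
	return 0;
}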
/**
@@ -958,10 +937,11 @@ static void dpp401_dscl_set_isharp_filter(
REG_UPDATE(ISHARP_DELTA_CTRL,
ISHARP_DELTA_LUT_HOST_SELECT, 0);
+ /* LUT data write is auto-indexed. Write index once */
+ REG_SET(ISHARP_DELTA_INDEX, 0,
+ ISHARP_DELTA_INDEX, 0);
for (level = 0; level < NUM_LEVELS; level++) {
filter_data = filter[level];
- REG_SET(ISHARP_DELTA_INDEX, 0,
- ISHARP_DELTA_INDEX, level);
REG_SET(ISHARP_DELTA_DATA, 0,
ISHARP_DELTA_DATA, filter_data);
}
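The hunk above relies on the LUT index register auto-incrementing after each data write, so the index is set once before the loop instead of once per level. A standalone sketch of that access pattern (write_reg() and the register names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS 6

static void write_reg(const char *name, uint32_t val)
{
	printf("%s <= %u\n", name, (unsigned)val);
}

static void load_delta_lut(const uint32_t *filter)
{
	int level;

	write_reg("ISHARP_DELTA_INDEX", 0);	/* set the start index once */
	for (level = 0; level < NUM_LEVELS; level++)
		write_reg("ISHARP_DELTA_DATA", filter[level]);	/* index auto-increments */
}

int main(void)
{
	uint32_t filter[NUM_LEVELS] = { 0, 1, 2, 4, 8, 16 };

	load_delta_lut(filter);
	return 0;
}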
@@ -971,112 +951,83 @@ static void dpp401_dscl_set_isharp_filter(
*
* @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
+ * @program_isharp_1dlut: flag to program isharp 1D LUT
+ * @bs_coeffs_updated: Blur and Scale Coefficients update flag
*
* This is the primary function to program isharp
*
*/
static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
- const struct scaler_data *scl_data)
+ const struct scaler_data *scl_data,
+ bool program_isharp_1dlut,
+ bool *bs_coeffs_updated)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+ *bs_coeffs_updated = false;
PERF_TRACE();
- /* ISHARP_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_EN, scl_data->dscl_prog_data.isharp_en);
- /* ISHARP_NOISEDET_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable);
- /* ISHARP_NOISEDET_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
+ /* ISHARP_MODE */
+ REG_SET_6(ISHARP_MODE, 0,
+ ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
+ ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable,
+ ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode,
+ ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode,
+ ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode,
+ ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
+
+ /* Skip remaining register programming if ISHARP is disabled */
+ if (!scl_data->dscl_prog_data.isharp_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* ISHARP_NOISEDET_THRESHOLD */
+ REG_SET_2(ISHARP_NOISEDET_THRESHOLD, 0,
+ ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- /* ISHARP_NOISEDET_PWL_START_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in);
- /* ISHARP_NOISEDET_PWL_END_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in);
- /* ISHARP_NOISEDET_PWL_SLOPE */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
+
+ /* ISHARP_NOISE_GAIN_PWL */
+ REG_SET_3(ISHARP_NOISE_GAIN_PWL, 0,
+ ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in,
+ ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in,
ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope);
- /* ISHARP_LBA_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode);
+
/* ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG */
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
+ REG_SET_3(ISHARP_LBA_PWL_SEG0, 0,
+ ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0],
+ ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0],
ISHARP_LBA_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.isharp_lba.slope_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
+ REG_SET_3(ISHARP_LBA_PWL_SEG1, 0,
+ ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1],
+ ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1],
ISHARP_LBA_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.isharp_lba.slope_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
+ REG_SET_3(ISHARP_LBA_PWL_SEG2, 0,
+ ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2],
+ ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2],
ISHARP_LBA_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.isharp_lba.slope_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
+ REG_SET_3(ISHARP_LBA_PWL_SEG3, 0,
+ ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3],
+ ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3],
ISHARP_LBA_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.isharp_lba.slope_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
+ REG_SET_3(ISHARP_LBA_PWL_SEG4, 0,
+ ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4],
+ ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4],
ISHARP_LBA_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.isharp_lba.slope_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
- ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
+ REG_SET_2(ISHARP_LBA_PWL_SEG5, 0,
+ ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5],
ISHARP_LBA_PWL_BASE_SEG5, scl_data->dscl_prog_data.isharp_lba.base_seg[5]);
- /* ISHARP_FMT_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode);
- /* ISHARP_FMT_NORM */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
/* ISHARP_DELTA_LUT */
- dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
- /* ISHARP_NLDELTA_SCLIP_EN_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p);
- /* ISHARP_NLDELTA_SCLIP_EN_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
+ if (!program_isharp_1dlut)
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+
+ /* ISHARP_NLDELTA_SOFT_CLIP */
+ REG_SET_6(ISHARP_NLDELTA_SOFT_CLIP, 0,
+ ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p,
+ ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p,
+ ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p,
+ ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n,
+ ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n,
ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n);
/* Blur and Scale Coefficients - SCL_COEF_RAM_TAP_SELECT */
@@ -1086,12 +1037,14 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
dpp, scl_data->taps.v_taps,
SCL_COEF_VERTICAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_v);
+ *bs_coeffs_updated = true;
}
if (scl_data->dscl_prog_data.filter_blur_scale_h) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_HORIZONTAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_h);
+ *bs_coeffs_updated = true;
}
}
PERF_TRACE();
@@ -1122,12 +1075,29 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+ bool program_isharp_1dlut = false;
+ bool bs_coeffs_updated = false;
+
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
PERF_TRACE();
+	/* If only sharpness has changed, update only the 1D LUT and return */
+ if (scl_data->dscl_prog_data.isharp_en &&
+ (dpp->scl_data.dscl_prog_data.sharpness_level
+ != scl_data->dscl_prog_data.sharpness_level)) {
+ /* ISHARP_DELTA_LUT */
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+ dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level;
+ dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta;
+
+ if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
+ return;
+ program_isharp_1dlut = true;
+ }
+
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
@@ -1181,7 +1151,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
- dpp401_dscl_program_isharp(dpp_base, scl_data);
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
return;
}
@@ -1208,12 +1178,18 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
SCL_V_NUM_TAPS_C, v_num_taps_c,
SCL_H_NUM_TAPS_C, h_num_taps_c);
- dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+	/* ISharp configuration
+	 * - B&S coeffs are written to the same coeff RAM as the WB scaler coeffs
+	 * - the coeff RAM toggle happens during EASF programming
+	 * - if only the B&S coeffs are being programmed, the WB scaler coeffs
+	 *   must be reprogrammed and the coeff RAM toggled together
+ */
+ //if (dpp->base.ctx->dc->config.prefer_easf)
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
+
+ dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr, bs_coeffs_updated);
/* Edge adaptive scaler function configuration */
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_easf(dpp_base, scl_data);
- /* isharp configuration */
- //if (dpp->base.ctx->dc->config.prefer_easf)
- dpp401_dscl_program_isharp(dpp_base, scl_data);
PERF_TRACE();
}
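The caller-side change above adds a fast path: when only the sharpness level differs from the cached scaler state, just the ISHARP 1D LUT is reloaded, the cached fields are patched, and a second memcmp() decides whether a full reprogram is still needed. A simplified, compilable sketch of that caching logic (struct layout invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct scl_state {
	int sharpness_level;
	int other_params[8];
};

static struct scl_state cached;

static void set_isharp_filter(int level)
{
	printf("reloading 1D LUT for sharpness %d\n", level);
}

static void set_scaler(const struct scl_state *new_state)
{
	if (memcmp(&cached, new_state, sizeof(*new_state)) == 0)
		return;					/* nothing changed at all */

	if (cached.sharpness_level != new_state->sharpness_level) {
		set_isharp_filter(new_state->sharpness_level);
		cached.sharpness_level = new_state->sharpness_level;
		if (memcmp(&cached, new_state, sizeof(*new_state)) == 0)
			return;				/* only sharpness changed */
	}
	cached = *new_state;
	puts("full scaler reprogram");
}

int main(void)
{
	struct scl_state s = { .sharpness_level = 3, .other_params = { 1 } };

	set_scaler(&s);			/* first call: full reprogram */
	s.sharpness_level = 5;
	set_scaler(&s);			/* only sharpness changed: LUT-only fast path */
	return 0;
}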
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 6acb6699f146..61678b0a5a1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -27,7 +27,7 @@ static void dsc401_disconnect(struct display_stream_compressor *dsc);
static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
-const struct dsc_funcs dcn401_dsc_funcs = {
+static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
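The dcn401_dsc_funcs change is purely a linkage fix: the table is only referenced from this file, so it can be static const, dropping the global symbol and letting the table live in read-only data. A minimal sketch of the pattern:

#include <stdio.h>

struct ops {
	void (*read_state)(void);
};

static void my_read_state(void) { puts("read_state"); }

/* static: no external linkage; const: the table can live in .rodata */
static const struct ops my_ops = {
	.read_state = my_read_state,
};

int main(void)
{
	my_ops.read_state();
	return 0;
}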
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/Makefile b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
index 16f7a454fed9..3952ba4cd508 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
@@ -25,6 +25,15 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN30
+###############################################################################
+DWB_DCN30 = dcn30_dwb.o dcn30_dwb_cm.o
+
+AMD_DAL_DWB_DCN30 = $(addprefix $(AMDDALPATH)/dc/dwb/dcn30/,$(DWB_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DWB_DCN30)
+
+###############################################################################
# DCN35
###############################################################################
DWB_DCN35 = dcn35_dwb.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
index bd98b327a6c7..bd98b327a6c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index fae98cf52020..fae98cf52020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
index 0f3f7c5fbaec..0f3f7c5fbaec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c
index 03a50c32fcfe..03a50c32fcfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
index b23a809999ed..d5e8294f5a16 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "reg_helper.h"
#include "dcn35_dwb.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
index 46415cab23ab..928abca18a18 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
@@ -86,7 +86,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
-// ddc_data_regs_dcn2(5),
+ {
+		// add a dummy entry for cases where no such port exists
+		{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+		.ddc_setup = 0,
+		.phy_aux_cntl = 0,
+		.dc_gpio_aux_ctrl_5 = 0
+ },
{
// add a dummy entry for cases no such port
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
@@ -107,7 +113,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
-// ddc_clk_regs_dcn2(5),
+ {
+		// add a dummy entry for cases where no such port exists
+		{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+		.ddc_setup = 0,
+		.phy_aux_cntl = 0,
+		.dc_gpio_aux_ctrl_5 = 0
+ },
{
// add a dummy entry for cases no such port
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
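The dummy entries added above replace commented-out table rows so that later ports keep their expected array indices. A standalone sketch of why a zeroed placeholder matters for index-based lookup (struct and values are illustrative):

#include <stdio.h>

struct ddc_regs {
	unsigned base;
	unsigned ddc_setup;
};

static const struct ddc_regs ddc_data_regs[] = {
	{ 0x100, 1 },	/* port 1 at index 0 */
	{ 0x200, 1 },	/* port 2 at index 1 */
	{ 0, 0 },	/* dummy entry: this ASIC has no port 3 */
	{ 0x400, 1 },	/* port 4 stays at its expected index 3 */
};

int main(void)
{
	/* index-based lookup still lines up with the port numbering */
	printf("port 4 base: 0x%x\n", ddc_data_regs[3].base);
	return 0;
}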
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/Makefile b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
index c248bd86b477..7f2c9ee0dff1 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
@@ -25,6 +25,21 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN30
+###############################################################################
+
+AMD_DAL_HPO_DCN30 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn30/,$(HPO_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN30)
+###############################################################################
+# DCN31
+###############################################################################
+HPO_DCN31 = dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o
+
+AMD_DAL_HPO_DCN31 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn31/,$(HPO_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN31)
+###############################################################################
# DCN32
###############################################################################
HPO_DCN32 = dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
index 03b4ac2f1991..03b4ac2f1991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
index 51f5781325e8..51f5781325e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 678db949cfe3..678db949cfe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h
index 82c3b3ac1f0d..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 181041d6d177..37d26fa0b6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -75,108 +75,108 @@ bool hubbub401_program_urgent_watermarks(
/* Repeat for water mark set A and B */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.urgent > hubbub2->watermarks.dcn4.a.urgent) {
- hubbub2->watermarks.dcn4.a.urgent = watermarks->dcn4.a.urgent;
+ if (safe_to_lower || watermarks->dcn4x.a.urgent > hubbub2->watermarks.dcn4x.a.urgent) {
+ hubbub2->watermarks.dcn4x.a.urgent = watermarks->dcn4x.a.urgent;
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
- DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4.a.urgent);
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4x.a.urgent);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.urgent, watermarks->dcn4.a.urgent);
- } else if (watermarks->dcn4.a.urgent < hubbub2->watermarks.dcn4.a.urgent)
+ watermarks->dcn4x.a.urgent, watermarks->dcn4x.a.urgent);
+ } else if (watermarks->dcn4x.a.urgent < hubbub2->watermarks.dcn4x.a.urgent)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_flip
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_flip) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_flip = watermarks->dcn4.a.frac_urg_bw_flip;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_flip
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip = watermarks->dcn4x.a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4.a.frac_urg_bw_flip);
- } else if (watermarks->dcn4.a.frac_urg_bw_flip
- < hubbub2->watermarks.dcn4.a.frac_urg_bw_flip)
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4x.a.frac_urg_bw_flip);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_flip
+ < hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_nom
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_nom) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_nom = watermarks->dcn4.a.frac_urg_bw_nom;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_nom
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom = watermarks->dcn4x.a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4.a.frac_urg_bw_nom);
- } else if (watermarks->dcn4.a.frac_urg_bw_nom
- < hubbub2->watermarks.dcn4.a.frac_urg_bw_nom)
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4x.a.frac_urg_bw_nom);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_nom
+ < hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_mall
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_mall) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_mall = watermarks->dcn4.a.frac_urg_bw_mall;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_mall
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall = watermarks->dcn4x.a.frac_urg_bw_mall;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4.a.frac_urg_bw_mall);
- } else if (watermarks->dcn4.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4.a.frac_urg_bw_mall)
+ DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4x.a.frac_urg_bw_mall);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem) {
- hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem = watermarks->dcn4.a.refcyc_per_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem = watermarks->dcn4x.a.refcyc_per_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
- DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4.a.refcyc_per_trip_to_mem);
- } else if (watermarks->dcn4.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4x.a.refcyc_per_trip_to_mem);
+ } else if (watermarks->dcn4x.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem) {
- hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4.a.refcyc_per_meta_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, 0,
- DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4.a.refcyc_per_meta_trip_to_mem);
- } else if (watermarks->dcn4.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem);
+ } else if (watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.urgent > hubbub2->watermarks.dcn4.b.urgent) {
- hubbub2->watermarks.dcn4.b.urgent = watermarks->dcn4.b.urgent;
+ if (safe_to_lower || watermarks->dcn4x.b.urgent > hubbub2->watermarks.dcn4x.b.urgent) {
+ hubbub2->watermarks.dcn4x.b.urgent = watermarks->dcn4x.b.urgent;
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
- DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4.b.urgent);
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4x.b.urgent);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.urgent, watermarks->dcn4.b.urgent);
- } else if (watermarks->dcn4.b.urgent < hubbub2->watermarks.dcn4.b.urgent)
+ watermarks->dcn4x.b.urgent, watermarks->dcn4x.b.urgent);
+ } else if (watermarks->dcn4x.b.urgent < hubbub2->watermarks.dcn4x.b.urgent)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_flip
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_flip) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_flip = watermarks->dcn4.b.frac_urg_bw_flip;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_flip
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip = watermarks->dcn4x.b.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4.b.frac_urg_bw_flip);
- } else if (watermarks->dcn4.b.frac_urg_bw_flip
- < hubbub2->watermarks.dcn4.b.frac_urg_bw_flip)
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4x.b.frac_urg_bw_flip);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_flip
+ < hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_nom
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_nom) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_nom = watermarks->dcn4.b.frac_urg_bw_nom;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_nom
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom = watermarks->dcn4x.b.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4.b.frac_urg_bw_nom);
- } else if (watermarks->dcn4.b.frac_urg_bw_nom
- < hubbub2->watermarks.dcn4.b.frac_urg_bw_nom)
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4x.b.frac_urg_bw_nom);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_nom
+ < hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_mall
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_mall) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_mall = watermarks->dcn4.b.frac_urg_bw_mall;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_mall
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall = watermarks->dcn4x.b.frac_urg_bw_mall;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4.b.frac_urg_bw_mall);
- } else if (watermarks->dcn4.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4.b.frac_urg_bw_mall)
+ DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4x.b.frac_urg_bw_mall);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem) {
- hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem = watermarks->dcn4.b.refcyc_per_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem = watermarks->dcn4x.b.refcyc_per_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
- DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4.b.refcyc_per_trip_to_mem);
- } else if (watermarks->dcn4.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4x.b.refcyc_per_trip_to_mem);
+ } else if (watermarks->dcn4x.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem) {
- hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4.b.refcyc_per_meta_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, 0,
- DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4.b.refcyc_per_meta_trip_to_mem);
- } else if (watermarks->dcn4.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem);
+ } else if (watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem)
wm_pending = true;
return wm_pending;
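Every watermark in the hunk above follows the same policy: an increase is programmed immediately, a decrease is programmed only when safe_to_lower is set, and an unsafe decrease is reported back as pending. A compilable sketch of that policy for a single watermark (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned cached_wm;

static bool program_watermark(unsigned new_wm, bool safe_to_lower)
{
	bool wm_pending = false;

	if (safe_to_lower || new_wm > cached_wm) {
		cached_wm = new_wm;
		printf("WM register <= %u\n", new_wm);	/* stands in for REG_SET */
	} else if (new_wm < cached_wm) {
		wm_pending = true;	/* defer the decrease until it is safe */
	}
	return wm_pending;
}

int main(void)
{
	program_watermark(100, false);	/* raise: applied immediately */
	program_watermark(50, false);	/* lower, not safe: left pending */
	program_watermark(50, true);	/* lower, safe: applied */
	return 0;
}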
@@ -192,89 +192,89 @@ bool hubbub401_program_stutter_watermarks(
bool wm_pending = false;
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.sr_enter
- > hubbub2->watermarks.dcn4.a.sr_enter) {
- hubbub2->watermarks.dcn4.a.sr_enter =
- watermarks->dcn4.a.sr_enter;
+ if (safe_to_lower || watermarks->dcn4x.a.sr_enter
+ > hubbub2->watermarks.dcn4x.a.sr_enter) {
+ hubbub2->watermarks.dcn4x.a.sr_enter =
+ watermarks->dcn4x.a.sr_enter;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4x.a.sr_enter);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.sr_enter, watermarks->dcn4.a.sr_enter);
+ watermarks->dcn4x.a.sr_enter, watermarks->dcn4x.a.sr_enter);
// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm A with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4x.a.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4x.a.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4x.a.sr_enter);
- } else if (watermarks->dcn4.a.sr_enter
- < hubbub2->watermarks.dcn4.a.sr_enter)
+ } else if (watermarks->dcn4x.a.sr_enter
+ < hubbub2->watermarks.dcn4x.a.sr_enter)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.sr_exit
- > hubbub2->watermarks.dcn4.a.sr_exit) {
- hubbub2->watermarks.dcn4.a.sr_exit =
- watermarks->dcn4.a.sr_exit;
+ if (safe_to_lower || watermarks->dcn4x.a.sr_exit
+ > hubbub2->watermarks.dcn4x.a.sr_exit) {
+ hubbub2->watermarks.dcn4x.a.sr_exit =
+ watermarks->dcn4x.a.sr_exit;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4x.a.sr_exit);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.sr_exit, watermarks->dcn4.a.sr_exit);
+ watermarks->dcn4x.a.sr_exit, watermarks->dcn4x.a.sr_exit);
// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm A with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4x.a.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4x.a.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4x.a.sr_exit);
- } else if (watermarks->dcn4.a.sr_exit
- < hubbub2->watermarks.dcn4.a.sr_exit)
+ } else if (watermarks->dcn4x.a.sr_exit
+ < hubbub2->watermarks.dcn4x.a.sr_exit)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.sr_enter
- > hubbub2->watermarks.dcn4.b.sr_enter) {
- hubbub2->watermarks.dcn4.b.sr_enter =
- watermarks->dcn4.b.sr_enter;
+ if (safe_to_lower || watermarks->dcn4x.b.sr_enter
+ > hubbub2->watermarks.dcn4x.b.sr_enter) {
+ hubbub2->watermarks.dcn4x.b.sr_enter =
+ watermarks->dcn4x.b.sr_enter;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4x.b.sr_enter);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.sr_enter, watermarks->dcn4.b.sr_enter);
+ watermarks->dcn4x.b.sr_enter, watermarks->dcn4x.b.sr_enter);
// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm B with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4x.b.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4x.b.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4x.b.sr_enter);
- } else if (watermarks->dcn4.b.sr_enter
- < hubbub2->watermarks.dcn4.b.sr_enter)
+ } else if (watermarks->dcn4x.b.sr_enter
+ < hubbub2->watermarks.dcn4x.b.sr_enter)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.sr_exit
- > hubbub2->watermarks.dcn4.b.sr_exit) {
- hubbub2->watermarks.dcn4.b.sr_exit =
- watermarks->dcn4.b.sr_exit;
+ if (safe_to_lower || watermarks->dcn4x.b.sr_exit
+ > hubbub2->watermarks.dcn4x.b.sr_exit) {
+ hubbub2->watermarks.dcn4x.b.sr_exit =
+ watermarks->dcn4x.b.sr_exit;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4x.b.sr_exit);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.sr_exit, watermarks->dcn4.b.sr_exit);
+ watermarks->dcn4x.b.sr_exit, watermarks->dcn4x.b.sr_exit);
// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm B with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4x.b.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4x.b.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4x.b.sr_exit);
- } else if (watermarks->dcn4.b.sr_exit
- < hubbub2->watermarks.dcn4.b.sr_exit)
+ } else if (watermarks->dcn4x.b.sr_exit
+ < hubbub2->watermarks.dcn4x.b.sr_exit)
wm_pending = true;
return wm_pending;
@@ -292,116 +292,116 @@ bool hubbub401_program_pstate_watermarks(
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.uclk_pstate
- > hubbub2->watermarks.dcn4.a.uclk_pstate) {
- hubbub2->watermarks.dcn4.a.uclk_pstate =
- watermarks->dcn4.a.uclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.a.uclk_pstate
+ > hubbub2->watermarks.dcn4x.a.uclk_pstate) {
+ hubbub2->watermarks.dcn4x.a.uclk_pstate =
+ watermarks->dcn4x.a.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.uclk_pstate);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.uclk_pstate, watermarks->dcn4.a.uclk_pstate);
- } else if (watermarks->dcn4.a.uclk_pstate
- < hubbub2->watermarks.dcn4.a.uclk_pstate)
+ watermarks->dcn4x.a.uclk_pstate, watermarks->dcn4x.a.uclk_pstate);
+ } else if (watermarks->dcn4x.a.uclk_pstate
+ < hubbub2->watermarks.dcn4x.a.uclk_pstate)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.uclk_pstate
- > hubbub2->watermarks.dcn4.b.uclk_pstate) {
- hubbub2->watermarks.dcn4.b.uclk_pstate =
- watermarks->dcn4.b.uclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.b.uclk_pstate
+ > hubbub2->watermarks.dcn4x.b.uclk_pstate) {
+ hubbub2->watermarks.dcn4x.b.uclk_pstate =
+ watermarks->dcn4x.b.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.uclk_pstate);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.uclk_pstate, watermarks->dcn4.b.uclk_pstate);
- } else if (watermarks->dcn4.b.uclk_pstate
- < hubbub2->watermarks.dcn4.b.uclk_pstate)
+ watermarks->dcn4x.b.uclk_pstate, watermarks->dcn4x.b.uclk_pstate);
+ } else if (watermarks->dcn4x.b.uclk_pstate
+ < hubbub2->watermarks.dcn4x.b.uclk_pstate)
wm_pending = true;
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
- if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
- watermarks->dcn4.a.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
+ watermarks->dcn4x.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
- } else if (watermarks->dcn4.a.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
+ watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.a.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
- watermarks->dcn4.b.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
+ watermarks->dcn4x.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
- } else if (watermarks->dcn4.b.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
+ watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.b.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
wm_pending = true;
/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.fclk_pstate
- > hubbub2->watermarks.dcn4.a.fclk_pstate) {
- hubbub2->watermarks.dcn4.a.fclk_pstate =
- watermarks->dcn4.a.fclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.a.fclk_pstate
+ > hubbub2->watermarks.dcn4x.a.fclk_pstate) {
+ hubbub2->watermarks.dcn4x.a.fclk_pstate =
+ watermarks->dcn4x.a.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.fclk_pstate);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.fclk_pstate, watermarks->dcn4.a.fclk_pstate);
- } else if (watermarks->dcn4.a.fclk_pstate
- < hubbub2->watermarks.dcn4.a.fclk_pstate)
+ watermarks->dcn4x.a.fclk_pstate, watermarks->dcn4x.a.fclk_pstate);
+ } else if (watermarks->dcn4x.a.fclk_pstate
+ < hubbub2->watermarks.dcn4x.a.fclk_pstate)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.fclk_pstate
- > hubbub2->watermarks.dcn4.b.fclk_pstate) {
- hubbub2->watermarks.dcn4.b.fclk_pstate =
- watermarks->dcn4.b.fclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.b.fclk_pstate
+ > hubbub2->watermarks.dcn4x.b.fclk_pstate) {
+ hubbub2->watermarks.dcn4x.b.fclk_pstate =
+ watermarks->dcn4x.b.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.fclk_pstate);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.fclk_pstate, watermarks->dcn4.b.fclk_pstate);
- } else if (watermarks->dcn4.b.fclk_pstate
- < hubbub2->watermarks.dcn4.b.fclk_pstate)
+ watermarks->dcn4x.b.fclk_pstate, watermarks->dcn4x.b.fclk_pstate);
+ } else if (watermarks->dcn4x.b.fclk_pstate
+ < hubbub2->watermarks.dcn4x.b.fclk_pstate)
wm_pending = true;
/* Section for FCLK_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
- if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
- watermarks->dcn4.a.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
+ watermarks->dcn4x.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
- } else if (watermarks->dcn4.a.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
+ watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.a.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
- watermarks->dcn4.b.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
+ watermarks->dcn4x.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
- } else if (watermarks->dcn4.b.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
+ watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.b.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
wm_pending = true;
return wm_pending;
@@ -418,29 +418,29 @@ bool hubbub401_program_usr_watermarks(
bool wm_pending = false;
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.usr
- > hubbub2->watermarks.dcn4.a.usr) {
- hubbub2->watermarks.dcn4.a.usr = watermarks->dcn4.a.usr;
+ if (safe_to_lower || watermarks->dcn4x.a.usr
+ > hubbub2->watermarks.dcn4x.a.usr) {
+ hubbub2->watermarks.dcn4x.a.usr = watermarks->dcn4x.a.usr;
REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
- DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4.a.usr);
+ DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4x.a.usr);
DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.usr, watermarks->dcn4.a.usr);
- } else if (watermarks->dcn4.a.usr
- < hubbub2->watermarks.dcn4.a.usr)
+ watermarks->dcn4x.a.usr, watermarks->dcn4x.a.usr);
+ } else if (watermarks->dcn4x.a.usr
+ < hubbub2->watermarks.dcn4x.a.usr)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.usr
- > hubbub2->watermarks.dcn4.b.usr) {
- hubbub2->watermarks.dcn4.b.usr = watermarks->dcn4.b.usr;
+ if (safe_to_lower || watermarks->dcn4x.b.usr
+ > hubbub2->watermarks.dcn4x.b.usr) {
+ hubbub2->watermarks.dcn4x.b.usr = watermarks->dcn4x.b.usr;
REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
- DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4.b.usr);
+ DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4x.b.usr);
DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.usr, watermarks->dcn4.b.usr);
- } else if (watermarks->dcn4.b.usr
- < hubbub2->watermarks.dcn4.b.usr)
+ watermarks->dcn4x.b.usr, watermarks->dcn4x.b.usr);
+ } else if (watermarks->dcn4x.b.usr
+ < hubbub2->watermarks.dcn4x.b.usr)
wm_pending = true;
return wm_pending;
@@ -1170,6 +1170,28 @@ static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned comp
}
}
+static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ switch (hubp_inst) {
+ case 0:
+ REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100000); /* 1 vupdate at 10 Hz */
+ break;
+ case 1:
+ REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100000);
+ break;
+ case 2:
+ REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100000);
+ break;
+ case 3:
+ REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100000);
+ break;
+ default:
+ break;
+ }
+}
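Each REG_WAIT above polls a DETn_SIZE_CURRENT field until it matches the cached DET size. Assuming the usual REG_WAIT(reg, field, value, poll-interval-us, max-tries) semantics, the trailing 1, 100000 arguments give a worst case of roughly 100 ms, i.e. one vupdate at 10 Hz as the comment says. A roughly equivalent poll loop, with read_det_size_current() a placeholder for the register read:

uint32_t read_det_size_current(void); /* placeholder, not DC API */

static bool wait_det_size(uint32_t expected)
{
        int attempt;

        for (attempt = 0; attempt < 100000; attempt++) {
                if (read_det_size_current() == expected)
                        return true;    /* DET reallocation settled */
                udelay(1);              /* 1 us poll interval */
        }
        return false;                   /* timed out after ~100 ms */
}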
+
static const struct hubbub_funcs hubbub4_01_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -1192,6 +1214,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.set_request_limit = hubbub32_set_request_limit,
.program_det_segments = dcn401_program_det_segments,
.program_compbuf_segments = dcn401_program_compbuf_segments,
+ .wait_for_det_update = dcn401_wait_for_det_update,
};
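Since wait_for_det_update is a new, optional entry in hubbub_funcs, a caller would be expected to test the pointer before invoking it; a hypothetical call site (not part of this patch):

if (hubbub->funcs->wait_for_det_update)
        hubbub->funcs->wait_for_det_update(hubbub, hubp->inst);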
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index bf399819ca80..22ac2b7e49ae 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -749,7 +749,8 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
if (flip_pending)
return true;
- if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ if (hubp &&
+ earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 6bba020ad6fb..0637e4c552d8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -927,7 +927,8 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
if (flip_pending)
return true;
- if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ if (hubp &&
+ earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 771fcd0d3b99..d1f05b82b3dd 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -188,7 +188,7 @@ void hubp35_program_surface_config(
hubp35_program_pixel_format(hubp, format);
}
-struct hubp_funcs dcn35_hubp_funcs = {
+static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index eb0da6c6b87c..b1ebf5053b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -725,8 +725,8 @@ void hubp401_cursor_set_position(
CURSOR_ENABLE, cur_en);
REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, pos->x_hotspot,
@@ -990,7 +990,6 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_soft_reset = hubp31_soft_reset,
.hubp_set_flip_int = hubp401_set_flip_int,
.hubp_in_blank = hubp401_in_blank,
- .hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp401_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 1f2eb2f727dc..d52ce58c6a98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -949,7 +949,7 @@ void dce110_edp_backlight_control(
{
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
- uint8_t pwrseq_instance;
+ uint8_t pwrseq_instance = 0;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;
@@ -1002,7 +1002,8 @@ void dce110_edp_backlight_control(
*/
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
- pwrseq_instance = link->panel_cntl->pwrseq_inst;
+ if (link->panel_cntl)
+ pwrseq_instance = link->panel_cntl->pwrseq_inst;
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
if (!link->dc->config.edp_no_power_sequencing)
@@ -1231,20 +1232,21 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* has changed or they enter protection state and hang.
*/
msleep(60);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing) {
- /*
- * Sometimes, DP receiver chip power-controlled externally by an
- * Embedded Controller could be treated and used as eDP,
- * if it drives mobile display. In this case,
- * we shouldn't be doing power-sequencing, hence we can skip
- * waiting for T9-ready.
- */
- link->dc->link_srv->edp_receiver_ready_T9(link);
- }
}
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ !link->dc->config.edp_no_power_sequencing) {
+ /*
+ * Sometimes a DP receiver chip that is power-controlled externally by an
+ * Embedded Controller is treated and used as eDP because it drives a
+ * mobile display. In this case we shouldn't be doing power sequencing,
+ * hence we can skip waiting for T9-ready.
+ */
+ link->dc->link_srv->edp_receiver_ready_T9(link);
+ }
+
}
@@ -1549,6 +1551,7 @@ static enum dc_status dce110_enable_stream_timing(
0,
0,
0,
+ 0,
pipe_ctx->stream->signal,
true);
}
@@ -1597,6 +1600,11 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
&audio_output.crtc_info,
&pipe_ctx->stream->audio_info,
&audio_output.dp_link_info);
+
+ if (dc->config.disable_hbr_audio_dp2)
+ if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio &&
+ dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio(pipe_ctx->stream_res.audio);
}
/* make sure no pipes are synced to the pipe being enabled */
@@ -1838,6 +1846,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -1914,13 +1923,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
- clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+ if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb))
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
- clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+ if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb))
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
@@ -2340,19 +2351,6 @@ static void dce110_setup_audio_dto(
}
}
-static bool dce110_is_hpo_enabled(struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
- if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
- return true;
- }
- }
-
- return false;
-}
-
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
@@ -2361,8 +2359,8 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
- bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
- bool is_hpo_enabled = dce110_is_hpo_enabled(context);
+ bool was_hpo_acquired = resource_is_hpo_acquired(dc->current_state);
+ bool is_hpo_acquired = resource_is_hpo_acquired(context);
/* reset syncd pipes from disabled pipes */
if (dc->config.use_pipe_ctx_sync_logic)
@@ -2405,8 +2403,8 @@ enum dc_status dce110_apply_ctx_to_hw(
dce110_setup_audio_dto(dc, context);
- if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) {
- dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);
+ if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_acquired != is_hpo_acquired) {
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_acquired);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2438,7 +2436,7 @@ enum dc_status dce110_apply_ctx_to_hw(
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
@@ -3312,7 +3310,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
static const struct hwseq_private_funcs dce110_private_funcs = {
.init_pipes = init_pipes,
- .update_plane_addr = update_plane_addr,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 14a902ff3b8a..a6a1db5ba8ba 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1005,6 +1005,7 @@ enum dc_status dcn10_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1554,7 +1555,7 @@ void dcn10_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
/* Align bw context with hw config when system resume. */
- if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
}
@@ -1674,7 +1675,7 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
@@ -1697,10 +1698,10 @@ void dcn10_power_down_on_boot(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
+ dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -1708,8 +1709,8 @@ void dcn10_power_down_on_boot(struct dc *dc)
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
break;
}
@@ -2585,8 +2586,11 @@ static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_co
while (top->top_pipe)
top = top->top_pipe; // Traverse to top pipe_ctx
- if (top->plane_state && top->plane_state->layer_index == 0)
- return true; // Front MPO plane not hidden
+ if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
+ // Global alpha used by top plane for PIP overlay
+ // Pre-multiplied/per-pixel alpha used by MPO
+ // Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
+ return true; // MPO in use and front plane not hidden
}
}
return false;
@@ -2914,7 +2918,7 @@ static void dcn10_update_dchubp_dpp(
hubp->power_gated = false;
- hws->funcs.update_plane_addr(dc, pipe_ctx);
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
hubp->funcs->set_blank(hubp, false);
@@ -2997,7 +3001,8 @@ void dcn10_program_pipe(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
@@ -3207,15 +3212,19 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
* as well.
*/
for (i = 0; i < num_pipes; i++) {
- if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
- if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
+ /* dc_state_destruct() might null the stream resources, so fetch tg
+ * here first to avoid a race condition. The lifetime of the pointee
+ * itself (the timing_generator object) is not a problem here.
+ */
+ struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
+
+ if ((tg != NULL) && tg->funcs) {
+ if (tg->funcs->set_drr)
+ tg->funcs->set_drr(tg, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ if (tg->funcs->set_static_screen_control)
+ tg->funcs->set_static_screen_control(
+ tg, event_triggers, num_frames);
}
}
}
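The dcn10_set_drr change is a pointer-snapshot fix: pipe_ctx[i]->stream_res.tg can be nulled concurrently by dc_state_destruct(), so it is read exactly once into a local and only the local is dereferenced afterwards. The shape of the pattern, as a sketch with a placeholder helper (not DC API):

static void set_drr_one_pipe(struct pipe_ctx *pipe, struct drr_params *params)
{
        struct timing_generator *tg = pipe->stream_res.tg; /* single read */

        if (tg && tg->funcs && tg->funcs->set_drr)
                tg->funcs->set_drr(tg, params);
        /* even if pipe->stream_res.tg is nulled now, tg remains usable */
}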
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index a5bdac79a744..5e51e1761707 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
@@ -78,7 +78,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
@@ -92,7 +91,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
static const struct hwseq_private_funcs dcn10_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn10_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn10_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 2532ad410cb5..a80c08582932 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -909,6 +909,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1044,7 +1045,8 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/*
* if the above if is not executed, 'params' stays NULL and the output gamma is set to bypass
*/
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return true;
}
@@ -1698,7 +1700,7 @@ static void dcn20_update_dchubp_dpp(
plane_state->update_flags.bits.input_csc_change ||
plane_state->update_flags.bits.color_space_change ||
plane_state->update_flags.bits.coeff_reduction_change) {
- struct dc_bias_and_scale bns_params = {0};
+ struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
// program the input csc
dpp->funcs->dpp_setup(dpp,
@@ -1715,7 +1717,6 @@ static void dcn20_update_dchubp_dpp(
}
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
- build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
@@ -1825,7 +1826,7 @@ static void dcn20_update_dchubp_dpp(
params.subvp_save_surf_addr.subvp_index = pipe_ctx->subvp_index;
hwss_subvp_save_surf_addr(&params);
}
- hws->funcs.update_plane_addr(dc, pipe_ctx);
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.enable)
@@ -1886,7 +1887,8 @@ static void dcn20_program_pipe(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
@@ -1921,22 +1923,28 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->update_flags.raw ||
+ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+ pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
- if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (hws->funcs.populate_mcm_luts) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ if (pipe_ctx->plane_state) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
}
- if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d)
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1946,7 +1954,8 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->stream->update_flags.bits.out_tf ||
- pipe_ctx->plane_state->update_flags.bits.output_tf_change)
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1970,7 +1979,7 @@ static void dcn20_program_pipe(
}
/* Set ABM pipe after other pipe configurations done */
- if (pipe_ctx->plane_state->visible) {
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
if (pipe_ctx->stream_res.abm) {
dc->hwss.set_pipe(pipe_ctx);
pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
@@ -2186,9 +2195,9 @@ static void post_unlock_reset_opp(struct dc *dc,
* yet power gated.
*/
dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+ dsc->funcs->dsc_disable(dsc);
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
- dsc->funcs->dsc_disable(dsc);
}
}
}
@@ -2283,6 +2292,9 @@ void dcn20_post_unlock_program_front_end(
}
}
+ if (!hwseq)
+ return;
+
/* P-State support transitions:
* Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
* FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
@@ -2290,7 +2302,7 @@ void dcn20_post_unlock_program_front_end(
* FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
* FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
- if (hwseq && hwseq->funcs.update_force_pstate)
+ if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
/* Only program the MALL registers after all the main and phantom pipes
@@ -2459,7 +2471,8 @@ bool dcn20_update_bandwidth(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
@@ -2529,6 +2542,9 @@ bool dcn20_wait_for_blank_complete(
{
int counter;
+ if (!opp)
+ return false;
+
for (counter = 0; counter < 1000; counter++) {
if (!opp->funcs->dpg_is_pending(opp))
break;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index ef6488165b8f..32707b344f0b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -105,7 +105,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
static const struct hwseq_private_funcs dcn20_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index a13bf6c9386e..78351408e864 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
@@ -96,7 +96,6 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
static const struct hwseq_private_funcs dcn201_private_funcs = {
.init_pipes = NULL,
- .update_plane_addr = dcn201_update_plane_addr,
.plane_atomic_disconnect = dcn201_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn201_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index 3dfac372d165..e044e9e0a3a1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -109,7 +108,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
static const struct hwseq_private_funcs dcn21_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index eaeeade31ed7..42c52284a868 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -398,7 +398,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ else
+ DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__);
+
return ret;
}
@@ -625,7 +629,7 @@ void dcn30_init_hw(struct dc *dc)
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@@ -731,10 +735,10 @@ void dcn30_init_hw(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -742,8 +746,8 @@ void dcn30_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -786,11 +790,12 @@ void dcn30_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
//if softmax is enabled then hardmax will be set by a different call
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
+ !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 4b32497c09d0..2a8dc40d2847 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -113,7 +113,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
static const struct hwseq_private_funcs dcn30_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 97e33eb7ac5a..93e49d87a67c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -111,7 +111,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
static const struct hwseq_private_funcs dcn301_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 746c522adf84..3d4b31bd9946 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -256,10 +256,10 @@ void dcn31_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 9cb7afe0e731..56f3c70d4b55 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -112,11 +111,11 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 388404cdeeaa..4e93eeedfc1b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -355,14 +355,18 @@ void dcn314_calculate_pix_rate_divider(
}
}
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ }
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
@@ -377,7 +381,10 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
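Both resync_fifo_dccg_dio variants now choose where to read each pipe from: pipes up to and including current_pipe_idx have already been reprogrammed and so come from the new context, while the remaining pipes still reflect dc->current_state. The selection reduces to this helper-shaped sketch:

static struct pipe_ctx *pick_pipe(struct dc *dc, struct dc_state *context,
                                  unsigned int i, unsigned int current_pipe_idx)
{
        if (i <= current_pipe_idx)
                return &context->res_ctx.pipe_ctx[i];      /* already updated */
        return &dc->current_state->res_ctx.pipe_ctx[i];    /* not yet updated */
}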
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index fb4f90f61b22..2305ad282f21 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -41,7 +41,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_calculate_pix_rate_divider(struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream);
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 7a8db4b81471..68e6de6b5758 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -115,11 +114,11 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 05d8f81daa06..a36e11606f90 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -582,7 +582,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
return ret;
}
@@ -779,7 +781,7 @@ void dcn32_init_hw(struct dc *dc)
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@@ -901,10 +903,10 @@ void dcn32_init_hw(struct dc *dc)
if (edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
}
}
@@ -914,8 +916,8 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -958,10 +960,11 @@ void dcn32_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
+ !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
@@ -982,8 +985,19 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- if (dc->ctx->dmub_srv->dmub->fw_version <
+ /* for DCN401 testing only */
+ dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ if (dc->caps.dmub_caps.fams_ver == 2) {
+ /* FAMS2 is enabled */
+ dc->debug.fams2_config.bits.enable &= true;
+ } else if (dc->ctx->dmub_srv->dmub->fw_version <
DMUB_FW_VERSION(7, 0, 35)) {
+ /* FAMS2 is disabled */
+ dc->debug.fams2_config.bits.enable = false;
+ if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) {
+ /* update bounding box if FAMS2 disabled */
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ }
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
@@ -1029,24 +1043,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true);
}
- dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
- dsc_cfg.pic_width *= opp_cnt;
-
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
-
/* Enable DSC in OPTC */
DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
@@ -1060,13 +1070,9 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* only disconnect DSC block, DSC is disabled when OPP head pipe is reset */
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false);
- dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+ dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
ASSERT(odm_pipe->stream_res.dsc);
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false);
odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc);
}
}
@@ -1137,10 +1143,7 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
- struct dccg *dccg = dc->res_pool->dccg;
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, false);
/* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
@@ -1212,20 +1215,27 @@ void dcn32_calculate_pix_rate_divider(
}
}
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
+ struct dc_state *dc_state = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc_state = context;
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc_state = dc->current_state;
+ }
if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -1235,7 +1245,10 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
@@ -1583,7 +1596,7 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
}
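
With the new current_pipe_idx argument, dcn32_resync_fifo_dccg_dio reads pipes that have already been reprogrammed from the incoming context and the remaining pipes from the state still live in hardware. The selection rule, isolated as a sketch:

/* Illustrative only: context-selection rule used by the resync loops above. */
static struct pipe_ctx *pick_pipe_for_resync(struct dc *dc,
					     struct dc_state *context,
					     unsigned int i,
					     unsigned int current_pipe_idx)
{
	if (i <= current_pipe_idx)
		return &context->res_ctx.pipe_ctx[i]; /* already programmed */
	return &dc->current_state->res_ctx.pipe_ctx[i]; /* still live in HW */
}
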
@@ -1717,6 +1730,28 @@ void dcn32_blank_phantom(struct dc *dc,
hws->funcs.wait_for_blank_complete(opp);
}
+/* Phantom stream IDs can change often, but can be identical between contexts.
+ * This function checks whether the phantom streams are identical, to avoid
+ * redundant pipe transitions.
+ */
+static bool is_subvp_phantom_topology_transition_seamless(
+ const struct dc_state *cur_ctx,
+ const struct dc_state *new_ctx,
+ const struct pipe_ctx *cur_pipe,
+ const struct pipe_ctx *new_pipe)
+{
+ enum mall_stream_type cur_pipe_type = dc_state_get_pipe_subvp_type(cur_ctx, cur_pipe);
+ enum mall_stream_type new_pipe_type = dc_state_get_pipe_subvp_type(new_ctx, new_pipe);
+
+ const struct dc_stream_state *cur_paired_stream = dc_state_get_paired_subvp_stream(cur_ctx, cur_pipe->stream);
+ const struct dc_stream_state *new_paired_stream = dc_state_get_paired_subvp_stream(new_ctx, new_pipe->stream);
+
+ return cur_pipe_type == SUBVP_PHANTOM &&
+ cur_pipe_type == new_pipe_type &&
+ cur_paired_stream && new_paired_stream &&
+ cur_paired_stream->stream_id == new_paired_stream->stream_id;
+}
+
bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx)
@@ -1735,7 +1770,8 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
continue;
else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
if (resource_is_pipe_type(new_pipe, OTG_MASTER))
- if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
+ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id ||
+ is_subvp_phantom_topology_transition_seamless(cur_ctx, new_ctx, cur_pipe, new_pipe))
/* OTG master with the same stream is seamless */
continue;
} else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
@@ -1821,3 +1857,13 @@ void dcn32_interdependent_update_lock(struct dc *dc,
dc->hwss.pipe_control_lock(dc, pipe, false);
}
}
+
+void dcn32_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* update compbuf if required */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index db562e45d6ff..cac4a08b92a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -75,7 +75,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct dc_state *context,
@@ -133,4 +133,8 @@ void dcn32_prepare_bandwidth(struct dc *dc,
void dcn32_interdependent_update_lock(struct dc *dc,
struct dc_state *context, bool lock);
+
+void dcn32_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 5c50458b12cb..3422b564ae98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -120,11 +120,11 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.blank_phantom = dcn32_blank_phantom,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
@@ -163,7 +163,6 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
- .populate_mcm_luts = dcn401_populate_mcm_luts,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index f115c7a285e7..479fd3e89e5a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -147,37 +147,6 @@ void dcn35_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
}
- if (!dc->debug.disable_clock_gate) {
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
- PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYESYMCLK_ROOT_GATE_DISABLE, 1);
-
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
- DPIASYMCLK0_GATE_DISABLE, 0,
- DPIASYMCLK1_GATE_DISABLE, 0,
- DPIASYMCLK2_GATE_DISABLE, 0,
- DPIASYMCLK3_GATE_DISABLE, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DTBCLK_P0_GATE_DISABLE, 0,
- DTBCLK_P1_GATE_DISABLE, 0,
- DTBCLK_P2_GATE_DISABLE, 0,
- DTBCLK_P3_GATE_DISABLE, 0);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DPSTREAMCLK0_GATE_DISABLE, 0,
- DPSTREAMCLK1_GATE_DISABLE, 0,
- DPSTREAMCLK2_GATE_DISABLE, 0,
- DPSTREAMCLK3_GATE_DISABLE, 0);
-
- }
-
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -235,7 +204,7 @@ void dcn35_init_hw(struct dc *dc)
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
- if (res_pool->hubbub->funcs->dchubbub_init)
+ if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
/* If taking over control from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -271,6 +240,10 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
+ if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
+ for (i = 0; i < res_pool->pipe_count; i++)
+ res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
+ }
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -305,20 +278,6 @@ void dcn35_init_hw(struct dc *dc)
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
- SYMCLKB_FE_GATE_DISABLE, 0,
- SYMCLKC_FE_GATE_DISABLE, 0,
- SYMCLKD_FE_GATE_DISABLE, 0,
- SYMCLKE_FE_GATE_DISABLE, 0);
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
- SYMCLKB_GATE_DISABLE, 0,
- SYMCLKC_GATE_DISABLE, 0,
- SYMCLKD_GATE_DISABLE, 0,
- SYMCLKE_GATE_DISABLE, 0);
-
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
@@ -328,10 +287,10 @@ void dcn35_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
@@ -629,10 +588,10 @@ void dcn35_power_down_on_boot(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
+ dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -640,8 +599,8 @@ void dcn35_power_down_on_boot(struct dc *dc)
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
break;
}
@@ -1024,9 +983,6 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
- if (hpo_frl_stream_enc_acquired)
- update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
-
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1041,7 +997,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
- if (pipe_ctx->plane_res.dpp)
+ if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
@@ -1462,11 +1418,16 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
for (i = 0; i < num_pipes; i++) {
- if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
- struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
- struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
+ /* dc_state_destruct() might null the stream resources, so fetch tg
+ * here first to avoid a race condition. The lifetime of the pointee
+ * itself (the timing_generator object) is not a problem here.
+ */
+ struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
- if (dc->debug.static_screen_wait_frames) {
+ if ((tg != NULL) && tg->funcs) {
+ if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+ struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
if (frame_rate >= 120 && dc->caps.ips_support &&
@@ -1475,14 +1436,12 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
num_frames = 2 * (frame_rate % 60);
}
}
- if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
+ if (tg->funcs->set_drr)
+ tg->funcs->set_drr(tg, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ if (tg->funcs->set_static_screen_control)
+ tg->funcs->set_static_screen_control(
+ tg, event_triggers, num_frames);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 428912f37129..2bbf1fef94fd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -101,7 +101,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -124,11 +123,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
.init_pipes = dcn35_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 55e791552bca..d00822e8daa5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -123,11 +122,12 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
+ .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
.init_pipes = dcn35_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 2c50c0f745a0..0b743669f23b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -221,8 +221,9 @@ void dcn401_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
+ int current_dchub_ref_freq = 0;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) {
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
@@ -264,6 +265,8 @@ void dcn401_init_hw(struct dc *dc)
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
+ current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
@@ -354,10 +357,10 @@ void dcn401_init_hw(struct dc *dc)
if (edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
}
}
@@ -367,8 +370,8 @@ void dcn401_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -413,12 +416,9 @@ void dcn401_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
- dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
-
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
@@ -436,9 +436,12 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
- if (!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) {
- /* update bounding box if FAMS2 disabled */
- dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
+ || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
+ /* update bounding box if FAMS2 disabled, or if dchub clk has changed */
+ if (dc->clk_mgr)
+ dc->res_pool->funcs->update_bw_bounding_box(dc,
+ dc->clk_mgr->bw_params);
}
}
}
@@ -498,6 +501,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
bool is_17x17x17 = true;
+ bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
@@ -507,11 +511,10 @@ void dcn401_populate_mcm_luts(struct dc *dc,
if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
+ rval = cm3_helper_translate_curve_to_hw_format(
mcm_luts.lut1d_func,
&dpp_base->regamma_params, false);
- m_lut_params.pwl = &dpp_base->regamma_params;
+ m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
if (mpc->funcs->populate_lut)
@@ -528,11 +531,10 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = &mcm_luts.shaper->pwl;
else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
+ rval = cm3_helper_translate_curve_to_hw_format(
mcm_luts.shaper,
&dpp_base->regamma_params, true);
- m_lut_params.pwl = &dpp_base->regamma_params;
+ m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
if (mpc->funcs->populate_lut)
@@ -668,47 +670,40 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- bool result = true;
+ bool result;
const struct pwl_params *lut_params = NULL;
+ bool rval;
mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
// 1D LUT
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = rval ? &dpp_base->regamma_params : NULL;
}
+ result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
+ lut_params = NULL;
// Shaper
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
-
- result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = rval ? &dpp_base->shaper_params : NULL;
}
+ result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
+ if (mpc->funcs->program_3dlut) {
if (plane_state->lut3d_func.state.bits.initialized == 1)
- result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
+ result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
else
- result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
+ result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
}
return result;
@@ -742,7 +737,9 @@ bool dcn401_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
return ret;
}
@@ -871,6 +868,7 @@ enum dc_status dcn401_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1115,10 +1113,10 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror,
.stream = pipe_ctx->stream
};
+ struct rect odm_slice_src = { 0 };
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
int prev_odm_width = 0;
- int prev_odm_offset = 0;
struct pipe_ctx *prev_odm_pipe = NULL;
bool mpc_combine_on = false;
int bottom_pipe_x_pos = 0;
@@ -1183,12 +1181,12 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
prev_odm_pipe = pipe_ctx->prev_odm_pipe;
while (prev_odm_pipe != NULL) {
- prev_odm_width += prev_odm_pipe->plane_res.scl_data.recout.width;
- prev_odm_offset += prev_odm_pipe->plane_res.scl_data.recout.x;
+ odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
+ prev_odm_width += odm_slice_src.width;
prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
}
- x_pos -= (prev_odm_width + prev_odm_offset);
+ x_pos -= (prev_odm_width);
}
/* If the position is negative then we need to add to the hotspot
@@ -1311,8 +1309,10 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
for (i = 0; i < dc->current_state->stream_count; i++) {
/* MALL SS messaging is not supported with PSR at this time */
if (dc->current_state->streams[i] != NULL &&
- dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+ DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
return false;
+ }
}
memset(&cmd, 0, sizeof(cmd));
@@ -1322,8 +1322,9 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (enable) {
if (dcn401_check_no_memory_request_for_cab(dc)) {
/* 1. Check no memory request case for CAB.
- * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
+ * If there is no memory request, send the CAB_ACTION NO_DCN_REQ DMUB message
*/
+ DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
} else {
/* 2. Check if all surfaces can fit in CAB.
@@ -1351,13 +1352,16 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
cmd.cab.cab_alloc_ways = ways;
+ DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
} else {
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
+ DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
}
}
} else {
/* Disable CAB */
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
+ DC_LOG_MALL("idle optimization disabled\n");
}
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
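
The DC_LOG_MALL messages added above trace a fixed decision ladder for the CAB sub-command. A sketch of that selection, assuming the helpers referenced in this hunk (dcn401_check_no_memory_request_for_cab, dc->caps.cache_num_ways) behave as shown:

/* Illustrative wrapper only; the real code inlines this logic. */
static enum dmub_cmd_cab_type pick_cab_action(struct dc *dc, bool enable,
					      uint32_t ways,
					      bool mall_ss_unsupported)
{
	if (!enable)
		return DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
	if (dcn401_check_no_memory_request_for_cab(dc))
		return DMUB_CMD__CAB_NO_DCN_REQ;
	if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported)
		return DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
	return DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
}
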
@@ -1395,10 +1399,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
{
struct hubbub *hubbub = dc->res_pool->hubbub;
bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- unsigned int compbuf_size_kb = 0;
+ unsigned int compbuf_size = 0;
- /* Any transition into or out of a FAMS config should disable MCLK switching first to avoid hangs */
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ /* Any transition into P-State support should disable MCLK switching first to avoid hangs */
+ if (p_state_change_support) {
dc->optimized_required = true;
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
}
@@ -1425,10 +1429,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
- compbuf_size_kb = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
- hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size_kb, false);
+ hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
@@ -1437,7 +1441,7 @@ void dcn401_prepare_bandwidth(struct dc *dc,
dcn401_fams2_global_control_lock(dc, context, false);
}
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
/* After disabling P-State, restore the original value to ensure we get the correct P-State
* on the next optimize. */
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
@@ -1530,7 +1534,7 @@ void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool en
if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
return;
- fams2_required = context->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
}
@@ -1542,7 +1546,6 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
struct pipe_ctx *old_pipe;
struct pipe_ctx *new_pipe;
struct pipe_ctx *old_opp_heads[MAX_PIPES];
- struct dccg *dccg = dc->res_pool->dccg;
struct pipe_ctx *old_otg_master;
int old_opp_head_count = 0;
@@ -1568,12 +1571,9 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
for (i = 0; i < old_opp_head_count; i++) {
old_pipe = old_opp_heads[i];
new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
- if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
- dccg->funcs->set_dto_dscclk(dccg,
- old_pipe->stream_res.dsc->inst, false);
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
old_pipe->stream_res.dsc->funcs->dsc_disconnect(
old_pipe->stream_res.dsc);
- }
}
}
}
@@ -1659,7 +1659,7 @@ void dcn401_hardware_release(struct dc *dc)
*/
if (dc->current_state) {
if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0) &&
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, true, true);
@@ -1669,3 +1669,104 @@ void dcn401_hardware_release(struct dc *dc)
}
}
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+ int dpp_count = 0;
+
+ if (!otg_master->stream)
+ return;
+
+ int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &context->res_ctx, opp_heads);
+
+ for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &context->res_ctx,
+ dpp_pipes);
+ for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+ struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
+ if (dpp_pipe && hubbub &&
+ dpp_pipe->plane_res.hubp &&
+ hubbub->funcs->wait_for_det_update)
+ hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
+ }
+ }
+ }
+}
+
+void dcn401_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i = 0;
+ struct pipe_ctx *pipe = NULL;
+ struct timing_generator *tg = NULL;
+ bool pipe_unlocked[MAX_PIPES] = {0};
+
+ if (lock) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ }
+ } else {
+ /* Unlock pipes based on the change in DET allocation instead of pipe index.
+ * This prevents over-allocation of DET during the unlock process,
+ * e.g. a 2-pipe config with different streams and a max of 20 DET segments:
+ * Before:                      After:
+ * - Pipe0: 10 DET segments     - Pipe0: 12 DET segments
+ * - Pipe1: 10 DET segments     - Pipe1: 8 DET segments
+ * If Pipe0 were updated first, 22 DET segments would be allocated.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ int current_pipe_idx = i;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ pipe_unlocked[i] = true;
+ continue;
+ }
+
// If the same stream exists in the old context, ensure the OTG_MASTER pipes for that stream get compared
+ struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
+
+ if (old_otg_master)
+ current_pipe_idx = old_otg_master->pipe_idx;
+ if (resource_calculate_det_for_stream(context, pipe) <
+ resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ pipe_unlocked[i] = true;
+ dcn401_wait_for_det_buffer_update(dc, context, pipe);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (pipe_unlocked[i])
+ continue;
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+ }
+}
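
Applied to the comment's 20-segment example, the unlock rule above releases Pipe1 (10 -> 8 segments) first and waits for its DET update, so Pipe0 (10 -> 12) never pushes the total to 22. The comparison, isolated as a sketch using the resource helper added in this series:

/* Illustrative only: unlock a pipe early iff its DET demand shrinks. */
static bool should_unlock_early(struct dc *dc, struct dc_state *context,
				struct pipe_ctx *new_otg_master,
				struct pipe_ctx *old_otg_master)
{
	return resource_calculate_det_for_stream(context, new_otg_master) <
	       resource_calculate_det_for_stream(dc->current_state,
						 old_otg_master);
}
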
+
+void dcn401_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* update compbuf if required */
+ if (hubbub->funcs->program_compbuf_segments)
+ hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
+}
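
dcn401_program_outstanding_updates pairs with the compbuf shrink in dcn401_prepare_bandwidth earlier in this patch: the segment count is requested twice, first with safe_to_increase=false and again here with true once pipe programming has landed. A sketch of the pairing, assuming program_compbuf_segments defers increases while the flag is false:

/* Illustrative sequence only; the two calls live in different hwss hooks. */
static void compbuf_two_phase(struct hubbub *hubbub, unsigned int new_segs)
{
	/* prepare_bandwidth: request the new size, growth deferred */
	hubbub->funcs->program_compbuf_segments(hubbub, new_segs, false);
	/* ... pipe and plane programming happens in between ... */
	/* program_outstanding_updates: growing is safe now */
	hubbub->funcs->program_compbuf_segments(hubbub, new_segs, true);
}
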
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 8e9c1c17aa66..a27e62081685 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -81,4 +81,7 @@ void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
+void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 6a768702c7bd..a2ca07235c83 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -38,7 +38,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn32_interdependent_update_lock,
+ .interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
@@ -99,12 +99,11 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
- .power_down = dce110_power_down,
+ .program_outstanding_updates = dcn401_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
@@ -115,8 +114,6 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .disable_stream_gating = dcn20_disable_stream_gating,
- .enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
@@ -136,12 +133,11 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
- .update_force_pstate = dcn32_update_force_pstate,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
- .populate_mcm_luts = dcn401_populate_mcm_luts,
+ .populate_mcm_luts = NULL,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index d05be65a2256..ac9205625623 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -240,7 +240,6 @@ struct hw_sequencer_funcs {
void (*program_triplebuffer)(const struct dc *dc,
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
- void (*power_down)(struct dc *dc);
void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
@@ -460,6 +459,9 @@ struct hw_sequencer_funcs {
bool enable);
void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+ void (*program_outstanding_updates)(struct dc *dc,
+ struct dc_state *context);
+ void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
};
void color_space_to_black_color(
@@ -520,6 +522,21 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_stream_status *stream_status,
struct dc_state *context);
+void hwss_wait_for_all_blank_complete(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_odm_update_pending_complete(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_no_pipes_pending(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_outstanding_hw_updates(struct dc *dc,
+ struct dc_state *dc_context);
+
+void hwss_process_outstanding_hw_updates(struct dc *dc,
+ struct dc_state *dc_context);
+
void hwss_send_dmcub_cmd(union block_sequence_params *params);
void hwss_program_manual_trigger(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 7ac3f2a09487..0ac675456979 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -76,8 +76,6 @@ struct hwseq_private_funcs {
void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_pipes)(struct dc *dc, struct dc_state *context);
void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
- void (*update_plane_addr)(const struct dc *dc,
- struct pipe_ctx *pipe_ctx);
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
@@ -170,7 +168,8 @@ struct hwseq_private_funcs {
unsigned int *k1_div,
unsigned int *k2_div);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ unsigned int current_pipe_idx);
enum dc_status (*apply_single_controller_ctx_to_hw)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4c8e6436c7e1..bfb8b8502d20 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -534,8 +534,8 @@ struct dcn_bw_output {
unsigned int legacy_svp_drr_stream_index;
bool legacy_svp_drr_stream_index_valid;
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
+ struct dmub_cmd_fams2_global_config fams2_global_config;
struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
- unsigned fams2_stream_count;
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index b6203253111c..8c18efc2aa70 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -46,6 +46,8 @@ struct audio_funcs {
const struct audio_info *audio_info,
const struct audio_dp_link_info *dp_link_info);
+ void (*az_disable_hbr_audio)(struct audio *audio);
+
void (*wall_dto_setup)(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d5fefce3e74b..2d06067ff36d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -29,9 +29,6 @@
#include "dc.h"
#include "dm_pp_smu.h"
-#define DCN_MINIMUM_DISPCLK_Khz 100000
-#define DCN_MINIMUM_DPPCLK_Khz 100000
-
/* Constants */
#define DDR4_DRAM_WIDTH 64
#define WM_A 0
@@ -180,6 +177,7 @@ struct clk_state_registers_and_bypass {
uint32_t dispclk;
uint32_t dppclk;
uint32_t dtbclk;
+ uint32_t fclk;
uint32_t dppclk_bypass;
uint32_t dcfclk_bypass;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 12282f96dfe1..c2dd061892f4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -191,7 +191,8 @@ enum dentist_divider_range {
CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK2_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK3_DFS_CNTL, CLK01, 0), \
- CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0)
+ CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0), \
+ CLK_SR_DCN401(CLK2_CLK2_DFS_CNTL, CLK20, 0)
#define CLK_COMMON_MASK_SH_LIST_DCN401(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN321(mask_sh)
@@ -235,6 +236,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_DFS_CNTL;
uint32_t CLK1_CLK3_DFS_CNTL;
uint32_t CLK1_CLK4_DFS_CNTL;
+ uint32_t CLK2_CLK2_DFS_CNTL;
uint32_t CLK1_CLK0_CURRENT_CNT;
uint32_t CLK1_CLK1_CURRENT_CNT;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 4fb1aacee894..e94e9ba60f55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -211,11 +211,9 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(
- struct dccg *dccg,
- uint32_t dsc_inst,
- bool enable);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index dd2b2864876c..67c32401893e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -227,6 +227,7 @@ struct hubbub_funcs {
void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use);
void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg);
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
+ void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 27bba47186e9..41c76ba9ba56 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -217,12 +217,13 @@ enum optc_dsc_mode {
};
struct dc_bias_and_scale {
- uint16_t scale_red;
- uint16_t bias_red;
- uint16_t scale_green;
- uint16_t bias_green;
- uint16_t scale_blue;
- uint16_t bias_blue;
+ uint32_t scale_red;
+ uint32_t bias_red;
+ uint32_t scale_green;
+ uint32_t bias_green;
+ uint32_t scale_blue;
+ uint32_t bias_blue;
+ bool bias_and_scale_valid;
};
enum test_pattern_dyn_range {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 5f6c7daa14d9..a8b44f398ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -63,7 +63,7 @@ union dcn_watermark_set {
struct dml2_dchub_watermark_regs b;
struct dml2_dchub_watermark_regs c;
struct dml2_dchub_watermark_regs d;
- } dcn4; //dcn4+
+ } dcn4x; //dcn4+
};
struct dce_watermarks {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 287bf8a90ff6..03cbcbb36f1c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -65,6 +65,7 @@ struct optc {
int vupdate_offset;
int vupdate_width;
int vready_offset;
+ int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
};
@@ -110,6 +111,7 @@ void optc1_program_timing(struct timing_generator *optc,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios);
@@ -127,7 +129,8 @@ void optc1_program_global_sync(struct timing_generator *optc,
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width);
+ int vupdate_width,
+ int pstate_keepout);
bool optc1_disable_crtc(struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index e5e11c84e9e2..fe7f3137f228 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -271,7 +271,9 @@ struct stream_encoder_funcs {
struct stream_encoder *enc, unsigned int pix_per_container);
void (*enable_fifo)(struct stream_encoder *enc);
void (*disable_fifo)(struct stream_encoder *enc);
+ bool (*is_fifo_enabled)(struct stream_encoder *enc);
void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+ uint32_t (*get_pixels_per_cycle)(struct stream_encoder *enc);
};
struct hpo_dp_stream_encoder_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 0f453452234c..3d4c8bd42b49 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -172,6 +172,7 @@ struct timing_generator_funcs {
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios
);
@@ -256,7 +257,8 @@ struct timing_generator_funcs {
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width);
+ int vupdate_width,
+ int pstate_keepout);
void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
void (*program_stereo)(struct timing_generator *tg,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 28da1dddf0a0..45262cba675e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -245,16 +245,6 @@ struct transform_funcs {
void (*set_cursor_attributes)(
struct transform *xfm_base,
const struct dc_cursor_attributes *attr);
-
- bool (*transform_program_blnd_lut)(
- struct transform *xfm,
- const struct pwl_params *params);
- bool (*transform_program_shaper_lut)(
- struct transform *xfm,
- const struct pwl_params *params);
- bool (*transform_program_3dlut)(
- struct transform *xfm,
- struct tetrahedral_params *params);
};
const uint16_t *get_filter_2tap_16p(void);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 96d40d33a1f9..cd1157d225ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -639,4 +639,11 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx);
* @dml2_options: struct to hold callbacks
*/
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
+
+/*
+ * Calculate the total DET allocated for all pipes for a given OTG_MASTER pipe
+ */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
+
+bool resource_is_hpo_acquired(struct dc_state *context);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 555c1c484cfd..ff8fe1a94965 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -67,6 +67,8 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
+ bool is_hpo_acquired;
uint8_t count;
int i;
@@ -83,6 +85,12 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]);
}
+ if (link->dc->hwss.setup_hpo_hw_control) {
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ }
+
for (i = count-1; i >= 0; i--)
link_set_dpms_on(state, pipes[i]);
}
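
The retrain path now reprograms HPO hardware control only when the retrain changes whether an HPO link encoder is acquired, sampling the state before teardown and after re-acquisition. The idiom as a sketch:

/* Illustrative only: toggle HPO HW control on an acquisition change. */
static void maybe_toggle_hpo_hw_control(struct dc *dc, struct dc_state *state,
					bool was_hpo_acquired)
{
	bool is_hpo_acquired = resource_is_hpo_acquired(state);

	if (dc->hwss.setup_hpo_hw_control &&
	    was_hpo_acquired != is_hpo_acquired)
		dc->hwss.setup_hpo_hw_control(dc->hwseq, is_hpo_acquired);
}
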
@@ -804,8 +812,11 @@ bool dp_set_test_pattern(
break;
}
+ if (!pipe_ctx->stream)
+ return false;
+
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_lock(pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b76737b7b9e4..3e47a6735912 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -74,7 +74,10 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
- if (stream_enc && stream_enc->funcs->disable_fifo)
+ if (!stream_enc)
+ return;
+
+ if (stream_enc->funcs->disable_fifo)
stream_enc->funcs->disable_fifo(stream_enc);
if (stream_enc->funcs->set_input_mode)
stream_enc->funcs->set_input_mode(stream_enc, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index e1257404357b..cec68c5dba13 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -28,6 +28,8 @@
#include "dccg.h"
#include "clk_mgr.h"
+#define DC_LOGGER link->ctx->logger
+
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size)
{
@@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
+ if (!link_res->hpo_dp_link_enc) {
+ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
+ return;
+ }
+
if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
link->dc->res_pool->dccg,
@@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
+ if (!link_res->hpo_dp_link_enc) {
+ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
+ return;
+ }
+
link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
link_res->hpo_dp_link_enc->funcs->disable_link_phy(
link_res->hpo_dp_link_enc, signal);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index bba644024780..d21ee9d12d26 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -863,7 +863,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
enum dc_connection_type new_connection_type = dc_connection_none;
- enum dc_connection_type pre_connection_type = link->type;
const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -965,7 +964,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (!detect_dp(link, &sink_caps, reason)) {
- link->type = pre_connection_type;
if (prev_sink)
dc_sink_release(prev_sink);
@@ -1191,8 +1189,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
//sink can only use the supported link rate table, so we are forced to enable it
if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
link->panel_config.ilr.optimize_edp_link_rate = true;
- if (edp_is_ilr_optimization_enabled(link))
- link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link);
+ link->reported_link_cap.link_rate = get_max_edp_link_rate(link);
}
} else {
@@ -1299,8 +1296,7 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
link->dpcd_caps.is_mst_capable)
is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
- if (is_local_sink_detect_success &&
- pre_link_type == dc_connection_mst_branch &&
+ if (pre_link_type == dc_connection_mst_branch &&
link->type != dc_connection_mst_branch)
is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 65607589495f..c4e03482ba9a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -817,17 +817,17 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@@ -879,19 +879,32 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* disable DSC block */
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false);
- pipe_ctx->stream_res.dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc);
- if (dccg->funcs->set_ref_dscclk)
- dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
- pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false);
+ for (odm_pipe = pipe_ctx; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc);
+ /*
+ * TODO - dsc_disconnect is a double-buffered register.
+ * By the time we call dsc_disable, DSC may still be
+ * connected to the OPP. In this case OPTC will no longer
+ * get correct pixel data because DSCC is off. However,
+ * we also can't wait for the pending disconnect to
+ * complete, because this function can be called with or
+ * without the OTG master lock acquired. When the lock
+ * is acquired we will never see the pending state clear
+ * until we release the lock later, so there is no easy
+ * way to solve this problem, especially while the lock
+ * is held. DSC is a front-end HW block; it should be
+ * programmed as part of the front-end sequence, where
+ * the commit sequence (without lock) and the update
+ * sequence (with lock) are completely separated.
+ * However, because we program DSC as part of the
+ * back-end link programming sequence, we don't know
+ * whether the front-end OTG master lock is acquired.
+ * The back end should be agnostic to the front-end
+ * lock; DSC programming shouldn't belong in this
+ * sequence.
+ */
+ odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
- odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
}
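
For context, a minimal standalone sketch of the unified disable walk introduced above: one loop now covers the top pipe and every ODM sibling, disconnecting each DSC before disabling it and restoring the reference DSC clock. All types and helpers here are illustrative stand-ins, not the driver's real interfaces.

	#include <stdio.h>

	/* Stand-in types and stubs for illustration only. */
	struct dsc { int inst; };
	struct pipe { struct dsc *dsc; struct pipe *next_odm_pipe; };

	static void dsc_disconnect(struct dsc *d) { printf("disconnect %d\n", d->inst); }
	static void dsc_disable(struct dsc *d)    { printf("disable %d\n", d->inst); }
	static void set_ref_dscclk(struct dsc *d) { printf("ref clk %d\n", d->inst); }

	/* One pass over the top pipe and its ODM siblings: disconnect the
	 * (double buffered) DSC, disable it, then restore the ref clock. */
	static void disable_dsc_chain(struct pipe *top)
	{
		for (struct pipe *p = top; p; p = p->next_odm_pipe) {
			dsc_disconnect(p->dsc);
			dsc_disable(p->dsc);
			set_ref_dscclk(p->dsc);
		}
	}

	int main(void)
	{
		struct dsc d0 = {0}, d1 = {1};
		struct pipe odm = { &d1, NULL }, top = { &d0, &odm };
		disable_dsc_chain(&top);
		return 0;
	}
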
@@ -2345,7 +2358,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) &&
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, false);
@@ -2578,7 +2591,7 @@ void link_set_dpms_on(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) &&
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 8246006857b3..5e1b5ab9fbc6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -385,7 +385,7 @@ static void link_destruct(struct dc_link *link)
if (link->panel_cntl)
link->panel_cntl->funcs->destroy(&link->panel_cntl);
- if (link->link_enc) {
+ if (link->link_enc && !link->is_dig_mapping_flexible) {
/* Update link encoder resource tracking variables. These are used for
* the dynamic assignment of link encoders to streams. Virtual links
* are not assigned encoder resources on creation.
@@ -524,6 +524,7 @@ static bool construct_phy(struct dc_link *link,
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 46bb7a855bc2..34a618a7278b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -212,6 +212,13 @@ static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in
case 10000000:
link_rate = LINK_RATE_UHBR10; // UHBR10 - 10.0 Gbps/Lane
break;
+ case 13500000:
+ link_rate = LINK_RATE_UHBR13_5; // UHBR13.5 - 13.5 Gbps/Lane
+ break;
+ case 20000000:
+ link_rate = LINK_RATE_UHBR20; // UHBR20 - 20.0 Gbps/Lane
+ break;
default:
link_rate = LINK_RATE_UNKNOWN;
break;
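
The two new cases extend the kHz-per-lane mapping to the DP 2.x rates. A tiny self-contained sketch of the extended mapping; the enum values here are illustrative stand-ins for the driver's dc_link_rate encoding:

	#include <stdint.h>
	#include <stdio.h>

	enum link_rate { RATE_UNKNOWN, RATE_UHBR10, RATE_UHBR13_5, RATE_UHBR20 };

	static enum link_rate rate_from_khz(uint32_t khz_per_lane)
	{
		switch (khz_per_lane) {
		case 10000000: return RATE_UHBR10;   /* 10.0 Gbps/lane */
		case 13500000: return RATE_UHBR13_5; /* 13.5 Gbps/lane */
		case 20000000: return RATE_UHBR20;   /* 20.0 Gbps/lane */
		default:       return RATE_UNKNOWN;
		}
	}

	int main(void)
	{
		/* 13,500,000 kHz per lane now maps to UHBR13.5, not UNKNOWN */
		printf("%d\n", rate_from_khz(13500000) == RATE_UHBR13_5);
		return 0;
	}
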
@@ -541,6 +548,23 @@ static enum dc_link_rate increase_link_rate(struct dc_link *link,
}
}
+static void increase_edp_link_rate(struct dc_link *link,
+ struct dc_link_settings *current_link_setting)
+{
+ if (current_link_setting->use_link_rate_set) {
+ if (current_link_setting->link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting->link_rate_set++;
+ current_link_setting->link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting->link_rate_set];
+ } else {
+ current_link_setting->use_link_rate_set = false;
+ current_link_setting->link_rate = LINK_RATE_UHBR10;
+ }
+ } else {
+ current_link_setting->link_rate = increase_link_rate(link, current_link_setting->link_rate);
+ }
+}
+
static bool decide_fallback_link_setting_max_bw_policy(
struct dc_link *link,
const struct dc_link_settings *max,
@@ -759,14 +783,7 @@ bool edp_decide_link_settings(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
+ increase_edp_link_rate(link, &current_link_setting);
}
}
return false;
@@ -818,9 +835,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
if (policy) {
/* minimize lane */
if (current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
+ increase_edp_link_rate(link, &current_link_setting);
} else {
if (current_link_setting.lane_count <
link->verified_link_cap.lane_count) {
@@ -839,9 +854,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
+ increase_edp_link_rate(link, &current_link_setting);
current_link_setting.lane_count =
initial_link_setting.lane_count;
}
@@ -874,18 +887,15 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
}
if (policy) {
/* minimize lane */
- if (current_link_setting.link_rate_set <
- link->dpcd_caps.edp_supported_link_rates_count
- && current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ if (current_link_setting.link_rate < max_link_rate) {
+ increase_edp_link_rate(link, &current_link_setting);
} else {
if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
current_link_setting.lane_count =
increase_lane_count(
current_link_setting.lane_count);
current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.use_link_rate_set = initial_link_setting.use_link_rate_set;
current_link_setting.link_rate =
link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
} else
@@ -899,13 +909,8 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
+ increase_edp_link_rate(link, &current_link_setting);
+ if (current_link_setting.link_rate == LINK_RATE_UNKNOWN)
break;
}
}
@@ -1166,6 +1171,8 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
hdmi_encoded_link_bw);
+ DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
}
if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
@@ -1541,7 +1548,11 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
- ASSERT(0);
+ /* If you see this message consistently, either the host platform has the
+ * FIXED_VS flag configured incorrectly or the sink device is returning an
+ * invalid count.
+ */
+ DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
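
The ASSERT is replaced with a log so a bad repeater count from a FIXED_VS part no longer traps. A compilable sketch of the parse-and-override flow; per the DP spec, PHY_REPEATER_CNT encodes N repeaters as 0x80 >> (N - 1), and the parse helper below is a simplified stand-in for dp_parse_lttpr_repeater_count:

	#include <stdint.h>
	#include <stdio.h>

	/* Map the DPCD encoding back to a repeater count; 0 means invalid. */
	static uint8_t parse_lttpr_count(uint8_t raw)
	{
		for (uint8_t n = 1; n <= 8; n++)
			if (raw == (uint8_t)(0x80 >> (n - 1)))
				return n;
		return 0;
	}

	int main(void)
	{
		uint8_t raw = 0x00; /* known-bad count from the sink */

		if (parse_lttpr_count(raw) == 0) {
			printf("phy_repeater_cnt is 0x%x, forcing 0x80\n", raw);
			raw = 0x80; /* force one repeater, as the hunk does */
		}
		printf("repeaters = %u\n", parse_lttpr_count(raw));
		return 0;
	}
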
@@ -2254,7 +2265,7 @@ bool dp_verify_link_cap_with_retries(
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ if (link->link_enc && (!link_detect_connection_type(link, &type) || type == dc_connection_none)) {
link->verified_link_cap = fail_safe_link_settings;
break;
} else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index bf820d2b4dc4..3aa05a2be6c0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -305,16 +305,17 @@ bool edp_is_ilr_optimization_enabled(struct dc_link *link)
return true;
}
-enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link)
+enum dc_link_rate get_max_edp_link_rate(struct dc_link *link)
{
- enum dc_link_rate link_rate = link->reported_link_cap.link_rate;
+ enum dc_link_rate max_ilr_rate = LINK_RATE_UNKNOWN;
+ enum dc_link_rate max_non_ilr_rate = dp_get_max_link_cap(link).link_rate;
for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
- if (link_rate < link->dpcd_caps.edp_supported_link_rates[i])
- link_rate = link->dpcd_caps.edp_supported_link_rates[i];
+ if (max_ilr_rate < link->dpcd_caps.edp_supported_link_rates[i])
+ max_ilr_rate = link->dpcd_caps.edp_supported_link_rates[i];
}
- return link_rate;
+ return (max_ilr_rate > max_non_ilr_rate ? max_ilr_rate : max_non_ilr_rate);
}
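
The renamed helper now returns the larger of the ILR-table maximum and the conventional DPCD maximum, instead of only scanning the table against reported_link_cap. A minimal sketch with hypothetical rates in Gbps: for an ILR table of {2.16, 2.43, 2.7} and a non-ILR cap of 5.4, the result is now 5.4.

	#include <stdio.h>

	/* Values are hypothetical; the driver compares enum dc_link_rate. */
	static double max_edp_link_rate(const double *ilr, int n, double non_ilr_max)
	{
		double max_ilr = 0.0; /* stand-in for LINK_RATE_UNKNOWN */

		for (int i = 0; i < n; i++)
			if (max_ilr < ilr[i])
				max_ilr = ilr[i];
		return max_ilr > non_ilr_max ? max_ilr : non_ilr_max;
	}

	int main(void)
	{
		const double ilr[] = { 2.16, 2.43, 2.7 };

		printf("%.1f\n", max_edp_link_rate(ilr, 3, 5.4)); /* prints 5.4 */
		return 0;
	}
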
bool edp_is_ilr_optimization_required(struct dc_link *link,
@@ -1167,6 +1168,9 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (link_res->hpo_dp_link_enc) {
+ if (link->wa_flags.disable_assr_for_uhbr)
+ return;
+
link_enc_index = link_res->hpo_dp_link_enc->inst;
use_hpo_dp_link_enc = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 8df8ac5bde5b..30dc8c24c008 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -69,7 +69,7 @@ bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
bool edp_is_ilr_optimization_enabled(struct dc_link *link);
-enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link);
+enum dc_link_rate get_max_edp_link_rate(struct dc_link *link);
bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
index 505bc0517e08..eab196c57c6c 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
@@ -25,6 +25,15 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN20
+###############################################################################
+MMHUBBUB_DCN20 = dcn20_mmhubbub.o
+
+AMD_DAL_MMHUBBUB_DCN20 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn20/,$(MMHUBBUB_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN20)
+
+###############################################################################
# DCN32
###############################################################################
MMHUBBUB_DCN32 = dcn32_mmhubbub.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..259a98e4ee2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h
index 5ab32aa51e13..5ab32aa51e13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 7f7458c07e2a..5402c3529f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -25,6 +25,33 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN10
+###############################################################################
+MPC_DCN10 = dcn10_mpc.o
+
+AMD_DAL_MPC_DCN10 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn10/,$(MPC_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN10)
+
+###############################################################################
+# DCN20
+###############################################################################
+MPC_DCN20 = dcn20_mpc.o
+
+AMD_DAL_MPC_DCN20 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn20/,$(MPC_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN20)
+
+###############################################################################
+# DCN30
+###############################################################################
+MPC_DCN30 = dcn30_mpc.o
+
+AMD_DAL_MPC_DCN30 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn30/,$(MPC_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN30)
+
+###############################################################################
# DCN32
###############################################################################
MPC_DCN32 = dcn32_mpc.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index f2f55565e98a..f2f55565e98a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
index dbfffc6383dc..dbfffc6383dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c
index ea73473b970a..ea73473b970a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h
index 496658f420db..496658f420db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 3aeb85ec40b0..fe26fde12eeb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -25,7 +25,7 @@
#include "reg_helper.h"
#include "dcn30_mpc.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index ce93003dae01..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/opp/Makefile b/drivers/gpu/drm/amd/display/dc/opp/Makefile
index fbfb3c3ad819..1be76754db30 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/opp/Makefile
@@ -25,6 +25,22 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN10
+###############################################################################
+OPP_DCN10 = dcn10_opp.o
+
+AMD_DAL_OPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/opp/dcn10/,$(OPP_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN10)
+###############################################################################
+# DCN20
+###############################################################################
+OPP_DCN20 = dcn20_opp.o
+
+AMD_DAL_OPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/opp/dcn20/,$(OPP_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN20)
+###############################################################################
# DCN35
###############################################################################
OPP_DCN35 = dcn35_opp.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..71e9288d60ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..c87de68a509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..f5fe0cac7cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..34936e6c49f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 94427875bcdd..097d06023e64 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -65,7 +65,8 @@ void optc1_program_global_sync(
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width)
+ int vupdate_width,
+ int pstate_keepout)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -73,6 +74,7 @@ void optc1_program_global_sync(
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
if (optc1->vstartup_start == 0) {
BREAK_TO_DEBUGGER();
@@ -146,6 +148,7 @@ void optc1_setup_vertical_interrupt2(
* @vstartup_start: Vstartup period.
* @vupdate_offset: Vupdate starting position.
* @vupdate_width: Vupdate duration.
+ * @pstate_keepout: Determines low power mode timing during refresh.
* @signal: DC signal types.
* @use_vbios: to program timings from BIOS command table.
*
@@ -157,6 +160,7 @@ void optc1_program_timing(
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
@@ -177,6 +181,7 @@ void optc1_program_timing(
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
optc1->orginal_patched_timing = patched_crtc_timing;
@@ -282,7 +287,8 @@ void optc1_program_timing(
vready_offset,
vstartup_start,
vupdate_offset,
- vupdate_width);
+ vupdate_width,
+ pstate_keepout);
optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 369a13244e5e..b7a57f98553d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -201,6 +201,7 @@ struct dcn_optc_registers {
uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK;
uint32_t OPTC_CLOCK_CONTROL;
uint32_t OPTC_WIDTH_CONTROL2;
+ uint32_t OTG_PSTATE_REGISTER;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -590,7 +591,11 @@ struct dcn_optc_registers {
type OTG_V_COUNT_STOP_TIMER;
#define TG_REG_FIELD_LIST_DCN401(type) \
- type OPTC_SEGMENT_WIDTH_LAST;
+ type OPTC_SEGMENT_WIDTH_LAST;\
+ type OTG_PSTATE_KEEPOUT_START;\
+ type OTG_PSTATE_EXTEND;\
+ type OTG_UNBLANK;\
+ type OTG_PSTATE_ALLOW_WIDTH_MIN;
struct dcn_optc_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 6bbbf313b2bb..4b6446ed4ce4 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -149,7 +149,9 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
return true;
}
-
+/*
+ * Immediate_Disable_Crtc - temporarily disables the timing generator without resetting ODM.
+ */
bool optc31_immediate_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -162,10 +164,12 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc)
VTG0_ENABLE, 0);
/* CRTC disabled, so disable clock. */
- REG_WAIT(OTG_CLOCK_CONTROL,
+ if (optc->ctx->dce_environment != DCE_ENV_DIAG)
+ REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+
/* clear the false state */
optc1_clear_optc_underflow(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 9f5c2efa7560..a5d6a7dca554 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -396,13 +396,47 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
}
}
+static void optc401_program_global_sync(
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ int pstate_keepout)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc1->vready_offset = vready_offset;
+ optc1->vstartup_start = vstartup_start;
+ optc1->vupdate_offset = vupdate_offset;
+ optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
+
+ if (optc1->vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, optc1->vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, optc1->vupdate_offset,
+ VUPDATE_WIDTH, optc1->vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, optc1->vready_offset);
+
+ REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
+}
+
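
As a reference for the new hook, a compilable sketch of the programming order: cache the parameters, bail out on a zero vstartup (the driver traps to the debugger there), then write the sync params and the new keepout field. The printf calls stand in for the REG_SET/REG_UPDATE register writes.

	#include <stdio.h>

	/* Stand-in state; mirrors only the fields this sketch needs. */
	struct optc_state {
		int vready_offset, vstartup_start;
		int vupdate_offset, vupdate_width, pstate_keepout;
	};

	static void program_global_sync(struct optc_state *o, int vready,
					int vstartup, int vupd_off,
					int vupd_w, int keepout)
	{
		o->vready_offset = vready;
		o->vstartup_start = vstartup;
		o->vupdate_offset = vupd_off;
		o->vupdate_width = vupd_w;
		o->pstate_keepout = keepout;

		if (o->vstartup_start == 0)
			return; /* driver breaks to the debugger here */

		printf("VSTARTUP_START=%d\n", o->vstartup_start);
		printf("VUPDATE_OFFSET=%d WIDTH=%d\n",
		       o->vupdate_offset, o->vupdate_width);
		printf("VREADY_OFFSET=%d\n", o->vready_offset);
		printf("OTG_PSTATE_KEEPOUT_START=%d\n", o->pstate_keepout);
	}

	int main(void)
	{
		struct optc_state o = {0};

		program_global_sync(&o, 1, 10, 2, 3, 4);
		return 0;
	}
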
static struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
- .program_global_sync = optc1_program_global_sync,
+ .program_global_sync = optc401_program_global_sync,
.enable_crtc = optc401_enable_crtc,
.disable_crtc = optc401_disable_crtc,
.phantom_crtc_post_enable = optc401_phantom_crtc_post_enable,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index 3114ecef332a..bb13a645802d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -155,7 +155,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 4860bb2531a1..09320344d8e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -198,8 +198,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
-###############################################################################
-
RESOURCE_DCN401 = dcn401_resource.o
AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401))
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index fe518fd27b08..91da5cf85b69 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -1163,6 +1163,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
0,
0,
0,
+ 0,
pipe_ctx->stream->signal,
false);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 88afb2a30eef..162856c523e4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -1067,7 +1067,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels clks = {0};
int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
- if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ if (!dc->bw_vbios)
+ return;
+
+ if (dc->bw_vbios->memory_type == bw_def_hbm)
memory_type_multiplier = MEMORY_TYPE_HBM;
/*do system clock TODO PPLIB: after PPLIB implement,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 5e7cfa8e8ec9..eea2b3b307cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2040,6 +2040,7 @@ bool dcn20_fast_validate_bw(
{
bool out = false;
int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -2064,7 +2065,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 131d98025bd4..fc54483b9104 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -1007,8 +1007,10 @@ static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!idle_pipe)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 8663cbc3d1cf..347e6aaea582 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -774,6 +774,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
{
bool out = false;
int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -816,7 +817,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
goto validate_fail;
}
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 5d1801dce273..ac8cb20e2e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1948,6 +1948,7 @@ static bool dcn31_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 969658313fd6..a124ad9bd108 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1651,6 +1651,9 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
else
phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
+ if (!phantom_plane)
+ continue;
+
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
sizeof(phantom_plane->scaling_quality));
@@ -1717,6 +1720,9 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
// already have phantom pipe assigned, etc.) by previous checks.
phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index);
+ if (!phantom_stream)
+ return;
+
dcn32_enable_phantom_plane(dc, context, phantom_stream, index);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2220,6 +2226,7 @@ static bool dcn32_resource_construct(
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2671,8 +2678,10 @@ static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
int head_index;
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
/*
* Modified from dcn20_acquire_idle_pipe_for_layer
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index fee67fbab8e2..7901792afb7b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -505,6 +505,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
+ SRI_ARR(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI_ARR(CM_TEST_DEBUG_DATA, CM, id), \
SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -761,6 +763,7 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id), \
SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
@@ -1185,6 +1188,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \
SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA), \
SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \
SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \
SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
index d184105ce2b3..f5a4e97c40ce 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
@@ -218,12 +218,12 @@ bool dcn32_is_center_timing(struct pipe_ctx *pipe)
pipe->stream->timing.v_addressable != pipe->stream->src.height) {
is_center_timing = true;
}
- }
- if (pipe->plane_state) {
- if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
- pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
- is_center_timing = true;
+ if (pipe->plane_state) {
+ if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
+ pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
+ is_center_timing = true;
+ }
}
}
@@ -663,7 +663,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
@@ -724,7 +724,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
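
The (unsigned long long) casts force the v_total * h_total term into 64-bit arithmetic before it enters the ceiling-division numerator. A standalone sketch of the computation, using CEA 1080p60 timing as an illustrative input (148.5 MHz pixel clock, 2200 x 1125 total), which prints 60:

	#include <stdint.h>
	#include <stdio.h>

	/* Ceiling division matching the hunk: widen v_total * h_total to
	 * 64-bit, then divide by v_total and h_total separately, as the
	 * driver does with div_u64. */
	static uint64_t refresh_hz(uint64_t pix_clk_100hz,
				   uint32_t v_total, uint32_t h_total)
	{
		uint64_t rate = pix_clk_100hz * 100 +
				(uint64_t)v_total * h_total - 1;

		rate /= v_total;
		rate /= h_total;
		return rate;
	}

	int main(void)
	{
		printf("%llu\n",
		       (unsigned long long)refresh_hz(1485000, 1125, 2200));
		return 0;
	}
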
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 8e0588b1cf30..827a94f84f10 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1783,6 +1783,7 @@ static bool dcn321_resource_construct(
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ddf251901fb3..46ad684fe192 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -786,6 +786,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
.disable_timeout = true,
+ .min_disp_clk_khz = 50000,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1899,6 +1900,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 34b02147881d..9d56fbdcd06a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -76,6 +76,9 @@
#include "dml2/dml2_wrapper.h"
+#include "spl/dc_spl_scl_easf_filters.h"
+#include "spl/dc_spl_isharp_filters.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn401_clk_src_array_id {
@@ -1188,7 +1191,7 @@ static struct stream_encoder *dcn401_stream_encoder_create(
vpg = dcn401_vpg_create(ctx, vpg_inst);
afmt = dcn401_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
@@ -1822,6 +1825,7 @@ static bool dcn401_resource_construct(
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;
@@ -2099,6 +2103,7 @@ static bool dcn401_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
+ dc->dml2_options.force_tdlut_enable = true;
resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
@@ -2124,6 +2129,10 @@ static bool dcn401_resource_construct(
dc->dml2_options.max_segments_per_hubp = 20;
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
+ /* SPL */
+ spl_init_easf_filter_coeffs();
+ spl_init_blur_scale_coeffs();
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index bb46f30d11d0..514d1ce20df9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -536,7 +536,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/spl/Makefile
index 89cad60b1a10..5edf3c6cf3e2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/spl/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'spl' sub-component of DAL.
# It provides the scaling library interface.
-SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_isharp_filters.o
+SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o
AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index e3e20cd86af6..15f7eda903e6 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -4,9 +4,11 @@
#include "dc_spl.h"
#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
+#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
@@ -107,26 +109,26 @@ static struct spl_rect calculate_plane_rec_in_timing_active(
const struct spl_rect *stream_src = &spl_in->basic_out.src_rect;
const struct spl_rect *stream_dst = &spl_in->basic_out.dst_rect;
struct spl_rect rec_out = {0};
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
- temp = dc_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
+ temp = spl_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
stream_src->width);
- rec_out.x = stream_dst->x + dc_fixpt_round(temp);
+ rec_out.x = stream_dst->x + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->x + rec_in->width) * (long long)stream_dst->width,
stream_src->width);
- rec_out.width = stream_dst->x + dc_fixpt_round(temp) - rec_out.x;
+ rec_out.width = stream_dst->x + spl_fixpt_round(temp) - rec_out.x;
- temp = dc_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
+ temp = spl_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
stream_src->height);
- rec_out.y = stream_dst->y + dc_fixpt_round(temp);
+ rec_out.y = stream_dst->y + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->y + rec_in->height) * (long long)stream_dst->height,
stream_src->height);
- rec_out.height = stream_dst->y + dc_fixpt_round(temp) - rec_out.y;
+ rec_out.height = stream_dst->y + spl_fixpt_round(temp) - rec_out.y;
return rec_out;
}
@@ -144,7 +146,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
mpc_rec.height = plane_clip_rec->height;
mpc_rec.y = plane_clip_rec->y;
- ASSERT(mpc_slice_count == 1 ||
+ SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
@@ -157,7 +159,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
}
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM) {
- ASSERT(mpc_rec.height % 2 == 0);
+ SPL_ASSERT(mpc_rec.height % 2 == 0);
mpc_rec.height /= 2;
}
return mpc_rec;
@@ -197,7 +199,7 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
return spl_in->basic_out.odm_slice_rect;
}
-static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_recout(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out)
{
/*
* A plane clip represents the desired plane size and position in Stream
@@ -340,20 +342,23 @@ static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
/* shift the overlapping area so it is with respect to current
* ODM slice's position
*/
- spl_out->scl_data.recout = shift_rec(
+ spl_scratch->scl_data.recout = shift_rec(
&overlapping_area,
-odm_slice.x, -odm_slice.y);
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_base_offset;
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_dpp_offset;
} else
/* if there is no overlap, zero recout */
- memset(&spl_out->scl_data.recout, 0,
+ memset(&spl_scratch->scl_data.recout, 0,
sizeof(struct spl_rect));
}
+
/* Calculate scaling ratios */
-static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out)
{
const int in_w = spl_in->basic_out.src_rect.width;
const int in_h = spl_in->basic_out.src_rect.height;
@@ -364,59 +369,75 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *
/*Swap surf_src height and width since scaling ratios are in recout rotation*/
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270)
- swap(surf_src.height, surf_src.width);
+ spl_swap(surf_src.height, surf_src.width);
- spl_out->scl_data.ratios.horz = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_from_fraction(
surf_src.width,
spl_in->basic_in.dst_rect.width);
- spl_out->scl_data.ratios.vert = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_from_fraction(
surf_src.height,
spl_in->basic_in.dst_rect.height);
if (spl_in->basic_out.view_format == SPL_VIEW_3D_SIDE_BY_SIDE)
- spl_out->scl_data.ratios.horz.value *= 2;
+ spl_scratch->scl_data.ratios.horz.value *= 2;
else if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
- spl_out->scl_data.ratios.vert.value *= 2;
+ spl_scratch->scl_data.ratios.vert.value *= 2;
- spl_out->scl_data.ratios.vert.value = div64_s64(
- spl_out->scl_data.ratios.vert.value * in_h, out_h);
- spl_out->scl_data.ratios.horz.value = div64_s64(
- spl_out->scl_data.ratios.horz.value * in_w, out_w);
+ spl_scratch->scl_data.ratios.vert.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.vert.value * in_h, out_h);
+ spl_scratch->scl_data.ratios.horz.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.horz.value * in_w, out_w);
- spl_out->scl_data.ratios.horz_c = spl_out->scl_data.ratios.horz;
- spl_out->scl_data.ratios.vert_c = spl_out->scl_data.ratios.vert;
+ spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
+ spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
- spl_out->scl_data.ratios.horz_c.value /= 2;
- spl_out->scl_data.ratios.vert_c.value /= 2;
+ spl_scratch->scl_data.ratios.horz_c.value /= 2;
+ spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
- spl_out->scl_data.ratios.horz = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz, 19);
- spl_out->scl_data.ratios.vert = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert, 19);
- spl_out->scl_data.ratios.horz_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz_c, 19);
- spl_out->scl_data.ratios.vert_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert_c, 19);
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz, 19);
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert, 19);
+ spl_scratch->scl_data.ratios.horz_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz_c, 19);
+ spl_scratch->scl_data.ratios.vert_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert_c, 19);
+
+ /*
+ * The coefficient tables and some registers are keyed on the
+ * output/input ratio, while we currently calculate input/output.
+ * Store 1/ratio in recip_ratios for those lookups.
+ */
+ spl_scratch->scl_data.recip_ratios.horz = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz);
+ spl_scratch->scl_data.recip_ratios.vert = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert);
+ spl_scratch->scl_data.recip_ratios.horz_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz_c);
+ spl_scratch->scl_data.recip_ratios.vert_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert_c);
}
+
/* Calculate Viewport size */
-static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
{
- spl_out->scl_data.viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert,
- spl_out->scl_data.recout.height));
- spl_out->scl_data.viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz_c,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert_c,
- spl_out->scl_data.recout.height));
+ spl_scratch->scl_data.viewport.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert,
+ spl_scratch->scl_data.recout.height));
+ spl_scratch->scl_data.viewport_c.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport_c.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.recout.height));
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270) {
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
}
+
static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
bool horizontal_mirror,
bool *orthogonal_rotation,
@@ -440,6 +461,7 @@ static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
if (horizontal_mirror)
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+
/*
* We completely calculate vp offset, size and inits here based entirely on scaling
* ratios and recout for pixel perfect pipe combine.
@@ -449,13 +471,13 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
int recout_size,
int src_size,
int taps,
- struct fixed31_32 ratio,
- struct fixed31_32 init_adj,
- struct fixed31_32 *init,
+ struct spl_fixed31_32 ratio,
+ struct spl_fixed31_32 init_adj,
+ struct spl_fixed31_32 *init,
int *vp_offset,
int *vp_size)
{
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
int int_part;
/*
@@ -468,33 +490,33 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
* init_bot = init + scaling_ratio
* to get pixel perfect combine add the fraction from calculating vp offset
*/
- temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
- *vp_offset = dc_fixpt_floor(temp);
+ temp = spl_fixpt_mul_int(ratio, recout_offset_within_recout_full);
+ *vp_offset = spl_fixpt_floor(temp);
temp.value &= 0xffffffff;
- *init = dc_fixpt_add(dc_fixpt_div_int(dc_fixpt_add_int(ratio, taps + 1), 2), temp);
- *init = dc_fixpt_add(*init, init_adj);
- *init = dc_fixpt_truncate(*init, 19);
+ *init = spl_fixpt_add(spl_fixpt_div_int(spl_fixpt_add_int(ratio, taps + 1), 2), temp);
+ *init = spl_fixpt_add(*init, init_adj);
+ *init = spl_fixpt_truncate(*init, 19);
/*
* If viewport has non 0 offset and there are more taps than covered by init then
* we should decrease the offset and increase init so we are never sampling
* outside of viewport.
*/
- int_part = dc_fixpt_floor(*init);
+ int_part = spl_fixpt_floor(*init);
if (int_part < taps) {
int_part = taps - int_part;
if (int_part > *vp_offset)
int_part = *vp_offset;
*vp_offset -= int_part;
- *init = dc_fixpt_add_int(*init, int_part);
+ *init = spl_fixpt_add_int(*init, int_part);
}
/*
* If taps are sampling outside of viewport at end of recout and there are more pixels
* available in the surface we should increase the viewport size, regardless set vp to
* only what is used.
*/
- temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
- *vp_size = dc_fixpt_floor(temp);
+ temp = spl_fixpt_add(*init, spl_fixpt_mul_int(ratio, recout_size - 1));
+ *vp_size = spl_fixpt_floor(temp);
if (*vp_size + *vp_offset > src_size)
*vp_size = src_size - *vp_offset;
@@ -509,15 +531,24 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
static bool spl_is_yuv420(enum spl_pixel_format format)
{
- if ((format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN) &&
- (format <= SPL_PIXEL_FORMAT_VIDEO_END))
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
return true;
return false;
}
/*Calculate inits and viewport */
-static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
{
struct spl_rect src = spl_in->basic_in.src_rect;
struct spl_rect recout_dst_in_active_timing;
@@ -528,11 +559,11 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- struct fixed31_32 init_adj_h = dc_fixpt_zero;
- struct fixed31_32 init_adj_v = dc_fixpt_zero;
+ struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
+ struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
recout_clip_in_active_timing = shift_rec(
- &spl_out->scl_data.recout, odm_slice.x, odm_slice.y);
+ &spl_scratch->scl_data.recout, odm_slice.x, odm_slice.y);
recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
spl_in, &spl_in->basic_in.dst_rect);
overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
@@ -555,8 +586,8 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
&flip_horz_scan_dir);
if (orthogonal_rotation) {
- swap(src.width, src.height);
- swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
}
if (spl_is_yuv420(spl_in->basic_in.format)) {
@@ -568,17 +599,17 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
switch (spl_in->basic_in.cositing) {
case CHROMA_COSITING_LEFT:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_NONE:
- init_adj_h = dc_fixpt_from_fraction(sign, 2);
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_TOPLEFT:
default:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_zero;
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_zero;
break;
}
}
@@ -586,59 +617,60 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width,
- spl_out->scl_data.taps.h_taps,
- spl_out->scl_data.ratios.horz,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.h,
- &spl_out->scl_data.viewport.x,
- &spl_out->scl_data.viewport.width);
+ spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.h,
+ &spl_scratch->scl_data.viewport.x,
+ &spl_scratch->scl_data.viewport.width);
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width / vpc_div,
- spl_out->scl_data.taps.h_taps_c,
- spl_out->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.taps.h_taps_c,
+ spl_scratch->scl_data.ratios.horz_c,
init_adj_h,
- &spl_out->scl_data.inits.h_c,
- &spl_out->scl_data.viewport_c.x,
- &spl_out->scl_data.viewport_c.width);
+ &spl_scratch->scl_data.inits.h_c,
+ &spl_scratch->scl_data.viewport_c.x,
+ &spl_scratch->scl_data.viewport_c.width);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height,
- spl_out->scl_data.taps.v_taps,
- spl_out->scl_data.ratios.vert,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.v,
- &spl_out->scl_data.viewport.y,
- &spl_out->scl_data.viewport.height);
+ spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.v,
+ &spl_scratch->scl_data.viewport.y,
+ &spl_scratch->scl_data.viewport.height);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height / vpc_div,
- spl_out->scl_data.taps.v_taps_c,
- spl_out->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.taps.v_taps_c,
+ spl_scratch->scl_data.ratios.vert_c,
init_adj_v,
- &spl_out->scl_data.inits.v_c,
- &spl_out->scl_data.viewport_c.y,
- &spl_out->scl_data.viewport_c.height);
+ &spl_scratch->scl_data.inits.v_c,
+ &spl_scratch->scl_data.viewport_c.y,
+ &spl_scratch->scl_data.viewport_c.height);
if (orthogonal_rotation) {
- swap(spl_out->scl_data.viewport.x, spl_out->scl_data.viewport.y);
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.x, spl_out->scl_data.viewport_c.y);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.x, spl_scratch->scl_data.viewport.y);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.x, spl_scratch->scl_data.viewport_c.y);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
- spl_out->scl_data.viewport.x += src.x;
- spl_out->scl_data.viewport.y += src.y;
- ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
- spl_out->scl_data.viewport_c.x += src.x / vpc_div;
- spl_out->scl_data.viewport_c.y += src.y / vpc_div;
+ spl_scratch->scl_data.viewport.x += src.x;
+ spl_scratch->scl_data.viewport.y += src.y;
+ SPL_ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
+ spl_scratch->scl_data.viewport_c.x += src.x / vpc_div;
+ spl_scratch->scl_data.viewport_c.y += src.y / vpc_div;
}
+
static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
{
/*
@@ -647,7 +679,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
if (spl_in->basic_in.mpc_combine_v) {
- ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
+ SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
@@ -665,6 +697,7 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
if (viewport->width < MIN_VIEWPORT_SIZE)
viewport->width = MIN_VIEWPORT_SIZE;
}
+
static bool spl_dscl_is_420_format(enum spl_pixel_format format)
{
if (format == SPL_PIXEL_FORMAT_420BPP8 ||
@@ -673,6 +706,7 @@ static bool spl_dscl_is_420_format(enum spl_pixel_format format)
else
return false;
}
+
static bool spl_dscl_is_video_format(enum spl_pixel_format format)
{
if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
@@ -681,17 +715,21 @@ static bool spl_dscl_is_video_format(enum spl_pixel_format format)
else
return false;
}
+
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data,
+ bool enable_isharp, bool enable_easf)
{
- const long long one = dc_fixpt_one.value;
+ const long long one = spl_fixpt_one.value;
enum spl_pixel_format pixel_format = spl_in->basic_in.format;
+	/* Bypass only if all ratios are 1:1 with no ISHARP and force scale (always_scale) off */
if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
- && !spl_in->basic_out.always_scale)
+ && !spl_in->basic_out.always_scale
+ && !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
if (!spl_dscl_is_420_format(pixel_format)) {
@@ -700,69 +738,196 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
}
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return SCL_MODE_SCALING_420_LUMA_BYPASS;
- if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ /* Bypass YUV if at 1:1 with no ISHARP or if doing 2:1 YUV
+ * downscale without EASF
+ */
+ if ((!enable_isharp) && (!enable_easf)) {
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return SCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+ }
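+	/* e.g. (hypothetical case): a 2:1 YUV420 downscale scales luma at 2:1
+	 * while chroma, already subsampled 2x, sits at 1:1, so without EASF
+	 * the chroma path can take the bypass above.
+	 */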
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
+
+static bool spl_choose_lls_policy(enum spl_pixel_format format,
+ enum spl_transfer_func_type tf_type,
+ enum spl_transfer_func_predefined tf_predefined_type,
+ enum linear_light_scaling *lls_pref)
+{
+ if (spl_is_yuv420(format)) {
+ *lls_pref = LLS_PREF_NO;
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
+ return true;
+ } else { /* RGB or YUV444 */
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_BYPASS)) {
+ *lls_pref = LLS_PREF_YES;
+ return true;
+ }
+ }
+ *lls_pref = LLS_PREF_NO;
+ return false;
+}
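+/* In short: YUV420 input always resolves to nonlinear scaling (LLS_PREF_NO),
+ * while RGB/YUV444 with a predefined or bypass transfer function opts into
+ * linear scaling (LLS_PREF_YES).
+ */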
+
+/* Enable EASF? */
+static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
+{
+ int vratio = 0;
+ int hratio = 0;
+ bool skip_easf = false;
+ bool lls_enable_easf = true;
+
+ if (spl_in->disable_easf)
+ skip_easf = true;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /*
+ * No EASF support for downscaling > 2:1
+ * EASF support for upscaling or downscaling up to 2:1
+ */
+ if ((vratio > 2) || (hratio > 2))
+ skip_easf = true;
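+	/* e.g. (hypothetical numbers): a 1920->800 horizontal downscale has a
+	 * ratio of 2.4, which spl_fixpt_ceil() rounds to 3 > 2, so EASF is
+	 * skipped; an exact 2:1 downscale ceils to 2 and stays eligible.
+	 */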
+
+ /*
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
+ * function to determine whether to use LINEAR or NONLINEAR scaling
+ */
+ if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
+ lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
+ spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ &spl_in->lls_pref);
+
+ if (!lls_enable_easf)
+ skip_easf = true;
+
+ /* Check for linear scaling or EASF preferred */
+ if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
+ skip_easf = true;
+
+ return skip_easf;
+}
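+/* Note: despite its name, enable_easf() returns whether EASF should be
+ * *skipped*; callers treat a true result as "EASF off".
+ */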
+
+static bool spl_get_isharp_en(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
+{
+ bool enable_isharp = false;
+ int vratio = 0;
+ int hratio = 0;
+ struct spl_taps taps = spl_scratch->scl_data.taps;
+
+ /* Return if adaptive sharpness is disabled */
+ if (spl_in->adaptive_sharpness.enable == false)
+ return enable_isharp;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /* No iSHARP support for downscaling */
+ if (vratio > 1 || hratio > 1)
+ return enable_isharp;
+
+ // Scaling is up to 1:1 (no scaling) or upscaling
+
+ /*
+ * Apply sharpness to all RGB surfaces and to
+ * NV12/P010 surfaces
+ */
+
+ /*
+ * Apply sharpness if supports horizontal taps 4,6 AND
+ * vertical taps 3, 4, 6
+ */
+ if ((taps.h_taps == 4 || taps.h_taps == 6) &&
+ (taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6))
+ enable_isharp = true;
+
+ return enable_isharp;
+}
+
/* Calculate optimal number of taps */
static bool spl_get_optimal_number_of_taps(
- int max_downscale_src_width, struct spl_in *spl_in, struct spl_out *spl_out,
- const struct spl_taps *in_taps)
+ int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps, bool *enable_easf_v, bool *enable_easf_h,
+ bool *enable_isharp)
{
int num_part_y, num_part_c;
int max_taps_y, max_taps_c;
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
+ bool skip_easf = false;
- if (spl_out->scl_data.viewport.width > spl_out->scl_data.h_active &&
+ if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
- spl_out->scl_data.viewport.width > max_downscale_src_width)
+ spl_scratch->scl_data.viewport.width > max_downscale_src_width)
return false;
+
+ /* Check if we are using EASF or not */
+ skip_easf = enable_easf(spl_in, spl_scratch);
+
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
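	/* Worked example (hypothetical numbers): a 2.5:1 horizontal downscale
	 * gives ceil(2.5) = 3, so h_taps = min(2 * 3, 8) = 6; any upscale
	 * (ratio <= 1) takes the 4-tap default instead.
	 */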
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz) > 1)
- spl_out->scl_data.taps.h_taps = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz), 8);
- else
- spl_out->scl_data.taps.h_taps = 4;
- } else
- spl_out->scl_data.taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 1)
- spl_out->scl_data.taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps = 4;
- } else
- spl_out->scl_data.taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 1)
- spl_out->scl_data.taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert_c, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps_c = 4;
- } else
- spl_out->scl_data.taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c) > 1)
- spl_out->scl_data.taps.h_taps_c = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c), 8);
+ if (skip_easf) {
+ if (in_taps->h_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
+ spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
+ spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert_c, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1)
+ spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz_c), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
else
- spl_out->scl_data.taps.h_taps_c = 4;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ } else {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else { /* RGB */
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 6;
+ spl_scratch->scl_data.taps.v_taps_c = 6;
+ }
+ }
/*Ensure we can support the requested number of vtaps*/
- min_taps_y = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- min_taps_c = dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c);
+ min_taps_y = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
@@ -771,16 +936,16 @@ static bool spl_get_optimal_number_of_taps(
else
lb_config = LB_MEMORY_CONFIG_0;
// Determine max vtap support by calculating how much line buffer can fit
- spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_out->scl_data,
+ spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
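	/* e.g. (hypothetical numbers): with num_part_y = 8 and a 3:1 vertical
	 * downscale, max_taps_y = 8 - (ceil(3) - 2) = 7; at 2:1 or below all
	 * num_part_y partitions remain usable as taps.
	 */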
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
@@ -789,48 +954,108 @@ static bool spl_get_optimal_number_of_taps(
else if (max_taps_c < min_taps_c)
return false;
- if (spl_out->scl_data.taps.v_taps > max_taps_y)
- spl_out->scl_data.taps.v_taps = max_taps_y;
-
- if (spl_out->scl_data.taps.v_taps_c > max_taps_c)
- spl_out->scl_data.taps.v_taps_c = max_taps_c;
- if (spl_in->prefer_easf) {
- // EASF can be enabled only for taps 3,4,6
- // If optimal no of taps is 5, then set it to 4
- // If optimal no of taps is 7 or 8, then set it to 6
- if (spl_out->scl_data.taps.v_taps == 5)
- spl_out->scl_data.taps.v_taps = 4;
- if (spl_out->scl_data.taps.v_taps == 7 || spl_out->scl_data.taps.v_taps == 8)
- spl_out->scl_data.taps.v_taps = 6;
-
- if (spl_out->scl_data.taps.v_taps_c == 5)
- spl_out->scl_data.taps.v_taps_c = 4;
- if (spl_out->scl_data.taps.v_taps_c == 7 || spl_out->scl_data.taps.v_taps_c == 8)
- spl_out->scl_data.taps.v_taps_c = 6;
-
- if (spl_out->scl_data.taps.h_taps == 5)
- spl_out->scl_data.taps.h_taps = 4;
- if (spl_out->scl_data.taps.h_taps == 7 || spl_out->scl_data.taps.h_taps == 8)
- spl_out->scl_data.taps.h_taps = 6;
-
- if (spl_out->scl_data.taps.h_taps_c == 5)
- spl_out->scl_data.taps.h_taps_c = 4;
- if (spl_out->scl_data.taps.h_taps_c == 7 || spl_out->scl_data.taps.h_taps_c == 8)
- spl_out->scl_data.taps.h_taps_c = 6;
+ if (spl_scratch->scl_data.taps.v_taps > max_taps_y)
+ spl_scratch->scl_data.taps.v_taps = max_taps_y;
+ if (spl_scratch->scl_data.taps.v_taps_c > max_taps_c)
+ spl_scratch->scl_data.taps.v_taps_c = max_taps_c;
+
+ if (!skip_easf) {
+ /*
+ * RGB ( L + NL ) and Linear HDR support 6x6, 6x4, 6x3, 4x4, 4x3
+ * NL YUV420 only supports 6x6, 6x4 for Y and 4x4 for UV
+ *
+ * If LB does not support 3, 4, or 6 taps, then disable EASF_V
+ * and only enable EASF_H. So for RGB, support 6x2, 4x2
+ * and for NL YUV420, support 6x2 for Y and 4x2 for UV
+ *
+ * All other cases, have to disable EASF_V and EASF_H
+ *
+ * If optimal no of taps is 5, then set it to 4
+ * If optimal no of taps is 7 or 8, then fine since max tap is 6
+ *
+ */
+ if (spl_scratch->scl_data.taps.v_taps == 5)
+ spl_scratch->scl_data.taps.v_taps = 4;
+
+ if (spl_scratch->scl_data.taps.v_taps_c == 5)
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps == 5)
+ spl_scratch->scl_data.taps.h_taps = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps_c == 5)
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
+ (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
+ (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT((spl_scratch->scl_data.taps.v_taps > 1) &&
+ (spl_scratch->scl_data.taps.v_taps_c > 1));
+ } else { /* RGB */
+ if (spl_scratch->scl_data.taps.h_taps <= 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if (spl_scratch->scl_data.taps.v_taps < 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT(spl_scratch->scl_data.taps.v_taps > 1);
+ }
+ } else {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
	} // end of EASF enable/skip handling
- if (!spl_in->basic_out.always_scale) {
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz))
- spl_out->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert))
- spl_out->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz_c))
- spl_out->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert_c))
- spl_out->scl_data.taps.v_taps_c = 1;
+
+ /* Sharpener requires scaler to be enabled, including for 1:1
+ * Check if ISHARP can be enabled
+ * If ISHARP is not enabled, for 1:1, set taps to 1 and disable
+ * EASF
+ * For case of 2:1 YUV where chroma is 1:1, set taps to 1 if
+ * EASF is not enabled
+ */
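+	/* e.g. (hypothetical case): full-screen 1:1 RGB with sharpening off
+	 * collapses all taps to 1 and disables EASF, while 1:1 with ISHARP on
+	 * keeps the scaler enabled so the sharpener has output to act on.
+	 */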
+
+ *enable_isharp = spl_get_isharp_en(spl_in, spl_scratch);
+ if (!*enable_isharp && !spl_in->basic_out.always_scale) {
+ if ((IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
+ spl_scratch->scl_data.taps.h_taps = 1;
+ spl_scratch->scl_data.taps.v_taps = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else {
+ if ((!*enable_easf_h) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if ((!*enable_easf_v) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+ }
}
return true;
}
+
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
@@ -848,38 +1073,38 @@ static void spl_set_black_color_data(enum spl_pixel_format format,
static void spl_set_manual_ratio_init_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *scl_data)
{
- struct fixed31_32 bot;
+ struct spl_fixed31_32 bot;
- dscl_prog_data->ratios.h_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.horz) << 5;
- dscl_prog_data->ratios.v_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.vert) << 5;
- dscl_prog_data->ratios.h_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
- dscl_prog_data->ratios.v_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
+ dscl_prog_data->ratios.h_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.horz) << 5;
+ dscl_prog_data->ratios.v_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.vert) << 5;
+ dscl_prog_data->ratios.h_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
+ dscl_prog_data->ratios.v_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
/*
* 0.24 format for fraction, first five bits zeroed
*/
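	/* Illustration: spl_fixpt_u0d19() returns a U0.19 fraction; shifting it
	 * left by 5 gives the 24-bit field with its five LSBs zeroed, e.g. 0.5
	 * (0x40000 in U0.19) becomes 0x800000 in 0.24 format.
	 */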
dscl_prog_data->init.h_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.h) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h) << 5;
dscl_prog_data->init.h_filter_init_int =
- dc_fixpt_floor(scl_data->inits.h);
+ spl_fixpt_floor(scl_data->inits.h);
dscl_prog_data->init.h_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.h_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h_c) << 5;
dscl_prog_data->init.h_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.h_c);
+ spl_fixpt_floor(scl_data->inits.h_c);
dscl_prog_data->init.v_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.v) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v) << 5;
dscl_prog_data->init.v_filter_init_int =
- dc_fixpt_floor(scl_data->inits.v);
+ spl_fixpt_floor(scl_data->inits.v);
dscl_prog_data->init.v_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.v_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v_c) << 5;
dscl_prog_data->init.v_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.v_c);
-
- bot = dc_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
- dscl_prog_data->init.v_filter_init_bot_frac = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int = dc_fixpt_floor(bot);
- bot = dc_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
- dscl_prog_data->init.v_filter_init_bot_frac_c = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int_c = dc_fixpt_floor(bot);
+ spl_fixpt_floor(scl_data->inits.v_c);
+
+ bot = spl_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
+ dscl_prog_data->init.v_filter_init_bot_frac = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int = spl_fixpt_floor(bot);
+ bot = spl_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
+ dscl_prog_data->init.v_filter_init_bot_frac_c = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int_c = spl_fixpt_floor(bot);
}
static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
@@ -890,77 +1115,28 @@ static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->taps.v_taps_c = scl_data->taps.v_taps_c - 1;
dscl_prog_data->taps.h_taps_c = scl_data->taps.h_taps_c - 1;
}
-static const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
-{
- if (taps == 8)
- return spl_get_filter_8tap_64p(ratio);
- else if (taps == 7)
- return spl_get_filter_7tap_64p(ratio);
- else if (taps == 6)
- return spl_get_filter_6tap_64p(ratio);
- else if (taps == 5)
- return spl_get_filter_5tap_64p(ratio);
- else if (taps == 4)
- return spl_get_filter_4tap_64p(ratio);
- else if (taps == 3)
- return spl_get_filter_3tap_64p(ratio);
- else if (taps == 2)
- return spl_get_filter_2tap_64p();
- else if (taps == 1)
- return NULL;
- else {
- /* should never happen, bug */
- return NULL;
- }
-}
-static void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps, data->ratios.horz);
- dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps, data->ratios.vert);
- dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps_c, data->ratios.horz_c);
- dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps_c, data->ratios.vert_c);
-}
-
-static const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
-{
- if ((taps == 3) || (taps == 4) || (taps == 6))
- return spl_get_filter_isharp_bs_4tap_64p();
- else {
- /* should never happen, bug */
- return NULL;
- }
-}
-static void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_blur_scale_h = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.h_taps);
- dscl_prog_data->filter_blur_scale_v = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.v_taps);
-}
/* Populate dscl prog data structure from scaler data calculated by SPL */
-static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out, bool enable_easf_v, bool enable_easf_h, bool enable_isharp)
{
struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ const struct spl_scaler_data *data = &spl_scratch->scl_data;
struct scl_black_color *scl_black_color = &dscl_prog_data->scl_black_color;
+ bool enable_easf = enable_easf_v || enable_easf_h;
+
// Set values for recout
- dscl_prog_data->recout = spl_out->scl_data.recout;
+ dscl_prog_data->recout = spl_scratch->scl_data.recout;
// Set values for MPC Size
- dscl_prog_data->mpc_size.width = spl_out->scl_data.h_active;
- dscl_prog_data->mpc_size.height = spl_out->scl_data.v_active;
+ dscl_prog_data->mpc_size.width = spl_scratch->scl_data.h_active;
+ dscl_prog_data->mpc_size.height = spl_scratch->scl_data.v_active;
// SCL_MODE - Set SCL_MODE data
- dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data);
+ dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data, enable_isharp,
+ enable_easf);
// SCL_BLACK_COLOR
spl_set_black_color_data(spl_in->basic_in.format, scl_black_color);
@@ -971,103 +1147,135 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_ou
// Set HTaps/VTaps
spl_set_taps_data(dscl_prog_data, data);
// Set viewport
- dscl_prog_data->viewport = spl_out->scl_data.viewport;
+ dscl_prog_data->viewport = spl_scratch->scl_data.viewport;
// Set viewport_c
- dscl_prog_data->viewport_c = spl_out->scl_data.viewport_c;
+ dscl_prog_data->viewport_c = spl_scratch->scl_data.viewport_c;
// Set filters data
- spl_set_filters_data(dscl_prog_data, data);
+ spl_set_filters_data(dscl_prog_data, data, enable_easf_v, enable_easf_h);
}
-/* Enable EASF ?*/
-static bool enable_easf(int scale_ratio, int taps,
- enum linear_light_scaling lls_pref, bool prefer_easf)
+
+/* Calculate C0-C3 coefficients based on HDR_mult */
+static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t hdr_multx100)
{
- // Is downscaling > 6:1 ?
- if (scale_ratio > 6) {
- // END - No EASF support for downscaling > 6:1
- return false;
- }
- // Is upscaling or downscaling up to 2:1?
- if (scale_ratio <= 2) {
- // Is linear scaling or EASF preferred?
- if (lls_pref == LLS_PREF_YES || prefer_easf) {
- // LB support taps 3, 4, 6
- if (taps == 3 || taps == 4 || taps == 6) {
- // END - EASF supported
- return true;
- }
- }
- }
- // END - EASF not supported
- return false;
+ struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult;
+ struct spl_fixed31_32 c0_calc, c1_calc, c2_calc;
+ struct spl_custom_float_format fmt;
+
+ SPL_ASSERT(hdr_multx100);
+ hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100, 100LL);
+ c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL);
+ c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL);
+ c2_mult = spl_fixpt_from_fraction(722LL, 10000LL);
+
+ c0_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c0_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c1_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c1_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c2_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c2_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
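+	/* Sanity check (hypothetical input): hdr_multx100 = 100 gives
+	 * hdr_mult = 1.0, so c0_calc = 0.2126 * 16384 / 125 ~= 27.866,
+	 * matching the LN_rec709 C0 value noted below.
+	 */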
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
+
+ // fp1.5.10, C0 coefficient (LN_rec709: HDR_MULT * 0.212600 * 2^14/125)
+ spl_convert_to_custom_float_format(c0_calc, &fmt, &dscl_prog_data->easf_matrix_c0);
+ // fp1.5.10, C1 coefficient (LN_rec709: HDR_MULT * 0.715200 * 2^14/125)
+ spl_convert_to_custom_float_format(c1_calc, &fmt, &dscl_prog_data->easf_matrix_c1);
+ // fp1.5.10, C2 coefficient (LN_rec709: HDR_MULT * 0.072200 * 2^14/125)
+ spl_convert_to_custom_float_format(c2_calc, &fmt, &dscl_prog_data->easf_matrix_c2);
+ dscl_prog_data->easf_matrix_c3 = 0x0; // fp1.5.10, C3 coefficient
}
+
/* Set EASF data */
-static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
- bool enable_easf_v, bool enable_easf_h, enum linear_light_scaling lls_pref,
- enum spl_pixel_format format)
+static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v,
+ bool enable_easf_h, enum linear_light_scaling lls_pref,
+ enum spl_pixel_format format, enum system_setup setup,
+ uint32_t hdr_multx100)
{
- if (spl_is_yuv420(format)) /* TODO: 0 = RGB, 1 = YUV */
- dscl_prog_data->easf_matrix_mode = 1;
- else
- dscl_prog_data->easf_matrix_mode = 0;
-
+ struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 1;
+ dscl_prog_data->easf_v_sharp_factor = 0;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_v_bf3_mode = 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_v_bf3_mode = spl_get_v_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ minCoef ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_uptilt =
- 0x9F00;// FP1.5.10 [minCoef] (-0.036109167214271)
+ spl_get_3tap_dntilt_uptilt_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTiltMaxVal ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt_max =
- 0x24FE; // FP1.5.10 [upTiltMaxVal] ( 0.904556445553545)
+ spl_get_3tap_uptilt_maxval(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ dnTiltSlope ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_slope =
- 0x3940; // FP1.5.10 [dnTiltSlope] ( 0.910488988173371)
+ spl_get_3tap_dntilt_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt1Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt1_slope =
- 0x359C; // FP1.5.10 [upTilt1Slope] ( 0.125620179040899)
+ spl_get_3tap_uptilt1_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_slope =
- 0x359C; // FP1.5.10 [upTilt2Slope] ( 0.006786817723568)
+ spl_get_3tap_uptilt2_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Offset ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_offset =
- 0x9F00; // FP1.5.10 [upTilt2Offset] (-0.006139059716651)
+ spl_get_3tap_uptilt2_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */
dscl_prog_data->easf_v_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */
dscl_prog_data->easf_v_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
	dscl_prog_data->easf_v_bf_maxa = 63; //Vertical Max BF value A in U0.6 format. Selected if V_FCNTL == 0
	dscl_prog_data->easf_v_bf_maxb = 63; //Vertical Max BF value B in U0.6 format. Selected if V_FCNTL == 1
	dscl_prog_data->easf_v_bf_mina = 0; //Vertical Min BF value A in U0.6 format. Selected if V_FCNTL == 0
	dscl_prog_data->easf_v_bf_minb = 0; //Vertical Min BF value B in U0.6 format. Selected if V_FCNTL == 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1088,13 +1296,41 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_v_bf3_pwl_slope_set4 =
0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1113,11 +1349,11 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_v_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
}
} else
dscl_prog_data->easf_v_en = false;
@@ -1125,52 +1361,63 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 1;
+ dscl_prog_data->easf_h_sharp_factor = 0;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_h_bf3_mode =
- 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_h_bf3_mode = spl_get_h_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */
dscl_prog_data->easf_h_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */
dscl_prog_data->easf_h_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
	dscl_prog_data->easf_h_bf_maxa = 63; //Horz Max BF value A in U0.6 format. Selected if H_FCNTL==0
	dscl_prog_data->easf_h_bf_maxb = 63; //Horz Max BF value B in U0.6 format. Selected if H_FCNTL==1
	dscl_prog_data->easf_h_bf_mina = 0; //Horz Min BF value A in U0.6 format. Selected if H_FCNTL==0
	dscl_prog_data->easf_h_bf_minb = 0; //Horz Min BF value B in U0.6 format. Selected if H_FCNTL==1
- dscl_prog_data->easf_h_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1188,12 +1435,40 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1211,25 +1486,30 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} // if (lls_pref == LLS_PREF_YES)
} else
dscl_prog_data->easf_h_en = false;
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->easf_ltonl_en = 1; // Linear input
- dscl_prog_data->easf_matrix_c0 =
- 0x504E; // fp1.5.10, C0 coefficient (LN_BT2020: 0.2627 * (2^14)/125 = 34.43750000)
- dscl_prog_data->easf_matrix_c1 =
- 0x558E; // fp1.5.10, C1 coefficient (LN_BT2020: 0.6780 * (2^14)/125 = 88.87500000)
- dscl_prog_data->easf_matrix_c2 =
- 0x47C6; // fp1.5.10, C2 coefficient (LN_BT2020: 0.0593 * (2^14)/125 = 7.77343750)
- dscl_prog_data->easf_matrix_c3 =
- 0x0; // fp1.5.10, C3 coefficient
+ if ((setup == HDR_L) && (spl_is_rgb8(format))) {
+ /* Calculate C0-C3 coefficients based on HDR multiplier */
+ spl_calculate_c0_c3_hdr(dscl_prog_data, hdr_multx100);
+ } else { // HDR_L ( DWM ) and SDR_L
+ dscl_prog_data->easf_matrix_c0 =
+ 0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720)
+ dscl_prog_data->easf_matrix_c1 =
+ 0x55DC; // fp1.5.10, C1 coefficient (LN_rec709: 0.7152 * (2^14)/125 = 93.74269440)
+ dscl_prog_data->easf_matrix_c2 =
+ 0x48BB; // fp1.5.10, C2 coefficient (LN_rec709: 0.0722 * (2^14)/125 = 9.46339840)
+ dscl_prog_data->easf_matrix_c3 =
+ 0x0; // fp1.5.10, C3 coefficient
+ }
} else {
dscl_prog_data->easf_ltonl_en = 0; // Non-Linear input
dscl_prog_data->easf_matrix_c0 =
@@ -1241,27 +1521,43 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_matrix_c3 =
0x0; // fp1.5.10, C3 coefficient
}
+
+ if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ dscl_prog_data->easf_matrix_mode = 1;
+ /*
+ * 2-bit, BF3 chroma mode correction calculation mode
+ * Needs to be disabled for YUV420 mode
+ * Override lookup value
+ */
+ dscl_prog_data->easf_v_bf3_mode = 0;
+ dscl_prog_data->easf_h_bf3_mode = 0;
+ } else
+ dscl_prog_data->easf_matrix_mode = 0;
+
}
+
/*Set isharp noise detection */
-static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data)
+static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
{
// ISHARP_NOISEDET_MODE
// 0: 3x5 as VxH
// 1: 4x5 as VxH
// 2:
// 3: 5x5 as VxH
- if (dscl_prog_data->taps.v_taps == 6)
- dscl_prog_data->isharp_noise_det.mode = 3; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 4)
- dscl_prog_data->isharp_noise_det.mode = 1; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 3)
- dscl_prog_data->isharp_noise_det.mode = 0; // ISHARP_NOISEDET_MODE
+ if (data->taps.v_taps == 6)
+ dscl_prog_data->isharp_noise_det.mode = 3;
+ else if (data->taps.v_taps == 4)
+ dscl_prog_data->isharp_noise_det.mode = 1;
+ else if (data->taps.v_taps == 3)
+ dscl_prog_data->isharp_noise_det.mode = 0;
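+	/* i.e. the detection window height tracks v_taps: 6 taps -> 5x5,
+	 * 4 taps -> 4x5, 3 taps -> 3x5 (VxH).
+	 */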
};
/* Set Sharpener data */
static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
struct adaptive_sharpness adp_sharpness, bool enable_isharp,
enum linear_light_scaling lls_pref, enum spl_pixel_format format,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data, struct spl_fixed31_32 ratio,
+ enum system_setup setup)
{
/* Turn off sharpener if not required */
if (!enable_isharp) {
@@ -1270,10 +1566,12 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
}
dscl_prog_data->isharp_en = 1; // ISHARP_EN
- dscl_prog_data->isharp_noise_det.enable = 1; // ISHARP_NOISEDET_EN
	// Enable noise detection and set ISHARP_NOISEDET_MODE only when h_taps == 6
- if (dscl_prog_data->taps.h_taps == 6)
- spl_set_isharp_noise_det_mode(dscl_prog_data); // ISHARP_NOISEDET_MODE
+ if (data->taps.h_taps == 6) {
+ dscl_prog_data->isharp_noise_det.enable = 1; /* ISHARP_NOISEDET_EN */
+ spl_set_isharp_noise_det_mode(dscl_prog_data, data); /* ISHARP_NOISEDET_MODE */
+ } else
+ dscl_prog_data->isharp_noise_det.enable = 0; // ISHARP_NOISEDET_EN
// Program noise detection threshold
dscl_prog_data->isharp_noise_det.uthreshold = 24; // ISHARP_NOISEDET_UTHRE
dscl_prog_data->isharp_noise_det.dthreshold = 4; // ISHARP_NOISEDET_DTHRE
@@ -1282,50 +1580,93 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->isharp_noise_det.pwl_end_in = 13; // ISHARP_NOISEDET_PWL_END_IN
dscl_prog_data->isharp_noise_det.pwl_slope = 1623; // ISHARP_NOISEDET_PWL_SLOPE
- if ((lls_pref == LLS_PREF_NO) && !spl_is_yuv420(format)) /* ISHARP_FMT_MODE */
+ if (lls_pref == LLS_PREF_NO) /* ISHARP_FMT_MODE */
dscl_prog_data->isharp_fmt.mode = 1;
else
dscl_prog_data->isharp_fmt.mode = 0;
dscl_prog_data->isharp_fmt.norm = 0x3C00; // ISHARP_FMT_NORM
dscl_prog_data->isharp_lba.mode = 0; // ISHARP_LBA_MODE
- // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
- dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
- dscl_prog_data->isharp_lba.in_seg[1] = 256; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
- dscl_prog_data->isharp_lba.in_seg[2] = 614; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[2] = -20; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
- dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
- dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
- dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
- switch (adp_sharpness.sharpness) {
- case SHARPNESS_LOW:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_0p5x();
- break;
- case SHARPNESS_MID:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_1p0x();
- break;
- case SHARPNESS_HIGH:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_2p0x();
- break;
- default:
- BREAK_TO_DEBUGGER();
+
+ if (setup == SDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 62; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 130; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 450; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x18D; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -115
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 520; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 520; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 520; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else if (setup == HDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 254; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 559; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x10C; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -244
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 592; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 40; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 204; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 818; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x1D9; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -39
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
}
+
+ spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness);
+ dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
+ dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
+
// Program the nldelta soft clip values
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->isharp_nldelta_sclip.enable_p = 0; /* ISHARP_NLDELTA_SCLIP_EN_P */
@@ -1346,59 +1687,6 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
// Set the values as per lookup table
spl_set_blur_scale_data(dscl_prog_data, data);
}
-static bool spl_get_isharp_en(struct adaptive_sharpness adp_sharpness,
- int vscale_ratio, int hscale_ratio, struct spl_taps taps,
- enum spl_pixel_format format)
-{
- bool enable_isharp = false;
-
- if (adp_sharpness.enable == false)
- return enable_isharp; // Return if adaptive sharpness is disabled
- // Is downscaling ?
- if (vscale_ratio > 1 || hscale_ratio > 1) {
- // END - No iSHARP support for downscaling
- return enable_isharp;
- }
- // Scaling is up to 1:1 (no scaling) or upscaling
-
- /* Only apply sharpness to NV12 and not P010 */
- if (format != SPL_PIXEL_FORMAT_420BPP8)
- return enable_isharp;
-
- // LB support horizontal taps 4,6 or vertical taps 3, 4, 6
- if (taps.h_taps == 4 || taps.h_taps == 6 ||
- taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6) {
- // END - iSHARP supported
- enable_isharp = true;
- }
- return enable_isharp;
-}
-
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
- enum linear_light_scaling *lls_pref)
-{
- if (spl_is_yuv420(format)) {
- *lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) || (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if (tf_type == SPL_TF_TYPE_PREDEFINED) {
- if ((tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG) ||
- (tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG12))
- *lls_pref = LLS_PREF_NO;
- else
- *lls_pref = LLS_PREF_YES;
- return true;
- } else if (tf_type == SPL_TF_TYPE_BYPASS) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
-}
/* Calculate scaler parameters */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
@@ -1406,65 +1694,74 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
bool res = false;
bool enable_easf_v = false;
bool enable_easf_h = false;
- bool lls_enable_easf = true;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ int vratio = 0;
+ int hratio = 0;
+ struct spl_scratch spl_scratch;
+ struct spl_fixed31_32 isharp_scale_ratio;
+ enum system_setup setup;
+ bool enable_isharp = false;
+ const struct spl_scaler_data *data = &spl_scratch.scl_data;
+
+ memset(&spl_scratch, 0, sizeof(struct spl_scratch));
+ spl_scratch.scl_data.h_active = spl_in->h_active;
+ spl_scratch.scl_data.v_active = spl_in->v_active;
+
// All SPL calls
/* recout calculation */
/* depends on h_active */
- spl_calculate_recout(spl_in, spl_out);
+ spl_calculate_recout(spl_in, &spl_scratch, spl_out);
/* depends on pixel format */
- spl_calculate_scaling_ratios(spl_in, spl_out);
+ spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out);
/* depends on scaling ratios and recout, does not calculate offset yet */
- spl_calculate_viewport_size(spl_in, spl_out);
+ spl_calculate_viewport_size(spl_in, &spl_scratch);
res = spl_get_optimal_number_of_taps(
spl_in->basic_out.max_downscale_src_width, spl_in,
- spl_out, &spl_in->scaling_quality);
+ &spl_scratch, &spl_in->scaling_quality, &enable_easf_v,
+ &enable_easf_h, &enable_isharp);
/*
* Depends on recout, scaling ratios, h_active and taps
* May need to re-check lb size after this in some obscure scenario
*/
if (res)
- spl_calculate_inits_and_viewports(spl_in, spl_out);
+ spl_calculate_inits_and_viewports(spl_in, &spl_scratch);
// Handle 3d recout
- spl_handle_3d_recout(spl_in, &spl_out->scl_data.recout);
+ spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_out->scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport);
if (!res)
return res;
- /*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
- */
- if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
- &spl_in->lls_pref);
-
// Save all calculated parameters in dscl_prog_data structure to program hw registers
- spl_set_dscl_prog_data(spl_in, spl_out);
+ spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
- int vratio = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- int hratio = dc_fixpt_ceil(spl_out->scl_data.ratios.horz);
- if (!lls_enable_easf || spl_in->disable_easf) {
- enable_easf_v = false;
- enable_easf_h = false;
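+ /*
+ * Map scaling preference and dynamic range to the sharpness setup:
+ * linear (LLS_PREF_YES) gives HDR_L/SDR_L, non-linear gives HDR_NL/SDR_NL.
+ */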
+ if (spl_in->lls_pref == LLS_PREF_YES) {
+ if (spl_in->is_hdr_on)
+ setup = HDR_L;
+ else
+ setup = SDR_L;
} else {
- /* Enable EASF on vertical? */
- enable_easf_v = enable_easf(vratio, spl_out->scl_data.taps.v_taps, spl_in->lls_pref, spl_in->prefer_easf);
- /* Enable EASF on horizontal? */
- enable_easf_h = enable_easf(hratio, spl_out->scl_data.taps.h_taps, spl_in->lls_pref, spl_in->prefer_easf);
+ if (spl_in->is_hdr_on)
+ setup = HDR_NL;
+ else
+ setup = SDR_NL;
}
+
// Set EASF
- spl_set_easf_data(spl_out->dscl_prog_data, enable_easf_v, enable_easf_h, spl_in->lls_pref,
- spl_in->basic_in.format);
+ spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref,
+ spl_in->basic_in.format, setup, spl_in->hdr_multx100);
+
// Set iSHARP
- bool enable_isharp = spl_get_isharp_en(spl_in->adaptive_sharpness, vratio, hratio,
- spl_out->scl_data.taps, spl_in->basic_in.format);
+ vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.horz);
+ if (vratio <= hratio)
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.vert;
+ else
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz;
+
spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp,
- spl_in->lls_pref, spl_in->basic_in.format, data);
+ spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup);
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
index f1fd3eb92f8a..205e59a2a8ee 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
@@ -9,16 +9,8 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* SPL interfaces */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
new file mode 100644
index 000000000000..99238644e0a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dc_spl_filters.h"
+
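+/*
+ * S1.10 -> S1.12 keeps the numeric value and widens the fraction: each
+ * coefficient is shifted left two bits (multiplied by 4). For example,
+ * 0x0200 (512/1024 = 0.5 in S1.10) becomes 0x0800 (2048/4096 = 0.5 in S1.12).
+ *
+ * Hypothetical use (caller and table name are illustrative, not part of
+ * this patch):
+ *
+ *   uint16_t out_s1_12[NUM_PHASES_COEFF * 3];
+ *   convert_filter_s1_10_to_s1_12(some_3tap_s1_10_table, out_s1_12, 3);
+ */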
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps)
+{
+ int num_entries = NUM_PHASES_COEFF * num_taps;
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ *(s1_12_filter + i) = *(s1_10_filter + i) * 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
new file mode 100644
index 000000000000..20439cdbdb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_FILTERS_H__
+#define __DC_SPL_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+#define NUM_PHASES_COEFF 33
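+/*
+ * 64-phase filters store 33 rows (phase 0 through the midpoint); the
+ * remaining phases presumably reuse mirrored coefficients, as is usual for
+ * symmetric scaler kernels. Hence 3/4/6-tap tables of 99/132/198 entries.
+ */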
+
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps);
+
+#endif /* __DC_SPL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
index 8bc838c7c3c5..33712f50d303 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
@@ -2,7 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "dc_spl_types.h"
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
#include "dc_spl_isharp_filters.h"
//========================================
@@ -16,7 +17,7 @@
// C_start = 40.000000
// C_end = 64.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0[32] = {
+static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -62,7 +63,7 @@ static const uint32_t filter_isharp_1D_lut_0[32] = {
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = {
0x00000000,
0x02020101,
0x06050403,
@@ -107,7 +108,7 @@ static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = {
0x01000000,
0x05040302,
0x0B0A0806,
@@ -152,7 +153,7 @@ static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = {
0x01010000,
0x07050402,
0x110F0C0A,
@@ -197,7 +198,7 @@ static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
// C_start = 40.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -231,6 +232,53 @@ static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
0x080B0D0E,
0x00020406,
};
+//========================================
+// Delta Gain 1DLUT
+// LUT content is packed as 4 bytes into one DWORD/entry
+// A_start = 0.000000
+// A_end = 10.000000
+// A_gain = 3.000000
+// B_start = 11.000000
+// B_end = 127.000000
+// C_start = 40.000000
+// C_end = 127.000000
+//========================================
+static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = {
+0x03010000,
+0x0F0B0805,
+0x211E1813,
+0x2B292624,
+0x3533302E,
+0x3E3C3A37,
+0x46444240,
+0x4D4B4A48,
+0x5352504F,
+0x59575655,
+0x5D5C5B5A,
+0x61605F5E,
+0x64646362,
+0x66666565,
+0x68686767,
+0x68686868,
+0x68686868,
+0x67676868,
+0x65656666,
+0x62636464,
+0x5E5F6061,
+0x5A5B5C5D,
+0x55565759,
+0x4F505253,
+0x484A4B4D,
+0x40424446,
+0x373A3C3E,
+0x2E303335,
+0x2426292B,
+0x191B1E21,
+0x0D101316,
+0x0003060A,
+};
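+/*
+ * Packing example: assuming little-endian layout (the consumer walks this
+ * table with a uint8_t pointer), entry 0x0F0B0805 above carries the four
+ * samples 0x05, 0x08, 0x0B, 0x0F for LUT indices 4 through 7.
+ */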
+
+//========================================================
// Wide scaler coefficients
//========================================================
// <using> gen_scaler_coeffs.m
@@ -285,7 +333,7 @@ static const uint16_t filter_isharp_wide_6tap_64p[198] = {
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
-static const uint16_t filter_isharp_bs_4tap_64p[198] = {
+static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = {
0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000,
0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000,
0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000,
@@ -320,6 +368,138 @@ static const uint16_t filter_isharp_bs_4tap_64p[198] = {
0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000,
0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000
};
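+//
+// S1.10 note: 0x0400 represents 1.0 (1024/1024) and each phase row sums to
+// 0x0400 for unity gain, e.g. 0x00E5 + 0x0237 + 0x00E4 = 0x0400; values such
+// as 0x0FFF are small negatives in two's complement (-1/1024).
+//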
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 25-Apr-2022
+// <num_taps> 4
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_4tap_64p[132] = {
+0x00E5, 0x0237, 0x00E4, 0x0000,
+0x00DE, 0x0237, 0x00EB, 0x0000,
+0x00D7, 0x0236, 0x00F2, 0x0001,
+0x00D0, 0x0235, 0x00FA, 0x0001,
+0x00C9, 0x0234, 0x0101, 0x0002,
+0x00C2, 0x0233, 0x0108, 0x0003,
+0x00BB, 0x0232, 0x0110, 0x0003,
+0x00B5, 0x0230, 0x0117, 0x0004,
+0x00AE, 0x022E, 0x011F, 0x0005,
+0x00A8, 0x022C, 0x0126, 0x0006,
+0x00A2, 0x022A, 0x012D, 0x0007,
+0x009C, 0x0228, 0x0134, 0x0008,
+0x0096, 0x0225, 0x013C, 0x0009,
+0x0090, 0x0222, 0x0143, 0x000B,
+0x008A, 0x021F, 0x014B, 0x000C,
+0x0085, 0x021C, 0x0151, 0x000E,
+0x007F, 0x0218, 0x015A, 0x000F,
+0x007A, 0x0215, 0x0160, 0x0011,
+0x0074, 0x0211, 0x0168, 0x0013,
+0x006F, 0x020D, 0x016F, 0x0015,
+0x006A, 0x0209, 0x0176, 0x0017,
+0x0065, 0x0204, 0x017E, 0x0019,
+0x0060, 0x0200, 0x0185, 0x001B,
+0x005C, 0x01FB, 0x018C, 0x001D,
+0x0057, 0x01F6, 0x0193, 0x0020,
+0x0053, 0x01F1, 0x019A, 0x0022,
+0x004E, 0x01EC, 0x01A1, 0x0025,
+0x004A, 0x01E6, 0x01A8, 0x0028,
+0x0046, 0x01E1, 0x01AF, 0x002A,
+0x0042, 0x01DB, 0x01B6, 0x002D,
+0x003F, 0x01D5, 0x01BB, 0x0031,
+0x003B, 0x01CF, 0x01C2, 0x0034,
+0x0037, 0x01C9, 0x01C9, 0x0037,
+};
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 09-Jun-2022
+// <num_taps> 3
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_3tap_64p[99] = {
+0x0200, 0x0200, 0x0000,
+0x01F6, 0x0206, 0x0004,
+0x01EC, 0x020B, 0x0009,
+0x01E2, 0x0211, 0x000D,
+0x01D8, 0x0216, 0x0012,
+0x01CE, 0x021C, 0x0016,
+0x01C4, 0x0221, 0x001B,
+0x01BA, 0x0226, 0x0020,
+0x01B0, 0x022A, 0x0026,
+0x01A6, 0x022F, 0x002B,
+0x019C, 0x0233, 0x0031,
+0x0192, 0x0238, 0x0036,
+0x0188, 0x023C, 0x003C,
+0x017E, 0x0240, 0x0042,
+0x0174, 0x0244, 0x0048,
+0x016A, 0x0248, 0x004E,
+0x0161, 0x024A, 0x0055,
+0x0157, 0x024E, 0x005B,
+0x014D, 0x0251, 0x0062,
+0x0144, 0x0253, 0x0069,
+0x013A, 0x0256, 0x0070,
+0x0131, 0x0258, 0x0077,
+0x0127, 0x025B, 0x007E,
+0x011E, 0x025C, 0x0086,
+0x0115, 0x025E, 0x008D,
+0x010B, 0x0260, 0x0095,
+0x0102, 0x0262, 0x009C,
+0x00F9, 0x0263, 0x00A4,
+0x00F0, 0x0264, 0x00AC,
+0x00E7, 0x0265, 0x00B4,
+0x00DF, 0x0264, 0x00BD,
+0x00D6, 0x0265, 0x00C5,
+0x00CD, 0x0266, 0x00CD,
+};
+
+/* Blur & Scale coefficient tables converted from S1.10 to S1.12 */
+static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
+static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
+static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
+
+/* Pre-generated 1DLUT for given setup and sharpness level */
+struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = {
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+};
+
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
return filter_isharp_1D_lut_0;
@@ -340,11 +520,165 @@ const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
{
return filter_isharp_1D_lut_2p0x;
}
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
+{
+ return filter_isharp_1D_lut_3p0x;
+}
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
{
return filter_isharp_wide_6tap_64p;
}
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
{
- return filter_isharp_bs_4tap_64p;
+ return filter_isharp_bs_4tap_in_6_64p_s1_12;
}
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
+{
+ return filter_isharp_bs_4tap_64p_s1_12;
+}
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
+{
+ return filter_isharp_bs_3tap_64p_s1_12;
+}
+
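+/*
+ * Worked example (range values assumed for illustration, not taken from
+ * this patch): with sdr_rgb_min = 0, sdr_rgb_mid = 1000, sdr_rgb_max = 3000,
+ * discrete level 7 gives upper_half_step_size = (3000 - 1000) / 5 = 400 and
+ * sharpness_level = 1000 + 400 * (7 - 5) = 1800.
+ */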
+static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level, enum system_setup setup,
+ struct spl_sharpness_range sharpness_range)
+{
+ unsigned int sharpness_level = 0;
+ int min_sharpness, max_sharpness, mid_sharpness;
+ int lower_half_step_size, upper_half_step_size;
+
+ switch (setup) {
+ case HDR_L:
+ min_sharpness = sharpness_range.hdr_rgb_min;
+ max_sharpness = sharpness_range.hdr_rgb_max;
+ mid_sharpness = sharpness_range.hdr_rgb_mid;
+ break;
+ case HDR_NL:
+ /* currently no use case, use Non-linear SDR values for now */
+ case SDR_NL:
+ min_sharpness = sharpness_range.sdr_yuv_min;
+ max_sharpness = sharpness_range.sdr_yuv_max;
+ mid_sharpness = sharpness_range.sdr_yuv_mid;
+ break;
+ case SDR_L:
+ default:
+ min_sharpness = sharpness_range.sdr_rgb_min;
+ max_sharpness = sharpness_range.sdr_rgb_max;
+ mid_sharpness = sharpness_range.sdr_rgb_mid;
+ break;
+ }
+
+ lower_half_step_size = (mid_sharpness - min_sharpness) / 5;
+ upper_half_step_size = (max_sharpness - mid_sharpness) / 5;
+
+ // lower half linear approximation
+ if (discrete_sharpness_level < 5)
+ sharpness_level = min_sharpness + (lower_half_step_size * discrete_sharpness_level);
+ // upper half linear approximation
+ else
+ sharpness_level = mid_sharpness + (upper_half_step_size * (discrete_sharpness_level - 5));
+
+ return sharpness_level;
+}
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
+ struct adaptive_sharpness sharpness)
+{
+ uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst;
+ struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level;
+ int j;
+ int size_1dlut;
+ int sharp_calc_int;
+ uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE];
+
+ /* Custom sharpnessX1000 value */
+ unsigned int sharpnessX1000 = spl_calculate_sharpness_level(sharpness.sharpness_level,
+ setup, sharpness.sharpness_range);
+ sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000);
+
+ /*
+ * Check if the pregen 1dlut table is already calculated for this
+ * sharpness; recalculate only when the numer/denom has changed.
+ */
+ if ((filter_isharp_1D_lut_pregen[setup].sharpness_numer == sharpnessX1000) &&
+ (filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000))
+ return;
+
+ /*
+ * Calculate LUT_128_gained with this equation:
+ *
+ * LUT_128_gained[i] = (uint8)(0.5 + min(255,(double)(LUT_128[i])*sharpLevel/iGain))
+ * where LUT_128[i] is contents of 3p0x isharp 1dlut
+ * where sharpLevel is desired sharpness level
+ * where iGain is base sharpness level 3.0
+ * where LUT_128_gained[i] is adjusted 1dlut value based on desired sharpness level
+ */
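+ /*
+ * Worked example with illustrative numbers: LUT_128[i] = 0x68 (104) and
+ * sharpLevel = 2.0 give 104 * 2.0 / 3.0 = 69.33; adding 0.5 and flooring
+ * yields 69 (0x45), well under the 0x7F clamp below.
+ */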
+ byte_ptr_1dlut_src = (uint8_t *)filter_isharp_1D_lut_3p0x;
+ byte_ptr_1dlut_dst = (uint8_t *)filter_pregen_store;
+ size_1dlut = sizeof(filter_isharp_1D_lut_3p0x);
+ memset(byte_ptr_1dlut_dst, 0, size_1dlut);
+ for (j = 0; j < size_1dlut; j++) {
+ sharp_base = spl_fixpt_from_int((int)*byte_ptr_1dlut_src);
+ sharp_calc = spl_fixpt_mul(sharp_base, sharp_level);
+ sharp_calc = spl_fixpt_div(sharp_calc, spl_fixpt_from_int(3));
+ sharp_calc = spl_fixpt_min(spl_fixpt_from_int(255), sharp_calc);
+ sharp_calc = spl_fixpt_add(sharp_calc, spl_fixpt_from_fraction(1, 2));
+ sharp_calc_int = spl_fixpt_floor(sharp_calc);
+ /* Clamp it at 0x7F so it doesn't wrap */
+ if (sharp_calc_int > 127)
+ sharp_calc_int = 127;
+ *byte_ptr_1dlut_dst = (uint8_t)sharp_calc_int;
+
+ byte_ptr_1dlut_src++;
+ byte_ptr_1dlut_dst++;
+ }
+
+ /* Update 1dlut table and sharpness level */
+ memcpy((void *)filter_isharp_1D_lut_pregen[setup].value, (void *)filter_pregen_store, size_1dlut);
+ filter_isharp_1D_lut_pregen[setup].sharpness_numer = sharpnessX1000;
+ filter_isharp_1D_lut_pregen[setup].sharpness_denom = 1000;
+}
+
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup)
+{
+ return filter_isharp_1D_lut_pregen[setup].value;
+}
+
+void spl_init_blur_scale_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
+ filter_isharp_bs_3tap_64p_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
+ filter_isharp_bs_4tap_64p_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
+ filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
+}
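+
+/*
+ * Note: the *_s1_12 tables above are zero until this runs, so
+ * spl_init_blur_scale_coeffs() presumably has to be called once before the
+ * blur & scale getters return meaningful coefficients.
+ */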
+
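+/*
+ * Note: for six taps this returns the 4-tap blur & scale kernel centered in
+ * a six-tap window; filter_isharp_bs_4tap_in_6_64p keeps the outer taps at
+ * or near zero.
+ */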
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
+{
+ if (taps == 3)
+ return spl_get_filter_isharp_bs_3tap_64p();
+ else if (taps == 4)
+ return spl_get_filter_isharp_bs_4tap_64p();
+ else if (taps == 6)
+ return spl_get_filter_isharp_bs_4tap_in_6_64p();
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
+{
+ dscl_prog_data->filter_blur_scale_h =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.h_taps);
+
+ dscl_prog_data->filter_blur_scale_v =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.v_taps);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
index 1aaf4c50c1bc..fe0b12571f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
@@ -7,11 +7,44 @@
#include "dc_spl_types.h"
+#define ISHARP_LUT_TABLE_SIZE 32
const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
+
+struct scale_ratio_to_sharpness_level_lookup {
+ unsigned int ratio_numer;
+ unsigned int ratio_denom;
+ unsigned int sharpness_numer;
+ unsigned int sharpness_denom;
+};
+
+struct isharp_1D_lut_pregen {
+ unsigned int sharpness_numer;
+ unsigned int sharpness_denom;
+ uint32_t value[ISHARP_LUT_TABLE_SIZE];
+};
+
+enum system_setup {
+ SDR_NL = 0,
+ SDR_L,
+ HDR_NL,
+ HDR_L,
+ NUM_SHARPNESS_SETUPS
+};
+
+void spl_init_blur_scale_coeffs(void);
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data);
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio,
+ enum system_setup setup, struct adaptive_sharpness sharpness);
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
new file mode 100644
index 000000000000..09bf82f7d468
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
@@ -0,0 +1,1726 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
+#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_30[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EC, 0x020B, 0x0009,
+ 0x01E2, 0x0211, 0x000D,
+ 0x01D8, 0x0216, 0x0012,
+ 0x01CE, 0x021C, 0x0016,
+ 0x01C4, 0x0221, 0x001B,
+ 0x01BA, 0x0226, 0x0020,
+ 0x01B0, 0x022A, 0x0026,
+ 0x01A6, 0x022F, 0x002B,
+ 0x019C, 0x0233, 0x0031,
+ 0x0192, 0x0238, 0x0036,
+ 0x0188, 0x023C, 0x003C,
+ 0x017E, 0x0240, 0x0042,
+ 0x0174, 0x0244, 0x0048,
+ 0x016A, 0x0248, 0x004E,
+ 0x0161, 0x024A, 0x0055,
+ 0x0157, 0x024E, 0x005B,
+ 0x014D, 0x0251, 0x0062,
+ 0x0144, 0x0253, 0x0069,
+ 0x013A, 0x0256, 0x0070,
+ 0x0131, 0x0258, 0x0077,
+ 0x0127, 0x025B, 0x007E,
+ 0x011E, 0x025C, 0x0086,
+ 0x0115, 0x025E, 0x008D,
+ 0x010B, 0x0260, 0x0095,
+ 0x0102, 0x0262, 0x009C,
+ 0x00F9, 0x0263, 0x00A4,
+ 0x00F0, 0x0264, 0x00AC,
+ 0x00E7, 0x0265, 0x00B4,
+ 0x00DF, 0x0264, 0x00BD,
+ 0x00D6, 0x0265, 0x00C5,
+ 0x00CD, 0x0266, 0x00CD,
+};
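+// Layout note: 99 entries = 33 stored phases x 3 taps; each row above holds
+// one phase's taps in S1.10.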
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_40[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EB, 0x020E, 0x0007,
+ 0x01E1, 0x0214, 0x000B,
+ 0x01D7, 0x021A, 0x000F,
+ 0x01CD, 0x0220, 0x0013,
+ 0x01C2, 0x0226, 0x0018,
+ 0x01B8, 0x022C, 0x001C,
+ 0x01AE, 0x0231, 0x0021,
+ 0x01A3, 0x0237, 0x0026,
+ 0x0199, 0x023C, 0x002B,
+ 0x018F, 0x0240, 0x0031,
+ 0x0185, 0x0245, 0x0036,
+ 0x017A, 0x024A, 0x003C,
+ 0x0170, 0x024F, 0x0041,
+ 0x0166, 0x0253, 0x0047,
+ 0x015C, 0x0257, 0x004D,
+ 0x0152, 0x025A, 0x0054,
+ 0x0148, 0x025E, 0x005A,
+ 0x013E, 0x0261, 0x0061,
+ 0x0134, 0x0264, 0x0068,
+ 0x012B, 0x0266, 0x006F,
+ 0x0121, 0x0269, 0x0076,
+ 0x0117, 0x026C, 0x007D,
+ 0x010E, 0x026E, 0x0084,
+ 0x0104, 0x0270, 0x008C,
+ 0x00FB, 0x0271, 0x0094,
+ 0x00F2, 0x0272, 0x009C,
+ 0x00E9, 0x0273, 0x00A4,
+ 0x00E0, 0x0274, 0x00AC,
+ 0x00D7, 0x0275, 0x00B4,
+ 0x00CE, 0x0275, 0x00BD,
+ 0x00C5, 0x0276, 0x00C5,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_50[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F5, 0x0209, 0x0002,
+ 0x01EA, 0x0211, 0x0005,
+ 0x01DF, 0x021A, 0x0007,
+ 0x01D4, 0x0222, 0x000A,
+ 0x01C9, 0x022A, 0x000D,
+ 0x01BE, 0x0232, 0x0010,
+ 0x01B3, 0x0239, 0x0014,
+ 0x01A8, 0x0241, 0x0017,
+ 0x019D, 0x0248, 0x001B,
+ 0x0192, 0x024F, 0x001F,
+ 0x0187, 0x0255, 0x0024,
+ 0x017C, 0x025C, 0x0028,
+ 0x0171, 0x0262, 0x002D,
+ 0x0166, 0x0268, 0x0032,
+ 0x015B, 0x026E, 0x0037,
+ 0x0150, 0x0273, 0x003D,
+ 0x0146, 0x0278, 0x0042,
+ 0x013B, 0x027D, 0x0048,
+ 0x0130, 0x0282, 0x004E,
+ 0x0126, 0x0286, 0x0054,
+ 0x011B, 0x028A, 0x005B,
+ 0x0111, 0x028D, 0x0062,
+ 0x0107, 0x0290, 0x0069,
+ 0x00FD, 0x0293, 0x0070,
+ 0x00F3, 0x0296, 0x0077,
+ 0x00E9, 0x0298, 0x007F,
+ 0x00DF, 0x029A, 0x0087,
+ 0x00D5, 0x029C, 0x008F,
+ 0x00CC, 0x029D, 0x0097,
+ 0x00C3, 0x029E, 0x009F,
+ 0x00BA, 0x029E, 0x00A8,
+ 0x00B1, 0x029E, 0x00B1,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_60[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F4, 0x020B, 0x0001,
+ 0x01E8, 0x0216, 0x0002,
+ 0x01DC, 0x0221, 0x0003,
+ 0x01D0, 0x022B, 0x0005,
+ 0x01C4, 0x0235, 0x0007,
+ 0x01B8, 0x0240, 0x0008,
+ 0x01AC, 0x0249, 0x000B,
+ 0x01A0, 0x0253, 0x000D,
+ 0x0194, 0x025C, 0x0010,
+ 0x0188, 0x0265, 0x0013,
+ 0x017C, 0x026E, 0x0016,
+ 0x0170, 0x0277, 0x0019,
+ 0x0164, 0x027F, 0x001D,
+ 0x0158, 0x0287, 0x0021,
+ 0x014C, 0x028F, 0x0025,
+ 0x0140, 0x0297, 0x0029,
+ 0x0135, 0x029D, 0x002E,
+ 0x0129, 0x02A4, 0x0033,
+ 0x011D, 0x02AB, 0x0038,
+ 0x0112, 0x02B0, 0x003E,
+ 0x0107, 0x02B5, 0x0044,
+ 0x00FC, 0x02BA, 0x004A,
+ 0x00F1, 0x02BF, 0x0050,
+ 0x00E6, 0x02C3, 0x0057,
+ 0x00DB, 0x02C7, 0x005E,
+ 0x00D1, 0x02CA, 0x0065,
+ 0x00C7, 0x02CC, 0x006D,
+ 0x00BD, 0x02CE, 0x0075,
+ 0x00B3, 0x02D0, 0x007D,
+ 0x00A9, 0x02D2, 0x0085,
+ 0x00A0, 0x02D2, 0x008E,
+ 0x0097, 0x02D2, 0x0097,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_70[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F3, 0x020D, 0x0000,
+ 0x01E5, 0x021B, 0x0000,
+ 0x01D8, 0x0228, 0x0000,
+ 0x01CB, 0x0235, 0x0000,
+ 0x01BD, 0x0243, 0x0000,
+ 0x01B0, 0x024F, 0x0001,
+ 0x01A2, 0x025C, 0x0002,
+ 0x0195, 0x0268, 0x0003,
+ 0x0187, 0x0275, 0x0004,
+ 0x017A, 0x0280, 0x0006,
+ 0x016D, 0x028C, 0x0007,
+ 0x015F, 0x0298, 0x0009,
+ 0x0152, 0x02A2, 0x000C,
+ 0x0145, 0x02AD, 0x000E,
+ 0x0138, 0x02B7, 0x0011,
+ 0x012B, 0x02C0, 0x0015,
+ 0x011E, 0x02CA, 0x0018,
+ 0x0111, 0x02D3, 0x001C,
+ 0x0105, 0x02DB, 0x0020,
+ 0x00F8, 0x02E3, 0x0025,
+ 0x00EC, 0x02EA, 0x002A,
+ 0x00E0, 0x02F1, 0x002F,
+ 0x00D5, 0x02F6, 0x0035,
+ 0x00C9, 0x02FC, 0x003B,
+ 0x00BE, 0x0301, 0x0041,
+ 0x00B3, 0x0305, 0x0048,
+ 0x00A8, 0x0309, 0x004F,
+ 0x009E, 0x030C, 0x0056,
+ 0x0094, 0x030E, 0x005E,
+ 0x008A, 0x0310, 0x0066,
+ 0x0081, 0x0310, 0x006F,
+ 0x0077, 0x0312, 0x0077,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_80[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F1, 0x0210, 0x0FFF,
+ 0x01E2, 0x0220, 0x0FFE,
+ 0x01D2, 0x0232, 0x0FFC,
+ 0x01C3, 0x0241, 0x0FFC,
+ 0x01B4, 0x0251, 0x0FFB,
+ 0x01A4, 0x0262, 0x0FFA,
+ 0x0195, 0x0271, 0x0FFA,
+ 0x0186, 0x0281, 0x0FF9,
+ 0x0176, 0x0291, 0x0FF9,
+ 0x0167, 0x02A0, 0x0FF9,
+ 0x0158, 0x02AE, 0x0FFA,
+ 0x0149, 0x02BD, 0x0FFA,
+ 0x013A, 0x02CB, 0x0FFB,
+ 0x012C, 0x02D7, 0x0FFD,
+ 0x011D, 0x02E5, 0x0FFE,
+ 0x010F, 0x02F1, 0x0000,
+ 0x0101, 0x02FD, 0x0002,
+ 0x00F3, 0x0308, 0x0005,
+ 0x00E5, 0x0313, 0x0008,
+ 0x00D8, 0x031D, 0x000B,
+ 0x00CB, 0x0326, 0x000F,
+ 0x00BE, 0x032F, 0x0013,
+ 0x00B2, 0x0337, 0x0017,
+ 0x00A6, 0x033E, 0x001C,
+ 0x009A, 0x0345, 0x0021,
+ 0x008F, 0x034A, 0x0027,
+ 0x0084, 0x034F, 0x002D,
+ 0x0079, 0x0353, 0x0034,
+ 0x006F, 0x0356, 0x003B,
+ 0x0065, 0x0358, 0x0043,
+ 0x005C, 0x0359, 0x004B,
+ 0x0053, 0x035A, 0x0053,
+};
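+// From this scale ratio on, the outermost tap dips negative (0x0FFF is
+// -1/1024 in S1.10 two's complement), the usual Lanczos undershoot as the
+// kernel narrows toward 1:1.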
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_90[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EE, 0x0214, 0x0FFE,
+ 0x01DC, 0x0228, 0x0FFC,
+ 0x01CA, 0x023C, 0x0FFA,
+ 0x01B9, 0x024F, 0x0FF8,
+ 0x01A7, 0x0262, 0x0FF7,
+ 0x0195, 0x0276, 0x0FF5,
+ 0x0183, 0x028A, 0x0FF3,
+ 0x0172, 0x029C, 0x0FF2,
+ 0x0160, 0x02AF, 0x0FF1,
+ 0x014F, 0x02C2, 0x0FEF,
+ 0x013E, 0x02D4, 0x0FEE,
+ 0x012D, 0x02E5, 0x0FEE,
+ 0x011C, 0x02F7, 0x0FED,
+ 0x010C, 0x0307, 0x0FED,
+ 0x00FB, 0x0318, 0x0FED,
+ 0x00EC, 0x0327, 0x0FED,
+ 0x00DC, 0x0336, 0x0FEE,
+ 0x00CD, 0x0344, 0x0FEF,
+ 0x00BE, 0x0352, 0x0FF0,
+ 0x00B0, 0x035E, 0x0FF2,
+ 0x00A2, 0x036A, 0x0FF4,
+ 0x0095, 0x0375, 0x0FF6,
+ 0x0088, 0x037F, 0x0FF9,
+ 0x007B, 0x0388, 0x0FFD,
+ 0x006F, 0x0391, 0x0000,
+ 0x0064, 0x0397, 0x0005,
+ 0x0059, 0x039D, 0x000A,
+ 0x004E, 0x03A3, 0x000F,
+ 0x0045, 0x03A6, 0x0015,
+ 0x003B, 0x03A9, 0x001C,
+ 0x0033, 0x03AA, 0x0023,
+ 0x002A, 0x03AC, 0x002A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_1_00[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EB, 0x0217, 0x0FFE,
+ 0x01D5, 0x022F, 0x0FFC,
+ 0x01C0, 0x0247, 0x0FF9,
+ 0x01AB, 0x025E, 0x0FF7,
+ 0x0196, 0x0276, 0x0FF4,
+ 0x0181, 0x028D, 0x0FF2,
+ 0x016C, 0x02A5, 0x0FEF,
+ 0x0158, 0x02BB, 0x0FED,
+ 0x0144, 0x02D1, 0x0FEB,
+ 0x0130, 0x02E8, 0x0FE8,
+ 0x011C, 0x02FE, 0x0FE6,
+ 0x0109, 0x0313, 0x0FE4,
+ 0x00F6, 0x0328, 0x0FE2,
+ 0x00E4, 0x033C, 0x0FE0,
+ 0x00D2, 0x034F, 0x0FDF,
+ 0x00C0, 0x0363, 0x0FDD,
+ 0x00B0, 0x0374, 0x0FDC,
+ 0x009F, 0x0385, 0x0FDC,
+ 0x0090, 0x0395, 0x0FDB,
+ 0x0081, 0x03A4, 0x0FDB,
+ 0x0072, 0x03B3, 0x0FDB,
+ 0x0064, 0x03C0, 0x0FDC,
+ 0x0057, 0x03CC, 0x0FDD,
+ 0x004B, 0x03D6, 0x0FDF,
+ 0x003F, 0x03E0, 0x0FE1,
+ 0x0034, 0x03E8, 0x0FE4,
+ 0x002A, 0x03EF, 0x0FE7,
+ 0x0020, 0x03F5, 0x0FEB,
+ 0x0017, 0x03FA, 0x0FEF,
+ 0x000F, 0x03FD, 0x0FF4,
+ 0x0007, 0x03FF, 0x0FFA,
+ 0x0000, 0x0400, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_30[132] = {
+ 0x0104, 0x01F8, 0x0104, 0x0000,
+ 0x00FE, 0x01F7, 0x010A, 0x0001,
+ 0x00F8, 0x01F6, 0x010F, 0x0003,
+ 0x00F2, 0x01F5, 0x0114, 0x0005,
+ 0x00EB, 0x01F4, 0x011B, 0x0006,
+ 0x00E5, 0x01F3, 0x0120, 0x0008,
+ 0x00DF, 0x01F2, 0x0125, 0x000A,
+ 0x00DA, 0x01F0, 0x012A, 0x000C,
+ 0x00D4, 0x01EE, 0x0130, 0x000E,
+ 0x00CE, 0x01ED, 0x0135, 0x0010,
+ 0x00C8, 0x01EB, 0x013A, 0x0013,
+ 0x00C2, 0x01E9, 0x0140, 0x0015,
+ 0x00BD, 0x01E7, 0x0145, 0x0017,
+ 0x00B7, 0x01E5, 0x014A, 0x001A,
+ 0x00B1, 0x01E2, 0x0151, 0x001C,
+ 0x00AC, 0x01E0, 0x0155, 0x001F,
+ 0x00A7, 0x01DD, 0x015A, 0x0022,
+ 0x00A1, 0x01DB, 0x015F, 0x0025,
+ 0x009C, 0x01D8, 0x0165, 0x0027,
+ 0x0097, 0x01D5, 0x016A, 0x002A,
+ 0x0092, 0x01D2, 0x016E, 0x002E,
+ 0x008C, 0x01CF, 0x0174, 0x0031,
+ 0x0087, 0x01CC, 0x0179, 0x0034,
+ 0x0083, 0x01C9, 0x017D, 0x0037,
+ 0x007E, 0x01C5, 0x0182, 0x003B,
+ 0x0079, 0x01C2, 0x0187, 0x003E,
+ 0x0074, 0x01BE, 0x018C, 0x0042,
+ 0x0070, 0x01BA, 0x0190, 0x0046,
+ 0x006B, 0x01B7, 0x0195, 0x0049,
+ 0x0066, 0x01B3, 0x019A, 0x004D,
+ 0x0062, 0x01AF, 0x019E, 0x0051,
+ 0x005E, 0x01AB, 0x01A2, 0x0055,
+ 0x005A, 0x01A6, 0x01A6, 0x005A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_40[132] = {
+ 0x00FB, 0x0209, 0x00FC, 0x0000,
+ 0x00F5, 0x0209, 0x0101, 0x0001,
+ 0x00EE, 0x0208, 0x0108, 0x0002,
+ 0x00E8, 0x0207, 0x010E, 0x0003,
+ 0x00E2, 0x0206, 0x0114, 0x0004,
+ 0x00DB, 0x0205, 0x011A, 0x0006,
+ 0x00D5, 0x0204, 0x0120, 0x0007,
+ 0x00CF, 0x0203, 0x0125, 0x0009,
+ 0x00C9, 0x0201, 0x012C, 0x000A,
+ 0x00C3, 0x01FF, 0x0132, 0x000C,
+ 0x00BD, 0x01FD, 0x0138, 0x000E,
+ 0x00B7, 0x01FB, 0x013E, 0x0010,
+ 0x00B1, 0x01F9, 0x0144, 0x0012,
+ 0x00AC, 0x01F7, 0x0149, 0x0014,
+ 0x00A6, 0x01F4, 0x0150, 0x0016,
+ 0x00A0, 0x01F2, 0x0156, 0x0018,
+ 0x009B, 0x01EF, 0x015C, 0x001A,
+ 0x0095, 0x01EC, 0x0162, 0x001D,
+ 0x0090, 0x01E9, 0x0168, 0x001F,
+ 0x008B, 0x01E6, 0x016D, 0x0022,
+ 0x0085, 0x01E3, 0x0173, 0x0025,
+ 0x0080, 0x01DF, 0x0179, 0x0028,
+ 0x007B, 0x01DC, 0x017E, 0x002B,
+ 0x0076, 0x01D8, 0x0184, 0x002E,
+ 0x0071, 0x01D4, 0x018A, 0x0031,
+ 0x006D, 0x01D1, 0x018E, 0x0034,
+ 0x0068, 0x01CD, 0x0193, 0x0038,
+ 0x0063, 0x01C8, 0x019A, 0x003B,
+ 0x005F, 0x01C4, 0x019E, 0x003F,
+ 0x005B, 0x01C0, 0x01A3, 0x0042,
+ 0x0056, 0x01BB, 0x01A9, 0x0046,
+ 0x0052, 0x01B7, 0x01AD, 0x004A,
+ 0x004E, 0x01B2, 0x01B2, 0x004E,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_50[132] = {
+ 0x00E5, 0x0236, 0x00E5, 0x0000,
+ 0x00DE, 0x0235, 0x00ED, 0x0000,
+ 0x00D7, 0x0235, 0x00F4, 0x0000,
+ 0x00D0, 0x0235, 0x00FB, 0x0000,
+ 0x00C9, 0x0234, 0x0102, 0x0001,
+ 0x00C2, 0x0233, 0x010A, 0x0001,
+ 0x00BC, 0x0232, 0x0111, 0x0001,
+ 0x00B5, 0x0230, 0x0119, 0x0002,
+ 0x00AE, 0x022F, 0x0121, 0x0002,
+ 0x00A8, 0x022D, 0x0128, 0x0003,
+ 0x00A2, 0x022B, 0x012F, 0x0004,
+ 0x009B, 0x0229, 0x0137, 0x0005,
+ 0x0095, 0x0226, 0x013F, 0x0006,
+ 0x008F, 0x0224, 0x0146, 0x0007,
+ 0x0089, 0x0221, 0x014E, 0x0008,
+ 0x0083, 0x021E, 0x0155, 0x000A,
+ 0x007E, 0x021B, 0x015C, 0x000B,
+ 0x0078, 0x0217, 0x0164, 0x000D,
+ 0x0072, 0x0213, 0x016D, 0x000E,
+ 0x006D, 0x0210, 0x0173, 0x0010,
+ 0x0068, 0x020C, 0x017A, 0x0012,
+ 0x0063, 0x0207, 0x0182, 0x0014,
+ 0x005E, 0x0203, 0x0189, 0x0016,
+ 0x0059, 0x01FE, 0x0191, 0x0018,
+ 0x0054, 0x01F9, 0x0198, 0x001B,
+ 0x0050, 0x01F4, 0x019F, 0x001D,
+ 0x004B, 0x01EF, 0x01A6, 0x0020,
+ 0x0047, 0x01EA, 0x01AC, 0x0023,
+ 0x0043, 0x01E4, 0x01B3, 0x0026,
+ 0x003F, 0x01DF, 0x01B9, 0x0029,
+ 0x003B, 0x01D9, 0x01C0, 0x002C,
+ 0x0037, 0x01D3, 0x01C6, 0x0030,
+ 0x0033, 0x01CD, 0x01CD, 0x0033,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_60[132] = {
+ 0x00C8, 0x026F, 0x00C9, 0x0000,
+ 0x00C0, 0x0270, 0x00D1, 0x0FFF,
+ 0x00B8, 0x0270, 0x00D9, 0x0FFF,
+ 0x00B1, 0x0270, 0x00E1, 0x0FFE,
+ 0x00A9, 0x026F, 0x00EB, 0x0FFD,
+ 0x00A2, 0x026E, 0x00F3, 0x0FFD,
+ 0x009A, 0x026D, 0x00FD, 0x0FFC,
+ 0x0093, 0x026C, 0x0105, 0x0FFC,
+ 0x008C, 0x026A, 0x010F, 0x0FFB,
+ 0x0085, 0x0268, 0x0118, 0x0FFB,
+ 0x007E, 0x0265, 0x0122, 0x0FFB,
+ 0x0078, 0x0263, 0x012A, 0x0FFB,
+ 0x0071, 0x0260, 0x0134, 0x0FFB,
+ 0x006B, 0x025C, 0x013E, 0x0FFB,
+ 0x0065, 0x0259, 0x0147, 0x0FFB,
+ 0x005F, 0x0255, 0x0151, 0x0FFB,
+ 0x0059, 0x0251, 0x015A, 0x0FFC,
+ 0x0054, 0x024D, 0x0163, 0x0FFC,
+ 0x004E, 0x0248, 0x016D, 0x0FFD,
+ 0x0049, 0x0243, 0x0176, 0x0FFE,
+ 0x0044, 0x023E, 0x017F, 0x0FFF,
+ 0x003F, 0x0238, 0x0189, 0x0000,
+ 0x003A, 0x0232, 0x0193, 0x0001,
+ 0x0036, 0x022C, 0x019C, 0x0002,
+ 0x0031, 0x0226, 0x01A5, 0x0004,
+ 0x002D, 0x021F, 0x01AF, 0x0005,
+ 0x0029, 0x0218, 0x01B8, 0x0007,
+ 0x0025, 0x0211, 0x01C1, 0x0009,
+ 0x0022, 0x020A, 0x01C9, 0x000B,
+ 0x001E, 0x0203, 0x01D2, 0x000D,
+ 0x001B, 0x01FB, 0x01DA, 0x0010,
+ 0x0018, 0x01F3, 0x01E3, 0x0012,
+ 0x0015, 0x01EB, 0x01EB, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_70[132] = {
+ 0x00A3, 0x02B9, 0x00A4, 0x0000,
+ 0x009A, 0x02BA, 0x00AD, 0x0FFF,
+ 0x0092, 0x02BA, 0x00B6, 0x0FFE,
+ 0x0089, 0x02BA, 0x00C1, 0x0FFC,
+ 0x0081, 0x02B9, 0x00CB, 0x0FFB,
+ 0x0079, 0x02B8, 0x00D5, 0x0FFA,
+ 0x0071, 0x02B7, 0x00DF, 0x0FF9,
+ 0x0069, 0x02B5, 0x00EA, 0x0FF8,
+ 0x0062, 0x02B3, 0x00F4, 0x0FF7,
+ 0x005B, 0x02B0, 0x00FF, 0x0FF6,
+ 0x0054, 0x02AD, 0x010B, 0x0FF4,
+ 0x004D, 0x02A9, 0x0117, 0x0FF3,
+ 0x0046, 0x02A5, 0x0123, 0x0FF2,
+ 0x0040, 0x02A1, 0x012D, 0x0FF2,
+ 0x003A, 0x029C, 0x0139, 0x0FF1,
+ 0x0034, 0x0297, 0x0145, 0x0FF0,
+ 0x002F, 0x0292, 0x0150, 0x0FEF,
+ 0x0029, 0x028C, 0x015C, 0x0FEF,
+ 0x0024, 0x0285, 0x0169, 0x0FEE,
+ 0x001F, 0x027F, 0x0174, 0x0FEE,
+ 0x001B, 0x0278, 0x017F, 0x0FEE,
+ 0x0016, 0x0270, 0x018D, 0x0FED,
+ 0x0012, 0x0268, 0x0199, 0x0FED,
+ 0x000E, 0x0260, 0x01A4, 0x0FEE,
+ 0x000B, 0x0258, 0x01AF, 0x0FEE,
+ 0x0007, 0x024F, 0x01BC, 0x0FEE,
+ 0x0004, 0x0246, 0x01C7, 0x0FEF,
+ 0x0001, 0x023D, 0x01D3, 0x0FEF,
+ 0x0FFE, 0x0233, 0x01DF, 0x0FF0,
+ 0x0FFC, 0x0229, 0x01EA, 0x0FF1,
+ 0x0FFA, 0x021F, 0x01F4, 0x0FF3,
+ 0x0FF8, 0x0215, 0x01FF, 0x0FF4,
+ 0x0FF6, 0x020A, 0x020A, 0x0FF6,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_80[132] = {
+ 0x0075, 0x0315, 0x0076, 0x0000,
+ 0x006C, 0x0316, 0x007F, 0x0FFF,
+ 0x0062, 0x0316, 0x008A, 0x0FFE,
+ 0x0059, 0x0315, 0x0096, 0x0FFC,
+ 0x0050, 0x0314, 0x00A1, 0x0FFB,
+ 0x0048, 0x0312, 0x00AD, 0x0FF9,
+ 0x0040, 0x0310, 0x00B8, 0x0FF8,
+ 0x0038, 0x030D, 0x00C5, 0x0FF6,
+ 0x0030, 0x030A, 0x00D1, 0x0FF5,
+ 0x0029, 0x0306, 0x00DE, 0x0FF3,
+ 0x0022, 0x0301, 0x00EB, 0x0FF2,
+ 0x001C, 0x02FC, 0x00F8, 0x0FF0,
+ 0x0015, 0x02F7, 0x0106, 0x0FEE,
+ 0x0010, 0x02F1, 0x0112, 0x0FED,
+ 0x000A, 0x02EA, 0x0121, 0x0FEB,
+ 0x0005, 0x02E3, 0x012F, 0x0FE9,
+ 0x0000, 0x02DB, 0x013D, 0x0FE8,
+ 0x0FFB, 0x02D3, 0x014C, 0x0FE6,
+ 0x0FF7, 0x02CA, 0x015A, 0x0FE5,
+ 0x0FF3, 0x02C1, 0x0169, 0x0FE3,
+ 0x0FF0, 0x02B7, 0x0177, 0x0FE2,
+ 0x0FEC, 0x02AD, 0x0186, 0x0FE1,
+ 0x0FE9, 0x02A2, 0x0196, 0x0FDF,
+ 0x0FE7, 0x0297, 0x01A4, 0x0FDE,
+ 0x0FE4, 0x028C, 0x01B3, 0x0FDD,
+ 0x0FE2, 0x0280, 0x01C2, 0x0FDC,
+ 0x0FE0, 0x0274, 0x01D0, 0x0FDC,
+ 0x0FDF, 0x0268, 0x01DE, 0x0FDB,
+ 0x0FDD, 0x025B, 0x01EE, 0x0FDA,
+ 0x0FDC, 0x024E, 0x01FC, 0x0FDA,
+ 0x0FDB, 0x0241, 0x020A, 0x0FDA,
+ 0x0FDB, 0x0233, 0x0218, 0x0FDA,
+ 0x0FDA, 0x0226, 0x0226, 0x0FDA,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_90[132] = {
+ 0x003F, 0x0383, 0x003E, 0x0000,
+ 0x0034, 0x0383, 0x004A, 0x0FFF,
+ 0x002B, 0x0383, 0x0054, 0x0FFE,
+ 0x0021, 0x0381, 0x0061, 0x0FFD,
+ 0x0019, 0x037F, 0x006C, 0x0FFC,
+ 0x0010, 0x037C, 0x0079, 0x0FFB,
+ 0x0008, 0x0378, 0x0086, 0x0FFA,
+ 0x0001, 0x0374, 0x0093, 0x0FF8,
+ 0x0FFA, 0x036E, 0x00A1, 0x0FF7,
+ 0x0FF3, 0x0368, 0x00B0, 0x0FF5,
+ 0x0FED, 0x0361, 0x00BF, 0x0FF3,
+ 0x0FE8, 0x035A, 0x00CD, 0x0FF1,
+ 0x0FE2, 0x0352, 0x00DC, 0x0FF0,
+ 0x0FDE, 0x0349, 0x00EB, 0x0FEE,
+ 0x0FD9, 0x033F, 0x00FC, 0x0FEC,
+ 0x0FD5, 0x0335, 0x010D, 0x0FE9,
+ 0x0FD2, 0x032A, 0x011D, 0x0FE7,
+ 0x0FCF, 0x031E, 0x012E, 0x0FE5,
+ 0x0FCC, 0x0312, 0x013F, 0x0FE3,
+ 0x0FCA, 0x0305, 0x0150, 0x0FE1,
+ 0x0FC8, 0x02F8, 0x0162, 0x0FDE,
+ 0x0FC6, 0x02EA, 0x0174, 0x0FDC,
+ 0x0FC5, 0x02DC, 0x0185, 0x0FDA,
+ 0x0FC4, 0x02CD, 0x0197, 0x0FD8,
+ 0x0FC3, 0x02BE, 0x01AA, 0x0FD5,
+ 0x0FC3, 0x02AF, 0x01BB, 0x0FD3,
+ 0x0FC3, 0x029F, 0x01CD, 0x0FD1,
+ 0x0FC3, 0x028E, 0x01E0, 0x0FCF,
+ 0x0FC3, 0x027E, 0x01F2, 0x0FCD,
+ 0x0FC4, 0x026D, 0x0203, 0x0FCC,
+ 0x0FC5, 0x025C, 0x0215, 0x0FCA,
+ 0x0FC6, 0x024B, 0x0227, 0x0FC8,
+ 0x0FC7, 0x0239, 0x0239, 0x0FC7,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_1_00[132] = {
+ 0x0000, 0x0400, 0x0000, 0x0000,
+ 0x0FF6, 0x03FF, 0x000B, 0x0000,
+ 0x0FED, 0x03FE, 0x0015, 0x0000,
+ 0x0FE4, 0x03FB, 0x0022, 0x0FFF,
+ 0x0FDC, 0x03F7, 0x002E, 0x0FFF,
+ 0x0FD5, 0x03F2, 0x003B, 0x0FFE,
+ 0x0FCE, 0x03EC, 0x0048, 0x0FFE,
+ 0x0FC8, 0x03E5, 0x0056, 0x0FFD,
+ 0x0FC3, 0x03DC, 0x0065, 0x0FFC,
+ 0x0FBE, 0x03D3, 0x0075, 0x0FFA,
+ 0x0FB9, 0x03C9, 0x0085, 0x0FF9,
+ 0x0FB6, 0x03BE, 0x0094, 0x0FF8,
+ 0x0FB2, 0x03B2, 0x00A6, 0x0FF6,
+ 0x0FB0, 0x03A5, 0x00B7, 0x0FF4,
+ 0x0FAD, 0x0397, 0x00CA, 0x0FF2,
+ 0x0FAB, 0x0389, 0x00DC, 0x0FF0,
+ 0x0FAA, 0x0379, 0x00EF, 0x0FEE,
+ 0x0FA9, 0x0369, 0x0102, 0x0FEC,
+ 0x0FA9, 0x0359, 0x0115, 0x0FE9,
+ 0x0FA9, 0x0348, 0x0129, 0x0FE6,
+ 0x0FA9, 0x0336, 0x013D, 0x0FE4,
+ 0x0FA9, 0x0323, 0x0153, 0x0FE1,
+ 0x0FAA, 0x0310, 0x0168, 0x0FDE,
+ 0x0FAC, 0x02FD, 0x017C, 0x0FDB,
+ 0x0FAD, 0x02E9, 0x0192, 0x0FD8,
+ 0x0FAF, 0x02D5, 0x01A7, 0x0FD5,
+ 0x0FB1, 0x02C0, 0x01BD, 0x0FD2,
+ 0x0FB3, 0x02AC, 0x01D2, 0x0FCF,
+ 0x0FB5, 0x0296, 0x01E9, 0x0FCC,
+ 0x0FB8, 0x0281, 0x01FE, 0x0FC9,
+ 0x0FBA, 0x026C, 0x0214, 0x0FC6,
+ 0x0FBD, 0x0256, 0x022A, 0x0FC3,
+ 0x0FC0, 0x0240, 0x0240, 0x0FC0,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_30[198] = {
+ 0x004B, 0x0100, 0x0169, 0x0101, 0x004B, 0x0000,
+ 0x0049, 0x00FD, 0x0169, 0x0103, 0x004E, 0x0000,
+ 0x0047, 0x00FA, 0x0169, 0x0106, 0x0050, 0x0000,
+ 0x0045, 0x00F7, 0x0168, 0x0109, 0x0052, 0x0001,
+ 0x0043, 0x00F5, 0x0168, 0x010B, 0x0054, 0x0001,
+ 0x0040, 0x00F2, 0x0168, 0x010E, 0x0057, 0x0001,
+ 0x003E, 0x00EF, 0x0168, 0x0110, 0x0059, 0x0002,
+ 0x003C, 0x00EC, 0x0167, 0x0113, 0x005C, 0x0002,
+ 0x003A, 0x00E9, 0x0167, 0x0116, 0x005E, 0x0002,
+ 0x0038, 0x00E6, 0x0166, 0x0118, 0x0061, 0x0003,
+ 0x0036, 0x00E3, 0x0165, 0x011C, 0x0063, 0x0003,
+ 0x0034, 0x00E0, 0x0165, 0x011D, 0x0066, 0x0004,
+ 0x0033, 0x00DD, 0x0164, 0x0120, 0x0068, 0x0004,
+ 0x0031, 0x00DA, 0x0163, 0x0122, 0x006B, 0x0005,
+ 0x002F, 0x00D7, 0x0163, 0x0125, 0x006D, 0x0005,
+ 0x002D, 0x00D3, 0x0162, 0x0128, 0x0070, 0x0006,
+ 0x002B, 0x00D0, 0x0161, 0x012A, 0x0073, 0x0007,
+ 0x002A, 0x00CD, 0x0160, 0x012D, 0x0075, 0x0007,
+ 0x0028, 0x00CA, 0x015F, 0x012F, 0x0078, 0x0008,
+ 0x0026, 0x00C7, 0x015E, 0x0131, 0x007B, 0x0009,
+ 0x0025, 0x00C4, 0x015D, 0x0133, 0x007E, 0x0009,
+ 0x0023, 0x00C1, 0x015C, 0x0136, 0x0080, 0x000A,
+ 0x0022, 0x00BE, 0x015A, 0x0138, 0x0083, 0x000B,
+ 0x0020, 0x00BB, 0x0159, 0x013A, 0x0086, 0x000C,
+ 0x001F, 0x00B8, 0x0158, 0x013B, 0x0089, 0x000D,
+ 0x001E, 0x00B5, 0x0156, 0x013E, 0x008C, 0x000D,
+ 0x001C, 0x00B2, 0x0155, 0x0140, 0x008F, 0x000E,
+ 0x001B, 0x00AF, 0x0153, 0x0143, 0x0091, 0x000F,
+ 0x0019, 0x00AC, 0x0152, 0x0145, 0x0094, 0x0010,
+ 0x0018, 0x00A9, 0x0150, 0x0147, 0x0097, 0x0011,
+ 0x0017, 0x00A6, 0x014F, 0x0148, 0x009A, 0x0012,
+ 0x0016, 0x00A3, 0x014D, 0x0149, 0x009D, 0x0014,
+ 0x0015, 0x00A0, 0x014B, 0x014B, 0x00A0, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_40[198] = {
+ 0x0028, 0x0106, 0x01A3, 0x0107, 0x0028, 0x0000,
+ 0x0026, 0x0102, 0x01A3, 0x010A, 0x002B, 0x0000,
+ 0x0024, 0x00FE, 0x01A3, 0x010F, 0x002D, 0x0FFF,
+ 0x0022, 0x00FA, 0x01A3, 0x0113, 0x002F, 0x0FFF,
+ 0x0021, 0x00F6, 0x01A3, 0x0116, 0x0031, 0x0FFF,
+ 0x001F, 0x00F2, 0x01A2, 0x011B, 0x0034, 0x0FFE,
+ 0x001D, 0x00EE, 0x01A2, 0x011F, 0x0036, 0x0FFE,
+ 0x001B, 0x00EA, 0x01A1, 0x0123, 0x0039, 0x0FFE,
+ 0x0019, 0x00E6, 0x01A1, 0x0127, 0x003B, 0x0FFE,
+ 0x0018, 0x00E2, 0x01A0, 0x012A, 0x003E, 0x0FFE,
+ 0x0016, 0x00DE, 0x01A0, 0x012E, 0x0041, 0x0FFD,
+ 0x0015, 0x00DA, 0x019F, 0x0132, 0x0043, 0x0FFD,
+ 0x0013, 0x00D6, 0x019E, 0x0136, 0x0046, 0x0FFD,
+ 0x0012, 0x00D2, 0x019D, 0x0139, 0x0049, 0x0FFD,
+ 0x0010, 0x00CE, 0x019C, 0x013D, 0x004C, 0x0FFD,
+ 0x000F, 0x00CA, 0x019A, 0x0141, 0x004F, 0x0FFD,
+ 0x000E, 0x00C6, 0x0199, 0x0144, 0x0052, 0x0FFD,
+ 0x000D, 0x00C2, 0x0197, 0x0148, 0x0055, 0x0FFD,
+ 0x000B, 0x00BE, 0x0196, 0x014C, 0x0058, 0x0FFD,
+ 0x000A, 0x00BA, 0x0195, 0x014F, 0x005B, 0x0FFD,
+ 0x0009, 0x00B6, 0x0193, 0x0153, 0x005E, 0x0FFD,
+ 0x0008, 0x00B2, 0x0191, 0x0157, 0x0061, 0x0FFD,
+ 0x0007, 0x00AE, 0x0190, 0x015A, 0x0064, 0x0FFD,
+ 0x0006, 0x00AA, 0x018E, 0x015D, 0x0068, 0x0FFD,
+ 0x0005, 0x00A6, 0x018C, 0x0161, 0x006B, 0x0FFD,
+ 0x0005, 0x00A2, 0x0189, 0x0164, 0x006F, 0x0FFD,
+ 0x0004, 0x009E, 0x0187, 0x0167, 0x0072, 0x0FFE,
+ 0x0003, 0x009A, 0x0185, 0x016B, 0x0075, 0x0FFE,
+ 0x0002, 0x0096, 0x0183, 0x016E, 0x0079, 0x0FFE,
+ 0x0002, 0x0093, 0x0180, 0x016F, 0x007D, 0x0FFF,
+ 0x0001, 0x008F, 0x017E, 0x0173, 0x0080, 0x0FFF,
+ 0x0001, 0x008B, 0x017B, 0x0175, 0x0084, 0x0000,
+ 0x0000, 0x0087, 0x0179, 0x0179, 0x0087, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_50[198] = {
+ 0x0000, 0x0107, 0x01F3, 0x0106, 0x0000, 0x0000,
+ 0x0FFE, 0x0101, 0x01F3, 0x010D, 0x0002, 0x0FFF,
+ 0x0FFD, 0x00FB, 0x01F3, 0x0113, 0x0003, 0x0FFF,
+ 0x0FFC, 0x00F6, 0x01F3, 0x0118, 0x0005, 0x0FFE,
+ 0x0FFA, 0x00F0, 0x01F3, 0x011E, 0x0007, 0x0FFE,
+ 0x0FF9, 0x00EB, 0x01F2, 0x0124, 0x0009, 0x0FFD,
+ 0x0FF8, 0x00E5, 0x01F2, 0x0129, 0x000B, 0x0FFD,
+ 0x0FF7, 0x00E0, 0x01F1, 0x012F, 0x000D, 0x0FFC,
+ 0x0FF6, 0x00DA, 0x01F0, 0x0135, 0x0010, 0x0FFB,
+ 0x0FF5, 0x00D4, 0x01EF, 0x013B, 0x0012, 0x0FFB,
+ 0x0FF4, 0x00CF, 0x01EE, 0x0141, 0x0014, 0x0FFA,
+ 0x0FF3, 0x00C9, 0x01ED, 0x0147, 0x0017, 0x0FF9,
+ 0x0FF2, 0x00C4, 0x01EB, 0x014C, 0x001A, 0x0FF9,
+ 0x0FF1, 0x00BF, 0x01EA, 0x0152, 0x001C, 0x0FF8,
+ 0x0FF1, 0x00B9, 0x01E8, 0x0157, 0x001F, 0x0FF8,
+ 0x0FF0, 0x00B4, 0x01E6, 0x015D, 0x0022, 0x0FF7,
+ 0x0FF0, 0x00AE, 0x01E4, 0x0163, 0x0025, 0x0FF6,
+ 0x0FEF, 0x00A9, 0x01E2, 0x0168, 0x0028, 0x0FF6,
+ 0x0FEF, 0x00A4, 0x01DF, 0x016E, 0x002B, 0x0FF5,
+ 0x0FEF, 0x009F, 0x01DD, 0x0172, 0x002E, 0x0FF5,
+ 0x0FEE, 0x009A, 0x01DA, 0x0178, 0x0032, 0x0FF4,
+ 0x0FEE, 0x0094, 0x01D8, 0x017E, 0x0035, 0x0FF3,
+ 0x0FEE, 0x008F, 0x01D5, 0x0182, 0x0039, 0x0FF3,
+ 0x0FEE, 0x008A, 0x01D2, 0x0188, 0x003C, 0x0FF2,
+ 0x0FEE, 0x0085, 0x01CF, 0x018C, 0x0040, 0x0FF2,
+ 0x0FEE, 0x0081, 0x01CB, 0x0191, 0x0044, 0x0FF1,
+ 0x0FEE, 0x007C, 0x01C8, 0x0196, 0x0047, 0x0FF1,
+ 0x0FEE, 0x0077, 0x01C4, 0x019C, 0x004B, 0x0FF0,
+ 0x0FEE, 0x0072, 0x01C1, 0x01A0, 0x004F, 0x0FF0,
+ 0x0FEE, 0x006E, 0x01BD, 0x01A4, 0x0053, 0x0FF0,
+ 0x0FEE, 0x0069, 0x01B9, 0x01A9, 0x0058, 0x0FEF,
+ 0x0FEE, 0x0065, 0x01B5, 0x01AD, 0x005C, 0x0FEF,
+ 0x0FEF, 0x0060, 0x01B1, 0x01B1, 0x0060, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_60[198] = {
+ 0x0FD9, 0x00FB, 0x0258, 0x00FB, 0x0FD9, 0x0000,
+ 0x0FD9, 0x00F3, 0x0258, 0x0102, 0x0FDA, 0x0000,
+ 0x0FD8, 0x00EB, 0x0258, 0x010B, 0x0FDB, 0x0FFF,
+ 0x0FD8, 0x00E3, 0x0258, 0x0112, 0x0FDC, 0x0FFF,
+ 0x0FD8, 0x00DC, 0x0257, 0x011B, 0x0FDC, 0x0FFE,
+ 0x0FD7, 0x00D4, 0x0256, 0x0123, 0x0FDE, 0x0FFE,
+ 0x0FD7, 0x00CD, 0x0255, 0x012B, 0x0FDF, 0x0FFD,
+ 0x0FD7, 0x00C5, 0x0254, 0x0133, 0x0FE0, 0x0FFD,
+ 0x0FD7, 0x00BE, 0x0252, 0x013C, 0x0FE1, 0x0FFC,
+ 0x0FD7, 0x00B6, 0x0251, 0x0143, 0x0FE3, 0x0FFC,
+ 0x0FD8, 0x00AF, 0x024F, 0x014B, 0x0FE4, 0x0FFB,
+ 0x0FD8, 0x00A8, 0x024C, 0x0154, 0x0FE6, 0x0FFA,
+ 0x0FD8, 0x00A1, 0x024A, 0x015B, 0x0FE8, 0x0FFA,
+ 0x0FD9, 0x009A, 0x0247, 0x0163, 0x0FEA, 0x0FF9,
+ 0x0FD9, 0x0093, 0x0244, 0x016C, 0x0FEC, 0x0FF8,
+ 0x0FD9, 0x008C, 0x0241, 0x0174, 0x0FEF, 0x0FF7,
+ 0x0FDA, 0x0085, 0x023E, 0x017B, 0x0FF1, 0x0FF7,
+ 0x0FDB, 0x007F, 0x023A, 0x0183, 0x0FF3, 0x0FF6,
+ 0x0FDB, 0x0078, 0x0237, 0x018B, 0x0FF6, 0x0FF5,
+ 0x0FDC, 0x0072, 0x0233, 0x0192, 0x0FF9, 0x0FF4,
+ 0x0FDD, 0x006C, 0x022F, 0x0199, 0x0FFC, 0x0FF3,
+ 0x0FDD, 0x0065, 0x022A, 0x01A3, 0x0FFF, 0x0FF2,
+ 0x0FDE, 0x005F, 0x0226, 0x01AA, 0x0002, 0x0FF1,
+ 0x0FDF, 0x005A, 0x0221, 0x01B0, 0x0006, 0x0FF0,
+ 0x0FE0, 0x0054, 0x021C, 0x01B7, 0x0009, 0x0FF0,
+ 0x0FE1, 0x004E, 0x0217, 0x01BE, 0x000D, 0x0FEF,
+ 0x0FE2, 0x0048, 0x0212, 0x01C6, 0x0010, 0x0FEE,
+ 0x0FE3, 0x0043, 0x020C, 0x01CD, 0x0014, 0x0FED,
+ 0x0FE4, 0x003E, 0x0207, 0x01D3, 0x0018, 0x0FEC,
+ 0x0FE5, 0x0039, 0x0200, 0x01DA, 0x001D, 0x0FEB,
+ 0x0FE6, 0x0034, 0x01FA, 0x01E1, 0x0021, 0x0FEA,
+ 0x0FE7, 0x002F, 0x01F5, 0x01E7, 0x0025, 0x0FE9,
+ 0x0FE8, 0x002A, 0x01EE, 0x01EE, 0x002A, 0x0FE8,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_70[198] = {
+ 0x0FC0, 0x00DA, 0x02CC, 0x00DA, 0x0FC0, 0x0000,
+ 0x0FC1, 0x00D0, 0x02CC, 0x00E4, 0x0FBF, 0x0000,
+ 0x0FC2, 0x00C6, 0x02CB, 0x00EF, 0x0FBE, 0x0000,
+ 0x0FC3, 0x00BC, 0x02CA, 0x00F9, 0x0FBE, 0x0000,
+ 0x0FC4, 0x00B2, 0x02C9, 0x0104, 0x0FBD, 0x0000,
+ 0x0FC5, 0x00A8, 0x02C7, 0x010F, 0x0FBD, 0x0000,
+ 0x0FC7, 0x009F, 0x02C5, 0x0119, 0x0FBC, 0x0000,
+ 0x0FC8, 0x0095, 0x02C3, 0x0124, 0x0FBC, 0x0000,
+ 0x0FC9, 0x008C, 0x02C0, 0x012F, 0x0FBC, 0x0000,
+ 0x0FCB, 0x0083, 0x02BD, 0x0139, 0x0FBC, 0x0000,
+ 0x0FCC, 0x007A, 0x02BA, 0x0144, 0x0FBC, 0x0000,
+ 0x0FCE, 0x0072, 0x02B6, 0x014D, 0x0FBD, 0x0000,
+ 0x0FD0, 0x0069, 0x02B2, 0x0159, 0x0FBD, 0x0FFF,
+ 0x0FD1, 0x0061, 0x02AD, 0x0164, 0x0FBE, 0x0FFF,
+ 0x0FD3, 0x0059, 0x02A9, 0x016E, 0x0FBF, 0x0FFE,
+ 0x0FD4, 0x0051, 0x02A4, 0x017A, 0x0FBF, 0x0FFE,
+ 0x0FD6, 0x0049, 0x029E, 0x0184, 0x0FC1, 0x0FFE,
+ 0x0FD8, 0x0042, 0x0299, 0x018E, 0x0FC2, 0x0FFD,
+ 0x0FD9, 0x003A, 0x0293, 0x019B, 0x0FC3, 0x0FFC,
+ 0x0FDB, 0x0033, 0x028D, 0x01A4, 0x0FC5, 0x0FFC,
+ 0x0FDC, 0x002D, 0x0286, 0x01AF, 0x0FC7, 0x0FFB,
+ 0x0FDE, 0x0026, 0x0280, 0x01BA, 0x0FC8, 0x0FFA,
+ 0x0FE0, 0x001F, 0x0279, 0x01C4, 0x0FCB, 0x0FF9,
+ 0x0FE1, 0x0019, 0x0272, 0x01CE, 0x0FCD, 0x0FF9,
+ 0x0FE3, 0x0013, 0x026A, 0x01D9, 0x0FCF, 0x0FF8,
+ 0x0FE4, 0x000D, 0x0263, 0x01E3, 0x0FD2, 0x0FF7,
+ 0x0FE6, 0x0008, 0x025B, 0x01EC, 0x0FD5, 0x0FF6,
+ 0x0FE7, 0x0002, 0x0253, 0x01F7, 0x0FD8, 0x0FF5,
+ 0x0FE9, 0x0FFD, 0x024A, 0x0202, 0x0FDB, 0x0FF3,
+ 0x0FEA, 0x0FF8, 0x0242, 0x020B, 0x0FDF, 0x0FF2,
+ 0x0FEC, 0x0FF3, 0x0239, 0x0215, 0x0FE2, 0x0FF1,
+ 0x0FED, 0x0FEF, 0x0230, 0x021E, 0x0FE6, 0x0FF0,
+ 0x0FEF, 0x0FEB, 0x0226, 0x0226, 0x0FEB, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_80[198] = {
+ 0x0FBF, 0x00A1, 0x0340, 0x00A1, 0x0FBF, 0x0000,
+ 0x0FC1, 0x0095, 0x0340, 0x00AD, 0x0FBC, 0x0001,
+ 0x0FC4, 0x0089, 0x033E, 0x00BA, 0x0FBA, 0x0001,
+ 0x0FC6, 0x007D, 0x033D, 0x00C6, 0x0FB8, 0x0002,
+ 0x0FC9, 0x0072, 0x033A, 0x00D3, 0x0FB6, 0x0002,
+ 0x0FCC, 0x0067, 0x0338, 0x00DF, 0x0FB3, 0x0003,
+ 0x0FCE, 0x005C, 0x0334, 0x00EE, 0x0FB1, 0x0003,
+ 0x0FD1, 0x0051, 0x0331, 0x00FA, 0x0FAF, 0x0004,
+ 0x0FD3, 0x0047, 0x032D, 0x0108, 0x0FAD, 0x0004,
+ 0x0FD6, 0x003D, 0x0328, 0x0116, 0x0FAB, 0x0004,
+ 0x0FD8, 0x0033, 0x0323, 0x0123, 0x0FAA, 0x0005,
+ 0x0FDB, 0x002A, 0x031D, 0x0131, 0x0FA8, 0x0005,
+ 0x0FDD, 0x0021, 0x0317, 0x013F, 0x0FA7, 0x0005,
+ 0x0FDF, 0x0018, 0x0311, 0x014D, 0x0FA5, 0x0006,
+ 0x0FE2, 0x0010, 0x030A, 0x015A, 0x0FA4, 0x0006,
+ 0x0FE4, 0x0008, 0x0302, 0x0169, 0x0FA3, 0x0006,
+ 0x0FE6, 0x0000, 0x02FB, 0x0177, 0x0FA2, 0x0006,
+ 0x0FE8, 0x0FF9, 0x02F3, 0x0185, 0x0FA1, 0x0006,
+ 0x0FEB, 0x0FF1, 0x02EA, 0x0193, 0x0FA1, 0x0006,
+ 0x0FED, 0x0FEB, 0x02E1, 0x01A1, 0x0FA0, 0x0006,
+ 0x0FEE, 0x0FE4, 0x02D8, 0x01B0, 0x0FA0, 0x0006,
+ 0x0FF0, 0x0FDE, 0x02CE, 0x01BE, 0x0FA0, 0x0006,
+ 0x0FF2, 0x0FD8, 0x02C5, 0x01CB, 0x0FA0, 0x0006,
+ 0x0FF4, 0x0FD3, 0x02BA, 0x01D8, 0x0FA1, 0x0006,
+ 0x0FF6, 0x0FCD, 0x02B0, 0x01E7, 0x0FA1, 0x0005,
+ 0x0FF7, 0x0FC8, 0x02A5, 0x01F5, 0x0FA2, 0x0005,
+ 0x0FF9, 0x0FC4, 0x029A, 0x0202, 0x0FA3, 0x0004,
+ 0x0FFA, 0x0FC0, 0x028E, 0x0210, 0x0FA4, 0x0004,
+ 0x0FFB, 0x0FBC, 0x0283, 0x021D, 0x0FA6, 0x0003,
+ 0x0FFD, 0x0FB8, 0x0276, 0x022A, 0x0FA8, 0x0003,
+ 0x0FFE, 0x0FB4, 0x026B, 0x0237, 0x0FAA, 0x0002,
+ 0x0FFF, 0x0FB1, 0x025E, 0x0245, 0x0FAC, 0x0001,
+ 0x0000, 0x0FAE, 0x0252, 0x0252, 0x0FAE, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_90[198] = {
+ 0x0FD8, 0x0055, 0x03A7, 0x0054, 0x0FD8, 0x0000,
+ 0x0FDB, 0x0047, 0x03A7, 0x0063, 0x0FD4, 0x0000,
+ 0x0FDF, 0x003B, 0x03A5, 0x006F, 0x0FD1, 0x0001,
+ 0x0FE2, 0x002E, 0x03A3, 0x007E, 0x0FCD, 0x0002,
+ 0x0FE5, 0x0022, 0x03A0, 0x008D, 0x0FCA, 0x0002,
+ 0x0FE8, 0x0017, 0x039D, 0x009B, 0x0FC6, 0x0003,
+ 0x0FEB, 0x000C, 0x0398, 0x00AC, 0x0FC2, 0x0003,
+ 0x0FEE, 0x0001, 0x0394, 0x00BA, 0x0FBF, 0x0004,
+ 0x0FF1, 0x0FF7, 0x038E, 0x00CA, 0x0FBB, 0x0005,
+ 0x0FF4, 0x0FED, 0x0388, 0x00DA, 0x0FB8, 0x0005,
+ 0x0FF6, 0x0FE4, 0x0381, 0x00EB, 0x0FB4, 0x0006,
+ 0x0FF9, 0x0FDB, 0x037A, 0x00FA, 0x0FB1, 0x0007,
+ 0x0FFB, 0x0FD3, 0x0372, 0x010B, 0x0FAD, 0x0008,
+ 0x0FFD, 0x0FCB, 0x0369, 0x011D, 0x0FAA, 0x0008,
+ 0x0000, 0x0FC3, 0x0360, 0x012E, 0x0FA6, 0x0009,
+ 0x0002, 0x0FBC, 0x0356, 0x013F, 0x0FA3, 0x000A,
+ 0x0003, 0x0FB6, 0x034C, 0x0150, 0x0FA0, 0x000B,
+ 0x0005, 0x0FB0, 0x0341, 0x0162, 0x0F9D, 0x000B,
+ 0x0007, 0x0FAA, 0x0336, 0x0173, 0x0F9A, 0x000C,
+ 0x0008, 0x0FA5, 0x032A, 0x0185, 0x0F97, 0x000D,
+ 0x000A, 0x0FA0, 0x031E, 0x0197, 0x0F94, 0x000D,
+ 0x000B, 0x0F9B, 0x0311, 0x01A9, 0x0F92, 0x000E,
+ 0x000C, 0x0F97, 0x0303, 0x01BC, 0x0F8F, 0x000F,
+ 0x000D, 0x0F94, 0x02F6, 0x01CD, 0x0F8D, 0x000F,
+ 0x000E, 0x0F91, 0x02E8, 0x01DE, 0x0F8B, 0x0010,
+ 0x000F, 0x0F8E, 0x02D9, 0x01F1, 0x0F89, 0x0010,
+ 0x0010, 0x0F8B, 0x02CA, 0x0202, 0x0F88, 0x0011,
+ 0x0010, 0x0F89, 0x02BB, 0x0214, 0x0F87, 0x0011,
+ 0x0011, 0x0F87, 0x02AB, 0x0226, 0x0F86, 0x0011,
+ 0x0011, 0x0F86, 0x029C, 0x0236, 0x0F85, 0x0012,
+ 0x0011, 0x0F85, 0x028B, 0x0249, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x027B, 0x0259, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x026A, 0x026A, 0x0F84, 0x0012,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = {
+ 0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
+ 0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
+ 0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
+ 0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
+ 0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
+ 0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
+ 0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
+ 0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
+ 0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
+ 0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
+ 0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
+ 0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
+ 0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
+ 0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
+ 0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
+ 0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
+ 0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
+ 0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
+ 0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
+ 0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
+ 0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
+ 0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
+ 0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
+ 0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
+ 0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
+ 0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
+ 0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
+ 0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
+ 0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
+ 0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
+ 0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
+ 0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
+ 0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019,
+};
+
+/* Converted scaler coeff tables from S1.10 to S1.12 */
+static uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99];
+static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132];
+static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198];
+
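+/* Each lookup entry below is { numer, denom, reg_value }: a scale ratio is
+ * matched against the numer/denom breakpoints in ascending order, and the
+ * trailing { -1, -1, ... } sentinel supplies the default register value
+ * (see spl_easf_get_scale_ratio_to_reg_value() below).
+ */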
+struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x251F},
+ {5, 10, 0x291F},
+ {6, 10, 0xA51F},
+ {7, 10, 0xA51F},
+ {8, 10, 0xAA66},
+ {9, 10, 0xA51F},
+ {1, 1, 0xA640},
+ {-1, -1, 0xA640},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x9600},
+ {5, 10, 0xA460},
+ {6, 10, 0xA8E0},
+ {7, 10, 0xAC00},
+ {8, 10, 0xAD20},
+ {9, 10, 0xAFC0},
+ {1, 1, 0xB058},
+ {-1, -1, 0xB058},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x9900},
+ {7, 10, 0xA100},
+ {8, 10, 0xA8C0},
+ {9, 10, 0xAB20},
+ {1, 1, 0xAC00},
+ {-1, -1, 0xAC00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4100},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA4C0},
+ {-1, -1, 0xA8D8},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4000},
+ {9, 10, 0x24FE},
+ {1, 1, 0x2D64},
+ {-1, -1, 0x3ADB},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x3886},
+ {9, 10, 0x3940},
+ {1, 1, 0x3A4E},
+ {-1, -1, 0x3B66},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x36F4},
+ {9, 10, 0x359C},
+ {1, 1, 0x3360},
+ {-1, -1, 0x2F20},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x359C},
+ {1, 1, 0x31F0},
+ {-1, -1, 0x1F00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA400},
+ {-1, -1, 0x9E00},
+};
+
+void spl_init_easf_filter_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30,
+ easf_filter_3tap_64p_ratio_0_30_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40,
+ easf_filter_3tap_64p_ratio_0_40_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50,
+ easf_filter_3tap_64p_ratio_0_50_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60,
+ easf_filter_3tap_64p_ratio_0_60_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70,
+ easf_filter_3tap_64p_ratio_0_70_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80,
+ easf_filter_3tap_64p_ratio_0_80_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90,
+ easf_filter_3tap_64p_ratio_0_90_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00,
+ easf_filter_3tap_64p_ratio_1_00_s1_12, 3);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30,
+ easf_filter_4tap_64p_ratio_0_30_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40,
+ easf_filter_4tap_64p_ratio_0_40_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50,
+ easf_filter_4tap_64p_ratio_0_50_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60,
+ easf_filter_4tap_64p_ratio_0_60_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70,
+ easf_filter_4tap_64p_ratio_0_70_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80,
+ easf_filter_4tap_64p_ratio_0_80_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90,
+ easf_filter_4tap_64p_ratio_0_90_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00,
+ easf_filter_4tap_64p_ratio_1_00_s1_12, 4);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30,
+ easf_filter_6tap_64p_ratio_0_30_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40,
+ easf_filter_6tap_64p_ratio_0_40_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50,
+ easf_filter_6tap_64p_ratio_0_50_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60,
+ easf_filter_6tap_64p_ratio_0_60_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70,
+ easf_filter_6tap_64p_ratio_0_70_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80,
+ easf_filter_6tap_64p_ratio_0_80_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90,
+ easf_filter_6tap_64p_ratio_0_90_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00,
+ easf_filter_6tap_64p_ratio_1_00_s1_12, 6);
+}
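+
+/*
+ * The body of convert_filter_s1_10_to_s1_12() is outside this hunk. A
+ * minimal sketch, assuming the conversion simply adds two fractional bits
+ * (S1.10 -> S1.12, i.e. a multiply by 4, which also keeps two's-complement
+ * negatives such as 0x0FFF == -1/1024 correct inside the wider field):
+ *
+ *	static void convert_s1_10_to_s1_12_sketch(const uint16_t *s1_10_filter,
+ *						  uint16_t *s1_12_filter,
+ *						  int num_taps)
+ *	{
+ *		int i;
+ *		int num_entries = 33 * num_taps; // 64 phases folded to 33 rows
+ *
+ *		for (i = 0; i < num_entries; i++)
+ *			s1_12_filter[i] = s1_10_filter[i] * 4; // << 2
+ *	}
+ */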
+
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_3tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_3tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_3tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_3tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_3tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_3tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_3tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_3tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_4tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_4tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_4tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_4tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_4tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_4tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_4tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_4tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_6tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_6tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_6tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_6tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_6tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_6tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_6tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_6tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 6)
+ return spl_get_easf_filter_6tap_64p(ratio);
+ else if (taps == 4)
+ return spl_get_easf_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return spl_get_easf_filter_3tap_64p(ratio);
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h)
+{
+	/*
+	 * The legacy coefficients were calculated for scaling ratio =
+	 * input / output; the new EASF coefficients are calculated for
+	 * the reciprocal, ratio = output / input.
+	 */
+ if (enable_easf_h) {
+ dscl_prog_data->filter_h = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps, data->recip_ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps_c, data->recip_ratios.horz_c);
+ } else {
+ dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps, data->ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps_c, data->ratios.horz_c);
+ }
+ if (enable_easf_v) {
+ dscl_prog_data->filter_v = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps, data->recip_ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps_c, data->recip_ratios.vert_c);
+ } else {
+ dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps, data->ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps_c, data->ratios.vert_c);
+ }
+}
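+
+/*
+ * Note: data->recip_ratios is assumed to be precomputed by the caller as
+ * the reciprocal of each scaling ratio, e.g. (hypothetically)
+ *
+ *	data->recip_ratios.horz = spl_fixpt_recip(data->ratios.horz);
+ *
+ * so that the EASF tables, generated for output/input, and the legacy
+ * tables, generated for input/output, each receive the convention they
+ * were built for.
+ */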
+
+static uint32_t spl_easf_get_scale_ratio_to_reg_value(struct spl_fixed31_32 ratio,
+ struct scale_ratio_to_reg_value_lookup *lookup_table_base_ptr,
+ unsigned int num_entries)
+{
+ unsigned int count = 0;
+ uint32_t value = 0;
+ struct scale_ratio_to_reg_value_lookup *lookup_table_index_ptr;
+
+ lookup_table_index_ptr = (lookup_table_base_ptr + num_entries - 1);
+ value = lookup_table_index_ptr->reg_value;
+
+	while (count < num_entries) {
+ lookup_table_index_ptr = (lookup_table_base_ptr + count);
+ if (lookup_table_index_ptr->numer < 0)
+ break;
+
+ if (ratio.value < spl_fixpt_from_fraction(
+ lookup_table_index_ptr->numer,
+ lookup_table_index_ptr->denom).value) {
+ value = lookup_table_index_ptr->reg_value;
+ break;
+ }
+
+ count++;
+ }
+ return value;
+}
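+
+/*
+ * Example: against easf_gain_ring4_6tap_lookup, a ratio of 0.55 is not
+ * below 3/10, 4/10 or 5/10 but is below 6/10, so
+ *
+ *	spl_easf_get_scale_ratio_to_reg_value(
+ *		spl_fixpt_from_fraction(55, 100),
+ *		easf_gain_ring4_6tap_lookup,
+ *		sizeof(easf_gain_ring4_6tap_lookup) /
+ *		sizeof(struct scale_ratio_to_reg_value_lookup))
+ *
+ * returns 0xA8E0.
+ */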
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_v_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_v_bf3_mode_lookup, num_entries);
+ return value;
+}
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_h_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_h_bf3_mode_lookup, num_entries);
+ return value;
+}
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_uptilt_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_uptilt_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt_maxval_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt_maxval_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt1_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt1_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
new file mode 100644
index 000000000000..8bb2b8108e38
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_SCL_EASF_FILTERS_H__
+#define __DC_SPL_SCL_EASF_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+struct scale_ratio_to_reg_value_lookup {
+ int numer;
+ int denom;
+ const uint32_t reg_value;
+};
+
+void spl_init_easf_filter_coeffs(void);
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h);
+
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
+
+#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
index c174b2e8a150..b02c7b0b262b 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
@@ -2,7 +2,7 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "dc_spl_types.h"
+#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
//=========================================
// <num_taps> = 2
@@ -1318,97 +1318,97 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_5tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_5tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_6tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_6tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_7tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_7tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_8tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_8tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
@@ -1423,3 +1423,29 @@ const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
+
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 8)
+ return spl_get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return spl_get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return spl_get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return spl_get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return spl_get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return spl_get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return spl_get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
index 6d96aca53b24..48202bc4f81e 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
@@ -7,53 +7,16 @@
#include "dc_spl_types.h"
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_2tap_16p(void);
const uint16_t *spl_get_filter_2tap_64p(void);
-const uint16_t *spl_get_filter_3tap_16p_upscale(void);
-const uint16_t *spl_get_filter_3tap_16p_116(void);
-const uint16_t *spl_get_filter_3tap_16p_149(void);
-const uint16_t *spl_get_filter_3tap_16p_183(void);
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p_upscale(void);
-const uint16_t *spl_get_filter_4tap_16p_116(void);
-const uint16_t *spl_get_filter_4tap_16p_149(void);
-const uint16_t *spl_get_filter_4tap_16p_183(void);
-
-const uint16_t *spl_get_filter_3tap_64p_upscale(void);
-const uint16_t *spl_get_filter_3tap_64p_116(void);
-const uint16_t *spl_get_filter_3tap_64p_149(void);
-const uint16_t *spl_get_filter_3tap_64p_183(void);
-
-const uint16_t *spl_get_filter_4tap_64p_upscale(void);
-const uint16_t *spl_get_filter_4tap_64p_116(void);
-const uint16_t *spl_get_filter_4tap_64p_149(void);
-const uint16_t *spl_get_filter_4tap_64p_183(void);
-
-const uint16_t *spl_get_filter_5tap_64p_upscale(void);
-const uint16_t *spl_get_filter_5tap_64p_116(void);
-const uint16_t *spl_get_filter_5tap_64p_149(void);
-const uint16_t *spl_get_filter_5tap_64p_183(void);
-
-const uint16_t *spl_get_filter_6tap_64p_upscale(void);
-const uint16_t *spl_get_filter_6tap_64p_116(void);
-const uint16_t *spl_get_filter_6tap_64p_149(void);
-const uint16_t *spl_get_filter_6tap_64p_183(void);
-
-const uint16_t *spl_get_filter_7tap_64p_upscale(void);
-const uint16_t *spl_get_filter_7tap_64p_116(void);
-const uint16_t *spl_get_filter_7tap_64p_149(void);
-const uint16_t *spl_get_filter_7tap_64p_183(void);
-
-const uint16_t *spl_get_filter_8tap_64p_upscale(void);
-const uint16_t *spl_get_filter_8tap_64p_116(void);
-const uint16_t *spl_get_filter_8tap_64p_149(void);
-const uint16_t *spl_get_filter_8tap_64p_183(void);
#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 201201d3f55b..85b19ebe2c57 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -2,30 +2,15 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "os_types.h"
-#include "dc_hw_types.h"
-#ifndef ASSERT
-#define ASSERT(_bool) (void *)0
-#endif
-#include "include/fixed31_32.h" // fixed31_32 and related functions
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
-enum lb_memory_config {
- /* Enable all 3 pieces of memory */
- LB_MEMORY_CONFIG_0 = 0,
-
- /* Enable only the first piece of memory */
- LB_MEMORY_CONFIG_1 = 1,
-
- /* Enable only the second piece of memory */
- LB_MEMORY_CONFIG_2 = 2,
-
- /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
- * last piece of chroma memory used for the luma storage
- */
- LB_MEMORY_CONFIG_3 = 3
-};
+#include "spl_os_types.h" // swap
+#ifndef SPL_ASSERT
+#define SPL_ASSERT(_bool) ((void *)0)
+#endif
+#include "spl_fixpt31_32.h" // fixed31_32 and related functions
+#include "spl_custom_float.h" // custom float and related functions
struct spl_size {
uint32_t width;
@@ -39,16 +24,16 @@ struct spl_rect {
};
struct spl_ratios {
- struct fixed31_32 horz;
- struct fixed31_32 vert;
- struct fixed31_32 horz_c;
- struct fixed31_32 vert_c;
+ struct spl_fixed31_32 horz;
+ struct spl_fixed31_32 vert;
+ struct spl_fixed31_32 horz_c;
+ struct spl_fixed31_32 vert_c;
};
struct spl_inits {
- struct fixed31_32 h;
- struct fixed31_32 h_c;
- struct fixed31_32 v;
- struct fixed31_32 v_c;
+ struct spl_fixed31_32 h;
+ struct spl_fixed31_32 h_c;
+ struct spl_fixed31_32 v;
+ struct spl_fixed31_32 v_c;
};
struct spl_taps {
@@ -81,6 +66,8 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
SPL_PIXEL_FORMAT_INVALID,
+ SPL_PIXEL_FORMAT_422BPP8,
+ SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
@@ -88,6 +75,22 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_UNKNOWN
};
+enum lb_memory_config {
+ /* Enable all 3 pieces of memory */
+ LB_MEMORY_CONFIG_0 = 0,
+
+ /* Enable only the first piece of memory */
+ LB_MEMORY_CONFIG_1 = 1,
+
+ /* Enable only the second piece of memory */
+ LB_MEMORY_CONFIG_2 = 2,
+
+ /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
+ * last piece of chroma memory used for the luma storage
+ */
+ LB_MEMORY_CONFIG_3 = 3
+};
+
/* Rotation angle */
enum spl_rotation_angle {
SPL_ROTATION_ANGLE_0 = 0,
@@ -120,6 +123,13 @@ enum spl_color_space {
SPL_COLOR_SPACE_YCBCR709_BLACK,
};
+enum chroma_cositing {
+ CHROMA_COSITING_NONE,
+ CHROMA_COSITING_LEFT,
+ CHROMA_COSITING_TOPLEFT,
+ CHROMA_COSITING_COUNT
+};
+
// Scratch space for calculating scaler params
struct spl_scaler_data {
int h_active;
@@ -129,6 +139,7 @@ struct spl_scaler_data {
struct spl_rect viewport_c;
struct spl_rect recout;
struct spl_ratios ratios;
+ struct spl_ratios recip_ratios;
struct spl_inits inits;
};
@@ -396,13 +407,19 @@ struct dscl_prog_data {
/* blur and scale filter */
const uint16_t *filter_blur_scale_v;
const uint16_t *filter_blur_scale_h;
+ int sharpness_level; /* Track sharpness level */
};
/* SPL input and output definitions */
-// SPL outputs struct
-struct spl_out {
+// SPL scratch struct
+struct spl_scratch {
// Pack all SPL outputs in scl_data
struct spl_scaler_data scl_data;
+};
+
+/* SPL input and output definitions */
+// SPL outputs struct
+struct spl_out {
// Pack all output need to program hw registers
struct dscl_prog_data *dscl_prog_data;
};
@@ -444,14 +461,26 @@ struct basic_out {
bool alpha_en;
bool use_two_pixels_per_container;
};
-enum explicit_sharpness {
- SHARPNESS_LOW = 0,
- SHARPNESS_MID,
- SHARPNESS_HIGH
-};
-struct adaptive_sharpness {
+enum sharpness_setting {
+ SHARPNESS_HW_OFF = 0,
+ SHARPNESS_ZERO,
+ SHARPNESS_CUSTOM
+};
+struct spl_sharpness_range {
+ int sdr_rgb_min;
+ int sdr_rgb_max;
+ int sdr_rgb_mid;
+ int sdr_yuv_min;
+ int sdr_yuv_max;
+ int sdr_yuv_mid;
+ int hdr_rgb_min;
+ int hdr_rgb_max;
+ int hdr_rgb_mid;
+};
+struct adaptive_sharpness {
bool enable;
- enum explicit_sharpness sharpness;
+ int sharpness_level;
+ struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
LLS_PREF_DONT_CARE = 0,
@@ -485,6 +514,11 @@ struct spl_in {
bool prefer_easf;
bool disable_easf;
struct spl_debug debug;
+ bool is_fullscreen;
+ bool is_hdr_on;
+ int h_active;
+ int v_active;
+ int hdr_multx100;
};
// end of SPL inputs
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
new file mode 100644
index 000000000000..be2f34d034c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "spl_custom_float.h"
+
+static bool spl_build_custom_float(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct spl_fixed31_32 mantissa_constant_plus_max_fraction =
+ spl_fixpt_from_fraction((1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct spl_fixed31_32 mantiss;
+
+ if (spl_fixpt_eq(value, spl_fixpt_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_zero)) {
+ *negative = format->sign;
+ value = spl_fixpt_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shl(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(value, spl_fixpt_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (spl_fixpt_le(mantissa_constant_plus_max_fraction, value)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shr(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(mantissa_constant_plus_max_fraction, value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = spl_fixpt_sub(value, spl_fixpt_one);
+
+ if (spl_fixpt_lt(mantiss, spl_fixpt_zero) ||
+ spl_fixpt_lt(spl_fixpt_one, mantiss))
+ mantiss = spl_fixpt_zero;
+ else
+ mantiss = spl_fixpt_shl(mantiss, format->mantissa_bits);
+
+ *mantissa = spl_fixpt_floor(mantiss);
+
+ return true;
+}
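+
+/*
+ * The normalization above shifts the value into [1, 2): inputs below 1
+ * that would need a shift of exp_offset or more flush to zero, and the
+ * mantissa is the fractional remainder scaled by 2^mantissa_bits.
+ */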
+
+static bool spl_setup_custom_float(const struct spl_custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+ uint32_t value = 0;
+
+	/* Verification code: once the calculation is confirmed correct,
+	 * this block can be removed.
+	 */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool spl_convert_to_custom_float_format(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return spl_build_custom_float(value, format, &negative, &mantissa, &exponenta) &&
+ spl_setup_custom_float(format,
+ negative,
+ mantissa,
+ exponenta,
+ result);
+}
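+
+/*
+ * Example: packing 1.0 into an FP16-like layout (sign bit, 5 exponent
+ * bits, 10 mantissa bits). The value is already in [1, 2), so the
+ * exponent stays at the bias (15) and the mantissa is zero:
+ *
+ *	struct spl_custom_float_format fmt = {
+ *		.mantissa_bits = 10, .exponenta_bits = 5, .sign = true };
+ *	uint32_t reg;
+ *
+ *	spl_convert_to_custom_float_format(spl_fixpt_one, &fmt, &reg);
+ *	// reg == 0x3C00 (exponent 15 << 10, mantissa 0)
+ */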
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
new file mode 100644
index 000000000000..cdc4e107b9de
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_CUSTOM_FLOAT_H_
+#define SPL_CUSTOM_FLOAT_H_
+
+#include "spl_os_types.h"
+#include "spl_fixpt31_32.h"
+
+struct spl_custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct spl_custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+bool spl_convert_to_custom_float_format(
+ struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result);
+
+#endif //SPL_CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
new file mode 100644
index 000000000000..5696dafd0894
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_DEBUG_H
+#define SPL_DEBUG_H
+
+#ifdef SPL_ASSERT
+#undef SPL_ASSERT
+#endif
+#define SPL_ASSERT(b)
+
+#define SPL_ASSERT_CRITICAL(expr) do { if (expr) /* Do nothing */; } while (0)
+
+#ifdef SPL_DALMSG
+#undef SPL_DALMSG
+#endif
+#define SPL_DALMSG(b)
+
+#ifdef SPL_DAL_ASSERT_MSG
+#undef SPL_DAL_ASSERT_MSG
+#endif
+#define SPL_DAL_ASSERT_MSG(b, m)
+
+#endif // SPL_DEBUG_H
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
new file mode 100644
index 000000000000..a95565df5487
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_fixpt31_32.h"
+
+static const struct spl_fixed31_32 spl_fixpt_two_pi = { 26986075409LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2 = { 2977044471LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2_div_2 = { 1488522236LL };
+
+static inline unsigned long long abs_i64(
+ long long arg)
+{
+ if (arg > 0)
+ return (unsigned long long)arg;
+ else
+ return (unsigned long long)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline unsigned long long complete_integer_division_u64(
+ unsigned long long dividend,
+ unsigned long long divisor,
+ unsigned long long *remainder)
+{
+ unsigned long long result;
+
+ ASSERT(divisor);
+
+ result = spl_div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
+ unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
+
+ unsigned long long remainder;
+
+ /* determine integer part */
+
+ unsigned long long res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ unsigned long long summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (long long)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
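+
+/*
+ * Example: the result carries 32 fractional bits, so
+ * spl_fixpt_from_fraction(1, 3).value == 0x55555555, i.e.
+ * round(2^32 / 3) = 1431655765; the loop above produces one fractional
+ * bit per iteration and the final step rounds the LSB.
+ */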
+
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
+ unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
+
+ unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ unsigned long long tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ unsigned long long arg_value = abs_i64(arg.value);
+
+ unsigned long long arg_int = GET_INTEGER_PART(arg_value);
+
+ unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ unsigned long long tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
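+	/* the cross term arg_int * arg_fra contributes twice:
+	 * (a + b)^2 = a^2 + 2*a*b + b^2
+	 */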
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
+{
+ /*
+ * @note
+	 * Newton's method would be a faster alternative to the full division used here
+ */
+
+ ASSERT(arg.value);
+
+ return spl_fixpt_from_fraction(
+ spl_fixpt_one.value,
+ arg.value);
+}
+
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 square;
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 27;
+
+ struct spl_fixed31_32 arg_norm = arg;
+
+ if (spl_fixpt_le(
+ spl_fixpt_two_pi,
+ spl_fixpt_abs(arg))) {
+ arg_norm = spl_fixpt_sub(
+ arg_norm,
+ spl_fixpt_mul_int(
+ spl_fixpt_two_pi,
+ (int)spl_div64_s64(
+ arg_norm.value,
+ spl_fixpt_two_pi.value)));
+ }
+
+ square = spl_fixpt_sqr(arg_norm);
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = spl_fixpt_div(
+ spl_fixpt_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg)
+{
+ return spl_fixpt_mul(
+ arg,
+ spl_fixpt_sinc(arg));
+}
+
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct spl_fixed31_32 square = spl_fixpt_sqr(arg);
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 26;
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
+{
+ unsigned int n = 9;
+
+ struct spl_fixed31_32 res = spl_fixpt_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
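+	/* the loop below evaluates the Taylor series of exp(x) in Horner form:
+	 * res = 1 + (x * res) / n, for n = 9 down to 2, then a final 1 + x * res
+	 */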
+
+	ASSERT(spl_fixpt_lt(spl_fixpt_abs(arg), spl_fixpt_one));
+
+ do
+ res = spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_mul(
+ arg,
+ res));
+}
+
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
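+	/* e.g. for x = 3: m = round(3 / 0.6931) = 4, r = 3 - 4 * ln(2) ~= 0.2274,
+	 * so exp(3) = 16 * exp(0.2274) ~= 20.09
+	 */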
+
+ if (spl_fixpt_le(
+ spl_fixpt_ln2_div_2,
+ spl_fixpt_abs(arg))) {
+ int m = spl_fixpt_round(
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_ln2));
+
+ struct spl_fixed31_32 r = spl_fixpt_sub(
+ arg,
+ spl_fixpt_mul_int(
+ spl_fixpt_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(spl_fixpt_lt(
+ spl_fixpt_abs(r),
+ spl_fixpt_one));
+
+ if (m > 0)
+ return spl_fixpt_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (unsigned char)m);
+ else
+ return spl_fixpt_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return spl_fixpt_one;
+}
+
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res = spl_fixpt_neg(spl_fixpt_one);
+ /* TODO improve 1st estimation */
+
+ struct spl_fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct spl_fixed31_32 res1 = spl_fixpt_add(
+ spl_fixpt_sub(
+ res,
+ spl_fixpt_one),
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_exp(res)));
+
+ error = spl_fixpt_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
+
+
+/* This function is a generic helper that translates a fixed point value to
+ * a specified integer format consisting of integer_bits of integer part and
+ * fractional_bits of fractional part. For example, spl_fixpt_u2d19 uses it
+ * to pack a 2-bit integer part and a 19-bit fractional part into a 32-bit
+ * value. It is used in hw programming (scaler).
+ */
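+
+/* e.g. ux_dy(0x180000000LL, 2, 19) returns 0xC0000,
+ * which is 1.5 in u2d19 format (1.5 * 2^19)
+ */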
+
+static inline unsigned int ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ /* 1. create mask of integer part */
+ unsigned int result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
+	/* 3. shrink fixed point integer part to be of integer_bits width */
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+	/* 5. shrink fixed point fractional part to fractional_bits width */
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
+
+static inline unsigned int clamp_ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits,
+ unsigned int min_clamp)
+{
+ unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 3, 19);
+}
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19);
+ else
+ return ux_dy(arg.value, 4, 19);
+}
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_zero;
+ struct spl_fixed31_32 fixpt_int_value = spl_fixpt_zero;
+ long long frac_mask = ((long long)1 << (long long)integer_bits) - 1;
+
+ fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ frac_mask = frac_mask << fractional_bits;
+ fixpt_int_value.value = value & frac_mask;
+ fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ fixpt_value.value |= fixpt_int_value.value;
+ return fixpt_value;
+}
+
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_from_int(int_value);
+
+ fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ return fixpt_value;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
new file mode 100644
index 000000000000..8a045e2f8699
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __SPL_FIXED31_32_H__
+#define __SPL_FIXED31_32_H__
+
+#include "os_types.h"
+#include "spl_os_types.h" // swap
+#ifndef ASSERT
+#define ASSERT(_bool) ((void)0)
+#endif
+
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for sign,
+ * 31 bit for integer part,
+ * 32 bits for fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result is returned.
+ */
+
+struct spl_fixed31_32 {
+ long long value;
+};
+
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct spl_fixed31_32 spl_fixpt_zero = { 0 };
+static const struct spl_fixed31_32 spl_fixpt_epsilon = { 1LL };
+static const struct spl_fixed31_32 spl_fixpt_half = { 0x80000000LL };
+static const struct spl_fixed31_32 spl_fixpt_one = { 0x100000000LL };
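+/* i.e. value holds the number scaled by 2^32: 1.5 would be 0x180000000LL */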
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator);
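+/* e.g. spl_fixpt_from_fraction(1, 3) yields { 0x55555555LL }, ~0.333... */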
+
+/*
+ * @brief
+ * result = arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_from_int(int arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_neg(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_abs(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return spl_fixpt_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool spl_fixpt_lt(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool spl_fixpt_le(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool spl_fixpt_eq(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_min(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct spl_fixed31_32 spl_fixpt_max(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct spl_fixed31_32 spl_fixpt_clamp(
+ struct spl_fixed31_32 arg,
+ struct spl_fixed31_32 min_value,
+ struct spl_fixed31_32 max_value)
+{
+ if (spl_fixpt_le(arg, min_value))
+ return min_value;
+ else if (spl_fixpt_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
+
+ arg.value = arg.value << shift;
+
+ return arg;
+}
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ bool negative = arg.value < 0;
+
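+	/* shift the magnitude so negative values round toward zero
+	 * (a plain >> would round toward negative infinity)
+	 */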
+ if (negative)
+ arg.value = -arg.value;
+ arg.value = arg.value >> shift;
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_add(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_sub(arg1, spl_fixpt_from_int(arg2));
+}
+
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2);
+
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_mul_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_mul(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div_int(struct spl_fixed31_32 arg1, long long arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, spl_fixpt_from_int((int)arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in the [-2pi...2pi] range;
+ * passing arguments outside that range
+ * will cause incorrect results!
+ */
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, the function is verified for abs(arg) <= 1.
+ */
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1
+ * and no normalization is done.
+ * No special action is taken
+ * in case of invalid argument(s). Take care!
+ */
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
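+/* e.g. spl_fixpt_pow(x, spl_fixpt_half) computes sqrt(x) for 0 < x < 1,
+ * via exp(0.5 * log(x))
+ */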
+static inline struct spl_fixed31_32 spl_fixpt_pow(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? spl_fixpt_one : spl_fixpt_zero;
+
+ return spl_fixpt_exp(
+ spl_fixpt_mul(
+ spl_fixpt_log(arg1),
+ arg2));
+}
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+static inline int spl_fixpt_floor(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_half.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_one.value -
+ spl_fixpt_epsilon.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/* The following functions are used in scaler hw programming to convert a
+ * fixed point value to a format with N bits of integer part and 19 bits of
+ * fractional part, e.g. u2d19 has a 2-bit integer part and u0d19 has a
+ * 0-bit integer part.
+ */
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg);
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg);
+
+static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg, unsigned int frac_bits)
+{
+ bool negative = arg.value < 0;
+
+ if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
+ ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ return arg;
+ }
+
+ if (negative)
+ arg.value = -arg.value;
+ arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
new file mode 100644
index 000000000000..709706ed4f2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+/* Copyright 2019 Raptor Engineering, LLC */
+
+#ifndef _SPL_OS_TYPES_H_
+#define _SPL_OS_TYPES_H_
+
+#include <linux/slab.h>
+#include <linux/kgdb.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+/*
+ * general debug capabilities
+ */
+#define SPL_BREAK_TO_DEBUGGER() ASSERT(0)
+
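+/* thin wrappers over the kernel's 64-bit division helpers, so SPL code
+ * never emits a bare 64-bit '/' (which 32-bit targets cannot link)
+ */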
+static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
+{
+ return div_u64_rem(dividend, divisor, remainder);
+}
+
+static inline uint64_t spl_div_u64(uint64_t dividend, uint32_t divisor)
+{
+ return div_u64(dividend, divisor);
+}
+
+static inline uint64_t spl_div64_u64(uint64_t dividend, uint64_t divisor)
+{
+ return div64_u64(dividend, divisor);
+}
+
+static inline uint64_t spl_div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
+{
+ return div64_u64_rem(dividend, divisor, remainder);
+}
+
+static inline int64_t spl_div64_s64(int64_t dividend, int64_t divisor)
+{
+ return div64_s64(dividend, divisor);
+}
+
+#define spl_swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#ifndef spl_min
+#define spl_min(a, b) (((a) < (b)) ? (a):(b))
+#endif
+
+#endif /* _SPL_OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 6589bb9aea6b..cd70453aeae0 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -330,6 +330,9 @@ struct dmub_diagnostic_data {
uint32_t inbox0_rptr;
uint32_t inbox0_wptr;
uint32_t inbox0_size;
+ uint32_t outbox1_rptr;
+ uint32_t outbox1_wptr;
+ uint32_t outbox1_size;
uint32_t gpint_datain0;
struct dmub_srv_debug timeout_info;
uint8_t is_dmcub_enabled : 1;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5ff0a865705f..e20c220aa8b4 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -111,7 +111,7 @@
#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
/* Trace buffer offset for entry */
-#define TRACE_BUFFER_ENTRY_OFFSET 16
+#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
* Maximum number of dirty rects supported by FW.
@@ -336,6 +336,10 @@ union dmub_psr_debug_flags {
*/
uint32_t back_to_back_flip : 1;
+ /**
+ * Enable visual confirm for IPS
+ */
+ uint32_t enable_ips_visual_confirm : 1;
} bitfields;
/**
@@ -1875,7 +1879,12 @@ enum dmub_cmd_idle_opt_type {
/**
* DCN hardware notify idle.
*/
- DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2
+ DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2,
+
+ /**
+ * DCN hardware notify power state.
+ */
+ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
};
/**
@@ -1903,6 +1912,33 @@ struct dmub_rb_cmd_idle_opt_dcn_notify_idle {
};
/**
+ * enum dmub_idle_opt_dc_power_state - DC power states.
+ */
+enum dmub_idle_opt_dc_power_state {
+ DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN = 0,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D0 = 1,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D1 = 2,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D2 = 4,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D3 = 8,
+};
+
+/**
+ * struct dmub_idle_opt_set_dc_power_state_data - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_idle_opt_set_dc_power_state_data {
+ uint8_t power_state; /**< power state */
+ uint8_t pad[3]; /**< padding */
+};
+
+/**
+ * struct dmub_rb_cmd_idle_opt_set_dc_power_state - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_rb_cmd_idle_opt_set_dc_power_state {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_idle_opt_set_dc_power_state_data data;
+};
+
+/**
* struct dmub_clocks - Clock update notification.
*/
struct dmub_clocks {
@@ -3024,14 +3060,6 @@ struct dmub_cmd_update_dirty_rect_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
- /**
- * 16-bit value dicated by driver that indicates the coasting vtotal high byte part.
- */
- uint16_t coasting_vtotal_high;
- /**
- * Explicit padding to 4 byte boundary.
- */
- uint8_t pad[2];
};
/**
@@ -5302,6 +5330,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
*/
struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
+ /**
+ * Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+ struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 662c34e9495c..d9f31b191c69 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -449,6 +449,10 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index e1da270502cc..9600b7f858b0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -459,6 +459,10 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 916ed022e96b..746696b6f09a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -502,6 +502,10 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index cf139e9cc20e..39a8cb6d7523 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -444,6 +444,10 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnost
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index d4cf7ead1d87..990fa1f19c22 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -531,4 +531,10 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne
return arg;
}
+struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
+struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 83479951732a..a48d564d1660 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -61,6 +61,7 @@
#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
+#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 1e495e884484..8bc377560787 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -432,18 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
- hdcp, "bstatus_read"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ hdcp, "bstatus_read");
+
+ mod_hdcp_execute_and_set(check_link_integrity_dp,
&input->link_integrity_check, &status,
- hdcp, "link_integrity_check"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ hdcp, "link_integrity_check");
+
+ mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
- hdcp, "reauth_request_check"))
- goto out;
+ hdcp, "reauth_request_check");
+
out:
return status;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f5b725f10a7c..745fd052840d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -61,7 +61,7 @@ enum amd_apu_flags {
* acquires the list of IP blocks for the GPU in use on initialization.
* It can then operate on this list to perform standard driver operations
* such as: init, fini, suspend, resume, etc.
-*
+*
*
* IP block implementations are named using the following convention:
* <functionality>_v<version> (E.g.: gfx_v6_0).
@@ -251,19 +251,92 @@ enum DC_FEATURE_MASK {
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
+/**
+ * enum DC_DEBUG_MASK - Bits that are useful for debugging the Display Core IP
+ */
enum DC_DEBUG_MASK {
+ /**
+ * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ */
DC_DISABLE_PIPE_SPLIT = 0x1,
+
+ /**
+ * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ */
DC_DISABLE_STUTTER = 0x2,
+
+ /**
+ * @DC_DISABLE_DSC: If set, disable display stream compression
+ */
DC_DISABLE_DSC = 0x4,
+
+ /**
+ * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ */
DC_DISABLE_CLOCK_GATING = 0x8,
+
+ /**
+ * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ */
DC_DISABLE_PSR = 0x10,
+
+ /**
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * if mclk switch in vblank is possible
+ */
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+
+ /**
+ * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ */
DC_DISABLE_MPO = 0x40,
+
+ /**
+ * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ */
DC_ENABLE_DPIA_TRACE = 0x80,
+
+ /**
+ * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * does not default to it.
+ */
DC_ENABLE_DML2 = 0x100,
+
+ /**
+ * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ */
DC_DISABLE_PSR_SU = 0x200,
+
+ /**
+ * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ */
DC_DISABLE_REPLAY = 0x400,
+
+ /**
+ * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * If more than one IPS debug bit is set, the lowest bit takes
+ * precedence. For example, if DC_FORCE_IPS_ENABLE and
+ * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
+ * precedence.
+ */
DC_DISABLE_IPS = 0x800,
+
+ /**
+ * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * *except* when driver goes into suspend.
+ */
+ DC_DISABLE_IPS_DYNAMIC = 0x1000,
+
+ /**
+ * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * there is an enabled display. Otherwise, enable all IPS.
+ */
+ DC_DISABLE_IPS2_DYNAMIC = 0x2000,
+
+ /**
+ * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ */
+ DC_FORCE_IPS_ENABLE = 0x4000,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
index 8ee3149df5b7..2ef1273e65ab 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
@@ -340,8 +340,6 @@
#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009
#define UVD_LMI_CTRL__RFU_MASK 0xf8000000L
-#define UVD_LMI_CTRL__RFU_MASK 0xfc000000L
-#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001a
#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 09cbc3afd6d8..b0fc22383e28 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1038,7 +1038,7 @@ struct display_object_info_table_v1_4
uint16_t supporteddevices;
uint8_t number_of_path;
uint8_t reserved;
- struct atom_display_object_path_v2 display_path[8]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
+ struct atom_display_object_path_v2 display_path[]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
};
struct display_object_info_table_v1_5 {
@@ -1048,7 +1048,7 @@ struct display_object_info_table_v1_5 {
uint8_t reserved;
// the real number of this included in the structure is calculated by using the
// (whole structure size - the header size- number_of_path)/size of atom_display_object_path
- struct atom_display_object_path_v3 display_path[8];
+ struct atom_display_object_path_v3 display_path[];
};
/*
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 6d094cf3587d..7744ca3ef4b1 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -318,6 +318,12 @@ struct kfd2kgd_calls {
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
uint32_t inst);
+ uint64_t (*hqd_get_pq_addr)(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst);
+ uint64_t (*hqd_reset)(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 4b20e2274313..19a48d98830a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -218,6 +218,7 @@ enum pp_mp1_state {
PP_MP1_STATE_SHUTDOWN,
PP_MP1_STATE_UNLOAD,
PP_MP1_STATE_RESET,
+ PP_MP1_STATE_FLR,
};
enum pp_df_cstate {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8b7d6ed7e2ed..9dc82f4d7c93 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -168,7 +168,11 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (pp_funcs && pp_funcs->set_mp1_state) {
+ if (mp1_state == PP_MP1_STATE_FLR) {
+ /* VF lost access to SMU */
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.dpm_enabled = false;
+ } else if (pp_funcs && pp_funcs->set_mp1_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_mp1_state(
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index ca1c7ae8d146..f06b29e33ba4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -1183,6 +1183,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
fw_info = smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, FirmwareInfo),
&size, &frev, &crev);
+ PP_ASSERT_WITH_CODE(fw_info != NULL,
+ "Missing firmware info!", return -EINVAL);
if ((fw_info->ucTableFormatRevision == 1)
&& (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 6e717ddbb029..9ace863792d4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -2934,9 +2934,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- vega10_enable_smc_features(hwmgr, false, feature_mask);
-
- return 0;
+ return vega10_enable_smc_features(hwmgr, false, feature_mask);
}
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 2cf951184561..bb3bc68dfc39 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1257,7 +1257,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
@@ -1265,6 +1264,7 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index ac0dd6b97f8d..e71a721c12b9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -439,7 +439,16 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(BACO_CG), \
__SMU_DUMMY_MAP(SOC_CG), \
__SMU_DUMMY_MAP(LOW_POWER_DCNCLKS), \
- __SMU_DUMMY_MAP(WHISPER_MODE),
+ __SMU_DUMMY_MAP(WHISPER_MODE), \
+ __SMU_DUMMY_MAP(EDC_PWRBRK), \
+ __SMU_DUMMY_MAP(SOC_EDC_XVMIN), \
+ __SMU_DUMMY_MAP(GFX_PSM_DIDT), \
+ __SMU_DUMMY_MAP(APT_ALL_ENABLE), \
+ __SMU_DUMMY_MAP(APT_SQ_THROTTLE), \
+ __SMU_DUMMY_MAP(APT_PF_DCS), \
+ __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \
+ __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
+ __SMU_DUMMY_MAP(FAN_ABNORMAL),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 076620fa3ef5..16af1a329621 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1989,7 +1989,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
- "MEMLK",
+ "MEMCLK",
activity_monitor.Mem_FPS,
activity_monitor.Mem_MinFreqStep,
activity_monitor.Mem_MinActiveFreqType,
@@ -2051,7 +2051,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
activity_monitor.Soc_PD_Data_error_coeff = input[8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
break;
- case 2: /* Memlk */
+ case 2: /* Memclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_MinFreqStep = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0d3e1a121b67..9c3c48297cba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1691,7 +1691,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
- "MEMLK",
+ "MEMCLK",
activity_monitor->Mem_FPS,
activity_monitor->Mem_MinFreqStep,
activity_monitor->Mem_MinActiveFreqType,
@@ -1756,7 +1756,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
activity_monitor->Fclk_PD_Data_error_coeff = input[8];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
break;
- case 2: /* Memlk */
+ case 2: /* Memclk */
activity_monitor->Mem_FPS = input[1];
activity_monitor->Mem_MinFreqStep = input[2];
activity_monitor->Mem_MinActiveFreqType = input[3];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 09973615f210..865e916fc425 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -452,17 +452,26 @@ int smu_v14_0_init_smc_tables(struct smu_context *smu)
ret = -ENOMEM;
goto err3_out;
}
+
+ smu_table->user_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->user_overdrive_table) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
}
smu_table->combo_pptable =
kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
if (!smu_table->combo_pptable) {
ret = -ENOMEM;
- goto err4_out;
+ goto err5_out;
}
return 0;
+err5_out:
+ kfree(smu_table->user_overdrive_table);
err4_out:
kfree(smu_table->boot_overdrive_table);
err3_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 0c09b8c4ff49..43820d7d2c54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -68,6 +68,18 @@
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
+#define PP_OD_FEATURE_GFXCLK_FMIN 0
+#define PP_OD_FEATURE_GFXCLK_FMAX 1
+#define PP_OD_FEATURE_UCLK_FMIN 2
+#define PP_OD_FEATURE_UCLK_FMAX 3
+#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -187,6 +199,15 @@ static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(MEM_TEMP_READ),
FEA_MAP(ATHUB_MMHUB_PG),
FEA_MAP(SOC_PCC),
+ FEA_MAP(EDC_PWRBRK),
+ FEA_MAP(SOC_EDC_XVMIN),
+ FEA_MAP(GFX_PSM_DIDT),
+ FEA_MAP(APT_ALL_ENABLE),
+ FEA_MAP(APT_SQ_THROTTLE),
+ FEA_MAP(APT_PF_DCS),
+ FEA_MAP(GFX_EDC_XVMIN),
+ FEA_MAP(GFX_DIDT_XVMIN),
+ FEA_MAP(FAN_ABNORMAL),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
@@ -203,6 +224,7 @@ static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
+ TAB_MAP(OVERDRIVE),
};
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -674,6 +696,9 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
pcie_table->clk_freq[pcie_table->num_of_link_levels] =
skutable->LclkFreq[link_level];
pcie_table->num_of_link_levels++;
+
+ if (link_level == 0)
+ link_level++;
}
/* dcefclk dpm table setup */
@@ -1028,16 +1053,97 @@ static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
+static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
+ int od_feature_bit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+
+ return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+}
+
+static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min,
+ int32_t *max)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+ int32_t od_min_setting, od_max_setting;
+
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_GFXCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmin;
+ od_max_setting = overdrive_upperlimits->GfxclkFmin;
+ break;
+ case PP_OD_FEATURE_GFXCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmax;
+ od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ break;
+ case PP_OD_FEATURE_UCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->UclkFmin;
+ od_max_setting = overdrive_upperlimits->UclkFmin;
+ break;
+ case PP_OD_FEATURE_UCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->UclkFmax;
+ od_max_setting = overdrive_upperlimits->UclkFmax;
+ break;
+ case PP_OD_FEATURE_GFX_VF_CURVE:
+ od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0];
+ od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0];
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0];
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints[0];
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0];
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0];
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
+ default:
+ od_min_setting = od_max_setting = INT_MAX;
+ break;
+ }
+
+ if (min)
+ *min = od_min_setting;
+ if (max)
+ *max = od_max_setting;
+}
+
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
+ int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1158,6 +1264,183 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFXCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_UCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
+ od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+ break;
+
+ case SMU_OD_VDDGFX_OFFSET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
+ i,
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
+ !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
+ !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1399,7 +1682,27 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- // TODO
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
+ uint32_t power_limit;
+ uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+
+ if (smu_v14_0_get_current_power_limit(smu, &power_limit))
+ power_limit = smu->adev->pm.ac_power ?
+ skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
+ skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit)
+ *max_power_limit = msg_limit;
+
+ if (min_power_limit)
+ *min_power_limit = 0;
return 0;
}
@@ -1905,6 +2208,594 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+}
+
+static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ true);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+
+ return ret;
+}
+
+static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
+
+static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ false);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+
+ return ret;
+}
+
+static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
+ OverDriveTableExternal_t *boot_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
+ OverDriveTableExternal_t *user_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTableExternal_t user_od_table_bak;
+ int ret;
+ int i;
+
+ ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
+ if (ret)
+ return ret;
+
+ smu_v14_0_2_dump_od_table(smu, boot_od_table);
+
+ memcpy(od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
+ * but we have to preserve the user-defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak,
+ user_od_table,
+ sizeof(OverDriveTableExternal_t));
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ user_od_table->OverDriveTable.GfxclkFmin =
+ user_od_table_bak.OverDriveTable.GfxclkFmin;
+ user_od_table->OverDriveTable.GfxclkFmax =
+ user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.UclkFmin =
+ user_od_table_bak.OverDriveTable.UclkFmin;
+ user_od_table->OverDriveTable.UclkFmax =
+ user_od_table_bak.OverDriveTable.UclkFmax;
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
+ user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ }
+
+ smu_v14_0_2_set_supported_od_feature_mask(smu);
+
+ return 0;
+}
+
+static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table = table_context->overdrive_table;
+ OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
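+ /*
+ * Transiently request all four OD features so that the PMFW applies
+ * the complete user table; the mask is cleared again right after.
+ */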
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
+ user_od_table->OverDriveTable.FeatureCtrlMask = 0;
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
+
+ return res;
+}
+
+static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[],
+ uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t offset_of_voltageoffset;
+ int32_t minimum, maximum;
+ uint32_t feature_ctrlmask;
+ int i, ret = 0;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ case 1:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.GfxclkFmin,
+ (uint32_t)od_table->OverDriveTable.GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ case 1:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.UclkFmin,
+ (uint32_t)od_table->OverDriveTable.UclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDGFX_OFFSET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
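+ /* The single user-supplied offset is applied to every V/F curve point */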
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
+ input[0] < 0)
+ return -EINVAL;
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &minimum,
+ &maximum);
+ if (input[1] < minimum ||
+ input[1] > maximum) {
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
+ input[1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size == 1) {
+ ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
+ fallthrough;
+ case PP_OD_COMMIT_DPM_TABLE:
+ /*
+ * The member below tells the PMFW which settings are targeted by
+ * this single operation:
+ * `uint32_t FeatureCtrlMask;`
+ * It does not carry actual information about the user's custom
+ * settings, so we do not cache it.
+ */
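+ /*
+ * Only the members from VoltageOffsetPerZoneBoundary onwards are
+ * compared against, and cached into, the user table.
+ */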
+ offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
+ if (memcmp((u8 *)od_table + offset_of_voltageoffset,
+ table_context->user_overdrive_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
+ smu_v14_0_2_dump_od_table(smu, od_table);
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ od_table->OverDriveTable.FeatureCtrlMask = 0;
+ memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
+ (u8 *)od_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
+
+ if (!memcmp(table_context->user_overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t)))
+ smu->user_dpm_profile.user_od = false;
+ else
+ smu->user_dpm_profile.user_od = true;
+ }
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
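+ /*
+ * Limits up to the stock maximum go through the regular PPT message;
+ * anything above it is expressed via the overdrive table instead.
+ */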
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v14_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
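+ /* Encode the overage as a percentage above the stock limit (e.g. +10%) */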
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@@ -1943,13 +2834,16 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
+ .set_default_od_settings = smu_v14_0_2_set_default_od_settings,
+ .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
+ .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
.get_power_limit = smu_v14_0_2_get_power_limit,
- .set_power_limit = smu_v14_0_set_power_limit,
+ .set_power_limit = smu_v14_0_2_set_power_limit,
.set_power_source = smu_v14_0_set_power_source,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index d794c076bc24..47da848fa3fc 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -11,6 +11,8 @@ ast-y := \
ast_main.o \
ast_mm.o \
ast_mode.o \
- ast_post.o
+ ast_post.o \
+ ast_sil164.o \
+ ast_vga.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index e6c7f0d64e99..00b364f9a71e 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -4,87 +4,94 @@
#include <linux/firmware.h>
#include <linux/delay.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "ast_drv.h"
-bool ast_astdp_is_connected(struct ast_device *ast)
+static bool ast_astdp_is_connected(struct ast_device *ast)
{
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS))
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
return true;
}
-int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
+static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
- struct ast_device *ast = to_ast_device(dev);
- u8 i = 0, j = 0;
+ struct ast_device *ast = data;
+ size_t rdlen = round_up(len, 4);
+ int ret = 0;
+ unsigned int i;
+
+ if (block > 0)
+ return -EIO; /* EDID extension blocks not supported */
/*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
*/
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- ASTDP_HOST_EDID_READ_DONE_MASK))) {
- goto err_astdp_edid_not_ready;
- }
+ mutex_lock(&ast->modeset_lock);
+
+ /* Start reading EDID data */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE, 0x00);
+
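+ /* The MCU exposes the EDID through a 4-byte window selected via VGACRE4 */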
+ for (i = 0; i < rdlen; i += 4) {
+ unsigned int offset;
+ unsigned int j;
+ u8 ediddata[4];
+ u8 vgacre4;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- 0x00);
+ offset = (i + block * EDID_LENGTH) / 4;
+ if (offset >= 64) {
+ ret = -EIO;
+ goto out;
+ }
+ vgacre4 = offset;
- for (i = 0; i < 32; i++) {
/*
* CRE4[7:0]: Read-Pointer for EDID (unit: 4 bytes); valid range: 0~64
*/
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE4,
- ASTDP_AND_CLEAR_MASK, (u8)i);
- j = 0;
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe4, vgacre4);
/*
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
- while ((ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD7,
- ASTDP_EDID_VALID_FLAG_MASK) != 0x01) ||
- (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD6,
- ASTDP_EDID_READ_POINTER_MASK) != i)) {
+ for (j = 0; j < 200; ++j) {
+ u8 vgacrd7, vgacrd6;
+
/*
* Delays get longer with each retry.
- * 1. The Delays are often 2 loops when users request "Display Settings"
+ *
+ * 1. No delay on the first try.
+ * 2. Delays are often 2 loops when users request "Display Settings"
* via right-click of the mouse.
- * 2. The Delays are often longer a lot when system resume from S3/S4.
+ * 3. Delays are often much longer when the system resumes from S3/S4.
*/
- mdelay(j+1);
-
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1,
- ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC,
- ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) {
- goto err_astdp_jump_out_loop_of_edid;
+ if (j)
+ mdelay(j + 1);
+
+ /* Wait for EDID offset to show up in mirror register */
+ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
+ if (vgacrd7 & AST_IO_VGACRD7_EDID_VALID_FLAG) {
+ vgacrd6 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd6);
+ if (vgacrd6 == offset)
+ break;
}
-
- j++;
- if (j > 200)
- goto err_astdp_jump_out_loop_of_edid;
+ }
+ if (j == 200) {
+ ret = -EBUSY;
+ goto out;
}
- *(ediddata) = ast_get_index_reg_mask(ast, AST_IO_VGACRI,
- 0xD8, ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD9,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDA,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDB,
- ASTDP_EDID_READ_DATA_MASK);
+ ediddata[0] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd8);
+ ediddata[1] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd9);
+ ediddata[2] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xda);
+ ediddata[3] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdb);
if (i == 31) {
/*
@@ -96,69 +103,53 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
* Byte 126 indicates the number of extensions to
* follow. 0 represents no extensions.
*/
- *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2);
- *(ediddata + 2) = 0;
+ ediddata[3] = ediddata[3] + ediddata[2];
+ ediddata[2] = 0;
}
- ediddata += 4;
+ memcpy(buf, ediddata, min((len - i), 4));
+ buf += 4;
}
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
+out:
+ /* Signal end of reading */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
- return 0;
+ mutex_unlock(&ast->modeset_lock);
-err_astdp_jump_out_loop_of_edid:
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
- return (~(j+256) + 1);
-
-err_astdp_edid_not_ready:
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING)))
- return (~0xD1 + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS)))
- return (~0xDC + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)))
- return (~0xDF + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
- return (~0xE5 + 1);
-
- return 0;
+ return ret;
}
/*
* Launch Aspeed DP
*/
-void ast_dp_launch(struct drm_device *dev)
+int ast_dp_launch(struct ast_device *ast)
{
- u32 i = 0;
- u8 bDPExecute = 1;
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+ unsigned int i = 10;
- // Wait one second then timeout.
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
- ASTDP_MCU_FW_EXECUTING) {
- i++;
- // wait 100 ms
- msleep(100);
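+ /* Poll for up to ~1 second (10 x 100 ms) for the MCU firmware to start */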
+ while (i) {
+ u8 vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
- if (i >= 10) {
- // DP would not be ready.
- bDPExecute = 0;
+ if (vgacrd1 & AST_IO_VGACRD1_MCU_FW_EXECUTING)
break;
- }
+ --i;
+ msleep(100);
}
-
- if (!bDPExecute)
+ if (!i) {
drm_err(dev, "Wait DPMCU executing timeout\n");
+ return -ENODEV;
+ }
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5,
+ (u8) ~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
+
+ return 0;
}
-bool ast_dp_power_is_on(struct ast_device *ast)
+static bool ast_dp_power_is_on(struct ast_device *ast)
{
u8 vgacre3;
@@ -167,7 +158,7 @@ bool ast_dp_power_is_on(struct ast_device *ast)
return !(vgacre3 & AST_DP_PHY_SLEEP);
}
-void ast_dp_power_on_off(struct drm_device *dev, bool on)
+static void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
@@ -179,11 +170,29 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
// DP Power on/off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
+
+ msleep(50);
}
+static void ast_dp_link_training(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ int i;
+
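+ /* Give the link up to ~1 second (10 x 100 ms) to report success */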
+ for (i = 0; i < 10; i++) {
+ u8 vgacrdc;
+
+ if (i)
+ msleep(100);
+ vgacrdc = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdc);
+ if (vgacrdc & AST_IO_VGACRDC_LINK_SUCCESS)
+ return;
+ }
+ drm_err(dev, "Link training failed\n");
+}
-void ast_dp_set_on_off(struct drm_device *dev, bool on)
+static void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
@@ -192,21 +201,17 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
- // If DP plug in and link successful then check video on / off status
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) {
- video_on_off <<= 4;
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
+ video_on_off <<= 4;
+ while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
- // wait 1 ms
- mdelay(1);
- if (++i > 200)
- break;
- }
+ // wait 1 ms
+ mdelay(1);
+ if (++i > 200)
+ break;
}
}
-void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
+static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
@@ -279,3 +284,188 @@ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mo
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
}
+
+static void ast_wait_for_vretrace(struct ast_device *ast)
+{
+ unsigned long timeout = jiffies + HZ;
+ u8 vgair1;
+
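+ /* Busy-wait for the vertical-retrace flag, but give up after one second */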
+ do {
+ vgair1 = ast_io_read8(ast, AST_IO_VGAIR1_R);
+ } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
+}
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+ struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+
+ ast_dp_set_mode(crtc, vbios_mode_info);
+}
+
+static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ struct ast_connector *ast_connector = &ast->output.astdp.connector;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ ast_dp_power_on_off(dev, AST_DP_POWER_ON);
+ ast_dp_link_training(ast);
+
+ ast_wait_for_vretrace(ast);
+ ast_dp_set_on_off(dev, 1);
+ }
+}
+
+static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_dp_set_on_off(dev, 0);
+ ast_dp_power_on_off(dev, AST_DP_POWER_OFF);
+}
+
+static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
+ .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
+ .atomic_enable = ast_astdp_encoder_helper_atomic_enable,
+ .atomic_disable = ast_astdp_encoder_helper_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ struct ast_device *ast = to_ast_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ drm_edid = drm_edid_read_custom(connector, ast_astdp_read_edid_block, ast);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
+
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct drm_device *dev = connector->dev;
+ struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+ bool power_is_on;
+
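+ /*
+ * Briefly power up the DP PHY so that HPD can be sampled; power it
+ * back down if nothing turned out to be connected.
+ */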
+ mutex_lock(&ast->modeset_lock);
+
+ power_is_on = ast_dp_power_is_on(ast);
+ if (!power_is_on)
+ ast_dp_power_on_off(dev, true);
+
+ if (ast_astdp_is_connected(ast))
+ status = connector_status_connected;
+
+ if (!power_is_on && status == connector_status_disconnected)
+ ast_dp_power_on_off(dev, false);
+
+ mutex_unlock(&ast->modeset_lock);
+
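+ /*
+ * The helper always reports "connected"; only the physical status
+ * is tracked here, bumping the epoch counter whenever it changes.
+ */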
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
+ .get_modes = ast_astdp_connector_helper_get_modes,
+ .detect_ctx = ast_astdp_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_astdp_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_astdp_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.astdp.encoder;
+ struct ast_connector *ast_connector = &ast->output.astdp.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_astdp_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 9a4c3a0963f9..e4c636f45082 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -4,6 +4,11 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
@@ -170,7 +175,7 @@ static void clear_cmd(struct ast_device *ast)
}
#endif
-void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
+static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
{
ast_write_cmd(dev, 0x40);
ast_write_data(dev, mode);
@@ -272,7 +277,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
return true;
}
-bool ast_dp501_is_connected(struct ast_device *ast)
+static bool ast_dp501_is_connected(struct ast_device *ast)
{
u32 boot_address, offset, data;
@@ -313,32 +318,30 @@ bool ast_dp501_is_connected(struct ast_device *ast)
return true;
}
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+static int ast_dp501_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
- struct ast_device *ast = to_ast_device(dev);
- u32 i, boot_address, offset, data;
- u32 *pEDIDidx;
+ struct ast_device *ast = data;
+ size_t rdlen = round_up(len, 4);
+ u32 i, boot_address, offset, ediddata;
- if (!ast_dp501_is_connected(ast))
- return false;
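+ /* The DP501 firmware buffer exposes at most 512 bytes of EDID data */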
+ if (block > (512 / EDID_LENGTH))
+ return -EIO;
+
+ offset = AST_DP501_EDID_DATA + block * EDID_LENGTH;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
- /* Read EDID */
- offset = AST_DP501_EDID_DATA;
- for (i = 0; i < 128; i += 4) {
- data = ast_mindwm(ast, boot_address + offset + i);
- pEDIDidx = (u32 *)(ediddata + i);
- *pEDIDidx = data;
+ for (i = 0; i < rdlen; i += 4) {
+ ediddata = ast_mindwm(ast, boot_address + offset + i);
+ memcpy(buf, &ediddata, min((len - i), 4));
+ buf += 4;
}
} else {
- /* Read EDID */
- offset = AST_DP501_EDID_DATA;
- for (i = 0; i < 128; i += 4) {
- data = readl(ast->dp501_fw_buf + offset + i);
- pEDIDidx = (u32 *)(ediddata + i);
- *pEDIDidx = data;
+ for (i = 0; i < rdlen; i += 4) {
+ ediddata = readl(ast->dp501_fw_buf + offset + i);
+ memcpy(buf, &ediddata, min((len - i), 4));
+ buf += 4;
}
}
@@ -470,3 +473,144 @@ void ast_init_3rdtx(struct drm_device *dev)
}
}
}
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_dp501_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_set_dp501_video_output(dev, 1);
+}
+
+static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_set_dp501_video_output(dev, 0);
+}
+
+static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = {
+ .atomic_enable = ast_dp501_encoder_helper_atomic_enable,
+ .atomic_disable = ast_dp501_encoder_helper_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ struct ast_device *ast = to_ast_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ drm_edid = drm_edid_read_custom(connector, ast_dp501_read_edid_block, ast);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
+
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (ast_dp501_is_connected(ast))
+ status = connector_status_connected;
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+ .get_modes = ast_dp501_connector_helper_get_modes,
+ .detect_ctx = ast_dp501_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_dp501_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_dp501_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.dp501.encoder;
+ struct ast_connector *ast_connector = &ast->output.dp501.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_dp501_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 225817087b4d..3a908bb015fe 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -287,9 +287,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- regs = pcim_iomap(pdev, 1, 0);
- if (!regs)
- return -EIO;
+ regs = pcim_iomap_region(pdev, 1, "ast");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
if (pdev->revision >= 0x40) {
/*
@@ -311,9 +311,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (len < AST_IO_MM_LENGTH)
return -EIO;
- ioregs = pcim_iomap(pdev, 2, 0);
- if (!ioregs)
- return -EIO;
+ ioregs = pcim_iomap_region(pdev, 2, "ast");
+ if (IS_ERR(ioregs))
+ return PTR_ERR(ioregs);
} else {
/*
* Anything else is best effort.
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 47bab5596c16..91fe07cf7b07 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -147,18 +147,19 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
}
/*
- * BMC
+ * Connector
*/
-struct ast_bmc_connector {
+struct ast_connector {
struct drm_connector base;
- struct drm_connector *physical_connector;
+
+ enum drm_connector_status physical_status;
};
-static inline struct ast_bmc_connector *
-to_ast_bmc_connector(struct drm_connector *connector)
+static inline struct ast_connector *
+to_ast_connector(struct drm_connector *connector)
{
- return container_of(connector, struct ast_bmc_connector, base);
+ return container_of(connector, struct ast_connector, base);
}
/*
@@ -192,24 +193,20 @@ struct ast_device {
struct {
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} vga;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} sil164;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} dp501;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} astdp;
- struct {
- struct drm_encoder encoder;
- struct ast_bmc_connector bmc_connector;
- } bmc;
} output;
bool support_wide_screen;
@@ -460,21 +457,17 @@ void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(void __iomem *regs);
+
+int ast_vga_output_init(struct ast_device *ast);
+int ast_sil164_output_init(struct ast_device *ast);
+
/* ast dp501 */
-void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
-bool ast_dp501_is_connected(struct ast_device *ast);
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
-u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+int ast_dp501_output_init(struct ast_device *ast);
/* aspeed DP */
-bool ast_astdp_is_connected(struct ast_device *ast);
-int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev);
-bool ast_dp_power_is_on(struct ast_device *ast);
-void ast_dp_power_on_off(struct drm_device *dev, bool no);
-void ast_dp_set_on_off(struct drm_device *dev, bool no);
-void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
+int ast_dp_launch(struct ast_device *ast);
+int ast_astdp_output_init(struct ast_device *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 0637abb70361..d836f2a4f9f3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -115,8 +115,10 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
} else if (IS_AST_GEN7(ast)) {
if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
ASTDP_DPMCU_TX) {
- ast->tx_chip_types = AST_TX_ASTDP_BIT;
- ast_dp_launch(&ast->base);
+ int ret = ast_dp_launch(ast);
+
+ if (!ret)
+ ast->tx_chip_types = AST_TX_ASTDP_BIT;
}
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 049ee1477c33..ed496fb32bf3 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -34,10 +34,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -47,7 +45,6 @@
#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
-#include "ast_ddc.h"
#include "ast_drv.h"
#include "ast_tables.h"
@@ -1311,571 +1308,6 @@ static int ast_crtc_init(struct drm_device *dev)
}
/*
- * VGA Encoder
- */
-
-static const struct drm_encoder_funcs ast_vga_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-/*
- * VGA Connector
- */
-
-static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
- .get_modes = drm_connector_helper_get_modes,
- .detect_ctx = drm_connector_helper_detect_from_ddc,
-};
-
-static const struct drm_connector_funcs ast_vga_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- struct ast_device *ast = to_ast_device(dev);
- struct i2c_adapter *ddc;
- int ret;
-
- ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA, ddc);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_vga_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.vga.encoder;
- struct drm_connector *connector = &ast->output.vga.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_vga_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * SIL164 Encoder
- */
-
-static const struct drm_encoder_funcs ast_sil164_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-/*
- * SIL164 Connector
- */
-
-static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
- .get_modes = drm_connector_helper_get_modes,
- .detect_ctx = drm_connector_helper_detect_from_ddc,
-};
-
-static const struct drm_connector_funcs ast_sil164_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- struct ast_device *ast = to_ast_device(dev);
- struct i2c_adapter *ddc;
- int ret;
-
- ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII, ddc);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_sil164_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.sil164.encoder;
- struct drm_connector *connector = &ast->output.sil164.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_sil164_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * DP501 Encoder
- */
-
-static const struct drm_encoder_funcs ast_dp501_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_set_dp501_video_output(dev, 1);
-}
-
-static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_set_dp501_video_output(dev, 0);
-}
-
-static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = {
- .atomic_enable = ast_dp501_encoder_helper_atomic_enable,
- .atomic_disable = ast_dp501_encoder_helper_atomic_disable,
-};
-
-/*
- * DP501 Connector
- */
-
-static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
-{
- void *edid;
- bool succ;
- int count;
-
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (!edid)
- goto err_drm_connector_update_edid_property;
-
- succ = ast_dp501_read_edid(connector->dev, edid);
- if (!succ)
- goto err_kfree;
-
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_kfree:
- kfree(edid);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
-static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct ast_device *ast = to_ast_device(connector->dev);
-
- if (ast_dp501_is_connected(ast))
- return connector_status_connected;
- return connector_status_disconnected;
-}
-
-static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
- .get_modes = ast_dp501_connector_helper_get_modes,
- .detect_ctx = ast_dp501_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_dp501_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_dp501_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.dp501.encoder;
- struct drm_connector *connector = &ast->output.dp501.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs);
-
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_dp501_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * ASPEED Display-Port Encoder
- */
-
-static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
-
- ast_dp_set_mode(crtc, vbios_mode_info);
-}
-
-static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
- struct ast_device *ast = to_ast_device(dev);
-
- ast_dp_power_on_off(dev, AST_DP_POWER_ON);
- ast_wait_for_vretrace(ast);
- ast_dp_set_on_off(dev, 1);
-}
-
-static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_dp_set_on_off(dev, 0);
- ast_dp_power_on_off(dev, AST_DP_POWER_OFF);
-}
-
-static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
- .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
- .atomic_enable = ast_astdp_encoder_helper_atomic_enable,
- .atomic_disable = ast_astdp_encoder_helper_atomic_disable,
-};
-
-/*
- * ASPEED Display-Port Connector
- */
-
-static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
-{
- void *edid;
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
-
- int succ;
- int count;
-
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (!edid)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- succ = ast_astdp_read_edid(connector->dev, edid);
- if (succ < 0)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
- kfree(edid);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
-static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(connector->dev);
- enum drm_connector_status status = connector_status_disconnected;
- struct drm_connector_state *connector_state = connector->state;
- bool is_active = false;
-
- mutex_lock(&ast->modeset_lock);
-
- if (connector_state && connector_state->crtc) {
- struct drm_crtc_state *crtc_state = connector_state->crtc->state;
-
- if (crtc_state && crtc_state->active)
- is_active = true;
- }
-
- if (!is_active && !ast_dp_power_is_on(ast)) {
- ast_dp_power_on_off(dev, true);
- msleep(50);
- }
-
- if (ast_astdp_is_connected(ast))
- status = connector_status_connected;
-
- if (!is_active && status == connector_status_disconnected)
- ast_dp_power_on_off(dev, false);
-
- mutex_unlock(&ast->modeset_lock);
-
- return status;
-}
-
-static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
- .get_modes = ast_astdp_connector_helper_get_modes,
- .detect_ctx = ast_astdp_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_astdp_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_astdp_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.astdp.encoder;
- struct drm_connector *connector = &ast->output.astdp.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs);
-
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_astdp_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * BMC virtual Connector
- */
-
-static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
- struct drm_connector *physical_connector = bmc_connector->physical_connector;
-
- /*
- * Most user-space compositors cannot handle more than one connected
- * connector per CRTC. Hence, we only mark the BMC as connected if the
- * physical connector is disconnected. If the physical connector's status
- * is connected or unknown, the BMC remains disconnected. This has no
- * effect on the output of the BMC.
- *
- * FIXME: Remove this logic once user-space compositors can handle more
- * than one connector per CRTC. The BMC should always be connected.
- */
-
- if (physical_connector && physical_connector->status == connector_status_disconnected)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
-{
- return drm_add_modes_noedid(connector, 4096, 4096);
-}
-
-static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
- .get_modes = ast_bmc_connector_helper_get_modes,
- .detect_ctx = ast_bmc_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_bmc_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_bmc_connector_init(struct drm_device *dev,
- struct ast_bmc_connector *bmc_connector,
- struct drm_connector *physical_connector)
-{
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
-
- bmc_connector->physical_connector = physical_connector;
-
- return 0;
-}
-
-static int ast_bmc_output_init(struct ast_device *ast,
- struct drm_connector *physical_connector)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.bmc.encoder;
- struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_encoder_init(dev, encoder,
- &ast_bmc_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, "ast_bmc");
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
/*
* Mode config
*/
@@ -1926,7 +1358,6 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
- struct drm_connector *physical_connector = NULL;
int ret;
ret = drmm_mutex_init(dev, &ast->modeset_lock);
@@ -1971,29 +1402,22 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.vga.connector;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.sil164.connector;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.dp501.connector;
}
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.astdp.connector;
}
- ret = ast_bmc_output_init(ast, physical_connector);
- if (ret)
- return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 22f548805dfb..65755798ab94 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -351,7 +351,7 @@ void ast_post_gpu(struct drm_device *dev)
if (IS_AST_GEN7(ast)) {
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
- ast_dp_launch(dev);
+ ast_dp_launch(ast);
} else if (ast->config_mode == ast_use_p2a) {
if (IS_AST_GEN6(ast))
ast_post_chip_2500(dev);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 75671d345057..040961cc1a19 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -37,6 +37,12 @@
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
+#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
+#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
+#define AST_IO_VGACRDF_HPD BIT(0)
+#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
+
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
@@ -67,18 +73,6 @@
#define AST_DP_VIDEO_ENABLE BIT(0)
/*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
- */
-#define ASTDP_MCU_FW_EXECUTING BIT(5)
-#define ASTDP_LINK_SUCCESS BIT(0)
-#define ASTDP_HPD BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
-
-/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
@@ -86,10 +80,6 @@
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
-#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
-#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
-#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
-
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
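The replacement macros fold the CRTC register index (D1, D7, DC, DF, E5) and the bit position into the name, so call sites no longer need the removed comment block to be intelligible. As a rough sketch of how such a flag is consumed, the bit is read back through the VGA CRTC index/data port pair; the 0x3d4/0x3d5 offsets below are the legacy VGA ports and ioregs is an assumed pointer to the mapped register window, so treat both as illustrative rather than as this driver's exact accessors.

/* Sketch: select VGA CRTC register 0xdc, then test the link-success bit. */
static bool example_astdp_link_success(void __iomem *ioregs)
{
	u8 vgacrdc;

	iowrite8(0xdc, ioregs + 0x3d4);    /* CRTC index port */
	vgacrdc = ioread8(ioregs + 0x3d5); /* CRTC data port */

	return vgacrdc & AST_IO_VGACRDC_LINK_SUCCESS;
}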
diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c
new file mode 100644
index 000000000000..496c7120e515
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_sil164.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "ast_ddc.h"
+#include "ast_drv.h"
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_sil164_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ count = drm_connector_helper_get_modes(connector);
+ } else {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_sil164_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ enum drm_connector_status status;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
+ .get_modes = ast_sil164_connector_helper_get_modes,
+ .detect_ctx = ast_sil164_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_sil164_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
+ int ret;
+
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_sil164_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.sil164.encoder;
+ struct ast_connector *ast_connector = &ast->output.sil164.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_sil164_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
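Note the trick in ast_sil164_connector_helper_detect_ctx() above (ast_vga.c below repeats it verbatim): the real DDC probe result is cached in physical_status and any change bumps connector->epoch_counter, yet the callback always returns connector_status_connected. The CRTC can therefore keep scanning out for the BMC with no monitor attached, while userspace still observes monitor changes through the epoch counter. A reduced sketch of the pattern; my_connector, to_my_connector() and my_probe_ddc() are illustrative names, not part of this patch.

static int my_detect_ctx(struct drm_connector *connector,
			 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct my_connector *mc = to_my_connector(connector);
	enum drm_connector_status status = my_probe_ddc(mc);

	/* Cache the physical state; signal changes via the epoch counter. */
	if (status != mc->physical_status)
		++connector->epoch_counter;
	mc->physical_status = status;

	/* Always report "connected" so the BMC output keeps working. */
	return connector_status_connected;
}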
diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c
new file mode 100644
index 000000000000..3e815da43fbd
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vga.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "ast_ddc.h"
+#include "ast_drv.h"
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_vga_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ count = drm_connector_helper_get_modes(connector);
+ } else {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_vga_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ enum drm_connector_status status;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
+ .get_modes = ast_vga_connector_helper_get_modes,
+ .detect_ctx = ast_vga_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
+ int ret;
+
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_vga_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.vga.encoder;
+ struct ast_connector *ast_connector = &ast->output.vga.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_vga_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index ddf1e4424ffd..bfa88409a7ff 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -36,11 +36,6 @@
static const bool verify_fast_training;
-struct bridge_init {
- struct i2c_client *client;
- struct device_node *node;
-};
-
static void analogix_dp_init_dp(struct analogix_dp_device *dp)
{
analogix_dp_reset(dp);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 88e4aa5830f3..a2e9bb485c36 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1647,25 +1647,15 @@ static int anx7625_get_swing_setting(struct device *dev,
{
int num_regs;
- if (of_get_property(dev->of_node,
- "analogix,lane0-swing", &num_regs)) {
- if (num_regs > DP_TX_SWING_REG_CNT)
- num_regs = DP_TX_SWING_REG_CNT;
-
+ num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane0-swing",
+ pdata->lane0_reg_data, 1, DP_TX_SWING_REG_CNT);
+ if (num_regs > 0)
pdata->dp_lane0_swing_reg_cnt = num_regs;
- of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
- pdata->lane0_reg_data, num_regs);
- }
-
- if (of_get_property(dev->of_node,
- "analogix,lane1-swing", &num_regs)) {
- if (num_regs > DP_TX_SWING_REG_CNT)
- num_regs = DP_TX_SWING_REG_CNT;
+ num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane1-swing",
+ pdata->lane1_reg_data, 1, DP_TX_SWING_REG_CNT);
+ if (num_regs > 0)
pdata->dp_lane1_swing_reg_cnt = num_regs;
- of_property_read_u8_array(dev->of_node, "analogix,lane1-swing",
- pdata->lane1_reg_data, num_regs);
- }
return 0;
}
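The conversion works because of_property_read_variable_u8_array() collapses the old three-step pattern (test that the property exists, clamp its length, copy the bytes) into one call: it returns the element count when the property holds between sz_min and sz_max bytes and a negative errno otherwise, so `num_regs > 0` is the only check left. A minimal sketch under the same assumptions; "vendor,lane-swing" is a made-up property name.

#include <linux/of.h>

#define MAX_SWING_REGS 20

static int read_swing(const struct device_node *np, u8 *buf, int *count)
{
	/* Accepts 1..MAX_SWING_REGS bytes; returns count read or -errno. */
	int n = of_property_read_variable_u8_array(np, "vendor,lane-swing",
						   buf, 1, MAX_SWING_REGS);
	if (n < 0)
		return n; /* property missing or badly sized */

	*count = n;
	return 0;
}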
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1e1c06fdf206..87b8545fccc0 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -460,6 +460,8 @@ struct it6505 {
bool enable_drv_hold;
const struct drm_edid *cached_edid;
+
+ int irq;
};
struct it6505_step_train_para {
@@ -2624,6 +2626,8 @@ static int it6505_poweron(struct it6505 *it6505)
it6505_init(it6505);
it6505_lane_off(it6505);
+ enable_irq(it6505->irq);
+
return 0;
}
@@ -2640,6 +2644,8 @@ static int it6505_poweroff(struct it6505 *it6505)
return 0;
}
+ disable_irq_nosync(it6505->irq);
+
if (pdata->gpiod_reset)
gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
@@ -3389,7 +3395,7 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct it6505 *it6505;
struct device *dev = &client->dev;
struct extcon_dev *extcon;
- int err, intp_irq;
+ int err;
it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
if (!it6505)
@@ -3430,17 +3436,18 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505_parse_dt(it6505);
- intp_irq = client->irq;
+ it6505->irq = client->irq;
- if (!intp_irq) {
+ if (!it6505->irq) {
dev_err(dev, "Failed to get INTP IRQ");
err = -ENODEV;
return err;
}
- err = devm_request_threaded_irq(&client->dev, intp_irq, NULL,
+ err = devm_request_threaded_irq(&client->dev, it6505->irq, NULL,
it6505_int_threaded_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
"it6505-intp", it6505);
if (err) {
dev_err(dev, "Failed to request INTP threaded IRQ: %d", err);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 1a9defa15663..e265ab3c8c92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -422,22 +422,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static enum drm_mode_status
-lt8912_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- if (mode->clock > 150000)
- return MODE_CLOCK_HIGH;
-
- if (mode->hdisplay > 1920)
- return MODE_BAD_HVALUE;
-
- if (mode->vdisplay > 1080)
- return MODE_BAD_VVALUE;
-
- return MODE_OK;
-}
-
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
@@ -463,7 +447,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = {
.get_modes = lt8912_connector_get_modes,
- .mode_valid = lt8912_connector_mode_valid,
};
static void lt8912_bridge_mode_set(struct drm_bridge *bridge,
@@ -605,6 +588,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
drm_bridge_hpd_disable(lt->hdmi_port);
}
+static enum drm_mode_status
+lt8912_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 1920)
+ return MODE_BAD_HVALUE;
+
+ if (mode->vdisplay > 1080)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
static enum drm_connector_status
lt8912_bridge_detect(struct drm_bridge *bridge)
{
@@ -635,6 +635,7 @@ static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge,
static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.attach = lt8912_bridge_attach,
.detach = lt8912_bridge_detach,
+ .mode_valid = lt8912_bridge_mode_valid,
.mode_set = lt8912_bridge_mode_set,
.enable = lt8912_bridge_enable,
.detect = lt8912_bridge_detect,
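Moving the filter from drm_connector_helper_funcs.mode_valid to drm_bridge_funcs.mode_valid keeps the 1080p/150 MHz limits enforced even when the bridge is attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR, where the driver never creates the connector whose helper would otherwise run; note the bridge hook's different signature (const display info plus const mode). The core applies the callback to every bridge on the encoder; a hedged sketch of the consuming side, assuming an already-attached chain starting at first_bridge:

/* Sketch: validate a mode against every bridge attached to the encoder. */
enum drm_mode_status status;

status = drm_bridge_chain_mode_valid(first_bridge,
				     &connector->display_info, mode);
if (status != MODE_OK)
	return status; /* e.g. MODE_CLOCK_HIGH from lt8912 */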
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4e802b54a1cb..4d1d40e1f1b4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -34,7 +35,7 @@
struct lt9611uxc {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_bridge *next_bridge;
struct regmap *regmap;
/* Protects all accesses to registers by stopping the on-chip MCU */
@@ -120,11 +121,6 @@ static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge)
return container_of(bridge, struct lt9611uxc, bridge);
}
-static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector)
-{
- return container_of(connector, struct lt9611uxc, connector);
-}
-
static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc)
{
mutex_lock(&lt9611uxc->ocm_lock);
@@ -171,20 +167,14 @@ static void lt9611uxc_hpd_work(struct work_struct *work)
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
bool connected;
- if (lt9611uxc->connector.dev) {
- if (lt9611uxc->connector.dev->mode_config.funcs)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- } else {
-
- mutex_lock(&lt9611uxc->ocm_lock);
- connected = lt9611uxc->hdmi_connected;
- mutex_unlock(&lt9611uxc->ocm_lock);
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
- drm_bridge_hpd_notify(&lt9611uxc->bridge,
- connected ?
- connector_status_connected :
- connector_status_disconnected);
- }
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
}
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
@@ -289,82 +279,13 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
return dsi;
}
-static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- const struct drm_edid *drm_edid;
- int count;
-
- drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
-
- return count;
-}
-
-static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector,
- bool force)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
-
- return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge);
-}
-
-static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode);
-
- return lt9611uxc_mode ? MODE_OK : MODE_BAD;
-}
-
-static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = {
- .get_modes = lt9611uxc_connector_get_modes,
- .mode_valid = lt9611uxc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = lt9611uxc_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc)
-{
- int ret;
-
- lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&lt9611uxc->connector,
- &lt9611uxc_bridge_connector_helper_funcs);
- ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,
- &lt9611uxc_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder);
-}
-
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- int ret;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt9611uxc_connector_init(bridge, lt9611uxc);
- if (ret < 0)
- return ret;
- }
- return 0;
+ return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ bridge, flags);
}
static enum drm_mode_status
@@ -525,7 +446,7 @@ static int lt9611uxc_parse_dt(struct device *dev,
lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
- return 0;
+ return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611uxc->next_bridge);
}
static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc)
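With its connector code gone, the bridge simply forwards the attach to whatever drm_of_find_panel_or_bridge() located on DT port 2, and connector creation moves to the display controller, typically via the drm_bridge_connector helper. A hedged sketch of that encoder-side counterpart, where drm, encoder and bridge0 (the first bridge in the chain) are assumed to exist:

#include <drm/drm_bridge_connector.h>

struct drm_connector *connector;
int ret;

/* Attach the chain; no bridge may create its own connector. */
ret = drm_bridge_attach(encoder, bridge0, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
	return ret;

/* One connector for the whole chain, owned by the KMS driver. */
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector))
	return PTR_ERR(connector);

drm_connector_attach_encoder(connector, encoder);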
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 8d54091ec66e..5f05647a3bea 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -289,13 +289,13 @@ static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
- /*
- * Adjusting input polarity based on the video mode results in
- * a black screen so always pick active low:
- */
nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+ dsi->mode.flags & DRM_MODE_FLAG_PVSYNC ?
+ NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH :
NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+ dsi->mode.flags & DRM_MODE_FLAG_PHSYNC ?
+ NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH :
NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
index a247a8a11c7c..61e7d65cb1eb 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.h
+++ b/drivers/gpu/drm/bridge/nwl-dsi.h
@@ -30,11 +30,11 @@
#define NWL_DSI_PIXEL_FORMAT 0x20c
#define NWL_DSI_VSYNC_POLARITY 0x210
#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
-#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(0)
#define NWL_DSI_HSYNC_POLARITY 0x214
#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
-#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(0)
#define NWL_DSI_VIDEO_MODE 0x218
#define NWL_DSI_HFP 0x21c
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 67b8d17a722a..221e9a4edb40 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
@@ -388,15 +389,36 @@ static int dw_hdmi_close(struct snd_pcm_substream *substream)
static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ vfree(runtime->dma_area);
+ runtime->dma_area = NULL;
+ return 0;
}
static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ size_t size = params_buffer_bytes(params);
+
/* Allocate the PCM runtime buffer, which is exposed to userspace. */
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
+ if (runtime->dma_area) {
+ if (runtime->dma_bytes >= size)
+ return 0; /* already large enough */
+ vfree(runtime->dma_area);
+ }
+ runtime->dma_area = vzalloc(size);
+ if (!runtime->dma_area)
+ return -ENOMEM;
+ runtime->dma_bytes = size;
+ return 1;
+}
+
+static struct page *dw_hdmi_get_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+{
+ return vmalloc_to_page(substream->runtime->dma_area + offset);
}
static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
@@ -515,7 +537,7 @@ static const struct snd_pcm_ops snd_dw_hdmi_ops = {
.prepare = dw_hdmi_prepare,
.trigger = dw_hdmi_trigger,
.pointer = dw_hdmi_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
+ .page = dw_hdmi_get_page,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
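This hunk open-codes the removed snd_pcm_lib_*_vmalloc_buffer() helpers: hw_params returns 1 when the buffer was (re)allocated and 0 when the old one is large enough (the core uses this to invalidate cached pointers), hw_free frees it, and the .page callback resolves vmalloc pages for mmap. On kernels that have managed buffers, a common alternative is to let the core do all of this; a sketch, assuming pcm is the allocated snd_pcm:

/* Let ALSA manage a vmalloc'ed buffer; the hw_params/hw_free/.page
 * bookkeeping above then becomes unnecessary. */
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);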
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 9f2bc932c371..0031f3c54882 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,9 +138,6 @@ struct dw_hdmi {
struct platform_device *audio;
struct platform_device *cec;
struct device *dev;
- struct clk *isfr_clk;
- struct clk *iahb_clk;
- struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
@@ -3326,6 +3323,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
struct device_node *ddc_node;
struct dw_hdmi_cec_data cec;
struct dw_hdmi *hdmi;
+ struct clk *clk;
struct resource *iores = NULL;
int irq;
int ret;
@@ -3405,50 +3403,27 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->regm = plat_data->regm;
}
- hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
- if (IS_ERR(hdmi->isfr_clk)) {
- ret = PTR_ERR(hdmi->isfr_clk);
+ clk = devm_clk_get_enabled(hdmi->dev, "isfr");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
goto err_res;
}
- ret = clk_prepare_enable(hdmi->isfr_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- goto err_res;
- }
-
- hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
- if (IS_ERR(hdmi->iahb_clk)) {
- ret = PTR_ERR(hdmi->iahb_clk);
+ clk = devm_clk_get_enabled(hdmi->dev, "iahb");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(hdmi->dev, "Unable to get HDMI iahb clk: %d\n", ret);
- goto err_isfr;
- }
-
- ret = clk_prepare_enable(hdmi->iahb_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret);
- goto err_isfr;
+ goto err_res;
}
- hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
- if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
- hdmi->cec_clk = NULL;
- } else if (IS_ERR(hdmi->cec_clk)) {
- ret = PTR_ERR(hdmi->cec_clk);
+ clk = devm_clk_get_optional_enabled(hdmi->dev, "cec");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
if (ret != -EPROBE_DEFER)
dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
ret);
-
- hdmi->cec_clk = NULL;
- goto err_iahb;
- } else {
- ret = clk_prepare_enable(hdmi->cec_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
- ret);
- goto err_iahb;
- }
+ goto err_res;
}
/* Product and revision IDs */
@@ -3462,12 +3437,12 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n",
hdmi->version, prod_id0, prod_id1);
ret = -ENODEV;
- goto err_iahb;
+ goto err_res;
}
ret = dw_hdmi_detect_phy(hdmi);
if (ret < 0)
- goto err_iahb;
+ goto err_res;
dev_info(dev, "Detected HDMI TX controller v%x.%03x %s HDCP (%s)\n",
hdmi->version >> 12, hdmi->version & 0xfff,
@@ -3479,14 +3454,14 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto err_iahb;
+ goto err_res;
}
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
dw_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
if (ret)
- goto err_iahb;
+ goto err_res;
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
@@ -3603,11 +3578,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
return hdmi;
-err_iahb:
- clk_disable_unprepare(hdmi->iahb_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-err_isfr:
- clk_disable_unprepare(hdmi->isfr_clk);
err_res:
i2c_put_adapter(hdmi->ddc);
@@ -3627,10 +3597,6 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
- clk_disable_unprepare(hdmi->iahb_clk);
- clk_disable_unprepare(hdmi->isfr_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
else
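The error-path shrinkage above is the whole point of devm_clk_get_enabled(): the clock is acquired, prepared and enabled in one devres-managed step, so the err_isfr/err_iahb unwind labels collapse into err_res and dw_hdmi_remove() drops its clk_disable_unprepare() calls. A minimal probe sketch with the same clock names:

struct clk *clk;

/* Acquired and enabled; disabled and released automatically on detach. */
clk = devm_clk_get_enabled(dev, "isfr");
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "no isfr clock\n");

/* Optional clock: returns a no-op NULL clk if the DT omits it. */
clk = devm_clk_get_optional_enabled(dev, "cec");
if (IS_ERR(clk))
	return PTR_ERR(clk);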
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index c4e9d96933dc..0fb02e4e7f4e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -722,7 +722,12 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
{
- dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN);
+ u32 val = CRC_RX_EN | ECC_RX_EN | BTA_EN | EOTP_TX_EN;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ dsi_write(dsi, DSI_PCKHDL_CFG, val);
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
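EoTp transmission is now enabled by default and cleared only when the peripheral sets MIPI_DSI_MODE_NO_EOT_PACKET; the flag's sense is "no EoT packet", so the host masks EOTP_TX_EN out rather than in. The peer side in one line, where dsi is the mipi_dsi_device a panel driver receives at probe:

/* Panel side: ask the DSI host not to send End-of-Transmission packets. */
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
		  MIPI_DSI_MODE_NO_EOT_PACKET;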
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index b8b7a227addf..290e2532fab1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -241,6 +241,10 @@
/* Link Training */
#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_PRE1 GENMASK(29, 28)
+#define DP0_SRCCTRL_SWG1 GENMASK(25, 24)
+#define DP0_SRCCTRL_PRE0 GENMASK(21, 20)
+#define DP0_SRCCTRL_SWG0 GENMASK(17, 16)
#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
#define DP0_SRCCTRL_EN810B BIT(12)
#define DP0_SRCCTRL_NOTP (0 << 8)
@@ -278,6 +282,8 @@
#define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */
#define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */
+#define DP1_SRCCTRL_PRE GENMASK(21, 20)
+#define DP1_SRCCTRL_SWG GENMASK(17, 16)
/* PHY */
#define DP_PHY_CTRL 0x0800
@@ -369,6 +375,7 @@ struct tc_data {
u32 rev;
u8 assr;
+ u8 pre_emphasis[2];
struct gpio_desc *sd_gpio;
struct gpio_desc *reset_gpio;
@@ -1090,13 +1097,17 @@ static int tc_main_link_enable(struct tc_data *tc)
return ret;
}
- ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0) |
+ FIELD_PREP(DP1_SRCCTRL_PRE, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1188,8 +1199,10 @@ static int tc_main_link_enable(struct tc_data *tc)
goto err_dpcd_write;
/* Reset voltage-swing & pre-emphasis */
- tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
- DP_TRAIN_PRE_EMPH_LEVEL_0;
+ tmp[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[0]);
+ tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[1]);
ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1213,7 +1226,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP1);
+ DP0_SRCCTRL_TP1 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1248,7 +1263,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP2);
+ DP0_SRCCTRL_TP2 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1274,7 +1291,9 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Clear Training Pattern, set AutoCorrect Mode = 1 */
ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
- DP0_SRCCTRL_AUTOCORRECT);
+ DP0_SRCCTRL_AUTOCORRECT |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -2363,6 +2382,18 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+
+ if (endpoint.port == 2) {
+ of_property_read_u8_array(node, "toshiba,pre-emphasis",
+ tc->pre_emphasis,
+ ARRAY_SIZE(tc->pre_emphasis));
+
+ if (tc->pre_emphasis[0] > 2 || tc->pre_emphasis[1] > 2) {
+ dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ return -EINVAL;
+ }
+ }
}
if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) {
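The new DP0_SRCCTRL_PRE*/SWG* masks pair GENMASK() with FIELD_PREP() so a 0-2 pre-emphasis level lands in the right bits without hand-written shifts, and FIELD_PREP() checks the value against the mask at compile time. A standalone sketch of the composition:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DP0_SRCCTRL_PRE1	GENMASK(29, 28)
#define DP0_SRCCTRL_PRE0	GENMASK(21, 20)

/* Compose lane 0/1 pre-emphasis (0=0dB, 1=3.5dB, 2=6dB) into the field. */
u32 val = FIELD_PREP(DP0_SRCCTRL_PRE0, pre[0]) |
	  FIELD_PREP(DP0_SRCCTRL_PRE1, pre[1]);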
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index 4140303d6260..66e70ced796f 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -187,6 +187,7 @@ CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
+CONFIG_DRM_ANALOGIX_ANX7625=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index d6edf3635b23..2a94f54ce4cf 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -28,6 +28,14 @@ debian/x86_64_test-vk:
rules:
- when: never
+debian/arm64_test-vk:
+ rules:
+ - when: never
+
+debian/arm64_test-gl:
+ rules:
+ - when: never
+
fedora/x86_64_build:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 80fb0f57ae46..eca47d4f816f 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,13 +1,13 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha e2b9c5a9e3e4f9b532067af8022eaef8d6fc6c00
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de
- UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 0df7b9b97f9da0e364f5ee30fe331004b8c86b56
+ IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5
- DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
+ DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
@@ -85,22 +85,24 @@ include:
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
+ - '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/farm-rules.yml'
+ - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
- '/.gitlab-ci/test-source-dep.yml'
- - '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/test/gitlab-ci.yml'
- - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
- - '/src/microsoft/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
+ - '/src/amd/ci/gitlab-ci-inc.yml'
+ - '/src/freedreno/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/crocus/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/llvmpipe/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/nouveau/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
- '/src/gallium/frontends/lavapipe/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/frontends/rusticl/ci/gitlab-ci.yml'
- '/src/intel/ci/gitlab-ci-inc.yml'
- - '/src/freedreno/ci/gitlab-ci-inc.yml'
- - '/src/amd/ci/gitlab-ci-inc.yml'
+ - '/src/microsoft/ci/gitlab-ci-inc.yml'
+ - '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
@@ -121,8 +123,9 @@ stages:
- mediatek
- meson
- msm
+ - panfrost
+ - powervr
- rockchip
- - virtio-gpu
- software-driver
# YAML anchors for rule conditions
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 79f41d7da772..f38836ec837c 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -20,16 +20,6 @@ cat /sys/kernel/debug/dri/*/state
set -e
case "$DRIVER_NAME" in
- rockchip|meson)
- export IGT_FORCE_DRIVER="panfrost"
- ;;
- mediatek)
- if [ "$GPU_VERSION" = "mt8173" ]; then
- export IGT_FORCE_DRIVER=${DRIVER_NAME}
- elif [ "$GPU_VERSION" = "mt8183" ]; then
- export IGT_FORCE_DRIVER="panfrost"
- fi
- ;;
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/. || true
@@ -80,6 +70,7 @@ igt-runner \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
--output /results \
+ -vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
$IGT_FAILS \
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 13eda37bdf05..2c340d063a96 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,15 +1,15 @@
variables:
- CONTAINER_TAG: "2024-05-09-mesa-uprev"
+ CONTAINER_TAG: "2024-08-07-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
- DEBIAN_BUILD_TAG: "2024-06-10-vkms"
+ DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "2023-10-06-amd"
+ KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_X86_64_TEST_GL_TAG: "${CONTAINER_TAG}"
+ DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 0707fa706a48..6add15083c78 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -44,6 +44,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir "${CI_PROJECT_DIR}" \
--device-type "${DEVICE_TYPE}" \
+ --farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index ee908b66aad2..09d8447840e9 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -10,6 +10,7 @@
.lava-test:
extends:
- .test-rules
+ timeout: "1h30m"
script:
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- rm -rf install
@@ -69,8 +70,9 @@
.baremetal-igt-arm64:
extends:
- .baremetal-test-arm64
- - .use-debian/arm64_test
+ - .use-debian/baremetal_arm64_test
- .test-rules
+ timeout: "1h30m"
variables:
FDO_CI_CONCURRENT: 10
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
@@ -79,7 +81,7 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/arm64_test
+ - debian/baremetal_arm64_test
- job: testing:arm64
artifacts: false
- igt:arm64
@@ -160,38 +162,61 @@ msm:sdm845:
script:
- ./install/bare-metal/cros-servo.sh
-rockchip:rk3288:
- extends:
- - .lava-igt:arm32
+.rockchip-device:
+ variables:
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+
+.rockchip-display:
stage: rockchip
variables:
DRIVER_NAME: rockchip
+
+.rk3288:
+ extends:
+ - .lava-igt:arm32
+ - .rockchip-device
+ variables:
DEVICE_TYPE: rk3288-veyron-jaq
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: "zimage"
GPU_VERSION: rk3288
+ KERNEL_IMAGE_TYPE: "zimage"
RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq
-rockchip:rk3399:
+.rk3399:
extends:
- .lava-igt:arm64
- stage: rockchip
+ - .rockchip-device
parallel: 2
variables:
- DRIVER_NAME: rockchip
DEVICE_TYPE: rk3399-gru-kevin
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: ""
GPU_VERSION: rk3399
+ KERNEL_IMAGE_TYPE: ""
RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin
+rockchip:rk3288:
+ extends:
+ - .rk3288
+ - .rockchip-display
+
+panfrost:rk3288:
+ extends:
+ - .rk3288
+ - .panfrost-gpu
+
+rockchip:rk3399:
+ extends:
+ - .rk3399
+ - .rockchip-display
+
+panfrost:rk3399:
+ extends:
+ - .rk3399
+ - .panfrost-gpu
+
.i915:
extends:
- .lava-igt:x86_64
stage: i915
- timeout: "1h30m"
variables:
DRIVER_NAME: i915
DTB: ""
@@ -280,65 +305,117 @@ amdgpu:stoney:
GPU_VERSION: stoney
RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt
-.mediatek:
+.mediatek-device:
extends:
- .lava-igt:arm64
stage: mediatek
variables:
- DRIVER_NAME: mediatek
DTB: ${DEVICE_TYPE}
BOOT_METHOD: depthcharge
KERNEL_IMAGE_TYPE: ""
-mediatek:mt8173:
+.mediatek-display:
+ stage: mediatek
+ variables:
+ DRIVER_NAME: mediatek
+
+.powervr-gpu:
+ stage: powervr
+ variables:
+ DRIVER_NAME: powervr
+
+.panfrost-gpu:
+ stage: panfrost
+ variables:
+ DRIVER_NAME: panfrost
+
+.mt8173:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 4
variables:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
-mediatek:mt8183:
+.mt8183:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
GPU_VERSION: mt8183
RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16
+mediatek:mt8173:
+ extends:
+ - .mt8173
+ - .mediatek-display
+
+powervr:mt8173:
+ extends:
+ - .mt8173
+ - .powervr-gpu
+ rules:
+ # TODO: powervr driver was merged in linux kernel, but there's no mediatek support yet
+ # Remove the rule once mediatek support is added for powervr
+ - when: never
+
+mediatek:mt8183:
+ extends:
+ - .mt8183
+ - .mediatek-display
+
+panfrost:mt8183:
+ extends:
+ - .mt8183
+ - .panfrost-gpu
+
# drm-mtk doesn't even probe yet in mainline for mt8192
.mediatek:mt8192:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8192-asurada-spherion-r0
GPU_VERSION: mt8192
RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0
-.meson:
+.meson-device:
extends:
- .lava-igt:arm64
- stage: meson
variables:
- DRIVER_NAME: meson
DTB: ${DEVICE_TYPE}
BOOT_METHOD: u-boot
KERNEL_IMAGE_TYPE: "image"
-meson:g12b:
+.meson-display:
+ stage: meson
+ variables:
+ DRIVER_NAME: meson
+
+.g12b:
extends:
- - .meson
+ - .meson-device
parallel: 3
variables:
DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
GPU_VERSION: g12b
RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3
+meson:g12b:
+ extends:
+ - .g12b
+ - .meson-display
+
+panfrost:g12b:
+ extends:
+ - .g12b
+ - .panfrost-gpu
+
virtio_gpu:none:
stage: software-driver
+ timeout: "1h30m"
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
@@ -361,6 +438,7 @@ virtio_gpu:none:
vkms:none:
stage: software-driver
+ timeout: "1h30m"
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index e8c2f4044a92..8e2fed6d76a3 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -30,6 +30,7 @@ kms_cursor_crc@cursor-random-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_lease@lease-uevent,Fail
@@ -37,4 +38,3 @@ kms_plane@pixel-format,Fail
kms_plane_cursor@primary,Fail
kms_rotation_crc@primary-rotation-180,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index ea512ff8c352..e4faa96fa000 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: hp-11A-G6-EE-grunt
# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_plane@pixel-format-source-clamping
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3a2ce45d3cb9..f41b3e112976 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -2,9 +2,9 @@
.*suspend.*
# Skip driver specific tests
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -13,6 +13,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 6641520ac587..9b84f68a5122 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -6,11 +6,11 @@ i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -33,16 +33,20 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_pm_rpm@modeset-lpsp-stress,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0a76547a103d..581f0da4d0f2 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -1,9 +1,48 @@
# Board Name: asus-C433TA-AJ0005-rammus
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
i915_hangman@engine-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
i915_hangman@gt-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_universal_plane@cursor-fb-leak
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_sysfs_edid_timing
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+i915_hangman@engine-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5663ed0420a7..5186ba3dbbc6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -5,9 +5,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
index cb010c153a6a..4663d4d13f35 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: asus-C523NA-A20057-coral
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index ab588e7a447c..4f50e0240ff4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -7,9 +7,9 @@ kms_3d
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 26cd62bbf30a..2723e2832797 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -9,11 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -41,20 +40,25 @@ kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@plane-move-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
-kms_psr@psr-sprite-render,Timeout
+kms_rotation_crc@primary-rotation-180,Timeout
kms_setmode@basic,Fail
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index bb560ff1e2cd..58a6001abb28 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: asus-C436FA-Flip-hatch
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_plane_alpha_blend@constant-alpha-min
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 93b7736fffbb..9d753d97c9ab 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ i915_suspend.*
xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index fca15b487929..4821c9adefd1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,20 +1,16 @@
core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
@@ -26,6 +22,7 @@ kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
@@ -38,19 +35,24 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@legacy-planes,Timeout
kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 58fc424f8a42..077886b76093 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -1,7 +1,13 @@
# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_hotunplug@unplug-rescan
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index b3226b2d9ba1..9c64146aed90 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..1de04a3308c4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -17,10 +17,12 @@ perf@i915-ref-count,Fail
perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
+prime_busy@after,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
+testdisplay,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
index 6cf1fed2e575..549501e40461 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: hp-x360-14-G1-sona
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
prime_busy@hang
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index f0cf8a6dda25..6ec2f83ffe13 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 9a50e894c3e7..e728ccc62326 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,34 +1,39 @@
-api_intel_bb@blit-noreloc-keep-cache,Timeout
+api_intel_allocator@simple-allocator,Timeout
+api_intel_bb@object-reloc-keep-cache,Timeout
api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-core_getclient,Timeout
-core_hotunplug@hotreplug-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
+core_auth@getclient-simple,Timeout
+core_hotunplug@hotunbind-rebind,Timeout
+debugfs_test@read_all_entries_display_on,Timeout
+drm_read@invalid-buffer,Timeout
drm_read@short-buffer-nonblock,Timeout
-dumb_buffer@map-uaf,Timeout
gen3_render_tiledx_blits,Timeout
gen7_exec_parse@basic-allocation,Timeout
gen7_exec_parse@batch-without-end,Timeout
gen9_exec_parse@batch-invalid-length,Timeout
gen9_exec_parse@bb-secure,Timeout
+gen9_exec_parse@secure-batches,Timeout
+gen9_exec_parse@shadow-peek,Timeout
+gen9_exec_parse@unaligned-jump,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
i915_query@engine-info,Timeout
+i915_query@query-topology-kernel-writes,Timeout
+i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
perf_pmu@event-wait,Timeout
perf_pmu@gt-awake,Timeout
+perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
prime_mmap@test_map_unmap,Timeout
+prime_mmap@test_refcounting,Timeout
prime_self_import@basic-with_one_bo,Timeout
-syncobj_basic@bad-destroy,Timeout
+syncobj_basic@bad-flags-fd-to-handle,Timeout
syncobj_eventfd@invalid-bad-pad,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
@@ -37,7 +42,9 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
+template@A,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
+xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index e600782ef96a..b47df5855e8d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -12,9 +12,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 7582d313dd9b..2adae2175501 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -7,18 +7,10 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -40,6 +32,7 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
@@ -47,9 +40,13 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 1167a58c7dd1..60b8d1c64e70 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: dell-latitude-5400-8665U-sarien
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 20bd91525f45..29bff8922ae1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_flip.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index cc5e9c1c2d57..a14349a1967f 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -5,8 +5,15 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
fbdev@eof,Fail
fbdev@read,Fail
-fbdev@unaligned-write,Fail
kms_3d,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -27,4 +34,3 @@ kms_properties@get_properties-sanity-atomic,Fail
kms_properties@plane-properties-atomic,Fail
kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 395ac0463404..2e5bf6ae25f2 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -1,11 +1,41 @@
# Board Name: mt8173-elm-hana
# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_setmaster_vs_auth
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
dumb_buffer@create-clear
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@unaligned-write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 0c6108392140..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -1,8 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -10,6 +10,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 9ef460646d76..8cb2cb67853d 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,8 +1,22 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+core_setmaster@master-drop-set-shared-fd,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+fbdev@eof,Fail
+fbdev@pan,Fail
+fbdev@read,Fail
+fbdev@unaligned-read,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@plane-properties-atomic,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
new file mode 100644
index 000000000000..df7e5ce7a036
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -0,0 +1,20 @@
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+fbdev@write
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
index 9ef460646d76..328967d3e23d 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -1,8 +1,13 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@connector-properties-atomic,Fail
+kms_properties@connector-properties-legacy,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@get_properties-sanity-non-atomic,Fail
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 6e7fd1ccd1e3..4ac46168eff3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -4,12 +4,8 @@ device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@torture-bo,Fail
-kms_cursor_legacy@torture-move,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-msm_mapping@ring,Fail
-tools_test@tools_test,Fail
+msm/msm_mapping@ring,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index ff12202abb6e..1674c8e214d6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -1,7 +1,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,6 +9,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index 46ca69ce2ffe..bd0653caf7a0 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -5,4 +5,3 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_lease@lease-uevent,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
index a275584c8bbb..123d92cb4470 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: apq8096-db820c
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
dumb_buffer@create-clear
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 1c45fc6c512d..5550be5486ed 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -4,7 +4,7 @@ kms_cursor_legacy@all-pipes-torture-move
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -23,4 +24,4 @@ core_hotunplug.*
# *** gpu fault: ttbr0=00000001030ea000 iova=0000000001074000 dir=WRITE type=PERMISSION source=1f030000 (0,0,0,0)
# msm_mdp 901000.display-controller: RBBM | ME master split | status=0x701000B0
# watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [kworker/u16:3:46]
-msm_mapping@shadow
+msm/msm_mapping@shadow
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 6dec63d48cfb..d74e04405e65 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: sc7180-trogdor-kingoftown
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
# Failure Rate: 50
-msm_mapping@shadow
-msm_shrink@copy-gpu-oom-32
-msm_shrink@copy-gpu-oom-8
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 68c96005ba54..c2833eee1c4b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -19,3 +20,6 @@ core_hotunplug.*
# Timeout occurs
kms_flip@2x-wf_vblank-ts-check
+
+# Hangs the machine
+kms_cursor_crc@cursor-random-max-size
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index dcb24b835dc3..cd3d3b0befe4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
-msm_mapping@shadow
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_lease@page-flip-implicit-plane
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 1168c53acd2d..7c69c1f1d55b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index 8f010c8a9c4f..770a1c685fde 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -33,4 +33,3 @@ kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@viewport,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 2c5f62b07632..2aa96b1241c3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -1,19 +1,118 @@
# Board Name: sdm845-cheza-r3
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_cursor_legacy@basic-flip-after-cursor-atomic
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-legacy
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-before-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-msm_shrink@copy-gpu-32
-msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@short-flip-before-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+msm/msm_shrink@copy-mmap-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 5185212c8fb2..90651048ab61 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -4,12 +4,12 @@ kms_bw.*
# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm_mapping@*
+msm/msm_mapping@*
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
new file mode 100644
index 000000000000..4a2f4b6b14c1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Crash
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
new file mode 100644
index 000000000000..ac4f8f7244d4
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+panfrost/panfrost_submit@pan-unhandled-pagefault
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt
index e9994c9db799..5e6d48d98e4e 100644
--- a/drivers/gpu/drm/ci/xfails/requirements.txt
+++ b/drivers/gpu/drm/ci/xfails/requirements.txt
@@ -11,7 +11,7 @@ requests==2.31.0
requests-toolbelt==1.0.0
ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
-setuptools==68.0.0
+setuptools==70.0.0
tenacity==8.2.3
urllib3==2.0.7
wheel==0.41.1
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index f9b99bf27105..ea7b2ceb95b9 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,8 +1,18 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
+device_reset@cold-reset-bound,Crash
+device_reset@reset-bound,Crash
+device_reset@unbind-cold-reset-rebind,Crash
+device_reset@unbind-reset-rebind,Crash
dumb_buffer@create-clear,Crash
-dumb_buffer@create-valid-dumb,Crash
dumb_buffer@invalid-bpp,Crash
-dumb_buffer@map-invalid-size,Crash
-dumb_buffer@map-uaf,Crash
-dumb_buffer@map-valid,Crash
-panfrost_prime@gem-prime-import,Crash
-tools_test@tools_test,Crash
+fbdev@pan,Crash
+kms_cursor_crc@cursor-onscreen-32x10,Crash
+kms_cursor_crc@cursor-onscreen-32x32,Crash
+kms_cursor_crc@cursor-random-32x10,Crash
+kms_cursor_crc@cursor-sliding-32x32,Crash
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_prop_blob@invalid-set-prop,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
new file mode 100644
index 000000000000..7ede273aab20
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-atomic
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index 6d3757dca83b..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -1,60 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
-# Started hanging the machine on Linux 5.19-rc2:
-#
-# [IGT] kms_plane_lowres: executing
-# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y
-# [IGT] kms_plane_lowres: exiting, ret=77
-# Console: switching to colour frame buffer device 170x48
-# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out
-# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out
-# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482
-# 8<--- cut here ---
-# Unable to handle kernel paging request at virtual address 7812078e
-# [7812078e] *pgd=00000000
-# Internal error: Oops: 5 [#1] SMP ARM
-# Modules linked in:
-# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1
-# Hardware name: Rockchip (Device Tree)
-# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b)
-# spin_dump from do_raw_spin_lock+0xa4/0xe8
-# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120
-# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c
-# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168
-# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180
-# commit_tail from drm_atomic_helper_commit+0x164/0x18c
-# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4
-# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284
-# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8
-# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40
-# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94
-# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc
-# drm_client_dev_restore from drm_release+0xf4/0x114
-# drm_release from __fput+0x74/0x240
-# __fput from task_work_run+0x84/0xb4
-# task_work_run from do_exit+0x34c/0xa20
-# do_exit from do_group_exit+0x34/0x98
-# do_group_exit from __wake_up_parent+0x0/0x18
-# Code: e595c008 12843d19 03e00000 03093168 (15940508)
-# ---[ end trace 0000000000000000 ]---
-# note: kms_plane_lowre[482] exited with preempt_count 1
-# Fixing recursive fault but reboot is needed!
-kms_plane_lowres@pipe-F-tiling-y
-
-# Take too long, we have only two machines, and these are very flaky
-kms_cursor_crc.*
-
-# Machine is hanging in this test, so skip it
-kms_pipe_crc_basic@disable-crc-after-crtc
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -62,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 9ef460646d76..9309ff15e23a 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -1,8 +1,84 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_atomic_transition@modeset-transition,Fail
+kms_atomic_transition@modeset-transition-fencing,Fail
+kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_color@gamma,Fail
+kms_color@legacy-gamma,Fail
+kms_cursor_crc@cursor-alpha-opaque,Fail
+kms_cursor_crc@cursor-alpha-transparent,Fail
+kms_cursor_crc@cursor-dpms,Fail
+kms_cursor_crc@cursor-offscreen-32x10,Fail
+kms_cursor_crc@cursor-offscreen-32x32,Fail
+kms_cursor_crc@cursor-offscreen-64x21,Fail
+kms_cursor_crc@cursor-offscreen-64x64,Fail
+kms_cursor_crc@cursor-onscreen-32x10,Fail
+kms_cursor_crc@cursor-onscreen-32x32,Fail
+kms_cursor_crc@cursor-onscreen-64x21,Fail
+kms_cursor_crc@cursor-onscreen-64x64,Fail
+kms_cursor_crc@cursor-random-32x10,Fail
+kms_cursor_crc@cursor-random-32x32,Fail
+kms_cursor_crc@cursor-random-64x21,Fail
+kms_cursor_crc@cursor-random-64x64,Fail
+kms_cursor_crc@cursor-rapid-movement-32x10,Fail
+kms_cursor_crc@cursor-rapid-movement-32x32,Fail
+kms_cursor_crc@cursor-rapid-movement-64x21,Fail
+kms_cursor_crc@cursor-rapid-movement-64x64,Fail
+kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-sliding-32x10,Fail
+kms_cursor_crc@cursor-sliding-32x32,Fail
+kms_cursor_crc@cursor-sliding-64x21,Fail
+kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@modeset-vs-vblank-race,Fail
+kms_flip@modeset-vs-vblank-race-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
+kms_pipe_crc_basic@disable-crc-after-crtc,Fail
+kms_pipe_crc_basic@nonblocking-crc,Fail
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
+kms_pipe_crc_basic@read-crc,Fail
+kms_pipe_crc_basic@read-crc-frame-sequence,Fail
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
+kms_plane@plane-panning-bottom-right,Fail
+kms_plane@plane-panning-top-left,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane@plane-position-hole,Fail
+kms_plane@plane-position-hole-dpms,Fail
+kms_plane_cursor@primary,Fail
+kms_plane_multiple@tiling-none,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 742c27d9a598..d98f6a17343c 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -1,6 +1,48 @@
# Board Name: rk3399-gru-kevin
-# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
# Failure Rate: 50
-panfrost_submit@pan-unhandled-pagefault
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@dpms-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-absolute-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_setmode@basic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@connected-linear-tiling-1-displays-2560x1440p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index 5c52b25b4213..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -1,13 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -15,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index fdf09fe11566..c72fee70e739 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -3,6 +3,70 @@ kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@size-max,Fail
kms_addfb_basic@too-high,Fail
kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-10-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-10-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-11-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-11-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-12-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-12-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-13-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-13-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-14-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-14-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-15-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-15-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-16-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-16-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-3-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-4-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-5-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-6-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-6-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-7-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-7-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-8-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-8-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-9-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-9-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -123,4 +187,3 @@ kms_vblank@wait-forked,Fail
kms_vblank@wait-forked-busy,Fail
kms_vblank@wait-idle,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index e0ca4fadb84f..9c9e048725f8 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -7,9 +7,9 @@ kms_flip@flip-vs-suspend.*
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -18,6 +18,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 691c383b21a0..5408110f4c60 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -41,12 +41,8 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-suspend,Timeout
-kms_flip@flip-vs-suspend-interruptible,Timeout
-kms_flip@plain-flip-fb-recreate,Fail
kms_lease@lease-uevent,Fail
kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
@@ -54,4 +50,3 @@ kms_writeback@writeback-fb-id-XRGB2101010,Fail
kms_writeback@writeback-invalid-parameters,Fail
kms_writeback@writeback-pixel-formats,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index eeaa1d5825af..62428f3c8f31 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -67,3 +67,24 @@ kms_flip@flip-vs-absolute-wf_vblank-interruptible
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-expired-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index fd5d1271115f..5ccc771fbb36 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -104,11 +104,112 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
+kms_cursor_crc@cursor-onscreen-256x256
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+
+kms_cursor_edge_walk@128x128-right-edge
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 PID: 1911 Comm: kworker/u8:3 Not tainted 6.10.0-rc5-g5e7a002eefe5 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffffa2c1abc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000106768000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? mark_held_locks+0x49/0x80
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000
+
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -117,3 +218,4 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index d4c34f364140..6ee51003de3c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2328,6 +2328,31 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
+ struct drm_dp_dpcd_ident *ident)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+
+ return ret < 0 ? ret : 0;
+}
+
+static void drm_dp_dump_desc(struct drm_dp_aux *aux,
+ const char *device_name, const struct drm_dp_desc *desc)
+{
+ const struct drm_dp_dpcd_ident *ident = &desc->ident;
+
+ drm_dbg_kms(aux->drm_dev,
+ "%s: %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ aux->name, device_name,
+ (int)sizeof(ident->oui), ident->oui,
+ (int)strnlen(ident->device_id, sizeof(ident->device_id)), ident->device_id,
+ ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev,
+ desc->quirks);
+}
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2344,28 +2369,49 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
- int ret, dev_id_len;
+ int ret;
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+ ret = drm_dp_read_ident(aux, offset, ident);
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
- dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
-
- drm_dbg_kms(aux->drm_dev,
- "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
- aux->name, is_branch ? "branch" : "sink",
- (int)sizeof(ident->oui), ident->oui, dev_id_len,
- ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
- ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
+ drm_dp_dump_desc(aux, is_branch ? "DP branch" : "DP sink", desc);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dump_lttpr_desc - read and dump the DPCD descriptor for an LTTPR PHY
+ * @aux: DisplayPort AUX channel
+ * @dp_phy: LTTPR PHY instance
+ *
+ * Read the DPCD LTTPR PHY descriptor for @dp_phy and print a debug message
+ * with its details to dmesg.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy)
+{
+ struct drm_dp_desc desc = {};
+ int ret;
+
+ if (drm_WARN_ON(aux->drm_dev, dp_phy < DP_PHY_LTTPR1 || dp_phy > DP_MAX_LTTPR_COUNT))
+ return -EINVAL;
+
+ ret = drm_dp_read_ident(aux, DP_OUI_PHY_REPEATER(dp_phy), &desc.ident);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_dump_desc(aux, drm_dp_phy_name(dp_phy), &desc);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dump_lttpr_desc);
+
+/**
* drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
* @dsc_dpcd: DSC capabilities from DPCD
*
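
As a usage illustration for the new drm_dp_dump_lttpr_desc() export, a driver that has already read the LTTPR common caps could dump every repeater's descriptor roughly as follows. A hedged sketch: example_dump_all_lttprs() and its lttpr_count parameter are hypothetical; only drm_dp_dump_lttpr_desc(), DP_PHY_LTTPR() and drm_dp_phy_name() come from the DP helpers:

/* Hypothetical helper: dump the descriptor of every LTTPR PHY.
 * Assumes lttpr_count was derived from the LTTPR common caps
 * (e.g. via drm_dp_lttpr_count()); errors are only logged.
 */
static void example_dump_all_lttprs(struct drm_dp_aux *aux, int lttpr_count)
{
	enum drm_dp_phy dp_phy;

	for (dp_phy = DP_PHY_LTTPR1; dp_phy < DP_PHY_LTTPR(lttpr_count); dp_phy++)
		if (drm_dp_dump_lttpr_desc(aux, dp_phy) < 0)
			drm_dbg_kms(aux->drm_dev, "no ident for %s\n",
				    drm_dp_phy_name(dp_phy));
}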
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index fc2ceae61db2..a040d7dfced1 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -89,7 +89,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid);
+ guid_t *guid);
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
@@ -801,7 +801,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
int idx = 1;
int i;
- memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);
idx += 16;
repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
idx++;
@@ -829,7 +829,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
idx++;
if (idx > raw->curlen)
goto fail_len;
- memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1029,7 +1029,7 @@ static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mg
msg->req_type = (raw->msg[0] & 0x7f);
if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
- memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ import_guid(&msg->u.nak.guid, &raw->msg[1]);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
return false;
@@ -1078,7 +1078,7 @@ drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1107,7 +1107,7 @@ static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -2174,20 +2174,24 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
{
int ret = 0;
- memcpy(mstb->guid, guid, 16);
+ guid_copy(&mstb->guid, guid);
+
+ if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ u8 buf[UUID_SIZE];
+
+ export_guid(buf, &mstb->guid);
- if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
mstb->port_parent,
- DP_GUID, 16, mstb->guid);
+ DP_GUID, sizeof(buf), buf);
} else {
ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, mstb->guid, 16);
+ DP_GUID, buf, sizeof(buf));
}
}
@@ -2339,7 +2343,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps = 0, ret;
+ int ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
@@ -2372,7 +2376,6 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
*/
drm_modeset_lock(&mgr->base.lock, NULL);
- old_ddps = port->ddps;
changed = port->ddps != port_msg->ddps ||
(port->ddps &&
(port->ldps != port_msg->legacy_device_plug_status ||
@@ -2407,15 +2410,13 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* Reprobe PBN caps on both hotplug, and when re-probing the link
* for our parent mstb
*/
- if (old_ddps != port->ddps || !created) {
- if (port->ddps && !port->input) {
- ret = drm_dp_send_enum_path_resources(mgr, mstb,
- port);
- if (ret == 1)
- changed = true;
- } else {
- port->full_pbn = 0;
- }
+ if (port->ddps && !port->input) {
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
+ if (ret == 1)
+ changed = true;
+ } else {
+ port->full_pbn = 0;
}
ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
@@ -2570,9 +2571,9 @@ out:
return mstb;
}
-static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
- struct drm_dp_mst_branch *mstb,
- const uint8_t *guid)
+static struct drm_dp_mst_branch *
+get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
+ const guid_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -2580,10 +2581,9 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
if (!mstb)
return NULL;
- if (memcmp(mstb->guid, guid, 16) == 0)
+ if (guid_equal(&mstb->guid, guid))
return mstb;
-
list_for_each_entry(port, &mstb->ports, next) {
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
@@ -2596,7 +2596,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- const uint8_t *guid)
+ const guid_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -2692,18 +2692,18 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid)
+static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
{
- u64 salt;
+ queue_work(system_long_wq, &mgr->work);
+}
- if (memchr_inv(guid, 0, 16))
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ guid_t *guid)
+{
+ if (!guid_is_null(guid))
return true;
- salt = get_jiffies_64();
-
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
+ guid_gen(guid);
return false;
}
@@ -2943,7 +2943,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(mgr, reply);
- ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, &reply->guid);
if (ret) {
char buf[64];
@@ -3685,7 +3685,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* Write reset payload */
drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
ret = 0;
} else {
@@ -3724,6 +3724,33 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
}
/**
+ * drm_dp_mst_topology_queue_probe - Queue a topology probe
+ * @mgr: manager to probe
+ *
+ * Queue work to probe the MST topology. Drivers should call this only to
+ * sync the topology's HW->SW state after the MST link's parameters have
+ * changed in a way that could have left the state out of sync. This is the
+ * case for instance when the link rate between the source and the first
+ * downstream branch device has switched between UHBR and non-UHBR rates.
+ * Outside of those cases - for instance when a sink gets plugged into or
+ * unplugged from a port - the SW state is updated automatically via MST UP
+ * message notifications.
+ */
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+
+ if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
+ goto out_unlock;
+
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ drm_dp_mst_queue_probe_work(mgr);
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
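
A hedged sketch of the intended caller: after the main link has been retrained across the UHBR/non-UHBR boundary, the driver resyncs the SW topology state. The wrapper and its parameters are hypothetical; only drm_dp_mst_topology_queue_probe() is from this patch:

/* Hypothetical driver snippet: the SW topology state can be stale after
 * the link rate crossed the UHBR/non-UHBR boundary, so queue a reprobe.
 */
static void example_post_retrain(struct drm_dp_mst_topology_mgr *mgr,
				 bool was_uhbr, bool is_uhbr)
{
	if (was_uhbr != is_uhbr)
		drm_dp_mst_topology_queue_probe(mgr);
}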
+
+/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
*
@@ -3770,8 +3797,9 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync)
{
+ u8 buf[UUID_SIZE];
+ guid_t guid;
int ret;
- u8 guid[16];
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
@@ -3792,13 +3820,15 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (ret != 16) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ import_guid(&guid, buf);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
if (ret) {
drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
goto out_fail;
@@ -3809,7 +3839,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
* state of our in-memory topology back into sync with reality. So,
* restart the probing process as if we're probing a new hub
*/
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
mutex_unlock(&mgr->lock);
if (sync) {
@@ -3976,12 +4006,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
bool hotplug = false, dowork = false;
if (hdr->broadcast) {
- const u8 *guid = NULL;
+ const guid_t *guid = NULL;
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
- guid = msg->u.conn_stat.guid;
+ guid = &msg->u.conn_stat.guid;
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
- guid = msg->u.resource_stat.guid;
+ guid = &msg->u.resource_stat.guid;
if (guid)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
@@ -4963,7 +4993,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
- seq_printf(m, "%c", buf[i]);
+ seq_putc(m, buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
if (dump_dp_payload_table(mgr, buf))
@@ -5569,7 +5599,6 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
* drm_dp_atomic_release_time_slots()
*
* Returns:
- *
* 0 if the new state is valid, negative error code otherwise.
*/
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
@@ -5606,7 +5635,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
* topology object.
*
* RETURNS:
- *
* The MST topology state or error pointer.
*/
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
@@ -5626,7 +5654,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
* topology object.
*
* Returns:
- *
* The old MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
@@ -5651,7 +5678,6 @@ EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
* topology object.
*
* Returns:
- *
* The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 6e516c39a372..0fc99da93afe 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -63,7 +63,6 @@ EXPORT_SYMBOL(__drm_crtc_commit_free);
* hardware and flipped to.
*
* Returns:
- *
* 0 on success, a negative error code otherwise.
*/
int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
@@ -337,7 +336,6 @@ EXPORT_SYMBOL(__drm_atomic_state_free);
* not created by userspace through an IOCTL call.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -518,7 +516,6 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
* is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -828,7 +825,6 @@ EXPORT_SYMBOL(drm_atomic_private_obj_fini);
* object lock to make sure that the state is consistent.
*
* RETURNS:
- *
* Either the allocated state or the error code encoded into a pointer.
*/
struct drm_private_state *
@@ -1061,7 +1057,6 @@ EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder);
* make sure that the state is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -1169,7 +1164,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
* state is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted.
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index fb97b51b38f1..43cdf39019a4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2266,7 +2266,6 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* automatically.
*
* Returns:
- *
* 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
* -ENOMEM on allocation failures and -EINTR when a signal is pending.
*/
@@ -3009,7 +3008,6 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
* don't pass the right state structures to the callbacks.
*
* Returns:
- *
* Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
* waiting for the previous commits has been interrupted.
*/
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d44f055dbe3e..c6af46dd02bf 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -353,8 +353,13 @@ err_reset_bridge:
bridge->encoder = NULL;
list_del(&bridge->chain_node);
- DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
- bridge->of_node, encoder->name, ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
+ bridge->of_node, encoder->name, ret);
+ else
+ dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
+ "failed to attach bridge %pOF to encoder %s\n",
+ bridge->of_node, encoder->name);
return ret;
}
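
dev_err_probe() logs -EPROBE_DEFER at debug level and records the deferral reason, which is why the error path above splits into two branches. A condensed sketch of the usual probe-path pattern (the surrounding driver code is hypothetical):

/* Typical probe-path use: one call logs at the appropriate level
 * (-EPROBE_DEFER goes to debug and the reason is recorded) and
 * passes the error code through.
 */
static int example_attach(struct device *dev, struct drm_encoder *encoder,
			  struct drm_bridge *bridge)
{
	int ret = drm_bridge_attach(encoder, bridge, NULL, 0);

	if (ret)
		return dev_err_probe(dev, ret, "failed to attach bridge\n");
	return 0;
}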
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ab6ab7ff7ea8..fc35f47e2849 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -426,6 +426,8 @@ static void drm_connector_cleanup_action(struct drm_device *dev,
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -474,6 +476,8 @@ EXPORT_SYMBOL(drmm_connector_init);
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -2315,24 +2319,71 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * This property helps select a suitable colorspace based on the sink
- * capability. Modern sink devices support wider gamut like BT2020.
- * This helps switch to BT2020 mode if the BT2020 encoded video stream
- * is being played by the user, same for any other colorspace. Thereby
- * giving a good visual experience to users.
- *
- * The expectation from userspace is that it should parse the EDID
- * and get supported colorspaces. Use this property and switch to the
- * one supported. Sink supported colorspaces should be retrieved by
- * userspace from EDID and driver will not explicitly expose them.
- *
- * Basically the expectation from userspace is:
- * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
- * colorspace
- * - Set this new property to let the sink know what it
- * converted the CRTC output to.
- * - This property is just to inform sink what colorspace
- * source is trying to drive.
+ * This property is used to inform the driver about the color encoding
+ * user space configured the pixel operation properties to produce.
+ * The variants set the colorimetry, transfer characteristics, and which
+ * YCbCr conversion should be used when necessary.
+ * The transfer characteristics from HDR_OUTPUT_METADATA take precedence
+ * over this property.
+ * User space always configures the pixel operation properties to produce
+ * full quantization range data (see the Broadcast RGB property).
+ *
+ * Drivers inform the sink about what colorimetry, transfer
+ * characteristics, YCbCr conversion, and quantization range to expect
+ * (this can depend on the output mode, output format and other
+ * properties). Drivers also convert the user space provided data to what
+ * the sink expects.
+ *
+ * By parsing the EDID, user space has to check that the sink supports all
+ * of the colorimetries that the driver is allowed to pick.
+ *
+ * For historical reasons this property exposes a number of variants which
+ * result in undefined behavior.
+ *
+ * Default:
+ * The behavior is driver-specific.
+ *
+ * BT2020_RGB:
+ *
+ * BT2020_YCC:
+ * User space configures the pixel operation properties to produce
+ * RGB content with Rec. ITU-R BT.2020 colorimetry, Rec.
+ * ITU-R BT.2020 (Table 4, RGB) transfer characteristics and full
+ * quantization range.
+ * User space can use the HDR_OUTPUT_METADATA property to set the
+ * transfer characteristics to PQ (Rec. ITU-R BT.2100 Table 4) or
+ * HLG (Rec. ITU-R BT.2100 Table 5) in which case, user space
+ * configures pixel operation properties to produce content with
+ * the respective transfer characteristics.
+ * User space has to make sure the sink supports Rec.
+ * ITU-R BT.2020 R'G'B' and Rec. ITU-R BT.2020 Y'C'BC'R
+ * colorimetry.
+ * Drivers can configure the sink to use an RGB format, tell the
+ * sink to expect Rec. ITU-R BT.2020 R'G'B' colorimetry and convert
+ * to the appropriate quantization range.
+ * Drivers can configure the sink to use a YCbCr format, tell the
+ * sink to expect Rec. ITU-R BT.2020 Y'C'BC'R colorimetry, convert
+ * to YCbCr using the Rec. ITU-R BT.2020 non-constant luminance
+ * conversion matrix and convert to the appropriate quantization
+ * range.
+ * The variants BT2020_RGB and BT2020_YCC are equivalent and the
+ * driver chooses between RGB and YCbCr on its own.
+ *
+ * SMPTE_170M_YCC:
+ * BT709_YCC:
+ * XVYCC_601:
+ * XVYCC_709:
+ * SYCC_601:
+ * opYCC_601:
+ * opRGB:
+ * BT2020_CYCC:
+ * DCI-P3_RGB_D65:
+ * DCI-P3_RGB_Theater:
+ * RGB_WIDE_FIXED:
+ * RGB_WIDE_FLOAT:
+ *
+ * BT601_YCC:
+ * The behavior is undefined.
*
* Because HDMI and DP have different colorspaces,
* drm_mode_create_hdmi_colorspace_property() is used for HDMI connector and
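
On the driver side, the Colorspace property documented above is typically created with a bitmask restricted to the colorimetries the driver can honor. A minimal sketch (the chosen mask and function placement are assumptions, not part of this patch):

/* Hypothetical connector init: advertise only the BT.2020 variants.
 * supported_colorspaces is a bitmask over enum drm_colorspace values.
 */
static int example_attach_colorspace(struct drm_connector *connector)
{
	u32 mask = BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
		   BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
	int ret;

	ret = drm_mode_create_hdmi_colorspace_property(connector, mask);
	if (ret)
		return ret;

	drm_connector_attach_colorspace_property(connector);
	return 0;
}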
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 1f73b8d6d750..89706aa8232f 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -315,4 +315,19 @@ drm_edid_load_firmware(struct drm_connector *connector)
}
#endif
+/* drm_panic.c */
+#ifdef CONFIG_DRM_PANIC
+bool drm_panic_is_enabled(struct drm_device *dev);
+void drm_panic_register(struct drm_device *dev);
+void drm_panic_unregister(struct drm_device *dev);
+void drm_panic_init(void);
+void drm_panic_exit(void);
+#else
+static inline bool drm_panic_is_enabled(struct drm_device *dev) { return false; }
+static inline void drm_panic_register(struct drm_device *dev) {}
+static inline void drm_panic_unregister(struct drm_device *dev) {}
+static inline void drm_panic_init(void) {}
+static inline void drm_panic_exit(void) {}
+#endif
+
#endif /* __DRM_CRTC_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 9d01d762801f..b4fd43783c50 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -33,9 +33,6 @@ validate_displayid(const u8 *displayid, int length, int idx)
if (IS_ERR(base))
return base;
- DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
- base->rev, base->bytes, base->prod_id, base->ext_count);
-
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 93543071a500..ac30b0ec9d93 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,6 +34,7 @@
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <linux/xarray.h>
#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
@@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-static DEFINE_SPINLOCK(drm_minor_lock);
-static struct idr drm_minors_idr;
+DEFINE_XARRAY_ALLOC(drm_minors_xa);
/*
* If the drm core fails to init for whatever reason,
@@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
* registered and unregistered dynamically according to device-state.
*/
+static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
+{
+ if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
+ return &drm_minors_xa;
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ else if (type == DRM_MINOR_ACCEL)
+ return &accel_minors_xa;
+#endif
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
enum drm_minor_type type)
{
@@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
struct drm_minor *minor = data;
- unsigned long flags;
WARN_ON(dev != minor->dev);
put_device(minor->kdev);
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_remove(minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
+ xa_erase(drm_minor_get_xa(minor->type), minor->index);
}
+/*
+ * DRM used to support 64 devices; for backwards compatibility we need to maintain
+ * the minor allocation scheme where minors 0-63 are primary nodes, 64-127 are
+ * control nodes, and 128-191 are render nodes.
+ * After reaching that limit, minors are allocated dynamically - first-come,
+ * first-served.
+ * Accel nodes use a distinct major, so their minors are allocated in a
+ * continuous 0-MAX range.
+ */
+#define DRM_MINOR_LIMIT(t) ({ \
+ typeof(t) _t = (t); \
+ _t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
+})
+#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
+
static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
int r;
minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
@@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
minor->type = type;
minor->dev = dev;
- idr_preload(GFP_KERNEL);
- if (type == DRM_MINOR_ACCEL) {
- r = accel_minor_alloc();
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- r = idr_alloc(&drm_minors_idr,
- NULL,
- 64 * type,
- 64 * (type + 1),
- GFP_NOWAIT);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
- idr_preload_end();
-
+ r = xa_alloc(drm_minor_get_xa(type), &minor->index,
+ NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
+ if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
+ r = xa_alloc(&drm_minors_xa, &minor->index,
+ NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
if (r < 0)
return r;
- minor->index = r;
-
r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
if (r)
return r;
@@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
+ void *entry;
int ret;
DRM_DEBUG("\n");
@@ -186,13 +193,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
goto err_debugfs;
/* replace NULL with @minor so lookups will succeed from now on */
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_replace(minor, minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, minor, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
+ entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
+ if (xa_is_err(entry)) {
+ ret = xa_err(entry);
+ goto err_debugfs;
}
+ WARN_ON(entry);
DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
@@ -205,20 +211,13 @@ err_debugfs:
static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
minor = *drm_minor_get_slot(dev, type);
if (!minor || !device_is_registered(minor->kdev))
return;
/* replace @minor with NULL so lookups will fail from now on */
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_replace(NULL, minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, NULL, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
+ xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
device_del(minor->kdev);
dev_set_drvdata(minor->kdev, NULL); /* safety belt */
@@ -234,16 +233,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type typ
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
*/
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
+struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
{
struct drm_minor *minor;
- unsigned long flags;
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor = idr_find(&drm_minors_idr, minor_id);
+ xa_lock(minor_xa);
+ minor = xa_load(minor_xa, minor_id);
if (minor)
drm_dev_get(minor->dev);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
+ xa_unlock(minor_xa);
if (!minor) {
return ERR_PTR(-ENODEV);
@@ -1036,7 +1034,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- minor = drm_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -1067,11 +1065,12 @@ static const struct file_operations drm_stub_fops = {
static void drm_core_exit(void)
{
drm_privacy_screen_lookup_exit();
+ drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
- idr_destroy(&drm_minors_idr);
+ WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
}
@@ -1080,7 +1079,6 @@ static int __init drm_core_init(void)
int ret;
drm_connector_ida_init();
- idr_init(&drm_minors_idr);
drm_memcpy_init_early();
ret = drm_sysfs_init();
@@ -1099,6 +1097,8 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
+ drm_panic_init();
+
drm_privacy_screen_lookup_init();
drm_core_init_complete = true;
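
The IDR-plus-spinlock pair becomes a single allocating XArray, keeping the legacy 64-wide minor windows and spilling into the extended range once a window fills up. A standalone sketch of that try-then-fallback allocation pattern (names and limits here are illustrative):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_xa);

/* Sketch of the range-then-fallback allocation used for DRM minors. */
static int example_alloc_minor(void *entry, u32 *id)
{
	/* try the legacy 64-wide window first ... */
	int r = xa_alloc(&example_xa, id, entry, XA_LIMIT(0, 63), GFP_KERNEL);

	/* ... and spill into the extended range once it is full */
	if (r == -EBUSY)
		r = xa_alloc(&example_xa, id, entry, XA_LIMIT(192, 1023),
			     GFP_KERNEL);
	return r;
}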
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f68a41eeb1fa..855beafb76ff 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1817,7 +1817,7 @@ static int edid_block_tag(const void *_block)
static bool edid_block_is_zero(const void *edid)
{
- return !memchr_inv(edid, 0, EDID_LENGTH);
+ return mem_is_zero(edid, EDID_LENGTH);
}
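
mem_is_zero() is the <linux/string.h> helper that wraps the !memchr_inv(..., 0, ...) idiom. A sketch of the equivalence (illustrative only):

#include <linux/string.h>

/* mem_is_zero(s, n) is equivalent to !memchr_inv(s, 0, n): true iff
 * the first n bytes of s are all zero.
 */
static bool example_block_is_blank(const u8 *buf, size_t len)
{
	return mem_is_zero(buf, len);
}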
static bool drm_edid_eq(const struct drm_edid *drm_edid,
@@ -1966,22 +1966,14 @@ static void edid_block_dump(const char *level, const void *block, int block_num)
block, EDID_LENGTH, false);
}
-/**
- * drm_edid_block_valid - Sanity check the EDID block (base or extension)
- * @_block: pointer to raw EDID block
- * @block_num: type of block to validate (0 for base, extension otherwise)
- * @print_bad_edid: if true, dump bad EDID blocks to the console
- * @edid_corrupt: if true, the header or checksum is invalid
- *
+/*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
- *
- * Return: True if the block is valid, false otherwise.
*/
-bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
- bool *edid_corrupt)
+static bool drm_edid_block_valid(void *_block, int block_num, bool print_bad_edid,
+ bool *edid_corrupt)
{
- struct edid *block = (struct edid *)_block;
+ struct edid *block = _block;
enum edid_block_status status;
bool is_base_block = block_num == 0;
bool valid;
@@ -2024,7 +2016,6 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
return valid;
}
-EXPORT_SYMBOL(drm_edid_block_valid);
/**
* drm_edid_is_valid - sanity check EDID data
@@ -6629,6 +6620,11 @@ static void update_displayid_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] DisplayID extension version 0x%02x, primary use 0x%02x\n",
+ connector->base.id, connector->name,
+ displayid_version(&iter),
+ displayid_primary_use(&iter));
if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
(displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 2da094bdf8a4..18e366cc4993 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -145,8 +145,7 @@ static int drm_exec_obj_locked(struct drm_exec *exec,
size_t size = exec->max_objects * sizeof(void *);
void *tmp;
- tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE,
- GFP_KERNEL);
+ tmp = kvrealloc(exec->objects, size + PAGE_SIZE, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
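
This hunk adapts to the kvrealloc() API change that dropped the old-size argument, making it behave like krealloc(). A hedged sketch of the new calling convention (the wrapper is hypothetical):

/* New calling convention: like krealloc(), kvrealloc() takes only the
 * new size. Keep the old pointer until the call succeeds so nothing
 * is leaked on failure.
 */
static int example_grow(void **objs, size_t new_size)
{
	void *tmp = kvrealloc(*objs, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *objs is still valid here */

	*objs = tmp;
	return 0;
}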
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 56ac37ea2f27..29c53f9f449c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -44,6 +44,7 @@
#include <drm/drm_vblank.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
@@ -88,14 +89,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* interfaces. Drivers that use one of the shared memory managers, TTM, SHMEM,
* DMA, should instead use the corresponding fbdev emulation.
*
- * Existing fbdev implementations should restore the fbdev console by using
- * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
- * They should also notify the fb helper code from updates to the output
- * configuration by using drm_fb_helper_output_poll_changed() as their
- * &drm_mode_config_funcs.output_poll_changed callback. New implementations
- * of fbdev should be build on top of struct &drm_client_funcs, which handles
- * this automatically. Setting the old callbacks should be avoided.
- *
* For suspend/resume consider using drm_mode_config_helper_suspend() and
* drm_mode_config_helper_resume() which takes care of fbdev as well.
*
@@ -259,12 +252,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
+ * This helper should be called from fbdev emulation's &drm_client_funcs.restore
+ * callback. It ensures that the user isn't greeted with a black screen when the
+ * userspace compositor releases the display device.
*
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
@@ -527,6 +520,7 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
fb_helper->info = info;
info->skip_vt_switch = true;
+ info->skip_panic = drm_panic_is_enabled(fb_helper->dev);
return info;
err_release:
@@ -2001,26 +1995,11 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
- * This function can be used as the &drm_driver->lastclose callback for drivers
- * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked().
+ * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked()
+ * instead.
*/
void drm_fb_helper_lastclose(struct drm_device *dev)
{
drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_lastclose);
-
-/**
- * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed
- * helper for fbdev emulation
- * @dev: DRM device
- *
- * This function can be used as the
- * &drm_mode_config_funcs.output_poll_changed callback for drivers that only
- * need to call drm_fbdev.hotplug_event().
- */
-void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
- drm_fb_helper_hotplug_event(dev->fb_helper);
-}
-EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 714e42b05108..07e493d14d0c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
@@ -62,15 +63,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
if (dev->driver->load || dev->driver->unload)
return true;
- /*
- * Drivers with the lastclose callback assume that it's synchronized
- * against concurrent opens, which again needs the BKL. The proper fix
- * is to use the drm_client infrastructure with proper locking for each
- * client.
- */
- if (dev->driver->lastclose)
- return true;
-
return false;
}
@@ -318,6 +310,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
+ if (WARN_ON_ONCE(!(filp->f_op->fop_flags & FOP_UNSIGNED_OFFSET)))
+ return -EINVAL;
drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
current->comm, task_pid_nr(current), minor->index);
@@ -335,7 +329,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
}
filp->private_data = priv;
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
priv->filp = filp;
mutex_lock(&dev->filelist_mutex);
@@ -355,7 +348,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
* resources for it. It also calls the &drm_driver.open driver callback.
*
* RETURNS:
- *
* 0 on success or negative errno value on failure.
*/
int drm_open(struct inode *inode, struct file *filp)
@@ -364,7 +356,7 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
- minor = drm_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -395,15 +387,12 @@ err_undo:
}
EXPORT_SYMBOL(drm_open);
-void drm_lastclose(struct drm_device * dev)
+static void drm_lastclose(struct drm_device *dev)
{
- drm_dbg_core(dev, "\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- drm_dbg_core(dev, "driver lastclose completed\n");
-
drm_client_dev_restore(dev);
+
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_process_delayed_switch();
}
/**
@@ -412,12 +401,11 @@ void drm_lastclose(struct drm_device * dev)
* @filp: file pointer.
*
* This function must be used by drivers as their &file_operations.release
- * method. It frees any resources associated with the open file, and calls the
- * &drm_driver.postclose driver callback. If this is the last open file for the
- * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
+ * method. It frees any resources associated with the open file. If this
+ * is the last open file for the DRM device, it also restores the active
+ * in-kernel DRM client.
*
* RETURNS:
- *
* Always succeeds and returns 0.
*/
int drm_release(struct inode *inode, struct file *filp)
@@ -484,12 +472,10 @@ void drm_file_update_pid(struct drm_file *filp)
*
* This function may be used by drivers as their &file_operations.release
* method. It frees any resources associated with the open file prior to taking
- * the drm_global_mutex, which then calls the &drm_driver.postclose driver
- * callback. If this is the last open file for the DRM device also proceeds to
- * call the &drm_driver.lastclose driver callback.
+ * the drm_global_mutex. If this is the last open file for the DRM device, it
+ * then restores the active in-kernel DRM client.
*
* RETURNS:
- *
* Always succeeds and returns 0.
*/
int drm_release_noglobal(struct inode *inode, struct file *filp)
@@ -532,7 +518,6 @@ EXPORT_SYMBOL(drm_release_noglobal);
* safety.
*
* RETURNS:
- *
* Number of bytes read (always aligned to full events, and can be 0) or a
* negative error code on failure.
*/
@@ -618,7 +603,6 @@ EXPORT_SYMBOL(drm_read);
* See also drm_read().
*
* RETURNS:
- *
* Mask of POLL flags indicating the current status of the file.
*/
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
@@ -656,7 +640,6 @@ EXPORT_SYMBOL(drm_poll);
* already hold &drm_device.event_lock.
*
* RETURNS:
- *
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init_locked(struct drm_device *dev,
@@ -698,7 +681,6 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked);
* drm_event_reserve_init_locked() instead.
*
* RETURNS:
- *
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d4bbc5d109c8..149b8e25da5b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -689,7 +689,6 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
* For a single handle lookup, use drm_gem_object_lookup().
*
* Returns:
- *
* @objs filled in with GEM object pointers. Returned GEM objects need to be
* released with drm_gem_object_put(). -ENOENT is returned on a lookup
* failure. 0 is returned on success.
@@ -737,12 +736,11 @@ EXPORT_SYMBOL(drm_gem_objects_lookup);
* @filp: DRM file private date
* @handle: userspace handle
*
- * Returns:
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*
+ * Returns:
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
- *
- * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
@@ -763,7 +761,6 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
* @timeout: timeout value in jiffies or zero to return immediately
*
* Returns:
- *
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than 0 on success.
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 690505a1f7a5..1705bfc90b1e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -53,7 +53,6 @@ extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
-void drm_lastclose(struct drm_device *dev);
#ifdef CONFIG_PCI
@@ -81,10 +80,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
-/* drm_drv.c */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
-
/* drm_managed.c */
void drm_managed_release(struct drm_device *dev);
void drmm_add_final_kfree(struct drm_device *dev, void *container);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 969cfd5a01ae..2bc3973d35a1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -603,6 +603,8 @@ EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
* mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_turn_on_peripheral_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
@@ -652,6 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
* @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries
*
* Enable or disable Display Stream Compression on the peripheral.
+ * This function is deprecated. Use mipi_dsi_compression_mode_ext_multi() instead.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -703,6 +706,7 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
* @pps: VESA DSC 1.1 Picture Parameter Set
*
* Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
+ * This function is deprecated. Use mipi_dsi_picture_parameter_set_multi() instead.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -1037,6 +1041,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_read);
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_nop_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
@@ -1055,6 +1061,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_nop);
* mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_soft_reset_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
@@ -1124,6 +1132,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
* display module except interface communication
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_enter_sleep_mode_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
@@ -1143,6 +1153,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
* module
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_exit_sleep_mode_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
@@ -1162,6 +1174,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
* display device
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_off_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
@@ -1181,6 +1195,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
* display device
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_on_multi() instead.
+ *
 * Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
@@ -1202,6 +1218,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
* @start: first column of frame memory
* @end: last column of frame memory
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_column_address_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
@@ -1226,6 +1245,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
* @start: first page of frame memory
* @end: last page of frame memory
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_page_address_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
@@ -1268,6 +1290,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
* @dsi: DSI peripheral device
* @mode: the Tearing Effect Output Line mode
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_tear_on_multi() instead.
+ *
 * Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
@@ -1291,6 +1315,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
* @dsi: DSI peripheral device
* @format: pixel format
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_pixel_format_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
@@ -1312,6 +1339,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
* @dsi: DSI peripheral device
* @scanline: scanline to use as trigger
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_tear_scanline_multi()
+ * instead.
+ *
 * Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
@@ -1334,6 +1364,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
* @dsi: DSI peripheral device
* @brightness: brightness value
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_brightness_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
@@ -1639,6 +1672,198 @@ void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on_multi);
+/**
+ * mipi_dsi_turn_on_peripheral_multi() - sends a Turn On Peripheral command
+ * @ctx: Context for multiple DSI transactions
+ *
+ * Like mipi_dsi_turn_on_peripheral() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to turn on peripheral: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi);
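+
+/*
+ * A minimal usage sketch of the _multi() pattern (hypothetical driver
+ * code, not part of this patch): each helper is a no-op once
+ * ctx.accum_err is set, so a whole init sequence needs only one error
+ * check at the end.
+ *
+ *	struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+ *
+ *	mipi_dsi_dcs_soft_reset_multi(&ctx);
+ *	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ *	mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT);
+ *	mipi_dsi_dcs_set_display_on_multi(&ctx);
+ *
+ *	return ctx.accum_err;
+ */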
+
+/**
+ * mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module
+ * @ctx: Context for multiple DSI transactions
+ *
+ * Like mipi_dsi_dcs_soft_reset() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+		dev_err(dev, "Failed to soft reset the display: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset_multi);
+
+/**
+ * mipi_dsi_dcs_set_display_brightness_multi() - sets the brightness value of
+ * the display
+ * @ctx: Context for multiple DSI transactions
+ * @brightness: brightness value
+ *
+ * Like mipi_dsi_dcs_set_display_brightness() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to write display brightness: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_multi);
+
+/**
+ * mipi_dsi_dcs_set_pixel_format_multi() - sets the pixel format for the RGB image
+ * data used by the interface
+ * @ctx: Context for multiple DSI transactions
+ * @format: pixel format
+ *
+ * Like mipi_dsi_dcs_set_pixel_format() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, format);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set pixel format: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format_multi);
+
+/**
+ * mipi_dsi_dcs_set_column_address_multi() - define the column extent of the
+ * frame memory accessed by the host processor
+ * @ctx: Context for multiple DSI transactions
+ * @start: first column of frame memory
+ * @end: last column of frame memory
+ *
+ * Like mipi_dsi_dcs_set_column_address() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, start, end);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set column address: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address_multi);
+
+/**
+ * mipi_dsi_dcs_set_page_address_multi() - define the page extent of the
+ * frame memory accessed by the host processor
+ * @ctx: Context for multiple DSI transactions
+ * @start: first page of frame memory
+ * @end: last page of frame memory
+ *
+ * Like mipi_dsi_dcs_set_page_address() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, start, end);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set page address: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address_multi);
+
+/**
+ * mipi_dsi_dcs_set_tear_scanline_multi() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
+ * @ctx: Context for multiple DSI transactions
+ * @scanline: scanline to use as trigger
+ *
+ * Like mipi_dsi_dcs_set_tear_scanline() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, scanline);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set tear scanline: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline_multi);
+
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 568972258222..37d2e0a4ef4b 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -456,6 +456,8 @@ int drmm_mode_config_init(struct drm_device *dev)
if (ret == -EDEADLK)
ret = drm_modeset_backoff(&modeset_ctx);
+ might_fault();
+
ww_acquire_init(&resv_ctx, &reservation_ww_class);
ret = dma_resv_lock(&resv, &resv_ctx);
if (ret == -EDEADLK)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 1a0890083aee..6ba167a33461 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -539,7 +539,6 @@ static int fill_analog_mode(struct drm_device *dev,
* to reach those resolutions.
*
* Returns:
- *
* A pointer to the mode, allocated with drm_mode_create(). Returns NULL
* on error.
*/
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index cfbe020de54e..19ab0a794add 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -161,6 +161,15 @@ int drm_panel_unprepare(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+ * If you are seeing the warning below it likely means one of two things:
+ * - Your panel driver incorrectly calls drm_panel_unprepare() in its
+ *   shutdown routine. You should remove that call.
+ * - You are using panel-edp or panel-simple and your DRM modeset
+ * driver's shutdown() callback happened after the panel's shutdown().
+ * In this case the warning is harmless though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
return 0;
@@ -245,6 +254,15 @@ int drm_panel_disable(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+ * If you are seeing the warning below it likely means one of two things:
+ * - Your panel driver incorrectly calls drm_panel_disable() in its
+ *   shutdown routine. You should remove that call.
+ * - You are using panel-edp or panel-simple and your DRM modeset
+ * driver's shutdown() callback happened after the panel's shutdown().
+ * In this case the warning is harmless though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
return 0;
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 948aed00595e..74412b7bf936 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -18,6 +18,8 @@
#include <linux/overflow.h>
#include <linux/printk.h>
#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/zlib.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
@@ -26,6 +28,9 @@
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
+#include <drm/drm_rect.h>
+
+#include "drm_crtc_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@@ -76,11 +81,15 @@ struct drm_panic_line {
#define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s}
static struct drm_panic_line panic_msg[] = {
- PANIC_LINE("KERNEL PANIC !"),
+ PANIC_LINE("KERNEL PANIC!"),
PANIC_LINE(""),
PANIC_LINE("Please reboot your computer."),
+ PANIC_LINE(""),
+ PANIC_LINE(""), /* will be replaced by the panic description */
};
+static const size_t panic_msg_lines = ARRAY_SIZE(panic_msg);
+
static const struct drm_panic_line logo_ascii[] = {
PANIC_LINE(" .--. _"),
PANIC_LINE(" |o_o | | |"),
@@ -91,6 +100,8 @@ static const struct drm_panic_line logo_ascii[] = {
PANIC_LINE(" \\___)=(___/"),
};
+static const size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii);
+
#if defined(CONFIG_LOGO) && !defined(MODULE)
static const struct linux_logo *logo_mono;
@@ -249,20 +260,20 @@ static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, i
static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u16 fg16)
+ unsigned int scale, u16 fg16)
{
unsigned int y, x;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
}
static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u32 fg32)
+ unsigned int scale, u32 fg32)
{
unsigned int y, x;
@@ -270,7 +281,7 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
for (x = 0; x < width; x++) {
u32 off = y * dpitch + x * 3;
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) {
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
/* write blue-green-red to output in little endianness */
iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
@@ -283,24 +294,25 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u32 fg32)
+ unsigned int scale, u32 fg32)
{
unsigned int y, x;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
}
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
- const u8 *sbuf8, unsigned int spitch, u32 fg_color)
+ const u8 *sbuf8, unsigned int spitch, unsigned int scale,
+ u32 fg_color)
{
unsigned int y, x;
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@@ -310,18 +322,22 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
* @clip: destination rectangle
* @sbuf8: source buffer, in monochrome format, 8 pixels per byte.
* @spitch: source pitch in bytes
+ * @scale: integer scale, the source buffer is @scale times smaller than the
+ *        destination rectangle
* @fg_color: foreground color, in destination format
*
* This can be used to draw a font character, which is a monochrome image, to a
 * framebuffer in another supported format.
*/
static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
- const u8 *sbuf8, unsigned int spitch, u32 fg_color)
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int scale, u32 fg_color)
{
struct iosys_map map;
if (sb->set_pixel)
- return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, fg_color);
+ return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -329,15 +345,15 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
@@ -477,39 +493,51 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
for (j = 0; j < line_len; j++) {
src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
- drm_panic_blit(sb, &rec, src, font_pitch, color);
+ drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
}
}
}
+static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *font)
+{
+ if (logo_mono) {
+ drm_rect_init(rect, 0, 0, logo_mono->width, logo_mono->height);
+ } else {
+ int logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width;
+
+ drm_rect_init(rect, 0, 0, logo_width, logo_ascii_lines * font->height);
+ }
+}
+
+static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
+ const struct font_desc *font, u32 fg_color)
+{
+ if (logo_mono)
+ drm_panic_blit(sb, rect, logo_mono->data,
+ DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
+ else
+ draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, rect,
+ fg_color);
+}
+
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
- size_t msg_lines = ARRAY_SIZE(panic_msg);
- size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii);
u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
- unsigned int logo_width, logo_height;
+ unsigned int msg_width, msg_height;
if (!font)
return;
r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+ drm_panic_logo_rect(&r_logo, font);
- if (logo_mono) {
- logo_width = logo_mono->width;
- logo_height = logo_mono->height;
- } else {
- logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width;
- logo_height = logo_ascii_lines * font->height;
- }
-
- r_logo = DRM_RECT_INIT(0, 0, logo_width, logo_height);
- r_msg = DRM_RECT_INIT(0, 0,
- min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width),
- min(msg_lines * font->height, sb->height));
+ msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width);
+ msg_height = min(panic_msg_lines * font->height, sb->height);
+ r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height);
/* Center the panic message */
drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2);
@@ -517,16 +545,10 @@ static void draw_panic_static_user(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if ((r_msg.x1 >= logo_width || r_msg.y1 >= logo_height) &&
- logo_width <= sb->width && logo_height <= sb->height) {
- if (logo_mono)
- drm_panic_blit(sb, &r_logo, logo_mono->data, DIV_ROUND_UP(logo_width, 8),
- fg_color);
- else
- draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, &r_logo,
- fg_color);
- }
- draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color);
+ if (!drm_rect_overlap(&r_logo, &r_msg))
+ drm_panic_logo_draw(sb, &r_logo, font, fg_color);
+
+ draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
}
/*
@@ -608,6 +630,233 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
}
}
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+/*
+ * It is unwise to allocate memory in the panic callback, so the buffers are
+ * pre-allocated. Only two buffers and the zlib workspace are needed,
+ * used as follows:
+ * 1) kmsg messages are dumped in buffer1
+ * 2) kmsg is zlib-compressed into buffer2
+ * 3) compressed kmsg is encoded as QR-code Numeric stream in buffer1
+ * 4) QR-code image is generated in buffer2
+ * The maximum QR code size is V40 (177x177): 4071 bytes for the image, and
+ * 2956 bytes for the data segments.
+ *
+ * Typically, ~7500 bytes of kmsg are compressed into 2800 bytes, which fits in
+ * a V40 QR-code (177x177).
+ *
+ * If CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL is not set, the kmsg data will be put
+ * directly in the QR code.
+ * 1) kmsg messages are dumped in buffer1
+ * 2) kmsg message is encoded as byte stream in buffer2
+ * 3) QR-code image is generated in buffer1
+ */
+
+static uint panic_qr_version = CONFIG_DRM_PANIC_SCREEN_QR_VERSION;
+module_param(panic_qr_version, uint, 0644);
+MODULE_PARM_DESC(panic_qr_version, "maximum version (size) of the QR code");
+
+#define MAX_QR_DATA 2956
+#define MAX_ZLIB_RATIO 3
+#define QR_BUFFER1_SIZE (MAX_ZLIB_RATIO * MAX_QR_DATA) /* Must also be > 4071 */
+#define QR_BUFFER2_SIZE 4096
+#define QR_MARGIN 4 /* 4 modules of foreground color around the qr code */
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
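+
+/*
+ * MAX_ZLIB_RATIO assumes the kmsg text compresses by at least 3:1. If the
+ * compressed output still exceeds the QR code capacity, the oldest kmsg
+ * line is dropped and compression is retried (see the try_again loop in
+ * drm_panic_get_qr_code_url() below).
+ */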
+
+static char *qrbuf1;
+static char *qrbuf2;
+static struct z_stream_s stream;
+
+static void __init drm_panic_qr_init(void)
+{
+ qrbuf1 = kmalloc(QR_BUFFER1_SIZE, GFP_KERNEL);
+ qrbuf2 = kmalloc(QR_BUFFER2_SIZE, GFP_KERNEL);
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ GFP_KERNEL);
+}
+
+static void drm_panic_qr_exit(void)
+{
+ kfree(qrbuf1);
+ qrbuf1 = NULL;
+ kfree(qrbuf2);
+ qrbuf2 = NULL;
+ kfree(stream.workspace);
+ stream.workspace = NULL;
+}
+
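+/* Both are implemented in Rust; see drm_panic_qr.rs, added later in this diff. */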
+extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+
+extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+ u8 *tmp, size_t tmp_size);
+
+static int drm_panic_get_qr_code_url(u8 **qr_image)
+{
+ struct kmsg_dump_iter iter;
+ char url[256];
+ size_t kmsg_len, max_kmsg_size;
+ char *kmsg;
+ int max_qr_data_size, url_len;
+
+ url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=",
+ utsname()->machine, utsname()->release);
+
+ max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len);
+ max_kmsg_size = min(MAX_ZLIB_RATIO * max_qr_data_size, QR_BUFFER1_SIZE);
+
+ /* get kmsg to buffer 1 */
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
+
+ if (!kmsg_len)
+ return -ENODATA;
+ kmsg = qrbuf1;
+
+try_again:
+ if (zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
+ return -EINVAL;
+
+ stream.next_in = kmsg;
+ stream.avail_in = kmsg_len;
+ stream.total_in = 0;
+ stream.next_out = qrbuf2;
+ stream.avail_out = QR_BUFFER2_SIZE;
+ stream.total_out = 0;
+
+ if (zlib_deflate(&stream, Z_FINISH) != Z_STREAM_END)
+ return -EINVAL;
+
+ if (zlib_deflateEnd(&stream) != Z_OK)
+ return -EINVAL;
+
+ if (stream.total_out > max_qr_data_size) {
+ /* too much data for the QR code, so skip the first line and try again */
+ kmsg = strchr(kmsg, '\n');
+ if (!kmsg)
+ return -EINVAL;
+ /* skip the first \n */
+ kmsg += 1;
+ kmsg_len = strlen(kmsg);
+ goto try_again;
+ }
+ *qr_image = qrbuf2;
+
+ /* generate qr code image in buffer2 */
+ return drm_panic_qr_generate(url, qrbuf2, stream.total_out, QR_BUFFER2_SIZE,
+ qrbuf1, QR_BUFFER1_SIZE);
+}
+
+static int drm_panic_get_qr_code_raw(u8 **qr_image)
+{
+ struct kmsg_dump_iter iter;
+ size_t kmsg_len;
+ size_t max_kmsg_size = min(drm_panic_qr_max_data_size(panic_qr_version, 0),
+ QR_BUFFER1_SIZE);
+
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
+ if (!kmsg_len)
+ return -ENODATA;
+
+ *qr_image = qrbuf1;
+ return drm_panic_qr_generate(NULL, qrbuf1, kmsg_len, QR_BUFFER1_SIZE,
+ qrbuf2, QR_BUFFER2_SIZE);
+}
+
+static int drm_panic_get_qr_code(u8 **qr_image)
+{
+ if (strlen(CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL) > 0)
+ return drm_panic_get_qr_code_url(qr_image);
+ else
+ return drm_panic_get_qr_code_raw(qr_image);
+}
+
+/*
+ * Draw the panic message at the center of the screen, with a QR code.
+ */
+static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+ u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
+ unsigned int max_qr_size, scale;
+ unsigned int msg_width, msg_height;
+ int qr_width, qr_canvas_width, qr_pitch, v_margin;
+ u8 *qr_image;
+
+ if (!font || !qrbuf1 || !qrbuf2 || !stream.workspace)
+ return -ENOMEM;
+
+ r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+
+ drm_panic_logo_rect(&r_logo, font);
+
+ msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width);
+ msg_height = min(panic_msg_lines * font->height, sb->height);
+ r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height);
+
+ max_qr_size = min(3 * sb->width / 4, 3 * sb->height / 4);
+
+ qr_width = drm_panic_get_qr_code(&qr_image);
+ if (qr_width <= 0)
+ return -ENOSPC;
+
+ qr_canvas_width = qr_width + QR_MARGIN * 2;
+ scale = max_qr_size / qr_canvas_width;
+	/* The QR code is not readable if not scaled by at least 2 */
+ if (scale < 2)
+ return -ENOSPC;
+
+ pr_debug("QR width %d and scale %d\n", qr_width, scale);
+ r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
+
+ v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+
+ drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
+ r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
+ qr_width * scale, qr_width * scale);
+
+ /* Center the panic message */
+ drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2,
+ 3 * v_margin + drm_rect_height(&r_qr_canvas));
+
+ /* Fill with the background color, and draw text on top */
+ drm_panic_fill(sb, &r_screen, bg_color);
+
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ drm_panic_logo_draw(sb, &r_logo, font, fg_color);
+
+ draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
+
+ /* Draw the qr code */
+ qr_pitch = DIV_ROUND_UP(qr_width, 8);
+ drm_panic_fill(sb, &r_qr_canvas, fg_color);
+ drm_panic_fill(sb, &r_qr, bg_color);
+ drm_panic_blit(sb, &r_qr, qr_image, qr_pitch, scale, fg_color);
+ return 0;
+}
+
+static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ if (_draw_panic_static_qr_code(sb))
+ draw_panic_static_user(sb);
+}
+#else
+static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ draw_panic_static_user(sb);
+}
+
+static void drm_panic_qr_init(void) {}
+static void drm_panic_qr_exit(void) {}
+#endif
+
/*
* drm_panic_is_format_supported()
* @format: a fourcc color code
@@ -626,12 +875,38 @@ static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
{
if (!strcmp(drm_panic_screen, "kmsg")) {
draw_panic_static_kmsg(sb);
+ } else if (!strcmp(drm_panic_screen, "qr_code")) {
+ draw_panic_static_qr_code(sb);
} else {
draw_panic_static_user(sb);
}
}
-static void draw_panic_plane(struct drm_plane *plane)
+static void drm_panic_set_description(const char *description)
+{
+ u32 len;
+
+ if (description) {
+ struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1];
+
+ desc_line->txt = description;
+ len = strlen(description);
+ /* ignore the last newline character */
+ if (len && description[len - 1] == '\n')
+ len -= 1;
+ desc_line->len = len;
+ }
+}
+
+static void drm_panic_clear_description(void)
+{
+ struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1];
+
+ desc_line->len = 0;
+ desc_line->txt = NULL;
+}
+
+static void draw_panic_plane(struct drm_plane *plane, const char *description)
{
struct drm_scanout_buffer sb = { };
int ret;
@@ -640,6 +915,8 @@ static void draw_panic_plane(struct drm_plane *plane)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ drm_panic_set_description(description);
+
ret = plane->helper_private->get_scanout_buffer(plane, &sb);
if (!ret && drm_panic_is_format_supported(sb.format)) {
@@ -647,6 +924,7 @@ static void draw_panic_plane(struct drm_plane *plane)
if (plane->helper_private->panic_flush)
plane->helper_private->panic_flush(plane);
}
+ drm_panic_clear_description();
drm_panic_unlock(plane->dev, flags);
}
@@ -655,12 +933,12 @@ static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd)
return container_of(kd, struct drm_plane, kmsg_panic);
}
-static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
+static void drm_panic(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail)
{
struct drm_plane *plane = to_drm_plane(dumper);
- if (reason == KMSG_DUMP_PANIC)
- draw_panic_plane(plane);
+ if (detail->reason == KMSG_DUMP_PANIC)
+ draw_panic_plane(plane, detail->description);
}
@@ -680,7 +958,7 @@ static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_
if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) {
struct drm_plane *plane = file->private_data;
- draw_panic_plane(plane);
+ draw_panic_plane(plane, "Test from debugfs");
}
return count;
}
@@ -704,6 +982,26 @@ static void debugfs_register_plane(struct drm_plane *plane, int index) {}
#endif /* CONFIG_DRM_PANIC_DEBUG */
/**
+ * drm_panic_is_enabled - check whether drm_panic is supported by a device
+ * @dev: the drm device that may support drm_panic
+ *
+ * Returns: true if the drm device supports drm_panic
+ */
+bool drm_panic_is_enabled(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ if (!dev->mode_config.num_total_plane)
+ return false;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->helper_private && plane->helper_private->get_scanout_buffer)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_panic_is_enabled);
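+
+/* See drm_fb_helper_alloc_info() earlier in this series, which uses this
+ * helper to set info->skip_panic for the fbdev emulation. */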
+
+/**
* drm_panic_register() - Initialize DRM panic for a device
* @dev: the drm device on which the panic screen will be displayed.
*/
@@ -730,7 +1028,6 @@ void drm_panic_register(struct drm_device *dev)
if (registered_plane)
drm_info(dev, "Registered %d planes with drm panic\n", registered_plane);
}
-EXPORT_SYMBOL(drm_panic_register);
/**
* drm_panic_unregister()
@@ -749,4 +1046,19 @@ void drm_panic_unregister(struct drm_device *dev)
kmsg_dump_unregister(&plane->kmsg_panic);
}
}
-EXPORT_SYMBOL(drm_panic_unregister);
+
+/**
+ * drm_panic_init() - Initialize DRM panic.
+ */
+void __init drm_panic_init(void)
+{
+ drm_panic_qr_init();
+}
+
+/**
+ * drm_panic_exit() - Free the resources allocated by drm_panic_init()
+ */
+void drm_panic_exit(void)
+{
+ drm_panic_qr_exit();
+}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
new file mode 100644
index 000000000000..1ef56cb07dfb
--- /dev/null
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: MIT
+
+//! This is a simple QR encoder for DRM panic.
+//!
+//! It is called from a panic handler, so it shouldn't allocate memory and
+//! does all the work on the stack or on the provided buffers. For
+//! simplification, it only supports low error correction, and applies the
+//! first mask (checkerboard). It will draw the smallest QR code that can
+//! contain the string passed as parameter. To get the most compact
+//! QR code, the start of the URL is encoded as binary, and the
+//! compressed kmsg is encoded as numeric.
+//!
+//! The binary data must be a valid URL parameter, so the easiest way is
+//! to use base64 encoding. But this wastes 25% of data space, so the
+//! whole stack trace won't fit in the QR code. Instead, this encoder packs
+//! every 13 bits of input into 4 decimal digits, and then uses the
+//! efficient numeric encoding, which encodes 3 decimal digits into
+//! 10 bits. This turns 39 bits of compressed data into 12 decimal digits,
+//! and then into 40 bits in the QR code, wasting only 2.5%. The digits are
+//! also a valid URL parameter, so the website can reverse the process to
+//! recover the binary data.
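+//!
+//! As a worked example: 13 bits hold values 0..=8191, which always fit in
+//! 4 decimal digits, and numeric mode packs 3 digits into 10 bits. So
+//! 3 * 13 = 39 input bits become 12 digits, encoded in 4 * 10 = 40 output
+//! bits, an overhead of 1/40 = 2.5%.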
+//!
+//! Inspired by these 3 projects, all under MIT license:
+//!
+//! * <https://github.com/kennytm/qrcode-rust>
+//! * <https://github.com/erwanvivien/fast_qr>
+//! * <https://github.com/bjguillot/qr>
+
+use core::cmp;
+use kernel::str::CStr;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
+struct Version(usize);
+
+// Generator polynomials for ECC, only those that are needed for low quality.
+const P7: [u8; 7] = [87, 229, 146, 149, 238, 102, 21];
+const P10: [u8; 10] = [251, 67, 46, 61, 118, 70, 64, 94, 32, 45];
+const P15: [u8; 15] = [
+ 8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105,
+];
+const P18: [u8; 18] = [
+ 215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179, 5, 98, 96, 153,
+];
+const P20: [u8; 20] = [
+ 17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156, 164, 212, 212, 188, 190,
+];
+const P22: [u8; 22] = [
+ 210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98, 80, 219, 134, 160, 105,
+ 165, 231,
+];
+const P24: [u8; 24] = [
+ 229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226, 228, 218, 111, 0, 117,
+ 232, 87, 96, 227, 21,
+];
+const P26: [u8; 26] = [
+ 173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161, 21, 245, 142, 13, 102,
+ 48, 227, 153, 145, 218, 70,
+];
+const P28: [u8; 28] = [
+ 168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27, 232, 201, 21, 43, 245, 87,
+ 42, 195, 212, 119, 242, 37, 9, 123,
+];
+const P30: [u8; 30] = [
+ 41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222, 125, 42, 173, 226, 193,
+ 224, 130, 156, 37, 251, 216, 238, 40, 192, 180,
+];
+
+/// QR code parameters for low-quality ECC:
+/// - Error Correction polynomial.
+/// - Number of blocks in group 1.
+/// - Number of blocks in group 2.
+/// - Block size in group 1.
+///
+/// (Block size in group 2 is one more than group 1).
+struct VersionParameter(&'static [u8], u8, u8, u8);
+const VPARAM: [VersionParameter; 40] = [
+ VersionParameter(&P7, 1, 0, 19), // V1
+ VersionParameter(&P10, 1, 0, 34), // V2
+ VersionParameter(&P15, 1, 0, 55), // V3
+ VersionParameter(&P20, 1, 0, 80), // V4
+ VersionParameter(&P26, 1, 0, 108), // V5
+ VersionParameter(&P18, 2, 0, 68), // V6
+ VersionParameter(&P20, 2, 0, 78), // V7
+ VersionParameter(&P24, 2, 0, 97), // V8
+ VersionParameter(&P30, 2, 0, 116), // V9
+ VersionParameter(&P18, 2, 2, 68), // V10
+ VersionParameter(&P20, 4, 0, 81), // V11
+ VersionParameter(&P24, 2, 2, 92), // V12
+ VersionParameter(&P26, 4, 0, 107), // V13
+ VersionParameter(&P30, 3, 1, 115), // V14
+ VersionParameter(&P22, 5, 1, 87), // V15
+ VersionParameter(&P24, 5, 1, 98), // V16
+ VersionParameter(&P28, 1, 5, 107), // V17
+ VersionParameter(&P30, 5, 1, 120), // V18
+ VersionParameter(&P28, 3, 4, 113), // V19
+ VersionParameter(&P28, 3, 5, 107), // V20
+ VersionParameter(&P28, 4, 4, 116), // V21
+ VersionParameter(&P28, 2, 7, 111), // V22
+ VersionParameter(&P30, 4, 5, 121), // V23
+ VersionParameter(&P30, 6, 4, 117), // V24
+ VersionParameter(&P26, 8, 4, 106), // V25
+ VersionParameter(&P28, 10, 2, 114), // V26
+ VersionParameter(&P30, 8, 4, 122), // V27
+ VersionParameter(&P30, 3, 10, 117), // V28
+ VersionParameter(&P30, 7, 7, 116), // V29
+ VersionParameter(&P30, 5, 10, 115), // V30
+ VersionParameter(&P30, 13, 3, 115), // V31
+ VersionParameter(&P30, 17, 0, 115), // V32
+ VersionParameter(&P30, 17, 1, 115), // V33
+ VersionParameter(&P30, 13, 6, 115), // V34
+ VersionParameter(&P30, 12, 7, 121), // V35
+ VersionParameter(&P30, 6, 14, 121), // V36
+ VersionParameter(&P30, 17, 4, 122), // V37
+ VersionParameter(&P30, 4, 18, 122), // V38
+ VersionParameter(&P30, 20, 4, 117), // V39
+ VersionParameter(&P30, 19, 6, 118), // V40
+];
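+
+// Worked example: V10 is (&P18, 2, 2, 68), i.e. 2 blocks of 68 bytes plus
+// 2 blocks of 69 bytes, so max_data() = 2 * 68 + 2 * 69 = 274 data bytes,
+// each block followed by 18 bytes of ECC (the length of P18).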
+
+const MAX_EC_SIZE: usize = 30;
+const MAX_BLK_SIZE: usize = 123;
+
+/// Positions of the alignment pattern grid, indexed by version.
+const ALIGNMENT_PATTERNS: [&[u8]; 40] = [
+ &[],
+ &[6, 18],
+ &[6, 22],
+ &[6, 26],
+ &[6, 30],
+ &[6, 34],
+ &[6, 22, 38],
+ &[6, 24, 42],
+ &[6, 26, 46],
+ &[6, 28, 50],
+ &[6, 30, 54],
+ &[6, 32, 58],
+ &[6, 34, 62],
+ &[6, 26, 46, 66],
+ &[6, 26, 48, 70],
+ &[6, 26, 50, 74],
+ &[6, 30, 54, 78],
+ &[6, 30, 56, 82],
+ &[6, 30, 58, 86],
+ &[6, 34, 62, 90],
+ &[6, 28, 50, 72, 94],
+ &[6, 26, 50, 74, 98],
+ &[6, 30, 54, 78, 102],
+ &[6, 28, 54, 80, 106],
+ &[6, 32, 58, 84, 110],
+ &[6, 30, 58, 86, 114],
+ &[6, 34, 62, 90, 118],
+ &[6, 26, 50, 74, 98, 122],
+ &[6, 30, 54, 78, 102, 126],
+ &[6, 26, 52, 78, 104, 130],
+ &[6, 30, 56, 82, 108, 134],
+ &[6, 34, 60, 86, 112, 138],
+ &[6, 30, 58, 86, 114, 142],
+ &[6, 34, 62, 90, 118, 146],
+ &[6, 30, 54, 78, 102, 126, 150],
+ &[6, 24, 50, 76, 102, 128, 154],
+ &[6, 28, 54, 80, 106, 132, 158],
+ &[6, 32, 58, 84, 110, 136, 162],
+ &[6, 26, 54, 82, 110, 138, 166],
+ &[6, 30, 58, 86, 114, 142, 170],
+];
+
+/// Version information field, for versions 7 to 40.
+const VERSION_INFORMATION: [u32; 34] = [
+ 0b00_0111_1100_1001_0100,
+ 0b00_1000_0101_1011_1100,
+ 0b00_1001_1010_1001_1001,
+ 0b00_1010_0100_1101_0011,
+ 0b00_1011_1011_1111_0110,
+ 0b00_1100_0111_0110_0010,
+ 0b00_1101_1000_0100_0111,
+ 0b00_1110_0110_0000_1101,
+ 0b00_1111_1001_0010_1000,
+ 0b01_0000_1011_0111_1000,
+ 0b01_0001_0100_0101_1101,
+ 0b01_0010_1010_0001_0111,
+ 0b01_0011_0101_0011_0010,
+ 0b01_0100_1001_1010_0110,
+ 0b01_0101_0110_1000_0011,
+ 0b01_0110_1000_1100_1001,
+ 0b01_0111_0111_1110_1100,
+ 0b01_1000_1110_1100_0100,
+ 0b01_1001_0001_1110_0001,
+ 0b01_1010_1111_1010_1011,
+ 0b01_1011_0000_1000_1110,
+ 0b01_1100_1100_0001_1010,
+ 0b01_1101_0011_0011_1111,
+ 0b01_1110_1101_0111_0101,
+ 0b01_1111_0010_0101_0000,
+ 0b10_0000_1001_1101_0101,
+ 0b10_0001_0110_1111_0000,
+ 0b10_0010_1000_1011_1010,
+ 0b10_0011_0111_1001_1111,
+ 0b10_0100_1011_0000_1011,
+ 0b10_0101_0100_0010_1110,
+ 0b10_0110_1010_0110_0100,
+ 0b10_0111_0101_0100_0001,
+ 0b10_1000_1100_0110_1001,
+];
+
+/// Format info for low quality ECC.
+const FORMAT_INFOS_QR_L: [u16; 8] = [
+ 0x77c4, 0x72f3, 0x7daa, 0x789d, 0x662f, 0x6318, 0x6c41, 0x6976,
+];
+
+impl Version {
+    /// Returns the smallest QR version that can hold these segments.
+ fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> {
+        for v in (1..=40).map(Version) {
+ if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() {
+ return Some(v);
+ }
+ }
+ None
+ }
+
+ fn width(&self) -> u8 {
+ (self.0 as u8) * 4 + 17
+ }
+
+ fn max_data(&self) -> usize {
+ self.g1_blk_size() * self.g1_blocks() + (self.g1_blk_size() + 1) * self.g2_blocks()
+ }
+
+ fn ec_size(&self) -> usize {
+ VPARAM[self.0 - 1].0.len()
+ }
+
+ fn g1_blocks(&self) -> usize {
+ VPARAM[self.0 - 1].1 as usize
+ }
+
+ fn g2_blocks(&self) -> usize {
+ VPARAM[self.0 - 1].2 as usize
+ }
+
+ fn g1_blk_size(&self) -> usize {
+ VPARAM[self.0 - 1].3 as usize
+ }
+
+ fn alignment_pattern(&self) -> &'static [u8] {
+ &ALIGNMENT_PATTERNS[self.0 - 1]
+ }
+
+ fn poly(&self) -> &'static [u8] {
+ VPARAM[self.0 - 1].0
+ }
+
+ fn version_info(&self) -> u32 {
+ if *self >= Version(7) {
+ VERSION_INFORMATION[self.0 - 7]
+ } else {
+ 0
+ }
+ }
+}
+
+/// Exponential table for Galois Field GF(256).
+const EXP_TABLE: [u8; 256] = [
+ 1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152, 45, 90, 180, 117,
+ 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181,
+ 119, 238, 193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161,
+ 95, 190, 97, 194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211, 187,
+ 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34, 68, 136,
+ 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124, 248, 237, 199, 147, 59, 118, 236, 197,
+ 151, 51, 102, 204, 133, 23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168,
+ 77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209, 191, 99, 198,
+ 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149,
+ 55, 110, 220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167,
+ 83, 166, 81, 162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72,
+ 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250, 233, 207,
+ 131, 27, 54, 108, 216, 173, 71, 142, 1,
+];
+
+/// Logarithm (reverse exponential) table for Galois Field GF(256).
+const LOG_TABLE: [u8; 256] = [
+ 175, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100, 224, 14, 52, 141,
+ 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138, 101, 47, 225, 36, 15, 33, 53, 147, 142,
+ 218, 240, 18, 130, 69, 29, 181, 194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114,
+ 166, 6, 191, 139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208, 148,
+ 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66, 182, 163, 195, 72, 126,
+ 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94, 155, 159, 10, 21, 121, 43, 78, 212, 229, 172,
+ 115, 243, 167, 87, 7, 112, 192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24,
+ 227, 165, 153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63, 209, 91, 149,
+ 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242, 86, 211, 171, 20, 42, 93, 158, 132,
+ 60, 57, 83, 71, 109, 65, 162, 31, 45, 67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12,
+ 111, 246, 108, 161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203, 89, 95,
+ 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79, 174, 213, 233, 230, 231, 173,
+ 232, 116, 214, 244, 234, 168, 80, 88, 175,
+];
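+
+// Together, these tables implement GF(256) multiplication: the product of
+// two non-zero elements is EXP_TABLE[(log(a) + log(b)) % 255]. The
+// generator polynomial coefficients above are already stored as logs, so
+// error_code_for_blocks() needs only one LOG_TABLE lookup per lead
+// coefficient.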
+
+// 4 bits segment header.
+const MODE_STOP: u16 = 0;
+const MODE_NUMERIC: u16 = 1;
+const MODE_BINARY: u16 = 4;
+/// Padding bytes.
+const PADDING: [u8; 2] = [236, 17];
+
+/// Get the next 13 bits of data, starting at the specified offset (in bits).
+fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> {
+ if offset < data.len() * 8 {
+ let size = cmp::min(13, data.len() * 8 - offset);
+ let byte_off = offset / 8;
+ let bit_off = offset % 8;
+ // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13).
+ let b = (bit_off + size) as u16;
+
+ let first_byte = (data[byte_off] << bit_off >> bit_off) as u16;
+
+ let number = match b {
+ 0..=8 => first_byte >> (8 - b),
+ 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16,
+ _ => {
+ (first_byte << (b - 8))
+ + ((data[byte_off + 1] as u16) << (b - 16))
+ + (data[byte_off + 2] >> (24 - b)) as u16
+ }
+ };
+ Some((number, size))
+ } else {
+ None
+ }
+}
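+
+// For example, with offset == 6 and enough data, b == 19, so the `_` arm
+// combines the low 2 bits of data[byte_off], all 8 bits of the next byte,
+// and the top 3 bits of the byte after that into a 13-bit number.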
+
+/// Number of bits needed to encode a group of n digits in numeric mode, indexed by n.
+const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10];
+const POW10: [u16; 4] = [1, 10, 100, 1000];
+
+enum Segment<'a> {
+ Numeric(&'a [u8]),
+ Binary(&'a [u8]),
+}
+
+impl Segment<'_> {
+ fn get_header(&self) -> (u16, usize) {
+ match self {
+ Segment::Binary(_) => (MODE_BINARY, 4),
+ Segment::Numeric(_) => (MODE_NUMERIC, 4),
+ }
+ }
+
+    // Returns the size of the length field in bits, depending on the QR version.
+ fn length_bits_count(&self, version: Version) -> usize {
+ let Version(v) = version;
+ match self {
+ Segment::Binary(_) => match v {
+ 1..=9 => 8,
+ _ => 16,
+ },
+ Segment::Numeric(_) => match v {
+ 1..=9 => 10,
+ 10..=26 => 12,
+ _ => 14,
+ },
+ }
+ }
+
+ // Number of characters in the segment.
+ fn character_count(&self) -> usize {
+ match self {
+ Segment::Binary(data) => data.len(),
+ Segment::Numeric(data) => {
+ let data_bits = data.len() * 8;
+ let last_chars = match data_bits % 13 {
+ 1 => 1,
+ k => (k + 1) / 3,
+ };
+ // 4 decimal numbers per 13bits + remainder.
+ 4 * (data_bits / 13) + last_chars
+ }
+ }
+ }
+
+ fn get_length_field(&self, version: Version) -> (u16, usize) {
+ (
+ self.character_count() as u16,
+ self.length_bits_count(version),
+ )
+ }
+
+ fn total_size_bits(&self, version: Version) -> usize {
+ let data_size = match self {
+ Segment::Binary(data) => data.len() * 8,
+ Segment::Numeric(_) => {
+ let digits = self.character_count();
+ 10 * (digits / 3) + NUM_CHARS_BITS[digits % 3]
+ }
+ };
+ // header + length + data.
+ 4 + self.length_bits_count(version) + data_size
+ }
+
+ fn iter(&self) -> SegmentIterator<'_> {
+ SegmentIterator {
+ segment: self,
+ offset: 0,
+ carry: 0,
+ carry_len: 0,
+ }
+ }
+}
+
+struct SegmentIterator<'a> {
+ segment: &'a Segment<'a>,
+ offset: usize,
+ carry: u16,
+ carry_len: usize,
+}
+
+impl Iterator for SegmentIterator<'_> {
+ type Item = (u16, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.segment {
+ Segment::Binary(data) => {
+ if self.offset < data.len() {
+ let byte = data[self.offset] as u16;
+ self.offset += 1;
+ Some((byte, 8))
+ } else {
+ None
+ }
+ }
+ Segment::Numeric(data) => {
+ if self.carry_len == 3 {
+ let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
+ self.carry_len = 0;
+ self.carry = 0;
+ Some(out)
+ } else if let Some((bits, size)) = get_next_13b(data, self.offset) {
+ self.offset += size;
+ let new_chars = match size {
+ 1 => 1,
+ k => (k + 1) / 3,
+ };
+ if self.carry_len + new_chars > 3 {
+ self.carry_len = new_chars + self.carry_len - 3;
+ let out = (
+ self.carry * POW10[new_chars - self.carry_len]
+ + bits / POW10[self.carry_len],
+ NUM_CHARS_BITS[3],
+ );
+ self.carry = bits % POW10[self.carry_len];
+ Some(out)
+ } else {
+ let out = (
+ self.carry * POW10[new_chars] + bits,
+ NUM_CHARS_BITS[self.carry_len + new_chars],
+ );
+ self.carry_len = 0;
+ Some(out)
+ }
+ } else if self.carry_len > 0 {
+ let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
+ self.carry_len = 0;
+ Some(out)
+ } else {
+ None
+ }
+ }
+ }
+ }
+}
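+
+// Illustrative sketch: for `Segment::Numeric(&[0xff, 0xff])`, the 16 input
+// bits hold the digits 8, 1, 9, 1, 7 (8191 from the first 13 bits, 7 from
+// the last 3), and the iterator regroups them into at most 3 digits per item:
+//
+//     let mut it = Segment::Numeric(&[0xff, 0xff]).iter();
+//     assert_eq!(it.next(), Some((819, 10))); // "819" in 10 bits
+//     assert_eq!(it.next(), Some((17, 7)));   // carry "1" + "7", in 7 bits
+//     assert_eq!(it.next(), None);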
+
+struct EncodedMsg<'a> {
+ data: &'a mut [u8],
+ ec_size: usize,
+ g1_blocks: usize,
+ g2_blocks: usize,
+ g1_blk_size: usize,
+ g2_blk_size: usize,
+ poly: &'static [u8],
+ version: Version,
+}
+
+/// Data to be put in the QR code, with correct segment encoding, padding, and
+/// error correction codes (ECC).
+impl EncodedMsg<'_> {
+ fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
+ let version = Version::from_segments(segments)?;
+ let ec_size = version.ec_size();
+ let g1_blocks = version.g1_blocks();
+ let g2_blocks = version.g2_blocks();
+ let g1_blk_size = version.g1_blk_size();
+ let g2_blk_size = g1_blk_size + 1;
+ let poly = version.poly();
+
+ // Clear the output.
+ data.fill(0);
+
+ let mut em = EncodedMsg {
+ data,
+ ec_size,
+ g1_blocks,
+ g2_blocks,
+ g1_blk_size,
+ g2_blk_size,
+ poly,
+ version,
+ };
+ em.encode(segments);
+ Some(em)
+ }
+
+ /// Push bits of data at an offset (in bits).
+ fn push(&mut self, offset: &mut usize, bits: (u16, usize)) {
+ let (number, len_bits) = bits;
+ let byte_off = *offset / 8;
+ let bit_off = *offset % 8;
+ let b = bit_off + len_bits;
+
+ match (bit_off, b) {
+ (0, 0..=8) => {
+ self.data[byte_off] = (number << (8 - b)) as u8;
+ }
+ (0, _) => {
+ self.data[byte_off] = (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number << (16 - b)) as u8;
+ }
+ (_, 0..=8) => {
+ self.data[byte_off] |= (number << (8 - b)) as u8;
+ }
+ (_, 9..=16) => {
+ self.data[byte_off] |= (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number << (16 - b)) as u8;
+ }
+ _ => {
+ self.data[byte_off] |= (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number >> (b - 16)) as u8;
+ self.data[byte_off + 2] = (number << (24 - b)) as u8;
+ }
+ }
+ *offset += len_bits;
+ }
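+
+ // Illustrative sketch, assuming `em` is a zeroed `EncodedMsg`: pushing a
+ // numeric-mode header and then a 10-bit length field of 5 packs across
+ // the first byte boundary:
+ //
+ //     let mut offset = 0;
+ //     em.push(&mut offset, (MODE_NUMERIC, 4)); // data[0] = 0b0001_0000
+ //     em.push(&mut offset, (5, 10));           // data[0..2] = [0x10, 0x14]
+ //     assert_eq!(offset, 14);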
+
+ fn add_segments(&mut self, segments: &[&Segment<'_>]) {
+ let mut offset: usize = 0;
+
+ for s in segments.iter() {
+ self.push(&mut offset, s.get_header());
+ self.push(&mut offset, s.get_length_field(self.version));
+ for bits in s.iter() {
+ self.push(&mut offset, bits);
+ }
+ }
+ self.push(&mut offset, (MODE_STOP, 4));
+
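+ // The `(i & 1) ^ (pad_offset & 1)` index below flips the alternation so
+ // that the first padding byte is always 236, then 17, 236, 17, ...
+ // whether padding starts at an even or odd byte offset.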
+ let pad_offset = (offset + 7) / 8;
+ for i in pad_offset..self.version.max_data() {
+ self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
+ }
+ }
+
+ fn error_code_for_blocks(&mut self, offset: usize, size: usize, ec_offset: usize) {
+ let mut tmp: [u8; MAX_BLK_SIZE + MAX_EC_SIZE] = [0; MAX_BLK_SIZE + MAX_EC_SIZE];
+
+ tmp[0..size].copy_from_slice(&self.data[offset..offset + size]);
+ for i in 0..size {
+ let lead_coeff = tmp[i] as usize;
+ if lead_coeff == 0 {
+ continue;
+ }
+ let log_lead_coeff = usize::from(LOG_TABLE[lead_coeff]);
+ for (u, &v) in tmp[i + 1..].iter_mut().zip(self.poly.iter()) {
+ *u ^= EXP_TABLE[(usize::from(v) + log_lead_coeff) % 255];
+ }
+ }
+ self.data[ec_offset..ec_offset + self.ec_size]
+ .copy_from_slice(&tmp[size..size + self.ec_size]);
+ }
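+
+ // The loop above divides the block, seen as a polynomial over GF(256),
+ // by the generator polynomial and keeps the remainder as EC bytes. Note
+ // that `poly` stores the generator coefficients in log form, so each
+ // product reduces to a table lookup (illustrative sketch):
+ //
+ //     // a * b in GF(256), for non-zero a and b:
+ //     EXP_TABLE[(usize::from(LOG_TABLE[a]) + usize::from(LOG_TABLE[b])) % 255]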
+
+ fn compute_error_code(&mut self) {
+ let mut offset = 0;
+ let mut ec_offset = self.g1_blocks * self.g1_blk_size + self.g2_blocks * self.g2_blk_size;
+
+ for _ in 0..self.g1_blocks {
+ self.error_code_for_blocks(offset, self.g1_blk_size, ec_offset);
+ offset += self.g1_blk_size;
+ ec_offset += self.ec_size;
+ }
+ for _ in 0..self.g2_blocks {
+ self.error_code_for_blocks(offset, self.g2_blk_size, ec_offset);
+ offset += self.g2_blk_size;
+ ec_offset += self.ec_size;
+ }
+ }
+
+ fn encode(&mut self, segments: &[&Segment<'_>]) {
+ self.add_segments(segments);
+ self.compute_error_code();
+ }
+
+ fn iter(&self) -> EncodedMsgIterator<'_> {
+ EncodedMsgIterator {
+ em: self,
+ offset: 0,
+ }
+ }
+}
+
+/// Iterator, to retrieve the data in the interleaved order needed by QR code.
+struct EncodedMsgIterator<'a> {
+ em: &'a EncodedMsg<'a>,
+ offset: usize,
+}
+
+impl Iterator for EncodedMsgIterator<'_> {
+ type Item = u8;
+
+ // Send the bytes in interleaved mode: first byte of the first block of
+ // group 1, then first byte of the second block of group 1, ...
+ fn next(&mut self) -> Option<Self::Item> {
+ let em = self.em;
+ let blocks = em.g1_blocks + em.g2_blocks;
+ let g1_end = em.g1_blocks * em.g1_blk_size;
+ let g2_end = g1_end + em.g2_blocks * em.g2_blk_size;
+ let ec_end = g2_end + em.ec_size * blocks;
+
+ if self.offset >= ec_end {
+ return None;
+ }
+
+ let offset = if self.offset < em.g1_blk_size * blocks {
+ // group1 and group2 interleaved
+ let blk = self.offset % blocks;
+ let blk_off = self.offset / blocks;
+ if blk < em.g1_blocks {
+ blk * em.g1_blk_size + blk_off
+ } else {
+ g1_end + em.g2_blk_size * (blk - em.g1_blocks) + blk_off
+ }
+ } else if self.offset < g2_end {
+ // last byte of group2 blocks
+ let blk2 = self.offset - blocks * em.g1_blk_size;
+ em.g1_blk_size * em.g1_blocks + blk2 * em.g2_blk_size + em.g2_blk_size - 1
+ } else {
+ // EC blocks
+ let ec_offset = self.offset - g2_end;
+ let blk = ec_offset % blocks;
+ let blk_off = ec_offset / blocks;
+
+ g2_end + blk * em.ec_size + blk_off
+ };
+ self.offset += 1;
+ Some(em.data[offset])
+ }
+}
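+
+// Illustrative sketch: with two group1 blocks of 2 bytes, [a0, a1] and
+// [b0, b1], and one group2 block of 3 bytes, [c0, c1, c2], the data sits in
+// memory as a0 a1 b0 b1 c0 c1 c2 and the iterator yields
+//
+//     a0, b0, c0, a1, b1, c1, c2
+//
+// followed by the EC bytes, interleaved the same way block by block.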
+
+/// A QR code image, encoded as a linear binary framebuffer.
+/// 1 bit per module (pixel), and each new line starts at the next byte boundary.
+/// Max width is 177 for a V40 QR code, so `u8` is enough for coordinates.
+struct QrImage<'a> {
+ data: &'a mut [u8],
+ width: u8,
+ stride: u8,
+ version: Version,
+}
+
+impl QrImage<'_> {
+ fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
+ let width = em.version.width();
+ let stride = (width + 7) / 8;
+ let data = qrdata;
+
+ let mut qr_image = QrImage {
+ data,
+ width,
+ stride,
+ version: em.version,
+ };
+ qr_image.draw_all(em.iter());
+ qr_image
+ }
+
+ fn clear(&mut self) {
+ self.data.fill(0);
+ }
+
+ // Set pixel to light color.
+ fn set(&mut self, x: u8, y: u8) {
+ let off = y as usize * self.stride as usize + x as usize / 8;
+ let mut v = self.data[off];
+ v |= 0x80 >> (x % 8);
+ self.data[off] = v;
+ }
+
+ // Invert a module color.
+ fn xor(&mut self, x: u8, y: u8) {
+ let off = y as usize * self.stride as usize + x as usize / 8;
+ self.data[off] ^= 0x80 >> (x % 8);
+ }
+
+ // Draw a light square with its top-left corner at (x, y).
+ fn draw_square(&mut self, x: u8, y: u8, size: u8) {
+ for k in 0..size {
+ self.set(x + k, y);
+ self.set(x, y + k + 1);
+ self.set(x + size, y + k);
+ self.set(x + k + 1, y + size);
+ }
+ }
+
+ // Finder patterns: three 8x8 squares at the corners.
+ fn draw_finders(&mut self) {
+ self.draw_square(1, 1, 4);
+ self.draw_square(self.width - 6, 1, 4);
+ self.draw_square(1, self.width - 6, 4);
+ for k in 0..8 {
+ self.set(k, 7);
+ self.set(self.width - k - 1, 7);
+ self.set(k, self.width - 8);
+ }
+ for k in 0..7 {
+ self.set(7, k);
+ self.set(self.width - 8, k);
+ self.set(7, self.width - 1 - k);
+ }
+ }
+
+ fn is_finder(&self, x: u8, y: u8) -> bool {
+ let end = self.width - 8;
+ (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
+ }
+
+ // Alignment pattern: 5x5 squares in a grid.
+ fn draw_alignments(&mut self) {
+ let positions = self.version.alignment_pattern();
+ for &x in positions.iter() {
+ for &y in positions.iter() {
+ if !self.is_finder(x, y) {
+ self.draw_square(x - 1, y - 1, 2);
+ }
+ }
+ }
+ }
+
+ fn is_alignment(&self, x: u8, y: u8) -> bool {
+ let positions = self.version.alignment_pattern();
+ for &ax in positions.iter() {
+ for &ay in positions.iter() {
+ if self.is_finder(ax, ay) {
+ continue;
+ }
+ if x >= ax - 2 && x <= ax + 2 && y >= ay - 2 && y <= ay + 2 {
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ // Timing patterns: 2 dotted lines between the finder patterns.
+ fn draw_timing_patterns(&mut self) {
+ let end = self.width - 8;
+
+ for x in (9..end).step_by(2) {
+ self.set(x, 6);
+ self.set(6, x);
+ }
+ }
+
+ fn is_timing(&self, x: u8, y: u8) -> bool {
+ x == 6 || y == 6
+ }
+
+ // Mask info: 15 bits around the finders, written twice for redundancy.
+ fn draw_maskinfo(&mut self) {
+ let info: u16 = FORMAT_INFOS_QR_L[0];
+ let mut skip = 0;
+
+ for k in 0..7 {
+ if k == 6 {
+ skip = 1;
+ }
+ if info & (1 << (14 - k)) == 0 {
+ self.set(k + skip, 8);
+ self.set(8, self.width - 1 - k);
+ }
+ }
+ skip = 0;
+ for k in 0..8 {
+ if k == 2 {
+ skip = 1;
+ }
+ if info & (1 << (7 - k)) == 0 {
+ self.set(8, 8 - skip - k);
+ self.set(self.width - 8 + k, 8);
+ }
+ }
+ }
+
+ fn is_maskinfo(&self, x: u8, y: u8) -> bool {
+ let end = self.width - 8;
+ // Count the dark module as mask info.
+ (x <= 8 && y == 8) || (y <= 8 && x == 8) || (x == 8 && y >= end) || (x >= end && y == 8)
+ }
+
+ // Version info: 18 bits written twice, close to the finders.
+ fn draw_version_info(&mut self) {
+ let vinfo = self.version.version_info();
+ let pos = self.width - 11;
+
+ if vinfo != 0 {
+ for x in 0..3 {
+ for y in 0..6 {
+ if vinfo & (1 << (x + y * 3)) == 0 {
+ self.set(x + pos, y);
+ self.set(y, x + pos);
+ }
+ }
+ }
+ }
+ }
+
+ fn is_version_info(&self, x: u8, y: u8) -> bool {
+ let vinfo = self.version.version_info();
+ let pos = self.width - 11;
+
+ vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
+ }
+
+ // Returns true if the module is reserved (not usable for data and EC).
+ fn is_reserved(&self, x: u8, y: u8) -> bool {
+ self.is_alignment(x, y)
+ || self.is_finder(x, y)
+ || self.is_timing(x, y)
+ || self.is_maskinfo(x, y)
+ || self.is_version_info(x, y)
+ }
+
+ // Last module to draw, at bottom left corner.
+ fn is_last(&self, x: u8, y: u8) -> bool {
+ x == 0 && y == self.width - 1
+ }
+
+ // Move to the next module according to the QR code order,
+ // from the bottom-right corner to the bottom-left corner.
+ fn next(&self, x: u8, y: u8) -> (u8, u8) {
+ let x_adj = if x <= 6 { x + 1 } else { x };
+ let column_type = (self.width - x_adj) % 4;
+
+ match column_type {
+ 2 if y > 0 => (x + 1, y - 1),
+ 0 if y < self.width - 1 => (x + 1, y + 1),
+ 0 | 2 if x == 7 => (x - 2, y),
+ _ => (x - 1, y),
+ }
+ }
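+
+ // Illustrative sketch (V1, width 21): starting from the bottom-right
+ // corner, the traversal snakes up the right-most 2-module column:
+ //
+ //     (20, 20) -> (19, 20) -> (20, 19) -> (19, 19) -> ... -> (19, 0)
+ //
+ // then steps left and walks the next column pair downwards. The `x_adj`
+ // shift keeps the column pairs aligned after the vertical timing line at
+ // x = 6.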
+
+ // Find next module that can hold data.
+ fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
+ let (mut x, mut y) = self.next(x, y);
+ while self.is_reserved(x, y) && !self.is_last(x, y) {
+ (x, y) = self.next(x, y);
+ }
+ (x, y)
+ }
+
+ fn draw_data(&mut self, data: impl Iterator<Item = u8>) {
+ let (mut x, mut y) = (self.width - 1, self.width - 1);
+ for byte in data {
+ for s in 0..8 {
+ if byte & (0x80 >> s) == 0 {
+ self.set(x, y);
+ }
+ (x, y) = self.next_available(x, y);
+ }
+ }
+ // Set the remaining modules (0, 3 or 7 depending on version) to light,
+ // because a 0 bit corresponds to a light module.
+ while !self.is_last(x, y) {
+ if !self.is_reserved(x, y) {
+ self.set(x, y);
+ }
+ (x, y) = self.next(x, y);
+ }
+ }
+
+ // Apply checkerboard mask to all non-reserved modules.
+ fn apply_mask(&mut self) {
+ for x in 0..self.width {
+ for y in 0..self.width {
+ if (x ^ y) % 2 == 0 && !self.is_reserved(x, y) {
+ self.xor(x, y);
+ }
+ }
+ }
+ }
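+
+ // `(x ^ y) % 2` has the same parity as `(x + y) % 2`, so this is mask
+ // pattern 0 from the QR specification, matching the format info at
+ // FORMAT_INFOS_QR_L[0] used in draw_maskinfo() (assuming that table is
+ // indexed by mask pattern).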
+
+ // Draw the QR code with the provided data iterator.
+ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ // First clear the table, as it may already contain some data.
+ self.clear();
+ self.draw_finders();
+ self.draw_alignments();
+ self.draw_timing_patterns();
+ self.draw_version_info();
+ self.draw_data(data);
+ self.draw_maskinfo();
+ self.apply_mask();
+ }
+}
+
+/// C entry point for the Rust QR code generator.
+///
+/// Write the QR code image in the data buffer, and return the QR code width,
+/// or 0, if the data doesn't fit in a QR code.
+///
+/// * `url`: The base URL of the QR code. It will be encoded as a binary segment.
+/// * `data`: A pointer to the binary data to be encoded. If `url` is NULL, the
+/// data will be encoded as a binary segment; otherwise it will be encoded
+/// efficiently as a numeric segment and appended to the URL.
+/// * `data_len`: Length of the data that needs to be encoded; must be less
+/// than `data_size`.
+/// * `data_size`: Size of the data buffer; it must be at least 4071 bytes to
+/// hold a V40 QR code. It will then be overwritten with the QR code image.
+/// * `tmp`: A temporary buffer that the QR code encoder will use to write the
+/// segments and ECC.
+/// * `tmp_size`: Size of the temporary buffer; it must be at least 3706 bytes
+/// long for V40.
+///
+/// # Safety
+///
+/// * `url` must be null or point at a nul-terminated string.
+/// * `data` must be valid for reading and writing for `data_size` bytes.
+/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
+///
+/// They must remain valid for the duration of the function call.
+#[no_mangle]
+pub unsafe extern "C" fn drm_panic_qr_generate(
+ url: *const i8,
+ data: *mut u8,
+ data_len: usize,
+ data_size: usize,
+ tmp: *mut u8,
+ tmp_size: usize,
+) -> u8 {
+ if data_size < 4071 || tmp_size < 3706 || data_len > data_size {
+ return 0;
+ }
+ // SAFETY: The caller ensures that `data` is a valid pointer for reading and
+ // writing `data_size` bytes.
+ let data_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(data, data_size) };
+ // SAFETY: The caller ensures that `tmp` is a valid pointer for reading and
+ // writing `tmp_size` bytes.
+ let tmp_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(tmp, tmp_size) };
+ if url.is_null() {
+ match EncodedMsg::new(&[&Segment::Binary(&data_slice[0..data_len])], tmp_slice) {
+ None => 0,
+ Some(em) => {
+ let qr_image = QrImage::new(&em, data_slice);
+ qr_image.width
+ }
+ }
+ } else {
+ // SAFETY: The caller ensures that `url` is a valid pointer to a
+ // nul-terminated string.
+ let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
+ let segments = &[
+ &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Numeric(&data_slice[0..data_len]),
+ ];
+ match EncodedMsg::new(segments, tmp_slice) {
+ None => 0,
+ Some(em) => {
+ let qr_image = QrImage::new(&em, data_slice);
+ qr_image.width
+ }
+ }
+ }
+}
+
+/// Returns the maximum data size that can fit in a QR code of this version.
+/// * `version`: QR code version, between 1 and 40.
+/// * `url_len`: Length of the URL.
+///
+/// * If `url_len` > 0, the header/length of the two segments are removed, and
+/// the conversion to a numeric segment is accounted for.
+/// * If `url_len` = 0, only 3 bytes are removed for the single binary segment.
+#[no_mangle]
+pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+ if version < 1 || version > 40 {
+ return 0;
+ }
+ let max_data = Version(version as usize).max_data();
+
+ if url_len > 0 {
+ // Binary segment (URL): 4 + 16 bits; numeric segment (kmsg): 4 + 12 bits => 5 bytes total.
+ if url_len + 5 >= max_data {
+ 0
+ } else {
+ let max = max_data - url_len - 5;
+ (max * 39) / 40
+ }
+ } else {
+ // Remove 3 bytes for the binary segment (header 4 bits, length 16 bits, stop 4 bits).
+ max_data - 3
+ }
+}
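+
+// Illustrative sketch, assuming `Version(1).max_data()` is 19 bytes (V1 at
+// ECC level L): with no URL, drm_panic_qr_max_data_size(1, 0) returns
+// 19 - 3 = 16 bytes; with a 7-byte URL, 19 - 7 - 5 = 7 bytes remain, scaled
+// by 39/40 down to 6 bytes of raw kmsg data.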
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 03bd3c7bd0dc..0e3f8adf162f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -410,22 +410,30 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
}
/**
- * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
* @handle: buffer handle to export
* @flags: flags like DRM_CLOEXEC
- * @prime_fd: pointer to storage for the fd id of the create dma-buf
*
* This is the PRIME export function which must be used mandatorily by GEM
* drivers to ensure correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
+ *
+ * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
+ * has created, without attaching it to any file descriptors. The difference
+ * between those two is similar to that between anon_inode_getfile() and
+ * anon_inode_getfd(); insertion into descriptor table is something you
+ * cannot revert if any cleanup is needed, so the descriptor-returning
+ * variants should only be used when you are past the last failure exit
+ * and the only thing left is passing the new file descriptor to userland.
+ * When all you need is the object itself or when you need to do something
+ * else that might fail, use this function instead.
*/
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+ uint32_t flags)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -434,14 +442,14 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
- ret = -ENOENT;
+ dmabuf = ERR_PTR(-ENOENT);
goto out_unlock;
}
dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
if (dmabuf) {
get_dma_buf(dmabuf);
- goto out_have_handle;
+ goto out;
}
mutex_lock(&dev->object_name_lock);
@@ -463,7 +471,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
- ret = PTR_ERR(dmabuf);
mutex_unlock(&dev->object_name_lock);
goto out;
}
@@ -478,34 +485,51 @@ out_have_obj:
ret = drm_prime_add_buf_handle(&file_priv->prime,
dmabuf, handle);
mutex_unlock(&dev->object_name_lock);
- if (ret)
- goto fail_put_dmabuf;
-
-out_have_handle:
- ret = dma_buf_fd(dmabuf, flags);
- /*
- * We must _not_ remove the buffer from the handle cache since the newly
- * created dma buf is already linked in the global obj->dma_buf pointer,
- * and that is invariant as long as a userspace gem handle exists.
- * Closing the handle will clean out the cache anyway, so we don't leak.
- */
- if (ret < 0) {
- goto fail_put_dmabuf;
- } else {
- *prime_fd = ret;
- ret = 0;
+ if (ret) {
+ dma_buf_put(dmabuf);
+ dmabuf = ERR_PTR(ret);
}
-
- goto out;
-
-fail_put_dmabuf:
- dma_buf_put(dmabuf);
out:
drm_gem_object_put(obj);
out_unlock:
mutex_unlock(&file_priv->prime.lock);
+ return dmabuf;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);
- return ret;
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which must be used mandatorily by GEM
+ * drivers to ensure correct lifetime management of the underlying GEM object.
+ * The actual exporting from GEM object to a dma-buf is done through the
+ * &drm_gem_object_funcs.export callback.
+ */
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
+{
+ struct dma_buf *dmabuf;
+ int fd = get_unused_fd_flags(flags);
+
+ if (fd < 0)
+ return fd;
+
+ dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
+ if (IS_ERR(dmabuf)) {
+ put_unused_fd(fd);
+ return PTR_ERR(dmabuf);
+ }
+
+ fd_install(fd, dmabuf->file);
+ *prime_fd = fd;
+ return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index cf24dfdeb6b2..0081190201a7 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -100,8 +100,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
copy = iterator->remain;
/* Copy out the bit of the string that we need */
- memcpy(iterator->data,
- str + (iterator->start - iterator->offset), copy);
+ if (iterator->data)
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
iterator->offset = iterator->start + copy;
iterator->remain -= copy;
@@ -110,7 +111,8 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
len = min_t(ssize_t, strlen(str), iterator->remain);
- memcpy(iterator->data + pos, str, len);
+ if (iterator->data)
+ memcpy(iterator->data + pos, str, len);
iterator->offset += len;
iterator->remain -= len;
@@ -140,8 +142,9 @@ void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
ssize_t pos = iterator->offset - iterator->start;
- snprintf(((char *) iterator->data) + pos,
- iterator->remain, "%pV", vaf);
+ if (iterator->data)
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
iterator->offset += len;
iterator->remain -= len;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bb49d552e671..92f21764246f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -714,7 +714,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
* @dev: drm_device whose connector state changed
*
* This function fires off the uevent for userspace and also calls the
- * output_poll_changed function, which is most commonly used to inform the fbdev
+ * client hotplug function, which is most commonly used to inform the fbdev
* emulation code and allow it to update the fbcon output configuration.
*
* Drivers should call this from their hotplug handling code when a change is
@@ -730,11 +730,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
- /* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
-
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
@@ -750,11 +746,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- /* send a uevent + call fbdev */
drm_sysfs_connector_hotplug_event(connector);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
-
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event);
@@ -888,7 +880,7 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
* If however, the polling was never initialized, this call will trigger a
- * warning and return
+ * warning and return.
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only called from suspend/resume
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 85c79a38c13a..492acce0516f 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -85,7 +85,6 @@ static u32 clip_scaled(int src, int dst, int *clip)
* factors from @src to @dst.
*
* RETURNS:
- *
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise.
*/
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a0e94217b511..8e3d2d7060f8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -715,16 +715,16 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
struct fd f = fdget(fd);
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- if (f.file->f_op != &drm_syncobj_file_fops) {
+ if (fd_file(f)->f_op != &drm_syncobj_file_fops) {
fdput(f);
return -EINVAL;
}
/* take a reference to put in the idr */
- syncobj = f.file->private_data;
+ syncobj = fd_file(f)->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -1464,6 +1464,7 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj *syncobj;
struct eventfd_ctx *ev_fd_ctx;
struct syncobj_eventfd_entry *entry;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
@@ -1479,13 +1480,15 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
ev_fd_ctx = eventfd_ctx_fdget(args->fd);
- if (IS_ERR(ev_fd_ctx))
- return PTR_ERR(ev_fd_ctx);
+ if (IS_ERR(ev_fd_ctx)) {
+ ret = PTR_ERR(ev_fd_ctx);
+ goto err_fdget;
+ }
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- eventfd_ctx_put(ev_fd_ctx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_kzalloc;
}
entry->syncobj = syncobj;
entry->ev_fd_ctx = ev_fd_ctx;
@@ -1496,6 +1499,12 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
drm_syncobj_put(syncobj);
return 0;
+
+err_kzalloc:
+ eventfd_ctx_put(ev_fd_ctx);
+err_fdget:
+ drm_syncobj_put(syncobj);
+ return ret;
}
int
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index cc3571e25a9a..94e45ed6869d 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -131,7 +131,7 @@
* guaranteed to be enabled.
*
* On many hardware disabling the vblank interrupt cannot be done in a race-free
- * manner, see &drm_driver.vblank_disable_immediate and
+ * manner, see &drm_vblank_crtc_config.disable_immediate and
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
@@ -686,7 +686,6 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
- *
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
@@ -831,7 +830,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
- *
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
@@ -1241,6 +1239,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+ int vblank_offdelay = vblank->config.offdelay_ms;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1250,13 +1249,13 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
- if (drm_vblank_offdelay == 0)
+ if (!vblank_offdelay)
return;
- else if (drm_vblank_offdelay < 0)
+ else if (vblank_offdelay < 0)
vblank_disable_fn(&vblank->disable_timer);
- else if (!dev->vblank_disable_immediate)
+ else if (!vblank->config.disable_immediate)
mod_timer(&vblank->disable_timer,
- jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ jiffies + ((vblank_offdelay * HZ) / 1000));
}
}
@@ -1265,7 +1264,8 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms
+ * milliseconds.
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
@@ -1466,16 +1466,20 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom
+ * configuration options
* @crtc: CRTC in question
+ * @config: Vblank configuration value
*
- * This functions restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
- * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
- * unbalanced and so can also be unconditionally called in driver load code to
- * reflect the current hardware state of the crtc.
+ * See drm_crtc_vblank_on(). In addition, this function allows you to provide a
+ * custom vblank configuration for a given CRTC.
+ *
+ * Note that @config is copied; the pointer does not need to stay valid beyond
+ * this function call. For details of the parameters see
+ * struct drm_vblank_crtc_config.
*/
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
@@ -1488,6 +1492,8 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
+ vblank->config = *config;
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1500,10 +1506,33 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms)
drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
spin_unlock_irq(&dev->vbl_lock);
}
+EXPORT_SYMBOL(drm_crtc_vblank_on_config);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
+ * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
+ * unbalanced and so can also be unconditionally called in driver load code to
+ * reflect the current hardware state of the crtc.
+ *
+ * Note that unlike drm_crtc_vblank_on_config(), this function uses the
+ * default configuration values.
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ const struct drm_vblank_crtc_config config = {
+ .offdelay_ms = drm_vblank_offdelay,
+ .disable_immediate = crtc->dev->vblank_disable_immediate
+ };
+
+ drm_crtc_vblank_on_config(crtc, &config);
+}
EXPORT_SYMBOL(drm_crtc_vblank_on);
static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
@@ -1556,16 +1585,21 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
*
* Note that drivers must have race-free high-precision timestamping support,
* i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
- * &drm_driver.vblank_disable_immediate must be set to indicate the
+ * &drm_vblank_crtc_config.disable_immediate must be set to indicate the
* time-stamping functions are race-free against vblank hardware counter
* increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
- WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
- WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+
+ drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp);
+ drm_WARN_ON_ONCE(dev, vblank->inmodeset);
+ drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate);
- drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
+ drm_vblank_restore(dev, pipe);
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
@@ -1754,7 +1788,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
/* If the counter is currently enabled and accurate, short-circuit
* queries to return the cached timestamp of the last vblank.
*/
- if (dev->vblank_disable_immediate &&
+ if (vblank->config.disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
@@ -1918,8 +1952,8 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
* been signaled. The disable has to be last (after
* drm_handle_vblank_events) so that the timestamp is always accurate.
*/
- disable_irq = (dev->vblank_disable_immediate &&
- drm_vblank_offdelay > 0 &&
+ disable_irq = (vblank->config.disable_immediate &&
+ vblank->config.offdelay_ms > 0 &&
!atomic_read(&vblank->refcount));
drm_handle_vblank_events(dev, pipe);
@@ -1992,7 +2026,8 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
vblank = drm_crtc_vblank_crtc(crtc);
- vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
+ vblank_enabled = READ_ONCE(vblank->config.disable_immediate) &&
+ READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 62dcfdc7894d..ab9ca4824b62 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -72,7 +72,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched, true);
+ drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index e2c7373f20c6..6a6761935224 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -110,7 +110,7 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
- mapping = arm_iommu_create_mapping(&platform_bus_type,
+ mapping = arm_iommu_create_mapping(dev,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 81d501efd013..23646e55f142 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -254,10 +254,6 @@ static inline int exynos_drm_check_fimc_device(struct device *dev)
}
#endif
-int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool nonblock);
-
-
extern struct platform_driver fimd_driver;
extern struct platform_driver exynos5433_decon_driver;
extern struct platform_driver decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 142184c8c3bc..4d7ea65b7dd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1125,7 +1125,7 @@ static void fimc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = fimc_commit,
.abort = fimc_abort,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1b111e2c3347..59fa22050717 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1162,7 +1162,7 @@ static void gsc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = gsc_commit,
.abort = gsc_abort,
};
@@ -1174,7 +1174,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index a9d469896824..2788105ac780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -403,7 +403,7 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
return 0;
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = scaler_commit,
};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 3adc2c9ab72d..f3a4517bdf27 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -568,7 +568,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- gma_encoder->i2c_bus->slave_addr = 0x2C;
+ gma_encoder->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8245b5603d2c..d5924ca3ed05 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -14,8 +14,8 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#define SLAVE_ADDR1 0x70
-#define SLAVE_ADDR2 0x72
+#define TARGET_ADDR1 0x70
+#define TARGET_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -357,10 +357,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->target_addr != TARGET_ADDR1 &&
+ p_child->target_addr != TARGET_ADDR2) {
/*
- * If the slave address is neither 0x70 nor 0x72,
+ * If the target address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
@@ -371,22 +371,22 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ DRM_DEBUG_KMS("the SDVO device with target addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
+ p_child->target_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
if (!p_mapping->initialized) {
p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->target_addr = p_child->target_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
- p_mapping->slave_addr,
+ p_mapping->target_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin);
@@ -394,10 +394,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->target2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ DRM_DEBUG_KMS("there exists the target2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
count++;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0e6facf21e33..b5adea2a20c3 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -186,13 +186,13 @@ struct child_device_config {
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
- u8 slave_addr;
+ u8 target_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 dvo2_port;
u8 i2c2_pin;
- u8 slave2_addr;
+ u8 target2_addr;
u8 ddc2_pin;
u8 capabilities;
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index aa45509859f2..ee8b047587f2 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -333,7 +333,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
clear_err:
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
- * BUS_ERROR raised by the slave's NAK.
+ * BUS_ERROR raised by the target's NAK.
*/
GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 8b64f61ffaf9..d67c2b3ad901 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -498,6 +498,7 @@ static const struct file_operations psb_gem_fops = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct drm_driver driver = {
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83c17689c454..bddf89b82fec 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -202,7 +202,7 @@ struct psb_intel_opregion {
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
- u8 slave_addr;
+ u8 target_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 i2c_speed;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index c111e933e1ed..2499fd6a80c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -80,7 +80,7 @@ struct psb_intel_mode_device {
struct gma_i2c_chan {
struct i2c_adapter base;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
+ u8 target_addr;
/* for getting at dev. private (mmio etc.) */
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 8d1be94a443b..138f153d38ba 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -97,7 +97,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
struct i2c_msg msgs[] = {
{
- .addr = lvds_i2c_bus->slave_addr,
+ .addr = lvds_i2c_bus->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -710,7 +710,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- lvds_priv->i2c_bus->slave_addr = 0x2C;
+ lvds_priv->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e4f914deceba..8dafff963ca8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -70,7 +70,7 @@ struct psb_intel_sdvo {
struct gma_encoder base;
struct i2c_adapter *i2c;
- u8 slave_addr;
+ u8 target_addr;
struct i2c_adapter ddc;
@@ -259,13 +259,13 @@ static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 a
{
struct i2c_msg msgs[] = {
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = ch,
@@ -463,14 +463,14 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2 *i;
buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
buf[2*i + 1] = ((u8*)args)[i];
}
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2*i;
@@ -479,12 +479,12 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
/* the following two are to read the response */
status = SDVO_I2C_CMD_STATUS;
- msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].addr = psb_intel_sdvo->target_addr;
msgs[i+1].flags = 0;
msgs[i+1].len = 1;
msgs[i+1].buf = &status;
- msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].addr = psb_intel_sdvo->target_addr;
msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
@@ -1899,7 +1899,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
}
static u8
-psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+psb_intel_sdvo_get_target_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
@@ -1913,14 +1913,14 @@ psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
/* If the BIOS described our SDVO device, take advantage of it. */
- if (my_mapping->slave_addr)
- return my_mapping->slave_addr;
+ if (my_mapping->target_addr)
+ return my_mapping->target_addr;
/* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
- if (other_mapping->slave_addr) {
- if (other_mapping->slave_addr == 0x70)
+ if (other_mapping->target_addr) {
+ if (other_mapping->target_addr == 0x70)
return 0x72;
else
return 0x70;
@@ -2446,7 +2446,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
psb_intel_sdvo->sdvo_reg = sdvo_reg;
- psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo->target_addr = psb_intel_sdvo_get_target_addr(dev, sdvo_reg) >> 1;
psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
kfree(psb_intel_sdvo);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 207aa3f660b0..6b566f3aeecb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -57,7 +57,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_mm_init(struct hibmc_drm_private *hibmc);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
#endif
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index d0c3880d7f80..493e730c685b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -170,13 +170,13 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = val,
@@ -189,7 +189,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
u8 buf[2] = { addr, val };
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = buf,
@@ -197,7 +197,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
-/** Probes for a CH7017 on the given bus and slave address. */
+/** Probes for a CH7017 on the given bus and target address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -227,13 +227,13 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
break;
default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "slave %d.\n",
- val, adapter->name, dvo->slave_addr);
+ "target %d.\n",
+ val, adapter->name, dvo->target_addr);
goto fail;
}
DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
- str, adapter->name, dvo->slave_addr);
+ str, adapter->name, dvo->target_addr);
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 2e8e85da5a40..534b8544e0a4 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -153,13 +153,13 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -176,7 +176,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -188,7 +188,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -202,7 +202,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -229,8 +229,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
- DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
- vendor, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s target %d.\n",
+ vendor, adapter->name, dvo->target_addr);
goto out;
}
@@ -240,8 +240,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
devid = ch7xxx_get_did(device);
if (!devid) {
- DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
- device, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s target %d.\n",
+ device, adapter->name, dvo->target_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index eef72bb3b767..0d5cce6051b1 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -198,7 +198,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -209,7 +209,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -226,7 +226,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -238,7 +238,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -253,13 +253,13 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
-/* Probes the given bus and slave address for an ivch */
+/* Probes the given bus and target address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -283,10 +283,10 @@ static bool ivch_init(struct intel_dvo_device *dvo,
* very unique, check that the value in the base address field matches
* the address it's responding on.
*/
- if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->target_addr) {
DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
- (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->target_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 21486008dae9..9d47f8a93e94 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -398,13 +398,13 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -422,7 +422,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!ns->quiet) {
DRM_DEBUG_KMS
("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
- adapter->name, dvo->slave_addr);
+ adapter->name, dvo->target_addr);
}
return false;
@@ -441,7 +441,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -456,7 +456,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!ns->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -487,8 +487,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (NS2501_VID & 0xff)) {
- DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
@@ -496,8 +496,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (NS2501_DID & 0xff)) {
- DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
ns->quiet = false;
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 6c461024c8e3..a8dd40c00997 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -79,13 +79,13 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -102,7 +102,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -113,7 +113,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -127,7 +127,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -153,8 +153,8 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_VID & 0xff)) {
- DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
@@ -162,8 +162,8 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_DID & 0xff)) {
- DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 0939e097f4f9..d9a0cd753a87 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -100,13 +100,13 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -123,7 +123,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -134,7 +134,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -148,7 +148,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -183,15 +183,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
- "Slave %d.\n",
- id, adapter->name, dvo->slave_addr);
+ "Target %d.\n",
+ id, adapter->name, dvo->target_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
- "Slave %d.\n",
- id, adapter->name, dvo->slave_addr);
+ "Target %d.\n",
+ id, adapter->name, dvo->target_addr);
goto out;
}
tfp->quiet = false;
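The three DVO transmitter hunks above (ns2501, sil164, tfp410) are one mechanical rename, dvo->slave_addr to dvo->target_addr, tracking the I2C core's move to controller/target terminology; the transfer logic itself is untouched. A minimal sketch of the shared write-then-read shape, assuming only the standard Linux I2C API (read_reg() is a hypothetical helper, not code from this series):

/* Hypothetical helper illustrating the readb pattern common to the
 * sil164/tfp410/ns2501 paths after the rename: one write message to
 * select the register, one read message for the value.
 */
static bool read_reg(struct intel_dvo_device *dvo, u8 reg, u8 *val)
{
	u8 out_buf[1] = { reg };
	struct i2c_msg msgs[] = {
		{
			.addr = dvo->target_addr,	/* formerly dvo->slave_addr */
			.flags = 0,			/* write: register index */
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = dvo->target_addr,
			.flags = I2C_M_RD,		/* read: one byte back */
			.len = 1,
			.buf = val,
		},
	};

	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}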
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index a8e746a0f670..526c8c4d7b53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -89,6 +89,7 @@ void g4x_dp_set_clock(struct intel_encoder *encoder,
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
@@ -118,7 +119,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -140,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
TRANS_DP_ENH_FRAMING,
pipe_config->enhanced_framing ?
TRANS_DP_ENH_FRAMING : 0);
@@ -166,9 +167,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(dev_priv, cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -179,7 +181,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
+ struct intel_display *display = &dev_priv->display;
+ bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(dev_priv, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -191,6 +194,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -198,7 +202,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -208,8 +212,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(500);
/*
@@ -223,14 +227,15 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -238,22 +243,23 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
+ drm_dbg_kms(display->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum port port, enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe p;
- for_each_pipe(dev_priv, p) {
- u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
+ for_each_pipe(display, p) {
+ u32 val = intel_de_read(display, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -261,7 +267,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ drm_dbg_kms(display->drm, "No pipe for DP port %c found\n",
port_name(port));
/* must initialize pipe to something for the asserts */
@@ -274,10 +280,11 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
bool ret;
u32 val;
- val = intel_de_read(dev_priv, dp_reg);
+ val = intel_de_read(display, dp_reg);
ret = val & DP_PORT_EN;
@@ -333,6 +340,7 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state)
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
@@ -344,12 +352,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = intel_de_read(dev_priv, intel_dp->output_reg);
+ tmp = intel_de_read(display, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = intel_de_read(dev_priv,
+ u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_ENH_FRAMING)
@@ -390,7 +398,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -410,17 +418,18 @@ static void
intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
- if (drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, intel_dp->output_reg) &
+ if (drm_WARN_ON(display->drm,
+ (intel_de_read(display, intel_dp->output_reg) &
DP_PORT_EN) == 0))
return;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -430,12 +439,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -454,12 +463,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -480,7 +489,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!crtc_state->has_audio)
@@ -488,7 +497,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
/* Enable audio presence detect */
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -497,7 +506,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!old_crtc_state->has_audio)
@@ -507,7 +516,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
/* Disable audio presence detect */
intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
}
static void intel_disable_dp(struct intel_atomic_state *state,
@@ -596,7 +605,7 @@ cpt_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -615,8 +624,8 @@ cpt_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void
@@ -624,7 +633,7 @@ g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
@@ -643,14 +652,14 @@ g4x_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* enable with pattern 1 (as per spec) */
@@ -665,8 +674,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
*/
intel_dp->DP |= DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -674,12 +683,13 @@ static void intel_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
- if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(display->drm, dp_reg & DP_PORT_EN))
return;
with_intel_pps_lock(intel_dp, wakeref) {
@@ -1026,21 +1036,21 @@ static void
g4x_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = g4x_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
@@ -1074,21 +1084,21 @@ static void
snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = snb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
@@ -1126,21 +1136,21 @@ static void
ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = ivb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/*
@@ -1185,15 +1195,15 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 bit;
switch (encoder->hpd_pin) {
@@ -1211,15 +1221,15 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv)) & bit;
+ return intel_de_read(display, PORT_HOTPLUG_STAT(display)) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
@@ -1241,7 +1251,8 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe;
@@ -1254,10 +1265,11 @@ enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
intel_dp->reset_link_params = true;
@@ -1279,6 +1291,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1288,11 +1301,11 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (!assert_port_valid(dev_priv, port))
return false;
- devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
- drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
@@ -1312,7 +1325,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
goto err_encoder_init;
@@ -1396,7 +1409,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
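Every g4x_dp.c hunk follows the same conversion recipe: derive a struct intel_display from the object at hand (to_intel_display() on an encoder or intel_dp, or &dev_priv->display) and pass it to the intel_de_*() and drm_dbg_kms() helpers in place of drm_i915_private. A condensed sketch of the resulting shape, using only calls visible in the hunks above (write_dp() is a hypothetical name):

/* Hypothetical helper condensing the converted register-access idiom:
 * fetch the display pointer once, write, flush with a posting read,
 * and log via display->drm.
 */
static void write_dp(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(display, intel_dp->output_reg); /* flush */

	drm_dbg_kms(display->drm, "DP 0x%08x written\n", intel_dp->DP);
}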
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8096492b3fad..46f23bdb4c17 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -686,6 +686,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -697,7 +698,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
if (!assert_hdmi_port_valid(dev_priv, port))
return;
- devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 2b7c3d270b17..15cda57fbc91 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -4028,7 +4028,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv)
dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.funcs.wm = &nop_funcs;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ae8f6617aa70..293efc1f841d 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
#include "i915_reg.h"
@@ -330,7 +331,7 @@ static int afe_clk(struct intel_encoder *encoder,
int bpp;
if (crtc_state->dsc.compression_enable)
- bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -863,7 +864,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
- mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ mul = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@@ -887,7 +888,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
int bpp, line_time_us, byte_clk_period_ns;
if (crtc_state->dsc.compression_enable)
- bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1470,7 +1471,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
if (pipe_config->dsc.compressed_bpp_x16) {
- int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
+ int div = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
adjusted_mode->crtc_htotal =
@@ -1944,6 +1945,7 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
void icl_dsi_init(struct drm_i915_private *dev_priv,
const struct intel_bios_encoder_data *devdata)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
@@ -2007,7 +2009,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
+ intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
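The icl_dsi.c hunks swap the driver-local to_bpp_int() for the shared fxp_q4_to_int() from <drm/drm_fixed.h>. Assuming compressed_bpp_x16 is a Q4 fixed-point value (bits per pixel times 16), the conversion is a right shift by four. A worked sketch under that assumption (dsc_bpp_int() is a hypothetical wrapper):

#include <drm/drm_fixed.h>

/* Q4 fixed point: value == bpp * 16, so 12.5 bpp is stored as 200
 * and fxp_q4_to_int(200) == 200 >> 4 == 12 whole bits per pixel.
 */
static int dsc_bpp_int(const struct intel_crtc_state *crtc_state)
{
	return fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
}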
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 0aa3999374e2..c3b29a331d72 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -183,9 +183,9 @@ void intel_unregister_dsm_handler(void)
{
}
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
acpi_handle dhandle;
union acpi_object *obj;
@@ -263,15 +263,14 @@ static u32 acpi_display_type(struct intel_connector *connector)
return display_type;
}
-void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+void intel_acpi_device_id_update(struct intel_display *display)
{
- struct drm_device *drm_dev = &dev_priv->drm;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 display_index[16] = {};
/* Populate the ACPI IDs for all connectors for a given drm_device */
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
u32 device_id, type;
@@ -288,10 +287,10 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
}
/* NOTE: The connector order must be final before this is called. */
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display)
{
+ struct drm_device *drm_dev = display->drm;
struct drm_connector_list_iter conn_iter;
- struct drm_device *drm_dev = &i915->drm;
struct fwnode_handle *fwnode = NULL;
struct drm_connector *connector;
struct acpi_device *adev;
@@ -333,7 +332,7 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
fwnode_handle_put(fwnode);
}
-void intel_acpi_video_register(struct drm_i915_private *i915)
+void intel_acpi_video_register(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
@@ -347,7 +346,7 @@ void intel_acpi_video_register(struct drm_i915_private *i915)
* a native backlight later and acpi_video_register_backlight() should
* only be called after any native backlights have been registered.
*/
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_panel *panel = &to_intel_connector(connector)->panel;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 6a0007452f95..788a63071661 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,26 +6,26 @@
#ifndef __INTEL_ACPI_H__
#define __INTEL_ACPI_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef CONFIG_ACPI
void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
-void intel_acpi_device_id_update(struct drm_i915_private *i915);
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
-void intel_acpi_video_register(struct drm_i915_private *i915);
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display);
+void intel_acpi_device_id_update(struct intel_display *display);
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display);
+void intel_acpi_video_register(struct intel_display *display);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
static inline
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display) { return; }
static inline
-void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
+void intel_acpi_device_id_update(struct intel_display *display) { return; }
static inline
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display) { return; }
static inline
-void intel_acpi_video_register(struct drm_i915_private *i915) { return; }
+void intel_acpi_video_register(struct intel_display *display) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
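With the header now forward-declaring struct intel_display, ACPI call sites compile against the new signatures whether or not CONFIG_ACPI is set, since the stubs mirror them exactly. A sketch of a converted caller (register_acpi() is a hypothetical wrapper, not part of the patch):

/* Hypothetical caller: all three helpers now take the display pointer,
 * and the !CONFIG_ACPI stubs above keep this building unchanged.
 */
static void register_acpi(struct intel_display *display)
{
	intel_acpi_device_id_update(display);
	intel_acpi_assign_connector_fwnodes(display);
	intel_acpi_video_register(display);
}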
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 10689480338e..186cf4833f71 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -139,7 +139,7 @@ static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
lfps_half_cycle;
@@ -158,7 +158,7 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
@@ -171,10 +171,10 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return true;
/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
@@ -187,7 +187,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
check_entry_lines = 15;
intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
@@ -212,9 +212,9 @@ static int tgl_io_buffer_wake_time(void)
static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return tgl_io_buffer_wake_time();
else
return skl_io_buffer_wake_time();
@@ -223,7 +223,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
int tfw_exit_latency = 20; /* eDP spec */
int phy_wake = 4; /* eDP spec */
@@ -236,9 +236,9 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
fast_wake_time = precharge + preamble + phy_wake +
tfw_exit_latency;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
max_wake_lines = 68;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
max_wake_lines = 12;
else
max_wake_lines = 8;
@@ -255,7 +255,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
@@ -269,7 +269,7 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
@@ -277,10 +277,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_is_edp(intel_dp))
return;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return;
- if (!intel_dp_as_sdp_supported(intel_dp))
+ if (!intel_dp->as_sdp_supported)
return;
if (crtc_state->has_psr)
@@ -309,13 +309,13 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
static void lnl_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.sel_update_enabled &&
- !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 ||
+ (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
return;
/*
@@ -329,16 +329,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
- intel_de_write(dev_priv,
- PORT_ALPM_CTL(dev_priv, port),
+ intel_de_write(display,
+ PORT_ALPM_CTL(display, port),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
intel_dp->alpm_parameters.silence_period_sym_clocks));
- intel_de_write(dev_priv,
- PORT_ALPM_LFPS_CTL(dev_priv, port),
+ intel_de_write(display,
+ PORT_ALPM_LFPS_CTL(display, port),
PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
@@ -356,7 +356,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
- intel_de_write(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder), alpm_ctl);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
@@ -368,14 +368,14 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
enum transcoder cpu_transcoder;
u32 alpm_ctl;
int ret;
- ret = drm_modeset_lock_single_interruptible(&dev_priv->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -387,14 +387,14 @@ static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
cpu_transcoder = crtc_state->cpu_transcoder;
- alpm_ctl = intel_de_read(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder));
+ alpm_ctl = intel_de_read(display, ALPM_CTL(display, cpu_transcoder));
seq_printf(m, "LOBF status: %s\n", str_enabled_disabled(alpm_ctl & ALPM_CTL_LOBF_ENABLE));
seq_printf(m, "Aux-wake alpm status: %s\n",
str_enabled_disabled(!(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE)));
seq_printf(m, "Aux-less alpm status: %s\n",
str_enabled_disabled(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE));
out:
- drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -403,10 +403,10 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct dentry *root = connector->base.debugfs_entry;
- if (DISPLAY_VER(i915) < 20 ||
+ if (DISPLAY_VER(display) < 20 ||
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
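The intel_alpm.c conversion also moves the version and parameter checks onto the display pointer: DISPLAY_VER(display) and display->params replace their dev_priv spellings. A minimal sketch of the gating idiom seen throughout this file (alpm_capable() is a hypothetical name):

/* Hypothetical predicate: the AUX-less ALPM/LOBF paths in this file
 * are gated on display version 20+, checked via the display pointer.
 */
static bool alpm_capable(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 20;
}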
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 76aa10b6f647..12d6ed940751 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -276,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb = NULL;
+ crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color_commit = NULL;
return &crtc_state->uapi;
}
@@ -310,7 +311,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
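After the split of crtc_state->dsb into dsb_color_vblank and dsb_color_commit, duplicated states start with both pointers cleared and destruction asserts that both have been released. A sketch of that invariant, assuming only the fields shown above (check_dsb_released() is hypothetical):

/* Hypothetical assertion helper restating the destroy-time invariant:
 * neither DSB buffer may still be live when the state is freed.
 */
static void check_dsb_released(struct drm_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
	drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit);
}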
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b9bafec06fb8..f5e7eefab2f1 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -26,6 +26,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_fixed.h>
#include <drm/intel/i915_component.h>
#include "i915_drv.h"
@@ -452,8 +453,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
lanes = crtc_state->lane_count;
drm_dbg_kms(&i915->drm,
- "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
- h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);
+ "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n",
+ h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk);
if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;
@@ -979,7 +980,8 @@ retry:
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
@@ -1011,7 +1013,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--i915->display.audio.power_refcount == 0)
@@ -1024,7 +1027,8 @@ static void i915_audio_component_put_power(struct device *kdev,
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
if (DISPLAY_VER(i915) < 9)
@@ -1052,7 +1056,8 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
return -ENODEV;
@@ -1111,7 +1116,8 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int cpu_transcoder, int rate)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = i915->display.audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
@@ -1153,7 +1159,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
@@ -1188,24 +1195,25 @@ static const struct drm_audio_component_ops i915_audio_component_ops = {
.get_eld = i915_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *i915_kdev,
+static int i915_audio_component_bind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
int i;
if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
if (drm_WARN_ON(&i915->drm,
- !device_link_add(hda_kdev, i915_kdev,
+ !device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = &i915_audio_component_ops;
- acomp->base.dev = i915_kdev;
+ acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -1215,11 +1223,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_audio_component_unbind(struct device *i915_kdev,
+static void i915_audio_component_unbind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = NULL;
@@ -1227,7 +1236,7 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
i915->display.audio.component = NULL;
drm_modeset_unlock_all(&i915->drm);
- device_link_remove(hda_kdev, i915_kdev);
+ device_link_remove(hda_kdev, drv_kdev);
if (i915->display.audio.power_refcount)
drm_err(&i915->drm, "audio power refcount %d after unbind\n",
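The audio component hunks rename the bound device (i915_kdev to drv_kdev) and recover the display via to_intel_display(kdev), but the bind-time device link is unchanged: the HDA device is the consumer, the GPU device the supplier. A hedged sketch of that link setup (link_audio() is a hypothetical name):

/* Hypothetical sketch of the stateless device link created at bind:
 * ties hda_kdev (consumer) to drv_kdev (supplier) for PM ordering.
 */
static int link_audio(struct device *drv_kdev, struct device *hda_kdev)
{
	if (!device_link_add(hda_kdev, drv_kdev, DL_FLAG_STATELESS))
		return -ENOMEM;	/* mirrors the bind() error path above */

	return 0;
}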
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 6c3333136737..9e05745d797d 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -455,7 +455,7 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+ panel->backlight.device->props.power = BACKLIGHT_POWER_OFF;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
@@ -773,7 +773,7 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+ panel->backlight.device->props.power = BACKLIGHT_POWER_ON;
}
void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
@@ -870,12 +870,12 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
*/
if (panel->backlight.enabled) {
if (panel->backlight.power) {
- bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+ bool enable = bd->props.power == BACKLIGHT_POWER_ON &&
bd->props.brightness != 0;
panel->backlight.power(connector, enable);
}
} else {
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
}
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -945,9 +945,9 @@ int intel_backlight_device_register(struct intel_connector *connector)
props.max_brightness);
if (panel->backlight.enabled)
- props.power = FB_BLANK_UNBLANK;
+ props.power = BACKLIGHT_POWER_ON;
else
- props.power = FB_BLANK_POWERDOWN;
+ props.power = BACKLIGHT_POWER_OFF;
name = kstrdup_const("intel_backlight", GFP_KERNEL);
if (!name)
@@ -1011,7 +1011,7 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz);
}
@@ -1073,7 +1073,7 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz * 128);
}
@@ -1091,7 +1091,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(i915))
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
else
clock = KHz(i915->display.cdclk.hw.cdclk);
@@ -1109,7 +1109,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(i915))
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
else
clock = KHz(i915->display.cdclk.hw.cdclk);
@@ -1133,7 +1133,7 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
mul = 128;
}
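The backlight hunks replace the fbdev FB_BLANK_* constants with the backlight subsystem's own BACKLIGHT_POWER_* names; the rename is intended to be behavior-neutral (BACKLIGHT_POWER_ON standing in for FB_BLANK_UNBLANK, BACKLIGHT_POWER_OFF for FB_BLANK_POWERDOWN). A sketch of the enable test in the new spelling (backlight_should_enable() is a hypothetical helper):

#include <linux/backlight.h>

/* Hypothetical predicate matching update_status() above: the panel is
 * driven only when userspace asks for power on and nonzero brightness.
 */
static bool backlight_should_enable(const struct backlight_device *bd)
{
	return bd->props.power == BACKLIGHT_POWER_ON &&
	       bd->props.brightness != 0;
}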
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index ec1e3a380360..bed485374ab0 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -30,6 +30,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -65,15 +66,15 @@
/* Wrapper for VBT child device config */
struct intel_bios_encoder_data {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct child_device_config child;
struct dsc_compression_parameters_entry *dsc;
struct list_head node;
};
-#define SLAVE_ADDR1 0x70
-#define SLAVE_ADDR2 0x72
+#define TARGET_ADDR1 0x70
+#define TARGET_ADDR2 0x72
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
@@ -144,12 +145,12 @@ struct bdb_block_entry {
};
static const void *
-bdb_find_section(struct drm_i915_private *i915,
+bdb_find_section(struct intel_display *display,
enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
- list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
+ list_for_each_entry(entry, &display->vbt.bdb_blocks, node) {
if (entry->section_id == section_id)
return entry->data + 3;
}
@@ -199,12 +200,12 @@ static const struct {
.min_size = sizeof(struct bdb_generic_dtd), },
};
-static size_t lfp_data_min_size(struct drm_i915_private *i915)
+static size_t lfp_data_min_size(struct intel_display *display)
{
const struct bdb_lfp_data_ptrs *ptrs;
size_t size;
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@@ -359,7 +360,7 @@ static void next_lfp_data_ptr(struct lfp_data_ptr_table *next,
next->offset = prev->offset + size;
}
-static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+static void *generate_lfp_data_ptrs(struct intel_display *display,
const void *bdb)
{
int i, size, table_size, block_size, offset, fp_timing_size;
@@ -373,7 +374,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
* include block 41 and thus we don't need to
* generate one.
*/
- if (i915->display.vbt.version < 155)
+ if (display->vbt.version < 155)
return NULL;
fp_timing_size = 38;
@@ -382,7 +383,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
if (!block)
return NULL;
- drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+ drm_dbg_kms(display->drm, "Generating LFP data table pointers\n");
block_size = get_blocksize(block);
@@ -450,7 +451,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
}
static void
-init_bdb_block(struct drm_i915_private *i915,
+init_bdb_block(struct intel_display *display,
const void *bdb, enum bdb_block_id section_id,
size_t min_size)
{
@@ -463,14 +464,14 @@ init_bdb_block(struct drm_i915_private *i915,
/* Modern VBTs lack the LFP data table pointers block, make one up */
if (!block && section_id == BDB_LFP_DATA_PTRS) {
- temp_block = generate_lfp_data_ptrs(i915, bdb);
+ temp_block = generate_lfp_data_ptrs(display, bdb);
if (temp_block)
block = temp_block + 3;
}
if (!block)
return;
- drm_WARN(&i915->drm, min_size == 0,
+ drm_WARN(display->drm, min_size == 0,
"Block %d min_size is zero\n", section_id);
block_size = get_blocksize(block);
@@ -494,20 +495,22 @@ init_bdb_block(struct drm_i915_private *i915,
kfree(temp_block);
- drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
+ drm_dbg_kms(display->drm,
+ "Found BDB block %d (size %zu, min size %zu)\n",
section_id, block_size, min_size);
if (section_id == BDB_LFP_DATA_PTRS &&
!fixup_lfp_data_ptrs(bdb, entry->data + 3)) {
- drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n");
+ drm_err(display->drm,
+ "VBT has malformed LFP data table pointers\n");
kfree(entry);
return;
}
- list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
+ list_add_tail(&entry->node, &display->vbt.bdb_blocks);
}
-static void init_bdb_blocks(struct drm_i915_private *i915,
+static void init_bdb_blocks(struct intel_display *display,
const void *bdb)
{
int i;
@@ -517,14 +520,14 @@ static void init_bdb_blocks(struct drm_i915_private *i915,
size_t min_size = bdb_blocks[i].min_size;
if (section_id == BDB_LFP_DATA)
- min_size = lfp_data_min_size(i915);
+ min_size = lfp_data_min_size(display);
- init_bdb_block(i915, bdb, section_id, min_size);
+ init_bdb_block(display, bdb, section_id, min_size);
}
}
static void
-fill_detail_timing_data(struct drm_i915_private *i915,
+fill_detail_timing_data(struct intel_display *display,
struct drm_display_mode *panel_fixed_mode,
const struct bdb_edid_dtd *dvo_timing)
{
@@ -567,12 +570,12 @@ fill_detail_timing_data(struct drm_i915_private *i915,
/* Some VBTs have bogus h/vsync_end values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) {
- drm_dbg_kms(&i915->drm, "reducing hsync_end %d->%d\n",
+ drm_dbg_kms(display->drm, "reducing hsync_end %d->%d\n",
panel_fixed_mode->hsync_end, panel_fixed_mode->htotal);
panel_fixed_mode->hsync_end = panel_fixed_mode->htotal;
}
if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) {
- drm_dbg_kms(&i915->drm, "reducing vsync_end %d->%d\n",
+ drm_dbg_kms(display->drm, "reducing vsync_end %d->%d\n",
panel_fixed_mode->vsync_end, panel_fixed_mode->vtotal);
panel_fixed_mode->vsync_end = panel_fixed_mode->vtotal;
}
@@ -617,26 +620,26 @@ get_lfp_data_tail(const struct bdb_lfp_data *data,
return NULL;
}
-static int opregion_get_panel_type(struct drm_i915_private *i915,
+static int opregion_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
- return intel_opregion_get_panel_type(i915);
+ return intel_opregion_get_panel_type(display);
}
-static int vbt_get_panel_type(struct drm_i915_private *i915,
+static int vbt_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lfp_options *lfp_options;
- lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS);
+ lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS);
if (!lfp_options)
return -1;
if (lfp_options->panel_type > 0xf &&
lfp_options->panel_type != 0xff) {
- drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
+ drm_dbg_kms(display->drm, "Invalid VBT panel type 0x%x\n",
lfp_options->panel_type);
return -1;
}
@@ -644,12 +647,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2)
return lfp_options->panel_type2;
- drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
+ drm_WARN_ON(display->drm,
+ devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
return lfp_options->panel_type;
}
-static int pnpid_get_panel_type(struct drm_i915_private *i915,
+static int pnpid_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
@@ -668,14 +672,14 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
product_id_nodate.week_of_manufacture = 0;
product_id_nodate.year_of_manufacture = 0;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID");
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, "EDID");
drm_edid_print_product_id(&p, &product_id, true);
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return -1;
- data = bdb_find_section(i915, BDB_LFP_DATA);
+ data = bdb_find_section(display, BDB_LFP_DATA);
if (!data)
return -1;
@@ -699,7 +703,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
return best;
}
-static int fallback_get_panel_type(struct drm_i915_private *i915,
+static int fallback_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
@@ -713,13 +717,13 @@ enum panel_type {
PANEL_TYPE_FALLBACK,
};
-static int get_panel_type(struct drm_i915_private *i915,
+static int get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
struct {
const char *name;
- int (*get_panel_type)(struct drm_i915_private *i915,
+ int (*get_panel_type)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback);
int panel_type;
@@ -744,14 +748,14 @@ static int get_panel_type(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
+ panel_types[i].panel_type = panel_types[i].get_panel_type(display, devdata,
drm_edid, use_fallback);
- drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ drm_WARN_ON(display->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
if (panel_types[i].panel_type >= 0)
- drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
+ drm_dbg_kms(display->drm, "Panel type (%s): %d\n",
panel_types[i].name, panel_types[i].panel_type);
}
@@ -766,7 +770,7 @@ static int get_panel_type(struct drm_i915_private *i915,
else
i = PANEL_TYPE_FALLBACK;
- drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
+ drm_dbg_kms(display->drm, "Selected panel type (%s): %d\n",
panel_types[i].name, panel_types[i].panel_type);
return panel_types[i].panel_type;
@@ -784,14 +788,14 @@ static bool panel_bool(unsigned int value, int panel_type)
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *i915,
+parse_panel_options(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_options *lfp_options;
int panel_type = panel->vbt.panel_type;
int drrs_mode;
- lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS);
+ lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS);
if (!lfp_options)
return;
@@ -815,23 +819,23 @@ parse_panel_options(struct drm_i915_private *i915,
switch (drrs_mode) {
case 0:
panel->vbt.drrs_type = DRRS_TYPE_STATIC;
- drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
+ drm_dbg_kms(display->drm, "DRRS supported mode is static\n");
break;
case 2:
panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DRRS supported mode is seamless\n");
break;
default:
panel->vbt.drrs_type = DRRS_TYPE_NONE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DRRS not supported (VBT input)\n");
break;
}
}
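Aside: drrs_mode above comes out of a per-panel bitfield, and the hunk header further up names panel_bool(). A sketch of how these per-panel helpers are conventionally implemented (verify against the tree for the authoritative version):

	static unsigned int panel_bits(unsigned int value, int panel_type,
				       int num_bits)
	{
		/* each panel type owns a num_bits-wide slice of the word */
		return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
	}

	static bool panel_bool(unsigned int value, int panel_type)
	{
		return panel_bits(value, panel_type, 1);
	}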
static void
-parse_lfp_panel_dtd(struct drm_i915_private *i915,
+parse_lfp_panel_dtd(struct intel_display *display,
struct intel_panel *panel,
const struct bdb_lfp_data *lfp_data,
const struct bdb_lfp_data_ptrs *lfp_data_ptrs)
@@ -849,11 +853,11 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(i915, panel_fixed_mode, panel_dvo_timing);
+ fill_detail_timing_data(display, panel_fixed_mode, panel_dvo_timing);
panel->vbt.lfp_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
@@ -865,14 +869,14 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT initial LVDS value %x\n",
panel->vbt.bios_lvds_val);
}
}
static void
-parse_lfp_data(struct drm_i915_private *i915,
+parse_lfp_data(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_data *data;
@@ -882,41 +886,41 @@ parse_lfp_data(struct drm_i915_private *i915,
struct drm_printer p;
int panel_type = panel->vbt.panel_type;
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return;
- data = bdb_find_section(i915, BDB_LFP_DATA);
+ data = bdb_find_section(display, BDB_LFP_DATA);
if (!data)
return;
if (!panel->vbt.lfp_vbt_mode)
- parse_lfp_panel_dtd(i915, panel, data, ptrs);
+ parse_lfp_panel_dtd(display, panel, data, ptrs);
pnp_id = get_lfp_pnp_id(data, ptrs, panel_type);
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel");
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, "Panel");
drm_edid_print_product_id(&p, pnp_id, false);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
- drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ drm_dbg_kms(display->drm, "Panel name: %.*s\n",
(int)sizeof(tail->panel_name[0].name),
tail->panel_name[panel_type].name);
- if (i915->display.vbt.version >= 188) {
+ if (display->vbt.version >= 188) {
panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Seamless DRRS min refresh rate: %d Hz\n",
panel->vbt.seamless_drrs_min_refresh_rate);
}
}
static void
-parse_generic_dtd(struct drm_i915_private *i915,
+parse_generic_dtd(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_generic_dtd *generic_dtd;
@@ -932,20 +936,20 @@ parse_generic_dtd(struct drm_i915_private *i915,
* first on VBT >= 229, but still fall back to trying the old LFP
* block if that fails.
*/
- if (i915->display.vbt.version < 229)
+ if (display->vbt.version < 229)
return;
- generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
+ generic_dtd = bdb_find_section(display, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- drm_err(&i915->drm, "GDTD size %u is too small.\n",
+ drm_err(display->drm, "GDTD size %u is too small.\n",
generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- drm_err(&i915->drm, "Unexpected GDTD size %u\n",
+ drm_err(display->drm, "Unexpected GDTD size %u\n",
generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
@@ -953,7 +957,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
if (panel->vbt.panel_type >= num_dtd) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Panel type %d not found in table of %d DTD's\n",
panel->vbt.panel_type, num_dtd);
return;
@@ -998,7 +1002,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
@@ -1006,7 +1010,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
}
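Aside: because gdtd_size is allowed to differ from sizeof(struct generic_dtd_entry) (the "unknown fields, but keep going" case above), entries have to be addressed by the stride the VBT declares rather than by plain array indexing. A hypothetical accessor to illustrate the idea; not necessarily the helper the tree uses:

	/* Hypothetical: address DTD entry i by the VBT-declared stride. */
	static const struct generic_dtd_entry *
	generic_dtd_entry_ptr(const struct bdb_generic_dtd *generic_dtd, int i)
	{
		const u8 *base = (const u8 *)generic_dtd->dtd;

		return (const void *)(base + i * generic_dtd->gdtd_size);
	}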
static void
-parse_lfp_backlight(struct drm_i915_private *i915,
+parse_lfp_backlight(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_backlight *backlight_data;
@@ -1014,12 +1018,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
- backlight_data = bdb_find_section(i915, BDB_LFP_BACKLIGHT);
+ backlight_data = bdb_find_section(display, BDB_LFP_BACKLIGHT);
if (!backlight_data)
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported backlight data entry size %u\n",
backlight_data->entry_size);
return;
@@ -1029,7 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!panel->vbt.backlight.present) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
@@ -1037,7 +1041,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
- if (i915->display.vbt.version >= 191) {
+ if (display->vbt.version >= 191) {
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
@@ -1048,14 +1052,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (i915->display.vbt.version >= 234) {
+ if (display->vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (i915->display.vbt.version >= 236)
+ if (display->vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -1064,7 +1068,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
min_level = min_level / 255;
if (min_level > 255) {
- drm_warn(&i915->drm, "Brightness min level > 255\n");
+ drm_warn(display->drm, "Brightness min level > 255\n");
level = 255;
}
panel->vbt.backlight.min_brightness = min_level;
@@ -1076,13 +1080,13 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
- if (i915->display.vbt.version >= 239)
+ if (display->vbt.version >= 239)
panel->vbt.backlight.hdr_dpcd_refresh_timeout =
DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
else
panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
panel->vbt.backlight.pwm_freq_hz,
@@ -1093,16 +1097,16 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
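Aside: the hdr_dpcd_refresh_timeout conversion above rounds up rather than truncating. For reference, DIV_ROUND_UP is the standard kernel macro:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	/* e.g. DIV_ROUND_UP(250, 100) == 3; never rounds down */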
static void
-parse_sdvo_lvds_data(struct drm_i915_private *i915,
+parse_sdvo_lvds_data(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_sdvo_lvds_dtd *dtd;
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915->display.params.vbt_sdvo_panel_type;
+ index = display->params.vbt_sdvo_panel_type;
if (index == -2) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignore SDVO LVDS mode from BIOS VBT tables.\n");
return;
}
@@ -1110,14 +1114,14 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = bdb_find_section(display, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtd = bdb_find_section(i915, BDB_SDVO_LVDS_DTD);
+ dtd = bdb_find_section(display, BDB_SDVO_LVDS_DTD);
if (!dtd)
return;
@@ -1128,7 +1132,8 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
* it here to be sure.
*/
if (index >= ARRAY_SIZE(dtd->dtd)) {
- drm_err(&i915->drm, "index %d is larger than dtd->dtd[4] array\n",
+ drm_err(display->drm,
+ "index %d is larger than dtd->dtd[4] array\n",
index);
return;
}
@@ -1137,19 +1142,19 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(i915, panel_fixed_mode, &dtd->dtd[index]);
+ fill_detail_timing_data(display, panel_fixed_mode, &dtd->dtd[index]);
panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found SDVO LVDS mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
}
-static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
+static int intel_bios_ssc_frequency(struct intel_display *display,
bool alternate)
{
- switch (DISPLAY_VER(i915)) {
+ switch (DISPLAY_VER(display)) {
case 2:
return alternate ? 66667 : 48000;
case 3:
@@ -1161,45 +1166,46 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
}
static void
-parse_general_features(struct drm_i915_private *i915)
+parse_general_features(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_features *general;
- general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
+ general = bdb_find_section(display, BDB_GENERAL_FEATURES);
if (!general)
return;
- i915->display.vbt.int_tv_support = general->int_tv_support;
+ display->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (i915->display.vbt.version >= 155 &&
- (HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
- i915->display.vbt.int_crt_support = general->int_crt_support;
- i915->display.vbt.lvds_use_ssc = general->enable_ssc;
- i915->display.vbt.lvds_ssc_freq =
- intel_bios_ssc_frequency(i915, general->ssc_freq);
- i915->display.vbt.display_clock_mode = general->display_clock_mode;
- i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (i915->display.vbt.version >= 181) {
- i915->display.vbt.orientation = general->rotate_180 ?
+ if (display->vbt.version >= 155 &&
+ (HAS_DDI(display) || IS_VALLEYVIEW(i915)))
+ display->vbt.int_crt_support = general->int_crt_support;
+ display->vbt.lvds_use_ssc = general->enable_ssc;
+ display->vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(display, general->ssc_freq);
+ display->vbt.display_clock_mode = general->display_clock_mode;
+ display->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (display->vbt.version >= 181) {
+ display->vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ display->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
- i915->display.vbt.override_afc_startup = true;
- i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ if (display->vbt.version >= 249 && general->afc_startup_config) {
+ display->vbt.override_afc_startup = true;
+ display->vbt.override_afc_startup_val = general->afc_startup_config == 1 ? 0 : 7;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- i915->display.vbt.int_tv_support,
- i915->display.vbt.int_crt_support,
- i915->display.vbt.lvds_use_ssc,
- i915->display.vbt.lvds_ssc_freq,
- i915->display.vbt.display_clock_mode,
- i915->display.vbt.fdi_rx_polarity_inverted);
+ display->vbt.int_tv_support,
+ display->vbt.int_crt_support,
+ display->vbt.lvds_use_ssc,
+ display->vbt.lvds_ssc_freq,
+ display->vbt.display_clock_mode,
+ display->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -1209,7 +1215,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
}
static void
-parse_sdvo_device_mapping(struct drm_i915_private *i915)
+parse_sdvo_device_mapping(struct intel_display *display)
{
const struct intel_bios_encoder_data *devdata;
int count = 0;
@@ -1218,19 +1224,19 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_DISPLAY_VER(i915, 3, 7)) {
- drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n");
+ if (!IS_DISPLAY_VER(display, 3, 7)) {
+ drm_dbg_kms(display->drm, "Skipping SDVO device mapping\n");
return;
}
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
struct sdvo_device_mapping *mapping;
- if (child->slave_addr != SLAVE_ADDR1 &&
- child->slave_addr != SLAVE_ADDR2) {
+ if (child->target_addr != TARGET_ADDR1 &&
+ child->target_addr != TARGET_ADDR2) {
/*
- * If the slave address is neither 0x70 nor 0x72,
+ * If the target address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
@@ -1238,39 +1244,39 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Incorrect SDVO port. Skip it\n");
continue;
}
- drm_dbg_kms(&i915->drm,
- "the SDVO device with slave addr %2x is found on"
+ drm_dbg_kms(display->drm,
+ "the SDVO device with target addr %2x is found on"
" %s port\n",
- child->slave_addr,
+ child->target_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &display->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
- mapping->slave_addr = child->slave_addr;
+ mapping->target_addr = child->target_addr;
mapping->dvo_wiring = child->dvo_wiring;
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_port, mapping->target_addr,
mapping->dvo_wiring, mapping->ddc_pin,
mapping->i2c_pin);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (child->slave2_addr) {
+ if (child->target2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- drm_dbg_kms(&i915->drm,
- "there exists the slave2_addr. Maybe this"
+ drm_dbg_kms(display->drm,
+ "there exists the target2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
count++;
@@ -1278,28 +1284,28 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
if (!count) {
/* No SDVO device info is found */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No SDVO device info is found in VBT\n");
}
}
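Aside on the slot arithmetic in this function: assuming the usual encoding where DEVICE_PORT_DVOB and DEVICE_PORT_DVOC are 1 and 2, the "child->dvo_port - 1" index maps the two accepted ports onto a two-entry array:

	/* DVOB -> sdvo_mappings[0], DVOC -> sdvo_mappings[1] (illustrative) */
	mapping = &display->vbt.sdvo_mappings[child->dvo_port - 1];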
static void
-parse_driver_features(struct drm_i915_private *i915)
+parse_driver_features(struct intel_display *display)
{
const struct bdb_driver_features *driver;
- driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(display, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (DISPLAY_VER(i915) >= 5) {
+ if (DISPLAY_VER(display) >= 5) {
/*
* Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
* to mean "eDP". The VBT spec doesn't agree with that
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- i915->display.vbt.int_lvds_support = 0;
+ display->vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -1312,25 +1318,25 @@ parse_driver_features(struct drm_i915_private *i915)
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (i915->display.vbt.version >= 134 &&
+ if (display->vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- i915->display.vbt.int_lvds_support = 0;
+ display->vbt.int_lvds_support = 0;
}
}
static void
-parse_panel_driver_features(struct drm_i915_private *i915,
+parse_panel_driver_features(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_driver_features *driver;
- driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(display, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (i915->display.vbt.version < 228) {
- drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
+ if (display->vbt.version < 228) {
+ drm_dbg_kms(display->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -1354,7 +1360,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
}
static void
-parse_power_conservation_features(struct drm_i915_private *i915,
+parse_power_conservation_features(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_power *power;
@@ -1362,10 +1368,10 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.vrr = true; /* matches Windows behaviour */
- if (i915->display.vbt.version < 228)
+ if (display->vbt.version < 228)
return;
- power = bdb_find_section(i915, BDB_LFP_POWER);
+ power = bdb_find_section(display, BDB_LFP_POWER);
if (!power)
return;
@@ -1388,16 +1394,16 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.drrs_type = DRRS_TYPE_NONE;
}
- if (i915->display.vbt.version >= 232)
+ if (display->vbt.version >= 232)
panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
- if (i915->display.vbt.version >= 233)
+ if (display->vbt.version >= 233)
panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
panel_type);
}
static void
-parse_edp(struct drm_i915_private *i915,
+parse_edp(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_edp *edp;
@@ -1405,7 +1411,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
- edp = bdb_find_section(i915, BDB_EDP);
+ edp = bdb_find_section(display, BDB_EDP);
if (!edp)
return;
@@ -1427,7 +1433,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.pps = *edp_pps;
- if (i915->display.vbt.version >= 224) {
+ if (display->vbt.version >= 224) {
panel->vbt.edp.rate =
edp->edp_fast_link_training_rate[panel_type] * 20;
} else {
@@ -1442,7 +1448,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.rate = 540000;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP link rate value %u\n",
edp_link_params->rate);
break;
@@ -1460,7 +1466,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.lanes = 4;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP lane count value %u\n",
edp_link_params->lanes);
break;
@@ -1480,7 +1486,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP pre-emphasis value %u\n",
edp_link_params->preemphasis);
break;
@@ -1500,19 +1506,19 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP voltage swing value %u\n",
edp_link_params->vswing);
break;
}
- if (i915->display.vbt.version >= 173) {
+ if (display->vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915->display.params.edp_vswing) {
+ if (display->params.edp_vswing) {
panel->vbt.edp.low_vswing =
- i915->display.params.edp_vswing == 1;
+ display->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
panel->vbt.edp.low_vswing = vswing == 0;
@@ -1522,26 +1528,27 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.drrs_msa_timing_delay =
panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
- if (i915->display.vbt.version >= 244)
+ if (display->vbt.version >= 244)
panel->vbt.edp.max_link_rate =
edp->edp_max_port_link_rate[panel_type] * 20;
- if (i915->display.vbt.version >= 251)
+ if (display->vbt.version >= 251)
panel->vbt.edp.dsc_disable =
panel_bool(edp->edp_dsc_disable, panel_type);
}
static void
-parse_psr(struct drm_i915_private *i915,
+parse_psr(struct intel_display *display,
struct intel_panel *panel)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
- psr = bdb_find_section(i915, BDB_PSR);
+ psr = bdb_find_section(display, BDB_PSR);
if (!psr) {
- drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
+ drm_dbg_kms(display->drm, "No PSR BDB found.\n");
return;
}
@@ -1558,8 +1565,8 @@ parse_psr(struct drm_i915_private *i915,
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (i915->display.vbt.version >= 205 &&
- (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
+ if (display->vbt.version >= 205 &&
+ (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
panel->vbt.psr.tp1_wakeup_time_us = 500;
@@ -1571,7 +1578,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
fallthrough;
@@ -1591,7 +1598,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
fallthrough;
@@ -1604,7 +1611,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
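Aside: both wakeup-time switches decode the same 2-bit encoding documented in the comment above (0=500us, 1=100us, 2=2500us, 3=0us), so each is equivalent to a small lookup table; a sketch, minus the out-of-range warning the switch emits:

	static const u16 psr_wakeup_us[] = { 500, 100, 2500, 0 };

	panel->vbt.psr.tp1_wakeup_time_us =
		psr_wakeup_us[psr_table->tp1_wakeup_time & 3];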
- if (i915->display.vbt.version >= 226) {
+ if (display->vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = panel_bits(wakeup_time, panel_type, 2);
@@ -1630,13 +1637,13 @@ parse_psr(struct drm_i915_private *i915,
}
}
-static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
+static void parse_dsi_backlight_ports(struct intel_display *display,
struct intel_panel *panel,
enum port port)
{
- enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+ enum port port_bc = DISPLAY_VER(display) >= 11 ? PORT_B : PORT_C;
- if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
+ if (!panel->vbt.dsi.config->dual_link || display->vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
panel->vbt.dsi.cabc_ports = BIT(port);
@@ -1676,7 +1683,7 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
}
static void
-parse_mipi_config(struct drm_i915_private *i915,
+parse_mipi_config(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_mipi_config *start;
@@ -1686,27 +1693,19 @@ parse_mipi_config(struct drm_i915_private *i915,
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!intel_bios_is_dsi_present(i915, &port))
+ if (!intel_bios_is_dsi_present(display, &port))
return;
/* Initialize this to undefined indicating no generic MIPI support */
panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
- /* Block #40 is already parsed and panel_fixed_mode is
- * stored in i915->lfp_vbt_mode
- * resuse this when needed
- */
-
- /* Parse #52 for panel index used from panel_type already
- * parsed
- */
- start = bdb_find_section(i915, BDB_MIPI_CONFIG);
+ start = bdb_find_section(display, BDB_MIPI_CONFIG);
if (!start) {
- drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
+ drm_dbg_kms(display->drm, "No MIPI config BDB found");
return;
}
- drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n",
+ drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n",
panel_type);
/*
@@ -1727,7 +1726,7 @@ parse_mipi_config(struct drm_i915_private *i915,
return;
}
- parse_dsi_backlight_ports(i915, panel, port);
+ parse_dsi_backlight_ports(display, panel, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1759,7 +1758,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Find the sequence block and size for the given panel. */
static const u8 *
-find_panel_sequence_block(struct drm_i915_private *i915,
+find_panel_sequence_block(struct intel_display *display,
const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
@@ -1777,7 +1776,8 @@ find_panel_sequence_block(struct drm_i915_private *i915,
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
- drm_err(&i915->drm, "Invalid sequence block (header)\n");
+ drm_err(display->drm,
+ "Invalid sequence block (header)\n");
return NULL;
}
@@ -1790,7 +1790,7 @@ find_panel_sequence_block(struct drm_i915_private *i915,
index += header_size;
if (index + current_size > total) {
- drm_err(&i915->drm, "Invalid sequence block\n");
+ drm_err(display->drm, "Invalid sequence block\n");
return NULL;
}
@@ -1802,12 +1802,13 @@ find_panel_sequence_block(struct drm_i915_private *i915,
index += current_size;
}
- drm_err(&i915->drm, "Sequence block detected but no valid configuration\n");
+ drm_err(display->drm,
+ "Sequence block detected but no valid configuration\n");
return NULL;
}
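Aside: find_panel_sequence_block() is a bounds-checked walk over variable-size records: check that the header fits, read the id and size, check that the payload fits, then match or skip. The same shape in a generic sketch (the record layout here is illustrative, not the actual VBT one):

	static const u8 *find_record(const u8 *data, u32 total, u8 wanted,
				     u32 header_size, u32 *size_out)
	{
		u32 index = 0;

		while (index + header_size <= total) {
			u8 id = data[index];		/* hypothetical layout */
			u32 size = data[index + 1];

			if (index + header_size + size > total)
				return NULL;		/* truncated record */
			if (id == wanted) {
				*size_out = size;
				return data + index + header_size;
			}
			index += header_size + size;
		}
		return NULL;
	}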
-static int goto_next_sequence(struct drm_i915_private *i915,
+static int goto_next_sequence(struct intel_display *display,
const u8 *data, int index, int total)
{
u16 len;
@@ -1838,7 +1839,7 @@ static int goto_next_sequence(struct drm_i915_private *i915,
len = *(data + index + 6) + 7;
break;
default:
- drm_err(&i915->drm, "Unknown operation byte\n");
+ drm_err(display->drm, "Unknown operation byte\n");
return 0;
}
}
@@ -1846,7 +1847,7 @@ static int goto_next_sequence(struct drm_i915_private *i915,
return 0;
}
-static int goto_next_sequence_v3(struct drm_i915_private *i915,
+static int goto_next_sequence_v3(struct intel_display *display,
const u8 *data, int index, int total)
{
int seq_end;
@@ -1858,7 +1859,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
* checking on the structure.
*/
if (total < 5) {
- drm_err(&i915->drm, "Too small sequence size\n");
+ drm_err(display->drm, "Too small sequence size\n");
return 0;
}
@@ -1875,7 +1876,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
seq_end = index + size_of_sequence;
if (seq_end > total) {
- drm_err(&i915->drm, "Invalid sequence size\n");
+ drm_err(display->drm, "Invalid sequence size\n");
return 0;
}
@@ -1885,7 +1886,8 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
- drm_err(&i915->drm, "Invalid element structure\n");
+ drm_err(display->drm,
+ "Invalid element structure\n");
return 0;
}
return index;
@@ -1907,7 +1909,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
case MIPI_SEQ_ELEM_PMIC:
break;
default:
- drm_err(&i915->drm, "Unknown operation byte %u\n",
+ drm_err(display->drm, "Unknown operation byte %u\n",
operation_byte);
break;
}
@@ -1920,13 +1922,13 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
* Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
* skip all delay + gpio operands and stop at the first DSI packet op.
*/
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+static int get_init_otp_deassert_fragment_len(struct intel_display *display,
struct intel_panel *panel)
{
const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!data || panel->vbt.dsi.seq_version != 1))
return 0;
@@ -1955,7 +1957,7 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+static void vlv_fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
u8 *init_otp;
@@ -1973,11 +1975,11 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
return;
/* The deassert-sequence ends at the first DSI packet */
- len = get_init_otp_deassert_fragment_len(i915, panel);
+ len = get_init_otp_deassert_fragment_len(display, panel);
if (!len)
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
@@ -2010,29 +2012,32 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
* or examine the contents of the sequences to
* avoid false positives?
*/
-static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+static void icl_fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
- drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+ drm_dbg_kms(display->drm,
+ "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
}
}
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
+static void fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
- if (DISPLAY_VER(i915) >= 11)
- icl_fixup_mipi_sequences(i915, panel);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 11)
+ icl_fixup_mipi_sequences(display, panel);
else if (IS_VALLEYVIEW(i915))
- vlv_fixup_mipi_sequences(i915, panel);
+ vlv_fixup_mipi_sequences(display, panel);
}
static void
-parse_mipi_sequence(struct drm_i915_private *i915,
+parse_mipi_sequence(struct intel_display *display,
struct intel_panel *panel)
{
int panel_type = panel->vbt.panel_type;
@@ -2046,25 +2051,25 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
+ sequence = bdb_find_section(display, BDB_MIPI_SEQUENCE);
if (!sequence) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}
- drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
+ drm_dbg(display->drm, "Found MIPI sequence block v%u\n",
sequence->version);
- seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size);
+ seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size);
if (!seq_data)
return;
@@ -2079,24 +2084,24 @@ parse_mipi_sequence(struct drm_i915_private *i915,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- drm_err(&i915->drm, "Unknown sequence %u\n",
+ drm_err(display->drm, "Unknown sequence %u\n",
seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported sequence %u\n", seq_id);
panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
- index = goto_next_sequence_v3(i915, data, index, seq_size);
+ index = goto_next_sequence_v3(display, data, index, seq_size);
else
- index = goto_next_sequence(i915, data, index, seq_size);
+ index = goto_next_sequence(display, data, index, seq_size);
if (!index) {
- drm_err(&i915->drm, "Invalid sequence %u\n",
+ drm_err(display->drm, "Invalid sequence %u\n",
seq_id);
goto err;
}
@@ -2106,9 +2111,9 @@ parse_mipi_sequence(struct drm_i915_private *i915,
panel->vbt.dsi.size = seq_size;
panel->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(i915, panel);
+ fixup_mipi_sequences(display, panel);
- drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
+ drm_dbg(display->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -2117,47 +2122,47 @@ err:
}
static void
-parse_compression_parameters(struct drm_i915_private *i915)
+parse_compression_parameters(struct intel_display *display)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
u16 block_size;
int index;
- if (i915->display.vbt.version < 198)
+ if (display->vbt.version < 198)
return;
- params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ params = bdb_find_section(display, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: unsupported compression param entry size\n");
return;
}
block_size = get_blocksize(params);
if (block_size < sizeof(*params)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: expected 16 compression param entries\n");
return;
}
}
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
if (!params) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: compression params not available\n");
continue;
}
if (child->compression_method_cps) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: CPS compression not supported\n");
continue;
}
@@ -2169,12 +2174,12 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
-static u8 translate_iboost(struct drm_i915_private *i915, u8 val)
+static u8 translate_iboost(struct intel_display *display, u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
if (val >= ARRAY_SIZE(mapping)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
return 0;
}
@@ -2231,8 +2236,9 @@ static const u8 adlp_ddc_pin_map[] = {
[GMBUS_PIN_12_TC4_ICP] = ADLP_DDC_BUS_PORT_TC4,
};
-static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
+static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
@@ -2247,7 +2253,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
@@ -2266,7 +2272,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
return i;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
vbt_pin);
return 0;
@@ -2324,9 +2330,10 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo,
return PORT_NONE;
}
-static enum port dvo_port_to_port(struct drm_i915_private *i915,
+static enum port dvo_port_to_port(struct intel_display *display,
u8 dvo_port)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
@@ -2378,7 +2385,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
@@ -2401,13 +2408,13 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
}
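Aside: the per-port candidate tables above are searched by __dvo_port_to_port(); each row lists the DVO-port values a port may appear under, terminated by -1. A sketch of that reverse lookup:

	/* Return the first port whose candidate list contains dvo_port. */
	for (port = 0; port < n_ports; port++) {
		for (i = 0; i < n_dvo; i++) {
			if (mapping[port][i] == -1)
				break;			/* end of candidates */
			if (mapping[port][i] == dvo_port)
				return port;
		}
	}
	return PORT_NONE;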
static enum port
-dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+dsi_dvo_port_to_port(struct intel_display *display, u8 dvo_port)
{
switch (dvo_port) {
case DVO_PORT_MIPIA:
return PORT_A;
case DVO_PORT_MIPIC:
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return PORT_B;
else
return PORT_C;
@@ -2418,13 +2425,13 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
const struct child_device_config *child = &devdata->child;
enum port port;
- port = dvo_port_to_port(i915, child->dvo_port);
- if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
- port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ port = dvo_port_to_port(display, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(display) >= 11)
+ port = dsi_dvo_port_to_port(display, child->dvo_port);
return port;
}
@@ -2469,10 +2476,10 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 216)
+ if (!devdata || devdata->display->vbt.version < 216)
return 0;
- if (devdata->i915->display.vbt.version >= 230)
+ if (devdata->display->vbt.version >= 230)
return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
else
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
@@ -2480,7 +2487,7 @@ int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 244)
+ if (!devdata || devdata->display->vbt.version < 244)
return 0;
return devdata->child.dp_max_lane_count + 1;
@@ -2489,10 +2496,10 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
bool is_hdmi;
- if (port != PORT_A || DISPLAY_VER(i915) >= 12)
+ if (port != PORT_A || DISPLAY_VER(display) >= 12)
return;
if (!intel_bios_encoder_supports_dvi(devdata))
@@ -2500,7 +2507,7 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
- drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
+ drm_dbg_kms(display->drm, "VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -2510,7 +2517,8 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_bios_encoder_supports_dvi(devdata))
return;
@@ -2521,7 +2529,8 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
* up to 11, whereas the BDW max is 9.
*/
if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
- drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
+ drm_dbg_kms(display->drm,
+ "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
port_name(port), devdata->child.hdmi_level_shifter_value, 9);
devdata->child.hdmi_level_shifter_value = 9;
@@ -2569,14 +2578,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
bool
intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
{
- return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+ return devdata && HAS_LSPCON(devdata->display) && devdata->child.lspcon;
}
/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 158 ||
- DISPLAY_VER(devdata->i915) >= 14)
+ if (!devdata || devdata->display->vbt.version < 158 ||
+ DISPLAY_VER(devdata->display) >= 14)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2584,7 +2593,7 @@ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 204)
+ if (!devdata || devdata->display->vbt.version < 204)
return 0;
switch (devdata->child.hdmi_max_data_rate) {
@@ -2606,8 +2615,9 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata
}
}
-static bool is_port_valid(struct drm_i915_private *i915, enum port port)
+static bool is_port_valid(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
/*
* On some ICL SKUs port F is not present, but broken VBTs mark
* the port as present. Only try to initialize port F for the
@@ -2621,7 +2631,7 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
@@ -2641,7 +2651,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d DP++:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
intel_bios_encoder_supports_dp_dual_mode(devdata),
@@ -2651,33 +2661,33 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT DP max link rate: %d\n",
port_name(port), dp_max_link_rate);
@@ -2685,22 +2695,22 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
* FIXME need to implement support for VBT
* vswing/preemph tables should this ever trigger.
*/
- drm_WARN(&i915->drm, child->use_vbt_vswing,
+ drm_WARN(display->drm, child->use_vbt_vswing,
"Port %c asks to use VBT vswing/preemph tables\n",
port_name(port));
}
static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
enum port port;
port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
return;
- if (!is_port_valid(i915, port)) {
- drm_dbg_kms(&i915->drm,
+ if (!is_port_valid(display, port)) {
+ drm_dbg_kms(display->drm,
"VBT reports port %c as supported, but that can't be true: skipping\n",
port_name(port));
return;
@@ -2710,22 +2720,24 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
sanitize_hdmi_level_shift(devdata, port);
}
-static bool has_ddi_port_info(struct drm_i915_private *i915)
+static bool has_ddi_port_info(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 5 || IS_G4X(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return DISPLAY_VER(display) >= 5 || IS_G4X(i915);
}
-static void parse_ddi_ports(struct drm_i915_private *i915)
+static void parse_ddi_ports(struct intel_display *display)
{
struct intel_bios_encoder_data *devdata;
- if (!has_ddi_port_info(i915))
+ if (!has_ddi_port_info(display))
return;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
parse_ddi_port(devdata);
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
print_ddi_port(devdata);
}
@@ -2751,27 +2763,27 @@ static int child_device_expected_size(u16 version)
return 22;
}
-static bool child_device_size_valid(struct drm_i915_private *i915, int size)
+static bool child_device_size_valid(struct intel_display *display, int size)
{
int expected_size;
- expected_size = child_device_expected_size(i915->display.vbt.version);
+ expected_size = child_device_expected_size(display->vbt.version);
if (expected_size < 0) {
expected_size = sizeof(struct child_device_config);
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Expected child device config size for VBT version %u not known; assuming %d\n",
- i915->display.vbt.version, expected_size);
+ display->vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (size != expected_size)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected child device config size %d (expected %d for VBT version %u)\n",
- size, expected_size, i915->display.vbt.version);
+ size, expected_size, display->vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Child device config size %d is too small.\n",
size);
return false;
@@ -2781,8 +2793,9 @@ static bool child_device_size_valid(struct drm_i915_private *i915, int size)
}
static void
-parse_general_definitions(struct drm_i915_private *i915)
+parse_general_definitions(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_definitions *defs;
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
@@ -2790,27 +2803,27 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
- defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
+ defs = bdb_find_section(display, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"General definitions block too small (%u)\n",
block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
+ drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(i915, bus_pin))
- i915->display.vbt.crt_ddc_pin = bus_pin;
+ display->vbt.crt_ddc_pin = bus_pin;
- if (!child_device_size_valid(i915, defs->child_dev_size))
+ if (!child_device_size_valid(display, defs->child_dev_size))
return;
/* get the number of child device */
@@ -2821,7 +2834,7 @@ parse_general_definitions(struct drm_i915_private *i915)
if (!child->device_type)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found VBT child device with type 0x%x\n",
child->device_type);
@@ -2829,7 +2842,7 @@ parse_general_definitions(struct drm_i915_private *i915)
if (!devdata)
break;
- devdata->i915 = i915;
+ devdata->display = display;
/*
* Copy as much as we know (sizeof) and is available
@@ -2839,37 +2852,39 @@ parse_general_definitions(struct drm_i915_private *i915)
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+ list_add_tail(&devdata->node, &display->vbt.display_devices);
}
- if (list_empty(&i915->display.vbt.display_devices))
- drm_dbg_kms(&i915->drm,
+ if (list_empty(&display->vbt.display_devices))
+ drm_dbg_kms(display->drm,
"no child dev is parsed from VBT\n");
}
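Aside: the memcpy above is bounded by both defs->child_dev_size and sizeof(*child) for the same forward-compat reason as the size check. For context, child_device_ptr() from the hunk header earlier steps through the block by the VBT-declared stride; a sketch (verify against the tree):

	static const struct child_device_config *
	child_device_ptr(const struct bdb_general_definitions *defs, int i)
	{
		/* stride is defs->child_dev_size, not sizeof(*child) */
		return (const void *)&defs->devices[i * defs->child_dev_size];
	}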
/* Common defaults which may be overridden by VBT. */
static void
-init_vbt_defaults(struct drm_i915_private *i915)
+init_vbt_defaults(struct intel_display *display)
{
- i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
- i915->display.vbt.int_tv_support = 1;
- i915->display.vbt.int_crt_support = 1;
+ display->vbt.int_tv_support = 1;
+ display->vbt.int_crt_support = 1;
/* driver features */
- i915->display.vbt.int_lvds_support = 1;
+ display->vbt.int_lvds_support = 1;
/* Default to using SSC */
- i915->display.vbt.lvds_use_ssc = 1;
+ display->vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
- !HAS_PCH_SPLIT(i915));
- drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
- i915->display.vbt.lvds_ssc_freq);
+ display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
+ !HAS_PCH_SPLIT(i915));
+ drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
+ display->vbt.lvds_ssc_freq);
}
/* Common defaults which may be overridden by VBT. */
@@ -2885,12 +2900,13 @@ init_vbt_panel_defaults(struct intel_panel *panel)
/* Defaults to initialize only if there is no VBT. */
static void
-init_vbt_missing_defaults(struct drm_i915_private *i915)
+init_vbt_missing_defaults(struct intel_display *display)
{
- unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
- if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
+ if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915))
return;
for_each_port_masked(port, ports) {
@@ -2910,7 +2926,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (!devdata)
break;
- devdata->i915 = i915;
+ devdata->display = display;
child = &devdata->child;
if (port == PORT_F)
@@ -2929,15 +2945,15 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (port == PORT_A)
child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
- list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+ list_add_tail(&devdata->node, &display->vbt.display_devices);
- drm_dbg_kms(&i915->drm,
- "Generating default VBT child device with type 0x04%x on port %c\n",
+ drm_dbg_kms(display->drm,
+ "Generating default VBT child device with type 0x%04x on port %c\n",
child->device_type, port_name(port));
}
/* Bypass some minimum baseline VBT version checks */
- i915->display.vbt.version = 155;
+ display->vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -2949,13 +2965,13 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
- * @i915: the device
+ * @display: display device
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
-bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+bool intel_bios_is_valid_vbt(struct intel_display *display,
const void *buf, size_t size)
{
const struct vbt_header *vbt = buf;
@@ -2965,17 +2981,18 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
return false;
if (sizeof(struct vbt_header) > size) {
- drm_dbg_kms(&i915->drm, "VBT header incomplete\n");
+ drm_dbg_kms(display->drm, "VBT header incomplete\n");
return false;
}
if (memcmp(vbt->signature, "$VBT", 4)) {
- drm_dbg_kms(&i915->drm, "VBT invalid signature\n");
+ drm_dbg_kms(display->drm, "VBT invalid signature\n");
return false;
}
if (vbt->vbt_size > size) {
- drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n");
+ drm_dbg_kms(display->drm,
+ "VBT incomplete (vbt_size overflows)\n");
return false;
}
@@ -2985,48 +3002,48 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
vbt->bdb_offset,
sizeof(struct bdb_header),
size)) {
- drm_dbg_kms(&i915->drm, "BDB header incomplete\n");
+ drm_dbg_kms(display->drm, "BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
- drm_dbg_kms(&i915->drm, "BDB incomplete\n");
+ drm_dbg_kms(display->drm, "BDB incomplete\n");
return false;
}
return vbt;
}
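Aside: the range_overflows_t() checks above guard the offset+size arithmetic against running past the buffer and against wraparound. Roughly what the macro tests, as a hedged equivalent; see the driver's utility headers for the authoritative definition:

	static inline bool range_overflows(size_t start, size_t size, size_t max)
	{
		return start >= max || size > max - start;
	}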
-static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *firmware_get_vbt(struct intel_display *display,
size_t *size)
{
struct vbt_header *vbt = NULL;
const struct firmware *fw = NULL;
- const char *name = i915->display.params.vbt_firmware;
+ const char *name = display->params.vbt_firmware;
int ret;
if (!name || !*name)
return NULL;
- ret = request_firmware(&fw, name, i915->drm.dev);
+ ret = request_firmware(&fw, name, display->drm->dev);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Requesting VBT firmware \"%s\" failed (%d)\n",
name, ret);
return NULL;
}
- if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
+ if (intel_bios_is_valid_vbt(display, fw->data, fw->size)) {
vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (vbt) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found valid VBT firmware \"%s\"\n", name);
if (size)
*size = fw->size;
}
} else {
- drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
+ drm_dbg_kms(display->drm, "Invalid VBT firmware \"%s\"\n",
name);
}
@@ -3042,9 +3059,10 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
}
-static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display,
size_t *size)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 count, data, found, store = 0;
u32 static_region, oprom_offset;
u32 oprom_size = 0x200000;
@@ -3081,10 +3099,10 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
for (count = 0; count < vbt_size; count += 4)
*(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
- if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
goto err_free_vbt;
- drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+ drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n");
if (size)
*size = vbt_size;
@@ -3097,10 +3115,10 @@ err_not_found:
return NULL;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *oprom_get_vbt(struct intel_display *display,
size_t *sizep)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
u16 vbt_size;
@@ -3124,13 +3142,13 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- drm_dbg(&i915->drm, "VBT header incomplete\n");
+ drm_dbg(display->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -3142,7 +3160,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
memcpy_fromio(vbt, p, vbt_size);
- if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
goto err_free_vbt;
pci_unmap_rom(pdev, oprom);
@@ -3150,7 +3168,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
if (sizep)
*sizep = vbt_size;
- drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n");
return vbt;
@@ -3162,16 +3180,17 @@ err_unmap_oprom:
return NULL;
}
-static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
+static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display,
size_t *sizep)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
intel_wakeref_t wakeref;
- vbt = firmware_get_vbt(i915, sizep);
+ vbt = firmware_get_vbt(display, sizep);
if (!vbt)
- vbt = intel_opregion_get_vbt(i915, sizep);
+ vbt = intel_opregion_get_vbt(display, sizep);
/*
* If the OpRegion does not have VBT, look in SPI flash
@@ -3179,76 +3198,77 @@ static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915
*/
if (!vbt && IS_DGFX(i915))
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = spi_oprom_get_vbt(i915, sizep);
+ vbt = spi_oprom_get_vbt(display, sizep);
if (!vbt)
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = oprom_get_vbt(i915, sizep);
+ vbt = oprom_get_vbt(display, sizep);
return vbt;
}
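
The getter above encodes a strict priority: firmware override first, then ACPI OpRegion, then SPI flash on discrete GPUs, then the PCI option ROM, with the first non-NULL result winning. A standalone sketch of that first-hit-wins chain, with stubbed sources standing in for firmware_get_vbt() and friends:

#include <stdio.h>

typedef const char *(*vbt_source)(void);

static const char *from_firmware(void) { return NULL; } /* no override set */
static const char *from_opregion(void) { return NULL; } /* not in ACPI */
static const char *from_oprom(void)    { return "vbt-from-pci-rom"; }

int main(void)
{
	vbt_source sources[] = { from_firmware, from_opregion, from_oprom };
	const char *vbt = NULL;
	unsigned int i;

	/* First source to return non-NULL wins, as in intel_bios_get_vbt(). */
	for (i = 0; !vbt && i < sizeof(sources) / sizeof(sources[0]); i++)
		vbt = sources[i]();

	printf("%s\n", vbt ? vbt : "none");
	return 0;
}
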
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
- * @i915: i915 device instance
+ * @display: display device instance
*
* Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
* was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
* initialize some defaults if the VBT is not present at all.
*/
-void intel_bios_init(struct drm_i915_private *i915)
+void intel_bios_init(struct intel_display *display)
{
const struct vbt_header *vbt;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&i915->display.vbt.display_devices);
- INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
+ INIT_LIST_HEAD(&display->vbt.display_devices);
+ INIT_LIST_HEAD(&display->vbt.bdb_blocks);
- if (!HAS_DISPLAY(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (!HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
"Skipping VBT init due to disabled display.\n");
return;
}
- init_vbt_defaults(i915);
+ init_vbt_defaults(display);
- vbt = intel_bios_get_vbt(i915, NULL);
+ vbt = intel_bios_get_vbt(display, NULL);
if (!vbt)
goto out;
bdb = get_bdb_header(vbt);
- i915->display.vbt.version = bdb->version;
+ display->vbt.version = bdb->version;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
+ (int)sizeof(vbt->signature), vbt->signature,
+ display->vbt.version);
- init_bdb_blocks(i915, bdb);
+ init_bdb_blocks(display, bdb);
/* Grab useful general definitions */
- parse_general_features(i915);
- parse_general_definitions(i915);
- parse_driver_features(i915);
+ parse_general_features(display);
+ parse_general_definitions(display);
+ parse_driver_features(display);
/* Depends on child device list */
- parse_compression_parameters(i915);
+ parse_compression_parameters(display);
out:
if (!vbt) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"Failed to find VBIOS tables (VBT)\n");
- init_vbt_missing_defaults(i915);
+ init_vbt_missing_defaults(display);
}
/* Further processing on pre-parsed or generated child device data */
- parse_sdvo_device_mapping(i915);
- parse_ddi_ports(i915);
+ parse_sdvo_device_mapping(display);
+ parse_ddi_ports(display);
kfree(vbt);
}
-static void intel_bios_init_panel(struct drm_i915_private *i915,
+static void intel_bios_init_panel(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid,
@@ -3256,63 +3276,64 @@ static void intel_bios_init_panel(struct drm_i915_private *i915,
{
/* already have it? */
if (panel->vbt.panel_type >= 0) {
- drm_WARN_ON(&i915->drm, !use_fallback);
+ drm_WARN_ON(display->drm, !use_fallback);
return;
}
- panel->vbt.panel_type = get_panel_type(i915, devdata,
+ panel->vbt.panel_type = get_panel_type(display, devdata,
drm_edid, use_fallback);
if (panel->vbt.panel_type < 0) {
- drm_WARN_ON(&i915->drm, use_fallback);
+ drm_WARN_ON(display->drm, use_fallback);
return;
}
init_vbt_panel_defaults(panel);
- parse_panel_options(i915, panel);
- parse_generic_dtd(i915, panel);
- parse_lfp_data(i915, panel);
- parse_lfp_backlight(i915, panel);
- parse_sdvo_lvds_data(i915, panel);
- parse_panel_driver_features(i915, panel);
- parse_power_conservation_features(i915, panel);
- parse_edp(i915, panel);
- parse_psr(i915, panel);
- parse_mipi_config(i915, panel);
- parse_mipi_sequence(i915, panel);
+ parse_panel_options(display, panel);
+ parse_generic_dtd(display, panel);
+ parse_lfp_data(display, panel);
+ parse_lfp_backlight(display, panel);
+ parse_sdvo_lvds_data(display, panel);
+ parse_panel_driver_features(display, panel);
+ parse_power_conservation_features(display, panel);
+ parse_edp(display, panel);
+ parse_psr(display, panel);
+ parse_mipi_config(display, panel);
+ parse_mipi_sequence(display, panel);
}
-void intel_bios_init_panel_early(struct drm_i915_private *i915,
+void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata)
{
- intel_bios_init_panel(i915, panel, devdata, NULL, false);
+ intel_bios_init_panel(display, panel, devdata, NULL, false);
}
-void intel_bios_init_panel_late(struct drm_i915_private *i915,
+void intel_bios_init_panel_late(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid)
{
- intel_bios_init_panel(i915, panel, devdata, drm_edid, true);
+ intel_bios_init_panel(display, panel, devdata, drm_edid, true);
}
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
- * @i915: i915 device instance
+ * @display: display device instance
*/
-void intel_bios_driver_remove(struct drm_i915_private *i915)
+void intel_bios_driver_remove(struct intel_display *display)
{
struct intel_bios_encoder_data *devdata, *nd;
struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &display->vbt.display_devices,
+ node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
+ list_for_each_entry_safe(entry, ne, &display->vbt.bdb_blocks, node) {
list_del(&entry->node);
kfree(entry);
}
@@ -3336,22 +3357,22 @@ void intel_bios_fini_panel(struct intel_panel *panel)
/**
* intel_bios_is_tv_present - is integrated TV present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
*
* Return true if TV is present. If no child devices were parsed from VBT,
* assume TV is present.
*/
-bool intel_bios_is_tv_present(struct drm_i915_private *i915)
+bool intel_bios_is_tv_present(struct intel_display *display)
{
const struct intel_bios_encoder_data *devdata;
- if (!i915->display.vbt.int_tv_support)
+ if (!display->vbt.int_tv_support)
return false;
- if (list_empty(&i915->display.vbt.display_devices))
+ if (list_empty(&display->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
/*
@@ -3377,20 +3398,21 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
/**
* intel_bios_is_lvds_present - is LVDS present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
* @i2c_pin: i2c pin for LVDS if present
*
* Return true if LVDS is present. If no child devices were parsed from VBT,
* assume LVDS is present.
*/
-bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
+bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
- if (list_empty(&i915->display.vbt.display_devices))
+ if (list_empty(&display->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -3417,7 +3439,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- return intel_opregion_vbt_present(i915);
+ return intel_opregion_vbt_present(display);
}
return false;
@@ -3425,25 +3447,25 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
/**
* intel_bios_is_port_present - is the specified digital port present
- * @i915: i915 device instance
+ * @display: display device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
-bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
+bool intel_bios_is_port_present(struct intel_display *display, enum port port)
{
const struct intel_bios_encoder_data *devdata;
- if (WARN_ON(!has_ddi_port_info(i915)))
+ if (WARN_ON(!has_ddi_port_info(display)))
return true;
- if (!is_port_valid(i915, port))
+ if (!is_port_valid(display, port))
return false;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
- if (dvo_port_to_port(i915, child->dvo_port) == port)
+ if (dvo_port_to_port(display, child->dvo_port) == port)
return true;
}
@@ -3474,32 +3496,32 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
/**
* intel_bios_is_dsi_present - is DSI present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
* @port: port for DSI if present
*
* Return true if DSI is present, and return the port in %port.
*/
-bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
+bool intel_bios_is_dsi_present(struct intel_display *display,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
- drm_dbg_kms(&i915->drm,
+ if (dsi_dvo_port_to_port(display, dvo_port) == PORT_NONE) {
+ drm_dbg_kms(display->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
continue;
}
if (port)
- *port = dsi_dvo_port_to_port(i915, dvo_port);
+ *port = dsi_dvo_port_to_port(display, dvo_port);
return true;
}
@@ -3510,7 +3532,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int bpc = 8;
@@ -3524,13 +3546,13 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
else if (dsc->support_8bpc && dsc_max_bpc >= 8)
bpc = 8;
else
- drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DCS\n",
+ drm_dbg_kms(display->drm, "VBT: Unsupported BPC %d for DCS\n",
dsc_max_bpc);
crtc_state->pipe_bpp = bpc * 3;
- crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
- VBT_DSC_MAX_BPP(dsc->max_bpp)));
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp)));
/*
* FIXME: This is ugly, and slice count should take DSC engine
@@ -3545,14 +3567,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
- drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n");
+ drm_dbg_kms(display->drm,
+ "VBT: Unsupported DSC slice count for DSI\n");
crtc_state->dsc.slice_count = 1;
}
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
crtc_state->dsc.slice_count != 0)
- drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ drm_dbg_kms(display->drm,
+ "VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
crtc_state->dsc.slice_count);
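
fill_dsc() now stores link bpp in .4 binary fixed point via the drm_fixed.h helpers rather than the old BPP_X16 macros. A self-contained sketch of the conversion semantics, using local stand-ins (my_fxp_q4_*) rather than the real helpers:

/*
 * Values are stored as sixteenths (a .4 binary fixed point), hence the
 * old _x16 suffixes on fields like compressed_bpp_x16.
 */
#include <stdio.h>

#define FXP_Q4_SHIFT 4

static int my_fxp_q4_from_int(int val)
{
	return val << FXP_Q4_SHIFT; /* int -> x16 */
}

static int my_fxp_q4_to_int_roundup(int val_q4)
{
	return (val_q4 + (1 << FXP_Q4_SHIFT) - 1) >> FXP_Q4_SHIFT; /* x16 -> int */
}

int main(void)
{
	int bpp_q4 = my_fxp_q4_from_int(24);

	printf("%d\n", bpp_q4);                        /* 384 */
	printf("%d\n", my_fxp_q4_to_int_roundup(384)); /* 24: exact */
	printf("%d\n", my_fxp_q4_to_int_roundup(385)); /* 25: fraction rounds up */
	return 0;
}
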
@@ -3576,16 +3600,16 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
+ if (dsi_dvo_port_to_port(display, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
@@ -3645,12 +3669,13 @@ static const u8 direct_aux_ch_map[] = {
[AUX_CH_I] = DP_AUX_I, /* aka AUX_CH_USBC6 */
};
-static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
+static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *aux_ch_map;
int i, n_entries;
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
aux_ch_map = adlp_aux_ch_map;
n_entries = ARRAY_SIZE(adlp_aux_ch_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -3669,7 +3694,7 @@ static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
return i;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignoring alternate AUX CH: VBT claims AUX 0x%x, which is not valid for this platform\n",
aux_channel);
@@ -3681,22 +3706,22 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
if (!devdata || !devdata->child.aux_channel)
return AUX_CH_NONE;
- return map_aux_ch(devdata->i915, devdata->child.aux_channel);
+ return map_aux_ch(devdata->display, devdata->child.aux_channel);
}
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915;
+ struct intel_display *display;
u8 aux_channel;
int count = 0;
if (!devdata || !devdata->child.aux_channel)
return false;
- i915 = devdata->i915;
+ display = devdata->display;
aux_channel = devdata->child.aux_channel;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
if (intel_bios_encoder_supports_dp(devdata) &&
aux_channel == devdata->child.aux_channel)
count++;
@@ -3707,18 +3732,18 @@ bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devda
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->i915, devdata->child.dp_iboost_level);
+ return translate_iboost(devdata->display, devdata->child.dp_iboost_level);
}
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level);
+ return translate_iboost(devdata->display, devdata->child.hdmi_iboost_level);
}
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
@@ -3726,17 +3751,17 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->display, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
+ return devdata->display->vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
+ return devdata->display->vbt.version >= 209 && devdata->child.tbt;
}
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
@@ -3750,11 +3775,11 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata
}
const struct intel_bios_encoder_data *
-intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
+intel_bios_encoder_data_lookup(struct intel_display *display, enum port port)
{
struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
if (intel_bios_encoder_port(devdata) == port)
return devdata;
}
@@ -3762,23 +3787,23 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
return NULL;
}
-void intel_bios_for_each_encoder(struct drm_i915_private *i915,
- void (*func)(struct drm_i915_private *i915,
+void intel_bios_for_each_encoder(struct intel_display *display,
+ void (*func)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata))
{
struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
- func(i915, devdata);
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
+ func(display, devdata);
}
static int intel_bios_vbt_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
const void *vbt;
size_t vbt_size;
- vbt = intel_bios_get_vbt(i915, &vbt_size);
+ vbt = intel_bios_get_vbt(display, &vbt_size);
if (vbt) {
seq_write(m, vbt, vbt_size);
@@ -3790,10 +3815,10 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
-void intel_bios_debugfs_register(struct drm_i915_private *i915)
+void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
- i915, &intel_bios_vbt_fops);
+ display, &intel_bios_vbt_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 06a51be4afd8..8b703f6cfe17 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -33,9 +33,9 @@
#include <linux/types.h>
struct drm_edid;
-struct drm_i915_private;
struct intel_bios_encoder_data;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
struct intel_panel;
enum aux_ch;
@@ -232,28 +232,28 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
+void intel_bios_init(struct intel_display *display);
+void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata);
-void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
+void intel_bios_init_panel_late(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
-void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+void intel_bios_driver_remove(struct intel_display *display);
+bool intel_bios_is_valid_vbt(struct intel_display *display,
const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_tv_present(struct intel_display *display);
+bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct intel_display *display, enum port port);
+bool intel_bios_is_dsi_present(struct intel_display *display, enum port *port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
const struct intel_bios_encoder_data *
-intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
+intel_bios_encoder_data_lookup(struct intel_display *display, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
@@ -277,10 +277,10 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
-void intel_bios_for_each_encoder(struct drm_i915_private *i915,
- void (*func)(struct drm_i915_private *i915,
+void intel_bios_for_each_encoder(struct intel_display *display,
+ void (*func)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata));
-void intel_bios_debugfs_register(struct drm_i915_private *i915);
+void intel_bios_debugfs_register(struct intel_display *display);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 16d5550f7e5e..aa3ba66c5307 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -23,7 +23,10 @@
#include <linux/time.h>
+#include <drm/drm_fixed.h>
+
#include "soc/intel_dram.h"
+
#include "hsw_ips.h"
#include "i915_reg.h"
#include "intel_atomic.h"
@@ -2750,7 +2753,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
*/
int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
int min_cdclk_bj =
- (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
pixel_clock) / (2 * bigjoiner_interface_bits);
min_cdclk = max(min_cdclk, min_cdclk_bj);
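
For reference, plugging illustrative numbers into the big joiner floor computed above (the mode and bpp values here are invented, not taken from the patch):

#include <stdio.h>

int main(void)
{
	long long compressed_bpp = 12;   /* after fxp_q4_to_int_roundup() */
	long long pixel_clock = 1335000; /* kHz, a 4k high-refresh class mode */
	long long interface_bits = 36;   /* display version >= 14 */

	/* min cdclk so the joiner interface keeps up with compressed pixels */
	long long min_cdclk_bj = compressed_bpp * pixel_clock /
				 (2 * interface_bits);

	printf("%lld kHz\n", min_cdclk_bj); /* 222500 kHz */
	return 0;
}
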
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 7ac50aacec73..5d701f48351b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1313,8 +1313,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (crtc_state->dsb)
- intel_dsb_reg_write(crtc_state->dsb, reg, val);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
else
intel_de_write_fw(i915, reg, val);
}
@@ -1337,15 +1337,15 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
* unless we either write each entry twice,
* or use non-posted writes
*/
- if (crtc_state->dsb)
- intel_dsb_nonpost_start(crtc_state->dsb);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
for (i = 0; i < 256; i++)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb)
- intel_dsb_nonpost_end(crtc_state->dsb);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
}
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
@@ -1870,7 +1870,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (crtc_state->dsb)
+ if (crtc_state->dsb_color_vblank)
return;
i915->display.funcs.color->load_luts(crtc_state);
@@ -1890,8 +1890,8 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
- if (crtc_state->dsb)
- intel_dsb_commit(crtc_state->dsb, true);
+ if (crtc_state->dsb_color_commit)
+ intel_dsb_commit(crtc_state->dsb_color_commit, false);
}
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
@@ -1919,33 +1919,51 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
- if (!crtc_state->dsb)
+ crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+ if (!crtc_state->dsb_color_vblank)
return;
i915->display.funcs.color->load_luts(crtc_state);
- intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_finish(crtc_state->dsb_color_vblank);
+
+ crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16);
+ if (!crtc_state->dsb_color_commit) {
+ intel_dsb_cleanup(crtc_state->dsb_color_vblank);
+ crtc_state->dsb_color_vblank = NULL;
+ return;
+ }
+
+ intel_dsb_chain(state, crtc_state->dsb_color_commit,
+ crtc_state->dsb_color_vblank, true);
+
+ intel_dsb_finish(crtc_state->dsb_color_commit);
}
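
intel_color_prepare_commit() now builds two DSBs: a large one holding the LUT writes to run at vblank, and a small commit buffer chained in front of it, unwinding the first allocation if the second fails. The shape of that setup as a userspace sketch; the types and helpers below are stubs that mirror the allocate/chain/unwind order, not the i915 API:

#include <stdio.h>
#include <stdlib.h>

struct dsb {
	const char *name;
};

static struct dsb *dsb_prepare(const char *name)
{
	struct dsb *dsb = malloc(sizeof(*dsb));

	if (dsb)
		dsb->name = name;
	return dsb;
}

static void dsb_cleanup(struct dsb *dsb)
{
	free(dsb);
}

int main(void)
{
	struct dsb *vblank = dsb_prepare("color_vblank"); /* big LUT payload */
	struct dsb *commit;

	if (!vblank)
		return 1;

	commit = dsb_prepare("color_commit"); /* small trigger buffer */
	if (!commit) {
		/* Unwind the first buffer, as intel_color_prepare_commit() does. */
		dsb_cleanup(vblank);
		return 1;
	}

	printf("%s chains to %s\n", commit->name, vblank->name);
	dsb_cleanup(commit);
	dsb_cleanup(vblank);
	return 0;
}
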
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->dsb)
- return;
+ if (crtc_state->dsb_color_commit) {
+ intel_dsb_cleanup(crtc_state->dsb_color_commit);
+ crtc_state->dsb_color_commit = NULL;
+ }
- intel_dsb_cleanup(crtc_state->dsb);
- crtc_state->dsb = NULL;
+ if (crtc_state->dsb_color_vblank) {
+ intel_dsb_cleanup(crtc_state->dsb_color_vblank);
+ crtc_state->dsb_color_vblank = NULL;
+ }
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb)
- intel_dsb_wait(crtc_state->dsb);
+ if (crtc_state->dsb_color_commit)
+ intel_dsb_wait(crtc_state->dsb_color_commit);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_wait(crtc_state->dsb_color_vblank);
}
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsb;
+ return crtc_state->dsb_color_vblank;
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 143d66951631..3252dab56430 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -159,9 +159,11 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
{
- bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
- bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
- bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+ struct intel_display *display = &i915->display;
+
+ bool ddi_a_present = intel_bios_is_port_present(display, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(display, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(display, NULL);
/*
* VBT's 'dvo port' field for child devices references the DDI, not
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 6df526e189b5..705ec5ad385c 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -10,6 +10,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
+#include "intel_vdsc.h"
#include "intel_vrr.h"
static void intel_dump_crtc_timings(struct drm_printer *p,
@@ -369,6 +370,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
else if (IS_VALLEYVIEW(i915))
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
+ intel_vdsc_state_dump(&p, 0, pipe_config);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index a07aca96e551..00fbe9f8c03a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1400,7 +1400,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -1408,7 +1408,7 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return i;
}
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
@@ -2211,14 +2211,14 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->vrr.enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
str_enable_disable(enable));
}
@@ -2227,20 +2227,20 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
enable ? DP_FEC_READY : 0) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n",
enable ? "enabled" : "disabled");
if (enable &&
drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+ drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
static int read_fec_detected_status(struct drm_dp_aux *aux)
@@ -4172,7 +4172,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
- if (intel_encoder_is_dp(encoder))
+ if ((crtc_state && intel_crtc_has_dp_encoder(crtc_state)) ||
+ (!crtc_state && intel_encoder_is_dp(encoder)))
intel_dp_sync_state(encoder, crtc_state);
}
@@ -4853,9 +4854,10 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
return false;
}
-void intel_ddi_init(struct drm_i915_private *dev_priv,
+void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -4898,7 +4900,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
* driver. In that case we should skip initializing the corresponding
* outputs.
*/
- if (intel_hti_uses_phy(dev_priv, phy)) {
+ if (intel_hti_uses_phy(display, phy)) {
drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
@@ -4972,7 +4974,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
} else {
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ "DDI %c/PHY %c", port_name(port), phy_name(phy));
}
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 434de7196875..6d85422bdefe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -15,6 +15,7 @@ struct intel_bios_encoder_data;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
@@ -53,7 +54,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);
-void intel_ddi_init(struct drm_i915_private *dev_priv,
+void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c2c388212e2e..b4ef4d59da1a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -39,6 +39,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -1014,9 +1015,14 @@ static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}
-static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
if (!new_crtc_state->hw.active)
return false;
@@ -1026,9 +1032,14 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
vrr_params_changed(old_crtc_state, new_crtc_state)));
}
-static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
if (!old_crtc_state->hw.active)
return false;
@@ -1181,7 +1192,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- if (vrr_disabling(old_crtc_state, new_crtc_state)) {
+ if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
}
@@ -4669,11 +4680,11 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
- if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
+ if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n",
+ "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
crtc->base.base.id, crtc->base.name,
- BPP_X16_ARGS(crtc_state->max_link_bpp_x16));
+ FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
crtc_state->bw_constrained = true;
}
@@ -5100,7 +5111,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
__stringify(name) " is not bool"); \
- pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
str_yes_no(pipe_config->name)); \
@@ -6249,6 +6260,8 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
break;
default:
drm_dbg_kms(&i915->drm,
@@ -6830,8 +6843,6 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -6844,7 +6855,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
!intel_crtc_needs_modeset(new_crtc_state))
skl_detach_scalers(new_crtc_state);
- if (vrr_enabling(old_crtc_state, new_crtc_state))
+ if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
}
@@ -6944,7 +6955,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
- if (vrr_enabling(old_crtc_state, new_crtc_state) ||
+ if (intel_crtc_vrr_enabling(state, crtc) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
@@ -7502,7 +7513,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
+ old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
}
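
The new->old swap above relies on i915's fetch_and_zero() to move ownership of each DSB pointer so that only one crtc_state ever frees it. A simplified userspace stand-in for the macro (GNU C statement expression) and the hand-off pattern:

#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({ \
	__typeof__(*(ptr)) __T = *(ptr); \
	*(ptr) = (__typeof__(*(ptr)))0; \
	__T; \
})

int main(void)
{
	int *old_owner, *new_owner = malloc(sizeof(int));

	/* Move the buffer from new_owner to old_owner in one step; exactly
	 * one pointer is non-NULL afterwards, so a single free path suffices. */
	old_owner = fetch_and_zero(&new_owner);

	printf("new=%p old=%p\n", (void *)new_owner, (void *)old_owner);
	free(old_owner);
	return 0;
}
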
/* Underruns don't always raise interrupts, so check manually */
@@ -7777,10 +7789,11 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port)
void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -7789,7 +7802,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (intel_ddi_crt_present(dev_priv))
intel_crt_init(dev_priv);
- intel_bios_for_each_encoder(dev_priv, intel_ddi_init);
+ intel_bios_for_each_encoder(display, intel_ddi_init);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
vlv_dsi_init(dev_priv);
@@ -7851,14 +7864,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* HDMI ports that the VBT claim are DP or eDP.
*/
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
- has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ has_port = intel_bios_is_port_present(display, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
- has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ has_port = intel_bios_is_port_present(display, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
@@ -7869,7 +7882,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* eDP not supported on port D,
* so no need to worry about it
*/
- has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ has_port = intel_bios_is_port_present(display, PORT_D);
if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
@@ -7923,7 +7936,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
g4x_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
- intel_tv_init(dev_priv);
+ intel_tv_init(display);
} else if (DISPLAY_VER(dev_priv) == 2) {
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index b0cf6ca70952..b21d9578d5db 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -532,6 +532,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
void intel_update_watermarks(struct drm_i915_private *i915);
+bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 7715fc329057..0a711114ff2b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -237,7 +237,7 @@ struct intel_vbt_data {
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
- u8 slave_addr;
+ u8 target_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 ddc_pin;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 91757fed9c6d..f5f618199d39 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -36,6 +36,7 @@
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vdsc.h"
#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -492,7 +493,7 @@ static void crtc_updates_info(struct seq_file *m,
seq_printf(m, "%sMax update: %lluns\n",
hdr, crtc->debug.vbl.max);
seq_printf(m, "%sAverage update: %lluns\n",
- hdr, div64_u64(crtc->debug.vbl.sum, count));
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
seq_printf(m, "%sOverruns > %uus: %u\n",
hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
}
@@ -551,6 +552,7 @@ static void crtc_updates_add(struct intel_crtc *crtc)
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_encoder *encoder;
@@ -581,6 +583,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
crtc_state->joiner_pipes,
intel_crtc_is_joiner_secondary(crtc_state) ? "slave" : "master");
+ intel_vdsc_state_dump(&p, 1, crtc_state);
+
for_each_intel_encoder_mask(&dev_priv->drm, encoder,
crtc_state->uapi.encoder_mask)
intel_encoder_info(m, crtc, encoder);
@@ -1008,7 +1012,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
- intel_fbc_reset_underrun(dev_priv);
+ intel_fbc_reset_underrun(&dev_priv->display);
return cnt;
}
@@ -1045,6 +1049,7 @@ static const struct {
void intel_display_debugfs_register(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct drm_minor *minor = i915->drm.primary;
int i;
@@ -1060,15 +1065,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
- intel_bios_debugfs_register(i915);
+ intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
- intel_fbc_debugfs_register(i915);
+ intel_fbc_debugfs_register(display);
intel_hpd_debugfs_register(i915);
- intel_opregion_debugfs_register(i915);
- intel_psr_debugfs_register(i915);
+ intel_opregion_debugfs_register(display);
+ intel_psr_debugfs_register(display);
intel_wm_debugfs_register(i915);
- intel_display_debugfs_params(i915);
+ intel_display_debugfs_params(display);
}
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index f35718748555..ec3ed29a83c9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -151,13 +151,13 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
} while (0)
/* add a subdirectory with files for each intel display param */
-void intel_display_debugfs_params(struct drm_i915_private *i915)
+void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
struct dentry *dir;
char dirname[16];
- snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+ snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
dir = debugfs_lookup(dirname, minor->debugfs_root);
if (!dir)
dir = debugfs_create_dir(dirname, minor->debugfs_root);
@@ -171,7 +171,7 @@ void intel_display_debugfs_params(struct drm_i915_private *i915)
*/
#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
- dir, #x, mode, &i915->display.params.x);
+ dir, #x, mode, &display->params.x);
INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
index 1e9945a4044c..a1120915a5a8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
-struct drm_i915_private;
+struct intel_display;
-void intel_display_debugfs_params(struct drm_i915_private *i915);
+void intel_display_debugfs_params(struct intel_display *display);
#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index dd7dce4b0e7a..1b46ba985580 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -16,14 +16,25 @@
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
+#include "intel_step.h"
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
+struct stepping_desc {
+ const enum intel_step *map; /* revid to step map */
+ size_t size; /* map size */
+};
+
+#define STEP_INFO(_map) \
+ .step_info.map = _map, \
+ .step_info.size = ARRAY_SIZE(_map)
+
struct subplatform_desc {
enum intel_display_subplatform subplatform;
const char *name;
const u16 *pciidlist;
+ struct stepping_desc step_info;
};
struct platform_desc {
@@ -31,6 +42,7 @@ struct platform_desc {
const char *name;
const struct subplatform_desc *subplatforms;
const struct intel_display_device_info *info; /* NULL for GMD ID */
+ struct stepping_desc step_info;
};
#define PLATFORM(_platform) \
@@ -610,6 +622,13 @@ static const u16 skl_ulx_ids[] = {
0
};
+static const enum intel_step skl_steppings[] = {
+ [0x6] = STEP_G0,
+ [0x7] = STEP_H0,
+ [0x9] = STEP_J0,
+ [0xA] = STEP_I1,
+};
+
static const struct platform_desc skl_desc = {
PLATFORM(SKYLAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -618,6 +637,7 @@ static const struct platform_desc skl_desc = {
{},
},
.info = &skl_display,
+ STEP_INFO(skl_steppings),
};
static const u16 kbl_ult_ids[] = {
@@ -634,6 +654,16 @@ static const u16 kbl_ulx_ids[] = {
0
};
+static const enum intel_step kbl_steppings[] = {
+ [1] = STEP_B0,
+ [2] = STEP_B0,
+ [3] = STEP_B0,
+ [4] = STEP_C0,
+ [5] = STEP_B1,
+ [6] = STEP_B1,
+ [7] = STEP_C0,
+};
+
static const struct platform_desc kbl_desc = {
PLATFORM(KABYLAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -642,6 +672,7 @@ static const struct platform_desc kbl_desc = {
{},
},
.info = &skl_display,
+ STEP_INFO(kbl_steppings),
};
static const u16 cfl_ult_ids[] = {
@@ -706,6 +737,13 @@ static const struct platform_desc cml_desc = {
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C)
+static const enum intel_step bxt_steppings[] = {
+ [0xA] = STEP_C0,
+ [0xB] = STEP_C0,
+ [0xC] = STEP_D0,
+ [0xD] = STEP_E0,
+};
+
static const struct platform_desc bxt_desc = {
PLATFORM(BROXTON),
.info = &(const struct intel_display_device_info) {
@@ -714,6 +752,11 @@ static const struct platform_desc bxt_desc = {
.__runtime_defaults.ip.ver = 9,
},
+ STEP_INFO(bxt_steppings),
+};
+
+static const enum intel_step glk_steppings[] = {
+ [3] = STEP_B0,
};
static const struct platform_desc glk_desc = {
@@ -725,6 +768,7 @@ static const struct platform_desc glk_desc = {
.__runtime_defaults.ip.ver = 10,
},
+ STEP_INFO(glk_steppings),
};
#define ICL_DISPLAY \
@@ -773,6 +817,10 @@ static const u16 icl_port_f_ids[] = {
0
};
+static const enum intel_step icl_steppings[] = {
+ [7] = STEP_D0,
+};
+
static const struct platform_desc icl_desc = {
PLATFORM(ICELAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -784,6 +832,7 @@ static const struct platform_desc icl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
},
+ STEP_INFO(icl_steppings),
};
static const struct intel_display_device_info jsl_ehl_display = {
@@ -792,14 +841,21 @@ static const struct intel_display_device_info jsl_ehl_display = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D),
};
+static const enum intel_step jsl_ehl_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
+};
+
static const struct platform_desc jsl_desc = {
PLATFORM(JASPERLAKE),
.info = &jsl_ehl_display,
+ STEP_INFO(jsl_ehl_steppings),
};
static const struct platform_desc ehl_desc = {
PLATFORM(ELKHARTLAKE),
.info = &jsl_ehl_display,
+ STEP_INFO(jsl_ehl_steppings),
};
#define XE_D_DISPLAY \
@@ -850,10 +906,23 @@ static const u16 tgl_uy_ids[] = {
0
};
+static const enum intel_step tgl_steppings[] = {
+ [0] = STEP_B0,
+ [1] = STEP_D0,
+};
+
+static const enum intel_step tgl_uy_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_C0,
+ [2] = STEP_C0,
+ [3] = STEP_D0,
+};
+
static const struct platform_desc tgl_desc = {
PLATFORM(TIGERLAKE),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids },
+ { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids,
+ STEP_INFO(tgl_uy_steppings) },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -866,6 +935,12 @@ static const struct platform_desc tgl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6),
},
+ STEP_INFO(tgl_steppings),
+};
+
+static const enum intel_step dg1_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
};
static const struct platform_desc dg1_desc = {
@@ -876,6 +951,13 @@ static const struct platform_desc dg1_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2),
},
+ STEP_INFO(dg1_steppings),
+};
+
+static const enum intel_step rkl_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
+ [4] = STEP_C0,
};
static const struct platform_desc rkl_desc = {
@@ -892,6 +974,7 @@ static const struct platform_desc rkl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2),
},
+ STEP_INFO(rkl_steppings),
};
static const u16 adls_rpls_ids[] = {
@@ -899,10 +982,24 @@ static const u16 adls_rpls_ids[] = {
0
};
+static const enum intel_step adl_s_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x1] = STEP_A2,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_B0,
+ [0xC] = STEP_C0,
+};
+
+static const enum intel_step adl_s_rpl_s_steppings[] = {
+ [0x4] = STEP_D0,
+ [0xC] = STEP_C0,
+};
+
static const struct platform_desc adl_s_desc = {
PLATFORM(ALDERLAKE_S),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids },
+ { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids,
+ STEP_INFO(adl_s_rpl_s_steppings) },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -913,6 +1010,7 @@ static const struct platform_desc adl_s_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) |
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
},
+ STEP_INFO(adl_s_steppings),
};
#define XE_LPD_FEATURES \
@@ -986,15 +1084,34 @@ static const u16 adlp_rplp_ids[] = {
0
};
+static const enum intel_step adl_p_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_C0,
+ [0xC] = STEP_D0,
+};
+
+static const enum intel_step adl_p_adl_n_steppings[] = {
+ [0x0] = STEP_D0,
+};
+
+static const enum intel_step adl_p_rpl_pu_steppings[] = {
+ [0x4] = STEP_E0,
+};
+
static const struct platform_desc adl_p_desc = {
PLATFORM(ALDERLAKE_P),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids },
+ { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids,
+ STEP_INFO(adl_p_adl_n_steppings) },
+ { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings) },
+ { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings) },
{},
},
.info = &xe_lpd_display,
+ STEP_INFO(adl_p_steppings),
};
static const struct intel_display_device_info xe_hpd_display = {
@@ -1023,12 +1140,33 @@ static const u16 dg2_g12_ids[] = {
0
};
+static const enum intel_step dg2_g10_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x1] = STEP_A0,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_C0,
+};
+
+static const enum intel_step dg2_g11_steppings[] = {
+ [0x0] = STEP_B0,
+ [0x4] = STEP_C0,
+ [0x5] = STEP_C0,
+};
+
+static const enum intel_step dg2_g12_steppings[] = {
+ [0x0] = STEP_C0,
+ [0x1] = STEP_C0,
+};
+
static const struct platform_desc dg2_desc = {
PLATFORM(DG2),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids },
- { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids },
- { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids },
+ { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids,
+ STEP_INFO(dg2_g10_steppings) },
+ { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids,
+ STEP_INFO(dg2_g11_steppings) },
+ { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids,
+ STEP_INFO(dg2_g12_steppings) },
{},
},
.info = &xe_hpd_display,
@@ -1261,13 +1399,66 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc)
return NULL;
}
+static enum intel_step get_pre_gmdid_step(struct intel_display *display,
+ const struct stepping_desc *main,
+ const struct stepping_desc *sub)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ const enum intel_step *map = main->map;
+ int size = main->size;
+ int revision = pdev->revision;
+ enum intel_step step;
+
+ /* subplatform stepping info trumps main platform info */
+ if (sub && sub->map && sub->size) {
+ map = sub->map;
+ size = sub->size;
+ }
+
+ /* not all platforms define steppings, and it's fine */
+ if (!map || !size)
+ return STEP_NONE;
+
+ if (revision < size && map[revision] != STEP_NONE) {
+ step = map[revision];
+ } else {
+ drm_warn(display->drm, "Unknown revision 0x%02x\n", revision);
+
+ /*
+ * If we hit a gap in the revision to step map, use the information
+ * for the next revision.
+ *
+ * This may be wrong in all sorts of ways, especially if the
+ * steppings in the array are not monotonically increasing, but
+ * it's better than defaulting to 0.
+ */
+ while (revision < size && map[revision] == STEP_NONE)
+ revision++;
+
+ if (revision < size) {
+ drm_dbg_kms(display->drm, "Using display stepping for revision 0x%02x\n",
+ revision);
+ step = map[revision];
+ } else {
+ drm_dbg_kms(display->drm, "Using future display stepping\n");
+ step = STEP_FUTURE;
+ }
+ }
+
+ drm_WARN_ON(display->drm, step == STEP_NONE);
+
+ return step;
+}
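
A userspace rendering of the same gap-filling lookup, with a made-up revision table; gaps default to STEP_NONE (0) under designated initialization and are skipped forward, as in get_pre_gmdid_step() above:

#include <stdio.h>

enum step { STEP_NONE = 0, STEP_A0, STEP_B0, STEP_C0, STEP_FUTURE };

/* Made-up table: gaps at 0x1..0x3 and 0x5..0x7 stay STEP_NONE. */
static const enum step map[] = {
	[0x0] = STEP_A0,
	[0x4] = STEP_B0,
	[0x8] = STEP_C0,
};

static enum step lookup(int rev)
{
	int size = sizeof(map) / sizeof(map[0]);

	/* Walk forward to the next known revision instead of giving up. */
	while (rev < size && map[rev] == STEP_NONE)
		rev++;

	return rev < size ? map[rev] : STEP_FUTURE;
}

int main(void)
{
	/* 0x2 -> STEP_B0 (next known), 0x8 -> STEP_C0, 0x9 -> STEP_FUTURE */
	printf("%d %d %d\n", lookup(0x2), lookup(0x8), lookup(0x9));
	return 0;
}
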
+
void intel_display_device_probe(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
+ enum intel_step step;
/* Add drm device backpointer as early as possible. */
i915->display.drm = &i915->drm;
@@ -1307,13 +1498,25 @@ void intel_display_device_probe(struct drm_i915_private *i915)
DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform;
}
- if (ip_ver.ver || ip_ver.rel || ip_ver.step)
+ if (ip_ver.ver || ip_ver.rel || ip_ver.step) {
DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver;
+ step = STEP_A0 + ip_ver.step;
+ if (step > STEP_FUTURE) {
+ drm_dbg_kms(display->drm, "Using future display stepping\n");
+ step = STEP_FUTURE;
+ }
+ } else {
+ step = get_pre_gmdid_step(display, &desc->step_info,
+ subdesc ? &subdesc->step_info : NULL);
+ }
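+	/*
+	 * Editor's note: the STEP_A0 + ip_ver.step mapping above assumes the
+	 * intel_step enum values are contiguous, so e.g. a GMD ID step field
+	 * of 1 would map to STEP_A1.
+	 */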
- drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u\n",
+ DISPLAY_RUNTIME_INFO(i915)->step = step;
+
+ drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver,
- DISPLAY_RUNTIME_INFO(i915)->ip.rel);
+ DISPLAY_RUNTIME_INFO(i915)->ip.rel,
+ step != STEP_NONE ? intel_step_name(step) : "N/A");
return;
@@ -1474,6 +1677,9 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
}
}
+ display_runtime->rawclk_freq = intel_read_rawclk(i915);
+	drm_dbg_kms(&i915->drm, "rawclk rate: %u kHz\n", display_runtime->rawclk_freq);
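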
+
return;
display_fused_off:
@@ -1509,6 +1715,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "display version: %u\n",
runtime->ip.ver);
+ drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step));
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -1516,6 +1724,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+
+ drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
/*
@@ -1529,9 +1739,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
*/
bool intel_display_device_enabled(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
/* Only valid when HAS_DISPLAY() is true */
- drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
+ drm_WARN_ON(display->drm, !HAS_DISPLAY(display));
- return !i915->display.params.disable_display &&
- !intel_opregion_headless_sku(i915);
+ return !display->params.disable_display &&
+ !intel_opregion_headless_sku(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 13453ea4daea..dfb0c8bf5ca2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -161,7 +161,7 @@ enum intel_display_subplatform {
#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv)
/* Check that device has a display IP version within the specific range. */
-#define IS_DISPLAY_IP_RANGE(__i915, from, until) ( \
+#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \
BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
(DISPLAY_VER_FULL(__i915) >= (from) && \
DISPLAY_VER_FULL(__i915) <= (until)))
@@ -175,14 +175,14 @@ enum intel_display_subplatform {
* hardware fix is present and the software workaround is no longer necessary.
* E.g.,
*
- * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2)
- * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER)
+ * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2)
+ * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER)
*
* "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
* stepping bound for the specified IP version.
*/
-#define IS_DISPLAY_IP_STEP(__i915, ipver, from, until) \
- (IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \
+#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \
+ (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__i915), (from), (until)))
#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
@@ -194,6 +194,12 @@ enum intel_display_subplatform {
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step)
+
+#define IS_DISPLAY_STEP(__i915, since, until) \
+ (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
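+/*
+ * Editor's note (not from the original patch): the "until" bound above is
+ * exclusive, so IS_DISPLAY_STEP(i915, STEP_A0, STEP_B2) matches steppings
+ * from A0 up to, but not including, B2.
+ */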
+
struct intel_display_runtime_info {
enum intel_display_platform platform;
enum intel_display_subplatform subplatform;
@@ -201,8 +207,11 @@ struct intel_display_runtime_info {
struct intel_display_ip_ver {
u16 ver;
u16 rel;
- u16 step;
+ u16 step; /* hardware */
} ip;
+ int step; /* symbolic */
+
+ u32 rawclk_freq;
u8 pipe_mask;
u8 cpu_transcoder_mask;
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 794b4af38055..069426d9260b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -217,7 +217,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
return ret;
}
- intel_bios_init(i915);
+ intel_bios_init(display);
ret = intel_vga_register(i915);
if (ret)
@@ -265,7 +265,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
intel_init_quirks(display);
- intel_fbc_init(i915);
+ intel_fbc_init(display);
return 0;
@@ -275,7 +275,7 @@ cleanup_vga_client_pw_domain_dmc:
cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
- intel_bios_driver_remove(i915);
+ intel_bios_driver_remove(display);
return ret;
}
@@ -416,7 +416,8 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
+ struct intel_display *display = &i915->display;
+ struct drm_device *dev = display->drm;
enum pipe pipe;
int ret;
@@ -427,7 +428,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
- intel_pps_setup(i915);
+ intel_pps_setup(display);
intel_gmbus_setup(i915);
@@ -452,13 +453,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
- intel_hti_init(i915);
+ intel_hti_init(display);
/* Just disable it once at startup */
intel_vga_disable(i915);
intel_setup_outputs(i915);
- ret = intel_dp_tunnel_mgr_init(i915);
+ ret = intel_dp_tunnel_mgr_init(display);
if (ret)
goto err_hdcp;
@@ -466,7 +467,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
- intel_acpi_assign_connector_fwnodes(i915);
+ intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(dev);
intel_initial_plane_config(i915);
@@ -526,6 +527,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
void intel_display_driver_register(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
"i915 display info:");
@@ -533,8 +535,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
return;
/* Must be done after probing outputs */
- intel_opregion_register(i915);
- intel_acpi_video_register(i915);
+ intel_opregion_register(display);
+ intel_acpi_video_register(display);
intel_audio_init(i915);
@@ -578,6 +580,8 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
@@ -598,7 +602,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_mode_config_cleanup(i915);
- intel_dp_tunnel_mgr_cleanup(i915);
+ intel_dp_tunnel_mgr_cleanup(display);
intel_overlay_cleanup(i915);
@@ -607,23 +611,27 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
destroy_workqueue(i915->display.wq.flip);
destroy_workqueue(i915->display.wq.modeset);
- intel_fbc_cleanup(i915);
+ intel_fbc_cleanup(&i915->display);
}
/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
- intel_bios_driver_remove(i915);
+ intel_bios_driver_remove(display);
}
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
@@ -643,7 +651,7 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
drm_atomic_helper_shutdown(&i915->drm);
acpi_video_unregister();
- intel_opregion_unregister(i915);
+ intel_opregion_unregister(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 5219ba295c74..73369847ed66 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -14,6 +14,7 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
+#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -270,10 +271,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_asle(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
return false;
- return intel_opregion_asle_present(i915);
+ return intel_opregion_asle_present(display);
}
/**
@@ -497,6 +500,8 @@ void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
+
bool blc_event = false;
enum pipe pipe;
@@ -515,12 +520,13 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
}
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
@@ -539,7 +545,7 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
intel_gmbus_irq_handler(dev_priv);
@@ -570,6 +576,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
@@ -583,7 +590,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS)
intel_gmbus_irq_handler(dev_priv);
@@ -658,6 +665,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
@@ -671,7 +679,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(dev_priv);
@@ -695,6 +703,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
@@ -702,10 +711,10 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
drm_err(&dev_priv->drm, "Poison interrupt\n");
@@ -743,6 +752,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
@@ -767,10 +777,10 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
@@ -894,6 +904,7 @@ static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
+ struct intel_display *display = &dev_priv->display;
bool found = false;
if (DISPLAY_VER(dev_priv) >= 14) {
@@ -906,8 +917,15 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_pmdemand_irq_handler(dev_priv);
found = true;
}
+
+ if (iir & XELPDP_RM_TIMEOUT) {
+ u32 val = intel_uncore_read(&dev_priv->uncore,
+ RM_TIMEOUT_REG_CAPTURE);
+ drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ found = true;
+ }
} else if (iir & GEN8_DE_MISC_GSE) {
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
found = true;
}
@@ -1049,6 +1067,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
+ struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
@@ -1084,7 +1103,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
if (iir & gen8_de_port_aux_mask(dev_priv)) {
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
found = true;
}
@@ -1149,6 +1168,17 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
+ if (HAS_DSB(dev_priv)) {
+ if (iir & GEN12_DSB_INT(INTEL_DSB_0))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+
+ if (iir & GEN12_DSB_INT(INTEL_DSB_1))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+
+ if (iir & GEN12_DSB_INT(INTEL_DSB_2))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ }
+
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1211,8 +1241,10 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
+ struct intel_display *display = &i915->display;
+
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(i915);
+ intel_opregion_asle_intr(display);
}
void gen11_display_irq_handler(struct drm_i915_private *i915)
@@ -1680,6 +1712,7 @@ static void icp_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
@@ -1710,14 +1743,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
- XELPDP_PMDEMAND_RSP;
+ XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
} else if (DISPLAY_VER(dev_priv) >= 11) {
enum port port;
- if (intel_bios_is_dsi_present(dev_priv, &port))
+ if (intel_bios_is_dsi_present(display, &port))
de_port_masked |= DSI0_TE | DSI1_TE;
}
+ if (HAS_DSB(dev_priv))
+ de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
+ GEN12_DSB_INT(INTEL_DSB_1) |
+ GEN12_DSB_INT(INTEL_DSB_2);
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK |
gen8_de_pipe_underrun_mask(dev_priv) |
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index e82bd72d32fa..1a45d300b6f0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -173,14 +173,14 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name,
/**
* intel_display_params_dump - dump intel display modparams
- * @i915: i915 device
+ * @display: display device
* @p: the &drm_printer
*
* Pretty printer for i915 modparams.
*/
-void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
+void intel_display_params_dump(struct intel_display *display, struct drm_printer *p)
{
-#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
+#define PRINT(T, x, ...) _param_print(p, display->drm->driver->name, #x, display->params.x);
INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 48c29c55c939..da8dc943234b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
struct drm_printer;
-struct drm_i915_private;
+struct intel_display;
/*
* Invoke param, a function-like macro, for each intel display param, with
@@ -56,7 +56,7 @@ struct intel_display_params {
};
#undef MEMBER
-void intel_display_params_dump(struct drm_i915_private *i915,
+void intel_display_params_dump(struct intel_display *display,
struct drm_printer *p);
void intel_display_params_copy(struct intel_display_params *dest);
void intel_display_params_free(struct intel_display_params *params);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e288a1b21d7e..ef2fdbf97346 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -36,7 +36,7 @@
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-const char *
+static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
switch (domain) {
@@ -198,20 +198,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-/**
- * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This is the unlocked version of intel_display_power_is_enabled() and should
- * only be used from error capture and recovery code where deadlocks are
- * possible.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
@@ -1696,7 +1684,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
+ if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
@@ -1704,6 +1692,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) == 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+
+ /* Wa_15013987218 */
+ if (DISPLAY_VER(dev_priv) == 20) {
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
+ }
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index d6c2a5846bdc..425452c5a469 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,13 +183,8 @@ void intel_display_power_resume(struct drm_i915_private *i915);
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
u32 state);
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 919f712fef13..46e9eff12c23 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -861,6 +861,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_enable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
@@ -870,19 +872,21 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_disable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1176,14 +1180,15 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
intel_de_write(dev_priv, CBR1_VLV, 0);
- drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1229,11 +1234,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_vga_redisable_power_on(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1241,7 +1248,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index c2c347b22448..49e2e650ebcd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -83,7 +83,8 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
void intel_display_reset_finish(struct drm_i915_private *i915)
{
- struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
+ struct intel_display *display = &i915->display;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
@@ -94,7 +95,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
return;
- state = fetch_and_zero(&i915->display.restore.modeset_state);
+ state = fetch_and_zero(&display->restore.modeset_state);
if (!state)
goto unlock;
@@ -112,7 +113,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_pps_unlock_regs_wa(i915);
+ intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(i915);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f9d3cc3c342b..f29e5dc3db91 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1396,8 +1396,8 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
- /* For DSB related info */
- struct intel_dsb *dsb;
+ /* For DSB based color LUT updates */
+ struct intel_dsb *dsb_color_vblank, *dsb_color_commit;
u32 psr2_man_track_ctl;
@@ -1754,6 +1754,7 @@ struct intel_dp {
u8 lane_count;
u8 sink_count;
bool link_trained;
+ bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
@@ -1777,10 +1778,30 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ /* common rate,lane_count configs in bw order */
+ int num_configs;
+#define INTEL_DP_MAX_LANE_COUNT 4
+#define INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS (ilog2(INTEL_DP_MAX_LANE_COUNT) + 1)
+#define INTEL_DP_LANE_COUNT_EXP_BITS order_base_2(INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS)
+#define INTEL_DP_LINK_RATE_IDX_BITS (BITS_PER_TYPE(u8) - INTEL_DP_LANE_COUNT_EXP_BITS)
+#define INTEL_DP_MAX_LINK_CONFIGS (DP_MAX_SUPPORTED_RATES * \
+ INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS)
+ struct intel_dp_link_config {
+ u8 link_rate_idx:INTEL_DP_LINK_RATE_IDX_BITS;
+ u8 lane_count_exp:INTEL_DP_LANE_COUNT_EXP_BITS;
+ } configs[INTEL_DP_MAX_LINK_CONFIGS];
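+		/*
+		 * Editor's worked example (assuming DP_MAX_SUPPORTED_RATES
+		 * is 8): 4 max lanes -> ilog2(4) + 1 = 3 lane configs
+		 * (1, 2 and 4 lanes), needing order_base_2(3) = 2 bits for
+		 * lane_count_exp and leaving 8 - 2 = 6 bits for
+		 * link_rate_idx, with 8 * 3 = 24 total configs; each entry
+		 * thus packs into a single byte.
+		 */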
/* Max lane count for the current link */
int max_lane_count;
/* Max rate for the current link */
int max_rate;
+ /*
+ * Link parameters for which the MST topology was probed.
+ * Tracking these ensures that the MST path resources are
+ * re-enumerated whenever the link is retrained with new link
+ * parameters, as required by the DP standard.
+ */
+ int mst_probed_lane_count;
+ int mst_probed_rate;
int force_lane_count;
int force_rate;
bool retrain_disabled;
@@ -1806,6 +1827,7 @@ struct intel_dp {
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
+ bool as_sdp_supported;
struct drm_dp_tunnel *tunnel;
bool tunnel_suspended:1;
@@ -2063,8 +2085,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
return &dp_to_dig_port(intel_dp)->lspcon;
}
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -2182,35 +2202,18 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
-static inline int to_bpp_int(int bpp_x16)
-{
- return bpp_x16 >> 4;
-}
-
-static inline int to_bpp_frac(int bpp_x16)
-{
- return bpp_x16 & 0xf;
-}
-
-#define BPP_X16_FMT "%d.%04d"
-#define BPP_X16_ARGS(bpp_x16) to_bpp_int(bpp_x16), (to_bpp_frac(bpp_x16) * 625)
-
-static inline int to_bpp_int_roundup(int bpp_x16)
-{
- return (bpp_x16 + 0xf) >> 4;
-}
-
-static inline int to_bpp_x16(int bpp)
-{
- return bpp << 4;
-}
-
/*
* Conversion functions/macros from various pointer types to struct
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- (&to_i915(p)->display)
+ ((p) ? &to_i915(p)->display : NULL)
+#define __device_to_intel_display(p) \
+ __drm_device_to_intel_display(dev_get_drvdata(p))
+#define __pci_dev_to_intel_display(p) \
+ __drm_device_to_intel_display(pci_get_drvdata(p))
+#define __intel_atomic_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
#define __intel_connector_to_intel_display(p) \
__drm_device_to_intel_display((p)->base.dev)
#define __intel_crtc_to_intel_display(p) \
@@ -2234,6 +2237,9 @@ static inline int to_bpp_x16(int bpp)
#define to_intel_display(p) \
_Generic(*p, \
__assoc(drm_device, p), \
+ __assoc(device, p), \
+ __assoc(pci_dev, p), \
+ __assoc(intel_atomic_state, p), \
__assoc(intel_connector, p), \
__assoc(intel_crtc, p), \
__assoc(intel_crtc_state, p), \
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index 63201d09852c..be644ab6ae00 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -6,8 +6,16 @@
#ifndef __INTEL_DISPLAY_WA_H__
#define __INTEL_DISPLAY_WA_H__
+#include <linux/types.h>
+
struct drm_i915_private;
void intel_display_wa_apply(struct drm_i915_private *i915);
+#ifdef I915
+static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+#else
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 73977b173898..7c756d5ba2a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -391,7 +391,7 @@ static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
struct stepping_info *si)
{
- const char *step_name = intel_display_step_name(i915);
+ const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915));
si->stepping = step_name[0];
si->substepping = step_name[1];
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ebe7fe5417ae..a1fcedfd404b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/sort.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
@@ -42,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
@@ -88,6 +90,8 @@
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
+#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
+
/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -130,14 +134,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
-bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- return HAS_AS_SDP(i915) &&
- drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
-}
-
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
@@ -643,6 +639,106 @@ int intel_dp_rate_index(const int *rates, int len, int rate)
return -1;
}
+static int intel_dp_link_config_rate(struct intel_dp *intel_dp,
+ const struct intel_dp_link_config *lc)
+{
+ return intel_dp_common_rate(intel_dp, lc->link_rate_idx);
+}
+
+static int intel_dp_link_config_lane_count(const struct intel_dp_link_config *lc)
+{
+ return 1 << lc->lane_count_exp;
+}
+
+static int intel_dp_link_config_bw(struct intel_dp *intel_dp,
+ const struct intel_dp_link_config *lc)
+{
+ return drm_dp_max_dprx_data_rate(intel_dp_link_config_rate(intel_dp, lc),
+ intel_dp_link_config_lane_count(lc));
+}
+
+static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
+{
+ struct intel_dp *intel_dp = (struct intel_dp *)p; /* remove const */
+ const struct intel_dp_link_config *lc_a = a;
+ const struct intel_dp_link_config *lc_b = b;
+ int bw_a = intel_dp_link_config_bw(intel_dp, lc_a);
+ int bw_b = intel_dp_link_config_bw(intel_dp, lc_b);
+
+ if (bw_a != bw_b)
+ return bw_a - bw_b;
+
+ return intel_dp_link_config_rate(intel_dp, lc_a) -
+ intel_dp_link_config_rate(intel_dp, lc_b);
+}
+
+static void intel_dp_link_config_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_dp_link_config *lc;
+ int num_common_lane_configs;
+ int i;
+ int j;
+
+ if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
+ return;
+
+ num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;
+
+ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
+ ARRAY_SIZE(intel_dp->link.configs)))
+ return;
+
+ intel_dp->link.num_configs = intel_dp->num_common_rates * num_common_lane_configs;
+
+ lc = &intel_dp->link.configs[0];
+ for (i = 0; i < intel_dp->num_common_rates; i++) {
+ for (j = 0; j < num_common_lane_configs; j++) {
+ lc->lane_count_exp = j;
+ lc->link_rate_idx = i;
+
+ lc++;
+ }
+ }
+
+ sort_r(intel_dp->link.configs, intel_dp->link.num_configs,
+ sizeof(intel_dp->link.configs[0]),
+ link_config_cmp_by_bw, NULL,
+ intel_dp);
+}
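+
+/*
+ * Editor's example of the resulting order (not part of the patch): for
+ * common rates { 162000, 270000 } and up to 4 lanes, sorting by DPRX data
+ * rate (roughly rate * lanes * 8/10 for these 8b/10b rates) yields
+ * 1x162000, 1x270000, 2x162000, 2x270000, 4x162000, 4x270000; equal
+ * bandwidths would sort the lower link rate first.
+ */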
+
+void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ const struct intel_dp_link_config *lc;
+
+ if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
+ idx = 0;
+
+ lc = &intel_dp->link.configs[idx];
+
+ *link_rate = intel_dp_link_config_rate(intel_dp, lc);
+ *lane_count = intel_dp_link_config_lane_count(lc);
+}
+
+int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count)
+{
+ int link_rate_idx = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates,
+ link_rate);
+ int lane_count_exp = ilog2(lane_count);
+ int i;
+
+ for (i = 0; i < intel_dp->link.num_configs; i++) {
+ const struct intel_dp_link_config *lc = &intel_dp->link.configs[i];
+
+ if (lc->lane_count_exp == lane_count_exp &&
+ lc->link_rate_idx == link_rate_idx)
+ return i;
+ }
+
+ return -1;
+}
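+
+/*
+ * Editor's note: since the table is sorted by bandwidth, stepping from the
+ * index returned here towards 0 walks through progressively cheaper link
+ * configurations - a natural fallback order after a link training failure.
+ */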
+
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -661,6 +757,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
+
+ intel_dp_link_config_init(intel_dp);
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
@@ -1599,8 +1697,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
- for (bpp = to_bpp_int(limits->link.max_bpp_x16);
- bpp >= to_bpp_int(limits->link.min_bpp_x16);
+ for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
+ bpp >= fxp_q4_to_int(limits->link.min_bpp_x16);
bpp -= 2 * 3) {
int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
@@ -1928,7 +2026,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
timeslots);
if (ret == 0) {
pipe_config->dsc.compressed_bpp_x16 =
- to_bpp_x16(valid_dsc_bpp[i]);
+ fxp_q4_from_int(valid_dsc_bpp[i]);
return 0;
}
}
@@ -1971,7 +2069,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
compressed_bppx16 >= dsc_min_bpp;
compressed_bppx16 -= bppx16_step) {
if (intel_dp->force_dsc_fractional_bpp_en &&
- !to_bpp_frac(compressed_bppx16))
+ !fxp_q4_to_frac(compressed_bppx16))
continue;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -1981,7 +2079,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
if (ret == 0) {
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
if (intel_dp->force_dsc_fractional_bpp_en &&
- to_bpp_frac(compressed_bppx16))
+ fxp_q4_to_frac(compressed_bppx16))
drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
return 0;
@@ -2006,7 +2104,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
@@ -2018,7 +2116,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
adjusted_mode->hdisplay,
pipe_config->joiner_pipes);
dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
- dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
if (DISPLAY_VER(i915) >= 13)
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
@@ -2168,20 +2266,20 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
pipe_config,
pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
- dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
pipe_config->dsc.compressed_bpp_x16 =
- to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
+ fxp_q4_from_int(max(dsc_min_bpp, dsc_max_bpp));
pipe_config->pipe_bpp = pipe_bpp;
@@ -2271,17 +2369,17 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm,
"Cannot compute valid DSC parameters for Input Bpp = %d"
- "Compressed BPP = " BPP_X16_FMT "\n",
+ "Compressed BPP = " FXP_Q4_FMT "\n",
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16));
return ret;
}
pipe_config->dsc.compression_enable = true;
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
+ "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
pipe_config->dsc.slice_count);
return 0;
@@ -2313,15 +2411,15 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int max_link_bpp_x16;
max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
- to_bpp_x16(limits->pipe.max_bpp));
+ fxp_q4_from_int(limits->pipe.max_bpp));
if (!dsc) {
- max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));
+ max_link_bpp_x16 = rounddown(max_link_bpp_x16, fxp_q4_from_int(2 * 3));
- if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
+ if (max_link_bpp_x16 < fxp_q4_from_int(limits->pipe.min_bpp))
return false;
- limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
/*
* TODO: set the DSC link limits already here, atm these are
@@ -2334,7 +2432,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.max_bpp_x16 = max_link_bpp_x16;
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
+ "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
adjusted_mode->crtc_clock,
@@ -2342,7 +2440,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->max_lane_count,
limits->max_rate,
limits->pipe.max_bpp,
- BPP_X16_ARGS(limits->link.max_bpp_x16));
+ FXP_Q4_ARGS(limits->link.max_bpp_x16));
return true;
}
@@ -2394,7 +2492,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp = crtc_state->dsc.compression_enable ?
- to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
+ fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
crtc_state->pipe_bpp;
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
@@ -2473,10 +2571,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
+ "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
intel_dp_config_required_rate(pipe_config),
intel_dp_max_link_data_rate(intel_dp,
pipe_config->port_clock,
@@ -2626,8 +2724,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!crtc_state->vrr.enable ||
- !intel_dp_as_sdp_supported(intel_dp))
+ if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported)
return;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
@@ -2876,7 +2973,6 @@ static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
drm_connector_put(&connector->base);
}
-/* NOTE: @state is only valid for MST links and can be %NULL for SST. */
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -2885,18 +2981,19 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_connector *connector;
struct intel_digital_connector_state *conn_state;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i;
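+	/*
+	 * Editor's note: queue the modeset-retry work at most once per link
+	 * training attempt; the flag is reset in intel_dp_set_link_params().
+	 */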
+ if (intel_dp->needs_modeset_retry)
+ return;
+
+ intel_dp->needs_modeset_retry = true;
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
return;
}
- if (drm_WARN_ON(&i915->drm, !state))
- return;
-
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->base.crtc)
continue;
@@ -2968,8 +3065,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
- link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
- pipe_config->pipe_bpp));
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp));
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@@ -3024,6 +3121,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp->link_trained = false;
+ intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
}
@@ -3032,6 +3130,8 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp)
{
intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp);
intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp->link.mst_probed_lane_count = 0;
+ intel_dp->link.mst_probed_rate = 0;
intel_dp->link.retrain_disabled = false;
intel_dp->link.seq_train_failures = 0;
}
@@ -3367,8 +3467,11 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
- if (crtc_state)
+ if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
+ intel_dp->link_trained = true;
+ }
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -3435,7 +3538,7 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
- int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
int i;
for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
@@ -4158,6 +4261,9 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
+ if (intel_dp->is_mst)
+ intel_dp_mst_prepare_probe(intel_dp);
+
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
/* Avoid stale info on the next detect cycle. */
@@ -4387,8 +4493,11 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable && HAS_DSC(dev_priv))
val &= ~VDIP_ENABLE_PPS;
- /* When PSR is enabled, this routine doesn't disable VSC DIP */
- if (!crtc_state->has_psr)
+ /*
+	 * This routine disables the VSC DIP if it is called to disable
+	 * the SDPs, or if the crtc state does not have PSR enabled.
+ */
+ if (!enable || !crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
intel_de_write(dev_priv, reg, val);
@@ -5081,7 +5190,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
ack[3] |= DP_TUNNELING_IRQ;
}
- if (!memchr_inv(ack, 0, sizeof(ack)))
+ if (mem_is_zero(ack, sizeof(ack)))
break;
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
@@ -5255,8 +5364,6 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc;
- bool mst_output = false;
u8 pipe_mask;
int ret;
@@ -5285,78 +5392,28 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
- mst_output = true;
- break;
- }
-
- /* Suppress underruns caused by re-training */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), false);
- }
-
- /* TODO: use a modeset for SST as well. */
- if (mst_output) {
- ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
-
- if (ret && ret != -EDEADLK)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] link retraining failed: %pe\n",
- encoder->base.base.id, encoder->base.name,
- ERR_PTR(ret));
-
- goto out;
- }
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- intel_dp->link_trained = false;
-
- intel_dp_check_frl_training(intel_dp);
- intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
- intel_dp_start_link_train(NULL, intel_dp, crtc_state);
- intel_dp_stop_link_train(intel_dp, crtc_state);
- break;
- }
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
+ ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
+ if (ret == -EDEADLK)
+ return ret;
- /* Keep underrun reporting disabled until things are stable */
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_dp->link.force_retrain = false;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), true);
- }
-
-out:
- if (ret != -EDEADLK)
- intel_dp->link.force_retrain = false;
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] link retraining failed: %pe\n",
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(ret));
return ret;
}
void intel_dp_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_modeset_acquire_ctx ctx;
int ret;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret)
ret = intel_dp_retrain_link(encoder, &ctx);
-
- drm_WARN_ON(&i915->drm, ret);
}
void intel_dp_check_link_state(struct intel_dp *intel_dp)
@@ -5906,6 +5963,15 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
connector);
}
+static void
+intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
+ drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
+}
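+
+/*
+ * Editor's note: this caches what intel_dp_as_sdp_supported() used to
+ * compute on every call, so the DPCD based check is done once per detect.
+ */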
+
static int
intel_dp_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -5976,13 +6042,15 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
- intel_dp_mst_configure(intel_dp);
+ intel_dp_detect_sdp_caps(intel_dp);
if (intel_dp->reset_link_params) {
intel_dp_reset_link_params(intel_dp);
intel_dp->reset_link_params = false;
}
+ intel_dp_mst_configure(intel_dp);
+
intel_dp_print_rates(intel_dp);
if (intel_dp->is_mst) {
@@ -6453,8 +6521,9 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
{
+ struct intel_display *display = &i915->display;
const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ intel_bios_encoder_data_lookup(display, port);
return _intel_dp_is_port_edp(i915, devdata, port);
}
@@ -6557,6 +6626,7 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode;
@@ -6582,7 +6652,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
+ intel_bios_init_panel_early(display, &intel_connector->panel,
encoder->devdata);
if (!intel_pps_init(intel_dp)) {
@@ -6679,7 +6749,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
+ intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
IS_ERR(drm_edid) ? NULL : drm_edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index a0f990a95ecc..1b9aaddd8c35 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -85,7 +85,6 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_has_dsc(const struct intel_connector *connector);
int intel_dp_link_symbol_size(int rate);
@@ -108,6 +107,8 @@ int intel_dp_max_common_rate(struct intel_dp *intel_dp);
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp);
int intel_dp_common_rate(struct intel_dp *intel_dp, int index);
int intel_dp_rate_index(const int *rates, int len, int rate);
+int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count);
+void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count);
void intel_dp_update_sink_caps(struct intel_dp *intel_dp);
void intel_dp_reset_link_params(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index be58185a77c0..04a7acd7f73c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -18,12 +18,12 @@
#define AUX_CH_NAME_BUFSIZE 6
-static const char *aux_ch_name(struct drm_i915_private *i915,
+static const char *aux_ch_name(struct intel_display *display,
char *buf, int size, enum aux_ch aux_ch)
{
- if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
- else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1)
+ else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
else
snprintf(buf, size, "%c", 'A' + aux_ch);
@@ -56,17 +56,18 @@ static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
int ret;
- ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
+ 0,
2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
@@ -75,7 +76,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (index)
return 0;
@@ -84,12 +85,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -102,15 +103,16 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = i915->display.cdclk.hw.cdclk;
+ freq = display->cdclk.hw.cdclk;
else
- freq = RUNTIME_INFO(i915)->rawclk_freq;
+ freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
@@ -201,8 +203,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -227,7 +229,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
* Power request bit is already set during aux power well enable.
* Preserve the bit across aux transactions.
*/
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
return ret;
@@ -239,6 +241,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -297,7 +300,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = intel_de_read_notrace(i915, ch_ctl);
+ status = intel_de_read_notrace(display, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -306,10 +309,10 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- const u32 status = intel_de_read(i915, ch_ctl);
+ const u32 status = intel_de_read(display, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"%s: not started (status 0x%08x)\n",
intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
@@ -320,7 +323,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -336,17 +339,17 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- intel_de_write(i915, ch_data[i >> 2],
+ intel_de_write(display, ch_data[i >> 2],
intel_dp_aux_pack(send + i,
send_bytes - i));
/* Send the command and wait for it to complete */
- intel_de_write(i915, ch_ctl, send_ctl);
+ intel_de_write(display, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- intel_de_write(i915, ch_ctl,
+ intel_de_write(display, ch_ctl,
status | DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
@@ -370,7 +373,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ drm_err(display->drm, "%s: not done (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
@@ -382,7 +385,7 @@ done:
* not connected.
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EIO;
goto out;
@@ -393,7 +396,7 @@ done:
* -- don't fill the kernel log with these
*/
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
@@ -408,7 +411,7 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s: Forbidden recv_bytes = %d on aux transaction\n",
intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
@@ -419,7 +422,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
+ intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
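The 20-byte limit enforced above follows from the five 32-bit data registers, four bytes each. A self-contained model of the MSB-first packing and unpacking the transfer loop depends on; this mirrors what intel_dp_aux_pack()/intel_dp_aux_unpack() are doing, but is a sketch rather than the driver code:

#include <stdint.h>
#include <stdio.h>

/* Pack up to 4 bytes MSB-first into one AUX data register value. */
static uint32_t aux_pack(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
}

/* Unpack up to 4 bytes back out of a register value, MSB first. */
static void aux_unpack(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
        uint8_t msg[3] = { 0x11, 0x22, 0x33 }, out[3];
        uint32_t reg = aux_pack(msg, 3);        /* 0x11223300 */

        aux_unpack(reg, out, 3);
        printf("0x%08x %02x %02x %02x\n", reg, out[0], out[1], out[2]);
        return 0;
}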
@@ -468,7 +471,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
u32 flags = intel_dp_aux_xfer_flags(msg);
@@ -483,10 +486,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
- if (drm_WARN_ON(&i915->drm, txsize > 20))
+ if (drm_WARN_ON(display->drm, txsize > 20))
return -E2BIG;
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+ drm_WARN_ON(display->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@@ -511,7 +514,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ if (drm_WARN_ON(display->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@ -721,7 +724,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -732,16 +735,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -752,10 +755,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
}
}
@@ -769,19 +772,20 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
} else if (HAS_PCH_SPLIT(i915)) {
@@ -795,7 +799,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -804,17 +808,17 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &i915->drm;
+ intel_dp->aux.drm_dev = display->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
@@ -823,10 +827,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* SKL has DDI E but no AUX E */
- if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
return AUX_CH_A;
return (enum aux_ch)encoder->port;
@@ -836,10 +840,10 @@ static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch aux_ch)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
if (other == encoder)
continue;
@@ -855,7 +859,7 @@ get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
const char *source;
enum aux_ch aux_ch;
@@ -876,23 +880,23 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
other = get_encoder_by_aux_ch(encoder, aux_ch);
if (other) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
other->base.base.id, other->base.name);
return AUX_CH_NONE;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch), source);
+ aux_ch_name(display, buf, sizeof(buf), aux_ch), source);
return aux_ch;
}
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915)
+void intel_dp_aux_irq_handler(struct intel_display *display)
{
- wake_up_all(&i915->display.gmbus.wait_queue);
+ wake_up_all(&display->gmbus.wait_queue);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 593f58fafab7..90ee1c5fae28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
enum aux_ch;
-struct drm_i915_private;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -18,7 +18,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp);
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
+void intel_dp_aux_irq_handler(struct intel_display *display);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8ce60d53dcde..33f72db99b58 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -109,7 +109,7 @@ static bool is_intel_tcon_cap(const u8 tcon_cap[4])
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_panel *panel = &connector->panel;
@@ -122,7 +122,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (ret != sizeof(tcon_cap))
return false;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
connector->base.base.id, connector->base.name,
is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
@@ -141,10 +142,10 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* HDR static metadata we need to start maintaining table of
* ranges for such panels.
*/
- if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
@@ -170,14 +171,15 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
static u32
intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 tmp;
u8 buf[2] = {};
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
connector->base.base.id, connector->base.name);
return 0;
}
@@ -195,7 +197,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
connector->base.base.id, connector->base.name);
return 0;
}
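The two bytes read above hold the brightness in nits, least significant byte first; assembling them is a plain little-endian combine. A sketch (the DPCD register name is the only tie to the hunk; the byte values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* As read from INTEL_EDP_BRIGHTNESS_NITS_LSB/MSB, LSB first. */
        uint8_t buf[2] = { 0x2c, 0x01 };
        uint16_t nits = (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);

        printf("%u nits\n", nits);      /* 0x012c -> 300 */
        return 0;
}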
@@ -253,8 +256,8 @@ static void
intel_dp_aux_write_content_luminance(struct intel_connector *connector,
struct hdr_output_metadata *hdr_metadata)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
u8 buf[4];
@@ -270,7 +273,7 @@ intel_dp_aux_write_content_luminance(struct intel_connector *connector,
INTEL_EDP_HDR_CONTENT_LUMINANCE,
buf, sizeof(buf));
if (ret < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Content Luminance DPCD reg write failed, err:-%d\n",
ret);
}
@@ -280,7 +283,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* According to spec segmented backlight needs to be set whenever panel is in
@@ -291,7 +294,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state,
*ctrl |= INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE;
}
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
*ctrl &= ~INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE;
if (panel->backlight.edp.intel_cap.supports_2020_gamut &&
@@ -311,9 +314,9 @@ static void
intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct hdr_output_metadata *hdr_metadata;
int ret;
@@ -323,7 +326,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
connector->base.base.id, connector->base.name, ret);
return;
}
@@ -346,7 +350,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
if (ctrl != old_ctrl &&
drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
connector->base.base.id, connector->base.name);
if (intel_dp_in_hdr_mode(conn_state)) {
@@ -377,7 +382,7 @@ static const char *dpcd_vs_pwm_str(bool aux)
static void
intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
@@ -392,7 +397,7 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE,
buf, sizeof(buf));
if (ret < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Luminance DPCD reg write failed, err:-%d\n",
ret);
}
@@ -400,20 +405,21 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct drm_luminance_range_info *luminance_range =
&connector->base.display_info.luminance_range;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.intel_cap.sdr_uses_aux));
if (!panel->backlight.edp.intel_cap.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
connector->base.base.id, connector->base.name, ret);
return ret;
@@ -430,7 +436,8 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
intel_dp_aux_write_panel_luminance_override(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.min, panel->backlight.max);
@@ -501,9 +508,9 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 current_level;
u8 current_mode;
int ret;
@@ -514,17 +521,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
connector->base.base.id, connector->base.name, ret);
return ret;
@@ -553,7 +562,8 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
connector->base.base.id, connector->base.name);
return 0;
@@ -562,11 +572,12 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
return true;
}
@@ -591,16 +602,15 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool try_intel_interface = false, try_vesa_interface = false;
/* Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
- switch (i915->display.params.enable_dpcd_backlight) {
+ switch (display->params.enable_dpcd_backlight) {
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b0101d72b9c1..3425b3643143 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -152,7 +152,7 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@@ -677,8 +677,15 @@ static
int intel_dp_hdcp2_get_capability(struct intel_connector *connector,
bool *capable)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_digital_port *dig_port;
+ struct drm_dp_aux *aux;
+
+ *capable = false;
+ if (!intel_attached_encoder(connector))
+ return -EINVAL;
+
+ dig_port = intel_attached_dig_port(connector);
+ aux = &dig_port->dp.aux;
return _intel_dp_hdcp2_get_capability(aux, capable);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index d044c8e36bb3..40bedc31d6bf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,6 +21,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/display/drm_dp_helper.h>
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -37,13 +39,13 @@
drm_dp_phy_name(_dp_phy)
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
- drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
+ drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
- drm_err(&dp_to_i915(_intel_dp)->drm, \
+ drm_err(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
else \
@@ -114,7 +116,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1)
+ return false;
+
+ intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
+
+ return true;
}
static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
@@ -174,7 +182,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* still taking into account any LTTPR common lane- rate/count limits.
*/
if (lttpr_count < 0)
- return 0;
+ goto out_reset_lttpr_count;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
lt_dbg(intel_dp, DP_PHY_DPRX,
@@ -208,7 +216,8 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -217,7 +226,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
@@ -248,7 +257,8 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
*/
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -256,7 +266,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -319,10 +329,11 @@ static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(display->drm,
+ lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
@@ -331,7 +342,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 voltage_max;
/*
@@ -343,7 +354,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
else
voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
@@ -353,7 +364,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 preemph_max;
/*
@@ -365,7 +376,7 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
else
preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
@@ -375,10 +386,11 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
@@ -697,26 +709,28 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
return true;
}
-static void
-intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr)
{
u8 link_config[2];
- link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
- link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
+ link_config[1] = drm_dp_is_uhbr_rate(link_rate) ?
DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
}
-static void
-intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 link_bw, u8 rate_select)
+static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- u8 lane_count = crtc_state->lane_count;
+ intel_dp_link_training_set_mode(intel_dp,
+ crtc_state->port_clock, crtc_state->vrr.flipline);
+}
- if (crtc_state->enhanced_framing)
+void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
+ int link_bw, int rate_select, int lane_count,
+ bool enhanced_framing)
+{
+ if (enhanced_framing)
lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
if (link_bw) {
@@ -740,6 +754,14 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
}
}
+static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 link_bw, u8 rate_select)
+{
+ intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
+ crtc_state->enhanced_framing);
+}
+
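For a rough picture of what intel_dp_link_training_set_mode() programs: the two-byte write lands in DP_DOWNSPREAD_CTRL and the adjacent channel-coding register. The offsets and bit values below follow the DP specification, but treat the snippet as an illustrative model, not driver code:

#include <stdint.h>
#include <stdio.h>

/* DPCD offsets/bits per the DP spec (illustrative subset). */
#define DP_DOWNSPREAD_CTRL          0x107
#define DP_MSA_TIMING_PAR_IGNORE_EN 0x80
#define DP_SET_ANSI_8B10B           0x01
#define DP_SET_ANSI_128B132B        0x02

int main(void)
{
        int link_rate = 1000000;        /* UHBR10, i.e. 10 Gbps per lane */
        int is_vrr = 1;
        uint8_t link_config[2];

        /* Byte 0: DOWNSPREAD_CTRL, byte 1: MAIN_LINK_CHANNEL_CODING_SET. */
        link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
        link_config[1] = link_rate >= 1000000 ?
                DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;

        printf("DPCD 0x%03x <- %02x %02x\n",
               DP_DOWNSPREAD_CTRL, link_config[0], link_config[1]);
        return 0;
}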
/*
* Prepare link training by configuring the link parameters. On DDI platforms
* also enable the port here.
@@ -932,7 +954,8 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -1152,6 +1175,36 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
return true;
}
+static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ int link_rate;
+ int lane_count;
+ int i;
+
+ i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
+ for (i--; i >= 0; i--) {
+ intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count);
+
+ if ((intel_dp->link.force_rate &&
+ intel_dp->link.force_rate != link_rate) ||
+ (intel_dp->link.force_lane_count &&
+ intel_dp->link.force_lane_count != lane_count))
+ continue;
+
+ break;
+ }
+
+ if (i < 0)
+ return false;
+
+ *new_link_rate = link_rate;
+ *new_lane_count = lane_count;
+
+ return true;
+}
+
static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
{
int rate_index;
@@ -1187,6 +1240,41 @@ static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
return current_lane_count >> 1;
}
+static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ int link_rate;
+ int lane_count;
+
+ lane_count = crtc_state->lane_count;
+ link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
+ if (link_rate < 0) {
+ lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
+ link_rate = intel_dp_max_common_rate(intel_dp);
+ }
+
+ if (lane_count < 0)
+ return false;
+
+ *new_link_rate = link_rate;
+ *new_lane_count = lane_count;
+
+ return true;
+}
+
+static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ /* TODO: Use the same fallback logic on SST as on MST. */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ return reduce_link_params_in_bw_order(intel_dp, crtc_state,
+ new_link_rate, new_lane_count);
+ else
+ return reduce_link_params_in_rate_lane_order(intel_dp, crtc_state,
+ new_link_rate, new_lane_count);
+}
+
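To make the bandwidth-order fallback above concrete, a small standalone model; the config table is hypothetical, and the real driver additionally skips entries that conflict with a forced rate or lane count, as the loop above shows:

#include <stdio.h>

struct cfg { int rate_khz; int lanes; };

/*
 * Hypothetical link configs sorted by total bandwidth, standing in
 * for the table the intel_dp_link_config_*() helpers index into.
 */
static const struct cfg by_bw[] = {
        { 162000, 1 }, { 270000, 1 }, { 162000, 2 }, { 270000, 2 },
        { 162000, 4 }, { 540000, 2 }, { 270000, 4 }, { 540000, 4 },
};
#define N_CFG (int)(sizeof(by_bw) / sizeof(by_bw[0]))

static int cfg_index(int rate, int lanes)
{
        int i;

        for (i = 0; i < N_CFG; i++)
                if (by_bw[i].rate_khz == rate && by_bw[i].lanes == lanes)
                        return i;
        return -1;
}

int main(void)
{
        /* BW-order fallback after a failed 270000 kHz x4 link: */
        int i = cfg_index(270000, 4) - 1;

        if (i >= 0)
                printf("next try: %d kHz x%d\n",
                       by_bw[i].rate_khz, by_bw[i].lanes); /* 540000 x2 */
        return 0;
}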
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1200,14 +1288,7 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
- new_lane_count = crtc_state->lane_count;
- new_link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
- if (new_link_rate < 0) {
- new_lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
- new_link_rate = intel_dp_max_common_rate(intel_dp);
- }
-
- if (new_lane_count < 0)
+ if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count))
return -1;
if (intel_dp_is_edp(intel_dp) &&
@@ -1228,12 +1309,10 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
-/* NOTE: @state is only valid for MST links and can be %NULL for SST. */
static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
@@ -1249,11 +1328,6 @@ static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *
return false;
}
- if (drm_WARN_ON(&i915->drm,
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
- !state))
- return false;
-
/* Schedule a Hotplug Uevent to userspace to start modeset */
intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
@@ -1512,14 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
* retraining with reduced link rate/lane parameters if the link training
* fails.
* After calling this function intel_dp_stop_link_train() must be called.
- *
- * NOTE: @state is only valid for MST links and can be %NULL for SST.
*/
void intel_dp_start_link_train(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool passed;
@@ -1530,11 +1602,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
*/
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
- if (drm_WARN_ON(&i915->drm,
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
- !state))
- return;
-
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
lttpr_count = 0;
@@ -1569,7 +1636,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* For test cases which rely on the link training or processing of HPDs
* ignore_long_hpd flag can unset from the testcase.
*/
- if (i915->display.hotplug.ignore_long_hpd) {
+ if (display->hotplug.ignore_long_hpd) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
return;
}
@@ -1621,14 +1688,14 @@ static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *conn
static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_rate = -1;
int force_rate;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1636,7 +1703,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_rate == 0 ? "[" : "",
@@ -1692,7 +1759,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int rate;
int err;
@@ -1701,14 +1768,14 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
if (rate < 0)
return rate;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_rate = rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1719,14 +1786,14 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_lane_count = -1;
int force_lane_count;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1734,7 +1801,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_lane_count == 0 ? "[" : "",
@@ -1794,7 +1861,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int lane_count;
int err;
@@ -1803,14 +1870,14 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
if (lane_count < 0)
return lane_count;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_lane_count = lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1821,17 +1888,17 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1840,17 +1907,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show,
static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1859,17 +1926,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_sho
static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_train_failure;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1877,20 +1944,20 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
static int i915_dp_force_link_training_failure_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
if (val > 2)
return -EINVAL;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_train_failure = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1901,17 +1968,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
static int i915_dp_force_link_retrain_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_retrain;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1919,17 +1986,17 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val)
static int i915_dp_force_link_retrain_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_retrain = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
@@ -1942,17 +2009,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 42e7fc6cb171..2066b9146762 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -16,6 +16,12 @@ struct intel_dp;
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
+void intel_dp_link_training_set_mode(struct intel_dp *intel_dp,
+ int link_rate, bool is_vrr);
+void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
+ int link_bw, int rate_select, int lane_count,
+ bool enhanced_framing);
+
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 17978a1f9ab0..15541932b809 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -43,6 +43,7 @@
#include "intel_dp_hdcp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
+#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -211,8 +212,8 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
- link_bpp_x16 = to_bpp_x16(dsc ? bpp :
- intel_dp_output_bpp(crtc_state->output_format, bpp));
+ link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
+ intel_dp_output_bpp(crtc_state->output_format, bpp));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
false, dsc, link_bpp_x16);
@@ -289,7 +290,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
- crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
@@ -308,8 +309,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
* YUV420 is only half of the pipe bpp value.
*/
slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- to_bpp_int(limits->link.max_bpp_x16),
- to_bpp_int(limits->link.min_bpp_x16),
+ fxp_q4_to_int(limits->link.max_bpp_x16),
+ fxp_q4_to_int(limits->link.min_bpp_x16),
limits,
conn_state, 2 * 3, false);
@@ -374,11 +375,11 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state,
max_bpp / 3);
max_compressed_bpp = min(max_compressed_bpp,
- to_bpp_int(limits->link.max_bpp_x16));
+ fxp_q4_to_int(limits->link.max_bpp_x16));
min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
min_compressed_bpp = max(min_compressed_bpp,
- to_bpp_int_roundup(limits->link.min_bpp_x16));
+ fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
@@ -478,10 +479,10 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
- if (limits->link.max_bpp_x16 < to_bpp_x16(24))
+ if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
return false;
- limits->link.min_bpp_x16 = to_bpp_x16(24);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(24);
return true;
}
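The fxp_q4_*() conversions introduced above are plain Q4 fixed point (four fractional bits, hence the _x16 naming); the arithmetic is just shifts. Local definitions for illustration:

#include <stdio.h>

/* Q4 fixed point: 4 fractional bits, as used for link bpp values. */
#define fxp_q4_from_int(x)       ((x) << 4)
#define fxp_q4_to_int(x)         ((x) >> 4)
#define fxp_q4_to_int_roundup(x) (((x) + 15) >> 4)

int main(void)
{
        int bpp_x16 = fxp_q4_from_int(24);              /* 384 */

        printf("%d %d %d\n", bpp_x16,
               fxp_q4_to_int(bpp_x16),                  /* 24 */
               fxp_q4_to_int_roundup(bpp_x16 + 1));     /* 25 */
        return 0;
}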
@@ -489,18 +490,18 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
- min_bpp_x16 = to_bpp_x16(13);
+ min_bpp_x16 = fxp_q4_from_int(13);
else if (limits->max_rate < 810000)
- min_bpp_x16 = to_bpp_x16(10);
+ min_bpp_x16 = fxp_q4_from_int(10);
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
- BPP_X16_ARGS(min_bpp_x16));
+ FXP_Q4_ARGS(min_bpp_x16));
if (limits->link.max_bpp_x16 < min_bpp_x16)
return false;
@@ -1113,6 +1114,33 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
to_intel_crtc(pipe_config->uapi.crtc));
}
+static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
+ int link_rate, int lane_count)
+{
+ return intel_dp->link.mst_probed_rate == link_rate &&
+ intel_dp->link.mst_probed_lane_count == lane_count;
+}
+
+static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
+ int link_rate, int lane_count)
+{
+ intel_dp->link.mst_probed_rate = link_rate;
+ intel_dp->link.mst_probed_lane_count = lane_count;
+}
+
+static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (intel_mst_probed_link_params_valid(intel_dp,
+ crtc_state->port_clock, crtc_state->lane_count))
+ return;
+
+ drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
+
+ intel_mst_set_probed_link_params(intel_dp,
+ crtc_state->port_clock, crtc_state->lane_count);
+}
+
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -1149,17 +1177,19 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_sink_enable_decompression(state, connector, pipe_config);
- if (first_mst_stream)
+ if (first_mst_stream) {
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
+ intel_mst_reprobe_topology(intel_dp, pipe_config);
+ }
+
intel_dp->active_mst_links++;
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
- drm_dbg_kms(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
- connector->base.name, ret);
+ intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
/*
* Before Gen 12 this is not done as part of
@@ -1223,6 +1253,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
struct intel_crtc *pipe_crtc;
+ int ret;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -1254,8 +1285,11 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- drm_dp_add_payload_part2(&intel_dp->mst_mgr,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
+ drm_atomic_get_mst_payload_state(mst_state,
+ connector->port));
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
@@ -1999,6 +2033,36 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
return false;
}
+/**
+ * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
+ * @intel_dp: DP port object
+ *
+ * Prepare an MST link for topology probing, programming the target
+ * link parameters to DPCD. This step is required for the enumeration
+ * of path resources during probing.
+ */
+void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
+{
+ int link_rate = intel_dp_max_link_rate(intel_dp);
+ int lane_count = intel_dp_max_lane_count(intel_dp);
+ u8 rate_select;
+ u8 link_bw;
+
+ if (intel_dp->link_trained)
+ return;
+
+ if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
+ return;
+
+ intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);
+
+ intel_dp_link_training_set_mode(intel_dp, link_rate, false);
+ intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd));
+
+ intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
+}
+
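For context on the link_bw value fed into intel_dp_link_training_set_bw() above: for the classic 8b/10b rates the BW code is simply the per-lane rate in 27 MHz units, while eDP intermediate rates use rate_select (an index into the sink's rate table) with link_bw set to 0, and UHBR rates have their own codes. A sketch of the 8b/10b case only:

#include <stdio.h>

/* 8b/10b DP BW code: the per-lane rate in 27 MHz units (DP spec). */
static int link_rate_to_bw_code(int rate_khz)
{
        return rate_khz / 27000;
}

int main(void)
{
        printf("0x%02x\n", link_rate_to_bw_code(162000)); /* 0x06: 1.62 Gbps */
        printf("0x%02x\n", link_rate_to_bw_code(270000)); /* 0x0a: 2.7 Gbps */
        printf("0x%02x\n", link_rate_to_bw_code(540000)); /* 0x14: 5.4 Gbps */
        return 0;
}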
/*
* intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
* @intel_dp: DP port object
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 9e4c7679f1c3..8343804ce3f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -27,6 +27,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp);
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 6503abdc2b98..94198bc04939 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -69,7 +69,7 @@ static int get_current_link_bw(struct intel_dp *intel_dp,
static int update_tunnel_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool old_bw_below_dprx;
bool new_bw_below_dprx;
@@ -81,7 +81,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -103,7 +103,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
!new_bw_below_dprx)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -121,20 +121,20 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
*/
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc;
int tunnel_bw = 0;
int err;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int stream_bw = intel_dp_config_required_rate(crtc_state);
tunnel_bw += stream_bw;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -145,7 +145,7 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi
err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
if (err) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -172,12 +172,12 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
int ret;
- tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+ tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
return PTR_ERR(tunnel);
@@ -189,7 +189,7 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
if (ret == -EOPNOTSUPP)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -266,14 +266,15 @@ bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
*/
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -295,7 +296,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -307,7 +308,8 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
intel_dp->tunnel_suspended = false;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -347,7 +349,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
return;
out_err:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -369,12 +371,12 @@ add_inherited_tunnel(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_dp_tunnel *old_tunnel;
old_tunnel = get_inherited_tunnel(state, crtc);
if (old_tunnel) {
- drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+ drm_WARN_ON(display->drm, old_tunnel != tunnel);
return 0;
}
@@ -394,7 +396,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_digital_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector =
to_intel_connector(old_conn_state->base.connector);
@@ -422,7 +424,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -441,12 +443,13 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
*/
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
enum pipe pipe;
if (!state->inherited_dp_tunnels)
return;
- for_each_pipe(to_i915(state->base.dev), pipe)
+ for_each_pipe(display, pipe)
if (state->inherited_dp_tunnels->ref[pipe].tunnel)
drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
@@ -457,7 +460,7 @@ void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *s
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
u32 pipe_mask;
int err;
@@ -466,7 +469,7 @@ static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *sta
if (err)
return err;
- drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+ drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
@@ -504,7 +507,7 @@ static int check_group_state(struct intel_atomic_state *state,
struct intel_connector *connector,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -512,7 +515,7 @@ static int check_group_state(struct intel_atomic_state *state,
if (!crtc_state->dp_tunnel_ref.tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -583,7 +586,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int required_rate = intel_dp_config_required_rate(crtc_state);
@@ -592,7 +595,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -708,7 +711,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder;
encoder = intel_get_crtc_new_encoder(state, crtc_state);
@@ -716,7 +719,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
if (!intel_digital_port_connected(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
drm_dp_tunnel_name(tunnel),
encoder->base.base.id,
@@ -765,7 +768,7 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
/**
* intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
- * @i915: i915 device object
+ * @display: display device
*
* Initialize the DP tunnel manager. The tunnel manager will support the
* detection/management of DP tunnels on all DP connectors, so the function
@@ -773,14 +776,14 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
*
* Return 0 in case of success, a negative error code otherwise.
*/
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
struct drm_dp_tunnel_mgr *tunnel_mgr;
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector;
int dp_connectors = 0;
- drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector, &connector_list_iter) {
if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -789,23 +792,23 @@ int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
}
drm_connector_list_iter_end(&connector_list_iter);
- tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+ tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
if (IS_ERR(tunnel_mgr))
return PTR_ERR(tunnel_mgr);
- i915->display.dp_tunnel_mgr = tunnel_mgr;
+ display->dp_tunnel_mgr = tunnel_mgr;
return 0;
}
/**
* intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
- * @i915: i915 device object
+ * @display: display device
*
* Clean up the DP tunnel manager state.
*/
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
- drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
- i915->display.dp_tunnel_mgr = NULL;
+ drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
+ display->dp_tunnel_mgr = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index 08b2cba84af2..a0c00b7d3303 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -9,14 +9,13 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_private;
struct drm_connector_state;
struct drm_modeset_acquire_ctx;
-
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
@@ -53,8 +52,8 @@ int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+int intel_dp_tunnel_mgr_init(struct intel_display *display);
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display);
#else
@@ -121,12 +120,12 @@ intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
}
static inline int
-intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+intel_dp_tunnel_mgr_init(struct intel_display *display)
{
return 0;
}
-static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
+static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {}
#endif /* CONFIG_DRM_I915_DP_TUNNEL */
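The intel_dp_tunnel.c hunks above all apply one mechanical conversion; shown once in isolation (a sketch that assumes the i915 display headers, not a standalone program):

static void example_log(struct intel_dp *intel_dp)
{
	/*
	 * Old style: struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	 *            drm_dbg_kms(&i915->drm, ...);
	 * New style: derive struct intel_display from the object at hand
	 * and pass display->drm (already a pointer, no '&') to the
	 * drm_* helpers.
	 */
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm, "tunnel state updated\n");
}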
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d67d5e2fd570..340dfce480b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1823,6 +1823,7 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -1833,7 +1834,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
@@ -2004,6 +2005,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2012,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
@@ -2150,6 +2152,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2158,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 292d163036b1..f490b2157828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -3339,6 +3339,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -3379,7 +3380,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
}
/* Eliminate DPLLs from consideration if reserved by HTI */
- dpll_mask &= ~intel_hti_dpll_mask(i915);
+ dpll_mask &= ~intel_hti_dpll_mask(display);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 73a1918e2537..3a6d99044828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -317,3 +317,7 @@ void intel_dpt_destroy(struct i915_address_space *vm)
i915_vm_put(&dpt->vm);
}
+u64 intel_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return dpt_vma->node.start;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index ff18a525bfbe..1f88b0ee17e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DPT_H__
#define __INTEL_DPT_H__
+#include <linux/types.h>
+
struct drm_i915_private;
struct i915_address_space;
@@ -20,5 +22,6 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
+u64 intel_dpt_offset(struct i915_vma *dpt_vma);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 2ab3765f6c06..da24e041d269 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -42,7 +43,8 @@ struct intel_dsb {
*/
unsigned int ins_start_offset;
- int dewake_scanline;
+ u32 chicken;
+ int hw_dewake_scanline;
};
/**
@@ -82,6 +84,93 @@ struct intel_dsb {
#define DSB_OPCODE_POLL 0xA
/* see DSB_REG_VALUE_MASK */
+static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* VRR will be enabled afterwards, if necessary */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return false;
+
+ /* VRR will have been disabled during intel_pre_plane_update() */
+ return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
+}
+
+static const struct intel_crtc_state *
+pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return new_crtc_state;
+ else
+ return old_crtc_state;
+}
+
+static int dsb_vtotal(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+
+ if (pre_commit_is_vrr_active(state, crtc))
+ return crtc_state->vrr.vmax;
+ else
+ return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
+}
+
+static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ unsigned int latency = skl_watermark_max_latency(i915, 0);
+
+ return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
+ intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
+}
+
+static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+
+ return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
+}
+
+static int dsb_scanline_to_hw(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, int scanline)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ int vtotal = dsb_vtotal(state, crtc);
+
+ return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
+}
+
+static u32 dsb_chicken(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ if (pre_commit_is_vrr_active(state, crtc))
+ return DSB_SKIP_WAITS_EN |
+ DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return DSB_SKIP_WAITS_EN;
+}
+
static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
@@ -281,6 +370,79 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb)
intel_dsb_noop(dsb, 4);
}
+static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
+ u32 opcode, int lower, int upper)
+{
+ u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
+ ((u64)lower << DSB_SCANLINE_LOWER_SHIFT);
+
+ intel_dsb_emit(dsb, lower_32_bits(window),
+ (opcode << DSB_OPCODE_SHIFT) |
+ upper_32_bits(window));
+}
+
+static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower_in, int upper_in,
+ int lower_out, int upper_out)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+
+ lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
+ upper_in = dsb_scanline_to_hw(state, crtc, upper_in);
+
+ lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
+ upper_out = dsb_scanline_to_hw(state, crtc, upper_out);
+
+ if (upper_in >= lower_in)
+ intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
+ lower_in, upper_in);
+ else if (upper_out >= lower_out)
+ intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
+ lower_out, upper_out);
+ else
+ drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
+}
+
+static void assert_dsl_ok(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ int vtotal = dsb_vtotal(state, crtc);
+
+ /*
+	 * Waiting for the entire frame doesn't make sense
+	 * (IN == don't wait, OUT == wait forever).
+ */
+ drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
+ "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
+ crtc->base.base.id, crtc->base.name, dsb->id,
+ start, end, vtotal);
+}
+
+void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ assert_dsl_ok(state, dsb, start, end);
+
+ intel_dsb_wait_dsl(state, dsb,
+ start, end,
+ end + 1, start - 1);
+}
+
+void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ assert_dsl_ok(state, dsb, start, end);
+
+ intel_dsb_wait_dsl(state, dsb,
+ end + 1, start - 1,
+ start, end);
+}
+
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
@@ -302,8 +464,10 @@ void intel_dsb_finish(struct intel_dsb *dsb)
/*
* DSB_FORCE_DEWAKE remains active even after DSB is
* disabled, so make sure to clear it (if set during
- * intel_dsb_commit()).
+ * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
+ * well for good measure.
*/
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
DSB_FORCE_DEWAKE, 0);
@@ -312,35 +476,109 @@ void intel_dsb_finish(struct intel_dsb *dsb)
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
-static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
+static u32 dsb_error_int_status(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- unsigned int latency = skl_watermark_max_latency(i915, 0);
- int vblank_start;
+ u32 errors;
- if (crtc_state->vrr.enable)
- vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- else
- vblank_start = intel_mode_vblank_start(adjusted_mode);
+ errors = DSB_GTT_FAULT_INT_STATUS |
+ DSB_RSPTIMEOUT_INT_STATUS |
+ DSB_POLL_ERR_INT_STATUS;
+
+ /*
+ * All the non-existing status bits operate as
+ * normal r/w bits, so any attempt to clear them
+	 * will just end up setting them. Never do that, so
+	 * that we don't mistake them for actual error interrupts.
+ */
+ if (DISPLAY_VER(display) >= 14)
+ errors |= DSB_ATS_FAULT_INT_STATUS;
- return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
+ return errors;
}
-static u32 dsb_chicken(struct intel_crtc *crtc)
+static u32 dsb_error_int_en(struct intel_display *display)
{
- if (crtc->mode_flags & I915_MODE_FLAG_VRR)
- return DSB_SKIP_WAITS_EN |
- DSB_CTRL_WAIT_SAFE_WINDOW |
- DSB_CTRL_NO_WAIT_VBLANK |
- DSB_INST_WAIT_SAFE_WINDOW |
- DSB_INST_NO_WAIT_VBLANK;
- else
- return DSB_SKIP_WAITS_EN;
+ u32 errors;
+
+ errors = DSB_GTT_FAULT_INT_EN |
+ DSB_RSPTIMEOUT_INT_EN |
+ DSB_POLL_ERR_INT_EN;
+
+ if (DISPLAY_VER(display) >= 14)
+ errors |= DSB_ATS_FAULT_INT_EN;
+
+ return errors;
+}
+
+static void _intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ u32 ctrl)
+{
+ struct intel_display *display = to_intel_display(state->base.dev);
+ struct intel_crtc *crtc = dsb->crtc;
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
+ return;
+
+ tail = chained_dsb->free_pos * 4;
+ if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ return;
+
+ intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
+ ctrl | DSB_ENABLE);
+
+ intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
+ dsb_chicken(state, crtc));
+
+ intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
+ dsb_error_int_en(display));
+
+ if (ctrl & DSB_WAIT_FOR_VBLANK) {
+ int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
+ int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);
+
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
+ DSB_ENABLE_DEWAKE |
+ DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
+ }
+
+ intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
+ intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
+
+ intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
+ intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
+
+ if (ctrl & DSB_WAIT_FOR_VBLANK) {
+ /*
+ * Keep DEwake alive via the first DSB, in
+ * case we're already past dewake_scanline,
+		 * at which point DSB_ENABLE_DEWAKE on the
+		 * second DSB won't do its job.
+ */
+ intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
+ DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);
+
+ intel_dsb_wait_scanline_out(state, dsb,
+ dsb_dewake_scanline_start(state, crtc),
+ dsb_dewake_scanline_end(state, crtc));
+ }
+}
+
+void intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ bool wait_for_vblank)
+{
+ _intel_dsb_chain(state, dsb, chained_dsb,
+ wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- int dewake_scanline)
+ int hw_dewake_scanline)
{
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
@@ -361,15 +599,17 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
ctrl | DSB_ENABLE);
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
- dsb_chicken(crtc));
+ dsb->chicken);
+
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
+ dsb_error_int_en(display));
intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
- if (dewake_scanline >= 0) {
- int diff, hw_dewake_scanline;
-
- hw_dewake_scanline = intel_crtc_scanline_to_hw(crtc, dewake_scanline);
+ if (hw_dewake_scanline >= 0) {
+ int diff, position;
intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
DSB_ENABLE_DEWAKE |
@@ -379,7 +619,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
* Force DEwake immediately if we're already past
* or close to racing past the target scanline.
*/
- diff = dewake_scanline - intel_get_crtc_scanline(crtc);
+ position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
+
+ diff = hw_dewake_scanline - position;
intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
(diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
DSB_BLOCK_DEWAKE_EXTENSION);
@@ -401,7 +643,7 @@ void intel_dsb_commit(struct intel_dsb *dsb,
{
_intel_dsb_commit(dsb,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
- wait_for_vblank ? dsb->dewake_scanline : -1);
+ wait_for_vblank ? dsb->hw_dewake_scanline : -1);
}
void intel_dsb_wait(struct intel_dsb *dsb)
@@ -430,6 +672,9 @@ void intel_dsb_wait(struct intel_dsb *dsb)
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
+
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}
/**
@@ -451,8 +696,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
unsigned int max_cmds)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
unsigned int size;
@@ -486,7 +729,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- dsb->dewake_scanline = intel_dsb_dewake_scanline(crtc_state);
+
+ dsb->chicken = dsb_chicken(state, crtc);
+ dsb->hw_dewake_scanline =
+ dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));
return dsb;
@@ -513,3 +759,18 @@ void intel_dsb_cleanup(struct intel_dsb *dsb)
intel_dsb_buffer_cleanup(&dsb->dsb_buf);
kfree(dsb);
}
+
+void intel_dsb_irq_handler(struct intel_display *display,
+ enum pipe pipe, enum intel_dsb_id dsb_id)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe);
+ u32 tmp, errors;
+
+ tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);
+
+ errors = tmp & dsb_error_int_status(display);
+ if (errors)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
+ crtc->base.base.id, crtc->base.name, dsb_id, errors);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index bb42749f2ea4..c352c12aa59f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -13,8 +13,11 @@
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsb;
+enum pipe;
+
enum intel_dsb_id {
INTEL_DSB_0,
INTEL_DSB_1,
@@ -36,9 +39,22 @@ void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
void intel_dsb_noop(struct intel_dsb *dsb, int count);
void intel_dsb_nonpost_start(struct intel_dsb *dsb);
void intel_dsb_nonpost_end(struct intel_dsb *dsb);
+void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower, int upper);
+void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower, int upper);
+void intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ bool wait_for_vblank);
void intel_dsb_commit(struct intel_dsb *dsb,
bool wait_for_vblank);
void intel_dsb_wait(struct intel_dsb *dsb);
+void intel_dsb_irq_handler(struct intel_display *display,
+ enum pipe pipe, enum intel_dsb_id dsb_id);
+
#endif
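The scanline bookkeeping in intel_dsb.c above is plain modular arithmetic. The standalone program below (made-up timings: vtotal 1125, scanline offset 1) mirrors dsb_scanline_to_hw() and shows why intel_dsb_wait_scanline_in() can be emitted as a WAIT_DSL_OUT on the complementary range when the IN window wraps past vtotal:

#include <stdio.h>

/* Mirrors dsb_scanline_to_hw(): shift by the scanline offset, wrap at vtotal. */
static int scanline_to_hw(int scanline, int vtotal, int offset)
{
	return (scanline + vtotal - offset) % vtotal;
}

int main(void)
{
	int vtotal = 1125, offset = 1;	/* assumed timings */
	int start = 1100, end = 10;	/* window wraps around vtotal */
	int in_lo = scanline_to_hw(start, vtotal, offset);	/* 1099 */
	int in_hi = scanline_to_hw(end, vtotal, offset);	/* 9 */
	int out_lo = scanline_to_hw(end + 1, vtotal, offset);	/* 10 */
	int out_hi = scanline_to_hw(start - 1, vtotal, offset);	/* 1098 */

	/*
	 * in_hi < in_lo: the IN form wraps, so the non-wrapping OUT
	 * form of the complement is used instead.
	 */
	printf("IN %d-%d (wraps), OUT %d-%d (used)\n",
	       in_lo, in_hi, out_lo, out_hi);
	return 0;
}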
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index e99c94edfaae..e8ba4ccd99d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -66,7 +66,7 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
- /* i2c bus associated with the slave device */
+ /* i2c bus associated with the target device */
int i2c_bus_num;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 072ef1d62bda..d8951464bd2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -56,7 +56,7 @@
#define MIPI_PORT_SHIFT 3
struct i2c_adapter_lookup {
- u16 slave_addr;
+ u16 target_addr;
struct intel_dsi *intel_dsi;
acpi_handle dev_handle;
};
@@ -443,7 +443,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
if (!i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
- if (lookup->slave_addr != sb->slave_address)
+ if (lookup->target_addr != sb->slave_address)
return 1;
status = acpi_get_handle(lookup->dev_handle,
@@ -460,12 +460,12 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
}
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
- const u16 slave_addr)
+ const u16 target_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
struct i2c_adapter_lookup lookup = {
- .slave_addr = slave_addr,
+ .target_addr = target_addr,
.intel_dsi = intel_dsi,
.dev_handle = acpi_device_handle(adev),
};
@@ -476,7 +476,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
}
#else
static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
- const u16 slave_addr)
+ const u16 target_addr)
{
}
#endif
@@ -488,17 +488,17 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
struct i2c_msg msg;
int ret;
u8 vbt_i2c_bus_num = *(data + 2);
- u16 slave_addr = *(u16 *)(data + 3);
+ u16 target_addr = *(u16 *)(data + 3);
u8 reg_offset = *(data + 5);
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d client-addr 0x%02x reg 0x%02x data %*ph\n",
- vbt_i2c_bus_num, slave_addr, reg_offset, payload_size, data + 7);
+ drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
- i2c_acpi_find_adapter(intel_dsi, slave_addr);
+ i2c_acpi_find_adapter(intel_dsi, target_addr);
}
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
@@ -514,7 +514,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
payload_data[0] = reg_offset;
memcpy(&payload_data[1], (data + 7), payload_size);
- msg.addr = slave_addr;
+ msg.addr = target_addr;
msg.flags = 0;
msg.len = payload_size + 1;
msg.buf = payload_data;
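For reference, the byte layout decoded by mipi_exec_i2c() above, restated as a standalone program. The element bytes here are invented; the unaligned little-endian u16 read matches the kernel's *(u16 *)(data + 3) on LE hosts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* VBT MIPI I2C element: [2]=bus, [3..4]=target addr, [5]=reg,
	 * [6]=payload size, [7..]=payload (example values). */
	const uint8_t elem[] = { 0, 0, 1, 0x2c, 0x00, 0x10, 2, 0xde, 0xad };
	uint16_t target_addr;

	memcpy(&target_addr, &elem[3], sizeof(target_addr));
	printf("bus %d target 0x%02x reg 0x%02x len %d\n",
	       elem[2], target_addr, elem[5], elem[6]);
	return 0;
}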
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 091824334f26..12e7628cbecf 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -60,42 +60,42 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.port = PORT_C,
- .slave_addr = SIL164_ADDR,
+ .target_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.port = PORT_C,
- .slave_addr = CH7xxx_ADDR,
+ .target_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.port = PORT_C,
- .slave_addr = 0x75, /* For some ch7010 */
+ .target_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.port = PORT_A,
- .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .target_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.port = PORT_C,
- .slave_addr = TFP410_ADDR,
+ .target_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.port = PORT_C,
- .slave_addr = 0x75,
+ .target_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
},
@@ -103,7 +103,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
.port = PORT_B,
- .slave_addr = NS2501_ADDR,
+ .target_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
},
};
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index af7b04539b93..4bf476656b8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -38,7 +38,7 @@ struct intel_dvo_device {
enum port port;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
- int slave_addr;
+ int target_addr;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index f23547a88b1f..5be7bb43e2e0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -163,6 +163,14 @@ struct intel_modifier_desc {
static const struct intel_modifier_desc intel_modifiers[] = {
{
+ .modifier = I915_FORMAT_MOD_4_TILED_LNL_CCS,
+ .display_ver = { 20, -1 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_BMG_CCS,
+ .display_ver = { 14, -1 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_NEED64K_PHYS,
+ }, {
.modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS,
.display_ver = { 14, 14 },
.plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC,
@@ -412,6 +420,24 @@ bool intel_fb_is_mc_ccs_modifier(u64 modifier)
INTEL_PLANE_CAP_CCS_MC);
}
+/**
+ * intel_fb_needs_64k_phys - Check if modifier requires 64k physical placement
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * %true if @modifier requires 64k aligned physical pages.
+ */
+bool intel_fb_needs_64k_phys(u64 modifier)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
+
+ if (!md)
+ return false;
+
+ return plane_caps_contain_any(md->plane_caps,
+ INTEL_PLANE_CAP_NEED64K_PHYS);
+}
+
static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
u8 display_ver_from, u8 display_ver_until)
{
@@ -437,6 +463,14 @@ static bool plane_has_modifier(struct drm_i915_private *i915,
HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes)
return false;
+ if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
+ (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ return false;
+
+ if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
+ (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ return false;
+
return true;
}
@@ -653,6 +687,8 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
else
return 512;
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 6dee0c8b7f22..10de437e8ef8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -28,11 +28,13 @@ struct intel_plane_state;
#define INTEL_PLANE_CAP_TILING_Y BIT(4)
#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
#define INTEL_PLANE_CAP_TILING_4 BIT(6)
+#define INTEL_PLANE_CAP_NEED64K_PHYS BIT(7)
bool intel_fb_is_tiled_modifier(u64 modifier);
bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
+bool intel_fb_needs_64k_phys(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
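The two new gating checks added to plane_has_modifier() above reduce to a graphics-version floor plus a discrete/integrated split. A standalone restatement (inputs assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

/* BMG CCS: ver >= 20, discrete only; LNL CCS: ver >= 20, integrated only. */
static bool bmg_ccs_ok(int gfx_ver, bool is_dgfx)
{
	return gfx_ver >= 20 && is_dgfx;
}

static bool lnl_ccs_ok(int gfx_ver, bool is_dgfx)
{
	return gfx_ver >= 20 && !is_dgfx;
}

int main(void)
{
	printf("BMG dGPU v20: %d, LNL dGPU v20: %d, LNL iGPU v20: %d\n",
	       bmg_ccs_ok(20, true), lnl_ccs_ok(20, true),
	       lnl_ccs_ok(20, false));	/* 1, 0, 1 */
	return 0;
}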
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 67116c9f1464..52b79bacef4d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -56,17 +56,18 @@
#include "intel_display_device.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
-#define for_each_fbc_id(__dev_priv, __fbc_id) \
+#define for_each_fbc_id(__display, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
- for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
+ for_each_if(DISPLAY_RUNTIME_INFO(__display)->fbc_mask & BIT(__fbc_id))
-#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
- for_each_fbc_id((__dev_priv), (__fbc_id)) \
- for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
+#define for_each_intel_fbc(__display, __fbc, __fbc_id) \
+ for_each_fbc_id((__display), (__fbc_id)) \
+ for_each_if((__fbc) = (__display)->fbc[(__fbc_id)])
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@@ -89,7 +90,7 @@ struct intel_fbc_state {
};
struct intel_fbc {
- struct drm_i915_private *i915;
+ struct intel_display *display;
const struct intel_fbc_funcs *funcs;
/*
@@ -139,21 +140,24 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
return stride;
}
+static unsigned int intel_fbc_cfb_cpp(void)
+{
+ return 4; /* FBC always 4 bytes per pixel */
+}
+
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int cpp = intel_fbc_cfb_cpp();
return intel_fbc_plane_stride(plane_state) * cpp;
}
/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display,
+ unsigned int cpp, unsigned int width)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int limit = 4; /* 1:4 compression limit is the worst case */
- unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
- unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
unsigned int height = 4; /* FBC segment is 4 lines */
unsigned int stride;
@@ -164,7 +168,7 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane
* Wa_16011863758: icl+
* Avoid some hardware segment address miscalculation.
*/
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
stride += 64;
/*
@@ -178,40 +182,67 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane
}
/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int _intel_fbc_cfb_stride(struct intel_display *display,
+ unsigned int cpp, unsigned int width,
+ unsigned int stride)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- unsigned int stride = _intel_fbc_cfb_stride(plane_state);
-
/*
* At least some of the platforms require each 4 line segment to
* be 512 byte aligned. Aligning each line to 512 bytes guarantees
* that regardless of the compression limit we choose later.
*/
- if (DISPLAY_VER(i915) >= 9)
- return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
+ if (DISPLAY_VER(display) >= 9)
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(display, cpp, width));
else
return stride;
}
-static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
+static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
+ unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ unsigned int cpp = intel_fbc_cfb_cpp();
- if (DISPLAY_VER(i915) == 7)
- lines = min(lines, 2048);
- else if (DISPLAY_VER(i915) >= 8)
- lines = min(lines, 2560);
+ return _intel_fbc_cfb_stride(display, cpp, width, stride);
+}
- return lines * intel_fbc_cfb_stride(plane_state);
+/*
+ * Maximum height the hardware will compress; on HSW+,
+ * additional lines (up to the actual plane height) will
+ * remain uncompressed.
+ */
+static unsigned int intel_fbc_max_cfb_height(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 8)
+ return 2560;
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ return 2048;
+ else
+ return 1536;
+}
+
+static unsigned int _intel_fbc_cfb_size(struct intel_display *display,
+ unsigned int height, unsigned int stride)
+{
+ return min(height, intel_fbc_max_cfb_height(display)) * stride;
+}
+
+static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state));
}
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
- unsigned int stride = _intel_fbc_cfb_stride(plane_state);
+ unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
/*
@@ -222,23 +253,31 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* we always need to use the override there.
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
return stride_aligned * 4 / 64;
return 0;
}
+static bool intel_fbc_has_fences(struct intel_display *display)
+{
+ struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
+
+ return intel_gt_support_legacy_fencing(to_gt(i915));
+}
+
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cfb_stride;
u32 fbc_ctl;
cfb_stride = fbc_state->cfb_stride / fbc->limit;
/* FBC_CTL wants 32B or 64B units */
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
cfb_stride = (cfb_stride / 32) - 1;
else
cfb_stride = (cfb_stride / 64) - 1;
@@ -272,21 +311,21 @@ static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = intel_de_read(i915, FBC_CONTROL);
+ fbc_ctl = intel_de_read(display, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- intel_de_write(i915, FBC_CONTROL, fbc_ctl);
+ intel_de_write(display, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_de_wait_for_clear(i915, FBC_STATUS,
+ if (intel_de_wait_for_clear(display, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
+ drm_dbg_kms(display->drm, "FBC idle timed out\n");
return;
}
}
@@ -294,32 +333,32 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
int i;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- intel_de_write(i915, FBC_TAG(i), 0);
+ intel_de_write(display, FBC_TAG(i), 0);
- if (DISPLAY_VER(i915) == 4) {
- intel_de_write(i915, FBC_CONTROL2,
+ if (DISPLAY_VER(display) == 4) {
+ intel_de_write(display, FBC_CONTROL2,
i965_fbc_ctl2(fbc));
- intel_de_write(i915, FBC_FENCE_OFF,
+ intel_de_write(display, FBC_FENCE_OFF,
fbc_state->fence_y_offset);
}
- intel_de_write(i915, FBC_CONTROL,
+ intel_de_write(display, FBC_CONTROL,
FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
+ return intel_de_read(fbc->display, FBC_CONTROL) & FBC_CTL_EN;
}
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, FBC_STATUS) &
+ return intel_de_read(fbc->display, FBC_STATUS) &
(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}
@@ -327,7 +366,7 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = fbc->i915;
+ struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane)));
@@ -335,13 +374,14 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_fb),
U32_MAX));
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
@@ -364,7 +404,7 @@ static void i965_fbc_nuke(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = fbc->i915;
+ struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane)));
@@ -397,7 +437,8 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
@@ -409,7 +450,7 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
if (fbc_state->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
- if (DISPLAY_VER(i915) < 6)
+ if (DISPLAY_VER(display) < 6)
dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
}
@@ -419,43 +460,43 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, DPFC_FENCE_YOFF,
+ intel_de_write(display, DPFC_FENCE_YOFF,
fbc_state->fence_y_offset);
- intel_de_write(i915, DPFC_CONTROL,
+ intel_de_write(display, DPFC_CONTROL,
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(display, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(display, DPFC_CONTROL, dpfc_ctl);
}
}
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(fbc->display, DPFC_CONTROL) & DPFC_CTL_EN;
}
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ return intel_de_read(fbc->display, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, DPFC_CB_BASE,
+ intel_de_write(display, DPFC_CB_BASE,
i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
@@ -471,43 +512,43 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = {
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
+ intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id),
fbc_state->fence_y_offset);
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
+ dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id));
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
}
}
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
+ return intel_de_read(fbc->display, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
+ return intel_de_read(fbc->display, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
+ intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
@@ -523,14 +564,14 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = {
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 ctl = 0;
if (fbc_state->fence_id >= 0)
ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
- intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
- intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
+ intel_de_write(display, SNB_DPFC_CTL_SA, ctl);
+ intel_de_write(display, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}
static void snb_fbc_activate(struct intel_fbc *fbc)
@@ -542,10 +583,10 @@ static void snb_fbc_activate(struct intel_fbc *fbc)
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
- intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
+ intel_de_write(display, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
+ intel_de_posting_read(display, MSG_FBC_REND_STATE(fbc->id));
}
static const struct intel_fbc_funcs snb_fbc_funcs = {
@@ -560,20 +601,20 @@ static const struct intel_fbc_funcs snb_fbc_funcs = {
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 val = 0;
if (fbc_state->override_cfb_stride)
val |= FBC_STRIDE_OVERRIDE |
FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
- intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
+ intel_de_write(display, GLK_FBC_STRIDE(fbc->id), val);
}
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 val = 0;
/* Display WA #0529: skl, kbl, bxt. */
@@ -581,7 +622,7 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
val |= CHICKEN_FBC_STRIDE_OVERRIDE |
CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
- intel_de_rmw(i915, CHICKEN_MISC_4,
+ intel_de_rmw(display, CHICKEN_MISC_4,
CHICKEN_FBC_STRIDE_OVERRIDE |
CHICKEN_FBC_STRIDE_MASK, val);
}
@@ -589,7 +630,8 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
@@ -597,7 +639,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
if (IS_IVYBRIDGE(i915))
dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id);
if (fbc_state->fence_id >= 0)
@@ -611,35 +653,35 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
glk_fbc_program_cfb_stride(fbc);
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (intel_gt_support_legacy_fencing(to_gt(i915)))
+ if (intel_fbc_has_fences(display))
snb_fbc_program_fence(fbc);
/* wa_14019417088 Alternative WA */
dpfc_ctl = ivb_dpfc_ctl(fbc);
- if (DISPLAY_VER(i915) >= 20)
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | dpfc_ctl);
}
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
+ return intel_de_read(fbc->display, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
bool enable)
{
- intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_rmw(fbc->display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}
@@ -684,10 +726,10 @@ static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
lockdep_assert_held(&fbc->lock);
- drm_WARN_ON(&i915->drm, fbc->flip_pending);
+ drm_WARN_ON(display->drm, fbc->flip_pending);
trace_intel_fbc_nuke(fbc->state.plane);
@@ -714,16 +756,19 @@ static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
fbc->no_fbc_reason = reason;
}
-static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
+static u64 intel_fbc_cfb_base_max(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return BIT_ULL(28);
else
return BIT_ULL(32);
}
-static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
+static u64 intel_fbc_stolen_end(struct intel_display *display)
{
+ struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -731,12 +776,12 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
- (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
+ (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)))
end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
- return min(end, intel_fbc_cfb_base_max(i915));
+ return min(end, intel_fbc_cfb_base_max(display));
}
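
A worked example of the clamp computed above, assuming a hypothetical Broadwell system with 64 MiB of stolen memory (values invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t stolen_size = 64ull << 20;           /* pretend i915_gem_stolen_area_size() */
            uint64_t end = stolen_size - 8 * 1024 * 1024; /* keep off the last 8 MiB */
            uint64_t base_max = 1ull << 28;               /* intel_fbc_cfb_base_max(): BIT_ULL(28) */

            /* the CFB must sit below both limits */
            printf("CFB end limit: %llu\n",
                   (unsigned long long)(end < base_max ? end : base_max));
            return 0;
    }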
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
@@ -744,8 +789,10 @@ static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
}
-static int intel_fbc_max_limit(struct drm_i915_private *i915)
+static int intel_fbc_max_limit(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(i915))
return 1;
@@ -760,8 +807,9 @@ static int intel_fbc_max_limit(struct drm_i915_private *i915)
static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct drm_i915_private *i915 = fbc->i915;
- u64 end = intel_fbc_stolen_end(i915);
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
@@ -772,7 +820,7 @@ static int find_compression_limit(struct intel_fbc *fbc,
if (ret == 0)
return limit;
- for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
+ for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
@@ -785,15 +833,16 @@ static int find_compression_limit(struct intel_fbc *fbc,
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_fb));
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_llb));
- if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
+ if (DISPLAY_VER(display) < 5 && !IS_G4X(i915)) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
4096, 4096);
if (ret)
@@ -804,12 +853,12 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
if (!ret)
goto err_llb;
else if (ret > min_limit)
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
fbc->limit = ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
return 0;
@@ -819,7 +868,8 @@ err_llb:
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
if (i915_gem_stolen_initialized(i915))
- drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ drm_info_once(display->drm,
+ "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
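
find_compression_limit() above implements a simple back-off: if the full-size CFB does not fit in stolen memory, halve the size and double the compression limit until the platform maximum is reached. A standalone sketch of that search (the callback and names are invented; the real code allocates from stolen memory directly, and its iteration order differs slightly):

    /* returns the first limit at which try_alloc() succeeds, or 0 if none fit */
    static int find_limit_sketch(unsigned int size, int min_limit, int max_limit,
                                 int (*try_alloc)(unsigned int size))
    {
            int limit = min_limit;

            size /= limit;                  /* first attempt at the minimum ratio */
            if (try_alloc(size) == 0)
                    return limit;

            for (limit <<= 1; limit <= max_limit; limit <<= 1) {
                    size >>= 1;             /* half the CFB per doubling of the limit */
                    if (try_alloc(size) == 0)
                            return limit;
            }
            return 0;
    }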
@@ -830,14 +880,15 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
/*
* WaFbcHighMemBwCorruptionAvoidance:skl,bxt
* Display WA #0883: skl,bxt
*/
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_DISABLE_DUMMY0);
}
@@ -847,24 +898,25 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
* WaFbcNukeOnHostModify:skl,kbl,cfl
* Display WA #0873: skl,kbl,cfl
*/
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
/* Wa_1409120013:icl,jsl,tgl,dg1 */
- if (IS_DISPLAY_VER(i915, 11, 12))
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ if (IS_DISPLAY_VER(display, 11, 12))
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ if (DISPLAY_VER(display) >= 11 && !IS_DG2(i915))
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
@@ -875,12 +927,12 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
-void intel_fbc_cleanup(struct drm_i915_private *i915)
+void intel_fbc_cleanup(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id) {
+ for_each_intel_fbc(display, fbc, fbc_id) {
mutex_lock(&fbc->lock);
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
@@ -932,15 +984,16 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return icl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return skl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) == 4)
+ else if (DISPLAY_VER(display) == 4)
return i965_fbc_stride_is_valid(plane_state);
else
return i8xx_fbc_stride_is_valid(plane_state);
@@ -948,7 +1001,7 @@ static bool stride_is_valid(const struct intel_plane_state *plane_state)
static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -958,7 +1011,7 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
return false;
return true;
default:
@@ -968,7 +1021,8 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -1003,11 +1057,12 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_pixel_format_is_valid(plane_state);
else
return i8xx_fbc_pixel_format_is_valid(plane_state);
@@ -1037,43 +1092,52 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_stat
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return skl_fbc_rotation_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_rotation_is_valid(plane_state);
else
return i8xx_fbc_rotation_is_valid(plane_state);
}
+static void intel_fbc_max_surface_size(struct intel_display *display,
+ unsigned int *w, unsigned int *h)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 11) {
+ *w = 8192;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 10) {
+ *w = 5120;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 7) {
+ *w = 4096;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ *w = 4096;
+ *h = 2048;
+ } else {
+ *w = 2048;
+ *h = 1536;
+ }
+}
+
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
* the X and Y offset registers. That's why we include the src x/y offsets
* instead of just looking at the plane size.
*/
-static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
+static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(i915) >= 11) {
- max_w = 8192;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 10) {
- max_w = 5120;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 7) {
- max_w = 4096;
- max_h = 4096;
- } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
- max_w = 4096;
- max_h = 2048;
- } else {
- max_w = 2048;
- max_h = 1536;
- }
+ intel_fbc_max_surface_size(display, &max_w, &max_h);
effective_w = plane_state->view.color_plane[0].x +
(drm_rect_width(&plane_state->uapi.src) >> 16);
@@ -1083,24 +1147,32 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
return effective_w <= max_w && effective_h <= max_h;
}
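
Because the hardware tracks from the plane base address, the check above adds the source x/y offsets before comparing against the limits. A self-contained illustration (numbers invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool surface_size_ok(unsigned int x, unsigned int y,
                                unsigned int src_w, unsigned int src_h,
                                unsigned int max_w, unsigned int max_h)
    {
            /* effective size includes the offsets, like the driver check */
            return x + src_w <= max_w && y + src_h <= max_h;
    }

    int main(void)
    {
            /* a 3840x2160 source panned to x=4400 busts an 8192-wide limit */
            printf("%d\n", surface_size_ok(4400, 0, 3840, 2160, 8192, 4096));
            return 0;
    }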
-static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+static void intel_fbc_max_plane_size(struct intel_display *display,
+ unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- unsigned int w, h, max_w, max_h;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 10) {
- max_w = 5120;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
- max_w = 4096;
- max_h = 4096;
- } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
- max_w = 4096;
- max_h = 2048;
+ if (DISPLAY_VER(display) >= 10) {
+ *w = 5120;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(i915)) {
+ *w = 4096;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ *w = 4096;
+ *h = 2048;
} else {
- max_w = 2048;
- max_h = 1536;
+ *w = 2048;
+ *h = 1536;
}
+}
+
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int w, h, max_w, max_h;
+
+ intel_fbc_max_plane_size(display, &max_w, &max_h);
w = drm_rect_width(&plane_state->uapi.src) >> 16;
h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -1122,9 +1194,9 @@ static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return skl_fbc_tiling_valid(plane_state);
else
return i8xx_fbc_tiling_valid(plane_state);
@@ -1134,7 +1206,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state =
@@ -1152,8 +1224,8 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
- drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
- !intel_gt_support_legacy_fencing(to_gt(i915)));
+ drm_WARN_ON(display->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !intel_fbc_has_fences(display));
if (plane_state->flags & PLANE_HAS_FENCE)
fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
@@ -1167,7 +1239,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
/*
* The use of a CPU fence is one of two ways to detect writes by the
@@ -1181,7 +1253,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*/
- return DISPLAY_VER(i915) >= 9 ||
+ return DISPLAY_VER(display) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}
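
The comment above boils down to a two-branch policy: on display version 9+, software frontbuffer tracking is sufficient, while older hardware needs a live fence so it can observe CPU writes. Sketched as a standalone predicate (illustrative, not the driver's helper):

    #include <stdbool.h>

    static bool fence_ok_sketch(int display_ver, bool has_fence, int fence_id)
    {
            if (display_ver >= 9)
                    return true;                    /* SW tracking covers CPU writes */
            return has_fence && fence_id != -1;     /* older HW needs a real fence */
    }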
@@ -1206,7 +1278,8 @@ static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1227,7 +1300,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!i915->display.params.enable_fbc) {
+ if (!display->params.enable_fbc) {
plane_state->no_fbc_reason = "disabled per module param or by default";
return 0;
}
@@ -1237,6 +1310,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ if (intel_display_needs_wa_16023588340(i915)) {
+ plane_state->no_fbc_reason = "Wa_16023588340";
+ return 0;
+ }
+
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
plane_state->no_fbc_reason = "VT-d enabled";
@@ -1260,15 +1338,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*/
- if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update &&
+ if (IS_DISPLAY_VER(display, 12, 14) && crtc_state->has_sel_update &&
!crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
/* Wa_14016291713 */
- if ((IS_DISPLAY_VER(i915, 12, 13) ||
- IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
+ if ((IS_DISPLAY_VER(display, 12, 13) ||
+ IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
crtc_state->has_psr && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
@@ -1294,7 +1372,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (DISPLAY_VER(i915) < 20 &&
+ if (DISPLAY_VER(display) < 20 &&
plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
fb->format->has_alpha) {
plane_state->no_fbc_reason = "per-pixel alpha not supported";
@@ -1306,7 +1384,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ if (!intel_fbc_surface_size_ok(plane_state)) {
plane_state->no_fbc_reason = "surface size too big";
return 0;
}
@@ -1316,14 +1394,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(i915) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
@@ -1399,7 +1477,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
struct intel_fbc *fbc = plane->fbc;
bool need_vblank_wait = false;
@@ -1425,7 +1503,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
* and skipping the extra vblank wait before the plane update
* if at least one frame has already passed.
*/
- if (fbc->activated && DISPLAY_VER(i915) >= 10)
+ if (fbc->activated && DISPLAY_VER(display) >= 10)
need_vblank_wait = true;
fbc->activated = false;
@@ -1459,13 +1537,13 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
struct intel_plane *plane = fbc->state.plane;
lockdep_assert_held(&fbc->lock);
- drm_WARN_ON(&i915->drm, fbc->active);
+ drm_WARN_ON(display->drm, fbc->active);
- drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
__intel_fbc_cleanup_cfb(fbc);
@@ -1542,7 +1620,7 @@ void intel_fbc_invalidate(struct drm_i915_private *i915,
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(&i915->display, fbc, fbc_id)
__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
}
@@ -1581,7 +1659,7 @@ void intel_fbc_flush(struct drm_i915_private *i915,
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(&i915->display, fbc, fbc_id)
__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}
@@ -1606,7 +1684,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = plane->fbc;
@@ -1625,7 +1703,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
__intel_fbc_disable(fbc);
}
- drm_WARN_ON(&i915->drm, fbc->active);
+ drm_WARN_ON(display->drm, fbc->active);
fbc->no_fbc_reason = plane_state->no_fbc_reason;
if (fbc->no_fbc_reason)
@@ -1647,7 +1725,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
return;
}
- drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Enabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
@@ -1665,10 +1743,10 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc->base.dev);
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_fbc *fbc = plane->fbc;
if (!fbc || plane->pipe != crtc->pipe)
@@ -1713,7 +1791,8 @@ void intel_fbc_update(struct intel_atomic_state *state,
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
mutex_lock(&fbc->lock);
@@ -1721,7 +1800,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
if (fbc->underrun_detected || !fbc->state.plane)
goto out;
- drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(display->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(fbc, "FIFO underrun");
@@ -1734,14 +1813,14 @@ out:
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
cancel_work_sync(&fbc->underrun_work);
mutex_lock(&fbc->lock);
if (fbc->underrun_detected) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Re-allowing FBC after fifo underrun\n");
fbc->no_fbc_reason = "FIFO underrun cleared";
}
@@ -1752,22 +1831,24 @@ static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
/*
* intel_fbc_reset_underrun - reset FBC fifo underrun status.
- * @i915: the i915 device
+ * @display: display
*
* See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
* want to re-enable FBC after an underrun to increase test coverage.
*/
-void intel_fbc_reset_underrun(struct drm_i915_private *i915)
+void intel_fbc_reset_underrun(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_reset_underrun(fbc);
}
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = to_i915(fbc->display->drm);
+
/*
* There's no guarantee that underrun_detected won't be set to true
* right after this check and before the work is scheduled, but that's
@@ -1779,12 +1860,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
+ queue_work(i915->unordered_wq, &fbc->underrun_work);
}
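
The READ_ONCE() check above is intentionally racy: the worst outcome is queueing the work once more than necessary, which the workqueue tolerates. The same pattern in isolation (kernel-style sketch against a generic workqueue; not the driver's code):

    #include <linux/compiler.h>
    #include <linux/workqueue.h>

    static void maybe_queue_underrun_work(struct workqueue_struct *wq,
                                          struct work_struct *work, bool *detected)
    {
            /* racy by design: a stale false only costs one redundant queueing */
            if (READ_ONCE(*detected))
                    return;
            queue_work(wq, work);   /* no-op if the work is already pending */
    }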
/**
* intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
- * @i915: i915 device
+ * @display: display
*
* Without FBC, most underruns are harmless and don't really cause too many
* problems, except for an annoying message on dmesg. With FBC, underruns can
@@ -1796,12 +1877,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
*
* This function is called from the IRQ handler.
*/
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
+void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_handle_fifo_underrun_irq(fbc);
}
@@ -1814,15 +1895,17 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
* space to change the value during runtime without sanitizing it again. IGT
* relies on being able to change i915.enable_fbc at runtime.
*/
-static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
+static int intel_sanitize_fbc_option(struct intel_display *display)
{
- if (i915->display.params.enable_fbc >= 0)
- return !!i915->display.params.enable_fbc;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->params.enable_fbc >= 0)
+ return !!display->params.enable_fbc;
- if (!HAS_FBC(i915))
+ if (!HAS_FBC(display))
return 0;
- if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
+ if (IS_BROADWELL(i915) || DISPLAY_VER(display) >= 9)
return 1;
return 0;
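
intel_sanitize_fbc_option() resolves the usual tristate module-parameter convention: negative means auto, anything else is a forced boolean. The shape of it, standalone (illustrative):

    static int sanitize_tristate(int param, int platform_default)
    {
            if (param >= 0)
                    return !!param;         /* user forced it: normalise to 0/1 */
            return platform_default;        /* auto: e.g. 1 on BDW and display ver 9+ */
    }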
@@ -1833,9 +1916,10 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
plane->fbc = fbc;
}
-static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
+static struct intel_fbc *intel_fbc_create(struct intel_display *display,
enum intel_fbc_id fbc_id)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_fbc *fbc;
fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
@@ -1843,19 +1927,19 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
return NULL;
fbc->id = fbc_id;
- fbc->i915 = i915;
+ fbc->display = display;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
- if (DISPLAY_VER(i915) >= 7)
+ if (DISPLAY_VER(display) >= 7)
fbc->funcs = &ivb_fbc_funcs;
- else if (DISPLAY_VER(i915) == 6)
+ else if (DISPLAY_VER(display) == 6)
fbc->funcs = &snb_fbc_funcs;
- else if (DISPLAY_VER(i915) == 5)
+ else if (DISPLAY_VER(display) == 5)
fbc->funcs = &ilk_fbc_funcs;
else if (IS_G4X(i915))
fbc->funcs = &g4x_fbc_funcs;
- else if (DISPLAY_VER(i915) == 4)
+ else if (DISPLAY_VER(display) == 4)
fbc->funcs = &i965_fbc_funcs;
else
fbc->funcs = &i8xx_fbc_funcs;
@@ -1865,36 +1949,36 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
/**
* intel_fbc_init - Initialize FBC
- * @i915: the i915 device
+ * @display: display
*
* This function might be called during PM init process.
*/
-void intel_fbc_init(struct drm_i915_private *i915)
+void intel_fbc_init(struct intel_display *display)
{
enum intel_fbc_id fbc_id;
- i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
- drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
- i915->display.params.enable_fbc);
+ display->params.enable_fbc = intel_sanitize_fbc_option(display);
+ drm_dbg_kms(display->drm, "Sanitized enable_fbc value: %d\n",
+ display->params.enable_fbc);
- for_each_fbc_id(i915, fbc_id)
- i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+ for_each_fbc_id(display, fbc_id)
+ display->fbc[fbc_id] = intel_fbc_create(display, fbc_id);
}
/**
* intel_fbc_sanitize - Sanitize FBC
- * @i915: the i915 device
+ * @display: display
*
* Make sure FBC is initially disabled since we have no
* idea e.g. into which parts of stolen memory it might be
* scribbling.
*/
-void intel_fbc_sanitize(struct drm_i915_private *i915)
+void intel_fbc_sanitize(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id) {
+ for_each_intel_fbc(display, fbc, fbc_id) {
if (intel_fbc_hw_is_active(fbc))
intel_fbc_hw_deactivate(fbc);
}
@@ -1903,11 +1987,12 @@ void intel_fbc_sanitize(struct drm_i915_private *i915)
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
intel_wakeref_t wakeref;
- drm_modeset_lock_all(&i915->drm);
+ drm_modeset_lock_all(display->drm);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_lock(&fbc->lock);
@@ -1920,7 +2005,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
}
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -1936,7 +2021,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
mutex_unlock(&fbc->lock);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- drm_modeset_unlock_all(&i915->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
@@ -1993,12 +2078,12 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
}
/* FIXME: remove this once igt is on board with per-crtc stuff */
-void intel_fbc_debugfs_register(struct drm_i915_private *i915)
+void intel_fbc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
struct intel_fbc *fbc;
- fbc = i915->display.fbc[INTEL_FBC_A];
+ fbc = display->fbc[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 6720ec8ee8a2..ceae55458e14 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_fbc;
struct intel_plane;
struct intel_plane_state;
@@ -31,9 +32,9 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
-void intel_fbc_sanitize(struct drm_i915_private *dev_priv);
+void intel_fbc_init(struct intel_display *display);
+void intel_fbc_cleanup(struct intel_display *display);
+void intel_fbc_sanitize(struct intel_display *display);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
@@ -43,9 +44,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915);
-void intel_fbc_reset_underrun(struct drm_i915_private *i915);
+void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display);
+void intel_fbc_reset_underrun(struct intel_display *display);
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
-void intel_fbc_debugfs_register(struct drm_i915_private *i915);
+void intel_fbc_debugfs_register(struct intel_display *display);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index d33befd7994d..222cd0e1a2bc 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include <drm/drm_fixed.h>
+
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -304,7 +306,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915,
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
int pipe_bpp = min(crtc_state->pipe_bpp,
- to_bpp_int(crtc_state->max_link_bpp_x16));
+ fxp_q4_to_int(crtc_state->max_link_bpp_x16));
pipe_bpp = rounddown(pipe_bpp, 2 * 3);
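
The to_bpp_*() to fxp_q4_*() rename reflects that link bpp values are carried in Q4 fixed point, i.e. the integer value times 16. A minimal demonstration of the convention (macros written out here for illustration; the driver uses the helpers from <drm/drm_fixed.h>):

    #include <stdio.h>

    #define Q4_FROM_INT(x)  ((x) << 4)      /* value * 16 */
    #define Q4_TO_INT(x)    ((x) >> 4)      /* truncate the fraction */

    int main(void)
    {
            int bpp_x16 = Q4_FROM_INT(24) + 8;      /* 24.5 bpp in Q4 */

            /* fractional nibble scaled to 1/10000ths: prints "24.5000 bpp" */
            printf("%d.%04u bpp\n", Q4_TO_INT(bpp_x16), (bpp_x16 & 0xf) * 625);
            return 0;
    }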
@@ -340,7 +342,7 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = lane;
- intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
+ intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
lane, fdi_dotclock,
link_bw,
intel_dp_bw_fec_overhead(false),
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index e5e4ca7cc499..8949fbb1cc60 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -440,7 +440,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
- intel_fbc_handle_fifo_underrun_irq(dev_priv);
+ intel_fbc_handle_fifo_underrun_irq(&dev_priv->display);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 4923c340a0b6..af4576dee92a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -83,6 +83,8 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
+ struct intel_display *display = &i915->display;
+
/* Delay flushing when rings are still busy. */
spin_lock(&i915->display.fb_tracking.lock);
frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
@@ -96,7 +98,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
might_sleep();
intel_td_flush(i915);
intel_drrs_flush(i915, frontbuffer_bits);
- intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_psr_flush(display, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
@@ -172,6 +174,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
+ struct intel_display *display = &i915->display;
if (origin == ORIGIN_CS) {
spin_lock(&i915->display.fb_tracking.lock);
@@ -183,7 +186,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin);
might_sleep();
- intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_psr_invalidate(display, frontbuffer_bits, origin);
intel_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 9c8e1e91ff1c..6470f75106bd 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -478,7 +478,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915,
/*
* HW spec says that 512 byte burst reads need special treatment.
* But it doesn't talk about other multiples of 256 bytes. And we couldn't locate
- * an I2C slave, which supports such a lengthy burst read too for experiments.
+ * an I2C target that supports such a lengthy burst read, for experiments.
*
* So until things get clarified on HW support, to avoid burst read lengths
* that are multiples of 256 bytes (other than 512), the max burst read length is fixed at 767 bytes.
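
In practice the workaround described above just caps each burst read; a sketch of the chunking (the 767 constant follows from the comment, the helper name is invented):

    static unsigned int gmbus_burst_chunk(unsigned int remaining)
    {
            const unsigned int max_burst = 767;     /* dodge 256B multiples other than 512 */

            return remaining > max_burst ? max_burst : remaining;
    }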
@@ -701,7 +701,7 @@ clear_err:
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
- * BUS_ERROR raised by the slave's NAK.
+ * BUS_ERROR raised by the target's NAK.
*/
intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
intel_de_write_fw(i915, GMBUS1(i915), 0);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 3ebe035f382e..6980b98792c2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -42,11 +42,11 @@ intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
return;
if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
+ if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
0, HDCP_LINE_REKEY_DISABLE);
- else if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
+ else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
+ IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
intel_de_rmw(dev_priv,
TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
@@ -203,11 +203,16 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
/* Are the platform and sink HDCP 1.4 capable? */
bool intel_hdcp_get_capability(struct intel_connector *connector)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port;
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
+ if (!intel_attached_encoder(connector))
+ return capable;
+
+ dig_port = intel_attached_dig_port(connector);
+
if (!shim)
return capable;
@@ -2176,10 +2181,11 @@ static void intel_hdcp_check_work(struct work_struct *work)
DRM_HDCP_CHECK_PERIOD_MS);
}
-static int i915_hdcp_component_bind(struct device *i915_kdev,
+static int i915_hdcp_component_bind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
@@ -2190,10 +2196,11 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_hdcp_component_unbind(struct device *i915_kdev,
+static void i915_hdcp_component_unbind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 6548e71b4c49..35bdb532bbb3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -7,6 +7,7 @@
#include <drm/intel/i915_hdcp_interface.h>
#include "i915_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp_gsc_message.h"
int
@@ -15,17 +16,19 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
@@ -72,17 +75,19 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -135,17 +140,19 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -183,17 +190,19 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -234,17 +243,19 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -280,17 +291,19 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -330,17 +343,19 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -382,17 +397,19 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -442,6 +459,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -449,11 +467,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
if (!dev || !stream_ready || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -504,17 +523,19 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -549,17 +570,19 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 19498ee455fa..cd9ee171e0df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -60,30 +60,25 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
-{
- return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
-}
-
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
+ struct intel_display *display = to_intel_display(intel_hdmi);
u32 enabled_bits;
- enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(display) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ drm_WARN(display->drm,
+ intel_de_read(display, intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
static void
-assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+assert_hdmi_transcoder_func_disabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ drm_WARN(display->drm,
+ intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_FUNC_ENABLE,
"HDMI transcoder function enabled, expecting disabled\n");
}
@@ -158,35 +153,35 @@ static u32 hsw_infoframe_enable(unsigned int type)
}
static i915_reg_t
-hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+hsw_dip_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder,
unsigned int type,
int i)
{
switch (type) {
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- return HSW_TVIDEO_DIP_GMP_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_GMP_DATA(display, cpu_transcoder, i);
case DP_SDP_VSC:
- return HSW_TVIDEO_DIP_VSC_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VSC_DATA(display, cpu_transcoder, i);
case DP_SDP_ADAPTIVE_SYNC:
- return ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, cpu_transcoder, i);
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(display, cpu_transcoder, i);
case DP_SDP_PPS:
- return ICL_VIDEO_DIP_PPS_DATA(dev_priv, cpu_transcoder, i);
+ return ICL_VIDEO_DIP_PPS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
- return HSW_TVIDEO_DIP_AVI_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_AVI_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
- return HSW_TVIDEO_DIP_SPD_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_SPD_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
- return HSW_TVIDEO_DIP_VS_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_DRM:
- return GLK_TVIDEO_DIP_DRM_DATA(dev_priv, cpu_transcoder, i);
+ return GLK_TVIDEO_DIP_DRM_DATA(display, cpu_transcoder, i);
default:
MISSING_CASE(type);
return INVALID_MMIO_REG;
}
}
-static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+static int hsw_dip_data_size(struct intel_display *display,
unsigned int type)
{
switch (type) {
@@ -197,7 +192,7 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return VIDEO_DIP_GMP_DATA_SIZE;
else
return VIDEO_DIP_DATA_SIZE;
@@ -211,12 +206,12 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -224,22 +219,22 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
+ intel_de_write(display, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
+ intel_de_write(display, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
- intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(display, VIDEO_DIP_CTL);
}
static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -247,22 +242,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ intel_de_rmw(display, VIDEO_DIP_CTL,
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
+ *data++ = intel_de_read(display, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -279,14 +274,14 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -294,23 +289,23 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -318,25 +313,25 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -354,14 +349,14 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -372,23 +367,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -396,24 +391,24 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -428,14 +423,14 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -443,24 +438,24 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -468,25 +463,25 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, VLV_TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
+ *data++ = intel_de_read(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -504,75 +499,75 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(dev_priv, cpu_transcoder);
+ i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(display, cpu_transcoder);
int data_size;
int i;
- u32 val = intel_de_read(dev_priv, ctl_reg);
+ u32 val = intel_de_read(display, ctl_reg);
- data_size = hsw_dip_data_size(dev_priv, type);
+ data_size = hsw_dip_data_size(display, type);
- drm_WARN_ON(&dev_priv->drm, len > data_size);
+ drm_WARN_ON(display->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_write(display, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
0);
/* Wa_14013475917 */
- if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr &&
+ if (!(IS_DISPLAY_VER(display, 13, 14) && crtc_state->has_psr &&
!crtc_state->has_panel_replay && type == DP_SDP_VSC))
val |= hsw_infoframe_enable(type);
if (type == DP_SDP_VSC)
val |= VSC_DIP_HW_DATA_SW_HEA;
- intel_de_write(dev_priv, ctl_reg, val);
- intel_de_posting_read(dev_priv, ctl_reg);
+ intel_de_write(display, ctl_reg, val);
+ intel_de_posting_read(display, ctl_reg);
}
void hsw_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type, void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 *data = frame;
int i;
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
+ *data++ = intel_de_read(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv,
- HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder));
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display,
+ HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
mask |= VIDEO_DIP_ENABLE_AS_ADL;
return val & mask;
@@ -604,7 +599,7 @@ u32 intel_hdmi_infoframe_enable(unsigned int type)
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -615,7 +610,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
unsigned int type = infoframe_type_to_idx[i];
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(display)) {
if (val & hsw_infoframe_enable(type))
ret |= BIT(i);
} else {
@@ -830,11 +825,11 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
- if (DISPLAY_VER(dev_priv) < 10)
+ if (DISPLAY_VER(display) < 10)
return true;
if (!crtc_state->has_infoframe)
@@ -848,13 +843,13 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"couldn't set HDR metadata in infoframe\n");
return false;
}
ret = hdmi_drm_infoframe_check(frame);
- if (drm_WARN_ON(&dev_priv->drm, ret))
+ if (drm_WARN_ON(display->drm, ret))
return false;
return true;
@@ -865,11 +860,11 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -889,21 +884,21 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP still enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
@@ -916,8 +911,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -977,6 +972,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -985,8 +981,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return false;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -994,7 +990,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
+ intel_de_write(display, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1002,6 +998,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1010,8 +1007,8 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -1019,7 +1016,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
+ crtc_state->infoframes.gcp = intel_de_read(display, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1049,12 +1046,12 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1068,13 +1065,13 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1089,8 +1086,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1108,11 +1105,11 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1125,8 +1122,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
@@ -1138,8 +1135,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1157,11 +1154,11 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1175,13 +1172,13 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1196,8 +1193,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1215,12 +1212,12 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display,
crtc_state->cpu_transcoder);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
- assert_hdmi_transcoder_func_disabled(dev_priv,
+ assert_hdmi_transcoder_func_disabled(display,
crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -1229,16 +1226,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1256,16 +1253,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ drm_dbg_kms(display->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
- drm_dp_dual_mode_set_tmds_output(&dev_priv->drm,
+ drm_dp_dual_mode_set_tmds_output(display->drm,
hdmi->dp_dual_mode.type, ddc, enable);
}
@@ -1331,7 +1328,7 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
int ret;
@@ -1339,14 +1336,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Write An over DDC failed (%d)\n",
ret);
return ret;
}
ret = intel_gmbus_output_aksv(ddc);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(display->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1355,13 +1352,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Bksv over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1370,13 +1367,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read bstatus over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1385,13 +1383,13 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1403,13 +1401,13 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Ri' over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1418,13 +1416,13 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1436,12 +1434,12 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
@@ -1452,7 +1450,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1461,7 +1459,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read V'[%d] over DDC failed (%d)\n",
i, ret);
return ret;
}
@@ -1469,15 +1468,15 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
u32 scanline;
int ret;
for (;;) {
- scanline = intel_de_read(dev_priv,
- PIPEDSL(dev_priv, crtc->pipe));
+ scanline = intel_de_read(display,
+ PIPEDSL(display, crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1486,7 +1485,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1494,7 +1493,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1507,6 +1506,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
enum transcoder cpu_transcoder,
bool enable)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1519,7 +1519,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
cpu_transcoder, enable,
TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ drm_err(display->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1539,6 +1539,7 @@ static
bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
@@ -1558,9 +1559,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1570,14 +1571,14 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(&i915->drm, "Link check failed\n");
+ drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -1628,13 +1629,13 @@ hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ drm_dbg_kms(display->drm, "rx_status read failed. Err %d\n",
ret);
return ret;
}
@@ -1655,7 +1656,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
u8 msg_id, bool paired)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1670,7 +1671,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ drm_dbg_kms(display->drm,
+ "msg_id: %d, ret: %d, timeout: %d\n",
msg_id, ret, timeout);
return ret ? ret : msg_sz;
@@ -1691,8 +1693,8 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
u8 msg_id, void *buf, size_t size)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1708,7 +1710,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
* available buffer.
*/
if (ret > size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
return -EINVAL;
@@ -1717,7 +1719,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
if (ret)
- drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ drm_dbg_kms(display->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);
return ret;
@@ -1783,16 +1785,17 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(dev_priv) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
max_tmds_clock = 600000;
- else if (DISPLAY_VER(dev_priv) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
max_tmds_clock = 300000;
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
else
max_tmds_clock = 165000;
@@ -1848,7 +1851,8 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
bool has_hdmi_sink)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1885,7 +1889,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
* FIXME: We will hopefully get an algorithmic way of programming
* the MPLLB for HDMI in the future.
*/
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
else if (IS_DG2(dev_priv))
return intel_snps_phy_check_hdmi_link_rate(clock);
@@ -1908,13 +1912,13 @@ int intel_hdmi_tmds_clock(int clock, int bpc,
return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
-static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
+static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bpc)
{
switch (bpc) {
case 12:
- return !HAS_GMCH(i915);
+ return !HAS_GMCH(display);
case 10:
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
case 8:
return true;
default:
@@ -1960,7 +1964,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1973,7 +1977,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
for (bpc = 12; bpc >= 8; bpc -= 2) {
int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
- if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
@@ -1985,7 +1989,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
/* can never happen */
- drm_WARN_ON(&i915->drm, status == MODE_OK);
+ drm_WARN_ON(display->drm, status == MODE_OK);
return status;
}
@@ -1994,8 +1998,9 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
@@ -2073,17 +2078,16 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
return false;
/* Display Wa_1405510057:icl,ehl */
if (intel_hdmi_is_ycbcr420(crtc_state) &&
- bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
+ bpc == 10 && DISPLAY_VER(display) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2130,7 +2134,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
@@ -2153,7 +2157,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
*/
crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);
@@ -2230,10 +2234,10 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
@@ -2241,7 +2245,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only);
if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
}
@@ -2302,7 +2306,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
@@ -2335,7 +2339,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unsupported HDMI clock (%d kHz), rejecting mode\n",
pipe_config->hw.adjusted_mode.crtc_clock);
return ret;
@@ -2370,22 +2374,22 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
+ drm_dbg_kms(display->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
+ drm_dbg_kms(display->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
+ drm_dbg_kms(display->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
+ drm_dbg_kms(display->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2418,13 +2422,14 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
struct i2c_adapter *ddc = connector->ddc;
enum drm_dp_dual_mode_type type;
- type = drm_dp_dual_mode_detect(&dev_priv->drm, ddc);
+ type = drm_dp_dual_mode_detect(display->drm, ddc);
/*
* Type 1 DVI adaptors are not required to implement any
@@ -2438,7 +2443,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
if (!connector->force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
@@ -2451,17 +2456,17 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
- drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, ddc);
+ drm_dp_dual_mode_max_tmds_clock(display->drm, type, ddc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
hdmi->dp_dual_mode.max_tmds_clock = 0;
}
@@ -2470,6 +2475,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct i2c_adapter *ddc = connector->ddc;
@@ -2482,7 +2488,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
drm_edid = drm_edid_read_ddc(connector, ddc);
@@ -2511,13 +2517,14 @@ intel_hdmi_set_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_device_enabled(dev_priv))
@@ -2528,7 +2535,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (DISPLAY_VER(dev_priv) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2549,9 +2556,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
@@ -2608,9 +2616,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return intel_digital_connector_atomic_check(connector, state);
else
return g4x_hdmi_connector_atomic_check(connector, state);
@@ -2625,7 +2633,7 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -2634,10 +2642,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
drm_connector_attach_hdr_output_metadata_property(connector);
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
@@ -2664,14 +2672,14 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool high_tmds_clock_ratio,
bool scrambling)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
connector->base.id, connector->name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
@@ -2752,7 +2760,7 @@ static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_encoder_is_combo(encoder))
@@ -2760,7 +2768,7 @@ static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
else if (intel_encoder_is_tc(encoder))
return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
- drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(display->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -2808,10 +2816,11 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
+ drm_WARN_ON(display->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2871,6 +2880,7 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
@@ -2880,7 +2890,7 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
@@ -2902,10 +2912,11 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
static struct intel_encoder *
get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
struct intel_connector *connector;
if (other == encoder)
@@ -2925,6 +2936,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
const char *source;
@@ -2939,20 +2951,22 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
}
if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Invalid DDC pin %d\n",
encoder->base.base.id, encoder->base.name, ddc_pin);
return 0;
}
other = get_encoder_by_ddc_pin(encoder, ddc_pin);
if (other) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name, ddc_pin,
other->base.base.id, other->base.name);
return 0;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n",
encoder->base.base.id, encoder->base.name,
ddc_pin, source);
@@ -2962,6 +2976,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *dev_priv =
to_i915(dig_port->base.base.dev);
@@ -2975,7 +2990,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
- } else if (HAS_DDI(dev_priv)) {
+ } else if (HAS_DDI(display)) {
if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
@@ -3003,6 +3018,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_connector *connector = &intel_connector->base;
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct intel_encoder *intel_encoder = &dig_port->base;
@@ -3012,11 +3028,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct cec_connector_info conn_info;
u8 ddc_pin;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
+ if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
if (drm_WARN(dev, dig_port->max_lanes < 4,
@@ -3036,18 +3052,18 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
connector->interlace_allowed = true;
connector->stereo_allowed = true;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_connector->base.polled = intel_connector->polled;
- if (HAS_DDI(dev_priv))
+ if (HAS_DDI(display))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -3061,7 +3077,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -3071,7 +3087,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
+ drm_dbg_kms(display->drm, "CEC notifier get failed\n");
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 6b39df38d57a..9b97623665c5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -58,6 +58,5 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
-struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi);
#endif /* __INTEL_HDMI_H__ */
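The conversions above lean on the polymorphic to_intel_display() helper, which maps whichever pointer a call site holds (encoder, connector, digital port, ...) onto the common display struct. A minimal stand-alone sketch of that dispatch idiom, using hypothetical demo_* types rather than the in-tree definition, looks like this:

/* Hypothetical analog of the _Generic() dispatch idiom (C11); the real
 * i915 helper covers many more pointer types. */
struct demo_display { int id; };
struct demo_encoder { struct demo_display *display; };
struct demo_connector { struct demo_display *display; };

#define demo_to_display(ptr) _Generic((ptr), \
	struct demo_encoder *:   ((struct demo_encoder *)(ptr))->display, \
	struct demo_connector *: ((struct demo_connector *)(ptr))->display)

int main(void)
{
	struct demo_display d = { .id = 7 };
	struct demo_encoder e = { .display = &d };
	struct demo_connector c = { .display = &d };

	/* Both call sites resolve to the same underlying display. */
	return (demo_to_display(&e) == demo_to_display(&c)) ? 0 : 1;
}

One macro at every call site is what keeps the hundreds of mechanical dev_priv-to-display swaps in this patch uniform.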
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index a1f07ee69a86..2c4e946d5575 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -456,6 +456,7 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
{
+ struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
@@ -477,7 +478,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
if ((IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
}
void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
@@ -513,6 +514,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
{
+ struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
@@ -545,7 +547,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
}
if (trigger_aux)
- intel_dp_aux_irq_handler(i915);
+ intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
drm_err(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index a92d008d4e6e..19d1f196d9fb 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -9,33 +9,33 @@
#include "intel_hti.h"
#include "intel_hti_regs.h"
-void intel_hti_init(struct drm_i915_private *i915)
+void intel_hti_init(struct intel_display *display)
{
/*
* If the platform has HTI, we need to find out whether it has reserved
* any display resources before we create our display outputs.
*/
- if (DISPLAY_INFO(i915)->has_hti)
- i915->display.hti.state = intel_de_read(i915, HDPORT_STATE);
+ if (DISPLAY_INFO(display)->has_hti)
+ display->hti.state = intel_de_read(display, HDPORT_STATE);
}
-bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
+bool intel_hti_uses_phy(struct intel_display *display, enum phy phy)
{
- if (drm_WARN_ON(&i915->drm, phy == PHY_NONE))
+ if (drm_WARN_ON(display->drm, phy == PHY_NONE))
return false;
- return i915->display.hti.state & HDPORT_ENABLED &&
- i915->display.hti.state & HDPORT_DDI_USED(phy);
+ return display->hti.state & HDPORT_ENABLED &&
+ display->hti.state & HDPORT_DDI_USED(phy);
}
-u32 intel_hti_dpll_mask(struct drm_i915_private *i915)
+u32 intel_hti_dpll_mask(struct intel_display *display)
{
- if (!(i915->display.hti.state & HDPORT_ENABLED))
+ if (!(display->hti.state & HDPORT_ENABLED))
return 0;
/*
* Note: This is subtle. The values must coincide with what's defined
* for the platform.
*/
- return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->display.hti.state);
+ return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, display->hti.state);
}
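intel_hti.c treats the cached HDPORT_STATE value as a bitfield: an enable bit gates everything, per-PHY "DDI used" bits mark outputs the HTI firmware reserved, and a packed field lists the DPLLs it claimed. A compilable sketch with an invented bit layout (the real layout lives in intel_hti_regs.h):

#include <stdio.h>

/* Invented layout: bit 0 = enabled, bits 1..4 = per-PHY "DDI used",
 * bits 8..11 = DPLLs claimed. */
#define HDPORT_ENABLED          (1u << 0)
#define HDPORT_DDI_USED(phy)    (1u << (1 + (phy)))
#define HDPORT_DPLL_MASK        0x0f00u
#define HDPORT_DPLL_SHIFT       8

static int hti_uses_phy(unsigned int state, int phy)
{
        return (state & HDPORT_ENABLED) && (state & HDPORT_DDI_USED(phy));
}

static unsigned int hti_dpll_mask(unsigned int state)
{
        if (!(state & HDPORT_ENABLED))
                return 0;
        /* The REG_FIELD_GET() equivalent: mask, then shift the field down. */
        return (state & HDPORT_DPLL_MASK) >> HDPORT_DPLL_SHIFT;
}

int main(void)
{
        unsigned int state = HDPORT_ENABLED | HDPORT_DDI_USED(2) | (0x3u << 8);

        printf("phy2 used=%d dplls=0x%x\n",
               hti_uses_phy(state, 2), hti_dpll_mask(state));
        return 0;
}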
diff --git a/drivers/gpu/drm/i915/display/intel_hti.h b/drivers/gpu/drm/i915/display/intel_hti.h
index 2893d6668657..b692571c5558 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.h
+++ b/drivers/gpu/drm/i915/display/intel_hti.h
@@ -8,11 +8,11 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
enum phy;
-void intel_hti_init(struct drm_i915_private *i915);
-bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy);
-u32 intel_hti_dpll_mask(struct drm_i915_private *i915);
+void intel_hti_init(struct intel_display *display);
+bool intel_hti_uses_phy(struct intel_display *display, enum phy phy);
+u32 intel_hti_dpll_mask(struct intel_display *display);
#endif /* __INTEL_HTI_H__ */
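The header change above only swaps forward declarations: because the prototypes use pointers alone, an incomplete struct type is sufficient, and the header stays free of heavy includes. A two-file model of that idiom, collapsed into one translation unit:

#include <stdio.h>

/* "Header" part: an incomplete type is enough for pointer prototypes. */
struct display_model;
unsigned int dpll_mask_model(const struct display_model *display);

/* "Implementation" part: only here is the full definition needed. */
struct display_model { unsigned int hti_state; };

unsigned int dpll_mask_model(const struct display_model *display)
{
        return display->hti_state;
}

int main(void)
{
        struct display_model d = { .hti_state = 0x5 };

        printf("0x%x\n", dpll_mask_model(&d));
        return 0;
}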
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index dfd7d5e23f3f..e7a9b860fac6 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_fixed.h>
+
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -23,12 +25,13 @@
void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe;
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state,
intel_crtc_for_pipe(i915, pipe));
@@ -67,12 +70,12 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
u8 pipe_mask,
const char *reason)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
enum pipe max_bpp_pipe = INVALID_PIPE;
struct intel_crtc *crtc;
int max_bpp_x16 = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
int link_bpp_x16;
@@ -93,7 +96,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
* is based on the pipe bpp value, set the actual link bpp
* limit here once the MST BW allocation is fixed.
*/
- link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
+ link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);
if (link_bpp_x16 > max_bpp_x16) {
max_bpp_x16 = link_bpp_x16;
@@ -134,7 +137,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (pipe == INVALID_PIPE)
return false;
@@ -143,7 +146,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
old_limits->max_bpp_x16[pipe])
return false;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_limits->bpp_limit_reached_pipes & BIT(pipe)))
return false;
@@ -176,7 +179,7 @@ static int check_all_link_config(struct intel_atomic_state *state,
}
static bool
-assert_link_limit_change_valid(struct drm_i915_private *i915,
+assert_link_limit_change_valid(struct intel_display *display,
const struct intel_link_bw_limits *old_limits,
const struct intel_link_bw_limits *new_limits)
{
@@ -184,14 +187,14 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
enum pipe pipe;
/* FEC can't be forced off after it was forced on. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
old_limits->force_fec_pipes))
return false;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* The bpp limit can only decrease. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_limits->max_bpp_x16[pipe] >
old_limits->max_bpp_x16[pipe]))
return false;
@@ -202,7 +205,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
}
/* At least one limit must change. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!bpps_changed &&
new_limits->force_fec_pipes ==
old_limits->force_fec_pipes))
@@ -230,7 +233,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_link_bw_limits old_limits = *new_limits;
int ret;
@@ -238,7 +241,7 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state,
if (ret != -EAGAIN)
return ret;
- if (!assert_link_limit_change_valid(i915, &old_limits, new_limits))
+ if (!assert_link_limit_change_valid(display, &old_limits, new_limits))
return -EINVAL;
return -EAGAIN;
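Besides the display conversion, the intel_link_bw.c hunks swap the driver-local to_bpp_x16() for fxp_q4_from_int() from the newly included <drm/drm_fixed.h>: link bpp is carried as Q4 fixed point, an integer scaled by 16 with 4 fractional bits. A standalone re-derivation of the arithmetic (these helpers are reimplemented here for illustration, not taken from that header):

#include <stdio.h>

/* Q4 fixed point: 4 fractional bits, so 1.0 == 16. */
static int q4_from_int(int v)        { return v << 4; }
static int q4_to_int(int q)          { return q >> 4; }
static int q4_to_int_roundup(int q)  { return (q + 15) >> 4; }

int main(void)
{
        int pipe_bpp = 24;
        int link_bpp_x16 = q4_from_int(pipe_bpp);       /* 24 bpp -> 384 */

        printf("link_bpp_x16 = %d\n", link_bpp_x16);
        /* 390/16 = 24.375: truncates to 24, rounds up to 25. */
        printf("trunc=%d roundup=%d\n", q4_to_int(390), q4_to_int_roundup(390));
        return 0;
}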
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index 6b0ccfff59da..e69049cf178f 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -10,8 +10,6 @@
#include "intel_display_limits.h"
-struct drm_i915_private;
-
struct intel_atomic_state;
struct intel_crtc_state;
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index d5a0aecf3e8f..b457c69dc0be 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -48,23 +48,22 @@ struct drm_atomic_state *
intel_load_detect_get_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *encoder =
intel_attached_encoder(to_intel_connector(connector));
struct intel_crtc *possible_crtc;
struct intel_crtc *crtc = NULL;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_config *config = &display->drm->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name);
- drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(display->drm, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -89,7 +88,7 @@ intel_load_detect_get_pipe(struct drm_connector *connector,
}
/* Find an unused one (if possible) */
- for_each_intel_crtc(dev, possible_crtc) {
+ for_each_intel_crtc(display->drm, possible_crtc) {
if (!(encoder->base.possible_crtcs &
drm_crtc_mask(&possible_crtc->base)))
continue;
@@ -111,15 +110,15 @@ intel_load_detect_get_pipe(struct drm_connector *connector,
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"no pipe available for load-detect\n");
ret = -ENODEV;
goto fail;
}
found:
- state = drm_atomic_state_alloc(dev);
- restore_state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(display->drm);
+ restore_state = drm_atomic_state_alloc(display->drm);
if (!state || !restore_state) {
ret = -ENOMEM;
goto fail;
@@ -164,7 +163,7 @@ found:
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to create a copy of old state to restore: %i\n",
ret);
goto fail;
@@ -172,7 +171,7 @@ found:
ret = drm_atomic_commit(state);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"failed to set mode on load-detect pipe\n");
goto fail;
}
@@ -204,13 +203,13 @@ void intel_load_detect_release_pipe(struct drm_connector *connector,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *intel_encoder =
intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct drm_encoder *encoder = &intel_encoder->base;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
@@ -219,7 +218,7 @@ void intel_load_detect_release_pipe(struct drm_connector *connector,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_put(state);
}
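intel_load_detect_get_pipe() allocates two atomic states up front: one to commit the temporary load-detect configuration and a duplicated copy of the current state so it can be put back afterwards; the release path then commits that saved copy. A simplified snapshot-and-restore model of the same idea, without any DRM objects:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct state_model { int mode; };

int main(void)
{
        struct state_model live = { .mode = 1 };

        /* Duplicate the current state before touching anything, like the
         * restore_state allocation above. */
        struct state_model *restore = malloc(sizeof(*restore));
        if (!restore)
                return 1;
        memcpy(restore, &live, sizeof(live));

        live.mode = 99;                 /* temporary load-detect config */
        printf("probing with mode %d\n", live.mode);

        live = *restore;                /* release path: commit the copy */
        printf("restored mode %d\n", live.mode);

        free(restore);
        return 0;
}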
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 8b26354d6e53..f9db867fae89 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -79,33 +79,33 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
{
- struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(dp);
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_dpcd_ident *ident;
u32 vendor_oui;
- if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
- drm_err(&i915->drm, "Can't read description\n");
+ if (drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd))) {
+ drm_err(display->drm, "Can't read description\n");
return false;
}
- ident = &dp->desc.ident;
+ ident = &intel_dp->desc.ident;
vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
ident->oui[2];
switch (vendor_oui) {
case LSPCON_VENDOR_MCA_OUI:
lspcon->vendor = LSPCON_VENDOR_MCA;
- drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n");
+ drm_dbg_kms(display->drm, "Vendor: Mega Chips\n");
break;
case LSPCON_VENDOR_PARADE_OUI:
lspcon->vendor = LSPCON_VENDOR_PARADE;
- drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n");
+ drm_dbg_kms(display->drm, "Vendor: Parade Tech\n");
break;
default:
- drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n");
+ drm_err(display->drm, "Invalid/Unknown vendor OUI\n");
return false;
}
@@ -123,7 +123,7 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 hdr_caps;
int ret;
@@ -131,10 +131,10 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
&hdr_caps, 1);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "HDR capability detection failed\n");
+ drm_dbg_kms(display->drm, "HDR capability detection failed\n");
lspcon->hdr_supported = false;
} else if (hdr_caps & 0x1) {
- drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n");
+ drm_dbg_kms(display->drm, "LSPCON capable of HDR\n");
lspcon->hdr_supported = true;
}
}
@@ -142,12 +142,12 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode)) {
- drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n");
+ drm_dbg_kms(display->drm, "Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
return current_mode;
@@ -169,23 +169,23 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
goto out;
- drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n",
+ drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode,
lspcon_get_mode_settle_timeout(lspcon));
if (current_mode != mode)
- drm_err(&i915->drm, "LSPCON mode hasn't settled\n");
+ drm_err(display->drm, "LSPCON mode hasn't settled\n");
out:
- drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n",
+ drm_dbg_kms(display->drm, "Current LSPCON mode %s\n",
lspcon_mode_name(current_mode));
return current_mode;
@@ -195,46 +195,46 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int err;
enum drm_lspcon_mode current_mode;
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode);
if (err) {
- drm_err(&i915->drm, "Error reading LSPCON mode\n");
+ drm_err(display->drm, "Error reading LSPCON mode\n");
return err;
}
if (current_mode == mode) {
- drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n");
+ drm_dbg_kms(display->drm, "Current mode = desired LSPCON mode\n");
return 0;
}
err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode);
if (err < 0) {
- drm_err(&i915->drm, "LSPCON mode change failed\n");
+ drm_err(display->drm, "LSPCON mode change failed\n");
return err;
}
lspcon->mode = mode;
- drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n");
+ drm_dbg_kms(display->drm, "LSPCON mode changed done\n");
return 0;
}
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
- drm_dbg_kms(&i915->drm, "Native AUX CH down\n");
+ drm_dbg_kms(display->drm, "Native AUX CH down\n");
return false;
}
- drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n",
+ drm_dbg_kms(display->drm, "Native AUX CH up, DPCD version: %d.%d\n",
rev >> 4, rev & 0xf);
return true;
@@ -242,12 +242,12 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
- int retry;
- enum drm_dp_dual_mode_type adaptor_type;
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
+ enum drm_dp_dual_mode_type adaptor_type;
enum drm_lspcon_mode expected_mode;
+ int retry;
expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
@@ -263,13 +263,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
}
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
- drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n",
+ drm_dbg_kms(display->drm, "No LSPCON detected, found %s\n",
drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
/* Yay ... got a LSPCON device */
- drm_dbg_kms(&i915->drm, "LSPCON detected\n");
+ drm_dbg_kms(display->drm, "LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
/*
@@ -279,7 +279,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
*/
if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- drm_err(&i915->drm, "LSPCON mode change to PCON failed\n");
+ drm_err(display->drm, "LSPCON mode change to PCON failed\n");
return false;
}
}
@@ -289,13 +289,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
unsigned long start = jiffies;
while (1) {
if (intel_digital_port_connected(&dig_port->base)) {
- drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n",
+ drm_dbg_kms(display->drm, "LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
}
@@ -306,7 +306,7 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
usleep_range(10000, 15000);
}
- drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n");
+ drm_dbg_kms(display->drm, "LSPCON DP descriptor mismatch after resume\n");
}
static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
@@ -477,10 +477,10 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret = true;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ bool ret = true;
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -492,7 +492,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
frame, len);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n");
+ drm_dbg_kms(display->drm, "Update HDR metadata for lspcon\n");
/* It uses the legacy hsw implementation for the same */
hsw_write_infoframe(encoder, crtc_state, type, frame, len);
break;
@@ -501,7 +501,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
}
if (!ret) {
- drm_err(&i915->drm, "Failed to write infoframes\n");
+ drm_err(display->drm, "Failed to write infoframes\n");
return;
}
}
@@ -522,17 +522,17 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- ssize_t ret;
- union hdmi_infoframe frame;
- u8 buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ union hdmi_infoframe frame;
+ u8 buf[VIDEO_DIP_DATA_SIZE];
+ ssize_t ret;
if (!lspcon->active) {
- drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n");
+ drm_err(display->drm, "Writing infoframes while LSPCON disabled ?\n");
return;
}
@@ -542,7 +542,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
conn_state->connector,
adjusted_mode);
if (ret < 0) {
- drm_err(&i915->drm, "couldn't fill AVI infoframe\n");
+ drm_err(display->drm, "couldn't fill AVI infoframe\n");
return;
}
@@ -583,7 +583,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
- drm_err(&i915->drm, "Failed to pack AVI IF\n");
+ drm_err(display->drm, "Failed to pack AVI IF\n");
return;
}
@@ -624,9 +624,9 @@ static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
bool infoframes_enabled;
u32 val = 0;
u32 mask, tmp;
@@ -640,8 +640,8 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
if (lspcon->hdr_supported) {
- tmp = intel_de_read(dev_priv,
- HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display,
+ HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder));
mask = VIDEO_DIP_ENABLE_GMP_HSW;
if (tmp & mask)
@@ -658,32 +658,32 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
bool lspcon_init(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector *connector = &intel_dp->attached_connector->base;
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
if (!lspcon_probe(lspcon)) {
- drm_err(&i915->drm, "Failed to probe lspcon\n");
+ drm_err(display->drm, "Failed to probe lspcon\n");
return false;
}
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) {
- drm_err(&i915->drm, "LSPCON DPCD read failed\n");
+ drm_err(display->drm, "LSPCON DPCD read failed\n");
return false;
}
if (!lspcon_detect_vendor(lspcon)) {
- drm_err(&i915->drm, "LSPCON vendor detection failed\n");
+ drm_err(display->drm, "LSPCON vendor detection failed\n");
return false;
}
connector->ycbcr_420_allowed = true;
lspcon->active = true;
- drm_dbg_kms(&i915->drm, "Success: LSPCON init\n");
+ drm_dbg_kms(display->drm, "Success: LSPCON init\n");
return true;
}
@@ -697,9 +697,8 @@ u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
void lspcon_resume(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
@@ -707,7 +706,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
if (!lspcon->active) {
if (!lspcon_init(dig_port)) {
- drm_err(&i915->drm, "LSPCON init failed on port %c\n",
+ drm_err(display->drm, "LSPCON init failed on port %c\n",
port_name(dig_port->base.port));
return;
}
@@ -724,7 +723,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
return;
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
- drm_err(&i915->drm, "LSPCON resume failed\n");
+ drm_err(display->drm, "LSPCON resume failed\n");
else
- drm_dbg_kms(&i915->drm, "LSPCON resume success\n");
+ drm_dbg_kms(display->drm, "LSPCON resume success\n");
}
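lspcon_detect_vendor() builds a 24-bit IEEE OUI from the three DPCD ident bytes, most significant byte first, and switches on the assembled value. A self-contained sketch of that assembly with a placeholder OUI (the real MCA and Parade constants are defined in intel_lspcon.c):

#include <stdint.h>
#include <stdio.h>

#define VENDOR_A_OUI    0xAABBCCu       /* placeholder, not a real vendor */

static uint32_t oui_from_bytes(const uint8_t oui[3])
{
        /* oui[0] is the most significant byte, as in the hunk above. */
        return ((uint32_t)oui[0] << 16) | ((uint32_t)oui[1] << 8) | oui[2];
}

int main(void)
{
        const uint8_t ident_oui[3] = { 0xAA, 0xBB, 0xCC };

        switch (oui_from_bytes(ident_oui)) {
        case VENDOR_A_OUI:
                printf("vendor A\n");
                break;
        default:
                printf("unknown OUI\n");
                break;
        }
        return 0;
}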
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9f018503d4fd..fb4ed9f7855b 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -838,6 +838,7 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
*/
void intel_lvds_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -872,7 +873,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
}
ddc_pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(i915, &ddc_pin)) {
+ if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
@@ -966,7 +967,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
} else {
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(i915, &connector->panel, NULL,
+ intel_bios_init_panel_late(display, &connector->panel, NULL,
IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
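The LVDS probe above cross-checks two sources: if the VBT does not list an LVDS device and the hardware's port-enable bit is also clear, the output is treated as absent; a BIOS-enabled port is trusted even without a VBT entry. A sketch of that two-source presence test, with an invented bit value standing in for LVDS_PORT_EN:

#include <stdio.h>

#define LVDS_PORT_EN    (1u << 31)      /* placeholder bit position */

static int lvds_should_init(int vbt_says_present, unsigned int lvds_reg)
{
        if (vbt_says_present)
                return 1;
        /* VBT is silent: trust the port only if the BIOS left it enabled. */
        return (lvds_reg & LVDS_PORT_EN) != 0;
}

int main(void)
{
        printf("%d %d\n",
               lvds_should_init(0, 0),                  /* absent */
               lvds_should_init(0, LVDS_PORT_EN));      /* BIOS-enabled */
        return 0;
}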
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index e1213f3d93cc..72694dde3c22 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -987,7 +987,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
}
}
- intel_fbc_sanitize(i915);
+ intel_fbc_sanitize(&i915->display);
intel_sanitize_plane_mapping(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 68bd5101ec89..ff11836459de 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -252,7 +252,7 @@ struct opregion_asle_ext {
#define OPREGION_SIZE (8 * 1024)
struct intel_opregion {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct opregion_header *header;
struct opregion_acpi *acpi;
@@ -268,9 +268,9 @@ struct intel_opregion {
struct notifier_block acpi_notifier;
};
-static int check_swsci_function(struct drm_i915_private *i915, u32 function)
+static int check_swsci_function(struct intel_display *display, u32 function)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct opregion_swsci *swsci;
u32 main_function, sub_function;
@@ -300,20 +300,20 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
return 0;
}
-static int swsci(struct drm_i915_private *dev_priv,
+static int swsci(struct intel_display *display,
u32 function, u32 parm, u32 *parm_out)
{
struct opregion_swsci *swsci;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u32 scic, dslp;
u16 swsci_val;
int ret;
- ret = check_swsci_function(dev_priv, function);
+ ret = check_swsci_function(display, function);
if (ret)
return ret;
- swsci = dev_priv->display.opregion->swsci;
+ swsci = display->opregion->swsci;
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
@@ -331,7 +331,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* The spec tells us to do this, but we are the only user... */
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
- drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
+ drm_dbg(display->drm, "SWSCI request already in progress\n");
return -EBUSY;
}
@@ -355,7 +355,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
- drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
+ drm_dbg(display->drm, "SWSCI request timed out\n");
return -ETIMEDOUT;
}
@@ -364,7 +364,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
- drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
+ drm_dbg(display->drm, "SWSCI request error %u\n", scic);
return -EIO;
}
@@ -381,28 +381,28 @@ static int swsci(struct drm_i915_private *dev_priv,
#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+int intel_opregion_notify_encoder(struct intel_encoder *encoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 parm = 0;
u32 type = 0;
u32 port;
int ret;
/* don't care about old stuff for now */
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(display))
return 0;
/* Avoid port out of bounds checks if SWSCI isn't there. */
- ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE);
+ ret = check_swsci_function(display, SWSCI_SBCB_DISPLAY_POWER_STATE);
if (ret)
return ret;
- if (intel_encoder->type == INTEL_OUTPUT_DSI)
+ if (encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
- port = intel_encoder->port;
+ port = encoder->port;
if (port == PORT_E) {
port = 0;
@@ -419,17 +419,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
* number is out of bounds after mapping.
*/
if (port > 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
- intel_encoder->base.base.id, intel_encoder->base.name,
- port_name(intel_encoder->port), port);
+ encoder->base.base.id, encoder->base.name,
+ port_name(encoder->port), port);
return -EINVAL;
}
if (!enable)
parm |= 4 << 8;
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
@@ -444,15 +444,15 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"unsupported intel_encoder type %d\n",
- intel_encoder->type);
+ encoder->type);
return -EINVAL;
}
parm |= type << (16 + port * 3);
- return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+ return swsci(display, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
@@ -466,33 +466,33 @@ static const struct {
{ PCI_D3cold, 0x04 },
};
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct intel_display *display,
pci_power_t state)
{
int i;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(display))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
- return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ return swsci(display, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
-static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
+static u32 asle_set_backlight(struct intel_display *display, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->display.opregion->asle;
+ struct opregion_asle *asle = display->opregion->asle;
- drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
+ drm_dbg(display->drm, "bclp = 0x%08x\n", bclp);
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"opregion backlight request ignored\n");
return 0;
}
@@ -504,104 +504,104 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
- drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
/*
* Update backlight on all connectors that support backlight (usually
* only one).
*/
- drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ drm_dbg_kms(display->drm, "updating opregion backlight %d/255\n",
bclp);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_backlight_set_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
- drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
-static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
+static u32 asle_set_als_illum(struct intel_display *display, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- drm_dbg(&dev_priv->drm, "Illum is not supported\n");
+ drm_dbg(display->drm, "Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
-static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
+static u32 asle_set_pwm_freq(struct intel_display *display, u32 pfmb)
{
- drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
+ drm_dbg(display->drm, "PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
-static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
+static u32 asle_set_pfit(struct intel_display *display, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
- drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
+ drm_dbg(display->drm, "Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
-static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct intel_display *display, u32 srot)
{
- drm_dbg(&dev_priv->drm, "SROT is not supported\n");
+ drm_dbg(display->drm, "SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
-static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_button_array(struct intel_display *display, u32 iuer)
{
if (!iuer)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
-static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_convertible(struct intel_display *display, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Convertible is not supported (clamshell)\n");
else
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
-static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_docking(struct intel_display *display, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
- drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
+ drm_dbg(display->drm, "Docking is not supported (docked)\n");
else
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
-static u32 asle_isct_state(struct drm_i915_private *dev_priv)
+static u32 asle_isct_state(struct intel_display *display)
{
- drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
+ drm_dbg(display->drm, "ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
@@ -609,7 +609,7 @@ static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
- struct drm_i915_private *dev_priv = opregion->i915;
+ struct intel_display *display = opregion->display;
struct opregion_asle *asle = opregion->asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -620,50 +620,51 @@ static void asle_work(struct work_struct *work)
aslc_req = asle->aslc;
if (!(aslc_req & ASLC_REQ_MSK)) {
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"No request on ASLC interrupt 0x%08x\n", aslc_req);
return;
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
- aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
+ aslc_stat |= asle_set_als_illum(display, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
- aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
+ aslc_stat |= asle_set_backlight(display, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
- aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
+ aslc_stat |= asle_set_pfit(display, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
- aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
+ aslc_stat |= asle_set_pwm_freq(display, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
- aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
+ aslc_stat |= asle_set_supported_rotation_angles(display,
asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
- aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_button_array(display, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
- aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_convertible(display, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
- aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_docking(display, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
- aslc_stat |= asle_isct_state(dev_priv);
+ aslc_stat |= asle_isct_state(display);
asle->aslc = aslc_stat;
}
-bool intel_opregion_asle_present(struct drm_i915_private *i915)
+bool intel_opregion_asle_present(struct intel_display *display)
{
- return i915->display.opregion && i915->display.opregion->asle;
+ return display->opregion && display->opregion->asle;
}
-void intel_opregion_asle_intr(struct drm_i915_private *i915)
+void intel_opregion_asle_intr(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_opregion *opregion = display->opregion;
if (opregion && opregion->asle)
queue_work(i915->unordered_wq, &opregion->asle_work);
@@ -720,9 +721,9 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static void intel_didl_outputs(struct drm_i915_private *dev_priv)
+static void intel_didl_outputs(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -737,9 +738,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- intel_acpi_device_id_update(dev_priv);
+ intel_acpi_device_id_update(display);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (i < max_outputs)
set_did(opregion, i, connector->acpi_device_id);
@@ -747,10 +748,10 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
}
drm_connector_list_iter_end(&conn_iter);
- drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
+ drm_dbg_kms(display->drm, "%d outputs detected\n", i);
if (i > max_outputs)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"More than %d outputs in connector list\n",
max_outputs);
@@ -759,9 +760,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
set_did(opregion, i, 0);
}
-static void intel_setup_cadls(struct drm_i915_private *dev_priv)
+static void intel_setup_cadls(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -776,7 +777,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
* Note that internal panels should be at the front of the connector
* list already, ensuring they're not left out.
*/
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (i >= ARRAY_SIZE(opregion->acpi->cadl))
break;
@@ -789,9 +790,9 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
opregion->acpi->cadl[i] = 0;
}
-static void swsci_setup(struct drm_i915_private *dev_priv)
+static void swsci_setup(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -800,7 +801,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
- if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
@@ -811,7 +812,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
* must not call interfaces that are not specifically requested by the
* bios.
*/
- if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
@@ -822,7 +823,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
- if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -832,7 +833,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
req, tmp);
/* XXX: for now, trust the requested callbacks */
@@ -842,7 +843,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
}
}
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
@@ -867,10 +868,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-int intel_opregion_setup(struct drm_i915_private *dev_priv)
+int intel_opregion_setup(struct intel_display *display)
{
struct intel_opregion *opregion;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
@@ -885,10 +886,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(pdev, ASLS, &asls);
- drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ drm_dbg(display->drm, "graphic opregion physical addr: 0x%x\n",
asls);
if (asls == 0) {
- drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
+ drm_dbg(display->drm, "ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -896,8 +897,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (!opregion)
return -ENOMEM;
- opregion->i915 = dev_priv;
- dev_priv->display.opregion = opregion;
+ opregion->display = display;
+ display->opregion = opregion;
INIT_WORK(&opregion->asle_work, asle_work);
@@ -910,20 +911,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
memcpy(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
- drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
+ drm_dbg(display->drm, "opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
- drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ drm_dbg(display->drm, "ACPI OpRegion version %u.%u.%u\n",
opregion->header->over.major,
opregion->header->over.minor,
opregion->header->over.revision);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
+ drm_dbg(display->drm, "Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/*
* Indicate we handle monitor hotplug events ourselves so we do
@@ -938,30 +939,30 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
u8 major = opregion->header->over.major;
if (major >= 3) {
- drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
+ drm_err(display->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
} else {
if (major >= 2)
- drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
- drm_dbg(&dev_priv->drm, "SWSCI supported\n");
+ drm_dbg(display->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
+ drm_dbg(display->drm, "SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev_priv);
+ swsci_setup(display);
}
}
if (mboxes & MBOX_ASLE) {
- drm_dbg(&dev_priv->drm, "ASLE supported\n");
+ drm_dbg(display->drm, "ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
if (mboxes & MBOX_ASLE_EXT) {
- drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
+ drm_dbg(display->drm, "ASLE extension supported\n");
opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET;
}
if (mboxes & MBOX_BACKLIGHT) {
- drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
+ drm_dbg(display->drm, "Mailbox #2 for backlight present\n");
}
if (dmi_check_system(intel_no_opregion_vbt))
@@ -979,7 +980,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
*/
if (opregion->header->over.major > 2 ||
opregion->header->over.minor >= 1) {
- drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
+ drm_WARN_ON(display->drm, rvda < OPREGION_SIZE);
rvda += asls;
}
@@ -989,14 +990,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
- if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) {
+ drm_dbg_kms(display->drm,
"Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
goto out;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid VBT in ACPI OpRegion (RVDA)\n");
memunmap(opregion->rvda);
opregion->rvda = NULL;
@@ -1014,13 +1015,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt_size = (mboxes & MBOX_ASLE_EXT) ?
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) {
+ drm_dbg_kms(display->drm,
"Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
}
@@ -1031,7 +1032,7 @@ err_out:
memunmap(base);
err_memremap:
kfree(opregion);
- dev_priv->display.opregion = NULL;
+ display->opregion = NULL;
return err;
}
@@ -1054,25 +1055,25 @@ static const struct dmi_system_id intel_use_opregion_panel_type[] = {
};
int
-intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
+intel_opregion_get_panel_type(struct intel_display *display)
{
u32 panel_details;
int ret;
- ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ ret = swsci(display, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret)
return ret;
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
- drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
+ drm_dbg_kms(display->drm, "No panel type in OpRegion\n");
return -ENODEV;
}
@@ -1082,7 +1083,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
@@ -1092,7 +1093,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
/**
* intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5
- * @intel_connector: eDP connector
+ * @connector: eDP connector
*
* This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed
* to it.
@@ -1101,11 +1102,10 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* The EDID in the OpRegion, or NULL if there is none or it's invalid.
*
*/
-const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_opregion *opregion = display->opregion;
const struct drm_edid *drm_edid;
const void *edid;
int len;
@@ -1117,13 +1117,13 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
/* Validity corresponds to number of 128-byte blocks */
len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
- if (!len || !memchr_inv(edid, 0, len))
+ if (!len || mem_is_zero(edid, len))
return NULL;
drm_edid = drm_edid_alloc(edid, len);
if (!drm_edid_valid(drm_edid)) {
- drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
+ drm_dbg_kms(display->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
drm_edid_free(drm_edid);
drm_edid = NULL;
}
@@ -1131,9 +1131,9 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
return drm_edid;
}
-bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+bool intel_opregion_vbt_present(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion || !opregion->vbt)
return false;
@@ -1141,9 +1141,9 @@ bool intel_opregion_vbt_present(struct drm_i915_private *i915)
return true;
}
-const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion || !opregion->vbt)
return NULL;
@@ -1154,9 +1154,9 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
}
-bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+bool intel_opregion_headless_sku(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct opregion_header *header;
if (!opregion)
@@ -1171,9 +1171,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
return opregion->header->pcon & PCON_HEADLESS_SKU;
}
-void intel_opregion_register(struct drm_i915_private *i915)
+void intel_opregion_register(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
@@ -1184,16 +1184,16 @@ void intel_opregion_register(struct drm_i915_private *i915)
register_acpi_notifier(&opregion->acpi_notifier);
}
- intel_opregion_resume(i915);
+ intel_opregion_resume(display);
}
-static void intel_opregion_resume_display(struct drm_i915_private *i915)
+static void intel_opregion_resume_display(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (opregion->acpi) {
- intel_didl_outputs(i915);
- intel_setup_cadls(i915);
+ intel_didl_outputs(display);
+ intel_setup_cadls(display);
/*
* Notify BIOS we are ready to handle ACPI video ext notifs.
@@ -1210,25 +1210,25 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915)
}
/* Some platforms abuse the _DSM to enable MUX */
- intel_dsm_get_bios_data_funcs_supported(i915);
+ intel_dsm_get_bios_data_funcs_supported(display);
}
-void intel_opregion_resume(struct drm_i915_private *i915)
+void intel_opregion_resume(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
- if (HAS_DISPLAY(i915))
- intel_opregion_resume_display(i915);
+ if (HAS_DISPLAY(display))
+ intel_opregion_resume_display(display);
- intel_opregion_notify_adapter(i915, PCI_D0);
+ intel_opregion_notify_adapter(display, PCI_D0);
}
-static void intel_opregion_suspend_display(struct drm_i915_private *i915)
+static void intel_opregion_suspend_display(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
@@ -1239,24 +1239,24 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915)
opregion->acpi->drdy = 0;
}
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+void intel_opregion_suspend(struct intel_display *display, pci_power_t state)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
- intel_opregion_notify_adapter(i915, state);
+ intel_opregion_notify_adapter(display, state);
- if (HAS_DISPLAY(i915))
- intel_opregion_suspend_display(i915);
+ if (HAS_DISPLAY(display))
+ intel_opregion_suspend_display(display);
}
-void intel_opregion_unregister(struct drm_i915_private *i915)
+void intel_opregion_unregister(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
- intel_opregion_suspend(i915, PCI_D1);
+ intel_opregion_suspend(display, PCI_D1);
if (!opregion)
return;
@@ -1267,9 +1267,9 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
}
}
-void intel_opregion_cleanup(struct drm_i915_private *i915)
+void intel_opregion_cleanup(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
@@ -1278,13 +1278,13 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
if (opregion->rvda)
memunmap(opregion->rvda);
kfree(opregion);
- i915->display.opregion = NULL;
+ display->opregion = NULL;
}
static int intel_opregion_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_display *display = m->private;
+ struct intel_opregion *opregion = display->opregion;
if (opregion)
seq_write(m, opregion->header, OPREGION_SIZE);
@@ -1294,10 +1294,10 @@ static int intel_opregion_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(intel_opregion);
-void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+void intel_opregion_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
- i915, &intel_opregion_fops);
+ display, &intel_opregion_fops);
}
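One detail worth noting in asle_set_backlight(): the requested level arrives as 0..255 but is reported back through cblv as a percentage, rounded up and tagged with a validity bit. A standalone model of that conversion (the bit position of ASLE_CBLV_VALID is treated as a placeholder here):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define CBLV_VALID              (1u << 31)      /* placeholder for ASLE_CBLV_VALID */

/* Returns nonzero on failure, mirroring ASLC_BACKLIGHT_FAILED above. */
static int bclp_to_cblv(unsigned int bclp, unsigned int *cblv)
{
        if (bclp > 255)
                return 1;
        *cblv = DIV_ROUND_UP(bclp * 100, 255) | CBLV_VALID;
        return 0;
}

int main(void)
{
        unsigned int cblv;

        if (!bclp_to_cblv(128, &cblv))
                printf("bclp 128 -> %u%%\n", cblv & ~CBLV_VALID);       /* 51% */
        return 0;
}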
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 4b2b8e752632..8101eeebfd8b 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -28,88 +28,88 @@
#include <linux/pci.h>
#include <linux/types.h>
-struct drm_i915_private;
struct intel_connector;
+struct intel_display;
struct intel_encoder;
#ifdef CONFIG_ACPI
-int intel_opregion_setup(struct drm_i915_private *dev_priv);
-void intel_opregion_cleanup(struct drm_i915_private *i915);
+int intel_opregion_setup(struct intel_display *display);
+void intel_opregion_cleanup(struct intel_display *display);
-void intel_opregion_register(struct drm_i915_private *dev_priv);
-void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct intel_display *display);
+void intel_opregion_unregister(struct intel_display *display);
-void intel_opregion_resume(struct drm_i915_private *dev_priv);
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+void intel_opregion_resume(struct intel_display *display);
+void intel_opregion_suspend(struct intel_display *display,
pci_power_t state);
-bool intel_opregion_asle_present(struct drm_i915_private *i915);
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+bool intel_opregion_asle_present(struct intel_display *display);
+void intel_opregion_asle_intr(struct intel_display *display);
+int intel_opregion_notify_encoder(struct intel_encoder *encoder,
bool enable);
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct intel_display *display,
pci_power_t state);
-int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+int intel_opregion_get_panel_type(struct intel_display *display);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
-bool intel_opregion_vbt_present(struct drm_i915_private *i915);
-const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
+bool intel_opregion_vbt_present(struct intel_display *display);
+const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size);
-bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+bool intel_opregion_headless_sku(struct intel_display *display);
-void intel_opregion_debugfs_register(struct drm_i915_private *i915);
+void intel_opregion_debugfs_register(struct intel_display *display);
#else /* CONFIG_ACPI*/
-static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+static inline int intel_opregion_setup(struct intel_display *display)
{
return 0;
}
-static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
+static inline void intel_opregion_cleanup(struct intel_display *display)
{
}
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_register(struct intel_display *display)
{
}
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_unregister(struct intel_display *display)
{
}
-static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct intel_display *display)
{
}
-static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+static inline void intel_opregion_suspend(struct intel_display *display,
pci_power_t state)
{
}
-static inline bool intel_opregion_asle_present(struct drm_i915_private *i915)
+static inline bool intel_opregion_asle_present(struct intel_display *display)
{
return false;
}
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_asle_intr(struct intel_display *display)
{
}
static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+intel_opregion_notify_encoder(struct intel_encoder *encoder, bool enable)
{
return 0;
}
static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct intel_display *display, pci_power_t state)
{
return 0;
}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+static inline int intel_opregion_get_panel_type(struct intel_display *display)
{
return -ENODEV;
}
@@ -120,23 +120,23 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
-static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+static inline bool intel_opregion_vbt_present(struct intel_display *display)
{
return false;
}
static inline const void *
-intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+intel_opregion_get_vbt(struct intel_display *display, size_t *size)
{
return NULL;
}
-static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+static inline bool intel_opregion_headless_sku(struct intel_display *display)
{
return false;
}
-static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+static inline void intel_opregion_debugfs_register(struct intel_display *display)
{
}
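
The header keeps the usual kernel idiom of pairing each declaration with a static inline no-op stub for the CONFIG_ACPI=n build, so callers never need #ifdefs of their own. A self-contained sketch of the idiom; HAVE_OPREGION and the types here are made up for illustration:

#include <stdio.h>

struct display { int id; };

#ifdef HAVE_OPREGION
int opregion_setup(struct display *d);		/* real version, built elsewhere */
void opregion_cleanup(struct display *d);
#else
/* feature-off stubs compile away; callers stay #ifdef-free */
static inline int opregion_setup(struct display *d) { (void)d; return 0; }
static inline void opregion_cleanup(struct display *d) { (void)d; }
#endif

int main(void)
{
	struct display d = { .id = 0 };

	/* the call sites are identical in both configurations */
	printf("setup: %d\n", opregion_setup(&d));
	opregion_cleanup(&d);
	return 0;
}
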
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 0d48b9bec29c..f13ab680c2cf 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -358,6 +358,7 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -399,7 +400,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
if (intel_crtc_has_dp_encoder(crtc_state)) {
intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n);
intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2);
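
The to_intel_display(state) one-liner added above recovers the display from whatever object is at hand; a helper of that shape can be modeled with C11 _Generic dispatch. A compilable sketch under that assumption, with stand-in types rather than the driver's macro:

#include <stdio.h>

struct intel_display { int id; };
struct intel_crtc { struct intel_display *display; };
struct intel_atomic_state { struct intel_display *display; };

static struct intel_display *crtc_to_display(struct intel_crtc *crtc)
{
	return crtc->display;
}

static struct intel_display *state_to_display(struct intel_atomic_state *state)
{
	return state->display;
}

/* one entry point, dispatched on the pointer type at compile time */
#define to_display(p)						\
	_Generic((p),						\
		 struct intel_crtc *: crtc_to_display,		\
		 struct intel_atomic_state *: state_to_display)(p)

int main(void)
{
	struct intel_display d = { .id = 7 };
	struct intel_crtc crtc = { .display = &d };
	struct intel_atomic_state state = { .display = &d };

	printf("%d %d\n", to_display(&crtc)->id, to_display(&state)->id);
	return 0;
}
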
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 9ca981b7a12c..ceaf9e3147da 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -92,7 +92,7 @@ int intel_pmdemand_init(struct drm_i915_private *i915)
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
+ if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
/* Wa_14016740474 */
intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
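
The pmdemand hunk ends in the driver's read-modify-write helper: intel_de_rmw(..., clear, set) reads the register, drops the clear bits, ORs in the set bits, and writes the result back, so passing 0 as clear (as in the workaround above) only sets bits. A small sketch of those semantics against a plain variable standing in for the MMIO register; the bit position is illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint32_t chicken_reg;	/* stand-in for an MMIO register */

/* same shape as the driver's helper: returns the old value */
static uint32_t rmw(uint32_t clear, uint32_t set)
{
	uint32_t old = chicken_reg;

	chicken_reg = (old & ~clear) | set;
	return old;
}

#define DMD_RSP_TIMEOUT_DISABLE (1u << 21)	/* made-up bit for the demo */

int main(void)
{
	chicken_reg = 0x00000100;

	rmw(0, DMD_RSP_TIMEOUT_DISABLE);	/* set the bit, touch nothing else */
	printf("after set:   %#010x\n", (unsigned)chicken_reg);

	rmw(DMD_RSP_TIMEOUT_DISABLE, 0);	/* clear it again */
	printf("after clear: %#010x\n", (unsigned)chicken_reg);
	return 0;
}
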
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7ce926241e83..feddc30e3375 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -18,15 +18,18 @@
#include "intel_pps_regs.h"
#include "intel_quirks.h"
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe);
static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
-static const char *pps_name(struct drm_i915_private *i915,
- struct intel_pps *pps)
+static const char *pps_name(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_pps *pps = &intel_dp->pps;
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
switch (pps->pps_pipe) {
case INVALID_PIPE:
@@ -60,14 +63,15 @@ static const char *pps_name(struct drm_i915_private *i915,
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
* See intel_pps_reset_all() for why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->display.pps.mutex);
+ mutex_lock(&display->pps.mutex);
return wakeref;
}
@@ -75,9 +79,10 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- mutex_unlock(&dev_priv->display.pps.mutex);
+ mutex_unlock(&display->pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -86,7 +91,8 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -94,22 +100,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ if (drm_WARN(display->drm,
+ intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
"skipping %s kick due to [ENCODER:%d:%s] being active\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"kicking %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -119,7 +125,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -130,7 +136,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
return;
@@ -143,14 +149,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
@@ -160,7 +166,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
}
}
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+static enum pipe vlv_find_free_pps(struct intel_display *display)
{
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
@@ -169,11 +175,11 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe !=
intel_dp->pps.pps_pipe);
@@ -181,7 +187,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps.pps_pipe);
} else {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.pps_pipe != INVALID_PIPE);
if (intel_dp->pps.active_pipe != INVALID_PIPE)
@@ -198,36 +204,36 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
return intel_dp->pps.pps_pipe;
- pipe = vlv_find_free_pps(dev_priv);
+ pipe = vlv_find_free_pps(display);
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev_priv, pipe);
+ vlv_steal_power_sequencer(display, pipe);
intel_dp->pps.pps_pipe = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"picked %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* init power sequencer on this pipe and port */
@@ -246,13 +252,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int pps_idx = intel_dp->pps.pps_idx;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps.pps_reset)
return pps_idx;
@@ -268,37 +274,38 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
return pps_idx;
}
-typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
+typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
-static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_STATUS(dev_priv, pps_idx)) & PP_ON;
+ return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}
-static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_CONTROL(dev_priv, pps_idx)) & EDP_FORCE_VDD;
+ return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}
-static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_any(struct intel_display *display, int pps_idx)
{
return true;
}
static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+vlv_initial_pps_pipe(struct intel_display *display,
enum port port, pps_check check)
{
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, pipe)) &
+ u32 port_sel = intel_de_read(display,
+ PP_ON_DELAYS(display, pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
- if (!check(dev_priv, pipe))
+ if (!check(display, pipe))
continue;
return pipe;
@@ -310,41 +317,43 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer\n",
dig_port->base.base.base.id, dig_port->base.base.name);
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
}
-static int intel_num_pps(struct drm_i915_private *i915)
+static int intel_num_pps(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return 2;
@@ -365,23 +374,24 @@ static int intel_num_pps(struct drm_i915_private *i915)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) <= PCH_ADP)
- return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+ return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
}
static int
-bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
+bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
- int pps_idx, pps_num = intel_num_pps(i915);
+ int pps_idx, pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- if (check(i915, pps_idx))
+ if (check(display, pps_idx))
return pps_idx;
}
@@ -391,11 +401,12 @@ bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- lockdep_assert_held(&i915->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_initial_power_sequencer_setup(intel_dp);
@@ -403,46 +414,47 @@ pps_initial_setup(struct intel_dp *intel_dp)
}
/* first ask the VBT */
- if (intel_num_pps(i915) > 1)
+ if (intel_num_pps(display) > 1)
intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
else
intel_dp->pps.pps_idx = 0;
- if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
+ if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
intel_dp->pps.pps_idx = -1;
/* VBT wasn't parsed yet? pick one where the panel is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
/* didn't find one? pick any */
if (intel_dp->pps.pps_idx < 0) {
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
return intel_pps_is_valid(intel_dp);
}
-void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+void intel_pps_reset_all(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
+ if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
return;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/*
@@ -455,16 +467,16 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->pps.pps_reset = true;
else
intel_dp->pps.pps_pipe = INVALID_PIPE;
@@ -482,7 +494,8 @@ struct pps_registers {
static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -494,17 +507,17 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
else
pps_idx = intel_dp->pps.pps_idx;
- regs->pp_ctrl = PP_CONTROL(dev_priv, pps_idx);
- regs->pp_stat = PP_STATUS(dev_priv, pps_idx);
- regs->pp_on = PP_ON_DELAYS(dev_priv, pps_idx);
- regs->pp_off = PP_OFF_DELAYS(dev_priv, pps_idx);
+ regs->pp_ctrl = PP_CONTROL(display, pps_idx);
+ regs->pp_stat = PP_STATUS(display, pps_idx);
+ regs->pp_on = PP_ON_DELAYS(display, pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
- regs->pp_div = PP_DIVISOR(dev_priv, pps_idx);
+ regs->pp_div = PP_DIVISOR(display, pps_idx);
}
static i915_reg_t
@@ -529,49 +542,51 @@ _pp_stat_reg(struct intel_dp *intel_dp)
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
+ drm_WARN(display->drm, 1,
"[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_dbg_kms(&dev_priv->drm,
+ pps_name(intel_dp));
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ pps_name(intel_dp),
+ intel_de_read(display, _pp_stat_reg(intel_dp)),
+ intel_de_read(display, _pp_ctrl_reg(intel_dp)));
}
}
@@ -589,68 +604,71 @@ static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask, u32 value)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
+ drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+ drm_dbg_kms(display->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power off time\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
/* take the difference of current time and panel power off time
* and then make the panel wait for t11_t12 if needed. */
@@ -695,13 +713,13 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
@@ -716,13 +734,14 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
*/
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -733,16 +752,16 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -750,21 +769,22 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s panel power wasn't enabled\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
msleep(intel_dp->pps.panel_power_up_delay);
}
@@ -779,7 +799,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
bool vdd;
@@ -792,27 +813,27 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+ drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -820,15 +841,16 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -869,7 +891,8 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -896,9 +919,10 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -907,7 +931,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
intel_dp->pps.want_panel_vdd = false;
@@ -919,25 +943,26 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
"[ENCODER:%d:%s] %s panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps)))
+ pps_name(intel_dp)))
return;
wait_panel_power_cycle(intel_dp);
@@ -947,24 +972,36 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
if (IS_IRONLAKE(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
+ /*
+ * WA: 22019252566
+ * Disable DPLS gating around power sequence.
+ */
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
pp |= PANEL_POWER_ON;
if (!IS_IRONLAKE(dev_priv))
pp |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->pps.last_power_on = jiffies;
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
+ PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
+
if (IS_IRONLAKE(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
@@ -981,24 +1018,25 @@ void intel_pps_on(struct intel_dp *intel_dp)
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s need VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1010,8 +1048,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
intel_dp->pps.want_panel_vdd = false;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -1036,7 +1074,7 @@ void intel_pps_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
/*
@@ -1054,15 +1092,15 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
/* Disable backlight in the panel power control. */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1075,8 +1113,8 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
intel_dp->pps.last_backlight_off = jiffies;
@@ -1089,7 +1127,7 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
*/
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -1100,7 +1138,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
if (is_enabled == enable)
return;
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ drm_dbg_kms(display->drm, "panel power control backlight %s\n",
enable ? "enable" : "disable");
if (enable)
@@ -1111,14 +1149,14 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps.pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(dev_priv, pipe);
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
intel_pps_vdd_off_sync_unlocked(intel_dp);
@@ -1132,27 +1170,27 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"detaching %s from [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
+ intel_de_write(display, pp_on_reg, 0);
+ intel_de_posting_read(display, pp_on_reg);
intel_dp->pps.pps_pipe = INVALID_PIPE;
}
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe)
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe,
"stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1160,7 +1198,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
if (intel_dp->pps.pps_pipe != pipe)
continue;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"stealing PPS %c from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1173,13 +1211,13 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
intel_dp->pps.pps_pipe != crtc->pipe) {
@@ -1195,7 +1233,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+ vlv_steal_power_sequencer(display, crtc->pipe);
intel_dp->pps.active_pipe = crtc->pipe;
@@ -1205,9 +1243,9 @@ void vlv_pps_init(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps.pps_pipe = crtc->pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"initializing %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
encoder->base.base.id, encoder->base.name);
/* init power sequencer on this pipe and port */
@@ -1217,10 +1255,11 @@ void vlv_pps_init(struct intel_encoder *encoder,
static void pps_vdd_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1231,11 +1270,11 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ pps_name(intel_dp));
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
}
@@ -1269,7 +1308,7 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
@@ -1278,11 +1317,11 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ if (!HAS_DDI(display))
+ intel_de_write(display, regs.pp_ctrl, pp_ctl);
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
+ pp_on = intel_de_read(display, regs.pp_on);
+ pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -1293,7 +1332,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = intel_de_read(dev_priv, regs.pp_div);
+ pp_div = intel_de_read(display, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -1305,9 +1344,10 @@ static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
const struct edp_power_seq *seq)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ drm_dbg_kms(display->drm,
+ "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
state_name,
seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
@@ -1315,7 +1355,7 @@ intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
@@ -1323,7 +1363,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- drm_err(&i915->drm, "PPS state mismatch\n");
+ drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
@@ -1338,9 +1378,9 @@ static bool pps_delays_valid(struct edp_power_seq *delays)
static void pps_init_delays_bios(struct intel_dp *intel_dp,
struct edp_power_seq *bios)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1385,9 +1425,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
static void pps_init_delays_spec(struct intel_dp *intel_dp,
struct edp_power_seq *spec)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1406,11 +1446,11 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1440,13 +1480,13 @@ static void pps_init_delays(struct intel_dp *intel_dp)
intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->pps.panel_power_up_delay,
intel_dp->pps.panel_power_down_delay,
intel_dp->pps.panel_power_cycle_delay);
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
intel_dp->pps.backlight_on_delay,
intel_dp->pps.backlight_off_delay);
@@ -1469,14 +1509,15 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1495,16 +1536,16 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ drm_WARN(display->drm, pp & PANEL_POWER_ON,
"Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ intel_de_write(display, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -1535,32 +1576,33 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
pp_on |= port_sel;
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
+ intel_de_write(display, regs.pp_on, pp_on);
+ intel_de_write(display, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div))
- intel_de_write(dev_priv, regs.pp_div,
+ intel_de_write(display, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
else
- intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(seq->t11_t12, 1000)));
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
+ intel_de_read(display, regs.pp_on),
+ intel_de_read(display, regs.pp_off),
i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ intel_de_read(display, regs.pp_div) :
+ (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1606,17 +1648,19 @@ bool intel_pps_init(struct intel_dp *intel_dp)
static void pps_init_late(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return;
- if (intel_num_pps(i915) < 2)
+ if (intel_num_pps(display) < 2)
return;
- drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
+ drm_WARN(display->drm,
+ connector->panel.vbt.backlight.controller >= 0 &&
intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
"[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
encoder->base.base.id, encoder->base.name,
@@ -1645,32 +1689,34 @@ void intel_pps_init_late(struct intel_dp *intel_dp)
}
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+void intel_pps_unlock_regs_wa(struct intel_display *display)
{
int pps_num;
int pps_idx;
- if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
+ if (!HAS_DISPLAY(display) || HAS_DDI(display))
return;
/*
* This w/a is needed at least on CPT/PPT, but to be sure apply it
* everywhere registers can be write protected.
*/
- pps_num = intel_num_pps(dev_priv);
+ pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, pps_idx),
+ intel_de_rmw(display, PP_CONTROL(display, pps_idx),
PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}
-void intel_pps_setup(struct drm_i915_private *i915)
+void intel_pps_setup(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.pps.mmio_base = PCH_PPS_BASE;
+ display->pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.pps.mmio_base = VLV_PPS_BASE;
+ display->pps.mmio_base = VLV_PPS_BASE;
else
- i915->display.pps.mmio_base = PPS_BASE;
+ display->pps.mmio_base = PPS_BASE;
}
static int intel_pps_show(struct seq_file *m, void *data)
@@ -1704,21 +1750,23 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
connector, &intel_pps_fops);
}
-void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1739,20 +1787,21 @@ void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(dev_priv, pipe);
+ pp_reg = PP_CONTROL(display, pipe);
panel_pipe = pipe;
} else {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = intel_de_read(dev_priv, pp_reg);
+ val = intel_de_read(display, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index 07ef96ca8da2..0c5da83a559e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -11,9 +11,9 @@
#include "intel_wakeref.h"
enum pipe;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -43,16 +43,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
-void intel_pps_reset_all(struct drm_i915_private *i915);
+void intel_pps_reset_all(struct intel_display *display);
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
-void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_unlock_regs_wa(struct intel_display *display);
+void intel_pps_setup(struct intel_display *display);
void intel_pps_connector_debugfs_add(struct intel_connector *connector);
-void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
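These prototype changes imply matching call-site updates elsewhere in the driver. A hypothetical caller (not part of this hunk) would change along these lines, relying on intel_display being embedded in drm_i915_private as i915->display:

	-	intel_pps_setup(i915);
	+	intel_pps_setup(&i915->display);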
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9cb1cdaaeefa..1f83b3b67ea6 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -205,14 +205,14 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder)
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->display.params.enable_psr == -1)
+ if (display->params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->display.params.enable_psr;
+ return display->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -222,14 +222,14 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->display.params.enable_psr == 1)
+ if (display->params.enable_psr == 1)
return false;
return true;
}
@@ -237,9 +237,9 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (i915->display.params.enable_psr != -1)
+ if (display->params.enable_psr != -1)
return false;
return true;
@@ -247,9 +247,9 @@ static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if ((i915->display.params.enable_psr != -1) ||
+ if ((display->params.enable_psr != -1) ||
(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
return false;
return true;
@@ -257,111 +257,111 @@ static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
EDP_PSR_MASK(intel_dp->psr.transcoder);
}
-static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_CTL(display, cpu_transcoder);
else
return HSW_SRD_CTL;
}
-static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_debug_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_DEBUG(display, cpu_transcoder);
else
return HSW_SRD_DEBUG;
}
-static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_PERF_CNT(display, cpu_transcoder);
else
return HSW_SRD_PERF_CNT;
}
-static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_status_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_STATUS(display, cpu_transcoder);
else
return HSW_SRD_STATUS;
}
-static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_imr_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IMR(display, cpu_transcoder);
else
return EDP_PSR_IMR;
}
-static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_iir_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IIR(display, cpu_transcoder);
else
return EDP_PSR_IIR;
}
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_CTL(display, cpu_transcoder);
else
return HSW_SRD_AUX_CTL;
}
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder, int i)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
else
return HSW_SRD_AUX_DATA(i);
}
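All eight psr_*_reg() helpers above share one shape: return the per-transcoder register on new enough display versions (8+, or 12+ for IMR/IIR), otherwise the fixed Haswell-era register. A minimal usage sketch, matching the calls made later in this diff:

	/* clear the enable bit in whichever PSR control register applies */
	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
		     EDP_PSR_ENABLE, 0);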
static void psr_irq_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
@@ -373,80 +373,81 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
psr_irq_mask_get(intel_dp), ~mask);
}
-static void psr_event_print(struct drm_i915_private *i915,
+static void psr_event_print(struct intel_display *display,
u32 val, bool sel_update_enabled)
{
- drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
+ drm_dbg_kms(display->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
+ drm_dbg_kms(display->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- drm_dbg_kms(&i915->drm, "\tMemory up\n");
+ drm_dbg_kms(display->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
+ drm_dbg_kms(display->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
+ drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- drm_dbg_kms(&i915->drm, "\tRegister updated\n");
+ drm_dbg_kms(display->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
+ drm_dbg_kms(display->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
+ drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
+ drm_dbg_kms(display->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
+ drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR disabled\n");
}
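psr_event_print() decodes the PSR_EVENT register one bit at a time. The same bit-to-message mapping could be expressed as a table walk; a hypothetical alternative, not proposed by this patch, with most entries elided:

	static const struct {
		u32 bit;
		const char *msg;
	} psr_events[] = {
		{ PSR_EVENT_PSR2_WD_TIMER_EXPIRE, "PSR2 watchdog timer expired" },
		{ PSR_EVENT_GRAPHICS_RESET, "Graphics reset" },
		/* remaining PSR_EVENT_* bits elided */
	};

	static void example_event_print(struct intel_display *display, u32 val)
	{
		int i;

		drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
		for (i = 0; i < ARRAY_SIZE(psr_events); i++)
			if (val & psr_events[i].bit)
				drm_dbg_kms(display->drm, "\t%s\n",
					    psr_events[i].msg);
	}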
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
u32 val;
val = intel_de_rmw(dev_priv,
PSR_EVENT(dev_priv, cpu_transcoder),
0, 0);
- psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
+ psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
}
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
+ drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
intel_dp->psr.irq_aux_error = true;
@@ -459,7 +460,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 * again, so we don't care about unmasking the interrupt
 * or unsetting irq_aux_error.
*/
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
@@ -468,14 +469,14 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -516,7 +517,7 @@ intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
*/
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
ssize_t r;
u16 w;
u8 y;
@@ -542,7 +543,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_x_granularity_offset(intel_dp),
&w, 2);
if (r != 2)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update x granularity\n");
/*
* Spec says that if the value read is 0 the default granularity should
@@ -555,7 +556,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_y_granularity_offset(intel_dp),
&y, 1);
if (r != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update y granularity\n");
y = 4;
}
@@ -569,17 +570,17 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
return;
}
if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
}
@@ -590,7 +591,7 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel replay %sis supported by panel\n",
intel_dp->psr.sink_panel_replay_su_support ?
"selective_update " : "");
@@ -598,20 +599,19 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
+ drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -620,7 +620,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(i915) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
@@ -638,7 +638,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
*/
intel_dp->psr.sink_psr2_support = y_req &&
intel_alpm_aux_wake_supported(intel_dp);
- drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
}
}
@@ -663,7 +663,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -679,7 +680,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+ psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -694,15 +695,15 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
- intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
aux_ctl);
}
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
+ if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
return false;
@@ -741,7 +742,7 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
static void _psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = DP_PSR_ENABLE;
if (crtc_state->has_sel_update) {
@@ -750,7 +751,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
if (intel_dp->psr.link_standby)
val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(i915) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= DP_PSR_CRC_VERIFICATION;
}
@@ -802,14 +803,15 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->display.params.psr_safest_params) {
+ if (display->params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -854,8 +856,8 @@ check_tp3_sel:
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
 * off-on transition case
 */
@@ -864,7 +866,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
- if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ if (drm_WARN_ON(display->drm, idle_frames > 0xf))
idle_frames = 0xf;
return idle_frames;
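A worked example of the computation above, with illustrative numbers: given a VBT idle_frames of 4 and a sink_sync_latency of 8, max(6, 4) = 6, then max(6, 8 + 1) = 9; since 9 <= 0xf no warning fires and 9 is returned.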
@@ -872,14 +874,15 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
if (IS_HASWELL(dev_priv))
@@ -890,23 +893,23 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= intel_psr1_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
- intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -950,7 +953,7 @@ static u8 frames_before_su_entry(struct intel_dp *intel_dp)
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
@@ -961,38 +964,39 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
val);
}
- intel_de_rmw(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
+ intel_de_rmw(display,
+ PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
TRANS_DP2_PANEL_REPLAY_ENABLE);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
+ if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
val |= intel_psr2_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
+ if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
if (psr2_block_count(intel_dp) > 2)
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
else
@@ -1000,7 +1004,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1023,12 +1027,12 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
- } else if (DISPLAY_VER(dev_priv) >= 20) {
+ } else if (DISPLAY_VER(display) >= 20) {
val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
}
@@ -1036,18 +1040,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- tmp = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
- } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
+ tmp = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
+ } else if (HAS_PSR2_SEL_FETCH(display)) {
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
}
if (intel_dp->psr.su_region_et_enabled)
@@ -1057,19 +1061,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
 * recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
+ intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}
static bool
-transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return cpu_transcoder == TRANSCODER_EDP;
else
return false;
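Summarizing the mapping transcoder_has_psr2() now implements: on ADL-P and display version 14+, PSR2 is available on transcoders A and B; on other version 12+ platforms, transcoder A only; on versions 9-11, the eDP transcoder only; anything older has no PSR2.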
@@ -1087,17 +1093,18 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_IDLE_FRAMES_MASK,
EDP_PSR2_IDLE_FRAMES(idle_frames));
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
@@ -1105,7 +1112,8 @@ static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
@@ -1140,12 +1148,13 @@ static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1155,9 +1164,10 @@ static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
/*
@@ -1181,7 +1191,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1191,7 +1201,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
exit_scanlines =
intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
- if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
return;
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
@@ -1200,17 +1210,17 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!dev_priv->display.params.enable_psr2_sel_fetch &&
+ if (!display->params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
return false;
}
if (crtc_state->uapi.async_flip) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, async flip enabled\n");
return false;
}
@@ -1221,7 +1231,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1243,7 +1254,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1264,8 +1275,8 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 hblank_total, hblank_ns, req_ns;
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
@@ -1278,7 +1289,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1288,12 +1299,12 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
int entry_setup_frames = 0;
if (psr_setup_time < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
return -ETIME;
@@ -1301,14 +1312,14 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- if (DISPLAY_VER(i915) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
/* setup entry frames can be up to 3 frames */
entry_setup_frames = 1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR setup entry frames %d\n",
entry_setup_frames);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
return -ETIME;
@@ -1322,7 +1333,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
crtc_state->hw.adjusted_mode.crtc_vblank_start;
int wake_lines;
@@ -1330,7 +1341,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
if (aux_less)
wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
else
- wake_lines = DISPLAY_VER(i915) < 20 ?
+ wake_lines = DISPLAY_VER(display) < 20 ?
psr2_block_count_lines(intel_dp) :
intel_dp->alpm_parameters.io_wake_lines;
@@ -1348,16 +1359,16 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
return false;
}
if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
}
@@ -1368,7 +1379,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1378,24 +1390,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* JSL and EHL only support eDP 1.3 */
if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG2(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
+ drm_dbg_kms(display->drm,
+ "PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
+ "PSR2 not completely functional in this stepping\n");
return false;
}
- if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
+ drm_dbg_kms(display->drm,
"PSR2 not supported in transcoder %s\n",
transcoder_name(crtc_state->cpu_transcoder));
return false;
@@ -1407,28 +1421,28 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
- drm_dbg_kms(&dev_priv->drm,
+ (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
max_bpp = 30;
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
psr_max_h = 3640;
psr_max_v = 2304;
max_bpp = 24;
}
if (crtc_state->pipe_bpp > max_bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, pipe bpp %d > max supported %d\n",
crtc_state->pipe_bpp, max_bpp);
return false;
@@ -1436,8 +1450,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
+ IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
}
@@ -1447,7 +1461,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
@@ -1462,18 +1476,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (HAS_PSR2_SEL_FETCH(dev_priv) &&
+ if (HAS_PSR2_SEL_FETCH(display) &&
!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
- !HAS_PSR_HW_TRACKING(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ !HAS_PSR_HW_TRACKING(display)) {
+ drm_dbg_kms(display->drm,
"Selective update not enabled, selective fetch not valid and no HW tracking available\n");
goto unsupported;
}
if (!psr2_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
+ drm_dbg_kms(display->drm,
+ "Selective update disabled by flag\n");
goto unsupported;
}
@@ -1481,23 +1496,23 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SDP indication do not fit in hblank\n");
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
+ if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
!intel_dp->psr.sink_panel_replay_su_support))
goto unsupported;
if (crtc_state->crc_enabled) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled because it would inhibit pipe CRC calculation\n");
goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SU granularity not compatible\n");
goto unsupported;
}
@@ -1515,7 +1530,7 @@ unsupported:
static bool _psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
@@ -1534,7 +1549,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
}
@@ -1547,7 +1562,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1556,7 +1571,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
if (!panel_replay_global_enabled(intel_dp)) {
- drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
+ drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
return false;
}
@@ -1567,7 +1582,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
/* 128b/132b Panel Replay is not supported on eDP */
if (intel_dp_is_uhbr(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with 128b/132b\n");
return false;
}
@@ -1578,7 +1593,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
(conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with HDCP\n");
return false;
}
@@ -1586,6 +1601,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
@@ -1593,22 +1614,22 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
+ drm_dbg_kms(display->drm, "PSR disabled by flag\n");
return;
}
if (intel_dp->psr.sink_not_reliable) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -1619,7 +1640,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* PSR is a transcoder level feature.
*/
if (crtc_state->joiner_pipes) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR disabled due to joiner\n");
return;
}
@@ -1640,7 +1661,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_dp *intel_dp;
@@ -1673,18 +1694,18 @@ void intel_psr_get_config(struct intel_encoder *encoder,
if (!intel_dp->psr.sel_update_enabled)
goto unlock;
- if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- val = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
+ if (HAS_PSR2_SEL_FETCH(display)) {
+ val = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
if (val & PSR2_MAN_TRK_CTL_ENABLE)
pipe_config->enable_psr2_sel_fetch = true;
}
pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 12) {
+ val = intel_de_read(display,
+ TRANS_EXITLINE(display, cpu_transcoder));
pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
}
unlock:
@@ -1693,17 +1714,17 @@ unlock:
static void intel_psr_activate(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- drm_WARN_ON(&dev_priv->drm,
- transcoder_has_psr2(dev_priv, cpu_transcoder) &&
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(display->drm,
+ transcoder_has_psr2(display, cpu_transcoder) &&
+ intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, intel_dp->psr.active);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1742,30 +1763,31 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool set_wa_bit = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(dev_priv, 11, 14))
+ if (IS_DISPLAY_VER(display, 11, 14))
set_wa_bit |= crtc_state->wm_level_disabled;
/* Wa_16013835468 */
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
crtc_state->hw.adjusted_mode.crtc_vdisplay;
if (set_wa_bit)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, wa_16013835468_bit_get(intel_dp));
else
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1773,7 +1795,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 * Only HSW and BDW have PSR AUX registers that need to be set up.
 * SKL+ use hardcoded values for PSR AUX transactions.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
hsw_psr_setup_aux(intel_dp);
/*
@@ -1790,7 +1812,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* Panel Replay on DP: No bits are applicable
* Panel Replay on eDP: All bits are applicable
*/
- if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
mask = EDP_PSR_DEBUG_MASK_HPD;
if (intel_dp_is_edp(intel_dp)) {
@@ -1804,17 +1826,17 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
mask |= EDP_PSR_DEBUG_MASK_LPSP;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
/*
* No separate pipe reg write mask on hsw/bdw, so have to unmask all
* registers in order to keep the CURSURFLIVE tricks working :(
*/
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ if (IS_DISPLAY_VER(display, 9, 10))
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
@@ -1822,7 +1844,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
- intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
+ intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
psr_irq_control(intel_dp);
@@ -1831,13 +1853,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* transcoder, EXITLINE will need to be unset when disabling PSR
*/
if (intel_dp->psr.dc3co_exitline)
- intel_de_rmw(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_EXITLINE(display, cpu_transcoder),
EXITLINE_MASK,
intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
- if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
+ if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
+ intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
@@ -1851,8 +1873,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
wm_optimization_wa(intel_dp, crtc_state);
if (intel_dp->psr.sel_update_enabled) {
- if (DISPLAY_VER(dev_priv) == 9)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ if (DISPLAY_VER(display) == 9)
+ intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1862,27 +1884,27 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* cause issues if non-supported panels are used.
*/
if (!intel_dp->psr.panel_replay_enabled &&
- (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
@@ -1897,11 +1919,11 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
 * first time that PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
- val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR interruption error set, not enabling PSR\n");
return false;
}
@@ -1913,11 +1935,11 @@ no_err:
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+ drm_WARN_ON(display->drm, intel_dp->psr.enabled);
intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
@@ -1938,9 +1960,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
return;
if (intel_dp->psr.panel_replay_enabled) {
- drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
} else {
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
@@ -1962,68 +1984,71 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
static void intel_psr_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
if (!intel_dp->psr.active) {
- if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
+ if (transcoder_has_psr2(display, cpu_transcoder)) {
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
}
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
+ val = intel_de_read(display,
+ psr_ctl_reg(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
return;
}
if (intel_dp->psr.panel_replay_enabled) {
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
} else if (intel_dp->psr.sel_update_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
- val = intel_de_rmw(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
}
intel_dp->psr.active = false;
}
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t psr_status;
u32 psr_status_mask;
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = psr_status_reg(dev_priv, cpu_transcoder);
+ psr_status = psr_status_reg(display, cpu_transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
/* Wait till PSR is idle */
- if (intel_de_wait_for_clear(dev_priv, psr_status,
+ if (intel_de_wait_for_clear(display, psr_status,
psr_status_mask, 2000))
- drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+ drm_err(display->drm, "Timed out waiting PSR idle state\n");
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2032,9 +2057,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
return;
if (intel_dp->psr.panel_replay_enabled)
- drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
else
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Disabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
@@ -2044,19 +2069,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
* Wa_16013835468
* Wa_14015648006
*/
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2065,12 +2090,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Panel Replay on eDP always uses AUX-less ALPM. */
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- intel_de_rmw(dev_priv,
- PORT_ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(display, cpu_transcoder),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
}
@@ -2101,12 +2126,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
+ if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
@@ -2126,7 +2151,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2140,7 +2165,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
}
/* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(&dev_priv->drm, psr->paused);
+ drm_WARN_ON(display->drm, psr->paused);
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -2177,45 +2202,53 @@ unlock:
mutex_unlock(&psr->lock);
}
-static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
-static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
-static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
-static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
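These four helpers exist because ADL-P and display 14+ relocated the manual-tracking control bits, so callers OR together whatever the helpers return instead of hardcoding either register layout. The composition, as used by psr_force_hw_tracking_exit() below (sketch only):

	u32 val = man_trk_ctl_enable_bit_get(display) |
		  man_trk_ctl_partial_frame_bit_get(display) |
		  man_trk_ctl_single_full_frame_bit_get(display) |
		  man_trk_ctl_continuos_full_frame(display);

	intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);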
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
- man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv));
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
+ man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display));
/*
* Display WA #0884: skl+
@@ -2230,20 +2263,20 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
* but testing proved that it works up to display 13. For anything
* newer than that, testing will be needed.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2253,36 +2286,37 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
break;
}
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
return;
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = man_trk_ctl_enable_bit_get(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
- val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
+ val |= man_trk_ctl_partial_frame_bit_get(display);
if (full_update) {
- val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
- val |= man_trk_ctl_continuos_full_frame(dev_priv);
+ val |= man_trk_ctl_single_full_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
goto exit;
}
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2335,13 +2369,14 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
+ (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2429,6 +2464,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -2525,7 +2561,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* calculation for those.
*/
if (crtc_state->psr2_su_area.y1 == -1) {
- drm_info_once(&dev_priv->drm,
+ drm_info_once(display->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
full_update = true;
@@ -2536,7 +2572,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
- ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2622,6 +2658,7 @@ skip_sel_fetch_set_loop:
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -2629,7 +2666,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!HAS_PSR(i915))
+ if (!HAS_PSR(display))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2670,7 +2707,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -2686,13 +2723,14 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
- drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
+ drm_WARN_ON(display->drm,
+ psr->enabled && !crtc_state->active_planes);
keep_disabled |= psr->sink_not_reliable;
keep_disabled |= !crtc_state->active_planes;
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
+ keep_disabled |= DISPLAY_VER(display) < 11 &&
crtc_state->wm_level_disabled;
if (!psr->enabled && !keep_disabled)
@@ -2717,7 +2755,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2725,14 +2763,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
- return intel_de_wait_for_clear(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
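The comment relies on the PSR2 state encoding: DEEP_SLEEP and every deeper state have bit 4 set, so a single wait-for-clear on that bit covers all of them. Spelled out as the one-shot check that intel_de_wait_for_clear() polls internally (a sketch under that assumption):

	u32 status = intel_de_read(display,
				   EDP_PSR2_STATUS(display, cpu_transcoder));
	/* bit 4 clear => state is already shallower than DEEP_SLEEP */
	bool ready = !(status & EDP_PSR2_STATUS_STATE_DEEP_SLEEP);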
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2741,8 +2779,8 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
- return intel_de_wait_for_clear(dev_priv,
- psr_status_reg(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ psr_status_reg(display, cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
@@ -2762,13 +2800,13 @@ static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
*/
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_encoder *encoder;
if (!new_crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int ret;
@@ -2786,13 +2824,14 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
if (ret)
- drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+ drm_err(display->drm,
+ "PSR wait timed out, atomic update may fail\n");
}
}
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t reg;
u32 mask;
@@ -2803,18 +2842,18 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ reg = EDP_PSR2_STATUS(display, cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = psr_status_reg(dev_priv, cpu_transcoder);
+ reg = psr_status_reg(display, cpu_transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&intel_dp->psr.lock);
- err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
+ err = intel_de_wait_for_clear(display, reg, mask, 50);
if (err)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
@@ -2822,7 +2861,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
return err == 0 && intel_dp->psr.enabled;
}
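Note the locking shape here: the PSR mutex is dropped around the 50 ms register poll so flush/invalidate work is not stalled, then retaken, and the final check guards against PSR having been disabled while the lock was released. In outline (a sketch, not the literal function body):

	mutex_unlock(&intel_dp->psr.lock);
	err = intel_de_wait_for_clear(display, reg, mask, 50);
	mutex_lock(&intel_dp->psr.lock);

	/* state may have changed while unlocked; re-check before re-enable */
	return err == 0 && intel_dp->psr.enabled;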
-static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+static int intel_psr_fastset_force(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct drm_modeset_acquire_ctx ctx;
@@ -2830,7 +2869,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_connector *conn;
int err = 0;
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -2840,7 +2879,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
to_intel_atomic_state(state)->internal = true;
retry:
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -2887,7 +2926,7 @@ retry:
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
@@ -2898,7 +2937,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
+ drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -2923,7 +2962,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode || old_disable_bits != disable_bits)
- ret = intel_psr_fastset_force(dev_priv);
+ ret = intel_psr_fastset_force(display);
return ret;
}
@@ -2975,7 +3014,7 @@ unlock:
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
@@ -2983,20 +3022,20 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* Send one update, otherwise lag is observed on screen */
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
return;
}
- val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
} else {
intel_psr_exit(intel_dp);
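The continuous-full-frame (CFF) handoff between invalidate and flush is symmetric; _psr_flush_handle() further down undoes what is armed here once busy_frontbuffer_bits drains. The lifecycle, as a comment-level sketch:

	/* invalidate: cff_enabled = true; MAN_TRK_CTL gains the CFF bit and
	 *             CURSURFLIVE is poked so one update goes out.
	 * flush (busy_frontbuffer_bits == 0): cff_enabled = false;
	 *             MAN_TRK_CTL is rewritten with single-full-frame and
	 *             CURSURFLIVE is poked again. */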
@@ -3005,7 +3044,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
/**
* intel_psr_invalidate - Invalidate PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the invalidate
*
@@ -3016,7 +3055,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
@@ -3024,7 +3063,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_FLIP)
return;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
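For callers, the signature change is the externally visible part: frontbuffer tracking code now hands in the intel_display rather than the i915 device. A hypothetical call site (frontbuffer_bits and the ORIGIN_CPU origin are illustrative; only the parameter types come from this patch):

	/* hypothetical caller, e.g. in frontbuffer tracking code */
	intel_psr_invalidate(display, frontbuffer_bits, ORIGIN_CPU);
	/* ... rendering completes ... */
	intel_psr_flush(display, frontbuffer_bits, ORIGIN_CPU);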
@@ -3054,7 +3093,8 @@ static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3075,17 +3115,18 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
- u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
/*
* Set psr2_sel_fetch_cff_enabled to false to allow selective
@@ -3093,11 +3134,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* SU configuration in case an update is sent for any reason after
* the SFF bit gets cleared by the HW on the next vblank.
*/
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
@@ -3118,7 +3159,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
/**
* intel_psr_flush - Flush PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -3129,12 +3170,12 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3183,11 +3224,12 @@ unlock:
*/
void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
return;
/*
@@ -3199,21 +3241,21 @@ void intel_psr_init(struct intel_dp *intel_dp)
* So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
* But GEN12 supports an instance of PSR registers per transcoder.
*/
- if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(display->drm,
"PSR condition failed: Port not supported\n");
return;
}
if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
- DISPLAY_VER(dev_priv) >= 20)
+ DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
- if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
+ if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
/* For new platforms up to TGL let's respect VBT back again */
intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
@@ -3250,7 +3292,7 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
u8 val;
@@ -3261,14 +3303,14 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading ALPM status\n");
+ drm_err(display->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
@@ -3278,21 +3320,21 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
+ drm_err(display->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
@@ -3309,7 +3351,7 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
*/
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
@@ -3325,7 +3367,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Error reading PSR status or error status\n");
goto exit;
}
@@ -3338,20 +3380,20 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
!error_status)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"PSR_ERROR_STATUS unhandled errors %x\n",
error_status & ~errors);
/* clear status register */
@@ -3390,13 +3432,13 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
*/
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3413,13 +3455,13 @@ void intel_psr_lock(const struct intel_crtc_state *crtc_state)
*/
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3431,7 +3473,7 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
const char *status = "unknown";
u32 val, status_val;
@@ -3451,8 +3493,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3467,7 +3509,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ psr_status_reg(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3528,7 +3571,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
@@ -3553,20 +3597,20 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
if (psr->panel_replay_enabled) {
- val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
if (intel_dp_is_edp(intel_dp))
- psr2_ctl = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv,
+ psr2_ctl = intel_de_read(display,
+ EDP_PSR2_CTL(display,
cpu_transcoder));
enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
} else if (psr->sel_update_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
@@ -3581,7 +3625,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
- val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
seq_printf(m, "Performance counter: %u\n",
REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
@@ -3600,8 +3644,8 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -3629,15 +3673,15 @@ unlock:
static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
/* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}
@@ -3652,18 +3696,19 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret = -ENODEV;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return ret;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+ drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -3679,13 +3724,13 @@ i915_edp_psr_debug_set(void *data, u64 val)
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
// TODO: split into per-transcoder PSR debug state
@@ -3700,15 +3745,15 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-void intel_psr_debugfs_register(struct drm_i915_private *i915)
+void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
- i915, &i915_edp_psr_debug_fops);
+ display, &i915_edp_psr_debug_fops);
debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
- i915, &i915_edp_psr_status_fops);
+ display, &i915_edp_psr_status_fops);
}
static const char *psr_mode_str(struct intel_dp *intel_dp)
@@ -3789,6 +3834,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
@@ -3801,7 +3847,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index d483c85870e1..4e09c10908e4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -11,11 +11,11 @@
enum fb_op_origin;
struct drm_connector;
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_plane;
@@ -35,10 +35,10 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct intel_dp *intel_dp);
@@ -60,6 +60,6 @@ void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
-void intel_psr_debugfs_register(struct drm_i915_private *i915);
+void intel_psr_debugfs_register(struct intel_display *display);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index dfd8b4960e6d..29b56d53a340 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -282,7 +282,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
!memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
(!memcmp(q->sink_device_id, ident->device_id,
sizeof(ident->device_id)) ||
- !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id))))
+ mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id))))
q->hook(intel_dp);
}
}
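mem_is_zero() is a readability replacement for the negated memchr_inv() idiom: it answers whether every byte of the buffer is zero, which here means the quirk entry matches any sink device ID. Assuming the usual string.h definition, the two forms are equivalent:

	/* sketch: mem_is_zero(p, n) behaves like */
	static inline bool mem_is_zero_sketch(const void *p, size_t n)
	{
		return !memchr_inv(p, 0, n);
	}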
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index d0d712405129..7cc519b402e9 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -95,7 +95,7 @@ struct intel_sdvo {
struct intel_encoder base;
struct i2c_adapter *i2c;
- u8 slave_addr;
+ u8 target_addr;
struct intel_sdvo_ddc ddc[3];
@@ -255,13 +255,13 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr,
+ .addr = intel_sdvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = intel_sdvo->slave_addr,
+ .addr = intel_sdvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = ch,
@@ -483,14 +483,14 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].addr = intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2 *i;
buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
buf[2*i + 1] = ((u8*)args)[i];
}
- msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].addr = intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2*i;
@@ -499,12 +499,12 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
/* the following two are to read the response */
status = SDVO_I2C_CMD_STATUS;
- msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].addr = intel_sdvo->target_addr;
msgs[i+1].flags = 0;
msgs[i+1].len = 1;
msgs[i+1].buf = &status;
- msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].addr = intel_sdvo->target_addr;
msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
@@ -2652,9 +2652,9 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
else
pin = GMBUS_PIN_DPB;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, slave addr 0x%x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, target addr 0x%x\n",
sdvo->base.base.base.id, sdvo->base.base.name,
- pin, sdvo->slave_addr);
+ pin, sdvo->target_addr);
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
@@ -2680,7 +2680,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo)
}
static u8
-intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo)
+intel_sdvo_get_target_addr(struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
const struct sdvo_device_mapping *my_mapping, *other_mapping;
@@ -2694,15 +2694,15 @@ intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo)
}
/* If the BIOS described our SDVO device, take advantage of it. */
- if (my_mapping->slave_addr)
- return my_mapping->slave_addr;
+ if (my_mapping->target_addr)
+ return my_mapping->target_addr;
/*
* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
- if (other_mapping->slave_addr) {
- if (other_mapping->slave_addr == 0x70)
+ if (other_mapping->target_addr) {
+ if (other_mapping->target_addr == 0x70)
return 0x72;
else
return 0x70;
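Address selection is a small decision table: use the BIOS-provided address for this device if there is one; otherwise take whichever of the two conventional SDVO addresses (0x70/0x72) the other device is not occupying. The caller then shifts right by one, converting the 8-bit bus address into the 7-bit address struct i2c_msg expects:

	/* my mapping set           -> use my_mapping->target_addr
	 * other mapping == 0x70    -> use 0x72
	 * other mapping set (else) -> use 0x70 */
	intel_sdvo->target_addr = intel_sdvo_get_target_addr(intel_sdvo) >> 1;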
@@ -2919,6 +2919,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
{
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct drm_connector *connector;
@@ -2946,7 +2947,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(display, &intel_connector->panel, NULL, NULL);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -3405,7 +3406,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
"SDVO %c", port_name(port));
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(intel_sdvo) >> 1;
+ intel_sdvo->target_addr = intel_sdvo_get_target_addr(intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(intel_sdvo);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index f8cceb3e5d8e..e657b09ede99 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,9 +48,9 @@
#include "intel_sprite.h"
#include "intel_sprite_regs.h"
-static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
+static char sprite_name(struct intel_display *display, enum pipe pipe, int sprite)
{
- return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
+ return pipe * DISPLAY_RUNTIME_INFO(display)->num_sprites[pipe] + sprite + 'A';
}
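sprite_name() flattens (pipe, sprite) into a single letter, so the letter a sprite gets depends on how many sprites the preceding pipes carry. A worked example, assuming two sprites per pipe (the count is platform-dependent):

	/* with num_sprites[pipe] == 2:
	 *   pipe A: sprite 0 -> 'A', sprite 1 -> 'B'
	 *   pipe B: sprite 0 -> 'C', sprite 1 -> 'D' */
	char name = sprite_name(display, PIPE_B, 0);	/* 'C' */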
static void i9xx_plane_linear_gamma(u16 gamma[8])
@@ -67,7 +67,7 @@ static void
chv_sprite_update_csc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
@@ -100,35 +100,35 @@ chv_sprite_update_csc(const struct intel_plane_state *plane_state)
if (!fb->format->is_yuv)
return;
- intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ intel_de_write_fw(display, SPCSCYGOFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ intel_de_write_fw(display, SPCSCCBOFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ intel_de_write_fw(display, SPCSCCROFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ intel_de_write_fw(display, SPCSCC01(plane_id),
SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
- intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ intel_de_write_fw(display, SPCSCC23(plane_id),
SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
- intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ intel_de_write_fw(display, SPCSCC45(plane_id),
SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
- intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ intel_de_write_fw(display, SPCSCC67(plane_id),
SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
- intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+ intel_de_write_fw(display, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
- intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCYGICLAMP(plane_id),
SPCSC_IMAX(1023) | SPCSC_IMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCBICLAMP(plane_id),
SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCRICLAMP(plane_id),
SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCYGOCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCBOCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCROCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
@@ -139,7 +139,7 @@ static void
vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -168,9 +168,9 @@ vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
}
/* FIXME these registers are single buffered :( */
- intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ intel_de_write_fw(display, SPCLRC0(pipe, plane_id),
SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
- intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ intel_de_write_fw(display, SPCLRC1(pipe, plane_id),
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
@@ -357,7 +357,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -373,7 +373,7 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ intel_de_write_fw(display, SPGAMC(pipe, plane_id, i - 1),
gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
@@ -382,7 +382,7 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int crtc_x = plane_state->uapi.dst.x1;
@@ -390,11 +390,11 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ intel_de_write_fw(display, SPSTRIDE(pipe, plane_id),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ intel_de_write_fw(display, SPPOS(pipe, plane_id),
SP_POS_Y(crtc_y) | SP_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ intel_de_write_fw(display, SPSIZE(pipe, plane_id),
SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1));
}
@@ -403,6 +403,7 @@ vlv_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -420,18 +421,18 @@ vlv_sprite_update_arm(struct intel_plane *plane,
chv_sprite_update_csc(plane_state);
if (key->flags) {
- intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMINVAL(pipe, plane_id),
key->min_value);
- intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMSK(pipe, plane_id),
key->channel_mask);
- intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMAXVAL(pipe, plane_id),
key->max_value);
}
- intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
- intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id),
+ intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(display, SPTILEOFF(pipe, plane_id),
SP_OFFSET_Y(y) | SP_OFFSET_X(x));
/*
@@ -439,8 +440,8 @@ vlv_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
- intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_sprite_update_clrc(plane_state);
@@ -451,18 +452,19 @@ static void
vlv_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id), 0);
}
static bool
vlv_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
@@ -474,7 +476,7 @@ vlv_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+ ret = intel_de_read(display, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
@@ -766,7 +768,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
int i;
@@ -778,17 +780,17 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ intel_de_write_fw(display, SPRGAMC(pipe, i),
gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 2), gamma[i]);
i++;
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
@@ -797,6 +799,7 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
@@ -812,14 +815,14 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
SPRITE_SRC_WIDTH(src_w - 1) |
SPRITE_SRC_HEIGHT(src_h - 1);
- intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ intel_de_write_fw(display, SPRSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, SPRPOS(pipe),
+ intel_de_write_fw(display, SPRPOS(pipe),
SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, SPRSIZE(pipe),
+ intel_de_write_fw(display, SPRSIZE(pipe),
SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1));
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
+ intel_de_write_fw(display, SPRSCALE(pipe), sprscale);
}
static void
@@ -827,6 +830,7 @@ ivb_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -840,20 +844,20 @@ ivb_sprite_update_arm(struct intel_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
- intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(display, SPRKEYMSK(pipe),
key->channel_mask);
- intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(display, SPRKEYMAX(pipe), key->max_value);
}
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, SPROFFSET(pipe),
+ intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
- intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, SPRTILEOFF(pipe),
+ intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, SPRTILEOFF(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
}
@@ -862,8 +866,8 @@ ivb_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
- intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_de_write_fw(display, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(display, SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_sprite_update_gamma(plane_state);
@@ -873,20 +877,22 @@ static void
ivb_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
+ intel_de_write_fw(display, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
- intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
+ intel_de_write_fw(display, SPRSCALE(pipe), 0);
+ intel_de_write_fw(display, SPRSURF(pipe), 0);
}
static bool
ivb_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -897,7 +903,7 @@ ivb_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+ ret = intel_de_read(display, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
@@ -1073,7 +1079,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
@@ -1088,7 +1094,7 @@ static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ intel_de_write_fw(display, DVSGAMC_G4X(pipe, i - 1),
gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
@@ -1103,7 +1109,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
@@ -1117,12 +1123,12 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ intel_de_write_fw(display, DVSGAMC_ILK(pipe, i),
gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
@@ -1131,7 +1137,7 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -1146,13 +1152,13 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
DVS_SRC_WIDTH(src_w - 1) |
DVS_SRC_HEIGHT(src_h - 1);
- intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ intel_de_write_fw(display, DVSSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, DVSPOS(pipe),
+ intel_de_write_fw(display, DVSPOS(pipe),
DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, DVSSIZE(pipe),
+ intel_de_write_fw(display, DVSSIZE(pipe),
DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1));
- intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
+ intel_de_write_fw(display, DVSSCALE(pipe), dvsscale);
}
static void
@@ -1160,6 +1166,7 @@ g4x_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -1173,14 +1180,14 @@ g4x_sprite_update_arm(struct intel_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
- intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(display, DVSKEYMSK(pipe),
key->channel_mask);
- intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value);
}
- intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, DVSTILEOFF(pipe),
DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
@@ -1188,8 +1195,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
- intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_de_write_fw(display, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(display, DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
@@ -1202,19 +1209,20 @@ static void
g4x_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
- intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
+ intel_de_write_fw(display, DVSCNTR(pipe), 0);
/* Disable the scaler */
- intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
- intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
+ intel_de_write_fw(display, DVSSCALE(pipe), 0);
+ intel_de_write_fw(display, DVSSURF(pipe), 0);
}
static bool
g4x_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1225,7 +1233,7 @@ g4x_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
+ ret = intel_de_read(display, DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
@@ -1255,7 +1263,7 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_rect *src = &plane_state->uapi.src;
const struct drm_rect *dst = &plane_state->uapi.dst;
@@ -1281,7 +1289,8 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (src_h & 1) {
- drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n");
+ drm_dbg_kms(display->drm,
+ "Source height must be even with interlaced modes\n");
return -EINVAL;
}
min_height = 6;
@@ -1293,19 +1302,22 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
if (src_w < min_width || src_h < min_height ||
src_w > 2048 || src_h > 2048) {
- drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
+ drm_dbg_kms(display->drm,
+ "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
src_w, src_h, min_width, min_height, 2048, 2048);
return -EINVAL;
}
if (width_bytes > 4096) {
- drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n",
+ drm_dbg_kms(display->drm,
+ "Fetch width (%d) exceeds hardware max with scaling (%u)\n",
width_bytes, 4096);
return -EINVAL;
}
if (stride > 4096) {
- drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n",
+ drm_dbg_kms(display->drm,
+ "Stride (%u) exceeds hardware max with scaling (%u)\n",
stride, 4096);
return -EINVAL;
}
@@ -1317,6 +1329,7 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int min_scale = DRM_PLANE_NO_SCALING;
@@ -1324,7 +1337,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int ret;
if (g4x_fb_scalable(plane_state->hw.fb)) {
- if (DISPLAY_VER(dev_priv) < 7) {
+ if (DISPLAY_VER(display) < 7) {
min_scale = 1;
max_scale = 16 << 16;
} else if (IS_IVYBRIDGE(dev_priv)) {
@@ -1353,7 +1366,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
@@ -1364,6 +1377,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned int rotation = plane_state->hw.rotation;
@@ -1371,7 +1385,7 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
@@ -1573,6 +1587,7 @@ struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int sprite)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
@@ -1604,7 +1619,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
plane_funcs = &vlv_sprite_funcs;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
+ } else if (DISPLAY_VER(display) >= 7) {
plane->update_noarm = ivb_sprite_update_noarm;
plane->update_arm = ivb_sprite_update_arm;
plane->disable_arm = ivb_sprite_disable_arm;
@@ -1663,11 +1678,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(dev_priv, pipe, sprite));
+ "sprite %c", sprite_name(display, pipe, sprite));
kfree(modifiers);
if (ret)
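
Aside (editor's note): nearly every hunk in this series begins by deriving a struct intel_display pointer, and to_intel_display() is fed a drm_device in one place, an encoder in another, a crtc_state or crtc elsewhere. In the driver that polymorphism comes from a C11 _Generic macro that dispatches on the argument's static type. The sketch below shows only the dispatch technique; the struct layouts and helper names are illustrative stand-ins, not the real i915 definitions.

/* Editorial sketch: type-dispatched accessor in the style of to_intel_display() */
struct intel_display { int dummy; };
struct drm_device { struct intel_display *display; };
struct intel_encoder { struct drm_device *dev; };
struct intel_crtc { struct drm_device *dev; };

static inline struct intel_display *
__dev_to_display(struct drm_device *dev) { return dev->display; }

static inline struct intel_display *
__encoder_to_display(struct intel_encoder *encoder) { return encoder->dev->display; }

static inline struct intel_crtc;	/* (forward use above is already complete) */

static inline struct intel_display *
__crtc_to_display(struct intel_crtc *crtc) { return crtc->dev->display; }

/* one name, many argument types: _Generic selects the matching helper */
#define to_intel_display(p)						\
	_Generic((p),							\
		 struct drm_device *:	 __dev_to_display,		\
		 struct intel_encoder *: __encoder_to_display,		\
		 struct intel_crtc *:	 __crtc_to_display)(p)

With this shape, to_intel_display(encoder) and to_intel_display(crtc) both compile to the right accessor, which is why the hunks above can use one spelling regardless of the context object at hand.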
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 9887967b2ca5..6f2ee7dbc43b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -393,6 +393,9 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
u32 val;
+ if (DISPLAY_VER(i915) >= 14)
+ return;
+
drm_WARN_ON(&i915->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9df0f1263913..581844d1db9a 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -914,8 +914,8 @@ static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 tmp = intel_de_read(dev_priv, TV_CTL);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 tmp = intel_de_read(display, TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -928,13 +928,12 @@ intel_enable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
+ intel_de_rmw(display, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,10 +942,9 @@ intel_disable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
- intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
+ intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
@@ -960,9 +958,10 @@ static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = i915->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
@@ -1092,6 +1091,7 @@ static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -1104,11 +1104,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- tv_ctl = intel_de_read(dev_priv, TV_CTL);
- hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
- hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
- vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
- vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
+ tv_ctl = intel_de_read(display, TV_CTL);
+ hctl1 = intel_de_read(display, TV_H_CTL_1);
+ hctl3 = intel_de_read(display, TV_H_CTL_3);
+ vctl1 = intel_de_read(display, TV_V_CTL_1);
+ vctl2 = intel_de_read(display, TV_V_CTL_2);
tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1143,17 +1143,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
break;
}
- tmp = intel_de_read(dev_priv, TV_WIN_POS);
+ tmp = intel_de_read(display, TV_WIN_POS);
xpos = tmp >> 16;
ypos = tmp & 0xffff;
- tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
+ tmp = intel_de_read(display, TV_WIN_SIZE);
xsize = tmp >> 16;
ysize = tmp & 0xffff;
intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&mode));
intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1171,10 +1171,10 @@ intel_tv_get_config(struct intel_encoder *encoder,
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
-static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+static bool intel_tv_source_too_wide(struct intel_display *display,
int hdisplay)
{
- return DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024;
+ return DISPLAY_VER(display) == 3 && hdisplay > 1024;
}
static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
@@ -1192,6 +1192,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state =
to_intel_atomic_state(pipe_config->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -1214,7 +1215,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
+ drm_dbg_kms(display->drm, "forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
pipe_config->port_clock = tv_mode->clock;
@@ -1228,14 +1229,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+ if (intel_tv_source_too_wide(display, hdisplay) ||
!intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
int extra, top, bottom;
extra = adjusted_mode->crtc_vdisplay - vdisplay;
if (extra < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No vertical scaling for >1024 pixel wide modes\n");
return -EINVAL;
}
@@ -1269,7 +1270,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(adjusted_mode));
/*
@@ -1355,7 +1356,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
}
static void
-set_tv_mode_timings(struct drm_i915_private *dev_priv,
+set_tv_mode_timings(struct intel_display *display,
const struct tv_mode *tv_mode,
bool burst_ena)
{
@@ -1401,32 +1402,32 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
- intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
- intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
- intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
- intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
- intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
- intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
- intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
- intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
- intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
- intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
+ intel_de_write(display, TV_H_CTL_1, hctl1);
+ intel_de_write(display, TV_H_CTL_2, hctl2);
+ intel_de_write(display, TV_H_CTL_3, hctl3);
+ intel_de_write(display, TV_V_CTL_1, vctl1);
+ intel_de_write(display, TV_V_CTL_2, vctl2);
+ intel_de_write(display, TV_V_CTL_3, vctl3);
+ intel_de_write(display, TV_V_CTL_4, vctl4);
+ intel_de_write(display, TV_V_CTL_5, vctl5);
+ intel_de_write(display, TV_V_CTL_6, vctl6);
+ intel_de_write(display, TV_V_CTL_7, vctl7);
}
-static void set_color_conversion(struct drm_i915_private *dev_priv,
+static void set_color_conversion(struct intel_display *display,
const struct color_conversion *color_conversion)
{
- intel_de_write(dev_priv, TV_CSC_Y,
+ intel_de_write(display, TV_CSC_Y,
(color_conversion->ry << 16) | color_conversion->gy);
- intel_de_write(dev_priv, TV_CSC_Y2,
+ intel_de_write(display, TV_CSC_Y2,
(color_conversion->by << 16) | color_conversion->ay);
- intel_de_write(dev_priv, TV_CSC_U,
+ intel_de_write(display, TV_CSC_U,
(color_conversion->ru << 16) | color_conversion->gu);
- intel_de_write(dev_priv, TV_CSC_U2,
+ intel_de_write(display, TV_CSC_U2,
(color_conversion->bu << 16) | color_conversion->au);
- intel_de_write(dev_priv, TV_CSC_V,
+ intel_de_write(display, TV_CSC_V,
(color_conversion->rv << 16) | color_conversion->gv);
- intel_de_write(dev_priv, TV_CSC_V2,
+ intel_de_write(display, TV_CSC_V2,
(color_conversion->bv << 16) | color_conversion->av);
}
@@ -1435,6 +1436,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
@@ -1450,7 +1452,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
int xpos, ypos;
unsigned int xsize, ysize;
- tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ tv_ctl = intel_de_read(display, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (intel_tv->type) {
@@ -1525,21 +1527,21 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
- set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+ set_tv_mode_timings(display, tv_mode, burst_ena);
- intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
- intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
- intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
+ intel_de_write(display, TV_SC_CTL_1, scctl1);
+ intel_de_write(display, TV_SC_CTL_2, scctl2);
+ intel_de_write(display, TV_SC_CTL_3, scctl3);
- set_color_conversion(dev_priv, color_conversion);
+ set_color_conversion(display, color_conversion);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display, TV_CLR_KNOBS, 0x00404000);
else
- intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
+ intel_de_write(display, TV_CLR_KNOBS, 0x00606000);
if (video_levels)
- intel_de_write(dev_priv, TV_CLR_LEVEL,
+ intel_de_write(display, TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
@@ -1548,7 +1550,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
tv_filter_ctl = TV_AUTO_SCALE;
if (tv_conn_state->bypass_vfilter)
tv_filter_ctl |= TV_V_FILTER_BYPASS;
- intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
+ intel_de_write(display, TV_FILTER_CTL_1, tv_filter_ctl);
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1559,31 +1561,32 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
conn_state->tv.margins.right);
ysize -= (tv_conn_state->margins.top +
tv_conn_state->margins.bottom);
- intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
- intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
+ intel_de_write(display, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(display, TV_WIN_SIZE, (xsize << 16) | ysize);
j = 0;
for (i = 0; i < 60; i++)
- intel_de_write(dev_priv, TV_H_LUMA(i),
+ intel_de_write(display, TV_H_LUMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++)
- intel_de_write(dev_priv, TV_H_CHROMA(i),
+ intel_de_write(display, TV_H_CHROMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- intel_de_write(dev_priv, TV_V_LUMA(i),
+ intel_de_write(display, TV_V_LUMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- intel_de_write(dev_priv, TV_V_CHROMA(i),
+ intel_de_write(display, TV_V_CHROMA(i),
tv_mode->filter_table[j++]);
- intel_de_write(dev_priv, TV_DAC,
- intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
- intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(display, TV_DAC,
+ intel_de_read(display, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(display, TV_CTL, tv_ctl);
}
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1600,8 +1603,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
spin_unlock_irq(&dev_priv->irq_lock);
}
- save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
- save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(display, TV_CTL);
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1627,15 +1630,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
- intel_de_write(dev_priv, TV_CTL, tv_ctl);
- intel_de_write(dev_priv, TV_DAC, tv_dac);
- intel_de_posting_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_CTL, tv_ctl);
+ intel_de_write(display, TV_DAC, tv_dac);
+ intel_de_posting_read(display, TV_DAC);
intel_crtc_wait_for_next_vblank(crtc);
type = -1;
- tv_dac = intel_de_read(dev_priv, TV_DAC);
- drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
+ tv_dac = intel_de_read(display, TV_DAC);
+ drm_dbg_kms(display->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1643,25 +1646,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
+ drm_dbg_kms(display->drm, "Unrecognised TV connection\n");
type = -1;
}
- intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
- intel_de_posting_read(dev_priv, TV_CTL);
+ intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(display, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(display, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_crtc_wait_for_next_vblank(crtc);
@@ -1711,12 +1714,13 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
if (!intel_display_device_enabled(i915))
@@ -1791,7 +1795,7 @@ intel_tv_set_mode_type(struct drm_display_mode *mode,
static int
intel_tv_get_modes(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i, count = 0;
@@ -1805,7 +1809,7 @@ intel_tv_get_modes(struct drm_connector *connector)
continue;
/* no vertical scaling with wide sources on gen3 */
- if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 &&
+ if (DISPLAY_VER(display) == 3 && input->w > 1024 &&
input->h > intel_tv_mode_vdisplay(tv_mode))
continue;
@@ -1822,7 +1826,8 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
if (count == 0) {
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm,
+ "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1887,7 +1892,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
static void intel_tv_add_properties(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_connector_state *conn_state = connector->state;
const char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i;
@@ -1903,32 +1908,32 @@ static void intel_tv_add_properties(struct drm_connector *connector)
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
/* 1080p50/1080p60 not supported on gen3 */
- if (DISPLAY_VER(i915) == 3 && tv_modes[i].oversample == 1)
+ if (DISPLAY_VER(display) == 3 && tv_modes[i].oversample == 1)
break;
tv_format_names[i] = tv_modes[i].name;
}
- drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names);
+ drm_mode_create_tv_properties_legacy(display->drm, i, tv_format_names);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.legacy_tv_mode_property,
+ display->drm->mode_config.legacy_tv_mode_property,
conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_left_margin_property,
+ display->drm->mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_top_margin_property,
+ display->drm->mode_config.tv_top_margin_property,
conn_state->tv.margins.top);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_right_margin_property,
+ display->drm->mode_config.tv_right_margin_property,
conn_state->tv.margins.right);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_bottom_margin_property,
+ display->drm->mode_config.tv_bottom_margin_property,
conn_state->tv.margins.bottom);
}
void
-intel_tv_init(struct drm_i915_private *dev_priv)
+intel_tv_init(struct intel_display *display)
{
struct drm_connector *connector;
struct intel_tv *intel_tv;
@@ -1936,11 +1941,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ if ((intel_de_read(display, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
- if (!intel_bios_is_tv_present(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
+ if (!intel_bios_is_tv_present(display)) {
+ drm_dbg_kms(display->drm, "Integrated TV is not present.\n");
return;
}
@@ -1948,15 +1953,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* Sanity check the TV output by checking to see if the
* DAC register holds a value
*/
- save_tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_dac = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
- tv_dac_on = intel_de_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- tv_dac_off = intel_de_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac);
+ intel_de_write(display, TV_DAC, save_tv_dac);
/*
* If the register does not hold the state change enable
@@ -1994,10 +1999,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
intel_connector->base.polled = intel_connector->polled;
- drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
+ drm_connector_init(display->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_tv_enc_funcs,
+ drm_encoder_init(display->drm, &intel_encoder->base,
+ &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h
index f08827b8bf2b..0f280f69e73c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.h
+++ b/drivers/gpu/drm/i915/display/intel_tv.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_TV_H__
#define __INTEL_TV_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_tv_init(struct drm_i915_private *dev_priv);
+void intel_tv_init(struct intel_display *display);
#else
-static inline void intel_tv_init(struct drm_i915_private *dev_priv)
+static inline void intel_tv_init(struct intel_display *display)
{
}
#endif
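
Aside (editor's note): the intel_tv.h hunk is the usual opaque-pointer header convention. A forward declaration of struct intel_display keeps i915-private headers out of the include chain, and the !I915 branch (the xe driver build) compiles the entry point into an empty inline so callers never need their own #ifdefs. A minimal sketch of the pattern, with setup_outputs() as a hypothetical caller:

struct intel_display;	/* opaque: no header needed to pass the pointer */

#ifdef I915
void intel_tv_init(struct intel_display *display);
#else
static inline void intel_tv_init(struct intel_display *display) { }
#endif

static void setup_outputs(struct intel_display *display)
{
	intel_tv_init(display);	/* compiles away on builds without TV support */
}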
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 5b065e1cd4e4..0b7f2134e441 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -67,8 +67,8 @@
*/
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ struct intel_display *display = to_intel_display(crtc->dev);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
const struct drm_display_mode *mode = &vblank->hwmode;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 pixel, vbl_start, hsync_start, htotal;
@@ -103,8 +103,8 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
* we get a low value that's stable across two reads of the high
* register.
*/
- frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe),
- PIPEFRAME(dev_priv, pipe));
+ frame = intel_de_read64_2x32(display, PIPEFRAMEPIXEL(display, pipe),
+ PIPEFRAME(display, pipe));
pixel = frame & PIPE_PIXEL_MASK;
frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff;
@@ -119,19 +119,19 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ struct intel_display *display = to_intel_display(crtc->dev);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
if (!vblank->max_vblank_count)
return 0;
- return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(dev_priv, pipe));
+ return intel_de_read(display, PIPE_FRMCOUNT_G4X(display, pipe));
}
static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
const struct drm_display_mode *mode = &vblank->hwmode;
u32 htotal = mode->crtc_htotal;
@@ -150,16 +150,16 @@ static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
* pipe frame time stamp. The time stamp value
* is sampled at every start of vertical blank.
*/
- scan_prev_time = intel_de_read_fw(dev_priv,
+ scan_prev_time = intel_de_read_fw(display,
PIPE_FRMTMSTMP(crtc->pipe));
/*
* The TIMESTAMP_CTR register has the current
* time stamp value.
*/
- scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
+ scan_curr_time = intel_de_read_fw(display, IVB_TIMESTAMP_CTR);
- scan_post_time = intel_de_read_fw(dev_priv,
+ scan_post_time = intel_de_read_fw(display,
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
@@ -190,8 +190,9 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
return scanline;
}
-static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
+int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/*
@@ -220,7 +221,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
return -1;
else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return 2;
@@ -234,8 +235,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
const struct drm_display_mode *mode = &vblank->hwmode;
enum pipe pipe = crtc->pipe;
@@ -249,7 +249,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal = intel_mode_vtotal(mode);
- position = intel_de_read_fw(dev_priv, PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK;
+ position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -263,13 +263,13 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
- if (HAS_DDI(dev_priv) && !position) {
+ if (HAS_DDI(display) && !position) {
int i, temp;
for (i = 0; i < 100; i++) {
udelay(1);
- temp = intel_de_read_fw(dev_priv,
- PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK;
+ temp = intel_de_read_fw(display,
+ PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
if (temp != position) {
position = temp;
break;
@@ -284,15 +284,6 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + vtotal + crtc->scanline_offset) % vtotal;
}
-int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
-{
- const struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
- const struct drm_display_mode *mode = &vblank->hwmode;
- int vtotal = intel_mode_vtotal(mode);
-
- return (scanline + vtotal - crtc->scanline_offset) % vtotal;
-}
-
/*
* The uncore version of the spin lock functions is used to decide
* whether we need to lock the uncore lock or not. This is only
@@ -303,41 +294,49 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
* all register accesses to the same cacheline to be serialized,
* otherwise they may hang.
*/
-static void intel_vblank_section_enter(struct drm_i915_private *i915)
+#ifdef I915
+static void intel_vblank_section_enter(struct intel_display *display)
__acquires(i915->uncore.lock)
{
-#ifdef I915
+ struct drm_i915_private *i915 = to_i915(display->drm);
spin_lock(&i915->uncore.lock);
-#endif
}
-static void intel_vblank_section_exit(struct drm_i915_private *i915)
+static void intel_vblank_section_exit(struct intel_display *display)
__releases(i915->uncore.lock)
{
-#ifdef I915
+ struct drm_i915_private *i915 = to_i915(display->drm);
spin_unlock(&i915->uncore.lock);
-#endif
+}
+#else
+static void intel_vblank_section_enter(struct intel_display *display)
+{
}
+static void intel_vblank_section_exit(struct intel_display *display)
+{
+}
+#endif
+
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = _crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(_crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
- bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
+ bool use_scanline_counter = DISPLAY_VER(display) >= 5 ||
+ IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 ||
crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
- drm_dbg(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm, !mode->crtc_clock)) {
+ drm_dbg(display->drm,
"trying to get scanoutpos for disabled pipe %c\n",
pipe_name(pipe));
return false;
@@ -355,7 +354,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
* preemption disabled, so the following code must not block.
*/
local_irq_save(irqflags);
- intel_vblank_section_enter(dev_priv);
+ intel_vblank_section_enter(display);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -387,7 +386,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (intel_de_read_fw(display, PIPEFRAMEPIXEL(display, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */
vbl_start *= htotal;
@@ -423,7 +422,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- intel_vblank_section_exit(dev_priv);
+ intel_vblank_section_exit(display);
local_irq_restore(irqflags);
/*
@@ -458,42 +457,42 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned long irqflags;
int position;
local_irq_save(irqflags);
- intel_vblank_section_enter(dev_priv);
+ intel_vblank_section_enter(display);
position = __intel_get_crtc_scanline(crtc);
- intel_vblank_section_exit(dev_priv);
+ intel_vblank_section_exit(display);
local_irq_restore(irqflags);
return position;
}
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+static bool pipe_scanline_is_moving(struct intel_display *display,
enum pipe pipe)
{
- i915_reg_t reg = PIPEDSL(dev_priv, pipe);
+ i915_reg_t reg = PIPEDSL(display, pipe);
u32 line1, line2;
- line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ line1 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK;
msleep(5);
- line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ line2 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK;
return line1 != line2;
}
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- drm_err(&dev_priv->drm,
+ if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100))
+ drm_err(display->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), str_on_off(state));
}
@@ -511,8 +510,8 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u8 mode_flags = crtc_state->mode_flags;
struct drm_display_mode adjusted_mode;
int vmax_vblank_start = 0;
@@ -521,7 +520,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (vrr_enable) {
- drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ drm_WARN_ON(display->drm,
+ (mode_flags & I915_MODE_FLAG_VRR) == 0);
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
@@ -543,8 +543,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
* __intel_get_crtc_scanline()) with vblank_time_lock?
* Need to audit everything to make sure it's safe.
*/
- spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
- intel_vblank_section_enter(i915);
+ spin_lock_irqsave(&display->drm->vblank_time_lock, irqflags);
+ intel_vblank_section_enter(display);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
@@ -553,8 +553,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
- intel_vblank_section_exit(i915);
- spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
+ intel_vblank_section_exit(display);
+ spin_unlock_irqrestore(&display->drm->vblank_time_lock, irqflags);
}
int intel_mode_vdisplay(const struct drm_display_mode *mode)
@@ -652,14 +652,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
*/
if (intel_color_uses_dsb(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ evade->min -= intel_mode_vblank_start(adjusted_mode) -
+ intel_mode_vdisplay(adjusted_mode);
}
/* must be called with vblank interrupt already enabled! */
int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
{
struct intel_crtc *crtc = evade->crtc;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
long timeout = msecs_to_jiffies_timeout(1);
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
@@ -681,7 +682,7 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
break;
if (!timeout) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 7e526f6861e4..6d7336256982 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -40,6 +40,6 @@ void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable);
-int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline);
+int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VBLANK_H__ */
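
Aside (editor's note): the comment in i915_get_vblank_counter() above notes that the hardware gives a low value that is stable across two reads of the high register, and intel_de_read64_2x32() relies on exactly that to avoid a torn 64-bit read when the counter rolls over between the two dword accesses. Below is a self-contained sketch of that retry technique; fake_regs and mmio_read32() are stand-ins for real MMIO, not driver code.

#include <stdint.h>
#include <stdio.h>

/* stand-in for hardware registers; a real driver reads MMIO here */
static uint32_t fake_regs[2] = { 0xfffffffeu, 0x00000003u };

static uint32_t mmio_read32(int reg)
{
	return fake_regs[reg];
}

/* re-read until the high dword is stable across the low-dword read */
static uint64_t read64_2x32(int lo_reg, int hi_reg)
{
	uint32_t lo, hi, old_hi;

	hi = mmio_read32(hi_reg);
	do {
		old_hi = hi;
		lo = mmio_read32(lo_reg);
		hi = mmio_read32(hi_reg);	/* rollover check */
	} while (hi != old_hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)read64_2x32(0, 1));
	return 0;
}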
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 1af8407e2081..42022756bbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -493,7 +493,7 @@ struct child_device_config {
u16 addin_offset;
u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
u8 i2c_pin;
- u8 slave_addr;
+ u8 target_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
@@ -502,7 +502,7 @@ struct child_device_config {
struct {
u8 dvo2_port;
u8 i2c2_pin;
- u8 slave2_addr;
+ u8 target2_addr;
u8 ddc2_pin;
} __packed;
struct {
@@ -1080,6 +1080,8 @@ struct bdb_edp {
u16 edp_fast_link_training_rate[16]; /* 224+ */
u16 edp_max_port_link_rate[16]; /* 244+ */
u16 edp_dsc_disable; /* 251+ */
+ u16 t6_delay_support; /* 260+ */
+ u16 link_idle_time[16]; /* 260+ */
} __packed;
/*
@@ -1321,7 +1323,7 @@ struct als_data_entry {
} __packed;
struct aggressiveness_profile_entry {
- u8 dpst_aggressiveness : 4;
+ u8 dpst_aggressiveness : 4; /* (228/252)-256 */
u8 lace_aggressiveness : 4;
} __packed;
@@ -1330,12 +1332,27 @@ struct aggressiveness_profile2_entry {
u8 elp_aggressiveness : 4;
} __packed;
+struct aggressiveness_profile3_entry {
+ u8 apd_aggressiveness:4;
+ u8 pixoptix_aggressiveness:4;
+} __packed;
+
+struct aggressiveness_profile4_entry {
+ u8 xpst_aggressiveness:4;
+ u8 tcon_aggressiveness:4;
+} __packed;
+
+struct panel_identification {
+ u8 panel_technology:4;
+ u8 reserved:4;
+} __packed;
+
struct bdb_lfp_power {
struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst; /* 228+ */
+ u16 dpst; /* 228-256 */
u16 psr; /* 228+ */
u16 drrs; /* 228+ */
u16 lace_support; /* 228+ */
@@ -1343,12 +1360,20 @@ struct bdb_lfp_power {
u16 dmrrs; /* 228+ */
u16 adb; /* 228+ */
u16 lace_enabled_status; /* 228+ */
- struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16];
u16 hobl; /* 232+ */
u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 elp; /* 247-256 */
+ u16 opst; /* 247-256 */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247-256 */
+ u16 apd; /* 253-256 */
+ u16 pixoptix; /* 253-256 */
+ struct aggressiveness_profile3_entry aggressiveness3[16]; /* 253-256 */
+ struct panel_identification panel_identification[16]; /* 257+ */
+ u16 xpst_support; /* 257+ */
+ u16 tcon_based_backlight_optimization; /* 257+ */
+ struct aggressiveness_profile4_entry aggressiveness4[16]; /* 257+ */
+ u16 tcon_backlight_xpst_coexistence; /* 257+ */
} __packed;
/*
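
Aside (editor's note): the version annotations this hunk tightens, such as "228-256" on dpst and elp, mark the BDB versions in which a field is meaningful; a closed upper bound means later VBTs repurpose or drop the field. Any consumer therefore has to gate each access on the parsed block version. The helper below is a hypothetical illustration of that gating (bdb_version and the one-bit-per-panel-type reading of dpst are assumptions for the sketch, not quotes of the parser):

#include <stdbool.h>

struct bdb_lfp_power_view {
	unsigned int bdb_version;	/* hypothetical: taken from the BDB header */
	unsigned short dpst;		/* assumed: one bit per panel type */
};

/* a field tagged 228-256 is only valid inside that version window */
static bool lfp_dpst_supported(const struct bdb_lfp_power_view *power,
			       int panel_type)
{
	if (power->bdb_version < 228 || power->bdb_version > 256)
		return false;

	return power->dpst & (1u << panel_type);
}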
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index b9687b7692b8..2e849b015e74 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -8,6 +8,7 @@
#include <linux/limits.h>
#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_fixed.h>
#include "i915_drv.h"
#include "intel_crtc.h"
@@ -76,7 +77,7 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
- int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
+ int bpp = fxp_q4_to_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
@@ -184,7 +185,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
}
} else {
/* fractional bpp part * 10000 (for precision up to 4 decimal places) */
- int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
+ int fractional_bits = fxp_q4_to_frac(vdsc_cfg->bits_per_pixel);
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
@@ -263,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
- u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
+ u16 compressed_bpp = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;
@@ -456,36 +457,30 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_PPS0_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_PPS0_VBR_ENABLE;
- drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 0, pps_val);
/* PPS 1 */
pps_val = DSC_PPS1_BPP(vdsc_cfg->bits_per_pixel);
- drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 1, pps_val);
/* PPS 2 */
pps_val = DSC_PPS2_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PPS2_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 2, pps_val);
/* PPS 3 */
pps_val = DSC_PPS3_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_PPS3_SLICE_WIDTH(vdsc_cfg->slice_width);
- drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 3, pps_val);
/* PPS 4 */
pps_val = DSC_PPS4_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_PPS4_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 4, pps_val);
/* PPS 5 */
pps_val = DSC_PPS5_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_PPS5_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 5, pps_val);
/* PPS 6 */
@@ -493,25 +488,21 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_PPS6_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_PPS6_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_PPS6_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 6, pps_val);
/* PPS 7 */
pps_val = DSC_PPS7_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_PPS7_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 7, pps_val);
/* PPS 8 */
pps_val = DSC_PPS8_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_PPS8_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 8, pps_val);
/* PPS 9 */
pps_val = DSC_PPS9_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_PPS9_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 9, pps_val);
/* PPS 10 */
@@ -519,7 +510,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_PPS10_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_PPS10_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_PPS10_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 10, pps_val);
/* PPS 16 */
@@ -528,31 +518,25 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_width) |
DSC_PPS16_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 16, pps_val);
if (DISPLAY_VER(dev_priv) >= 14) {
/* PPS 17 */
pps_val = DSC_PPS17_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 17, pps_val);
/* PPS 18 */
pps_val = DSC_PPS18_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) |
DSC_PPS18_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj);
- drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 18, pps_val);
}
/* Populate the RC_BUF_THRESH registers */
memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
- for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i,
- rc_buf_thresh_dword[i / 4]);
- }
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
@@ -599,7 +583,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate the RC_RANGE_PARAMETERS registers */
memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
- for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++)
rc_range_params_dword[i / 2] |=
(u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
RC_BPG_OFFSET_SHIFT) |
@@ -607,9 +591,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i,
- rc_range_params_dword[i / 2]);
- }
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
@@ -989,3 +970,23 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
out:
intel_display_power_put(dev_priv, power_domain, wakeref);
}
+
+static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state)
+{
+ drm_printf_indent(p, indent,
+ "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n",
+ FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
+ crtc_state->dsc.slice_count,
+ str_yes_no(crtc_state->dsc.dsc_split));
+}
+
+void intel_vdsc_state_dump(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_vdsc_dump_state(p, indent, crtc_state);
+ drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 2cc41ff08909..290b2e9b3482 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+struct drm_printer;
+
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
@@ -27,5 +29,7 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_vdsc_state_dump(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5a0da64c7db3..9a51f5bac307 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -17,8 +17,8 @@
bool intel_vrr_is_capable(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
const struct drm_display_info *info = &connector->base.display_info;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp;
/*
@@ -43,7 +43,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
}
- return HAS_VRR(i915) &&
+ return HAS_VRR(display) &&
info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
@@ -89,10 +89,9 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
*/
static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return crtc_state->vrr.guardband;
else
/* The hw imposes the extra scanline before frame start */
@@ -113,11 +112,11 @@ int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
static bool
is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (!HAS_CMRR(i915))
+ if (!HAS_CMRR(display))
return false;
actual_refresh_k =
@@ -161,8 +160,7 @@ void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -186,7 +184,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (!crtc_state->vrr.in_range)
return;
- if (HAS_LRR(i915))
+ if (HAS_LRR(display))
crtc_state->update_lrr = true;
vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
@@ -233,8 +231,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
- if (intel_dp_as_sdp_supported(intel_dp) &&
- crtc_state->vrr.enable) {
+ if (intel_dp->as_sdp_supported && crtc_state->vrr.enable) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_start);
@@ -247,7 +244,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* For XE_LPD+, we use guardband and pipeline override
* is deprecated.
*/
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
@@ -259,9 +256,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -272,7 +269,7 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
@@ -280,133 +277,130 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (IS_DISPLAY_VER(dev_priv, 12, 13))
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ if (IS_DISPLAY_VER(display, 12, 13))
+ intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
if (!crtc_state->vrr.flipline) {
- intel_de_write(dev_priv,
- TRANS_VRR_CTL(dev_priv, cpu_transcoder), 0);
+ intel_de_write(display,
+ TRANS_VRR_CTL(display, cpu_transcoder), 0);
return;
}
if (crtc_state->cmrr.enable) {
- intel_de_write(dev_priv, TRANS_CMRR_M_HI(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_M_HI(display, cpu_transcoder),
upper_32_bits(crtc_state->cmrr.cmrr_m));
- intel_de_write(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
lower_32_bits(crtc_state->cmrr.cmrr_m));
- intel_de_write(dev_priv, TRANS_CMRR_N_HI(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_N_HI(display, cpu_transcoder),
upper_32_bits(crtc_state->cmrr.cmrr_n));
- intel_de_write(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(dev_priv, TRANS_VRR_VMIN(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
crtc_state->vrr.vmin - 1);
- intel_de_write(dev_priv, TRANS_VRR_VMAX(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
crtc_state->vrr.vmax - 1);
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(crtc_state));
- intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
crtc_state->vrr.flipline - 1);
}
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN | TRANS_PUSH_SEND);
}
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return false;
- return intel_de_read(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder)) & TRANS_PUSH_SEND;
+ return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (HAS_AS_SDP(dev_priv))
- intel_de_write(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder),
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder),
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
if (crtc_state->cmrr.enable) {
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
trans_vrr_ctl(crtc_state));
} else {
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
}
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(old_crtc_state);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
if (!old_crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(dev_priv,
- TRANS_VRR_STATUS(dev_priv, cpu_transcoder),
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder), 0);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
- if (HAS_AS_SDP(dev_priv))
- intel_de_write(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder), 0);
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
- trans_vrr_ctl = intel_de_read(dev_priv,
- TRANS_VRR_CTL(dev_priv, cpu_transcoder));
+ trans_vrr_ctl = intel_de_read(display,
+ TRANS_VRR_CTL(display, cpu_transcoder));
crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
- if (HAS_CMRR(dev_priv))
+ if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
if (crtc_state->cmrr.enable) {
crtc_state->cmrr.cmrr_n =
- intel_de_read64_2x32(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder),
- TRANS_CMRR_N_HI(dev_priv, cpu_transcoder));
+ intel_de_read64_2x32(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
+ TRANS_CMRR_N_HI(display, cpu_transcoder));
crtc_state->cmrr.cmrr_m =
- intel_de_read64_2x32(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder),
- TRANS_CMRR_M_HI(dev_priv, cpu_transcoder));
+ intel_de_read64_2x32(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
+ TRANS_CMRR_M_HI(display, cpu_transcoder));
}
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
crtc_state->vrr.guardband =
REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
else
@@ -415,21 +409,21 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
- crtc_state->vrr.flipline = intel_de_read(dev_priv,
- TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder)) + 1;
- crtc_state->vrr.vmax = intel_de_read(dev_priv,
- TRANS_VRR_VMAX(dev_priv, cpu_transcoder)) + 1;
- crtc_state->vrr.vmin = intel_de_read(dev_priv,
- TRANS_VRR_VMIN(dev_priv, cpu_transcoder)) + 1;
+ crtc_state->vrr.flipline = intel_de_read(display,
+ TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(display,
+ TRANS_VRR_VMAX(display, cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(display,
+ TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
}
if (crtc_state->vrr.enable) {
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- if (HAS_AS_SDP(dev_priv)) {
+ if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
- intel_de_read(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder));
+ intel_de_read(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder));
crtc_state->vrr.vsync_start =
REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
crtc_state->vrr.vsync_end =
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ba5a628b4757..17d4c880ecc4 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -14,6 +14,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
+#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -537,6 +538,8 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
/*
* Align to at least 4x1 main surface
* tiles (16K) to match 64B of AUX.
@@ -948,6 +951,9 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
@@ -1085,11 +1091,6 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(dev_priv) == 13)
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
- if (GRAPHICS_VER(dev_priv) >= 20 &&
- fb->modifier == I915_FORMAT_MOD_4_TILED) {
- plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- }
-
return plane_ctl;
}
@@ -1162,7 +1163,7 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* within the DPT is always 0.
*/
drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
- plane_state->dpt_vma->node.start);
+ intel_dpt_offset(plane_state->dpt_vma));
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -2452,6 +2453,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (gen12_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
+ if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915))
+ caps |= INTEL_PLANE_CAP_NEED64K_PHYS;
+
return caps;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index a2726364b34d..045c7cac166b 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -2830,17 +2830,17 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
}
/*
- * If Fixed Refresh Rate:
+ * If Fixed Refresh Rate, or the VRR case where Vmin == Vmax == Flipline:
* Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
* watermark level 1 and above. If watermark level 1 is
* invalid program it with all 1's.
* Program PKG_C_LATENCY Added Wake Time = DSB execution time
- * If Variable Refresh Rate:
+ * If Variable Refresh Rate, where Vmin, Vmax and Flipline differ:
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
static void
-skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
+skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
{
u32 max_latency = 0;
u32 clear = 0, val = 0;
@@ -2849,15 +2849,15 @@ skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
if (DISPLAY_VER(i915) < 20)
return;
- if (vrr_enabled) {
- max_latency = LNL_PKG_C_LATENCY_MASK;
- added_wake_time = 0;
- } else {
+ if (enable_dpkgc) {
max_latency = skl_watermark_max_latency(i915, 1);
if (max_latency == 0)
max_latency = LNL_PKG_C_LATENCY_MASK;
added_wake_time = DSB_EXE_TIME +
i915->display.sagv.block_time_us;
+ } else {
+ max_latency = LNL_PKG_C_LATENCY_MASK;
+ added_wake_time = 0;
}
clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
@@ -2873,7 +2873,7 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
- bool vrr_enabled = false;
+ bool enable_dpkgc = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2899,11 +2899,13 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- if (new_crtc_state->vrr.enable)
- vrr_enabled = true;
+ if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
+ !new_crtc_state->vrr.enable)
+ enable_dpkgc = true;
}
- skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
+ skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
skl_print_wm_changes(state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 931d2cf74ed8..d21f3fb39706 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1879,6 +1879,7 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1890,7 +1891,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
- if (!intel_bios_is_dsi_present(dev_priv, &port))
+ if (!intel_bios_is_dsi_present(display, &port))
return;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
@@ -1945,7 +1946,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(display, &connector->panel, NULL, NULL);
if (connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d54162ce0f99..a3b83cfe1726 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -12,8 +12,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
-#include "display/intel_frontbuffer.h"
-
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
@@ -827,7 +825,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
+ if (IS_ERR(ctx))
return PTR_ERR(ctx);
eb->gem_context = ctx;
@@ -1533,7 +1531,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
u64_to_user_ptr(entry->relocs_ptr);
unsigned long remain = entry->relocation_count;
- if (unlikely(remain > N_RELOC(ULONG_MAX)))
+ if (unlikely(remain > N_RELOC(INT_MAX)))
return -EINVAL;
/*
@@ -1641,7 +1639,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
if (size == 0)
return 0;
- if (size > N_RELOC(ULONG_MAX))
+ if (size > N_RELOC(INT_MAX))
return -EINVAL;
addr = u64_to_user_ptr(entry->relocs_ptr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index cac6d4184506..21274aa9bddd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ unsigned long obj_offset;
resource_size_t iomap;
int err;
@@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
iomap -= obj->mm.region->region.start;
}
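+	/* Offset of this mapping within the object's vma node, in pages. */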
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
- obj->mm.pages->sgl, iomap);
+ obj->mm.pages->sgl, obj_offset, iomap);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -293,8 +295,10 @@ out:
static void set_address_limits(struct vm_area_struct *area,
struct i915_vma *vma,
unsigned long obj_offset,
+ resource_size_t gmadr_start,
unsigned long *start_vaddr,
- unsigned long *end_vaddr)
+ unsigned long *end_vaddr,
+ unsigned long *pfn)
{
unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
long start, end; /* memory boundaries */
@@ -323,6 +327,10 @@ static void set_address_limits(struct vm_area_struct *area,
/* Let's move back into the "<< PAGE_SHIFT" domain */
*start_vaddr = (unsigned long)start << PAGE_SHIFT;
*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+
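+	/*
+	 * Base PFN of the vma within the GGTT aperture, advanced by the
+	 * clamped start address and by the object offset relative to the
+	 * partial view.
+	 */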
+ *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
+ *pfn += obj_offset - vma->gtt_view.partial.offset;
}
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
@@ -441,11 +449,13 @@ retry:
if (ret)
goto err_unpin;
- set_address_limits(area, vma, obj_offset, &start, &end);
-
- pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
- pfn += (start - area->vm_start) >> PAGE_SHIFT;
- pfn += obj_offset - vma->gtt_view.partial.offset;
+	/*
+	 * Pass all the necessary parameters to this helper, which performs
+	 * the arithmetic to derive the virtual address start and end and
+	 * the PFN (Page Frame Number) for the remap below.
+	 */
+ set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
+ &start, &end, &pfn);
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
@@ -1071,9 +1081,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
+ node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as they are in the process of being
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 5d7446a48ae7..3dc61cbd2e11 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -89,7 +89,6 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
* @handle: userspace handle
*
* Returns:
- *
* A pointer to the object named by the handle if such exists on @filp, NULL
* otherwise. This object is only valid whilst under the RCU read lock, and
* note carefully the object may be in the process of being destroyed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 1495b6074492..68413c05c812 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -535,7 +535,7 @@ struct drm_i915_gem_object {
* I915_CACHE_NONE. The only exception is userptr objects, where we
* instead force I915_CACHE_LLC, but we also don't allow userspace to
* ever change the @cache_level for such objects. Another special case
- * is dma-buf, which doesn't rely on @cache_dirty, but there we
+ * is dma-buf, which doesn't rely on @cache_dirty, but there we
* always do a forced flush when acquiring the pages, if there is a
* chance that the pages can be read directly from main memory with
* the GPU.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index c5e1c718a6d2..fe69f2c8527d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -424,7 +424,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
struct address_space *mapping = obj->base.filp->f_mapping;
const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain, offset;
+ u64 remain;
+ loff_t pos;
unsigned int pg;
/* Caller already validated user args */
@@ -457,12 +458,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
*/
remain = arg->size;
- offset = arg->offset;
- pg = offset_in_page(offset);
+ pos = arg->offset;
+ pg = offset_in_page(pos);
do {
unsigned int len, unwritten;
- struct page *page;
+ struct folio *folio;
void *data, *vaddr;
int err;
char __maybe_unused c;
@@ -480,21 +481,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err)
return err;
- err = aops->write_begin(obj->base.filp, mapping, offset, len,
- &page, &data);
+ err = aops->write_begin(obj->base.filp, mapping, pos, len,
+ &folio, &data);
if (err < 0)
return err;
- vaddr = kmap_local_page(page);
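+	/* kmap_local_folio() maps at pos, so no extra page offset is needed. */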
+ vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
pagefault_disable();
- unwritten = __copy_from_user_inatomic(vaddr + pg,
- user_data,
- len);
+ unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
pagefault_enable();
kunmap_local(vaddr);
- err = aops->write_end(obj->base.filp, mapping, offset, len,
- len - unwritten, page, data);
+ err = aops->write_end(obj->base.filp, mapping, pos, len,
+ len - unwritten, folio, data);
if (err < 0)
return err;
@@ -504,7 +503,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
remain -= len;
user_data += len;
- offset += len;
+ pos += len;
pg = 0;
} while (remain);
@@ -660,7 +659,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj;
struct file *file;
const struct address_space_operations *aops;
- resource_size_t offset;
+ loff_t pos;
int err;
GEM_WARN_ON(IS_DGFX(i915));
@@ -672,29 +671,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
file = obj->base.filp;
aops = file->f_mapping->a_ops;
- offset = 0;
+ pos = 0;
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct page *page;
- void *pgdata, *vaddr;
+ struct folio *folio;
+ void *fsdata;
- err = aops->write_begin(file, file->f_mapping, offset, len,
- &page, &pgdata);
+ err = aops->write_begin(file, file->f_mapping, pos, len,
+ &folio, &fsdata);
if (err < 0)
goto fail;
- vaddr = kmap(page);
- memcpy(vaddr, data, len);
- kunmap(page);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
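+	/* memcpy_to_folio() takes care of the kmap/kunmap of each page. */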
- err = aops->write_end(file, file->f_mapping, offset, len, len,
- page, pgdata);
+ err = aops->write_end(file, file->f_mapping, pos, len, len,
+ folio, fsdata);
if (err < 0)
goto fail;
size -= len;
data += len;
- offset += len;
+ pos += len;
} while (size);
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d4b918fb11ce..1f55e62044a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -266,7 +266,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
args->timeout_ns = 0;
- /* Asked to wait beyond the jiffie/scheduler precision? */
+ /* Asked to wait beyond the jiffy/scheduler precision? */
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 3527b8f446fe..2fda549dd82d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -506,7 +506,7 @@ static int igt_dmabuf_export_vmap(void *arg)
goto out;
}
- if (memchr_inv(ptr, 0, dmabuf->size)) {
+ if (!mem_is_zero(ptr, dmabuf->size)) {
pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 3b740ca25000..4d30a86016f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -693,6 +693,8 @@ void intel_engines_release(struct intel_gt *gt)
memset(&engine->reset, 0, sizeof(engine->reset));
}
+
+ llist_del_all(&gt->i915->uabi_engines_llist);
}
void intel_engine_free_request_pool(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 206a5e0fedf1..d60a6ca0cae5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -12,7 +12,6 @@
#include <drm/intel/i915_drm.h>
#include <drm/intel/intel-gtt.h>
-#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 93bc1cc1ee7e..0ffba50981e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -418,7 +418,6 @@ out_unpin:
* For an untiled surface, this removes any existing fence.
*
* Returns:
- *
* 0 on success, negative error code on failure.
*/
int i915_vma_pin_fence(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 2bd8d98d2110..5394bc7d4daf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -220,6 +220,7 @@
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define CMD_3DSTATE_MESH_CONTROL ((0x3 << 29) | (0x3 << 27) | (0x0 << 24) | (0x77 << 16) | (0x3))
#define XY_CTRL_SURF_INSTR_SIZE 5
#define MI_FLUSH_DW_SIZE 3
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index b5e114d284ad..998ca029b73a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -174,7 +174,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
-void intel_gt_release_all(struct drm_i915_private *i915);
#define for_each_gt(gt__, i915__, id__) \
for ((id__) = 0; \
@@ -208,4 +207,10 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
+
+static inline void intel_gt_set_wedged_async(struct intel_gt *gt)
+{
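+	/* Defer the wedge to a highpri worker instead of doing it inline. */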
+ queue_work(system_highpri_wq, &gt->wedge);
+}
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index e42b3a5d4e63..57a3c83d3655 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1553,6 +1553,8 @@
#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+#define PCU_PWM_FAN_SPEED _MMIO(0x138140)
+
#define GEN12_RPSTAT1 _MMIO(0x1381b4)
#define GEN12_VOLTAGE_MASK REG_GENMASK(10, 0)
#define GEN12_CAGF_MASK REG_GENMASK(19, 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index cfdd2ad5e954..bcee084b1f27 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -292,6 +292,8 @@ struct intel_gt {
struct gt_defaults defaults;
struct kobject *sysfs_defaults;
+ struct work_struct wedge;
+
struct i915_perf_gt perf;
/** link: &ggtt.gt_list */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 735cd23a43c6..8f1ea95471ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1013,6 +1013,15 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
GT_TRACE(gt, "end\n");
}
+static void set_wedged_work(struct work_struct *w)
+{
+ struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
+ intel_wakeref_t wf;
+
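+	/* Hold a runtime PM wakeref for the duration of the wedge. */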
+ with_intel_runtime_pm(gt->uncore->rpm, wf)
+ __intel_gt_set_wedged(gt);
+}
+
void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
@@ -1614,6 +1623,7 @@ void intel_gt_init_reset(struct intel_gt *gt)
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ INIT_WORK(&gt->wedge, set_wedged_work);
/*
* While undesirable to wait inside the shrinker, complain anyway.
@@ -1640,7 +1650,7 @@ static void intel_wedge_me(struct work_struct *work)
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
- intel_gt_set_wedged(w->gt);
+ set_wedged_work(&w->gt->wedge);
}
void __intel_init_wedge(struct intel_wedge_me *w,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 09a287c1aedd..e539a656cfc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -111,9 +111,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
{
/* Trim unused entries. */
if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
- struct i915_wa *list = kmemdup(wal->list,
- wal->count * sizeof(*list),
- GFP_KERNEL);
+ struct i915_wa *list = kmemdup_array(wal->list, wal->count,
+ sizeof(*list), GFP_KERNEL);
if (list) {
kfree(wal->list);
@@ -974,7 +973,12 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
if (ret)
return ret;
- cs = intel_ring_begin(rq, (wal->count * 2 + 2));
+ if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
+ IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
+ cs = intel_ring_begin(rq, (wal->count * 2 + 6));
+ else
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
+
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1004,6 +1008,15 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
*cs++ = MI_NOOP;
+ /* Wa_14019789679 */
+ if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
+ IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
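+		/* Emit a zeroed 3DSTATE_MESH_CONTROL as the workaround requires. */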
+ *cs++ = CMD_3DSTATE_MESH_CONTROL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
+
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(wal->gt, flags);
@@ -2058,7 +2071,7 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
@@ -2073,7 +2086,7 @@ static void xelpg_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 4202df5b8c12..222ca7c44951 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -93,7 +93,7 @@ static int wait_for_reset(struct intel_engine_cs *engine,
return -EINVAL;
}
- /* Give the request a jiffie to complete after flushing the worker */
+ /* Give the request a jiffy to complete after flushing the worker */
if (i915_request_wait(rq, 0,
max(0l, (long)(timeout - jiffies)) + 1) < 0) {
pr_err("%s: hanging request %llx:%lld did not complete\n",
@@ -3426,7 +3426,7 @@ static int live_preempt_timeout(void *arg)
cpu_relax();
saved_timeout = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
i915_request_get(rq);
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 3eff364ccf3a..ca460cee4f8b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -336,7 +336,7 @@ static int clear(struct intel_migrate *migrate,
if (vaddr[x] != val) {
pr_err("%ps failed, (%u != %u), offset: %zu\n",
- fn, vaddr[x], val, x * sizeof(u32));
+ fn, vaddr[x], val, x * sizeof(u32));
igt_hexdump(vaddr + i * 1024, 4096);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 021f51d9b456..aab2759067d2 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -530,9 +530,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
err_object:
kobject_put(kobj);
err_engine:
- dev_err(kdev, "Failed to add sysfs engine '%s'\n",
- engine->name);
- break;
+ dev_warn(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
}
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 37ff539a6963..0c709e6c15be 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -107,6 +107,7 @@ enum {
enum {
GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE = 0x9006,
};
#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5e60a34692af..097fc6bd1285 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -296,7 +296,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_16019325821 */
/* Wa_14019159160 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
flags |= GUC_WA_RCS_CCS_SWITCHOUT;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 7995f059f30d..46fabbfc775e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -815,8 +815,7 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
-static void guc_waklv_enable_simple(struct intel_guc *guc,
- u32 klv_id, u32 *offset, u32 *remain)
+static void guc_waklv_enable_simple(struct intel_guc *guc, u32 *offset, u32 *remain, u32 klv_id)
{
u32 size;
u32 klv_entry[] = {
@@ -850,19 +849,20 @@ static void guc_waklv_init(struct intel_guc *guc)
remain = guc_ads_waklv_size(guc);
/* Wa_14019159160 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
- guc_waklv_enable_simple(guc,
- GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE,
- &offset, &remain);
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE);
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE);
+ }
/* Wa_16021333562 */
if ((GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 21, 1)) &&
(IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0)) ||
IS_DG2(gt->i915)))
- guc_waklv_enable_simple(guc,
- GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
- &offset, &remain);
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
size = guc_ads_waklv_size(guc) - remain;
if (!size)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9400d0eb682b..ed979847187f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2014,11 +2014,12 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
/*
* Technically possible for either of these values to be non-zero here,
- * but very unlikely + harmless. Regardless let's add a warn so we can
+ * but very unlikely + harmless. Regardless let's add an error so we can
* see in CI if this happens frequently / is a precursor to taking down the
* machine.
*/
- GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+ if (atomic_read(&guc->outstanding_submission_g2h))
+ guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n");
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
@@ -2842,9 +2843,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
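+	/* Widen to u64 before adding so the GGTT offset sum cannot wrap u32. */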
- wq_desc_offset = i915_ggtt_offset(ce->state) +
+ wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce);
- wq_base_offset = i915_ggtt_offset(ce->state) +
+ wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
__get_wq_offset(ce);
info->wq_desc_lo = lower_32_bits(wq_desc_offset);
info->wq_desc_hi = upper_32_bits(wq_desc_offset);
@@ -4506,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
/* Wa_16019325821 */
/* Wa_14019159160 */
if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 7a63abf8f644..5b8080ec5315 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -99,7 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index af9afdb53c7f..c022dc736045 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -42,8 +42,8 @@
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
-#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
-#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_target_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_target_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
/* GMBUS0 bits definitions */
@@ -54,7 +54,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
unsigned char chr = 0;
- if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->target_selected) {
gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
@@ -179,7 +179,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
- u32 slave_addr;
+ u32 target_addr;
u32 wvalue = *(u32 *)p_data;
if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
@@ -210,21 +210,21 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
i2c_edid->gmbus.total_byte_count =
gmbus1_total_byte_count(wvalue);
- slave_addr = gmbus1_slave_addr(wvalue);
+ target_addr = gmbus1_target_addr(wvalue);
/* vgpu gmbus only support EDID */
- if (slave_addr == EDID_ADDR) {
- i2c_edid->slave_selected = true;
- } else if (slave_addr != 0) {
+ if (target_addr == EDID_ADDR) {
+ i2c_edid->target_selected = true;
+ } else if (target_addr != 0) {
gvt_dbg_dpy(
- "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ "vgpu%d: unsupported gmbus target addr(0x%x)\n"
" gmbus operations will be ignored.\n",
- vgpu->id, slave_addr);
+ vgpu->id, target_addr);
}
if (wvalue & GMBUS_CYCLE_INDEX)
i2c_edid->current_edid_read =
- gmbus1_slave_index(wvalue);
+ gmbus1_target_index(wvalue);
i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
switch (gmbus1_bus_cycle(wvalue)) {
@@ -523,7 +523,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
} else if (addr == EDID_ADDR) {
i2c_edid->state = I2C_AUX_CH;
i2c_edid->port = port_idx;
- i2c_edid->slave_selected = true;
+ i2c_edid->target_selected = true;
if (intel_vgpu_has_monitor_on_port(vgpu,
port_idx) &&
intel_vgpu_port_is_dp(vgpu, port_idx))
@@ -542,7 +542,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
return;
if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
- if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ if (i2c_edid->edid_available && i2c_edid->target_selected) {
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
@@ -571,7 +571,7 @@ void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
edid->state = I2C_NOT_SPECIFIED;
edid->port = -1;
- edid->slave_selected = false;
+ edid->target_selected = false;
edid->edid_available = false;
edid->current_edid_read = 0;
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index dfe0cbc6aad8..c3b5a55aecb3 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -80,7 +80,7 @@ enum gmbus_cycle_type {
* R/W Protect
* Command and Status.
* bit0 is the direction bit: 1 is read; 0 is write.
- * bit1 - bit7 is slave 7-bit address.
+ * bit1 - bit7 is target 7-bit address.
* bit16 - bit24 total byte count (ignore?)
*
* GMBUS2:
@@ -130,7 +130,7 @@ struct intel_vgpu_i2c_edid {
enum i2c_state state;
unsigned int port;
- bool slave_selected;
+ bool target_selected;
bool edid_available;
unsigned int current_edid_read;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 38830818c120..ca0fb126b02d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -425,6 +425,18 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.release = intel_vgpu_reg_release_opregion,
};
+static bool edid_valid(const void *edid, size_t size)
+{
+ const struct drm_edid *drm_edid;
+ bool is_valid;
+
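+	/* Wrap the raw blob in a drm_edid just to run the validity checks. */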
+ drm_edid = drm_edid_alloc(edid, size);
+ is_valid = drm_edid_valid(drm_edid);
+ drm_edid_free(drm_edid);
+
+ return is_valid;
+}
+
static int handle_edid_regs(struct intel_vgpu *vgpu,
struct vfio_edid_region *region, char *buf,
size_t count, u16 offset, bool is_write)
@@ -443,11 +455,7 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
switch (offset) {
case offsetof(struct vfio_region_gfx_edid, link_state):
if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
- if (!drm_edid_block_valid(
- (u8 *)region->edid_blob,
- 0,
- true,
- NULL)) {
+ if (!edid_valid(region->edid_blob, EDID_SIZE)) {
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2bed466540a..908f910420c2 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -86,7 +86,7 @@ struct efp_child_device_config {
u8 skip2;
u8 dvo_port;
u8 i2c_pin; /* for add-in card */
- u8 slave_addr; /* for add-in card */
+ u8 target_addr; /* for add-in card */
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_config;
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 020f1aa28322..63874d385c6f 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -227,7 +227,7 @@ TRACE_EVENT(oos_sync,
#define GVT_CMD_STR_LEN 40
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
- u32 cmd_len, u32 buf_type, u32 buf_addr_type,
+ u32 cmd_len, u32 buf_type, u32 buf_addr_type,
void *workload, const char *cmd_name),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bc717cf544e4..f969f585d07b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,6 +66,7 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = &i915->display;
struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
@@ -77,7 +78,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
- intel_display_params_dump(i915, &p);
+ intel_display_params_dump(display, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index fb8e9c2fcea5..a40f05b993da 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -49,7 +49,7 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_driver.h"
-#include "display/intel_display_types.h"
+#include "display/intel_display.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
@@ -58,10 +58,8 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
-#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -442,6 +440,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
@@ -451,8 +450,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
- i915_report_error(dev_priv,
- "incompatible vGPU found, support for isolated ppGTT required\n");
+ drm_err(&dev_priv->drm,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
}
}
@@ -465,8 +464,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_hwsp_emulation(dev_priv)) {
- i915_report_error(dev_priv,
- "old vGPU host found, support for HWSP emulation required\n");
+ drm_err(&dev_priv->drm,
+ "old vGPU host found, support for HWSP emulation required\n");
return -ENXIO;
}
}
@@ -542,7 +541,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_msi;
- intel_opregion_setup(dev_priv);
+ intel_opregion_setup(display);
ret = i915_pcode_init(dev_priv);
if (ret)
@@ -559,7 +558,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
err_opregion:
- intel_opregion_cleanup(dev_priv);
+ intel_opregion_cleanup(display);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -580,11 +579,12 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
- intel_opregion_cleanup(dev_priv);
+ intel_opregion_cleanup(display);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -723,7 +723,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return i915;
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
@@ -1014,6 +1014,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
@@ -1049,7 +1050,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_display(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_suspend(dev_priv, opregion_target_state);
+ intel_opregion_suspend(display, opregion_target_state);
dev_priv->suspend_count++;
@@ -1138,6 +1139,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -1165,7 +1167,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
@@ -1205,7 +1207,7 @@ static int i915_drm_resume(struct drm_device *dev)
}
intel_hpd_poll_disable(dev_priv);
- intel_opregion_resume(dev_priv);
+ intel_opregion_resume(display);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
@@ -1454,6 +1456,7 @@ static int i915_pm_restore(struct device *kdev)
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1528,7 +1531,7 @@ static int intel_runtime_suspend(struct device *kdev)
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
- intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
+ intel_opregion_notify_adapter(display, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
@@ -1537,7 +1540,7 @@ static int intel_runtime_suspend(struct device *kdev)
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
- intel_opregion_notify_adapter(dev_priv, PCI_D1);
+ intel_opregion_notify_adapter(display, PCI_D1);
}
assert_forcewakes_inactive(&dev_priv->uncore);
@@ -1552,6 +1555,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1566,7 +1570,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ intel_opregion_notify_adapter(display, PCI_D0);
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
@@ -1671,6 +1675,7 @@ static const struct file_operations i915_driver_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static int
@@ -1693,9 +1698,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 110340e02a02..39f6614a0a99 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -365,12 +365,16 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return dev_get_drvdata(kdev);
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
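+	/* drvdata now holds the drm_device rather than the i915 pointer. */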
+ return drm ? to_i915(drm) : NULL;
}
static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_i915(drm) : NULL;
}
static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
@@ -408,14 +412,8 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
#define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision)
-#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
-#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
-
-#define IS_DISPLAY_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
- INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
#define IS_GRAPHICS_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
@@ -425,10 +423,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
-#define IS_BASEDIE_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
- INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
-
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
@@ -680,9 +674,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \
})
-/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(i915) (IS_I830(i915) || IS_I845G(i915))
-
#define NEEDS_RC6_CTX_CORRUPTION_WA(i915) \
(IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9)
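
Context for the kdev_to_i915()/pdev_to_i915() change above: driver data now stores the embedded struct drm_device rather than the drm_i915_private pointer, so the helpers convert with to_i915() and return NULL until probe has set drvdata. A minimal userspace model of that round-trip (struct layouts here are illustrative, not the real i915 ones):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device { int registered; };
struct drm_i915_private { struct drm_device drm; int hw_state; };

static struct drm_i915_private *to_i915(struct drm_device *drm)
{
	return container_of(drm, struct drm_i915_private, drm);
}

/* Models kdev_to_i915(): drvdata holds &i915->drm, not i915 itself. */
static struct drm_i915_private *from_drvdata(void *drvdata)
{
	struct drm_device *drm = drvdata;

	return drm ? to_i915(drm) : NULL; /* NULL before probe completes */
}

int main(void)
{
	struct drm_i915_private i915 = { .hw_state = 42 };

	printf("%d\n", from_drvdata(&i915.drm)->hw_state); /* 42 */
	printf("%d\n", from_drvdata(NULL) == NULL);        /* 1  */
	return 0;
}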
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1391c01d7663..070ab6546987 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,8 +39,6 @@
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
-#include "display/intel_display.h"
-
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7bd1861ddbdf..a9662cc6ed1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,7 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 96c6cafd5b9e..6469b9bcf2ec 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -660,9 +660,10 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *params)
{
struct drm_printer p = i915_error_printer(m);
+ struct intel_display *display = &m->i915->display;
i915_params_dump(params, &p);
- intel_display_params_dump(m->i915, &p);
+ intel_display_params_dump(display, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 49db3e09826c..17d30f6b84b0 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include "i915_drv.h"
@@ -36,6 +37,7 @@ struct hwm_reg {
i915_reg_t pkg_rapl_limit;
i915_reg_t energy_status_all;
i915_reg_t energy_status_tile;
+ i915_reg_t fan_speed;
};
struct hwm_energy_info {
@@ -43,11 +45,17 @@ struct hwm_energy_info {
long accum_energy; /* Accumulated energy for energy1_input */
};
+struct hwm_fan_info {
+ u32 reg_val_prev;
+ u64 time_prev;
+};
+
struct hwm_drvdata {
struct i915_hwmon *hwmon;
struct intel_uncore *uncore;
struct device *hwmon_dev;
struct hwm_energy_info ei; /* Energy info for energy1_input */
+ struct hwm_fan_info fi; /* Fan info for fan1_input */
char name[12];
int gt_n;
bool reset_in_progress;
@@ -276,6 +284,7 @@ static const struct hwmon_channel_info * const hwm_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
NULL
};
@@ -614,6 +623,69 @@ hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
}
static umode_t
+hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+
+ if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed))
+ return 0444;
+
+ return 0;
+}
+
+static int
+hwm_fan_input_read(struct hwm_drvdata *ddat, long *val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ struct hwm_fan_info *fi = &ddat->fi;
+ u64 rotations, time_now, time;
+ intel_wakeref_t wakeref;
+ u32 reg_val;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed);
+ time_now = get_jiffies_64();
+
+ /*
+	 * The HW register value is the accumulated count of pulses from the
+	 * PWM fan, at a scale of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ /*
+	 * Calculate fan speed in RPM by averaging the rotation count over
+	 * the time between two subsequent readings, scaled to minutes:
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+exit:
+ mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+ return ret;
+}
+
+static int
+hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val)
+{
+ if (attr == hwmon_fan_input)
+ return hwm_fan_input_read(ddat, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -628,6 +700,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
return hwm_energy_is_visible(ddat, attr);
case hwmon_curr:
return hwm_curr_is_visible(ddat, attr);
+ case hwmon_fan:
+ return hwm_fan_is_visible(ddat, attr);
default:
return 0;
}
@@ -648,6 +722,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
return hwm_energy_read(ddat, attr, val);
case hwmon_curr:
return hwm_curr_read(ddat, attr, val);
+ case hwmon_fan:
+ return hwm_fan_read(ddat, attr, val);
default:
return -EOPNOTSUPP;
}
@@ -739,12 +815,14 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
hwmon->rg.energy_status_all = INVALID_MMIO_REG;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = INVALID_MMIO_REG;
}
with_intel_runtime_pm(uncore->rpm, wakeref) {
@@ -755,6 +833,16 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
val_sku_unit = intel_uncore_read(uncore,
hwmon->rg.pkg_power_sku_unit);
+
+ /*
+	 * Store the initial fan register value so that we can use it for
+	 * the initial fan speed calculation.
+ */
+ if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) {
+ ddat->fi.reg_val_prev = intel_uncore_read(uncore,
+ hwmon->rg.fan_speed);
+ ddat->fi.time_prev = get_jiffies_64();
+ }
}
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
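
The fan readout above divides the pulse-count delta by two (two pulses per rotation) and scales by milliseconds per minute over the elapsed time. A small userspace check of that arithmetic, with made-up register values and timestamps:

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC		1000ULL
#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the math in hwm_fan_input_read(): 2 pulses per rotation. */
static long fan_rpm(uint32_t reg_now, uint32_t reg_prev, uint64_t msecs)
{
	uint64_t rotations = (uint64_t)(reg_now - reg_prev) / 2;

	return (long)DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), msecs);
}

int main(void)
{
	/* 100 pulses over 1500 ms -> 50 rotations -> 2000 RPM */
	printf("%ld\n", fan_rpm(4100, 4000, 1500));
	return 0;
}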
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8059ac7e15fe..2321de48d169 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include "display/intel_display_irq.h"
-#include "display/intel_display_types.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 7998bc74ab49..f5c97a620962 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -122,13 +122,15 @@ int remap_io_mapping(struct vm_area_struct *vma,
* @addr: target user address to start at
* @size: size of map area
* @sgl: Start sg entry
+ * @offset: offset from the start of the page
* @iobase: Use stored dma address offset by this address or pfn if -1
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase)
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase)
{
struct remap_pfn r = {
.mm = vma->vm_mm,
@@ -141,6 +143,14 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+ while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
+ offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
+ if (!r.sgt.sgp)
+ return -EINVAL;
+ }
+ r.sgt.curr = offset << PAGE_SHIFT;
+
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
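
The new offset handling in remap_io_sg() walks the scatterlist until the remaining page offset falls inside one entry, then positions the iterator at the byte offset within it. A simplified userspace model over an array of entry lengths (all names invented):

#include <stdio.h>

#define PAGE_SHIFT 12

struct sg_model { unsigned long dma_len; };

/* Skip 'offset' pages across entries; return entry index or -1 (-EINVAL). */
static int skip_pages(const struct sg_model *sg, int nents,
		      unsigned long offset, unsigned long *curr_bytes)
{
	int i = 0;

	while (offset >= (sg[i].dma_len >> PAGE_SHIFT)) {
		offset -= sg[i].dma_len >> PAGE_SHIFT;
		if (++i == nents)
			return -1;
	}
	*curr_bytes = offset << PAGE_SHIFT; /* corresponds to r.sgt.curr */
	return i;
}

int main(void)
{
	const struct sg_model sg[] = { { 2ul << PAGE_SHIFT }, { 4ul << PAGE_SHIFT } };
	unsigned long curr;
	int idx = skip_pages(sg, 2, 3, &curr);

	printf("entry %d, curr %lu\n", idx, curr); /* entry 1, curr 4096 */
	return 0;
}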
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 04c8974d822b..69f9351b1a1c 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase);
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase);
#endif /* __I915_MM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 65acd7bf75d0..7ed6d70389af 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -30,15 +30,20 @@ static int i915_check_nomodeset(void)
*/
if (i915_modparams.modeset == 0)
+ pr_warn("i915.modeset=0 is deprecated. Please use the 'nomodeset' kernel parameter instead.\n");
+ else if (i915_modparams.modeset != -1)
+ pr_warn("i915.modeset=%d is deprecated. Please remove it and the 'nomodeset' kernel parameter instead.\n",
+ i915_modparams.modeset);
+
+ if (i915_modparams.modeset == 0)
use_kms = false;
if (drm_firmware_drivers_only() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
- /* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS disabled.\n");
- return 1;
+ return -ENODEV;
}
return 0;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 316e55f3e87b..37746dd619fd 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -64,8 +64,7 @@ struct i915_params i915_modparams __read_mostly = {
*/
i915_param_named(modeset, int, 0400,
- "Use kernel modesetting [KMS] (0=disable, "
- "1=on, -1=force vga console preference [default])");
+ "Deprecated. Use the 'nomodeset' kernel parameter instead.");
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ce4dfd65fafa..d37bb3a704d0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,7 +26,6 @@
#include <drm/drm_drv.h>
#include <drm/intel/i915_pciids.h>
-#include "display/intel_display.h"
#include "display/intel_display_driver.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
@@ -880,7 +879,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_i915_private *i915;
- i915 = pci_get_drvdata(pdev);
+ i915 = pdev_to_i915(pdev);
if (!i915) /* driver load aborted, nothing to cleanup */
return;
@@ -1003,7 +1002,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
+ if (i915_inject_probe_failure(pdev_to_i915(pdev))) {
i915_pci_remove(pdev);
return -ENODEV;
}
@@ -1025,7 +1024,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void i915_pci_shutdown(struct pci_dev *pdev)
{
- struct drm_i915_private *i915 = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
i915_driver_shutdown(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e3d79227e3c..41f4350a7c6c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2396,6 +2396,7 @@
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
+#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
#define MMIO_TIMEOUT_US(us) ((us) << 0)
/* interrupts */
@@ -2515,6 +2516,10 @@
#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
@@ -2574,6 +2579,7 @@
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
#define GEN8_DE_MISC_GSE REG_BIT(27)
#define GEN8_DE_EDP_PSR REG_BIT(19)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 613decd47760..8775beab9cb8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -191,8 +191,8 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
i915_gpu_error_sysfs_teardown(dev_priv);
- device_remove_bin_file(kdev, &dpf_attrs_1);
- device_remove_bin_file(kdev, &dpf_attrs);
+ device_remove_bin_file(kdev, &dpf_attrs_1);
+ device_remove_bin_file(kdev, &dpf_attrs);
kobject_put(dev_priv->sysfs_gt);
}
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 6f9e7b354b54..2576f8f6c0f6 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -11,51 +11,10 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *kdev = dev_priv->drm.dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (is_error)
- dev_printk(level, kdev, "%pV", &vaf);
- else
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- va_end(args);
-
- if (is_error && !shown_bug_once) {
- /*
- * Ask the user to file a bug report for the error, except
- * if they may have caused the bug by fiddling with unsafe
- * module parameters.
- */
- if (!test_taint(TAINT_USER))
- dev_notice(kdev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-}
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
{
- __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
- taint, (void *)_RET_IP_);
+ drm_notice(&i915->drm, "CI tainted: %#x by %pS\n",
+ taint, __builtin_return_address(0));
/* Failures that occur during fault injection testing are expected */
if (!i915_error_injected())
@@ -74,9 +33,9 @@ int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
return 0;
- __i915_printk(i915, KERN_INFO,
- "Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_probe_failure, func, line);
+ drm_info(&i915->drm, "Injecting failure %d at checkpoint %u [%s:%d]\n",
+ err, i915_modparams.inject_probe_failure, func, line);
+
i915_modparams.inject_probe_failure = 0;
return err;
}
@@ -110,7 +69,7 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
* Paranoia to make sure the compiler computes the timeout before
* loading 'jiffies' as jiffies is volatile and may be updated in
* the background by a timer tick. All to reduce the complexity
- * of the addition and reduce the risk of losing a jiffie.
+ * of the addition and reduce the risk of losing a jiffy.
*/
barrier();
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 06ec6ceb61d5..71bdc89bd621 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -45,13 +45,6 @@ struct timer_list;
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
-void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...);
-
-#define i915_report_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
@@ -69,9 +62,12 @@ bool i915_error_injected(void);
#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
-#define i915_probe_error(i915, fmt, ...) \
- __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
+#define i915_probe_error(i915, fmt, ...) ({ \
+ if (i915_error_injected()) \
+ drm_dbg(&(i915)->drm, fmt, ##__VA_ARGS__); \
+ else \
+ drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
+})
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
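
The reworked i915_probe_error() above relies on a GNU statement expression, ({ ... }), so the if/else pair still behaves as a single expression at the call site. A generic userspace illustration of the idiom (gcc/clang extension; the logging helper is invented):

#include <stdbool.h>
#include <stdio.h>

static bool error_injected;	/* stands in for i915_error_injected() */

#define log_probe_error(fmt, ...) ({				\
	if (error_injected)					\
		printf("dbg: " fmt, ##__VA_ARGS__);		\
	else							\
		printf("err: " fmt, ##__VA_ARGS__);		\
})

int main(void)
{
	log_probe_error("checkpoint %d\n", 3);	/* err: checkpoint 3 */
	error_injected = true;
	log_probe_error("checkpoint %d\n", 4);	/* dbg: checkpoint 4 */
	return 0;
}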
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e356dfb883d3..6a6be8048aa8 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -389,7 +389,6 @@ void i915_vma_unpin_iomap(struct i915_vma *vma);
* i915_vma_unpin_fence().
*
* Returns:
- *
* True if the vma has a fence, false otherwise.
*/
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index eede5417cb3f..3c47c625993e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -108,8 +108,6 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
- drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
- drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
drm_printf(p, "gt: %d\n", info->gt);
drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions);
@@ -124,7 +122,6 @@ void intel_device_info_print(const struct intel_device_info *info,
#undef PRINT_FLAG
drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
- drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
#define ID(id) (id)
@@ -377,10 +374,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
"Disabling ppGTT for VT-d support\n");
runtime->ppgtt_type = INTEL_PPGTT_NONE;
}
-
- runtime->rawclk_freq = intel_read_rawclk(dev_priv);
- drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
-
}
/*
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index df73ef94615d..643ff1bf74ee 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -207,8 +207,6 @@ struct intel_runtime_info {
u16 device_id;
- u32 rawclk_freq;
-
struct intel_step_info step;
unsigned int page_sizes; /* page sizes supported by the HW */
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index a5adfb5d8fd2..285b96fadfd5 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -23,8 +23,7 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x
-#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
+#define COMMON_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
static const struct intel_step_info skl_revids[] = {
[0x6] = { COMMON_STEP(G0) },
@@ -34,13 +33,13 @@ static const struct intel_step_info skl_revids[] = {
};
static const struct intel_step_info kbl_revids[] = {
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
- [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 },
- [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 },
- [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 },
- [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 },
- [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 },
+ [1] = { COMMON_STEP(B0) },
+ [2] = { COMMON_STEP(C0) },
+ [3] = { COMMON_STEP(D0) },
+ [4] = { COMMON_STEP(F0) },
+ [5] = { COMMON_STEP(C0) },
+ [6] = { COMMON_STEP(D1) },
+ [7] = { COMMON_STEP(G0) },
};
static const struct intel_step_info bxt_revids[] = {
@@ -64,16 +63,16 @@ static const struct intel_step_info jsl_ehl_revids[] = {
};
static const struct intel_step_info tgl_uy_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
- [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
- [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
+ [2] = { COMMON_STEP(B1) },
+ [3] = { COMMON_STEP(C0) },
};
/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
static const struct intel_step_info tgl_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
};
static const struct intel_step_info rkl_revids[] = {
@@ -88,49 +87,49 @@ static const struct intel_step_info dg1_revids[] = {
};
static const struct intel_step_info adls_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct intel_step_info adlp_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(C0) },
};
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
};
static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
- [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x5] = { COMMON_STEP(B1) },
};
static const struct intel_step_info dg2_g12_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_C0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
};
static const struct intel_step_info adls_rpls_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_D0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+ [0x4] = { COMMON_STEP(D0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct intel_step_info adlp_rplp_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_E0 },
+ [0x4] = { COMMON_STEP(C0) },
};
static const struct intel_step_info adlp_n_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
};
static u8 gmd_to_intel_step(struct drm_i915_private *i915,
@@ -158,11 +157,6 @@ void intel_step_init(struct drm_i915_private *i915)
&RUNTIME_INFO(i915)->graphics.ip);
step.media_step = gmd_to_intel_step(i915,
&RUNTIME_INFO(i915)->media.ip);
- step.display_step = STEP_A0 + DISPLAY_RUNTIME_INFO(i915)->ip.step;
- if (step.display_step >= STEP_FUTURE) {
- drm_dbg(&i915->drm, "Using future display steppings\n");
- step.display_step = STEP_FUTURE;
- }
RUNTIME_INFO(i915)->step = step;
@@ -252,7 +246,6 @@ void intel_step_init(struct drm_i915_private *i915)
} else {
drm_dbg(&i915->drm, "Using future steppings\n");
step.graphics_step = STEP_FUTURE;
- step.display_step = STEP_FUTURE;
}
}
@@ -275,8 +268,3 @@ const char *intel_step_name(enum intel_step step)
return "**";
}
}
-
-const char *intel_display_step_name(struct drm_i915_private *i915)
-{
- return intel_step_name(RUNTIME_INFO(i915)->step.display_step);
-}
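
The revid tables above are sparse arrays indexed by PCI revision ID; designated initializers leave unlisted revisions zero, which reads back as STEP_NONE. A minimal model of that lookup pattern (step values and revids invented):

#include <stdio.h>

enum step { STEP_NONE = 0, STEP_A0, STEP_B0, STEP_C0 };

struct step_info { enum step graphics_step, media_step; };

#define COMMON_STEP(x) { .graphics_step = (x), .media_step = (x) }

/* Sparse by design: gaps stay zero-initialized, i.e. STEP_NONE. */
static const struct step_info revids[] = {
	[0x0] = COMMON_STEP(STEP_A0),
	[0x4] = COMMON_STEP(STEP_B0),
	[0x8] = COMMON_STEP(STEP_C0),
};

static enum step graphics_step_for(unsigned int revid)
{
	if (revid >= sizeof(revids) / sizeof(revids[0]))
		return STEP_NONE;
	return revids[revid].graphics_step;
}

int main(void)
{
	printf("%d %d\n", graphics_step_for(0x4), graphics_step_for(0x2)); /* 2 0 */
	return 0;
}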
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index b6f43b624774..22f1d6905160 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -16,9 +16,7 @@ struct intel_step_info {
* the expectation breaks gmd_to_intel_step().
*/
u8 graphics_step; /* Represents the compute tile on Xe_HPC */
- u8 display_step;
u8 media_step;
- u8 basedie_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
@@ -78,6 +76,5 @@ enum intel_step {
void intel_step_init(struct drm_i915_private *i915);
const char *intel_step_name(enum intel_step step);
-const char *intel_display_step_name(struct drm_i915_private *i915);
#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 2eba289d88ad..6aa179a3e92a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,7 @@
#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
@@ -180,14 +181,16 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
return;
- if (fw_ack(d) == ~0)
+ if (fw_ack(d) == ~0) {
drm_err(&d->uncore->i915->drm,
"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
intel_uncore_forcewake_domain_to_str(d->id));
- else
+ intel_gt_set_wedged_async(d->uncore->gt);
+ } else {
drm_err(&d->uncore->i915->drm,
"%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ }
add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
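
The forcewake change distinguishes two failure modes: an ack register reading back as all ones means MMIO itself is dead (so the GT is wedged), while any other value is an ordinary timeout. A trivial userspace sketch of that classification:

#include <stdint.h>
#include <stdio.h>

/* All-ones reads usually mean the device fell off the bus or lost power. */
static const char *classify_fw_ack(uint32_t ack)
{
	if (ack == ~0u)
		return "MMIO unreliable: wedge the GT";
	return "forcewake ack timeout: taint CI";
}

int main(void)
{
	printf("%s\n", classify_fw_ack(0xffffffff));
	printf("%s\n", classify_fw_ack(0x1));
	return 0;
}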
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ae6070b5bf07..f08f6674911e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -517,7 +517,7 @@ static int igt_mock_max_segment(void *arg)
if (!IS_ALIGNED(daddr, ps)) {
pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
- __func__, &daddr, ps);
+ __func__, &daddr, ps);
err = -EINVAL;
goto out_close;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0bd29846873b..91794ca17a58 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -172,7 +172,7 @@ struct drm_i915_private *mock_gem_device(void)
return NULL;
}
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index ecdd5767d8ef..b574e23d484b 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -668,7 +668,7 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
void *padding_start = ((u8 *)instance) + union_offset + member_size;
size_t padding_size = union_size - member_size;
- return !memchr_inv(padding_start, 0, padding_size);
+ return mem_is_zero(padding_start, padding_size);
}
/**
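
mem_is_zero() here replaces the negated memchr_inv() call with a direct readability helper; in the kernel it is defined in terms of memchr_inv(). A userspace stand-in for the padding check (struct layout invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's mem_is_zero(). */
static bool mem_is_zero(const void *s, size_t n)
{
	const unsigned char *p = s;

	while (n--)
		if (*p++)
			return false;
	return true;
}

struct ioctl_arg { unsigned int member; unsigned char pad[12]; };

int main(void)
{
	struct ioctl_arg a = { .member = 7 };

	/* Union/struct padding must be zero for forward compatibility. */
	printf("%d\n", mem_is_zero(a.pad, sizeof(a.pad))); /* 1 */
	a.pad[3] = 0xff;
	printf("%d\n", mem_is_zero(a.pad, sizeof(a.pad))); /* 0 */
	return 0;
}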
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5ed9c98fb599..20cb46012082 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
}
}
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
/**
@@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
}
mutex_unlock(&pvr_dev->queues.lock);
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 55dedd73f528..91d7808a2d8d 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -34,7 +34,7 @@ struct imx_parallel_display_encoder {
struct imx_parallel_display {
struct device *dev;
- void *edid;
+ const struct drm_edid *drm_edid;
u32 bus_format;
u32 bus_flags;
struct drm_display_mode mode;
@@ -62,9 +62,9 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (num_modes > 0)
return num_modes;
- if (imxpd->edid) {
- drm_connector_update_edid_property(connector, imxpd->edid);
- num_modes = drm_add_edid_modes(connector, imxpd->edid);
+ if (imxpd->drm_edid) {
+ drm_edid_connector_update(connector, imxpd->drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
}
if (np) {
@@ -331,7 +331,7 @@ static int imx_pd_probe(struct platform_device *pdev)
edidp = of_get_property(np, "edid", &edid_len);
if (edidp)
- imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);
+ imxpd->drm_edid = drm_edid_alloc(edidp, edid_len);
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -355,7 +355,11 @@ static int imx_pd_probe(struct platform_device *pdev)
static void imx_pd_remove(struct platform_device *pdev)
{
+ struct imx_parallel_display *imxpd = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &imx_pd_ops);
+
+ drm_edid_free(imxpd->drm_edid);
}
static const struct of_device_id imx_pd_dt_ids[] = {
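
For reference, the drm_edid migration above follows a common lifecycle: allocate an opaque struct drm_edid from the raw blob at probe, feed it to the connector helpers in get_modes, and free it at teardown. A kernel-style sketch of that flow (not standalone-buildable; error handling trimmed, wrapper names ours, the DRM API names real):

#include <linux/of.h>
#include <drm/drm_edid.h>

/* probe: wrap the devicetree blob in an opaque drm_edid */
static const struct drm_edid *edid_from_dt(struct device_node *np)
{
	int len;
	const void *blob = of_get_property(np, "edid", &len);

	return blob ? drm_edid_alloc(blob, len) : NULL;
}

/* get_modes: update the EDID property, then add modes from it */
static int sketch_get_modes(struct drm_connector *connector,
			    const struct drm_edid *drm_edid)
{
	drm_edid_connector_update(connector, drm_edid);
	return drm_edid_connector_add_modes(connector);
}

/* remove: drm_edid_free(drm_edid); */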
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index bbf3f8feab94..1a944edb6ddc 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
lima_pm_idle(ldev);
drm_sched_resubmit_jobs(&pipe->base);
- drm_sched_start(&pipe->base, true);
+ drm_sched_start(&pipe->base);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index 465f622ac05d..2e42c6970c9f 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -341,16 +341,12 @@ void lsdc_bo_unpin(struct lsdc_bo *lbo)
void lsdc_bo_ref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_get(tbo);
+ drm_gem_object_get(&lbo->tbo.base);
}
void lsdc_bo_unref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_put(tbo);
+ drm_gem_object_put(&lbo->tbo.base);
}
int lsdc_bo_kmap(struct lsdc_bo *lbo)
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 6f34f573e127..175b00e5a253 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -69,6 +69,8 @@ struct mtk_crtc {
/* lock for display hardware access */
struct mutex hw_lock;
bool config_updating;
+	/* lock protecting config_updating against IRQ and cmdq-callback access */
+ spinlock_t config_lock;
};
struct mtk_crtc_state {
@@ -106,51 +108,18 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
+ unsigned long flags;
+
drm_crtc_handle_vblank(&mtk_crtc->base);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
- size_t size)
-{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base)
- return -ENOMEM;
-
- pkt->buf_size = size;
- pkt->cl = (void *)client;
-
- dev = client->chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
-{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
-}
-#endif
-
static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -158,7 +127,7 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
if (mtk_crtc->cmdq_client.chan) {
mbox_free_channel(mtk_crtc->cmdq_client.chan);
@@ -308,12 +277,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
+ unsigned long flags;
if (data->sta < 0)
return;
state = to_mtk_crtc_state(mtk_crtc->base.state);
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating) {
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ goto ddp_cmdq_cb_out;
+ }
+
state->pending_config = false;
if (mtk_crtc->pending_planes) {
@@ -340,6 +316,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
+ddp_cmdq_cb_out:
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -449,6 +429,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned long flags;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
@@ -480,10 +461,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
@@ -569,9 +550,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0, pending_async_planes = 0;
int i;
+ unsigned long flags;
mutex_lock(&mtk_crtc->hw_lock);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = true;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
if (needs_vblank)
mtk_crtc->pending_needs_vblank = true;
@@ -607,7 +593,7 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
- cmdq_pkt_finalize(cmdq_handle);
+ cmdq_pkt_eoc(cmdq_handle);
dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
@@ -625,7 +611,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mutex_unlock(&mtk_crtc->hw_lock);
}
@@ -925,7 +914,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp));
+ mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
return ret;
@@ -1068,6 +1057,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
mutex_init(&mtk_crtc->hw_lock);
+ spin_lock_init(&mtk_crtc->config_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
i = priv->mbox_index++;
@@ -1094,9 +1084,9 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
} else {
- ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
- &mtk_crtc->cmdq_handle,
- PAGE_SIZE);
+ ret = cmdq_pkt_create(&mtk_crtc->cmdq_client,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
drm_crtc_index(&mtk_crtc->base));
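
The new config_lock exists because config_updating is written from the atomic-commit path but read from the vblank interrupt and the CMDQ mailbox callback, where the hw_lock mutex cannot be taken. A reduced userspace model of the pattern using a pthread spinlock (names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t config_lock;
static bool config_updating;
static bool pending_needs_vblank;

/* "IRQ" side: only complete the flip if no update is in flight. */
static void vblank_handler(void)
{
	pthread_spin_lock(&config_lock);
	if (!config_updating && pending_needs_vblank) {
		puts("finish page flip");
		pending_needs_vblank = false;
	}
	pthread_spin_unlock(&config_lock);
}

/* Commit side: bracket the hardware update with the flag. */
static void update_config(void)
{
	pthread_spin_lock(&config_lock);
	config_updating = true;
	pthread_spin_unlock(&config_lock);

	pending_needs_vblank = true;	/* program hardware here */

	pthread_spin_lock(&config_lock);
	config_updating = false;
	pthread_spin_unlock(&config_lock);
}

int main(void)
{
	pthread_spin_init(&config_lock, PTHREAD_PROCESS_PRIVATE);
	update_config();
	vblank_handler();		/* prints "finish page flip" */
	pthread_spin_destroy(&config_lock);
	return 0;
}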
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 9d6d9fd8342e..89b439dcf3a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -56,8 +56,12 @@
#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
+#define OVL_CON_CLRFMT_MAN BIT(23)
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+
+/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
+#define OVL_CON_RGB_SWAP BIT(25)
+
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
@@ -65,6 +69,11 @@
#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
+#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+#define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN)
+#define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
+#define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
@@ -377,7 +386,8 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
+ unsigned int blend_mode)
{
	/* The MEM_MODE_INPUT_FORMAT_XXX values returned by this switch
	 * are defined in the MediaTek hardware data sheet.
@@ -398,22 +408,30 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_RGBA1010102:
- return OVL_CON_CLRFMT_RGBA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_RGBA8888 :
+ OVL_CON_CLRFMT_PRGBA8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- return OVL_CON_CLRFMT_BGRA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_BGRA8888 :
+ OVL_CON_CLRFMT_PBGRA8888;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- return OVL_CON_CLRFMT_ARGB8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ARGB8888 :
+ OVL_CON_CLRFMT_PARGB8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- return OVL_CON_CLRFMT_ABGR8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ABGR8888 :
+ OVL_CON_CLRFMT_PABGR8888;
case DRM_FORMAT_UYVY:
return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
case DRM_FORMAT_YUYV:
@@ -434,6 +452,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
unsigned int fmt = pending->format;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
+ unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
@@ -452,7 +471,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
- con = ovl_fmt_convert(ovl, fmt);
+ con = ovl_fmt_convert(ovl, fmt, blend_mode);
if (state->base.fb) {
con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
@@ -463,7 +482,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
* For RGB888 related formats, whether CONST_BLD is enabled or not won't
* affect the result. Therefore we use !has_alpha as the condition.
*/
- if (state->base.fb && !state->base.fb->format->has_alpha)
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
ignore_pixel_alpha = OVL_CONST_BLEND;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
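
The blend-mode plumbing above selects a premultiplied (P*) pixel format unless the plane uses coverage blending, since the P* variants require OVL_CON_CLRFMT_MAN. A reduced model of that selection (bit values simplified for illustration):

#include <stdio.h>

enum blend { BLEND_COVERAGE, BLEND_PREMULTI, BLEND_PIXEL_NONE };

#define FMT_RGBA8888	(3u << 12)
#define FMT_CLRFMT_MAN	(1u << 23)	/* gates the premultiplied variants */
#define FMT_PRGBA8888	(FMT_RGBA8888 | FMT_CLRFMT_MAN)

/* Coverage keeps the straight-alpha format; everything else gets P*. */
static unsigned int pick_fmt(enum blend mode)
{
	return mode == BLEND_COVERAGE ? FMT_RGBA8888 : FMT_PRGBA8888;
}

int main(void)
{
	printf("%#x %#x\n", pick_fmt(BLEND_COVERAGE), pick_fmt(BLEND_PREMULTI));
	return 0;
}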
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 1a2a73757370..c6768210b08b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -17,7 +17,6 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
@@ -494,12 +493,12 @@ static int compare_of(struct device *dev, void *data)
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
- struct device_node *node, *parent;
+ struct device_node *parent;
struct platform_device *comp_pdev;
parent = dev->parent->parent->of_node->parent;
- for_each_child_of_node(parent, node) {
+ for_each_child_of_node_scoped(parent, node) {
const struct of_device_id *of_id;
enum mtk_ovl_adaptor_comp_type type;
int id;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 634bbba5d43f..07243f372260 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -341,14 +341,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
- if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
- ret = of_property_read_u32(dev->of_node,
- "mediatek,rdma-fifo-size",
- &priv->fifo_size);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to get rdma fifo size\n");
- }
+ ret = of_property_read_u32(dev->of_node,
+ "mediatek,rdma-fifo-size",
+ &priv->fifo_size);
+ if (ret && (ret != -EINVAL))
+ return dev_err_probe(dev, ret, "Failed to get rdma fifo size\n");
/* Disable and clear pending interrupts */
writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 77b50c56c124..3e807195a0d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
* Configure the DMA segment size to make sure we get contiguous IOVA
* when importing PRIME buffers.
*/
- ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
- if (ret) {
- dev_err(dma_dev, "Failed to set DMA segment size\n");
- goto err_component_unbind;
- }
+ dma_set_max_seg_size(dma_dev, UINT_MAX);
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b6e3c011a12d..eeec641cab60 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -88,12 +88,15 @@
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define HFP_HS_VB_PS_WC GENMASK(30, 16)
+#define HFP_HS_EN BIT(31)
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define CMDQ_SIZE_SEL BIT(15)
#define DSI_HSTX_CKL_WC 0x64
+#define HSTX_CKL_WC GENMASK(15, 2)
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
@@ -187,6 +190,7 @@ struct mtk_dsi_driver_data {
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
+ bool support_per_frame_lp;
};
struct mtk_dsi {
@@ -426,7 +430,75 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact)
writel(ps_val, dsi->regs + DSI_PSCTRL);
}
-static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing_per_frame_lp(struct mtk_dsi *dsi)
+{
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 hfp_byte_adjust, v_active_adjust;
+ u32 cklp_wc_min_adjust, cklp_wc_max_adjust;
+ u32 dsi_tmp_buf_bpp;
+ unsigned int da_hs_trail;
+ unsigned int ps_wc, hs_vb_ps_wc;
+ u32 v_active_roundup, hstx_cklp_wc;
+ u32 hstx_cklp_wc_max, hstx_cklp_wc_min;
+ struct videomode *vm = &dsi->vm;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_tmp_buf_bpp = 2;
+ else
+ dsi_tmp_buf_bpp = 3;
+
+ da_hs_trail = dsi->phy_timing.da_hs_trail;
+ ps_wc = vm->hactive * dsi_tmp_buf_bpp;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ horizontal_sync_active_byte =
+ vm->hsync_len * dsi_tmp_buf_bpp - 10;
+ horizontal_backporch_byte =
+ vm->hback_porch * dsi_tmp_buf_bpp - 10;
+ hfp_byte_adjust = 12;
+ v_active_adjust = 32 + horizontal_sync_active_byte;
+ cklp_wc_min_adjust = 12 + 2 + 4 + horizontal_sync_active_byte;
+ cklp_wc_max_adjust = 20 + 6 + 4 + horizontal_sync_active_byte;
+ } else {
+ horizontal_sync_active_byte = vm->hsync_len * dsi_tmp_buf_bpp - 4;
+ horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp - 10;
+ cklp_wc_min_adjust = 4;
+ cklp_wc_max_adjust = 12 + 4 + 4;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hfp_byte_adjust = 18;
+ v_active_adjust = 28;
+ } else {
+ hfp_byte_adjust = 12;
+ v_active_adjust = 22;
+ }
+ }
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp - hfp_byte_adjust;
+ v_active_roundup = (v_active_adjust + horizontal_backporch_byte + ps_wc +
+ horizontal_frontporch_byte) % dsi->lanes;
+ if (v_active_roundup)
+ horizontal_backporch_byte += dsi->lanes - v_active_roundup;
+ hstx_cklp_wc_min = (DIV_ROUND_UP(cklp_wc_min_adjust, dsi->lanes) + da_hs_trail + 1)
+ * dsi->lanes / 6 - 1;
+ hstx_cklp_wc_max = (DIV_ROUND_UP((cklp_wc_max_adjust + horizontal_backporch_byte +
+ ps_wc), dsi->lanes) + da_hs_trail + 1) * dsi->lanes / 6 - 1;
+
+ hstx_cklp_wc = FIELD_PREP(HSTX_CKL_WC, (hstx_cklp_wc_min + hstx_cklp_wc_max) / 2);
+ writel(hstx_cklp_wc, dsi->regs + DSI_HSTX_CKL_WC);
+
+ hs_vb_ps_wc = ps_wc - (dsi->phy_timing.lpx + dsi->phy_timing.da_hs_exit +
+ dsi->phy_timing.da_hs_prepare + dsi->phy_timing.da_hs_zero + 2) * dsi->lanes;
+ horizontal_frontporch_byte |= FIELD_PREP(HFP_HS_EN, 1) |
+ FIELD_PREP(HFP_HS_VB_PS_WC, hs_vb_ps_wc);
+
+ writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
+ writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
+ writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing_per_line_lp(struct mtk_dsi *dsi)
{
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
@@ -436,7 +508,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 dsi_tmp_buf_bpp, data_phy_cycles;
u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
-
struct videomode *vm = &dsi->vm;
if (dsi->format == MIPI_DSI_FMT_RGB565)
@@ -444,16 +515,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
else
dsi_tmp_buf_bpp = 3;
- writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
- writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
- writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
- writel(vm->vactive, dsi->regs + DSI_VACT_NL);
-
- if (dsi->driver_data->has_size_ctl)
- writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
- FIELD_PREP(DSI_WIDTH, vm->hactive),
- dsi->regs + DSI_SIZE_CON);
-
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -499,6 +560,26 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+
+ writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
+ writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
+ writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
+ writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+
+ if (dsi->driver_data->has_size_ctl)
+ writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
+ FIELD_PREP(DSI_WIDTH, vm->hactive),
+ dsi->regs + DSI_SIZE_CON);
+
+ if (dsi->driver_data->support_per_frame_lp)
+ mtk_dsi_config_vdo_timing_per_frame_lp(dsi);
+ else
+ mtk_dsi_config_vdo_timing_per_line_lp(dsi);
mtk_dsi_ps_control(dsi, false);
}
@@ -1197,6 +1278,7 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
+ .support_per_frame_lp = true,
};
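
The per-frame LP path converts porch widths from pixels into byte counts on the DSI link, minus per-mode protocol overhead. A worked check of the burst-mode front-porch formula from above (mode values are sample numbers):

#include <stdio.h>

int main(void)
{
	unsigned int hfront_porch = 40;		/* pixels, sample value */
	unsigned int bpp = 3;			/* RGB888 */
	unsigned int hfp_byte_adjust = 18;	/* burst mode, no sync pulse */

	/* horizontal_frontporch_byte in the burst-mode branch above */
	unsigned int hfp_wc = hfront_porch * bpp - hfp_byte_adjust;

	printf("HFP word count: %u bytes\n", hfp_wc);	/* 102 */
	return 0;
}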
static const struct of_device_id mtk_dsi_of_match[] = {
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 9dfd13d32dfa..d1d9cf8b10e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021 MediaTek Inc.
*/
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
@@ -35,6 +36,7 @@
#define MIX_SRC_L0_EN BIT(0)
#define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n))
#define NON_PREMULTI_SOURCE (2 << 12)
+#define PREMULTI_SOURCE (3 << 12)
#define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n))
#define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n))
#define MIX_FUNC_DCM0 0x120
@@ -175,7 +177,13 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
alpha_con |= state->base.alpha & MIXER_ALPHA;
}
- if (state->base.fb && !state->base.fb->format->has_alpha) {
+ if (state->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ alpha_con |= PREMULTI_SOURCE;
+ else
+ alpha_con |= NON_PREMULTI_SOURCE;
+
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ state->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
/*
* Mixer doesn't support CONST_BLD mode,
* use a trick to make the output equivalent
@@ -191,8 +199,7 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base,
mixer->regs, MIX_L_SRC_SIZE(idx));
mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx));
- mtk_ddp_write_mask(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx),
- 0x1ff);
+ mtk_ddp_write(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx));
mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON,
BIT(idx));
}
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 1723d4333f37..7d2cb4e0fafa 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -321,7 +321,7 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats)
+ size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -338,6 +338,22 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ /*
+ * The hardware does not support repositioning planes by muxing: their
+	 * Z-position is in fact fixed and the only way to change the actual
+ * order is to swap the contents of the entire register set of one
+ * overlay with another, which may be more expensive than desired.
+ *
+ * With no repositioning, the caller of this function guarantees that
+ * the plane_idx is correct. This means that, for example, the PRIMARY
+ * plane fed to this function will always have plane_idx zero.
+ */
+ err = drm_plane_create_zpos_immutable_property(plane, plane_idx);
+ if (err) {
+ DRM_ERROR("Failed to create zpos property for plane %u\n", plane_idx);
+ return err;
+ }
+
if (supported_rotations) {
err = drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
@@ -346,6 +362,17 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
DRM_INFO("Create rotation property failed\n");
}
+ err = drm_plane_create_alpha_property(plane);
+ if (err)
+ DRM_ERROR("failed to create property: alpha\n");
+
+ err = drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE));
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
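
Since each MediaTek overlay's Z-order is fixed in hardware, zpos is exposed as an immutable property rather than a mutable one compositors could reorder. A kernel-style sketch of that init step (not standalone-buildable; the wrapper name is ours, the DRM helper is real):

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

/* Fixed-Z hardware: plane_idx doubles as the immutable zpos (PRIMARY = 0). */
static int init_fixed_zpos(struct drm_plane *plane, unsigned int plane_idx)
{
	return drm_plane_create_zpos_immutable_property(plane, plane_idx);
}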
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 231bb7aac947..5b177eac67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,6 +49,5 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats);
-
+ size_t num_formats, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index d1b25f9f6586..5a02203fad12 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -12,6 +12,7 @@ mgag200-y := \
mgag200_g200se.o \
mgag200_g200wb.o \
mgag200_mode.o \
+ mgag200_vga_bmc.o \
mgag200_vga.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c
index 23ef85aa7e37..a689c71ff165 100644
--- a/drivers/gpu/drm/mgag200/mgag200_bmc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c
@@ -9,12 +9,7 @@
#include "mgag200_drv.h"
-static struct mgag200_bmc_connector *to_mgag200_bmc_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct mgag200_bmc_connector, base);
-}
-
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_stop_scanout(struct mga_device *mdev)
{
u8 tmp;
int iter_max;
@@ -73,15 +68,10 @@ void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
}
}
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_start_scanout(struct mga_device *mdev)
{
u8 tmp;
- /* Ensure that the vrsten and hrsten are set */
- WREG8(MGAREG_CRTCEXT_INDEX, 1);
- tmp = RREG8(MGAREG_CRTCEXT_DATA);
- WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
-
/* Assert rstlvl2 */
WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
tmp = RREG8(DAC_DATA);
@@ -107,100 +97,3 @@ void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
tmp &= ~0x10;
WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
-
-static const struct drm_encoder_funcs mgag200_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int mgag200_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct mgag200_bmc_connector *bmc_connector = to_mgag200_bmc_connector(connector);
- struct drm_connector *physical_connector = bmc_connector->physical_connector;
-
- /*
- * Most user-space compositors cannot handle more than one connected
- * connector per CRTC. Hence, we only mark the BMC as connected if the
- * physical connector is disconnected. If the physical connector's status
- * is connected or unknown, the BMC remains disconnected. This has no
- * effect on the output of the BMC.
- *
- * FIXME: Remove this logic once user-space compositors can handle more
- * than one connector per CRTC. The BMC should always be connected.
- */
-
- if (physical_connector && physical_connector->status == connector_status_disconnected)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static int mgag200_bmc_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct mga_device *mdev = to_mga_device(dev);
- const struct mgag200_device_info *minfo = mdev->info;
-
- return drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
-}
-
-static const struct drm_connector_helper_funcs mgag200_bmc_connector_helper_funcs = {
- .get_modes = mgag200_bmc_connector_helper_get_modes,
- .detect_ctx = mgag200_bmc_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs mgag200_bmc_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mgag200_bmc_connector_init(struct drm_device *dev,
- struct mgag200_bmc_connector *bmc_connector,
- struct drm_connector *physical_connector)
-{
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_connector_init(dev, connector, &mgag200_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret)
- return ret;
- drm_connector_helper_add(connector, &mgag200_bmc_connector_helper_funcs);
-
- bmc_connector->physical_connector = physical_connector;
-
- return 0;
-}
-
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector)
-{
- struct drm_device *dev = &mdev->base;
- struct drm_crtc *crtc = &mdev->crtc;
- struct drm_encoder *encoder;
- struct mgag200_bmc_connector *bmc_connector;
- struct drm_connector *connector;
- int ret;
-
- encoder = &mdev->output.bmc.encoder;
- ret = drm_encoder_init(dev, encoder, &mgag200_bmc_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- bmc_connector = &mdev->output.bmc.bmc_connector;
- ret = mgag200_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
- connector = &bmc_connector->base;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 62080cf0f2da..6623ee4e3277 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -84,6 +85,34 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
+static irqreturn_t mgag200_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc *crtc;
+ u32 status, ien;
+
+ status = RREG32(MGAREG_STATUS);
+
+ if (status & MGAREG_STATUS_VLINEPEN) {
+ ien = RREG32(MGAREG_IEN);
+ if (!(ien & MGAREG_IEN_VLINEIEN))
+ goto out;
+
+ crtc = drm_crtc_from_index(dev, 0);
+ if (WARN_ON_ONCE(!crtc))
+ goto out;
+ drm_crtc_handle_vblank(crtc);
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ return IRQ_HANDLED;
+ }
+
+out:
+ return IRQ_NONE;
+}
+
/*
* DRM driver
*/
@@ -167,6 +196,7 @@ int mgag200_device_init(struct mga_device *mdev,
const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 crtcext3, misc;
int ret;
@@ -192,6 +222,16 @@ int mgag200_device_init(struct mga_device *mdev,
mutex_unlock(&mdev->rmmio_lock);
+ WREG32(MGAREG_IEN, 0);
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
+ dev->driver->name, dev);
+ if (ret) {
+ drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
+ return ret;
+ }
+
return 0;
}
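
The ordering in the hunk above matters on a shared PCI interrupt line: once devm_request_irq() returns, the handler may run immediately, so all device interrupts are masked and stale VLINE status is cleared first. A minimal sketch of that setup sequence (the helper name is hypothetical):

static int example_irq_setup(struct mga_device *mdev, struct pci_dev *pdev,
			     struct drm_device *dev)
{
	WREG32(MGAREG_IEN, 0);                          /* mask all interrupts */
	WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); /* drop stale status */

	/* Only now is it safe for the shared handler to be invoked. */
	return devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler,
				IRQF_SHARED, dev->driver->name, dev);
}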
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 7f7dfbd0f013..4760ba92871b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -179,6 +179,8 @@ struct mgag200_crtc_state {
const struct drm_format_info *format;
struct mgag200_pll_values pixpllc;
+
+ bool set_vidrst;
};
static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_state *base)
@@ -186,11 +188,6 @@ static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_s
return container_of(base, struct mgag200_crtc_state, base);
}
-struct mgag200_bmc_connector {
- struct drm_connector base;
- struct drm_connector *physical_connector;
-};
-
enum mga_type {
G200_PCI,
G200_AGP,
@@ -214,8 +211,8 @@ struct mgag200_device_info {
*/
unsigned long max_mem_bandwidth;
- /* HW has external source (e.g., BMC) to synchronize with */
- bool has_vidrst:1;
+ /* Synchronize scanout with BMC */
+ bool sync_bmc:1;
struct {
unsigned data_bit:3;
@@ -230,13 +227,13 @@ struct mgag200_device_info {
};
#define MGAG200_DEVICE_INFO_INIT(_max_hdisplay, _max_vdisplay, _max_mem_bandwidth, \
- _has_vidrst, _i2c_data_bit, _i2c_clock_bit, \
+ _sync_bmc, _i2c_data_bit, _i2c_clock_bit, \
_bug_no_startadd) \
{ \
.max_hdisplay = (_max_hdisplay), \
.max_vdisplay = (_max_vdisplay), \
.max_mem_bandwidth = (_max_mem_bandwidth), \
- .has_vidrst = (_has_vidrst), \
+ .sync_bmc = (_sync_bmc), \
.i2c = { \
.data_bit = (_i2c_data_bit), \
.clock_bit = (_i2c_clock_bit), \
@@ -246,18 +243,6 @@ struct mgag200_device_info {
struct mgag200_device_funcs {
/*
- * Disables an external reset source (i.e., BMC) before programming
- * a new display mode.
- */
- void (*disable_vidrst)(struct mga_device *mdev);
-
- /*
- * Enables an external reset source (i.e., BMC) after programming
- * a new display mode.
- */
- void (*enable_vidrst)(struct mga_device *mdev);
-
- /*
* Validate that the given state can be programmed into PIXPLLC. On
* success, the calculated parameters should be stored in the CRTC's
* state in struct @mgag200_crtc_state.pixpllc.
@@ -293,10 +278,6 @@ struct mga_device {
struct drm_encoder encoder;
struct drm_connector connector;
} vga;
- struct {
- struct drm_encoder encoder;
- struct mgag200_bmc_connector bmc_connector;
- } bmc;
} output;
};
@@ -410,17 +391,24 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
#define MGAG200_CRTC_HELPER_FUNCS \
.mode_valid = mgag200_crtc_helper_mode_valid, \
.atomic_check = mgag200_crtc_helper_atomic_check, \
.atomic_flush = mgag200_crtc_helper_atomic_flush, \
.atomic_enable = mgag200_crtc_helper_atomic_enable, \
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable, \
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position
void mgag200_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
#define MGAG200_CRTC_FUNCS \
.reset = mgag200_crtc_reset, \
@@ -428,20 +416,26 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
.set_config = drm_atomic_helper_set_config, \
.page_flip = drm_atomic_helper_page_flip, \
.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
- .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
+ .enable_vblank = mgag200_crtc_enable_vblank, \
+ .disable_vblank = mgag200_crtc_disable_vblank, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode);
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst);
void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format);
void mgag200_enable_display(struct mga_device *mdev);
void mgag200_init_registers(struct mga_device *mdev);
int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available);
+/* mgag200_vga_bmc.c */
+int mgag200_vga_bmc_output_init(struct mga_device *mdev);
+
/* mgag200_vga.c */
int mgag200_vga_output_init(struct mga_device *mdev);
- /* mgag200_bmc.c */
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev);
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev);
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector);
+/* mgag200_bmc.c */
+void mgag200_bmc_stop_scanout(struct mga_device *mdev);
+void mgag200_bmc_start_scanout(struct mga_device *mdev);
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index f874e2949840..77ce8d36cef0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -403,5 +404,9 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 52bf49ead5c5..09ced65c1d2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -214,11 +215,7 @@ static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -279,5 +276,9 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index e7f89b2a59fd..5daa469137bd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -118,11 +119,7 @@ static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -184,5 +181,9 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 4e8a1756138d..09cfffafe130 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -191,11 +192,8 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -209,8 +207,7 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -218,7 +215,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -257,11 +255,7 @@ static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -318,5 +312,9 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index d884f3cb0ec7..3d48baa91d8b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -192,11 +193,8 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -210,8 +208,7 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -219,7 +216,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -258,11 +256,7 @@ static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -323,5 +317,9 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 839401e8b465..dabc778e64e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -127,11 +128,7 @@ static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -146,8 +143,6 @@ static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
};
@@ -204,5 +199,9 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index a824bb8ad579..9dcbe8304271 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -323,11 +324,8 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -341,8 +339,7 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -350,7 +347,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -389,11 +387,7 @@ static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -523,5 +517,9 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index 835df0f4fc13..83a24aedbf2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -261,11 +262,7 @@ static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -280,8 +277,6 @@ static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
};
@@ -328,5 +323,9 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d4550e4b3b01..7159909aca1e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "mgag200_ddc.h"
#include "mgag200_drv.h"
@@ -201,26 +202,39 @@ void mgag200_init_registers(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
}
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst)
{
- const struct mgag200_device_info *info = mdev->info;
- unsigned int hdisplay, hsyncstart, hsyncend, htotal;
- unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
+ unsigned int hdispend, hsyncstr, hsyncend, htotal, hblkstr, hblkend;
+ unsigned int vdispend, vsyncstr, vsyncend, vtotal, vblkstr, vblkend;
+ unsigned int linecomp;
u8 misc, crtcext1, crtcext2, crtcext5;
- hdisplay = mode->hdisplay / 8 - 1;
- hsyncstart = mode->hsync_start / 8 - 1;
- hsyncend = mode->hsync_end / 8 - 1;
- htotal = mode->htotal / 8 - 1;
-
+ hdispend = mode->crtc_hdisplay / 8 - 1;
+ hsyncstr = mode->crtc_hsync_start / 8 - 1;
+ hsyncend = mode->crtc_hsync_end / 8 - 1;
+ htotal = mode->crtc_htotal / 8 - 1;
/* Work around hardware quirk */
if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
htotal++;
+ hblkstr = mode->crtc_hblank_start / 8 - 1;
+ hblkend = htotal;
+
+ vdispend = mode->crtc_vdisplay - 1;
+ vsyncstr = mode->crtc_vsync_start - 1;
+ vsyncend = mode->crtc_vsync_end - 1;
+ vtotal = mode->crtc_vtotal - 2;
+ vblkstr = mode->crtc_vblank_start;
+ vblkend = vtotal + 1;
- vdisplay = mode->vdisplay - 1;
- vsyncstart = mode->vsync_start - 1;
- vsyncend = mode->vsync_end - 1;
- vtotal = mode->vtotal - 2;
+ /*
+ * There's no VBLANK interrupt on Matrox chipsets, so we use
+ * the VLINE interrupt instead. It triggers when the scanout
+ * reaches the line given in <linecomp>. For VBLANK, this is the first
+ * non-visible line at the bottom of the screen. Therefore,
+ * keep <linecomp> in sync with <vblkstr>.
+ */
+ linecomp = vblkstr;
misc = RREG8(MGA_MISC_IN);
@@ -235,45 +249,45 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
misc &= ~MGAREG_MISC_VSYNCPOL;
crtcext1 = (((htotal - 4) & 0x100) >> 8) |
- ((hdisplay & 0x100) >> 7) |
- ((hsyncstart & 0x100) >> 6) |
- (htotal & 0x40);
- if (info->has_vidrst)
+ ((hblkstr & 0x100) >> 7) |
+ ((hsyncstr & 0x100) >> 6) |
+ (hblkend & 0x40);
+ if (set_vidrst)
crtcext1 |= MGAREG_CRTCEXT1_VRSTEN |
MGAREG_CRTCEXT1_HRSTEN;
crtcext2 = ((vtotal & 0xc00) >> 10) |
- ((vdisplay & 0x400) >> 8) |
- ((vdisplay & 0xc00) >> 7) |
- ((vsyncstart & 0xc00) >> 5) |
- ((vdisplay & 0x400) >> 3);
+ ((vdispend & 0x400) >> 8) |
+ ((vblkstr & 0xc00) >> 7) |
+ ((vsyncstr & 0xc00) >> 5) |
+ ((linecomp & 0x400) >> 3);
crtcext5 = 0x00;
- WREG_CRT(0, htotal - 4);
- WREG_CRT(1, hdisplay);
- WREG_CRT(2, hdisplay);
- WREG_CRT(3, (htotal & 0x1F) | 0x80);
- WREG_CRT(4, hsyncstart);
- WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
- WREG_CRT(6, vtotal & 0xFF);
- WREG_CRT(7, ((vtotal & 0x100) >> 8) |
- ((vdisplay & 0x100) >> 7) |
- ((vsyncstart & 0x100) >> 6) |
- ((vdisplay & 0x100) >> 5) |
- ((vdisplay & 0x100) >> 4) | /* linecomp */
- ((vtotal & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3) |
- ((vsyncstart & 0x200) >> 2));
- WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3));
- WREG_CRT(16, vsyncstart & 0xFF);
- WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
- WREG_CRT(18, vdisplay & 0xFF);
- WREG_CRT(20, 0);
- WREG_CRT(21, vdisplay & 0xFF);
- WREG_CRT(22, (vtotal + 1) & 0xFF);
- WREG_CRT(23, 0xc3);
- WREG_CRT(24, vdisplay & 0xFF);
+ WREG_CRT(0x00, htotal - 4);
+ WREG_CRT(0x01, hdispend);
+ WREG_CRT(0x02, hblkstr);
+ WREG_CRT(0x03, (hblkend & 0x1f) | 0x80);
+ WREG_CRT(0x04, hsyncstr);
+ WREG_CRT(0x05, ((hblkend & 0x20) << 2) | (hsyncend & 0x1f));
+ WREG_CRT(0x06, vtotal & 0xff);
+ WREG_CRT(0x07, ((vtotal & 0x100) >> 8) |
+ ((vdispend & 0x100) >> 7) |
+ ((vsyncstr & 0x100) >> 6) |
+ ((vblkstr & 0x100) >> 5) |
+ ((linecomp & 0x100) >> 4) |
+ ((vtotal & 0x200) >> 4) |
+ ((vdispend & 0x200) >> 3) |
+ ((vsyncstr & 0x200) >> 2));
+ WREG_CRT(0x09, ((vblkstr & 0x200) >> 4) |
+ ((linecomp & 0x200) >> 3));
+ WREG_CRT(0x10, vsyncstr & 0xff);
+ WREG_CRT(0x11, (vsyncend & 0x0f) | 0x20);
+ WREG_CRT(0x12, vdispend & 0xff);
+ WREG_CRT(0x14, 0);
+ WREG_CRT(0x15, vblkstr & 0xff);
+ WREG_CRT(0x16, vblkend & 0xff);
+ WREG_CRT(0x17, 0xc3);
+ WREG_CRT(0x18, linecomp & 0xff);
WREG_ECRT(0x01, crtcext1);
WREG_ECRT(0x02, crtcext2);
@@ -631,6 +645,8 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -640,6 +656,18 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
else
mgag200_crtc_set_gamma_linear(mdev, format);
}
+
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ drm_crtc_arm_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -652,11 +680,8 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -668,22 +693,41 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
struct mga_device *mdev = to_mga_device(crtc->dev);
- const struct mgag200_device_funcs *funcs = mdev->funcs;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
+ drm_crtc_vblank_off(crtc);
mgag200_disable_display(mdev);
+}
+
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 vcount;
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ if (stime)
+ *stime = ktime_get();
+
+ if (vpos) {
+ vcount = RREG32(MGAREG_VCOUNT);
+ *vpos = vcount & GENMASK(11, 0);
+ }
+
+ if (hpos)
+ *hpos = mode->htotal >> 1; // near middle of scanline on average
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
}
void mgag200_crtc_reset(struct drm_crtc *crtc)
@@ -717,6 +761,7 @@ struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc
new_mgag200_crtc_state->format = mgag200_crtc_state->format;
memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
sizeof(new_mgag200_crtc_state->pixpllc));
+ new_mgag200_crtc_state->set_vidrst = mgag200_crtc_state->set_vidrst;
return &new_mgag200_crtc_state->base;
}
@@ -729,6 +774,30 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
kfree(mgag200_crtc_state);
}
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ien = RREG32(MGAREG_IEN);
+ ien |= MGAREG_IEN_VLINEIEN;
+ WREG32(MGAREG_IEN, ien);
+
+ return 0;
+}
+
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ ien = RREG32(MGAREG_IEN);
+ ien &= ~(MGAREG_IEN_VLINEIEN);
+ WREG32(MGAREG_IEN, ien);
+}
+
/*
* Mode config
*/
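
As a worked example of the <linecomp> rule introduced above: with the standard 1024x768@60 timings, crtc_vdisplay and crtc_vblank_start are both 768, so programming <linecomp> = 768 makes the VLINE interrupt fire on the first non-visible line, exactly where a VBLANK interrupt would. A minimal sketch of the derivation (the helper name is hypothetical):

static unsigned int example_linecomp(const struct drm_display_mode *mode)
{
	/* e.g. 1024x768@60: crtc_vblank_start == 768, the first blanked line */
	return mode->crtc_vblank_start;
}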
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index aa73463674e4..d4fef8f25871 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -102,10 +102,17 @@
#define MGAREG_EXEC 0x0100
#define MGAREG_FIFOSTATUS 0x1e10
+
#define MGAREG_STATUS 0x1e14
+#define MGAREG_STATUS_VLINEPEN BIT(5)
+
#define MGAREG_CACHEFLUSH 0x1fff
+
#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_ICLEAR_VLINEICLR BIT(5)
+
#define MGAREG_IEN 0x1e1c
+#define MGAREG_IEN_VLINEIEN BIT(5)
#define MGAREG_VCOUNT 0x1e20
diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
new file mode 100644
index 000000000000..a5a3ac108bd5
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mgag200_ddc.h"
+#include "mgag200_drv.h"
+
+static void mgag200_vga_bmc_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_stop_scanout(mdev);
+}
+
+static void mgag200_vga_bmc_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_start_scanout(mdev);
+}
+
+static int mgag200_vga_bmc_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_connector_state *new_connector_state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+
+ new_mgag200_crtc_state->set_vidrst = mdev->info->sync_bmc;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mgag200_dac_encoder_helper_funcs = {
+ .atomic_disable = mgag200_vga_bmc_encoder_atomic_disable,
+ .atomic_enable = mgag200_vga_bmc_encoder_atomic_enable,
+ .atomic_check = mgag200_vga_bmc_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs mgag200_dac_encoder_funcs = {
+ .destroy = drm_encoder_cleanup
+};
+
+static int mgag200_vga_bmc_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ const struct mgag200_device_info *minfo = mdev->info;
+ int count;
+
+ count = drm_connector_helper_get_modes(connector);
+
+ if (!count) {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+/*
+ * There's no monitor connected if the DDC did not return an EDID. Still
+ * return 'connected' as there's always a BMC. Incrementing the connector's
+ * epoch counter triggers an update of the related properties.
+ */
+static int mgag200_vga_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ enum drm_connector_status old_status, status;
+
+ if (connector->edid_blob_ptr)
+ old_status = connector_status_connected;
+ else
+ old_status = connector_status_disconnected;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != old_status)
+ ++connector->epoch_counter;
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs mgag200_vga_connector_helper_funcs = {
+ .get_modes = mgag200_vga_bmc_connector_helper_get_modes,
+ .detect_ctx = mgag200_vga_bmc_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs mgag200_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+};
+
+int mgag200_vga_bmc_output_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc;
+ int ret;
+
+ encoder = &mdev->output.vga.encoder;
+ ret = drm_encoder_init(dev, encoder, &mgag200_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_encoder_helper_add(encoder, &mgag200_dac_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ddc = mgag200_ddc_create(mdev);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ connector = &mdev->output.vga.connector;
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_vga_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
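
The detect contract implemented by the new connector can be summarized as follows: the output is always reported as connected (a BMC is always present), and a transition between EDID and no-EDID is signalled solely through the epoch counter, which prompts the DRM core to refresh the connector's properties. A hedged sketch of that logic (helper name and parameters are hypothetical):

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool had_edid, bool has_edid)
{
	if (had_edid != has_edid)
		++connector->epoch_counter; /* properties changed, re-probe */

	return connector_status_connected;  /* the BMC output is always live */
}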
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index f5e2838c6a76..13110fcc46a8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -37,6 +37,7 @@ msm-display-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
+ hdmi/hdmi_phy_8998.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 0de8465b6cf0..2eb6c3e93748 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -42,6 +42,17 @@ static const struct adreno_info a3xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x03000620),
+ .family = ADRENO_3XX,
+ .revn = 308,
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 5273dc849838..b46ff49f47cf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -145,6 +145,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
+ } else if (adreno_is_a306a(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010);
} else if (adreno_is_a320(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@@ -237,7 +241,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
+ if (adreno_is_a305b(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@@ -334,8 +340,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
- adreno_is_a320(adreno_gpu)) {
+ if (adreno_is_a305(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu) ||
+ adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index c0b5373e90d7..e09044930547 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
struct drm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- /* Enable local preemption for finegrain preemption */
+ /*
+ * Disable local preemption by default because it requires
+ * user-space to be aware of it and provide additional handling
+ * to restore rendering state or do various flushes on switch.
+ */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
- OUT_RING(ring, 0x1);
+ OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
@@ -1793,5 +1801,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* a5xx only supports UBWC 1.0, these are not configurable */
+ adreno_gpu->ubwc_config.macrotile_mode = 0;
+ adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+
return gpu;
}
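
The per-ring last_seqno recorded above lets the preemption logic below distinguish a ring whose write pointer merely moved from one with genuinely outstanding work: the ring counts as busy only until the retired fence catches up with the last submitted sequence number. A hedged sketch of that test (helper name is hypothetical):

static bool example_ring_idle(struct a5xx_gpu *a5xx_gpu,
			      struct msm_ringbuffer *ring)
{
	/* All submitted work is retired once the fence reaches last_seqno. */
	return ring->memptrs->fence == a5xx_gpu->last_seqno[ring->id];
}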
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index c7187bcc5e90..9c0d701fe4b8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -34,8 +34,10 @@ struct a5xx_gpu {
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
+ spinlock_t preempt_start_lock;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index f58dd564d122..0469fea55010 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ if (!empty && ring == a5xx_gpu->cur_ring)
+ empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -98,11 +102,18 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
return;
/*
+ * Serialize preemption start to ensure that the decision is
+ * always made on the latest state. Otherwise we can get stuck
+ * in a lower-priority or empty ring.
+ */
+ spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
+
+ /*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
- return;
+ goto out;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
@@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
- return;
+ goto out;
}
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
+
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
@@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+ return;
+
+out:
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
@@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /*
+ * Try to trigger preemption again in case there was a submit or
+ * retire during ring switch
+ */
+ a5xx_preempt_trigger(gpu);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
@@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
return;
for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->data = 0;
+ a5xx_gpu->preempt[i]->info = 0;
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
@@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
+ spin_lock_init(&a5xx_gpu->preempt_start_lock);
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
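
try_preempt_state() itself is not part of this hunk; a hedged sketch of the compare-and-swap it is assumed to perform shows why the new preempt_start_lock only needs to cover the ring decision rather than the whole switch: the NONE -> START transition can succeed on at most one CPU at a time.

static bool example_try_preempt_state(struct a5xx_gpu *a5xx_gpu,
				      enum preempt_state cur,
				      enum preempt_state new)
{
	/* Atomically claim the preemption state machine. */
	return atomic_cmpxchg(&a5xx_gpu->preempt_state, cur, new) == cur;
}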
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 68ba9aed5506..0312b6ee0356 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = {
{},
};
+static const struct adreno_reglist a620_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
static const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@@ -448,7 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
- {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
@@ -491,7 +543,6 @@ static const u32 a630_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a630_protect, 32);
-/* These are for a620 and a650 */
static const u32 a650_protect_regs[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@@ -636,6 +687,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00080000,
},
/*
* There are (at least) three SoCs implementing A610: SM6125
@@ -652,6 +705,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 127, 4 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010500),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 615,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a615_hwcg,
+ .protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
+ },
+ .speedbins = ADRENO_SPEEDBINS(
+ /*
+ * The default speed bin (0) has the same values as
+ * speed bin 90 which goes up to 432 MHz.
+ */
+ { 0, 0 },
+ { 90, 0 },
+ { 105, 1 },
+ { 146, 2 },
+ { 163, 3 },
+ ),
+ }, {
.machine = "qcom,sm7150",
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
@@ -667,6 +749,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -689,6 +773,8 @@ static const struct adreno_info a6xx_gpus[] = {
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -711,6 +797,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -733,6 +821,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -755,6 +845,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -764,6 +856,30 @@ static const struct adreno_info a6xx_gpus[] = {
{ 180, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020100),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a621_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a620_zap.mbn",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a620_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ },
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 137, 1 },
+ ),
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
@@ -782,6 +898,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
@@ -799,6 +917,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -821,6 +941,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
@@ -846,6 +968,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020000,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
}, {
@@ -864,11 +988,14 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
+ { 129, 4 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
@@ -888,6 +1015,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
@@ -905,6 +1034,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00800200,
},
.address_space_size = SZ_16G,
}
@@ -1165,6 +1296,8 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x0000c000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -1188,6 +1321,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
+ .gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
}, {
@@ -1207,6 +1341,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7020100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}, {
@@ -1225,6 +1360,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7050001,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
}, {
@@ -1243,6 +1379,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.gmu_chipid = 0x7090100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index cb538a262d1c..37927bdd6fbe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
+static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ /*
+ * GEMNoC can power collapse whilst the GPU is being powered down, resulting
+ * in the power down sequence not being fully executed. That in turn can
+ * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
+ */
+ if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+}
+
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
@@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
+ a6xx_gemnoc_workaround(gmu);
+
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -525,8 +541,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
- if (adreno_is_a650(adreno_gpu) ||
- adreno_is_a660_family(adreno_gpu) ||
+ if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
@@ -946,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);
+ a6xx_gemnoc_workaround(gmu);
+
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
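
Note: the rpmh_init hunk folds the a650 || a660-family check into adreno_is_a650_family(). By my reading of adreno_gpu.h, that helper covers both the A650 (6xx gen3) and A660 (6xx gen4) families, so the condition is equivalent; a paraphrase, not the literal upstream definition:

    /* paraphrased: a650_family is the union of the two checks it replaces */
    static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
    {
            return adreno_is_a650(gpu) || adreno_is_a660_family(gpu);
    }
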
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index bcaec86ac67a..06cab2c6fd66 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -402,7 +402,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
- u32 val, clock_cntl_on, cgc_mode;
+ u32 cgc_delay, cgc_hyst;
+ u32 val, clock_cntl_on;
if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
return;
@@ -416,16 +417,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
- if (adreno_is_a7xx(adreno_gpu)) {
- cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
-
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
- state ? cgc_mode : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
- state ? 0x10111 : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
- state ? 0x5555 : 0);
- }
+ cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
+ cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? cgc_delay : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? cgc_hyst : 0);
if (!adreno_gpu->info->a6xx->hwcg) {
gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
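
Note: the CGC writes become unconditional and table-driven instead of a7xx-only. Reading the new code together with the catalog entries above, an A740 with clock gating enabled (state == true) ends up issuing (a worked example of the new path, nothing beyond what the diff shows):

    gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,  0x00020202); /* a740 catalog value */
    gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111);    /* non-a615-family */
    gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,  0x5555);

with all three registers written as 0 when state == false.
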
@@ -493,24 +493,17 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
gpu->ubwc_config.rgb565_predicator = 0;
- /* Unknown, introduced with A650 family */
gpu->ubwc_config.uavflagprd_inv = 0;
- /* Whether the minimum access length is 64 bits */
gpu->ubwc_config.min_acc_len = 0;
- /* Entirely magic, per-GPU-gen value */
- gpu->ubwc_config.ubwc_mode = 0;
- /*
- * The Highest Bank Bit value represents the bit of the highest DDR bank.
- * This should ideally use DRAM type detection.
- */
+ gpu->ubwc_config.ubwc_swizzle = 0x6;
+ gpu->ubwc_config.macrotile_mode = 0;
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 1;
+ gpu->ubwc_config.ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
@@ -523,9 +516,18 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
+ if (adreno_is_a621(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
+ if (adreno_is_a680(gpu))
+ gpu->ubwc_config.macrotile_mode = 1;
+
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
@@ -536,6 +538,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_7c3(gpu)) {
@@ -543,12 +546,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_a702(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 0;
}
}
@@ -564,21 +567,26 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
+ u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
+ u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 |
adreno_gpu->ubwc_config.rgb565_predicator << 11 |
hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level2_swizzling_dis << 6 | hbb_hi << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 | hbb_hi << 10 |
adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
@@ -586,6 +594,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+
+ gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
+ adreno_gpu->ubwc_config.macrotile_mode);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
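
Note: ubwc_swizzle replaces the old boolean ubwc_mode, and only its low two bits are consumed in this hunk. Decoding the two derived values for the configurations set above (a worked example of the new expressions):

    /* default ubwc_swizzle = 0x6 (0b110): levels 2 & 3 on, level 1 off */
    u32 ubwc_mode            = 0x6 & 1;    /* 0, same as the old ubwc_mode = 0 */
    u32 level2_swizzling_dis = !(0x6 & 2); /* 0, level-2 swizzling stays enabled */

    /* a610 ubwc_swizzle = 0x7 (0b111): ubwc_mode = 1, matching its old setting */
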
@@ -976,25 +987,11 @@ static int hw_init(struct msm_gpu *gpu)
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
- /* Setting the primFifo thresholds default values,
- * and vccCacheSkipDis=1 bit (0x200) for A640 and newer
- */
- if (adreno_is_a702(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000);
- else if (adreno_is_a690(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a619(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
- else if (adreno_is_a610(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
- else if (!adreno_is_a7xx(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
+
+ /* Set the default primFifo threshold values */
+ if (adreno_gpu->info->a6xx->prim_fifo_threshold)
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
+ adreno_gpu->info->a6xx->prim_fifo_threshold);
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
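
Note: the removed if/else ladder (including its duplicated a650/a660 branch) becomes data-driven, and a GPU whose catalog entry leaves prim_fifo_threshold at zero now skips the write entirely. For instance, the old a690 branch maps one-to-one onto its catalog entry:

    /* old: adreno_is_a690() -> 0x00800200; new: same value from the catalog */
    if (adreno_gpu->info->a6xx->prim_fifo_threshold)
            gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
                      adreno_gpu->info->a6xx->prim_fifo_threshold); /* 0x00800200 on a690 */
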
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e3e5c53ae8af..0fb7febf70e7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -22,6 +22,8 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
u32 gmu_chipid;
+ u32 gmu_cgc_mode;
+ u32 prim_fifo_threshold;
};
struct a6xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 789a11416f7a..0fcae53c0b14 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
const u32 *debugbus_blocks, *gbif_debugbus_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
debugbus_blocks = gen7_0_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
debugbus_blocks = gen7_2_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
debugbus_blocks = gen7_9_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
@@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
const struct a6xx_debugbus_block *cx_debugbus_blocks;
if (adreno_is_a7xx(adreno_gpu)) {
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
} else {
@@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
const struct gen7_sptp_cluster_registers *dbgahb_clusters;
unsigned dbgahb_clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
dbgahb_clusters = gen7_0_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
dbgahb_clusters = gen7_2_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
+ } else {
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
+ dbgahb_clusters = gen7_9_0_sptp_clusters;
+ dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters);
}
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
@@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
const struct gen7_cluster_registers *clusters;
unsigned clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
clusters = gen7_0_0_clusters;
clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
clusters = gen7_2_0_clusters;
clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
clusters = gen7_9_0_clusters;
clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
}
@@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
}
@@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
datasize);
out:
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
}
}
@@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
unsigned num_shader_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
shader_blocks = gen7_0_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
shader_blocks = gen7_2_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
shader_blocks = gen7_9_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
}
@@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
const u32 *pre_crashdumper_regs;
const struct gen7_reg_list *reglist;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
reglist = gen7_0_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
reglist = gen7_2_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
reglist = gen7_9_0_reg_list;
pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
}
@@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
- adreno_is_a750(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu,
@@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
const struct a6xx_indexed_registers *indexed_regs;
int i, indexed_count, mempool_count;
- if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
+ if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) {
indexed_regs = a7xx_indexed_reglist;
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
indexed_regs = gen7_9_0_cp_indexed_reg_list;
indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 260d66eccfec..9a327d543f27 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
{ "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index ecc3fc5cec22..465a4cd14a43 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -379,6 +379,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
+ case MSM_PARAM_UBWC_SWIZZLE:
+ *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ return 0;
+ case MSM_PARAM_MACROTILE_MODE:
+ *value = adreno_gpu->ubwc_config.macrotile_mode;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -478,7 +484,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
- newname);
+ fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
@@ -688,11 +694,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
- if (state->ring[i].data) {
- memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
+ if (state->ring[i].data)
state->ring[i].data_size = size << 2;
- }
}
}
@@ -1083,6 +1087,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+ gpu->pdev = pdev;
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 1ab523a163a0..58d7e7915c57 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -191,12 +191,42 @@ struct adreno_gpu {
const struct firmware *fw[ADRENO_FW_MAX];
struct {
+ /**
+ * @rgb565_predicator: Unknown, introduced with A650 family,
+ * related to UBWC mode/ver 4
+ */
u32 rgb565_predicator;
+ /** @uavflagprd_inv: Unknown, introduced with A650 family */
u32 uavflagprd_inv;
+ /** @min_acc_len: Whether the minimum access length is 64 bits */
u32 min_acc_len;
- u32 ubwc_mode;
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ *
+ * This is a bitmask where BIT(0) enables level 1, BIT(1)
+ * controls level 2, and BIT(2) enables level 3.
+ */
+ u32 ubwc_swizzle;
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
u32 highest_bank_bit;
u32 amsbc;
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ u32 macrotile_mode;
} ubwc_config;
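
Note: highest_bank_bit is programmed as an offset from 13, split across two register fields in a6xx_set_ubwc_config() above. For the catalog default of 15 (a worked example):

    u32 hbb    = 15 - 13;  /* 2 */
    u32 hbb_hi = hbb >> 2; /* 0, lands in bit 10 of RB_NC_MODE_CNTL */
    u32 hbb_lo = hbb & 3;  /* 2, lands in bits 2:1 of RB_NC_MODE_CNTL */
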
/*
@@ -294,6 +324,12 @@ static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 307);
}
+static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
+{
+ /* a306a (marketing name is a308) */
+ return adreno_is_revn(gpu, 308);
+}
+
static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 320);
@@ -384,6 +420,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}
+static inline int adreno_is_a621(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020100;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
@@ -433,7 +474,13 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}
-/* check for a615, a616, a618, a619 or any a630 derivatives */
+/* TODO: 615/616 */
+static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a618(gpu) ||
+ adreno_is_a619(gpu);
+}
+
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 145f3d5953a3..6ccfde82fecd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -290,6 +291,21 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm8150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -384,6 +400,8 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8150_wb),
+ .wb = sm8150_wb,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 9e3bec8bc121..bab19ddd1d4f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -297,6 +298,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
},
};
+static const struct dpu_wb_cfg sc8180x_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -410,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.pingpong = sc8180x_pp,
.merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
.merge_3d = sc8180x_merge_3d,
+ .wb_count = ARRAY_SIZE(sc8180x_wb),
+ .wb = sc8180x_wb,
.intf_count = ARRAY_SIZE(sc8180x_intf),
.intf = sc8180x_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76b2ec0d2489..d039b96beb97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6125_mdp = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -139,6 +140,21 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6125_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -210,6 +226,8 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.dspp = sm6125_dspp,
.pingpong_count = ARRAY_SIZE(sm6125_pp),
.pingpong = sm6125_pp,
+ .wb_count = ARRAY_SIZE(sm6125_wb),
+ .wb = sm6125_wb,
.intf_count = ARRAY_SIZE(sm6125_intf),
.intf = sm6125_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index e17a30be7525..0502cee2f116 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -26,6 +26,7 @@ static const struct dpu_mdp_cfg sm6350_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@@ -145,6 +146,21 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm6350_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 1920,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -218,6 +234,8 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.dsc = sm6350_dsc,
.pingpong_count = ARRAY_SIZE(sm6350_pp),
.pingpong = sm6350_pp,
+ .wb_count = ARRAY_SIZE(sm6350_wb),
+ .wb = sm6350_wb,
.intf_count = ARRAY_SIZE(sm6350_intf),
.intf = sm6350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index b26d5fe40c72..febc3e764a63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -231,7 +231,7 @@ struct dpu_crtc_state {
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_frame_pending - retun the number of pending frames
+ * dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 648c8d0a4c36..dcb4fd85e73b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -96,14 +96,16 @@
#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
- BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_CDP))
+
+#define WB_SM8250_MASK (WB_SDM845_MASK | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 6e2ac50b94a4..0f40eea7f5e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitfield.h>
+
#include <drm/drm_managed.h>
#include "dpu_hwio.h"
@@ -231,8 +233,38 @@ static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
+static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
+ enum dpu_dp_phy_sel phys[2])
+{
+ struct dpu_hw_blk_reg_map *c = &mdp->hw;
+ unsigned int intf;
+ u32 sel = 0;
+
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]);
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]);
+
+ for (intf = 0; intf < 2; intf++) {
+ switch (phys[intf]) {
+ case DPU_DP_PHY_0:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1);
+ break;
+ case DPU_DP_PHY_1:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1);
+ break;
+ case DPU_DP_PHY_2:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1);
+ break;
+ default:
+ /* ignore */
+ break;
+ }
+ }
+
+ DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel);
+}
+
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap)
+ unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
@@ -245,6 +277,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_safe_status = dpu_hw_get_safe_status;
+ if (mdss_rev->core_major_ver >= 5)
+ ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
+
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -252,7 +287,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_mdp *mdp;
@@ -270,7 +305,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
return mdp;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 5c9a7ede991e..f1ab9fd106e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -67,6 +67,13 @@ struct dpu_vsync_source_cfg {
enum dpu_vsync_source vsync_source;
};
+enum dpu_dp_phy_sel {
+ DPU_DP_PHY_NONE,
+ DPU_DP_PHY_0,
+ DPU_DP_PHY_1,
+ DPU_DP_PHY_2,
+};
+
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
@@ -126,6 +133,13 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
+ * dp_phy_intf_sel - configure intf to phy mapping
+ * @mdp: mdp top context driver
+ * @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
+ */
+ void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
+
+ /**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
*/
@@ -148,12 +162,12 @@ struct dpu_hw_mdp {
* @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
- * @m: Pointer to mdss catalog data
+ * @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_version *mdss_rev);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index 5acd5683d25a..054fe097ebf8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -60,6 +60,13 @@
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define DCE_SEL 0x450
+#define MDP_DP_PHY_INTF_SEL 0x460
+#define MDP_DP_PHY_INTF_SEL_INTF0 GENMASK(2, 0)
+#define MDP_DP_PHY_INTF_SEL_INTF1 GENMASK(5, 3)
+#define MDP_DP_PHY_INTF_SEL_PHY0 GENMASK(8, 6)
+#define MDP_DP_PHY_INTF_SEL_PHY1 GENMASK(11, 9)
+#define MDP_DP_PHY_INTF_SEL_PHY2 GENMASK(14, 12)
+
#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
#define MDP_PERIPH_TOP0_END CLK_CTRL3
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index d1e2143110f2..9bcae53c4f45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1146,7 +1146,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
dpu_kms->catalog->mdp,
dpu_kms->mmio,
- dpu_kms->catalog);
+ dpu_kms->catalog->mdss_ver);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
@@ -1181,6 +1181,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
+ /*
+ * We need to program the DP <-> PHY relationship only for SC8180X since it
+ * has fewer DP controllers than DP PHYs.
+ * If any other platform requires the same kind of programming, or if
+ * the INTF <-> DP relationship isn't static anymore, this needs to be
+ * configured through the DT.
+ */
+ if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
+ dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });
+
dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
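
Note: with the SC8180X array literal { 1, 2 } (i.e. { DPU_DP_PHY_1, DPU_DP_PHY_2 }), dp_phy_intf_sel() packs MDP_DP_PHY_INTF_SEL as follows, a worked example of the FIELD_PREP logic in dpu_hw_top.c above:

    sel  = FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, 1);    /* 0x0001 */
    sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, 2);    /* 0x0010 */
    sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, 0 + 1); /* 0x0200: PHY1 <- INTF0 */
    sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, 1 + 1); /* 0x2000: PHY2 <- INTF1 */
    /* final register value: 0x2211 */
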
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 3a7f7edda96b..500b7dc895d0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
- plane ? plane->name : NULL);
+ plane ? plane->name : "(null)");
total += inuse;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 9622e58dce3e..e1228fb093ee 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -119,7 +119,7 @@ struct msm_dp_desc {
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
@@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = {
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
@@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
};
static const struct msm_dp_desc sm8650_dp_descs[] = {
- { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 3b59137ca674..031446c87dae 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
- } else {
+ } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
+ } else {
+ /* 4.2, 4.3 */
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3500000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
}
config->decimal_div_start = dec;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 24abcb7254cc..0bfee41c2e71 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -549,6 +549,7 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id msm_hdmi_dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 4586baf36415..a62d2aedfbb7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -137,6 +137,7 @@ enum hdmi_phy_type {
MSM_HDMI_PHY_8960,
MSM_HDMI_PHY_8x74,
MSM_HDMI_PHY_8996,
+ MSM_HDMI_PHY_8998,
MSM_HDMI_PHY_MAX,
};
@@ -154,6 +155,7 @@ extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg;
struct hdmi_phy {
struct platform_device *pdev;
@@ -184,6 +186,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
#ifdef CONFIG_COMMON_CLK
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+int msm_hdmi_pll_8998_init(struct platform_device *pdev);
#else
static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
@@ -194,6 +197,11 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
return -ENODEV;
}
+
+static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
#endif
/*
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 88a3423b7f24..95b3f7535d84 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -118,6 +118,9 @@ static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
case MSM_HDMI_PHY_8996:
ret = msm_hdmi_pll_8996_init(pdev);
break;
+ case MSM_HDMI_PHY_8998:
+ ret = msm_hdmi_pll_8998_init(pdev);
+ break;
/*
* we don't have PLL support for these, don't report an error for now
*/
@@ -193,6 +196,8 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8996",
.data = &msm_hdmi_phy_8996_cfg },
+ { .compatible = "qcom,hdmi-phy-8998",
+ .data = &msm_hdmi_phy_8998_cfg },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
new file mode 100644
index 000000000000..0e3a2b16a2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Freebox SAS
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ 600000000
+#define HDMI_PCLK_MIN_FREQ 25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
+#define HDMI_CORECLK_DIV 5
+#define HDMI_DEFAULT_REF_CLOCK 19200000
+#define HDMI_PLL_CMP_CNT 1024
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 150
+
+#define HDMI_NUM_TX_CHANNEL 4
+
+struct hdmi_pll_8998 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+ unsigned long rate;
+
+ /* pll mmio base */
+ void __iomem *mmio_qserdes_com;
+ /* tx channel base */
+ void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8998, clk_hw)
+
+struct hdmi_8998_phy_pll_reg_cfg {
+ u32 com_svs_mode_clk_sel;
+ u32 com_hsclk_sel;
+ u32 com_pll_cctrl_mode0;
+ u32 com_pll_rctrl_mode0;
+ u32 com_cp_ctrl_mode0;
+ u32 com_dec_start_mode0;
+ u32 com_div_frac_start1_mode0;
+ u32 com_div_frac_start2_mode0;
+ u32 com_div_frac_start3_mode0;
+ u32 com_integloop_gain0_mode0;
+ u32 com_integloop_gain1_mode0;
+ u32 com_lock_cmp_en;
+ u32 com_lock_cmp1_mode0;
+ u32 com_lock_cmp2_mode0;
+ u32 com_lock_cmp3_mode0;
+ u32 com_core_clk_en;
+ u32 com_coreclk_div_mode0;
+
+ u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_1[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_2[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_res_code_offset[HDMI_NUM_TX_CHANNEL];
+
+ u32 phy_mode;
+};
+
+struct hdmi_8998_post_divider {
+ u64 vco_freq;
+ int hsclk_divsel;
+ int vco_ratio;
+ int tx_band_sel;
+ int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8998 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8998 *pll, int offset,
+ u32 data)
+{
+ writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8998 *pll, int offset)
+{
+ return readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8998 *pll, int channel,
+ int offset, int data)
+{
+ writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+ bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x8;
+
+ return 0x30;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x16;
+
+ return 0x18;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x34;
+
+ return 0x2;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ bool gen_ssc)
+{
+ int digclk_divsel = bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+ u64 base;
+
+ if ((frac_start != 0) || gen_ssc)
+ base = 0x3F;
+ else
+ base = 0xC4;
+
+ base <<= (digclk_divsel == 2 ? 1 : 0);
+
+ return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+ u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+ u32 divisor = ref_clk * 10;
+ u32 rem;
+
+ rem = do_div(dividend, divisor);
+ if (rem > (divisor >> 1))
+ dividend++;
+
+ return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+ u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+ do_div(fdata, HDMI_PLL_CMP_CNT);
+
+ return fdata;
+}
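
Note: pll_get_pll_cmp() rounds 1024 * fdata / (ref_clk * 10) to the nearest integer and subtracts one. A worked example with the 19.2 MHz reference:

    /* fdata = 2.97 GHz: 1024 * 2.97e9 / 1.92e8 = 15840 exactly */
    u32 pll_cmp = pll_get_pll_cmp(2970000000ULL, 19200000); /* = 15839 */

    /* pll_cmp_to_fdata(15839) = 15839 * 1.92e8 / 1024 ~= 2.9698 GHz; the -1
     * looks like a comparator-count convention, so the round trip is low by
     * one count rather than exact. */
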
+
+#define HDMI_REF_CLOCK_HZ ((u64)19200000)
+#define HDMI_MHZ_TO_HZ ((u64)1000000)
+static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
+{
+ u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+ 9, 10, 12, 15, 25};
+ u32 const band_list[] = {0, 1, 2, 3};
+ u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+ u32 const sz_band = ARRAY_SIZE(band_list);
+ u32 const cmp_cnt = 1024;
+ u32 const th_min = 500, th_max = 1000;
+ u32 half_rate_mode = 0;
+ u32 list_elements;
+ int optimal_index;
+ u32 i, j, k;
+ u32 found_hsclk_divsel = 0, found_vco_ratio;
+ u32 found_tx_band_sel;
+ u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ;
+ u64 freq_list[ARRAY_SIZE(ratio_list) * ARRAY_SIZE(band_list)];
+ u64 found_vco_freq;
+ u64 freq_optimal;
+
+find_optimal_index:
+ freq_optimal = max_freq;
+ optimal_index = -1;
+ list_elements = 0;
+
+ for (i = 0; i < sz_ratio; i++) {
+ for (j = 0; j < sz_band; j++) {
+ u64 freq = div_u64(bclk, (1 << half_rate_mode));
+
+ freq *= (ratio_list[i] * (1 << band_list[j]));
+ freq_list[list_elements++] = freq;
+ }
+ }
+
+ for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+ u32 const clks_pll_div = 2, core_clk_div = 5;
+ u32 const rng1 = 16, rng2 = 8;
+ u32 th1, th2;
+ u64 core_clk, rvar1, rem;
+
+ core_clk = div_u64(freq_list[k],
+ ratio_list[k / sz_band] * clks_pll_div *
+ core_clk_div);
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th1 = rvar1;
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th2 = rvar1;
+
+ if (freq_list[k] >= min_freq &&
+ freq_list[k] <= max_freq) {
+ if ((th1 >= th_min && th1 <= th_max) ||
+ (th2 >= th_min && th2 <= th_max)) {
+ if (freq_list[k] <= freq_optimal) {
+ freq_optimal = freq_list[k];
+ optimal_index = k;
+ }
+ }
+ }
+ }
+
+ if (optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto find_optimal_index;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ found_vco_ratio = ratio_list[optimal_index / sz_band];
+ found_tx_band_sel = band_list[optimal_index % sz_band];
+ found_vco_freq = freq_optimal;
+ }
+
+ switch (found_vco_ratio) {
+ case 1:
+ found_hsclk_divsel = 15;
+ break;
+ case 2:
+ found_hsclk_divsel = 0;
+ break;
+ case 3:
+ found_hsclk_divsel = 4;
+ break;
+ case 4:
+ found_hsclk_divsel = 8;
+ break;
+ case 5:
+ found_hsclk_divsel = 12;
+ break;
+ case 6:
+ found_hsclk_divsel = 1;
+ break;
+ case 9:
+ found_hsclk_divsel = 5;
+ break;
+ case 10:
+ found_hsclk_divsel = 2;
+ break;
+ case 12:
+ found_hsclk_divsel = 9;
+ break;
+ case 15:
+ found_hsclk_divsel = 13;
+ break;
+ case 25:
+ found_hsclk_divsel = 14;
+ break;
+ };
+
+ pd->vco_freq = found_vco_freq;
+ pd->tx_band_sel = found_tx_band_sel;
+ pd->vco_ratio = found_vco_ratio;
+ pd->hsclk_divsel = found_hsclk_divsel;
+
+ return 0;
+}
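
Note: a worked pass through the search, by my reading, for 1080p60 (pix_clk = 148.5 MHz, so bclk = 1.485 GHz): candidate VCO frequencies are bclk * ratio * 2^band restricted to [8, 12] GHz; 8.91 GHz is reachable via ratio 6/band 0 and ratio 3/band 1, but only the latter passes the th1/th2 window (its core_clk of 297 MHz gives th2 = 505), so the function settles on:

    pd->vco_freq    = 8910000000; /* 8.91 GHz */
    pd->vco_ratio   = 3;          /* -> hsclk_divsel = 4 */
    pd->tx_band_sel = 1;

pll_calculate() below then derives fdata = vco/ratio = 2.97 GHz, dec_start = vco / (4 * 19.2 MHz) = 116 and frac_start = 16384.
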
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+ struct hdmi_8998_phy_pll_reg_cfg *cfg)
+{
+ struct hdmi_8998_post_divider pd;
+ u64 bclk;
+ u64 dec_start;
+ u64 frac_start;
+ u64 fdata;
+ u32 pll_divisor;
+ u32 rem;
+ u32 cpctrl;
+ u32 rctrl;
+ u32 cctrl;
+ u32 integloop_gain;
+ u32 pll_cmp;
+ int i, ret;
+
+ /* bit clk = 10 * pix_clk */
+ bclk = ((u64)pix_clk) * 10;
+
+ ret = pll_get_post_div(&pd, bclk);
+ if (ret)
+ return ret;
+
+ dec_start = pd.vco_freq;
+ pll_divisor = 4 * ref_clk;
+ do_div(dec_start, pll_divisor);
+
+ frac_start = pd.vco_freq * (1 << 20);
+
+ rem = do_div(frac_start, pll_divisor);
+ frac_start -= dec_start * (1 << 20);
+ if (rem > (pll_divisor >> 1))
+ frac_start++;
+
+ cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+ rctrl = pll_get_rctrl(frac_start, false);
+ cctrl = pll_get_cctrl(frac_start, false);
+ integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+ ref_clk, false);
+
+ fdata = pd.vco_freq;
+ do_div(fdata, pd.vco_ratio);
+
+ pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+ /* Convert these values to register specific values */
+ if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+ cfg->com_svs_mode_clk_sel = 1;
+ else
+ cfg->com_svs_mode_clk_sel = 2;
+
+ cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+ cfg->com_pll_cctrl_mode0 = cctrl;
+ cfg->com_pll_rctrl_mode0 = rctrl;
+ cfg->com_cp_ctrl_mode0 = cpctrl;
+ cfg->com_dec_start_mode0 = dec_start;
+ cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+ cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+ cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+ cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+ cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+ cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+ cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+ cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->com_lock_cmp_en = 0x0;
+ cfg->com_core_clk_en = 0x2c;
+ cfg->com_coreclk_div_mode0 = HDMI_CORECLK_DIV;
+ cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x5 : 0x4;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ cfg->tx_lx_tx_band[i] = pd.tx_band_sel;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x02;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x1C;
+ cfg->tx_lx_pre_driver_2[1] = 0x1C;
+ cfg->tx_lx_pre_driver_2[2] = 0x1C;
+ cfg->tx_lx_pre_driver_2[3] = 0x00;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x03;
+ } else if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x0E;
+ cfg->tx_lx_pre_driver_2[1] = 0x0E;
+ cfg->tx_lx_pre_driver_2[2] = 0x0E;
+ cfg->tx_lx_pre_driver_2[3] = 0x0E;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ }
+
+ return 0;
+}
+
+static int hdmi_8998_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ struct hdmi_8998_phy_pll_reg_cfg cfg = {};
+ int i, ret;
+
+ ret = pll_calculate(rate, parent_rate, &cfg);
+ if (ret) {
+ DRM_ERROR("PLL calculation failed\n");
+ return ret;
+ }
+
+ /* Initially shut down PHY */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ /* Power up sequence */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x1);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CMN_CTRL, 0x6);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_INTERFACE_SELECT_TX_BAND,
+ cfg.tx_lx_tx_band[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_CLKBUF_TERM_ENABLE,
+ 0x1);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_MODE,
+ 0x20);
+ }
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0B);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+ /* Bypass VCO calibration */
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+ cfg.com_svs_mode_clk_sel);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_IVCO, 0x07);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_SEL, 0x30);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_HSCLK_SEL,
+ cfg.com_hsclk_sel);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP_EN,
+ cfg.com_lock_cmp_en);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+ cfg.com_pll_cctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+ cfg.com_pll_rctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CP_CTRL_MODE0,
+ cfg.com_cp_ctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+ cfg.com_integloop_gain0_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+ cfg.com_integloop_gain1_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+ cfg.com_lock_cmp1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+ cfg.com_lock_cmp2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+ cfg.com_lock_cmp3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORE_CLK_EN,
+ cfg.com_core_clk_en);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORECLK_DIV_MODE0,
+ cfg.com_coreclk_div_mode0);
+
+ /* TX lanes setup (TX 0/1/2/3) */
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL,
+ cfg.tx_lx_tx_drv_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_EMP_POST1_LVL,
+ cfg.tx_lx_tx_emp_post1_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_1,
+ cfg.tx_lx_pre_driver_1[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_2,
+ cfg.tx_lx_pre_driver_2[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL_RES_CODE_OFFSET,
+ cfg.tx_lx_res_code_offset[i]);
+ }
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_MODE, cfg.phy_mode);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG,
+ 0x10);
+ }
+
+ /*
+ * Ensure that vco configuration gets flushed to hardware before
+ * enabling the PLL
+ */
+ wmb();
+
+ pll->rate = rate;
+
+ return 0;
+}
+
+static int hdmi_8998_phy_ready_status(struct hdmi_phy *phy)
+{
+ u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ u32 status;
+ int phy_ready = 0;
+
+ while (nb_tries--) {
+ status = hdmi_phy_read(phy, REG_HDMI_8998_PHY_STATUS);
+ phy_ready = status & BIT(0);
+
+ if (phy_ready)
+ break;
+
+ udelay(timeout);
+ }
+
+ return phy_ready;
+}
+
+static int hdmi_8998_pll_lock_status(struct hdmi_pll_8998 *pll)
+{
+ u32 status;
+ int nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ int pll_locked = 0;
+
+ while (nb_tries--) {
+ status = hdmi_pll_read(pll,
+ REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout);
+ }
+
+ return pll_locked;
+}
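
Note: both poll loops try at most 100 reads spaced 150 us apart, so the worst-case wait is roughly 15 ms. A sketch of the same wait using the standard iopoll helper, untested and only illustrative:

    #include <linux/iopoll.h>

    /* returns 0 once BIT(0) is set, -ETIMEDOUT after ~15 ms */
    ret = readl_poll_timeout(pll->mmio_qserdes_com +
                             REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS,
                             status, status & BIT(0),
                             150, 150 * HDMI_PLL_POLL_MAX_READS);
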
+
+static int hdmi_8998_pll_prepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int i, ret = 0;
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x1);
+ udelay(100);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+ udelay(100);
+
+ ret = hdmi_8998_pll_lock_status(pll);
+ if (!ret)
+ return ret;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG, 0x1F);
+ }
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ ret = hdmi_8998_phy_ready_status(phy);
+ if (!ret)
+ return ret;
+
+ /* Restart the retiming buffer */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x58);
+ udelay(1);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ return 0;
+}
+
+static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if (rate < HDMI_PCLK_MIN_FREQ)
+ return HDMI_PCLK_MIN_FREQ;
+ else if (rate > HDMI_PCLK_MAX_FREQ)
+ return HDMI_PCLK_MAX_FREQ;
+ else
+ return rate;
+}
+
+static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ return pll->rate;
+}
+
+static void hdmi_8998_pll_unprepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0);
+ usleep_range(100, 150);
+}
+
+static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ u32 status;
+ int pll_locked;
+
+ status = hdmi_pll_read(pll, REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ return pll_locked;
+}
+
+static const struct clk_ops hdmi_8998_pll_ops = {
+ .set_rate = hdmi_8998_pll_set_clk_rate,
+ .round_rate = hdmi_8998_pll_round_rate,
+ .recalc_rate = hdmi_8998_pll_recalc_rate,
+ .prepare = hdmi_8998_pll_prepare,
+ .unprepare = hdmi_8998_pll_unprepare,
+ .is_enabled = hdmi_8998_pll_is_enabled,
+};
+
+static const struct clk_init_data pll_init = {
+ .name = "hdmipll",
+ .ops = &hdmi_8998_pll_ops,
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+};
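
For orientation, the ops table above is reached through the common clk API; a hedged consumer-side sketch (the handle and the 1080p60 rate are illustrative, not taken from this patch):

    /* clk_set_rate() lands in hdmi_8998_pll_set_clk_rate() and
     * clk_prepare_enable() in hdmi_8998_pll_prepare() via the clk core.
     */
    struct clk *pll_clk = clk_hw_get_clk(&pll->clk_hw, "hdmipll");

    ret = clk_set_rate(pll_clk, 148500000);     /* 1080p60 pixel clock */
    if (!ret)
            ret = clk_prepare_enable(pll_clk);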
+
+int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8998 *pll;
+ int ret, i;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->pdev = pdev;
+
+ pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
+ if (IS_ERR(pll->mmio_qserdes_com)) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+		return PTR_ERR(pll->mmio_qserdes_com);
+ }
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+
+ pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
+ if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+			DRM_DEV_ERROR(dev, "failed to map %s base\n", name);
+			return PTR_ERR(pll->mmio_qserdes_tx[i]);
+ }
+ }
+ pll->clk_hw.init = &pll_init;
+
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
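
Because of_clk_hw_simple_get is used, the provider exposes exactly one clock; a sketch of the device-tree shape this implies (node name and placement are assumptions, not part of this patch):

    /*
     *   hdmi-phy@... {
     *           ...
     *           #clock-cells = <0>;
     *   };
     *
     * Consumers then reference the PLL output as clocks = <&hdmi_phy>,
     * with no cell index.
     */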
+
+static const char * const hdmi_phy_8998_reg_names[] = {
+ "vddio",
+ "vcca",
+};
+
+static const char * const hdmi_phy_8998_clk_names[] = {
+ "iface", "ref", "xo",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg = {
+ .type = MSM_HDMI_PHY_8998,
+ .reg_names = hdmi_phy_8998_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8998_reg_names),
+ .clk_names = hdmi_phy_8998_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8998_clk_names),
+};
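
The cfg struct is presumably wired up through the PHY driver's OF match table in hdmi_phy.c (not part of this hunk); a sketch of the assumed entry, with the compatible string hedged:

    static const struct of_device_id msm_hdmi_phy_dt_match[] = {
            ...
            { .compatible = "qcom,hdmi-phy-8998",   /* assumed string */
              .data = &msm_hdmi_phy_8998_cfg },
            {}
    };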
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 4494f6d1c7cb..7ab607252d18 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -357,12 +357,10 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
-#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
&fail_gem_iova);
-#endif
}
#endif
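
Dropping the #ifdef here, together with the matching removals in msm_drv.c and msm_drv.h below, presumably relies on <linux/fault-inject.h> providing no-op fallbacks when fault injection is compiled out; a simplified sketch of that stub pattern, reconstructed from memory rather than from this patch:

    /* Shape of the CONFIG_FAULT_INJECTION=n fallbacks assumed by this
     * cleanup (see include/linux/fault-inject.h for the real ones).
     */
    struct fault_attr {};
    #define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}

    static inline bool should_fail(struct fault_attr *attr, ssize_t size)
    {
            return false;
    }

fault_create_debugfs_attr() likewise degrades to an inline stub returning ERR_PTR(-ENODEV) when the debugfs side is disabled, so the unconditional calls above stay harmless.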
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c33f4e3f822..8c13b08708d2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>
@@ -58,10 +59,8 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
-#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
-#endif
static int msm_drm_uninit(struct device *dev)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index be016d7b4ef1..2e28a1344636 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,12 +33,8 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
-#ifdef CONFIG_FAULT_INJECTION
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
-#else
-# define should_fail(attr, size) 0
-#endif
struct msm_kms;
struct msm_gpu;
@@ -215,8 +211,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
- struct drm_atomic_state *pm_state;
-
/**
* hangcheck_period: For hang detection, in ms
*
@@ -254,8 +248,6 @@ void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
-void msm_atomic_state_clear(struct drm_atomic_state *state);
-void msm_atomic_state_free(struct drm_atomic_state *state);
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3666b42b4ecd..a274b8466423 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -931,7 +931,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2dfe6913ab4f..97608603ea62 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -1198,6 +1198,1027 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
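
These generated enums are consumed when routing a hardware counter to an event; a hypothetical sketch (the select-register name is illustrative — the real ones are defined elsewhere in this file — while gpu_write() is the existing msm helper):

    /* Route CP counter 0 to the "busy cycles" event. */
    gpu_write(gpu, REG_A7XX_CP_PERFCTR_CP_SEL_0,    /* hypothetical reg */
              A7XX_PERF_CP_BUSY_CYCLES);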
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -1584,7 +2605,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/>
<reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/>
<reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
- <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL"/>
<reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/>
<!---
@@ -2184,13 +3205,28 @@ to upconvert to 32b float internally?
<value value="3" name="BUFFERS_IN_SYSMEM"/>
</enum>
+ <enum name="a6xx_lrz_feedback_mask">
+ <value value="0x0" name="LRZ_FEEDBACK_NONE"/>
+ <value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+	<!-- We don't have a flag type, and this flag combination is often used -->
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
+ </enum>
+
<reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <doc>Disable LRZ feedback writes</doc>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location" variants="A6XX"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+	<doc>
+	Allows draws that don't have GRAS_LRZ_CNTL.LRZ_WRITE but do have
+	GRAS_LRZ_CNTL.ENABLE to contribute to LRZ during the RENDERING pass.
+	In sysmem mode, GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
+	</doc>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
<bitfield name="UNK27" pos="27"/>
</reg32>
@@ -2270,7 +3306,7 @@ to upconvert to 32b float internally?
- 0.0 if GREATER
- 1.0 if LESS
</doc>
- <bitfield name="FC_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
<bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
@@ -2284,7 +3320,7 @@ to upconvert to 32b float internally?
 Disable LRZ based on the previous direction and the current one.
 If DIR_WRITE is not enabled, there is no write to the direction buffer.
</doc>
- <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean"/>
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
</reg32>
@@ -2357,7 +3393,10 @@ to upconvert to 32b float internally?
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
</reg32>
- <reg32 offset="0x810b" name="GRAS_UNKNOWN_810B" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x810b" name="GRAS_LRZ_CNTL2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="0" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="1" type="boolean"/>
+ </reg32>
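+	<!-- Note (inferred from this patch, not stated in it): DISABLE_ON_WRONG_DIR
+	     and FC_ENABLE here appear to take over from the same-named
+	     GRAS_LRZ_CNTL bits, which are marked A6XX-only above. -->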
<!-- 0x810c-0x810f invalid -->
@@ -2366,7 +3405,10 @@ to upconvert to 32b float internally?
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
<reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
 <!-- Always written together and always equal to 09510840 00000a62 -->
<reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
@@ -2440,7 +3482,7 @@ to upconvert to 32b float internally?
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
@@ -2448,7 +3490,7 @@ to upconvert to 32b float internally?
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
@@ -2605,6 +3647,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK10" pos="10"/>
<bitfield name="LOSSLESSCOMPEN" pos="11" type="boolean"/>
<bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<!--
at least in gmem, things seem to be aligned to pitch of 64..
@@ -2770,6 +3813,7 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_SWAP" low="5" high="6" type="a3xx_color_swap"/>
<bitfield name="COLOR_FORMAT" low="7" high="14" type="a6xx_format"/>
<bitfield name="UNK15" pos="15" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
@@ -2886,13 +3930,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
<reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_surf_info" inline="yes">
+ <bitset name="a6xx_2d_src_surf_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
<bitfield name="FLAGS" pos="12" type="boolean"/>
<bitfield name="SRGB" pos="13" type="boolean"/>
- <!-- the rest is only for src -->
<bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
<bitfield name="FILTER" pos="16" type="boolean"/>
<bitfield name="UNK17" pos="17" type="boolean"/>
@@ -2903,11 +3946,21 @@ to upconvert to 32b float internally?
<bitfield name="UNK22" pos="22" type="boolean"/>
<bitfield name="UNK23" low="23" high="26"/>
<bitfield name="UNK28" pos="28" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <bitfield name="FLAGS" pos="12" type="boolean"/>
+ <bitfield name="SRGB" pos="13" type="boolean"/>
+ <bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
+ <bitfield name="MUTABLEEN" pos="17" type="boolean" variants="A7XX-"/>
</bitset>
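+	<!-- The former shared a6xx_2d_surf_info is split into src/dst variants
+	     above: the dst layout stops after MUTABLEEN, consistent with the
+	     removed TODO noting RB_2D_DST_INFO has fewer valid bits. -->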
<!-- 0x8c02-0x8c16 invalid -->
- <!-- TODO: RB_2D_DST_INFO has 17 valid bits (doesn't match a6xx_2d_surf_info) -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_surf_info" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
<reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
@@ -2927,7 +3980,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
<reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
<reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
- <!-- 0x8c34-0x8dff invalid -->
+
+ <reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x8c35-0x8dff invalid -->
<!-- always 0x1 ? either doesn't exist for a650 or write-only: -->
<reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/>
@@ -4275,7 +5331,7 @@ to upconvert to 32b float internally?
 badly named or the functionality moved in a6xx. But the downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4286,7 +5342,7 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4329,7 +5385,12 @@ to upconvert to 32b float internally?
<!-- always 0x100000 or 0x1000000? -->
<reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
<reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd"/>
+ <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd">
+		<!-- Affects UBWC in some way: if BLIT_OP_SCALE is done with this bit set
+		     and another blit is then done without it, the UBWC image may be
+		     copied incorrectly.
+		-->
+ <bitfield name="TP_UBWC_FLAG_HINT" pos="18" type="boolean"/>
+ </reg32>
<reg32 offset="0xb604" name="TPL1_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
<bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
@@ -4351,7 +5412,8 @@ to upconvert to 32b float internally?
<reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
<reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
- <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
@@ -4582,15 +5644,15 @@ to upconvert to 32b float internally?
<bitfield name="UNK6" pos="6" type="boolean"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD">
+ <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD">
+ <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD">
+ <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4623,6 +5685,19 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
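+	<!-- On A7XX, the HLSQ_DRAW_CMD/HLSQ_DISPATCH_CMD/HLSQ_EVENT_CMD
+	     registers below replace the A6XX-only ones at 0xbb00-0xbb02. -->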
+ <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="16" high="23"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
<reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
@@ -4791,7 +5866,7 @@ to upconvert to 32b float internally?
<reg32 offset="3" name="3"/>
</domain>
-<domain name="A6XX_TEX_CONST" width="32">
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
<value name="A6XX_TEX_X" value="0"/>
@@ -4831,6 +5906,7 @@ to upconvert to 32b float internally?
<reg32 offset="1" name="1">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
</reg32>
<reg32 offset="2" name="2">
<!--
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 6c81581016c7..1cf1b14fbd91 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -1012,4 +1012,93 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00110" name="TX_ALOG_INTF_OBSV"/>
</domain>
+<domain name="HDMI_8998_PHY" width="32">
+ <reg32 offset="0x00000" name="CFG"/>
+ <reg32 offset="0x00004" name="PD_CTL"/>
+ <reg32 offset="0x00010" name="MODE"/>
+ <reg32 offset="0x0005C" name="CLOCK"/>
+ <reg32 offset="0x00068" name="CMN_CTRL"/>
+ <reg32 offset="0x000B4" name="STATUS"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_QSERDES_COM" width="32">
+ <reg32 offset="0x0000" name="ATB_SEL1"/>
+ <reg32 offset="0x0004" name="ATB_SEL2"/>
+ <reg32 offset="0x0008" name="FREQ_UPDATE"/>
+ <reg32 offset="0x000C" name="BG_TIMER"/>
+ <reg32 offset="0x0010" name="SSC_EN_CENTER"/>
+ <reg32 offset="0x0014" name="SSC_ADJ_PER1"/>
+ <reg32 offset="0x0018" name="SSC_ADJ_PER2"/>
+ <reg32 offset="0x001C" name="SSC_PER1"/>
+ <reg32 offset="0x0020" name="SSC_PER2"/>
+ <reg32 offset="0x0024" name="SSC_STEP_SIZE1"/>
+ <reg32 offset="0x0028" name="SSC_STEP_SIZE2"/>
+ <reg32 offset="0x002C" name="POST_DIV"/>
+ <reg32 offset="0x0030" name="POST_DIV_MUX"/>
+ <reg32 offset="0x0034" name="BIAS_EN_CLKBUFLR_EN"/>
+ <reg32 offset="0x0038" name="CLK_ENABLE1"/>
+ <reg32 offset="0x003C" name="SYS_CLK_CTRL"/>
+ <reg32 offset="0x0040" name="SYSCLK_BUF_ENABLE"/>
+ <reg32 offset="0x0044" name="PLL_EN"/>
+ <reg32 offset="0x0048" name="PLL_IVCO"/>
+ <reg32 offset="0x004C" name="CMN_IETRIM"/>
+ <reg32 offset="0x0050" name="CMN_IPTRIM"/>
+ <reg32 offset="0x0060" name="CP_CTRL_MODE0"/>
+ <reg32 offset="0x0064" name="CP_CTRL_MODE1"/>
+ <reg32 offset="0x0068" name="PLL_RCTRL_MODE0"/>
+ <reg32 offset="0x006C" name="PLL_RCTRL_MODE1"/>
+ <reg32 offset="0x0070" name="PLL_CCTRL_MODE0"/>
+ <reg32 offset="0x0074" name="PLL_CCTRL_MODE1"/>
+ <reg32 offset="0x0078" name="PLL_CNTRL"/>
+ <reg32 offset="0x007C" name="BIAS_EN_CTRL_BY_PSM"/>
+ <reg32 offset="0x0080" name="SYSCLK_EN_SEL"/>
+ <reg32 offset="0x0084" name="CML_SYSCLK_SEL"/>
+ <reg32 offset="0x0088" name="RESETSM_CNTRL"/>
+ <reg32 offset="0x008C" name="RESETSM_CNTRL2"/>
+ <reg32 offset="0x0090" name="LOCK_CMP_EN"/>
+ <reg32 offset="0x0094" name="LOCK_CMP_CFG"/>
+ <reg32 offset="0x0098" name="LOCK_CMP1_MODE0"/>
+ <reg32 offset="0x009C" name="LOCK_CMP2_MODE0"/>
+ <reg32 offset="0x00A0" name="LOCK_CMP3_MODE0"/>
+ <reg32 offset="0x00B0" name="DEC_START_MODE0"/>
+ <reg32 offset="0x00B4" name="DEC_START_MODE1"/>
+ <reg32 offset="0x00B8" name="DIV_FRAC_START1_MODE0"/>
+ <reg32 offset="0x00BC" name="DIV_FRAC_START2_MODE0"/>
+ <reg32 offset="0x00C0" name="DIV_FRAC_START3_MODE0"/>
+ <reg32 offset="0x00C4" name="DIV_FRAC_START1_MODE1"/>
+ <reg32 offset="0x00C8" name="DIV_FRAC_START2_MODE1"/>
+ <reg32 offset="0x00CC" name="DIV_FRAC_START3_MODE1"/>
+ <reg32 offset="0x00D0" name="INTEGLOOP_INITVAL"/>
+ <reg32 offset="0x00D4" name="INTEGLOOP_EN"/>
+ <reg32 offset="0x00D8" name="INTEGLOOP_GAIN0_MODE0"/>
+ <reg32 offset="0x00DC" name="INTEGLOOP_GAIN1_MODE0"/>
+ <reg32 offset="0x00E0" name="INTEGLOOP_GAIN0_MODE1"/>
+ <reg32 offset="0x00E4" name="INTEGLOOP_GAIN1_MODE1"/>
+ <reg32 offset="0x00E8" name="VCOCAL_DEADMAN_CTRL"/>
+ <reg32 offset="0x00EC" name="VCO_TUNE_CTRL"/>
+ <reg32 offset="0x00F0" name="VCO_TUNE_MAP"/>
+ <reg32 offset="0x0124" name="CMN_STATUS"/>
+ <reg32 offset="0x0128" name="RESET_SM_STATUS"/>
+ <reg32 offset="0x0138" name="CLK_SEL"/>
+ <reg32 offset="0x013C" name="HSCLK_SEL"/>
+ <reg32 offset="0x0148" name="CORECLK_DIV_MODE0"/>
+ <reg32 offset="0x0150" name="SW_RESET"/>
+ <reg32 offset="0x0154" name="CORE_CLK_EN"/>
+ <reg32 offset="0x0158" name="C_READY_STATUS"/>
+ <reg32 offset="0x015C" name="CMN_CONFIG"/>
+ <reg32 offset="0x0164" name="SVS_MODE_CLK_SEL"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_TXn" width="32">
+ <reg32 offset="0x0000" name="EMP_POST1_LVL"/>
+ <reg32 offset="0x0008" name="INTERFACE_SELECT_TX_BAND"/>
+ <reg32 offset="0x000C" name="CLKBUF_TERM_ENABLE"/>
+ <reg32 offset="0x0014" name="DRV_LVL_RES_CODE_OFFSET"/>
+ <reg32 offset="0x0018" name="DRV_LVL"/>
+ <reg32 offset="0x001C" name="LANE_CONFIG"/>
+ <reg32 offset="0x0024" name="PRE_DRIVER_1"/>
+ <reg32 offset="0x0028" name="PRE_DRIVER_2"/>
+ <reg32 offset="0x002C" name="LANE_MODE"/>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index 2541d2de4e45..dbd42cc1da87 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -407,8 +407,7 @@ static void lcdif_crtc_mode_set_nofb(struct drm_crtc_state *crtc_state,
struct drm_display_mode *m = &crtc_state->adjusted_mode;
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
- m->crtc_clock,
- (int)(clk_get_rate(lcdif->clk) / 1000));
+ m->clock, (int)(clk_get_rate(lcdif->clk) / 1000));
DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
lcdif_crtc_state->bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
@@ -538,7 +537,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *drm = lcdif->drm;
dma_addr_t paddr;
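+	/* Assumed rationale (not stated in the patch): m->clock is the
+	 * requested pixel clock in kHz, while m->crtc_clock is the
+	 * timing-adjusted hardware clock, so the requested rate is
+	 * presumably the right one to program here.
+	 */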
- clk_set_rate(lcdif->clk, m->crtc_clock * 1000);
+ clk_set_rate(lcdif->clk, m->clock * 1000);
pm_runtime_get_sync(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index c32c01827c1d..7b863355c5c6 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -25,7 +25,6 @@ nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
-nouveau-y += nouveau_usif.o # userspace <-> nvif
nouveau-y += nouveau_vga.o
# DRM - memory management
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 4310ad71870b..67146f1e8482 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -118,8 +118,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -617,9 +617,15 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
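+		/* disp->image[] now carries an explicit GEM reference:
+		 * taken when a bo is installed, dropped when it is
+		 * replaced or torn down, rather than relying on
+		 * nouveau_bo_ref(). */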
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ }
+
+ drm_gem_object_get(&nvbo->bo.base);
+ disp->image[nv_crtc->index] = nvbo;
}
return ret;
@@ -754,13 +760,17 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -794,9 +804,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
}
static int
@@ -1042,7 +1057,7 @@ nv04_finish_page_flip(struct nouveau_channel *chan,
struct nv04_page_flip_state *ps)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
struct nv04_page_flip_state *s;
unsigned long flags;
@@ -1098,9 +1113,9 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_fence **pfence)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
unsigned long flags;
int ret;
@@ -1157,8 +1172,8 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
if (!chan)
return -ENODEV;
- cli = (void *)chan->user.client;
- push = chan->chan.push;
+ cli = chan->cli;
+ push = &chan->chan.push;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -1210,7 +1225,11 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
PUSH_NVSQ(push, NV05F, 0x0130, 0);
}
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ if (dispnv04->image[head])
+ drm_gem_object_put(&dispnv04->image[head]->bo.base);
+
+ drm_gem_object_get(&new_bo->bo.base);
+ dispnv04->image[head] = new_bo;
ret = nv04_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
@@ -1329,7 +1348,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
}
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index d6b8e0cce2ac..2e12bf136607 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -237,7 +237,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index d5b129dc623b..504c421aa176 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -626,7 +626,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
struct nvkm_i2c_bus_probe info[] = {
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4b7497a8755c..f71199a39bc4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -189,7 +189,6 @@ static void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
struct nouveau_crtc *nv_crtc;
@@ -206,15 +205,13 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
vfree(disp);
-
- nvif_object_unmap(&drm->client.device.object);
}
int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -229,8 +226,6 @@ nv04_display_create(struct drm_device *dev)
disp->drm = drm;
- nvif_object_map(&drm->client.device.object, NULL, 0);
-
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 11a6663758ec..85ec0f534392 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -176,7 +176,7 @@ static inline void
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
- nvbios_init(&nvxx_bios(&nouveau_drm(dev)->client.device)->subdev, table,
+ nvbios_init(&nvxx_bios(nouveau_drm(dev))->subdev, table,
init.outp = outp;
init.head = crtc;
);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index f7d35657aa64..8b376f9c8746 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -166,7 +166,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -258,9 +258,8 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->client.device;
- struct nvkm_clk *clk = nvxx_clk(device);
- struct nvkm_bios *bios = nvxx_bios(device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios_pll pll_lim;
struct nvkm_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -470,7 +469,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index de3ea731d6e6..d3014027a812 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -53,7 +53,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
if (bus) {
return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -205,7 +205,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 2033214c4b78..3ecb101d23e9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -47,7 +47,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -131,7 +131,7 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
if (device->quirk && device->quirk->tv_pin_mask) {
*pin_mask = device->quirk->tv_pin_mask;
@@ -369,7 +369,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 70c62b861276..a431f6c5f6fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -35,7 +35,7 @@
int
base507c_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -48,7 +48,7 @@ base507c_update(struct nv50_wndw *wndw, u32 *interlock)
int
base507c_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -65,7 +65,7 @@ base507c_image_clr(struct nv50_wndw *wndw)
static int
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -118,7 +118,7 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
base507c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -132,7 +132,7 @@ base507c_xlut_clr(struct nv50_wndw *wndw)
int
base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -158,7 +158,7 @@ base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
int
base507c_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -171,7 +171,7 @@ base507c_ntfy_clr(struct nv50_wndw *wndw)
int
base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -195,7 +195,7 @@ base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
int
base507c_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -208,7 +208,7 @@ base507c_sema_clr(struct nv50_wndw *wndw)
int
base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -307,7 +307,6 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nvif_disp_chan_v0 args = {
.id = head,
};
- struct nouveau_display *disp = nouveau_display(drm->dev);
struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -318,7 +317,7 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
+ ret = nv50_dmac_create(drm,
&oclass, head, &args, sizeof(args),
disp50->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 093d4ba6910e..4545cc5f3a14 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -28,7 +28,7 @@
static int
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index e6b0417c325b..4a2d5a259e15 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -28,7 +28,7 @@
static int
base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 10)))
@@ -65,7 +65,7 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
base907c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -84,7 +84,7 @@ base907c_xlut_clr(struct nv50_wndw *wndw)
static int
base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -156,7 +156,7 @@ base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
static int
base907c_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -170,7 +170,7 @@ base907c_csc_clr(struct nv50_wndw *wndw)
static int
base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e5bb5ca950c8..ce2cb78bbdd3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -33,7 +33,7 @@
int
core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
@@ -80,7 +80,7 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
int
core507d_read_caps(struct nv50_disp *disp)
{
- struct nvif_push *push = disp->core->chan.push;
+ struct nvif_push *push = &disp->core->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -130,7 +130,7 @@ core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int
core507d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -166,7 +166,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
return -ENOMEM;
core->func = func;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &core->chan);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 42f877f2ced2..7f637b8830be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -33,7 +33,7 @@
int
corec37d_wndw_owner(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
@@ -51,7 +51,7 @@ corec37d_wndw_owner(struct nv50_core *core)
int
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
@@ -127,7 +127,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
static int
corec37d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 53b1e2a569c1..421d0d57e1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -29,7 +29,7 @@
static int
corec57d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
index f9ad641555b7..a674ba435b05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -26,7 +26,7 @@ static int
crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -74,7 +74,7 @@ crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
static int
crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index f10f6c484408..4821ce32f9ed 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -15,7 +15,7 @@ static int
crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -53,7 +53,7 @@ crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
index cc0130e3d496..ad591dcb0bc9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
@@ -13,7 +13,7 @@
static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
index 09de78d96679..99ae692f219e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -29,7 +29,7 @@ static int
dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
u32 sync = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
index 95efa625b691..74bc9f81e3f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -29,7 +29,7 @@ static int
dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0efd6b4906cf..eed579a6c858 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -93,8 +93,11 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
ret = nvif_object_ctor(disp, "kmsChan", 0,
oclass[0], data, size,
&chan->user);
- if (ret == 0)
- nvif_object_map(&chan->user, NULL, 0);
+ if (ret == 0) {
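+		/* Propagate a map failure instead of keeping an unmapped
+		 * channel object around. */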
+ ret = nvif_object_map(&chan->user, NULL, 0);
+ if (ret)
+ nvif_object_dtor(&chan->user);
+ }
nvif_object_sclass_put(&sclass);
return ret;
}
@@ -124,20 +127,20 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
nv50_chan_destroy(&dmac->base);
- nvif_mem_dtor(&dmac->_push.mem);
+ nvif_mem_dtor(&dmac->push.mem);
}
static void
nv50_dmac_kick(struct nvif_push *push)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
 /* Push buffer fetches are not coherent with BAR1; we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
- if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+ if (dmac->push.mem.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
@@ -172,7 +175,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
if (get == 0) {
/* Corner-case, HW idle, but non-committed work pending. */
if (dmac->put == 0)
- nv50_dmac_kick(dmac->push);
+ nv50_dmac_kick(&dmac->push);
if (nvif_msec(dmac->base.device, 2000,
if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
@@ -181,7 +184,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
return -ETIMEDOUT;
}
- PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+ PUSH_RSVD(&dmac->push, PUSH_JUMP(&dmac->push, 0));
dmac->cur = 0;
return 0;
}
@@ -189,19 +192,19 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
int free;
if (WARN_ON(size > dmac->max))
return -EINVAL;
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)
return ret;
- push->cur = dmac->_push.mem.object.map.ptr;
+ push->cur = dmac->push.mem.object.map.ptr;
push->cur = push->cur + dmac->cur;
nv50_dmac_kick(push);
}
@@ -214,7 +217,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return -ETIMEDOUT;
}
- push->bgn = dmac->_push.mem.object.map.ptr;
+ push->bgn = dmac->push.mem.object.map.ptr;
push->bgn = push->bgn + dmac->cur;
push->cur = push->bgn;
push->end = push->cur + free;
@@ -226,17 +229,16 @@ static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
int
-nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+nv50_dmac_create(struct nouveau_drm *drm,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &drm->device;
+ struct nvif_object *disp = &drm->display->disp.object;
struct nvif_disp_chan_v0 *args = data;
u8 type = NVIF_MEM_COHERENT;
int ret;
- mutex_init(&dmac->lock);
-
/* Pascal added support for 47-bit physical addresses, but some
* parts of EVO still only accept 40-bit PAs.
*
@@ -250,18 +252,15 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
(nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
- ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
- &dmac->_push.mem);
+ ret = nvif_mem_ctor_map(&drm->mmu, "kmsChanPush", type, 0x1000, &dmac->push.mem);
if (ret)
return ret;
- dmac->ptr = dmac->_push.mem.object.map.ptr;
- dmac->_push.wait = nv50_dmac_wait;
- dmac->_push.kick = nv50_dmac_kick;
- dmac->push = &dmac->_push;
- dmac->push->bgn = dmac->_push.mem.object.map.ptr;
- dmac->push->cur = dmac->push->bgn;
- dmac->push->end = dmac->push->bgn;
+ dmac->push.wait = nv50_dmac_wait;
+ dmac->push.kick = nv50_dmac_kick;
+ dmac->push.bgn = dmac->push.mem.object.map.ptr;
+ dmac->push.cur = dmac->push.bgn;
+ dmac->push.end = dmac->push.bgn;
dmac->max = 0x1000/4 - 1;
/* EVO channels are affected by a HW bug where the last 12 DWORDs
@@ -270,7 +269,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (disp->oclass < GV100_DISP)
dmac->max -= 12;
- args->pushbuf = nvif_handle(&dmac->_push.mem.object);
+ args->pushbuf = nvif_handle(&dmac->push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
@@ -558,7 +557,7 @@ nv50_dac_create(struct nouveau_encoder *nv_encoder)
{
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus;
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
@@ -593,8 +592,7 @@ static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_crtc *nv_crtc;
@@ -639,18 +637,17 @@ static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
acomp->ops = &nv50_audio_component_ops;
acomp->dev = kdev;
drm->audio.component = acomp;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
return 0;
}
@@ -658,15 +655,14 @@ static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
drm->audio.component = NULL;
acomp->ops = NULL;
acomp->dev = NULL;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
}
static const struct component_ops nv50_audio_component_bind_ops = {
@@ -1884,7 +1880,7 @@ nv50_sor_create(struct nouveau_encoder *nv_encoder)
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
struct nv50_disp *disp = nv50_disp(connector->dev);
@@ -2051,7 +2047,7 @@ nv50_pior_create(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
@@ -2652,7 +2648,6 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
@@ -2819,7 +2814,7 @@ nv50_display_destroy(struct drm_device *dev)
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2862,7 +2857,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_bo_unpin(disp->sync);
}
if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 5508a7cfd492..15f9242b72ac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -62,18 +62,11 @@ struct nv50_chan {
struct nv50_dmac {
struct nv50_chan base;
- struct nvif_push _push;
- struct nvif_push *push;
- u32 *ptr;
+ struct nvif_push push;
struct nvif_object sync;
struct nvif_object vram;
- /* Protects against concurrent pushbuf access to this channel, lock is
- * grabbed by evo_wait (if the pushbuf reservation is successful) and
- * dropped again by evo_kick. */
- struct mutex lock;
-
u32 cur;
u32 put;
u32 max;
@@ -95,7 +88,7 @@ struct nv50_outp_atom {
} set, clr;
};
-int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+int nv50_dmac_create(struct nouveau_drm *,
const s32 *oclass, u8 head, void *data, u32 size,
s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
@@ -108,9 +101,6 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
-u32 *evo_wait(struct nv50_dmac *, int nr);
-void evo_kick(u32 *, struct nv50_dmac *);
-
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
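The disp.h hunk is the heart of this part of the series: struct nv50_dmac embeds its nvif_push directly, retiring the _push/push pair, the cached ptr, the evo_wait()/evo_kick() helpers, and the mutex that guarded them. Illustrative before/after shapes, using only names taken from the hunk:

/* Before: one real push buffer plus a pointer that always aimed at it. */
struct nv50_dmac_old {
	struct nvif_push _push;
	struct nvif_push *push;    /* always &_push */
	u32 *ptr;                  /* cached _push.mem.object.map.ptr */
	struct mutex lock;         /* serialized evo_wait()/evo_kick() */
};

/* After: the push buffer itself; callers now take &dmac->push. */
struct nv50_dmac_new {
	struct nvif_push push;
};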
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 0edd4e520c8e..7fa1e0279d7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -29,7 +29,7 @@
int
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -66,7 +66,7 @@ head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -94,7 +94,7 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -122,7 +122,7 @@ head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -139,7 +139,7 @@ head507d_curs_clr(struct nv50_head *head)
static int
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -188,7 +188,7 @@ head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
int
head507d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -202,7 +202,7 @@ head507d_core_clr(struct nv50_head *head)
static int
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -278,7 +278,7 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -293,7 +293,7 @@ head507d_olut_clr(struct nv50_head *head)
static int
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -345,7 +345,7 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
int
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -400,7 +400,7 @@ head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
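Every head*, sor*, pior*, ovly*, wimm* and wndw* hunk from here on is the same mechanical rewrite: chan.push (or wndw.push/wimm.push) is now an embedded member, so each callsite gains a single '&'. A sketch of the resulting accessor shape; that nv50_core embeds a struct nv50_dmac named chan is inferred from the surrounding code:

/* Sketch of the converted callsite pattern. */
static inline struct nvif_push *
core_push(struct nv50_core *core)
{
	return &core->chan.push;   /* was: core->chan.push, a pointer */
}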
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index 194d1771c481..1545d576fe9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -29,7 +29,7 @@
static int
head827d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head827d_curs_clr(struct nv50_head *head)
static int
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -73,7 +73,7 @@ head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -127,7 +127,7 @@ head827d_olut_clr(struct nv50_head *head)
static int
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 18fe4c1e2d6a..6c9e0438e55c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -36,7 +36,7 @@
int
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -57,7 +57,7 @@ head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -77,7 +77,7 @@ head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -95,7 +95,7 @@ head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -124,7 +124,7 @@ head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -152,7 +152,7 @@ head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -171,7 +171,7 @@ head907d_curs_clr(struct nv50_head *head)
int
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -195,7 +195,7 @@ head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ head907d_core_clr(struct nv50_head *head)
int
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -246,7 +246,7 @@ head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -263,7 +263,7 @@ head907d_olut_clr(struct nv50_head *head)
int
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -322,7 +322,7 @@ bool head907d_ilut_check(int size)
int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -378,7 +378,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 4ce47b55f72c..2d9aee050510 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -30,7 +30,7 @@
static int
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -77,7 +77,7 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index a4a3b78ea42c..2bcb3790fc10 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -30,7 +30,7 @@
static int
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -64,7 +64,7 @@ headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -85,7 +85,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -104,7 +104,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -122,7 +122,7 @@ headc37d_curs_clr(struct nv50_head *head)
int
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -161,7 +161,7 @@ headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
static int
headc37d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -175,7 +175,7 @@ headc37d_olut_clr(struct nv50_head *head)
static int
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -254,7 +254,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 53b1248c40ec..fde4087e7691 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -30,7 +30,7 @@
static int
headc57d_display_id(struct nv50_head *head, u32 display_id)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -43,7 +43,7 @@ headc57d_display_id(struct nv50_head *head, u32 display_id)
static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -78,7 +78,7 @@ headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -96,7 +96,7 @@ headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ headc57d_olut_clr(struct nv50_head *head)
static int
headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -201,7 +201,7 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 797c1e4e0eaa..654e506f8431 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -33,7 +33,7 @@
int
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -55,7 +55,7 @@ ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
@@ -159,7 +159,7 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 02dc02d9260f..a5ae22ed663d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -32,7 +32,7 @@
static int
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index 645130d18a99..8cf0e18fa596 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -29,7 +29,7 @@
static int
ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index 17d230256bdd..79507d169778 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -30,7 +30,7 @@ static int
pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index ca73d7710885..08cc9845322e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -30,7 +30,7 @@ static int
sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index c86cd8fa61d6..23957cc8f326 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -32,7 +32,7 @@ static int
sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index 9eaef34816da..da05d4614e00 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -29,7 +29,7 @@ static int
sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index ee76b091d4ef..7985da61aaac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -31,7 +31,7 @@
static int
wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -46,7 +46,7 @@ wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
static int
wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -71,10 +71,9 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
struct nvif_disp_chan_v0 args = {
.id = wndw->id,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index b3deea5aca58..50a7b97d37a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -39,7 +39,7 @@ wndwc37e_csc_clr(struct nv50_wndw *wndw)
static int
wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -52,7 +52,7 @@ wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -65,7 +65,7 @@ wndwc37e_ilut_clr(struct nv50_wndw *wndw)
static int
wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -94,7 +94,7 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
int
wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 8)))
@@ -139,7 +139,7 @@ wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -156,7 +156,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
static int
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -209,7 +209,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -222,7 +222,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
int
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -239,7 +239,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -252,7 +252,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
int
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -268,7 +268,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -363,7 +363,7 @@ wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 1d214a4b960a..d1ca51aae58c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -32,7 +32,7 @@
static int
wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -81,7 +81,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
const u32 identity[12] = {
0x00010000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00010000, 0x00000000, 0x00000000,
@@ -99,7 +99,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -112,7 +112,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -125,7 +125,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
index 7a370fa1df20..52af293c98f4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -29,7 +29,7 @@
static int
wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index fa161b74d967..ea937fa7bc55 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -2,13 +2,6 @@
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
-struct nv_device_v0 {
- __u8 version;
- __u8 priv;
- __u8 pad02[6];
- __u64 device; /* device identifier, ~0 for client default */
-};
-
#define NV_DEVICE_V0_INFO 0x00
#define NV_DEVICE_V0_TIME 0x01
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e668ab1664f0..824e052dcc25 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -7,9 +7,6 @@
#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
-#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
-
#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 5d9395e651b6..03f1d564eb12 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -7,21 +7,12 @@
struct nvif_client {
struct nvif_object object;
const struct nvif_driver *driver;
- u64 version;
- u8 route;
};
-int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *);
+int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *);
void nvif_client_dtor(struct nvif_client *);
-int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);
/*XXX*/
-#include <core/client.h>
-#define nvxx_client(a) ({ \
- struct nvif_client *_client = (a); \
- (struct nvkm_client *)_client->object.priv; \
-})
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index b0e59800a320..7877a2a79da9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -18,41 +18,8 @@ struct nvif_device {
struct nvif_user user;
};
-int nvif_device_ctor(struct nvif_object *, const char *name, u32 handle,
- s32 oclass, void *, u32, struct nvif_device *);
+int nvif_device_ctor(struct nvif_client *, const char *name, struct nvif_device *);
void nvif_device_dtor(struct nvif_device *);
+int nvif_device_map(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-
-/*XXX*/
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/bar.h>
-#include <subdev/gpio.h>
-#include <subdev/clk.h>
-#include <subdev/i2c.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-#include <subdev/pci.h>
-
-#define nvxx_device(a) ({ \
- struct nvif_device *_device = (a); \
- struct { \
- struct nvkm_object object; \
- struct nvkm_device *device; \
- } *_udevice = _device->object.priv; \
- _udevice->device; \
-})
-#define nvxx_bios(a) nvxx_device(a)->bios
-#define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_gpio(a) nvxx_device(a)->gpio
-#define nvxx_clk(a) nvxx_device(a)->clk
-#define nvxx_i2c(a) nvxx_device(a)->i2c
-#define nvxx_iccsense(a) nvxx_device(a)->iccsense
-#define nvxx_therm(a) nvxx_device(a)->therm
-#define nvxx_volt(a) nvxx_device(a)->volt
-
-#include <engine/fifo.h>
-#include <engine/gr.h>
-
-#define nvxx_gr(a) nvxx_device(a)->gr
#endif
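client.h and device.h shrink the nvif constructors: the device-selection argument leaves nvif_client_ctor(), nvif_device_ctor() takes the client directly, and mapping the device becomes an explicit nvif_device_map() call. A hedged bring-up sketch using only the signatures shown above; the "example" names are ours and error unwinding is trimmed:

/* Assumed call order under the new API. */
static int example_device_bringup(struct nvif_client *parent)
{
	struct nvif_client client;
	struct nvif_device device;
	int ret;

	ret = nvif_client_ctor(parent, "example", &client);
	if (ret)
		return ret;

	ret = nvif_device_ctor(&client, "exampleDevice", &device);
	if (ret == 0)
		ret = nvif_device_map(&device);  /* no longer implicit */
	return ret;
}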
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 7a3af05f7f98..7b08ff769039 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -8,20 +8,15 @@ struct nvif_driver {
const char *name;
int (*init)(const char *name, u64 device, const char *cfg,
const char *dbg, void **priv);
- void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
- bool keep;
};
int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *);
extern const struct nvif_driver nvif_driver_nvkm;
-extern const struct nvif_driver nvif_driver_drm;
-extern const struct nvif_driver nvif_driver_lib;
-extern const struct nvif_driver nvif_driver_null;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index f7b8f8f48760..c06383835337 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -5,16 +5,6 @@
struct nvif_client_v0 {
__u8 version;
__u8 pad01[7];
- __u64 device;
char name[32];
};
-
-#define NVIF_CLIENT_V0_DEVLIST 0x00
-
-struct nvif_client_devlist_v0 {
- __u8 version;
- __u8 count;
- __u8 pad02[6];
- __u64 device[];
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
deleted file mode 100644
index df2915d6a61e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0002_H__
-#define __NVIF_IF0002_H__
-
-#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
-#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
-#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
-
-struct nvif_perfmon_query_domain_v0 {
- __u8 version;
- __u8 id;
- __u8 counter_nr;
- __u8 iter;
- __u16 signal_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_signal_v0 {
- __u8 version;
- __u8 domain;
- __u16 iter;
- __u8 signal;
- __u8 source_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_source_v0 {
- __u8 version;
- __u8 domain;
- __u8 signal;
- __u8 iter;
- __u8 pad04[4];
- __u32 source;
- __u32 mask;
- char name[64];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
deleted file mode 100644
index 78467da07c37..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0003_H__
-#define __NVIF_IF0003_H__
-
-struct nvif_perfdom_v0 {
- __u8 version;
- __u8 domain;
- __u8 mode;
- __u8 pad03[1];
- struct {
- __u8 signal[4];
- __u64 source[4][8];
- __u16 logic_op;
- } ctr[4];
-};
-
-#define NVIF_PERFDOM_V0_INIT 0x00
-#define NVIF_PERFDOM_V0_SAMPLE 0x01
-#define NVIF_PERFDOM_V0_READ 0x02
-
-struct nvif_perfdom_init {
-};
-
-struct nvif_perfdom_sample {
-};
-
-struct nvif_perfdom_read_v0 {
- __u8 version;
- __u8 pad01[7];
- __u32 ctr[4];
- __u32 clk;
- __u8 pad04[4];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4e047bb1fc07..e825c8a1d9ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -2,17 +2,12 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
-#define NVIF_VERSION_LATEST 0x0000000000000100ULL
-
struct nvif_ioctl_v0 {
__u8 version;
-#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
-#define NVIF_IOCTL_V0_RD 0x05
-#define NVIF_IOCTL_V0_WR 0x06
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
__u8 type;
@@ -28,10 +23,6 @@ struct nvif_ioctl_v0 {
__u8 data[]; /* ioctl data (below) */
};
-struct nvif_ioctl_nop_v0 {
- __u64 version;
-};
-
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
__u8 version;
@@ -67,24 +58,6 @@ struct nvif_ioctl_mthd_v0 {
__u8 data[]; /* method data (class.h) */
};
-struct nvif_ioctl_rd_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
-struct nvif_ioctl_wr_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index f52399caee82..8d205b6af46a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -34,8 +34,6 @@ void nvif_object_dtor(struct nvif_object *);
int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
void nvif_object_sclass_put(struct nvif_sclass **);
-u32 nvif_object_rd(struct nvif_object *, int, u64);
-void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
int nvif_object_map_handle(struct nvif_object *, void *, u32,
u64 *handle, u64 *length);
@@ -47,20 +45,11 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_object(a) (a)->object
#define nvif_rd(a,f,b,c) ({ \
- struct nvif_object *_object = (a); \
- u32 _data; \
- if (likely(_object->map.ptr)) \
- _data = f((u8 __iomem *)_object->map.ptr + (c)); \
- else \
- _data = nvif_object_rd(_object, (b), (c)); \
+ u32 _data = f((u8 __iomem *)(a)->map.ptr + (c)); \
_data; \
})
#define nvif_wr(a,f,b,c,d) ({ \
- struct nvif_object *_object = (a); \
- if (likely(_object->map.ptr)) \
- f((d), (u8 __iomem *)_object->map.ptr + (c)); \
- else \
- nvif_object_wr(_object, (b), (c), (d)); \
+ f((d), (u8 __iomem *)(a)->map.ptr + (c)); \
})
#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
@@ -69,7 +58,7 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
#define nvif_mask(a,b,c,d) ({ \
- struct nvif_object *__object = (a); \
+ typeof(a) __object = (a); \
u32 _addr = (b), _data = nvif_rd32(__object, _addr); \
nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \
_data; \
@@ -134,11 +123,4 @@ struct nvif_mclass {
#define NVIF_MR32(p,A...) DRF_MR(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MV32(p,A...) DRF_MV(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MD32(p,A...) DRF_MD(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
-
-/*XXX*/
-#include <core/object.h>
-#define nvxx_object(a) ({ \
- struct nvif_object *_object = (a); \
- (struct nvkm_object *)_object->priv; \
-})
#endif
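With NVIF_IOCTL_V0_RD/WR gone from the ioctl set, nvif_rd()/nvif_wr() above lose their ioctl fallback and simply require the object to be mapped. A sketch of the read-modify-write that nvif_mask() performs; the 0x0100 register offset is hypothetical:

/* Equivalent to nvif_mask(obj, 0x0100, 0x1, 0x1); assumes
 * obj->map.ptr is valid, as the slimmed-down macros now do. */
static u32 set_bit0_example(struct nvif_object *obj)
{
	u32 data = nvif_rd32(obj, 0x0100);                 /* read */
	nvif_wr32(obj, 0x0100, (data & ~0x00000001) | 1);  /* modify, write */
	return data;                                       /* old value */
}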
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 429d0106c123..a2eaf3929ac3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -34,4 +34,23 @@
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+ u32 __iomem *_p = (u32 __iomem *)(p); \
+ u64 _v = (v); \
+ iowrite32_native(lower_32_bits(_v), &_p[0]); \
+ iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
#endif
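The endianness helpers move verbatim from nvkm/core/os.h (deleted further down) into nvif/os.h, so both nvif and nvkm keep seeing them. One property worth noting from the macro body: iowrite64_native() is two 32-bit writes, low dword first, and therefore not atomic as seen by the device. Expanded sketch:

/* What iowrite64_native(v, p) does, per the macro above. */
static void write64_example(u32 __iomem *p)
{
	const u64 v = 0x1122334455667788ULL;

	iowrite32_native(lower_32_bits(v), &p[0]);  /* 0x55667788 first */
	iowrite32_native(upper_32_bits(v), &p[1]);  /* then 0x11223344 */
}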
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 932c9fd0b2d8..15f27fdd877a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -22,7 +22,6 @@ struct nvkm_client {
int nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
int (*)(u64, void *, u32), struct nvkm_client **);
-struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
/* logging for client-facing objects */
#define nvif_printk(o,l,p,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index f057d348221e..46afb877a296 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -109,7 +109,6 @@ struct nvkm_device_chip {
};
struct nvkm_device *nvkm_device_find(u64 name);
-int nvkm_device_list(u64 *name, int size);
/* privileged register interface accessor macros */
#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 30c17db483cb..9d2a1abf64f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -46,7 +46,6 @@ NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index ed1f66360782..10107ef3ca49 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -15,8 +15,6 @@ struct nvkm_object {
struct list_head head;
struct list_head tree;
- u8 route;
- u64 token;
u64 object;
struct rb_node node;
};
@@ -35,12 +33,6 @@ struct nvkm_object_func {
int (*map)(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int (*unmap)(struct nvkm_object *);
- int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
- int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
- int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
- int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
- int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
- int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
@@ -63,12 +55,6 @@ int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int nvkm_object_unmap(struct nvkm_object *);
-int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
-int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
-int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
-int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8 data);
-int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16 data);
-int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
index 8e1b945d38f3..cad05f0e7948 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -21,8 +21,6 @@ struct nvkm_oclass {
const void *priv;
const void *engn;
u32 handle;
- u8 route;
- u64 token;
u64 object;
struct nvkm_client *client;
struct nvkm_object *parent;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 3fd5c007a663..9b05612e6490 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -3,25 +3,6 @@
#define __NVKM_OS_H__
#include <nvif/os.h>
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif
-
-#define iowrite64_native(v,p) do { \
- u32 __iomem *_p = (u32 __iomem *)(p); \
- u64 _v = (v); \
- iowrite32_native(lower_32_bits(_v), &_p[0]); \
- iowrite32_native(upper_32_bits(_v), &_p[1]); \
-} while(0)
-
struct nvkm_blob {
void *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index b4b5df3e1610..7444c4d59e09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -10,6 +10,5 @@ struct nvkm_device_pci {
};
int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
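nvkm_device_pci_new() and nvkm_device_tegra_new() (next hunk) lose the detect/mmio/subdev_mask parameters. A hedged sketch of a probe call under the new signature; passing NULL for the cfg/dbg strings is our assumption, not taken from the patch:

/* Assumed probe-time usage of the trimmed constructor. */
static int probe_example(struct pci_dev *pdev)
{
	struct nvkm_device *device;

	return nvkm_device_pci_new(pdev, NULL /*cfg*/, NULL /*dbg*/, &device);
}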
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index ccee53d4e4ec..22f74fc88cd7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -51,6 +51,5 @@ struct nvkm_device_tegra_func {
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
struct platform_device *,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
deleted file mode 100644
index af89d46ea360..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_H__
-#define __NVKM_PM_H__
-#include <core/engine.h>
-
-struct nvkm_pm {
- const struct nvkm_pm_func *func;
- struct nvkm_engine engine;
-
- struct {
- spinlock_t lock;
- struct nvkm_object *object;
- } client;
-
- struct list_head domains;
- struct list_head sources;
- u32 sequence;
-};
-
-int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d56909071de6..2a0617e5fe2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -46,23 +46,9 @@ nouveau_abi16(struct drm_file *file_priv)
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
- struct nv_device_v0 args = {
- .device = ~0ULL,
- };
-
+ abi16->cli = cli;
INIT_LIST_HEAD(&abi16->channels);
-
- /* allocate device object targeting client's default
- * device (ie. the one that belongs to the fd it
- * opened)
- */
- if (nvif_device_ctor(&cli->base.object, "abi16Device",
- 0, NV_DEVICE, &args, sizeof(args),
- &abi16->device) == 0)
- return cli->abi16;
-
- kfree(cli->abi16);
- cli->abi16 = NULL;
+ INIT_LIST_HEAD(&abi16->objects);
}
}
return cli->abi16;
@@ -82,11 +68,72 @@ nouveau_abi16_get(struct drm_file *file_priv)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
mutex_unlock(&cli->mutex);
return ret;
}
+/* Tracks objects created via the DRM_NOUVEAU_NVIF ioctl.
+ *
+ * The only two types of object that userspace ever allocated via this
+ * interface are 'device' objects, used to retrieve basic device info,
+ * and 'engine objects', which instantiate HW classes on a channel.
+ *
+ * The remainder of what used to be available via DRM_NOUVEAU_NVIF has
+ * been removed, but these object types need to be tracked to maintain
+ * compatibility with userspace.
+ */
+struct nouveau_abi16_obj {
+ enum nouveau_abi16_obj_type {
+ DEVICE,
+ ENGOBJ,
+ } type;
+ u64 object;
+
+ struct nvif_object engobj;
+
+ struct list_head head; /* protected by nouveau_abi16.cli.mutex */
+};
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_find(struct nouveau_abi16 *abi16, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ list_for_each_entry(obj, &abi16->objects, head) {
+ if (obj->object == object)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_abi16_obj_del(struct nouveau_abi16_obj *obj)
+{
+ list_del(&obj->head);
+ kfree(obj);
+}
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_new(struct nouveau_abi16 *abi16, enum nouveau_abi16_obj_type type, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ obj = nouveau_abi16_obj_find(abi16, object);
+ if (obj)
+ return ERR_PTR(-EEXIST);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->type = type;
+ obj->object = object;
+ list_add_tail(&obj->head, &abi16->objects);
+ return obj;
+}
+
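The tracker above is deliberately tiny: a flat list searched linearly, which is adequate for the handful of objects an ABI16 client ever creates. Assumed usage from the NEW-ioctl side, error paths trimmed:

/* Sketch: record a userspace 'device' object by its 64-bit handle. */
static int track_device_example(struct nouveau_abi16 *abi16, u64 handle)
{
	struct nouveau_abi16_obj *obj;

	obj = nouveau_abi16_obj_new(abi16, DEVICE, handle);
	if (IS_ERR(obj))
		return PTR_ERR(obj);  /* -EEXIST on handle reuse, -ENOMEM on OOM */

	return 0;  /* forgotten later via nouveau_abi16_obj_del() */
}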
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
@@ -164,17 +211,20 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_obj *obj, *tmp;
+
+ /* cleanup objects */
+ list_for_each_entry_safe(obj, tmp, &abi16->objects, head) {
+ nouveau_abi16_obj_del(obj);
+ }
/* cleanup channels */
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
nouveau_abi16_chan_fini(abi16, chan);
}
- /* destroy the device object */
- nvif_device_dtor(&abi16->device);
-
kfree(cli->abi16);
cli->abi16 = NULL;
}
@@ -199,8 +249,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
- struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
- struct nvkm_gr *gr = nvxx_gr(device);
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+ struct nvkm_gr *gr = nvxx_gr(drm);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -291,7 +341,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
- struct nvif_device *device;
+ struct nvif_device *device = &cli->device;
u64 engine, runm;
int ret;
@@ -308,7 +358,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
__nouveau_cli_disable_uvmm_noinit(cli);
- device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
/* hack to allow channel engine type specification on kepler */
@@ -356,7 +405,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
list_add(&chan->head, &abi16->channels);
/* create channel object and initialise dma and fence management */
- ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
+ ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
@@ -458,44 +507,6 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
}
int
-nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
-{
- union {
- struct nvif_ioctl_v0 v0;
- } *args = data;
- struct nouveau_abi16_chan *chan;
- struct nouveau_abi16 *abi16;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- switch (args->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- case NVIF_IOCTL_V0_MTHD:
- case NVIF_IOCTL_V0_SCLASS:
- break;
- default:
- return -EACCES;
- }
- } else
- return ret;
-
- if (!(abi16 = nouveau_abi16(file_priv)))
- return -ENOMEM;
-
- if (args->v0.token != ~0ULL) {
- if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
- return -EINVAL;
- args->v0.object = nvif_handle(&chan->chan->user);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
- }
-
- args->v0.object = nvif_handle(&abi16->device.object);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
-}
-
-int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
@@ -519,7 +530,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_client *client;
struct nvif_sclass *sclass;
s32 oclass = 0;
int ret, i;
@@ -529,7 +539,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, init->channel);
if (!chan)
@@ -594,10 +603,8 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
list_add(&ntfy->head, &chan->notifiers);
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
oclass, NULL, 0, &ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -612,18 +619,17 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_device *device = &abi16->device;
- struct nvif_client *client;
+ struct nvif_device *device;
struct nv_dma_v0 args = {};
int ret;
if (unlikely(!abi16))
return -ENOMEM;
+ device = &abi16->cli->device;
/* completely unnecessary for these chipsets... */
if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
@@ -660,11 +666,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
args.limit += chan->ntfy->offset;
}
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
@@ -704,3 +708,183 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, ret);
}
+
+static int
+nouveau_abi16_ioctl_mthd(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_cli *cli = abi16->cli;
+ struct nvif_ioctl_mthd_v0 *args;
+ struct nouveau_abi16_obj *obj;
+ struct nv_device_info_v0 *info;
+
+ if (ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
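+	/* methods may only target the ABI16 device object */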
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (!obj || obj->type != DEVICE)
+ return -EINVAL;
+
+ if (args->method != NV_DEVICE_V0_INFO ||
+ argc != sizeof(*info))
+ return -EINVAL;
+
+ info = (void *)args->data;
+ if (info->version != 0x00)
+ return -EINVAL;
+
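+	/* respond with a copy of the client device's info */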
+ info = &cli->device.info;
+ memcpy(args->data, info, sizeof(*info));
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_del(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_abi16_obj *obj;
+
+ if (ioctl->route || argc)
+ return -EINVAL;
+
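+	/* deleting an unknown handle is a no-op; engine objects also drop the backing nvif object */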
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (obj) {
+ if (obj->type == ENGOBJ)
+ nvif_object_dtor(&obj->engobj);
+ nouveau_abi16_obj_del(obj);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_new(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_new_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nouveau_abi16_obj *obj;
+ int ret;
+
+ if (argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (args->version != 0)
+ return -EINVAL;
+
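+	/* an unrouted NEW may only create the NV_DEVICE object - just record the handle */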
+ if (!ioctl->route) {
+ if (ioctl->object || args->oclass != NV_DEVICE)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ return 0;
+ }
+
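+	/* a routed NEW allocates an engine object on the channel named by the token */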
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, ENGOBJ, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
+ NULL, 0, &obj->engobj);
+ if (ret)
+ nouveau_abi16_obj_del(obj);
+
+ return ret;
+}
+
+static int
+nouveau_abi16_ioctl_sclass(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_sclass_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nvif_sclass *sclass;
+ int ret;
+
+ if (!ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (argc != args->count * sizeof(args->oclass[0]))
+ return -EINVAL;
+
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+ if (ret < 0)
+ return ret;
+
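+	/* copy out no more classes than userspace made room for, but report the total */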
+ for (int i = 0; i < min_t(u8, args->count, ret); i++) {
+ args->oclass[i].oclass = sclass[i].oclass;
+ args->oclass[i].minver = sclass[i].minver;
+ args->oclass[i].maxver = sclass[i].maxver;
+ }
+ args->count = ret;
+
+ nvif_object_sclass_put(&sclass);
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
+{
+ struct nvif_ioctl_v0 *ioctl;
+ struct nouveau_abi16 *abi16;
+ u32 argc = size;
+ int ret;
+
+ if (argc < sizeof(*ioctl))
+ return -EINVAL;
+ argc -= sizeof(*ioctl);
+
+ ioctl = kmalloc(size, GFP_KERNEL);
+ if (!ioctl)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(ioctl, user, size))
+ goto done_free;
+
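+	/* only v0 ioctls with no route, or route 0xff, are accepted */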
+ if (ioctl->version != 0x00 ||
+ (ioctl->route && ioctl->route != 0xff)) {
+ ret = -EINVAL;
+ goto done_free;
+ }
+
+ abi16 = nouveau_abi16_get(filp);
+ if (unlikely(!abi16)) {
+ ret = -ENOMEM;
+ goto done_free;
+ }
+
+ switch (ioctl->type) {
+ case NVIF_IOCTL_V0_SCLASS: ret = nouveau_abi16_ioctl_sclass(abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_NEW : ret = nouveau_abi16_ioctl_new (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_DEL : ret = nouveau_abi16_ioctl_del (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_MTHD : ret = nouveau_abi16_ioctl_mthd (abi16, ioctl, argc); break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ nouveau_abi16_put(abi16, 0);
+
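+	/* on success, write the results back to userspace */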
+ if (ret == 0) {
+ if (copy_to_user(user, ioctl, size))
+ ret = -EFAULT;
+ }
+
+done_free:
+ kfree(ioctl);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 661b901d8ecc..af6b4e1cefd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -30,16 +30,16 @@ struct nouveau_abi16_chan {
};
struct nouveau_abi16 {
- struct nvif_device device;
+ struct nouveau_cli *cli;
struct list_head channels;
- u64 handles;
+ struct list_head objects;
};
struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *);
int nouveau_abi16_put(struct nouveau_abi16 *, int);
void nouveau_abi16_fini(struct nouveau_abi16 *);
s32 nouveau_abi16_swclass(struct nouveau_drm *);
-int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
+int nouveau_abi16_ioctl(struct drm_file *, void __user *user, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8c3c1f1e01c5..c8335f5b49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2015,7 +2015,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2086,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev) ||
- nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
+ nvkm_gsp_rm(nvxx_device(drm)->gsp))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 18eb061ccafb..62b5f5889041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -48,6 +48,7 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 70fb003a6666..db961eade225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -859,7 +859,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence;
int ret;
@@ -1171,7 +1171,7 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
@@ -1291,7 +1291,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 4e891752c255..596a63a50a20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -53,25 +53,10 @@ nouveau_bo(struct ttm_buffer_object *bo)
return container_of(bo, struct nouveau_bo, bo);
}
-static inline int
-nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+static inline void
+nouveau_bo_fini(struct nouveau_bo *bo)
{
- struct nouveau_bo *prev;
-
- if (!pnvbo)
- return -EINVAL;
- prev = *pnvbo;
-
- if (ref) {
- ttm_bo_get(&ref->bo);
- *pnvbo = nouveau_bo(&ref->bo);
- } else {
- *pnvbo = NULL;
- }
- if (prev)
- ttm_bo_put(&prev->bo);
-
- return 0;
+ ttm_bo_put(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
@@ -115,35 +100,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
-static inline void
-nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
-{
- if (*pnvbo) {
- nouveau_bo_unmap(*pnvbo);
- nouveau_bo_unpin(*pnvbo);
- nouveau_bo_ref(NULL, pnvbo);
- }
-}
-
-static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
- struct nouveau_bo **pnvbo)
-{
- int ret = nouveau_bo_new(cli, size, align, domain,
- 0, 0, NULL, NULL, pnvbo);
- if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, domain, true);
- if (ret == 0) {
- ret = nouveau_bo_map(*pnvbo);
- if (ret == 0)
- return ret;
- nouveau_bo_unpin(*pnvbo);
- }
- nouveau_bo_ref(NULL, pnvbo);
- }
- return ret;
-}
-
int nv04_bo_move_init(struct nouveau_channel *, u32);
int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
struct ttm_resource *, struct ttm_resource *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index e2ce44adaa5c..0b6758e024a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -47,7 +47,7 @@ int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
@@ -96,7 +96,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 4);
@@ -104,6 +104,6 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
+ PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->cli->drm->ntfy.handle);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index c6cf3629a9f9..c3de17548d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -40,7 +40,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -136,7 +136,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -144,7 +144,7 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
+ PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->cli->drm->ntfy.handle,
SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index 9b7ba31fae13..e6ef79de2498 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -37,7 +37,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 7);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index a15a38a87a95..c4861d073ad4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -41,7 +41,7 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
index d2bb2687d401..ad82269c7725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -38,7 +38,7 @@ int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -86,7 +86,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
index 4618f4f5ab56..5eaeef9d25e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -34,7 +34,7 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
index 07a5c6302c98..dff2ae0e1e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -39,7 +39,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 10);
@@ -78,7 +78,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7c97b2886807..2cb2e5675807 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -52,7 +52,7 @@ static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
@@ -66,7 +66,7 @@ int
nouveau_channel_idle(struct nouveau_channel *chan)
{
if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence = NULL;
int ret;
@@ -78,7 +78,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
if (ret) {
NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
- chan->chid, nvxx_client(&cli->base)->name);
+ chan->chid, cli->name);
return ret;
}
}
@@ -90,12 +90,10 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- struct nouveau_cli *cli = (void *)chan->user.client;
-
if (chan->fence)
- nouveau_fence(chan->drm)->context_del(chan);
+ nouveau_fence(chan->cli->drm)->context_del(chan);
- if (cli)
+ if (nvif_object_constructed(&chan->user))
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
nvif_object_dtor(&chan->blit);
@@ -110,7 +108,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_ref(NULL, &chan->push.buffer);
+ nouveau_bo_fini(chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -119,33 +117,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
static void
nouveau_channel_kick(struct nvif_push *push)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
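+	/* advance dma.cur past the words pushed since the last kick */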
FIRE_RING(chan);
- chan->chan._push.bgn = chan->chan._push.cur;
+ chan->chan.push.bgn = chan->chan.push.cur;
}
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
int ret;
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
ret = RING_SPACE(chan, size);
if (ret == 0) {
- chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
- chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
- chan->chan._push.cur = chan->chan._push.bgn;
- chan->chan._push.end = chan->chan._push.bgn + size;
+ chan->chan.push.bgn = chan->chan.push.mem.object.map.ptr;
+ chan->chan.push.bgn = chan->chan.push.bgn + chan->dma.cur;
+ chan->chan.push.cur = chan->chan.push.bgn;
+ chan->chan.push.end = chan->chan.push.bgn + size;
}
return ret;
}
static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_prep(struct nouveau_cli *cli,
u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -155,8 +154,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
if (!chan)
return -ENOMEM;
- chan->device = device;
- chan->drm = drm;
+ chan->cli = cli;
chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
@@ -178,13 +176,12 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
return ret;
}
- chan->chan._push.mem.object.parent = cli->base.object.parent;
- chan->chan._push.mem.object.client = &cli->base;
- chan->chan._push.mem.object.name = "chanPush";
- chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
- chan->chan._push.wait = nouveau_channel_wait;
- chan->chan._push.kick = nouveau_channel_kick;
- chan->chan.push = &chan->chan._push;
+ chan->chan.push.mem.object.parent = cli->base.object.parent;
+ chan->chan.push.mem.object.client = &cli->base;
+ chan->chan.push.mem.object.name = "chanPush";
+ chan->chan.push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
+ chan->chan.push.wait = nouveau_channel_wait;
+ chan->chan.push.kick = nouveau_channel_kick;
/* create dma object covering the *entire* memory space that the
* pushbuf lives in, this is because the GEM code requires that
@@ -218,8 +215,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(device)->func->
- resource_addr(nvxx_device(device), 1);
+ args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -228,12 +224,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.limit = device->info.ram_user - 1;
}
} else {
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -254,7 +249,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
}
static int
-nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
+nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
@@ -279,7 +274,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
struct nvif_chan_v0 chan;
char name[TASK_COMM_LEN+16];
} args;
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
const u64 ioffset = plength;
@@ -298,7 +293,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
size = ioffset + ilength;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, device, size, &chan);
+ ret = nouveau_channel_prep(cli, size, &chan);
*pchan = chan;
if (ret)
return ret;
@@ -363,8 +358,9 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
- struct nvif_device *device = chan->device;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
int ret, i;
@@ -419,12 +415,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -465,12 +460,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
- ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
+ ret = PUSH_WAIT(&chan->chan.push, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- PUSH_DATA(chan->chan.push, 0x00000000);
+ PUSH_DATA(&chan->chan.push, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
@@ -480,26 +475,25 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
if (ret)
return ret;
- ret = PUSH_WAIT(chan->chan.push, 2);
+ ret = PUSH_WAIT(&chan->chan.push, 2);
if (ret)
return ret;
- PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
- PUSH_KICK(chan->chan.push);
+ PUSH_NVSQ(&chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
+ PUSH_KICK(&chan->chan.push);
}
/* initialise synchronisation */
- return nouveau_fence(chan->drm)->context_new(chan);
+ return nouveau_fence(drm)->context_new(chan);
}
int
-nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_new(struct nouveau_cli *cli,
bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
int ret;
- ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
+ ret = nouveau_channel_ctor(cli, priv, runm, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 5de2ef4e98c2..016f668c0bc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -8,12 +8,10 @@ struct nvif_device;
struct nouveau_channel {
struct {
- struct nvif_push _push;
- struct nvif_push *push;
+ struct nvif_push push;
} chan;
- struct nvif_device *device;
- struct nouveau_drm *drm;
+ struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
struct nvif_mem mem_userd;
@@ -62,7 +60,7 @@ struct nouveau_channel {
int nouveau_channels_init(struct nouveau_drm *);
void nouveau_channels_fini(struct nouveau_drm *);
-int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, u64 runm,
+int nouveau_channel_new(struct nouveau_cli *, bool priv, u64 runm,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d4725a968827..e2fd561cd23f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -391,7 +391,6 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
};
@@ -446,10 +445,8 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} while(0)
void
-nouveau_display_hpd_resume(struct drm_device *dev)
+nouveau_display_hpd_resume(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
if (drm->headless)
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 2ab2ddb1eadf..1f506f8b289c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -45,7 +45,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
-void nouveau_display_hpd_resume(struct drm_device *dev);
+void nouveau_display_hpd_resume(struct nouveau_drm *);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b01c029f3a90..a1f329ef0641 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -72,7 +72,7 @@ void
nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
bool no_prefetch)
{
- struct nvif_user *user = &chan->drm->client.device.user;
+ struct nvif_user *user = &chan->cli->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 6fb65b01d778..1f2d649f4b96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -294,7 +294,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
out_bo_unpin:
nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,7 +426,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
@@ -443,7 +443,7 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
@@ -516,7 +516,7 @@ static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index a58c31089613..f6e78dba594f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -63,7 +63,6 @@
#include "nouveau_abi16.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
-#include "nouveau_usif.h"
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
@@ -200,7 +199,6 @@ nouveau_cli_fini(struct nouveau_cli *cli)
flush_work(&cli->work);
WARN_ON(!list_empty(&cli->worker));
- usif_client_fini(cli);
if (cli->sched)
nouveau_sched_destroy(&cli->sched);
if (uvmm)
@@ -208,10 +206,11 @@ nouveau_cli_fini(struct nouveau_cli *cli)
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
+ cli->device.object.map.ptr = NULL;
nvif_device_dtor(&cli->device);
- mutex_lock(&cli->drm->master.lock);
+ mutex_lock(&cli->drm->client_mutex);
nvif_client_dtor(&cli->base);
- mutex_unlock(&cli->drm->master.lock);
+ mutex_unlock(&cli->drm->client_mutex);
}
static int
@@ -226,13 +225,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{}
};
static const struct nvif_mclass
- mmus[] = {
- { NVIF_CLASS_MMU_GF100, -1 },
- { NVIF_CLASS_MMU_NV50 , -1 },
- { NVIF_CLASS_MMU_NV04 , -1 },
- {}
- };
- static const struct nvif_mclass
vmms[] = {
{ NVIF_CLASS_VMM_GP100, -1 },
{ NVIF_CLASS_VMM_GM200, -1 },
@@ -241,50 +233,33 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{ NVIF_CLASS_VMM_NV04 , -1 },
{}
};
- u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
cli->drm = drm;
mutex_init(&cli->mutex);
- usif_client_init(cli);
INIT_WORK(&cli->work, nouveau_cli_work);
INIT_LIST_HEAD(&cli->worker);
mutex_init(&cli->lock);
- if (cli == &drm->master) {
- ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
- cli->name, device, &cli->base);
- } else {
- mutex_lock(&drm->master.lock);
- ret = nvif_client_ctor(&drm->master.base, cli->name, device,
- &cli->base);
- mutex_unlock(&drm->master.lock);
- }
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_client_ctor(&drm->_client, cli->name, &cli->base);
+ mutex_unlock(&drm->client_mutex);
if (ret) {
NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
- &(struct nv_device_v0) {
- .device = ~0,
- .priv = true,
- }, sizeof(struct nv_device_v0),
- &cli->device);
+ ret = nvif_device_ctor(&cli->base, "drmDevice", &cli->device);
if (ret) {
NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_mclass(&cli->device.object, mmus);
- if (ret < 0) {
- NV_PRINTK(err, cli, "No supported MMU class\n");
- goto done;
- }
+ cli->device.object.map.ptr = drm->device.object.map.ptr;
- ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+ ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", drm->mmu.object.oclass,
&cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
@@ -356,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
@@ -384,7 +359,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -407,7 +382,8 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
}
if (ret == 0) {
- struct nvif_push *push = drm->channel->chan.push;
+ struct nvif_push *push = &drm->channel->chan.push;
+
ret = PUSH_WAIT(push, 8);
if (ret == 0) {
if (device->info.chipset >= 0x11) {
@@ -432,8 +408,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
* any GPU where it's possible we'll end up using M2MF for BO moves.
*/
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
- &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_device(drm), 32, 0, false, NULL, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -578,37 +553,70 @@ nouveau_parent = {
.errorf = nouveau_drm_errorf,
};
-static int
-nouveau_drm_device_init(struct drm_device *dev)
+static void
+nouveau_drm_device_fini(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm;
- int ret;
+ struct drm_device *dev = drm->dev;
+ struct nouveau_cli *cli, *temp_cli;
- if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
- return -ENOMEM;
- dev->dev_private = drm;
- drm->dev = dev;
+ if (nouveau_pmops_runtime()) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
- nvif_parent_ctor(&nouveau_parent, &drm->parent);
- drm->master.base.object.parent = &drm->parent;
+ nouveau_led_fini(dev);
+ nouveau_dmem_fini(drm);
+ nouveau_svm_fini(drm);
+ nouveau_hwmon_fini(dev);
+ nouveau_debugfs_fini(drm);
- drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
- WQ_MAX_ACTIVE);
- if (!drm->sched_wq) {
- ret = -ENOMEM;
- goto fail_alloc;
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev, false, false);
+ nouveau_display_destroy(dev);
+
+ nouveau_accel_fini(drm);
+ nouveau_bios_takedown(dev);
+
+ nouveau_ttm_fini(drm);
+ nouveau_vga_fini(drm);
+
+ /*
+ * There may be existing clients from as-yet unclosed files. For now,
+ * clean them up here rather than deferring until the file is closed,
+ * but this is likely not correct if we want to support hot-unplugging
+ * properly.
+ */
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
+ list_del(&cli->head);
+ mutex_lock(&cli->mutex);
+ if (cli->abi16)
+ nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
+ nouveau_cli_fini(cli);
+ kfree(cli);
}
+ mutex_unlock(&drm->clients_lock);
- ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
- if (ret)
- goto fail_wq;
+ nouveau_cli_fini(&drm->client);
+ destroy_workqueue(drm->sched_wq);
+ mutex_destroy(&drm->clients_lock);
+}
+
+static int
+nouveau_drm_device_init(struct nouveau_drm *drm)
+{
+ struct drm_device *dev = drm->dev;
+ int ret;
+
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq)
+ return -ENOMEM;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
- goto fail_master;
-
- nvxx_client(&drm->client.base)->debug =
- nvkm_dbgopt(nouveau_debug, "DRM");
+ goto fail_wq;
INIT_LIST_HEAD(&drm->clients);
mutex_init(&drm->clients_lock);
@@ -658,6 +666,12 @@ nouveau_drm_device_init(struct drm_device *dev)
pm_runtime_put(dev->dev);
}
+ ret = drm_dev_register(drm->dev, 0);
+ if (ret) {
+ nouveau_drm_device_fini(drm);
+ return ret;
+ }
+
return 0;
fail_dispinit:
nouveau_display_destroy(dev);
@@ -669,67 +683,95 @@ fail_bios:
fail_ttm:
nouveau_vga_fini(drm);
nouveau_cli_fini(&drm->client);
-fail_master:
- nouveau_cli_fini(&drm->master);
fail_wq:
destroy_workqueue(drm->sched_wq);
-fail_alloc:
- nvif_parent_dtor(&drm->parent);
- kfree(drm);
return ret;
}
static void
-nouveau_drm_device_fini(struct drm_device *dev)
+nouveau_drm_device_del(struct nouveau_drm *drm)
{
- struct nouveau_cli *cli, *temp_cli;
- struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->dev)
+ drm_dev_put(drm->dev);
- if (nouveau_pmops_runtime()) {
- pm_runtime_get_sync(dev->dev);
- pm_runtime_forbid(dev->dev);
+ nvif_mmu_dtor(&drm->mmu);
+ nvif_device_dtor(&drm->device);
+ nvif_client_dtor(&drm->_client);
+ nvif_parent_dtor(&drm->parent);
+
+ mutex_destroy(&drm->client_mutex);
+ kfree(drm);
+}
+
+static struct nouveau_drm *
+nouveau_drm_device_new(const struct drm_driver *drm_driver, struct device *parent,
+ struct nvkm_device *device)
+{
+ static const struct nvif_mclass
+ mmus[] = {
+ { NVIF_CLASS_MMU_GF100, -1 },
+ { NVIF_CLASS_MMU_NV50 , -1 },
+ { NVIF_CLASS_MMU_NV04 , -1 },
+ {}
+ };
+ struct nouveau_drm *drm;
+ int ret;
+
+ drm = kzalloc(sizeof(*drm), GFP_KERNEL);
+ if (!drm)
+ return ERR_PTR(-ENOMEM);
+
+ drm->nvkm = device;
+
+ drm->dev = drm_dev_alloc(drm_driver, parent);
+ if (IS_ERR(drm->dev)) {
+ ret = PTR_ERR(drm->dev);
+ goto done;
}
- nouveau_led_fini(dev);
- nouveau_dmem_fini(drm);
- nouveau_svm_fini(drm);
- nouveau_hwmon_fini(dev);
- nouveau_debugfs_fini(drm);
+ drm->dev->dev_private = drm;
+ dev_set_drvdata(parent, drm);
- if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev, false, false);
- nouveau_display_destroy(dev);
+ nvif_parent_ctor(&nouveau_parent, &drm->parent);
+ mutex_init(&drm->client_mutex);
+ drm->_client.object.parent = &drm->parent;
- nouveau_accel_fini(drm);
- nouveau_bios_takedown(dev);
+ ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, "drm",
+ nouveau_name(drm->dev), &drm->_client);
+ if (ret)
+ goto done;
- nouveau_ttm_fini(drm);
- nouveau_vga_fini(drm);
+ ret = nvif_device_ctor(&drm->_client, "drmDevice", &drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ goto done;
+ }
- /*
- * There may be existing clients from as-yet unclosed files. For now,
- * clean them up here rather than deferring until the file is closed,
- * but this likely not correct if we want to support hot-unplugging
- * properly.
- */
- mutex_lock(&drm->clients_lock);
- list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
- list_del(&cli->head);
- mutex_lock(&cli->mutex);
- if (cli->abi16)
- nouveau_abi16_fini(cli->abi16);
- mutex_unlock(&cli->mutex);
- nouveau_cli_fini(cli);
- kfree(cli);
+ ret = nvif_device_map(&drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Failed to map PRI: %d\n", ret);
+ goto done;
}
- mutex_unlock(&drm->clients_lock);
- nouveau_cli_fini(&drm->client);
- nouveau_cli_fini(&drm->master);
- destroy_workqueue(drm->sched_wq);
- nvif_parent_dtor(&drm->parent);
- mutex_destroy(&drm->clients_lock);
- kfree(drm);
+ ret = nvif_mclass(&drm->device.object, mmus);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MMU class\n");
+ goto done;
+ }
+
+ ret = nvif_mmu_ctor(&drm->device.object, "drmMmu", mmus[ret].oclass, &drm->mmu);
+ if (ret) {
+ NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ goto done;
+ }
+
+done:
+ if (ret) {
+ nouveau_drm_device_del(drm);
+ drm = NULL;
+ }
+
+ return ret ? ERR_PTR(ret) : drm;
}
/*
@@ -774,8 +816,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
static void quirk_broken_nv_runpm(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct pci_dev *bridge = pci_upstream_bridge(pdev);
if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
@@ -794,7 +835,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct nvkm_device *device;
- struct drm_device *drm_dev;
+ struct nouveau_drm *drm;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -803,31 +844,23 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
- true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug, &device);
if (ret)
return ret;
- nvkm_device_del(&device);
-
/* Remove conflicting drivers (vesafb, efifb etc). */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
if (ret)
return ret;
- ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, &device);
- if (ret)
- return ret;
-
pci_set_master(pdev);
if (nouveau_atomic)
driver_pci.driver_features |= DRIVER_ATOMIC;
- drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
- if (IS_ERR(drm_dev)) {
- ret = PTR_ERR(drm_dev);
+ drm = nouveau_drm_device_new(&driver_pci, &pdev->dev, device);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
goto fail_nvkm;
}
@@ -835,69 +868,55 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- pci_set_drvdata(pdev, drm_dev);
-
- ret = nouveau_drm_device_init(drm_dev);
+ ret = nouveau_drm_device_init(drm);
if (ret)
goto fail_pci;
- ret = drm_dev_register(drm_dev, pent->driver_data);
- if (ret)
- goto fail_drm_dev_init;
-
- if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
- drm_fbdev_ttm_setup(drm_dev, 8);
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ drm_fbdev_ttm_setup(drm->dev, 8);
else
- drm_fbdev_ttm_setup(drm_dev, 32);
+ drm_fbdev_ttm_setup(drm->dev, 32);
quirk_broken_nv_runpm(pdev);
return 0;
-fail_drm_dev_init:
- nouveau_drm_device_fini(drm_dev);
fail_pci:
pci_disable_device(pdev);
fail_drm:
- drm_dev_put(drm_dev);
+ nouveau_drm_device_del(drm);
fail_nvkm:
nvkm_device_del(&device);
return ret;
}
void
-nouveau_drm_device_remove(struct drm_device *dev)
+nouveau_drm_device_remove(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_client *client;
- struct nvkm_device *device;
+ struct nvkm_device *device = drm->nvkm;
- drm_dev_unplug(dev);
+ drm_dev_unplug(drm->dev);
- client = nvxx_client(&drm->client.base);
- device = nvkm_device_find(client->device);
-
- nouveau_drm_device_fini(dev);
- drm_dev_put(dev);
+ nouveau_drm_device_fini(drm);
+ nouveau_drm_device_del(drm);
nvkm_device_del(&device);
}
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/* revert our workaround */
if (drm->old_pm_cap)
pdev->pm_cap = drm->old_pm_cap;
- nouveau_drm_device_remove(dev);
+ nouveau_drm_device_remove(drm);
pci_disable_device(pdev);
}
static int
-nouveau_do_suspend(struct drm_device *dev, bool runtime)
+nouveau_do_suspend(struct nouveau_drm *drm, bool runtime)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_device *dev = drm->dev;
struct ttm_resource_manager *man;
int ret;
@@ -939,7 +958,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "suspending object tree...\n");
- ret = nvif_client_suspend(&drm->master.base);
+ ret = nvif_client_suspend(&drm->_client);
if (ret)
goto fail_client;
@@ -958,13 +977,13 @@ fail_display:
}
static int
-nouveau_do_resume(struct drm_device *dev, bool runtime)
+nouveau_do_resume(struct nouveau_drm *drm, bool runtime)
{
+ struct drm_device *dev = drm->dev;
int ret = 0;
- struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- ret = nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->_client);
if (ret) {
NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
return ret;
@@ -991,14 +1010,14 @@ int
nouveau_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
- ret = nouveau_do_suspend(drm_dev, false);
+ ret = nouveau_do_suspend(drm, false);
if (ret)
return ret;
@@ -1013,11 +1032,11 @@ int
nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
@@ -1027,10 +1046,10 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm, false);
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1038,17 +1057,17 @@ nouveau_pmops_resume(struct device *dev)
static int
nouveau_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_suspend(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_suspend(drm, false);
}
static int
nouveau_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_resume(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_resume(drm, false);
}
bool
@@ -1063,7 +1082,7 @@ static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1072,12 +1091,12 @@ nouveau_pmops_runtime_suspend(struct device *dev)
}
nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev, true);
+ ret = nouveau_do_suspend(drm, true);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
}
@@ -1085,9 +1104,8 @@ static int
nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct nvif_device *device = &drm->client.device;
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1102,7 +1120,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, true);
+ ret = nouveau_do_resume(drm, true);
if (ret) {
NV_ERROR(drm, "resume failed with: %d\n", ret);
return ret;
@@ -1110,10 +1128,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1249,7 +1267,7 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
- ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
+ ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
break;
default:
ret = drm_ioctl(file, cmd, arg);
@@ -1274,6 +1292,7 @@ nouveau_driver_fops = {
.compat_ioctl = nouveau_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static struct drm_driver
@@ -1285,7 +1304,6 @@ driver_stub = {
DRIVER_RENDER,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
- .lastclose = nouveau_vga_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = nouveau_drm_debugfs_init,
@@ -1369,15 +1387,14 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
struct nvkm_device **pdevice)
{
- struct drm_device *drm;
+ struct nouveau_drm *drm;
int err;
- err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, pdevice);
+ err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug, pdevice);
if (err)
goto err_free;
- drm = drm_dev_alloc(&driver_platform, &pdev->dev);
+ drm = nouveau_drm_device_new(&driver_platform, &pdev->dev, *pdevice);
if (IS_ERR(drm)) {
err = PTR_ERR(drm);
goto err_free;
@@ -1387,12 +1404,10 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
if (err)
goto err_put;
- platform_set_drvdata(pdev, drm);
-
- return drm;
+ return drm->dev;
err_put:
- drm_dev_put(drm);
+ nouveau_drm_device_del(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 25fca98a20bc..685d6ca3d8aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -201,8 +201,13 @@ u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
#include <nvif/parent.h>
struct nouveau_drm {
+ struct nvkm_device *nvkm;
struct nvif_parent parent;
- struct nouveau_cli master;
+ struct mutex client_mutex;
+ struct nvif_client _client;
+ struct nvif_device device;
+ struct nvif_mmu mmu;
+
struct nouveau_cli client;
struct drm_device *dev;
@@ -326,25 +331,28 @@ bool nouveau_pmops_runtime(void);
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *,
struct platform_device *, struct nvkm_device **);
-void nouveau_drm_device_remove(struct drm_device *dev);
+void nouveau_drm_device_remove(struct nouveau_drm *);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
-#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
-#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+#define NV_PRINTK_(l,drm,f,a...) do { \
+ dev_##l((drm)->nvkm->dev, "drm: "f, ##a); \
+} while(0)
+#define NV_FATAL(drm,f,a...) NV_PRINTK_(crit, (drm), f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK_(err, (drm), f, ##a)
+#define NV_WARN(drm,f,a...) NV_PRINTK_(warn, (drm), f, ##a)
+#define NV_INFO(drm,f,a...) NV_PRINTK_(info, (drm), f, ##a)
#define NV_DEBUG(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_DRIVER)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_ATOMIC(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_ATOMIC)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a)
@@ -355,4 +363,41 @@ void nouveau_drm_device_remove(struct drm_device *dev);
extern int nouveau_modeset;
+/*XXX: Don't use these in new code.
+ *
+ * These accessors are used in a few places (mostly older code paths)
+ * to get direct access to NVKM structures, where a more well-defined
+ * interface doesn't exist. Outside of the current use, these should
+ * not be relied on, and instead be implemented as NVIF.
+ *
+ * This is especially important when considering GSP-RM, as a lot of the
+ * modules don't exist, or are "stub" implementations that just allow
+ * the GSP-RM paths to be bootstrapped.
+ */
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/gpio.h>
+#include <subdev/clk.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+static inline struct nvkm_device *
+nvxx_device(struct nouveau_drm *drm)
+{
+ return drm->nvkm;
+}
+
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_iccsense(a) nvxx_device(a)->iccsense
+#define nvxx_therm(a) nvxx_device(a)->therm
+#define nvxx_volt(a) nvxx_device(a)->volt
+
+#include <engine/gr.h>
+
+#define nvxx_gr(a) nvxx_device(a)->gr
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 93f08f9479d8..09686d038d60 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -181,8 +181,9 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nouveau_fence_priv *priv = (void*)drm->fence;
struct {
struct nvif_event_v0 base;
struct nvif_chan_event_v0 host;
@@ -193,14 +194,14 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
- fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;
+ fctx->context = drm->runl[chan->runlist].context_base + chan->chid;
- if (chan == chan->drm->cechan)
+ if (chan == drm->cechan)
strcpy(fctx->name, "copy engine channel");
- else if (chan == chan->drm->channel)
+ else if (chan == drm->channel)
strcpy(fctx->name, "generic kernel channel");
else
- strcpy(fctx->name, nvxx_client(&cli->base)->name);
+ strcpy(fctx->name, cli->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
@@ -221,7 +222,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+ struct nouveau_fence_priv *priv = (void*)chan->cli->drm->fence;
int ret;
fence->timeout = jiffies + (15 * HZ);
@@ -382,7 +383,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
- f = nouveau_local_fence(fence, chan->drm);
+ f = nouveau_local_fence(fence, chan->cli->drm);
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
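Sketch of the pointer chain the hunks above switch to (assuming `struct nouveau_channel` now stores a `cli` backpointer, which replaces the removed `chan->drm`):

	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = cli->drm;   /* formerly chan->drm */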
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5a887d67dc0e..9ae2cee1c7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -567,10 +567,11 @@ retry:
}
static int
-validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+validate_list(struct nouveau_channel *chan,
struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -642,7 +643,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return ret;
}
- ret = validate_list(chan, cli, &op->list, pbbo);
+ ret = validate_list(chan, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
@@ -870,7 +871,7 @@ revalidate:
}
} else
if (drm->client.device.info.chipset >= 0x25) {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
@@ -880,11 +881,11 @@ revalidate:
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
- PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
}
} else {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
@@ -913,10 +914,10 @@ revalidate:
push[i].length - 8) / 4, cmd);
}
- PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_DATA(&chan->chan.push, 0);
}
}
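Sketch of the push-buffer change (`chan->chan.push` is now embedded rather than a pointer, so callers take its address; the PUSH_* helpers still receive a `struct nvif_push *`):

	struct nvif_push *push = &chan->chan.push;   /* was: push = chan->chan.push */
	ret = PUSH_WAIT(push, 2);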
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index db30a4c2cd4d..5c07a9ee8b77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -52,7 +52,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -64,7 +64,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -85,7 +85,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -97,7 +97,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -118,7 +118,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -134,7 +134,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -150,7 +150,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -173,7 +173,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -247,7 +247,7 @@ static umode_t
nouveau_power_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense || !iccsense->data_valid || list_empty(&iccsense->rails))
return 0;
@@ -272,7 +272,7 @@ static umode_t
nouveau_temp_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
return 0;
@@ -296,7 +296,7 @@ static umode_t
nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get ||
therm->fan_get(therm) < 0)
@@ -315,7 +315,7 @@ static umode_t
nouveau_input_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
if (!volt || nvkm_volt_get(volt) < 0)
return 0;
@@ -335,7 +335,7 @@ static umode_t
nouveau_fan_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_fan_sense(therm) < 0)
return 0;
@@ -367,7 +367,7 @@ nouveau_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
if (!therm || !therm->attr_get)
@@ -416,7 +416,7 @@ nouveau_fan_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm)
return -EOPNOTSUPP;
@@ -439,7 +439,7 @@ nouveau_in_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
int ret;
if (!volt)
@@ -470,7 +470,7 @@ nouveau_pwm_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get)
return -EOPNOTSUPP;
@@ -496,7 +496,7 @@ nouveau_power_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense)
return -EOPNOTSUPP;
@@ -525,7 +525,7 @@ nouveau_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -559,7 +559,7 @@ nouveau_pwm_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -664,9 +664,9 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
+ struct nvkm_therm *therm = nvxx_therm(drm);
+ struct nvkm_volt *volt = nvxx_volt(drm);
const struct attribute_group *special_groups[N_ATTR_GROUPS];
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
index 2c5e0628da12..ac950518a820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.c
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -78,7 +78,7 @@ int
nouveau_led_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_gpio_func logo_led;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 25f31d5169e5..fac92fdbf9cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -78,20 +78,19 @@ nouveau_mem_map(struct nouveau_mem *mem,
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
- mutex_lock(&mem->cli->drm->master.lock);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[1]);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[0]);
+ mutex_lock(&mem->drm->client_mutex);
nvif_mem_dtor(&mem->mem);
- mutex_unlock(&mem->cli->drm->master.lock);
+ mutex_unlock(&mem->drm->client_mutex);
}
int
nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
struct nvif_mem_ram_v0 args = {};
u8 type;
int ret;
@@ -114,11 +113,11 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
else
args.dma = tt->dma_address;
- mutex_lock(&drm->master.lock);
- ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT,
reg->size,
&args, sizeof(args), &mem->mem);
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
return ret;
}
@@ -126,16 +125,15 @@ int
nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
u64 size = ALIGN(reg->size, 1 << page);
int ret;
- mutex_lock(&drm->master.lock);
- switch (cli->mem->oclass) {
+ mutex_lock(&drm->client_mutex);
+ switch (mmu->mem) {
case NVIF_CLASS_MEM_GF100:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
@@ -143,7 +141,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
@@ -156,7 +154,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
reg->start = mem->mem.addr >> PAGE_SHIFT;
return ret;
@@ -173,7 +171,7 @@ nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
}
int
-nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+nouveau_mem_new(struct nouveau_drm *drm, u8 kind, u8 comp,
struct ttm_resource **res)
{
struct nouveau_mem *mem;
@@ -181,7 +179,7 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
- mem->cli = cli;
+ mem->drm = drm;
mem->kind = kind;
mem->comp = comp;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 5365a3d3a17f..a070ee049f6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -8,7 +8,7 @@ struct ttm_tt;
struct nouveau_mem {
struct ttm_resource base;
- struct nouveau_cli *cli;
+ struct nouveau_drm *drm;
u8 kind;
u8 comp;
struct nvif_mem mem;
@@ -21,7 +21,7 @@ nouveau_mem(struct ttm_resource *reg)
return container_of(reg, struct nouveau_mem, base);
}
-int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+int nouveau_mem_new(struct nouveau_drm *, u8 kind, u8 comp,
struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource_manager *man,
struct ttm_resource *);
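Sketch of an updated allocation call (matching the nouveau_ttm.c hunks below; `nvbo` and `res` are assumed to be in scope):

	/* the nouveau_cli parameter is gone; serialization now uses drm->client_mutex */
	ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);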
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 1d49ebdfd5dc..adb802421fda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -35,7 +35,6 @@
#include <nvif/ioctl.h>
#include "nouveau_drv.h"
-#include "nouveau_usif.h"
static void
nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
@@ -98,5 +97,4 @@ nvif_driver_nvkm = {
.ioctl = nvkm_client_ioctl,
.map = nvkm_client_map,
.unmap = nvkm_client_unmap,
- .keep = false,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index bf2dc7567ea4..829fdc6e4031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -26,7 +26,6 @@ static int nouveau_platform_probe(struct platform_device *pdev)
const struct nvkm_device_tegra_func *func;
struct nvkm_device *device = NULL;
struct drm_device *drm;
- int ret;
func = of_device_get_match_data(&pdev->dev);
@@ -34,19 +33,14 @@ static int nouveau_platform_probe(struct platform_device *pdev)
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = drm_dev_register(drm, 0);
- if (ret < 0) {
- drm_dev_put(drm);
- return ret;
- }
-
return 0;
}
static void nouveau_platform_remove(struct platform_device *pdev)
{
- struct drm_device *dev = platform_get_drvdata(pdev);
- nouveau_drm_device_remove(dev);
+ struct nouveau_drm *drm = platform_get_drvdata(pdev);
+
+ nouveau_drm_device_remove(drm);
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 32fa2e273965..eb6c3f9a01f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return stat;
}
@@ -404,7 +404,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
int ret;
if (!wq) {
@@ -418,7 +418,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
NOUVEAU_SCHED_PRIORITY_COUNT,
- credit_limit, 0, job_hang_limit,
+ credit_limit, 0, timeout,
NULL, NULL, "nouveau_sched", drm->dev->dev);
if (ret)
goto fail_wq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b14895f75b3c..bd870028514b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -43,7 +43,7 @@ nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resou
return ret;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ ret = nouveau_mem_map(mem, &drm->client.vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 486f39f31a38..e244927eb5d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -73,7 +73,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -105,7 +105,7 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -132,13 +132,13 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
mem = nouveau_mem(*res);
ttm_resource_init(bo, place, *res);
- ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+ ret = nvif_vmm_get(&drm->client.vmm.vmm, PTES, false, 12, 0,
(long)(*res)->size, &mem->vma[0]);
if (ret) {
nouveau_mem_del(man, *res);
@@ -261,7 +261,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
@@ -348,7 +348,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
deleted file mode 100644
index 002d1479ba89..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-
-#include "nouveau_drv.h"
-#include "nouveau_usif.h"
-#include "nouveau_abi16.h"
-
-#include <nvif/unpack.h>
-#include <nvif/client.h>
-#include <nvif/ioctl.h>
-
-#include <nvif/class.h>
-#include <nvif/cl0080.h>
-
-struct usif_object {
- struct list_head head;
- u8 route;
- u64 token;
-};
-
-static void
-usif_object_dtor(struct usif_object *object)
-{
- list_del(&object->head);
- kfree(object);
-}
-
-static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_new_v0 v0;
- } *args = data;
- struct usif_object *object;
- int ret = -ENOSYS;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
- return ret;
-
- switch (args->v0.oclass) {
- case NV_DMA_FROM_MEMORY:
- case NV_DMA_TO_MEMORY:
- case NV_DMA_IN_MEMORY:
- return -EINVAL;
- case NV_DEVICE: {
- union {
- struct nv_device_v0 v0;
- } *args = data;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
- return ret;
-
- args->v0.priv = false;
- break;
- }
- default:
- if (!parent_abi16)
- return -EINVAL;
- break;
- }
-
- if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
- return -ENOMEM;
- list_add(&object->head, &cli->objects);
-
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- if (ret) {
- usif_object_dtor(object);
- return ret;
- }
-
- args->v0.token = object->token;
- args->v0.route = object->route;
- return 0;
-}
-
-int
-usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(filp);
- struct nvif_client *client = &cli->base;
- void *data = kmalloc(argc, GFP_KERNEL);
- u32 size = argc;
- union {
- struct nvif_ioctl_v0 v0;
- } *argv = data;
- struct usif_object *object;
- bool abi16 = false;
- u8 owner;
- int ret;
-
- if (ret = -ENOMEM, !argv)
- goto done;
- if (ret = -EFAULT, copy_from_user(argv, user, size))
- goto done;
-
- if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
- /* block access to objects not created via this interface */
- owner = argv->v0.owner;
- if (argv->v0.object == 0ULL &&
- argv->v0.type != NVIF_IOCTL_V0_DEL)
- argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
- else
- argv->v0.owner = NVDRM_OBJECT_USIF;
- } else
- goto done;
-
- /* USIF slightly abuses some return-only ioctl members in order
- * to provide interoperability with the older ABI16 objects
- */
- mutex_lock(&cli->mutex);
- if (argv->v0.route) {
- if (ret = -EINVAL, argv->v0.route == 0xff)
- ret = nouveau_abi16_usif(filp, argv, argc);
- if (ret) {
- mutex_unlock(&cli->mutex);
- goto done;
- }
-
- abi16 = true;
- }
-
- switch (argv->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc, abi16);
- break;
- default:
- ret = nvif_client_ioctl(client, argv, argc);
- break;
- }
- if (argv->v0.route == NVDRM_OBJECT_USIF) {
- object = (void *)(unsigned long)argv->v0.token;
- argv->v0.route = object->route;
- argv->v0.token = object->token;
- if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
- list_del(&object->head);
- kfree(object);
- }
- } else {
- argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
- argv->v0.token = 0;
- }
- argv->v0.owner = owner;
- mutex_unlock(&cli->mutex);
-
- if (copy_to_user(user, argv, argc))
- ret = -EFAULT;
-done:
- kfree(argv);
- return ret;
-}
-
-void
-usif_client_fini(struct nouveau_cli *cli)
-{
- struct usif_object *object, *otemp;
-
- list_for_each_entry_safe(object, otemp, &cli->objects, head) {
- usif_object_dtor(object);
- }
-}
-
-void
-usif_client_init(struct nouveau_cli *cli)
-{
- INIT_LIST_HEAD(&cli->objects);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
deleted file mode 100644
index dc90d4a9d0d9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NOUVEAU_USIF_H__
-#define __NOUVEAU_USIF_H__
-
-void usif_client_init(struct nouveau_cli *);
-void usif_client_fini(struct nouveau_cli *);
-int usif_ioctl(struct drm_file *, void __user *, u32);
-int usif_notify(const void *, u32, const void *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index f8bf0ec26844..ab4e11dc0b8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -11,7 +11,7 @@
static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
- struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct nvif_object *device = &drm->client.device.object;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
@@ -34,7 +34,8 @@ static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct drm_device *dev = drm->dev;
if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
return;
@@ -56,21 +57,23 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- drm_fb_helper_output_poll_changed(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct drm_device *dev = drm->dev;
+
+ drm_client_dev_hotplug(dev);
}
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return atomic_read(&dev->open_count) == 0;
+ return atomic_read(&drm->dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
@@ -125,10 +128,3 @@ nouveau_vga_fini(struct nouveau_drm *drm)
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
-
-
-void
-nouveau_vga_lastclose(struct drm_device *dev)
-{
- vga_switcheroo_process_delayed_switch();
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
index 951a83f984dd..63be415d2a44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -4,6 +4,5 @@
void nouveau_vga_init(struct nouveau_drm *);
void nouveau_vga_fini(struct nouveau_drm *);
-void nouveau_vga_lastclose(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index cdbc75e3d1f6..fa5c6029f783 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ struct nvif_push *push = &unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index c6a0db5b9e21..8c73f40e3bda 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -32,7 +32,7 @@
int
nv10_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = fence->channel->chan.push;
+ struct nvif_push *push = &fence->channel->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
@@ -88,7 +88,7 @@ nv10_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
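Sketch of the buffer-teardown change (the final-unreference idiom becomes an explicit destructor call):

	nouveau_bo_fini(priv->bo);   /* was: nouveau_bo_ref(NULL, &priv->bo); */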
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 07c2e0878c24..d09bfd11369f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -36,11 +36,11 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)prev->user.client;
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nouveau_cli *cli = prev->cli;
+ struct nv10_fence_priv *priv = cli->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
- struct nvif_push *ppush = prev->chan.push;
- struct nvif_push *npush = chan->chan.push;
+ struct nvif_push *ppush = &prev->chan.push;
+ struct nvif_push *npush = &chan->chan.push;
u32 value;
int ret;
@@ -76,7 +76,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
u32 start = reg->start * PAGE_SIZE;
@@ -141,7 +141,7 @@ nv17_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index ea1e1f480bfe..62e28dddf87c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,7 +35,7 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
@@ -92,7 +92,7 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 812b8c62eeba..aa7dd0c5d917 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -35,7 +35,7 @@
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 8);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -58,7 +58,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 7);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -79,7 +79,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static inline u32
nv84_fence_chid(struct nouveau_channel *chan)
{
- return chan->drm->runl[chan->runlist].chan_id_base + chan->chid;
+ return chan->cli->drm->runl[chan->runlist].chan_id_base + chan->chid;
}
static int
@@ -105,14 +105,14 @@ nv84_fence_sync(struct nouveau_fence *fence,
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
return nouveau_bo_rd32(priv->bo, nv84_fence_chid(chan) * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, nv84_fence_chid(chan) * 16 / 4, fctx->base.sequence);
@@ -127,7 +127,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx;
int ret;
@@ -188,7 +188,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -232,7 +232,7 @@ nv84_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index e1461c0b0779..a5e98d0d4217 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,7 +34,7 @@
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 6);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
@@ -57,7 +57,7 @@ nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 5);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3a27245f467f..fdf5054ed7d8 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -30,12 +30,6 @@
#include <nvif/if0000.h>
int
-nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
-{
- return client->driver->ioctl(client->object.priv, data, size, NULL);
-}
-
-int
nvif_client_suspend(struct nvif_client *client)
{
return client->driver->suspend(client->object.priv);
@@ -51,22 +45,13 @@ void
nvif_client_dtor(struct nvif_client *client)
{
nvif_object_dtor(&client->object);
- if (client->driver) {
- if (client->driver->fini)
- client->driver->fini(client->object.priv);
- client->driver = NULL;
- }
+ client->driver = NULL;
}
int
-nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *client)
+nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *client)
{
- struct nvif_client_v0 args = { .device = device };
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_nop_v0 nop;
- } nop = {};
+ struct nvif_client_v0 args = {};
int ret;
strscpy_pad(args.name, name, sizeof(args.name));
@@ -79,15 +64,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
- client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->driver = parent->driver;
-
- if (ret == 0) {
- ret = nvif_client_ioctl(client, &nop, sizeof(nop));
- client->version = nop.nop.version;
- }
-
- if (ret)
- nvif_client_dtor(client);
- return ret;
+ return 0;
}
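Sketch of the simplified client constructor (the `device` argument and the NOP version probe are gone; the name string here is illustrative):

	ret = nvif_client_ctor(parent, "drm-client", &client);   /* driver inherited from parent */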
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 8c3d883f3313..24880931039f 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -21,8 +21,8 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <nvif/device.h>
+#include <nvif/client.h>
u64
nvif_device_time(struct nvif_device *device)
@@ -38,6 +38,12 @@ nvif_device_time(struct nvif_device *device)
return device->user.func->time(&device->user);
}
+int
+nvif_device_map(struct nvif_device *device)
+{
+ return nvif_object_map(&device->object, NULL, 0);
+}
+
void
nvif_device_dtor(struct nvif_device *device)
{
@@ -48,11 +54,10 @@ nvif_device_dtor(struct nvif_device *device)
}
int
-nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
- s32 oclass, void *data, u32 size, struct nvif_device *device)
+nvif_device_ctor(struct nvif_client *client, const char *name, struct nvif_device *device)
{
- int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
- oclass, data, size, &device->object);
+ int ret = nvif_object_ctor(&client->object, name ? name : "nvifDevice", 0,
+ 0x0080, NULL, 0, &device->object);
device->runlist = NULL;
device->user.func = NULL;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
index 5e00dd07afed..78706e97a6a2 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.c
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -24,35 +24,17 @@
#include <nvif/driver.h>
#include <nvif/client.h>
-static const struct nvif_driver *
-nvif_driver[] = {
-#ifdef __KERNEL__
- &nvif_driver_nvkm,
-#else
- &nvif_driver_drm,
- &nvif_driver_lib,
- &nvif_driver_null,
-#endif
- NULL
-};
-
int
nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *client)
{
- int ret = -EINVAL, i;
+ int ret;
+
+ client->driver = &nvif_driver_nvkm;
- for (i = 0; (client->driver = nvif_driver[i]); i++) {
- if (!drv || !strcmp(client->driver->name, drv)) {
- ret = client->driver->init(name, device, cfg, dbg,
- &client->object.priv);
- if (ret == 0)
- break;
- client->driver->fini(client->object.priv);
- }
- }
+ ret = client->driver->init(name, device, cfg, dbg, &client->object.priv);
+ if (ret)
+ return ret;
- if (ret == 0)
- ret = nvif_client_ctor(client, name, device, client);
- return ret;
+ return nvif_client_ctor(client, name, client);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 1d19c87eaec1..0b87278ac0f8 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -40,7 +40,6 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
args->v0.object = nvif_handle(object);
else
args->v0.object = 0;
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
} else
return -ENOSYS;
@@ -98,43 +97,6 @@ nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
return ret;
}
-u32
-nvif_object_rd(struct nvif_object *object, int size, u64 addr)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_rd_v0 rd;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_RD,
- .rd.size = size,
- .rd.addr = addr,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- return 0;
- }
- return args.rd.data;
-}
-
-void
-nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_wr_v0 wr;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_WR,
- .wr.size = size,
- .wr.addr = addr,
- .wr.data = data,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- }
-}
-
int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
@@ -299,8 +261,6 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_NEW;
args->new.version = 0;
- args->new.route = parent->client->route;
- args->new.token = nvif_handle(object);
args->new.object = nvif_handle(object);
args->new.handle = handle;
args->new.oclass = oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index c55662937ab2..72c88db627a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -42,7 +42,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
args->v0.name[sizeof(args->v0.name) - 1] = 0;
- ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+ ret = nvkm_client_new(args->v0.name, oclass->client->device, NULL,
NULL, oclass->client->event, &client);
if (ret)
return ret;
@@ -51,8 +51,6 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
client->object.client = oclass->client;
client->object.handle = oclass->handle;
- client->object.route = oclass->route;
- client->object.token = oclass->token;
client->object.object = oclass->object;
client->debug = oclass->client->debug;
*pobject = &client->object;
@@ -67,58 +65,6 @@ nvkm_uclient_sclass = {
.ctor = nvkm_uclient_new,
};
-static const struct nvkm_object_func nvkm_client;
-struct nvkm_client *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
- struct nvkm_object *object;
-
- object = nvkm_object_search(client, handle, &nvkm_client);
- if (IS_ERR(object))
- return (void *)object;
-
- return nvkm_client(object);
-}
-
-static int
-nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
-{
- union {
- struct nvif_client_devlist_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(&client->object, "client devlist size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
- args->v0.version, args->v0.count);
- if (size == sizeof(args->v0.device[0]) * args->v0.count) {
- ret = nvkm_device_list(args->v0.device, args->v0.count);
- if (ret >= 0) {
- args->v0.count = ret;
- ret = 0;
- }
- } else {
- ret = -EINVAL;
- }
- }
-
- return ret;
-}
-
-static int
-nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_client *client = nvkm_client(object);
- switch (mthd) {
- case NVIF_CLIENT_V0_DEVLIST:
- return nvkm_client_mthd_devlist(client, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
static int
nvkm_client_child_new(const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
@@ -144,12 +90,6 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
return 0;
}
-static int
-nvkm_client_fini(struct nvkm_object *object, bool suspend)
-{
- return 0;
-}
-
static void *
nvkm_client_dtor(struct nvkm_object *object)
{
@@ -159,8 +99,6 @@ nvkm_client_dtor(struct nvkm_object *object)
static const struct nvkm_object_func
nvkm_client = {
.dtor = nvkm_client_dtor,
- .fini = nvkm_client_fini,
- .mthd = nvkm_client_mthd,
.sclass = nvkm_client_child_get,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 0b33287e43a7..45051a1249da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -33,18 +33,7 @@ static int
nvkm_ioctl_nop(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_nop_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "nop size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
- args->v0.version = NVIF_VERSION_LATEST;
- }
-
- return ret;
+ return -ENOSYS;
}
#include <nvif/class.h>
@@ -112,10 +101,9 @@ nvkm_ioctl_new(struct nvkm_client *client,
nvif_ioctl(parent, "new size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(parent, "new vers %d handle %08x class %08x "
- "route %02x token %llx object %016llx\n",
+ nvif_ioctl(parent, "new vers %d handle %08x class %08x object %016llx\n",
args->v0.version, args->v0.handle, args->v0.oclass,
- args->v0.route, args->v0.token, args->v0.object);
+ args->v0.object);
} else
return ret;
@@ -127,8 +115,6 @@ nvkm_ioctl_new(struct nvkm_client *client,
do {
memset(&oclass, 0x00, sizeof(oclass));
oclass.handle = args->v0.handle;
- oclass.route = args->v0.route;
- oclass.token = args->v0.token;
oclass.object = args->v0.object;
oclass.client = client;
oclass.parent = parent;
@@ -205,69 +191,14 @@ static int
nvkm_ioctl_rd(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_rd_v0 v0;
- } *args = data;
- union {
- u8 b08;
- u16 b16;
- u32 b32;
- } v;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "rd size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
- args->v0.version, args->v0.size, args->v0.addr);
- switch (args->v0.size) {
- case 1:
- ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
- args->v0.data = v.b08;
- break;
- case 2:
- ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
- args->v0.data = v.b16;
- break;
- case 4:
- ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
- args->v0.data = v.b32;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
-
- return ret;
+ return -ENOSYS;
}
static int
nvkm_ioctl_wr(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_wr_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "wr size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "wr vers %d size %d addr %016llx data %08x\n",
- args->v0.version, args->v0.size, args->v0.addr,
- args->v0.data);
- } else
- return ret;
-
- switch (args->v0.size) {
- case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
- case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
- case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
- default:
- break;
- }
-
- return -EINVAL;
+ return -ENOSYS;
}
static int
@@ -331,7 +262,7 @@ nvkm_ioctl_v0[] = {
static int
nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
- void *data, u32 size, u8 owner, u8 *route, u64 *token)
+ void *data, u32 size)
{
struct nvkm_object *object;
int ret;
@@ -342,13 +273,6 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
return PTR_ERR(object);
}
- if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
- nvif_ioctl(&client->object, "route != owner\n");
- return -EACCES;
- }
- *route = object->route;
- *token = object->token;
-
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
ret = nvkm_ioctl_v0[type].func(client, object, data, size);
@@ -374,8 +298,7 @@ nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
args->v0.version, args->v0.type, args->v0.object,
args->v0.owner);
ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
- data, size, args->v0.owner,
- &args->v0.route, &args->v0.token);
+ data, size);
}
if (ret != 1) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index aea3ba72027a..390c265cf8af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -133,54 +133,6 @@ nvkm_object_unmap(struct nvkm_object *object)
}
int
-nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- if (likely(object->func->rd08))
- return object->func->rd08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- if (likely(object->func->rd16))
- return object->func->rd16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- if (likely(object->func->rd32))
- return object->func->rd32(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- if (likely(object->func->wr08))
- return object->func->wr08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- if (likely(object->func->wr16))
- return object->func->wr16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- if (likely(object->func->wr32))
- return object->func->wr32(object, addr, data);
- return -ENODEV;
-}
-
-int
nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -313,8 +265,6 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
object->engine = nvkm_engine_ref(oclass->engine);
object->oclass = oclass->base.oclass;
object->handle = oclass->handle;
- object->route = oclass->route;
- object->token = oclass->token;
object->object = oclass->object;
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index 3385528da650..5db80d1780f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -56,42 +56,6 @@ nvkm_oproxy_unmap(struct nvkm_object *object)
}
static int
-nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -197,12 +161,6 @@ nvkm_oproxy_func = {
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
.unmap = nvkm_oproxy_unmap,
- .rd08 = nvkm_oproxy_rd08,
- .rd16 = nvkm_oproxy_rd16,
- .rd32 = nvkm_oproxy_rd32,
- .wr08 = nvkm_oproxy_wr08,
- .wr16 = nvkm_oproxy_wr16,
- .wr32 = nvkm_oproxy_wr32,
.bind = nvkm_oproxy_bind,
.sclass = nvkm_oproxy_sclass,
.uevent = nvkm_oproxy_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
index ba9d9edaec75..cc254c390a57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
@@ -116,9 +116,9 @@ nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
struct nvkm_client *client = uevent->object.client;
if (uevent->func)
- return uevent->func(uevent->parent, uevent->object.token, bits);
+ return uevent->func(uevent->parent, uevent->object.object, bits);
- return client->event(uevent->object.token, NULL, 0);
+ return client->event(uevent->object.object, NULL, 0);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index bfaaff645a34..2e48b0816670 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -19,7 +19,6 @@ include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
include $(src)/nvkm/engine/nvjpg/Kbuild
include $(src)/nvkm/engine/ofa/Kbuild
-include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 31ed3da32fe7..9093d89b16f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -53,26 +53,6 @@ nvkm_device_find(u64 handle)
return device;
}
-int
-nvkm_device_list(u64 *name, int size)
-{
- struct nvkm_device *device;
- int nr = 0;
- mutex_lock(&nv_devices_mutex);
- list_for_each_entry(device, &nv_devices, head) {
- if (nr++ < size)
- name[nr - 1] = device->handle;
- }
- mutex_unlock(&nv_devices_mutex);
- return nr;
-}
-
-static const struct nvkm_device_chip
-null_chipset = {
- .name = "NULL",
- .bios = { 0x00000001, nvkm_bios_new },
-};
-
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
@@ -490,7 +470,6 @@ nv40_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -516,7 +495,6 @@ nv41_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -542,7 +520,6 @@ nv42_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -568,7 +545,6 @@ nv43_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -594,7 +570,6 @@ nv44_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -620,7 +595,6 @@ nv45_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -646,7 +620,6 @@ nv46_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -672,7 +645,6 @@ nv47_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -698,7 +670,6 @@ nv49_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -724,7 +695,6 @@ nv4a_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -750,7 +720,6 @@ nv4b_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -776,7 +745,6 @@ nv4c_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -802,7 +770,6 @@ nv4e_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -831,7 +798,6 @@ nv50_chipset = {
.fifo = { 0x00000001, nv50_fifo_new },
.gr = { 0x00000001, nv50_gr_new },
.mpeg = { 0x00000001, nv50_mpeg_new },
- .pm = { 0x00000001, nv50_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -857,7 +823,6 @@ nv63_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -883,7 +848,6 @@ nv67_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -909,7 +873,6 @@ nv68_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -940,7 +903,6 @@ nv84_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -972,7 +934,6 @@ nv86_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1004,7 +965,6 @@ nv92_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1036,7 +996,6 @@ nv94_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1068,7 +1027,6 @@ nv96_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1100,7 +1058,6 @@ nv98_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1132,7 +1089,6 @@ nva0_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, gt200_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, gt200_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1167,7 +1123,6 @@ nva3_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1200,7 +1155,6 @@ nva5_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1233,7 +1187,6 @@ nva8_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1264,7 +1217,6 @@ nvaa_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1296,7 +1248,6 @@ nvac_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1330,7 +1281,6 @@ nvaf_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, mcp89_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1366,7 +1316,6 @@ nvc0_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1402,7 +1351,6 @@ nvc1_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf108_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1438,7 +1386,6 @@ nvc3_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1474,7 +1421,6 @@ nvc4_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1510,7 +1456,6 @@ nvc8_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1546,7 +1491,6 @@ nvce_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1582,7 +1526,6 @@ nvcf_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1617,7 +1560,6 @@ nvd7_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1653,7 +1595,6 @@ nvd9_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1690,7 +1631,6 @@ nve4_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1727,7 +1667,6 @@ nve6_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1764,7 +1703,6 @@ nve7_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1789,7 +1727,6 @@ nvea_chipset = {
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk20a_fifo_new },
.gr = { 0x00000001, gk20a_gr_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -3104,7 +3041,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
struct device *dev, enum nvkm_device_type type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
@@ -3132,233 +3068,228 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- if (detect || mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (device->pri == NULL) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
+ goto done;
}
/* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- /* switch mmio to cpu's native endianness */
- if (!nvkm_device_endianness(device)) {
- nvdev_error(device,
- "Couldn't switch GPU to CPUs endianness\n");
- ret = -ENOSYS;
- goto done;
- }
- boot0 = nvkm_rd32(device, 0x000000);
-
- /* chipset can be overridden for devel/testing purposes */
- chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
- if (chipset) {
- u32 override_boot0;
-
- if (chipset >= 0x10) {
- override_boot0 = ((chipset & 0x1ff) << 20);
- override_boot0 |= 0x000000a1;
- } else {
- if (chipset != 0x04)
- override_boot0 = 0x20104000;
- else
- override_boot0 = 0x20004000;
- }
+ /* switch mmio to cpu's native endianness */
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "Couldn't switch GPU to CPUs endianness\n");
+ ret = -ENOSYS;
+ goto done;
+ }
- nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
- boot0, override_boot0);
- boot0 = override_boot0;
- }
+ boot0 = nvkm_rd32(device, 0x000000);
- /* determine chipset and derive architecture from it */
- if ((boot0 & 0x1f000000) > 0) {
- device->chipset = (boot0 & 0x1ff00000) >> 20;
- device->chiprev = (boot0 & 0x000000ff);
- switch (device->chipset & 0x1f0) {
- case 0x010: {
- if (0x461 & (1 << (device->chipset & 0xf)))
- device->card_type = NV_10;
- else
- device->card_type = NV_11;
- device->chiprev = 0x00;
- break;
- }
- case 0x020: device->card_type = NV_20; break;
- case 0x030: device->card_type = NV_30; break;
- case 0x040:
- case 0x060: device->card_type = NV_40; break;
- case 0x050:
- case 0x080:
- case 0x090:
- case 0x0a0: device->card_type = NV_50; break;
- case 0x0c0:
- case 0x0d0: device->card_type = NV_C0; break;
- case 0x0e0:
- case 0x0f0:
- case 0x100: device->card_type = NV_E0; break;
- case 0x110:
- case 0x120: device->card_type = GM100; break;
- case 0x130: device->card_type = GP100; break;
- case 0x140: device->card_type = GV100; break;
- case 0x160: device->card_type = TU100; break;
- case 0x170: device->card_type = GA100; break;
- case 0x190: device->card_type = AD100; break;
- default:
- break;
- }
- } else
- if ((boot0 & 0xff00fff0) == 0x20004000) {
- if (boot0 & 0x00f00000)
- device->chipset = 0x05;
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
else
- device->chipset = 0x04;
- device->card_type = NV_04;
+ override_boot0 = 0x20004000;
}
- switch (device->chipset) {
- case 0x004: device->chip = &nv4_chipset; break;
- case 0x005: device->chip = &nv5_chipset; break;
- case 0x010: device->chip = &nv10_chipset; break;
- case 0x011: device->chip = &nv11_chipset; break;
- case 0x015: device->chip = &nv15_chipset; break;
- case 0x017: device->chip = &nv17_chipset; break;
- case 0x018: device->chip = &nv18_chipset; break;
- case 0x01a: device->chip = &nv1a_chipset; break;
- case 0x01f: device->chip = &nv1f_chipset; break;
- case 0x020: device->chip = &nv20_chipset; break;
- case 0x025: device->chip = &nv25_chipset; break;
- case 0x028: device->chip = &nv28_chipset; break;
- case 0x02a: device->chip = &nv2a_chipset; break;
- case 0x030: device->chip = &nv30_chipset; break;
- case 0x031: device->chip = &nv31_chipset; break;
- case 0x034: device->chip = &nv34_chipset; break;
- case 0x035: device->chip = &nv35_chipset; break;
- case 0x036: device->chip = &nv36_chipset; break;
- case 0x040: device->chip = &nv40_chipset; break;
- case 0x041: device->chip = &nv41_chipset; break;
- case 0x042: device->chip = &nv42_chipset; break;
- case 0x043: device->chip = &nv43_chipset; break;
- case 0x044: device->chip = &nv44_chipset; break;
- case 0x045: device->chip = &nv45_chipset; break;
- case 0x046: device->chip = &nv46_chipset; break;
- case 0x047: device->chip = &nv47_chipset; break;
- case 0x049: device->chip = &nv49_chipset; break;
- case 0x04a: device->chip = &nv4a_chipset; break;
- case 0x04b: device->chip = &nv4b_chipset; break;
- case 0x04c: device->chip = &nv4c_chipset; break;
- case 0x04e: device->chip = &nv4e_chipset; break;
- case 0x050: device->chip = &nv50_chipset; break;
- case 0x063: device->chip = &nv63_chipset; break;
- case 0x067: device->chip = &nv67_chipset; break;
- case 0x068: device->chip = &nv68_chipset; break;
- case 0x084: device->chip = &nv84_chipset; break;
- case 0x086: device->chip = &nv86_chipset; break;
- case 0x092: device->chip = &nv92_chipset; break;
- case 0x094: device->chip = &nv94_chipset; break;
- case 0x096: device->chip = &nv96_chipset; break;
- case 0x098: device->chip = &nv98_chipset; break;
- case 0x0a0: device->chip = &nva0_chipset; break;
- case 0x0a3: device->chip = &nva3_chipset; break;
- case 0x0a5: device->chip = &nva5_chipset; break;
- case 0x0a8: device->chip = &nva8_chipset; break;
- case 0x0aa: device->chip = &nvaa_chipset; break;
- case 0x0ac: device->chip = &nvac_chipset; break;
- case 0x0af: device->chip = &nvaf_chipset; break;
- case 0x0c0: device->chip = &nvc0_chipset; break;
- case 0x0c1: device->chip = &nvc1_chipset; break;
- case 0x0c3: device->chip = &nvc3_chipset; break;
- case 0x0c4: device->chip = &nvc4_chipset; break;
- case 0x0c8: device->chip = &nvc8_chipset; break;
- case 0x0ce: device->chip = &nvce_chipset; break;
- case 0x0cf: device->chip = &nvcf_chipset; break;
- case 0x0d7: device->chip = &nvd7_chipset; break;
- case 0x0d9: device->chip = &nvd9_chipset; break;
- case 0x0e4: device->chip = &nve4_chipset; break;
- case 0x0e6: device->chip = &nve6_chipset; break;
- case 0x0e7: device->chip = &nve7_chipset; break;
- case 0x0ea: device->chip = &nvea_chipset; break;
- case 0x0f0: device->chip = &nvf0_chipset; break;
- case 0x0f1: device->chip = &nvf1_chipset; break;
- case 0x106: device->chip = &nv106_chipset; break;
- case 0x108: device->chip = &nv108_chipset; break;
- case 0x117: device->chip = &nv117_chipset; break;
- case 0x118: device->chip = &nv118_chipset; break;
- case 0x120: device->chip = &nv120_chipset; break;
- case 0x124: device->chip = &nv124_chipset; break;
- case 0x126: device->chip = &nv126_chipset; break;
- case 0x12b: device->chip = &nv12b_chipset; break;
- case 0x130: device->chip = &nv130_chipset; break;
- case 0x132: device->chip = &nv132_chipset; break;
- case 0x134: device->chip = &nv134_chipset; break;
- case 0x136: device->chip = &nv136_chipset; break;
- case 0x137: device->chip = &nv137_chipset; break;
- case 0x138: device->chip = &nv138_chipset; break;
- case 0x13b: device->chip = &nv13b_chipset; break;
- case 0x140: device->chip = &nv140_chipset; break;
- case 0x162: device->chip = &nv162_chipset; break;
- case 0x164: device->chip = &nv164_chipset; break;
- case 0x166: device->chip = &nv166_chipset; break;
- case 0x167: device->chip = &nv167_chipset; break;
- case 0x168: device->chip = &nv168_chipset; break;
- case 0x172: device->chip = &nv172_chipset; break;
- case 0x173: device->chip = &nv173_chipset; break;
- case 0x174: device->chip = &nv174_chipset; break;
- case 0x176: device->chip = &nv176_chipset; break;
- case 0x177: device->chip = &nv177_chipset; break;
- case 0x192: device->chip = &nv192_chipset; break;
- case 0x193: device->chip = &nv193_chipset; break;
- case 0x194: device->chip = &nv194_chipset; break;
- case 0x196: device->chip = &nv196_chipset; break;
- case 0x197: device->chip = &nv197_chipset; break;
- default:
- if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
- switch (device->chipset) {
- case 0x170: device->chip = &nv170_chipset; break;
- default:
- break;
- }
- }
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
- if (!device->chip) {
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
- }
+ /* determine chipset and derive architecture from it */
+ if ((boot0 & 0x1f000000) > 0) {
+ device->chipset = (boot0 & 0x1ff00000) >> 20;
+ device->chiprev = (boot0 & 0x000000ff);
+ switch (device->chipset & 0x1f0) {
+ case 0x010: {
+ if (0x461 & (1 << (device->chipset & 0xf)))
+ device->card_type = NV_10;
+ else
+ device->card_type = NV_11;
+ device->chiprev = 0x00;
+ break;
+ }
+ case 0x020: device->card_type = NV_20; break;
+ case 0x030: device->card_type = NV_30; break;
+ case 0x040:
+ case 0x060: device->card_type = NV_40; break;
+ case 0x050:
+ case 0x080:
+ case 0x090:
+ case 0x0a0: device->card_type = NV_50; break;
+ case 0x0c0:
+ case 0x0d0: device->card_type = NV_C0; break;
+ case 0x0e0:
+ case 0x0f0:
+ case 0x100: device->card_type = NV_E0; break;
+ case 0x110:
+ case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
+ case 0x140: device->card_type = GV100; break;
+ case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
+ case 0x190: device->card_type = AD100; break;
+ default:
break;
}
+ } else
+ if ((boot0 & 0xff00fff0) == 0x20004000) {
+ if (boot0 & 0x00f00000)
+ device->chipset = 0x05;
+ else
+ device->chipset = 0x04;
+ device->card_type = NV_04;
+ }
- nvdev_info(device, "NVIDIA %s (%08x)\n",
- device->chip->name, boot0);
+ switch (device->chipset) {
+ case 0x004: device->chip = &nv4_chipset; break;
+ case 0x005: device->chip = &nv5_chipset; break;
+ case 0x010: device->chip = &nv10_chipset; break;
+ case 0x011: device->chip = &nv11_chipset; break;
+ case 0x015: device->chip = &nv15_chipset; break;
+ case 0x017: device->chip = &nv17_chipset; break;
+ case 0x018: device->chip = &nv18_chipset; break;
+ case 0x01a: device->chip = &nv1a_chipset; break;
+ case 0x01f: device->chip = &nv1f_chipset; break;
+ case 0x020: device->chip = &nv20_chipset; break;
+ case 0x025: device->chip = &nv25_chipset; break;
+ case 0x028: device->chip = &nv28_chipset; break;
+ case 0x02a: device->chip = &nv2a_chipset; break;
+ case 0x030: device->chip = &nv30_chipset; break;
+ case 0x031: device->chip = &nv31_chipset; break;
+ case 0x034: device->chip = &nv34_chipset; break;
+ case 0x035: device->chip = &nv35_chipset; break;
+ case 0x036: device->chip = &nv36_chipset; break;
+ case 0x040: device->chip = &nv40_chipset; break;
+ case 0x041: device->chip = &nv41_chipset; break;
+ case 0x042: device->chip = &nv42_chipset; break;
+ case 0x043: device->chip = &nv43_chipset; break;
+ case 0x044: device->chip = &nv44_chipset; break;
+ case 0x045: device->chip = &nv45_chipset; break;
+ case 0x046: device->chip = &nv46_chipset; break;
+ case 0x047: device->chip = &nv47_chipset; break;
+ case 0x049: device->chip = &nv49_chipset; break;
+ case 0x04a: device->chip = &nv4a_chipset; break;
+ case 0x04b: device->chip = &nv4b_chipset; break;
+ case 0x04c: device->chip = &nv4c_chipset; break;
+ case 0x04e: device->chip = &nv4e_chipset; break;
+ case 0x050: device->chip = &nv50_chipset; break;
+ case 0x063: device->chip = &nv63_chipset; break;
+ case 0x067: device->chip = &nv67_chipset; break;
+ case 0x068: device->chip = &nv68_chipset; break;
+ case 0x084: device->chip = &nv84_chipset; break;
+ case 0x086: device->chip = &nv86_chipset; break;
+ case 0x092: device->chip = &nv92_chipset; break;
+ case 0x094: device->chip = &nv94_chipset; break;
+ case 0x096: device->chip = &nv96_chipset; break;
+ case 0x098: device->chip = &nv98_chipset; break;
+ case 0x0a0: device->chip = &nva0_chipset; break;
+ case 0x0a3: device->chip = &nva3_chipset; break;
+ case 0x0a5: device->chip = &nva5_chipset; break;
+ case 0x0a8: device->chip = &nva8_chipset; break;
+ case 0x0aa: device->chip = &nvaa_chipset; break;
+ case 0x0ac: device->chip = &nvac_chipset; break;
+ case 0x0af: device->chip = &nvaf_chipset; break;
+ case 0x0c0: device->chip = &nvc0_chipset; break;
+ case 0x0c1: device->chip = &nvc1_chipset; break;
+ case 0x0c3: device->chip = &nvc3_chipset; break;
+ case 0x0c4: device->chip = &nvc4_chipset; break;
+ case 0x0c8: device->chip = &nvc8_chipset; break;
+ case 0x0ce: device->chip = &nvce_chipset; break;
+ case 0x0cf: device->chip = &nvcf_chipset; break;
+ case 0x0d7: device->chip = &nvd7_chipset; break;
+ case 0x0d9: device->chip = &nvd9_chipset; break;
+ case 0x0e4: device->chip = &nve4_chipset; break;
+ case 0x0e6: device->chip = &nve6_chipset; break;
+ case 0x0e7: device->chip = &nve7_chipset; break;
+ case 0x0ea: device->chip = &nvea_chipset; break;
+ case 0x0f0: device->chip = &nvf0_chipset; break;
+ case 0x0f1: device->chip = &nvf1_chipset; break;
+ case 0x106: device->chip = &nv106_chipset; break;
+ case 0x108: device->chip = &nv108_chipset; break;
+ case 0x117: device->chip = &nv117_chipset; break;
+ case 0x118: device->chip = &nv118_chipset; break;
+ case 0x120: device->chip = &nv120_chipset; break;
+ case 0x124: device->chip = &nv124_chipset; break;
+ case 0x126: device->chip = &nv126_chipset; break;
+ case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
+ case 0x136: device->chip = &nv136_chipset; break;
+ case 0x137: device->chip = &nv137_chipset; break;
+ case 0x138: device->chip = &nv138_chipset; break;
+ case 0x13b: device->chip = &nv13b_chipset; break;
+ case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
+ case 0x164: device->chip = &nv164_chipset; break;
+ case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
+ case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
+ case 0x176: device->chip = &nv176_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
+ case 0x192: device->chip = &nv192_chipset; break;
+ case 0x193: device->chip = &nv193_chipset; break;
+ case 0x194: device->chip = &nv194_chipset; break;
+ case 0x196: device->chip = &nv196_chipset; break;
+ case 0x197: device->chip = &nv197_chipset; break;
+ default:
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
- /* vGPU detection */
- boot1 = nvkm_rd32(device, 0x0000004);
- if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
- nvdev_info(device, "vGPUs are not supported\n");
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
}
+ break;
+ }
- /* read strapping information */
- strap = nvkm_rd32(device, 0x101000);
+ nvdev_info(device, "NVIDIA %s (%08x)\n",
+ device->chip->name, boot0);
- /* determine frequency of timing crystal */
- if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset < 0x25))
- strap &= 0x00000040;
- else
- strap &= 0x00400040;
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
- switch (strap) {
- case 0x00000000: device->crystal = 13500; break;
- case 0x00000040: device->crystal = 14318; break;
- case 0x00400000: device->crystal = 27000; break;
- case 0x00400040: device->crystal = 25000; break;
- }
- } else {
- device->chip = &null_chipset;
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
+ /* determine frequency of timing crystal */
+ if (device->card_type <= NV_10 || device->chipset < 0x17 ||
+ (device->chipset >= 0x20 && device->chipset < 0x25))
+ strap &= 0x00000040;
+ else
+ strap &= 0x00400040;
+
+ switch (strap) {
+ case 0x00000000: device->crystal = 13500; break;
+ case 0x00000040: device->crystal = 14318; break;
+ case 0x00400000: device->crystal = 27000; break;
+ case 0x00400040: device->crystal = 25000; break;
}
if (!device->name)
@@ -3368,7 +3299,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvkm_intr_ctor(device);
#define NVKM_LAYOUT_ONCE(type,data,ptr) \
- if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
+ if (device->chip->ptr.inst) { \
WARN_ON(device->chip->ptr.inst != 0x00000001); \
ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
subdev = nvkm_device_subdev(device, (type), 0); \
@@ -3387,7 +3318,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
- if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
+ if (device->chip->ptr.inst & BIT(j)) { \
ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \
subdev = nvkm_device_subdev(device, (type), (j)); \
if (ret) { \
@@ -3409,7 +3340,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = nvkm_intr_install(device);
done:
- if (device->pri && (!mmio || ret)) {
+ if (ret && device->pri) {
iounmap(device->pri);
device->pri = NULL;
}
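Net effect of the base.c changes above: nvkm_device_ctor() drops its detect, mmio and subdev_mask parameters and now always maps PRI, always identifies the chipset, and constructs every subdev/engine the chip declares; the NULL chipset fallback and nvkm_device_list() go away with the code paths that needed them. A minimal sketch of a caller against the new prototype — the surrounding function, func table and handle are placeholders, only the argument list is taken from this patch:

#include "priv.h"	/* post-patch nvkm_device_ctor() prototype */

static const struct nvkm_device_func my_bus_func;	/* placeholder ops */

static int
my_bus_attach(struct device *parent, u64 handle, const char *name,
	      const char *cfg, const char *dbg, struct nvkm_device *device)
{
	/* detect/mmio/subdev_mask no longer exist as arguments */
	return nvkm_device_ctor(&my_bus_func, NULL /* quirk */, parent,
				NVKM_DEVICE_PCI, handle, name,
				cfg, dbg, device);
}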
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index abccb2bb68a6..3ff6436007fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1626,7 +1626,6 @@ nvkm_device_pci_func = {
int
nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
const struct nvkm_device_quirk *quirk = NULL;
@@ -1680,8 +1679,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
pci_dev->bus->number << 16 |
PCI_SLOT(pci_dev->devfn) << 8 |
PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ cfg, dbg, &pdev->device);
if (ret)
return ret;
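nvkm_device_pci_new() shrinks in step. A hedged sketch of a probe-time caller — the probe function itself is hypothetical, only the four-argument form comes from this patch:

#include "priv.h"	/* nvkm_device_pci_new() */

static int
my_probe(struct pci_dev *pci_dev)	/* hypothetical caller */
{
	struct nvkm_device *device;

	/* the cfg/dbg option strings are all that remain beside the pdev */
	return nvkm_device_pci_new(pci_dev, NULL, NULL, &device);
}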
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index bf3176bec18a..e42b18820a95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -45,7 +45,6 @@
#include <engine/nvdec.h>
#include <engine/nvjpg.h>
#include <engine/ofa.h>
-#include <engine/pm.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
@@ -56,7 +55,6 @@ int nvkm_device_ctor(const struct nvkm_device_func *,
const struct nvkm_device_quirk *,
struct device *, enum nvkm_device_type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *);
int nvkm_device_init(struct nvkm_device *);
int nvkm_device_fini(struct nvkm_device *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 87caa4a72921..d1c294f00665 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -237,7 +237,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
@@ -311,8 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
- cfg, dbg, detect, mmio, subdev_mask,
- &tdev->device);
+ cfg, dbg, &tdev->device);
if (ret)
goto powerdown;
@@ -333,7 +331,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
return -ENOSYS;
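Both Tegra entry points — the real constructor and the !CONFIG stub returning -ENOSYS — lose the same three parameters, so callers compile identically either way. Sketch only; the wrapper and its arguments are placeholders:

#include "priv.h"	/* nvkm_device_tegra_new() */

static int
my_tegra_attach(const struct nvkm_device_tegra_func *func,
		struct platform_device *pdev)
{
	struct nvkm_device *device;

	return nvkm_device_tegra_new(func, pdev, NULL /* cfg */,
				     NULL /* dbg */, &device);
}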
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 7fd4800a876a..d7f75b3a43c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -203,54 +203,6 @@ nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
}
static int
-nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd08(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd16(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd32(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr08(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr16(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr32(udev->device, addr, data);
- return 0;
-}
-
-static int
nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
@@ -322,8 +274,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
struct nvkm_engine *engine;
u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_FIFO) |
- (1ULL << NVKM_ENGINE_DISP) |
- (1ULL << NVKM_ENGINE_PM);
+ (1ULL << NVKM_ENGINE_DISP);
const struct nvkm_device_oclass *sclass = NULL;
int i;
@@ -358,25 +309,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
}
static const struct nvkm_object_func
-nvkm_udevice_super = {
- .init = nvkm_udevice_init,
- .fini = nvkm_udevice_fini,
- .mthd = nvkm_udevice_mthd,
- .map = nvkm_udevice_map,
- .rd08 = nvkm_udevice_rd08,
- .rd16 = nvkm_udevice_rd16,
- .rd32 = nvkm_udevice_rd32,
- .wr08 = nvkm_udevice_wr08,
- .wr16 = nvkm_udevice_wr16,
- .wr32 = nvkm_udevice_wr32,
- .sclass = nvkm_udevice_child_get,
-};
-
-static const struct nvkm_object_func
nvkm_udevice = {
.init = nvkm_udevice_init,
.fini = nvkm_udevice_fini,
.mthd = nvkm_udevice_mthd,
+ .map = nvkm_udevice_map,
.sclass = nvkm_udevice_child_get,
};
@@ -384,38 +321,16 @@ static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- union {
- struct nv_device_v0 v0;
- } *args = data;
struct nvkm_client *client = oclass->client;
- struct nvkm_object *parent = &client->object;
- const struct nvkm_object_func *func;
struct nvkm_udevice *udev;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create device size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create device v%d device %016llx\n",
- args->v0.version, args->v0.device);
- } else
- return ret;
-
- /* give priviledged clients register access */
- if (args->v0.priv)
- func = &nvkm_udevice_super;
- else
- func = &nvkm_udevice;
if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
return -ENOMEM;
- nvkm_object_ctor(func, oclass, &udev->object);
+ nvkm_object_ctor(&nvkm_udevice, oclass, &udev->object);
*pobject = &udev->object;
/* find the device that matches what the client requested */
- if (args->v0.device != ~0)
- udev->device = nvkm_device_find(args->v0.device);
- else
- udev->device = nvkm_device_find(client->device);
+ udev->device = nvkm_device_find(client->device);
if (!udev->device)
return -ENODEV;
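With nvkm_udevice_super gone, every client gets the same device class: the nv_device_v0 argument unpack, the priv flag and the caller-supplied device handle all disappear, and the raw rd08..wr32 accessors become unreachable; .map is kept as the one remaining direct-access path. Condensed from the post-patch constructor above (error handling elided, nothing new added):

/* What remains of nvkm_udevice_new() after this patch, condensed: */
udev = kzalloc(sizeof(*udev), GFP_KERNEL);
nvkm_object_ctor(&nvkm_udevice, oclass, &udev->object);
*pobject = &udev->object;
udev->device = nvkm_device_find(client->device);	/* per-client device */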
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index d5e18daed79f..4e43ee383c34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -27,28 +27,6 @@
#include <nvif/if0014.h>
static int
-nvkm_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- *data = nvkm_rd32(device, base + addr);
- return 0;
-}
-
-static int
-nvkm_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- nvkm_wr32(device, base + addr, data);
- return 0;
-}
-
-static int
nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
@@ -188,8 +166,6 @@ nvkm_disp_chan = {
.dtor = nvkm_disp_chan_dtor,
.init = nvkm_disp_chan_init,
.fini = nvkm_disp_chan_fini,
- .rd32 = nvkm_disp_chan_rd32,
- .wr32 = nvkm_disp_chan_wr32,
.ntfy = nvkm_disp_chan_ntfy,
.map = nvkm_disp_chan_map,
.sclass = nvkm_disp_chan_child_get,
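The display channels lose their 32-bit register hooks in the same sweep. The one operation both removed functions shared — resolving the channel's USER window — is still how the region is reached, just through the surviving .map hook instead of read/write ioctls. Quoted from the deleted bodies, for orientation only:

/* Both removed hooks resolved the per-channel USER window like this,
 * then performed a single nvkm_rd32()/nvkm_wr32() at base + addr: */
u64 size, base = chan->func->user(chan, &size);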
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
deleted file mode 100644
index 2cc8a5f6fe0c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/nv40.o
-nvkm-y += nvkm/engine/pm/nv50.o
-nvkm-y += nvkm/engine/pm/g84.o
-nvkm-y += nvkm/engine/pm/gt200.o
-nvkm-y += nvkm/engine/pm/gt215.o
-nvkm-y += nvkm/engine/pm/gf100.o
-nvkm-y += nvkm/engine/pm/gf108.o
-nvkm-y += nvkm/engine/pm/gf117.o
-nvkm-y += nvkm/engine/pm/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
deleted file mode 100644
index 131db2645f84..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/option.h>
-
-#include <nvif/class.h>
-#include <nvif/if0002.h>
-#include <nvif/if0003.h>
-#include <nvif/ioctl.h>
-#include <nvif/unpack.h>
-
-static u8
-nvkm_pm_count_perfdom(struct nvkm_pm *pm)
-{
- struct nvkm_perfdom *dom;
- u8 domain_nr = 0;
-
- list_for_each_entry(dom, &pm->domains, head)
- domain_nr++;
- return domain_nr;
-}
-
-static u16
-nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
-{
- u16 signal_nr = 0;
- int i;
-
- if (dom) {
- for (i = 0; i < dom->signal_nr; i++) {
- if (dom->signal[i].name)
- signal_nr++;
- }
- }
- return signal_nr;
-}
-
-static struct nvkm_perfdom *
-nvkm_perfdom_find(struct nvkm_pm *pm, int di)
-{
- struct nvkm_perfdom *dom;
- int tmp = 0;
-
- list_for_each_entry(dom, &pm->domains, head) {
- if (tmp++ == di)
- return dom;
- }
- return NULL;
-}
-
-static struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
-{
- struct nvkm_perfdom *dom = *pdom;
-
- if (dom == NULL) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return NULL;
- *pdom = dom;
- }
-
- if (!dom->signal[si].name)
- return NULL;
- return &dom->signal[si];
-}
-
-static u8
-nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
-{
- u8 source_nr = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
- if (sig->source[i])
- source_nr++;
- }
- return source_nr;
-}
-
-static struct nvkm_perfsrc *
-nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
-{
- struct nvkm_perfsrc *src;
- bool found = false;
- int tmp = 1; /* Sources ID start from 1 */
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
- if (sig->source[i] == si) {
- found = true;
- break;
- }
- }
-
- if (found) {
- list_for_each_entry(src, &pm->sources, head) {
- if (tmp++ == si)
- return src;
- }
- }
-
- return NULL;
-}
-
-static int
-nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask, value;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* set enable bit if needed */
- mask = value = 0x00000000;
- if (src->enable)
- mask = value = 0x80000000;
- mask |= (src->mask << src->shift);
- value |= ((ctr->source[i][j] >> 32) << src->shift);
-
- /* enable the source */
- nvkm_mask(device, src->addr, mask, value);
- nvkm_debug(subdev,
- "enabled source %08x %08x %08x\n",
- src->addr, mask, value);
- }
- }
- return 0;
-}
-
-static int
-nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* unset enable bit if needed */
- mask = 0x00000000;
- if (src->enable)
- mask = 0x80000000;
- mask |= (src->mask << src->shift);
-
- /* disable the source */
- nvkm_mask(device, src->addr, mask, 0);
- nvkm_debug(subdev, "disabled source %08x %08x\n",
- src->addr, mask);
- }
- }
- return 0;
-}
-
-/*******************************************************************************
- * Perfdom object classes
- ******************************************************************************/
-static int
-nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_init none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom init size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom init\n");
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i]) {
- dom->func->init(pm, dom, dom->ctr[i]);
-
- /* enable sources */
- nvkm_perfsrc_enable(pm, dom->ctr[i]);
- }
- }
-
- /* start next batch of counters for sampling */
- dom->func->next(pm, dom);
- return 0;
-}
-
-static int
-nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_sample none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "perfdom sample size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom sample\n");
- } else
- return ret;
- pm->sequence++;
-
- /* sample previous batch of counters */
- list_for_each_entry(dom, &pm->domains, head)
- dom->func->next(pm, dom);
-
- return 0;
-}
-
-static int
-nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_read_v0 v0;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom read size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i])
- dom->func->read(pm, dom, dom->ctr[i]);
- }
-
- if (!dom->clk)
- return -EAGAIN;
-
- for (i = 0; i < 4; i++)
- if (dom->ctr[i])
- args->v0.ctr[i] = dom->ctr[i]->ctr;
- args->v0.clk = dom->clk;
- return 0;
-}
-
-static int
-nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- switch (mthd) {
- case NVIF_PERFDOM_V0_INIT:
- return nvkm_perfdom_init(dom, data, size);
- case NVIF_PERFDOM_V0_SAMPLE:
- return nvkm_perfdom_sample(dom, data, size);
- case NVIF_PERFDOM_V0_READ:
- return nvkm_perfdom_read(dom, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfdom_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- struct nvkm_pm *pm = dom->perfmon->pm;
- int i;
-
- for (i = 0; i < 4; i++) {
- struct nvkm_perfctr *ctr = dom->ctr[i];
- if (ctr) {
- nvkm_perfsrc_disable(pm, ctr);
- if (ctr->head.next)
- list_del(&ctr->head);
- }
- kfree(ctr);
- }
-
- return dom;
-}
-
-static int
-nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
- struct nvkm_perfsig *signal[4], u64 source[4][8],
- u16 logic_op, struct nvkm_perfctr **pctr)
-{
- struct nvkm_perfctr *ctr;
- int i, j;
-
- if (!dom)
- return -EINVAL;
-
- ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
- if (!ctr)
- return -ENOMEM;
-
- ctr->domain = domain;
- ctr->logic_op = logic_op;
- ctr->slot = slot;
- for (i = 0; i < 4; i++) {
- if (signal[i]) {
- ctr->signal[i] = signal[i] - dom->signal;
- for (j = 0; j < 8; j++)
- ctr->source[i][j] = source[i][j];
- }
- }
- list_add_tail(&ctr->head, &dom->list);
-
- return 0;
-}
-
-static const struct nvkm_object_func
-nvkm_perfdom = {
- .dtor = nvkm_perfdom_dtor,
- .mthd = nvkm_perfdom_mthd,
-};
-
-static int
-nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
- const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nvif_perfdom_v0 v0;
- } *args = data;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_object *parent = oclass->parent;
- struct nvkm_perfdom *sdom = NULL;
- struct nvkm_perfctr *ctr[4] = {};
- struct nvkm_perfdom *dom;
- int c, s, m;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create perfdom size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
- args->v0.version, args->v0.domain, args->v0.mode);
- } else
- return ret;
-
- for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
- struct nvkm_perfsig *sig[4] = {};
- u64 src[4][8] = {};
-
- for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
- sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
- args->v0.ctr[c].signal[s],
- &sdom);
- if (args->v0.ctr[c].signal[s] && !sig[s])
- return -EINVAL;
-
- for (m = 0; m < 8; m++) {
- src[s][m] = args->v0.ctr[c].source[s][m];
- if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
- src[s][m]))
- return -EINVAL;
- }
- }
-
- ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
- args->v0.ctr[c].logic_op, &ctr[c]);
- if (ret)
- return ret;
- }
-
- if (!sdom)
- return -EINVAL;
-
- if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
- dom->perfmon = perfmon;
- *pobject = &dom->object;
-
- dom->func = sdom->func;
- dom->addr = sdom->addr;
- dom->mode = args->v0.mode;
- for (c = 0; c < ARRAY_SIZE(ctr); c++)
- dom->ctr[c] = ctr[c];
- return 0;
-}
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-static int
-nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_domain_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom;
- u8 domain_nr;
- int di, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query domain size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
- args->v0.version, args->v0.iter);
- di = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- domain_nr = nvkm_pm_count_perfdom(pm);
- if (di >= (int)domain_nr)
- return -EINVAL;
-
- if (di >= 0) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return -EINVAL;
-
- args->v0.id = di;
- args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strscpy(args->v0.name, dom->name, sizeof(args->v0.name));
-
- /* Currently only global counters (PCOUNTER) are implemented
- * but this will be different for local counters (MP). */
- args->v0.counter_nr = 4;
- }
-
- if (++di < domain_nr) {
- args->v0.iter = ++di;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_signal_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nvkm_perfdom *dom;
- struct nvkm_perfsig *sig;
- const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
- const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
- int ret = -ENOSYS, si;
-
- nvif_ioctl(object, "perfmon query signal size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon query signal vers %d dom %d iter %04x\n",
- args->v0.version, args->v0.domain, args->v0.iter);
- si = (args->v0.iter & 0xffff) - 1;
- } else
- return ret;
-
- dom = nvkm_perfdom_find(pm, args->v0.domain);
- if (dom == NULL || si >= (int)dom->signal_nr)
- return -EINVAL;
-
- if (si >= 0) {
- sig = &dom->signal[si];
- if (raw || !sig->name) {
- snprintf(args->v0.name, sizeof(args->v0.name),
- "/%s/%02x", dom->name, si);
- } else {
- strscpy(args->v0.name, sig->name, sizeof(args->v0.name));
- }
-
- args->v0.signal = si;
- args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
- }
-
- while (++si < dom->signal_nr) {
- if (all || dom->signal[si].name) {
- args->v0.iter = ++si;
- return 0;
- }
- }
-
- args->v0.iter = 0xffff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_source_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
- int si, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query source size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon source vers %d dom %d sig %02x iter %02x\n",
- args->v0.version, args->v0.domain, args->v0.signal,
- args->v0.iter);
- si = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
- if (!sig)
- return -EINVAL;
-
- source_nr = nvkm_perfsig_count_perfsrc(sig);
- if (si >= (int)source_nr)
- return -EINVAL;
-
- if (si >= 0) {
- src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
- if (!src)
- return -EINVAL;
-
- args->v0.source = sig->source[si];
- args->v0.mask = src->mask;
- strscpy(args->v0.name, src->name, sizeof(args->v0.name));
- }
-
- if (++si < source_nr) {
- args->v0.iter = ++si;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- switch (mthd) {
- case NVIF_PERFMON_V0_QUERY_DOMAIN:
- return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SIGNAL:
- return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SOURCE:
- return nvkm_perfmon_mthd_query_source(perfmon, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static int
-nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
- return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
-}
-
-static int
-nvkm_perfmon_child_get(struct nvkm_object *object, int index,
- struct nvkm_oclass *oclass)
-{
- if (index == 0) {
- oclass->base.oclass = NVIF_CLASS_PERFDOM;
- oclass->base.minver = 0;
- oclass->base.maxver = 0;
- oclass->ctor = nvkm_perfmon_child_new;
- return 0;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfmon_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- struct nvkm_pm *pm = perfmon->pm;
- spin_lock(&pm->client.lock);
- if (pm->client.object == &perfmon->object)
- pm->client.object = NULL;
- spin_unlock(&pm->client.lock);
- return perfmon;
-}
-
-static const struct nvkm_object_func
-nvkm_perfmon = {
- .dtor = nvkm_perfmon_dtor,
- .mthd = nvkm_perfmon_mthd,
- .sclass = nvkm_perfmon_child_get,
-};
-
-static int
-nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon;
-
- if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
- perfmon->pm = pm;
- *pobject = &perfmon->object;
- return 0;
-}
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static int
-nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_pm *pm = nvkm_pm(oclass->engine);
- int ret;
-
- ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
- if (ret)
- return ret;
-
- spin_lock(&pm->client.lock);
- if (pm->client.object == NULL)
- pm->client.object = *pobject;
- ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
- spin_unlock(&pm->client.lock);
- return ret;
-}
-
-static const struct nvkm_device_oclass
-nvkm_pm_oclass = {
- .base.oclass = NVIF_CLASS_PERFMON,
- .base.minver = -1,
- .base.maxver = -1,
- .ctor = nvkm_pm_oclass_new,
-};
-
-static int
-nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
- const struct nvkm_device_oclass **class)
-{
- if (index == 0) {
- oclass->base = nvkm_pm_oclass.base;
- *class = &nvkm_pm_oclass;
- return index;
- }
- return 1;
-}
-
-static int
-nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
- const struct nvkm_specsrc *spec)
-{
- const struct nvkm_specsrc *ssrc;
- const struct nvkm_specmux *smux;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
-
- if (!spec) {
- /* No sources are defined for this signal. */
- return 0;
- }
-
- ssrc = spec;
- while (ssrc->name) {
- smux = ssrc->mux;
- while (smux->name) {
- bool found = false;
- u8 source_id = 0;
- u32 len;
-
- list_for_each_entry(src, &pm->sources, head) {
- if (src->addr == ssrc->addr &&
- src->shift == smux->shift) {
- found = true;
- break;
- }
- source_id++;
- }
-
- if (!found) {
- src = kzalloc(sizeof(*src), GFP_KERNEL);
- if (!src)
- return -ENOMEM;
-
- src->addr = ssrc->addr;
- src->mask = smux->mask;
- src->shift = smux->shift;
- src->enable = smux->enable;
-
- len = strlen(ssrc->name) +
- strlen(smux->name) + 2;
- src->name = kzalloc(len, GFP_KERNEL);
- if (!src->name) {
- kfree(src);
- return -ENOMEM;
- }
- snprintf(src->name, len, "%s_%s", ssrc->name,
- smux->name);
-
- list_add_tail(&src->head, &pm->sources);
- }
-
- sig->source[source_nr++] = source_id + 1;
- smux++;
- }
- ssrc++;
- }
-
- return 0;
-}
-
-int
-nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
- u32 base, u32 size_unit, u32 size_domain,
- const struct nvkm_specdom *spec)
-{
- const struct nvkm_specdom *sdom;
- const struct nvkm_specsig *ssig;
- struct nvkm_perfdom *dom;
- int ret, i;
-
- for (i = 0; i == 0 || mask; i++) {
- u32 addr = base + (i * size_unit);
- if (i && !(mask & (1 << i)))
- continue;
-
- sdom = spec;
- while (sdom->signal_nr) {
- dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
- GFP_KERNEL);
- if (!dom)
- return -ENOMEM;
-
- if (mask) {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x/%02x", name, i,
- (int)(sdom - spec));
- } else {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x", name, (int)(sdom - spec));
- }
-
- list_add_tail(&dom->head, &pm->domains);
- INIT_LIST_HEAD(&dom->list);
- dom->func = sdom->func;
- dom->addr = addr;
- dom->signal_nr = sdom->signal_nr;
-
- ssig = (sdom++)->signal;
- while (ssig->name) {
- struct nvkm_perfsig *sig =
- &dom->signal[ssig->signal];
- sig->name = ssig->name;
- ret = nvkm_perfsrc_new(pm, sig, ssig->source);
- if (ret)
- return ret;
- ssig++;
- }
-
- addr += size_domain;
- }
-
- mask &= ~(1 << i);
- }
-
- return 0;
-}
-
-static int
-nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- if (pm->func->fini)
- pm->func->fini(pm);
- return 0;
-}
-
-static void *
-nvkm_pm_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- struct nvkm_perfdom *dom, *next_dom;
- struct nvkm_perfsrc *src, *next_src;
-
- list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
- list_del(&dom->head);
- kfree(dom);
- }
-
- list_for_each_entry_safe(src, next_src, &pm->sources, head) {
- list_del(&src->head);
- kfree(src->name);
- kfree(src);
- }
-
- return pm;
-}
-
-static const struct nvkm_engine_func
-nvkm_pm = {
- .dtor = nvkm_pm_dtor,
- .fini = nvkm_pm_fini,
- .base.sclass = nvkm_pm_oclass_get,
-};
-
-int
-nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
-{
- pm->func = func;
- INIT_LIST_HEAD(&pm->domains);
- INIT_LIST_HEAD(&pm->sources);
- spin_lock_init(&pm->client.lock);
- return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
-}
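The deleted base.c was the heart of the PM engine: it implemented the NVIF_CLASS_PERFMON/NVIF_CLASS_PERFDOM object family on top of the per-chipset signal tables in the files that follow. For orientation, the user-visible methods that vanish with it — tokens quoted from the deleted switch statements, one-line descriptions paraphrased from their handlers:

/* Removed ioctl surface (see nvkm_perfmon_mthd()/nvkm_perfdom_mthd()):
 *   NVIF_PERFMON_V0_QUERY_DOMAIN - enumerate PCOUNTER domains
 *   NVIF_PERFMON_V0_QUERY_SIGNAL - enumerate the signals of a domain
 *   NVIF_PERFMON_V0_QUERY_SOURCE - enumerate the mux sources of a signal
 *   NVIF_PERFDOM_V0_INIT         - program up to four counters
 *   NVIF_PERFDOM_V0_SAMPLE       - latch the next batch of samples
 *   NVIF_PERFDOM_V0_READ         - read the counters and clk cycle count
 */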
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
deleted file mode 100644
index 0086d00eb162..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-g84_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x3, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_prop_sources[] = {
- { 0x408e50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_tex_sources[] = {
- { 0x408808, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-g84_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xbd, "pc01_gr_idle" },
- { 0x5e, "pc01_strmout_00" },
- { 0x5f, "pc01_strmout_01" },
- { 0xd2, "pc01_trast_00" },
- { 0xd3, "pc01_trast_01" },
- { 0xd4, "pc01_trast_02" },
- { 0xd5, "pc01_trast_03" },
- { 0xd8, "pc01_trast_04" },
- { 0xd9, "pc01_trast_05" },
- { 0x5c, "pc01_vattr_00" },
- { 0x5d, "pc01_vattr_01" },
- { 0x66, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x67, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x68, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x69, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x76, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x77, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x78, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x79, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
- { 0xa4, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xa0, (const struct nvkm_specsig[]) {
- { 0x30, "pc02_crop_00", g84_crop_sources },
- { 0x31, "pc02_crop_01", g84_crop_sources },
- { 0x32, "pc02_crop_02", g84_crop_sources },
- { 0x33, "pc02_crop_03", g84_crop_sources },
- { 0x00, "pc02_prop_00", g84_prop_sources },
- { 0x01, "pc02_prop_01", g84_prop_sources },
- { 0x02, "pc02_prop_02", g84_prop_sources },
- { 0x03, "pc02_prop_03", g84_prop_sources },
- { 0x04, "pc02_prop_04", g84_prop_sources },
- { 0x05, "pc02_prop_05", g84_prop_sources },
- { 0x06, "pc02_prop_06", g84_prop_sources },
- { 0x07, "pc02_prop_07", g84_prop_sources },
- { 0x48, "pc02_tex_00", g84_tex_sources },
- { 0x49, "pc02_tex_01", g84_tex_sources },
- { 0x4a, "pc02_tex_02", g84_tex_sources },
- { 0x4b, "pc02_tex_03", g84_tex_sources },
- { 0x1a, "pc02_tex_04", g84_tex_sources },
- { 0x1b, "pc02_tex_05", g84_tex_sources },
- { 0x1c, "pc02_tex_06", g84_tex_sources },
- { 0x44, "pc02_zrop_00", nv50_zrop_sources },
- { 0x45, "pc02_zrop_01", nv50_zrop_sources },
- { 0x46, "pc02_zrop_02", nv50_zrop_sources },
- { 0x47, "pc02_zrop_03", nv50_zrop_sources },
- { 0x8c, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(g84_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
deleted file mode 100644
index 8e02701def8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-const struct nvkm_specsrc
-gf100_pbfb_sources[] = {
- { 0x10f100, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- { 0x3f, 4, "unk4" },
- {}
- }, "pbfb_broadcast_pm_unk100" },
- {}
-};
-
-const struct nvkm_specsrc
-gf100_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 29, "unk29" },
- {}
- }, "pmfb0_pm_unk28" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_l1_sources[] = {
- { 0x5044a8, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_l1_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_unk400_sources[] = {
- { 0x50440c, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_unk400_pm_mux" },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_hub[] = {
- {}
-};
-
-const struct nvkm_specdom
-gf100_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "gpc00_l1_00", gf100_l1_sources },
- { 0x01, "gpc00_l1_01", gf100_l1_sources },
- { 0x02, "gpc00_l1_02", gf100_l1_sources },
- { 0x03, "gpc00_l1_03", gf100_l1_sources },
- { 0x05, "gpc00_l1_04", gf100_l1_sources },
- { 0x06, "gpc00_l1_05", gf100_l1_sources },
- { 0x0a, "gpc00_tex_00", gf100_tex_sources },
- { 0x0b, "gpc00_tex_01", gf100_tex_sources },
- { 0x0c, "gpc00_tex_02", gf100_tex_sources },
- { 0x0d, "gpc00_tex_03", gf100_tex_sources },
- { 0x0e, "gpc00_tex_04", gf100_tex_sources },
- { 0x0f, "gpc00_tex_05", gf100_tex_sources },
- { 0x10, "gpc00_tex_06", gf100_tex_sources },
- { 0x11, "gpc00_tex_07", gf100_tex_sources },
- { 0x12, "gpc00_tex_08", gf100_tex_sources },
- { 0x26, "gpc00_unk400_00", gf100_unk400_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x10, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x21, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x00, "part00_pmfb_02", gf100_pmfb_sources },
- { 0x02, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x01, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static void
-gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
- nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
- nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
- nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
-}
-
-static void
-gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
- case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
- case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
- case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
- }
- dom->clk = nvkm_rd32(device, dom->addr + 0x070);
-}
-
-static void
-gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
- nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
-}
-
-const struct nvkm_funcdom
-gf100_perfctr_func = {
- .init = gf100_perfctr_init,
- .read = gf100_perfctr_read,
- .next = gf100_perfctr_next,
-};
-
-static void
-gf100_pm_fini(struct nvkm_pm *pm)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
-}
-
-static const struct nvkm_pm_func
-gf100_pm_ = {
- .fini = gf100_pm_fini,
-};
-
-int
-gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nvkm_pm *pm;
- u32 mask;
- int ret;
-
- if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
-
- ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
- if (ret)
- return ret;
-
- /* HUB */
- ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
- func->doms_hub);
- if (ret)
- return ret;
-
- /* GPC */
- mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
- mask &= ~nvkm_rd32(device, 0x022504);
- mask &= ~nvkm_rd32(device, 0x022584);
-
- ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
- 0x1000, 0x200, func->doms_gpc);
- if (ret)
- return ret;
-
- /* PART */
- mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
- mask &= ~nvkm_rd32(device, 0x022548);
- mask &= ~nvkm_rd32(device, 0x0225c8);
-
- ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
- 0x1000, 0x200, func->doms_part);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct gf100_pm_func
-gf100_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf100_pm_hub,
- .doms_part = gf100_pm_part,
-};
-
-int
-gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
deleted file mode 100644
index bc4b014c4e8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NVC0_H__
-#define __NVKM_PM_NVC0_H__
-#include "priv.h"
-
-struct gf100_pm_func {
- const struct nvkm_specdom *doms_hub;
- const struct nvkm_specdom *doms_gpc;
- const struct nvkm_specdom *doms_part;
-};
-
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-
-extern const struct nvkm_funcdom gf100_perfctr_func;
-extern const struct nvkm_specdom gf100_pm_gpc[];
-
-extern const struct nvkm_specsrc gf100_pbfb_sources[];
-extern const struct nvkm_specsrc gf100_pmfb_sources[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
deleted file mode 100644
index 505565866b59..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specdom
-gf108_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf108_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x14, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x15, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x20, "part00_pbfb_02", gf100_pbfb_sources },
- { 0x21, "part00_pbfb_03", gf100_pbfb_sources },
- { 0x01, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x05, "part00_pmfb_02", gf100_pmfb_sources},
- { 0x07, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x12, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x13, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf108_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf108_pm_hub,
- .doms_part = gf108_pm_part,
-};
-
-int
-gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
deleted file mode 100644
index c61e8c010bb3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gf117_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x12, "part00_pmfb_00", gf117_pmfb_sources },
- { 0x15, "part00_pmfb_01", gf117_pmfb_sources },
- { 0x16, "part00_pmfb_02", gf117_pmfb_sources },
- { 0x18, "part00_pmfb_03", gf117_pmfb_sources },
- { 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
- { 0x23, "part00_pmfb_05", gf117_pmfb_sources },
- { 0x24, "part00_pmfb_06", gf117_pmfb_sources },
- { 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
- { 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
- { 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
- { 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
- { 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf117_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf117_pm_hub,
- .doms_part = gf117_pm_part,
-};
-
-int
-gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
deleted file mode 100644
index 75bf3df1cb18..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gk104_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- { 0x14165c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp1_pm_unk25c" },
- { 0x141a5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp2_pm_unk25c" },
- { 0x141e5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp3_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specsrc
-gk104_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- { 0x5042c8, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
- { 0x5042b8, (const struct nvkm_specmux[]) {
- { 0xff, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_hub[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x27, "hub01_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub02_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub03_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x03, "host_mmio_rd" },
- { 0x27, "hub04_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub05_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0xc0, (const struct nvkm_specsig[]) {
- { 0x74, "host_fb_rd3x" },
- { 0x75, "host_fb_rd3x_2" },
- { 0xa7, "hub06_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub07_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0xc7, "gpc00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "gpc02_tex_00", gk104_tex_sources },
- { 0x01, "gpc02_tex_01", gk104_tex_sources },
- { 0x02, "gpc02_tex_02", gk104_tex_sources },
- { 0x03, "gpc02_tex_03", gk104_tex_sources },
- { 0x04, "gpc02_tex_04", gk104_tex_sources },
- { 0x05, "gpc02_tex_05", gk104_tex_sources },
- { 0x06, "gpc02_tex_06", gk104_tex_sources },
- { 0x07, "gpc02_tex_07", gk104_tex_sources },
- { 0x08, "gpc02_tex_08", gk104_tex_sources },
- { 0x0a, "gpc02_tex_0a", gk104_tex_sources },
- { 0x0b, "gpc02_tex_0b", gk104_tex_sources },
- { 0x0d, "gpc02_tex_0c", gk104_tex_sources },
- { 0x0c, "gpc02_tex_0d", gk104_tex_sources },
- { 0x0e, "gpc02_tex_0e", gk104_tex_sources },
- { 0x0f, "gpc02_tex_0f", gk104_tex_sources },
- { 0x10, "gpc02_tex_10", gk104_tex_sources },
- { 0x11, "gpc02_tex_11", gk104_tex_sources },
- { 0x12, "gpc02_tex_12", gk104_tex_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_part[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
- { 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
- { 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
- { 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
- { 0x10, "part00_pmfb_04", gk104_pmfb_sources },
- { 0x12, "part00_pmfb_05", gk104_pmfb_sources },
- { 0x15, "part00_pmfb_06", gk104_pmfb_sources },
- { 0x16, "part00_pmfb_07", gk104_pmfb_sources },
- { 0x18, "part00_pmfb_08", gk104_pmfb_sources },
- { 0x21, "part00_pmfb_09", gk104_pmfb_sources },
- { 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
- { 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
- { 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
- { 0x47, "part00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "part01_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gk104_pm = {
- .doms_gpc = gk104_pm_gpc,
- .doms_hub = gk104_pm_hub,
- .doms_part = gk104_pm_part,
-};
-
-int
-gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
deleted file mode 100644
index 25874c541486..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2015 Nouveau project
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-gt200_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x1f, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_prop_sources[] = {
- { 0x408750, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_tex_sources[] = {
- { 0x408508, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-gt200_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc9, "pc01_gr_idle" },
- { 0x84, "pc01_strmout_00" },
- { 0x85, "pc01_strmout_01" },
- { 0xde, "pc01_trast_00" },
- { 0xdf, "pc01_trast_01" },
- { 0xe0, "pc01_trast_02" },
- { 0xe1, "pc01_trast_03" },
- { 0xe4, "pc01_trast_04" },
- { 0xe5, "pc01_trast_05" },
- { 0x82, "pc01_vattr_00" },
- { 0x83, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
- { 0xb0, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x55, "pc02_crop_00", gt200_crop_sources },
- { 0x56, "pc02_crop_01", gt200_crop_sources },
- { 0x57, "pc02_crop_02", gt200_crop_sources },
- { 0x58, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x78, "pc02_tex_00", gt200_tex_sources },
- { 0x79, "pc02_tex_01", gt200_tex_sources },
- { 0x7a, "pc02_tex_02", gt200_tex_sources },
- { 0x7b, "pc02_tex_03", gt200_tex_sources },
- { 0x32, "pc02_tex_04", gt200_tex_sources },
- { 0x33, "pc02_tex_05", gt200_tex_sources },
- { 0x34, "pc02_tex_06", gt200_tex_sources },
- { 0x74, "pc02_zrop_00", nv50_zrop_sources },
- { 0x75, "pc02_zrop_01", nv50_zrop_sources },
- { 0x76, "pc02_zrop_02", nv50_zrop_sources },
- { 0x77, "pc02_zrop_03", nv50_zrop_sources },
- { 0xec, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
deleted file mode 100644
index 54c23e2b6645..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static const struct nvkm_specsrc
-gt215_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- { 0xff, 24, "unk24" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-static const struct nvkm_specdom
-gt215_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xcb, "pc01_gr_idle" },
- { 0x86, "pc01_strmout_00" },
- { 0x87, "pc01_strmout_01" },
- { 0xe0, "pc01_trast_00" },
- { 0xe1, "pc01_trast_01" },
- { 0xe2, "pc01_trast_02" },
- { 0xe3, "pc01_trast_03" },
- { 0xe6, "pc01_trast_04" },
- { 0xe7, "pc01_trast_05" },
- { 0x84, "pc01_vattr_00" },
- { 0x85, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", gt215_zcull_sources },
- { 0x08, "pc01_zcull_01", gt215_zcull_sources },
- { 0x09, "pc01_zcull_02", gt215_zcull_sources },
- { 0x0a, "pc01_zcull_03", gt215_zcull_sources },
- { 0x0b, "pc01_zcull_04", gt215_zcull_sources },
- { 0x0c, "pc01_zcull_05", gt215_zcull_sources },
- { 0xb2, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x64, "pc02_crop_00", gt200_crop_sources },
- { 0x65, "pc02_crop_01", gt200_crop_sources },
- { 0x66, "pc02_crop_02", gt200_crop_sources },
- { 0x67, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x80, "pc02_tex_00", gt200_tex_sources },
- { 0x81, "pc02_tex_01", gt200_tex_sources },
- { 0x82, "pc02_tex_02", gt200_tex_sources },
- { 0x83, "pc02_tex_03", gt200_tex_sources },
- { 0x3a, "pc02_tex_04", gt200_tex_sources },
- { 0x3b, "pc02_tex_05", gt200_tex_sources },
- { 0x3c, "pc02_tex_06", gt200_tex_sources },
- { 0x7c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x7d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x7e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x7f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xcc, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
deleted file mode 100644
index eba5b3b79340..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static void
-nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
- nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
- nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
-}
-
-static void
-nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
- case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
- case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
- case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
- }
- dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
-}
-
-static void
-nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
-
- if (nv40pm->sequence != pm->sequence) {
- nvkm_wr32(device, 0x400084, 0x00000020);
- nv40pm->sequence = pm->sequence;
- }
-}
-
-const struct nvkm_funcdom
-nv40_perfctr_func = {
- .init = nv40_perfctr_init,
- .read = nv40_perfctr_read,
- .next = nv40_perfctr_next,
-};
-
-static const struct nvkm_pm_func
-nv40_pm_ = {
-};
-
-int
-nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nv40_pm *pm;
- int ret;
-
- if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
- *ppm = &pm->base;
-
- ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
- if (ret)
- return ret;
-
- return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
-}
-
-static const struct nvkm_specdom
-nv40_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
deleted file mode 100644
index afb79843723d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NV40_H__
-#define __NVKM_PM_NV40_H__
-#define nv40_pm(p) container_of((p), struct nv40_pm, base)
-#include "priv.h"
-
-struct nv40_pm {
- struct nvkm_pm base;
- u32 sequence;
-};
-
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-extern const struct nvkm_funcdom nv40_perfctr_func;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
deleted file mode 100644
index bbd3404901f9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-nv50_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-const struct nvkm_specsrc
-nv50_zrop_sources[] = {
- { 0x40708c, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0xf, 16, "sel1", true },
- {}
- }, "pgraph_rop0_zrop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_prop_sources[] = {
- { 0x40be50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc3_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0x7, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_tex_sources[] = {
- { 0x40b808, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pgraph_tpc3_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specdom
-nv50_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc8, "pc01_gr_idle" },
- { 0x7f, "pc01_strmout_00" },
- { 0x80, "pc01_strmout_01" },
- { 0xdc, "pc01_trast_00" },
- { 0xdd, "pc01_trast_01" },
- { 0xde, "pc01_trast_02" },
- { 0xdf, "pc01_trast_03" },
- { 0xe2, "pc01_trast_04" },
- { 0xe3, "pc01_trast_05" },
- { 0x7c, "pc01_vattr_00" },
- { 0x7d, "pc01_vattr_01" },
- { 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
- { 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
- { 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
- { 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
- { 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
- { 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
- { 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
- { 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
- { 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
- { 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
- { 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
- { 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
- { 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
- { 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
- { 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
- { 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
- { 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
- { 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
- { 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
- { 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
- { 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
- { 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
- { 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
- { 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
- { 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
- { 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
- { 0x20, "pc01_zcull_00", nv50_zcull_sources },
- { 0x21, "pc01_zcull_01", nv50_zcull_sources },
- { 0x22, "pc01_zcull_02", nv50_zcull_sources },
- { 0x23, "pc01_zcull_03", nv50_zcull_sources },
- { 0x24, "pc01_zcull_04", nv50_zcull_sources },
- { 0x25, "pc01_zcull_05", nv50_zcull_sources },
- { 0xae, "pc01_unk00" },
- { 0xee, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x52, "pc02_crop_00", nv50_crop_sources },
- { 0x53, "pc02_crop_01", nv50_crop_sources },
- { 0x54, "pc02_crop_02", nv50_crop_sources },
- { 0x55, "pc02_crop_03", nv50_crop_sources },
- { 0x00, "pc02_prop_00", nv50_prop_sources },
- { 0x01, "pc02_prop_01", nv50_prop_sources },
- { 0x02, "pc02_prop_02", nv50_prop_sources },
- { 0x03, "pc02_prop_03", nv50_prop_sources },
- { 0x04, "pc02_prop_04", nv50_prop_sources },
- { 0x05, "pc02_prop_05", nv50_prop_sources },
- { 0x06, "pc02_prop_06", nv50_prop_sources },
- { 0x07, "pc02_prop_07", nv50_prop_sources },
- { 0x70, "pc02_tex_00", nv50_tex_sources },
- { 0x71, "pc02_tex_01", nv50_tex_sources },
- { 0x72, "pc02_tex_02", nv50_tex_sources },
- { 0x73, "pc02_tex_03", nv50_tex_sources },
- { 0x40, "pc02_tex_04", nv50_tex_sources },
- { 0x41, "pc02_tex_05", nv50_tex_sources },
- { 0x42, "pc02_tex_06", nv50_tex_sources },
- { 0x6c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x6d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x6e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x6f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xee, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
deleted file mode 100644
index c011227f7052..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_PRIV_H__
-#define __NVKM_PM_PRIV_H__
-#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
-#include <engine/pm.h>
-
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm *);
-
-struct nvkm_pm_func {
- void (*fini)(struct nvkm_pm *);
-};
-
-struct nvkm_perfctr {
- struct list_head head;
- u8 domain;
- u8 signal[4];
- u64 source[4][8];
- int slot;
- u32 logic_op;
- u32 ctr;
-};
-
-struct nvkm_specmux {
- u32 mask;
- u8 shift;
- const char *name;
- bool enable;
-};
-
-struct nvkm_specsrc {
- u32 addr;
- const struct nvkm_specmux *mux;
- const char *name;
-};
-
-struct nvkm_perfsrc {
- struct list_head head;
- char *name;
- u32 addr;
- u32 mask;
- u8 shift;
- bool enable;
-};
-
-extern const struct nvkm_specsrc nv50_zcull_sources[];
-extern const struct nvkm_specsrc nv50_zrop_sources[];
-extern const struct nvkm_specsrc g84_vfetch_sources[];
-extern const struct nvkm_specsrc gt200_crop_sources[];
-extern const struct nvkm_specsrc gt200_prop_sources[];
-extern const struct nvkm_specsrc gt200_tex_sources[];
-
-struct nvkm_specsig {
- u8 signal;
- const char *name;
- const struct nvkm_specsrc *source;
-};
-
-struct nvkm_perfsig {
- const char *name;
- u8 source[8];
-};
-
-struct nvkm_specdom {
- u16 signal_nr;
- const struct nvkm_specsig *signal;
- const struct nvkm_funcdom *func;
-};
-
-#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
-#include <core/object.h>
-
-struct nvkm_perfdom {
- struct nvkm_object object;
- struct nvkm_perfmon *perfmon;
- struct list_head head;
- struct list_head list;
- const struct nvkm_funcdom *func;
- struct nvkm_perfctr *ctr[4];
- char name[32];
- u32 addr;
- u8 mode;
- u32 clk;
- u16 signal_nr;
- struct nvkm_perfsig signal[] __counted_by(signal_nr);
-};
-
-struct nvkm_funcdom {
- void (*init)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*read)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*next)(struct nvkm_pm *, struct nvkm_perfdom *);
-};
-
-int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
- const struct nvkm_specdom *);
-
-#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
-
-struct nvkm_perfmon {
- struct nvkm_object object;
- struct nvkm_pm *pm;
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index 50f0c1914f58..4c3f74396579 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -46,6 +46,8 @@ u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
struct nvkm_device *, int, int *);
+int gp100_ram_init(struct nvkm_ram *);
+
/* RAM type-specific MR calculation routines */
int nvkm_sddr2_calc(struct nvkm_ram *);
int nvkm_sddr3_calc(struct nvkm_ram *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index 378f6fb70990..8987a21e81d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -27,7 +27,7 @@
#include <subdev/bios/init.h>
#include <subdev/bios/rammap.h>
-static int
+int
gp100_ram_init(struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
index 8550f5e47347..b6b6ee59019d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
@@ -5,6 +5,7 @@
static const struct nvkm_ram_func
gp102_ram = {
+ .init = gp100_ram_init,
};
int
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 050ca7eafac5..5f8002f6bb7a 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -242,8 +242,7 @@ static void omapdss_walk_device(struct device *dev, struct device_node *node,
of_node_put(n);
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
+ for_each_endpoint_of_node(node, n) {
struct device_node *pn = of_graph_get_remote_port_parent(n);
if (!pn)
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 6598c9c08ba1..d3eac4817d76 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -695,6 +695,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (uintptr_t)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
@@ -753,6 +757,7 @@ err_gem_deinit:
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
+err_alloc_workqueue:
omap_disconnect_pipelines(ddev);
drm_dev_put(ddev);
return ret;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 9f49b0189d3b..d3a9a9fafe4e 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -87,6 +87,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
	  Say Y here if you want support for the BOE TV101WUM and AUO KD101N80
	  45NA WUXGA DSI video mode panels
+config DRM_PANEL_BOE_TV101WUM_LL2
+ tristate "BOE TV101WUM LL2 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want support for the BOE TV101WUM-LL2
+	  WUXGA DSI video mode panel
+
config DRM_PANEL_EBBG_FT8719
tristate "EBBG FT8719 panel driver"
depends on OF
@@ -784,7 +793,8 @@ config DRM_PANEL_SHARP_LS060T1SX01
config DRM_PANEL_SITRONIX_ST7701
tristate "Sitronix ST7701 panel driver"
depends on OF
- depends on DRM_MIPI_DSI
+ depends on SPI || DRM_MIPI_DSI
+ select DRM_MIPI_DBI if SPI
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for the Sitronix
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5581387707c6..987a08702410 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index e77db8597eb7..7e66db4a88bb 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -377,6 +377,8 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ boe->panel.prepare_prev_first = true;
+
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
if (IS_ERR(boe->panel.backlight))
return dev_err_probe(dev, PTR_ERR(boe->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 763e9f8342d3..0b87f1e6ecae 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -16,12 +16,31 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_th101mb31ig002;
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ int (*init)(struct boe_th101mb31ig002 *ctx);
+ unsigned int lanes;
+ bool lp11_before_reset;
+ unsigned int vcioo_to_lp11_delay_ms;
+ unsigned int lp11_to_reset_delay_ms;
+ unsigned int backlight_off_to_display_off_delay_ms;
+ unsigned int enter_sleep_to_reset_down_delay_ms;
+ unsigned int power_off_delay_ms;
+};
struct boe_th101mb31ig002 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
+ const struct panel_desc *desc;
+
struct regulator *power;
struct gpio_desc *enable;
struct gpio_desc *reset;
@@ -39,74 +58,123 @@ static void boe_th101mb31ig002_reset(struct boe_th101mb31ig002 *ctx)
usleep_range(5000, 6000);
}
-static int boe_th101mb31ig002_enable(struct drm_panel *panel)
+static int boe_th101mb31ig002_enable(struct boe_th101mb31ig002 *ctx)
{
- struct boe_th101mb31ig002 *ctx = container_of(panel,
- struct boe_th101mb31ig002,
- panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0xAB, 0xBA);
- mipi_dsi_dcs_write_seq(dsi, 0xE1, 0xBA, 0xAB);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x10, 0x01, 0x47, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C, 0x14, 0x04, 0x50, 0x50, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x56, 0x53, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33, 0x30, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0xB0, 0x00, 0x00, 0x10, 0x00, 0x10,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x7C, 0x65, 0x55, 0x49, 0x46, 0x36,
- 0x3B, 0x24, 0x3D, 0x3C, 0x3D, 0x5C, 0x4C,
- 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7C,
- 0x65, 0x55, 0x49, 0x46, 0x36, 0x3B, 0x24,
- 0x3D, 0x3C, 0x3D, 0x5C, 0x4C, 0x55, 0x47,
- 0x46, 0x39, 0x26, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xFF, 0x87, 0x12, 0x34, 0x44, 0x44,
- 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0F,
- 0x00, 0x00, 0xC1);
- mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x54, 0x94, 0x02, 0x85, 0x9F, 0x00,
- 0x7F, 0x00, 0x54, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
- 0x22, 0x20, 0x44, 0xFF, 0x18, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC3, 0x86, 0x46, 0x05, 0x05, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0F, 0x0F, 0x0D, 0x0D, 0x13, 0x13, 0x11,
- 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC4, 0x07, 0x07, 0x04, 0x04, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0E, 0x0E, 0x0C, 0x0C, 0x12, 0x12, 0x10,
- 0x10, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC6, 0x2A, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0xC8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xCA, 0xCB, 0x43);
- mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x0E, 0x4B, 0x4B, 0x20, 0x19, 0x6B,
- 0x06, 0xB3);
- mipi_dsi_dcs_write_seq(dsi, 0xD2, 0xE3, 0x2B, 0x38, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x00, 0x01, 0x00, 0x0E, 0x04, 0x44,
- 0x08, 0x10, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xE6, 0x80, 0x01, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF0, 0x12, 0x03, 0x20, 0x00, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF3, 0x00);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- msleep(120);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set panel on: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
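+ /*
+ * The *_multi() helpers below record any failure in dsi_ctx.accum_err
+ * and become no-ops once it is set, so this sequence needs no
+ * per-call error handling.
+ */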
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x65, 0x55, 0x49, 0x46, 0x36,
+ 0x3b, 0x24, 0x3d, 0x3c, 0x3d, 0x5c, 0x4c,
+ 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7c,
+ 0x65, 0x55, 0x49, 0x46, 0x36, 0x3b, 0x24,
+ 0x3d, 0x3c, 0x3d, 0x5c, 0x4c, 0x55, 0x47,
+ 0x46, 0x39, 0x26, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xff, 0x87, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x7f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x86, 0x46, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x07, 0x07, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc6, 0x2a, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x01, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
- return 0;
+static int starry_er88577_init_cmd(struct boe_th101mb31ig002 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(70);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x61, 0x4f, 0x42, 0x3e, 0x2d,
+ 0x31, 0x1a, 0x33, 0x33, 0x33, 0x52, 0x40,
+ 0x47, 0x38, 0x34, 0x26, 0x0e, 0x06, 0x7c,
+ 0x61, 0x4f, 0x42, 0x3e, 0x2d, 0x31, 0x1a,
+ 0x33, 0x33, 0x33, 0x52, 0x40, 0x47, 0x38,
+ 0x34, 0x26, 0x0e, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xcc, 0x76, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x6f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x87, 0x47, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x06, 0x06, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x40, 0x0d, 0xff, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x33, 0x20, 0x3a, 0xd5, 0x86, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x09, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_disable(struct drm_panel *panel)
@@ -114,21 +182,21 @@ static int boe_th101mb31ig002_disable(struct drm_panel *panel)
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to set panel off: %d\n", ret);
+ if (ctx->desc->backlight_off_to_display_off_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->backlight_off_to_display_off_delay_ms);
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (ctx->desc->enter_sleep_to_reset_down_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->enter_sleep_to_reset_down_delay_ms);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
@@ -141,6 +209,9 @@ static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->enable, 0);
regulator_disable(ctx->power);
+ if (ctx->desc->power_off_delay_ms)
+ msleep(ctx->desc->power_off_delay_ms);
+
return 0;
}
@@ -158,10 +229,25 @@ static int boe_th101mb31ig002_prepare(struct drm_panel *panel)
return ret;
}
+ if (ctx->desc->vcioo_to_lp11_delay_ms)
+ msleep(ctx->desc->vcioo_to_lp11_delay_ms);
+
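+ /*
+ * Some panels want the DSI lanes in LP-11 before reset is released;
+ * a DCS NOP forces the host to initialise the link first.
+ */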
+ if (ctx->desc->lp11_before_reset) {
+ ret = mipi_dsi_dcs_nop(ctx->dsi);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->desc->lp11_to_reset_delay_ms)
+ msleep(ctx->desc->lp11_to_reset_delay_ms);
+
gpiod_set_value_cansleep(ctx->enable, 1);
msleep(50);
boe_th101mb31ig002_reset(ctx);
- boe_th101mb31ig002_enable(panel);
+
+ ret = ctx->desc->init(ctx);
+ if (ret)
+ return ret;
return 0;
}
@@ -181,39 +267,62 @@ static const struct drm_display_mode boe_th101mb31ig002_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
+static const struct panel_desc boe_th101mb31ig002_desc = {
+ .modes = &boe_th101mb31ig002_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_LPM,
+ .init = boe_th101mb31ig002_enable,
+};
+
+static const struct drm_display_mode starry_er88577_default_mode = {
+ .clock = (800 + 25 + 25 + 25) * (1280 + 20 + 4 + 12) * 60 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 25,
+ .hsync_end = 800 + 25 + 25,
+ .htotal = 800 + 25 + 25 + 25,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 12,
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc starry_er88577_desc = {
+ .modes = &starry_er88577_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init = starry_er88577_init_cmd,
+ .lp11_before_reset = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 50,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+ .power_off_delay_ms = 1000,
+};
+
static int boe_th101mb31ig002_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev,
- &boe_th101mb31ig002_default_mode);
- if (!mode) {
- dev_err(panel->dev, "Failed to add mode %ux%u@%u\n",
- boe_th101mb31ig002_default_mode.hdisplay,
- boe_th101mb31ig002_default_mode.vdisplay,
- drm_mode_vrefresh(&boe_th101mb31ig002_default_mode));
- return -ENOMEM;
- }
-
- drm_mode_set_name(mode);
+ const struct drm_display_mode *desc_mode = ctx->desc->modes;
connector->display_info.bpc = 8;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, ctx->orientation);
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, desc_mode);
}
static enum drm_panel_orientation
@@ -237,6 +346,7 @@ static const struct drm_panel_funcs boe_th101mb31ig002_funcs = {
static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
{
struct boe_th101mb31ig002 *ctx;
+ const struct panel_desc *desc;
int ret;
ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
@@ -246,11 +356,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_NO_EOT_PACKET |
- MIPI_DSI_MODE_LPM;
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ ctx->desc = desc;
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
@@ -262,7 +372,7 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->enable),
"Failed to get enable GPIO\n");
- ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Failed to get reset GPIO\n");
@@ -302,7 +412,14 @@ static void boe_th101mb31ig002_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id boe_th101mb31ig002_of_match[] = {
- { .compatible = "boe,th101mb31ig002-28a", },
+ {
+ .compatible = "boe,th101mb31ig002-28a",
+ .data = &boe_th101mb31ig002_desc
+ },
+ {
+ .compatible = "starry,er88577",
+ .data = &starry_er88577_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_th101mb31ig002_of_match);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
new file mode 100644
index 000000000000..50e4a5341bc6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+// Copyright (c) 2024, Neil Armstrong <neil.armstrong@linaro.org>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_tv101wum_ll2 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data boe_tv101wum_ll2_supplies[] = {
+ { .supply = "vsp" },
+ { .supply = "vsn" },
+};
+
+static inline struct boe_tv101wum_ll2 *to_boe_tv101wum_ll2(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_tv101wum_ll2, panel);
+}
+
+static void boe_tv101wum_ll2_reset(struct boe_tv101wum_ll2 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(120);
+}
+
+static int boe_tv101wum_ll2_on(struct boe_tv101wum_ll2 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
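+ /* Send the whole init sequence with low-power (LP) transmission */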
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0xff, 0x81, 0x68, 0x6c, 0x22,
+ 0x6d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x2c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa2, 0x38);
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x0c);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x80, 0xfd);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x50, 0x00);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static void boe_tv101wum_ll2_off(struct boe_tv101wum_ll2 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
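+ /* The power-off sequence is sent in high-speed mode */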
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 70);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x5a);
+
+ mipi_dsi_msleep(&dsi_ctx, 150);
+}
+
+static int boe_tv101wum_ll2_prepare(struct drm_panel *panel)
+{
+ struct boe_tv101wum_ll2 *ctx = to_boe_tv101wum_ll2(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ boe_tv101wum_ll2_reset(ctx);
+
+ ret = boe_tv101wum_ll2_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_tv101wum_ll2_unprepare(struct drm_panel *panel)
+{
+ struct boe_tv101wum_ll2 *ctx = to_boe_tv101wum_ll2(panel);
+
+ /* Ignore errors from the off sequence; assert reset and disable the regulators regardless */
+ boe_tv101wum_ll2_off(ctx);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_ll2_mode = {
+ .clock = (1200 + 27 + 8 + 12) * (1920 + 155 + 8 + 32) * 60 / 1000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 27,
+ .hsync_end = 1200 + 27 + 8,
+ .htotal = 1200 + 27 + 8 + 12,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 155,
+ .vsync_end = 1920 + 155 + 8,
+ .vtotal = 1920 + 155 + 8 + 32,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int boe_tv101wum_ll2_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ /* We do not set display_info.bpc since it defaults to 8 when left unset */
+ return drm_connector_helper_get_modes_fixed(connector, &boe_tv101wum_ll2_mode);
+}
+
+static const struct drm_panel_funcs boe_tv101wum_ll2_panel_funcs = {
+ .prepare = boe_tv101wum_ll2_prepare,
+ .unprepare = boe_tv101wum_ll2_unprepare,
+ .get_modes = boe_tv101wum_ll2_get_modes,
+};
+
+static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_tv101wum_ll2 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ boe_tv101wum_ll2_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_tv101wum_ll2_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_tv101wum_ll2 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_tv101wum_ll2_of_match[] = {
+ { .compatible = "boe,tv101wum-ll2" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_tv101wum_ll2_of_match);
+
+static struct mipi_dsi_driver boe_tv101wum_ll2_driver = {
+ .probe = boe_tv101wum_ll2_probe,
+ .remove = boe_tv101wum_ll2_remove,
+ .driver = {
+ .name = "panel-boe-tv101wum_ll2",
+ .of_match_table = boe_tv101wum_ll2_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_tv101wum_ll2_driver);
+
+MODULE_DESCRIPTION("DRM driver for BOE TV101WUM-LL2 Panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index ce919a980875..3e5b0d8636d0 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -54,12 +54,22 @@ struct boe_panel {
struct gpio_desc *enable_gpio;
};
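+/*
+ * DCS register 0xff selects the NT36523 command page; the helpers below
+ * wrap the page switch and the 0xfb "reload commands" write that follows
+ * almost every switch in these init sequences.
+ */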
+#define NT36523_DCS_SWITCH_PAGE 0xff
+
+#define nt36523_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36523_DCS_SWITCH_PAGE, (page))
+
+static void nt36523_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
static int boe_tv110c9m_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x5a);
@@ -99,16 +109,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
0x03, 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x21);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
@@ -119,89 +127,66 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x03, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x09, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0b, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x10, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x11, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x12, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x15, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x17, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x21, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x28, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x38, 0x40);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x00);
@@ -244,7 +229,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdc, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdd, 0x22);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe0, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe1, 0x05);
@@ -258,8 +242,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x07);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x60);
@@ -281,26 +266,22 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x50);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6a, 0x16);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x70, 0x30);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0xf3);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0xff);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x28);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x30);
@@ -323,7 +304,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x7f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x65);
@@ -343,7 +323,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x9e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xcb, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa9, 0x49);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaa, 0x4b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x48);
@@ -373,9 +352,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x4f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc4, 0x3a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x42);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x75);
@@ -394,17 +373,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -415,30 +391,30 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x23);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x23);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xe0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x51, 0x00, 0xff);
@@ -464,13 +440,12 @@ static int inx_hj110iz_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x87);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x4b);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x63);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x91);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x69);
@@ -482,10 +457,10 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x69, 0x98);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x75, 0xa2);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x77, 0xb3);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x43);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x91, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x92, 0x4c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x94, 0x86);
@@ -493,7 +468,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0xd0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x63, 0x70);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0xca);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x03);
@@ -538,7 +512,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x35);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0xa7);
@@ -546,7 +519,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3d, 0x12);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x33);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x41, 0x40);
@@ -556,7 +528,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4a, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4b, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4c, 0x14);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4d, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4e, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4f, 0x65);
@@ -569,7 +540,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x88);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7c, 0x00);
@@ -581,7 +551,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x82, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd7, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd8, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd9, 0x23);
@@ -609,43 +578,32 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
-
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x43, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x44, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x45, 0x46);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x48, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x49, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5b, 0x80);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5d, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x60, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6e, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
@@ -653,9 +611,8 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xb0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0xf4);
@@ -674,18 +631,15 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x86);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x31);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x62);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x31, 0x06);
@@ -693,11 +647,9 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x67);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x0b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc8, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
@@ -711,21 +663,18 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaf, 0x39);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x38);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd0, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd1, 0x54);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xde, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x02);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc0, 0x18);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc1, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5a, 0x00);
@@ -743,20 +692,17 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9b, 0xbe);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x14);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbc, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbd, 0x28);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -766,7 +712,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x1a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x65, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
@@ -783,14 +728,11 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x88, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x89, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0x3f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0x30);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe8, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x3c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x99, 0x95);
@@ -800,7 +742,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9d, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9e, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
+ nt36523_switch_page(&ctx, 0x25);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0xd7);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x02);
@@ -809,8 +751,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x5b);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -819,7 +760,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -828,7 +768,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -838,7 +777,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
+ nt36523_switch_page(&ctx, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -847,7 +786,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -856,7 +794,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -866,22 +803,21 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x03, 0xae, 0x1a, 0x04, 0x04);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 3a574a9b46e7..767e47a2b0c1 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -954,16 +954,24 @@ static void panel_edp_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-edp. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-edp. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown(), we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+ * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown() but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
static void panel_edp_remove(struct device *dev)
@@ -1845,7 +1853,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1891,15 +1902,19 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ad, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1915,8 +1930,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1929,9 +1946,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "MB116AN01"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
@@ -1960,8 +1978,6 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"),
- EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
-
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index cb9f46e853de..92b03a2f65a3 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -339,6 +339,156 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
+static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
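+ /*
+ * The mipi_dsi_dcs_write_seq() macro returns from this function on the
+ * first failed write, so the sequence needs no explicit error checks.
+ */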
+ /* DCS commands do not seem to be sent correctly without this delay */
+ msleep(20);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (C0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
+
+ /* Set CABC control (C9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (D4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (D8h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (C6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode mchp_ac40t08a_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 12,
+ .hsync_end = 720 + 12 + 24,
+ .htotal = 720 + 12 + 12 + 24,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 13,
+ .vsync_end = 1280 + 14,
+ .vtotal = 1280 + 14 + 13,
+ .clock = 60226,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 76,
+ .height_mm = 132,
+};
+
+static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
+ .mode = &mchp_ac40t08a_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = mchp_ac40t08a_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -486,7 +636,7 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
@@ -555,6 +705,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
+ { .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 775d5d5e828c..1fbc5d433d75 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -121,19 +121,19 @@ struct ili9341_config {
const struct drm_display_mode mode;
/* ca: TODO: need comments for this register */
u8 ca[ILI9341_CA_LEN];
- /* power_b: TODO: need comments for this register */
+ /* power_b: Power control B (CFh) */
u8 power_b[ILI9341_POWER_B_LEN];
- /* power_seq: TODO: need comments for this register */
+ /* power_seq: Power on sequence control (EDh) */
u8 power_seq[ILI9341_POWER_SEQ_LEN];
- /* dtca: TODO: need comments for this register */
+ /* dtca: Driver timing control A (E8h) */
u8 dtca[ILI9341_DTCA_LEN];
- /* dtcb: TODO: need comments for this register */
+ /* dtcb: Driver timing control B (EAh) */
u8 dtcb[ILI9341_DTCB_LEN];
- /* power_a: TODO: need comments for this register */
+ /* power_a: Power control A (CBh) */
u8 power_a[ILI9341_POWER_A_LEN];
/* frc: Frame Rate Control (In Normal Mode/Full Colors) (B1h) */
u8 frc[ILI9341_FRC_LEN];
- /* prc: TODO: need comments for this register */
+ /* prc: Pump ratio control (F7h) */
u8 prc;
/* dfc_1: B6h DISCTRL (Display Function Control) */
u8 dfc_1[ILI9341_DFC_1_LEN];
@@ -147,7 +147,7 @@ struct ili9341_config {
u8 vcom_2;
/* address_mode: Memory Access Control (36h) */
u8 address_mode;
- /* g3amma_en: TODO: need comments for this register */
+ /* g3amma_en: Enable 3G (F2h) */
u8 g3amma_en;
/* rgb_interface: RGB Interface Signal Control (B0h) */
u8 rgb_interface;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index e4a44cd26c4d..a3c79ad99d0b 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -380,7 +380,172 @@ static const struct panel_desc com35h3p70ulc_desc = {
.lanes = 2,
};
+static void dmt028vghmcmi_1d_init(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x60, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x63, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x16);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x41, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x42, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x43, 0x83);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x44, 0x89);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x45, 0x8a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x46, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x47, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x50, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x00);
+ /* Gamma settings */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa1, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa3, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa6, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa7, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa8, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa9, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaa, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xab, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xac, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xad, 0x19);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xae, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc3, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc5, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc7, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc8, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xca, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcc, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcd, 0x15);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xce, 0x13);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x77);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xe1, 0x79);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x06);
+ /* GIP 0 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x21);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x98);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x09, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x10, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x11, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x15, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x16, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1d, 0x00);
+ /* GIP 1 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x22, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x24, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x25, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x26, 0x45);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x27, 0x67);
+ /* GIP 2 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x32, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x33, 0xbc);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x34, 0xad);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x35, 0xda);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x36, 0xcb);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x37, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x38, 0x55);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x39, 0x76);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3a, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3b, 0x88);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3c, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3d, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3f, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x22);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x00);
+};
+
+static const struct drm_display_mode dmt028vghmcmi_1d_default_mode = {
+ .clock = 22000,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 20,
+ .hsync_end = 480 + 20 + 4,
+ .htotal = 480 + 20 + 4 + 10,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 40,
+ .vsync_end = 640 + 40 + 4,
+ .vtotal = 640 + 40 + 4 + 20,
+
+ .width_mm = 53,
+ .height_mm = 79,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc dmt028vghmcmi_1d_desc = {
+ .init_sequence = dmt028vghmcmi_1d_init,
+ .display_mode = &dmt028vghmcmi_1d_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 2,
+};
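
The Densitron init table never checks a return value; it leans on the mipi_dsi_multi_context pattern, where each *_multi call stores the first failure in accum_err and later calls become no-ops. A sketch of how the ili9806e core is assumed to consume such a table (names condensed, not the literal driver code):

static int panel_prepare_sketch(struct mipi_dsi_device *dsi,
				const struct panel_desc *desc)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	desc->init_sequence(&ctx);		/* e.g. dmt028vghmcmi_1d_init() */
	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);		/* skipped if an earlier write failed */
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;			/* 0 on success, first error code otherwise */
}
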
+
static const struct of_device_id ili9806e_of_match[] = {
+ { .compatible = "densitron,dmt028vghmcmi-1d", .data = &dmt028vghmcmi_1d_desc },
{ .compatible = "ortustech,com35h3p70ulc", .data = &com35h3p70ulc_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index c6b669866fed..44897e5218a6 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -31,8 +31,6 @@ struct jadard_panel_desc {
bool reset_before_power_off_vcioo;
unsigned int vcioo_to_lp11_delay_ms;
unsigned int lp11_to_reset_delay_ms;
- unsigned int exit_sleep_to_display_on_delay_ms;
- unsigned int display_on_delay_ms;
unsigned int backlight_off_to_display_off_delay_ms;
unsigned int display_off_to_enter_sleep_delay_ms;
unsigned int enter_sleep_to_reset_down_delay_ms;
@@ -48,29 +46,22 @@ struct jadard {
struct gpio_desc *reset;
};
-static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
+#define JD9365DA_DCS_SWITCH_PAGE 0xe0
+
+#define jd9365da_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, JD9365DA_DCS_SWITCH_PAGE, (page))
+
+static void jadard_enable_standard_cmds(struct mipi_dsi_multi_context *dsi_ctx)
{
- return container_of(panel, struct jadard, panel);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe1, 0x93);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe2, 0x65);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe3, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x80, 0x03);
}
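
Every JD9365DA init table in this driver now opens the same way, so the shared preamble is worth showing once. A usage sketch of the helpers just defined (values taken from the tables below; per the function name, the 0xe1/0xe2/0xe3/0x80 writes enable the controller's standard command set):

	struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };

	jd9365da_switch_page(&dsi_ctx, 0x00);	/* expands to a 0xe0 page-select write */
	jadard_enable_standard_cmds(&dsi_ctx);	/* 0xe1=0x93, 0xe2=0x65, 0xe3=0xf8, 0x80=0x03 */

	jd9365da_switch_page(&dsi_ctx, 0x01);	/* per-page register writes follow */
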
-static int jadard_enable(struct drm_panel *panel)
+static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
{
- struct jadard *jadard = panel_to_jadard(panel);
- struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
-
- msleep(120);
-
- mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
-
- if (jadard->desc->exit_sleep_to_display_on_delay_ms)
- mipi_dsi_msleep(&dsi_ctx, jadard->desc->exit_sleep_to_display_on_delay_ms);
-
- mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-
- if (jadard->desc->display_on_delay_ms)
- mipi_dsi_msleep(&dsi_ctx, jadard->desc->display_on_delay_ms);
-
- return dsi_ctx.accum_err;
+ return container_of(panel, struct jadard, panel);
}
static int jadard_disable(struct drm_panel *panel)
@@ -189,7 +180,6 @@ static const struct drm_panel_funcs jadard_funcs = {
.disable = jadard_disable,
.unprepare = jadard_unprepare,
.prepare = jadard_prepare,
- .enable = jadard_enable,
.get_modes = jadard_get_modes,
.get_orientation = jadard_panel_get_orientation,
};
@@ -198,12 +188,10 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x7E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x00);
@@ -276,7 +264,8 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x37);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x23);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x10);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x45);
@@ -360,13 +349,21 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7C, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x03);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x7B);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x60);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x2A);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x59);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
return dsi_ctx.accum_err;
};
@@ -398,12 +395,10 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x3B);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0C, 0x74);
@@ -471,7 +466,8 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x20);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x0F);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x00);
@@ -584,15 +580,23 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7A, 0x17);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x82);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE6, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE7, 0x0C);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
return dsi_ctx.accum_err;
};
@@ -623,12 +627,10 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0xf8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xc7);
@@ -694,7 +696,8 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x26);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x5f);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x5f);
@@ -808,12 +811,24 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0e);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xb3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
return dsi_ctx.accum_err;
};
@@ -843,8 +858,257 @@ static const struct jadard_panel_desc kingdisplay_kd101ne3_40ti_desc = {
.reset_before_power_off_vcioo = true,
.vcioo_to_lp11_delay_ms = 5,
.lp11_to_reset_delay_ms = 10,
- .exit_sleep_to_display_on_delay_ms = 120,
- .display_on_delay_ms = 20,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .display_off_to_enter_sleep_delay_ms = 50,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+};
+
+static int melfas_lmfbx101117480_init_cmds(struct jadard *jadard)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x1e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x6a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x3e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x3e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x41);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4f, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0xb4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x03);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe7, 0x06);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+};
+
+static const struct jadard_panel_desc melfas_lmfbx101117480_desc = {
+ .mode = {
+ .clock = (800 + 24 + 24 + 24) * (1280 + 30 + 4 + 8) * 60 / 1000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 24,
+ .hsync_end = 800 + 24 + 24,
+ .htotal = 800 + 24 + 24 + 24,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 8,
+
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init = melfas_lmfbx101117480_init_cmds,
+ .lp11_before_reset = true,
+ .reset_before_power_off_vcioo = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 10,
.backlight_off_to_display_off_delay_ms = 100,
.display_off_to_enter_sleep_delay_ms = 50,
.enter_sleep_to_reset_down_delay_ms = 100,
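
The Melfas desc is the only one here that leaves the pixel-clock arithmetic inline; worked out (not in the patch), it is consistent with a 60 Hz refresh:

	/*
	 * htotal = 800 + 24 + 24 + 24 = 872
	 * vtotal = 1280 + 30 + 4 + 8  = 1322
	 * .clock = 872 * 1322 * 60 / 1000 = 69167 kHz  (~69.2 MHz)
	 */
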
@@ -927,6 +1191,10 @@ static const struct of_device_id jadard_of_match[] = {
.data = &kingdisplay_kd101ne3_40ti_desc
},
{
+ .compatible = "melfas,lmfbx101117480",
+ .data = &melfas_lmfbx101117480_desc
+ },
+ {
.compatible = "radxa,display-10hd-ad001",
.data = &cz101b4001_desc
},
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 483dc88d16d8..4eb71e85e9e9 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -41,142 +41,89 @@ static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
- mipi_dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0xb5);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0xb5);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437);
- if (ret < 0) {
- dev_err(dev, "Failed to set column address: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f);
- if (ret < 0) {
- dev_err(dev, "Failed to set page address: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x077f);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ff);
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 80);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc8, 0x11);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(80);
-
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xc8, 0x11);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- return 0;
+ return dsi_ctx.accum_err;
}
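
The rewrite of jdi_fhd_r63452_on() works because the *_multi wrappers carry error state across the whole sequence. Condensed to its essentials (simplified from the mainline drm_mipi_dsi helpers, not a verbatim copy), each wrapper behaves roughly like:

static void write_multi_sketch(struct mipi_dsi_multi_context *ctx,
			       const void *buf, size_t len)
{
	ssize_t ret;

	if (ctx->accum_err)
		return;				/* no-op after the first failure */

	ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, len);
	if (ret < 0) {
		ctx->accum_err = ret;		/* remember and log the first error */
		dev_err(&ctx->dsi->dev, "dcs write failed: %d\n",
			ctx->accum_err);
	}
}
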
-static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
+static void jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
- mipi_dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0x95);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(2000, 3000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- return 0;
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0x95);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 2000, 3000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
{
struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
jdi_fhd_r63452_reset(ctx);
ret = jdi_fhd_r63452_on(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ if (ret < 0)
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
- return 0;
+ return ret;
}
static int jdi_fhd_r63452_unprepare(struct drm_panel *panel)
{
struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = jdi_fhd_r63452_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ /*
+ * NOTE: We don't return an error here: while the panel won't have
+ * been cleanly turned off, at least we've asserted the reset signal,
+ * so it should be safe to power it back on again later.
+ */
+ jdi_fhd_r63452_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index ea4a6bf6d35b..4db852ffb0f6 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -23,7 +23,7 @@
/* Manufacturer specific commands sent via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
-#define MANTIX_CMD_INT_CANCEL 0x4C
+#define MANTIX_CMD_INT_CANCEL 0x4c
#define MANTIX_CMD_SPI_FINISH 0x90
struct mantix {
@@ -45,82 +45,57 @@ static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
return container_of(panel, struct mantix, panel);
}
-static int mantix_init_sequence(struct mantix *ctx)
+static void mantix_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- struct device *dev = ctx->dev;
-
/*
* Init sequence was supplied by the panel vendor.
*/
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
-
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
- mipi_dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
- mipi_dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_INT_CANCEL, 0x03);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a, 0x03);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, 0x80, 0xa9, 0x00);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a, 0x09);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ mipi_dsi_msleep(dsi_ctx, 20);
- dev_dbg(dev, "Panel init sequence done\n");
- return 0;
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_SPI_FINISH, 0xa5);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2f);
+ mipi_dsi_msleep(dsi_ctx, 20);
}
static int mantix_enable(struct drm_panel *panel)
{
struct mantix *ctx = panel_to_mantix(panel);
- struct device *dev = ctx->dev;
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- int ret;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mantix_init_sequence(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ mantix_init_sequence(&dsi_ctx);
+ if (!dsi_ctx.accum_err)
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode\n");
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret)
- return ret;
- usleep_range(10000, 12000);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 12000);
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to turn on peripheral\n");
- return ret;
- }
+ mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int mantix_disable(struct drm_panel *panel)
{
struct mantix *ctx = panel_to_mantix(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int mantix_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 94d89ffd596b..5d115ecd5dd4 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -47,195 +47,196 @@ static inline struct panel_nv3051d *panel_to_panelnv3051d(struct drm_panel *pane
static int panel_nv3051d_init_sequence(struct panel_nv3051d *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
/*
* Init sequence was supplied by the device vendor with no
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xE3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x1E);
- mipi_dsi_dcs_write_seq(dsi, 0x26, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x57);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x2A, 0xDF);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x9C);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0xA7);
- mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x3C);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0xFE);
- mipi_dsi_dcs_write_seq(dsi, 0x5C, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xA1, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0xA4, 0x9C);
- mipi_dsi_dcs_write_seq(dsi, 0xA7, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xA8, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xA9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xAA, 0xFC);
- mipi_dsi_dcs_write_seq(dsi, 0xAB, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xAC, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAD, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAE, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAF, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xD1, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x2B);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C);
- mipi_dsi_dcs_write_seq(dsi, 0xD2, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xD3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xD6, 0x0D);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0xD7, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xE1, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xD8, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xD9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xBD, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xDD, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xBC, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xDC, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xBB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xBE, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xDE, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xBF, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDF, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xC0, 0x17);
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x17);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x3B);
- mipi_dsi_dcs_write_seq(dsi, 0xD5, 0x3C);
- mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x0B);
- mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0C);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x61);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0xC7);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x30, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x31, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x61);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0xC5);
- mipi_dsi_dcs_write_seq(dsi, 0x36, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0x40, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x41, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x42, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x81);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x45, 0xF2);
- mipi_dsi_dcs_write_seq(dsi, 0x46, 0xF1);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xF4);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xF3);
- mipi_dsi_dcs_write_seq(dsi, 0x50, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x51, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x53, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x54, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0xF6);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0xF5);
- mipi_dsi_dcs_write_seq(dsi, 0x57, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0xF8);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0xF7);
- mipi_dsi_dcs_write_seq(dsi, 0x7E, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x7F, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x5A);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xC7, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xCA, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xCB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xCC, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xCE, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xCF, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0x85, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x86, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x87, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x89, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x8A, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x9A, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0x9B, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x9C, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x9D, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x9E, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x9F, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0xDA);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0xBA);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0xA8);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x9A);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0x0A, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0x0B, 0x8F);
- mipi_dsi_dcs_write_seq(dsi, 0x0C, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x0D, 0x58);
- mipi_dsi_dcs_write_seq(dsi, 0x0E, 0x48);
- mipi_dsi_dcs_write_seq(dsi, 0x0F, 0x38);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x2B);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x36, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x1E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2A, 0xDF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x9C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0xA7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3A, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x3C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0xFE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5C, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA0, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA1, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA4, 0x9C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA7, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA8, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAA, 0xFC);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAB, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAC, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAD, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAE, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAF, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB0, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB2, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD1, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD4, 0x2B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB2, 0x0C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD2, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD6, 0x0D);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD7, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC1, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD8, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBD, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDD, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBC, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDC, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBE, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDE, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBF, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDF, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC0, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x3B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD5, 0x3C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB0, 0x0B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD0, 0x0C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x61);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0xC7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x61);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0xC5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0xF2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0xF1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xF4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xF3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0xF6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0xF5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0xF8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0xF7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7F, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x5A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB9, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC7, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCA, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCC, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCD, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCE, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCF, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD0, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x85, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x86, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x87, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x88, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x89, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8A, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9A, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9B, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9C, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9D, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9E, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9F, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xDA);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0xBA);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0xA8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x9A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0xFF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0A, 0xFF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0B, 0x8F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0C, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0D, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0F, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x2B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3A, 0x70);
dev_dbg(ctx->dev, "Panel init sequence done\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index d3bfdfc9cff6..57686340de49 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1166,7 +1166,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
bl->props.brightness = nt->conf->wrdisbv;
else
bl->props.brightness = 255;
- bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.power = BACKLIGHT_POWER_OFF;
nt->panel.backlight = bl;
}
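
For context on the FB_BLANK_* to BACKLIGHT_POWER_* conversions throughout this
series: the backlight core provides dedicated power constants so panel drivers
no longer depend on <linux/fb.h>. A minimal sketch of the mapping, assuming the
definitions in include/linux/backlight.h (values shown for illustration):

    #define BACKLIGHT_POWER_ON      (0)    /* replaces FB_BLANK_UNBLANK */
    #define BACKLIGHT_POWER_OFF     (4)    /* replaces FB_BLANK_POWERDOWN */
    #define BACKLIGHT_POWER_REDUCED (1)    /* replaces FB_BLANK_NORMAL; deprecated */
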
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 028fdac293f7..b036208f9356 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -100,106 +100,87 @@ static void nt35950_reset(struct nt35950 *nt)
/*
* nt35950_set_cmd2_page - Select manufacturer control (CMD2) page
+ * @dsi_ctx: context for mipi_dsi functions
* @nt: Main driver structure
* @page: Page number (0-7)
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page)
+static void nt35950_set_cmd2_page(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt, u8 page)
{
const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52,
0x08, page };
- int ret;
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, mauc_cmd2_page,
ARRAY_SIZE(mauc_cmd2_page));
- if (ret < 0)
- return ret;
-
- nt->last_page = page;
- return 0;
+ if (!dsi_ctx->accum_err)
+ nt->last_page = page;
}
/*
* nt35950_set_data_compression - Set data compression mode
+ * @dsi_ctx: context for mipi_dsi functions
* @nt: Main driver structure
* @comp_mode: Compression mode
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode)
+static void nt35950_set_data_compression(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt, u8 comp_mode)
{
u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode };
u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode };
u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 };
u8 last_page = nt->last_page;
- int ret;
/* Set CMD2 Page 0 if we're not there yet */
- if (last_page != 0) {
- ret = nt35950_set_cmd2_page(nt, 0);
- if (ret < 0)
- return ret;
- }
+ if (last_page != 0)
+ nt35950_set_cmd2_page(dsi_ctx, nt, 0);
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_data_compression,
ARRAY_SIZE(cmd_data_compression));
- if (ret < 0)
- return ret;
-
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_vesa_dsc_on,
ARRAY_SIZE(cmd_vesa_dsc_on));
- if (ret < 0)
- return ret;
/* Set the vesa dsc setting on Page 4 */
- ret = nt35950_set_cmd2_page(nt, 4);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(dsi_ctx, nt, 4);
/* Display Stream Compression setting, always 0x03 */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_vesa_dsc_setting,
ARRAY_SIZE(cmd_vesa_dsc_setting));
- if (ret < 0)
- return ret;
/* Get back to the previously set page */
- return nt35950_set_cmd2_page(nt, last_page);
+ nt35950_set_cmd2_page(dsi_ctx, nt, last_page);
}
/*
* nt35950_set_scaler - Enable/disable resolution upscaling
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
* @scale_up: Scale up function control
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up)
+static void nt35950_set_scaler(struct mipi_dsi_multi_context *dsi_ctx,
+ u8 scale_up)
{
u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up };
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
- ARRAY_SIZE(cmd_scaler));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
}
/*
* nt35950_set_scale_mode - Resolution upscaling mode
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
* @mode: Scaler mode (MCS_DATA_COMPRESSION_*)
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode)
+static void nt35950_set_scale_mode(struct mipi_dsi_multi_context *dsi_ctx,
+ u8 mode)
{
u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode };
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
- ARRAY_SIZE(cmd_scaler));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
}
/*
* nt35950_inject_black_image - Display a completely black image
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
*
* After IC setup, the attached panel may show random data
 * due to driver IC behavior changes (resolution, compression,
@@ -208,43 +189,34 @@ static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode)
* the display.
* It makes sense to push a black image before sending the sleep-out
* and display-on commands.
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_inject_black_image(struct nt35950 *nt)
+static void nt35950_inject_black_image(struct mipi_dsi_multi_context *dsi_ctx)
{
const u8 cmd0_black_img[] = { 0x6f, 0x01 };
const u8 cmd1_black_img[] = { 0xf3, 0x10 };
u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 };
- int ret;
/* Enable test command */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_test, ARRAY_SIZE(cmd_test));
/* Send a black image */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd0_black_img,
ARRAY_SIZE(cmd0_black_img));
- if (ret < 0)
- return ret;
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd1_black_img,
ARRAY_SIZE(cmd1_black_img));
- if (ret < 0)
- return ret;
/* Disable test command */
cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00;
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_test, ARRAY_SIZE(cmd_test));
}
/*
* nt35950_set_dispout - Set Display Output register parameters
* @nt: Main driver structure
- *
- * Return: Number of transferred bytes or negative number on error
+ * @dsi_ctx: context for mipi_dsi functions
*/
-static int nt35950_set_dispout(struct nt35950 *nt)
+static void nt35950_set_dispout(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt)
{
u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 };
const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
@@ -254,8 +226,8 @@ static int nt35950_set_dispout(struct nt35950 *nt)
if (mode_data[nt->cur_mode].enable_sram)
cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN;
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout,
- ARRAY_SIZE(cmd_dispout));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_dispout,
+ ARRAY_SIZE(cmd_dispout));
}
static int nt35950_get_current_mode(struct nt35950 *nt)
@@ -284,78 +256,47 @@ static int nt35950_on(struct nt35950 *nt)
{
const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
struct mipi_dsi_device *dsi = nt->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
nt->cur_mode = nt35950_get_current_mode(nt);
nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = nt35950_set_cmd2_page(nt, 0);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 0);
+ nt35950_set_data_compression(&dsi_ctx, nt, mode_data[nt->cur_mode].compression);
+ nt35950_set_scale_mode(&dsi_ctx, mode_data[nt->cur_mode].scaler_mode);
+ nt35950_set_scaler(&dsi_ctx, mode_data[nt->cur_mode].scaler_on);
+ nt35950_set_dispout(&dsi_ctx, nt);
- ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_dispout(nt);
- if (ret < 0)
- return ret;
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0);
/* CMD2 Page 1 */
- ret = nt35950_set_cmd2_page(nt, 1);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 1);
/* Unknown command */
- mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x88, 0x88);
/* CMD2 Page 7 */
- ret = nt35950_set_cmd2_page(nt, 7);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 7);
/* Enable SubPixel Rendering */
- mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PARAM_SPR_EN, 0x01);
/* SPR Mode: YYG Rainbow-RGB */
- mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PARAM_SPR_MODE,
+ MCS_SPR_MODE_YYG_RAINBOW_RGB);
/* CMD3 */
- ret = nt35950_inject_black_image(nt);
- if (ret < 0)
- return ret;
+ nt35950_inject_black_image(&dsi_ctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
- msleep(120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
- msleep(120);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
@@ -363,30 +304,19 @@ static int nt35950_on(struct nt35950 *nt)
return 0;
}
-static int nt35950_off(struct nt35950 *nt)
+static void nt35950_off(struct nt35950 *nt)
{
- struct device *dev = &nt->dsi[0]->dev;
- int ret;
+ struct mipi_dsi_device *dsi = nt->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- goto set_lpm;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- goto set_lpm;
- }
- msleep(150);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 150);
-set_lpm:
nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev)
@@ -427,7 +357,6 @@ static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev)
static int nt35950_prepare(struct drm_panel *panel)
{
struct nt35950 *nt = to_nt35950(panel);
- struct device *dev = &nt->dsi[0]->dev;
int ret;
ret = regulator_enable(nt->vregs[0].consumer);
@@ -452,10 +381,6 @@ static int nt35950_prepare(struct drm_panel *panel)
nt35950_reset(nt);
ret = nt35950_on(nt);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- goto end;
- }
end:
if (ret < 0) {
@@ -469,12 +394,8 @@ end:
static int nt35950_unprepare(struct drm_panel *panel)
{
struct nt35950 *nt = to_nt35950(panel);
- struct device *dev = &nt->dsi[0]->dev;
- int ret;
- ret = nt35950_off(nt);
- if (ret < 0)
- dev_err(dev, "Failed to deinitialize panel: %d\n", ret);
+ nt35950_off(nt);
gpiod_set_value_cansleep(nt->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs);
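
The nt35950 conversion above illustrates the mipi_dsi_multi_context idiom: once
any *_multi() call fails, the first error is latched in accum_err and every
subsequent call becomes a no-op, so a long command sequence needs only one
error check at the end. A minimal sketch of the pattern, using a hypothetical
my_panel_on() but the real *_multi() helpers:

    static int my_panel_on(struct mipi_dsi_device *dsi)
    {
    	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

    	/* Each call is skipped if dsi_ctx.accum_err is already set */
    	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
    	mipi_dsi_msleep(&dsi_ctx, 120);
    	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

    	/* Return the first latched error, or 0 on success */
    	return dsi_ctx.accum_err;
    }
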
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index e81a70147259..8c9e04207ba9 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -44,6 +44,16 @@ struct nt36672e_panel {
const struct panel_desc *desc;
};
+#define NT36672E_DCS_SWITCH_PAGE 0xff
+
+#define nt36672e_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36672E_DCS_SWITCH_PAGE, (page))
+
+static void nt36672e_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
{
return container_of(panel, struct nt36672e_panel, panel);
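
The two helpers above are thin wrappers: nt36672e_switch_page(ctx, 0x10), for
example, expands to the same write the open-coded sequences below used to
issue, just with the magic 0xff given a name:

    mipi_dsi_dcs_write_seq_multi(ctx, NT36672E_DCS_SWITCH_PAGE, 0x10);
    /* i.e. the former mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10) */
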
@@ -51,16 +61,16 @@ static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
{
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x89, 0x28, 0x00, 0x08, 0x00, 0xaa, 0x02,
0x0e, 0x00, 0x2b, 0x00, 0x07, 0x0d, 0xb7, 0x0c, 0xb7);
-
mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x1b, 0xa0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x66);
mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x40);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x38);
@@ -76,8 +86,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xf7, 0x54);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf8, 0x64);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf9, 0x54);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x24);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x0c);
mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x1d);
@@ -139,8 +150,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xd9, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xe9, 0x02);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x25);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x22);
mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0xe4);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x40);
@@ -164,8 +176,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xd7, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xef, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x84);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x26);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x81, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x83, 0x01);
mipi_dsi_dcs_write_seq_multi(ctx, 0x84, 0x03);
@@ -185,8 +198,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x9c, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9d, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9e, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x27);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x68);
mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x81);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x6a);
@@ -215,8 +229,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xe6, 0xd3);
mipi_dsi_dcs_write_seq_multi(ctx, 0xeb, 0x03);
mipi_dsi_dcs_write_seq_multi(ctx, 0xec, 0x28);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2a);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x91);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x50);
@@ -260,8 +275,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x8c, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8d, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8e, 0x7d);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -286,8 +302,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x21);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -312,8 +329,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2c);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2c);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x7e, 0x03);
@@ -327,12 +345,13 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x58, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x59, 0x0f);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0xf0);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0xff);
mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x24);
mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x01);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 93183f30d7d6..a9b5dad70bc1 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -389,7 +389,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
return -ENXIO;
}
- if (bd->props.power <= FB_BLANK_NORMAL) {
+ if (bd->props.power <= BACKLIGHT_POWER_REDUCED) {
/* Power on the backlight with the requested brightness
		 * Note: we cannot use mipi_dsi_dcs_set_display_brightness()
		 * as the otm8009a driver supports only 8-bit brightness (1 param).
@@ -465,7 +465,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
ctx->bl_dev->props.brightness = OTM8009A_BACKLIGHT_DEFAULT;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
ctx->bl_dev->props.type = BACKLIGHT_RAW;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 639a4fdf57bb..ab8b58545284 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -458,7 +458,7 @@ static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
return -EINVAL;
}
- if (bl_dev->props.power > FB_BLANK_NORMAL)
+ if (bl_dev->props.power > BACKLIGHT_POWER_REDUCED)
return -EPERM;
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
@@ -508,7 +508,7 @@ static int s6e3ha2_disable(struct drm_panel *panel)
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));
msleep(40);
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
}
@@ -554,7 +554,7 @@ static int s6e3ha2_prepare(struct drm_panel *panel)
if (ret < 0)
goto err;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
@@ -601,7 +601,7 @@ static int s6e3ha2_enable(struct drm_panel *panel)
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_ON;
return 0;
}
@@ -729,7 +729,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 46d6f4a87bf7..ed53787d1dea 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -225,7 +225,7 @@ static int s6e63j0x03_disable(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0)
@@ -245,7 +245,7 @@ static int s6e63j0x03_unprepare(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
return 0;
}
@@ -332,7 +332,7 @@ static int s6e63j0x03_prepare(struct drm_panel *panel)
if (ret < 0)
goto err;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
@@ -393,7 +393,7 @@ static int s6e63j0x03_enable(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_ON;
return 0;
}
@@ -473,7 +473,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index bf40057c5cf3..86735430462f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -726,16 +726,24 @@ static void panel_simple_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-simple. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-simple. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown() we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+	 * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown() but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
static void panel_simple_remove(struct device *dev)
@@ -2519,6 +2527,38 @@ static const struct panel_desc innolux_g070y2_l01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing innolux_g070ace_lh3_timing = {
+ .pixelclock = { 25200000, 25400000, 35700000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 30, 32, 87 },
+ .hback_porch = { 29, 31, 86 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 4, 5, 65 },
+ .vback_porch = { 3, 4, 65 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070ace_lh3 = {
+ .timings = &innolux_g070ace_lh3_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 10,
+ .enable = 450,
+ .disable = 200,
+ .unprepare = 510,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
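
As a sanity check on the typical values in innolux_g070ace_lh3_timing (the
arithmetic is illustrative, not part of the patch):

    /*
     * htotal  = 800 + 32 + 31 + 1 = 864
     * vtotal  = 480 +  5 +  4 + 1 = 490
     * refresh = 25400000 / (864 * 490) ~= 60 Hz
     */
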
+
static const struct drm_display_mode innolux_g070y2_t02_mode = {
.clock = 33333,
.hdisplay = 800,
@@ -3478,6 +3518,39 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing ontat_kd50g21_40nt_a1_timing = {
+ .pixelclock = { 30000000, 30000000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 1, 40, 255 },
+ .hback_porch = { 1, 40, 87 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 1, 13, 255 },
+ .vback_porch = { 1, 29, 29 },
+ .vsync_len = { 3, 3, 31 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc ontat_kd50g21_40nt_a1 = {
+ .timings = &ontat_kd50g21_40nt_a1_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .delay = {
+ .prepare = 147, /* 5 VSDs */
+ .enable = 147, /* 5 VSDs */
+ .disable = 88, /* 3 VSDs */
+ .unprepare = 117, /* 4 VSDs */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
/*
* 800x480 CVT. The panel appears to be quite accepting, at least as far as
* pixel clocks, but this is the timing that was being used in the Adafruit
@@ -4727,6 +4800,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g070ace-l01",
.data = &innolux_g070ace_l01,
}, {
+ .compatible = "innolux,g070ace-lh3",
+ .data = &innolux_g070ace_lh3,
+ }, {
.compatible = "innolux,g070y2-l01",
.data = &innolux_g070y2_l01,
}, {
@@ -4838,6 +4914,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "olimex,lcd-olinuxino-43-ts",
.data = &olimex_lcd_olinuxino_43ts,
}, {
+ .compatible = "ontat,kd50g21-40nt-a1",
+ .data = &ontat_kd50g21_40nt_a1,
+ }, {
.compatible = "ontat,yx700wv03",
.data = &ontat_yx700wv03,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 421eb4592b61..eef03d04e0cd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -4,6 +4,7 @@
* Author: Jagan Teki <jagan@amarulasolutions.com>
*/
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
@@ -14,79 +15,80 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
#include <video/mipi_display.h>
/* Command2 BKx selection command */
-#define DSI_CMD2BKX_SEL 0xFF
-#define DSI_CMD1 0
-#define DSI_CMD2 BIT(4)
-#define DSI_CMD2BK_MASK GENMASK(3, 0)
+#define ST7701_CMD2BKX_SEL 0xFF
+#define ST7701_CMD1 0
+#define ST7701_CMD2 BIT(4)
+#define ST7701_CMD2BK_MASK GENMASK(3, 0)
/* Command2, BK0 commands */
-#define DSI_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
-#define DSI_CMD2_BK0_NVGAMCTRL 0xB1 /* Negative Voltage Gamma Control */
-#define DSI_CMD2_BK0_LNESET 0xC0 /* Display Line setting */
-#define DSI_CMD2_BK0_PORCTRL 0xC1 /* Porch control */
-#define DSI_CMD2_BK0_INVSEL 0xC2 /* Inversion selection, Frame Rate Control */
+#define ST7701_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
+#define ST7701_CMD2_BK0_NVGAMCTRL 0xB1 /* Negative Voltage Gamma Control */
+#define ST7701_CMD2_BK0_LNESET 0xC0 /* Display Line setting */
+#define ST7701_CMD2_BK0_PORCTRL 0xC1 /* Porch control */
+#define ST7701_CMD2_BK0_INVSEL 0xC2 /* Inversion selection, Frame Rate Control */
/* Command2, BK1 commands */
-#define DSI_CMD2_BK1_VRHS 0xB0 /* Vop amplitude setting */
-#define DSI_CMD2_BK1_VCOM 0xB1 /* VCOM amplitude setting */
-#define DSI_CMD2_BK1_VGHSS 0xB2 /* VGH Voltage setting */
-#define DSI_CMD2_BK1_TESTCMD 0xB3 /* TEST Command Setting */
-#define DSI_CMD2_BK1_VGLS 0xB5 /* VGL Voltage setting */
-#define DSI_CMD2_BK1_PWCTLR1 0xB7 /* Power Control 1 */
-#define DSI_CMD2_BK1_PWCTLR2 0xB8 /* Power Control 2 */
-#define DSI_CMD2_BK1_SPD1 0xC1 /* Source pre_drive timing set1 */
-#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
-#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
+#define ST7701_CMD2_BK1_VRHS 0xB0 /* Vop amplitude setting */
+#define ST7701_CMD2_BK1_VCOM 0xB1 /* VCOM amplitude setting */
+#define ST7701_CMD2_BK1_VGHSS 0xB2 /* VGH Voltage setting */
+#define ST7701_CMD2_BK1_TESTCMD 0xB3 /* TEST Command Setting */
+#define ST7701_CMD2_BK1_VGLS 0xB5 /* VGL Voltage setting */
+#define ST7701_CMD2_BK1_PWCTLR1 0xB7 /* Power Control 1 */
+#define ST7701_CMD2_BK1_PWCTLR2 0xB8 /* Power Control 2 */
+#define ST7701_CMD2_BK1_SPD1 0xC1 /* Source pre_drive timing set1 */
+#define ST7701_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
+#define ST7701_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
/* Command2, BK0 bytes */
-#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
-#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
-#define DSI_CMD2_BK0_LNESET_LDE_EN BIT(7)
-#define DSI_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
-#define DSI_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
-#define DSI_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
-#define DSI_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
+#define ST7701_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
+#define ST7701_CMD2_BK0_LNESET_LDE_EN BIT(7)
+#define ST7701_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
+#define ST7701_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
+#define ST7701_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
+#define ST7701_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
/* Command2, BK1 bytes */
-#define DSI_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_TESTCMD_VAL BIT(7)
-#define DSI_CMD2_BK1_VGLS_ONES BIT(6)
-#define DSI_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
-#define DSI_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
-#define DSI_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
-#define DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
-#define DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
-#define DSI_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
-#define DSI_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
-#define DSI_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_MIPISET1_ONES BIT(7)
-#define DSI_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
+#define ST7701_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_TESTCMD_VAL BIT(7)
+#define ST7701_CMD2_BK1_VGLS_ONES BIT(6)
+#define ST7701_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
+#define ST7701_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
+#define ST7701_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
+#define ST7701_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
+#define ST7701_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
+#define ST7701_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
+#define ST7701_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
+#define ST7701_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_MIPISET1_ONES BIT(7)
+#define ST7701_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
#define CFIELD_PREP(_mask, _val) \
(((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))
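
CFIELD_PREP is a constant-expression analogue of FIELD_PREP:
__builtin_ffsll(_mask) - 1 is the bit offset of the mask's lowest set bit.
A worked expansion (illustrative values):

    /*
     * CFIELD_PREP(GENMASK(7, 6), 2):
     *   mask = 0xc0, __builtin_ffsll(0xc0) - 1 = 6
     *   result = (2 << 6) & 0xc0 = 0x80
     */
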
@@ -130,12 +132,16 @@ struct st7701_panel_desc {
struct st7701 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
+ struct mipi_dbi dbi;
const struct st7701_panel_desc *desc;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset;
unsigned int sleep_delay;
enum drm_panel_orientation orientation;
+
+ int (*write_command)(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len);
};
static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
@@ -143,16 +149,22 @@ static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
return container_of(panel, struct st7701, panel);
}
-static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
- size_t len)
+static int st7701_dsi_write(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len)
+{
+ return mipi_dsi_dcs_write(st7701->dsi, cmd, seq, len);
+}
+
+static int st7701_dbi_write(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len)
{
- return mipi_dsi_dcs_write_buffer(st7701->dsi, seq, len);
+ return mipi_dbi_command_stackbuf(&st7701->dbi, cmd, seq, len);
}
-#define ST7701_DSI(st7701, seq...) \
- { \
- const u8 d[] = { seq }; \
- st7701_dsi_write(st7701, d, ARRAY_SIZE(d)); \
+#define ST7701_WRITE(st7701, cmd, seq...) \
+ { \
+ const u8 d[] = { seq }; \
+ st7701->write_command(st7701, cmd, d, ARRAY_SIZE(d)); \
}
static u8 st7701_vgls_map(struct st7701 *st7701)
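
With ST7701_WRITE dispatching through st7701->write_command, the shared init
code no longer cares which bus is attached. The probe wiring is not shown in
the hunks here, but presumably each attach path installs its writer along
these lines (a sketch, not the literal patch code):

    /* DSI attach path */
    st7701->write_command = st7701_dsi_write;
    /* SPI/DBI attach path, after mipi_dbi_spi_init() has set up st7701->dbi */
    st7701->write_command = st7701_dbi_write;
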
@@ -185,11 +197,11 @@ static void st7701_switch_cmd_bkx(struct st7701 *st7701, bool cmd2, u8 bkx)
u8 val;
if (cmd2)
- val = DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bkx);
+ val = ST7701_CMD2 | FIELD_PREP(ST7701_CMD2BK_MASK, bkx);
else
- val = DSI_CMD1;
+ val = ST7701_CMD1;
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
+ ST7701_WRITE(st7701, ST7701_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
}
static void st7701_init_sequence(struct st7701 *st7701)
@@ -199,22 +211,22 @@ static void st7701_init_sequence(struct st7701 *st7701)
const u8 linecount8 = mode->vdisplay / 8;
const u8 linecountrem2 = (mode->vdisplay % 8) / 2;
- ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SOFT_RESET, 0x00);
/* We need to wait 5ms before sending new commands */
msleep(5);
- ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
msleep(st7701->sleep_delay);
/* Command2, BK0 */
st7701_switch_cmd_bkx(st7701, true, 0);
- mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
- desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
- mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
- desc->nv_gamma, ARRAY_SIZE(desc->nv_gamma));
+ st7701->write_command(st7701, ST7701_CMD2_BK0_PVGAMCTRL, desc->pv_gamma,
+ ARRAY_SIZE(desc->pv_gamma));
+ st7701->write_command(st7701, ST7701_CMD2_BK0_NVGAMCTRL, desc->nv_gamma,
+ ARRAY_SIZE(desc->nv_gamma));
/*
* Vertical line count configuration:
* Line[6:0]: select number of vertical lines of the TFT matrix in
@@ -226,14 +238,14 @@ static void st7701_init_sequence(struct st7701 *st7701)
* Total number of vertical lines:
* LN = ((Line[6:0] + 1) * 8) + (LDE_EN ? Line_delta[1:0] * 2 : 0)
*/
- ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
- FIELD_PREP(DSI_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
- (linecountrem2 ? DSI_CMD2_BK0_LNESET_LDE_EN : 0),
- FIELD_PREP(DSI_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
- ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
- FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VBP_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_LNESET,
+ FIELD_PREP(ST7701_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
+ (linecountrem2 ? ST7701_CMD2_BK0_LNESET_LDE_EN : 0),
+ FIELD_PREP(ST7701_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_PORCTRL,
+ FIELD_PREP(ST7701_CMD2_BK0_PORCTRL_VBP_MASK,
mode->vtotal - mode->vsync_end),
- FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VFP_MASK,
+ FIELD_PREP(ST7701_CMD2_BK0_PORCTRL_VFP_MASK,
mode->vsync_start - mode->vdisplay));
/*
* Horizontal pixel count configuration:
@@ -241,70 +253,70 @@ static void st7701_init_sequence(struct st7701 *st7701)
	 * The PCLK is the number of pixel clocks per line, which matches
	 * mode htotal. The minimum is 512 PCLK.
*/
- ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
- DSI_CMD2_BK0_INVSEL_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
- FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_INVSEL,
+ ST7701_CMD2_BK0_INVSEL_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ FIELD_PREP(ST7701_CMD2_BK0_INVSEL_RTNI_MASK,
(clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));
/* Command2, BK1 */
st7701_switch_cmd_bkx(st7701, true, 1);
/* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
- FIELD_PREP(DSI_CMD2_BK1_VRHA_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VRHS,
+ FIELD_PREP(ST7701_CMD2_BK1_VRHA_MASK,
DIV_ROUND_CLOSEST(desc->vop_uv - 3537500, 12500)));
/* Vcom = 0.1V + (VCOM[7:0] * 0.0125V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM,
- FIELD_PREP(DSI_CMD2_BK1_VCOM_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VCOM,
+ FIELD_PREP(ST7701_CMD2_BK1_VCOM_MASK,
DIV_ROUND_CLOSEST(desc->vcom_uv - 100000, 12500)));
/* Vgh = 11.5V + (VGHSS[7:0] * 0.5V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS,
- FIELD_PREP(DSI_CMD2_BK1_VGHSS_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VGHSS,
+ FIELD_PREP(ST7701_CMD2_BK1_VGHSS_MASK,
DIV_ROUND_CLOSEST(clamp(desc->vgh_mv,
(u16)11500,
(u16)17000) - 11500,
500)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_TESTCMD, ST7701_CMD2_BK1_TESTCMD_VAL);
/* Vgl is non-linear */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS,
- DSI_CMD2_BK1_VGLS_ONES |
- FIELD_PREP(DSI_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VGLS,
+ ST7701_CMD2_BK1_VGLS_ONES |
+ FIELD_PREP(ST7701_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1,
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_AP_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_PWCTLR1,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_AP_MASK,
desc->gamma_op_bias) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APIS_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_APIS_MASK,
desc->input_op_bias) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APOS_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_APOS_MASK,
desc->output_op_bias));
/* Avdd = 6.2V + (AVDD[1:0] * 0.2V) , Avcl = -4.4V - (AVCL[1:0] * 0.2V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2,
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_PWCTLR2,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL2_AVDD_MASK,
DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL2_AVCL_MASK,
DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
/* T2D = 0.2us * T2D[3:0] */
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
- DSI_CMD2_BK1_SPD1_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK1_SPD1_T2D_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_SPD1,
+ ST7701_CMD2_BK1_SPD1_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK1_SPD1_T2D_MASK,
DIV_ROUND_CLOSEST(desc->t2d_ns, 200)));
/* T3D = 4us + (0.8us * T3D[3:0]) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2,
- DSI_CMD2_BK1_SPD2_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK1_SPD2_T3D_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_SPD2,
+ ST7701_CMD2_BK1_SPD2_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK1_SPD2_T3D_MASK,
DIV_ROUND_CLOSEST(desc->t3d_ns - 4000, 800)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1,
- DSI_CMD2_BK1_MIPISET1_ONES |
- (desc->eot_en ? DSI_CMD2_BK1_MIPISET1_EOT_EN : 0));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_MIPISET1,
+ ST7701_CMD2_BK1_MIPISET1_ONES |
+ (desc->eot_en ? ST7701_CMD2_BK1_MIPISET1_EOT_EN : 0));
}
static void ts8550b_gip_sequence(struct st7701 *st7701)
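
A worked example for the Vop formula above ("Vop = 3.5375V + (VRHA[7:0] *
0.0125V)"), using a hypothetical vop_uv of 4800000:

    /*
     * VRHA = DIV_ROUND_CLOSEST(4800000 - 3537500, 12500)
     *      = DIV_ROUND_CLOSEST(1262500, 12500) = 101 = 0x65
     * check: 3.5375 V + 101 * 0.0125 V = 4.80 V
     */
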
@@ -313,89 +325,89 @@ static void ts8550b_gip_sequence(struct st7701 *st7701)
	 * ST7701_SPEC_V1.2 is unable to provide enough information about this
	 * specific command sequence, so it is taken from the vendor BSP driver.
*/
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
0x00, 0x00, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
+ ST7701_WRITE(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
0x00, 0x65, 0x00, 0x67, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
0xA0, 0x10, 0x78, 0x3C, 0xA0, 0x12, 0x78, 0x3C, 0xA0);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
0xA0, 0x11, 0x78, 0x3C, 0xA0, 0x13, 0x78, 0x3C, 0xA0);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
- ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
+ ST7701_WRITE(st7701, 0xEC, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
}
static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
{
- ST7701_DSI(st7701, 0xEE, 0x42);
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xEE, 0x42);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1,
+ ST7701_WRITE(st7701, 0xE1,
0x04, 0xA0, 0x06, 0xA0,
0x05, 0xA0, 0x07, 0xA0,
0x00, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE2,
+ ST7701_WRITE(st7701, 0xE2,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3,
+ ST7701_WRITE(st7701, 0xE3,
0x00, 0x00, 0x22, 0x22);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5,
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5,
0x0C, 0x90, 0xA0, 0xA0,
0x0E, 0x92, 0xA0, 0xA0,
0x08, 0x8C, 0xA0, 0xA0,
0x0A, 0x8E, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xE6,
+ ST7701_WRITE(st7701, 0xE6,
0x00, 0x00, 0x22, 0x22);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8,
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8,
0x0D, 0x91, 0xA0, 0xA0,
0x0F, 0x93, 0xA0, 0xA0,
0x09, 0x8D, 0xA0, 0xA0,
0x0B, 0x8F, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xEB,
+ ST7701_WRITE(st7701, 0xEB,
0x00, 0x00, 0xE4, 0xE4,
0x44, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED,
+ ST7701_WRITE(st7701, 0xED,
0xFF, 0xF5, 0x47, 0x6F,
0x0B, 0xA1, 0xAB, 0xFF,
0xFF, 0xBA, 0x1A, 0xB0,
0xF6, 0x74, 0x5F, 0xFF);
- ST7701_DSI(st7701, 0xEF,
+ ST7701_WRITE(st7701, 0xEF,
0x08, 0x08, 0x08, 0x40,
0x3F, 0x64);
st7701_switch_cmd_bkx(st7701, false, 0);
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xE6, 0x7C);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);
+ ST7701_WRITE(st7701, 0xE6, 0x7C);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0E);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x11);
+ ST7701_WRITE(st7701, 0x11);
msleep(120);
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0C);
msleep(10);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x11);
+ ST7701_WRITE(st7701, 0x11);
msleep(120);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x3A, 0x70);
+ ST7701_WRITE(st7701, 0x3A, 0x70);
}
static void kd50t048a_gip_sequence(struct st7701 *st7701)
@@ -404,59 +416,108 @@ static void kd50t048a_gip_sequence(struct st7701 *st7701)
	 * ST7701_SPEC_V1.2 is unable to provide enough information about this
	 * specific command sequence, so it is taken from the vendor BSP driver.
*/
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
0xA0, 0x0A, 0x60, 0xA0, 0xA0, 0x0C, 0x60, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
0xA0, 0x09, 0x60, 0xA0, 0xA0, 0x0B, 0x60, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
- ST7701_DSI(st7701, 0xEC, 0x02, 0x01);
- ST7701_DSI(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x02, 0x01);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
}
static void rg_arc_gip_sequence(struct st7701 *st7701)
{
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xEF, 0x08);
+ ST7701_WRITE(st7701, 0xEF, 0x08);
st7701_switch_cmd_bkx(st7701, true, 0);
- ST7701_DSI(st7701, 0xC7, 0x04);
- ST7701_DSI(st7701, 0xCC, 0x38);
+ ST7701_WRITE(st7701, 0xC7, 0x04);
+ ST7701_WRITE(st7701, 0xCC, 0x38);
st7701_switch_cmd_bkx(st7701, true, 1);
- ST7701_DSI(st7701, 0xB9, 0x10);
- ST7701_DSI(st7701, 0xBC, 0x03);
- ST7701_DSI(st7701, 0xC0, 0x89);
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xB9, 0x10);
+ ST7701_WRITE(st7701, 0xBC, 0x03);
+ ST7701_WRITE(st7701, 0xC0, 0x89);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
0x00, 0x00, 0x20, 0x20);
- ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
- ST7701_DSI(st7701, 0xE4, 0x22, 0x00);
- ST7701_DSI(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
+ ST7701_WRITE(st7701, 0xE4, 0x22, 0x00);
+ ST7701_WRITE(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
- ST7701_DSI(st7701, 0xE7, 0x22, 0x00);
- ST7701_DSI(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
+ ST7701_WRITE(st7701, 0xE7, 0x22, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xEC, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xB0, 0x54, 0xAF);
- ST7701_DSI(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ ST7701_WRITE(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ st7701_switch_cmd_bkx(st7701, false, 0);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ msleep(120);
+}
+
+static void rg28xx_gip_sequence(struct st7701 *st7701)
+{
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xEF, 0x08);
+
+ st7701_switch_cmd_bkx(st7701, true, 0);
+ ST7701_WRITE(st7701, 0xC3, 0x02, 0x10, 0x02);
+ ST7701_WRITE(st7701, 0xC7, 0x04);
+ ST7701_WRITE(st7701, 0xCC, 0x10);
+
+ st7701_switch_cmd_bkx(st7701, true, 1);
+ ST7701_WRITE(st7701, 0xEE, 0x42);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+
+ ST7701_WRITE(st7701, 0xE1, 0x04, 0xA0, 0x06, 0xA0, 0x05, 0xA0, 0x07, 0xA0,
+ 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x22, 0x22);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0C, 0x90, 0xA0, 0xA0, 0x0E, 0x92, 0xA0, 0xA0,
+ 0x08, 0x8C, 0xA0, 0xA0, 0x0A, 0x8E, 0xA0, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x22, 0x22);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x91, 0xA0, 0xA0, 0x0F, 0x93, 0xA0, 0xA0,
+ 0x09, 0x8D, 0xA0, 0xA0, 0x0B, 0x8F, 0xA0, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x44, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xED, 0xFF, 0xF5, 0x47, 0x6F, 0x0B, 0xA1, 0xBA, 0xFF,
+ 0xFF, 0xAB, 0x1A, 0xB0, 0xF6, 0x74, 0x5F, 0xFF);
+ ST7701_WRITE(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
- ST7701_DSI(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
- ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xE6, 0x16);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0E);
+
+ st7701_switch_cmd_bkx(st7701, false, 0);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x10);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
+
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0C);
+ msleep(10);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
+ st7701_switch_cmd_bkx(st7701, false, 0);
}
static int st7701_prepare(struct drm_panel *panel)
@@ -490,7 +551,7 @@ static int st7701_enable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
return 0;
}
@@ -499,7 +560,7 @@ static int st7701_disable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
return 0;
}
@@ -508,7 +569,7 @@ static int st7701_unprepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);
msleep(st7701->sleep_delay);
@@ -539,7 +600,7 @@ static int st7701_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, desc_mode);
if (!mode) {
- dev_err(&st7701->dsi->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
desc_mode->hdisplay, desc_mode->vdisplay,
drm_mode_vrefresh(desc_mode));
return -ENOMEM;
@@ -602,62 +663,62 @@ static const struct st7701_panel_desc ts8550b_desc = {
.panel_sleep_delay = 80, /* panel need extra 80ms for sleep out cmd */
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nlinv = 7,
.vop_uv = 4400000,
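A note on the gamma tables above: CFIELD_PREP() is not defined in these hunks; judging by its use, it is a constant-expression analogue of FIELD_PREP() that packs an AJ adjustment field and a VCn voltage-code field into each gamma byte. A hypothetical definition, offered only as a sketch, could be:

#define CFIELD_PREP(_mask, _val) \
	(((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))

The rename from the DSI_CMD2_BK0_* prefix to ST7701_CMD2_BK0_* is purely mechanical — the field values are unchanged — and drops the DSI-specific naming now that the same registers can also be reached over SPI/DBI.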
@@ -703,62 +764,62 @@ static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
.panel_sleep_delay = 5, /* panel need extra 5ms for sleep out cmd */
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nlinv = 1,
.vop_uv = 4800000,
@@ -802,62 +863,62 @@ static const struct st7701_panel_desc kd50t048a_desc = {
.panel_sleep_delay = 0,
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
},
.nlinv = 1,
.vop_uv = 4887500,
@@ -901,62 +962,62 @@ static const struct st7701_panel_desc rg_arc_desc = {
.panel_sleep_delay = 80,
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
},
.nlinv = 0,
.vop_uv = 4500000,
@@ -974,42 +1035,148 @@ static const struct st7701_panel_desc rg_arc_desc = {
.gip_sequence = rg_arc_gip_sequence,
};
-static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+static const struct drm_display_mode rg28xx_mode = {
+ .clock = 22325,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 4,
+ .htotal = 480 + 40 + 4 + 20,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 2,
+ .vsync_end = 640 + 2 + 40,
+ .vtotal = 640 + 2 + 40 + 16,
+
+ .width_mm = 44,
+ .height_mm = 58,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
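As a sanity check on the new mode: htotal = 480 + 40 + 4 + 20 = 544 and vtotal = 640 + 2 + 40 + 16 = 698, so with the 22325 kHz pixel clock the nominal refresh rate works out to 22325000 / (544 * 698) ≈ 58.8 Hz.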
+
+static const struct st7701_panel_desc rg28xx_desc = {
+ .mode = &rg28xx_mode,
+
+ .panel_sleep_delay = 80,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 7,
+ .vop_uv = 4800000,
+ .vcom_uv = 1512500,
+ .vgh_mv = 15000,
+ .vgl_mv = -11730,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = rg28xx_gip_sequence,
+};
+
+static void st7701_cleanup(void *data)
+{
+ struct st7701 *st7701 = (struct st7701 *)data;
+
+ drm_panel_remove(&st7701->panel);
+ drm_panel_disable(&st7701->panel);
+ drm_panel_unprepare(&st7701->panel);
+}
+
+static int st7701_probe(struct device *dev, int connector_type)
{
const struct st7701_panel_desc *desc;
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
+ st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
if (!st7701)
return -ENOMEM;
- desc = of_device_get_match_data(&dsi->dev);
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- dsi->format = desc->format;
- dsi->lanes = desc->lanes;
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
st7701->supplies[0].supply = "VCC";
st7701->supplies[1].supply = "IOVCC";
- ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(st7701->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
- st7701->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ st7701->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(st7701->reset)) {
- dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(st7701->reset);
}
- ret = of_drm_get_panel_orientation(dsi->dev.of_node, &st7701->orientation);
+ ret = of_drm_get_panel_orientation(dev->of_node, &st7701->orientation);
if (ret < 0)
- return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
+ return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
- DRM_MODE_CONNECTOR_DSI);
+ drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
@@ -1028,48 +1195,143 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
drm_panel_add(&st7701->panel);
- mipi_dsi_set_drvdata(dsi, st7701);
- st7701->dsi = dsi;
+ dev_set_drvdata(dev, st7701);
st7701->desc = desc;
- ret = mipi_dsi_attach(dsi);
- if (ret)
- goto err_attach;
+ return devm_add_action_or_reset(dev, st7701_cleanup, st7701);
+}
+
+static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct st7701 *st7701;
+ int err;
+
+ err = st7701_probe(&dsi->dev, DRM_MODE_CONNECTOR_DSI);
+ if (err)
+ return err;
+
+ st7701 = dev_get_drvdata(&dsi->dev);
+ st7701->dsi = dsi;
+ st7701->write_command = st7701_dsi_write;
+
+ if (!st7701->desc->lanes)
+ return dev_err_probe(&dsi->dev, -EINVAL, "This panel is not for MIPI DSI\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = st7701->desc->format;
+ dsi->lanes = st7701->desc->lanes;
+
+ err = mipi_dsi_attach(dsi);
+ if (err)
+ return dev_err_probe(&dsi->dev, err, "Failed to init MIPI DSI\n");
return 0;
+}
-err_attach:
- drm_panel_remove(&st7701->panel);
- return ret;
+static int st7701_spi_probe(struct spi_device *spi)
+{
+ struct st7701 *st7701;
+ struct gpio_desc *dc;
+ int err;
+
+ err = st7701_probe(&spi->dev, DRM_MODE_CONNECTOR_DPI);
+ if (err)
+ return err;
+
+ st7701 = dev_get_drvdata(&spi->dev);
+ st7701->write_command = st7701_dbi_write;
+
+ dc = devm_gpiod_get_optional(&spi->dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc))
+ return dev_err_probe(&spi->dev, PTR_ERR(dc), "Failed to get GPIO for D/CX\n");
+
+ err = mipi_dbi_spi_init(spi, &st7701->dbi, dc);
+ if (err)
+ return dev_err_probe(&spi->dev, err, "Failed to init MIPI DBI\n");
+ st7701->dbi.read_commands = NULL;
+
+ return 0;
}
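On the DBI side: mipi_dbi_spi_init() takes the SPI device, the driver's struct mipi_dbi and an optional D/CX GPIO, selecting an 8-bit (with D/CX) or 9-bit transfer scheme, and clearing read_commands afterwards presumably disables register readback for this write-only path. The st7701_dbi_write() backend itself is outside this hunk; a hypothetical sketch, with the signature inferred from the write_command assignments:

static void st7701_dbi_write(struct st7701 *st7701, u8 cmd,
			     const u8 *data, size_t len)
{
	/* Sketch only: assumes the backend simply forwards to the DBI
	 * stack-buffer helper; the return value is dropped for brevity. */
	mipi_dbi_command_stackbuf(&st7701->dbi, cmd, data, len);
}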
static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
{
- struct st7701 *st7701 = mipi_dsi_get_drvdata(dsi);
-
mipi_dsi_detach(dsi);
- drm_panel_remove(&st7701->panel);
}
-static const struct of_device_id st7701_of_match[] = {
+static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "anbernic,rg-arc-panel", .data = &rg_arc_desc },
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
{ }
};
-MODULE_DEVICE_TABLE(of, st7701_of_match);
+MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
+
+static const struct of_device_id st7701_spi_of_match[] = {
+ { .compatible = "anbernic,rg28xx-panel", .data = &rg28xx_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st7701_spi_of_match);
+
+static const struct spi_device_id st7701_spi_ids[] = {
+ { "rg28xx-panel" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, st7701_spi_ids);
static struct mipi_dsi_driver st7701_dsi_driver = {
.probe = st7701_dsi_probe,
.remove = st7701_dsi_remove,
.driver = {
.name = "st7701",
- .of_match_table = st7701_of_match,
+ .of_match_table = st7701_dsi_of_match,
},
};
-module_mipi_dsi_driver(st7701_dsi_driver);
+
+static struct spi_driver st7701_spi_driver = {
+ .probe = st7701_spi_probe,
+ .id_table = st7701_spi_ids,
+ .driver = {
+ .name = "st7701",
+ .of_match_table = st7701_spi_of_match,
+ },
+};
+
+static int __init st7701_driver_init(void)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_SPI)) {
+ err = spi_register_driver(&st7701_spi_driver);
+ if (err)
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ err = mipi_dsi_driver_register(&st7701_dsi_driver);
+ if (err) {
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&st7701_spi_driver);
+ return err;
+ }
+ }
+
+ return 0;
+}
+module_init(st7701_driver_init);
+
+static void __exit st7701_driver_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&st7701_dsi_driver);
+
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&st7701_spi_driver);
+}
+module_exit(st7701_driver_exit);
MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_AUTHOR("Hironori KIKUCHI <kikuchan98@gmail.com>");
MODULE_DESCRIPTION("Sitronix ST7701 LCD Panel Driver");
MODULE_LICENSE("GPL");
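Taken together, the st7701 hunks above convert the driver from DSI-only to dual-transport: probe is split into a common st7701_probe() plus thin DSI and SPI wrappers, module init registers both bus drivers guarded by IS_ENABLED() so either bus can be compiled out, and every command write goes through the st7701->write_command function pointer behind the renamed ST7701_WRITE() macro. The macro definition is outside these hunks; a sketch of what such a dispatch macro presumably looks like (names real, body assumed):

#define ST7701_WRITE(st7701, cmd, seq...)				\
	do {								\
		const u8 d[] = { seq };					\
		(st7701)->write_command((st7701), (cmd), d, ARRAY_SIZE(d)); \
	} while (0)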
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 73ba93ff00fe..217f03569494 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -342,7 +342,7 @@ static const struct backlight_ops acx565akm_bl_ops = {
static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
{
struct backlight_properties props = {
- .power = FB_BLANK_UNBLANK,
+ .power = BACKLIGHT_POWER_ON,
.type = BACKLIGHT_RAW,
};
int ret;
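The one-line change above is part of a tree-wide migration from the fbdev FB_BLANK_* constants to backlight-native power states; BACKLIGHT_POWER_ON is the drop-in replacement for FB_BLANK_UNBLANK in struct backlight_properties. For context, a minimal registration under the new constant (the device name and max_brightness are placeholders, not from this patch):

static int example_backlight_init(struct device *dev, void *priv,
				  const struct backlight_ops *ops)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.power = BACKLIGHT_POWER_ON,	/* was FB_BLANK_UNBLANK */
		.max_brightness = 255,		/* placeholder */
	};
	struct backlight_device *bd;

	bd = devm_backlight_device_register(dev, "panel-bl", dev, priv,
					    ops, &props);
	return PTR_ERR_OR_ZERO(bd);
}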
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index f2198fa29735..104b2290560e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -25,6 +25,12 @@ struct truly_nt35521 {
struct gpio_desc *blen_gpio;
};
+#define NT35521_DCS_SWITCH_PAGE 0xf0
+
+#define nt35521_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, NT35521_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
static inline
struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
{
@@ -48,7 +54,7 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf7, 0x20, 0x00);
@@ -59,7 +65,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x11, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb6, 0x02);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+
+ nt35521_switch_page(&dsi_ctx, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x8c, 0x00);
@@ -71,7 +78,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb4, 0x25, 0x25);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9, 0x43, 0x43);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba, 0x24, 0x24);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+
+ nt35521_switch_page(&dsi_ctx, 0x02);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xee, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0,
0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
@@ -103,7 +111,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+
+ nt35521_switch_page(&dsi_ctx, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
@@ -122,7 +131,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc5, 0xc0);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc6, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc7, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+
+ nt35521_switch_page(&dsi_ctx, 0x05);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x17, 0x06);
@@ -178,7 +188,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xeb, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xed, 0x30);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+
+ nt35521_switch_page(&dsi_ctx, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x2d, 0x2e);
@@ -235,10 +246,12 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf3, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd9, 0x02, 0x03, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x6c, 0x21);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
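The nt35521_switch_page() macro introduced at the top of this file replaces the repeated raw writes of the 0xf0, 0x55, 0xaa, 0x52, 0x08, <page> unlock/select sequence; since it expands to mipi_dsi_dcs_write_seq_multi(), any failure is latched in the shared context. Usage, exactly as in the hunks above:

	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	nt35521_switch_page(&dsi_ctx, 0x01);
	mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x09, 0x09);
	/* ... */
	return dsi_ctx.accum_err;	/* 0, or the first error hit */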
diff --git a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
index 0156689f41cd..c0c95355b743 100644
--- a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
+++ b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
@@ -24,10 +24,10 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#define DSI_REG_MCAP 0xB0
-#define DSI_REG_IS 0xB3 /* Interface Setting */
-#define DSI_REG_IIS 0xB4 /* Interface ID Setting */
-#define DSI_REG_CTRL 0xB6
+#define DSI_REG_MCAP 0xb0
+#define DSI_REG_IS 0xb3 /* Interface Setting */
+#define DSI_REG_IIS 0xb4 /* Interface ID Setting */
+#define DSI_REG_CTRL 0xb6
enum {
IOVCC = 0,
@@ -52,92 +52,55 @@ static inline struct stk_panel *to_stk_panel(struct drm_panel *panel)
static int stk_panel_init(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
-
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "failed to mipi_dsi_dcs_soft_reset: %d\n", ret);
- return ret;
- }
- mdelay(5);
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 5);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_MCAP, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_MCAP, 0x04);
/* Interface setting, video mode */
- mipi_dsi_generic_write_seq(dsi, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_IIS, 0x0C, 0x00);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_CTRL, 0x3A, 0xD3);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_IIS, 0x0c, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_CTRL, 0x3a, 0xd3);
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "failed to write display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x77);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ MIPI_DCS_WRITE_MEMORY_START);
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "failed to set pixel format: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0, stk->mode->hdisplay - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0, stk->mode->vdisplay - 1);
- ret = mipi_dsi_dcs_set_column_address(dsi, 0, stk->mode->hdisplay - 1);
- if (ret < 0) {
- dev_err(dev, "failed to set column address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_page_address(dsi, 0, stk->mode->vdisplay - 1);
- if (ret < 0) {
- dev_err(dev, "failed to set page address: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int stk_panel_on(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- dev_err(dev, "failed to set display on: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- mdelay(20);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return ret;
+ return dsi_ctx.accum_err;
}
static void stk_panel_off(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(dev, "failed to set display off: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(dev, "failed to enter sleep mode: %d\n", ret);
-
- msleep(100);
+ mipi_dsi_msleep(&dsi_ctx, 100);
}
static int stk_panel_unprepare(struct drm_panel *panel)
@@ -155,7 +118,6 @@ static int stk_panel_unprepare(struct drm_panel *panel)
static int stk_panel_prepare(struct drm_panel *panel)
{
struct stk_panel *stk = to_stk_panel(panel);
- struct device *dev = &stk->dsi->dev;
int ret;
gpiod_set_value(stk->reset_gpio, 0);
@@ -175,16 +137,12 @@ static int stk_panel_prepare(struct drm_panel *panel)
gpiod_set_value(stk->reset_gpio, 1);
mdelay(10);
ret = stk_panel_init(stk);
- if (ret < 0) {
- dev_err(dev, "failed to init panel: %d\n", ret);
+ if (ret < 0)
goto poweroff;
- }
ret = stk_panel_on(stk);
- if (ret < 0) {
- dev_err(dev, "failed to set panel on: %d\n", ret);
+ if (ret < 0)
goto poweroff;
- }
return 0;
@@ -250,18 +208,15 @@ static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
static int dsi_dcs_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
- if (ret < 0) {
- dev_err(dev, "failed to set DSI control: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, bl->props.brightness);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct backlight_ops dsi_bl_ops = {
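The startek conversion above is a clean illustration of the mipi_dsi_multi_context pattern that recurs throughout this series: the context wraps the DSI device, every *_multi helper becomes a no-op once a previous call has failed, and the first error is latched in accum_err, so long init sequences shed their per-call if (ret < 0) boilerplate. A condensed sketch using only helpers that appear in this patch:

static int example_panel_init(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 5);		/* skipped after an error */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0 on success, else first error */
}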
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 540099253e1b..17b8defe79c1 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -19,7 +19,13 @@ struct visionox_vtdr6130 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
struct gpio_desc *reset_gpio;
- struct regulator_bulk_data supplies[3];
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_vtdr6130_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
};
static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
@@ -40,123 +46,106 @@ static void visionox_vtdr6130_reset(struct visionox_vtdr6130 *ctx)
static int visionox_vtdr6130_on(struct visionox_vtdr6130 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret)
- return ret;
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x70,
- 0x12, 0x00, 0x00, 0xab, 0x30, 0x80, 0x09, 0x60, 0x04,
- 0x38, 0x00, 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
- 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00, 0x07, 0x00,
- 0x0c, 0x02, 0x77, 0x02, 0x8b, 0x18, 0x00, 0x10, 0xf0,
- 0x07, 0x10, 0x20, 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e,
- 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70, 0x77,
- 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02, 0x22, 0x00, 0x2a,
- 0x40, 0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
- 0x3b, 0x38, 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
- 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xb1,
- 0x01, 0x38, 0x00, 0x14, 0x00, 0x1c, 0x00, 0x01, 0x66,
- 0x00, 0x14, 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
- 0x05, 0xcc, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x09, 0x11, 0x09, 0x11, 0x08, 0xc1, 0x07, 0xfa, 0x05,
- 0xa4, 0x00, 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
- 0x00, 0x0c, 0x04, 0x00, 0x35);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x03, 0x33);
- mipi_dsi_dcs_write_seq(dsi, 0xb4,
- 0x00, 0x33, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00,
- 0x3e, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb5,
- 0x00, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0x00, 0x08, 0x09, 0x09, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xbc,
- 0x10, 0x00, 0x00, 0x06, 0x11, 0x09, 0x3b, 0x09, 0x47,
- 0x09, 0x47, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xbe,
- 0x10, 0x10, 0x00, 0x08, 0x22, 0x09, 0x19, 0x09, 0x25,
- 0x09, 0x25, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xfa, 0x08, 0x08, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x81);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x51, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- return 0;
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x12, 0x00, 0x00, 0xab,
+ 0x30, 0x80, 0x09, 0x60, 0x04, 0x38, 0x00,
+ 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
+ 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00,
+ 0x07, 0x00, 0x0c, 0x02, 0x77, 0x02, 0x8b,
+ 0x18, 0x00, 0x10, 0xf0, 0x07, 0x10, 0x20,
+ 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c,
+ 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70,
+ 0x77, 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02,
+ 0x22, 0x00, 0x2a, 0x40, 0x2a, 0xbe, 0x3a,
+ 0xfc, 0x3a, 0xfa, 0x3a, 0xf8, 0x3b, 0x38,
+ 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
+ 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x01, 0x38, 0x00, 0x14,
+ 0x00, 0x1c, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x05, 0xcc, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xce, 0x09, 0x11, 0x09, 0x11,
+ 0x08, 0xc1, 0x07, 0xfa, 0x05, 0xa4, 0x00,
+ 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
+ 0x00, 0x0c, 0x04, 0x00, 0x35);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x03, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x33, 0x00, 0x00,
+ 0x00, 0x3e, 0x00, 0x00, 0x00, 0x3e, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0x09, 0x09, 0x09,
+ 0x09, 0x09, 0x09, 0x06, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0x00, 0x08, 0x09,
+ 0x09, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbc, 0x10, 0x00, 0x00, 0x06,
+ 0x11, 0x09, 0x3b, 0x09, 0x47, 0x09, 0x47,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x10, 0x10, 0x00, 0x08,
+ 0x22, 0x09, 0x19, 0x09, 0x25, 0x09, 0x25,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfa, 0x08, 0x08, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf9, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x51, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
}
-static int visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
+static void visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int visionox_vtdr6130_prepare(struct drm_panel *panel)
{
struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_vtdr6130_supplies),
ctx->supplies);
if (ret < 0)
return ret;
@@ -165,9 +154,9 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
ret = visionox_vtdr6130_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_vtdr6130_supplies),
+ ctx->supplies);
return ret;
}
@@ -177,16 +166,13 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
{
struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = visionox_vtdr6130_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ visionox_vtdr6130_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_vtdr6130_supplies),
+ ctx->supplies);
return 0;
}
@@ -266,12 +252,10 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->supplies[0].supply = "vddio";
- ctx->supplies[1].supply = "vci";
- ctx->supplies[2].supply = "vdd";
-
- ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
- ctx->supplies);
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(visionox_vtdr6130_supplies),
+ visionox_vtdr6130_supplies,
+ &ctx->supplies);
if (ret < 0)
return ret;
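devm_regulator_bulk_get_const() differs from devm_regulator_bulk_get() in that the supply table stays a shared const array and the function hands back a separately devm-allocated, per-instance array through its last argument; this is why struct visionox_vtdr6130 now holds a pointer rather than an embedded supplies[3]. The shape of the call, taken from the hunk above:

	static const struct regulator_bulk_data example_supplies[] = {
		{ .supply = "vddio" },
		{ .supply = "vci" },
		{ .supply = "vdd" },
	};

	struct regulator_bulk_data *supplies;
	int ret = devm_regulator_bulk_get_const(dev,
						ARRAY_SIZE(example_supplies),
						example_supplies, &supplies);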
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a61ef0af9a4e..df49d37d0e7e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -727,7 +727,7 @@ panfrost_reset(struct panfrost_device *pfdev,
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_start(&pfdev->js->queue[i].sched, true);
+ drm_sched_start(&pfdev->js->queue[i].sched);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index cc6e13a97783..bbc12728437f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -833,7 +833,7 @@ static void panthor_vm_stop(struct panthor_vm *vm)
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched, true);
+ drm_sched_start(&vm->sched);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 12b272a912f8..91a31b70c037 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2538,7 +2538,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
static void panthor_group_stop(struct panthor_group *group)
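The panfrost and panthor hunks track an API change in the DRM GPU scheduler: drm_sched_start() has dropped its boolean argument, so reset paths now simply restart the scheduler. In before/after form:

	/* before */
	drm_sched_start(&queue->scheduler, true);
	/* after */
	drm_sched_start(&queue->scheduler);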
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 03e6871b3065..d1c5e471bdca 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -249,7 +249,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
*/
if (bd->props.brightness == 0)
bd->props.brightness = RADEON_MAX_BL_LEVEL;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("radeon atom DIG backlight initialized\n");
@@ -2179,7 +2179,7 @@ assigned:
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b5e96a8fc2c1..11a492f21157 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7585,7 +7585,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7615,7 +7615,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7645,7 +7645,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
+ drm_handle_vblank(rdev_to_drm(rdev), 2);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7675,7 +7675,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
+ drm_handle_vblank(rdev_to_drm(rdev), 3);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7705,7 +7705,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
+ drm_handle_vblank(rdev_to_drm(rdev), 4);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7735,7 +7735,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
+ drm_handle_vblank(rdev_to_drm(rdev), 5);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -8581,7 +8581,7 @@ int cik_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 4c06f47453fd..d6ab93ed9ec4 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -91,7 +91,7 @@ struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
pin = &rdev->audio.pin[i];
pin_count = 0;
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (radeon_encoder_is_digital(encoder)) {
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c634dc28e6c3..bc4ab71613a5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1673,7 +1673,7 @@ void evergreen_pm_misc(struct radeon_device *rdev)
*/
void evergreen_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -1698,7 +1698,7 @@ void evergreen_pm_prepare(struct radeon_device *rdev)
*/
void evergreen_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -1763,7 +1763,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
*/
void evergreen_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
@@ -1804,7 +1804,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
*/
void evergreen_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disabled = 0;
@@ -4753,7 +4753,7 @@ restart_ih:
event_name = "vblank";
if (rdev->irq.crtc_vblank_int[crtc_idx]) {
- drm_handle_vblank(rdev->ddev, crtc_idx);
+ drm_handle_vblank(rdev_to_drm(rdev), crtc_idx);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -5211,7 +5211,7 @@ int evergreen_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize AGP */
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index e5577d2a19ef..a46613283393 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -397,7 +397,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
@@ -435,14 +435,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
return r;
}
- offset = track->cb_color_bo_offset[id] << 8;
+ offset = (u64)track->cb_color_bo_offset[id] << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n",
__func__, __LINE__, id, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
/* old ddx are broken they allocate bo with w*h*bpp but
* program slice with ALIGN(h, 8), catch this and patch
@@ -450,14 +450,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
*/
if (!surf.mode) {
uint32_t *ib = p->ib.ptr;
- unsigned long tmp, nby, bsize, size, min = 0;
+ u64 tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */
if (surf.nby > 8) {
min = surf.nby - 8;
}
bsize = radeon_bo_size(track->cb_color_bo[id]);
- tmp = track->cb_color_bo_offset[id] << 8;
+ tmp = (u64)track->cb_color_bo_offset[id] << 8;
for (nby = surf.nby; nby > min; nby--) {
size = nby * surf.nbx * surf.bpe * surf.nsamples;
if ((tmp + size * mslice) <= bsize) {
@@ -469,7 +469,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
slice = ((nby * surf.nbx) / 64) - 1;
if (!evergreen_surface_check(p, &surf, "cb")) {
/* check if this one works */
- tmp += surf.layer_size * mslice;
+ tmp += (u64)surf.layer_size * mslice;
if (tmp <= bsize) {
ib[track->cb_color_slice_idx[id]] = slice;
goto old_ddx_ok;
@@ -478,9 +478,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
}
}
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
- "offset %d, max layer %d, bo size %ld, slice %d)\n",
+ "offset %llu, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
- track->cb_color_bo_offset[id] << 8, mslice,
+ (u64)track->cb_color_bo_offset[id] << 8, mslice,
radeon_bo_size(track->cb_color_bo[id]), slice);
dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
__func__, __LINE__, surf.nbx, surf.nby,
@@ -564,7 +564,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -610,18 +610,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return r;
}
- offset = track->db_s_read_offset << 8;
+ offset = (u64)track->db_s_read_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_read_bo)) {
dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_read_offset << 8, mslice,
+ (u64)track->db_s_read_offset << 8, mslice,
radeon_bo_size(track->db_s_read_bo));
dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
__func__, __LINE__, track->db_depth_size,
@@ -629,18 +629,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return -EINVAL;
}
- offset = track->db_s_write_offset << 8;
+ offset = (u64)track->db_s_write_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_write_bo)) {
dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_write_offset << 8, mslice,
+ (u64)track->db_s_write_offset << 8, mslice,
radeon_bo_size(track->db_s_write_bo));
return -EINVAL;
}
@@ -661,7 +661,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -708,34 +708,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
return r;
}
- offset = track->db_z_read_offset << 8;
+ offset = (u64)track->db_z_read_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_read_bo)) {
dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_read_offset << 8, mslice,
+ (u64)track->db_z_read_offset << 8, mslice,
radeon_bo_size(track->db_z_read_bo));
return -EINVAL;
}
- offset = track->db_z_write_offset << 8;
+ offset = (u64)track->db_z_write_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_write_bo)) {
dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_write_offset << 8, mslice,
+ (u64)track->db_z_write_offset << 8, mslice,
radeon_bo_size(track->db_z_write_bo));
return -EINVAL;
}
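All of the evergreen_cs.c hunks fix the same 32-bit overflow: fields such as cb_color_bo_offset[] are 32-bit, so expressions like track->cb_color_bo_offset[id] << 8 and surf.layer_size * mslice were evaluated in 32-bit arithmetic and silently wrapped before being widened into the (previously unsigned long) accumulator, letting an oversized offset slip past the bounds checks. Casting one operand to u64 forces 64-bit evaluation, and the format strings move from %ld to %llu to match. A self-contained userspace illustration of the failure mode, with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* an 8 GiB byte offset, expressed in 256-byte units */
		uint32_t bo_offset = 0x02000000;

		uint64_t bad  = bo_offset << 8;			/* wraps in 32 bits: 0    */
		uint64_t good = (uint64_t)bo_offset << 8;	/* widened first: 2^33    */

		printf("bad=%llu good=%llu\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}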
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 77aee99e473a..3890911fe693 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2360,7 +2360,7 @@ int cayman_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize memory controller */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0b1e19345f43..80703417d8a1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -459,7 +459,7 @@ void r100_pm_misc(struct radeon_device *rdev)
*/
void r100_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -490,7 +490,7 @@ void r100_pm_prepare(struct radeon_device *rdev)
*/
void r100_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -603,7 +603,7 @@ void r100_hpd_set_polarity(struct radeon_device *rdev,
*/
void r100_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -626,7 +626,7 @@ void r100_hpd_init(struct radeon_device *rdev)
*/
void r100_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -798,7 +798,7 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -807,7 +807,7 @@ int r100_irq_process(struct radeon_device *rdev)
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -1016,45 +1016,65 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
DRM_DEBUG_KMS("\n");
- if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
- (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
- (rdev->family == CHIP_RS200)) {
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RV200:
+ case CHIP_RS100:
+ case CHIP_RS200:
DRM_INFO("Loading R100 Microcode\n");
fw_name = FIRMWARE_R100;
- } else if ((rdev->family == CHIP_R200) ||
- (rdev->family == CHIP_RV250) ||
- (rdev->family == CHIP_RV280) ||
- (rdev->family == CHIP_RS300)) {
+ break;
+
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RV280:
+ case CHIP_RS300:
DRM_INFO("Loading R200 Microcode\n");
fw_name = FIRMWARE_R200;
- } else if ((rdev->family == CHIP_R300) ||
- (rdev->family == CHIP_R350) ||
- (rdev->family == CHIP_RV350) ||
- (rdev->family == CHIP_RV380) ||
- (rdev->family == CHIP_RS400) ||
- (rdev->family == CHIP_RS480)) {
+ break;
+
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
DRM_INFO("Loading R300 Microcode\n");
fw_name = FIRMWARE_R300;
- } else if ((rdev->family == CHIP_R420) ||
- (rdev->family == CHIP_R423) ||
- (rdev->family == CHIP_RV410)) {
+ break;
+
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
DRM_INFO("Loading R400 Microcode\n");
fw_name = FIRMWARE_R420;
- } else if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
+ break;
+
+ case CHIP_RS690:
+ case CHIP_RS740:
DRM_INFO("Loading RS690/RS740 Microcode\n");
fw_name = FIRMWARE_RS690;
- } else if (rdev->family == CHIP_RS600) {
+ break;
+
+ case CHIP_RS600:
DRM_INFO("Loading RS600 Microcode\n");
fw_name = FIRMWARE_RS600;
- } else if ((rdev->family == CHIP_RV515) ||
- (rdev->family == CHIP_R520) ||
- (rdev->family == CHIP_RV530) ||
- (rdev->family == CHIP_R580) ||
- (rdev->family == CHIP_RV560) ||
- (rdev->family == CHIP_RV570)) {
+ break;
+
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_R580:
+ case CHIP_RV560:
+ case CHIP_RV570:
DRM_INFO("Loading R500 Microcode\n");
fw_name = FIRMWARE_R520;
+ break;
+
+ default:
+ DRM_ERROR("Unsupported Radeon family %u\n", rdev->family);
+ return -EINVAL;
}
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
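The switch conversion above is more than cosmetic: the old if/else chain had no trailing else, so an unrecognized family would fall through and reach request_firmware() with whatever fw_name held (NULL, by the look of the surrounding code), while the new default case fails cleanly with -EINVAL. The resulting shape, condensed to two of the family groups:

	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
		fw_name = FIRMWARE_R100;
		break;
	default:
		DRM_ERROR("Unsupported Radeon family %u\n", rdev->family);
		return -EINVAL;	/* never reach request_firmware() with a NULL name */
	}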
@@ -1471,7 +1491,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+ crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
@@ -3059,7 +3079,7 @@ DEFINE_SHOW_ATTRIBUTE(r100_debugfs_mc_info);
void r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_rbbm_info", 0444, root, rdev,
&r100_debugfs_rbbm_info_fops);
@@ -3069,7 +3089,7 @@ void r100_debugfs_rbbm_init(struct radeon_device *rdev)
void r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_cp_ring_info", 0444, root, rdev,
&r100_debugfs_cp_ring_info_fops);
@@ -3081,7 +3101,7 @@ void r100_debugfs_cp_init(struct radeon_device *rdev)
void r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_mc_info", 0444, root, rdev,
&r100_debugfs_mc_info_fops);
@@ -3947,7 +3967,7 @@ int r100_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r100_clock_startup(rdev);
/* Initialize surface registers */
@@ -4056,7 +4076,7 @@ int r100_init(struct radeon_device *rdev)
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1620f534f55f..05c13102a8cb 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -616,7 +616,7 @@ DEFINE_SHOW_ATTRIBUTE(rv370_debugfs_pcie_gart_info);
static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rv370_pcie_gart_info", 0444, root, rdev,
&rv370_debugfs_pcie_gart_info_fops);
@@ -1452,7 +1452,7 @@ int r300_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r300_clock_startup(rdev);
/* Initialize surface registers */
@@ -1538,7 +1538,7 @@ int r300_init(struct radeon_device *rdev)
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index a979662eaa73..9a31cdec6415 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -322,7 +322,7 @@ int r420_resume(struct radeon_device *rdev)
if (rdev->is_atom_bios) {
atom_asic_init(rdev->mode_info.atom_context);
} else {
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
}
/* Resume clock after posting */
r420_clock_resume(rdev);
@@ -414,7 +414,7 @@ int r420_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -493,7 +493,7 @@ DEFINE_SHOW_ATTRIBUTE(r420_debugfs_pipes_info);
void r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r420_pipes_info", 0444, root, rdev,
&r420_debugfs_pipes_info_fops);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 6cbcaa845192..08e127b3249a 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -287,7 +287,7 @@ int r520_init(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 087d41e370fd..8b62f7faa5b9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -950,7 +950,7 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
void r600_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -1017,7 +1017,7 @@ void r600_hpd_init(struct radeon_device *rdev)
void r600_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -3280,7 +3280,7 @@ int r600_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
if (rdev->flags & RADEON_IS_AGP) {
@@ -4136,7 +4136,7 @@ restart_ih:
DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -4166,7 +4166,7 @@ restart_ih:
DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -4358,7 +4358,7 @@ DEFINE_SHOW_ATTRIBUTE(r600_debugfs_mc_info);
static void r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r600_mc_info", 0444, root, rdev,
&r600_debugfs_mc_info_fops);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 6cf54a747749..1b2d31c4d77c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -884,7 +884,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+ crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 64980a61d38a..81d58ef667dd 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -153,7 +153,7 @@ void r600_dpm_print_ps_status(struct radeon_device *rdev,
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 vblank_in_pixels;
@@ -180,7 +180,7 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 vrefresh = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f3551ebaa2f0..661f374f5f27 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -116,7 +116,7 @@ void r600_audio_update_hdmi(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 0999c8eaae94..fd8a4513025f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2297,7 +2297,7 @@ typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
struct radeon_device {
struct device *dev;
- struct drm_device *ddev;
+ struct drm_device ddev;
struct pci_dev *pdev;
#ifdef __alpha__
struct pci_controller *hose;
@@ -2476,6 +2476,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
+static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev)
+{
+ return &rdev->ddev;
+}
+
/*
* Cast helper
*/
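This radeon.h hunk is the pivot of the refactor: struct radeon_device now embeds its drm_device instead of pointing at a separately allocated one, and rdev_to_drm() is the accessor that every former rdev->ddev dereference is converted to. The reverse direction still goes through dev_private (set up in radeon_pci_probe(), further below), though with an embedded member it could just as well be recovered positionally; a sketch of such an inverse helper (name hypothetical, not part of the patch):

	/* Hypothetical inverse of rdev_to_drm(), relying on the embedding: */
	static inline struct radeon_device *drm_to_rdev(struct drm_device *ddev)
	{
		return container_of(ddev, struct radeon_device, ddev);
	}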
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 603a78e41ba5..22ce61bdfc06 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -405,11 +405,11 @@ static int radeon_atif_handler(struct radeon_device *rdev,
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if ((rdev->flags & RADEON_IS_PX) &&
radeon_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(rdev->ddev->dev);
+ pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(rdev->ddev);
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ drm_helper_hpd_irq_event(rdev_to_drm(rdev));
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
}
}
/* TODO: check other events */
@@ -736,7 +736,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
struct radeon_encoder *target = NULL;
/* Find the encoder controlling the brightness */
- list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+ list_for_each_entry(tmp, &rdev_to_drm(rdev)->mode_config.encoder_list,
head) {
struct radeon_encoder *enc = to_radeon_encoder(tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index a3d749e350f9..89d7b0e9e79f 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -161,7 +161,7 @@ struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev)
static int radeon_agp_head_acquire(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (!rdev->agp)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 10793a433bf5..81a0a91921b9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -187,7 +187,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
- rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+ rdev->i2c_bus[i] = radeon_i2c_create(rdev_to_drm(rdev), &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -1716,27 +1716,25 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- rdev->mode_info.bios_hardcoded_edid = edid;
- rdev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- struct_size(fake_edid_record,
- ucFakeEDIDString,
- fake_edid_record->ucFakeEDIDLength) :
- /* empty fake edid record must be 3 bytes long */
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
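The rewritten fake-EDID block in radeon_atombios.c changes two things at once. First, sizing: ucFakeEDIDLength is a byte count (128) for a single-block EDID but a count of 128-byte blocks otherwise, so the byte size is len == 128 ? len : len * 128, and the record pointer must advance by that computed size (struct_size() also accounts for the record header) or the parser walks into the middle of the EDID data. Second, the drm_edid helpers shrink the error handling: drm_edid_alloc() copies the bytes and returns NULL on failure, drm_edid_valid(NULL) is false, and drm_edid_free(NULL) is a no-op, so success and failure collapse into one if/else. The core of the pattern (keep_edid() is a hypothetical stand-in for storing the pointer):

	edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
	if (drm_edid_valid(edid))
		keep_edid(edid);
	else
		drm_edid_free(edid);	/* covers both invalid and NULL */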
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 0bcd767b9f47..47aa06a9a942 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -196,7 +196,7 @@ static void radeon_audio_enable(struct radeon_device *rdev,
return;
if (rdev->mode_info.mode_config_initialized) {
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (radeon_encoder_is_digital(encoder)) {
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
@@ -760,7 +760,7 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized)
return 0;
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (!radeon_encoder_is_digital(encoder))
continue;
radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 6952b1273b0f..df8d7f56b028 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -370,27 +370,22 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info, size;
- struct edid *edid;
+ const struct drm_edid *edid;
unsigned char *raw;
- edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ edid_info = combios_get_table_offset(rdev_to_drm(rdev), COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
raw = rdev->bios + edid_info;
size = EDID_LENGTH * (raw[0x7e] + 1);
- edid = kmalloc(size, GFP_KERNEL);
- if (edid == NULL)
- return false;
-
- memcpy((unsigned char *)edid, raw, size);
+ edid = drm_edid_alloc(raw, size);
- if (!drm_edid_is_valid(edid)) {
- kfree(edid);
+ if (!drm_edid_valid(edid)) {
+ drm_edid_free(edid);
return false;
}
rdev->mode_info.bios_hardcoded_edid = edid;
- rdev->mode_info.bios_hardcoded_edid_size = size;
return true;
}
@@ -398,18 +393,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
- struct edid *edid;
-
- if (rdev->mode_info.bios_hardcoded_edid) {
- edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((unsigned char *)edid,
- (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
- rdev->mode_info.bios_hardcoded_edid_size);
- return edid;
- }
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(rdev->mode_info.bios_hardcoded_edid));
}
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
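With bios_hardcoded_edid now a const struct drm_edid, the separate bios_hardcoded_edid_size field becomes redundant, since a drm_edid carries its own length; that is why the connector-detect hunks further below test the pointer rather than the size. radeon_bios_get_hardcoded_edid() keeps returning a legacy struct edid for its existing callers via the drm_edid_raw() + drm_edid_duplicate() round-trip, and both helpers tolerate a NULL input in kernels of this vintage, so the old explicit NULL check and the manual kmalloc/memcpy copy collapse into the single return statement above.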
@@ -642,7 +626,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_i2c_bus_rec i2c;
u16 offset;
u8 id, blocks, clk, data;
@@ -670,7 +654,7 @@ static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct r
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_i2c_bus_rec i2c;
/* actual hw pads
@@ -812,7 +796,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
bool radeon_combios_sideport_present(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
u16 igp_info;
/* sideport is AMD only */
@@ -915,7 +899,7 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
@@ -2637,7 +2621,7 @@ static const char *thermal_controller_names[] = {
void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
u16 offset, misc, misc2 = 0;
u8 rev, tmp;
int state_index = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 69693ba5949e..528a8f3677c2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -505,6 +505,9 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ continue;
+
drm_mode_probed_add(connector, mode);
}
}
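The three added lines above are a plain allocation-failure guard: drm_cvt_mode() allocates a drm_display_mode and can return NULL under memory pressure, which drm_mode_probed_add() would then dereference. Skipping the failed entry keeps the rest of the common-mode list usable:

	mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
			    60, false, false, false);
	if (!mode)
		continue;	/* OOM: skip this entry, keep probing the rest */
	drm_mode_probed_add(connector, mode);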
@@ -1056,7 +1059,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
*/
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
- rdev->mode_info.bios_hardcoded_edid_size) {
+ rdev->mode_info.bios_hardcoded_edid) {
ret = connector_status_connected;
}
@@ -1389,7 +1392,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
out:
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
- rdev->mode_info.bios_hardcoded_edid_size) {
+ rdev->mode_info.bios_hardcoded_edid) {
radeon_connector->use_digital = true;
ret = connector_status_connected;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index afbb3a80c0c6..554b236c2328 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -760,7 +760,7 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
if (rdev->is_atom_bios)
atom_asic_init(rdev->mode_info.atom_context);
else
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
return true;
} else {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
@@ -980,7 +980,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = rdev->ddev;
+ atom_card_info->dev = rdev_to_drm(rdev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
@@ -1005,7 +1005,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
mutex_init(&rdev->mode_info.atom_context->mutex);
mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
- radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
@@ -1049,7 +1049,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
*/
int radeon_combios_init(struct radeon_device *rdev)
{
- radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+ radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
return 0;
}
@@ -1285,9 +1285,6 @@ int radeon_device_init(struct radeon_device *rdev,
bool runtime = false;
rdev->shutdown = false;
- rdev->dev = &pdev->dev;
- rdev->ddev = ddev;
- rdev->pdev = pdev;
rdev->flags = flags;
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
@@ -1847,7 +1844,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
downgrade_write(&rdev->exclusive_lock);
- drm_helper_resume_force_mode(rdev->ddev);
+ drm_helper_resume_force_mode(rdev_to_drm(rdev));
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 843383f7237f..8f5f8abcb1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -302,13 +302,13 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
return;
- spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
"RADEON_FLIP_SUBMITTED(%d)\n",
radeon_crtc->flip_status,
RADEON_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
return;
}
@@ -334,7 +334,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID &
- radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ radeon_get_crtc_scanoutpos(rdev_to_drm(rdev), crtc_id,
GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
@@ -347,7 +347,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
*/
update_pending = 0;
}
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
if (!update_pending)
radeon_crtc_handle_flip(rdev, crtc_id);
}
@@ -370,14 +370,14 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;
- spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags);
work = radeon_crtc->flip_work;
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
"RADEON_FLIP_SUBMITTED(%d)\n",
radeon_crtc->flip_status,
RADEON_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
return;
}
@@ -389,7 +389,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (work->event)
drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
drm_crtc_vblank_put(&radeon_crtc->base);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
@@ -408,7 +408,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_flip_work *work =
container_of(__work, struct radeon_flip_work, flip_work);
struct radeon_device *rdev = work->rdev;
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
@@ -1401,7 +1401,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
- drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(rdev_to_drm(rdev), 0, "coherent", 0, 1);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
}
@@ -1409,57 +1409,57 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"tmds_pll",
radeon_tmds_pll_enum_list, sz);
}
rdev->mode_info.load_detect_property =
- drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(rdev_to_drm(rdev), 0, "load detection", 0, 1);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(rdev->ddev);
+ drm_mode_create_scaling_mode_property(rdev_to_drm(rdev));
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"tv standard",
radeon_tv_std_enum_list, sz);
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"underscan",
radeon_underscan_enum_list, sz);
rdev->mode_info.underscan_hborder_property =
- drm_property_create_range(rdev->ddev, 0,
+ drm_property_create_range(rdev_to_drm(rdev), 0,
"underscan hborder", 0, 128);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
rdev->mode_info.underscan_vborder_property =
- drm_property_create_range(rdev->ddev, 0,
+ drm_property_create_range(rdev_to_drm(rdev), 0,
"underscan vborder", 0, 128);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(radeon_audio_enum_list);
rdev->mode_info.audio_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"audio",
radeon_audio_enum_list, sz);
sz = ARRAY_SIZE(radeon_dither_enum_list);
rdev->mode_info.dither_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"dither",
radeon_dither_enum_list, sz);
sz = ARRAY_SIZE(radeon_output_csc_enum_list);
rdev->mode_info.output_csc_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"output_csc",
radeon_output_csc_enum_list, sz);
@@ -1578,29 +1578,29 @@ int radeon_modeset_init(struct radeon_device *rdev)
int i;
int ret;
- drm_mode_config_init(rdev->ddev);
+ drm_mode_config_init(rdev_to_drm(rdev));
rdev->mode_info.mode_config_initialized = true;
- rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+ rdev_to_drm(rdev)->mode_config.funcs = &radeon_mode_funcs;
if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
- rdev->ddev->mode_config.async_page_flip = true;
+ rdev_to_drm(rdev)->mode_config.async_page_flip = true;
if (ASIC_IS_DCE5(rdev)) {
- rdev->ddev->mode_config.max_width = 16384;
- rdev->ddev->mode_config.max_height = 16384;
+ rdev_to_drm(rdev)->mode_config.max_width = 16384;
+ rdev_to_drm(rdev)->mode_config.max_height = 16384;
} else if (ASIC_IS_AVIVO(rdev)) {
- rdev->ddev->mode_config.max_width = 8192;
- rdev->ddev->mode_config.max_height = 8192;
+ rdev_to_drm(rdev)->mode_config.max_width = 8192;
+ rdev_to_drm(rdev)->mode_config.max_height = 8192;
} else {
- rdev->ddev->mode_config.max_width = 4096;
- rdev->ddev->mode_config.max_height = 4096;
+ rdev_to_drm(rdev)->mode_config.max_width = 4096;
+ rdev_to_drm(rdev)->mode_config.max_height = 4096;
}
- rdev->ddev->mode_config.preferred_depth = 24;
- rdev->ddev->mode_config.prefer_shadow = 1;
+ rdev_to_drm(rdev)->mode_config.preferred_depth = 24;
+ rdev_to_drm(rdev)->mode_config.prefer_shadow = 1;
- rdev->ddev->mode_config.fb_modifiers_not_supported = true;
+ rdev_to_drm(rdev)->mode_config.fb_modifiers_not_supported = true;
ret = radeon_modeset_create_props(rdev);
if (ret) {
@@ -1618,11 +1618,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* allocate crtcs */
for (i = 0; i < rdev->num_crtc; i++) {
- radeon_crtc_init(rdev->ddev, i);
+ radeon_crtc_init(rdev_to_drm(rdev), i);
}
/* okay we should have all the bios connectors */
- ret = radeon_setup_enc_conn(rdev->ddev);
+ ret = radeon_setup_enc_conn(rdev_to_drm(rdev));
if (!ret) {
return ret;
}
@@ -1639,7 +1639,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
+ drm_kms_helper_poll_init(rdev_to_drm(rdev));
/* do pm late init */
ret = radeon_pm_late_init(rdev);
@@ -1650,15 +1650,15 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
- drm_kms_helper_poll_fini(rdev->ddev);
+ drm_kms_helper_poll_fini(rdev_to_drm(rdev));
radeon_hpd_fini(rdev);
- drm_helper_force_disable_all(rdev->ddev);
+ drm_helper_force_disable_all(rdev_to_drm(rdev));
radeon_afmt_fini(rdev);
- drm_mode_config_cleanup(rdev->ddev);
+ drm_mode_config_cleanup(rdev_to_drm(rdev));
rdev->mode_info.mode_config_initialized = false;
}
- kfree(rdev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(rdev->mode_info.bios_hardcoded_edid);
/* free i2c buses */
radeon_i2c_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7bf08164140e..e5a6f3e7c75b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -259,7 +259,8 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
- struct drm_device *dev;
+ struct drm_device *ddev;
+ struct radeon_device *rdev;
int ret;
if (!ent)
@@ -300,28 +301,37 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&kms_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*rdev), ddev);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ rdev->dev = &pdev->dev;
+ rdev->pdev = pdev;
+ ddev = rdev_to_drm(rdev);
+ ddev->dev_private = rdev;
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
- pci_set_drvdata(pdev, dev);
+ pci_set_drvdata(pdev, ddev);
+
+ ret = radeon_driver_load_kms(ddev, flags);
+ if (ret)
+ goto err_agp;
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(ddev, flags);
if (ret)
goto err_agp;
- radeon_fbdev_setup(dev->dev_private);
+ radeon_fbdev_setup(ddev->dev_private);
return 0;
err_agp:
pci_disable_device(pdev);
err_free:
- drm_dev_put(dev);
+ drm_dev_put(ddev);
return ret;
}
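radeon_pci_probe() now follows the managed DRM probe recipe: devm_drm_dev_alloc(parent, driver, type, member) allocates the containing struct radeon_device, initializes the drm_device embedded at member ddev, and ties the final reference to devres. That in turn lets the deprecated drm_driver.load hook be deleted (see the kms_driver hunk below) in favour of calling radeon_driver_load_kms() directly before drm_dev_register(). A condensed sketch of the pattern, with error unwinding trimmed; names match the patch, but treat this as illustrative rather than the literal upstream code:

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
	{
		struct radeon_device *rdev;
		int ret;

		rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver,
					  struct radeon_device, ddev);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);

		pci_set_drvdata(pdev, rdev_to_drm(rdev));

		ret = radeon_driver_load_kms(rdev_to_drm(rdev), ent->driver_data);
		if (ret)
			return ret;

		return drm_dev_register(rdev_to_drm(rdev), ent->driver_data);
	}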
@@ -520,6 +530,7 @@ static const struct file_operations radeon_driver_kms_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_kms_compat_ioctl,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
@@ -569,7 +580,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
static const struct drm_driver kms_driver = {
.driver_features =
DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET,
- .load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
.unload = radeon_driver_unload_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 02bf25759059..fb70de29545c 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -67,7 +67,7 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev->ddev, mode_cmd);
+ info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -148,15 +148,15 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user)
struct radeon_device *rdev = fb_helper->dev->dev_private;
int ret;
- ret = pm_runtime_get_sync(rdev->ddev->dev);
+ ret = pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
if (ret < 0 && ret != -EACCES)
goto err_pm_runtime_mark_last_busy;
return 0;
err_pm_runtime_mark_last_busy:
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return ret;
}
@@ -165,8 +165,8 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user)
struct drm_fb_helper *fb_helper = info->par;
struct radeon_device *rdev = fb_helper->dev->dev_private;
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return 0;
}
@@ -236,7 +236,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev->ddev, fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
@@ -374,12 +374,12 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
- drm_fb_helper_prepare(rdev->ddev, fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs);
+ drm_fb_helper_prepare(rdev_to_drm(rdev), fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs);
- ret = drm_client_init(rdev->ddev, &fb_helper->client, "radeon-fbdev",
+ ret = drm_client_init(rdev_to_drm(rdev), &fb_helper->client, "radeon-fbdev",
&radeon_fbdev_client_funcs);
if (ret) {
- drm_err(rdev->ddev, "Failed to register client: %d\n", ret);
+ drm_err(rdev_to_drm(rdev), "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
@@ -394,13 +394,13 @@ err_drm_client_init:
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- if (rdev->ddev->fb_helper)
- drm_fb_helper_set_suspend(rdev->ddev->fb_helper, state);
+ if (rdev_to_drm(rdev)->fb_helper)
+ drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- struct drm_fb_helper *fb_helper = rdev->ddev->fb_helper;
+ struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper;
struct drm_gem_object *gobj;
if (!fb_helper)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4fb780d96f32..daff61586be5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -150,7 +150,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
rdev->fence_context + ring,
seq);
radeon_fence_ring_emit(rdev, ring, *fence);
- trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
+ trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
return 0;
}
@@ -489,7 +489,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
if (!target_seq[i])
continue;
- trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+ trace_radeon_fence_wait_begin(rdev_to_drm(rdev), i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
}
@@ -511,7 +511,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
continue;
radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+ trace_radeon_fence_wait_end(rdev_to_drm(rdev), i, target_seq[i]);
}
return r;
@@ -995,7 +995,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
&radeon_debugfs_gpu_reset_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index e66a230331ee..9735f4968b86 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -88,7 +88,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_mn_unregister(robj);
- radeon_bo_unref(&robj);
+ ttm_bo_put(&robj->tbo);
}
}
@@ -899,7 +899,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_gem_info", 0444, root, rdev,
&radeon_debugfs_gem_info_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 3d174390a8af..1f16619ed06e 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1011,7 +1011,7 @@ void radeon_i2c_add(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
int i;
for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 63d914f3414d..1aa41cc3f991 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -309,7 +309,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_sa_info);
static void radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_sa_info", 0444, root, rdev,
&radeon_debugfs_sa_info_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index c4dda908666c..9961251b44ba 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -80,7 +80,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
hotplug_work.work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -101,7 +101,7 @@ static void radeon_dp_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
dp_work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -197,7 +197,7 @@ static void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
static int radeon_irq_install(struct radeon_device *rdev, int irq)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
int ret;
if (irq == IRQ_NOTCONNECTED)
@@ -218,7 +218,7 @@ static int radeon_irq_install(struct radeon_device *rdev, int irq)
static void radeon_irq_uninstall(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
radeon_driver_irq_uninstall_kms(dev);
@@ -322,9 +322,9 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
spin_lock_init(&rdev->irq.lock);
/* Disable vblank irqs aggressively for power-saving */
- rdev->ddev->vblank_disable_immediate = true;
+ rdev_to_drm(rdev)->vblank_disable_immediate = true;
- r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+ r = drm_vblank_init(rdev_to_drm(rdev), rdev->num_crtc);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index a16590c6247f..645e33bf7947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -104,15 +104,9 @@ done_free:
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct radeon_device *rdev;
+ struct radeon_device *rdev = dev->dev_private;
int r, acpi_status;
- rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
- if (rdev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)rdev;
-
#ifdef __alpha__
rdev->hose = pdev->sysdata;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index c4350ac2b3d2..d6aa1a3012a8 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -450,7 +450,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
}
bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("radeon legacy LVDS backlight initialized\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e0a5af180801..421c83fc70dc 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -39,6 +39,7 @@
#include <linux/i2c-algo-bit.h>
struct edid;
+struct drm_edid;
struct radeon_bo;
struct radeon_device;
@@ -262,8 +263,7 @@ struct radeon_mode_info {
/* Output CSC */
struct drm_property *output_csc_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
/* firmware flags */
u16 firmware_flags;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a955f8a2f7fe..d0e4b43d155c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -150,7 +150,7 @@ int radeon_bo_create(struct radeon_device *rdev,
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
+ drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -256,18 +256,15 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_get(&bo->tbo);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
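radeon_bo_ref()/radeon_bo_unref() now count references on the GEM object embedded in the TTM BO rather than on the TTM object directly, so the final put is routed through the GEM free path. A usage sketch under that assumption:

static void example_hold_and_release(struct radeon_bo *src)
{
        struct radeon_bo *bo = radeon_bo_ref(src); /* drm_gem_object_get() on tbo.base */

        /* ... use bo while holding the reference ... */

        radeon_bo_unref(&bo); /* drm_gem_object_put(); *bo is set to NULL */
}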
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2d9d9f46f243..b4fb7e70320b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -282,7 +282,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (rdev->irq.installed) {
i = 0;
- drm_for_each_crtc(crtc, rdev->ddev) {
+ drm_for_each_crtc(crtc, rdev_to_drm(rdev)) {
if (rdev->pm.active_crtcs & (1 << i)) {
/* This can fail if a modeset is in progress */
if (drm_crtc_vblank_get(crtc) == 0)
@@ -299,7 +299,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (rdev->irq.installed) {
i = 0;
- drm_for_each_crtc(crtc, rdev->ddev) {
+ drm_for_each_crtc(crtc, rdev_to_drm(rdev)) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
drm_crtc_vblank_put(crtc);
@@ -671,7 +671,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
int temp;
/* Can't get temperature when the card is off */
@@ -715,7 +715,7 @@ static ssize_t radeon_hwmon_show_sclk(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
u32 sclk = 0;
/* Can't get clock frequency when the card is off */
@@ -740,7 +740,7 @@ static ssize_t radeon_hwmon_show_vddc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
u16 vddc = 0;
/* Can't get vddc when the card is off */
@@ -1692,7 +1692,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
@@ -1765,7 +1765,7 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_connector *radeon_connector;
@@ -1826,7 +1826,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+ vbl_status = radeon_get_crtc_scanoutpos(rdev_to_drm(rdev),
crtc,
USE_REAL_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
@@ -1918,7 +1918,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct radeon_device *rdev = m->private;
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
@@ -1955,7 +1955,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_pm_info);
static void radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_pm_info", 0444, root, rdev,
&radeon_debugfs_pm_info_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d1d458286a8..581ae20c46e4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -550,7 +550,7 @@ static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_r
{
#if defined(CONFIG_DEBUG_FS)
const char *ring_name = radeon_debugfs_ring_idx_to_name(ring->idx);
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
if (ring_name)
debugfs_create_file(ring_name, 0444, root, ring,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5c65b6dfb99a..69d0c12fa419 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -682,8 +682,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
/* No other users of the address space, so set it to 0 */
r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
- rdev->ddev->anon_inode->i_mapping,
- rdev->ddev->vma_offset_manager,
+ rdev_to_drm(rdev)->anon_inode->i_mapping,
+ rdev_to_drm(rdev)->vma_offset_manager,
rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
@@ -890,7 +890,7 @@ static const struct file_operations radeon_ttm_gtt_fops = {
static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct drm_minor *minor = rdev->ddev->primary;
+ struct drm_minor *minor = rdev_to_drm(rdev)->primary;
struct dentry *root = minor->debugfs_root;
debugfs_create_file("radeon_vram", 0444, root, rdev,
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d4d1501e6576..d6c18fd740ec 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -379,7 +379,7 @@ DEFINE_SHOW_ATTRIBUTE(rs400_debugfs_gart_info);
static void rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rs400_gart_info", 0444, root, rdev,
&rs400_debugfs_gart_info_fops);
@@ -474,7 +474,7 @@ int rs400_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r300_clock_startup(rdev);
/* Initialize surface registers */
@@ -552,7 +552,7 @@ int rs400_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5c162778899b..88c8e91ea651 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -321,7 +321,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
void rs600_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -339,7 +339,7 @@ void rs600_pm_prepare(struct radeon_device *rdev)
void rs600_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -408,7 +408,7 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
void rs600_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -435,7 +435,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
void rs600_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -797,7 +797,7 @@ int rs600_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -806,7 +806,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -1133,7 +1133,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs600_mc_init(rdev);
r100_debugfs_rbbm_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 14fb0819b8c1..016eb4992803 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -845,7 +845,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index bbc6ccabf788..1b4dfb645585 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -255,7 +255,7 @@ DEFINE_SHOW_ATTRIBUTE(rv515_debugfs_ga_info);
void rv515_debugfs(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rv515_pipes_info", 0444, root, rdev,
&rv515_debugfs_pipes_info_fops);
@@ -636,7 +636,7 @@ int rv515_init(struct radeon_device *rdev)
if (radeon_boot_test_post_card(rdev) == false)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9ce12fa3c356..7d4b0bf59109 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1935,7 +1935,7 @@ int rv770_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize AGP */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 15759c8ca5b7..6c95575ce109 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6277,7 +6277,7 @@ restart_ih:
event_name = "vblank";
if (rdev->irq.crtc_vblank_int[crtc_idx]) {
- drm_handle_vblank(rdev->ddev, crtc_idx);
+ drm_handle_vblank(rdev_to_drm(rdev), crtc_idx);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -6839,7 +6839,7 @@ int si_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index 025677fe88d3..e1f41468a9a6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -62,14 +62,6 @@ config DRM_RCAR_MIPI_DSI
select DRM_MIPI_DSI
select RESET_CONTROLLER
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
- help
- Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
-
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support" if ARM
default y if ARM64
diff --git a/drivers/gpu/drm/renesas/rcar-du/Makefile b/drivers/gpu/drm/renesas/rcar-du/Makefile
index b8f2c82651d9..6f132325c8b7 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Makefile
+++ b/drivers/gpu/drm/renesas/rcar-du/Makefile
@@ -14,5 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
-
-obj-$(CONFIG_DRM_RZG2L_MIPI_DSI) += rzg2l_mipi_dsi.o
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index e1a6dd322caf..89bdb598e0ae 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -12,3 +12,11 @@ config DRM_RZG2L_DU
help
Choose this option if you have an RZ/G2L or similar chipset.
If M is selected the module will be called rzg2l-du-drm.
+
+config DRM_RZG2L_MIPI_DSI
+ tristate "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM && DRM_BRIDGE && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select DRM_MIPI_DSI
+ help
+ Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
diff --git a/drivers/gpu/drm/renesas/rz-du/Makefile b/drivers/gpu/drm/renesas/rz-du/Makefile
index 663b82a2577f..2987900ea6b6 100644
--- a/drivers/gpu/drm/renesas/rz-du/Makefile
+++ b/drivers/gpu/drm/renesas/rz-du/Makefile
@@ -6,3 +6,5 @@ rzg2l-du-drm-y := rzg2l_du_crtc.o \
rzg2l-du-drm-$(CONFIG_VIDEO_RENESAS_VSP1) += rzg2l_du_vsp.o
obj-$(CONFIG_DRM_RZG2L_DU) += rzg2l-du-drm.o
+
+obj-$(CONFIG_DRM_RZG2L_MIPI_DSI) += rzg2l_mipi_dsi.o
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
index 6e7aac6219be..c4c1474d487e 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -28,6 +28,7 @@
#include "rzg2l_du_vsp.h"
#define DU_MCR0 0x00
+#define DU_MCR0_DPI_OE BIT(0)
#define DU_MCR0_DI_EN BIT(8)
#define DU_DITR0 0x10
@@ -216,9 +217,14 @@ static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
{
+ struct rzg2l_du_crtc_state *rstate = to_rzg2l_crtc_state(rcrtc->crtc.state);
struct rzg2l_du_device *rcdu = rcrtc->dev;
+ u32 val = DU_MCR0_DI_EN;
- writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
+ if (rstate->outputs & BIT(RZG2L_DU_OUTPUT_DPAD0))
+ val |= DU_MCR0_DPI_OE;
+
+ writel(start ? val : 0, rcdu->mmio + DU_MCR0);
}
static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index e5eca8691a33..bc7c381f92ac 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -25,6 +25,16 @@
* Device Information
*/
+static const struct rzg2l_du_device_info rzg2l_du_r9a07g043u_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DPAD0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
.channels_mask = BIT(0),
.routes = {
@@ -40,6 +50,7 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
};
static const struct of_device_id rzg2l_du_of_table[] = {
+ { .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 07b312b6f81e..b99217b4e05d 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -183,7 +183,8 @@ static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu)
/* Find the output route corresponding to the port number. */
for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) {
- if (rcdu->info->routes[i].port == ep.port) {
+ if (rcdu->info->routes[i].possible_outputs &&
+ rcdu->info->routes[i].port == ep.port) {
output = i;
break;
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 10febea473cd..10febea473cd 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..1dbc16ec64a4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 362c7951ca4a..d3341edfe4f4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -262,7 +262,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
.mode_set = rockchip_dp_drm_encoder_mode_set,
.atomic_enable = rockchip_dp_drm_encoder_enable,
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index bd7aa891b839..b04538907f95 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -266,15 +266,6 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
mutex_lock(&dp->lock);
- if (dp->drm_edid) {
- /* FIXME: get rid of drm_edid_raw() */
- const struct edid *edid = drm_edid_raw(dp->drm_edid);
-
- DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
- edid->width_cm, edid->height_cm);
-
- }
-
ret = drm_edid_connector_add_modes(connector);
mutex_unlock(&dp->lock);
@@ -369,6 +360,7 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
+ const struct drm_display_info *info = &dp->connector.display_info;
int ret;
if (!cdn_dp_check_sink_connection(dp))
@@ -386,7 +378,11 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
cdn_dp_get_edid_block, dp);
drm_edid_connector_update(&dp->connector, dp->drm_edid);
- dp->sink_has_audio = dp->connector.display_info.has_audio;
+ dp->sink_has_audio = info->has_audio;
+
+ if (dp->drm_edid)
+ DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
+ info->width_mm / 10, info->height_mm / 10);
return 0;
}
@@ -969,21 +965,21 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Not connected, notify userspace to disable the block */
if (!cdn_dp_connected_port(dp)) {
- DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
+ DRM_DEV_INFO(dp->dev, "Not connected; disabling cdn\n");
dp->connected = false;
/* Connected but not enabled, enable the block */
} else if (!dp->active) {
- DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
+ DRM_DEV_INFO(dp->dev, "Connected, not enabled; enabling cdn\n");
ret = cdn_dp_enable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Enabling dp failed: %d\n", ret);
dp->connected = false;
}
/* Enabled and connected to a dongle without a sink, notify userspace */
} else if (!cdn_dp_check_sink_connection(dp)) {
- DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
+ DRM_DEV_INFO(dp->dev, "Connected without sink; assert hpd\n");
dp->connected = false;
/* Enabled and connected with a sink, re-train if requested */
@@ -992,11 +988,11 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
- DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
+ DRM_DEV_INFO(dp->dev, "Connected with sink; re-train link\n");
ret = cdn_dp_train_link(dp);
if (ret) {
dp->connected = false;
- DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Training link failed: %d\n", ret);
goto out;
}
@@ -1006,9 +1002,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
- DRM_DEV_ERROR(dp->dev,
- "Failed to config video %d\n",
- ret);
+ DRM_DEV_ERROR(dp->dev, "Failed to configure video: %d\n", ret);
}
}
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index fe33092abbe7..240552eb517f 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -61,11 +61,13 @@
* @lcdsel_grf_reg: grf register offset of lcdc select
* @lcdsel_big: reg value of selecting vop big for HDMI
* @lcdsel_lit: reg value of selecting vop little for HDMI
+ * @max_tmds_clock: maximum TMDS clock rate supported
*/
struct rockchip_hdmi_chip_data {
int lcdsel_grf_reg;
u32 lcdsel_big;
u32 lcdsel_lit;
+ int max_tmds_clock;
};
struct rockchip_hdmi {
@@ -77,8 +79,6 @@ struct rockchip_hdmi {
struct clk *ref_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
- struct regulator *avdd_0v9;
- struct regulator *avdd_1v8;
struct phy *phy;
};
@@ -209,43 +209,40 @@ static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
{
struct device_node *np = hdmi->dev->of_node;
+ int ret;
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
+ drm_err(hdmi, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
- hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "ref");
+ hdmi->ref_clk = devm_clk_get_optional_enabled(hdmi->dev, "ref");
if (!hdmi->ref_clk)
- hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "vpll");
+ hdmi->ref_clk = devm_clk_get_optional_enabled(hdmi->dev, "vpll");
- if (PTR_ERR(hdmi->ref_clk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hdmi->ref_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get reference clock\n");
- return PTR_ERR(hdmi->ref_clk);
+ if (IS_ERR(hdmi->ref_clk)) {
+ ret = PTR_ERR(hdmi->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ drm_err(hdmi, "failed to get reference clock\n");
+ return ret;
}
- hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
- if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
- hdmi->grf_clk = NULL;
- } else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hdmi->grf_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
- return PTR_ERR(hdmi->grf_clk);
+ hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf");
+ if (IS_ERR(hdmi->grf_clk)) {
+ ret = PTR_ERR(hdmi->grf_clk);
+ if (ret != -EPROBE_DEFER)
+ drm_err(hdmi, "failed to get grf clock\n");
+ return ret;
}
- hdmi->avdd_0v9 = devm_regulator_get(hdmi->dev, "avdd-0v9");
- if (IS_ERR(hdmi->avdd_0v9))
- return PTR_ERR(hdmi->avdd_0v9);
+ ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9");
+ if (ret)
+ return ret;
- hdmi->avdd_1v8 = devm_regulator_get(hdmi->dev, "avdd-1v8");
- if (IS_ERR(hdmi->avdd_1v8))
- return PTR_ERR(hdmi->avdd_1v8);
+ ret = devm_regulator_get_enable(hdmi->dev, "avdd-1v8");
- return 0;
+ return ret;
}
static enum drm_mode_status
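devm_clk_get_optional_enabled() and devm_regulator_get_enable() fold acquisition and enable into device-managed calls, which is why the bind/unbind hunks further down can drop their clk_disable_unprepare()/regulator_disable() teardown. The general shape of the pattern (a sketch, not driver code):

static int example_probe(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get_optional_enabled(dev, "ref");  /* NULL if the clock is absent */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = devm_regulator_get_enable(dev, "avdd-0v9"); /* stays enabled until detach */
        if (ret)
                return ret;

        return 0;
}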
@@ -259,6 +256,10 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
bool exact_match = hdmi->plat_data->phy_force_vendor;
int i;
+ if (hdmi->chip_data->max_tmds_clock &&
+ mode->clock > hdmi->chip_data->max_tmds_clock)
+ return MODE_CLOCK_HIGH;
+
if (hdmi->ref_clk) {
int rpclk = clk_round_rate(hdmi->ref_clk, pclk);
@@ -322,17 +323,16 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
+ drm_err(hdmi, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
+ drm_err(hdmi, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
- ret ? "LIT" : "BIG");
+ drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
}
static int
@@ -362,6 +362,8 @@ static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
{
struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+ dw_hdmi_set_high_tmds_clock_ratio(dw_hdmi, display);
+
return phy_power_on(hdmi->phy);
}
@@ -434,6 +436,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
RK3328_HDMI_HPD_IOE));
+
+ dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
@@ -446,13 +450,11 @@ static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
static struct rockchip_hdmi_chip_data rk3228_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
- .mpll_cfg = rockchip_mpll_cfg,
- .cur_ctr = rockchip_cur_ctr,
- .phy_config = rockchip_phy_config,
.phy_data = &rk3228_chip_data,
.phy_ops = &rk3228_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
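The max_tmds_clock values introduced in these chip-data hunks are in kHz, the unit of mode->clock that dw_hdmi_rockchip_mode_valid() compares against above:

/*
 * .max_tmds_clock = 340000  ->  340 MHz, the HDMI 1.4 TMDS ceiling
 * .max_tmds_clock = 594000  ->  594 MHz, the HDMI 2.0 ceiling (e.g. 4k@60)
 */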
@@ -463,6 +465,7 @@ static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
.lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
@@ -483,13 +486,11 @@ static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
static struct rockchip_hdmi_chip_data rk3328_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
- .mpll_cfg = rockchip_mpll_cfg,
- .cur_ctr = rockchip_cur_ctr,
- .phy_config = rockchip_phy_config,
.phy_data = &rk3328_chip_data,
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
@@ -501,6 +502,7 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
.lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
@@ -514,6 +516,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3568_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = {
@@ -592,7 +595,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
+ drm_err(hdmi, "Unable to parse OF data\n");
return ret;
}
@@ -600,29 +603,10 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
+ drm_err(hdmi, "failed to get phy\n");
return ret;
}
- ret = regulator_enable(hdmi->avdd_0v9);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd0v9: %d\n", ret);
- goto err_avdd_0v9;
- }
-
- ret = regulator_enable(hdmi->avdd_1v8);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd1v8: %d\n", ret);
- goto err_avdd_1v8;
- }
-
- ret = clk_prepare_enable(hdmi->ref_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI reference clock: %d\n",
- ret);
- goto err_clk;
- }
-
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
@@ -651,12 +635,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
err_bind:
drm_encoder_cleanup(encoder);
- clk_disable_unprepare(hdmi->ref_clk);
-err_clk:
- regulator_disable(hdmi->avdd_1v8);
-err_avdd_1v8:
- regulator_disable(hdmi->avdd_0v9);
-err_avdd_0v9:
+
return ret;
}
@@ -667,10 +646,6 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
dw_hdmi_unbind(hdmi->hdmi);
drm_encoder_cleanup(&hdmi->encoder.encoder);
- clk_disable_unprepare(hdmi->ref_clk);
-
- regulator_disable(hdmi->avdd_1v8);
- regulator_disable(hdmi->avdd_0v9);
}
static const struct component_ops dw_hdmi_rockchip_ops = {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index dec6913cec5b..42ef62aa0a1e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -543,7 +543,7 @@ inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.atomic_check = inno_hdmi_encoder_atomic_check,
.atomic_enable = inno_hdmi_encoder_enable,
.atomic_disable = inno_hdmi_encoder_disable,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 44d769d9234d..11e5d10de4d7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -103,13 +103,17 @@ static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
struct rockchip_drm_private *private = drm_dev->dev_private;
struct iommu_domain_geometry *geometry;
u64 start, end;
+ int ret;
if (IS_ERR_OR_NULL(private->iommu_dev))
return 0;
- private->domain = iommu_domain_alloc(private->iommu_dev->bus);
- if (!private->domain)
- return -ENOMEM;
+ private->domain = iommu_paging_domain_alloc(private->iommu_dev);
+ if (IS_ERR(private->domain)) {
+ ret = PTR_ERR(private->domain);
+ private->domain = NULL;
+ return ret;
+ }
geometry = &private->domain->geometry;
start = geometry->aperture_start;
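iommu_paging_domain_alloc() is device-scoped and signals failure with ERR_PTR(), unlike the bus-based iommu_domain_alloc() it replaces, which returned NULL; that is why the hunk switches to IS_ERR()/PTR_ERR() handling. The minimal call pattern:

struct iommu_domain *domain = iommu_paging_domain_alloc(dev);

if (IS_ERR(domain))
        return PTR_ERR(domain);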
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index bbb9e0bf6804..8d566fcd80a2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -12,9 +12,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem.h>
+#include <linux/bits.h>
+#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/component.h>
#define ROCKCHIP_MAX_FB_BUFFER 3
#define ROCKCHIP_MAX_CONNECTOR 2
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a13473b2d54c..f161f40d8ce4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -396,8 +396,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
if (info->is_yuv)
is_yuv = true;
- if (dst_w > 3840) {
- DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
+ if (dst_w > 4096) {
+ DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n");
return;
}
@@ -1583,6 +1583,10 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
VOP_AFBC_SET(vop, enable, s->enable_afbc);
vop_cfg_done(vop);
+ /* Ack the DMA transfer of the previous frame (RK3066). */
+ if (VOP_HAS_REG(vop, common, dma_stop))
+ VOP_REG_SET(vop, common, dma_stop, 0);
+
spin_unlock(&vop->reg_lock);
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index b33e5bdc26be..0cf512cc1614 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -122,6 +122,7 @@ struct vop_common {
struct vop_reg lut_buffer_index;
struct vop_reg gate_en;
struct vop_reg mmu_en;
+ struct vop_reg dma_stop;
struct vop_reg out_mode;
struct vop_reg standby;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index b9ee02061d5b..e2c6ba26f437 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -466,6 +466,7 @@ static const struct vop_output rk3066_output = {
};
static const struct vop_common rk3066_common = {
+ .dma_stop = VOP_REG(RK3066_SYS_CTRL0, 0x1, 0),
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
@@ -514,6 +515,7 @@ static const struct vop_data rk3066_vop = {
.output = &rk3066_output,
.win = rk3066_vop_win_data,
.win_size = ARRAY_SIZE(rk3066_vop_win_data),
+ .feature = VOP_FEATURE_INTERNAL_RGB,
.max_output = { 1920, 1080 },
};
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7e90c9f95611..ab53ab486fe6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -674,13 +674,11 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
- * @full_recovery: proceed with complete sched restart
*
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+void drm_sched_start(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
- int r;
/*
* Locking the list is not required here as the sched thread is parked
@@ -692,24 +690,17 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
atomic_add(s_job->credits, &sched->credit_count);
- if (!full_recovery)
+ if (!fence) {
+ drm_sched_job_done(s_job, -ECANCELED);
continue;
+ }
- if (fence) {
- r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(s_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else
- drm_sched_job_done(s_job, -ECANCELED);
+ if (dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb))
+ drm_sched_job_done(s_job, fence->error);
}
- if (full_recovery)
- drm_sched_start_timeout_unlocked(sched);
-
+ drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
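With full_recovery gone, restart after a reset is unconditional and every caller loses its boolean argument. A representative caller update implied by the new signature (hypothetical hunk, not part of this patch):

-	drm_sched_start(&ring->sched, true);
+	drm_sched_start(&ring->sched);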
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 48a5d49fc131..68b8197b3dd1 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -582,7 +582,6 @@ MODULE_DEVICE_TABLE(of, dvo_of_match);
struct platform_driver sti_dvo_driver = {
.driver = {
.name = "sti-dvo",
- .owner = THIS_MODULE,
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 6ee35612a14e..f18faad974aa 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -807,7 +807,6 @@ MODULE_DEVICE_TABLE(of, hda_of_match);
struct platform_driver sti_hda_driver = {
.driver = {
.name = "sti-hda",
- .owner = THIS_MODULE,
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 500936d5743c..847470f747c0 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -974,28 +974,32 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int count;
DRM_DEBUG_DRIVER("\n");
- edid = drm_get_edid(connector, hdmi->ddc_adapt);
- if (!edid)
- goto fail;
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
- cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
+ cec_notifier_set_phys_addr(hdmi->notifier,
+ connector->display_info.source_physical_address);
+
+ if (!drm_edid)
+ goto fail;
- count = drm_add_edid_modes(connector, edid);
- drm_connector_update_edid_property(connector, edid);
+ count = drm_edid_connector_add_modes(connector);
DRM_DEBUG_KMS("%s : %dx%d cm\n",
- (connector->display_info.is_hdmi ? "hdmi monitor" : "dvi monitor"),
- edid->width_cm, edid->height_cm);
+ info->is_hdmi ? "hdmi monitor" : "dvi monitor",
+ info->width_mm / 10, info->height_mm / 10);
- kfree(edid);
+ drm_edid_free(drm_edid);
return count;
fail:
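The struct drm_edid flow adopted here updates the connector even when the read failed, so a NULL EDID clears stale display info (and, via source_physical_address, the CEC physical address) before bailing out. The skeleton of the pattern, as a sketch:

static int example_get_modes(struct drm_connector *connector)
{
        const struct drm_edid *drm_edid = drm_edid_read(connector);
        int count;

        drm_edid_connector_update(connector, drm_edid); /* NULL-safe; clears stale info */
        count = drm_edid_connector_add_modes(connector);
        drm_edid_free(drm_edid);                        /* NULL-safe */

        return count;
}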
@@ -1485,7 +1489,6 @@ static void sti_hdmi_remove(struct platform_device *pdev)
struct platform_driver sti_hdmi_driver = {
.driver = {
.name = "sti-hdmi",
- .owner = THIS_MODULE,
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0fb48ac044d8..acbf70b95aeb 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1414,7 +1414,6 @@ MODULE_DEVICE_TABLE(of, hqvdp_of_match);
struct platform_driver sti_hqvdp_driver = {
.driver = {
.name = "sti-hqvdp",
- .owner = THIS_MODULE,
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 64615638b79a..e714c232026c 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -886,7 +886,6 @@ MODULE_DEVICE_TABLE(of, tvout_of_match);
struct platform_driver sti_tvout_driver = {
.driver = {
.name = "sti-tvout",
- .owner = THIS_MODULE,
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5e5f82b6a5d9..5ba469b711b5 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -431,7 +431,6 @@ MODULE_DEVICE_TABLE(of, vtg_of_match);
struct platform_driver sti_vtg_driver = {
.driver = {
.name = "sti-vtg",
- .owner = THIS_MODULE,
.of_match_table = vtg_of_match,
},
.probe = vtg_probe,
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 1cc6b6cbdfa9..d7f41a87808e 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -2,6 +2,7 @@
config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || COMPILE_TEST)
+ depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index e8523abef27a..e1232f74dfa5 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include "ltdc.h"
@@ -75,7 +76,7 @@ static int drv_load(struct drm_device *ddev)
DRM_DEBUG("%s\n", __func__);
- ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
+ ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL);
if (!ldev)
return -ENOMEM;
@@ -203,12 +204,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_put;
+ goto err_unload;
drm_fbdev_dma_setup(ddev, 16);
return 0;
+err_unload:
+ drv_unload(ddev);
err_put:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5576fdae4962..54a73753eff9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -36,6 +36,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include <video/videomode.h>
@@ -169,6 +170,7 @@
#define IER_RRIE BIT(3) /* Register Reload Interrupt Enable */
#define IER_FUEIE BIT(6) /* Fifo Underrun Error Interrupt Enable */
#define IER_CRCIE BIT(7) /* CRC Error Interrupt Enable */
+#define IER_MASK (IER_LIE | IER_FUWIE | IER_TERRIE | IER_RRIE | IER_FUEIE | IER_CRCIE)
#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
@@ -187,6 +189,7 @@
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
#define LXCR_HMEN BIT(8) /* Horizontal Mirroring ENable */
+#define LXCR_MASK (LXCR_LEN | LXCR_COLKEN | LXCR_CLUTEN | LXCR_HMEN)
#define LXWHPCR_WHSTPOS GENMASK(11, 0) /* Window Horizontal StarT POSition */
#define LXWHPCR_WHSPPOS GENMASK(27, 16) /* Window Horizontal StoP POSition */
@@ -491,11 +494,6 @@ static inline struct ltdc_device *plane_to_ltdc(struct drm_plane *plane)
return (struct ltdc_device *)plane->dev->dev_private;
}
-static inline struct ltdc_device *encoder_to_ltdc(struct drm_encoder *enc)
-{
- return (struct ltdc_device *)enc->dev->dev_private;
-}
-
static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
{
enum ltdc_pix_fmt pf;
@@ -784,7 +782,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
- regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
@@ -806,11 +804,10 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable all layers */
for (layer_index = 0; layer_index < ldev->caps.nb_layers; layer_index++)
- regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS,
- LXCR_CLUTEN | LXCR_LEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS, LXCR_MASK, 0);
- /* disable IRQ */
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ /* Disable IRQ */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
if (!ldev->caps.plane_reg_shadow)
@@ -1199,7 +1196,6 @@ static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
}
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1212,7 +1208,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
};
static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1474,7 +1469,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_X)
val |= LXCR_HMEN;
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, val);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1512,7 +1507,10 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
u32 lofs = plane->index * LAY_OFS;
/* Disable layer */
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, 0);
+
+ /* Reset the layer transparency to hide any related background color */
+ regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, 0x00);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1545,7 +1543,6 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -1572,7 +1569,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
const u64 *modifiers = ltdc_format_modifiers;
u32 lofs = index * LAY_OFS;
u32 val;
- int ret;
/* Allocate the biggest size according to supported color formats */
formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
@@ -1580,6 +1576,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
sizeof(*formats), GFP_KERNEL);
+ if (!formats)
+ return NULL;
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
drm_fmt = ldev->caps.pix_fmt_drm[i];
@@ -1613,14 +1611,10 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
}
}
- plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return NULL;
-
- ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
- &ltdc_plane_funcs, formats, nb_fmt,
- modifiers, type, NULL);
- if (ret < 0)
+ plane = drmm_universal_plane_alloc(ddev, struct drm_plane, dev,
+ possible_crtcs, &ltdc_plane_funcs, formats,
+ nb_fmt, modifiers, type, NULL);
+ if (IS_ERR(plane))
return NULL;
if (ldev->caps.ycbcr_input) {
@@ -1643,15 +1637,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
return plane;
}
-static void ltdc_plane_destroy_all(struct drm_device *ddev)
-{
- struct drm_plane *plane, *plane_temp;
-
- list_for_each_entry_safe(plane, plane_temp,
- &ddev->mode_config.plane_list, head)
- drm_plane_cleanup(plane);
-}
-
static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
{
struct ltdc_device *ldev = ddev->dev_private;
@@ -1677,14 +1662,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
/* Init CRTC according to its hardware features */
if (ldev->caps.crc)
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_with_crc_support_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_with_crc_support_funcs, NULL);
else
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Can not initialize CRTC\n");
- goto cleanup;
+ return ret;
}
drm_crtc_helper_add(crtc, &ltdc_crtc_helper_funcs);
@@ -1698,9 +1683,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
- goto cleanup;
+ return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
@@ -1713,10 +1697,6 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
}
return 0;
-
-cleanup:
- ltdc_plane_destroy_all(ddev);
- return ret;
}
static void ltdc_encoder_disable(struct drm_encoder *encoder)
@@ -1776,23 +1756,19 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
struct drm_encoder *encoder;
int ret;
- encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder)
- return -ENOMEM;
+ encoder = drmm_simple_encoder_alloc(ddev, struct drm_encoder, dev,
+ DRM_MODE_ENCODER_DPI);
+ if (IS_ERR(encoder))
+ return PTR_ERR(encoder);
encoder->possible_crtcs = CRTC_MASK;
encoder->possible_clones = 0; /* No cloning support */
- drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
-
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- drm_encoder_cleanup(encoder);
+ if (ret)
return ret;
- }
DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
@@ -1962,8 +1938,7 @@ int ltdc_load(struct drm_device *ddev)
goto err;
if (panel) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
@@ -2013,13 +1988,8 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- /* Disable interrupts */
- if (ldev->caps.fifo_threshold)
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE);
- else
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE | IER_FUEIE);
+ /* Disable all interrupts */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
@@ -2045,7 +2015,7 @@ int ltdc_load(struct drm_device *ddev)
}
}
- crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
DRM_ERROR("Failed to allocate crtc\n");
ret = -ENOMEM;
@@ -2072,9 +2042,6 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
clk_disable_unprepare(ldev->pixel_clk);
return ret;
@@ -2082,16 +2049,8 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- struct device *dev = ddev->dev;
- int nb_endpoints, i;
-
DRM_DEBUG_DRIVER("\n");
- nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
-
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
pm_runtime_disable(ddev->dev);
}
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 2fa2c81784e9..06f2d7a56cc9 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1210,7 +1210,6 @@ static struct platform_driver lvds_platform_driver = {
.remove = lvds_remove,
.driver = {
.name = "stm32-display-lvds",
- .owner = THIS_MODULE,
.of_match_table = lvds_dt_ids,
},
};
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 03d1c76aec2d..c9eb329665ec 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -801,6 +801,7 @@ static const struct file_operations tegra_drm_fops = {
.read = drm_read,
.compat_ioctl = drm_compat_ioctl,
.llseek = noop_llseek,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static int tegra_drm_context_cleanup(int id, void *p, void *data)
@@ -1135,6 +1136,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
static int host1x_drm_probe(struct host1x_device *dev)
{
+ struct device *dma_dev = dev->dev.parent;
struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1149,8 +1151,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
goto put;
}
- if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
+ tegra->domain = iommu_paging_domain_alloc(dma_dev);
if (!tegra->domain) {
err = -ENOMEM;
goto free;
@@ -1330,6 +1332,11 @@ static int host1x_drm_remove(struct host1x_device *dev)
return 0;
}
+static void host1x_drm_shutdown(struct host1x_device *dev)
+{
+ drm_atomic_helper_shutdown(dev_get_drvdata(&dev->dev));
+}
+
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
@@ -1398,6 +1405,7 @@ static struct host1x_driver host1x_drm_driver = {
},
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
+ .shutdown = host1x_drm_shutdown,
.subdevs = host1x_drm_subdevs,
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 682011166a8f..2f3781e04b0a 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -133,7 +133,7 @@ struct tegra_output {
struct drm_bridge *bridge;
struct drm_panel *panel;
struct i2c_adapter *ddc;
- const struct edid *edid;
+ const struct drm_edid *drm_edid;
struct cec_notifier *cec;
unsigned int hpd_irq;
struct gpio_desc *hpd_gpio;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 00c8564520e7..4de1ea0fc7c0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -46,6 +46,7 @@ struct gr3d {
unsigned int nclocks;
struct reset_control_bulk_data resets[RST_GR3D_MAX];
unsigned int nresets;
+ struct dev_pm_domain_list *pd_list;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -369,18 +370,12 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
return 0;
}
-static void gr3d_del_link(void *link)
-{
- device_link_del(link);
-}
-
static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
- static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
- const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
- struct device **opp_virt_devs, *pd_dev;
- struct device_link *link;
- unsigned int i;
+ struct dev_pm_domain_attach_data pd_data = {
+ .pd_names = (const char *[]) { "3d0", "3d1" },
+ .num_pd_names = 2,
+ };
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -414,29 +409,10 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
if (dev->pm_domain)
return 0;
- err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
- if (err)
+ err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
+ if (err < 0)
return err;
- for (i = 0; opp_genpd_names[i]; i++) {
- pd_dev = opp_virt_devs[i];
- if (!pd_dev) {
- dev_err(dev, "failed to get %s power domain\n",
- opp_genpd_names[i]);
- return -EINVAL;
- }
-
- link = device_link_add(dev, pd_dev, link_flags);
- if (!link) {
- dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
- return -EINVAL;
- }
-
- err = devm_add_action_or_reset(dev, gr3d_del_link, link);
- if (err)
- return err;
- }
-
return 0;
}
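dev_pm_domain_attach_list() attaches the named domains and sets up managed device links in one call, which is what allows deleting the per-domain device_link_add() loop and its cleanup actions; the flip side is that dev_pm_domain_detach_list() must now also run on later probe failures, hence the new goto err below. Sketch of the pairing:

err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
if (err < 0)
        return err;

/* and on remove or a later probe error: */
dev_pm_domain_detach_list(gr3d->pd_list);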
@@ -527,13 +503,13 @@ static int gr3d_probe(struct platform_device *pdev)
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- return err;
+ goto err;
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto err;
}
/* initialize address register map */
@@ -541,6 +517,9 @@ static int gr3d_probe(struct platform_device *pdev)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
return 0;
+err:
+ dev_pm_domain_detach_list(gr3d->pd_list);
+ return err;
}
static void gr3d_remove(struct platform_device *pdev)
@@ -549,6 +528,7 @@ static void gr3d_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
+ dev_pm_domain_detach_list(gr3d->pd_list);
}
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index f21e57e8599e..e0c2019a591b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -521,12 +521,11 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
- u64 tmp, tmp1, tmp2;
+ u64 tmp, tmp1;
tmp = (u64)dfixed_trunc(in);
- tmp2 = (u64)out;
- tmp1 = (tmp << NFB) + (tmp2 >> 1);
- do_div(tmp1, tmp2);
+ tmp1 = (tmp << NFB) + ((u64)out >> 1);
+ do_div(tmp1, out);
return lower_32_bits(tmp1);
}
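Beyond dropping the tmp2 temporary, the function is a rounded fixed-point division: with NFB fractional bits it computes (in * 2^NFB + out/2) / out, where the added out/2 rounds to nearest instead of truncating. A worked comment-form example:

/*
 * compute_phase_incr(in, out) ~= round(in * 2^NFB / out)
 * e.g. NFB = 2, in = 5, out = 3:  (5 << 2) + 1 = 21,  21 / 3 = 7,
 * matching round(20 / 3) = round(6.67); plain truncation would give 6.
 */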
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 4da3c3d1abbc..49e4f63a5550 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -21,7 +21,7 @@
int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
int err = 0;
/*
@@ -34,18 +34,17 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
return err;
}
- if (output->edid)
- edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ if (output->drm_edid)
+ drm_edid = drm_edid_dup(output->drm_edid);
else if (output->ddc)
- edid = drm_get_edid(connector, output->ddc);
+ drm_edid = drm_edid_read_ddc(connector, output->ddc);
- cec_notifier_set_phys_addr_from_edid(output->cec, edid);
- drm_connector_update_edid_property(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_notifier_set_phys_addr(output->cec,
+ connector->display_info.source_physical_address);
- if (edid) {
- err = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ err = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return err;
}
@@ -98,6 +97,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
struct device_node *ddc, *panel;
+ const void *edid;
unsigned long flags;
int err, size;
@@ -124,8 +124,6 @@ int tegra_output_probe(struct tegra_output *output)
return PTR_ERR(output->panel);
}
- output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
-
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
if (ddc) {
output->ddc = of_get_i2c_adapter_by_node(ddc);
@@ -137,6 +135,9 @@ int tegra_output_probe(struct tegra_output *output)
}
}
+ edid = of_get_property(output->of_node, "nvidia,edid", &size);
+ output->drm_edid = drm_edid_alloc(edid, size);
+
output->hpd_gpio = devm_fwnode_gpiod_get(output->dev,
of_fwnode_handle(output->of_node),
"nvidia,hpd",
@@ -187,6 +188,8 @@ put_i2c:
if (output->ddc)
i2c_put_adapter(output->ddc);
+ drm_edid_free(output->drm_edid);
+
return err;
}
@@ -197,6 +200,8 @@ void tegra_output_remove(struct tegra_output *output)
if (output->ddc)
i2c_put_adapter(output->ddc);
+
+ drm_edid_free(output->drm_edid);
}
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index d8d0e4d1682f..fd4215e2f982 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -23,29 +23,16 @@
#define TEST_BYTE 0xae
/*
- * Wrappers to avoid an explicit type casting when passing action
- * functions to kunit_add_action().
+ * Wrappers to avoid cast warnings when passing action functions
+ * directly to kunit_add_action().
*/
-static void kfree_wrapper(void *ptr)
-{
- const void *obj = ptr;
-
- kfree(obj);
-}
-
-static void sg_free_table_wrapper(void *ptr)
-{
- struct sg_table *sgt = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
- sg_free_table(sgt);
-}
-
-static void drm_gem_shmem_free_wrapper(void *ptr)
-{
- struct drm_gem_shmem_object *shmem = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
- drm_gem_shmem_free(shmem);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
+ struct drm_gem_shmem_object *);
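KUNIT_DEFINE_ACTION_WRAPPER() generates the same kind of void-pointer trampoline the removed functions spelled out by hand; per include/kunit/resource.h it expands to roughly this shape:

#define EXAMPLE_ACTION_WRAPPER(wrapper, orig, arg_type)		\
	static void wrapper(void *in)				\
	{							\
		arg_type arg = (arg_type)in;			\
								\
		orig(arg);					\
	}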
/*
* Test creating a shmem GEM object backed by shmem buffer. The test
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 68093d6b6b16..5f2d1b6f9ee9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -49,7 +49,7 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
if (backlight) {
backlight->props.power = mode == DRM_MODE_DPMS_ON ?
- FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
backlight_update_status(backlight);
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index e0defb1d134f..0bd7707c053e 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -464,7 +464,7 @@ static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
* Note this assumes this driver is only ever used with the Acer C120, if we
* add support for other devices the vendor and model should be parameterized.
*/
-static struct edid gm12u320_edid = {
+static const struct edid gm12u320_edid = {
.header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 },
.mfg_id = { 0x04, 0x72 }, /* "ACR" */
.prod_code = { 0x20, 0xc1 }, /* C120h */
@@ -523,8 +523,15 @@ static struct edid gm12u320_edid = {
static int gm12u320_conn_get_modes(struct drm_connector *connector)
{
- drm_connector_update_edid_property(connector, &gm12u320_edid);
- return drm_add_edid_modes(connector, &gm12u320_edid);
+ const struct drm_edid *drm_edid;
+ int count;
+
+ drm_edid = drm_edid_alloc(&gm12u320_edid, sizeof(gm12u320_edid));
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = {
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index d1b32303d051..f0a7eb62116c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -271,7 +271,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
man = ttm_manager_type(priv->ttm_dev, mem_type);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
ttm_resource_free(bo, &res2);
ttm_resource_free(bo, &res1);
@@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 9c2f13e53162..22260e7aea58 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -198,7 +198,7 @@ static void ttm_resource_fini_basic(struct kunit *test)
ttm_resource_init(bo, place, res);
ttm_resource_fini(man, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
KUNIT_ASSERT_EQ(test, man->usage, 0);
}
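The .lru to .lru.link updates in these tests reflect the struct ttm_lru_item indirection introduced later in this series: the resource's list head is wrapped so that non-resource entries (cursor hitches) can live on the same LRU list. A hedged sketch of the idea, with names simplified from the real ttm_resource.h definitions:

#include <linux/list.h>

struct example_lru_item {
	struct list_head link;
	enum { EXAMPLE_LRU_RESOURCE, EXAMPLE_LRU_HITCH } type;
};

static inline bool example_item_is_res(const struct example_lru_item *item)
{
	/* Iterators skip anything that is not a real resource. */
	return item->type == EXAMPLE_LRU_RESOURCE;
}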
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2427be8bc97f..320592435252 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -506,150 +432,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- if (bo->pin_count) {
- *locked = false;
- if (busy)
- *busy = false;
- return false;
+ spin_lock(&bdev->lru_lock);
+ res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
} else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
- }
-
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
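A hedged usage sketch of ttm_bo_evict_first(): draining a manager by calling it until it reports an error, as ttm_resource_manager_evict_all() does further below. Treating -ENOENT as "LRU empty" mirrors the out_no_ref path above, but that mapping is an assumption of this sketch:

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/sched.h>

static int example_drain(struct ttm_device *bdev,
			 struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = { .interruptible = false };
	int ret;

	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched(); /* be nice on long LRU lists */
	} while (!ret);

	return ret == -ENOENT ? 0 : ret;
}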
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
-
- if (!busy_bo || !ticket)
- return -EBUSY;
-
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
-
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
-
- return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->ctx);
}
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ if (lret)
+ goto out;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
- }
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res);
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_unlock(&bdev->lru_lock);
+ return lret;
+}
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .ctx = ctx,
+ .ticket = ticket,
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ };
+ s64 lret;
+
+ evict_walk.walk.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ if (lret || !ticket)
+ goto out;
- ttm_bo_put(bo);
- return ret;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.trylock_only = false;
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -760,6 +688,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
@@ -769,22 +698,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
TTM_PL_FLAG_FALLBACK))
continue;
- do {
- ret = ttm_resource_alloc(bo, place, res);
- if (unlikely(ret && ret != -ENOSPC))
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res);
+ if (ret) {
+ if (ret != -ENOSPC)
return ret;
- if (likely(!ret) || !force_space)
- break;
-
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret == -EBUSY))
- break;
- if (unlikely(ret))
+ if (!may_evict)
+ continue;
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
- } while (1);
- if (ret)
- continue;
+ }
ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -1118,12 +1046,24 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1131,28 +1071,29 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
@@ -1164,12 +1105,13 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ if (ret)
goto out;
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -1179,30 +1121,54 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of bytes to swap out.
+ *
+ * Return: The number of bytes actually swapped out, or negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0b3f4267130c..3c07f4712d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -768,3 +768,154 @@ error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
return ret;
}
+
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct ttm_operation_ctx *ctx = walk->ctx;
+
+ *needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ *needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (walk->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ else
+ ret = dma_resv_lock(resv, walk->ticket);
+
+ if (!ret) {
+ *needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+	 * after waiting for the ticketlock, revert to
+ * trylocking for this walk.
+ */
+ walk->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
+{
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describe the walks and actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
+ * the corresponding ttm_buffer_object is locked and a reference is taken, and
+ * the LRU lock is dropped. The LRU lock may be dropped before locking and, in
+ * that case, it's verified that the item actually remains on the LRU list after
+ * the lock, and that the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunk, so that iteration terminates when the total exceeds
+ * @target or when the LRU list has been walked in full. It also terminates
+ * on error. Note that @target is defined by the caller and may mean
+ * something other than a number of pages.
+ *
+ * Note that, because of how dma_resv individualization is done, locking must
+ * be done either with the LRU lock held (trylocking only) or with a reference
+ * on the object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
+ s64 progress = 0;
+ s64 lret;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ bool bo_needs_unlock = false;
+ bool bo_locked = false;
+ int mem_type;
+
+ /*
+ * Attempt a trylock before taking a reference on the bo,
+ * since if we do it the other way around, and the trylock fails,
+ * we need to drop the lru lock to put the bo.
+ */
+ if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ bo_locked = true;
+ else if (!walk->ticket || walk->ctx->no_wait_gpu ||
+ walk->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+
+ lret = 0;
+ if (!bo_locked)
+ lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ if (!lret && bo->resource && bo->resource->mem_type == mem_type)
+ lret = walk->ops->process_bo(walk, bo);
+
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ ttm_bo_put(bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+
+ spin_lock(&bdev->lru_lock);
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ ttm_resource_cursor_fini(&cursor);
+ spin_unlock(&bdev->lru_lock);
+
+ return progress;
+}
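A hedged sketch of a caller-defined LRU walk using the API above. The ops/ctx wiring matches ttm_bo_evict_alloc() and the swapout walk; my_count_cb deliberately does no real work and only reports page counts, so it is illustrative only:

static s64 my_count_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	/* Return progress (here: pages), 0 to skip, negative to abort. */
	return bo->ttm ? (s64)bo->ttm->num_pages : 0;
}

static const struct ttm_lru_walk_ops my_count_ops = {
	.process_bo = my_count_cb,
};

static s64 my_walk(struct ttm_device *bdev, struct ttm_resource_manager *man,
		   struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &my_count_ops,
		.ctx = ctx,
		.trylock_only = true, /* never sleep on a ticketlock */
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}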
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 434cf0258000..e7cc4954c1bc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,35 +148,20 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo || bo->resource != res)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -274,14 +259,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..8504dbe19c1a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -91,7 +91,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4a66b851b67d..6d764ba88aab 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,68 @@
#include <drm/drm_util.h>
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can be called multiple times after each other.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -42,10 +104,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -57,6 +137,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -70,8 +151,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -84,14 +165,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -122,11 +227,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -155,7 +260,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -166,7 +271,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -197,9 +302,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -221,7 +326,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -390,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
};
struct dma_fence *fence;
int ret;
- unsigned i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&bdev->lru_lock);
- }
- }
- spin_unlock(&bdev->lru_lock);
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
@@ -460,53 +552,106 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @man: resource manager to iterate over
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
-
lockdep_assert_held(&man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
- return NULL;
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ ttm_resource_cursor_fini(cursor);
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
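A hedged usage sketch of the cursor API above: the caller holds bdev->lru_lock across the whole iteration and must always finalize the cursor (calling ttm_resource_cursor_fini() again after exhaustion is allowed, per its kerneldoc):

static unsigned int example_count(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	unsigned int n = 0;

	/* Caller holds man->bdev->lru_lock. */
	for (res = ttm_resource_manager_first(man, &cursor); res;
	     res = ttm_resource_manager_next(&cursor))
		n++;

	ttm_resource_cursor_fini(&cursor);

	return n;
}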
diff --git a/drivers/gpu/drm/udl/udl_edid.c b/drivers/gpu/drm/udl/udl_edid.c
index d67e6bf1f2ae..12f48ae17073 100644
--- a/drivers/gpu/drm/udl/udl_edid.c
+++ b/drivers/gpu/drm/udl/udl_edid.c
@@ -69,7 +69,7 @@ bool udl_probe_edid(struct udl_device *udl)
* The adapter sends all-zeros if no monitor has been
* connected. We consider anything else a connection.
*/
- return !!memchr_inv(hdr, 0, sizeof(hdr));
+ return !mem_is_zero(hdr, sizeof(hdr));
}
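For reference, mem_is_zero() is essentially a readability wrapper around the old idiom; a sketch of its shape, per include/linux/string.h:

static inline bool mem_is_zero(const void *s, size_t n)
{
	return !memchr_inv(s, 0, n);
}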
const struct drm_edid *udl_edid_read(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a165cbcdd27b..ebe52bef4ffb 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -26,6 +26,17 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
+static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj)
+{
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ enum drm_gem_object_status res = 0;
+
+ if (bo->base.pages)
+ res |= DRM_GEM_OBJECT_RESIDENT;
+
+ return res;
+}
+
/* Called DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -63,6 +74,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = v3d_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
@@ -279,7 +291,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
else
args->timeout_ns = 0;
- /* Asked to wait beyond the jiffie/scheduler precision? */
+ /* Asked to wait beyond the jiffy/scheduler precision? */
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5982941d933b..d7ff1f5fa481 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -95,7 +95,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_MAX_PERF_COUNTERS:
- args->value = v3d->max_counters;
+ args->value = v3d->perfmon_info.max_counters;
return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
@@ -184,6 +184,8 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
v3d_queue_to_string(queue), jobs_completed);
}
+
+ drm_show_memory_stats(p, file);
}
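drm_show_memory_stats() walks the file's GEM handles and consults the new .status callback to classify memory. A hedged sketch of a shmem-based status callback that additionally reports purgeable objects (v3d only reports residency above; the madv check here is an illustration):

#include <drm/drm_gem_shmem_helper.h>

static enum drm_gem_object_status example_gem_status(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	enum drm_gem_object_status res = 0;

	if (shmem->pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (shmem->madv > 0) /* userspace marked it DONTNEED */
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}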
static const struct file_operations v3d_drm_fops = {
@@ -301,12 +303,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident3 = V3D_READ(V3D_HUB_IDENT3);
v3d->rev = V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV);
- if (v3d->ver >= 71)
- v3d->max_counters = V3D_V71_NUM_PERFCOUNTERS;
- else if (v3d->ver >= 42)
- v3d->max_counters = V3D_V42_NUM_PERFCOUNTERS;
- else
- v3d->max_counters = 0;
+ v3d_perfmon_init(v3d);
v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(v3d->reset)) {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a0febdb6f214..cf4b23369dc4 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -106,10 +106,7 @@ struct v3d_dev {
bool single_irq_line;
- /* Different revisions of V3D have different total number of performance
- * counters
- */
- unsigned int max_counters;
+ struct v3d_perfmon_info perfmon_info;
void __iomem *hub_regs;
void __iomem *core_regs[3];
@@ -353,13 +350,9 @@ struct v3d_timestamp_query {
struct drm_syncobj *syncobj;
};
-/* Number of perfmons required to handle all supported performance counters */
-#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_MAX_COUNTERS, \
- DRM_V3D_MAX_PERF_COUNTERS)
-
struct v3d_performance_query {
/* Performance monitor IDs for this query */
- u32 kperfmon_ids[V3D_MAX_PERFMONS];
+ u32 *kperfmon_ids;
/* Syncobj that indicates the query availability */
struct drm_syncobj *syncobj;
@@ -574,6 +567,7 @@ int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
/* v3d_perfmon.c */
+void v3d_perfmon_init(struct v3d_dev *v3d);
void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index b7d0b02e1a95..cd7f1eedf17f 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -195,6 +195,23 @@ static const struct v3d_perf_counter_desc v3d_v71_performance_counters[] = {
{"QPU", "QPU-stalls-other", "[QPU] Stalled qcycles waiting for any other reason (vary/W/Z)"},
};
+void v3d_perfmon_init(struct v3d_dev *v3d)
+{
+ const struct v3d_perf_counter_desc *counters = NULL;
+ unsigned int max = 0;
+
+ if (v3d->ver >= 71) {
+ counters = v3d_v71_performance_counters;
+ max = ARRAY_SIZE(v3d_v71_performance_counters);
+ } else if (v3d->ver >= 42) {
+ counters = v3d_v42_performance_counters;
+ max = ARRAY_SIZE(v3d_v42_performance_counters);
+ }
+
+ v3d->perfmon_info.max_counters = max;
+ v3d->perfmon_info.counters = counters;
+}
+
void v3d_perfmon_get(struct v3d_perfmon *perfmon)
{
if (perfmon)
@@ -321,7 +338,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
/* Make sure all counters are valid. */
for (i = 0; i < req->ncounters; i++) {
- if (req->counters[i] >= v3d->max_counters)
+ if (req->counters[i] >= v3d->perfmon_info.max_counters)
return -EINVAL;
}
@@ -416,25 +433,14 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (!v3d->perfmon_info.max_counters)
+ return -EOPNOTSUPP;
+
/* Make sure that the counter ID is valid */
- if (req->counter >= v3d->max_counters)
+ if (req->counter >= v3d->perfmon_info.max_counters)
return -EINVAL;
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v42_performance_counters) !=
- V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v71_performance_counters) !=
- V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON((V3D_MAX_COUNTERS != V3D_V42_NUM_PERFCOUNTERS) &&
- (V3D_MAX_COUNTERS != V3D_V71_NUM_PERFCOUNTERS));
-
- if (v3d->ver >= 71)
- counter = &v3d_v71_performance_counters[req->counter];
- else if (v3d->ver >= 42)
- counter = &v3d_v42_performance_counters[req->counter];
- else
- return -EOPNOTSUPP;
+ counter = &v3d->perfmon_info.counters[req->counter];
strscpy(req->name, counter->name, sizeof(req->name));
strscpy(req->category, counter->category, sizeof(req->category));
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index 131b2909522a..d919a2fc9449 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -19,11 +19,17 @@ struct v3d_perf_counter_desc {
char description[256];
};
+struct v3d_perfmon_info {
+ /*
+	 * Different revisions of V3D have different total numbers of
+	 * performance counters.
+ */
+ unsigned int max_counters;
-#define V3D_V42_NUM_PERFCOUNTERS (87)
-#define V3D_V71_NUM_PERFCOUNTERS (93)
-
-/* Maximum number of performance counters supported by any version of V3D */
-#define V3D_MAX_COUNTERS (93)
+ /*
+ * Array of counters valid for the platform.
+ */
+ const struct v3d_perf_counter_desc *counters;
+};
#endif
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index ad1e6236ff6f..08d2a2739582 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -94,8 +94,10 @@ v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
if (query_info->queries) {
unsigned int i;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
drm_syncobj_put(query_info->queries[i].syncobj);
+ kvfree(query_info->queries[i].kperfmon_ids);
+ }
kvfree(query_info->queries);
}
@@ -365,8 +367,7 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- struct v3d_dev *v3d = job->base.v3d;
- u32 num_batches, *wg_counts;
+ u32 *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -379,17 +380,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
-
- num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]);
-
- /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
- if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
- args->cfg[4] = num_batches - 1;
- else
- args->cfg[4] = num_batches;
-
- WARN_ON(args->cfg[4] == ~0);
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
@@ -443,18 +435,23 @@ v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
v3d_put_bo_vaddr(bo);
}
-static void
-write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
- if (do_64bit) {
- u64 *dst64 = (u64 *)dst;
+ dst[idx] = value;
+}
- dst64[idx] = value;
- } else {
- u32 *dst32 = (u32 *)dst;
+static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
+{
+ dst[idx] = value;
+}
- dst32[idx] = (u32)value;
- }
+static void
+write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
+{
+ if (do_64bit)
+ write_to_buffer_64(dst, idx, value);
+ else
+ write_to_buffer_32(dst, idx, value);
}
static void
@@ -527,18 +524,24 @@ v3d_reset_performance_queries(struct v3d_cpu_job *job)
}
static void
-v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
+ unsigned int query)
{
- struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_performance_query_info *performance_query =
+ &job->performance_query;
struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_performance_query *perf_query =
+ &performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_perfmon *perfmon;
- u64 counter_values[V3D_MAX_COUNTERS];
+ unsigned int i, j, offset;
+
+ for (i = 0, offset = 0;
+ i < performance_query->nperfmons;
+ i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
+ struct v3d_perfmon *perfmon;
- for (int i = 0; i < performance_query->nperfmons; i++) {
perfmon = v3d_perfmon_find(v3d_priv,
- performance_query->queries[query].kperfmon_ids[i]);
+ perf_query->kperfmon_ids[i]);
if (!perfmon) {
DRM_DEBUG("Failed to find perfmon.");
continue;
@@ -546,14 +549,18 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 quer
v3d_perfmon_stop(v3d, perfmon, true);
- memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
- perfmon->ncounters * sizeof(u64));
+ if (job->copy.do_64bit) {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_64(data, offset + j,
+ perfmon->values[j]);
+ } else {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_32(data, offset + j,
+ perfmon->values[j]);
+ }
v3d_perfmon_put(perfmon);
}
-
- for (int i = 0; i < performance_query->ncounters; i++)
- write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
}
static void
@@ -660,7 +667,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, true);
+ drm_sched_start(&v3d->queue[q].sched);
}
mutex_unlock(&v3d->reset_lock);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4cdfabbf4964..d607aa9c4ec2 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -452,6 +452,7 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_timestamp_query timestamp;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -473,10 +474,10 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(timestamp.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(timestamp.offsets);
@@ -485,25 +486,26 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
for (i = 0; i < timestamp.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ if (get_user(offset, offsets++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = timestamp.count;
+ query_info->count = timestamp.count;
return 0;
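The copy_from_user() to get_user() conversions in this file are a pure idiom change for single scalars; a hedged equivalence sketch:

#include <linux/types.h>
#include <linux/uaccess.h>

static int example_read_u32(u32 __user *uptr, u32 *out)
{
	u32 v;

	/* get_user() returns 0 on success, -EFAULT on fault. */
	if (get_user(v, uptr))
		return -EFAULT;

	*out = v;
	return 0;
}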
@@ -519,6 +521,7 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
{
u32 __user *syncs;
struct drm_v3d_reset_timestamp_query reset;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -537,10 +540,10 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
syncs = u64_to_user_ptr(reset.syncs);
@@ -548,20 +551,21 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
for (i = 0; i < reset.count; i++) {
u32 sync;
- job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+ query_info->queries[i].offset = reset.offset + 8 * i;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = reset.count;
+ query_info->count = reset.count;
return 0;
@@ -578,6 +582,7 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_copy_timestamp_query copy;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -599,10 +604,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(copy.offsets);
@@ -611,25 +616,26 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
for (i = 0; i < copy.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ if (get_user(offset, offsets++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = copy.count;
+ query_info->count = copy.count;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
@@ -645,95 +651,121 @@ error:
}
static int
-v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
+v3d_copy_query_info(struct v3d_performance_query_info *query_info,
+ unsigned int count,
+ unsigned int nperfmons,
+ u32 __user *syncs,
+ u64 __user *kperfmon_ids,
+ struct drm_file *file_priv)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
- struct drm_v3d_reset_performance_query reset;
unsigned int i, j;
int err;
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- if (reset.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
-
- job->performance_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
- return -ENOMEM;
-
- syncs = u64_to_user_ptr(reset.syncs);
- kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
-
- for (i = 0; i < reset.count; i++) {
- u32 sync;
- u64 ids;
+ for (i = 0; i < count; i++) {
+ struct v3d_performance_query *query = &query_info->queries[i];
u32 __user *ids_pointer;
- u32 id;
+ u32 sync, id;
+ u64 ids;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ if (get_user(ids, kperfmon_ids++)) {
err = -EFAULT;
goto error;
}
+ query->kperfmon_ids =
+ kvmalloc_array(nperfmons,
+ sizeof(struct v3d_performance_query *),
+ GFP_KERNEL);
+ if (!query->kperfmon_ids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
ids_pointer = u64_to_user_ptr(ids);
- for (j = 0; j < reset.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ for (j = 0; j < nperfmons; j++) {
+ if (get_user(id, ids_pointer++)) {
+ kvfree(query->kperfmon_ids);
err = -EFAULT;
goto error;
}
- job->performance_query.queries[i].kperfmon_ids[j] = id;
+ query->kperfmon_ids[j] = id;
}
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
+ query->syncobj = drm_syncobj_find(file_priv, sync);
+ if (!query->syncobj) {
+ kvfree(query->kperfmon_ids);
err = -ENOENT;
goto error;
}
}
- job->performance_query.count = reset.count;
- job->performance_query.nperfmons = reset.nperfmons;
return 0;
error:
- v3d_performance_query_info_free(&job->performance_query, i);
+ v3d_performance_query_info_free(query_info, i);
return err;
}
static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *query_info;
+ struct drm_v3d_reset_performance_query reset;
+ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ query_info = &job->performance_query;
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ query_info->queries =
+ kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
+ return -ENOMEM;
+
+ err = v3d_copy_query_info(query_info,
+ reset.count,
+ reset.nperfmons,
+ u64_to_user_ptr(reset.syncs),
+ u64_to_user_ptr(reset.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
+
+ query_info->count = reset.count;
+ query_info->nperfmons = reset.nperfmons;
+
+ return 0;
+}
+
+static int
v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
struct drm_v3d_extension __user *ext,
struct v3d_cpu_job *job)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
+ struct v3d_performance_query_info *query_info;
struct drm_v3d_copy_performance_query copy;
- unsigned int i, j;
int err;
if (!job) {
@@ -752,56 +784,27 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
if (copy.pad)
return -EINVAL;
- if (copy.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
- job->performance_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
+ query_info = &job->performance_query;
+ query_info->queries =
+ kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
- syncs = u64_to_user_ptr(copy.syncs);
- kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+ err = v3d_copy_query_info(query_info,
+ copy.count,
+ copy.nperfmons,
+ u64_to_user_ptr(copy.syncs),
+ u64_to_user_ptr(copy.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
- for (i = 0; i < copy.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- err = -EFAULT;
- goto error;
- }
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- err = -EFAULT;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < copy.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- err = -EFAULT;
- goto error;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
-
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- job->performance_query.count = copy.count;
- job->performance_query.nperfmons = copy.nperfmons;
- job->performance_query.ncounters = copy.ncounters;
+ query_info->count = copy.count;
+ query_info->nperfmons = copy.nperfmons;
+ query_info->ncounters = copy.ncounters;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
@@ -810,10 +813,6 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
-
-error:
- v3d_performance_query_info_free(&job->performance_query, i);
- return err;
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d4ade9325401..7f686a0190e6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -114,6 +114,10 @@ int vbox_hw_init(struct vbox_private *vbox)
DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+ ret = pcim_request_region(pdev, 0, "vboxvideo");
+ if (ret)
+ return ret;
+
/* Map guest-heap at end of vram */
vbox->guest_heap = pcim_iomap_range(pdev, 0,
GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 86d629e45307..3f72be7490d5 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -469,7 +469,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
+ drm_err(dev, "Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
@@ -702,7 +702,7 @@ static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
*/
ret = vc4_bo_inc_usecnt(bo);
if (ret) {
- DRM_ERROR("Failed to increment BO usecnt\n");
+ drm_err(obj->dev, "Failed to increment BO usecnt\n");
return ERR_PTR(ret);
}
@@ -1050,10 +1050,10 @@ static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
for (i = 0; i < vc4->num_labels; i++) {
if (vc4->bo_labels[i].num_allocated) {
- DRM_ERROR("Destroying BO cache with %d %s "
- "BOs still allocated\n",
- vc4->bo_labels[i].num_allocated,
- vc4->bo_labels[i].name);
+ drm_err(dev, "Destroying BO cache with %d %s "
+ "BOs still allocated\n",
+ vc4->bo_labels[i].num_allocated,
+ vc4->bo_labels[i].name);
}
if (is_user_label(i))
@@ -1083,7 +1083,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ drm_err(dev, "Failed to look up GEM BO %d\n", args->handle);
kfree(name);
return -ENOENT;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 39152e755a13..a382dc4654bd 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -199,8 +199,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
DPI_FORMAT);
break;
default:
- DRM_ERROR("Unknown media bus format %d\n",
- bus_format);
+ drm_err(dev, "Unknown media bus format %d\n",
+ bus_format);
break;
}
}
@@ -236,11 +236,11 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
if (ret)
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(dev, "Failed to set clock rate: %d\n", ret);
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(dev, "Failed to set clock rate: %d\n", ret);
drm_dev_exit(idx);
}
@@ -339,7 +339,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dpi->core_clock)) {
ret = PTR_ERR(dpi->core_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get core clock: %d\n", ret);
+ drm_err(drm, "Failed to get core clock: %d\n", ret);
return ret;
}
@@ -347,13 +347,13 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get pixel clock: %d\n", ret);
+ drm_err(drm, "Failed to get pixel clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dpi->core_clock);
if (ret) {
- DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on core clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 46f6c4ce61c5..f5ccc1bf7a63 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -613,6 +613,7 @@ struct vc4_dsi {
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
+ struct drm_device *drm = dsi->bridge.dev;
struct dma_chan *chan = dsi->reg_dma_chan;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
@@ -633,19 +634,19 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
dsi->reg_dma_paddr,
4, 0);
if (!tx) {
- DRM_ERROR("Failed to set up DMA register write\n");
+ drm_err(drm, "Failed to set up DMA register write\n");
return;
}
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
- DRM_ERROR("Failed to submit DMA: %d\n", ret);
+ drm_err(drm, "Failed to submit DMA: %d\n", ret);
return;
}
ret = dma_sync_wait(chan, cookie);
if (ret)
- DRM_ERROR("Failed to wait for DMA: %d\n", ret);
+ drm_err(drm, "Failed to wait for DMA: %d\n", ret);
}
#define DSI_READ(offset) \
@@ -893,7 +894,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
+ drm_err(bridge->dev, "Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -986,13 +987,14 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = clk_prepare_enable(dsi->escape_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI escape clock: %d\n",
+ ret);
return;
}
ret = clk_prepare_enable(dsi->pll_phy_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI PLL: %d\n", ret);
return;
}
@@ -1014,7 +1016,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = clk_prepare_enable(dsi->pixel_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI pixel clock: %d\n", ret);
return;
}
@@ -1172,6 +1174,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->bridge.dev;
struct mipi_dsi_packet packet;
u32 pkth = 0, pktc = 0;
int i, ret;
@@ -1303,8 +1306,8 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
DSI_RXPKT1H_BC_PARAM);
if (rxlen != msg->rx_len) {
- DRM_ERROR("DSI returned %db, expecting %db\n",
- rxlen, (int)msg->rx_len);
+ drm_err(drm, "DSI returned %db, expecting %db\n",
+ rxlen, (int)msg->rx_len);
ret = -ENXIO;
goto reset_fifo_and_return;
}
@@ -1326,7 +1329,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
return ret;
reset_fifo_and_return:
- DRM_ERROR("DSI transfer failed, resetting: %d\n", ret);
+ drm_err(drm, "DSI transfer failed, resetting: %d\n", ret);
DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN);
udelay(1);
@@ -1468,7 +1471,8 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
+ drm_err(dsi->bridge.dev, "DSI%d: %s error\n", dsi->variant->port,
+ type);
*ret = IRQ_HANDLED;
}
@@ -1687,7 +1691,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
&dsi->reg_dma_paddr,
GFP_KERNEL);
if (!dsi->reg_dma_mem) {
- DRM_ERROR("Failed to get DMA memory\n");
+ drm_err(drm, "Failed to get DMA memory\n");
return -ENOMEM;
}
@@ -1702,8 +1706,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dsi->reg_dma_chan)) {
ret = PTR_ERR(dsi->reg_dma_chan);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get DMA channel: %d\n",
- ret);
+ drm_err(drm, "Failed to get DMA channel: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 03648f954985..24fb1b57e1dd 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -832,8 +832,8 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
*/
temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
if (!temp) {
- DRM_ERROR("Failed to allocate storage for copying "
- "in bin/render CLs.\n");
+ drm_err(dev, "Failed to allocate storage for copying "
+ "in bin/render CLs.\n");
ret = -ENOMEM;
goto fail;
}
@@ -866,7 +866,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
if (IS_ERR(bo)) {
- DRM_ERROR("Couldn't allocate BO for binning\n");
+ drm_err(dev, "Couldn't allocate BO for binning\n");
ret = PTR_ERR(bo);
goto fail;
}
@@ -1153,10 +1153,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
}
exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
- if (!exec) {
- DRM_ERROR("malloc failure on exec struct\n");
+ if (!exec)
return -ENOMEM;
- }
+
exec->dev = vc4;
ret = vc4_v3d_pm_get(vc4);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d57c4a5948c8..6611ab7c26a6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -429,6 +429,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/*
* NOTE: This function should really take vc4_hdmi->mutex, but
@@ -441,7 +442,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
* the lock for now.
*/
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret) {
+ drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n",
+ ret);
+ return connector_status_unknown;
+ }
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
@@ -698,7 +704,7 @@ static int vc4_hdmi_write_infoframe(struct drm_connector *connector,
ret = vc4_hdmi_stop_packet(vc4_hdmi, type, true);
if (ret) {
- DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
+ drm_err(drm, "Failed to wait for infoframe to go idle: %d\n", ret);
goto out;
}
@@ -734,7 +740,7 @@ static int vc4_hdmi_write_infoframe(struct drm_connector *connector,
ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
if (ret)
- DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+ drm_err(drm, "Failed to wait for infoframe to start: %d\n", ret);
out:
drm_dev_exit(idx);
@@ -895,7 +901,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret < 0)
- DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_err(drm, "Failed to release power domain: %d\n", ret);
drm_dev_exit(idx);
@@ -1437,7 +1443,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ drm_err(drm, "Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -1462,19 +1468,19 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
div_u64(tmds_char_rate, 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
- DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set HSM clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
if (ret) {
- DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set pixel clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
if (ret) {
- DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on pixel clock: %d\n", ret);
goto err_put_runtime_pm;
}
@@ -1490,13 +1496,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
- DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set pixel bvb clock rate: %d\n", ret);
goto err_disable_pixel_clock;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
- DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on pixel bvb clock: %d\n", ret);
goto err_disable_pixel_clock;
}
@@ -2945,13 +2951,13 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
if (IS_ERR(vc4_hdmi->pixel_clock)) {
ret = PTR_ERR(vc4_hdmi->pixel_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get pixel clock\n");
+ drm_err(drm, "Failed to get pixel clock\n");
return ret;
}
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
+ drm_err(drm, "Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
@@ -3035,31 +3041,31 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
+ drm_err(drm, "Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
- DRM_ERROR("Failed to get pixel bvb clock\n");
+ drm_err(drm, "Failed to get pixel bvb clock\n");
return PTR_ERR(vc4_hdmi->pixel_bvb_clock);
}
vc4_hdmi->audio_clock = devm_clk_get(dev, "audio");
if (IS_ERR(vc4_hdmi->audio_clock)) {
- DRM_ERROR("Failed to get audio clock\n");
+ drm_err(drm, "Failed to get audio clock\n");
return PTR_ERR(vc4_hdmi->audio_clock);
}
vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
if (IS_ERR(vc4_hdmi->cec_clock)) {
- DRM_ERROR("Failed to get CEC clock\n");
+ drm_err(drm, "Failed to get CEC clock\n");
return PTR_ERR(vc4_hdmi->cec_clock);
}
vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(vc4_hdmi->reset)) {
- DRM_ERROR("Failed to get HDMI reset line\n");
+ drm_err(drm, "Failed to get HDMI reset line\n");
return PTR_ERR(vc4_hdmi->reset);
}
@@ -3215,14 +3221,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
+ drm_err(drm, "Failed to find ddc node in device tree\n");
return -ENODEV;
}
vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!vc4_hdmi->ddc) {
- DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
+ drm_err(drm, "Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 04af672caacb..2a835a5cff9d 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -191,8 +191,8 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
- DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
- ret);
+ drm_err(&hvs->vc4->base, "Failed to allocate space for filter kernel: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 563b3dfeb9b9..ef93d8e22a35 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -76,7 +76,7 @@ vc4_overflow_mem_work(struct work_struct *work)
bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_bo_slot < 0) {
- DRM_ERROR("Couldn't allocate binner overflow mem\n");
+ drm_err(&vc4->base, "Couldn't allocate binner overflow mem\n");
goto complete;
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 04ac7805e6d5..bf5c4e36c94e 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -441,21 +441,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
vc4->v3d = v3d;
v3d->vc4 = vc4;
- v3d->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(v3d->clk)) {
- int ret = PTR_ERR(v3d->clk);
-
- if (ret == -ENOENT) {
- /* bcm2835 didn't have a clock reference in the DT. */
- ret = 0;
- v3d->clk = NULL;
- } else {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get V3D clock: %d\n",
- ret);
- return ret;
- }
- }
+ v3d->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(v3d->clk))
+ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
ret = platform_get_irq(pdev, 0);
if (ret < 0)
@@ -471,8 +459,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
- DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
- V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
+ drm_err(drm, "V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
+ V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
ret = -EINVAL;
goto err_put_runtime_pm;
}
@@ -485,7 +473,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
ret = vc4_irq_install(drm, vc4->irq);
if (ret) {
- DRM_ERROR("Failed to install IRQ handler\n");
+ drm_err(drm, "Failed to install IRQ handler\n");
goto err_put_runtime_pm;
}
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 7dff3ca5af6b..0c17284bf6f5 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -65,7 +65,7 @@ utile_width(int cpp)
case 8:
return 2;
default:
- DRM_ERROR("unknown cpp: %d\n", cpp);
+ pr_err("unknown cpp: %d\n", cpp);
return 1;
}
}
@@ -82,7 +82,7 @@ utile_height(int cpp)
case 8:
return 4;
default:
- DRM_ERROR("unknown cpp: %d\n", cpp);
+ pr_err("unknown cpp: %d\n", cpp);
return 1;
}
}
@@ -390,8 +390,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
bin_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_slot < 0) {
if (bin_slot != -EINTR && bin_slot != -ERESTARTSYS) {
- DRM_ERROR("Failed to allocate binner memory: %d\n",
- bin_slot);
+ drm_err(dev, "Failed to allocate binner memory: %d\n",
+ bin_slot);
}
return bin_slot;
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 070813b8aff8..eb64e881051e 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -557,7 +557,7 @@ static void vc4_vec_encoder_disable(struct drm_encoder *encoder,
ret = pm_runtime_put(&vec->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_err(drm, "Failed to release power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -591,7 +591,7 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
ret = pm_runtime_resume_and_get(&vec->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ drm_err(drm, "Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -604,13 +604,13 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
*/
ret = clk_set_rate(vec->clock, 108000000);
if (ret) {
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vec->clock);
if (ret) {
- DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on core clock: %d\n", ret);
goto err_put_runtime_pm;
}
@@ -806,7 +806,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vec->clock)) {
ret = PTR_ERR(vec->clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get clock: %d\n", ret);
+ drm_err(drm, "Failed to get clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 8dc9dc13896e..0c1a713b7b7b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -164,9 +164,11 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- /* FIXME: There's a confusion between bpp and depth between this and
+ /*
+ * FIXME: There's a confusion between bpp and depth between this and
* fbdev helpers. We have to go with 0, meaning "pick the default",
- * which ix XRGB8888 in all cases. */
+ * which is XRGB8888 in all cases.
+ */
dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 8f5710debb1e..5e46ea5b96dc 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -103,7 +103,6 @@ struct vkms_output {
struct drm_writeback_connector wb_connector;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
- struct drm_pending_vblank_event *event;
/* ordered wq for composer_work */
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 36046b12f296..040b7f113a3b 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -75,7 +75,7 @@ static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixe
static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = le16_to_cpu(pixels[3]);
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -85,7 +85,7 @@ static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = (u16)0xffff;
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -95,7 +95,7 @@ static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -178,7 +178,7 @@ static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
pixels[3] = cpu_to_le16(in_pixel->a);
pixels[2] = cpu_to_le16(in_pixel->r);
@@ -188,9 +188,9 @@ static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
- pixels[3] = 0xffff;
+ pixels[3] = cpu_to_le16(0xffff);
pixels[2] = cpu_to_le16(in_pixel->r);
pixels[1] = cpu_to_le16(in_pixel->g);
pixels[0] = cpu_to_le16(in_pixel->b);
@@ -198,7 +198,7 @@ static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 50ad3105c16e..2825dd3149ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1609,6 +1609,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct drm_driver driver = {
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e97c9da451b3..edfd812e0f41 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -12,34 +12,15 @@ subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
subdir-ccflags-y += -I$(obj) -I$(src)
# generated sources
-hostprogs := xe_gen_wa_oob
+hostprogs := xe_gen_wa_oob
generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
-
quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
-
$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
-uses_generated_oob := \
- $(obj)/xe_ggtt.o \
- $(obj)/xe_device.o \
- $(obj)/xe_gsc.o \
- $(obj)/xe_gt.o \
- $(obj)/xe_guc.o \
- $(obj)/xe_guc_ads.o \
- $(obj)/xe_guc_pc.o \
- $(obj)/xe_migrate.o \
- $(obj)/xe_pat.o \
- $(obj)/xe_ring_ops.o \
- $(obj)/xe_vm.o \
- $(obj)/xe_wa.o \
- $(obj)/xe_ttm_stolen_mgr.o
-
-$(uses_generated_oob): $(generated_oob)
-
# Please keep these build lists sorted!
# core driver code
@@ -47,7 +28,6 @@ $(uses_generated_oob): $(generated_oob)
xe-y += xe_bb.o \
xe_bo.o \
xe_bo_evict.o \
- xe_debugfs.o \
xe_devcoredump.o \
xe_device.o \
xe_device_sysfs.o \
@@ -60,12 +40,12 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
+ xe_gsc_debugfs.o \
xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \
xe_gt_ccs_mode.o \
xe_gt_clock.o \
- xe_gt_debugfs.o \
xe_gt_freq.o \
xe_gt_idle.o \
xe_gt_mcr.o \
@@ -78,7 +58,6 @@ xe-y += xe_bb.o \
xe_guc_ads.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
- xe_guc_debugfs.o \
xe_guc_hwconfig.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
@@ -88,9 +67,9 @@ xe-y += xe_bb.o \
xe_heci_gsc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
+ xe_hw_engine_group.o \
xe_hw_fence.o \
xe_huc.o \
- xe_huc_debugfs.o \
xe_irq.o \
xe_lrc.o \
xe_migrate.o \
@@ -126,7 +105,6 @@ xe-y += xe_bb.o \
xe_ttm_vram_mgr.o \
xe_tuning.o \
xe_uc.o \
- xe_uc_debugfs.o \
xe_uc_fw.o \
xe_vm.o \
xe_vram.o \
@@ -143,7 +121,6 @@ xe-$(CONFIG_HWMON) += xe_hwmon.o
# graphics virtualization (SR-IOV) support
xe-y += \
xe_gt_sriov_vf.o \
- xe_gt_sriov_vf_debugfs.o \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o
@@ -152,7 +129,6 @@ xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
xe_gt_sriov_pf_config.o \
xe_gt_sriov_pf_control.o \
- xe_gt_sriov_pf_debugfs.o \
xe_gt_sriov_pf_monitor.o \
xe_gt_sriov_pf_policy.o \
xe_gt_sriov_pf_service.o \
@@ -194,6 +170,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_display.o \
display/xe_display_misc.o \
display/xe_display_rps.o \
+ display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
@@ -299,6 +276,16 @@ ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
endif
ifeq ($(CONFIG_DEBUG_FS),y)
+ xe-y += xe_debugfs.o \
+ xe_gt_debugfs.o \
+ xe_gt_sriov_vf_debugfs.o \
+ xe_gt_stats.o \
+ xe_guc_debugfs.o \
+ xe_huc_debugfs.o \
+ xe_uc_debugfs.o
+
+ xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o
+
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_debugfs.o \
i915-display/intel_display_debugfs_params.o \
@@ -322,3 +309,6 @@ quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
+
+uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 8f9f60b28306..6b30743a2f6c 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -351,6 +351,7 @@ enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 2feedddf1e40..f27a2c75b56d 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -21,11 +21,6 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
-static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
-{
- return dev_get_drvdata(kdev);
-}
-
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
#define IS_I830(dev_priv) (dev_priv && 0)
@@ -80,14 +75,9 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270
-
-/* Workarounds not handled yet */
-#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step <= last; })
-
-#define IS_LP(xe) (0)
-#define IS_GEN9_LP(xe) (0)
-#define IS_GEN9_BC(xe) (0)
+#define IS_LP(xe) ((xe) && 0)
+#define IS_GEN9_LP(xe) ((xe) && 0)
+#define IS_GEN9_BC(xe) ((xe) && 0)
#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
@@ -115,9 +105,6 @@ struct i915_sched_attr {
};
#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-#define pdev_to_i915 pdev_to_xe_device
-#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
-
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
#ifdef CONFIG_ARM64
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index a20d2638ea7a..bdae8392e125 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -7,7 +7,8 @@
#define I915_VMA_H
#include <uapi/drm/i915_drm.h>
-#include <drm/drm_mm.h>
+
+#include "xe_ggtt_types.h"
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
@@ -19,7 +20,7 @@ struct xe_bo;
struct i915_vma {
struct xe_bo *bo, *dpt;
- struct drm_mm_node node;
+ struct xe_ggtt_node *node;
};
#define i915_ggtt_clear_scanout(bo) do { } while (0)
@@ -28,7 +29,7 @@ struct i915_vma {
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
- return vma->node.start;
+ return vma->node->base.start;
}
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
index 0006ef812346..2cf13a572ab0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
@@ -6,15 +6,9 @@
#ifndef __INTEL_STEP_H__
#define __INTEL_STEP_H__
-#include "xe_device_types.h"
#include "xe_step.h"
-#define intel_display_step_name xe_display_step_name
-
-static inline
-const char *xe_display_step_name(struct xe_device *xe)
-{
- return xe_step_name(xe->info.step.display);
-}
+#define intel_step xe_step
+#define intel_step_name xe_step_name
#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index f835492f73fb..63ce97cc4cfe 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -7,6 +7,7 @@
#include <drm/ttm/ttm_bo.h>
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "xe_bo.h"
@@ -28,6 +29,14 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
int ret;
+ /*
+ * Some modifiers require physical alignment of 64KiB VRAM pages;
+ * require that the BO in those cases is created correctly.
+ */
+ if (XE_IOCTL_DBG(xe, intel_fb_needs_64k_phys(mode_cmd->modifier[0]) &&
+ !(bo->flags & XE_BO_FLAG_NEEDS_64K)))
+ return -EINVAL;
+
xe_bo_get(bo);
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 816ad13821a8..99499d6c0256 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -8,8 +8,10 @@
#include "intel_display_types.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
-#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
@@ -37,7 +39,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe)) {
+ if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -48,6 +50,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
else
drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
}
+
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index c860fda410c8..75736faf2a80 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
@@ -46,7 +46,7 @@ static bool has_display(struct xe_device *xe)
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
- if (!xe_modparam.enable_display)
+ if (!xe_modparam.probe_display)
return 0;
return intel_display_driver_probe_defer(pdev);
@@ -62,7 +62,7 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
- if (!xe_modparam.enable_display)
+ if (!xe_modparam.probe_display)
return;
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
@@ -104,7 +104,7 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_power_domains_cleanup(xe);
@@ -112,7 +112,7 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
/* Fake uncore lock */
@@ -127,25 +127,27 @@ int xe_display_init_nommio(struct xe_device *xe)
static void xe_display_fini_noirq(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove_noirq(xe);
- intel_opregion_cleanup(xe);
+ intel_opregion_cleanup(display);
}
int xe_display_init_noirq(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
intel_display_driver_early_probe(xe);
/* Early display init.. */
- intel_opregion_setup(xe);
+ intel_opregion_setup(display);
/*
* Fill the dram structure to get the system dram info. This will be
@@ -159,7 +161,7 @@ int xe_display_init_noirq(struct xe_device *xe)
err = intel_display_driver_probe_noirq(xe);
if (err) {
- intel_opregion_cleanup(xe);
+ intel_opregion_cleanup(display);
return err;
}
@@ -170,7 +172,7 @@ static void xe_display_fini_noaccel(void *arg)
{
struct xe_device *xe = arg;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove_nogem(xe);
@@ -180,7 +182,7 @@ int xe_display_init_noaccel(struct xe_device *xe)
{
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
err = intel_display_driver_probe_nogem(xe);
@@ -192,7 +194,7 @@ int xe_display_init_noaccel(struct xe_device *xe)
int xe_display_init(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
return intel_display_driver_probe(xe);
@@ -200,7 +202,7 @@ int xe_display_init(struct xe_device *xe)
void xe_display_fini(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_hpd_poll_fini(xe);
@@ -211,7 +213,7 @@ void xe_display_fini(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_register(xe);
@@ -221,7 +223,7 @@ void xe_display_register(struct xe_device *xe)
void xe_display_unregister(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_unregister_dsm_handler();
@@ -231,7 +233,7 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_driver_remove(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove(xe);
@@ -241,7 +243,7 @@ void xe_display_driver_remove(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
@@ -250,16 +252,18 @@ void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
- if (!xe->info.enable_display)
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
return;
if (gu_misc_iir & GU_MISC_GSE)
- intel_opregion_asle_intr(xe);
+ intel_opregion_asle_intr(display);
}
void xe_display_irq_reset(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
gen11_display_irq_reset(xe);
@@ -267,7 +271,7 @@ void xe_display_irq_reset(struct xe_device *xe)
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
@@ -304,10 +308,23 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
}
+/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed)
+ xe_display_pm_suspend(xe, true);
+
+ intel_hpd_poll_enable(xe);
+}
+
void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
+ struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
/*
@@ -316,14 +333,11 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
*/
intel_power_domains_disable(xe);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
- if (has_display(xe)) {
+ if (!runtime && has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- if (!runtime)
- intel_display_driver_disable_user_access(xe);
- }
-
- if (!runtime)
+ intel_display_driver_disable_user_access(xe);
intel_display_driver_suspend(xe);
+ }
xe_display_flush_cleanup_work(xe);
@@ -336,7 +350,7 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_encoder_suspend_all(&xe->display);
}
- intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);
+ intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(xe);
}
@@ -344,7 +358,7 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
bool s2idle = suspend_to_idle();
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_power_domains_suspend(xe, s2idle);
@@ -352,9 +366,20 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
intel_display_power_suspend_late(xe);
}
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_disable(xe);
+
+ if (xe->d3cold.allowed)
+ xe_display_pm_resume(xe, true);
+}
+
void xe_display_pm_resume_early(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_power_resume_early(xe);
@@ -364,7 +389,9 @@ void xe_display_pm_resume_early(struct xe_device *xe)
void xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
- if (!xe->info.enable_display)
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
return;
intel_dmc_resume(xe);
@@ -380,17 +407,14 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(xe);
- if (!runtime)
+ if (!runtime && has_display(xe)) {
intel_display_driver_resume(xe);
-
- if (has_display(xe)) {
drm_kms_helper_poll_enable(&xe->drm);
- if (!runtime)
- intel_display_driver_enable_user_access(xe);
+ intel_display_driver_enable_user_access(xe);
+ intel_hpd_poll_disable(xe);
}
- intel_hpd_poll_disable(xe);
- intel_opregion_resume(xe);
+ intel_opregion_resume(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
@@ -408,7 +432,7 @@ int xe_display_probe(struct xe_device *xe)
{
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
goto no_display;
intel_display_device_probe(xe);
@@ -421,7 +445,7 @@ int xe_display_probe(struct xe_device *xe)
return 0;
no_display:
- xe->info.enable_display = false;
+ xe->info.probe_display = false;
unset_display_features(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 000fb5799df5..53d727fd792b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -38,6 +38,8 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -67,6 +69,8 @@ static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
#endif /* _XE_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
new file mode 100644
index 000000000000..68e3d1959ad6
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "intel_display_wa.h"
+
+#include "xe_device.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
+
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+{
+ return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index ccd0d87d438a..f99d901a3214 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -9,7 +9,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
-#include "xe_gt.h"
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d7db44e79eaf..b58fc4ba2aac 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -12,7 +12,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
-#include "xe_gt.h"
#include "xe_pm.h"
static void
@@ -204,21 +203,28 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
+ if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
vma->node = bo->ggtt_node;
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
- ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
- align, 0);
- if (ret)
+ vma->node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(vma->node)) {
+ ret = PTR_ERR(vma->node);
goto out_unlock;
+ }
+
+ ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ if (ret) {
+ xe_ggtt_node_fini(vma->node);
+ goto out_unlock;
+ }
for (x = 0; x < size; x += XE_PAGE_SIZE) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
xe->pat.idx[XE_CACHE_NONE]);
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node.start + x, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
}
} else {
u32 i, ggtt_ofs;
@@ -227,12 +233,19 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
/* display seems to use tiles instead of bytes here, so convert it back.. */
u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;
- ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
- align, 0);
- if (ret)
+ vma->node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(vma->node)) {
+ ret = PTR_ERR(vma->node);
+ goto out_unlock;
+ }
+
+ ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ if (ret) {
+ xe_ggtt_node_fini(vma->node);
goto out_unlock;
+ }
- ggtt_ofs = vma->node.start;
+ ggtt_ofs = vma->node->base.start;
for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
@@ -320,14 +333,11 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
-
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
- vma->bo->ggtt_node.start != vma->node.start)
- xe_ggtt_remove_node(ggtt, &vma->node, false);
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
+ vma->bo->ggtt_node->base.start != vma->node->base.start)
+ xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
ttm_bo_unpin(&vma->bo->ttm);
@@ -377,8 +387,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
}
/*
- * For Xe introduce dummy intel_dpt_create which just return NULL and
- * intel_dpt_destroy which does nothing.
+ * For Xe introduce a dummy intel_dpt_create which just returns NULL,
+ * an intel_dpt_destroy which does nothing, and a fake intel_dpt_offset returning 0.
*/
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
@@ -389,3 +399,8 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
return;
}
+
+u64 intel_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 990285aa9b26..6619a40aed15 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -16,7 +16,6 @@
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
-#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"
@@ -40,10 +39,14 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
bool ret = true;
- if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
+ if (!xe_uc_fw_is_enabled(&gsc->fw)) {
+ drm_dbg_kms(&xe->drm,
+ "GSC Components not ready for HDCP2.x\n");
return false;
+ }
xe_pm_runtime_get(xe);
if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
@@ -53,7 +56,7 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
goto out;
}
- if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
+ if (!xe_gsc_proxy_init_done(gsc))
ret = false;
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 5eccd6abb3ef..a50ab9eae40a 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -18,6 +18,9 @@
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
@@ -104,6 +107,9 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
+ if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ return NULL;
+
/*
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index c38db2a74614..81b71903675e 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -104,6 +104,7 @@
#define CSFE_CHICKEN1(base) XE_REG((base) + 0xd4, XE_REG_OPTION_MASKED)
#define GHWSP_CSB_REPORT_DIS REG_BIT(15)
#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
+#define CS_PRIORITY_MEM_READ REG_BIT(7)
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index e2a925be137c..7702364b65f1 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -32,8 +32,12 @@
#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
+#define HECI_FWSTS2(base) XE_REG((base) + 0xc48)
+#define HECI_FWSTS3(base) XE_REG((base) + 0xc60)
+#define HECI_FWSTS4(base) XE_REG((base) + 0xc64)
#define HECI_FWSTS5(base) XE_REG((base) + 0xc68)
#define HECI1_FWSTS5_HUC_AUTH_DONE REG_BIT(19)
+#define HECI_FWSTS6(base) XE_REG((base) + 0xc6c)
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 3c2865040058..660ff42e45a6 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -80,7 +80,10 @@
#define LE_CACHEABILITY_MASK REG_GENMASK(1, 0)
#define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value)
-#define XE2_GAMREQSTRM_CTRL XE_REG(0x4194)
+#define STATELESS_COMPRESSION_CTRL XE_REG_MCR(0x4148)
+#define UNIFIED_COMPRESSION_FORMAT REG_GENMASK(3, 0)
+
+#define XE2_GAMREQSTRM_CTRL XE_REG_MCR(0x4194)
#define CG_DIS_CNTLBUS REG_BIT(6)
#define CCS_AUX_INV XE_REG(0x4208)
@@ -91,6 +94,8 @@
#define VE1_AUX_INV XE_REG(0x42b8)
#define AUX_INV REG_BIT(0)
+#define XE2_LMEM_CFG XE_REG(0x48b0)
+
#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
@@ -100,12 +105,14 @@
#define CHICKEN_RASTER_1 XE_REG_MCR(0x6204, XE_REG_OPTION_MASKED)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+#define DIS_CLIP_NEGATIVE_BOUNDING_BOX REG_BIT(6)
#define CHICKEN_RASTER_2 XE_REG_MCR(0x6208, XE_REG_OPTION_MASKED)
#define TBIMR_FAST_CLIP REG_BIT(5)
#define FF_MODE XE_REG_MCR(0x6210)
#define DIS_TE_AUTOSTRIP REG_BIT(31)
+#define VS_HIT_MAX_VALUE_MASK REG_GENMASK(25, 20)
#define DIS_MESH_PARTIAL_AUTOSTRIP REG_BIT(16)
#define DIS_MESH_AUTOSTRIP REG_BIT(15)
@@ -190,6 +197,7 @@
#define GSCPSMI_BASE XE_REG(0x880c)
#define CCCHKNREG1 XE_REG_MCR(0x8828)
+#define L3CMPCTRL REG_BIT(23)
#define ENCOMPPERFFIX REG_BIT(18)
/* Fuse readout registers for GT */
@@ -364,6 +372,9 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define L3SQCREG2 XE_REG_MCR(0xb104)
+#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
+
#define L3SQCREG3 XE_REG_MCR(0xb108)
#define COMPPWOVERFETCHEN REG_BIT(28)
@@ -403,6 +414,10 @@
#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+#define LMEM_CFG XE_REG(0xcf58)
+#define LMEM_EN REG_BIT(31)
+#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+
#define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED)
#define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index 1189f5a540a8..a9b0091cb7ee 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -52,6 +52,7 @@
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
+#define OAG_OACONTROL_OA_PES_DISAG_EN REG_GENMASK(27, 22)
#define OAG_OACONTROL_OA_CCS_SELECT_MASK REG_GENMASK(18, 16)
#define OAG_OACONTROL_OA_COUNTER_SEL_MASK REG_GENMASK(4, 2)
#define OAG_OACONTROL_OA_COUNTER_ENABLE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 23e33ec84902..dfa869f0dddd 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -15,8 +15,6 @@
#define GU_MISC_IRQ_OFFSET 0x444f0
#define GU_MISC_GSE REG_BIT(27)
-#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
@@ -24,11 +22,14 @@
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
+#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+
#define GU_DEBUG XE_REG(0x101018)
#define DRIVERFLR_STATUS REG_BIT(31)
-#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
-#define SGSI_SIDECLK_DIS REG_BIT(17)
+#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
+#define GUEST_GTT_UPDATE_EN REG_BIT(8)
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
@@ -66,6 +67,9 @@
#define DISPLAY_IRQ REG_BIT(16)
#define GT_DW_IRQ(x) REG_BIT(x)
+#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
+#define VF_CAP REG_BIT(0)
+
#define PVC_RP_STATE_CAP XE_REG(0x281014)
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
deleted file mode 100644
index 017b4ddd1ecf..000000000000
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _REGS_XE_SRIOV_REGS_H_
-#define _REGS_XE_SRIOV_REGS_H_
-
-#include "regs/xe_reg_defs.h"
-
-#define XE2_LMEM_CFG XE_REG(0x48b0)
-
-#define LMEM_CFG XE_REG(0xcf58)
-#define LMEM_EN REG_BIT(31)
-#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
-
-#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
-#define GUEST_GTT_UPDATE_EN REG_BIT(8)
-
-#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
-#define VF_CAP REG_BIT(0)
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 6e58931fddd4..0e3408f4952c 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -2,11 +2,7 @@
# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
-xe_live_test-y = xe_live_test_mod.o \
- xe_bo_test.o \
- xe_dma_buf_test.o \
- xe_migrate_test.o \
- xe_mocs_test.o
+xe_live_test-y = xe_live_test_mod.o
# Normal kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9f3c02826464..8dac069483e8 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_bo_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -36,7 +36,8 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
/* Optionally clear bo *and* CCS data in VRAM. */
if (clear) {
- fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource);
+ fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (IS_ERR(fence)) {
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
@@ -124,7 +125,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Testing system memory\n");
bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -154,12 +155,18 @@ out_unlock:
static int ccs_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!xe_device_has_flat_ccs(xe)) {
- kunit_info(test, "Skipping non-flat-ccs device.\n");
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ /* For xe2+ dgfx, we don't handle ccs metadata */
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
+ kunit_skip(test, "xe2+ dgfx device\n");
return 0;
}
@@ -177,11 +184,12 @@ static int ccs_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_ccs_migrate_kunit(struct kunit *test)
+static void xe_ccs_migrate_kunit(struct kunit *test)
{
- xe_call_for_each_device(ccs_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ ccs_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
@@ -198,7 +206,6 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
xe_vm_lock(vm, false);
bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device,
bo_flags);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
@@ -208,7 +215,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
@@ -325,13 +332,12 @@ cleanup_bo:
static int evict_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!IS_DGFX(xe)) {
- kunit_info(test, "Skipping non-discrete device %s.\n",
- dev_name(xe->drm.dev));
+ kunit_skip(test, "non-discrete device\n");
return 0;
}
@@ -345,8 +351,23 @@ static int evict_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_bo_evict_kunit(struct kunit *test)
+static void xe_bo_evict_kunit(struct kunit *test)
{
- xe_call_for_each_device(evict_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ evict_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit);
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
deleted file mode 100644
index a324cde77db8..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_bo_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_bo_tests[] = {
- KUNIT_CASE(xe_ccs_migrate_kunit),
- KUNIT_CASE(xe_bo_evict_kunit),
- {}
-};
-
-static struct kunit_suite xe_bo_test_suite = {
- .name = "xe_bo",
- .test_cases = xe_bo_tests,
-};
-
-kunit_test_suite(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h
deleted file mode 100644
index 0113ab45066a..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_BO_TEST_H_
-#define _XE_BO_TEST_H_
-
-struct kunit;
-
-void xe_ccs_migrate_kunit(struct kunit *test);
-void xe_bo_evict_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index e7f9b531c465..cedd3e88a6fb 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -3,12 +3,12 @@
* Copyright © 2022 Intel Corporation
*/
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_dma_buf_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -107,7 +107,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -126,7 +126,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, params->mem_mask);
+ params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -258,7 +258,7 @@ static const struct dma_buf_test_params test_params[] = {
static int dma_buf_run_device(struct xe_device *xe)
{
const struct dma_buf_test_params *params;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
@@ -274,8 +274,22 @@ static int dma_buf_run_device(struct xe_device *xe)
return 0;
}
-void xe_dma_buf_kunit(struct kunit *test)
+static void xe_dma_buf_kunit(struct kunit *test)
{
- xe_call_for_each_device(dma_buf_run_device);
+ struct xe_device *xe = test->priv;
+
+ dma_buf_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
deleted file mode 100644
index 99cdb718b6c6..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_dma_buf_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_dma_buf_tests[] = {
- KUNIT_CASE(xe_dma_buf_kunit),
- {}
-};
-
-static struct kunit_suite xe_dma_buf_test_suite = {
- .name = "xe_dma_buf",
- .test_cases = xe_dma_buf_tests,
-};
-
-kunit_test_suite(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
deleted file mode 100644
index e6b464ddd526..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_DMA_BUF_TEST_H_
-#define _XE_DMA_BUF_TEST_H_
-
-struct kunit;
-
-void xe_dma_buf_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
index fefe79b3b75a..bc5156966ce9 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -12,7 +12,9 @@
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
+#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_pm.h"
/**
* xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
@@ -88,3 +90,40 @@ int xe_kunit_helper_xe_device_test_init(struct kunit *test)
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
+
+KUNIT_DEFINE_ACTION_WRAPPER(put_xe_pm_runtime, xe_pm_runtime_put, struct xe_device *);
+
+/**
+ * xe_kunit_helper_xe_device_live_test_init - Prepare a &xe_device for
+ * use in a live KUnit test.
+ * @test: the &kunit where live &xe_device will be used
+ *
+ * This function expects a pointer to the &xe_device in &test.param_value,
+ * as prepared by &xe_pci_live_device_gen_param, and stores that
+ * pointer as &kunit.priv to allow the test code to access it.
+ *
+ * This function makes sure that the device is not wedged and then resumes
+ * it, to avoid waking up the device in the middle of the test. It uses a
+ * deferred cleanup action to release the runtime_pm reference.
+ *
+ * This function can be used as a custom implementation of &kunit_suite.init.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test)
+{
+ struct xe_device *xe = xe_device_const_cast(test->param_value);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ kunit_info(test, "running on %s device\n", xe->info.platform_name);
+
+ KUNIT_ASSERT_FALSE(test, xe_device_wedged(xe));
+ xe_pm_runtime_get(xe);
+ KUNIT_ASSERT_EQ(test, 0, kunit_add_action_or_reset(test, put_xe_pm_runtime, xe));
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_live_test_init);
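For reference, a live suite built on this helper follows the same shape as the converted xe_bo and xe_dma_buf suites above: the device parameter generator supplies one &xe_device per run, and the init helper resolves it into &kunit.priv. A minimal sketch (the xe_example_* names are hypothetical, not part of this series; the suite would still need to be registered with kunit_test_suite() from the live test module):

#include <kunit/test.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

/* Hypothetical live test body: the init helper has already stored the
 * device under test in test->priv and taken a runtime_pm reference.
 */
static void xe_example_kunit(struct kunit *test)
{
        struct xe_device *xe = test->priv;

        KUNIT_EXPECT_NOT_ERR_OR_NULL(test, xe);
}

static struct kunit_case xe_example_tests[] = {
        /* One parameter, and thus one invocation, per Xe device found. */
        KUNIT_CASE_PARAM(xe_example_kunit, xe_pci_live_device_gen_param),
        {}
};

static struct kunit_suite xe_example_test_suite = {
        .name = "xe_example",
        .test_cases = xe_example_tests,
        .init = xe_kunit_helper_xe_device_live_test_init,
};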
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
index 067a1babf049..83665f7b1254 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -14,4 +14,6 @@ struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
struct device *dev);
int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index eb1ea99a5a8b..5f14737c8210 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -3,6 +3,17 @@
* Copyright © 2023 Intel Corporation
*/
#include <linux/module.h>
+#include <kunit/test.h>
+
+extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_dma_buf_test_suite;
+extern struct kunit_suite xe_migrate_test_suite;
+extern struct kunit_suite xe_mocs_test_suite;
+
+kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_dma_buf_test_suite);
+kunit_test_suite(xe_migrate_test_suite);
+kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 962f6438e219..1a192a2a941b 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_migrate_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -105,7 +105,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
}
xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- fence = xe_migrate_clear(m, remote, remote->ttm.resource);
+ fence = xe_migrate_clear(m, remote, remote->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
"Clearing remote small bo", test)) {
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
@@ -279,7 +280,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing small buffer object\n");
xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
expected = 0;
- fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
+ fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
goto out;
@@ -300,7 +302,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing big buffer object\n");
xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
expected = 0;
- fence = xe_migrate_clear(m, big, big->ttm.resource);
+ fence = xe_migrate_clear(m, big, big->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
goto out;
@@ -334,7 +337,7 @@ vunmap:
static int migrate_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
@@ -354,8 +357,425 @@ static int migrate_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_migrate_sanity_kunit(struct kunit *test)
+static void xe_migrate_sanity_kunit(struct kunit *test)
{
- xe_call_for_each_device(migrate_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ migrate_test_run_device(xe);
+}
+
+static struct dma_fence *blt_copy(struct xe_tile *tile,
+ struct xe_bo *src_bo, struct xe_bo *dst_bo,
+ bool copy_only_ccs, const char *str, struct kunit *test)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u64 size = src_bo->size;
+ struct xe_res_cursor src_it, dst_it;
+ struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u32 src_L0_pt, dst_L0_pt;
+ u64 src_L0, dst_L0;
+ int err;
+ bool src_is_vram = mem_type_is_vram(src->mem_type);
+ bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+
+ if (!src_is_vram)
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+ else
+ xe_res_first(src, 0, size, &src_it);
+
+ if (!dst_is_vram)
+ xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
+ else
+ xe_res_first(dst, 0, size, &dst_it);
+
+ while (size) {
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 flush_flags = 0;
+ u32 update_idx;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+ u32 pte_flags;
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
+
+ src_L0 = min(src_L0, dst_L0);
+
+ pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt, 0,
+ avail_pts, avail_pts);
+
+ /* Add copy commands size here */
+ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
+ ((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);
+
+ bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ if (src_is_vram)
+ xe_res_next(&src_it, src_L0);
+ else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, false,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram)
+ xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
+ &dst_it, src_L0, dst);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+ if (!copy_only_ccs)
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
+
+ if (copy_only_ccs)
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ src_is_vram, dst_L0_ofs,
+ dst_is_vram, src_L0, dst_L0_ofs,
+ copy_only_ccs);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, xe->info.has_usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ size -= src_L0;
+ continue;
+
+err:
+ xe_bb_free(bb, NULL);
+
+err_sync:
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+ long timeout;
+ long ret;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Evict vram buffer object\n");
+ ret = xe_bo_evict(vram_bo, true);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Clear evicted vram data first value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Clear evicted vram data last value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Restore vram buffer object\n");
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ return;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);
+
+static void test_clear(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Clear vram buffer object\n");
+ expected = 0x0000000000000000;
+ fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
+ return;
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear main buffer first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear main buffer last value", test);
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+ struct kunit *test)
+{
+ struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ long ret;
+
+ sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(sys_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(sys_bo));
+ return;
+ }
+
+ xe_bo_lock(sys_bo, false);
+ ret = xe_bo_validate(sys_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_sysbo;
+ }
+
+ ret = xe_bo_vmap(sys_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_sysbo;
+ }
+ xe_bo_unlock(sys_bo);
+
+ ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(ccs_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(ccs_bo));
+ return;
+ }
+
+ xe_bo_lock(ccs_bo, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_ccsbo;
+ }
+
+ ret = xe_bo_vmap(ccs_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_ccsbo;
+ }
+ xe_bo_unlock(ccs_bo);
+
+ vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ if (IS_ERR(vram_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(vram_bo));
+ return;
+ }
+
+ xe_bo_lock(vram_bo, false);
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ test_clear(xe, tile, sys_bo, vram_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(vram_bo, false);
+ xe_bo_vunmap(vram_bo);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(ccs_bo, false);
+ xe_bo_vunmap(ccs_bo);
+ xe_bo_unlock(ccs_bo);
+
+ xe_bo_lock(sys_bo, false);
+ xe_bo_vunmap(sys_bo);
+ xe_bo_unlock(sys_bo);
+free_vrambo:
+ xe_bo_put(vram_bo);
+free_ccsbo:
+ xe_bo_put(ccs_bo);
+free_sysbo:
+ xe_bo_put(sys_bo);
+}
+
+static int validate_ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_tile *tile;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
+ kunit_skip(test, "non-xe2 discrete device\n");
+ return 0;
+ }
+
+ xe_pm_runtime_get(xe);
+
+ for_each_tile(tile, xe, id)
+ validate_ccs_test_run_tile(xe, tile, test);
+
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static void xe_validate_ccs_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ validate_ccs_test_run_device(xe);
+}
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
deleted file mode 100644
index eb0d8963419c..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_migrate_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_migrate_tests[] = {
- KUNIT_CASE(xe_migrate_sanity_kunit),
- {}
-};
-
-static struct kunit_suite xe_migrate_test_suite = {
- .name = "xe_migrate",
- .test_cases = xe_migrate_tests,
-};
-
-kunit_test_suite(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
deleted file mode 100644
index 7c645c66824f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MIGRATE_TEST_H_
-#define _XE_MIGRATE_TEST_H_
-
-struct kunit;
-
-void xe_migrate_sanity_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 67c65e88c384..79be73b4a02b 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_mocs_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -23,7 +23,7 @@ struct live_mocs {
static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
{
unsigned int flags;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
memset(arg, 0, sizeof(*arg));
@@ -41,7 +41,7 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 l3cc, l3cc_expected;
unsigned int i;
u32 reg_val;
@@ -78,7 +78,7 @@ static void read_l3cc_table(struct xe_gt *gt,
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 mocs, mocs_expected;
unsigned int i;
u32 reg_val;
@@ -134,11 +134,15 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_kernel_kunit(struct kunit *test)
+static void xe_live_mocs_kernel_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_kernel_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_kernel_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
static int mocs_reset_test_run_device(struct xe_device *xe)
{
@@ -148,7 +152,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
struct xe_gt *gt;
unsigned int flags;
int id;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
@@ -175,8 +179,26 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_reset_kunit(struct kunit *test)
+static void xe_live_mocs_reset_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_reset_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_reset_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
+
+static struct kunit_case xe_mocs_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_mocs_kernel_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_mocs_reset_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_mocs_test_suite = {
+ .name = "xe_mocs",
+ .test_cases = xe_mocs_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
deleted file mode 100644
index 6315886b659e..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_mocs_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_mocs_tests[] = {
- KUNIT_CASE(xe_live_mocs_kernel_kunit),
- KUNIT_CASE(xe_live_mocs_reset_kunit),
- {}
-};
-
-static struct kunit_suite xe_mocs_test_suite = {
- .name = "xe_mocs",
- .test_cases = xe_mocs_tests,
-};
-
-kunit_test_suite(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
deleted file mode 100644
index e7699d495411..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MOCS_TEST_H_
-#define _XE_MOCS_TEST_H_
-
-struct kunit;
-
-void xe_live_mocs_kernel_kunit(struct kunit *test);
-void xe_live_mocs_reset_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index f62809ca8b51..67404863087e 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,58 +12,6 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
-struct kunit_test_data {
- int ndevs;
- xe_device_fn xe_fn;
-};
-
-static int dev_to_xe_device_fn(struct device *dev, void *__data)
-
-{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct kunit_test_data *data = __data;
- int ret = 0;
- int idx;
-
- data->ndevs++;
-
- if (drm_dev_enter(drm, &idx))
- ret = data->xe_fn(to_xe_device(dev_get_drvdata(dev)));
- drm_dev_exit(idx);
-
- return ret;
-}
-
-/**
- * xe_call_for_each_device - Iterate over all devices this driver binds to
- * @xe_fn: Function to call for each device.
- *
- * This function iterates over all devices this driver binds to, and calls
- * @xe_fn for each one of them. If the called function returns anything other
- * than 0, iteration is stopped and the return value is returned by this
- * function. Across each function call, drm_dev_enter() / drm_dev_exit() is
- * called for the corresponding drm device.
- *
- * Return: Number of devices iterated or
- * the error code of a call to @xe_fn returning an error code.
- */
-int xe_call_for_each_device(xe_device_fn xe_fn)
-{
- int ret;
- struct kunit_test_data data = {
- .xe_fn = xe_fn,
- .ndevs = 0,
- };
-
- ret = driver_for_each_device(&xe_pci_driver.driver, NULL,
- &data, dev_to_xe_device_fn);
-
- if (!data.ndevs)
- kunit_skip(current->kunit_test, "test runs only on hardware\n");
-
- return ret ?: data.ndevs;
-}
-
/**
* xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
* @xe_fn: Function to call for each device.
@@ -167,3 +115,33 @@ done:
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
+
+/**
+ * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @prev: the previously returned value, or NULL for the first iteration
+ * @desc: the buffer for a parameter name
+ *
+ * Iterates over the available Xe devices on the system. Uses the device name
+ * as the parameter name.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next &struct xe_device ready to be used as a parameter
+ * or NULL if there are no more Xe devices on the system.
+ */
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+{
+ const struct xe_device *xe = prev;
+ struct device *dev = xe ? xe->drm.dev : NULL;
+ struct device *next;
+
+ next = driver_find_next_device(&xe_pci_driver.driver, dev);
+ if (dev)
+ put_device(dev);
+ if (!next)
+ return NULL;
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", dev_name(next));
+ return pdev_to_xe_device(to_pci_dev(next));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_live_device_gen_param);
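The kernel-doc above captures the KUnit parameter-generator contract: @prev is NULL on the first call, each call returns the next parameter, NULL terminates the iteration, and a human-readable name of at most KUNIT_PARAM_DESC_SIZE bytes is written to @desc. A minimal sketch of the same contract over a static table (example_values and example_gen_param are hypothetical names, not part of this series):

#include <kunit/test.h>
#include <linux/kernel.h>

static const int example_values[] = { 1, 2, 3 };

static const void *example_gen_param(const void *prev, char *desc)
{
        /* NULL @prev means the first iteration; otherwise advance by one. */
        const int *v = prev ? (const int *)prev + 1 : example_values;

        if (v >= example_values + ARRAY_SIZE(example_values))
                return NULL;    /* no more parameters: stop the iteration */

        snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value-%d", *v);
        return v;
}

A test case would then consume it via KUNIT_CASE_PARAM(some_test, example_gen_param), receiving each returned pointer in test->param_value, exactly as the live tests receive their &xe_device.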
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index a6705a536391..744a37583d2d 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -16,7 +16,7 @@
static void check_graphics_ip(const struct xe_graphics_desc *graphics)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -30,7 +30,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
static void check_media_ip(const struct xe_media_desc *media)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index f40dcec83992..ede46800aff1 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -19,7 +19,6 @@ typedef int (*xe_device_fn)(struct xe_device *);
typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
typedef void (*xe_media_fn)(const struct xe_media_desc *);
-int xe_call_for_each_device(xe_device_fn xe_fn);
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
@@ -35,4 +34,6 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index f217445c246a..36a3b5420fef 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -31,16 +31,23 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-struct rtp_test_case {
+struct rtp_to_sr_test_case {
const char *name;
struct xe_reg expected_reg;
u32 expected_set_bits;
u32 expected_clr_bits;
- unsigned long expected_count;
+ unsigned long expected_count_sr_entries;
unsigned int expected_sr_errors;
+ unsigned long expected_active;
const struct xe_rtp_entry_sr *entries;
};
+struct rtp_test_case {
+ const char *name;
+ unsigned long expected_active;
+ const struct xe_rtp_entry *entries;
+};
+
static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
{
return true;
@@ -51,13 +58,14 @@ static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
return false;
}
-static const struct rtp_test_case cases[] = {
+static const struct rtp_to_sr_test_case rtp_to_sr_cases[] = {
{
.name = "coalesce-same-reg",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Different bits on the same register: create a single entry */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -76,7 +84,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry since rules don't match */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -95,7 +104,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("first"),
XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)),
@@ -121,7 +131,7 @@ static const struct rtp_test_case cases[] = {
{
.name = "match-or-xfail",
.expected_reg = REGULAR_REG1,
- .expected_count = 0,
+ .expected_count_sr_entries = 0,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("leading-or"),
XE_RTP_RULES(OR, FUNC(match_yes)),
@@ -148,7 +158,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry due to one of the rules */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -167,7 +178,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 2,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 2,
/* Same bits on different registers are not coalesced */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -186,7 +198,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(1) | REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Check clr vs set actions on different bits */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -207,7 +220,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = TEMP_FIELD,
.expected_clr_bits = TEMP_MASK,
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Check FIELD_SET works */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -225,7 +239,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -245,7 +260,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -265,7 +281,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 2,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -287,28 +304,35 @@ static const struct rtp_test_case cases[] = {
},
};
-static void xe_rtp_process_tests(struct kunit *test)
+static void xe_rtp_process_to_sr_tests(struct kunit *test)
{
- const struct rtp_test_case *param = test->param_value;
+ const struct rtp_to_sr_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- unsigned long idx, count = 0;
+ unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0;
+
+ xe_reg_sr_init(reg_sr, "xe_rtp_to_sr_tests", xe);
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
- xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
sr_entry = sre;
- count++;
+ count_sr_entries++;
}
- KUNIT_EXPECT_EQ(test, count, param->expected_count);
- if (count) {
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+
+ KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries);
+ if (count_sr_entries) {
KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
@@ -319,12 +343,162 @@ static void xe_rtp_process_tests(struct kunit *test)
KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
}
+/*
+ * Entries below follow the logic used with xe_wa_oob.rules:
+ * 1) Entries with an empty name are OR'ed: all entries since the last
+ *    entry with a name are marked active
+ * 2) There are no actions associated with the rules
+ */
+static const struct rtp_test_case rtp_cases[] = {
+ {
+ .name = "active1",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active2",
+ .expected_active = BIT(0) | BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active-inactive",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-active",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-1st_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_yes), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-2nd_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_yes), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-last_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_yes)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-no_or_active-inactive",
+ .expected_active = 0,
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+};
+
+static void xe_rtp_process_tests(struct kunit *test)
+{
+ const struct rtp_test_case *param = test->param_value;
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ unsigned long count_rtp_entries = 0, active = 0;
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
+ xe_rtp_process(&ctx, param->entries);
+
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+}
+
+static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc);
+
static void rtp_desc(const struct rtp_test_case *t, char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+KUNIT_ARRAY_PARAM(rtp, rtp_cases, rtp_desc);
static int xe_rtp_test_init(struct kunit *test)
{
@@ -357,6 +531,7 @@ static void xe_rtp_test_exit(struct kunit *test)
}
static struct kunit_case xe_rtp_tests[] = {
+ KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params),
KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
index 7a1ae213e750..9c23ad9dba8d 100644
--- a/drivers/gpu/drm/xe/tests/xe_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
-#include <linux/sched.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
/*
* Each test that provides a kunit private test structure, place a test id
@@ -31,8 +31,6 @@ struct xe_test_priv {
#define XE_TEST_DECLARE(x) x
#define XE_TEST_ONLY(x) unlikely(x)
-#define XE_TEST_EXPORT
-#define xe_cur_kunit() current->kunit_test
/**
* xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
@@ -48,10 +46,10 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
{
struct xe_test_priv *priv;
- if (!xe_cur_kunit())
+ if (!kunit_get_current_test())
return NULL;
- priv = xe_cur_kunit()->priv;
+ priv = kunit_get_current_test()->priv;
return priv->id == id ? priv : NULL;
}
@@ -59,8 +57,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
#define XE_TEST_DECLARE(x)
#define XE_TEST_ONLY(x) 0
-#define XE_TEST_EXPORT static
-#define xe_cur_kunit() NULL
#define xe_cur_kunit_priv(_id) NULL
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 9d0c715142b9..c96d1fe34151 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -74,6 +74,7 @@ static const struct platform_test_case cases[] = {
GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
};
static void platform_desc(const struct platform_test_case *t, char *desc)
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 8b0cc1bc9327..e22bbf57fca7 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -81,7 +81,7 @@
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
- (void)drm_WARN(&(xe)->drm, !(condition), "[" DRM_NAME "] Assertion `%s` failed!\n" msg, \
+ (void)drm_WARN(&(xe)->drm, !(condition), "Assertion `%s` failed!\n" msg, \
__stringify(condition), ## arg); \
})
#else
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 261d3d6c8a93..06911e9a3bf5 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -13,7 +13,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_dma_buf.h"
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_pm_runtime_get_noresume(xe);
+ if (xe_rpm_reclaim_safe(xe)) {
+ /*
+ * We might be called through swapout in the validation path of
+ * another TTM device, so unconditionally acquire rpm here.
+ */
+ xe_pm_runtime_get(xe);
+ } else {
+ drm_WARN_ON(&xe->drm, handle_system_ccs);
+ xe_pm_runtime_get_noresume(xe);
+ }
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
@@ -793,8 +802,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
}
} else {
- if (move_lacks_source)
- fence = xe_migrate_clear(migrate, bo, new_mem);
+ if (move_lacks_source) {
+ u32 flags = 0;
+
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
+
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
+ }
else
fence = xe_migrate_copy(migrate, bo, bo, old_mem,
new_mem, handle_system_ccs);
@@ -1090,7 +1107,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node.size)
+ if (bo->ggtt_node && bo->ggtt_node->base.size)
xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
@@ -1264,13 +1281,14 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
- (flags & XE_BO_NEEDS_64K))) {
- aligned_size = ALIGN(size, SZ_64K);
+ (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
+ size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
+
+ aligned_size = ALIGN(size, align);
if (type != ttm_bo_type_device)
- size = ALIGN(size, SZ_64K);
+ size = ALIGN(size, align);
flags |= XE_BO_FLAG_INTERNAL_64K;
- alignment = SZ_64K >> PAGE_SHIFT;
-
+ alignment = align >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
flags &= ~XE_BO_FLAG_INTERNAL_64K;
@@ -1490,11 +1508,10 @@ struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
u16 cpu_caching,
- enum ttm_bo_type type,
u32 flags)
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
- cpu_caching, type,
+ cpu_caching, ttm_bo_type_device,
flags | XE_BO_FLAG_USER);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1989,6 +2006,13 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
+ /* CCS formats need physical placement at a 64K alignment in VRAM. */
+ if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
+ (bo_flags & XE_BO_FLAG_SCANOUT) &&
+ !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
+ IS_ALIGNED(args->size, SZ_64K))
+ bo_flags |= XE_BO_FLAG_NEEDS_64K;
+
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
return -EINVAL;
@@ -2018,7 +2042,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
}
bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (vm)
xe_vm_unlock(vm);
@@ -2324,7 +2348,6 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
bo = xe_bo_create_user(xe, NULL, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_NEEDS_CPU_ACCESS);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6de894c728f5..dbfb3209615d 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -36,8 +36,9 @@
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
#define XE_BO_FLAG_NEEDS_UC BIT(14)
-#define XE_BO_NEEDS_64K BIT(15)
-#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
+#define XE_BO_FLAG_NEEDS_64K BIT(15)
+#define XE_BO_FLAG_NEEDS_2M BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
@@ -86,7 +87,6 @@ struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
u16 cpu_caching,
- enum ttm_bo_type type,
u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
@@ -194,9 +194,12 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
- XE_WARN_ON(bo->ggtt_node.size > bo->size);
- XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
- return bo->ggtt_node.start;
+ if (XE_WARN_ON(!bo->ggtt_node))
+ return 0;
+
+ XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
+ XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
+ return bo->ggtt_node->base.start;
}
int xe_bo_vmap(struct xe_bo *bo);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 10450f1fbbde..2ed558ac2264 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -8,12 +8,13 @@
#include <linux/iosys-map.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_ggtt_types.h"
+
struct xe_device;
struct xe_vm;
@@ -39,7 +40,7 @@ struct xe_bo {
/** @placement: current placement for this BO */
struct ttm_placement placement;
/** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct drm_mm_node ggtt_node;
+ struct xe_ggtt_node *ggtt_node;
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
@@ -58,6 +59,8 @@ struct xe_bo {
#endif
/** @freed: List node for delayed put. */
struct llist_node freed;
+ /** @update_index: Update index if PT BO */
+ int update_index;
/** @created: Whether the bo has passed initial creation */
bool created;
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 1011e5d281fa..668615c6b172 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -6,6 +6,7 @@
#include "xe_debugfs.h"
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -26,10 +27,7 @@
#include "xe_vm.h"
#endif
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
DECLARE_FAULT_ATTR(gt_reset_failure);
-#endif
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
@@ -47,10 +45,9 @@ static int info(struct seq_file *m, void *data)
drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
- drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
+ drm_printf(&p, "stepping G:%s M:%s B:%s\n",
xe_step_name(xe->info.step.graphics),
xe_step_name(xe->info.step.media),
- xe_step_name(xe->info.step.display),
xe_step_name(xe->info.step.basedie));
drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx));
drm_printf(&p, "platform %d\n", xe->info.platform);
@@ -214,8 +211,5 @@ void xe_debugfs_register(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
-#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
-#endif
-
}
diff --git a/drivers/gpu/drm/xe/xe_debugfs.h b/drivers/gpu/drm/xe/xe_debugfs.h
index 715b8e2e0bd9..17f4c2f1b5e4 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_debugfs.h
@@ -8,6 +8,10 @@
struct xe_device;
+#ifdef CONFIG_DEBUG_FS
void xe_debugfs_register(struct xe_device *xe);
+#else
+static inline void xe_debugfs_register(struct xe_device *xe) { }
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 62c2b10fbf1d..bdb76e834e4c 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -66,22 +66,9 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ struct xe_devcoredump *coredump)
{
- struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
-
- /* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
- xe_vm_snapshot_capture_delayed(ss->vm);
- xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
- xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
-}
-
-static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
- size_t count, void *data, size_t datalen)
-{
- struct xe_devcoredump *coredump = data;
struct xe_device *xe;
struct xe_devcoredump_snapshot *ss;
struct drm_printer p;
@@ -89,18 +76,11 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
struct timespec64 ts;
int i;
- if (!coredump)
- return -ENODEV;
-
xe = coredump_to_xe(coredump);
ss = &coredump->snapshot;
- /* Ensure delayed work is captured before continuing */
- flush_work(&ss->work);
-
iter.data = buffer;
- iter.offset = 0;
- iter.start = offset;
+ iter.start = 0;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -134,10 +114,83 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
return count - iter.remain;
}
+static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
+{
+ int i;
+
+ xe_guc_ct_snapshot_free(ss->ct);
+ ss->ct = NULL;
+
+ xe_guc_exec_queue_snapshot_free(ss->ge);
+ ss->ge = NULL;
+
+ xe_sched_job_snapshot_free(ss->job);
+ ss->job = NULL;
+
+ for (i = 0; i < XE_NUM_HW_ENGINES; i++)
+ if (ss->hwe[i]) {
+ xe_hw_engine_snapshot_free(ss->hwe[i]);
+ ss->hwe[i] = NULL;
+ }
+
+ xe_vm_snapshot_free(ss->vm);
+ ss->vm = NULL;
+}
+
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+ xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+
+ /* Calculate devcoredump size */
+ ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
+
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ return;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
+ xe_devcoredump_snapshot_free(ss);
+}
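
The deferred work now renders the human-readable dump exactly once. The measure-then-fill idiom it relies on works because drm_coredump_printer() only counts bytes when the iterator has no destination buffer; reduced to a sketch with illustrative names:

	ssize_t size = render(NULL, INT_MAX);	/* pass 1: count bytes only */
	char *buf = kvmalloc(size, GFP_USER);
	if (buf)
		render(buf, size);		/* pass 2: actually print */
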
+
+static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct xe_devcoredump *coredump = data;
+ struct xe_devcoredump_snapshot *ss;
+ ssize_t byte_copied;
+
+ if (!coredump)
+ return -ENODEV;
+
+ ss = &coredump->snapshot;
+
+ /* Ensure delayed work is captured before continuing */
+ flush_work(&ss->work);
+
+ if (!ss->read.buffer)
+ return -ENODEV;
+
+ if (offset >= ss->read.size)
+ return 0;
+
+ byte_copied = count < ss->read.size - offset ? count :
+ ss->read.size - offset;
+ memcpy(buffer, ss->read.buffer + offset, byte_copied);
+
+ return byte_copied;
+}
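
With the buffer precomputed, the read callback becomes a plain bounded copy. The pattern, reduced to its essentials (standalone sketch, illustrative names):

	static ssize_t buf_read(char *dst, loff_t off, size_t count,
				const char *src, ssize_t src_size)
	{
		ssize_t n;

		if (off >= src_size)
			return 0;		/* EOF */
		n = min_t(ssize_t, count, src_size - off);
		memcpy(dst, src + off, n);
		return n;
	}
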
+
static void xe_devcoredump_free(void *data)
{
struct xe_devcoredump *coredump = data;
- int i;
/* Our device is gone. Nothing to do... */
if (!data || !coredump_to_xe(coredump))
@@ -145,13 +198,8 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
- xe_guc_ct_snapshot_free(coredump->snapshot.ct);
- xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
- xe_sched_job_snapshot_free(coredump->snapshot.job);
- for (i = 0; i < XE_NUM_HW_ENGINES; i++)
- if (coredump->snapshot.hwe[i])
- xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
- xe_vm_snapshot_free(coredump->snapshot.vm);
+ xe_devcoredump_snapshot_free(&coredump->snapshot);
+ kvfree(coredump->snapshot.read.buffer);
/* To prevent stale data on next snapshot, clear everything */
memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
@@ -171,7 +219,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- struct task_struct *task = NULL;
int i;
bool cookie;
@@ -179,14 +226,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef) {
- task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID);
- if (task)
- process_name = task->comm;
- }
+ if (q->vm && q->vm->xef)
+ process_name = q->vm->xef->process_name;
strscpy(ss->process_name, process_name);
- if (task)
- put_task_struct(task);
ss->gt = q->gt;
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
@@ -266,4 +308,5 @@ int xe_devcoredump_init(struct xe_device *xe)
{
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 923cdf72a816..440d05d77a5a 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -46,6 +46,14 @@ struct xe_devcoredump_snapshot {
struct xe_sched_job_snapshot *job;
/** @vm: Snapshot of VM state */
struct xe_vm_snapshot *vm;
+
+ /** @read: devcoredump in human readable format */
+ struct {
+ /** @read.size: size of devcoredump in human readable format */
+ ssize_t size;
+ /** @read.buffer: buffer of devcoredump in human readable format */
+ char *buffer;
+ } read;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index c89deffffb6d..70d4e4d46c3c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -15,7 +15,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
@@ -37,6 +37,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
+#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
@@ -64,6 +65,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
struct xe_drm_client *client;
struct xe_file *xef;
int ret = -ENOMEM;
+ struct task_struct *task = NULL;
xef = kzalloc(sizeof(*xef), GFP_KERNEL);
if (!xef)
@@ -92,6 +94,13 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = xef;
kref_init(&xef->refcount);
+ task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
+ if (task) {
+ xef->process_name = kstrdup(task->comm, GFP_KERNEL);
+ xef->pid = task->pid;
+ put_task_struct(task);
+ }
+
return 0;
}
@@ -110,6 +119,7 @@ static void xe_file_destroy(struct kref *ref)
spin_unlock(&xe->clients.lock);
xe_drm_client_put(xef->client);
+ kfree(xef->process_name);
kfree(xef);
}
@@ -156,6 +166,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
* vm->lock taken during xe_exec_queue_kill().
*/
xa_for_each(&xef->exec_queue.xa, idx, q) {
+ if (q->vm && q->hwe->hw_engine_group)
+ xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
@@ -241,6 +253,7 @@ static const struct file_operations xe_driver_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static struct drm_driver driver = {
@@ -534,7 +547,7 @@ static void update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
- xe->info.enable_display = 0;
+ xe->info.probe_display = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
@@ -788,13 +801,22 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
+/**
+ * xe_device_wmb() - Device specific write memory barrier
+ * @xe: the &xe_device
+ *
+ * While wmb() is sufficient for a barrier if we use system memory, on discrete
+ * platforms with device memory we additionally need to issue a register write.
+ * Since it doesn't matter which register we write to, use the read-only VF_CAP
+ * register that is also marked as accessible by the VFs.
+ */
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+ xe_mmio_write32(gt, VF_CAP_REG, 0);
}
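
A hedged usage sketch of the barrier; ring_doorbell() and the locals are hypothetical, only xe_device_wmb() is real:

	writeq(val, vram_va);	/* CPU write that must land in device memory */
	xe_device_wmb(xe);	/* wmb(), plus a dummy VF_CAP_REG write on dgfx */
	ring_doorbell(gt);	/* whatever tells the HW to go look */
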
/**
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 533ccfb2567a..894f04770454 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -15,9 +15,23 @@ static inline struct xe_device *to_xe_device(const struct drm_device *dev)
return container_of(dev, struct xe_device, drm);
}
+static inline struct xe_device *kdev_to_xe_device(struct device *kdev)
+{
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
+ return drm ? to_xe_device(drm) : NULL;
+}
+
static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_xe_device(drm) : NULL;
+}
+
+static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
+{
+ return (struct xe_device *)xe;
}
static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
@@ -129,16 +143,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
void xe_device_assert_mem_access(struct xe_device *xe);
-static inline bool xe_device_in_fault_mode(struct xe_device *xe)
-{
- return xe->usm.num_vm_in_fault_mode != 0;
-}
-
-static inline bool xe_device_in_non_fault_mode(struct xe_device *xe)
-{
- return xe->usm.num_vm_in_non_fault_mode != 0;
-}
-
static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
{
return xe->info.has_flat_ccs;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9e5fdf96750b..ec7eb7811126 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -23,6 +23,10 @@
#include "xe_sriov_types.h"
#include "xe_step_types.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#endif
+
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "soc/intel_pch.h"
#include "intel_display_core.h"
@@ -40,6 +44,7 @@ struct xe_pat_ops;
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
+#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -199,7 +204,7 @@ struct xe_tile {
struct xe_memirq memirq;
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
- struct drm_mm_node ggtt_balloon[2];
+ struct xe_ggtt_node *ggtt_balloon[2];
} vf;
} sriov;
@@ -283,26 +288,29 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
- /** @info.enable_display: display enabled */
- u8 enable_display:1;
+ /**
+ * @info.probe_display: Probe display hardware. If set to
+ * false, the driver will behave as if there is no display
+ * hardware present and will not try to read/write to it in any
+ * way. The display hardware, if it exists, will not be
+ * exposed to userspace and will be left untouched in whatever
+ * state the firmware or bootloader left it in.
+ */
+ u8 probe_display:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
/** @info.skip_guc_pc: Skip GuC based PM feature init */
u8 skip_guc_pc:1;
/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
-
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
- struct {
- u32 rawclk_freq;
- } i915_runtime;
-#endif
} info;
/** @irq: device interrupt state */
@@ -360,10 +368,6 @@ struct xe_device {
struct xarray asid_to_vm;
/** @usm.next_asid: next ASID, used to cyclical alloc asids */
u32 next_asid;
- /** @usm.num_vm_in_fault_mode: number of VM in fault mode */
- u32 num_vm_in_fault_mode;
- /** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
- u32 num_vm_in_non_fault_mode;
/** @usm.lock: protects UM state */
struct mutex lock;
} usm;
@@ -483,6 +487,14 @@ struct xe_device {
int mode;
} wedged;
+#ifdef TEST_VM_OPS_ERROR
+ /**
+ * @vm_inject_error_position: inject errors at different places in VM
+ * bind IOCTL based on this value
+ */
+ u8 vm_inject_error_position;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -573,6 +585,18 @@ struct xe_file {
/** @client: drm client */
struct xe_drm_client *client;
+ /**
+ * @process_name: process name for file handle, used to safely output
+	 * during error situations where the xe file can outlive the process
+ */
+ char *process_name;
+
+ /**
+	 * @pid: pid for file handle, used to safely output during error
+	 * situations where the xe file can outlive the process
+ */
+ pid_t pid;
+
/** @refcount: ref count of this xe file */
struct kref refcount;
};
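
The two new fields are filled once at open time and read lock-free afterwards, so error paths never need to resolve a possibly-dead task. A condensed sketch of both ends, taken from the hunks above:

	/* capture at open (xe_file_open): */
	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
	if (task) {
		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
		xef->pid = task->pid;
		put_task_struct(task);
	}

	/* consume during error capture (devcoredump_snapshot), no task lookup: */
	process_name = q->vm->xef->process_name;
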
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 7ddd59908334..95a05c5bc897 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -5,11 +5,12 @@
#include "xe_drm_client.h"
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
@@ -151,10 +152,13 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
*/
void xe_drm_client_remove_bo(struct xe_bo *bo)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
struct xe_drm_client *client = bo->client;
+ xe_assert(xe, !kref_read(&bo->ttm.base.refcount));
+
spin_lock(&client->bos_lock);
- list_del(&bo->client_link);
+ list_del_init(&bo->client_link);
spin_unlock(&client->bos_lock);
xe_drm_client_put(client);
@@ -166,6 +170,8 @@ static void bo_meminfo(struct xe_bo *bo,
u64 sz = bo->size;
u32 mem_type;
+ xe_bo_assert_held(bo);
+
if (bo->placement.placement)
mem_type = bo->placement.placement->mem_type;
else
@@ -196,6 +202,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
struct xe_drm_client *client;
struct drm_gem_object *obj;
struct xe_bo *bo;
+ LLIST_HEAD(deferred);
unsigned int id;
u32 mem_type;
@@ -206,7 +213,20 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
idr_for_each_entry(&file->object_idr, obj, id) {
struct xe_bo *bo = gem_to_xe_bo(obj);
- bo_meminfo(bo, stats);
+ if (dma_resv_trylock(bo->ttm.base.resv)) {
+ bo_meminfo(bo, stats);
+ xe_bo_unlock(bo);
+ } else {
+ xe_bo_get(bo);
+ spin_unlock(&file->table_lock);
+
+ xe_bo_lock(bo, false);
+ bo_meminfo(bo, stats);
+ xe_bo_unlock(bo);
+
+ xe_bo_put(bo);
+ spin_lock(&file->table_lock);
+ }
}
spin_unlock(&file->table_lock);
@@ -215,11 +235,28 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
list_for_each_entry(bo, &client->bos_list, client_link) {
if (!kref_get_unless_zero(&bo->ttm.base.refcount))
continue;
- bo_meminfo(bo, stats);
- xe_bo_put(bo);
+
+ if (dma_resv_trylock(bo->ttm.base.resv)) {
+ bo_meminfo(bo, stats);
+ xe_bo_unlock(bo);
+ } else {
+ spin_unlock(&client->bos_lock);
+
+ xe_bo_lock(bo, false);
+ bo_meminfo(bo, stats);
+ xe_bo_unlock(bo);
+
+ spin_lock(&client->bos_lock);
+ /* The bo ref will prevent this bo from being removed from the list */
+ xe_assert(xef->xe, !list_empty(&bo->client_link));
+ }
+
+ xe_bo_put_deferred(bo, &deferred);
}
spin_unlock(&client->bos_lock);
+ xe_bo_put_commit(&deferred);
+
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
if (!xe_mem_type_to_name[mem_type])
continue;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index f36980aa26e6..7b38485817dc 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -8,12 +8,13 @@
#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/delay.h>
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
+#include "xe_hw_engine_group.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
@@ -124,6 +125,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
bool write_locked, skip_retry = false;
ktime_t end = 0;
int err = 0;
+ struct xe_hw_engine_group *group;
+ enum xe_hw_engine_group_execution_mode mode, previous_mode;
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
@@ -182,6 +185,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
+ group = q->hwe->hw_engine_group;
+ mode = xe_hw_engine_group_find_exec_mode(q);
+
+ if (mode == EXEC_MODE_DMA_FENCE) {
+ err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
+ if (err)
+ goto err_syncs;
+ }
+
retry:
if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
err = down_write_killable(&vm->lock);
@@ -199,7 +211,7 @@ retry:
downgrade_write(&vm->lock);
write_locked = false;
if (err)
- goto err_unlock_list;
+ goto err_hw_exec_mode;
}
if (!args->num_batch_buffer) {
@@ -312,6 +324,9 @@ retry:
spin_unlock(&xe->ttm.lru_lock);
}
+ if (mode == EXEC_MODE_LR)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
err_repin:
if (!xe_vm_in_lr_mode(vm))
up_read(&vm->userptr.notifier_lock);
@@ -324,6 +339,9 @@ err_unlock_list:
up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
goto retry;
+err_hw_exec_mode:
+ if (mode == EXEC_MODE_DMA_FENCE)
+ xe_hw_engine_group_put(group);
err_syncs:
while (num_syncs--)
xe_sync_entry_cleanup(&syncs[num_syncs]);
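
Condensed, the new bracketing in the exec ioctl follows a plain get/put discipline. A sketch of the control flow with the retry path and error labels elided:

	mode = xe_hw_engine_group_find_exec_mode(q);
	if (mode == EXEC_MODE_DMA_FENCE) {
		err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
		if (err)
			goto err_syncs;
	}

	/* ... validate, pin, submit ... */

	if (mode == EXEC_MODE_DMA_FENCE)
		xe_hw_engine_group_put(group);
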
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 9731dcd0b1bd..7f28b7fc68d5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -9,11 +9,12 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
+#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
@@ -73,6 +74,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
+ INIT_LIST_HEAD(&q->hw_engine_group_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
@@ -166,7 +168,8 @@ err_post_alloc:
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
- enum xe_engine_class class, u32 flags)
+ enum xe_engine_class class,
+ u32 flags, u64 extensions)
{
struct xe_hw_engine *hwe, *hwe0 = NULL;
enum xe_hw_engine_id id;
@@ -186,7 +189,56 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
if (!logical_mask)
return ERR_PTR(-ENODEV);
- return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
+ return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
+}
+
+/**
+ * xe_exec_queue_create_bind() - Create bind exec queue.
+ * @xe: Xe device.
+ * @tile: tile which bind exec queue belongs to.
+ * @flags: exec queue creation flags
+ * @extensions: exec queue creation extensions
+ *
+ * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
+ * for access to physical memory required for page table programming. On
+ * faulting devices the reserved copy engine instance must be used to avoid
+ * deadlocking: user binds cannot be allowed to get stuck behind faults, as
+ * the kernel binds which resolve those faults depend on user binds. On
+ * non-faulting devices any copy engine
+ * can be used.
+ *
+ * Returns exec queue on success, ERR_PTR on failure
+ */
+struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
+ struct xe_tile *tile,
+ u32 flags, u64 extensions)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_exec_queue *q;
+ struct xe_vm *migrate_vm;
+
+ migrate_vm = xe_migrate_get_vm(tile->migrate);
+ if (xe->info.has_usm) {
+ struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
+ XE_ENGINE_CLASS_COPY,
+ gt->usm.reserved_bcs_instance,
+ false);
+
+ if (!hwe) {
+ xe_vm_put(migrate_vm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ q = xe_exec_queue_create(xe, migrate_vm,
+ BIT(hwe->logical_instance), 1, hwe,
+ flags, extensions);
+ } else {
+ q = xe_exec_queue_create_class(xe, gt, migrate_vm,
+ XE_ENGINE_CLASS_COPY, flags,
+ extensions);
+ }
+ xe_vm_put(migrate_vm);
+
+ return q;
}
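
A hedged usage sketch, mirroring how the create ioctl below builds one bind queue per tile (unwinding of previously created queues is elided):

	for_each_tile(tile, xe, id) {
		q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
		if (IS_ERR(q))
			return PTR_ERR(q);
	}
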
void xe_exec_queue_destroy(struct kref *ref)
@@ -418,63 +470,6 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
return 0;
}
-static const enum xe_engine_class user_to_xe_engine_class[] = {
- [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
- [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
- [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
- [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
- [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
-};
-
-static struct xe_hw_engine *
-find_hw_engine(struct xe_device *xe,
- struct drm_xe_engine_class_instance eci)
-{
- u32 idx;
-
- if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
- return NULL;
-
- if (eci.gt_id >= xe->info.gt_count)
- return NULL;
-
- idx = array_index_nospec(eci.engine_class,
- ARRAY_SIZE(user_to_xe_engine_class));
-
- return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
- user_to_xe_engine_class[idx],
- eci.engine_instance, true);
-}
-
-static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
- struct drm_xe_engine_class_instance *eci,
- u16 width, u16 num_placements)
-{
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
- u32 logical_mask = 0;
-
- if (XE_IOCTL_DBG(xe, width != 1))
- return 0;
- if (XE_IOCTL_DBG(xe, num_placements != 1))
- return 0;
- if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
- return 0;
-
- eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
-
- for_each_hw_engine(hwe, gt, id) {
- if (xe_hw_engine_is_reserved(hwe))
- continue;
-
- if (hwe->class ==
- user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
- logical_mask |= BIT(hwe->logical_instance);
- }
-
- return logical_mask;
-}
-
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
struct drm_xe_engine_class_instance *eci,
u16 width, u16 num_placements)
@@ -497,7 +492,7 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
n = j * width + i;
- hwe = find_hw_engine(xe, eci[n]);
+ hwe = xe_hw_engine_lookup(xe, eci[n]);
if (XE_IOCTL_DBG(xe, !hwe))
return 0;
@@ -536,8 +531,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct drm_xe_engine_class_instance __user *user_eci =
u64_to_user_ptr(args->instances);
struct xe_hw_engine *hwe;
- struct xe_vm *vm, *migrate_vm;
+ struct xe_vm *vm;
struct xe_gt *gt;
+ struct xe_tile *tile;
struct xe_exec_queue *q = NULL;
u32 logical_mask;
u32 id;
@@ -562,37 +558,20 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
- for_each_gt(gt, xe, id) {
- struct xe_exec_queue *new;
- u32 flags;
-
- if (xe_gt_is_media_type(gt))
- continue;
-
- eci[0].gt_id = gt->info.id;
- logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
- args->width,
- args->num_placements);
- if (XE_IOCTL_DBG(xe, !logical_mask))
- return -EINVAL;
-
- hwe = find_hw_engine(xe, eci[0]);
- if (XE_IOCTL_DBG(xe, !hwe))
- return -EINVAL;
-
- /* The migration vm doesn't hold rpm ref */
- xe_pm_runtime_get_noresume(xe);
-
- flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
+ if (XE_IOCTL_DBG(xe, args->width != 1) ||
+ XE_IOCTL_DBG(xe, args->num_placements != 1) ||
+ XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
+ return -EINVAL;
- migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
- new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
- args->width, hwe, flags,
- args->extensions);
+ for_each_tile(tile, xe, id) {
+ struct xe_exec_queue *new;
+ u32 flags = EXEC_QUEUE_FLAG_VM;
- xe_pm_runtime_put(xe); /* now held by engine */
+ if (id)
+ flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
- xe_vm_put(migrate_vm);
+ new = xe_exec_queue_create_bind(xe, tile, flags,
+ args->extensions);
if (IS_ERR(new)) {
err = PTR_ERR(new);
if (q)
@@ -613,7 +592,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !logical_mask))
return -EINVAL;
- hwe = find_hw_engine(xe, eci[0]);
+ hwe = xe_hw_engine_lookup(xe, eci[0]);
if (XE_IOCTL_DBG(xe, !hwe))
return -EINVAL;
@@ -648,6 +627,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, err))
goto put_exec_queue;
}
+
+ if (q->vm && q->hwe->hw_engine_group) {
+ err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
+ if (err)
+ goto put_exec_queue;
+ }
}
mutex_lock(&xef->exec_queue.lock);
@@ -798,6 +783,15 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
+/**
+ * xe_exec_queue_kill - permanently stop all execution from an exec queue
+ * @q: The exec queue
+ *
+ * This function permanently stops all activity on an exec queue. If the queue
+ * is actively executing on the HW, it will be kicked off the engine; any
+ * pending jobs are discarded and all future submissions are rejected.
+ * This function is safe to call multiple times.
+ */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
struct xe_exec_queue *eq = q, *next;
@@ -830,6 +824,9 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
+ if (q->vm && q->hwe->hw_engine_group)
+ xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
+
xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
@@ -841,10 +838,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
struct xe_vm *vm)
{
- if (q->flags & EXEC_QUEUE_FLAG_VM)
+ if (q->flags & EXEC_QUEUE_FLAG_VM) {
lockdep_assert_held(&vm->lock);
- else
+ } else {
xe_vm_assert_held(vm);
+ lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
+ }
}
/**
@@ -856,10 +855,7 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
xe_exec_queue_last_fence_lockdep_assert(q, vm);
- if (q->last_fence) {
- dma_fence_put(q->last_fence);
- q->last_fence = NULL;
- }
+ xe_exec_queue_last_fence_put_unlocked(q);
}
/**
@@ -902,6 +898,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
}
/**
+ * xe_exec_queue_last_fence_get_for_resume() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref. Only safe to be called in the context of
+ * resuming the hw engine group's long-running exec queue, when the group
+ * semaphore is held.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+ if (q->last_fence &&
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+ xe_exec_queue_last_fence_put_unlocked(q);
+
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
+}
+
+/**
* xe_exec_queue_last_fence_set() - Set last fence
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
@@ -918,3 +941,26 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
xe_exec_queue_last_fence_put(q, vm);
q->last_fence = dma_fence_get(fence);
}
+
+/**
+ * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Returns:
+ * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
+ */
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ if (fence) {
+ err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
+ 0 : -ETIME;
+ dma_fence_put(fence);
+ }
+
+ return err;
+}
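
A hedged caller sketch for the new dependency probe; the policy on -ETIME is up to the caller:

	err = xe_exec_queue_last_fence_test_dep(q, vm);
	if (err == -ETIME)
		return err;	/* last fence not signalled yet, bail instead of blocking */
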
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 289a3a51d2a2..90c7f73eab88 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -20,7 +20,11 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
- enum xe_engine_class class, u32 flags);
+ enum xe_engine_class class,
+ u32 flags, u64 extensions);
+struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
+ struct xe_tile *tile,
+ u32 flags, u64 extensions);
void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
@@ -73,8 +77,12 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
struct xe_vm *vm);
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
+ struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
+ struct xe_vm *vm);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index f6ee0ae80fd6..7deb480e26af 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -140,6 +140,8 @@ struct xe_exec_queue {
* Protected by @vm's resv. Unused if @vm == NULL.
*/
u64 tlb_flush_seqno;
+ /** @hw_engine_group_link: link into exec queues in the same hw engine group */
+ struct list_head hw_engine_group_link;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
@@ -169,9 +171,11 @@ struct xe_exec_queue_ops {
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an exec queue to suspend executing, should be
- * call after suspend.
+	 * called after suspend. In the dma-fencing path this must return within
+	 * a reasonable amount of time. An -ETIME return indicates an error
+	 * waiting for suspend, resulting in the associated VM getting killed.
*/
- void (*suspend_wait)(struct xe_exec_queue *q);
+ int (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
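
Since suspend_wait can now fail, a backend-agnostic caller sketch looks like the following (the -ETIME consequence described in the comment above is the caller's responsibility, not shown here):

	err = q->ops->suspend(q);
	if (!err)
		err = q->ops->suspend_wait(q);	/* may return -ETIME in the dma-fencing path */
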
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..6a59165b9569 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -123,8 +123,8 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
if (!port->running_exl)
return;
- xe_lrc_write_ring(port->hwe->kernel_lrc, noop, sizeof(noop));
- __start_lrc(port->hwe, port->hwe->kernel_lrc, 0);
+ xe_lrc_write_ring(port->lrc, noop, sizeof(noop));
+ __start_lrc(port->hwe, port->lrc, 0);
port->running_exl = NULL;
}
@@ -254,14 +254,22 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
{
struct drm_device *drm = &xe->drm;
struct xe_execlist_port *port;
- int i;
+ int i, err;
port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ if (!port) {
+ err = -ENOMEM;
+ goto err;
+ }
port->hwe = hwe;
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ if (IS_ERR(port->lrc)) {
+ err = PTR_ERR(port->lrc);
+ goto err;
+ }
+
spin_lock_init(&port->lock);
for (i = 0; i < ARRAY_SIZE(port->active); i++)
INIT_LIST_HEAD(&port->active[i]);
@@ -277,6 +285,9 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
add_timer(&port->irq_fail);
return port;
+
+err:
+ return ERR_PTR(err);
}
void xe_execlist_port_destroy(struct xe_execlist_port *port)
@@ -287,6 +298,8 @@ void xe_execlist_port_destroy(struct xe_execlist_port *port)
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
port->hwe->irq_handler = NULL;
spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+
+ xe_lrc_put(port->lrc);
}
static struct dma_fence *
@@ -422,10 +435,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index f94bbf4c53e4..415140936f11 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -27,6 +27,8 @@ struct xe_execlist_port {
struct xe_execlist_exec_queue *running_exl;
struct timer_list irq_fail;
+
+ struct xe_lrc *lrc;
};
struct xe_execlist_exec_queue {
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 106ee2b027f0..904cf47925aa 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -97,19 +97,27 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (name) {
fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
- fprintf(csource, "{ XE_RTP_NAME(\"%s\"), XE_RTP_RULES(%s) },\n",
+
+ /* Close previous entry before starting a new one */
+ if (idx)
+ fprintf(csource, ") },\n");
+
+ fprintf(csource, "{ XE_RTP_NAME(\"%s\"),\n XE_RTP_RULES(%s",
name, rules);
+ idx++;
} else {
- fprintf(csource, "{ XE_RTP_NAME(NULL), XE_RTP_RULES(%s) },\n",
- rules);
+ fprintf(csource, ", OR,\n\t%s", rules);
}
- idx++;
lineno++;
if (!is_continuation)
prev_name = name;
}
+ /* Close last entry */
+ if (idx)
+ fprintf(csource, ") },\n");
+
fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
return 0;
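
With the continuation handling, a workaround whose rules span several input lines now emits a single table entry with the rules OR-ed together. Illustrative generated output (hypothetical workaround name and rules):

	{ XE_RTP_NAME("14010001234"),
	  XE_RTP_RULES(PLATFORM(SOME_PLATFORM), OR,
		PLATFORM(OTHER_PLATFORM)) },
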
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 0cdbc1296e88..2895f154654c 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -30,6 +30,39 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
+/**
+ * DOC: Global Graphics Translation Table (GGTT)
+ *
+ * The Xe GGTT implements support for a Global Virtual Address space that is used
+ * for resources that are accessible to privileged (i.e. kernel-mode) processes,
+ * and not tied to a specific user-level process. For example, the Graphics
+ * micro-Controller (GuC) and Display Engine (if present) utilize this Global
+ * address space.
+ *
+ * The Global GTT (GGTT) translates from the Global virtual address to a physical
+ * address that can be accessed by HW. The GGTT is a flat, single-level table.
+ *
+ * Xe implements a simplified version of the GGTT, specifically managing only a
+ * certain range of it that goes from the Write Once Protected Content Memory (WOPCM)
+ * layout to a predefined GUC_GGTT_TOP. This approach avoids complications related to
+ * the GuC (Graphics micro-Controller) hardware limitations. The GuC address space
+ * is limited on both ends of the GGTT, because the GuC shim HW redirects
+ * accesses to those addresses to other HW areas instead of going through the
+ * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
+ * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
+ * simple, instead of checking each object to see if they are accessed by GuC or
+ * not, we just exclude those areas from the allocator. Additionally, to simplify
+ * the driver load, we use the maximum WOPCM size in this logic instead of the
+ * programmed one, so we don't need to wait until the actual size to be
+ * programmed is determined (which requires FW fetch) before initializing the
+ * GGTT. These simplifications might waste space in the GGTT (about 20-25 MBs
+ * depending on the platform) but we can live with this. Another benefit of this
+ * is that the GuC bootrom can't access anything below the WOPCM max size, so
+ * anything the bootrom needs to access (e.g. an RSA key) needs to be placed in
+ * the GGTT above the WOPCM max size. Starting the GGTT allocations above the
+ * WOPCM max size gives us the correct placement for free.
+ */
+
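
The range described in the DOC block reduces to a couple of lines in xe_ggtt_init_early(); a paraphrase of the code there, with assumed locals:

	u64 start = xe_wopcm_size(xe);	/* maximum WOPCM size, not the programmed one */
	u64 size  = min_t(u64, ggtt->size, GUC_GGTT_TOP) - start;

	drm_mm_init(&ggtt->mm, start, size);
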
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index)
{
@@ -128,11 +161,12 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
+ destroy_workqueue(ggtt->wq);
mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
}
-static void ggtt_fini(struct drm_device *drm, void *arg)
+static void ggtt_fini(void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -164,12 +198,16 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
-/*
- * Early GGTT initialization, which allows to create new mappings usable by the
- * GuC.
- * Mappings are not usable by the HW engines, as it doesn't have scratch /
+/**
+ * xe_ggtt_init_early - Early GGTT initialization
+ * @ggtt: the &xe_ggtt to be initialized
+ *
+ * It allows the creation of new mappings usable by the GuC.
+ * Mappings are not usable by the HW engines yet, as the GGTT doesn't have its
+ * scratch nor its initial clear done at this point. That will happen in the
+ * regular, non-early GGTT initialization.
+ * GGTT initialization.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
@@ -194,29 +232,6 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
ggtt->flags |= XE_GGTT_FLAGS_64K;
- /*
- * 8B per entry, each points to a 4KB page.
- *
- * The GuC address space is limited on both ends of the GGTT, because
- * the GuC shim HW redirects accesses to those addresses to other HW
- * areas instead of going through the GGTT. On the bottom end, the GuC
- * can't access offsets below the WOPCM size, while on the top side the
- * limit is fixed at GUC_GGTT_TOP. To keep things simple, instead of
- * checking each object to see if they are accessed by GuC or not, we
- * just exclude those areas from the allocator. Additionally, to
- * simplify the driver load, we use the maximum WOPCM size in this logic
- * instead of the programmed one, so we don't need to wait until the
- * actual size to be programmed is determined (which requires FW fetch)
- * before initializing the GGTT. These simplifications might waste space
- * in the GGTT (about 20-25 MBs depending on the platform) but we can
- * live with this.
- *
- * Another benifit of this is the GuC bootrom can't access anything
- * below the WOPCM max size so anything the bootom needs to access (e.g.
- * a RSA key) needs to be placed in the GGTT above the WOPCM max size.
- * Starting the GGTT allocations above the WOPCM max give us the correct
- * placement for free.
- */
if (ggtt->size > GUC_GGTT_TOP)
ggtt->size = GUC_GGTT_TOP;
@@ -228,6 +243,8 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
else
ggtt->pt_ops = &xelp_pt_ops;
+ ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0);
+
drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
ggtt->size - xe_wopcm_size(xe));
mutex_init(&ggtt->lock);
@@ -262,6 +279,77 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
mutex_unlock(&ggtt->lock);
}
+static void ggtt_node_remove(struct xe_ggtt_node *node)
+{
+ struct xe_ggtt *ggtt = node->ggtt;
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
+ mutex_lock(&ggtt->lock);
+ if (bound)
+ xe_ggtt_clear(ggtt, node->base.start, node->base.size);
+ drm_mm_remove_node(&node->base);
+ node->base.size = 0;
+ mutex_unlock(&ggtt->lock);
+
+ if (!bound)
+ goto free_node;
+
+ if (node->invalidate_on_remove)
+ xe_ggtt_invalidate(ggtt);
+
+ drm_dev_exit(idx);
+
+free_node:
+ xe_ggtt_node_fini(node);
+}
+
+static void ggtt_node_remove_work_func(struct work_struct *work)
+{
+ struct xe_ggtt_node *node = container_of(work, typeof(*node),
+ delayed_removal_work);
+ struct xe_device *xe = tile_to_xe(node->ggtt->tile);
+
+ xe_pm_runtime_get(xe);
+ ggtt_node_remove(node);
+ xe_pm_runtime_put(xe);
+}
+
+/**
+ * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
+ * @node: the &xe_ggtt_node to be removed
+ * @invalidate: if node needs invalidation upon removal
+ */
+void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
+{
+ struct xe_ggtt *ggtt;
+ struct xe_device *xe;
+
+ if (!node || !node->ggtt)
+ return;
+
+ ggtt = node->ggtt;
+ xe = tile_to_xe(ggtt->tile);
+
+ node->invalidate_on_remove = invalidate;
+
+ if (xe_pm_runtime_get_if_active(xe)) {
+ ggtt_node_remove(node);
+ xe_pm_runtime_put(xe);
+ } else {
+ queue_work(ggtt->wq, &node->delayed_removal_work);
+ }
+}
+
+/**
+ * xe_ggtt_init - Regular non-early GGTT initialization
+ * @ggtt: the &xe_ggtt to be initialized
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
struct xe_device *xe = tile_to_xe(ggtt->tile);
@@ -289,7 +377,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
xe_ggtt_initial_clear(ggtt);
- return drmm_add_action_or_reset(&xe->drm, ggtt_fini, ggtt);
+ return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
ggtt->scratch = NULL;
return err;
@@ -314,26 +402,6 @@ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}
-void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
-{
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
- u64 addr, scratch_pte;
-
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, pat_index);
-
- printk("%sGlobal GTT:", prefix);
- for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
- unsigned int i = addr / XE_PAGE_SIZE;
-
- xe_tile_assert(ggtt->tile, addr <= U32_MAX);
- if (ggtt->gsm[i] == scratch_pte)
- continue;
-
- printk("%s ggtt[0x%08x] = 0x%016llx",
- prefix, (u32)addr, ggtt->gsm[i]);
- }
-}
-
static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
const struct drm_mm_node *node, const char *description)
{
@@ -347,88 +415,180 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_balloon - prevent allocation of specified GGTT addresses
- * @ggtt: the &xe_ggtt where we want to make reservation
+ * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
- * @node: the &drm_mm_node to hold reserved GGTT node
*
- * Use xe_ggtt_deballoon() to release a reserved GGTT node.
+ * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node)
+int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
{
+ struct xe_ggtt *ggtt = node->ggtt;
int err;
xe_tile_assert(ggtt->tile, start < end);
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
- xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node));
+ xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
- node->color = 0;
- node->start = start;
- node->size = end - start;
+ node->base.color = 0;
+ node->base.start = start;
+ node->base.size = end - start;
mutex_lock(&ggtt->lock);
- err = drm_mm_reserve_node(&ggtt->mm, node);
+ err = drm_mm_reserve_node(&ggtt->mm, &node->base);
mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
- node->start, node->start + node->size, ERR_PTR(err)))
+ node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
return err;
- xe_ggtt_dump_node(ggtt, node, "balloon");
+ xe_ggtt_dump_node(ggtt, &node->base, "balloon");
return 0;
}
/**
- * xe_ggtt_deballoon - release a reserved GGTT region
- * @ggtt: the &xe_ggtt where reserved node belongs
- * @node: the &drm_mm_node with reserved GGTT region
+ * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_balloon() for details.
+ * See xe_ggtt_node_insert_balloon() for details.
*/
-void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
{
- if (!drm_mm_node_allocated(node))
+ if (!node || !node->ggtt)
return;
- xe_ggtt_dump_node(ggtt, node, "deballoon");
+ if (!drm_mm_node_allocated(&node->base))
+ goto free_node;
- mutex_lock(&ggtt->lock);
- drm_mm_remove_node(node);
- mutex_unlock(&ggtt->lock);
+ xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
+
+ mutex_lock(&node->ggtt->lock);
+ drm_mm_remove_node(&node->base);
+ mutex_unlock(&node->ggtt->lock);
+
+free_node:
+ xe_ggtt_node_fini(node);
}
-int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align, u32 mm_flags)
+/**
+ * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
+ * @node: the &xe_ggtt_node to be inserted
+ * @size: size of the node
+ * @align: alignment constraint of the node
+ * @mm_flags: flags to control the node behavior
+ *
+ * It cannot be called without first having called xe_ggtt_init() once.
+ * To be used in cases where ggtt->lock is already taken.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
+ u32 size, u32 align, u32 mm_flags)
{
- return drm_mm_insert_node_generic(&ggtt->mm, node, size, align, 0,
+ return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
mm_flags);
}
-int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align)
+/**
+ * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
+ * @node: the &xe_ggtt_node to be inserted
+ * @size: size of the node
+ * @align: alignment constraint of the node
+ *
+ * It cannot be called without first having called xe_ggtt_init() once.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
int ret;
- mutex_lock(&ggtt->lock);
- ret = xe_ggtt_insert_special_node_locked(ggtt, node, size,
- align, DRM_MM_INSERT_HIGH);
- mutex_unlock(&ggtt->lock);
+ if (!node || !node->ggtt)
+ return -ENOENT;
+
+ mutex_lock(&node->ggtt->lock);
+ ret = xe_ggtt_node_insert_locked(node, size, align,
+ DRM_MM_INSERT_HIGH);
+ mutex_unlock(&node->ggtt->lock);
return ret;
}
+/**
+ * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
+ * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
+ *
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
+ * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
+ * or xe_ggtt_node_remove_balloon().
+ * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
+ * allocated in the GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
+ * or xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved there.
+ *
+ * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
+ **/
+struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
+{
+ struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);
+
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
+ node->ggtt = ggtt;
+
+ return node;
+}
+
+/**
+ * xe_ggtt_node_fini - Forcibly finalize a %xe_ggtt_node struct
+ * @node: the &xe_ggtt_node to be freed
+ *
+ * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
+ * or xe_ggtt_node_insert_balloon(), and this @node is not going to be reused, then
+ * this function needs to be called to free the %xe_ggtt_node struct.
+ **/
+void xe_ggtt_node_fini(struct xe_ggtt_node *node)
+{
+ kfree(node);
+}
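
A hedged lifecycle sketch for the node API introduced here; the size and the invalidate choice are illustrative:

	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, SZ_4K);
	if (err) {
		xe_ggtt_node_fini(node);	/* never inserted: free the struct by hand */
		return err;
	}

	/* ... use node->base.start ... */

	xe_ggtt_node_remove(node, true);	/* clears the range, invalidates, frees the node */
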
+
+/**
+ * xe_ggtt_node_allocated - Check if node is allocated in GGTT
+ * @node: the &xe_ggtt_node to be inspected
+ *
+ * Return: True if allocated, False otherwise.
+ */
+bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
+{
+ if (!node || !node->ggtt)
+ return false;
+
+ return drm_mm_node_allocated(&node->base);
+}
+
+/**
+ * xe_ggtt_map_bo - Map the BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start = bo->ggtt_node.start;
+ u64 start;
u64 offset, pte;
+ if (XE_WARN_ON(!bo->ggtt_node))
+ return;
+
+ start = bo->ggtt_node->base.start;
+
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
@@ -444,9 +604,9 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node.size)) {
+ if (XE_WARN_ON(bo->ggtt_node)) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
return 0;
}
@@ -455,69 +615,111 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
return err;
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+
+ bo->ggtt_node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node)) {
+ err = PTR_ERR(bo->ggtt_node);
+ bo->ggtt_node = NULL;
+ goto out;
+ }
+
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
alignment, 0, start, end, 0);
- if (!err)
+ if (err) {
+ xe_ggtt_node_fini(bo->ggtt_node);
+ bo->ggtt_node = NULL;
+ } else {
xe_ggtt_map_bo(ggtt, bo);
+ }
mutex_unlock(&ggtt->lock);
if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
xe_ggtt_invalidate(ggtt);
+
+out:
xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return err;
}
+/**
+ * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
+ * @ggtt: the &xe_ggtt where bo will be inserted
+ * @bo: the &xe_bo to be inserted
+ * @start: address where it will be inserted
+ * @end: end of the range where it will be inserted
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}
+/**
+ * xe_ggtt_insert_bo - Insert BO into GGTT
+ * @ggtt: the &xe_ggtt where bo will be inserted
+ * @bo: the &xe_bo to be inserted
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- bool invalidate)
+/**
+ * xe_ggtt_remove_bo - Remove a BO from the GGTT
+ * @ggtt: the &xe_ggtt where node will be removed
+ * @bo: the &xe_bo to be removed
+ */
+void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- struct xe_device *xe = tile_to_xe(ggtt->tile);
- bool bound;
- int idx;
-
- bound = drm_dev_enter(&xe->drm, &idx);
- if (bound)
- xe_pm_runtime_get_noresume(xe);
-
- mutex_lock(&ggtt->lock);
- if (bound)
- xe_ggtt_clear(ggtt, node->start, node->size);
- drm_mm_remove_node(node);
- node->size = 0;
- mutex_unlock(&ggtt->lock);
-
- if (!bound)
+ if (XE_WARN_ON(!bo->ggtt_node))
return;
- if (invalidate)
- xe_ggtt_invalidate(ggtt);
+ /* This BO is not currently in the GGTT */
+ xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
- xe_pm_runtime_put(xe);
- drm_dev_exit(idx);
+ xe_ggtt_node_remove(bo->ggtt_node,
+ bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
-void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+/**
+ * xe_ggtt_largest_hole - Largest GGTT hole
+ * @ggtt: the &xe_ggtt that will be inspected
+ * @alignment: minimum alignment
+ * @spare: if not NULL, the desired memory size to be spared on input; on output, the adjusted possible spare
+ *
+ * Return: size of the largest contiguous GGTT region
+ */
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
- if (XE_WARN_ON(!bo->ggtt_node.size))
- return;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
+ u64 hole_start, hole_end, hole_size;
+ u64 max_hole = 0;
- /* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+ mutex_lock(&ggtt->lock);
- xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
- bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ if (spare)
+ *spare -= min3(*spare, hole_size, max_hole);
+ max_hole = max(max_hole, hole_size);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ return max_hole;
}
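/*
 * Illustrative sketch, not part of the patch: how a provisioning path might
 * combine xe_ggtt_largest_hole() with a spare budget. The helper walks every
 * hole, shrinking *spare by whatever the smaller holes can absorb, so the
 * caller is left with the portion of the reservation the largest hole must
 * still cover. SZ_64M is an assumed, hypothetical reservation here.
 */
static u64 example_max_provisionable(struct xe_ggtt *ggtt, u64 alignment)
{
	u64 spare = SZ_64M;	/* hypothetical PF reservation */
	u64 max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	/* whatever the largest hole holds beyond the remaining reservation */
	return max_hole > spare ? max_hole - spare : 0;
}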
#ifdef CONFIG_PCI_IOV
@@ -548,22 +750,28 @@ static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node
/**
* xe_ggtt_assign - assign a GGTT region to the VF
- * @ggtt: the &xe_ggtt where the node belongs
- * @node: the &drm_mm_node to update
+ * @node: the &xe_ggtt_node to update
* @vfid: the VF identifier
*
* This function is used by the PF driver to assign a GGTT region to the VF.
* In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
* on some platforms VFs can't modify that either.
*/
-void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
- mutex_lock(&ggtt->lock);
- xe_ggtt_assign_locked(ggtt, node, vfid);
- mutex_unlock(&ggtt->lock);
+ mutex_lock(&node->ggtt->lock);
+ xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
+ mutex_unlock(&node->ggtt->lock);
}
#endif
+/**
+ * xe_ggtt_dump - Dump GGTT for debug
+ * @ggtt: the &xe_ggtt to be dumped
+ * @p: the &drm_printer helper handle to be used to dump the information
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
int err;
@@ -576,3 +784,43 @@ int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
mutex_unlock(&ggtt->lock);
return err;
}
+
+/**
+ * xe_ggtt_print_holes - Print holes
+ * @ggtt: the &xe_ggtt to be inspected
+ * @alignment: minimum alignment
+ * @p: the &drm_printer
+ *
+ * Print GGTT ranges that are available and return total size available.
+ *
+ * Return: Total available size.
+ */
+u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
+{
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
+ u64 hole_start, hole_end, hole_size;
+ u64 total = 0;
+ char buf[10];
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ total += hole_size;
+
+ string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
+ hole_start, hole_end - 1, buf);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ return total;
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 6a96fd54bf60..27e7d67de004 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -12,28 +12,30 @@ struct drm_printer;
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
-void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
-
-int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 size, struct drm_mm_node *node);
-void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node);
-
-int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align);
-int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
- struct drm_mm_node *node,
- u32 size, u32 align, u32 mm_flags);
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- bool invalidate);
+
+struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
+void xe_ggtt_node_fini(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+
+int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
+int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
+ u32 size, u32 align, u32 mm_flags);
+void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
+bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p);
#ifdef CONFIG_PCI_IOV
-void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid);
+void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index 2245d88d8f39..cb02b7994a9a 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -13,30 +13,70 @@
struct xe_bo;
struct xe_gt;
+/**
+ * struct xe_ggtt - Main GGTT struct
+ *
+ * In general, each tile can contain its own Global Graphics Translation Table
+ * (GGTT) instance.
+ */
struct xe_ggtt {
+ /** @tile: Back pointer to tile where this GGTT belongs */
struct xe_tile *tile;
-
+ /** @size: Total size of this GGTT */
u64 size;
#define XE_GGTT_FLAGS_64K BIT(0)
+ /**
+ * @flags: Flags for this GGTT
+ * Acceptable flags:
+ * - %XE_GGTT_FLAGS_64K - if the PTE size is 64K. Otherwise, the regular 4K PTE size is used.
+ */
unsigned int flags;
-
+ /** @scratch: Internal object allocation used as a scratch page */
struct xe_bo *scratch;
-
+ /** @lock: Mutex lock to protect GGTT data */
struct mutex lock;
-
+ /**
+ * @gsm: The iomem pointer to the translation table located in the GSM,
+ * for easy PTE manipulation
+ */
u64 __iomem *gsm;
-
+ /** @pt_ops: Page Table operations per platform */
const struct xe_ggtt_pt_ops *pt_ops;
-
+ /** @mm: The memory manager used to manage individual GGTT allocations */
struct drm_mm mm;
-
/** @access_count: counts GGTT writes */
unsigned int access_count;
+ /** @wq: Dedicated unordered work queue to process node removals */
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct xe_ggtt_node - A node in GGTT.
+ *
+ * This struct needs to be initialized (only once) with xe_ggtt_node_init() before any node
+ * insertion, reservation, or 'ballooning'.
+ * It is then finalized by either xe_ggtt_node_remove() or xe_ggtt_node_remove_balloon().
+ */
+struct xe_ggtt_node {
+ /** @ggtt: Back pointer to the &xe_ggtt where this node will be inserted */
+ struct xe_ggtt *ggtt;
+ /** @base: A drm_mm_node */
+ struct drm_mm_node base;
+ /** @delayed_removal_work: The work struct for the delayed removal */
+ struct work_struct delayed_removal_work;
+ /** @invalidate_on_remove: Whether the node requires GGTT invalidation upon removal */
+ bool invalidate_on_remove;
};
+/**
+ * struct xe_ggtt_pt_ops - GGTT page table operations
+ * These operations can vary from platform to platform.
+ */
struct xe_ggtt_pt_ops {
+ /** @pte_encode_bo: Encode PTE address for a given BO */
u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
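/*
 * Illustrative lifecycle sketch, not part of the patch, based on the
 * xe_ggtt_node declarations above; ggtt, size and align are assumed to be
 * in scope:
 */
struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
int err;

if (IS_ERR(node))
	return PTR_ERR(node);

err = xe_ggtt_node_insert(node, size, align);
if (err) {
	xe_ggtt_node_fini(node);	/* never inserted, free it directly */
	return err;
}

/* ... node->base.start is the GGTT offset while the node is allocated ... */

/* removal also frees the node, possibly deferred via the GGTT workqueue */
xe_ggtt_node_remove(node, true);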
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index e4ad1d6ce1d5..c518d1d16d82 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -15,11 +15,11 @@ static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
struct xe_sched_msg *msg;
- spin_lock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
if (msg)
xe_sched_process_msg_queue(sched);
- spin_unlock(&sched->base.job_list_lock);
+ xe_sched_msg_unlock(sched);
}
static struct xe_sched_msg *
@@ -27,12 +27,12 @@ xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
struct xe_sched_msg *msg;
- spin_lock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs,
struct xe_sched_msg, link);
if (msg)
- list_del(&msg->link);
- spin_unlock(&sched->base.job_list_lock);
+ list_del_init(&msg->link);
+ xe_sched_msg_unlock(sched);
return msg;
}
@@ -93,9 +93,16 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg)
{
- spin_lock(&sched->base.job_list_lock);
- list_add_tail(&msg->link, &sched->msgs);
- spin_unlock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
+ xe_sched_add_msg_locked(sched, msg);
+ xe_sched_msg_unlock(sched);
+}
+void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ lockdep_assert_held(&sched->base.job_list_lock);
+
+ list_add_tail(&msg->link, &sched->msgs);
xe_sched_process_msg_queue(sched);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 10c6bb9c9386..cee9c6809fc0 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -24,6 +24,18 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
+void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
+
+static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
+{
+ spin_lock(&sched->base.job_list_lock);
+}
+
+static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
+{
+ spin_unlock(&sched->base.job_list_lock);
+}
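/*
 * Illustrative sketch, not part of the patch: the lock/locked-add split lets
 * a caller make a check plus queue sequence atomic with respect to the
 * message list, e.g.:
 */
xe_sched_msg_lock(sched);
if (list_empty(&sched->msgs))	/* only queue when nothing is pending */
	xe_sched_add_msg_locked(sched, msg);
xe_sched_msg_unlock(sched);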
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 29f96f409391..6fbea70d3d36 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
@@ -165,10 +166,11 @@ static int query_compatibility_version(struct xe_gsc *gsc)
return err;
}
- compat->major = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
- compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
+ compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
+ compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
+ compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
- xe_gt_info(gt, "found GSC cv%u.%u\n", compat->major, compat->minor);
+ xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);
out_bo:
xe_bo_unpin_map_no_vm(bo);
@@ -333,9 +335,11 @@ static int gsc_er_complete(struct xe_gt *gt)
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
* XXX: we should trigger an FLR here, but we don't have support
- * for that yet.
+ * for that yet. Since we can't recover from the error, we
+ * declare the device as wedged.
*/
xe_gt_err(gt, "GSC ER timed out!\n");
+ xe_device_declare_wedged(gt_to_xe(gt));
return -EIO;
}
@@ -450,11 +454,6 @@ static void free_resources(void *arg)
xe_exec_queue_put(gsc->q);
gsc->q = NULL;
}
-
- if (gsc->private) {
- xe_bo_unpin_map_no_vm(gsc->private);
- gsc->private = NULL;
- }
}
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
@@ -474,10 +473,9 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
if (!hwe)
return -ENODEV;
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT);
+ bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -537,7 +535,10 @@ void xe_gsc_load_start(struct xe_gsc *gsc)
/* GSC FW survives GT reset and D3Hot */
if (gsc_fw_is_loaded(gt)) {
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
+ if (xe_gsc_proxy_init_done(gsc))
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
return;
}
@@ -589,3 +590,35 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
msleep(200);
}
}
+
+/**
+ * xe_gsc_print_info - print info about GSC FW status
+ * @gsc: the GSC structure
+ * @p: the printer to be used to print the info
+ */
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int err;
+
+ xe_uc_fw_print(&gsc->fw, p);
+
+ drm_printf(p, "\tfound security version %u\n", gsc->security_version);
+
+ if (!xe_uc_fw_is_enabled(&gsc->fw))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ return;
+
+ drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index 1c7a623faf11..e282b9ef6ec4 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_printer;
struct xe_gsc;
struct xe_gt;
struct xe_hw_engine;
@@ -21,4 +22,6 @@ void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.c b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
new file mode 100644
index 000000000000..461d7e99c2b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gsc_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gsc.h"
+#include "xe_macros.h"
+#include "xe_pm.h"
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static struct xe_device *
+gsc_to_xe(struct xe_gsc *gsc)
+{
+ return gt_to_xe(gsc_to_gt(gsc));
+}
+
+static struct xe_gsc *node_to_gsc(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int gsc_info(struct seq_file *m, void *data)
+{
+ struct xe_gsc *gsc = node_to_gsc(m->private);
+ struct xe_device *xe = gsc_to_xe(gsc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pm_runtime_get(xe);
+ xe_gsc_print_info(gsc, &p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"gsc_info", gsc_info, 0},
+};
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent)
+{
+ struct drm_minor *minor = gsc_to_xe(gsc)->drm.primary;
+ struct drm_info_list *local;
+ int i;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&gsc_to_xe(gsc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = gsc;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.h b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
new file mode 100644
index 000000000000..c2e2645dc705
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GSC_DEBUGFS_H_
+#define _XE_GSC_DEBUGFS_H_
+
+struct dentry;
+struct xe_gsc;
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index aa812a2bc3ed..2d6ea8c01445 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -62,11 +62,6 @@ gsc_to_gt(struct xe_gsc *gsc)
return container_of(gsc, struct xe_gt, uc.gsc);
}
-static inline struct xe_device *kdev_to_xe(struct device *kdev)
-{
- return dev_get_drvdata(kdev);
-}
-
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
@@ -345,7 +340,7 @@ void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
struct device *mei_kdev, void *data)
{
- struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
struct xe_gt *gt = xe->tiles[0].media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -360,7 +355,7 @@ static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
struct device *mei_kdev, void *data)
{
- struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
struct xe_gt *gt = xe->tiles[0].media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -376,27 +371,6 @@ static const struct component_ops xe_gsc_proxy_component_ops = {
.unbind = xe_gsc_proxy_component_unbind,
};
-static void proxy_channel_free(struct drm_device *drm, void *arg)
-{
- struct xe_gsc *gsc = arg;
-
- if (!gsc->proxy.bo)
- return;
-
- if (gsc->proxy.to_csme) {
- kfree(gsc->proxy.to_csme);
- gsc->proxy.to_csme = NULL;
- gsc->proxy.from_csme = NULL;
- }
-
- if (gsc->proxy.bo) {
- iosys_map_clear(&gsc->proxy.to_gsc);
- iosys_map_clear(&gsc->proxy.from_gsc);
- xe_bo_unpin_map_no_vm(gsc->proxy.bo);
- gsc->proxy.bo = NULL;
- }
-}
-
static int proxy_channel_alloc(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
@@ -405,18 +379,15 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
struct xe_bo *bo;
void *csme;
- csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
+ csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
if (!csme)
return -ENOMEM;
- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- kfree(csme);
+ bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo))
return PTR_ERR(bo);
- }
gsc->proxy.bo = bo;
gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
@@ -424,7 +395,7 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
gsc->proxy.to_csme = csme;
gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
- return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index b8c73f69fbaf..f0dc2bf24c7b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -8,8 +8,7 @@
#include <linux/minmax.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
-#include <generated/xe_wa_oob.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -110,9 +109,9 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt)) {
xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
- reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
- xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+ xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
@@ -134,9 +133,9 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
if (WARN_ON(err))
return;
- reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg &= ~CG_DIS_CNTLBUS;
- xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+ xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
@@ -556,7 +555,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
- xe_gt_enable_host_l2_vram(gt);
err = xe_uc_init(&gt->uc);
if (err)
@@ -568,6 +566,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_topology_init(gt);
xe_gt_mcr_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
out_fw:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 8b1a5027dcf2..ee138e9768a2 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -6,6 +6,8 @@
#ifndef _XE_GT_H_
#define _XE_GT_H_
+#include <linux/fault-inject.h>
+
#include <drm/drm_util.h>
#include "xe_device.h"
@@ -19,19 +21,11 @@
#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
return should_fail(&gt_reset_failure, 1);
}
-#else
-static inline bool xe_fault_inject_gt_reset(void)
-{
- return false;
-}
-#endif
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 5e7fd937917a..8f95d3a5949b 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -17,7 +17,9 @@
#include "xe_gt_mcr.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_vf_debugfs.h"
+#include "xe_gt_stats.h"
#include "xe_gt_topology.h"
+#include "xe_guc_hwconfig.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_macros.h"
@@ -269,6 +271,15 @@ static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_guc_hwconfig_dump(&gt->uc.guc, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
@@ -286,6 +297,8 @@ static const struct drm_info_list debugfs_list[] = {
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
{"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
{"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
+ {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info},
+ {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
void xe_gt_debugfs_register(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 6d948a469126..7d7bd0be6233 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -8,8 +8,10 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
+#include "xe_guc_hwconfig.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
@@ -297,6 +299,36 @@ static void init_steering_mslice(struct xe_gt *gt)
static unsigned int dss_per_group(struct xe_gt *gt)
{
+ struct xe_guc *guc = &gt->uc.guc;
+ u32 max_slices = 0, max_subslices = 0;
+ int ret;
+
+ /*
+ * Try to query the GuC's hwconfig table for the maximum number of
+ * slices and subslices. These don't reflect the platform's actual
+ * slice/DSS counts, just the physical layout by which we should
+ * determine the steering targets. On older platforms with older GuC
+ * firmware releases it's possible that these attributes may not be
+ * included in the table, so we can always fall back to the old
+ * hardcoded layouts.
+ */
+#define HWCONFIG_ATTR_MAX_SLICES 1
+#define HWCONFIG_ATTR_MAX_SUBSLICES 70
+
+ ret = xe_guc_hwconfig_lookup_u32(guc, HWCONFIG_ATTR_MAX_SLICES,
+ &max_slices);
+ if (ret < 0 || max_slices == 0)
+ goto fallback;
+
+ ret = xe_guc_hwconfig_lookup_u32(guc, HWCONFIG_ATTR_MAX_SUBSLICES,
+ &max_subslices);
+ if (ret < 0 || max_subslices == 0)
+ goto fallback;
+
+ return DIV_ROUND_UP(max_subslices, max_slices);
+
+fallback:
+ xe_gt_dbg(gt, "GuC hwconfig cannot provide dss/slice; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
return 8;
else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
@@ -314,16 +346,16 @@ static unsigned int dss_per_group(struct xe_gt *gt)
*/
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
- int dss_per_grp = dss_per_group(gt);
-
xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
- *group = dss / dss_per_grp;
- *instance = dss % dss_per_grp;
+ *group = dss / gt->steering_dss_per_grp;
+ *instance = dss % gt->steering_dss_per_grp;
}
static void init_steering_dss(struct xe_gt *gt)
{
+ gt->steering_dss_per_grp = dss_per_group(gt);
+
xe_gt_mcr_get_dss_steering(gt,
min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0)),
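/*
 * Worked example, values assumed for illustration: with hwconfig reporting
 * max_slices = 2 and max_subslices = 8, dss_per_group() returns
 * DIV_ROUND_UP(8, 2) = 4, so DSS 6 steers to group 6 / 4 = 1 and
 * instance 6 % 4 = 2:
 */
u16 group, instance;

xe_gt_mcr_get_dss_steering(gt, 6, &group, &instance);
/* group == 1, instance == 2 under the assumed layout above */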
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index b2a7fa55bd18..730eec07795e 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -287,7 +287,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
PFD_VIRTUAL_ADDR_LO_SHIFT;
pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
- PF_QUEUE_NUM_DW;
+ pf_queue->num_dw;
ret = true;
}
spin_unlock_irq(&pf_queue->lock);
@@ -299,7 +299,8 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail,
+ pf_queue->num_dw) <=
PF_MSG_LEN_DW;
}
@@ -312,22 +313,23 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
- /*
- * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
- */
- BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
-
if (unlikely(len != PF_MSG_LEN_DW))
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
+ /*
+ * The below logic doesn't work unless pf_queue->num_dw % PF_MSG_LEN_DW == 0
+ */
+ xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));
+
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
- pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
+ pf_queue->head = (pf_queue->head + len) %
+ pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
@@ -394,18 +396,47 @@ static void pagefault_fini(void *arg)
destroy_workqueue(gt->usm.pf_wq);
}
+static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ xe_dss_mask_t all_dss;
+ int num_dss, num_eus;
+
+ bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+
+ num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
+ num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+ XE_MAX_EU_FUSE_BITS) * num_dss;
+
+ /* user can issue separate page faults per EU and per CS */
+ pf_queue->num_dw =
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+
+ pf_queue->gt = gt;
+ pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
+ sizeof(u32), GFP_KERNEL);
+ if (!pf_queue->data)
+ return -ENOMEM;
+
+ spin_lock_init(&pf_queue->lock);
+ INIT_WORK(&pf_queue->worker, pf_queue_work_func);
+
+ return 0;
+}
+
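+/*
+ * Illustrative sizing example, numbers assumed here: a GT with 64 DSS and
+ * 8 EUs per DSS gives num_eus = 8 * 64 = 512. Assuming PF_MSG_LEN_DW = 4
+ * and, say, 64 hardware engines:
+ *
+ *   num_dw = (512 + 64) * 4 = 2304 dwords, i.e. 9 KiB per queue,
+ *
+ * scaled to the topology instead of a fixed worst-case size.
+ */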
int xe_gt_pagefault_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- int i;
+ int i, ret = 0;
if (!xe->info.has_usm)
return 0;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
- gt->usm.pf_queue[i].gt = gt;
- spin_lock_init(&gt->usm.pf_queue[i].lock);
- INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
+ ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
+ if (ret)
+ return ret;
}
for (i = 0; i < NUM_ACC_QUEUE; ++i) {
gt->usm.acc_queue[i].gt = gt;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 9dbba9ab7a9a..905f409db74b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,10 +5,11 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"
@@ -57,6 +58,10 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_gt_sriov_pf_control_init(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -93,4 +98,5 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
xe_gt_sriov_pf_config_restart(gt);
+ xe_gt_sriov_pf_control_restart(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index b6f0a7299c03..8250ef71e685 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -29,6 +29,7 @@
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
+#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
@@ -232,14 +233,14 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
u32 n = 0;
- if (drm_mm_node_allocated(&config->ggtt_region)) {
+ if (xe_ggtt_node_allocated(config->ggtt_region)) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region.start);
- cfg[n++] = upper_32_bits(config->ggtt_region.start);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region.size);
- cfg[n++] = upper_32_bits(config->ggtt_region.size);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
}
return n;
@@ -276,6 +277,14 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
cfg[n++] = config->preempt_timeout;
+#define encode_threshold_config(TAG, ...) ({ \
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
+ cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
+#undef encode_threshold_config
+
return n;
}
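/*
 * Illustrative expansion, assumed threshold name: if the set defined by
 * MAKE_XE_GUC_KLV_THRESHOLDS_SET includes a CAT_ERR entry, the helper above
 * unrolls to
 *
 *   cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_CAT_ERR);
 *   cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(CAT_ERR)];
 *
 * once per threshold, keeping the KLV encoding and the threshold storage in
 * lockstep.
 */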
@@ -369,29 +378,28 @@ static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u6
return err ?: err2;
}
-static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
+static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
- struct xe_ggtt *ggtt = tile->mem.ggtt;
-
- if (drm_mm_node_allocated(node)) {
+ if (xe_ggtt_node_allocated(node)) {
/*
* explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
* is redundant, as PTE will be implicitly re-assigned to PF by
* the xe_ggtt_clear() called by the xe_ggtt_node_remove() below.
*/
- xe_ggtt_remove_node(ggtt, node, false);
+ xe_ggtt_node_remove(node, false);
}
}
static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
- pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
+ pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
+ config->ggtt_region = NULL;
}
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_ggtt_node *node;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_ggtt *ggtt = tile->mem.ggtt;
u64 alignment = pf_get_ggtt_alignment(gt);
@@ -403,40 +411,48 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
size = round_up(size, alignment);
- if (drm_mm_node_allocated(node)) {
+ if (xe_ggtt_node_allocated(config->ggtt_region)) {
err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
if (unlikely(err))
return err;
- pf_release_ggtt(tile, node);
+ pf_release_vf_config_ggtt(gt, config);
}
- xe_gt_assert(gt, !drm_mm_node_allocated(node));
+ xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
if (!size)
return 0;
- err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
+ node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ err = xe_ggtt_node_insert(node, size, alignment);
if (unlikely(err))
- return err;
+ goto err;
- xe_ggtt_assign(ggtt, node, vfid);
+ xe_ggtt_assign(node, vfid);
xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
- vfid, node->start, node->start + node->size - 1);
+ vfid, node->base.start, node->base.start + node->base.size - 1);
- err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
+ err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
if (unlikely(err))
- return err;
+ goto err;
+ config->ggtt_region = node;
return 0;
+err:
+ xe_ggtt_node_fini(node);
+ return err;
}
static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_ggtt_node *node = config->ggtt_region;
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
- return drm_mm_node_allocated(node) ? node->size : 0;
+ return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
/**
@@ -587,30 +603,11 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
- const struct drm_mm *mm = &ggtt->mm;
- const struct drm_mm_node *entry;
u64 alignment = pf_get_ggtt_alignment(gt);
u64 spare = pf_get_spare_ggtt(gt);
- u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
- u64 hole_start, hole_end, hole_size;
- u64 max_hole = 0;
+ u64 max_hole;
- mutex_lock(&ggtt->lock);
-
- drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
- hole_start = max(hole_start, hole_min_start);
- hole_start = ALIGN(hole_start, alignment);
- hole_end = ALIGN_DOWN(hole_end, alignment);
- if (hole_start >= hole_end)
- continue;
- hole_size = hole_end - hole_start;
- xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
- hole_start, hole_size / SZ_1K);
- spare -= min3(spare, hole_size, max_hole);
- max_hole = max(max_hole, hole_size);
- }
-
- mutex_unlock(&ggtt->lock);
+ max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
max_hole / SZ_1K, spare / SZ_1K);
@@ -1401,6 +1398,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
ALIGN(size, PAGE_SIZE),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1844,6 +1842,18 @@ u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
return value;
}
+static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+#define reset_threshold_config(TAG, ...) ({ \
+ config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
+#undef reset_threshold_config
+}
+
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1859,6 +1869,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
pf_reset_config_sched(gt, config);
+ pf_reset_config_thresholds(gt, config);
}
/**
@@ -1892,6 +1903,87 @@ int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool forc
return force ? 0 : err;
}
+static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
+{
+ if (xe_ggtt_node_allocated(ggtt_region))
+ xe_ggtt_assign(ggtt_region, vfid);
+}
+
+static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
+{
+ struct xe_migrate *m = tile->migrate;
+ struct dma_fence *fence;
+ int err;
+
+ if (!bo)
+ return 0;
+
+ xe_bo_lock(bo, false);
+ fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ } else if (!fence) {
+ err = -ENOMEM;
+ } else {
+ long ret = dma_fence_wait_timeout(fence, false, timeout);
+
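+		/* dma_fence_wait_timeout(): >0 remaining jiffies, 0 timed out, <0 error */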
+ err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
+ dma_fence_put(fence);
+ if (!err)
+ xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
+ jiffies_to_msecs(timeout - ret));
+ }
+ xe_bo_unlock(bo);
+
+ return err;
+}
+
+static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
+
+ /*
+ * Only GGTT and LMEM need to be cleared by the PF.
+ * GuC doorbell IDs and context IDs do not need any clearing.
+ */
+ if (!xe_gt_is_media_type(gt)) {
+ pf_sanitize_ggtt(config->ggtt_region, vfid);
+ if (IS_DGFX(xe))
+ err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
+ }
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @timeout: maximum timeout to wait for completion in jiffies
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid != PFID);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_sanitize_vf_resources(gt, vfid, timeout);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err))
+ xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
+ vfid, ERR_PTR(err));
+ return err;
+}
+
/**
* xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
* @gt: the &xe_gt
@@ -2024,13 +2116,15 @@ int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
for (n = 1; n <= total_vfs; n++) {
config = &gt->sriov.pf.vfs[n].config;
- if (!drm_mm_node_allocated(&config->ggtt_region))
+ if (!xe_ggtt_node_allocated(config->ggtt_region))
continue;
- string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
- n, config->ggtt_region.start,
- config->ggtt_region.start + config->ggtt_region.size - 1, buf);
+ n, config->ggtt_region->base.start,
+ config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
+ buf);
}
return 0;
@@ -2118,12 +2212,8 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
- const struct drm_mm *mm = &ggtt->mm;
- const struct drm_mm_node *entry;
u64 alignment = pf_get_ggtt_alignment(gt);
- u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
- u64 hole_start, hole_end, hole_size;
- u64 spare, avail, total = 0;
+ u64 spare, avail, total;
char buf[10];
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
@@ -2131,24 +2221,8 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
spare = pf_get_spare_ggtt(gt);
+ total = xe_ggtt_print_holes(ggtt, alignment, p);
- mutex_lock(&ggtt->lock);
-
- drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
- hole_start = max(hole_start, hole_min_start);
- hole_start = ALIGN(hole_start, alignment);
- hole_end = ALIGN_DOWN(hole_end, alignment);
- if (hole_start >= hole_end)
- continue;
- hole_size = hole_end - hole_start;
- total += hole_size;
-
- string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
- hole_start, hole_end - 1, buf);
- }
-
- mutex_unlock(&ggtt->lock);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index c0e6e4743dc2..42e64769f666 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -50,6 +50,7 @@ int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index, u32 value);
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout);
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 7bc66656fcc7..2d3b73d78f14 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -6,8 +6,7 @@
#ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
#define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
-#include <drm/drm_mm.h>
-
+#include "xe_ggtt_types.h"
#include "xe_guc_klv_thresholds_set_types.h"
struct xe_bo;
@@ -19,7 +18,7 @@ struct xe_bo;
*/
struct xe_gt_sriov_config {
/** @ggtt_region: GGTT region assigned to the VF. */
- struct drm_mm_node ggtt_region;
+ struct xe_ggtt_node *ggtt_region;
/** @lmem_obj: LMEM allocation for use by the VF. */
struct xe_bo *lmem_obj;
/** @num_ctxs: number of GuC contexts IDs. */
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index ebf06e037750..02f7328bd6ce 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -3,11 +3,17 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_managed.h>
+
#include "abi/guc_actions_sriov_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
@@ -41,10 +47,6 @@ static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
};
int ret;
- /* XXX those two commands are now sent from the G2H handler */
- if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
- return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
-
ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
@@ -54,6 +56,8 @@ static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
int err;
xe_gt_assert(gt, vfid != PFID);
+ xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n",
+ vfid, control_cmd_to_string(cmd));
err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
if (unlikely(err))
@@ -88,6 +92,456 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * DOC: The VF state machine
+ *
+ * The simplified VF state machine could be presented as::
+ *
+ * pause--------------------------o
+ * / |
+ * / v
+ * (READY)<------------------resume-----(PAUSED)
+ * ^ \ / /
+ * | \ / /
+ * | stop---->(STOPPED)<----stop /
+ * | / /
+ * | / /
+ * o--------<-----flr /
+ * \ /
+ * o------<--------------------flr
+ *
+ * Where:
+ *
+ * * READY - represents a state in which VF is fully operable
+ * * PAUSED - represents a state in which VF activity is temporarily suspended
+ * * STOPPED - represents a state in which VF activity is definitely halted
+ * * pause - represents a request to temporarily suspend VF activity
+ * * resume - represents a request to resume VF activity
+ * * stop - represents a request to definitely halt VF activity
+ * * flr - represents a request to perform VF FLR to restore VF activity
+ *
+ * However, each state transition requires additional steps that involve
+ * communication with GuC that might fail or be interrupted by other requests::
+ *
+ * .................................WIP....
+ * : :
+ * pause--------------------->PAUSE_WIP----------------------------o
+ * / : / \ : |
+ * / : o----<---stop flr--o : |
+ * / : | \ / | : V
+ * (READY,RESUMED)<--------+------------RESUME_WIP<----+--<-----resume--(PAUSED)
+ * ^ \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : o----<----------------------+--<-------stop /
+ * | \ \ : | | : /
+ * | \ \ : V | : /
+ * | \ stop----->STOP_WIP---------flr--->-----o : /
+ * | \ : | | : /
+ * | \ : | V : /
+ * | flr--------+----->----------------->FLR_WIP<-----flr
+ * | : | / ^ :
+ * | : | / | :
+ * o--------<-------:----+-----<----------------o | :
+ * : | | :
+ * :....|...........................|.....:
+ * | |
+ * V |
+ * (STOPPED)--------------------flr
+ *
+ * For details about each internal WIP state machine see:
+ *
+ * * `The VF PAUSE state machine`_
+ * * `The VF RESUME state machine`_
+ * * `The VF STOP state machine`_
+ * * `The VF FLR state machine`_
+ */
+
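+/*
+ * Illustrative sketch, not part of the patch: a PF-side caller drives this
+ * state machine through the public entry points; the WIP machinery below is
+ * internal. A pause/resume round-trip looks like:
+ *
+ *	err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);   (READY -> PAUSED)
+ *	if (!err)
+ *		err = xe_gt_sriov_pf_control_resume_vf(gt, vfid);   (PAUSED -> READY)
+ */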
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+#define CASE2STR(_X) \
+ case XE_GT_SRIOV_STATE_##_X: return #_X
+ CASE2STR(WIP);
+ CASE2STR(FLR_WIP);
+ CASE2STR(FLR_SEND_START);
+ CASE2STR(FLR_WAIT_GUC);
+ CASE2STR(FLR_GUC_DONE);
+ CASE2STR(FLR_RESET_CONFIG);
+ CASE2STR(FLR_RESET_DATA);
+ CASE2STR(FLR_RESET_MMIO);
+ CASE2STR(FLR_SEND_FINISH);
+ CASE2STR(FLR_FAILED);
+ CASE2STR(PAUSE_WIP);
+ CASE2STR(PAUSE_SEND_PAUSE);
+ CASE2STR(PAUSE_WAIT_GUC);
+ CASE2STR(PAUSE_GUC_DONE);
+ CASE2STR(PAUSE_FAILED);
+ CASE2STR(PAUSED);
+ CASE2STR(RESUME_WIP);
+ CASE2STR(RESUME_SEND_RESUME);
+ CASE2STR(RESUME_FAILED);
+ CASE2STR(RESUMED);
+ CASE2STR(STOP_WIP);
+ CASE2STR(STOP_SEND_STOP);
+ CASE2STR(STOP_FAILED);
+ CASE2STR(STOPPED);
+ CASE2STR(MISMATCH);
+#undef CASE2STR
+ default: return "?";
+ }
+}
+#endif
+
+static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+ case XE_GT_SRIOV_STATE_FLR_WAIT_GUC:
+ case XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC:
+ return HZ / 2;
+ case XE_GT_SRIOV_STATE_FLR_WIP:
+ case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG:
+ return 5 * HZ;
+ default:
+ return HZ;
+ }
+}
+
+static struct xe_gt_sriov_control_state *pf_pick_vf_control(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
+ return &gt->sriov.pf.vfs[vfid].control;
+}
+
+static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return &cs->state;
+}
+
+static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ return test_bit(bit, pf_peek_vf_state(gt, vfid));
+}
+
+static void pf_dump_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long state = *pf_peek_vf_state(gt, vfid);
+ enum xe_gt_sriov_control_bits bit;
+
+ if (state) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %#lx%s%*pbl\n",
+ vfid, state, state ? " bits " : "",
+ (int)BITS_PER_LONG, &state);
+ for_each_set_bit(bit, &state, BITS_PER_LONG)
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d)\n",
+ vfid, control_bit_to_string(bit), bit);
+ } else {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state READY\n", vfid);
+ }
+}
+
+static bool pf_expect_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = !pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static void pf_escape_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (pf_exit_vf_state(gt, vfid, bit))
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) escaped by %ps\n",
+ vfid, control_bit_to_string(bit), bit,
+ __builtin_return_address(0));
+}
+
+static void pf_enter_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH)) {
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch detected by %ps\n",
+ vfid, __builtin_return_address(0));
+ pf_dump_vf_state(gt, vfid);
+ }
+}
+
+static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH))
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch cleared by %ps\n",
+ vfid, __builtin_return_address(0));
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
+}
+
+#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \
+ pf_enter_vf_mismatch((gt), (vfid)); \
+})
+
+static void pf_queue_control_worker(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ queue_work(xe->sriov.wq, &gt->sriov.pf.control.worker);
+}
+
+static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ spin_lock(&pfc->lock);
+ list_move_tail(&gt->sriov.pf.vfs[vfid].control.link, &pfc->list);
+ spin_unlock(&pfc->lock);
+
+ pf_queue_control_worker(gt);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid);
+
+static bool pf_enter_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ reinit_completion(&cs->done);
+ return true;
+ }
+ return false;
+}
+
+static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ pf_exit_vf_flr_wip(gt, vfid);
+ pf_exit_vf_stop_wip(gt, vfid);
+ pf_exit_vf_pause_wip(gt, vfid);
+ pf_exit_vf_resume_wip(gt, vfid);
+
+ complete_all(&cs->done);
+ }
+}
+
+static int pf_wait_vf_wip_done(struct xe_gt *gt, unsigned int vfid, unsigned long timeout)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return wait_for_completion_timeout(&cs->done, timeout) ? 0 : -ETIMEDOUT;
+}
+
+static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+/**
+ * DOC: The VF PAUSE state machine
+ *
+ * The VF PAUSE state machine looks like::
+ *
+ * (READY,RESUMED)<-------------<---------------------o---------o
+ * | \ \
+ * pause \ \
+ * | \ \
+ * ....V...........................PAUSE_WIP........ \ \
+ * : \ : o \
+ * : \ o------<-----busy : | \
+ * : \ / / : | |
+ * : PAUSE_SEND_PAUSE ---failed--->----------o--->(PAUSE_FAILED) |
+ * : | \ : | |
+ * : acked rejected---->----------o--->(MISMATCH) /
+ * : | : /
+ * : v : /
+ * : PAUSE_WAIT_GUC : /
+ * : | : /
+ * : done : /
+ * : | : /
+ * : v : /
+ * : PAUSE_GUC_DONE o-----restart
+ * : / :
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr stop
+ * | | |
+ * V .....V..... ......V.....
+ * (PAUSED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
+ }
+}
+
+static void pf_enter_vf_paused(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_paused(gt, vfid);
+}
+
+static void pf_enter_vf_pause_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_pause_failed(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ return false;
+
+ pf_enter_vf_pause_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+}
+
+static void pf_enter_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ return false;
+
+ /* GuC may actually send a PAUSE_DONE before we get a RESPONSE */
+ pf_enter_pause_wait_guc(gt, vfid);
+
+ err = pf_send_vf_pause(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect PAUSE_DONE from GuC */
+ pf_exit_pause_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_pause_rejected(gt, vfid);
+ else
+ pf_enter_vf_pause_failed(gt, vfid);
+ } else {
+ /*
+ * we have already moved to WAIT_GUC, maybe even to GUC_DONE
+ * but since GuC didn't complain, we may clear MISMATCH
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_enter_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ return true;
+ }
+
+ return false;
+}
+
+/**
* xe_gt_sriov_pf_control_pause_vf - Pause a VF.
* @gt: the &xe_gt
* @vfid: the VF identifier
@@ -98,7 +552,140 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_pause(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_PAUSE_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
+ return -EPERM;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already paused!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_pause_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_dbg(gt, "VF%u pause didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_info(gt, "VF%u paused!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u pause was canceled!\n", vfid);
+ return -ECANCELED;
+}
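A hypothetical caller (a debugfs or sysfs handler, say) would fold these return codes into its own semantics; a minimal sketch, assuming nothing beyond the codes documented above::

    /* hypothetical caller, for illustration only */
    static int pf_debugfs_pause_vf(struct xe_gt *gt, unsigned int vfid)
    {
            int err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);

            switch (err) {
            case 0:         /* VF is now paused */
            case -ESTALE:   /* VF was already paused */
                    return 0;
            case -EALREADY: /* a pause is already in flight */
                    return -EAGAIN;
            default:        /* -EPERM, -EIO, -ECANCELED or a wait error */
                    return err;
            }
    }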
+
+/**
+ * DOC: The VF RESUME state machine
+ *
+ * The VF RESUME state machine looks like::
+ *
+ * (PAUSED)<-----------------<------------------------o
+ * | \
+ * resume \
+ * | \
+ * ....V............................RESUME_WIP...... \
+ * : \ : o
+ * : \ o-------<-----busy : |
+ * : \ / / : |
+ * : RESUME_SEND_RESUME ---failed--->--------o--->(RESUME_FAILED)
+ * : / \ : |
+ * : acked rejected---->---------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o.....o.....:
+ * | | | \
+ * completed flr stop restart-->(READY)
+ * | | |
+ * V .....V..... ......V.....
+ * (RESUMED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME);
+}
+
+static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_resumed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_resume_failed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ return false;
+
+ err = pf_send_vf_resume(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_resume_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_resume_failed(gt, vfid);
+ else
+ pf_enter_vf_resume_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ return true;
+ }
+
+ return false;
}
/**
@@ -112,7 +699,134 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_resume(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESUME_WIP);
+ int err;
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
+ return -EPERM;
+ }
+
+ if (!pf_enter_vf_resume_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) {
+ xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u resume was canceled!\n", vfid);
+ return -ECANCELED;
+}
+
+/**
+ * DOC: The VF STOP state machine
+ *
+ * The VF STOP state machine looks like::
+ *
+ * (READY,PAUSED,RESUMED)<-------<--------------------o
+ * | \
+ * stop \
+ * | \
+ * ....V..............................STOP_WIP...... \
+ * : \ : o
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : STOP_SEND_STOP--------failed--->--------o--->(STOP_FAILED)
+ * : / \ : |
+ * : acked rejected-------->--------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr restart
+ * | | |
+ * V .....V..... V
+ * (STOPPED) : FLR_WIP : (READY)
+ * :.........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP);
+}
+
+static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_stopped(gt, vfid);
+}
+
+static void pf_enter_vf_stop_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_stop_failed(gt, vfid);
+}
+
+static void pf_enter_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ return false;
+
+ err = pf_send_vf_stop(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_stop_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_stop_failed(gt, vfid);
+ else
+ pf_enter_vf_stop_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ return true;
+ }
+ return false;
}
/**
@@ -126,7 +840,280 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_stop(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_STOP_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already stopped!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_stop_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u stop was canceled!\n", vfid);
+ return -ECANCELED;
+}
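The same calling convention applies here; a hypothetical teardown helper that stops all provisioned VFs, treating -ESTALE (already stopped) as success, might look like::

    /* hypothetical teardown helper, for illustration only */
    static void pf_stop_all_vfs(struct xe_gt *gt, unsigned int num_vfs)
    {
            unsigned int vfid;

            for (vfid = 1; vfid <= num_vfs; vfid++) {
                    int err = xe_gt_sriov_pf_control_stop_vf(gt, vfid);

                    if (err && err != -ESTALE)
                            xe_gt_sriov_dbg(gt, "VF%u stop failed (%pe)\n",
                                            vfid, ERR_PTR(err));
            }
    }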
+
+/**
+ * DOC: The VF FLR state machine
+ *
+ * The VF FLR state machine looks like::
+ *
+ * (READY,PAUSED,STOPPED)<------------<--------------o
+ * | \
+ * flr \
+ * | \
+ * ....V..........................FLR_WIP........... \
+ * : \ : \
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : FLR_SEND_START---failed----->-----------o--->(FLR_FAILED)<---o
+ * : | \ : | |
+ * : acked rejected----->-----------o--->(MISMATCH) |
+ * : | : ^ |
+ * : v : | |
+ * : FLR_WAIT_GUC : | |
+ * : | : | |
+ * : done : | |
+ * : | : | |
+ * : v : | |
+ * : FLR_GUC_DONE : | |
+ * : | : | |
+ * : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o
+ * : | : | |
+ * : FLR_RESET_DATA : | |
+ * : | : | |
+ * : FLR_RESET_MMIO : | |
+ * : | : | |
+ * : | o----<----busy : | |
+ * : |/ / : | |
+ * : FLR_SEND_FINISH----failed--->-----------o--------+-----------o
+ * : / \ : |
+ * : acked rejected----->-----------o--------o
+ * : / :
+ * :....o..............................o...........:
+ * | |
+ * completed restart
+ * | /
+ * V /
+ * (READY)<----------<------------o
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_enter_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid);
+ return;
+ }
+
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_flr_send_start(gt, vfid);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START);
+ }
+}
+
+static void pf_enter_vf_flr_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_ready(gt, vfid);
+}
+
+static void pf_enter_vf_flr_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ xe_gt_sriov_notice(gt, "VF%u FLR failed!\n", vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_flr_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_flr_failed(gt, vfid);
+}
+
+static void pf_enter_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ return false;
+
+ err = pf_send_vf_flr_finish(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ return false;
+
+ /* XXX: placeholder */
+
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ return false;
+
+ xe_gt_sriov_pf_service_reset(gt, vfid);
+ xe_gt_sriov_pf_monitor_flr(gt, vfid);
+
+ pf_enter_vf_flr_reset_mmio(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ return false;
+
+ err = xe_gt_sriov_pf_config_sanitize(gt, vfid, timeout);
+ if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_reset_data(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+}
+
+static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ return false;
+
+ /* GuC may actually send a FLR_DONE before we get a RESPONSE */
+ pf_enter_vf_flr_wait_guc(gt, vfid);
+
+ err = pf_send_vf_flr_start(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect FLR_DONE from GuC */
+ pf_exit_vf_flr_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_start(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else
+ pf_enter_vf_flr_failed(gt, vfid);
+ } else {
+ /*
+ * We have already moved to WAIT_GUC, maybe even to GUC_DONE,
+ * but since the GuC didn't complain, we may clear MISMATCH
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ return false;
+
+ pf_enter_vf_flr_reset_config(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ pf_queue_vf(gt, vfid);
}
/**
@@ -140,46 +1127,56 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP);
int err;
- /* XXX pf_send_vf_flr_start() expects ct->lock */
- mutex_lock(&gt->uc.guc.ct.lock);
- err = pf_send_vf_flr_start(gt, vfid);
- mutex_unlock(&gt->uc.guc.ct.lock);
+ pf_enter_vf_flr_wip(gt, vfid);
- return err;
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_notice(gt, "VF%u FLR didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+
+ return 0;
}
/**
* DOC: The VF FLR Flow with GuC
*
- * PF GUC PCI
- * ========================================================
- * | | |
- * (1) | [ ] <----- FLR --|
- * | [ ] :
- * (2) [ ] <-------- NOTIFY FLR --[ ]
- * [ ] |
- * (3) [ ] |
- * [ ] |
- * [ ]-- START FLR ---------> [ ]
- * | [ ]
- * (4) | [ ]
- * | [ ]
- * [ ] <--------- FLR DONE -- [ ]
- * [ ] |
- * (5) [ ] |
- * [ ] |
- * [ ]-- FINISH FLR --------> [ ]
- * | |
- *
- * Step 1: PCI HW generates interrupt to the GuC about VF FLR
- * Step 2: GuC FW sends G2H notification to the PF about VF FLR
- * Step 2a: on some platforms G2H is only received from root GuC
- * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
- * Step 3a: on some platforms PF must send H2G to all other GuCs
- * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
- * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ * The VF FLR flow includes several steps::
+ *
+ * PF GUC PCI
+ * ========================================================
+ * | | |
+ * (1) | [ ] <----- FLR --|
+ * | [ ] :
+ * (2) [ ] <-------- NOTIFY FLR --[ ]
+ * [ ] |
+ * (3) [ ] |
+ * [ ] |
+ * [ ]-- START FLR ---------> [ ]
+ * | [ ]
+ * (4) | [ ]
+ * | [ ]
+ * [ ] <--------- FLR DONE -- [ ]
+ * [ ] |
+ * (5) [ ] |
+ * [ ] |
+ * [ ]-- FINISH FLR --------> [ ]
+ * | |
+ *
+ * * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * * Step 2a: on some platforms G2H is only received from root GuC
+ * * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
*/
static bool needs_dispatch_flr(struct xe_device *xe)
@@ -197,19 +1194,41 @@ static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
if (needs_dispatch_flr(xe)) {
for_each_gt(gtit, xe, gtid)
- pf_send_vf_flr_start(gtit, vfid);
+ pf_enter_vf_flr_wip(gtit, vfid);
} else {
- pf_send_vf_flr_start(gt, vfid);
+ pf_enter_vf_flr_wip(gt, vfid);
}
}
static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
{
- pf_send_vf_flr_finish(gt, vfid);
+ if (!pf_exit_vf_flr_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u FLR done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_flr_guc_done(gt, vfid);
+}
+
+static void pf_handle_vf_pause_done(struct xe_gt *gt, u32 vfid)
+{
+ if (!pf_exit_pause_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u PAUSE done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_pause_guc_done(gt, vfid);
}
static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
{
+ xe_gt_sriov_dbg_verbose(gt, "received VF%u event %#x\n", vfid, eventid);
+
+ if (vfid > xe_gt_sriov_pf_get_totalvfs(gt))
+ return -EPROTO;
+
switch (eventid) {
case GUC_PF_NOTIFY_VF_FLR:
pf_handle_vf_flr(gt, vfid);
@@ -218,6 +1237,7 @@ static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
pf_handle_vf_flr_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+ pf_handle_vf_pause_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_FIXUP_DONE:
break;
@@ -276,3 +1296,159 @@ int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32
return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
}
+
+static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_flr_send_start(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_FLR_WAIT_GUC));
+ return false;
+ }
+
+ if (pf_exit_vf_flr_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_config(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_data(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_mmio(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_send_finish(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_stop_send_stop(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_pause_send_pause(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC));
+ return false;
+ }
+
+ if (pf_exit_vf_pause_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_resume_send_resume(gt, vfid))
+ return true;
+
+ return false;
+}
+
+static unsigned int pf_control_state_index(struct xe_gt *gt,
+ struct xe_gt_sriov_control_state *cs)
+{
+ return container_of(cs, struct xe_gt_sriov_metadata, control) - gt->sriov.pf.vfs;
+}
+
+static void pf_worker_find_work(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+ struct xe_gt_sriov_control_state *cs;
+ unsigned int vfid;
+ bool empty;
+ bool more;
+
+ spin_lock(&pfc->lock);
+ cs = list_first_entry_or_null(&pfc->list, struct xe_gt_sriov_control_state, link);
+ if (cs)
+ list_del_init(&cs->link);
+ empty = list_empty(&pfc->list);
+ spin_unlock(&pfc->lock);
+
+ if (!cs)
+ return;
+
+ /* VF metadata structures are indexed by the VFID */
+ vfid = pf_control_state_index(gt, cs);
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
+ more = pf_process_vf_state_machine(gt, vfid);
+ if (more)
+ pf_queue_vf(gt, vfid);
+ else if (!empty)
+ pf_queue_control_worker(gt);
+}
+
+static void control_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ pf_worker_find_work(gt);
+}
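pf_queue_vf() and pf_queue_control_worker(), referenced above, are defined in an earlier hunk. A sketch consistent with the list/lock usage in pf_worker_find_work(), with the workqueue choice assumed (a dedicated SR-IOV workqueue; system_wq would also do for illustration)::

    static void pf_queue_control_worker(struct xe_gt *gt)
    {
            /* xe->sriov.wq is an assumption in this sketch */
            queue_work(gt_to_xe(gt)->sriov.wq, &gt->sriov.pf.control.worker);
    }

    static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
    {
            struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;

            spin_lock(&pfc->lock);
            /* (re)arm this VF's entry on the pending list */
            list_move_tail(&pf_pick_vf_control(gt, vfid)->link, &pfc->list);
            spin_unlock(&pfc->lock);

            pf_queue_control_worker(gt);
    }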
+
+static void pf_stop_worker(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ cancel_work_sync(&gt->sriov.pf.control.worker);
+}
+
+static void control_fini_action(struct drm_device *dev, void *data)
+{
+ struct xe_gt *gt = data;
+
+ pf_stop_worker(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_init() - Initialize PF's control data.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 0; n <= totalvfs; n++) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, n);
+
+ init_completion(&cs->done);
+ INIT_LIST_HEAD(&cs->link);
+ }
+
+ spin_lock_init(&gt->sriov.pf.control.lock);
+ INIT_LIST_HEAD(&gt->sriov.pf.control.list);
+ INIT_WORK(&gt->sriov.pf.control.worker, control_worker_func);
+
+ return drmm_add_action_or_reset(&xe->drm, control_fini_action, gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_restart() - Restart SR-IOV control data after a GT reset.
+ * @gt: the &xe_gt
+ *
+ * Any per-VF status maintained by the PF or any ongoing VF control activity
+ * performed by the PF must be reset or cancelled when the GT is reset.
+ *
+ * This function is for PF only.
+ */
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ pf_stop_worker(gt);
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 1; n <= totalvfs; n++)
+ pf_enter_vf_ready(gt, n);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
index 405d1586f991..c85e64f099cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -11,6 +11,9 @@
struct xe_gt;
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt);
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);
+
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
new file mode 100644
index 000000000000..11830aafea45
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+#define _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * enum xe_gt_sriov_control_bits - Various bits used by the PF to represent a VF state
+ *
+ * @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress.
+ * @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
+ * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_FINISH: indicates that the PF wants to send a FLR FINISH message.
+ * @XE_GT_SRIOV_STATE_FLR_FAILED: indicates that VF FLR sequence failed.
+ * @XE_GT_SRIOV_STATE_PAUSE_WIP: indicates that a VF pause operation is in progress.
+ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
+ * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
+ * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
+ * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
+ * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send a RESUME command.
+ * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
+ * @XE_GT_SRIOV_STATE_RESUMED: indicates that the VF was resumed.
+ * @XE_GT_SRIOV_STATE_STOP_WIP: indicates that a VF stop operation is in progress.
+ * @XE_GT_SRIOV_STATE_STOP_SEND_STOP: indicates that the PF wants to send a STOP command.
+ * @XE_GT_SRIOV_STATE_STOP_FAILED: indicates that a VF stop operation has failed.
+ * @XE_GT_SRIOV_STATE_STOPPED: indicates that the VF was stopped.
+ * @XE_GT_SRIOV_STATE_MISMATCH: indicates that the PF has detected a VF state mismatch.
+ */
+enum xe_gt_sriov_control_bits {
+ XE_GT_SRIOV_STATE_WIP = 1,
+
+ XE_GT_SRIOV_STATE_FLR_WIP,
+ XE_GT_SRIOV_STATE_FLR_SEND_START,
+ XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
+ XE_GT_SRIOV_STATE_FLR_GUC_DONE,
+ XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
+ XE_GT_SRIOV_STATE_FLR_RESET_DATA,
+ XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
+ XE_GT_SRIOV_STATE_FLR_SEND_FINISH,
+ XE_GT_SRIOV_STATE_FLR_FAILED,
+
+ XE_GT_SRIOV_STATE_PAUSE_WIP,
+ XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
+ XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
+ XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
+ XE_GT_SRIOV_STATE_PAUSE_FAILED,
+ XE_GT_SRIOV_STATE_PAUSED,
+
+ XE_GT_SRIOV_STATE_RESUME_WIP,
+ XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
+ XE_GT_SRIOV_STATE_RESUME_FAILED,
+ XE_GT_SRIOV_STATE_RESUMED,
+
+ XE_GT_SRIOV_STATE_STOP_WIP,
+ XE_GT_SRIOV_STATE_STOP_SEND_STOP,
+ XE_GT_SRIOV_STATE_STOP_FAILED,
+ XE_GT_SRIOV_STATE_STOPPED,
+
+ XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
+};
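The MISMATCH bit is pinned to the top of the bitmap so new WIP states can be appended without renumbering it. All of these bits live in a single unsigned long (see struct xe_gt_sriov_control_state below), so dumping a VF's current state reduces to a bit walk; a hypothetical debug helper, reusing control_bit_to_string() and pf_pick_vf_control() from the .c file::

    /* hypothetical debug helper, for illustration only */
    static void pf_dump_vf_state(struct xe_gt *gt, unsigned int vfid)
    {
            struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
            unsigned int bit;

            for_each_set_bit(bit, &cs->state, BITS_PER_LONG)
                    xe_gt_sriov_dbg_verbose(gt, "VF%u state: %s\n", vfid,
                                            control_bit_to_string(bit));
    }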
+
+/**
+ * struct xe_gt_sriov_control_state - GT-level per-VF control state.
+ *
+ * Used by the PF driver to maintain per-VF control data.
+ */
+struct xe_gt_sriov_control_state {
+ /** @state: VF state bits */
+ unsigned long state;
+
+ /** @done: completion of async operations */
+ struct completion done;
+
+ /** @link: link into worker list */
+ struct list_head link;
+};
+
+/**
+ * struct xe_gt_sriov_pf_control - GT-level control data.
+ *
+ * Used by the PF driver to maintain its control data.
+ */
+struct xe_gt_sriov_pf_control {
+ /** @worker: worker that executes VF operations */
+ struct work_struct worker;
+
+ /** @list: list of VF entries that have pending work */
+ struct list_head list;
+
+ /** @lock: protects VF pending list */
+ spinlock_t lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 40cbaea3ef44..28e1b130bf87 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_gt_sriov_pf_config_types.h"
+#include "xe_gt_sriov_pf_control_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@@ -23,6 +24,9 @@ struct xe_gt_sriov_metadata {
/** @monitor: per-VF monitoring data. */
struct xe_gt_sriov_monitor monitor;
+ /** @control: per-VF control data. */
+ struct xe_gt_sriov_control_state control;
+
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
};
@@ -30,12 +34,14 @@ struct xe_gt_sriov_metadata {
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
* @service: service data.
+ * @control: control data.
* @policy: policy data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
+ struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 8892d6c2291e..4ebc82e607af 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -495,6 +495,25 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
+static struct xe_ggtt_node *
+vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
+{
+ struct xe_ggtt_node *node;
+ int err;
+
+ node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(node))
+ return node;
+
+ err = xe_ggtt_node_insert_balloon(node, start, end);
+ if (err) {
+ xe_ggtt_node_fini(node);
+ return ERR_PTR(err);
+ }
+
+ return node;
+}
+
static int vf_balloon_ggtt(struct xe_gt *gt)
{
struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
@@ -502,7 +521,6 @@ static int vf_balloon_ggtt(struct xe_gt *gt)
struct xe_ggtt *ggtt = tile->mem.ggtt;
struct xe_device *xe = gt_to_xe(gt);
u64 start, end;
- int err;
xe_gt_assert(gt, IS_SRIOV_VF(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
@@ -528,35 +546,31 @@ static int vf_balloon_ggtt(struct xe_gt *gt)
start = xe_wopcm_size(xe);
end = config->ggtt_base;
if (end != start) {
- err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[0]);
- if (err)
- goto failed;
+ tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
}
start = config->ggtt_base + config->ggtt_size;
end = GUC_GGTT_TOP;
if (end != start) {
- err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[1]);
- if (err)
- goto deballoon;
+ tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
}
return 0;
-
-deballoon:
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
-failed:
- return err;
}
static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
struct xe_tile *tile = arg;
- struct xe_ggtt *ggtt = tile->mem.ggtt;
xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}
/**
@@ -893,6 +907,32 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
}
/**
+ * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
+ * @gt: the &xe_gt
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * This function is for VF use only.
+ * Currently it will trigger a WARN if running on a debug build.
+ */
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+{
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, !reg.vf);
+
+ /*
+ * In the future, we may want to handle selected writes to inaccessible
+ * registers in some custom way, but for now let's just log a warning
+ * about such an attempt, as we are likely doing something wrong.
+ */
+ xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
+ "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
+ val, reg.addr, addr - reg.addr);
+}
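A plausible call site sits in the MMIO write path, mirroring how xe_gt_sriov_vf_read32() is consumed; sketched here as a fragment (the exact dispatch in the MMIO layer is not part of this hunk)::

    /* hypothetical fragment of the MMIO write path */
    if (IS_SRIOV_VF(gt_to_xe(gt)) && !reg.vf) {
            xe_gt_sriov_vf_write32(gt, reg, val);
            return;
    }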
+
+/**
* xe_gt_sriov_vf_print_config - Print VF self config.
* @gt: the &xe_gt
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0de7f8cbcfa6..e541ce57bec2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -22,6 +22,7 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
new file mode 100644
index 000000000000..c7364a5aef8f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+
+#include <drm/drm_print.h>
+
+#include "xe_gt.h"
+#include "xe_gt_stats.h"
+
+/**
+ * xe_gt_stats_incr - Increments the specified stats counter
+ * @gt: graphics tile
+ * @id: xe_gt_stats_id type id that needs to be incremented
+ * @incr: value to be incremented with
+ *
+ * Increments the specified stats counter.
+ */
+void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
+{
+ if (id >= __XE_GT_STATS_NUM_IDS)
+ return;
+
+ atomic_add(incr, &gt->stats.counters[id]);
+}
+
+static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "tlb_inval_count",
+};
+
+/**
+ * xe_gt_stats_print_info - Print the GT stats
+ * @gt: graphics tile
+ * @p: drm_printer where it will be printed out.
+ *
+ * This prints out all the available GT stats.
+ */
+int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
+{
+ enum xe_gt_stats_id id;
+
+ for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id)
+ drm_printf(p, "%s: %d\n", stat_description[id],
+ atomic_read(&gt->stats.counters[id]));
+
+ return 0;
+}
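The printer half is meant to be wired into the GT debugfs attributes, which already hand out a &drm_printer; a hypothetical hook would be a thin wrapper::

    /* hypothetical debugfs hook, for illustration only */
    static int stats(struct xe_gt *gt, struct drm_printer *p)
    {
            return xe_gt_stats_print_info(gt, p);
    }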
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
new file mode 100644
index 000000000000..91d944f6c4e4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_H_
+#define _XE_GT_STATS_H_
+
+struct xe_gt;
+struct drm_printer;
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_TLB_INVAL,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#ifdef CONFIG_DEBUG_FS
+int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
+#else
+static inline void
+xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id,
+ int incr)
+{
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 481d83d07367..cca9cf536f76 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -12,6 +12,7 @@
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -182,7 +183,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
action[1] = seqno;
ret = xe_guc_ct_send_locked(&guc->ct, action, len,
G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret && fence) {
+ if (!ret) {
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
/*
* We haven't actually published the TLB fence as per
@@ -203,7 +204,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
tlb_timeout_jiffies(gt));
}
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else if (ret < 0 && fence) {
+ } else if (ret < 0) {
__invalidation_fence_signal(xe, fence);
}
if (!ret) {
@@ -213,6 +214,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
gt->tlb_invalidation.seqno = 1;
}
mutex_unlock(&guc->ct.lock);
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 25ff03ab8448..0662f71c6ede 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -6,6 +6,7 @@
#include "xe_gt_topology.h"
#include <linux/bitmap.h>
+#include <linux/compiler.h>
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
@@ -31,7 +32,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
}
static void
-load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
+load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
{
struct xe_device *xe = gt_to_xe(gt);
u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
@@ -47,11 +48,13 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
if (GRAPHICS_VERx100(xe) < 1250)
reg_val = ~reg_val & XELP_EU_MASK;
- /* On PVC, one bit = one EU */
- if (GRAPHICS_VERx100(xe) == 1260) {
+ if (GRAPHICS_VERx100(xe) == 1260 || GRAPHICS_VER(xe) >= 20) {
+ /* SIMD16 EUs, one bit == one EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD16;
val = reg_val;
} else {
- /* All other platforms, one bit = 2 EU */
+ /* SIMD8 EUs, one bit == 2 EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD8;
for (i = 0; i < fls(reg_val); i++)
if (reg_val & BIT(i))
val |= 0x3 << 2 * i;
@@ -213,7 +216,7 @@ xe_gt_topology_init(struct xe_gt *gt)
XEHP_GT_COMPUTE_DSS_ENABLE,
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
XE2_GT_COMPUTE_DSS_2);
- load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
@@ -221,6 +224,18 @@ xe_gt_topology_init(struct xe_gt *gt)
xe_gt_topology_dump(gt, &p);
}
+static const char *eu_type_to_str(enum xe_gt_eu_type eu_type)
+{
+ switch (eu_type) {
+ case XE_GT_EU_TYPE_SIMD16:
+ return "simd16";
+ case XE_GT_EU_TYPE_SIMD8:
+ return "simd8";
+ }
+
+ return NULL;
+}
+
void
xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
@@ -231,6 +246,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
gt->fuse_topo.eu_mask_per_dss);
+ drm_printf(p, "EU type: %s\n",
+ eu_type_to_str(gt->fuse_topo.eu_type));
drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
gt->fuse_topo.l3_bank_mask);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index c582541970df..3d1c51de0268 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -10,6 +10,7 @@
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
+#include "xe_gt_stats.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_oa.h"
@@ -27,6 +28,11 @@ enum xe_gt_type {
XE_GT_TYPE_MEDIA,
};
+enum xe_gt_eu_type {
+ XE_GT_EU_TYPE_SIMD8,
+ XE_GT_EU_TYPE_SIMD16,
+};
+
#define XE_MAX_DSS_FUSE_REGS 3
#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
#define XE_MAX_EU_FUSE_REGS 1
@@ -128,6 +134,14 @@ struct xe_gt {
u8 has_indirect_ring_state:1;
} info;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ /** @stats: GT stats */
+ struct {
+ /** @stats.counters: counters for various GT stats */
+ atomic_t counters[__XE_GT_STATS_NUM_IDS];
+ } stats;
+#endif
+
/**
* @mmio: mmio info for GT. All GTs within a tile share the same
* register space, but have their own copy of GSI registers at a
@@ -233,9 +247,14 @@ struct xe_gt {
struct pf_queue {
/** @usm.pf_queue.gt: back pointer to GT */
struct xe_gt *gt;
-#define PF_QUEUE_NUM_DW 128
/** @usm.pf_queue.data: data in the page fault queue */
- u32 data[PF_QUEUE_NUM_DW];
+ u32 *data;
+ /**
+ * @usm.pf_queue.num_dw: number of DWORDS in the page
+ * fault queue. Dynamically calculated based on the number
+ * of compute resources available.
+ */
+ u32 num_dw;
/**
* @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
* moved by worker which processes faults (consumer).
@@ -337,6 +356,12 @@ struct xe_gt {
/** @fuse_topo.l3_bank_mask: L3 bank mask */
xe_l3_bank_mask_t l3_bank_mask;
+
+ /**
+ * @fuse_topo.eu_type: type/width of EU stored in
+ * fuse_topo.eu_mask_per_dss
+ */
+ enum xe_gt_eu_type eu_type;
} fuse_topo;
/** @steering: register steering for individual HW units */
@@ -351,6 +376,12 @@ struct xe_gt {
} steering[NUM_STEERING_TYPES];
/**
+ * @steering_dss_per_grp: number of DSS per steering group (gslice,
+ * cslice, etc.).
+ */
+ unsigned int steering_dss_per_grp;
+
+ /**
* @mcr_lock: protects the MCR_SELECTOR register for the duration
* of a steered operation
*/
@@ -370,8 +401,14 @@ struct xe_gt {
unsigned long *engine;
/** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
- /** @wa_active.oob: bitmap with active OOB workaroudns */
+ /** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
+ /**
+ * @wa_active.oob_initialized: mark oob as initialized to help
+ * detecting misuse of XE_WA() - it can only be called on
+ * initialization after OOB WAs have been processed
+ */
+ bool oob_initialized;
} wa_active;
/** @user_engines: engines present in GT and available to userspace */
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index de0fe9e65746..52df28032a6f 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -350,6 +350,8 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
+
ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
if (ret)
goto out;
@@ -358,8 +360,6 @@ int xe_guc_init(struct xe_guc *guc)
xe_guc_comm_init_early(guc);
- xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
-
return 0;
out:
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index e0bbf98f849d..c3e6b51f7a09 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -11,6 +11,16 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
+/*
+ * GuC version number components are defined to be only 8-bit size,
+ * so converting to a 32bit 8.8.8 integer allows simple (and safe)
+ * numerical comparisons.
+ */
+#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
+#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY])
+#define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE])
+
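With the version packed into a single integer, feature gating becomes a plain numeric comparison; a hypothetical check (the 1.3.0 threshold is made up)::

    /* hypothetical feature gate, for illustration only */
    static bool guc_has_some_fix(struct xe_guc *guc)
    {
            return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 3, 0);
    }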
struct drm_printer;
void xe_guc_comm_init_early(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 1c60b685dbc6..d1902a8581ca 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -24,6 +24,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_uc_fw.h"
#include "xe_wa.h"
/* Slack of a few additional entries per engine */
@@ -367,6 +368,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
0xC40,
&offset, &remain);
+ if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 64afc90ad2c5..f24dd5223926 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -105,12 +105,20 @@ ct_to_xe(struct xe_guc_ct *ct)
* enough space to avoid backpressure on the driver. We increase the size
* of the receive buffer (relative to the send) to ensure a G2H response
* CTB has a landing spot.
+ *
+ * In addition to submissions, the G2H buffer needs to be able to hold
+ * enough space for recoverable page fault notifications. Page faults are
+ * interrupt driven, and their number can be as large as the number of
+ * compute resources available. However, most of the actual work for these
+ * is in a separate page fault worker thread. Therefore we only need to
+ * make sure the queue has enough space to handle all of the submissions
+ * and responses and an extra buffer for incoming page faults.
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
-#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
-#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
+#define CTB_G2H_BUFFER_SIZE (SZ_128K)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
/**
* xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
@@ -516,6 +524,7 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
lockdep_assert_held(&ct->fast_lock);
xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
ct->ctbs.g2h.info.space += g2h_len;
if (!--ct->g2h_outstanding)
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index d9b570a154a2..af2c817d552c 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -6,6 +6,7 @@
#include "xe_guc_hwconfig.h"
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
@@ -103,3 +104,99 @@ void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst)
xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0,
guc->hwconfig.size);
}
+
+void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p)
+{
+ size_t size = xe_guc_hwconfig_size(guc);
+ u32 *hwconfig;
+ u64 num_dw;
+ u32 extra_bytes;
+ int i = 0;
+
+ if (size == 0) {
+ drm_printf(p, "No hwconfig available\n");
+ return;
+ }
+
+ num_dw = div_u64_rem(size, sizeof(u32), &extra_bytes);
+
+ hwconfig = kzalloc(size, GFP_KERNEL);
+ if (!hwconfig) {
+ drm_printf(p, "Error: could not allocate hwconfig memory\n");
+ return;
+ }
+
+ xe_guc_hwconfig_copy(guc, hwconfig);
+
+ /* An entry requires at least three dwords for key, length, value */
+ while (i + 3 <= num_dw) {
+ u32 attribute = hwconfig[i++];
+ u32 len_dw = hwconfig[i++];
+
+ if (i + len_dw > num_dw) {
+ drm_printf(p, "Error: Attribute %u is %u dwords, but only %llu remain\n",
+ attribute, len_dw, num_dw - i);
+ len_dw = num_dw - i;
+ }
+
+ /*
+ * If it's a single dword (as most hwconfig attributes are),
+ * then it's probably a number that makes sense to display
+ * in decimal form. In the rare cases where it's more than
+ * one dword, just print it in hex form and let the user
+ * figure out how to interpret it.
+ */
+ if (len_dw == 1)
+ drm_printf(p, "[%2u] = %u\n", attribute, hwconfig[i]);
+ else
+ drm_printf(p, "[%2u] = { %*ph }\n", attribute,
+ (int)(len_dw * sizeof(u32)), &hwconfig[i]);
+ i += len_dw;
+ }
+
+ if (i < num_dw || extra_bytes)
+ drm_printf(p, "Error: %llu extra bytes at end of hwconfig\n",
+ (num_dw - i) * sizeof(u32) + extra_bytes);
+
+ kfree(hwconfig);
+}
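For a well-formed table this prints one line per attribute: single-dword values in decimal, longer values as raw hex bytes via %*ph (the values below are illustrative)::

    [ 1] = 70
    [ 7] = { 01 00 00 00 08 00 00 00 }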
+
+/*
+ * Lookup a specific 32-bit attribute value in the GuC's hwconfig table.
+ */
+int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val)
+{
+ size_t size = xe_guc_hwconfig_size(guc);
+ u64 num_dw = div_u64(size, sizeof(u32));
+ u32 *hwconfig;
+ bool found = false;
+ int i = 0;
+
+ if (num_dw == 0)
+ return -EINVAL;
+
+ hwconfig = kzalloc(size, GFP_KERNEL);
+ if (!hwconfig)
+ return -ENOMEM;
+
+ xe_guc_hwconfig_copy(guc, hwconfig);
+
+ /* An entry requires at least three dwords for key, length, value */
+ while (i + 3 <= num_dw) {
+ u32 key = hwconfig[i++];
+ u32 len_dw = hwconfig[i++];
+
+ if (key != attribute) {
+ i += len_dw;
+ continue;
+ }
+
+ *val = hwconfig[i];
+ found = true;
+ break;
+ }
+
+ kfree(hwconfig);
+
+ return found ? 0 : -ENOENT;
+}
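A consumer would typically query one attribute and fall back to a default when the table lacks it; a sketch (the attribute id and default are made up)::

    u32 num_foo;

    /* hypothetical attribute id, for illustration only */
    if (xe_guc_hwconfig_lookup_u32(guc, 2, &num_foo))
            num_foo = 8;    /* fall back to a safe default */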
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.h b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
index b5794d641900..ab4e5038236e 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.h
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
@@ -8,10 +8,13 @@
#include <linux/types.h>
+struct drm_printer;
struct xe_guc;
int xe_guc_hwconfig_init(struct xe_guc *guc);
u32 xe_guc_hwconfig_size(struct xe_guc *guc);
void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst);
+void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p);
+int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
index cd0549d0ef89..e845425d670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -97,8 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
if (ret)
return ret;
- xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n",
- idm->total, str_plural(idm->total));
+ xe_gt_dbg(idm_to_gt(idm), "using %u GuC ID%s\n",
+ idm->total, str_plural(idm->total));
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index ccd574e948aa..034b29984d5e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -1042,7 +1042,7 @@ static void xe_guc_pc_fini_hw(void *arg)
return;
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
- XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
+ xe_guc_pc_gucrc_disable(pc);
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 77b0f0d8f729..fbbe6a487bbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1071,7 +1071,9 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
+ const char *process_name = "no process";
int err = -ETIME;
+ pid_t pid = -1;
int i = 0;
bool wedged, skip_timeout_check;
@@ -1168,9 +1170,14 @@ trigger_reset:
goto sched_enable;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ if (q->vm && q->vm->xef) {
+ process_name = q->vm->xef->process_name;
+ pid = q->vm->xef->pid;
+ }
+ xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags);
+ q->guc->id, q->flags, process_name, pid);
+
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
@@ -1312,6 +1319,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
kfree(msg);
}
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+ if (!q->guc->suspend_pending)
+ return;
+
+ WRITE_ONCE(q->guc->suspend_pending, false);
+ wake_up(&q->guc->suspend_wait);
+}
+
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1321,9 +1337,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
- q->guc->suspend_pending = false;
- smp_wmb();
- wake_up(&q->guc->suspend_wait);
+ __suspend_fence_signal(q);
}
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1360,9 +1374,11 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
struct xe_exec_queue *q = msg->private_data;
if (guc_exec_queue_allowed_to_change_state(q)) {
- q->guc->resume_time = RESUME_PENDING;
clear_exec_queue_suspended(q);
- enable_scheduling(q);
+ if (!exec_queue_enabled(q)) {
+ q->guc->resume_time = RESUME_PENDING;
+ enable_scheduling(q);
+ }
} else {
clear_exec_queue_suspended(q);
}
@@ -1372,9 +1388,13 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
#define SET_SCHED_PROPS 2
#define SUSPEND 3
#define RESUME 4
+#define OPCODE_MASK 0xf
+#define MSG_LOCKED BIT(8)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
+ struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data));
+
trace_xe_sched_msg_recv(msg);
switch (msg->opcode) {
@@ -1394,7 +1414,7 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
XE_WARN_ON("Unknown message type");
}
- xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data)));
+ xe_pm_runtime_put(xe);
}
static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1414,7 +1434,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
- int err;
+ int err, i;
xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
@@ -1426,6 +1446,9 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
ge->q = q;
init_waitqueue_head(&ge->suspend_wait);
+ for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
+ INIT_LIST_HEAD(&ge->static_msgs[i].link);
+
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
@@ -1478,6 +1501,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
trace_xe_exec_queue_kill(q);
set_exec_queue_killed(q);
+ __suspend_fence_signal(q);
xe_guc_exec_queue_trigger_cleanup(q);
}
@@ -1487,11 +1511,26 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
INIT_LIST_HEAD(&msg->link);
- msg->opcode = opcode;
+ msg->opcode = opcode & OPCODE_MASK;
msg->private_data = q;
trace_xe_sched_msg_add(msg);
- xe_sched_add_msg(&q->guc->sched, msg);
+ if (opcode & MSG_LOCKED)
+ xe_sched_add_msg_locked(&q->guc->sched, msg);
+ else
+ xe_sched_add_msg(&q->guc->sched, msg);
+}
+
+static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
+ struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ if (!list_empty(&msg->link))
+ return false;
+
+ guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
+
+ return true;
}
#define STATIC_MSG_CLEANUP 0
@@ -1565,34 +1604,59 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
- if (exec_queue_killed_or_banned_or_wedged(q) || q->guc->suspend_pending)
+ if (exec_queue_killed_or_banned_or_wedged(q))
return -EINVAL;
- q->guc->suspend_pending = true;
- guc_exec_queue_add_msg(q, msg, SUSPEND);
+ xe_sched_msg_lock(sched);
+ if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
+ q->guc->suspend_pending = true;
+ xe_sched_msg_unlock(sched);
return 0;
}
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ /*
+ * We likely don't need to check exec_queue_killed() since we clear
+ * suspend_pending upon kill, but to be paranoid about races in which
+ * suspend_pending is set after kill, also check for kill here.
+ */
+ ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
+ !READ_ONCE(q->guc->suspend_pending) ||
+ exec_queue_killed(q) ||
+ guc_read_stopped(guc),
+ HZ * 5);
- wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
- guc_read_stopped(guc));
+ if (!ret) {
+ xe_gt_warn(guc_to_gt(guc),
+ "Suspend fence, guc_id=%d, failed to respond",
+ q->guc->id);
+ /* XXX: Trigger GT reset? */
+ return -ETIME;
+ }
+
+ return ret < 0 ? ret : 0;
}
static void guc_exec_queue_resume(struct xe_exec_queue *q)
{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
xe_assert(xe, !q->guc->suspend_pending);
- guc_exec_queue_add_msg(q, msg, RESUME);
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg(q, msg, RESUME);
+ xe_sched_msg_unlock(sched);
}
static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 1c9d38b6f5f1..65b2e147c4b9 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
if (heci_gsc->adev) {
@@ -177,12 +177,14 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
heci_gsc->irq = -1;
- if (xe->info.platform == XE_PVC) {
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ def = &heci_gsc_def_dg2;
+ } else if (xe->info.platform == XE_PVC) {
def = &heci_gsc_def_pvc;
} else if (xe->info.platform == XE_DG2) {
def = &heci_gsc_def_dg2;
@@ -232,3 +234,23 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
+
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
+{
+ int ret;
+
+ if ((iir & CSC_IRQ_INTF(1)) == 0)
+ return;
+
+ if (!HAS_HECI_CSCFI(xe)) {
+ drm_warn_once(&xe->drm, "CSC irq: not supported");
+ return;
+ }
+
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+ ret = generic_handle_irq(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 9db454478fae..48b3b1838045 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -11,10 +11,15 @@ struct xe_device;
struct mei_aux_device;
/*
- * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * GSC HECI1 bit corresponds to bit15 and HECI2 to bit14.
* The reason for this is to allow growth for more interfaces in the future.
*/
-#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+
+/*
+ * CSC HECI1 bit corresponds to bit9 and HECI2 to bit10.
+ */
+#define CSC_IRQ_INTF(_x) BIT(9 + (_x))
/**
* struct xe_heci_gsc - graphics security controller for xe, HECI interface
@@ -31,5 +36,6 @@ struct xe_heci_gsc {
void xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_fini(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
#endif /* __XE_HECI_GSC_DEV_H__ */
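A quick numeric check of the two mappings above, with BIT() reimplemented for userspace: GSC interfaces count down from bit 15 while CSC interfaces count up from bit 9, which is why the new CSC handler tests CSC_IRQ_INTF(1), i.e. bit 10, for HECI2.

#include <stdio.h>

#define BIT(n) (1u << (n))                 /* userspace stand-in */
#define GSC_IRQ_INTF(x) BIT(15 - (x))      /* HECI1 -> bit15, HECI2 -> bit14 */
#define CSC_IRQ_INTF(x) BIT(9 + (x))       /* HECI1 -> bit9,  HECI2 -> bit10 */

int main(void)
{
	printf("GSC: 0x%04x 0x%04x\n", GSC_IRQ_INTF(0), GSC_IRQ_INTF(1));
	/* GSC: 0x8000 0x4000 */
	printf("CSC: 0x%04x 0x%04x\n", CSC_IRQ_INTF(0), CSC_IRQ_INTF(1));
	/* CSC: 0x0200 0x0400 */
	return 0;
}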
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index bec4366e5513..f5459f97af23 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -43,14 +43,6 @@ huc_to_guc(struct xe_huc *huc)
return &container_of(huc, struct xe_uc, huc)->guc;
}
-static void free_gsc_pkt(struct drm_device *drm, void *arg)
-{
- struct xe_huc *huc = arg;
-
- xe_bo_unpin_map_no_vm(huc->gsc_pkt);
- huc->gsc_pkt = NULL;
-}
-
#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
@@ -59,17 +51,16 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_bo *bo;
/* we use a single object for both input and output */
- bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
- PXP43_HUC_AUTH_INOUT_SIZE * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
+ PXP43_HUC_AUTH_INOUT_SIZE * 2,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
huc->gsc_pkt = bo;
- return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
+ return 0;
}
int xe_huc_init(struct xe_huc *huc)
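The HuC change above replaces a hand-registered free_gsc_pkt() cleanup action with a managed BO whose release is tied to the drm device lifetime. A toy sketch of the same ownership idea, under stated assumptions: managed_zalloc() is hypothetical and uses atexit() where the driver uses drmm, and the doubled size mirrors the patch's single input+output buffer.

#include <stdio.h>
#include <stdlib.h>

#define MAX_MANAGED 16

static void *managed[MAX_MANAGED];
static int nmanaged;

/* Release everything still registered; runs automatically at exit. */
static void managed_release_all(void)
{
	while (nmanaged > 0)
		free(managed[--nmanaged]);
}

/* Allocate and register for automatic release: no bespoke free_*() hook. */
static void *managed_zalloc(size_t sz)
{
	void *p;

	if (nmanaged >= MAX_MANAGED)
		return NULL;
	p = calloc(1, sz);
	if (p)
		managed[nmanaged++] = p;
	return p;
}

int main(void)
{
	void *pkt;

	atexit(managed_release_all);
	pkt = managed_zalloc(2 * 4096);	/* input + output halves */
	if (!pkt)
		return 1;
	puts("buffer allocated; released automatically at exit");
	return 0;
}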
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 07ed9fd28f19..c9c3beb3ce8d 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -5,7 +5,10 @@
#include "xe_hw_engine.h"
+#include <linux/nospec.h>
+
#include <drm/drm_managed.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -20,6 +23,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
+#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
@@ -263,19 +267,28 @@ static const struct engine_info engine_infos[] = {
},
};
-static void hw_engine_fini(struct drm_device *drm, void *arg)
+static void hw_engine_fini(void *arg)
{
struct xe_hw_engine *hwe = arg;
if (hwe->exl_port)
xe_execlist_port_destroy(hwe->exl_port);
- xe_lrc_put(hwe->kernel_lrc);
hwe->gt = NULL;
}
-static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
- u32 val)
+/**
+ * xe_hw_engine_mmio_write32() - Write engine register
+ * @hwe: engine
+ * @reg: register to write into
+ * @val: desired 32-bit value to write
+ *
+ * This function will write val into an engine specific register.
+ * Forcewake must be held by the caller.
+ *
+ */
+void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
+ struct xe_reg reg, u32 val)
{
xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
@@ -285,7 +298,17 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
xe_mmio_write32(hwe->gt, reg, val);
}
-static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
+/**
+ * xe_hw_engine_mmio_read32() - Read engine register
+ * @hwe: engine
+ * @reg: register to read from
+ *
+ * This function will read from an engine specific register.
+ * Forcewake must be held by the caller.
+ *
+ * Return: value of the 32-bit register.
+ */
+u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
@@ -304,14 +327,14 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_mmio_write32(hwe->gt, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
- hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
- hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
- xe_bo_ggtt_addr(hwe->hwsp));
- hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
- hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
- _MASKED_BIT_DISABLE(STOP_RING));
- hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+ xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
+ xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
+ xe_bo_ggtt_addr(hwe->hwsp));
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
+ _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
+ _MASKED_BIT_DISABLE(STOP_RING));
+ xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
@@ -425,6 +448,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
0xA,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Enable Priority Mem Read */
+ { XE_RTP_NAME("Priority_Mem_Read"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
{}
};
@@ -528,21 +557,13 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
goto err_name;
}
- hwe->kernel_lrc = xe_lrc_create(hwe, NULL, SZ_16K);
- if (IS_ERR(hwe->kernel_lrc)) {
- err = PTR_ERR(hwe->kernel_lrc);
- goto err_hwsp;
- }
-
if (!xe_device_uc_enabled(xe)) {
hwe->exl_port = xe_execlist_port_create(xe, hwe);
if (IS_ERR(hwe->exl_port)) {
err = PTR_ERR(hwe->exl_port);
- goto err_kernel_lrc;
+ goto err_hwsp;
}
- }
-
- if (xe_device_uc_enabled(xe)) {
+ } else {
/* GSCCS has a special interrupt for reset */
if (hwe->class == XE_ENGINE_CLASS_OTHER)
hwe->irq_handler = xe_gsc_hwe_irq_handler;
@@ -555,10 +576,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
- return drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
+ return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
-err_kernel_lrc:
- xe_lrc_put(hwe->kernel_lrc);
err_hwsp:
xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
@@ -761,6 +780,9 @@ int xe_hw_engines_init(struct xe_gt *gt)
}
hw_engine_setup_logical_mapping(gt);
+ err = xe_hw_engine_setup_groups(gt);
+ if (err)
+ return err;
return 0;
}
@@ -791,7 +813,7 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
unsigned int dss;
u16 group, instance;
- snapshot->reg.instdone.ring = hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
+ snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
return;
@@ -887,53 +909,53 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
return snapshot;
snapshot->reg.ring_execlist_status =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
+ xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
snapshot->reg.ring_execlist_status |= val << 32;
snapshot->reg.ring_execlist_sq_contents =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
- val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
+ xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
snapshot->reg.ring_execlist_sq_contents |= val << 32;
- snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
+ snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
snapshot->reg.ring_acthd |= val << 32;
- snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
+ snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
snapshot->reg.ring_bbaddr |= val << 32;
snapshot->reg.ring_dma_fadd =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
- val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
+ xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
snapshot->reg.ring_dma_fadd |= val << 32;
- snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
- snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
+ snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
+ snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
+ snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0));
if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) {
- val = hw_engine_mmio_read32(hwe, RING_START_UDW(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0));
snapshot->reg.ring_start |= val << 32;
}
if (xe_gt_has_indirect_ring_state(hwe->gt)) {
snapshot->reg.indirect_ring_state =
- hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
+ xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
}
snapshot->reg.ring_head =
- hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
+ xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
snapshot->reg.ring_tail =
- hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
- snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
+ xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
+ snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0));
snapshot->reg.ring_mi_mode =
- hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
- snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
- snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
- snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
- snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
- snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
+ xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+ snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0));
+ snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0));
+ snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0));
+ snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0));
+ snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0));
+ snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0));
xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
@@ -1135,3 +1157,41 @@ enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
return engine_infos[hwe->engine_id].domain;
}
+
+static const enum xe_engine_class user_to_xe_engine_class[] = {
+ [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
+ [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
+ [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
+ [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
+};
+
+/**
+ * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
+ * @xe: xe device
+ * @eci: engine class and instance
+ *
+ * This function will find a hardware engine for given engine
+ * class and instance.
+ *
+ * Return: If found xe_hw_engine pointer, NULL otherwise.
+ */
+struct xe_hw_engine *
+xe_hw_engine_lookup(struct xe_device *xe,
+ struct drm_xe_engine_class_instance eci)
+{
+ unsigned int idx;
+
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
+ return NULL;
+
+ if (eci.gt_id >= xe->info.gt_count)
+ return NULL;
+
+ idx = array_index_nospec(eci.engine_class,
+ ARRAY_SIZE(user_to_xe_engine_class));
+
+ return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
+ user_to_xe_engine_class[idx],
+ eci.engine_instance, true);
+}
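xe_hw_engine_lookup() first rejects an out-of-range user-supplied class, then clamps the index with array_index_nospec() so that a CPU speculating past the bounds check still cannot read beyond the table. A self-contained sketch of the check-then-clamp pattern; index_nospec() below is a simplified stand-in for the <linux/nospec.h> helper, not its real implementation.

#include <stddef.h>
#include <stdio.h>

static const char *const class_names[] = {
	"render", "copy", "video-decode", "video-enhance", "compute",
};

/* Branchless clamp: returns idx when idx < size, 0 otherwise. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size);	/* all-ones or zero */

	return idx & mask;
}

static const char *lookup_class(size_t user_class)
{
	size_t n = sizeof(class_names) / sizeof(class_names[0]);

	if (user_class >= n)
		return NULL;			/* architectural bounds check */
	user_class = index_nospec(user_class, n);
	return class_names[user_class];		/* in range even if mispredicted */
}

int main(void)
{
	printf("%s\n", lookup_class(4));		/* compute */
	printf("%d\n", lookup_class(99) == NULL);	/* 1 */
	return 0;
}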
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index 900c8c991430..022819a4a8eb 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -9,6 +9,8 @@
#include "xe_hw_engine_types.h"
struct drm_printer;
+struct drm_xe_engine_class_instance;
+struct xe_device;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -62,6 +64,11 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe);
+
+struct xe_hw_engine *
+xe_hw_engine_lookup(struct xe_device *xe,
+ struct drm_xe_engine_class_instance eci);
+
static inline bool xe_hw_engine_is_valid(struct xe_hw_engine *hwe)
{
return hwe->name;
@@ -71,4 +78,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class);
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe);
enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe);
+void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, u32 val);
+u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
new file mode 100644
index 000000000000..82750520a90a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_hw_engine_group.h"
+#include "xe_vm.h"
+
+static void
+hw_engine_group_free(struct drm_device *drm, void *arg)
+{
+ struct xe_hw_engine_group *group = arg;
+
+ destroy_workqueue(group->resume_wq);
+ kfree(group);
+}
+
+static void
+hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
+{
+ struct xe_exec_queue *q;
+ struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
+ int err;
+ enum xe_hw_engine_group_execution_mode previous_mode;
+
+ err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
+ if (err)
+ return;
+
+ if (previous_mode == EXEC_MODE_LR)
+ goto put;
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ q->ops->resume(q);
+ }
+
+put:
+ xe_hw_engine_group_put(group);
+}
+
+static struct xe_hw_engine_group *
+hw_engine_group_alloc(struct xe_device *xe)
+{
+ struct xe_hw_engine_group *group;
+ int err;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
+ group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
+ if (!group->resume_wq) {
+ kfree(group); /* don't leak the group if workqueue allocation fails */
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_rwsem(&group->mode_sem);
+ INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
+ INIT_LIST_HEAD(&group->exec_queue_list);
+
+ err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
+ if (err)
+ return ERR_PTR(err);
+
+ return group;
+}
+
+/**
+ * xe_hw_engine_setup_groups() - Setup the hw engine groups for the gt
+ * @gt: The gt for which groups are setup
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_hw_engine_setup_groups(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ /*
+ * Each group is registered with a drmm release action by
+ * hw_engine_group_alloc(), so on failure any group already
+ * allocated is freed with the drm device; no kfree() here.
+ */
+ group_rcs_ccs = hw_engine_group_alloc(xe);
+ if (IS_ERR(group_rcs_ccs))
+ return PTR_ERR(group_rcs_ccs);
+
+ group_bcs = hw_engine_group_alloc(xe);
+ if (IS_ERR(group_bcs))
+ return PTR_ERR(group_bcs);
+
+ group_vcs_vecs = hw_engine_group_alloc(xe);
+ if (IS_ERR(group_vcs_vecs))
+ return PTR_ERR(group_vcs_vecs);
+
+ for_each_hw_engine(hwe, gt, id) {
+ switch (hwe->class) {
+ case XE_ENGINE_CLASS_COPY:
+ hwe->hw_engine_group = group_bcs;
+ break;
+ case XE_ENGINE_CLASS_RENDER:
+ case XE_ENGINE_CLASS_COMPUTE:
+ hwe->hw_engine_group = group_rcs_ccs;
+ break;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ hwe->hw_engine_group = group_vcs_vecs;
+ break;
+ case XE_ENGINE_CLASS_OTHER:
+ break;
+ default:
+ drm_warn(&xe->drm, "NOT POSSIBLE");
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xe_hw_engine_group_add_exec_queue() - Add an exec queue to a hw engine group
+ * @group: The hw engine group
+ * @q: The exec_queue
+ *
+ * Return: 0 on success,
+ * -EINTR if the lock could not be acquired
+ */
+int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
+{
+ int err;
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ xe_assert(xe, group);
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM));
+ xe_assert(xe, q->vm);
+
+ if (xe_vm_in_preempt_fence_mode(q->vm))
+ return 0;
+
+ err = down_write_killable(&group->mode_sem);
+ if (err)
+ return err;
+
+ if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
+ q->ops->suspend(q);
+ err = q->ops->suspend_wait(q);
+ if (err)
+ goto err_suspend;
+
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+ }
+
+ list_add(&q->hw_engine_group_link, &group->exec_queue_list);
+ up_write(&group->mode_sem);
+
+ return 0;
+
+err_suspend:
+ up_write(&group->mode_sem);
+ return err;
+}
+
+/**
+ * xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
+ * @group: The hw engine group
+ * @q: The exec_queue
+ */
+void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ xe_assert(xe, group);
+ xe_assert(xe, q->vm);
+
+ down_write(&group->mode_sem);
+
+ if (!list_empty(&q->hw_engine_group_link))
+ list_del(&q->hw_engine_group_link);
+
+ up_write(&group->mode_sem);
+}
+
+/**
+ * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's
+ * faulting LR jobs
+ * @group: The hw engine group
+ */
+void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
+{
+ queue_work(group->resume_wq, &group->resume_work);
+}
+
+/**
+ * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
+ * @group: The hw engine group
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
+{
+ int err;
+ struct xe_exec_queue *q;
+ bool need_resume = false;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ need_resume = true;
+ q->ops->suspend(q);
+ }
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ err = q->ops->suspend_wait(q);
+ if (err)
+ goto err_suspend;
+ }
+
+ if (need_resume)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
+ return 0;
+
+err_suspend:
+ /* caller holds mode_sem for write and releases it on error */
+ return err;
+}
+
+/**
+ * xe_hw_engine_group_wait_for_dma_fence_jobs() - Wait for dma fence jobs to complete
+ * @group: The hw engine group
+ *
+ * This function is not meant to be called directly from a user IOCTL as dma_fence_wait()
+ * is not interruptible.
+ *
+ * Return: 0 on success,
+ * -ETIME if waiting for one job failed
+ */
+static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
+{
+ long timeout;
+ struct xe_exec_queue *q;
+ struct dma_fence *fence;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (xe_vm_in_lr_mode(q->vm))
+ continue;
+
+ fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
+ timeout = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ if (timeout < 0)
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int switch_mode(struct xe_hw_engine_group *group)
+{
+ int err = 0;
+ enum xe_hw_engine_group_execution_mode new_mode;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ switch (group->cur_mode) {
+ case EXEC_MODE_LR:
+ new_mode = EXEC_MODE_DMA_FENCE;
+ err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
+ break;
+ case EXEC_MODE_DMA_FENCE:
+ new_mode = EXEC_MODE_LR;
+ err = xe_hw_engine_group_wait_for_dma_fence_jobs(group);
+ break;
+ }
+
+ if (err)
+ return err;
+
+ group->cur_mode = new_mode;
+
+ return 0;
+}
+
+/**
+ * xe_hw_engine_group_get_mode() - Get the group, ensuring it executes in the requested mode
+ * @group: The hw engine group
+ * @new_mode: The new execution mode
+ * @previous_mode: Pointer through which the group's previous mode is returned
+ *
+ * Return: 0 if successful, -EINTR if locking failed.
+ */
+int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
+ enum xe_hw_engine_group_execution_mode new_mode,
+ enum xe_hw_engine_group_execution_mode *previous_mode)
+__acquires(&group->mode_sem)
+{
+ int err = down_read_interruptible(&group->mode_sem);
+
+ if (err)
+ return err;
+
+ *previous_mode = group->cur_mode;
+
+ if (new_mode != group->cur_mode) {
+ up_read(&group->mode_sem);
+ err = down_write_killable(&group->mode_sem);
+ if (err)
+ return err;
+
+ if (new_mode != group->cur_mode) {
+ err = switch_mode(group);
+ if (err) {
+ up_write(&group->mode_sem);
+ return err;
+ }
+ }
+ downgrade_write(&group->mode_sem);
+ }
+
+ return err;
+}
+
+/**
+ * xe_hw_engine_group_put() - Put the group
+ * @group: The hw engine group
+ */
+void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
+__releases(&group->mode_sem)
+{
+ up_read(&group->mode_sem);
+}
+
+/**
+ * xe_hw_engine_group_find_exec_mode() - Find the execution mode for this exec queue
+ * @q: The exec_queue
+ *
+ * Return: EXEC_MODE_LR if the queue's VM is in fault mode,
+ * EXEC_MODE_DMA_FENCE otherwise.
+ */
+enum xe_hw_engine_group_execution_mode
+xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q)
+{
+ if (xe_vm_in_fault_mode(q->vm))
+ return EXEC_MODE_LR;
+ else
+ return EXEC_MODE_DMA_FENCE;
+}
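xe_hw_engine_group_get_mode() and xe_hw_engine_group_put() form a read-mostly protocol: queues running in the current mode share mode_sem as readers, and a mode change briefly upgrades to the write side, switches, then downgrades back to a read hold. POSIX rwlocks cannot upgrade or downgrade atomically, so the userspace sketch below re-checks the mode after every re-acquisition; it approximates the pattern rather than reproducing the driver's exact semantics.

#include <pthread.h>
#include <stdio.h>

enum mode { MODE_LR, MODE_DMA_FENCE };

static pthread_rwlock_t mode_lock = PTHREAD_RWLOCK_INITIALIZER;
static enum mode cur_mode = MODE_DMA_FENCE;

/* Return with a read hold on mode_lock and cur_mode == want. */
static void get_mode(enum mode want)
{
	pthread_rwlock_rdlock(&mode_lock);
	while (cur_mode != want) {
		pthread_rwlock_unlock(&mode_lock);
		pthread_rwlock_wrlock(&mode_lock);
		if (cur_mode != want)
			cur_mode = want;	/* switch_mode() stand-in */
		pthread_rwlock_unlock(&mode_lock);
		pthread_rwlock_rdlock(&mode_lock);	/* re-check in loop */
	}
}

static void put_mode(void)
{
	pthread_rwlock_unlock(&mode_lock);
}

int main(void)
{
	get_mode(MODE_LR);	/* excludes DMA-fence submissions */
	puts("running in LR mode");
	put_mode();
	return 0;
}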
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.h b/drivers/gpu/drm/xe/xe_hw_engine_group.h
new file mode 100644
index 000000000000..797ee81acbf2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_GROUP_H_
+#define _XE_HW_ENGINE_GROUP_H_
+
+#include "xe_hw_engine_group_types.h"
+
+struct drm_device;
+struct xe_exec_queue;
+struct xe_gt;
+
+int xe_hw_engine_setup_groups(struct xe_gt *gt);
+
+int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
+void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
+
+int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
+ enum xe_hw_engine_group_execution_mode new_mode,
+ enum xe_hw_engine_group_execution_mode *previous_mode);
+void xe_hw_engine_group_put(struct xe_hw_engine_group *group);
+
+enum xe_hw_engine_group_execution_mode
+xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q);
+void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group_types.h b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h
new file mode 100644
index 000000000000..92b6e0712c03
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_GROUP_TYPES_H_
+#define _XE_HW_ENGINE_GROUP_TYPES_H_
+
+#include "xe_force_wake_types.h"
+#include "xe_lrc_types.h"
+#include "xe_reg_sr_types.h"
+
+/**
+ * enum xe_hw_engine_group_execution_mode - possible execution modes of a hw
+ * engine group
+ *
+ * @EXEC_MODE_LR: execution in long-running mode
+ * @EXEC_MODE_DMA_FENCE: execution in dma fence mode
+ */
+enum xe_hw_engine_group_execution_mode {
+ EXEC_MODE_LR,
+ EXEC_MODE_DMA_FENCE,
+};
+
+/**
+ * struct xe_hw_engine_group - Hardware engine group
+ *
+ * hw engines belong to the same group if they share hardware resources in a way
+ * that prevents them from making progress when one is stuck on a page fault.
+ */
+struct xe_hw_engine_group {
+ /**
+ * @exec_queue_list: list of exec queues attached to this
+ * xe_hw_engine_group
+ */
+ struct list_head exec_queue_list;
+ /** @resume_work: worker to resume faulting LR exec queues */
+ struct work_struct resume_work;
+ /** @resume_wq: workqueue to resume faulting LR exec queues */
+ struct workqueue_struct *resume_wq;
+ /**
+ * @mode_sem: used to protect this group's hardware resources and ensure
+ * mutual exclusion between execution only in faulting LR mode and
+ * execution only in DMA_FENCE mode
+ */
+ struct rw_semaphore mode_sem;
+ /** @cur_mode: current execution mode of this hw engine group */
+ enum xe_hw_engine_group_execution_mode cur_mode;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 70e6434f150d..8be6d420ece4 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -136,8 +136,6 @@ struct xe_hw_engine {
enum xe_force_wake_domains domain;
/** @hwsp: hardware status page buffer object */
struct xe_bo *hwsp;
- /** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */
- struct xe_lrc *kernel_lrc;
/** @exl_port: execlists port */
struct xe_execlist_port *exl_port;
/** @fence_irq: fence IRQ to run when a hw engine IRQ is received */
@@ -150,6 +148,8 @@ struct xe_hw_engine {
struct xe_hw_engine_class_intf *eclass;
/** @oa_unit: oa unit for this hw engine */
struct xe_oa_unit *oa_unit;
+ /** @hw_engine_group: the group of hw engines this one belongs to */
+ struct xe_hw_engine_group *hw_engine_group;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 98e3ec08279e..aa11728e7e79 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -12,7 +12,6 @@
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
-#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
@@ -65,8 +64,8 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
/** @hwmon_dev: hwmon device for xe */
struct device *hwmon_dev;
- /** @gt: primary gt */
- struct xe_gt *gt;
+ /** @xe: Xe device */
+ struct xe_device *xe;
/** @hwmon_lock: lock for rw attributes*/
struct mutex hwmon_lock;
/** @scl_shift_power: pkg power unit */
@@ -82,7 +81,7 @@ struct xe_hwmon {
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
@@ -148,8 +147,9 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -166,7 +166,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
mutex_lock(&hwmon->hwmon_lock);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@@ -176,7 +176,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku);
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@@ -190,6 +190,7 @@ unlock:
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
int ret = 0;
u64 reg_val;
struct xe_reg rapl_limit;
@@ -200,10 +201,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
+ drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@@ -212,7 +213,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
@@ -221,6 +222,7 @@ unlock:
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
u64 reg_val;
@@ -229,7 +231,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
* for this register can be skipped.
* See xe_hwmon_power_is_visible.
*/
- reg_val = xe_mmio_read32(hwmon->gt, reg);
+ reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -257,11 +259,12 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
@@ -279,19 +282,20 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
int sensor_index = to_sensor_dev_attr(attr)->index;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -319,6 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
@@ -371,16 +376,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
PKG_PWR_LIM_1_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return count;
}
@@ -406,11 +411,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -435,20 +440,24 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
};
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
-static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
+static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
/* Avoid Illegal Subcommand error */
- if (gt_to_xe(gt)->info.platform == XE_DG2)
+ if (hwmon->xe->info.platform == XE_DG2)
return -ENXIO;
- return xe_pcode_read(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
+ return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
uval, NULL);
}
-static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
+static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
- return xe_pcode_write(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
(uval & POWER_SETUP_I1_DATA_MASK));
}
@@ -461,7 +470,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
if (ret)
goto unlock;
@@ -481,7 +490,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
- ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ ret = xe_hwmon_pcode_write_i1(hwmon, uval);
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@@ -489,9 +498,10 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
@@ -510,7 +520,7 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
channel)) ? 0444 : 0;
case hwmon_power_crit:
if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
break;
case hwmon_power_label:
@@ -563,10 +573,10 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
switch (attr) {
case hwmon_curr_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
case hwmon_curr_label:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
break;
default:
@@ -654,7 +664,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -674,7 +684,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -686,7 +696,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -706,7 +716,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -718,7 +728,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -732,7 +742,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -771,6 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = {
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
@@ -783,7 +794,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
*/
pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit);
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@@ -828,8 +839,8 @@ void xe_hwmon_register(struct xe_device *xe)
if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
return;
- /* primary GT to access device level properties */
- hwmon->gt = xe->tiles[0].primary_gt;
+ /* There's only one instance of hwmon per device */
+ hwmon->xe = xe;
xe_hwmon_get_preregistration_info(xe);
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 85733f993d09..5f2c368c35ad 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -459,6 +459,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
+ if (HAS_HECI_CSCFI(xe))
+ xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 418661a88918..8999ac511555 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -7,7 +7,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 58121821f081..aec7db39c061 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -5,6 +5,8 @@
#include "xe_lrc.h"
+#include <generated/xe_wa_oob.h>
+
#include <linux/ascii85.h>
#include "instructions/xe_mi_commands.h"
@@ -24,6 +26,7 @@
#include "xe_memirq.h"
#include "xe_sriov.h"
#include "xe_vm.h"
+#include "xe_wa.h"
#define LRC_VALID BIT_ULL(0)
#define LRC_PRIVILEGE BIT_ULL(8)
@@ -1581,19 +1584,31 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
int state_table_size = 0;
/*
- * At the moment we only need to emit non-register state for the RCS
- * engine.
+ * Wa_14019789679
+ *
+ * If the driver doesn't explicitly emit the SVG instructions while
+ * setting up the default LRC, the context switch will write 0's
+ * (noops) into the LRC memory rather than the expected instruction
+ * headers. Application contexts start out as a copy of the default
+ * LRC, and if they also do not emit specific settings for some SVG
+ * state, then on context restore they'll unintentionally inherit
+ * whatever state setting the previous context had programmed into the
+ * hardware (i.e., the lack of a 3DSTATE_* instruction in the LRC will
+ * prevent the hardware from resetting that state back to any specific
+ * value).
+ *
+ * The official workaround only requires emitting 3DSTATE_MESH_CONTROL
+ * since that's a specific state setting that can easily cause GPU
+ * hangs if unintentionally inherited. However to be safe we'll
+ * continue to emit all of the SVG state since it's best not to leak
+ * any of the state between contexts, even if that leakage is harmless.
*/
- if (q->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
-
- switch (GRAPHICS_VERx100(xe)) {
- case 1255:
- case 1270 ... 2004:
+ if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
- break;
- default:
+ }
+
+ if (!state_table) {
xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
return;
@@ -1634,7 +1649,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- if (lrc->bo && lrc->bo->vm)
+ if (lrc->bo->vm)
xe_vm_get(lrc->bo->vm);
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
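The Wa_14019789679 comment above hinges on one failure shape: application contexts begin as a copy of the default LRC, and any state the default image never programs is silently inherited from whichever context ran last. A toy illustration of that inheritance; every name here is hypothetical.

#include <stdio.h>

/* Hypothetical "LRC": a register image restored on context switch. */
struct ctx_image {
	int mesh_control;	/* stand-in for 3DSTATE_MESH_CONTROL */
	int other_state;
};

static struct ctx_image hw;	/* state currently programmed in "hardware" */

static void restore(const struct ctx_image *img, int has_mesh_control)
{
	/* An instruction missing from the image is skipped on restore,
	 * so the previous context's value stays programmed. */
	if (has_mesh_control)
		hw.mesh_control = img->mesh_control;
	hw.other_state = img->other_state;
}

int main(void)
{
	struct ctx_image def = { 0 };	/* default image */
	struct ctx_image a = def, b = def;

	a.mesh_control = 42;
	restore(&a, 1);			/* context A programs the state */
	restore(&b, 0);			/* B's image omits it... */
	printf("B inherits mesh_control=%d\n", hw.mesh_control);	/* 42 */
	return 0;
}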
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index c9f5673353ee..cfd31ae49cc1 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -10,7 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -73,6 +73,7 @@ struct xe_migrate {
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
#define MAX_NUM_PTE 512
+#define IDENTITY_OFFSET 256ULL
/*
* Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
@@ -84,15 +85,14 @@ struct xe_migrate {
#define MAX_PTE_PER_SDI 0x1FE
/**
- * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
* @tile: The tile.
*
- * Returns the default migrate engine of this tile.
- * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ * Returns the default migrate exec queue of this tile.
*
- * Return: The default migrate engine
+ * Return: The default migrate exec queue
*/
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
{
return tile->migrate->q;
}
@@ -121,14 +121,64 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
return (slot + 1ULL) << xe_pt_shift(level + 1);
}
-static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
{
/*
* Remove the DPA to get a correct offset into identity table for the
* migrate offset
*/
+ u64 identity_offset = IDENTITY_OFFSET;
+
+ if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
+ identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+
addr -= xe->mem.vram.dpa_base;
- return addr + (256ULL << xe_pt_shift(2));
+ return addr + (identity_offset << xe_pt_shift(2));
+}
+
+static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
+ u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
+{
+ u64 pos, ofs, flags;
+ u64 entry;
+ /* XXX: Unclear if this should be usable_size? */
+ u64 vram_limit = xe->mem.vram.actual_physical_size +
+ xe->mem.vram.dpa_base;
+ u32 level = 2;
+
+ ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+ true, 0);
+
+ xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+
+ /*
+ * Use 1GB pages when possible; the last chunk always uses 2M
+ * pages, as mixing reserved memory (stolen, WOCPM) with a single
+ * mapping is not allowed on certain platforms.
+ */
+ for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ pos += SZ_1G, ofs += 8) {
+ if (pos + SZ_1G >= vram_limit) {
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
+ pat_index);
+ xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
+
+ flags = vm->pt_ops->pte_encode_addr(xe, 0,
+ pat_index,
+ level - 1,
+ true, 0);
+
+ for (ofs = pt_2m_ofs; pos < vram_limit;
+ pos += SZ_2M, ofs += 8)
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ break; /* Ensure pos == vram_limit assert correct */
+ }
+
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ }
+
+ xe_assert(xe, pos == vram_limit);
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
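For scale, the offset arithmetic above can be worked through with toy numbers: the plain identity map begins at entry IDENTITY_OFFSET (256, i.e. 256 GiB assuming xe_pt_shift(2) == 30 so each level-2 entry spans 1 GiB), and the compressed-PAT copy begins one VRAM's worth of 1 GiB entries later. All concrete values below are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SZ_1G (1ull << 30)

int main(void)
{
	uint64_t identity_offset = 256;			/* IDENTITY_OFFSET */
	uint64_t vram_size = 16 * SZ_1G;		/* assumed VRAM size */
	uint64_t dpa_base = 0x100000000ull;		/* assumed DPA base */
	uint64_t addr = dpa_base + 0x2000;		/* some VRAM address */

	/* Plain identity map: VRAM appears at 256 GiB in the migrate VM. */
	uint64_t plain = (addr - dpa_base) + (identity_offset << 30);

	/* Compressed-PAT map follows: 256 + ceil(16 GiB / 1 GiB) = 272. */
	uint64_t comp_base = identity_offset + (vram_size + SZ_1G - 1) / SZ_1G;
	uint64_t comp = (addr - dpa_base) + (comp_base << 30);

	printf("plain 0x%llx, compressed 0x%llx\n",
	       (unsigned long long)plain, (unsigned long long)comp);
	/* plain 0x4000002000, compressed 0x4400002000 */
	return 0;
}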
@@ -137,11 +187,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
- u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level,
- num_setup = num_level + 1;
+ u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
+#define VRAM_IDENTITY_MAP_COUNT 2
+ u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
+#undef VRAM_IDENTITY_MAP_COUNT
u32 map_ofs, level, i;
struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
- u64 entry, pt30_ofs;
+ u64 entry, pt29_ofs;
/* Can't bump NUM_PT_SLOTS too high */
BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
@@ -161,9 +213,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (IS_ERR(bo))
return PTR_ERR(bo);
- /* PT31 reserved for 2M identity map */
- pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt30_ofs, pat_index);
+ /* PT30 & PT31 reserved for 2M identity map */
+ pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -215,12 +267,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
}
}
@@ -254,55 +306,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pos, ofs, flags;
- /* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
-
- level = 2;
- ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
- true, 0);
+ u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
+ pat_index, pt30_ofs);
+ xe_assert(xe, xe->mem.vram.actual_physical_size <=
+ (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
- * Use 1GB pages when possible, last chunk always use 2M
- * pages as mixing reserved memory (stolen, WOCPM) with a single
- * mapping is not allowed on certain platforms.
+ * Identity map the entire vram for compressed pat_index for xe2+
+ * if flat ccs is enabled.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
- pos += SZ_1G, ofs += 8) {
- if (pos + SZ_1G >= vram_limit) {
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
-
- entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs,
- pat_index);
- xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
-
- flags = vm->pt_ops->pte_encode_addr(xe, 0,
- pat_index,
- level - 1,
- true, 0);
-
- for (ofs = pt31_ofs; pos < vram_limit;
- pos += SZ_2M, ofs += 8)
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
- break; /* Ensure pos == vram_limit assert correct */
- }
-
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
+ u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
+ u64 vram_offset = IDENTITY_OFFSET +
+ DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+
+ xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
+ IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
+ comp_pat_index, pt31_ofs);
}
-
- xe_assert(xe, pos == vram_limit);
}
/*
* Example layout created above, with root level = 3:
* [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
* [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
- * [PT9...PT27]: Userspace PT's for VM_BIND, 4 KiB PTE's
- * [PT28 = PDE 0] [PT29 = PDE 1] [PT30 = PDE 2] [PT31 = 2M vram identity map]
+ * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
+ * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
*
* This makes the lowest part of the VM point to the pagetables.
* Hence the lowest 2M in the vm should point to itself, with a few writes
@@ -348,6 +381,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
return logical_mask;
}
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+ return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
/**
* xe_migrate_init() - Initialize a migrate context
* @tile: Back-pointer to the tile we're initializing for.
@@ -404,7 +442,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
}
if (IS_ERR(m->q)) {
xe_vm_close_and_put(vm);
@@ -421,7 +459,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
return ERR_PTR(err);
if (IS_DGFX(xe)) {
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
/* min chunk size corresponds to 4K of CCS Metadata */
m->min_chunk_size = SZ_4K * SZ_64K /
xe_device_ccs_bytes(xe, SZ_64K);
@@ -475,20 +513,26 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
return cur->size >= size;
}
+#define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
+#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
+
static u32 pte_update_size(struct xe_migrate *m,
- bool is_vram,
+ u32 flags,
struct ttm_resource *res,
struct xe_res_cursor *cur,
u64 *L0, u64 *L0_ofs, u32 *L0_pt,
u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
u32 cmds = 0;
+ bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
+ bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
*L0_pt = pt_ofs;
if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
/* Offset into identity map. */
*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
- cur->start + vram_region_gpu_offset(res));
+ cur->start + vram_region_gpu_offset(res),
+ is_comp_pte);
cmds += cmd_size;
} else {
/* Clip L0 to available size */
@@ -661,7 +705,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
u32 flush_flags = 0;
- if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
+ if (!copy_ccs && dst_is_indirect) {
/*
* If the src is already in vram, then it should already
* have been cleared by us, or has been populated by the
@@ -737,6 +781,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
+ bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
@@ -763,10 +809,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
- u32 flush_flags;
+ u32 flush_flags = 0;
u32 update_idx;
u64 ccs_ofs, ccs_size;
u32 ccs_pt;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -779,17 +826,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0 = min(src_L0, dst_L0);
- batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
+ pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
- batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
&dst_L0_ofs, &dst_L0_pt, 0,
avail_pts, avail_pts);
if (copy_system_ccs) {
ccs_size = xe_device_ccs_bytes(xe, src_L0);
- batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
2 * avail_pts,
avail_pts);
@@ -798,7 +848,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -827,11 +877,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
- IS_DGFX(xe) ? src_is_vram : src_is_pltt,
- dst_L0_ofs,
- IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
- src_L0, ccs_ofs, copy_ccs);
+ if (xe_migrate_needs_ccs_emit(xe))
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ IS_DGFX(xe) ? src_is_vram : src_is_pltt,
+ dst_L0_ofs,
+ IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
+ src_L0, ccs_ofs, copy_ccs);
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
@@ -986,9 +1037,11 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
* @m: The migration context.
* @bo: The buffer object @dst is currently bound to.
* @dst: The dst TTM resource to be cleared.
+ * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
*
- * Clear the contents of @dst to zero. On flat CCS devices,
- * the CCS metadata is cleared to zero as well on VRAM destinations.
+ * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
+ * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
+ * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear the BO data as well as the CCS metadata.
* TODO: Eliminate the @bo argument.
*
* Return: Pointer to a dma_fence representing the last clear batch, or
@@ -997,18 +1050,27 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
*/
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_bo *bo,
- struct ttm_resource *dst)
+ struct ttm_resource *dst,
+ u32 clear_flags)
{
bool clear_vram = mem_type_is_vram(dst->mem_type);
+ bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
+ bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
+ bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
u64 size = bo->size;
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
+ if (WARN_ON(!clear_bo_data && !clear_ccs))
+ return NULL;
+
+ if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
+ clear_only_system_ccs = true;
+
if (!clear_vram)
xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
else
@@ -1022,6 +1084,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_sched_job *job;
struct xe_bb *bb;
u32 batch_size, update_idx;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -1029,13 +1092,14 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_L0 = xe_migrate_res_sizes(m, &src_it);
/* Calculate final sizes and batch size.. */
+ pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
batch_size = 2 +
- pte_update_size(m, clear_vram, src, &src_it,
+ pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
- clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
+ clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
avail_pts);
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
batch_size += EMIT_COPY_CCS_DW;
/* Clear commands */
@@ -1054,16 +1118,16 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
xe_res_next(&src_it, clear_L0);
else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
&src_it, clear_L0, dst);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- if (!clear_system_ccs)
+ if (clear_bo_data)
emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
- if (xe_device_has_flat_ccs(xe)) {
+ if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
flush_flags = MI_FLUSH_DW_CCS;
@@ -1119,13 +1183,14 @@ err_sync:
return ERR_PTR(err);
}
- if (clear_system_ccs)
+ if (clear_ccs)
bo->ccs_cleared = true;
return fence;
}
static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+ const struct xe_vm_pgtable_update_op *pt_op,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
@@ -1146,7 +1211,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
- XE_PAGE_SIZE));
+ XE_PAGE_SIZE), false);
do {
u64 addr = ppgtt_ofs + ofs * 8;
@@ -1160,8 +1225,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
- ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
- update);
+ if (pt_op->bind)
+ ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
+ else
+ ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
bb->len += chunk * 2;
ofs += chunk;
@@ -1186,114 +1255,58 @@ struct migrate_test_params {
static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
- struct xe_vm *vm, struct xe_bo *bo,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates, bool wait_vm,
struct xe_migrate_pt_update *pt_update)
{
XE_TEST_DECLARE(struct migrate_test_params *test =
to_migrate_test_params
(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
- struct dma_fence *fence;
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
int err;
- u32 i;
+ u32 i, j;
if (XE_TEST_ONLY(test && test->force_gpu))
return ERR_PTR(-ETIME);
- if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL))
- return ERR_PTR(-ETIME);
-
- if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP))
- return ERR_PTR(-ETIME);
-
if (ops->pre_commit) {
pt_update->job = NULL;
err = ops->pre_commit(pt_update);
if (err)
return ERR_PTR(err);
}
- for (i = 0; i < num_updates; i++) {
- const struct xe_vm_pgtable_update *update = &updates[i];
-
- ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
- update->ofs, update->qwords, update);
- }
- if (vm) {
- trace_xe_vm_cpu_bind(vm);
- xe_device_wmb(vm->xe);
- }
-
- fence = dma_fence_get_stub();
-
- return fence;
-}
-
-static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
-{
- struct dma_fence *fence;
- int i;
-
- for (i = 0; i < num_syncs; i++) {
- fence = syncs[i].fence;
-
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- return false;
- }
- if (q) {
- fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- dma_fence_put(fence);
- return false;
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ const struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ for (j = 0; j < pt_op->num_entries; j++) {
+ const struct xe_vm_pgtable_update *update =
+ &pt_op->entries[j];
+
+ if (pt_op->bind)
+ ops->populate(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords,
+ update);
+ else
+ ops->clear(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords, update);
}
- dma_fence_put(fence);
}
- return true;
+ trace_xe_vm_cpu_bind(vm);
+ xe_device_wmb(vm->xe);
+
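+ /* The stub fence is already signaled, so CPU updates complete synchronously */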
+ return dma_fence_get_stub();
}
-/**
- * xe_migrate_update_pgtables() - Pipelined page-table update
- * @m: The migrate context.
- * @vm: The vm we'll be updating.
- * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @q: The exec queue to be used for the update or NULL if the default
- * migration engine is to be used.
- * @updates: An array of update descriptors.
- * @num_updates: Number of descriptors in @updates.
- * @syncs: Array of xe_sync_entry to await before updating. Note that waits
- * will block the engine timeline.
- * @num_syncs: Number of entries in @syncs.
- * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
- * pointers to callback functions and, if subclassed, private arguments to
- * those.
- *
- * Perform a pipelined page-table update. The update descriptors are typically
- * built under the same lock critical section as a call to this function. If
- * using the default engine for the updates, they will be performed in the
- * order they grab the job_mutex. If different engines are used, external
- * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
- * touch the same page-table, which might be a higher-level page-directory.
- * If no pipelining is needed, then updates may be performed by the cpu.
- *
- * Return: A dma_fence that, when signaled, indicates the update completion.
- */
-struct dma_fence *
-xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_migrate_pt_update *pt_update)
+static struct dma_fence *
+__xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update,
+ struct xe_vm_pgtable_update_ops *pt_update_ops)
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_tile *tile = m->tile;
@@ -1302,59 +1315,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_sched_job *job;
struct dma_fence *fence;
struct drm_suballoc *sa_bo = NULL;
- struct xe_vma *vma = pt_update->vma;
struct xe_bb *bb;
- u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 num_updates = 0, current_update = 0;
u64 addr;
int err = 0;
- bool usm = !q && xe->info.has_usm;
- bool first_munmap_rebind = vma &&
- vma->gpuva.flags & XE_VMA_FIRST_REBIND;
- struct xe_exec_queue *q_override = !q ? m->q : q;
- u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ bool is_migrate = pt_update_ops->q == m->q;
+ bool usm = is_migrate && xe->info.has_usm;
+
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
- /* Use the CPU if no in syncs and engine is idle */
- if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
- fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
- num_updates,
- first_munmap_rebind,
- pt_update);
- if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
- return fence;
+ num_updates += pt_op->num_entries;
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
+ MAX_PTE_PER_SDI);
+
+ /* align noop + MI_STORE_DATA_IMM cmd prefix */
+ batch_size += 4 * num_cmds + updates[j].qwords * 2;
+ }
}
/* fixed + PTE entries */
if (IS_DGFX(xe))
- batch_size = 2;
+ batch_size += 2;
else
- batch_size = 6 + num_updates * 2;
+ batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
+ num_updates * 2;
- for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
-
- /* align noop + MI_STORE_DATA_IMM cmd prefix */
- batch_size += 4 * num_cmds + updates[i].qwords * 2;
- }
-
- /*
- * XXX: Create temp bo to copy from, if batch_size becomes too big?
- *
- * Worst case: Sum(2 * (each lower level page size) + (top level page size))
- * Should be reasonably bound..
- */
- xe_tile_assert(tile, batch_size < SZ_128K);
-
- bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+ bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u32 ptes, ofs;
+
ppgtt_ofs = NUM_KERNEL_PDE - 1;
- if (q) {
- xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+ if (!is_migrate) {
+ u32 num_units = DIV_ROUND_UP(num_updates,
+ NUM_VMUSA_WRITES_PER_UNIT);
- sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+ if (num_units > m->vm_update_sa.size) {
+ err = -ENOBUFS;
+ goto err_bb;
+ }
+ sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
@@ -1370,18 +1377,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
}
/* Map our PT's to gtt */
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
- bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
- bb->cs[bb->len++] = 0; /* upper_32_bits */
-
- for (i = 0; i < num_updates; i++) {
- struct xe_bo *pt_bo = updates[i].pt_bo;
+ i = 0;
+ j = 0;
+ ptes = num_updates;
+ ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+ while (ptes) {
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ u32 idx = 0;
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM |
+ MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = ofs;
+ bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+ for (; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ if (idx == chunk)
+ goto next_cmd;
+
+ xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+ /* Map a PT at most once */
+ if (pt_bo->update_index < 0)
+ pt_bo->update_index = current_update;
+
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+ XE_CACHE_WB, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ j = 0;
+ }
- addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
+next_cmd:
+ ptes -= chunk;
+ ofs += chunk * sizeof(u64);
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1389,19 +1427,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
- &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ write_pgtable(tile, bb, addr +
+ pt_bo->update_index * XE_PAGE_SIZE,
+ pt_op, &updates[j], pt_update);
+ }
+ }
} else {
/* phys pages, no preamble required */
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, 0, &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j)
+ write_pgtable(tile, bb, 0, pt_op, &updates[j],
+ pt_update);
+ }
}
- job = xe_bb_create_migration_job(q ?: m->q, bb,
+ job = xe_bb_create_migration_job(pt_update_ops->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@@ -1409,46 +1464,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
goto err_sa;
}
- /* Wait on BO move */
- if (bo) {
- err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL);
- if (err)
- goto err_job;
- }
-
- /*
- * Munmap style VM unbind, need to wait for all jobs to be complete /
- * trigger preempts before moving forward
- */
- if (first_munmap_rebind) {
- err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- if (err)
- goto err_job;
- }
-
- err = xe_sched_job_last_fence_add_dep(job, vm);
- for (i = 0; !err && i < num_syncs; i++)
- err = xe_sync_entry_add_deps(&syncs[i], job);
-
- if (err)
- goto err_job;
-
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
if (err)
goto err_job;
}
- if (!q)
+ if (is_migrate)
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- if (!q)
+ if (is_migrate)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, fence);
@@ -1466,6 +1495,40 @@ err_bb:
}
/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update)
+
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
+ struct dma_fence *fence;
+
+ fence = xe_migrate_update_pgtables_cpu(m, pt_update);
+
+ /* -ETIME indicates a job is needed, anything else is a legitimate error */
+ if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
+ return fence;
+
+ return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
+}
+
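+/*
+ * Hedged usage sketch (illustrative only, not part of this patch): a
+ * caller that has built a struct xe_migrate_pt_update "update" (names
+ * assumed) would typically consume the returned fence as:
+ *
+ * fence = xe_migrate_update_pgtables(tile->migrate, &update);
+ * if (IS_ERR(fence))
+ * return PTR_ERR(fence);
+ * dma_fence_wait(fence, false);
+ * dma_fence_put(fence);
+ */
+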
+/**
* xe_migrate_wait() - Complete all operations using the xe_migrate context
* @m: Migrate context to wait for.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 951f19318ea4..0109866e398a 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -6,7 +6,7 @@
#ifndef _XE_MIGRATE_
#define _XE_MIGRATE_
-#include <drm/drm_mm.h>
+#include <linux/types.h>
struct dma_fence;
struct iosys_map;
@@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops {
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);
+ /**
+ * @clear: Clear ptes from a command buffer or page-table.
+ * @pt_update: Embeddable callback argument.
+ * @tile: The tile for the current operation.
+ * @map: struct iosys_map into the memory to be populated.
+ * @pos: If @map is NULL, a direct pointer into the memory to be cleared.
+ * @ofs: qword offset into @map, unused if @map is NULL.
+ * @num_qwords: Number of qwords to write.
+ * @update: Information about the PTEs to be cleared.
+ *
+ * This interface is intended to be used as a callback into the
+ * page-table system to clear PTEs from command buffers or shared
+ * page-tables.
+ */
+ void (*clear)(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map,
+ void *pos, u32 ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update);
/**
* @pre_commit: Callback to be called just before arming the
@@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops {
struct xe_migrate_pt_update {
/** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
const struct xe_migrate_pt_update_ops *ops;
- /** @vma: The vma we're updating the pagetable for. */
- struct xe_vma *vma;
+ /** @vops: VMA operations */
+ struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
- /** @start: Start of update for the range fence */
- u64 start;
- /** @last: Last of update for the range fence */
- u64 last;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
@@ -88,23 +102,22 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
+#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
+#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
+ XE_MIGRATE_CLEAR_FLAG_CCS_DATA)
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_bo *bo,
- struct ttm_resource *dst);
+ struct ttm_resource *dst,
+ u32 clear_flags);
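+
+/*
+ * Hedged usage sketch (illustrative, not from this patch): clearing both
+ * the BO data and the CCS metadata in one call, assuming a migrate
+ * context "m" and a VRAM-backed "bo":
+ *
+ * fence = xe_migrate_clear(m, bo, bo->ttm.resource,
+ * XE_MIGRATE_CLEAR_FLAG_FULL);
+ */
+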
struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_migrate_pt_update *pt_update);
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index aa68cac9fdf8..3fd462fda625 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -29,34 +29,60 @@ static void tiles_fini(void *arg)
struct xe_tile *tile;
int id;
- for_each_tile(tile, xe, id)
- if (tile != xe_device_get_root_tile(xe))
- tile->mmio.regs = NULL;
+ for_each_remote_tile(tile, xe, id)
+ tile->mmio.regs = NULL;
}
-int xe_mmio_probe_tiles(struct xe_device *xe)
+/*
+ * On multi-tile devices, partition the BAR space for MMIO on each tile,
+ * possibly accounting for a register override of the number of tiles
+ * available. The resulting memory layout is as below:
+ *
+ * .----------------------. <- tile_count * tile_mmio_size
+ * | .... |
+ * |----------------------| <- 2 * tile_mmio_size
+ * | tile1->mmio.regs |
+ * |----------------------| <- 1 * tile_mmio_size
+ * | tile0->mmio.regs |
+ * '----------------------' <- 0MB
+ */
+static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
- size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
- u8 id, tile_count = xe->info.tile_count;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
struct xe_tile *tile;
void __iomem *regs;
- u32 mtcfg;
+ u8 id;
- if (tile_count == 1)
- goto add_mmio_ext;
+ /*
+ * Nothing to be done as tile 0 has already been set up with the
+ * entire BAR mapped - see xe_mmio_init()
+ */
+ if (xe->info.tile_count == 1)
+ return;
+ /* Possibly override the number of tiles based on the configuration register */
if (!xe->info.skip_mtcfg) {
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ u8 tile_count;
+ u32 mtcfg;
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+ * is fine as it's going to the root gt, which is guaranteed to
+ * have been initialized earlier in xe_mmio_init()
+ */
mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but should be impossible
- * with multi-tile for now.
+ * FIXME: Needs some work for standalone media, but
+ * should be impossible with multi-tile for now: a
+ * multi-tile platform with standalone media doesn't
+ * exist
*/
xe->info.gt_count = xe->info.tile_count;
}
@@ -68,23 +94,51 @@ int xe_mmio_probe_tiles(struct xe_device *xe)
tile->mmio.regs = regs;
regs += tile_mmio_size;
}
+}
-add_mmio_ext:
- /*
- * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
- * When supported, there could be an additional contiguous multi-tile MMIO extension
- * space ON TOP of it, and hence the necessity for distinguished MMIO spaces.
- */
- if (xe->info.has_mmio_ext) {
- regs = xe->mmio.regs + tile_mmio_size * tile_count;
+/*
+ * On top of all the multi-tile MMIO space there can be a platform-dependent
+ * extension for each tile, resulting in a layout like below:
+ *
+ * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
+ * | .... |
+ * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
+ * | tile1->mmio_ext.regs |
+ * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
+ * | tile0->mmio_ext.regs |
+ * |======================| <- ext_base = tile_count * tile_mmio_size
+ * | |
+ * | mmio.regs |
+ * | |
+ * '----------------------' <- 0MB
+ *
+ * Set up the tile[]->mmio_ext pointers/sizes.
+ */
+static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
+ size_t tile_mmio_ext_size)
+{
+ struct xe_tile *tile;
+ void __iomem *regs;
+ u8 id;
- for_each_tile(tile, xe, id) {
- tile->mmio_ext.size = tile_mmio_ext_size;
- tile->mmio_ext.regs = regs;
+ if (!xe->info.has_mmio_ext)
+ return;
- regs += tile_mmio_ext_size;
- }
+ regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
+ for_each_tile(tile, xe, id) {
+ tile->mmio_ext.size = tile_mmio_ext_size;
+ tile->mmio_ext.regs = regs;
+ regs += tile_mmio_ext_size;
}
+}
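+
+/*
+ * Worked example (illustrative sizes only): with tile_count = 2,
+ * tile_mmio_size = 16M and tile_mmio_ext_size = 4M, ext_base is 32M, so
+ * tile0->mmio_ext.regs points at offset 32M and tile1->mmio_ext.regs at 36M.
+ */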
+
+int xe_mmio_probe_tiles(struct xe_device *xe)
+{
+ size_t tile_mmio_size = SZ_16M;
+ size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
+
+ mmio_multi_tile_setup(xe, tile_mmio_size);
+ mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);
return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
@@ -174,7 +228,11 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+
+ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_write32(gt, reg, val);
+ else
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
@@ -277,37 +335,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-/**
- * xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
- * @reg: register to read value from
- * @mask: mask to be applied to the value read from the register
- * @val: desired value after applying the mask
- * @timeout_us: time out after this period of time. Wait logic tries to be
- * smart, applying an exponential backoff until @timeout_us is reached.
- * @out_val: if not NULL, points where to store the last unmasked value
- * @atomic: needs to be true if calling from an atomic context
- *
- * This function polls for the desired masked value and returns zero on success
- * or -ETIMEDOUT if timed out.
- *
- * Note that @timeout_us represents the minimum amount of time to wait before
- * giving up. The actual time taken by this function can be a little more than
- * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
- * it is possible that this function succeeds even after @timeout_us has passed.
- */
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic)
+static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
int ret = -ETIMEDOUT;
s64 wait = 10;
u32 read;
+ bool check;
for (;;) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val) {
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check) {
ret = 0;
break;
}
@@ -328,7 +373,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
if (ret != 0) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val)
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check)
ret = 0;
}
@@ -339,62 +389,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
}
/**
- * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
* @gt: MMIO target GT
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
- * @val: value to match after applying the mask
+ * @val: desired value after applying the mask
* @timeout_us: time out after this period of time. Wait logic tries to be
* smart, applying an exponential backoff until @timeout_us is reached.
* @out_val: if not NULL, points where to store the last unmasked value
* @atomic: needs to be true if calling from an atomic context
*
- * This function polls for a masked value to change from a given value and
- * returns zero on success or -ETIMEDOUT if timed out.
+ * This function polls for the desired masked value and returns zero on success
+ * or -ETIMEDOUT if timed out.
*
* Note that @timeout_us represents the minimum amount of time to wait before
* giving up. The actual time taken by this function can be a little more than
* @timeout_us for different reasons, specially in non-atomic contexts. Thus,
* it is possible that this function succeeds even after @timeout_us has passed.
*/
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic)
+{
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+}
+
+/**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+ * @timeout_us: time out after this period of time
+ * @out_val: if not NULL, points where to store the last unmasked value
+ * @atomic: needs to be true if calling from an atomic context
+ *
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- ktime_t cur = ktime_get_raw();
- const ktime_t end = ktime_add_us(cur, timeout_us);
- int ret = -ETIMEDOUT;
- s64 wait = 10;
- u32 read;
-
- for (;;) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val) {
- ret = 0;
- break;
- }
-
- cur = ktime_get_raw();
- if (!ktime_before(cur, end))
- break;
-
- if (ktime_after(ktime_add_us(cur, wait), end))
- wait = ktime_us_delta(end, cur);
-
- if (atomic)
- udelay(wait);
- else
- usleep_range(wait, wait << 1);
- wait <<= 1;
- }
-
- if (ret != 0) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val)
- ret = 0;
- }
-
- if (out_val)
- *out_val = read;
-
- return ret;
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
}
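+
+/*
+ * Hedged usage sketch (illustrative; STATUS_REG and READY_BIT are
+ * hypothetical names): poll until the ready bit is set, giving up after
+ * 10ms, from non-atomic context:
+ *
+ * u32 val;
+ * int err = xe_mmio_wait32(gt, STATUS_REG, READY_BIT, READY_BIT,
+ * 10000, &val, false);
+ * if (err == -ETIMEDOUT)
+ * drm_err(&gt_to_xe(gt)->drm, "timed out, last 0x%x\n", val);
+ */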
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 6ae0cc32c651..26551410ecc8 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -22,7 +22,6 @@ u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
-int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic);
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 499540add465..bfc3deebdaa2 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -8,14 +8,17 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <drm/drm_module.h>
+
#include "xe_drv.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
+#include "xe_pm.h"
#include "xe_observation.h"
#include "xe_sched_job.h"
struct xe_modparam xe_modparam = {
- .enable_display = true,
+ .probe_display = true,
.guc_log_level = 5,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
@@ -25,8 +28,8 @@ struct xe_modparam xe_modparam = {
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
-module_param_named(enable_display, xe_modparam.enable_display, bool, 0444);
-MODULE_PARM_DESC(enable_display, "Enable display");
+module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
+MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
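+/*
+ * e.g. loading with "modprobe xe probe_display=0" (or booting with
+ * "xe.probe_display=0") leaves the display hardware untouched.
+ */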
module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
@@ -61,13 +64,28 @@ module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
"Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+static int xe_check_nomodeset(void)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return 0;
+}
+
struct init_funcs {
int (*init)(void);
void (*exit)(void);
};
+static void xe_dummy_exit(void)
+{
+}
+
static const struct init_funcs init_funcs[] = {
{
+ .init = xe_check_nomodeset,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
@@ -83,17 +101,41 @@ static const struct init_funcs init_funcs[] = {
.init = xe_observation_sysctl_register,
.exit = xe_observation_sysctl_unregister,
},
+ {
+ .init = xe_pm_module_init,
+ .exit = xe_dummy_exit,
+ },
};
+static int __init xe_call_init_func(unsigned int i)
+{
+ if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
+ return 0;
+ if (!init_funcs[i].init)
+ return 0;
+
+ return init_funcs[i].init();
+}
+
+static void xe_call_exit_func(unsigned int i)
+{
+ if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
+ return;
+ if (!init_funcs[i].exit)
+ return;
+
+ init_funcs[i].exit();
+}
+
static int __init xe_init(void)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
- err = init_funcs[i].init();
+ err = xe_call_init_func(i);
if (err) {
while (i--)
- init_funcs[i].exit();
+ xe_call_exit_func(i);
return err;
}
}
@@ -106,7 +148,7 @@ static void __exit xe_exit(void)
int i;
for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
- init_funcs[i].exit();
+ xe_call_exit_func(i);
}
module_init(xe_init);
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 61a0d28a28c8..161a5e6f717f 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -11,7 +11,7 @@
/* Module modprobe variables */
struct xe_modparam {
bool force_execlist;
- bool enable_display;
+ bool probe_display;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 6d69f751bf78..0369cc016f6a 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
@@ -440,6 +440,10 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
val = __format_to_oactrl(format, regs->oa_ctrl_counter_select_mask) |
__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;
+ if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
+ stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ val |= OAG_OACONTROL_OA_PES_DISAG_EN;
+
xe_mmio_write32(stream->gt, regs->oa_ctrl, val);
}
@@ -641,7 +645,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
u32 offset = xe_bo_ggtt_addr(lrc->bo);
do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2;
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
bb->cs[bb->len++] = 0;
bb->cs[bb->len++] = flex->value;
@@ -1244,8 +1248,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
VM_MAYWRITE | VM_MAYEXEC);
- xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
- (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+ xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma));
for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
PAGE_SIZE, vma->vm_page_prot);
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 540c3ec53a6d..8862eca73fbe 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index a78c92a44ec2..8ec1b84cbb9e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -6,7 +6,7 @@
#include <linux/errno.h>
#include <linux/sysctl.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_observation.h"
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 722278cc23fc..f291a1730024 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -5,7 +5,7 @@
#include "xe_pat.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 732ee0d02124..937c3e064f0d 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -59,6 +59,7 @@ struct xe_device_desc {
u8 has_display:1;
u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
u8 has_llc:1;
u8 has_mmio_ext:1;
u8 has_sriov:1;
@@ -337,14 +338,13 @@ static const struct xe_device_desc mtl_desc = {
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
.has_display = true,
- .require_force_probe = true,
};
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
.has_display = true,
- .require_force_probe = true,
+ .has_heci_cscfi = 1,
};
#undef PLATFORM
@@ -606,6 +606,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
+ xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
xe->info.has_mmio_ext = desc->has_mmio_ext;
xe->info.has_sriov = desc->has_sriov;
@@ -613,9 +614,9 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
- xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
- xe_modparam.enable_display &&
- desc->has_display;
+ xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
+ xe_modparam.probe_display &&
+ desc->has_display;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
if (err)
@@ -744,7 +745,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
{
struct xe_device *xe;
- xe = pci_get_drvdata(pdev);
+ xe = pdev_to_xe_device(pdev);
if (!xe) /* driver load aborted, nothing to cleanup */
return;
@@ -792,7 +793,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(xe))
return PTR_ERR(xe);
- pci_set_drvdata(pdev, xe);
+ pci_set_drvdata(pdev, &xe->drm);
xe_pm_assert_unbounded_bridge(xe);
subplatform_desc = find_subplatform(xe, desc);
@@ -815,7 +816,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
+ drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
desc->platform_name,
subplatform_desc ? subplatform_desc->name : "",
xe->info.devid, xe->info.revid,
@@ -826,14 +827,13 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xe->info.media_name,
xe->info.media_verx100 / 100,
xe->info.media_verx100 % 100,
- str_yes_no(xe->info.enable_display),
+ str_yes_no(xe->info.probe_display),
xe->info.dma_mask_size, xe->info.tile_count,
- xe->info.has_heci_gscfi);
+ xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);
- drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
+ drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
xe_step_name(xe->info.step.graphics),
xe_step_name(xe->info.step.media),
- xe_step_name(xe->info.step.display),
xe_step_name(xe->info.step.basedie));
drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 9a3f618d22dc..7cf2160fe040 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -20,6 +20,7 @@
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
+#include "xe_trace.h"
#include "xe_wa.h"
/**
@@ -69,12 +70,42 @@
*/
#ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_pm_runtime_lockdep_map = {
- .name = "xe_pm_runtime_lockdep_map"
+static struct lockdep_map xe_pm_runtime_d3cold_map = {
+ .name = "xe_rpm_d3cold_map"
+};
+
+static struct lockdep_map xe_pm_runtime_nod3cold_map = {
+ .name = "xe_rpm_nod3cold_map"
};
#endif
/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
+{
+ return !xe->d3cold.capable && !xe->info.has_sriov;
+}
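+
+/*
+ * For example, a d3cold-capable or SR-IOV device is not reclaim-safe, so
+ * the helpers below select the stricter xe_pm_runtime_d3cold_map for it.
+ */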
+
+static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
+{
+ lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
+static void xe_rpm_lockmap_release(const struct xe_device *xe)
+{
+ lock_map_release(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
+/**
* xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
* @xe: xe device instance
*
@@ -87,6 +118,7 @@ int xe_pm_suspend(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Suspending device\n");
+ trace_xe_pm_suspend(xe, __builtin_return_address(0));
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -131,6 +163,7 @@ int xe_pm_resume(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Resuming device\n");
+ trace_xe_pm_resume(xe, __builtin_return_address(0));
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -326,6 +359,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
@@ -350,7 +384,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
* annotation here and in xe_pm_runtime_get() lockdep will see
* the potential lock inversion and give us a nice splat.
*/
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -362,9 +396,9 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_bo_runtime_pm_release_mmap_offset(bo);
mutex_unlock(&xe->mem_access.vram_userfault.lock);
- if (xe->d3cold.allowed) {
- xe_display_pm_suspend(xe, true);
+ xe_display_pm_runtime_suspend(xe);
+ if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
goto out;
@@ -382,8 +416,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
out:
if (err)
- xe_display_pm_resume(xe, true);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_display_pm_runtime_resume(xe);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
@@ -400,10 +434,11 @@ int xe_pm_runtime_resume(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
@@ -426,14 +461,16 @@ int xe_pm_runtime_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
+ xe_display_pm_runtime_resume(xe);
+
if (xe->d3cold.allowed) {
- xe_display_pm_resume(xe, true);
err = xe_bo_restore_user(xe);
if (err)
goto out;
}
+
out:
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
@@ -447,15 +484,37 @@ out:
* stuff that can happen inside the runtime_resume callback by acquiring
* a dummy lock (it doesn't protect anything and gets compiled out on
* non-debug builds). Lockdep then only needs to see the
- * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
* For example if the (callers_locks) are ever grabbed in the
* runtime_resume callback, lockdep should give us a nice splat.
*/
-static void pm_runtime_lockdep_prime(void)
+static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
+ xe_rpm_lockmap_release(xe);
+}
+
+/*
+ * Prime the lockdep maps for known locking orders that need to
+ * be supported but that may not always occur on all systems.
+ */
+static void xe_pm_runtime_lockdep_prime(void)
+{
+ struct dma_resv lockdep_resv;
+
+ dma_resv_init(&lockdep_resv);
+ lock_map_acquire(&xe_pm_runtime_d3cold_map);
+ /* D3Cold takes the dma_resv locks to evict bos */
+ dma_resv_lock(&lockdep_resv, NULL);
+ dma_resv_unlock(&lockdep_resv);
+ lock_map_release(&xe_pm_runtime_d3cold_map);
+
+ /* Shrinkers might like to wake up the device under reclaim. */
+ fs_reclaim_acquire(GFP_KERNEL);
+ lock_map_acquire(&xe_pm_runtime_nod3cold_map);
+ lock_map_release(&xe_pm_runtime_nod3cold_map);
+ fs_reclaim_release(GFP_KERNEL);
}
/**
@@ -464,12 +523,13 @@ static void pm_runtime_lockdep_prime(void)
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
pm_runtime_get_noresume(xe->drm.dev);
if (xe_pm_read_callback_task(xe) == current)
return;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
pm_runtime_resume(xe->drm.dev);
}
@@ -479,6 +539,7 @@ void xe_pm_runtime_get(struct xe_device *xe)
*/
void xe_pm_runtime_put(struct xe_device *xe)
{
+ trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_put_noidle(xe->drm.dev);
} else {
@@ -496,10 +557,11 @@ void xe_pm_runtime_put(struct xe_device *xe)
*/
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
if (WARN_ON(xe_pm_read_callback_task(xe) == current))
return -ELOOP;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_get_sync(xe->drm.dev);
}
@@ -533,6 +595,22 @@ bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
+/*
+ * Very unreliable! Should only be used to suppress the false positive case
+ * in the missing outer rpm protection warning.
+ */
+static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
+{
+#ifdef CONFIG_PM
+ struct device *dev = xe->drm.dev;
+
+ return dev->power.runtime_status == RPM_SUSPENDING ||
+ dev->power.runtime_status == RPM_RESUMING;
+#else
+ return false;
+#endif
+}
+
/**
* xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
* @xe: xe device instance
@@ -549,8 +627,11 @@ void xe_pm_runtime_get_noresume(struct xe_device *xe)
ref = xe_pm_runtime_get_if_in_use(xe);
- if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
+ if (!ref) {
pm_runtime_get_noresume(xe->drm.dev);
+ drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
+ "Missing outer runtime PM protection\n");
+ }
}
/**
@@ -567,7 +648,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
return true;
}
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
@@ -659,3 +740,14 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
drm_dbg(&xe->drm,
"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
+
+/**
+ * xe_pm_module_init() - Perform xe_pm specific module initialization.
+ *
+ * Return: 0 on success. Currently doesn't fail.
+ */
+int __init xe_pm_module_init(void)
+{
+ xe_pm_runtime_lockdep_prime();
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 104a21ae6dfd..998d1ed64556 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,8 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+int xe_pm_module_init(void);
#endif
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index c453f45328b1..83fbeea5aa20 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
- if (pfence->error)
+ if (pfence->error) {
dma_fence_set_error(&pfence->base, pfence->error);
- else
- q->ops->suspend_wait(q);
+ } else if (!q->ops->reset_status(q)) {
+ int err = q->ops->suspend_wait(q);
+
+ if (err)
+ dma_fence_set_error(&pfence->base, err);
+ } else {
+ dma_fence_set_error(&pfence->base, -ENOENT);
+ }
dma_fence_signal(&pfence->base);
/*
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 31a751a5de3f..d6353e8969f0 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -3,18 +3,23 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -325,6 +330,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
entry->pt = parent;
entry->flags = 0;
entry->qwords = 0;
+ entry->pt_bo->update_index = -1;
if (alloc_entries) {
entry->pt_entries = kmalloc_array(XE_PDES,
@@ -842,19 +848,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t
}
}
-static void xe_pt_abort_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
+static void xe_pt_cancel_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
u32 i, j;
for (i = 0; i < num_entries; i++) {
- if (!entries[i].pt_entries)
+ struct xe_pt *pt = entries[i].pt;
+
+ if (!pt)
continue;
- for (j = 0; j < entries[i].qwords; j++)
- xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+ if (pt->level) {
+ for (j = 0; j < entries[i].qwords; j++)
+ xe_pt_destroy(entries[i].pt_entries[j].pt,
+ xe_vma_vm(vma)->flags, NULL);
+ }
+
kfree(entries[i].pt_entries);
+ entries[i].pt_entries = NULL;
+ entries[i].qwords = 0;
}
}
@@ -864,18 +878,15 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
lockdep_assert_held(&vm->lock);
- if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
- else if (!xe_vma_is_null(vma))
+ if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
-static void xe_pt_commit_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries, bool rebind,
- struct llist_head *deferred)
+static void xe_pt_commit(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, struct llist_head *deferred)
{
u32 i, j;
@@ -883,31 +894,90 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+
+ if (!pt->level)
+ continue;
+
+ for (j = 0; j < entries[i].qwords; j++) {
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ }
+ }
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_pt *pt = entries[i].pt;
struct xe_pt_dir *pt_dir;
if (!rebind)
- pt->num_live += entries[i].qwords;
+ pt->num_live -= entries[i].qwords;
- if (!pt->level) {
- kfree(entries[i].pt_entries);
+ if (!pt->level)
continue;
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+ struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
+ }
+}
+
+static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ u32 i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
+
+ if (!rebind)
+ pt->num_live += entries[i].qwords;
+
+ if (!pt->level)
+ continue;
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ struct xe_pt *oldpte = NULL;
if (xe_pt_entry(pt_dir, j_))
- xe_pt_destroy(xe_pt_entry(pt_dir, j_),
- xe_vma_vm(vma)->flags, deferred);
+ oldpte = xe_pt_entry(pt_dir, j_);
pt_dir->children[j_] = &newpte->base;
+ entries[i].pt_entries[j].pt = oldpte;
}
- kfree(entries[i].pt_entries);
}
}
+static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ u32 i;
+
+ for (i = 0; i < num_entries; i++)
+ kfree(entries[i].pt_entries);
+}
+
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
@@ -918,20 +988,19 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
- else /* abort! */
- xe_pt_abort_bind(vma, entries, *num_entries);
return err;
}
static void xe_vm_dbg_print_entries(struct xe_device *xe,
const struct xe_vm_pgtable_update *entries,
- unsigned int num_entries)
+ unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
unsigned int i;
- vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+ vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
+ num_entries);
for (i = 0; i < num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *xe_pt = entry->pt;
@@ -952,66 +1021,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
{}
#endif
-#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-
-static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
- u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
- static u32 count;
+ int i;
- if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence = syncs[i].fence;
- uvma->userptr.divisor = divisor << 1;
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- return true;
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ return false;
}
- return false;
+ return true;
}
-#else
+static int job_test_add_deps(struct xe_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ if (!job) {
+ if (!dma_resv_test_signaled(resv, usage))
+ return -ETIME;
-static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+ return 0;
+ }
+
+ return xe_sched_job_add_deps(job, resv, usage);
+}
+
+static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
- return false;
+ struct xe_bo *bo = xe_vma_bo(vma);
+
+ xe_bo_assert_held(bo);
+
+ if (bo && !bo->vm)
+ return job_test_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+
+ return 0;
}
-#endif
+static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_sched_job *job)
+{
+ int err = 0;
-/**
- * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
- * @base: Base we derive from.
- * @bind: Whether this is a bind or an unbind operation. A bind operation
- * makes the pre-commit callback error with -EAGAIN if it detects a
- * pending invalidation.
- * @locked: Whether the pre-commit callback locked the userptr notifier lock
- * and it needs unlocking.
- */
-struct xe_pt_migrate_pt_update {
- struct xe_migrate_pt_update base;
- bool bind;
- bool locked;
-};
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_add_deps(op->map.vma, job);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_add_deps(op->remap.prev, job);
+ if (!err && op->remap.next)
+ err = vma_add_deps(op->remap.next, job);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
-/*
- * This function adds the needed dependencies to a page-table update job
- * to make sure racing jobs for separate bind engines don't race writing
- * to the same page-table range, wreaking havoc. Initially use a single
- * fence for the entire VM. An optimization would use smaller granularity.
- */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
- struct xe_range_fence_tree *rftree,
- u64 start, u64 last)
+ struct xe_vm *vm,
+ struct xe_vma_ops *vops,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_range_fence_tree *rftree)
{
struct xe_range_fence *rtfence;
struct dma_fence *fence;
- int err;
+ struct xe_vma_op *op;
+ int err = 0, i;
+
+ xe_vm_assert_held(vm);
+
+ if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
+ return -ETIME;
- rtfence = xe_range_fence_tree_first(rftree, start, last);
+ if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
+ return -ETIME;
+
+ if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
+ err = job_test_add_deps(job, xe_vm_resv(vm),
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ return err;
+ }
+
+ rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
+ pt_update_ops->last);
while (rtfence) {
fence = rtfence->fence;
@@ -1029,80 +1140,175 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
- rtfence = xe_range_fence_tree_next(rtfence, start, last);
+ rtfence = xe_range_fence_tree_next(rtfence,
+ pt_update_ops->start,
+ pt_update_ops->last);
}
- return 0;
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_add_deps(vm, op, job);
+ if (err)
+ return err;
+ }
+
+ if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
+ if (job)
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ else
+ err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
+ }
+
+ for (i = 0; job && !err && i < vops->num_syncs; i++)
+ err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+
+ return err;
}
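A note on the job argument: when job is NULL, every dependency is only tested rather than added, which is what lets a CPU-side bind proceed without a GPU job. A minimal sketch of that pattern (cpu_bind_possible() is a hypothetical helper, not driver code):

static bool cpu_bind_possible(struct xe_vm *vm, struct xe_vma_ops *vops,
			      u8 tile_id)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&vops->pt_update_ops[tile_id];

	/* job == NULL: test-only mode, nothing is added to a job */
	return xe_pt_vm_dependencies(NULL, vm, vops, pt_update_ops,
				     &vm->rftree[tile_id]) == 0;
}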
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- struct xe_range_fence_tree *rftree =
- &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm *vm = vops->vm;
+ struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+
+ return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ pt_update_ops, rftree);
+}
+
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
- return xe_pt_vm_dependencies(pt_update->job, rftree,
- pt_update->start, pt_update->last);
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+ static u32 count;
+
+ if (count++ % divisor == divisor - 1) {
+ uvma->userptr.divisor = divisor << 1;
+ return true;
+ }
+
+ return false;
}
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- struct xe_pt_migrate_pt_update *userptr_update =
- container_of(pt_update, typeof(*userptr_update), base);
- struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
- unsigned long notifier_seq = uvma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- int err = xe_pt_vm_dependencies(pt_update->job,
- &vm->rftree[pt_update->tile_id],
- pt_update->start,
- pt_update->last);
+ return false;
+}
- if (err)
- return err;
+#endif
- userptr_update->locked = false;
+static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ struct xe_userptr_vma *uvma;
+ unsigned long notifier_seq;
- /*
- * Wait until nobody is running the invalidation notifier, and
- * since we're exiting the loop holding the notifier lock,
- * nobody can proceed invalidating either.
- *
- * Note that we don't update the vma->userptr.notifier_seq since
- * we don't update the userptr pages.
- */
- do {
- down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq))
- break;
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
- up_read(&vm->userptr.notifier_lock);
+ if (!xe_vma_is_userptr(vma))
+ return 0;
- if (userptr_update->bind)
- return -EAGAIN;
+ uvma = to_userptr_vma(vma);
+ notifier_seq = uvma->userptr.notifier_seq;
- notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
- } while (true);
+ if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+ return 0;
- /* Inject errors to test_whether they are handled correctly */
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
- up_read(&vm->userptr.notifier_lock);
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ notifier_seq) &&
+ !xe_pt_userptr_inject_eagain(uvma))
+ return 0;
+
+ if (xe_vm_in_fault_mode(vm)) {
return -EAGAIN;
- }
+ } else {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
- userptr_update->locked = true;
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+ }
+ }
return 0;
}
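The BOOKKEEP wait above only completes because software signaling is force-enabled on each fence first. The same enable-then-wait idiom shown in isolation, assuming the resv is not locked by the waiter:

static long wait_bookkeep_idle(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Preempt fences only signal once sw signaling is enabled */
	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				     false, MAX_SCHEDULE_TIMEOUT);
}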
-static const struct xe_migrate_pt_update_ops bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_pre_commit,
-};
+static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ int err = 0;
-static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_check_userptr(vm, op->map.vma, pt_update);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_check_userptr(vm, op->remap.prev, pt_update);
+ if (!err && op->remap.next)
+ err = vma_check_userptr(vm, op->remap.next, pt_update);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
+ pt_update);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ down_read(&vm->userptr.notifier_lock);
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_userptr(vm, op, pt_update_ops);
+ if (err) {
+ up_read(&vm->userptr.notifier_lock);
+ break;
+ }
+ }
+
+ return err;
+}
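In fault mode a racing invalidation surfaces here as -EAGAIN, and the caller is expected to back off and retry the whole cycle. A hedged sketch (retry_bind() is invented for illustration; a real caller would also re-prepare the operations before each retry):

static struct dma_fence *retry_bind(struct xe_tile *tile,
				    struct xe_vma_ops *vops)
{
	struct dma_fence *fence;

	do {
		/* A real caller re-prepares vops before each retry */
		fence = xe_pt_update_ops_run(tile, vops);
	} while (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN);

	return fence;
}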
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1144,10 +1350,10 @@ static void invalidation_fence_work_func(struct work_struct *w)
ifence->end, ifence->asid);
}
-static int invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static void invalidation_fence_init(struct xe_gt *gt,
+ struct invalidation_fence *ifence,
+ struct dma_fence *fence,
+ u64 start, u64 end, u32 asid)
{
int ret;
@@ -1172,192 +1378,6 @@ static int invalidation_fence_init(struct xe_gt *gt,
}
xe_gt_assert(gt, !ret || ret == -ENOENT);
-
- return ret && ret != -ENOENT ? ret : 0;
-}
-
-static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
- struct xe_pt_migrate_pt_update *update,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
-{
- int i, level = 0;
-
- for (i = 0; i < num_entries; i++) {
- const struct xe_vm_pgtable_update *entry = &entries[i];
-
- if (entry->pt->level > level)
- level = entry->pt->level;
- }
-
- /* Greedy (non-optimal) calculation but simple */
- update->base.start = ALIGN_DOWN(xe_vma_start(vma),
- 0x1ull << xe_pt_shift(level));
- update->base.last = ALIGN(xe_vma_end(vma),
- 0x1ull << xe_pt_shift(level)) - 1;
-}
-
-/**
- * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
- * address range.
- * @tile: The tile to bind for.
- * @vma: The vma to bind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before binding the built tree to the live vm tree.
- * @num_syncs: Number of @sync entries.
- * @rebind: Whether we're rebinding this vma to the same address range without
- * an unbind in-between.
- *
- * This function builds a page-table tree (see xe_pt_stage_bind() for more
- * information on page-table building), and the xe_vm_pgtable_update entries
- * abstracting the operations needed to attach it to the main vm tree. It
- * then takes the relevant locks and updates the metadata side of the main
- * vm tree and submits the operations for pipelined attachment of the
- * gpu page-table to the vm main tree, (which can be done either by the
- * cpu and the GPU).
- *
- * Return: A valid dma-fence representing the pipelined attachment operation
- * on success, an error pointer on error.
- */
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind)
-{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update bind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- .bind = true,
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL;
- struct xe_range_fence *rfence;
- int err;
-
- bind_pt_update.locked = false;
- xe_bo_assert_held(xe_vma_bo(vma));
- xe_vm_assert_held(vm);
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing bind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
-
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
- if (err)
- goto err;
-
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- goto err;
-
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
- num_entries);
-
- /*
- * If rebind, we have to invalidate TLB on !LR vms to invalidate
- * cached PTEs point to freed memory. on LR vms this is done
- * automatically when the context is re-enabled by the rebind worker,
- * or in fault mode it was invalidated on PTE zapping.
- *
- * If !rebind, and scratch enabled VMs, there is a chance the scratch
- * PTE is already cached in the TLB so it needs to be invalidated.
- * on !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
- */
- if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
- } else if (rebind && !xe_vm_in_lr_mode(vm)) {
- /* We bump also if batch_invalidate_tlb is true */
- vm->tlb_flush_seqno++;
- }
-
- rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
- if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
- }
-
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, xe_vma_bo(vma), q,
- entries, num_entries,
- syncs, num_syncs,
- &bind_pt_update.base);
- if (!IS_ERR(fence)) {
- bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
- LLIST_HEAD(deferred);
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- bind_pt_update.base.start,
- bind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
-
- /* TLB invalidation must be done before signaling rebind */
- if (ifence) {
- int err = invalidation_fence_init(tile->primary_gt,
- ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
- }
- fence = &ifence->base.base;
- }
-
- /* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
- last_munmap_rebind ?
- DMA_RESV_USAGE_KERNEL :
- DMA_RESV_USAGE_BOOKKEEP);
-
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_bind(vma, entries, num_entries, rebind,
- bind_pt_update.locked ? &deferred : NULL);
-
- /* This vma is live (again?) now */
- vma->tile_present |= BIT(tile->id);
-
- if (bind_pt_update.locked) {
- to_userptr_vma(vma)->userptr.initial_bind = true;
- up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
- if (!rebind && last_munmap_rebind &&
- xe_vm_in_preempt_fence_mode(vm))
- xe_vm_queue_rebind_worker(vm);
- } else {
- kfree(rfence);
- kfree(ifence);
- if (bind_pt_update.locked)
- up_read(&vm->userptr.notifier_lock);
- xe_pt_abort_bind(vma, entries, num_entries);
- }
-
- return fence;
-
-err:
- return ERR_PTR(err);
}
struct xe_pt_stage_unbind_walk {
@@ -1442,6 +1462,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
pgoff_t end_offset;
u64 size = 1ull << walk->shifts[--level];
+ int err;
if (!IS_ALIGNED(addr, size))
addr = xe_walk->modified_start;
@@ -1457,7 +1478,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
&end_offset))
return 0;
- (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+ err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
+ if (err)
+ return err;
+
xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
return 0;
@@ -1510,8 +1534,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
void *ptr, u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{
- struct xe_vma *vma = pt_update->vma;
- u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+ struct xe_vm *vm = pt_update->vops->vm;
+ u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
int i;
if (map && map->is_iomem)
@@ -1525,181 +1549,644 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
memset64(ptr, empty, num_qwords);
}
+static void xe_pt_abort_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+ struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ pt->num_live += entry->qwords;
+
+ if (!pt->level)
+ continue;
+
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+ pt_dir->children[j] =
+ entries[i].pt_entries[j - entry->ofs].pt ?
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ }
+}
+
static void
-xe_pt_commit_unbind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 num_entries,
- struct llist_head *deferred)
+xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
- u32 j;
+ int i, j;
xe_pt_commit_locks_assert(vma);
- for (j = 0; j < num_entries; ++j) {
- struct xe_vm_pgtable_update *entry = &entries[j];
+ for (i = 0; i < num_entries; ++i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir;
pt->num_live -= entry->qwords;
- if (pt->level) {
- struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
- u32 i;
+ if (!pt->level)
+ continue;
- for (i = entry->ofs; i < entry->ofs + entry->qwords;
- i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i),
- xe_vma_vm(vma)->flags, deferred);
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ entry->pt_entries[j - entry->ofs].pt =
+ xe_pt_entry(pt_dir, j);
+ pt_dir->children[j] = NULL;
+ }
+ }
+}
- pt_dir->children[i] = NULL;
- }
+static void
+xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int i, level = 0;
+ u64 start, last;
+
+ for (i = 0; i < pt_op->num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
+
+ if (entry->pt->level > level)
+ level = entry->pt->level;
+ }
+
+ /* Greedy (non-optimal) calculation but simple */
+ start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
+ last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+
+ if (start < pt_update_ops->start)
+ pt_update_ops->start = start;
+ if (last > pt_update_ops->last)
+ pt_update_ops->last = last;
+}
+
+static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
+{
+ int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
+
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
+ xe->info.tile_count << shift);
+
+ return 0;
+}
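The shift doubles the reservation when a media GT exists, since each tile may then produce two invalidation fences. A worked example with illustrative values:

static unsigned int example_fence_count(void)
{
	unsigned int tile_count = 2;	/* xe->info.tile_count */
	int shift = 1;			/* root tile has a media GT */

	return tile_count << shift;	/* 4 dma-resv fence slots */
}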
+
+static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & vma->tile_present;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+
+ /*
+ * If rebind, we have to invalidate the TLB on !LR VMs to invalidate
+ * cached PTEs that point to freed memory. On LR VMs this is done
+ * automatically when the context is re-enabled by the rebind worker,
+ * or in fault mode it was invalidated on PTE zapping.
+ *
+ * If !rebind, and scratch enabled VMs, there is a chance the scratch
+ * PTE is already cached in the TLB so it needs to be invalidated.
+ * On !LR VMs this is done in the ring ops preceding a batch, but on
+ * non-faulting LR, in particular on user-space batch buffer chaining,
+ * it needs to be done here.
+ */
+ if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
+ xe_vm_in_preempt_fence_mode(vm)))
+ pt_update_ops->needs_invalidation = true;
+ else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
+ /* We bump also if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
+
+ vma->tile_staged |= BIT(tile->id);
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
+static int unbind_op_prepare(struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
+ return 0;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing unbind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ /*
+ * Wait for invalidation to complete. Internal page-table state can be
+ * corrupted if an invalidation runs while an unbind is being prepared.
+ */
+ if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
+ mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
+
+ pt_op->vma = vma;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
+
+ return 0;
+}
+
+static int op_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op)
+{
+ int err = 0;
+
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va));
+
+ if (!err && op->remap.prev) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.prev);
+ pt_update_ops->wait_vm_bookkeep = true;
}
+ if (!err && op->remap.next) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.next);
+ pt_update_ops->wait_vm_bookkeep = true;
+ }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va));
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
+
+ return err;
}
-static const struct xe_migrate_pt_update_ops unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static void
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
+{
+ init_llist_head(&pt_update_ops->deferred);
+ pt_update_ops->start = ~0x0ull;
+ pt_update_ops->last = 0x0ull;
+}
+
+/**
+ * xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare PT update operations, which includes updating internal PT state,
+ * allocating memory for page tables, populating the page tables being pruned
+ * in, and creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct xe_vma_op *op;
+ int shift = tile->media_gt ? 1 : 0;
+ int err;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ xe_pt_update_ops_init(pt_update_ops);
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+ tile_to_xe(tile)->info.tile_count << shift);
+ if (err)
+ return err;
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+ if (err)
+ return err;
+ }
+
+ xe_tile_assert(tile, pt_update_ops->current_op <=
+ pt_update_ops->num_ops);
+
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
+ return -ENOSPC;
+#endif
+
+ return 0;
+}
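Together with xe_pt_update_ops_run(), xe_pt_update_ops_fini() and xe_pt_update_ops_abort() below, the per-tile call sequence looks roughly like this sketch (bind_one_tile() is a made-up wrapper, error handling condensed):

static int bind_one_tile(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct dma_fence *fence;
	int err;

	err = xe_pt_update_ops_prepare(tile, vops);
	if (err)
		goto abort;

	fence = xe_pt_update_ops_run(tile, vops);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto abort;
	}

	dma_fence_put(fence);
	xe_pt_update_ops_fini(tile, vops);	/* commit deferred PT frees */
	return 0;

abort:
	xe_pt_update_ops_abort(tile, vops);	/* unwind staged PT state */
	return err;
}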
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present |= BIT(tile->id);
+ vma->tile_staged &= ~BIT(tile->id);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ to_userptr_vma(vma)->userptr.initial_bind = true;
+ }
+
+ /*
+ * Kick the rebind worker if this bind triggers preempt fences and we
+ * are not already in the rebind worker
+ */
+ if (pt_update_ops->wait_vm_bookkeep &&
+ xe_vm_in_preempt_fence_mode(vm) &&
+ !current->mm)
+ xe_vm_queue_rebind_worker(vm);
+}
+
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present &= ~BIT(tile->id);
+ if (!vma->tile_present) {
+ list_del_init(&vma->combined_links.rebind);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+ }
+}
+
+static void op_commit(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+ fence2);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va), fence,
+ fence2);
+
+ if (op->remap.prev)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+ fence, fence2);
+ if (op->remap.next)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+ fence, fence2);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ bind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_userptr_pre_commit,
};
/**
- * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
- * address range.
- * @tile: The tile to unbind for.
- * @vma: The vma to unbind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
- * @num_syncs: Number of @sync entries.
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
*
- * This function builds a the xe_vm_pgtable_update entries abstracting the
- * operations needed to detach the page-table tree to be destroyed from the
- * man vm tree.
- * It then takes the relevant locks and submits the operations for
- * pipelined detachment of the gpu page-table from the vm main tree,
- * (which can be done either by the cpu and the GPU), Finally it frees the
- * detached page-table tree.
+ * Run PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations (leaf insertion /
+ * removal), and installing the job fence in various places.
*
- * Return: A valid dma-fence representing the pipelined detachment operation
- * on success, an error pointer on error.
+ * Return: fence on success, negative ERR_PTR on error.
*/
struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update unbind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
- &unbind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence = NULL;
- struct invalidation_fence *ifence;
+ struct xe_vm *vm = vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct dma_fence *fence;
+ struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
- int err;
-
- LLIST_HEAD(deferred);
+ struct xe_vma_op *op;
+ int err = 0, i;
+ struct xe_migrate_pt_update update = {
+ .ops = pt_update_ops->needs_userptr_lock ?
+ &userptr_migrate_ops :
+ &migrate_ops,
+ .vops = vops,
+ .tile_id = tile->id,
+ };
- xe_bo_assert_held(xe_vma_bo(vma));
+ lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing unbind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
+ if (!pt_update_ops->current_op) {
+ xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
- num_entries = xe_pt_stage_unbind(tile, vma, entries);
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
- num_entries);
+ return dma_fence_get_stub();
+ }
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- return ERR_PTR(err);
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
+ return ERR_PTR(-ENOSPC);
+#endif
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
+ if (pt_update_ops->needs_invalidation) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto kill_vm_tile1;
+ }
+ if (tile->media_gt) {
+ mfence = kzalloc(sizeof(*mfence), GFP_KERNEL);
+ if (!mfence) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ cf = dma_fence_array_alloc(2);
+ if (!cf) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ }
+ }
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto free_ifence;
}
- /*
- * Even if we were already evicted and unbind to destroy, we need to
- * clear again here. The eviction may have updated pagetables at a
- * lower level, because it needs to be more conservative.
- */
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, NULL, q ? q :
- vm->q[tile->id],
- entries, num_entries,
- syncs, num_syncs,
- &unbind_pt_update.base);
- if (!IS_ERR(fence)) {
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- unbind_pt_update.base.start,
- unbind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
+ fence = xe_migrate_update_pgtables(tile->migrate, &update);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_rfence;
+ }
- /* TLB invalidation must be done before signaling unbind */
- err = invalidation_fence_init(tile->primary_gt, ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
+ /* Point of no return - the VM is killed if we fail after this */
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_commit(pt_op->vma, pt_op->entries,
+ pt_op->num_entries, &pt_update_ops->deferred);
+ pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */
+ }
+
+ if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ pt_update_ops->start,
+ pt_update_ops->last, fence))
+ dma_fence_wait(fence, false);
+
+ /* TLB invalidation must be done before signaling rebind */
+ if (ifence) {
+ if (mfence)
+ dma_fence_get(fence);
+ invalidation_fence_init(tile->primary_gt, ifence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ if (mfence) {
+ invalidation_fence_init(tile->media_gt, mfence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ fences[0] = &ifence->base.base;
+ fences[1] = &mfence->base.base;
+ dma_fence_array_init(cf, 2, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ fence = &cf->base;
+ } else {
+ fence = &ifence->base.base;
}
- fence = &ifence->base.base;
+ }
- /* add shared fence now for pagetable delayed destroy */
+ if (!mfence) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- /* This fence will be installed by caller when doing eviction */
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_unbind(vma, entries, num_entries,
- unbind_pt_update.locked ? &deferred : NULL);
- vma->tile_present &= ~BIT(tile->id);
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
- kfree(rfence);
- kfree(ifence);
- }
+ dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (!vma->tile_present)
- list_del_init(&vma->combined_links.rebind);
+ dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (unbind_pt_update.locked) {
- xe_tile_assert(tile, xe_vma_is_userptr(vma));
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op,
+ &ifence->base.base, &mfence->base.base);
+ }
- if (!vma->tile_present) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
+ if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
return fence;
+
+free_rfence:
+ kfree(rfence);
+free_ifence:
+ kfree(cf);
+ kfree(fences);
+ kfree(mfence);
+ kfree(ifence);
+kill_vm_tile1:
+ if (err != -EAGAIN && tile->id)
+ xe_vm_kill(vops->vm, false);
+
+ return ERR_PTR(err);
+}
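When both GTs need invalidation, the two fences are folded into one composite via a dma-fence array. The composition step in isolation (allocation-failure handling condensed):

static struct dma_fence *compose_invalidation_fences(struct dma_fence *primary,
						     struct dma_fence *media,
						     u64 ctx, unsigned int seqno)
{
	struct dma_fence **fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	struct dma_fence_array *cf = fences ? dma_fence_array_alloc(2) : NULL;

	if (!cf) {
		kfree(fences);
		return NULL;
	}

	fences[0] = primary;
	fences[1] = media;
	/* signal_on_any == false: signals only when both have signaled */
	dma_fence_array_init(cf, 2, fences, ctx, seqno, false);
	return &cf->base;
}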
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing the deferred destruction of
+ * page-table memory.
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+ }
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
+
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ if (!pt_op->vma || i >= pt_update_ops->current_op)
+ continue;
+
+ if (pt_op->bind)
+ xe_pt_abort_bind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries,
+ pt_op->rebind);
+ else
+ xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries);
+ }
+
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 71a4fbfcff43..9ab386431cad 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -17,6 +17,7 @@ struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vma_ops;
/* Largest huge pte is currently 1GiB. May become device dependent. */
#define MAX_HUGEPTE_LEVEL 2
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind);
-
-struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs);
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+ struct xe_vma_ops *vops);
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index cee70cb0f014..384cc04de719 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -74,4 +74,52 @@ struct xe_vm_pgtable_update {
u32 flags;
};
+/** struct xe_vm_pgtable_update_op - Page table update operation */
+struct xe_vm_pgtable_update_op {
+ /** @entries: entries to update for this operation */
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ /** @vma: VMA for operation, operation not valid if NULL */
+ struct xe_vma *vma;
+ /** @num_entries: number of entries for this update operation */
+ u32 num_entries;
+ /** @bind: is a bind */
+ bool bind;
+ /** @rebind: is a rebind */
+ bool rebind;
+};
+
+/** struct xe_vm_pgtable_update_ops - Page table update operations */
+struct xe_vm_pgtable_update_ops {
+ /** @ops: operations */
+ struct xe_vm_pgtable_update_op *ops;
+ /** @deferred: deferred list to destroy PT entries */
+ struct llist_head deferred;
+ /** @q: exec queue for PT operations */
+ struct xe_exec_queue *q;
+ /** @start: start address of ops */
+ u64 start;
+ /** @last: last address of ops */
+ u64 last;
+ /** @num_ops: number of operations */
+ u32 num_ops;
+ /** @current_op: current operation */
+ u32 current_op;
+ /** @needs_userptr_lock: Needs userptr lock */
+ bool needs_userptr_lock;
+ /** @needs_invalidation: Needs invalidation */
+ bool needs_invalidation;
+ /**
+ * @wait_vm_bookkeep: PT operations need to wait until VM is idle
+ * (bookkeep dma-resv slots are idle) and stage all future VM activity
+ * behind these operations (install PT operations into VM kernel
+ * dma-resv slot).
+ */
+ bool wait_vm_bookkeep;
+ /**
+ * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
+ * slots are idle.
+ */
+ bool wait_vm_kernel;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 4e01df6b1b7a..28d9bb3b825d 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,7 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -518,7 +518,9 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_EU_PER_DSS;
+ topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
+ DRM_XE_TOPO_SIMD16_EU_PER_DSS :
+ DRM_XE_TOPO_EU_PER_DSS;
err = copy_mask(&query_ptr, &topo,
gt->fuse_topo.eu_mask_per_dss,
sizeof(gt->fuse_topo.eu_mask_per_dss));
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index 655af89b31a9..dca374b6521c 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -26,7 +26,6 @@
#include <linux/scatterlist.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 5efe83cc82ab..86c705d18c0d 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -7,7 +7,7 @@
#include <kunit/visibility.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_gt.h"
#include "xe_gt_topology.h"
@@ -217,21 +217,19 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
ctx->active_entries = active_entries;
ctx->n_entries = n_entries;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);
static void rtp_mark_active(struct xe_device *xe,
struct xe_rtp_process_ctx *ctx,
- unsigned int first, unsigned int last)
+ unsigned int idx)
{
if (!ctx->active_entries)
return;
- if (drm_WARN_ON(&xe->drm, last > ctx->n_entries))
+ if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
return;
- if (first == last)
- bitmap_set(ctx->active_entries, first, 1);
- else
- bitmap_set(ctx->active_entries, first, last - first + 1);
+ bitmap_set(ctx->active_entries, idx, 1);
}
/**
@@ -276,8 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
}
if (match)
- rtp_mark_active(xe, ctx, entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
@@ -288,44 +285,29 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
* @entries: Table with RTP definitions
*
* Walk the table pointed by @entries (with an empty sentinel), executing the
- * rules. A few differences from xe_rtp_process_to_sr():
- *
- * 1. There is no action associated with each entry since this uses
- * struct xe_rtp_entry. Its main use is for marking active workarounds via
- * xe_rtp_process_ctx_enable_active_tracking().
- * 2. There is support for OR operations by having entries with no name.
+ * rules. One difference from xe_rtp_process_to_sr(): there is no action
+ * associated with each entry since this uses struct xe_rtp_entry. Its main use
+ * is for marking active workarounds via
+ * xe_rtp_process_ctx_enable_active_tracking().
*/
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries)
{
- const struct xe_rtp_entry *entry, *first_entry;
+ const struct xe_rtp_entry *entry;
struct xe_hw_engine *hwe;
struct xe_gt *gt;
struct xe_device *xe;
rtp_get_context(ctx, &hwe, &gt, &xe);
- first_entry = entries;
- if (drm_WARN_ON(&xe->drm, !first_entry->name))
- return;
-
for (entry = entries; entry && entry->rules; entry++) {
- if (entry->name)
- first_entry = entry;
-
if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
continue;
- /* Fast-forward entry, eliminating the OR'ed entries */
- for (entry++; entry && entry->rules; entry++)
- if (entry->name)
- break;
- entry--;
-
- rtp_mark_active(xe, ctx, first_entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
bool xe_rtp_match_even_instance(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index ad446731192c..827d932b6908 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -374,7 +374,7 @@ struct xe_reg_sr;
* XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
* @...: Rules
*
- * At least one rule is needed and up to 6 are supported. Multiple rules are
+ * At least one rule is needed and up to 12 are supported. Multiple rules are
* AND'ed together, i.e. all the rules must evaluate to true for the entry to
* be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
*
@@ -399,7 +399,7 @@ struct xe_reg_sr;
* XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
* @...: Actions to be taken
*
- * At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_*
+ * At least one action is needed and up to 12 are supported. See XE_RTP_ACTION_*
* for the possible actions. Example:
*
* .. code-block:: c
diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h
index c59e40fd7fff..a33b0ae98bbc 100644
--- a/drivers/gpu/drm/xe/xe_rtp_helpers.h
+++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h
@@ -60,6 +60,12 @@
#define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_7(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_6(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_8(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_7(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_9(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_8(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_10(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_9(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_11(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_10(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_12(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_11(prefix_, sep_, _XE_TUPLE_TAIL args_)
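With the paste helpers extended through XE_RTP_PASTE_12, an entry can AND together up to 12 rules. An illustrative entry (EXAMPLE_REG and EXAMPLE_BIT are placeholders; the rule macros themselves are real):

{ XE_RTP_NAME("Example: many AND'ed rules"),
  XE_RTP_RULES(PLATFORM(DG2), SUBPLATFORM(DG2, G10),
	       GRAPHICS_VERSION(1255), GRAPHICS_STEP(A0, B0),
	       ENGINE_CLASS(RENDER), FUNC(xe_rtp_match_even_instance),
	       OR, ENGINE_CLASS(COMPUTE)),
  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))	/* placeholder action */
},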
/*
* XE_RTP_DROP_CAST - Drop cast to convert a compound statement to a initializer
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 8941522b7705..fe2cb2a96f78 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -25,10 +25,9 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
drm_suballoc_manager_fini(&sa_manager->base);
- if (bo->vmap.is_iomem)
+ if (sa_manager->is_iomem)
kvfree(sa_manager->cpu_ptr);
- xe_bo_unpin_map_no_vm(bo);
sa_manager->bo = NULL;
}
@@ -47,16 +46,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
sa_manager->bo = NULL;
- bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_managed_bo_create_pin_map(xe, tile, size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
return (struct xe_sa_manager *)bo;
}
sa_manager->bo = bo;
+ sa_manager->is_iomem = bo->vmap.is_iomem;
drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
@@ -64,7 +64,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
if (!sa_manager->cpu_ptr) {
- xe_bo_unpin_map_no_vm(sa_manager->bo);
sa_manager->bo = NULL;
return ERR_PTR(-ENOMEM);
}
@@ -84,6 +83,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
unsigned int size)
{
+ /*
+ * BB too large, return -ENOBUFS indicating the user should split
+ * the array of binds into smaller chunks.
+ */
+ if (size > sa_manager->base.size)
+ return ERR_PTR(-ENOBUFS);
+
return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}
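A submitter that hits the new -ENOBUFS is expected to split its array of binds and retry; roughly (submit_binds() and do_vm_bind() are hypothetical helpers, not driver or uAPI code):

static int submit_binds(void *ctx, unsigned int start, unsigned int count)
{
	int err = do_vm_bind(ctx, start, count);	/* hypothetical */

	if (err == -ENOBUFS && count > 1) {
		/* split the bind array in half, submit each part */
		err = submit_binds(ctx, start, count / 2);
		if (!err)
			err = submit_binds(ctx, start + count / 2,
					   count - count / 2);
	}

	return err;
}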
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2ef896aeca1d..2b070ff1292e 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -14,6 +14,7 @@ struct xe_sa_manager {
struct xe_bo *bo;
u64 gpu_addr;
void *cpu_ptr;
+ bool is_iomem;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 9628f9deb3c0..eeccc1c318ae 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,7 +5,7 @@
#include "xe_sched_job.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
@@ -89,8 +89,7 @@ static void xe_sched_job_free_fences(struct xe_sched_job *job)
if (ptrs->lrc_fence)
xe_lrc_free_seqno_fence(ptrs->lrc_fence);
- if (ptrs->chain_fence)
- dma_fence_chain_free(ptrs->chain_fence);
+ dma_fence_chain_free(ptrs->chain_fence);
}
}
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a274a5fb1401..5a1d65e4f19f 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,7 +5,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_device.h"
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
index eaf1b718f26c..c77b5c317fa0 100644
--- a/drivers/gpu/drm/xe/xe_step.c
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -28,23 +28,17 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_GT_MEDIA_STEP(x_) \
- .graphics = STEP_##x_, \
- .media = STEP_##x_
-
#define COMMON_STEP(x_) \
- COMMON_GT_MEDIA_STEP(x_), \
.graphics = STEP_##x_, \
- .media = STEP_##x_, \
- .display = STEP_##x_
+ .media = STEP_##x_
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
static const struct xe_step_info tgl_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
};
static const struct xe_step_info dg1_revids[] = {
@@ -53,49 +47,49 @@ static const struct xe_step_info dg1_revids[] = {
};
static const struct xe_step_info adls_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A2 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_B0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct xe_step_info adls_rpls_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_D0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+ [0x4] = { COMMON_STEP(D0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct xe_step_info adlp_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(C0) },
};
static const struct xe_step_info adlp_rpl_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_E0 },
+ [0x4] = { COMMON_STEP(C0) },
};
static const struct xe_step_info adln_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
};
static const struct xe_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
};
static const struct xe_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_C0 },
- [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x5] = { COMMON_STEP(B1) },
};
static const struct xe_step_info dg2_g12_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_C0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
};
static const struct xe_step_info pvc_revid_step_tbl[] = {
@@ -195,7 +189,6 @@ struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe)
} else {
drm_dbg(&xe->drm, "Using future steppings\n");
step.graphics = STEP_FUTURE;
- step.display = STEP_FUTURE;
}
}
diff --git a/drivers/gpu/drm/xe/xe_step_types.h b/drivers/gpu/drm/xe/xe_step_types.h
index ccc9b4795e95..d978cc2512f2 100644
--- a/drivers/gpu/drm/xe/xe_step_types.h
+++ b/drivers/gpu/drm/xe/xe_step_types.h
@@ -11,12 +11,15 @@
struct xe_step_info {
u8 graphics;
u8 media;
- u8 display;
u8 basedie;
};
#define STEP_ENUM_VAL(name) STEP_##name,
+/*
+ * Always define four minor steppings 0-3 for each stepping to match the
+ * spacing of GMD ID values. See xe_step_gmdid_get().
+ */
#define STEP_NAME_LIST(func) \
func(A0) \
func(A1) \
@@ -34,7 +37,30 @@ struct xe_step_info {
func(D1) \
func(D2) \
func(D3) \
- func(E0)
+ func(E0) \
+ func(E1) \
+ func(E2) \
+ func(E3) \
+ func(F0) \
+ func(F1) \
+ func(F2) \
+ func(F3) \
+ func(G0) \
+ func(G1) \
+ func(G2) \
+ func(G3) \
+ func(H0) \
+ func(H1) \
+ func(H2) \
+ func(H3) \
+ func(I0) \
+ func(I1) \
+ func(I2) \
+ func(I3) \
+ func(J0) \
+ func(J1) \
+ func(J2) \
+ func(J3)
/*
* Symbolic steppings that do not match the hardware. These are valid both as gt
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index e8d31e010860..bb3c2a830362 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device_types.h"
#include "xe_exec_queue.h"
@@ -55,7 +55,7 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
- if (!access_ok(ptr, sizeof(ptr)))
+ if (!access_ok(ptr, sizeof(*ptr)))
return ERR_PTR(-EFAULT);
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
@@ -204,26 +204,11 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return 0;
}
-int xe_sync_entry_wait(struct xe_sync_entry *sync)
-{
- if (sync->fence)
- dma_fence_wait(sync->fence, true);
-
- return 0;
-}
-
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
- int err;
-
- if (sync->fence) {
- err = drm_sched_job_add_dependency(&job->drm,
- dma_fence_get(sync->fence));
- if (err) {
- dma_fence_put(sync->fence);
- return err;
- }
- }
+ if (sync->fence)
+ return drm_sched_job_add_dependency(&job->drm,
+ dma_fence_get(sync->fence));
return 0;
}
@@ -264,10 +249,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
if (sync->syncobj)
drm_syncobj_put(sync->syncobj);
- if (sync->fence)
- dma_fence_put(sync->fence);
- if (sync->chain_fence)
- dma_fence_chain_free(sync->chain_fence);
+ dma_fence_put(sync->fence);
+ dma_fence_chain_free(sync->chain_fence);
if (sync->ufence)
user_fence_put(sync->ufence);
}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 006dbf780793..256ffc1e54dc 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -22,7 +22,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
unsigned int flags);
-int xe_sync_entry_wait(struct xe_sync_entry *sync);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 01837f6f609f..8573d7a87d84 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -369,6 +369,58 @@ TRACE_EVENT(xe_reg_rw,
(u32)(__entry->val >> 32))
);
+DECLARE_EVENT_CLASS(xe_pm_runtime,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __field(void *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->caller = caller;
+ ),
+
+ TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index f39f09ed3495..9b1a1d4304ae 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_fail,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
DEFINE_EVENT(xe_vma, xe_vma_bind,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
@@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
TP_ARGS(vm)
);
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index f46fd2df84de..f7113cf6109d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -5,7 +5,6 @@
*/
#include <drm/drm_managed.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d4e6fa918942..faa1bf42e50e 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -39,12 +39,23 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
- XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
+ XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
+ SET(CCCHKNREG1, L3CMPCTRL))
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
+ { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(L3SQCREG2,
+ COMPMEMRD256BOVRFETCHEN))
+ },
+ { XE_RTP_NAME("Tuning: Stateless compression control"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
+ REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
+ },
{}
};
@@ -93,6 +104,14 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("Tuning: vs hit max value"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
+ REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
index 78eb8db73791..24a4209051ee 100644
--- a/drivers/gpu/drm/xe/xe_uc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
+#include "xe_gsc_debugfs.h"
#include "xe_guc_debugfs.h"
#include "xe_huc_debugfs.h"
#include "xe_macros.h"
@@ -23,6 +24,7 @@ void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent)
return;
}
+ xe_gsc_debugfs_register(&uc->gsc, root);
xe_guc_debugfs_register(&uc->guc, root);
xe_huc_debugfs_register(&uc->huc, root);
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 5f23ecd98376..d431d0031185 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -15,6 +15,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_module.h"
@@ -105,17 +106,20 @@ struct fw_blobs_by_type {
};
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
+ fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
+ fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
+ fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
+ fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
+ fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
+ fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
+ fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
fw_def(DG1, no_ver(i915, huc, dg1)) \
fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
@@ -125,7 +129,8 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
+ fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -136,6 +141,8 @@ struct fw_blobs_by_type {
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
+#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b))
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
@@ -146,6 +153,9 @@ struct fw_blobs_by_type {
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
+#define uc_fw_entry_gsc(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_gsc(dir_, uc_, shortname_, a, b, c), \
+ a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
@@ -161,7 +171,7 @@ XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_major_ver)
XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_no_ver)
-XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver)
+XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_gsc)
static struct xe_gt *
__uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type)
@@ -204,7 +214,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw_entry_no_ver)
};
static const struct uc_fw_entry entries_gsc[] = {
- XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver)
+ XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_gsc)
};
static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = {
[XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) },
@@ -306,10 +316,10 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
- /* We don't support GuC releases older than 70.19 */
- if (release->major < 70 || (release->major == 70 && release->minor < 19)) {
- xe_gt_err(gt, "Unsupported GuC v%u.%u! v70.19 or newer is required\n",
- release->major, release->minor);
+ /* We don't support GuC releases older than 70.29.2 */
+ if (MAKE_GUC_VER_STRUCT(*release) < MAKE_GUC_VER(70, 29, 2)) {
+ xe_gt_err(gt, "Unsupported GuC v%u.%u.%u! v70.29.2 or newer is required\n",
+ release->major, release->minor, release->patch);
return -EINVAL;
}
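The switch to MAKE_GUC_VER_STRUCT() works because the version is packed into a single integer, so one compare gives lexicographic ordering across major, minor and patch. A sketch assuming the usual packing from xe_guc.h (included above):

	/* Assumed layout: major in bits 16+, minor in 8..15, patch in 0..7 */
	#define EX_GUC_VER(maj, min, pat)  (((maj) << 16) | ((min) << 8) | (pat))
	/* e.g. EX_GUC_VER(70, 28, 9) < EX_GUC_VER(70, 29, 2), as required */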
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 50e8fc49ba6c..7acd5fc9d032 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
if (q->lr.pfence) {
long timeout = dma_fence_wait(q->lr.pfence, false);
- if (timeout < 0)
+ /* Only -ETIME on the fence indicates the VM needs to be killed */
+ if (timeout < 0 || q->lr.pfence->error == -ETIME)
return -ETIME;
+
dma_fence_put(q->lr.pfence);
q->lr.pfence = NULL;
}
@@ -273,6 +275,8 @@ out_up_write:
* xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
* @vm: The VM.
* @q: The exec_queue
+ *
+ * Note that this function might be called multiple times on the same queue.
*/
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
@@ -280,8 +284,10 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return;
down_write(&vm->lock);
- list_del(&q->lr.link);
- --vm->preempt.num_exec_queues;
+ if (!list_empty(&q->lr.link)) {
+ list_del_init(&q->lr.link);
+ --vm->preempt.num_exec_queues;
+ }
if (q->lr.pfence) {
dma_fence_enable_sw_signaling(q->lr.pfence);
dma_fence_put(q->lr.pfence);
@@ -311,7 +317,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+/**
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer
+ * available for use. If in preempt fence mode, also kill all exec queues
+ * attached to the VM.
+ */
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
@@ -708,6 +722,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
+static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
+ if (!vops->pt_update_ops[i].num_ops)
+ continue;
+
+ vops->pt_update_ops[i].ops =
+ kmalloc_array(vops->pt_update_ops[i].num_ops,
+ sizeof(*vops->pt_update_ops[i].ops),
+ GFP_KERNEL);
+ if (!vops->pt_update_ops[i].ops)
+ return array_of_binds ? -ENOBUFS : -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xe_vma_ops_fini(struct xe_vma_ops *vops)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ kfree(vops->pt_update_ops[i].ops);
+}
+
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ if (BIT(i) & tile_mask)
+ ++vops->pt_update_ops[i].num_ops;
+}
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -735,6 +785,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
return 0;
}
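The rebind path now follows a two-pass pattern: parsing counts the page-table update ops per tile, then one upfront allocation sizes the arrays so execution never allocates mid-operation. A hedged sketch of the call order (num_binds stands in for the ioctl's args->num_binds):

	xe_vma_ops_incr_pt_update_ops(&vops, tile_mask);  /* once per parsed op */
	err = xe_vma_ops_alloc(&vops, num_binds > 1);     /* once, before execute */
	if (err)
		goto free_ops;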
@@ -751,7 +802,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
struct xe_vma *vma, *next;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
- int err;
+ int err, i;
lockdep_assert_held(&vm->lock);
if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
@@ -759,6 +810,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return 0;
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ vops.pt_update_ops[i].wait_vm_bookkeep = true;
xe_vm_assert_held(vm);
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
@@ -775,6 +828,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
goto free_ops;
}
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err)
+ goto free_ops;
+
fence = ops_execute(vm, &vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
@@ -789,6 +846,7 @@ free_ops:
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return err;
}
@@ -798,6 +856,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
struct dma_fence *fence = NULL;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
int err;
lockdep_assert_held(&vm->lock);
@@ -805,17 +865,30 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
if (err)
return ERR_PTR(err);
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
fence = ops_execute(vm, &vops);
+free_ops:
list_for_each_entry_safe(op, next_op, &vops.list, link) {
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return fence;
}
@@ -1122,7 +1195,7 @@ static const struct drm_gpuvm_ops gpuvm_ops = {
.vm_free = xe_vm_free,
};
-static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
+static u64 pde_encode_pat_index(u16 pat_index)
{
u64 pte = 0;
@@ -1135,8 +1208,7 @@ static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
return pte;
}
-static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
- u32 pt_level)
+static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
{
u64 pte = 0;
@@ -1177,12 +1249,11 @@ static u64 pte_encode_ps(u32 pt_level)
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
const u16 pat_index)
{
- struct xe_device *xe = xe_bo_device(bo);
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_pat_index(xe, pat_index);
+ pde |= pde_encode_pat_index(pat_index);
return pde;
}
@@ -1190,12 +1261,11 @@ static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index, u32 pt_level)
{
- struct xe_device *xe = xe_bo_device(bo);
u64 pte;
pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1207,14 +1277,12 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
u16 pat_index, u32 pt_level)
{
- struct xe_device *xe = xe_vma_vm(vma)->xe;
-
pte |= XE_PAGE_PRESENT;
if (likely(!xe_vma_read_only(vma)))
pte |= XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (unlikely(xe_vma_is_null(vma)))
@@ -1234,7 +1302,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
pte = addr;
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (devmem)
@@ -1333,6 +1401,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
@@ -1412,19 +1482,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
for_each_tile(tile, xe, id) {
- struct xe_gt *gt = tile->primary_gt;
- struct xe_vm *migrate_vm;
struct xe_exec_queue *q;
u32 create_flags = EXEC_QUEUE_FLAG_VM;
if (!vm->pt_root[id])
continue;
- migrate_vm = xe_migrate_get_vm(tile->migrate);
- q = xe_exec_queue_create_class(xe, gt, migrate_vm,
- XE_ENGINE_CLASS_COPY,
- create_flags);
- xe_vm_put(migrate_vm);
+ q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto err_close;
@@ -1437,13 +1501,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
- mutex_lock(&xe->usm.lock);
- if (flags & XE_VM_FLAG_FAULT_MODE)
- xe->usm.num_vm_in_fault_mode++;
- else if (!(flags & XE_VM_FLAG_MIGRATION))
- xe->usm.num_vm_in_non_fault_mode++;
- mutex_unlock(&xe->usm.lock);
-
trace_xe_vm_create(vm);
return vm;
@@ -1458,6 +1515,7 @@ err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -1556,11 +1614,6 @@ void xe_vm_close_and_put(struct xe_vm *vm)
up_write(&vm->lock);
mutex_lock(&xe->usm.lock);
- if (vm->flags & XE_VM_FLAG_FAULT_MODE)
- xe->usm.num_vm_in_fault_mode--;
- else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
- xe->usm.num_vm_in_non_fault_mode--;
-
if (vm->usm.asid) {
void *lookup;
@@ -1602,6 +1655,8 @@ static void vm_destroy_work_func(struct work_struct *w)
trace_xe_vm_free(vm);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+
if (vm->xef)
xe_file_put(vm->xef);
@@ -1641,147 +1696,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return q ? q : vm->q[0];
}
-static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op)
-{
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
- struct xe_tile *tile;
- struct dma_fence *fence = NULL;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- int cur_fence = 0;
- int number_tiles = hweight8(vma->tile_present);
- int err;
- u8 id;
-
- trace_xe_vma_unbind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(vma->tile_present & BIT(id)))
- goto next;
-
- fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- fence = cf ? &cf->base : !fence ?
- xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-
- return fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool first_op, bool last_op)
-{
- struct xe_tile *tile;
- struct dma_fence *fence;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = xe_vma_vm(vma);
- int cur_fence = 0;
- int number_tiles = hweight8(tile_mask);
- int err;
- u8 id;
-
- trace_xe_vma_bind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(tile_mask & BIT(id)))
- goto next;
-
- fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0,
- vma->tile_present & BIT(id));
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- return cf ? &cf->base : fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{
@@ -1797,48 +1711,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
return NULL;
}
-static struct dma_fence *
-xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool immediate, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(bo);
-
- if (immediate) {
- fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
- first_op, last_op);
- if (IS_ERR(fence))
- return fence;
- } else {
- xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
-
- fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
- if (IS_ERR(fence))
- return fence;
-
- return fence;
-}
-
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_FLAG_LR_MODE | \
DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
@@ -1879,14 +1751,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
- xe_device_in_non_fault_mode(xe)))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
- xe_device_in_fault_mode(xe)))
- return -EINVAL;
-
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
@@ -1979,21 +1843,6 @@ static const u32 region_to_mem_type[] = {
XE_PL_VRAM1,
};
-static struct dma_fence *
-xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
- return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
- vma->tile_mask, true, first_op, last_op);
- } else {
- return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-}
-
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
@@ -2281,14 +2130,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
-
-static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
- struct drm_gpuva_ops *ops,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_vma_ops *vops, bool last)
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
+ struct xe_vma_ops *vops)
{
struct xe_device *xe = vm->xe;
- struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
struct xe_tile *tile;
u8 id, tile_mask = 0;
@@ -2302,19 +2147,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
struct xe_vma *vma;
- bool first = list_empty(&vops->list);
unsigned int flags = 0;
INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, &vops->list);
-
- if (first) {
- op->flags |= XE_VMA_OP_FIRST;
- op->num_syncs = num_syncs;
- op->syncs = syncs;
- }
-
- op->q = q;
op->tile_mask = tile_mask;
switch (op->base.op) {
@@ -2333,6 +2169,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return PTR_ERR(vma);
op->map.vma = vma;
+ if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ xe_vma_ops_incr_pt_update_ops(vops,
+ op->tile_mask);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2377,6 +2216,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
@@ -2413,203 +2254,30 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
- /* Nothing to do */
+ /* FIXME: Need to skip some prefetch ops */
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- last_op = op;
-
err = xe_vma_op_commit(vm, op);
if (err)
return err;
}
- /* FIXME: Unhandled corner case */
- XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
-
- if (!last_op)
- return 0;
-
- if (last) {
- last_op->flags |= XE_VMA_OP_LAST;
- last_op->num_syncs = num_syncs;
- last_op->syncs = syncs;
- }
-
return 0;
}
-static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence = NULL;
-
- lockdep_assert_held(&vm->lock);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
- op->syncs, op->num_syncs,
- op->tile_mask,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- bool prev = !!op->remap.prev;
- bool next = !!op->remap.next;
-
- if (!op->remap.unmap_done) {
- if (prev || next)
- vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST &&
- !prev && !next);
- if (IS_ERR(fence))
- break;
- op->remap.unmap_done = true;
- }
-
- if (prev) {
- op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.prev, op->q,
- xe_vma_bo(op->remap.prev), op->syncs,
- op->num_syncs,
- op->remap.prev->tile_mask, true,
- false,
- op->flags & XE_VMA_OP_LAST && !next);
- op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.prev = NULL;
- }
-
- if (next) {
- op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.next, op->q,
- xe_vma_bo(op->remap.next),
- op->syncs, op->num_syncs,
- op->remap.next->tile_mask, true,
- false, op->flags & XE_VMA_OP_LAST);
- op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.next = NULL;
- }
-
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs, op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- if (IS_ERR(fence))
- trace_xe_vma_fail(vma);
-
- return fence;
-}
-
-static struct dma_fence *
-__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence;
- int err;
-
-retry_userptr:
- fence = op_execute(vm, vma, op);
- if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
- lockdep_assert_held_write(&vm->lock);
-
- if (op->base.op == DRM_GPUVA_OP_REMAP) {
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
- }
-
- if (xe_vma_is_userptr(vma)) {
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- if (!err)
- goto retry_userptr;
-
- fence = ERR_PTR(err);
- trace_xe_vma_fail(vma);
- }
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
-{
- struct dma_fence *fence = ERR_PTR(-ENOMEM);
-
- lockdep_assert_held(&vm->lock);
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = __xe_vma_op_execute(vm, op->map.vma, op);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- struct xe_vma *vma;
-
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
-
- fence = __xe_vma_op_execute(vm, vma, op);
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
- op);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = __xe_vma_op_execute(vm,
- gpuva_to_vma(op->base.prefetch.va),
- op);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return fence;
-}
-
static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
@@ -2792,26 +2460,157 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
return err;
}
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
+ return -ENOSPC;
+#endif
+
return 0;
}
+static void op_trace(struct xe_vma_op *op)
+{
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ trace_xe_vma_bind(op->map.vma);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+ if (op->remap.prev)
+ trace_xe_vma_bind(op->remap.prev);
+ if (op->remap.next)
+ trace_xe_vma_bind(op->remap.next);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+ break;
+ default:
+ XE_WARN_ON("NOT POSSIBLE");
+ }
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ list_for_each_entry(op, &vops->list, link)
+ op_trace(op);
+}
+
+static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_exec_queue *q = vops->q;
+ struct xe_tile *tile;
+ int number_tiles = 0;
+ u8 id;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vops->pt_update_ops[id].num_ops)
+ ++number_tiles;
+
+ if (vops->pt_update_ops[id].q)
+ continue;
+
+ if (q) {
+ vops->pt_update_ops[id].q = q;
+ if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
+ } else {
+ vops->pt_update_ops[id].q = vm->q[id];
+ }
+ }
+
+ return number_tiles;
+}
+
static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
- struct xe_vma_op *op, *next;
+ struct xe_tile *tile;
struct dma_fence *fence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ int number_tiles = 0, current_fence = 0, err;
+ u8 id;
- list_for_each_entry_safe(op, next, &vops->list, link) {
- dma_fence_put(fence);
- fence = xe_vma_op_execute(vm, op);
- if (IS_ERR(fence)) {
- drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
- op->base.op, PTR_ERR(fence));
- fence = ERR_PTR(-ENOSPC);
- break;
+ number_tiles = vm_ops_setup_tile_args(vm, vops);
+ if (number_tiles == 0)
+ return ERR_PTR(-ENODATA);
+
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences) {
+ fence = ERR_PTR(-ENOMEM);
+ goto err_trace;
+ }
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ err = xe_pt_update_ops_prepare(tile, vops);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto err_out;
}
}
+ trace_xe_vm_ops_execute(vops);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ fence = xe_pt_update_ops_run(tile, vops);
+ if (IS_ERR(fence))
+ goto err_out;
+
+ if (fences)
+ fences[current_fence++] = fence;
+ }
+
+ if (fences) {
+ cf = dma_fence_array_create(number_tiles, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ fence = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+ fence = &cf->base;
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_fini(tile, vops);
+ }
+
+ return fence;
+
+err_out:
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_abort(tile, vops);
+ }
+ while (current_fence)
+ dma_fence_put(fences[--current_fence]);
+ kfree(fences);
+ kfree(cf);
+
+err_trace:
+ trace_xe_vm_ops_fail(vm);
return fence;
}
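ops_execute() aggregates one fence per active tile into a dma_fence_array when more than one tile is involved. A minimal sketch of the pattern, assuming the standard contract that dma_fence_array_create() takes ownership of the fence references on success:

	static struct dma_fence *example_aggregate(struct dma_fence **fences, int n,
						   u64 ctx, u32 seqno)
	{
		struct dma_fence_array *cf;

		if (n == 1)
			return fences[0];

		cf = dma_fence_array_create(n, fences, ctx, seqno, false);
		return cf ? &cf->base : NULL;	/* caller unwinds fences on NULL */
	}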
@@ -2892,12 +2691,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
- /* FIXME: Killing VM rather than proper error handling */
- xe_vm_kill(vm, false);
goto unlock;
- } else {
- vm_bind_ioctl_ops_fini(vm, vops, fence);
}
+
+ vm_bind_ioctl_ops_fini(vm, vops, fence);
}
unlock:
@@ -2905,11 +2702,18 @@ unlock:
return err;
}
-#define SUPPORTED_FLAGS \
+#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
+
+#ifdef TEST_VM_OPS_ERROR
+#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
+#else
+#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB
+#endif
+
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -2935,7 +2739,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
sizeof(struct drm_xe_vm_bind_op),
GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
- return -ENOMEM;
+ return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
err = __copy_from_user(*bind_ops, bind_user,
sizeof(struct drm_xe_vm_bind_op) *
@@ -3074,7 +2878,16 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
return -EINVAL;
}
- if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
+ /*
+ * Some platforms require 64k VM_BIND alignment,
+ * specifically those with XE_VRAM_FLAGS_NEED64K.
+ *
+ * Other platforms may have BOs set to 64k physical placement,
+ * but these can be mapped at 4k offsets anyway. This check is only
+ * there for the former case.
+ */
+ if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
+ (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
@@ -3254,10 +3067,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
- err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
- &vops, i == args->num_binds - 1);
+ err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
if (err)
goto unwind_ops;
+
+#ifdef TEST_VM_OPS_ERROR
+ if (flags & FORCE_OP_ERROR) {
+ vops.inject_error = true;
+ vm->xe->vm_inject_error_position =
+ (vm->xe->vm_inject_error_position + 1) %
+ FORCE_OP_ERROR_COUNT;
+ }
+#endif
}
/* Nothing to do */
@@ -3266,11 +3087,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
+ err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
+ if (err)
+ goto unwind_ops;
+
err = vm_bind_ioctl_ops_execute(vm, &vops);
unwind_ops:
if (err && err != -ENODATA)
vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ xe_vma_ops_fini(&vops);
for (i = args->num_binds - 1; i >= 0; --i)
if (ops[i])
drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b481608b12f1..c864dba35e1d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..7f9a303e51d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -21,18 +21,27 @@ struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
+struct xe_vm_pgtable_update_op;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#define FORCE_OP_ERROR BIT(31)
+
+#define FORCE_OP_ERROR_LOCK 0
+#define FORCE_OP_ERROR_PREPARE 1
+#define FORCE_OP_ERROR_RUN 2
+#define FORCE_OP_ERROR_COUNT 3
+#endif
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
-#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
-#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
-#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
-#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
-#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
-#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
-#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
-#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -99,6 +108,9 @@ struct xe_vma {
*/
u8 tile_present;
+ /** @tile_staged: mask of tiles on which a bind is staged for this VMA */
+ u8 tile_staged;
+
/**
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
@@ -314,31 +326,18 @@ struct xe_vma_op_prefetch {
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
- /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
- XE_VMA_OP_FIRST = BIT(0),
- /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
- XE_VMA_OP_LAST = BIT(1),
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
- XE_VMA_OP_COMMITTED = BIT(2),
+ XE_VMA_OP_COMMITTED = BIT(0),
/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
- XE_VMA_OP_PREV_COMMITTED = BIT(3),
+ XE_VMA_OP_PREV_COMMITTED = BIT(1),
/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
- XE_VMA_OP_NEXT_COMMITTED = BIT(4),
+ XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
struct drm_gpuva_op base;
- /** @q: exec queue for this operation */
- struct xe_exec_queue *q;
- /**
- * @syncs: syncs for this operation, only used on first and last
- * operation
- */
- struct xe_sync_entry *syncs;
- /** @num_syncs: number of syncs */
- u32 num_syncs;
/** @link: async operation link */
struct list_head link;
/** @flags: operation flags */
@@ -362,12 +361,18 @@ struct xe_vma_ops {
struct list_head list;
/** @vm: VM */
struct xe_vm *vm;
- /** @q: exec queue these operations */
+ /** @q: exec queue for VMA operations */
struct xe_exec_queue *q;
/** @syncs: syncs for these operations */
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
+ /** @pt_update_ops: page table update operations */
+ struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+#ifdef TEST_VM_OPS_ERROR
+ /** @inject_error: inject an error to test error handling */
+ bool inject_error;
+#endif
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index e648265d081b..d424992514a4 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -733,6 +733,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
{}
};
@@ -759,6 +763,7 @@ void xe_wa_process_oob(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob,
ARRAY_SIZE(oob_was));
+ gt->wa_active.oob_initialized = true;
xe_rtp_process(&ctx, oob_was);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index db9ddeaf69bf..52337405b5bc 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -6,6 +6,8 @@
#ifndef _XE_WA_
#define _XE_WA_
+#include "xe_assert.h"
+
struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
@@ -25,6 +27,9 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob)
+#define XE_WA(gt__, id__) ({ \
+ xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
+ test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
+})
#endif
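Wrapping XE_WA() in a statement expression lets the macro both assert that the OOB workarounds were processed and still evaluate to the test_bit() result. The pattern in isolation (names illustrative):

	/* A statement expression evaluates to its last expression */
	#define CHECKED_TEST_BIT(bit, addr, ready) ({	\
		WARN_ON(!(ready));			\
		test_bit((bit), (addr));		\
	})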
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 08f7336881e3..920ca5060146 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -27,6 +27,13 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
+14022293748 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+22019794406 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
+14019789679 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 2004)
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f69721339201..d46fa8374980 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f006bc931324..b62e4f0e8130 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -404,9 +404,10 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
if (err < 0)
goto put_group;
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
+ host->domain = iommu_paging_domain_alloc(host->dev);
+ if (IS_ERR(host->domain)) {
+ err = PTR_ERR(host->domain);
+ host->domain = NULL;
goto put_cache;
}
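iommu_paging_domain_alloc() reports failure via ERR_PTR() rather than NULL, hence the PTR_ERR() propagation and the reset of host->domain so later cleanup that tests the pointer keeps working. The conversion pattern in isolation:

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		domain = NULL;	/* preserve the NULL-means-absent convention */
	}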
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 925a118db23f..92031b240a17 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/iova.h>
+#include <linux/irqreturn.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -81,6 +82,7 @@ struct host1x_intr_ops {
void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
void (*disable_all_syncpt_intrs)(struct host1x *host);
int (*free_syncpt_irq)(struct host1x *host);
+ irqreturn_t (*isr)(int irq, void *dev_id);
};
struct host1x_sid_entry {
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 9880e0c47235..415f8d7e4202 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -6,18 +6,11 @@
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/io.h>
#include "../intr.h"
#include "../dev.h"
-struct host1x_intr_irq_data {
- struct host1x *host;
- u32 offset;
-};
-
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x_intr_irq_data *irq_data = dev_id;
@@ -54,7 +47,8 @@ static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
-static void intr_hw_init(struct host1x *host, u32 cpm)
+static int
+host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
#if HOST1X_HW < 6
/* disable the ip_busy_timeout. this prevents write drops */
@@ -85,32 +79,6 @@ static void intr_hw_init(struct host1x *host, u32 cpm)
host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
}
#endif
-}
-
-static int
-host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
-{
- int err, i;
- struct host1x_intr_irq_data *irq_data;
-
- irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
- if (!irq_data)
- return -ENOMEM;
-
- host1x_hw_intr_disable_all_syncpt_intrs(host);
-
- for (i = 0; i < host->num_syncpt_irqs; i++) {
- irq_data[i].host = host;
- irq_data[i].offset = i;
-
- err = devm_request_irq(host->dev, host->syncpt_irqs[i],
- syncpt_thresh_isr, IRQF_SHARED,
- "host1x_syncpt", &irq_data[i]);
- if (err < 0)
- return err;
- }
-
- intr_hw_init(host, cpm);
return 0;
}
@@ -144,4 +112,5 @@ static const struct host1x_intr_ops host1x_intr_ops = {
.enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
.disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
.disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
+ .isr = syncpt_thresh_isr,
};
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 995bfa980837..b3285dd10180 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -6,7 +6,7 @@
*/
#include <linux/clk.h>
-
+#include <linux/interrupt.h>
#include "dev.h"
#include "fence.h"
#include "intr.h"
@@ -100,7 +100,9 @@ void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
int host1x_intr_init(struct host1x *host)
{
+ struct host1x_intr_irq_data *irq_data;
unsigned int id;
+ int i, err;
mutex_init(&host->intr_mutex);
@@ -111,6 +113,23 @@ int host1x_intr_init(struct host1x *host)
INIT_LIST_HEAD(&syncpt->fences.list);
}
+ irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+ for (i = 0; i < host->num_syncpt_irqs; i++) {
+ irq_data[i].host = host;
+ irq_data[i].offset = i;
+
+ err = devm_request_irq(host->dev, host->syncpt_irqs[i],
+ host->intr_op->isr, IRQF_SHARED,
+ "host1x_syncpt", &irq_data[i]);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 3b5610b525e5..11cdf13e32fe 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -11,6 +11,11 @@
struct host1x;
struct host1x_syncpt_fence;
+struct host1x_intr_irq_data {
+ struct host1x *host;
+ u32 offset;
+};
+
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 365e6ddbe90f..18f2c92beff8 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -926,8 +926,7 @@ static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
/**
* vga_switcheroo_process_delayed_switch() - helper for delayed switching
*
- * Process a delayed switch if one is pending. DRM drivers should call this
- * from their ->lastclose callback.
+ * Process a delayed switch if one is pending.
*
* Return: 0 on success. -EINVAL if no delayed switch is pending, if the client
* has unregistered in the meantime or if there are other clients blocking the
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 08446c89eff6..f8a56d631242 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -404,6 +404,12 @@ config HID_VIVALDI_COMMON
option so that drivers can use common code to parse the HID
descriptors for vivaldi function row keymap.
+config HID_GOODIX_SPI
+ tristate "Goodix GT7986U SPI HID touchscreen"
+ depends on SPI_MASTER
+ help
+ Support for Goodix GT7986U SPI HID touchscreen device.
+
config HID_GOOGLE_HAMMER
tristate "Google Hammer Keyboard"
select HID_VIVALDI_COMMON
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e40f1ddebbb7..496dab54c73a 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
+obj-$(CONFIG_HID_GOODIX_SPI) += hid-goodix-spi.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
obj-$(CONFIG_HID_GOOGLE_STADIA_FF) += hid-google-stadiaff.o
obj-$(CONFIG_HID_VIVALDI) += hid-vivaldi.o
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 97296f587bc7..1c91be8daedd 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -73,8 +73,6 @@ struct amdtp_hid_data {
};
/* Interface functions between HID LL driver and AMD SFH client */
-void hid_amdtp_set_feature(struct hid_device *hid, char *buf, u32 len, int report_id);
-void hid_amdtp_get_report(struct hid_device *hid, int report_id, int report_type);
int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data);
void amdtp_hid_remove(struct amdtp_cl_data *cli_data);
int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type);
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 621793d92464..db36d87d5634 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -299,8 +299,8 @@ static void amd_sfh_set_ops(struct amd_mp2_dev *mp2)
sfh_interface_init(mp2);
mp2_ops = mp2->mp2_ops;
- mp2_ops->clear_intr = amd_sfh_clear_intr_v2,
- mp2_ops->init_intr = amd_sfh_irq_init_v2,
+ mp2_ops->clear_intr = amd_sfh_clear_intr_v2;
+ mp2_ops->init_intr = amd_sfh_irq_init_v2;
mp2_ops->suspend = amd_sfh_suspend;
mp2_ops->resume = amd_sfh_resume;
mp2_ops->remove = amd_mp2_pci_remove;
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index a272a086c950..8420c227e21b 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -148,7 +148,7 @@ out:
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
{
int ret;
struct hid_bpf_ctx_kern ctx_kern = {
@@ -179,9 +179,7 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
*size = ret;
}
- rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
-
- return rdesc;
+ return krealloc(ctx_kern.data, *size, GFP_KERNEL);
ignore_bpf:
kfree(ctx_kern.data);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index af5cf94f9dea..7e1ae2a2bcc2 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -620,7 +620,7 @@ static void apple_battery_timer_tick(struct timer_list *t)
* MacBook JIS keyboard has wrong logical maximum
* Magic Keyboard JIS has wrong logical maximum
*/
-static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index a282388b7aa5..a4b47319ad8e 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -492,12 +492,19 @@ static void asus_kbd_backlight_work(struct work_struct *work)
*/
static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
u32 value;
int ret;
if (!IS_ENABLED(CONFIG_ASUS_WMI))
return false;
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD &&
+ dmi_check_system(asus_use_hid_led_dmi_ids)) {
+ hid_info(hdev, "using HID for asus::kbd_backlight\n");
+ return false;
+ }
+
ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS,
ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
@@ -1119,7 +1126,7 @@ static const __u8 asus_g752_fixed_rdesc[] = {
0x2A, 0xFF, 0x00, /* Usage Maximum (0xFF) */
};
-static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c
index cf1a562d8523..896304148a87 100644
--- a/drivers/hid/hid-aureal.c
+++ b/drivers/hid/hid-aureal.c
@@ -18,7 +18,7 @@
#include "hid-ids.h"
-static __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index be17af3d9c0c..9f05465358d9 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -99,7 +99,7 @@
* - map previously unused analog trigger data to Z/RZ
* - simplify feature and output descriptor
*/
-static __u8 pid0902_rdesc_fixed[] = {
+static const __u8 pid0902_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
0x09, 0x05, /* Usage (Game Pad) */
0xA1, 0x01, /* Collection (Application) */
@@ -464,12 +464,12 @@ error_hw_stop:
return error;
}
-static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
+static const __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize == PID0902_RDESC_ORIG_SIZE) {
- rdesc = pid0902_rdesc_fixed;
*rsize = sizeof(pid0902_rdesc_fixed);
+ return pid0902_rdesc_fixed;
} else
hid_warn(hid, "unexpected rdesc, please submit for review\n");
return rdesc;
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 549c73b05b8d..a504632febfc 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -22,7 +22,7 @@
* Cherry Cymotion keyboard have an invalid HID report descriptor,
* that needs fixing before we can parse it.
*/
-static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index 99954c6b3242..5776ec2e7159 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -88,8 +88,8 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
-static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *ch_switch12_report_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int *rsize)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
diff --git a/drivers/hid/hid-cmedia.c b/drivers/hid/hid-cmedia.c
index cab42047bc99..528d7f361215 100644
--- a/drivers/hid/hid-cmedia.c
+++ b/drivers/hid/hid-cmedia.c
@@ -26,7 +26,7 @@ MODULE_LICENSE("GPL");
/* Fixed report descriptor of HS-100B audio chip
 * Bit 4 is an absolute microphone mute usage instead of being unassigned.
*/
-static __u8 hs100b_rdesc_fixed[] = {
+static const __u8 hs100b_rdesc_fixed[] = {
0x05, 0x0C, /* Usage Page (Consumer), */
0x09, 0x01, /* Usage (Consumer Control), */
0xA1, 0x01, /* Collection (Application), */
@@ -199,13 +199,13 @@ static struct hid_driver cmhid_driver = {
.input_mapping = cmhid_input_mapping,
};
-static __u8 *cmhid_hs100b_report_fixup(struct hid_device *hid, __u8 *rdesc,
+static const __u8 *cmhid_hs100b_report_fixup(struct hid_device *hid, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize == HS100B_RDESC_ORIG_SIZE) {
hid_info(hid, "Fixing CMedia HS-100B report descriptor\n");
- rdesc = hs100b_rdesc_fixed;
*rsize = sizeof(hs100b_rdesc_fixed);
+ return hs100b_rdesc_fixed;
}
return rdesc;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 988d0acbdf04..30de92d0bf0f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -723,7 +723,7 @@ static void hid_device_release(struct device *dev)
* items, though they are not used yet.
*/
-static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
+static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
{
u8 b;
@@ -880,8 +880,8 @@ static int hid_scan_report(struct hid_device *hid)
{
struct hid_parser *parser;
struct hid_item item;
- __u8 *start = hid->dev_rdesc;
- __u8 *end = start + hid->dev_rsize;
+ const __u8 *start = hid->dev_rdesc;
+ const __u8 *end = start + hid->dev_rsize;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_scan_main,
@@ -946,7 +946,7 @@ static int hid_scan_report(struct hid_device *hid)
* Allocate the device report as read by the bus driver. This function should
* only be called from parse() in ll drivers.
*/
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
{
hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
if (!hid->dev_rdesc)
@@ -1204,10 +1204,10 @@ int hid_open_report(struct hid_device *device)
struct hid_parser *parser;
struct hid_item item;
unsigned int size;
- __u8 *start;
+ const __u8 *start;
__u8 *buf;
- __u8 *end;
- __u8 *next;
+ const __u8 *end;
+ const __u8 *next;
int ret;
int i;
static int (*dispatch_type[])(struct hid_parser *parser,
@@ -1912,6 +1912,31 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
}
EXPORT_SYMBOL_GPL(hid_set_field);
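+/**
+ * hid_find_field() - find a field with a given usage within a report type
+ * @hdev: HID device to search
+ * @report_type: report type of interest (e.g. HID_INPUT_REPORT)
+ * @application: application collection usage the report must belong to
+ * @usage: usage code to look for
+ *
+ * Return: the first matching struct hid_field, or NULL if none is found.
+ */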
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage)
+{
+ struct list_head *report_list = &hdev->report_enum[report_type].report_list;
+ struct hid_report *report;
+ int i, j;
+
+ list_for_each_entry(report, report_list, list) {
+ if (report->application != application)
+ continue;
+
+ for (i = 0; i < report->maxfield; i++) {
+ struct hid_field *field = report->field[i];
+
+ for (j = 0; j < field->maxusage; j++) {
+ if (field->usage[j].hid == usage)
+ return field;
+ }
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hid_find_field);
+
static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
const u8 *data)
{
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 702f50e9841d..62b99f5c3cf8 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -690,8 +690,8 @@ static int corsair_input_mapping(struct hid_device *dev,
* - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
*/
-static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *corsair_mouse_report_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int *rsize)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index 0fa785f52707..5596dd940322 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -103,8 +103,8 @@ static void cougar_fix_g6_mapping(void)
/*
* Constant-friendly rdesc fixup for mouse interface
*/
-static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
(rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 20a0d1315d90..dae2b84a1490 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1094,7 +1094,6 @@ static void cp2112_gpio_poll_callback(struct work_struct *work)
{
struct cp2112_device *dev = container_of(work, struct cp2112_device,
gpio_poll_worker.work);
- struct irq_data *d;
u8 gpio_mask;
u32 irq_type;
int irq, virq, ret;
@@ -1111,12 +1110,10 @@ static void cp2112_gpio_poll_callback(struct work_struct *work)
if (!irq)
continue;
- d = irq_get_irq_data(irq);
- if (!d)
+ irq_type = irq_get_trigger_type(irq);
+ if (!irq_type)
continue;
- irq_type = irqd_get_trigger_type(d);
-
if (gpio_mask & BIT(virq)) {
/* Level High */
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index b952b235e70a..98548201feec 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -67,7 +67,7 @@ static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index c88224a96e9e..84e1e90a266b 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -199,7 +199,7 @@ static inline int drff_init(struct hid_device *hid)
#define PID0011_RDESC_ORIG_SIZE 101
/* Fixed report descriptor for PID 0x011 joystick */
-static __u8 pid0011_rdesc_fixed[] = {
+static const __u8 pid0011_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -228,14 +228,14 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
switch (hdev->product) {
case 0x0011:
if (*rsize == PID0011_RDESC_ORIG_SIZE) {
- rdesc = pid0011_rdesc_fixed;
*rsize = sizeof(pid0011_rdesc_fixed);
+ return pid0011_rdesc_fixed;
}
break;
}
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 5973a3bab29f..defcf91fdd14 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -53,7 +53,7 @@ static void mouse_button_fixup(struct hid_device *hdev,
rdesc[padding_bit + 1] = MOUSE_BUTTONS_MAX - nbuttons;
}
-static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
diff --git a/drivers/hid/hid-gembird.c b/drivers/hid/hid-gembird.c
index c42593fe7116..20a8de766e56 100644
--- a/drivers/hid/hid-gembird.c
+++ b/drivers/hid/hid-gembird.c
@@ -57,7 +57,7 @@ static const __u8 gembird_jpd_fixed_rdesc[] = {
0x81, 0x02, /* Input (Data,Var,Abs) */
};
-static __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
__u8 *new_rdesc;
diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
index 281b3a7187ce..5bbd81248053 100644
--- a/drivers/hid/hid-glorious.c
+++ b/drivers/hid/hid-glorious.c
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
* keyboard HID report, causing keycodes to be misinterpreted.
* Fix this by setting Usage Minimum to 0 in that report.
*/
-static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize == 213 &&
diff --git a/drivers/hid/hid-goodix-spi.c b/drivers/hid/hid-goodix-spi.c
new file mode 100644
index 000000000000..0e59663814dd
--- /dev/null
+++ b/drivers/hid/hid-goodix-spi.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Goodix GT7986U SPI Driver Code for HID.
+ *
+ * Copyright (C) 2024 Goodix, Inc.
+ */
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+
+#define GOODIX_DEV_CONFIRM_ADDR 0x10000
+#define GOODIX_HID_DESC_ADDR 0x1058C
+#define GOODIX_HID_REPORT_DESC_ADDR 0x105AA
+#define GOODIX_HID_SIGN_ADDR 0x10D32
+
+#define GOODIX_HID_GET_REPORT_CMD 0x02
+#define GOODIX_HID_SET_REPORT_CMD 0x03
+
+#define GOODIX_HID_MAX_INBUF_SIZE 128
+#define GOODIX_HID_ACK_READY_FLAG 0x01
+#define GOODIX_HID_REPORT_READY_FLAG 0x80
+
+#define GOODIX_DEV_CONFIRM_VAL 0xAA
+
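+/*
+ * SPI framing: every transfer starts with a one-byte direction flag
+ * (0xF0 write, 0xF1 read) and a 4-byte big-endian register address;
+ * reads carry 3 dummy bytes before the payload.
+ */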
+#define GOODIX_SPI_WRITE_FLAG 0xF0
+#define GOODIX_SPI_READ_FLAG 0xF1
+#define GOODIX_SPI_TRANS_PREFIX_LEN 1
+#define GOODIX_REGISTER_WIDTH 4
+#define GOODIX_SPI_READ_DUMMY_LEN 3
+#define GOODIX_SPI_READ_PREFIX_LEN (GOODIX_SPI_TRANS_PREFIX_LEN + \
+ GOODIX_REGISTER_WIDTH + \
+ GOODIX_SPI_READ_DUMMY_LEN)
+#define GOODIX_SPI_WRITE_PREFIX_LEN (GOODIX_SPI_TRANS_PREFIX_LEN + \
+ GOODIX_REGISTER_WIDTH)
+
+#define GOODIX_CHECKSUM_SIZE sizeof(u16)
+#define GOODIX_NORMAL_RESET_DELAY_MS 150
+
+struct goodix_hid_report_header {
+ u8 flag;
+ __le16 size;
+} __packed;
+#define GOODIX_HID_ACK_HEADER_SIZE sizeof(struct goodix_hid_report_header)
+
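+/*
+ * Package that follows the ack header: a little-endian length (which
+ * counts these 2 length bytes) followed by the report data.
+ */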
+struct goodix_hid_report_package {
+ __le16 size;
+ u8 data[];
+};
+
+#define GOODIX_HID_PKG_LEN_SIZE sizeof(u16)
+#define GOODIX_HID_COOR_DATA_LEN 82
+#define GOODIX_HID_COOR_PKG_LEN (GOODIX_HID_PKG_LEN_SIZE + \
+ GOODIX_HID_COOR_DATA_LEN)
+
+/* power state */
+#define GOODIX_SPI_POWER_ON 0x00
+#define GOODIX_SPI_POWER_SLEEP 0x01
+
+/* flags used to record the current device operating state */
+#define GOODIX_HID_STARTED 0
+
+struct goodix_hid_report_event {
+ struct goodix_hid_report_header hdr;
+ u8 data[GOODIX_HID_COOR_PKG_LEN];
+} __packed;
+
+struct goodix_hid_desc {
+ __le16 desc_length;
+ __le16 bcd_version;
+ __le16 report_desc_length;
+ __le16 report_desc_register;
+ __le16 input_register;
+ __le16 max_input_length;
+ __le16 output_register;
+ __le16 max_output_length;
+ __le16 cmd_register;
+ __le16 data_register;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le32 reserved;
+} __packed;
+
+struct goodix_ts_data {
+ struct device *dev;
+ struct spi_device *spi;
+ struct hid_device *hid;
+ struct goodix_hid_desc hid_desc;
+
+ struct gpio_desc *reset_gpio;
+ u32 hid_report_addr;
+
+ unsigned long flags;
+ /* lock for hid raw request operation */
+ struct mutex hid_request_lock;
+ /* buffer used to store hid report event */
+ u8 *event_buf;
+ u32 hid_max_event_sz;
+ /* buffer used to do spi data transfer */
+ u8 xfer_buf[SZ_2K] ____cacheline_aligned;
+};
+
+static void *goodix_get_event_report(struct goodix_ts_data *ts, u32 addr,
+ u8 *data, size_t len)
+{
+ struct spi_device *spi = to_spi_device(&ts->spi->dev);
+ struct spi_transfer xfers;
+ struct spi_message spi_msg;
+ int error;
+
+	/* buffer format: 0xF1 + addr (4 bytes) + dummy (3 bytes) + data */
+ data[0] = GOODIX_SPI_READ_FLAG;
+ put_unaligned_be32(addr, data + GOODIX_SPI_TRANS_PREFIX_LEN);
+
+ spi_message_init(&spi_msg);
+ memset(&xfers, 0, sizeof(xfers));
+ xfers.tx_buf = data;
+ xfers.rx_buf = data;
+ xfers.len = GOODIX_SPI_READ_PREFIX_LEN + len;
+ spi_message_add_tail(&xfers, &spi_msg);
+
+ error = spi_sync(spi, &spi_msg);
+ if (error) {
+ dev_err(ts->dev, "spi transfer error: %d", error);
+ return NULL;
+ }
+
+ return data + GOODIX_SPI_READ_PREFIX_LEN;
+}
+
+static int goodix_spi_read(struct goodix_ts_data *ts, u32 addr,
+ void *data, size_t len)
+{
+ struct spi_device *spi = to_spi_device(&ts->spi->dev);
+ struct spi_transfer xfers;
+ struct spi_message spi_msg;
+ int error;
+
+ if (GOODIX_SPI_READ_PREFIX_LEN + len > sizeof(ts->xfer_buf)) {
+		dev_err(ts->dev, "read data len exceeds limit %zu",
+ sizeof(ts->xfer_buf) - GOODIX_SPI_READ_PREFIX_LEN);
+ return -EINVAL;
+ }
+
+	/* buffer format: 0xF1 + addr (4 bytes) + dummy (3 bytes) + data */
+ ts->xfer_buf[0] = GOODIX_SPI_READ_FLAG;
+ put_unaligned_be32(addr, ts->xfer_buf + GOODIX_SPI_TRANS_PREFIX_LEN);
+
+ spi_message_init(&spi_msg);
+ memset(&xfers, 0, sizeof(xfers));
+ xfers.tx_buf = ts->xfer_buf;
+ xfers.rx_buf = ts->xfer_buf;
+ xfers.len = GOODIX_SPI_READ_PREFIX_LEN + len;
+ spi_message_add_tail(&xfers, &spi_msg);
+
+ error = spi_sync(spi, &spi_msg);
+ if (error)
+ dev_err(ts->dev, "spi transfer error: %d", error);
+ else
+ memcpy(data, ts->xfer_buf + GOODIX_SPI_READ_PREFIX_LEN, len);
+
+ return error;
+}
+
+static int goodix_spi_write(struct goodix_ts_data *ts, u32 addr,
+ const void *data, size_t len)
+{
+ struct spi_device *spi = to_spi_device(&ts->spi->dev);
+ struct spi_transfer xfers;
+ struct spi_message spi_msg;
+ int error;
+
+ if (GOODIX_SPI_WRITE_PREFIX_LEN + len > sizeof(ts->xfer_buf)) {
+		dev_err(ts->dev, "write data len exceeds limit %zu",
+ sizeof(ts->xfer_buf) - GOODIX_SPI_WRITE_PREFIX_LEN);
+ return -EINVAL;
+ }
+
+	/* buffer format: 0xF0 + addr (4 bytes) + data */
+ ts->xfer_buf[0] = GOODIX_SPI_WRITE_FLAG;
+ put_unaligned_be32(addr, ts->xfer_buf + GOODIX_SPI_TRANS_PREFIX_LEN);
+ memcpy(ts->xfer_buf + GOODIX_SPI_WRITE_PREFIX_LEN, data, len);
+
+ spi_message_init(&spi_msg);
+ memset(&xfers, 0, sizeof(xfers));
+ xfers.tx_buf = ts->xfer_buf;
+ xfers.len = GOODIX_SPI_WRITE_PREFIX_LEN + len;
+ spi_message_add_tail(&xfers, &spi_msg);
+
+ error = spi_sync(spi, &spi_msg);
+ if (error)
+ dev_err(ts->dev, "spi transfer error: %d", error);
+
+ return error;
+}
+
+static int goodix_dev_confirm(struct goodix_ts_data *ts)
+{
+ u8 tx_buf[8], rx_buf[8];
+ int retry = 3;
+ int error;
+
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ usleep_range(4000, 4100);
+
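+	/*
+	 * Handshake: write the 0xAA pattern and read it back; matching
+	 * data confirms the controller has come out of reset.
+	 */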
+ memset(tx_buf, GOODIX_DEV_CONFIRM_VAL, sizeof(tx_buf));
+ while (retry--) {
+ error = goodix_spi_write(ts, GOODIX_DEV_CONFIRM_ADDR,
+ tx_buf, sizeof(tx_buf));
+ if (error)
+ return error;
+
+ error = goodix_spi_read(ts, GOODIX_DEV_CONFIRM_ADDR,
+ rx_buf, sizeof(rx_buf));
+ if (error)
+ return error;
+
+ if (!memcmp(tx_buf, rx_buf, sizeof(tx_buf)))
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(ts->dev, "device confirm failed, rx_buf: %*ph", 8, rx_buf);
+ return -EINVAL;
+}
+
+/**
+ * goodix_hid_parse() - hid-core .parse() callback
+ * @hid: hid device instance
+ *
+ * This function is called during hid_add_device().
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int goodix_hid_parse(struct hid_device *hid)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+ u16 rsize;
+ int error;
+
+ rsize = le16_to_cpu(ts->hid_desc.report_desc_length);
+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ dev_err(ts->dev, "invalid report desc size, %d", rsize);
+ return -EINVAL;
+ }
+
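+	/* __free(kfree) releases rdesc automatically on every return path */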
+ u8 *rdesc __free(kfree) = kzalloc(rsize, GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ error = goodix_spi_read(ts, GOODIX_HID_REPORT_DESC_ADDR, rdesc, rsize);
+ if (error) {
+ dev_err(ts->dev, "failed get report desc, %d", error);
+ return error;
+ }
+
+ error = hid_parse_report(hid, rdesc, rsize);
+ if (error) {
+ dev_err(ts->dev, "failed parse report, %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
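+/*
+ * Report length in bytes: the bit count rounded up to whole bytes, plus
+ * one byte for the report ID on numbered reports, plus the 2-byte
+ * package length prefix.
+ */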
+static int goodix_hid_get_report_length(struct hid_report *report)
+{
+ return ((report->size - 1) >> 3) + 1 +
+ report->device->report_enum[report->type].numbered + 2;
+}
+
+static void goodix_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ unsigned int *max)
+{
+ struct hid_report *report;
+ unsigned int size;
+
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+ size = goodix_hid_get_report_length(report);
+ if (*max < size)
+ *max = size;
+ }
+}
+
+static int goodix_hid_start(struct hid_device *hid)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+ unsigned int bufsize = GOODIX_HID_COOR_PKG_LEN;
+ u32 report_size;
+
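+	/*
+	 * Grow the event buffer so it can hold the largest report of any
+	 * type, plus the SPI read prefix and the ack header.
+	 */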
+ goodix_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+ goodix_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+ goodix_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+ report_size = GOODIX_SPI_READ_PREFIX_LEN +
+ GOODIX_HID_ACK_HEADER_SIZE + bufsize;
+ if (report_size <= ts->hid_max_event_sz)
+ return 0;
+
+ ts->event_buf = devm_krealloc(ts->dev, ts->event_buf,
+ report_size, GFP_KERNEL);
+ if (!ts->event_buf)
+ return -ENOMEM;
+
+ ts->hid_max_event_sz = report_size;
+ return 0;
+}
+
+static void goodix_hid_stop(struct hid_device *hid)
+{
+ hid->claimed = 0;
+}
+
+static int goodix_hid_open(struct hid_device *hid)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+
+ set_bit(GOODIX_HID_STARTED, &ts->flags);
+ return 0;
+}
+
+static void goodix_hid_close(struct hid_device *hid)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+
+ clear_bit(GOODIX_HID_STARTED, &ts->flags);
+}
+
+/* Poll the ack flag and return the response data length via @resp_len */
+static int goodix_hid_check_ack_status(struct goodix_ts_data *ts, u32 *resp_len)
+{
+ struct goodix_hid_report_header hdr;
+ int retry = 20;
+ int error;
+ int len;
+
+ while (retry--) {
+ /*
+ * 3 bytes of hid request response data
+ * - byte 0: Ack flag, value of 1 for data ready
+ * - bytes 1-2: Response data length
+ */
+ error = goodix_spi_read(ts, ts->hid_report_addr,
+ &hdr, sizeof(hdr));
+ if (!error && (hdr.flag & GOODIX_HID_ACK_READY_FLAG)) {
+ len = le16_to_cpu(hdr.size);
+ if (len < GOODIX_HID_PKG_LEN_SIZE) {
+				dev_err(ts->dev, "hdr.size too short: %d", len);
+ return -EINVAL;
+ }
+ *resp_len = len;
+ return 0;
+ }
+
+		/* Wait 10 ms before the next attempt */
+ usleep_range(10000, 11000);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * goodix_hid_get_raw_report() - Process hidraw GET REPORT operation
+ * @hid: hid device instance
+ * @reportnum: Report ID
+ * @buf: Buffer to store the report data
+ * @len: Length of the report data
+ * @report_type: Report type
+ *
+ * Handler for hid_ll_driver.raw_request GET REPORT requests, including
+ * those issued via the HIDRAW ioctl. The transmitted data follows the
+ * standard i2c-hid protocol with a device-specific header.
+ *
+ * Return: The length of the data in @buf on success, or a negative error code
+ */
+static int goodix_hid_get_raw_report(struct hid_device *hid,
+ unsigned char reportnum,
+ u8 *buf, size_t len,
+ unsigned char report_type)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+ u16 data_register = le16_to_cpu(ts->hid_desc.data_register);
+ u16 cmd_register = le16_to_cpu(ts->hid_desc.cmd_register);
+ u8 tmp_buf[GOODIX_HID_MAX_INBUF_SIZE];
+ int tx_len = 0, args_len = 0;
+ u32 response_data_len;
+ u8 args[3];
+ int error;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ if (reportnum == 3) {
+ /* Get win8 signature data */
+ error = goodix_spi_read(ts, GOODIX_HID_SIGN_ADDR, buf, len);
+ if (error) {
+ dev_err(ts->dev, "failed get win8 sign: %d", error);
+ return -EINVAL;
+ }
+ return len;
+ }
+
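+	/*
+	 * Following the i2c-hid convention, report IDs that do not fit in
+	 * the 4-bit command field (>= 0x0F) are sent as an extra argument
+	 * byte, with 0x0F used as a sentinel in the command byte.
+	 */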
+ if (reportnum >= 0x0F)
+ args[args_len++] = reportnum;
+
+ put_unaligned_le16(data_register, args + args_len);
+ args_len += sizeof(data_register);
+
+	/* Clear the 3 bytes of hid ack header data */
+ memset(tmp_buf, 0, GOODIX_HID_ACK_HEADER_SIZE);
+ tx_len += GOODIX_HID_ACK_HEADER_SIZE;
+
+ put_unaligned_le16(cmd_register, tmp_buf + tx_len);
+ tx_len += sizeof(cmd_register);
+
+ tmp_buf[tx_len] = (report_type == HID_FEATURE_REPORT ? 0x03 : 0x01) << 4;
+ tmp_buf[tx_len] |= reportnum >= 0x0F ? 0x0F : reportnum;
+ tx_len++;
+
+ tmp_buf[tx_len++] = GOODIX_HID_GET_REPORT_CMD;
+
+ memcpy(tmp_buf + tx_len, args, args_len);
+ tx_len += args_len;
+
+	/* Step 1: write report request info */
+ error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len);
+ if (error) {
+ dev_err(ts->dev, "failed send read feature cmd, %d", error);
+ return error;
+ }
+
+	/* No need to read response data */
+ if (!len)
+ return 0;
+
+	/* Step 2: check response data status */
+ error = goodix_hid_check_ack_status(ts, &response_data_len);
+ if (error)
+ return error;
+
+ len = min(len, response_data_len - GOODIX_HID_PKG_LEN_SIZE);
+	/* Step 3: read response data (skip 2 bytes of hid pkg length) */
+ error = goodix_spi_read(ts, ts->hid_report_addr +
+ GOODIX_HID_ACK_HEADER_SIZE +
+ GOODIX_HID_PKG_LEN_SIZE, buf, len);
+ if (error) {
+ dev_err(ts->dev, "failed read hid response data, %d", error);
+ return error;
+ }
+
+ if (buf[0] != reportnum) {
+ dev_err(ts->dev, "incorrect report (%d vs %d expected)",
+ buf[0], reportnum);
+ return -EINVAL;
+ }
+ return len;
+}
+
+/**
+ * goodix_hid_set_raw_report() - process hidraw SET REPORT operation
+ * @hid: HID device
+ * @reportnum: Report ID
+ * @buf: Buffer for communication
+ * @len: Length of data in the buffer
+ * @report_type: Report type
+ *
+ * Handler for hid_ll_driver.raw_request SET REPORT requests, including
+ * those issued via the HIDRAW ioctl. The transmitted data follows the
+ * standard i2c-hid protocol with a device-specific header.
+ *
+ * Return: The length of the data sent on success, or a negative error code
+ */
+static int goodix_hid_set_raw_report(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char report_type)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+ u16 data_register = le16_to_cpu(ts->hid_desc.data_register);
+ u16 cmd_register = le16_to_cpu(ts->hid_desc.cmd_register);
+ int tx_len = 0, args_len = 0;
+ u8 tmp_buf[GOODIX_HID_MAX_INBUF_SIZE];
+ u8 args[5];
+ int error;
+
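+	/* As in the GET path, report IDs >= 0x0F travel as an extra argument byte */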
+ if (reportnum >= 0x0F) {
+ args[args_len++] = reportnum;
+ reportnum = 0x0F;
+ }
+
+ put_unaligned_le16(data_register, args + args_len);
+ args_len += sizeof(data_register);
+
+ put_unaligned_le16(GOODIX_HID_PKG_LEN_SIZE + len, args + args_len);
+ args_len += GOODIX_HID_PKG_LEN_SIZE;
+
+	/* Clear the 3 bytes of hid ack header data */
+ memset(tmp_buf, 0, GOODIX_HID_ACK_HEADER_SIZE);
+ tx_len += GOODIX_HID_ACK_HEADER_SIZE;
+
+ put_unaligned_le16(cmd_register, tmp_buf + tx_len);
+ tx_len += sizeof(cmd_register);
+
+ tmp_buf[tx_len++] = ((report_type == HID_FEATURE_REPORT ? 0x03 : 0x02) << 4) | reportnum;
+ tmp_buf[tx_len++] = GOODIX_HID_SET_REPORT_CMD;
+
+ memcpy(tmp_buf + tx_len, args, args_len);
+ tx_len += args_len;
+
+ memcpy(tmp_buf + tx_len, buf, len);
+ tx_len += len;
+
+ error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len);
+ if (error) {
+ dev_err(ts->dev, "failed send report: %*ph", tx_len, tmp_buf);
+ return error;
+ }
+ return len;
+}
+
+static int goodix_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct goodix_ts_data *ts = hid->driver_data;
+ int error = -EINVAL;
+
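+	/* guard(mutex) drops the lock automatically on every return path */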
+ guard(mutex)(&ts->hid_request_lock);
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ error = goodix_hid_get_raw_report(hid, reportnum, buf,
+ len, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ if (buf[0] == reportnum)
+ error = goodix_hid_set_raw_report(hid, reportnum,
+ buf, len, rtype);
+ break;
+ default:
+ break;
+ }
+
+ return error;
+}
+
+static struct hid_ll_driver goodix_hid_ll_driver = {
+ .parse = goodix_hid_parse,
+ .start = goodix_hid_start,
+ .stop = goodix_hid_stop,
+ .open = goodix_hid_open,
+ .close = goodix_hid_close,
+ .raw_request = goodix_hid_raw_request
+};
+
+static irqreturn_t goodix_hid_irq(int irq, void *data)
+{
+ struct goodix_ts_data *ts = data;
+ struct goodix_hid_report_event *event;
+ struct goodix_hid_report_package *pkg;
+ u16 report_size;
+
+ if (!test_bit(GOODIX_HID_STARTED, &ts->flags))
+ return IRQ_HANDLED;
+ /*
+ * First, read buffer with space for header and coordinate package:
+ * - event header = 3 bytes
+ * - coordinate event = GOODIX_HID_COOR_PKG_LEN bytes
+ *
+ * If the data size info in the event header exceeds
+ * GOODIX_HID_COOR_PKG_LEN, it means that there are other packages
+ * besides the coordinate package.
+ */
+ event = goodix_get_event_report(ts, ts->hid_report_addr, ts->event_buf,
+ GOODIX_HID_ACK_HEADER_SIZE +
+ GOODIX_HID_COOR_PKG_LEN);
+ if (!event) {
+ dev_err(ts->dev, "failed get coordinate data");
+ return IRQ_HANDLED;
+ }
+
+	/* Check the coordinate data valid flag */
+ if (event->hdr.flag != GOODIX_HID_REPORT_READY_FLAG)
+ return IRQ_HANDLED;
+
+ pkg = (struct goodix_hid_report_package *)event->data;
+ if (le16_to_cpu(pkg->size) < GOODIX_HID_PKG_LEN_SIZE) {
+ dev_err(ts->dev, "invalid coordinate event package size, %d",
+ le16_to_cpu(pkg->size));
+ return IRQ_HANDLED;
+ }
+ hid_input_report(ts->hid, HID_INPUT_REPORT, pkg->data,
+ le16_to_cpu(pkg->size) - GOODIX_HID_PKG_LEN_SIZE, 1);
+
+ report_size = le16_to_cpu(event->hdr.size);
+ /* Check if there are other packages */
+ if (report_size <= GOODIX_HID_COOR_PKG_LEN)
+ return IRQ_HANDLED;
+
+ if (report_size >= ts->hid_max_event_sz) {
+		dev_err(ts->dev, "package size exceeds limit %d vs %d",
+ report_size, ts->hid_max_event_sz);
+ return IRQ_HANDLED;
+ }
+
+ /* Read the package behind the coordinate data */
+ pkg = goodix_get_event_report(ts, ts->hid_report_addr + sizeof(*event),
+ ts->event_buf,
+ report_size - GOODIX_HID_COOR_PKG_LEN);
+ if (!pkg) {
+ dev_err(ts->dev, "failed read attachment data content");
+ return IRQ_HANDLED;
+ }
+
+ hid_input_report(ts->hid, HID_INPUT_REPORT, pkg->data,
+ le16_to_cpu(pkg->size) - GOODIX_HID_PKG_LEN_SIZE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int goodix_hid_init(struct goodix_ts_data *ts)
+{
+ struct hid_device *hid;
+ int error;
+
+ /* Get hid descriptor */
+ error = goodix_spi_read(ts, GOODIX_HID_DESC_ADDR, &ts->hid_desc,
+ sizeof(ts->hid_desc));
+ if (error) {
+ dev_err(ts->dev, "failed get hid desc, %d", error);
+ return error;
+ }
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->driver_data = ts;
+ hid->ll_driver = &goodix_hid_ll_driver;
+ hid->bus = BUS_SPI;
+ hid->dev.parent = &ts->spi->dev;
+
+ hid->version = le16_to_cpu(ts->hid_desc.bcd_version);
+ hid->vendor = le16_to_cpu(ts->hid_desc.vendor_id);
+ hid->product = le16_to_cpu(ts->hid_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-gdix",
+ hid->vendor, hid->product);
+
+ error = hid_add_device(hid);
+ if (error) {
+ dev_err(ts->dev, "failed add hid device, %d", error);
+ hid_destroy_device(hid);
+ return error;
+ }
+
+ ts->hid = hid;
+ return 0;
+}
+
+static int goodix_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct goodix_ts_data *ts;
+ int error;
+
+ /* init spi_device */
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ error = spi_setup(spi);
+ if (error)
+ return error;
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ mutex_init(&ts->hid_request_lock);
+ spi_set_drvdata(spi, ts);
+ ts->spi = spi;
+ ts->dev = dev;
+ ts->hid_max_event_sz = GOODIX_SPI_READ_PREFIX_LEN +
+ GOODIX_HID_ACK_HEADER_SIZE + GOODIX_HID_COOR_PKG_LEN;
+ ts->event_buf = devm_kmalloc(dev, ts->hid_max_event_sz, GFP_KERNEL);
+ if (!ts->event_buf)
+ return -ENOMEM;
+
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ts->reset_gpio),
+ "failed to request reset gpio\n");
+
+ error = device_property_read_u32(dev, "goodix,hid-report-addr",
+ &ts->hid_report_addr);
+ if (error)
+ return dev_err_probe(dev, error,
+ "failed get hid report addr\n");
+
+ error = goodix_dev_confirm(ts);
+ if (error)
+ return error;
+
+	/* Wait 150 ms for the firmware to fully boot */
+ msleep(GOODIX_NORMAL_RESET_DELAY_MS);
+
+ error = goodix_hid_init(ts);
+ if (error) {
+ dev_err(dev, "failed init hid device");
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&ts->spi->dev, ts->spi->irq,
+ NULL, goodix_hid_irq, IRQF_ONESHOT,
+ "goodix_spi_hid", ts);
+ if (error) {
+ dev_err(ts->dev, "could not register interrupt, irq = %d, %d",
+ ts->spi->irq, error);
+ goto err_destroy_hid;
+ }
+
+ return 0;
+
+err_destroy_hid:
+ hid_destroy_device(ts->hid);
+ return error;
+}
+
+static void goodix_spi_remove(struct spi_device *spi)
+{
+ struct goodix_ts_data *ts = spi_get_drvdata(spi);
+
+ disable_irq(spi->irq);
+ hid_destroy_device(ts->hid);
+}
+
+static int goodix_spi_set_power(struct goodix_ts_data *ts, int power_state)
+{
+ u8 power_control_cmd[] = {0x00, 0x00, 0x00, 0x87, 0x02, 0x00, 0x08};
+ int error;
+
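+	/* Vendor-defined power command; byte 5 carries the power state */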
+ /* value 0 for power on, 1 for power sleep */
+ power_control_cmd[5] = power_state;
+
+ guard(mutex)(&ts->hid_request_lock);
+ error = goodix_spi_write(ts, ts->hid_report_addr, power_control_cmd,
+ sizeof(power_control_cmd));
+ if (error) {
+ dev_err(ts->dev, "failed set power mode: %s",
+ power_state == GOODIX_SPI_POWER_ON ? "on" : "sleep");
+ return error;
+ }
+ return 0;
+}
+
+static int goodix_spi_suspend(struct device *dev)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+
+ disable_irq(ts->spi->irq);
+ return goodix_spi_set_power(ts, GOODIX_SPI_POWER_SLEEP);
+}
+
+static int goodix_spi_resume(struct device *dev)
+{
+ struct goodix_ts_data *ts = dev_get_drvdata(dev);
+
+ enable_irq(ts->spi->irq);
+ return goodix_spi_set_power(ts, GOODIX_SPI_POWER_ON);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(goodix_spi_pm_ops,
+ goodix_spi_suspend, goodix_spi_resume);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id goodix_spi_acpi_match[] = {
+ { "GXTS7986" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goodix_spi_acpi_match);
+#endif
+
+static const struct spi_device_id goodix_spi_ids[] = {
+ { "gt7986u" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, goodix_spi_ids);
+
+static struct spi_driver goodix_spi_driver = {
+ .driver = {
+ .name = "goodix-spi-hid",
+ .acpi_match_table = ACPI_PTR(goodix_spi_acpi_match),
+ .pm = pm_sleep_ptr(&goodix_spi_pm_ops),
+ },
+ .probe = goodix_spi_probe,
+ .remove = goodix_spi_remove,
+ .id_table = goodix_spi_ids,
+};
+module_spi_driver(goodix_spi_driver);
+
+MODULE_DESCRIPTION("Goodix SPI driver for HID touchscreen");
+MODULE_AUTHOR("Goodix, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 6e4ebc349e45..4e79fafeeafa 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -418,38 +418,15 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
-static bool hammer_has_usage(struct hid_device *hdev, unsigned int report_type,
- unsigned application, unsigned usage)
-{
- struct hid_report_enum *re = &hdev->report_enum[report_type];
- struct hid_report *report;
- int i, j;
-
- list_for_each_entry(report, &re->report_list, list) {
- if (report->application != application)
- continue;
-
- for (i = 0; i < report->maxfield; i++) {
- struct hid_field *field = report->field[i];
-
- for (j = 0; j < field->maxusage; j++)
- if (field->usage[j].hid == usage)
- return true;
- }
- }
-
- return false;
-}
-
static bool hammer_has_folded_event(struct hid_device *hdev)
{
- return hammer_has_usage(hdev, HID_INPUT_REPORT,
+ return !!hid_find_field(hdev, HID_INPUT_REPORT,
HID_GD_KEYBOARD, HID_USAGE_KBD_FOLDED);
}
static bool hammer_has_backlight_control(struct hid_device *hdev)
{
- return hammer_has_usage(hdev, HID_OUTPUT_REPORT,
+ return !!hid_find_field(hdev, HID_OUTPUT_REPORT,
HID_GD_KEYBOARD, HID_AD_BRIGHTNESS);
}
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 1f014ac54e14..d13543517a6c 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -27,7 +27,7 @@
* to the boot interface.
*/
-static __u8 holtek_kbd_rdesc_fixed[] = {
+static const __u8 holtek_kbd_rdesc_fixed[] = {
/* Original report descriptor, with reduced number of consumer usages */
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x80, /* Usage (Sys Control), */
@@ -102,14 +102,14 @@ static __u8 holtek_kbd_rdesc_fixed[] = {
0xC0, /* End Collection */
};
-static __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
- rdesc = holtek_kbd_rdesc_fixed;
*rsize = sizeof(holtek_kbd_rdesc_fixed);
+ return holtek_kbd_rdesc_fixed;
}
return rdesc;
}
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 343730c28e2d..b618a1646c13 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -29,8 +29,8 @@
* - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse
*/
-static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *holtek_mouse_report_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int *rsize)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 781c5aa29859..86820a3d9766 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -417,24 +417,8 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
-#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
-#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
-#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
-#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
-#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
-#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
-#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
-#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C
-#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
-#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
-#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
-#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
-#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5
-#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
-#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
-#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02
#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81
#define USB_VENDOR_ID_ELECOM 0x056e
@@ -521,6 +505,7 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define I2C_VENDOR_ID_GOODIX 0x27c6
+#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0
#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -810,6 +795,7 @@
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
+#define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae
#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c9094a4f281e..fda9dce3da99 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -373,14 +373,6 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -391,32 +383,13 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_AVOID_QUERY },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
HID_BATTERY_QUIRK_AVOID_QUERY },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
- HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
HID_BATTERY_QUIRK_AVOID_QUERY },
+ /*
+	 * Elan I2C-HID touchscreens all seem to report a non-present battery,
+	 * so set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices.
+ */
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 6a7281bc27c9..8e42780a2663 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -13,7 +13,7 @@
#define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0)
-static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
+static const __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
index a972943baaea..b9abd53a5864 100644
--- a/drivers/hid/hid-keytouch.c
+++ b/drivers/hid/hid-keytouch.c
@@ -16,7 +16,7 @@
/* Replace the broken report descriptor of this device with
 * a default one */
-static __u8 keytouch_fixed_rdesc[] = {
+static const __u8 keytouch_fixed_rdesc[] = {
0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15,
0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08,
0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91,
@@ -24,15 +24,13 @@ static __u8 keytouch_fixed_rdesc[] = {
0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0
};
-static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
hid_info(hdev, "fixing up Keytouch IEC report descriptor\n");
- rdesc = keytouch_fixed_rdesc;
*rsize = sizeof(keytouch_fixed_rdesc);
-
- return rdesc;
+ return keytouch_fixed_rdesc;
}
static const struct hid_device_id keytouch_devices[] = {
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index ca2ba3da2458..32344331282f 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -466,7 +466,7 @@ static __u8 *kye_tablet_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
return rdesc;
}
-static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index e5e72aa5260a..3b0c779ce8f7 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -133,7 +133,7 @@ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = {
0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
};
-static __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index cfe2f4f6e93f..9a2cfa018bd3 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -58,7 +58,7 @@
* These descriptors remove the combined Y axis and instead report
* separate throttle (Y) and brake (RZ).
*/
-static __u8 df_rdesc_fixed[] = {
+static const __u8 df_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -124,7 +124,7 @@ static __u8 df_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 dfp_rdesc_fixed[] = {
+static const __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -172,7 +172,7 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 fv_rdesc_fixed[] = {
+static const __u8 fv_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -239,7 +239,7 @@ static __u8 fv_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 momo_rdesc_fixed[] = {
+static const __u8 momo_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -285,7 +285,7 @@ static __u8 momo_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 momo2_rdesc_fixed[] = {
+static const __u8 momo2_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -333,7 +333,7 @@ static __u8 momo2_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 ffg_rdesc_fixed[] = {
+static const __u8 ffg_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -379,7 +379,7 @@ static __u8 ffg_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 fg_rdesc_fixed[] = {
+static const __u8 fg_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -427,7 +427,7 @@ static __u8 fg_rdesc_fixed[] = {
 * above the logical maximum described in the descriptor. This extends
 * the original logical maximum value of 0x28c to 0x104d.
*/
-static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
@@ -453,8 +453,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == FG_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Wingman Formula GP report descriptor\n");
- rdesc = fg_rdesc_fixed;
*rsize = sizeof(fg_rdesc_fixed);
+ return fg_rdesc_fixed;
} else {
hid_info(hdev,
"rdesc size test failed for formula gp\n");
@@ -466,8 +466,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == FFG_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Wingman Formula Force GP report descriptor\n");
- rdesc = ffg_rdesc_fixed;
*rsize = sizeof(ffg_rdesc_fixed);
+ return ffg_rdesc_fixed;
}
break;
@@ -476,8 +476,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == DF_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force report descriptor\n");
- rdesc = df_rdesc_fixed;
*rsize = sizeof(df_rdesc_fixed);
+ return df_rdesc_fixed;
}
break;
@@ -485,8 +485,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == MOMO_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Force (Red) report descriptor\n");
- rdesc = momo_rdesc_fixed;
*rsize = sizeof(momo_rdesc_fixed);
+ return momo_rdesc_fixed;
}
break;
@@ -494,8 +494,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Momo Racing Force (Black) report descriptor\n");
- rdesc = momo2_rdesc_fixed;
*rsize = sizeof(momo2_rdesc_fixed);
+ return momo2_rdesc_fixed;
}
break;
@@ -503,8 +503,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == FV_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Formula Vibration report descriptor\n");
- rdesc = fv_rdesc_fixed;
*rsize = sizeof(fv_rdesc_fixed);
+ return fv_rdesc_fixed;
}
break;
@@ -512,8 +512,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force Pro report descriptor\n");
- rdesc = dfp_rdesc_fixed;
*rsize = sizeof(dfp_rdesc_fixed);
+ return dfp_rdesc_fixed;
}
break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 400d70e6dbe2..0e33fa0eb8db 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3767,8 +3767,8 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
-static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc,
- unsigned int *rsize)
+static const u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c
index aea46e522008..fe7576458afa 100644
--- a/drivers/hid/hid-macally.c
+++ b/drivers/hid/hid-macally.c
@@ -18,8 +18,8 @@ MODULE_LICENSE("GPL");
* The Macally ikey keyboard says that its logical and usage maximums are both
* 101, but the power key is 102 and the equals key is 103
*/
-static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
hid_info(hdev,
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 2eb285b97fc0..8a73b59e0827 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -907,8 +907,8 @@ static void magicmouse_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
/*
* Change the usage from:
diff --git a/drivers/hid/hid-maltron.c b/drivers/hid/hid-maltron.c
index caba0def938c..f0aad1ba2e6d 100644
--- a/drivers/hid/hid-maltron.c
+++ b/drivers/hid/hid-maltron.c
@@ -22,7 +22,7 @@
#include "hid-ids.h"
/* The original buggy USB descriptor */
-static u8 maltron_rdesc_o[] = {
+static const u8 maltron_rdesc_o[] = {
0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
0x09, 0x80, /* Usage (Sys Control) */
0xA1, 0x01, /* Collection (Application) */
@@ -79,7 +79,7 @@ static u8 maltron_rdesc_o[] = {
};
/* The patched descriptor, allowing media key events to be accepted as valid */
-static u8 maltron_rdesc[] = {
+static const u8 maltron_rdesc[] = {
0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
0x09, 0x80, /* Usage (Sys Control) */
0xA1, 0x01, /* Collection (Application) */
@@ -137,8 +137,8 @@ static u8 maltron_rdesc[] = {
0xC0 /* End Collection */
};
-static __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
if (*rsize == sizeof(maltron_rdesc_o) &&
!memcmp(maltron_rdesc_o, rdesc, sizeof(maltron_rdesc_o))) {
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 4cf0fcddb379..18ac21c0bcb2 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -56,7 +56,7 @@ struct xb1s_ff_report {
__u8 loop_count;
} __packed;
-static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct ms_data *ms = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index 989681f73d77..3089be990afe 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -18,7 +18,7 @@
#include "hid-ids.h"
-static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 99812c0f830b..638e36c6d0f1 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -212,6 +212,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
#define MT_CLS_GOOGLE 0x0111
#define MT_CLS_RAZER_BLADE_STEALTH 0x0112
#define MT_CLS_SMART_TECH 0x0113
+#define MT_CLS_SIS 0x0457
#define MT_DEFAULT_MAXCONTACT 10
#define MT_MAX_MAXCONTACT 250
@@ -396,6 +397,11 @@ static const struct mt_class mt_classes[] = {
MT_QUIRK_CONTACT_CNT_ACCURATE |
MT_QUIRK_SEPARATE_APP_REPORT,
},
+ { .name = MT_CLS_SIS,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE,
+ },
{ }
};
@@ -1441,12 +1447,13 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
-static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *size)
{
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+ hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
+ hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
@@ -1811,6 +1818,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
mt_fix_const_fields(hdev, HID_DG_CONTACTID);
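+	/*
+	 * HID_QUIRK_NOGET skips the initial report request, which these
+	 * panels presumably do not handle well.
+	 */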
+ if (hdev->vendor == USB_VENDOR_ID_SIS_TOUCH)
+ hdev->quirks |= HID_QUIRK_NOGET;
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
return ret;
@@ -2065,7 +2075,10 @@ static const struct hid_device_id mt_devices[] = {
I2C_DEVICE_ID_GOODIX_01E8) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
- I2C_DEVICE_ID_GOODIX_01E8) },
+ I2C_DEVICE_ID_GOODIX_01E9) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+ I2C_DEVICE_ID_GOODIX_01E0) },
/* GoodTouch panels */
{ .driver_data = MT_CLS_NSMU,
@@ -2113,6 +2126,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X12_TAB) },
+ /* Lenovo X12 TAB Gen 2 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X12_TAB2) },
+
/* Logitech devices */
{ .driver_data = MT_CLS_NSMU,
HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
@@ -2275,6 +2294,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
USB_DEVICE_ID_GOOGLE_WHISKERS) },
+ /* sis */
+ { .driver_data = MT_CLS_SIS,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
+ HID_ANY_ID) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-nti.c b/drivers/hid/hid-nti.c
index 1952e9ca5f45..03f7dd491228 100644
--- a/drivers/hid/hid-nti.c
+++ b/drivers/hid/hid-nti.c
@@ -29,7 +29,7 @@ MODULE_DESCRIPTION("HID driver for Network Technologies USB-SUN keyboard adapter
/*
* NTI Sun keyboard adapter has wrong logical maximum in report descriptor
*/
-static __u8 *nti_usbsun_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *nti_usbsun_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 99e3b06a8331..f27297269a7f 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -22,7 +22,7 @@
#include "hid-ids.h"
-static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 5e47634bb07d..1a986f077ce1 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -19,7 +19,7 @@
#include "hid-ids.h"
/* Petalynx Maxter Remote has maximum for consumer page set too low */
-static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
diff --git a/drivers/hid/hid-picolcd_backlight.c b/drivers/hid/hid-picolcd_backlight.c
index 08d16917eb60..4b43b64537a3 100644
--- a/drivers/hid/hid-picolcd_backlight.c
+++ b/drivers/hid/hid-picolcd_backlight.c
@@ -31,7 +31,8 @@ static int picolcd_set_brightness(struct backlight_device *bdev)
data->lcd_brightness = bdev->props.brightness & 0x0ff;
data->lcd_power = bdev->props.power;
spin_lock_irqsave(&data->lock, flags);
- hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+ hid_set_field(report->field[0], 0,
+ data->lcd_power == BACKLIGHT_POWER_ON ? data->lcd_brightness : 0);
if (!(data->status & PICOLCD_FAILED))
hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT);
spin_unlock_irqrestore(&data->lock, flags);
@@ -94,7 +95,7 @@ void picolcd_suspend_backlight(struct picolcd_data *data)
if (!data->backlight)
return;
- data->backlight->props.power = FB_BLANK_POWERDOWN;
+ data->backlight->props.power = BACKLIGHT_POWER_OFF;
picolcd_set_brightness(data->backlight);
data->lcd_power = data->backlight->props.power = bl_power;
}
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index e7c309cfe3a0..0d90d7ee693c 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -2143,6 +2143,26 @@ static void dualshock4_output_worker(struct work_struct *work)
spin_lock_irqsave(&ds4->base.lock, flags);
+ /*
+ * Some 3rd party gamepads expect updates to rumble and lightbar
+ * together, and setting one may cancel the other.
+ *
+	 * Maximise compatibility by always sending rumble and lightbar
+	 * updates together, even when only one has been scheduled, resulting
+	 * in:
+	 *
+	 * ds4->valid_flag0 >= 0x03
+	 *
+	 * Any further update bits, such as 0x04 for lightbar blinking, will
+	 * be or'd on top of this as before.
+	 */
+ if (ds4->update_rumble || ds4->update_lightbar) {
+ ds4->update_rumble = true; /* 0x01 */
+ ds4->update_lightbar = true; /* 0x02 */
+ }
+
if (ds4->update_rumble) {
/* Select classic rumble style haptics and enable it. */
common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR;
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 757361593e52..3d08c190a935 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -728,7 +728,7 @@ static int pcmidi_snd_terminate(struct pcmidi_snd *pm)
/*
 * The report id in the PC-MIDI report descriptor is wrong.
*/
-static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize == 178 &&
diff --git a/drivers/hid/hid-pxrc.c b/drivers/hid/hid-pxrc.c
index b0e517f9cde7..71fe0c06ddcd 100644
--- a/drivers/hid/hid-pxrc.c
+++ b/drivers/hid/hid-pxrc.c
@@ -17,7 +17,7 @@ struct pxrc_priv {
bool alternate;
};
-static __u8 pxrc_rdesc_fixed[] = {
+static const __u8 pxrc_rdesc_fixed[] = {
0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
0x09, 0x04, // Usage (Joystick)
0xA1, 0x01, // Collection (Application)
@@ -42,8 +42,8 @@ static __u8 pxrc_rdesc_fixed[] = {
0xC0, // End Collection
};
-static __u8 *pxrc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *pxrc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
hid_info(hdev, "fixing up PXRC report descriptor\n");
*rsize = sizeof(pxrc_rdesc_fixed);
diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
index 07d803513f27..20d28ed75c1e 100644
--- a/drivers/hid/hid-redragon.c
+++ b/drivers/hid/hid-redragon.c
@@ -33,7 +33,7 @@
* key codes are generated.
*/
-static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 102 && rdesc[100] == 0x81 && rdesc[101] == 0x00) {
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 85ac8def368f..6fe7c087c594 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -66,7 +66,7 @@ static int saitek_probe(struct hid_device *hdev,
return 0;
}
-static __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct saitek_sc *ssc = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index d4e27142245c..f3908a9e04e6 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -469,7 +469,7 @@ static int samsung_universal_kbd_input_mapping(struct hid_device *hdev,
return 1;
}
-static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (hdev->product == USB_DEVICE_ID_SAMSUNG_IR_REMOTE && hid_is_usb(hdev))
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
index 710766f6839d..4fbec5fd87ce 100644
--- a/drivers/hid/hid-semitek.c
+++ b/drivers/hid/hid-semitek.c
@@ -11,8 +11,8 @@
#include "hid-ids.h"
-static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
/* In the report descriptor for interface 2, fix the incorrect
description of report ID 0x04 (the report contains a
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index bd400f6b472b..66f0675df24b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -732,7 +732,7 @@ static int hid_sensor_custom_dev_if_add(struct hid_sensor_custom *sensor_inst)
sensor_inst->custom_dev.minor = MISC_DYNAMIC_MINOR;
sensor_inst->custom_dev.name = dev_name(&sensor_inst->pdev->dev);
- sensor_inst->custom_dev.fops = &hid_sensor_custom_fops,
+ sensor_inst->custom_dev.fops = &hid_sensor_custom_fops;
ret = misc_register(&sensor_inst->custom_dev);
if (ret) {
kfifo_free(&sensor_inst->data_fifo);
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 26e93a331a51..7bd86eef6ec7 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -580,7 +580,7 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
-static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
/*
diff --git a/drivers/hid/hid-sigmamicro.c b/drivers/hid/hid-sigmamicro.c
index 2e7058ac0e9d..c87276d7ba0d 100644
--- a/drivers/hid/hid-sigmamicro.c
+++ b/drivers/hid/hid-sigmamicro.c
@@ -99,8 +99,8 @@ static const __u8 sm_0059_rdesc[] = {
0xc0, /* End Collection 166 */
};
-static __u8 *sm_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *sm_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
if (*rsize == sizeof(sm_0059_rdesc) &&
!memcmp(sm_0059_rdesc, rdesc, *rsize)) {
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index eac75f98f08a..df29c614e490 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -99,7 +99,7 @@ static const char ghl_ps4_magic_data[] = {
};
/* PS/3 Motion controller */
-static u8 motion_rdesc[] = {
+static const u8 motion_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -195,7 +195,7 @@ static u8 motion_rdesc[] = {
0xC0 /* End Collection */
};
-static u8 ps3remote_rdesc[] = {
+static const u8 ps3remote_rdesc[] = {
0x05, 0x01, /* GUsagePage Generic Desktop */
0x09, 0x05, /* LUsage 0x05 [Game Pad] */
0xA1, 0x01, /* MCollection Application (mouse, keyboard) */
@@ -599,15 +599,15 @@ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
- unsigned int *rsize)
+static const u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
{
*rsize = sizeof(motion_rdesc);
return motion_rdesc;
}
-static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
- unsigned int *rsize)
+static const u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
{
*rsize = sizeof(ps3remote_rdesc);
return ps3remote_rdesc;
@@ -743,7 +743,7 @@ static int sixaxis_mapping(struct hid_device *hdev, struct hid_input *hi,
return -1;
}
-static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
+static const u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index 2154e14f55f1..7e83fee1ffa0 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -51,7 +51,7 @@ struct steelseries_srws1_data {
* appear in the 'Generic Desktop' usage.
*/
-static __u8 steelseries_srws1_rdesc_fixed[] = {
+static const __u8 steelseries_srws1_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop) */
0x09, 0x08, /* Usage (MultiAxis), Changed */
0xA1, 0x01, /* Collection (Application), */
@@ -570,8 +570,8 @@ static void steelseries_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int *rsize)
{
if (hdev->vendor != USB_VENDOR_ID_STEELSERIES ||
hdev->product != USB_DEVICE_ID_STEELSERIES_SRWS1)
@@ -580,8 +580,8 @@ static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc
if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8
&& rdesc[29] == 0xbb && rdesc[40] == 0xc5) {
hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n");
- rdesc = steelseries_srws1_rdesc_fixed;
*rsize = sizeof(steelseries_srws1_rdesc_fixed);
+ return steelseries_srws1_rdesc_fixed;
}
return rdesc;
}
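
This is the shape every report_fixup() conversion in this series takes: the callback now returns a const pointer, so a driver returns its fixed table directly instead of reassigning the mutable rdesc argument. A minimal sketch with hypothetical demo_* names, not taken from any driver here:

#define DEMO_RDESC_ORIG_SIZE 87		/* hypothetical original size */

static const __u8 demo_rdesc_fixed[] = {
	0x05, 0x01,	/* Usage Page (Desktop), */
	/* ... remainder of the fixed descriptor ... */
};

static const __u8 *demo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				     unsigned int *rsize)
{
	if (*rsize == DEMO_RDESC_ORIG_SIZE) {
		*rsize = sizeof(demo_rdesc_fixed);
		return demo_rdesc_fixed;	/* const table, not rdesc */
	}
	return rdesc;	/* unmatched descriptors pass through untouched */
}
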
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index f32e60d4420f..64e4cff8ca1d 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -18,7 +18,7 @@
#include "hid-ids.h"
-static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index d1d5ca310ead..848361f6225d 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -21,8 +21,8 @@ MODULE_LICENSE("GPL");
* events it's actually sending. It claims to send array events but is instead
* sending variable events.
*/
-static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
if (*rsize >= 119 && rdesc[69] == 0x29 && rdesc[70] == 0xe7 &&
rdesc[71] == 0x81 && rdesc[72] == 0x00) {
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index b176f9c0dd52..d8008933c052 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -50,14 +50,14 @@ static void uclogic_inrange_timeout(struct timer_list *t)
input_sync(input);
}
-static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
if (drvdata->desc_ptr != NULL) {
- rdesc = drvdata->desc_ptr;
*rsize = drvdata->desc_size;
+ return drvdata->desc_ptr;
}
return rdesc;
}
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 5bab006ec165..87fd4eb76c70 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -681,7 +681,7 @@ void uclogic_params_cleanup(struct uclogic_params *params)
* -ENOMEM, if memory allocation failed.
*/
int uclogic_params_get_desc(const struct uclogic_params *params,
- __u8 **pdesc,
+ const __u8 **pdesc,
unsigned int *psize)
{
int rc = -ENOMEM;
@@ -769,7 +769,7 @@ static void uclogic_params_init_invalid(struct uclogic_params *params)
static int uclogic_params_init_with_opt_desc(struct uclogic_params *params,
struct hid_device *hdev,
unsigned int orig_desc_size,
- __u8 *desc_ptr,
+ const __u8 *desc_ptr,
unsigned int desc_size)
{
__u8 *desc_copy_ptr = NULL;
diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
index d6ffadb2f601..35ff062d09b5 100644
--- a/drivers/hid/hid-uclogic-params.h
+++ b/drivers/hid/hid-uclogic-params.h
@@ -79,7 +79,7 @@ struct uclogic_params_pen {
* Pointer to report descriptor part describing the pen inputs.
* Allocated with kmalloc. NULL if the part is not specified.
*/
- __u8 *desc_ptr;
+ const __u8 *desc_ptr;
/*
* Size of the report descriptor.
* Only valid, if "desc_ptr" is not NULL.
@@ -118,7 +118,7 @@ struct uclogic_params_frame {
* Pointer to report descriptor part describing the frame inputs.
* Allocated with kmalloc. NULL if the part is not specified.
*/
- __u8 *desc_ptr;
+ const __u8 *desc_ptr;
/*
* Size of the report descriptor.
* Only valid, if "desc_ptr" is not NULL.
@@ -212,7 +212,7 @@ struct uclogic_params {
* allocated with kmalloc. NULL if no common part is needed.
* Only valid, if "invalid" is false.
*/
- __u8 *desc_ptr;
+ const __u8 *desc_ptr;
/*
* Size of the common part of the replacement report descriptor.
* Only valid, if "desc_ptr" is valid and not NULL.
@@ -239,7 +239,7 @@ struct uclogic_drvdata {
/* Interface parameters */
struct uclogic_params params;
/* Pointer to the replacement report descriptor. NULL if none. */
- __u8 *desc_ptr;
+ const __u8 *desc_ptr;
/*
* Size of the replacement report descriptor.
* Only valid if desc_ptr is not NULL
@@ -261,7 +261,7 @@ extern int uclogic_params_init(struct uclogic_params *params,
/* Get a replacement report descriptor for a tablet's interface. */
extern int uclogic_params_get_desc(const struct uclogic_params *params,
- __u8 **pdesc,
+ const __u8 **pdesc,
unsigned int *psize);
/* Free resources used by tablet interface's parameters */
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index acfa591ab52f..964d17e08f26 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -20,7 +20,7 @@
#include <kunit/visibility.h>
/* Fixed WP4030U report descriptor */
-__u8 uclogic_rdesc_wp4030u_fixed_arr[] = {
+const __u8 uclogic_rdesc_wp4030u_fixed_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -65,7 +65,7 @@ const size_t uclogic_rdesc_wp4030u_fixed_size =
sizeof(uclogic_rdesc_wp4030u_fixed_arr);
/* Fixed WP5540U report descriptor */
-__u8 uclogic_rdesc_wp5540u_fixed_arr[] = {
+const __u8 uclogic_rdesc_wp5540u_fixed_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -142,7 +142,7 @@ const size_t uclogic_rdesc_wp5540u_fixed_size =
sizeof(uclogic_rdesc_wp5540u_fixed_arr);
/* Fixed WP8060U report descriptor */
-__u8 uclogic_rdesc_wp8060u_fixed_arr[] = {
+const __u8 uclogic_rdesc_wp8060u_fixed_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -219,7 +219,7 @@ const size_t uclogic_rdesc_wp8060u_fixed_size =
sizeof(uclogic_rdesc_wp8060u_fixed_arr);
/* Fixed WP1062 report descriptor */
-__u8 uclogic_rdesc_wp1062_fixed_arr[] = {
+const __u8 uclogic_rdesc_wp1062_fixed_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -267,7 +267,7 @@ const size_t uclogic_rdesc_wp1062_fixed_size =
sizeof(uclogic_rdesc_wp1062_fixed_arr);
/* Fixed PF1209 report descriptor */
-__u8 uclogic_rdesc_pf1209_fixed_arr[] = {
+const __u8 uclogic_rdesc_pf1209_fixed_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -344,7 +344,7 @@ const size_t uclogic_rdesc_pf1209_fixed_size =
sizeof(uclogic_rdesc_pf1209_fixed_arr);
/* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */
-__u8 uclogic_rdesc_twhl850_fixed0_arr[] = {
+const __u8 uclogic_rdesc_twhl850_fixed0_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -390,7 +390,7 @@ const size_t uclogic_rdesc_twhl850_fixed0_size =
sizeof(uclogic_rdesc_twhl850_fixed0_arr);
/* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */
-__u8 uclogic_rdesc_twhl850_fixed1_arr[] = {
+const __u8 uclogic_rdesc_twhl850_fixed1_arr[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
@@ -430,7 +430,7 @@ const size_t uclogic_rdesc_twhl850_fixed1_size =
sizeof(uclogic_rdesc_twhl850_fixed1_arr);
/* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */
-__u8 uclogic_rdesc_twhl850_fixed2_arr[] = {
+const __u8 uclogic_rdesc_twhl850_fixed2_arr[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x06, /* Usage (Keyboard), */
0xA1, 0x01, /* Collection (Application), */
@@ -456,7 +456,7 @@ const size_t uclogic_rdesc_twhl850_fixed2_size =
sizeof(uclogic_rdesc_twhl850_fixed2_arr);
/* Fixed TWHA60 report descriptor, interface 0 (stylus) */
-__u8 uclogic_rdesc_twha60_fixed0_arr[] = {
+const __u8 uclogic_rdesc_twha60_fixed0_arr[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -505,7 +505,7 @@ const size_t uclogic_rdesc_twha60_fixed0_size =
sizeof(uclogic_rdesc_twha60_fixed0_arr);
/* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */
-__u8 uclogic_rdesc_twha60_fixed1_arr[] = {
+const __u8 uclogic_rdesc_twha60_fixed1_arr[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x06, /* Usage (Keyboard), */
0xA1, 0x01, /* Collection (Application), */
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index 906d068f50a9..3878a0e8c464 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -23,15 +23,15 @@
#define UCLOGIC_RDESC_WPXXXXU_ORIG_SIZE 212
/* Fixed WP4030U report descriptor */
-extern __u8 uclogic_rdesc_wp4030u_fixed_arr[];
+extern const __u8 uclogic_rdesc_wp4030u_fixed_arr[];
extern const size_t uclogic_rdesc_wp4030u_fixed_size;
/* Fixed WP5540U report descriptor */
-extern __u8 uclogic_rdesc_wp5540u_fixed_arr[];
+extern const __u8 uclogic_rdesc_wp5540u_fixed_arr[];
extern const size_t uclogic_rdesc_wp5540u_fixed_size;
/* Fixed WP8060U report descriptor */
-extern __u8 uclogic_rdesc_wp8060u_fixed_arr[];
+extern const __u8 uclogic_rdesc_wp8060u_fixed_arr[];
extern const size_t uclogic_rdesc_wp8060u_fixed_size;
/* Size of the original descriptor of the new WP5540U tablet */
@@ -41,14 +41,14 @@ extern const size_t uclogic_rdesc_wp8060u_fixed_size;
#define UCLOGIC_RDESC_WP1062_ORIG_SIZE 254
/* Fixed WP1062 report descriptor */
-extern __u8 uclogic_rdesc_wp1062_fixed_arr[];
+extern const __u8 uclogic_rdesc_wp1062_fixed_arr[];
extern const size_t uclogic_rdesc_wp1062_fixed_size;
/* Size of the original descriptor of PF1209 tablet */
#define UCLOGIC_RDESC_PF1209_ORIG_SIZE 234
/* Fixed PF1209 report descriptor */
-extern __u8 uclogic_rdesc_pf1209_fixed_arr[];
+extern const __u8 uclogic_rdesc_pf1209_fixed_arr[];
extern const size_t uclogic_rdesc_pf1209_fixed_size;
/* Size of the original descriptors of TWHL850 tablet */
@@ -57,15 +57,15 @@ extern const size_t uclogic_rdesc_pf1209_fixed_size;
#define UCLOGIC_RDESC_TWHL850_ORIG2_SIZE 92
/* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */
-extern __u8 uclogic_rdesc_twhl850_fixed0_arr[];
+extern const __u8 uclogic_rdesc_twhl850_fixed0_arr[];
extern const size_t uclogic_rdesc_twhl850_fixed0_size;
/* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */
-extern __u8 uclogic_rdesc_twhl850_fixed1_arr[];
+extern const __u8 uclogic_rdesc_twhl850_fixed1_arr[];
extern const size_t uclogic_rdesc_twhl850_fixed1_size;
/* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */
-extern __u8 uclogic_rdesc_twhl850_fixed2_arr[];
+extern const __u8 uclogic_rdesc_twhl850_fixed2_arr[];
extern const size_t uclogic_rdesc_twhl850_fixed2_size;
/* Size of the original descriptors of TWHA60 tablet */
@@ -73,11 +73,11 @@ extern const size_t uclogic_rdesc_twhl850_fixed2_size;
#define UCLOGIC_RDESC_TWHA60_ORIG1_SIZE 139
/* Fixed TWHA60 report descriptor, interface 0 (stylus) */
-extern __u8 uclogic_rdesc_twha60_fixed0_arr[];
+extern const __u8 uclogic_rdesc_twha60_fixed0_arr[];
extern const size_t uclogic_rdesc_twha60_fixed0_size;
/* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */
-extern __u8 uclogic_rdesc_twha60_fixed1_arr[];
+extern const __u8 uclogic_rdesc_twha60_fixed1_arr[];
extern const size_t uclogic_rdesc_twha60_fixed1_size;
/* Report descriptor template placeholder head */
diff --git a/drivers/hid/hid-viewsonic.c b/drivers/hid/hid-viewsonic.c
index 668c2adb77b6..532bed88bdf8 100644
--- a/drivers/hid/hid-viewsonic.c
+++ b/drivers/hid/hid-viewsonic.c
@@ -22,7 +22,7 @@
#define PD1011_RDESC_ORIG_SIZE 408
/* Fixed report descriptor of PD1011 signature pad */
-static __u8 pd1011_rdesc_fixed[] = {
+static const __u8 pd1011_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x01, /* Usage (Digitizer), */
0xA1, 0x01, /* Collection (Application), */
@@ -70,15 +70,15 @@ static __u8 pd1011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 *viewsonic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *viewsonic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_VIEWSONIC_PD1011:
case USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011:
if (*rsize == PD1011_RDESC_ORIG_SIZE) {
- rdesc = pd1011_rdesc_fixed;
*rsize = sizeof(pd1011_rdesc_fixed);
+ return pd1011_rdesc_fixed;
}
break;
}
diff --git a/drivers/hid/hid-vrc2.c b/drivers/hid/hid-vrc2.c
index 80a2b7ef5e66..7dc41e92f488 100644
--- a/drivers/hid/hid-vrc2.c
+++ b/drivers/hid/hid-vrc2.c
@@ -16,7 +16,7 @@
#define USB_VENDOR_ID_VRC2 (0x07c0)
#define USB_DEVICE_ID_VRC2 (0x1125)
-static __u8 vrc2_rdesc_fixed[] = {
+static const __u8 vrc2_rdesc_fixed[] = {
0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
0x09, 0x04, // Usage (Joystick)
0xA1, 0x01, // Collection (Application)
@@ -38,8 +38,8 @@ static __u8 vrc2_rdesc_fixed[] = {
0xC0, // End Collection
};
-static __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
hid_info(hdev, "fixing up VRC-2 report descriptor\n");
*rsize = sizeof(vrc2_rdesc_fixed);
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index 1e590c61eef5..be34be27d4d5 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -43,7 +43,7 @@
#define SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE 222
/* Fixed Slim Tablet 5.8 inch descriptor */
-static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = {
+static const __u8 slim_tablet_5_8_inch_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -94,7 +94,7 @@ static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = {
#define SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE 269
/* Fixed Slim Tablet 12.1 inch descriptor */
-static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = {
+static const __u8 slim_tablet_12_1_inch_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -145,7 +145,7 @@ static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = {
#define Q_PAD_RDESC_ORIG_SIZE 241
/* Fixed Q Pad descriptor */
-static __u8 q_pad_rdesc_fixed[] = {
+static const __u8 q_pad_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -198,7 +198,7 @@ static __u8 q_pad_rdesc_fixed[] = {
/*
* Fixed report descriptor for tablet with PID 0038.
*/
-static __u8 pid_0038_rdesc_fixed[] = {
+static const __u8 pid_0038_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -249,7 +249,7 @@ static __u8 pid_0038_rdesc_fixed[] = {
#define MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE 300
/* Fixed Media Tablet 10.6 inch descriptor */
-static __u8 media_tablet_10_6_inch_rdesc_fixed[] = {
+static const __u8 media_tablet_10_6_inch_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -362,7 +362,7 @@ static __u8 media_tablet_10_6_inch_rdesc_fixed[] = {
#define MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE 309
/* Fixed Media Tablet 14.1 inch descriptor */
-static __u8 media_tablet_14_1_inch_rdesc_fixed[] = {
+static const __u8 media_tablet_14_1_inch_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -473,7 +473,7 @@ static __u8 media_tablet_14_1_inch_rdesc_fixed[] = {
#define SIRIUS_BATTERY_FREE_TABLET_RDESC_ORIG_SIZE 335
/* Fixed Sirius Battery Free Tablet descriptor */
-static __u8 sirius_battery_free_tablet_rdesc_fixed[] = {
+static const __u8 sirius_battery_free_tablet_rdesc_fixed[] = {
0x05, 0x0D, /* Usage Page (Digitizer), */
0x09, 0x02, /* Usage (Pen), */
0xA1, 0x01, /* Collection (Application), */
@@ -599,50 +599,50 @@ static __u8 sirius_battery_free_tablet_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH:
if (*rsize == SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE) {
- rdesc = slim_tablet_5_8_inch_rdesc_fixed;
*rsize = sizeof(slim_tablet_5_8_inch_rdesc_fixed);
+ return slim_tablet_5_8_inch_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH:
if (*rsize == SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE) {
- rdesc = slim_tablet_12_1_inch_rdesc_fixed;
*rsize = sizeof(slim_tablet_12_1_inch_rdesc_fixed);
+ return slim_tablet_12_1_inch_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_Q_PAD:
if (*rsize == Q_PAD_RDESC_ORIG_SIZE) {
- rdesc = q_pad_rdesc_fixed;
*rsize = sizeof(q_pad_rdesc_fixed);
+ return q_pad_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_PID_0038:
if (*rsize == PID_0038_RDESC_ORIG_SIZE) {
- rdesc = pid_0038_rdesc_fixed;
*rsize = sizeof(pid_0038_rdesc_fixed);
+ return pid_0038_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH:
if (*rsize == MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE) {
- rdesc = media_tablet_10_6_inch_rdesc_fixed;
*rsize = sizeof(media_tablet_10_6_inch_rdesc_fixed);
+ return media_tablet_10_6_inch_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH:
if (*rsize == MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE) {
- rdesc = media_tablet_14_1_inch_rdesc_fixed;
*rsize = sizeof(media_tablet_14_1_inch_rdesc_fixed);
+ return media_tablet_14_1_inch_rdesc_fixed;
}
break;
case USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET:
if (*rsize == SIRIUS_BATTERY_FREE_TABLET_RDESC_ORIG_SIZE) {
- rdesc = sirius_battery_free_tablet_rdesc_fixed;
*rsize = sizeof(sirius_battery_free_tablet_rdesc_fixed);
+ return sirius_battery_free_tablet_rdesc_fixed;
}
break;
}
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index 10a5d87ccb96..831b760c66ea 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -27,7 +27,7 @@ struct winwing_led_info {
const char *led_name;
};
-static struct winwing_led_info led_info[3] = {
+static const struct winwing_led_info led_info[3] = {
{ 0, 255, "backlight" },
{ 1, 1, "a-a" },
{ 2, 1, "a-g" },
@@ -94,7 +94,7 @@ static int winwing_init_led(struct hid_device *hdev,
return -ENOMEM;
for (i = 0; i < 3; i += 1) {
- struct winwing_led_info *info = &led_info[i];
+ const struct winwing_led_info *info = &led_info[i];
led = &data->leds[i];
led->hdev = hdev;
@@ -150,7 +150,7 @@ static int winwing_input_configured(struct hid_device *hdev,
return ret;
}
-static __u8 original_rdesc_buttons[] = {
+static const __u8 original_rdesc_buttons[] = {
0x05, 0x09, 0x19, 0x01, 0x29, 0x6F,
0x15, 0x00, 0x25, 0x01, 0x35, 0x00,
0x45, 0x01, 0x75, 0x01, 0x95, 0x6F,
@@ -165,7 +165,7 @@ static __u8 original_rdesc_buttons[] = {
* This module skips numbers 32-63, unused on some throttle grips.
*/
-static __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
int sig_length = sizeof(original_rdesc_buttons);
diff --git a/drivers/hid/hid-xiaomi.c b/drivers/hid/hid-xiaomi.c
index a97a90afad33..ef6598550a40 100644
--- a/drivers/hid/hid-xiaomi.c
+++ b/drivers/hid/hid-xiaomi.c
@@ -14,7 +14,7 @@
/* Fixed Mi Silent Mouse report descriptor */
/* Button's Usage Maximum changed from 3 to 5 to make side buttons work */
#define MI_SILENT_MOUSE_ORIG_RDESC_LENGTH 87
-static __u8 mi_silent_mouse_rdesc_fixed[] = {
+static const __u8 mi_silent_mouse_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x02, /* Usage (Mouse), */
0xA1, 0x01, /* Collection (Application), */
@@ -61,15 +61,15 @@ static __u8 mi_silent_mouse_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 *xiaomi_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
+static const __u8 *xiaomi_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
{
switch (hdev->product) {
case USB_DEVICE_ID_MI_SILENT_MOUSE:
if (*rsize == MI_SILENT_MOUSE_ORIG_RDESC_LENGTH) {
hid_info(hdev, "fixing up Mi Silent Mouse report descriptor\n");
- rdesc = mi_silent_mouse_rdesc_fixed;
*rsize = sizeof(mi_silent_mouse_rdesc_fixed);
+ return mi_silent_mouse_rdesc_fixed;
}
break;
}
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 998a3db19c1f..3bdb26f45592 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -24,7 +24,7 @@ struct zc_device {
* Zydacron remote control has an invalid HID report descriptor,
* that needs fixing before we can parse it.
*/
-static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static const __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 253 &&
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 716294e40e8a..c887f48756f4 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -38,12 +38,20 @@ static const struct class hidraw_class = {
static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
static DECLARE_RWSEM(minors_rwsem);
+static inline bool hidraw_is_revoked(struct hidraw_list *list)
+{
+ return list->revoked;
+}
+
static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct hidraw_list *list = file->private_data;
int ret = 0, len;
DECLARE_WAITQUEUE(wait, current);
+ if (hidraw_is_revoked(list))
+ return -ENODEV;
+
mutex_lock(&list->read_mutex);
while (ret == 0) {
@@ -161,9 +169,13 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
+ struct hidraw_list *list = file->private_data;
ssize_t ret;
down_read(&minors_rwsem);
- ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT);
+ if (hidraw_is_revoked(list))
+ ret = -ENODEV;
+ else
+ ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT);
up_read(&minors_rwsem);
return ret;
}
@@ -256,7 +268,7 @@ static __poll_t hidraw_poll(struct file *file, poll_table *wait)
poll_wait(file, &list->hidraw->wait, wait);
if (list->head != list->tail)
mask |= EPOLLIN | EPOLLRDNORM;
- if (!list->hidraw->exist)
+ if (!list->hidraw->exist || hidraw_is_revoked(list))
mask |= EPOLLERR | EPOLLHUP;
return mask;
}
@@ -320,6 +332,9 @@ static int hidraw_fasync(int fd, struct file *file, int on)
{
struct hidraw_list *list = file->private_data;
+ if (hidraw_is_revoked(list))
+ return -ENODEV;
+
return fasync_helper(fd, file, on, &list->fasync);
}
@@ -372,6 +387,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
return 0;
}
+static int hidraw_revoke(struct hidraw_list *list)
+{
+ list->revoked = true;
+
+ return 0;
+}
+
static long hidraw_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -379,11 +401,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
unsigned int minor = iminor(inode);
long ret = 0;
struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
void __user *user_arg = (void __user*) arg;
down_read(&minors_rwsem);
dev = hidraw_table[minor];
- if (!dev || !dev->exist) {
+ if (!dev || !dev->exist || hidraw_is_revoked(list)) {
ret = -ENODEV;
goto out;
}
@@ -421,6 +444,14 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
break;
}
+ case HIDIOCREVOKE:
+ {
+ if (user_arg)
+ ret = -EINVAL;
+ else
+ ret = hidraw_revoke(list);
+ break;
+ }
default:
{
struct hid_device *hid = dev->hid;
@@ -527,7 +558,7 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
list_for_each_entry(list, &dev->list, node) {
int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
- if (new_head == list->tail)
+ if (hidraw_is_revoked(list) || new_head == list->tail)
continue;
if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
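
For context, a minimal userspace sketch of the revocation flow these hunks implement (assuming a kernel carrying this patch, which adds HIDIOCREVOKE to <linux/hidraw.h>): the fd stays open after the ioctl, but reads and writes fail with ENODEV and poll() signals EPOLLERR | EPOLLHUP.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

static int revoke_hidraw_fd(int fd)
{
	/* The kernel rejects any non-NULL argument with -EINVAL. */
	if (ioctl(fd, HIDIOCREVOKE, NULL) < 0) {
		fprintf(stderr, "HIDIOCREVOKE: %s\n", strerror(errno));
		return -1;
	}
	/* From here on, read()/write() on fd fail with errno ENODEV. */
	return 0;
}
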
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 632eaf9e11a6..2f8a9d3f1e86 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -105,6 +105,7 @@ struct i2c_hid {
wait_queue_head_t wait; /* For waiting the interrupt */
+ struct mutex cmd_lock; /* protects cmdbuf and rawbuf */
struct mutex reset_lock;
struct i2chid_ops *ops;
@@ -220,6 +221,8 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
static int i2c_hid_read_register(struct i2c_hid *ihid, __le16 reg,
void *buf, size_t len)
{
+ guard(mutex)(&ihid->cmd_lock);
+
*(__le16 *)ihid->cmdbuf = reg;
return i2c_hid_xfer(ihid, ihid->cmdbuf, sizeof(__le16), buf, len);
@@ -252,6 +255,8 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
i2c_hid_dbg(ihid, "%s\n", __func__);
+ guard(mutex)(&ihid->cmd_lock);
+
/* Command register goes first */
*(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
length += sizeof(__le16);
@@ -342,6 +347,8 @@ static int i2c_hid_set_or_send_report(struct i2c_hid *ihid,
if (!do_set && le16_to_cpu(ihid->hdesc.wMaxOutputLength) == 0)
return -ENOSYS;
+ guard(mutex)(&ihid->cmd_lock);
+
if (do_set) {
/* Command register goes first */
*(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
@@ -384,6 +391,8 @@ static int i2c_hid_set_power_command(struct i2c_hid *ihid, int power_state)
{
size_t length;
+ guard(mutex)(&ihid->cmd_lock);
+
/* SET_POWER uses command register */
*(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
length = sizeof(__le16);
@@ -440,25 +449,27 @@ static int i2c_hid_start_hwreset(struct i2c_hid *ihid)
if (ret)
return ret;
- /* Prepare reset command. Command register goes first. */
- *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
- length += sizeof(__le16);
- /* Next is RESET command itself */
- length += i2c_hid_encode_command(ihid->cmdbuf + length,
- I2C_HID_OPCODE_RESET, 0, 0);
+ scoped_guard(mutex, &ihid->cmd_lock) {
+ /* Prepare reset command. Command register goes first. */
+ *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
+ length += sizeof(__le16);
+ /* Next is RESET command itself */
+ length += i2c_hid_encode_command(ihid->cmdbuf + length,
+ I2C_HID_OPCODE_RESET, 0, 0);
- set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+ set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
- ret = i2c_hid_xfer(ihid, ihid->cmdbuf, length, NULL, 0);
- if (ret) {
- dev_err(&ihid->client->dev,
- "failed to reset device: %d\n", ret);
- goto err_clear_reset;
- }
+ ret = i2c_hid_xfer(ihid, ihid->cmdbuf, length, NULL, 0);
+ if (ret) {
+ dev_err(&ihid->client->dev,
+ "failed to reset device: %d\n", ret);
+ break;
+ }
- return 0;
+ return 0;
+ }
-err_clear_reset:
+ /* Clean up if sending reset command failed */
clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
return ret;
@@ -1200,6 +1211,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
ihid->is_panel_follower = drm_is_panel_follower(&client->dev);
init_waitqueue_head(&ihid->wait);
+ mutex_init(&ihid->cmd_lock);
mutex_init(&ihid->reset_lock);
INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
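
The guard(mutex)() and scoped_guard() used above are the <linux/cleanup.h> lock guards: the mutex is released automatically when the guarded scope exits, which is why the reset path can simply break out of the scoped_guard() and reach the cleanup code with the lock already dropped. A minimal sketch of the idiom, under those assumptions:

#include <linux/errno.h>
#include <linux/mutex.h>	/* instantiates guard(mutex) via <linux/cleanup.h> */

static DEFINE_MUTEX(demo_lock);

static int demo_locked_op(bool fail)
{
	guard(mutex)(&demo_lock);	/* dropped on every return below */

	if (fail)
		return -EIO;		/* no explicit mutex_unlock() needed */

	return 0;
}
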
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 091e37933225..3fcff6daa0d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -152,6 +152,13 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.main_supply_name = "vcc33",
};
+static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
+ .post_power_delay_ms = 10,
+ .post_gpio_reset_on_delay_ms = 300,
+ .hid_descriptor_address = 0x0001,
+ .main_supply_name = "vcc33",
+};
+
static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
.post_power_delay_ms = 1,
.post_gpio_reset_on_delay_ms = 200,
@@ -174,6 +181,7 @@ static const struct elan_i2c_hid_chip_data ilitek_ili2901_chip_data = {
static const struct of_device_id elan_i2c_hid_of_match[] = {
{ .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data },
+ { .compatible = "elan,ekth6a12nay", .data = &elan_ekth6a12nay_chip_data },
{ .compatible = "ilitek,ili9882t", .data = &ilitek_ili9882t_chip_data },
{ .compatible = "ilitek,ili2901", .data = &ilitek_ili2901_chip_data },
{ }
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index f82428d7f6c3..aae0d965b47b 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -28,11 +28,14 @@ enum ishtp_driver_data_index {
ISHTP_DRIVER_DATA_LNL_M,
};
-#define ISH_FW_FILENAME_LNL_M "intel/ish/ish_lnlm.bin"
+#define ISH_FW_GEN_LNL_M "lnlm"
+
+#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
+#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
static struct ishtp_driver_data ishtp_driver_data[] = {
[ISHTP_DRIVER_DATA_LNL_M] = {
- .fw_filename = ISH_FW_FILENAME_LNL_M,
+ .fw_generation = ISH_FW_GEN_LNL_M,
},
};
@@ -397,4 +400,5 @@ MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(ISH_FW_FILENAME_LNL_M);
+MODULE_FIRMWARE(ISH_FIRMWARE_PATH(ISH_FW_GEN_LNL_M));
+MODULE_FIRMWARE(ISH_FIRMWARE_PATH_ALL);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index 5bb85c932e4c..53645ac89ee8 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -46,7 +46,6 @@ struct ishtp_cl_device {
};
int ishtp_bus_new_client(struct ishtp_device *dev);
-void ishtp_remove_all_clients(struct ishtp_device *dev);
int ishtp_cl_device_bind(struct ishtp_cl *cl);
void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index fc62dd1495da..d9d398fadcf7 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -109,7 +109,6 @@ struct ishtp_cl {
};
/* Client connection management internal functions */
-int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, guid_t *uuid);
int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
void recv_ishtp_cl_msg(struct ishtp_device *dev,
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 181838c3d7ac..cdacce0a4c9d 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -129,13 +129,15 @@ struct ishtp_hw_ops {
* ISHTP device instance. It allows for the storage of data that is unique to
* a particular driver or hardware variant.
*
- * @fw_filename: The firmware filename associated with a specific hardware
+ * @fw_generation: The generation name associated with a specific hardware
* variant of the Intel Integrated Sensor Hub (ISH). This allows
* the driver to load the correct firmware based on the device's
- * hardware variant.
+ * hardware variant. For example, "lnlm" for the Lunar Lake-M
+ * platform. The generation name must not exceed 8 characters
+ * in length.
*/
struct ishtp_driver_data {
- char *fw_filename;
+ char *fw_generation;
};
/**
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c
index fcca070bdecb..f76c4437a1f5 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.c
@@ -35,14 +35,17 @@
#include <linux/cacheflush.h>
#include <linux/container_of.h>
+#include <linux/crc32.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/gfp_types.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/pfn.h>
+#include <linux/sprintf.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -192,6 +195,119 @@ static int prepare_dma_bufs(struct ishtp_device *dev,
return 0;
}
+#define ISH_FW_FILE_VENDOR_NAME_SKU_FMT "intel/ish/ish_%s_%08x_%08x_%08x.bin"
+#define ISH_FW_FILE_VENDOR_SKU_FMT "intel/ish/ish_%s_%08x_%08x.bin"
+#define ISH_FW_FILE_VENDOR_NAME_FMT "intel/ish/ish_%s_%08x_%08x.bin"
+#define ISH_FW_FILE_VENDOR_FMT "intel/ish/ish_%s_%08x.bin"
+#define ISH_FW_FILE_DEFAULT_FMT "intel/ish/ish_%s.bin"
+
+#define ISH_FW_FILENAME_LEN_MAX 56
+
+#define ISH_CRC_INIT (~0u)
+#define ISH_CRC_XOROUT (~0u)
+
+static int _request_ish_firmware(const struct firmware **firmware_p,
+ const char *name, struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "Try to load firmware: %s\n", name);
+ ret = firmware_request_nowarn(firmware_p, name, dev);
+ if (!ret)
+ dev_info(dev, "load firmware: %s\n", name);
+
+ return ret;
+}
+
+/**
+ * request_ish_firmware() - Request and load the ISH firmware.
+ * @firmware_p: Pointer to the firmware image.
+ * @dev: Device for which firmware is being requested.
+ *
+ * This function attempts to load the Integrated Sensor Hub (ISH) firmware
+ * for the given device in the following order, prioritizing custom firmware
+ * with more precise matching patterns:
+ *
+ * ish_${fw_generation}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}_${PRODUCT_SKU_CRC32}.bin
+ * ish_${fw_generation}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin
+ * ish_${fw_generation}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}.bin
+ * ish_${fw_generation}_${SYS_VENDOR_CRC32}.bin
+ * ish_${fw_generation}.bin
+ *
+ * The driver tries these patterns in order and loads the first firmware
+ * file it finds, skipping the remaining patterns. If none of the custom
+ * patterns matches, the default Intel firmware, listed last in the order
+ * above, is loaded.
+ *
+ * The firmware file name is constructed using CRC32 checksums of strings.
+ * This is done to create a valid file name that does not contain spaces
+ * or special characters which may be present in the original strings.
+ *
+ * The CRC-32 algorithm uses the following parameters:
+ * Poly: 0x04C11DB7
+ * Init: 0xFFFFFFFF
+ * RefIn: true
+ * RefOut: true
+ * XorOut: 0xFFFFFFFF
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int request_ish_firmware(const struct firmware **firmware_p,
+ struct device *dev)
+{
+ const char *gen, *sys_vendor, *product_name, *product_sku;
+ struct ishtp_device *ishtp = dev_get_drvdata(dev);
+ u32 vendor_crc, name_crc, sku_crc;
+ char filename[ISH_FW_FILENAME_LEN_MAX];
+ int ret;
+
+ gen = ishtp->driver_data->fw_generation;
+ sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ product_sku = dmi_get_system_info(DMI_PRODUCT_SKU);
+
+ if (sys_vendor)
+ vendor_crc = crc32(ISH_CRC_INIT, sys_vendor, strlen(sys_vendor)) ^ ISH_CRC_XOROUT;
+ if (product_name)
+ name_crc = crc32(ISH_CRC_INIT, product_name, strlen(product_name)) ^ ISH_CRC_XOROUT;
+ if (product_sku)
+ sku_crc = crc32(ISH_CRC_INIT, product_sku, strlen(product_sku)) ^ ISH_CRC_XOROUT;
+
+ if (sys_vendor && product_name && product_sku) {
+ snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_NAME_SKU_FMT, gen,
+ vendor_crc, name_crc, sku_crc);
+ ret = _request_ish_firmware(firmware_p, filename, dev);
+ if (!ret)
+ return 0;
+ }
+
+ if (sys_vendor && product_sku) {
+ snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_SKU_FMT, gen, vendor_crc,
+ sku_crc);
+ ret = _request_ish_firmware(firmware_p, filename, dev);
+ if (!ret)
+ return 0;
+ }
+
+ if (sys_vendor && product_name) {
+ snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_NAME_FMT, gen, vendor_crc,
+ name_crc);
+ ret = _request_ish_firmware(firmware_p, filename, dev);
+ if (!ret)
+ return 0;
+ }
+
+ if (sys_vendor) {
+ snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_FMT, gen, vendor_crc);
+ ret = _request_ish_firmware(firmware_p, filename, dev);
+ if (!ret)
+ return 0;
+ }
+
+ snprintf(filename, sizeof(filename), ISH_FW_FILE_DEFAULT_FMT, gen);
+ return _request_ish_firmware(firmware_p, filename, dev);
+}
+
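
The CRC-32 parameters listed above are those of the standard (IEEE 802.3) CRC-32, which zlib's crc32() implements with the Init and XorOut steps handled internally, so the expected file name for a given machine can be derived in userspace roughly as follows (illustrative helper, not part of the patch; the function name is made up; link with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static void print_ish_fw_name(const char *gen, const char *vendor,
			      const char *name, const char *sku)
{
	/* crc32(0L, ...) applies Init and XorOut of 0xFFFFFFFF itself. */
	unsigned long v = crc32(0L, (const Bytef *)vendor, strlen(vendor));
	unsigned long n = crc32(0L, (const Bytef *)name, strlen(name));
	unsigned long s = crc32(0L, (const Bytef *)sku, strlen(sku));

	printf("intel/ish/ish_%s_%08lx_%08lx_%08lx.bin\n", gen, v, n, s);
}

The three strings correspond to the DMI values exposed in /sys/class/dmi/id/sys_vendor, product_name and product_sku.
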
/**
* ishtp_loader_work() - Load the ISHTP firmware
* @work: The work structure
@@ -220,7 +336,6 @@ void ishtp_loader_work(struct work_struct *work)
struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), };
struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), };
union loader_recv_message recv_msg;
- char *filename = dev->driver_data->fw_filename;
const struct firmware *ish_fw;
void *dma_bufs[FRAGMENT_MAX_NUM] = {};
u32 fragment_size;
@@ -228,9 +343,9 @@ void ishtp_loader_work(struct work_struct *work)
int retry = ISHTP_LOADER_RETRY_TIMES;
int rv;
- rv = request_firmware(&ish_fw, filename, dev->devc);
+ rv = request_ish_firmware(&ish_fw, dev->devc);
if (rv < 0) {
- dev_err(dev->devc, "request firmware %s failed:%d\n", filename, rv);
+ dev_err(dev->devc, "request ISH firmware failed:%d\n", rv);
return;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2541fa2e0fa3..59a13ad9371c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1906,11 +1906,12 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
if ((code == ABS_X || code == ABS_Y) && !resolution) {
resolution = WACOM_INTUOS_RES;
hid_warn(input,
- "Wacom usage (%d) missing resolution \n",
- code);
+ "Using default resolution for axis type 0x%x code 0x%x\n",
+ type, code);
}
input_abs_set_res(input, code, resolution);
break;
+ case EV_REL:
case EV_KEY:
case EV_MSC:
case EV_SW:
@@ -2047,7 +2048,23 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHRING:
- wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ if (field->flags & HID_MAIN_ITEM_RELATIVE) {
+ wacom_wac->relring_count++;
+ if (wacom_wac->relring_count == 1) {
+ wacom_map_usage(input, usage, field, EV_REL, REL_WHEEL_HI_RES, 0);
+ set_bit(REL_WHEEL, input->relbit);
+ }
+ else if (wacom_wac->relring_count == 2) {
+ wacom_map_usage(input, usage, field, EV_REL, REL_HWHEEL_HI_RES, 0);
+ set_bit(REL_HWHEEL, input->relbit);
+ }
+ } else {
+ wacom_wac->absring_count++;
+ if (wacom_wac->absring_count == 1)
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ else if (wacom_wac->absring_count == 2)
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_THROTTLE, 0);
+ }
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHRINGSTATUS:
@@ -2112,7 +2129,10 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
return;
if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
- if (usage->hid != WACOM_HID_WD_TOUCHRING)
+ bool is_abs_touchring = usage->hid == WACOM_HID_WD_TOUCHRING &&
+ !(field->flags & HID_MAIN_ITEM_RELATIVE);
+
+ if (!is_abs_touchring)
wacom_wac->hid_data.inrange_state |= value;
}
@@ -2165,6 +2185,52 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
hdev->product == 0x3AA)
value = wacom_offset_rotation(input, usage, value, 1, 2);
}
+ else if (field->flags & HID_MAIN_ITEM_RELATIVE) {
+ int hires_value = value * 120 / usage->resolution_multiplier;
+ int *ring_value;
+ int lowres_code;
+
+ if (usage->code == REL_WHEEL_HI_RES) {
+ /* We must invert the sign for vertical
+ * relative scrolling. Clockwise
+ * rotation produces positive values
+ * from HW, but userspace treats
+ * positive REL_WHEEL as a scroll *up*!
+ */
+ hires_value = -hires_value;
+ ring_value = &wacom_wac->hid_data.ring_value;
+ lowres_code = REL_WHEEL;
+ }
+ else if (usage->code == REL_HWHEEL_HI_RES) {
+ /* No need to invert the sign for
+ * horizontal relative scrolling.
+ * Clockwise rotation produces positive
+ * values from HW and userspace treats
+ * positive REL_HWHEEL as a scroll
+ * right.
+ */
+ ring_value = &wacom_wac->hid_data.ring2_value;
+ lowres_code = REL_HWHEEL;
+ }
+ else {
+ hid_err(wacom->hdev, "unrecognized relative wheel with code %d\n",
+ usage->code);
+ break;
+ }
+
+ value = hires_value;
+ *ring_value += hires_value;
+
+ /* Emulate a legacy wheel click for every 120
+ * units of hi-res travel.
+ */
+ if (*ring_value >= 120 || *ring_value <= -120) {
+ int clicks = *ring_value / 120;
+
+ input_event(input, usage->type, lowres_code, clicks);
+ *ring_value -= clicks * 120;
+ }
+ }
else {
value = wacom_offset_rotation(input, usage, value, 1, 4);
}
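
A worked example of the bookkeeping above: REL_WHEEL_HI_RES and REL_HWHEEL_HI_RES use the kernel-wide convention of 120 units per detent, so with a hypothetical usage->resolution_multiplier of 4, one raw touchring step contributes 1 * 120 / 4 = 30 hi-res units; four consecutive steps accumulate to 120 and emit exactly one legacy REL_WHEEL click before the accumulator is decremented.
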
@@ -2322,6 +2388,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0);
features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3;
break;
+ case WACOM_HID_WD_SEQUENCENUMBER:
+ wacom_wac->hid_data.sequence_number = -1;
+ break;
}
}
@@ -2446,9 +2515,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.barrelswitch3 = value;
return;
case WACOM_HID_WD_SEQUENCENUMBER:
- if (wacom_wac->hid_data.sequence_number != value)
- hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number));
+ if (wacom_wac->hid_data.sequence_number != value &&
+ wacom_wac->hid_data.sequence_number >= 0) {
+ int sequence_size = field->logical_maximum - field->logical_minimum + 1;
+ int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size;
+ hid_warn(hdev, "Dropped %d packets", drop_count);
+ }
wacom_wac->hid_data.sequence_number = value + 1;
+ if (wacom_wac->hid_data.sequence_number > field->logical_maximum)
+ wacom_wac->hid_data.sequence_number = field->logical_minimum;
return;
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 6ec499841f70..c8803d5c6a71 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -312,6 +312,8 @@ struct hid_data {
int width;
int height;
int id;
+ int ring_value;
+ int ring2_value;
int cc_report;
int cc_index;
int cc_value_index;
@@ -324,7 +326,7 @@ struct hid_data {
int bat_connected;
int ps_connected;
bool pad_input_event_flag;
- unsigned short sequence_number;
+ int sequence_number;
ktime_t time_delayed;
};
@@ -355,6 +357,8 @@ struct wacom_wac {
int num_contacts_left;
u8 bt_features;
u8 bt_high_speed;
+ u8 absring_count;
+ u8 relring_count;
int mode_report;
int mode_value;
struct hid_data hid_data;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 3140990a6164..15cb759151e6 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -116,22 +116,13 @@ static int ssi_debug_add_ctrl(struct hsi_controller *ssi)
/* SSI controller */
omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
- if (!omap_ssi->dir)
- return -ENOMEM;
+ debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, &ssi_regs_fops);
- debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
- &ssi_regs_fops);
/* SSI GDD (DMA) */
dir = debugfs_create_dir("gdd", omap_ssi->dir);
- if (!dir)
- goto rback;
debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
return 0;
-rback:
- debugfs_remove_recursive(omap_ssi->dir);
-
- return -ENOMEM;
}
static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e0d676c74f14..36d9ba097ff5 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -342,9 +342,6 @@ int hv_synic_init(unsigned int cpu)
return 0;
}
-/*
- * hv_synic_cleanup - Cleanup routine for hv_synic_init().
- */
void hv_synic_disable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu =
@@ -436,6 +433,9 @@ retry:
return pending;
}
+/*
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
+ */
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 9c452bfbd571..7a35c82976e0 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -207,13 +207,13 @@ static int hv_die_panic_notify_crash(struct notifier_block *self,
* buffer and call into Hyper-V to transfer the data.
*/
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
size_t bytes_written;
/* We are only interested in panics. */
- if (reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
+ if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
return;
/*
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 76ac5185a01a..d2856023d53c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -380,12 +380,6 @@ void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
-
-int hv_fcopy_init(struct hv_util_service *srv);
-void hv_fcopy_deinit(void);
-int hv_fcopy_pre_suspend(void);
-int hv_fcopy_pre_resume(void);
-void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4bae382a3eb4..9b15f7daf505 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1306,6 +1306,13 @@ static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void vmbus_percpu_work(struct work_struct *work)
+{
+ unsigned int cpu = smp_processor_id();
+
+ hv_synic_init(cpu);
+}
+
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
@@ -1316,7 +1323,8 @@ static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
*/
static int vmbus_bus_init(void)
{
- int ret;
+ int ret, cpu;
+ struct work_struct __percpu *works;
ret = hv_init();
if (ret != 0) {
@@ -1355,12 +1363,32 @@ static int vmbus_bus_init(void)
if (ret)
goto err_alloc;
+ works = alloc_percpu(struct work_struct);
+ if (!works) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
/*
* Initialize the per-cpu interrupt state and stimer state.
* Then connect to the host.
*/
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
- hv_synic_init, hv_synic_cleanup);
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, vmbus_percpu_work);
+ schedule_work_on(cpu, work);
+ }
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ /* Register the callbacks for subsequent CPU online/offline transitions */
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
+ hv_synic_init, hv_synic_cleanup);
+ cpus_read_unlock();
+ free_percpu(works);
if (ret < 0)
goto err_alloc;
hyperv_cpuhp_online = ret;
@@ -1803,12 +1831,12 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group vmbus_chan_group = {
+static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
.is_visible = vmbus_chan_attr_is_visible
};
-static struct kobj_type vmbus_chan_ktype = {
+static const struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b60fe2e58ad6..65ea92529406 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1511,9 +1511,10 @@ config SENSORS_LM90
config SENSORS_LM92
tristate "National Semiconductor LM92 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for National Semiconductor LM92
- and Maxim MAX6635 sensor chips.
+ and LM76 as well as Maxim MAX6633/6634/6635 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm92.
@@ -1532,6 +1533,7 @@ config SENSORS_LM93
config SENSORS_LM95234
tristate "National Semiconductor LM95234 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the LM95233 and LM95234
temperature sensor chips.
@@ -2066,6 +2068,17 @@ config SENSORS_SFCTEMP
This driver can also be built as a module. If so, the module
will be called sfctemp.
+config SENSORS_SG2042_MCU
+ tristate "Sophgo onboard MCU support"
+ depends on I2C
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the onboard MCU of Sophgo SG2042 SoCs. This MCU provides
+ power control and some basic information.
+
+ This driver can be built as a module. If so, the module
+ will be called sg2042-mcu.
+
config SENSORS_SURFACE_FAN
tristate "Surface Fan Driver"
depends on SURFACE_AGGREGATOR
@@ -2080,6 +2093,17 @@ config SENSORS_SURFACE_FAN
Select M or Y here, if you want to be able to read the fan's speed.
+config SENSORS_SURFACE_TEMP
+ tristate "Microsoft Surface Thermal Sensor Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Driver for monitoring thermal sensors connected via the Surface
+ Aggregator Module (embedded controller) on Microsoft Surface devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called surface_temp.
+
config SENSORS_ADC128D818
tristate "Texas Instruments ADC128D818"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b1c7056c37db..9554d2fdcf7b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -194,6 +194,7 @@ obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
obj-$(CONFIG_SENSORS_SFCTEMP) += sfctemp.o
+obj-$(CONFIG_SENSORS_SG2042_MCU) += sg2042-mcu.o
obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
@@ -209,6 +210,7 @@ obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o
obj-$(CONFIG_SENSORS_SPD5118) += spd5118.o
obj-$(CONFIG_SENSORS_STTS751) += stts751.o
obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o
+obj-$(CONFIG_SENSORS_SURFACE_TEMP)+= surface_temp.o
obj-$(CONFIG_SENSORS_SY7636A) += sy7636a-hwmon.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_TC74) += tc74.o
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 517248d2994e..dbee6926fa05 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -728,30 +728,22 @@ static const int adt7470_freq_map[] = {
static int pwm1_freq_get(struct device *dev)
{
struct adt7470_data *data = dev_get_drvdata(dev);
- unsigned int cfg_reg_1, cfg_reg_2;
+ unsigned int regs[2] = {ADT7470_REG_CFG, ADT7470_REG_CFG_2};
+ u8 cfg_reg[2];
int index;
int err;
- mutex_lock(&data->lock);
- err = regmap_read(data->regmap, ADT7470_REG_CFG, &cfg_reg_1);
- if (err < 0)
- goto out;
- err = regmap_read(data->regmap, ADT7470_REG_CFG_2, &cfg_reg_2);
- if (err < 0)
- goto out;
- mutex_unlock(&data->lock);
+ err = regmap_multi_reg_read(data->regmap, regs, cfg_reg, 2);
+ if (err)
+ return err;
- index = (cfg_reg_2 & ADT7470_FREQ_MASK) >> ADT7470_FREQ_SHIFT;
- if (!(cfg_reg_1 & ADT7470_CFG_LF))
+ index = (cfg_reg[1] & ADT7470_FREQ_MASK) >> ADT7470_FREQ_SHIFT;
+ if (!(cfg_reg[0] & ADT7470_CFG_LF))
index += 8;
if (index >= ARRAY_SIZE(adt7470_freq_map))
index = ARRAY_SIZE(adt7470_freq_map) - 1;
return adt7470_freq_map[index];
-
-out:
- mutex_unlock(&data->lock);
- return err;
}
static int adt7470_pwm_read(struct device *dev, u32 attr, int channel, long *val)
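The adt7470 change above, like the adt7x10, amc6821 and lm92 changes further down, replaces a sequence of mutex-guarded regmap_read() calls with a single regmap_multi_reg_read(), which fetches a scattered list of registers in one call and lets the driver-private lock around the reads go away. A minimal sketch of the calling convention, assuming <linux/regmap.h>; REG_A and REG_B are placeholder addresses, not real chip registers:

/*
 * Sketch only: the element size of vals[] must match the regmap's
 * val_bits (u8 for an 8-bit regmap as here, u16 for a 16-bit one).
 */
static int read_two_regs(struct regmap *map, u8 *a, u8 *b)
{
	unsigned int regs[2] = { REG_A, REG_B };
	u8 vals[2];
	int err;

	err = regmap_multi_reg_read(map, regs, vals, 2);
	if (err)
		return err;

	*a = vals[0];	/* value read from REG_A */
	*b = vals[1];	/* value read from REG_B */
	return 0;
}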
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 382a2bb9168a..ca466d12475a 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -21,6 +21,8 @@
#include <linux/of.h>
#include <linux/util_macros.h>
+#include <dt-bindings/pwm/pwm.h>
+
/* Indexes for the sysfs hooks */
enum adt_sysfs_id {
INPUT = 0,
@@ -1662,6 +1664,130 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
return 0;
}
+struct adt7475_pwm_config {
+ int index;
+ int freq;
+ int flags;
+ int duty;
+};
+
+static int _adt7475_pwm_properties_parse_args(u32 args[4], struct adt7475_pwm_config *cfg)
+{
+ int freq_hz;
+ int duty;
+
+ if (args[1] == 0)
+ return -EINVAL;
+
+ freq_hz = 1000000000UL / args[1];
+ if (args[3] >= args[1])
+ duty = 255;
+ else
+ duty = div_u64(255ULL * args[3], args[1]);
+
+ cfg->index = args[0];
+ cfg->freq = find_closest(freq_hz, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
+ cfg->flags = args[2];
+ cfg->duty = duty;
+
+ return 0;
+}
+
+static int adt7475_pwm_properties_parse_reference_args(struct fwnode_handle *fwnode,
+ struct adt7475_pwm_config *cfg)
+{
+ int ret, i;
+ struct fwnode_reference_args rargs = {};
+ u32 args[4] = {};
+
+ ret = fwnode_property_get_reference_args(fwnode, "pwms", "#pwm-cells", 0, 0, &rargs);
+ if (ret)
+ return ret;
+
+ if (rargs.nargs != 4) {
+ fwnode_handle_put(rargs.fwnode);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++)
+ args[i] = rargs.args[i];
+
+ ret = _adt7475_pwm_properties_parse_args(args, cfg);
+
+ fwnode_handle_put(rargs.fwnode);
+
+ return ret;
+}
+
+static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode,
+ struct adt7475_pwm_config *cfg)
+{
+ int ret;
+ u32 args[4] = {};
+
+ ret = fwnode_property_read_u32_array(fwnode, "pwms", args, ARRAY_SIZE(args));
+ if (ret)
+ return ret;
+
+ return _adt7475_pwm_properties_parse_args(args, cfg);
+}
+
+static int adt7475_fan_pwm_config(struct i2c_client *client)
+{
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ struct fwnode_handle *child;
+ struct adt7475_pwm_config cfg = {};
+ int ret;
+
+ device_for_each_child_node(&client->dev, child) {
+ if (!fwnode_property_present(child, "pwms"))
+ continue;
+
+ if (is_of_node(child))
+ ret = adt7475_pwm_properties_parse_reference_args(child, &cfg);
+ else
+ ret = adt7475_pwm_properties_parse_args(child, &cfg);
+
+		if (ret)
+			return ret;
+
+		if (cfg.index >= ADT7475_PWM_COUNT)
+			return -EINVAL;
+
+ ret = adt7475_read(PWM_CONFIG_REG(cfg.index));
+ if (ret < 0)
+ return ret;
+ data->pwm[CONTROL][cfg.index] = ret;
+ if (cfg.flags & PWM_POLARITY_INVERTED)
+ data->pwm[CONTROL][cfg.index] |= BIT(4);
+ else
+ data->pwm[CONTROL][cfg.index] &= ~BIT(4);
+
+ /* Force to manual mode so PWM values take effect */
+ data->pwm[CONTROL][cfg.index] &= ~0xE0;
+ data->pwm[CONTROL][cfg.index] |= 0x07 << 5;
+
+ ret = i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(cfg.index),
+ data->pwm[CONTROL][cfg.index]);
+ if (ret)
+ return ret;
+
+ data->pwm[INPUT][cfg.index] = cfg.duty;
+ ret = i2c_smbus_write_byte_data(client, PWM_REG(cfg.index),
+ data->pwm[INPUT][cfg.index]);
+ if (ret)
+ return ret;
+
+ data->range[cfg.index] = adt7475_read(TEMP_TRANGE_REG(cfg.index));
+ data->range[cfg.index] &= ~0xf;
+ data->range[cfg.index] |= cfg.freq;
+
+ ret = i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(cfg.index),
+ data->range[cfg.index]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int adt7475_probe(struct i2c_client *client)
{
enum chips chip;
@@ -1774,6 +1900,10 @@ static int adt7475_probe(struct i2c_client *client)
if (ret && ret != -EINVAL)
dev_warn(&client->dev, "Error configuring pwm polarity\n");
+ ret = adt7475_fan_pwm_config(client);
+ if (ret)
+ dev_warn(&client->dev, "Error %d configuring fan/pwm\n", ret);
+
/* Start monitoring */
switch (chip) {
case adt7475:
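The two parse helpers added above reduce a four-cell "pwms" specifier (index, period in ns, flags, duty in ns) to a frequency table index and a 0-255 duty value. A standalone sketch of that arithmetic with made-up cell values (plain C, so div_u64() becomes an ordinary 64-bit division):

#include <stdio.h>

int main(void)
{
	unsigned long period_ns = 40000; /* second pwms cell: 40 us */
	unsigned long duty_ns = 10000;   /* fourth pwms cell: 25 percent */
	unsigned long freq_hz = 1000000000UL / period_ns;
	unsigned long duty = duty_ns >= period_ns ?
			     255 : 255ULL * duty_ns / period_ns;

	/* prints "freq=25000 Hz duty=63/255" */
	printf("freq=%lu Hz duty=%lu/255\n", freq_hz, duty);
	return 0;
}

The driver then maps freq_hz onto pwmfreq_table with find_closest() rather than storing the raw frequency.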
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index 6701920de17f..2d329391ed3f 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -170,21 +170,15 @@ static int adt7x10_temp_write(struct adt7x10_data *data, int index, long temp)
static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val)
{
- int hyst, temp, ret;
+ unsigned int regs[2] = {ADT7X10_T_HYST, ADT7X10_REG_TEMP[index]};
+ int hyst, ret;
+ u16 regdata[2];
- mutex_lock(&data->update_lock);
- ret = regmap_read(data->regmap, ADT7X10_T_HYST, &hyst);
- if (ret) {
- mutex_unlock(&data->update_lock);
- return ret;
- }
-
- ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &temp);
- mutex_unlock(&data->update_lock);
+ ret = regmap_multi_reg_read(data->regmap, regs, regdata, 2);
if (ret)
return ret;
- hyst = (hyst & ADT7X10_T_HYST_MASK) * 1000;
+ hyst = (regdata[0] & ADT7X10_T_HYST_MASK) * 1000;
/*
* hysteresis is stored as a 4 bit offset in the device, convert it
@@ -194,7 +188,7 @@ static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val)
if (index == adt7x10_t_alarm_low)
hyst = -hyst;
- *val = ADT7X10_REG_TO_TEMP(data, temp) - hyst;
+ *val = ADT7X10_REG_TO_TEMP(data, regdata[1]) - hyst;
return 0;
}
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index ec94392fcb65..ac64b407ed0e 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -136,29 +136,25 @@ struct amc6821_data {
*/
static int amc6821_get_auto_point_temps(struct regmap *regmap, int channel, u8 *temps)
{
- u32 pwm, regval;
+ u32 regs[] = {
+ AMC6821_REG_DCY_LOW_TEMP,
+ AMC6821_REG_PSV_TEMP,
+ channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL
+ };
+ u8 regvals[3];
+ int slope;
int err;
- err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm);
- if (err)
- return err;
-
- err = regmap_read(regmap, AMC6821_REG_PSV_TEMP, &regval);
- if (err)
- return err;
- temps[0] = regval;
-
- err = regmap_read(regmap,
- channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL,
- &regval);
+ err = regmap_multi_reg_read(regmap, regs, regvals, 3);
if (err)
return err;
- temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regval) * 4;
+ temps[0] = regvals[1];
+ temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regvals[2]) * 4;
/* slope is 32 >> <slope bits> in °C */
- regval = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regval);
- if (regval)
- temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - pwm, regval);
+ slope = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regvals[2]);
+ if (slope)
+ temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - regvals[0], slope);
else
temps[2] = 255;
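The reworked amc6821_get_auto_point_temps() above decodes the automatic fan curve: temps[1] is the start temperature (limit field in 4 degC units), and the slope field encodes 32 >> bits PWM steps per degC, so the temperature at which the duty cycle saturates at 255 follows from the starting PWM. A standalone sketch with made-up register fields:

#include <stdio.h>

int main(void)
{
	unsigned int start_pwm = 51;  /* from AMC6821_REG_DCY_LOW_TEMP */
	unsigned int temp_limit = 10; /* limit field: 10 * 4 = 40 degC */
	unsigned int slope_bits = 2;  /* slope = 32 >> 2 = 8 steps/degC */
	unsigned int t_start = temp_limit * 4;
	unsigned int slope = 32 >> slope_bits;
	/* DIV_ROUND_CLOSEST(255 - 51, 8) = 26 -> saturates at 66 degC */
	unsigned int t_full = t_start + (255 - start_pwm + slope / 2) / slope;

	printf("ramp starts at %u degC, full speed at %u degC\n",
	       t_start, t_full);
	return 0;
}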
diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c
index 08a2ded95e45..75eadda738ab 100644
--- a/drivers/hwmon/aspeed-g6-pwm-tach.c
+++ b/drivers/hwmon/aspeed-g6-pwm-tach.c
@@ -456,7 +456,6 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev, *hwmon;
int ret;
- struct device_node *child;
struct aspeed_pwm_tach_data *priv;
struct pwm_chip *chip;
@@ -498,10 +497,9 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
ret = aspeed_create_fan_monitor(dev, child, priv);
if (ret) {
- of_node_put(child);
dev_warn(dev, "Failed to create fan %d", ret);
return 0;
}
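This hunk, like the aspeed-pwm-tacho, ina3221 and lm90 hunks below, switches to the scoped child-node iterator, which drops the node reference automatically when the loop variable goes out of scope. A minimal sketch of the pattern, assuming <linux/of.h> and a hypothetical probe_child() helper:

static int probe_children(struct device_node *np)
{
	for_each_child_of_node_scoped(np, child) {
		int ret = probe_child(child);

		/*
		 * No of_node_put(child) on the error path: the scoped
		 * iterator releases the reference when "child" leaves
		 * scope, early return included.
		 */
		if (ret)
			return ret;
	}
	return 0;
}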
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 4acc1858d8ac..aa159bf158a3 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -907,7 +907,7 @@ static void aspeed_pwm_tacho_remove(void *data)
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np, *child;
+ struct device_node *np;
struct aspeed_pwm_tacho_data *priv;
void __iomem *regs;
struct device *hwmon;
@@ -951,12 +951,10 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
aspeed_create_type(priv);
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = aspeed_create_fan(dev, child, priv);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
priv->groups[0] = &pwm_dev_group;
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
index 6ccceae21f70..edf454474f11 100644
--- a/drivers/hwmon/chipcap2.c
+++ b/drivers/hwmon/chipcap2.c
@@ -740,37 +740,26 @@ static int cc2_probe(struct i2c_client *client)
data->client = client;
data->regulator = devm_regulator_get_exclusive(dev, "vdd");
- if (IS_ERR(data->regulator)) {
- dev_err_probe(dev, PTR_ERR(data->regulator),
- "Failed to get regulator\n");
- return PTR_ERR(data->regulator);
- }
+ if (IS_ERR(data->regulator))
+ return dev_err_probe(dev, PTR_ERR(data->regulator),
+ "Failed to get regulator\n");
ret = cc2_request_ready_irq(data, dev);
- if (ret) {
- dev_err_probe(dev, ret, "Failed to request ready irq\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request ready irq\n");
ret = cc2_request_alarm_irqs(data, dev);
- if (ret) {
- dev_err_probe(dev, ret, "Failed to request alarm irqs\n");
- goto disable;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request alarm irqs\n");
data->hwmon = devm_hwmon_device_register_with_info(dev, client->name,
data, &cc2_chip_info,
NULL);
- if (IS_ERR(data->hwmon)) {
- dev_err_probe(dev, PTR_ERR(data->hwmon),
- "Failed to register hwmon device\n");
- ret = PTR_ERR(data->hwmon);
- }
-
-disable:
- cc2_disable(data);
+ if (IS_ERR(data->hwmon))
+ return dev_err_probe(dev, PTR_ERR(data->hwmon),
+ "Failed to register hwmon device\n");
- return ret;
+ return 0;
}
static void cc2_remove(struct i2c_client *client)
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 0362a13f6525..f9b3a3030f13 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1489,6 +1489,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
{
+ .ident = "Dell Latitude 7320",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude 7320"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
+ {
.ident = "Dell Latitude E6440",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index cb2f01dc4326..4514f3ed90cc 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -400,6 +400,7 @@ static const struct of_device_id gsc_hwmon_of_match[] = {
{ .compatible = "gw,gsc-adc", },
{}
};
+MODULE_DEVICE_TABLE(of, gsc_hwmon_of_match);
static struct platform_driver gsc_hwmon_driver = {
.driver = {
diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
index dfa1d6926dea..d6bdad26feb1 100644
--- a/drivers/hwmon/hp-wmi-sensors.c
+++ b/drivers/hwmon/hp-wmi-sensors.c
@@ -1597,15 +1597,13 @@ static void hp_wmi_devm_notify_remove(void *ignored)
}
/* hp_wmi_notify - WMI event notification handler */
-static void hp_wmi_notify(u32 value, void *context)
+static void hp_wmi_notify(union acpi_object *wobj, void *context)
{
struct hp_wmi_info *temp_info[HP_WMI_MAX_INSTANCES] = {};
- struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
struct hp_wmi_sensors *state = context;
struct device *dev = &state->wdev->dev;
struct hp_wmi_event event = {};
struct hp_wmi_info *fan_info;
- union acpi_object *wobj;
acpi_status err;
int event_type;
u8 count;
@@ -1630,20 +1628,15 @@ static void hp_wmi_notify(u32 value, void *context)
* HPBIOS_BIOSEvent instance.
*/
- mutex_lock(&state->lock);
-
- err = wmi_get_event_data(value, &out);
- if (ACPI_FAILURE(err))
- goto out_unlock;
-
- wobj = out.pointer;
if (!wobj)
- goto out_unlock;
+ return;
+
+ mutex_lock(&state->lock);
err = populate_event_from_wobj(dev, &event, wobj);
if (err) {
dev_warn(dev, "Bad event data (ACPI type %d)\n", wobj->type);
- goto out_free_wobj;
+ goto out_free;
}
event_type = classify_event(event.name, event.category);
@@ -1668,13 +1661,10 @@ static void hp_wmi_notify(u32 value, void *context)
break;
}
-out_free_wobj:
- kfree(wobj);
-
+out_free:
devm_kfree(dev, event.name);
devm_kfree(dev, event.description);
-out_unlock:
mutex_unlock(&state->lock);
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a362080d41fa..9c35c4d0369d 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -1188,24 +1188,6 @@ error:
}
EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_info);
-static int devm_hwmon_match(struct device *dev, void *res, void *data)
-{
- struct device **hwdev = res;
-
- return *hwdev == data;
-}
-
-/**
- * devm_hwmon_device_unregister - removes a previously registered hwmon device
- *
- * @dev: the parent device of the device to unregister
- */
-void devm_hwmon_device_unregister(struct device *dev)
-{
- WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev));
-}
-EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister);
-
static char *__hwmon_sanitize_name(struct device *dev, const char *old_name)
{
char *name, *p;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 9ab4205622e2..f0fa6d073627 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -22,21 +22,21 @@
* Thanks to Jan Volkering
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/jiffies.h>
-#include <linux/of.h>
-#include <linux/delay.h>
+#include <linux/sysfs.h>
#include <linux/util_macros.h>
-#include <linux/regmap.h>
-
-#include <linux/platform_data/ina2xx.h>
/* common register definitions */
#define INA2XX_CONFIG 0x00
@@ -51,10 +51,6 @@
#define INA226_ALERT_LIMIT 0x07
#define INA226_DIE_ID 0xFF
-/* register count */
-#define INA219_REGISTERS 6
-#define INA226_REGISTERS 8
-
#define INA2XX_MAX_REGISTERS 8
/* settings - depend on use case */
@@ -68,39 +64,65 @@
#define INA2XX_RSHUNT_DEFAULT 10000
/* bit mask for reading the averaging setting in the configuration register */
-#define INA226_AVG_RD_MASK 0x0E00
+#define INA226_AVG_RD_MASK GENMASK(11, 9)
-#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
-#define INA226_SHIFT_AVG(val) ((val) << 9)
+#define INA226_READ_AVG(reg) FIELD_GET(INA226_AVG_RD_MASK, reg)
-#define INA226_ALERT_POLARITY_MASK 0x0002
-#define INA226_SHIFT_ALERT_POLARITY(val) ((val) << 1)
-#define INA226_ALERT_POL_LOW 0
-#define INA226_ALERT_POL_HIGH 1
+#define INA226_ALERT_LATCH_ENABLE BIT(0)
+#define INA226_ALERT_POLARITY BIT(1)
/* bit number of alert functions in Mask/Enable Register */
-#define INA226_SHUNT_OVER_VOLTAGE_BIT 15
-#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14
-#define INA226_BUS_OVER_VOLTAGE_BIT 13
-#define INA226_BUS_UNDER_VOLTAGE_BIT 12
-#define INA226_POWER_OVER_LIMIT_BIT 11
+#define INA226_SHUNT_OVER_VOLTAGE_MASK BIT(15)
+#define INA226_SHUNT_UNDER_VOLTAGE_MASK BIT(14)
+#define INA226_BUS_OVER_VOLTAGE_MASK BIT(13)
+#define INA226_BUS_UNDER_VOLTAGE_MASK BIT(12)
+#define INA226_POWER_OVER_LIMIT_MASK BIT(11)
/* bit mask for alert config bits of Mask/Enable Register */
-#define INA226_ALERT_CONFIG_MASK 0xFC00
+#define INA226_ALERT_CONFIG_MASK GENMASK(15, 10)
#define INA226_ALERT_FUNCTION_FLAG BIT(4)
-/* common attrs, ina226 attrs and NULL */
-#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
-
/*
* Both bus voltage and shunt voltage conversion times for ina226 are set
* to 0b0100 on POR, which translates to 2200 microseconds in total.
*/
#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
-static struct regmap_config ina2xx_regmap_config = {
+static bool ina2xx_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case INA2XX_CONFIG:
+ case INA2XX_CALIBRATION:
+ case INA226_MASK_ENABLE:
+ case INA226_ALERT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ina2xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+ case INA2XX_BUS_VOLTAGE:
+ case INA2XX_POWER:
+ case INA2XX_CURRENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config ina2xx_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
+ .use_single_write = true,
+ .use_single_read = true,
+ .max_register = INA2XX_MAX_REGISTERS,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = ina2xx_volatile_reg,
+ .writeable_reg = ina2xx_writeable_reg,
};
enum ina2xx_ids { ina219, ina226 };
@@ -108,7 +130,6 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
int calibration_value;
- int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
@@ -117,21 +138,19 @@ struct ina2xx_config {
struct ina2xx_data {
const struct ina2xx_config *config;
+ enum ina2xx_ids chip;
long rshunt;
long current_lsb_uA;
long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
-
- const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
};
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
.calibration_value = 4096,
- .registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
@@ -140,7 +159,6 @@ static const struct ina2xx_config ina2xx_config[] = {
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
.calibration_value = 2048,
- .registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
@@ -171,66 +189,76 @@ static int ina226_reg_to_interval(u16 config)
* Return the new, shifted AVG field value of CONFIG register,
* to use with regmap_update_bits
*/
-static u16 ina226_interval_to_reg(int interval)
+static u16 ina226_interval_to_reg(long interval)
{
int avg, avg_bits;
+ /*
+ * The maximum supported interval is 1,024 * (2 * 8.244ms) ~= 16.8s.
+ * Clamp to 32 seconds before calculations to avoid overflows.
+ */
+ interval = clamp_val(interval, 0, 32000);
+
avg = DIV_ROUND_CLOSEST(interval * 1000,
INA226_TOTAL_CONV_TIME_DEFAULT);
avg_bits = find_closest(avg, ina226_avg_tab,
ARRAY_SIZE(ina226_avg_tab));
- return INA226_SHIFT_AVG(avg_bits);
+ return FIELD_PREP(INA226_AVG_RD_MASK, avg_bits);
}
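A standalone sketch of the interval conversion just above, assuming the driver's ina226_avg_tab of {1, 4, 16, 64, 128, 256, 512, 1024} and approximating find_closest() with a linear scan:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
	long interval_ms = 1000;
	/* one averaged sample takes 2200 us at the POR conversion times */
	long avg = (interval_ms * 1000 + 1100) / 2200;	/* = 455 */
	int i, best = 0;

	for (i = 1; i < 8; i++)
		if (labs(avg_tab[i] - avg) < labs(avg_tab[best] - avg))
			best = i;

	/* 455 is nearest to 512, so CONFIG[11:9] gets index 6 */
	printf("avg=%ld -> avg_bits=%d (factor %d)\n",
	       avg, best, avg_tab[best]);
	return 0;
}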
-static int ina2xx_set_alert_polarity(struct ina2xx_data *data,
- unsigned long val)
+static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
+ unsigned int regval)
{
- return regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
- INA226_ALERT_POLARITY_MASK,
- INA226_SHIFT_ALERT_POLARITY(val));
-}
+ int val;
-/*
- * Calibration register is set to the best value, which eliminates
- * truncation errors on calculating current register in hardware.
- * According to datasheet (eq. 3) the best values are 2048 for
- * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
- */
-static int ina2xx_calibrate(struct ina2xx_data *data)
-{
- return regmap_write(data->regmap, INA2XX_CALIBRATION,
- data->config->calibration_value);
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+ /* signed register */
+ val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div);
+ break;
+ case INA2XX_BUS_VOLTAGE:
+ val = (regval >> data->config->bus_voltage_shift) *
+ data->config->bus_voltage_lsb;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ break;
+ case INA2XX_POWER:
+ val = regval * data->power_lsb_uW;
+ break;
+ case INA2XX_CURRENT:
+ /* signed register, result in mA */
+ val = (s16)regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ break;
+ case INA2XX_CALIBRATION:
+ val = regval;
+ break;
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
}
/*
- * Initialize the configuration and calibration registers.
+ * Read and convert a register value from the chip. If the register value is 0,
+ * check if the chip has been power cycled or reset. If so, re-initialize it.
*/
-static int ina2xx_init(struct ina2xx_data *data)
-{
- int ret = regmap_write(data->regmap, INA2XX_CONFIG,
- data->config->config_default);
- if (ret < 0)
- return ret;
-
- return ina2xx_calibrate(data);
-}
-
-static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
+static int ina2xx_read_init(struct device *dev, int reg, long *val)
{
struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
int ret, retry;
- dev_dbg(dev, "Starting register %d read\n", reg);
-
for (retry = 5; retry; retry--) {
-
- ret = regmap_read(data->regmap, reg, regval);
+ ret = regmap_read(regmap, reg, &regval);
if (ret < 0)
return ret;
- dev_dbg(dev, "read %d, val = 0x%04x\n", reg, *regval);
-
/*
* If the current value in the calibration register is 0, the
* power and current registers will also remain at 0. In case
@@ -239,20 +267,19 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
* We do that extra read of the calibration register if there
* is some hint of a chip reset.
*/
- if (*regval == 0) {
+ if (regval == 0) {
unsigned int cal;
- ret = regmap_read(data->regmap, INA2XX_CALIBRATION,
- &cal);
+ ret = regmap_read_bypassed(regmap, INA2XX_CALIBRATION, &cal);
if (ret < 0)
return ret;
if (cal == 0) {
dev_warn(dev, "chip not calibrated, reinitializing\n");
- ret = ina2xx_init(data);
- if (ret < 0)
- return ret;
+ regcache_mark_dirty(regmap);
+ regcache_sync(regmap);
+
/*
* Let's make sure the power and current
* registers have been updated before trying
@@ -262,6 +289,7 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
continue;
}
}
+ *val = ina2xx_get_value(data, reg, regval);
return 0;
}
@@ -274,101 +302,31 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
return -ENODEV;
}
-static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
- unsigned int regval)
-{
- int val;
-
- switch (reg) {
- case INA2XX_SHUNT_VOLTAGE:
- /* signed register */
- val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div);
- break;
- case INA2XX_BUS_VOLTAGE:
- val = (regval >> data->config->bus_voltage_shift)
- * data->config->bus_voltage_lsb;
- val = DIV_ROUND_CLOSEST(val, 1000);
- break;
- case INA2XX_POWER:
- val = regval * data->power_lsb_uW;
- break;
- case INA2XX_CURRENT:
- /* signed register, result in mA */
- val = (s16)regval * data->current_lsb_uA;
- val = DIV_ROUND_CLOSEST(val, 1000);
- break;
- case INA2XX_CALIBRATION:
- val = regval;
- break;
- default:
- /* programmer goofed */
- WARN_ON_ONCE(1);
- val = 0;
- break;
- }
-
- return val;
-}
-
-static ssize_t ina2xx_value_show(struct device *dev,
- struct device_attribute *da, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct ina2xx_data *data = dev_get_drvdata(dev);
- unsigned int regval;
-
- int err = ina2xx_read_reg(dev, attr->index, &regval);
-
- if (err < 0)
- return err;
-
- return sysfs_emit(buf, "%d\n", ina2xx_get_value(data, attr->index, regval));
-}
-
-static int ina226_reg_to_alert(struct ina2xx_data *data, u8 bit, u16 regval)
-{
- int reg;
-
- switch (bit) {
- case INA226_SHUNT_OVER_VOLTAGE_BIT:
- case INA226_SHUNT_UNDER_VOLTAGE_BIT:
- reg = INA2XX_SHUNT_VOLTAGE;
- break;
- case INA226_BUS_OVER_VOLTAGE_BIT:
- case INA226_BUS_UNDER_VOLTAGE_BIT:
- reg = INA2XX_BUS_VOLTAGE;
- break;
- case INA226_POWER_OVER_LIMIT_BIT:
- reg = INA2XX_POWER;
- break;
- default:
- /* programmer goofed */
- WARN_ON_ONCE(1);
- return 0;
- }
-
- return ina2xx_get_value(data, reg, regval);
-}
-
/*
* Turns alert limit values into register values.
* Opposite of the formula in ina2xx_get_value().
*/
-static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val)
+static u16 ina226_alert_to_reg(struct ina2xx_data *data, int reg, long val)
{
- switch (bit) {
- case INA226_SHUNT_OVER_VOLTAGE_BIT:
- case INA226_SHUNT_UNDER_VOLTAGE_BIT:
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+ val = clamp_val(val, 0, SHRT_MAX * data->config->shunt_div);
val *= data->config->shunt_div;
- return clamp_val(val, SHRT_MIN, SHRT_MAX);
- case INA226_BUS_OVER_VOLTAGE_BIT:
- case INA226_BUS_UNDER_VOLTAGE_BIT:
+ return clamp_val(val, 0, SHRT_MAX);
+ case INA2XX_BUS_VOLTAGE:
+ val = clamp_val(val, 0, 200000);
val = (val * 1000) << data->config->bus_voltage_shift;
val = DIV_ROUND_CLOSEST(val, data->config->bus_voltage_lsb);
- return clamp_val(val, 0, SHRT_MAX);
- case INA226_POWER_OVER_LIMIT_BIT:
+ return clamp_val(val, 0, USHRT_MAX);
+ case INA2XX_POWER:
+ val = clamp_val(val, 0, UINT_MAX - data->power_lsb_uW);
val = DIV_ROUND_CLOSEST(val, data->power_lsb_uW);
return clamp_val(val, 0, USHRT_MAX);
+ case INA2XX_CURRENT:
+ val = clamp_val(val, INT_MIN / 1000, INT_MAX / 1000);
+		/* signed register, value in mA */
+ val = DIV_ROUND_CLOSEST(val * 1000, data->current_lsb_uA);
+ return clamp_val(val, SHRT_MIN, SHRT_MAX);
default:
/* programmer goofed */
WARN_ON_ONCE(1);
@@ -376,45 +334,37 @@ static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val)
}
}
-static ssize_t ina226_alert_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int ina226_alert_limit_read(struct ina2xx_data *data, u32 mask, int reg, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
int regval;
- int val = 0;
int ret;
mutex_lock(&data->config_lock);
- ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ ret = regmap_read(regmap, INA226_MASK_ENABLE, &regval);
if (ret)
goto abort;
- if (regval & BIT(attr->index)) {
- ret = regmap_read(data->regmap, INA226_ALERT_LIMIT, &regval);
+ if (regval & mask) {
+ ret = regmap_read(regmap, INA226_ALERT_LIMIT, &regval);
if (ret)
goto abort;
- val = ina226_reg_to_alert(data, attr->index, regval);
+ *val = ina2xx_get_value(data, reg, regval);
+ } else {
+ *val = 0;
}
-
- ret = sysfs_emit(buf, "%d\n", val);
abort:
mutex_unlock(&data->config_lock);
return ret;
}
-static ssize_t ina226_alert_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static int ina226_alert_limit_write(struct ina2xx_data *data, u32 mask, int reg, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct ina2xx_data *data = dev_get_drvdata(dev);
- unsigned long val;
+ struct regmap *regmap = data->regmap;
int ret;
- ret = kstrtoul(buf, 10, &val);
- if (ret < 0)
- return ret;
+ if (val < 0)
+ return -EINVAL;
/*
* Clear all alerts first to avoid accidentally triggering ALERT pin
@@ -422,217 +372,462 @@ static ssize_t ina226_alert_store(struct device *dev,
* if the value is non-zero.
*/
mutex_lock(&data->config_lock);
- ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ ret = regmap_update_bits(regmap, INA226_MASK_ENABLE,
INA226_ALERT_CONFIG_MASK, 0);
if (ret < 0)
goto abort;
- ret = regmap_write(data->regmap, INA226_ALERT_LIMIT,
- ina226_alert_to_reg(data, attr->index, val));
+ ret = regmap_write(regmap, INA226_ALERT_LIMIT,
+ ina226_alert_to_reg(data, reg, val));
if (ret < 0)
goto abort;
- if (val != 0) {
- ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
- INA226_ALERT_CONFIG_MASK,
- BIT(attr->index));
- if (ret < 0)
- goto abort;
- }
-
- ret = count;
+ if (val)
+ ret = regmap_update_bits(regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK, mask);
abort:
mutex_unlock(&data->config_lock);
return ret;
}
-static ssize_t ina226_alarm_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int ina2xx_chip_read(struct device *dev, u32 attr, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct ina2xx_data *data = dev_get_drvdata(dev);
- int regval;
- int alarm = 0;
+ u32 regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = regmap_read(data->regmap, INA2XX_CONFIG, &regval);
+ if (ret)
+ return ret;
+
+ *val = ina226_reg_to_interval(regval);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina226_alert_read(struct regmap *regmap, u32 mask, long *val)
+{
+ unsigned int regval;
int ret;
- ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ ret = regmap_read_bypassed(regmap, INA226_MASK_ENABLE, &regval);
if (ret)
return ret;
- alarm = (regval & BIT(attr->index)) &&
- (regval & INA226_ALERT_FUNCTION_FLAG);
- return sysfs_emit(buf, "%d\n", alarm);
+ *val = (regval & mask) && (regval & INA226_ALERT_FUNCTION_FLAG);
+
+ return 0;
+}
+
+static int ina2xx_in_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ int voltage_reg = channel ? INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE;
+ u32 under_voltage_mask = channel ? INA226_BUS_UNDER_VOLTAGE_MASK
+ : INA226_SHUNT_UNDER_VOLTAGE_MASK;
+ u32 over_voltage_mask = channel ? INA226_BUS_OVER_VOLTAGE_MASK
+ : INA226_SHUNT_OVER_VOLTAGE_MASK;
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_in_input:
+ ret = regmap_read(regmap, voltage_reg, &regval);
+ if (ret)
+ return ret;
+ *val = ina2xx_get_value(data, voltage_reg, regval);
+ break;
+ case hwmon_in_lcrit:
+ return ina226_alert_limit_read(data, under_voltage_mask,
+ voltage_reg, val);
+ case hwmon_in_crit:
+ return ina226_alert_limit_read(data, over_voltage_mask,
+ voltage_reg, val);
+ case hwmon_in_lcrit_alarm:
+ return ina226_alert_read(regmap, under_voltage_mask, val);
+ case hwmon_in_crit_alarm:
+ return ina226_alert_read(regmap, over_voltage_mask, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina2xx_power_read(struct device *dev, u32 attr, long *val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_input:
+ return ina2xx_read_init(dev, INA2XX_POWER, val);
+ case hwmon_power_crit:
+ return ina226_alert_limit_read(data, INA226_POWER_OVER_LIMIT_MASK,
+ INA2XX_POWER, val);
+ case hwmon_power_crit_alarm:
+ return ina226_alert_read(data->regmap, INA226_POWER_OVER_LIMIT_MASK, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina2xx_curr_read(struct device *dev, u32 attr, long *val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret;
+
+ /*
+ * While the chips supported by this driver do not directly support
+ * current limits, they do support setting shunt voltage limits.
+ * The shunt voltage divided by the shunt resistor value is the current.
+	 * On top of that, calibration values are set such that the shunt
+	 * voltage register and the current register report the same values.
+ * That means we can report and configure current limits based on shunt
+ * voltage limits.
+ */
+ switch (attr) {
+ case hwmon_curr_input:
+ /*
+ * Since the shunt voltage and the current register report the
+ * same values when the chip is calibrated, we can calculate
+ * the current directly from the shunt voltage without relying
+ * on chip calibration.
+ */
+ ret = regmap_read(regmap, INA2XX_SHUNT_VOLTAGE, &regval);
+ if (ret)
+ return ret;
+ *val = ina2xx_get_value(data, INA2XX_CURRENT, regval);
+ return 0;
+ case hwmon_curr_lcrit:
+ return ina226_alert_limit_read(data, INA226_SHUNT_UNDER_VOLTAGE_MASK,
+ INA2XX_CURRENT, val);
+ case hwmon_curr_crit:
+ return ina226_alert_limit_read(data, INA226_SHUNT_OVER_VOLTAGE_MASK,
+ INA2XX_CURRENT, val);
+ case hwmon_curr_lcrit_alarm:
+ return ina226_alert_read(regmap, INA226_SHUNT_UNDER_VOLTAGE_MASK, val);
+ case hwmon_curr_crit_alarm:
+ return ina226_alert_read(regmap, INA226_SHUNT_OVER_VOLTAGE_MASK, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina2xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return ina2xx_chip_read(dev, attr, val);
+ case hwmon_in:
+ return ina2xx_in_read(dev, attr, channel, val);
+ case hwmon_power:
+ return ina2xx_power_read(dev, attr, val);
+ case hwmon_curr:
+ return ina2xx_curr_read(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina2xx_chip_write(struct device *dev, u32 attr, long val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return regmap_update_bits(data->regmap, INA2XX_CONFIG,
+ INA226_AVG_RD_MASK,
+ ina226_interval_to_reg(val));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina2xx_in_write(struct device *dev, u32 attr, int channel, long val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_in_lcrit:
+ return ina226_alert_limit_write(data,
+ channel ? INA226_BUS_UNDER_VOLTAGE_MASK : INA226_SHUNT_UNDER_VOLTAGE_MASK,
+ channel ? INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE,
+ val);
+ case hwmon_in_crit:
+ return ina226_alert_limit_write(data,
+ channel ? INA226_BUS_OVER_VOLTAGE_MASK : INA226_SHUNT_OVER_VOLTAGE_MASK,
+ channel ? INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina2xx_power_write(struct device *dev, u32 attr, long val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_crit:
+ return ina226_alert_limit_write(data, INA226_POWER_OVER_LIMIT_MASK,
+ INA2XX_POWER, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina2xx_curr_write(struct device *dev, u32 attr, long val)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_curr_lcrit:
+ return ina226_alert_limit_write(data, INA226_SHUNT_UNDER_VOLTAGE_MASK,
+ INA2XX_CURRENT, val);
+ case hwmon_curr_crit:
+ return ina226_alert_limit_write(data, INA226_SHUNT_OVER_VOLTAGE_MASK,
+ INA2XX_CURRENT, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina2xx_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return ina2xx_chip_write(dev, attr, val);
+ case hwmon_in:
+ return ina2xx_in_write(dev, attr, channel, val);
+ case hwmon_power:
+ return ina2xx_power_write(dev, attr, val);
+ case hwmon_curr:
+ return ina2xx_curr_write(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct ina2xx_data *data = _data;
+ enum ina2xx_ids chip = data->chip;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ if (chip == ina226)
+ return 0644;
+ break;
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ if (chip == ina226)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return 0444;
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ if (chip == ina226)
+ return 0644;
+ break;
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ if (chip == ina226)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return 0444;
+ case hwmon_power_crit:
+ if (chip == ina226)
+ return 0644;
+ break;
+ case hwmon_power_crit_alarm:
+ if (chip == ina226)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ if (chip == ina226)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
+static const struct hwmon_channel_info * const ina2xx_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_CRIT | HWMON_I_CRIT_ALARM |
+ HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM,
+ HWMON_I_INPUT | HWMON_I_CRIT | HWMON_I_CRIT_ALARM |
+ HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM
+ ),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM |
+ HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops ina2xx_hwmon_ops = {
+ .is_visible = ina2xx_is_visible,
+ .read = ina2xx_read,
+ .write = ina2xx_write,
+};
+
+static const struct hwmon_chip_info ina2xx_chip_info = {
+ .ops = &ina2xx_hwmon_ops,
+ .info = ina2xx_info,
+};
+
+/* shunt resistance */
+
/*
 * To keep the calibration register value fixed, the product of
 * current_lsb and shunt_resistor must also stay fixed, equal to
 * shunt_voltage_lsb = 1 / shunt_div scaled by 10^9 so that the
 * scale is preserved.
*/
-static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+static int ina2xx_set_shunt(struct ina2xx_data *data, unsigned long val)
{
unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
data->config->shunt_div);
- if (val <= 0 || val > dividend)
+ if (!val || val > dividend)
return -EINVAL;
- mutex_lock(&data->config_lock);
data->rshunt = val;
data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
data->power_lsb_uW = data->config->power_lsb_factor *
data->current_lsb_uA;
- mutex_unlock(&data->config_lock);
return 0;
}
-static ssize_t ina2xx_shunt_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t shunt_resistor_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct ina2xx_data *data = dev_get_drvdata(dev);
return sysfs_emit(buf, "%li\n", data->rshunt);
}
-static ssize_t ina2xx_shunt_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t shunt_resistor_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
unsigned long val;
int status;
- struct ina2xx_data *data = dev_get_drvdata(dev);
status = kstrtoul(buf, 10, &val);
if (status < 0)
return status;
+ mutex_lock(&data->config_lock);
status = ina2xx_set_shunt(data, val);
+ mutex_unlock(&data->config_lock);
if (status < 0)
return status;
return count;
}
-static ssize_t ina226_interval_store(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
-{
- struct ina2xx_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int status;
+static DEVICE_ATTR_RW(shunt_resistor);
- status = kstrtoul(buf, 10, &val);
- if (status < 0)
- return status;
-
- if (val > INT_MAX || val == 0)
- return -EINVAL;
-
- status = regmap_update_bits(data->regmap, INA2XX_CONFIG,
- INA226_AVG_RD_MASK,
- ina226_interval_to_reg(val));
- if (status < 0)
- return status;
-
- return count;
-}
+/* pointers to created device attributes */
+static struct attribute *ina2xx_attrs[] = {
+ &dev_attr_shunt_resistor.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina2xx);
-static ssize_t ina226_interval_show(struct device *dev,
- struct device_attribute *da, char *buf)
+/*
+ * Initialize chip
+ */
+static int ina2xx_init(struct device *dev, struct ina2xx_data *data)
{
- struct ina2xx_data *data = dev_get_drvdata(dev);
- int status;
- unsigned int regval;
-
- status = regmap_read(data->regmap, INA2XX_CONFIG, &regval);
- if (status)
- return status;
-
- return sysfs_emit(buf, "%d\n", ina226_reg_to_interval(regval));
-}
-
-/* shunt voltage */
-static SENSOR_DEVICE_ATTR_RO(in0_input, ina2xx_value, INA2XX_SHUNT_VOLTAGE);
-/* shunt voltage over/under voltage alert setting and alarm */
-static SENSOR_DEVICE_ATTR_RW(in0_crit, ina226_alert,
- INA226_SHUNT_OVER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RW(in0_lcrit, ina226_alert,
- INA226_SHUNT_UNDER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RO(in0_crit_alarm, ina226_alarm,
- INA226_SHUNT_OVER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RO(in0_lcrit_alarm, ina226_alarm,
- INA226_SHUNT_UNDER_VOLTAGE_BIT);
-
-/* bus voltage */
-static SENSOR_DEVICE_ATTR_RO(in1_input, ina2xx_value, INA2XX_BUS_VOLTAGE);
-/* bus voltage over/under voltage alert setting and alarm */
-static SENSOR_DEVICE_ATTR_RW(in1_crit, ina226_alert,
- INA226_BUS_OVER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RW(in1_lcrit, ina226_alert,
- INA226_BUS_UNDER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RO(in1_crit_alarm, ina226_alarm,
- INA226_BUS_OVER_VOLTAGE_BIT);
-static SENSOR_DEVICE_ATTR_RO(in1_lcrit_alarm, ina226_alarm,
- INA226_BUS_UNDER_VOLTAGE_BIT);
-
-/* calculated current */
-static SENSOR_DEVICE_ATTR_RO(curr1_input, ina2xx_value, INA2XX_CURRENT);
-
-/* calculated power */
-static SENSOR_DEVICE_ATTR_RO(power1_input, ina2xx_value, INA2XX_POWER);
-/* over-limit power alert setting and alarm */
-static SENSOR_DEVICE_ATTR_RW(power1_crit, ina226_alert,
- INA226_POWER_OVER_LIMIT_BIT);
-static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina226_alarm,
- INA226_POWER_OVER_LIMIT_BIT);
+ struct regmap *regmap = data->regmap;
+ u32 shunt;
+ int ret;
-/* shunt resistance */
-static SENSOR_DEVICE_ATTR_RW(shunt_resistor, ina2xx_shunt, INA2XX_CALIBRATION);
+ if (device_property_read_u32(dev, "shunt-resistor", &shunt) < 0)
+ shunt = INA2XX_RSHUNT_DEFAULT;
-/* update interval (ina226 only) */
-static SENSOR_DEVICE_ATTR_RW(update_interval, ina226_interval, 0);
+ ret = ina2xx_set_shunt(data, shunt);
+ if (ret < 0)
+ return ret;
-/* pointers to created device attributes */
-static struct attribute *ina2xx_attrs[] = {
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_curr1_input.dev_attr.attr,
- &sensor_dev_attr_power1_input.dev_attr.attr,
- &sensor_dev_attr_shunt_resistor.dev_attr.attr,
- NULL,
-};
+ ret = regmap_write(regmap, INA2XX_CONFIG, data->config->config_default);
+ if (ret < 0)
+ return ret;
-static const struct attribute_group ina2xx_group = {
- .attrs = ina2xx_attrs,
-};
+ if (data->chip == ina226) {
+ bool active_high = device_property_read_bool(dev, "ti,alert-polarity-active-high");
-static struct attribute *ina226_attrs[] = {
- &sensor_dev_attr_in0_crit.dev_attr.attr,
- &sensor_dev_attr_in0_lcrit.dev_attr.attr,
- &sensor_dev_attr_in0_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_in0_lcrit_alarm.dev_attr.attr,
- &sensor_dev_attr_in1_crit.dev_attr.attr,
- &sensor_dev_attr_in1_lcrit.dev_attr.attr,
- &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_in1_lcrit_alarm.dev_attr.attr,
- &sensor_dev_attr_power1_crit.dev_attr.attr,
- &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_update_interval.dev_attr.attr,
- NULL,
-};
+ regmap_update_bits(regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_LATCH_ENABLE | INA226_ALERT_POLARITY,
+ INA226_ALERT_LATCH_ENABLE |
+ FIELD_PREP(INA226_ALERT_POLARITY, active_high));
+ }
-static const struct attribute_group ina226_group = {
- .attrs = ina226_attrs,
-};
+ /*
+	 * The calibration register is set to the best value, which eliminates
+	 * truncation errors when the hardware calculates the current register.
+ * According to datasheet (eq. 3) the best values are 2048 for
+ * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
+ */
+ return regmap_write(regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
+}
static int ina2xx_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ina2xx_data *data;
struct device *hwmon_dev;
- u32 val;
- int ret, group = 0;
enum ina2xx_ids chip;
+ int ret;
chip = (uintptr_t)i2c_get_match_data(client);
@@ -642,21 +837,9 @@ static int ina2xx_probe(struct i2c_client *client)
/* set the device type */
data->config = &ina2xx_config[chip];
+ data->chip = chip;
mutex_init(&data->config_lock);
- if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
- struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
-
- if (pdata)
- val = pdata->shunt_uohms;
- else
- val = INA2XX_RSHUNT_DEFAULT;
- }
-
- ina2xx_set_shunt(data, val);
-
- ina2xx_regmap_config.max_register = data->config->registers;
-
data->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "failed to allocate register map\n");
@@ -667,37 +850,13 @@ static int ina2xx_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
- if (chip == ina226) {
- if (of_property_read_bool(dev->of_node, "ti,alert-polarity-active-high")) {
- ret = ina2xx_set_alert_polarity(data,
- INA226_ALERT_POL_HIGH);
- if (ret < 0) {
- return dev_err_probe(dev, ret,
- "failed to set alert polarity active high\n");
- }
- } else {
- /* Set default value i.e active low */
- ret = ina2xx_set_alert_polarity(data,
- INA226_ALERT_POL_LOW);
- if (ret < 0) {
- return dev_err_probe(dev, ret,
- "failed to set alert polarity active low\n");
- }
- }
- }
-
- ret = ina2xx_init(data);
- if (ret < 0) {
- dev_err(dev, "error configuring the device: %d\n", ret);
- return -ENODEV;
- }
-
- data->groups[group++] = &ina2xx_group;
- if (chip == ina226)
- data->groups[group++] = &ina226_group;
+ ret = ina2xx_init(dev, data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to configure device\n");
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &ina2xx_chip_info,
+ ina2xx_groups);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
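The comment in ina2xx_curr_read() above relies on the calibration constant keeping the shunt voltage and current registers numerically equal. A standalone sketch of that relationship using the INA226 numbers from this file (shunt_div = 400, i.e. a 2.5 uV shunt LSB, and the 10 mohm INA2XX_RSHUNT_DEFAULT):

#include <stdio.h>

int main(void)
{
	long shunt_div = 400;     /* ina226: 1/400 mV per count */
	long rshunt_uohm = 10000; /* INA2XX_RSHUNT_DEFAULT */
	long dividend = (1000000000 + shunt_div / 2) / shunt_div;
	long current_lsb_uA = (dividend + rshunt_uohm / 2) / rshunt_uohm;
	short regval = 1000;      /* raw shunt voltage register */

	/*
	 * 1000 counts * 250 uA = 250 mA, which at 10 mohm is exactly the
	 * 2.5 mV the shunt voltage register encodes, so the same raw
	 * value serves both the voltage and the current reading.
	 */
	printf("current_lsb=%ld uA, current=%ld mA\n",
	       current_lsb_uA, regval * current_lsb_uA / 1000);
	return 0;
}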
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index f0053f87e3e6..1bf479a0f793 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -813,7 +813,6 @@ static int ina3221_probe_child_from_dt(struct device *dev,
static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int ret;
/* Compatible with non-DT platforms */
@@ -822,12 +821,10 @@ static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
ina->single_shot = of_property_read_bool(np, "ti,single-shot");
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = ina3221_probe_child_from_dt(dev, child, ina);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 543526bac042..7dc19c5d62ac 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -438,16 +438,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->disp_negative = true;
}
- if (boot_cpu_data.x86 == 0x15 &&
+ data->is_zen = cpu_feature_enabled(X86_FEATURE_ZEN);
+ if (data->is_zen) {
+ data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+ data->read_tempreg = read_tempreg_nb_zen;
+ } else if (boot_cpu_data.x86 == 0x15 &&
((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
(boot_cpu_data.x86_model & 0xf0) == 0x70)) {
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
- } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
- data->read_tempreg = read_tempreg_nb_zen;
- data->is_zen = true;
+ } else {
+ data->read_htcreg = read_htcreg_pci;
+ data->read_tempreg = read_tempreg_pci;
+ }
+ if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
switch (boot_cpu_data.x86_model) {
case 0x1: /* Zen */
case 0x8: /* Zen+ */
@@ -469,10 +474,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
break;
}
} else if (boot_cpu_data.x86 == 0x19) {
- data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
- data->read_tempreg = read_tempreg_nb_zen;
- data->is_zen = true;
-
switch (boot_cpu_data.x86_model) {
case 0x0 ... 0x1: /* Zen3 SP3/TR */
case 0x8: /* Zen3 TR Chagall */
@@ -496,13 +497,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 12);
break;
}
- } else if (boot_cpu_data.x86 == 0x1a) {
- data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
- data->read_tempreg = read_tempreg_nb_zen;
- data->is_zen = true;
- } else {
- data->read_htcreg = read_htcreg_pci;
- data->read_tempreg = read_tempreg_pci;
}
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
@@ -548,6 +542,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index ca5c52b38c0f..511d95a0efb3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2674,19 +2674,16 @@ static int lm90_parse_dt_channel_info(struct i2c_client *client,
struct lm90_data *data)
{
int err;
- struct device_node *child;
struct device *dev = &client->dev;
const struct device_node *np = dev->of_node;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
err = lm90_probe_channel_from_dt(client, child, data);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 46579a3e1715..0be439b38ee1 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -27,15 +27,14 @@
* with the LM92.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
/*
* The LM92 and MAX6635 have 2 two-state pins for address selection,
@@ -43,8 +42,6 @@
*/
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-enum chips { lm92, max6635 };
-
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
@@ -66,10 +63,10 @@ static inline int TEMP_FROM_REG(s16 reg)
return reg / 8 * 625 / 10;
}
-static inline s16 TEMP_TO_REG(long val)
+static inline s16 TEMP_TO_REG(long val, int resolution)
{
val = clamp_val(val, -60000, 160000);
- return val * 10 / 625 * 8;
+ return DIV_ROUND_CLOSEST(val << (resolution - 9), 1000) << (16 - resolution);
}
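The new TEMP_TO_REG() above generalizes the old LM92-only formula to other resolutions and rounds instead of truncating. A standalone check for the LM92 itself, assuming its 13-bit resolution (consistent with TEMP_FROM_REG() dividing the register by 8):

#include <stdio.h>

int main(void)
{
	long val = 25625;    /* 25.625 degC in millidegrees */
	int resolution = 13;
	/* ((25625 << 4) + 500) / 1000 = 410 sixteenth-degree steps */
	long reg = (((val << (resolution - 9)) + 500) / 1000)
			<< (16 - resolution);

	/* 410 << 3 = 0x0cd0, matching the old val * 10 / 625 * 8 */
	printf("reg=0x%04lx\n", reg);
	return 0;
}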
/* Alarm flags are stored in the 3 LSB of the temperature register */
@@ -78,239 +75,336 @@ static inline u8 ALARMS_FROM_REG(s16 reg)
return reg & 0x0007;
}
-enum temp_index {
- t_input,
- t_crit,
- t_min,
- t_max,
- t_hyst,
- t_num_regs
-};
-
-static const u8 regs[t_num_regs] = {
- [t_input] = LM92_REG_TEMP,
- [t_crit] = LM92_REG_TEMP_CRIT,
- [t_min] = LM92_REG_TEMP_LOW,
- [t_max] = LM92_REG_TEMP_HIGH,
- [t_hyst] = LM92_REG_TEMP_HYST,
-};
-
/* Client data (each client gets its own) */
struct lm92_data {
- struct i2c_client *client;
+ struct regmap *regmap;
struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* registers values */
- s16 temp[t_num_regs]; /* index with enum temp_index */
+ int resolution;
};
-/*
- * Sysfs attributes and callback functions
- */
-
-static struct lm92_data *lm92_update_device(struct device *dev)
+static int lm92_temp_read(struct lm92_data *data, u32 attr, int channel, long *val)
{
- struct lm92_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) ||
- !data->valid) {
- dev_dbg(&client->dev, "Updating lm92 data\n");
- for (i = 0; i < t_num_regs; i++) {
- data->temp[i] =
- i2c_smbus_read_word_swapped(client, regs[i]);
- }
- data->last_updated = jiffies;
- data->valid = true;
+ int reg = -1, hyst_reg = -1, alarm_bit = 0;
+ struct regmap *regmap = data->regmap;
+ u32 temp;
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = LM92_REG_TEMP;
+ break;
+ case hwmon_temp_min:
+ reg = LM92_REG_TEMP_LOW;
+ break;
+ case hwmon_temp_max:
+ reg = LM92_REG_TEMP_HIGH;
+ break;
+ case hwmon_temp_crit:
+ reg = LM92_REG_TEMP_CRIT;
+ break;
+ case hwmon_temp_min_hyst:
+ hyst_reg = LM92_REG_TEMP_LOW;
+ break;
+ case hwmon_temp_max_hyst:
+ hyst_reg = LM92_REG_TEMP_HIGH;
+ break;
+ case hwmon_temp_crit_hyst:
+ hyst_reg = LM92_REG_TEMP_CRIT;
+ break;
+ case hwmon_temp_min_alarm:
+ alarm_bit = 0;
+ break;
+ case hwmon_temp_max_alarm:
+ alarm_bit = 1;
+ break;
+ case hwmon_temp_crit_alarm:
+ alarm_bit = 2;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ if (reg >= 0) {
+ ret = regmap_read(regmap, reg, &temp);
+ if (ret < 0)
+ return ret;
+ *val = TEMP_FROM_REG(temp);
+ } else if (hyst_reg >= 0) {
+ u32 regs[2] = { hyst_reg, LM92_REG_TEMP_HYST };
+ u16 regvals[2];
+
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
+ if (ret)
+ return ret;
+ if (attr == hwmon_temp_min_hyst)
+ *val = TEMP_FROM_REG(regvals[0]) + TEMP_FROM_REG(regvals[1]);
+ else
+ *val = TEMP_FROM_REG(regvals[0]) - TEMP_FROM_REG(regvals[1]);
+ } else {
+ ret = regmap_read(regmap, LM92_REG_TEMP, &temp);
+ if (ret)
+ return ret;
+ *val = !!(temp & BIT(alarm_bit));
+ }
+ return 0;
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int lm92_chip_read(struct lm92_data *data, u32 attr, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm92_data *data = lm92_update_device(dev);
-
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
+ u32 temp;
+ int ret;
+
+ switch (attr) {
+ case hwmon_chip_alarms:
+ ret = regmap_read(data->regmap, LM92_REG_TEMP, &temp);
+ if (ret)
+ return ret;
+ *val = ALARMS_FROM_REG(temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int lm92_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int nr = attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->temp[nr] = TEMP_TO_REG(val);
- i2c_smbus_write_word_swapped(client, regs[nr], data->temp[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t temp_hyst_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm92_data *data = lm92_update_device(dev);
-
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index])
- - TEMP_FROM_REG(data->temp[t_hyst]));
+ switch (type) {
+ case hwmon_chip:
+ return lm92_chip_read(data, attr, val);
+ case hwmon_temp:
+ return lm92_temp_read(data, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t temp1_min_hyst_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int lm92_temp_write(struct lm92_data *data, u32 attr, long val)
{
- struct lm92_data *data = lm92_update_device(dev);
-
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min])
- + TEMP_FROM_REG(data->temp[t_hyst]));
+ struct regmap *regmap = data->regmap;
+ int reg, err;
+ u32 temp;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = LM92_REG_TEMP_LOW;
+ break;
+ case hwmon_temp_max:
+ reg = LM92_REG_TEMP_HIGH;
+ break;
+ case hwmon_temp_crit:
+ reg = LM92_REG_TEMP_CRIT;
+ break;
+ case hwmon_temp_crit_hyst:
+ val = clamp_val(val, -120000, 220000);
+ mutex_lock(&data->update_lock);
+ err = regmap_read(regmap, LM92_REG_TEMP_CRIT, &temp);
+ if (err)
+ goto unlock;
+ val = TEMP_TO_REG(TEMP_FROM_REG(temp) - val, data->resolution);
+ err = regmap_write(regmap, LM92_REG_TEMP_HYST, val);
+unlock:
+ mutex_unlock(&data->update_lock);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return regmap_write(regmap, reg, TEMP_TO_REG(val, data->resolution));
}
-static ssize_t temp_hyst_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int lm92_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int err;
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, -120000, 220000);
- mutex_lock(&data->update_lock);
- data->temp[t_hyst] =
- TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val);
- i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST,
- data->temp[t_hyst]);
- mutex_unlock(&data->update_lock);
- return count;
+ switch (type) {
+ case hwmon_temp:
+ return lm92_temp_write(data, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static umode_t lm92_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct lm92_data *data = lm92_update_device(dev);
-
- return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp[t_input]));
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_alarms:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int bitnr = to_sensor_dev_attr(attr)->index;
- struct lm92_data *data = lm92_update_device(dev);
- return sprintf(buf, "%d\n", (data->temp[t_input] >> bitnr) & 1);
-}
+static const struct hwmon_channel_info * const lm92_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MIN | HWMON_T_MIN_HYST |
+ HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM),
+ NULL
+};
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, t_crit);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp_hyst, t_crit);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, t_min);
-static DEVICE_ATTR_RO(temp1_min_hyst);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, t_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, temp_hyst, t_max);
-static DEVICE_ATTR_RO(alarms);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
+static const struct hwmon_ops lm92_hwmon_ops = {
+ .is_visible = lm92_is_visible,
+ .read = lm92_read,
+ .write = lm92_write,
+};
+
+static const struct hwmon_chip_info lm92_chip_info = {
+ .ops = &lm92_hwmon_ops,
+ .info = lm92_info,
+};
/*
* Detection and registration
*/
-static void lm92_init_client(struct i2c_client *client)
+static int lm92_init_client(struct regmap *regmap)
{
- u8 config;
-
- /* Start the conversions if needed */
- config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
- if (config & 0x01)
- i2c_smbus_write_byte_data(client, LM92_REG_CONFIG,
- config & 0xFE);
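+	/* Clear the shutdown bit (bit 0) to start conversions */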
+ return regmap_clear_bits(regmap, LM92_REG_CONFIG, 0x01);
}
-static struct attribute *lm92_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &dev_attr_temp1_min_hyst.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &dev_attr_alarms.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(lm92);
-
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
- u8 config;
- u16 man_id;
+ u8 config_addr = LM92_REG_CONFIG;
+ u8 man_id_addr = LM92_REG_MAN_ID;
+ int i, regval;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG);
- man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID);
-
- if ((config & 0xe0) == 0x00 && man_id == 0x0180)
- pr_info("lm92: Found National Semiconductor LM92 chip\n");
- else
- return -ENODEV;
+ /*
+	 * The register map is aliased at multiples of 8, so each register
+	 * can also be read 8 addresses above its documented address. Read
+	 * the ID registers at both addresses to improve detection accuracy.
+ */
+ for (i = 0; i < 2; i++) {
+ regval = i2c_smbus_read_word_data(new_client, man_id_addr);
+ if (regval != 0x0180)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(new_client, config_addr);
+ if (regval < 0 || (regval & 0xe0))
+ return -ENODEV;
+ config_addr += 8;
+ man_id_addr += 8;
+ }
strscpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
-static int lm92_probe(struct i2c_client *new_client)
+/* regmap */
+
+static int lm92_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ int ret;
+
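+	/*
+	 * Only the configuration register is 8 bits wide;
+	 * all other registers are 16 bits, MSB first.
+	 */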
+ if (reg == LM92_REG_CONFIG)
+ ret = i2c_smbus_read_byte_data(context, reg);
+ else
+ ret = i2c_smbus_read_word_swapped(context, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int lm92_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ if (reg == LM92_REG_CONFIG)
+ return i2c_smbus_write_byte_data(context, LM92_REG_CONFIG, val);
+
+ return i2c_smbus_write_word_swapped(context, reg, val);
+}
+
+static bool lm92_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return reg == LM92_REG_TEMP;
+}
+
+static bool lm92_regmap_is_writeable(struct device *dev, unsigned int reg)
{
+ return reg >= LM92_REG_CONFIG;
+}
+
+static const struct regmap_config lm92_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = LM92_REG_TEMP_HIGH,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = lm92_regmap_is_volatile,
+ .writeable_reg = lm92_regmap_is_writeable,
+};
+
+static const struct regmap_bus lm92_regmap_bus = {
+ .reg_write = lm92_reg_write,
+ .reg_read = lm92_reg_read,
+};
+
+static int lm92_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm92_data *data;
+ struct regmap *regmap;
+ int err;
+
+ regmap = devm_regmap_init(dev, &lm92_regmap_bus, client,
+ &lm92_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm92_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm92_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = new_client;
+ data->regmap = regmap;
+ data->resolution = (unsigned long)i2c_get_match_data(client);
mutex_init(&data->update_lock);
/* Initialize the chipset */
- lm92_init_client(new_client);
+ err = lm92_init_client(regmap);
+ if (err)
+ return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
- new_client->name,
- data, lm92_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &lm92_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -318,9 +412,10 @@ static int lm92_probe(struct i2c_client *new_client)
* Module and driver stuff
*/
+/* .driver_data is the resolution of the limit registers, in bits */
static const struct i2c_device_id lm92_id[] = {
- { "lm92", lm92 },
- { "max6635", max6635 },
+ { "lm92", 13 },
+ { "max6635", 9 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm92_id);
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 9a7afdb49895..7da6c8f07332 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -8,16 +8,15 @@
* Copyright (C) 2008, 2010 Davide Rizzo <elpa.rizzo@gmail.com>
*/
-#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
#define DRVNAME "lm95234"
@@ -32,6 +31,8 @@ static const unsigned short normal_i2c[] = {
#define LM95234_REG_STATUS 0x02
#define LM95234_REG_CONFIG 0x03
#define LM95234_REG_CONVRATE 0x04
+#define LM95234_REG_ENABLE 0x05
+#define LM95234_REG_FILTER 0x06
#define LM95234_REG_STS_FAULT 0x07
#define LM95234_REG_STS_TCRIT1 0x08
#define LM95234_REG_STS_TCRIT2 0x09
@@ -52,541 +53,372 @@ static const unsigned short normal_i2c[] = {
/* Client data (each client gets its own) */
struct lm95234_data {
- struct i2c_client *client;
- const struct attribute_group *groups[3];
+ struct regmap *regmap;
struct mutex update_lock;
- unsigned long last_updated, interval; /* in jiffies */
- bool valid; /* false until following fields are valid */
- /* registers values */
- int temp[5]; /* temperature (signed) */
- u32 status; /* fault/alarm status */
- u8 tcrit1[5]; /* critical temperature limit */
- u8 tcrit2[2]; /* high temperature limit */
- s8 toffset[4]; /* remote temperature offset */
- u8 thyst; /* common hysteresis */
-
- u8 sensor_type; /* temperature sensor type */
+ enum chips type;
};
-static int lm95234_read_temp(struct i2c_client *client, int index, int *t)
+static int lm95234_read_temp(struct regmap *regmap, int index, long *t)
{
- int val;
- u16 temp = 0;
+ unsigned int regs[2];
+ int temp = 0, ret;
+ u8 regvals[2];
if (index) {
- val = i2c_smbus_read_byte_data(client,
- LM95234_REG_UTEMPH(index - 1));
- if (val < 0)
- return val;
- temp = val << 8;
- val = i2c_smbus_read_byte_data(client,
- LM95234_REG_UTEMPL(index - 1));
- if (val < 0)
- return val;
- temp |= val;
- *t = temp;
+ regs[0] = LM95234_REG_UTEMPH(index - 1);
+ regs[1] = LM95234_REG_UTEMPL(index - 1);
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
+ if (ret)
+ return ret;
+ temp = (regvals[0] << 8) | regvals[1];
}
/*
* Read signed temperature if unsigned temperature is 0,
* or if this is the local sensor.
*/
if (!temp) {
- val = i2c_smbus_read_byte_data(client,
- LM95234_REG_TEMPH(index));
- if (val < 0)
- return val;
- temp = val << 8;
- val = i2c_smbus_read_byte_data(client,
- LM95234_REG_TEMPL(index));
- if (val < 0)
- return val;
- temp |= val;
- *t = (s16)temp;
+ regs[0] = LM95234_REG_TEMPH(index);
+ regs[1] = LM95234_REG_TEMPL(index);
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
+ if (ret)
+ return ret;
+ temp = (regvals[0] << 8) | regvals[1];
+ temp = sign_extend32(temp, 15);
}
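+	/* the raw value is in units of 1/256 degree C; 1000 / 256 == 125 / 32 */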
+ *t = DIV_ROUND_CLOSEST(temp * 125, 32);
return 0;
}
-static u16 update_intervals[] = { 143, 364, 1000, 2500 };
-
-/* Fill value cache. Must be called with update lock held. */
-
-static int lm95234_fill_cache(struct lm95234_data *data,
- struct i2c_client *client)
+static int lm95234_hyst_get(struct regmap *regmap, int reg, long *val)
{
- int i, ret;
-
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
- if (ret < 0)
- return ret;
-
- data->interval = msecs_to_jiffies(update_intervals[ret & 0x03]);
-
- for (i = 0; i < ARRAY_SIZE(data->tcrit1); i++) {
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT1(i));
- if (ret < 0)
- return ret;
- data->tcrit1[i] = ret;
- }
- for (i = 0; i < ARRAY_SIZE(data->tcrit2); i++) {
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT2(i));
- if (ret < 0)
- return ret;
- data->tcrit2[i] = ret;
- }
- for (i = 0; i < ARRAY_SIZE(data->toffset); i++) {
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_OFFSET(i));
- if (ret < 0)
- return ret;
- data->toffset[i] = ret;
- }
-
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT_HYST);
- if (ret < 0)
- return ret;
- data->thyst = ret;
+ unsigned int regs[2] = {reg, LM95234_REG_TCRIT_HYST};
+ u8 regvals[2];
+ int ret;
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL);
- if (ret < 0)
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
+ if (ret)
return ret;
- data->sensor_type = ret;
-
+ *val = (regvals[0] - regvals[1]) * 1000;
return 0;
}
-static int lm95234_update_device(struct lm95234_data *data)
+static ssize_t lm95234_hyst_set(struct lm95234_data *data, long val)
{
- struct i2c_client *client = data->client;
+ u32 tcrit;
int ret;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + data->interval) ||
- !data->valid) {
- int i;
-
- if (!data->valid) {
- ret = lm95234_fill_cache(data, client);
- if (ret < 0)
- goto abort;
- }
-
- data->valid = false;
- for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
- ret = lm95234_read_temp(client, i, &data->temp[i]);
- if (ret < 0)
- goto abort;
- }
-
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_FAULT);
- if (ret < 0)
- goto abort;
- data->status = ret;
-
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_TCRIT1);
- if (ret < 0)
- goto abort;
- data->status |= ret << 8;
+ ret = regmap_read(data->regmap, LM95234_REG_TCRIT1(0), &tcrit);
+ if (ret)
+ goto unlock;
- ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_TCRIT2);
- if (ret < 0)
- goto abort;
- data->status |= ret << 16;
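+	/*
+	 * The chip has a single shared hysteresis register holding the
+	 * offset (0..31 degrees C) below a limit; derive it from temp1_max.
+	 */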
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
+ val = clamp_val((int)tcrit - val, 0, 31);
- data->last_updated = jiffies;
- data->valid = true;
- }
- ret = 0;
-abort:
+ ret = regmap_write(data->regmap, LM95234_REG_TCRIT_HYST, val);
+unlock:
mutex_unlock(&data->update_lock);
-
return ret;
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95234_crit_reg(int channel)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n",
- DIV_ROUND_CLOSEST(data->temp[index] * 125, 32));
-}
-
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lm95234_data *data = dev_get_drvdata(dev);
- u32 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%u", !!(data->status & mask));
-}
-
-static ssize_t type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lm95234_data *data = dev_get_drvdata(dev);
- u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- return sprintf(buf, data->sensor_type & mask ? "1\n" : "2\n");
+ if (channel == 1 || channel == 2)
+ return LM95234_REG_TCRIT2(channel - 1);
+ return LM95234_REG_TCRIT1(channel);
}
-static ssize_t type_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95234_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct lm95234_data *data = dev_get_drvdata(dev);
- unsigned long val;
- u8 mask = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- if (val != 1 && val != 2)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- if (val == 1)
- data->sensor_type |= mask;
- else
- data->sensor_type &= ~mask;
- data->valid = false;
- i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL,
- data->sensor_type);
- mutex_unlock(&data->update_lock);
-
- return count;
+ struct regmap *regmap = data->regmap;
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ if (val && val != 1)
+ return -EINVAL;
+ return regmap_update_bits(regmap, LM95234_REG_ENABLE,
+ BIT(channel), val ? BIT(channel) : 0);
+ case hwmon_temp_type:
+ if (val != 1 && val != 2)
+ return -EINVAL;
+ return regmap_update_bits(regmap, LM95234_REG_REM_MODEL,
+ BIT(channel),
+ val == 1 ? BIT(channel) : 0);
+ case hwmon_temp_offset:
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+ return regmap_write(regmap, LM95234_REG_OFFSET(channel - 1), val);
+ case hwmon_temp_max:
+ val = clamp_val(val, 0, channel == 1 ? 127000 : 255000);
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ return regmap_write(regmap, lm95234_crit_reg(channel), val);
+ case hwmon_temp_max_hyst:
+ return lm95234_hyst_set(data, val);
+ case hwmon_temp_crit:
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+ return regmap_write(regmap, LM95234_REG_TCRIT1(channel), val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t tcrit2_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95234_alarm_reg(int channel)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%u", data->tcrit2[index] * 1000);
+ if (channel == 1 || channel == 2)
+ return LM95234_REG_STS_TCRIT2;
+ return LM95234_REG_STS_TCRIT1;
}
-static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95234_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- long val;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
- 1000);
-
- mutex_lock(&data->update_lock);
- data->tcrit2[index] = val;
- i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val);
- mutex_unlock(&data->update_lock);
+ struct regmap *regmap = data->regmap;
+ u32 regval, mask;
+ int ret;
- return count;
+ switch (attr) {
+ case hwmon_temp_enable:
+ ret = regmap_read(regmap, LM95234_REG_ENABLE, &regval);
+ if (ret)
+ return ret;
+ *val = !!(regval & BIT(channel));
+ break;
+ case hwmon_temp_input:
+ return lm95234_read_temp(regmap, channel, val);
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(regmap, lm95234_alarm_reg(channel), &regval);
+ if (ret)
+ return ret;
+ *val = !!(regval & BIT(channel));
+ break;
+ case hwmon_temp_crit_alarm:
+ ret = regmap_read(regmap, LM95234_REG_STS_TCRIT1, &regval);
+ if (ret)
+ return ret;
+ *val = !!(regval & BIT(channel));
+ break;
+ case hwmon_temp_crit_hyst:
+ return lm95234_hyst_get(regmap, LM95234_REG_TCRIT1(channel), val);
+ case hwmon_temp_type:
+ ret = regmap_read(regmap, LM95234_REG_REM_MODEL, &regval);
+ if (ret)
+ return ret;
+ *val = (regval & BIT(channel)) ? 1 : 2;
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_read(regmap, LM95234_REG_OFFSET(channel - 1), &regval);
+ if (ret)
+ return ret;
+ *val = sign_extend32(regval, 7) * 500;
+ break;
+ case hwmon_temp_fault:
+ ret = regmap_read(regmap, LM95234_REG_STS_FAULT, &regval);
+ if (ret)
+ return ret;
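+		/* each remote channel has two fault status bits */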
+ mask = (BIT(0) | BIT(1)) << ((channel - 1) << 1);
+ *val = !!(regval & mask);
+ break;
+ case hwmon_temp_max:
+ ret = regmap_read(regmap, lm95234_crit_reg(channel), &regval);
+ if (ret)
+ return ret;
+ *val = regval * 1000;
+ break;
+ case hwmon_temp_max_hyst:
+ return lm95234_hyst_get(regmap, lm95234_crit_reg(channel), val);
+ case hwmon_temp_crit:
+ ret = regmap_read(regmap, LM95234_REG_TCRIT1(channel), &regval);
+ if (ret)
+ return ret;
+ *val = regval * 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t tcrit2_hyst_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- /* Result can be negative, so be careful with unsigned operands */
- return sprintf(buf, "%d",
- ((int)data->tcrit2[index] - (int)data->thyst) * 1000);
-}
+static u16 update_intervals[] = { 143, 364, 1000, 2500 };
-static ssize_t tcrit1_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95234_chip_write(struct device *dev, u32 attr, long val)
{
struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%u", data->tcrit1[index] * 1000);
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ val = find_closest(val, update_intervals, ARRAY_SIZE(update_intervals));
+ return regmap_write(data->regmap, LM95234_REG_CONVRATE, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95234_chip_read(struct device *dev, u32 attr, long *val)
{
struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
- long val;
-
- if (ret)
- return ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+ u32 convrate;
+ int ret;
- mutex_lock(&data->update_lock);
- data->tcrit1[index] = val;
- i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val);
- mutex_unlock(&data->update_lock);
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = regmap_read(data->regmap, LM95234_REG_CONVRATE, &convrate);
+ if (ret)
+ return ret;
- return count;
+ *val = update_intervals[convrate & 0x03];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t tcrit1_hyst_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int lm95234_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- /* Result can be negative, so be careful with unsigned operands */
- return sprintf(buf, "%d",
- ((int)data->tcrit1[index] - (int)data->thyst) * 1000);
+ switch (type) {
+ case hwmon_chip:
+ return lm95234_chip_write(dev, attr, val);
+ case hwmon_temp:
+ return lm95234_temp_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t tcrit1_hyst_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95234_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
- long val;
-
- if (ret)
- return ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
- val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
-
- mutex_lock(&data->update_lock);
- data->thyst = val;
- i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val);
- mutex_unlock(&data->update_lock);
-
- return count;
+ switch (type) {
+ case hwmon_chip:
+ return lm95234_chip_read(dev, attr, val);
+ case hwmon_temp:
+ return lm95234_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t offset_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static umode_t lm95234_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
+ const struct lm95234_data *data = _data;
- if (ret)
- return ret;
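+	/* LM95233 has only two remote channels */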
+ if (data->type == lm95233 && channel > 2)
+ return 0;
- return sprintf(buf, "%d", data->toffset[index] * 500);
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_crit_hyst:
+ return (channel && channel < 3) ? 0444 : 0;
+ case hwmon_temp_type:
+ case hwmon_temp_offset:
+ return channel ? 0644 : 0;
+ case hwmon_temp_fault:
+ return channel ? 0444 : 0;
+ case hwmon_temp_max:
+ case hwmon_temp_enable:
+ return 0644;
+ case hwmon_temp_max_hyst:
+ return channel ? 0444 : 0644;
+ case hwmon_temp_crit:
+ return (channel && channel < 3) ? 0644 : 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int ret = lm95234_update_device(data);
- long val;
-
- if (ret)
- return ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- /* Accuracy is 1/2 degrees C */
- val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+static const struct hwmon_channel_info * const lm95234_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_MAX_ALARM | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_CRIT_ALARM | HWMON_T_OFFSET | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_CRIT_ALARM | HWMON_T_OFFSET | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE |
+ HWMON_T_OFFSET | HWMON_T_ENABLE,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE |
+ HWMON_T_OFFSET | HWMON_T_ENABLE),
+ NULL
+};
- mutex_lock(&data->update_lock);
- data->toffset[index] = val;
- i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val);
- mutex_unlock(&data->update_lock);
+static const struct hwmon_ops lm95234_hwmon_ops = {
+ .is_visible = lm95234_is_visible,
+ .read = lm95234_read,
+ .write = lm95234_write,
+};
- return count;
-}
+static const struct hwmon_chip_info lm95234_chip_info = {
+ .ops = &lm95234_hwmon_ops,
+ .info = lm95234_info,
+};
-static ssize_t update_interval_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static bool lm95234_volatile_reg(struct device *dev, unsigned int reg)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int ret = lm95234_update_device(data);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%lu\n",
- DIV_ROUND_CLOSEST(data->interval * 1000, HZ));
+ switch (reg) {
+ case LM95234_REG_TEMPH(0) ... LM95234_REG_TEMPH(4):
+ case LM95234_REG_TEMPL(0) ... LM95234_REG_TEMPL(4):
+ case LM95234_REG_UTEMPH(0) ... LM95234_REG_UTEMPH(3):
+ case LM95234_REG_UTEMPL(0) ... LM95234_REG_UTEMPL(3):
+ case LM95234_REG_STS_FAULT:
+ case LM95234_REG_STS_TCRIT1:
+ case LM95234_REG_STS_TCRIT2:
+ case LM95234_REG_REM_MODEL_STS:
+ return true;
+ default:
+ return false;
+ }
}
-static ssize_t update_interval_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static bool lm95234_writeable_reg(struct device *dev, unsigned int reg)
{
- struct lm95234_data *data = dev_get_drvdata(dev);
- int ret = lm95234_update_device(data);
- unsigned long val;
- u8 regval;
-
- if (ret)
- return ret;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- for (regval = 0; regval < 3; regval++) {
- if (val <= update_intervals[regval])
- break;
+ switch (reg) {
+ case LM95234_REG_CONFIG ... LM95234_REG_FILTER:
+ case LM95234_REG_REM_MODEL ... LM95234_REG_OFFSET(3):
+ case LM95234_REG_TCRIT1(0) ... LM95234_REG_TCRIT1(4):
+ case LM95234_REG_TCRIT2(0) ... LM95234_REG_TCRIT2(1):
+ case LM95234_REG_TCRIT_HYST:
+ return true;
+ default:
+ return false;
}
-
- mutex_lock(&data->update_lock);
- data->interval = msecs_to_jiffies(update_intervals[regval]);
- i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval);
- mutex_unlock(&data->update_lock);
-
- return count;
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
-static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4);
-
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, BIT(0) | BIT(1));
-static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, BIT(2) | BIT(3));
-static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, BIT(4) | BIT(5));
-static SENSOR_DEVICE_ATTR_RO(temp5_fault, alarm, BIT(6) | BIT(7));
-
-static SENSOR_DEVICE_ATTR_RW(temp2_type, type, BIT(1));
-static SENSOR_DEVICE_ATTR_RW(temp3_type, type, BIT(2));
-static SENSOR_DEVICE_ATTR_RW(temp4_type, type, BIT(3));
-static SENSOR_DEVICE_ATTR_RW(temp5_type, type, BIT(4));
-
-static SENSOR_DEVICE_ATTR_RW(temp1_max, tcrit1, 0);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, tcrit2, 0);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, tcrit2, 1);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, tcrit1, 3);
-static SENSOR_DEVICE_ATTR_RW(temp5_max, tcrit1, 4);
-
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, tcrit1_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_hyst, tcrit2_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_hyst, tcrit2_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_hyst, tcrit1_hyst, 3);
-static SENSOR_DEVICE_ATTR_RO(temp5_max_hyst, tcrit1_hyst, 4);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, BIT(0 + 8));
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, BIT(1 + 16));
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, BIT(2 + 16));
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, BIT(3 + 8));
-static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, alarm, BIT(4 + 8));
-
-static SENSOR_DEVICE_ATTR_RW(temp2_crit, tcrit1, 1);
-static SENSOR_DEVICE_ATTR_RW(temp3_crit, tcrit1, 2);
-
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_hyst, tcrit1_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_hyst, tcrit1_hyst, 2);
-
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, BIT(1 + 8));
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, BIT(2 + 8));
-
-static SENSOR_DEVICE_ATTR_RW(temp2_offset, offset, 0);
-static SENSOR_DEVICE_ATTR_RW(temp3_offset, offset, 1);
-static SENSOR_DEVICE_ATTR_RW(temp4_offset, offset, 2);
-static SENSOR_DEVICE_ATTR_RW(temp5_offset, offset, 3);
-
-static DEVICE_ATTR_RW(update_interval);
-
-static struct attribute *lm95234_common_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_type.dev_attr.attr,
- &sensor_dev_attr_temp3_type.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- &sensor_dev_attr_temp3_offset.dev_attr.attr,
- &dev_attr_update_interval.attr,
- NULL
-};
-
-static const struct attribute_group lm95234_common_group = {
- .attrs = lm95234_common_attrs,
-};
-
-static struct attribute *lm95234_attrs[] = {
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp5_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_type.dev_attr.attr,
- &sensor_dev_attr_temp5_type.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp5_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_offset.dev_attr.attr,
- &sensor_dev_attr_temp5_offset.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group lm95234_group = {
- .attrs = lm95234_attrs,
+static const struct regmap_config lm95234_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = lm95234_writeable_reg,
+ .volatile_reg = lm95234_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
};
static int lm95234_detect(struct i2c_client *client,
@@ -649,61 +481,60 @@ static int lm95234_detect(struct i2c_client *client,
return 0;
}
-static int lm95234_init_client(struct i2c_client *client)
+static int lm95234_init_client(struct device *dev, struct regmap *regmap)
{
- int val, model;
+ u32 val, model;
+ int ret;
/* start conversion if necessary */
- val = i2c_smbus_read_byte_data(client, LM95234_REG_CONFIG);
- if (val < 0)
- return val;
- if (val & 0x40)
- i2c_smbus_write_byte_data(client, LM95234_REG_CONFIG,
- val & ~0x40);
+ ret = regmap_clear_bits(regmap, LM95234_REG_CONFIG, 0x40);
+ if (ret)
+ return ret;
/* If diode type status reports an error, try to fix it */
- val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL_STS);
- if (val < 0)
- return val;
- model = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL);
- if (model < 0)
- return model;
+ ret = regmap_read(regmap, LM95234_REG_REM_MODEL_STS, &val);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, LM95234_REG_REM_MODEL, &model);
+ if (ret < 0)
+ return ret;
if (model & val) {
- dev_notice(&client->dev,
+ dev_notice(dev,
"Fixing remote diode type misconfiguration (0x%x)\n",
val);
- i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL,
- model & ~val);
+ ret = regmap_write(regmap, LM95234_REG_REM_MODEL, model & ~val);
}
- return 0;
+ return ret;
}
static int lm95234_probe(struct i2c_client *client)
{
- enum chips type = (uintptr_t)i2c_get_match_data(client);
struct device *dev = &client->dev;
struct lm95234_data *data;
struct device *hwmon_dev;
+ struct regmap *regmap;
int err;
data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
+ data->type = (uintptr_t)i2c_get_match_data(client);
+
+ regmap = devm_regmap_init_i2c(client, &lm95234_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data->regmap = regmap;
mutex_init(&data->update_lock);
/* Initialize the LM95234 chip */
- err = lm95234_init_client(client);
+ err = lm95234_init_client(dev, regmap);
if (err < 0)
return err;
- data->groups[0] = &lm95234_common_group;
- if (type == lm95234)
- data->groups[1] = &lm95234_group;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &lm95234_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index d293b4f15dc1..3bdc30530847 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -161,18 +161,18 @@ static int lm95245_read_temp(struct device *dev, u32 attr, int channel,
{
struct lm95245_data *data = dev_get_drvdata(dev);
struct regmap *regmap = data->regmap;
- int ret, regl, regh, regvall, regvalh;
+ unsigned int regs[2];
+ unsigned int regval;
+ u8 regvals[2];
+ int ret;
switch (attr) {
case hwmon_temp_input:
- regl = channel ? LM95245_REG_R_REMOTE_TEMPL_S :
- LM95245_REG_R_LOCAL_TEMPL_S;
- regh = channel ? LM95245_REG_R_REMOTE_TEMPH_S :
- LM95245_REG_R_LOCAL_TEMPH_S;
- ret = regmap_read(regmap, regl, &regvall);
- if (ret < 0)
- return ret;
- ret = regmap_read(regmap, regh, &regvalh);
+ regs[0] = channel ? LM95245_REG_R_REMOTE_TEMPL_S :
+ LM95245_REG_R_LOCAL_TEMPL_S;
+ regs[1] = channel ? LM95245_REG_R_REMOTE_TEMPH_S :
+ LM95245_REG_R_LOCAL_TEMPH_S;
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (ret < 0)
return ret;
/*
@@ -181,92 +181,77 @@ static int lm95245_read_temp(struct device *dev, u32 attr, int channel,
* Use signed calculation for remote if signed bit is set
* or if reported temperature is below signed limit.
*/
- if (!channel || (regvalh & 0x80) || regvalh < 0x7f) {
- *val = temp_from_reg_signed(regvalh, regvall);
+ if (!channel || (regvals[1] & 0x80) || regvals[1] < 0x7f) {
+ *val = temp_from_reg_signed(regvals[1], regvals[0]);
return 0;
}
- ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPL_U,
- &regvall);
- if (ret < 0)
- return ret;
- ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPH_U,
- &regvalh);
- if (ret < 0)
+ ret = regmap_bulk_read(regmap, LM95245_REG_R_REMOTE_TEMPH_U, regvals, 2);
+ if (ret)
return ret;
- *val = temp_from_reg_unsigned(regvalh, regvall);
+ *val = temp_from_reg_unsigned(regvals[0], regvals[1]);
return 0;
case hwmon_temp_max:
ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT,
- &regvalh);
+ &regval);
if (ret < 0)
return ret;
- *val = regvalh * 1000;
+ *val = regval * 1000;
return 0;
case hwmon_temp_crit:
- regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
- LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
- ret = regmap_read(regmap, regh, &regvalh);
+ regs[0] = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
+ ret = regmap_read(regmap, regs[0], &regval);
if (ret < 0)
return ret;
- *val = regvalh * 1000;
+ *val = regval * 1000;
return 0;
case hwmon_temp_max_hyst:
- ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT,
- &regvalh);
+ regs[0] = LM95245_REG_RW_REMOTE_OS_LIMIT;
+ regs[1] = LM95245_REG_RW_COMMON_HYSTERESIS;
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (ret < 0)
return ret;
- ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
- &regvall);
- if (ret < 0)
- return ret;
- *val = (regvalh - regvall) * 1000;
+ *val = (regvals[0] - regvals[1]) * 1000;
return 0;
case hwmon_temp_crit_hyst:
- regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
- LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
- ret = regmap_read(regmap, regh, &regvalh);
- if (ret < 0)
- return ret;
- ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
- &regvall);
+ regs[0] = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
+ regs[1] = LM95245_REG_RW_COMMON_HYSTERESIS;
+
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (ret < 0)
return ret;
- *val = (regvalh - regvall) * 1000;
+ *val = (regvals[0] - regvals[1]) * 1000;
return 0;
case hwmon_temp_type:
- ret = regmap_read(regmap, LM95245_REG_RW_CONFIG2, &regvalh);
+ ret = regmap_read(regmap, LM95245_REG_RW_CONFIG2, &regval);
if (ret < 0)
return ret;
- *val = (regvalh & CFG2_REMOTE_TT) ? 1 : 2;
+ *val = (regval & CFG2_REMOTE_TT) ? 1 : 2;
return 0;
case hwmon_temp_offset:
- ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFL,
- &regvall);
- if (ret < 0)
- return ret;
- ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFH,
- &regvalh);
+ ret = regmap_bulk_read(regmap, LM95245_REG_RW_REMOTE_OFFH, regvals, 2);
if (ret < 0)
return ret;
- *val = temp_from_reg_signed(regvalh, regvall);
+ *val = temp_from_reg_signed(regvals[0], regvals[1]);
return 0;
case hwmon_temp_max_alarm:
- ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regval);
if (ret < 0)
return ret;
- *val = !!(regvalh & STATUS1_ROS);
+ *val = !!(regval & STATUS1_ROS);
return 0;
case hwmon_temp_crit_alarm:
- ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regval);
if (ret < 0)
return ret;
- *val = !!(regvalh & (channel ? STATUS1_RTCRIT : STATUS1_LOC));
+ *val = !!(regval & (channel ? STATUS1_RTCRIT : STATUS1_LOC));
return 0;
case hwmon_temp_fault:
- ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regval);
if (ret < 0)
return ret;
- *val = !!(regvalh & STATUS1_DIODE_FAULT);
+ *val = !!(regval & STATUS1_DIODE_FAULT);
return 0;
default:
return -EOPNOTSUPP;
@@ -279,6 +264,7 @@ static int lm95245_write_temp(struct device *dev, u32 attr, int channel,
struct lm95245_data *data = dev_get_drvdata(dev);
struct regmap *regmap = data->regmap;
unsigned int regval;
+ u8 regvals[2];
int ret, reg;
switch (attr) {
@@ -311,16 +297,10 @@ static int lm95245_write_temp(struct device *dev, u32 attr, int channel,
case hwmon_temp_offset:
val = clamp_val(val, -128000, 127875);
val = val * 256 / 1000;
- mutex_lock(&data->update_lock);
- ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFL,
- val & 0xe0);
- if (ret < 0) {
- mutex_unlock(&data->update_lock);
- return ret;
- }
- ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFH,
- (val >> 8) & 0xff);
- mutex_unlock(&data->update_lock);
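+		/* written starting at OFFH; only the three MSBs of OFFL are used */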
+ regvals[0] = val >> 8;
+ regvals[1] = val & 0xe0;
+
+ ret = regmap_bulk_write(regmap, LM95245_REG_RW_REMOTE_OFFH, regvals, 2);
return ret;
case hwmon_temp_type:
if (val != 1 && val != 2)
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index d2ff6e700770..244839167e51 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -11,7 +11,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include "ltc2947.h"
@@ -1034,9 +1035,8 @@ static int ltc2947_setup(struct ltc2947_data *st)
/* 19.89E-6 * 10E9 */
st->lsb_energy = 19890;
}
- ret = of_property_read_u32_array(st->dev->of_node,
- "adi,accumulator-ctl-pol", accum,
- ARRAY_SIZE(accum));
+ ret = device_property_read_u32_array(st->dev, "adi,accumulator-ctl-pol",
+ accum, ARRAY_SIZE(accum));
if (!ret) {
u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) |
LTC2947_ACCUM_POL_2(accum[1]);
@@ -1045,9 +1045,9 @@ static int ltc2947_setup(struct ltc2947_data *st)
if (ret)
return ret;
}
- ret = of_property_read_u32(st->dev->of_node,
- "adi,accumulation-deadband-microamp",
- &deadband);
+ ret = device_property_read_u32(st->dev,
+ "adi,accumulation-deadband-microamp",
+ &deadband);
if (!ret) {
/* the LSB is the same as the current, so 3mA */
ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
@@ -1056,7 +1056,7 @@ static int ltc2947_setup(struct ltc2947_data *st)
return ret;
}
/* check gpio cfg */
- ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol);
+ ret = device_property_read_u32(st->dev, "adi,gpio-out-pol", &pol);
if (!ret) {
/* setup GPIO as output */
u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) |
@@ -1067,8 +1067,8 @@ static int ltc2947_setup(struct ltc2947_data *st)
if (ret)
return ret;
}
- ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum",
- accum, ARRAY_SIZE(accum));
+ ret = device_property_read_u32_array(st->dev, "adi,gpio-in-accum",
+ accum, ARRAY_SIZE(accum));
if (!ret) {
/*
* Setup the accum options. The gpioctl is already defined as
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index d4a93223cd3b..541fa09dc6e7 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -854,33 +854,24 @@ static const struct regmap_config ltc2992_regmap_config = {
static int ltc2992_parse_dt(struct ltc2992_state *st)
{
- struct fwnode_handle *fwnode;
- struct fwnode_handle *child;
u32 addr;
u32 val;
int ret;
- fwnode = dev_fwnode(&st->client->dev);
-
- fwnode_for_each_available_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(&st->client->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &addr);
- if (ret < 0) {
- fwnode_handle_put(child);
+ if (ret < 0)
return ret;
- }
- if (addr > 1) {
- fwnode_handle_put(child);
+ if (addr > 1)
return -EINVAL;
- }
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret) {
- if (!val) {
- fwnode_handle_put(child);
+ if (!val)
return dev_err_probe(&st->client->dev, -EINVAL,
"shunt resistor value cannot be zero\n");
- }
+
st->r_sense_uohm[addr] = val;
}
}
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 7ce9a89f93a0..0ccb5eb596fc 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -79,7 +79,7 @@ static const bool max16065_have_current[] = {
};
struct max16065_data {
- enum chips type;
+ enum chips chip;
struct i2c_client *client;
const struct attribute_group *groups[4];
struct mutex update_lock;
@@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range)
return limit * range / 256;
}
-static inline int MV_TO_LIMIT(int mv, int range)
+static inline int MV_TO_LIMIT(unsigned long mv, int range)
{
- return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
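+	/* clamp mv first so that mv * 256 cannot overflow */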
+ mv = clamp_val(mv, 0, ULONG_MAX / 256);
+ return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range);
}
static inline int ADC_TO_CURR(int adc, int gain)
@@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev)
MAX16065_CURR_SENSE);
}
- for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++)
+ for (i = 0; i < 2; i++)
data->fault[i]
= i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+ data->fault[0] |= data->fault[1];
+
data->last_updated = jiffies;
data->valid = true;
}
@@ -513,6 +521,7 @@ static int max16065_probe(struct i2c_client *client)
if (unlikely(!data))
return -ENOMEM;
+ data->chip = chip;
data->client = client;
mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index a89a519cf5d9..9b6d03cff4df 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -12,275 +12,356 @@
* http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/*
- * The MAX1619 registers
- */
+#define MAX1619_REG_LOCAL_TEMP 0x00
+#define MAX1619_REG_REMOTE_TEMP 0x01
+#define MAX1619_REG_STATUS 0x02
+#define MAX1619_REG_CONFIG 0x03
+#define MAX1619_REG_CONVRATE 0x04
+#define MAX1619_REG_REMOTE_HIGH 0x07
+#define MAX1619_REG_REMOTE_LOW 0x08
+#define MAX1619_REG_REMOTE_CRIT 0x10
+#define MAX1619_REG_REMOTE_CRIT_HYST 0x11
+#define MAX1619_REG_MAN_ID 0xFE
+#define MAX1619_REG_CHIP_ID 0xFF
+
+static int get_alarms(struct regmap *regmap)
+{
+ static u32 regs[2] = { MAX1619_REG_STATUS, MAX1619_REG_CONFIG };
+ u8 regdata[2];
+ int ret;
-#define MAX1619_REG_R_MAN_ID 0xFE
-#define MAX1619_REG_R_CHIP_ID 0xFF
-#define MAX1619_REG_R_CONFIG 0x03
-#define MAX1619_REG_W_CONFIG 0x09
-#define MAX1619_REG_R_CONVRATE 0x04
-#define MAX1619_REG_W_CONVRATE 0x0A
-#define MAX1619_REG_R_STATUS 0x02
-#define MAX1619_REG_R_LOCAL_TEMP 0x00
-#define MAX1619_REG_R_REMOTE_TEMP 0x01
-#define MAX1619_REG_R_REMOTE_HIGH 0x07
-#define MAX1619_REG_W_REMOTE_HIGH 0x0D
-#define MAX1619_REG_R_REMOTE_LOW 0x08
-#define MAX1619_REG_W_REMOTE_LOW 0x0E
-#define MAX1619_REG_R_REMOTE_CRIT 0x10
-#define MAX1619_REG_W_REMOTE_CRIT 0x12
-#define MAX1619_REG_R_TCRIT_HYST 0x11
-#define MAX1619_REG_W_TCRIT_HYST 0x13
+ ret = regmap_multi_reg_read(regmap, regs, regdata, 2);
+ if (ret)
+ return ret;
-/*
- * Conversions
- */
+	/* the OVERT status bit is reversed if OVERT output polarity is low */
+ if (!(regdata[1] & 0x20))
+ regdata[0] ^= 0x02;
-static int temp_from_reg(int val)
-{
- return (val & 0x80 ? val-0x100 : val) * 1000;
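+	/* report only the alarm and fault bits (bits 1..4) */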
+ return regdata[0] & 0x1e;
}
-static int temp_to_reg(int val)
+static int max1619_temp_read(struct regmap *regmap, u32 attr, int channel, long *val)
{
- return (val < 0 ? val+0x100*1000 : val) / 1000;
+ int reg = -1, alarm_bit = 0;
+ u32 temp;
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = channel ? MAX1619_REG_REMOTE_TEMP : MAX1619_REG_LOCAL_TEMP;
+ break;
+ case hwmon_temp_min:
+ reg = MAX1619_REG_REMOTE_LOW;
+ break;
+ case hwmon_temp_max:
+ reg = MAX1619_REG_REMOTE_HIGH;
+ break;
+ case hwmon_temp_crit:
+ reg = MAX1619_REG_REMOTE_CRIT;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg = MAX1619_REG_REMOTE_CRIT_HYST;
+ break;
+ case hwmon_temp_min_alarm:
+ alarm_bit = 3;
+ break;
+ case hwmon_temp_max_alarm:
+ alarm_bit = 4;
+ break;
+ case hwmon_temp_crit_alarm:
+ alarm_bit = 1;
+ break;
+ case hwmon_temp_fault:
+ alarm_bit = 2;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (reg >= 0) {
+ ret = regmap_read(regmap, reg, &temp);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(temp, 7) * 1000;
+ } else {
+ ret = get_alarms(regmap);
+ if (ret < 0)
+ return ret;
+ *val = !!(ret & BIT(alarm_bit));
+ }
+ return 0;
}
-enum temp_index {
- t_input1 = 0,
- t_input2,
- t_low2,
- t_high2,
- t_crit2,
- t_hyst2,
- t_num_regs
-};
-
-/*
- * Client data (each client gets its own)
- */
-
-struct max1619_data {
- struct i2c_client *client;
- struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* registers values */
- u8 temp[t_num_regs]; /* index with enum temp_index */
- u8 alarms;
-};
+static u16 update_intervals[] = { 16000, 8000, 4000, 2000, 1000, 500, 250, 125 };
-static const u8 regs_read[t_num_regs] = {
- [t_input1] = MAX1619_REG_R_LOCAL_TEMP,
- [t_input2] = MAX1619_REG_R_REMOTE_TEMP,
- [t_low2] = MAX1619_REG_R_REMOTE_LOW,
- [t_high2] = MAX1619_REG_R_REMOTE_HIGH,
- [t_crit2] = MAX1619_REG_R_REMOTE_CRIT,
- [t_hyst2] = MAX1619_REG_R_TCRIT_HYST,
-};
-
-static const u8 regs_write[t_num_regs] = {
- [t_low2] = MAX1619_REG_W_REMOTE_LOW,
- [t_high2] = MAX1619_REG_W_REMOTE_HIGH,
- [t_crit2] = MAX1619_REG_W_REMOTE_CRIT,
- [t_hyst2] = MAX1619_REG_W_TCRIT_HYST,
-};
-
-static struct max1619_data *max1619_update_device(struct device *dev)
+static int max1619_chip_read(struct regmap *regmap, u32 attr, long *val)
{
- struct max1619_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int config, i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- dev_dbg(&client->dev, "Updating max1619 data.\n");
- for (i = 0; i < t_num_regs; i++)
- data->temp[i] = i2c_smbus_read_byte_data(client,
- regs_read[i]);
- data->alarms = i2c_smbus_read_byte_data(client,
- MAX1619_REG_R_STATUS);
- /* If OVERT polarity is low, reverse alarm bit */
- config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
- if (!(config & 0x20))
- data->alarms ^= 0x02;
-
- data->last_updated = jiffies;
- data->valid = true;
+ int alarms, ret;
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = regmap_read(regmap, MAX1619_REG_CONVRATE, &regval);
+ if (ret < 0)
+ return ret;
+ *val = update_intervals[regval & 7];
+ break;
+ case hwmon_chip_alarms:
+ alarms = get_alarms(regmap);
+ if (alarms < 0)
+ return alarms;
+ *val = alarms;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ return 0;
}
-/*
- * Sysfs stuff
- */
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int max1619_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max1619_data *data = max1619_update_device(dev);
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return max1619_chip_read(regmap, attr, val);
+ case hwmon_temp:
+ return max1619_temp_read(regmap, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index]));
+static int max1619_chip_write(struct regmap *regmap, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ val = find_closest_descending(val, update_intervals, ARRAY_SIZE(update_intervals));
+ return regmap_write(regmap, MAX1619_REG_CONVRATE, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int max1619_temp_write(struct regmap *regmap,
+ u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max1619_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- int err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->temp[attr->index] = temp_to_reg(val);
- i2c_smbus_write_byte_data(client, regs_write[attr->index],
- data->temp[attr->index]);
- mutex_unlock(&data->update_lock);
- return count;
+ int reg;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = MAX1619_REG_REMOTE_LOW;
+ break;
+ case hwmon_temp_max:
+ reg = MAX1619_REG_REMOTE_HIGH;
+ break;
+ case hwmon_temp_crit:
+ reg = MAX1619_REG_REMOTE_CRIT;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg = MAX1619_REG_REMOTE_CRIT_HYST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ return regmap_write(regmap, reg, val);
}
-static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int max1619_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct max1619_data *data = max1619_update_device(dev);
- return sprintf(buf, "%d\n", data->alarms);
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return max1619_chip_write(regmap, attr, val);
+ case hwmon_temp:
+ return max1619_temp_write(regmap, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static umode_t max1619_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- int bitnr = to_sensor_dev_attr(attr)->index;
- struct max1619_data *data = max1619_update_device(dev);
- return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ case hwmon_chip_alarms:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ return 0644;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input1);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, t_input2);
-static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, t_low2);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, t_high2);
-static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, t_crit2);
-static SENSOR_DEVICE_ATTR_RW(temp2_crit_hyst, temp, t_hyst2);
-
-static DEVICE_ATTR_RO(alarms);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4);
-
-static struct attribute *max1619_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
-
- &dev_attr_alarms.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+static const struct hwmon_channel_info * const max1619_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_FAULT),
NULL
};
-ATTRIBUTE_GROUPS(max1619);
+
+static const struct hwmon_ops max1619_hwmon_ops = {
+ .is_visible = max1619_is_visible,
+ .read = max1619_read,
+ .write = max1619_write,
+};
+
+static const struct hwmon_chip_info max1619_chip_info = {
+ .ops = &max1619_hwmon_ops,
+ .info = max1619_info,
+};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
- u8 reg_config, reg_convrate, reg_status, man_id, chip_id;
+ int regval;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- /* detection */
- reg_config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
- reg_convrate = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONVRATE);
- reg_status = i2c_smbus_read_byte_data(client, MAX1619_REG_R_STATUS);
- if ((reg_config & 0x03) != 0x00
- || reg_convrate > 0x07 || (reg_status & 0x61) != 0x00) {
- dev_dbg(&adapter->dev, "MAX1619 detection failed at 0x%02x\n",
- client->addr);
+ regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CONFIG);
+ if (regval < 0 || (regval & 0x03))
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CONVRATE);
+ if (regval < 0 || regval > 0x07)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX1619_REG_STATUS);
+ if (regval < 0 || (regval & 0x61))
return -ENODEV;
- }
- /* identification */
- man_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_MAN_ID);
- chip_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CHIP_ID);
- if (man_id != 0x4D || chip_id != 0x04) {
- dev_info(&adapter->dev,
- "Unsupported chip (man_id=0x%02X, chip_id=0x%02X).\n",
- man_id, chip_id);
+ regval = i2c_smbus_read_byte_data(client, MAX1619_REG_MAN_ID);
+ if (regval != 0x4d)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CHIP_ID);
+ if (regval != 0x04)
return -ENODEV;
- }
strscpy(info->type, "max1619", I2C_NAME_SIZE);
return 0;
}
-static void max1619_init_client(struct i2c_client *client)
+static int max1619_init_chip(struct regmap *regmap)
{
- u8 config;
-
- /*
- * Start the conversions.
- */
- i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE,
- 5); /* 2 Hz */
- config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
- if (config & 0x40)
- i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG,
- config & 0xBF); /* run */
+ int ret;
+
+ ret = regmap_write(regmap, MAX1619_REG_CONVRATE, 5); /* 2 Hz */
+ if (ret)
+ return ret;
+
+ /* Start conversions */
+ return regmap_clear_bits(regmap, MAX1619_REG_CONFIG, 0x40);
}
-static int max1619_probe(struct i2c_client *new_client)
+/* regmap */
+
+static int max1619_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- struct max1619_data *data;
- struct device *hwmon_dev;
+ int ret;
- data = devm_kzalloc(&new_client->dev, sizeof(struct max1619_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ ret = i2c_smbus_read_byte_data(context, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int max1619_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+	int offset = reg < MAX1619_REG_REMOTE_CRIT ? 6 : 2;	/* write address = read address + offset */
+
+ return i2c_smbus_write_byte_data(context, reg + offset, val);
+}
+
+static bool max1619_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return reg <= MAX1619_REG_STATUS;
+}
+
+static bool max1619_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ return reg > MAX1619_REG_STATUS && reg <= MAX1619_REG_REMOTE_CRIT_HYST;
+}
+
+static const struct regmap_config max1619_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX1619_REG_REMOTE_CRIT_HYST,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = max1619_regmap_is_volatile,
+ .writeable_reg = max1619_regmap_is_writeable,
+};
+
+static const struct regmap_bus max1619_regmap_bus = {
+ .reg_write = max1619_reg_write,
+ .reg_read = max1619_reg_read,
+};
+
+static int max1619_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
- data->client = new_client;
- mutex_init(&data->update_lock);
+ regmap = devm_regmap_init(dev, &max1619_regmap_bus, client,
+ &max1619_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- /* Initialize the MAX1619 chip */
- max1619_init_client(new_client);
+ ret = max1619_init_chip(regmap);
+ if (ret)
+ return ret;
- hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
- new_client->name,
- data,
- max1619_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ regmap, &max1619_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
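The max1619 conversion works because the chip exposes its limit registers at different addresses for reads and writes: the custom regmap bus hides that split by adding a fixed offset on the write path, while the register cache serves read-backs of the non-volatile limits. A minimal self-contained sketch of the same pattern, with a hypothetical chip whose limit register reads at 0x05 and writes at 0x0b (all names and addresses below are illustrative, not taken from the driver):

#include <linux/i2c.h>
#include <linux/regmap.h>

#define DEMO_REG_LIMIT	0x05	/* hypothetical read address; writes land at 0x0b */

static int demo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	int ret = i2c_smbus_read_byte_data(context, reg);

	if (ret < 0)
		return ret;
	*val = ret;
	return 0;
}

static int demo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	/* translate the cached read address into the chip's write address */
	return i2c_smbus_write_byte_data(context, reg + 6, val);
}

static const struct regmap_bus demo_bus = {
	.reg_read = demo_reg_read,
	.reg_write = demo_reg_write,
};

static const struct regmap_config demo_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_MAPLE,
};

With this bus, regmap_write(map, DEMO_REG_LIMIT, v) reaches the chip at 0x0b, and a later regmap_read(map, DEMO_REG_LIMIT, &v) is satisfied from the cache, so the rest of the driver can use a single register address for both directions.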
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 9fc583ebb11b..a8197a86f559 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -6,15 +6,15 @@
* some credit to Christoph Scheurer, but largely a rewrite
*/
-#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short max1668_addr_list[] = {
@@ -30,14 +30,10 @@ static const unsigned short max1668_addr_list[] = {
/* limits */
-/* write high limits */
-#define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr))
-/* write low limits */
-#define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr))
-/* read high limits */
-#define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr))
+/* high limits */
+#define MAX1668_REG_LIMH(nr) (0x08 + 2 * (nr))
-/* read low limits */
+/* low limits */
-#define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr))
+#define MAX1668_REG_LIML(nr) (0x09 + 2 * (nr))
/* manufacturer and device ID Constants */
#define MAN_ID_MAXIM 0x4d
@@ -50,309 +46,146 @@ static bool read_only;
module_param(read_only, bool, 0);
MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
-enum chips { max1668, max1805, max1989 };
-
struct max1668_data {
- struct i2c_client *client;
- const struct attribute_group *groups[3];
- enum chips type;
-
- struct mutex update_lock;
- bool valid; /* true if following fields are valid */
- unsigned long last_updated; /* In jiffies */
-
- /* 1x local and 4x remote */
- s8 temp_max[5];
- s8 temp_min[5];
- s8 temp[5];
- u16 alarms;
+ struct regmap *regmap;
+ int channels;
};
-static struct max1668_data *max1668_update_device(struct device *dev)
+static int max1668_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
struct max1668_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct max1668_data *ret = data;
- s32 val;
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (data->valid && !time_after(jiffies,
- data->last_updated + HZ + HZ / 2))
- goto abort;
-
- for (i = 0; i < 5; i++) {
- val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i));
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp[i] = (s8) val;
-
- val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i));
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp_max[i] = (s8) val;
-
- val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i));
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp_min[i] = (s8) val;
- }
-
- val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->alarms = val << 8;
+ struct regmap *regmap = data->regmap;
+ u32 regs[2] = { MAX1668_REG_STAT1, MAX1668_REG_TEMP(channel) };
+ u8 regvals[2];
+ u32 regval;
+ int ret;
- val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = regmap_read(regmap, MAX1668_REG_TEMP(channel), &regval);
+ if (ret)
+ return ret;
+ *val = sign_extend32(regval, 7) * 1000;
+ break;
+ case hwmon_temp_min:
+ ret = regmap_read(regmap, MAX1668_REG_LIML(channel), &regval);
+ if (ret)
+ return ret;
+ *val = sign_extend32(regval, 7) * 1000;
+ break;
+ case hwmon_temp_max:
+ ret = regmap_read(regmap, MAX1668_REG_LIMH(channel), &regval);
+ if (ret)
+ return ret;
+ *val = sign_extend32(regval, 7) * 1000;
+ break;
+ case hwmon_temp_min_alarm:
+ ret = regmap_read(regmap,
+ channel ? MAX1668_REG_STAT2 : MAX1668_REG_STAT1,
+ &regval);
+ if (ret)
+ return ret;
+ if (channel)
+ *val = !!(regval & BIT(9 - channel * 2));
+ else
+ *val = !!(regval & BIT(5));
+ break;
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(regmap,
+ channel ? MAX1668_REG_STAT2 : MAX1668_REG_STAT1,
+ &regval);
+ if (ret)
+ return ret;
+ if (channel)
+ *val = !!(regval & BIT(8 - channel * 2));
+ else
+ *val = !!(regval & BIT(6));
+ break;
+ case hwmon_temp_fault:
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
+ if (ret)
+ return ret;
+ *val = !!((regvals[0] & BIT(4)) && regvals[1] == 127);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- data->alarms |= val;
-
- data->last_updated = jiffies;
- data->valid = true;
-abort:
- mutex_unlock(&data->update_lock);
-
- return ret;
-}
-
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max1668_data *data = max1668_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- return sprintf(buf, "%d\n", data->temp[index] * 1000);
-}
-
-static ssize_t show_temp_max(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max1668_data *data = max1668_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- return sprintf(buf, "%d\n", data->temp_max[index] * 1000);
-}
-
-static ssize_t show_temp_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max1668_data *data = max1668_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- return sprintf(buf, "%d\n", data->temp_min[index] * 1000);
-}
-
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int index = to_sensor_dev_attr(attr)->index;
- struct max1668_data *data = max1668_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
-}
-
-static ssize_t show_fault(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max1668_data *data = max1668_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- return sprintf(buf, "%u\n",
- (data->alarms & (1 << 12)) && data->temp[index] == 127);
+ return 0;
}
-static ssize_t set_temp_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int max1668_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct max1668_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long temp;
- int ret;
+ struct regmap *regmap = data->regmap;
- ret = kstrtol(buf, 10, &temp);
- if (ret < 0)
- return ret;
+ val = clamp_val(val / 1000, -128, 127);
- mutex_lock(&data->update_lock);
- data->temp_max[index] = clamp_val(temp/1000, -128, 127);
- ret = i2c_smbus_write_byte_data(client,
- MAX1668_REG_LIMH_WR(index),
- data->temp_max[index]);
- if (ret < 0)
- count = ret;
- mutex_unlock(&data->update_lock);
-
- return count;
+ switch (attr) {
+ case hwmon_temp_min:
+ return regmap_write(regmap, MAX1668_REG_LIML(channel), val);
+ case hwmon_temp_max:
+ return regmap_write(regmap, MAX1668_REG_LIMH(channel), val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_temp_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static umode_t max1668_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max1668_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long temp;
- int ret;
-
- ret = kstrtol(buf, 10, &temp);
- if (ret < 0)
- return ret;
-
- mutex_lock(&data->update_lock);
- data->temp_min[index] = clamp_val(temp/1000, -128, 127);
- ret = i2c_smbus_write_byte_data(client,
- MAX1668_REG_LIML_WR(index),
- data->temp_min[index]);
- if (ret < 0)
- count = ret;
- mutex_unlock(&data->update_lock);
-
- return count;
+ const struct max1668_data *data = _data;
+
+ if (channel >= data->channels)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return read_only ? 0444 : 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_fault:
+ if (channel)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max,
- set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min,
- set_temp_min, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max,
- set_temp_max, 1);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min,
- set_temp_min, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max,
- set_temp_max, 2);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min,
- set_temp_min, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max,
- set_temp_max, 3);
-static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min,
- set_temp_min, 3);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max,
- set_temp_max, 4);
-static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min,
- set_temp_min, 4);
-
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0);
-
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4);
-
-/* Attributes common to MAX1668, MAX1989 and MAX1805 */
-static struct attribute *max1668_attribute_common[] = {
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
-
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
+static const struct hwmon_channel_info * const max1668_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT),
NULL
};
-/* Attributes not present on MAX1805 */
-static struct attribute *max1668_attribute_unique[] = {
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp4_min.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_max.dev_attr.attr,
- &sensor_dev_attr_temp5_min.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
-
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp5_fault.dev_attr.attr,
- NULL
+static const struct hwmon_ops max1668_hwmon_ops = {
+ .is_visible = max1668_is_visible,
+ .read = max1668_read,
+ .write = max1668_write,
};
-static umode_t max1668_attribute_mode(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- umode_t ret = S_IRUGO;
- if (read_only)
- return ret;
- if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_max.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_max.dev_attr.attr ||
- attr == &sensor_dev_attr_temp4_max.dev_attr.attr ||
- attr == &sensor_dev_attr_temp5_max.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_min.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_min.dev_attr.attr ||
- attr == &sensor_dev_attr_temp4_min.dev_attr.attr ||
- attr == &sensor_dev_attr_temp5_min.dev_attr.attr)
- ret |= S_IWUSR;
- return ret;
-}
-
-static const struct attribute_group max1668_group_common = {
- .attrs = max1668_attribute_common,
- .is_visible = max1668_attribute_mode
-};
-
-static const struct attribute_group max1668_group_unique = {
- .attrs = max1668_attribute_unique,
- .is_visible = max1668_attribute_mode
+static const struct hwmon_chip_info max1668_chip_info = {
+ .ops = &max1668_hwmon_ops,
+ .info = max1668_info,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
@@ -391,6 +224,48 @@ static int max1668_detect(struct i2c_client *client,
return 0;
}
+/* regmap */
+
+static int max1668_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(context, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int max1668_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+	return i2c_smbus_write_byte_data(context, reg + 11, val);	/* write regs sit 11 (0x13 - 0x08) above the read regs */
+}
+
+static bool max1668_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return reg <= MAX1668_REG_STAT2;
+}
+
+static bool max1668_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ return reg > MAX1668_REG_STAT2 && reg <= MAX1668_REG_LIML(4);
+}
+
+static const struct regmap_config max1668_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = max1668_regmap_is_volatile,
+ .writeable_reg = max1668_regmap_is_writeable,
+};
+
+static const struct regmap_bus max1668_regmap_bus = {
+ .reg_write = max1668_reg_write,
+ .reg_read = max1668_reg_read,
+};
+
static int max1668_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -405,24 +280,22 @@ static int max1668_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- data->client = client;
- data->type = (uintptr_t)i2c_get_match_data(client);
- mutex_init(&data->update_lock);
+ data->regmap = devm_regmap_init(dev, &max1668_regmap_bus, client,
+ &max1668_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
- /* sysfs hooks */
- data->groups[0] = &max1668_group_common;
- if (data->type == max1668 || data->type == max1989)
- data->groups[1] = &max1668_group_unique;
+ data->channels = (uintptr_t)i2c_get_match_data(client);
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &max1668_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max1668_id[] = {
- { "max1668", max1668 },
- { "max1805", max1805 },
- { "max1989", max1989 },
+ { "max1668", 5 },
+ { "max1805", 3 },
+ { "max1989", 5 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1668_id);
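The alarm decoding in max1668_read() preserves the bit layout that the old SENSOR_DEVICE_ATTR indexes spelled out one attribute at a time: with alarms = STAT1 << 8 | STAT2, remote channel n (1..4) has its min alarm at STAT2 bit 9 - 2n and its max alarm at bit 8 - 2n, while the local channel uses STAT1 bits 5 and 6. A compile-time spot check of that arithmetic, derived from the code above rather than a datasheet (the helper name is illustrative):

#include <linux/bits.h>
#include <linux/build_bug.h>

static inline void demo_check_max1668_alarm_bits(void)
{
	/* channel 1: min alarm in STAT2 bit 7, max alarm in bit 6 */
	BUILD_BUG_ON(BIT(9 - 1 * 2) != BIT(7));
	BUILD_BUG_ON(BIT(8 - 1 * 2) != BIT(6));
	/* channel 4: min alarm in STAT2 bit 1, max alarm in bit 0 */
	BUILD_BUG_ON(BIT(9 - 4 * 2) != BIT(1));
	BUILD_BUG_ON(BIT(8 - 4 * 2) != BIT(0));
}

These match the old attribute indexes (temp2_min_alarm was index 7, temp5_max_alarm was index 0) once the two status bytes are packed as above.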
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f54720d3d2ce..c955b0f3a8d3 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -88,25 +88,16 @@ struct max6639_data {
static int max6639_temp_read_input(struct device *dev, int channel, long *temp)
{
+ u32 regs[2] = { MAX6639_REG_TEMP_EXT(channel), MAX6639_REG_TEMP(channel) };
struct max6639_data *data = dev_get_drvdata(dev);
- unsigned int val;
+ u8 regvals[2];
int res;
- /*
-	 * Lock isn't needed as MAX6639_REG_TEMP won't change for at least 250ms after reading
- * MAX6639_REG_TEMP_EXT
- */
- res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val);
- if (res < 0)
- return res;
-
- *temp = val >> 5;
- res = regmap_read(data->regmap, MAX6639_REG_TEMP(channel), &val);
+ res = regmap_multi_reg_read(data->regmap, regs, regvals, 2);
if (res < 0)
return res;
- *temp |= val << 3;
- *temp *= 125;
+ *temp = ((regvals[0] >> 5) | (regvals[1] << 3)) * 125;
return 0;
}
@@ -290,8 +281,10 @@ static umode_t max6639_fan_is_visible(const void *_data, u32 attr, int channel)
static int max6639_read_pwm(struct device *dev, u32 attr, int channel,
long *pwm_val)
{
+ u32 regs[2] = { MAX6639_REG_FAN_CONFIG3(channel), MAX6639_REG_GCONFIG };
struct max6639_data *data = dev_get_drvdata(dev);
unsigned int val;
+ u8 regvals[2];
int res;
u8 i;
@@ -303,26 +296,13 @@ static int max6639_read_pwm(struct device *dev, u32 attr, int channel,
*pwm_val = val * 255 / 120;
return 0;
case hwmon_pwm_freq:
- mutex_lock(&data->update_lock);
- res = regmap_read(data->regmap, MAX6639_REG_FAN_CONFIG3(channel), &val);
- if (res < 0) {
- mutex_unlock(&data->update_lock);
- return res;
- }
- i = val & MAX6639_FAN_CONFIG3_FREQ_MASK;
-
- res = regmap_read(data->regmap, MAX6639_REG_GCONFIG, &val);
- if (res < 0) {
- mutex_unlock(&data->update_lock);
+ res = regmap_multi_reg_read(data->regmap, regs, regvals, 2);
+ if (res < 0)
return res;
- }
-
- if (val & MAX6639_GCONFIG_PWM_FREQ_HI)
+ i = regvals[0] & MAX6639_FAN_CONFIG3_FREQ_MASK;
+ if (regvals[1] & MAX6639_GCONFIG_PWM_FREQ_HI)
i |= 0x4;
- i &= 0x7;
*pwm_val = freq_table[i];
-
- mutex_unlock(&data->update_lock);
return 0;
default:
return -EOPNOTSUPP;
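Both max6639 hunks replace pairs of individually locked regmap_read() calls with a single regmap_multi_reg_read(), which performs the reads as one regmap operation and lets the driver drop its own locking around them. For the temperature path, the assembled value is an 11-bit reading in 0.125 degC steps; a worked example of the arithmetic, using made-up register contents:

/* hypothetical raw bytes: TEMP_EXT = 0xa0 (0b101 eighths), TEMP = 0x19 (25 degC) */
u8 regvals[2] = { 0xa0, 0x19 };
long temp = ((regvals[0] >> 5) | (regvals[1] << 3)) * 125;
/* temp == (5 | 200) * 125 == 25625 milli-degC, i.e. 25.625 degC */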
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 20981f9443dd..0735a1d2c20f 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -6,18 +6,17 @@
* Copyright (c) 2011 David George <david.george@ska.ac.za>
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-
-#include <linux/platform_data/max6697.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694,
max6697, max6698, max6699 };
@@ -33,21 +32,36 @@ static const u8 MAX6697_REG_MAX[] = {
static const u8 MAX6697_REG_CRIT[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 };
+#define MAX6697_REG_MIN 0x30
/*
- * Map device tree / platform data register bit map to chip bit map.
+ * Map device tree / internal register bit map to chip bit map.
* Applies to alert register and over-temperature register.
*/
+
+#define MAX6697_EXTERNAL_MASK_DT GENMASK(7, 1)
+#define MAX6697_LOCAL_MASK_DT BIT(0)
+#define MAX6697_EXTERNAL_MASK_CHIP GENMASK(6, 0)
+#define MAX6697_LOCAL_MASK_CHIP BIT(7)
+
+/* alert - local channel is in bit 6 */
#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
(((reg) & 0x01) << 6) | ((reg) & 0x80))
-#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
-#define MAX6697_REG_STAT(n) (0x44 + (n))
+/* over-temperature - local channel is in bit 7 */
+#define MAX6697_OVERT_MAP_BITS(reg) \
+ (FIELD_PREP(MAX6697_EXTERNAL_MASK_CHIP, FIELD_GET(MAX6697_EXTERNAL_MASK_DT, reg)) | \
+ FIELD_PREP(MAX6697_LOCAL_MASK_CHIP, FIELD_GET(MAX6697_LOCAL_MASK_DT, reg)))
+
+#define MAX6697_REG_STAT_ALARM 0x44
+#define MAX6697_REG_STAT_CRIT 0x45
+#define MAX6697_REG_STAT_FAULT 0x46
+#define MAX6697_REG_STAT_MIN_ALARM 0x47
#define MAX6697_REG_CONFIG 0x41
-#define MAX6581_CONF_EXTENDED (1 << 1)
-#define MAX6693_CONF_BETA (1 << 2)
-#define MAX6697_CONF_RESISTANCE (1 << 3)
-#define MAX6697_CONF_TIMEOUT (1 << 5)
+#define MAX6581_CONF_EXTENDED BIT(1)
+#define MAX6693_CONF_BETA BIT(2)
+#define MAX6697_CONF_RESISTANCE BIT(3)
+#define MAX6697_CONF_TIMEOUT BIT(5)
#define MAX6697_REG_ALERT_MASK 0x42
#define MAX6697_REG_OVERT_MASK 0x43
@@ -67,24 +81,18 @@ struct max6697_chip_data {
u32 have_crit;
u32 have_fault;
u8 valid_conf;
- const u8 *alarm_map;
};
struct max6697_data {
- struct i2c_client *client;
+ struct regmap *regmap;
enum chips type;
const struct max6697_chip_data *chip;
- int update_interval; /* in milli-seconds */
int temp_offset; /* in degrees C */
struct mutex update_lock;
- unsigned long last_updated; /* In jiffies */
- bool valid; /* true if following fields are valid */
- /* 1x local and up to 7x remote */
- u8 temp[8][4]; /* [nr][0]=temp [1]=ext [2]=max [3]=crit */
#define MAX6697_TEMP_INPUT 0
#define MAX6697_TEMP_EXT 1
#define MAX6697_TEMP_MAX 2
@@ -92,11 +100,6 @@ struct max6697_data {
u32 alarms;
};
-/* Diode fault status bits on MAX6581 are right shifted by one bit */
-static const u8 max6581_alarm_map[] = {
- 0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23 };
-
static const struct max6697_chip_data max6697_chip_data[] = {
[max6581] = {
.channels = 8,
@@ -104,7 +107,6 @@ static const struct max6697_chip_data max6697_chip_data[] = {
.have_ext = 0x7f,
.have_fault = 0xfe,
.valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT,
- .alarm_map = max6581_alarm_map,
},
[max6602] = {
.channels = 5,
@@ -173,545 +175,398 @@ static const struct max6697_chip_data max6697_chip_data[] = {
},
};
-static inline int max6581_offset_to_millic(int val)
+static int max6697_alarm_channel_map(int channel)
{
- return sign_extend32(val, 7) * 250;
+ switch (channel) {
+ case 0:
+ return 6;
+ case 7:
+ return 7;
+ default:
+ return channel - 1;
+ }
}
-static struct max6697_data *max6697_update_device(struct device *dev)
+static int max6697_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
+ unsigned int offset_regs[2] = { MAX6581_REG_OFFSET_SELECT, MAX6581_REG_OFFSET };
+ unsigned int temp_regs[2] = { MAX6697_REG_TEMP[channel],
+ MAX6697_REG_TEMP_EXT[channel] };
struct max6697_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct max6697_data *ret = data;
- int val;
- int i;
- u32 alarms;
+ struct regmap *regmap = data->regmap;
+ u8 regdata[2] = { };
+ u32 regval;
+ int ret;
- mutex_lock(&data->update_lock);
-
- if (data->valid &&
- !time_after(jiffies, data->last_updated
- + msecs_to_jiffies(data->update_interval)))
- goto abort;
-
- for (i = 0; i < data->chip->channels; i++) {
- if (data->chip->have_ext & (1 << i)) {
- val = i2c_smbus_read_byte_data(client,
- MAX6697_REG_TEMP_EXT[i]);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp[i][MAX6697_TEMP_EXT] = val;
- }
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = regmap_multi_reg_read(regmap, temp_regs, regdata,
+ data->chip->have_ext & BIT(channel) ? 2 : 1);
+ if (ret)
+ return ret;
+ *val = (((regdata[0] - data->temp_offset) << 3) | (regdata[1] >> 5)) * 125;
+ break;
+ case hwmon_temp_max:
+ ret = regmap_read(regmap, MAX6697_REG_MAX[channel], &regval);
+ if (ret)
+ return ret;
+ *val = ((int)regval - data->temp_offset) * 1000;
+ break;
+ case hwmon_temp_crit:
+ ret = regmap_read(regmap, MAX6697_REG_CRIT[channel], &regval);
+ if (ret)
+ return ret;
+ *val = ((int)regval - data->temp_offset) * 1000;
+ break;
+ case hwmon_temp_min:
+ ret = regmap_read(regmap, MAX6697_REG_MIN, &regval);
+ if (ret)
+ return ret;
+ *val = ((int)regval - data->temp_offset) * 1000;
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_multi_reg_read(regmap, offset_regs, regdata, 2);
+ if (ret)
+ return ret;
- val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp[i][MAX6697_TEMP_INPUT] = val;
+ if (!(regdata[0] & BIT(channel - 1)))
+ regdata[1] = 0;
- val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp[i][MAX6697_TEMP_MAX] = val;
-
- if (data->chip->have_crit & (1 << i)) {
- val = i2c_smbus_read_byte_data(client,
- MAX6697_REG_CRIT[i]);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->temp[i][MAX6697_TEMP_CRIT] = val;
- }
- }
-
- alarms = 0;
- for (i = 0; i < 3; i++) {
- val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i));
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- alarms = (alarms << 8) | val;
+ *val = sign_extend32(regdata[1], 7) * 250;
+ break;
+ case hwmon_temp_fault:
+ ret = regmap_read(regmap, MAX6697_REG_STAT_FAULT, &regval);
+ if (ret)
+ return ret;
+ if (data->type == max6581)
+ *val = !!(regval & BIT(channel - 1));
+ else
+ *val = !!(regval & BIT(channel));
+ break;
+ case hwmon_temp_crit_alarm:
+ ret = regmap_read(regmap, MAX6697_REG_STAT_CRIT, &regval);
+ if (ret)
+ return ret;
+ /*
+ * In the MAX6581 datasheet revision 0 to 3, the local channel
+ * overtemperature status is reported in bit 6 of register 0x45,
+ * and the overtemperature status for remote channel 7 is
+ * reported in bit 7. In Revision 4 and later, the local channel
+ * overtemperature status is reported in bit 7, and the remote
+ * channel 7 overtemperature status is reported in bit 6. A real
+ * chip was found to match the functionality documented in
+ * Revision 4 and later.
+ */
+ *val = !!(regval & BIT(channel ? channel - 1 : 7));
+ break;
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(regmap, MAX6697_REG_STAT_ALARM, &regval);
+ if (ret)
+ return ret;
+ *val = !!(regval & BIT(max6697_alarm_channel_map(channel)));
+ break;
+ case hwmon_temp_min_alarm:
+ ret = regmap_read(regmap, MAX6697_REG_STAT_MIN_ALARM, &regval);
+ if (ret)
+ return ret;
+ *val = !!(regval & BIT(max6697_alarm_channel_map(channel)));
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- data->alarms = alarms;
- data->last_updated = jiffies;
- data->valid = true;
-abort:
- mutex_unlock(&data->update_lock);
-
- return ret;
-}
-
-static ssize_t temp_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct max6697_data *data = max6697_update_device(dev);
- int temp;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3;
- temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5;
-
- return sprintf(buf, "%d\n", temp * 125);
-}
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int nr = to_sensor_dev_attr_2(devattr)->nr;
- int index = to_sensor_dev_attr_2(devattr)->index;
- struct max6697_data *data = max6697_update_device(dev);
- int temp;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- temp = data->temp[nr][index];
- temp -= data->temp_offset;
-
- return sprintf(buf, "%d\n", temp * 1000);
-}
-
-static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int index = to_sensor_dev_attr(attr)->index;
- struct max6697_data *data = max6697_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- if (data->chip->alarm_map)
- index = data->chip->alarm_map[index];
-
- return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+ return 0;
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static int max6697_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- int nr = to_sensor_dev_attr_2(devattr)->nr;
- int index = to_sensor_dev_attr_2(devattr)->index;
struct max6697_data *data = dev_get_drvdata(dev);
- long temp;
+ struct regmap *regmap = data->regmap;
int ret;
- ret = kstrtol(buf, 10, &temp);
- if (ret < 0)
- return ret;
-
- mutex_lock(&data->update_lock);
- temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */
- temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
- temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
- data->temp[nr][index] = temp;
- ret = i2c_smbus_write_byte_data(data->client,
- index == 2 ? MAX6697_REG_MAX[nr]
- : MAX6697_REG_CRIT[nr],
- temp);
- mutex_unlock(&data->update_lock);
-
- return ret < 0 ? ret : count;
-}
-
-static ssize_t offset_store(struct device *dev, struct device_attribute *devattr, const char *buf,
- size_t count)
-{
- int val, ret, index, select;
- struct max6697_data *data;
- bool channel_enabled;
- long temp;
-
- index = to_sensor_dev_attr(devattr)->index;
- data = dev_get_drvdata(dev);
- ret = kstrtol(buf, 10, &temp);
- if (ret < 0)
+ switch (attr) {
+ case hwmon_temp_max:
+ val = clamp_val(val, -1000000, 1000000); /* prevent underflow */
+ val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset;
+ val = clamp_val(val, 0, data->type == max6581 ? 255 : 127);
+ return regmap_write(regmap, MAX6697_REG_MAX[channel], val);
+ case hwmon_temp_crit:
+ val = clamp_val(val, -1000000, 1000000); /* prevent underflow */
+ val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset;
+ val = clamp_val(val, 0, data->type == max6581 ? 255 : 127);
+ return regmap_write(regmap, MAX6697_REG_CRIT[channel], val);
+ case hwmon_temp_min:
+ val = clamp_val(val, -1000000, 1000000); /* prevent underflow */
+ val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset;
+ val = clamp_val(val, 0, 255);
+ return regmap_write(regmap, MAX6697_REG_MIN, val);
+ case hwmon_temp_offset:
+ mutex_lock(&data->update_lock);
+ val = clamp_val(val, MAX6581_OFFSET_MIN, MAX6581_OFFSET_MAX);
+ val = DIV_ROUND_CLOSEST(val, 250);
+ if (!val) { /* disable this (and only this) channel */
+ ret = regmap_clear_bits(regmap, MAX6581_REG_OFFSET_SELECT,
+ BIT(channel - 1));
+ } else {
+ /* enable channel and update offset */
+ ret = regmap_set_bits(regmap, MAX6581_REG_OFFSET_SELECT,
+ BIT(channel - 1));
+ if (ret)
+ goto unlock;
+ ret = regmap_write(regmap, MAX6581_REG_OFFSET, val);
+ }
+unlock:
+ mutex_unlock(&data->update_lock);
return ret;
-
- mutex_lock(&data->update_lock);
- select = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET_SELECT);
- if (select < 0) {
- ret = select;
- goto abort;
- }
- channel_enabled = (select & (1 << (index - 1)));
- temp = clamp_val(temp, MAX6581_OFFSET_MIN, MAX6581_OFFSET_MAX);
- val = DIV_ROUND_CLOSEST(temp, 250);
- /* disable the offset for channel if the new offset is 0 */
- if (val == 0) {
- if (channel_enabled)
- ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET_SELECT,
- select & ~(1 << (index - 1)));
- ret = ret < 0 ? ret : count;
- goto abort;
+ default:
+ return -EOPNOTSUPP;
}
- if (!channel_enabled) {
- ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET_SELECT,
- select | (1 << (index - 1)));
- if (ret < 0)
- goto abort;
- }
- ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET, val);
- ret = ret < 0 ? ret : count;
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
}
-static ssize_t offset_show(struct device *dev, struct device_attribute *devattr, char *buf)
+static umode_t max6697_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct max6697_data *data;
- int select, ret, index;
-
- index = to_sensor_dev_attr(devattr)->index;
- data = dev_get_drvdata(dev);
- mutex_lock(&data->update_lock);
- select = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET_SELECT);
- if (select < 0)
- ret = select;
- else if (select & (1 << (index - 1)))
- ret = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET);
- else
- ret = 0;
- mutex_unlock(&data->update_lock);
- return ret < 0 ? ret : sprintf(buf, "%d\n", max6581_offset_to_millic(ret));
-}
-
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp, 0, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_crit, temp, 0, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp, 1, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_crit, temp, 1, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
-static SENSOR_DEVICE_ATTR_2_RW(temp3_max, temp, 2, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp3_crit, temp, 2, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp_input, 3);
-static SENSOR_DEVICE_ATTR_2_RW(temp4_max, temp, 3, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp4_crit, temp, 3, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp5_input, temp_input, 4);
-static SENSOR_DEVICE_ATTR_2_RW(temp5_max, temp, 4, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp5_crit, temp, 4, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp6_input, temp_input, 5);
-static SENSOR_DEVICE_ATTR_2_RW(temp6_max, temp, 5, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp6_crit, temp, 5, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp7_input, temp_input, 6);
-static SENSOR_DEVICE_ATTR_2_RW(temp7_max, temp, 6, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp7_crit, temp, 6, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp8_input, temp_input, 7);
-static SENSOR_DEVICE_ATTR_2_RW(temp8_max, temp, 7, MAX6697_TEMP_MAX);
-static SENSOR_DEVICE_ATTR_2_RW(temp8_crit, temp, 7, MAX6697_TEMP_CRIT);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 22);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 16);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 17);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 18);
-static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, alarm, 19);
-static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20);
-static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21);
-static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9);
-static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10);
-static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11);
-static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12);
-static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13);
-static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14);
-
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp5_fault, alarm, 4);
-static SENSOR_DEVICE_ATTR_RO(temp6_fault, alarm, 5);
-static SENSOR_DEVICE_ATTR_RO(temp7_fault, alarm, 6);
-static SENSOR_DEVICE_ATTR_RO(temp8_fault, alarm, 7);
-
-/* There is no offset for local temperature so starting from temp2 */
-static SENSOR_DEVICE_ATTR_RW(temp2_offset, offset, 1);
-static SENSOR_DEVICE_ATTR_RW(temp3_offset, offset, 2);
-static SENSOR_DEVICE_ATTR_RW(temp4_offset, offset, 3);
-static SENSOR_DEVICE_ATTR_RW(temp5_offset, offset, 4);
-static SENSOR_DEVICE_ATTR_RW(temp6_offset, offset, 5);
-static SENSOR_DEVICE_ATTR_RW(temp7_offset, offset, 6);
-static SENSOR_DEVICE_ATTR_RW(temp8_offset, offset, 7);
-
-static DEVICE_ATTR(dummy, 0, NULL, NULL);
-
-static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr,
- int index)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct max6697_data *data = dev_get_drvdata(dev);
+ const struct max6697_data *data = _data;
const struct max6697_chip_data *chip = data->chip;
- int channel = index / 7; /* channel number */
- int nr = index % 7; /* attribute index within channel */
if (channel >= chip->channels)
return 0;
- if ((nr == 3 || nr == 4) && !(chip->have_crit & (1 << channel)))
- return 0;
- if (nr == 5 && !(chip->have_fault & (1 << channel)))
- return 0;
- /* offset reg is only supported on max6581 remote channels */
- if (nr == 6)
- if (data->type != max6581 || channel == 0)
- return 0;
-
- return attr->mode;
+ switch (attr) {
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_min:
+ if (data->type == max6581)
+ return channel ? 0444 : 0644;
+ break;
+ case hwmon_temp_min_alarm:
+ if (data->type == max6581)
+ return 0444;
+ break;
+ case hwmon_temp_crit:
+ if (chip->have_crit & BIT(channel))
+ return 0644;
+ break;
+ case hwmon_temp_crit_alarm:
+ if (chip->have_crit & BIT(channel))
+ return 0444;
+ break;
+ case hwmon_temp_fault:
+ if (chip->have_fault & BIT(channel))
+ return 0444;
+ break;
+ case hwmon_temp_offset:
+ if (data->type == max6581 && channel)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-/*
- * max6697_is_visible uses the index into the following array to determine
- * if attributes should be created or not. Any change in order or content
- * must be matched in max6697_is_visible.
- */
-static struct attribute *max6697_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &dev_attr_dummy.attr,
- &dev_attr_dummy.attr,
-
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_crit.dev_attr.attr,
- &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp5_max.dev_attr.attr,
- &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_crit.dev_attr.attr,
- &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_fault.dev_attr.attr,
- &sensor_dev_attr_temp5_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp6_max.dev_attr.attr,
- &sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp6_crit.dev_attr.attr,
- &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp6_fault.dev_attr.attr,
- &sensor_dev_attr_temp6_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp7_input.dev_attr.attr,
- &sensor_dev_attr_temp7_max.dev_attr.attr,
- &sensor_dev_attr_temp7_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp7_crit.dev_attr.attr,
- &sensor_dev_attr_temp7_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp7_fault.dev_attr.attr,
- &sensor_dev_attr_temp7_offset.dev_attr.attr,
-
- &sensor_dev_attr_temp8_input.dev_attr.attr,
- &sensor_dev_attr_temp8_max.dev_attr.attr,
- &sensor_dev_attr_temp8_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp8_crit.dev_attr.attr,
- &sensor_dev_attr_temp8_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp8_fault.dev_attr.attr,
- &sensor_dev_attr_temp8_offset.dev_attr.attr,
+static const struct hwmon_channel_info * const max6697_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_FAULT | HWMON_T_OFFSET),
NULL
};
-static const struct attribute_group max6697_group = {
- .attrs = max6697_attributes, .is_visible = max6697_is_visible,
+static const struct hwmon_ops max6697_hwmon_ops = {
+ .is_visible = max6697_is_visible,
+ .read = max6697_read,
+ .write = max6697_write,
};
-__ATTRIBUTE_GROUPS(max6697);
-static void max6697_get_config_of(struct device_node *node,
- struct max6697_platform_data *pdata)
-{
- int len;
- const __be32 *prop;
-
- pdata->smbus_timeout_disable =
- of_property_read_bool(node, "smbus-timeout-disable");
- pdata->extended_range_enable =
- of_property_read_bool(node, "extended-range-enable");
- pdata->beta_compensation =
- of_property_read_bool(node, "beta-compensation-enable");
-
- prop = of_get_property(node, "alert-mask", &len);
- if (prop && len == sizeof(u32))
- pdata->alert_mask = be32_to_cpu(prop[0]);
- prop = of_get_property(node, "over-temperature-mask", &len);
- if (prop && len == sizeof(u32))
- pdata->over_temperature_mask = be32_to_cpu(prop[0]);
- prop = of_get_property(node, "resistance-cancellation", &len);
- if (prop) {
- if (len == sizeof(u32))
- pdata->resistance_cancellation = be32_to_cpu(prop[0]);
- else
- pdata->resistance_cancellation = 0xfe;
- }
- prop = of_get_property(node, "transistor-ideality", &len);
- if (prop && len == 2 * sizeof(u32)) {
- pdata->ideality_mask = be32_to_cpu(prop[0]);
- pdata->ideality_value = be32_to_cpu(prop[1]);
- }
-}
+static const struct hwmon_chip_info max6697_chip_info = {
+ .ops = &max6697_hwmon_ops,
+ .info = max6697_info,
+};
-static int max6697_init_chip(struct max6697_data *data,
- struct i2c_client *client)
+static int max6697_config_of(struct device_node *node, struct max6697_data *data)
{
- struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
- struct max6697_platform_data p;
const struct max6697_chip_data *chip = data->chip;
- int factor = chip->channels;
- int ret, reg;
+ struct regmap *regmap = data->regmap;
+ int ret, confreg;
+ u32 vals[2];
- /*
- * Don't touch configuration if neither platform data nor OF
- * configuration was specified. If that is the case, use the
- * current chip configuration.
- */
- if (!pdata && !client->dev.of_node) {
- reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG);
- if (reg < 0)
- return reg;
- if (data->type == max6581) {
- if (reg & MAX6581_CONF_EXTENDED)
- data->temp_offset = 64;
- reg = i2c_smbus_read_byte_data(client,
- MAX6581_REG_RESISTANCE);
- if (reg < 0)
- return reg;
- factor += hweight8(reg);
- } else {
- if (reg & MAX6697_CONF_RESISTANCE)
- factor++;
- }
- goto done;
- }
-
- if (client->dev.of_node) {
- memset(&p, 0, sizeof(p));
- max6697_get_config_of(client->dev.of_node, &p);
- pdata = &p;
- }
-
- reg = 0;
- if (pdata->smbus_timeout_disable &&
+ confreg = 0;
+ if (of_property_read_bool(node, "smbus-timeout-disable") &&
(chip->valid_conf & MAX6697_CONF_TIMEOUT)) {
- reg |= MAX6697_CONF_TIMEOUT;
+ confreg |= MAX6697_CONF_TIMEOUT;
}
- if (pdata->extended_range_enable &&
+ if (of_property_read_bool(node, "extended-range-enable") &&
(chip->valid_conf & MAX6581_CONF_EXTENDED)) {
- reg |= MAX6581_CONF_EXTENDED;
+ confreg |= MAX6581_CONF_EXTENDED;
data->temp_offset = 64;
}
- if (pdata->resistance_cancellation &&
- (chip->valid_conf & MAX6697_CONF_RESISTANCE)) {
- reg |= MAX6697_CONF_RESISTANCE;
- factor++;
- }
- if (pdata->beta_compensation &&
+ if (of_property_read_bool(node, "beta-compensation-enable") &&
(chip->valid_conf & MAX6693_CONF_BETA)) {
- reg |= MAX6693_CONF_BETA;
+ confreg |= MAX6693_CONF_BETA;
}
- ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg);
- if (ret < 0)
+ if (of_property_read_u32(node, "alert-mask", vals))
+ vals[0] = 0;
+ ret = regmap_write(regmap, MAX6697_REG_ALERT_MASK,
+ MAX6697_ALERT_MAP_BITS(vals[0]));
+ if (ret)
return ret;
- ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
- MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
- if (ret < 0)
+ if (of_property_read_u32(node, "over-temperature-mask", vals))
+ vals[0] = 0;
+ ret = regmap_write(regmap, MAX6697_REG_OVERT_MASK,
+ MAX6697_OVERT_MAP_BITS(vals[0]));
+ if (ret)
return ret;
- ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
- MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
- if (ret < 0)
- return ret;
+ if (data->type != max6581) {
+ if (of_property_read_bool(node, "resistance-cancellation") &&
+ chip->valid_conf & MAX6697_CONF_RESISTANCE) {
+ confreg |= MAX6697_CONF_RESISTANCE;
+ }
+ } else {
+ if (of_property_read_u32(node, "resistance-cancellation", &vals[0])) {
+ if (of_property_read_bool(node, "resistance-cancellation"))
+ vals[0] = 0xfe;
+ else
+ vals[0] = 0;
+ }
- if (data->type == max6581) {
- factor += hweight8(pdata->resistance_cancellation >> 1);
- ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE,
- pdata->resistance_cancellation >> 1);
+ vals[0] &= 0xfe;
+ ret = regmap_write(regmap, MAX6581_REG_RESISTANCE, vals[0] >> 1);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
- pdata->ideality_value);
+
+ if (of_property_read_u32_array(node, "transistor-ideality", vals, 2)) {
+ vals[0] = 0;
+ vals[1] = 0;
+ }
+
+ ret = regmap_write(regmap, MAX6581_REG_IDEALITY, vals[1]);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte_data(client,
- MAX6581_REG_IDEALITY_SELECT,
- pdata->ideality_mask >> 1);
+ ret = regmap_write(regmap, MAX6581_REG_IDEALITY_SELECT,
+ (vals[0] & 0xfe) >> 1);
if (ret < 0)
return ret;
}
-done:
- data->update_interval = factor * MAX6697_CONV_TIME;
- return 0;
+ return regmap_write(regmap, MAX6697_REG_CONFIG, confreg);
}
+static int max6697_init_chip(struct device_node *np, struct max6697_data *data)
+{
+ unsigned int reg;
+ int ret;
+
+ /*
+	 * Don't touch the configuration if devicetree provides none;
+	 * in that case, keep using the current chip configuration.
+ */
+ if (!np) {
+ struct regmap *regmap = data->regmap;
+
+ ret = regmap_read(regmap, MAX6697_REG_CONFIG, &reg);
+ if (ret < 0)
+ return ret;
+ if (data->type == max6581) {
+ if (reg & MAX6581_CONF_EXTENDED)
+ data->temp_offset = 64;
+ ret = regmap_read(regmap, MAX6581_REG_RESISTANCE, &reg);
+ }
+ } else {
+ ret = max6697_config_of(np, data);
+ }
+
+ return ret;
+}
+
+static bool max6697_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x00 ... 0x09: /* temperature high bytes */
+ case 0x44 ... 0x47: /* status */
+ case 0x51 ... 0x58: /* temperature low bytes */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max6697_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != 0x0a && reg != 0x0f && !max6697_volatile_reg(dev, reg);
+}
+
+static const struct regmap_config max6697_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x58,
+ .writeable_reg = max6697_writeable_reg,
+ .volatile_reg = max6697_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
+
static int max6697_probe(struct i2c_client *client)
{
- struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
struct max6697_data *data;
struct device *hwmon_dev;
+ struct regmap *regmap;
int err;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
+ regmap = regmap_init_i2c(client, &max6697_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->regmap = regmap;
data->type = (uintptr_t)i2c_get_match_data(client);
data->chip = &max6697_chip_data[data->type];
- data->client = client;
mutex_init(&data->update_lock);
- err = max6697_init_chip(data, client);
+ err = max6697_init_chip(client->dev.of_node, data);
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- max6697_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &max6697_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
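The reworked MAX6697_OVERT_MAP_BITS() makes the devicetree-to-chip bit shuffle explicit: devicetree masks carry the local channel in bit 0 and remotes in bits 7:1, while the chip's over-temperature mask register wants remotes in bits 6:0 and the local channel in bit 7. Tracing one value through the FIELD_GET()/FIELD_PREP() pair (a worked example, not driver code):

#include <linux/bitfield.h>
#include <linux/bits.h>

static unsigned int demo_overt_map(unsigned int dt)
{
	return FIELD_PREP(GENMASK(6, 0), FIELD_GET(GENMASK(7, 1), dt)) |
	       FIELD_PREP(BIT(7), FIELD_GET(BIT(0), dt));
}

/*
 * demo_overt_map(0x03) == 0x81: the first remote channel (DT bit 1)
 * moves to chip bit 0, and the local channel (DT bit 0) to chip bit 7.
 */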
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 9aa4dcf4a6f3..096f1daa8f2b 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -1269,6 +1269,7 @@ static const char * const asus_msi_boards[] = {
"EX-B760M-V5 D4",
"EX-H510M-V3",
"EX-H610M-V3 D4",
+ "G15CF",
"PRIME A620M-A",
"PRIME B560-PLUS",
"PRIME B560-PLUS AC-HES",
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 97e8c6424403..8c9351da12c6 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -229,41 +229,34 @@ abort:
static int nct7802_read_fan(struct nct7802_data *data, u8 reg_fan)
{
- unsigned int f1, f2;
+ unsigned int regs[2] = {reg_fan, REG_FANCOUNT_LOW};
+ u8 f[2];
int ret;
- mutex_lock(&data->access_lock);
- ret = regmap_read(data->regmap, reg_fan, &f1);
- if (ret < 0)
- goto abort;
- ret = regmap_read(data->regmap, REG_FANCOUNT_LOW, &f2);
- if (ret < 0)
- goto abort;
- ret = (f1 << 5) | (f2 >> 3);
+ ret = regmap_multi_reg_read(data->regmap, regs, f, 2);
+ if (ret)
+ return ret;
+ ret = (f[0] << 5) | (f[1] >> 3);
/* convert fan count to rpm */
if (ret == 0x1fff) /* maximum value, assume fan is stopped */
ret = 0;
else if (ret)
ret = DIV_ROUND_CLOSEST(1350000U, ret);
-abort:
- mutex_unlock(&data->access_lock);
return ret;
}
static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
u8 reg_fan_high)
{
- unsigned int f1, f2;
+ unsigned int regs[2] = {reg_fan_low, reg_fan_high};
+ u8 f[2];
int ret;
- mutex_lock(&data->access_lock);
- ret = regmap_read(data->regmap, reg_fan_low, &f1);
- if (ret < 0)
- goto abort;
- ret = regmap_read(data->regmap, reg_fan_high, &f2);
+ ret = regmap_multi_reg_read(data->regmap, regs, f, 2);
if (ret < 0)
- goto abort;
- ret = f1 | ((f2 & 0xf8) << 5);
+ return ret;
+
+ ret = f[0] | ((f[1] & 0xf8) << 5);
/* convert fan count to rpm */
if (ret == 0x1fff) /* maximum value, assume no limit */
ret = 0;
@@ -271,8 +264,6 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
ret = DIV_ROUND_CLOSEST(1350000U, ret);
else
ret = 1350000U;
-abort:
- mutex_unlock(&data->access_lock);
return ret;
}
@@ -302,33 +293,26 @@ static u8 nct7802_vmul[] = { 4, 2, 2, 2, 2 };
static int nct7802_read_voltage(struct nct7802_data *data, int nr, int index)
{
- unsigned int v1, v2;
+ u8 v[2];
int ret;
- mutex_lock(&data->access_lock);
if (index == 0) { /* voltage */
- ret = regmap_read(data->regmap, REG_VOLTAGE[nr], &v1);
- if (ret < 0)
- goto abort;
- ret = regmap_read(data->regmap, REG_VOLTAGE_LOW, &v2);
+ unsigned int regs[2] = {REG_VOLTAGE[nr], REG_VOLTAGE_LOW};
+
+ ret = regmap_multi_reg_read(data->regmap, regs, v, 2);
if (ret < 0)
- goto abort;
- ret = ((v1 << 2) | (v2 >> 6)) * nct7802_vmul[nr];
+ return ret;
+ ret = ((v[0] << 2) | (v[1] >> 6)) * nct7802_vmul[nr];
} else { /* limit */
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
+ unsigned int regs[2] = {REG_VOLTAGE_LIMIT_LSB[index - 1][nr],
+ REG_VOLTAGE_LIMIT_MSB[nr]};
- ret = regmap_read(data->regmap,
- REG_VOLTAGE_LIMIT_LSB[index - 1][nr], &v1);
- if (ret < 0)
- goto abort;
- ret = regmap_read(data->regmap, REG_VOLTAGE_LIMIT_MSB[nr],
- &v2);
+ ret = regmap_multi_reg_read(data->regmap, regs, v, 2);
if (ret < 0)
- goto abort;
- ret = (v1 | ((v2 << shift) & 0x300)) * nct7802_vmul[nr];
+ return ret;
+ ret = (v[0] | ((v[1] << shift) & 0x300)) * nct7802_vmul[nr];
}
-abort:
- mutex_unlock(&data->access_lock);
return ret;
}
@@ -1145,17 +1129,14 @@ static int nct7802_configure_channels(struct device *dev,
{
/* Enable local temperature sensor by default */
u8 mode_mask = MODE_LTD_EN, mode_val = MODE_LTD_EN;
- struct device_node *node;
int err;
if (dev->of_node) {
- for_each_child_of_node(dev->of_node, node) {
+ for_each_child_of_node_scoped(dev->of_node, node) {
err = nct7802_get_channel_config(dev, node, &mode_mask,
&mode_val);
- if (err) {
- of_node_put(node);
+ if (err)
return err;
- }
}
}
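for_each_child_of_node_scoped() is the cleanup.h-based variant of the iterator: the node reference is tied to the scope of the loop variable and dropped automatically, which is why the explicit of_node_put() on the error path disappears here and in the npcm750 and tmp421 hunks below. Sketch (foo_handle_child is hypothetical):

	for_each_child_of_node_scoped(dev->of_node, child) {
		err = foo_handle_child(dev, child);
		if (err)
			return err;	/* reference on child is released automatically */
	}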
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index bc8db1dc595d..db3b551828eb 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -927,7 +927,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np, *child;
+ struct device_node *np;
struct npcm7xx_pwm_fan_data *data;
struct resource *res;
struct device *hwmon;
@@ -1004,11 +1004,10 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
}
}
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = npcm7xx_en_pwm_fan(dev, child, data);
if (ret) {
dev_err(dev, "enable pwm and fan failed\n");
- of_node_put(child);
return ret;
}
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index ef75b63f5894..b5352900463f 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
+MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
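Registering the existing ID table with MODULE_DEVICE_TABLE() makes modpost emit one module alias per entry, so udev can autoload the driver when a matching platform device appears; nothing else about the table changes. Roughly (hypothetical table):

	/* each entry yields an alias of the form MODULE_ALIAS("platform:<name>") */
	static const struct platform_device_id foo_id[] = {
		{ "foo_sensor", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(platform, foo_id);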
/*
* A compensation table should be sorted by the values of .ohm
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
index 8d3b0f86cc57..83730d931824 100644
--- a/drivers/hwmon/oxp-sensors.c
+++ b/drivers/hwmon/oxp-sensors.c
@@ -1,18 +1,21 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Platform driver for OneXPlayer, AOK ZOE, and Aya Neo Handhelds that expose
- * fan reading and control via hwmon sysfs.
+ * Platform driver for OneXPlayer, AOKZOE, AYANEO, and OrangePi Handhelds
+ * that expose fan reading and control via hwmon sysfs.
*
* Old OXP boards have the same DMI strings and they are told apart by
- * the boot cpu vendor (Intel/AMD). Currently only AMD boards are
- * supported but the code is made to be simple to add other handheld
- * boards in the future.
+ * the boot cpu vendor (Intel/AMD). Of these older models, only AMD is
+ * supported.
+ *
* Fan control is provided via pwm interface in the range [0-255].
* Old AMD boards use [0-100] as the range in the EC; the written value
* is scaled to accommodate that. Newer boards like the mini PRO and
- * AOK ZOE are not scaled but have the same EC layout.
+ * AOKZOE are not scaled but have the same EC layout. Newer models
+ * like the 2 and X1 use [0-184] and are scaled to [0-255]. OrangePi
+ * boards use [1-244] and are scaled to [0-255].
*
* Copyright (C) 2022 Joaquín I. Aramendía <samsagax@gmail.com>
+ * Copyright (C) 2024 Derek J. Clark <derekjohn.clark@gmail.com>
*/
#include <linux/acpi.h>
@@ -43,32 +46,48 @@ enum oxp_board {
aok_zoe_a1 = 1,
aya_neo_2,
aya_neo_air,
+ aya_neo_air_1s,
aya_neo_air_plus_mendo,
aya_neo_air_pro,
+ aya_neo_flip,
aya_neo_geek,
+ aya_neo_kun,
+ orange_pi_neo,
+ oxp_2,
+ oxp_fly,
oxp_mini_amd,
oxp_mini_amd_a07,
oxp_mini_amd_pro,
+ oxp_x1,
};
static enum oxp_board board;
/* Fan reading and PWM */
-#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
-#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
-#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
+#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
+#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */
+#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
+#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
+#define PWM_MODE_AUTO 0x00
+#define PWM_MODE_MANUAL 0x01
+
+/* OrangePi fan reading and PWM */
+#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */
+#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */
+#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */
/* Turbo button takeover function
- * Older boards have different values and EC registers
+ * Different boards have different values and EC registers
* for the same function
*/
-#define OXP_OLD_TURBO_SWITCH_REG 0x1E
-#define OXP_OLD_TURBO_TAKE_VAL 0x01
-#define OXP_OLD_TURBO_RETURN_VAL 0x00
+#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */
+#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */
+#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */
+
+#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */
+#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */
-#define OXP_TURBO_SWITCH_REG 0xF1
-#define OXP_TURBO_TAKE_VAL 0x40
-#define OXP_TURBO_RETURN_VAL 0x00
+#define OXP_TURBO_RETURN_VAL 0x00 /* Common return val */
static const struct dmi_system_id dmi_table[] = {
{
@@ -88,7 +107,7 @@ static const struct dmi_system_id dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
+ DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
},
.driver_data = (void *)aya_neo_2,
},
@@ -102,6 +121,13 @@ static const struct dmi_system_id dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
+ },
+ .driver_data = (void *)aya_neo_air_1s,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
},
.driver_data = (void *)aya_neo_air_plus_mendo,
@@ -116,12 +142,33 @@ static const struct dmi_system_id dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "GEEK"),
+ DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
+ },
+ .driver_data = (void *)aya_neo_flip,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
},
.driver_data = (void *)aya_neo_geek,
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)aya_neo_kun,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
+ },
+ .driver_data = (void *)orange_pi_neo,
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
},
@@ -130,6 +177,20 @@ static const struct dmi_system_id dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"),
+ },
+ .driver_data = (void *)oxp_2,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER mini A07"),
},
.driver_data = (void *)oxp_mini_amd_a07,
@@ -141,6 +202,13 @@ static const struct dmi_system_id dmi_table[] = {
},
.driver_data = (void *)oxp_mini_amd_pro,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
{},
};
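Several entries above deliberately use DMI_MATCH() rather than DMI_EXACT_MATCH(): DMI_MATCH() is a substring match while DMI_EXACT_MATCH() requires full equality, so a single relaxed entry can cover name variants of the same board. Illustrative entry (the variant name is hypothetical):

	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
			/* also matches e.g. a hypothetical "ONEXPLAYER X1 Pro" */
			DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"),
		},
		.driver_data = (void *)oxp_x1,
	},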
@@ -192,14 +260,20 @@ static int tt_toggle_enable(void)
switch (board) {
case oxp_mini_amd_a07:
- reg = OXP_OLD_TURBO_SWITCH_REG;
- val = OXP_OLD_TURBO_TAKE_VAL;
+ reg = OXP_MINI_TURBO_SWITCH_REG;
+ val = OXP_MINI_TURBO_TAKE_VAL;
break;
- case oxp_mini_amd_pro:
case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
reg = OXP_TURBO_SWITCH_REG;
val = OXP_TURBO_TAKE_VAL;
break;
+ case oxp_2:
+ case oxp_x1:
+ reg = OXP_2_TURBO_SWITCH_REG;
+ val = OXP_TURBO_TAKE_VAL;
+ break;
default:
return -EINVAL;
}
@@ -213,14 +287,20 @@ static int tt_toggle_disable(void)
switch (board) {
case oxp_mini_amd_a07:
- reg = OXP_OLD_TURBO_SWITCH_REG;
- val = OXP_OLD_TURBO_RETURN_VAL;
+ reg = OXP_MINI_TURBO_SWITCH_REG;
+ val = OXP_TURBO_RETURN_VAL;
break;
- case oxp_mini_amd_pro:
case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
reg = OXP_TURBO_SWITCH_REG;
val = OXP_TURBO_RETURN_VAL;
break;
+ case oxp_2:
+ case oxp_x1:
+ reg = OXP_2_TURBO_SWITCH_REG;
+ val = OXP_TURBO_RETURN_VAL;
+ break;
default:
return -EINVAL;
}
@@ -233,8 +313,11 @@ static umode_t tt_toggle_is_visible(struct kobject *kobj,
{
switch (board) {
case aok_zoe_a1:
+ case oxp_2:
+ case oxp_fly:
case oxp_mini_amd_a07:
case oxp_mini_amd_pro:
+ case oxp_x1:
return attr->mode;
default:
break;
@@ -273,12 +356,17 @@ static ssize_t tt_toggle_show(struct device *dev,
switch (board) {
case oxp_mini_amd_a07:
- reg = OXP_OLD_TURBO_SWITCH_REG;
+ reg = OXP_MINI_TURBO_SWITCH_REG;
break;
- case oxp_mini_amd_pro:
case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
reg = OXP_TURBO_SWITCH_REG;
break;
+ case oxp_2:
+ case oxp_x1:
+ reg = OXP_2_TURBO_SWITCH_REG;
+ break;
default:
return -EINVAL;
}
@@ -295,12 +383,53 @@ static DEVICE_ATTR_RW(tt_toggle);
/* PWM enable/disable functions */
static int oxp_pwm_enable(void)
{
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, 0x01);
+ switch (board) {
+ case orange_pi_neo:
+ return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
+ default:
+ return -EINVAL;
+ }
}
static int oxp_pwm_disable(void)
{
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, 0x00);
+ switch (board) {
+ case orange_pi_neo:
+ return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
+ default:
+ return -EINVAL;
+ }
}
/* Callbacks for hwmon interface */
@@ -326,7 +455,30 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
- return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
+ switch (board) {
+ case orange_pi_neo:
+ return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val);
+ case oxp_2:
+ case oxp_x1:
+ return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -334,27 +486,72 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
switch (board) {
+ case orange_pi_neo:
+ ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [1-244] */
+ *val = ((*val - 1) * 254 / 243) + 1;
+ break;
+ case oxp_2:
+ case oxp_x1:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [0-184] */
+ *val = (*val * 255) / 184;
+ break;
case aya_neo_2:
case aya_neo_air:
+ case aya_neo_air_1s:
case aya_neo_air_plus_mendo:
case aya_neo_air_pro:
+ case aya_neo_flip:
case aya_neo_geek:
+ case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [0-100] */
*val = (*val * 255) / 100;
break;
- case oxp_mini_amd_pro:
case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
default:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
break;
}
return 0;
case hwmon_pwm_enable:
- return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
+ switch (board) {
+ case orange_pi_neo:
+ return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -381,21 +578,36 @@ static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
if (val < 0 || val > 255)
return -EINVAL;
switch (board) {
+ case orange_pi_neo:
+ /* scale to range [1-244] */
+ val = ((val - 1) * 243 / 254) + 1;
+ return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val);
+ case oxp_2:
+ case oxp_x1:
+ /* scale to range [0-184] */
+ val = (val * 184) / 255;
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
case aya_neo_2:
case aya_neo_air:
+ case aya_neo_air_1s:
case aya_neo_air_plus_mendo:
case aya_neo_air_pro:
+ case aya_neo_flip:
case aya_neo_geek:
+ case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
+ /* scale to range [0-100] */
val = (val * 100) / 255;
- break;
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
case aok_zoe_a1:
+ case oxp_fly:
case oxp_mini_amd_pro:
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
default:
break;
}
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
+ break;
default:
break;
}
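The scaling in these read/write paths is a linear map between the sysfs PWM range [0-255] and each EC's native range; for the 2/X1 boards that is [0-184]. A sketch of the pair (foo_* names hypothetical), with a caveat: integer division truncates, so mid-range values need not round-trip exactly (e.g. 100 -> 72 -> 99):

	static u8 foo_pwm_to_ec(long pwm)	/* sysfs [0-255] -> EC [0-184] */
	{
		return (pwm * 184) / 255;
	}

	static long foo_pwm_from_ec(u8 raw)	/* EC [0-184] -> sysfs [0-255] */
	{
		return (raw * 255) / 184;
	}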
@@ -467,19 +679,20 @@ static int __init oxp_platform_init(void)
{
const struct dmi_system_id *dmi_entry;
- /*
- * Have to check for AMD processor here because DMI strings are the
- * same between Intel and AMD boards, the only way to tell them apart
- * is the CPU.
- * Intel boards seem to have different EC registers and values to
- * read/write.
- */
dmi_entry = dmi_first_match(dmi_table);
- if (!dmi_entry || boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (!dmi_entry)
return -ENODEV;
board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
+ /*
+ * Have to check for AMD processor here because DMI strings are the same
+ * between Intel and AMD boards on older OneXPlayer devices, the only way
+ * to tell them apart is the CPU. Old Intel boards have an unsupported EC.
+ */
+ if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
oxp_platform_device =
platform_create_bundle(&oxp_platform_driver,
oxp_platform_probe, NULL, 0, NULL, 0);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 9e9681b2e8c5..788b5d58f77e 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1315,7 +1315,7 @@ static void pc87360_init_device(struct platform_device *pdev,
(reg & 0xC0) | 0x11);
}
- nr = data->innr < 11 ? data->innr : 11;
+ nr = min(data->innr, 11);
for (i = 0; i < nr; i++) {
reg = pc87360_read_value(data, LD_IN, i,
PC87365_REG_IN_STATUS);
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index 986404fe6a31..f5367a7bc0f5 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -31,8 +31,6 @@ MODULE_DEVICE_TABLE(i2c, max15301_id);
struct max15301_data {
int id;
- ktime_t access; /* Chip access time */
- int delay; /* Delay between chip accesses in us */
struct pmbus_driver_info info;
};
@@ -55,89 +53,6 @@ static struct max15301_data max15301_data = {
}
};
-/* This chip needs a delay between accesses */
-static inline void max15301_wait(const struct max15301_data *data)
-{
- if (data->delay) {
- s64 delta = ktime_us_delta(ktime_get(), data->access);
-
- if (delta < data->delay)
- udelay(data->delay - delta);
- }
-}
-
-static int max15301_read_word_data(struct i2c_client *client, int page,
- int phase, int reg)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct max15301_data *data = to_max15301_data(info);
- int ret;
-
- if (page > 0)
- return -ENXIO;
-
- if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
-
- max15301_wait(data);
- ret = pmbus_read_word_data(client, page, phase, reg);
- data->access = ktime_get();
-
- return ret;
-}
-
-static int max15301_read_byte_data(struct i2c_client *client, int page, int reg)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct max15301_data *data = to_max15301_data(info);
- int ret;
-
- if (page > 0)
- return -ENXIO;
-
- max15301_wait(data);
- ret = pmbus_read_byte_data(client, page, reg);
- data->access = ktime_get();
-
- return ret;
-}
-
-static int max15301_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct max15301_data *data = to_max15301_data(info);
- int ret;
-
- if (page > 0)
- return -ENXIO;
-
- if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
-
- max15301_wait(data);
- ret = pmbus_write_word_data(client, page, reg, word);
- data->access = ktime_get();
-
- return ret;
-}
-
-static int max15301_write_byte(struct i2c_client *client, int page, u8 value)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct max15301_data *data = to_max15301_data(info);
- int ret;
-
- if (page > 0)
- return -ENXIO;
-
- max15301_wait(data);
- ret = pmbus_write_byte(client, page, value);
- data->access = ktime_get();
-
- return ret;
-}
-
static int max15301_probe(struct i2c_client *client)
{
int status;
@@ -164,12 +79,7 @@ static int max15301_probe(struct i2c_client *client)
return -ENODEV;
}
- max15301_data.delay = delay;
-
- info->read_byte_data = max15301_read_byte_data;
- info->read_word_data = max15301_read_word_data;
- info->write_byte = max15301_write_byte;
- info->write_word_data = max15301_write_word_data;
+ info->access_delay = delay;
return pmbus_do_probe(client, info);
}
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index 67487867c70f..2dcb6da853bd 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -35,7 +35,7 @@ struct mpq7932_data {
};
#if IS_ENABLED(CONFIG_SENSORS_MPQ7932_REGULATOR)
-static struct regulator_desc mpq7932_regulators_desc[] = {
+static const struct regulator_desc mpq7932_regulators_desc[] = {
PMBUS_REGULATOR_STEP("buck", 0, MPQ7932_N_VOLTAGES,
MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
PMBUS_REGULATOR_STEP("buck", 1, MPQ7932_N_VOLTAGES,
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
index 2c6c9ec2a652..178e0cdb7887 100644
--- a/drivers/hwmon/pmbus/pli1209bc.c
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -54,30 +54,6 @@ static int pli1209bc_read_word_data(struct i2c_client *client, int page,
}
}
-static int pli1209bc_write_byte(struct i2c_client *client, int page, u8 reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_CLEAR_FAULTS:
- ret = pmbus_write_byte(client, page, reg);
- /*
- * PLI1209 takes 230 usec to execute the CLEAR_FAULTS command.
- * During that time it's busy and NACKs all requests on the
- * SMBUS interface. It also NACKs reads on PMBUS_STATUS_BYTE
- * making it impossible to poll the BUSY flag.
- *
- * Just wait for not BUSY unconditionally.
- */
- usleep_range(250, 300);
- break;
- default:
- ret = -ENODATA;
- break;
- }
- return ret;
-}
-
#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
static const struct regulator_desc pli1209bc_reg_desc = {
.name = "vout2",
@@ -127,7 +103,7 @@ static struct pmbus_driver_info pli1209bc_info = {
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
| PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT,
.read_word_data = pli1209bc_read_word_data,
- .write_byte = pli1209bc_write_byte,
+ .write_delay = 250,
#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
.num_regulators = 1,
.reg_desc = &pli1209bc_reg_desc,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fb442fae7b3e..d605412a3173 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -418,6 +418,12 @@ enum pmbus_sensor_classes {
enum pmbus_data_format { linear = 0, ieee754, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
+/* PMBus revision identifiers */
+#define PMBUS_REV_10 0x00 /* PMBus revision 1.0 */
+#define PMBUS_REV_11 0x11 /* PMBus revision 1.1 */
+#define PMBUS_REV_12 0x22 /* PMBus revision 1.2 */
+#define PMBUS_REV_13 0x33 /* PMBus revision 1.3 */
+
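These values assume the PMBUS_REVISION register encoding from the PMBus specification: Part I revision in bits 7:4 and Part II revision in bits 3:0, so 0x22 means both parts at revision 1.2. That is what makes a plain byte comparison against these identifiers meaningful, as in this hypothetical check (data->revision is filled from PMBUS_REVISION in the pmbus_core hunk below):

	if (data->revision >= PMBUS_REV_12) {
		/* PMBus 1.2+ allows clearing individual status bits by writing them back */
	}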
struct pmbus_driver_info {
int pages; /* Total number of pages */
u8 phases[PMBUS_PAGES]; /* Number of phases per page */
@@ -466,6 +472,16 @@ struct pmbus_driver_info {
/* custom attributes */
const struct attribute_group **groups;
+
+ /*
+ * Some chips need a little delay between SMBus transactions. When
+ * set, the generic PMBus helper functions will wait if necessary
+ * to meet this requirement. The access delay is honored after
+ * every SMBus operation. The write delay is only honored after
+ * SMBus write operations.
+ */
+ int access_delay; /* in microseconds */
+ int write_delay; /* in microseconds */
};
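With these two fields, chips that need pacing no longer wrap every pmbus_read/write helper; they simply declare the constraint in probe(). A minimal sketch (foo_probe, foo_data, and the 100 us figure are hypothetical):

	static int foo_probe(struct i2c_client *client)
	{
		struct pmbus_driver_info *info = &foo_data.info;

		info->access_delay = 100;	/* us between any two SMBus operations */
		/* or, as the pli1209bc change above does: info->write_delay = 250; */

		return pmbus_do_probe(client, info);
	}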
/* Regulator ops */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index cb4c65a7f288..ce7fd4ca9d89 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -7,6 +7,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -85,6 +86,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
+ u8 revision; /* The PMBus revision the device is compliant with */
+
int exponent[PMBUS_PAGES];
/* linear mode: exponent for output voltages */
@@ -108,6 +111,8 @@ struct pmbus_data {
int vout_low[PMBUS_PAGES]; /* voltage low margin */
int vout_high[PMBUS_PAGES]; /* voltage high margin */
+ ktime_t write_time; /* Last SMBUS write timestamp */
+ ktime_t access_time; /* Last SMBUS access timestamp */
};
struct pmbus_debugfs_entry {
@@ -158,6 +163,39 @@ void pmbus_set_update(struct i2c_client *client, u8 reg, bool update)
}
EXPORT_SYMBOL_NS_GPL(pmbus_set_update, PMBUS);
+/* Some chips need a delay between accesses. */
+static void pmbus_wait(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ s64 delta;
+
+ if (info->access_delay) {
+ delta = ktime_us_delta(ktime_get(), data->access_time);
+
+ if (delta < info->access_delay)
+ fsleep(info->access_delay - delta);
+ } else if (info->write_delay) {
+ delta = ktime_us_delta(ktime_get(), data->write_time);
+
+ if (delta < info->write_delay)
+ fsleep(info->write_delay - delta);
+ }
+}
+
+/* Sets the last accessed timestamp for pmbus_wait */
+static void pmbus_update_ts(struct i2c_client *client, bool write_op)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+
+ if (info->access_delay) {
+ data->access_time = ktime_get();
+ } else if (info->write_delay && write_op) {
+ data->write_time = ktime_get();
+ }
+}
+
int pmbus_set_page(struct i2c_client *client, int page, int phase)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -168,11 +206,15 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL) &&
data->info->pages > 1 && page != data->currpage) {
+ pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ pmbus_update_ts(client, true);
if (rv < 0)
return rv;
+ pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ pmbus_update_ts(client, false);
if (rv < 0)
return rv;
@@ -183,8 +225,10 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
if (data->info->phases[page] && data->currphase != phase &&
!(data->info->func[page] & PMBUS_PHASE_VIRTUAL)) {
+ pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
phase);
+ pmbus_update_ts(client, true);
if (rv)
return rv;
}
@@ -202,7 +246,11 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
if (rv < 0)
return rv;
- return i2c_smbus_write_byte(client, value);
+ pmbus_wait(client);
+ rv = i2c_smbus_write_byte(client, value);
+ pmbus_update_ts(client, true);
+
+ return rv;
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_byte, PMBUS);
@@ -233,7 +281,11 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
if (rv < 0)
return rv;
- return i2c_smbus_write_word_data(client, reg, word);
+ pmbus_wait(client);
+ rv = i2c_smbus_write_word_data(client, reg, word);
+ pmbus_update_ts(client, true);
+
+ return rv;
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, PMBUS);
@@ -351,7 +403,11 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
if (rv < 0)
return rv;
- return i2c_smbus_read_word_data(client, reg);
+ pmbus_wait(client);
+ rv = i2c_smbus_read_word_data(client, reg);
+ pmbus_update_ts(client, false);
+
+ return rv;
}
EXPORT_SYMBOL_NS_GPL(pmbus_read_word_data, PMBUS);
@@ -410,7 +466,11 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
if (rv < 0)
return rv;
- return i2c_smbus_read_byte_data(client, reg);
+ pmbus_wait(client);
+ rv = i2c_smbus_read_byte_data(client, reg);
+ pmbus_update_ts(client, false);
+
+ return rv;
}
EXPORT_SYMBOL_NS_GPL(pmbus_read_byte_data, PMBUS);
@@ -422,7 +482,11 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
if (rv < 0)
return rv;
- return i2c_smbus_write_byte_data(client, reg, value);
+ pmbus_wait(client);
+ rv = i2c_smbus_write_byte_data(client, reg, value);
+ pmbus_update_ts(client, true);
+
+ return rv;
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_byte_data, PMBUS);
@@ -454,7 +518,11 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
if (rv < 0)
return rv;
- return i2c_smbus_read_block_data(client, reg, data_buf);
+ pmbus_wait(client);
+ rv = i2c_smbus_read_block_data(client, reg, data_buf);
+ pmbus_update_ts(client, false);
+
+ return rv;
}
static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
@@ -1095,9 +1163,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
regval = status & mask;
if (regval) {
- ret = _pmbus_write_byte_data(client, page, reg, regval);
- if (ret)
- goto unlock;
+ if (data->revision >= PMBUS_REV_12) {
+ ret = _pmbus_write_byte_data(client, page, reg, regval);
+ if (ret)
+ goto unlock;
+ } else {
+ pmbus_clear_fault_page(client, page);
+ }
+
}
if (s1 && s2) {
s64 v1, v2;
@@ -2450,9 +2523,11 @@ static int pmbus_read_coefficients(struct i2c_client *client,
data.block[1] = attr->reg;
data.block[2] = 0x01;
+ pmbus_wait(client);
rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
I2C_SMBUS_BLOCK_PROC_CALL, &data);
+ pmbus_update_ts(client, true);
if (rv < 0)
return rv;
@@ -2604,7 +2679,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
/* Enable PEC if the controller and bus supports it */
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
+ pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ pmbus_update_ts(client, false);
+
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
client->flags |= I2C_CLIENT_PEC;
@@ -2617,10 +2695,16 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* Bail out if both registers are not supported.
*/
data->read_status = pmbus_read_status_word;
+ pmbus_wait(client);
ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
+ pmbus_update_ts(client, false);
+
if (ret < 0 || ret == 0xffff) {
data->read_status = pmbus_read_status_byte;
+ pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
+ pmbus_update_ts(client, false);
+
if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
return -ENODEV;
@@ -2635,11 +2719,18 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* limit registers need to be disabled.
*/
if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
+ pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ pmbus_update_ts(client, false);
+
if (ret > 0 && (ret & PB_WP_ANY))
data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
}
+ ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
+ if (ret >= 0)
+ data->revision = ret;
+
if (data->info->pages)
pmbus_clear_faults(client);
else
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index d817c719b90b..5d3d1773bf52 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -67,7 +67,6 @@ struct ucd9000_data {
struct gpio_chip gpio;
#endif
struct dentry *debugfs;
- ktime_t write_time;
};
#define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
@@ -86,63 +85,6 @@ struct ucd9000_debugfs_entry {
*/
#define UCD90320_WAIT_DELAY_US 500
-static inline void ucd90320_wait(const struct ucd9000_data *data)
-{
- s64 delta = ktime_us_delta(ktime_get(), data->write_time);
-
- if (delta < UCD90320_WAIT_DELAY_US)
- udelay(UCD90320_WAIT_DELAY_US - delta);
-}
-
-static int ucd90320_read_word_data(struct i2c_client *client, int page,
- int phase, int reg)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct ucd9000_data *data = to_ucd9000_data(info);
-
- if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
-
- ucd90320_wait(data);
- return pmbus_read_word_data(client, page, phase, reg);
-}
-
-static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct ucd9000_data *data = to_ucd9000_data(info);
-
- ucd90320_wait(data);
- return pmbus_read_byte_data(client, page, reg);
-}
-
-static int ucd90320_write_word_data(struct i2c_client *client, int page,
- int reg, u16 word)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct ucd9000_data *data = to_ucd9000_data(info);
- int ret;
-
- ucd90320_wait(data);
- ret = pmbus_write_word_data(client, page, reg, word);
- data->write_time = ktime_get();
-
- return ret;
-}
-
-static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct ucd9000_data *data = to_ucd9000_data(info);
- int ret;
-
- ucd90320_wait(data);
- ret = pmbus_write_byte(client, page, value);
- data->write_time = ktime_get();
-
- return ret;
-}
-
static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
{
int fan_config = 0;
@@ -667,10 +609,8 @@ static int ucd9000_probe(struct i2c_client *client)
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
| PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
} else if (mid->driver_data == ucd90320) {
- info->read_byte_data = ucd90320_read_byte_data;
- info->read_word_data = ucd90320_read_word_data;
- info->write_byte = ucd90320_write_byte;
- info->write_word_data = ucd90320_write_word_data;
+ /* Delay SMBus operations after a write */
+ info->write_delay = UCD90320_WAIT_DELAY_US;
}
ucd9000_probe_gpio(client, mid, data);
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 83458df0d0cf..7920a16203e1 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -22,8 +22,6 @@ enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105,
struct zl6100_data {
int id;
- ktime_t access; /* chip access time */
- int delay; /* Delay between chip accesses in uS */
struct pmbus_driver_info info;
};
@@ -122,16 +120,6 @@ static u16 zl6100_d2l(long val)
return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
}
-/* Some chips need a delay between accesses */
-static inline void zl6100_wait(const struct zl6100_data *data)
-{
- if (data->delay) {
- s64 delta = ktime_us_delta(ktime_get(), data->access);
- if (delta < data->delay)
- udelay(data->delay - delta);
- }
-}
-
static int zl6100_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -174,9 +162,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page,
break;
}
- zl6100_wait(data);
ret = pmbus_read_word_data(client, page, phase, vreg);
- data->access = ktime_get();
if (ret < 0)
return ret;
@@ -195,14 +181,11 @@ static int zl6100_read_word_data(struct i2c_client *client, int page,
static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct zl6100_data *data = to_zl6100_data(info);
int ret, status;
if (page >= info->pages)
return -ENXIO;
- zl6100_wait(data);
-
switch (reg) {
case PMBUS_VIRT_STATUS_VMON:
ret = pmbus_read_byte_data(client, 0,
@@ -225,7 +208,6 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
ret = pmbus_read_byte_data(client, page, reg);
break;
}
- data->access = ktime_get();
return ret;
}
@@ -234,8 +216,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct zl6100_data *data = to_zl6100_data(info);
- int ret, vreg;
+ int vreg;
if (page >= info->pages)
return -ENXIO;
@@ -265,27 +246,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
vreg = reg;
}
- zl6100_wait(data);
- ret = pmbus_write_word_data(client, page, vreg, word);
- data->access = ktime_get();
-
- return ret;
-}
-
-static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
-{
- const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
- struct zl6100_data *data = to_zl6100_data(info);
- int ret;
-
- if (page >= info->pages)
- return -ENXIO;
-
- zl6100_wait(data);
- ret = pmbus_write_byte(client, page, value);
- data->access = ktime_get();
-
- return ret;
+ return pmbus_write_word_data(client, page, vreg, word);
}
static const struct i2c_device_id zl6100_id[] = {
@@ -363,14 +324,7 @@ static int zl6100_probe(struct i2c_client *client)
* supported chips are known to require a wait time between I2C
* accesses.
*/
- data->delay = delay;
-
- /*
- * Since there was a direct I2C device access above, wait before
- * accessing the chip again.
- */
- data->access = ktime_get();
- zl6100_wait(data);
+ udelay(delay);
info = &data->info;
@@ -404,8 +358,7 @@ static int zl6100_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- data->access = ktime_get();
- zl6100_wait(data);
+ udelay(delay);
if (ret & ZL8802_MFR_PHASES_MASK)
info->func[1] |= PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
@@ -418,8 +371,7 @@ static int zl6100_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- data->access = ktime_get();
- zl6100_wait(data);
+ udelay(delay);
ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_CONFIG);
if (ret < 0)
@@ -428,8 +380,7 @@ static int zl6100_probe(struct i2c_client *client)
if (ret & ZL8802_MFR_XTEMP_ENABLE_2)
info->func[i] |= PMBUS_HAVE_TEMP2;
- data->access = ktime_get();
- zl6100_wait(data);
+ udelay(delay);
}
ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_GLOBAL_CONFIG);
if (ret < 0)
@@ -446,13 +397,12 @@ static int zl6100_probe(struct i2c_client *client)
info->func[0] |= PMBUS_HAVE_TEMP2;
}
- data->access = ktime_get();
- zl6100_wait(data);
+ udelay(delay);
+ info->access_delay = delay;
info->read_word_data = zl6100_read_word_data;
info->read_byte_data = zl6100_read_byte_data;
info->write_word_data = zl6100_write_word_data;
- info->write_byte = zl6100_write_byte;
return pmbus_do_probe(client, info);
}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index a1712649b07e..c434db4656e7 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -167,7 +167,7 @@ disable_regulator:
return ret;
}
-static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
+static int pwm_fan_power_off(struct pwm_fan_ctx *ctx, bool force_disable)
{
struct pwm_state *state = &ctx->pwm_state;
bool enable_regulator = false;
@@ -180,7 +180,8 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
state,
&enable_regulator);
- state->enabled = false;
+ if (force_disable)
+ state->enabled = false;
state->duty_cycle = 0;
ret = pwm_apply_might_sleep(ctx->pwm, state);
if (ret) {
@@ -213,7 +214,7 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
return ret;
ret = pwm_fan_power_on(ctx);
} else {
- ret = pwm_fan_power_off(ctx);
+ ret = pwm_fan_power_off(ctx, false);
}
if (!ret)
ctx->pwm_value = pwm;
@@ -468,7 +469,7 @@ static void pwm_fan_cleanup(void *__ctx)
del_timer_sync(&ctx->rpm_timer);
/* Switch off everything */
ctx->enable_mode = pwm_disable_reg_disable;
- pwm_fan_power_off(ctx);
+ pwm_fan_power_off(ctx, true);
}
static int pwm_fan_probe(struct platform_device *pdev)
@@ -661,7 +662,7 @@ static int pwm_fan_suspend(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- return pwm_fan_power_off(ctx);
+ return pwm_fan_power_off(ctx, true);
}
static int pwm_fan_resume(struct device *dev)
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 6e6d54158474..a4b05ebb0546 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -416,8 +416,7 @@ static int sch5636_probe(struct platform_device *pdev)
id[i] = '\0';
if (strcmp(id, "THS")) {
- pr_err("Unknown Fujitsu id: %02x%02x%02x\n",
- id[0], id[1], id[2]);
+ pr_err("Unknown Fujitsu id: %3pE (%3ph)\n", id, id);
err = -ENODEV;
goto error;
}
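%pE and %ph are printk extensions (see Documentation/core-api/printk-formats.rst): %NpE prints a buffer with non-printable bytes escaped, and %Nph prints it as space-separated hex. For the three-byte ID above, the new format string gives both views at once; roughly:

	u8 id[3] = { 0x54, 0x48, 0x53 };	/* "THS" */

	pr_err("Unknown Fujitsu id: %3pE (%3ph)\n", id, id);
	/* -> Unknown Fujitsu id: THS (54 48 53) */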
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7479a549a026..601987c6b4cd 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -22,4 +22,3 @@ int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
struct mutex *io_lock, int check_enabled);
-void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/hwmon/sg2042-mcu.c b/drivers/hwmon/sg2042-mcu.c
new file mode 100644
index 000000000000..141045769354
--- /dev/null
+++ b/drivers/hwmon/sg2042-mcu.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * Sophgo power control mcu for SG2042
+ */
+
+#include <linux/cleanup.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/* fixed MCU registers */
+#define REG_BOARD_TYPE 0x00
+#define REG_MCU_FIRMWARE_VERSION 0x01
+#define REG_PCB_VERSION 0x02
+#define REG_PWR_CTRL 0x03
+#define REG_SOC_TEMP 0x04
+#define REG_BOARD_TEMP 0x05
+#define REG_RST_COUNT 0x0a
+#define REG_UPTIME 0x0b
+#define REG_RESET_REASON 0x0d
+#define REG_MCU_TYPE 0x18
+#define REG_REPOWER_POLICY 0x65
+#define REG_CRITICAL_TEMP 0x66
+#define REG_REPOWER_TEMP 0x67
+
+#define REPOWER_POLICY_REBOOT 1
+#define REPOWER_POLICY_KEEP_OFF 2
+
+#define MCU_POWER_MAX 0xff
+
+#define DEFINE_MCU_DEBUG_ATTR(_name, _reg, _format) \
+ static int _name##_show(struct seq_file *seqf, \
+ void *unused) \
+ { \
+ struct sg2042_mcu_data *mcu = seqf->private; \
+ int ret; \
+ ret = i2c_smbus_read_byte_data(mcu->client, (_reg)); \
+ if (ret < 0) \
+ return ret; \
+ seq_printf(seqf, _format "\n", ret); \
+ return 0; \
+ } \
+ DEFINE_SHOW_ATTRIBUTE(_name) \
+
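For reference, DEFINE_MCU_DEBUG_ATTR(board_type, REG_BOARD_TYPE, "0x%02x") expands to roughly the following; DEFINE_SHOW_ATTRIBUTE() then generates the board_type_fops passed to debugfs_create_file() further down:

	static int board_type_show(struct seq_file *seqf, void *unused)
	{
		struct sg2042_mcu_data *mcu = seqf->private;
		int ret;

		ret = i2c_smbus_read_byte_data(mcu->client, REG_BOARD_TYPE);
		if (ret < 0)
			return ret;
		seq_printf(seqf, "0x%02x\n", ret);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(board_type);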
+struct sg2042_mcu_data {
+ struct i2c_client *client;
+ struct dentry *debugfs;
+ struct mutex mutex;
+};
+
+static struct dentry *sgmcu_debugfs;
+
+static ssize_t reset_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(mcu->client, REG_RST_COUNT);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t uptime_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ u8 time_val[2];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(mcu->client, REG_UPTIME,
+ sizeof(time_val), time_val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n",
+ (time_val[0]) | (time_val[1] << 8));
+}
+
+static ssize_t reset_reason_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(mcu->client, REG_RESET_REASON);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "0x%02x\n", ret);
+}
+
+static ssize_t repower_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ int ret;
+ const char *action;
+
+ ret = i2c_smbus_read_byte_data(mcu->client, REG_REPOWER_POLICY);
+ if (ret < 0)
+ return ret;
+
+ if (ret == REPOWER_POLICY_REBOOT)
+ action = "repower";
+ else if (ret == REPOWER_POLICY_KEEP_OFF)
+ action = "keep";
+ else
+ action = "unknown";
+
+ return sprintf(buf, "%s\n", action);
+}
+
+static ssize_t repower_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ u8 value;
+ int ret;
+
+ if (sysfs_streq("repower", buf))
+ value = REPOWER_POLICY_REBOOT;
+ else if (sysfs_streq("keep", buf))
+ value = REPOWER_POLICY_KEEP_OFF;
+ else
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(mcu->client,
+ REG_REPOWER_POLICY, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(reset_count);
+static DEVICE_ATTR_RO(uptime);
+static DEVICE_ATTR_RO(reset_reason);
+static DEVICE_ATTR_RW(repower_policy);
+
+DEFINE_MCU_DEBUG_ATTR(firmware_version, REG_MCU_FIRMWARE_VERSION, "0x%02x");
+DEFINE_MCU_DEBUG_ATTR(pcb_version, REG_PCB_VERSION, "0x%02x");
+DEFINE_MCU_DEBUG_ATTR(board_type, REG_BOARD_TYPE, "0x%02x");
+DEFINE_MCU_DEBUG_ATTR(mcu_type, REG_MCU_TYPE, "%d");
+
+static struct attribute *sg2042_mcu_attrs[] = {
+ &dev_attr_reset_count.attr,
+ &dev_attr_uptime.attr,
+ &dev_attr_reset_reason.attr,
+ &dev_attr_repower_policy.attr,
+ NULL
+};
+
+static const struct attribute_group sg2042_mcu_attr_group = {
+ .attrs = sg2042_mcu_attrs,
+};
+
+static const struct attribute_group *sg2042_mcu_groups[] = {
+ &sg2042_mcu_attr_group,
+ NULL
+};
+
+static const struct hwmon_channel_info * const sg2042_mcu_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT),
+ NULL
+};
+
+static int sg2042_mcu_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ int tmp;
+ u8 reg;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = channel ? REG_BOARD_TEMP : REG_SOC_TEMP;
+ break;
+ case hwmon_temp_crit:
+ reg = REG_CRITICAL_TEMP;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg = REG_REPOWER_TEMP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ tmp = i2c_smbus_read_byte_data(mcu->client, reg);
+ if (tmp < 0)
+ return tmp;
+ *val = tmp * 1000;
+
+ return 0;
+}
+
+static int sg2042_mcu_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct sg2042_mcu_data *mcu = dev_get_drvdata(dev);
+ int temp = val / 1000;
+ int hyst_temp, crit_temp;
+ u8 reg;
+
+ temp = clamp_val(temp, 0, MCU_POWER_MAX);
+
+ guard(mutex)(&mcu->mutex);
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ hyst_temp = i2c_smbus_read_byte_data(mcu->client,
+ REG_REPOWER_TEMP);
+ if (hyst_temp < 0)
+ return hyst_temp;
+
+ crit_temp = temp;
+ reg = REG_CRITICAL_TEMP;
+ break;
+ case hwmon_temp_crit_hyst:
+ crit_temp = i2c_smbus_read_byte_data(mcu->client,
+ REG_CRITICAL_TEMP);
+ if (crit_temp < 0)
+ return crit_temp;
+
+ hyst_temp = temp;
+ reg = REG_REPOWER_TEMP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Ensure hyst_temp stays at or below crit_temp to keep the MCU
+ * from repeatedly triggering repower events.
+ */
+ if (crit_temp < hyst_temp)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(mcu->client, reg, temp);
+}
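guard(mutex) above comes from linux/cleanup.h: the lock is released automatically when the guard variable leaves scope, which keeps the early error returns correct without an unlock label. Equivalent sketch (foo_* names hypothetical):

	static int foo_update(struct foo_data *f)
	{
		guard(mutex)(&f->lock);	/* unlocked on every return path */

		if (foo_busy(f))
			return -EBUSY;	/* no explicit mutex_unlock() needed */

		return foo_commit(f);
	}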
+
+static umode_t sg2042_mcu_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_ops sg2042_mcu_ops = {
+ .is_visible = sg2042_mcu_is_visible,
+ .read = sg2042_mcu_read,
+ .write = sg2042_mcu_write,
+};
+
+static const struct hwmon_chip_info sg2042_mcu_chip_info = {
+ .ops = &sg2042_mcu_ops,
+ .info = sg2042_mcu_info,
+};
+
+static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu,
+ struct device *dev)
+{
+ mcu->debugfs = debugfs_create_dir(dev_name(dev), sgmcu_debugfs);
+
+ debugfs_create_file("firmware_version", 0444, mcu->debugfs,
+ mcu, &firmware_version_fops);
+ debugfs_create_file("pcb_version", 0444, mcu->debugfs, mcu,
+ &pcb_version_fops);
+ debugfs_create_file("mcu_type", 0444, mcu->debugfs, mcu,
+ &mcu_type_fops);
+ debugfs_create_file("board_type", 0444, mcu->debugfs, mcu,
+ &board_type_fops);
+}
+
+static int sg2042_mcu_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct sg2042_mcu_data *mcu;
+ struct device *hwmon_dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ mcu = devm_kmalloc(dev, sizeof(*mcu), GFP_KERNEL);
+ if (!mcu)
+ return -ENOMEM;
+
+ mutex_init(&mcu->mutex);
+ mcu->client = client;
+
+ i2c_set_clientdata(client, mcu);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "sg2042_mcu",
+ mcu,
+ &sg2042_mcu_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ sg2042_mcu_debugfs_init(mcu, dev);
+
+ return 0;
+}
+
+static void sg2042_mcu_i2c_remove(struct i2c_client *client)
+{
+ struct sg2042_mcu_data *mcu = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(mcu->debugfs);
+}
+
+static const struct i2c_device_id sg2042_mcu_id[] = {
+ { "sg2042-hwmon-mcu", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sg2042_mcu_id);
+
+static const struct of_device_id sg2042_mcu_of_id[] = {
+ { .compatible = "sophgo,sg2042-hwmon-mcu" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sg2042_mcu_of_id);
+
+static struct i2c_driver sg2042_mcu_driver = {
+ .driver = {
+ .name = "sg2042-mcu",
+ .of_match_table = sg2042_mcu_of_id,
+ .dev_groups = sg2042_mcu_groups,
+ },
+ .probe = sg2042_mcu_i2c_probe,
+ .remove = sg2042_mcu_i2c_remove,
+ .id_table = sg2042_mcu_id,
+};
+
+static int __init sg2042_mcu_init(void)
+{
+ sgmcu_debugfs = debugfs_create_dir("sg2042-mcu", NULL);
+ return i2c_add_driver(&sg2042_mcu_driver);
+}
+
+static void __exit sg2042_mcu_exit(void)
+{
+ debugfs_remove_recursive(sgmcu_debugfs);
+ i2c_del_driver(&sg2042_mcu_driver);
+}
+
+module_init(sg2042_mcu_init);
+module_exit(sg2042_mcu_exit);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@outlook.com>");
+MODULE_DESCRIPTION("MCU I2C driver for SG2042 SoC platform");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index ad1b827ea782..97327313529b 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -199,10 +199,7 @@ static ssize_t eic_read(struct sht21 *sht21)
eic[6] = rx[0];
eic[7] = rx[1];
- ret = snprintf(sht21->eic, sizeof(sht21->eic),
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
- eic[0], eic[1], eic[2], eic[3],
- eic[4], eic[5], eic[6], eic[7]);
+ ret = snprintf(sht21->eic, sizeof(sht21->eic), "%8phN\n", eic);
out:
if (ret < 0)
sht21->eic[0] = 0;
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index e7632081a1d1..f9e8b2869164 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -77,7 +77,7 @@ static const struct i2c_device_id stts751_id[] = {
};
static const struct of_device_id __maybe_unused stts751_of_match[] = {
- { .compatible = "stts751" },
+ { .compatible = "st,stts751" },
{ },
};
MODULE_DEVICE_TABLE(of, stts751_of_match);
diff --git a/drivers/hwmon/surface_temp.c b/drivers/hwmon/surface_temp.c
new file mode 100644
index 000000000000..cd21f331f157
--- /dev/null
+++ b/drivers/hwmon/surface_temp.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Thermal sensor subsystem driver for Surface System Aggregator Module (SSAM).
+ *
+ * Copyright (C) 2022-2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+/* -- SAM interface. -------------------------------------------------------- */
+
+/*
+ * Available sensors are indicated by a 16-bit bitfield, where a 1 marks the
+ * presence of a sensor. So we have at most 16 possible sensors/channels.
+ */
+#define SSAM_TMP_SENSOR_MAX_COUNT 16
+
+/*
+ * All names observed so far are 6 characters long, but there are only
+ * zeros after the name, so perhaps they can be longer. This number reflects
+ * the maximum zero-padded space observed in the returned buffer.
+ */
+#define SSAM_TMP_SENSOR_NAME_LENGTH 18
+
+struct ssam_tmp_get_name_rsp {
+ __le16 unknown1;
+ char unknown2;
+ char name[SSAM_TMP_SENSOR_NAME_LENGTH];
+} __packed;
+
+static_assert(sizeof(struct ssam_tmp_get_name_rsp) == 21);
+
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_get_available_sensors, __le16, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x04,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_MD_R(__ssam_tmp_get_temperature, __le16, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x01,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_MD_R(__ssam_tmp_get_name, struct ssam_tmp_get_name_rsp, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .command_id = 0x0e,
+});
+
+static int ssam_tmp_get_available_sensors(struct ssam_device *sdev, s16 *sensors)
+{
+ __le16 sensors_le;
+ int status;
+
+ status = __ssam_tmp_get_available_sensors(sdev, &sensors_le);
+ if (status)
+ return status;
+
+ *sensors = le16_to_cpu(sensors_le);
+ return 0;
+}
+
+static int ssam_tmp_get_temperature(struct ssam_device *sdev, u8 iid, long *temperature)
+{
+ __le16 temp_le;
+ int status;
+
+ status = __ssam_tmp_get_temperature(sdev->ctrl, sdev->uid.target, iid, &temp_le);
+ if (status)
+ return status;
+
+ /* Convert 1/10 K to 1/1000 °C */
+ *temperature = (le16_to_cpu(temp_le) - 2731) * 100L;
+ return 0;
+}
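Worked example of the conversion, assuming a raw reading of 3031 (tenths of a kelvin, i.e. 303.1 K); note the 2731 offset approximates 273.15 K at the available tenth-kelvin resolution:

	/* (3031 - 2731) * 100 = 30000 m°C = 30.0 °C */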
+
+static int ssam_tmp_get_name(struct ssam_device *sdev, u8 iid, char *buf, size_t buf_len)
+{
+ struct ssam_tmp_get_name_rsp name_rsp;
+ int status;
+
+ status = __ssam_tmp_get_name(sdev->ctrl, sdev->uid.target, iid, &name_rsp);
+ if (status)
+ return status;
+
+ /*
+ * This should not fail unless the name in the returned struct is not
+ * null-terminated or someone changed something in the struct
+ * definitions above, since our buffer and struct have the same
+ * capacity by design. So if this fails, log an error message. Since
+ * the more likely cause is that the returned string isn't
+ * null-terminated, we might have received garbage (as opposed to just
+ * an incomplete string), so also fail the function.
+ */
+ status = strscpy(buf, name_rsp.name, buf_len);
+ if (status < 0) {
+ dev_err(&sdev->dev, "received non-null-terminated sensor name string\n");
+ return status;
+ }
+
+ return 0;
+}
+
+/* -- Driver. ---------------------------------------------------------------- */
+
+struct ssam_temp {
+ struct ssam_device *sdev;
+ s16 sensors;
+ char names[SSAM_TMP_SENSOR_MAX_COUNT][SSAM_TMP_SENSOR_NAME_LENGTH];
+};
+
+static umode_t ssam_temp_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct ssam_temp *ssam_temp = data;
+
+ if (!(ssam_temp->sensors & BIT(channel)))
+ return 0;
+
+ return 0444;
+}
+
+static int ssam_temp_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ const struct ssam_temp *ssam_temp = dev_get_drvdata(dev);
+
+ return ssam_tmp_get_temperature(ssam_temp->sdev, channel + 1, value);
+}
+
+static int ssam_temp_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ const struct ssam_temp *ssam_temp = dev_get_drvdata(dev);
+
+ *str = ssam_temp->names[channel];
+ return 0;
+}
+
+static const struct hwmon_channel_info * const ssam_temp_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops ssam_temp_hwmon_ops = {
+ .is_visible = ssam_temp_hwmon_is_visible,
+ .read = ssam_temp_hwmon_read,
+ .read_string = ssam_temp_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info ssam_temp_hwmon_chip_info = {
+ .ops = &ssam_temp_hwmon_ops,
+ .info = ssam_temp_hwmon_info,
+};
+
+static int ssam_temp_probe(struct ssam_device *sdev)
+{
+ struct ssam_temp *ssam_temp;
+ struct device *hwmon_dev;
+ s16 sensors;
+ int channel;
+ int status;
+
+ status = ssam_tmp_get_available_sensors(sdev, &sensors);
+ if (status)
+ return status;
+
+ ssam_temp = devm_kzalloc(&sdev->dev, sizeof(*ssam_temp), GFP_KERNEL);
+ if (!ssam_temp)
+ return -ENOMEM;
+
+ ssam_temp->sdev = sdev;
+ ssam_temp->sensors = sensors;
+
+ /* Retrieve the name for each available sensor. */
+ for (channel = 0; channel < SSAM_TMP_SENSOR_MAX_COUNT; channel++) {
+ if (!(sensors & BIT(channel)))
+ continue;
+
+ status = ssam_tmp_get_name(sdev, channel + 1, ssam_temp->names[channel],
+ SSAM_TMP_SENSOR_NAME_LENGTH);
+ if (status)
+ return status;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&sdev->dev, "surface_thermal", ssam_temp,
+ &ssam_temp_hwmon_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct ssam_device_id ssam_temp_match[] = {
+ { SSAM_SDEV(TMP, SAM, 0x00, 0x02) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_temp_match);
+
+static struct ssam_device_driver ssam_temp = {
+ .probe = ssam_temp_probe,
+ .match_table = ssam_temp_match,
+ .driver = {
+ .name = "surface_temp",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(ssam_temp);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Thermal sensor subsystem driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 853dbe708ff5..02c5a3bb1071 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -308,7 +308,9 @@ static int tmp401_temp_read(struct device *dev, u32 attr, int channel, long *val
{
struct tmp401_data *data = dev_get_drvdata(dev);
struct regmap *regmap = data->regmap;
+ unsigned int regs[2] = { TMP401_TEMP_MSB[3][channel], TMP401_TEMP_CRIT_HYST };
unsigned int regval;
+ u16 regvals[2];
int reg, ret;
switch (attr) {
@@ -325,20 +327,11 @@ static int tmp401_temp_read(struct device *dev, u32 attr, int channel, long *val
*val = tmp401_register_to_temp(regval, data->extended_range);
break;
case hwmon_temp_crit_hyst:
- mutex_lock(&data->update_lock);
- reg = TMP401_TEMP_MSB[3][channel];
- ret = regmap_read(regmap, reg, &regval);
- if (ret < 0)
- goto unlock;
- *val = tmp401_register_to_temp(regval, data->extended_range);
- ret = regmap_read(regmap, TMP401_TEMP_CRIT_HYST, &regval);
- if (ret < 0)
- goto unlock;
- *val -= regval * 1000;
-unlock:
- mutex_unlock(&data->update_lock);
+ ret = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (ret < 0)
return ret;
+ *val = tmp401_register_to_temp(regvals[0], data->extended_range) -
+ (regvals[1] * 1000);
break;
case hwmon_temp_fault:
case hwmon_temp_min_alarm:
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 7a6f9532e594..9537727aad9a 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -410,18 +410,15 @@ static int tmp421_probe_from_dt(struct i2c_client *client, struct tmp421_data *d
{
struct device *dev = &client->dev;
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
err = tmp421_probe_child_from_dt(client, child, data);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
index 3ee1137533d6..0f629c6d7695 100644
--- a/drivers/hwmon/tmp464.c
+++ b/drivers/hwmon/tmp464.c
@@ -147,11 +147,11 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val
{
struct tmp464_data *data = dev_get_drvdata(dev);
struct regmap *regmap = data->regmap;
- unsigned int regval, regval2;
+ unsigned int regs[2];
+ unsigned int regval;
+ u16 regvals[2];
int err = 0;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_temp_max_alarm:
err = regmap_read(regmap, TMP464_THERM_STATUS_REG, &regval);
@@ -172,26 +172,27 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val
* complete. That means we have to cache the value internally
* for one measurement cycle and report the cached value.
*/
+ mutex_lock(&data->update_lock);
if (!data->valid || time_after(jiffies, data->last_updated +
msecs_to_jiffies(data->update_interval))) {
err = regmap_read(regmap, TMP464_REMOTE_OPEN_REG, &regval);
if (err < 0)
- break;
+ goto unlock;
data->open_reg = regval;
data->last_updated = jiffies;
data->valid = true;
}
*val = !!(data->open_reg & BIT(channel + 7));
+unlock:
+ mutex_unlock(&data->update_lock);
break;
case hwmon_temp_max_hyst:
- err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
+ regs[0] = TMP464_THERM_LIMIT[channel];
+ regs[1] = TMP464_TEMP_HYST_REG;
+ err = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (err < 0)
break;
- err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
- if (err < 0)
- break;
- regval -= regval2;
- *val = temp_from_reg(regval);
+ *val = temp_from_reg(regvals[0] - regvals[1]);
break;
case hwmon_temp_max:
err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], &regval);
@@ -200,14 +201,12 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val
*val = temp_from_reg(regval);
break;
case hwmon_temp_crit_hyst:
- err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
+ regs[0] = TMP464_THERM2_LIMIT[channel];
+ regs[1] = TMP464_TEMP_HYST_REG;
+ err = regmap_multi_reg_read(regmap, regs, regvals, 2);
if (err < 0)
break;
- err = regmap_read(regmap, TMP464_TEMP_HYST_REG, &regval2);
- if (err < 0)
- break;
- regval -= regval2;
- *val = temp_from_reg(regval);
+ *val = temp_from_reg(regvals[0] - regvals[1]);
break;
case hwmon_temp_crit:
err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], &regval);
@@ -239,8 +238,6 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val
break;
}
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -565,18 +562,15 @@ static int tmp464_probe_child_from_dt(struct device *dev,
static int tmp464_probe_from_dt(struct device *dev, struct tmp464_data *data)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
err = tmp464_probe_child_from_dt(dev, child, data);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c
index d82a3b454d0e..a2e350f52a9e 100644
--- a/drivers/hwmon/vexpress-hwmon.c
+++ b/drivers/hwmon/vexpress-hwmon.c
@@ -72,7 +72,7 @@ static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
struct device_attribute, attr);
if (dev_attr->show == vexpress_hwmon_label_show &&
- !of_get_property(dev->of_node, "label", NULL))
+ !of_property_present(dev->of_node, "label"))
return 0;
return attr->mode;
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 44710267d669..c232054fddd6 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -40,14 +40,6 @@ config I2C_BOARDINFO
bool
default y
-config I2C_COMPAT
- bool "Enable compatibility bits for old user-space"
- default y
- help
- Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
- other user-space package which expects i2c adapters to be class
- devices. If you don't know, say Y.
-
config I2C_CHARDEV
tristate "I2C device interface"
help
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a22f9125322a..53f18b351f53 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -559,28 +559,33 @@ config I2C_DAVINCI
For details please see http://www.ti.com/davinci
config I2C_DESIGNWARE_CORE
- tristate
+ tristate "Synopsys DesignWare I2C adapter"
select REGMAP
+ help
+ This option enables support for the Synopsys DesignWare I2C adapter.
+ It provides the core functionality shared by the Synopsys
+ DesignWare I2C host drivers.
+
+ To compile the driver as a module, choose M here: the module will be
+ called i2c-designware-core.
+
+if I2C_DESIGNWARE_CORE
config I2C_DESIGNWARE_SLAVE
bool "Synopsys DesignWare Slave"
- depends on I2C_DESIGNWARE_CORE
select I2C_SLAVE
help
If you say yes to this option, support will be included for the
Synopsys DesignWare I2C slave adapter.
- This is not a standalone module, this module compiles together with
- i2c-designware-core.
-
config I2C_DESIGNWARE_PLATFORM
- tristate "Synopsys DesignWare Platform"
+ tristate "Synopsys DesignWare Platform driver"
depends on (ACPI && COMMON_CLK) || !ACPI
- select I2C_DESIGNWARE_CORE
select MFD_SYSCON if MIPS_BAIKAL_T1
+ default I2C_DESIGNWARE_CORE
help
If you say yes to this option, support will be included for the
- Synopsys DesignWare I2C adapter.
+ Synopsys DesignWare I2C adapters on the platform bus.
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
@@ -613,17 +618,19 @@ config I2C_DESIGNWARE_BAYTRAIL
a BayTrail system using the AXP288.
config I2C_DESIGNWARE_PCI
- tristate "Synopsys DesignWare PCI"
+ tristate "Synopsys DesignWare PCI driver"
depends on PCI
- select I2C_DESIGNWARE_CORE
select I2C_CCGX_UCSI
help
If you say yes to this option, support will be included for the
- Synopsys DesignWare I2C adapter. Only master mode is supported.
+ Synopsys DesignWare I2C adapters on the PCI bus. Only master mode is
+ supported.
This driver can also be built as a module. If so, the module
will be called i2c-designware-pci.
+endif
+
config I2C_DIGICOLOR
tristate "Conexant Digicolor I2C driver"
depends on ARCH_DIGICOLOR || COMPILE_TEST
@@ -772,6 +779,17 @@ config I2C_JZ4780
If you don't know what to do here, say N.
+config I2C_KEBA
+ tristate "KEBA I2C controller support"
+ depends on HAS_IOMEM
+ select AUXILIARY_BUS
+ help
+ This driver supports the I2C controller found in KEBA system FPGA
+ devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-keba.
+
config I2C_KEMPLD
tristate "Kontron COM I2C Controller"
depends on MFD_KEMPLD
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 78d0561339e5..ecc07c50f2a0 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
+obj-$(CONFIG_I2C_KEBA) += i2c-keba.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
obj-$(CONFIG_I2C_LS2X) += i2c-ls2x.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 9d7b4efe26ad..544c94e86b89 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -479,9 +479,8 @@ static struct i2c_adapter ali1535_adapter = {
static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
- { },
+ { }
};
-
MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index d3ac1c77a509..6f0ef587e76d 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -340,7 +340,7 @@ static void i2c_amd_remove(struct platform_device *pdev)
static const struct acpi_device_id i2c_amd_acpi_match[] = {
{ "AMDI0011" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index ce8c4846b7fa..cc5a26637fd5 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+/* precondition: bus.lock has been acquired. */
+static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+{
+ bus->master_state = ASPEED_I2C_MASTER_STOP;
+ writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+}
+
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
unsigned long time_left, flags;
@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
command);
reinit_completion(&bus->cmd_complete);
- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+ aspeed_i2c_do_stop(bus);
spin_unlock_irqrestore(&bus->lock, flags);
time_left = wait_for_completion_timeout(
@@ -391,13 +398,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
}
/* precondition: bus.lock has been acquired. */
-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
-{
- bus->master_state = ASPEED_I2C_MASTER_STOP;
- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
-}
-
-/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
if (bus->msgs_index + 1 < bus->msgs_count) {
@@ -991,7 +991,7 @@ static const struct of_device_id aspeed_i2c_bus_of_table[] = {
.compatible = "aspeed,ast2600-i2c-bus",
.data = aspeed_i2c_25xx_get_clk_reg_val,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index e8a688d04aee..080204182bb5 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -20,12 +20,17 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/swab.h>
#include <linux/types.h>
#include <linux/units.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW_COMMON
+
#include "i2c-designware-core.h"
static char *abort_sources[] = {
@@ -188,7 +193,7 @@ static const u32 supported_speeds[] = {
I2C_MAX_STANDARD_MODE_FREQ,
};
-int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
+static int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
{
struct i2c_timings *t = &dev->timings;
unsigned int i;
@@ -208,7 +213,44 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(i2c_dw_validate_speed);
+
+#ifdef CONFIG_OF
+
+#include <linux/platform_device.h>
+
+#define MSCC_ICPU_CFG_TWI_DELAY 0x0
+#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0)
+#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4
+
+static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev)
+{
+ writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE,
+ dev->ext + MSCC_ICPU_CFG_TWI_DELAY);
+
+ return 0;
+}
+
+static void i2c_dw_of_configure(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ switch (dev->flags & MODEL_MASK) {
+ case MODEL_MSCC_OCELOT:
+ dev->ext = devm_platform_ioremap_resource(pdev, 1);
+ if (!IS_ERR(dev->ext))
+ dev->set_sda_hold_time = mscc_twi_set_sda_hold_time;
+ break;
+ default:
+ break;
+ }
+}
+
+#else /* CONFIG_OF */
+
+static inline void i2c_dw_of_configure(struct device *device) { }
+
+#endif /* CONFIG_OF */
#ifdef CONFIG_ACPI
@@ -255,7 +297,7 @@ static void i2c_dw_acpi_params(struct device *device, char method[],
kfree(buf.pointer);
}
-int i2c_dw_acpi_configure(struct device *device)
+static void i2c_dw_acpi_configure(struct device *device)
{
struct dw_i2c_dev *dev = dev_get_drvdata(device);
struct i2c_timings *t = &dev->timings;
@@ -285,10 +327,7 @@ int i2c_dw_acpi_configure(struct device *device)
dev->sda_hold_time = fs_ht;
break;
}
-
- return 0;
}
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
{
@@ -310,11 +349,13 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
#else /* CONFIG_ACPI */
+static inline void i2c_dw_acpi_configure(struct device *device) { }
+
static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
#endif /* CONFIG_ACPI */
-void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+static void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
{
u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
struct i2c_timings *t = &dev->timings;
@@ -330,10 +371,47 @@ void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
else
t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
}
-EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
-u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
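+/* parse firmware-provided timings and apply OF- or ACPI-specific configuration */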
+int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev)
+{
+ struct i2c_timings *t = &dev->timings;
+ struct device *device = dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(device);
+
+ i2c_parse_fw_timings(device, t, false);
+
+ i2c_dw_adjust_bus_speed(dev);
+
+ if (is_of_node(fwnode))
+ i2c_dw_of_configure(device);
+ else if (is_acpi_node(fwnode))
+ i2c_dw_acpi_configure(device);
+
+ return i2c_dw_validate_speed(dev);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_fw_parse_and_configure);
+
+static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg)
+{
+ u32 val;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return 0;
+
+ ret = regmap_read(dev->map, reg, &val);
+ i2c_dw_release_lock(dev);
+
+ return ret ? 0 : val;
+}
+
+u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tSYMBOL, u32 tf, int cond, int offset)
{
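+ /* without a known clock rate, return the currently programmed register value */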
+ if (!ic_clk)
+ return i2c_dw_read_scl_reg(dev, reg);
+
/*
* DesignWare I2C core doesn't seem to have solid strategy to meet
* the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec
@@ -372,8 +450,12 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
3 + offset;
}
-u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tLOW, u32 tf, int offset)
{
+ if (!ic_clk)
+ return i2c_dw_read_scl_reg(dev, reg);
+
/*
* Conditional expression:
*
@@ -653,6 +735,84 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
i2c_dw_release_lock(dev);
}
+EXPORT_SYMBOL_GPL(i2c_dw_disable);
+
+int i2c_dw_probe(struct dw_i2c_dev *dev)
+{
+ device_set_node(&dev->adapter.dev, dev_fwnode(dev->dev));
+
+ switch (dev->mode) {
+ case DW_IC_SLAVE:
+ return i2c_dw_probe_slave(dev);
+ case DW_IC_MASTER:
+ return i2c_dw_probe_master(dev);
+ default:
+ dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(i2c_dw_probe);
+
+static int i2c_dw_prepare(struct device *device)
+{
+ /*
+ * If the ACPI companion device object is present for this device,
+ * it may be accessed during suspend and resume of other devices via
+ * I2C operation regions, so tell the PM core and middle layers to
+ * avoid skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(device);
+}
+
+static int i2c_dw_runtime_suspend(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ if (dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(dev);
+ i2c_dw_prepare_clk(dev, false);
+
+ return 0;
+}
+
+static int i2c_dw_suspend(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ i2c_mark_adapter_suspended(&dev->adapter);
+
+ return i2c_dw_runtime_suspend(device);
+}
+
+static int i2c_dw_runtime_resume(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ if (!dev->shared_with_punit)
+ i2c_dw_prepare_clk(dev, true);
+
+ dev->init(dev);
+
+ return 0;
+}
+
+static int i2c_dw_resume(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ i2c_dw_runtime_resume(device);
+ i2c_mark_adapter_resumed(&dev->adapter);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(i2c_dw_dev_pm_ops) = {
+ .prepare = pm_sleep_ptr(i2c_dw_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(i2c_dw_suspend, i2c_dw_resume)
+ RUNTIME_PM_OPS(i2c_dw_runtime_suspend, i2c_dw_runtime_resume, NULL)
+};
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index e9606c00b8d1..1ac2afd03a0a 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -10,11 +10,10 @@
*/
#include <linux/bits.h>
-#include <linux/compiler_types.h>
#include <linux/completion.h>
-#include <linux/dev_printk.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -237,7 +236,6 @@ struct reset_control;
* @semaphore_idx: Index of table with semaphore type attached to the bus. It's
* -1 if there is no semaphore.
* @shared_with_punit: true if this bus is shared with the SoCs PUNIT
- * @disable: function to disable the controller
* @init: function to initialize the I2C hardware
* @set_sda_hold_time: callback to retrieve IP specific SDA hold timing
* @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
@@ -295,7 +293,6 @@ struct dw_i2c_dev {
void (*release_lock)(void);
int semaphore_idx;
bool shared_with_punit;
- void (*disable)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
int (*set_sda_hold_time)(struct dw_i2c_dev *dev);
int mode;
@@ -329,8 +326,10 @@ struct i2c_dw_semaphore_callbacks {
};
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
-u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
-u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tSYMBOL, u32 tf, int cond, int offset);
+u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tLOW, u32 tf, int offset);
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev);
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
@@ -340,7 +339,8 @@ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev);
int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev);
int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev);
u32 i2c_dw_func(struct i2c_adapter *adap);
-void i2c_dw_disable(struct dw_i2c_dev *dev);
+
+extern const struct dev_pm_ops i2c_dw_dev_pm_ops;
static inline void __i2c_dw_enable(struct dw_i2c_dev *dev)
{
@@ -373,6 +373,7 @@ static inline void __i2c_dw_read_intr_mask(struct dw_i2c_dev *dev,
}
void __i2c_dw_disable(struct dw_i2c_dev *dev);
+void i2c_dw_disable(struct dw_i2c_dev *dev);
extern void i2c_dw_configure_master(struct dw_i2c_dev *dev);
extern int i2c_dw_probe_master(struct dw_i2c_dev *dev);
@@ -385,19 +386,6 @@ static inline void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { }
static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; }
#endif
-static inline int i2c_dw_probe(struct dw_i2c_dev *dev)
-{
- switch (dev->mode) {
- case DW_IC_SLAVE:
- return i2c_dw_probe_slave(dev);
- case DW_IC_MASTER:
- return i2c_dw_probe_master(dev);
- default:
- dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode);
- return -EINVAL;
- }
-}
-
static inline void i2c_dw_configure(struct dw_i2c_dev *dev)
{
if (i2c_detect_slave_mode(dev->dev))
@@ -406,6 +394,8 @@ static inline void i2c_dw_configure(struct dw_i2c_dev *dev)
i2c_dw_configure_master(dev);
}
+int i2c_dw_probe(struct dw_i2c_dev *dev);
+
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
#endif
@@ -414,11 +404,4 @@ int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev);
#endif
-int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
-void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
-
-#if IS_ENABLED(CONFIG_ACPI)
-int i2c_dw_acpi_configure(struct device *device);
-#else
-static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-#endif
+int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index c7e56002809a..e46f1b22c360 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -22,6 +22,8 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
+
#include "i2c-designware-core.h"
#define AMD_TIMEOUT_MIN_US 25
@@ -64,13 +66,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if (!dev->ss_hcnt || !dev->ss_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_SS_SCL_HCNT,
+ ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->ss_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_SS_SCL_LCNT,
+ ic_clk,
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
@@ -94,13 +100,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
} else {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_FS_SCL_HCNT,
+ ic_clk,
260, /* tHIGH = 260 ns */
sda_falling_time,
0, /* DW default */
0); /* No offset */
dev->fs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_FS_SCL_LCNT,
+ ic_clk,
500, /* tLOW = 500 ns */
scl_falling_time,
0); /* No offset */
@@ -114,13 +124,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if (!dev->fs_hcnt || !dev->fs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_FS_SCL_HCNT,
+ ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->fs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_FS_SCL_LCNT,
+ ic_clk,
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
@@ -142,13 +156,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->hs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_HS_SCL_HCNT,
+ ic_clk,
160, /* tHIGH = 160 ns */
sda_falling_time,
0, /* DW default */
0); /* No offset */
dev->hs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_HS_SCL_LCNT,
+ ic_clk,
320, /* tLOW = 320 ns */
scl_falling_time,
0); /* No offset */
@@ -931,7 +949,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
init_completion(&dev->cmd_complete);
dev->init = i2c_dw_init_master;
- dev->disable = i2c_dw_disable;
ret = i2c_dw_init_regmap(dev);
if (ret)
@@ -1021,3 +1038,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index a1b379a1e904..7b2c5d71a7fc 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -9,7 +9,6 @@
* Copyright (C) 2009 Provigent Ltd.
* Copyright (C) 2011, 2015, 2016 Intel Corporation.
*/
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -19,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/sched.h>
@@ -102,7 +102,7 @@ static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
switch (pdev->device) {
case 0x0817:
@@ -152,7 +152,7 @@ static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
dev->flags |= MODEL_AMD_NAVI_GPU | ACCESS_POLLING;
dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
@@ -194,47 +194,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
},
};
-static int __maybe_unused i2c_dw_pci_runtime_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i_dev->disable(i_dev);
- return 0;
-}
-
-static int __maybe_unused i2c_dw_pci_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i_dev->adapter);
-
- return i2c_dw_pci_runtime_suspend(dev);
-}
-
-static int __maybe_unused i2c_dw_pci_runtime_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- return i_dev->init(i_dev);
-}
-
-static int __maybe_unused i2c_dw_pci_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- int ret;
-
- ret = i2c_dw_pci_runtime_resume(dev);
-
- i2c_mark_adapter_resumed(&i_dev->adapter);
-
- return ret;
-}
-
-static const struct dev_pm_ops i2c_dw_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume)
- SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL)
-};
-
static const struct property_entry dgpu_properties[] = {
/* USB-C doesn't power the system */
PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
@@ -253,7 +212,6 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
int r;
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
- struct i2c_timings *t;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
return dev_err_probe(&pdev->dev, -EINVAL,
@@ -288,29 +246,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
- t = &dev->timings;
- i2c_parse_fw_timings(&pdev->dev, t, false);
-
pci_set_drvdata(pdev, dev);
if (controller->setup) {
r = controller->setup(pdev, controller);
- if (r) {
- pci_free_irq_vectors(pdev);
+ if (r)
return r;
- }
}
- i2c_dw_adjust_bus_speed(dev);
-
- if (has_acpi_companion(&pdev->dev))
- i2c_dw_acpi_configure(&pdev->dev);
-
- r = i2c_dw_validate_speed(dev);
- if (r) {
- pci_free_irq_vectors(pdev);
+ r = i2c_dw_fw_parse_and_configure(dev);
+ if (r)
return r;
- }
i2c_dw_configure(dev);
@@ -326,14 +272,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = 0;
- ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->nr = controller->bus_num;
r = i2c_dw_probe(dev);
- if (r) {
- pci_free_irq_vectors(pdev);
+ if (r)
return r;
- }
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
@@ -354,16 +297,15 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
- dev->disable(dev);
+ i2c_dw_disable(dev);
+
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
i2c_del_adapter(&dev->adapter);
- devm_free_irq(&pdev->dev, dev->irq, dev);
- pci_free_irq_vectors(pdev);
}
-static const struct pci_device_id i2_designware_pci_ids[] = {
+static const struct pci_device_id i2c_designware_pci_ids[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0817), medfield },
{ PCI_VDEVICE(INTEL, 0x0818), medfield },
@@ -409,21 +351,23 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(ATI, 0x73c4), navi_amd },
{ PCI_VDEVICE(ATI, 0x7444), navi_amd },
{ PCI_VDEVICE(ATI, 0x7464), navi_amd },
- { 0,}
+ {}
};
-MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids);
static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
- .id_table = i2_designware_pci_ids,
.probe = i2c_dw_pci_probe,
.remove = i2c_dw_pci_remove,
.driver = {
- .pm = &i2c_dw_pm_ops,
+ .pm = pm_ptr(&i2c_dw_dev_pm_ops),
},
+ .id_table = i2c_designware_pci_ids,
};
module_pci_driver(dw_i2c_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW);
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index df3dc1e8093e..2d0c7348e491 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -8,7 +8,6 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
-#include <linux/acpi.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -21,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -30,7 +28,6 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/suspend.h>
#include <linux/units.h>
#include "i2c-designware-core.h"
@@ -40,29 +37,6 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk) / KILO;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw_i2c_acpi_match[] = {
- { "INT33C2", 0 },
- { "INT33C3", 0 },
- { "INT3432", 0 },
- { "INT3433", 0 },
- { "INTC10EF", 0 },
- { "80860F41", ACCESS_NO_IRQ_SUSPEND },
- { "808622C1", ACCESS_NO_IRQ_SUSPEND },
- { "AMD0010", ACCESS_INTR_MASK },
- { "AMDI0010", ACCESS_INTR_MASK },
- { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE },
- { "AMDI0510", 0 },
- { "APMC0D0F", 0 },
- { "HISI02A1", 0 },
- { "HISI02A2", 0 },
- { "HISI02A3", 0 },
- { "HYGO0010", ACCESS_INTR_MASK },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
-#endif
-
#ifdef CONFIG_OF
#define BT1_I2C_CTL 0x100
#define BT1_I2C_CTL_ADDR_MASK GENMASK(7, 0)
@@ -120,53 +94,11 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
dev->map = devm_regmap_init(dev->dev, NULL, dev, &bt1_i2c_cfg);
return PTR_ERR_OR_ZERO(dev->map);
}
-
-#define MSCC_ICPU_CFG_TWI_DELAY 0x0
-#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0)
-#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4
-
-static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev)
-{
- writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE,
- dev->ext + MSCC_ICPU_CFG_TWI_DELAY);
-
- return 0;
-}
-
-static int dw_i2c_of_configure(struct platform_device *pdev)
-{
- struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
-
- switch (dev->flags & MODEL_MASK) {
- case MODEL_MSCC_OCELOT:
- dev->ext = devm_platform_ioremap_resource(pdev, 1);
- if (!IS_ERR(dev->ext))
- dev->set_sda_hold_time = mscc_twi_set_sda_hold_time;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static const struct of_device_id dw_i2c_of_match[] = {
- { .compatible = "snps,designware-i2c", },
- { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT },
- { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 },
- {},
-};
-MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#else
static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
{
return -ENODEV;
}
-
-static inline int dw_i2c_of_configure(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif
static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev)
@@ -238,11 +170,9 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
int i = 0;
int ret;
- ptr = i2c_dw_semaphore_cb_table;
-
dev->semaphore_idx = -1;
- while (ptr->probe) {
+ for (ptr = i2c_dw_semaphore_cb_table; ptr->probe; ptr++) {
ret = ptr->probe(dev);
if (ret) {
/*
@@ -254,7 +184,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
return ret;
i++;
- ptr++;
continue;
}
@@ -278,7 +207,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
- struct i2c_timings *t;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -307,18 +235,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
reset_control_deassert(dev->rst);
- t = &dev->timings;
- i2c_parse_fw_timings(&pdev->dev, t, false);
-
- i2c_dw_adjust_bus_speed(dev);
-
- if (pdev->dev.of_node)
- dw_i2c_of_configure(pdev);
-
- if (has_acpi_companion(&pdev->dev))
- i2c_dw_acpi_configure(&pdev->dev);
-
- ret = i2c_dw_validate_speed(dev);
+ ret = i2c_dw_fw_parse_and_configure(dev);
if (ret)
goto exit_reset;
@@ -346,6 +263,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
if (dev->clk) {
+ struct i2c_timings *t = &dev->timings;
u64 clk_khz;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
@@ -360,8 +278,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
- ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
- adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
@@ -408,7 +324,7 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
- dev->disable(dev);
+ i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
@@ -419,66 +335,34 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
reset_control_assert(dev->rst);
}
-static int dw_i2c_plat_prepare(struct device *dev)
-{
- /*
- * If the ACPI companion device object is present for this device, it
- * may be accessed during suspend and resume of other devices via I2C
- * operation regions, so tell the PM core and middle layers to avoid
- * skipping system suspend/resume callbacks for it in that case.
- */
- return !has_acpi_companion(dev);
-}
-
-static int dw_i2c_plat_runtime_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- if (i_dev->shared_with_punit)
- return 0;
-
- i_dev->disable(i_dev);
- i2c_dw_prepare_clk(i_dev, false);
-
- return 0;
-}
-
-static int dw_i2c_plat_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i_dev->adapter);
-
- return dw_i2c_plat_runtime_suspend(dev);
-}
-
-static int dw_i2c_plat_runtime_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- if (!i_dev->shared_with_punit)
- i2c_dw_prepare_clk(i_dev, true);
-
- i_dev->init(i_dev);
-
- return 0;
-}
-
-static int dw_i2c_plat_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- dw_i2c_plat_runtime_resume(dev);
- i2c_mark_adapter_resumed(&i_dev->adapter);
-
- return 0;
-}
+static const struct of_device_id dw_i2c_of_match[] = {
+ { .compatible = "snps,designware-i2c", },
+ { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT },
+ { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
-static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
- .prepare = pm_sleep_ptr(dw_i2c_plat_prepare),
- LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+static const struct acpi_device_id dw_i2c_acpi_match[] = {
+ { "80860F41", ACCESS_NO_IRQ_SUSPEND },
+ { "808622C1", ACCESS_NO_IRQ_SUSPEND },
+ { "AMD0010", ACCESS_INTR_MASK },
+ { "AMDI0010", ACCESS_INTR_MASK },
+ { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE },
+ { "AMDI0510", 0 },
+ { "APMC0D0F", 0 },
+ { "HISI02A1", 0 },
+ { "HISI02A2", 0 },
+ { "HISI02A3", 0 },
+ { "HYGO0010", ACCESS_INTR_MASK },
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { "INT3432", 0 },
+ { "INT3433", 0 },
+ { "INTC10EF", 0 },
+ {}
};
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
static const struct platform_device_id dw_i2c_platform_ids[] = {
{ "i2c_designware" },
@@ -491,9 +375,9 @@ static struct platform_driver dw_i2c_driver = {
.remove_new = dw_i2c_plat_remove,
.driver = {
.name = "i2c_designware",
- .of_match_table = of_match_ptr(dw_i2c_of_match),
- .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = pm_ptr(&dw_i2c_dev_pm_ops),
+ .of_match_table = dw_i2c_of_match,
+ .acpi_match_table = dw_i2c_acpi_match,
+ .pm = pm_ptr(&i2c_dw_dev_pm_ops),
},
.id_table = dw_i2c_platform_ids,
};
@@ -513,3 +397,5 @@ module_exit(dw_i2c_exit_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW);
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 78e2c47e3d7d..7035296aa24c 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -16,6 +16,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
+
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
@@ -88,7 +90,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter);
regmap_write(dev->map, DW_IC_INTR_MASK, 0);
- dev->disable(dev);
+ i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
pm_runtime_put(dev->dev);
@@ -235,7 +237,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
int ret;
dev->init = i2c_dw_init_slave;
- dev->disable = i2c_dw_disable;
ret = i2c_dw_init_regmap(dev);
if (ret)
@@ -279,3 +280,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_slave);
MODULE_AUTHOR("Luis Oliveira <lolivei@synopsys.com>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3e6b80e59b90..3dc5a46698fc 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -357,7 +357,7 @@ static void dc_i2c_remove(struct platform_device *pdev)
static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, dc_i2c_match);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 557409410445..d08be3f3cede 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -67,7 +67,6 @@ struct em_i2c_device {
void __iomem *base;
struct i2c_adapter adap;
struct completion msg_done;
- struct clk *sclk;
struct i2c_client *slave;
int irq;
};
@@ -361,6 +360,7 @@ static const struct i2c_algorithm em_i2c_algo = {
static int em_i2c_probe(struct platform_device *pdev)
{
struct em_i2c_device *priv;
+ struct clk *sclk;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -373,13 +373,9 @@ static int em_i2c_probe(struct platform_device *pdev)
strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
- priv->sclk = devm_clk_get(&pdev->dev, "sclk");
- if (IS_ERR(priv->sclk))
- return PTR_ERR(priv->sclk);
-
- ret = clk_prepare_enable(priv->sclk);
- if (ret)
- return ret;
+ sclk = devm_clk_get_enabled(&pdev->dev, "sclk");
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
priv->adap.timeout = msecs_to_jiffies(100);
priv->adap.retries = 5;
@@ -397,26 +393,22 @@ static int em_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_clk;
+ return ret;
priv->irq = ret;
+
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
- goto err_clk;
+ return ret;
ret = i2c_add_adapter(&priv->adap);
-
if (ret)
- goto err_clk;
+ return ret;
dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
priv->irq);
return 0;
-
-err_clk:
- clk_disable_unprepare(priv->sclk);
- return ret;
}
static void em_i2c_remove(struct platform_device *dev)
@@ -424,7 +416,6 @@ static void em_i2c_remove(struct platform_device *dev)
struct em_i2c_device *priv = platform_get_drvdata(dev);
i2c_del_adapter(&priv->adap);
- clk_disable_unprepare(priv->sclk);
}
static const struct of_device_id em_i2c_ids[] = {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 328c0dab6b14..299fe9d3afab 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1763,8 +1763,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i801_add_tco(priv);
+ /*
+ * adapter.name is used by platform code to find the main I801 adapter
+ * to instantiate i2c_clients, do not change.
+ */
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
- "SMBus I801 adapter at %04lx", priv->smba);
+ "SMBus %s adapter at %04lx",
+ (priv->features & FEATURE_IDF) ? "I801 IDF" : "I801",
+ priv->smba);
+
err = i2c_add_adapter(&priv->adapter);
if (err) {
platform_device_unregister(priv->tco_pdev);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 0197786892a2..976d43f73f38 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -559,7 +559,7 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
static const struct of_device_id lpi2c_imx_of_match[] = {
{ .compatible = "fsl,imx7ulp-lpi2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 3842e527116b..98539313cbc9 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -687,7 +687,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
i2c_imx_bus_busy(i2c_imx, 0, atomic);
/* Disable I2C controller */
- temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+ temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
@@ -1549,7 +1549,7 @@ static void i2c_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev)
+static int i2c_imx_runtime_suspend(struct device *dev)
{
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
@@ -1558,7 +1558,7 @@ static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused i2c_imx_runtime_resume(struct device *dev)
+static int i2c_imx_runtime_resume(struct device *dev)
{
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
@@ -1571,8 +1571,7 @@ static int __maybe_unused i2c_imx_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops i2c_imx_pm_ops = {
- SET_RUNTIME_PM_OPS(i2c_imx_runtime_suspend,
- i2c_imx_runtime_resume, NULL)
+ RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL)
};
static struct platform_driver i2c_imx_driver = {
@@ -1580,7 +1579,7 @@ static struct platform_driver i2c_imx_driver = {
.remove_new = i2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
- .pm = &i2c_imx_pm_ops,
+ .pm = pm_ptr(&i2c_imx_pm_ops),
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 33dbc19d3848..f59158489ad9 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -99,8 +99,7 @@ static int sch_transaction(void)
if (retries > MAX_RETRIES) {
dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
- }
- if (temp & 0x04) {
+ } else if (temp & 0x04) {
result = -EIO;
dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 655b5d851c48..c93c02aa6ac8 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -382,6 +382,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
}
/**
+ * ismt_kill_transaction() - kill current transaction
+ * @priv: iSMT private data
+ */
+static void ismt_kill_transaction(struct ismt_priv *priv)
+{
+ writel(ISMT_GCTRL_KILL, priv->smba + ISMT_GR_GCTRL);
+}
+
+/**
* ismt_access() - process an SMBus command
* @adap: the i2c host adapter
* @addr: address of the i2c/SMBus target
@@ -623,6 +632,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
+ ismt_kill_transaction(priv);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 4aafdfab6305..92cc5b091137 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -792,26 +792,22 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk))
return PTR_ERR(i2c->clk);
- ret = clk_prepare_enable(i2c->clk);
- if (ret)
- return ret;
-
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&clk_freq);
if (ret) {
dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
- goto err;
+ return ret;
}
i2c->speed = clk_freq / 1000;
if (i2c->speed == 0) {
ret = -EINVAL;
dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
- goto err;
+ return ret;
}
jz4780_i2c_set_speed(i2c);
@@ -827,29 +823,25 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err;
+ return ret;
i2c->irq = ret;
+
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret)
- goto err;
+ return ret;
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- clk_disable_unprepare(i2c->clk);
- return ret;
}
static void jz4780_i2c_remove(struct platform_device *pdev)
{
struct jz4780_i2c *i2c = platform_get_drvdata(pdev);
- clk_disable_unprepare(i2c->clk);
i2c_del_adapter(&i2c->adap);
}
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
new file mode 100644
index 000000000000..759732a07ef0
--- /dev/null
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation Gmbh 2024
+ *
+ * Driver for KEBA I2C controller FPGA IP core
+ */
+
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/misc/keba.h>
+
+#define KI2C "i2c-keba"
+
+#define KI2C_CAPABILITY_REG 0x02
+#define KI2C_CAPABILITY_CRYPTO 0x01
+#define KI2C_CAPABILITY_DC 0x02
+
+#define KI2C_CONTROL_REG 0x04
+#define KI2C_CONTROL_MEN 0x01
+#define KI2C_CONTROL_MSTA 0x02
+#define KI2C_CONTROL_RSTA 0x04
+#define KI2C_CONTROL_MTX 0x08
+#define KI2C_CONTROL_TXAK 0x10
+#define KI2C_CONTROL_DISABLE 0x00
+
+#define KI2C_CONTROL_DC_REG 0x05
+#define KI2C_CONTROL_DC_SDA 0x01
+#define KI2C_CONTROL_DC_SCL 0x02
+
+#define KI2C_STATUS_REG 0x08
+#define KI2C_STATUS_IN_USE 0x01
+#define KI2C_STATUS_ACK_CYC 0x02
+#define KI2C_STATUS_RXAK 0x04
+#define KI2C_STATUS_MCF 0x08
+
+#define KI2C_STATUS_DC_REG 0x09
+#define KI2C_STATUS_DC_SDA 0x01
+#define KI2C_STATUS_DC_SCL 0x02
+
+#define KI2C_DATA_REG 0x0c
+
+#define KI2C_INUSE_SLEEP_US (2 * USEC_PER_MSEC)
+#define KI2C_INUSE_TIMEOUT_US (10 * USEC_PER_SEC)
+
+#define KI2C_POLL_DELAY_US 5
+
+struct ki2c {
+ struct keba_i2c_auxdev *auxdev;
+ void __iomem *base;
+ struct i2c_adapter adapter;
+
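+ /* clients created from the auxiliary device's board info */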
+ struct i2c_client **client;
+ int client_size;
+};
+
+static int ki2c_inuse_lock(struct ki2c *ki2c)
+{
+ u8 sts;
+ int ret;
+
+ /*
+ * The I2C controller has an IN_USE bit for locking access to the
+ * controller. This enables the use of the I2C controller by other,
+ * non-Linux processors.
+ *
+ * If the I2C controller is free, then the first read returns
+ * IN_USE == 0. After that the I2C controller is locked and further
+ * reads of IN_USE return 1.
+ *
+ * The I2C controller is unlocked by writing 1 into IN_USE.
+ *
+ * The IN_USE bit acts as a hardware semaphore for the I2C controller.
+ * Poll for the semaphore, but sleep while polling to free the CPU.
+ */
+ ret = readb_poll_timeout(ki2c->base + KI2C_STATUS_REG,
+ sts, (sts & KI2C_STATUS_IN_USE) == 0,
+ KI2C_INUSE_SLEEP_US, KI2C_INUSE_TIMEOUT_US);
+ if (ret)
+ dev_err(&ki2c->auxdev->auxdev.dev, "%s err!\n", __func__);
+
+ return ret;
+}
+
+static void ki2c_inuse_unlock(struct ki2c *ki2c)
+{
+ /* unlock the controller by writing 1 into IN_USE */
+ iowrite8(KI2C_STATUS_IN_USE, ki2c->base + KI2C_STATUS_REG);
+}
+
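+/* poll until a bit in mask is set; timeout is given in jiffies */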
+static int ki2c_wait_for_bit(void __iomem *addr, u8 mask, unsigned long timeout)
+{
+ u8 val;
+
+ return readb_poll_timeout(addr, val, (val & mask), KI2C_POLL_DELAY_US,
+ jiffies_to_usecs(timeout));
+}
+
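+/* wait for the controller to signal transfer complete (MCF) */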
+static int ki2c_wait_for_mcf(struct ki2c *ki2c)
+{
+ return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG, KI2C_STATUS_MCF,
+ ki2c->adapter.timeout);
+}
+
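+/* wait for transfer completion and for the ACK cycle to finish */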
+static int ki2c_wait_for_data(struct ki2c *ki2c)
+{
+ int ret;
+
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0)
+ return ret;
+
+ return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG,
+ KI2C_STATUS_ACK_CYC,
+ ki2c->adapter.timeout);
+}
+
+static int ki2c_wait_for_data_ack(struct ki2c *ki2c)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = ki2c_wait_for_data(ki2c);
+ if (ret < 0)
+ return ret;
+
+ /* RXAK == 0 means ACK received */
+ reg = ioread8(ki2c->base + KI2C_STATUS_REG);
+ if (reg & KI2C_STATUS_RXAK)
+ return -EIO;
+
+ return 0;
+}
+
+static int ki2c_has_capability(struct ki2c *ki2c, unsigned int cap)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_CAPABILITY_REG);
+
+ return (reg & cap) != 0;
+}
+
+static int ki2c_get_scl(struct ki2c *ki2c)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG);
+
+ /* capability KI2C_CAPABILITY_DC required */
+ return (reg & KI2C_STATUS_DC_SCL) != 0;
+}
+
+static int ki2c_get_sda(struct ki2c *ki2c)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG);
+
+ /* capability KI2C_CAPABILITY_DC required */
+ return (reg & KI2C_STATUS_DC_SDA) != 0;
+}
+
+static void ki2c_set_scl(struct ki2c *ki2c, int val)
+{
+ u8 control_dc;
+
+ /* capability KI2C_CAPABILITY_DC and KI2C_CONTROL_MEN = 0 required */
+ control_dc = ioread8(ki2c->base + KI2C_CONTROL_DC_REG);
+ if (val)
+ control_dc |= KI2C_CONTROL_DC_SCL;
+ else
+ control_dc &= ~KI2C_CONTROL_DC_SCL;
+ iowrite8(control_dc, ki2c->base + KI2C_CONTROL_DC_REG);
+}
+
+/*
+ * Resetting bus bitwise is done by checking SDA and applying clock cycles as
+ * long as SDA is low. 9 clock cycles are applied at most.
+ *
+ * Clock cycles are generated and udelay() determines the duration of clock
+ * cycles. The generated clock rate is 100 kHz, so the duration of each clock
+ * level is: delay in ns = (10^6 / 100) / 2
+ */
+#define KI2C_RECOVERY_CLK_CNT (9 * 2)
+#define KI2C_RECOVERY_UDELAY 5
+static int ki2c_reset_bus_bitwise(struct ki2c *ki2c)
+{
+ int val = 1;
+ int ret = 0;
+ int i;
+
+ /* disable I2C controller (MEN = 0) to get direct access to SCL/SDA */
+ iowrite8(0, ki2c->base + KI2C_CONTROL_REG);
+
+ /* generate clock cycles */
+ ki2c_set_scl(ki2c, val);
+ udelay(KI2C_RECOVERY_UDELAY);
+ for (i = 0; i < KI2C_RECOVERY_CLK_CNT; i++) {
+ if (val) {
+ /* SCL shouldn't be low here */
+ if (!ki2c_get_scl(ki2c)) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "SCL is stuck low!\n");
+ ret = -EBUSY;
+ break;
+ }
+
+ /* break if SDA is high */
+ if (ki2c_get_sda(ki2c))
+ break;
+ }
+
+ val = !val;
+ ki2c_set_scl(ki2c, val);
+ udelay(KI2C_RECOVERY_UDELAY);
+ }
+
+ if (!ki2c_get_sda(ki2c)) {
+ dev_err(&ki2c->auxdev->auxdev.dev, "SDA is still low!\n");
+ ret = -EBUSY;
+ }
+
+ /* reenable controller */
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+
+ return ret;
+}
+
+/*
+ * Resetting bus bytewise is done by writing start bit, 9 data bits and stop
+ * bit.
+ *
+ * This is not 100% safe. If the target is an EEPROM and a write access was
+ * interrupted during the ACK cycle, this approach might not be able to recover
+ * the bus. The reason is that after the 9 clock cycles the EEPROM will be in
+ * the ACK cycle again and will hold SDA low like it did before the start of
+ * the routine. Furthermore, one additional 0xff byte might get written into
+ * the EEPROM. Thus, use the bitwise approach whenever possible, especially when
+ * EEPROMs are on the bus.
+ */
+static int ki2c_reset_bus_bytewise(struct ki2c *ki2c)
+{
+ int ret;
+
+ /* hold data line high for 9 clock cycles */
+ iowrite8(0xFF, ki2c->base + KI2C_DATA_REG);
+
+ /* create start condition */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev, "Start condition failed\n");
+
+ return ret;
+ }
+
+ /* create stop condition */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0)
+ dev_err(&ki2c->auxdev->auxdev.dev, "Stop condition failed\n");
+
+ return ret;
+}
+
+static int ki2c_reset_bus(struct ki2c *ki2c)
+{
+ int ret;
+
+ ret = ki2c_inuse_lock(ki2c);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the I2C controller is capable of direct control of SCL/SDA, then a
+ * bitwise reset is used. Otherwise fall back to bytewise reset.
+ */
+ if (ki2c_has_capability(ki2c, KI2C_CAPABILITY_DC))
+ ret = ki2c_reset_bus_bitwise(ki2c);
+ else
+ ret = ki2c_reset_bus_bytewise(ki2c);
+
+ ki2c_inuse_unlock(ki2c);
+
+ return ret;
+}
+
+static void ki2c_write_target_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ u8 addr;
+
+ addr = m->addr << 1;
+ /* Bit 0 signals RD/WR */
+ if (m->flags & I2C_M_RD)
+ addr |= 0x01;
+
+ iowrite8(addr, ki2c->base + KI2C_DATA_REG);
+}
+
+static int ki2c_start_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ int ret;
+
+ /*
+ * Store target address byte in the controller. This has to be done
+ * before sending START condition.
+ */
+ ki2c_write_target_addr(ki2c, m);
+
+ /* enable controller for TX */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ /* send START condition and target address byte */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ /*
+ * For EEPROMs this is normal behavior during an internal write
+ * operation.
+ */
+ dev_dbg(&ki2c->auxdev->auxdev.dev,
+ "%s wait for ACK err at 0x%02x!\n", __func__, m->addr);
+
+ return ret;
+}
+
+static int ki2c_repstart_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ int ret;
+
+ /* repeated start and write is not supported */
+ if ((m->flags & I2C_M_RD) == 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "Repeated start not supported for writes\n");
+ return -EINVAL;
+ }
+
+ /* send repeated start */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_RSTA,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "%s wait for MCF err at 0x%02x!\n", __func__, m->addr);
+ return ret;
+ }
+
+ /* write target-address byte */
+ ki2c_write_target_addr(ki2c, m);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "%s wait for ACK err at 0x%02x!\n", __func__, m->addr);
+
+ return ret;
+}
+
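+/* generate STOP by dropping MSTA while keeping the controller enabled */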
+static void ki2c_stop(struct ki2c *ki2c)
+{
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+ ki2c_wait_for_mcf(ki2c);
+}
+
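+/* write len bytes, waiting for an ACK after each byte */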
+static int ki2c_write(struct ki2c *ki2c, const u8 *data, int len)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* write data byte */
+ iowrite8(data[i], ki2c->base + KI2C_DATA_REG);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ki2c_read(struct ki2c *ki2c, u8 *data, int len)
+{
+ u8 control;
+ int ret;
+ int i;
+
+ if (len == 0)
+ return 0; /* nothing to do */
+
+ control = KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA;
+
+ /* if just one byte => send tx-nack after transfer */
+ if (len == 1)
+ control |= KI2C_CONTROL_TXAK;
+
+ iowrite8(control, ki2c->base + KI2C_CONTROL_REG);
+
+ /* dummy read to start transfer on bus */
+ ioread8(ki2c->base + KI2C_DATA_REG);
+
+ for (i = 0; i < len; i++) {
+ ret = ki2c_wait_for_data(ki2c);
+ if (ret < 0)
+ return ret;
+
+ if (i == len - 2)
+ /* send tx-nack after transfer of last byte */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ else if (i == len - 1)
+ /*
+ * switch to TX on last byte, so that reading DATA
+ * register does not trigger another read transfer
+ */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_MTX,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ /* read byte and start next transfer (if not last byte) */
+ data[i] = ioread8(ki2c->base + KI2C_DATA_REG);
+ }
+
+ return len;
+}
+
+static int ki2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct ki2c *ki2c = i2c_get_adapdata(adap);
+ int ret;
+ int i;
+
+ ret = ki2c_inuse_lock(ki2c);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *m = &msgs[i];
+
+ if (i == 0)
+ ret = ki2c_start_addr(ki2c, m);
+ else
+ ret = ki2c_repstart_addr(ki2c, m);
+ if (ret < 0)
+ break;
+
+ if (m->flags & I2C_M_RD)
+ ret = ki2c_read(ki2c, m->buf, m->len);
+ else
+ ret = ki2c_write(ki2c, m->buf, m->len);
+ if (ret < 0)
+ break;
+ }
+
+ ki2c_stop(ki2c);
+
+ ki2c_inuse_unlock(ki2c);
+
+ return ret < 0 ? ret : num;
+}
+
+static void ki2c_unregister_devices(struct ki2c *ki2c)
+{
+ int i;
+
+ for (i = 0; i < ki2c->client_size; i++) {
+ struct i2c_client *client = ki2c->client[i];
+
+ if (client)
+ i2c_unregister_device(client);
+ }
+}
+
+static int ki2c_register_devices(struct ki2c *ki2c)
+{
+ struct i2c_board_info *info = ki2c->auxdev->info;
+ int i;
+
+ /* register all known I2C devices */
+ for (i = 0; i < ki2c->client_size; i++) {
+ struct i2c_client *client;
+ unsigned short const addr_list[2] = { info[i].addr,
+ I2C_CLIENT_END };
+
+ client = i2c_new_scanned_device(&ki2c->adapter, &info[i],
+ addr_list, NULL);
+ if (!IS_ERR(client)) {
+ ki2c->client[i] = client;
+ } else if (PTR_ERR(client) != -ENODEV) {
+ ki2c->client_size = i;
+ ki2c_unregister_devices(ki2c);
+
+ return PTR_ERR(client);
+ }
+ }
+
+ return 0;
+}
+
+static u32 ki2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm ki2c_algo = {
+ .master_xfer = ki2c_xfer,
+ .functionality = ki2c_func,
+};
+
+static int ki2c_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &auxdev->dev;
+ struct i2c_adapter *adap;
+ struct ki2c *ki2c;
+ int ret;
+
+ ki2c = devm_kzalloc(dev, sizeof(*ki2c), GFP_KERNEL);
+ if (!ki2c)
+ return -ENOMEM;
+ ki2c->auxdev = container_of(auxdev, struct keba_i2c_auxdev, auxdev);
+ ki2c->client = devm_kcalloc(dev, ki2c->auxdev->info_size,
+ sizeof(*ki2c->client), GFP_KERNEL);
+ if (!ki2c->client)
+ return -ENOMEM;
+ ki2c->client_size = ki2c->auxdev->info_size;
+ auxiliary_set_drvdata(auxdev, ki2c);
+
+ ki2c->base = devm_ioremap_resource(dev, &ki2c->auxdev->io);
+ if (IS_ERR(ki2c->base))
+ return PTR_ERR(ki2c->base);
+
+ adap = &ki2c->adapter;
+ strscpy(adap->name, "KEBA I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ adap->algo = &ki2c_algo;
+ adap->dev.parent = dev;
+
+ i2c_set_adapdata(adap, ki2c);
+
+ /* enable controller */
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+
+ /* reset bus before probing I2C devices */
+ ret = ki2c_reset_bus(ki2c);
+ if (ret)
+ goto out;
+
+ ret = devm_i2c_add_adapter(dev, adap);
+ if (ret) {
+ dev_err(dev, "Failed to add adapter (%d)!\n", ret);
+ goto out;
+ }
+
+ ret = ki2c_register_devices(ki2c);
+ if (ret) {
+ dev_err(dev, "Failed to register devices (%d)!\n", ret);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG);
+ return ret;
+}
+
+static void ki2c_remove(struct auxiliary_device *auxdev)
+{
+ struct ki2c *ki2c = auxiliary_get_drvdata(auxdev);
+
+ ki2c_unregister_devices(ki2c);
+
+ /* disable controller */
+ iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG);
+
+ auxiliary_set_drvdata(auxdev, NULL);
+}
+
+static const struct auxiliary_device_id ki2c_devtype_aux[] = {
+ { .name = "keba.i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, ki2c_devtype_aux);
+
+static struct auxiliary_driver ki2c_driver_aux = {
+ .name = KI2C,
+ .id_table = ki2c_devtype_aux,
+ .probe = ki2c_probe,
+ .remove = ki2c_remove,
+};
+module_auxiliary_driver(ki2c_driver_aux);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA I2C bus controller driver");
+MODULE_LICENSE("GPL");
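
For orientation, here is a hedged client-side sketch (not part of the patch) of the message pattern this xfer path serves. Since ki2c_repstart_addr() rejects repeated-start writes, the canonical write-then-read register access still works, because the second message is a read; msgs[0] takes the ki2c_start_addr() path and msgs[1] the repeated-start read path. The helper name is hypothetical.

#include <linux/i2c.h>

static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	/* i2c_transfer() returns the number of messages transferred */
	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}
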
diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c
index 0b70621cf9d3..1dc516ef0fdd 100644
--- a/drivers/i2c/busses/i2c-ljca.c
+++ b/drivers/i2c/busses/i2c-ljca.c
@@ -107,7 +107,7 @@ static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, u8 target_addr,
return 0;
}
-static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 target_addr)
+static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c)
{
struct ljca_i2c_rw_packet *w_packet =
(struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
@@ -178,7 +178,7 @@ static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 target_addr, u8 *data
if (!ret)
ret = ljca_i2c_pure_read(ljca_i2c, data, len);
- ljca_i2c_stop(ljca_i2c, target_addr);
+ ljca_i2c_stop(ljca_i2c);
return ret;
}
@@ -222,7 +222,7 @@ static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 target_addr,
if (!ret)
ret = ljca_i2c_pure_write(ljca_i2c, data, len);
- ljca_i2c_stop(ljca_i2c, target_addr);
+ ljca_i2c_stop(ljca_i2c);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 41d6c8ed163a..236d6b8ba867 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -88,7 +88,6 @@ struct mpc_i2c {
int irq;
u32 real_clk;
u8 fdr, dfsrr;
- struct clk *clk_per;
u32 cntl_bits;
enum mpc_i2c_action action;
struct i2c_msg *msgs;
@@ -779,7 +778,6 @@ static int fsl_i2c_probe(struct platform_device *op)
struct clk *clk;
int result;
u32 clock;
- int err;
i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -809,18 +807,12 @@ static int fsl_i2c_probe(struct platform_device *op)
 * enable clock for the I2C peripheral (non-fatal),
* keep a reference upon successful allocation
*/
- clk = devm_clk_get_optional(&op->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- err = clk_prepare_enable(clk);
- if (err) {
+ clk = devm_clk_get_optional_enabled(&op->dev, NULL);
+ if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to enable clock\n");
- return err;
+ return PTR_ERR(clk);
}
- i2c->clk_per = clk;
-
if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
@@ -876,14 +868,9 @@ static int fsl_i2c_probe(struct platform_device *op)
result = i2c_add_numbered_adapter(&i2c->adap);
if (result)
- goto fail_add;
+ return result;
return 0;
-
- fail_add:
- clk_disable_unprepare(i2c->clk_per);
-
- return result;
};
static void fsl_i2c_remove(struct platform_device *op)
@@ -891,8 +878,6 @@ static void fsl_i2c_remove(struct platform_device *op)
struct mpc_i2c *i2c = platform_get_drvdata(op);
i2c_del_adapter(&i2c->adap);
-
- clk_disable_unprepare(i2c->clk_per);
};
static int __maybe_unused mpc_i2c_suspend(struct device *dev)
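
As a side note on the conversion above: devm_clk_get_optional_enabled() folds the get, prepare_enable and the corresponding unwind into one devres-managed call, which is why clk_per and all the manual clk_disable_unprepare() calls could be dropped. A minimal sketch of the resulting probe pattern, with hypothetical names:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* clock is optional; NULL is returned if none is described */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to enable clock\n");

	/* no clk_disable_unprepare() needed in remove() or error paths */
	return 0;
}
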
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index a8b5719c3372..e0ba653dec2d 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1306,12 +1306,9 @@ err_exit:
static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
{
struct mtk_i2c *i2c = dev_id;
- u16 restart_flag = 0;
+ u16 restart_flag = i2c->auto_restart ? I2C_RS_TRANSFER : 0;
u16 intr_stat;
- if (i2c->auto_restart)
- restart_flag = I2C_RS_TRANSFER;
-
intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 2fe68615942e..bbcb4d6668ce 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -136,11 +136,13 @@ enum i2c_addr {
* Since the addr regs are sprinkled all over the address space,
 * use this array to get the address of each register.
*/
-#define I2C_NUM_OWN_ADDR 2
+#define I2C_NUM_OWN_ADDR 10
#define I2C_NUM_OWN_ADDR_SUPPORTED 2
static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
- NPCM_I2CADDR1, NPCM_I2CADDR2,
+ NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4,
+ NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8,
+ NPCM_I2CADDR9, NPCM_I2CADDR10,
};
#endif
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 35a3f0a64986..1d9ad25c89ae 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1261,7 +1261,7 @@ static const struct of_device_id omap_i2c_of_match[] = {
.compatible = "ti,omap2420-i2c",
.data = &omap2420_pdata,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
#endif
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 4e32d57ae0bf..febbd9950d8f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -146,7 +146,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
},
- { },
+ { }
};
/*
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index f448505d5468..1dafadda73af 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -721,7 +721,7 @@ static void i2c_pnx_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id i2c_pnx_of_match[] = {
{ .compatible = "nxp,pnx-i2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, i2c_pnx_of_match);
#endif
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6b3c6a733368..af2094720a4d 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -135,7 +135,7 @@ err_dev_add:
static const struct pci_device_id ce4100_i2c_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
- { },
+ { }
};
static struct pci_driver ce4100_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 031175113dd4..4d76e71cdd4b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -218,7 +218,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = {
{ "ce4100-i2c", REGS_CE4100 },
{ "pxa910-i2c", REGS_PXA910 },
{ "armada-3700-i2c", REGS_A3700 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 06e836e3e877..212336f724a6 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -721,7 +721,7 @@ static const struct i2c_algorithm geni_i2c_algo = {
static const struct acpi_device_id geni_i2c_acpi_match[] = {
{ "QCOM0220"},
{ "QCOM0411" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
#endif
@@ -818,15 +818,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
init_completion(&gi2c->done);
spin_lock_init(&gi2c->lock);
platform_set_drvdata(pdev, gi2c);
- ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
+ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
dev_name(dev), gi2c);
if (ret) {
dev_err(dev, "Request_irq failed:%d: err:%d\n",
gi2c->irq, ret);
return ret;
}
- /* Disable the interrupt so that the system can enter low-power mode */
- disable_irq(gi2c->irq);
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
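
The hunk above replaces the request-then-disable_irq() sequence with IRQF_NO_AUTOEN, which keeps the IRQ masked from the moment it is requested and closes the window where the handler could fire before disable_irq() ran. A minimal sketch of the idiom (handler name hypothetical):

	/* IRQ stays masked after request; enable it once the device is ready */
	ret = devm_request_irq(dev, irq, example_irq_handler, IRQF_NO_AUTOEN,
			       dev_name(dev), priv);
	if (ret)
		return ret;

	/* ... finish setup ... */
	enable_irq(irq);
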
@@ -986,21 +984,24 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
return ret;
ret = clk_prepare_enable(gi2c->core_clk);
- if (ret) {
- geni_icc_disable(&gi2c->se);
- return ret;
- }
+ if (ret)
+ goto out_icc_disable;
ret = geni_se_resources_on(&gi2c->se);
- if (ret) {
- clk_disable_unprepare(gi2c->core_clk);
- geni_icc_disable(&gi2c->se);
- return ret;
- }
+ if (ret)
+ goto out_clk_disable;
enable_irq(gi2c->irq);
gi2c->suspended = 0;
+
return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(gi2c->core_clk);
+out_icc_disable:
+ geni_icc_disable(&gi2c->se);
+
+ return ret;
}
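
The runtime-resume rework above is the standard unwind-ladder idiom: acquire resources in order, release them in reverse order on failure, with one shared exit path. A hedged sketch of the full ladder, using the names from the hunk and adding the first acquisition for completeness:

	ret = geni_icc_enable(&gi2c->se);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gi2c->core_clk);
	if (ret)
		goto out_icc_disable;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret)
		goto out_clk_disable;

	return 0;

out_clk_disable:
	clk_disable_unprepare(gi2c->core_clk);
out_icc_disable:
	geni_icc_disable(&gi2c->se);
	return ret;
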
static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 4a2c745751a2..d480162a4d39 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1648,7 +1648,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
static const struct acpi_device_id qup_i2c_acpi_match[] = {
{ "QCOM8010"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index da4b07c0ed4c..9267df38c2d0 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1164,11 +1164,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
rcar_i2c_init(priv);
rcar_i2c_reset_slave(priv);
- if (priv->devtype < I2C_RCAR_GEN3) {
- irqflags |= IRQF_NO_THREAD;
- irqhandler = rcar_i2c_gen2_irq;
- }
-
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
@@ -1178,8 +1173,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
- /* R-Car Gen3+ needs a reset before every transfer */
- if (priv->devtype >= I2C_RCAR_GEN3) {
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ irqflags |= IRQF_NO_THREAD;
+ irqhandler = rcar_i2c_gen2_irq;
+ } else {
+ /* R-Car Gen3+ needs a reset before every transfer */
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc)) {
ret = PTR_ERR(priv->rstc);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d6f585cdb7e5..c7f3a4c02470 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -63,6 +63,8 @@
#define ICMR3_ACKWP 0x10
#define ICMR3_ACKBT 0x08
+#define ICFER_FMPE 0x80
+
#define ICIER_TIE 0x80
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
@@ -80,6 +82,7 @@ enum riic_reg_list {
RIIC_ICCR2,
RIIC_ICMR1,
RIIC_ICMR3,
+ RIIC_ICFER,
RIIC_ICSER,
RIIC_ICIER,
RIIC_ICSR2,
@@ -91,7 +94,8 @@ enum riic_reg_list {
};
struct riic_of_data {
- u8 regs[RIIC_REG_END];
+ const u8 *regs;
+ bool fast_mode_plus;
};
struct riic_dev {
@@ -105,6 +109,8 @@ struct riic_dev {
struct completion msg_done;
struct i2c_adapter adapter;
struct clk *clk;
+ struct reset_control *rstc;
+ struct i2c_timings i2c_t;
};
struct riic_irq_desc {
@@ -131,11 +137,14 @@ static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
+ struct device *dev = adap->dev.parent;
unsigned long time_left;
- int i;
+ int i, ret;
u8 start_bit;
- pm_runtime_get_sync(adap->dev.parent);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
@@ -168,7 +177,8 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
out:
- pm_runtime_put(adap->dev.parent);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return riic->err ?: num;
}
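
For reference, the autosuspend handshake that riic_xfer() now follows, as a minimal sketch: resume before touching the hardware, then record activity and drop the reference so the device may suspend once the autosuspend delay (zero here) expires.

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* ... hardware access ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
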
@@ -298,21 +308,21 @@ static const struct i2c_algorithm riic_algo = {
.functionality = riic_func,
};
-static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
+static int riic_init_hw(struct riic_dev *riic)
{
- int ret = 0;
+ int ret;
unsigned long rate;
int total_ticks, cks, brl, brh;
+ struct i2c_timings *t = &riic->i2c_t;
+ struct device *dev = riic->adapter.dev.parent;
+ bool fast_mode_plus = riic->info->fast_mode_plus;
+ u32 max_freq = fast_mode_plus ? I2C_MAX_FAST_MODE_PLUS_FREQ
+ : I2C_MAX_FAST_MODE_FREQ;
- pm_runtime_get_sync(riic->adapter.dev.parent);
-
- if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). %d max\n",
- t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ);
- ret = -EINVAL;
- goto out;
- }
+ if (t->bus_freq_hz > max_freq)
+ return dev_err_probe(&riic->adapter.dev, -EINVAL,
+ "unsupported bus speed %uHz (%u max)\n",
+ t->bus_freq_hz, max_freq);
rate = clk_get_rate(riic->clk);
@@ -349,8 +359,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
brh = total_ticks - brl;
@@ -382,6 +391,10 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
t->scl_fall_ns / (1000000000 / rate),
t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
/* Changing the order of accessing IICRST and ICE may break things! */
riic_writeb(riic, ICCR1_IICRST | ICCR1_SOWP, RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
@@ -393,11 +406,14 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
riic_writeb(riic, 0, RIIC_ICSER);
riic_writeb(riic, ICMR3_ACKWP | ICMR3_RDRFS, RIIC_ICMR3);
+ if (fast_mode_plus && t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
+ riic_clear_set_bit(riic, 0, ICFER_FMPE, RIIC_ICFER);
+
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
-out:
- pm_runtime_put(riic->adapter.dev.parent);
- return ret;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
}
static struct riic_irq_desc riic_irqs[] = {
@@ -415,13 +431,12 @@ static void riic_reset_control_assert(void *data)
static int riic_i2c_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct riic_dev *riic;
struct i2c_adapter *adap;
- struct i2c_timings i2c_t;
- struct reset_control *rstc;
int i, ret;
- riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
+ riic = devm_kzalloc(dev, sizeof(*riic), GFP_KERNEL);
if (!riic)
return -ENOMEM;
@@ -429,22 +444,22 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
- riic->clk = devm_clk_get(&pdev->dev, NULL);
+ riic->clk = devm_clk_get(dev, NULL);
if (IS_ERR(riic->clk)) {
- dev_err(&pdev->dev, "missing controller clock");
+ dev_err(dev, "missing controller clock");
return PTR_ERR(riic->clk);
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc))
- return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(riic->rstc))
+ return dev_err_probe(dev, PTR_ERR(riic->rstc),
"Error: missing reset ctrl\n");
- ret = reset_control_deassert(rstc);
+ ret = reset_control_deassert(riic->rstc);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&pdev->dev, riic_reset_control_assert, rstc);
+ ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc);
if (ret)
return ret;
@@ -453,31 +468,34 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr,
+ ret = devm_request_irq(dev, ret, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
+ dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
}
}
- riic->info = of_device_get_match_data(&pdev->dev);
+ riic->info = of_device_get_match_data(dev);
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
init_completion(&riic->msg_done);
- i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+ i2c_parse_fw_timings(dev, &riic->i2c_t, true);
- pm_runtime_enable(&pdev->dev);
+	/* Default to 0 to save power. Can be overridden via sysfs for lower latency. */
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
- ret = riic_init_hw(riic, &i2c_t);
+ ret = riic_init_hw(riic);
if (ret)
goto out;
@@ -487,60 +505,127 @@ static int riic_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, riic);
- dev_info(&pdev->dev, "registered with %dHz bus speed\n",
- i2c_t.bus_freq_hz);
+ dev_info(dev, "registered with %dHz bus speed\n", riic->i2c_t.bus_freq_hz);
return 0;
out:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
return ret;
}
static void riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
- pm_runtime_get_sync(&pdev->dev);
- riic_writeb(riic, 0, RIIC_ICIER);
- pm_runtime_put(&pdev->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (!ret) {
+ riic_writeb(riic, 0, RIIC_ICIER);
+ pm_runtime_put(dev);
+ }
i2c_del_adapter(&riic->adapter);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
}
+static const u8 riic_rz_a_regs[RIIC_REG_END] = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x04,
+ [RIIC_ICMR1] = 0x08,
+ [RIIC_ICMR3] = 0x10,
+ [RIIC_ICFER] = 0x14,
+ [RIIC_ICSER] = 0x18,
+ [RIIC_ICIER] = 0x1c,
+ [RIIC_ICSR2] = 0x24,
+ [RIIC_ICBRL] = 0x34,
+ [RIIC_ICBRH] = 0x38,
+ [RIIC_ICDRT] = 0x3c,
+ [RIIC_ICDRR] = 0x40,
+};
+
static const struct riic_of_data riic_rz_a_info = {
- .regs = {
- [RIIC_ICCR1] = 0x00,
- [RIIC_ICCR2] = 0x04,
- [RIIC_ICMR1] = 0x08,
- [RIIC_ICMR3] = 0x10,
- [RIIC_ICSER] = 0x18,
- [RIIC_ICIER] = 0x1c,
- [RIIC_ICSR2] = 0x24,
- [RIIC_ICBRL] = 0x34,
- [RIIC_ICBRH] = 0x38,
- [RIIC_ICDRT] = 0x3c,
- [RIIC_ICDRR] = 0x40,
- },
+ .regs = riic_rz_a_regs,
+ .fast_mode_plus = true,
+};
+
+static const struct riic_of_data riic_rz_a1h_info = {
+ .regs = riic_rz_a_regs,
+};
+
+static const u8 riic_rz_v2h_regs[RIIC_REG_END] = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x01,
+ [RIIC_ICMR1] = 0x02,
+ [RIIC_ICMR3] = 0x04,
+ [RIIC_ICFER] = 0x05,
+ [RIIC_ICSER] = 0x06,
+ [RIIC_ICIER] = 0x07,
+ [RIIC_ICSR2] = 0x09,
+ [RIIC_ICBRL] = 0x10,
+ [RIIC_ICBRH] = 0x11,
+ [RIIC_ICDRT] = 0x12,
+ [RIIC_ICDRR] = 0x13,
};
static const struct riic_of_data riic_rz_v2h_info = {
- .regs = {
- [RIIC_ICCR1] = 0x00,
- [RIIC_ICCR2] = 0x01,
- [RIIC_ICMR1] = 0x02,
- [RIIC_ICMR3] = 0x04,
- [RIIC_ICSER] = 0x06,
- [RIIC_ICIER] = 0x07,
- [RIIC_ICSR2] = 0x09,
- [RIIC_ICBRL] = 0x10,
- [RIIC_ICBRH] = 0x11,
- [RIIC_ICDRT] = 0x12,
- [RIIC_ICDRR] = 0x13,
- },
+ .regs = riic_rz_v2h_regs,
+ .fast_mode_plus = true,
+};
+
+static int riic_i2c_suspend(struct device *dev)
+{
+ struct riic_dev *riic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ i2c_mark_adapter_suspended(&riic->adapter);
+
+ /* Disable output on SDA, SCL pins. */
+ riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync(dev);
+
+ return reset_control_assert(riic->rstc);
+}
+
+static int riic_i2c_resume(struct device *dev)
+{
+ struct riic_dev *riic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(riic->rstc);
+ if (ret)
+ return ret;
+
+ ret = riic_init_hw(riic);
+ if (ret) {
+ /*
+		 * If this happens, there is no way to recover from this
+		 * state and the driver will remain loaded, but we want to
+		 * avoid keeping the reset line de-asserted for no reason.
+ */
+ reset_control_assert(riic->rstc);
+ return ret;
+ }
+
+ i2c_mark_adapter_resumed(&riic->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops riic_i2c_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume)
};
static const struct of_device_id riic_i2c_dt_ids[] = {
{ .compatible = "renesas,riic-rz", .data = &riic_rz_a_info },
+ { .compatible = "renesas,riic-r7s72100", .data = &riic_rz_a1h_info, },
{ .compatible = "renesas,riic-r9a09g057", .data = &riic_rz_v2h_info },
{ /* Sentinel */ },
};
@@ -551,6 +636,7 @@ static struct platform_driver riic_i2c_driver = {
.driver = {
.name = "i2c-riic",
.of_match_table = riic_i2c_dt_ids,
+ .pm = pm_ptr(&riic_i2c_pm_ops),
},
};
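
The new PM wiring relies on SYSTEM_SLEEP_PM_OPS(), which only populates the suspend/resume callbacks when CONFIG_PM_SLEEP is set, and on pm_ptr(), which compiles the ops structure reference away entirely without CONFIG_PM, so no __maybe_unused annotations are needed. A bare sketch with hypothetical names:

static const struct dev_pm_ops example_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= pm_ptr(&example_pm_ops),
	},
};
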
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 01419c738cfc..7698d9d59744 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -130,7 +130,7 @@ static const struct platform_device_id s3c24xx_driver_ids[] = {
}, {
.name = "s3c2440-hdmiphy-i2c",
.driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
- }, { },
+ }, { }
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 52ba1e0845ca..2a351f961b89 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -182,7 +182,7 @@ static u32 virtio_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm virtio_algorithm = {
+static const struct i2c_algorithm virtio_algorithm = {
.xfer = virtio_i2c_xfer,
.functionality = virtio_i2c_func,
};
@@ -237,7 +237,7 @@ static void virtio_i2c_remove(struct virtio_device *vdev)
virtio_i2c_del_vqs(vdev);
}
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_I2C_ADAPTER, VIRTIO_DEV_ANY_ID },
{}
};
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 19468565120e..4c89aad02451 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -772,14 +772,17 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
goto out;
}
- xiic_fill_tx_fifo(i2c);
-
- /* current message sent and there is space in the fifo */
- if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
+ if (xiic_tx_space(i2c)) {
+ xiic_fill_tx_fifo(i2c);
+ } else {
+ /* current message fully written */
dev_dbg(i2c->adap.dev.parent,
"%s end of message sent, nmsgs: %d\n",
__func__, i2c->nmsgs);
- if (i2c->nmsgs > 1) {
+			/* Don't move on to the next message until the TX FIFO
+			 * empties, to ensure that a NAK is not missed.
+ */
+ if (i2c->nmsgs > 1 && (pend & XIIC_INTR_TX_EMPTY_MASK)) {
i2c->nmsgs--;
i2c->tx_msg++;
xfer_more = 1;
@@ -790,11 +793,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
"%s Got TX IRQ but no more to do...\n",
__func__);
}
- } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
- /* current frame is sent and is last,
- * make sure to disable tx half
- */
- xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
+ }
}
if (pend & XIIC_INTR_BNB_MASK) {
@@ -844,23 +843,11 @@ static int xiic_bus_busy(struct xiic_i2c *i2c)
return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}
-static int xiic_busy(struct xiic_i2c *i2c)
+static int xiic_wait_not_busy(struct xiic_i2c *i2c)
{
int tries = 3;
int err;
- if (i2c->tx_msg || i2c->rx_msg)
- return -EBUSY;
-
- /* In single master mode bus can only be busy, when in use by this
- * driver. If the register indicates bus being busy for some reason we
- * should ignore it, since bus will never be released and i2c will be
- * stuck forever.
- */
- if (i2c->singlemaster) {
- return 0;
- }
-
 	/* for instance, if the previous transfer was terminated due to a TX
 	 * error, it might be that the bus is on its way to becoming available;
 	 * give it at most 3 ms to wake
@@ -1104,13 +1091,36 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
mutex_lock(&i2c->lock);
- ret = xiic_busy(i2c);
- if (ret) {
+ if (i2c->tx_msg || i2c->rx_msg) {
dev_err(i2c->adap.dev.parent,
"cannot start a transfer while busy\n");
+ ret = -EBUSY;
goto out;
}
+	/* In single-master mode the bus can only be busy when in use by this
+	 * driver. If the register indicates the bus is busy for some other
+	 * reason, we should ignore it, since the bus will never be released
+	 * and I2C would be stuck forever.
+ */
+ if (!i2c->singlemaster) {
+ ret = xiic_wait_not_busy(i2c);
+ if (ret) {
+ /* If the bus is stuck in a busy state, such as due to spurious low
+ * pulses on the bus causing a false start condition to be detected,
+			 * then try to recover by re-initializing the controller
+			 * and checking again whether the bus is still busy.
+ */
+ dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n");
+ ret = xiic_reinit(i2c);
+ if (ret)
+ goto out;
+ ret = xiic_wait_not_busy(i2c);
+ if (ret)
+ goto out;
+ }
+ }
+
i2c->tx_msg = msgs;
i2c->rx_msg = NULL;
i2c->nmsgs = num;
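
Condensed, the recovery logic introduced above amounts to: if no transfer is in flight yet the bus still reads busy, reinitialize the controller once and re-check before failing. A sketch of that shape, using the names from the hunk:

	if (!i2c->singlemaster) {
		ret = xiic_wait_not_busy(i2c);
		if (ret) {
			/* one-shot recovery: reinit, then re-check */
			ret = xiic_reinit(i2c);
			if (!ret)
				ret = xiic_wait_not_busy(i2c);
			if (ret)
				goto out;
		}
	}
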
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index b63f75e44296..7c810893bfa3 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -915,6 +915,27 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
return 0;
}
+/*
+ * Serialize device instantiation in case it can be instantiated explicitly
+ * and by auto-detection
+ */
+static int i2c_lock_addr(struct i2c_adapter *adap, unsigned short addr,
+ unsigned short flags)
+{
+ if (!(flags & I2C_CLIENT_TEN) &&
+ test_and_set_bit(addr, adap->addrs_in_instantiation))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
+ unsigned short flags)
+{
+ if (!(flags & I2C_CLIENT_TEN))
+ clear_bit(addr, adap->addrs_in_instantiation);
+}
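
These helpers are an instance of the test_and_set_bit() exclusion idiom: the first caller to set the bit proceeds, later callers back off with -EBUSY instead of blocking. Schematically:

	/* winner takes the address; losers see the bit already set */
	if (test_and_set_bit(addr, adap->addrs_in_instantiation))
		return -EBUSY;

	/* ... instantiate the client at this address ... */

	clear_bit(addr, adap->addrs_in_instantiation);
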
+
/**
* i2c_new_client_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -962,6 +983,10 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
goto out_err_silent;
}
+ status = i2c_lock_addr(adap, client->addr, client->flags);
+ if (status)
+ goto out_err_silent;
+
/* Check for address business */
status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
if (status)
@@ -993,6 +1018,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
+ i2c_unlock_addr(adap, client->addr, client->flags);
+
return client;
out_remove_swnode:
@@ -1004,6 +1031,7 @@ out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
client->name, client->addr, status);
+ i2c_unlock_addr(adap, client->addr, client->flags);
out_err_silent:
if (need_put)
put_device(&client->dev);
@@ -1068,7 +1096,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
static const struct i2c_device_id dummy_id[] = {
{ "dummy", },
{ "smbus_host_notify", },
- { },
+ { }
};
static int dummy_probe(struct i2c_client *client)
@@ -1367,10 +1395,6 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev)
}
EXPORT_SYMBOL(i2c_verify_adapter);
-#ifdef CONFIG_I2C_COMPAT
-static struct class_compat *i2c_adapter_compat_class;
-#endif
-
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
struct i2c_devinfo *devinfo;
@@ -1524,7 +1548,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
dev_set_name(&adap->dev, "i2c-%d", adap->nr);
adap->dev.bus = &i2c_bus_type;
adap->dev.type = &i2c_adapter_type;
- res = device_register(&adap->dev);
+ device_initialize(&adap->dev);
+
+ /*
+	 * This adapter can be used as a parent immediately after device_add(),
+	 * so set up runtime PM (especially ignore-children) beforehand.
+ */
+ device_enable_async_suspend(&adap->dev);
+ pm_runtime_no_callbacks(&adap->dev);
+ pm_suspend_ignore_children(&adap->dev, true);
+ pm_runtime_enable(&adap->dev);
+
+ res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
goto out_list;
@@ -1536,25 +1571,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (res)
goto out_reg;
- device_enable_async_suspend(&adap->dev);
- pm_runtime_no_callbacks(&adap->dev);
- pm_suspend_ignore_children(&adap->dev, true);
- pm_runtime_enable(&adap->dev);
-
res = i2c_init_recovery(adap);
if (res == -EPROBE_DEFER)
goto out_reg;
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
-#ifdef CONFIG_I2C_COMPAT
- res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
- adap->dev.parent);
- if (res)
- dev_warn(&adap->dev,
- "Failed to create compatibility class link\n");
-#endif
-
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
i2c_acpi_install_space_handler(adap);
@@ -1761,11 +1783,6 @@ void i2c_del_adapter(struct i2c_adapter *adap)
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
-#ifdef CONFIG_I2C_COMPAT
- class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
- adap->dev.parent);
-#endif
-
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -2074,13 +2091,6 @@ static int __init i2c_init(void)
i2c_debugfs_root = debugfs_create_dir("i2c", NULL);
-#ifdef CONFIG_I2C_COMPAT
- i2c_adapter_compat_class = class_compat_register("i2c-adapter");
- if (!i2c_adapter_compat_class) {
- retval = -ENOMEM;
- goto bus_err;
- }
-#endif
retval = i2c_add_driver(&dummy_driver);
if (retval)
goto class_err;
@@ -2093,10 +2103,6 @@ static int __init i2c_init(void)
return 0;
class_err:
-#ifdef CONFIG_I2C_COMPAT
- class_compat_unregister(i2c_adapter_compat_class);
-bus_err:
-#endif
is_registered = false;
bus_unregister(&i2c_bus_type);
return retval;
@@ -2109,9 +2115,6 @@ static void __exit i2c_exit(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
-#ifdef CONFIG_I2C_COMPAT
- class_compat_unregister(i2c_adapter_compat_class);
-#endif
debugfs_remove_recursive(i2c_debugfs_root);
bus_unregister(&i2c_bus_type);
tracepoint_synchronize_unregister();
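
The registration change above uses the device_initialize()/device_add() split: device_initialize() makes the struct device refcounted and configurable, so the runtime-PM state can be set up before device_add() makes the adapter visible as a parent. The general shape, with the error handling shown in its generic form rather than the exact label used in the function:

	device_initialize(&adap->dev);

	/* safe here: the device exists but is not yet visible */
	pm_runtime_no_callbacks(&adap->dev);
	pm_suspend_ignore_children(&adap->dev, true);
	pm_runtime_enable(&adap->dev);

	res = device_add(&adap->dev);
	if (res)
		put_device(&adap->dev);	/* drop the device_initialize() ref */
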
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index e3765e12f93b..faefe1dfa8e5 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -109,15 +109,12 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
bool i2c_detect_slave_mode(struct device *dev)
{
if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
- struct device_node *child;
u32 reg;
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
of_property_read_u32(child, "reg", &reg);
- if (reg & I2C_OWN_SLAVE_ADDRESS) {
- of_node_put(child);
+ if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
- }
}
} else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
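
for_each_child_of_node_scoped() attaches the node reference to the loop variable's scope (via cleanup.h), so early returns drop it automatically and the explicit of_node_put() goes away. The pattern in isolation:

	for_each_child_of_node_scoped(dev->of_node, child) {
		u32 reg = 0;

		of_property_read_u32(child, "reg", &reg);
		if (reg & I2C_OWN_SLAVE_ADDRESS)
			return true;	/* child ref released automatically */
	}
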
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 4c550306f3ec..9fe3150378e8 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -6,7 +6,10 @@
* Copyright (C) 2020 by Renesas Electronics Corporation
*/
+#include <generated/utsrelease.h>
#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -14,12 +17,14 @@
#include <linux/slab.h>
#include <linux/workqueue.h> /* FIXME: is system_long_wq the best choice? */
-#define TU_CUR_VERSION 0x01
+#define TU_VERSION_MAX_LENGTH 128
enum testunit_cmds {
TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */
TU_CMD_SMBUS_HOST_NOTIFY,
TU_CMD_SMBUS_BLOCK_PROC_CALL,
+ TU_CMD_GET_VERSION_WITH_REP_START,
+ TU_CMD_SMBUS_ALERT_REQUEST,
TU_NUM_CMDS
};
@@ -39,50 +44,38 @@ struct testunit_data {
unsigned long flags;
u8 regs[TU_NUM_REGS];
u8 reg_idx;
+ u8 read_idx;
struct i2c_client *client;
struct delayed_work worker;
+ struct gpio_desc *gpio;
+ struct completion alert_done;
};
-static void i2c_slave_testunit_work(struct work_struct *work)
-{
- struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
- struct i2c_msg msg;
- u8 msgbuf[256];
- int ret = 0;
-
- msg.addr = I2C_CLIENT_END;
- msg.buf = msgbuf;
+static char tu_version_info[] = "v" UTS_RELEASE "\n\0";
- switch (tu->regs[TU_REG_CMD]) {
- case TU_CMD_READ_BYTES:
- msg.addr = tu->regs[TU_REG_DATAL];
- msg.flags = I2C_M_RD;
- msg.len = tu->regs[TU_REG_DATAH];
- break;
+static int i2c_slave_testunit_smbalert_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct testunit_data *tu = i2c_get_clientdata(client);
- case TU_CMD_SMBUS_HOST_NOTIFY:
- msg.addr = 0x08;
- msg.flags = 0;
- msg.len = 3;
- msgbuf[0] = tu->client->addr;
- msgbuf[1] = tu->regs[TU_REG_DATAL];
- msgbuf[2] = tu->regs[TU_REG_DATAH];
+ switch (event) {
+ case I2C_SLAVE_READ_PROCESSED:
+ gpiod_set_value(tu->gpio, 0);
+ fallthrough;
+ case I2C_SLAVE_READ_REQUESTED:
+ *val = tu->regs[TU_REG_DATAL];
break;
- default:
+ case I2C_SLAVE_STOP:
+ complete(&tu->alert_done);
break;
- }
- if (msg.addr != I2C_CLIENT_END) {
- ret = i2c_transfer(tu->client->adapter, &msg, 1);
- /* convert '0 msgs transferred' to errno */
- ret = (ret == 0) ? -EIO : ret;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ return -EOPNOTSUPP;
}
- if (ret < 0)
- dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret);
-
- clear_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+ return 0;
}
static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
@@ -91,9 +84,20 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
struct testunit_data *tu = i2c_get_clientdata(client);
bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 &&
tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL;
+ bool is_get_version = tu->reg_idx == 3 &&
+ tu->regs[TU_REG_CMD] == TU_CMD_GET_VERSION_WITH_REP_START;
int ret = 0;
switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
+ memset(tu->regs, 0, TU_NUM_REGS);
+ tu->reg_idx = 0;
+ tu->read_idx = 0;
+ break;
+
case I2C_SLAVE_WRITE_RECEIVED:
if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
return -EBUSY;
@@ -127,27 +131,93 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
tu->reg_idx = 0;
break;
- case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
-
- memset(tu->regs, 0, TU_NUM_REGS);
- tu->reg_idx = 0;
- break;
-
case I2C_SLAVE_READ_PROCESSED:
- if (is_proc_call && tu->regs[TU_REG_DATAH])
+ /* Advance until we reach the NUL character */
+ if (is_get_version && tu_version_info[tu->read_idx] != 0)
+ tu->read_idx++;
+ else if (is_proc_call && tu->regs[TU_REG_DATAH])
tu->regs[TU_REG_DATAH]--;
+
fallthrough;
case I2C_SLAVE_READ_REQUESTED:
- *val = is_proc_call ? tu->regs[TU_REG_DATAH] : TU_CUR_VERSION;
+ if (is_get_version)
+ *val = tu_version_info[tu->read_idx];
+ else if (is_proc_call)
+ *val = tu->regs[TU_REG_DATAH];
+ else
+ *val = test_bit(TU_FLAG_IN_PROCESS, &tu->flags) ?
+ tu->regs[TU_REG_CMD] : 0;
break;
}
return ret;
}
+static void i2c_slave_testunit_work(struct work_struct *work)
+{
+ struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
+ unsigned long time_left;
+ struct i2c_msg msg;
+ u8 msgbuf[256];
+ u16 orig_addr;
+ int ret = 0;
+
+ msg.addr = I2C_CLIENT_END;
+ msg.buf = msgbuf;
+
+ switch (tu->regs[TU_REG_CMD]) {
+ case TU_CMD_READ_BYTES:
+ msg.addr = tu->regs[TU_REG_DATAL];
+ msg.flags = I2C_M_RD;
+ msg.len = tu->regs[TU_REG_DATAH];
+ break;
+
+ case TU_CMD_SMBUS_HOST_NOTIFY:
+ msg.addr = 0x08;
+ msg.flags = 0;
+ msg.len = 3;
+ msgbuf[0] = tu->client->addr;
+ msgbuf[1] = tu->regs[TU_REG_DATAL];
+ msgbuf[2] = tu->regs[TU_REG_DATAH];
+ break;
+
+ case TU_CMD_SMBUS_ALERT_REQUEST:
+ i2c_slave_unregister(tu->client);
+ orig_addr = tu->client->addr;
+ tu->client->addr = 0x0c;
+ ret = i2c_slave_register(tu->client, i2c_slave_testunit_smbalert_cb);
+ if (ret)
+ goto out_smbalert;
+
+ reinit_completion(&tu->alert_done);
+ gpiod_set_value(tu->gpio, 1);
+ time_left = wait_for_completion_timeout(&tu->alert_done, HZ);
+ if (!time_left)
+ ret = -ETIMEDOUT;
+
+ i2c_slave_unregister(tu->client);
+out_smbalert:
+ tu->client->addr = orig_addr;
+ i2c_slave_register(tu->client, i2c_slave_testunit_slave_cb);
+ break;
+
+ default:
+ break;
+ }
+
+ if (msg.addr != I2C_CLIENT_END) {
+ ret = i2c_transfer(tu->client->adapter, &msg, 1);
+ /* convert '0 msgs transferred' to errno */
+ ret = (ret == 0) ? -EIO : ret;
+ }
+
+ if (ret < 0)
+ dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret);
+
+ clear_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+}
+
static int i2c_slave_testunit_probe(struct i2c_client *client)
{
struct testunit_data *tu;
@@ -158,8 +228,18 @@ static int i2c_slave_testunit_probe(struct i2c_client *client)
tu->client = client;
i2c_set_clientdata(client, tu);
+ init_completion(&tu->alert_done);
INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);
+ tu->gpio = devm_gpiod_get_index_optional(&client->dev, NULL, 0, GPIOD_OUT_LOW);
+ if (gpiod_cansleep(tu->gpio)) {
+ dev_err(&client->dev, "GPIO access which may sleep is not allowed\n");
+ return -EDEADLK;
+ }
+
+ if (sizeof(tu_version_info) > TU_VERSION_MAX_LENGTH)
+ tu_version_info[TU_VERSION_MAX_LENGTH - 1] = 0;
+
return i2c_slave_register(client, i2c_slave_testunit_slave_cb);
};
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index db1b9057612a..6d2f66810cdc 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -119,4 +119,20 @@ config I2C_MUX_MLXCPLD
This driver can also be built as a module. If so, the module
will be called i2c-mux-mlxcpld.
+config I2C_MUX_MULE
+ tristate "Theobroma Systems Mule I2C device multiplexer"
+ depends on OF && SENSORS_AMC6821
+ help
+ Mule is an MCU that emulates a set of I2C devices, among which
+ devices that are reachable through an I2C-mux. The devices on the mux
+ can be selected by writing the appropriate device number to an I2C
+ configuration register.
+
+ If you say yes to this option, support will be included for a
+ Theobroma Systems Mule I2C multiplexer. This driver provides access to
+ I2C devices connected on this mux.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mule.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 6d9d865e8518..4b24f49515a7 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_GPMUX) += i2c-mux-gpmux.o
obj-$(CONFIG_I2C_MUX_LTC4306) += i2c-mux-ltc4306.o
obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o
+obj-$(CONFIG_I2C_MUX_MULE) += i2c-mux-mule.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
new file mode 100644
index 000000000000..8e942470b35f
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-mule.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Theobroma Systems Mule I2C device multiplexer
+ *
+ * Copyright (C) 2024 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/i2c-mux.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MULE_I2C_MUX_CONFIG_REG 0xff
+#define MULE_I2C_MUX_DEFAULT_DEV 0x0
+
+struct mule_i2c_reg_mux {
+ struct regmap *regmap;
+};
+
+static int mule_i2c_mux_select(struct i2c_mux_core *muxc, u32 dev)
+{
+ struct mule_i2c_reg_mux *mux = muxc->priv;
+
+ return regmap_write(mux->regmap, MULE_I2C_MUX_CONFIG_REG, dev);
+}
+
+static int mule_i2c_mux_deselect(struct i2c_mux_core *muxc, u32 dev)
+{
+ return mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+}
+
+static void mule_i2c_mux_remove(void *data)
+{
+ struct i2c_mux_core *muxc = data;
+
+ i2c_mux_del_adapters(muxc);
+
+ mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+}
+
+static int mule_i2c_mux_probe(struct platform_device *pdev)
+{
+ struct device *mux_dev = &pdev->dev;
+ struct mule_i2c_reg_mux *priv;
+ struct i2c_client *client;
+ struct i2c_mux_core *muxc;
+ struct device_node *dev;
+ unsigned int readback;
+ int ndev, ret;
+ bool old_fw;
+
+ /* Count devices on the mux */
+ ndev = of_get_child_count(mux_dev->of_node);
+ dev_dbg(mux_dev, "%d devices on the mux\n", ndev);
+
+ client = to_i2c_client(mux_dev->parent);
+
+ muxc = i2c_mux_alloc(client->adapter, mux_dev, ndev, sizeof(*priv),
+ I2C_MUX_LOCKED, mule_i2c_mux_select, mule_i2c_mux_deselect);
+ if (!muxc)
+ return -ENOMEM;
+
+ priv = i2c_mux_priv(muxc);
+
+ priv->regmap = dev_get_regmap(mux_dev->parent, NULL);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(mux_dev, PTR_ERR(priv->regmap),
+ "No parent i2c register map\n");
+
+ platform_set_drvdata(pdev, muxc);
+
+ /*
+	 * MULE_I2C_MUX_DEFAULT_DEV is guaranteed to exist on all old and new
+	 * Mule FW. Mule FW without mux support will accept write ops to the
+ * config register, but readback returns 0xff (register not updated).
+ */
+ ret = mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to write config register\n");
+
+ ret = regmap_read(priv->regmap, MULE_I2C_MUX_CONFIG_REG, &readback);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to read config register\n");
+
+ old_fw = (readback != MULE_I2C_MUX_DEFAULT_DEV);
+
+ ret = devm_add_action_or_reset(mux_dev, mule_i2c_mux_remove, muxc);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to register mux remove\n");
+
+ /* Create device adapters */
+ for_each_child_of_node(mux_dev->of_node, dev) {
+ u32 reg;
+
+ ret = of_property_read_u32(dev, "reg", &reg);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "No reg property found for %s\n",
+ of_node_full_name(dev));
+
+ if (old_fw && reg != 0) {
+ dev_warn(mux_dev,
+ "Mux is not supported, please update Mule FW\n");
+ continue;
+ }
+
+ ret = mule_i2c_mux_select(muxc, reg);
+ if (ret) {
+ dev_warn(mux_dev,
+ "Device %d not supported, please update Mule FW\n", reg);
+ continue;
+ }
+
+ ret = i2c_mux_add_adapter(muxc, 0, reg);
+ if (ret)
+ return ret;
+ }
+
+ mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+
+ return 0;
+}
+
+static const struct of_device_id mule_i2c_mux_of_match[] = {
+ { .compatible = "tsd,mule-i2c-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mule_i2c_mux_of_match);
+
+static struct platform_driver mule_i2c_mux_driver = {
+ .driver = {
+ .name = "mule-i2c-mux",
+ .of_match_table = mule_i2c_mux_of_match,
+ },
+ .probe = mule_i2c_mux_probe,
+};
+
+module_platform_driver(mule_i2c_mux_driver);
+
+MODULE_AUTHOR("Farouk Bouabid <farouk.bouabid@cherry.de>");
+MODULE_DESCRIPTION("I2C mux driver for Theobroma Systems Mule");
+MODULE_LICENSE("GPL");
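
From a consumer's point of view, each mux channel appears as an ordinary child adapter; because the core was created with I2C_MUX_LOCKED, the select write, the payload transfer and the deselect write execute under one parent-bus lock. A hypothetical consumer sketch (the device address is assumed):

	struct i2c_adapter *child = muxc->adapter[0];	/* channel 0 */
	u8 buf[2];
	struct i2c_msg msg = {
		.addr	= 0x18,			/* hypothetical device */
		.flags	= I2C_M_RD,
		.len	= sizeof(buf),
		.buf	= buf,
	};

	/* triggers select(0), the read on the parent bus, then deselect */
	ret = i2c_transfer(child, &msg, 1);
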
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7028f03c2c42..6f3eb710a75d 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1868,6 +1868,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
}
+ if (master->ops->set_speed) {
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/*
* Reset all dynamic address that may have been assigned before
* (assigned by the bootloader for example).
@@ -1876,6 +1882,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
if (ret && ret != I3C_ERROR_M2)
goto err_bus_cleanup;
+ if (master->ops->set_speed) {
+		ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/* Disable all slave events before starting DAA. */
ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index c1627f3552ce..fe4d59833ad5 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1562,6 +1562,7 @@ static const struct of_device_id cdns_i3c_master_of_ids[] = {
{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
@@ -1666,6 +1667,7 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
{
struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
clk_disable_unprepare(master->sysclk);
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index a658e7b8262c..1f8cd5c48fde 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -3,4 +3,5 @@
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
- dat_v1.o dct_v1.o
+ dat_v1.o dct_v1.o \
+ hci_quirks.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index 638b054d6c92..dd636094b07f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -123,17 +123,15 @@ static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
- if (bus->scl_rate.i3c >= 12500000)
- return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 8000000)
- return MODE_I3C_SDR1;
+ return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 6000000)
- return MODE_I3C_SDR2;
+ return MODE_I3C_SDR1;
if (bus->scl_rate.i3c > 4000000)
- return MODE_I3C_SDR3;
+ return MODE_I3C_SDR2;
if (bus->scl_rate.i3c > 2000000)
- return MODE_I3C_SDR4;
- return MODE_I3C_Fm_FmP;
+ return MODE_I3C_SDR3;
+ return MODE_I3C_SDR4;
}
static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 4e7d6a43ee9b..a82c47c9986d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -27,11 +26,6 @@
* Host Controller Capabilities and Operation Registers
*/
-#define reg_read(r) readl(hci->base_regs + (r))
-#define reg_write(r, v) writel(v, hci->base_regs + (r))
-#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
-#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
-
#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
#define HC_CONTROL 0x04
@@ -152,6 +146,10 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
if (ret)
return ret;
+	/* Set RESP_BUF_THLD to 0 (= n) so that a single (n + 1) response is signalled */
+ if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
+ amd_set_resp_buf_thld(hci);
+
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
@@ -630,8 +628,8 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
static int i3c_hci_init(struct i3c_hci *hci)
{
+ bool size_in_dwords, mode_selector;
u32 regval, offset;
- bool size_in_dwords;
int ret;
/* Validate HCI hardware version */
@@ -753,10 +751,17 @@ static int i3c_hci_init(struct i3c_hci *hci)
return -EINVAL;
}
+ mode_selector = hci->version_major > 1 ||
+ (hci->version_major == 1 && hci->version_minor > 0);
+
+ /* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
+ if (hci->quirks & HCI_QUIRK_PIO_MODE)
+ hci->RHS_regs = NULL;
+
/* Try activating DMA operations first */
if (hci->RHS_regs) {
reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "PIO mode is stuck\n");
ret = -EIO;
} else {
@@ -768,7 +773,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
/* If no DMA, try PIO */
if (!hci->io && hci->PIO_regs) {
reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "DMA mode is stuck\n");
ret = -EIO;
} else {
@@ -784,6 +789,10 @@ static int i3c_hci_init(struct i3c_hci *hci)
return ret;
}
+ /* Configure OD and PP timings for AMD platforms */
+ if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
+ amd_set_od_pp_timing(hci);
+
return 0;
}
@@ -803,6 +812,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
/* temporary for dev_printk's, to be replaced in i3c_master_register */
hci->master.dev.init_name = dev_name(&pdev->dev);
+ hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
+
ret = i3c_hci_init(hci);
if (ret)
return ret;
@@ -834,12 +845,19 @@ static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+static const struct acpi_device_id i3c_hci_acpi_match[] = {
+ { "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);
+
static struct platform_driver i3c_hci_driver = {
.probe = i3c_hci_probe,
.remove_new = i3c_hci_remove,
.driver = {
.name = "mipi-i3c-hci",
.of_match_table = of_match_ptr(i3c_hci_of_match),
+ .acpi_match_table = i3c_hci_acpi_match,
},
};
module_platform_driver(i3c_hci_driver);
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index f94d95e024be..aaa47ac47381 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -10,6 +10,7 @@
#ifndef HCI_H
#define HCI_H
+#include <linux/io.h>
/* Handy logging macro to save on line length */
#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
@@ -26,6 +27,10 @@
#define W2_BIT_(x) BIT((x) - 64)
#define W3_BIT_(x) BIT((x) - 96)
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
struct hci_cmd_ops;
@@ -135,11 +140,16 @@ struct i3c_hci_dev_data {
/* list of quirks */
#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+#define HCI_QUIRK_PIO_MODE BIT(2) /* Set PIO mode for AMD platforms */
+#define HCI_QUIRK_OD_PP_TIMING BIT(3) /* Set OD and PP timings for AMD platforms */
+#define HCI_QUIRK_RESP_BUF_THLD BIT(4) /* Set resp buf thld to 0 for AMD platforms */
/* global functions */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+void amd_set_od_pp_timing(struct i3c_hci *hci);
+void amd_set_resp_buf_thld(struct i3c_hci *hci);
#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
new file mode 100644
index 000000000000..3b9c6e76c536
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * I3C HCI Quirks
+ *
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+ */
+
+#include <linux/i3c/master.h>
+#include "hci.h"
+
+/* Timing registers */
+#define HCI_SCL_I3C_OD_TIMING 0x214
+#define HCI_SCL_I3C_PP_TIMING 0x218
+#define HCI_SDA_HOLD_SWITCH_DLY_TIMING 0x230
+
+/* Timing values to configure a 9 MHz frequency */
+#define AMD_SCL_I3C_OD_TIMING 0x00cf00cf
+#define AMD_SCL_I3C_PP_TIMING 0x00160016
+
+#define QUEUE_THLD_CTRL 0xD0
+
+void amd_set_od_pp_timing(struct i3c_hci *hci)
+{
+ u32 data;
+
+ reg_write(HCI_SCL_I3C_OD_TIMING, AMD_SCL_I3C_OD_TIMING);
+ reg_write(HCI_SCL_I3C_PP_TIMING, AMD_SCL_I3C_PP_TIMING);
+ data = reg_read(HCI_SDA_HOLD_SWITCH_DLY_TIMING);
+ /* Configure maximum TX hold time */
+ data |= W0_MASK(18, 16);
+ reg_write(HCI_SDA_HOLD_SWITCH_DLY_TIMING, data);
+}
+
+void amd_set_resp_buf_thld(struct i3c_hci *hci)
+{
+ u32 data;
+
+ data = reg_read(QUEUE_THLD_CTRL);
+ data = data & ~W0_MASK(15, 8);
+ reg_write(QUEUE_THLD_CTRL, data);
+}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 0a68fd1b81d4..a7bfc678153e 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -127,6 +127,8 @@
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
+#define SVC_I3C_PPBAUD_MAX 15
+#define SVC_I3C_QUICK_I2C_CLK 4170000
#define SVC_I3C_EVENT_IBI BIT(0)
#define SVC_I3C_EVENT_HOTJOIN BIT(1)
@@ -182,6 +184,7 @@ struct svc_i3c_regs_save {
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
* @enabled_events: Bit masks for enable events (IBI, HotJoin).
+ * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
*/
struct svc_i3c_master {
struct i3c_master_controller base;
@@ -212,6 +215,7 @@ struct svc_i3c_master {
} ibi;
struct mutex lock;
int enabled_events;
+ u32 mctrl_config;
};
/**
@@ -529,12 +533,61 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
+ enum i3c_open_drain_speed speed)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(&master->base);
+ u32 ppbaud, odbaud, odhpp, mconfig;
+ unsigned long fclk_rate;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ switch (speed) {
+ case I3C_OPEN_DRAIN_SLOW_SPEED:
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
+ /*
+ * Set a 50% duty-cycle I2C speed for I3C open-drain mode, so the first
+ * broadcast address is visible to all I2C/I3C devices on the I3C bus.
+ * An I3C device working as an I2C device will turn off its 50 ns spike
+ * filter to change to I3C mode.
+ */
+ mconfig = master->mctrl_config;
+ ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
+ mconfig &= ~GENMASK(24, 16);
+ mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
+ writel(mconfig, master->regs + SVC_I3C_MCONFIG);
+ break;
+ case I3C_OPEN_DRAIN_NORMAL_SPEED:
+ writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
+ break;
+ }
+
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
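
As a worked example of the slow open-drain divider above (all numbers assumed for illustration): with fclk_rate = 100 MHz, bus->scl_rate.i2c = 400 kHz and ppbaud = 3 read back from the saved MCONFIG,

    odbaud = DIV_ROUND_UP(100000000, 400000 * (2 + 2 * 3)) - 1
           = DIV_ROUND_UP(100000000, 3200000) - 1
           = 32 - 1 = 31

With odhpp = 0 the open-drain high and low phases are each (odbaud + 1) * (ppbaud + 1) fclk cycles, so SCL runs at 100 MHz / (2 * 32 * 4), about 391 kHz, just under the requested I2C rate.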
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
+ unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
unsigned int high_period_ns, od_low_period_ns;
u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
@@ -555,12 +608,15 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
}
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+ i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
+ i2c_scl_rate = bus->scl_rate.i2c;
+ i3c_scl_rate = bus->scl_rate.i3c;
/*
* Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
* Simplest configuration is using a 50% duty-cycle of 40ns.
*/
- ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
pplow = 0;
/*
@@ -570,7 +626,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
*/
odhpp = 1;
high_period_ns = (ppbaud + 1) * fclk_period_ns;
- odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
od_low_period_ns = (odbaud + 1) * high_period_ns;
switch (bus->mode) {
@@ -579,20 +635,27 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
- case I3C_BUS_MODE_MIXED_LIMITED:
/*
* Using I2C Fm+ mode, target is 1 MHz/1000 ns; the difference
* between the high and low periods does not really matter.
*/
- i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
+ case I3C_BUS_MODE_MIXED_LIMITED:
case I3C_BUS_MODE_MIXED_SLOW:
- /*
- * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
- * constraints as the FM+ mode.
- */
- i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ /* I3C PP, I3C OD and I2C OD all use the I2C clock rate */
+ if (ppbaud > SVC_I3C_PPBAUD_MAX) {
+ ppbaud = SVC_I3C_PPBAUD_MAX;
+ pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
+ }
+
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
+
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
default:
@@ -611,6 +674,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
writel(reg, master->regs + SVC_I3C_MCONFIG);
+ master->mctrl_config = reg;
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
@@ -1645,6 +1709,7 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.disable_ibi = svc_i3c_master_disable_ibi,
.enable_hotjoin = svc_i3c_master_enable_hotjoin,
.disable_hotjoin = svc_i3c_master_disable_hotjoin,
+ .set_speed = svc_i3c_master_set_speed,
};
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
@@ -1775,6 +1840,7 @@ static void svc_i3c_master_remove(struct platform_device *pdev)
{
struct svc_i3c_master *master = platform_get_drvdata(pdev);
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
pm_runtime_dont_use_autosuspend(&pdev->dev);
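
A worked example of the new PPBAUD clamp in the mixed-bus path (the 480 MHz fclk is assumed for illustration): targeting i3c_scl_rate = 12.5 MHz,

    ppbaud = DIV_ROUND_UP(480000000 / 2, 12500000) - 1 = 19;  /* exceeds SVC_I3C_PPBAUD_MAX */
    ppbaud = SVC_I3C_PPBAUD_MAX;                              /* clamp to 15 */
    pplow  = DIV_ROUND_UP(480000000, 12500000) - (2 + 2 * 15)
           = 39 - 32 = 7;

The extra pplow cycles stretch only the low phase, so the push-pull period becomes (2 + 2 * ppbaud + pplow) = 39 fclk cycles, about 12.3 MHz, instead of silently overflowing the divider field.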
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9aab7abc2ae9..9457e34b9e32 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1022,6 +1022,45 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state gnr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 4,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_INIT_XSTATE,
+ .exit_latency = 170,
+ .target_residency = 650,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6P",
+ .desc = "MWAIT 0x21",
+ .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_INIT_XSTATE,
+ .exit_latency = 210,
+ .target_residency = 1000,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1453,6 +1492,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_gnr __initconst = {
+ .state_table = gnr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1475,6 +1520,10 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_tmt __initconst = {
+ .disable_promotion_to_c1e = true,
+};
+
static const struct idle_cpu idle_cpu_snr __initconst = {
.state_table = snr_cstates,
.disable_promotion_to_c1e = true,
@@ -1533,11 +1582,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr),
X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &idle_cpu_tmt),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &idle_cpu_tmt),
X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
@@ -2075,7 +2127,7 @@ static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
drv->state_count = 1;
- if (icpu)
+ if (icpu && icpu->state_table)
intel_idle_init_cstates_icpu(drv);
else
intel_idle_init_cstates_acpi(drv);
@@ -2209,7 +2261,11 @@ static int __init intel_idle_init(void)
icpu = (const struct idle_cpu *)id->driver_data;
if (icpu) {
- cpuidle_state_table = icpu->state_table;
+ if (icpu->state_table)
+ cpuidle_state_table = icpu->state_table;
+ else if (!intel_idle_acpi_cst_extract())
+ return -ENODEV;
+
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
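
For context on how the exit_latency and target_residency values in gnr_cstates are consumed: a cpuidle governor only picks a state whose target residency fits the predicted idle span and whose exit latency fits the current latency constraint. A simplified sketch of that selection rule (illustrative only; the real menu/teo governors add statistics, tick and s2idle handling):

    /* Pick the deepest state compatible with the prediction and the QoS limit. */
    static int pick_state(struct cpuidle_driver *drv, u64 predicted_ns, s64 latency_req_ns)
    {
            int i, best = 0;

            for (i = 1; i < drv->state_count; i++) {
                    struct cpuidle_state *s = &drv->states[i];

                    if (s->target_residency_ns > predicted_ns ||
                        s->exit_latency_ns > latency_req_ns)
                            break;
                    best = i;
            }
            return best;
    }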
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 6791df64a5fe..b7c078b7f7cf 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device)
rdma_for_each_port (device, p) {
err = ib_cache_update(device, p, true, true, true);
- if (err)
+ if (err) {
+ gid_table_cleanup_one(device);
return err;
+ }
}
return 0;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index dd7715ba9fd1..05102769a918 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -325,9 +325,6 @@ void ib_qp_usecnt_inc(struct ib_qp *qp);
void ib_qp_usecnt_dec(struct ib_qp *qp);
struct rdma_dev_addr;
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 0290aca18d26..e029401b5680 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1351,6 +1351,29 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
{
}
+static void ib_device_notify_register(struct ib_device *device)
+{
+ struct net_device *netdev;
+ u32 port;
+ int ret;
+
+ ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
+ if (ret)
+ return;
+
+ rdma_for_each_port(device, port) {
+ netdev = ib_device_get_netdev(device, port);
+ if (!netdev)
+ continue;
+
+ ret = rdma_nl_notify_event(device, port,
+ RDMA_NETDEV_ATTACH_EVENT);
+ dev_put(netdev);
+ if (ret)
+ return;
+ }
+}
+
/**
* ib_register_device - Register an IB device with IB core
* @device: Device to register
@@ -1449,6 +1472,8 @@ int ib_register_device(struct ib_device *device, const char *name,
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
+ ib_device_notify_register(device);
ib_device_put(device);
return 0;
@@ -1491,6 +1516,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+ rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
@@ -2159,6 +2185,7 @@ static void add_ndev_hash(struct ib_port_data *pdata)
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
u32 port)
{
+ enum rdma_nl_notify_event_type etype;
struct net_device *old_ndev;
struct ib_port_data *pdata;
unsigned long flags;
@@ -2190,6 +2217,14 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
add_ndev_hash(pdata);
+
+ /* Make sure that the device is registered before we send events */
+ if (xa_load(&devices, ib_dev->index) != ib_dev)
+ return 0;
+
+ etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
+ rdma_nl_notify_event(ib_dev, port, etype);
+
return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);
@@ -2236,6 +2271,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
if (!rdma_is_port_valid(ib_dev, port))
return NULL;
+ if (!ib_dev->port_data)
+ return NULL;
+
pdata = &ib_dev->port_data[port];
/*
@@ -2252,17 +2290,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
spin_unlock(&pdata->netdev_lock);
}
- /*
- * If we are starting to unregister expedite things by preventing
- * propagation of an unregistering netdev.
- */
- if (res && res->reg_state != NETREG_REGISTERED) {
- dev_put(res);
- return NULL;
- }
-
return res;
}
+EXPORT_SYMBOL(ib_device_get_netdev);
/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 1a6339f3a63f..7e3a55349e10 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -1182,7 +1182,7 @@ static int __init iw_cm_init(void)
if (ret)
return ret;
- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
goto err_alloc;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 7439e47ff951..1fd54d5c4dd8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2616,14 +2616,16 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
static void timeout_sends(struct work_struct *work)
{
+ struct ib_mad_send_wr_private *mad_send_wr, *n;
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
+ struct list_head local_list;
unsigned long flags, delay;
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
timed_work.work);
mad_send_wc.vendor_err = 0;
+ INIT_LIST_HEAD(&local_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->wait_list)) {
@@ -2641,13 +2643,16 @@ static void timeout_sends(struct work_struct *work)
break;
}
- list_del(&mad_send_wr->agent_list);
+ list_del_init(&mad_send_wr->agent_list);
if (mad_send_wr->status == IB_WC_SUCCESS &&
!retry_send(mad_send_wr))
continue;
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ list_add_tail(&mad_send_wr->agent_list, &local_list);
+ }
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) {
if (mad_send_wr->status == IB_WC_SUCCESS)
mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
else
@@ -2655,11 +2660,8 @@ static void timeout_sends(struct work_struct *work)
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
-
deref_mad_agent(mad_agent_priv);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
}
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
/*
@@ -2937,7 +2939,6 @@ static int ib_mad_port_open(struct ib_device *device,
int ret, cq_size;
struct ib_mad_port_private *port_priv;
unsigned long flags;
- char name[sizeof "ib_mad123"];
int has_smi;
if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
@@ -2990,8 +2991,8 @@ static int ib_mad_port_open(struct ib_device *device,
goto error7;
}
- snprintf(name, sizeof(name), "ib_mad%u", port_num);
- port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM,
+ port_num);
if (!port_priv->wq) {
ret = -ENOMEM;
goto error8;
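
The timeout_sends() rework above is an instance of a common locking pattern: detach the expired entries onto a local list while holding the spinlock, then run the completion callbacks with the lock dropped. That removes the per-entry unlock/relock cycle of the old code and keeps callbacks out of the critical section. The generic shape, with struct item, expired() and complete_item() as placeholder names:

    LIST_HEAD(local);
    unsigned long flags;
    struct item *it, *n;

    spin_lock_irqsave(&priv->lock, flags);
    list_for_each_entry_safe(it, n, &priv->pending, node)
            if (expired(it))
                    list_move_tail(&it->node, &local);
    spin_unlock_irqrestore(&priv->lock, flags);

    /* Completion callbacks run without the lock held. */
    list_for_each_entry_safe(it, n, &local, node)
            complete_item(it);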
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index ae2db0c70788..def14c54b648 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -311,6 +311,7 @@ int rdma_nl_net_init(struct rdma_dev_net *rnet)
struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV,
};
struct sock *nls;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index a6b80cdc96f7..39f89a4b8649 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -170,6 +170,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING },
[RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -1074,8 +1075,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1123,8 +1124,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1215,8 +1216,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 port;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err ||
!tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
@@ -1275,8 +1276,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
int err;
unsigned int p;
- err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1331,8 +1332,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int ret;
- ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1481,8 +1482,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int ret;
- ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
return -EINVAL;
@@ -1569,8 +1570,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
u32 index, port = 0;
bool filled = false;
- err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
/*
* Right now, we are expecting the device index to get res information,
* but it is possible to extend this code to return all devices in
@@ -1762,8 +1763,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
char type[IFNAMSIZ];
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
@@ -1806,8 +1807,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1836,8 +1837,8 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
- extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ NL_VALIDATE_LIBERAL, extack);
if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
return -EINVAL;
@@ -1920,8 +1921,8 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err)
return err;
@@ -1951,6 +1952,12 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_free(msg);
return err;
}
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
/*
* Copy-on-fork is supported.
* See commits:
@@ -2420,8 +2427,8 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret)
return -EINVAL;
@@ -2450,8 +2457,8 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
int ret;
- ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
return -EINVAL;
@@ -2482,8 +2489,8 @@ static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
u32 devid, port;
int ret, i;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
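
The pattern across these nldev hunks: read-only GET and dump handlers switch to __nlmsg_parse() with NL_VALIDATE_LIBERAL, preserving the old tolerance of unknown trailing attributes for monitoring tools, while state-changing handlers (set, newlink, dellink) switch to the fully strict nlmsg_parse(). The two call shapes, as used above:

    /* read-only handlers: tolerate unknown/malformed trailing attributes */
    err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                        nldev_policy, NL_VALIDATE_LIBERAL, extack);

    /* state-changing handlers: strict validation */
    err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
                      nldev_policy, extack);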
@@ -2722,6 +2729,130 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
};
+static int fill_mon_netdev_association(struct sk_buff *msg,
+ struct ib_device *device, u32 port,
+ const struct net *net)
+{
+ struct net_device *netdev = ib_device_get_netdev(device, port);
+ int ret = 0;
+
+ if (netdev && !net_eq(dev_net(netdev), net))
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev));
+ if (ret)
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port);
+ if (ret)
+ goto out;
+
+ if (netdev) {
+ ret = nla_put_u32(msg,
+ RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg,
+ RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
+ }
+
+out:
+ dev_put(netdev);
+ return ret;
+}
+
+static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct net_device *netdev;
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor register device event\n");
+ break;
+ case RDMA_UNREGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor unregister device event\n");
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ netdev = ib_device_get_netdev(device, port_num);
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n",
+ port_num, netdev ? netdev->ifindex : -1);
+ dev_put(netdev); /* dev_put(NULL) is a no-op */
+ break;
+ case RDMA_NETDEV_DETACH_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev detach event: port %d\n",
+ port_num);
+ break;
+ default:
+ break;
+ }
+}
+
+int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ int ret = 0;
+ void *nlh;
+
+ net = read_pnet(&device->coredev.rdma_net);
+ if (!net)
+ return -EINVAL;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, 0, 0,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR),
+ 0, 0);
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ case RDMA_UNREGISTER_EVENT:
+ ret = fill_nldev_handle(skb, device);
+ if (ret)
+ goto err_free;
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ case RDMA_NETDEV_DETACH_EVENT:
+ ret = fill_mon_netdev_association(skb, device,
+ port_num, net);
+ if (ret)
+ goto err_free;
+ break;
+ default:
+ break;
+ }
+
+ ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(skb, nlh);
+ ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL);
+ if (ret && ret != -ESRCH) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ rdma_nl_notify_err_msg(device, port_num, type);
+ nlmsg_free(skb);
+ return ret;
+}
+
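These events go out on the RDMA_NL_GROUP_NOTIFY multicast group, so a monitor can follow them with a plain netlink socket. A minimal userspace sketch, assuming RDMA_NL_GROUP_NOTIFY is exported through the rdma_netlink uapi header by this series (error handling trimmed; attribute parsing left as a comment):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>          /* NETLINK_RDMA, struct sockaddr_nl */
    #include <rdma/rdma_netlink.h>      /* RDMA_NL_GROUP_NOTIFY (assumed) */

    int main(void)
    {
            struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
            int grp = RDMA_NL_GROUP_NOTIFY;
            char buf[4096];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

            bind(fd, (struct sockaddr *)&sa, sizeof(sa));
            setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
            for (;;) {
                    ssize_t len = recv(fd, buf, sizeof(buf), 0);
                    if (len <= 0)
                            break;
                    printf("RDMA monitor event, %zd bytes\n", len);
                    /* walk nlattrs for RDMA_NLDEV_ATTR_EVENT_TYPE etc. here */
            }
            return 0;
    }
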
void __init nldev_init(void)
{
rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8175dde60b0a..53571e6b3162 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1420,7 +1420,7 @@ enum opa_pr_supported {
/*
* opa_pr_query_possible - Check if current PR query can be an OPA query.
*
- * Retuns PR_NOT_SUPPORTED if a path record query is not
+ * Returns PR_NOT_SUPPORTED if a path record query is not
* possible, PR_OPA_SUPPORTED if an OPA path record query
* is possible and PR_IB_SUPPORTED if an IB path record
* query is possible.
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5f5ad8faf86e..dc57d07a1f45 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1624,13 +1624,13 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
/* Get current fd to protect against it being closed */
f = fdget(cmd.fd);
- if (!f.file)
+ if (!fd_file(f))
return -ENOENT;
- if (f.file->f_op != &ucma_fops) {
+ if (fd_file(f)->f_op != &ucma_fops) {
ret = -EINVAL;
goto file_put;
}
- cur_file = f.file->private_data;
+ cur_file = fd_file(f)->private_data;
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(cur_file, cmd.id);
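
The ucma change above is part of the tree-wide move to the fd_file() accessor, which hides struct fd's internals. The borrowed-descriptor pattern in its minimal form:

    struct fd f = fdget(fd);

    if (!fd_file(f))                /* bad or already-closed descriptor */
            return -EBADF;
    /* ... use fd_file(f)->f_op, fd_file(f)->private_data while held ... */
    fdput(f);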
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 39357dc2d229..9fcd37761264 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -23,6 +23,9 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+ if (umem_dmabuf->revoked)
+ return -EINVAL;
+
if (umem_dmabuf->sgt)
goto wait_fence;
@@ -110,10 +113,12 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
-struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
- unsigned long offset, size_t size,
- int fd, int access,
- const struct dma_buf_attach_ops *ops)
+static struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
{
struct dma_buf *dmabuf;
struct ib_umem_dmabuf *umem_dmabuf;
@@ -152,7 +157,7 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
umem_dmabuf->attach = dma_buf_dynamic_attach(
dmabuf,
- device->dma_device,
+ dma_device,
ops,
umem_dmabuf);
if (IS_ERR(umem_dmabuf->attach)) {
@@ -168,6 +173,15 @@ out_release_dmabuf:
dma_buf_put(dmabuf);
return ret;
}
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device,
+ offset, size, fd, access, ops);
+}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
static void
@@ -184,16 +198,18 @@ static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};
-struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
- unsigned long offset,
- size_t size, int fd,
- int access)
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
{
struct ib_umem_dmabuf *umem_dmabuf;
int err;
- umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
- &ib_umem_dmabuf_attach_pinned_ops);
+ umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
+ size, fd, access,
+ &ib_umem_dmabuf_attach_pinned_ops);
if (IS_ERR(umem_dmabuf))
return umem_dmabuf;
@@ -217,17 +233,41 @@ err_release:
ib_umem_release(&umem_dmabuf->umem);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access)
+{
+ return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device,
+ offset, size, fd, access);
+}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
-void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
+ if (umem_dmabuf->revoked)
+ goto end;
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
- if (umem_dmabuf->pinned)
+ if (umem_dmabuf->pinned) {
dma_buf_unpin(umem_dmabuf->attach);
+ umem_dmabuf->pinned = 0;
+ }
+ umem_dmabuf->revoked = 1;
+end:
dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_revoke);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
dma_buf_detach(dmabuf, umem_dmabuf->attach);
dma_buf_put(dmabuf);
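
ib_umem_dmabuf_revoke() splits the invalidation half out of ib_umem_dmabuf_release(): it unmaps and unpins under the reservation lock, marks the umem revoked so later ib_umem_dmabuf_map_pages() calls fail with -EINVAL, and is safe to call repeatedly (release now just revokes and detaches). A hedged sketch of a caller; the driver type and field names are placeholders:

    /* Hypothetical teardown path in a driver holding a pinned dmabuf MR. */
    static void drv_fence_dmabuf_mr(struct drv_mr *mr)
    {
            /* Idempotent: a second revoke (or the final release) is a no-op. */
            ib_umem_dmabuf_revoke(mr->umem_dmabuf);
    }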
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1b3ea71f2c33..a4cce360df21 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -572,7 +572,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
struct inode *inode = NULL;
int new_xrcd = 0;
struct ib_device *ib_dev;
- struct fd f = {};
+ struct fd f = EMPTY_FD;
int ret;
ret = uverbs_request(attrs, &cmd, sizeof(cmd));
@@ -584,12 +584,12 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
if (cmd.fd != -1) {
/* search for file descriptor */
f = fdget(cmd.fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto err_tree_mutex_unlock;
}
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
xrcd = find_xrcd(ibudev, inode);
if (!xrcd && !(cmd.oflags & O_CREAT)) {
/* no file descriptor. Need CREATE flag */
@@ -632,7 +632,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
atomic_inc(&xrcd->usecnt);
}
- if (f.file)
+ if (fd_file(f))
fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
@@ -648,7 +648,7 @@ err:
uobj_alloc_abort(&obj->uobject, attrs);
err_tree_mutex_unlock:
- if (f.file)
+ if (fd_file(f))
fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 03e1db5d1e8c..7ebc7bd3caae 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -239,7 +239,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
access_flags,
- &attrs->driver_udata);
+ attrs);
if (IS_ERR(mr))
return PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 0912d2fa9634..e94518b12f86 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -91,6 +91,15 @@ struct bnxt_re_ring_attr {
u8 mode;
};
+/* Data structures and defines to handle recovery */
+#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
+#define BNXT_RE_COMPLETE_REMOVE 0x2
+#define BNXT_RE_POST_RECOVERY_INIT 0x4
+#define BNXT_RE_COMPLETE_INIT 0x8
+
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
@@ -107,6 +116,11 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
+struct bnxt_re_en_dev_info {
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+};
+
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -141,6 +155,7 @@ struct bnxt_re_pacing {
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
#define MAX_CQ_HASH_BITS (16)
+#define MAX_SRQ_HASH_BITS (16)
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -154,6 +169,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
+ struct auxiliary_device *adev;
struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
@@ -196,6 +212,7 @@ struct bnxt_re_dev {
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
+ DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
};
#define to_bnxt_re_dev(ptr, member) \
@@ -216,4 +233,10 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
}
extern const struct uapi_definition bnxt_re_uapi_defs[];
+
+static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->dev_err_state =
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+}
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7c757351a016..460f33914825 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -115,6 +115,14 @@ static enum ib_access_flags __to_ib_access_flags(int qflags)
return iflags;
};
+static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_mrw *qplib_mr)
+{
+ if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+ pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+}
+
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
{
@@ -517,15 +525,19 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
- goto fail;
- }
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
- /* Register MR */
- mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ }
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
@@ -994,43 +1006,37 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
- sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
- if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
- return -EINVAL;
- /* For gen p4 and gen p5 backward compatibility mode
- * wqe size is fixed to 128 bytes
+ /* For gen p4 and gen p5 fixed wqe compatibility mode
+ * wqe size is fixed to 128 bytes - ie 6 SGEs
*/
- if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
- qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
+ sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
+ sq->max_sge = BNXT_STATIC_MAX_SGE;
+ } else {
+ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+ return -EINVAL;
+ }
if (init_attr->cap.max_inline_data) {
qplqp->max_inline_data = sq->wqe_size -
sizeof(struct sq_send_hdr);
init_attr->cap.max_inline_data = qplqp->max_inline_data;
- if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->max_sge = qplqp->max_inline_data /
- sizeof(struct sq_sge);
}
return 0;
}
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
- struct bnxt_re_qp *qp, struct ib_udata *udata)
+ struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_qp *qplib_qp;
- struct bnxt_re_ucontext *cntx;
- struct bnxt_re_qp_req ureq;
int bytes = 0, psn_sz;
struct ib_umem *umem;
int psn_nume;
qplib_qp = &qp->qplib_qp;
- cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
- ib_uctx);
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
@@ -1038,15 +1044,20 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- qplib_qp->sq.max_wqe :
- ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
- sizeof(struct bnxt_qplib_sge));
+ if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
+ psn_nume = ureq->sq_slots;
+ } else {
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ }
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ psn_nume = roundup_pow_of_two(psn_nume);
bytes += (psn_nume * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -1055,12 +1066,12 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
- qplib_qp->qp_handle = ureq.qp_handle;
+ qplib_qp->qp_handle = ureq->qp_handle;
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1156,6 +1167,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
@@ -1167,6 +1179,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
@@ -1228,6 +1241,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
*/
entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->max_sw_wqe = rq->max_wqe;
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
rq->sg_info.pgshft = PAGE_SHIFT;
@@ -1256,14 +1270,15 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct bnxt_re_ucontext *uctx)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_q *sq;
+ int diff = 0;
int entries;
- int diff;
int rc;
rdev = qp->rdev;
@@ -1272,21 +1287,28 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
dev_attr = &rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
- if (sq->max_sge > dev_attr->max_qp_sges) {
- sq->max_sge = dev_attr->max_qp_sges;
- init_attr->cap.max_send_sge = sq->max_sge;
- }
+ entries = init_attr->cap.max_send_wr;
+ if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ sq->max_wqe = ureq->sq_slots;
+ sq->max_sw_wqe = ureq->sq_slots;
+ sq->wqe_size = sizeof(struct sq_sge);
+ } else {
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
- rc = bnxt_re_setup_swqe_size(qp, init_attr);
- if (rc)
- return rc;
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
- entries = init_attr->cap.max_send_wr;
- /* Allocate 128 + 1 more than what's provided */
- diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
- 0 : BNXT_QPLIB_RESERVED_QP_WRS;
- entries = bnxt_re_init_depth(entries + diff + 1, uctx);
- sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ /* Allocate 128 + 1 more than what's provided */
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ }
sq->q_full_delta = diff + 1;
/*
* Reserving one slot for Phantom WQE. Application can
@@ -1349,10 +1371,10 @@ out:
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
@@ -1362,7 +1384,6 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
qplqp->pd = &pd->qplib_pd;
@@ -1375,8 +1396,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
goto out;
}
qplqp->type = (u8)qptype;
- qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
-
+ qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
if (init_attr->qp_type == IB_QPT_RC) {
qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
@@ -1411,14 +1431,14 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
- if (udata) /* This will update DPI and qp_handle */
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+ if (uctx) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
out:
return rc;
}
@@ -1519,14 +1539,27 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_qp->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_qp_req ureq;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_pd *pd;
+ struct bnxt_re_qp *qp;
+ struct ib_pd *ib_pd;
u32 active_qps;
int rc;
+ ib_pd = ib_qp->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = &rdev->dev_attr;
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ if (udata)
+ if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))))
+ return -EFAULT;
+
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
if (!rc) {
rc = -EINVAL;
@@ -1534,7 +1567,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
qp->rdev = rdev;
- rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
if (rc)
goto fail;
@@ -1685,6 +1718,10 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
if (qplib_srq->cq)
nq = qplib_srq->cq->nq;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ free_page((unsigned long)srq->uctx_srq_page);
+ hash_del(&srq->hash_entry);
+ }
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
atomic_dec(&rdev->stats.res.srq_count);
@@ -1789,9 +1826,18 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
}
if (udata) {
- struct bnxt_re_srq_resp resp;
+ struct bnxt_re_srq_resp resp = {};
resp.srqid = srq->qplib_srq.id;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
+ srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!srq->uctx_srq_page) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
+ }
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
@@ -2155,6 +2201,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
@@ -3845,9 +3892,12 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
/* Allocate and register 0 as the address */
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
@@ -3945,7 +3995,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
+ mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
@@ -4062,21 +4112,28 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
- rc = -EIO;
- goto free_mr;
+ if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
}
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
mr->ib_umem = umem;
mr->qplib_mr.va = virt_addr;
mr->qplib_mr.total_size = length;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
@@ -4122,7 +4179,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr, int fd,
- int mr_access_flags, struct ib_udata *udata)
+ int mr_access_flags,
+ struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -4187,9 +4245,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
- resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
- resp.mode = rdev->chip_ctx->modes.wqe_mode;
-
if (rdev->chip_ctx->modes.db_push)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
@@ -4211,7 +4266,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
goto cfail;
if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
- uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
+ }
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
}
}
@@ -4265,6 +4326,19 @@ static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq
return cq;
}
+static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
+{
+ struct bnxt_re_srq *srq = NULL, *tmp_srq;
+
+ hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
+ if (tmp_srq->qplib_srq.id == srq_id) {
+ srq = tmp_srq;
+ break;
+ }
+ }
+ return srq;
+}
+
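The SRQ toggle-page lookup reuses the fixed-size kernel hashtable pattern already used for CQs, with the firmware SRQ id as the key. The lifecycle, using only calls that appear in this patch:

    DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);                /* in struct bnxt_re_dev */

    hash_init(rdev->srq_hash);                                     /* device init */
    hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id); /* create_srq */
    /* lookup: hash_for_each_possible() + id compare, as above */
    hash_del(&srq->hash_entry);                                    /* destroy_srq */
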
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4493,12 +4567,13 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ u32 length = PAGE_SIZE;
struct bnxt_re_cq *cq;
u64 mem_offset;
+ u32 offset = 0;
u64 addr = 0;
- u32 length;
- u32 offset;
- u32 cq_id;
+ u32 res_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
@@ -4511,23 +4586,24 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
rdev = uctx->rdev;
+ err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
- err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
- if (err)
- return err;
-
- cq = bnxt_re_search_for_cq(rdev, cq_id);
+ cq = bnxt_re_search_for_cq(rdev, res_id);
if (!cq)
return -EINVAL;
- length = PAGE_SIZE;
addr = (u64)cq->uctx_cq_page;
- mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
- offset = 0;
break;
case BNXT_RE_SRQ_TOGGLE_MEM:
+ srq = bnxt_re_search_for_srq(rdev, res_id);
+ if (!srq)
+ return -EINVAL;
+
+ addr = (u64)srq->uctx_srq_page;
break;
default:
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index e98cb1717338..b789e47ec97a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -77,6 +77,8 @@ struct bnxt_re_srq {
struct bnxt_qplib_srq qplib_srq;
struct ib_umem *umem;
spinlock_t lock; /* protect srq */
+ void *uctx_srq_page;
+ struct hlist_node hash_entry;
};
struct bnxt_re_qp {
@@ -171,12 +173,26 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}
+enum {
+ BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
+ BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
+};
+
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
- return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
+ return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
ent : roundup_pow_of_two(ent) : ent;
}
+static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *uctx)
+{
+ if (uctx)
+ return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ else
+ return rdev->chip_ctx->modes.wqe_mode;
+}
+
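bnxt_re_init_depth() rounds a requested queue depth up to a power of two unless the user context negotiated that rounding away. Illustrative results for a request of 1000 entries (the two context names are placeholders):

    bnxt_re_init_depth(1000, uctx_default);   /* -> 1024, rounded up          */
    bnxt_re_init_depth(1000, uctx_pow2_off);  /* -> 1000, POW2_DISABLED set   */
    bnxt_re_init_depth(1000, NULL);           /* -> 1000, kernel consumer     */
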
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -242,7 +258,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr,
int fd, int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 9714b9ab7524..777068de4bbc 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -83,11 +83,12 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr);
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -129,18 +130,20 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
}
}
-static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
cctx = rdev->chip_ctx;
- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
- mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
- if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) {
cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
+ cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT;
+ }
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -158,7 +161,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
kfree(chip_ctx);
}
-static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
@@ -166,6 +169,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
en_dev = rdev->en_dev;
+ rdev->qplib_res.pdev = en_dev->pdev;
chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
if (!chip_ctx)
return -ENOMEM;
@@ -180,7 +184,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.dattr = &rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
- bnxt_re_set_drv_mode(rdev, wqe_mode);
+ bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
@@ -290,21 +294,31 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return;
+
+ rdev = en_info->rdev;
ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
}
static void bnxt_re_stop_irq(void *handle)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx;
+ if (!en_info)
+ return;
+
+ rdev = en_info->rdev;
+ rcfw = &rdev->rcfw;
+
for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
nq = &rdev->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
@@ -315,12 +329,19 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_msix_entry *msix_ent;
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx, rc;
+ if (!en_info)
+ return;
+
+ rdev = en_info->rdev;
+ msix_ent = rdev->en_dev->msix_entries;
+ rcfw = &rdev->rcfw;
if (!ent) {
/* Not setting the f/w timeout bit in rcfw.
* During the driver unload the first command
@@ -365,14 +386,9 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
- int rc;
en_dev = rdev->en_dev;
-
- rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
- if (!rc)
- rdev->qplib_res.pdev = rdev->en_dev->pdev;
- return rc;
+ return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev);
}
static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
@@ -1573,7 +1589,7 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
return rc;
}
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
int rc;
@@ -1606,8 +1622,10 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_deinitialize_dbr_pacing(rdev);
bnxt_re_destroy_chip_ctx(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
- bnxt_unregister_dev(rdev->en_dev);
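+ /* Keep the L2 registration across an error-recovery teardown;
+ * only a complete remove detaches from the en driver.
+ */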
+ if (op_type == BNXT_RE_COMPLETE_REMOVE) {
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
+ }
}
/* Worker thread for polling periodic events. Now used for QoS programming. */
@@ -1620,7 +1638,7 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
-static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
@@ -1629,16 +1647,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
u8 type;
int rc;
- /* Registered a new RoCE device instance to netdev */
- rc = bnxt_re_register_netdev(rdev);
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
- return -EINVAL;
+ if (op_type == BNXT_RE_COMPLETE_INIT) {
+ /* Register a new RoCE device instance with netdev */
+ rc = bnxt_re_register_netdev(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with netedev: %#x\n", rc);
+ return -EINVAL;
+ }
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
- rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
+ rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
bnxt_unregister_dev(rdev->en_dev);
clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -1771,6 +1791,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
bnxt_re_vf_res_config(rdev);
}
hash_init(rdev->cq_hash);
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
+ hash_init(rdev->srq_hash);
return 0;
free_sctx:
@@ -1785,21 +1807,38 @@ free_ring:
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
return rc;
}
-static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
+static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
+ struct bnxt_re_en_dev_info *en_info,
+ struct auxiliary_device *adev)
+{
+ /* Before updating the rdev pointer in bnxt_re_en_dev_info structure,
+ * take the rtnl lock to avoid accessing invalid rdev pointer from
+ * L2 ULP callbacks. This is applicable in all the places where rdev
+ * pointer is updated in bnxt_re_en_dev_info.
+ */
+ rtnl_lock();
+ en_info->rdev = rdev;
+ rdev->adev = adev;
+ rtnl_unlock();
+}
+
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
{
struct bnxt_aux_priv *aux_priv =
container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
int rc;
- /* en_dev should never be NULL as long as adev and aux_dev are valid. */
- en_dev = aux_priv->edev;
+ en_info = auxiliary_get_drvdata(adev);
+ en_dev = en_info->en_dev;
+
rdev = bnxt_re_dev_add(aux_priv, en_dev);
if (!rdev || !rdev_to_dev(rdev)) {
@@ -1807,7 +1846,9 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
goto exit;
}
- rc = bnxt_re_dev_init(rdev, wqe_mode);
+ bnxt_re_update_en_info_rdev(rdev, en_info, adev);
+
+ rc = bnxt_re_dev_init(rdev, op_type);
if (rc)
goto re_dev_dealloc;
@@ -1817,12 +1858,22 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
aux_priv->aux_dev.name);
goto re_dev_uninit;
}
- auxiliary_set_drvdata(adev, rdev);
+
+ rdev->nb.notifier_call = bnxt_re_netdev_event;
+ rc = register_netdevice_notifier(&rdev->nb);
+ if (rc) {
+ rdev->nb.notifier_call = NULL;
+ pr_err("%s: Cannot register with netdevice_notifier\n",
+ ROCE_DRV_MODULE_NAME);
+ return rc;
+ }
+ bnxt_re_setup_cc(rdev, true);
return 0;
re_dev_uninit:
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
re_dev_dealloc:
ib_dealloc_device(&rdev->ibdev);
exit:
@@ -1905,14 +1956,9 @@ exit:
#define BNXT_ADEV_NAME "bnxt_en"
-static void bnxt_re_remove(struct auxiliary_device *adev)
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
+ struct auxiliary_device *aux_dev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
-
- if (!rdev)
- return;
-
- mutex_lock(&bnxt_re_mutex);
if (rdev->nb.notifier_call) {
unregister_netdevice_notifier(&rdev->nb);
rdev->nb.notifier_call = NULL;
@@ -1920,41 +1966,56 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
/* If notifier is NULL, we should have already cleaned
* up before coming here.
*/
- goto skip_remove;
+ return;
}
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, op_type);
ib_dealloc_device(&rdev->ibdev);
-skip_remove:
+}
+
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ mutex_lock(&bnxt_re_mutex);
+ if (!en_info) {
+ mutex_unlock(&bnxt_re_mutex);
+ return;
+ }
+ rdev = en_info->rdev;
+
+ if (rdev)
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+ kfree(en_info);
mutex_unlock(&bnxt_re_mutex);
}
static int bnxt_re_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
int rc;
+ en_dev = aux_priv->edev;
+
mutex_lock(&bnxt_re_mutex);
- rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
- if (rc) {
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info) {
mutex_unlock(&bnxt_re_mutex);
- return rc;
+ return -ENOMEM;
}
+ en_info->en_dev = en_dev;
- rdev = auxiliary_get_drvdata(adev);
+ auxiliary_set_drvdata(adev, en_info);
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
+ rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+ if (rc)
goto err;
- }
-
- bnxt_re_setup_cc(rdev, true);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -1967,11 +2028,15 @@ err:
static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return 0;
+ rdev = en_info->rdev;
+ en_dev = en_info->en_dev;
mutex_lock(&bnxt_re_mutex);
/* L2 driver may invoke this callback during device error/crash or device
* reset. Current RoCE driver doesn't recover the device in case of
@@ -1990,13 +2055,20 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
bnxt_re_dev_stop(rdev);
- bnxt_re_stop_irq(rdev);
+ bnxt_re_stop_irq(adev);
/* Move the device states to detached and avoid sending any more
* commands to HW
*/
set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
wake_up_all(&rdev->rcfw.cmdq.waitq);
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_set_pacing_dev_state(rdev);
+
+ ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop, en_state 0x%lx",
+ __func__, en_dev->en_state);
+ bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -2004,9 +2076,10 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
static int bnxt_re_resume(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return 0;
mutex_lock(&bnxt_re_mutex);
@@ -2017,7 +2090,9 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
* L2 driver wants to modify the MSIx table.
*/
- ibdev_info(&rdev->ibdev, "Handle device resume call");
+ bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT);
+ rdev = en_info->rdev;
+ ibdev_info(&rdev->ibdev, "Device resume completed");
mutex_unlock(&bnxt_re_mutex);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 49e4a4a50bfa..42e98e5f94cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -54,6 +54,10 @@
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
+#include <rdma/ib_addr.h>
+#include "bnxt_ulp.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
@@ -347,6 +351,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_SRQ_EVENT:
{
struct bnxt_qplib_srq *srq;
+ struct bnxt_re_srq *srq_p;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
@@ -354,6 +359,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
srq = (struct bnxt_qplib_srq *)q_handle;
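+ /* Cache the toggle bit from the notification and mirror it
+ * into the userspace-mapped SRQ page so consumers can re-arm
+ * the SRQ with the correct toggle value.
+ */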
+ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = srq->toggle;
+ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
+ if (srq_p->uctx_srq_page)
+ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
bnxt_qplib_armen_db(&srq->dbinfo,
DBC_DBC_TYPE_SRQ_ARMENA);
if (nq->srqn_handler(nq,
@@ -809,13 +820,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
int indx;
- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
if (!que->swq)
return -ENOMEM;
que->swq_start = 0;
- que->swq_last = que->max_wqe - 1;
- for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
@@ -851,7 +862,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -879,7 +890,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -1011,7 +1022,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
: 0;
@@ -1052,7 +1063,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
@@ -2471,6 +2482,32 @@ out:
return rc;
}
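+/* Walk the SQ's software queue from swq_last towards swq_start
+ * looking for the SWQE posted at slot @cqe_slot; return the index
+ * that follows it as the new consumer index, or -1 if no SWQE
+ * matches.
+ */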
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget,
@@ -2478,9 +2515,10 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons, slot_num;
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
- u32 cqe_sq_cons;
+ int cqe_cons;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2492,12 +2530,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
/* We need to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -2882,7 +2934,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
if (cqe_cons == 0xFFFF)
goto do_rq;
- cqe_cons %= sq->max_wqe;
+ cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 56538b90d6c5..b62df8701950 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -105,6 +105,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_sg_info sg_info;
u16 eventq_hw_ring_id;
spinlock_t lock; /* protect SRQE link list */
+ u8 toggle;
};
struct bnxt_qplib_sge {
@@ -251,6 +252,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u32 max_sw_wqe;
u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
@@ -586,15 +588,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
que->swq_start = que->swq[idx].next_idx;
}
-static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
- return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ u32 slots;
+
+ /* Queue depth is the number of slots. */
+ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ /* For variable WQE mode, the slot count must be aligned to 256 */
+ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
+ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
+ return slots;
}
static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- que->max_wqe : bnxt_qplib_get_depth(que);
+ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}
static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
@@ -641,4 +650,14 @@ static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
(((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
SQ_MSN_SEARCH_START_PSN_MASK));
}
+
+static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
+{
+ return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
+}
+
+static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
+{
+ return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
+}
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index a0f78cde314f..c2f710364e0f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -82,6 +82,7 @@ struct bnxt_qplib_db_pacing_data {
u32 fifo_room_mask;
u32 fifo_room_shift;
u32 grc_reg_offset;
+ u32 dev_err_state;
};
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
@@ -565,4 +566,14 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing;
}
+static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
+{
+ return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
+}
+
+static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9328db92fa6d..4f75e7e5bcf7 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -95,11 +95,13 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct bnxt_qplib_chip_ctx *cctx;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
int i, rc;
u32 temp;
+ cctx = rcfw->res->cctx;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
@@ -133,8 +135,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
- 6 : sb->max_sge;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
attr->max_cq_sges = attr->max_qp_sges;
@@ -541,7 +544,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
req.pd_id = cpu_to_le32(mrw->pd->id);
req.mrw_flags = mrw->type;
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
- mrw->flags & BNXT_QPLIB_FR_PMR) ||
+ mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
@@ -653,9 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
- req.access = (mr->flags & 0xFFFF);
+ req.access = (mr->access_flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+ req.key = cpu_to_le32(mr->pd->id);
+ req.flags = cpu_to_le16(mr->flags);
req.mr_size = cpu_to_le64(mr->total_size);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
@@ -664,6 +670,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (rc)
goto fail;
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+ mr->lkey = le32_to_cpu(resp.xid);
+ mr->rkey = mr->lkey;
+ }
+
return 0;
fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 16a67d70a6fc..acd9c14a31c4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,7 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
+#include <rdma/bnxt_re-abi.h>
#define BNXT_QPLIB_RESERVED_QP_WRS 128
struct bnxt_qplib_dev_attr {
@@ -108,7 +109,7 @@ struct bnxt_qplib_ah {
struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
- u32 flags;
+ u32 access_flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
@@ -116,6 +117,7 @@ struct bnxt_qplib_mrw {
u64 va;
u64 total_size;
u32 npages;
+ u16 flags;
u64 mr_handle;
struct bnxt_qplib_hwq hwq;
};
@@ -351,4 +353,11 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+#define BNXT_VAR_MAX_WQE 4352
+#define BNXT_VAR_MAX_SLOT_ALIGN 256
+#define BNXT_VAR_MAX_SGE 13
+#define BNXT_RE_MAX_RQ_WQES 65536
+
+#define BNXT_STATIC_MAX_SGE 6
+
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 042530969505..3ec895284e49 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -409,7 +409,7 @@ struct creq_deinitialize_fw_resp {
u8 reserved48[6];
};
-/* cmdq_create_qp (size:768b/96B) */
+/* cmdq_create_qp (size:832b/104B) */
struct cmdq_create_qp {
u8 opcode;
#define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
@@ -430,8 +430,11 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL
#define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL
#define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL
#define CMDQ_CREATE_QP_QP_FLAGS_LAST \
- CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+ CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED
u8 type;
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
@@ -492,6 +495,9 @@ struct cmdq_create_qp {
__le64 rq_pbl;
__le64 irrq_addr;
__le64 orrq_addr;
+ __le32 request_xid;
+ __le16 steering_tag;
+ __le16 reserved16;
};
/* creq_create_qp_resp (size:128b/16B) */
@@ -972,13 +978,14 @@ struct creq_query_qp_extend_resp_sb_tlv {
__le16 reserved_16;
};
-/* cmdq_create_srq (size:384b/48B) */
+/* cmdq_create_srq (size:448b/56B) */
struct cmdq_create_srq {
u8 opcode;
#define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
#define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
u8 cmd_size;
__le16 flags;
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1012,6 +1019,8 @@ struct cmdq_create_srq {
__le32 dpi;
__le32 pd_id;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_create_srq_resp (size:128b/16B) */
@@ -1118,7 +1127,7 @@ struct creq_query_srq_resp_sb {
__le32 data[4];
};
-/* cmdq_create_cq (size:384b/48B) */
+/* cmdq_create_cq (size:448b/56B) */
struct cmdq_create_cq {
u8 opcode;
#define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
@@ -1126,6 +1135,8 @@ struct cmdq_create_cq {
u8 cmd_size;
__le16 flags;
#define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1157,6 +1168,8 @@ struct cmdq_create_cq {
__le32 dpi;
__le32 cq_size;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_create_cq_resp (size:128b/16B) */
@@ -1288,11 +1301,12 @@ struct cmdq_allocate_mrw {
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
- #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xf0UL
- #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xe0UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 5
u8 access;
#define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
- __le16 unused16;
+ __le16 steering_tag;
__le32 pd_id;
};
@@ -1359,14 +1373,16 @@ struct creq_deallocate_key_resp {
__le32 bound_window_info;
};
-/* cmdq_register_mr (size:384b/48B) */
+/* cmdq_register_mr (size:448b/56B) */
struct cmdq_register_mr {
u8 opcode;
#define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
#define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR
u8 cmd_size;
__le16 flags;
- #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1415,6 +1431,8 @@ struct cmdq_register_mr {
__le64 pbl;
__le64 va;
__le64 mr_size;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_register_mr_resp (size:128b/16B) */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 040ba2224f9f..b3757c6a0457 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
@@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int ret = 0;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
+
la = (struct sockaddr_in *)&ep->com.local_addr;
ra = (struct sockaddr_in *)&ep->com.remote_addr;
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 5111421f9473..14ced7b667fa 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1126,13 +1126,19 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_free_mm2;
mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
+ mm->addr = 0;
+ mm->vaddr = chp->cq.queue;
+ mm->dma_addr = chp->cq.dma_addr;
mm->len = chp->cq.memsize;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(ucontext, mm);
mm2->key = uresp.gts_key;
mm2->addr = chp->cq.bar2_pa;
mm2->len = PAGE_SIZE;
+ mm2->vaddr = NULL;
+ mm2->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr);
insert_mmap(ucontext, mm2);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f838bb6718af..5b3007acaa1f 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -532,11 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+enum {
+ CXGB4_MMAP_BAR,
+ CXGB4_MMAP_BAR_WC,
+ CXGB4_MMAP_CONTIG,
+ CXGB4_MMAP_NON_CONTIG,
+};
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
u32 key;
+ void *vaddr;
+ dma_addr_t dma_addr;
unsigned len;
+ u8 mmap_flag;
};
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
@@ -561,6 +571,32 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
return NULL;
}
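+/* Classify @addr so c4iw_mmap() knows how to map it: BAR0 addresses
+ * map uncached, BAR2 addresses map write-combined (except the user DB
+ * region on T4, which stays uncached), any other non-zero address is
+ * contiguous DMA memory, and a zero address marks a non-contiguous
+ * buffer mapped via dma_mmap_coherent().
+ */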
+static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev,
+ struct c4iw_mm_entry *mm, u64 addr)
+{
+ if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+ pci_resource_len(rdev->lldi.pdev, 0))))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+ if (addr >= rdev->oc_mw_pa) {
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ } else {
+ if (is_t4(rdev->lldi.adapter_type))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ }
+ } else {
+ if (addr)
+ mm->mmap_flag = CXGB4_MMAP_CONTIG;
+ else
+ mm->mmap_flag = CXGB4_MMAP_NON_CONTIG;
+ }
+}
+
static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
@@ -936,7 +972,6 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
u32 nr_pdid, u32 nr_srqt);
-int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
@@ -944,7 +979,6 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
-int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
@@ -1006,8 +1040,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
-u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
struct c4iw_dev_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 246b739ddb2b..10a4c738b59f 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -113,6 +113,9 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
+ mm->vaddr = NULL;
+ mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(context, mm);
}
return 0;
@@ -131,6 +134,11 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_mm_entry *mm;
struct c4iw_ucontext *ucontext;
u64 addr;
+ u8 mmap_flag;
+ size_t size;
+ void *vaddr;
+ unsigned long vm_pgoff;
+ dma_addr_t dma_addr;
pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
key, len);
@@ -145,47 +153,38 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!mm)
return -EINVAL;
addr = mm->addr;
+ vaddr = mm->vaddr;
+ dma_addr = mm->dma_addr;
+ size = mm->len;
+ mmap_flag = mm->mmap_flag;
kfree(mm);
- if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
- pci_resource_len(rdev->lldi.pdev, 0)))) {
-
- /*
- * MA_SYNC register...
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ switch (mmap_flag) {
+ case CXGB4_MMAP_BAR:
+ ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ len,
+ pgprot_noncached(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_BAR_WC:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
- pci_resource_len(rdev->lldi.pdev, 2)))) {
-
- /*
- * Map user DB or OCQP memory...
- */
- if (addr >= rdev->oc_mw_pa)
- vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else {
- if (!is_t4(rdev->lldi.adapter_type))
- vma->vm_page_prot =
- t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot =
- pgprot_noncached(vma->vm_page_prot);
- }
+ len, t4_pgprot_wc(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_CONTIG:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
+ break;
+ case CXGB4_MMAP_NON_CONTIG:
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+ ret = dma_mmap_coherent(&rdev->lldi.pdev->dev, vma,
+ vaddr, dma_addr, size);
+ vma->vm_pgoff = vm_pgoff;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
return ret;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d16d8eaa1415..7b5c4522b426 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2281,24 +2281,39 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
if (ret)
goto err_free_ma_sync_key;
sq_key_mm->key = uresp.sq_key;
- sq_key_mm->addr = qhp->wq.sq.phys_addr;
+ sq_key_mm->addr = 0;
+ sq_key_mm->vaddr = qhp->wq.sq.queue;
+ sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
insert_mmap(ucontext, sq_key_mm);
if (!attrs->srq) {
rq_key_mm->key = uresp.rq_key;
- rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->addr = 0;
+ rq_key_mm->vaddr = qhp->wq.rq.queue;
+ rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
+ rq_key_mm->addr);
insert_mmap(ucontext, rq_key_mm);
}
sq_db_key_mm->key = uresp.sq_db_gts_key;
sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+ sq_db_key_mm->vaddr = NULL;
+ sq_db_key_mm->dma_addr = 0;
sq_db_key_mm->len = PAGE_SIZE;
+ insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
+ sq_db_key_mm->addr);
insert_mmap(ucontext, sq_db_key_mm);
if (!attrs->srq) {
rq_db_key_mm->key = uresp.rq_db_gts_key;
rq_db_key_mm->addr =
(u64)(unsigned long)qhp->wq.rq.bar2_pa;
rq_db_key_mm->len = PAGE_SIZE;
+ rq_db_key_mm->vaddr = NULL;
+ rq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
+ rq_db_key_mm->addr);
insert_mmap(ucontext, rq_db_key_mm);
}
if (ma_sync_key_mm) {
@@ -2307,6 +2322,10 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
PCIE_MA_SYNC_A) & PAGE_MASK;
ma_sync_key_mm->len = PAGE_SIZE;
+ ma_sync_key_mm->vaddr = NULL;
+ ma_sync_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
+ ma_sync_key_mm->addr);
insert_mmap(ucontext, ma_sync_key_mm);
}
@@ -2761,12 +2780,19 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
if (ret)
goto err_free_srq_db_key_mm;
srq_key_mm->key = uresp.srq_key;
- srq_key_mm->addr = virt_to_phys(srq->wq.queue);
+ srq_key_mm->addr = 0;
srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+ srq_key_mm->vaddr = srq->wq.queue;
+ srq_key_mm->dma_addr = srq->wq.dma_addr;
+ insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
insert_mmap(ucontext, srq_key_mm);
srq_db_key_mm->key = uresp.srq_db_gts_key;
srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
srq_db_key_mm->len = PAGE_SIZE;
+ srq_db_key_mm->vaddr = NULL;
+ srq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
+ srq_db_key_mm->addr);
insert_mmap(ucontext, srq_db_key_mm);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e580e087e9da..d7fc9d5eeefd 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -168,7 +168,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 4296662e59c3..cd03a5429beb 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -674,6 +674,9 @@ struct efa_admin_feature_device_attr_desc {
/* Max RDMA transfer size in bytes */
u32 max_rdma_size;
+
+ /* Unique global ID for an EFA device */
+ u64 guid;
};
struct efa_admin_feature_queue_attr_desc {
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 5b9c2b16df0e..5a774925cdea 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -465,6 +465,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->db_bar = resp.u.device_attr.db_bar;
result->max_rdma_size = resp.u.device_attr.max_rdma_size;
result->device_caps = resp.u.device_attr.device_caps;
+ result->guid = resp.u.device_attr.guid;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 9714105fcf7e..668d033f7477 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -112,6 +112,7 @@ struct efa_com_get_device_attr_result {
u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u64 guid;
u32 mtu;
u32 fw_version;
u32 admin_api_version;
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 1a777791bea3..ad225823e6f2 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -441,6 +441,7 @@ static int efa_ib_device_add(struct efa_dev *dev)
efa_set_host_info(dev);
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
+ dev->ibdev.node_guid = dev->dev_attr.guid;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
dev->ibdev.dev.parent = &pdev->dev;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index b1e0a1b7c59d..cc13415ff7e7 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1684,14 +1684,14 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata)
+ struct uverbs_attr_bundle *attrs)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct ib_umem_dmabuf *umem_dmabuf;
struct efa_mr *mr;
int err;
- mr = efa_alloc_mr(ibpd, access_flags, udata);
+ mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto err_out;
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index c8bd698e21b0..3c166359448d 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -274,7 +274,8 @@ void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);
int erdma_aeq_init(struct erdma_dev *dev);
-void erdma_aeq_destroy(struct erdma_dev *dev);
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 43ff40b5a09d..a3d8922d1ad1 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -158,20 +158,13 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq;
+ int ret;
- eq->depth = cmdq->max_outstandings;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
+ ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec)
- goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
@@ -181,12 +174,6 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
-
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
-
- return -ENOMEM;
}
int erdma_cmdq_init(struct erdma_dev *dev)
@@ -247,10 +234,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
- dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
- cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
-
- dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
+ erdma_eq_destroy(dev, &cmdq->eq);
dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 84ccdd8144c9..9a72fec6d5cc 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -80,50 +80,60 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
notify_eq(&dev->aeq);
}
-int erdma_aeq_init(struct erdma_dev *dev)
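+/* Common setup shared by the AEQ, the CEQs and the CMDQ EQ: allocate
+ * the EQ ring buffer and doorbell record and reset the queue state.
+ * Callers still set eq->db and program the queue registers themselves.
+ */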
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
- struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size = depth << EQE_SHIFT;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
-
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
&eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
- atomic64_set(&eq->notify_num, 0);
-
- eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
if (!eq->dbrec)
- goto err_out;
+ goto err_free_qbuf;
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
- upper_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
- lower_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+ eq->ci = 0;
+ eq->depth = depth;
return 0;
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+err_free_qbuf:
+ dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
eq->qbuf_dma_addr);
return -ENOMEM;
}
-void erdma_aeq_destroy(struct erdma_dev *dev)
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
- struct erdma_eq *eq = &dev->aeq;
-
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+}
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+
+ return 0;
}
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -234,32 +244,21 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
int ret;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
- atomic64_set(&eq->notify_num, 0);
+ ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE;
-
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec) {
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- eq->qbuf, eq->qbuf_dma_addr);
- return -ENOMEM;
- }
-
- eq->ci = 0;
dev->ceqs[ceqn].dev = dev;
+ dev->ceqs[ceqn].ready = true;
/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
ret = create_eq_cmd(dev, ceqn + 1, eq);
- dev->ceqs[ceqn].ready = ret ? false : true;
+ if (ret) {
+ erdma_eq_destroy(dev, eq);
+ dev->ceqs[ceqn].ready = false;
+ }
return ret;
}
@@ -283,9 +282,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err)
return;
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+ erdma_eq_destroy(dev, eq);
}
int erdma_ceqs_init(struct erdma_dev *dev)
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 7080f8a71ec4..62f497a71004 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -333,7 +333,7 @@ err_uninit_cmdq:
erdma_cmdq_destroy(dev);
err_uninit_aeq:
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
err_uninit_comm_irq:
erdma_comm_irq_uninit(dev);
@@ -366,7 +366,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
erdma_ceqs_uninit(dev);
erdma_hw_reset(dev);
erdma_cmdq_destroy(dev);
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
erdma_comm_irq_uninit(dev);
pci_free_irq_vectors(dev->pdev);
erdma_device_uninit(dev);
@@ -490,6 +490,7 @@ static const struct ib_device_ops erdma_device_ops = {
.dereg_mr = erdma_dereg_mr,
.destroy_cq = erdma_destroy_cq,
.destroy_qp = erdma_destroy_qp,
+ .disassociate_ucontext = erdma_disassociate_ucontext,
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index d7e1cbf9f5c2..51d619edb6c5 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -1544,11 +1544,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
return ret;
}
+static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
+{
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_IDLE:
+ return IB_QPS_INIT;
+ case ERDMA_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case ERDMA_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case ERDMA_QP_STATE_CLOSING:
+ return IB_QPS_ERR;
+ case ERDMA_QP_STATE_TERMINATE:
+ return IB_QPS_ERR;
+ case ERDMA_QP_STATE_ERROR:
+ return IB_QPS_ERR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
- struct erdma_qp *qp;
struct erdma_dev *dev;
+ struct erdma_qp *qp;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1575,6 +1595,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = query_qp_state(qp);
+
return 0;
}
@@ -1701,6 +1724,10 @@ err_out_xa:
return ret;
}
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
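+ /* Nothing device-specific to tear down here. */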
+}
+
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
{
struct erdma_cmdq_config_mtu_req req;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index 6afdc02f5869..c998acd39a78 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -344,6 +344,7 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 virt, int access, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 3e02c474f59f..4fc5b9d5fea8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
u8 tc_mode = 0;
int ret;
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
- return -EOPNOTSUPP;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
@@ -83,7 +85,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ret = 0;
if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- return ret;
+ goto err_out;
if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -91,8 +93,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
else
ah->av.sl = rdma_ah_get_sl(ah_attr);
- if (!check_sl_valid(hr_dev, ah->av.sl))
- return -EINVAL;
+ if (!check_sl_valid(hr_dev, ah->av.sl)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 02baa853a76c..c7c167e2a045 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1041,9 +1041,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
* @bt_level: base address table level
* @unit: ba entries per bt page
*/
-static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
- u32 step;
+ u64 step;
int max;
int i;
@@ -1079,7 +1079,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
{
struct hns_roce_buf_region *r;
int total = 0;
- int step;
+ u64 step;
int i;
for (i = 0; i < region_cnt; i++) {
@@ -1110,7 +1110,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
int ret = 0;
int max_ofs;
int level;
- u32 step;
+ u64 step;
int end;
if (hopnum <= 1)
@@ -1134,10 +1134,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
/* config L1 bt to last bt and link them to corresponding parent */
for (level = 1; level < hopnum; level++) {
- cur = hem_list_search_item(&mid_bt[level], offset);
- if (cur) {
- hem_ptrs[level] = cur;
- continue;
+ if (!hem_list_is_bottom_bt(hopnum, level)) {
+ cur = hem_list_search_item(&mid_bt[level], offset);
+ if (cur) {
+ hem_ptrs[level] = cur;
+ continue;
+ }
}
step = hem_list_calc_ba_range(hopnum, level, unit);
@@ -1147,7 +1149,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
}
start_aligned = (distance / step) * step + r->offset;
- end = min_t(int, start_aligned + step - 1, max_ofs);
+ end = min_t(u64, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
true);
if (!cur) {
@@ -1235,7 +1237,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
struct hns_roce_hem_item *hem, *temp_hem;
int total = 0;
int offset;
- int step;
+ u64 step;
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step < 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 621b057fb9da..24e906b9d3ae 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1681,8 +1681,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
bd_idx = i / CNT_PER_DESC;
- if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
- bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
+ if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
+ !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
break;
cnt_data = (__le64 *)&desc[bd_idx].data[0];
@@ -2972,6 +2972,9 @@ err_llm_init_failed:
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
+
hns_roce_function_clear(hr_dev);
if (!hr_dev->is_vf)
@@ -4423,12 +4426,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
upper_32_bits(to_hr_hw_page_addr(mtts[0])));
hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
- qpc_mask->rq_nxt_blk_addr = 0;
-
- hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
- upper_32_bits(to_hr_hw_page_addr(mtts[1])));
- hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ context->rq_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
+ qpc_mask->rq_nxt_blk_addr = 0;
+ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
+ }
return 0;
}
@@ -6193,6 +6198,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
struct pci_dev *pdev = hr_dev->pci_dev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
const struct hnae3_ae_ops *ops = ae_dev->ops;
+ enum hnae3_reset_type reset_type;
irqreturn_t int_work = IRQ_NONE;
u32 int_en;
@@ -6204,10 +6210,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+ reset_type = hr_dev->is_vf ?
+ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET;
+
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
- ops->set_default_reset_request(ae_dev,
- HNAE3_FUNC_RESET);
+ ops->set_default_reset_request(ae_dev, reset_type);
if (ops->reset_event)
ops->reset_event(pdev, NULL);
@@ -6277,7 +6285,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
res_type == ECC_RESOURCE_SCCC)
return le64_to_cpu(*data);
- return le64_to_cpu(*data) << PAGE_SHIFT;
+ return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT;
}
static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
@@ -6949,9 +6957,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
hns_roce_handle_device_err(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
- free_mr_exit(hr_dev);
-
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 1de384ce4d0e..6b03ba671ff8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1460,19 +1460,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
__acquire(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else if (send_cq == recv_cq) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@@ -1492,13 +1492,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock(&recv_cq->lock);
} else if (send_cq == recv_cq) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
}
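The spin_lock_irq()/spin_unlock_irq() pairs above become plain spin_lock()/spin_unlock(), which assumes every caller already runs with interrupts disabled. A hypothetical caller pattern under that assumption (the flags variable and the outer lock name are illustrative, not from this diff):

    unsigned long flags;

    spin_lock_irqsave(&hr_dev->qp_list_lock, flags); /* IRQs now off */
    hns_roce_lock_cqs(send_cq, recv_cq);             /* plain spin_lock() is safe */
    /* ... destroy or modify the QP ... */
    hns_roce_unlock_cqs(send_cq, recv_cq);
    spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);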
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index fc0ce35da14e..eeb932e58730 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ird=%d\n",
- attr->max_rd_atomic,
+ attr->max_dest_rd_atomic,
dev->hw_attrs.max_hw_ird);
return -EINVAL;
}
@@ -3085,7 +3085,7 @@ error:
static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 len, u64 virt,
int fd, int access,
- struct ib_udata *udata)
+ struct uverbs_attr_bundle *attrs)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
struct ib_umem_dmabuf *umem_dmabuf;
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index d13abc954d2a..67c2d43135a8 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
create_req->length = umem->length;
create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
- create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
+ create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
create_req->page_count = num_pages_total;
ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
@@ -511,13 +511,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
PAGE_SHIFT;
prot = pgprot_writecombine(vma->vm_page_prot);
- ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
+ ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
NULL);
if (ret)
ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
else
- ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
- pfn, gc->db_page_size, ret);
+ ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
+ pfn, PAGE_SIZE, ret);
return ret;
}
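The mana hunks decouple the RDMA code from the CPU page size: the GDMA page type is computed against the hardware's MANA_PAGE_SHIFT, and the doorbell mapping covers exactly one CPU page. A hedged worked example of the first change, assuming MANA_PAGE_SHIFT is 12 (4 KB hardware pages):

    /* page_sz = 64 KB -> order_base_2(SZ_64K) = 16 -> type = 16 - 12 = 4 */
    create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;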
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 9a439569ffcf..d7327735b8d0 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -829,7 +829,6 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
- char alias_wq_name[22];
int ret = 0;
int i, j;
union ib_gid gid;
@@ -875,9 +874,8 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
dev->sriov.alias_guid.ports_guid[i].port = i;
- snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
- alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
+ alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i);
if (!dev->sriov.alias_guid.ports_guid[i].wq) {
ret = -ENOMEM;
goto err_thread;
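Both this hunk and the mad.c hunks below rely on alloc_ordered_workqueue() accepting a printf-style name, which removes the need for a local snprintf() buffer. A minimal sketch of the pattern (the name and index are illustrative):

    struct workqueue_struct *wq;

    /* format string + varargs replace snprintf() into a fixed buffer */
    wq = alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i);
    if (!wq)
            return -ENOMEM;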
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index dc9cf45d2d32..e6e132f10625 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -2158,7 +2158,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
struct mlx4_ib_demux_ctx *ctx,
int port)
{
- char name[21];
int ret = 0;
int i;
@@ -2194,24 +2193,21 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
- snprintf(name, sizeof(name), "mlx4_ibt%d", port);
- ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->wq = alloc_ordered_workqueue("mlx4_ibt%d", WQ_MEM_RECLAIM, port);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wq;
}
- snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
- ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->wi_wq = alloc_ordered_workqueue("mlx4_ibwi%d", WQ_MEM_RECLAIM, port);
if (!ctx->wi_wq) {
pr_err("Failed to create wire WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wiwq;
}
- snprintf(name, sizeof(name), "mlx4_ibud%d", port);
- ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->ud_wq = alloc_ordered_workqueue("mlx4_ibud%d", WQ_MEM_RECLAIM, port);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 72a526236c2e..b38961f5058e 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \
cong.o \
counters.o \
cq.o \
+ data_direct.o \
dm.o \
doorbell.o \
gsi.o \
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 895b62cc528d..7c08e3008927 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -245,3 +245,24 @@ int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
MLX5_SET(dealloc_uar_in, in, uid, uid);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
+
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid)
+{
+ u8 out[MLX5_ST_SZ_BYTES(query_vuid_out) +
+ MLX5_ST_SZ_BYTES(array1024_auto)] = {};
+ u8 in[MLX5_ST_SZ_BYTES(query_vuid_in)] = {};
+ char *vuid;
+ int err;
+
+ MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID);
+ MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id));
+ MLX5_SET(query_vuid_in, in, data_direct, data_direct);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vuid = MLX5_ADDR_OF(query_vuid_out, out, vuid);
+ memcpy(out_vuid, vuid, MLX5_ST_SZ_BYTES(array1024_auto));
+ return 0;
+}
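mlx5_cmd_query_vuid() above returns the device's VUID string into a caller-supplied buffer. A sketch of the expected call site, matching how mlx5_ib_data_direct_init() uses it later in this patch:

    char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
    int err;

    /* query the VUID of the data direct function affiliated with mdev */
    err = mlx5_cmd_query_vuid(mdev, true, vuid);
    if (err)
            return err;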
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index e5cd31270443..e6c88b6ebd0d 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -58,4 +58,6 @@ int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
new file mode 100644
index 000000000000..b9ba84afaae2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "mlx5_ib.h"
+#include "data_direct.h"
+
+static LIST_HEAD(mlx5_data_direct_dev_list);
+static LIST_HEAD(mlx5_data_direct_reg_list);
+
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_data_direct_mutex);
+
+struct mlx5_data_direct_registration {
+ struct mlx5_ib_dev *ibdev;
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1];
+ struct list_head list;
+};
+
+static const struct pci_device_id mlx5_data_direct_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */
+ { 0, }
+};
+
+static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
+ int ret;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
+ return PTR_ERR(vpd_data);
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len);
+ if (start < 0) {
+ ret = start;
+ pci_err(pdev, "VU keyword not found, err=%d\n", ret);
+ goto end;
+ }
+
+ dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
+ ret = dev->vuid ? 0 : -ENOMEM;
+
+end:
+ kfree(vpd_data);
+ return ret;
+}
+
+static void mlx5_data_direct_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
+
+static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_warn(&pdev->dev,
+ "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err);
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err);
+ return err;
+ }
+ }
+
+ dma_set_max_seg_size(&pdev->dev, SZ_2G);
+ return 0;
+}
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid)
+{
+ struct mlx5_data_direct_registration *reg;
+ struct mlx5_data_direct_dev *dev;
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->ibdev = ibdev;
+ strcpy(reg->vuid, vuid);
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) {
+ if (strcmp(dev->vuid, vuid) == 0) {
+ mlx5_ib_data_direct_bind(ibdev, dev);
+ break;
+ }
+ }
+
+ /* Add the registration to its global list, to be used upon bind/unbind
+ * of its affiliated data direct device
+ */
+ list_add_tail(&reg->list, &mlx5_data_direct_reg_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+ return 0;
+}
+
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (reg->ibdev == ibdev) {
+ list_del(&reg->list);
+ kfree(reg);
+ goto end;
+ }
+ }
+
+ WARN_ON(true);
+end:
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_bind(reg->ibdev, dev);
+ }
+
+ /* Add the data direct device to the global list so that further IB
+ * devices can bind to it later as well
+ */

+ list_add_tail(&dev->list, &mlx5_data_direct_dev_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ /* Prevent any further affiliations */
+ list_del(&dev->list);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_unbind(reg->ibdev);
+ }
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx5_data_direct_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->device = &pdev->dev;
+ dev->pdev = pdev;
+
+ pci_set_drvdata(dev->pdev, dev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err);
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ err = mlx5_data_direct_set_dma_caps(pdev);
+ if (err)
+ goto err_disable;
+
+ if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
+ dev_dbg(dev->device, "Enabling pci atomics failed\n");
+
+ err = mlx5_data_direct_vpd_get_vuid(dev);
+ if (err)
+ goto err_disable;
+
+ mlx5_data_direct_dev_reg(dev);
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+err:
+ kfree(dev);
+ return err;
+}
+
+static void mlx5_data_direct_remove(struct pci_dev *pdev)
+{
+ struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_data_direct_dev_unreg(dev);
+ pci_disable_device(pdev);
+ kfree(dev->vuid);
+ kfree(dev);
+}
+
+static struct pci_driver mlx5_data_direct_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mlx5_data_direct_pci_table,
+ .probe = mlx5_data_direct_probe,
+ .remove = mlx5_data_direct_remove,
+ .shutdown = mlx5_data_direct_shutdown,
+};
+
+int mlx5_data_direct_driver_register(void)
+{
+ return pci_register_driver(&mlx5_data_direct_driver);
+}
+
+void mlx5_data_direct_driver_unregister(void)
+{
+ pci_unregister_driver(&mlx5_data_direct_driver);
+}
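The new driver above implements a rendezvous by VUID that works in either probe order: an IB device registering first is matched when the data direct PCI function later probes, and vice versa. A condensed sketch of the two paths (both scans run under mlx5_data_direct_mutex):

    /* IB side first: scan existing data direct devices */
    mlx5_data_direct_ib_reg(ibdev, vuid);  /* strcmp(dev->vuid, vuid) == 0 -> bind */

    /* PCI side first: scan existing IB registrations */
    mlx5_data_direct_dev_reg(dev);         /* strcmp(dev->vuid, reg->vuid) == 0 -> bind */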
diff --git a/drivers/infiniband/hw/mlx5/data_direct.h b/drivers/infiniband/hw/mlx5/data_direct.h
new file mode 100644
index 000000000000..2fd2bdbe8f69
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DATA_DIRECT_H
+#define _MLX5_IB_DATA_DIRECT_H
+
+struct mlx5_ib_dev;
+
+struct mlx5_data_direct_dev {
+ struct device *device;
+ struct pci_dev *pdev;
+ char *vuid;
+ struct list_head list;
+};
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid);
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev);
+int mlx5_data_direct_driver_register(void);
+void mlx5_data_direct_driver_unregister(void);
+
+#endif
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index c7a4ee896121..49af1cfbe6d1 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -13,6 +13,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
int vport_index)
{
struct mlx5_ib_dev *ibdev;
+ struct net_device *ndev;
ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
if (!ibdev)
@@ -20,12 +21,9 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
ibdev->port[vport_index].rep = rep;
rep->rep_data[REP_IB].priv = ibdev;
- write_lock(&ibdev->port[vport_index].roce.netdev_lock);
- ibdev->port[vport_index].roce.netdev =
- mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
- write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
+ ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
- return 0;
+ return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1);
}
static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
@@ -104,10 +102,15 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->is_rep = true;
vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
- ibdev->port[vport_index].roce.netdev =
- mlx5_ib_get_rep_netdev(lag_master->priv.eswitch, rep->vport);
ibdev->mdev = lag_master;
ibdev->num_ports = num_ports;
+ ibdev->ib_dev.phys_port_cnt = num_ports;
+ ret = ib_device_set_netdev(&ibdev->ib_dev,
+ mlx5_ib_get_rep_netdev(lag_master->priv.eswitch,
+ rep->vport),
+ vport_index + 1);
+ if (ret)
+ goto fail_add;
ret = __mlx5_ib_add(ibdev, profile);
if (ret)
@@ -160,9 +163,8 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
}
port = &dev->port[vport_index];
- write_lock(&port->roce.netdev_lock);
- port->roce.netdev = NULL;
- write_unlock(&port->roce.netdev_lock);
+
+ ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1);
rep->rep_data[REP_IB].priv = NULL;
port->rep = NULL;
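The ib_rep.c and main.c hunks replace the driver's rwlock-protected roce.netdev pointer with the RDMA core's per-port tracking via ib_device_set_netdev(). The lookup side returns a held reference, so every ib_device_get_netdev() must be paired with dev_put(); a minimal sketch of the pattern used throughout the hunks below:

    struct net_device *ndev;

    ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
    if (ndev) {
            /* ... compare or inspect ndev ... */
            dev_put(ndev); /* drop the reference the core took for us */
    }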
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6048b9ad13bb..4999239c8f41 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -48,6 +48,7 @@
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include "macsec.h"
+#include "data_direct.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -146,16 +147,52 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
continue;
-
- read_lock(&port->roce.netdev_lock);
- rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
- port->rep->vport);
- if (rep_ndev == ndev) {
- read_unlock(&port->roce.netdev_lock);
+ rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);
+ if (rep_ndev && rep_ndev == ndev) {
+ dev_put(rep_ndev);
*port_num = i + 1;
return &port->roce;
}
- read_unlock(&port->roce.netdev_lock);
+
+ dev_put(rep_ndev);
+ }
+
+ return NULL;
+}
+
+static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ struct net_device *upper,
+ struct net_device *ib_ndev)
+{
+ if (!dev->ib_active)
+ return false;
+
+ /* Event is about our upper device */
+ if (upper == ndev)
+ return true;
+
+ /* RDMA device is not in lag and not in switchdev */
+ if (!dev->is_rep && !upper && ndev == ib_ndev)
+ return true;
+
+ /* RDMA device is in switchdev */
+ if (dev->is_rep && ndev == ib_ndev)
+ return true;
+
+ return false;
+}
+
+static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < ibdev->num_ports; i++) {
+ port = &ibdev->port[i];
+ if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK) {
+ return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
+ }
}
return NULL;
@@ -167,6 +204,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
u32 port_num = roce->native_port_num;
+ struct net_device *ib_ndev = NULL;
struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *ibdev;
@@ -180,47 +218,63 @@ static int mlx5_netdev_event(struct notifier_block *this,
/* Should already be registered during the load */
if (ibdev->is_rep)
break;
- write_lock(&roce->netdev_lock);
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ /* Exit if already registered */
+ if (ib_ndev)
+ goto put_ndev;
+
if (ndev->dev.parent == mdev->device)
- roce->netdev = ndev;
- write_unlock(&roce->netdev_lock);
+ ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
break;
case NETDEV_UNREGISTER:
/* In case of reps, ib device goes away before the netdevs */
- write_lock(&roce->netdev_lock);
- if (roce->netdev == ndev)
- roce->netdev = NULL;
- write_unlock(&roce->netdev_lock);
- break;
+ if (ibdev->is_rep)
+ break;
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ if (ib_ndev == ndev)
+ ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
+ goto put_ndev;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
- if (lag_ndev) {
- upper = netdev_master_upper_dev_get(lag_ndev);
- dev_put(lag_ndev);
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
+ struct net_device *lag_ndev;
+
+ if (mlx5_lag_is_roce(mdev))
+ lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);
+ else /* SR-IOV lag */
+ lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev);
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ } else {
+ goto done;
+ }
}
if (ibdev->is_rep)
roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
if (!roce)
return NOTIFY_DONE;
- if ((upper == ndev ||
- ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
- ibdev->ib_active) {
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+
+ if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) {
struct ib_event ibev = { };
enum ib_port_state port_state;
if (get_port_state(&ibdev->ib_dev, port_num,
&port_state))
- goto done;
+ goto put_ndev;
if (roce->last_port_state == port_state)
- goto done;
+ goto put_ndev;
roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
@@ -229,7 +283,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- goto done;
+ goto put_ndev;
ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
@@ -240,38 +294,13 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
+put_ndev:
+ dev_put(ib_ndev);
done:
mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
-static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
- u32 port_num)
-{
- struct mlx5_ib_dev *ibdev = to_mdev(device);
- struct net_device *ndev;
- struct mlx5_core_dev *mdev;
-
- mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
- if (!mdev)
- return NULL;
-
- ndev = mlx5_lag_get_roce_netdev(mdev);
- if (ndev)
- goto out;
-
- /* Ensure ndev does not disappear before we invoke dev_hold()
- */
- read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
- ndev = ibdev->port[port_num - 1].roce.netdev;
- dev_hold(ndev);
- read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
-
-out:
- mlx5_ib_put_native_port_mdev(ibdev, port_num);
- return ndev;
-}
-
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
u32 ib_port_num,
u32 *native_port_num)
@@ -546,11 +575,11 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
if (!put_mdev)
goto out;
- ndev = mlx5_ib_get_netdev(device, port_num);
+ ndev = ib_device_get_netdev(device, port_num);
if (!ndev)
goto out;
- if (dev->lag_active) {
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
rcu_read_lock();
upper = netdev_master_upper_dev_get_rcu(ndev);
if (upper) {
@@ -3024,6 +3053,59 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
mutex_destroy(&devr->srq_lock);
}
+static int
+mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ void *mkc;
+ u32 mkey;
+ u32 pdn;
+ u32 *in;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ return err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ MLX5_SET(create_mkey_in, in, data_direct, 1);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ kvfree(in);
+ if (err)
+ goto err;
+
+ dev->ddr.mkey = mkey;
+ dev->ddr.pdn = pdn;
+ return 0;
+
+err:
+ mlx5_core_dealloc_pd(mdev, pdn);
+ return err;
+}
+
+static void
+mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
+{
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
+ mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
+}
+
static u32 get_core_cap_flags(struct ib_device *ibdev,
struct mlx5_hca_vport_context *rep)
{
@@ -3124,6 +3206,60 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str)
fw_rev_sub(dev->mdev));
}
+static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
+ lag_events);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_port *port;
+ struct net_device *ndev;
+ int i, err;
+ int portnum;
+
+ portnum = 0;
+ switch (event) {
+ case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
+ ndev = data;
+ if (ndev) {
+ if (!mlx5_lag_is_roce(mdev)) {
+ /* SR-IOV lag */
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (port->rep && port->rep->vport ==
+ MLX5_VPORT_UPLINK) {
+ portnum = i;
+ break;
+ }
+ }
+ }
+ err = ib_device_set_netdev(&dev->ib_dev, ndev,
+ portnum + 1);
+ dev_put(ndev);
+ if (err)
+ return err;
+ /* Rescan gids after new netdev assignment */
+ rdma_roce_rescan_device(&dev->ib_dev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
+{
+ dev->lag_events.notifier_call = lag_event;
+ blocking_notifier_chain_register(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
+
+static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev)
+{
+ blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
+
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -3145,6 +3281,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
goto err_destroy_vport_lag;
}
+ mlx5e_lag_event_register(dev);
dev->flow_db->lag_demux_ft = ft;
dev->lag_ports = mlx5_lag_get_num_ports(mdev);
dev->lag_active = true;
@@ -3162,6 +3299,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
if (dev->lag_active) {
dev->lag_active = false;
+ mlx5e_lag_event_unregister(dev);
mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
dev->flow_db->lag_demux_ft = NULL;
@@ -3420,6 +3558,41 @@ unbind:
return false;
}
+static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev)
+{
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return 0;
+
+ ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_create_data_direct_resources(dev);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dev->data_direct_mr_list);
+ ret = mlx5_data_direct_ib_reg(dev, vuid);
+ if (ret)
+ mlx5_ib_free_data_direct_resources(dev);
+
+ return ret;
+}
+
+static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return;
+
+ mlx5_data_direct_ib_unreg(dev);
+ mlx5_ib_free_data_direct_resources(dev);
+}
+
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
@@ -3796,6 +3969,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
dump_fill_mkey),
UA_MANDATORY));
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_reg_dmabuf_mr,
+ UVERBS_OBJECT_MR,
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum mlx5_ib_uapi_reg_dmabuf_flags,
+ UA_OPTIONAL));
+
static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
@@ -3805,6 +3986,7 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
@@ -3813,6 +3995,7 @@ static const struct uapi_definition mlx5_ib_defs[] = {
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
+ mlx5_ib_data_direct_cleanup(dev);
mlx5_ib_cleanup_multiport_master(dev);
WARN_ON(!xa_empty(&dev->odp_mkeys));
mutex_destroy(&dev->cap_mask_mutex);
@@ -3828,13 +4011,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.dev.parent = mdev->device;
dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
- rwlock_init(&dev->port[i].roce.netdev_lock);
dev->port[i].roce.dev = dev;
dev->port[i].roce.native_port_num = i + 1;
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
@@ -3866,6 +4047,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
mutex_init(&dev->cap_mask_mutex);
+ mutex_init(&dev->data_direct_lock);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
xa_init(&dev->odp_mkeys);
@@ -3874,6 +4056,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
+ err = mlx5_ib_data_direct_init(dev);
+ if (err)
+ goto err_mp;
+
return 0;
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
@@ -4094,7 +4280,6 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
.create_wq = mlx5_ib_create_wq,
.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
.destroy_wq = mlx5_ib_destroy_wq,
- .get_netdev = mlx5_ib_get_netdev,
.modify_wq = mlx5_ib_modify_wq,
INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
@@ -4293,6 +4478,22 @@ static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ ibdev->data_direct_dev = dev;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ mlx5_ib_revoke_data_direct_mrs(ibdev);
+ ibdev->data_direct_dev = NULL;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
@@ -4522,6 +4723,7 @@ static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
mplane->mdev = mparent->mdev;
mplane->num_ports = mparent->num_plane;
mplane->sub_dev_name = name;
+ mplane->ib_dev.phys_port_cnt = mplane->num_ports;
ret = __mlx5_ib_add(mplane, &plane_profile);
if (ret)
@@ -4638,6 +4840,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
+ dev->ib_dev.phys_port_cnt = num_ports;
if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
@@ -4715,17 +4918,23 @@ static int __init mlx5_ib_init(void)
ret = mlx5r_rep_init();
if (ret)
goto rep_err;
+ ret = mlx5_data_direct_driver_register();
+ if (ret)
+ goto dd_err;
ret = auxiliary_driver_register(&mlx5r_mp_driver);
if (ret)
goto mp_err;
ret = auxiliary_driver_register(&mlx5r_driver);
if (ret)
goto drv_err;
+
return 0;
drv_err:
auxiliary_driver_unregister(&mlx5r_mp_driver);
mp_err:
+ mlx5_data_direct_driver_unregister();
+dd_err:
mlx5r_rep_cleanup();
rep_err:
mlx5_ib_qp_event_cleanup();
@@ -4737,6 +4946,7 @@ qp_event_err:
static void __exit mlx5_ib_cleanup(void)
{
+ mlx5_data_direct_driver_unregister();
auxiliary_driver_unregister(&mlx5r_driver);
auxiliary_driver_unregister(&mlx5r_mp_driver);
mlx5r_rep_cleanup();
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d5eb1b726675..23fd72f7f63d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,17 +63,6 @@ __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
return GENMASK(largest_pg_shift, pgsz_shift);
}
-/*
- * For mkc users, instead of a page_offset the command has a start_iova which
- * specifies both the page_offset and the on-the-wire IOVA
- */
-#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
- ib_umem_find_best_pgsz(umem, \
- __mlx5_log_page_size_to_bitmap( \
- __mlx5_bit_sz(typ, log_pgsz_fld), \
- pgsz_shift), \
- iova)
-
static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
unsigned int offset_shift)
@@ -640,6 +629,8 @@ enum mlx5_mkey_type {
MLX5_MKEY_MR = 1,
MLX5_MKEY_MW,
MLX5_MKEY_INDIRECT_DEVX,
+ MLX5_MKEY_NULL,
+ MLX5_MKEY_IMPLICIT_CHILD,
};
struct mlx5r_cache_rb_key {
@@ -682,6 +673,8 @@ struct mlx5_ib_mr {
struct mlx5_ib_mkey mmkey;
struct ib_umem *umem;
+ /* The mr is data direct related */
+ u8 data_direct :1;
union {
/* Used only by kernel MRs (umem == NULL) */
@@ -719,6 +712,11 @@ struct mlx5_ib_mr {
} odp_destroy;
struct ib_odp_counters odp_stats;
bool is_odp_implicit;
+ /* The affiliated data direct crossed mr */
+ struct mlx5_ib_mr *dd_crossed_mr;
+ struct list_head dd_node;
+ u8 revoked :1;
+ struct mlx5_ib_mkey null_mmkey;
};
};
};
@@ -796,6 +794,7 @@ struct mlx5_cache_ent {
u8 is_tmp:1;
u8 disabled:1;
u8 fill_to_high_water:1;
+ u8 tmp_cleanup_scheduled:1;
/*
* - limit is the low water mark for stored mkeys, 2* limit is the
@@ -827,7 +826,6 @@ struct mlx5_mkey_cache {
struct mutex rb_lock;
struct dentry *fs_root;
unsigned long last_add;
- struct delayed_work remove_ent_dwork;
};
struct mlx5_ib_port_resources {
@@ -835,6 +833,11 @@ struct mlx5_ib_port_resources {
struct work_struct pkey_change_work;
};
+struct mlx5_data_direct_resources {
+ u32 pdn;
+ u32 mkey;
+};
+
struct mlx5_ib_resources {
struct ib_cq *c0;
struct mutex cq_lock;
@@ -885,8 +888,6 @@ struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
* netdev pointer
*/
- rwlock_t netdev_lock;
- struct net_device *netdev;
struct notifier_block nb;
struct netdev_net_notifier nn;
struct notifier_block mdev_nb;
@@ -1131,7 +1132,11 @@ struct mlx5_macsec {
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
+ struct mlx5_data_direct_dev *data_direct_dev;
+ /* protect accessing data_direct_dev */
+ struct mutex data_direct_lock;
struct notifier_block mdev_events;
+ struct notifier_block lag_events;
int num_ports;
/* serialize update of capability mask
*/
@@ -1161,6 +1166,7 @@ struct mlx5_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+ struct list_head data_direct_mr_list;
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
@@ -1185,6 +1191,7 @@ struct mlx5_ib_dev {
u16 pkey_table_len;
u8 lag_ports;
struct mlx5_special_mkeys mkeys;
+ struct mlx5_data_direct_resources ddr;
#ifdef CONFIG_MLX5_MACSEC
struct mlx5_macsec macsec;
@@ -1345,7 +1352,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags,
@@ -1356,7 +1363,6 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags);
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
@@ -1425,6 +1431,10 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev);
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
@@ -1633,8 +1643,6 @@ static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
-int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
-
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
/*
@@ -1707,4 +1715,20 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
return (port - 1) / dev->num_ports + 1;
}
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+static __always_inline unsigned long
+mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ u64 iova)
+{
+ int page_size_bits =
+ MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
+ unsigned long bitmap =
+ __mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+
+ return ib_umem_find_best_pgsz(umem, bitmap, iova);
+}
+
#endif /* MLX5_IB_H */
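The new mlx5_umem_mkc_find_best_pgsz() derives the candidate page-size bitmap from the width of the mkc log_page_size field. A hedged worked example of the arithmetic, following the __mlx5_log_page_size_to_bitmap() helper kept earlier in this header:

    /* 5-bit field: largest encodable page shift = 2^5 - 1 = 31 (2 GB pages);
     * 6-bit field (umr_log_entity_size_5 set): raised to 63, capped at the
     * word size. bitmap = GENMASK(largest_shift, 0), which
     * ib_umem_find_best_pgsz() then intersects with the umem layout.
     */
    unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);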
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 98bd8eaa393e..45d9dc9c6c8f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -43,18 +43,22 @@
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"
+#include "data_direct.h"
enum {
MAX_PENDING_REG_MR = 8,
};
+#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4
#define MLX5_UMR_ALIGN 2048
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate);
+ unsigned int page_size, bool populate,
+ int access_mode);
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
@@ -211,9 +215,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
push_mkey_locked(ent, mkey_out->mkey);
+ ent->pending--;
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- ent->pending--;
spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
kfree(mkey_out);
}
@@ -527,6 +531,21 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
}
}
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
+{
+ u32 mkey;
+
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ while (ent->mkeys_queue.ci) {
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ }
+ ent->tmp_cleanup_scheduled = false;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+}
+
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
@@ -598,7 +617,11 @@ static void delayed_cache_work_func(struct work_struct *work)
struct mlx5_cache_ent *ent;
ent = container_of(work, struct mlx5_cache_ent, dwork.work);
- __cache_work_func(ent);
+ /* temp entries are never filled, only cleaned */
+ if (ent->is_tmp)
+ clean_keys(ent->dev, ent);
+ else
+ __cache_work_func(ent);
}
static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
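The cache hunks in mr.c replace the global remove_ent_dwork scavenger with per-entry cleanup: storing a revoked mkey into a temp entry schedules that entry's own delayed work, and the work handler drains it. A condensed sketch of the new lifecycle (names from this patch):

    /* mlx5_revoke_mr(): after a successful store into a temp entry */
    if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
            mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
                             msecs_to_jiffies(30 * 1000));
            ent->tmp_cleanup_scheduled = true;
    }

    /* delayed_cache_work_func(): temp entries are drained, never refilled */
    if (ent->is_tmp)
            clean_keys(ent->dev, ent); /* clears tmp_cleanup_scheduled */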
@@ -659,6 +682,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
{
struct rb_node *node = dev->cache.rb_root.rb_node;
struct mlx5_cache_ent *cur, *smallest = NULL;
+ u64 ndescs_limit;
int cmp;
/*
@@ -677,10 +701,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
return cur;
}
+ /*
+ * Limit the usage of mkeys larger than twice the required size while
+ * also allowing the use of the smallest cache entry for small MRs.
+ */
+ ndescs_limit = max_t(u64, rb_key.ndescs * 2,
+ MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS);
+
return (smallest &&
smallest->rb_key.access_mode == rb_key.access_mode &&
smallest->rb_key.access_flags == rb_key.access_flags &&
- smallest->rb_key.ats == rb_key.ats) ?
+ smallest->rb_key.ats == rb_key.ats &&
+ smallest->rb_key.ndescs <= ndescs_limit) ?
smallest :
NULL;
}
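With the ndescs_limit bound above, a slightly larger persistent entry may be reused, but never one more than twice the required size. A worked example: a request with rb_key.ndescs = 3 yields ndescs_limit = max(3 * 2, MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS) = max(6, 4) = 6, so the smallest matching entry is reused only if it holds at most 6 descriptors; otherwise the caller falls back to creating an exact-size mkey.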
@@ -765,21 +797,6 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
return _mlx5_mr_cache_alloc(dev, ent, access_flags);
}
-static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
-{
- u32 mkey;
-
- cancel_delayed_work(&ent->dwork);
- spin_lock_irq(&ent->mkeys_queue.lock);
- while (ent->mkeys_queue.ci) {
- mkey = pop_mkey_locked(ent);
- spin_unlock_irq(&ent->mkeys_queue.lock);
- mlx5_core_destroy_mkey(dev->mdev, mkey);
- spin_lock_irq(&ent->mkeys_queue.lock);
- }
- spin_unlock_irq(&ent->mkeys_queue.lock);
-}
-
static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root || dev->is_rep)
@@ -892,10 +909,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
ent->limit = 0;
mlx5_mkey_cache_debugfs_add_ent(dev, ent);
- } else {
- mod_delayed_work(ent->dev->cache.wq,
- &ent->dev->cache.remove_ent_dwork,
- msecs_to_jiffies(30 * 1000));
}
return ent;
@@ -906,35 +919,6 @@ mkeys_err:
return ERR_PTR(ret);
}
-static void remove_ent_work_func(struct work_struct *work)
-{
- struct mlx5_mkey_cache *cache;
- struct mlx5_cache_ent *ent;
- struct rb_node *cur;
-
- cache = container_of(work, struct mlx5_mkey_cache,
- remove_ent_dwork.work);
- mutex_lock(&cache->rb_lock);
- cur = rb_last(&cache->rb_root);
- while (cur) {
- ent = rb_entry(cur, struct mlx5_cache_ent, node);
- cur = rb_prev(cur);
- mutex_unlock(&cache->rb_lock);
-
- spin_lock_irq(&ent->mkeys_queue.lock);
- if (!ent->is_tmp) {
- spin_unlock_irq(&ent->mkeys_queue.lock);
- mutex_lock(&cache->rb_lock);
- continue;
- }
- spin_unlock_irq(&ent->mkeys_queue.lock);
-
- clean_keys(ent->dev, ent);
- mutex_lock(&cache->rb_lock);
- }
- mutex_unlock(&cache->rb_lock);
-}
-
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mkey_cache *cache = &dev->cache;
@@ -950,7 +934,6 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->slow_path_mutex);
mutex_init(&dev->cache.rb_lock);
dev->cache.rb_root = RB_ROOT;
- INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -962,7 +945,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mlx5_mkey_cache_debugfs_init(dev);
mutex_lock(&cache->rb_lock);
for (i = 0; i <= mkey_cache_max_order(dev); i++) {
- rb_key.ndescs = 1 << (i + 2);
+ rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i;
ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
if (IS_ERR(ent)) {
ret = PTR_ERR(ent);
@@ -1001,7 +984,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
return;
mutex_lock(&dev->cache.rb_lock);
- cancel_delayed_work(&dev->cache.remove_ent_dwork);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -1062,6 +1044,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, length64, 1);
set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
pd);
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
@@ -1126,12 +1109,10 @@ static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
- int access_flags)
+ int access_flags, int access_mode)
{
- struct mlx5r_cache_rb_key rb_key = {
- .access_mode = MLX5_MKC_ACCESS_MODE_MTT,
- };
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5r_cache_rb_key rb_key = {};
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
unsigned int page_size;
@@ -1139,11 +1120,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
else
- page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
- 0, iova);
+ page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
+ rb_key.access_mode = access_mode;
rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
@@ -1154,7 +1135,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
*/
if (!ent) {
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, false);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode);
mutex_unlock(&dev->slow_path_mutex);
if (IS_ERR(mr))
return mr;
@@ -1175,13 +1156,71 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
return mr;
}
+static struct ib_mr *
+reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags,
+ u32 crossed_lkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int access_mode = MLX5_MKC_ACCESS_MODE_CROSSING;
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, crossing_vhca_mkey))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, crossing_target_vhca_id,
+ MLX5_CAP_GEN(dev->mdev, vhca_id));
+ MLX5_SET(mkc, mkc, translations_octword_size, crossed_lkey);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+
+ /* For this crossing mkey, the address is set to 0 and len covers iova + length */
+ set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd);
+ MLX5_SET64(mkc, mkc, len, iova + length);
+
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_2;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ set_mr_fields(dev, mr, length, access_flags, iova);
+ mr->ibmr.pd = pd;
+ kvfree(in);
+ mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key);
+
+ return &mr->ibmr;
+err_2:
+ kvfree(in);
+err_1:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
/*
* If ibmr is NULL it will be allocated by reg_create.
* Else, the given ibmr will be used.
*/
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate)
+ unsigned int page_size, bool populate,
+ int access_mode)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1190,7 +1229,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
int inlen;
u32 *in;
int err;
- bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
+ (access_mode == MLX5_MKC_ACCESS_MODE_MTT);
+ bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
if (!page_size)
return ERR_PTR(-EINVAL);
@@ -1213,7 +1254,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
if (populate) {
- if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
+ if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) {
err = -EINVAL;
goto err_2;
}
@@ -1229,14 +1270,22 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
populate ? pd : dev->umrc.pd);
+ /* In case of a data direct flow, overwrite the pdn field with the internal kernel PD */
+ if (umem->is_dmabuf && ksm_mode)
+ MLX5_SET(mkc, mkc, pd, dev->ddr.pdn);
+
MLX5_SET(mkc, mkc, free, !populate);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET64(mkc, mkc, len, umem->length);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
- MLX5_SET(mkc, mkc, translations_octword_size,
- get_octo_len(iova, umem->length, mr->page_shift));
+ if (ksm_mode)
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift) * 2);
+ else
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
if (mlx5_umem_needs_ats(dev, umem, access_flags))
MLX5_SET(mkc, mkc, ma_translation_mode, 1);
@@ -1373,13 +1422,15 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
if (xlt_with_umr) {
- mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
+ mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT);
} else {
- unsigned int page_size = mlx5_umem_find_best_pgsz(
- umem, mkc, log_page_size, 0, iova);
+ unsigned int page_size =
+ mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, true);
+ mr = reg_create(pd, umem, iova, access_flags, page_size,
+ true, MLX5_MKC_ACCESS_MODE_MTT);
mutex_unlock(&dev->slow_path_mutex);
}
if (IS_ERR(mr)) {
@@ -1442,7 +1493,8 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(odp))
return ERR_CAST(odp);
- mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
+ mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT);
if (IS_ERR(mr)) {
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
@@ -1510,35 +1562,31 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};
-struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
- u64 length, u64 virt_addr,
- int fd, int access_flags,
- struct ib_udata *udata)
+static struct ib_mr *
+reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
+ u64 offset, u64 length, u64 virt_addr,
+ int fd, int access_flags, int access_mode)
{
+ bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
struct ib_umem_dmabuf *umem_dmabuf;
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
- !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- return ERR_PTR(-EOPNOTSUPP);
-
- mlx5_ib_dbg(dev,
- "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
- offset, virt_addr, length, fd, access_flags);
-
err = mlx5r_umr_resource_init(dev);
if (err)
return ERR_PTR(err);
- /* dmabuf requires xlt update via umr to work. */
- if (!mlx5r_umr_can_load_pas(dev, length))
- return ERR_PTR(-EINVAL);
+ if (!pinned_mode)
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev,
+ offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ else
+ umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(&dev->ib_dev,
+ dma_device, offset, length,
+ fd, access_flags);
- umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
- access_flags,
- &mlx5_ib_dmabuf_attach_ops);
if (IS_ERR(umem_dmabuf)) {
mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
PTR_ERR(umem_dmabuf));
@@ -1546,7 +1594,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
}
mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
- access_flags);
+ access_flags, access_mode);
if (IS_ERR(mr)) {
ib_umem_release(&umem_dmabuf->umem);
return ERR_CAST(mr);
@@ -1556,9 +1604,13 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
umem_dmabuf->private = mr;
- err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
- if (err)
- goto err_dereg_mr;
+ if (!pinned_mode) {
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+ } else {
+ mr->data_direct = true;
+ }
err = mlx5_ib_init_dmabuf_mr(mr);
if (err)
@@ -1566,10 +1618,101 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return &mr->ibmr;
err_dereg_mr:
- mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ __mlx5_ib_dereg_mr(&mr->ibmr);
return ERR_PTR(err);
}
+static struct ib_mr *
+reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct ib_mr *crossing_mr;
+ struct ib_mr *crossed_mr;
+ int ret = 0;
+
+ /* Per HW behaviour, the IOVA must be page aligned in KSM mode */
+ if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* The device's 'data direct mkey' was created without RO flags to
+ * simplify things and allow for a single mkey per device.
+ * Since relaxed ordering (RO) is not mandatory, mask it out accordingly.
+ */
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
+ offset, length, virt_addr, fd,
+ access_flags, MLX5_MKC_ACCESS_MODE_KSM);
+ if (IS_ERR(crossed_mr)) {
+ ret = PTR_ERR(crossed_mr);
+ goto end;
+ }
+
+ mutex_lock(&dev->slow_path_mutex);
+ crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags,
+ crossed_mr->lkey);
+ mutex_unlock(&dev->slow_path_mutex);
+ if (IS_ERR(crossing_mr)) {
+ __mlx5_ib_dereg_mr(crossed_mr);
+ ret = PTR_ERR(crossing_mr);
+ goto end;
+ }
+
+ list_add_tail(&to_mmr(crossed_mr)->dd_node, &dev->data_direct_mr_list);
+ to_mmr(crossing_mr)->dd_crossed_mr = to_mmr(crossed_mr);
+ to_mmr(crossing_mr)->data_direct = true;
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret ? ERR_PTR(ret) : crossing_mr;
+}
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int mlx5_access_flags = 0;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS)) {
+ err = uverbs_get_flags32(&mlx5_access_flags, attrs,
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x, mlx5_access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags, mlx5_access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ if (mlx5_access_flags & MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT)
+ return reg_user_mr_dmabuf_by_data_direct(pd, offset, length, virt_addr,
+ fd, access_flags);
+
+ return reg_user_mr_dmabuf(pd, pd->device->dma_device,
+ offset, length, virt_addr,
+ fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT);
+}
+
/*
* True if the change in access flags can be done via UMR, only some access
* flags can be updated.
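The rewritten mlx5_ib_reg_user_mr_dmabuf() above now dispatches on the new uapi flag; a summary sketch of the two resulting registration paths:

    /* MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT set:
     *   pinned dmabuf attached to the data direct PCI function,
     *   KSM mkey on the DD side plus a crossing mkey on the IB device
     * flag clear (default):
     *   movable dmabuf on the IB device itself, MTT mkey with
     *   move_notify-driven invalidation (mlx5_ib_dmabuf_attach_ops)
     */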
@@ -1601,8 +1744,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
- *page_size =
- mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+ *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
if (WARN_ON(!*page_size))
return false;
return (mr->mmkey.cache_ent->rb_key.ndescs) >=
@@ -1665,7 +1807,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
struct mlx5_ib_mr *mr = to_mmr(ib_mr);
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct)
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(
@@ -1793,7 +1935,7 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && mr->descs) {
+ if (!mr->umem && !mr->data_direct && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1847,13 +1989,51 @@ end:
return ret;
}
+static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ int err;
+
+ lockdep_assert_held(&dev->data_direct_lock);
+ mr->revoked = true;
+ err = mlx5r_umr_revoke_mr(mr);
+ if (WARN_ON(err))
+ return err;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
+ return 0;
+}
+
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_mr *mr, *next;
+
+ lockdep_assert_held(&dev->data_direct_lock);
+
+ list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) {
+ list_del(&mr->dd_node);
+ mlx5_ib_revoke_data_direct_mr(mr);
+ }
+}
+
static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
- if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+ if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ ent = mr->mmkey.cache_ent;
+ /* Upon storing to a clean temp entry, schedule its cleanup */
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ msecs_to_jiffies(30 * 1000));
+ ent->tmp_cleanup_scheduled = true;
+ }
+ spin_unlock_irq(&ent->mkeys_queue.lock);
return 0;
+ }
if (ent) {
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -1864,7 +2044,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
return destroy_mkey(dev, mr);
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
@@ -1931,9 +2111,40 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
+static int dereg_crossing_data_direct_mr(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr;
+ int ret;
+
+ ret = __mlx5_ib_dereg_mr(&mr->ibmr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->data_direct_lock);
+ if (!dd_crossed_mr->revoked)
+ list_del(&dd_crossed_mr->dd_node);
+
+ ret = __mlx5_ib_dereg_mr(&dd_crossed_mr->ibmr);
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+
+ if (mr->data_direct)
+ return dereg_crossing_data_direct_mr(dev, mr);
+
+ return __mlx5_ib_dereg_mr(ibmr);
+}
+
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
int access_mode, int page_shift)
{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
void *mkc;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1946,6 +2157,9 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ if (access_mode == MLX5_MKC_ACCESS_MODE_PA ||
+ access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
}
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index a524181f34df..4b37446758fd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -45,7 +45,7 @@
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
u32 bytes_committed;
- u32 token;
+ u64 token;
u8 event_subtype;
u8 type;
union {
@@ -74,6 +74,14 @@ struct mlx5_pagefault {
u32 rdma_op_len;
u64 rdma_va;
} rdma;
+ struct {
+ u64 va;
+ u32 mkey;
+ u32 fault_byte_count;
+ u32 prefetch_before_byte_count;
+ u32 prefetch_after_byte_count;
+ u8 flags;
+ } memory;
};
struct mlx5_ib_pf_eq *eq;
@@ -99,13 +107,20 @@ static u64 mlx5_imr_ksm_entries;
static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
struct mlx5_ib_mr *imr, int flags)
{
+ struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev;
struct mlx5_klm *end = pklm + nentries;
+ int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0;
+ __be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ?
+ cpu_to_be32(imr->null_mmkey.key) :
+ mr_to_mdev(imr)->mkeys.null_mkey;
+ u64 va =
+ MLX5_CAP_ODP(dev, mem_page_fault) ? idx * MLX5_IMR_MTT_SIZE : 0;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (; pklm != end; pklm++, idx++) {
+ for (; pklm != end; pklm++, idx++, va += step) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
- pklm->va = 0;
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
}
return;
}
@@ -129,7 +144,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- for (; pklm != end; pklm++, idx++) {
+ for (; pklm != end; pklm++, idx++, va += step) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
@@ -137,8 +152,8 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
- pklm->va = 0;
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
}
}
}
@@ -217,6 +232,9 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
return;
xa_erase(&imr->implicit_children, idx);
+ if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
@@ -332,46 +350,46 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
else
dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
- if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
@@ -388,13 +406,29 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+ void *info;
int err;
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
- MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
- MLX5_SET(page_fault_resume_in, in, token, pfault->token);
- MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
- MLX5_SET(page_fault_resume_in, in, error, !!error);
+
+ if (pfault->event_subtype == MLX5_PFAULT_SUBTYPE_MEMORY) {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.mem_page_fault_info);
+ MLX5_SET(mem_page_fault_info, info, fault_token_31_0,
+ pfault->token & 0xffffffff);
+ MLX5_SET(mem_page_fault_info, info, fault_token_47_32,
+ (pfault->token >> 32) & 0xffff);
+ MLX5_SET(mem_page_fault_info, info, error, !!error);
+ } else {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.trans_page_fault_info);
+ MLX5_SET(trans_page_fault_info, info, page_fault_type,
+ pfault->type);
+ MLX5_SET(trans_page_fault_info, info, fault_token,
+ pfault->token);
+ MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
+ MLX5_SET(trans_page_fault_info, info, error, !!error);
+ }
err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
if (err)
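
The memory-scheme fault token is 48 bits wide, which is why the resume command above splits it across fault_token_31_0 and fault_token_47_32 (the EQE parser further down reassembles it the same way). A minimal standalone sketch of the round trip, using standard integer types rather than the kernel's:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t token = 0x123456789abcULL;   /* any 48-bit value */
            uint32_t lo = token & 0xffffffff;     /* fault_token_31_0 */
            uint16_t hi = (token >> 32) & 0xffff; /* fault_token_47_32 */

            /* The parser reverses the split: lo | ((u64)hi << 32). */
            assert((((uint64_t)hi << 32) | lo) == token);
            return 0;
    }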
@@ -468,6 +502,16 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
}
xa_unlock(&imr->implicit_children);
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ xa_erase(&imr->implicit_children, idx);
+ goto out_mr;
+ }
+ mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
+ }
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -478,6 +522,57 @@ out_mr:
return ret;
}
+/*
+ * When using memory scheme ODP, implicit MRs can't use the reserved null
+ * mkey; each implicit MR needs a private null mkey of its own to take its
+ * page faults on.
+ * The null mkey is created with the properties needed to trigger a page
+ * fault on every access and with all relevant access flags set.
+ */
+static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *imr,
+ struct mlx5_ib_pd *pd)
+{
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 64;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 4);
+ MLX5_SET(create_mkey_in, in, pg_access, 1);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, translations_octword_size, 4);
+ MLX5_SET(mkc, mkc, log_page_size, 61);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, pd, pd->pdn);
+ MLX5_SET64(mkc, mkc, start_addr, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(dev->mdev, &imr->null_mmkey.key, in, inlen);
+ if (err)
+ goto free_in;
+
+ imr->null_mmkey.type = MLX5_MKEY_NULL;
+
+free_in:
+ kfree(in);
+ return err;
+}
+
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags)
{
@@ -510,6 +605,16 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->is_odp_implicit = true;
xa_init(&imr->implicit_children);
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ err = alloc_implicit_mr_null_mkey(dev, imr, pd);
+ if (err)
+ goto out_mr;
+
+ err = mlx5r_store_odp_mkey(dev, &imr->null_mmkey);
+ if (err)
+ goto out_mr;
+ }
+
err = mlx5r_umr_update_xlt(imr, 0,
mlx5_imr_ksm_entries,
MLX5_KSM_PAGE_SHIFT,
@@ -544,6 +649,14 @@ void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
xa_erase(&mr->implicit_children, idx);
mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
}
+
+ if (mr->null_mmkey.key) {
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->null_mmkey.key));
+
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev,
+ mr->null_mmkey.key);
+ }
}
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
@@ -693,7 +806,7 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
u32 xlt_flags = 0;
int err;
- unsigned int page_size;
+ unsigned long page_size;
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
@@ -710,7 +823,10 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
err = -EINVAL;
} else {
- err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
+ if (mr->data_direct)
+ err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags);
+ else
+ err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
}
dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
@@ -733,24 +849,31 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
* >0: Number of pages mapped
*/
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped, u32 flags, bool permissive_fault)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->ibmr.iova))
+ if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
return -EFAULT;
if (mr->umem->is_dmabuf)
return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
if (!odp->is_implicit_odp) {
+ u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
u64 user_va;
- if (check_add_overflow(io_virt - mr->ibmr.iova,
- (u64)odp->umem.address, &user_va))
+ if (check_add_overflow(offset, (u64)odp->umem.address,
+ &user_va))
return -EFAULT;
- if (unlikely(user_va >= ib_umem_end(odp) ||
- ib_umem_end(odp) - user_va < bcnt))
+
+ if (permissive_fault) {
+ if (user_va < ib_umem_start(odp))
+ user_va = ib_umem_start(odp);
+ if ((user_va + bcnt) > ib_umem_end(odp))
+ bcnt = ib_umem_end(odp) - user_va;
+ } else if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
return -EFAULT;
return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
flags);
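
For permissive (prefetch-driven) faults the requested range is clamped to the registered window rather than rejected with -EFAULT. The clamp is plain interval intersection; a standalone sketch with illustrative names, not driver code:

    #include <assert.h>
    #include <stddef.h>

    /* Clamp [va, va + len) against the window [start, end). */
    static void clamp_range(unsigned long *va, size_t *len,
                            unsigned long start, unsigned long end)
    {
            if (*va < start)
                    *va = start;
            if (*va >= end) {
                    *len = 0;
                    return;
            }
            if (*va + *len > end)
                    *len = end - *va;
    }

    int main(void)
    {
            unsigned long va = 0x900;
            size_t len = 0x1000;

            clamp_range(&va, &len, 0x1000, 0x3000);
            assert(va == 0x1000 && len == 0x1000);
            return 0;
    }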
@@ -797,6 +920,27 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
return mmkey->key == key;
}
+static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
+{
+ struct mlx5_ib_mkey *mmkey;
+
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mmkey = ERR_PTR(-ENOENT);
+ goto out;
+ }
+ if (!mkey_is_eq(mmkey, key)) {
+ mmkey = ERR_PTR(-EFAULT);
+ goto out;
+ }
+ refcount_inc(&mmkey->usecount);
+out:
+ xa_unlock(&dev->odp_mkeys);
+
+ return mmkey;
+}
+
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -824,32 +968,24 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
-
next_mr:
- xa_lock(&dev->odp_mkeys);
- mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
- if (!mmkey) {
- xa_unlock(&dev->odp_mkeys);
- mlx5_ib_dbg(
- dev,
- "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- /*
- * The user could specify a SGL with multiple lkeys and only
- * some of them are ODP. Treat the non-ODP ones as fully
- * faulted.
- */
- ret = 0;
- goto end;
- }
- refcount_inc(&mmkey->usecount);
- xa_unlock(&dev->odp_mkeys);
-
- if (!mkey_is_eq(mmkey, key)) {
- mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
- ret = -EFAULT;
+ mmkey = find_odp_mkey(dev, key);
+ if (IS_ERR(mmkey)) {
+ ret = PTR_ERR(mmkey);
+ if (ret == -ENOENT) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify a SGL with multiple lkeys and
+ * only some of them are ODP. Treat the non-ODP ones as
+ * fully faulted.
+ */
+ ret = 0;
+ }
goto end;
}
@@ -857,7 +993,7 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
@@ -944,7 +1080,7 @@ next_mr:
}
end:
- if (mmkey)
+ if (!IS_ERR(mmkey))
mlx5r_deref_odp_mkey(mmkey);
while (head) {
frame = head;
@@ -1266,7 +1402,7 @@ read_user:
if (ret)
mlx5_ib_err(
dev,
- "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+ "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %llx\n",
ret, wqe_index, pfault->token);
resolve_page_fault:
@@ -1325,13 +1461,13 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
} else if (ret < 0 || pages_in_range(address, length) > ret) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
- mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
+ mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
ret, pfault->token, pfault->type);
return;
}
mlx5_ib_page_fault_resume(dev, pfault, 0);
- mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%llx, type: 0x%x, prefetch_activated: %d\n",
pfault->token, pfault->type,
prefetch_activated);
@@ -1347,12 +1483,80 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
prefetch_len,
&bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
- mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
+ mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%llx, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
}
}
}
+#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST BIT(7)
+static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
+ u64 prefetch_va =
+ pfault->memory.va - pfault->memory.prefetch_before_byte_count;
+ size_t prefetch_size = pfault->memory.prefetch_before_byte_count +
+ pfault->memory.fault_byte_count +
+ pfault->memory.prefetch_after_byte_count;
+ struct mlx5_ib_mkey *mmkey;
+ struct mlx5_ib_mr *mr, *child_mr;
+ int ret = 0;
+
+ mmkey = find_odp_mkey(dev, pfault->memory.mkey);
+ if (IS_ERR(mmkey))
+ goto err;
+
+ switch (mmkey->type) {
+ case MLX5_MKEY_IMPLICIT_CHILD:
+ child_mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ mr = child_mr->parent;
+ break;
+ case MLX5_MKEY_NULL:
+ mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey);
+ break;
+ default:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ break;
+ }
+
+ /* If prefetch fails, handle only the demanded page fault */
+ ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true);
+ if (ret < 0) {
+ ret = pagefault_mr(mr, pfault->memory.va,
+ pfault->memory.fault_byte_count, NULL, 0,
+ true);
+ if (ret < 0)
+ goto err;
+ }
+
+ mlx5_update_odp_stats(mr, faults, ret);
+ mlx5r_deref_odp_mkey(mmkey);
+
+ if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT completed %s. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x\n",
+ pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST ?
+ "" :
+ "without resume cmd",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count);
+
+ return;
+
+err:
+ if (!IS_ERR(mmkey))
+ mlx5r_deref_odp_mkey(mmkey);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT error. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x, err: %d\n",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count, ret);
+}
+
static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{
u8 event_subtype = pfault->event_subtype;
@@ -1364,6 +1568,9 @@ static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfaul
case MLX5_PFAULT_SUBTYPE_RDMA:
mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
break;
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ mlx5_ib_mr_memory_pfault_handler(dev, pfault);
+ break;
default:
mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
event_subtype);
@@ -1382,6 +1589,7 @@ static void mlx5_ib_eqe_pf_action(struct work_struct *work)
mempool_free(pfault, eq->pool);
}
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
struct mlx5_eqe_page_fault *pf_eqe;
@@ -1398,15 +1606,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
pf_eqe = &eqe->data.page_fault;
pfault->event_subtype = eqe->sub_type;
- pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
-
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
- eqe->sub_type, pfault->bytes_committed);
switch (eqe->sub_type) {
case MLX5_PFAULT_SUBTYPE_RDMA:
/* RDMA based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->rdma.bytes_committed);
pfault->type =
be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
pfault->token =
@@ -1420,10 +1625,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
be32_to_cpu(pf_eqe->rdma.rdma_op_len);
pfault->rdma.rdma_va =
be64_to_cpu(pf_eqe->rdma.rdma_va);
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
- pfault->type, pfault->token,
- pfault->rdma.r_key);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, r_key: 0x%08x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
mlx5_ib_dbg(eq->dev,
"PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
pfault->rdma.rdma_op_len,
@@ -1432,6 +1639,8 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->wqe.bytes_committed);
pfault->type =
(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
@@ -1443,11 +1652,47 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
be16_to_cpu(pf_eqe->wqe.wqe_index);
pfault->wqe.packet_size =
be16_to_cpu(pf_eqe->wqe.packet_length);
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
- pfault->type, pfault->token,
- pfault->wqe.wq_num,
- pfault->wqe.wqe_index);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token, pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ /* Memory based event */
+ pfault->bytes_committed = 0;
+ pfault->token =
+ be32_to_cpu(pf_eqe->memory.token31_0) |
+ ((u64)be16_to_cpu(pf_eqe->memory.token47_32)
+ << 32);
+ pfault->memory.va = be64_to_cpu(pf_eqe->memory.va);
+ pfault->memory.mkey = be32_to_cpu(pf_eqe->memory.mkey);
+ pfault->memory.fault_byte_count = (be32_to_cpu(
+ pf_eqe->memory.demand_fault_pages) >> 12) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_before_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.pre_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_after_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.post_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.flags = pf_eqe->memory.flags;
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, token: 0x%06llx, mkey: 0x%06x, fault_byte_count: 0x%06x, va: 0x%016llx, flags: 0x%02x\n",
+ eqe->sub_type, pfault->token,
+ pfault->memory.mkey,
+ pfault->memory.fault_byte_count,
+ pfault->memory.va, pfault->memory.flags);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: prefetch size: before: 0x%06x, after 0x%06x\n",
+ pfault->memory.prefetch_before_byte_count,
+ pfault->memory.prefetch_after_byte_count);
break;
default:
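
The memory-fault EQE reports sizes as counts of 4 KiB units, and demand_fault_pages keeps its count in bits 31:12 of the dword, hence the extra shift before the multiply. A worked example of the decode above, with a made-up dword value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t demand_dw = 0x00003000; /* dword after be32_to_cpu(), made up */

            /* Bits 31:12 hold the count: (0x3000 >> 12) = 3 pages of 4 KiB. */
            uint32_t fault_bytes = (demand_dw >> 12) * 4096;

            assert(fault_bytes == 12288);
            return 0;
    }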
@@ -1710,7 +1955,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
for (i = 0; i < work->num_sge; ++i) {
ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
work->frags[i].length, &bytes_mapped,
- work->pf_flags);
+ work->pf_flags, false);
if (ret <= 0)
continue;
mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
@@ -1761,7 +2006,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
if (IS_ERR(mr))
return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
- &bytes_mapped, pf_flags);
+ &bytes_mapped, pf_flags, false);
if (ret < 0) {
mlx5r_deref_odp_mkey(&mr->mmkey);
return ret;
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index bbfcce3bdc84..bdb568411091 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -10,6 +10,7 @@
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
+#include "data_direct.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -111,6 +112,23 @@ out:
return err;
}
+static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ if (!mdev)
+ return -EINVAL;
+
+ info->vport_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+
+ return 0;
+}
+
static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
struct mlx5_ib_uapi_query_port *info)
{
@@ -177,12 +195,60 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
ret = fill_switchdev_info(dev, port_num, &info);
if (ret)
return ret;
+ } else if (mlx5_core_mp_enabled(dev->mdev)) {
+ ret = fill_multiport_info(dev, port_num, &info);
+ if (ret)
+ return ret;
}
return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
sizeof(info));
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH);
+ u32 dev_path_len;
+ char *dev_path;
+ int ret;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ dev_path = kobject_get_path(&data_direct_dev->device->kobj, GFP_KERNEL);
+ if (!dev_path) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ dev_path_len = strlen(dev_path) + 1;
+ if (dev_path_len > out_len) {
+ ret = -ENOSPC;
+ goto end;
+ }
+
+ ret = uverbs_copy_to(attrs, MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, dev_path,
+ dev_path_len);
+ kfree(dev_path);
+
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_QUERY_PORT,
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
@@ -193,9 +259,17 @@ DECLARE_UVERBS_NAMED_METHOD(
reg_c0),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY));
+
ADD_UVERBS_METHODS(mlx5_ib_device,
UVERBS_OBJECT_DEVICE,
- &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT),
+ &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH));
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_PD_QUERY,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index ffc31b01f690..887fd6fa3ba9 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -224,6 +224,9 @@ int mlx5r_umr_init(struct mlx5_ib_dev *dev)
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
{
+ if (!dev->umrc.pd)
+ return;
+
mutex_destroy(&dev->umrc.init_lock);
ib_dealloc_pd(dev->umrc.pd);
}
@@ -632,44 +635,47 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}
-/*
- * Send the DMA list to the HW for a normal MR using UMR.
- * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
- * flag may be used.
- */
-int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+static int
+_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
{
+ size_t ent_size = dd ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
struct mlx5r_umr_wqe wqe = {};
struct ib_block_iter biter;
+ struct mlx5_ksm *cur_ksm;
struct mlx5_mtt *cur_mtt;
size_t orig_sg_length;
- struct mlx5_mtt *mtt;
size_t final_size;
+ void *curr_entry;
struct ib_sge sg;
+ void *entry;
u64 offset = 0;
int err = 0;
- if (WARN_ON(mr->umem->is_odp))
- return -EINVAL;
-
- mtt = mlx5r_umr_create_xlt(
- dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
- sizeof(*mtt), flags);
- if (!mtt)
+ entry = mlx5r_umr_create_xlt(dev, &sg,
+ ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
+ ent_size, flags);
+ if (!entry)
return -ENOMEM;
orig_sg_length = sg.length;
-
mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
mr->page_shift);
+ if (dd) {
+ /* Use the data direct internal kernel PD */
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+ cur_ksm = entry;
+ } else {
+ cur_mtt = entry;
+ }
+
mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
- cur_mtt = mtt;
+ curr_entry = entry;
rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
- if (cur_mtt == (void *)mtt + sg.length) {
+ if (curr_entry == entry + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
@@ -681,23 +687,31 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
DMA_TO_DEVICE);
offset += sg.length;
mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
-
- cur_mtt = mtt;
+ if (dd)
+ cur_ksm = entry;
+ else
+ cur_mtt = entry;
}
- cur_mtt->ptag =
- cpu_to_be64(rdma_block_iter_dma_address(&biter) |
- MLX5_IB_MTT_PRESENT);
-
- if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
- cur_mtt->ptag = 0;
-
- cur_mtt++;
+ if (dd) {
+ cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ cur_ksm++;
+ curr_entry = cur_ksm;
+ } else {
+ cur_mtt->ptag =
+ cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ MLX5_IB_MTT_PRESENT);
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+ cur_mtt++;
+ curr_entry = cur_mtt;
+ }
}
- final_size = (void *)cur_mtt - (void *)mtt;
+ final_size = curr_entry - entry;
sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
- memset(cur_mtt, 0, sg.length - final_size);
+ memset(curr_entry, 0, sg.length - final_size);
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
@@ -705,10 +719,32 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
err:
sg.length = orig_sg_length;
- mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
+ mlx5r_umr_unmap_free_xlt(dev, entry, &sg);
return err;
}
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ /* No invalidation flow is expected */
+ if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, true);
+}
+
+/*
+ * Send the DMA list to the HW for a normal MR using UMR.
+ * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
+ * flag may be used.
+ */
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, false);
+}
+
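
The refactor above keys the XLT population on the entry type: a regular MR writes MTT entries, one tagged DMA address each, while a data-direct MR writes KSM entries pairing the data-direct mkey with a VA inside it, which is also why ent_size differs. For reference, the two entry layouts as defined upstream in include/linux/mlx5/qp.h (reproduced on the assumption they are unchanged):

    struct mlx5_mtt {
            __be64 ptag;        /* DMA address | MLX5_IB_MTT_PRESENT */
    };

    struct mlx5_ksm {
            __be32 reserved;
            __be32 key;         /* mkey to indirect through (dev->ddr.mkey here) */
            __be64 va;          /* address within that mkey */
    };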
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 5f734dc72bef..4a02c9b5aad8 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -95,6 +95,7 @@ int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index db3b25c8433a..4100656fe9a3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -581,12 +581,9 @@ static int qib_create_workqueues(struct qib_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (!ppd->qib_wq) {
- char wq_name[23];
-
- snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
- dd->unit, pidx);
- ppd->qib_wq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM);
+ ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d",
+ WQ_MEM_RECLAIM,
+ dd->unit, pidx);
if (!ppd->qib_wq)
goto wq_error;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 07548fac1d8e..408fe1ba74b9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -303,8 +303,6 @@ int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
-void qib_rc_rnr_retry(unsigned long arg);
-
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
@@ -312,8 +310,6 @@ int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-void mr_rcu_callback(struct rcu_head *list);
-
void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 7a9afd5231d5..5ed5cfc2b280 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -348,13 +348,13 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
- return (void *)umem;
+ return ERR_CAST(umem);
n = ib_umem_num_pages(umem);
mr = __rvt_alloc_mr(n, pd);
if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
+ ret = ERR_CAST(mr);
goto bail_umem;
}
@@ -542,7 +542,7 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr = __rvt_alloc_mr(max_num_sg, pd);
if (IS_ERR(mr))
- return (struct ib_mr *)mr;
+ return ERR_CAST(mr);
return &mr->ibmr;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 46f82b27fcd2..1f0322491d8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -234,7 +234,7 @@ static inline void __bth_set_resv6a(void *arg)
{
struct rxe_bth *bth = arg;
- bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
+ bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}
static inline int __bth_ack(void *arg)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 6596a85723c9..c11ab280551a 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -341,7 +341,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/*
* See IBA C9-92
* For UD QPs we only check if the packet will fit in the
- * receive buffer later. For rmda operations additional
+ * receive buffer later. For RDMA operations additional
* length checks are performed in check_rkey.
*/
if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
@@ -351,7 +351,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
- if (payload + 40 > recv_buffer_len) {
+ if (payload + sizeof(union rdma_network_hdr) > recv_buffer_len) {
rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
return RESPST_ERR_LENGTH;
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 75253f2b3e3d..86d4d6a2170e 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -94,8 +94,6 @@ struct siw_device {
atomic_t num_mr;
atomic_t num_srq;
atomic_t num_ctx;
-
- struct work_struct netdev_down;
};
struct siw_ucontext {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index b2b54242aa69..17abef48abcd 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -364,39 +364,6 @@ error:
return NULL;
}
-/*
- * Network link becomes unavailable. Mark all
- * affected QP's accordingly.
- */
-static void siw_netdev_down(struct work_struct *work)
-{
- struct siw_device *sdev =
- container_of(work, struct siw_device, netdev_down);
-
- struct siw_qp_attrs qp_attrs;
- struct list_head *pos, *tmp;
-
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.state = SIW_QP_STATE_ERROR;
-
- list_for_each_safe(pos, tmp, &sdev->qp_list) {
- struct siw_qp *qp = list_entry(pos, struct siw_qp, devq);
-
- down_write(&qp->state_lock);
- WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE));
- up_write(&qp->state_lock);
- }
- ib_device_put(&sdev->base_dev);
-}
-
-static void siw_device_goes_down(struct siw_device *sdev)
-{
- if (ib_device_try_get(&sdev->base_dev)) {
- INIT_WORK(&sdev->netdev_down, siw_netdev_down);
- schedule_work(&sdev->netdev_down);
- }
-}
-
static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
void *arg)
{
@@ -418,10 +385,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
- case NETDEV_GOING_DOWN:
- siw_device_goes_down(sdev);
- break;
-
case NETDEV_DOWN:
sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 963e936da5e3..abe0522b7df4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -509,12 +509,10 @@ struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *format);
int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
struct net_device *dev);
-void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
-void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open_default(struct net_device *dev);
@@ -533,7 +531,6 @@ void ipoib_mcast_restart_task(struct work_struct *work);
void ipoib_mcast_start_thread(struct net_device *dev);
void ipoib_mcast_stop_thread(struct net_device *dev);
-void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
@@ -610,7 +607,6 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
void ipoib_setup_common(struct net_device *dev);
-void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 68429a5f796d..1d7ac24c4c00 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -507,10 +507,6 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir);
-
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir dir,
bool all_imm);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 88106cf5ce55..71387811b281 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -331,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -351,11 +351,11 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->need_inv_comp)
complete(&req->inv_comp);
else
@@ -391,12 +391,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
- if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
+ if (req->mr->need_inval) {
/*
- * We are here to invalidate read requests
+ * We are here to invalidate read/write requests
* ourselves. In normal scenario server should
- * send INV for all read requests, but
- * we are here, thus two things could happen:
+ * send INV for all read requests, and we locally
+ * invalidate write requests ourselves, but
+ * we are here, thus three things could happen:
*
* 1. this is failover, when errno != 0
* and can_wait == 1,
@@ -404,6 +405,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
* 2. something totally bad happened and
* server forgot to send INV, so we
* should do that ourselves.
+ *
+ * 3. a write request completed, so we need to do
+ * the local invalidate ourselves
*/
if (can_wait) {
@@ -418,18 +422,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
+ rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
- } else {
- /*
- * Something went wrong, so request will be
- * completed from INV callback.
- */
- WARN_ON_ONCE(1);
-
- return;
}
if (!refcount_dec_and_test(&req->ref))
return;
@@ -446,8 +442,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ rtrs_err_rl(con->c.path,
+ "IO %s request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ req->dir == DMA_TO_DEVICE ? "write" : "read", errno,
+ kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port, notify);
}
@@ -501,7 +499,7 @@ static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
- req->need_inv &= !w_inval;
+ req->mr->need_inval &= !w_inval;
complete_rdma_req(req, errno, true, false);
}
@@ -626,6 +624,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
return;
+ clt_path->s.hb_missed_cnt = 0;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
@@ -643,7 +642,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- clt_path->s.hb_missed_cnt = 0;
clt_path->s.hb_cur_latency =
ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
@@ -670,6 +668,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
/*
* Key invalidations from server side
*/
+ clt_path->s.hb_missed_cnt = 0;
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
@@ -967,7 +966,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->dir = dir;
req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
- req->need_inv = false;
+ req->mr->need_inval = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
@@ -1089,7 +1088,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
int ret, count = 0;
u32 imm, buf_id;
struct ib_reg_wr rwr;
- struct ib_send_wr inv_wr;
struct ib_send_wr *wr = NULL;
bool fr_en = false;
@@ -1130,13 +1128,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->sg_cnt, req->dir);
return ret;
}
- inv_wr = (struct ib_send_wr) {
- .opcode = IB_WR_LOCAL_INV,
- .wr_cqe = &req->inv_cqe,
- .send_flags = IB_SEND_SIGNALED,
- .ex.invalidate_rkey = req->mr->rkey,
- };
- req->inv_cqe.done = rtrs_clt_inv_rkey_done;
rwr = (struct ib_reg_wr) {
.wr.opcode = IB_WR_REG_MR,
.wr.wr_cqe = &fast_reg_cqe,
@@ -1146,7 +1137,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
};
wr = &rwr.wr;
fr_en = true;
- refcount_inc(&req->ref);
+ req->mr->need_inval = true;
}
/*
* Update stats now, after request is successfully sent it is not
@@ -1156,7 +1147,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
- imm, wr, &inv_wr);
+ imm, wr, NULL);
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
@@ -1164,6 +1155,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
+ if (req->mr->need_inval) {
+ req->mr->need_inval = false;
+ refcount_dec(&req->ref);
+ }
if (req->sg_cnt)
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1213,7 +1208,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_map_sg_fr(req, count);
if (ret < 0) {
rtrs_err_rl(s,
- "Read request failed, failed to map fast reg. data, err: %d\n",
+ "Read request failed, failed to map fast reg. data, err: %d\n",
ret);
ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
@@ -1237,7 +1232,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
msg->desc[0].len = cpu_to_le32(req->mr->length);
/* Further invalidation is required */
- req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+ req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F;
} else {
msg->sg_cnt = 0;
@@ -1270,7 +1265,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1494,7 +1489,9 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj));
rtrs_rdma_error_recovery(con);
}
@@ -2346,6 +2343,12 @@ static int init_conns(struct rtrs_clt_path *clt_path)
if (err)
goto destroy;
}
+
+ /*
+ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
+ */
+ cid = clt_path->s.con_num - 1;
+
err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
@@ -3140,8 +3143,20 @@ close_path:
return err;
}
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+
static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_clt_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
if (!(dev->ib_dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
pr_err("Memory registrations not supported.\n");
@@ -3151,8 +3166,15 @@ static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
return 0;
}
+static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+
static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
- .init = rtrs_clt_ib_dev_init
+ .init = rtrs_clt_ib_dev_init,
+ .deinit = rtrs_clt_ib_dev_deinit
};
static int __init rtrs_client_init(void)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index f848c0392d98..0f57759b3080 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -115,7 +115,6 @@ struct rtrs_clt_io_req {
struct completion inv_comp;
int inv_errno;
bool need_inv_comp;
- bool need_inv;
refcount_t ref;
};
@@ -213,6 +212,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
void free_path(struct rtrs_clt_path *clt_path);
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
/* rtrs-clt-stats.c */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ab25619261d2..ef29bd483b5a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -69,6 +69,7 @@ struct rtrs_ib_dev;
struct rtrs_rdma_dev_pd_ops {
int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
};
struct rtrs_rdma_dev_pd {
@@ -84,6 +85,7 @@ struct rtrs_ib_dev {
struct kref ref;
struct list_head entry;
struct rtrs_rdma_dev_pd *pool;
+ struct ib_event_handler event_handler;
};
struct rtrs_con {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 1d33efb8fb03..e83d95647852 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -26,7 +26,10 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-static struct rtrs_rdma_dev_pd dev_pd;
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
const struct class rtrs_dev_class = {
.name = "rtrs-server",
};
@@ -672,6 +675,10 @@ err:
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
+ struct rtrs_srv_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_srv_path *srv_path = to_srv_path(con->c.path);
+
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj));
close_path(to_srv_path(c->path));
}
@@ -931,12 +938,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
if (err)
goto close;
-out:
rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
close_path(srv_path);
- goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
@@ -987,6 +993,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Path state invalid. state %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ return -EIO;
+ }
+
+ if (!srv_path->s.con[cid]) {
+ rtrs_err(s, "Conn not set for %d\n", cid);
+ return -EIO;
+ }
err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
@@ -1229,6 +1245,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
return;
+ srv_path->s.hb_missed_cnt = 0;
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
@@ -2255,6 +2272,34 @@ static int check_module_params(void)
return 0;
}
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+static int rtrs_srv_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_srv_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
+ return 0;
+}
+
+static void rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_srv_ib_dev_init,
+ .deinit = rtrs_srv_ib_dev_deinit
+};
+
+
static int __init rtrs_server_init(void)
{
int err;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 5e325b82ff33..014f85681f37 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -132,6 +132,8 @@ struct rtrs_srv_ib_ctx {
extern const struct class rtrs_dev_class;
void close_path(struct rtrs_srv_path *srv_path);
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a8ce3d140722..eb4906552ac8 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -498,6 +498,13 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval = 0;
+ /*
+ * Limit amount of data we inject into the input subsystem so that
+ * we do not hold evdev->mutex for too long. 4096 bytes corresponds
+ * to 170 input events.
+ */
+ count = min(count, 4096);
+
if (count != 0 && count < input_event_size())
return -EINVAL;
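
The 170-events figure in the comment follows from the event size: on a 64-bit ABI a struct input_event is 24 bytes (a 16-byte timeval plus type, code and value), and 4096 / 24 rounds down to 170. A quick standalone check against an illustrative mirror of that layout (not the uapi header itself):

    #include <assert.h>

    struct input_event_64 {                 /* illustrative mirror only */
            unsigned long tv_sec, tv_usec;  /* struct timeval: 2 * 8 bytes */
            unsigned short type, code;
            int value;
    };

    int main(void)
    {
            assert(sizeof(struct input_event_64) == 24);
            assert(4096 / sizeof(struct input_event_64) == 170);
            return 0;
    }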
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 54c57b267b25..47fac29cf7c3 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1079,33 +1079,31 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
+struct input_seq_state {
+ unsigned short pos;
+ bool mutex_acquired;
+ int input_devices_state;
+};
+
static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
+ struct seq_file *seq = file->private_data;
+ struct input_seq_state *state = seq->private;
+
poll_wait(file, &input_devices_poll_wait, wait);
- if (file->f_version != input_devices_state) {
- file->f_version = input_devices_state;
+ if (state->input_devices_state != input_devices_state) {
+ state->input_devices_state = input_devices_state;
return EPOLLIN | EPOLLRDNORM;
}
return 0;
}
-union input_seq_state {
- struct {
- unsigned short pos;
- bool mutex_acquired;
- };
- void *p;
-};
-
static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
- union input_seq_state *state = (union input_seq_state *)&seq->private;
+ struct input_seq_state *state = seq->private;
int error;
- /* We need to fit into seq->private pointer */
- BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
-
error = mutex_lock_interruptible(&input_mutex);
if (error) {
state->mutex_acquired = false;
@@ -1124,7 +1122,7 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void input_seq_stop(struct seq_file *seq, void *v)
{
- union input_seq_state *state = (union input_seq_state *)&seq->private;
+ struct input_seq_state *state = seq->private;
if (state->mutex_acquired)
mutex_unlock(&input_mutex);
@@ -1210,7 +1208,8 @@ static const struct seq_operations input_devices_seq_ops = {
static int input_proc_devices_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &input_devices_seq_ops);
+ return seq_open_private(file, &input_devices_seq_ops,
+ sizeof(struct input_seq_state));
}
static const struct proc_ops input_devices_proc_ops = {
@@ -1218,17 +1217,14 @@ static const struct proc_ops input_devices_proc_ops = {
.proc_poll = input_proc_devices_poll,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
- .proc_release = seq_release,
+ .proc_release = seq_release_private,
};
static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
- union input_seq_state *state = (union input_seq_state *)&seq->private;
+ struct input_seq_state *state = seq->private;
int error;
- /* We need to fit into seq->private pointer */
- BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
-
error = mutex_lock_interruptible(&input_mutex);
if (error) {
state->mutex_acquired = false;
@@ -1243,7 +1239,7 @@ static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- union input_seq_state *state = (union input_seq_state *)&seq->private;
+ struct input_seq_state *state = seq->private;
state->pos = *pos + 1;
return seq_list_next(v, &input_handler_list, pos);
@@ -1252,7 +1248,7 @@ static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
struct input_handler *handler = container_of(v, struct input_handler, node);
- union input_seq_state *state = (union input_seq_state *)&seq->private;
+ struct input_seq_state *state = seq->private;
seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
if (handler->filter)
@@ -1273,14 +1269,15 @@ static const struct seq_operations input_handlers_seq_ops = {
static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &input_handlers_seq_ops);
+ return seq_open_private(file, &input_handlers_seq_ops,
+ sizeof(struct input_seq_state));
}
static const struct proc_ops input_handlers_proc_ops = {
.proc_open = input_proc_handlers_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
- .proc_release = seq_release,
+ .proc_release = seq_release_private,
};
static int __init input_proc_init(void)
@@ -2224,7 +2221,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
mt_slots = dev->mt->num_slots;
} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
- dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
+ dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
mt_slots = clamp(mt_slots, 2, 32);
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
mt_slots = 2;
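For context, seq_open_private() allocates a zeroed per-open buffer of the
requested size and stores it in seq->private, and seq_release_private()
frees it on close, which is what lets the conversion above drop the old
pointer-packing union and its BUILD_BUG_ON(). A minimal sketch of the
pattern, with hypothetical my_state and my_seq_ops (the latter assumed
defined elsewhere):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct my_state {			/* hypothetical per-open state */
	unsigned short pos;
	bool mutex_acquired;
};

static int my_proc_open(struct inode *inode, struct file *file)
{
	/* Allocates and zeroes sizeof(struct my_state) into seq->private. */
	return seq_open_private(file, &my_seq_ops, sizeof(struct my_state));
}

static const struct proc_ops my_proc_ops = {
	.proc_open	= my_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,	/* frees seq->private */
};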
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index de1fa4cf291b..02713e624df1 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -132,7 +132,6 @@ static void adc_joystick_cleanup(void *data)
static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
{
struct adc_joystick_axis *axes = joy->axes;
- struct fwnode_handle *child;
s32 range[2], fuzz, flat;
unsigned int num_axes;
int error, i;
@@ -149,31 +148,30 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
return -EINVAL;
}
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
error = fwnode_property_read_u32(child, "reg", &i);
if (error) {
dev_err(dev, "reg invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
if (i >= num_axes) {
- error = -EINVAL;
dev_err(dev, "No matching axis for reg %d\n", i);
- goto err_fwnode_put;
+ return -EINVAL;
}
error = fwnode_property_read_u32(child, "linux,code",
&axes[i].code);
if (error) {
dev_err(dev, "linux,code invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
error = fwnode_property_read_u32_array(child, "abs-range",
range, 2);
if (error) {
dev_err(dev, "abs-range invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
if (range[0] > range[1]) {
@@ -193,10 +191,6 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
}
return 0;
-
-err_fwnode_put:
- fwnode_handle_put(child);
- return error;
}
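The _scoped iterator used above declares the loop variable itself and drops
the child node reference automatically when it goes out of scope, so early
returns no longer need a matching fwnode_handle_put(). A minimal sketch,
with a hypothetical "reg" parsing loop:

#include <linux/property.h>

static int my_parse_children(struct device *dev)
{
	device_for_each_child_node_scoped(dev, child) {
		u32 reg;
		int error;

		error = fwnode_property_read_u32(child, "reg", &reg);
		if (error)
			return error;	/* child reference dropped automatically */
	}

	return 0;
}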
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 1d0c5f4c0f99..721ab69e84ac 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -421,18 +421,6 @@ config KEYBOARD_MAX7359
To compile this driver as a module, choose M here: the
module will be called max7359_keypad.
-config KEYBOARD_MCS
- tristate "MELFAS MCS Touchkey"
- depends on I2C
- help
- Say Y here if you have the MELFAS MCS5000/5080 touchkey controller
- chip in your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called mcs_touchkey.
-
config KEYBOARD_MPR121
tristate "Freescale MPR121 Touchkey"
depends on I2C
@@ -466,6 +454,17 @@ config KEYBOARD_IMX
To compile this driver as a module, choose M here: the
module will be called imx_keypad.
+config KEYBOARD_IMX_BBM_SCMI
+ tristate "IMX BBM SCMI Key Driver"
+ depends on IMX_SCMI_BBM_EXT || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ This is the BBM key driver for NXP i.MX SoCs managed through
+ the SCMI protocol.
+
+ To compile this driver as a module, choose M here: the
+ module will be called scmi-imx-bbm-key.
+
config KEYBOARD_IMX_SC_KEY
tristate "IMX SCU Key Driver"
depends on IMX_SCU
@@ -485,17 +484,6 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
-config KEYBOARD_NOMADIK
- tristate "ST-Ericsson Nomadik SKE keyboard"
- depends on (ARCH_NOMADIK || ARCH_U8500 || COMPILE_TEST)
- select INPUT_MATRIXKMAP
- help
- Say Y here if you want to use a keypad provided on the SKE controller
- used on the Ux500 and Nomadik platforms
-
- To compile this driver as a module, choose M here: the
- module will be called nmk-ske-keypad.
-
config KEYBOARD_NSPIRE
tristate "TI-NSPIRE built-in keyboard"
depends on ARCH_NSPIRE && OF
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index aecef00c5d09..1e0721c30709 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
obj-$(CONFIG_KEYBOARD_IQS62X) += iqs62x-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o
+obj-$(CONFIG_KEYBOARD_IMX_BBM_SCMI) += imx-sm-bbm-key.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
@@ -41,12 +42,10 @@ obj-$(CONFIG_KEYBOARD_LPC32XX) += lpc32xx-keys.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
-obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o
obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_NSPIRE) += nspire-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index bf72ab8df817..f1753207429d 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -66,7 +66,6 @@ static void adc_keys_poll(struct input_dev *input)
static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
{
struct adc_keys_button *map;
- struct fwnode_handle *child;
int i;
st->num_keys = device_get_child_node_count(dev);
@@ -80,11 +79,10 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
return -ENOMEM;
i = 0;
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (fwnode_property_read_u32(child, "press-threshold-microvolt",
&map[i].voltage)) {
dev_err(dev, "Key with invalid or missing voltage\n");
- fwnode_handle_put(child);
return -EINVAL;
}
map[i].voltage /= 1000;
@@ -92,7 +90,6 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
if (fwnode_property_read_u32(child, "linux,code",
&map[i].keycode)) {
dev_err(dev, "Key with invalid or missing linux,code\n");
- fwnode_handle_put(child);
return -EINVAL;
}
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1b0279393df4..d25d63a807f2 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -188,6 +188,7 @@ struct adp5588_kpad {
u32 cols;
u32 unlock_keys[2];
int nkeys_unlock;
+ bool gpio_only;
unsigned short keycode[ADP5588_KEYMAPSIZE];
unsigned char gpiomap[ADP5588_MAXGPIO];
struct gpio_chip gc;
@@ -221,15 +222,13 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int val;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (kpad->dir[bank] & bit)
val = kpad->dat_out[bank];
else
val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
- mutex_unlock(&kpad->gpio_lock);
-
return !!(val & bit);
}
@@ -240,7 +239,7 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (val)
kpad->dat_out[bank] |= bit;
@@ -248,8 +247,6 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
kpad->dat_out[bank] &= ~bit;
adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
}
static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
@@ -259,7 +256,6 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
bool pull_disable;
- int ret;
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
@@ -272,19 +268,15 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
return -ENOTSUPP;
}
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (pull_disable)
kpad->pull_dis[bank] |= bit;
else
kpad->pull_dis[bank] &= bit;
- ret = adp5588_write(kpad->client, GPIO_PULL1 + bank,
- kpad->pull_dis[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
-
- return ret;
+ return adp5588_write(kpad->client, GPIO_PULL1 + bank,
+ kpad->pull_dis[bank]);
}
static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
@@ -292,16 +284,11 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- int ret;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
kpad->dir[bank] &= ~bit;
- ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
-
- return ret;
+ return adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
}
static int adp5588_gpio_direction_output(struct gpio_chip *chip,
@@ -310,9 +297,9 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- int ret;
+ int error;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
kpad->dir[bank] |= bit;
@@ -321,17 +308,16 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
- if (ret)
- goto out_unlock;
-
- ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+ error = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
+ kpad->dat_out[bank]);
+ if (error)
+ return error;
-out_unlock:
- mutex_unlock(&kpad->gpio_lock);
+ error = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+ if (error)
+ return error;
- return ret;
+ return 0;
}
static int adp5588_build_gpiomap(struct adp5588_kpad *kpad)
@@ -446,10 +432,17 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
- girq = &kpad->gc.irq;
- gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
- girq->handler = handle_bad_irq;
- girq->threaded = true;
+ if (device_property_present(dev, "interrupt-controller")) {
+ if (!kpad->client->irq) {
+ dev_err(dev, "Unable to serve as interrupt controller without interrupt");
+ return -EINVAL;
+ }
+
+ girq = &kpad->gc.irq;
+ gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
+ }
mutex_init(&kpad->gpio_lock);
@@ -627,7 +620,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad)
for (i = 0; i < KEYP_MAX_EVENT; i++) {
ret = adp5588_read(client, KEY_EVENTA);
- if (ret)
+ if (ret < 0)
return ret;
}
@@ -647,6 +640,18 @@ static int adp5588_fw_parse(struct adp5588_kpad *kpad)
struct i2c_client *client = kpad->client;
int ret, i;
+ /*
+ * Check if the device is to be operated purely in GPIO mode. To do
+ * so, check that no keypad rows or columns have been specified,
+ * since all pins should be configured as GPIOs.
+ */
+ if (!device_property_present(&client->dev, "keypad,num-rows") &&
+ !device_property_present(&client->dev, "keypad,num-columns")) {
+ /* If purely GPIO, skip keypad setup */
+ kpad->gpio_only = true;
+ return 0;
+ }
+
ret = matrix_keypad_parse_properties(&client->dev, &kpad->rows,
&kpad->cols);
if (ret)
@@ -790,17 +795,19 @@ static int adp5588_probe(struct i2c_client *client)
if (error)
return error;
- error = devm_request_threaded_irq(&client->dev, client->irq,
- adp5588_hard_irq, adp5588_thread_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, kpad);
- if (error) {
- dev_err(&client->dev, "failed to request irq %d: %d\n",
- client->irq, error);
- return error;
+ if (client->irq) {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
+ if (error) {
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
+ }
}
- dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
+ dev_info(&client->dev, "Rev.%d controller\n", revid);
return 0;
}
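The adp5588 conversion above leans on the scope-based cleanup helpers from
<linux/cleanup.h>: guard(mutex)(&lock) takes the mutex and releases it when
the enclosing scope ends, so error paths can simply return. A minimal
sketch, with a hypothetical my_dev structure:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	int value;
};

static int my_dev_set(struct my_dev *d, int value)
{
	guard(mutex)(&d->lock);		/* unlocked automatically on return */

	if (value < 0)
		return -EINVAL;		/* early return is safe */

	d->value = value;
	return 0;
}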
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index f4f2078cf501..5855d4fc6e6a 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -639,7 +639,7 @@ static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
- mutex_lock(&atkbd->mutex);
+ guard(mutex)(&atkbd->mutex);
if (!atkbd->enabled) {
/*
@@ -657,8 +657,6 @@ static void atkbd_event_work(struct work_struct *work)
if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
atkbd_set_repeat_rate(atkbd);
}
-
- mutex_unlock(&atkbd->mutex);
}
/*
@@ -1361,7 +1359,7 @@ static int atkbd_reconnect(struct serio *serio)
{
struct atkbd *atkbd = atkbd_from_serio(serio);
struct serio_driver *drv = serio->drv;
- int retval = -1;
+ int error;
if (!atkbd || !drv) {
dev_dbg(&serio->dev,
@@ -1369,16 +1367,17 @@ static int atkbd_reconnect(struct serio *serio)
return -1;
}
- mutex_lock(&atkbd->mutex);
+ guard(mutex)(&atkbd->mutex);
atkbd_disable(atkbd);
if (atkbd->write) {
- if (atkbd_probe(atkbd))
- goto out;
+ error = atkbd_probe(atkbd);
+ if (error)
+ return error;
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
- goto out;
+ return -EIO;
/*
* Restore LED state and repeat rate. While input core
@@ -1404,11 +1403,7 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->write)
atkbd_activate(atkbd);
- retval = 0;
-
- out:
- mutex_unlock(&atkbd->mutex);
- return retval;
+ return 0;
}
static const struct serio_device_id atkbd_serio_ids[] = {
@@ -1465,17 +1460,15 @@ static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t
struct atkbd *atkbd = atkbd_from_serio(serio);
int retval;
- retval = mutex_lock_interruptible(&atkbd->mutex);
- if (retval)
- return retval;
+ scoped_guard(mutex_intr, &atkbd->mutex) {
+ atkbd_disable(atkbd);
+ retval = handler(atkbd, buf, count);
+ atkbd_enable(atkbd);
- atkbd_disable(atkbd);
- retval = handler(atkbd, buf, count);
- atkbd_enable(atkbd);
-
- mutex_unlock(&atkbd->mutex);
+ return retval;
+ }
- return retval;
+ return -EINTR;
}
static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
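The interruptible variant in atkbd_attr_set_helper() is scoped_guard(): the
guarded block runs only if mutex_lock_interruptible() succeeded, and control
falls past the block when the lock attempt was interrupted by a signal. A
minimal sketch, with hypothetical arguments:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int my_store(struct mutex *lock, int *dst, int value)
{
	scoped_guard(mutex_intr, lock) {
		/* Runs only when the mutex was acquired. */
		*dst = value;
		return 0;	/* mutex released on scope exit */
	}

	/* Reached only when the lock attempt was interrupted. */
	return -EINTR;
}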
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 9f3bcd41cf67..380fe8dab3b0 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -245,23 +245,20 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
{
int n_events = get_n_events_by_type(type);
const unsigned long *bitmap = get_bm_events_by_type(ddata->input, type);
- unsigned long *bits;
ssize_t error;
int i;
- bits = bitmap_alloc(n_events, GFP_KERNEL);
+ unsigned long *bits __free(bitmap) = bitmap_alloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
error = bitmap_parselist(buf, bits, n_events);
if (error)
- goto out;
+ return error;
/* First validate */
- if (!bitmap_subset(bits, bitmap, n_events)) {
- error = -EINVAL;
- goto out;
- }
+ if (!bitmap_subset(bits, bitmap, n_events))
+ return -EINVAL;
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
@@ -271,12 +268,11 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
if (test_bit(*bdata->code, bits) &&
!bdata->button->can_disable) {
- error = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
- mutex_lock(&ddata->disable_lock);
+ guard(mutex)(&ddata->disable_lock);
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
@@ -290,11 +286,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
gpio_keys_enable_button(bdata);
}
- mutex_unlock(&ddata->disable_lock);
-
-out:
- bitmap_free(bits);
- return error;
+ return 0;
}
#define ATTR_SHOW_FN(name, type, only_disabled) \
@@ -470,11 +462,10 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
struct input_dev *input = bdata->input;
- unsigned long flags;
BUG_ON(irq != bdata->irq);
- spin_lock_irqsave(&bdata->lock, flags);
+ guard(spinlock_irqsave)(&bdata->lock);
if (!bdata->key_pressed) {
if (bdata->button->wakeup)
@@ -497,7 +488,6 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
ms_to_ktime(bdata->release_delay),
HRTIMER_MODE_REL_HARD);
out:
- spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
}
@@ -768,7 +758,6 @@ gpio_keys_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- struct fwnode_handle *child;
int nbuttons, irq;
nbuttons = device_get_child_node_count(dev);
@@ -790,7 +779,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
device_property_read_string(dev, "label", &pdata->name);
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (is_of_node(child)) {
irq = of_irq_get_byname(to_of_node(child), "irq");
if (irq > 0)
@@ -808,7 +797,6 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (fwnode_property_read_u32(child, "linux,code",
&button->code)) {
dev_err(dev, "Button without keycode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
@@ -1064,10 +1052,10 @@ static int gpio_keys_suspend(struct device *dev)
if (error)
return error;
} else {
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
+
if (input_device_enabled(input))
gpio_keys_close(input);
- mutex_unlock(&input->mutex);
}
return 0;
@@ -1077,20 +1065,20 @@ static int gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
- int error = 0;
+ int error;
if (device_may_wakeup(dev)) {
gpio_keys_disable_wakeup(ddata);
} else {
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
+ guard(mutex)(&input->mutex);
+
+ if (input_device_enabled(input)) {
error = gpio_keys_open(input);
- mutex_unlock(&input->mutex);
+ if (error)
+ return error;
+ }
}
- if (error)
- return error;
-
gpio_keys_report_state(ddata);
return 0;
}
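Similarly, the __free(bitmap) annotation in gpio_keys_attr_store_helper()
attaches bitmap_free() to the variable's scope, so every return path frees
the allocation without a common "out:" label. A minimal sketch:

#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/gfp.h>

static int my_parse(const char *buf, unsigned int nbits)
{
	/* Freed via bitmap_free() when 'bits' goes out of scope. */
	unsigned long *bits __free(bitmap) = bitmap_alloc(nbits, GFP_KERNEL);

	if (!bits)
		return -ENOMEM;

	return bitmap_parselist(buf, bits, nbits);
}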
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index b41fd1240f43..41ca0d3c9098 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -144,7 +144,6 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- struct fwnode_handle *child;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
@@ -166,11 +165,10 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
device_property_read_string(dev, "label", &pdata->name);
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (fwnode_property_read_u32(child, "linux,code",
&button->code)) {
dev_err(dev, "button without keycode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/input/keyboard/imx-sm-bbm-key.c b/drivers/input/keyboard/imx-sm-bbm-key.c
new file mode 100644
index 000000000000..96486bd23d60
--- /dev/null
+++ b/drivers/input/keyboard/imx-sm-bbm-key.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP.
+ */
+
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+#include <linux/suspend.h>
+
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
+
+struct scmi_imx_bbm {
+ struct scmi_protocol_handle *ph;
+ const struct scmi_imx_bbm_proto_ops *ops;
+ struct notifier_block nb;
+ int keycode;
+ int keystate; /* 1:pressed */
+ bool suspended;
+ struct delayed_work check_work;
+ struct input_dev *input;
+};
+
+static void scmi_imx_bbm_pwrkey_check_for_events(struct work_struct *work)
+{
+ struct scmi_imx_bbm *bbnsm = container_of(to_delayed_work(work),
+ struct scmi_imx_bbm, check_work);
+ struct scmi_protocol_handle *ph = bbnsm->ph;
+ struct input_dev *input = bbnsm->input;
+ u32 state = 0;
+ int ret;
+
+ ret = bbnsm->ops->button_get(ph, &state);
+ if (ret) {
+ pr_err("%s: %d\n", __func__, ret);
+ return;
+ }
+
+ pr_debug("%s: state: %d, keystate %d\n", __func__, state, bbnsm->keystate);
+
+ /* only report new event if status changed */
+ if (state ^ bbnsm->keystate) {
+ bbnsm->keystate = state;
+ input_event(input, EV_KEY, bbnsm->keycode, state);
+ input_sync(input);
+ pm_relax(bbnsm->input->dev.parent);
+ pr_debug("EV_KEY: %x\n", bbnsm->keycode);
+ }
+
+ /* repeat check if pressed long */
+ if (state)
+ schedule_delayed_work(&bbnsm->check_work, msecs_to_jiffies(REPEAT_INTERVAL));
+}
+
+static int scmi_imx_bbm_pwrkey_event(struct scmi_imx_bbm *bbnsm)
+{
+ struct input_dev *input = bbnsm->input;
+
+ pm_wakeup_event(input->dev.parent, 0);
+
+ /*
+ * Directly report the key event after resume to make sure no
+ * key press event is missed.
+ */
+ if (READ_ONCE(bbnsm->suspended)) {
+ bbnsm->keystate = 1;
+ input_event(input, EV_KEY, bbnsm->keycode, 1);
+ input_sync(input);
+ WRITE_ONCE(bbnsm->suspended, false);
+ }
+
+ schedule_delayed_work(&bbnsm->check_work, msecs_to_jiffies(DEBOUNCE_TIME));
+
+ return 0;
+}
+
+static void scmi_imx_bbm_pwrkey_act(void *pdata)
+{
+ struct scmi_imx_bbm *bbnsm = pdata;
+
+ cancel_delayed_work_sync(&bbnsm->check_work);
+}
+
+static int scmi_imx_bbm_key_notifier(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_imx_bbm *bbnsm = container_of(nb, struct scmi_imx_bbm, nb);
+ struct scmi_imx_bbm_notif_report *r = data;
+
+ if (r->is_button) {
+ pr_debug("BBM Button Power key pressed\n");
+ scmi_imx_bbm_pwrkey_event(bbnsm);
+ } else {
+ /* Should never reach here */
+ pr_err("Unexpected BBM event: %s\n", __func__);
+ }
+
+ return 0;
+}
+
+static int scmi_imx_bbm_pwrkey_init(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+ struct device *dev = &sdev->dev;
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ struct input_dev *input;
+ int ret;
+
+ if (device_property_read_u32(dev, "linux,code", &bbnsm->keycode)) {
+ bbnsm->keycode = KEY_POWER;
+ dev_warn(dev, "key code is not specified, using default KEY_POWER\n");
+ }
+
+ INIT_DELAYED_WORK(&bbnsm->check_work, scmi_imx_bbm_pwrkey_check_for_events);
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate the input device for SCMI IMX BBM\n");
+ return -ENOMEM;
+ }
+
+ input->name = dev_name(dev);
+ input->phys = "bbnsm-pwrkey/input0";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_KEY, bbnsm->keycode);
+
+ ret = devm_add_action_or_reset(dev, scmi_imx_bbm_pwrkey_act, bbnsm);
+ if (ret) {
+ dev_err(dev, "failed to register remove action\n");
+ return ret;
+ }
+
+ bbnsm->input = input;
+
+ bbnsm->nb.notifier_call = &scmi_imx_bbm_key_notifier;
+ ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_BBM,
+ SCMI_EVENT_IMX_BBM_BUTTON,
+ NULL, &bbnsm->nb);
+
+ if (ret)
+ dev_err(dev, "Failed to register BBM Button Events %d:", ret);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(dev, "failed to register input device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scmi_imx_bbm_key_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+ struct device *dev = &sdev->dev;
+ struct scmi_protocol_handle *ph;
+ struct scmi_imx_bbm *bbnsm;
+ int ret;
+
+ if (!handle)
+ return -ENODEV;
+
+ bbnsm = devm_kzalloc(dev, sizeof(*bbnsm), GFP_KERNEL);
+ if (!bbnsm)
+ return -ENOMEM;
+
+ bbnsm->ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph);
+ if (IS_ERR(bbnsm->ops))
+ return PTR_ERR(bbnsm->ops);
+
+ bbnsm->ph = ph;
+
+ device_init_wakeup(dev, true);
+
+ dev_set_drvdata(dev, bbnsm);
+
+ ret = scmi_imx_bbm_pwrkey_init(sdev);
+ if (ret)
+ device_init_wakeup(dev, false);
+
+ return ret;
+}
+
+static int __maybe_unused scmi_imx_bbm_key_suspend(struct device *dev)
+{
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+
+ WRITE_ONCE(bbnsm->suspended, true);
+
+ return 0;
+}
+
+static int __maybe_unused scmi_imx_bbm_key_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(scmi_imx_bbm_pm_key_ops, scmi_imx_bbm_key_suspend,
+ scmi_imx_bbm_key_resume);
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_BBM, "imx-bbm-key" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_bbm_key_driver = {
+ .driver = {
+ .pm = &scmi_imx_bbm_pm_key_ops,
+ },
+ .name = "scmi-imx-bbm-key",
+ .probe = scmi_imx_bbm_key_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_bbm_key_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM BBM Key driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 688d61244b5f..1315b0f0862f 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -45,7 +45,6 @@ struct iqs62x_keys_private {
static int iqs62x_keys_parse_prop(struct platform_device *pdev,
struct iqs62x_keys_private *iqs62x_keys)
{
- struct fwnode_handle *child;
unsigned int val;
int ret, i;
@@ -68,7 +67,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
- child = device_get_named_child_node(&pdev->dev,
+ struct fwnode_handle *child __free(fwnode_handle) =
+ device_get_named_child_node(&pdev->dev,
iqs62x_switch_names[i]);
if (!child)
continue;
@@ -77,7 +77,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
- fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -91,8 +90,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
-
- fwnode_handle_put(child);
}
return 0;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 7a56f3d3aacd..3c38bae576ed 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -17,19 +17,27 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
struct matrix_keypad {
- const struct matrix_keypad_platform_data *pdata;
struct input_dev *input_dev;
unsigned int row_shift;
+ unsigned int col_scan_delay_us;
+ /* key debounce interval in milliseconds */
+ unsigned int debounce_ms;
+ bool drive_inactive_cols;
+
+ struct gpio_desc *row_gpios[MATRIX_MAX_ROWS];
+ unsigned int num_row_gpios;
+
+ struct gpio_desc *col_gpios[MATRIX_MAX_COLS];
+ unsigned int num_col_gpios;
+
unsigned int row_irqs[MATRIX_MAX_ROWS];
- unsigned int num_row_irqs;
DECLARE_BITMAP(wakeup_enabled_irqs, MATRIX_MAX_ROWS);
uint32_t last_key_state[MATRIX_MAX_COLS];
@@ -45,50 +53,43 @@ struct matrix_keypad {
* columns. In that case it is configured here to be input, otherwise it is
* driven with the inactive value.
*/
-static void __activate_col(const struct matrix_keypad_platform_data *pdata,
- int col, bool on)
+static void __activate_col(struct matrix_keypad *keypad, int col, bool on)
{
- bool level_on = !pdata->active_low;
-
if (on) {
- gpio_direction_output(pdata->col_gpios[col], level_on);
+ gpiod_direction_output(keypad->col_gpios[col], 1);
} else {
- gpio_set_value_cansleep(pdata->col_gpios[col], !level_on);
- if (!pdata->drive_inactive_cols)
- gpio_direction_input(pdata->col_gpios[col]);
+ gpiod_set_value_cansleep(keypad->col_gpios[col], 0);
+ if (!keypad->drive_inactive_cols)
+ gpiod_direction_input(keypad->col_gpios[col]);
}
}
-static void activate_col(const struct matrix_keypad_platform_data *pdata,
- int col, bool on)
+static void activate_col(struct matrix_keypad *keypad, int col, bool on)
{
- __activate_col(pdata, col, on);
+ __activate_col(keypad, col, on);
- if (on && pdata->col_scan_delay_us)
- udelay(pdata->col_scan_delay_us);
+ if (on && keypad->col_scan_delay_us)
+ udelay(keypad->col_scan_delay_us);
}
-static void activate_all_cols(const struct matrix_keypad_platform_data *pdata,
- bool on)
+static void activate_all_cols(struct matrix_keypad *keypad, bool on)
{
int col;
- for (col = 0; col < pdata->num_col_gpios; col++)
- __activate_col(pdata, col, on);
+ for (col = 0; col < keypad->num_col_gpios; col++)
+ __activate_col(keypad, col, on);
}
-static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
- int row)
+static bool row_asserted(struct matrix_keypad *keypad, int row)
{
- return gpio_get_value_cansleep(pdata->row_gpios[row]) ?
- !pdata->active_low : pdata->active_low;
+ return gpiod_get_value_cansleep(keypad->row_gpios[row]);
}
static void enable_row_irqs(struct matrix_keypad *keypad)
{
int i;
- for (i = 0; i < keypad->num_row_irqs; i++)
+ for (i = 0; i < keypad->num_row_gpios; i++)
enable_irq(keypad->row_irqs[i]);
}
@@ -96,7 +97,7 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
{
int i;
- for (i = 0; i < keypad->num_row_irqs; i++)
+ for (i = 0; i < keypad->num_row_gpios; i++)
disable_irq_nosync(keypad->row_irqs[i]);
}
@@ -109,39 +110,38 @@ static void matrix_keypad_scan(struct work_struct *work)
container_of(work, struct matrix_keypad, work.work);
struct input_dev *input_dev = keypad->input_dev;
const unsigned short *keycodes = input_dev->keycode;
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
/* de-activate all columns for scanning */
- activate_all_cols(pdata, false);
+ activate_all_cols(keypad, false);
memset(new_state, 0, sizeof(new_state));
- for (row = 0; row < pdata->num_row_gpios; row++)
- gpio_direction_input(pdata->row_gpios[row]);
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ gpiod_direction_input(keypad->row_gpios[row]);
/* assert each column and read the row status out */
- for (col = 0; col < pdata->num_col_gpios; col++) {
+ for (col = 0; col < keypad->num_col_gpios; col++) {
- activate_col(pdata, col, true);
+ activate_col(keypad, col, true);
- for (row = 0; row < pdata->num_row_gpios; row++)
+ for (row = 0; row < keypad->num_row_gpios; row++)
new_state[col] |=
- row_asserted(pdata, row) ? (1 << row) : 0;
+ row_asserted(keypad, row) ? BIT(row) : 0;
- activate_col(pdata, col, false);
+ activate_col(keypad, col, false);
}
- for (col = 0; col < pdata->num_col_gpios; col++) {
+ for (col = 0; col < keypad->num_col_gpios; col++) {
uint32_t bits_changed;
bits_changed = keypad->last_key_state[col] ^ new_state[col];
if (bits_changed == 0)
continue;
- for (row = 0; row < pdata->num_row_gpios; row++) {
- if ((bits_changed & (1 << row)) == 0)
+ for (row = 0; row < keypad->num_row_gpios; row++) {
+ if (!(bits_changed & BIT(row)))
continue;
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
@@ -155,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work)
memcpy(keypad->last_key_state, new_state, sizeof(new_state));
- activate_all_cols(pdata, true);
+ activate_all_cols(keypad, true);
/* Enable IRQs again */
spin_lock_irq(&keypad->lock);
@@ -182,7 +182,7 @@ static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
disable_row_irqs(keypad);
keypad->scan_pending = true;
schedule_delayed_work(&keypad->work,
- msecs_to_jiffies(keypad->pdata->debounce_ms));
+ msecs_to_jiffies(keypad->debounce_ms));
out:
spin_unlock_irqrestore(&keypad->lock, flags);
@@ -225,7 +225,8 @@ static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad)
{
int i;
- for_each_clear_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs)
+ for_each_clear_bit(i, keypad->wakeup_enabled_irqs,
+ keypad->num_row_gpios)
if (enable_irq_wake(keypad->row_irqs[i]) == 0)
__set_bit(i, keypad->wakeup_enabled_irqs);
}
@@ -234,7 +235,8 @@ static void matrix_keypad_disable_wakeup(struct matrix_keypad *keypad)
{
int i;
- for_each_set_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs) {
+ for_each_set_bit(i, keypad->wakeup_enabled_irqs,
+ keypad->num_row_gpios) {
disable_irq_wake(keypad->row_irqs[i]);
__clear_bit(i, keypad->wakeup_enabled_irqs);
}
@@ -272,182 +274,108 @@ static DEFINE_SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
static int matrix_keypad_init_gpio(struct platform_device *pdev,
struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i, irq, err;
-
- /* initialized strobe lines as outputs, activated */
- for (i = 0; i < pdata->num_col_gpios; i++) {
- err = devm_gpio_request(&pdev->dev,
- pdata->col_gpios[i], "matrix_kbd_col");
- if (err) {
- dev_err(&pdev->dev,
- "failed to request GPIO%d for COL%d\n",
- pdata->col_gpios[i], i);
- return err;
- }
+ bool active_low;
+ int nrow, ncol;
+ int err;
+ int i;
- gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
+ nrow = gpiod_count(&pdev->dev, "row");
+ ncol = gpiod_count(&pdev->dev, "col");
+ if (nrow < 0 || ncol < 0) {
+ dev_err(&pdev->dev, "missing row or column GPIOs\n");
+ return -EINVAL;
}
- for (i = 0; i < pdata->num_row_gpios; i++) {
- err = devm_gpio_request(&pdev->dev,
- pdata->row_gpios[i], "matrix_kbd_row");
+ keypad->num_row_gpios = nrow;
+ keypad->num_col_gpios = ncol;
+
+ active_low = device_property_read_bool(&pdev->dev, "gpio-activelow");
+
+ /* initialize strobe lines as outputs, activated */
+ for (i = 0; i < keypad->num_col_gpios; i++) {
+ keypad->col_gpios[i] = devm_gpiod_get_index(&pdev->dev, "col",
+ i, GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(keypad->col_gpios[i]);
if (err) {
dev_err(&pdev->dev,
- "failed to request GPIO%d for ROW%d\n",
- pdata->row_gpios[i], i);
+ "failed to request GPIO for COL%d: %d\n",
+ i, err);
return err;
}
- gpio_direction_input(pdata->row_gpios[i]);
+ gpiod_set_consumer_name(keypad->col_gpios[i], "matrix_kbd_col");
+
+ if (active_low ^ gpiod_is_active_low(keypad->col_gpios[i]))
+ gpiod_toggle_active_low(keypad->col_gpios[i]);
+
+ gpiod_direction_output(keypad->col_gpios[i], 1);
}
- if (pdata->clustered_irq > 0) {
- err = devm_request_any_context_irq(&pdev->dev,
- pdata->clustered_irq,
- matrix_keypad_interrupt,
- pdata->clustered_irq_flags,
- "matrix-keypad", keypad);
- if (err < 0) {
+ for (i = 0; i < keypad->num_row_gpios; i++) {
+ keypad->row_gpios[i] = devm_gpiod_get_index(&pdev->dev, "row",
+ i, GPIOD_IN);
+ err = PTR_ERR_OR_ZERO(keypad->row_gpios[i]);
+ if (err) {
dev_err(&pdev->dev,
- "Unable to acquire clustered interrupt\n");
+ "failed to request GPIO for ROW%d: %d\n",
+ i, err);
return err;
}
- keypad->row_irqs[0] = pdata->clustered_irq;
- keypad->num_row_irqs = 1;
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++) {
- irq = gpio_to_irq(pdata->row_gpios[i]);
- if (irq < 0) {
- err = irq;
- dev_err(&pdev->dev,
- "Unable to convert GPIO line %i to irq: %d\n",
- pdata->row_gpios[i], err);
- return err;
- }
-
- err = devm_request_any_context_irq(&pdev->dev,
- irq,
- matrix_keypad_interrupt,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "matrix-keypad", keypad);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Unable to acquire interrupt for GPIO line %i\n",
- pdata->row_gpios[i]);
- return err;
- }
-
- keypad->row_irqs[i] = irq;
- }
+ gpiod_set_consumer_name(keypad->row_gpios[i], "matrix_kbd_row");
- keypad->num_row_irqs = pdata->num_row_gpios;
+ if (active_low ^ gpiod_is_active_low(keypad->row_gpios[i]))
+ gpiod_toggle_active_low(keypad->row_gpios[i]);
}
- /* initialized as disabled - enabled by input->open */
- disable_row_irqs(keypad);
-
return 0;
}
-#ifdef CONFIG_OF
-static struct matrix_keypad_platform_data *
-matrix_keypad_parse_dt(struct device *dev)
+static int matrix_keypad_setup_interrupts(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
- struct matrix_keypad_platform_data *pdata;
- struct device_node *np = dev->of_node;
- unsigned int *gpios;
- int ret, i, nrow, ncol;
-
- if (!np) {
- dev_err(dev, "device lacks DT data\n");
- return ERR_PTR(-ENODEV);
- }
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "could not allocate memory for platform data\n");
- return ERR_PTR(-ENOMEM);
- }
-
- pdata->num_row_gpios = nrow = gpiod_count(dev, "row");
- pdata->num_col_gpios = ncol = gpiod_count(dev, "col");
- if (nrow < 0 || ncol < 0) {
- dev_err(dev, "number of keypad rows/columns not specified\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
-
- pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
- of_property_read_bool(np, "linux,wakeup"); /* legacy */
-
- pdata->active_low = of_property_read_bool(np, "gpio-activelow");
-
- pdata->drive_inactive_cols =
- of_property_read_bool(np, "drive-inactive-cols");
-
- of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms);
- of_property_read_u32(np, "col-scan-delay-us",
- &pdata->col_scan_delay_us);
+ int err;
+ int irq;
+ int i;
- gpios = devm_kcalloc(dev,
- pdata->num_row_gpios + pdata->num_col_gpios,
- sizeof(unsigned int),
- GFP_KERNEL);
- if (!gpios) {
- dev_err(dev, "could not allocate memory for gpios\n");
- return ERR_PTR(-ENOMEM);
- }
+ for (i = 0; i < keypad->num_row_gpios; i++) {
+ irq = gpiod_to_irq(keypad->row_gpios[i]);
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev,
+ "Unable to convert GPIO line %i to irq: %d\n",
+ i, err);
+ return err;
+ }
- for (i = 0; i < nrow; i++) {
- ret = of_get_named_gpio(np, "row-gpios", i);
- if (ret < 0)
- return ERR_PTR(ret);
- gpios[i] = ret;
- }
+ err = devm_request_any_context_irq(&pdev->dev, irq,
+ matrix_keypad_interrupt,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "matrix-keypad", keypad);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire interrupt for row %i: %d\n",
+ i, err);
+ return err;
+ }
- for (i = 0; i < ncol; i++) {
- ret = of_get_named_gpio(np, "col-gpios", i);
- if (ret < 0)
- return ERR_PTR(ret);
- gpios[nrow + i] = ret;
+ keypad->row_irqs[i] = irq;
}
- pdata->row_gpios = gpios;
- pdata->col_gpios = &gpios[pdata->num_row_gpios];
-
- return pdata;
-}
-#else
-static inline struct matrix_keypad_platform_data *
-matrix_keypad_parse_dt(struct device *dev)
-{
- dev_err(dev, "no platform data defined\n");
+ /* initialized as disabled - enabled by input->open */
+ disable_row_irqs(keypad);
- return ERR_PTR(-EINVAL);
+ return 0;
}
-#endif
static int matrix_keypad_probe(struct platform_device *pdev)
{
- const struct matrix_keypad_platform_data *pdata;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
+ bool wakeup;
int err;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- pdata = matrix_keypad_parse_dt(&pdev->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else if (!pdata->keymap_data) {
- dev_err(&pdev->dev, "no keymap data defined\n");
- return -EINVAL;
- }
-
keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
if (!keypad)
return -ENOMEM;
@@ -457,40 +385,56 @@ static int matrix_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
keypad->input_dev = input_dev;
- keypad->pdata = pdata;
- keypad->row_shift = get_count_order(pdata->num_col_gpios);
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
spin_lock_init(&keypad->lock);
+ keypad->drive_inactive_cols =
+ device_property_read_bool(&pdev->dev, "drive-inactive-cols");
+ device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ device_property_read_u32(&pdev->dev, "col-scan-delay-us",
+ &keypad->col_scan_delay_us);
+
+ err = matrix_keypad_init_gpio(pdev, keypad);
+ if (err)
+ return err;
+
+ keypad->row_shift = get_count_order(keypad->num_col_gpios);
+
+ err = matrix_keypad_setup_interrupts(pdev, keypad);
+ if (err)
+ return err;
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- err = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
- pdata->num_row_gpios,
- pdata->num_col_gpios,
+ err = matrix_keypad_build_keymap(NULL, NULL,
+ keypad->num_row_gpios,
+ keypad->num_col_gpios,
NULL, input_dev);
if (err) {
dev_err(&pdev->dev, "failed to build keymap\n");
return -ENOMEM;
}
- if (!pdata->no_autorepeat)
+ if (!device_property_read_bool(&pdev->dev, "linux,no-autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
+
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
- err = matrix_keypad_init_gpio(pdev, keypad);
- if (err)
- return err;
-
err = input_register_device(keypad->input_dev);
if (err)
return err;
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ wakeup = device_property_read_bool(&pdev->dev, "wakeup-source") ||
+ /* legacy */
+ device_property_read_bool(&pdev->dev, "linux,wakeup");
+ device_init_wakeup(&pdev->dev, wakeup);
+
platform_set_drvdata(pdev, keypad);
return 0;
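The matrix_keypad rework above replaces integer GPIO numbers and
of_get_named_gpio() with GPIO descriptors, where polarity is handled by
gpiolib and values are logical. A minimal sketch of looking up one line of
a hypothetical "row-gpios" property:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_get_row(struct device *dev, unsigned int idx,
		      struct gpio_desc **out)
{
	/* Binds to entry idx of the "row-gpios" property, as an input. */
	struct gpio_desc *gpiod = devm_gpiod_get_index(dev, "row", idx,
						       GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* gpiod_get_value_cansleep() now returns the logical level. */
	*out = gpiod;
	return 0;
}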
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
deleted file mode 100644
index 2410f676c7f9..000000000000
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Touchkey driver for MELFAS MCS5000/5080 controller
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/platform_data/mcs.h>
-#include <linux/pm.h>
-
-/* MCS5000 Touchkey */
-#define MCS5000_TOUCHKEY_STATUS 0x04
-#define MCS5000_TOUCHKEY_STATUS_PRESS 7
-#define MCS5000_TOUCHKEY_FW 0x0a
-#define MCS5000_TOUCHKEY_BASE_VAL 0x61
-
-/* MCS5080 Touchkey */
-#define MCS5080_TOUCHKEY_STATUS 0x00
-#define MCS5080_TOUCHKEY_STATUS_PRESS 3
-#define MCS5080_TOUCHKEY_FW 0x01
-#define MCS5080_TOUCHKEY_BASE_VAL 0x1
-
-enum mcs_touchkey_type {
- MCS5000_TOUCHKEY,
- MCS5080_TOUCHKEY,
-};
-
-struct mcs_touchkey_chip {
- unsigned int status_reg;
- unsigned int pressbit;
- unsigned int press_invert;
- unsigned int baseval;
-};
-
-struct mcs_touchkey_data {
- void (*poweron)(bool);
-
- struct i2c_client *client;
- struct input_dev *input_dev;
- struct mcs_touchkey_chip chip;
- unsigned int key_code;
- unsigned int key_val;
- unsigned short keycodes[];
-};
-
-static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
-{
- struct mcs_touchkey_data *data = dev_id;
- struct mcs_touchkey_chip *chip = &data->chip;
- struct i2c_client *client = data->client;
- struct input_dev *input = data->input_dev;
- unsigned int key_val;
- unsigned int pressed;
- int val;
-
- val = i2c_smbus_read_byte_data(client, chip->status_reg);
- if (val < 0) {
- dev_err(&client->dev, "i2c read error [%d]\n", val);
- goto out;
- }
-
- pressed = (val & (1 << chip->pressbit)) >> chip->pressbit;
- if (chip->press_invert)
- pressed ^= chip->press_invert;
-
- /* key_val is 0 when released, so we should use key_val of press. */
- if (pressed) {
- key_val = val & (0xff >> (8 - chip->pressbit));
- if (!key_val)
- goto out;
- key_val -= chip->baseval;
- data->key_code = data->keycodes[key_val];
- data->key_val = key_val;
- }
-
- input_event(input, EV_MSC, MSC_SCAN, data->key_val);
- input_report_key(input, data->key_code, pressed);
- input_sync(input);
-
- dev_dbg(&client->dev, "key %d %d %s\n", data->key_val, data->key_code,
- pressed ? "pressed" : "released");
-
- out:
- return IRQ_HANDLED;
-}
-
-static void mcs_touchkey_poweroff(void *data)
-{
- struct mcs_touchkey_data *touchkey = data;
-
- touchkey->poweron(false);
-}
-
-static int mcs_touchkey_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- const struct mcs_platform_data *pdata;
- struct mcs_touchkey_data *data;
- struct input_dev *input_dev;
- unsigned int fw_reg;
- int fw_ver;
- int error;
- int i;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- data = devm_kzalloc(&client->dev,
- struct_size(data, keycodes, pdata->key_maxval + 1),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "Failed to allocate input device\n");
- return -ENOMEM;
- }
-
- data->client = client;
- data->input_dev = input_dev;
-
- if (id->driver_data == MCS5000_TOUCHKEY) {
- data->chip.status_reg = MCS5000_TOUCHKEY_STATUS;
- data->chip.pressbit = MCS5000_TOUCHKEY_STATUS_PRESS;
- data->chip.baseval = MCS5000_TOUCHKEY_BASE_VAL;
- fw_reg = MCS5000_TOUCHKEY_FW;
- } else {
- data->chip.status_reg = MCS5080_TOUCHKEY_STATUS;
- data->chip.pressbit = MCS5080_TOUCHKEY_STATUS_PRESS;
- data->chip.press_invert = 1;
- data->chip.baseval = MCS5080_TOUCHKEY_BASE_VAL;
- fw_reg = MCS5080_TOUCHKEY_FW;
- }
-
- fw_ver = i2c_smbus_read_byte_data(client, fw_reg);
- if (fw_ver < 0) {
- dev_err(&client->dev, "i2c read error[%d]\n", fw_ver);
- return fw_ver;
- }
- dev_info(&client->dev, "Firmware version: %d\n", fw_ver);
-
- input_dev->name = "MELFAS MCS Touchkey";
- input_dev->id.bustype = BUS_I2C;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
- input_dev->keycode = data->keycodes;
- input_dev->keycodesize = sizeof(data->keycodes[0]);
- input_dev->keycodemax = pdata->key_maxval + 1;
-
- for (i = 0; i < pdata->keymap_size; i++) {
- unsigned int val = MCS_KEY_VAL(pdata->keymap[i]);
- unsigned int code = MCS_KEY_CODE(pdata->keymap[i]);
-
- data->keycodes[val] = code;
- __set_bit(code, input_dev->keybit);
- }
-
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_set_drvdata(input_dev, data);
-
- if (pdata->cfg_pin)
- pdata->cfg_pin();
-
- if (pdata->poweron) {
- data->poweron = pdata->poweron;
- data->poweron(true);
-
- error = devm_add_action_or_reset(&client->dev,
- mcs_touchkey_poweroff, data);
- if (error)
- return error;
- }
-
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- return error;
- }
-
- error = input_register_device(input_dev);
- if (error)
- return error;
-
- i2c_set_clientdata(client, data);
- return 0;
-}
-
-static void mcs_touchkey_shutdown(struct i2c_client *client)
-{
- struct mcs_touchkey_data *data = i2c_get_clientdata(client);
-
- if (data->poweron)
- data->poweron(false);
-}
-
-static int mcs_touchkey_suspend(struct device *dev)
-{
- struct mcs_touchkey_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- /* Disable the work */
- disable_irq(client->irq);
-
- /* Finally turn off the power */
- if (data->poweron)
- data->poweron(false);
-
- return 0;
-}
-
-static int mcs_touchkey_resume(struct device *dev)
-{
- struct mcs_touchkey_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- /* Enable the device first */
- if (data->poweron)
- data->poweron(true);
-
- /* Enable irq again */
- enable_irq(client->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(mcs_touchkey_pm_ops,
- mcs_touchkey_suspend, mcs_touchkey_resume);
-
-static const struct i2c_device_id mcs_touchkey_id[] = {
- { "mcs5000_touchkey", MCS5000_TOUCHKEY },
- { "mcs5080_touchkey", MCS5080_TOUCHKEY },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mcs_touchkey_id);
-
-static struct i2c_driver mcs_touchkey_driver = {
- .driver = {
- .name = "mcs_touchkey",
- .pm = pm_sleep_ptr(&mcs_touchkey_pm_ops),
- },
- .probe = mcs_touchkey_probe,
- .shutdown = mcs_touchkey_shutdown,
- .id_table = mcs_touchkey_id,
-};
-
-module_i2c_driver(mcs_touchkey_driver);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
-MODULE_DESCRIPTION("Touchkey driver for MELFAS MCS5000/5080 controller");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 19f69d167fbd..e5eb025c5e99 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -92,11 +92,6 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mt6779_keypad_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void mt6779_keypad_calc_row_col_single(unsigned int key,
unsigned int *row,
unsigned int *col)
@@ -213,21 +208,10 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
MTK_KPD_SEL_COLMASK(keypad->n_cols));
- keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+ keypad->clk = devm_clk_get_enabled(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
- error = clk_prepare_enable(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
- return error;
- }
-
- error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
- keypad->clk);
- if (error)
- return error;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -260,6 +244,7 @@ static const struct of_device_id mt6779_keypad_of_match[] = {
{ .compatible = "mediatek,mt6873-keypad" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mt6779_keypad_of_match);
static struct platform_driver mt6779_keypad_pdrv = {
.probe = mt6779_keypad_pdrv_probe,
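devm_clk_get_enabled() used above folds clk_get(), clk_prepare_enable() and
the matching cleanup into one device-managed call, which is why the explicit
disable action could be dropped. A minimal sketch, reusing the "kpd" clock
name from the hunk above:

#include <linux/clk.h>
#include <linux/err.h>

static int my_probe_clock(struct device *dev, struct clk **out)
{
	/* Acquired, prepared and enabled; undone automatically on unbind. */
	struct clk *clk = devm_clk_get_enabled(dev, "kpd");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return 0;
}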
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
deleted file mode 100644
index b3ccc97f61e1..000000000000
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- *
- * Keypad controller driver for the SKE (Scroll Key Encoder) module used in
- * the Nomadik 8815 and Ux500 platforms.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-
-#include <linux/platform_data/keypad-nomadik-ske.h>
-
-/* SKE_CR bits */
-#define SKE_KPMLT (0x1 << 6)
-#define SKE_KPCN (0x7 << 3)
-#define SKE_KPASEN (0x1 << 2)
-#define SKE_KPASON (0x1 << 7)
-
-/* SKE_IMSC bits */
-#define SKE_KPIMA (0x1 << 2)
-
-/* SKE_ICR bits */
-#define SKE_KPICS (0x1 << 3)
-#define SKE_KPICA (0x1 << 2)
-
-/* SKE_RIS bits */
-#define SKE_KPRISA (0x1 << 2)
-
-#define SKE_KEYPAD_ROW_SHIFT 3
-#define SKE_KPD_NUM_ROWS 8
-#define SKE_KPD_NUM_COLS 8
-
-/* keypad auto scan registers */
-#define SKE_ASR0 0x20
-#define SKE_ASR1 0x24
-#define SKE_ASR2 0x28
-#define SKE_ASR3 0x2C
-
-#define SKE_NUM_ASRX_REGISTERS (4)
-#define KEY_PRESSED_DELAY 10
-
-/**
- * struct ske_keypad - data structure used by keypad driver
- * @irq: irq no
- * @reg_base: ske registers base address
- * @input: pointer to input device object
- * @board: keypad platform device
- * @keymap: matrix scan code table for keycodes
- * @clk: clock structure pointer
- * @pclk: clock structure pointer
- * @ske_keypad_lock: spinlock protecting the keypad read/writes
- */
-struct ske_keypad {
- int irq;
- void __iomem *reg_base;
- struct input_dev *input;
- const struct ske_keypad_platform_data *board;
- unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
- struct clk *clk;
- struct clk *pclk;
- spinlock_t ske_keypad_lock;
-};
-
-static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
- u8 mask, u8 data)
-{
- u32 ret;
-
- spin_lock(&keypad->ske_keypad_lock);
-
- ret = readl(keypad->reg_base + addr);
- ret &= ~mask;
- ret |= data;
- writel(ret, keypad->reg_base + addr);
-
- spin_unlock(&keypad->ske_keypad_lock);
-}
-
-/*
- * ske_keypad_chip_init: init keypad controller configuration
- *
- * Enable Multi key press detection, auto scan mode
- */
-static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
-{
- u32 value;
- int timeout = keypad->board->debounce_ms;
-
- /* check SKE_RIS to be 0 */
- while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
- cpu_relax();
-
- if (timeout == -1)
- return -EINVAL;
-
- /*
- * set debounce value
- * keypad dbounce is configured in DBCR[15:8]
- * dbounce value in steps of 32/32.768 ms
- */
- spin_lock(&keypad->ske_keypad_lock);
- value = readl(keypad->reg_base + SKE_DBCR);
- value = value & 0xff;
- value |= ((keypad->board->debounce_ms * 32000)/32768) << 8;
- writel(value, keypad->reg_base + SKE_DBCR);
- spin_unlock(&keypad->ske_keypad_lock);
-
- /* enable multi key detection */
- ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
-
- /*
- * set up the number of columns
- * KPCN[5:3] defines no. of keypad columns to be auto scanned
- */
- value = (keypad->board->kcol - 1) << 3;
- ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value);
-
- /* clear keypad interrupt for auto(and pending SW) scans */
- ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS);
-
- /* un-mask keypad interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- /* enable automatic scan */
- ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN);
-
- return 0;
-}
-
-static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
-{
- int row = 0, code, pos;
- struct input_dev *input = keypad->input;
- u32 ske_ris;
- int key_pressed;
- int num_of_rows;
-
- /* find out the row */
- num_of_rows = hweight8(status);
- do {
- pos = __ffs(status);
- row = pos;
- status &= ~(1 << pos);
-
- code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
- ske_ris = readl(keypad->reg_base + SKE_RIS);
- key_pressed = ske_ris & SKE_KPRISA;
-
- input_event(input, EV_MSC, MSC_SCAN, code);
- input_report_key(input, keypad->keymap[code], key_pressed);
- input_sync(input);
- num_of_rows--;
- } while (num_of_rows);
-}
-
-static void ske_keypad_read_data(struct ske_keypad *keypad)
-{
- u8 status;
- int col = 0;
- int ske_asr, i;
-
- /*
- * Read the auto scan registers
- *
- * Each SKE_ASRx (x=0 to x=3) contains two row values.
- * lower byte contains row value for column 2*x,
- * upper byte contains row value for column 2*x + 1
- */
- for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) {
- ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i));
- if (!ske_asr)
- continue;
-
- /* now that ASRx is zero, find out the coloumn x and row y */
- status = ske_asr & 0xff;
- if (status) {
- col = i * 2;
- ske_keypad_report(keypad, status, col);
- }
- status = (ske_asr & 0xff00) >> 8;
- if (status) {
- col = (i * 2) + 1;
- ske_keypad_report(keypad, status, col);
- }
- }
-}
-
-static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
-{
- struct ske_keypad *keypad = dev_id;
- int timeout = keypad->board->debounce_ms;
-
- /* disable auto scan interrupt; mask the interrupt generated */
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
- ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
-
- while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --timeout)
- cpu_relax();
-
- /* SKEx registers are stable and can be read */
- ske_keypad_read_data(keypad);
-
- /* wait until raw interrupt is clear */
- while ((readl(keypad->reg_base + SKE_RIS)) && --timeout)
- msleep(KEY_PRESSED_DELAY);
-
- /* enable auto scan interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- return IRQ_HANDLED;
-}
-
-static void ske_keypad_board_exit(void *data)
-{
- struct ske_keypad *keypad = data;
-
- keypad->board->exit();
-}
-
-static int __init ske_keypad_probe(struct platform_device *pdev)
-{
- const struct ske_keypad_platform_data *plat =
- dev_get_platdata(&pdev->dev);
- struct device *dev = &pdev->dev;
- struct ske_keypad *keypad;
- struct input_dev *input;
- int irq;
- int error;
-
- if (!plat) {
- dev_err(&pdev->dev, "invalid keypad platform data\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- keypad = devm_kzalloc(dev, sizeof(struct ske_keypad),
- GFP_KERNEL);
- input = devm_input_allocate_device(dev);
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- return -ENOMEM;
- }
-
- keypad->irq = irq;
- keypad->board = plat;
- keypad->input = input;
- spin_lock_init(&keypad->ske_keypad_lock);
-
- keypad->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(keypad->reg_base))
- return PTR_ERR(keypad->reg_base);
-
- keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk");
- if (IS_ERR(keypad->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- return PTR_ERR(keypad->pclk);
- }
-
- keypad->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get clk\n");
- return PTR_ERR(keypad->clk);
- }
-
- input->id.bustype = BUS_HOST;
- input->name = "ux500-ske-keypad";
- input->dev.parent = &pdev->dev;
-
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS,
- keypad->keymap, input);
- if (error) {
- dev_err(&pdev->dev, "Failed to build keymap\n");
- return error;
- }
-
- input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
- __set_bit(EV_REP, input->evbit);
-
- /* go through board initialization helpers */
- if (keypad->board->init)
- keypad->board->init();
-
- if (keypad->board->exit) {
- error = devm_add_action_or_reset(dev, ske_keypad_board_exit,
- keypad);
- if (error)
- return error;
- }
-
- error = ske_keypad_chip_init(keypad);
- if (error) {
- dev_err(&pdev->dev, "unable to init keypad hardware\n");
- return error;
- }
-
- error = devm_request_threaded_irq(dev, keypad->irq,
- NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
- if (error) {
- dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- return error;
- }
-
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- return error;
- }
-
- if (plat->wakeup_enable)
- device_init_wakeup(&pdev->dev, true);
-
- platform_set_drvdata(pdev, keypad);
-
- return 0;
-}
-
-static int ske_keypad_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
-
- return 0;
-}
-
-static int ske_keypad_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops,
- ske_keypad_suspend, ske_keypad_resume);
-
-static struct platform_driver ske_keypad_driver = {
- .driver = {
- .name = "nmk-ske-keypad",
- .pm = pm_sleep_ptr(&ske_keypad_dev_pm_ops),
- },
-};
-
-module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
-MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
-MODULE_ALIAS("platform:nomadik-ske-keypad");
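
For context on the register layout the removed ske_keypad_read_data() walked: each SKE_ASRx word packs two 8-bit row bitmaps, the low byte for column 2*x and the high byte for column 2*x + 1. A standalone sketch of that decode (sample value invented, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of the removed driver's ASRx decode. */
static void decode_asr(unsigned int x, uint32_t asr)
{
	const uint8_t halves[2] = { asr & 0xff, (asr >> 8) & 0xff };

	for (unsigned int h = 0; h < 2; h++) {
		uint8_t status = halves[h];
		unsigned int col = 2 * x + h;

		while (status) {
			unsigned int row = __builtin_ctz(status); /* like __ffs() */

			printf("key at col %u, row %u\n", col, row);
			status &= status - 1; /* clear the bit we just handled */
		}
	}
}

int main(void)
{
	decode_asr(1, 0x0104); /* reports col 2/row 2 and col 3/row 0 */
	return 0;
}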
diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
index 5a2592e6293d..bce8157d1871 100644
--- a/drivers/input/keyboard/qt1050.c
+++ b/drivers/input/keyboard/qt1050.c
@@ -346,35 +346,34 @@ static int qt1050_apply_fw_data(struct qt1050_priv *ts)
static int qt1050_parse_fw(struct qt1050_priv *ts)
{
struct device *dev = &ts->client->dev;
- struct fwnode_handle *child;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0 || nbuttons > QT1050_MAX_KEYS)
return -ENODEV;
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct qt1050_key button;
/* Required properties */
if (fwnode_property_read_u32(child, "linux,code",
&button.keycode)) {
dev_err(dev, "Button without keycode\n");
- goto err;
+ return -EINVAL;
}
if (button.keycode >= KEY_MAX) {
dev_err(dev, "Invalid keycode 0x%x\n",
button.keycode);
- goto err;
+ return -EINVAL;
}
if (fwnode_property_read_u32(child, "reg",
&button.num)) {
dev_err(dev, "Button without pad number\n");
- goto err;
+ return -EINVAL;
}
if (button.num < 0 || button.num > QT1050_MAX_KEYS - 1)
- goto err;
+ return -EINVAL;
ts->reg_keys |= BIT(button.num);
@@ -424,10 +423,6 @@ static int qt1050_parse_fw(struct qt1050_priv *ts)
}
return 0;
-
-err:
- fwnode_handle_put(child);
- return -EINVAL;
}
static int qt1050_probe(struct i2c_client *client)
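
The qt1050 conversion above relies on device_for_each_child_node_scoped() dropping the child fwnode reference automatically when execution leaves the loop body, which is why the err/fwnode_handle_put() tail could be deleted. A minimal sketch of the pattern (function name hypothetical):

#include <linux/property.h>

static int example_parse_children(struct device *dev)
{
	device_for_each_child_node_scoped(dev, child) {
		u32 code;

		/* Returning from inside the loop is safe: the child
		 * reference is put when it goes out of scope. */
		if (fwnode_property_read_u32(child, "linux,code", &code))
			return -EINVAL;
	}

	return 0;
}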
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index ad8660be0127..f7b5f1e25c80 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -100,11 +100,6 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void imx_snvs_pwrkey_disable_clk(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void imx_snvs_pwrkey_act(void *pdata)
{
struct pwrkey_drv_data *pd = pdata;
@@ -141,28 +136,12 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
}
- clk = devm_clk_get_optional(&pdev->dev, NULL);
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
return PTR_ERR(clk);
}
- error = clk_prepare_enable(clk);
- if (error) {
- dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
- ERR_PTR(error));
- return error;
- }
-
- error = devm_add_action_or_reset(&pdev->dev,
- imx_snvs_pwrkey_disable_clk, clk);
- if (error) {
- dev_err(&pdev->dev,
- "Failed to register clock cleanup handler (%pe)\n",
- ERR_PTR(error));
- return error;
- }
-
pdata->wakeup = of_property_read_bool(np, "wakeup-source");
pdata->irq = platform_get_irq(pdev, 0);
@@ -204,7 +183,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
-
if (error) {
dev_err(&pdev->dev, "interrupt not available.\n");
return error;
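
devm_clk_get_optional_enabled(), used above, folds the old three-step sequence (devm_clk_get_optional(), clk_prepare_enable(), devm_add_action_or_reset() for cleanup) into one managed call; the clock, if present, is disabled and unprepared automatically on driver detach. A hedged sketch:

#include <linux/clk.h>

static int example_clk_init(struct device *dev)
{
	struct clk *clk;

	/* NULL con_id: the device's default clock; a NULL clk is also a
	 * valid "no clock" result because of the _optional variant. */
	clk = devm_clk_get_optional_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0; /* clock is running; devres tears it down for us */
}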
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 557d00a667ce..1df4feb8ba01 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -222,7 +222,7 @@ static int spear_kbd_probe(struct platform_device *pdev)
if (IS_ERR(kbd->io_base))
return PTR_ERR(kbd->io_base);
- kbd->clk = devm_clk_get(&pdev->dev, NULL);
+ kbd->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(kbd->clk))
return PTR_ERR(kbd->clk);
@@ -255,14 +255,9 @@ static int spear_kbd_probe(struct platform_device *pdev)
return error;
}
- error = clk_prepare(kbd->clk);
- if (error)
- return error;
-
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "Unable to register keyboard device\n");
- clk_unprepare(kbd->clk);
return error;
}
@@ -272,14 +267,6 @@ static int spear_kbd_probe(struct platform_device *pdev)
return 0;
}
-static void spear_kbd_remove(struct platform_device *pdev)
-{
- struct spear_kbd *kbd = platform_get_drvdata(pdev);
-
- input_unregister_device(kbd->input);
- clk_unprepare(kbd->clk);
-}
-
static int spear_kbd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -373,7 +360,6 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove_new = spear_kbd_remove,
.driver = {
.name = "keyboard",
.pm = pm_sleep_ptr(&spear_kbd_pm_ops),
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index b0d86621c60a..11988cffdfae 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -325,7 +325,6 @@ tc3589x_keypad_of_probe(struct device *dev)
struct tc3589x_keypad_platform_data *plat;
u32 cols, rows;
u32 debounce_ms;
- int proplen;
if (!np)
return ERR_PTR(-ENODEV);
@@ -346,7 +345,7 @@ tc3589x_keypad_of_probe(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (!of_get_property(np, "linux,keymap", &proplen)) {
+ if (!of_property_present(np, "linux,keymap")) {
dev_err(dev, "property linux,keymap not found\n");
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index a1765ed8c825..6776dd94ce76 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -241,11 +241,10 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
static void tegra_kbc_keypress_timer(struct timer_list *t)
{
struct tegra_kbc *kbc = from_timer(kbc, t, timer);
- unsigned long flags;
u32 val;
unsigned int i;
- spin_lock_irqsave(&kbc->lock, flags);
+ guard(spinlock_irqsave)(&kbc->lock);
val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
if (val) {
@@ -270,17 +269,14 @@ static void tegra_kbc_keypress_timer(struct timer_list *t)
/* All keys are released so enable the keypress interrupt */
tegra_kbc_set_fifo_interrupt(kbc, true);
}
-
- spin_unlock_irqrestore(&kbc->lock, flags);
}
static irqreturn_t tegra_kbc_isr(int irq, void *args)
{
struct tegra_kbc *kbc = args;
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&kbc->lock, flags);
+ guard(spinlock_irqsave)(&kbc->lock);
/*
* Quickly bail out & reenable interrupts if the fifo threshold
@@ -301,8 +297,6 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
kbc->keypress_caused_wake = true;
}
- spin_unlock_irqrestore(&kbc->lock, flags);
-
return IRQ_HANDLED;
}
@@ -413,14 +407,13 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
static void tegra_kbc_stop(struct tegra_kbc *kbc)
{
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&kbc->lock, flags);
- val = readl(kbc->mmio + KBC_CONTROL_0);
- val &= ~1;
- writel(val, kbc->mmio + KBC_CONTROL_0);
- spin_unlock_irqrestore(&kbc->lock, flags);
+ scoped_guard(spinlock_irqsave, &kbc->lock) {
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ val &= ~1;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+ }
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
@@ -491,12 +484,10 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
struct device_node *np = kbc->dev->of_node;
u32 prop;
int i;
- u32 num_rows = 0;
- u32 num_cols = 0;
+ int num_rows;
+ int num_cols;
u32 cols_cfg[KBC_MAX_GPIO];
u32 rows_cfg[KBC_MAX_GPIO];
- int proplen;
- int ret;
if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
kbc->debounce_cnt = prop;
@@ -510,56 +501,23 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
of_property_read_bool(np, "nvidia,wakeup-source")) /* legacy */
kbc->wakeup = true;
- if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
- dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
- return -ENOENT;
- }
- num_rows = proplen / sizeof(u32);
-
- if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
- dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
- return -ENOENT;
- }
- num_cols = proplen / sizeof(u32);
-
- if (num_rows > kbc->hw_support->max_rows) {
- dev_err(kbc->dev,
- "Number of rows is more than supported by hardware\n");
- return -EINVAL;
- }
-
- if (num_cols > kbc->hw_support->max_columns) {
- dev_err(kbc->dev,
- "Number of cols is more than supported by hardware\n");
- return -EINVAL;
- }
-
- if (!of_get_property(np, "linux,keymap", &proplen)) {
+ if (!of_property_present(np, "linux,keymap")) {
dev_err(kbc->dev, "property linux,keymap not found\n");
return -ENOENT;
}
- if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
- dev_err(kbc->dev,
- "keypad rows/columns not properly specified\n");
- return -EINVAL;
- }
-
/* Set all pins as non-configured */
for (i = 0; i < kbc->num_rows_and_columns; i++)
kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
- ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
- rows_cfg, num_rows);
- if (ret < 0) {
+ num_rows = of_property_read_variable_u32_array(np, "nvidia,kbc-row-pins",
+ rows_cfg, 1, KBC_MAX_GPIO);
+ if (num_rows < 0) {
dev_err(kbc->dev, "Rows configurations are not proper\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
- cols_cfg, num_cols);
- if (ret < 0) {
- dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return num_rows;
+ } else if (num_rows > kbc->hw_support->max_rows) {
+ dev_err(kbc->dev,
+ "Number of rows is more than supported by hardware\n");
return -EINVAL;
}
@@ -568,11 +526,28 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
kbc->pin_cfg[rows_cfg[i]].num = i;
}
+ num_cols = of_property_read_variable_u32_array(np, "nvidia,kbc-col-pins",
+ cols_cfg, 1, KBC_MAX_GPIO);
+ if (num_cols < 0) {
+ dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return num_cols;
+ } else if (num_cols > kbc->hw_support->max_columns) {
+ dev_err(kbc->dev,
+ "Number of cols is more than supported by hardware\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < num_cols; i++) {
kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
kbc->pin_cfg[cols_cfg[i]].num = i;
}
+ if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
+ dev_err(kbc->dev,
+ "keypad rows/columns not properly specified\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -724,7 +699,8 @@ static int tegra_kbc_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- mutex_lock(&kbc->idev->mutex);
+ guard(mutex)(&kbc->idev->mutex);
+
if (device_may_wakeup(&pdev->dev)) {
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
@@ -747,11 +723,9 @@ static int tegra_kbc_suspend(struct device *dev)
tegra_kbc_set_keypress_interrupt(kbc, true);
enable_irq(kbc->irq);
enable_irq_wake(kbc->irq);
- } else {
- if (input_device_enabled(kbc->idev))
- tegra_kbc_stop(kbc);
+ } else if (input_device_enabled(kbc->idev)) {
+ tegra_kbc_stop(kbc);
}
- mutex_unlock(&kbc->idev->mutex);
return 0;
}
@@ -760,9 +734,10 @@ static int tegra_kbc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- int err = 0;
+ int err;
+
+ guard(mutex)(&kbc->idev->mutex);
- mutex_lock(&kbc->idev->mutex);
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(kbc->irq);
tegra_kbc_setup_wakekeys(kbc, false);
@@ -787,13 +762,13 @@ static int tegra_kbc_resume(struct device *dev)
input_report_key(kbc->idev, kbc->wakeup_key, 0);
input_sync(kbc->idev);
}
- } else {
- if (input_device_enabled(kbc->idev))
- err = tegra_kbc_start(kbc);
+ } else if (input_device_enabled(kbc->idev)) {
+ err = tegra_kbc_start(kbc);
+ if (err)
+ return err;
}
- mutex_unlock(&kbc->idev->mutex);
- return err;
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops,
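
The tegra-kbc pin parsing above leans on of_property_read_variable_u32_array() returning the number of elements actually read (or a negative errno), which replaces the separate of_get_property() length probe and the fixed-count read. A small sketch of the calling convention (property name hypothetical):

#include <linux/of.h>

#define EXAMPLE_MAX_PINS 24 /* invented bound, analogous to KBC_MAX_GPIO */

static int example_read_pins(struct device_node *np, u32 *pins)
{
	/* Accepts between 1 and EXAMPLE_MAX_PINS entries; the return
	 * value doubles as the array length on success. */
	int count = of_property_read_variable_u32_array(np, "example,pins",
							pins, 1,
							EXAMPLE_MAX_PINS);
	if (count < 0)
		return count;

	return count;
}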
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index 5d93043bad8e..3bea3575a0a9 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -73,10 +73,9 @@ static int matrix_keypad_parse_keymap(const char *propname,
struct device *dev = input_dev->dev.parent;
unsigned int row_shift = get_count_order(cols);
unsigned int max_keys = rows << row_shift;
- u32 *keys;
int i;
int size;
- int retval;
+ int error;
if (!propname)
propname = "linux,keymap";
@@ -94,30 +93,24 @@ static int matrix_keypad_parse_keymap(const char *propname,
return -EINVAL;
}
- keys = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
+ u32 *keys __free(kfree) = kmalloc_array(size, sizeof(*keys), GFP_KERNEL);
if (!keys)
return -ENOMEM;
- retval = device_property_read_u32_array(dev, propname, keys, size);
- if (retval) {
+ error = device_property_read_u32_array(dev, propname, keys, size);
+ if (error) {
dev_err(dev, "failed to read %s property: %d\n",
- propname, retval);
- goto out;
+ propname, error);
+ return error;
}
for (i = 0; i < size; i++) {
if (!matrix_keypad_map_key(input_dev, rows, cols,
- row_shift, keys[i])) {
- retval = -EINVAL;
- goto out;
- }
+ row_shift, keys[i]))
+ return -EINVAL;
}
- retval = 0;
-
-out:
- kfree(keys);
- return retval;
+ return 0;
}
/**
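
The matrix-keymap rework uses the scope-based cleanup helper from <linux/cleanup.h>: tagging the pointer with __free(kfree) arranges for kfree() to run automatically when it goes out of scope, so every early return above is leak-free without a goto/out label. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_max(const u32 *src, int n, u32 *out)
{
	/* Freed automatically on every return path below. */
	u32 *buf __free(kfree) = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, n * sizeof(*buf));

	*out = buf[0];
	for (int i = 1; i < n; i++)
		if (buf[i] > *out)
			*out = buf[i];

	return 0; /* no explicit kfree(buf) needed */
}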
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index c086dadb45e3..058f3470b7ae 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1067,7 +1067,7 @@ static ssize_t ims_pcu_attribute_store(struct device *dev,
if (data_len > attr->field_length)
return -EINVAL;
- scoped_cond_guard(mutex, return -EINTR, &pcu->cmd_mutex) {
+ scoped_cond_guard(mutex_intr, return -EINTR, &pcu->cmd_mutex) {
memset(field, 0, attr->field_length);
memcpy(field, buf, data_len);
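
The one-word ims-pcu fix above matters because the plain mutex guard class acquires the lock unconditionally, so scoped_cond_guard(mutex, return -EINTR, ...) could never take its failure branch; the mutex_intr class locks with mutex_lock_interruptible(), making the -EINTR path reachable when a signal arrives. A condensed sketch (lock name invented):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_store(void)
{
	/* With mutex_intr, acquisition can fail on a signal and the
	 * fail expression runs; with plain "mutex" it never would. */
	scoped_cond_guard(mutex_intr, return -EINTR, &example_lock) {
		/* ... critical section, unlocked automatically ... */
	}

	return 0;
}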
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index cd14ff9f57cf..843f8a3f3410 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -811,7 +811,6 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
{
struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
struct i2c_client *client = iqs269->client;
- struct fwnode_handle *ch_node;
u16 general, misc_a, misc_b;
unsigned int val;
int error;
@@ -1049,12 +1048,10 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
- device_for_each_child_node(&client->dev, ch_node) {
+ device_for_each_child_node_scoped(&client->dev, ch_node) {
error = iqs269_parse_chan(iqs269, ch_node);
- if (error) {
- fwnode_handle_put(ch_node);
+ if (error)
return error;
- }
}
/*
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index 1d99206dd3a8..eb4173f9c820 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -38,6 +38,7 @@ struct bbnsm_pwrkey {
int irq;
int keycode;
int keystate; /* 1:pressed */
+ bool suspended;
struct timer_list check_timer;
struct input_dev *input;
};
@@ -70,6 +71,7 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+ struct input_dev *input = bbnsm->input;
u32 event;
regmap_read(bbnsm->regmap, BBNSM_EVENTS, &event);
@@ -78,6 +80,18 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id)
pm_wakeup_event(bbnsm->input->dev.parent, 0);
+ /*
+ * Directly report key event after resume to make sure key press
+ * event is never missed.
+ */
+ if (bbnsm->suspended) {
+ bbnsm->keystate = 1;
+ input_event(input, EV_KEY, bbnsm->keycode, 1);
+ input_sync(input);
+ /* Fire at most once per suspend/resume cycle */
+ bbnsm->suspended = false;
+ }
+
mod_timer(&bbnsm->check_timer,
jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
@@ -173,6 +187,29 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+
+ bbnsm->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused bbnsm_pwrkey_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+
+ bbnsm->suspended = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bbnsm_pwrkey_pm_ops, bbnsm_pwrkey_suspend,
+ bbnsm_pwrkey_resume);
+
static const struct of_device_id bbnsm_pwrkey_ids[] = {
{ .compatible = "nxp,imx93-bbnsm-pwrkey" },
{ /* sentinel */ }
@@ -182,6 +219,7 @@ MODULE_DEVICE_TABLE(of, bbnsm_pwrkey_ids);
static struct platform_driver bbnsm_pwrkey_driver = {
.driver = {
.name = "bbnsm_pwrkey",
+ .pm = &bbnsm_pwrkey_pm_ops,
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 5c4956678cd0..5a64557920fa 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -990,8 +990,8 @@ static int __init copy_keymap(void)
for (key = keymap; key->type != KE_END; key++)
length++;
- new_keymap = kmemdup(keymap, length * sizeof(struct key_entry),
- GFP_KERNEL);
+ new_keymap = kmemdup_array(keymap, length, sizeof(struct key_entry),
+ GFP_KERNEL);
if (!new_keymap)
return -ENOMEM;
@@ -1075,7 +1075,7 @@ static void wistron_led_init(struct device *parent)
}
if (leds_present & FE_MAIL_LED) {
- /* bios_get_default_setting(MAIL) always retuns 0, so just turn the led off */
+ /* bios_get_default_setting(MAIL) always returns 0, so just turn the led off */
wistron_mail_led.brightness = LED_OFF;
if (led_classdev_register(parent, &wistron_mail_led))
leds_present &= ~FE_MAIL_LED;
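
kmemdup_array(), swapped in above, is the overflow-checked form of kmemdup(src, n * size, ...): the element count and size are multiplied internally with overflow detection rather than open-coded at the call site. Sketch with an invented struct:

#include <linux/slab.h>

struct example_entry {
	int type;
	int code;
};

static struct example_entry *example_copy(const struct example_entry *src,
					  size_t n)
{
	/* n and sizeof(*src) are combined safely inside the helper. */
	return kmemdup_array(src, n, sizeof(*src), GFP_KERNEL);
}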
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 833b643f0616..8a27a20d04b0 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -69,6 +69,18 @@ config MOUSE_PS2_LOGIPS2PP
If unsure, say Y.
+config MOUSE_PS2_PIXART
+ bool "PixArt PS/2 touchpad protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ This driver supports the PixArt PS/2 touchpad found in some
+ laptops.
+ Say Y here if you have a PixArt PS/2 TouchPad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_PS2_SYNAPTICS
bool "Synaptics PS/2 mouse protocol extension" if EXPERT
default y
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index a1336d5bee6f..563029551529 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -32,6 +32,7 @@ psmouse-$(CONFIG_MOUSE_PS2_ELANTECH) += elantech.o
psmouse-$(CONFIG_MOUSE_PS2_OLPC) += hgpk.o
psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP) += logips2pp.o
psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o
+psmouse-$(CONFIG_MOUSE_PS2_PIXART) += pixart_ps2.o
psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d5ef5a112d6f..4e37fc3f1a9e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1396,24 +1396,16 @@ static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
static DEFINE_MUTEX(alps_mutex);
-static void alps_register_bare_ps2_mouse(struct work_struct *work)
+static int alps_do_register_bare_ps2_mouse(struct alps_data *priv)
{
- struct alps_data *priv =
- container_of(work, struct alps_data, dev3_register_work.work);
struct psmouse *psmouse = priv->psmouse;
struct input_dev *dev3;
- int error = 0;
-
- mutex_lock(&alps_mutex);
-
- if (priv->dev3)
- goto out;
+ int error;
dev3 = input_allocate_device();
if (!dev3) {
psmouse_err(psmouse, "failed to allocate secondary device\n");
- error = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
@@ -1446,21 +1438,35 @@ static void alps_register_bare_ps2_mouse(struct work_struct *work)
psmouse_err(psmouse,
"failed to register secondary device: %d\n",
error);
- input_free_device(dev3);
- goto out;
+ goto err_free_input;
}
priv->dev3 = dev3;
+ return 0;
-out:
- /*
- * Save the error code so that we can detect that we
- * already tried to create the device.
- */
- if (error)
- priv->dev3 = ERR_PTR(error);
+err_free_input:
+ input_free_device(dev3);
+ return error;
+}
- mutex_unlock(&alps_mutex);
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+ struct alps_data *priv = container_of(work, struct alps_data,
+ dev3_register_work.work);
+ int error;
+
+ guard(mutex)(&alps_mutex);
+
+ if (!priv->dev3) {
+ error = alps_do_register_bare_ps2_mouse(priv);
+ if (error) {
+ /*
+ * Save the error code so that we can detect that we
+ * already tried to create the device.
+ */
+ priv->dev3 = ERR_PTR(error);
+ }
+ }
}
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 10a03a566905..dfdfb59cc8b5 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -834,13 +834,11 @@ static int bcm5974_open(struct input_dev *input)
if (error)
return error;
- mutex_lock(&dev->pm_mutex);
-
- error = bcm5974_start_traffic(dev);
- if (!error)
- dev->opened = 1;
-
- mutex_unlock(&dev->pm_mutex);
+ scoped_guard(mutex, &dev->pm_mutex) {
+ error = bcm5974_start_traffic(dev);
+ if (!error)
+ dev->opened = 1;
+ }
if (error)
usb_autopm_put_interface(dev->intf);
@@ -852,12 +850,10 @@ static void bcm5974_close(struct input_dev *input)
{
struct bcm5974 *dev = input_get_drvdata(input);
- mutex_lock(&dev->pm_mutex);
-
- bcm5974_pause_traffic(dev);
- dev->opened = 0;
-
- mutex_unlock(&dev->pm_mutex);
+ scoped_guard(mutex, &dev->pm_mutex) {
+ bcm5974_pause_traffic(dev);
+ dev->opened = 0;
+ }
usb_autopm_put_interface(dev->intf);
}
@@ -866,29 +862,24 @@ static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
- mutex_lock(&dev->pm_mutex);
+ guard(mutex)(&dev->pm_mutex);
if (dev->opened)
bcm5974_pause_traffic(dev);
- mutex_unlock(&dev->pm_mutex);
-
return 0;
}
static int bcm5974_resume(struct usb_interface *iface)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
- int error = 0;
- mutex_lock(&dev->pm_mutex);
+ guard(mutex)(&dev->pm_mutex);
if (dev->opened)
- error = bcm5974_start_traffic(dev);
+ return bcm5974_start_traffic(dev);
- mutex_unlock(&dev->pm_mutex);
-
- return error;
+ return 0;
}
static int bcm5974_probe(struct usb_interface *iface,
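
bcm5974 (like tegra-kbc earlier) moves to the <linux/cleanup.h> lock guards: guard(mutex)(&m) holds the mutex until the enclosing function returns, while scoped_guard(mutex, &m) { ... } covers only its block, so the manual unlock-before-every-return bookkeeping disappears. A condensed sketch (names invented):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_pm_mutex);
static bool example_opened;

static int example_suspend(void)
{
	guard(mutex)(&example_pm_mutex); /* dropped on any return below */

	if (example_opened)
		return -EBUSY;

	return 0;
}

static void example_close(void)
{
	scoped_guard(mutex, &example_pm_mutex) {
		example_opened = false;
	} /* lock released here, before any later teardown work */
}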
diff --git a/drivers/input/mouse/pixart_ps2.c b/drivers/input/mouse/pixart_ps2.c
new file mode 100644
index 000000000000..1993fc760d7b
--- /dev/null
+++ b/drivers/input/mouse/pixart_ps2.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Pixart Touchpad Controller 1336U PS2 driver
+ *
+ * Author: Jon Xie <jon_xie@pixart.com>
+ * Jay Lee <jay_lee@pixart.com>
+ * Further cleanup and restructuring by:
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ *
+ * Copyright (C) 2021-2024 Pixart Imaging.
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/libps2.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "pixart_ps2.h"
+
+static int pixart_read_tp_mode(struct ps2dev *ps2dev, u8 *mode)
+{
+ int error;
+ u8 param[1] = { 0 };
+
+ error = ps2_command(ps2dev, param, PIXART_CMD_REPORT_FORMAT);
+ if (error)
+ return error;
+
+ *mode = param[0] == 1 ? PIXART_MODE_ABS : PIXART_MODE_REL;
+
+ return 0;
+}
+
+static int pixart_read_tp_type(struct ps2dev *ps2dev, u8 *type)
+{
+ int error;
+ u8 param[3] = { 0 };
+
+ param[0] = 0x0a;
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
+ if (error)
+ return error;
+
+ param[0] = 0x0;
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+ if (error)
+ return error;
+
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+ if (error)
+ return error;
+
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+ if (error)
+ return error;
+
+ param[0] = 0x03;
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+ if (error)
+ return error;
+
+ error = ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO);
+ if (error)
+ return error;
+
+ *type = param[0] == 0x0e ? PIXART_TYPE_TOUCHPAD : PIXART_TYPE_CLICKPAD;
+
+ return 0;
+}
+
+static void pixart_reset(struct psmouse *psmouse)
+{
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
+
+ /* according to PixArt, 100ms is required for the upcoming reset */
+ msleep(100);
+ psmouse_reset(psmouse);
+}
+
+static void pixart_process_packet(struct psmouse *psmouse)
+{
+ struct pixart_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+ const u8 *pkt = psmouse->packet;
+ unsigned int contact_cnt = FIELD_GET(CONTACT_CNT_MASK, pkt[0]);
+ unsigned int i, id, abs_x, abs_y;
+ bool tip;
+
+ for (i = 0; i < contact_cnt; i++) {
+ const u8 *p = &pkt[i * 3];
+
+ id = FIELD_GET(SLOT_ID_MASK, p[3]);
+ abs_y = FIELD_GET(ABS_Y_MASK, p[3]) << 8 | p[1];
+ abs_x = FIELD_GET(ABS_X_MASK, p[3]) << 8 | p[2];
+
+ if (i == PIXART_MAX_FINGERS - 1)
+ tip = pkt[14] & BIT(1);
+ else
+ tip = pkt[3 * contact_cnt + 1] & BIT(2 * i + 1);
+
+ input_mt_slot(dev, id);
+ if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, tip)) {
+ input_report_abs(dev, ABS_MT_POSITION_Y, abs_y);
+ input_report_abs(dev, ABS_MT_POSITION_X, abs_x);
+ }
+ }
+
+ input_mt_sync_frame(dev);
+
+ if (priv->type == PIXART_TYPE_CLICKPAD) {
+ input_report_key(dev, BTN_LEFT, pkt[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, pkt[0] & BIT(0));
+ input_report_key(dev, BTN_RIGHT, pkt[0] & BIT(1));
+ }
+
+ input_sync(dev);
+}
+
+static psmouse_ret_t pixart_protocol_handler(struct psmouse *psmouse)
+{
+ u8 *pkt = psmouse->packet;
+ u8 contact_cnt;
+
+ if ((pkt[0] & 0x8c) != 0x80)
+ return PSMOUSE_BAD_DATA;
+
+ contact_cnt = FIELD_GET(CONTACT_CNT_MASK, pkt[0]);
+ if (contact_cnt > PIXART_MAX_FINGERS)
+ return PSMOUSE_BAD_DATA;
+
+ if (contact_cnt == PIXART_MAX_FINGERS &&
+ psmouse->pktcnt < psmouse->pktsize) {
+ return PSMOUSE_GOOD_DATA;
+ }
+
+ if (contact_cnt == 0 && psmouse->pktcnt < 5)
+ return PSMOUSE_GOOD_DATA;
+
+ if (psmouse->pktcnt < 3 * contact_cnt + 2)
+ return PSMOUSE_GOOD_DATA;
+
+ pixart_process_packet(psmouse);
+
+ return PSMOUSE_FULL_PACKET;
+}
+
+static void pixart_disconnect(struct psmouse *psmouse)
+{
+ pixart_reset(psmouse);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+}
+
+static int pixart_reconnect(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ u8 mode;
+ int error;
+
+ pixart_reset(psmouse);
+
+ error = pixart_read_tp_mode(ps2dev, &mode);
+ if (error)
+ return error;
+
+ if (mode != PIXART_MODE_ABS)
+ return -EIO;
+
+ error = ps2_command(ps2dev, NULL, PIXART_CMD_SWITCH_PROTO);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int pixart_set_input_params(struct input_dev *dev,
+ struct pixart_data *priv)
+{
+ /* No relative support */
+ __clear_bit(EV_REL, dev->evbit);
+ __clear_bit(REL_X, dev->relbit);
+ __clear_bit(REL_Y, dev->relbit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+
+ /* Buttons */
+ __set_bit(EV_KEY, dev->evbit);
+ __set_bit(BTN_LEFT, dev->keybit);
+ if (priv->type == PIXART_TYPE_CLICKPAD)
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ else
+ __set_bit(BTN_RIGHT, dev->keybit);
+
+ /* Absolute position */
+ input_set_abs_params(dev, ABS_X, 0, PIXART_PAD_WIDTH, 0, 0);
+ input_set_abs_params(dev, ABS_Y, 0, PIXART_PAD_HEIGHT, 0, 0);
+
+ input_set_abs_params(dev, ABS_MT_POSITION_X,
+ 0, PIXART_PAD_WIDTH, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y,
+ 0, PIXART_PAD_HEIGHT, 0, 0);
+
+ return input_mt_init_slots(dev, PIXART_MAX_FINGERS, INPUT_MT_POINTER);
+}
+
+static int pixart_query_hardware(struct ps2dev *ps2dev, u8 *mode, u8 *type)
+{
+ int error;
+
+ error = pixart_read_tp_type(ps2dev, type);
+ if (error)
+ return error;
+
+ error = pixart_read_tp_mode(ps2dev, mode);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+int pixart_detect(struct psmouse *psmouse, bool set_properties)
+{
+ u8 type;
+ int error;
+
+ pixart_reset(psmouse);
+
+ error = pixart_read_tp_type(&psmouse->ps2dev, &type);
+ if (error)
+ return error;
+
+ if (set_properties) {
+ psmouse->vendor = "PixArt";
+ psmouse->name = (type == PIXART_TYPE_TOUCHPAD) ?
+ "touchpad" : "clickpad";
+ }
+
+ return 0;
+}
+
+int pixart_init(struct psmouse *psmouse)
+{
+ int error;
+ struct pixart_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ psmouse->private = priv;
+ pixart_reset(psmouse);
+
+ error = pixart_query_hardware(&psmouse->ps2dev,
+ &priv->mode, &priv->type);
+ if (error) {
+ psmouse_err(psmouse, "init: Unable to query PixArt touchpad hardware.\n");
+ goto err_exit;
+ }
+
+ /* Relative mode follows standard PS/2 mouse protocol */
+ if (priv->mode != PIXART_MODE_ABS) {
+ error = -EIO;
+ goto err_exit;
+ }
+
+ /* Set absolute mode */
+ error = ps2_command(&psmouse->ps2dev, NULL, PIXART_CMD_SWITCH_PROTO);
+ if (error) {
+ psmouse_err(psmouse, "init: Unable to initialize PixArt absolute mode.\n");
+ goto err_exit;
+ }
+
+ error = pixart_set_input_params(psmouse->dev, priv);
+ if (error) {
+ psmouse_err(psmouse, "init: Unable to set input params.\n");
+ goto err_exit;
+ }
+
+ psmouse->pktsize = 15;
+ psmouse->protocol_handler = pixart_protocol_handler;
+ psmouse->disconnect = pixart_disconnect;
+ psmouse->reconnect = pixart_reconnect;
+ psmouse->cleanup = pixart_reset;
+ /* resync is not supported yet */
+ psmouse->resync_time = 0;
+
+ return 0;
+
+err_exit:
+ pixart_reset(psmouse);
+ kfree(priv);
+ psmouse->private = NULL;
+ return error;
+}
diff --git a/drivers/input/mouse/pixart_ps2.h b/drivers/input/mouse/pixart_ps2.h
new file mode 100644
index 000000000000..47a1d040f2d1
--- /dev/null
+++ b/drivers/input/mouse/pixart_ps2.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PIXART_PS2_H
+#define _PIXART_PS2_H
+
+#include "psmouse.h"
+
+#define PIXART_PAD_WIDTH 1023
+#define PIXART_PAD_HEIGHT 579
+#define PIXART_MAX_FINGERS 4
+
+#define PIXART_CMD_REPORT_FORMAT 0x01d8
+#define PIXART_CMD_SWITCH_PROTO 0x00de
+
+#define PIXART_MODE_REL 0
+#define PIXART_MODE_ABS 1
+
+#define PIXART_TYPE_CLICKPAD 0
+#define PIXART_TYPE_TOUCHPAD 1
+
+#define CONTACT_CNT_MASK GENMASK(6, 4)
+
+#define SLOT_ID_MASK GENMASK(2, 0)
+#define ABS_Y_MASK GENMASK(5, 4)
+#define ABS_X_MASK GENMASK(7, 6)
+
+struct pixart_data {
+ u8 mode;
+ u8 type;
+ int x_max;
+ int y_max;
+};
+
+int pixart_detect(struct psmouse *psmouse, bool set_properties);
+int pixart_init(struct psmouse *psmouse);
+
+#endif /* _PIXART_PS2_H */
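
The masks above are consumed with FIELD_GET() from <linux/bitfield.h>; for example, a header byte of 0xb0 passes the (pkt[0] & 0x8c) == 0x80 validity check and carries a contact count of 3 in bits 6:4. A standalone mirror of that extraction (sample byte invented):

#include <stdint.h>
#include <stdio.h>

#define CONTACT_CNT_MASK 0x70u /* userspace stand-in for GENMASK(6, 4) */

static unsigned int contact_cnt(uint8_t hdr)
{
	return (hdr & CONTACT_CNT_MASK) >> 4; /* what FIELD_GET() computes */
}

int main(void)
{
	uint8_t hdr = 0xb0; /* 1011 0000: valid header, three contacts */

	printf("contacts: %u\n", contact_cnt(hdr));
	return 0;
}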
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index a2c9f7144864..5a4defe9cf32 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -36,6 +36,7 @@
#include "focaltech.h"
#include "vmmouse.h"
#include "byd.h"
+#include "pixart_ps2.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -906,6 +907,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.init = byd_init,
},
#endif
+#ifdef CONFIG_MOUSE_PS2_PIXART
+ {
+ .type = PSMOUSE_PIXART,
+ .name = "PixArtPS/2",
+ .alias = "pixart",
+ .detect = pixart_detect,
+ .init = pixart_init,
+ },
+#endif
{
.type = PSMOUSE_AUTO,
.name = "auto",
@@ -1172,6 +1182,13 @@ static int psmouse_extensions(struct psmouse *psmouse,
return ret;
}
+ /* Try PixArt touchpad */
+ if (max_proto > PSMOUSE_IMEX &&
+ psmouse_try_protocol(psmouse, PSMOUSE_PIXART, &max_proto,
+ set_properties, true)) {
+ return PSMOUSE_PIXART;
+ }
+
if (max_proto > PSMOUSE_IMEX) {
if (psmouse_try_protocol(psmouse, PSMOUSE_GENPS,
&max_proto, set_properties, true))
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 4d8acfe0d82a..23f7fa7243cb 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -69,6 +69,7 @@ enum psmouse_type {
PSMOUSE_BYD,
PSMOUSE_SYNAPTICS_SMBUS,
PSMOUSE_ELANTECH_SMBUS,
+ PSMOUSE_PIXART,
PSMOUSE_AUTO /* This one should always be last */
};
@@ -94,7 +95,7 @@ struct psmouse {
const char *vendor;
const char *name;
const struct psmouse_protocol *protocol;
- unsigned char packet[8];
+ unsigned char packet[16];
unsigned char badbyte;
unsigned char pktcnt;
unsigned char pktsize;
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 7e97944f7616..8246fe77114b 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -24,6 +24,7 @@ enum rmi_f12_object_type {
};
#define F12_DATA1_BYTES_PER_OBJ 8
+#define RMI_F12_QUERY_RESOLUTION 29
struct f12_data {
struct rmi_2d_sensor sensor;
@@ -73,6 +74,8 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
int pitch_y = 0;
int rx_receivers = 0;
int tx_receivers = 0;
+ u16 query_dpm_addr = 0;
+ int dpm_resolution = 0;
item = rmi_get_register_desc_item(&f12->control_reg_desc, 8);
if (!item) {
@@ -122,18 +125,38 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
offset += 4;
}
- if (rmi_register_desc_has_subpacket(item, 3)) {
- rx_receivers = buf[offset];
- tx_receivers = buf[offset + 1];
- offset += 2;
- }
+ /*
+ * Use the Query DPM feature when the resolution query register
+ * exists.
+ */
+ if (rmi_get_register_desc_item(&f12->query_reg_desc,
+ RMI_F12_QUERY_RESOLUTION)) {
+ offset = rmi_register_desc_calc_reg_offset(&f12->query_reg_desc,
+ RMI_F12_QUERY_RESOLUTION);
+ query_dpm_addr = fn->fd.query_base_addr + offset;
+ ret = rmi_read(fn->rmi_dev, query_dpm_addr, buf);
+ if (ret < 0) {
+ dev_err(&fn->dev, "Failed to read DPM value: %d\n", ret);
+ return -ENODEV;
+ }
+ dpm_resolution = buf[0];
- /* Skip over sensor flags */
- if (rmi_register_desc_has_subpacket(item, 4))
- offset += 1;
+ sensor->x_mm = sensor->max_x / dpm_resolution;
+ sensor->y_mm = sensor->max_y / dpm_resolution;
+ } else {
+ if (rmi_register_desc_has_subpacket(item, 3)) {
+ rx_receivers = buf[offset];
+ tx_receivers = buf[offset + 1];
+ offset += 2;
+ }
- sensor->x_mm = (pitch_x * rx_receivers) >> 12;
- sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+ /* Skip over sensor flags */
+ if (rmi_register_desc_has_subpacket(item, 4))
+ offset += 1;
+
+ sensor->x_mm = (pitch_x * rx_receivers) >> 12;
+ sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+ }
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__,
sensor->x_mm, sensor->y_mm);
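
The Query DPM branch above reduces physical sizing to one division: the register reports resolution in dots per mm, so a made-up max_x of 1079 at 10 dpm yields x_mm = 107. The same arithmetic standalone:

#include <stdio.h>

int main(void)
{
	int max_x = 1079, max_y = 1919; /* invented coordinate maxima */
	int dpm = 10;                   /* resolution from the query reg */

	/* Integer division, as in rmi_f12_read_sensor_tuning() above. */
	printf("x_mm=%d y_mm=%d\n", max_x / dpm, max_y / dpm);
	return 0;
}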
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index bad238f69a7a..34d1f07ea4c3 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1121,6 +1121,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
/*
+ * Some TongFang barebones have touchpad and/or keyboard issues after
+ * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of
+ * them have an external PS/2 port so this can safely be set for all of
+ * them.
+ * TongFang barebones come with board_vendor and/or system_vendor set to
+ * a different value for each individual reseller. The only somewhat
+ * universal way to identify them is by board_name.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ /*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
* after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
* none of them have an external PS/2 port so this can safely be set for
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 0c8b390b8b4f..3a431395c464 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -429,16 +429,14 @@ static int ps2_gpio_probe(struct platform_device *pdev)
}
error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq,
- IRQF_NO_THREAD, DRIVER_NAME, drvdata);
+ IRQF_NO_THREAD | IRQF_NO_AUTOEN, DRIVER_NAME,
+ drvdata);
if (error) {
dev_err(dev, "failed to request irq %d: %d\n",
drvdata->irq, error);
goto err_free_serio;
}
- /* Keep irq disabled until serio->open is called. */
- disable_irq(drvdata->irq);
-
serio->id.type = SERIO_8042;
serio->open = ps2_gpio_open;
serio->close = ps2_gpio_close;
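
IRQF_NO_AUTOEN, added above, requests the handler with the line left masked, closing the window in which an interrupt could fire between devm_request_irq() and the old explicit disable_irq(). A sketch of the pattern (handler and name invented):

#include <linux/interrupt.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct device *dev, int irq, void *data)
{
	/* The line stays disabled until enable_irq() in ->open(). */
	return devm_request_irq(dev, irq, example_irq,
				IRQF_NO_THREAD | IRQF_NO_AUTOEN,
				"example", data);
}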
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c821fe3ee794..1ac26fc2e3eb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -254,36 +254,6 @@ config TOUCHSCREEN_CYTTSP_SPI
To compile this driver as a module, choose M here: the
module will be called cyttsp_spi.
-config TOUCHSCREEN_CYTTSP4_CORE
- tristate "Cypress TrueTouch Gen4 Touchscreen Driver"
- help
- Core driver for Cypress TrueTouch(tm) Standard Product
- Generation4 touchscreen controllers.
-
- Say Y here if you have a Cypress Gen4 touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here.
-
-config TOUCHSCREEN_CYTTSP4_I2C
- tristate "support I2C bus connection"
- depends on TOUCHSCREEN_CYTTSP4_CORE && I2C
- help
- Say Y here if the touchscreen is connected via I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called cyttsp4_i2c.
-
-config TOUCHSCREEN_CYTTSP4_SPI
- tristate "support SPI bus connection"
- depends on TOUCHSCREEN_CYTTSP4_CORE && SPI_MASTER
- help
- Say Y here if the touchscreen is connected via SPI bus.
-
- To compile this driver as a module, choose M here: the
- module will be called cyttsp4_spi.
-
config TOUCHSCREEN_CYTTSP5
tristate "Cypress TrueTouch Gen5 Touchscreen Driver"
depends on I2C
@@ -626,18 +596,6 @@ config TOUCHSCREEN_MAX11801
To compile this driver as a module, choose M here: the
module will be called max11801_ts.
-config TOUCHSCREEN_MCS5000
- tristate "MELFAS MCS-5000 touchscreen"
- depends on I2C
- help
- Say Y here if you have the MELFAS MCS-5000 touchscreen controller
- chip in your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called mcs5000_ts.
-
config TOUCHSCREEN_MMS114
tristate "MELFAS MMS114 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index a81cb5aa21a5..82bc837ca01e 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -25,11 +25,8 @@ obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_CORE) += cyttsp4_core.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_I2C) += cyttsp4_i2c.o cyttsp_i2c_common.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_SPI) += cyttsp4_spi.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP5) += cyttsp5.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
@@ -63,7 +60,6 @@ obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
obj-$(CONFIG_TOUCHSCREEN_MXS_LRADC) += mxs-lradc-ts.o
obj-$(CONFIG_TOUCHSCREEN_MX25) += fsl-imx25-tcq.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
-obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index aa829725ded7..98d5b2ba63fb 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -239,14 +239,10 @@ static void vf50_ts_close(struct input_dev *dev_input)
static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
const char *con_id, enum gpiod_flags flags)
{
- int error;
-
*gpio_d = devm_gpiod_get(dev, con_id, flags);
- if (IS_ERR(*gpio_d)) {
- error = PTR_ERR(*gpio_d);
- dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
- return error;
- }
+ if (IS_ERR(*gpio_d))
+ return dev_err_probe(dev, PTR_ERR(*gpio_d),
+ "Could not get gpio_%s\n", con_id);
return 0;
}
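
dev_err_probe(), used in the colibri change above, folds the error message and return value into one expression and stays quiet on -EPROBE_DEFER (recording the reason for later instead), which is why the PTR_ERR/dev_err pair could be dropped. Sketch:

#include <linux/gpio/consumer.h>

static int example_get_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "example", GPIOD_IN);

	if (IS_ERR(gpiod))
		/* Logs (unless deferring) and returns the errno. */
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "Could not get example gpio\n");

	*out = gpiod;
	return 0;
}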
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
deleted file mode 100644
index 9dc25eb2be44..000000000000
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ /dev/null
@@ -1,2174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp4_core.c
- * Cypress TrueTouch(TM) Standard Product V4 Core driver module.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2012 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-/* Timeout in ms. */
-#define CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT 500
-#define CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT 5000
-#define CY_CORE_MODE_CHANGE_TIMEOUT 1000
-#define CY_CORE_RESET_AND_WAIT_TIMEOUT 500
-#define CY_CORE_WAKEUP_TIMEOUT 500
-
-#define CY_CORE_STARTUP_RETRY_COUNT 3
-
-static const char * const cyttsp4_tch_abs_string[] = {
- [CY_TCH_X] = "X",
- [CY_TCH_Y] = "Y",
- [CY_TCH_P] = "P",
- [CY_TCH_T] = "T",
- [CY_TCH_E] = "E",
- [CY_TCH_O] = "O",
- [CY_TCH_W] = "W",
- [CY_TCH_MAJ] = "MAJ",
- [CY_TCH_MIN] = "MIN",
- [CY_TCH_OR] = "OR",
- [CY_TCH_NUM_ABS] = "INVALID"
-};
-
-static const u8 ldr_exit[] = {
- 0xFF, 0x01, 0x3B, 0x00, 0x00, 0x4F, 0x6D, 0x17
-};
-
-static const u8 ldr_err_app[] = {
- 0x01, 0x02, 0x00, 0x00, 0x55, 0xDD, 0x17
-};
-
-static inline size_t merge_bytes(u8 high, u8 low)
-{
- return (high << 8) + low;
-}
-
-#ifdef VERBOSE_DEBUG
-static void cyttsp4_pr_buf(struct device *dev, u8 *pr_buf, u8 *dptr, int size,
- const char *data_name)
-{
- int i, k;
- const char fmt[] = "%02X ";
- int max;
-
- if (!size)
- return;
-
- max = (CY_MAX_PRBUF_SIZE - 1) - sizeof(CY_PR_TRUNCATED);
-
- pr_buf[0] = 0;
- for (i = k = 0; i < size && k < max; i++, k += 3)
- scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE, fmt, dptr[i]);
-
- dev_vdbg(dev, "%s: %s[0..%d]=%s%s\n", __func__, data_name, size - 1,
- pr_buf, size <= max ? "" : CY_PR_TRUNCATED);
-}
-#else
-#define cyttsp4_pr_buf(dev, pr_buf, dptr, size, data_name) do { } while (0)
-#endif
-
-static int cyttsp4_load_status_regs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- struct device *dev = cd->dev;
- int rc;
-
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, si->si_ofs.mode_size,
- si->xy_mode);
- if (rc < 0)
- dev_err(dev, "%s: fail read mode regs r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_mode,
- si->si_ofs.mode_size, "xy_mode");
-
- return rc;
-}
-
-static int cyttsp4_handshake(struct cyttsp4 *cd, u8 mode)
-{
- u8 cmd = mode ^ CY_HST_TOGGLE;
- int rc;
-
- /*
-	 * Mode change issued; handshaking now would cause endless mode change
-	 * requests. In sync mode, the mode change performs the same function
-	 * as the handshake.
-	 */
- if (mode & CY_HST_MODE_CHANGE)
- return 0;
-
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd);
- if (rc < 0)
- dev_err(cd->dev, "%s: bus write fail on handshake (ret=%d)\n",
- __func__, rc);
-
- return rc;
-}
-
-static int cyttsp4_hw_soft_reset(struct cyttsp4 *cd)
-{
- u8 cmd = CY_HST_RESET;
- int rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: FAILED to execute SOFT reset\n",
- __func__);
- return rc;
- }
- return 0;
-}
-
-static int cyttsp4_hw_hard_reset(struct cyttsp4 *cd)
-{
- if (cd->cpdata->xres) {
- cd->cpdata->xres(cd->cpdata, cd->dev);
- dev_dbg(cd->dev, "%s: execute HARD reset\n", __func__);
- return 0;
- }
- dev_err(cd->dev, "%s: FAILED to execute HARD reset\n", __func__);
- return -ENOSYS;
-}
-
-static int cyttsp4_hw_reset(struct cyttsp4 *cd)
-{
- int rc = cyttsp4_hw_hard_reset(cd);
- if (rc == -ENOSYS)
- rc = cyttsp4_hw_soft_reset(cd);
- return rc;
-}
-
-/*
- * Gets the number of bits for a touch field as a parameter,
- * sets the maximum value for the field (used as a bit mask)
- * and returns the number of bytes required for that field
- */
-static int cyttsp4_bits_2_bytes(unsigned int nbits, size_t *max)
-{
- *max = 1UL << nbits;
- return (nbits + 7) / 8;
-}
-
-static int cyttsp4_si_data_offsets(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(si->si_data),
- &si->si_data);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read sysinfo data offsets r=%d\n",
- __func__, rc);
- return rc;
- }
-
- /* Print sysinfo data offsets */
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)&si->si_data,
- sizeof(si->si_data), "sysinfo_data_offsets");
-
- /* convert sysinfo data offset bytes into integers */
-
- si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh,
- si->si_data.map_szl);
- si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh,
- si->si_data.map_szl);
- si->si_ofs.cydata_ofs = merge_bytes(si->si_data.cydata_ofsh,
- si->si_data.cydata_ofsl);
- si->si_ofs.test_ofs = merge_bytes(si->si_data.test_ofsh,
- si->si_data.test_ofsl);
- si->si_ofs.pcfg_ofs = merge_bytes(si->si_data.pcfg_ofsh,
- si->si_data.pcfg_ofsl);
- si->si_ofs.opcfg_ofs = merge_bytes(si->si_data.opcfg_ofsh,
- si->si_data.opcfg_ofsl);
- si->si_ofs.ddata_ofs = merge_bytes(si->si_data.ddata_ofsh,
- si->si_data.ddata_ofsl);
- si->si_ofs.mdata_ofs = merge_bytes(si->si_data.mdata_ofsh,
- si->si_data.mdata_ofsl);
- return rc;
-}
-
-static int cyttsp4_si_get_cydata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int read_offset;
- int mfgid_sz, calc_mfgid_sz;
- void *p;
- int rc;
-
- if (si->si_ofs.test_ofs <= si->si_ofs.cydata_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset test_ofs: %zu, cydata_ofs: %zu\n",
- __func__, si->si_ofs.test_ofs, si->si_ofs.cydata_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs;
- dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__,
- si->si_ofs.cydata_size);
-
- p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate cydata memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.cydata = p;
-
- read_offset = si->si_ofs.cydata_ofs;
-
- /* Read the CYDA registers up to MFGID field */
- rc = cyttsp4_adap_read(cd, read_offset,
- offsetof(struct cyttsp4_cydata, mfgid_sz)
- + sizeof(si->si_ptrs.cydata->mfgid_sz),
- si->si_ptrs.cydata);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- /* Check MFGID size */
- mfgid_sz = si->si_ptrs.cydata->mfgid_sz;
- calc_mfgid_sz = si->si_ofs.cydata_size - sizeof(struct cyttsp4_cydata);
- if (mfgid_sz != calc_mfgid_sz) {
- dev_err(cd->dev, "%s: mismatch in MFGID size, reported:%d calculated:%d\n",
- __func__, mfgid_sz, calc_mfgid_sz);
- return -EINVAL;
- }
-
- read_offset += offsetof(struct cyttsp4_cydata, mfgid_sz)
- + sizeof(si->si_ptrs.cydata->mfgid_sz);
-
- /* Read the CYDA registers for MFGID field */
- rc = cyttsp4_adap_read(cd, read_offset, si->si_ptrs.cydata->mfgid_sz,
- si->si_ptrs.cydata->mfg_id);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- read_offset += si->si_ptrs.cydata->mfgid_sz;
-
- /* Read the rest of the CYDA registers */
- rc = cyttsp4_adap_read(cd, read_offset,
- sizeof(struct cyttsp4_cydata)
- - offsetof(struct cyttsp4_cydata, cyito_idh),
- &si->si_ptrs.cydata->cyito_idh);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.cydata,
- si->si_ofs.cydata_size, "sysinfo_cydata");
- return rc;
-}
-
-static int cyttsp4_si_get_test_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- if (si->si_ofs.pcfg_ofs <= si->si_ofs.test_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset pcfg_ofs: %zu, test_ofs: %zu\n",
- __func__, si->si_ofs.pcfg_ofs, si->si_ofs.test_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.test_size = si->si_ofs.pcfg_ofs - si->si_ofs.test_ofs;
-
- p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate test memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.test = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.test_ofs, si->si_ofs.test_size,
- si->si_ptrs.test);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read test data r=%d\n",
- __func__, rc);
- return rc;
- }
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.test, si->si_ofs.test_size,
- "sysinfo_test_data");
- if (si->si_ptrs.test->post_codel &
- CY_POST_CODEL_WDG_RST)
- dev_info(cd->dev, "%s: %s codel=%02X\n",
- __func__, "Reset was a WATCHDOG RESET",
- si->si_ptrs.test->post_codel);
-
- if (!(si->si_ptrs.test->post_codel &
- CY_POST_CODEL_CFG_DATA_CRC_FAIL))
- dev_info(cd->dev, "%s: %s codel=%02X\n", __func__,
- "Config Data CRC FAIL",
- si->si_ptrs.test->post_codel);
-
- if (!(si->si_ptrs.test->post_codel &
- CY_POST_CODEL_PANEL_TEST_FAIL))
- dev_info(cd->dev, "%s: %s codel=%02X\n",
- __func__, "PANEL TEST FAIL",
- si->si_ptrs.test->post_codel);
-
- dev_info(cd->dev, "%s: SCANNING is %s codel=%02X\n",
- __func__, si->si_ptrs.test->post_codel & 0x08 ?
- "ENABLED" : "DISABLED",
- si->si_ptrs.test->post_codel);
- return rc;
-}
-
-static int cyttsp4_si_get_pcfg_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- if (si->si_ofs.opcfg_ofs <= si->si_ofs.pcfg_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset opcfg_ofs: %zu, pcfg_ofs: %zu\n",
- __func__, si->si_ofs.opcfg_ofs, si->si_ofs.pcfg_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.pcfg_size = si->si_ofs.opcfg_ofs - si->si_ofs.pcfg_ofs;
-
- p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate pcfg memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.pcfg = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size,
- si->si_ptrs.pcfg);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read pcfg data r=%d\n",
- __func__, rc);
- return rc;
- }
-
- si->si_ofs.max_x = merge_bytes((si->si_ptrs.pcfg->res_xh
- & CY_PCFG_RESOLUTION_X_MASK), si->si_ptrs.pcfg->res_xl);
- si->si_ofs.x_origin = !!(si->si_ptrs.pcfg->res_xh
- & CY_PCFG_ORIGIN_X_MASK);
- si->si_ofs.max_y = merge_bytes((si->si_ptrs.pcfg->res_yh
- & CY_PCFG_RESOLUTION_Y_MASK), si->si_ptrs.pcfg->res_yl);
- si->si_ofs.y_origin = !!(si->si_ptrs.pcfg->res_yh
- & CY_PCFG_ORIGIN_Y_MASK);
- si->si_ofs.max_p = merge_bytes(si->si_ptrs.pcfg->max_zh,
- si->si_ptrs.pcfg->max_zl);
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.pcfg,
- si->si_ofs.pcfg_size, "sysinfo_pcfg_data");
- return rc;
-}
-
-static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- struct cyttsp4_tch_abs_params *tch;
- struct cyttsp4_tch_rec_params *tch_old, *tch_new;
- enum cyttsp4_tch_abs abs;
- int i;
- void *p;
- int rc;
-
- if (si->si_ofs.ddata_ofs <= si->si_ofs.opcfg_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset ddata_ofs: %zu, opcfg_ofs: %zu\n",
- __func__, si->si_ofs.ddata_ofs, si->si_ofs.opcfg_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.opcfg_size = si->si_ofs.ddata_ofs - si->si_ofs.opcfg_ofs;
-
- p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate opcfg memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.opcfg = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size,
- si->si_ptrs.opcfg);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read opcfg data r=%d\n",
- __func__, rc);
- return rc;
- }
- si->si_ofs.cmd_ofs = si->si_ptrs.opcfg->cmd_ofs;
- si->si_ofs.rep_ofs = si->si_ptrs.opcfg->rep_ofs;
- si->si_ofs.rep_sz = (si->si_ptrs.opcfg->rep_szh * 256) +
- si->si_ptrs.opcfg->rep_szl;
- si->si_ofs.num_btns = si->si_ptrs.opcfg->num_btns;
- si->si_ofs.num_btn_regs = (si->si_ofs.num_btns +
- CY_NUM_BTN_PER_REG - 1) / CY_NUM_BTN_PER_REG;
- si->si_ofs.tt_stat_ofs = si->si_ptrs.opcfg->tt_stat_ofs;
- si->si_ofs.obj_cfg0 = si->si_ptrs.opcfg->obj_cfg0;
- si->si_ofs.max_tchs = si->si_ptrs.opcfg->max_tchs &
- CY_BYTE_OFS_MASK;
- si->si_ofs.tch_rec_size = si->si_ptrs.opcfg->tch_rec_size &
- CY_BYTE_OFS_MASK;
-
- /* Get the old touch fields */
- for (abs = CY_TCH_X; abs < CY_NUM_TCH_FIELDS; abs++) {
- tch = &si->si_ofs.tch_abs[abs];
- tch_old = &si->si_ptrs.opcfg->tch_rec_old[abs];
-
- tch->ofs = tch_old->loc & CY_BYTE_OFS_MASK;
- tch->size = cyttsp4_bits_2_bytes(tch_old->size,
- &tch->max);
- tch->bofs = (tch_old->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT;
- }
-
- /* button fields */
- si->si_ofs.btn_rec_size = si->si_ptrs.opcfg->btn_rec_size;
- si->si_ofs.btn_diff_ofs = si->si_ptrs.opcfg->btn_diff_ofs;
- si->si_ofs.btn_diff_size = si->si_ptrs.opcfg->btn_diff_size;
-
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) {
- /* Get the extended touch fields */
- for (i = 0; i < CY_NUM_EXT_TCH_FIELDS; abs++, i++) {
- tch = &si->si_ofs.tch_abs[abs];
- tch_new = &si->si_ptrs.opcfg->tch_rec_new[i];
-
- tch->ofs = tch_new->loc & CY_BYTE_OFS_MASK;
- tch->size = cyttsp4_bits_2_bytes(tch_new->size,
- &tch->max);
- tch->bofs = (tch_new->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT;
- }
- }
-
- for (abs = 0; abs < CY_TCH_NUM_ABS; abs++) {
- dev_dbg(cd->dev, "%s: tch_rec_%s\n", __func__,
- cyttsp4_tch_abs_string[abs]);
- dev_dbg(cd->dev, "%s: ofs =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].ofs);
- dev_dbg(cd->dev, "%s: siz =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].size);
- dev_dbg(cd->dev, "%s: max =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].max);
- dev_dbg(cd->dev, "%s: bofs=%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].bofs);
- }
-
- si->si_ofs.mode_size = si->si_ofs.tt_stat_ofs + 1;
- si->si_ofs.data_size = si->si_ofs.max_tchs *
- si->si_ptrs.opcfg->tch_rec_size;
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.opcfg,
- si->si_ofs.opcfg_size, "sysinfo_opcfg_data");
-
- return 0;
-}
-
-static int cyttsp4_si_get_ddata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- si->si_ofs.ddata_size = si->si_ofs.mdata_ofs - si->si_ofs.ddata_ofs;
-
- p = krealloc(si->si_ptrs.ddata, si->si_ofs.ddata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc ddata memory\n", __func__);
- return -ENOMEM;
- }
- si->si_ptrs.ddata = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.ddata_ofs, si->si_ofs.ddata_size,
- si->si_ptrs.ddata);
- if (rc < 0)
- dev_err(cd->dev, "%s: fail read ddata data r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.ddata,
- si->si_ofs.ddata_size, "sysinfo_ddata");
- return rc;
-}
-
-static int cyttsp4_si_get_mdata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- si->si_ofs.mdata_size = si->si_ofs.map_sz - si->si_ofs.mdata_ofs;
-
- p = krealloc(si->si_ptrs.mdata, si->si_ofs.mdata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc mdata memory\n", __func__);
- return -ENOMEM;
- }
- si->si_ptrs.mdata = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.mdata_ofs, si->si_ofs.mdata_size,
- si->si_ptrs.mdata);
- if (rc < 0)
- dev_err(cd->dev, "%s: fail read mdata data r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.mdata,
- si->si_ofs.mdata_size, "sysinfo_mdata");
- return rc;
-}
-
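-/*
- * Allocate per-button state and map the platform-defined key codes to
- * the device's buttons; buttons beyond the platform key table are left
- * as KEY_RESERVED.
- */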
-static int cyttsp4_si_get_btn_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int btn;
- int num_defined_keys;
- u16 *key_table;
- void *p;
- int rc = 0;
-
- if (si->si_ofs.num_btns) {
- si->si_ofs.btn_keys_size = si->si_ofs.num_btns *
- sizeof(struct cyttsp4_btn);
-
- p = krealloc(si->btn, si->si_ofs.btn_keys_size,
- GFP_KERNEL|__GFP_ZERO);
- if (p == NULL) {
- dev_err(cd->dev, "%s: %s\n", __func__,
- "fail alloc btn_keys memory");
- return -ENOMEM;
- }
- si->btn = p;
-
- if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS] == NULL)
- num_defined_keys = 0;
- else if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS]->data == NULL)
- num_defined_keys = 0;
- else
- num_defined_keys = cd->cpdata->sett
- [CY_IC_GRPNUM_BTN_KEYS]->size;
-
- for (btn = 0; btn < si->si_ofs.num_btns &&
- btn < num_defined_keys; btn++) {
- key_table = (u16 *)cd->cpdata->sett
- [CY_IC_GRPNUM_BTN_KEYS]->data;
- si->btn[btn].key_code = key_table[btn];
- si->btn[btn].state = CY_BTN_RELEASED;
- si->btn[btn].enabled = true;
- }
- for (; btn < si->si_ofs.num_btns; btn++) {
- si->btn[btn].key_code = KEY_RESERVED;
- si->btn[btn].state = CY_BTN_RELEASED;
- si->btn[btn].enabled = true;
- }
-
- return rc;
- }
-
- si->si_ofs.btn_keys_size = 0;
- kfree(si->btn);
- si->btn = NULL;
- return rc;
-}
-
-static int cyttsp4_si_get_op_data_ptrs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
-
- p = krealloc(si->xy_mode, si->si_ofs.mode_size, GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->xy_mode = p;
-
- p = krealloc(si->xy_data, si->si_ofs.data_size, GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->xy_data = p;
-
- p = krealloc(si->btn_rec_data,
- si->si_ofs.btn_rec_size * si->si_ofs.num_btns,
- GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->btn_rec_data = p;
-
- return 0;
-}
-
-static void cyttsp4_si_put_log_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- dev_dbg(cd->dev, "%s: cydata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.cydata_ofs, si->si_ofs.cydata_size);
- dev_dbg(cd->dev, "%s: test_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.test_ofs, si->si_ofs.test_size);
- dev_dbg(cd->dev, "%s: pcfg_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size);
- dev_dbg(cd->dev, "%s: opcfg_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size);
- dev_dbg(cd->dev, "%s: ddata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.ddata_ofs, si->si_ofs.ddata_size);
- dev_dbg(cd->dev, "%s: mdata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.mdata_ofs, si->si_ofs.mdata_size);
-
- dev_dbg(cd->dev, "%s: cmd_ofs =%4zd\n", __func__,
- si->si_ofs.cmd_ofs);
- dev_dbg(cd->dev, "%s: rep_ofs =%4zd\n", __func__,
- si->si_ofs.rep_ofs);
- dev_dbg(cd->dev, "%s: rep_sz =%4zd\n", __func__,
- si->si_ofs.rep_sz);
- dev_dbg(cd->dev, "%s: num_btns =%4zd\n", __func__,
- si->si_ofs.num_btns);
- dev_dbg(cd->dev, "%s: num_btn_regs =%4zd\n", __func__,
- si->si_ofs.num_btn_regs);
- dev_dbg(cd->dev, "%s: tt_stat_ofs =%4zd\n", __func__,
- si->si_ofs.tt_stat_ofs);
- dev_dbg(cd->dev, "%s: tch_rec_size =%4zd\n", __func__,
- si->si_ofs.tch_rec_size);
- dev_dbg(cd->dev, "%s: max_tchs =%4zd\n", __func__,
- si->si_ofs.max_tchs);
- dev_dbg(cd->dev, "%s: mode_size =%4zd\n", __func__,
- si->si_ofs.mode_size);
- dev_dbg(cd->dev, "%s: data_size =%4zd\n", __func__,
- si->si_ofs.data_size);
- dev_dbg(cd->dev, "%s: map_sz =%4zd\n", __func__,
- si->si_ofs.map_sz);
-
- dev_dbg(cd->dev, "%s: btn_rec_size =%2zd\n", __func__,
- si->si_ofs.btn_rec_size);
- dev_dbg(cd->dev, "%s: btn_diff_ofs =%2zd\n", __func__,
- si->si_ofs.btn_diff_ofs);
- dev_dbg(cd->dev, "%s: btn_diff_size =%2zd\n", __func__,
- si->si_ofs.btn_diff_size);
-
- dev_dbg(cd->dev, "%s: max_x = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_x, si->si_ofs.max_x);
- dev_dbg(cd->dev, "%s: x_origin = %zd (%s)\n", __func__,
- si->si_ofs.x_origin,
- si->si_ofs.x_origin == CY_NORMAL_ORIGIN ?
- "left corner" : "right corner");
- dev_dbg(cd->dev, "%s: max_y = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_y, si->si_ofs.max_y);
- dev_dbg(cd->dev, "%s: y_origin = %zd (%s)\n", __func__,
- si->si_ofs.y_origin,
- si->si_ofs.y_origin == CY_NORMAL_ORIGIN ?
- "upper corner" : "lower corner");
- dev_dbg(cd->dev, "%s: max_p = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_p, si->si_ofs.max_p);
-
- dev_dbg(cd->dev, "%s: xy_mode=%p xy_data=%p\n", __func__,
- si->xy_mode, si->xy_data);
-}
-
-static int cyttsp4_get_sysinfo_regs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int rc;
-
- rc = cyttsp4_si_data_offsets(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_cydata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_test_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_pcfg_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_opcfg_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_ddata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_mdata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_btn_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_op_data_ptrs(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to get_op_data\n",
- __func__);
- return rc;
- }
-
- cyttsp4_si_put_log_data(cd);
-
- /* provide flow control handshake */
- rc = cyttsp4_handshake(cd, si->si_data.hst_mode);
- if (rc < 0)
- dev_err(cd->dev, "%s: handshake fail on sysinfo reg\n",
- __func__);
-
- si->ready = true;
- return rc;
-}
-
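-/* Schedule deferred startup work; caller must hold system_lock. */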
-static void cyttsp4_queue_startup_(struct cyttsp4 *cd)
-{
- if (cd->startup_state == STARTUP_NONE) {
- cd->startup_state = STARTUP_QUEUED;
- schedule_work(&cd->startup_work);
- dev_dbg(cd->dev, "%s: cyttsp4_startup queued\n", __func__);
- } else {
- dev_dbg(cd->dev, "%s: startup_state = %d\n", __func__,
- cd->startup_state);
- }
-}
-
-static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
- int max_slots)
-{
- int t;
-
- if (md->num_prv_tch == 0)
- return;
-
- for (t = 0; t < max_slots; t++) {
- input_mt_slot(md->input, t);
- input_mt_report_slot_inactive(md->input);
- }
-}
-
-static void cyttsp4_lift_all(struct cyttsp4_mt_data *md)
-{
- if (!md->si)
- return;
-
- if (md->num_prv_tch != 0) {
- cyttsp4_report_slot_liftoff(md,
- md->si->si_ofs.tch_abs[CY_TCH_T].max);
- input_sync(md->input);
- md->num_prv_tch = 0;
- }
-}
-
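-/*
- * Extract one packed touch field from the raw record: accumulate "size"
- * bytes MSB-first, shifting each byte right by the field's bit offset
- * "bofs", then mask the result to max - 1.
- */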
-static void cyttsp4_get_touch_axis(struct cyttsp4_mt_data *md,
- int *axis, int size, int max, u8 *xy_data, int bofs)
-{
- int nbyte;
- int next;
-
- for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) {
- dev_vdbg(&md->input->dev,
- "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p"
- " xy_data[%d]=%02X(%d) bofs=%d\n",
- __func__, *axis, *axis, size, max, xy_data, next,
- xy_data[next], xy_data[next], bofs);
- *axis = (*axis * 256) + (xy_data[next] >> bofs);
- next++;
- }
-
- *axis &= max - 1;
-
- dev_vdbg(&md->input->dev,
- "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p"
- " xy_data[%d]=%02X(%d)\n",
- __func__, *axis, *axis, size, max, xy_data, next,
- xy_data[next], xy_data[next]);
-}
-
-static void cyttsp4_get_touch(struct cyttsp4_mt_data *md,
- struct cyttsp4_touch *touch, u8 *xy_data)
-{
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- enum cyttsp4_tch_abs abs;
- bool flipped;
-
- for (abs = CY_TCH_X; abs < CY_TCH_NUM_ABS; abs++) {
- cyttsp4_get_touch_axis(md, &touch->abs[abs],
- si->si_ofs.tch_abs[abs].size,
- si->si_ofs.tch_abs[abs].max,
- xy_data + si->si_ofs.tch_abs[abs].ofs,
- si->si_ofs.tch_abs[abs].bofs);
- dev_vdbg(dev, "%s: get %s=%04X(%d)\n", __func__,
- cyttsp4_tch_abs_string[abs],
- touch->abs[abs], touch->abs[abs]);
- }
-
- if (md->pdata->flags & CY_FLAG_FLIP) {
- swap(touch->abs[CY_TCH_X], touch->abs[CY_TCH_Y]);
- flipped = true;
- } else
- flipped = false;
-
- if (md->pdata->flags & CY_FLAG_INV_X) {
- if (flipped)
- touch->abs[CY_TCH_X] = md->si->si_ofs.max_y -
- touch->abs[CY_TCH_X];
- else
- touch->abs[CY_TCH_X] = md->si->si_ofs.max_x -
- touch->abs[CY_TCH_X];
- }
- if (md->pdata->flags & CY_FLAG_INV_Y) {
- if (flipped)
- touch->abs[CY_TCH_Y] = md->si->si_ofs.max_x -
- touch->abs[CY_TCH_Y];
- else
- touch->abs[CY_TCH_Y] = md->si->si_ofs.max_y -
- touch->abs[CY_TCH_Y];
- }
-
- dev_vdbg(dev, "%s: flip=%s inv-x=%s inv-y=%s x=%04X(%d) y=%04X(%d)\n",
- __func__, flipped ? "true" : "false",
- md->pdata->flags & CY_FLAG_INV_X ? "true" : "false",
- md->pdata->flags & CY_FLAG_INV_Y ? "true" : "false",
- touch->abs[CY_TCH_X], touch->abs[CY_TCH_X],
- touch->abs[CY_TCH_Y], touch->abs[CY_TCH_Y]);
-}
-
-static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids)
-{
- int t;
-
- for (t = 0; t < max_slots; t++) {
- if (ids[t])
- continue;
- input_mt_slot(input, t);
- input_mt_report_slot_inactive(input);
- }
-
- input_sync(input);
-}
-
-static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch)
-{
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- struct cyttsp4_touch tch;
- int sig;
- int i, j, t = 0;
- int ids[MAX(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)];
-
- memset(ids, 0, si->si_ofs.tch_abs[CY_TCH_T].max * sizeof(int));
- for (i = 0; i < num_cur_tch; i++) {
- cyttsp4_get_touch(md, &tch, si->xy_data +
- (i * si->si_ofs.tch_rec_size));
- if ((tch.abs[CY_TCH_T] < md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST]) ||
- (tch.abs[CY_TCH_T] > md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MAX_OST])) {
- dev_err(dev, "%s: tch=%d -> bad trk_id=%d max_id=%d\n",
- __func__, i, tch.abs[CY_TCH_T],
- md->pdata->frmwrk->abs[(CY_ABS_ID_OST *
- CY_NUM_ABS_SET) + CY_MAX_OST]);
- continue;
- }
-
-		/* use 0-based track IDs */
- sig = md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE) {
- t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST];
- if (tch.abs[CY_TCH_E] == CY_EV_LIFTOFF) {
- dev_dbg(dev, "%s: t=%d e=%d lift-off\n",
- __func__, t, tch.abs[CY_TCH_E]);
- goto cyttsp4_get_mt_touches_pr_tch;
- }
- input_mt_slot(md->input, t);
- input_mt_report_slot_state(md->input, MT_TOOL_FINGER,
- true);
- ids[t] = true;
- }
-
- /* all devices: position and pressure fields */
- for (j = 0; j <= CY_ABS_W_OST; j++) {
- sig = md->pdata->frmwrk->abs[((CY_ABS_X_OST + j) *
- CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE)
- input_report_abs(md->input, sig,
- tch.abs[CY_TCH_X + j]);
- }
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) {
- /*
- * TMA400 size and orientation fields:
- * if pressure is non-zero and major touch
- * signal is zero, then set major and minor touch
- * signals to minimum non-zero value
- */
- if (tch.abs[CY_TCH_P] > 0 && tch.abs[CY_TCH_MAJ] == 0)
- tch.abs[CY_TCH_MAJ] = tch.abs[CY_TCH_MIN] = 1;
-
- /* Get the extended touch fields */
- for (j = 0; j < CY_NUM_EXT_TCH_FIELDS; j++) {
- sig = md->pdata->frmwrk->abs
- [((CY_ABS_MAJ_OST + j) *
- CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE)
- input_report_abs(md->input, sig,
- tch.abs[CY_TCH_MAJ + j]);
- }
- }
-
-cyttsp4_get_mt_touches_pr_tch:
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE)
- dev_dbg(dev,
- "%s: t=%d x=%d y=%d z=%d M=%d m=%d o=%d e=%d\n",
- __func__, t,
- tch.abs[CY_TCH_X],
- tch.abs[CY_TCH_Y],
- tch.abs[CY_TCH_P],
- tch.abs[CY_TCH_MAJ],
- tch.abs[CY_TCH_MIN],
- tch.abs[CY_TCH_OR],
- tch.abs[CY_TCH_E]);
- else
- dev_dbg(dev,
- "%s: t=%d x=%d y=%d z=%d e=%d\n", __func__,
- t,
- tch.abs[CY_TCH_X],
- tch.abs[CY_TCH_Y],
- tch.abs[CY_TCH_P],
- tch.abs[CY_TCH_E]);
- }
-
- cyttsp4_final_sync(md->input, si->si_ofs.tch_abs[CY_TCH_T].max, ids);
-
- md->num_prv_tch = num_cur_tch;
-
- return;
-}
-
-/* read xy_data for all current touches */
-static int cyttsp4_xy_worker(struct cyttsp4 *cd)
-{
- struct cyttsp4_mt_data *md = &cd->md;
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- u8 num_cur_tch;
- u8 hst_mode;
- u8 rep_len;
- u8 rep_stat;
- u8 tt_stat;
- int rc = 0;
-
- /*
- * Get event data from cyttsp4 device.
- * The event data includes all data
- * for all active touches.
- * Event data also includes button data
- */
- /*
- * Use 2 reads:
- * 1st read to get mode + button bytes + touch count (core)
- * 2nd read (optional) to get touch 1 - touch n data
- */
- hst_mode = si->xy_mode[CY_REG_BASE];
- rep_len = si->xy_mode[si->si_ofs.rep_ofs];
- rep_stat = si->xy_mode[si->si_ofs.rep_ofs + 1];
- tt_stat = si->xy_mode[si->si_ofs.tt_stat_ofs];
- dev_vdbg(dev, "%s: %s%02X %s%d %s%02X %s%02X\n", __func__,
- "hst_mode=", hst_mode, "rep_len=", rep_len,
- "rep_stat=", rep_stat, "tt_stat=", tt_stat);
-
- num_cur_tch = GET_NUM_TOUCHES(tt_stat);
- dev_vdbg(dev, "%s: num_cur_tch=%d\n", __func__, num_cur_tch);
-
- if (rep_len == 0 && num_cur_tch > 0) {
- dev_err(dev, "%s: report length error rep_len=%d num_tch=%d\n",
- __func__, rep_len, num_cur_tch);
- goto cyttsp4_xy_worker_exit;
- }
-
- /* read touches */
- if (num_cur_tch > 0) {
- rc = cyttsp4_adap_read(cd, si->si_ofs.tt_stat_ofs + 1,
- num_cur_tch * si->si_ofs.tch_rec_size,
- si->xy_data);
- if (rc < 0) {
- dev_err(dev, "%s: read fail on touch regs r=%d\n",
- __func__, rc);
- goto cyttsp4_xy_worker_exit;
- }
- }
-
- /* print xy data */
- cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_data, num_cur_tch *
- si->si_ofs.tch_rec_size, "xy_data");
-
- /* check any error conditions */
- if (IS_BAD_PKT(rep_stat)) {
- dev_dbg(dev, "%s: Invalid buffer detected\n", __func__);
- rc = 0;
- goto cyttsp4_xy_worker_exit;
- }
-
- if (IS_LARGE_AREA(tt_stat))
- dev_dbg(dev, "%s: Large area detected\n", __func__);
-
- if (num_cur_tch > si->si_ofs.max_tchs) {
- dev_err(dev, "%s: too many tch; set to max tch (n=%d c=%zd)\n",
- __func__, num_cur_tch, si->si_ofs.max_tchs);
- num_cur_tch = si->si_ofs.max_tchs;
- }
-
- /* extract xy_data for all currently reported touches */
- dev_vdbg(dev, "%s: extract data num_cur_tch=%d\n", __func__,
- num_cur_tch);
- if (num_cur_tch)
- cyttsp4_get_mt_touches(md, num_cur_tch);
- else
- cyttsp4_lift_all(md);
-
- rc = 0;
-
-cyttsp4_xy_worker_exit:
- return rc;
-}
-
-static int cyttsp4_mt_attention(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- int rc = 0;
-
- if (!md->si)
- return 0;
-
- mutex_lock(&md->report_lock);
- if (!md->is_suspended) {
- /* core handles handshake */
- rc = cyttsp4_xy_worker(cd);
- } else {
- dev_vdbg(dev, "%s: Ignoring report while suspended\n",
- __func__);
- }
- mutex_unlock(&md->report_lock);
- if (rc < 0)
- dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
-
- return rc;
-}
-
-static irqreturn_t cyttsp4_irq(int irq, void *handle)
-{
- struct cyttsp4 *cd = handle;
- struct device *dev = cd->dev;
- enum cyttsp4_mode cur_mode;
- u8 cmd_ofs = cd->sysinfo.si_ofs.cmd_ofs;
- u8 mode[3];
- int rc;
-
- /*
- * Check whether this IRQ should be ignored (external)
- * This should be the very first thing to check since
- * ignore_irq may be set for a very short period of time
- */
- if (atomic_read(&cd->ignore_irq)) {
- dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__);
- return IRQ_HANDLED;
- }
-
- dev_dbg(dev, "%s int:0x%x\n", __func__, cd->int_status);
-
- mutex_lock(&cd->system_lock);
-
- /* Just to debug */
- if (cd->sleep_state == SS_SLEEP_ON || cd->sleep_state == SS_SLEEPING)
- dev_vdbg(dev, "%s: Received IRQ while in sleep\n", __func__);
-
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), mode);
- if (rc) {
- dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc);
- goto cyttsp4_irq_exit;
- }
- dev_vdbg(dev, "%s mode[0-2]:0x%X 0x%X 0x%X\n", __func__,
- mode[0], mode[1], mode[2]);
-
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- cur_mode = CY_MODE_BOOTLOADER;
- dev_vdbg(dev, "%s: bl running\n", __func__);
- if (cd->mode == CY_MODE_BOOTLOADER) {
- /* Signal bootloader heartbeat heard */
- wake_up(&cd->wait_q);
- goto cyttsp4_irq_exit;
- }
-
- /* switch to bootloader */
- dev_dbg(dev, "%s: restart switch to bl m=%d -> m=%d\n",
- __func__, cd->mode, cur_mode);
-
- /* catch operation->bl glitch */
- if (cd->mode != CY_MODE_UNKNOWN) {
-			/* In case startup_state does not let startup_() run */
- cd->mode = CY_MODE_UNKNOWN;
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_irq_exit;
- }
-
- /*
- * do not wake thread on this switch since
- * it is possible to get an early heartbeat
- * prior to performing the reset
- */
- cd->mode = cur_mode;
-
- goto cyttsp4_irq_exit;
- }
-
- switch (mode[0] & CY_HST_MODE) {
- case CY_HST_OPERATE:
- cur_mode = CY_MODE_OPERATIONAL;
- dev_vdbg(dev, "%s: operational\n", __func__);
- break;
- case CY_HST_CAT:
- cur_mode = CY_MODE_CAT;
- dev_vdbg(dev, "%s: CaT\n", __func__);
- break;
- case CY_HST_SYSINFO:
- cur_mode = CY_MODE_SYSINFO;
- dev_vdbg(dev, "%s: sysinfo\n", __func__);
- break;
- default:
- cur_mode = CY_MODE_UNKNOWN;
- dev_err(dev, "%s: unknown HST mode 0x%02X\n", __func__,
- mode[0]);
- break;
- }
-
- /* Check whether this IRQ should be ignored (internal) */
- if (cd->int_status & CY_INT_IGNORE) {
- dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__);
- goto cyttsp4_irq_exit;
- }
-
- /* Check for wake up interrupt */
- if (cd->int_status & CY_INT_AWAKE) {
- cd->int_status &= ~CY_INT_AWAKE;
- wake_up(&cd->wait_q);
- dev_vdbg(dev, "%s: Received wake up interrupt\n", __func__);
- goto cyttsp4_irq_handshake;
- }
-
- /* Expecting mode change interrupt */
- if ((cd->int_status & CY_INT_MODE_CHANGE)
- && (mode[0] & CY_HST_MODE_CHANGE) == 0) {
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- dev_dbg(dev, "%s: finish mode switch m=%d -> m=%d\n",
- __func__, cd->mode, cur_mode);
- cd->mode = cur_mode;
- wake_up(&cd->wait_q);
- goto cyttsp4_irq_handshake;
- }
-
- /* compare current core mode to current device mode */
- dev_vdbg(dev, "%s: cd->mode=%d cur_mode=%d\n",
- __func__, cd->mode, cur_mode);
- if ((mode[0] & CY_HST_MODE_CHANGE) == 0 && cd->mode != cur_mode) {
- /* Unexpected mode change occurred */
- dev_err(dev, "%s %d->%d 0x%x\n", __func__, cd->mode,
- cur_mode, cd->int_status);
- dev_dbg(dev, "%s: Unexpected mode change, startup\n",
- __func__);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_irq_exit;
- }
-
- /* Expecting command complete interrupt */
- dev_vdbg(dev, "%s: command byte:0x%x\n", __func__, mode[cmd_ofs]);
- if ((cd->int_status & CY_INT_EXEC_CMD)
- && mode[cmd_ofs] & CY_CMD_COMPLETE) {
- cd->int_status &= ~CY_INT_EXEC_CMD;
- dev_vdbg(dev, "%s: Received command complete interrupt\n",
- __func__);
- wake_up(&cd->wait_q);
- /*
- * It is possible to receive a single interrupt for
- * command complete and touch/button status report.
- * Continue processing for a possible status report.
- */
- }
-
- /* This should be status report, read status regs */
- if (cd->mode == CY_MODE_OPERATIONAL) {
- dev_vdbg(dev, "%s: Read status registers\n", __func__);
- rc = cyttsp4_load_status_regs(cd);
- if (rc < 0)
- dev_err(dev, "%s: fail read mode regs r=%d\n",
- __func__, rc);
- }
-
- cyttsp4_mt_attention(cd);
-
-cyttsp4_irq_handshake:
- /* handshake the event */
- dev_vdbg(dev, "%s: Handshake mode=0x%02X r=%d\n",
- __func__, mode[0], rc);
- rc = cyttsp4_handshake(cd, mode[0]);
- if (rc < 0)
- dev_err(dev, "%s: Fail handshake mode=0x%02X r=%d\n",
- __func__, mode[0], rc);
-
-	/*
-	 * A non-zero udelay period is required when using
-	 * IRQF_TRIGGER_LOW in order to delay until the
-	 * device deasserts its interrupt line.
-	 */
- udelay(cd->cpdata->level_irq_udelay);
-
-cyttsp4_irq_exit:
- mutex_unlock(&cd->system_lock);
- return IRQ_HANDLED;
-}
-
-static void cyttsp4_start_wd_timer(struct cyttsp4 *cd)
-{
- if (!CY_WATCHDOG_TIMEOUT)
- return;
-
- mod_timer(&cd->watchdog_timer, jiffies +
- msecs_to_jiffies(CY_WATCHDOG_TIMEOUT));
-}
-
-static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
-{
- if (!CY_WATCHDOG_TIMEOUT)
- return;
-
- /*
- * Ensure we wait until the watchdog timer
- * running on a different CPU finishes
- */
- timer_shutdown_sync(&cd->watchdog_timer);
- cancel_work_sync(&cd->watchdog_work);
-}
-
-static void cyttsp4_watchdog_timer(struct timer_list *t)
-{
- struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer);
-
- dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
-
- schedule_work(&cd->watchdog_work);
-
- return;
-}
-
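-/*
- * Block until exclusive access to the device is granted to "ownptr".
- * A timeout_ms of zero means wait indefinitely; waiters sleep on wait_q
- * until the current owner calls cyttsp4_release_exclusive().
- */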
-static int cyttsp4_request_exclusive(struct cyttsp4 *cd, void *ownptr,
- int timeout_ms)
-{
- int t = msecs_to_jiffies(timeout_ms);
- bool with_timeout = (timeout_ms != 0);
-
- mutex_lock(&cd->system_lock);
- if (!cd->exclusive_dev && cd->exclusive_waits == 0) {
- cd->exclusive_dev = ownptr;
- goto exit;
- }
-
- cd->exclusive_waits++;
-wait:
- mutex_unlock(&cd->system_lock);
- if (with_timeout) {
- t = wait_event_timeout(cd->wait_q, !cd->exclusive_dev, t);
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting exclusive access\n",
- __func__);
- mutex_lock(&cd->system_lock);
- cd->exclusive_waits--;
- mutex_unlock(&cd->system_lock);
- return -ETIME;
- }
- } else {
- wait_event(cd->wait_q, !cd->exclusive_dev);
- }
- mutex_lock(&cd->system_lock);
- if (cd->exclusive_dev)
- goto wait;
- cd->exclusive_dev = ownptr;
- cd->exclusive_waits--;
-exit:
- mutex_unlock(&cd->system_lock);
-
- return 0;
-}
-
-/*
- * Returns an error if the device was not owned by the caller.
- */
-static int cyttsp4_release_exclusive(struct cyttsp4 *cd, void *ownptr)
-{
- mutex_lock(&cd->system_lock);
- if (cd->exclusive_dev != ownptr) {
- mutex_unlock(&cd->system_lock);
- return -EINVAL;
- }
-
- dev_vdbg(cd->dev, "%s: exclusive_dev %p freed\n",
- __func__, cd->exclusive_dev);
- cd->exclusive_dev = NULL;
- wake_up(&cd->wait_q);
- mutex_unlock(&cd->system_lock);
- return 0;
-}
-
-static int cyttsp4_wait_bl_heartbeat(struct cyttsp4 *cd)
-{
- long t;
- int rc = 0;
-
- /* wait heartbeat */
- dev_vdbg(cd->dev, "%s: wait heartbeat...\n", __func__);
- t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_BOOTLOADER,
- msecs_to_jiffies(CY_CORE_RESET_AND_WAIT_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting bl heartbeat cd->mode=%d\n",
- __func__, cd->mode);
- rc = -ETIME;
- }
-
- return rc;
-}
-
-static int cyttsp4_wait_sysinfo_mode(struct cyttsp4 *cd)
-{
- long t;
-
- dev_vdbg(cd->dev, "%s: wait sysinfo...\n", __func__);
-
- t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_SYSINFO,
- msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting exit bl cd->mode=%d\n",
- __func__, cd->mode);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- mutex_unlock(&cd->system_lock);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int cyttsp4_reset_and_wait(struct cyttsp4 *cd)
-{
- int rc;
-
- /* reset hardware */
- mutex_lock(&cd->system_lock);
- dev_dbg(cd->dev, "%s: reset hw...\n", __func__);
- rc = cyttsp4_hw_reset(cd);
- cd->mode = CY_MODE_UNKNOWN;
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
-		dev_err(cd->dev, "%s: Fail hw reset r=%d\n", __func__, rc);
- return rc;
- }
-
- return cyttsp4_wait_bl_heartbeat(cd);
-}
-
-/*
- * Returns an error if refused or timed out; blocks until the mode
- * change complete bit is set (mode change interrupt).
- */
-static int cyttsp4_set_mode(struct cyttsp4 *cd, int new_mode)
-{
- u8 new_dev_mode;
- u8 mode;
- long t;
- int rc;
-
- switch (new_mode) {
- case CY_MODE_OPERATIONAL:
- new_dev_mode = CY_HST_OPERATE;
- break;
- case CY_MODE_SYSINFO:
- new_dev_mode = CY_HST_SYSINFO;
- break;
- case CY_MODE_CAT:
- new_dev_mode = CY_HST_CAT;
- break;
- default:
- dev_err(cd->dev, "%s: invalid mode: %02X(%d)\n",
- __func__, new_mode, new_mode);
- return -EINVAL;
- }
-
- /* change mode */
- dev_dbg(cd->dev, "%s: %s=%p new_dev_mode=%02X new_mode=%d\n",
- __func__, "have exclusive", cd->exclusive_dev,
- new_dev_mode, new_mode);
-
- mutex_lock(&cd->system_lock);
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- if (rc < 0) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Fail read mode r=%d\n",
- __func__, rc);
- goto exit;
- }
-
- /* Clear device mode bits and set to new mode */
- mode &= ~CY_HST_MODE;
- mode |= new_dev_mode | CY_HST_MODE_CHANGE;
-
- cd->int_status |= CY_INT_MODE_CHANGE;
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode), &mode);
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Fail write mode change r=%d\n",
- __func__, rc);
- goto exit;
- }
-
- /* wait for mode change done interrupt */
- t = wait_event_timeout(cd->wait_q,
- (cd->int_status & CY_INT_MODE_CHANGE) == 0,
- msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT));
- dev_dbg(cd->dev, "%s: back from wait t=%ld cd->mode=%d\n",
- __func__, t, cd->mode);
-
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: %s\n", __func__,
- "tmo waiting mode change");
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- mutex_unlock(&cd->system_lock);
- rc = -EINVAL;
- }
-
-exit:
- return rc;
-}
-
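-/*
- * Watchdog work: poll the status registers to verify the device still
- * responds and has not fallen back into bootloader mode; queue a
- * restart if either check fails, otherwise re-arm the timer.
- */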
-static void cyttsp4_watchdog_work(struct work_struct *work)
-{
- struct cyttsp4 *cd =
- container_of(work, struct cyttsp4, watchdog_work);
- u8 *mode;
- int retval;
-
- mutex_lock(&cd->system_lock);
- retval = cyttsp4_load_status_regs(cd);
- if (retval < 0) {
- dev_err(cd->dev,
- "%s: failed to access device in watchdog timer r=%d\n",
- __func__, retval);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_timer_watchdog_exit_error;
- }
- mode = &cd->sysinfo.xy_mode[CY_REG_BASE];
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- dev_err(cd->dev,
-			"%s: device found in bootloader mode when expecting operational mode\n",
- __func__);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_timer_watchdog_exit_error;
- }
-
- cyttsp4_start_wd_timer(cd);
-cyttsp4_timer_watchdog_exit_error:
- mutex_unlock(&cd->system_lock);
- return;
-}
-
-static int cyttsp4_core_sleep_(struct cyttsp4 *cd)
-{
- enum cyttsp4_sleep_state ss = SS_SLEEP_ON;
- enum cyttsp4_int_state int_status = CY_INT_IGNORE;
- int rc = 0;
- u8 mode[2];
-
- /* Already in sleep mode? */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_ON) {
- mutex_unlock(&cd->system_lock);
- return 0;
- }
- cd->sleep_state = SS_SLEEPING;
- mutex_unlock(&cd->system_lock);
-
- cyttsp4_stop_wd_timer(cd);
-
- /* Wait until currently running IRQ handler exits and disable IRQ */
- disable_irq(cd->irq);
-
- dev_vdbg(cd->dev, "%s: write DEEP SLEEP...\n", __func__);
- mutex_lock(&cd->system_lock);
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- if (rc) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc);
- goto error;
- }
-
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__);
- rc = -EINVAL;
- goto error;
- }
-
- mode[0] |= CY_HST_SLEEP;
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode[0]), &mode[0]);
- mutex_unlock(&cd->system_lock);
- if (rc) {
- dev_err(cd->dev, "%s: Fail write adapter r=%d\n", __func__, rc);
- goto error;
- }
- dev_vdbg(cd->dev, "%s: write DEEP SLEEP succeeded\n", __func__);
-
- if (cd->cpdata->power) {
- dev_dbg(cd->dev, "%s: Power down HW\n", __func__);
- rc = cd->cpdata->power(cd->cpdata, 0, cd->dev, &cd->ignore_irq);
- } else {
- dev_dbg(cd->dev, "%s: No power function\n", __func__);
- rc = 0;
- }
- if (rc < 0) {
- dev_err(cd->dev, "%s: HW Power down fails r=%d\n",
- __func__, rc);
- goto error;
- }
-
- /* Give time to FW to sleep */
- msleep(50);
-
- goto exit;
-
-error:
- ss = SS_SLEEP_OFF;
- int_status = CY_INT_NONE;
- cyttsp4_start_wd_timer(cd);
-
-exit:
- mutex_lock(&cd->system_lock);
- cd->sleep_state = ss;
- cd->int_status |= int_status;
- mutex_unlock(&cd->system_lock);
- enable_irq(cd->irq);
- return rc;
-}
-
-static int cyttsp4_startup_(struct cyttsp4 *cd)
-{
- int retry = CY_CORE_STARTUP_RETRY_COUNT;
- int rc;
-
- cyttsp4_stop_wd_timer(cd);
-
-reset:
- if (retry != CY_CORE_STARTUP_RETRY_COUNT)
- dev_dbg(cd->dev, "%s: Retry %d\n", __func__,
- CY_CORE_STARTUP_RETRY_COUNT - retry);
-
- /* reset hardware and wait for heartbeat */
- rc = cyttsp4_reset_and_wait(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Error on h/w reset r=%d\n", __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- /* exit bl into sysinfo mode */
- dev_vdbg(cd->dev, "%s: write exit ldr...\n", __func__);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_IGNORE;
- cd->int_status |= CY_INT_MODE_CHANGE;
-
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(ldr_exit),
- (u8 *)ldr_exit);
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Fail write r=%d\n", __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- rc = cyttsp4_wait_sysinfo_mode(cd);
- if (rc < 0) {
- u8 buf[sizeof(ldr_err_app)];
- int rc1;
-
- /* Check for invalid/corrupted touch application */
- rc1 = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(ldr_err_app),
- buf);
- if (rc1) {
- dev_err(cd->dev, "%s: Fail read r=%d\n", __func__, rc1);
- } else if (!memcmp(buf, ldr_err_app, sizeof(ldr_err_app))) {
- dev_err(cd->dev, "%s: Error launching touch application\n",
- __func__);
- mutex_lock(&cd->system_lock);
- cd->invalid_touch_app = true;
- mutex_unlock(&cd->system_lock);
- goto exit_no_wd;
- }
-
- if (retry--)
- goto reset;
- goto exit;
- }
-
- mutex_lock(&cd->system_lock);
- cd->invalid_touch_app = false;
- mutex_unlock(&cd->system_lock);
-
- /* read sysinfo data */
- dev_vdbg(cd->dev, "%s: get sysinfo regs..\n", __func__);
- rc = cyttsp4_get_sysinfo_regs(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to get sysinfo regs rc=%d\n",
- __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- rc = cyttsp4_set_mode(cd, CY_MODE_OPERATIONAL);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to set mode to operational rc=%d\n",
- __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- cyttsp4_lift_all(&cd->md);
-
- /* restore to sleep if was suspended */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_ON) {
- cd->sleep_state = SS_SLEEP_OFF;
- mutex_unlock(&cd->system_lock);
- cyttsp4_core_sleep_(cd);
- goto exit_no_wd;
- }
- mutex_unlock(&cd->system_lock);
-
-exit:
- cyttsp4_start_wd_timer(cd);
-exit_no_wd:
- return rc;
-}
-
-static int cyttsp4_startup(struct cyttsp4 *cd)
-{
- int rc;
-
- mutex_lock(&cd->system_lock);
- cd->startup_state = STARTUP_RUNNING;
- mutex_unlock(&cd->system_lock);
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- goto exit;
- }
-
- rc = cyttsp4_startup_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- /* Don't return fail code, mode is already changed. */
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
-exit:
- mutex_lock(&cd->system_lock);
- cd->startup_state = STARTUP_NONE;
- mutex_unlock(&cd->system_lock);
-
- /* Wake the waiters for end of startup */
- wake_up(&cd->wait_q);
-
- return rc;
-}
-
-static void cyttsp4_startup_work_function(struct work_struct *work)
-{
- struct cyttsp4 *cd = container_of(work, struct cyttsp4, startup_work);
- int rc;
-
- rc = cyttsp4_startup(cd);
- if (rc < 0)
- dev_err(cd->dev, "%s: Fail queued startup r=%d\n",
- __func__, rc);
-}
-
-static void cyttsp4_free_si_ptrs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
-
- if (!si)
- return;
-
- kfree(si->si_ptrs.cydata);
- kfree(si->si_ptrs.test);
- kfree(si->si_ptrs.pcfg);
- kfree(si->si_ptrs.opcfg);
- kfree(si->si_ptrs.ddata);
- kfree(si->si_ptrs.mdata);
- kfree(si->btn);
- kfree(si->xy_mode);
- kfree(si->xy_data);
- kfree(si->btn_rec_data);
-}
-
-static int cyttsp4_core_sleep(struct cyttsp4 *cd)
-{
- int rc;
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- return 0;
- }
-
- rc = cyttsp4_core_sleep_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
- return rc;
-}
-
-static int cyttsp4_core_wake_(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- int rc;
- u8 mode;
- int t;
-
- /* Already woken? */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_OFF) {
- mutex_unlock(&cd->system_lock);
- return 0;
- }
- cd->int_status &= ~CY_INT_IGNORE;
- cd->int_status |= CY_INT_AWAKE;
- cd->sleep_state = SS_WAKING;
-
- if (cd->cpdata->power) {
- dev_dbg(dev, "%s: Power up HW\n", __func__);
- rc = cd->cpdata->power(cd->cpdata, 1, dev, &cd->ignore_irq);
- } else {
- dev_dbg(dev, "%s: No power function\n", __func__);
- rc = -ENOSYS;
- }
- if (rc < 0) {
- dev_err(dev, "%s: HW Power up fails r=%d\n",
- __func__, rc);
-
- /* Initiate a read transaction to wake up */
- cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- } else
- dev_vdbg(cd->dev, "%s: HW power up succeeds\n",
- __func__);
- mutex_unlock(&cd->system_lock);
-
- t = wait_event_timeout(cd->wait_q,
- (cd->int_status & CY_INT_AWAKE) == 0,
- msecs_to_jiffies(CY_CORE_WAKEUP_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(dev, "%s: TMO waiting for wakeup\n", __func__);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_AWAKE;
- /* Try starting up */
- cyttsp4_queue_startup_(cd);
- mutex_unlock(&cd->system_lock);
- }
-
- mutex_lock(&cd->system_lock);
- cd->sleep_state = SS_SLEEP_OFF;
- mutex_unlock(&cd->system_lock);
-
- cyttsp4_start_wd_timer(cd);
-
- return 0;
-}
-
-static int cyttsp4_core_wake(struct cyttsp4 *cd)
-{
- int rc;
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- return 0;
- }
-
- rc = cyttsp4_core_wake_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
- return rc;
-}
-
-static int cyttsp4_core_suspend(struct device *dev)
-{
- struct cyttsp4 *cd = dev_get_drvdata(dev);
- struct cyttsp4_mt_data *md = &cd->md;
- int rc;
-
- md->is_suspended = true;
-
- rc = cyttsp4_core_sleep(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error on sleep\n", __func__);
- return -EAGAIN;
- }
- return 0;
-}
-
-static int cyttsp4_core_resume(struct device *dev)
-{
- struct cyttsp4 *cd = dev_get_drvdata(dev);
- struct cyttsp4_mt_data *md = &cd->md;
- int rc;
-
- md->is_suspended = false;
-
- rc = cyttsp4_core_wake(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error on wake\n", __func__);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-EXPORT_GPL_RUNTIME_DEV_PM_OPS(cyttsp4_pm_ops,
- cyttsp4_core_suspend, cyttsp4_core_resume, NULL);
-
-static int cyttsp4_mt_open(struct input_dev *input)
-{
- pm_runtime_get(input->dev.parent);
- return 0;
-}
-
-static void cyttsp4_mt_close(struct input_dev *input)
-{
- struct cyttsp4_mt_data *md = input_get_drvdata(input);
- mutex_lock(&md->report_lock);
- if (!md->is_suspended)
- pm_runtime_put(input->dev.parent);
- mutex_unlock(&md->report_lock);
-}
-
-
-static int cyttsp4_setup_input_device(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- int signal = CY_IGNORE_VALUE;
- int max_x, max_y, max_p, min, max;
- int max_x_tmp, max_y_tmp;
- int i;
- int rc;
-
- dev_vdbg(dev, "%s: Initialize event signals\n", __func__);
- __set_bit(EV_ABS, md->input->evbit);
- __set_bit(EV_REL, md->input->evbit);
- __set_bit(EV_KEY, md->input->evbit);
-
- max_x_tmp = md->si->si_ofs.max_x;
- max_y_tmp = md->si->si_ofs.max_y;
-
- /* get maximum values from the sysinfo data */
- if (md->pdata->flags & CY_FLAG_FLIP) {
- max_x = max_y_tmp - 1;
- max_y = max_x_tmp - 1;
- } else {
- max_x = max_x_tmp - 1;
- max_y = max_y_tmp - 1;
- }
- max_p = md->si->si_ofs.max_p;
-
- /* set event signal capabilities */
- for (i = 0; i < (md->pdata->frmwrk->size / CY_NUM_ABS_SET); i++) {
- signal = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_SIGNAL_OST];
- if (signal != CY_IGNORE_VALUE) {
- __set_bit(signal, md->input->absbit);
- min = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_MIN_OST];
- max = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_MAX_OST];
- if (i == CY_ABS_ID_OST) {
- /* shift track ids down to start at 0 */
- max = max - min;
- min = min - min;
- } else if (i == CY_ABS_X_OST)
- max = max_x;
- else if (i == CY_ABS_Y_OST)
- max = max_y;
- else if (i == CY_ABS_P_OST)
- max = max_p;
- input_set_abs_params(md->input, signal, min, max,
- md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_FUZZ_OST],
- md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_FLAT_OST]);
- dev_dbg(dev, "%s: register signal=%02X min=%d max=%d\n",
- __func__, signal, min, max);
- if ((i == CY_ABS_ID_OST) &&
- (md->si->si_ofs.tch_rec_size <
- CY_TMA4XX_TCH_REC_SIZE))
- break;
- }
- }
-
- input_mt_init_slots(md->input, md->si->si_ofs.tch_abs[CY_TCH_T].max,
- INPUT_MT_DIRECT);
- rc = input_register_device(md->input);
- if (rc < 0)
- dev_err(dev, "%s: Error, failed register input device r=%d\n",
- __func__, rc);
- return rc;
-}
-
-static int cyttsp4_mt_probe(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- struct cyttsp4_mt_platform_data *pdata = cd->pdata->mt_pdata;
- int rc = 0;
-
- mutex_init(&md->report_lock);
- md->pdata = pdata;
- /* Create the input device and register it. */
- dev_vdbg(dev, "%s: Create the input device and register it\n",
- __func__);
- md->input = input_allocate_device();
- if (md->input == NULL) {
- dev_err(dev, "%s: Error, failed to allocate input device\n",
- __func__);
- rc = -ENOSYS;
- goto error_alloc_failed;
- }
-
- md->input->name = pdata->inp_dev_name;
- scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev));
- md->input->phys = md->phys;
- md->input->id.bustype = cd->bus_ops->bustype;
- md->input->dev.parent = dev;
- md->input->open = cyttsp4_mt_open;
- md->input->close = cyttsp4_mt_close;
- input_set_drvdata(md->input, md);
-
- /* get sysinfo */
- md->si = &cd->sysinfo;
-
- rc = cyttsp4_setup_input_device(cd);
- if (rc)
- goto error_init_input;
-
- return 0;
-
-error_init_input:
- input_free_device(md->input);
-error_alloc_failed:
- dev_err(dev, "%s failed.\n", __func__);
- return rc;
-}
-
-struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
- struct device *dev, u16 irq, size_t xfer_buf_size)
-{
- struct cyttsp4 *cd;
- struct cyttsp4_platform_data *pdata = dev_get_platdata(dev);
- unsigned long irq_flags;
- int rc = 0;
-
- if (!pdata || !pdata->core_pdata || !pdata->mt_pdata) {
- dev_err(dev, "%s: Missing platform data\n", __func__);
- rc = -ENODEV;
- goto error_no_pdata;
- }
-
- cd = kzalloc(sizeof(*cd), GFP_KERNEL);
- if (!cd) {
- dev_err(dev, "%s: Error, kzalloc\n", __func__);
- rc = -ENOMEM;
- goto error_alloc_data;
- }
-
- cd->xfer_buf = kzalloc(xfer_buf_size, GFP_KERNEL);
- if (!cd->xfer_buf) {
- dev_err(dev, "%s: Error, kzalloc\n", __func__);
- rc = -ENOMEM;
- goto error_free_cd;
- }
-
- /* Initialize device info */
- cd->dev = dev;
- cd->pdata = pdata;
- cd->cpdata = pdata->core_pdata;
- cd->bus_ops = ops;
-
- /* Initialize mutexes and spinlocks */
- mutex_init(&cd->system_lock);
- mutex_init(&cd->adap_lock);
-
- /* Initialize wait queue */
- init_waitqueue_head(&cd->wait_q);
-
- /* Initialize works */
- INIT_WORK(&cd->startup_work, cyttsp4_startup_work_function);
- INIT_WORK(&cd->watchdog_work, cyttsp4_watchdog_work);
-
- /* Initialize IRQ */
- cd->irq = gpio_to_irq(cd->cpdata->irq_gpio);
- if (cd->irq < 0) {
- rc = -EINVAL;
- goto error_free_xfer;
- }
-
- dev_set_drvdata(dev, cd);
-
- /* Call platform init function */
- if (cd->cpdata->init) {
- dev_dbg(cd->dev, "%s: Init HW\n", __func__);
- rc = cd->cpdata->init(cd->cpdata, 1, cd->dev);
- } else {
- dev_dbg(cd->dev, "%s: No HW INIT function\n", __func__);
- rc = 0;
- }
- if (rc < 0)
- dev_err(cd->dev, "%s: HW Init fail r=%d\n", __func__, rc);
-
- dev_dbg(dev, "%s: initialize threaded irq=%d\n", __func__, cd->irq);
- if (cd->cpdata->level_irq_udelay > 0)
- /* use level triggered interrupts */
- irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT;
- else
- /* use edge triggered interrupts */
- irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
-
- rc = request_threaded_irq(cd->irq, NULL, cyttsp4_irq, irq_flags,
- dev_name(dev), cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error, could not request irq\n", __func__);
- goto error_request_irq;
- }
-
- /* Setup watchdog timer */
- timer_setup(&cd->watchdog_timer, cyttsp4_watchdog_timer, 0);
-
- /*
- * call startup directly to ensure that the device
- * is tested before leaving the probe
- */
- rc = cyttsp4_startup(cd);
-
- /* Do not fail probe if startup fails but the device is detected */
- if (rc < 0 && cd->mode == CY_MODE_UNKNOWN) {
- dev_err(cd->dev, "%s: Fail initial startup r=%d\n",
- __func__, rc);
- goto error_startup;
- }
-
- rc = cyttsp4_mt_probe(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error, fail mt probe\n", __func__);
- goto error_startup;
- }
-
- pm_runtime_enable(dev);
-
- return cd;
-
-error_startup:
- cancel_work_sync(&cd->startup_work);
- cyttsp4_stop_wd_timer(cd);
- pm_runtime_disable(dev);
- cyttsp4_free_si_ptrs(cd);
- free_irq(cd->irq, cd);
-error_request_irq:
- if (cd->cpdata->init)
- cd->cpdata->init(cd->cpdata, 0, dev);
-error_free_xfer:
- kfree(cd->xfer_buf);
-error_free_cd:
- kfree(cd);
-error_alloc_data:
-error_no_pdata:
- dev_err(dev, "%s failed.\n", __func__);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(cyttsp4_probe);
-
-static void cyttsp4_mt_release(struct cyttsp4_mt_data *md)
-{
- input_unregister_device(md->input);
- input_set_drvdata(md->input, NULL);
-}
-
-int cyttsp4_remove(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
-
- cyttsp4_mt_release(&cd->md);
-
-	/*
-	 * Suspend the device before freeing the startup_work and stopping
-	 * the watchdog, since the sleep function restarts the watchdog on
-	 * failure.
-	 */
- pm_runtime_suspend(dev);
- pm_runtime_disable(dev);
-
- cancel_work_sync(&cd->startup_work);
-
- cyttsp4_stop_wd_timer(cd);
-
- free_irq(cd->irq, cd);
- if (cd->cpdata->init)
- cd->cpdata->init(cd->cpdata, 0, dev);
- cyttsp4_free_si_ptrs(cd);
- kfree(cd);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp4_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen core driver");
-MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/cyttsp4_core.h b/drivers/input/touchscreen/cyttsp4_core.h
deleted file mode 100644
index 6262f6e45075..000000000000
--- a/drivers/input/touchscreen/cyttsp4_core.h
+++ /dev/null
@@ -1,448 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * cyttsp4_core.h
- * Cypress TrueTouch(TM) Standard Product V4 Core driver module.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2012 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#ifndef _LINUX_CYTTSP4_CORE_H
-#define _LINUX_CYTTSP4_CORE_H
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/limits.h>
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/platform_data/cyttsp4.h>
-
-#define CY_REG_BASE 0x00
-
-#define CY_POST_CODEL_WDG_RST 0x01
-#define CY_POST_CODEL_CFG_DATA_CRC_FAIL 0x02
-#define CY_POST_CODEL_PANEL_TEST_FAIL 0x04
-
-#define CY_NUM_BTN_PER_REG 4
-
-/* touch record system information offset masks and shifts */
-#define CY_BYTE_OFS_MASK 0x1F
-#define CY_BOFS_MASK 0xE0
-#define CY_BOFS_SHIFT 5
-
-#define CY_TMA1036_TCH_REC_SIZE 6
-#define CY_TMA4XX_TCH_REC_SIZE 9
-#define CY_TMA1036_MAX_TCH 0x0E
-#define CY_TMA4XX_MAX_TCH 0x1E
-
-#define CY_NORMAL_ORIGIN 0 /* upper, left corner */
-#define CY_INVERT_ORIGIN 1 /* lower, right corner */
-
-/* helpers */
-#define GET_NUM_TOUCHES(x) ((x) & 0x1F)
-#define IS_LARGE_AREA(x) ((x) & 0x20)
-#define IS_BAD_PKT(x) ((x) & 0x20)
-#define IS_BOOTLOADER(hst_mode, reset_detect) \
- ((hst_mode) & 0x01 || (reset_detect) != 0)
-#define IS_TMO(t) ((t) == 0)
-
-
-enum cyttsp_cmd_bits {
- CY_CMD_COMPLETE = (1 << 6),
-};
-
-/* Timeout in ms. */
-#define CY_WATCHDOG_TIMEOUT 1000
-
-#define CY_MAX_PRINT_SIZE 512
-#ifdef VERBOSE_DEBUG
-#define CY_MAX_PRBUF_SIZE PIPE_BUF
-#define CY_PR_TRUNCATED " truncated..."
-#endif
-
-enum cyttsp4_ic_grpnum {
- CY_IC_GRPNUM_RESERVED,
- CY_IC_GRPNUM_CMD_REGS,
- CY_IC_GRPNUM_TCH_REP,
- CY_IC_GRPNUM_DATA_REC,
- CY_IC_GRPNUM_TEST_REC,
- CY_IC_GRPNUM_PCFG_REC,
- CY_IC_GRPNUM_TCH_PARM_VAL,
- CY_IC_GRPNUM_TCH_PARM_SIZE,
- CY_IC_GRPNUM_RESERVED1,
- CY_IC_GRPNUM_RESERVED2,
- CY_IC_GRPNUM_OPCFG_REC,
- CY_IC_GRPNUM_DDATA_REC,
- CY_IC_GRPNUM_MDATA_REC,
- CY_IC_GRPNUM_TEST_REGS,
- CY_IC_GRPNUM_BTN_KEYS,
- CY_IC_GRPNUM_TTHE_REGS,
- CY_IC_GRPNUM_NUM
-};
-
-enum cyttsp4_int_state {
- CY_INT_NONE,
- CY_INT_IGNORE = (1 << 0),
- CY_INT_MODE_CHANGE = (1 << 1),
- CY_INT_EXEC_CMD = (1 << 2),
- CY_INT_AWAKE = (1 << 3),
-};
-
-enum cyttsp4_mode {
- CY_MODE_UNKNOWN,
- CY_MODE_BOOTLOADER = (1 << 1),
- CY_MODE_OPERATIONAL = (1 << 2),
- CY_MODE_SYSINFO = (1 << 3),
- CY_MODE_CAT = (1 << 4),
- CY_MODE_STARTUP = (1 << 5),
- CY_MODE_LOADER = (1 << 6),
- CY_MODE_CHANGE_MODE = (1 << 7),
- CY_MODE_CHANGED = (1 << 8),
- CY_MODE_CMD_COMPLETE = (1 << 9),
-};
-
-enum cyttsp4_sleep_state {
- SS_SLEEP_OFF,
- SS_SLEEP_ON,
- SS_SLEEPING,
- SS_WAKING,
-};
-
-enum cyttsp4_startup_state {
- STARTUP_NONE,
- STARTUP_QUEUED,
- STARTUP_RUNNING,
-};
-
-#define CY_NUM_REVCTRL 8
-struct cyttsp4_cydata {
- u8 ttpidh;
- u8 ttpidl;
- u8 fw_ver_major;
- u8 fw_ver_minor;
- u8 revctrl[CY_NUM_REVCTRL];
- u8 blver_major;
- u8 blver_minor;
- u8 jtag_si_id3;
- u8 jtag_si_id2;
- u8 jtag_si_id1;
- u8 jtag_si_id0;
- u8 mfgid_sz;
- u8 cyito_idh;
- u8 cyito_idl;
- u8 cyito_verh;
- u8 cyito_verl;
- u8 ttsp_ver_major;
- u8 ttsp_ver_minor;
- u8 device_info;
- u8 mfg_id[];
-} __packed;
-
-struct cyttsp4_test {
- u8 post_codeh;
- u8 post_codel;
-} __packed;
-
-struct cyttsp4_pcfg {
- u8 electrodes_x;
- u8 electrodes_y;
- u8 len_xh;
- u8 len_xl;
- u8 len_yh;
- u8 len_yl;
- u8 res_xh;
- u8 res_xl;
- u8 res_yh;
- u8 res_yl;
- u8 max_zh;
- u8 max_zl;
- u8 panel_info0;
-} __packed;
-
-struct cyttsp4_tch_rec_params {
- u8 loc;
- u8 size;
-} __packed;
-
-#define CY_NUM_TCH_FIELDS 7
-#define CY_NUM_EXT_TCH_FIELDS 3
-struct cyttsp4_opcfg {
- u8 cmd_ofs;
- u8 rep_ofs;
- u8 rep_szh;
- u8 rep_szl;
- u8 num_btns;
- u8 tt_stat_ofs;
- u8 obj_cfg0;
- u8 max_tchs;
- u8 tch_rec_size;
- struct cyttsp4_tch_rec_params tch_rec_old[CY_NUM_TCH_FIELDS];
- u8 btn_rec_size; /* btn record size (in bytes) */
- u8 btn_diff_ofs; /* btn data loc, diff counts */
- u8 btn_diff_size; /* btn size of diff counts (in bits) */
- struct cyttsp4_tch_rec_params tch_rec_new[CY_NUM_EXT_TCH_FIELDS];
-} __packed;
-
-struct cyttsp4_sysinfo_ptr {
- struct cyttsp4_cydata *cydata;
- struct cyttsp4_test *test;
- struct cyttsp4_pcfg *pcfg;
- struct cyttsp4_opcfg *opcfg;
- struct cyttsp4_ddata *ddata;
- struct cyttsp4_mdata *mdata;
-} __packed;
-
-struct cyttsp4_sysinfo_data {
- u8 hst_mode;
- u8 reserved;
- u8 map_szh;
- u8 map_szl;
- u8 cydata_ofsh;
- u8 cydata_ofsl;
- u8 test_ofsh;
- u8 test_ofsl;
- u8 pcfg_ofsh;
- u8 pcfg_ofsl;
- u8 opcfg_ofsh;
- u8 opcfg_ofsl;
- u8 ddata_ofsh;
- u8 ddata_ofsl;
- u8 mdata_ofsh;
- u8 mdata_ofsl;
-} __packed;
-
-enum cyttsp4_tch_abs { /* for ordering within the extracted touch data array */
- CY_TCH_X, /* X */
- CY_TCH_Y, /* Y */
- CY_TCH_P, /* P (Z) */
- CY_TCH_T, /* TOUCH ID */
- CY_TCH_E, /* EVENT ID */
- CY_TCH_O, /* OBJECT ID */
- CY_TCH_W, /* SIZE */
- CY_TCH_MAJ, /* TOUCH_MAJOR */
- CY_TCH_MIN, /* TOUCH_MINOR */
- CY_TCH_OR, /* ORIENTATION */
- CY_TCH_NUM_ABS
-};
-
-struct cyttsp4_touch {
- int abs[CY_TCH_NUM_ABS];
-};
-
-struct cyttsp4_tch_abs_params {
- size_t ofs; /* abs byte offset */
- size_t size; /* size in bits */
- size_t max; /* max value */
- size_t bofs; /* bit offset */
-};
-
-struct cyttsp4_sysinfo_ofs {
- size_t chip_type;
- size_t cmd_ofs;
- size_t rep_ofs;
- size_t rep_sz;
- size_t num_btns;
- size_t num_btn_regs; /* ceil(num_btns/4) */
- size_t tt_stat_ofs;
- size_t tch_rec_size;
- size_t obj_cfg0;
- size_t max_tchs;
- size_t mode_size;
- size_t data_size;
- size_t map_sz;
- size_t max_x;
- size_t x_origin; /* left or right corner */
- size_t max_y;
- size_t y_origin; /* upper or lower corner */
- size_t max_p;
- size_t cydata_ofs;
- size_t test_ofs;
- size_t pcfg_ofs;
- size_t opcfg_ofs;
- size_t ddata_ofs;
- size_t mdata_ofs;
- size_t cydata_size;
- size_t test_size;
- size_t pcfg_size;
- size_t opcfg_size;
- size_t ddata_size;
- size_t mdata_size;
- size_t btn_keys_size;
- struct cyttsp4_tch_abs_params tch_abs[CY_TCH_NUM_ABS];
- size_t btn_rec_size; /* btn record size (in bytes) */
-	size_t btn_diff_ofs;/* btn data loc, diff counts (Op-Mode byte ofs) */
- size_t btn_diff_size;/* btn size of diff counts (in bits) */
-};
-
-enum cyttsp4_btn_state {
- CY_BTN_RELEASED,
- CY_BTN_PRESSED,
- CY_BTN_NUM_STATE
-};
-
-struct cyttsp4_btn {
- bool enabled;
- int state; /* CY_BTN_PRESSED, CY_BTN_RELEASED */
- int key_code;
-};
-
-struct cyttsp4_sysinfo {
- bool ready;
- struct cyttsp4_sysinfo_data si_data;
- struct cyttsp4_sysinfo_ptr si_ptrs;
- struct cyttsp4_sysinfo_ofs si_ofs;
- struct cyttsp4_btn *btn; /* button states */
- u8 *btn_rec_data; /* button diff count data */
- u8 *xy_mode; /* operational mode and status regs */
- u8 *xy_data; /* operational touch regs */
-};
-
-struct cyttsp4_mt_data {
- struct cyttsp4_mt_platform_data *pdata;
- struct cyttsp4_sysinfo *si;
- struct input_dev *input;
- struct mutex report_lock;
- bool is_suspended;
- char phys[NAME_MAX];
- int num_prv_tch;
-};
-
-struct cyttsp4 {
- struct device *dev;
- struct mutex system_lock;
- struct mutex adap_lock;
- enum cyttsp4_mode mode;
- enum cyttsp4_sleep_state sleep_state;
- enum cyttsp4_startup_state startup_state;
- int int_status;
- wait_queue_head_t wait_q;
- int irq;
- struct work_struct startup_work;
- struct work_struct watchdog_work;
- struct timer_list watchdog_timer;
- struct cyttsp4_sysinfo sysinfo;
- void *exclusive_dev;
- int exclusive_waits;
- atomic_t ignore_irq;
- bool invalid_touch_app;
- struct cyttsp4_mt_data md;
- struct cyttsp4_platform_data *pdata;
- struct cyttsp4_core_platform_data *cpdata;
- const struct cyttsp4_bus_ops *bus_ops;
- u8 *xfer_buf;
-#ifdef VERBOSE_DEBUG
- u8 pr_buf[CY_MAX_PRBUF_SIZE];
-#endif
-};
-
-struct cyttsp4_bus_ops {
- u16 bustype;
- int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length,
- const void *values);
- int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length,
- void *values);
-};
-
-enum cyttsp4_hst_mode_bits {
- CY_HST_TOGGLE = (1 << 7),
- CY_HST_MODE_CHANGE = (1 << 3),
- CY_HST_MODE = (7 << 4),
- CY_HST_OPERATE = (0 << 4),
- CY_HST_SYSINFO = (1 << 4),
- CY_HST_CAT = (2 << 4),
- CY_HST_LOWPOW = (1 << 2),
- CY_HST_SLEEP = (1 << 1),
- CY_HST_RESET = (1 << 0),
-};
-
-/* abs settings */
-#define CY_IGNORE_VALUE 0xFFFF
-
-/* abs signal capabilities offsets in the frameworks array */
-enum cyttsp4_sig_caps {
- CY_SIGNAL_OST,
- CY_MIN_OST,
- CY_MAX_OST,
- CY_FUZZ_OST,
- CY_FLAT_OST,
- CY_NUM_ABS_SET /* number of signal capability fields */
-};
-
-/* abs axis signal offsets in the frameworks array */
-enum cyttsp4_sig_ost {
- CY_ABS_X_OST,
- CY_ABS_Y_OST,
- CY_ABS_P_OST,
- CY_ABS_W_OST,
- CY_ABS_ID_OST,
- CY_ABS_MAJ_OST,
- CY_ABS_MIN_OST,
- CY_ABS_OR_OST,
- CY_NUM_ABS_OST /* number of abs signals */
-};
-
-enum cyttsp4_flags {
- CY_FLAG_NONE = 0x00,
- CY_FLAG_HOVER = 0x04,
- CY_FLAG_FLIP = 0x08,
- CY_FLAG_INV_X = 0x10,
- CY_FLAG_INV_Y = 0x20,
- CY_FLAG_VKEYS = 0x40,
-};
-
-enum cyttsp4_object_id {
- CY_OBJ_STANDARD_FINGER,
- CY_OBJ_LARGE_OBJECT,
- CY_OBJ_STYLUS,
- CY_OBJ_HOVER,
-};
-
-enum cyttsp4_event_id {
- CY_EV_NO_EVENT,
- CY_EV_TOUCHDOWN,
- CY_EV_MOVE, /* significant displacement (> act dist) */
- CY_EV_LIFTOFF, /* record reports last position */
-};
-
-/* x-axis resolution of panel in pixels */
-#define CY_PCFG_RESOLUTION_X_MASK 0x7F
-
-/* y-axis resolution of panel in pixels */
-#define CY_PCFG_RESOLUTION_Y_MASK 0x7F
-
-/* x-axis, 0:origin is on left side of panel, 1: right */
-#define CY_PCFG_ORIGIN_X_MASK 0x80
-
-/* y-axis, 0:origin is on top side of panel, 1: bottom */
-#define CY_PCFG_ORIGIN_Y_MASK 0x80
-
-static inline int cyttsp4_adap_read(struct cyttsp4 *ts, u16 addr, int size,
- void *buf)
-{
- return ts->bus_ops->read(ts->dev, ts->xfer_buf, addr, size, buf);
-}
-
-static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u16 addr, int size,
- const void *buf)
-{
- return ts->bus_ops->write(ts->dev, ts->xfer_buf, addr, size, buf);
-}
-
-extern struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
- struct device *dev, u16 irq, size_t xfer_buf_size);
-extern int cyttsp4_remove(struct cyttsp4 *ts);
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, const void *values);
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, void *values);
-extern const struct dev_pm_ops cyttsp4_pm_ops;
-
-#endif /* _LINUX_CYTTSP4_CORE_H */
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
deleted file mode 100644
index da32c151def5..000000000000
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp4_i2c.c
- * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- * Copyright (C) 2013 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-
-#include <linux/i2c.h>
-#include <linux/input.h>
-
-#define CYTTSP4_I2C_DATA_SIZE (3 * 256)
-
-static const struct cyttsp4_bus_ops cyttsp4_i2c_bus_ops = {
- .bustype = BUS_I2C,
- .write = cyttsp_i2c_write_block_data,
- .read = cyttsp_i2c_read_block_data,
-};
-
-static int cyttsp4_i2c_probe(struct i2c_client *client)
-{
- struct cyttsp4 *ts;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev, "I2C functionality not supported\n");
- return -EIO;
- }
-
- ts = cyttsp4_probe(&cyttsp4_i2c_bus_ops, &client->dev, client->irq,
- CYTTSP4_I2C_DATA_SIZE);
-
- return PTR_ERR_OR_ZERO(ts);
-}
-
-static void cyttsp4_i2c_remove(struct i2c_client *client)
-{
- struct cyttsp4 *ts = i2c_get_clientdata(client);
-
- cyttsp4_remove(ts);
-}
-
-static const struct i2c_device_id cyttsp4_i2c_id[] = {
- { CYTTSP4_I2C_NAME },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cyttsp4_i2c_id);
-
-static struct i2c_driver cyttsp4_i2c_driver = {
- .driver = {
- .name = CYTTSP4_I2C_NAME,
- .pm = pm_ptr(&cyttsp4_pm_ops),
- },
- .probe = cyttsp4_i2c_probe,
- .remove = cyttsp4_i2c_remove,
- .id_table = cyttsp4_i2c_id,
-};
-
-module_i2c_driver(cyttsp4_i2c_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
-MODULE_AUTHOR("Cypress");
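
Both cyttsp4 transport probes ended with the same idiom: cyttsp4_probe() returns an ERR_PTR()-encoded pointer, and PTR_ERR_OR_ZERO() collapses the usual IS_ERR() check into one line. A minimal sketch (example_check is an illustrative name):

#include <linux/err.h>

/* Returns 0 for a valid pointer, or the encoded negative errno. */
static int example_check(void *handle)
{
	return PTR_ERR_OR_ZERO(handle);
}
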
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
deleted file mode 100644
index 944fbbe9113e..000000000000
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Source for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) SPI touchscreen driver.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- * Copyright (C) 2013 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/spi/spi.h>
-
-#define CY_SPI_WR_OP 0x00 /* r/~w */
-#define CY_SPI_RD_OP 0x01
-#define CY_SPI_BITS_PER_WORD 8
-#define CY_SPI_A8_BIT 0x02
-#define CY_SPI_WR_HEADER_BYTES 2
-#define CY_SPI_RD_HEADER_BYTES 1
-#define CY_SPI_CMD_BYTES 2
-#define CY_SPI_SYNC_BYTE 0
-#define CY_SPI_SYNC_ACK 0x62 /* from TRM *A protocol */
-#define CY_SPI_DATA_SIZE (2 * 256)
-
-#define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE)
-
-static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf,
- u8 op, u16 reg, u8 *buf, int length)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message msg;
- struct spi_transfer xfer[2];
- u8 *wr_buf = &xfer_buf[0];
- u8 rd_buf[CY_SPI_CMD_BYTES];
- int retval;
- int i;
-
- if (length > CY_SPI_DATA_SIZE) {
- dev_err(dev, "%s: length %d is too big.\n",
- __func__, length);
- return -EINVAL;
- }
-
- memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE);
- memset(rd_buf, 0, CY_SPI_CMD_BYTES);
-
- wr_buf[0] = op + (((reg >> 8) & 0x1) ? CY_SPI_A8_BIT : 0);
- if (op == CY_SPI_WR_OP) {
- wr_buf[1] = reg & 0xFF;
- if (length > 0)
- memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length);
- }
-
- memset(xfer, 0, sizeof(xfer));
- spi_message_init(&msg);
-
-	/*
-	 * We set both TX and RX buffers because Cypress TTSP
-	 * requires full duplex operation.
-	 */
- xfer[0].tx_buf = wr_buf;
- xfer[0].rx_buf = rd_buf;
- switch (op) {
- case CY_SPI_WR_OP:
- xfer[0].len = length + CY_SPI_CMD_BYTES;
- spi_message_add_tail(&xfer[0], &msg);
- break;
-
- case CY_SPI_RD_OP:
- xfer[0].len = CY_SPI_RD_HEADER_BYTES;
- spi_message_add_tail(&xfer[0], &msg);
-
- xfer[1].rx_buf = buf;
- xfer[1].len = length;
- spi_message_add_tail(&xfer[1], &msg);
- break;
-
- default:
- dev_err(dev, "%s: bad operation code=%d\n", __func__, op);
- return -EINVAL;
- }
-
- retval = spi_sync(spi, &msg);
- if (retval < 0) {
- dev_dbg(dev, "%s: spi_sync() error %d, len=%d, op=%d\n",
- __func__, retval, xfer[1].len, op);
-
- /*
- * do not return here since was a bad ACK sequence
- * let the following ACK check handle any errors and
- * allow silent retries
- */
- }
-
- if (rd_buf[CY_SPI_SYNC_BYTE] != CY_SPI_SYNC_ACK) {
- dev_dbg(dev, "%s: operation %d failed\n", __func__, op);
-
- for (i = 0; i < CY_SPI_CMD_BYTES; i++)
- dev_dbg(dev, "%s: test rd_buf[%d]:0x%02x\n",
- __func__, i, rd_buf[i]);
- for (i = 0; i < length; i++)
- dev_dbg(dev, "%s: test buf[%d]:0x%02x\n",
- __func__, i, buf[i]);
-
- return -EIO;
- }
-
- return 0;
-}
-
-static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, void *data)
-{
- int rc;
-
- rc = cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, NULL, 0);
- if (rc)
- return rc;
- else
- return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_RD_OP, addr, data,
- length);
-}
-
-static int cyttsp_spi_write_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, const void *data)
-{
- return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, (void *)data,
- length);
-}
-
-static const struct cyttsp4_bus_ops cyttsp_spi_bus_ops = {
- .bustype = BUS_SPI,
- .write = cyttsp_spi_write_block_data,
- .read = cyttsp_spi_read_block_data,
-};
-
-static int cyttsp4_spi_probe(struct spi_device *spi)
-{
- struct cyttsp4 *ts;
- int error;
-
-	/* Set up SPI */
- spi->bits_per_word = CY_SPI_BITS_PER_WORD;
- spi->mode = SPI_MODE_0;
- error = spi_setup(spi);
- if (error < 0) {
- dev_err(&spi->dev, "%s: SPI setup error %d\n",
- __func__, error);
- return error;
- }
-
- ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq,
- CY_SPI_DATA_BUF_SIZE);
-
- return PTR_ERR_OR_ZERO(ts);
-}
-
-static void cyttsp4_spi_remove(struct spi_device *spi)
-{
- struct cyttsp4 *ts = spi_get_drvdata(spi);
- cyttsp4_remove(ts);
-}
-
-static struct spi_driver cyttsp4_spi_driver = {
- .driver = {
- .name = CYTTSP4_SPI_NAME,
- .pm = pm_ptr(&cyttsp4_pm_ops),
- },
- .probe = cyttsp4_spi_probe,
- .remove = cyttsp4_spi_remove,
-};
-
-module_spi_driver(cyttsp4_spi_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
-MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("spi:cyttsp4");
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 132ed5786e84..b8ce6012364c 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -17,7 +17,6 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/property.h>
@@ -615,17 +614,14 @@ static int cyttsp_parse_properties(struct cyttsp *ts)
return 0;
}
-static void cyttsp_disable_regulators(void *_ts)
-{
- struct cyttsp *ts = _ts;
-
- regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
- ts->regulators);
-}
-
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
+ /*
+ * VCPIN is the analog voltage supply
+ * VDD is the digital voltage supply
+ */
+ static const char * const supplies[] = { "vcpin", "vdd" };
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
@@ -643,29 +639,10 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
ts->bus_ops = bus_ops;
ts->irq = irq;
- /*
- * VCPIN is the analog voltage supply
- * VDD is the digital voltage supply
- */
- ts->regulators[0].supply = "vcpin";
- ts->regulators[1].supply = "vdd";
- error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
- ts->regulators);
- if (error) {
- dev_err(dev, "Failed to get regulators: %d\n", error);
- return ERR_PTR(error);
- }
-
- error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
- ts->regulators);
- if (error) {
- dev_err(dev, "Cannot enable regulators: %d\n", error);
- return ERR_PTR(error);
- }
-
- error = devm_add_action_or_reset(dev, cyttsp_disable_regulators, ts);
+ error = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies),
+ supplies);
if (error) {
- dev_err(dev, "failed to install chip disable handler\n");
+ dev_err(dev, "Failed to enable regulators: %d\n", error);
return ERR_PTR(error);
}
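
The hunk above replaces the get/enable/add-disable-action sequence with devm_regulator_bulk_get_enable(), which acquires and enables all supplies in one call and disables and releases them automatically on unbind. A minimal sketch of the pattern (example_power_up is an illustrative name):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static int example_power_up(struct device *dev)
{
	static const char * const supplies[] = { "vcpin", "vdd" };

	/* Supplies are disabled and put automatically on driver detach. */
	return devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies),
					      supplies);
}

Because the devm core owns the teardown, the cyttsp_disable_regulators() action and the regulator_bulk_data array in struct cyttsp can both be dropped.
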
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 075509e695a2..40a605d20285 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -122,7 +122,6 @@ struct cyttsp {
enum cyttsp_state state;
bool suspended;
- struct regulator_bulk_data regulators[2];
struct gpio_desc *reset_gpio;
bool use_hndshk;
u8 act_dist;
@@ -137,10 +136,6 @@ struct cyttsp {
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size);
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, const void *values);
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, void *values);
extern const struct dev_pm_ops cyttsp_pm_ops;
#endif /* __CYTTSP_CORE_H__ */
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index bf13b3448a6b..cb15600549cd 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -22,6 +22,61 @@
#define CY_I2C_DATA_SIZE 128
+static int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
+ u16 addr, u8 length, void *values)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 client_addr = client->addr | ((addr >> 8) & 0x1);
+ u8 addr_lo = addr & 0xFF;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr_lo,
+ },
+ {
+ .addr = client_addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = values,
+ },
+ };
+ int retval;
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval < 0)
+ return retval;
+
+ return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
+}
+
+static int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf,
+ u16 addr, u8 length, const void *values)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 client_addr = client->addr | ((addr >> 8) & 0x1);
+ u8 addr_lo = addr & 0xFF;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client_addr,
+ .flags = 0,
+ .len = length + 1,
+ .buf = xfer_buf,
+ },
+ };
+ int retval;
+
+ xfer_buf[0] = addr_lo;
+ memcpy(&xfer_buf[1], values, length);
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval < 0)
+ return retval;
+
+ return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
+}
+
static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
.bustype = BUS_I2C,
.write = cyttsp_i2c_write_block_data,
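
Both helpers moved in above encode the same quirk: the TTSP register space is 9 bits wide, so bit 8 of the register address is folded into the 7-bit I2C slave address while only the low byte travels in the transfer itself. A minimal sketch of that split (example_split_addr is an illustrative name):

#include <linux/types.h>

static void example_split_addr(u16 reg, u8 base, u8 *i2c_addr, u8 *reg_lo)
{
	*i2c_addr = base | ((reg >> 8) & 0x1);	/* bit 8 rides in the slave address */
	*reg_lo = reg & 0xFF;			/* low byte goes on the wire */
}
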
diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c
deleted file mode 100644
index 7e752fb9fad7..000000000000
--- a/drivers/input/touchscreen/cyttsp_i2c_common.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp_i2c_common.c
- * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver.
- * For use with Cypress Txx3xx and Txx4xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "cyttsp4_core.h"
-
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, void *values)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u8 client_addr = client->addr | ((addr >> 8) & 0x1);
- u8 addr_lo = addr & 0xFF;
- struct i2c_msg msgs[] = {
- {
- .addr = client_addr,
- .flags = 0,
- .len = 1,
- .buf = &addr_lo,
- },
- {
- .addr = client_addr,
- .flags = I2C_M_RD,
- .len = length,
- .buf = values,
- },
- };
- int retval;
-
- retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (retval < 0)
- return retval;
-
- return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp_i2c_read_block_data);
-
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, const void *values)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u8 client_addr = client->addr | ((addr >> 8) & 0x1);
- u8 addr_lo = addr & 0xFF;
- struct i2c_msg msgs[] = {
- {
- .addr = client_addr,
- .flags = 0,
- .len = length + 1,
- .buf = xfer_buf,
- },
- };
- int retval;
-
- xfer_buf[0] = addr_lo;
- memcpy(&xfer_buf[1], values, length);
-
- retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (retval < 0)
- return retval;
-
- return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp_i2c_write_block_data);
-
-
-MODULE_DESCRIPTION("Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/goodix_berlin.h b/drivers/input/touchscreen/goodix_berlin.h
index 1fd77eb69c9a..38b6f9ddbdef 100644
--- a/drivers/input/touchscreen/goodix_berlin.h
+++ b/drivers/input/touchscreen/goodix_berlin.h
@@ -20,5 +20,6 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
struct regmap *regmap);
extern const struct dev_pm_ops goodix_berlin_pm_ops;
+extern const struct attribute_group *goodix_berlin_groups[];
#endif
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index e7b41a926ef8..0bfca897ce5a 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -672,6 +672,49 @@ static void goodix_berlin_power_off_act(void *data)
goodix_berlin_power_off(cd);
}
+static ssize_t registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+ int error;
+
+ error = regmap_raw_read(cd->regmap, off, buf, count);
+
+ return error ? error : count;
+}
+
+static ssize_t registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+ int error;
+
+ error = regmap_raw_write(cd->regmap, off, buf, count);
+
+ return error ? error : count;
+}
+
+static BIN_ATTR_ADMIN_RW(registers, 0);
+
+static struct bin_attribute *goodix_berlin_bin_attrs[] = {
+ &bin_attr_registers,
+ NULL,
+};
+
+static const struct attribute_group goodix_berlin_attr_group = {
+ .bin_attrs = goodix_berlin_bin_attrs,
+};
+
+const struct attribute_group *goodix_berlin_groups[] = {
+ &goodix_berlin_attr_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(goodix_berlin_groups);
+
int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
struct regmap *regmap)
{
diff --git a/drivers/input/touchscreen/goodix_berlin_i2c.c b/drivers/input/touchscreen/goodix_berlin_i2c.c
index 2e7098078838..ad7a60d94338 100644
--- a/drivers/input/touchscreen/goodix_berlin_i2c.c
+++ b/drivers/input/touchscreen/goodix_berlin_i2c.c
@@ -64,6 +64,7 @@ static struct i2c_driver goodix_berlin_i2c_driver = {
.name = "goodix-berlin-i2c",
.of_match_table = goodix_berlin_i2c_of_match,
.pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ .dev_groups = goodix_berlin_groups,
},
.probe = goodix_berlin_i2c_probe,
.id_table = goodix_berlin_i2c_id,
diff --git a/drivers/input/touchscreen/goodix_berlin_spi.c b/drivers/input/touchscreen/goodix_berlin_spi.c
index 82774a412956..a2d80e84391b 100644
--- a/drivers/input/touchscreen/goodix_berlin_spi.c
+++ b/drivers/input/touchscreen/goodix_berlin_spi.c
@@ -169,6 +169,7 @@ static struct spi_driver goodix_berlin_spi_driver = {
.name = "goodix-berlin-spi",
.of_match_table = goodix_berlin_spi_of_match,
.pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ .dev_groups = goodix_berlin_groups,
},
.probe = goodix_berlin_spi_probe,
.id_table = goodix_berlin_spi_ids,
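
With the group wired into .dev_groups on both transports, the root-only "registers" attribute appears for every bound device, and its file offset maps directly onto the register address passed to regmap_raw_read()/regmap_raw_write(). A plain-C usage sketch from userspace, assuming a hypothetical sysfs path and register offset:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char val[2];
	int fd = open("/sys/bus/i2c/devices/1-005d/registers", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The pread() offset 0x1000 becomes the regmap register address. */
	if (pread(fd, val, sizeof(val), 0x1000) == (ssize_t)sizeof(val))
		printf("reg 0x1000 = %02x %02x\n", val[0], val[1]);
	close(fd);
	return 0;
}
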
diff --git a/drivers/input/touchscreen/hynitron_cstxxx.c b/drivers/input/touchscreen/hynitron_cstxxx.c
index 05946fee4fd4..f72834859282 100644
--- a/drivers/input/touchscreen/hynitron_cstxxx.c
+++ b/drivers/input/touchscreen/hynitron_cstxxx.c
@@ -470,7 +470,7 @@ static const struct hynitron_ts_chip_data cst3xx_data = {
};
static const struct i2c_device_id hyn_tpd_id[] = {
- { .name = "hynitron_ts", 0 },
+ { .name = "hynitron_ts" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i2c, hyn_tpd_id);
diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
index 3eb762896345..5569641f05f6 100644
--- a/drivers/input/touchscreen/ilitek_ts_i2c.c
+++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/errno.h>
#include <linux/acpi.h>
@@ -37,6 +36,8 @@
#define ILITEK_TP_CMD_GET_MCU_VER 0x61
#define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+#define ILITEK_TP_I2C_REPORT_ID 0x48
+
#define REPORT_COUNT_ADDRESS 61
#define ILITEK_SUPPORT_MAX_POINT 40
@@ -160,15 +161,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64);
if (error) {
dev_err(dev, "get touch info failed, err:%d\n", error);
- goto err_sync_frame;
+ return error;
+ }
+
+ if (buf[0] != ILITEK_TP_I2C_REPORT_ID) {
+ dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]);
+ return -EINVAL;
}
report_max_point = buf[REPORT_COUNT_ADDRESS];
if (report_max_point > ts->max_tp) {
dev_err(dev, "FW report max point:%d > panel info. max:%d\n",
report_max_point, ts->max_tp);
- error = -EINVAL;
- goto err_sync_frame;
+ return -EINVAL;
}
count = DIV_ROUND_UP(report_max_point, packet_max_point);
@@ -178,7 +183,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
if (error) {
dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n",
count, error);
- goto err_sync_frame;
+ return error;
}
}
@@ -203,10 +208,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
ilitek_touch_down(ts, id, x, y);
}
-err_sync_frame:
input_mt_sync_frame(input);
input_sync(input);
- return error;
+
+ return 0;
}
/* APIs of cmds for ILITEK Touch IC */
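
The ilitek rework above returns errors before touching the input core and validates the report ID before trusting any coordinate data; input_mt_sync_frame()/input_sync() now run only once a report has parsed cleanly. A minimal sketch of the added check (EXAMPLE_REPORT_ID mirrors ILITEK_TP_I2C_REPORT_ID):

#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_REPORT_ID	0x48

static int example_validate(const u8 *buf)
{
	return buf[0] == EXAMPLE_REPORT_ID ? 0 : -EINVAL;
}
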
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
deleted file mode 100644
index 5aff8dcda0dc..000000000000
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller
- *
- * Copyright (C) 2009 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * Based on wm97xx-core.c
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/irq.h>
-#include <linux/platform_data/mcs.h>
-#include <linux/slab.h>
-
-/* Registers */
-#define MCS5000_TS_STATUS 0x00
-#define STATUS_OFFSET 0
-#define STATUS_NO (0 << STATUS_OFFSET)
-#define STATUS_INIT (1 << STATUS_OFFSET)
-#define STATUS_SENSING (2 << STATUS_OFFSET)
-#define STATUS_COORD (3 << STATUS_OFFSET)
-#define STATUS_GESTURE (4 << STATUS_OFFSET)
-#define ERROR_OFFSET 4
-#define ERROR_NO (0 << ERROR_OFFSET)
-#define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET)
-#define ERROR_INT_RESET (2 << ERROR_OFFSET)
-#define ERROR_EXT_RESET (3 << ERROR_OFFSET)
-#define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET)
-#define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET)
-
-#define MCS5000_TS_OP_MODE 0x01
-#define RESET_OFFSET 0
-#define RESET_NO (0 << RESET_OFFSET)
-#define RESET_EXT_SOFT (1 << RESET_OFFSET)
-#define OP_MODE_OFFSET 1
-#define OP_MODE_SLEEP (0 << OP_MODE_OFFSET)
-#define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET)
-#define GESTURE_OFFSET 4
-#define GESTURE_DISABLE (0 << GESTURE_OFFSET)
-#define GESTURE_ENABLE (1 << GESTURE_OFFSET)
-#define PROXIMITY_OFFSET 5
-#define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET)
-#define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET)
-#define SCAN_MODE_OFFSET 6
-#define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET)
-#define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET)
-#define REPORT_RATE_OFFSET 7
-#define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET)
-#define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET)
-
-#define MCS5000_TS_SENS_CTL 0x02
-#define MCS5000_TS_FILTER_CTL 0x03
-#define PRI_FILTER_OFFSET 0
-#define SEC_FILTER_OFFSET 4
-
-#define MCS5000_TS_X_SIZE_UPPER 0x08
-#define MCS5000_TS_X_SIZE_LOWER 0x09
-#define MCS5000_TS_Y_SIZE_UPPER 0x0A
-#define MCS5000_TS_Y_SIZE_LOWER 0x0B
-
-#define MCS5000_TS_INPUT_INFO 0x10
-#define INPUT_TYPE_OFFSET 0
-#define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET)
-#define GESTURE_CODE_OFFSET 3
-#define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET)
-
-#define MCS5000_TS_X_POS_UPPER 0x11
-#define MCS5000_TS_X_POS_LOWER 0x12
-#define MCS5000_TS_Y_POS_UPPER 0x13
-#define MCS5000_TS_Y_POS_LOWER 0x14
-#define MCS5000_TS_Z_POS 0x15
-#define MCS5000_TS_WIDTH 0x16
-#define MCS5000_TS_GESTURE_VAL 0x17
-#define MCS5000_TS_MODULE_REV 0x20
-#define MCS5000_TS_FIRMWARE_VER 0x21
-
-/* Touchscreen absolute values */
-#define MCS5000_MAX_XC 0x3ff
-#define MCS5000_MAX_YC 0x3ff
-
-enum mcs5000_ts_read_offset {
- READ_INPUT_INFO,
- READ_X_POS_UPPER,
- READ_X_POS_LOWER,
- READ_Y_POS_UPPER,
- READ_Y_POS_LOWER,
- READ_BLOCK_SIZE,
-};
-
-/* Each client has this additional data */
-struct mcs5000_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct mcs_platform_data *platform_data;
-};
-
-static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id)
-{
- struct mcs5000_ts_data *data = dev_id;
- struct i2c_client *client = data->client;
- u8 buffer[READ_BLOCK_SIZE];
- int err;
- int x;
- int y;
-
- err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO,
- READ_BLOCK_SIZE, buffer);
- if (err < 0) {
- dev_err(&client->dev, "%s, err[%d]\n", __func__, err);
- goto out;
- }
-
- switch (buffer[READ_INPUT_INFO]) {
- case INPUT_TYPE_NONTOUCH:
- input_report_key(data->input_dev, BTN_TOUCH, 0);
- input_sync(data->input_dev);
- break;
-
- case INPUT_TYPE_SINGLE:
- x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER];
- y = (buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER];
-
- input_report_key(data->input_dev, BTN_TOUCH, 1);
- input_report_abs(data->input_dev, ABS_X, x);
- input_report_abs(data->input_dev, ABS_Y, y);
- input_sync(data->input_dev);
- break;
-
- case INPUT_TYPE_DUAL:
- /* TODO */
- break;
-
- case INPUT_TYPE_PALM:
- /* TODO */
- break;
-
- case INPUT_TYPE_PROXIMITY:
- /* TODO */
- break;
-
- default:
- dev_err(&client->dev, "Unknown ts input type %d\n",
- buffer[READ_INPUT_INFO]);
- break;
- }
-
- out:
- return IRQ_HANDLED;
-}
-
-static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data,
- const struct mcs_platform_data *platform_data)
-{
- struct i2c_client *client = data->client;
-
- /* Touch reset & sleep mode */
- i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE,
- RESET_EXT_SOFT | OP_MODE_SLEEP);
-
- /* Touch size */
- i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER,
- platform_data->x_size >> 8);
- i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER,
- platform_data->x_size & 0xff);
- i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER,
- platform_data->y_size >> 8);
- i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER,
- platform_data->y_size & 0xff);
-
- /* Touch active mode & 80 report rate */
- i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE,
- OP_MODE_ACTIVE | REPORT_RATE_80);
-}
-
-static int mcs5000_ts_probe(struct i2c_client *client)
-{
- const struct mcs_platform_data *pdata;
- struct mcs5000_ts_data *data;
- struct input_dev *input_dev;
- int error;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata)
- return -EINVAL;
-
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- data->client = client;
-
- input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "Failed to allocate input device\n");
- return -ENOMEM;
- }
-
- input_dev->name = "MELFAS MCS-5000 Touchscreen";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
-
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0);
-
- data->input_dev = input_dev;
-
- if (pdata->cfg_pin)
- pdata->cfg_pin();
-
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mcs5000_ts_interrupt,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "mcs5000_ts", data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- return error;
- }
-
- error = input_register_device(data->input_dev);
- if (error) {
- dev_err(&client->dev, "Failed to register input device\n");
- return error;
- }
-
- mcs5000_ts_phys_init(data, pdata);
- i2c_set_clientdata(client, data);
-
- return 0;
-}
-
-static int mcs5000_ts_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- /* Touch sleep mode */
- i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
-
- return 0;
-}
-
-static int mcs5000_ts_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct mcs5000_ts_data *data = i2c_get_clientdata(client);
- const struct mcs_platform_data *pdata = dev_get_platdata(dev);
-
- mcs5000_ts_phys_init(data, pdata);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(mcs5000_ts_pm,
- mcs5000_ts_suspend, mcs5000_ts_resume);
-
-static const struct i2c_device_id mcs5000_ts_id[] = {
- { "mcs5000_ts" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
-
-static struct i2c_driver mcs5000_ts_driver = {
- .probe = mcs5000_ts_probe,
- .driver = {
- .name = "mcs5000_ts",
- .pm = pm_sleep_ptr(&mcs5000_ts_pm),
- },
- .id_table = mcs5000_ts_id,
-};
-
-module_i2c_driver(mcs5000_ts_driver);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index b673098535ad..787f2caf4f73 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -42,11 +42,6 @@ static int tsc2004_probe(struct i2c_client *i2c)
tsc2004_cmd);
}
-static void tsc2004_remove(struct i2c_client *i2c)
-{
- tsc200x_remove(&i2c->dev);
-}
-
static const struct i2c_device_id tsc2004_idtable[] = {
{ "tsc2004" },
{ }
@@ -70,7 +65,6 @@ static struct i2c_driver tsc2004_driver = {
},
.id_table = tsc2004_idtable,
.probe = tsc2004_probe,
- .remove = tsc2004_remove,
};
module_i2c_driver(tsc2004_driver);
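
The only work tsc2004_remove() did was call tsc200x_remove(), which in turn only disabled the "vio" supply. Once the core switches to the devm variant (see the tsc200x-core.c hunks below), the regulator is disabled automatically on unbind and the transport drivers need no .remove callback at all. A minimal sketch of the replacement (example_get_vio is an illustrative name):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_get_vio(struct device *dev)
{
	/* Acquired and enabled here; disabled and put on unbind. */
	return devm_regulator_get_enable(dev, "vio");
}
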
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 1b40ce0ca1b9..6fe8b41b3ecc 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -64,11 +64,6 @@ static int tsc2005_probe(struct spi_device *spi)
tsc2005_cmd);
}
-static void tsc2005_remove(struct spi_device *spi)
-{
- tsc200x_remove(&spi->dev);
-}
-
#ifdef CONFIG_OF
static const struct of_device_id tsc2005_of_match[] = {
{ .compatible = "ti,tsc2005" },
@@ -85,7 +80,6 @@ static struct spi_driver tsc2005_driver = {
.pm = pm_sleep_ptr(&tsc200x_pm_ops),
},
.probe = tsc2005_probe,
- .remove = tsc2005_remove,
};
module_spi_driver(tsc2005_driver);
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index a4c0e9db9bb9..df39dee13e1c 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -104,11 +104,11 @@ struct tsc200x {
bool pen_down;
- struct regulator *vio;
-
struct gpio_desc *reset_gpio;
int (*tsc200x_cmd)(struct device *dev, u8 cmd);
+
int irq;
+ bool wake_irq_enabled;
};
static void tsc200x_update_pen_state(struct tsc200x *ts,
@@ -136,7 +136,6 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
static irqreturn_t tsc200x_irq_thread(int irq, void *_ts)
{
struct tsc200x *ts = _ts;
- unsigned long flags;
unsigned int pressure;
struct tsc200x_data tsdata;
int error;
@@ -182,13 +181,11 @@ static irqreturn_t tsc200x_irq_thread(int irq, void *_ts)
if (unlikely(pressure > MAX_12BIT))
goto out;
- spin_lock_irqsave(&ts->lock, flags);
-
- tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
- mod_timer(&ts->penup_timer,
- jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS));
-
- spin_unlock_irqrestore(&ts->lock, flags);
+ scoped_guard(spinlock_irqsave, &ts->lock) {
+ tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
+ mod_timer(&ts->penup_timer,
+ jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS));
+ }
ts->last_valid_interrupt = jiffies;
out:
@@ -198,11 +195,9 @@ out:
static void tsc200x_penup_timer(struct timer_list *t)
{
struct tsc200x *ts = from_timer(ts, t, penup_timer);
- unsigned long flags;
- spin_lock_irqsave(&ts->lock, flags);
+ guard(spinlock_irqsave)(&ts->lock);
tsc200x_update_pen_state(ts, 0, 0, 0);
- spin_unlock_irqrestore(&ts->lock, flags);
}
static void tsc200x_start_scan(struct tsc200x *ts)
@@ -232,12 +227,10 @@ static void __tsc200x_disable(struct tsc200x *ts)
{
tsc200x_stop_scan(ts);
- disable_irq(ts->irq);
- del_timer_sync(&ts->penup_timer);
+ guard(disable_irq)(&ts->irq);
+ del_timer_sync(&ts->penup_timer);
cancel_delayed_work_sync(&ts->esd_work);
-
- enable_irq(ts->irq);
}
/* must be called with ts->mutex held */
@@ -253,80 +246,79 @@ static void __tsc200x_enable(struct tsc200x *ts)
}
}
-static ssize_t tsc200x_selftest_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * Test TSC200X communications via temp high register.
+ */
+static int tsc200x_do_selftest(struct tsc200x *ts)
{
- struct tsc200x *ts = dev_get_drvdata(dev);
- unsigned int temp_high;
unsigned int temp_high_orig;
unsigned int temp_high_test;
- bool success = true;
+ unsigned int temp_high;
int error;
- mutex_lock(&ts->mutex);
-
- /*
- * Test TSC200X communications via temp high register.
- */
- __tsc200x_disable(ts);
-
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high_orig);
if (error) {
- dev_warn(dev, "selftest failed: read error %d\n", error);
- success = false;
- goto out;
+ dev_warn(ts->dev, "selftest failed: read error %d\n", error);
+ return error;
}
temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
error = regmap_write(ts->regmap, TSC200X_REG_TEMP_HIGH, temp_high_test);
if (error) {
- dev_warn(dev, "selftest failed: write error %d\n", error);
- success = false;
- goto out;
+ dev_warn(ts->dev, "selftest failed: write error %d\n", error);
+ return error;
}
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high);
if (error) {
- dev_warn(dev, "selftest failed: read error %d after write\n",
- error);
- success = false;
- goto out;
- }
-
- if (temp_high != temp_high_test) {
- dev_warn(dev, "selftest failed: %d != %d\n",
- temp_high, temp_high_test);
- success = false;
+ dev_warn(ts->dev,
+ "selftest failed: read error %d after write\n", error);
+ return error;
}
/* hardware reset */
tsc200x_reset(ts);
- if (!success)
- goto out;
+ if (temp_high != temp_high_test) {
+ dev_warn(ts->dev, "selftest failed: %d != %d\n",
+ temp_high, temp_high_test);
+ return -EINVAL;
+ }
/* test that the reset really happened */
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high);
if (error) {
- dev_warn(dev, "selftest failed: read error %d after reset\n",
- error);
- success = false;
- goto out;
+ dev_warn(ts->dev,
+ "selftest failed: read error %d after reset\n", error);
+ return error;
}
if (temp_high != temp_high_orig) {
- dev_warn(dev, "selftest failed after reset: %d != %d\n",
+ dev_warn(ts->dev, "selftest failed after reset: %d != %d\n",
temp_high, temp_high_orig);
- success = false;
+ return -EINVAL;
}
-out:
- __tsc200x_enable(ts);
- mutex_unlock(&ts->mutex);
+ return 0;
+}
- return sprintf(buf, "%d\n", success);
+static ssize_t tsc200x_selftest_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tsc200x *ts = dev_get_drvdata(dev);
+ int error;
+
+ scoped_guard(mutex, &ts->mutex) {
+ __tsc200x_disable(ts);
+
+ error = tsc200x_do_selftest(ts);
+
+ __tsc200x_enable(ts);
+ }
+
+ return sprintf(buf, "%d\n", !error);
}
static DEVICE_ATTR(selftest, S_IRUGO, tsc200x_selftest_show, NULL);
@@ -368,46 +360,42 @@ static void tsc200x_esd_work(struct work_struct *work)
int error;
unsigned int r;
- if (!mutex_trylock(&ts->mutex)) {
- /*
- * If the mutex is taken, it means that disable or enable is in
- * progress. In that case just reschedule the work. If the work
- * is not needed, it will be canceled by disable.
- */
- goto reschedule;
- }
-
- if (time_is_after_jiffies(ts->last_valid_interrupt +
- msecs_to_jiffies(ts->esd_timeout)))
- goto out;
-
- /* We should be able to read register without disabling interrupts. */
- error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r);
- if (!error &&
- !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) {
- goto out;
- }
-
/*
- * If we could not read our known value from configuration register 0
- * then we should reset the controller as if from power-up and start
- * scanning again.
+ * If the mutex is taken, it means that disable or enable is in
+ * progress. In that case just reschedule the work. If the work
+ * is not needed, it will be canceled by disable.
*/
- dev_info(ts->dev, "TSC200X not responding - resetting\n");
+ scoped_guard(mutex_try, &ts->mutex) {
+ if (time_is_after_jiffies(ts->last_valid_interrupt +
+ msecs_to_jiffies(ts->esd_timeout)))
+ break;
- disable_irq(ts->irq);
- del_timer_sync(&ts->penup_timer);
+ /*
+ * We should be able to read register without disabling
+ * interrupts.
+ */
+ error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r);
+ if (!error &&
+ !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) {
+ break;
+ }
- tsc200x_update_pen_state(ts, 0, 0, 0);
+ /*
+ * If we could not read our known value from configuration
+ * register 0 then we should reset the controller as if from
+ * power-up and start scanning again.
+ */
+ dev_info(ts->dev, "TSC200X not responding - resetting\n");
- tsc200x_reset(ts);
+ scoped_guard(disable_irq, &ts->irq) {
+ del_timer_sync(&ts->penup_timer);
+ tsc200x_update_pen_state(ts, 0, 0, 0);
+ tsc200x_reset(ts);
+ }
- enable_irq(ts->irq);
- tsc200x_start_scan(ts);
+ tsc200x_start_scan(ts);
+ }
-out:
- mutex_unlock(&ts->mutex);
-reschedule:
/* re-arm the watchdog */
schedule_delayed_work(&ts->esd_work,
round_jiffies_relative(
@@ -418,15 +406,13 @@ static int tsc200x_open(struct input_dev *input)
{
struct tsc200x *ts = input_get_drvdata(input);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended)
__tsc200x_enable(ts);
ts->opened = true;
- mutex_unlock(&ts->mutex);
-
return 0;
}
@@ -434,14 +420,12 @@ static void tsc200x_close(struct input_dev *input)
{
struct tsc200x *ts = input_get_drvdata(input);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended)
__tsc200x_disable(ts);
ts->opened = false;
-
- mutex_unlock(&ts->mutex);
}
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
@@ -488,20 +472,6 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
&esd_timeout);
ts->esd_timeout = error ? 0 : esd_timeout;
- ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- dev_err(dev, "error acquiring reset gpio: %d\n", error);
- return error;
- }
-
- ts->vio = devm_regulator_get(dev, "vio");
- if (IS_ERR(ts->vio)) {
- error = PTR_ERR(ts->vio);
- dev_err(dev, "error acquiring vio regulator: %d", error);
- return error;
- }
-
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
@@ -542,60 +512,60 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
touchscreen_parse_properties(input_dev, false, &ts->prop);
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ error = PTR_ERR_OR_ZERO(ts->reset_gpio);
+ if (error) {
+ dev_err(dev, "error acquiring reset gpio: %d\n", error);
+ return error;
+ }
+
+ error = devm_regulator_get_enable(dev, "vio");
+ if (error) {
+ dev_err(dev, "error acquiring vio regulator: %d\n", error);
+ return error;
+ }
+
+ tsc200x_reset(ts);
+
/* Ensure the touchscreen is off */
tsc200x_stop_scan(ts);
- error = devm_request_threaded_irq(dev, irq, NULL,
- tsc200x_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "tsc200x", ts);
+ error = devm_request_threaded_irq(dev, irq, NULL, tsc200x_irq_thread,
+ IRQF_ONESHOT, "tsc200x", ts);
if (error) {
dev_err(dev, "Failed to request irq, err: %d\n", error);
return error;
}
- error = regulator_enable(ts->vio);
- if (error)
- return error;
-
dev_set_drvdata(dev, ts);
error = input_register_device(ts->idev);
if (error) {
dev_err(dev,
"Failed to register input device, err: %d\n", error);
- goto disable_regulator;
+ return error;
}
- irq_set_irq_wake(irq, 1);
- return 0;
+ device_init_wakeup(dev,
+ device_property_read_bool(dev, "wakeup-source"));
-disable_regulator:
- regulator_disable(ts->vio);
- return error;
+ return 0;
}
EXPORT_SYMBOL_GPL(tsc200x_probe);
-void tsc200x_remove(struct device *dev)
-{
- struct tsc200x *ts = dev_get_drvdata(dev);
-
- regulator_disable(ts->vio);
-}
-EXPORT_SYMBOL_GPL(tsc200x_remove);
-
static int tsc200x_suspend(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended && ts->opened)
__tsc200x_disable(ts);
ts->suspended = true;
- mutex_unlock(&ts->mutex);
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = enable_irq_wake(ts->irq) == 0;
return 0;
}
@@ -604,15 +574,18 @@ static int tsc200x_resume(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
+
+ if (ts->wake_irq_enabled) {
+ disable_irq_wake(ts->irq);
+ ts->wake_irq_enabled = false;
+ }
if (ts->suspended && ts->opened)
__tsc200x_enable(ts);
ts->suspended = false;
- mutex_unlock(&ts->mutex);
-
return 0;
}
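
The locking rework above leans on the cleanup.h guards: guard() holds a lock until the enclosing scope ends, while scoped_guard() limits it to the attached block, so every early return unlocks automatically and the manual lock/unlock pairs disappear. A minimal sketch of both forms, with hypothetical example_* names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_mutex);
static DEFINE_SPINLOCK(example_lock);

static int example_update(bool ready)
{
	guard(mutex)(&example_mutex);

	if (!ready)
		return -EAGAIN;		/* mutex released here, too */

	scoped_guard(spinlock_irqsave, &example_lock) {
		/* short critical section under the spinlock */
	}

	return 0;			/* mutex released on return */
}

The same patch also replaces the unconditional irq_set_irq_wake() with device_init_wakeup() plus enable_irq_wake()/disable_irq_wake() across suspend/resume, so wake-from-touch is only armed when the device is actually a "wakeup-source".
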
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 37de91efd78e..e76ba7a889dd 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -75,6 +75,5 @@ extern const struct attribute_group *tsc200x_groups[];
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
-void tsc200x_remove(struct device *dev);
#endif
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index dd6b12c6dc58..7567efabe014 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -68,8 +68,6 @@ struct usbtouch_device_info {
*/
bool irq_always;
- void (*process_pkt) (struct usbtouch_usb *usbtouch, unsigned char *pkt, int len);
-
/*
* used to get the packet len. possible return values:
* > 0: packet len
@@ -94,7 +92,7 @@ struct usbtouch_usb {
struct urb *irq;
struct usb_interface *interface;
struct input_dev *input;
- struct usbtouch_device_info *type;
+ const struct usbtouch_device_info *type;
struct mutex pm_mutex; /* serialize access to open/suspend */
bool is_open;
char name[128];
@@ -103,141 +101,8 @@ struct usbtouch_usb {
int x, y;
int touch, press;
-};
-
-
-/* device types */
-enum {
- DEVTYPE_IGNORE = -1,
- DEVTYPE_EGALAX,
- DEVTYPE_PANJIT,
- DEVTYPE_3M,
- DEVTYPE_ITM,
- DEVTYPE_ETURBO,
- DEVTYPE_GUNZE,
- DEVTYPE_DMC_TSC10,
- DEVTYPE_IRTOUCH,
- DEVTYPE_IRTOUCH_HIRES,
- DEVTYPE_IDEALTEK,
- DEVTYPE_GENERAL_TOUCH,
- DEVTYPE_GOTOP,
- DEVTYPE_JASTEC,
- DEVTYPE_E2I,
- DEVTYPE_ZYTRONIC,
- DEVTYPE_TC45USB,
- DEVTYPE_NEXIO,
- DEVTYPE_ELO,
- DEVTYPE_ETOUCH,
-};
-
-#define USB_DEVICE_HID_CLASS(vend, prod) \
- .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
- | USB_DEVICE_ID_MATCH_DEVICE, \
- .idVendor = (vend), \
- .idProduct = (prod), \
- .bInterfaceClass = USB_INTERFACE_CLASS_HID
-
-static const struct usb_device_id usbtouch_devices[] = {
-#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
- /* ignore the HID capable devices, handled by usbhid */
- {USB_DEVICE_HID_CLASS(0x0eef, 0x0001), .driver_info = DEVTYPE_IGNORE},
- {USB_DEVICE_HID_CLASS(0x0eef, 0x0002), .driver_info = DEVTYPE_IGNORE},
-
- /* normal device IDs */
- {USB_DEVICE(0x3823, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x3823, 0x0002), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0123, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0eef, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0eef, 0x0002), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x1234, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x1234, 0x0002), .driver_info = DEVTYPE_EGALAX},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
- {USB_DEVICE(0x134c, 0x0001), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0002), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0003), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0004), .driver_info = DEVTYPE_PANJIT},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_3M
- {USB_DEVICE(0x0596, 0x0001), .driver_info = DEVTYPE_3M},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ITM
- {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM},
- {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
- {USB_DEVICE(0x1234, 0x5678), .driver_info = DEVTYPE_ETURBO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
- {USB_DEVICE(0x0637, 0x0001), .driver_info = DEVTYPE_GUNZE},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
- {USB_DEVICE(0x0afa, 0x03e8), .driver_info = DEVTYPE_DMC_TSC10},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
- {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
- {USB_DEVICE(0x1391, 0x1000), .driver_info = DEVTYPE_IDEALTEK},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
- {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
- {USB_DEVICE(0x08f2, 0x007f), .driver_info = DEVTYPE_GOTOP},
- {USB_DEVICE(0x08f2, 0x00ce), .driver_info = DEVTYPE_GOTOP},
- {USB_DEVICE(0x08f2, 0x00f4), .driver_info = DEVTYPE_GOTOP},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
- {USB_DEVICE(0x0f92, 0x0001), .driver_info = DEVTYPE_JASTEC},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_E2I
- {USB_DEVICE(0x1ac7, 0x0001), .driver_info = DEVTYPE_E2I},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
- {USB_DEVICE(0x14c8, 0x0003), .driver_info = DEVTYPE_ZYTRONIC},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
- /* TC5UH */
- {USB_DEVICE(0x0664, 0x0309), .driver_info = DEVTYPE_TC45USB},
- /* TC4UM */
- {USB_DEVICE(0x0664, 0x0306), .driver_info = DEVTYPE_TC45USB},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
- /* data interface only */
- {USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00),
- .driver_info = DEVTYPE_NEXIO},
- {USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00),
- .driver_info = DEVTYPE_NEXIO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ELO
- {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
- {USB_DEVICE(0x7374, 0x0001), .driver_info = DEVTYPE_ETOUCH},
-#endif
- {}
+ void (*process_pkt)(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len);
};
@@ -273,6 +138,16 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info e2i_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 6,
+ .init = e2i_init,
+ .read_data = e2i_read_data,
+};
#endif
@@ -292,9 +167,8 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
static int egalax_init(struct usbtouch_usb *usbtouch)
{
- int ret, i;
- unsigned char *buf;
struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
+ int ret, i;
/*
* An eGalax diagnostic packet kicks the device into using the right
@@ -302,7 +176,7 @@ static int egalax_init(struct usbtouch_usb *usbtouch)
* read later and ignored.
*/
- buf = kmalloc(3, GFP_KERNEL);
+ u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -316,17 +190,11 @@ static int egalax_init(struct usbtouch_usb *usbtouch)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0, buf, 3,
USB_CTRL_SET_TIMEOUT);
- if (ret >= 0) {
- ret = 0;
- break;
- }
if (ret != -EPIPE)
break;
}
- kfree(buf);
-
- return ret;
+ return ret < 0 ? ret : 0;
}
static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
@@ -356,6 +224,17 @@ static int egalax_get_pkt_len(unsigned char *buf, int len)
return 0;
}
+
+static const struct usbtouch_device_info egalax_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 16,
+ .get_pkt_len = egalax_get_pkt_len,
+ .read_data = egalax_read_data,
+ .init = egalax_init,
+};
#endif
/*****************************************************************************
@@ -402,6 +281,16 @@ static int etouch_get_pkt_len(unsigned char *buf, int len)
return 0;
}
+
+static const struct usbtouch_device_info etouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 16,
+ .get_pkt_len = etouch_get_pkt_len,
+ .read_data = etouch_read_data,
+};
#endif
/*****************************************************************************
@@ -416,6 +305,15 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info panjit_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .read_data = panjit_read_data,
+};
#endif
@@ -449,35 +347,13 @@ struct mtouch_priv {
u8 fw_rev_minor;
};
-static ssize_t mtouch_firmware_rev_show(struct device *dev,
- struct device_attribute *attr, char *output)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- struct mtouch_priv *priv = usbtouch->priv;
-
- return sysfs_emit(output, "%1x.%1x\n",
- priv->fw_rev_major, priv->fw_rev_minor);
-}
-static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
-
-static struct attribute *mtouch_attrs[] = {
- &dev_attr_firmware_rev.attr,
- NULL
-};
-
-static const struct attribute_group mtouch_attr_group = {
- .attrs = mtouch_attrs,
-};
-
static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch)
{
struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
struct mtouch_priv *priv = usbtouch->priv;
- u8 *buf;
int ret;
- buf = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO);
+ u8 *buf __free(kfree) = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO);
if (!buf)
return -ENOMEM;
@@ -489,38 +365,24 @@ static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch)
if (ret != MTOUCHUSB_REQ_CTRLLR_ID_LEN) {
dev_warn(&usbtouch->interface->dev,
"Failed to read FW rev: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto free;
+ return ret < 0 ? ret : -EIO;
}
priv->fw_rev_major = buf[3];
priv->fw_rev_minor = buf[4];
- ret = 0;
-
-free:
- kfree(buf);
- return ret;
+ return 0;
}
static int mtouch_alloc(struct usbtouch_usb *usbtouch)
{
struct mtouch_priv *priv;
- int ret;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
usbtouch->priv = priv;
- ret = sysfs_create_group(&usbtouch->interface->dev.kobj,
- &mtouch_attr_group);
- if (ret) {
- kfree(usbtouch->priv);
- usbtouch->priv = NULL;
- return ret;
- }
-
return 0;
}
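
mtouch_get_fw_revision() above is one of several conversions in this file to __free(kfree): the buffer is released automatically when it goes out of scope, so the error paths no longer need a kfree() label. A minimal sketch of the idiom (example_transfer is an illustrative name):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_transfer(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; every return path frees it automatically ... */
	return 0;
}
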
@@ -571,9 +433,53 @@ static void mtouch_exit(struct usbtouch_usb *usbtouch)
{
struct mtouch_priv *priv = usbtouch->priv;
- sysfs_remove_group(&usbtouch->interface->dev.kobj, &mtouch_attr_group);
kfree(priv);
}
+
+static struct usbtouch_device_info mtouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x4000,
+ .min_yc = 0x0,
+ .max_yc = 0x4000,
+ .rept_size = 11,
+ .read_data = mtouch_read_data,
+ .alloc = mtouch_alloc,
+ .init = mtouch_init,
+ .exit = mtouch_exit,
+};
+
+static ssize_t mtouch_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr, char *output)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
+ struct mtouch_priv *priv = usbtouch->priv;
+
+ return sysfs_emit(output, "%1x.%1x\n",
+ priv->fw_rev_major, priv->fw_rev_minor);
+}
+static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
+
+static struct attribute *mtouch_attrs[] = {
+ &dev_attr_firmware_rev.attr,
+ NULL
+};
+
+static bool mtouch_group_visible(struct kobject *kobj)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
+
+ return usbtouch->type == &mtouch_dev_info;
+}
+
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch);
+
+static const struct attribute_group mtouch_attr_group = {
+ .is_visible = SYSFS_GROUP_VISIBLE(mtouch),
+ .attrs = mtouch_attrs,
+};
#endif
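
The sysfs plumbing above replaces the manual sysfs_create_group()/sysfs_remove_group() calls in alloc/exit: a bool predicate feeds DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(), which generates the is_visible callback that SYSFS_GROUP_VISIBLE() plugs into the group, so the firmware_rev file only appears on 3M hardware even though the group is registered for every usbtouch device. A minimal sketch of the wiring, with hypothetical example_* names and the attribute list omitted for brevity:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static bool example_group_visible(struct kobject *kobj)
{
	return true;	/* a real driver checks which device is bound */
}

DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(example);

static const struct attribute_group example_group = {
	.is_visible = SYSFS_GROUP_VISIBLE(example),
};
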
@@ -608,6 +514,16 @@ static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info itm_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = itm_read_data,
+};
#endif
@@ -642,6 +558,16 @@ static int eturbo_get_pkt_len(unsigned char *buf, int len)
return 3;
return 0;
}
+
+static const struct usbtouch_device_info eturbo_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 8,
+ .get_pkt_len = eturbo_get_pkt_len,
+ .read_data = eturbo_read_data,
+};
#endif
@@ -660,6 +586,15 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info gunze_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 4,
+ .read_data = gunze_read_data,
+};
#endif
/*****************************************************************************
@@ -688,24 +623,23 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
{
struct usb_device *dev = interface_to_usbdev(usbtouch->interface);
- int ret = -ENOMEM;
- unsigned char *buf;
+ int ret;
- buf = kmalloc(2, GFP_NOIO);
+ u8 *buf __free(kfree) = kmalloc(2, GFP_NOIO);
if (!buf)
- goto err_nobuf;
+ return -ENOMEM;
+
/* reset */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
- TSC10_CMD_RESET,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+ TSC10_CMD_RESET,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
if (ret < 0)
- goto err_out;
- if (buf[0] != 0x06) {
- ret = -ENODEV;
- goto err_out;
- }
+ return ret;
+
+ if (buf[0] != 0x06)
+ return -ENODEV;
/* TSC-25 data sheet specifies a delay after the RESET command */
msleep(150);
@@ -713,28 +647,22 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
- TSC10_CMD_RATE,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+ TSC10_CMD_RATE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
if (ret < 0)
- goto err_out;
- if ((buf[0] != 0x06) && (buf[0] != 0x15 || buf[1] != 0x01)) {
- ret = -ENODEV;
- goto err_out;
- }
+ return ret;
+
+ if (buf[0] != 0x06 && (buf[0] != 0x15 || buf[1] != 0x01))
+ return -ENODEV;
/* start sending data */
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- TSC10_CMD_DATA1,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
-err_out:
- kfree(buf);
-err_nobuf:
- return ret;
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ TSC10_CMD_DATA1,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
-
static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
dev->x = ((pkt[2] & 0x03) << 8) | pkt[1];
@@ -743,6 +671,16 @@ static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info dmc_tsc10_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 5,
+ .init = dmc_tsc10_init,
+ .read_data = dmc_tsc10_read_data,
+};
#endif
@@ -758,6 +696,24 @@ static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info irtouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .read_data = irtouch_read_data,
+};
+
+static const struct usbtouch_device_info irtouch_hires_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 8,
+ .read_data = irtouch_read_data,
+};
#endif
/*****************************************************************************
@@ -772,6 +728,15 @@ static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info tc45usb_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 5,
+ .read_data = tc45usb_read_data,
+};
#endif
/*****************************************************************************
@@ -811,6 +776,16 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 0;
}
}
+
+static const struct usbtouch_device_info idealtek_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .get_pkt_len = idealtek_get_pkt_len,
+ .read_data = idealtek_read_data,
+};
#endif
/*****************************************************************************
@@ -826,6 +801,15 @@ static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info general_touch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 7,
+ .read_data = general_touch_read_data,
+};
#endif
/*****************************************************************************
@@ -840,6 +824,15 @@ static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info gotop_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 4,
+ .read_data = gotop_read_data,
+};
#endif
/*****************************************************************************
@@ -854,6 +847,15 @@ static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info jastec_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 4,
+ .read_data = jastec_read_data,
+};
#endif
/*****************************************************************************
@@ -890,6 +892,16 @@ static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 0;
}
+
+static const struct usbtouch_device_info zytronic_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 5,
+ .read_data = zytronic_read_data,
+ .irq_always = true,
+};
#endif
/*****************************************************************************
@@ -960,7 +972,6 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
struct nexio_priv *priv = usbtouch->priv;
int ret = -ENOMEM;
int actual_len, i;
- unsigned char *buf;
char *firmware_ver = NULL, *device_name = NULL;
int input_ep = 0, output_ep = 0;
@@ -976,9 +987,9 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
if (!input_ep || !output_ep)
return -ENXIO;
- buf = kmalloc(NEXIO_BUFSIZE, GFP_NOIO);
+ u8 *buf __free(kfree) = kmalloc(NEXIO_BUFSIZE, GFP_NOIO);
if (!buf)
- goto out_buf;
+ return -ENOMEM;
/* two empty reads */
for (i = 0; i < 2; i++) {
@@ -986,7 +997,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
buf, NEXIO_BUFSIZE, &actual_len,
NEXIO_TIMEOUT);
if (ret < 0)
- goto out_buf;
+ return ret;
}
/* send init command */
@@ -995,7 +1006,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
buf, sizeof(nexio_init_pkt), &actual_len,
NEXIO_TIMEOUT);
if (ret < 0)
- goto out_buf;
+ return ret;
/* read replies */
for (i = 0; i < 3; i++) {
@@ -1026,11 +1037,8 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep),
priv->ack_buf, sizeof(nexio_ack_pkt),
nexio_ack_complete, usbtouch);
- ret = 0;
-out_buf:
- kfree(buf);
- return ret;
+ return 0;
}
static void nexio_exit(struct usbtouch_usb *usbtouch)
@@ -1067,13 +1075,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
if (ret)
dev_warn(dev, "Failed to submit ACK URB: %d\n", ret);
- if (!usbtouch->type->max_xc) {
- usbtouch->type->max_xc = 2 * x_len;
+ if (!input_abs_get_max(usbtouch->input, ABS_X)) {
input_set_abs_params(usbtouch->input, ABS_X,
- 0, usbtouch->type->max_xc, 0, 0);
- usbtouch->type->max_yc = 2 * y_len;
+ 0, 2 * x_len, 0, 0);
input_set_abs_params(usbtouch->input, ABS_Y,
- 0, usbtouch->type->max_yc, 0, 0);
+ 0, 2 * y_len, 0, 0);
}
/*
* The device reports state of IR sensors on X and Y axes.
@@ -1128,6 +1134,15 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
}
return 0;
}
+
+static const struct usbtouch_device_info nexio_dev_info = {
+ .rept_size = 1024,
+ .irq_always = true,
+ .read_data = nexio_read_data,
+ .alloc = nexio_alloc,
+ .init = nexio_init,
+ .exit = nexio_exit,
+};
#endif
@@ -1146,241 +1161,17 @@ static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
-#endif
-
-
-/*****************************************************************************
- * the different device descriptors
- */
-#ifdef MULTI_PACKET
-static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
- unsigned char *pkt, int len);
-#endif
-
-static struct usbtouch_device_info usbtouch_dev_info[] = {
-#ifdef CONFIG_TOUCHSCREEN_USB_ELO
- [DEVTYPE_ELO] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .max_press = 0xff,
- .rept_size = 8,
- .read_data = elo_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
- [DEVTYPE_EGALAX] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 16,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = egalax_get_pkt_len,
- .read_data = egalax_read_data,
- .init = egalax_init,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
- [DEVTYPE_PANJIT] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .read_data = panjit_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_3M
- [DEVTYPE_3M] = {
- .min_xc = 0x0,
- .max_xc = 0x4000,
- .min_yc = 0x0,
- .max_yc = 0x4000,
- .rept_size = 11,
- .read_data = mtouch_read_data,
- .alloc = mtouch_alloc,
- .init = mtouch_init,
- .exit = mtouch_exit,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ITM
- [DEVTYPE_ITM] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .max_press = 0xff,
- .rept_size = 8,
- .read_data = itm_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
- [DEVTYPE_ETURBO] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 8,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = eturbo_get_pkt_len,
- .read_data = eturbo_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
- [DEVTYPE_GUNZE] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 4,
- .read_data = gunze_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
- [DEVTYPE_DMC_TSC10] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 5,
- .init = dmc_tsc10_init,
- .read_data = dmc_tsc10_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
- [DEVTYPE_IRTOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .read_data = irtouch_read_data,
- },
-
- [DEVTYPE_IRTOUCH_HIRES] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 8,
- .read_data = irtouch_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
- [DEVTYPE_IDEALTEK] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = idealtek_get_pkt_len,
- .read_data = idealtek_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
- [DEVTYPE_GENERAL_TOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 7,
- .read_data = general_touch_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
- [DEVTYPE_GOTOP] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 4,
- .read_data = gotop_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
- [DEVTYPE_JASTEC] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 4,
- .read_data = jastec_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_E2I
- [DEVTYPE_E2I] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 6,
- .init = e2i_init,
- .read_data = e2i_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
- [DEVTYPE_ZYTRONIC] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 5,
- .read_data = zytronic_read_data,
- .irq_always = true,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
- [DEVTYPE_TC45USB] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 5,
- .read_data = tc45usb_read_data,
- },
-#endif
-#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
- [DEVTYPE_NEXIO] = {
- .rept_size = 1024,
- .irq_always = true,
- .read_data = nexio_read_data,
- .alloc = nexio_alloc,
- .init = nexio_init,
- .exit = nexio_exit,
- },
-#endif
-#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
- [DEVTYPE_ETOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 16,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = etouch_get_pkt_len,
- .read_data = etouch_read_data,
- },
-#endif
+static const struct usbtouch_device_info elo_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = elo_read_data,
};
+#endif
/*****************************************************************************
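The long hunk above dissolves the central usbtouch_dev_info[] array (previously indexed by a DEVTYPE_* value stored in driver_info) into one static const structure per protocol, kept next to that protocol's functions. The match table later in the patch then stores a pointer to the structure directly in usb_device_id.driver_info, which is a kernel_ulong_t, hence the casts in both directions. A reduced sketch of the idiom, with made-up names:

	#include <linux/mod_devicetable.h>
	#include <linux/usb.h>

	struct example_info {
		int rept_size;
	};

	static const struct example_info foo_info = { .rept_size = 8 };

	static const struct usb_device_id example_ids[] = {
		{ USB_DEVICE(0x1234, 0x0001),
		  .driver_info = (kernel_ulong_t)&foo_info },
		/* a zero driver_info now marks "ignore this device" */
		{ USB_DEVICE(0x1234, 0x0002), .driver_info = 0 },
		{ }
	};

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		const struct example_info *info =
			(const struct example_info *)id->driver_info;

		if (!info)		/* replaces the DEVTYPE_IGNORE check */
			return -ENODEV;

		return 0;
	}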
@@ -1389,10 +1180,10 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch,
unsigned char *pkt, int len)
{
- struct usbtouch_device_info *type = usbtouch->type;
+ const struct usbtouch_device_info *type = usbtouch->type;
if (!type->read_data(usbtouch, pkt))
- return;
+ return;
input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch);
@@ -1485,9 +1276,15 @@ out_flush_buf:
usbtouch->buf_len = 0;
return;
}
+#else
+static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
+ unsigned char *pkt, int len)
+{
+ dev_WARN_ONCE(&usbtouch->interface->dev, 1,
+ "Protocol has ->get_pkt_len() without #define MULTI_PACKET");
+}
#endif
-
static void usbtouch_irq(struct urb *urb)
{
struct usbtouch_usb *usbtouch = urb->context;
@@ -1518,7 +1315,7 @@ static void usbtouch_irq(struct urb *urb)
goto exit;
}
- usbtouch->type->process_pkt(usbtouch, usbtouch->data, urb->actual_length);
+ usbtouch->process_pkt(usbtouch, usbtouch->data, urb->actual_length);
exit:
usb_mark_last_busy(interface_to_usbdev(usbtouch->interface));
@@ -1528,6 +1325,20 @@ exit:
__func__, retval);
}
+static int usbtouch_start_io(struct usbtouch_usb *usbtouch)
+{
+ guard(mutex)(&usbtouch->pm_mutex);
+
+ if (!usbtouch->type->irq_always)
+ if (usb_submit_urb(usbtouch->irq, GFP_KERNEL))
+ return -EIO;
+
+ usbtouch->interface->needs_remote_wakeup = 1;
+ usbtouch->is_open = true;
+
+ return 0;
+}
+
static int usbtouch_open(struct input_dev *input)
{
struct usbtouch_usb *usbtouch = input_get_drvdata(input);
@@ -1536,23 +1347,12 @@ static int usbtouch_open(struct input_dev *input)
usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface);
r = usb_autopm_get_interface(usbtouch->interface) ? -EIO : 0;
- if (r < 0)
- goto out;
-
- mutex_lock(&usbtouch->pm_mutex);
- if (!usbtouch->type->irq_always) {
- if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) {
- r = -EIO;
- goto out_put;
- }
- }
+ if (r)
+ return r;
+
+ r = usbtouch_start_io(usbtouch);
- usbtouch->interface->needs_remote_wakeup = 1;
- usbtouch->is_open = true;
-out_put:
- mutex_unlock(&usbtouch->pm_mutex);
usb_autopm_put_interface(usbtouch->interface);
-out:
return r;
}
@@ -1561,11 +1361,11 @@ static void usbtouch_close(struct input_dev *input)
struct usbtouch_usb *usbtouch = input_get_drvdata(input);
int r;
- mutex_lock(&usbtouch->pm_mutex);
- if (!usbtouch->type->irq_always)
- usb_kill_urb(usbtouch->irq);
- usbtouch->is_open = false;
- mutex_unlock(&usbtouch->pm_mutex);
+ scoped_guard(mutex, &usbtouch->pm_mutex) {
+ if (!usbtouch->type->irq_always)
+ usb_kill_urb(usbtouch->irq);
+ usbtouch->is_open = false;
+ }
r = usb_autopm_get_interface(usbtouch->interface);
usbtouch->interface->needs_remote_wakeup = 0;
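usbtouch_start_io() and the reworked usbtouch_close() above rely on guard()/scoped_guard() from <linux/cleanup.h>: the mutex is dropped automatically when the guarded scope ends, so early returns while holding the lock are safe and the unlock/out_put labels disappear. A minimal sketch (names are illustrative):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	static bool example_open;

	static int example_start(void)
	{
		guard(mutex)(&example_lock);	/* held until function exit */

		if (!example_open)
			return -ENODEV;		/* unlocks here as well */

		return 0;
	}

	static void example_stop(void)
	{
		scoped_guard(mutex, &example_lock) {
			/* lock dropped at the closing brace */
			example_open = false;
		}
		/* runs without the lock held */
	}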
@@ -1573,8 +1373,7 @@ static void usbtouch_close(struct input_dev *input)
usb_autopm_put_interface(usbtouch->interface);
}
-static int usbtouch_suspend
-(struct usb_interface *intf, pm_message_t message)
+static int usbtouch_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
@@ -1586,20 +1385,19 @@ static int usbtouch_suspend
static int usbtouch_resume(struct usb_interface *intf)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- int result = 0;
- mutex_lock(&usbtouch->pm_mutex);
+ guard(mutex)(&usbtouch->pm_mutex);
+
if (usbtouch->is_open || usbtouch->type->irq_always)
- result = usb_submit_urb(usbtouch->irq, GFP_NOIO);
- mutex_unlock(&usbtouch->pm_mutex);
+ return usb_submit_urb(usbtouch->irq, GFP_NOIO);
- return result;
+ return 0;
}
static int usbtouch_reset_resume(struct usb_interface *intf)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- int err = 0;
+ int err;
/* reinit the device */
if (usbtouch->type->init) {
@@ -1613,12 +1411,12 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
}
/* restart IO if needed */
- mutex_lock(&usbtouch->pm_mutex);
+ guard(mutex)(&usbtouch->pm_mutex);
+
if (usbtouch->is_open)
- err = usb_submit_urb(usbtouch->irq, GFP_NOIO);
- mutex_unlock(&usbtouch->pm_mutex);
+ return usb_submit_urb(usbtouch->irq, GFP_NOIO);
- return err;
+ return 0;
}
static void usbtouch_free_buffers(struct usb_device *udev,
@@ -1648,14 +1446,12 @@ static int usbtouch_probe(struct usb_interface *intf,
struct input_dev *input_dev;
struct usb_endpoint_descriptor *endpoint;
struct usb_device *udev = interface_to_usbdev(intf);
- struct usbtouch_device_info *type;
+ const struct usbtouch_device_info *type;
int err = -ENOMEM;
/* some devices are ignored */
- if (id->driver_info == DEVTYPE_IGNORE)
- return -ENODEV;
-
- if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info))
+ type = (const struct usbtouch_device_info *)id->driver_info;
+ if (!type)
return -ENODEV;
endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
@@ -1668,11 +1464,7 @@ static int usbtouch_probe(struct usb_interface *intf,
goto out_free;
mutex_init(&usbtouch->pm_mutex);
-
- type = &usbtouch_dev_info[id->driver_info];
usbtouch->type = type;
- if (!type->process_pkt)
- type->process_pkt = usbtouch_process_pkt;
usbtouch->data_size = type->rept_size;
if (type->get_pkt_len) {
@@ -1696,6 +1488,9 @@ static int usbtouch_probe(struct usb_interface *intf,
usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL);
if (!usbtouch->buffer)
goto out_free_buffers;
+ usbtouch->process_pkt = usbtouch_process_multi;
+ } else {
+ usbtouch->process_pkt = usbtouch_process_pkt;
}
usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL);
@@ -1842,6 +1637,150 @@ static void usbtouch_disconnect(struct usb_interface *intf)
kfree(usbtouch);
}
+static const struct attribute_group *usbtouch_groups[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_3M
+ &mtouch_attr_group,
+#endif
+ NULL
+};
+
+static const struct usb_device_id usbtouch_devices[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
+ /* ignore the HID capable devices, handled by usbhid */
+ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0001, USB_INTERFACE_CLASS_HID),
+ .driver_info = 0 },
+ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0002, USB_INTERFACE_CLASS_HID),
+ .driver_info = 0 },
+
+ /* normal device IDs */
+ { USB_DEVICE(0x3823, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x3823, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0123, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0eef, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0eef, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x1234, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x1234, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
+ { USB_DEVICE(0x134c, 0x0001),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0002),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0003),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0004),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_3M
+ { USB_DEVICE(0x0596, 0x0001),
+ .driver_info = (kernel_ulong_t)&mtouch_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ITM
+ { USB_DEVICE(0x0403, 0xf9e9),
+ .driver_info = (kernel_ulong_t)&itm_dev_info },
+ { USB_DEVICE(0x16e3, 0xf9e9),
+ .driver_info = (kernel_ulong_t)&itm_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
+ { USB_DEVICE(0x1234, 0x5678),
+ .driver_info = (kernel_ulong_t)&eturbo_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
+ { USB_DEVICE(0x0637, 0x0001),
+ .driver_info = (kernel_ulong_t)&gunze_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
+ { USB_DEVICE(0x0afa, 0x03e8),
+ .driver_info = (kernel_ulong_t)&dmc_tsc10_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ { USB_DEVICE(0x255e, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x595a, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x6615, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x6615, 0x0012),
+ .driver_info = (kernel_ulong_t)&irtouch_hires_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
+ { USB_DEVICE(0x1391, 0x1000),
+ .driver_info = (kernel_ulong_t)&idealtek_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
+ { USB_DEVICE(0x0dfc, 0x0001),
+ .driver_info = (kernel_ulong_t)&general_touch_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
+ { USB_DEVICE(0x08f2, 0x007f),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+ { USB_DEVICE(0x08f2, 0x00ce),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+ { USB_DEVICE(0x08f2, 0x00f4),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
+ { USB_DEVICE(0x0f92, 0x0001),
+ .driver_info = (kernel_ulong_t)&jastec_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_E2I
+ { USB_DEVICE(0x1ac7, 0x0001),
+ .driver_info = (kernel_ulong_t)&e2i_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
+ { USB_DEVICE(0x14c8, 0x0003),
+ .driver_info = (kernel_ulong_t)&zytronic_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
+ /* TC5UH */
+ { USB_DEVICE(0x0664, 0x0309),
+ .driver_info = (kernel_ulong_t)&tc45usb_dev_info },
+ /* TC4UM */
+ { USB_DEVICE(0x0664, 0x0306),
+ .driver_info = (kernel_ulong_t)&tc45usb_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
+ /* data interface only */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00),
+ .driver_info = (kernel_ulong_t)&nexio_dev_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00),
+ .driver_info = (kernel_ulong_t)&nexio_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ { USB_DEVICE(0x04e7, 0x0020),
+ .driver_info = (kernel_ulong_t)&elo_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
+ { USB_DEVICE(0x7374, 0x0001),
+ .driver_info = (kernel_ulong_t)&etouch_dev_info },
+#endif
+
+ { }
+};
MODULE_DEVICE_TABLE(usb, usbtouch_devices);
static struct usb_driver usbtouch_driver = {
@@ -1852,6 +1791,7 @@ static struct usb_driver usbtouch_driver = {
.resume = usbtouch_resume,
.reset_resume = usbtouch_reset_resume,
.id_table = usbtouch_devices,
+ .dev_groups = usbtouch_groups,
.supports_autosuspend = 1,
};
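The new .dev_groups entry above hands the 3M calibration attributes to the driver core, which creates them before probe() runs and removes them after remove(), replacing manual sysfs_create_group()/sysfs_remove_group() calls and their error handling. A sketch of the supporting boilerplate, assuming a read-only attribute (names are illustrative):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t calibrate_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 0);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(calibrate);

	static struct attribute *example_attrs[] = {
		&dev_attr_calibrate.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(example);

	/* in the driver definition: .dev_groups = example_groups, */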
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index fdf2d1e770c8..4b8c4ebfff96 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -9,21 +9,20 @@
* Author: Pieter Truter<ptruter@intrinsyc.com>
*/
-#include <linux/module.h>
-#include <linux/hrtimer.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
#include <linux/device.h>
-#include <linux/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/platform_data/zforce_ts.h>
-#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
#define WAIT_TIMEOUT msecs_to_jiffies(1000)
@@ -97,9 +96,7 @@ struct zforce_point {
* @suspending in the process of going to suspend (don't emit wakeup
* events for commands executed to suspend the device)
* @suspended device suspended
- * @access_mutex serialize i2c-access, to keep multipart reads together
* @command_done completion to wait for the command result
- * @command_mutex serialize commands sent to the ic
* @command_waiting the id of the command that is currently waiting
* for a result
* @command_result returned result of the command
@@ -108,11 +105,8 @@ struct zforce_ts {
struct i2c_client *client;
struct input_dev *input;
struct touchscreen_properties prop;
- const struct zforce_ts_platdata *pdata;
char phys[32];
- struct regulator *reg_vdd;
-
struct gpio_desc *gpio_int;
struct gpio_desc *gpio_rst;
@@ -126,10 +120,7 @@ struct zforce_ts {
u16 version_build;
u16 version_rev;
- struct mutex access_mutex;
-
struct completion command_done;
- struct mutex command_mutex;
int command_waiting;
int command_result;
};
@@ -146,9 +137,7 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
buf[1] = 1; /* data size, command only */
buf[2] = cmd;
- mutex_lock(&ts->access_mutex);
ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
- mutex_unlock(&ts->access_mutex);
if (ret < 0) {
dev_err(&client->dev, "i2c send data request error: %d\n", ret);
return ret;
@@ -157,59 +146,36 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
return 0;
}
-static void zforce_reset_assert(struct zforce_ts *ts)
-{
- gpiod_set_value_cansleep(ts->gpio_rst, 1);
-}
-
-static void zforce_reset_deassert(struct zforce_ts *ts)
-{
- gpiod_set_value_cansleep(ts->gpio_rst, 0);
-}
-
static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
{
struct i2c_client *client = ts->client;
int ret;
- ret = mutex_trylock(&ts->command_mutex);
- if (!ret) {
- dev_err(&client->dev, "already waiting for a command\n");
- return -EBUSY;
- }
-
dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
buf[1], buf[2]);
ts->command_waiting = buf[2];
- mutex_lock(&ts->access_mutex);
ret = i2c_master_send(client, buf, len);
- mutex_unlock(&ts->access_mutex);
if (ret < 0) {
dev_err(&client->dev, "i2c send data request error: %d\n", ret);
- goto unlock;
+ return ret;
}
dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
- if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
- ret = -ETIME;
- goto unlock;
- }
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+ return -ETIME;
ret = ts->command_result;
-
-unlock:
- mutex_unlock(&ts->command_mutex);
- return ret;
+ return 0;
}
static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
{
struct i2c_client *client = ts->client;
char buf[3];
- int ret;
+ int error;
dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
@@ -217,10 +183,11 @@ static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
buf[1] = 1; /* data size, command only */
buf[2] = cmd;
- ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
- if (ret < 0) {
- dev_err(&client->dev, "i2c send data request error: %d\n", ret);
- return ret;
+ error = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+ if (error) {
+ dev_err(&client->dev, "i2c send data request error: %d\n",
+ error);
+ return error;
}
return 0;
@@ -268,40 +235,40 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
static int zforce_start(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- int ret;
+ int error;
dev_dbg(&client->dev, "starting device\n");
- ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
- if (ret) {
- dev_err(&client->dev, "Unable to initialize, %d\n", ret);
- return ret;
+ error = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (error) {
+ dev_err(&client->dev, "Unable to initialize, %d\n", error);
+ return error;
}
- ret = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
- if (ret) {
- dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
- goto error;
+ error = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
+ if (error) {
+ dev_err(&client->dev, "Unable to set resolution, %d\n", error);
+ goto err_deactivate;
}
- ret = zforce_scan_frequency(ts, 10, 50, 50);
- if (ret) {
+ error = zforce_scan_frequency(ts, 10, 50, 50);
+ if (error) {
dev_err(&client->dev, "Unable to set scan frequency, %d\n",
- ret);
- goto error;
+ error);
+ goto err_deactivate;
}
- ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
- if (ret) {
+ error = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+ if (error) {
dev_err(&client->dev, "Unable to set config\n");
- goto error;
+ goto err_deactivate;
}
/* start sending touch events */
- ret = zforce_command(ts, COMMAND_DATAREQUEST);
- if (ret) {
+ error = zforce_command(ts, COMMAND_DATAREQUEST);
+ if (error) {
dev_err(&client->dev, "Unable to request data\n");
- goto error;
+ goto err_deactivate;
}
/*
@@ -312,24 +279,24 @@ static int zforce_start(struct zforce_ts *ts)
return 0;
-error:
+err_deactivate:
zforce_command_wait(ts, COMMAND_DEACTIVATE);
- return ret;
+ return error;
}
static int zforce_stop(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- int ret;
+ int error;
dev_dbg(&client->dev, "stopping device\n");
/* Deactivates touch sensing and puts the device into sleep. */
- ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
- if (ret != 0) {
+ error = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ if (error) {
dev_err(&client->dev, "could not deactivate device, %d\n",
- ret);
- return ret;
+ error);
+ return error;
}
return 0;
@@ -340,6 +307,7 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
struct i2c_client *client = ts->client;
struct zforce_point point;
int count, i, num = 0;
+ u8 *p;
count = payload[0];
if (count > ZFORCE_REPORT_POINTS) {
@@ -350,10 +318,10 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
}
for (i = 0; i < count; i++) {
- point.coord_x =
- payload[9 * i + 2] << 8 | payload[9 * i + 1];
- point.coord_y =
- payload[9 * i + 4] << 8 | payload[9 * i + 3];
+ p = &payload[i * 9 + 1];
+
+ point.coord_x = get_unaligned_le16(&p[0]);
+ point.coord_y = get_unaligned_le16(&p[2]);
if (point.coord_x > ts->prop.max_x ||
point.coord_y > ts->prop.max_y) {
@@ -362,18 +330,16 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x0f;
- point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ point.state = p[4] & 0x0f;
+ point.id = (p[4] & 0xf0) >> 4;
/* determine touch major, minor and orientation */
- point.area_major = max(payload[9 * i + 6],
- payload[9 * i + 7]);
- point.area_minor = min(payload[9 * i + 6],
- payload[9 * i + 7]);
- point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+ point.area_major = max(p[5], p[6]);
+ point.area_minor = min(p[5], p[6]);
+ point.orientation = p[5] > p[6];
- point.pressure = payload[9 * i + 8];
- point.prblty = payload[9 * i + 9];
+ point.pressure = p[7];
+ point.prblty = p[8];
dev_dbg(&client->dev,
"point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
@@ -386,10 +352,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
/* the zforce id starts with "1", so needs to be decreased */
input_mt_slot(ts->input, point.id - 1);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
- point.state != STATE_UP);
-
- if (point.state != STATE_UP) {
+ if (input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
+ point.state != STATE_UP)) {
touchscreen_report_pos(ts->input, &ts->prop,
point.coord_x, point.coord_y,
true);
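The same hunk folds the active/inactive test into input_mt_report_slot_state(), whose return value is true when it reported an active contact, so the position update can be gated on the call itself instead of re-testing point.state. Sketch:

	#include <linux/input/mt.h>

	static void example_report(struct input_dev *input, int slot,
				   bool active, int x, int y)
	{
		input_mt_slot(input, slot);

		/* true only if the contact is active and was reported */
		if (input_mt_report_slot_state(input, MT_TOOL_FINGER, active)) {
			input_report_abs(input, ABS_MT_POSITION_X, x);
			input_report_abs(input, ABS_MT_POSITION_Y, y);
		}
	}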
@@ -417,41 +381,35 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
struct i2c_client *client = ts->client;
int ret;
- mutex_lock(&ts->access_mutex);
-
/* read 2 byte message header */
ret = i2c_master_recv(client, buf, 2);
if (ret < 0) {
dev_err(&client->dev, "error reading header: %d\n", ret);
- goto unlock;
+ return ret;
}
if (buf[PAYLOAD_HEADER] != FRAME_START) {
dev_err(&client->dev, "invalid frame start: %d\n", buf[0]);
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
if (buf[PAYLOAD_LENGTH] == 0) {
dev_err(&client->dev, "invalid payload length: %d\n",
buf[PAYLOAD_LENGTH]);
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
/* read the message */
ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]);
if (ret < 0) {
dev_err(&client->dev, "error reading payload: %d\n", ret);
- goto unlock;
+ return ret;
}
dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n",
buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]);
-unlock:
- mutex_unlock(&ts->access_mutex);
- return ret;
+ return 0;
}
static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
@@ -482,9 +440,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
- int ret;
+ int error;
u8 payload_buffer[FRAME_MAXSIZE];
u8 *payload;
+ bool suspending;
/*
* When still suspended, return.
@@ -498,7 +457,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
dev_dbg(&client->dev, "handling interrupt\n");
/* Don't emit wakeup events from commands run by zforce_suspend */
- if (!ts->suspending && device_may_wakeup(&client->dev))
+ suspending = READ_ONCE(ts->suspending);
+ if (!suspending && device_may_wakeup(&client->dev))
pm_stay_awake(&client->dev);
/*
@@ -511,10 +471,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* no IRQ any more)
*/
do {
- ret = zforce_read_packet(ts, payload_buffer);
- if (ret < 0) {
+ error = zforce_read_packet(ts, payload_buffer);
+ if (error) {
dev_err(&client->dev,
- "could not read packet, ret: %d\n", ret);
+ "could not read packet, ret: %d\n", error);
break;
}
@@ -526,7 +486,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* Always report touch-events received while
* suspending, when being a wakeup source
*/
- if (ts->suspending && device_may_wakeup(&client->dev))
+ if (suspending && device_may_wakeup(&client->dev))
pm_wakeup_event(&client->dev, 500);
zforce_touch_event(ts, &payload[RESPONSE_DATA]);
break;
@@ -550,14 +510,15 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* Version Payload Results
* [2:major] [2:minor] [2:build] [2:rev]
*/
- ts->version_major = (payload[RESPONSE_DATA + 1] << 8) |
- payload[RESPONSE_DATA];
- ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) |
- payload[RESPONSE_DATA + 2];
- ts->version_build = (payload[RESPONSE_DATA + 5] << 8) |
- payload[RESPONSE_DATA + 4];
- ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) |
- payload[RESPONSE_DATA + 6];
+ ts->version_major =
+ get_unaligned_le16(&payload[RESPONSE_DATA]);
+ ts->version_minor =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 2]);
+ ts->version_build =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 4]);
+ ts->version_rev =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 6]);
+
dev_dbg(&ts->client->dev,
"Firmware Version %04x:%04x %04x:%04x\n",
ts->version_major, ts->version_minor,
@@ -579,7 +540,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
}
} while (gpiod_get_value_cansleep(ts->gpio_int));
- if (!ts->suspending && device_may_wakeup(&client->dev))
+ if (!suspending && device_may_wakeup(&client->dev))
pm_relax(&client->dev);
dev_dbg(&client->dev, "finished interrupt\n");
@@ -598,24 +559,20 @@ static void zforce_input_close(struct input_dev *dev)
{
struct zforce_ts *ts = input_get_drvdata(dev);
struct i2c_client *client = ts->client;
- int ret;
+ int error;
- ret = zforce_stop(ts);
- if (ret)
+ error = zforce_stop(ts);
+ if (error)
dev_warn(&client->dev, "stopping zforce failed\n");
-
- return;
}
-static int zforce_suspend(struct device *dev)
+static int __zforce_suspend(struct zforce_ts *ts)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct i2c_client *client = ts->client;
struct input_dev *input = ts->input;
- int ret = 0;
+ int error;
- mutex_lock(&input->mutex);
- ts->suspending = true;
+ guard(mutex)(&input->mutex);
/*
* When configured as a wakeup source device should always wake
@@ -626,9 +583,9 @@ static int zforce_suspend(struct device *dev)
/* Need to start device, if not open, to be a wakeup source. */
if (!input_device_enabled(input)) {
- ret = zforce_start(ts);
- if (ret)
- goto unlock;
+ error = zforce_start(ts);
+ if (error)
+ return error;
}
enable_irq_wake(client->irq);
@@ -636,18 +593,30 @@ static int zforce_suspend(struct device *dev)
dev_dbg(&client->dev,
"suspend without being a wakeup source\n");
- ret = zforce_stop(ts);
- if (ret)
- goto unlock;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
disable_irq(client->irq);
}
ts->suspended = true;
+ return 0;
+}
+
+static int zforce_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ int ret;
+
+ WRITE_ONCE(ts->suspending, true);
+ smp_mb();
-unlock:
- ts->suspending = false;
- mutex_unlock(&input->mutex);
+ ret = __zforce_suspend(ts);
+
+ smp_mb();
+ WRITE_ONCE(ts->suspending, false);
return ret;
}
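With the command and access mutexes gone, ts->suspending becomes a plain flag shared between zforce_suspend() and the IRQ thread. The writer above brackets __zforce_suspend() with WRITE_ONCE() plus full barriers, and the IRQ thread samples the flag once with READ_ONCE() so its pm_stay_awake()/pm_relax() decisions stay consistent within one interrupt. A reduced sketch of that handshake (illustrative names; it shows the ordering intent of this patch, not a general-purpose recipe):

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	static bool example_suspending;

	static int example_suspend(void)
	{
		WRITE_ONCE(example_suspending, true);
		smp_mb();	/* flag visible before suspend commands run */

		/* ... commands here must not emit wakeup events ... */

		smp_mb();	/* suspend work finished before clearing */
		WRITE_ONCE(example_suspending, false);
		return 0;
	}

	static void example_irq_thread(void)
	{
		/* sample once so every check in this handler agrees */
		bool suspending = READ_ONCE(example_suspending);

		if (!suspending) {
			/* emit wakeup events */
		}
	}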
@@ -657,9 +626,9 @@ static int zforce_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
struct input_dev *input = ts->input;
- int ret = 0;
+ int error;
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
ts->suspended = false;
@@ -670,24 +639,21 @@ static int zforce_resume(struct device *dev)
/* need to stop device if it was not open on suspend */
if (!input_device_enabled(input)) {
- ret = zforce_stop(ts);
- if (ret)
- goto unlock;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
}
} else if (input_device_enabled(input)) {
dev_dbg(&client->dev, "resume without being a wakeup source\n");
enable_irq(client->irq);
- ret = zforce_start(ts);
- if (ret < 0)
- goto unlock;
+ error = zforce_start(ts);
+ if (error)
+ return error;
}
-unlock:
- mutex_unlock(&input->mutex);
-
- return ret;
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
@@ -696,46 +662,27 @@ static void zforce_reset(void *data)
{
struct zforce_ts *ts = data;
- zforce_reset_assert(ts);
-
+ gpiod_set_value_cansleep(ts->gpio_rst, 1);
udelay(10);
-
- if (!IS_ERR(ts->reg_vdd))
- regulator_disable(ts->reg_vdd);
}
-static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
+static void zforce_ts_parse_legacy_properties(struct zforce_ts *ts)
{
- struct zforce_ts_platdata *pdata;
- struct device_node *np = dev->of_node;
+ u32 x_max = 0;
+ u32 y_max = 0;
- if (!np)
- return ERR_PTR(-ENOENT);
+ device_property_read_u32(&ts->client->dev, "x-size", &x_max);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, x_max, 0, 0);
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "failed to allocate platform data\n");
- return ERR_PTR(-ENOMEM);
- }
-
- of_property_read_u32(np, "x-size", &pdata->x_max);
- of_property_read_u32(np, "y-size", &pdata->y_max);
-
- return pdata;
+ device_property_read_u32(&ts->client->dev, "y-size", &y_max);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, y_max, 0, 0);
}
static int zforce_probe(struct i2c_client *client)
{
- const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
struct zforce_ts *ts;
struct input_dev *input_dev;
- int ret;
-
- if (!pdata) {
- pdata = zforce_parse_dt(&client->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
+ int error;
ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
if (!ts)
@@ -743,22 +690,18 @@ static int zforce_probe(struct i2c_client *client)
ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
- if (IS_ERR(ts->gpio_rst)) {
- ret = PTR_ERR(ts->gpio_rst);
- dev_err(&client->dev,
- "failed to request reset GPIO: %d\n", ret);
- return ret;
- }
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request reset GPIO\n");
if (ts->gpio_rst) {
ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
GPIOD_IN);
- if (IS_ERR(ts->gpio_int)) {
- ret = PTR_ERR(ts->gpio_int);
- dev_err(&client->dev,
- "failed to request interrupt GPIO: %d\n", ret);
- return ret;
- }
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request interrupt GPIO\n");
} else {
/*
* Deprecated GPIO handling for compatibility
@@ -768,66 +711,45 @@ static int zforce_probe(struct i2c_client *client)
/* INT GPIO */
ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
GPIOD_IN);
- if (IS_ERR(ts->gpio_int)) {
- ret = PTR_ERR(ts->gpio_int);
- dev_err(&client->dev,
- "failed to request interrupt GPIO: %d\n", ret);
- return ret;
- }
+
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request interrupt GPIO\n");
/* RST GPIO */
ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
GPIOD_OUT_HIGH);
- if (IS_ERR(ts->gpio_rst)) {
- ret = PTR_ERR(ts->gpio_rst);
- dev_err(&client->dev,
- "failed to request reset GPIO: %d\n", ret);
- return ret;
- }
- }
-
- ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
- if (IS_ERR(ts->reg_vdd)) {
- ret = PTR_ERR(ts->reg_vdd);
- if (ret == -EPROBE_DEFER)
- return ret;
- } else {
- ret = regulator_enable(ts->reg_vdd);
- if (ret)
- return ret;
-
- /*
- * according to datasheet add 100us grace time after regular
- * regulator enable delay.
- */
- udelay(100);
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request reset GPIO\n");
}
- ret = devm_add_action(&client->dev, zforce_reset, ts);
- if (ret) {
- dev_err(&client->dev, "failed to register reset action, %d\n",
- ret);
+ error = devm_regulator_get_enable(&client->dev, "vdd");
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request vdd supply\n");
- /* hereafter the regulator will be disabled by the action */
- if (!IS_ERR(ts->reg_vdd))
- regulator_disable(ts->reg_vdd);
+ /*
+ * According to datasheet add 100us grace time after regular
+ * regulator enable delay.
+ */
+ usleep_range(100, 200);
- return ret;
- }
+ error = devm_add_action_or_reset(&client->dev, zforce_reset, ts);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to register reset action\n");
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "could not allocate input device\n");
- return -ENOMEM;
- }
-
- mutex_init(&ts->access_mutex);
- mutex_init(&ts->command_mutex);
+ if (!input_dev)
+ return dev_err_probe(&client->dev, -ENOMEM,
+ "could not allocate input device\n");
- ts->pdata = pdata;
ts->client = client;
ts->input = input_dev;
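The probe() conversion above leans on three helpers: dev_err_probe(), which logs (quietly for -EPROBE_DEFER) and returns the error in one expression; devm_regulator_get_enable(), which gets and enables the supply and disables it automatically on unbind, making the reg_vdd field unnecessary; and devm_add_action_or_reset(), which invokes the callback immediately if registration fails, removing the hand-rolled cleanup branch. A condensed sketch of the three together (illustrative names):

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>
	#include <linux/regulator/consumer.h>

	static void example_reset(void *data)
	{
		gpiod_set_value_cansleep(data, 1);	/* assert reset */
	}

	static int example_probe(struct device *dev)
	{
		struct gpio_desc *rst;
		int error;

		rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		error = PTR_ERR_OR_ZERO(rst);
		if (error)
			return dev_err_probe(dev, error,
					     "failed to request reset GPIO\n");

		error = devm_regulator_get_enable(dev, "vdd");
		if (error)
			return dev_err_probe(dev, error,
					     "failed to request vdd supply\n");

		/* on failure the action runs immediately, undoing the state */
		return devm_add_action_or_reset(dev, example_reset, rst);
	}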
@@ -838,28 +760,21 @@ static int zforce_probe(struct i2c_client *client)
input_dev->open = zforce_input_open;
input_dev->close = zforce_input_close;
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_SYN, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
-
- /* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
- pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
- pdata->y_max, 0, 0);
-
+ zforce_ts_parse_legacy_properties(ts);
touchscreen_parse_properties(input_dev, true, &ts->prop);
- if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
- dev_err(&client->dev, "no size specified\n");
- return -EINVAL;
- }
+ if (ts->prop.max_x == 0 || ts->prop.max_y == 0)
+ return dev_err_probe(&client->dev, -EINVAL, "no size specified\n");
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
ZFORCE_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
ZFORCE_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT);
+
+ error = input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS,
+ INPUT_MT_DIRECT);
+ if (error)
+ return error;
input_set_drvdata(ts->input, ts);
@@ -872,57 +787,51 @@ static int zforce_probe(struct i2c_client *client)
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- zforce_irq, zforce_irq_thread,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- input_dev->name, ts);
- if (ret) {
- dev_err(&client->dev, "irq %d request failed\n", client->irq);
- return ret;
- }
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ zforce_irq, zforce_irq_thread,
+ IRQF_ONESHOT, input_dev->name, ts);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "irq %d request failed\n", client->irq);
i2c_set_clientdata(client, ts);
/* let the controller boot */
- zforce_reset_deassert(ts);
+ gpiod_set_value_cansleep(ts->gpio_rst, 0);
ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
dev_warn(&client->dev, "bootcomplete timed out\n");
/* need to start device to get version information */
- ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
- if (ret) {
- dev_err(&client->dev, "unable to initialize, %d\n", ret);
- return ret;
- }
+ error = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (error)
+ return dev_err_probe(&client->dev, error, "unable to initialize\n");
/* this gets the firmware version among other information */
- ret = zforce_command_wait(ts, COMMAND_STATUS);
- if (ret < 0) {
- dev_err(&client->dev, "couldn't get status, %d\n", ret);
+ error = zforce_command_wait(ts, COMMAND_STATUS);
+ if (error) {
+ dev_err_probe(&client->dev, error, "couldn't get status\n");
zforce_stop(ts);
- return ret;
+ return error;
}
/* stop device and put it into sleep until it is opened */
- ret = zforce_stop(ts);
- if (ret < 0)
- return ret;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
device_set_wakeup_capable(&client->dev, true);
- ret = input_register_device(input_dev);
- if (ret) {
- dev_err(&client->dev, "could not register input device, %d\n",
- ret);
- return ret;
- }
+ error = input_register_device(input_dev);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "could not register input device\n");
return 0;
}
-static struct i2c_device_id zforce_idtable[] = {
+static const struct i2c_device_id zforce_idtable[] = {
{ "zforce-ts" },
{ }
};
@@ -941,6 +850,7 @@ static struct i2c_driver zforce_driver = {
.name = "zforce-ts",
.pm = pm_sleep_ptr(&zforce_pm_ops),
.of_match_table = of_match_ptr(zforce_dt_idtable),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = zforce_probe,
.id_table = zforce_idtable,
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 1b4807ba4624..52b3950460e2 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -34,7 +35,13 @@
#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
#define ZINITIX_TOUCH_MODE 0x0010
+
#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_CHIP_BTX0X_MASK 0xF0F0
+#define ZINITIX_CHIP_BT4X2 0x4020
+#define ZINITIX_CHIP_BT4X3 0x4030
+#define ZINITIX_CHIP_BT4X4 0x4040
+
#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
@@ -62,7 +69,11 @@
#define ZINITIX_Y_RESOLUTION 0x00C1
#define ZINITIX_POINT_STATUS_REG 0x0080
-#define ZINITIX_ICON_STATUS_REG 0x00AA
+
+#define ZINITIX_BT4X2_ICON_STATUS_REG 0x009A
+#define ZINITIX_BT4X3_ICON_STATUS_REG 0x00A0
+#define ZINITIX_BT4X4_ICON_STATUS_REG 0x00A0
+#define ZINITIX_BT5XX_ICON_STATUS_REG 0x00AA
#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
@@ -119,6 +130,7 @@
#define DEFAULT_TOUCH_POINT_MODE 2
#define MAX_SUPPORTED_FINGER_NUM 5
+#define MAX_SUPPORTED_BUTTON_NUM 8
#define CHIP_ON_DELAY 15 // ms
#define FIRMWARE_ON_DELAY 40 // ms
@@ -146,6 +158,13 @@ struct bt541_ts_data {
struct touchscreen_properties prop;
struct regulator_bulk_data supplies[2];
u32 zinitix_mode;
+ u32 keycodes[MAX_SUPPORTED_BUTTON_NUM];
+ int num_keycodes;
+ bool have_versioninfo;
+ u16 chip_revision;
+ u16 firmware_version;
+ u16 regdata_version;
+ u16 icon_status_reg;
};
static int zinitix_read_data(struct i2c_client *client,
@@ -190,11 +209,25 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
return 0;
}
+static u16 zinitix_get_u16_reg(struct bt541_ts_data *bt541, u16 vreg)
+{
+ struct i2c_client *client = bt541->client;
+ int error;
+ __le16 val;
+
+ error = zinitix_read_data(client, vreg, (void *)&val, 2);
+ if (error)
+ return U16_MAX;
+
+ return le16_to_cpu(val);
+}
+
static int zinitix_init_touch(struct bt541_ts_data *bt541)
{
struct i2c_client *client = bt541->client;
int i;
int error;
+ u16 int_flags;
error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
@@ -202,6 +235,47 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
return error;
}
+ /*
+ * Read and cache the chip revision and firmware version the first time
+ * we get here.
+ */
+ if (!bt541->have_versioninfo) {
+ bt541->chip_revision = zinitix_get_u16_reg(bt541,
+ ZINITIX_CHIP_REVISION);
+ bt541->firmware_version = zinitix_get_u16_reg(bt541,
+ ZINITIX_FIRMWARE_VERSION);
+ bt541->regdata_version = zinitix_get_u16_reg(bt541,
+ ZINITIX_DATA_VERSION_REG);
+ bt541->have_versioninfo = true;
+
+ dev_dbg(&client->dev,
+ "chip revision %04x firmware version %04x regdata version %04x\n",
+ bt541->chip_revision, bt541->firmware_version,
+ bt541->regdata_version);
+
+ /*
+ * Determine the "icon" status register which varies by the
+ * chip.
+ */
+ switch (bt541->chip_revision & ZINITIX_CHIP_BTX0X_MASK) {
+ case ZINITIX_CHIP_BT4X2:
+ bt541->icon_status_reg = ZINITIX_BT4X2_ICON_STATUS_REG;
+ break;
+
+ case ZINITIX_CHIP_BT4X3:
+ bt541->icon_status_reg = ZINITIX_BT4X3_ICON_STATUS_REG;
+ break;
+
+ case ZINITIX_CHIP_BT4X4:
+ bt541->icon_status_reg = ZINITIX_BT4X4_ICON_STATUS_REG;
+ break;
+
+ default:
+ bt541->icon_status_reg = ZINITIX_BT5XX_ICON_STATUS_REG;
+ break;
+ }
+ }
+
error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
@@ -225,6 +299,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
if (error)
return error;
+ error = zinitix_write_u16(client, ZINITIX_BUTTON_SUPPORTED_NUM,
+ bt541->num_keycodes);
+ if (error)
+ return error;
+
error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
@@ -235,9 +314,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
if (error)
return error;
- error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
- BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
- BIT_UP);
+ int_flags = BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE | BIT_UP;
+ if (bt541->num_keycodes)
+ int_flags |= BIT_ICON_EVENT;
+
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, int_flags);
if (error)
return error;
@@ -350,12 +431,22 @@ static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
}
}
+static void zinitix_report_keys(struct bt541_ts_data *bt541, u16 icon_events)
+{
+ int i;
+
+ for (i = 0; i < bt541->num_keycodes; i++)
+ input_report_key(bt541->input_dev,
+ bt541->keycodes[i], icon_events & BIT(i));
+}
+
static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
{
struct bt541_ts_data *bt541 = bt541_handler;
struct i2c_client *client = bt541->client;
struct touch_event touch_event;
unsigned long finger_mask;
+ __le16 icon_events;
int error;
int i;
@@ -368,6 +459,17 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
goto out;
}
+ if (le16_to_cpu(touch_event.status) & BIT_ICON_EVENT) {
+ error = zinitix_read_data(bt541->client, bt541->icon_status_reg,
+ &icon_events, sizeof(icon_events));
+ if (error) {
+ dev_err(&client->dev, "Failed to read icon events\n");
+ goto out;
+ }
+
+ zinitix_report_keys(bt541, le16_to_cpu(icon_events));
+ }
+
finger_mask = touch_event.finger_mask;
for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) {
const struct point_coord *p = &touch_event.point_coord[i];
@@ -453,6 +555,7 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541)
{
struct input_dev *input_dev;
int error;
+ int i;
input_dev = devm_input_allocate_device(&bt541->client->dev);
if (!input_dev) {
@@ -470,6 +573,14 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541)
input_dev->open = zinitix_input_open;
input_dev->close = zinitix_input_close;
+ if (bt541->num_keycodes) {
+ input_dev->keycode = bt541->keycodes;
+ input_dev->keycodemax = bt541->num_keycodes;
+ input_dev->keycodesize = sizeof(bt541->keycodes[0]);
+ for (i = 0; i < bt541->num_keycodes; i++)
+ input_set_capability(input_dev, EV_KEY, bt541->keycodes[i]);
+ }
+
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
@@ -534,6 +645,21 @@ static int zinitix_ts_probe(struct i2c_client *client)
return error;
}
+ bt541->num_keycodes = device_property_count_u32(&client->dev, "linux,keycodes");
+ if (bt541->num_keycodes > ARRAY_SIZE(bt541->keycodes)) {
+ dev_err(&client->dev, "too many keys defined (%d)\n", bt541->num_keycodes);
+ return -EINVAL;
+ }
+
+ error = device_property_read_u32_array(&client->dev, "linux,keycodes",
+ bt541->keycodes,
+ bt541->num_keycodes);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to parse \"linux,keycodes\" property: %d\n", error);
+ return error;
+ }
+
error = zinitix_init_input_dev(bt541);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index a82f10054aec..b3aa1f5d5321 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -151,7 +151,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
def_bool ARM64 || X86 || S390
- select DMA_OPS
+ select DMA_OPS_HELPERS
select IOMMU_API
select IOMMU_IOVA
select IRQ_MSI_IOMMU
@@ -424,6 +424,17 @@ config ARM_SMMU_V3_KUNIT_TEST
Enable this option to unit-test arm-smmu-v3 driver functions.
If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+ Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
+ CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
+ support, except with virtualization capabilities.
+
+ Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
+ CMDQ-V extension.
endif
config S390_IOMMU
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 2d5945c982bd..6386fa4556d9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -43,9 +43,10 @@ int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
+extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
-struct protection_domain *protection_domain_alloc(unsigned int type);
+struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
@@ -87,14 +88,10 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
-void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
-void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
ioasid_t pasid, u64 address, size_t size);
-void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
- ioasid_t pasid);
#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
@@ -121,11 +118,6 @@ static inline bool check_feature2(u64 mask)
return (amd_iommu_efr2 & mask);
}
-static inline int check_feature_gpt_level(void)
-{
- return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
static inline bool amd_iommu_gt_ppr_supported(void)
{
return (check_feature(FEATURE_GT) &&
@@ -143,19 +135,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
return phys_to_virt(__sme_clr(paddr));
}
-static inline
-void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
-{
- domain->iop.root = (u64 *)(root & PAGE_MASK);
- domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
-static inline
-void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
-{
- amd_iommu_domain_set_pt_root(domain, 0);
-}
-
static inline int get_pci_sbdf_id(struct pci_dev *pdev)
{
int seg = pci_domain_nr(pdev->bus);
@@ -185,7 +164,6 @@ static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
}
bool translation_pre_enabled(struct amd_iommu *iommu);
-bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
#ifdef CONFIG_DMI
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 2b76b5dedc1d..601fb4ee6900 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -8,6 +8,7 @@
#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H
+#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/mmu_notifier.h>
@@ -95,26 +96,21 @@
#define FEATURE_GA BIT_ULL(7)
#define FEATURE_HE BIT_ULL(8)
#define FEATURE_PC BIT_ULL(9)
-#define FEATURE_GATS_SHIFT (12)
-#define FEATURE_GATS_MASK (3ULL)
+#define FEATURE_GATS GENMASK_ULL(13, 12)
+#define FEATURE_GLX GENMASK_ULL(15, 14)
#define FEATURE_GAM_VAPIC BIT_ULL(21)
+#define FEATURE_PASMAX GENMASK_ULL(36, 32)
#define FEATURE_GIOSUP BIT_ULL(48)
#define FEATURE_HASUP BIT_ULL(49)
#define FEATURE_EPHSUP BIT_ULL(50)
#define FEATURE_HDSUP BIT_ULL(52)
#define FEATURE_SNP BIT_ULL(63)
-#define FEATURE_PASID_SHIFT 32
-#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
-
-#define FEATURE_GLXVAL_SHIFT 14
-#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
/* Extended Feature 2 Bits */
-#define FEATURE_SNPAVICSUP_SHIFT 5
-#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
- ((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
+ (FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
/* Note:
* The current driver only support 16-bit PASID.
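The feature-register cleanups above replace paired SHIFT/MASK constants with single GENMASK_ULL() field definitions read through FIELD_GET(), which derives the shift from the mask at compile time; AMD_IOMMU_PGSIZES is likewise rebuilt as a bitmap expression, GENMASK_ULL(51, 12) with the broken 512 GiB bit XORed out. A sketch of the field-access idiom using the GATS field defined above:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define FEATURE_GATS	GENMASK_ULL(13, 12)

	static unsigned int example_guest_pgtable_levels(u64 efr)
	{
		/* FIELD_GET() masks and shifts in one step */
		return FIELD_GET(FEATURE_GATS, efr);
	}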
@@ -294,8 +290,13 @@
* that we support.
*
* 512GB Pages are not supported due to a hardware bug
+ * Page sizes >= the 52 bit max physical address of the CPU are not supported.
*/
-#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+#define AMD_IOMMU_PGSIZES (GENMASK_ULL(51, 12) ^ SZ_512G)
+
+/* Special mode where page-sizes are limited to 4 KiB */
+#define AMD_IOMMU_PGSIZES_4K (PAGE_SIZE)
+
/* 4K, 2MB, 1G page sizes are supported */
#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
@@ -419,10 +420,6 @@
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
-#define DTE_GCR3_INDEX_A 0
-#define DTE_GCR3_INDEX_B 1
-#define DTE_GCR3_INDEX_C 1
-
#define DTE_GCR3_SHIFT_A 58
#define DTE_GCR3_SHIFT_B 16
#define DTE_GCR3_SHIFT_C 43
@@ -527,7 +524,7 @@ struct amd_irte_ops;
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define io_pgtable_to_data(x) \
- container_of((x), struct amd_io_pgtable, iop)
+ container_of((x), struct amd_io_pgtable, pgtbl)
#define io_pgtable_ops_to_data(x) \
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
@@ -537,7 +534,7 @@ struct amd_irte_ops;
struct protection_domain, iop)
#define io_pgtable_cfg_to_data(x) \
- container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+ container_of((x), struct amd_io_pgtable, pgtbl.cfg)
struct gcr3_tbl_info {
u64 *gcr3_tbl; /* Guest CR3 table */
@@ -547,8 +544,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
- struct io_pgtable_cfg pgtbl_cfg;
- struct io_pgtable iop;
+ struct io_pgtable pgtbl;
int mode;
u64 *root;
u64 *pgd; /* v2 pgtable pgd pointer */
@@ -580,7 +576,6 @@ struct protection_domain {
struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int nid; /* Node ID */
enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c89d85b54a1a..43131c3a2172 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -192,6 +192,8 @@ bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
+unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES;
+
/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
@@ -2042,14 +2044,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int glxval;
u64 pasmax;
- pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
- pasmax >>= FEATURE_PASID_SHIFT;
+ pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr);
iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
- glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
- glxval >>= FEATURE_GLXVAL_SHIFT;
+ glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr);
if (amd_iommu_max_glx_val == -1)
amd_iommu_max_glx_val = glxval;
@@ -3088,7 +3088,7 @@ static int __init early_amd_iommu_init(void)
/* 5 level guest page table */
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
- check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
+ FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
/* Disable any previously enabled IOMMUs */
@@ -3494,6 +3494,12 @@ static int __init parse_amd_iommu_options(char *str)
amd_iommu_pgtable = AMD_IOMMU_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
+ } else if (strncmp(str, "nohugepages", 11) == 0) {
+ pr_info("Restricting V1 page-sizes to 4KiB");
+ amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K;
+ } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) {
+ pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB");
+ amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
} else {
pr_notice("Unknown option - '%s'\n", str);
}
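The two new option branches above expose the restricted page-size bitmaps on the kernel command line; given the usual amd_iommu= option syntax handled by this parser, they would be used as:

	amd_iommu=nohugepages		(V1 page table, 4 KiB mappings only)
	amd_iommu=v2_pgsizes_only	(V1 page table, 4 KiB/2 MiB/1 GiB mappings)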
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 1074ee25064d..804b788f3f16 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -24,27 +24,6 @@
#include "amd_iommu.h"
#include "../iommu-pages.h"
-static void v1_tlb_flush_all(void *cookie)
-{
-}
-
-static void v1_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
-}
-
-static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
-{
-}
-
-static const struct iommu_flush_ops v1_flush_ops = {
- .tlb_flush_all = v1_tlb_flush_all,
- .tlb_flush_walk = v1_tlb_flush_walk,
- .tlb_add_page = v1_tlb_add_page,
-};
-
/*
* Helper function to get the first pte of a large mapping
*/
@@ -132,56 +111,40 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
}
}
-void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
- u64 *root, int mode)
-{
- u64 pt_root;
-
- /* lowest 3 bits encode pgtable mode */
- pt_root = mode & 7;
- pt_root |= (u64)root;
-
- amd_iommu_domain_set_pt_root(domain, pt_root);
-}
-
/*
* This function is used to add another level to an IO page table. Adding
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct amd_io_pgtable *pgtable,
unsigned long address,
gfp_t gfp)
{
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ struct protection_domain *domain =
+ container_of(pgtable, struct protection_domain, iop);
unsigned long flags;
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(domain->nid, gfp);
+ pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
if (!pte)
return false;
spin_lock_irqsave(&domain->lock, flags);
- if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+ if (address <= PM_LEVEL_SIZE(pgtable->mode))
goto out;
ret = false;
- if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
goto out;
- *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+ *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
- domain->iop.root = pte;
- domain->iop.mode += 1;
+ pgtable->root = pte;
+ pgtable->mode += 1;
amd_iommu_update_and_flush_device_table(domain);
- amd_iommu_domain_flush_complete(domain);
-
- /*
- * Device Table needs to be updated and flushed before the new root can
- * be published.
- */
- amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
pte = NULL;
ret = true;
@@ -193,30 +156,31 @@ out:
return ret;
}
-static u64 *alloc_pte(struct protection_domain *domain,
+static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
unsigned long address,
unsigned long page_size,
u64 **pte_page,
gfp_t gfp,
bool *updated)
{
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
int level, end_lvl;
u64 *pte, *page;
BUG_ON(!is_power_of_2(page_size));
- while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+ while (address > PM_LEVEL_SIZE(pgtable->mode)) {
/*
* Return an error if there is no memory to update the
* page-table.
*/
- if (!increase_address_space(domain, address, gfp))
+ if (!increase_address_space(pgtable, address, gfp))
return NULL;
}
- level = domain->iop.mode - 1;
- pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -251,7 +215,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(domain->nid, gfp);
+ page = iommu_alloc_page_node(cfg->amd.nid, gfp);
if (!page)
return NULL;
@@ -365,7 +329,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
- struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
LIST_HEAD(freelist);
bool updated = false;
u64 __pte, *pte;
@@ -382,7 +346,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
while (pgcount > 0) {
count = PAGE_SIZE_PTE_COUNT(pgsize);
- pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
+ pte = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
ret = -ENOMEM;
if (!pte)
@@ -419,6 +383,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
out:
if (updated) {
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
@@ -560,27 +525,17 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
*/
static void v1_free_pgtable(struct io_pgtable *iop)
{
- struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
- struct protection_domain *dom;
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
LIST_HEAD(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
- dom = container_of(pgtable, struct protection_domain, iop);
-
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
-
- /* Update data structure */
- amd_iommu_domain_clr_pt_root(dom);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(dom);
-
iommu_put_pages_list(&freelist);
}
@@ -588,17 +543,21 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
+ pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ if (!pgtable->root)
+ return NULL;
+ pgtable->mode = PAGE_MODE_3_LEVEL;
+
+ cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
- cfg->tlb = &v1_flush_ops;
- pgtable->iop.ops.map_pages = iommu_v1_map_pages;
- pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
- pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
- pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
+ pgtable->pgtbl.ops.map_pages = iommu_v1_map_pages;
+ pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages;
+ pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
+ pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
- return &pgtable->iop;
+ return &pgtable->pgtbl;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 664e91c88748..25b9042fa453 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -51,7 +51,7 @@ static inline u64 set_pgtable_attr(u64 *page)
u64 prot;
prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
- prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+ prot |= IOMMU_PAGE_ACCESS;
return (iommu_virt_to_phys(page) | prot);
}
@@ -233,8 +233,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
- struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
- struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
u64 *pte;
unsigned long map_size;
unsigned long mapped_size = 0;
@@ -251,7 +251,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
while (mapped_size < size) {
map_size = get_alloc_page_size(pgsize);
- pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
+ pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
iova, map_size, gfp, &updated);
if (!pte) {
ret = -EINVAL;
@@ -266,8 +266,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
}
out:
- if (updated)
+ if (updated) {
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+
amd_iommu_domain_flush_pages(pdom, o_iova, size);
+ }
if (mapped)
*mapped += mapped_size;
@@ -281,7 +284,7 @@ static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
unsigned long unmap_size;
unsigned long unmapped = 0;
size_t size = pgcount << __ffs(pgsize);
@@ -323,30 +326,9 @@ static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo
/*
* ----------------------------------------------------
*/
-static void v2_tlb_flush_all(void *cookie)
-{
-}
-
-static void v2_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
-}
-
-static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
-{
-}
-
-static const struct iommu_flush_ops v2_flush_ops = {
- .tlb_flush_all = v2_tlb_flush_all,
- .tlb_flush_walk = v2_tlb_flush_walk,
- .tlb_add_page = v2_tlb_add_page,
-};
-
static void v2_free_pgtable(struct io_pgtable *iop)
{
- struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
if (!pgtable || !pgtable->pgd)
return;
@@ -359,26 +341,24 @@ static void v2_free_pgtable(struct io_pgtable *iop)
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- struct protection_domain *pdom = (struct protection_domain *)cookie;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC);
+ pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
if (!pgtable->pgd)
return NULL;
if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
ias = 57;
- pgtable->iop.ops.map_pages = iommu_v2_map_pages;
- pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
- pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+ pgtable->pgtbl.ops.map_pages = iommu_v2_map_pages;
+ pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages;
+ pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;
- cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
- cfg->ias = ias,
- cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
- cfg->tlb = &v2_flush_ops;
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+ cfg->ias = ias;
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
- return &pgtable->iop;
+ return &pgtable->pgtbl;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b19e8c0f48fa..8364cd6fa47d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -52,8 +52,6 @@
#define HT_RANGE_START (0xfd00000000ULL)
#define HT_RANGE_END (0xffffffffffULL)
-#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
-
static DEFINE_SPINLOCK(pd_bitmap_lock);
LIST_HEAD(ioapic_map);
@@ -825,10 +823,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
while (head != tail) {
iommu_print_event(iommu, iommu->evt_buf + head);
+
+ /* Update head pointer of hardware ring-buffer */
head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+ writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
- writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
#ifdef CONFIG_IRQ_REMAP
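The iommu_poll_events() hunk moves the MMIO head update into the loop, so the hardware regains ring space after every consumed entry instead of only once the loop exits. A simplified userspace model of that consumer pattern (plain volatile variables stand in for the MMIO head/tail registers, and the writel() becomes an ordinary store):

#include <stdint.h>
#include <stdio.h>

#define EVT_ENTRIES	8

static volatile uint32_t mmio_head, mmio_tail;	/* fake MMIO registers */
static uint32_t evt_buf[EVT_ENTRIES];

static void poll_events(void)
{
	uint32_t head = mmio_head, tail = mmio_tail;

	while (head != tail) {
		printf("event %u\n", evt_buf[head]);

		/* Publish progress entry-by-entry, as the patched loop
		 * does, so the device sees consumption even if handling
		 * one entry is slow. */
		head = (head + 1) % EVT_ENTRIES;
		mmio_head = head;	/* writel() in the real driver */
	}
}

int main(void)
{
	evt_buf[0] = 11;
	evt_buf[1] = 22;
	mmio_tail = 2;

	poll_events();
	return 0;
}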
@@ -1247,6 +1247,22 @@ out_unlock:
return ret;
}
+static void domain_flush_complete(struct protection_domain *domain)
+{
+ int i;
+
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+ if (domain && !domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU
+ * We need to wait for completion of all commands.
+ */
+ iommu_completion_wait(amd_iommus[i]);
+ }
+}
+
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
struct iommu_cmd cmd;
@@ -1483,7 +1499,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
__domain_flush_pages(domain, address, size);
/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
+ domain_flush_complete(domain);
return;
}
@@ -1523,7 +1539,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
}
/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
+ domain_flush_complete(domain);
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1549,27 +1565,11 @@ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
iommu_completion_wait(iommu);
}
-void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
- ioasid_t pasid)
+static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
{
- amd_iommu_dev_flush_pasid_pages(dev_data, 0,
- CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
-}
-
-void amd_iommu_domain_flush_complete(struct protection_domain *domain)
-{
- int i;
-
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain && !domain->dev_iommu[i])
- continue;
-
- /*
- * Devices of this domain are behind this IOMMU
- * We need to wait for completion of all commands.
- */
- iommu_completion_wait(amd_iommus[i]);
- }
+ amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
/* Flush the not present cache if it exists */
@@ -1589,15 +1589,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
/*
* This function flushes the DTEs for all devices in domain
*/
-static void domain_flush_devices(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list)
- device_flush_dte(dev_data);
-}
-
-static void update_device_table(struct protection_domain *domain)
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
@@ -1607,12 +1599,11 @@ static void update_device_table(struct protection_domain *domain)
set_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
}
-}
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- update_device_table(domain);
- domain_flush_devices(domain);
+ list_for_each_entry(dev_data, &domain->dev_list, list)
+ device_flush_dte(dev_data);
+
+ domain_flush_complete(domain);
}
void amd_iommu_domain_update(struct protection_domain *domain)
@@ -1816,7 +1807,7 @@ static int update_gcr3(struct iommu_dev_data *dev_data,
else
*pte = 0;
- amd_iommu_dev_flush_pasid_all(dev_data, pasid);
+ dev_flush_pasid_all(dev_data, pasid);
return 0;
}
@@ -1962,7 +1953,7 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
}
/* Update and flush DTE for the given device */
-void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
+static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
@@ -2032,6 +2023,7 @@ static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
int ret = 0;
/* Update data structures */
@@ -2039,8 +2031,8 @@ static int do_attach(struct iommu_dev_data *dev_data,
list_add(&dev_data->list, &domain->dev_list);
/* Update NUMA Node ID */
- if (domain->nid == NUMA_NO_NODE)
- domain->nid = dev_to_node(dev_data->dev);
+ if (cfg->amd.nid == NUMA_NO_NODE)
+ cfg->amd.nid = dev_to_node(dev_data->dev);
/* Do reference counting */
domain->dev_iommu[iommu->index] += 1;
@@ -2062,7 +2054,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
/* Clear DTE and flush the entry */
- amd_iommu_dev_update_dte(dev_data, false);
+ dev_update_dte(dev_data, false);
/* Flush IOTLB and wait for the flushes to finish */
amd_iommu_domain_flush_all(domain);
@@ -2185,11 +2177,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(iommu, dev);
- } else {
- amd_iommu_set_pci_msi_domain(dev, iommu);
- iommu_dev = &iommu->iommu;
+ goto out_err;
}
+ amd_iommu_set_pci_msi_domain(dev, iommu);
+ iommu_dev = &iommu->iommu;
+
/*
* If the IOMMU and device support PASID, then it will contain the
* max supported PASIDs; else it will be zero.
@@ -2201,8 +2194,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
pci_max_pasids(to_pci_dev(dev)));
}
+out_err:
iommu_completion_wait(iommu);
+ if (dev_is_pci(dev))
+ pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
+
return iommu_dev;
}
@@ -2259,53 +2256,18 @@ static void cleanup_domain(struct protection_domain *domain)
void protection_domain_free(struct protection_domain *domain)
{
- if (!domain)
- return;
-
- if (domain->iop.pgtbl_cfg.tlb)
- free_io_pgtable_ops(&domain->iop.iop.ops);
-
- if (domain->iop.root)
- iommu_free_page(domain->iop.root);
-
- if (domain->id)
- domain_id_free(domain->id);
-
+ WARN_ON(!list_empty(&domain->dev_list));
+ if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
+ free_io_pgtable_ops(&domain->iop.pgtbl.ops);
+ domain_id_free(domain->id);
kfree(domain);
}
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
-{
- u64 *pt_root = NULL;
-
- BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
- if (mode != PAGE_MODE_NONE) {
- pt_root = iommu_alloc_page(GFP_KERNEL);
- if (!pt_root)
- return -ENOMEM;
- }
-
- domain->pd_mode = PD_MODE_V1;
- amd_iommu_domain_set_pgtable(domain, pt_root, mode);
-
- return 0;
-}
-
-static int protection_domain_init_v2(struct protection_domain *pdom)
-{
- pdom->pd_mode = PD_MODE_V2;
- pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
- return 0;
-}
-
-struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
int pgtable;
- int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
@@ -2313,12 +2275,12 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
domain->id = domain_id_alloc();
if (!domain->id)
- goto out_err;
+ goto err_free;
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
- domain->nid = NUMA_NO_NODE;
+ domain->iop.pgtbl.cfg.amd.nid = nid;
switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */
@@ -2336,31 +2298,30 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
pgtable = AMD_IOMMU_V1;
break;
default:
- goto out_err;
+ goto err_id;
}
switch (pgtable) {
case AMD_IOMMU_V1:
- ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
+ domain->pd_mode = PD_MODE_V1;
break;
case AMD_IOMMU_V2:
- ret = protection_domain_init_v2(domain);
+ domain->pd_mode = PD_MODE_V2;
break;
default:
- ret = -EINVAL;
- break;
+ goto err_id;
}
- if (ret)
- goto out_err;
-
- pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
+ pgtbl_ops =
+ alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
if (!pgtbl_ops)
- goto out_err;
+ goto err_id;
return domain;
-out_err:
- protection_domain_free(domain);
+err_id:
+ domain_id_free(domain->id);
+err_free:
+ kfree(domain);
return NULL;
}
@@ -2398,17 +2359,18 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
if (dirty_tracking && !amd_iommu_hd_support(iommu))
return ERR_PTR(-EOPNOTSUPP);
- domain = protection_domain_alloc(type);
+ domain = protection_domain_alloc(type,
+ dev ? dev_to_node(dev) : NUMA_NO_NODE);
if (!domain)
return ERR_PTR(-ENOMEM);
domain->domain.geometry.aperture_start = 0;
domain->domain.geometry.aperture_end = dma_max_address();
domain->domain.geometry.force_aperture = true;
+ domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
if (iommu) {
domain->domain.type = type;
- domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
domain->domain.ops = iommu->iommu.ops->default_domain_ops;
if (dirty_tracking)
@@ -2448,9 +2410,6 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
struct protection_domain *domain;
unsigned long flags;
- if (!dom)
- return;
-
domain = to_pdomain(dom);
spin_lock_irqsave(&domain->lock, flags);
@@ -2462,6 +2421,29 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
protection_domain_free(domain);
}
+static int blocked_domain_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
+ if (dev_data->domain)
+ detach_device(dev);
+
+ /* Clear DTE and flush the entry */
+ spin_lock(&dev_data->lock);
+ dev_update_dte(dev_data, false);
+ spin_unlock(&dev_data->lock);
+
+ return 0;
+}
+
+static struct iommu_domain blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = blocked_domain_attach_device,
+ }
+};
+
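The blocked_domain above is stateless, so one static instance can serve every device, with its ops table attached through a compound literal. A condensed sketch of that singleton-with-ops shape (types reduced to the bare minimum):

#include <stdio.h>

struct device;

struct domain_ops {
	int (*attach_dev)(struct device *dev);
};

struct iommu_domain {
	int type;
	const struct domain_ops *ops;
};

static int blocked_attach(struct device *dev)
{
	/* Tear down translation so DMA from the device faults. */
	(void)dev;
	printf("device blocked\n");
	return 0;
}

/* One static, driver-wide instance: a blocked domain carries no
 * per-domain state, so every device can share it. */
static struct iommu_domain blocked_domain = {
	.type = 1,
	.ops = &(const struct domain_ops){
		.attach_dev = blocked_attach,
	},
};

int main(void)
{
	blocked_domain.ops->attach_dev(NULL);
	return 0;
}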
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
@@ -2517,7 +2499,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
/* Update device table */
- amd_iommu_dev_update_dte(dev_data, true);
+ dev_update_dte(dev_data, true);
return ret;
}
@@ -2526,7 +2508,7 @@ static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
unsigned long iova, size_t size)
{
struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
@@ -2538,7 +2520,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
int prot = 0;
int ret = -EINVAL;
@@ -2585,7 +2567,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
size_t r;
if ((domain->pd_mode == PD_MODE_V1) &&
@@ -2604,7 +2586,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
return ops->iova_to_phys(ops, iova);
}
@@ -2682,7 +2664,7 @@ static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
struct iommu_dirty_bitmap *dirty)
{
struct protection_domain *pdomain = to_pdomain(domain);
- struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+ struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
unsigned long lflags;
if (!ops || !ops->read_and_clear_dirty)
@@ -2757,7 +2739,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-bool amd_iommu_is_attach_deferred(struct device *dev)
+static bool amd_iommu_is_attach_deferred(struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2859,6 +2841,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
+ .blocked_domain = &blocked_domain,
.domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_user = amd_iommu_domain_alloc_user,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
@@ -2867,7 +2850,6 @@ const struct iommu_ops amd_iommu_ops = {
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
- .pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
.dev_enable_feat = amd_iommu_dev_enable_feature,
.dev_disable_feat = amd_iommu_dev_disable_feature,
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index a68215f2b3e1..0657b9373be5 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -181,7 +181,7 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct protection_domain *pdom;
int ret;
- pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+ pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
if (!pdom)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 355173d1441d..dc98c88b48c8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -2,5 +2,6 @@
obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-y := arm-smmu-v3.o
arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
+arm_smmu_v3-$(CONFIG_TEGRA241_CMDQV) += tegra241-cmdqv.o
obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
index cceb737a7001..84baa021370a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
@@ -30,6 +30,11 @@ static struct mm_struct sva_mm = {
.pgd = (void *)0xdaedbeefdeadbeefULL,
};
+enum arm_smmu_test_master_feat {
+ ARM_SMMU_MASTER_TEST_ATS = BIT(0),
+ ARM_SMMU_MASTER_TEST_STALL = BIT(1),
+};
+
static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
const __le64 *used_bits,
const __le64 *target,
@@ -164,16 +169,22 @@ static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
unsigned int s1dss,
- const dma_addr_t dma_addr)
+ const dma_addr_t dma_addr,
+ enum arm_smmu_test_master_feat feat)
{
+ bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
+ bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;
+
struct arm_smmu_master master = {
+ .ats_enabled = ats_enabled,
.cd_table.cdtab_dma = dma_addr,
.cd_table.s1cdmax = 0xFF,
.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
.smmu = &smmu,
+ .stall_enabled = stall_enabled,
};
- arm_smmu_make_cdtable_ste(ste, &master, true, s1dss);
+ arm_smmu_make_cdtable_ste(ste, &master, ats_enabled, s1dss);
}
static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
@@ -204,7 +215,7 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -214,7 +225,7 @@ static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -224,7 +235,7 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
NUM_EXPECTED_SYNCS(3));
}
@@ -234,7 +245,7 @@ static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
struct arm_smmu_ste ste;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
NUM_EXPECTED_SYNCS(3));
}
@@ -245,9 +256,9 @@ static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
/*
* Flipping s1dss on a CD table STE only involves changes to the second
@@ -265,7 +276,7 @@ arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}
@@ -276,16 +287,20 @@ arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
struct arm_smmu_ste s1dss_bypass;
arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(
test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
}
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
- bool ats_enabled)
+ enum arm_smmu_test_master_feat feat)
{
+ bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
+ bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;
struct arm_smmu_master master = {
+ .ats_enabled = ats_enabled,
.smmu = &smmu,
+ .stall_enabled = stall_enabled,
};
struct io_pgtable io_pgtable = {};
struct arm_smmu_domain smmu_domain = {
@@ -308,7 +323,7 @@ static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
{
struct arm_smmu_ste ste;
- arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -317,7 +332,7 @@ static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
{
struct arm_smmu_ste ste;
- arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -326,7 +341,7 @@ static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
{
struct arm_smmu_ste ste;
- arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -335,7 +350,7 @@ static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
{
struct arm_smmu_ste ste;
- arm_smmu_test_make_s2_ste(&ste, true);
+ arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
NUM_EXPECTED_SYNCS(2));
}
@@ -346,8 +361,8 @@ static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
struct arm_smmu_ste s2_ste;
arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
- arm_smmu_test_make_s2_ste(&s2_ste, true);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
+ arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
NUM_EXPECTED_SYNCS(3));
}
@@ -358,8 +373,8 @@ static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
struct arm_smmu_ste s2_ste;
arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
- arm_smmu_test_make_s2_ste(&s2_ste, true);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
+ arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
NUM_EXPECTED_SYNCS(3));
}
@@ -375,9 +390,9 @@ static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
* s1 dss field in the same update.
*/
arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
- fake_cdtab_dma_addr);
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
- 0x4B4B4b4B4B);
+ 0x4B4B4b4B4B, ARM_SMMU_MASTER_TEST_ATS);
arm_smmu_v3_test_ste_expect_non_hitless_transition(
test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
}
@@ -503,6 +518,30 @@ static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
arm_smmu_make_sva_cd(cd, &master, NULL, asid);
}
+static void arm_smmu_v3_write_ste_test_s1_to_s2_stall(struct kunit *test)
+{
+ struct arm_smmu_ste s1_ste;
+ struct arm_smmu_ste s2_ste;
+
+ arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
+ arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_s1_stall(struct kunit *test)
+{
+ struct arm_smmu_ste s1_ste;
+ struct arm_smmu_ste s2_ste;
+
+ arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
+ fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
+ arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
+ arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
+ NUM_EXPECTED_SYNCS(3));
+}
+
static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
{
struct arm_smmu_cd cd = {};
@@ -547,6 +586,8 @@ static struct kunit_case arm_smmu_v3_test_cases[] = {
KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2_stall),
+ KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1_stall),
KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
{},
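The test refactor replaces a lone bool parameter with a feature bitmask, which is what lets the new stall cases reuse the existing STE-building helpers unchanged. A tiny standalone illustration of that flags-argument pattern:

#include <stdbool.h>
#include <stdio.h>

enum master_feat {
	FEAT_ATS   = 1 << 0,
	FEAT_STALL = 1 << 1,
};

/* One flags argument scales better than a growing list of bools and
 * keeps call sites self-describing. */
static void make_ste(unsigned int feat)
{
	bool ats = feat & FEAT_ATS;
	bool stall = feat & FEAT_STALL;

	printf("ats=%d stall=%d\n", ats, stall);
}

int main(void)
{
	make_ste(FEAT_ATS);
	make_ste(FEAT_ATS | FEAT_STALL);
	return 0;
}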
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index ed2b106e02dd..737c5b882355 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -346,14 +346,30 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
return 0;
}
-static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
{
- return &smmu->cmdq;
+ struct arm_smmu_cmdq *cmdq = NULL;
+
+ if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
+ cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
+
+ return cmdq ?: &smmu->cmdq;
+}
+
+static bool arm_smmu_cmdq_needs_busy_polling(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq)
+{
+ if (cmdq == &smmu->cmdq)
+ return false;
+
+ return smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV;
}
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q, u32 prod)
+ struct arm_smmu_cmdq *cmdq, u32 prod)
{
+ struct arm_smmu_queue *q = &cmdq->q;
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
};
@@ -368,10 +384,12 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
}
arm_smmu_cmdq_build_cmd(cmd, &ent);
+ if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
+ u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS);
}
-static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q)
+void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq)
{
static const char * const cerror_str[] = {
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
@@ -379,6 +397,7 @@ static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
[CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
};
+ struct arm_smmu_queue *q = &cmdq->q;
int i;
u64 cmd[CMDQ_ENT_DWORDS];
@@ -421,13 +440,15 @@ static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
/* Convert the erroneous command into a CMD_SYNC */
arm_smmu_cmdq_build_cmd(cmd, &cmd_sync);
+ if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
+ u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS);
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
- __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
+ __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq);
}
/*
@@ -592,11 +613,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
/* Wait for the command queue to become non-full */
static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
struct arm_smmu_ll_queue *llq)
{
unsigned long flags;
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
int ret = 0;
/*
@@ -627,11 +648,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
* Must be called with the cmdq lock held in some capacity.
*/
static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
struct arm_smmu_ll_queue *llq)
{
int ret = 0;
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
queue_poll_init(smmu, &qp);
@@ -651,10 +672,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
* Must be called with the cmdq lock held in some capacity.
*/
static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
struct arm_smmu_ll_queue *llq)
{
struct arm_smmu_queue_poll qp;
- struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 prod = llq->prod;
int ret = 0;
@@ -701,12 +722,14 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
}
static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
struct arm_smmu_ll_queue *llq)
{
- if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
- return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
+ !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
+ return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
- return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+ return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
}
static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
@@ -743,13 +766,13 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
* CPU will appear before any of the commands from the other CPU.
*/
static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
u64 *cmds, int n, bool sync)
{
u64 cmd_sync[CMDQ_ENT_DWORDS];
u32 prod;
unsigned long flags;
bool owner;
- struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
struct arm_smmu_ll_queue llq, head;
int ret = 0;
@@ -763,7 +786,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
while (!queue_has_space(&llq, n + sync)) {
local_irq_restore(flags);
- if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+ if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
local_irq_save(flags);
}
@@ -789,7 +812,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
if (sync) {
prod = queue_inc_prod_n(&llq, n);
- arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
+ arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod);
queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
/*
@@ -839,7 +862,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
if (sync) {
llq.prod = queue_inc_prod_n(&llq, n);
- ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+ ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
if (ret) {
dev_err_ratelimited(smmu->dev,
"CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
@@ -874,7 +897,8 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
return -EINVAL;
}
- return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+ return arm_smmu_cmdq_issue_cmdlist(
+ smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
}
static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -889,21 +913,33 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
}
+static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ cmds->num = 0;
+ cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
+}
+
static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_batch *cmds,
struct arm_smmu_cmdq_ent *cmd)
{
+ bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd);
+ bool force_sync = (cmds->num == CMDQ_BATCH_ENTRIES - 1) &&
+ (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC);
int index;
- if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
- (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
- cmds->num = 0;
+ if (force_sync || unsupported_cmd) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+ cmds->num, true);
+ arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
}
if (cmds->num == CMDQ_BATCH_ENTRIES) {
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
- cmds->num = 0;
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+ cmds->num, false);
+ arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
}
index = cmds->num * CMDQ_ENT_DWORDS;
@@ -919,7 +955,8 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_batch *cmds)
{
- return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+ cmds->num, true);
}
static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
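With per-batch command queues, arm_smmu_cmdq_batch_init() now records which queue a batch targets, and the batch flushes early when it fills up, when a forced sync is due, or when the queue cannot accept a command. A minimal userspace model of the flush-when-full part of that batching (the forced-sync and unsupported-command checks are omitted for brevity):

#include <stdio.h>

#define BATCH_ENTRIES	4

struct cmd { int opcode; };

struct batch {
	struct cmd cmds[BATCH_ENTRIES];
	int num;
};

/* Submit everything queued so far; stands in for
 * arm_smmu_cmdq_issue_cmdlist(). */
static void issue(struct batch *b, int sync)
{
	printf("issue %d cmd(s), sync=%d\n", b->num, sync);
	b->num = 0;
}

static void batch_add(struct batch *b, struct cmd c)
{
	if (b->num == BATCH_ENTRIES)	/* full: flush without waiting */
		issue(b, 0);
	b->cmds[b->num++] = c;
}

int main(void)
{
	struct batch b = { .num = 0 };

	for (int i = 0; i < 6; i++)
		batch_add(&b, (struct cmd){ .opcode = i });
	issue(&b, 1);	/* final submit waits, like a trailing CMD_SYNC */
	return 0;
}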
@@ -1012,7 +1049,8 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
- STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2S |
+ STRTAB_STE_2_S2R);
used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
}
@@ -1170,7 +1208,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
},
};
- cmds.num = 0;
+ arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.cfgi.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
@@ -1179,48 +1217,36 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
-static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
- struct arm_smmu_l1_ctx_desc *l1_desc)
+static void arm_smmu_write_cd_l1_desc(struct arm_smmu_cdtab_l1 *dst,
+ dma_addr_t l2ptr_dma)
{
- size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+ u64 val = (l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V;
- l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
- &l1_desc->l2ptr_dma, GFP_KERNEL);
- if (!l1_desc->l2ptr) {
- dev_warn(smmu->dev,
- "failed to allocate context descriptor table\n");
- return -ENOMEM;
- }
- return 0;
+ /* The HW has 64 bit atomicity with stores to the L2 CD table */
+ WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
-static void arm_smmu_write_cd_l1_desc(__le64 *dst,
- struct arm_smmu_l1_ctx_desc *l1_desc)
+static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src)
{
- u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
- CTXDESC_L1_DESC_V;
-
- /* The HW has 64 bit atomicity with stores to the L2 CD table */
- WRITE_ONCE(*dst, cpu_to_le64(val));
+ return le64_to_cpu(src->l2ptr) & CTXDESC_L1_DESC_L2PTR_MASK;
}
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
u32 ssid)
{
- struct arm_smmu_l1_ctx_desc *l1_desc;
+ struct arm_smmu_cdtab_l2 *l2;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- if (!cd_table->cdtab)
+ if (!arm_smmu_cdtab_allocated(cd_table))
return NULL;
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
- return (struct arm_smmu_cd *)(cd_table->cdtab +
- ssid * CTXDESC_CD_DWORDS);
+ return &cd_table->linear.table[ssid];
- l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
- if (!l1_desc->l2ptr)
+ l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)];
+ if (!l2)
return NULL;
- return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
+ return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)];
}
static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
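arm_smmu_cdtab_l1_idx()/arm_smmu_cdtab_l2_idx() replace the open-coded divide/modulo on the SSID. The index math is just a radix split, sketched below (1024 CDs per L2 table matches the driver's CTXDESC_L2_ENTRIES but is hardcoded here as an assumption):

#include <stdio.h>

#define L2_ENTRIES	1024

static unsigned int cdtab_l1_idx(unsigned int ssid)
{
	return ssid / L2_ENTRIES;	/* which L2 table */
}

static unsigned int cdtab_l2_idx(unsigned int ssid)
{
	return ssid % L2_ENTRIES;	/* slot within that table */
}

int main(void)
{
	unsigned int ssid = 3000;

	printf("ssid %u -> l1 %u, l2 %u\n",
	       ssid, cdtab_l1_idx(ssid), cdtab_l2_idx(ssid));
	return 0;
}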
@@ -1232,24 +1258,25 @@ static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
might_sleep();
iommu_group_mutex_assert(master->dev);
- if (!cd_table->cdtab) {
+ if (!arm_smmu_cdtab_allocated(cd_table)) {
if (arm_smmu_alloc_cd_tables(master))
return NULL;
}
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
- unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
- struct arm_smmu_l1_ctx_desc *l1_desc;
+ unsigned int idx = arm_smmu_cdtab_l1_idx(ssid);
+ struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx];
- l1_desc = &cd_table->l1_desc[idx];
- if (!l1_desc->l2ptr) {
- __le64 *l1ptr;
+ if (!*l2ptr) {
+ dma_addr_t l2ptr_dma;
- if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ *l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr),
+ &l2ptr_dma, GFP_KERNEL);
+ if (!*l2ptr)
return NULL;
- l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
- arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx],
+ l2ptr_dma);
/* An invalid L1CD can be cached */
arm_smmu_sync_cd(master, ssid, false);
}
@@ -1369,7 +1396,7 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
struct arm_smmu_cd target = {};
struct arm_smmu_cd *cdptr;
- if (!master->cd_table.cdtab)
+ if (!arm_smmu_cdtab_allocated(&master->cd_table))
return;
cdptr = arm_smmu_get_cd_ptr(master, ssid);
if (WARN_ON(!cdptr))
@@ -1391,74 +1418,75 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
max_contexts <= CTXDESC_L2_ENTRIES) {
cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
- cd_table->num_l1_ents = max_contexts;
+ cd_table->linear.num_ents = max_contexts;
- l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+ l1size = max_contexts * sizeof(struct arm_smmu_cd);
+ cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size,
+ &cd_table->cdtab_dma,
+ GFP_KERNEL);
+ if (!cd_table->linear.table)
+ return -ENOMEM;
} else {
cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
- cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
- CTXDESC_L2_ENTRIES);
+ cd_table->l2.num_l1_ents =
+ DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);
- cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents,
- sizeof(*cd_table->l1_desc),
- GFP_KERNEL);
- if (!cd_table->l1_desc)
+ cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
+ sizeof(*cd_table->l2.l2ptrs),
+ GFP_KERNEL);
+ if (!cd_table->l2.l2ptrs)
return -ENOMEM;
- l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
- }
-
- cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma,
- GFP_KERNEL);
- if (!cd_table->cdtab) {
- dev_warn(smmu->dev, "failed to allocate context descriptor\n");
- ret = -ENOMEM;
- goto err_free_l1;
+ l1size = cd_table->l2.num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
+ cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size,
+ &cd_table->cdtab_dma,
+ GFP_KERNEL);
+ if (!cd_table->l2.l2ptrs) {
+ ret = -ENOMEM;
+ goto err_free_l2ptrs;
+ }
}
-
return 0;
-err_free_l1:
- if (cd_table->l1_desc) {
- devm_kfree(smmu->dev, cd_table->l1_desc);
- cd_table->l1_desc = NULL;
- }
+err_free_l2ptrs:
+ kfree(cd_table->l2.l2ptrs);
+ cd_table->l2.l2ptrs = NULL;
return ret;
}
static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
{
int i;
- size_t size, l1size;
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
- if (cd_table->l1_desc) {
- size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
-
- for (i = 0; i < cd_table->num_l1_ents; i++) {
- if (!cd_table->l1_desc[i].l2ptr)
+ if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) {
+ for (i = 0; i < cd_table->l2.num_l1_ents; i++) {
+ if (!cd_table->l2.l2ptrs[i])
continue;
- dmam_free_coherent(smmu->dev, size,
- cd_table->l1_desc[i].l2ptr,
- cd_table->l1_desc[i].l2ptr_dma);
+ dma_free_coherent(smmu->dev,
+ sizeof(*cd_table->l2.l2ptrs[i]),
+ cd_table->l2.l2ptrs[i],
+ arm_smmu_cd_l1_get_desc(&cd_table->l2.l1tab[i]));
}
- devm_kfree(smmu->dev, cd_table->l1_desc);
- cd_table->l1_desc = NULL;
+ kfree(cd_table->l2.l2ptrs);
- l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ dma_free_coherent(smmu->dev,
+ cd_table->l2.num_l1_ents *
+ sizeof(struct arm_smmu_cdtab_l1),
+ cd_table->l2.l1tab, cd_table->cdtab_dma);
} else {
- l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+ dma_free_coherent(smmu->dev,
+ cd_table->linear.num_ents *
+ sizeof(struct arm_smmu_cd),
+ cd_table->linear.table, cd_table->cdtab_dma);
}
-
- dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma);
- cd_table->cdtab_dma = 0;
- cd_table->cdtab = NULL;
}
/* Stream table manipulation functions */
-static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma)
+static void arm_smmu_write_strtab_l1_desc(struct arm_smmu_strtab_l1 *dst,
+ dma_addr_t l2ptr_dma)
{
u64 val = 0;
@@ -1466,7 +1494,7 @@ static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma)
val |= l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
/* The HW has 64 bit atomicity with stores to the L2 STE table */
- WRITE_ONCE(*dst, cpu_to_le64(val));
+ WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
struct arm_smmu_ste_writer {
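The L1 descriptor write above relies on a single 64-bit store being atomic with respect to the hardware table walker, so the L2 pointer and valid bit are never observed half-written. A userspace analogue using C11 atomics (the bit layout below is illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_V		(1ULL << 0)		/* valid bit */
#define DESC_PTR_MASK	0xffffffffff000ULL	/* page-aligned pointer bits */

/* Publish a descriptor with one 64-bit store; WRITE_ONCE in the
 * kernel, a relaxed atomic store being the closest userspace match. */
static void write_l1_desc(_Atomic uint64_t *dst, uint64_t l2ptr_dma)
{
	uint64_t val = (l2ptr_dma & DESC_PTR_MASK) | DESC_V;

	atomic_store_explicit(dst, val, memory_order_relaxed);
}

int main(void)
{
	_Atomic uint64_t slot = 0;

	write_l1_desc(&slot, 0x123456000ULL);
	printf("%#llx\n", (unsigned long long)atomic_load(&slot));
	return 0;
}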
@@ -1646,6 +1674,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW |
+ (master->stall_enabled ? STRTAB_STE_2_S2S : 0) |
STRTAB_STE_2_S2R);
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
@@ -1670,52 +1699,61 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
- size_t size;
- void *strtab;
dma_addr_t l2ptr_dma;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
+ struct arm_smmu_strtab_l2 **l2table;
- if (desc->l2ptr)
+ l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)];
+ if (*l2table)
return 0;
- size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
- strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
-
- desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &l2ptr_dma,
- GFP_KERNEL);
- if (!desc->l2ptr) {
+ *l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table),
+ &l2ptr_dma, GFP_KERNEL);
+ if (!*l2table) {
dev_err(smmu->dev,
"failed to allocate l2 stream table for SID %u\n",
sid);
return -ENOMEM;
}
- arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
- arm_smmu_write_strtab_l1_desc(strtab, l2ptr_dma);
+ arm_smmu_init_initial_stes((*l2table)->stes,
+ ARRAY_SIZE((*l2table)->stes));
+ arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)],
+ l2ptr_dma);
return 0;
}
+static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs)
+{
+ struct arm_smmu_stream *stream_rhs =
+ rb_entry(rhs, struct arm_smmu_stream, node);
+ const u32 *sid_lhs = lhs;
+
+ if (*sid_lhs < stream_rhs->id)
+ return -1;
+ if (*sid_lhs > stream_rhs->id)
+ return 1;
+ return 0;
+}
+
+static int arm_smmu_streams_cmp_node(struct rb_node *lhs,
+ const struct rb_node *rhs)
+{
+ return arm_smmu_streams_cmp_key(
+ &rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs);
+}
+
static struct arm_smmu_master *
arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
{
struct rb_node *node;
- struct arm_smmu_stream *stream;
lockdep_assert_held(&smmu->streams_mutex);
- node = smmu->streams.rb_node;
- while (node) {
- stream = rb_entry(node, struct arm_smmu_stream, node);
- if (stream->id < sid)
- node = node->rb_right;
- else if (stream->id > sid)
- node = node->rb_left;
- else
- return stream->master;
- }
-
- return NULL;
+ node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
+ if (!node)
+ return NULL;
+ return rb_entry(node, struct arm_smmu_stream, node)->master;
}
/* IRQ and event handlers */
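The rb_find()/rb_find_add() conversion above hinges on a pair of comparators: one takes a bare key, one takes two nodes, and the node variant delegates to the key variant. The same contract, demonstrated with libc qsort()/bsearch() on a flat array instead of the kernel rbtree:

#include <stdio.h>
#include <stdlib.h>

struct stream { unsigned int id; const char *name; };

/* Key-vs-element comparison, the analogue of the new
 * arm_smmu_streams_cmp_key(): lhs is a bare SID. */
static int cmp_key(const void *lhs, const void *rhs)
{
	const unsigned int *sid = lhs;
	const struct stream *s = rhs;

	return (*sid > s->id) - (*sid < s->id);
}

/* Element-vs-element comparison reuses the key variant, the analogue
 * of arm_smmu_streams_cmp_node(). */
static int cmp_node(const void *lhs, const void *rhs)
{
	return cmp_key(&((const struct stream *)lhs)->id, rhs);
}

int main(void)
{
	struct stream streams[] = {
		{ 7, "gpu" }, { 3, "nic" }, { 12, "usb" },
	};
	unsigned int sid = 7;
	struct stream *found;

	qsort(streams, 3, sizeof(*streams), cmp_node);
	found = bsearch(&sid, streams, 3, sizeof(*streams), cmp_key);
	printf("sid %u -> %s\n", sid, found ? found->name : "none");
	return 0;
}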
@@ -1739,10 +1777,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
return -EOPNOTSUPP;
}
- /* Stage-2 is always pinned at the moment */
- if (evt[1] & EVTQ_1_S2)
- return -EFAULT;
-
if (!(evt[1] & EVTQ_1_STALL))
return -EOPNOTSUPP;
@@ -2021,7 +2055,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
- cmds.num = 0;
+ arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
@@ -2036,7 +2070,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master_domain *master_domain;
int i;
unsigned long flags;
- struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_ATC_INV,
+ };
struct arm_smmu_cmdq_batch cmds;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2059,7 +2095,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
if (!atomic_read(&smmu_domain->nr_ats_masters))
return 0;
- cmds.num = 0;
+ arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master_domain, &smmu_domain->devices,
@@ -2141,7 +2177,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
num_pages++;
}
- cmds.num = 0;
+ arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
while (iova < end) {
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
@@ -2438,16 +2474,12 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- unsigned int idx1, idx2;
-
/* Two-level walk */
- idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
- idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
- return &cfg->l1_desc[idx1].l2ptr[idx2];
+ return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)]
+ ->stes[arm_smmu_strtab_l2_idx(sid)];
} else {
/* Simple linear lookup */
- return (struct arm_smmu_ste *)&cfg
- ->strtab[sid * STRTAB_STE_DWORDS];
+ return &cfg->linear.table[sid];
}
}
@@ -3062,8 +3094,8 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
smmu_domain = arm_smmu_domain_alloc();
- if (!smmu_domain)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(smmu_domain))
+ return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
@@ -3147,12 +3179,9 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
- unsigned long limit = smmu->strtab_cfg.num_l1_ents;
-
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
- limit *= 1UL << STRTAB_SPLIT;
-
- return sid < limit;
+ return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents;
+ return sid < smmu->strtab_cfg.linear.num_ents;
}
static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
@@ -3173,8 +3202,6 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
{
int i;
int ret = 0;
- struct arm_smmu_stream *new_stream, *cur_stream;
- struct rb_node **new_node, *parent_node = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
@@ -3185,9 +3212,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
+ struct arm_smmu_stream *new_stream = &master->streams[i];
u32 sid = fwspec->ids[i];
- new_stream = &master->streams[i];
new_stream->id = sid;
new_stream->master = master;
@@ -3196,28 +3223,13 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
- new_node = &(smmu->streams.rb_node);
- while (*new_node) {
- cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
- node);
- parent_node = *new_node;
- if (cur_stream->id > new_stream->id) {
- new_node = &((*new_node)->rb_left);
- } else if (cur_stream->id < new_stream->id) {
- new_node = &((*new_node)->rb_right);
- } else {
- dev_warn(master->dev,
- "stream %u already in tree\n",
- cur_stream->id);
- ret = -EINVAL;
- break;
- }
- }
- if (ret)
+ if (rb_find_add(&new_stream->node, &smmu->streams,
+ arm_smmu_streams_cmp_node)) {
+ dev_warn(master->dev, "stream %u already in tree\n",
+ sid);
+ ret = -EINVAL;
break;
-
- rb_link_node(&new_stream->node, parent_node, new_node);
- rb_insert_color(&new_stream->node, &smmu->streams);
+ }
}
if (ret) {
@@ -3295,6 +3307,12 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
master->stall_enabled = true;
+ if (dev_is_pci(dev)) {
+ unsigned int stu = __ffs(smmu->pgsize_bitmap);
+
+ pci_prepare_ats(to_pci_dev(dev), stu);
+ }
+
return &smmu->iommu;
err_free_master:
@@ -3317,7 +3335,7 @@ static void arm_smmu_release_device(struct device *dev)
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
- if (master->cd_table.cdtab)
+ if (arm_smmu_cdtab_allocated(&master->cd_table))
arm_smmu_free_cd_tables(master);
kfree(master);
}
@@ -3507,12 +3525,10 @@ static struct iommu_dirty_ops arm_smmu_dirty_ops = {
};
/* Probing and initialisation functions */
-static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q,
- void __iomem *page,
- unsigned long prod_off,
- unsigned long cons_off,
- size_t dwords, const char *name)
+int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q, void __iomem *page,
+ unsigned long prod_off, unsigned long cons_off,
+ size_t dwords, const char *name)
{
size_t qsz;
@@ -3550,9 +3566,9 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
return 0;
}
-static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
+int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq)
{
- struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
atomic_set(&cmdq->owner_prod, 0);
@@ -3577,7 +3593,7 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
if (ret)
return ret;
- ret = arm_smmu_cmdq_init(smmu);
+ ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq);
if (ret)
return ret;
@@ -3606,42 +3622,32 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
- void *strtab;
- u64 reg;
- u32 size, l1size;
+ u32 l1size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ unsigned int last_sid_idx =
+ arm_smmu_strtab_l1_idx((1 << smmu->sid_bits) - 1);
/* Calculate the L1 size, capped to the SIDSIZE. */
- size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
- size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- cfg->num_l1_ents = 1 << size;
-
- size += STRTAB_SPLIT;
- if (size < smmu->sid_bits)
+ cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES);
+ if (cfg->l2.num_l1_ents <= last_sid_idx)
dev_warn(smmu->dev,
"2-level strtab only covers %u/%u bits of SID\n",
- size, smmu->sid_bits);
+ ilog2(cfg->l2.num_l1_ents * STRTAB_NUM_L2_STES),
+ smmu->sid_bits);
- l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
- strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
- GFP_KERNEL);
- if (!strtab) {
+ l1size = cfg->l2.num_l1_ents * sizeof(struct arm_smmu_strtab_l1);
+ cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma,
+ GFP_KERNEL);
+ if (!cfg->l2.l1tab) {
dev_err(smmu->dev,
"failed to allocate l1 stream table (%u bytes)\n",
l1size);
return -ENOMEM;
}
- cfg->strtab = strtab;
-
- /* Configure strtab_base_cfg for 2 levels */
- reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
- reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
- reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
- cfg->strtab_base_cfg = reg;
- cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
- sizeof(*cfg->l1_desc), GFP_KERNEL);
- if (!cfg->l1_desc)
+ cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents,
+ sizeof(*cfg->l2.l2ptrs), GFP_KERNEL);
+ if (!cfg->l2.l2ptrs)
return -ENOMEM;
return 0;
@@ -3649,50 +3655,36 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
- void *strtab;
- u64 reg;
u32 size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
- strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
- GFP_KERNEL);
- if (!strtab) {
+ size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste);
+ cfg->linear.table = dmam_alloc_coherent(smmu->dev, size,
+ &cfg->linear.ste_dma,
+ GFP_KERNEL);
+ if (!cfg->linear.table) {
dev_err(smmu->dev,
"failed to allocate linear stream table (%u bytes)\n",
size);
return -ENOMEM;
}
- cfg->strtab = strtab;
- cfg->num_l1_ents = 1 << smmu->sid_bits;
+ cfg->linear.num_ents = 1 << smmu->sid_bits;
- /* Configure strtab_base_cfg for a linear table covering all SIDs */
- reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
- reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
- cfg->strtab_base_cfg = reg;
-
- arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents);
return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
- u64 reg;
int ret;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
ret = arm_smmu_init_strtab_2lvl(smmu);
else
ret = arm_smmu_init_strtab_linear(smmu);
-
if (ret)
return ret;
- /* Set the strtab base address */
- reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
- reg |= STRTAB_BASE_RA;
- smmu->strtab_cfg.strtab_base = reg;
-
ida_init(&smmu->vmid_map);
return 0;
@@ -3709,7 +3701,14 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
if (ret)
return ret;
- return arm_smmu_init_strtab(smmu);
+ ret = arm_smmu_init_strtab(smmu);
+ if (ret)
+ return ret;
+
+ if (smmu->impl_ops && smmu->impl_ops->init_structures)
+ return smmu->impl_ops->init_structures(smmu);
+
+ return 0;
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
@@ -3901,6 +3900,30 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
return ret;
}
+static void arm_smmu_write_strtab(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ dma_addr_t dma;
+ u32 reg;
+
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT,
+ STRTAB_BASE_CFG_FMT_2LVL) |
+ FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE,
+ ilog2(cfg->l2.num_l1_ents) + STRTAB_SPLIT) |
+ FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
+ dma = cfg->l2.l1_dma;
+ } else {
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT,
+ STRTAB_BASE_CFG_FMT_LINEAR) |
+ FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
+ dma = cfg->linear.ste_dma;
+ }
+ writeq_relaxed((dma & STRTAB_BASE_ADDR_MASK) | STRTAB_BASE_RA,
+ smmu->base + ARM_SMMU_STRTAB_BASE);
+ writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
+}
+
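The 2-level branch of arm_smmu_write_strtab() above derives STRTAB_BASE_CFG.LOG2SIZE at reset time as ilog2(num_l1_ents) + STRTAB_SPLIT, instead of caching a precomputed strtab_base_cfg as the old code did. A standalone arithmetic check of that encoding (an illustrative sketch, not part of the patch; ilog2_u32 is a local stand-in for the kernel's ilog2()):

#include <assert.h>

#define STRTAB_SPLIT 8

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* 256 L1 entries x 256 STEs per L2 table covers a 16-bit SID space */
	assert(ilog2_u32(256) + STRTAB_SPLIT == 16);
	return 0;
}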
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
int ret;
@@ -3936,10 +3959,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
/* Stream table */
- writeq_relaxed(smmu->strtab_cfg.strtab_base,
- smmu->base + ARM_SMMU_STRTAB_BASE);
- writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
- smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
+ arm_smmu_write_strtab(smmu);
/* Command queue */
writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
@@ -4026,6 +4046,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
return ret;
}
+ if (smmu->impl_ops && smmu->impl_ops->device_reset) {
+ ret = smmu->impl_ops->device_reset(smmu);
+ if (ret) {
+ dev_err(smmu->dev, "failed to reset impl\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -4315,18 +4343,55 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
}
#ifdef CONFIG_ACPI
-static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
+#ifdef CONFIG_TEGRA241_CMDQV
+static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
+ struct arm_smmu_device *smmu)
+{
+ const char *uid = kasprintf(GFP_KERNEL, "%u", node->identifier);
+ struct acpi_device *adev;
+
+ /* Look for an NVDA200C node whose _UID matches the SMMU node ID */
+ adev = acpi_dev_get_first_match_dev("NVDA200C", uid, -1);
+ if (adev) {
+ /* Tegra241 CMDQV driver is responsible for put_device() */
+ smmu->impl_dev = &adev->dev;
+ smmu->options |= ARM_SMMU_OPT_TEGRA241_CMDQV;
+ dev_info(smmu->dev, "found companion CMDQV device: %s\n",
+ dev_name(smmu->impl_dev));
+ }
+ kfree(uid);
+}
+#else
+static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
+ struct arm_smmu_device *smmu)
+{
+}
+#endif
+
+static int acpi_smmu_iort_probe_model(struct acpi_iort_node *node,
+ struct arm_smmu_device *smmu)
{
- switch (model) {
+ struct acpi_iort_smmu_v3 *iort_smmu =
+ (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ switch (iort_smmu->model) {
case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
break;
case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
break;
+ case ACPI_IORT_SMMU_V3_GENERIC:
+ /*
+		 * The Tegra241 implementation stores its SMMU options and
+		 * impl_dev in the DSDT, so go through the ACPI tables
+		 * unconditionally.
+ */
+ acpi_smmu_dsdt_probe_tegra241_cmdqv(node, smmu);
+ break;
}
dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
+ return 0;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
@@ -4341,8 +4406,6 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
/* Retrieve SMMUv3 specific data */
iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
- acpi_smmu_get_options(iort_smmu->model, smmu);
-
if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
smmu->features |= ARM_SMMU_FEAT_COHERENCY;
@@ -4354,7 +4417,7 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
smmu->features |= ARM_SMMU_FEAT_HA;
}
- return 0;
+ return acpi_smmu_iort_probe_model(node, smmu);
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
@@ -4435,6 +4498,39 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}
+static void arm_smmu_impl_remove(void *data)
+{
+ struct arm_smmu_device *smmu = data;
+
+ if (smmu->impl_ops && smmu->impl_ops->device_remove)
+ smmu->impl_ops->device_remove(smmu);
+}
+
+/*
+ * Probe all the compiled-in implementations. Each one checks to see if it
+ * matches this HW and, if so, returns a devm_krealloc'd arm_smmu_device that
+ * replaces the caller's. Otherwise the original is returned, or an ERR_PTR
+ * on failure.
+ */
+static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_device *new_smmu = ERR_PTR(-ENODEV);
+ int ret;
+
+ if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV))
+ new_smmu = tegra241_cmdqv_probe(smmu);
+
+ if (new_smmu == ERR_PTR(-ENODEV))
+ return smmu;
+ if (IS_ERR(new_smmu))
+ return new_smmu;
+
+ ret = devm_add_action_or_reset(new_smmu->dev, arm_smmu_impl_remove,
+ new_smmu);
+ if (ret)
+ return ERR_PTR(ret);
+ return new_smmu;
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -4456,6 +4552,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (ret)
return ret;
+ smmu = arm_smmu_impl_probe(smmu);
+ if (IS_ERR(smmu))
+ return PTR_ERR(smmu);
+
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 14bca41a981b..1e9952ca989f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -14,6 +14,8 @@
#include <linux/mmzone.h>
#include <linux/sizes.h>
+struct arm_smmu_device;
+
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL GENMASK(28, 27)
@@ -202,10 +204,8 @@
* 2lvl: 128k L1 entries,
* 256 lazy entries per table (each table covers a PCI bus)
*/
-#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8
-#define STRTAB_L1_DESC_DWORDS 1
#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
@@ -215,6 +215,26 @@ struct arm_smmu_ste {
__le64 data[STRTAB_STE_DWORDS];
};
+#define STRTAB_NUM_L2_STES (1 << STRTAB_SPLIT)
+struct arm_smmu_strtab_l2 {
+ struct arm_smmu_ste stes[STRTAB_NUM_L2_STES];
+};
+
+struct arm_smmu_strtab_l1 {
+ __le64 l2ptr;
+};
+#define STRTAB_MAX_L1_ENTRIES (1 << 17)
+
+static inline u32 arm_smmu_strtab_l1_idx(u32 sid)
+{
+ return sid / STRTAB_NUM_L2_STES;
+}
+
+static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
+{
+ return sid % STRTAB_NUM_L2_STES;
+}
+
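These two helpers split a StreamID into table indices: with STRTAB_SPLIT == 8, each L1 entry spans STRTAB_NUM_L2_STES == 256 STEs, so the L1 index is sid / 256 and the L2 index is sid % 256 (one L2 table per PCI bus when the SID is a BDF). The CD-table helpers further down follow the same pattern with CTXDESC_L2_ENTRIES == 1024. A minimal standalone sketch of the decomposition (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define STRTAB_SPLIT		8
#define STRTAB_NUM_L2_STES	(1 << STRTAB_SPLIT)

static uint32_t l1_idx(uint32_t sid) { return sid / STRTAB_NUM_L2_STES; }
static uint32_t l2_idx(uint32_t sid) { return sid % STRTAB_NUM_L2_STES; }

int main(void)
{
	/* SID 0x0104 (bus 1, devfn 0x04) lands in L1 slot 1, STE 4 */
	assert(l1_idx(0x0104) == 1 && l2_idx(0x0104) == 4);
	return 0;
}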
#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT 0
@@ -267,6 +287,7 @@ struct arm_smmu_ste {
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
+#define STRTAB_STE_2_S2S (1UL << 57)
#define STRTAB_STE_2_S2R (1UL << 58)
#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
@@ -280,7 +301,6 @@ struct arm_smmu_ste {
*/
#define CTXDESC_L2_ENTRIES 1024
-#define CTXDESC_L1_DESC_DWORDS 1
#define CTXDESC_L1_DESC_V (1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
@@ -290,6 +310,24 @@ struct arm_smmu_cd {
__le64 data[CTXDESC_CD_DWORDS];
};
+struct arm_smmu_cdtab_l2 {
+ struct arm_smmu_cd cds[CTXDESC_L2_ENTRIES];
+};
+
+struct arm_smmu_cdtab_l1 {
+ __le64 l2ptr;
+};
+
+static inline unsigned int arm_smmu_cdtab_l1_idx(unsigned int ssid)
+{
+ return ssid / CTXDESC_L2_ENTRIES;
+}
+
+static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
+{
+ return ssid % CTXDESC_L2_ENTRIES;
+}
+
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
@@ -320,7 +358,7 @@ struct arm_smmu_cd {
* When the SMMU only supports linear context descriptor tables, pick a
* reasonable size limit (64kB).
*/
-#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
+#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / sizeof(struct arm_smmu_cd))
/* Command queue */
#define CMDQ_ENT_SZ_SHIFT 4
@@ -566,10 +604,18 @@ struct arm_smmu_cmdq {
atomic_long_t *valid_map;
atomic_t owner_prod;
atomic_t lock;
+ bool (*supports_cmd)(struct arm_smmu_cmdq_ent *ent);
};
+static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
+}
+
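The optional supports_cmd callback lets a secondary command queue, such as a guest-owned VCMDQ, decline opcodes it cannot execute; a NULL callback means the queue accepts everything. A hedged sketch of how a caller can use this to fall back to the main CMDQ (pick_cmdq is a hypothetical name, not an API added by this patch):

static struct arm_smmu_cmdq *pick_cmdq(struct arm_smmu_device *smmu,
				       struct arm_smmu_cmdq_ent *ent)
{
	struct arm_smmu_cmdq *cmdq = NULL;

	/* A secondary queue returns NULL to decline the command */
	if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);

	return cmdq ?: &smmu->cmdq;
}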
struct arm_smmu_cmdq_batch {
u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+ struct arm_smmu_cmdq *cmdq;
int num;
};
@@ -584,24 +630,23 @@ struct arm_smmu_priq {
};
/* High-level stream table and context descriptor structures */
-struct arm_smmu_strtab_l1_desc {
- struct arm_smmu_ste *l2ptr;
-};
-
struct arm_smmu_ctx_desc {
u16 asid;
};
-struct arm_smmu_l1_ctx_desc {
- struct arm_smmu_cd *l2ptr;
- dma_addr_t l2ptr_dma;
-};
-
struct arm_smmu_ctx_desc_cfg {
- __le64 *cdtab;
+ union {
+ struct {
+ struct arm_smmu_cd *table;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ struct arm_smmu_cdtab_l1 *l1tab;
+ struct arm_smmu_cdtab_l2 **l2ptrs;
+ unsigned int num_l1_ents;
+ } l2;
+ };
dma_addr_t cdtab_dma;
- struct arm_smmu_l1_ctx_desc *l1_desc;
- unsigned int num_l1_ents;
unsigned int used_ssids;
u8 in_ste;
u8 s1fmt;
@@ -609,6 +654,12 @@ struct arm_smmu_ctx_desc_cfg {
u8 s1cdmax;
};
+static inline bool
+arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg)
+{
+ return cfg->linear.table || cfg->l2.l1tab;
+}
+
/* True if the cd table has SSIDS > 0 in use. */
static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
{
@@ -620,18 +671,35 @@ struct arm_smmu_s2_cfg {
};
struct arm_smmu_strtab_cfg {
- __le64 *strtab;
- dma_addr_t strtab_dma;
- struct arm_smmu_strtab_l1_desc *l1_desc;
- unsigned int num_l1_ents;
+ union {
+ struct {
+ struct arm_smmu_ste *table;
+ dma_addr_t ste_dma;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ struct arm_smmu_strtab_l1 *l1tab;
+ struct arm_smmu_strtab_l2 **l2ptrs;
+ dma_addr_t l1_dma;
+ unsigned int num_l1_ents;
+ } l2;
+ };
+};
- u64 strtab_base;
- u32 strtab_base_cfg;
+struct arm_smmu_impl_ops {
+ int (*device_reset)(struct arm_smmu_device *smmu);
+ void (*device_remove)(struct arm_smmu_device *smmu);
+ int (*init_structures)(struct arm_smmu_device *smmu);
+ struct arm_smmu_cmdq *(*get_secondary_cmdq)(
+ struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
};
/* An SMMUv3 instance */
struct arm_smmu_device {
struct device *dev;
+ struct device *impl_dev;
+ const struct arm_smmu_impl_ops *impl_ops;
+
void __iomem *base;
void __iomem *page1;
@@ -664,6 +732,7 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
+#define ARM_SMMU_OPT_TEGRA241_CMDQV (1 << 4)
u32 options;
struct arm_smmu_cmdq cmdq;
@@ -815,6 +884,15 @@ void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
unsigned long iova, size_t size);
+void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq);
+int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue *q, void __iomem *page,
+ unsigned long prod_off, unsigned long cons_off,
+ size_t dwords, const char *name);
+int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq);
+
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
@@ -860,10 +938,15 @@ static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
-static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
- struct device *dev,
- ioasid_t id)
+#endif /* CONFIG_ARM_SMMU_V3_SVA */
+
+#ifdef CONFIG_TEGRA241_CMDQV
+struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu);
+#else /* CONFIG_TEGRA241_CMDQV */
+static inline struct arm_smmu_device *
+tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
+ return ERR_PTR(-ENODEV);
}
-#endif /* CONFIG_ARM_SMMU_V3_SVA */
+#endif /* CONFIG_TEGRA241_CMDQV */
#endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
new file mode 100644
index 000000000000..fcd13d301fff
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */
+
+#define dev_fmt(fmt) "tegra241_cmdqv: " fmt
+
+#include <linux/acpi.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+
+#include <acpi/acpixf.h>
+
+#include "arm-smmu-v3.h"
+
+/* CMDQV register page base and size defines */
+#define TEGRA241_CMDQV_CONFIG_BASE (0)
+#define TEGRA241_CMDQV_CONFIG_SIZE (SZ_64K)
+#define TEGRA241_VCMDQ_PAGE0_BASE (TEGRA241_CMDQV_CONFIG_BASE + SZ_64K)
+#define TEGRA241_VCMDQ_PAGE1_BASE (TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K)
+#define TEGRA241_VINTF_PAGE_BASE (TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K)
+
+/* CMDQV global base regs */
+#define TEGRA241_CMDQV_CONFIG 0x0000
+#define CMDQV_EN BIT(0)
+
+#define TEGRA241_CMDQV_PARAM 0x0004
+#define CMDQV_NUM_VINTF_LOG2 GENMASK(11, 8)
+#define CMDQV_NUM_VCMDQ_LOG2 GENMASK(7, 4)
+
+#define TEGRA241_CMDQV_STATUS 0x0008
+#define CMDQV_ENABLED BIT(0)
+
+#define TEGRA241_CMDQV_VINTF_ERR_MAP 0x0014
+#define TEGRA241_CMDQV_VINTF_INT_MASK 0x001C
+#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m) (0x0024 + 0x4*(m))
+
+#define TEGRA241_CMDQV_CMDQ_ALLOC(q) (0x0200 + 0x4*(q))
+#define CMDQV_CMDQ_ALLOC_VINTF GENMASK(20, 15)
+#define CMDQV_CMDQ_ALLOC_LVCMDQ GENMASK(7, 1)
+#define CMDQV_CMDQ_ALLOCATED BIT(0)
+
+/* VINTF base regs */
+#define TEGRA241_VINTF(v) (0x1000 + 0x100*(v))
+
+#define TEGRA241_VINTF_CONFIG 0x0000
+#define VINTF_HYP_OWN BIT(17)
+#define VINTF_VMID GENMASK(16, 1)
+#define VINTF_EN BIT(0)
+
+#define TEGRA241_VINTF_STATUS 0x0004
+#define VINTF_STATUS GENMASK(3, 1)
+#define VINTF_ENABLED BIT(0)
+
+#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \
+ (0x00C0 + 0x8*(m))
+#define LVCMDQ_ERR_MAP_NUM_64 2
+
+/* VCMDQ base regs */
+/* -- PAGE0 -- */
+#define TEGRA241_VCMDQ_PAGE0(q) (TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))
+
+#define TEGRA241_VCMDQ_CONS 0x00000
+#define VCMDQ_CONS_ERR GENMASK(30, 24)
+
+#define TEGRA241_VCMDQ_PROD 0x00004
+
+#define TEGRA241_VCMDQ_CONFIG 0x00008
+#define VCMDQ_EN BIT(0)
+
+#define TEGRA241_VCMDQ_STATUS 0x0000C
+#define VCMDQ_ENABLED BIT(0)
+
+#define TEGRA241_VCMDQ_GERROR 0x00010
+#define TEGRA241_VCMDQ_GERRORN 0x00014
+
+/* -- PAGE1 -- */
+#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
+#define VCMDQ_ADDR GENMASK(47, 5)
+#define VCMDQ_LOG2SIZE GENMASK(4, 0)
+#define VCMDQ_LOG2SIZE_MAX 19
+
+#define TEGRA241_VCMDQ_BASE 0x00000
+#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008
+
+/* VINTF logical-VCMDQ pages */
+#define TEGRA241_VINTFi_PAGE0(i) (TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i))
+#define TEGRA241_VINTFi_PAGE1(i) (TEGRA241_VINTFi_PAGE0(i) + SZ_64K)
+#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
+ (TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
+#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
+ (TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
+
+/* MMIO helpers */
+#define REG_CMDQV(_cmdqv, _regname) \
+ ((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
+#define REG_VINTF(_vintf, _regname) \
+ ((_vintf)->base + TEGRA241_VINTF_##_regname)
+#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \
+ ((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
+#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \
+ ((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
+
+static bool disable_cmdqv;
+module_param(disable_cmdqv, bool, 0444);
+MODULE_PARM_DESC(disable_cmdqv,
+	"Disable the CMDQV HW and use the default SMMU internal CMDQ.");
+
+static bool bypass_vcmdq;
+module_param(bypass_vcmdq, bool, 0444);
+MODULE_PARM_DESC(bypass_vcmdq,
+	"Bypass the VCMDQ for debugging or performance comparison.");
+
+/**
+ * struct tegra241_vcmdq - Virtual Command Queue
+ * @idx: Global index in the CMDQV
+ * @lidx: Local index in the VINTF
+ * @enabled: Enable status
+ * @cmdqv: Parent CMDQV pointer
+ * @vintf: Parent VINTF pointer
+ * @cmdq: Command Queue struct
+ * @page0: MMIO Page0 base address
+ * @page1: MMIO Page1 base address
+ */
+struct tegra241_vcmdq {
+ u16 idx;
+ u16 lidx;
+
+ bool enabled;
+
+ struct tegra241_cmdqv *cmdqv;
+ struct tegra241_vintf *vintf;
+ struct arm_smmu_cmdq cmdq;
+
+ void __iomem *page0;
+ void __iomem *page1;
+};
+
+/**
+ * struct tegra241_vintf - Virtual Interface
+ * @idx: Global index in the CMDQV
+ * @enabled: Enable status
+ * @hyp_own: Owned by hypervisor (in-kernel)
+ * @cmdqv: Parent CMDQV pointer
+ * @lvcmdqs: List of logical VCMDQ pointers
+ * @base: MMIO base address
+ */
+struct tegra241_vintf {
+ u16 idx;
+
+ bool enabled;
+ bool hyp_own;
+
+ struct tegra241_cmdqv *cmdqv;
+ struct tegra241_vcmdq **lvcmdqs;
+
+ void __iomem *base;
+};
+
+/**
+ * struct tegra241_cmdqv - CMDQ-V for SMMUv3
+ * @smmu: SMMUv3 device
+ * @dev: CMDQV device
+ * @base: MMIO base address
+ * @irq: IRQ number
+ * @num_vintfs: Total number of VINTFs
+ * @num_vcmdqs: Total number of VCMDQs
+ * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF
+ * @vintf_ids: VINTF id allocator
+ * @vintfs: List of VINTFs
+ */
+struct tegra241_cmdqv {
+ struct arm_smmu_device smmu;
+ struct device *dev;
+
+ void __iomem *base;
+ int irq;
+
+ /* CMDQV Hardware Params */
+ u16 num_vintfs;
+ u16 num_vcmdqs;
+ u16 num_lvcmdqs_per_vintf;
+
+ struct ida vintf_ids;
+
+ struct tegra241_vintf **vintfs;
+};
+
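Note that struct tegra241_cmdqv embeds struct arm_smmu_device as its first member, so the devm_krealloc() in the probe path grows the core structure in place, and the driver can recover its private state from any struct arm_smmu_device pointer it is handed (the static_assert later in this file pins the offset to zero). A sketch of the recovery this enables (illustrative; to_cmdqv is a hypothetical helper name):

/* With 'smmu' at offset 0, container_of() reduces to a pointer cast */
static struct tegra241_cmdqv *to_cmdqv(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct tegra241_cmdqv, smmu);
}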
+/* Config and Polling Helpers */
+
+static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv,
+ void __iomem *addr_config,
+ void __iomem *addr_status,
+ u32 regval, const char *header,
+ bool *out_enabled)
+{
+ bool en = regval & BIT(0);
+ int ret;
+
+ writel(regval, addr_config);
+ ret = readl_poll_timeout(addr_status, regval,
+ en ? regval & BIT(0) : !(regval & BIT(0)),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+ if (ret)
+ dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n",
+ header, en ? "en" : "dis", regval);
+ if (out_enabled)
+ WRITE_ONCE(*out_enabled, regval & BIT(0));
+ return ret;
+}
+
+static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval)
+{
+ return tegra241_cmdqv_write_config(cmdqv,
+ REG_CMDQV(cmdqv, CONFIG),
+ REG_CMDQV(cmdqv, STATUS),
+ regval, "CMDQV: ", NULL);
+}
+
+static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval)
+{
+ char header[16];
+
+ snprintf(header, 16, "VINTF%u: ", vintf->idx);
+ return tegra241_cmdqv_write_config(vintf->cmdqv,
+ REG_VINTF(vintf, CONFIG),
+ REG_VINTF(vintf, STATUS),
+ regval, header, &vintf->enabled);
+}
+
+static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq,
+ char *header, int hlen)
+{
+ WARN_ON(hlen < 64);
+ if (WARN_ON(!vcmdq->vintf))
+ return "";
+ snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ",
+ vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx);
+ return header;
+}
+
+static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval)
+{
+ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
+
+ return tegra241_cmdqv_write_config(vcmdq->cmdqv,
+ REG_VCMDQ_PAGE0(vcmdq, CONFIG),
+ REG_VCMDQ_PAGE0(vcmdq, STATUS),
+ regval, h, &vcmdq->enabled);
+}
+
+/* ISR Functions */
+
+static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
+{
+ int i;
+
+ for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
+ u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+
+ while (map) {
+ unsigned long lidx = __ffs64(map);
+ struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
+ u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
+
+ __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);
+ writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
+ map &= ~BIT_ULL(lidx);
+ }
+ }
+}
+
+static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
+{
+ struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid;
+ void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP);
+ char err_str[256];
+ u64 vintf_map;
+
+ /* Use readl_relaxed() as register addresses are not 64-bit aligned */
+ vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
+ (u64)readl_relaxed(reg_vintf_map);
+
+ snprintf(err_str, sizeof(err_str),
+ "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map,
+ readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))),
+ readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))),
+ readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))),
+ readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0))));
+
+ dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str);
+
+ /* Handle VINTF0 and its LVCMDQs */
+ if (vintf_map & BIT_ULL(0)) {
+ tegra241_vintf0_handle_error(cmdqv->vintfs[0]);
+ vintf_map &= ~BIT_ULL(0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Command Queue Function */
+
+static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
+{
+ switch (ent->opcode) {
+ case CMDQ_OP_TLBI_NH_ASID:
+ case CMDQ_OP_TLBI_NH_VA:
+ case CMDQ_OP_ATC_INV:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct arm_smmu_cmdq *
+tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(smmu, struct tegra241_cmdqv, smmu);
+ struct tegra241_vintf *vintf = cmdqv->vintfs[0];
+ struct tegra241_vcmdq *vcmdq;
+ u16 lidx;
+
+ if (READ_ONCE(bypass_vcmdq))
+ return NULL;
+
+ /* Use SMMU CMDQ if VINTF0 is uninitialized */
+ if (!READ_ONCE(vintf->enabled))
+ return NULL;
+
+ /*
+	 * Select an LVCMDQ to use. This is a temporary solution to balance
+	 * out traffic on cmdq issuing: each cmdq has its own lock, so if all
+	 * CPUs issued commands through the same cmdq, only one CPU at a time
+	 * could make progress while the others spun on the same lock.
+ */
+ lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
+ vcmdq = vintf->lvcmdqs[lidx];
+ if (!vcmdq || !READ_ONCE(vcmdq->enabled))
+ return NULL;
+
+	/* Unsupported CMDs fall back to the smmu->cmdq pathway */
+ if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
+ return NULL;
+ return &vcmdq->cmdq;
+}
+
+/* HW Reset Functions */
+
+static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
+{
+ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
+ u32 gerrorn, gerror;
+
+ if (vcmdq_write_config(vcmdq, 0)) {
+ dev_err(vcmdq->cmdqv->dev,
+ "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
+ }
+ writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD));
+ writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS));
+ writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE));
+ writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE));
+
+ gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN));
+ gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
+ if (gerror != gerrorn) {
+ dev_warn(vcmdq->cmdqv->dev,
+ "%suncleared error detected, resetting\n", h);
+ writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
+ }
+
+ dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h);
+}
+
+static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
+{
+ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
+ int ret;
+
+ /* Reset VCMDQ */
+ tegra241_vcmdq_hw_deinit(vcmdq);
+
+ /* Configure and enable VCMDQ */
+ writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
+
+ ret = vcmdq_write_config(vcmdq, VCMDQ_EN);
+ if (ret) {
+ dev_err(vcmdq->cmdqv->dev,
+ "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h,
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)),
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)),
+ readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS)));
+ return ret;
+ }
+
+ dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h);
+ return 0;
+}
+
+static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf)
+{
+ u16 lidx;
+
+ for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
+ if (vintf->lvcmdqs && vintf->lvcmdqs[lidx])
+ tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
+ vintf_write_config(vintf, 0);
+}
+
+static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
+{
+ u32 regval;
+ u16 lidx;
+ int ret;
+
+ /* Reset VINTF */
+ tegra241_vintf_hw_deinit(vintf);
+
+ /* Configure and enable VINTF */
+ /*
+	 * Note that the HYP_OWN bit is wired to zero when running in a guest
+	 * kernel, regardless of what is written here, as a !HYP_OWN cmdq HW
+	 * only supports a restricted set of commands.
+ */
+ regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
+ writel(regval, REG_VINTF(vintf, CONFIG));
+
+ ret = vintf_write_config(vintf, regval | VINTF_EN);
+ if (ret)
+ return ret;
+ /*
+	 * As mentioned above, the HYP_OWN bit is wired to zero for a guest
+	 * kernel, so read it back from the HW so that hyp_own reflects the
+	 * actual value.
+ */
+ vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
+
+ for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
+ if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
+ ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]);
+ if (ret) {
+ tegra241_vintf_hw_deinit(vintf);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(smmu, struct tegra241_cmdqv, smmu);
+ u16 qidx, lidx, idx;
+ u32 regval;
+ int ret;
+
+ /* Reset CMDQV */
+ regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG));
+ ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN);
+ if (ret)
+ return ret;
+ ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN);
+ if (ret)
+ return ret;
+
+ /* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */
+ for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) {
+ for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
+ regval = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx);
+ regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx);
+ regval |= CMDQV_CMDQ_ALLOCATED;
+ writel_relaxed(regval,
+ REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++)));
+ }
+ }
+
+ return tegra241_vintf_hw_init(cmdqv->vintfs[0], true);
+}
+
+/* VCMDQ Resource Helpers */
+
+static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+{
+ struct arm_smmu_queue *q = &vcmdq->cmdq.q;
+ size_t nents = 1 << q->llq.max_n_shift;
+ size_t qsz = nents << CMDQ_ENT_SZ_SHIFT;
+
+ if (!q->base)
+ return;
+ dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma);
+}
+
+static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+{
+ struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
+ struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
+ struct arm_smmu_queue *q = &cmdq->q;
+ char name[16];
+ int ret;
+
+ snprintf(name, 16, "vcmdq%u", vcmdq->idx);
+
+ q->llq.max_n_shift = VCMDQ_LOG2SIZE_MAX;
+
+ /* Use the common helper to init the VCMDQ, and then... */
+ ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
+ TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
+ CMDQ_ENT_DWORDS, name);
+ if (ret)
+ return ret;
+
+ /* ...override q_base to write VCMDQ_BASE registers */
+ q->q_base = q->base_dma & VCMDQ_ADDR;
+ q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
+
+ if (!vcmdq->vintf->hyp_own)
+ cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
+
+ return arm_smmu_cmdq_init(smmu, cmdq);
+}
+
+/* VINTF Logical VCMDQ Resource Helpers */
+
+static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
+{
+ vintf->lvcmdqs[lidx] = NULL;
+}
+
+static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx,
+ struct tegra241_vcmdq *vcmdq)
+{
+ struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
+ u16 idx = vintf->idx;
+
+ vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx;
+ vcmdq->lidx = lidx;
+ vcmdq->cmdqv = cmdqv;
+ vcmdq->vintf = vintf;
+ vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx);
+ vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx);
+
+ vintf->lvcmdqs[lidx] = vcmdq;
+ return 0;
+}
+
+static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
+{
+ struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx];
+ char header[64];
+
+ tegra241_vcmdq_free_smmu_cmdq(vcmdq);
+ tegra241_vintf_deinit_lvcmdq(vintf, lidx);
+
+ dev_dbg(vintf->cmdqv->dev,
+ "%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64));
+ kfree(vcmdq);
+}
+
+static struct tegra241_vcmdq *
+tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
+{
+ struct tegra241_cmdqv *cmdqv = vintf->cmdqv;
+ struct tegra241_vcmdq *vcmdq;
+ char header[64];
+ int ret;
+
+ vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL);
+ if (!vcmdq)
+ return ERR_PTR(-ENOMEM);
+
+ ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq);
+ if (ret)
+ goto free_vcmdq;
+
+ /* Build an arm_smmu_cmdq for each LVCMDQ */
+ ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq);
+ if (ret)
+ goto deinit_lvcmdq;
+
+ dev_dbg(cmdqv->dev,
+ "%sallocated\n", lvcmdq_error_header(vcmdq, header, 64));
+ return vcmdq;
+
+deinit_lvcmdq:
+ tegra241_vintf_deinit_lvcmdq(vintf, lidx);
+free_vcmdq:
+ kfree(vcmdq);
+ return ERR_PTR(ret);
+}
+
+/* VINTF Resource Helpers */
+
+static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
+{
+ kfree(cmdqv->vintfs[idx]->lvcmdqs);
+ ida_free(&cmdqv->vintf_ids, idx);
+ cmdqv->vintfs[idx] = NULL;
+}
+
+static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx,
+ struct tegra241_vintf *vintf)
+{
+ u16 idx;
+ int ret;
+
+ ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ idx = ret;
+
+ vintf->idx = idx;
+ vintf->cmdqv = cmdqv;
+ vintf->base = cmdqv->base + TEGRA241_VINTF(idx);
+
+ vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf,
+ sizeof(*vintf->lvcmdqs), GFP_KERNEL);
+ if (!vintf->lvcmdqs) {
+ ida_free(&cmdqv->vintf_ids, idx);
+ return -ENOMEM;
+ }
+
+ cmdqv->vintfs[idx] = vintf;
+ return ret;
+}
+
+/* Remove Helpers */
+
+static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx)
+{
+ tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]);
+ tegra241_vintf_free_lvcmdq(vintf, lidx);
+}
+
+static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx)
+{
+ struct tegra241_vintf *vintf = cmdqv->vintfs[idx];
+ u16 lidx;
+
+ /* Remove LVCMDQ resources */
+ for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++)
+ if (vintf->lvcmdqs[lidx])
+ tegra241_vintf_remove_lvcmdq(vintf, lidx);
+
+ /* Remove VINTF resources */
+ tegra241_vintf_hw_deinit(vintf);
+
+ dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx);
+ tegra241_cmdqv_deinit_vintf(cmdqv, idx);
+ kfree(vintf);
+}
+
+static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(smmu, struct tegra241_cmdqv, smmu);
+ u16 idx;
+
+ /* Remove VINTF resources */
+ for (idx = 0; idx < cmdqv->num_vintfs; idx++) {
+ if (cmdqv->vintfs[idx]) {
+ /* Only vintf0 should remain at this stage */
+ WARN_ON(idx > 0);
+ tegra241_cmdqv_remove_vintf(cmdqv, idx);
+ }
+ }
+
+ /* Remove cmdqv resources */
+ ida_destroy(&cmdqv->vintf_ids);
+
+ if (cmdqv->irq > 0)
+ free_irq(cmdqv->irq, cmdqv);
+ iounmap(cmdqv->base);
+ kfree(cmdqv->vintfs);
+ put_device(cmdqv->dev); /* smmu->impl_dev */
+}
+
+static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = {
+ .get_secondary_cmdq = tegra241_cmdqv_get_cmdq,
+ .device_reset = tegra241_cmdqv_hw_reset,
+ .device_remove = tegra241_cmdqv_remove,
+};
+
+/* Probe Functions */
+
+static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data)
+{
+ struct resource_win win;
+
+ return !acpi_dev_resource_address_space(res, &win);
+}
+
+static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data)
+{
+ struct resource r;
+ int *irq = data;
+
+ if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
+ *irq = r.start;
+ return 1; /* No need to add resource to the list */
+}
+
+static struct resource *
+tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct list_head resource_list;
+ struct resource_entry *rentry;
+ struct resource *res = NULL;
+ int ret;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ tegra241_cmdqv_acpi_is_memory, NULL);
+ if (ret < 0) {
+ dev_err(dev, "failed to get memory resource: %d\n", ret);
+ return NULL;
+ }
+
+ rentry = list_first_entry_or_null(&resource_list,
+ struct resource_entry, node);
+ if (!rentry) {
+ dev_err(dev, "failed to get memory resource entry\n");
+ goto free_list;
+ }
+
+ /* Caller must free the res */
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto free_list;
+
+ *res = *rentry->res;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ INIT_LIST_HEAD(&resource_list);
+
+ if (irq)
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ tegra241_cmdqv_acpi_get_irqs, irq);
+ if (ret < 0 || !irq || *irq <= 0)
+ dev_warn(dev, "no interrupt. errors will not be reported\n");
+
+free_list:
+ acpi_dev_free_resource_list(&resource_list);
+ return res;
+}
+
+static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
+{
+ struct tegra241_cmdqv *cmdqv =
+ container_of(smmu, struct tegra241_cmdqv, smmu);
+ struct tegra241_vintf *vintf;
+ int lidx;
+ int ret;
+
+ vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
+ if (!vintf)
+ goto out_fallback;
+
+ /* Init VINTF0 for in-kernel use */
+ ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
+ if (ret) {
+ dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
+ goto free_vintf;
+ }
+
+ /* Preallocate logical VCMDQs to VINTF0 */
+ for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
+ struct tegra241_vcmdq *vcmdq;
+
+ vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
+ if (IS_ERR(vcmdq))
+ goto free_lvcmdq;
+ }
+
+ /* Now, we are ready to run all the impl ops */
+ smmu->impl_ops = &tegra241_cmdqv_impl_ops;
+ return 0;
+
+free_lvcmdq:
+ for (lidx--; lidx >= 0; lidx--)
+ tegra241_vintf_free_lvcmdq(vintf, lidx);
+ tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx);
+free_vintf:
+ kfree(vintf);
+out_fallback:
+ dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
+ smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
+ tegra241_cmdqv_remove(smmu);
+ return 0;
+}
+
+struct dentry *cmdqv_debugfs_dir;
+
+static struct arm_smmu_device *
+__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res,
+ int irq)
+{
+ static const struct arm_smmu_impl_ops init_ops = {
+ .init_structures = tegra241_cmdqv_init_structures,
+ .device_remove = tegra241_cmdqv_remove,
+ };
+ struct tegra241_cmdqv *cmdqv = NULL;
+ struct arm_smmu_device *new_smmu;
+ void __iomem *base;
+ u32 regval;
+ int ret;
+
+ static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0);
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(smmu->dev, "failed to ioremap\n");
+ return NULL;
+ }
+
+ regval = readl(base + TEGRA241_CMDQV_CONFIG);
+ if (disable_cmdqv) {
+ dev_info(smmu->dev, "Detected disable_cmdqv=true\n");
+ writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG);
+ goto iounmap;
+ }
+
+ cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL);
+ if (!cmdqv)
+ goto iounmap;
+ new_smmu = &cmdqv->smmu;
+
+ cmdqv->irq = irq;
+ cmdqv->base = base;
+ cmdqv->dev = smmu->impl_dev;
+
+ if (cmdqv->irq > 0) {
+ ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv",
+ cmdqv);
+ if (ret) {
+ dev_err(cmdqv->dev, "failed to request irq (%d): %d\n",
+ cmdqv->irq, ret);
+ goto iounmap;
+ }
+ }
+
+ regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM));
+ cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval);
+ cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval);
+ cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs;
+
+ cmdqv->vintfs =
+ kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL);
+ if (!cmdqv->vintfs)
+ goto free_irq;
+
+ ida_init(&cmdqv->vintf_ids);
+
+#ifdef CONFIG_IOMMU_DEBUGFS
+ if (!cmdqv_debugfs_dir) {
+ cmdqv_debugfs_dir =
+ debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir);
+ debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir,
+ &bypass_vcmdq);
+ }
+#endif
+
+ /* Provide init-level ops only, until tegra241_cmdqv_init_structures */
+ new_smmu->impl_ops = &init_ops;
+
+ return new_smmu;
+
+free_irq:
+ if (cmdqv->irq > 0)
+ free_irq(cmdqv->irq, cmdqv);
+iounmap:
+ iounmap(base);
+ return NULL;
+}
+
+struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_device *new_smmu;
+ struct resource *res = NULL;
+ int irq;
+
+ if (!smmu->dev->of_node)
+ res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq);
+ if (!res)
+ goto out_fallback;
+
+ new_smmu = __tegra241_cmdqv_probe(smmu, res, irq);
+ kfree(res);
+
+ if (new_smmu)
+ return new_smmu;
+
+out_fallback:
+ dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
+ smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
+ put_device(smmu->impl_dev);
+ return ERR_PTR(-ENODEV);
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 36c6b36ad4ff..6372f3e25c4b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -283,6 +283,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
int i;
/*
+ * MSM8998 LPASS SMMU reports 13 context banks, but accessing
+ * the last context bank crashes the system.
+ */
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
+ smmu->num_context_banks == 13) {
+ smmu->num_context_banks = 12;
+ } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
+ if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
+ smmu->num_context_banks = 7;
+ else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
+ smmu->num_context_banks = 13;
+ }
+
+ /*
* Some platforms support more than the Arm SMMU architected maximum of
* 128 stream matching groups. For unknown reasons, the additional
* groups don't exhibit the same behavior as the architected registers,
@@ -338,6 +352,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
+static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
+{
+ /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
+ smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;
+
+	/* TZ protects the last several context banks; hide them from Linux */
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
+ smmu->num_context_banks == 5)
+ smmu->num_context_banks = 2;
+
+ return 0;
+}
+
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
@@ -436,6 +463,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.init_context = qcom_adreno_smmu_init_context,
+ .cfg_probe = qcom_adreno_smmuv2_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 723273440c21..8321962b3714 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -417,7 +417,7 @@ void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
const struct arm_smmu_context_fault_info *cfi)
{
- dev_dbg(smmu->dev,
+ dev_err(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7b1dfa0665df..2a9fa0c8cc00 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -17,6 +17,7 @@
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
+#include <linux/iommu-dma.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
@@ -1037,9 +1038,23 @@ out_unmap:
return NULL;
}
-static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
- size_t size, enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs)
+/*
+ * This is the actual allocation backing the sg_table returned by
+ * iommu_dma_alloc_noncontiguous().
+ *
+ * Users of the DMA API should only care about the sg_table, but to make the
+ * DMA-API-internal vmapping and freeing easier we stash away the page array
+ * as well (except for the fallback case). This can go away any time, e.g.
+ * when a vmap variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+ struct sg_table sgt;
+ struct page **pages;
+};
+#define sgt_handle(sgt) \
+ container_of((sgt), struct dma_sgt_handle, sgt)
+
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
struct dma_sgt_handle *sh;
@@ -1055,7 +1070,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
return &sh->sgt;
}
-static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct dma_sgt_handle *sh = sgt_handle(sgt);
@@ -1066,8 +1081,26 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
kfree(sh);
}
-static void iommu_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+}
+
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+ return -ENXIO;
+ return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
+}
+
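The range check in iommu_dma_mmap_noncontiguous() above is deliberately phrased as two comparisons rather than the more obvious vm_pgoff + vma_pages(vma) > count, so that a huge vm_pgoff cannot overflow the addition and slip past the bound. An equivalent standalone predicate (illustrative sketch with hypothetical naming):

#include <stdbool.h>

/* Overflow-safe form of: pgoff + pages <= count */
static bool mmap_range_ok(unsigned long pgoff, unsigned long pages,
			  unsigned long count)
{
	return pgoff < count && pages <= count - pgoff;
}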
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
@@ -1081,8 +1114,8 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
-static void iommu_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
@@ -1096,9 +1129,8 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
arch_sync_dma_for_device(phys, size, dir);
}
-static void iommu_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
@@ -1112,9 +1144,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
-static void iommu_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
@@ -1129,9 +1160,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
-static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
@@ -1189,7 +1220,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return iova;
}
-static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1342,8 +1373,8 @@ out_unmap:
* impedance-matching, to be able to hand off a suitably-aligned list,
* but still preserve the original offsets and sizes for the caller.
*/
-static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1462,8 +1493,8 @@ out:
return ret;
}
-static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t end = 0, start;
struct scatterlist *tmp;
@@ -1512,7 +1543,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
__iommu_dma_unmap(dev, start, end - start);
}
-static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
@@ -1520,7 +1551,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
dma_get_mask(dev));
}
-static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
@@ -1557,7 +1588,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
dma_free_contiguous(dev, page, alloc_size);
}
-static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
@@ -1601,8 +1632,8 @@ out_free_pages:
return NULL;
}
-static void *iommu_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
@@ -1635,7 +1666,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
-static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1666,7 +1697,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
-static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1693,19 +1724,19 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
-static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
-static size_t iommu_dma_opt_mapping_size(void)
+size_t iommu_dma_opt_mapping_size(void)
{
return iova_rcache_range();
}
-static size_t iommu_dma_max_mapping_size(struct device *dev)
+size_t iommu_dma_max_mapping_size(struct device *dev)
{
if (dev_is_untrusted(dev))
return swiotlb_max_mapping_size(dev);
@@ -1713,32 +1744,6 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
-static const struct dma_map_ops iommu_dma_ops = {
- .flags = DMA_F_PCI_P2PDMA_SUPPORTED |
- DMA_F_CAN_SKIP_SYNC,
- .alloc = iommu_dma_alloc,
- .free = iommu_dma_free,
- .alloc_pages_op = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
- .alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
- .free_noncontiguous = iommu_dma_free_noncontiguous,
- .mmap = iommu_dma_mmap,
- .get_sgtable = iommu_dma_get_sgtable,
- .map_page = iommu_dma_map_page,
- .unmap_page = iommu_dma_unmap_page,
- .map_sg = iommu_dma_map_sg,
- .unmap_sg = iommu_dma_unmap_sg,
- .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
- .sync_single_for_device = iommu_dma_sync_single_for_device,
- .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
- .sync_sg_for_device = iommu_dma_sync_sg_for_device,
- .map_resource = iommu_dma_map_resource,
- .unmap_resource = iommu_dma_unmap_resource,
- .get_merge_boundary = iommu_dma_get_merge_boundary,
- .opt_mapping_size = iommu_dma_opt_mapping_size,
- .max_mapping_size = iommu_dma_max_mapping_size,
-};
-
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1746,19 +1751,15 @@ void iommu_setup_dma_ops(struct device *dev)
if (dev_is_pci(dev))
dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
- if (iommu_is_dma_domain(domain)) {
- if (iommu_dma_init_domain(domain, dev))
- goto out_err;
- dev->dma_ops = &iommu_dma_ops;
- } else if (dev->dma_ops == &iommu_dma_ops) {
- /* Clean up if we've switched *from* a DMA domain */
- dev->dma_ops = NULL;
- }
+ dev->dma_iommu = iommu_is_dma_domain(domain);
+ if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
+ goto out_err;
return;
out_err:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ dev->dma_iommu = false;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index e9d2bff4659b..30be786bff11 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -416,14 +416,12 @@ static struct iommu_group *fsl_pamu_device_group(struct device *dev)
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
- int len;
-
/*
* uboot must fill the fsl,liodn for platform devices to be supported by
* the iommu.
*/
if (!dev_is_pci(dev) &&
- !of_get_property(dev->of_node, "fsl,liodn", &len))
+ !of_property_present(dev->of_node, "fsl,liodn"))
return ERR_PTR(-ENODEV);
return &pamu_iommu;
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index f52fb39c968e..88fd32a9323c 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -12,7 +12,6 @@ config DMAR_DEBUG
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && X86
- select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select IOMMUFD_DRIVER if IOMMUFD
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 44e92638c0cd..e5b89f728ad3 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -190,6 +190,13 @@ int cache_tag_assign_domain(struct dmar_domain *domain,
u16 did = domain_get_id_for_dev(domain, dev);
int ret;
+	/* domain->qi_batch will be freed in the iommu_free_domain() path. */
+ if (!domain->qi_batch) {
+ domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL);
+ if (!domain->qi_batch)
+ return -ENOMEM;
+ }
+
ret = __cache_tag_assign_domain(domain, did, dev, pasid);
if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
return ret;
@@ -255,6 +262,154 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
}
+static void qi_batch_flush_descs(struct intel_iommu *iommu, struct qi_batch *batch)
+{
+ if (!iommu || !batch->index)
+ return;
+
+ qi_submit_sync(iommu, batch->descs, batch->index, 0);
+
+ /* Reset the index value and clean the whole batch buffer. */
+ memset(batch, 0, sizeof(*batch));
+}
+
+static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch)
+{
+ if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)
+ qi_batch_flush_descs(iommu, batch);
+}
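Editor's note: these two helpers implement the accumulate-and-flush pattern used by the qi_batch_add_*() functions that follow. Each add writes descs[batch->index] first and then increments, so a full batch is submitted automatically and the index always names a free slot on entry. A minimal usage sketch:

	struct qi_batch batch = {};

	/* queue invalidations; auto-flushes once 16 descriptors are batched */
	qi_batch_add_iotlb(iommu, did, addr, size_order, DMA_TLB_PSI_FLUSH, &batch);
	qi_batch_add_dev_iotlb(iommu, sid, pfsid, qdep, addr, mask, &batch);

	/* submit whatever remains in the partial batch */
	qi_batch_flush_descs(iommu, &batch);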
+
+static void qi_batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ struct qi_batch *batch)
+{
+ qi_desc_iotlb(iommu, did, addr, size_order, type, &batch->descs[batch->index]);
+ qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned int mask,
+ struct qi_batch *batch)
+{
+ /*
+ * According to VT-d spec, software is recommended to not submit any Device-TLB
+ * invalidation requests while address remapping hardware is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
+ qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &batch->descs[batch->index]);
+ qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
+ u64 addr, unsigned long npages, bool ih,
+ struct qi_batch *batch)
+{
+ /*
+ * npages == -1 means a PASID-selective invalidation, otherwise,
+ * a positive value for Page-selective-within-PASID invalidation.
+ * 0 is not a valid input.
+ */
+ if (!npages)
+ return;
+
+ qi_desc_piotlb(did, pasid, addr, npages, ih, &batch->descs[batch->index]);
+ qi_batch_increment_index(iommu, batch);
+}
+
+static void qi_batch_add_pasid_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, struct qi_batch *batch)
+{
+ /*
+ * According to VT-d spec, software is recommended to not submit any
+ * Device-TLB invalidation requests while address remapping hardware
+ * is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
+ qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, qdep, addr, size_order,
+ &batch->descs[batch->index]);
+ qi_batch_increment_index(iommu, batch);
+}
+
+static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
+ unsigned long addr, unsigned long pages,
+ unsigned long mask, int ih)
+{
+ struct intel_iommu *iommu = tag->iommu;
+ u64 type = DMA_TLB_PSI_FLUSH;
+
+ if (domain->use_first_level) {
+ qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
+ pages, ih, domain->qi_batch);
+ return;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or the size
+ * is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) ||
+ mask > cap_max_amask_val(iommu->cap) || pages == -1) {
+ addr = 0;
+ mask = 0;
+ ih = 0;
+ type = DMA_TLB_DSI_FLUSH;
+ }
+
+ if (ecap_qis(iommu->ecap))
+ qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
+ domain->qi_batch);
+ else
+ __iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
+}
+
+static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_tag *tag,
+ unsigned long addr, unsigned long mask)
+{
+ struct intel_iommu *iommu = tag->iommu;
+ struct device_domain_info *info;
+ u16 sid;
+
+ info = dev_iommu_priv_get(tag->dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+
+ if (tag->pasid == IOMMU_NO_PASID) {
+ qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+ addr, mask, domain->qi_batch);
+ if (info->dtlb_extra_inval)
+ qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+ addr, mask, domain->qi_batch);
+ return;
+ }
+
+ qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
+ info->ats_qdep, addr, mask, domain->qi_batch);
+ if (info->dtlb_extra_inval)
+ qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
+ info->ats_qdep, addr, mask,
+ domain->qi_batch);
+}
+
+static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
+{
+ struct intel_iommu *iommu = tag->iommu;
+ struct device_domain_info *info;
+ u16 sid;
+
+ info = dev_iommu_priv_get(tag->dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+
+ qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
+ MAX_AGAW_PFN_WIDTH, domain->qi_batch);
+ if (info->dtlb_extra_inval)
+ qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
+ MAX_AGAW_PFN_WIDTH, domain->qi_batch);
+}
+
/*
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
* when the memory mappings in the target domain have been modified.
@@ -262,6 +417,7 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
unsigned long end, int ih)
{
+ struct intel_iommu *iommu = NULL;
unsigned long pages, mask, addr;
struct cache_tag *tag;
unsigned long flags;
@@ -270,30 +426,14 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
- struct intel_iommu *iommu = tag->iommu;
- struct device_domain_info *info;
- u16 sid;
+ if (iommu && iommu != tag->iommu)
+ qi_batch_flush_descs(iommu, domain->qi_batch);
+ iommu = tag->iommu;
switch (tag->type) {
case CACHE_TAG_IOTLB:
case CACHE_TAG_NESTING_IOTLB:
- if (domain->use_first_level) {
- qi_flush_piotlb(iommu, tag->domain_id,
- tag->pasid, addr, pages, ih);
- } else {
- /*
- * Fallback to domain selective flush if no
- * PSI support or the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, tag->domain_id,
- 0, 0, DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, tag->domain_id,
- addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih);
break;
case CACHE_TAG_NESTING_DEVTLB:
/*
@@ -307,23 +447,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
mask = MAX_AGAW_PFN_WIDTH;
fallthrough;
case CACHE_TAG_DEVTLB:
- info = dev_iommu_priv_get(tag->dev);
- sid = PCI_DEVID(info->bus, info->devfn);
-
- if (tag->pasid == IOMMU_NO_PASID)
- qi_flush_dev_iotlb(iommu, sid, info->pfsid,
- info->ats_qdep, addr, mask);
- else
- qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
- tag->pasid, info->ats_qdep,
- addr, mask);
-
- quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
+ cache_tag_flush_devtlb_psi(domain, tag, addr, mask);
break;
}
trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
}
+ qi_batch_flush_descs(iommu, domain->qi_batch);
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
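Editor's note: the loop above keeps one running batch per domain and submits it whenever the iteration crosses to a tag owned by a different IOMMU unit, plus once more after the loop for the final partial batch; cache_tag_flush_all() and cache_tag_flush_range_np() below follow the same shape. Distilled:

	struct intel_iommu *iommu = NULL;

	list_for_each_entry(tag, &domain->cache_tags, node) {
		if (iommu && iommu != tag->iommu)	/* crossed an IOMMU boundary */
			qi_batch_flush_descs(iommu, domain->qi_batch);
		iommu = tag->iommu;
		/* ... queue this tag's descriptors into domain->qi_batch ... */
	}
	qi_batch_flush_descs(iommu, domain->qi_batch);	/* final partial batch */

Correctness does not depend on the tag list being grouped by IOMMU; an interleaved list only costs extra flushes.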
@@ -333,39 +463,30 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
*/
void cache_tag_flush_all(struct dmar_domain *domain)
{
+ struct intel_iommu *iommu = NULL;
struct cache_tag *tag;
unsigned long flags;
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
- struct intel_iommu *iommu = tag->iommu;
- struct device_domain_info *info;
- u16 sid;
+ if (iommu && iommu != tag->iommu)
+ qi_batch_flush_descs(iommu, domain->qi_batch);
+ iommu = tag->iommu;
switch (tag->type) {
case CACHE_TAG_IOTLB:
case CACHE_TAG_NESTING_IOTLB:
- if (domain->use_first_level)
- qi_flush_piotlb(iommu, tag->domain_id,
- tag->pasid, 0, -1, 0);
- else
- iommu->flush.flush_iotlb(iommu, tag->domain_id,
- 0, 0, DMA_TLB_DSI_FLUSH);
+ cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
break;
case CACHE_TAG_DEVTLB:
case CACHE_TAG_NESTING_DEVTLB:
- info = dev_iommu_priv_get(tag->dev);
- sid = PCI_DEVID(info->bus, info->devfn);
-
- qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
- 0, MAX_AGAW_PFN_WIDTH);
- quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
- IOMMU_NO_PASID, info->ats_qdep);
+ cache_tag_flush_devtlb_all(domain, tag);
break;
}
trace_cache_tag_flush_all(tag);
}
+ qi_batch_flush_descs(iommu, domain->qi_batch);
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
@@ -383,6 +504,7 @@ void cache_tag_flush_all(struct dmar_domain *domain)
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
unsigned long end)
{
+ struct intel_iommu *iommu = NULL;
unsigned long pages, mask, addr;
struct cache_tag *tag;
unsigned long flags;
@@ -391,7 +513,9 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
- struct intel_iommu *iommu = tag->iommu;
+ if (iommu && iommu != tag->iommu)
+ qi_batch_flush_descs(iommu, domain->qi_batch);
+ iommu = tag->iommu;
if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
iommu_flush_write_buffer(iommu);
@@ -399,22 +523,11 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
}
if (tag->type == CACHE_TAG_IOTLB ||
- tag->type == CACHE_TAG_NESTING_IOTLB) {
- /*
- * Fallback to domain selective flush if no
- * PSI support or the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, tag->domain_id,
- 0, 0, DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, tag->domain_id,
- addr, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ tag->type == CACHE_TAG_NESTING_IOTLB)
+ cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0);
trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
}
+ qi_batch_flush_descs(iommu, domain->qi_batch);
spin_unlock_irqrestore(&domain->cache_lock, flags);
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 1c8d3141cb55..eaf862e8dea1 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1204,9 +1204,7 @@ static void free_iommu(struct intel_iommu *iommu)
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE ||
- qi->desc_status[qi->free_tail] == QI_ABORT) {
- qi->desc_status[qi->free_tail] = QI_FREE;
+ while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
}
@@ -1463,8 +1461,16 @@ restart:
raw_spin_lock(&qi->q_lock);
}
- for (i = 0; i < count; i++)
- qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
+ /*
+ * The reclaim code can free descriptors from multiple submissions
+ * starting from the tail of the queue. When count == 0, the
+ * status of the standalone wait descriptor at the tail of the queue
+ * must be set to QI_FREE to allow the reclaim code to proceed.
+ * It is also possible that descriptors from one of the previous
+ * submissions have to be reclaimed by a subsequent submission.
+ */
+ for (i = 0; i <= count; i++)
+ qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
reclaim_free_desc(qi);
raw_spin_unlock_irqrestore(&qi->q_lock, flags);
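Editor's note: the rework decouples descriptor completion from slot reclaim. Completion marks all count + 1 slots (the payload plus the standalone wait descriptor) QI_FREE directly, and reclaim_free_desc() sweeps only contiguous QI_FREE slots up to free_head. With concurrent submitters this handles out-of-order completion: if submission B (say slots 3-4) completes before submission A (slots 0-2), B's slots become QI_FREE but free_tail parks at slot 0 until A completes, after which a single reclaim pass sweeps all five slots.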
@@ -1520,24 +1526,9 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type)
{
- u8 dw = 0, dr = 0;
-
struct qi_desc desc;
- int ih = 0;
-
- if (cap_write_drain(iommu->cap))
- dw = 1;
-
- if (cap_read_drain(iommu->cap))
- dr = 1;
-
- desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
- | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
- desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
- | QI_IOTLB_AM(size_order);
- desc.qw2 = 0;
- desc.qw3 = 0;
+ qi_desc_iotlb(iommu, did, addr, size_order, type, &desc);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -1555,20 +1546,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
- if (mask) {
- addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
- desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
- } else
- desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
-
- if (qdep >= QI_DEV_IOTLB_MAX_INVS)
- qdep = 0;
-
- desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
- desc.qw2 = 0;
- desc.qw3 = 0;
-
+ qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -1588,28 +1566,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
return;
}
- if (npages == -1) {
- desc.qw0 = QI_EIOTLB_PASID(pasid) |
- QI_EIOTLB_DID(did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- } else {
- int mask = ilog2(__roundup_pow_of_two(npages));
- unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
-
- if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
- addr = ALIGN_DOWN(addr, align);
-
- desc.qw0 = QI_EIOTLB_PASID(pasid) |
- QI_EIOTLB_DID(did) |
- QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = QI_EIOTLB_ADDR(addr) |
- QI_EIOTLB_IH(ih) |
- QI_EIOTLB_AM(mask);
- }
-
+ qi_desc_piotlb(did, pasid, addr, npages, ih, &desc);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -1617,7 +1574,6 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
- unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
/*
@@ -1629,40 +1585,9 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
- desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
- QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
- QI_DEV_IOTLB_PFSID(pfsid);
-
- /*
- * If S bit is 0, we only flush a single page. If S bit is set,
- * The least significant zero bit indicates the invalidation address
- * range. VT-d spec 6.5.2.6.
- * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
- * size order = 0 is PAGE_SIZE 4KB
- * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
- * ECAP.
- */
- if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
- pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
- addr, size_order);
-
- /* Take page address */
- desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
-
- if (size_order) {
- /*
- * Existing 0s in address below size_order may be the least
- * significant bit, we must set them to 1s to avoid having
- * smaller size than desired.
- */
- desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
- VTD_PAGE_SHIFT);
- /* Clear size_order bit to indicate size */
- desc.qw1 &= ~mask;
- /* Set the S bit to indicate flushing more than 1 page */
- desc.qw1 |= QI_DEV_EIOTLB_SIZE;
- }
-
+ qi_desc_dev_iotlb_pasid(sid, pfsid, pasid,
+ qdep, addr, size_order,
+ &desc);
qi_submit_sync(iommu, &desc, 1, 0);
}
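Editor's note: this and the preceding dmar.c hunks reduce each synchronous qi_flush_*() helper to a thin wrapper around a shared descriptor builder. The qi_desc_iotlb(), qi_desc_dev_iotlb(), qi_desc_piotlb() and qi_desc_dev_iotlb_pasid() inlines are added to iommu.h further down in this diff, so the batched cache.c paths and the synchronous paths here build identical descriptors from one copy of the encoding logic.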
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 4aa070cf56e7..9f6b0780f2ef 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -167,15 +167,6 @@ static void device_rbtree_remove(struct device_domain_info *info)
spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
}
-/*
- * This domain is a statically identity mapping domain.
- * 1. This domain creats a static 1:1 mapping to all usable memory.
- * 2. It maps to each iommu if successful.
- * 3. Each iommu mapps to this domain if successful.
- */
-static struct dmar_domain *si_domain;
-static int hw_pass_through = 1;
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -293,11 +284,6 @@ static int __init intel_iommu_setup(char *str)
}
__setup("intel_iommu=", intel_iommu_setup);
-static int domain_type_is_si(struct dmar_domain *domain)
-{
- return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
-}
-
static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
@@ -492,7 +478,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
- domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1199,9 +1184,8 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-/* return value determine if we need a write buffer flush */
-static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type)
+void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
@@ -1270,32 +1254,6 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
-void domain_update_iotlb(struct dmar_domain *domain)
-{
- struct dev_pasid_info *dev_pasid;
- struct device_domain_info *info;
- bool has_iotlb_device = false;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (info->ats_enabled) {
- has_iotlb_device = true;
- break;
- }
- }
-
- list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
- info = dev_iommu_priv_get(dev_pasid->dev);
- if (info->ats_enabled) {
- has_iotlb_device = true;
- break;
- }
- }
- domain->has_iotlb_device = has_iotlb_device;
- spin_unlock_irqrestore(&domain->lock, flags);
-}
-
/*
* The extra devTLB flush quirk impacts those QAT devices with PCI device
* IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
@@ -1322,20 +1280,9 @@ static void iommu_enable_pci_caps(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
-
- /* The PCIe spec, in its wisdom, declares that the behaviour of
- the device if you enable PASID support after ATS support is
- undefined. So always enable PASID support on devices which
- have it, even if we can't yet know if we're ever going to
- use it. */
- if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
- info->pasid_enabled = 1;
-
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
- !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
+ !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
info->ats_enabled = 1;
- domain_update_iotlb(info->domain);
- }
}
static void iommu_disable_pci_caps(struct device_domain_info *info)
@@ -1350,12 +1297,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
if (info->ats_enabled) {
pci_disable_ats(pdev);
info->ats_enabled = 0;
- domain_update_iotlb(info->domain);
- }
-
- if (info->pasid_enabled) {
- pci_disable_pasid(pdev);
- info->pasid_enabled = 0;
}
}
@@ -1447,10 +1388,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
* entry for first-level or pass-through translation modes should
* be programmed with a domain id different from those used for
* second-level or nested translation. We reserve a domain id for
- * this purpose.
+ * this purpose. This domain id is also used for the identity
+ * domain in legacy mode.
*/
- if (sm_supported(iommu))
- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
+ set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
return 0;
}
@@ -1524,7 +1465,6 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->nid = NUMA_NO_NODE;
if (first_level_by_default(type))
domain->use_first_level = true;
- domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
INIT_LIST_HEAD(&domain->cache_tags);
@@ -1632,9 +1572,65 @@ static void domain_exit(struct dmar_domain *domain)
if (WARN_ON(!list_empty(&domain->devices)))
return;
+ kfree(domain->qi_batch);
kfree(domain);
}
+/*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry about it
+ * hereafter.
+ */
+static void copied_context_tear_down(struct intel_iommu *iommu,
+ struct context_entry *context,
+ u8 bus, u8 devfn)
+{
+ u16 did_old;
+
+ if (!context_copied(iommu, bus, devfn))
+ return;
+
+ assert_spin_locked(&iommu->lock);
+
+ did_old = context_domain_id(context);
+ context_clear_entry(context);
+
+ if (did_old < cap_ndoms(iommu->cap)) {
+ iommu->flush.flush_context(iommu, did_old,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ }
+
+ clear_context_copied(iommu, bus, devfn);
+}
+
+/*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
+ u8 bus, u8 devfn)
+{
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+ } else {
+ iommu_flush_write_buffer(iommu);
+ }
+}
+
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
u8 bus, u8 devfn)
@@ -1647,9 +1643,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct context_entry *context;
int agaw, ret;
- if (hw_pass_through && domain_type_is_si(domain))
- translation = CONTEXT_TT_PASS_THROUGH;
-
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1663,83 +1656,35 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
- /*
- * For kdump cases, old valid entries may be cached due to the
- * in-flight DMA and copied pgtable, but there is no unmapping
- * behaviour for them, thus we need an explicit cache flush for
- * the newly-mapped device. For kdump, at this point, the device
- * is supposed to finish reset at its driver probe stage, so no
- * in-flight DMA will exist, and we don't need to worry anymore
- * hereafter.
- */
- if (context_copied(iommu, bus, devfn)) {
- u16 did_old = context_domain_id(context);
-
- if (did_old < cap_ndoms(iommu->cap)) {
- iommu->flush.flush_context(iommu, did_old,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
- DMA_TLB_DSI_FLUSH);
- }
-
- clear_context_copied(iommu, bus, devfn);
- }
-
+ copied_context_tear_down(iommu, context, bus, devfn);
context_clear_entry(context);
- context_set_domain_id(context, did);
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- /*
- * Skip top levels of page tables for iommu which has
- * less agaw than default. Unnecessary for PT mode.
- */
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
-
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
+ context_set_domain_id(context, did);
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, agaw);
- } else {
- /*
- * In pass through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware. And ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
+ /*
+ * Skip top levels of page tables for iommu which has
+ * less agaw than default. Unnecessary for PT mode.
+ */
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
+ if (info && info->ats_supported)
+ translation = CONTEXT_TT_DEV_IOTLB;
+ else
+ translation = CONTEXT_TT_MULTI_LEVEL;
+
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, agaw);
context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(context, sizeof(*context));
-
- /*
- * It's a non-present to present mapping. If hardware doesn't cache
- * non-present entry we only need to flush the write-buffer. If the
- * _does_ cache non-present entries, then it does so in the special
- * domain #0, which we have to flush:
- */
- if (cap_caching_mode(iommu->cap)) {
- iommu->flush.flush_context(iommu, 0,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- } else {
- iommu_flush_write_buffer(iommu);
- }
-
+ context_present_cache_flush(iommu, did, bus, devfn);
ret = 0;
out_unlock:
@@ -2000,80 +1945,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}
-static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long first_vpfn,
- unsigned long last_vpfn)
-{
- /*
- * RMRR range might have overlap with physical memory range,
- * clear it first
- */
- dma_pte_clear_range(domain, first_vpfn, last_vpfn);
-
- return __domain_mapping(domain, first_vpfn,
- first_vpfn, last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
-}
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
-static int __init si_domain_init(int hw)
-{
- struct dmar_rmrr_unit *rmrr;
- struct device *dev;
- int i, nid, ret;
-
- si_domain = alloc_domain(IOMMU_DOMAIN_IDENTITY);
- if (!si_domain)
- return -EFAULT;
-
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- domain_exit(si_domain);
- si_domain = NULL;
- return -EFAULT;
- }
-
- if (hw)
- return 0;
-
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn;
- int i;
-
- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
- ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn_start(start_pfn),
- mm_to_dma_pfn_end(end_pfn-1));
- if (ret)
- return ret;
- }
- }
-
- /*
- * Identity map the RMRRs so that devices with RMRRs could also use
- * the si_domain.
- */
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, dev) {
- unsigned long long start = rmrr->base_address;
- unsigned long long end = rmrr->end_address;
-
- if (WARN_ON(end < start ||
- end >> agaw_to_width(si_domain->agaw)))
- continue;
-
- ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn_start(start >> PAGE_SHIFT),
- mm_to_dma_pfn_end(end >> PAGE_SHIFT));
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
@@ -2096,8 +1967,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (!sm_supported(iommu))
ret = domain_context_mapping(domain, dev);
- else if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
else
@@ -2106,8 +1975,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
goto out_block_translation;
- if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
- iommu_enable_pci_caps(info);
+ iommu_enable_pci_caps(info);
ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
if (ret)
@@ -2151,6 +2019,16 @@ static bool device_rmrr_is_relaxable(struct device *dev)
static int device_def_domain_type(struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ /*
+ * Hardware does not support the passthrough translation mode.
+ * Always use a dynamic mapping domain.
+ */
+ if (!ecap_pass_through(iommu->ecap))
+ return IOMMU_DOMAIN_DMA;
+
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2441,8 +2319,6 @@ static int __init init_dmars(void)
}
}
- if (!ecap_pass_through(iommu->ecap))
- hw_pass_through = 0;
intel_svm_check(iommu);
}
@@ -2458,10 +2334,6 @@ static int __init init_dmars(void)
check_tylersburg_isoch();
- ret = si_domain_init(hw_pass_through);
- if (ret)
- goto free_iommu;
-
/*
* for each drhd
* enable fault log
@@ -2507,10 +2379,6 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
- if (si_domain) {
- domain_exit(si_domain);
- si_domain = NULL;
- }
return ret;
}
@@ -2885,12 +2753,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (ret)
goto out;
- if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
- pr_warn("%s: Doesn't support hardware pass through.\n",
- iommu->name);
- return -ENXIO;
- }
-
sp = domain_update_iommu_superpage(NULL, iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("%s: Doesn't support large page.\n",
@@ -3141,43 +3003,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
return 0;
}
-static int intel_iommu_memory_notifier(struct notifier_block *nb,
- unsigned long val, void *v)
-{
- struct memory_notify *mhp = v;
- unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
- unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
- mhp->nr_pages - 1);
-
- switch (val) {
- case MEM_GOING_ONLINE:
- if (iommu_domain_identity_map(si_domain,
- start_vpfn, last_vpfn)) {
- pr_warn("Failed to build identity map for [%lx-%lx]\n",
- start_vpfn, last_vpfn);
- return NOTIFY_BAD;
- }
- break;
-
- case MEM_OFFLINE:
- case MEM_CANCEL_ONLINE:
- {
- LIST_HEAD(freelist);
-
- domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
- iommu_put_pages_list(&freelist);
- }
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block intel_iommu_memory_nb = {
- .notifier_call = intel_iommu_memory_notifier,
- .priority = 0
-};
-
static void intel_disable_iommus(void)
{
struct intel_iommu *iommu = NULL;
@@ -3474,12 +3299,7 @@ int __init intel_iommu_init(void)
iommu_pmu_register(iommu);
}
- up_read(&dmar_global_lock);
-
- if (si_domain && !hw_pass_through)
- register_memory_notifier(&intel_iommu_memory_nb);
- down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
@@ -3624,7 +3444,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
xa_init(&domain->iommu_array);
domain->nid = dev_to_node(dev);
- domain->has_iotlb_device = info->ats_enabled;
domain->use_first_level = first_stage;
/* calculate the address width */
@@ -3693,8 +3512,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
domain->geometry.force_aperture = true;
return domain;
- case IOMMU_DOMAIN_IDENTITY:
- return &si_domain->domain;
default:
return NULL;
}
@@ -3761,8 +3578,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
WARN_ON(dmar_domain->nested_parent &&
!list_empty(&dmar_domain->s1_domains));
- if (domain != &si_domain->domain)
- domain_exit(dmar_domain);
+ domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -3812,11 +3628,9 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret;
- if (info->domain)
- device_block_translation(dev);
+ device_block_translation(dev);
ret = prepare_domain_attach_device(domain, dev);
if (ret)
@@ -4093,6 +3907,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
dev_iommu_priv_set(dev, info);
if (pdev && pci_ats_supported(pdev)) {
+ pci_prepare_ats(pdev, VTD_PAGE_SHIFT);
ret = device_rbtree_insert(iommu, info);
if (ret)
goto free;
@@ -4114,6 +3929,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
intel_iommu_debugfs_create_dev(info);
+ /*
+ * The PCIe spec, in its wisdom, declares that the behaviour of the
+ * device is undefined if you enable PASID support after ATS support.
+ * So always enable PASID support on devices which have it, even if
+ * we can't yet know if we're ever going to use it.
+ */
+ if (info->pasid_supported &&
+ !pci_enable_pasid(pdev, info->pasid_supported & ~1))
+ info->pasid_enabled = 1;
+
return &iommu->iommu;
free_table:
intel_pasid_free_table(dev);
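Editor's note: PASID enabling moves out of the attach path (iommu_enable_pci_caps() earlier in this file) and into probe, with the matching disable in release below, honouring the PCIe rule that PASID must be enabled before ATS. pci_prepare_ats() is assumed here to pre-configure the ATS invalidation page size at probe time while leaving ATS itself disabled, so the later enable at attach cannot reorder against PASID. Sketch of the resulting ordering, under that assumption:

	pci_prepare_ats(pdev, VTD_PAGE_SHIFT);		/* probe: configure, ATS stays off */
	pci_enable_pasid(pdev, info->pasid_supported & ~1);	/* probe: PASID on */
	/* ... later, at domain attach ... */
	pci_enable_ats(pdev, VTD_PAGE_SHIFT);		/* ATS on, strictly after PASID */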
@@ -4130,6 +3955,11 @@ static void intel_iommu_release_device(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
+ if (info->pasid_enabled) {
+ pci_disable_pasid(to_pci_dev(dev));
+ info->pasid_enabled = 0;
+ }
+
mutex_lock(&iommu->iopf_lock);
if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
device_rbtree_remove(info);
@@ -4424,11 +4254,17 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
struct iommu_domain *domain)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dev_pasid_info *curr, *dev_pasid = NULL;
struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *dmar_domain;
unsigned long flags;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+ return;
+ }
+
+ dmar_domain = to_dmar_domain(domain);
spin_lock_irqsave(&dmar_domain->lock, flags);
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
if (curr->dev == dev && curr->pasid == pasid) {
@@ -4483,9 +4319,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (ret)
goto out_detach_iommu;
- if (domain_type_is_si(dmar_domain))
- ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
- else if (dmar_domain->use_first_level)
+ if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
else
@@ -4655,9 +4489,111 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
};
+static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, 1);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+ spin_unlock(&iommu->lock);
+ return 0;
+ }
+
+ copied_context_tear_down(iommu, context, bus, devfn);
+ context_clear_entry(context);
+ context_set_domain_id(context, FLPT_DEFAULT_DID);
+
+ /*
+ * In pass through mode, AW must be programmed to indicate the largest
+ * AGAW value supported by hardware. And ASR is ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
+ context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
+ context_set_fault_enable(context);
+ context_set_present(context);
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
+ context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);
+ spin_unlock(&iommu->lock);
+
+ return 0;
+}
+
+static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev != &pdev->dev)
+ return 0;
+
+ return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+static int device_setup_pass_through(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev))
+ return context_setup_pass_through(dev, info->bus, info->devfn);
+
+ return pci_for_each_dma_alias(to_pci_dev(dev),
+ context_setup_pass_through_cb, dev);
+}
+
+static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ int ret;
+
+ device_block_translation(dev);
+
+ if (dev_is_real_dma_subdevice(dev))
+ return 0;
+
+ if (sm_supported(iommu)) {
+ ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
+ if (!ret)
+ iommu_enable_pci_caps(info);
+ } else {
+ ret = device_setup_pass_through(dev);
+ }
+
+ return ret;
+}
+
+static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+ return -EOPNOTSUPP;
+
+ return intel_pasid_setup_pass_through(iommu, dev, pasid);
+}
+
+static struct iommu_domain identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = identity_domain_attach_dev,
+ .set_dev_pasid = identity_domain_set_dev_pasid,
+ },
+};
+
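Editor's note: this static singleton replaces the old dynamically allocated si_domain. With ops->identity_domain wired up just below, the IOMMU core is expected to hand out this domain for identity requests instead of calling domain_alloc(), which is why intel_iommu_domain_alloc() above lost its IOMMU_DOMAIN_IDENTITY case. A sketch of the assumed core-side selection:

	/* in the IOMMU core's default-domain selection (assumption) */
	if (req_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
		return ops->identity_domain;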
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
+ .identity_domain = &identity_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index a969be2258b1..1497f3112b12 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -584,11 +584,23 @@ struct iommu_domain_info {
* to VT-d spec, section 9.3 */
};
+/*
+ * We start simply by using a fixed size for the batched descriptors. This
+ * size is currently sufficient for our needs. Future improvements could
+ * involve dynamically allocating the batch buffer based on actual demand,
+ * allowing us to adjust the batch size for optimal performance in different
+ * scenarios.
+ */
+#define QI_MAX_BATCHED_DESC_COUNT 16
+struct qi_batch {
+ struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
+ unsigned int index;
+};
+
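Editor's note: struct qi_desc is four u64 words (32 bytes), so one batch is 16 * 32 = 512 bytes plus the index. That is small enough to kzalloc() once per domain, as cache_tag_assign_domain() now does, and cheap to memset() on every flush.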
struct dmar_domain {
int nid; /* node id */
struct xarray iommu_array; /* Attached IOMMU array */
- u8 has_iotlb_device: 1;
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
u8 set_pte_snp:1;
@@ -609,6 +621,7 @@ struct dmar_domain {
spinlock_t cache_lock; /* Protect the cache tag list */
struct list_head cache_tags; /* Cache tag list */
+ struct qi_batch *qi_batch; /* Batched QI descriptors */
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
@@ -687,8 +700,6 @@ struct iommu_pmu {
DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
struct perf_event *event_list[IOMMU_PMU_IDX_MAX];
unsigned char irq_name[16];
- struct hlist_node cpuhp_node;
- int cpu;
};
#define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED)
@@ -1067,6 +1078,115 @@ static inline unsigned long nrpages_to_size(unsigned long npages)
return npages << VTD_PAGE_SHIFT;
}
+static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ struct qi_desc *desc)
+{
+ u8 dw = 0, dr = 0;
+ int ih = 0;
+
+ if (cap_write_drain(iommu->cap))
+ dw = 1;
+
+ if (cap_read_drain(iommu->cap))
+ dr = 1;
+
+ desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+ | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+ desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+ | QI_IOTLB_AM(size_order);
+ desc->qw2 = 0;
+ desc->qw3 = 0;
+}
+
+static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
+ unsigned int mask, struct qi_desc *desc)
+{
+ if (mask) {
+ addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc->qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else {
+ desc->qw1 = QI_DEV_IOTLB_ADDR(addr);
+ }
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+ desc->qw2 = 0;
+ desc->qw3 = 0;
+}
+
+static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih,
+ struct qi_desc *desc)
+{
+ if (npages == -1) {
+ desc->qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
+ desc->qw1 = 0;
+ } else {
+ int mask = ilog2(__roundup_pow_of_two(npages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
+
+ desc->qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+ QI_EIOTLB_TYPE;
+ desc->qw1 = QI_EIOTLB_ADDR(addr) |
+ QI_EIOTLB_IH(ih) |
+ QI_EIOTLB_AM(mask);
+ }
+}
+
+static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
+ u16 qdep, u64 addr,
+ unsigned int size_order,
+ struct qi_desc *desc)
+{
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+
+ desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+
+ /*
+ * If the S bit is 0, we only flush a single page. If the S bit is
+ * set, the least significant zero bit indicates the invalidation
+ * address range; see VT-d spec 6.5.2.6.
+ * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
+ * size_order == 0 means a single 4KB (PAGE_SIZE) page.
+ * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
+ * ECAP.
+ */
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
+ pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
+ addr, size_order);
+
+ /* Take page address */
+ desc->qw1 = QI_DEV_EIOTLB_ADDR(addr);
+
+ if (size_order) {
+ /*
+ * Existing 0s in the address below size_order may become the least
+ * significant zero bit; we must set them to 1s to avoid encoding a
+ * smaller size than desired.
+ */
+ desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
+ VTD_PAGE_SHIFT);
+ /* Clear size_order bit to indicate size */
+ desc->qw1 &= ~mask;
+ /* Set the S bit to indicate flushing more than 1 page */
+ desc->qw1 |= QI_DEV_EIOTLB_SIZE;
+ }
+}
+
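Editor's worked example for the size encoding above: with VTD_PAGE_SHIFT == 12 and size_order == 2 (four 4K pages, 16KB), mask is 1 << 13 and GENMASK_ULL(13, 12) first sets address bits [13:12]; clearing bit 13 afterwards leaves bit 12 = 1 and bit 13 = 0, so with the S bit set the least significant zero bit (bit 13) encodes the 16KB range.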
/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds) (((pds) & 0x7) << 9)
@@ -1098,13 +1218,15 @@ void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
unsigned int count, unsigned long options);
+
+void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
/*
* Options used in qi_submit_sync:
* QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
-void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index e090ca07364b..7a6d188e3bea 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1352,12 +1352,11 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
case X86_IRQ_ALLOC_TYPE_IOAPIC:
/* Set source-id of interrupt request */
set_ioapic_sid(irte, info->devid);
- apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
- info->devid, irte->present, irte->fpd,
- irte->dst_mode, irte->redir_hint,
- irte->trigger_mode, irte->dlvry_mode,
- irte->avail, irte->vector, irte->dest_id,
- irte->sid, irte->sq, irte->svt);
+ apic_pr_verbose("IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
+ info->devid, irte->present, irte->fpd, irte->dst_mode,
+ irte->redir_hint, irte->trigger_mode, irte->dlvry_mode,
+ irte->avail, irte->vector, irte->dest_id, irte->sid,
+ irte->sq, irte->svt);
sub_handle = info->ioapic.pin;
break;
case X86_IRQ_ALLOC_TYPE_HPET:
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 16a2bcf5cfeb..433c58944401 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -66,8 +66,6 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
- domain_update_iotlb(dmar_domain);
-
return 0;
unassign_tag:
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
@@ -85,6 +83,7 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
spin_lock(&s2_domain->s1_lock);
list_del(&dmar_domain->s2_link);
spin_unlock(&s2_domain->s1_lock);
+ kfree(dmar_domain->qi_batch);
kfree(dmar_domain);
}
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index b51fc268dc84..2e5fa0a23299 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -264,9 +264,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
else
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
/*
@@ -493,9 +491,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
return 0;
}
@@ -572,9 +568,7 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
/**
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index 44083d01852d..75f493bcb353 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -34,28 +34,9 @@ static struct attribute_group iommu_pmu_events_attr_group = {
.attrs = attrs_empty,
};
-static cpumask_t iommu_pmu_cpu_mask;
-
-static ssize_t
-cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask);
-}
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *iommu_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static struct attribute_group iommu_pmu_cpumask_attr_group = {
- .attrs = iommu_pmu_cpumask_attrs,
-};
-
static const struct attribute_group *iommu_pmu_attr_groups[] = {
&iommu_pmu_format_attr_group,
&iommu_pmu_events_attr_group,
- &iommu_pmu_cpumask_attr_group,
NULL
};
@@ -565,6 +546,7 @@ static int __iommu_pmu_register(struct intel_iommu *iommu)
iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
iommu_pmu->pmu.module = THIS_MODULE;
return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
@@ -773,89 +755,6 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
iommu->perf_irq = 0;
}
-static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
-
- if (cpumask_empty(&iommu_pmu_cpu_mask))
- cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
-
- if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
- iommu_pmu->cpu = cpu;
-
- return 0;
-}
-
-static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
- int target = cpumask_first(&iommu_pmu_cpu_mask);
-
- /*
- * The iommu_pmu_cpu_mask has been updated when offline the CPU
- * for the first iommu_pmu. Migrate the other iommu_pmu to the
- * new target.
- */
- if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
- perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
- iommu_pmu->cpu = target;
- return 0;
- }
-
- if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
- return 0;
-
- target = cpumask_any_but(cpu_online_mask, cpu);
-
- if (target < nr_cpu_ids)
- cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
- else
- return 0;
-
- perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
- iommu_pmu->cpu = target;
-
- return 0;
-}
-
-static int nr_iommu_pmu;
-static enum cpuhp_state iommu_cpuhp_slot;
-
-static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
-{
- int ret;
-
- if (!nr_iommu_pmu) {
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "driver/iommu/intel/perfmon:online",
- iommu_pmu_cpu_online,
- iommu_pmu_cpu_offline);
- if (ret < 0)
- return ret;
- iommu_cpuhp_slot = ret;
- }
-
- ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
- if (ret) {
- if (!nr_iommu_pmu)
- cpuhp_remove_multi_state(iommu_cpuhp_slot);
- return ret;
- }
- nr_iommu_pmu++;
-
- return 0;
-}
-
-static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
-{
- cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
-
- if (--nr_iommu_pmu)
- return;
-
- cpuhp_remove_multi_state(iommu_cpuhp_slot);
-}
-
void iommu_pmu_register(struct intel_iommu *iommu)
{
struct iommu_pmu *iommu_pmu = iommu->pmu;
@@ -866,17 +765,12 @@ void iommu_pmu_register(struct intel_iommu *iommu)
if (__iommu_pmu_register(iommu))
goto err;
- if (iommu_pmu_cpuhp_setup(iommu_pmu))
- goto unregister;
-
/* Set interrupt for overflow */
if (iommu_pmu_set_interrupt(iommu))
- goto cpuhp_free;
+ goto unregister;
return;
-cpuhp_free:
- iommu_pmu_cpuhp_free(iommu_pmu);
unregister:
perf_pmu_unregister(&iommu_pmu->pmu);
err:
@@ -892,6 +786,5 @@ void iommu_pmu_unregister(struct intel_iommu *iommu)
return;
iommu_pmu_unset_interrupt(iommu);
- iommu_pmu_cpuhp_free(iommu_pmu);
perf_pmu_unregister(&iommu_pmu->pmu);
}
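Editor's note: the deleted cpumask attribute and hotplug callbacks are subsumed by the one-line scope change above. Declaring the PMU PERF_PMU_SCOPE_SYS_WIDE is assumed to make the perf core publish the events cpumask and migrate the PMU context on CPU hotplug itself, leaving the driver-side cpuhp state machine with nothing to do.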
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 0e3a9b38bef2..078d1e32a24e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -184,7 +184,10 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static void intel_mm_free_notifier(struct mmu_notifier *mn)
{
- kfree(container_of(mn, struct dmar_domain, notifier));
+ struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier);
+
+ kfree(domain->qi_batch);
+ kfree(domain);
}
static const struct mmu_notifier_ops intel_mmuops = {
@@ -311,7 +314,7 @@ void intel_drain_pasid_prq(struct device *dev, u32 pasid)
domain = info->domain;
pdev = to_pci_dev(dev);
sid = PCI_DEVID(info->bus, info->devfn);
- did = domain_id_iommu(domain, iommu);
+ did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;
qdep = pci_ats_queue_depth(pdev);
/*
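Editor's note: with the identity domain, a device can be attached without a dmar_domain, so info->domain may now be NULL in the PRQ drain path; draining then falls back to FLPT_DEFAULT_DID, the reserved domain id that pass-through contexts and PASID entries are programmed with (see context_setup_pass_through() earlier in this diff).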
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ff4149ae1751..0e67f1721a3d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -274,13 +274,13 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}
-static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
+static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
+ for (int i = 0; i < num_entries; i++)
+ ptep[i] = 0;
- *ptep = 0;
-
- if (!cfg->coherent_walk)
- __arm_lpae_sync_pte(ptep, 1, cfg);
+ if (!cfg->coherent_walk && num_entries)
+ __arm_lpae_sync_pte(ptep, num_entries, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -653,26 +653,29 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
num_entries = min_t(int, pgcount, max_entries);
- while (i < num_entries) {
- pte = READ_ONCE(*ptep);
+ /* Find and handle non-leaf entries */
+ for (i = 0; i < num_entries; i++) {
+ pte = READ_ONCE(ptep[i]);
if (WARN_ON(!pte))
break;
- __arm_lpae_clear_pte(ptep, &iop->cfg);
-
if (!iopte_leaf(pte, lvl, iop->fmt)) {
+ __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
+
/* Also flush any partial walks */
io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
ARM_LPAE_GRANULE(data));
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
- } else if (!iommu_iotlb_gather_queued(gather)) {
- io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
}
-
- ptep++;
- i++;
}
+ /* Clear the remaining entries */
+ __arm_lpae_clear_pte(ptep, &iop->cfg, i);
+
+ if (gather && !iommu_iotlb_gather_queued(gather))
+ for (int j = 0; j < i; j++)
+ io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
+
return i * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
/*
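Editor's note: the unmap loop is split into phases. Table (non-leaf) entries are still cleared and synced one at a time, because each child page table must be walk-flushed and freed before moving on, while leaf entries are left in place and then cleared by a single __arm_lpae_clear_pte() call over the first i slots, costing one cache sync on non-coherent walks instead of one per PTE; the per-page TLB gather is likewise deferred to one loop after the clears.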
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed6c5cb60c5a..83c8e617a2c5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3578,6 +3578,7 @@ int iommu_replace_group_handle(struct iommu_group *group,
ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
if (ret)
goto err_unlock;
+ handle->domain = new_domain;
}
ret = __iommu_group_set_domain(group, new_domain);
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 3214a4c17c6b..5fd3dd420290 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
+#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/slab.h>
-#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>
-#include "../iommu-priv.h"
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -327,8 +327,9 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
return 0;
}
-static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
- struct iommufd_device *idev)
+static int
+iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
+ struct iommufd_hwpt_paging *hwpt_paging)
{
int rc;
@@ -354,6 +355,7 @@ static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
int rc;
mutex_lock(&idev->igroup->lock);
@@ -363,8 +365,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
goto err_unlock;
}
- if (hwpt_is_paging(hwpt)) {
- rc = iommufd_hwpt_paging_attach(to_hwpt_paging(hwpt), idev);
+ if (hwpt_paging) {
+ rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
if (rc)
goto err_unlock;
}
@@ -387,9 +389,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
mutex_unlock(&idev->igroup->lock);
return 0;
err_unresv:
- if (hwpt_is_paging(hwpt))
- iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
- idev->dev);
+ if (hwpt_paging)
+ iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
err_unlock:
mutex_unlock(&idev->igroup->lock);
return rc;
@@ -399,6 +400,7 @@ struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{
struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
mutex_lock(&idev->igroup->lock);
list_del(&idev->group_item);
@@ -406,9 +408,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev)
iommufd_hwpt_detach_device(hwpt, idev);
idev->igroup->hwpt = NULL;
}
- if (hwpt_is_paging(hwpt))
- iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
- idev->dev);
+ if (hwpt_paging)
+ iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&idev->igroup->lock);
/* Caller must destroy hwpt */
@@ -440,17 +441,17 @@ iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
}
static int
-iommufd_group_do_replace_paging(struct iommufd_group *igroup,
- struct iommufd_hwpt_paging *hwpt_paging)
+iommufd_group_do_replace_reserved_iova(struct iommufd_group *igroup,
+ struct iommufd_hwpt_paging *hwpt_paging)
{
- struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt;
+ struct iommufd_hwpt_paging *old_hwpt_paging;
struct iommufd_device *cur;
int rc;
lockdep_assert_held(&igroup->lock);
- if (!hwpt_is_paging(old_hwpt) ||
- hwpt_paging->ioas != to_hwpt_paging(old_hwpt)->ioas) {
+ old_hwpt_paging = find_hwpt_paging(igroup->hwpt);
+ if (!old_hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas) {
list_for_each_entry(cur, &igroup->device_list, group_item) {
rc = iopt_table_enforce_dev_resv_regions(
&hwpt_paging->ioas->iopt, cur->dev, NULL);
@@ -473,6 +474,8 @@ static struct iommufd_hw_pagetable *
iommufd_device_do_replace(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt)
{
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
+ struct iommufd_hwpt_paging *old_hwpt_paging;
struct iommufd_group *igroup = idev->igroup;
struct iommufd_hw_pagetable *old_hwpt;
unsigned int num_devices;
@@ -491,9 +494,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
}
old_hwpt = igroup->hwpt;
- if (hwpt_is_paging(hwpt)) {
- rc = iommufd_group_do_replace_paging(igroup,
- to_hwpt_paging(hwpt));
+ if (hwpt_paging) {
+ rc = iommufd_group_do_replace_reserved_iova(igroup, hwpt_paging);
if (rc)
goto err_unlock;
}
@@ -502,11 +504,10 @@ iommufd_device_do_replace(struct iommufd_device *idev,
if (rc)
goto err_unresv;
- if (hwpt_is_paging(old_hwpt) &&
- (!hwpt_is_paging(hwpt) ||
- to_hwpt_paging(hwpt)->ioas != to_hwpt_paging(old_hwpt)->ioas))
- iommufd_group_remove_reserved_iova(igroup,
- to_hwpt_paging(old_hwpt));
+ old_hwpt_paging = find_hwpt_paging(old_hwpt);
+ if (old_hwpt_paging &&
+ (!hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas))
+ iommufd_group_remove_reserved_iova(igroup, old_hwpt_paging);
igroup->hwpt = hwpt;
@@ -524,9 +525,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
/* Caller must destroy old_hwpt */
return old_hwpt;
err_unresv:
- if (hwpt_is_paging(hwpt))
- iommufd_group_remove_reserved_iova(igroup,
- to_hwpt_paging(hwpt));
+ if (hwpt_paging)
+ iommufd_group_remove_reserved_iova(igroup, hwpt_paging);
err_unlock:
mutex_unlock(&idev->igroup->lock);
return ERR_PTR(rc);
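
The device.c conversion above replaces every paired hwpt_is_paging()/to_hwpt_paging() test with a single find_hwpt_paging() lookup whose NULL result doubles as the type check. A standalone sketch of that accessor pattern, using hypothetical stand-in types rather than the real iommufd structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum obj_type { OBJ_PAGING, OBJ_NESTED, OBJ_OTHER };

struct pagetable { enum obj_type type; };
struct paging    { struct pagetable common; int ioas_id; };
struct nested    { struct pagetable common; struct paging *parent; };

/* Resolve any pagetable to its backing paging object, if one exists. */
static struct paging *find_paging(struct pagetable *pt)
{
	switch (pt->type) {
	case OBJ_PAGING:
		return container_of(pt, struct paging, common);
	case OBJ_NESTED:
		return container_of(pt, struct nested, common)->parent;
	default:
		return NULL;
	}
}

int main(void)
{
	struct paging p = { .common = { OBJ_PAGING }, .ioas_id = 7 };
	struct nested n = { .common = { OBJ_NESTED }, .parent = &p };
	struct paging *found = find_paging(&n.common);

	/* A nested pagetable resolves to its parent's paging object. */
	if (found)
		printf("ioas_id=%d\n", found->ioas_id);
	return 0;
}

Callers then test one pointer instead of repeating an is-type check before each downcast, and the nested case transparently yields the parent's reserved-IOVA state.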
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index a643d5c7c535..8c8226f0dffd 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -3,14 +3,14 @@
*/
#define pr_fmt(fmt) "iommufd: " fmt
+#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/iommufd.h>
#include <linux/pci.h>
#include <linux/poll.h>
-#include <linux/anon_inodes.h>
#include <uapi/linux/iommufd.h>
#include "../iommu-priv.h"
@@ -161,7 +161,6 @@ static int __fault_domain_replace_dev(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- handle->handle.domain = hwpt->domain;
handle->idev = idev;
ret = iommu_replace_group_handle(idev->igroup->group,
hwpt->domain, &handle->handle);
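
Taken together, the iommu.c hunk earlier and this fault.c hunk move the handle->domain assignment into iommu_replace_group_handle() itself, so callers can no longer forget it or set it inconsistently. A minimal compilable sketch of that single-point-of-truth shape, with hypothetical names:

#include <stdio.h>

struct domain { const char *name; };
struct attach_handle { struct domain *domain; };

/* The core replace path stamps the handle; callers only supply it. */
static int replace_group_handle(struct attach_handle *h, struct domain *nd)
{
	/* ...reserve slot, validate group state... */
	h->domain = nd;
	/* ...switch the group onto nd... */
	return 0;
}

int main(void)
{
	struct domain d = { "new" };
	struct attach_handle h = { 0 };

	replace_group_handle(&h, &d);
	printf("handle bound to %s domain\n", h.domain->name);
	return 0;
}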
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index aefde4443671..d06bf6e6c19f 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -225,7 +225,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
!user_data->len || !ops->domain_alloc_user)
return ERR_PTR(-EOPNOTSUPP);
- if (parent->auto_domain || !parent->nest_parent)
+ if (parent->auto_domain || !parent->nest_parent ||
+ parent->common.domain->owner != ops)
return ERR_PTR(-EINVAL);
hwpt_nested = __iommufd_object_alloc(
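
The added condition rejects nesting a domain under a parent produced by a different driver, by comparing the parent domain's owner ops against the ops doing the allocation. A sketch of the identity check, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct iommu_like_ops { const char *driver; };

/* Page-table formats are driver-specific, so only pointer identity
 * (same ops instance, hence same driver) makes the parent usable. */
static bool same_owner(const struct iommu_like_ops *parent_owner,
		       const struct iommu_like_ops *ops)
{
	return parent_owner == ops;
}

int main(void)
{
	struct iommu_like_ops a = { "drv-a" }, b = { "drv-b" };

	printf("a/a: %d, a/b: %d\n", same_owner(&a, &a), same_owner(&a, &b));
	return 0;
}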
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 05fd9d3abf1b..4bf7ccd39d46 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -8,17 +8,17 @@
* The datastructure uses the iopt_pages to optimize the storage of the PFNs
* between the domains and xarray.
*/
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/lockdep.h>
-#include <linux/iommu.h>
#include <linux/sched/mm.h>
-#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/errno.h>
#include <uapi/linux/iommufd.h>
-#include "io_pagetable.h"
#include "double_span.h"
+#include "io_pagetable.h"
struct iopt_pages_list {
struct iopt_pages *pages;
@@ -112,6 +112,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
unsigned long page_offset = uptr % PAGE_SIZE;
struct interval_tree_double_span_iter used_span;
struct interval_tree_span_iter allowed_span;
+ unsigned long max_alignment = PAGE_SIZE;
unsigned long iova_alignment;
lockdep_assert_held(&iopt->iova_rwsem);
@@ -131,6 +132,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
roundup_pow_of_two(length),
1UL << __ffs64(uptr));
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ max_alignment = HPAGE_SIZE;
+#endif
+ /* Protect against ALIGN() overflow */
+ if (iova_alignment >= max_alignment)
+ iova_alignment = max_alignment;
+
if (iova_alignment < iopt->iova_alignment)
return -EINVAL;
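
The new clamp bounds iova_alignment before it feeds ALIGN()-style arithmetic: an alignment derived from roundup_pow_of_two(length) can be so large that rounding an address up to it wraps past ULONG_MAX. A userspace demonstration of the wraparound and the clamp, assuming an LP64 build and a 2 MiB HPAGE_SIZE as on x86-64:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long addr = (1UL << 63) + 1;	/* candidate IOVA */
	unsigned long align = 1UL << 63;	/* huge roundup_pow_of_two() result */
	unsigned long max_align = 1UL << 21;	/* assumed HPAGE_SIZE (2 MiB) */

	/* Unclamped: addr + (align - 1) wraps past ULONG_MAX; result is 0. */
	printf("unclamped: %#lx\n", ALIGN_UP(addr, align));

	/* The fix: bound the alignment before any rounding arithmetic. */
	if (align >= max_align)
		align = max_align;
	printf("clamped:   %#lx\n", ALIGN_UP(addr, align));
	return 0;
}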
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 0ec3509b7e33..c61d74471684 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -6,8 +6,8 @@
#define __IO_PAGETABLE_H
#include <linux/interval_tree.h>
-#include <linux/mutex.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/xarray.h>
#include "iommufd_private.h"
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 157a89b993e4..2c4b2bb11e78 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -3,8 +3,8 @@
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/interval_tree.h>
-#include <linux/iommufd.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <uapi/linux/iommufd.h>
#include "io_pagetable.h"
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 92efe30a8f0d..f1d865e6fab6 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -4,13 +4,14 @@
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H
-#include <linux/rwsem.h>
-#include <linux/xarray.h>
-#include <linux/refcount.h>
-#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/iova_bitmap.h>
+#include <linux/refcount.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
+
#include "../iommu-priv.h"
struct iommu_domain;
@@ -324,6 +325,25 @@ to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
return container_of(hwpt, struct iommufd_hwpt_paging, common);
}
+static inline struct iommufd_hwpt_nested *
+to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
+{
+ return container_of(hwpt, struct iommufd_hwpt_nested, common);
+}
+
+static inline struct iommufd_hwpt_paging *
+find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
+{
+ switch (hwpt->obj.type) {
+ case IOMMUFD_OBJ_HWPT_PAGING:
+ return to_hwpt_paging(hwpt);
+ case IOMMUFD_OBJ_HWPT_NESTED:
+ return to_hwpt_nested(hwpt)->parent;
+ default:
+ return NULL;
+ }
+}
+
static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
@@ -490,8 +510,10 @@ static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
- if (hwpt->fault)
+ if (hwpt->fault) {
iommufd_fault_domain_detach_dev(hwpt, idev);
+ return;
+ }
iommu_detach_group(hwpt->domain, idev->igroup->group);
}
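
The iommufd_hwpt_detach_device() hunk above turns the two detach paths into true alternatives: once the fault path has detached the device, falling through into iommu_detach_group() would detach it a second time. A compilable sketch of the control-flow fix, with stub types standing in for the iommufd structures:

#include <stdio.h>
#include <stdbool.h>

struct hw_pagetable { bool fault; };

static void fault_detach(struct hw_pagetable *hwpt) { puts("fault detach"); }
static void plain_detach(struct hw_pagetable *hwpt) { puts("plain detach"); }

/* The early return keeps the fault-capable path from also running the
 * plain detach, which is exactly what the added "return" prevents. */
static void detach_device(struct hw_pagetable *hwpt)
{
	if (hwpt->fault) {
		fault_detach(hwpt);
		return;
	}
	plain_detach(hwpt);
}

int main(void)
{
	struct hw_pagetable faulting = { .fault = true };

	detach_device(&faulting);	/* prints only "fault detach" */
	return 0;
}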
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index acbbba1c6671..f4bc23a92f9a 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -4,8 +4,8 @@
#ifndef _UAPI_IOMMUFD_TEST_H
#define _UAPI_IOMMUFD_TEST_H
-#include <linux/types.h>
#include <linux/iommufd.h>
+#include <linux/types.h>
enum {
IOMMU_TEST_OP_ADD_RESERVED = 1,
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index b9e964b1ad5c..d90b9e253412 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -3,10 +3,10 @@
* Copyright (c) 2022, Oracle and/or its affiliates.
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*/
+#include <linux/highmem.h>
#include <linux/iova_bitmap.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/highmem.h>
#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 83bbd7c5d160..b5f5d27ee963 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -8,15 +8,15 @@
*/
#define pr_fmt(fmt) "iommufd: " fmt
+#include <linux/bug.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/iommufd.h>
#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/bug.h>
+#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
-#include <linux/iommufd.h>
#include "io_pagetable.h"
#include "iommufd_private.h"
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 117f644a0c5b..93d806c9c073 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -45,16 +45,16 @@
* last_iova + 1 can overflow. An iopt_pages index will always be much less than
* ULONG_MAX so last_index + 1 cannot overflow.
*/
+#include <linux/highmem.h>
+#include <linux/iommu.h>
+#include <linux/iommufd.h>
+#include <linux/kthread.h>
#include <linux/overflow.h>
#include <linux/slab.h>
-#include <linux/iommu.h>
#include <linux/sched/mm.h>
-#include <linux/highmem.h>
-#include <linux/kthread.h>
-#include <linux/iommufd.h>
-#include "io_pagetable.h"
#include "double_span.h"
+#include "io_pagetable.h"
#ifndef CONFIG_IOMMUFD_TEST
#define TEMP_MEMORY_LIMIT 65536
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 222cfc11ebfd..540437be168a 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -3,13 +3,14 @@
*
* Kernel side components to support tools/testing/selftests/iommu
*/
-#include <linux/slab.h>
-#include <linux/iommu.h>
-#include <linux/xarray.h>
-#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
#include <linux/fault-inject.h>
+#include <linux/file.h>
+#include <linux/iommu.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
#include "../iommu-priv.h"
@@ -1342,7 +1343,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
unsigned long page_size, void __user *uptr,
u32 flags)
{
- unsigned long bitmap_size, i, max;
+ unsigned long i, max;
struct iommu_test_cmd *cmd = ucmd->cmd;
struct iommufd_hw_pagetable *hwpt;
struct mock_iommu_domain *mock;
@@ -1363,15 +1364,14 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
}
max = length / page_size;
- bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE);
-
- tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT);
+ tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
+ GFP_KERNEL_ACCOUNT);
if (!tmp) {
rc = -ENOMEM;
goto out_put;
}
- if (copy_from_user(tmp, uptr, bitmap_size)) {
+ if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
rc = -EFAULT;
goto out_free;
}
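
The selftest fix sizes the bitmap allocation in unsigned-long units while still copying only the byte-rounded payload from userspace: the kernel's bitmap helpers read whole longs, so a buffer rounded only to bytes can be read past its end. A userspace sketch of the two sizes:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nbits = 100;

	/* Backing store is allocated in long-sized units... */
	size_t alloc_bytes = DIV_ROUND_UP(nbits, CHAR_BIT * sizeof(unsigned long)) *
			     sizeof(unsigned long);
	/* ...while the user-visible payload is still rounded to bytes. */
	size_t copy_bytes = DIV_ROUND_UP(nbits, BITS_PER_BYTE);

	unsigned long *bitmap = calloc(1, alloc_bytes);
	if (!bitmap)
		return 1;

	/* 100 bits: copy 13 bytes from the user, but allocate 16 (two longs
	 * on an LP64 build) so word-wide bitmap reads stay in bounds. */
	printf("nbits=%lu alloc=%zu copy=%zu\n", nbits, alloc_bytes, copy_bytes);
	free(bitmap);
	return 0;
}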
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index b657cc09605f..ff55b8c30712 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -804,8 +804,7 @@ static int ipmmu_init_arm_mapping(struct device *dev)
if (!mmu->mapping) {
struct dma_iommu_mapping *mapping;
- mapping = arm_iommu_create_mapping(&platform_bus_type,
- SZ_1G, SZ_2G);
+ mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
ret = PTR_ERR(mapping);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index c6ea5b4baff3..ee4e55b6b190 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -433,8 +433,7 @@ static int mtk_iommu_v1_create_mapping(struct device *dev,
mtk_mapping = data->mapping;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
- mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
- 0, 1ULL << 32);
+ mtk_mapping = arm_iommu_create_mapping(dev, 0, 1ULL << 32);
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
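
Both conversions above follow the updated arm_iommu_create_mapping() prototype, which takes the client device instead of a bus type. A kernel-style fragment (not standalone, error handling abbreviated) of the usual create/attach sequence under the new signature:

struct dma_iommu_mapping *mapping;
int ret;

/* The mapping is now created against the device itself, so the call no
 * longer cares which bus type the IOMMU's clients live on. */
mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G);
if (IS_ERR(mapping))
	return PTR_ERR(mapping);

ret = arm_iommu_attach_device(dev, mapping);
if (ret)
	arm_iommu_release_mapping(mapping);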
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 78d61da75257..e7a6a1611d19 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -214,7 +214,7 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
* that represent reservations in the IOVA space, which are regions that should
* not be mapped.
*/
- if (of_find_property(it.node, "reg", NULL)) {
+ if (of_property_present(it.node, "reg")) {
err = of_address_to_resource(it.node, 0, &phys);
if (err < 0) {
dev_err(dev, "failed to parse memory region %pOF: %d\n",
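
of_property_present() returns a bool, which states the intent of a pure existence test more directly than of_find_property(node, name, NULL). Sketch:

/* Existence check only; no property length or value is needed. */
if (of_property_present(np, "reg")) {
	/* node describes an addressable region */
}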
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index d078bdc48c38..341cd9ca5a05 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -685,6 +685,7 @@ config LOONGSON_PCH_MSI
depends on PCI
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
select PCI_MSI
help
Support for the Loongson PCH MSI Controller.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 15635812b2d6..e3679ec2b9f7 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -110,7 +110,7 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
-obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 5c534d9fd2b0..da5250f0155c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -234,7 +234,10 @@ enum fiq_hwirq {
AIC_NR_FIQ
};
+/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
+static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);
struct aic_info {
int version;
@@ -252,6 +255,7 @@ struct aic_info {
/* Features */
bool fast_ipi;
+ bool local_fast_ipi;
};
static const struct aic_info aic1_info __initconst = {
@@ -270,17 +274,32 @@ static const struct aic_info aic1_fipi_info __initconst = {
.fast_ipi = true,
};
+static const struct aic_info aic1_local_fipi_info __initconst = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+
+ .fast_ipi = true,
+ .local_fast_ipi = true,
+};
+
static const struct aic_info aic2_info __initconst = {
.version = 2,
.irq_cfg = AIC2_IRQ_CFG,
.fast_ipi = true,
+ .local_fast_ipi = true,
};
static const struct of_device_id aic_info_match[] = {
{
.compatible = "apple,t8103-aic",
+ .data = &aic1_local_fipi_info,
+ },
+ {
+ .compatible = "apple,t8015-aic",
.data = &aic1_fipi_info,
},
{
@@ -532,14 +551,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
* we check for everything here, even things we don't support yet.
*/
- if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
- if (static_branch_likely(&use_fast_ipi)) {
- aic_handle_ipi(regs);
- } else {
- pr_err_ratelimited("Fast IPI fired. Acking.\n");
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
- }
- }
+ if (static_branch_likely(&use_fast_ipi) &&
+ (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
+ aic_handle_ipi(regs);
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
@@ -574,8 +588,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(irq));
}
- if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
- (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
+ if (static_branch_likely(&use_fast_ipi) &&
+ (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
+ (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
/* Same story with uncore PMCs */
pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
@@ -750,12 +765,12 @@ static void aic_ipi_send_fast(int cpu)
u64 cluster = MPIDR_CLUSTER(mpidr);
u64 idx = MPIDR_CPU(mpidr);
- if (MPIDR_CLUSTER(my_mpidr) == cluster)
- write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
- SYS_IMP_APL_IPI_RR_LOCAL_EL1);
- else
+ if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
+ } else {
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
+ }
isb();
}
@@ -811,7 +826,8 @@ static int aic_init_cpu(unsigned int cpu)
/* Mask all hard-wired per-CPU IRQ/FIQ sources */
/* Pending Fast IPI FIQs */
- write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ if (static_branch_likely(&use_fast_ipi))
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
/* Timer FIQs */
sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
@@ -832,8 +848,10 @@ static int aic_init_cpu(unsigned int cpu)
FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
/* Uncore PMC FIQ */
- sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
- FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+ if (static_branch_likely(&use_fast_ipi)) {
+ sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
+ FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+ }
/* Commit all of the above */
isb();
@@ -987,11 +1005,12 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
- if (irqc->info.fast_ipi)
- static_branch_enable(&use_fast_ipi);
- else
+ if (!irqc->info.fast_ipi)
static_branch_disable(&use_fast_ipi);
+ if (!irqc->info.local_fast_ipi)
+ static_branch_disable(&use_local_fast_ipi);
+
irqc->info.die_stride = off - start_off;
irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
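
The AIC changes gate every fast-IPI register access behind static keys, so SoCs without those registers never execute the accesses at run time. A kernel-style sketch of the pattern, default-true with a one-time disable at probe (helper names are hypothetical):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_TRUE(use_feature);

static void __init probe_feature(bool hw_has_it)
{
	/* Keys default to true; disable once if the hardware lacks it. */
	if (!hw_has_it)
		static_branch_disable(&use_feature);
}

static void hot_path(void)
{
	/* Compiles to a patched jump, not a load-and-test, per call site. */
	if (static_branch_likely(&use_feature))
		fast_ipi_poke();	/* hypothetical helper */
	else
		fallback_poke();	/* hypothetical helper */
}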
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index dce2b80bf439..d7c5ef248474 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell Armada 370 and Armada XP SoC IRQ handling
*
@@ -7,13 +8,11 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
* Ben Dooks <ben.dooks@codethink.co.uk>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -66,19 +65,17 @@
* device
*
* The "global interrupt mask/unmask" is modified using the
- * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
- * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
- * to "main_int_base".
+ * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
+ * registers, which are relative to "mpic->base".
*
- * The "per-CPU mask/unmask" is modified using the
- * ARMADA_370_XP_INT_SET_MASK_OFFS and
- * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
- * "per_cpu_int_base". This base address points to a special address,
+ * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
+ * and MPIC_INT_CLEAR_MASK registers, which are relative to
+ * "mpic->per_cpu". This base address points to a special address,
* which automatically accesses the registers of the current CPU.
*
* The per-CPU mask/unmask can also be adjusted using the global
- * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
- * to configure interrupt affinity.
+ * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
+ * configure interrupt affinity.
*
* Due to this model, all interrupts need to be mask/unmasked at two
* different levels: at the global level and at the per-CPU level.
@@ -92,9 +89,8 @@
* the current CPU, running the ->map() code. This allows to have
* the interrupt unmasked at this level in non-SMP
* configurations. In SMP configurations, the ->set_affinity()
- * callback is called, which using the
- * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask
- * for the interrupt.
+ * callback is called, which using the MPIC_INT_SOURCE_CTL()
+ * readjusts the per-CPU mask/unmask for the interrupt.
*
* The ->mask() and ->unmask() operations only mask/unmask the
* interrupt at the "global" level.
@@ -116,58 +112,84 @@
* at the per-CPU level.
*/
-/* Registers relative to main_int_base */
-#define ARMADA_370_XP_INT_CONTROL (0x00)
-#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
-#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
-#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
-#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
-#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
-#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
-
-/* Registers relative to per_cpu_int_base */
-#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08)
-#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c)
-#define ARMADA_375_PPI_CAUSE (0x10)
-#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
-#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
-#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
-#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
-#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
-
-#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+/* Registers relative to mpic->base */
+#define MPIC_INT_CONTROL 0x00
+#define MPIC_INT_CONTROL_NUMINT_MASK GENMASK(12, 2)
+#define MPIC_SW_TRIG_INT 0x04
+#define MPIC_INT_SET_ENABLE 0x30
+#define MPIC_INT_CLEAR_ENABLE 0x34
+#define MPIC_INT_SOURCE_CTL(hwirq) (0x100 + (hwirq) * 4)
+#define MPIC_INT_SOURCE_CPU_MASK GENMASK(3, 0)
+#define MPIC_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid))
+
+/* Registers relative to mpic->per_cpu */
+#define MPIC_IN_DRBEL_CAUSE 0x08
+#define MPIC_IN_DRBEL_MASK 0x0c
+#define MPIC_PPI_CAUSE 0x10
+#define MPIC_CPU_INTACK 0x44
+#define MPIC_CPU_INTACK_IID_MASK GENMASK(9, 0)
+#define MPIC_INT_SET_MASK 0x48
+#define MPIC_INT_CLEAR_MASK 0x4C
+#define MPIC_INT_FABRIC_MASK 0x54
+#define MPIC_INT_CAUSE_PERF(cpu) BIT(cpu)
+
+#define MPIC_PER_CPU_IRQS_NR 29
/* IPI and MSI interrupt definitions for IPI platforms */
-#define IPI_DOORBELL_START (0)
-#define IPI_DOORBELL_END (8)
-#define IPI_DOORBELL_MASK 0xFF
-#define PCI_MSI_DOORBELL_START (16)
-#define PCI_MSI_DOORBELL_NR (16)
-#define PCI_MSI_DOORBELL_END (32)
-#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
+#define IPI_DOORBELL_NR 8
+#define IPI_DOORBELL_MASK GENMASK(7, 0)
+#define PCI_MSI_DOORBELL_START 16
+#define PCI_MSI_DOORBELL_NR 16
+#define PCI_MSI_DOORBELL_MASK GENMASK(31, 16)
/* MSI interrupt definitions for non-IPI platforms */
#define PCI_MSI_FULL_DOORBELL_START 0
#define PCI_MSI_FULL_DOORBELL_NR 32
-#define PCI_MSI_FULL_DOORBELL_END 32
#define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0)
#define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0)
#define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16)
-static void __iomem *per_cpu_int_base;
-static void __iomem *main_int_base;
-static struct irq_domain *armada_370_xp_mpic_domain;
-static u32 doorbell_mask_reg;
-static int parent_irq;
+/**
+ * struct mpic - MPIC private data structure
+ * @base: MPIC registers base address
+ * @per_cpu: per-CPU registers base address
+ * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
+ * @domain: MPIC main interrupt domain
+ * @ipi_domain: IPI domain
+ * @msi_domain: MSI domain
+ * @msi_inner_domain: MSI inner domain
+ * @msi_used: bitmap of used MSI numbers
+ * @msi_lock: mutex serializing access to @msi_used
+ * @msi_doorbell_addr: physical address of MSI doorbell register
+ * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or
+ * PCI_MSI_FULL_DOORBELL_MASK)
+ * @msi_doorbell_start: first set bit in @msi_doorbell_mask
+ * @msi_doorbell_size: number of set bits in @msi_doorbell_mask
+ * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
+ */
+struct mpic {
+ void __iomem *base;
+ void __iomem *per_cpu;
+ int parent_irq;
+ struct irq_domain *domain;
+#ifdef CONFIG_SMP
+ struct irq_domain *ipi_domain;
+#endif
#ifdef CONFIG_PCI_MSI
-static struct irq_domain *armada_370_xp_msi_domain;
-static struct irq_domain *armada_370_xp_msi_inner_domain;
-static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
-static DEFINE_MUTEX(msi_used_lock);
-static phys_addr_t msi_doorbell_addr;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_inner_domain;
+ DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
+ struct mutex msi_lock;
+ phys_addr_t msi_doorbell_addr;
+ u32 msi_doorbell_mask;
+ unsigned int msi_doorbell_start, msi_doorbell_size;
#endif
+ u32 doorbell_mask;
+};
+
+static struct mpic *mpic_data __ro_after_init;
-static inline bool is_ipi_available(void)
+static inline bool mpic_is_ipi_available(struct mpic *mpic)
{
/*
* We distinguish IPI availability in the IC by the IC not having a
@@ -175,39 +197,12 @@ static inline bool is_ipi_available(void)
* interrupt controller (e.g. GIC) that takes care of inter-processor
* interrupts.
*/
- return parent_irq <= 0;
-}
-
-static inline u32 msi_doorbell_mask(void)
-{
- return is_ipi_available() ? PCI_MSI_DOORBELL_MASK :
- PCI_MSI_FULL_DOORBELL_MASK;
-}
-
-static inline unsigned int msi_doorbell_start(void)
-{
- return is_ipi_available() ? PCI_MSI_DOORBELL_START :
- PCI_MSI_FULL_DOORBELL_START;
+ return mpic->parent_irq <= 0;
}
-static inline unsigned int msi_doorbell_size(void)
+static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq)
{
- return is_ipi_available() ? PCI_MSI_DOORBELL_NR :
- PCI_MSI_FULL_DOORBELL_NR;
-}
-
-static inline unsigned int msi_doorbell_end(void)
-{
- return is_ipi_available() ? PCI_MSI_DOORBELL_END :
- PCI_MSI_FULL_DOORBELL_END;
-}
-
-static inline bool is_percpu_irq(irq_hw_number_t irq)
-{
- if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
- return true;
-
- return false;
+ return hwirq < MPIC_PER_CPU_IRQS_NR;
}
/*
@@ -215,55 +210,53 @@ static inline bool is_percpu_irq(irq_hw_number_t irq)
* For shared global interrupts, mask/unmask global enable bit
* For CPU interrupts, mask/unmask the calling CPU's bit
*/
-static void armada_370_xp_irq_mask(struct irq_data *d)
+static void mpic_irq_mask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!is_percpu_irq(hwirq))
- writel(hwirq, main_int_base +
- ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
else
- writel(hwirq, per_cpu_int_base +
- ARMADA_370_XP_INT_SET_MASK_OFFS);
+ writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}
-static void armada_370_xp_irq_unmask(struct irq_data *d)
+static void mpic_irq_unmask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!is_percpu_irq(hwirq))
- writel(hwirq, main_int_base +
- ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
else
- writel(hwirq, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
#ifdef CONFIG_PCI_MSI
-static struct irq_chip armada_370_xp_msi_irq_chip = {
- .name = "MPIC MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+static struct irq_chip mpic_msi_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static struct msi_domain_info armada_370_xp_msi_domain_info = {
+static struct msi_domain_info mpic_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &armada_370_xp_msi_irq_chip,
+ .chip = &mpic_msi_irq_chip,
};
-static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
- msg->address_lo = lower_32_bits(msi_doorbell_addr);
- msg->address_hi = upper_32_bits(msi_doorbell_addr);
- msg->data = BIT(cpu + 8) | (data->hwirq + msi_doorbell_start());
+ msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
+ msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
+ msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}
-static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
+static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
unsigned int cpu;
@@ -275,33 +268,34 @@ static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
-static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+static struct irq_chip mpic_msi_bottom_irq_chip = {
.name = "MPIC MSI",
- .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
- .irq_set_affinity = armada_370_xp_msi_set_affinity,
+ .irq_compose_msi_msg = mpic_compose_msi_msg,
+ .irq_set_affinity = mpic_msi_set_affinity,
};
-static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
+ void *args)
{
- int hwirq, i;
+ struct mpic *mpic = domain->host_data;
+ int hwirq;
- mutex_lock(&msi_used_lock);
- hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(),
+ mutex_lock(&mpic->msi_lock);
+ hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
order_base_2(nr_irqs));
- mutex_unlock(&msi_used_lock);
+ mutex_unlock(&mpic->msi_lock);
if (hwirq < 0)
return -ENOSPC;
- for (i = 0; i < nr_irqs; i++) {
+ for (unsigned int i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
- &armada_370_xp_msi_bottom_irq_chip,
+ &mpic_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
}
@@ -309,76 +303,84 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
-static void armada_370_xp_msi_free(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs)
+static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mpic *mpic = domain->host_data;
- mutex_lock(&msi_used_lock);
- bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
- mutex_unlock(&msi_used_lock);
+ mutex_lock(&mpic->msi_lock);
+ bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&mpic->msi_lock);
}
-static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
- .alloc = armada_370_xp_msi_alloc,
- .free = armada_370_xp_msi_free,
+static const struct irq_domain_ops mpic_msi_domain_ops = {
+ .alloc = mpic_msi_alloc,
+ .free = mpic_msi_free,
};
-static void armada_370_xp_msi_reenable_percpu(void)
+static void mpic_msi_reenable_percpu(struct mpic *mpic)
{
u32 reg;
/* Enable MSI doorbell mask and combined cpu local interrupt */
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
- reg |= msi_doorbell_mask();
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+ reg |= mpic->msi_doorbell_mask;
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
/* Unmask local doorbell interrupt */
- writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
-static int armada_370_xp_msi_init(struct device_node *node,
- phys_addr_t main_int_phys_base)
+static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
+ phys_addr_t main_int_phys_base)
{
- msi_doorbell_addr = main_int_phys_base +
- ARMADA_370_XP_SW_TRIG_INT_OFFS;
+ mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;
+
+ mutex_init(&mpic->msi_lock);
+
+ if (mpic_is_ipi_available(mpic)) {
+ mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START;
+ mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR;
+ mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK;
+ } else {
+ mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START;
+ mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR;
+ mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
+ }
- armada_370_xp_msi_inner_domain =
- irq_domain_add_linear(NULL, msi_doorbell_size(),
- &armada_370_xp_msi_domain_ops, NULL);
- if (!armada_370_xp_msi_inner_domain)
+ mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size,
+ &mpic_msi_domain_ops, mpic);
+ if (!mpic->msi_inner_domain)
return -ENOMEM;
- armada_370_xp_msi_domain =
- pci_msi_create_irq_domain(of_node_to_fwnode(node),
- &armada_370_xp_msi_domain_info,
- armada_370_xp_msi_inner_domain);
- if (!armada_370_xp_msi_domain) {
- irq_domain_remove(armada_370_xp_msi_inner_domain);
+ mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
+ mpic->msi_inner_domain);
+ if (!mpic->msi_domain) {
+ irq_domain_remove(mpic->msi_inner_domain);
return -ENOMEM;
}
- armada_370_xp_msi_reenable_percpu();
+ mpic_msi_reenable_percpu(mpic);
/* Unmask low 16 MSI irqs on non-IPI platforms */
- if (!is_ipi_available())
- writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ if (!mpic_is_ipi_available(mpic))
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
return 0;
}
#else
-static __maybe_unused void armada_370_xp_msi_reenable_percpu(void) {}
+static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {}
-static inline int armada_370_xp_msi_init(struct device_node *node,
- phys_addr_t main_int_phys_base)
+static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node,
+ phys_addr_t main_int_phys_base)
{
return 0;
}
#endif
-static void armada_xp_mpic_perf_init(void)
+static void mpic_perf_init(struct mpic *mpic)
{
- unsigned long cpuid;
+ u32 cpuid;
/*
* This Performance Counter Overflow interrupt is specific for
@@ -390,38 +392,39 @@ static void armada_xp_mpic_perf_init(void)
cpuid = cpu_logical_map(smp_processor_id());
/* Enable Performance Counter Overflow interrupts */
- writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
- per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+ writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
}
#ifdef CONFIG_SMP
-static struct irq_domain *ipi_domain;
-
-static void armada_370_xp_ipi_mask(struct irq_data *d)
+static void mpic_ipi_mask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
u32 reg;
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
reg &= ~BIT(d->hwirq);
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}
-static void armada_370_xp_ipi_unmask(struct irq_data *d)
+static void mpic_ipi_unmask(struct irq_data *d)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
u32 reg;
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
reg |= BIT(d->hwirq);
- writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}
-static void armada_370_xp_ipi_send_mask(struct irq_data *d,
- const struct cpumask *mask)
+static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
- unsigned long map = 0;
- int cpu;
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
+ unsigned int cpu;
+ u32 map = 0;
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
- map |= 1 << cpu_logical_map(cpu);
+ map |= BIT(cpu_logical_map(cpu));
/*
* Ensure that stores to Normal memory are visible to the
@@ -430,451 +433,465 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
dsb();
/* submit softirq */
- writel((map << 8) | d->hwirq, main_int_base +
- ARMADA_370_XP_SW_TRIG_INT_OFFS);
+ writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}
-static void armada_370_xp_ipi_ack(struct irq_data *d)
+static void mpic_ipi_ack(struct irq_data *d)
{
- writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
+
+ writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
}
-static struct irq_chip ipi_irqchip = {
+static struct irq_chip mpic_ipi_irqchip = {
.name = "IPI",
- .irq_ack = armada_370_xp_ipi_ack,
- .irq_mask = armada_370_xp_ipi_mask,
- .irq_unmask = armada_370_xp_ipi_unmask,
- .ipi_send_mask = armada_370_xp_ipi_send_mask,
+ .irq_ack = mpic_ipi_ack,
+ .irq_mask = mpic_ipi_mask,
+ .irq_unmask = mpic_ipi_unmask,
+ .ipi_send_mask = mpic_ipi_send_mask,
};
-static int armada_370_xp_ipi_alloc(struct irq_domain *d,
- unsigned int virq,
- unsigned int nr_irqs, void *args)
+static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- int i;
-
- for (i = 0; i < nr_irqs; i++) {
+ for (unsigned int i = 0; i < nr_irqs; i++) {
irq_set_percpu_devid(virq + i);
- irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
- d->host_data,
- handle_percpu_devid_irq,
- NULL, NULL);
+ irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
}
return 0;
}
-static void armada_370_xp_ipi_free(struct irq_domain *d,
- unsigned int virq,
- unsigned int nr_irqs)
+static void mpic_ipi_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
{
/* Not freeing IPIs */
}
-static const struct irq_domain_ops ipi_domain_ops = {
- .alloc = armada_370_xp_ipi_alloc,
- .free = armada_370_xp_ipi_free,
+static const struct irq_domain_ops mpic_ipi_domain_ops = {
+ .alloc = mpic_ipi_alloc,
+ .free = mpic_ipi_free,
};
-static void ipi_resume(void)
+static void mpic_ipi_resume(struct mpic *mpic)
{
- int i;
-
- for (i = 0; i < IPI_DOORBELL_END; i++) {
- int irq;
+ for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
+ unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
+ struct irq_data *d;
- irq = irq_find_mapping(ipi_domain, i);
- if (irq <= 0)
+ if (!virq || !irq_percpu_is_enabled(virq))
continue;
- if (irq_percpu_is_enabled(irq)) {
- struct irq_data *d;
- d = irq_domain_get_irq_data(ipi_domain, irq);
- armada_370_xp_ipi_unmask(d);
- }
+
+ d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
+ mpic_ipi_unmask(d);
}
}
-static __init void armada_xp_ipi_init(struct device_node *node)
+static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
int base_ipi;
- ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- IPI_DOORBELL_END,
- &ipi_domain_ops, NULL);
- if (WARN_ON(!ipi_domain))
- return;
+ mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
+ &mpic_ipi_domain_ops, mpic);
+ if (WARN_ON(!mpic->ipi_domain))
+ return -ENOMEM;
- irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
- base_ipi = irq_domain_alloc_irqs(ipi_domain, IPI_DOORBELL_END, NUMA_NO_NODE, NULL);
+ irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
+ base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
if (WARN_ON(!base_ipi))
- return;
+ return -ENOMEM;
+
+ set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR);
- set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
+ return 0;
}
-static int armada_xp_set_affinity(struct irq_data *d,
- const struct cpumask *mask_val, bool force)
+static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
+ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- int cpu;
+ unsigned int cpu;
/* Select a single core from the affinity mask which is online */
cpu = cpumask_any_and(mask_val, cpu_online_mask);
- atomic_io_modify(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq),
- ARMADA_370_XP_INT_SOURCE_CPU_MASK,
- BIT(cpu_logical_map(cpu)));
+ atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
+ MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
-static void armada_xp_mpic_smp_cpu_init(void)
+static void mpic_smp_cpu_init(struct mpic *mpic)
{
- u32 control;
- int nr_irqs, i;
-
- control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
- nr_irqs = (control >> 2) & 0x3ff;
+ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
+ writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);
- for (i = 0; i < nr_irqs; i++)
- writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-
- if (!is_ipi_available())
+ if (!mpic_is_ipi_available(mpic))
return;
/* Disable all IPIs */
- writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
/* Clear pending IPIs */
- writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
/* Unmask IPI interrupt */
- writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
-static void armada_xp_mpic_reenable_percpu(void)
+static void mpic_reenable_percpu(struct mpic *mpic)
{
- unsigned int irq;
-
/* Re-enable per-CPU interrupts that were enabled before suspend */
- for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
- struct irq_data *data;
- int virq;
-
- virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
- if (virq == 0)
- continue;
-
- data = irq_get_irq_data(virq);
+ for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
+ unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ struct irq_data *d;
- if (!irq_percpu_is_enabled(virq))
+ if (!virq || !irq_percpu_is_enabled(virq))
continue;
- armada_370_xp_irq_unmask(data);
+ d = irq_get_irq_data(virq);
+ mpic_irq_unmask(d);
}
- if (is_ipi_available())
- ipi_resume();
+ if (mpic_is_ipi_available(mpic))
+ mpic_ipi_resume(mpic);
- armada_370_xp_msi_reenable_percpu();
+ mpic_msi_reenable_percpu(mpic);
}
-static int armada_xp_mpic_starting_cpu(unsigned int cpu)
+static int mpic_starting_cpu(unsigned int cpu)
{
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
- armada_xp_mpic_reenable_percpu();
+ struct mpic *mpic = irq_get_default_host()->host_data;
+
+ mpic_perf_init(mpic);
+ mpic_smp_cpu_init(mpic);
+ mpic_reenable_percpu(mpic);
+
return 0;
}
static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
- armada_xp_mpic_perf_init();
- armada_xp_mpic_reenable_percpu();
- enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+ struct mpic *mpic = mpic_data;
+
+ mpic_perf_init(mpic);
+ mpic_reenable_percpu(mpic);
+ enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);
+
return 0;
}
#else
-static void armada_xp_mpic_smp_cpu_init(void) {}
-static void ipi_resume(void) {}
+static void mpic_smp_cpu_init(struct mpic *mpic) {}
+static void mpic_ipi_resume(struct mpic *mpic) {}
#endif
-static struct irq_chip armada_370_xp_irq_chip = {
+static struct irq_chip mpic_irq_chip = {
.name = "MPIC",
- .irq_mask = armada_370_xp_irq_mask,
- .irq_mask_ack = armada_370_xp_irq_mask,
- .irq_unmask = armada_370_xp_irq_unmask,
+ .irq_mask = mpic_irq_mask,
+ .irq_mask_ack = mpic_irq_mask,
+ .irq_unmask = mpic_irq_unmask,
#ifdef CONFIG_SMP
- .irq_set_affinity = armada_xp_set_affinity,
+ .irq_set_affinity = mpic_set_affinity,
#endif
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
-static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
- unsigned int virq, irq_hw_number_t hw)
+static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq)
{
+ struct mpic *mpic = domain->host_data;
+
/* IRQs 0 and 1 cannot be mapped, they are handled internally */
- if (hw <= 1)
+ if (hwirq <= 1)
return -EINVAL;
- armada_370_xp_irq_mask(irq_get_irq_data(virq));
- if (!is_percpu_irq(hw))
- writel(hw, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ irq_set_chip_data(virq, mpic);
+
+ mpic_irq_mask(irq_get_irq_data(virq));
+ if (!mpic_is_percpu_irq(hwirq))
+ writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
else
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
irq_set_status_flags(virq, IRQ_LEVEL);
- if (is_percpu_irq(hw)) {
+ if (mpic_is_percpu_irq(hwirq)) {
irq_set_percpu_devid(virq);
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_percpu_devid_irq);
+ irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq);
} else {
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
}
irq_set_probe(virq);
-
return 0;
}
-static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
- .map = armada_370_xp_mpic_irq_map,
- .xlate = irq_domain_xlate_onecell,
+static const struct irq_domain_ops mpic_irq_ops = {
+ .map = mpic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
};
#ifdef CONFIG_PCI_MSI
-static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+static void mpic_handle_msi_irq(struct mpic *mpic)
{
- u32 msimask, msinr;
-
- msimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
- msimask &= msi_doorbell_mask();
+ unsigned long cause;
+ unsigned int i;
- writel(~msimask, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
+ cause &= mpic->msi_doorbell_mask;
+ writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
- for (msinr = msi_doorbell_start();
- msinr < msi_doorbell_end(); msinr++) {
- unsigned int irq;
+ for_each_set_bit(i, &cause, BITS_PER_LONG)
+ generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start);
+}
+#else
+static void mpic_handle_msi_irq(struct mpic *mpic) {}
+#endif
- if (!(msimask & BIT(msinr)))
- continue;
+#ifdef CONFIG_SMP
+static void mpic_handle_ipi_irq(struct mpic *mpic)
+{
+ unsigned long cause;
+ irq_hw_number_t i;
- irq = msinr - msi_doorbell_start();
+ cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
+ cause &= IPI_DOORBELL_MASK;
- generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
- }
+ for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
+ generic_handle_domain_irq(mpic->ipi_domain, i);
}
#else
-static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+static inline void mpic_handle_ipi_irq(struct mpic *mpic) {}
#endif
-static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+static void mpic_handle_cascade_irq(struct irq_desc *desc)
{
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long irqmap, irqn, irqsrc, cpuid;
+ unsigned long cause;
+ u32 irqsrc, cpuid;
+ irq_hw_number_t i;
chained_irq_enter(chip, desc);
- irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+ cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
cpuid = cpu_logical_map(smp_processor_id());
- for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
- irqsrc = readl_relaxed(main_int_base +
- ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+ for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) {
+ irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));
/* Check if the interrupt is not masked on current CPU.
* Test IRQ (0-1) and FIQ (8-9) mask bits.
*/
- if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
+ if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)))
continue;
- if (irqn == 0 || irqn == 1) {
- armada_370_xp_handle_msi_irq(NULL, true);
+ if (i == 0 || i == 1) {
+ mpic_handle_msi_irq(mpic);
continue;
}
- generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
+ generic_handle_domain_irq(mpic->domain, i);
}
chained_irq_exit(chip, desc);
}
-static void __exception_irq_entry
-armada_370_xp_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
{
- u32 irqstat, irqnr;
+ struct mpic *mpic = irq_get_default_host()->host_data;
+ irq_hw_number_t i;
+ u32 irqstat;
do {
- irqstat = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_CPU_INTACK_OFFS);
- irqnr = irqstat & 0x3FF;
+ irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
+ i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);
- if (irqnr > 1022)
+ if (i > 1022)
break;
- if (irqnr > 1) {
- generic_handle_domain_irq(armada_370_xp_mpic_domain,
- irqnr);
- continue;
- }
+ if (i > 1)
+ generic_handle_domain_irq(mpic->domain, i);
/* MSI handling */
- if (irqnr == 1)
- armada_370_xp_handle_msi_irq(regs, false);
+ if (i == 1)
+ mpic_handle_msi_irq(mpic);
-#ifdef CONFIG_SMP
/* IPI Handling */
- if (irqnr == 0) {
- unsigned long ipimask;
- int ipi;
-
- ipimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & IPI_DOORBELL_MASK;
-
- for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
- generic_handle_domain_irq(ipi_domain, ipi);
- }
-#endif
-
+ if (i == 0)
+ mpic_handle_ipi_irq(mpic);
} while (1);
}
-static int armada_370_xp_mpic_suspend(void)
+static int mpic_suspend(void)
{
- doorbell_mask_reg = readl(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ struct mpic *mpic = mpic_data;
+
+ mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
+
return 0;
}
-static void armada_370_xp_mpic_resume(void)
+static void mpic_resume(void)
{
+ struct mpic *mpic = mpic_data;
bool src0, src1;
- int nirqs;
- irq_hw_number_t irq;
/* Re-enable interrupts */
- nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
- for (irq = 0; irq < nirqs; irq++) {
- struct irq_data *data;
- int virq;
+ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
+ unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ struct irq_data *d;
- virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
- if (virq == 0)
+ if (!virq)
continue;
- data = irq_get_irq_data(virq);
+ d = irq_get_irq_data(virq);
- if (!is_percpu_irq(irq)) {
+ if (!mpic_is_percpu_irq(i)) {
/* Non per-CPU interrupts */
- writel(irq, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- if (!irqd_irq_disabled(data))
- armada_370_xp_irq_unmask(data);
+ writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
+ if (!irqd_irq_disabled(d))
+ mpic_irq_unmask(d);
} else {
/* Per-CPU interrupts */
- writel(irq, main_int_base +
- ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(i, mpic->base + MPIC_INT_SET_ENABLE);
/*
- * Re-enable on the current CPU,
- * armada_xp_mpic_reenable_percpu() will take
- * care of secondary CPUs when they come up.
+ * Re-enable on the current CPU, mpic_reenable_percpu()
+ * will take care of secondary CPUs when they come up.
*/
if (irq_percpu_is_enabled(virq))
- armada_370_xp_irq_unmask(data);
+ mpic_irq_unmask(d);
}
}
/* Reconfigure doorbells for IPIs and MSIs */
- writel(doorbell_mask_reg,
- per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
- if (is_ipi_available()) {
- src0 = doorbell_mask_reg & IPI_DOORBELL_MASK;
- src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK;
+ if (mpic_is_ipi_available(mpic)) {
+ src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
+ src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
} else {
- src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
- src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
+ src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
+ src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
}
if (src0)
- writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
if (src1)
- writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
- if (is_ipi_available())
- ipi_resume();
+ if (mpic_is_ipi_available(mpic))
+ mpic_ipi_resume(mpic);
}
-static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
- .suspend = armada_370_xp_mpic_suspend,
- .resume = armada_370_xp_mpic_resume,
+static struct syscore_ops mpic_syscore_ops = {
+ .suspend = mpic_suspend,
+ .resume = mpic_resume,
};
-static int __init armada_370_xp_mpic_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init mpic_map_region(struct device_node *np, int index,
+ void __iomem **base, phys_addr_t *phys_base)
{
- struct resource main_int_res, per_cpu_int_res;
- int nr_irqs, i;
- u32 control;
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(np, index, &res);
+ if (WARN_ON(err))
+ goto fail;
+
+ if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) {
+ err = -EBUSY;
+ goto fail;
+ }
+
+ *base = ioremap(res.start, resource_size(&res));
+ if (WARN_ON(!*base)) {
+ err = -ENOMEM;
+ goto fail;
+ }
- BUG_ON(of_address_to_resource(node, 0, &main_int_res));
- BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+ if (phys_base)
+ *phys_base = res.start;
- BUG_ON(!request_mem_region(main_int_res.start,
- resource_size(&main_int_res),
- node->full_name));
- BUG_ON(!request_mem_region(per_cpu_int_res.start,
- resource_size(&per_cpu_int_res),
- node->full_name));
+ return 0;
+
+fail:
+ pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err));
+ return err;
+}
- main_int_base = ioremap(main_int_res.start,
- resource_size(&main_int_res));
- BUG_ON(!main_int_base);
+static int __init mpic_of_init(struct device_node *node, struct device_node *parent)
+{
+ phys_addr_t phys_base;
+ unsigned int nr_irqs;
+ struct mpic *mpic;
+ int err;
+
+ mpic = kzalloc(sizeof(*mpic), GFP_KERNEL);
+ if (WARN_ON(!mpic))
+ return -ENOMEM;
- per_cpu_int_base = ioremap(per_cpu_int_res.start,
- resource_size(&per_cpu_int_res));
- BUG_ON(!per_cpu_int_base);
+ mpic_data = mpic;
- control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
- nr_irqs = (control >> 2) & 0x3ff;
+ err = mpic_map_region(node, 0, &mpic->base, &phys_base);
+ if (err)
+ return err;
- for (i = 0; i < nr_irqs; i++)
- writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
+ if (err)
+ return err;
- armada_370_xp_mpic_domain =
- irq_domain_add_linear(node, nr_irqs,
- &armada_370_xp_mpic_irq_ops, NULL);
- BUG_ON(!armada_370_xp_mpic_domain);
- irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+ nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL));
+
+ for (irq_hw_number_t i = 0; i < nr_irqs; i++)
+ writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);
+
+ /*
+ * Initialize mpic->parent_irq before calling any other functions, since
+ * it is used to distinguish between IPI and non-IPI platforms.
+ */
+ mpic->parent_irq = irq_of_parse_and_map(node, 0);
/*
- * Initialize parent_irq before calling any other functions, since it is
- * used to distinguish between IPI and non-IPI platforms.
+ * On non-IPI platforms the driver currently supports only the per-CPU
+ * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq().
*/
- parent_irq = irq_of_parse_and_map(node, 0);
+ if (!mpic_is_ipi_available(mpic))
+ nr_irqs = MPIC_PER_CPU_IRQS_NR;
+
+ mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic);
+ if (!mpic->domain) {
+ pr_err("%pOF: Unable to add IRQ domain\n", node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);
/* Setup for the boot CPU */
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
+ mpic_perf_init(mpic);
+ mpic_smp_cpu_init(mpic);
- armada_370_xp_msi_init(node, main_int_res.start);
+ err = mpic_msi_init(mpic, node, phys_base);
+ if (err) {
+ pr_err("%pOF: Unable to initialize MSI domain\n", node);
+ return err;
+ }
- if (parent_irq <= 0) {
- irq_set_default_host(armada_370_xp_mpic_domain);
- set_handle_irq(armada_370_xp_handle_irq);
+ if (mpic_is_ipi_available(mpic)) {
+ irq_set_default_host(mpic->domain);
+ set_handle_irq(mpic_handle_irq);
#ifdef CONFIG_SMP
- armada_xp_ipi_init(node);
+ err = mpic_ipi_init(mpic, node);
+ if (err) {
+ pr_err("%pOF: Unable to initialize IPI domain\n", node);
+ return err;
+ }
+
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
"irqchip/armada/ipi:starting",
- armada_xp_mpic_starting_cpu, NULL);
+ mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
@@ -882,13 +899,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
"irqchip/armada/cascade:starting",
mpic_cascaded_starting_cpu, NULL);
#endif
- irq_set_chained_handler(parent_irq,
- armada_370_xp_mpic_handle_cascade_irq);
+ irq_set_chained_handler_and_data(mpic->parent_irq,
+ mpic_handle_cascade_irq, mpic);
}
- register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
+ register_syscore_ops(&mpic_syscore_ops);
return 0;
}
-IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
+IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);
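
The mpic_map_region() helper above replaces the old BUG_ON() chain with an unwind-on-error pattern: resolve the DT "reg" entry, reserve the range, then ioremap it, propagating an errno instead of crashing. A minimal stand-alone sketch of the same pattern for a generic DT driver follows; the helper name map_one_region and the release-on-failure step are illustrative, not taken from the driver.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>

static int __init map_one_region(struct device_node *np, int index,
				 void __iomem **base)
{
	struct resource res;
	int err;

	/* Translate the index-th "reg" entry into a CPU physical range. */
	err = of_address_to_resource(np, index, &res);
	if (err)
		return err;

	/* Claim the range so conflicting drivers fail loudly. */
	if (!request_mem_region(res.start, resource_size(&res), np->full_name))
		return -EBUSY;

	*base = ioremap(res.start, resource_size(&res));
	if (!*base) {
		release_mem_region(res.start, resource_size(&res));
		return -ENOMEM;
	}

	return 0;
}

Unlike the BUG_ON() version it replaces, every failure here reaches the caller as an error code, matching the general rule that irqchip init paths should fail gracefully rather than panic the boot.
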
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 4631f6847953..3839ad79ad31 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -57,8 +57,7 @@
static struct irq_domain *aic_domain;
-static asmlinkage void __exception_irq_entry
-aic_handle(struct pt_regs *regs)
+static void __exception_irq_entry aic_handle(struct pt_regs *regs)
{
struct irq_domain_chip_generic *dgc = aic_domain->gc;
struct irq_chip_generic *gc = dgc->gc[0];
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 145535bd7560..c0f55dc7b050 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -67,8 +67,7 @@
static struct irq_domain *aic5_domain;
-static asmlinkage void __exception_irq_entry
-aic5_handle(struct pt_regs *regs)
+static void __exception_irq_entry aic5_handle(struct pt_regs *regs)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
u32 irqnr;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index e731e0784f7e..806ebb1de201 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -69,7 +69,7 @@ static struct {
struct irq_domain_ops ops;
} *clps711x_intc;
-static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
+static void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
{
u32 irqstat;
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index 7482c8ed34b2..f4f8e9fadbbf 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -116,8 +116,7 @@ static struct irq_chip davinci_cp_intc_irq_chip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static asmlinkage void __exception_irq_entry
-davinci_cp_intc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs)
{
int gpir, irqnr, none;
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 359efc1d1be7..b91c358ea6db 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = {
/* Local static for the IRQ entry call */
static struct ft010_irq_data firq;
-static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
{
struct ft010_irq_data *f = &firq;
int irq;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 74f21e03d4a3..ce87205e3e82 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -930,7 +930,7 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
__gic_handle_nmi(irqnr, regs);
}
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
__gic_handle_irq_from_irqsoff(regs);
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index ca32ac19d284..58c28895f8c4 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -97,7 +97,7 @@ bool gic_cpuif_has_vsgi(void)
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
- return fld >= 0x3;
+ return fld >= ID_AA64PFR0_EL1_GIC_V4P1;
}
#else
bool gic_cpuif_has_vsgi(void)
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 5fba907b9052..f23b02f62a5c 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -105,8 +105,7 @@ static void ixp4xx_irq_unmask(struct irq_data *d)
}
}
-static asmlinkage void __exception_irq_entry
-ixp4xx_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
{
struct ixp4xx_irq *ixi = &ixirq;
unsigned long status;
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
new file mode 100644
index 000000000000..0f6e465dd309
--- /dev/null
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2024 Loongson Technologies, Inc.
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+
+#include "irq-msi-lib.h"
+#include "irq-loongson.h"
+
+#define VECTORS_PER_REG 64
+#define IRR_VECTOR_MASK 0xffUL
+#define IRR_INVALID_MASK 0x80000000UL
+#define AVEC_MSG_OFFSET 0x100000
+
+#ifdef CONFIG_SMP
+struct pending_list {
+ struct list_head head;
+};
+
+static struct cpumask intersect_mask;
+static DEFINE_PER_CPU(struct pending_list, pending_list);
+#endif
+
+static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
+
+struct avecintc_chip {
+ raw_spinlock_t lock;
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
+ struct irq_matrix *vector_matrix;
+ phys_addr_t msi_base_addr;
+};
+
+static struct avecintc_chip loongarch_avec;
+
+struct avecintc_data {
+ struct list_head entry;
+ unsigned int cpu;
+ unsigned int vec;
+ unsigned int prev_cpu;
+ unsigned int prev_vec;
+ unsigned int moving;
+};
+
+static inline void avecintc_ack_irq(struct irq_data *d)
+{
+}
+
+static inline void avecintc_mask_irq(struct irq_data *d)
+{
+}
+
+static inline void avecintc_unmask_irq(struct irq_data *d)
+{
+}
+
+#ifdef CONFIG_SMP
+static inline void pending_list_init(int cpu)
+{
+ struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+
+ INIT_LIST_HEAD(&plist->head);
+}
+
+static void avecintc_sync(struct avecintc_data *adata)
+{
+ struct pending_list *plist;
+
+ if (cpu_online(adata->prev_cpu)) {
+ plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
+ list_add_tail(&adata->entry, &plist->head);
+ adata->moving = 1;
+ mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
+ }
+}
+
+static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+ int cpu, ret, vector;
+ struct avecintc_data *adata;
+
+ scoped_guard(raw_spinlock, &loongarch_avec.lock) {
+ adata = irq_data_get_irq_chip_data(data);
+
+ if (adata->moving)
+ return -EBUSY;
+
+ if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
+ return 0;
+
+ cpumask_and(&intersect_mask, dest, cpu_online_mask);
+
+ ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
+ if (ret < 0)
+ return ret;
+
+ vector = ret;
+ adata->cpu = cpu;
+ adata->vec = vector;
+ per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
+ avecintc_sync(adata);
+ }
+
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int avecintc_cpu_online(unsigned int cpu)
+{
+ if (!loongarch_avec.vector_matrix)
+ return 0;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ irq_matrix_online(loongarch_avec.vector_matrix);
+
+ pending_list_init(cpu);
+
+ return 0;
+}
+
+static int avecintc_cpu_offline(unsigned int cpu)
+{
+ struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+
+ if (!loongarch_avec.vector_matrix)
+ return 0;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ if (!list_empty(&plist->head))
+ pr_warn("CPU#%d vector is busy\n", cpu);
+
+ irq_matrix_offline(loongarch_avec.vector_matrix);
+
+ return 0;
+}
+
+void complete_irq_moving(void)
+{
+ struct pending_list *plist = this_cpu_ptr(&pending_list);
+ struct avecintc_data *adata, *tdata;
+ int cpu, vector, bias;
+ uint64_t isr;
+
+ guard(raw_spinlock)(&loongarch_avec.lock);
+
+ list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
+ cpu = adata->prev_cpu;
+ vector = adata->prev_vec;
+ bias = vector / VECTORS_PER_REG;
+ switch (bias) {
+ case 0:
+ isr = csr_read64(LOONGARCH_CSR_ISR0);
+ break;
+ case 1:
+ isr = csr_read64(LOONGARCH_CSR_ISR1);
+ break;
+ case 2:
+ isr = csr_read64(LOONGARCH_CSR_ISR2);
+ break;
+ case 3:
+ isr = csr_read64(LOONGARCH_CSR_ISR3);
+ break;
+ }
+
+ if (isr & (1UL << (vector % VECTORS_PER_REG))) {
+ mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
+ continue;
+ }
+ list_del(&adata->entry);
+ irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
+ this_cpu_write(irq_map[vector], NULL);
+ adata->moving = 0;
+ adata->prev_cpu = adata->cpu;
+ adata->prev_vec = adata->vec;
+ }
+}
+#endif
+
+static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = 0x0;
+ msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
+ | ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
+ msg->data = 0x0;
+}
+
+static struct irq_chip avec_irq_controller = {
+ .name = "AVECINTC",
+ .irq_ack = avecintc_ack_irq,
+ .irq_mask = avecintc_mask_irq,
+ .irq_unmask = avecintc_unmask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = avecintc_set_affinity,
+#endif
+ .irq_compose_msi_msg = avecintc_compose_msi_msg,
+};
+
+static void avecintc_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_desc *d;
+
+ chained_irq_enter(chip, desc);
+
+ while (true) {
+ unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
+ if (vector & IRR_INVALID_MASK)
+ break;
+
+ vector &= IRR_VECTOR_MASK;
+
+ d = this_cpu_read(irq_map[vector]);
+ if (d) {
+ generic_handle_irq_desc(d);
+ } else {
+ spurious_interrupt();
+			pr_warn("Unexpected IRQ on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
+{
+ int cpu, ret;
+
+ guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
+
+ ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
+ if (ret < 0)
+ return ret;
+
+ adata->prev_cpu = adata->cpu = cpu;
+ adata->prev_vec = adata->vec = ret;
+ per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
+
+ return 0;
+}
+
+static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
+ struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
+ int ret;
+
+ if (!adata)
+ return -ENOMEM;
+
+ ret = avecintc_alloc_vector(irqd, adata);
+ if (ret < 0) {
+ kfree(adata);
+ return ret;
+ }
+
+ irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
+ adata, handle_edge_irq, NULL, NULL);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
+ }
+
+ return 0;
+}
+
+static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
+{
+ guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
+
+ per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
+ irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);
+
+#ifdef CONFIG_SMP
+ if (!adata->moving)
+ return;
+
+ per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
+ irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
+ list_del_init(&adata->entry);
+#endif
+}
+
+static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ if (d) {
+ struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
+
+ avecintc_free_vector(d, adata);
+ irq_domain_reset_irq_data(d);
+ kfree(adata);
+ }
+ }
+}
+
+static const struct irq_domain_ops avecintc_domain_ops = {
+ .alloc = avecintc_domain_alloc,
+ .free = avecintc_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+static int __init irq_matrix_init(void)
+{
+ loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
+ if (!loongarch_avec.vector_matrix)
+ return -ENOMEM;
+
+ for (int i = 0; i < NR_LEGACY_VECTORS; i++)
+ irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);
+
+ irq_matrix_online(loongarch_avec.vector_matrix);
+
+ return 0;
+}
+
+static int __init avecintc_init(struct irq_domain *parent)
+{
+ int ret, parent_irq;
+ unsigned long value;
+
+ raw_spin_lock_init(&loongarch_avec.lock);
+
+ loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
+ if (!loongarch_avec.fwnode) {
+ pr_err("Unable to allocate domain handle\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
+ &avecintc_domain_ops, NULL);
+ if (!loongarch_avec.domain) {
+ pr_err("Unable to create IRQ domain\n");
+ ret = -ENOMEM;
+ goto out_free_handle;
+ }
+
+ parent_irq = irq_create_mapping(parent, INT_AVEC);
+ if (!parent_irq) {
+		pr_err("Failed to map hwirq\n");
+ ret = -EINVAL;
+ goto out_remove_domain;
+ }
+
+ ret = irq_matrix_init();
+ if (ret < 0) {
+ pr_err("Failed to init irq matrix\n");
+ goto out_remove_domain;
+ }
+ irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);
+
+#ifdef CONFIG_SMP
+ pending_list_init(0);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
+ "irqchip/loongarch/avecintc:starting",
+ avecintc_cpu_online, avecintc_cpu_offline);
+#endif
+ value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ value |= IOCSR_MISC_FUNC_AVEC_EN;
+ iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+
+ return ret;
+
+out_remove_domain:
+ irq_domain_remove(loongarch_avec.domain);
+out_free_handle:
+ irq_domain_free_fwnode(loongarch_avec.fwnode);
+out:
+ return ret;
+}
+
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+
+ loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
+
+ return pch_msi_acpi_init_avec(loongarch_avec.domain);
+}
+
+static inline int __init acpi_cascade_irqdomain_init(void)
+{
+ return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
+}
+
+int __init avecintc_acpi_init(struct irq_domain *parent)
+{
+ int ret = avecintc_init(parent);
+ if (ret < 0) {
+ pr_err("Failed to init IRQ domain\n");
+ return ret;
+ }
+
+ ret = acpi_cascade_irqdomain_init();
+ if (ret < 0) {
+ pr_err("Failed to init cascade IRQ domain\n");
+ return ret;
+ }
+
+ return ret;
+}
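
The new avecintc driver delegates per-CPU vector bookkeeping to the generic irq_matrix allocator, the same helper the x86 vector domain uses: the matrix tracks which vectors are free on which CPUs and picks the least-loaded target from a requested mask. A condensed sketch of the allocate/free lifecycle it builds on; vec_matrix and the example_* helpers are illustrative names, with the matrix assumed to be sized as in avecintc_init().

#include <linux/cpumask.h>
#include <linux/irq.h>

static struct irq_matrix *vec_matrix;

/* Reserve one vector on the least-loaded online CPU; returns the
 * vector number on success or a negative errno. */
static int example_alloc_vector(unsigned int *mapped_cpu)
{
	return irq_matrix_alloc(vec_matrix, cpu_online_mask, false, mapped_cpu);
}

/* Return a vector so the matrix can hand it out again. */
static void example_free_vector(unsigned int cpu, unsigned int vec)
{
	irq_matrix_free(vec_matrix, cpu, vec, false);
}
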
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index b35903a06902..e62dab4c97fc 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -13,6 +13,8 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
+#include "irq-loongson.h"
+
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
@@ -140,7 +142,10 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
- return 0;
+ if (cpu_has_avecint)
+ r = avecintc_acpi_init(irq_domain);
+
+ return r;
}
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b1f2080be2be..e24db71a8783 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -17,6 +17,8 @@
#include <linux/syscore_ops.h>
#include <asm/numa.h>
+#include "irq-loongson.h"
+
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
#define EIOINTC_REG_ENABLE 0x1600
@@ -360,6 +362,9 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
+ if (cpu_has_avecint)
+ return 0;
+
r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
if (r < 0)
return r;
@@ -396,8 +401,8 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
if (nr_pics == 1) {
register_syscore_ops(&eiointc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
- "irqchip/loongarch/intc:starting",
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
+ "irqchip/loongarch/eiointc:starting",
eiointc_router_init, NULL);
}
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 0bff728b25e3..5da02c7ad0b3 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define HTVEC_EN_OFF 0x20
#define HTVEC_MAX_PARENT_IRQ 8
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 7c4fe7ab4b83..2b1bd4a96665 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -22,6 +22,8 @@
#include <asm/loongson.h>
#endif
+#include "irq-loongson.h"
+
#define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4
#define LIOINTC_NUM_CORES 4
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 9b35492fb6be..2d4c3ec128b8 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -15,6 +15,8 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define LPC_INT_CTL 0x00
#define LPC_INT_ENA 0x04
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index dd4d699170f4..bd337ecddb40 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -15,6 +15,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include "irq-msi-lib.h"
+#include "irq-loongson.h"
+
static int nr_pics;
struct pch_msi_data {
@@ -27,26 +30,6 @@ struct pch_msi_data {
static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
-static void pch_msi_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void pch_msi_unmask_msi_irq(struct irq_data *d)
-{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-static struct irq_chip pch_msi_irq_chip = {
- .name = "PCH PCI MSI",
- .irq_mask = pch_msi_mask_msi_irq,
- .irq_unmask = pch_msi_unmask_msi_irq,
- .irq_ack = irq_chip_ack_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
{
int first;
@@ -85,12 +68,6 @@ static void pch_msi_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static struct msi_domain_info pch_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
- .chip = &pch_msi_irq_chip,
-};
-
static struct irq_chip middle_irq_chip = {
.name = "PCH MSI",
.irq_mask = irq_chip_mask_parent,
@@ -155,13 +132,31 @@ static void pch_msi_middle_domain_free(struct irq_domain *domain,
static const struct irq_domain_ops pch_msi_middle_domain_ops = {
.alloc = pch_msi_middle_domain_alloc,
.free = pch_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define PCH_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define PCH_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static struct msi_parent_ops pch_msi_parent_ops = {
+ .required_flags = PCH_MSI_FLAGS_REQUIRED,
+ .supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "PCH-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int pch_msi_init_domains(struct pch_msi_data *priv,
struct irq_domain *parent,
struct fwnode_handle *domain_handle)
{
- struct irq_domain *middle_domain, *msi_domain;
+ struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
domain_handle,
@@ -174,14 +169,8 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
- msi_domain = pci_msi_create_irq_domain(domain_handle,
- &pch_msi_domain_info,
- middle_domain);
- if (!msi_domain) {
- pr_err("Failed to create PCI MSI domain\n");
- irq_domain_remove(middle_domain);
- return -ENOMEM;
- }
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &pch_msi_parent_ops;
return 0;
}
@@ -266,17 +255,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
#ifdef CONFIG_ACPI
struct fwnode_handle *get_pch_msi_handle(int pci_segment)
{
- int i;
+ if (cpu_has_avecint)
+ return pch_msi_handle[0];
- for (i = 0; i < MAX_IO_PICS; i++) {
+ for (int i = 0; i < MAX_IO_PICS; i++) {
if (msi_group[i].pci_segment == pci_segment)
return pch_msi_handle[i];
}
- return NULL;
+ return pch_msi_handle[0];
}
-int __init pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi)
+int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi)
{
int ret;
struct fwnode_handle *domain_handle;
@@ -289,4 +278,18 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
return ret;
}
+
+int __init pch_msi_acpi_init_avec(struct irq_domain *parent)
+{
+ if (pch_msi_handle[0])
+ return 0;
+
+ pch_msi_handle[0] = parent->fwnode;
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ parent->msi_parent_ops = &pch_msi_parent_ops;
+
+ return 0;
+}
#endif
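
The pch-msi conversion drops its private pci_msi_create_irq_domain() child in favour of marking the middle domain as an MSI parent, so the shared msi-lib code builds per-device MSI domains on demand. A minimal sketch of the same pattern for a hypothetical driver; my_parent_ops, my_mark_msi_parent and the "MY-" prefix are illustrative.

#include <linux/irqdomain.h>
#include <linux/msi.h>

#include "irq-msi-lib.h"

static const struct msi_parent_ops my_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS,
	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
	.bus_select_mask	= MATCH_PCI_MSI,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.prefix			= "MY-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/* Advertise an existing middle domain as an MSI parent. */
static void my_mark_msi_parent(struct irq_domain *middle)
{
	irq_domain_update_bus_token(middle, DOMAIN_BUS_NEXUS);
	middle->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	middle->msi_parent_ops = &my_parent_ops;
}
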
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index cbaef65e804c..69efda35a8e7 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
+#include "irq-loongson.h"
+
/* Registers */
#define PCH_PIC_MASK 0x20
#define PCH_PIC_HTMSI_EN 0x40
diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h
new file mode 100644
index 000000000000..11fa138d1f44
--- /dev/null
+++ b/drivers/irqchip/irq-loongson.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
+#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
+
+int find_pch_pic(u32 gsi);
+
+int liointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lio_pic *acpi_liointc);
+int eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc);
+int avecintc_acpi_init(struct irq_domain *parent);
+
+int htvec_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_ht_pic *acpi_htvec);
+int pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc);
+int pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic);
+int pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi);
+int pch_msi_acpi_init_avec(struct irq_domain *parent);
+
+#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 53cc08387588..6f69f4e5dbac 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -234,37 +234,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
- struct device_node *np;
u32 num_pins;
- int ret = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
child = of_platform_device_create(np, NULL, NULL);
- if (!child) {
- ret = -ENOMEM;
- break;
- }
+ if (!child)
+ return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) {
- ret = -ENOMEM;
- break;
- }
+ if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip))
+ return -ENOMEM;
}
- if (ret)
- of_node_put(np);
-
- return ret;
+ return 0;
}
#ifdef CONFIG_ACPI
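
The mbigen change above is a pure reference-counting cleanup: for_each_child_of_node_scoped() attaches the __free(device_node) cleanup to the iterator variable, so every early return drops the child reference automatically and the manual of_node_put() unwind disappears. A minimal sketch of the idiom, assuming a generic DT parent node; count_child_intcs is an illustrative helper.

#include <linux/of.h>

static int count_child_intcs(struct device_node *parent)
{
	int n = 0;

	/* The scoped iterator puts each child reference automatically,
	 * even if the loop body returns early. */
	for_each_child_of_node_scoped(parent, np) {
		if (of_property_read_bool(np, "interrupt-controller"))
			n++;
	}

	return n;
}
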
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index dc82162ba763..ad84a2f03368 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -325,8 +325,7 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
return ret;
}
-static asmlinkage void __exception_irq_entry
-omap_intc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs)
{
extern unsigned long irq_err_count;
u32 irqnr;
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 4a3ffe856d6c..7cd6b646774b 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -4,6 +4,7 @@
* Copyright (C) 2022 Ventana Micro Systems Inc.
*/
+#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
@@ -189,17 +190,22 @@ static int aplic_direct_starting_cpu(unsigned int cpu)
}
static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
- u32 *parent_hwirq, unsigned long *parent_hartid)
+ u32 *parent_hwirq, unsigned long *parent_hartid,
+ struct aplic_priv *priv)
{
struct of_phandle_args parent;
+ unsigned long hartid;
int rc;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!is_of_node(dev->fwnode))
- return -EINVAL;
+ if (!is_of_node(dev->fwnode)) {
+ hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index);
+ if (hartid == INVALID_HARTID)
+ return -ENODEV;
+
+ *parent_hartid = hartid;
+ *parent_hwirq = RV_IRQ_EXT;
+ return 0;
+ }
rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
if (rc)
@@ -237,7 +243,7 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
/* Setup per-CPU IDC and target CPU mask */
current_cpu = get_cpu();
for (i = 0; i < priv->nr_idcs; i++) {
- rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid);
+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv);
if (rc) {
dev_warn(dev, "parent irq for IDC%d not found\n", i);
continue;
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
index 981fad6fb8f7..900e72541db9 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.c
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -4,8 +4,10 @@
* Copyright (C) 2022 Ventana Micro Systems Inc.
*/
+#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/irqchip/riscv-aplic.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -125,39 +127,50 @@ static void aplic_init_hw_irqs(struct aplic_priv *priv)
writel(0, priv->regs + APLIC_DOMAINCFG);
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aplic_acpi_match[] = {
+ { "RSCV0002", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, aplic_acpi_match);
+
+#endif
+
int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
{
struct device_node *np = to_of_node(dev->fwnode);
struct of_phandle_args parent;
int rc;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!np)
- return -EINVAL;
-
/* Save device pointer and register base */
priv->dev = dev;
priv->regs = regs;
- /* Find out number of interrupt sources */
- rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
- if (rc) {
- dev_err(dev, "failed to get number of interrupt sources\n");
- return rc;
- }
-
- /*
- * Find out number of IDCs based on parent interrupts
- *
- * If "msi-parent" property is present then we ignore the
- * APLIC IDCs which forces the APLIC driver to use MSI mode.
- */
- if (!of_property_present(np, "msi-parent")) {
- while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
- priv->nr_idcs++;
+ if (np) {
+ /* Find out number of interrupt sources */
+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
+ if (rc) {
+ dev_err(dev, "failed to get number of interrupt sources\n");
+ return rc;
+ }
+
+ /*
+ * Find out number of IDCs based on parent interrupts
+ *
+ * If "msi-parent" property is present then we ignore the
+ * APLIC IDCs which forces the APLIC driver to use MSI mode.
+ */
+ if (!of_property_present(np, "msi-parent")) {
+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
+ priv->nr_idcs++;
+ }
+ } else {
+ rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id,
+ &priv->nr_irqs, &priv->nr_idcs);
+ if (rc) {
+ dev_err(dev, "failed to find GSI mapping\n");
+ return rc;
+ }
}
/* Setup initial state APLIC interrupts */
@@ -184,7 +197,11 @@ static int aplic_probe(struct platform_device *pdev)
* If msi-parent property is present then setup APLIC MSI
* mode otherwise setup APLIC direct mode.
*/
- msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ if (is_of_node(dev->fwnode))
+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ else
+ msi_mode = imsic_acpi_get_fwnode(NULL) ? 1 : 0;
+
if (msi_mode)
rc = aplic_msi_setup(dev, regs);
else
@@ -192,6 +209,11 @@ static int aplic_probe(struct platform_device *pdev)
if (rc)
dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct");
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+#endif
+
return rc;
}
@@ -204,6 +226,7 @@ static struct platform_driver aplic_driver = {
.driver = {
.name = "riscv-aplic",
.of_match_table = aplic_match,
+ .acpi_match_table = ACPI_PTR(aplic_acpi_match),
},
.probe = aplic_probe,
};
diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h
index 4393927d8c80..b0ad8cde69b1 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.h
+++ b/drivers/irqchip/irq-riscv-aplic-main.h
@@ -28,6 +28,7 @@ struct aplic_priv {
u32 gsi_base;
u32 nr_irqs;
u32 nr_idcs;
+ u32 acpi_aplic_id;
void __iomem *regs;
struct aplic_msicfg msicfg;
};
diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
index d7773f76e5d0..945bff28265c 100644
--- a/drivers/irqchip/irq-riscv-aplic-msi.c
+++ b/drivers/irqchip/irq-riscv-aplic-msi.c
@@ -175,6 +175,7 @@ static const struct msi_domain_template aplic_msi_template = {
int aplic_msi_setup(struct device *dev, void __iomem *regs)
{
const struct imsic_global_config *imsic_global;
+ struct irq_domain *msi_domain;
struct aplic_priv *priv;
struct aplic_msicfg *mc;
phys_addr_t pa;
@@ -257,8 +258,14 @@ int aplic_msi_setup(struct device *dev, void __iomem *regs)
* IMSIC and the IMSIC MSI domains are created later through
* the platform driver probing so we set it explicitly here.
*/
- if (is_of_node(dev->fwnode))
+ if (is_of_node(dev->fwnode)) {
of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (msi_domain)
+ dev_set_msi_domain(dev, msi_domain);
+ }
}
if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 4fbb37074d29..c5c2e6929a2f 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -5,13 +5,16 @@
*/
#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
@@ -182,7 +185,7 @@ static int __init imsic_early_dt_init(struct device_node *node, struct device_no
int rc;
/* Setup IMSIC state */
- rc = imsic_setup_state(fwnode);
+ rc = imsic_setup_state(fwnode, NULL);
if (rc) {
pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
return rc;
@@ -199,3 +202,62 @@ static int __init imsic_early_dt_init(struct device_node *node, struct device_no
}
IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);
+
+#ifdef CONFIG_ACPI
+
+static struct fwnode_handle *imsic_acpi_fwnode;
+
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
+{
+ return imsic_acpi_fwnode;
+}
+
+static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header;
+ int rc;
+
+ imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic");
+ if (!imsic_acpi_fwnode) {
+ pr_err("unable to allocate IMSIC FW node\n");
+ return -ENOMEM;
+ }
+
+ /* Setup IMSIC state */
+ rc = imsic_setup_state(imsic_acpi_fwnode, imsic);
+ if (rc) {
+ pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc);
+ return rc;
+ }
+
+ /* Do early setup of IMSIC state and IPIs */
+ rc = imsic_early_probe(imsic_acpi_fwnode);
+ if (rc) {
+ irq_domain_free_fwnode(imsic_acpi_fwnode);
+ imsic_acpi_fwnode = NULL;
+ return rc;
+ }
+
+ rc = imsic_platform_acpi_probe(imsic_acpi_fwnode);
+
+#ifdef CONFIG_PCI
+ if (!rc)
+ pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode);
+#endif
+
+ if (rc)
+ pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n",
+ imsic_acpi_fwnode, rc);
+
+ /*
+ * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can
+ * continue to work. So, no need to return failure. This is similar to
+ * DT where IPI works but MSI probe fails for some reason.
+ */
+ return 0;
+}
+
+IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
+ 1, imsic_early_acpi_init);
+#endif
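
With no device-tree node to anchor the domain, the ACPI path above manufactures a named software fwnode and is careful to free it again when the early probe fails. A minimal sketch of that allocate/use/free pairing; my_fwnode, my_early_init and the setup callback are illustrative.

#include <linux/irqdomain.h>

static struct fwnode_handle *my_fwnode;

static int __init my_early_init(int (*setup)(struct fwnode_handle *))
{
	int rc;

	/* ACPI systems have no DT node, so create a named software node. */
	my_fwnode = irq_domain_alloc_named_fwnode("my-intc");
	if (!my_fwnode)
		return -ENOMEM;

	rc = setup(my_fwnode);
	if (rc) {
		irq_domain_free_fwnode(my_fwnode);
		my_fwnode = NULL;
	}

	return rc;
}
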
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 11723a763c10..64905e6f52d7 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@@ -348,18 +349,37 @@ int imsic_irqdomain_init(void)
return 0;
}
-static int imsic_platform_probe(struct platform_device *pdev)
+static int imsic_platform_probe_common(struct fwnode_handle *fwnode)
{
- struct device *dev = &pdev->dev;
-
- if (imsic && imsic->fwnode != dev->fwnode) {
- dev_err(dev, "fwnode mismatch\n");
+ if (imsic && imsic->fwnode != fwnode) {
+ pr_err("%pfwP: fwnode mismatch\n", fwnode);
return -ENODEV;
}
return imsic_irqdomain_init();
}
+static int imsic_platform_dt_probe(struct platform_device *pdev)
+{
+ return imsic_platform_probe_common(pdev->dev.fwnode);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * On ACPI-based systems, PCI enumeration happens early during boot in
+ * acpi_scan_init(). PCI enumeration expects the MSI domain to be set up
+ * before it calls pci_set_msi_domain(). Hence, unlike in DT where the
+ * imsic-platform driver probe happens late during boot, ACPI-based
+ * systems need to set up the MSI domain early.
+ */
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
+{
+ return imsic_platform_probe_common(fwnode);
+}
+
+#endif
+
static const struct of_device_id imsic_platform_match[] = {
{ .compatible = "riscv,imsics" },
{}
@@ -370,6 +390,6 @@ static struct platform_driver imsic_platform_driver = {
.name = "riscv-imsic",
.of_match_table = imsic_platform_match,
},
- .probe = imsic_platform_probe,
+ .probe = imsic_platform_dt_probe,
};
builtin_platform_driver(imsic_platform_driver);
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index 5479f872e62b..b97e6cd89ed7 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/interrupt.h>
@@ -510,18 +511,90 @@ static int __init imsic_matrix_init(void)
return 0;
}
+static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs)
+{
+ int rc;
+
+ /* Find number of guest index bits in MSI address */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
+ &global->guest_index_bits);
+ if (rc)
+ global->guest_index_bits = 0;
+
+ /* Find number of HART index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
+ &global->hart_index_bits);
+ if (rc) {
+ /* Assume default value */
+ global->hart_index_bits = __fls(*nr_parent_irqs);
+ if (BIT(global->hart_index_bits) < *nr_parent_irqs)
+ global->hart_index_bits++;
+ }
+
+ /* Find number of group index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
+ &global->group_index_bits);
+ if (rc)
+ global->group_index_bits = 0;
+
+ /*
+ * Find first bit position of group index.
+	 * If not specified, assume the default APLIC-IMSIC configuration.
+ */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
+ &global->group_index_shift);
+ if (rc)
+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
+
+ /* Find number of interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
+ &global->nr_ids);
+ if (rc) {
+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
+ return rc;
+ }
+
+ /* Find number of guest interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
+ &global->nr_guest_ids);
+ if (rc)
+ global->nr_guest_ids = global->nr_ids;
+
+ return 0;
+}
+
+static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs, void *opaque)
+{
+ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque;
+
+ global->guest_index_bits = imsic->guest_index_bits;
+ global->hart_index_bits = imsic->hart_index_bits;
+ global->group_index_bits = imsic->group_index_bits;
+ global->group_index_shift = imsic->group_index_shift;
+ global->nr_ids = imsic->num_ids;
+ global->nr_guest_ids = imsic->num_guest_ids;
+ return 0;
+}
+
static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
u32 index, unsigned long *hartid)
{
struct of_phandle_args parent;
int rc;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!is_of_node(fwnode))
- return -EINVAL;
+ if (!is_of_node(fwnode)) {
+ if (hartid)
+ *hartid = acpi_rintc_index_to_hartid(index);
+
+ if (!hartid || (*hartid == INVALID_HARTID))
+ return -EINVAL;
+
+ return 0;
+ }
rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
if (rc)
@@ -540,12 +613,8 @@ static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
u32 index, struct resource *res)
{
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
if (!is_of_node(fwnode))
- return -EINVAL;
+ return acpi_rintc_get_imsic_mmio_info(index, res);
return of_address_to_resource(to_of_node(fwnode), index, res);
}
@@ -553,20 +622,14 @@ static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
struct imsic_global_config *global,
u32 *nr_parent_irqs,
- u32 *nr_mmios)
+ u32 *nr_mmios,
+ void *opaque)
{
unsigned long hartid;
struct resource res;
int rc;
u32 i;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!is_of_node(fwnode))
- return -EINVAL;
-
*nr_parent_irqs = 0;
*nr_mmios = 0;
@@ -578,50 +641,13 @@ static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
return -EINVAL;
}
- /* Find number of guest index bits in MSI address */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
- &global->guest_index_bits);
- if (rc)
- global->guest_index_bits = 0;
-
- /* Find number of HART index bits */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
- &global->hart_index_bits);
- if (rc) {
- /* Assume default value */
- global->hart_index_bits = __fls(*nr_parent_irqs);
- if (BIT(global->hart_index_bits) < *nr_parent_irqs)
- global->hart_index_bits++;
- }
-
- /* Find number of group index bits */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
- &global->group_index_bits);
- if (rc)
- global->group_index_bits = 0;
+ if (is_of_node(fwnode))
+ rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs);
+ else
+ rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque);
- /*
- * Find first bit position of group index.
- * If not specified assumed the default APLIC-IMSIC configuration.
- */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
- &global->group_index_shift);
if (rc)
- global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
-
- /* Find number of interrupt identities */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
- &global->nr_ids);
- if (rc) {
- pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
return rc;
- }
-
- /* Find number of guest interrupt identities */
- rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
- &global->nr_guest_ids);
- if (rc)
- global->nr_guest_ids = global->nr_ids;
/* Sanity check guest index bits */
i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
@@ -688,7 +714,7 @@ static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
return 0;
}
-int __init imsic_setup_state(struct fwnode_handle *fwnode)
+int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
{
u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
struct imsic_global_config *global;
@@ -729,7 +755,7 @@ int __init imsic_setup_state(struct fwnode_handle *fwnode)
}
/* Parse IMSIC fwnode */
- rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios);
+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque);
if (rc)
goto out_free_local;
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 5ae2f69b035b..391e44280827 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -102,7 +102,7 @@ void imsic_vector_debug_show_summary(struct seq_file *m, int ind);
void imsic_state_online(void);
void imsic_state_offline(void);
-int imsic_setup_state(struct fwnode_handle *fwnode);
+int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque);
int imsic_irqdomain_init(void);
#endif
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 47f3200476da..8c5411386220 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -250,6 +250,85 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
#ifdef CONFIG_ACPI
+struct rintc_data {
+ union {
+ u32 ext_intc_id;
+ struct {
+ u32 context_id : 16,
+ reserved : 8,
+ aplic_plic_id : 8;
+ };
+ };
+ unsigned long hart_id;
+ u64 imsic_addr;
+ u32 imsic_size;
+};
+
+static u32 nr_rintc;
+static struct rintc_data *rintc_acpi_data[NR_CPUS];
+
+#define for_each_matching_plic(_plic_id) \
+ unsigned int _plic; \
+ \
+ for (_plic = 0; _plic < nr_rintc; _plic++) \
+ if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \
+ continue; \
+ else
+
+unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id)
+{
+ unsigned int nctx = 0;
+
+ for_each_matching_plic(plic_id)
+ nctx++;
+
+ return nctx;
+}
+
+static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ unsigned int ctxt = 0;
+
+ for_each_matching_plic(plic_id) {
+ if (ctxt == ctxt_idx)
+ return rintc_acpi_data[_plic];
+
+ ctxt++;
+ }
+
+ return NULL;
+}
+
+unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx);
+
+ return data ? data->hart_id : INVALID_HARTID;
+}
+
+unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx)
+{
+ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx);
+
+ return data ? data->context_id : INVALID_CONTEXT;
+}
+
+unsigned long acpi_rintc_index_to_hartid(u32 index)
+{
+ return index >= nr_rintc ? INVALID_HARTID : rintc_acpi_data[index]->hart_id;
+}
+
+int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res)
+{
+ if (index >= nr_rintc)
+ return -1;
+
+ res->start = rintc_acpi_data[index]->imsic_addr;
+ res->end = res->start + rintc_acpi_data[index]->imsic_size - 1;
+ res->flags = IORESOURCE_MEM;
+ return 0;
+}
+
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
@@ -258,6 +337,15 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
int rc;
rintc = (struct acpi_madt_rintc *)header;
+ rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL);
+ if (!rintc_acpi_data[nr_rintc])
+ return -ENOMEM;
+
+ rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id;
+ rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id;
+ rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr;
+ rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size;
+ nr_rintc++;
/*
* The ACPI MADT will have one INTC for each CPU (or HART)
@@ -277,6 +365,8 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
rc = riscv_intc_init_common(fn, &riscv_intc_chip);
if (rc)
irq_domain_free_fwnode(fn);
+ else
+ acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id);
return rc;
}
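
The for_each_matching_plic() macro above opens with its own loop-variable declaration, so each use must stand alone as a statement, and the continue/else trick turns the filter into a body that runs only for matching entries. For comparison, the same scan written without the macro; count_plic_contexts is an illustrative name, assuming the rintc_acpi_data[] and nr_rintc globals from the patch.

static unsigned int count_plic_contexts(unsigned int plic_id)
{
	unsigned int i, nctx = 0;

	/* Each RINTC entry records which external controller it is
	 * wired to; count the entries that belong to this PLIC. */
	for (i = 0; i < nr_rintc; i++) {
		if (rintc_acpi_data[i]->aplic_plic_id == plic_id)
			nctx++;
	}

	return nctx;
}
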
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 31c202a1ae62..9d0b80271949 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -127,8 +127,7 @@ static int __init sa1100irq_init_devicefs(void)
device_initcall(sa1100irq_init_devicefs);
-static asmlinkage void __exception_irq_entry
-sa1100_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sa1100_handle_irq(struct pt_regs *regs)
{
uint32_t icip, icmr, mask;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 4d9ea718086d..2f6ef5c495bd 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Christoph Hellwig
*/
#define pr_fmt(fmt) "riscv-plic: " fmt
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -71,6 +72,8 @@ struct plic_priv {
unsigned long plic_quirks;
unsigned int nr_irqs;
unsigned long *prio_save;
+ u32 gsi_base;
+ int acpi_plic_id;
};
struct plic_handler {
@@ -325,6 +328,10 @@ static int plic_irq_domain_translate(struct irq_domain *d,
{
struct plic_priv *priv = d->host_data;
+ /* For DT, gsi_base is always zero. */
+ if (fwspec->param[0] >= priv->gsi_base)
+ fwspec->param[0] = fwspec->param[0] - priv->gsi_base;
+
if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
return irq_domain_translate_twocell(d, fwspec, hwirq, type);
@@ -426,17 +433,36 @@ static const struct of_device_id plic_match[] = {
{}
};
+#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id plic_acpi_match[] = {
+ { "RSCV0001", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, plic_acpi_match);
+
+#endif
static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
- u32 *nr_irqs, u32 *nr_contexts)
+ u32 *nr_irqs, u32 *nr_contexts,
+ u32 *gsi_base, u32 *id)
{
int rc;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!is_of_node(fwnode))
- return -EINVAL;
+ if (!is_of_node(fwnode)) {
+ rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
+ if (rc) {
+ pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
+ return rc;
+ }
+
+ *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
+ if (WARN_ON(!*nr_contexts)) {
+ pr_err("%pfwP: no PLIC context available\n", fwnode);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
if (rc) {
@@ -450,22 +476,28 @@ static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
return -EINVAL;
}
+ *gsi_base = 0;
+ *id = 0;
+
return 0;
}
static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
- u32 *parent_hwirq, int *parent_cpu)
+ u32 *parent_hwirq, int *parent_cpu, u32 id)
{
struct of_phandle_args parent;
unsigned long hartid;
int rc;
- /*
- * Currently, only OF fwnode is supported so extend this
- * function for ACPI support.
- */
- if (!is_of_node(fwnode))
- return -EINVAL;
+ if (!is_of_node(fwnode)) {
+ hartid = acpi_rintc_ext_parent_to_hartid(id, context);
+ if (hartid == INVALID_HARTID)
+ return -EINVAL;
+
+ *parent_cpu = riscv_hartid_to_cpuid(hartid);
+ *parent_hwirq = RV_IRQ_EXT;
+ return 0;
+ }
rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
if (rc)
@@ -489,6 +521,8 @@ static int plic_probe(struct fwnode_handle *fwnode)
struct plic_priv *priv;
irq_hw_number_t hwirq;
void __iomem *regs;
+ int id, context_id;
+ u32 gsi_base;
if (is_of_node(fwnode)) {
const struct of_device_id *id;
@@ -501,10 +535,12 @@ static int plic_probe(struct fwnode_handle *fwnode)
if (!regs)
return -ENOMEM;
} else {
- return -ENODEV;
+ regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
}
- error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts);
+ error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
if (error)
goto fail_free_regs;
@@ -518,6 +554,8 @@ static int plic_probe(struct fwnode_handle *fwnode)
priv->plic_quirks = plic_quirks;
priv->nr_irqs = nr_irqs;
priv->regs = regs;
+ priv->gsi_base = gsi_base;
+ priv->acpi_plic_id = id;
priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
if (!priv->prio_save) {
@@ -526,12 +564,23 @@ static int plic_probe(struct fwnode_handle *fwnode)
}
for (i = 0; i < nr_contexts; i++) {
- error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu);
+ error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
+ priv->acpi_plic_id);
if (error) {
pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
continue;
}
+ if (is_of_node(fwnode)) {
+ context_id = i;
+ } else {
+ context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
+ if (context_id == INVALID_CONTEXT) {
+ pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
+ continue;
+ }
+ }
+
/*
* Skip contexts other than external interrupts for our
* privilege level.
@@ -569,10 +618,10 @@ static int plic_probe(struct fwnode_handle *fwnode)
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base = priv->regs + CONTEXT_BASE +
- i * CONTEXT_SIZE;
+ context_id * CONTEXT_SIZE;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
- i * CONTEXT_ENABLE_SIZE;
+ context_id * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
@@ -588,8 +637,8 @@ done:
nr_handlers++;
}
- priv->irqdomain = irq_domain_add_linear(to_of_node(fwnode), nr_irqs + 1,
- &plic_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
goto fail_cleanup_contexts;
@@ -626,13 +675,18 @@ done:
}
}
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
+#endif
+
pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
fwnode, nr_irqs, nr_handlers, nr_contexts);
return 0;
fail_cleanup_contexts:
for (i = 0; i < nr_contexts; i++) {
- if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu))
+ if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
continue;
if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
continue;
@@ -663,6 +717,7 @@ static struct platform_driver plic_driver = {
.name = "riscv-plic",
.of_match_table = plic_match,
.suppress_bind_attrs = true,
+ .acpi_match_table = ACPI_PTR(plic_acpi_match),
},
.probe = plic_platform_probe,
};
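
The translate hook above now rebases firmware-global GSI numbers into the PLIC's local hwirq space; on DT systems gsi_base stays zero so the subtraction is a no-op, while on ACPI each PLIC owns a GSI window starting at its base. A one-function sketch of the rebasing rule; gsi_to_hwirq is an illustrative name.

/* Rebase a firmware-global GSI into this controller's hwirq space.
 * With DT, gsi_base is always zero, so this is the identity mapping. */
static u32 gsi_to_hwirq(u32 gsi, u32 gsi_base)
{
	return gsi >= gsi_base ? gsi - gsi_base : gsi;
}
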
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 5018a06060e6..ca471c6fee99 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -128,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
* Keep iterating over all registered FPGA IRQ controllers until there are
* no pending interrupts.
*/
-static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
{
int i, handled;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 61cb45c5d0d8..53fad9487574 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -82,7 +82,7 @@
* - has multiple clocks.
* - has no usable clock due to jitter or packet loss (VoIP).
* In this case the system's clock is used. The clock resolution depends on
- * the jiffie resolution.
+ * the jiffy resolution.
*
* If a member joins a conference:
*
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 8d9d8da376e4..b784bb74a837 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -825,6 +825,14 @@ config LEDS_BLINKM
This option enables support for the BlinkM RGB LED connected
through I2C. Say Y to enable support for the BlinkM LED.
+config LEDS_BLINKM_MULTICOLOR
+ bool "Enable multicolor support for BlinkM I2C RGB LED"
+ depends on LEDS_BLINKM
+ depends on LEDS_CLASS_MULTICOLOR=y || LEDS_CLASS_MULTICOLOR=LEDS_BLINKM
+ help
+	  This option enables multicolor sysfs class support for the BlinkM
+	  LED and disables the older, separate sysfs interface.
+
config LEDS_POWERNV
tristate "LED support for PowerNV Platform"
depends on LEDS_CLASS
diff --git a/drivers/leds/flash/leds-aat1290.c b/drivers/leds/flash/leds-aat1290.c
index e8f9dd293592..c7b6a1f01288 100644
--- a/drivers/leds/flash/leds-aat1290.c
+++ b/drivers/leds/flash/leds-aat1290.c
@@ -7,6 +7,7 @@
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/led-class-flash.h>
@@ -215,7 +216,6 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
struct device_node **sub_node)
{
struct device *dev = &led->pdev->dev;
- struct device_node *child_node;
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
struct pinctrl *pinctrl;
#endif
@@ -246,7 +246,8 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
}
#endif
- child_node = of_get_next_available_child(dev_of_node(dev), NULL);
+ struct device_node *child_node __free(device_node) =
+ of_get_next_available_child(dev_of_node(dev), NULL);
if (!child_node) {
dev_err(dev, "No DT child node found for connected LED.\n");
return -EINVAL;
@@ -267,7 +268,7 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
if (ret < 0) {
dev_err(dev,
"flash-max-microamp DT property missing\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-timeout-us",
@@ -275,15 +276,12 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
if (ret < 0) {
dev_err(dev,
"flash-max-timeout-us DT property missing\n");
- goto err_parse_dt;
+ return ret;
}
*sub_node = child_node;
-err_parse_dt:
- of_node_put(child_node);
-
- return ret;
+ return 0;
}
static void aat1290_led_validate_mm_current(struct aat1290_led *led,
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index 2c6ef321b7c8..2f2d783c62c3 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -478,14 +478,12 @@ static int as3645a_detect(struct as3645a *flash)
return as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
}
-static int as3645a_parse_node(struct as3645a *flash,
- struct fwnode_handle *fwnode)
+static int as3645a_parse_node(struct device *dev, struct as3645a *flash)
{
struct as3645a_config *cfg = &flash->cfg;
- struct fwnode_handle *child;
int rval;
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
u32 id = 0;
fwnode_property_read_u32(child, "reg", &id);
@@ -686,7 +684,7 @@ static int as3645a_probe(struct i2c_client *client)
flash->client = client;
- rval = as3645a_parse_node(flash, dev_fwnode(&client->dev));
+ rval = as3645a_parse_node(&client->dev, flash);
if (rval < 0)
return rval;
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index 7bb0aa2753e3..16a01a200c0b 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -6,6 +6,7 @@
* Ingi Kim <ingi2.kim@samsung.com>
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/leds-expresswire.h>
@@ -208,7 +209,6 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct ktd2692_led_config_data *cfg)
{
struct device_node *np = dev_of_node(dev);
- struct device_node *child_node;
int ret;
if (!np)
@@ -239,7 +239,8 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
}
}
- child_node = of_get_next_available_child(np, NULL);
+ struct device_node *child_node __free(device_node) =
+ of_get_next_available_child(np, NULL);
if (!child_node) {
dev_err(dev, "No DT child node found for connected LED.\n");
return -EINVAL;
@@ -252,26 +253,24 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
&cfg->movie_max_microamp);
if (ret) {
dev_err(dev, "failed to parse led-max-microamp\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-microamp",
&cfg->flash_max_microamp);
if (ret) {
dev_err(dev, "failed to parse flash-max-microamp\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-timeout-us",
&cfg->flash_max_timeout);
if (ret) {
dev_err(dev, "failed to parse flash-max-timeout-us\n");
- goto err_parse_dt;
+ return ret;
}
-err_parse_dt:
- of_node_put(child_node);
- return ret;
+ return 0;
}
static const struct led_flash_ops flash_ops = {
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index 7e93c447fec5..abf6b96ade3d 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -190,7 +190,7 @@ static int lm3601x_brightness_set(struct led_classdev *cdev,
goto out;
}
- ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness);
+ ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness - 1);
if (ret < 0)
goto out;
@@ -341,8 +341,9 @@ static int lm3601x_register_leds(struct lm3601x_led *led,
led_cdev = &led->fled_cdev.led_cdev;
led_cdev->brightness_set_blocking = lm3601x_brightness_set;
- led_cdev->max_brightness = DIV_ROUND_UP(led->torch_current_max,
- LM3601X_TORCH_REG_DIV);
+ led_cdev->max_brightness =
+ DIV_ROUND_UP(led->torch_current_max - LM3601X_MIN_TORCH_I_UA + 1,
+ LM3601X_TORCH_REG_DIV);
led_cdev->flags |= LED_DEV_CAP_FLASH;
init_data.fwnode = fwnode;
@@ -386,6 +387,14 @@ static int lm3601x_parse_node(struct lm3601x_led *led,
goto out_err;
}
+ if (led->torch_current_max > LM3601X_MAX_TORCH_I_UA) {
+ dev_warn(&led->client->dev,
+ "Max torch current set too high (%d vs %d)\n",
+ led->torch_current_max,
+ LM3601X_MAX_TORCH_I_UA);
+ led->torch_current_max = LM3601X_MAX_TORCH_I_UA;
+ }
+
ret = fwnode_property_read_u32(child, "flash-max-microamp",
&led->flash_current_max);
if (ret) {
@@ -434,6 +443,10 @@ static int lm3601x_probe(struct i2c_client *client)
return ret;
}
+ ret = regmap_write(led->regmap, LM3601X_DEV_ID_REG, LM3601X_SW_RESET);
+ if (ret)
+ dev_warn(&client->dev, "Failed to reset the LED controller\n");
+
mutex_init(&led->lock);
return lm3601x_register_leds(led, fwnode);
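
The two lm3601x hunks are one logical fix: torch level b now programs register value b - 1, and max_brightness is derived from the span between the minimum torch current and the configured maximum. A worked sketch with hypothetical numbers (the real LM3601X_* constants are defined elsewhere in the driver):

	/* Assume a 2400 uA minimum and 2800 uA per register step. With
	 * torch_current_max = 362700 uA:
	 *   DIV_ROUND_UP(362700 - 2400 + 1, 2800) = 129
	 * so brightness runs 1..129 and level b writes b - 1 (0..128).
	 */
	static u8 demo_torch_reg(u32 brightness)
	{
		return brightness - 1;
	}
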
diff --git a/drivers/leds/flash/leds-max77693.c b/drivers/leds/flash/leds-max77693.c
index 9f016b851193..90d78b3d22f8 100644
--- a/drivers/leds/flash/leds-max77693.c
+++ b/drivers/leds/flash/leds-max77693.c
@@ -599,7 +599,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
{
struct device *dev = &led->pdev->dev;
struct max77693_sub_led *sub_leds = led->sub_leds;
- struct device_node *node = dev_of_node(dev), *child_node;
+ struct device_node *node = dev_of_node(dev);
struct property *prop;
u32 led_sources[2];
int i, ret, fled_id;
@@ -608,7 +608,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout);
of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys);
- for_each_available_child_of_node(node, child_node) {
+ for_each_available_child_of_node_scoped(node, child_node) {
prop = of_find_property(child_node, "led-sources", NULL);
if (prop) {
const __be32 *srcs = NULL;
@@ -622,7 +622,6 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
} else {
dev_err(dev,
"led-sources DT property missing\n");
- of_node_put(child_node);
return -EINVAL;
}
@@ -638,18 +637,16 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
} else {
dev_err(dev,
"Wrong led-sources DT property value.\n");
- of_node_put(child_node);
return -EINVAL;
}
if (sub_nodes[fled_id]) {
dev_err(dev,
"Conflicting \"led-sources\" DT properties\n");
- of_node_put(child_node);
return -EINVAL;
}
- sub_nodes[fled_id] = child_node;
+ sub_nodes[fled_id] = of_node_get(child_node);
sub_leds[fled_id].fled_id = fled_id;
cfg->label[fled_id] =
@@ -681,10 +678,8 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
if (++cfg->num_leds == 2 ||
(max77693_fled_used(led, FLED1) &&
- max77693_fled_used(led, FLED2))) {
- of_node_put(child_node);
+ max77693_fled_used(led, FLED2)))
break;
- }
}
if (cfg->num_leds == 0) {
@@ -968,7 +963,7 @@ static int max77693_led_probe(struct platform_device *pdev)
ret = max77693_setup(led, &led_cfg);
if (ret < 0)
- return ret;
+ goto err_setup;
mutex_init(&led->lock);
@@ -1000,6 +995,8 @@ static int max77693_led_probe(struct platform_device *pdev)
else
goto err_register_led1;
}
+ of_node_put(sub_nodes[i]);
+ sub_nodes[i] = NULL;
}
return 0;
@@ -1013,6 +1010,9 @@ err_register_led2:
err_register_led1:
mutex_destroy(&led->lock);
+err_setup:
+ for (i = FLED1; i <= FLED2; i++)
+ of_node_put(sub_nodes[i]);
return ret;
}
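
The max77693 hunks illustrate the one caveat of the scoped OF iterators: the loop owns the per-iteration reference and drops it as each iteration ends, so a child pointer stored past the loop body needs its own of_node_get(), balanced by the of_node_put() calls added on the success and error paths above. A sketch of the rule, assuming those iterator semantics:

	#include <linux/of.h>

	/* Illustrative: keep the first child carrying "led-sources". */
	static struct device_node *demo_find_child(const struct device_node *np)
	{
		struct device_node *found = NULL;

		for_each_available_child_of_node_scoped(np, child) {
			if (of_property_present(child, "led-sources")) {
				found = of_node_get(child); /* survives the loop */
				break;
			}
		}

		return found;	/* caller must of_node_put() it */
	}
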
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index bf70bf6fb0d5..41ce034f700e 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -14,6 +14,9 @@
#include <media/v4l2-flash-led-class.h>
/* registers definitions */
+#define FLASH_REVISION_REG 0x00
+#define FLASH_4CH_REVISION_V0P1 0x01
+
#define FLASH_TYPE_REG 0x04
#define FLASH_TYPE_VAL 0x18
@@ -73,6 +76,16 @@
#define UA_PER_MA 1000
+/* thermal threshold constants */
+#define OTST_3CH_MIN_VAL 3
+#define OTST1_4CH_MIN_VAL 0
+#define OTST1_4CH_V0P1_MIN_VAL 3
+#define OTST2_4CH_MIN_VAL 0
+
+#define OTST1_MAX_CURRENT_MA 1000
+#define OTST2_MAX_CURRENT_MA 500
+#define OTST3_MAX_CURRENT_MA 200
+
enum hw_type {
QCOM_MVFLASH_3CH,
QCOM_MVFLASH_4CH,
@@ -98,6 +111,9 @@ enum {
REG_IRESOLUTION,
REG_CHAN_STROBE,
REG_CHAN_EN,
+ REG_THERM_THRSH1,
+ REG_THERM_THRSH2,
+ REG_THERM_THRSH3,
REG_MAX_COUNT,
};
@@ -111,6 +127,9 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x47, 0, 5), /* iresolution */
REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */
REG_FIELD(0x4c, 0, 2), /* chan_en */
+ REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */
+ REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */
+ REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
};
static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
@@ -123,6 +142,8 @@ static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x49, 0, 3), /* iresolution */
REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */
REG_FIELD(0x4e, 0, 3), /* chan_en */
+ REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */
+ REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */
};
struct qcom_flash_data {
@@ -130,9 +151,11 @@ struct qcom_flash_data {
struct regmap_field *r_fields[REG_MAX_COUNT];
struct mutex lock;
enum hw_type hw_type;
+ u32 total_ma;
u8 leds_count;
u8 max_channels;
u8 chan_en_bits;
+ u8 revision;
};
struct qcom_flash_led {
@@ -143,6 +166,7 @@ struct qcom_flash_led {
u32 max_timeout_ms;
u32 flash_current_ma;
u32 flash_timeout_ms;
+ u32 current_in_use_ma;
u8 *chan_id;
u8 chan_count;
bool enabled;
@@ -172,6 +196,127 @@ static int set_flash_module_en(struct qcom_flash_led *led, bool en)
return rc;
}
+static int update_allowed_flash_current(struct qcom_flash_led *led, u32 *current_ma, bool strobe)
+{
+ struct qcom_flash_data *flash_data = led->flash_data;
+ u32 therm_ma, avail_ma, thrsh[3], min_thrsh, sts;
+ int rc = 0;
+
+ mutex_lock(&flash_data->lock);
+ /*
+	 * Return the previously allocated current to the allowed budget in either of these two cases:
+	 * 1) the LED is being disabled;
+	 * 2) the LED is enabled again while it is already on
+ */
+ if (!strobe || led->current_in_use_ma != 0) {
+ if (flash_data->total_ma >= led->current_in_use_ma)
+ flash_data->total_ma -= led->current_in_use_ma;
+ else
+ flash_data->total_ma = 0;
+
+ led->current_in_use_ma = 0;
+ if (!strobe)
+ goto unlock;
+ }
+
+ /*
+	 * Cache the default thermal threshold settings and set them to the lowest levels before
+	 * reading the real-time over-temp status. If over-temp has been triggered at the lowest
+	 * threshold, it is very likely to trigger at a higher (default) threshold
+	 * when more flash current is requested. Prevent the device from entering an over-temp
+	 * condition by limiting the flash current for the new request.
+ */
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH1], &thrsh[0]);
+ if (rc < 0)
+ goto unlock;
+
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH2], &thrsh[1]);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH3], &thrsh[2]);
+ if (rc < 0)
+ goto unlock;
+ }
+
+ min_thrsh = OTST_3CH_MIN_VAL;
+ if (flash_data->hw_type == QCOM_MVFLASH_4CH)
+ min_thrsh = (flash_data->revision == FLASH_4CH_REVISION_V0P1) ?
+ OTST1_4CH_V0P1_MIN_VAL : OTST1_4CH_MIN_VAL;
+
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], min_thrsh);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_4CH)
+ min_thrsh = OTST2_4CH_MIN_VAL;
+
+ /*
+	 * The default thermal threshold settings have been modified by now,
+	 * so restore them if any fault happens from here on.
+ */
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], min_thrsh);
+ if (rc < 0)
+ goto restore;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], min_thrsh);
+ if (rc < 0)
+ goto restore;
+ }
+
+	/* Read the thermal level status to get the corresponding derated flash current */
+ rc = regmap_field_read(flash_data->r_fields[REG_STATUS2], &sts);
+ if (rc)
+ goto restore;
+
+ therm_ma = FLASH_TOTAL_CURRENT_MAX_UA / 1000;
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ if (sts & FLASH_STS_3CH_OTST3)
+ therm_ma = OTST3_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_3CH_OTST2)
+ therm_ma = OTST2_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_3CH_OTST1)
+ therm_ma = OTST1_MAX_CURRENT_MA;
+ } else {
+ if (sts & FLASH_STS_4CH_OTST2)
+ therm_ma = OTST2_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_4CH_OTST1)
+ therm_ma = OTST1_MAX_CURRENT_MA;
+ }
+
+ /* Calculate the allowed flash current for the request */
+ if (therm_ma <= flash_data->total_ma)
+ avail_ma = 0;
+ else
+ avail_ma = therm_ma - flash_data->total_ma;
+
+ *current_ma = min_t(u32, *current_ma, avail_ma);
+ led->current_in_use_ma = *current_ma;
+ flash_data->total_ma += led->current_in_use_ma;
+
+ dev_dbg(led->flash.led_cdev.dev, "allowed flash current: %dmA, total current: %dmA\n",
+ led->current_in_use_ma, flash_data->total_ma);
+
+restore:
+ /* Restore to default thermal threshold settings */
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], thrsh[0]);
+ if (rc < 0)
+ goto unlock;
+
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], thrsh[1]);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH)
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], thrsh[2]);
+
+unlock:
+ mutex_unlock(&flash_data->lock);
+ return rc;
+}
+
static int set_flash_current(struct qcom_flash_led *led, u32 current_ma, enum led_mode mode)
{
struct qcom_flash_data *flash_data = led->flash_data;
@@ -313,6 +458,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat
if (rc)
return rc;
+ rc = update_allowed_flash_current(led, &led->flash_current_ma, state);
+ if (rc < 0)
+ return rc;
+
rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE);
if (rc)
return rc;
@@ -429,6 +578,10 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev,
if (rc)
return rc;
+ rc = update_allowed_flash_current(led, &current_ma, enable);
+ if (rc < 0)
+ return rc;
+
rc = set_flash_current(led, current_ma, TORCH_MODE);
if (rc)
return rc;
@@ -707,6 +860,14 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->hw_type = QCOM_MVFLASH_4CH;
flash_data->max_channels = 4;
regs = mvflash_4ch_regs;
+
+ rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val);
+ if (rc < 0) {
+ dev_err(dev, "Failed to read flash LED module revision, rc=%d\n", rc);
+ return rc;
+ }
+
+ flash_data->revision = val;
} else {
dev_err(dev, "flash LED subtype %#x is not yet supported\n", val);
return -ENODEV;
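
Most of update_allowed_flash_current() is threshold bookkeeping; the grant itself is saturating arithmetic. A worked example using the constants added in this patch: with the 1000 mA OTST1 ceiling and 600 mA already committed to other channels, a 500 mA request is clamped to the 400 mA remainder.

	#include <linux/minmax.h>

	/* Worked example: demo_grant(1000, 600, 500) returns 400. */
	static u32 demo_grant(u32 therm_ma, u32 total_ma, u32 request_ma)
	{
		u32 avail_ma = therm_ma > total_ma ? therm_ma - total_ma : 0;

		return min_t(u32, request_ma, avail_ma);
	}
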
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 033ab5fed38a..81238376484b 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -115,7 +115,7 @@ static int pm860x_led_set(struct led_classdev *cdev,
static int pm860x_led_dt_init(struct platform_device *pdev,
struct pm860x_led *data)
{
- struct device_node *nproot, *np;
+ struct device_node *nproot;
int iset = 0;
if (!dev_of_node(pdev->dev.parent))
@@ -125,12 +125,11 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to find leds node\n");
return -ENODEV;
}
- for_each_available_child_of_node(nproot, np) {
+ for_each_available_child_of_node_scoped(nproot, np) {
if (of_node_name_eq(np, data->name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
data->iset = PM8606_LED_CURRENT(iset);
- of_node_put(np);
break;
}
}
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 6475eadcb0df..216755d6010f 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -263,7 +263,7 @@ out:
static int aw2013_probe_dt(struct aw2013 *chip)
{
- struct device_node *np = dev_of_node(&chip->client->dev), *child;
+ struct device_node *np = dev_of_node(&chip->client->dev);
int count, ret = 0, i = 0;
struct aw2013_led *led;
@@ -273,7 +273,7 @@ static int aw2013_probe_dt(struct aw2013 *chip)
regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
struct led_init_data init_data = {};
u32 source;
u32 imax;
@@ -304,10 +304,8 @@ static int aw2013_probe_dt(struct aw2013 *chip)
ret = devm_led_classdev_register_ext(&chip->client->dev,
&led->cdev, &init_data);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
i++;
}
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 246f1296ab09..29f5bad61796 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -392,7 +392,6 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(&pdev->dev);
- struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val, *blink_leds, *blink_delay;
@@ -435,7 +434,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
val |= BCM6328_SERIAL_LED_SHIFT_DIR;
bcm6328_led_write(mem + BCM6328_REG_INIT, val);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int rc;
u32 reg;
@@ -454,10 +453,8 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
rc = bcm6328_led(dev, child, reg, mem, lock,
blink_leds, blink_delay);
- if (rc < 0) {
- of_node_put(child);
+ if (rc < 0)
return rc;
- }
}
return 0;
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 86e51d44a5a7..51fcff2a64fd 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -147,7 +147,6 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(&pdev->dev);
- struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val;
@@ -184,7 +183,7 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
}
bcm6358_led_write(mem + BCM6358_REG_CTRL, val);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int rc;
u32 reg;
@@ -198,10 +197,8 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
}
rc = bcm6358_led(dev, child, reg, mem, lock);
- if (rc < 0) {
- of_node_put(child);
+ if (rc < 0)
return rc;
- }
}
return 0;
diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c
index 3fda712d2f80..c1181a35d0f7 100644
--- a/drivers/leds/leds-bd2606mvv.c
+++ b/drivers/leds/leds-bd2606mvv.c
@@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
static int bd2606mvv_probe(struct i2c_client *client)
{
- struct fwnode_handle *np, *child;
struct device *dev = &client->dev;
struct bd2606mvv_priv *priv;
struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 };
int active_pairs[BD2606_MAX_LEDS / 2] = { 0 };
int err, reg;
- int i;
+ int i, j;
- np = dev_fwnode(dev);
- if (!np)
+ if (!dev_fwnode(dev))
return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
i2c_set_clientdata(client, priv);
- fwnode_for_each_available_child_node(np, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct bd2606mvv_led *led;
err = fwnode_property_read_u32(child, "reg", &reg);
- if (err) {
- fwnode_handle_put(child);
+ if (err)
return err;
- }
- if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) {
- fwnode_handle_put(child);
+
+ if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg])
return -EINVAL;
- }
+
led = &priv->leds[reg];
- led_fwnodes[reg] = child;
+ led_fwnodes[reg] = fwnode_handle_get(child);
active_pairs[reg / 2]++;
led->priv = priv;
led->led_no = reg;
@@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client)
&priv->leds[i].ldev,
&init_data);
if (err < 0) {
- fwnode_handle_put(child);
+ for (j = i; j < BD2606_MAX_LEDS; j++)
+ fwnode_handle_put(led_fwnodes[j]);
return dev_err_probe(dev, err,
"couldn't register LED %s\n",
priv->leds[i].ldev.name);
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index e40b87aead2d..577497b9d426 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -2,6 +2,7 @@
/*
* leds-blinkm.c
* (c) Jan-Simon Möller (dl9pf@gmx.de)
+ * (c) Joseph Strauss (jstrauss@mailbox.org)
*/
#include <linux/module.h>
@@ -15,6 +16,10 @@
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/delay.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/kconfig.h>
+
+#define NUM_LEDS 3
/* Addresses to scan - BlinkM is on 0x09 by default */
static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
@@ -22,19 +27,25 @@ static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
static int blinkm_transfer_hw(struct i2c_client *client, int cmd);
static int blinkm_test_run(struct i2c_client *client);
+/* Holds structs for both the color-separated sysfs classes and the new multicolor class */
struct blinkm_led {
struct i2c_client *i2c_client;
- struct led_classdev led_cdev;
+ union {
+ /* used when multicolor support is disabled */
+ struct led_classdev led_cdev;
+ struct led_classdev_mc mcled_cdev;
+ } cdev;
int id;
};
-#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev)
+#define led_cdev_to_blmled(c) container_of(c, struct blinkm_led, cdev.led_cdev)
+#define mcled_cdev_to_led(c) container_of(c, struct blinkm_led, cdev.mcled_cdev)
struct blinkm_data {
struct i2c_client *i2c_client;
struct mutex update_lock;
/* used for led class interface */
- struct blinkm_led blinkm_leds[3];
+ struct blinkm_led blinkm_leds[NUM_LEDS];
/* used for "blinkm" sysfs interface */
u8 red; /* color red */
u8 green; /* color green */
@@ -419,11 +430,29 @@ static int blinkm_transfer_hw(struct i2c_client *client, int cmd)
return 0;
}
+static int blinkm_set_mc_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev);
+ struct blinkm_led *led = mcled_cdev_to_led(mcled_cdev);
+ struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
+
+ led_mc_calc_color_components(mcled_cdev, value);
+
+ data->next_red = (u8) mcled_cdev->subled_info[RED].brightness;
+ data->next_green = (u8) mcled_cdev->subled_info[GREEN].brightness;
+ data->next_blue = (u8) mcled_cdev->subled_info[BLUE].brightness;
+
+ blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB);
+
+ return 0;
+}
+
static int blinkm_led_common_set(struct led_classdev *led_cdev,
enum led_brightness value, int color)
{
/* led_brightness is 0, 127 or 255 - we just use it here as-is */
- struct blinkm_led *led = cdev_to_blmled(led_cdev);
+ struct blinkm_led *led = led_cdev_to_blmled(led_cdev);
struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
switch (color) {
@@ -565,117 +594,175 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int blinkm_probe(struct i2c_client *client)
+static int register_separate_colors(struct i2c_client *client, struct blinkm_data *data)
{
- struct blinkm_data *data;
- struct blinkm_led *led[3];
- int err, i;
+ /* 3 separate classes for red, green, and blue respectively */
+ struct blinkm_led *leds[NUM_LEDS];
+ int err;
char blinkm_led_name[28];
-
- data = devm_kzalloc(&client->dev,
- sizeof(struct blinkm_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- data->i2c_addr = 0x08;
- /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
- data->fw_ver = 0xfe;
- /* firmware version - use fake until we read real value
- * (currently broken - BlinkM confused!) */
- data->script_id = 0x01;
- data->i2c_client = client;
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
- if (err < 0) {
- dev_err(&client->dev, "couldn't register sysfs group\n");
- goto exit;
- }
-
- for (i = 0; i < 3; i++) {
+ /* Register red, green, and blue sysfs classes */
+ for (int i = 0; i < NUM_LEDS; i++) {
/* RED = 0, GREEN = 1, BLUE = 2 */
- led[i] = &data->blinkm_leds[i];
- led[i]->i2c_client = client;
- led[i]->id = i;
- led[i]->led_cdev.max_brightness = 255;
- led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME;
+ leds[i] = &data->blinkm_leds[i];
+ leds[i]->i2c_client = client;
+ leds[i]->id = i;
+ leds[i]->cdev.led_cdev.max_brightness = 255;
+ leds[i]->cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME;
switch (i) {
case RED:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-red",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_red_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failred;
}
break;
case GREEN:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-green",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_green_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failgreen;
}
break;
case BLUE:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-blue",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_blue_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failblue;
}
break;
+ default:
+ break;
} /* end switch */
} /* end for */
-
- /* Initialize the blinkm */
- blinkm_init_hw(client);
-
return 0;
failblue:
- led_classdev_unregister(&led[GREEN]->led_cdev);
-
+ led_classdev_unregister(&leds[GREEN]->cdev.led_cdev);
failgreen:
- led_classdev_unregister(&led[RED]->led_cdev);
-
+ led_classdev_unregister(&leds[RED]->cdev.led_cdev);
failred:
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
-exit:
+
return err;
}
+static int register_multicolor(struct i2c_client *client, struct blinkm_data *data)
+{
+ struct blinkm_led *mc_led;
+ struct mc_subled *mc_led_info;
+ char blinkm_led_name[28];
+ int err;
+
+ /* Register multicolor sysfs class */
+	/* The first element of blinkm_leds is reused for the multicolor device */
+ mc_led = &data->blinkm_leds[RED];
+ mc_led->i2c_client = client;
+
+ mc_led_info = devm_kcalloc(&client->dev, NUM_LEDS, sizeof(*mc_led_info),
+ GFP_KERNEL);
+ if (!mc_led_info)
+ return -ENOMEM;
+
+ mc_led_info[RED].color_index = LED_COLOR_ID_RED;
+ mc_led_info[GREEN].color_index = LED_COLOR_ID_GREEN;
+ mc_led_info[BLUE].color_index = LED_COLOR_ID_BLUE;
+
+ mc_led->cdev.mcled_cdev.subled_info = mc_led_info;
+ mc_led->cdev.mcled_cdev.num_colors = NUM_LEDS;
+ mc_led->cdev.mcled_cdev.led_cdev.brightness = 255;
+ mc_led->cdev.mcled_cdev.led_cdev.max_brightness = 255;
+ mc_led->cdev.mcled_cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ "blinkm-%d-%d:rgb:indicator",
+ client->adapter->nr,
+ client->addr);
+ mc_led->cdev.mcled_cdev.led_cdev.name = blinkm_led_name;
+ mc_led->cdev.mcled_cdev.led_cdev.brightness_set_blocking = blinkm_set_mc_brightness;
+
+ err = led_classdev_multicolor_register(&client->dev, &mc_led->cdev.mcled_cdev);
+ if (err < 0) {
+ dev_err(&client->dev, "couldn't register LED %s\n",
+				mc_led->cdev.mcled_cdev.led_cdev.name);
+ sysfs_remove_group(&client->dev.kobj, &blinkm_group);
+ }
+	return err;
+}
+
+static int blinkm_probe(struct i2c_client *client)
+{
+ struct blinkm_data *data;
+ int err;
+
+ data = devm_kzalloc(&client->dev,
+ sizeof(struct blinkm_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->i2c_addr = 0x08;
+ /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
+ data->fw_ver = 0xfe;
+ /* firmware version - use fake until we read real value
+ * (currently broken - BlinkM confused!)
+ */
+ data->script_id = 0x01;
+ data->i2c_client = client;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
+ if (err < 0) {
+ dev_err(&client->dev, "couldn't register sysfs group\n");
+ return err;
+ }
+
+ if (!IS_ENABLED(CONFIG_LEDS_BLINKM_MULTICOLOR)) {
+ err = register_separate_colors(client, data);
+ if (err < 0)
+ return err;
+ } else {
+ err = register_multicolor(client, data);
+ if (err < 0)
+ return err;
+ }
+
+ blinkm_init_hw(client);
+
+ return 0;
+}
+
static void blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
@@ -683,8 +770,12 @@ static void blinkm_remove(struct i2c_client *client)
	int i;
	/* make sure no workqueue entries are pending */
-	for (i = 0; i < 3; i++)
-		led_classdev_unregister(&data->blinkm_leds[i].led_cdev);
+	if (!IS_ENABLED(CONFIG_LEDS_BLINKM_MULTICOLOR)) {
+		for (i = 0; i < NUM_LEDS; i++)
+			led_classdev_unregister(&data->blinkm_leds[i].cdev.led_cdev);
+	} else {
+		led_classdev_multicolor_unregister(&data->blinkm_leds[RED].cdev.mcled_cdev);
+	}
/* reset rgb */
data->next_red = 0x00;
@@ -740,6 +827,7 @@ static struct i2c_driver blinkm_driver = {
module_i2c_driver(blinkm_driver);
MODULE_AUTHOR("Jan-Simon Moeller <dl9pf@gmx.de>");
+MODULE_AUTHOR("Joseph Strauss <jstrauss@mailbox.org>");
MODULE_DESCRIPTION("BlinkM RGB LED driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 83fcd7b6afff..4d1612d557c8 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -150,7 +150,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
{
struct fwnode_handle *child;
struct gpio_leds_priv *priv;
- int count, ret;
+ int count, used, ret;
count = device_get_child_node_count(dev);
if (!count)
@@ -159,9 +159,11 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
priv = devm_kzalloc(dev, struct_size(priv, leds, count), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
+ priv->num_leds = count;
+ used = 0;
device_for_each_child_node(dev, child) {
- struct gpio_led_data *led_dat = &priv->leds[priv->num_leds];
+ struct gpio_led_data *led_dat = &priv->leds[used];
struct gpio_led led = {};
/*
@@ -197,8 +199,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
/* Set gpiod label to match the corresponding LED name. */
gpiod_set_consumer_name(led_dat->gpiod,
led_dat->cdev.dev->kobj.name);
- priv->num_leds++;
+ used++;
}
+ priv->num_leds = used;
return priv;
}
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 5e1a4d39a107..27bfab3da479 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -392,7 +392,7 @@ static int is31fl319x_parse_child_fw(const struct device *dev,
static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int count;
int ret;
@@ -404,7 +404,7 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
is31->cdef = device_get_match_data(dev);
count = 0;
- fwnode_for_each_available_child_node(fwnode, child)
+ device_for_each_child_node_scoped(dev, child)
count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
@@ -414,33 +414,25 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
"Number of leds defined must be between 1 and %u\n",
is31->cdef->num_leds);
- fwnode_for_each_available_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct is31fl319x_led *led;
u32 reg;
ret = fwnode_property_read_u32(child, "reg", &reg);
- if (ret) {
- ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
- goto put_child_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
- if (reg < 1 || reg > is31->cdef->num_leds) {
- ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
- goto put_child_node;
- }
+ if (reg < 1 || reg > is31->cdef->num_leds)
+ return dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
led = &is31->leds[reg - 1];
- if (led->configured) {
- ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
- goto put_child_node;
- }
+ if (led->configured)
+ return dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
ret = is31fl319x_parse_child_fw(dev, child, led, is31);
- if (ret) {
- ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
- goto put_child_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
led->configured = true;
}
@@ -454,10 +446,6 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
}
return 0;
-
-put_child_node:
- fwnode_handle_put(child);
- return ret;
}
static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index b0a0be77bb33..8793330dd414 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -363,10 +363,9 @@ static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
static int is31fl32xx_parse_dt(struct device *dev,
struct is31fl32xx_priv *priv)
{
- struct device_node *child;
int ret = 0;
- for_each_available_child_of_node(dev_of_node(dev), child) {
+ for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
struct led_init_data init_data = {};
struct is31fl32xx_led_data *led_data =
&priv->leds[priv->num_leds];
@@ -376,7 +375,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
ret = is31fl32xx_parse_child_dt(dev, child, led_data);
if (ret)
- goto err;
+ return ret;
/* Detect if channel is already in use by another child */
other_led_data = is31fl32xx_find_led_data(priv,
@@ -385,8 +384,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
dev_err(dev,
"Node %pOF 'reg' conflicts with another LED\n",
child);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
init_data.fwnode = of_fwnode_handle(child);
@@ -396,17 +394,13 @@ static int is31fl32xx_parse_dt(struct device *dev,
if (ret) {
dev_err(dev, "Failed to register LED for %pOF: %d\n",
child, ret);
- goto err;
+ return ret;
}
priv->num_leds++;
}
return 0;
-
-err:
- of_node_put(child);
- return ret;
}
static const struct of_device_id of_is31fl32xx_match[] = {
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 29e7142dca72..5a2e259679cf 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -965,24 +965,16 @@ EXPORT_SYMBOL_GPL(lp55xx_update_bits);
bool lp55xx_is_extclk_used(struct lp55xx_chip *chip)
{
struct clk *clk;
- int err;
- clk = devm_clk_get(&chip->cl->dev, "32k_clk");
+ clk = devm_clk_get_enabled(&chip->cl->dev, "32k_clk");
if (IS_ERR(clk))
goto use_internal_clk;
- err = clk_prepare_enable(clk);
- if (err)
+ if (clk_get_rate(clk) != LP55XX_CLK_32K)
goto use_internal_clk;
- if (clk_get_rate(clk) != LP55XX_CLK_32K) {
- clk_disable_unprepare(clk);
- goto use_internal_clk;
- }
-
dev_info(&chip->cl->dev, "%dHz external clock used\n", LP55XX_CLK_32K);
- chip->clk = clk;
return true;
use_internal_clk:
@@ -995,9 +987,6 @@ static void lp55xx_deinit_device(struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata = chip->pdata;
- if (chip->clk)
- clk_disable_unprepare(chip->clk);
-
if (pdata->enable_gpiod)
gpiod_set_value(pdata->enable_gpiod, 0);
}
@@ -1173,16 +1162,13 @@ static int lp55xx_parse_multi_led(struct device_node *np,
struct lp55xx_led_config *cfg,
int child_number)
{
- struct device_node *child;
int num_colors = 0, ret;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = lp55xx_parse_multi_led_child(child, cfg, child_number,
num_colors);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
num_colors++;
}
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index 1bb7c559662c..8fd64ec40919 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -193,7 +193,6 @@ struct lp55xx_engine {
*/
struct lp55xx_chip {
struct i2c_client *cl;
- struct clk *clk;
struct lp55xx_platform_data *pdata;
struct mutex lock; /* lock for user-space interface */
int num_leds;
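
The lp55xx clock change is a straight conversion to devm_clk_get_enabled(), which combines the get and prepare_enable steps and registers the disable/unprepare/put unwind with devres; that is why both the manual error handling and the clk member of struct lp55xx_chip can be dropped. A sketch of the resulting shape, assuming those devm semantics:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	#define DEMO_CLK_32K	32768

	static bool demo_extclk_usable(struct device *dev)
	{
		/* Enabled for the device's lifetime; devres unwinds it. */
		struct clk *clk = devm_clk_get_enabled(dev, "32k_clk");

		if (IS_ERR(clk))
			return false;	/* fall back to the internal clock */

		return clk_get_rate(clk) == DEMO_CLK_32K;
	}
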
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index bbd1d359bba4..da99d114bfb2 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -12,6 +12,7 @@
* Eric Miao <eric.miao@marvell.com>
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -113,7 +114,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
{
struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
struct mc13xxx_leds_platform_data *pdata;
- struct device_node *parent, *child;
+ struct device_node *child;
struct device *dev = &pdev->dev;
int i = 0, ret = -ENODATA;
@@ -121,24 +122,23 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
if (!pdata)
return ERR_PTR(-ENOMEM);
- parent = of_get_child_by_name(dev_of_node(dev->parent), "leds");
+ struct device_node *parent __free(device_node) =
+ of_get_child_by_name(dev_of_node(dev->parent), "leds");
if (!parent)
- goto out_node_put;
+ return ERR_PTR(-ENODATA);
ret = of_property_read_u32_array(parent, "led-control",
pdata->led_control,
leds->devtype->num_regs);
if (ret)
- goto out_node_put;
+ return ERR_PTR(ret);
pdata->num_leds = of_get_available_child_count(parent);
pdata->led = devm_kcalloc(dev, pdata->num_leds, sizeof(*pdata->led),
GFP_KERNEL);
- if (!pdata->led) {
- ret = -ENOMEM;
- goto out_node_put;
- }
+ if (!pdata->led)
+ return ERR_PTR(-ENOMEM);
for_each_available_child_of_node(parent, child) {
const char *str;
@@ -158,12 +158,10 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
}
pdata->num_leds = i;
- ret = i > 0 ? 0 : -ENODATA;
-
-out_node_put:
- of_node_put(parent);
+ if (i <= 0)
+ return ERR_PTR(-ENODATA);
- return ret ? ERR_PTR(ret) : pdata;
+ return pdata;
}
#else
static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c
index 40d508510823..a19e8e0b6d1b 100644
--- a/drivers/leds/leds-mt6323.c
+++ b/drivers/leds/leds-mt6323.c
@@ -527,7 +527,6 @@ static int mt6323_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
- struct device_node *child;
struct mt6397_chip *hw = dev_get_drvdata(dev->parent);
struct mt6323_leds *leds;
struct mt6323_led *led;
@@ -565,28 +564,25 @@ static int mt6323_led_probe(struct platform_device *pdev)
return ret;
}
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
struct led_init_data init_data = {};
bool is_wled;
ret = of_property_read_u32(child, "reg", &reg);
if (ret) {
dev_err(dev, "Failed to read led 'reg' property\n");
- goto put_child_node;
+ return ret;
}
if (reg >= max_leds || reg >= MAX_SUPPORTED_LEDS ||
leds->led[reg]) {
dev_err(dev, "Invalid led reg %u\n", reg);
- ret = -EINVAL;
- goto put_child_node;
+ return -EINVAL;
}
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
- if (!led) {
- ret = -ENOMEM;
- goto put_child_node;
- }
+ if (!led)
+ return -ENOMEM;
is_wled = of_property_read_bool(child, "mediatek,is-wled");
@@ -612,7 +608,7 @@ static int mt6323_led_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(leds->dev,
"Failed to LED set default from devicetree\n");
- goto put_child_node;
+ return ret;
}
init_data.fwnode = of_fwnode_handle(child);
@@ -621,15 +617,11 @@ static int mt6323_led_probe(struct platform_device *pdev)
&init_data);
if (ret) {
dev_err(dev, "Failed to register LED: %d\n", ret);
- goto put_child_node;
+ return ret;
}
}
return 0;
-
-put_child_node:
- of_node_put(child);
- return ret;
}
static void mt6323_led_remove(struct platform_device *pdev)
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 77213b79f84d..af5a908b8d9e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -423,7 +423,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
struct device_node *gpio_ext_np;
struct platform_device *gpio_ext_pdev;
struct device *gpio_ext_dev;
- struct device_node *child;
struct netxbig_gpio_ext *gpio_ext;
struct netxbig_led_timer *timers;
struct netxbig_led *leds, *led;
@@ -507,7 +506,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
led = leds;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
const char *string;
int *mode_val;
int num_modes;
@@ -515,17 +514,17 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
ret = of_property_read_u32(child, "mode-addr",
&led->mode_addr);
if (ret)
- goto err_node_put;
+ goto put_device;
ret = of_property_read_u32(child, "bright-addr",
&led->bright_addr);
if (ret)
- goto err_node_put;
+ goto put_device;
ret = of_property_read_u32(child, "max-brightness",
&led->bright_max);
if (ret)
- goto err_node_put;
+ goto put_device;
mode_val =
devm_kcalloc(dev,
@@ -533,7 +532,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
GFP_KERNEL);
if (!mode_val) {
ret = -ENOMEM;
- goto err_node_put;
+ goto put_device;
}
for (i = 0; i < NETXBIG_LED_MODE_NUM; i++)
@@ -542,12 +541,12 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
ret = of_property_count_u32_elems(child, "mode-val");
if (ret < 0 || ret % 2) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
num_modes = ret / 2;
if (num_modes > NETXBIG_LED_MODE_NUM) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
for (i = 0; i < num_modes; i++) {
@@ -560,7 +559,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
"mode-val", 2 * i + 1, &val);
if (mode >= NETXBIG_LED_MODE_NUM) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
mode_val[mode] = val;
}
@@ -583,8 +582,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
return 0;
-err_node_put:
- of_node_put(child);
put_device:
put_device(gpio_ext_dev);
return ret;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 9f3fac66a11c..1b47acf54720 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -215,8 +215,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led,
if (other->state == PCA9532_PWM1) {
if (other->ldev.blink_delay_on != delay_on ||
other->ldev.blink_delay_off != delay_off) {
- dev_err(&led->client->dev,
- "HW can handle only one blink configuration at a time\n");
+ /* HW can handle only one blink configuration at a time */
return -EINVAL;
}
}
@@ -224,7 +223,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led,
psc = ((delay_on + delay_off) * PCA9532_PWM_PERIOD_DIV - 1) / 1000;
if (psc > U8_MAX) {
- dev_err(&led->client->dev, "Blink period too long to be handled by hardware\n");
+ /* Blink period too long to be handled by hardware */
return -EINVAL;
}
@@ -506,7 +505,6 @@ static struct pca9532_platform_data *
pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
{
struct pca9532_platform_data *pdata;
- struct device_node *child;
int devid, maxleds;
int i = 0;
const char *state;
@@ -525,7 +523,7 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_u8_array(np, "nxp,psc", &pdata->psc[PCA9532_PWM_ID_0],
ARRAY_SIZE(pdata->psc));
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
if (of_property_read_string(child, "label",
&pdata->leds[i].name))
pdata->leds[i].name = child->name;
@@ -538,10 +536,8 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
else if (!strcmp(state, "keep"))
pdata->leds[i].state = PCA9532_KEEP;
}
- if (++i >= maxleds) {
- of_node_put(child);
+ if (++i >= maxleds)
break;
- }
}
return pdata;
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
index 78215dff1499..11c7bb69573e 100644
--- a/drivers/leds/leds-pca995x.c
+++ b/drivers/leds/leds-pca995x.c
@@ -19,10 +19,6 @@
#define PCA995X_MODE1 0x00
#define PCA995X_MODE2 0x01
#define PCA995X_LEDOUT0 0x02
-#define PCA9955B_PWM0 0x08
-#define PCA9952_PWM0 0x0A
-#define PCA9952_IREFALL 0x43
-#define PCA9955B_IREFALL 0x45
/* Auto-increment disabled. Normal mode */
#define PCA995X_MODE1_CFG 0x00
@@ -34,17 +30,38 @@
#define PCA995X_LDRX_MASK 0x3
#define PCA995X_LDRX_BITS 2
-#define PCA995X_MAX_OUTPUTS 16
+#define PCA995X_MAX_OUTPUTS 24
#define PCA995X_OUTPUTS_PER_REG 4
#define PCA995X_IREFALL_FULL_CFG 0xFF
#define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
-#define PCA995X_TYPE_NON_B 0
-#define PCA995X_TYPE_B 1
-
#define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+struct pca995x_chipdef {
+ unsigned int num_leds;
+ u8 pwm_base;
+ u8 irefall;
+};
+
+static const struct pca995x_chipdef pca9952_chipdef = {
+ .num_leds = 16,
+ .pwm_base = 0x0a,
+ .irefall = 0x43,
+};
+
+static const struct pca995x_chipdef pca9955b_chipdef = {
+ .num_leds = 16,
+ .pwm_base = 0x08,
+ .irefall = 0x45,
+};
+
+static const struct pca995x_chipdef pca9956b_chipdef = {
+ .num_leds = 24,
+ .pwm_base = 0x0a,
+ .irefall = 0x40,
+};
+
struct pca995x_led {
unsigned int led_no;
struct led_classdev ldev;
@@ -54,7 +71,7 @@ struct pca995x_led {
struct pca995x_chip {
struct regmap *regmap;
struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
- int btype;
+ const struct pca995x_chipdef *chipdef;
};
static int pca995x_brightness_set(struct led_classdev *led_cdev,
@@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev,
{
struct pca995x_led *led = ldev_to_led(led_cdev);
struct pca995x_chip *chip = led->chip;
+ const struct pca995x_chipdef *chipdef = chip->chipdef;
u8 ledout_addr, pwmout_addr;
int shift, ret;
- pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
+ pwmout_addr = chipdef->pwm_base + led->led_no;
ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
@@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = {
static int pca995x_probe(struct i2c_client *client)
{
struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
- struct fwnode_handle *np, *child;
struct device *dev = &client->dev;
+ const struct pca995x_chipdef *chipdef;
struct pca995x_chip *chip;
struct pca995x_led *led;
- int i, btype, reg, ret;
+ int i, j, reg, ret;
- btype = (unsigned long)device_get_match_data(&client->dev);
+ chipdef = device_get_match_data(&client->dev);
- np = dev_fwnode(dev);
- if (!np)
+ if (!dev_fwnode(dev))
return -ENODEV;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->btype = btype;
+ chip->chipdef = chipdef;
chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
if (IS_ERR(chip->regmap))
return PTR_ERR(chip->regmap);
i2c_set_clientdata(client, chip);
- fwnode_for_each_available_child_node(np, child) {
+ device_for_each_child_node_scoped(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &reg);
- if (ret) {
- fwnode_handle_put(child);
+ if (ret)
return ret;
- }
- if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
- fwnode_handle_put(child);
+ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg])
return -EINVAL;
- }
led = &chip->leds[reg];
- led_fwnodes[reg] = child;
+ led_fwnodes[reg] = fwnode_handle_get(child);
led->chip = chip;
led->led_no = reg;
led->ldev.brightness_set_blocking = pca995x_brightness_set;
@@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client)
&chip->leds[i].ldev,
&init_data);
if (ret < 0) {
- fwnode_handle_put(child);
+ for (j = i; j < PCA995X_MAX_OUTPUTS; j++)
+ fwnode_handle_put(led_fwnodes[j]);
return dev_err_probe(dev, ret,
"Could not register LED %s\n",
chip->leds[i].ldev.name);
@@ -170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client)
return ret;
/* IREF Output current value for all LEDn outputs */
- return regmap_write(chip->regmap,
- btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
- PCA995X_IREFALL_HALF_CFG);
+ return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG);
}
static const struct i2c_device_id pca995x_id[] = {
- { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
- { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
+ { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef },
+ { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef },
+ { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef },
{}
};
MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = {
- { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
- { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
+ { .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
+	{ .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
+ { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
{},
};
MODULE_DEVICE_TABLE(of, pca995x_of_match);
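
The pca995x rework swaps the boolean btype flag for per-variant const descriptors fetched with device_get_match_data(), which is what makes the third chip (pca9956b) a pure data addition. A minimal sketch of the pattern (the vendor string and names are illustrative):

	#include <linux/mod_devicetable.h>
	#include <linux/property.h>
	#include <linux/types.h>

	struct demo_chipdef {
		unsigned int num_leds;
		u8 pwm_base;
	};

	static const struct demo_chipdef demo_v1 = { .num_leds = 16, .pwm_base = 0x08 };
	static const struct demo_chipdef demo_v2 = { .num_leds = 24, .pwm_base = 0x0a };

	static const struct of_device_id demo_of_match[] = {
		{ .compatible = "vendor,demo-v1", .data = &demo_v1 },
		{ .compatible = "vendor,demo-v2", .data = &demo_v2 },
		{ }
	};

	/* In probe(), one lookup replaces every per-variant conditional. */
	static const struct demo_chipdef *demo_get_chipdef(struct device *dev)
	{
		return device_get_match_data(dev);
	}
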
diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c
index f04db793e8d6..cca98c644aa6 100644
--- a/drivers/leds/leds-sc27xx-bltc.c
+++ b/drivers/leds/leds-sc27xx-bltc.c
@@ -276,7 +276,7 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
static int sc27xx_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev_of_node(dev), *child;
+ struct device_node *np = dev_of_node(dev);
struct sc27xx_led_priv *priv;
u32 base, count, reg;
int err;
@@ -304,17 +304,13 @@ static int sc27xx_led_probe(struct platform_device *pdev)
return err;
}
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
err = of_property_read_u32(child, "reg", &reg);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
- if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active) {
- of_node_put(child);
+ if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active)
return -EINVAL;
- }
priv->leds[reg].fwnode = of_fwnode_handle(child);
priv->leds[reg].active = true;
diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c
index 119eff9471f0..4c468d487486 100644
--- a/drivers/leds/leds-sun50i-a100.c
+++ b/drivers/leds/leds-sun50i-a100.c
@@ -368,7 +368,7 @@ static int sun50i_a100_ledc_suspend(struct device *dev)
if (!xfer_active)
break;
- msleep(1);
+ usleep_range(1000, 1100);
}
clk_disable_unprepare(priv->mod_clk);
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 39f740be058f..4cff8c4b020c 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -452,7 +452,7 @@ static int omnia_mcu_get_features(const struct i2c_client *client)
static int omnia_leds_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct device_node *np = dev_of_node(dev), *child;
+ struct device_node *np = dev_of_node(dev);
struct omnia_leds *leds;
struct omnia_led *led;
int ret, count;
@@ -497,12 +497,10 @@ static int omnia_leds_probe(struct i2c_client *client)
}
led = &leds->leds[0];
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = omnia_led_register(client, led, child);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
led += ret;
}
@@ -532,6 +530,7 @@ static const struct of_device_id of_omnia_leds_match[] = {
{ .compatible = "cznic,turris-omnia-leds", },
{},
};
+MODULE_DEVICE_TABLE(of, of_omnia_leds_match);
static const struct i2c_device_id omnia_id[] = {
{ "omnia" },
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index e74b2ceed1c2..f3c9ef2bfa57 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -1368,7 +1368,6 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
{
struct led_init_data init_data = {};
struct led_classdev *cdev;
- struct device_node *child;
struct mc_subled *info;
struct lpg_led *led;
const char *state;
@@ -1399,12 +1398,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
if (!info)
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = lpg_parse_channel(lpg, child, &led->channels[i]);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
info[i].color_index = led->channels[i]->color;
info[i].intensity = 0;
@@ -1600,7 +1597,6 @@ static int lpg_init_sdam(struct lpg *lpg)
static int lpg_probe(struct platform_device *pdev)
{
- struct device_node *np;
struct lpg *lpg;
int ret;
int i;
@@ -1640,12 +1636,10 @@ static int lpg_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- for_each_available_child_of_node(pdev->dev.of_node, np) {
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, np) {
ret = lpg_add_led(lpg, np);
- if (ret) {
- of_node_put(np);
+ if (ret)
return ret;
- }
}
for (i = 0; i < lpg->num_channels; i++)
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 22bba8e97642..4b0863db901a 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -39,6 +39,8 @@
* (has carrier) or not
* tx - LED blinks on transmitted data
* rx - LED blinks on receive data
+ * tx_err - LED blinks on transmit error
+ * rx_err - LED blinks on receive error
*
* Note: If the user selects a mode that is not supported by hw, default
* behavior is to fall back to software control of the LED. However not every
@@ -144,7 +146,9 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
* checking stats
*/
if (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ||
- test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode))
+ test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode))
schedule_delayed_work(&trigger_data->work, 0);
}
}
@@ -337,6 +341,8 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
case TRIGGER_NETDEV_RX:
+ case TRIGGER_NETDEV_TX_ERR:
+ case TRIGGER_NETDEV_RX_ERR:
bit = attr;
break;
default:
@@ -371,6 +377,8 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
case TRIGGER_NETDEV_RX:
+ case TRIGGER_NETDEV_TX_ERR:
+ case TRIGGER_NETDEV_RX_ERR:
bit = attr;
break;
default:
@@ -429,6 +437,8 @@ DEFINE_NETDEV_TRIGGER(half_duplex, TRIGGER_NETDEV_HALF_DUPLEX);
DEFINE_NETDEV_TRIGGER(full_duplex, TRIGGER_NETDEV_FULL_DUPLEX);
DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX);
DEFINE_NETDEV_TRIGGER(rx, TRIGGER_NETDEV_RX);
+DEFINE_NETDEV_TRIGGER(tx_err, TRIGGER_NETDEV_TX_ERR);
+DEFINE_NETDEV_TRIGGER(rx_err, TRIGGER_NETDEV_RX_ERR);
static ssize_t interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -538,6 +548,8 @@ static struct attribute *netdev_trig_attrs[] = {
&dev_attr_half_duplex.attr,
&dev_attr_rx.attr,
&dev_attr_tx.attr,
+ &dev_attr_rx_err.attr,
+ &dev_attr_tx_err.attr,
&dev_attr_interval.attr,
&dev_attr_offloaded.attr,
NULL
@@ -628,7 +640,9 @@ static void netdev_trig_work(struct work_struct *work)
/* If we are not looking for RX/TX then return */
if (!test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) &&
- !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode))
+ !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) &&
+ !test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) &&
+ !test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode))
return;
dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
@@ -636,7 +650,11 @@ static void netdev_trig_work(struct work_struct *work)
(test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ?
dev_stats->tx_packets : 0) +
(test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ?
- dev_stats->rx_packets : 0);
+ dev_stats->rx_packets : 0) +
+ (test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) ?
+ dev_stats->tx_errors : 0) +
+ (test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode) ?
+ dev_stats->rx_errors : 0);
if (trigger_data->last_activity != new_activity) {
led_stop_software_blink(trigger_data->led_cdev);
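
The new tx_err/rx_err attributes behave like the existing tx/rx ones: user space writes 1 to arm them, after which the worker folds tx_errors/rx_errors from dev_get_stats() into the activity counter. A hypothetical user-space sketch (the LED name is an example, and the LED must already be bound to the netdev trigger):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int demo_enable_err_blink(const char *led)
	{
		char path[128];
		int fd;

		snprintf(path, sizeof(path), "/sys/class/leds/%s/tx_err", led);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
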
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 13626205530d..bede200e32e8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,7 +387,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dma_set_max_seg_size(&dev->ofdev.dev, 65536);
dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
-#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS)
+#if defined(CONFIG_PCI) && defined(CONFIG_ARCH_HAS_DMA_OPS)
/* Set the DMA ops to the ones from the PCI device, this could be
* fishy if we didn't know that on PowerMac it's always direct ops
* or iommu ops that will work fine
@@ -396,7 +396,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
*/
dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
-#endif /* CONFIG_PCI && CONFIG_DMA_OPS */
+#endif /* CONFIG_PCI && CONFIG_ARCH_HAS_DMA_OPS */
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 89450645c230..26bd9ed5e664 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -178,7 +178,7 @@ void __init pmu_backlight_init(void)
}
bd->props.brightness = level;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
printk(KERN_INFO "PMU Backlight initialized (%s)\n", name);
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 9d5703b60937..b0f09c70f1ff 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2334,7 +2334,7 @@ static const struct platform_suspend_ops pmu_pm_ops = {
.valid = pmu_sleep_valid,
};
-static int register_pmu_pm_ops(void)
+static int __init register_pmu_pm_ops(void)
{
if (pmu_kind == PMU_OHARE_BASED)
powerbook_sleep_init_3400();
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 51e6964c1305..acff2f64f251 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2174,6 +2174,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
unsigned int journal_section, journal_entry;
unsigned int journal_read_pos;
+ sector_t recalc_sector;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
@@ -2314,6 +2315,7 @@ offload_to_thread:
goto lock_retry;
}
}
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -2368,7 +2370,7 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
goto skip_check;
if (ic->mode == 'B') {
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
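The dm-integrity change above is a small ordering fix: ic->sb->recalc_sector is sampled once while endio_wait.lock is held, and the later range check compares against that snapshot instead of re-reading a field the recalc thread may have advanced in the meantime. A reduced, pthread-based sketch of the snapshot-under-lock pattern, with hypothetical names:

#include <pthread.h>
#include <stdint.h>

struct state {
	pthread_spinlock_t lock;  /* stands in for ic->endio_wait.lock */
	uint64_t recalc_sector;   /* advanced concurrently by a recalc thread */
};

/* Sample the shared field once under the lock and decide from the
 * local copy, so the decision cannot see two different values. */
static int range_already_recalculated(struct state *st,
				      uint64_t start, uint64_t n_sectors)
{
	uint64_t snapshot;

	pthread_spin_lock(&st->lock);
	snapshot = st->recalc_sector;
	pthread_spin_unlock(&st->lock);

	return start + n_sectors <= snapshot;
}

int main(void)
{
	struct state st;

	pthread_spin_init(&st.lock, PTHREAD_PROCESS_PRIVATE);
	st.recalc_sector = 1024;
	return range_already_recalculated(&st, 0, 512) ? 0 : 1;
}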
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 0c3323e0adb2..63682d27fc8d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3949,7 +3949,9 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
/* Try loading the bitmap unless "raid0", which does not have one */
if (!rs_is_raid0(rs) &&
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
- r = md_bitmap_load(&rs->md);
+ struct mddev *mddev = &rs->md;
+
+ r = mddev->bitmap_ops->load(mddev);
if (r)
DMERR("Failed to load bitmap");
}
@@ -4066,7 +4068,8 @@ static int raid_preresume(struct dm_target *ti)
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
- r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
+ r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
+ chunksize, false);
if (r)
DMERR("Failed to resize bitmap");
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index cf659c8feb29..24ba9a10444c 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/jump_label.h>
+#include <linux/security.h>
#define DM_MSG_PREFIX "verity"
@@ -930,6 +931,41 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->dma_alignment = limits->logical_block_size - 1;
}
+#ifdef CONFIG_SECURITY
+
+static int verity_init_sig(struct dm_verity *v, const void *sig,
+ size_t sig_size)
+{
+ v->sig_size = sig_size;
+
+ if (sig) {
+ v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL);
+ if (!v->root_digest_sig)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void verity_free_sig(struct dm_verity *v)
+{
+ kfree(v->root_digest_sig);
+}
+
+#else
+
+static inline int verity_init_sig(struct dm_verity *v, const void *sig,
+ size_t sig_size)
+{
+ return 0;
+}
+
+static inline void verity_free_sig(struct dm_verity *v)
+{
+}
+
+#endif /* CONFIG_SECURITY */
+
static void verity_dtr(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
@@ -949,6 +985,7 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->initial_hashstate);
kfree(v->root_digest);
kfree(v->zero_digest);
+ verity_free_sig(v);
if (v->ahash_tfm) {
static_branch_dec(&ahash_enabled);
@@ -1418,6 +1455,13 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Root hash verification failed";
goto bad;
}
+
+ r = verity_init_sig(v, verify_args.sig, verify_args.sig_size);
+ if (r < 0) {
+ ti->error = "Cannot allocate root digest signature";
+ goto bad;
+ }
+
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
@@ -1559,8 +1603,79 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
return 0;
}
+#ifdef CONFIG_SECURITY
+
+#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
+
+static int verity_security_set_signature(struct block_device *bdev,
+ struct dm_verity *v)
+{
+ /*
+ * If the dm-verity target is unsigned, v->root_digest_sig will
+ * be NULL, and the hook call is still required to let LSMs mark
+ * the device as unsigned. This information is crucial for LSMs to
+ * block operations such as execution on unsigned files.
+ */
+ return security_bdev_setintegrity(bdev,
+ LSM_INT_DMVERITY_SIG_VALID,
+ v->root_digest_sig,
+ v->sig_size);
+}
+
+#else
+
+static inline int verity_security_set_signature(struct block_device *bdev,
+ struct dm_verity *v)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */
+
+/*
+ * Expose verity target's root hash and signature data to LSMs before resume.
+ *
+ * Returns 0 on success, or -ENOMEM if the system is out of memory.
+ */
+static int verity_preresume(struct dm_target *ti)
+{
+ struct block_device *bdev;
+ struct dm_verity_digest root_digest;
+ struct dm_verity *v;
+ int r;
+
+ v = ti->private;
+ bdev = dm_disk(dm_table_get_md(ti->table))->part0;
+ root_digest.digest = v->root_digest;
+ root_digest.digest_len = v->digest_size;
+ if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
+ root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
+ else
+ root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+
+ r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
+ sizeof(root_digest));
+ if (r)
+ return r;
+
+ r = verity_security_set_signature(bdev, v);
+ if (r)
+ goto bad;
+
+ return 0;
+
+bad:
+
+ security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);
+
+ return r;
+}
+
+#endif /* CONFIG_SECURITY */
+
static struct target_type verity_target = {
.name = "verity",
+/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.version = {1, 10, 0},
.module = THIS_MODULE,
@@ -1571,6 +1686,9 @@ static struct target_type verity_target = {
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
+#ifdef CONFIG_SECURITY
+ .preresume = verity_preresume,
+#endif /* CONFIG_SECURITY */
};
module_dm(verity);
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index aac3a1b1d94a..754e70bb5fe0 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -45,6 +45,10 @@ struct dm_verity {
u8 *salt; /* salt: its size is salt_size */
u8 *initial_hashstate; /* salted initial state, if shash_tfm is set */
u8 *zero_digest; /* digest for a zero block */
+#ifdef CONFIG_SECURITY
+ u8 *root_digest_sig; /* signature of the root digest */
+ unsigned int sig_size; /* root digest signature size */
+#endif /* CONFIG_SECURITY */
unsigned int salt_size;
sector_t data_start; /* data offset in 512-byte sectors */
sector_t hash_start; /* hash start in blocks */
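Summarizing the dm-verity side: verity_ctr() now duplicates the root-hash signature, verity_preresume() hands the root digest (always) and the signature (possibly NULL) to LSMs via security_bdev_setintegrity(), and a failure in the signature hook rolls back the root-hash registration. A condensed, hypothetical sketch of that control flow, with the security hook stubbed out:

#include <stddef.h>

/* Hypothetical stand-ins for the LSM hook and its data types. */
enum integrity_type { ROOTHASH, SIG_VALID };

static int set_integrity(enum integrity_type t, const void *data, size_t len)
{
	(void)t; (void)data; (void)len;
	return 0; /* pretend the LSM accepted the data */
}

static int preresume(const void *digest, size_t digest_len,
		     const void *sig, size_t sig_len)
{
	int r;

	r = set_integrity(ROOTHASH, digest, digest_len);
	if (r)
		return r;

	/* Called even when sig == NULL so LSMs can mark the device
	 * as unsigned rather than unknown. */
	r = set_integrity(SIG_VALID, sig, sig_len);
	if (r)
		/* Roll back: clear the root hash registered above. */
		set_integrity(ROOTHASH, NULL, 0);

	return r;
}

int main(void)
{
	unsigned char digest[32] = { 0 };

	return preresume(digest, sizeof(digest), NULL, 0);
}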
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 08232d8dc815..29da10e6f703 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -32,11 +32,210 @@
#include "md.h"
#include "md-bitmap.h"
+#define BITMAP_MAJOR_LO 3
+/* Version 4 insists the bitmap is in little-endian order;
+ * with version 3, it is host-endian, which is non-portable.
+ * Version 5 is currently set only for clustered devices.
+ */
+#define BITMAP_MAJOR_HI 4
+#define BITMAP_MAJOR_CLUSTERED 5
+#define BITMAP_MAJOR_HOSTENDIAN 3
+
+/*
+ * in-memory bitmap:
+ *
+ * Use 16 bit block counters to track pending writes to each "chunk".
+ * The 2 high order bits are special-purpose, the first is a flag indicating
+ * whether a resync is needed. The second is a flag indicating whether a
+ * resync is active.
+ * This means that the counter is actually 14 bits:
+ *
+ * +--------+--------+------------------------------------------------+
+ * | resync | resync | counter |
+ * | needed | active | |
+ * | (0-1) | (0-1) | (0-16383) |
+ * +--------+--------+------------------------------------------------+
+ *
+ * The "resync needed" bit is set when:
+ * a '1' bit is read from storage at startup.
+ * a write request fails on some drives
+ * a resync is aborted on a chunk with 'resync active' set
+ * It is cleared (and resync-active set) when a resync starts across all drives
+ * of the chunk.
+ *
+ *
+ * The "resync active" bit is set when:
+ * a resync is started on all drives, and resync_needed is set.
+ * resync_needed will be cleared (as long as resync_active wasn't already set).
+ * It is cleared when a resync completes.
+ *
+ * The counter counts pending write requests, plus the on-disk bit.
+ * When the counter is '1' and the resync bits are clear, the on-disk
+ * bit can be cleared as well, thus setting the counter to 0.
+ * When we set a bit, or increment the counter (to start a write), if the
+ * field is 0, we first set the disk bit and set the counter to 1.
+ *
+ * If the counter is 0, the on-disk bit is clear and the stripe is clean
+ * Anything that dirties the stripe pushes the counter to 2 (at least)
+ * and sets the on-disk bit (lazily).
+ * If a periodic sweep finds the counter at 2, it is decremented to 1.
+ * If the sweep finds the counter at 1, the on-disk bit is cleared and the
+ * counter goes to zero.
+ *
+ * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
+ * counters as a fallback when "page" memory cannot be allocated:
+ *
+ * Normal case (page memory allocated):
+ *
+ * page pointer (32-bit)
+ *
+ * [ ] ------+
+ * |
+ * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
+ * c1 c2 c2048
+ *
+ * Hijacked case (page memory allocation failed):
+ *
+ * hijacked page pointer (32-bit)
+ *
+ * [ ][ ] (no page memory allocated)
+ * counter #1 (16-bit) counter #2 (16-bit)
+ *
+ */
+
+#define PAGE_BITS (PAGE_SIZE << 3)
+#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
+
+#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
+#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
+#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
+
+/* how many counters per page? */
+#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
+/* same, except a shift value for more efficient bitops */
+#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
+/* same, except a mask value for more efficient bitops */
+#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
+
+#define BITMAP_BLOCK_SHIFT 9
+
+/*
+ * bitmap structures:
+ */
+
+/* the in-memory bitmap is represented by bitmap_pages */
+struct bitmap_page {
+ /*
+ * map points to the actual memory page
+ */
+ char *map;
+ /*
+ * in emergencies (when map cannot be alloced), hijack the map
+ * pointer and use it as two counters itself
+ */
+ unsigned int hijacked:1;
+ /*
+ * If any counter in this page is '1' or '2' - and so could be
+ * cleared then that page is marked as 'pending'
+ */
+ unsigned int pending:1;
+ /*
+ * count of dirty bits on the page
+ */
+ unsigned int count:30;
+};
+
+/* the main bitmap structure - one per mddev */
+struct bitmap {
+
+ struct bitmap_counts {
+ spinlock_t lock;
+ struct bitmap_page *bp;
+ /* total number of pages in the bitmap */
+ unsigned long pages;
+ /* number of pages not yet allocated */
+ unsigned long missing_pages;
+ /* chunksize = 2^chunkshift (for bitops) */
+ unsigned long chunkshift;
+ /* total number of data chunks for the array */
+ unsigned long chunks;
+ } counts;
+
+ struct mddev *mddev; /* the md device that the bitmap is for */
+
+ __u64 events_cleared;
+ int need_sync;
+
+ struct bitmap_storage {
+ /* backing disk file */
+ struct file *file;
+ /* cached copy of the bitmap file superblock */
+ struct page *sb_page;
+ unsigned long sb_index;
+ /* list of cache pages for the file */
+ struct page **filemap;
+ /* attributes associated filemap pages */
+ unsigned long *filemap_attr;
+ /* number of pages in the file */
+ unsigned long file_pages;
+ /* total bytes in the bitmap */
+ unsigned long bytes;
+ } storage;
+
+ unsigned long flags;
+
+ int allclean;
+
+ atomic_t behind_writes;
+ /* highest actual value at runtime */
+ unsigned long behind_writes_used;
+
+ /*
+ * the bitmap daemon - periodically wakes up and sweeps the bitmap
+ * file, cleaning up bits and flushing out pages to disk as necessary
+ */
+ unsigned long daemon_lastrun; /* jiffies of last run */
+ /*
+ * when we lasted called end_sync to update bitmap with resync
+ * progress.
+ */
+ unsigned long last_end_sync;
+
+ /* pending writes to the bitmap file */
+ atomic_t pending_writes;
+ wait_queue_head_t write_wait;
+ wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
+
+ struct kernfs_node *sysfs_can_clear;
+ /* slot offset for clustered env */
+ int cluster_slot;
+};
+
+static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, bool init);
+
static inline char *bmname(struct bitmap *bitmap)
{
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
+static bool __bitmap_enabled(struct bitmap *bitmap)
+{
+ return bitmap->storage.filemap &&
+ !test_bit(BITMAP_STALE, &bitmap->flags);
+}
+
+static bool bitmap_enabled(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return false;
+
+ return __bitmap_enabled(bitmap);
+}
+
/*
* check a page and, if necessary, allocate it (or hijack it if the alloc fails)
*
@@ -360,7 +559,7 @@ static int read_file_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, blocksize, false);
+ bh = alloc_page_buffers(page, blocksize);
if (!bh) {
ret = -ENOMEM;
goto out;
@@ -472,9 +671,10 @@ static void md_bitmap_wait_writes(struct bitmap *bitmap)
/* update the event counter and sync the superblock to disk */
-void md_bitmap_update_sb(struct bitmap *bitmap)
+static void bitmap_update_sb(void *data)
{
bitmap_super_t *sb;
+ struct bitmap *bitmap = data;
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
return;
@@ -510,10 +710,8 @@ void md_bitmap_update_sb(struct bitmap *bitmap)
write_sb_page(bitmap, bitmap->storage.sb_index,
bitmap->storage.sb_page, 1);
}
-EXPORT_SYMBOL(md_bitmap_update_sb);
-/* print out the bitmap file superblock */
-void md_bitmap_print_sb(struct bitmap *bitmap)
+static void bitmap_print_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
@@ -760,7 +958,7 @@ out_no_sb:
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
} else {
- md_bitmap_print_sb(bitmap);
+ bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
}
@@ -893,7 +1091,7 @@ static void md_bitmap_file_unmap(struct bitmap_storage *store)
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
if (bitmap->storage.file) {
pr_warn("%s: kicking failed bitmap file %pD4 from array!\n",
@@ -1028,13 +1226,13 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
-void md_bitmap_unplug(struct bitmap *bitmap)
+static void __bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
int writing = 0;
- if (!md_bitmap_enabled(bitmap))
+ if (!__bitmap_enabled(bitmap))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1060,7 +1258,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
md_bitmap_file_kick(bitmap);
}
-EXPORT_SYMBOL(md_bitmap_unplug);
struct bitmap_unplug_work {
struct work_struct work;
@@ -1073,11 +1270,11 @@ static void md_bitmap_unplug_fn(struct work_struct *work)
struct bitmap_unplug_work *unplug_work =
container_of(work, struct bitmap_unplug_work, work);
- md_bitmap_unplug(unplug_work->bitmap);
+ __bitmap_unplug(unplug_work->bitmap);
complete(unplug_work->done);
}
-void md_bitmap_unplug_async(struct bitmap *bitmap)
+static void bitmap_unplug_async(struct bitmap *bitmap)
{
DECLARE_COMPLETION_ONSTACK(done);
struct bitmap_unplug_work unplug_work;
@@ -1089,7 +1286,19 @@ void md_bitmap_unplug_async(struct bitmap *bitmap)
queue_work(md_bitmap_wq, &unplug_work.work);
wait_for_completion(&done);
}
-EXPORT_SYMBOL(md_bitmap_unplug_async);
+
+static void bitmap_unplug(struct mddev *mddev, bool sync)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
+
+ if (sync)
+ __bitmap_unplug(bitmap);
+ else
+ bitmap_unplug_async(bitmap);
+}
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -1226,22 +1435,21 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
return ret;
}
-void md_bitmap_write_all(struct bitmap *bitmap)
+/* just flag bitmap pages as needing to be written. */
+static void bitmap_write_all(struct mddev *mddev)
{
- /* We don't actually write all bitmap blocks here,
- * just flag them as needing to be written
- */
int i;
+ struct bitmap *bitmap = mddev->bitmap;
if (!bitmap || !bitmap->storage.filemap)
return;
+
+ /* Only one copy, so nothing needed */
if (bitmap->storage.file)
- /* Only one copy, so nothing needed */
return;
for (i = 0; i < bitmap->storage.file_pages; i++)
- set_page_attr(bitmap, i,
- BITMAP_PAGE_NEEDWRITE);
+ set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
}
@@ -1290,7 +1498,7 @@ out:
* bitmap daemon -- periodically wakes up to clean bits and flush pages
* out to disk
*/
-void md_bitmap_daemon_work(struct mddev *mddev)
+static void bitmap_daemon_work(struct mddev *mddev)
{
struct bitmap *bitmap;
unsigned long j;
@@ -1461,8 +1669,11 @@ __acquires(bitmap->lock)
&(bitmap->bp[page].map[pageoff]);
}
-int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
+static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool behind)
{
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return 0;
@@ -1523,13 +1734,15 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
}
return 0;
}
-EXPORT_SYMBOL(md_bitmap_startwrite);
-void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int success, int behind)
+static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool success, bool behind)
{
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return;
+
if (behind) {
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
@@ -1576,26 +1789,27 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
sectors = 0;
}
}
-EXPORT_SYMBOL(md_bitmap_endwrite);
-static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
- int degraded)
+static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
+ sector_t *blocks, bool degraded)
{
bitmap_counter_t *bmc;
- int rv;
+ bool rv;
+
if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
*blocks = 1024;
- return 1; /* always resync if no bitmap */
+ return true; /* always resync if no bitmap */
}
spin_lock_irq(&bitmap->counts.lock);
+
+ rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
- rv = 0;
if (bmc) {
/* locked */
- if (RESYNC(*bmc))
- rv = 1;
- else if (NEEDED(*bmc)) {
- rv = 1;
+ if (RESYNC(*bmc)) {
+ rv = true;
+ } else if (NEEDED(*bmc)) {
+ rv = true;
if (!degraded) { /* don't set/clear bits if degraded */
*bmc |= RESYNC_MASK;
*bmc &= ~NEEDED_MASK;
@@ -1603,11 +1817,12 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
}
}
spin_unlock_irq(&bitmap->counts.lock);
+
return rv;
}
-int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
- int degraded)
+static bool bitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
{
/* bitmap_start_sync must always report on multiples of whole
* pages, otherwise resync (which is very PAGE_SIZE based) will
@@ -1616,21 +1831,22 @@ int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *block
* At least PAGE_SIZE>>9 blocks are covered.
* Return the 'or' of the result.
*/
- int rv = 0;
+ bool rv = false;
sector_t blocks1;
*blocks = 0;
while (*blocks < (PAGE_SIZE>>9)) {
- rv |= __bitmap_start_sync(bitmap, offset,
+ rv |= __bitmap_start_sync(mddev->bitmap, offset,
&blocks1, degraded);
offset += blocks1;
*blocks += blocks1;
}
+
return rv;
}
-EXPORT_SYMBOL(md_bitmap_start_sync);
-void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
+static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
+ sector_t *blocks, bool aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
@@ -1659,9 +1875,14 @@ void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks
unlock:
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
-EXPORT_SYMBOL(md_bitmap_end_sync);
-void md_bitmap_close_sync(struct bitmap *bitmap)
+static void bitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ __bitmap_end_sync(mddev->bitmap, offset, blocks, true);
+}
+
+static void bitmap_close_sync(struct mddev *mddev)
{
/* Sync has finished, and any bitmap chunks that weren't synced
* properly have been aborted. It remains to us to clear the
@@ -1669,19 +1890,23 @@ void md_bitmap_close_sync(struct bitmap *bitmap)
*/
sector_t sector = 0;
sector_t blocks;
+ struct bitmap *bitmap = mddev->bitmap;
+
if (!bitmap)
return;
+
while (sector < bitmap->mddev->resync_max_sectors) {
- md_bitmap_end_sync(bitmap, sector, &blocks, 0);
+ __bitmap_end_sync(bitmap, sector, &blocks, false);
sector += blocks;
}
}
-EXPORT_SYMBOL(md_bitmap_close_sync);
-void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
+static void bitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
+ bool force)
{
sector_t s = 0;
sector_t blocks;
+ struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
@@ -1700,34 +1925,32 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
- md_bitmap_end_sync(bitmap, s, &blocks, 0);
+ __bitmap_end_sync(bitmap, s, &blocks, false);
s += blocks;
}
bitmap->last_end_sync = jiffies;
sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
}
-EXPORT_SYMBOL(md_bitmap_cond_end_sync);
-void md_bitmap_sync_with_cluster(struct mddev *mddev,
- sector_t old_lo, sector_t old_hi,
- sector_t new_lo, sector_t new_hi)
+static void bitmap_sync_with_cluster(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi)
{
struct bitmap *bitmap = mddev->bitmap;
sector_t sector, blocks = 0;
for (sector = old_lo; sector < new_lo; ) {
- md_bitmap_end_sync(bitmap, sector, &blocks, 0);
+ __bitmap_end_sync(bitmap, sector, &blocks, false);
sector += blocks;
}
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
for (sector = old_hi; sector < new_hi; ) {
- md_bitmap_start_sync(bitmap, sector, &blocks, 0);
+ bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks;
}
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
-EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
@@ -1756,12 +1979,18 @@ static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, in
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
-void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
+static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+ unsigned long e)
{
unsigned long chunk;
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
+
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
if (sec < bitmap->mddev->recovery_cp)
@@ -1773,10 +2002,7 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long
}
}
-/*
- * flush out any pending updates
- */
-void md_bitmap_flush(struct mddev *mddev)
+static void bitmap_flush(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
long sleep;
@@ -1789,23 +2015,21 @@ void md_bitmap_flush(struct mddev *mddev)
*/
sleep = mddev->bitmap_info.daemon_sleep * 2;
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- md_bitmap_daemon_work(mddev);
+ bitmap_daemon_work(mddev);
if (mddev->bitmap_info.external)
md_super_wait(mddev);
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
}
-/*
- * free memory that was allocated
- */
-void md_bitmap_free(struct bitmap *bitmap)
+static void md_bitmap_free(void *data)
{
unsigned long k, pages;
struct bitmap_page *bp;
+ struct bitmap *bitmap = data;
if (!bitmap) /* there was no bitmap */
return;
@@ -1836,9 +2060,8 @@ void md_bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
-EXPORT_SYMBOL(md_bitmap_free);
-void md_bitmap_wait_behind_writes(struct mddev *mddev)
+static void bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -1852,14 +2075,14 @@ void md_bitmap_wait_behind_writes(struct mddev *mddev)
}
}
-void md_bitmap_destroy(struct mddev *mddev)
+static void bitmap_destroy(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap) /* there was no bitmap */
return;
- md_bitmap_wait_behind_writes(mddev);
+ bitmap_wait_behind_writes(mddev);
if (!mddev->serialize_policy)
mddev_destroy_serial_pool(mddev, NULL);
@@ -1878,7 +2101,7 @@ void md_bitmap_destroy(struct mddev *mddev)
* if this returns an error, bitmap_destroy must be called to clean up
* once mddev->bitmap is set
*/
-struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
+static struct bitmap *__bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1948,7 +2171,8 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
goto error;
bitmap->daemon_lastrun = jiffies;
- err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
+ err = __bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize,
+ true);
if (err)
goto error;
@@ -1965,7 +2189,18 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
return ERR_PTR(err);
}
-int md_bitmap_load(struct mddev *mddev)
+static int bitmap_create(struct mddev *mddev, int slot)
+{
+ struct bitmap *bitmap = __bitmap_create(mddev, slot);
+
+ if (IS_ERR(bitmap))
+ return PTR_ERR(bitmap);
+
+ mddev->bitmap = bitmap;
+ return 0;
+}
+
+static int bitmap_load(struct mddev *mddev)
{
int err = 0;
sector_t start = 0;
@@ -1989,10 +2224,10 @@ int md_bitmap_load(struct mddev *mddev)
*/
while (sector < mddev->resync_max_sectors) {
sector_t blocks;
- md_bitmap_start_sync(bitmap, sector, &blocks, 0);
+ bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks;
}
- md_bitmap_close_sync(bitmap);
+ bitmap_close_sync(mddev);
if (mddev->degraded == 0
|| bitmap->events_cleared == mddev->events)
@@ -2014,22 +2249,21 @@ int md_bitmap_load(struct mddev *mddev)
mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
md_wakeup_thread(mddev->thread);
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
err = -EIO;
out:
return err;
}
-EXPORT_SYMBOL_GPL(md_bitmap_load);
/* caller needs to free the returned bitmap with md_bitmap_free() */
-struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
+static void *bitmap_get_from_slot(struct mddev *mddev, int slot)
{
int rv = 0;
struct bitmap *bitmap;
- bitmap = md_bitmap_create(mddev, slot);
+ bitmap = __bitmap_create(mddev, slot);
if (IS_ERR(bitmap)) {
rv = PTR_ERR(bitmap);
return ERR_PTR(rv);
@@ -2043,20 +2277,19 @@ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
return bitmap;
}
-EXPORT_SYMBOL(get_bitmap_from_slot);
/* Loads the bitmap associated with slot and copies the resync information
* to our bitmap
*/
-int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
- sector_t *low, sector_t *high, bool clear_bits)
+static int bitmap_copy_from_slot(struct mddev *mddev, int slot, sector_t *low,
+ sector_t *high, bool clear_bits)
{
int rv = 0, i, j;
sector_t block, lo = 0, hi = 0;
struct bitmap_counts *counts;
struct bitmap *bitmap;
- bitmap = get_bitmap_from_slot(mddev, slot);
+ bitmap = bitmap_get_from_slot(mddev, slot);
if (IS_ERR(bitmap)) {
pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
return -1;
@@ -2076,53 +2309,59 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
}
if (clear_bits) {
- md_bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
* BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
for (i = 0; i < bitmap->storage.file_pages; i++)
if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
- md_bitmap_unplug(bitmap);
+ __bitmap_unplug(bitmap);
}
- md_bitmap_unplug(mddev->bitmap);
+ __bitmap_unplug(mddev->bitmap);
*low = lo;
*high = hi;
md_bitmap_free(bitmap);
return rv;
}
-EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
+static void bitmap_set_pages(void *data, unsigned long pages)
+{
+ struct bitmap *bitmap = data;
+
+ bitmap->counts.pages = pages;
+}
-void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
{
- unsigned long chunk_kb;
+ struct bitmap_storage *storage;
struct bitmap_counts *counts;
+ struct bitmap *bitmap = data;
+ bitmap_super_t *sb;
if (!bitmap)
- return;
+ return -ENOENT;
+
+ sb = kmap_local_page(bitmap->storage.sb_page);
+ stats->sync_size = le64_to_cpu(sb->sync_size);
+ kunmap_local(sb);
counts = &bitmap->counts;
+ stats->missing_pages = counts->missing_pages;
+ stats->pages = counts->pages;
- chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
- seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
- "%lu%s chunk",
- counts->pages - counts->missing_pages,
- counts->pages,
- (counts->pages - counts->missing_pages)
- << (PAGE_SHIFT - 10),
- chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
- chunk_kb ? "KB" : "B");
- if (bitmap->storage.file) {
- seq_printf(seq, ", file: ");
- seq_file_path(seq, bitmap->storage.file, " \t\n");
- }
+ storage = &bitmap->storage;
+ stats->file_pages = storage->file_pages;
+ stats->file = storage->file;
- seq_printf(seq, "\n");
+ stats->behind_writes = atomic_read(&bitmap->behind_writes);
+ stats->behind_wait = wq_has_sleeper(&bitmap->behind_wait);
+ stats->events_cleared = bitmap->events_cleared;
+ return 0;
}
-int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
- int chunksize, int init)
+static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, bool init)
{
/* If chunk_size is 0, choose an appropriate chunk size.
* Then possibly allocate new storage space.
@@ -2320,14 +2559,24 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
spin_unlock_irq(&bitmap->counts.lock);
if (!init) {
- md_bitmap_unplug(bitmap);
+ __bitmap_unplug(bitmap);
bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
}
ret = 0;
err:
return ret;
}
-EXPORT_SYMBOL_GPL(md_bitmap_resize);
+
+static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize,
+ bool init)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return 0;
+
+ return __bitmap_resize(bitmap, blocks, chunksize, init);
+}
static ssize_t
location_show(struct mddev *mddev, char *page)
@@ -2367,7 +2616,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
goto out;
}
- md_bitmap_destroy(mddev);
+ bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
@@ -2377,7 +2626,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
} else {
/* No bitmap, OK to set a location */
long long offset;
- struct bitmap *bitmap;
if (strncmp(buf, "none", 4) == 0)
/* nothing to be done */;
@@ -2404,17 +2652,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
}
mddev->bitmap_info.offset = offset;
- bitmap = md_bitmap_create(mddev, -1);
- if (IS_ERR(bitmap)) {
- rv = PTR_ERR(bitmap);
+ rv = bitmap_create(mddev, -1);
+ if (rv)
goto out;
- }
- mddev->bitmap = bitmap;
- rv = md_bitmap_load(mddev);
+ rv = bitmap_load(mddev);
if (rv) {
mddev->bitmap_info.offset = 0;
- md_bitmap_destroy(mddev);
+ bitmap_destroy(mddev);
goto out;
}
}
@@ -2450,6 +2695,7 @@ space_show(struct mddev *mddev, char *page)
static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
+ struct bitmap *bitmap;
unsigned long sectors;
int rv;
@@ -2460,8 +2706,8 @@ space_store(struct mddev *mddev, const char *buf, size_t len)
if (sectors == 0)
return -EINVAL;
- if (mddev->bitmap &&
- sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
+ bitmap = mddev->bitmap;
+ if (bitmap && sectors < (bitmap->storage.bytes + 511) >> 9)
return -EFBIG; /* Bitmap is too big for this small space */
/* could make sure it isn't too big, but that isn't really
@@ -2569,7 +2815,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
mddev_create_serial_pool(mddev, rdev);
}
if (old_mwb != backlog)
- md_bitmap_update_sb(mddev->bitmap);
+ bitmap_update_sb(mddev->bitmap);
mddev_unlock_and_resume(mddev);
return len;
@@ -2638,10 +2884,13 @@ __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
int len;
+ struct bitmap *bitmap;
+
spin_lock(&mddev->lock);
- if (mddev->bitmap)
- len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
- "false" : "true"));
+ bitmap = mddev->bitmap;
+ if (bitmap)
+ len = sprintf(page, "%s\n", (bitmap->need_sync ? "false" :
+ "true"));
else
len = sprintf(page, "\n");
spin_unlock(&mddev->lock);
@@ -2650,17 +2899,24 @@ static ssize_t can_clear_show(struct mddev *mddev, char *page)
static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
- if (mddev->bitmap == NULL)
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
return -ENOENT;
- if (strncmp(buf, "false", 5) == 0)
- mddev->bitmap->need_sync = 1;
- else if (strncmp(buf, "true", 4) == 0) {
+
+ if (strncmp(buf, "false", 5) == 0) {
+ bitmap->need_sync = 1;
+ return len;
+ }
+
+ if (strncmp(buf, "true", 4) == 0) {
if (mddev->degraded)
return -EBUSY;
- mddev->bitmap->need_sync = 0;
- } else
- return -EINVAL;
- return len;
+ bitmap->need_sync = 0;
+ return len;
+ }
+
+ return -EINVAL;
}
static struct md_sysfs_entry bitmap_can_clear =
@@ -2670,21 +2926,26 @@ static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
ssize_t ret;
+ struct bitmap *bitmap;
+
spin_lock(&mddev->lock);
- if (mddev->bitmap == NULL)
+ bitmap = mddev->bitmap;
+ if (!bitmap)
ret = sprintf(page, "0\n");
else
- ret = sprintf(page, "%lu\n",
- mddev->bitmap->behind_writes_used);
+ ret = sprintf(page, "%lu\n", bitmap->behind_writes_used);
spin_unlock(&mddev->lock);
+
return ret;
}
static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
- if (mddev->bitmap)
- mddev->bitmap->behind_writes_used = 0;
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (bitmap)
+ bitmap->behind_writes_used = 0;
return len;
}
@@ -2707,3 +2968,38 @@ const struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
+
+static struct bitmap_operations bitmap_ops = {
+ .enabled = bitmap_enabled,
+ .create = bitmap_create,
+ .resize = bitmap_resize,
+ .load = bitmap_load,
+ .destroy = bitmap_destroy,
+ .flush = bitmap_flush,
+ .write_all = bitmap_write_all,
+ .dirty_bits = bitmap_dirty_bits,
+ .unplug = bitmap_unplug,
+ .daemon_work = bitmap_daemon_work,
+ .wait_behind_writes = bitmap_wait_behind_writes,
+
+ .startwrite = bitmap_startwrite,
+ .endwrite = bitmap_endwrite,
+ .start_sync = bitmap_start_sync,
+ .end_sync = bitmap_end_sync,
+ .cond_end_sync = bitmap_cond_end_sync,
+ .close_sync = bitmap_close_sync,
+
+ .update_sb = bitmap_update_sb,
+ .get_stats = bitmap_get_stats,
+
+ .sync_with_cluster = bitmap_sync_with_cluster,
+ .get_from_slot = bitmap_get_from_slot,
+ .copy_from_slot = bitmap_copy_from_slot,
+ .set_pages = bitmap_set_pages,
+ .free = md_bitmap_free,
+};
+
+void mddev_set_bitmap_ops(struct mddev *mddev)
+{
+ mddev->bitmap_ops = &bitmap_ops;
+}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index bb9eb418780a..662e6fc141a7 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -7,81 +7,7 @@
#ifndef BITMAP_H
#define BITMAP_H 1
-#define BITMAP_MAJOR_LO 3
-/* version 4 insists the bitmap is in little-endian order
- * with version 3, it is host-endian which is non-portable
- * Version 5 is currently set only for clustered devices
- */
-#define BITMAP_MAJOR_HI 4
-#define BITMAP_MAJOR_CLUSTERED 5
-#define BITMAP_MAJOR_HOSTENDIAN 3
-
-/*
- * in-memory bitmap:
- *
- * Use 16 bit block counters to track pending writes to each "chunk".
- * The 2 high order bits are special-purpose, the first is a flag indicating
- * whether a resync is needed. The second is a flag indicating whether a
- * resync is active.
- * This means that the counter is actually 14 bits:
- *
- * +--------+--------+------------------------------------------------+
- * | resync | resync | counter |
- * | needed | active | |
- * | (0-1) | (0-1) | (0-16383) |
- * +--------+--------+------------------------------------------------+
- *
- * The "resync needed" bit is set when:
- * a '1' bit is read from storage at startup.
- * a write request fails on some drives
- * a resync is aborted on a chunk with 'resync active' set
- * It is cleared (and resync-active set) when a resync starts across all drives
- * of the chunk.
- *
- *
- * The "resync active" bit is set when:
- * a resync is started on all drives, and resync_needed is set.
- * resync_needed will be cleared (as long as resync_active wasn't already set).
- * It is cleared when a resync completes.
- *
- * The counter counts pending write requests, plus the on-disk bit.
- * When the counter is '1' and the resync bits are clear, the on-disk
- * bit can be cleared as well, thus setting the counter to 0.
- * When we set a bit, or in the counter (to start a write), if the fields is
- * 0, we first set the disk bit and set the counter to 1.
- *
- * If the counter is 0, the on-disk bit is clear and the stripe is clean
- * Anything that dirties the stripe pushes the counter to 2 (at least)
- * and sets the on-disk bit (lazily).
- * If a periodic sweep find the counter at 2, it is decremented to 1.
- * If the sweep find the counter at 1, the on-disk bit is cleared and the
- * counter goes to zero.
- *
- * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
- * counters as a fallback when "page" memory cannot be allocated:
- *
- * Normal case (page memory allocated):
- *
- * page pointer (32-bit)
- *
- * [ ] ------+
- * |
- * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
- * c1 c2 c2048
- *
- * Hijacked case (page memory allocation failed):
- *
- * hijacked page pointer (32-bit)
- *
- * [ ][ ] (no page memory allocated)
- * counter #1 (16-bit) counter #2 (16-bit)
- *
- */
-
-#ifdef __KERNEL__
-
-#define PAGE_BITS (PAGE_SIZE << 3)
-#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
+#define BITMAP_MAGIC 0x6d746962
typedef __u16 bitmap_counter_t;
#define COUNTER_BITS 16
@@ -91,26 +17,6 @@ typedef __u16 bitmap_counter_t;
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
-#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
-#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
-#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
-
-/* how many counters per page? */
-#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
-/* same, except a shift value for more efficient bitops */
-#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
-/* same, except a mask value for more efficient bitops */
-#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
-
-#define BITMAP_BLOCK_SHIFT 9
-
-#endif
-
-/*
- * bitmap structures:
- */
-
-#define BITMAP_MAGIC 0x6d746962
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
@@ -152,136 +58,58 @@ typedef struct bitmap_super_s {
* devices. For raid10 it is the size of the array.
*/
-#ifdef __KERNEL__
+struct md_bitmap_stats {
+ u64 events_cleared;
+ int behind_writes;
+ bool behind_wait;
-/* the in-memory bitmap is represented by bitmap_pages */
-struct bitmap_page {
- /*
- * map points to the actual memory page
- */
- char *map;
- /*
- * in emergencies (when map cannot be alloced), hijack the map
- * pointer and use it as two counters itself
- */
- unsigned int hijacked:1;
- /*
- * If any counter in this page is '1' or '2' - and so could be
- * cleared then that page is marked as 'pending'
- */
- unsigned int pending:1;
- /*
- * count of dirty bits on the page
- */
- unsigned int count:30;
+ unsigned long missing_pages;
+ unsigned long file_pages;
+ unsigned long sync_size;
+ unsigned long pages;
+ struct file *file;
};
-/* the main bitmap structure - one per mddev */
-struct bitmap {
-
- struct bitmap_counts {
- spinlock_t lock;
- struct bitmap_page *bp;
- unsigned long pages; /* total number of pages
- * in the bitmap */
- unsigned long missing_pages; /* number of pages
- * not yet allocated */
- unsigned long chunkshift; /* chunksize = 2^chunkshift
- * (for bitops) */
- unsigned long chunks; /* Total number of data
- * chunks for the array */
- } counts;
-
- struct mddev *mddev; /* the md device that the bitmap is for */
-
- __u64 events_cleared;
- int need_sync;
-
- struct bitmap_storage {
- struct file *file; /* backing disk file */
- struct page *sb_page; /* cached copy of the bitmap
- * file superblock */
- unsigned long sb_index;
- struct page **filemap; /* list of cache pages for
- * the file */
- unsigned long *filemap_attr; /* attributes associated
- * w/ filemap pages */
- unsigned long file_pages; /* number of pages in the file*/
- unsigned long bytes; /* total bytes in the bitmap */
- } storage;
-
- unsigned long flags;
-
- int allclean;
-
- atomic_t behind_writes;
- unsigned long behind_writes_used; /* highest actual value at runtime */
-
- /*
- * the bitmap daemon - periodically wakes up and sweeps the bitmap
- * file, cleaning up bits and flushing out pages to disk as necessary
- */
- unsigned long daemon_lastrun; /* jiffies of last run */
- unsigned long last_end_sync; /* when we lasted called end_sync to
- * update bitmap with resync progress */
-
- atomic_t pending_writes; /* pending writes to the bitmap file */
- wait_queue_head_t write_wait;
- wait_queue_head_t overflow_wait;
- wait_queue_head_t behind_wait;
-
- struct kernfs_node *sysfs_can_clear;
- int cluster_slot; /* Slot offset for clustered env */
+struct bitmap_operations {
+ bool (*enabled)(struct mddev *mddev);
+ int (*create)(struct mddev *mddev, int slot);
+ int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize,
+ bool init);
+
+ int (*load)(struct mddev *mddev);
+ void (*destroy)(struct mddev *mddev);
+ void (*flush)(struct mddev *mddev);
+ void (*write_all)(struct mddev *mddev);
+ void (*dirty_bits)(struct mddev *mddev, unsigned long s,
+ unsigned long e);
+ void (*unplug)(struct mddev *mddev, bool sync);
+ void (*daemon_work)(struct mddev *mddev);
+ void (*wait_behind_writes)(struct mddev *mddev);
+
+ int (*startwrite)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool behind);
+ void (*endwrite)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors, bool success, bool behind);
+ bool (*start_sync)(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded);
+ void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
+ void (*cond_end_sync)(struct mddev *mddev, sector_t sector, bool force);
+ void (*close_sync)(struct mddev *mddev);
+
+ void (*update_sb)(void *data);
+ int (*get_stats)(void *data, struct md_bitmap_stats *stats);
+
+ void (*sync_with_cluster)(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi);
+ void *(*get_from_slot)(struct mddev *mddev, int slot);
+ int (*copy_from_slot)(struct mddev *mddev, int slot, sector_t *lo,
+ sector_t *hi, bool clear_bits);
+ void (*set_pages)(void *data, unsigned long pages);
+ void (*free)(void *data);
};
/* the bitmap API */
-
-/* these are used only by md/bitmap */
-struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
-int md_bitmap_load(struct mddev *mddev);
-void md_bitmap_flush(struct mddev *mddev);
-void md_bitmap_destroy(struct mddev *mddev);
-
-void md_bitmap_print_sb(struct bitmap *bitmap);
-void md_bitmap_update_sb(struct bitmap *bitmap);
-void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
-
-int md_bitmap_setallbits(struct bitmap *bitmap);
-void md_bitmap_write_all(struct bitmap *bitmap);
-
-void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
-
-/* these are exported */
-int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int behind);
-void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int success, int behind);
-int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
-void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
-void md_bitmap_close_sync(struct bitmap *bitmap);
-void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
-void md_bitmap_sync_with_cluster(struct mddev *mddev,
- sector_t old_lo, sector_t old_hi,
- sector_t new_lo, sector_t new_hi);
-
-void md_bitmap_unplug(struct bitmap *bitmap);
-void md_bitmap_unplug_async(struct bitmap *bitmap);
-void md_bitmap_daemon_work(struct mddev *mddev);
-
-int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
- int chunksize, int init);
-struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
-int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
- sector_t *lo, sector_t *hi, bool clear_bits);
-void md_bitmap_free(struct bitmap *bitmap);
-void md_bitmap_wait_behind_writes(struct mddev *mddev);
-
-static inline bool md_bitmap_enabled(struct bitmap *bitmap)
-{
- return bitmap && bitmap->storage.filemap &&
- !test_bit(BITMAP_STALE, &bitmap->flags);
-}
-
-#endif
+void mddev_set_bitmap_ops(struct mddev *mddev);
#endif
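With the header reduced to struct md_bitmap_stats, struct bitmap_operations and mddev_set_bitmap_ops(), every caller outside md-bitmap.c now goes through the mddev->bitmap_ops table, so a different bitmap implementation could be installed by pointing that table elsewhere. A minimal, hypothetical sketch of the indirection, with the types cut down to two ops:

#include <stdbool.h>
#include <stdio.h>

/* Reduced, hypothetical versions of the kernel structures. */
struct mddev;

struct bitmap_operations {
	bool (*enabled)(struct mddev *mddev);
	int  (*load)(struct mddev *mddev);
};

struct mddev {
	const struct bitmap_operations *bitmap_ops;
};

static bool noop_enabled(struct mddev *mddev) { (void)mddev; return false; }
static int  noop_load(struct mddev *mddev)    { (void)mddev; return 0; }

static const struct bitmap_operations noop_ops = {
	.enabled = noop_enabled,
	.load    = noop_load,
};

int main(void)
{
	struct mddev md = { .bitmap_ops = &noop_ops };

	/* Callers no longer touch struct bitmap directly: */
	if (!md.bitmap_ops->enabled(&md))
		printf("bitmap disabled, load -> %d\n",
		       md.bitmap_ops->load(&md));
	return 0;
}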
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 1d0db62f0351..6595f89becdb 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -317,7 +317,7 @@ static void recover_bitmaps(struct md_thread *thread)
str, ret);
goto clear_bit;
}
- ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+ ret = mddev->bitmap_ops->copy_from_slot(mddev, slot, &lo, &hi, true);
if (ret) {
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit;
@@ -497,8 +497,8 @@ static void process_suspend_info(struct mddev *mddev,
* we don't want to trigger lots of WARN.
*/
if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE))
- md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
- cinfo->sync_hi, lo, hi);
+ mddev->bitmap_ops->sync_with_cluster(mddev, cinfo->sync_low,
+ cinfo->sync_hi, lo, hi);
cinfo->sync_low = lo;
cinfo->sync_hi = hi;
@@ -628,8 +628,9 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
break;
case BITMAP_RESIZE:
if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
- ret = md_bitmap_resize(mddev->bitmap,
- le64_to_cpu(msg->high), 0, 0);
+ ret = mddev->bitmap_ops->resize(mddev,
+ le64_to_cpu(msg->high),
+ 0, false);
break;
default:
ret = -1;
@@ -856,7 +857,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
}
/* Read the disk bitmap sb and check if it needs recovery */
- ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+ ret = mddev->bitmap_ops->copy_from_slot(mddev, i, &lo, &hi, false);
if (ret) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
lockres_free(bm_lockres);
@@ -1143,13 +1144,16 @@ static int update_bitmap_size(struct mddev *mddev, sector_t size)
static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
{
- struct bitmap_counts *counts;
- char str[64];
- struct dlm_lock_resource *bm_lockres;
- struct bitmap *bitmap = mddev->bitmap;
- unsigned long my_pages = bitmap->counts.pages;
+ void *bitmap = mddev->bitmap;
+ struct md_bitmap_stats stats;
+ unsigned long my_pages;
int i, rv;
+ rv = mddev->bitmap_ops->get_stats(bitmap, &stats);
+ if (rv)
+ return rv;
+
+ my_pages = stats.pages;
/*
* We need to ensure all the nodes can grow to a larger
* bitmap size before make the reshaping.
@@ -1159,17 +1163,22 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
return rv;
for (i = 0; i < mddev->bitmap_info.nodes; i++) {
+ struct dlm_lock_resource *bm_lockres;
+ char str[64];
+
if (i == md_cluster_ops->slot_number(mddev))
continue;
- bitmap = get_bitmap_from_slot(mddev, i);
+ bitmap = mddev->bitmap_ops->get_from_slot(mddev, i);
if (IS_ERR(bitmap)) {
pr_err("can't get bitmap from slot %d\n", i);
bitmap = NULL;
goto out;
}
- counts = &bitmap->counts;
+ rv = mddev->bitmap_ops->get_stats(bitmap, &stats);
+ if (rv)
+ goto out;
/*
* If we can hold the bitmap lock of one node then
* the slot is not occupied, update the pages.
@@ -1183,21 +1192,21 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
bm_lockres->flags |= DLM_LKF_NOQUEUE;
rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (!rv)
- counts->pages = my_pages;
+ mddev->bitmap_ops->set_pages(bitmap, my_pages);
lockres_free(bm_lockres);
- if (my_pages != counts->pages)
+ if (my_pages != stats.pages)
/*
* Let's revert the bitmap size if one node
* can't resize bitmap
*/
goto out;
- md_bitmap_free(bitmap);
+ mddev->bitmap_ops->free(bitmap);
}
return 0;
out:
- md_bitmap_free(bitmap);
+ mddev->bitmap_ops->free(bitmap);
update_bitmap_size(mddev, oldsize);
return -1;
}
@@ -1207,24 +1216,27 @@ out:
*/
static int cluster_check_sync_size(struct mddev *mddev)
{
- int i, rv;
- bitmap_super_t *sb;
- unsigned long my_sync_size, sync_size = 0;
- int node_num = mddev->bitmap_info.nodes;
int current_slot = md_cluster_ops->slot_number(mddev);
- struct bitmap *bitmap = mddev->bitmap;
- char str[64];
+ int node_num = mddev->bitmap_info.nodes;
struct dlm_lock_resource *bm_lockres;
+ struct md_bitmap_stats stats;
+ void *bitmap = mddev->bitmap;
+ unsigned long sync_size = 0;
+ unsigned long my_sync_size;
+ char str[64];
+ int i, rv;
- sb = kmap_atomic(bitmap->storage.sb_page);
- my_sync_size = sb->sync_size;
- kunmap_atomic(sb);
+ rv = mddev->bitmap_ops->get_stats(bitmap, &stats);
+ if (rv)
+ return rv;
+
+ my_sync_size = stats.sync_size;
for (i = 0; i < node_num; i++) {
if (i == current_slot)
continue;
- bitmap = get_bitmap_from_slot(mddev, i);
+ bitmap = mddev->bitmap_ops->get_from_slot(mddev, i);
if (IS_ERR(bitmap)) {
pr_err("can't get bitmap from slot %d\n", i);
return -1;
@@ -1238,25 +1250,28 @@ static int cluster_check_sync_size(struct mddev *mddev)
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
pr_err("md-cluster: Cannot initialize %s\n", str);
- md_bitmap_free(bitmap);
+ mddev->bitmap_ops->free(bitmap);
return -1;
}
bm_lockres->flags |= DLM_LKF_NOQUEUE;
rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (!rv)
- md_bitmap_update_sb(bitmap);
+ mddev->bitmap_ops->update_sb(bitmap);
lockres_free(bm_lockres);
- sb = kmap_atomic(bitmap->storage.sb_page);
- if (sync_size == 0)
- sync_size = sb->sync_size;
- else if (sync_size != sb->sync_size) {
- kunmap_atomic(sb);
- md_bitmap_free(bitmap);
+ rv = mddev->bitmap_ops->get_stats(bitmap, &stats);
+ if (rv) {
+ mddev->bitmap_ops->free(bitmap);
+ return rv;
+ }
+
+ if (sync_size == 0) {
+ sync_size = stats.sync_size;
+ } else if (sync_size != stats.sync_size) {
+ mddev->bitmap_ops->free(bitmap);
return -1;
}
- kunmap_atomic(sb);
- md_bitmap_free(bitmap);
+ mddev->bitmap_ops->free(bitmap);
}
return (my_sync_size == sync_size) ? 0 : -1;
@@ -1585,7 +1600,7 @@ static int gather_bitmaps(struct md_rdev *rdev)
for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
if (sn == (cinfo->slot_number - 1))
continue;
- err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+ err = mddev->bitmap_ops->copy_from_slot(mddev, sn, &lo, &hi, false);
if (err) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out;
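The md-cluster.c conversions all follow the same accessor pattern: rather than kmapping bitmap->storage.sb_page and reading sync_size or counts.pages in place, callers fill a struct md_bitmap_stats snapshot via the get_stats() op and work from the copy. A reduced sketch of that pattern, with the structures trimmed to the two fields used here:

#include <stdio.h>

/* Hypothetical reduced forms of the kernel structures. */
struct md_bitmap_stats {
	unsigned long sync_size;
	unsigned long pages;
};

struct bitmap {
	unsigned long sync_size;
	unsigned long pages;
};

/* The op copies internal state out; callers never peek at the
 * implementation (in the kernel, no kmap of sb_page at the call site). */
static int get_stats(void *data, struct md_bitmap_stats *stats)
{
	struct bitmap *bitmap = data;

	if (!bitmap)
		return -1; /* the kernel version returns -ENOENT */
	stats->sync_size = bitmap->sync_size;
	stats->pages = bitmap->pages;
	return 0;
}

int main(void)
{
	struct bitmap b = { .sync_size = 2048, .pages = 16 };
	struct md_bitmap_stats st;

	if (get_stats(&b, &st) == 0)
		printf("sync_size=%lu pages=%lu\n", st.sync_size, st.pages);
	return 0;
}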
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d3a837506a36..179ee4afe937 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -546,137 +546,30 @@ static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_n
return 0;
}
-/*
- * Generic flush handling for md
- */
-
-static void md_end_flush(struct bio *bio)
-{
- struct md_rdev *rdev = bio->bi_private;
- struct mddev *mddev = rdev->mddev;
-
- bio_put(bio);
-
- rdev_dec_pending(rdev, mddev);
-
- if (atomic_dec_and_test(&mddev->flush_pending))
- /* The pre-request flush has finished */
- queue_work(md_wq, &mddev->flush_work);
-}
-
-static void md_submit_flush_data(struct work_struct *ws);
-
-static void submit_flushes(struct work_struct *ws)
+bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
- struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
-
- mddev->start_flush = ktime_get_boottime();
- INIT_WORK(&mddev->flush_work, md_submit_flush_data);
- atomic_set(&mddev->flush_pending, 1);
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- !test_bit(Faulty, &rdev->flags)) {
- struct bio *bi;
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- bi = bio_alloc_bioset(rdev->bdev, 0,
- REQ_OP_WRITE | REQ_PREFLUSH,
- GFP_NOIO, &mddev->bio_set);
- bi->bi_end_io = md_end_flush;
- bi->bi_private = rdev;
- atomic_inc(&mddev->flush_pending);
- submit_bio(bi);
- rcu_read_lock();
- }
- rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
- queue_work(md_wq, &mddev->flush_work);
-}
-
-static void md_submit_flush_data(struct work_struct *ws)
-{
- struct mddev *mddev = container_of(ws, struct mddev, flush_work);
- struct bio *bio = mddev->flush_bio;
+ struct bio *new;
/*
- * must reset flush_bio before calling into md_handle_request to avoid a
- * deadlock, because other bios passed md_handle_request suspend check
- * could wait for this and below md_handle_request could wait for those
- * bios because of suspend check
+ * md_flush_request() should be called under md_handle_request() and
+ * 'active_io' is already grabbed. Hence it's safe to get rdev directly
+ * without rcu protection.
*/
- spin_lock_irq(&mddev->lock);
- mddev->prev_flush_start = mddev->start_flush;
- mddev->flush_bio = NULL;
- spin_unlock_irq(&mddev->lock);
- wake_up(&mddev->sb_wait);
+ WARN_ON(percpu_ref_is_zero(&mddev->active_io));
- if (bio->bi_iter.bi_size == 0) {
- /* an empty barrier - all done */
- bio_endio(bio);
- } else {
- bio->bi_opf &= ~REQ_PREFLUSH;
-
- /*
- * make_requst() will never return error here, it only
- * returns error in raid5_make_request() by dm-raid.
- * Since dm always splits data and flush operation into
- * two separate io, io size of flush submitted by dm
- * always is 0, make_request() will not be called here.
- */
- if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
- bio_io_error(bio);
- }
-
- /* The pair is percpu_ref_get() from md_flush_request() */
- percpu_ref_put(&mddev->active_io);
-}
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
-/*
- * Manages consolidation of flushes and submitting any flushes needed for
- * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
- * being finished in another context. Returns false if the flushing is
- * complete but still needs the I/O portion of the bio to be processed.
- */
-bool md_flush_request(struct mddev *mddev, struct bio *bio)
-{
- ktime_t req_start = ktime_get_boottime();
- spin_lock_irq(&mddev->lock);
- /* flush requests wait until ongoing flush completes,
- * hence coalescing all the pending requests.
- */
- wait_event_lock_irq(mddev->sb_wait,
- !mddev->flush_bio ||
- ktime_before(req_start, mddev->prev_flush_start),
- mddev->lock);
- /* new request after previous flush is completed */
- if (ktime_after(req_start, mddev->prev_flush_start)) {
- WARN_ON(mddev->flush_bio);
- /*
- * Grab a reference to make sure mddev_suspend() will wait for
- * this flush to be done.
- *
- * md_flush_reqeust() is called under md_handle_request() and
- * 'active_io' is already grabbed, hence percpu_ref_is_zero()
- * won't pass, percpu_ref_tryget_live() can't be used because
- * percpu_ref_kill() can be called by mddev_suspend()
- * concurrently.
- */
- WARN_ON(percpu_ref_is_zero(&mddev->active_io));
- percpu_ref_get(&mddev->active_io);
- mddev->flush_bio = bio;
- spin_unlock_irq(&mddev->lock);
- INIT_WORK(&mddev->flush_work, submit_flushes);
- queue_work(md_wq, &mddev->flush_work);
- return true;
+ new = bio_alloc_bioset(rdev->bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
+ &mddev->bio_set);
+ bio_chain(new, bio);
+ submit_bio(new);
}
- /* flush was performed for some other bio while we waited. */
- spin_unlock_irq(&mddev->lock);
- if (bio->bi_iter.bi_size == 0) {
- /* pure flush without data - all done */
+ if (bio_sectors(bio) == 0) {
bio_endio(bio);
return true;
}
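The hunk above replaces md's hand-rolled flush machinery (flush_bio, flush_pending, flush_work and two work items) with block-layer bio chaining: for every in-sync rdev, a zero-length REQ_PREFLUSH child bio is chained to the incoming bio, so the parent can only complete after every member device has flushed its write cache, and no detour through md_wq is needed. A minimal sketch of the chaining pattern, assuming hypothetical 'disks'/'nr' parameters in place of the rdev_for_each() walk:

    static void fan_out_preflush(struct block_device **disks, int nr,
                                 struct bio *parent)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    /* each child holds a reference on 'parent' until it completes */
                    struct bio *child = bio_alloc(disks[i], 0,
                                                  REQ_OP_WRITE | REQ_PREFLUSH,
                                                  GFP_NOIO);

                    bio_chain(child, parent);
                    submit_bio(child);
            }
            /* 'parent' now cannot signal completion before all children do */
    }

For a pure flush (bio_sectors() == 0) the parent is ended directly once the chained children complete; otherwise the data portion presumably continues down the normal request path with REQ_PREFLUSH stripped, as in the deleted md_submit_flush_data().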
@@ -763,7 +656,6 @@ int mddev_init(struct mddev *mddev)
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->sync_seq, 0);
spin_lock_init(&mddev->lock);
- atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
@@ -772,6 +664,7 @@ int mddev_init(struct mddev *mddev)
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
+ mddev_set_bitmap_ops(mddev);
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
@@ -1372,6 +1265,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
return ret;
}
+static u64 md_bitmap_events_cleared(struct mddev *mddev)
+{
+ struct md_bitmap_stats stats;
+ int err;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
+ return 0;
+
+ return stats.events_cleared;
+}
+
/*
* validate_super for 0.90.0
* note: we are not using "freshest" for 0.9 superblock
@@ -1464,7 +1369,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
- if (ev1 < mddev->bitmap->events_cleared)
+ if (ev1 < md_bitmap_events_cleared(mddev))
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
@@ -1991,7 +1896,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
- if (ev1 < mddev->bitmap->events_cleared)
+ if (ev1 < md_bitmap_events_cleared(mddev))
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
@@ -2323,7 +2228,6 @@ super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
/* All necessary checks on new >= old have been done */
- struct bitmap *bitmap;
if (new_offset >= rdev->data_offset)
return 1;
@@ -2340,11 +2244,18 @@ super_1_allow_new_offset(struct md_rdev *rdev,
*/
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
- bitmap = rdev->mddev->bitmap;
- if (bitmap && !rdev->mddev->bitmap_info.file &&
- rdev->sb_start + rdev->mddev->bitmap_info.offset +
- bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
- return 0;
+
+ if (!rdev->mddev->bitmap_info.file) {
+ struct mddev *mddev = rdev->mddev;
+ struct md_bitmap_stats stats;
+ int err;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (!err && rdev->sb_start + mddev->bitmap_info.offset +
+ stats.file_pages * (PAGE_SIZE >> 9) > new_offset)
+ return 0;
+ }
+
if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
return 0;
@@ -2820,7 +2731,7 @@ repeat:
mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
- md_bitmap_update_sb(mddev->bitmap);
+ mddev->bitmap_ops->update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
@@ -4142,6 +4053,34 @@ static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
+new_level_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%d\n", mddev->new_level);
+}
+
+static ssize_t
+new_level_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned int n;
+ int err;
+
+ err = kstrtouint(buf, 10, &n);
+ if (err < 0)
+ return err;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+
+ mddev->new_level = n;
+ md_update_sb(mddev, 1);
+
+ mddev_unlock(mddev);
+ return len;
+}
+static struct md_sysfs_entry md_new_level =
+__ATTR(new_level, 0664, new_level_show, new_level_store);
+
+static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
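The new 'new_level' attribute exposes mddev->new_level read-write so userspace can persist an intended level change in the superblock ahead of a takeover or reshape; note that the store path takes mddev_lock(), writes the value, and immediately commits it with md_update_sb() before unlocking. As a hypothetical usage example, a management tool could run 'echo 6 > /sys/block/md0/md/new_level' before starting a raid5-to-raid6 conversion (exact integration depends on mdadm support).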
@@ -4680,17 +4619,23 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
- if (buf == end) break;
+ if (buf == end)
+ break;
+
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
- if (buf == end) break;
+ if (buf == end)
+ break;
}
- if (*end && !isspace(*end)) break;
- md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
+
+ if (*end && !isspace(*end))
+ break;
+
+ mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk);
buf = skip_spaces(end);
}
- md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
+ mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */
out:
mddev_unlock(mddev);
return len;
@@ -5666,6 +5611,7 @@ __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
static struct attribute *md_default_attrs[] = {
&md_level.attr,
+ &md_new_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_uuid.attr,
@@ -6206,16 +6152,10 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- struct bitmap *bitmap;
-
- bitmap = md_bitmap_create(mddev, -1);
- if (IS_ERR(bitmap)) {
- err = PTR_ERR(bitmap);
+ err = mddev->bitmap_ops->create(mddev, -1);
+ if (err)
pr_warn("%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
- } else
- mddev->bitmap = bitmap;
-
}
if (err)
goto bitmap_abort;
@@ -6285,7 +6225,7 @@ bitmap_abort:
pers->free(mddev, mddev->private);
mddev->private = NULL;
module_put(pers->owner);
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
abort:
bioset_exit(&mddev->io_clone_set);
exit_sync_set:
@@ -6304,9 +6244,10 @@ int do_md_run(struct mddev *mddev)
err = md_run(mddev);
if (err)
goto out;
- err = md_bitmap_load(mddev);
+
+ err = mddev->bitmap_ops->load(mddev);
if (err) {
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
goto out;
}
@@ -6450,7 +6391,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
- md_bitmap_flush(mddev);
+
+ mddev->bitmap_ops->flush(mddev);
if (md_is_rdwr(mddev) &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -6477,7 +6419,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
- md_bitmap_wait_behind_writes(mddev);
+ mddev->bitmap_ops->wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
@@ -6492,7 +6434,8 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- md_bitmap_destroy(mddev);
+
+ mddev->bitmap_ops->destroy(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
@@ -7270,22 +7213,19 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = 0;
if (mddev->pers) {
if (fd >= 0) {
- struct bitmap *bitmap;
+ err = mddev->bitmap_ops->create(mddev, -1);
+ if (!err)
+ err = mddev->bitmap_ops->load(mddev);
- bitmap = md_bitmap_create(mddev, -1);
- if (!IS_ERR(bitmap)) {
- mddev->bitmap = bitmap;
- err = md_bitmap_load(mddev);
- } else
- err = PTR_ERR(bitmap);
if (err) {
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
fd = -1;
}
} else if (fd < 0) {
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
}
}
+
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
if (f) {
@@ -7554,7 +7494,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
goto err;
}
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
- struct bitmap *bitmap;
/* add the bitmap */
if (mddev->bitmap) {
rv = -EEXIST;
@@ -7568,24 +7507,24 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- bitmap = md_bitmap_create(mddev, -1);
- if (!IS_ERR(bitmap)) {
- mddev->bitmap = bitmap;
- rv = md_bitmap_load(mddev);
- } else
- rv = PTR_ERR(bitmap);
+ rv = mddev->bitmap_ops->create(mddev, -1);
+ if (!rv)
+ rv = mddev->bitmap_ops->load(mddev);
+
if (rv)
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
} else {
- /* remove the bitmap */
- if (!mddev->bitmap) {
- rv = -ENOENT;
+ struct md_bitmap_stats stats;
+
+ rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (rv)
goto err;
- }
- if (mddev->bitmap->storage.file) {
+
+ if (stats.file) {
rv = -EINVAL;
goto err;
}
+
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
@@ -7600,7 +7539,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
module_put(md_cluster_mod);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
- md_bitmap_destroy(mddev);
+ mddev->bitmap_ops->destroy(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -8370,6 +8309,33 @@ static void md_seq_stop(struct seq_file *seq, void *v)
spin_unlock(&all_mddevs_lock);
}
+static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
+{
+ struct md_bitmap_stats stats;
+ unsigned long used_pages;
+ unsigned long chunk_kb;
+ int err;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
+ return;
+
+ chunk_kb = mddev->bitmap_info.chunksize >> 10;
+ used_pages = stats.pages - stats.missing_pages;
+
+ seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
+ used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10),
+ chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
+ chunk_kb ? "KB" : "B");
+
+ if (stats.file) {
+ seq_puts(seq, ", file: ");
+ seq_file_path(seq, stats.file, " \t\n");
+ }
+
+ seq_putc(seq, '\n');
+}
+
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev;
@@ -8390,14 +8356,19 @@ static int md_seq_show(struct seq_file *seq, void *v)
spin_unlock(&all_mddevs_lock);
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
- seq_printf(seq, "%s : %sactive", mdname(mddev),
- mddev->pers ? "" : "in");
+ seq_printf(seq, "%s : ", mdname(mddev));
if (mddev->pers) {
+ if (test_bit(MD_BROKEN, &mddev->flags))
+ seq_printf(seq, "broken");
+ else
+ seq_printf(seq, "active");
if (mddev->ro == MD_RDONLY)
seq_printf(seq, " (read-only)");
if (mddev->ro == MD_AUTO_READ)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
+ } else {
+ seq_printf(seq, "inactive");
}
sectors = 0;
@@ -8453,7 +8424,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
} else
seq_printf(seq, "\n ");
- md_bitmap_status(seq, mddev->bitmap);
+ md_bitmap_status(seq, mddev);
seq_printf(seq, "\n");
}
@@ -8668,7 +8639,6 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
BUG_ON(mddev->ro == MD_RDONLY);
if (mddev->ro == MD_AUTO_READ) {
/* need to switch to read/write */
- flush_work(&mddev->sync_work);
mddev->ro = MD_RDWR;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -9506,7 +9476,7 @@ static void md_start_sync(struct work_struct *ws)
* stored on all devices. So make sure all bitmap pages get written.
*/
if (spares)
- md_bitmap_write_all(mddev->bitmap);
+ mddev->bitmap_ops->write_all(mddev);
name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
"reshape" : "resync";
@@ -9594,7 +9564,7 @@ static void unregister_sync_thread(struct mddev *mddev)
void md_check_recovery(struct mddev *mddev)
{
if (mddev->bitmap)
- md_bitmap_daemon_work(mddev);
+ mddev->bitmap_ops->daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
@@ -9965,7 +9935,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (ret)
pr_info("md-cluster: resize failed\n");
else
- md_bitmap_update_sb(mddev->bitmap);
+ mddev->bitmap_ops->update_sb(mddev->bitmap);
}
/* Check for change of roles in the active devices */
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a0d6827dced9..5d2e6bd58e4d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -535,7 +535,8 @@ struct mddev {
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
- struct bitmap *bitmap; /* the bitmap for the device */
+ void *bitmap; /* the bitmap for the device */
+ struct bitmap_operations *bitmap_ops;
struct {
struct file *file; /* the bitmap file */
loff_t offset; /* offset from superblock of
@@ -571,16 +572,6 @@ struct mddev {
*/
struct bio_set io_clone_set;
- /* Generic flush handling.
- * The last to finish preflush schedules a worker to submit
- * the rest of the request (without the REQ_PREFLUSH flag).
- */
- struct bio *flush_bio;
- atomic_t flush_pending;
- ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed
- * flush was started.
- */
- struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
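This md.h hunk is the pivot of the series: struct mddev now holds only an opaque void *bitmap plus an operations table, so the bitmap implementation can be swapped without touching the personalities. A simplified sketch of the indirection, with the caveat that the real struct bitmap_operations (declared in md-bitmap.h) carries many more hooks than shown here:

    struct bitmap_operations {
            int  (*create)(struct mddev *mddev, int slot);
            int  (*load)(struct mddev *mddev);
            void (*destroy)(struct mddev *mddev);
            void (*flush)(struct mddev *mddev);
            void (*unplug)(struct mddev *mddev, bool sync);
            int  (*get_stats)(void *bitmap, struct md_bitmap_stats *stats);
            /* ... plus resize, startwrite/endwrite and the sync helpers ... */
    };

Callers accordingly move from md_bitmap_*(mddev->bitmap, ...) to mddev->bitmap_ops->op(mddev, ...), and consumers of bitmap state receive a value snapshot in struct md_bitmap_stats instead of dereferencing internals such as events_cleared or storage.file.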
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 2ea1710a3b70..4378d3250bd7 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -140,7 +140,7 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
* If bitmap is not enabled, it's safe to submit the io directly, and
* this can get optimal performance.
*/
- if (!md_bitmap_enabled(mddev->bitmap)) {
+ if (!mddev->bitmap_ops->enabled(mddev)) {
raid1_submit_write(bio);
return true;
}
@@ -166,12 +166,9 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
* while current io submission must wait for bitmap io to be done. In order to
* avoid such deadlock, submit bitmap io asynchronously.
*/
-static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
+static inline void raid1_prepare_flush_writes(struct mddev *mddev)
{
- if (current->bio_list)
- md_bitmap_unplug_async(bitmap);
- else
- md_bitmap_unplug(bitmap);
+ mddev->bitmap_ops->unplug(mddev, current->bio_list == NULL);
}
/*
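raid1_prepare_flush_writes() illustrates how md_bitmap_unplug() and md_bitmap_unplug_async() collapse into a single hook with a 'sync' flag: synchronous writeout is requested exactly when the caller is not inside bio submission, because waiting for bitmap I/O while current->bio_list is populated could deadlock. The whole old branch reduces to one line:

    /* sync writeout unless we are on the bio submission path */
    mddev->bitmap_ops->unplug(mddev, current->bio_list == NULL);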
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 761989d67906..6c9d24203f39 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -411,18 +411,20 @@ static void raid1_end_read_request(struct bio *bio)
static void close_write(struct r1bio *r1_bio)
{
+ struct mddev *mddev = r1_bio->mddev;
+
/* it really is the end of this request */
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
bio_free_pages(r1_bio->behind_master_bio);
bio_put(r1_bio->behind_master_bio);
r1_bio->behind_master_bio = NULL;
}
+
/* clear the bitmap if all writes complete successfully */
- md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- md_write_end(r1_bio->mddev);
+ mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
+ md_write_end(mddev);
}
static void r1_bio_write_done(struct r1bio *r1_bio)
@@ -900,7 +902,7 @@ static void wake_up_barrier(struct r1conf *conf)
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- raid1_prepare_flush_writes(conf->mddev->bitmap);
+ raid1_prepare_flush_writes(conf->mddev);
wake_up_barrier(conf);
while (bio) { /* submit pending writes */
@@ -1317,13 +1319,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
struct bio *read_bio;
- struct bitmap *bitmap = mddev->bitmap;
const enum req_op op = bio_op(bio);
const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
int rdisk;
bool r1bio_existed = !!r1_bio;
- char b[BDEVNAME_SIZE];
/*
* If r1_bio is set, we are blocking the raid1d thread
@@ -1332,16 +1332,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
*/
gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
- if (r1bio_existed) {
- /* Need to get the block device name carefully */
- struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
-
- if (rdev)
- snprintf(b, sizeof(b), "%pg", rdev->bdev);
- else
- strcpy(b, "???");
- }
-
/*
* Still need barrier for READ in case that whole
* array is frozen.
@@ -1363,15 +1353,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* used and no empty request is available.
*/
rdisk = read_balance(conf, r1_bio, &max_sectors);
-
if (rdisk < 0) {
/* couldn't find anywhere to read from */
- if (r1bio_existed) {
- pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ if (r1bio_existed)
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
mdname(mddev),
- b,
- (unsigned long long)r1_bio->sector);
- }
+ conf->mirrors[r1_bio->read_disk].rdev->bdev,
+ r1_bio->sector);
raid_end_bio_io(r1_bio);
return;
}
@@ -1383,15 +1371,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
(unsigned long long)r1_bio->sector,
mirror->rdev->bdev);
- if (test_bit(WriteMostly, &mirror->rdev->flags) &&
- bitmap) {
+ if (test_bit(WriteMostly, &mirror->rdev->flags)) {
/*
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
*/
mddev_add_trace_msg(mddev, "raid1 wait behind writes");
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
+ mddev->bitmap_ops->wait_behind_writes(mddev);
}
if (max_sectors < bio_sectors(bio)) {
@@ -1432,7 +1418,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
int i, disks;
- struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
struct md_rdev *blocked_rdev;
int first_clone;
@@ -1585,7 +1570,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* at a time and thus needs a new bio that can fit the whole payload
* this bio in page sized chunks.
*/
- if (write_behind && bitmap)
+ if (write_behind && mddev->bitmap)
max_sectors = min_t(int, max_sectors,
BIO_MAX_VECS * (PAGE_SIZE >> 9));
if (max_sectors < bio_sectors(bio)) {
@@ -1612,19 +1597,23 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
continue;
if (first_clone) {
+ unsigned long max_write_behind =
+ mddev->bitmap_info.max_write_behind;
+ struct md_bitmap_stats stats;
+ int err;
+
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
* is waiting for behind writes to flush */
- if (bitmap && write_behind &&
- (atomic_read(&bitmap->behind_writes)
- < mddev->bitmap_info.max_write_behind) &&
- !waitqueue_active(&bitmap->behind_wait)) {
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (!err && write_behind && !stats.behind_wait &&
+ stats.behind_writes < max_write_behind)
alloc_behind_master_bio(r1_bio, bio);
- }
- md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ mddev->bitmap_ops->startwrite(
+ mddev, r1_bio->sector, r1_bio->sectors,
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
first_clone = 0;
}
@@ -2042,7 +2031,7 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
/* make sure these bits don't get cleared. */
do {
- md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+ mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks);
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
@@ -2771,7 +2760,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int wonly = -1;
int write_targets = 0, read_targets = 0;
sector_t sync_blocks;
- int still_degraded = 0;
+ bool still_degraded = false;
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr);
@@ -2788,12 +2777,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- md_bitmap_close_sync(mddev->bitmap);
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
if (mddev_is_clustered(mddev)) {
@@ -2813,7 +2802,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks..
* This call the bitmap_start_sync doesn't actually record anything
*/
- if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2831,9 +2820,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* sector_nr + two times RESYNC_SECTORS
*/
- md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
- mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
-
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
if (raise_barrier(conf, sector_nr))
return 0;
@@ -2864,7 +2853,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
- still_degraded = 1;
+ still_degraded = true;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_opf = REQ_OP_WRITE;
bio->bi_end_io = end_sync_write;
@@ -2988,8 +2977,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
@@ -3313,14 +3302,16 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
+ int ret;
+
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- if (mddev->bitmap) {
- int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
- if (ret)
- return ret;
- }
+
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ if (ret)
+ return ret;
+
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
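Two raid1 paths stop reaching into bitmap internals here: reads from WriteMostly devices wait through the wait_behind_writes() hook instead of sleeping on bitmap->behind_wait directly, and the write-behind gate now reads behind_writes/behind_wait from a md_bitmap_stats snapshot. The snapshot can be momentarily stale, but the old atomic_read()-based check was equally racy; it is a throttling heuristic, not a correctness condition, so value-copying is safe.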
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2a9c4ee982e0..f3bf1116794a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -426,12 +426,13 @@ static void raid10_end_read_request(struct bio *bio)
static void close_write(struct r10bio *r10_bio)
{
+ struct mddev *mddev = r10_bio->mddev;
+
/* clear the bitmap if all writes complete successfully */
- md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
- md_write_end(r10_bio->mddev);
+ mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ false);
+ md_write_end(mddev);
}
static void one_write_done(struct r10bio *r10_bio)
@@ -884,7 +885,7 @@ static void flush_pending_writes(struct r10conf *conf)
__set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
- raid1_prepare_flush_writes(conf->mddev->bitmap);
+ raid1_prepare_flush_writes(conf->mddev);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
@@ -1100,7 +1101,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
- raid1_prepare_flush_writes(mddev->bitmap);
+ raid1_prepare_flush_writes(mddev);
wake_up_barrier(conf);
while (bio) { /* submit pending writes */
@@ -1492,7 +1493,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
atomic_set(&r10_bio->remaining, 1);
- md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
+ mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
+ false);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@@ -2465,7 +2467,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
s = PAGE_SIZE >> 9;
rdev = conf->mirrors[dr].rdev;
- addr = r10_bio->devs[0].addr + sect,
+ addr = r10_bio->devs[0].addr + sect;
ok = sync_page_io(rdev,
addr,
s << 9,
@@ -3192,13 +3194,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ mddev->bitmap_ops->end_sync(mddev,
+ mddev->curr_resync,
+ &sync_blocks);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
- md_bitmap_end_sync(mddev->bitmap, sect,
- &sync_blocks, 1);
+
+ mddev->bitmap_ops->end_sync(mddev, sect,
+ &sync_blocks);
}
} else {
/* completed sync */
@@ -3218,7 +3222,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
- md_bitmap_close_sync(mddev->bitmap);
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -3287,10 +3291,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio = NULL;
for (i = 0 ; i < conf->geo.raid_disks; i++) {
- int still_degraded;
+ bool still_degraded;
struct r10bio *rb2;
sector_t sect;
- int must_sync;
+ bool must_sync;
int any_working;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3307,7 +3311,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!mrdev && !mreplace)
continue;
- still_degraded = 0;
+ still_degraded = false;
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
@@ -3320,8 +3324,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
- must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
- &sync_blocks, 1);
+ must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
+ &sync_blocks,
+ true);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
@@ -3359,13 +3364,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
struct md_rdev *rdev = conf->mirrors[j].rdev;
if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
- still_degraded = 1;
+ still_degraded = true;
break;
}
}
- must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
- &sync_blocks, still_degraded);
+ must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
+ &sync_blocks, still_degraded);
any_working = 0;
for (j=0; j<conf->copies;j++) {
@@ -3538,12 +3543,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
- md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, mddev->degraded) &&
+ if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
+ &sync_blocks,
+ mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */
@@ -4190,6 +4196,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
*/
struct r10conf *conf = mddev->private;
sector_t oldsize, size;
+ int ret;
if (mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -4202,11 +4209,11 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
if (mddev->external_size &&
mddev->array_sectors > size)
return -EINVAL;
- if (mddev->bitmap) {
- int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
- if (ret)
- return ret;
- }
+
+ ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
+ if (ret)
+ return ret;
+
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > oldsize) {
@@ -4472,7 +4479,7 @@ static int raid10_start_reshape(struct mddev *mddev)
newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
if (!mddev_is_clustered(mddev)) {
- ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
if (ret)
goto abort;
else
@@ -4487,20 +4494,20 @@ static int raid10_start_reshape(struct mddev *mddev)
/*
* some node is already performing reshape, and no need to
- * call md_bitmap_resize again since it should be called when
+ * call bitmap_ops->resize again since it should be called when
* receiving BITMAP_RESIZE msg
*/
if ((sb && (le32_to_cpu(sb->feature_map) &
MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
goto out;
- ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
if (ret)
goto abort;
ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
- md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
+ mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
goto abort;
}
}
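For clustered arrays, raid10_start_reshape() resizes the local bitmap first and only then asks the cluster layer to resize the per-node bitmaps, rolling the local bitmap back on failure so every node keeps a consistent geometry. The shape of the try/rollback pattern, as in the hunk above:

    ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
    if (ret)
            goto abort;

    ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
    if (ret) {
            /* undo the local resize so all nodes agree on the geometry */
            mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
            goto abort;
    }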
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 874874fe4fa1..b4f7b79fd187 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -313,10 +313,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
- md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- 0);
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+ sh->sector, RAID5_STRIPE_SECTORS(conf),
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ false);
}
}
}
@@ -2798,7 +2798,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
{
struct r5l_log *log = READ_ONCE(conf->log);
int i;
- int do_wakeup = 0;
sector_t tree_index;
void __rcu **pslot;
uintptr_t refcount;
@@ -2815,7 +2814,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
for (i = sh->disks; i--; ) {
clear_bit(R5_InJournal, &sh->dev[i].flags);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- do_wakeup = 1;
+ wake_up_bit(&sh->dev[i].flags, R5_Overlap);
}
/*
@@ -2828,9 +2827,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
if (atomic_dec_and_test(&conf->pending_full_writes))
md_wakeup_thread(conf->mddev->thread);
- if (do_wakeup)
- wake_up(&conf->wait_for_overlap);
-
spin_lock_irq(&log->stripe_in_journal_lock);
list_del_init(&sh->r5c);
spin_unlock_irq(&log->stripe_in_journal_lock);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c14cf2410365..dc2ea636d173 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2337,7 +2337,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&sh->raid_conf->wait_for_overlap);
+ wake_up_bit(&dev->flags, R5_Overlap);
}
}
local_unlock(&conf->percpu->lock);
@@ -3473,7 +3473,7 @@ static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
* With PPL only writes to consecutive data chunks within a
* stripe are allowed because for a single stripe_head we can
* only have one PPL entry at a time, which describes one data
- * range. Not really an overlap, but wait_for_overlap can be
+ * range. Not really an overlap, but R5_Overlap can be
* used to handle this.
*/
sector_t sector;
@@ -3563,8 +3563,8 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
*/
set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock);
- md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- RAID5_STRIPE_SECTORS(conf), 0);
+ conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf), false);
spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) {
@@ -3652,7 +3652,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
log_stripe_write_finished(sh);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
+ wake_up_bit(&sh->dev[i].flags, R5_Overlap);
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
@@ -3663,8 +3663,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
if (bitmap_end)
- md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- RAID5_STRIPE_SECTORS(conf), 0, 0);
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+ sh->sector, RAID5_STRIPE_SECTORS(conf),
+ false, false);
bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
@@ -3696,7 +3697,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
+ wake_up_bit(&sh->dev[i].flags, R5_Overlap);
if (bi)
s->to_read--;
while (bi && bi->bi_iter.bi_sector <
@@ -3709,8 +3710,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
}
}
if (bitmap_end)
- md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- RAID5_STRIPE_SECTORS(conf), 0, 0);
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+ sh->sector, RAID5_STRIPE_SECTORS(conf),
+ false, false);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -3734,7 +3736,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(sh->batch_head);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
- wake_up(&conf->wait_for_overlap);
+ wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
s->syncing = 0;
s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
@@ -4059,10 +4061,10 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
- md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- 0);
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+ sh->sector, RAID5_STRIPE_SECTORS(conf),
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ false);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@@ -4875,7 +4877,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
{
struct stripe_head *sh, *next;
int i;
- int do_wakeup = 0;
list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
@@ -4911,7 +4912,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
spin_unlock_irq(&sh->stripe_lock);
for (i = 0; i < sh->disks; i++) {
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- do_wakeup = 1;
+ wake_up_bit(&sh->dev[i].flags, R5_Overlap);
sh->dev[i].flags = head_sh->dev[i].flags &
(~((1 << R5_WriteError) | (1 << R5_Overlap)));
}
@@ -4925,12 +4926,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
spin_unlock_irq(&head_sh->stripe_lock);
for (i = 0; i < head_sh->disks; i++)
if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
- do_wakeup = 1;
+ wake_up_bit(&head_sh->dev[i].flags, R5_Overlap);
if (head_sh->state & handle_flags)
set_bit(STRIPE_HANDLE, &head_sh->state);
-
- if (do_wakeup)
- wake_up(&head_sh->raid_conf->wait_for_overlap);
}
static void handle_stripe(struct stripe_head *sh)
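Throughout raid5, the shared conf->wait_for_overlap waitqueue is being replaced by per-bit waiting on R5_Overlap in each r5dev's flags word: a waiter sleeps until the bit clears, and whichever path clears it wakes only the tasks parked on that specific word/bit rather than every waiter in the array. A minimal sketch of the pairing, with 'flags' standing in for sh->dev[i].flags:

    /* waiter: mark the overlap, then sleep until the bit is cleared */
    set_bit(R5_Overlap, &flags);
    wait_on_bit(&flags, R5_Overlap, TASK_UNINTERRUPTIBLE);

    /* waker: clear the bit first, then wake its bit waitqueue */
    if (test_and_clear_bit(R5_Overlap, &flags))
            wake_up_bit(&flags, R5_Overlap);

Note the ordering: wake_up_bit() must follow the clear, otherwise a waiter can re-check the still-set bit and go back to sleep.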
@@ -5196,7 +5194,7 @@ static void handle_stripe(struct stripe_head *sh)
md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
- wake_up(&conf->wait_for_overlap);
+ wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap);
}
/* If the failed drives are just a ReadError, then we might need
@@ -5259,7 +5257,7 @@ static void handle_stripe(struct stripe_head *sh)
} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
}
@@ -5753,12 +5751,11 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
int d;
again:
sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
- prepare_to_wait(&conf->wait_for_overlap, &w,
- TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
if (test_bit(STRIPE_SYNCING, &sh->state)) {
raid5_release_stripe(sh);
- schedule();
+ wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap,
+ TASK_UNINTERRUPTIBLE);
goto again;
}
clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
@@ -5770,12 +5767,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
set_bit(R5_Overlap, &sh->dev[d].flags);
spin_unlock_irq(&sh->stripe_lock);
raid5_release_stripe(sh);
- schedule();
+ wait_on_bit(&sh->dev[d].flags, R5_Overlap,
+ TASK_UNINTERRUPTIBLE);
goto again;
}
}
set_bit(STRIPE_DISCARD, &sh->state);
- finish_wait(&conf->wait_for_overlap, &w);
sh->overwrite_disks = 0;
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
@@ -5788,13 +5785,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
- for (d = 0;
- d < conf->raid_disks - conf->max_degraded;
+ for (d = 0; d < conf->raid_disks - conf->max_degraded;
d++)
- md_bitmap_startwrite(mddev->bitmap,
- sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- 0);
+ mddev->bitmap_ops->startwrite(mddev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf), false);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@@ -5855,7 +5849,6 @@ static int add_all_stripe_bios(struct r5conf *conf,
struct bio *bi, int forwrite, int previous)
{
int dd_idx;
- int ret = 1;
spin_lock_irq(&sh->stripe_lock);
@@ -5871,14 +5864,19 @@ static int add_all_stripe_bios(struct r5conf *conf,
if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
set_bit(R5_Overlap, &dev->flags);
- ret = 0;
- continue;
+ spin_unlock_irq(&sh->stripe_lock);
+ raid5_release_stripe(sh);
+ /* release batch_last before wait to avoid risk of deadlock */
+ if (ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+ md_wakeup_thread(conf->mddev->thread);
+ wait_on_bit(&dev->flags, R5_Overlap, TASK_UNINTERRUPTIBLE);
+ return 0;
}
}
- if (!ret)
- goto out;
-
for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
struct r5dev *dev = &sh->dev[dd_idx];
@@ -5894,9 +5892,8 @@ static int add_all_stripe_bios(struct r5conf *conf,
RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
}
-out:
spin_unlock_irq(&sh->stripe_lock);
- return ret;
+ return 1;
}
enum reshape_loc {
@@ -5992,17 +5989,17 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
goto out_release;
}
- if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
- /*
- * Stripe is busy expanding or add failed due to
- * overlap. Flush everything and wait a while.
- */
+ if (test_bit(STRIPE_EXPANDING, &sh->state)) {
md_wakeup_thread(mddev->thread);
ret = STRIPE_SCHEDULE_AND_RETRY;
goto out_release;
}
+ if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
+ ret = STRIPE_RETRY;
+ goto out;
+ }
+
if (stripe_can_batch(sh)) {
stripe_add_to_batch_list(conf, sh, ctx->batch_last);
if (ctx->batch_last)
@@ -6073,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ bool on_wq;
struct r5conf *conf = mddev->private;
sector_t logical_sector;
struct stripe_request_ctx ctx = {};
@@ -6146,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
* sequential IO pattern. We don't bother with the optimization when
* reshaping as the performance benefit is not worth the complexity.
*/
- if (likely(conf->reshape_progress == MaxSector))
+ if (likely(conf->reshape_progress == MaxSector)) {
logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+ on_wq = false;
+ } else {
+ add_wait_queue(&conf->wait_for_reshape, &wait);
+ on_wq = true;
+ }
s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
- add_wait_queue(&conf->wait_for_overlap, &wait);
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
bi);
@@ -6161,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
continue;
if (res == STRIPE_SCHEDULE_AND_RETRY) {
+ WARN_ON_ONCE(!on_wq);
/*
* Must release the reference to batch_last before
* scheduling and waiting for work to be done,
@@ -6185,7 +6188,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
logical_sector = ctx.first_sector +
(s << RAID5_STRIPE_SHIFT(conf));
}
- remove_wait_queue(&conf->wait_for_overlap, &wait);
+ if (unlikely(on_wq))
+ remove_wait_queue(&conf->wait_for_reshape, &wait);
if (ctx.batch_last)
raid5_release_stripe(ctx.batch_last);
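After this conversion the queue's only remaining users are reshape-related waits, so raid5_make_request() registers on it only while a reshape is in progress (the new on_wq flag), and the WARN_ON_ONCE records that STRIPE_SCHEDULE_AND_RETRY should now be reachable only on that path. Ordinary stripe-overlap contention instead returns the new STRIPE_RETRY result, since add_all_stripe_bios() has already slept on the R5_Overlap bit before returning 0.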
@@ -6338,7 +6342,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
: (safepos < writepos && readpos > writepos)) ||
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
- wait_event(conf->wait_for_overlap,
+ wait_event(conf->wait_for_reshape,
atomic_read(&conf->reshape_stripes)==0
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (atomic_read(&conf->reshape_stripes) != 0)
@@ -6364,7 +6368,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
@@ -6447,7 +6451,7 @@ finish:
(sector_nr - mddev->curr_resync_completed) * 2
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
- wait_event(conf->wait_for_overlap,
+ wait_event(conf->wait_for_reshape,
atomic_read(&conf->reshape_stripes) == 0
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (atomic_read(&conf->reshape_stripes) != 0)
@@ -6473,7 +6477,7 @@ finish:
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
ret:
@@ -6486,7 +6490,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
sector_t sync_blocks;
- int still_degraded = 0;
+ bool still_degraded = false;
int i;
if (sector_nr >= max_sector) {
@@ -6498,17 +6502,17 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (mddev->curr_resync < max_sector) /* aborted */
- md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- md_bitmap_close_sync(mddev->bitmap);
+ mddev->bitmap_ops->close_sync(mddev);
return 0;
}
/* Allow raid5_quiesce to complete */
- wait_event(conf->wait_for_overlap, conf->quiesce != 2);
+ wait_event(conf->wait_for_reshape, conf->quiesce != 2);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return reshape_request(mddev, sector_nr, skipped);
@@ -6531,7 +6535,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
- !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
+ true) &&
sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */
do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
@@ -6540,7 +6545,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * RAID5_STRIPE_SECTORS(conf);
}
- md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
sh = raid5_get_active_stripe(conf, NULL, sector_nr,
R5_GAS_NOBLOCK);
@@ -6559,10 +6564,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
struct md_rdev *rdev = conf->disks[i].rdev;
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
- still_degraded = 1;
+ still_degraded = true;
}
- md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+ mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
+ still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -6767,7 +6773,7 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
- md_bitmap_unplug(mddev->bitmap);
+ mddev->bitmap_ops->unplug(mddev, true);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);
@@ -7492,7 +7498,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_for_quiescent);
init_waitqueue_head(&conf->wait_for_stripe);
- init_waitqueue_head(&conf->wait_for_overlap);
+ init_waitqueue_head(&conf->wait_for_reshape);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->loprio_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -8312,6 +8318,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
*/
sector_t newsize;
struct r5conf *conf = mddev->private;
+ int ret;
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
@@ -8320,11 +8327,11 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- if (mddev->bitmap) {
- int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
- if (ret)
- return ret;
- }
+
+ ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false);
+ if (ret)
+ return ret;
+
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
@@ -8550,7 +8557,7 @@ static void end_reshape(struct r5conf *conf)
!test_bit(In_sync, &rdev->flags))
rdev->recovery_offset = MaxSector;
spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
mddev_update_io_opt(conf->mddev,
conf->raid_disks - conf->max_degraded);
@@ -8614,13 +8621,13 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
conf->quiesce = 1;
unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
} else {
/* re-enable writes */
lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_quiescent);
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
unlock_all_device_hash_locks_irq(conf);
}
log_quiesce(conf, quiesce);
@@ -8939,7 +8946,7 @@ static void raid5_prepare_suspend(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
- wake_up(&conf->wait_for_overlap);
+ wake_up(&conf->wait_for_reshape);
}
static struct md_personality raid6_personality =
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 9b5a7dc3f2a0..896ecfc4afa6 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -668,7 +668,7 @@ struct r5conf {
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe;
- wait_queue_head_t wait_for_overlap;
+ wait_queue_head_t wait_for_reshape;
unsigned long cache_state;
struct shrinker *shrinker;
int pool_size; /* number of disks in stripeheads in pool */
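With every overlap waiter converted to bit-waits, the renamed wait_for_reshape queue is left with only the reshape and quiesce synchronization points, which is exactly what the new name documents.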
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index da09834990b8..c7d36010c890 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -673,8 +673,9 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
/* Retry this message */
data->attempts -= attempts_made;
if (msg->timeout)
- dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
- msg->len, msg->msg, data->attempts, msg->reply);
+ dprintk(2, "retransmit: %*ph (attempts: %d, wait for %*ph)\n",
+ msg->len, msg->msg, data->attempts,
+ data->match_len, data->match_reply);
else
dprintk(2, "retransmit: %*ph (attempts: %d)\n",
msg->len, msg->msg, data->attempts);
@@ -780,6 +781,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
{
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
+ bool reply_vendor_id = (msg->flags & CEC_MSG_FL_REPLY_VENDOR_ID) &&
+ msg->len > 1 && msg->msg[1] == CEC_MSG_VENDOR_COMMAND_WITH_ID;
int err;
if (adap->devnode.unregistered)
@@ -794,12 +797,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
msg->sequence = 0;
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW |
+ (reply_vendor_id ? CEC_MSG_FL_REPLY_VENDOR_ID : 0);
- if (msg->reply && msg->timeout == 0) {
+ if ((reply_vendor_id || msg->reply) && msg->timeout == 0) {
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
- msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW;
if (!msg->timeout)
msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS;
@@ -809,6 +813,11 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
return -EINVAL;
}
+ if (reply_vendor_id && msg->len < 6) {
+ dprintk(1, "%s: <Vendor Command With ID> message too short\n",
+ __func__);
+ return -EINVAL;
+ }
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
@@ -900,8 +909,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
__func__);
return -ENONET;
}
- if (msg->reply) {
- dprintk(1, "%s: invalid msg->reply\n", __func__);
+ if (reply_vendor_id || msg->reply) {
+ dprintk(1, "%s: adapter is unconfigured so reply is not supported\n",
+ __func__);
return -EINVAL;
}
}
@@ -923,6 +933,14 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
data->fh = fh;
data->adap = adap;
data->blocking = block;
+ if (reply_vendor_id) {
+ memcpy(data->match_reply, msg->msg + 1, 4);
+ data->match_reply[4] = msg->reply;
+ data->match_len = 5;
+ } else if (msg->timeout) {
+ data->match_reply[0] = msg->reply;
+ data->match_len = 1;
+ }
init_completion(&data->c);
INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
@@ -1211,13 +1229,15 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
(cmd == CEC_MSG_REPORT_ARC_INITIATED ||
cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
- (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
- dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+ (data->match_reply[0] == CEC_MSG_REPORT_ARC_INITIATED ||
+ data->match_reply[0] == CEC_MSG_REPORT_ARC_TERMINATED)) {
dst->reply = cmd;
+ data->match_reply[0] = cmd;
+ }
/* Does the command match? */
if ((abort && cmd != dst->msg[1]) ||
- (!abort && cmd != dst->reply))
+ (!abort && memcmp(data->match_reply, msg->msg + 1, data->match_len)))
continue;
/* Does the addressing match? */
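Reply matching thus moves from a single expected opcode to a variable-length prefix compare: match_len is 1 (just the reply opcode) for ordinary transmits, and 5 for CEC_MSG_FL_REPLY_VENDOR_ID transmits, where the stored prefix is the <Vendor Command With ID> opcode, the 3-byte vendor ID and the expected first vendor-specific byte; this keeps one vendor's reply from being mistaken for another's. An illustrative helper (not part of the patch) showing the acceptance test:

    static bool is_expected_reply(const struct cec_data *data,
                                  const struct cec_msg *rx)
    {
            /* rx->msg[0] is the header; the compared prefix starts at the opcode */
            return !memcmp(data->match_reply, rx->msg + 1, data->match_len);
    }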
@@ -2318,18 +2338,21 @@ int cec_adap_status(struct seq_file *file, void *priv)
}
data = adap->transmitting;
if (data)
- seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "transmitting message: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
list_for_each_entry(data, &adap->transmit_queue, list) {
- seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "queued tx message: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
}
list_for_each_entry(data, &adap->wait_queue, list) {
- seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "message waiting for reply: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
}
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 3ef915344304..c75a4057f00e 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -580,7 +580,7 @@ static int cec_open(struct inode *inode, struct file *filp)
fh->mode_initiator = CEC_MODE_INITIATOR;
fh->adap = adap;
- err = cec_get_device(devnode);
+ err = cec_get_device(adap);
if (err) {
kfree(fh);
return err;
@@ -686,7 +686,7 @@ static int cec_release(struct inode *inode, struct file *filp)
mutex_unlock(&fh->lock);
kfree(fh);
- cec_put_device(devnode);
+ cec_put_device(adap);
filp->private_data = NULL;
return 0;
}
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index 6f940df0230c..48282d272fe6 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -51,35 +51,6 @@ static struct dentry *top_cec_dir;
/* dev to cec_devnode */
#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
-int cec_get_device(struct cec_devnode *devnode)
-{
- /*
- * Check if the cec device is available. This needs to be done with
- * the devnode->lock held to prevent an open/unregister race:
- * without the lock, the device could be unregistered and freed between
- * the devnode->registered check and get_device() calls, leading to
- * a crash.
- */
- mutex_lock(&devnode->lock);
- /*
- * return ENODEV if the cec device has been removed
- * already or if it is not registered anymore.
- */
- if (!devnode->registered) {
- mutex_unlock(&devnode->lock);
- return -ENODEV;
- }
- /* and increase the device refcount */
- get_device(&devnode->dev);
- mutex_unlock(&devnode->lock);
- return 0;
-}
-
-void cec_put_device(struct cec_devnode *devnode)
-{
- put_device(&devnode->dev);
-}
-
/* Called when the last user of the cec device exits. */
static void cec_devnode_release(struct device *cd)
{
@@ -273,7 +244,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->cec_pin_is_high = true;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
- adap->capabilities = caps;
+ adap->capabilities = caps | CEC_CAP_REPLY_VENDOR_ID;
if (debug_phys_addr)
adap->capabilities |= CEC_CAP_PHYS_ADDR;
adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD;
diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h
index ed1f8c67626b..ce42a37c4ac0 100644
--- a/drivers/media/cec/core/cec-priv.h
+++ b/drivers/media/cec/core/cec-priv.h
@@ -37,8 +37,6 @@ static inline bool msg_is_raw(const struct cec_msg *msg)
/* cec-core.c */
extern int cec_debug;
-int cec_get_device(struct cec_devnode *devnode);
-void cec_put_device(struct cec_devnode *devnode);
/* cec-adap.c */
int cec_monitor_all_cnt_inc(struct cec_adapter *adap);
diff --git a/drivers/media/cec/usb/Kconfig b/drivers/media/cec/usb/Kconfig
index 3f3a5c75287a..6faf4742981d 100644
--- a/drivers/media/cec/usb/Kconfig
+++ b/drivers/media/cec/usb/Kconfig
@@ -3,6 +3,7 @@
# USB drivers
if USB_SUPPORT && TTY
+source "drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig"
source "drivers/media/cec/usb/pulse8/Kconfig"
source "drivers/media/cec/usb/rainshadow/Kconfig"
endif
diff --git a/drivers/media/cec/usb/Makefile b/drivers/media/cec/usb/Makefile
index e4183d1bfa9a..c082679f5318 100644
--- a/drivers/media/cec/usb/Makefile
+++ b/drivers/media/cec/usb/Makefile
@@ -2,5 +2,6 @@
#
# Makefile for the CEC USB device drivers.
#
+obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) += extron-da-hd-4k-plus/
obj-$(CONFIG_USB_PULSE8_CEC) += pulse8/
obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow/
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig
new file mode 100644
index 000000000000..5354f0eebe5c
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config USB_EXTRON_DA_HD_4K_PLUS_CEC
+ tristate "Extron DA HD 4K Plus CEC driver"
+ depends on VIDEO_DEV
+ depends on USB
+ depends on USB_ACM
+ select CEC_CORE
+ select SERIO
+ select SERIO_SERPORT
+ help
+ This is a CEC driver for the Extron DA HD 4K Plus HDMI Splitter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called extron-da-hd-4k-plus-cec.
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
new file mode 100644
index 000000000000..2e8f7f60263f
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
@@ -0,0 +1,8 @@
+extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
+obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
+
+all:
+ $(MAKE) -C $(KDIR) M=$(shell pwd) modules
+
+install:
+ $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c
new file mode 100644
index 000000000000..73fdec4b791d
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <media/cec.h>
+
+#include "cec-splitter.h"
+
+/*
+ * Helper function to reply to a received message with a Feature Abort
+ * message.
+ */
+static int cec_feature_abort_reason(struct cec_adapter *adap,
+ struct cec_msg *msg, u8 reason)
+{
+ struct cec_msg tx_msg = { };
+
+ /*
+ * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
+ * message!
+ */
+ if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
+ return 0;
+	/* Don't Feature Abort messages sent from the 'Unregistered' LA */
+ if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
+ return 0;
+ cec_msg_set_reply_to(&tx_msg, msg);
+ cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
+ return cec_transmit_msg(adap, &tx_msg, false);
+}
+
+/* Transmit an Active Source message from this output port to a sink */
+static void cec_port_out_active_source(struct cec_splitter_port *p)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ return;
+ p->is_active_source = true;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_active_source(&msg, adap->phys_addr);
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Active Source messages from all output ports to the sinks */
+static void cec_out_active_source(struct cec_splitter *splitter)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_active_source(splitter->ports[i]);
+}
+
+/* Transmit a Standby message from this output port to a sink */
+static void cec_port_out_standby(struct cec_splitter_port *p)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ return;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_standby(&msg);
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Standby messages from all output ports to the sinks */
+static void cec_out_standby(struct cec_splitter *splitter)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_standby(splitter->ports[i]);
+}
+
+/* Transmit an Image/Text View On message from this output port to a sink */
+static void cec_port_out_wakeup(struct cec_splitter_port *p, u8 opcode)
+{
+ struct cec_adapter *adap = p->adap;
+ u8 la = adap->log_addrs.log_addr[0];
+ struct cec_msg msg;
+
+ if (la == CEC_LOG_ADDR_INVALID)
+ la = CEC_LOG_ADDR_UNREGISTERED;
+ cec_msg_init(&msg, la, 0);
+ msg.len = 2;
+ msg.msg[1] = opcode;
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Image/Text View On messages from all output ports to the sinks */
+static void cec_out_wakeup(struct cec_splitter *splitter, u8 opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_wakeup(splitter->ports[i], opcode);
+}
+
+/*
+ * Update the power state of the unconfigured CEC device to either
+ * Off or On depending on the current state of the splitter.
+ * This keeps the outputs in a consistent state.
+ */
+void cec_splitter_unconfigured_output(struct cec_splitter_port *p)
+{
+ p->video_latency = 1;
+ p->power_status = p->splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+
+ /* The adapter was unconfigured, so clear the sequence and ts values */
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+}
+
+/*
+ * Update the power state of the newly configured CEC device to either
+ * Off or On depending on the current state of the splitter.
+ * This keeps the outputs in a consistent state.
+ */
+void cec_splitter_configured_output(struct cec_splitter_port *p)
+{
+ p->video_latency = 1;
+ p->power_status = p->splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+
+ if (p->splitter->is_standby) {
+ /*
+ * Some sinks only obey Standby if it comes from the
+ * active source.
+ */
+ cec_port_out_active_source(p);
+ cec_port_out_standby(p);
+ } else {
+ cec_port_out_wakeup(p, CEC_MSG_IMAGE_VIEW_ON);
+ }
+}
+
+/* Pass the in_msg on to all output ports */
+static void cec_out_passthrough(struct cec_splitter *splitter,
+ const struct cec_msg *in_msg)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ msg.len = in_msg->len;
+ memcpy(msg.msg + 1, in_msg->msg + 1, msg.len - 1);
+ cec_transmit_msg(adap, &msg, false);
+ }
+}
+
+/*
+ * See if all output ports received the Report Current Latency message,
+ * and if so, transmit the result from the input port to the video source.
+ */
+static void cec_out_report_current_latency(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap)
+{
+ struct cec_msg reply = {};
+ unsigned int reply_lat = 0;
+ unsigned int cnt = 0;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ /* Skip unconfigured ports */
+ if (!adap->is_configured)
+ continue;
+ /* Return if a port is still waiting for a reply */
+ if (p->out_request_current_latency_seq)
+ return;
+ reply_lat += p->video_latency - 1;
+ cnt++;
+ }
+
+	/*
+	 * All ports that could reply have replied, so clear the sequence
+	 * and timestamp values.
+	 */
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ }
+
+ /*
+ * Return if there were no replies or the input port is no longer
+ * configured.
+ */
+ if (!cnt || !input_adap->is_configured)
+ return;
+
+ /* Reply with the average latency */
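+	/*
+	 * Per the HDMI spec the latency field encodes (ms / 2) + 1, so a
+	 * value of 1 means zero latency: the loop above summed the encoded
+	 * offsets (video_latency - 1), and the +1 bias is re-added here.
+	 */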
+ reply_lat = 1 + reply_lat / cnt;
+ cec_msg_init(&reply, input_adap->log_addrs.log_addr[0],
+ splitter->request_current_latency_dest);
+ cec_msg_report_current_latency(&reply, input_adap->phys_addr,
+ reply_lat, 1, 1, 1);
+ cec_transmit_msg(input_adap, &reply, false);
+}
+
+/* Transmit Request Current Latency to all output ports */
+static int cec_out_request_current_latency(struct cec_splitter *splitter)
+{
+ ktime_t now = ktime_get();
+ bool error = true;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ if (!adap->is_configured) {
+ /* Clear if not configured */
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ } else if (!p->out_request_current_latency_seq) {
+ /*
+ * Keep the old ts if an earlier request is still
+ * pending. This ensures that the request will
+ * eventually time out based on the timestamp of
+ * the first request if the sink is unresponsive.
+ */
+ p->out_request_current_latency_ts = now;
+ }
+ }
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_request_current_latency(&msg, true, adap->phys_addr);
+ if (cec_transmit_msg(adap, &msg, false))
+ continue;
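+		/*
+		 * Set bit 31 so the stored value can never be 0: a zero
+		 * sequence value means "no reply pending".
+		 */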
+ p->out_request_current_latency_seq = msg.sequence | (1U << 31);
+ error = false;
+ }
+ return error ? -ENODEV : 0;
+}
+
+/*
+ * See if all output ports received the Report Power Status message,
+ * and if so, transmit the result from the input port to the video source.
+ */
+static void cec_out_report_power_status(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap)
+{
+ struct cec_msg reply = {};
+ /* The target power status of the splitter itself */
+ u8 splitter_pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON;
+ /*
+	 * The transient power status of the splitter, used if not all
+	 * outputs report the target power status.
+ */
+ u8 splitter_transient_pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+ u8 reply_pwr = splitter_pwr;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ /* Skip if no sink was found (HPD was low for more than 5s) */
+ if (!p->found_sink)
+ continue;
+
+ /* Return if a port is still waiting for a reply */
+ if (p->out_give_device_power_status_seq)
+ return;
+ if (p->power_status != splitter_pwr)
+ reply_pwr = splitter_transient_pwr;
+ }
+
+	/*
+	 * All ports that could reply have replied, so clear the sequence
+	 * and timestamp values.
+	 */
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ }
+
+ /* Return if the input port is no longer configured. */
+ if (!input_adap->is_configured)
+ return;
+
+ /* Reply with the new power status */
+ cec_msg_init(&reply, input_adap->log_addrs.log_addr[0],
+ splitter->give_device_power_status_dest);
+ cec_msg_report_power_status(&reply, reply_pwr);
+ cec_transmit_msg(input_adap, &reply, false);
+}
+
+/* Transmit Give Device Power Status to all output ports */
+static int cec_out_give_device_power_status(struct cec_splitter *splitter)
+{
+ ktime_t now = ktime_get();
+ bool error = true;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ /*
+ * Keep the old ts if an earlier request is still
+ * pending. This ensures that the request will
+ * eventually time out based on the timestamp of
+ * the first request if the sink is unresponsive.
+ */
+ if (adap->is_configured && !p->out_give_device_power_status_seq)
+ p->out_give_device_power_status_ts = now;
+ }
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_give_device_power_status(&msg, true);
+ if (cec_transmit_msg(adap, &msg, false))
+ continue;
+ p->out_give_device_power_status_seq = msg.sequence | (1U << 31);
+ error = false;
+ }
+ return error ? -ENODEV : 0;
+}
+
+/*
+ * CEC messages received on the HDMI input of the splitter are
+ * forwarded (if relevant) to the HDMI outputs of the splitter.
+ */
+int cec_splitter_received_input(struct cec_splitter_port *p, struct cec_msg *msg)
+{
+ if (!cec_msg_status_is_ok(msg))
+ return 0;
+
+ if (msg->len < 2)
+ return -ENOMSG;
+
+ switch (msg->msg[1]) {
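+	/*
+	 * The first group of messages is consumed here (returning 0 tells
+	 * the CEC core not to process them further), the others update the
+	 * splitter state and/or are fanned out to the outputs.
+	 */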
+ case CEC_MSG_DEVICE_VENDOR_ID:
+ case CEC_MSG_REPORT_POWER_STATUS:
+ case CEC_MSG_SET_STREAM_PATH:
+ case CEC_MSG_ROUTING_CHANGE:
+ case CEC_MSG_REQUEST_ACTIVE_SOURCE:
+ case CEC_MSG_SYSTEM_AUDIO_MODE_STATUS:
+ return 0;
+
+ case CEC_MSG_STANDBY:
+ p->splitter->is_standby = true;
+ cec_out_standby(p->splitter);
+ return 0;
+
+ case CEC_MSG_IMAGE_VIEW_ON:
+ case CEC_MSG_TEXT_VIEW_ON:
+ p->splitter->is_standby = false;
+ cec_out_wakeup(p->splitter, msg->msg[1]);
+ return 0;
+
+ case CEC_MSG_ACTIVE_SOURCE:
+ cec_out_active_source(p->splitter);
+ return 0;
+
+ case CEC_MSG_SET_SYSTEM_AUDIO_MODE:
+ cec_out_passthrough(p->splitter, msg);
+ return 0;
+
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ p->splitter->give_device_power_status_dest =
+ cec_msg_initiator(msg);
+ if (cec_out_give_device_power_status(p->splitter))
+ cec_feature_abort_reason(p->adap, msg,
+ CEC_OP_ABORT_INCORRECT_MODE);
+ return 0;
+
+ case CEC_MSG_REQUEST_CURRENT_LATENCY: {
+ u16 pa;
+
+ p->splitter->request_current_latency_dest =
+ cec_msg_initiator(msg);
+ cec_ops_request_current_latency(msg, &pa);
+ if (pa == p->adap->phys_addr &&
+ cec_out_request_current_latency(p->splitter))
+ cec_feature_abort_reason(p->adap, msg,
+ CEC_OP_ABORT_INCORRECT_MODE);
+ return 0;
+ }
+
+ default:
+ return -ENOMSG;
+ }
+ return -ENOMSG;
+}
+
+void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *p,
+ const struct cec_msg *msg,
+ struct cec_adapter *input_adap)
+{
+ struct cec_splitter *splitter = p->splitter;
+ u32 seq = msg->sequence | (1U << 31);
+
+	/*
+	 * If this is the result of a failed non-blocking transmit, or of a
+	 * failed reply to a non-blocking transmit, then check if the
+	 * original transmit was to get the current power status or latency.
+	 * If so, assume that the remote device is for one reason or another
+	 * unavailable and that it is in the same power status as the
+	 * splitter, or has no video latency.
+	 */
+ if ((cec_msg_recv_is_tx_result(msg) && !(msg->tx_status & CEC_TX_STATUS_OK)) ||
+ (cec_msg_recv_is_rx_result(msg) && !(msg->rx_status & CEC_RX_STATUS_OK))) {
+ u8 tx_op = msg->msg[1];
+
+ if (msg->len < 2)
+ return;
+ if (cec_msg_recv_is_rx_result(msg) &&
+ (msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT))
+ tx_op = msg->msg[2];
+ switch (tx_op) {
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ if (p->out_give_device_power_status_seq != seq)
+ break;
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ p->power_status = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY :
+ CEC_OP_POWER_STATUS_ON;
+ cec_out_report_power_status(splitter, input_adap);
+ break;
+ case CEC_MSG_REQUEST_CURRENT_LATENCY:
+ if (p->out_request_current_latency_seq != seq)
+ break;
+ p->video_latency = 1;
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ cec_out_report_current_latency(splitter, input_adap);
+ break;
+ }
+ return;
+ }
+
+ if (cec_msg_recv_is_tx_result(msg)) {
+ if (p->out_request_current_latency_seq != seq)
+ return;
+ p->out_request_current_latency_ts = ns_to_ktime(msg->tx_ts);
+ return;
+ }
+}
+
+/*
+ * CEC messages received on an HDMI output of the splitter
+ * are processed here.
+ */
+int cec_splitter_received_output(struct cec_splitter_port *p, struct cec_msg *msg,
+ struct cec_adapter *input_adap)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_splitter *splitter = p->splitter;
+ u32 seq = msg->sequence | (1U << 31);
+ struct cec_msg reply = {};
+ u16 pa;
+
+ if (!adap->is_configured || msg->len < 2)
+ return -ENOMSG;
+
+ switch (msg->msg[1]) {
+ case CEC_MSG_REPORT_POWER_STATUS: {
+ u8 pwr;
+
+ cec_ops_report_power_status(msg, &pwr);
+ if (pwr > CEC_OP_POWER_STATUS_TO_STANDBY)
+ pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY :
+ CEC_OP_POWER_STATUS_TO_ON;
+ p->power_status = pwr;
+ if (p->out_give_device_power_status_seq == seq) {
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ }
+ cec_out_report_power_status(splitter, input_adap);
+ return 0;
+ }
+
+ case CEC_MSG_REPORT_CURRENT_LATENCY: {
+ u8 video_lat;
+ u8 low_lat_mode;
+ u8 audio_out_comp;
+ u8 audio_out_delay;
+
+ cec_ops_report_current_latency(msg, &pa,
+ &video_lat, &low_lat_mode,
+ &audio_out_comp, &audio_out_delay);
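+		/* 0 and values >= 252 are invalid latencies: assume none */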
+ if (!video_lat || video_lat >= 252)
+ video_lat = 1;
+ p->video_latency = video_lat;
+ if (p->out_request_current_latency_seq == seq) {
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ }
+ cec_out_report_current_latency(splitter, input_adap);
+ return 0;
+ }
+
+ case CEC_MSG_STANDBY:
+ case CEC_MSG_ROUTING_CHANGE:
+ case CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS:
+ return 0;
+
+ case CEC_MSG_ACTIVE_SOURCE:
+ cec_ops_active_source(msg, &pa);
+ if (pa == 0)
+ p->is_active_source = false;
+ return 0;
+
+ case CEC_MSG_REQUEST_ACTIVE_SOURCE:
+ if (!p->is_active_source)
+ return 0;
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_active_source(&reply, adap->phys_addr);
+ cec_transmit_msg(adap, &reply, false);
+ return 0;
+
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_report_power_status(&reply, splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY :
+ CEC_OP_POWER_STATUS_ON);
+ cec_transmit_msg(adap, &reply, false);
+ return 0;
+
+ case CEC_MSG_SET_STREAM_PATH:
+ cec_ops_set_stream_path(msg, &pa);
+ if (pa == adap->phys_addr) {
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_active_source(&reply, pa);
+ cec_transmit_msg(adap, &reply, false);
+ }
+ return 0;
+
+ default:
+ return -ENOMSG;
+ }
+ return -ENOMSG;
+}
+
+/*
+ * Called every second to check for timed out messages and whether there
+ * still is a video sink connected or not.
+ *
+ * Returns true if sinks were lost.
+ */
+bool cec_splitter_poll(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap, bool debug)
+{
+ ktime_t now = ktime_get();
+ u8 pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON;
+ unsigned int max_delay_ms = input_adap->xfer_timeout_ms + 2000;
+ unsigned int i;
+ bool res = false;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ s64 pwr_delta, lat_delta;
+ bool pwr_timeout, lat_timeout;
+
+ if (!p)
+ continue;
+
+ pwr_delta = ktime_ms_delta(now, p->out_give_device_power_status_ts);
+ pwr_timeout = p->out_give_device_power_status_seq &&
+ pwr_delta >= max_delay_ms;
+ lat_delta = ktime_ms_delta(now, p->out_request_current_latency_ts);
+ lat_timeout = p->out_request_current_latency_seq &&
+ lat_delta >= max_delay_ms;
+
+ /*
+ * If the HPD is low for more than 5 seconds, then assume no display
+ * is connected.
+ */
+ if (p->found_sink && ktime_to_ns(p->lost_sink_ts) &&
+ ktime_ms_delta(now, p->lost_sink_ts) > 5000) {
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: HPD low for more than 5s, assume no sink is connected.\n",
+ p->port);
+ p->found_sink = false;
+ p->lost_sink_ts = ktime_set(0, 0);
+ res = true;
+ }
+
+ /*
+ * If the power status request timed out, then set the port's
+ * power status to that of the splitter, ensuring a consistent
+ * power state.
+ */
+ if (pwr_timeout) {
+ mutex_lock(&p->adap->lock);
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: give up on power status for seq %u\n",
+ p->port,
+ p->out_give_device_power_status_seq & ~(1 << 31));
+ p->power_status = pwr;
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ mutex_unlock(&p->adap->lock);
+ cec_out_report_power_status(splitter, input_adap);
+ }
+
+ /*
+ * If the current latency request timed out, then set the port's
+ * latency to 1.
+ */
+ if (lat_timeout) {
+ mutex_lock(&p->adap->lock);
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: give up on latency for seq %u\n",
+ p->port,
+ p->out_request_current_latency_seq & ~(1 << 31));
+ p->video_latency = 1;
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ mutex_unlock(&p->adap->lock);
+ cec_out_report_current_latency(splitter, input_adap);
+ }
+ }
+ return res;
+}
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h
new file mode 100644
index 000000000000..7422f7c5719e
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _CEC_SPLITTER_H_
+#define _CEC_SPLITTER_H_
+
+struct cec_splitter;
+
+#define STATE_CHANGE_MAX_REPEATS 2
+
+struct cec_splitter_port {
+ struct cec_splitter *splitter;
+ struct cec_adapter *adap;
+ unsigned int port;
+ bool is_active_source;
+ bool found_sink;
+ ktime_t lost_sink_ts;
+ u32 out_request_current_latency_seq;
+ ktime_t out_request_current_latency_ts;
+ u8 video_latency;
+ u32 out_give_device_power_status_seq;
+ ktime_t out_give_device_power_status_ts;
+ u8 power_status;
+};
+
+struct cec_splitter {
+ struct device *dev;
+ unsigned int num_out_ports;
+ struct cec_splitter_port **ports;
+
+ /* High-level splitter state */
+ u8 request_current_latency_dest;
+ u8 give_device_power_status_dest;
+ bool is_standby;
+};
+
+void cec_splitter_unconfigured_output(struct cec_splitter_port *port);
+void cec_splitter_configured_output(struct cec_splitter_port *port);
+int cec_splitter_received_input(struct cec_splitter_port *port, struct cec_msg *msg);
+int cec_splitter_received_output(struct cec_splitter_port *port, struct cec_msg *msg,
+ struct cec_adapter *input_adap);
+void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *port,
+ const struct cec_msg *msg,
+ struct cec_adapter *input_adap);
+bool cec_splitter_poll(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap, bool debug);
+
+#endif
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
new file mode 100644
index 000000000000..8526f613a40e
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -0,0 +1,1836 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Currently this driver does not fully support the serial port of the
+ * Extron; only the USB port is fully supported.
+ *
+ * Issues specific to using the serial port instead of USB, since the
+ * serial port cannot detect whether the device is powered off:
+ *
+ * - Some periodic ping mechanism is needed to detect when the Extron is
+ * powered off and when it is powered on again.
+ * - What to do when it is powered off and the driver is modprobed? Keep
+ * trying to contact the Extron indefinitely?
+ */
+
+#include <linux/completion.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+
+#include "extron-da-hd-4k-plus.h"
+
+MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-1)");
+
+static unsigned int vendor_id;
+module_param(vendor_id, uint, 0444);
+MODULE_PARM_DESC(vendor_id, "CEC Vendor ID");
+
+static char manufacturer_name[4];
+module_param_string(manufacturer_name, manufacturer_name,
+ sizeof(manufacturer_name), 0644);
+MODULE_PARM_DESC(manufacturer_name,
+ "EDID Vendor String (3 uppercase characters)");
+
+static bool hpd_never_low;
+module_param(hpd_never_low, bool, 0644);
+MODULE_PARM_DESC(hpd_never_low, "Input HPD will never go low (1), or go low if all output HPDs are low (0, default)");
+
+#define EXTRON_TIMEOUT_SECS 6
+
+static const u8 hdmi_edid[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x31, 0x30, 0x38, 0x30,
+ 0x70, 0x36, 0x30, 0x0a, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x95,
+
+ 0x02, 0x03, 0x1b, 0xf1, 0x42, 0x10, 0x01, 0x23,
+ 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x68,
+ 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x21, 0x01,
+ 0xe2, 0x00, 0xca, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89,
+};
+
+static const u8 hdmi_edid_4k_300[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x87,
+
+ 0x02, 0x03, 0x1f, 0xf1, 0x43, 0x10, 0x5f, 0x01,
+ 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00,
+ 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c,
+ 0x21, 0x00, 0x20, 0x01, 0xe2, 0x00, 0xca, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6,
+};
+
+static const u8 hdmi_edid_4k_600[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, 0xe8,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x4c,
+
+ 0x02, 0x03, 0x28, 0xf1, 0x44, 0x61, 0x5f, 0x10,
+ 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00,
+ 0x00, 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00,
+ 0x3c, 0x21, 0x00, 0x20, 0x01, 0x67, 0xd8, 0x5d,
+ 0xc4, 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
+};
+
+static int extron_send_byte(struct serio *serio, char byte)
+{
+ int err, i;
+
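+	/* serio_write() may fail transiently, so retry for up to ~10 ms */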
+ for (i = 0; i < 100; i++) {
+ err = serio_write(serio, byte);
+ if (!err)
+ break;
+ usleep_range(80, 120);
+ }
+ if (err)
+ dev_warn(&serio->dev, "unable to write byte after 100 attempts\n");
+ return err ? -EIO : 0;
+}
+
+static int extron_send_len(struct serio *serio, const char *command,
+ const unsigned char *bin, unsigned int len)
+{
+ int err = 0;
+
+ for (; !err && *command; command++)
+ err = extron_send_byte(serio, *command);
+ if (!err)
+ err = extron_send_byte(serio, '\r');
+ if (bin)
+ for (; !err && len; len--)
+ err = extron_send_byte(serio, *bin++);
+ return err;
+}
+
+static int extron_send_and_wait_len(struct extron *extron, struct extron_port *port,
+ const char *cmd, const unsigned char *bin,
+ unsigned int len, const char *response)
+{
+ int timeout = EXTRON_TIMEOUT_SECS * HZ;
+ int err;
+
+ if (debug) {
+ if (response)
+ dev_info(extron->dev, "transmit %s (response: %s)\n",
+ cmd, response);
+ else
+ dev_info(extron->dev, "transmit %s\n", cmd);
+ }
+
+ mutex_lock(&extron->serio_lock);
+ if (port) {
+ init_completion(&port->cmd_done);
+ port->cmd_error = 0;
+ port->response = response;
+ } else {
+ init_completion(&extron->cmd_done);
+ extron->cmd_error = 0;
+ extron->response = response;
+ }
+ err = extron_send_len(extron->serio, cmd, bin, len);
+
+ if (!err && response &&
+ !wait_for_completion_timeout(port ? &port->cmd_done : &extron->cmd_done, timeout)) {
+ dev_info(extron->dev, "transmit %s failed with %s (expected: %s)\n",
+ cmd, extron->reply, response);
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && response && (port ? port->cmd_error : extron->cmd_error)) {
+ dev_info(extron->dev, "transmit %s failed with E%02u (expected: %s)\n",
+ cmd, port ? port->cmd_error : extron->cmd_error, response);
+ if (port)
+ port->cmd_error = 0;
+ else
+ extron->cmd_error = 0;
+ err = -EPROTO;
+ }
+ if (port)
+ port->response = NULL;
+ else
+ extron->response = NULL;
+ mutex_unlock(&extron->serio_lock);
+ return err;
+}
+
+static int extron_send_and_wait(struct extron *extron, struct extron_port *port,
+ const char *cmd, const char *response)
+{
+ return extron_send_and_wait_len(extron, port, cmd, NULL, 0, response);
+}
+
+static void extron_parse_edid(struct extron_port *port)
+{
+ const u8 *edid = port->edid;
+ unsigned int i, end;
+ u8 d;
+
+ port->has_4kp30 = false;
+ port->has_4kp60 = false;
+ port->has_qy = false;
+ port->has_qs = false;
+ /* Store Established Timings 1 and 2 */
+ port->est_i = edid[0x23];
+ port->est_ii = edid[0x24];
+
+	/* Check the DTDs in the base block */
+ for (i = 0; i < 4; i++) {
+ const u8 *dtd = edid + 0x36 + i * 18;
+ unsigned int w, h;
+ unsigned int mhz;
+ u64 pclk;
+
+ if (!dtd[0] && !dtd[1])
+ continue;
+ w = dtd[2] + ((dtd[4] & 0xf0) << 4);
+ h = dtd[5] + ((dtd[7] & 0xf0) << 4);
+ if (w != 3840 || h != 2160)
+ continue;
+
+ w += dtd[3] + ((dtd[4] & 0x0f) << 8);
+ h += dtd[6] + ((dtd[7] & 0x0f) << 8);
+ pclk = dtd[0] + (dtd[1] << 8);
+ pclk *= 100000;
+ mhz = div_u64(pclk, w * h);
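+		/*
+		 * The DTD pixel clock is in units of 10 kHz; with the 10x
+		 * scale factor above and the roughly 10 Mpixel 4k totals
+		 * this is approximately the pixel clock in MHz, so compare
+		 * against 297 MHz (4kp30) and 594 MHz (4kp60).
+		 */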
+ if (mhz >= 297)
+ port->has_4kp30 = true;
+ if (mhz >= 594)
+ port->has_4kp60 = true;
+ }
+
+ if (port->edid_blocks == 1)
+ return;
+
+ edid += 128;
+
+ /* Return if not a CTA-861 extension block */
+ if (edid[0] != 0x02 || edid[1] != 0x03)
+ return;
+
+ /* search Video Data Block (tag 2) */
+ d = edid[2] & 0x7f;
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ return;
+
+ i = 4;
+ end = d;
+
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ /* Avoid buffer overrun in case the EDID is malformed */
+ if (i + len + 1 > 0x7f)
+ return;
+
+ switch (tag) {
+ case 2: /* Video Data Block */
+ /* Search for VIC 97 */
+ if (memchr(edid + i + 1, 97, len))
+ port->has_4kp60 = true;
+ /* Search for VIC 95 */
+ if (memchr(edid + i + 1, 95, len))
+ port->has_4kp30 = true;
+ break;
+
+ case 7: /* Use Extended Tag */
+ switch (edid[i + 1]) {
+ case 0: /* Video Capability Data Block */
+ if (edid[i + 2] & 0x80)
+ port->has_qy = true;
+ if (edid[i + 2] & 0x40)
+ port->has_qs = true;
+ break;
+ }
+ break;
+ }
+ i += len + 1;
+ } while (i < end);
+}
+
+static int get_edid_tag_location(const u8 *edid, unsigned int size,
+ u8 want_tag, u8 ext_tag)
+{
+ unsigned int offset = 128;
+ int i, end;
+ u8 d;
+
+ edid += offset;
+
+ /* Return if not a CTA-861 extension block */
+ if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03)
+ return -1;
+
+ /* search tag */
+ d = edid[0x02] & 0x7f;
+ if (d <= 4)
+ return -1;
+
+ i = 0x04;
+ end = 0x00 + d;
+
+ do {
+ unsigned char tag = edid[i] >> 5;
+ unsigned char len = edid[i] & 0x1f;
+
+ if (tag != want_tag || i + len > end) {
+ i += len + 1;
+ continue;
+ }
+
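+		/* Tag 7 is 'Use Extended Tag': match the extended tag too */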
+ if (tag < 7 || (len >= 1 && edid[i + 1] == ext_tag))
+ return offset + i;
+ i += len + 1;
+ } while (i < end);
+ return -1;
+}
+
+static void extron_edid_crc(u8 *edid)
+{
+ u8 sum = 0;
+ int offset;
+
+ /* Update CRC */
+ for (offset = 0; offset < 127; offset++)
+ sum += edid[offset];
+ edid[127] = 256 - sum;
+}
+
+/*
+ * Fill in an EDID string. As per VESA EDID-1.3, strings are at most 13
+ * chars long. If shorter, add a 0x0a character after the string and pad
+ * the remainder with spaces.
+ */
+static void extron_set_edid_string(u8 *start, const char *s)
+{
+ const unsigned int max_len = 13;
+ int len = strlen(s);
+
+ memset(start, ' ', max_len);
+ if (len > max_len)
+ len = max_len;
+ memcpy(start, s, len);
+ if (len < max_len)
+ start[len] = 0x0a;
+}
+
+static void extron_update_edid(struct extron_port *port, unsigned int blocks)
+{
+ int offset;
+ u8 c1, c2;
+
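+	/*
+	 * Pack the three-letter manufacturer name into the EDID PNP ID:
+	 * each letter is a 5-bit value ('A' = 1), packed into 15 bits
+	 * across EDID bytes 8 and 9.
+	 */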
+ c1 = ((manufacturer_name[0] - '@') << 2) |
+ (((manufacturer_name[1] - '@') >> 3) & 0x03);
+ c2 = (((manufacturer_name[1] - '@') & 0x07) << 5) |
+ ((manufacturer_name[2] - '@') & 0x1f);
+
+ port->edid_tmp[8] = c1;
+ port->edid_tmp[9] = c2;
+
+ /* Set Established Timings, but always enable VGA */
+ port->edid_tmp[0x23] = port->est_i | 0x20;
+ port->edid_tmp[0x24] = port->est_ii;
+
+ /* Set the Monitor Name to the unit name */
+ extron_set_edid_string(port->edid_tmp + 0x5f, port->extron->unit_name);
+ /* Set the ASCII String to the CEC adapter name */
+ extron_set_edid_string(port->edid_tmp + 0x71, port->adap->name);
+
+ extron_edid_crc(port->edid_tmp);
+
+ /* Find Video Capability Data Block */
+ offset = get_edid_tag_location(port->edid_tmp, blocks * 128, 7, 0);
+ if (offset > 0) {
+ port->edid_tmp[offset + 2] &= ~0xc0;
+ if (port->has_qy)
+ port->edid_tmp[offset + 2] |= 0x80;
+ if (port->has_qs)
+ port->edid_tmp[offset + 2] |= 0x40;
+ }
+
+ extron_edid_crc(port->edid_tmp + 128);
+}
+
+static int extron_write_edid(struct extron_port *port,
+ const u8 *edid, unsigned int blocks)
+{
+ struct extron *extron = port->extron;
+ u16 phys_addr = CEC_PHYS_ADDR_INVALID;
+ int ret;
+
+ if (cec_get_edid_spa_location(edid, blocks * 128))
+ phys_addr = 0;
+
+ if (mutex_lock_interruptible(&extron->edid_lock))
+ return -EINTR;
+
+ memcpy(port->edid_tmp, edid, blocks * 128);
+
+ if (manufacturer_name[0])
+ extron_update_edid(port, blocks);
+
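+	/*
+	 * Upload the new EDID as file "in.bin", then assign that file as
+	 * the EDID of input 1.
+	 */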
+ ret = extron_send_and_wait_len(port->extron, port, "W+UF256,in.bin",
+ port->edid_tmp, sizeof(port->edid_tmp),
+ "Upl");
+ if (ret)
+ goto unlock;
+ ret = extron_send_and_wait(port->extron, port, "WI1,in.binEDID",
+ "EdidI01");
+ if (ret)
+ goto unlock;
+
+ port->edid_blocks = blocks;
+ memcpy(port->edid, port->edid_tmp, blocks * 128);
+ port->read_edid = true;
+ mutex_unlock(&extron->edid_lock);
+
+ cec_s_phys_addr(port->adap, phys_addr, false);
+ return 0;
+
+unlock:
+ mutex_unlock(&extron->edid_lock);
+ return ret;
+}
+
+static void update_edid_work(struct work_struct *w)
+{
+ struct extron *extron = container_of(w, struct extron,
+ work_update_edid.work);
+ struct extron_port *in = extron->ports[extron->num_out_ports];
+ struct extron_port *p;
+ bool has_edid = false;
+ bool has_4kp30 = true;
+ bool has_4kp60 = true;
+ bool has_qy = true;
+ bool has_qs = true;
+ u8 est_i = 0xff;
+ u8 est_ii = 0xff;
+ unsigned int out;
+
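+	/*
+	 * The input EDID should only advertise what the connected outputs
+	 * support: combine the Established Timings and the 4kp30/4kp60 and
+	 * QS/QY capabilities of the outputs that reported an EDID.
+	 */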
+ for (out = 0; has_4kp60 && out < extron->num_out_ports; out++) {
+ p = extron->ports[out];
+ if (p->read_edid) {
+ has_4kp60 = p->has_4kp60;
+ est_i &= p->est_i;
+ est_ii &= p->est_ii;
+ has_edid = true;
+ }
+ }
+ for (out = 0; has_4kp30 && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_4kp30 = extron->ports[out]->has_4kp30;
+
+ for (out = 0; has_qy && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_qy = extron->ports[out]->has_qy;
+
+ for (out = 0; has_qs && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_qs = extron->ports[out]->has_qs;
+
+ /* exit if no output port had an EDID */
+ if (!has_edid)
+ return;
+
+ /* exit if the input EDID properties remained unchanged */
+ if (has_4kp60 == in->has_4kp60 && has_4kp30 == in->has_4kp30 &&
+ has_qy == in->has_qy && has_qs == in->has_qs &&
+ est_i == in->est_i && est_ii == in->est_ii)
+ return;
+
+ in->has_4kp60 = has_4kp60;
+ in->has_4kp30 = has_4kp30;
+ in->has_qy = has_qy;
+ in->has_qs = has_qs;
+ in->est_i = est_i;
+ in->est_ii = est_ii;
+ extron_write_edid(extron->ports[extron->num_out_ports],
+ has_4kp60 ? hdmi_edid_4k_600 :
+ (has_4kp30 ? hdmi_edid_4k_300 : hdmi_edid), 2);
+}
+
+static void extron_read_edid(struct extron_port *port)
+{
+ struct extron *extron = port->extron;
+ char cmd[10], reply[10];
+ unsigned int idx;
+
+ idx = port->port.port + (port->is_input ? 0 : extron->num_in_ports);
+ snprintf(cmd, sizeof(cmd), "WR%uEDID", idx);
+ snprintf(reply, sizeof(reply), "EdidR%u", idx);
+ if (mutex_lock_interruptible(&extron->edid_lock))
+ return;
+ if (port->read_edid)
+ goto unlock;
+ extron->edid_bytes_read = 0;
+ extron->edid_port = port;
+ port->edid_blocks = 0;
+ if (!port->has_edid)
+ goto no_edid;
+
+ extron->edid_reading = true;
+
+ if (!extron_send_and_wait(extron, port, cmd, reply))
+ wait_for_completion_killable_timeout(&extron->edid_completion,
+ msecs_to_jiffies(1000));
+ if (port->edid_blocks) {
+ extron_parse_edid(port);
+ port->read_edid = true;
+ if (!port->is_input)
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 1);
+ }
+no_edid:
+ extron->edid_reading = false;
+unlock:
+ mutex_unlock(&extron->edid_lock);
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ if (manufacturer_name[0])
+ schedule_delayed_work(&extron->work_update_edid,
+ msecs_to_jiffies(1000));
+}
+
+static void extron_irq_work_handler(struct work_struct *work)
+{
+ struct extron_port *port =
+ container_of(work, struct extron_port, irq_work);
+ struct extron *extron = port->extron;
+ unsigned long flags;
+ bool update_pa;
+ u16 pa;
+ bool update_has_signal;
+ bool has_signal;
+ bool update_has_edid;
+ bool has_edid;
+ u32 status;
+
+ spin_lock_irqsave(&port->msg_lock, flags);
+ while (port->rx_msg_num) {
+ spin_unlock_irqrestore(&port->msg_lock, flags);
+ cec_received_msg(port->adap,
+ &port->rx_msg[port->rx_msg_cur_idx]);
+ spin_lock_irqsave(&port->msg_lock, flags);
+ if (port->rx_msg_num)
+ port->rx_msg_num--;
+ port->rx_msg_cur_idx =
+ (port->rx_msg_cur_idx + 1) % NUM_MSGS;
+ }
+ update_pa = port->update_phys_addr;
+ pa = port->phys_addr;
+ port->update_phys_addr = false;
+ update_has_signal = port->update_has_signal;
+ has_signal = port->has_signal;
+ port->update_has_signal = false;
+ update_has_edid = port->update_has_edid;
+ has_edid = port->has_edid;
+ port->update_has_edid = false;
+ status = port->tx_done_status;
+ port->tx_done_status = 0;
+ spin_unlock_irqrestore(&port->msg_lock, flags);
+
+ if (status)
+ cec_transmit_done(port->adap, status, 0, 0, 0, 0);
+
+ if (update_has_signal && port->is_input)
+ v4l2_ctrl_s_ctrl(port->ctrl_rx_power_present, has_signal);
+
+ if (update_has_edid && !port->is_input) {
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_hotplug,
+ port->has_edid);
+ if (port->has_edid) {
+ port->port.found_sink = true;
+ port->port.lost_sink_ts = ktime_set(0, 0);
+ } else {
+ port->port.lost_sink_ts = ktime_get();
+ }
+ if (!has_edid) {
+ port->edid_blocks = 0;
+ port->read_edid = false;
+ if (extron->edid_reading && !has_edid &&
+ extron->edid_port == port)
+ extron->edid_reading = false;
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 0);
+ } else if (!extron->edid_reading || extron->edid_port != port) {
+ extron_read_edid(port);
+ }
+ }
+ if (update_pa)
+ cec_s_phys_addr(port->adap, pa, false);
+}
+
+static void extron_process_received(struct extron_port *port, const char *data)
+{
+ struct cec_msg msg = {};
+ unsigned int len = strlen(data);
+ unsigned long irq_flags;
+ unsigned int idx;
+
+ if (!port || port->disconnected)
+ return;
+
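+	/*
+	 * Each CEC message byte is encoded as '%' followed by two hex
+	 * digits, and the sequence is terminated by '*', hence the length
+	 * and format checks below.
+	 */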
+ if (len < 5 || (len - 2) % 3 || data[len - 2] != '*')
+ goto malformed;
+
+ while (*data != '*') {
+ int v = hex2bin(&msg.msg[msg.len], data + 1, 1);
+
+ if (*data != '%' || v)
+ goto malformed;
+ msg.len++;
+ data += 3;
+ }
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ idx = (port->rx_msg_cur_idx + port->rx_msg_num) %
+ NUM_MSGS;
+ if (port->rx_msg_num == NUM_MSGS) {
+ dev_warn(port->dev,
+ "message queue is full, dropping %*ph\n",
+ msg.len, msg.msg);
+ spin_unlock_irqrestore(&port->msg_lock,
+ irq_flags);
+ return;
+ }
+ port->rx_msg_num++;
+ port->rx_msg[idx] = msg;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (!port->disconnected)
+ schedule_work(&port->irq_work);
+ return;
+
+malformed:
+ dev_info(port->extron->dev, "malformed msg received: '%s'\n", data);
+}
+
+static void extron_port_signal_change(struct extron_port *port, bool has_sig)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_has_signal && port->has_signal != has_sig) {
+ port->update_has_signal = true;
+ update = true;
+ }
+ port->has_signal = has_sig;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_signal_change(struct extron *extron, const char *data)
+{
+ unsigned int i;
+
+ extron_port_signal_change(extron->ports[extron->num_out_ports],
+ data[0] == '1');
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_port_signal_change(extron->ports[i],
+ data[2 + 2 * i] != '0');
+}
+
+static void extron_port_edid_change(struct extron_port *port, bool has_edid)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_has_edid && port->has_edid != has_edid) {
+ port->update_has_edid = true;
+ update = true;
+ }
+ port->has_edid = has_edid;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_edid_change(struct extron *extron, const char *data)
+{
+ unsigned int i;
+
+ /*
+ * Do nothing if the Extron isn't ready yet. Trying to do this
+ * while the Extron firmware is still settling will fail.
+ */
+ if (!extron->is_ready)
+ return;
+
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_port_edid_change(extron->ports[i],
+ data[2 + 2 * i] != '0');
+}
+
+static void extron_phys_addr_change(struct extron_port *port, u16 pa)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_phys_addr && port->phys_addr != pa) {
+ update = true;
+ port->update_phys_addr = true;
+ }
+ port->phys_addr = pa;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_tx_done(struct extron_port *port, char status)
+{
+ unsigned long irq_flags;
+ unsigned int tx_status;
+
+ if (!port)
+ return;
+
+ switch (status) {
+ case '0':
+ tx_status = CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES;
+ break;
+ case '1':
+ tx_status = CEC_TX_STATUS_OK;
+ break;
+ default:
+ tx_status = CEC_TX_STATUS_ERROR;
+ break;
+ }
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ port->tx_done_status = tx_status;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (!port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_add_edid(struct extron_port *port, const char *hex)
+{
+ struct extron *extron = port ? port->extron : NULL;
+
+ if (!port || port != extron->edid_port)
+ return;
+ while (extron->edid_bytes_read < sizeof(port->edid) && *hex) {
+ int err = hex2bin(&port->edid[extron->edid_bytes_read], hex, 1);
+
+ if (err) {
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+ break;
+ }
+ extron->edid_bytes_read++;
+ hex += 2;
+ }
+ if (extron->edid_bytes_read == 128 &&
+ port->edid[126] == 0) {
+ /* There are no extension blocks, we're done */
+ port->edid_blocks = 1;
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+ }
+ if (extron->edid_bytes_read < sizeof(port->edid))
+ return;
+ if (!*hex)
+ port->edid_blocks = 2;
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+}
+
+static irqreturn_t extron_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct extron *extron = serio_get_drvdata(serio);
+ struct extron_port *port = NULL;
+ bool found_response;
+ unsigned int p;
+
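+	/*
+	 * Responses are CR/LF-terminated strings. The parser below handles
+	 * unsolicited reports (Sig* signal changes, Hdcp* hotplug/EDID
+	 * changes, Pceci/Pceco physical address changes), received CEC
+	 * messages (Ceci/Ceco), transmit results (DcecI/DcecO), EDID data
+	 * (EdidR), command replies and Exx error codes.
+	 */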
+ if (data == '\r' || data == '\n') {
+ if (extron->idx == 0)
+ return IRQ_HANDLED;
+ memcpy(extron->data, extron->buf, extron->idx);
+ extron->len = extron->idx;
+ extron->data[extron->len] = 0;
+ if (debug)
+ dev_info(extron->dev, "received %s\n", extron->data);
+ extron->idx = 0;
+ if (!memcmp(extron->data, "Sig", 3) &&
+ extron->data[4] == '*') {
+ extron_process_signal_change(extron, extron->data + 3);
+ } else if (!memcmp(extron->data, "Hdcp", 4) &&
+ extron->data[5] == '*') {
+ extron_process_edid_change(extron, extron->data + 4);
+ } else if (!memcmp(extron->data, "DcecI", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_in_ports) {
+ unsigned int p = extron->data[5] - '1';
+
+ p += extron->num_out_ports;
+ extron_process_tx_done(extron->ports[p],
+ extron->data[extron->len - 1]);
+ } else if (!memcmp(extron->data, "Ceci", 4) &&
+ extron->data[4] >= '1' &&
+ extron->data[4] < '1' + extron->num_in_ports &&
+ extron->data[5] == '*') {
+ unsigned int p = extron->data[4] - '1';
+
+ p += extron->num_out_ports;
+ extron_process_received(extron->ports[p],
+ extron->data + 6);
+ } else if (!memcmp(extron->data, "DcecO", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_out_ports) {
+ unsigned int p = extron->data[5] - '1';
+
+ extron_process_tx_done(extron->ports[p],
+ extron->data[extron->len - 1]);
+ } else if (!memcmp(extron->data, "Ceco", 4) &&
+ extron->data[4] >= '1' &&
+ extron->data[4] < '1' + extron->num_out_ports &&
+ extron->data[5] == '*') {
+ unsigned int p = extron->data[4] - '1';
+
+ extron_process_received(extron->ports[p],
+ extron->data + 6);
+ } else if (!memcmp(extron->data, "Pceco", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_out_ports) {
+ unsigned int p = extron->data[5] - '1';
+ unsigned int tmp_pa[2] = { 0xff, 0xff };
+
+ if (sscanf(extron->data + 7, "%%%02x%%%02x",
+ &tmp_pa[0], &tmp_pa[1]) == 2)
+ extron_phys_addr_change(extron->ports[p],
+ tmp_pa[0] << 8 | tmp_pa[1]);
+ } else if (!memcmp(extron->data, "Pceci", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_in_ports) {
+ unsigned int p = extron->data[5] - '1';
+ unsigned int tmp_pa[2] = { 0xff, 0xff };
+
+ p += extron->num_out_ports;
+ if (sscanf(extron->data + 7, "%%%02x%%%02x",
+ &tmp_pa[0], &tmp_pa[1]) == 2)
+ extron_phys_addr_change(extron->ports[p],
+ tmp_pa[0] << 8 | tmp_pa[1]);
+ } else if (!memcmp(extron->data, "EdidR", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_ports &&
+ extron->data[6] == '*') {
+ unsigned int p = extron->data[5] - '1';
+
+ if (p)
+ p--;
+ else
+ p = extron->num_out_ports;
+ extron_add_edid(extron->ports[p], extron->data + 7);
+ } else if (extron->edid_reading && extron->len == 32 &&
+ extron->edid_port) {
+ extron_add_edid(extron->edid_port, extron->data);
+ }
+
+ found_response = false;
+ if (extron->response &&
+ !strncmp(extron->response, extron->data,
+ strlen(extron->response)))
+ found_response = true;
+
+ for (p = 0; !found_response && p < extron->num_ports; p++) {
+ port = extron->ports[p];
+ if (port && port->response &&
+ !strncmp(port->response, extron->data,
+ strlen(port->response)))
+ found_response = true;
+ }
+
+ if (!found_response && extron->response &&
+ extron->data[0] == 'E' &&
+ isdigit(extron->data[1]) &&
+ isdigit(extron->data[2]) &&
+ !extron->data[3]) {
+ extron->cmd_error = (extron->data[1] - '0') * 10 +
+ extron->data[2] - '0';
+ extron->response = NULL;
+ complete(&extron->cmd_done);
+ }
+
+ if (!found_response)
+ return IRQ_HANDLED;
+
+ memcpy(extron->reply, extron->data, extron->len);
+ extron->reply[extron->len] = 0;
+ if (!port) {
+ extron->response = NULL;
+ complete(&extron->cmd_done);
+ } else {
+ port->response = NULL;
+ complete(&port->cmd_done);
+ }
+ return IRQ_HANDLED;
+ }
+
+ if (extron->idx >= DATA_SIZE - 1) {
+ dev_info(extron->dev,
+ "throwing away %d bytes of garbage\n", extron->idx);
+ extron->idx = 0;
+ }
+ extron->buf[extron->idx++] = (char)data;
+ return IRQ_HANDLED;
+}
+
+static int extron_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ return (port->disconnected && enable) ? -ENODEV : 0;
+}
+
+static int extron_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ char cmd[26];
+ char resp[25];
+ u8 la = log_addr == CEC_LOG_ADDR_INVALID ? 15 : log_addr;
+ int err;
+
+ if (port->disconnected)
+ return -ENODEV;
+ snprintf(cmd, sizeof(cmd), "W%c%u*%uLCEC",
+ port->direction, port->port.port, la);
+ snprintf(resp, sizeof(resp), "Lcec%c%u*%u",
+ port->direction, port->port.port, la);
+ err = extron_send_and_wait(port->extron, port, cmd, resp);
+ return log_addr != CEC_LOG_ADDR_INVALID && err ? err : 0;
+}
+
+static int extron_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ char buf[CEC_MAX_MSG_SIZE * 3 + 1];
+ char cmd[CEC_MAX_MSG_SIZE * 3 + 13];
+ unsigned int i;
+
+ if (port->disconnected)
+ return -ENODEV;
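+	/*
+	 * Build the transmit command: every payload byte after the header
+	 * is encoded as '%' plus two hex digits, e.g. (assuming an output
+	 * port with direction 'O'): "WO1*4*15*%82%10%00DCEC" for an Active
+	 * Source message from LA 4.
+	 */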
+ buf[0] = 0;
+ for (i = 0; i < msg->len - 1; i++)
+ sprintf(buf + i * 3, "%%%02X", msg->msg[i + 1]);
+ snprintf(cmd, sizeof(cmd), "W%c%u*%u*%u*%sDCEC",
+ port->direction, port->port.port,
+ cec_msg_initiator(msg), cec_msg_destination(msg), buf);
+ return extron_send_and_wait(port->extron, port, cmd, NULL);
+}
+
+static void extron_cec_adap_unconfigured(struct cec_adapter *adap)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (port->disconnected)
+ return;
+ if (debug)
+ dev_info(port->extron->dev, "unconfigured port %d (%s)\n",
+ port->port.port,
+ port->extron->splitter.is_standby ? "Off" : "On");
+ if (!port->is_input)
+ cec_splitter_unconfigured_output(&port->port);
+}
+
+static void extron_cec_configured(struct cec_adapter *adap)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (port->disconnected)
+ return;
+ if (debug)
+ dev_info(port->extron->dev, "configured port %d (%s)\n",
+ port->port.port,
+ port->extron->splitter.is_standby ? "Off" : "On");
+ if (!port->is_input)
+ cec_splitter_configured_output(&port->port);
+}
+
+static void extron_cec_adap_nb_transmit_canceled(struct cec_adapter *adap,
+ const struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ struct cec_adapter *input_adap;
+
+ if (!vendor_id)
+ return;
+ if (port->disconnected || port->is_input)
+ return;
+ input_adap = port->extron->ports[port->extron->num_out_ports]->adap;
+ cec_splitter_nb_transmit_canceled_output(&port->port, msg, input_adap);
+}
+
+static int extron_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (!vendor_id)
+ return -ENOMSG;
+ if (port->disconnected)
+ return -ENOMSG;
+ if (port->is_input)
+ return cec_splitter_received_input(&port->port, msg);
+ return cec_splitter_received_output(&port->port, msg,
+ port->extron->ports[port->extron->num_out_ports]->adap);
+}
+
+#define log_printf(adap, file, fmt, arg...) \
+ do { \
+ if (file) \
+ seq_printf((file), fmt, ## arg); \
+ else \
+ pr_info("cec-%s: " fmt, (adap)->name, ## arg); \
+ } while (0)
+
+static const char * const pwr_state[] = {
+ "on",
+ "standby",
+ "to on",
+ "to standby",
+};
+
+static void extron_adap_status_port(struct extron_port *port, struct seq_file *file)
+{
+ struct cec_adapter *adap = port->adap;
+
+ if (port->disconnected) {
+ log_printf(adap, file,
+ "\tport %u: disconnected\n", port->port.port);
+ return;
+ }
+ if (port->is_input)
+ log_printf(adap, file,
+ "\tport %u: %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %s\n",
+ port->port.port,
+ port->has_signal ? "has" : "no",
+ port->has_edid ? "has" : "no",
+ port->has_4kp30 ? "has" : "no",
+ port->has_4kp60 ? "has" : "no",
+ port->has_qs ? "" : "no ",
+ port->has_qy ? "" : "no ",
+ !port->port.adap->is_configured ? "not configured" :
+ pwr_state[port->extron->splitter.is_standby]);
+ else
+ log_printf(adap, file,
+ "\tport %u: %s sink, %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %sactive source, is %s\n",
+ port->port.port,
+ port->port.found_sink ? "found" : "no",
+ port->has_signal ? "has" : "no",
+ port->has_edid ? "has" : "no",
+ port->has_4kp30 ? "has" : "no",
+ port->has_4kp60 ? "has" : "no",
+ port->has_qs ? "" : "no ",
+ port->has_qy ? "" : "no ",
+ port->port.is_active_source ? "" : "not ",
+ !port->port.adap->is_configured ? "not configured" :
+ pwr_state[port->port.power_status & 3]);
+ if (port->port.out_give_device_power_status_seq)
+ log_printf(adap, file,
+ "\tport %u: querying power status (%u, %lldms)\n",
+ port->port.port,
+ port->port.out_give_device_power_status_seq & ~(1 << 31),
+ ktime_ms_delta(ktime_get(),
+ port->port.out_give_device_power_status_ts));
+ if (port->port.out_request_current_latency_seq)
+ log_printf(adap, file,
+ "\tport %u: querying latency (%u, %lldms)\n",
+ port->port.port,
+ port->port.out_request_current_latency_seq & ~(1 << 31),
+ ktime_ms_delta(ktime_get(),
+ port->port.out_request_current_latency_ts));
+}
+
+static void extron_adap_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ struct extron *extron = port->extron;
+ unsigned int i;
+
+ log_printf(adap, file, "name: %s type: %s\n",
+ extron->unit_name, extron->unit_type);
+ log_printf(adap, file, "model: 60-160%c-01 (1 input, %u outputs)\n",
+ '6' + extron->num_out_ports / 2, extron->num_out_ports);
+ log_printf(adap, file, "firmware version: %s CEC engine version: %s\n",
+ extron->unit_fw_version, extron->unit_cec_engine_version);
+ if (extron->hpd_never_low)
+ log_printf(adap, file, "always keep input HPD high\n");
+ else
+ log_printf(adap, file,
+ "pull input HPD low if all output HPDs are low\n");
+ if (vendor_id)
+ log_printf(adap, file,
+ "splitter vendor ID: 0x%06x\n", vendor_id);
+ if (manufacturer_name[0])
+ log_printf(adap, file, "splitter manufacturer name: %s\n",
+ manufacturer_name);
+ log_printf(adap, file, "splitter power status: %s\n",
+ pwr_state[extron->splitter.is_standby]);
+ log_printf(adap, file, "%s port: %d (%s)\n",
+ port->is_input ? "input" : "output",
+ port->port.port, port->name);
+ log_printf(adap, file, "splitter input port:\n");
+ extron_adap_status_port(extron->ports[extron->num_out_ports], file);
+
+ log_printf(adap, file, "splitter output ports:\n");
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_adap_status_port(extron->ports[i], file);
+
+ if (!port->has_edid || !port->read_edid)
+ return;
+
+ for (i = 0; i < port->edid_blocks * 128; i += 16) {
+ if (i % 128 == 0)
+ log_printf(adap, file, "\n");
+ log_printf(adap, file, "EDID: %*ph\n", 16, port->edid + i);
+ }
+}
+
+static const struct cec_adap_ops extron_cec_adap_ops = {
+ .adap_enable = extron_cec_adap_enable,
+ .adap_log_addr = extron_cec_adap_log_addr,
+ .adap_transmit = extron_cec_adap_transmit,
+ .adap_nb_transmit_canceled = extron_cec_adap_nb_transmit_canceled,
+ .adap_unconfigured = extron_cec_adap_unconfigured,
+ .adap_status = extron_adap_status,
+ .configured = extron_cec_configured,
+ .received = extron_received,
+};
+
+static int extron_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ strscpy(cap->driver, "extron-da-hd-4k-plus-cec", sizeof(cap->driver));
+ strscpy(cap->card, cap->driver, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "serio:%s", port->name);
+ return 0;
+}
+
+static int extron_enum_input(struct file *file, void *priv, struct v4l2_input *inp)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ if (inp->index)
+ return -EINVAL;
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ snprintf(inp->name, sizeof(inp->name), "HDMI IN %u", port->port.port);
+ inp->status = v4l2_ctrl_g_ctrl(port->ctrl_rx_power_present) ?
+ 0 : V4L2_IN_ST_NO_SIGNAL;
+ return 0;
+}
+
+static int extron_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int extron_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static int extron_enum_output(struct file *file, void *priv, struct v4l2_output *out)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ if (out->index)
+ return -EINVAL;
+ out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ snprintf(out->name, sizeof(out->name), "HDMI OUT %u", port->port.port);
+ return 0;
+}
+
+static int extron_g_output(struct file *file, void *priv, unsigned int *o)
+{
+ *o = 0;
+ return 0;
+}
+
+static int extron_s_output(struct file *file, void *priv, unsigned int o)
+{
+ return o ? -EINVAL : 0;
+}
+
+static int extron_g_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (port->disconnected)
+ return -ENODEV;
+ if (edid->pad)
+ return -EINVAL;
+ if (!port->has_edid)
+ return -ENODATA;
+ if (!port->read_edid)
+ extron_read_edid(port);
+ if (!port->read_edid)
+ return -ENODATA;
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = port->edid_blocks;
+ return 0;
+ }
+ if (edid->start_block >= port->edid_blocks)
+ return -EINVAL;
+ if (edid->blocks > port->edid_blocks - edid->start_block)
+ edid->blocks = port->edid_blocks - edid->start_block;
+ memcpy(edid->edid, port->edid + edid->start_block * 128, edid->blocks * 128);
+ return 0;
+}
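
The handler above implements the standard two-step VIDIOC_G_EDID protocol: a call with start_block == 0 and blocks == 0 only reports the total block count, and a second call fetches the data. A minimal userspace sketch of that protocol follows; the device path and the bare-bones error handling are illustrative assumptions, not part of the driver.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_edid edid = { 0 };		/* start_block = blocks = 0 */
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_G_EDID, &edid))	/* step 1: query block count */
		return 1;
	edid.edid = malloc(edid.blocks * 128);
	if (!edid.edid || ioctl(fd, VIDIOC_G_EDID, &edid))	/* step 2: read */
		return 1;
	printf("read %u EDID block(s)\n", edid.blocks);
	free(edid.edid);
	return 0;
}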
+
+static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (port->disconnected)
+ return -ENODEV;
+ if (edid->pad)
+ return -EINVAL;
+
+ /* Unfortunately it is not possible to clear the EDID */
+ if (edid->blocks == 0)
+ return -EINVAL;
+
+ if (edid->blocks > MAX_EDID_BLOCKS) {
+ edid->blocks = MAX_EDID_BLOCKS;
+ return -E2BIG;
+ }
+
+ if (cec_get_edid_spa_location(edid->edid, edid->blocks * 128))
+ v4l2_set_edid_phys_addr(edid->edid, edid->blocks * 128, 0);
+ extron_parse_edid(port);
+ return extron_write_edid(port, edid->edid, edid->blocks);
+}
+
+static int extron_log_status(struct file *file, void *priv)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ extron_adap_status(port->adap, NULL);
+ return v4l2_ctrl_log_status(file, priv);
+}
+
+static const struct v4l2_ioctl_ops extron_ioctl_ops = {
+ .vidioc_querycap = extron_querycap,
+ .vidioc_enum_input = extron_enum_input,
+ .vidioc_g_input = extron_g_input,
+ .vidioc_s_input = extron_s_input,
+ .vidioc_enum_output = extron_enum_output,
+ .vidioc_g_output = extron_g_output,
+ .vidioc_s_output = extron_s_output,
+ .vidioc_g_edid = extron_g_edid,
+ .vidioc_s_edid = extron_s_edid,
+ .vidioc_log_status = extron_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations extron_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct video_device extron_videodev = {
+ .name = "extron-da-hd-4k-plus-cec",
+ .vfl_dir = VFL_DIR_RX,
+ .fops = &extron_fops,
+ .ioctl_ops = &extron_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static void extron_disconnect(struct serio *serio)
+{
+ struct extron *extron = serio_get_drvdata(serio);
+ unsigned int p;
+
+ kthread_stop(extron->kthread_setup);
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ port->disconnected = true;
+ cancel_work_sync(&port->irq_work);
+ }
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->cec_was_registered) {
+ if (cec_is_registered(port->adap))
+ cec_unregister_adapter(port->adap);
+ /*
+ * After registering the adapter, the
+ * extron_setup_thread() function took an extra
+ * reference to the device. We call the corresponding
+ * put here.
+ */
+ cec_put_device(port->adap);
+ } else {
+ cec_delete_adapter(port->adap);
+ }
+ video_unregister_device(&port->vdev);
+ }
+
+ complete(&extron->edid_completion);
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ v4l2_ctrl_handler_free(&port->hdl);
+ mutex_destroy(&port->video_lock);
+ kfree(port);
+ }
+ mutex_destroy(&extron->edid_lock);
+ mutex_destroy(&extron->serio_lock);
+ extron->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
+}
+
+static int extron_setup(struct extron *extron)
+{
+ struct serio *serio = extron->serio;
+ struct extron_port *port;
+ u8 *reply = extron->reply;
+ unsigned int p;
+ unsigned int major, minor;
+ int err;
+
+ /*
+ * Attempt to disable CEC: this prevents received CEC messages
+ * from interfering with the other serial port traffic.
+ */
+ extron_send_and_wait(extron, NULL, "WI1*0CCEC", NULL);
+ extron_send_and_wait(extron, NULL, "WO0*CCEC", NULL);
+
+ /* Obtain unit part number */
+ err = extron_send_and_wait(extron, NULL, "N", "Pno");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit part number: %s\n", reply + 3);
+ if (strcmp(reply + 3, "60-1607-01") &&
+ strcmp(reply + 3, "60-1608-01") &&
+ strcmp(reply + 3, "60-1609-01")) {
+ dev_err(extron->dev, "Unsupported model\n");
+ return -ENODEV;
+ }
+ /* Up to 6 output ports and one input port */
+ extron->num_out_ports = 2 * (reply[9] - '6');
+ extron->splitter.num_out_ports = extron->num_out_ports;
+ extron->splitter.ports = extron->splitter_ports;
+ extron->splitter.dev = extron->dev;
+ extron->num_in_ports = 1;
+ extron->num_ports = extron->num_out_ports + extron->num_in_ports;
+ dev_info(extron->dev, "Unit output ports: %d\n", extron->num_out_ports);
+ dev_info(extron->dev, "Unit input ports: %d\n", extron->num_in_ports);
+
+ err = extron_send_and_wait(extron, NULL, "W CN", "Ipn ");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit name: %s\n", reply + 4);
+ strscpy(extron->unit_name, reply + 4, sizeof(extron->unit_name));
+
+ err = extron_send_and_wait(extron, NULL, "*Q", "Bld");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit FW Version: %s\n", reply + 3);
+ strscpy(extron->unit_fw_version, reply + 3,
+ sizeof(extron->unit_fw_version));
+ if (sscanf(reply + 3, "%u.%u.", &major, &minor) < 2 ||
+ major < 1 || (major == 1 && minor < 2)) {
+ dev_err(extron->dev,
+ "Unsupported FW version (only 1.02 or up is supported)\n");
+ return -ENODEV;
+ }
+
+ err = extron_send_and_wait(extron, NULL, "2i", "Inf02*");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit Type: %s\n", reply + 6);
+ strscpy(extron->unit_type, reply + 6, sizeof(extron->unit_type));
+
+ err = extron_send_and_wait(extron, NULL, "39Q", "Ver39*");
+ if (err)
+ return err;
+ dev_info(extron->dev, "CEC Engine Version: %s\n", reply + 6);
+ strscpy(extron->unit_cec_engine_version, reply + 6,
+ sizeof(extron->unit_cec_engine_version));
+
+ /* Disable CEC */
+ err = extron_send_and_wait(extron, NULL, "WI1*0CCEC", "CcecI1*");
+ if (err)
+ return err;
+ err = extron_send_and_wait(extron, NULL, "WO0*CCEC", "CcecO0");
+ if (err)
+ return err;
+
+ extron->hpd_never_low = hpd_never_low;
+
+ /* Pull input port HPD low if all output ports also have a low HPD */
+ if (hpd_never_low) {
+ dev_info(extron->dev, "Always keep input HPD high\n");
+ } else {
+ dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n");
+ extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1");
+ }
+
+ for (p = 0; p < extron->num_ports; p++) {
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL;
+
+ if (vendor_id)
+ caps &= ~CEC_CAP_LOG_ADDRS;
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ INIT_WORK(&port->irq_work, extron_irq_work_handler);
+ spin_lock_init(&port->msg_lock);
+ mutex_init(&port->video_lock);
+ port->extron = extron;
+ port->is_input = p >= extron->num_out_ports;
+ port->direction = port->is_input ? 'I' : 'O';
+ port->port.port = 1 + (port->is_input ? p - extron->num_out_ports : p);
+ port->port.splitter = &extron->splitter;
+ port->phys_addr = CEC_PHYS_ADDR_INVALID;
+ snprintf(port->name, sizeof(port->name), "%s-%s-%u",
+ dev_name(&serio->dev), port->is_input ? "in" : "out",
+ port->port.port);
+
+ port->dev = extron->dev;
+ port->adap = cec_allocate_adapter(&extron_cec_adap_ops, port,
+ port->name, caps, 1);
+ err = PTR_ERR_OR_ZERO(port->adap);
+ if (err < 0) {
+ kfree(port);
+ return err;
+ }
+
+ port->adap->xfer_timeout_ms = EXTRON_TIMEOUT_SECS * 1000;
+ port->port.adap = port->adap;
+ port->vdev = extron_videodev;
+ port->vdev.lock = &port->video_lock;
+ port->vdev.v4l2_dev = &extron->v4l2_dev;
+ port->vdev.ctrl_handler = &port->hdl;
+ port->vdev.device_caps = V4L2_CAP_EDID;
+ video_set_drvdata(&port->vdev, port);
+
+ v4l2_ctrl_handler_init(&port->hdl, 2);
+
+ if (port->is_input) {
+ port->vdev.vfl_dir = VFL_DIR_RX;
+ port->ctrl_rx_power_present =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT,
+ 0, 1, 0, 0);
+ port->has_edid = true;
+ } else {
+ port->vdev.vfl_dir = VFL_DIR_TX;
+ port->ctrl_tx_hotplug =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_TX_HOTPLUG,
+ 0, 1, 0, 0);
+ port->ctrl_tx_edid_present =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_TX_EDID_PRESENT,
+ 0, 1, 0, 0);
+ }
+
+ err = port->hdl.error;
+ if (err < 0) {
+ cec_delete_adapter(port->adap);
+ kfree(port);
+ return err;
+ }
+ extron->ports[p] = port;
+ extron->splitter_ports[p] = &port->port;
+ if (port->is_input && manufacturer_name[0])
+ extron_write_edid(port, hdmi_edid, 2);
+ }
+
+ /* Enable CEC (manual mode, i.e. controlled by the driver) */
+ err = extron_send_and_wait(extron, NULL, "WI1*20CCEC", "CcecI1*");
+ if (err)
+ return err;
+
+ err = extron_send_and_wait(extron, NULL, "WO20*CCEC", "CcecO20");
+ if (err)
+ return err;
+
+ /* Set logical addresses to 15 */
+ err = extron_send_and_wait(extron, NULL, "WI1*15LCEC", "LcecI1*15");
+ if (err)
+ return err;
+
+ for (p = 0; p < extron->num_out_ports; p++) {
+ char cmd[20];
+ char resp[20];
+
+ snprintf(cmd, sizeof(cmd), "WO%u*15LCEC", p + 1);
+ snprintf(resp, sizeof(resp), "LcecO%u*15", p + 1);
+ err = extron_send_and_wait(extron, extron->ports[p], cmd, resp);
+ if (err)
+ return err;
+ }
+
+ /*
+ * The Extron is now ready for operation. Specifically it is now
+ * possible to retrieve EDIDs.
+ */
+ extron->is_ready = true;
+
+ /* Query HDCP and Signal states, used to update the initial state */
+ err = extron_send_and_wait(extron, NULL, "WHDCP", "Hdcp");
+ if (err)
+ return err;
+
+ return extron_send_and_wait(extron, NULL, "WLS", "Sig");
+}
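
The port-count arithmetic in extron_setup() leans on the part number layout: reply + 3 points at "60-160X-01", so reply[9] is the digit X, and the three supported models map to 2, 4 and 6 outputs. A standalone sketch of that mapping, and of the inverse used in extron_adap_status() above:

#include <assert.h>

/* "60-1607-01" -> 2 outputs, "60-1608-01" -> 4, "60-1609-01" -> 6 */
static unsigned int out_ports_from_digit(char digit)
{
	return 2 * (digit - '6');
}

/* inverse: output count back to the model digit in "60-160%c-01" */
static char digit_from_out_ports(unsigned int num_out_ports)
{
	return '6' + num_out_ports / 2;
}

int main(void)
{
	assert(out_ports_from_digit('7') == 2);
	assert(out_ports_from_digit('9') == 6);
	assert(digit_from_out_ports(4) == '8');
	return 0;
}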
+
+static int extron_setup_thread(void *_extron)
+{
+ struct extron *extron = _extron;
+ struct extron_port *port;
+ unsigned int p;
+ bool poll_splitter = false;
+ bool was_connected = true;
+ int err;
+
+ while (1) {
+ if (kthread_should_stop())
+ return 0;
+ err = extron_send_and_wait(extron, NULL, "W3CV", "Vrb3");
+ // a serio disconnect stops this kthread, so the
+ // kthread_should_stop() check above detects it here
+ if (err >= 0)
+ break;
+ was_connected = false;
+ ssleep(1);
+ }
+
+ /*
+ * If the Extron was not connected at probe() time, i.e. it was just
+ * powered up and, although the serial port is working, the firmware
+ * is still booting up, then wait 10 seconds for it to settle.
+ *
+ * Trying to continue too soon means that some commands will not
+ * work yet.
+ */
+ if (!was_connected)
+ ssleep(10);
+
+ err = extron_setup(extron);
+ if (err)
+ goto disable_ports;
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct cec_log_addrs log_addrs = {};
+
+ port = extron->ports[p];
+ if (port->is_input && manufacturer_name[0])
+ v4l2_disable_ioctl(&port->vdev, VIDIOC_S_EDID);
+ err = video_register_device(&port->vdev, VFL_TYPE_VIDEO, -1);
+ if (err) {
+ v4l2_err(&extron->v4l2_dev, "Failed to register video device\n");
+ goto disable_ports;
+ }
+
+ err = cec_register_adapter(port->adap, extron->dev);
+ if (err < 0)
+ goto disable_ports;
+ port->dev = &port->adap->devnode.dev;
+ port->cec_was_registered = true;
+ /*
+ * This driver is unusual in that the whole setup takes place
+ * in a thread since it can take such a long time before the
+ * Extron Splitter boots up, and you do not want to block the
+ * probe function on this driver. In addition, as soon as
+ * CEC adapters come online, they can be used, and you cannot
+ * just unregister them again if an error occurs, since that
+ * can delete the underlying CEC adapter, which might already
+ * be in use.
+ *
+ * So we take an additional reference to the adapter. This
+ * allows us to unregister the device node if needed, without
+ * deleting the actual adapter.
+ *
+ * In the disconnect function we will do the corresponding
+ * put call to ensure the adapter is deleted.
+ */
+ cec_get_device(port->adap);
+
+ /*
+ * If vendor_id wasn't set, then userspace configures the
+ * CEC devices. Otherwise the driver configures the CEC
+ * devices as TV (input) and Playback (outputs) devices
+ * and the driver processes all CEC messages.
+ */
+ if (!vendor_id)
+ continue;
+
+ log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ log_addrs.num_log_addrs = 1;
+ log_addrs.vendor_id = vendor_id;
+ if (port->is_input) {
+ strscpy(log_addrs.osd_name, "Splitter In",
+ sizeof(log_addrs.osd_name));
+ log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+ log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_TV;
+ log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
+ } else {
+ snprintf(log_addrs.osd_name, sizeof(log_addrs.osd_name),
+ "Splitter Out%u", port->port.port);
+ log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
+ log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
+ }
+ err = cec_s_log_addrs(port->adap, &log_addrs, false);
+ if (err < 0)
+ goto disable_ports;
+ }
+ poll_splitter = true;
+
+ port = extron->ports[extron->num_out_ports];
+ while (!kthread_should_stop()) {
+ ssleep(1);
+ if (hpd_never_low != extron->hpd_never_low) {
+ /*
+ * Keep input port HPD high at all times, or pull it low
+ * if all output ports also have a low HPD
+ */
+ if (hpd_never_low) {
+ dev_info(extron->dev, "Always keep input HPD high\n");
+ extron_send_and_wait(extron, NULL, "W0ihpd", "Ihpd0");
+ } else {
+ dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n");
+ extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1");
+ }
+ extron->hpd_never_low = hpd_never_low;
+ }
+ if (poll_splitter &&
+ cec_splitter_poll(&extron->splitter, port->adap, debug) &&
+ manufacturer_name[0]) {
+ /*
+ * Sinks were lost, so see if the input edid needs to
+ * be updated.
+ */
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ schedule_delayed_work(&extron->work_update_edid,
+ msecs_to_jiffies(1000));
+ }
+ }
+ return 0;
+
+disable_ports:
+ extron->is_ready = false;
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ port->disconnected = true;
+ cancel_work_sync(&port->irq_work);
+ video_unregister_device(&port->vdev);
+ if (port->cec_was_registered)
+ cec_unregister_adapter(port->adap);
+ }
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ complete(&extron->edid_completion);
+ dev_err(extron->dev, "Setup failed with error %d\n", err);
+ while (!kthread_should_stop())
+ ssleep(1);
+ return err;
+}
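
The reference-counting dance described in the comment above reduces to one extra get after a successful registration, paired with the put in extron_disconnect(). A reduced sketch of the pairing, using the same CEC core helpers the driver calls; the function names here are illustrative, not driver API.

#include <media/cec.h>

/*
 * Take an extra reference after registering, so the adapter stays
 * alive even if its device node is unregistered while still in use.
 */
static int register_with_extra_ref(struct cec_adapter *adap,
				   struct device *parent)
{
	int err = cec_register_adapter(adap, parent);

	if (err)
		return err;
	cec_get_device(adap);
	return 0;
}

/* Drop the devnode, then drop the extra reference taken above. */
static void unregister_and_put(struct cec_adapter *adap)
{
	if (cec_is_registered(adap))
		cec_unregister_adapter(adap);
	cec_put_device(adap);
}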
+
+static int extron_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct extron *extron;
+ int err = -ENOMEM;
+
+ if (manufacturer_name[0] &&
+ (!isupper(manufacturer_name[0]) ||
+ !isupper(manufacturer_name[1]) ||
+ !isupper(manufacturer_name[2]))) {
+ dev_warn(&serio->dev, "ignoring invalid manufacturer name\n");
+ manufacturer_name[0] = 0;
+ }
+
+ extron = kzalloc(sizeof(*extron), GFP_KERNEL);
+
+ if (!extron)
+ return -ENOMEM;
+
+ extron->serio = serio;
+ extron->dev = &serio->dev;
+ mutex_init(&extron->serio_lock);
+ mutex_init(&extron->edid_lock);
+ INIT_DELAYED_WORK(&extron->work_update_edid, update_edid_work);
+
+ err = v4l2_device_register(extron->dev, &extron->v4l2_dev);
+ if (err)
+ goto free_device;
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto unreg_v4l2_dev;
+
+ serio_set_drvdata(serio, extron);
+ init_completion(&extron->edid_completion);
+
+ extron->kthread_setup = kthread_run(extron_setup_thread, extron,
+ "extron-da-hd-4k-plus-cec-%s", dev_name(&serio->dev));
+ if (!IS_ERR(extron->kthread_setup))
+ return 0;
+
+ dev_err(extron->dev, "kthread_run() failed\n");
+ err = PTR_ERR(extron->kthread_setup);
+
+ extron->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&extron->v4l2_dev);
+free_device:
+ mutex_destroy(&extron->edid_lock);
+ mutex_destroy(&extron->serio_lock);
+ kfree(extron);
+ return err;
+}
+
+static const struct serio_device_id extron_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_EXTRON_DA_HD_4K_PLUS,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, extron_serio_ids);
+
+static struct serio_driver extron_drv = {
+ .driver = {
+ .name = "extron-da-hd-4k-plus-cec",
+ },
+ .description = "Extron DA HD 4K PLUS HDMI CEC driver",
+ .id_table = extron_serio_ids,
+ .interrupt = extron_interrupt,
+ .connect = extron_connect,
+ .disconnect = extron_disconnect,
+};
+
+module_serio_driver(extron_drv);
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h
new file mode 100644
index 000000000000..b79f1253ab5d
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _EXTRON_DA_HD_4K_PLUS_H_
+#define _EXTRON_DA_HD_4K_PLUS_H_
+
+#include <linux/kthread.h>
+#include <linux/serio.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+
+#include "cec-splitter.h"
+
+#define DATA_SIZE 256
+
+#define PING_PERIOD (15 * HZ)
+
+#define NUM_MSGS CEC_MAX_MSG_RX_QUEUE_SZ
+
+#define MAX_PORTS (1 + 6)
+
+#define MAX_EDID_BLOCKS 2
+
+struct extron;
+
+struct extron_port {
+ struct cec_splitter_port port;
+ struct device *dev;
+ struct cec_adapter *adap;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrl_rx_power_present;
+ struct v4l2_ctrl *ctrl_tx_hotplug;
+ struct v4l2_ctrl *ctrl_tx_edid_present;
+ bool is_input;
+ char direction;
+ char name[26];
+ unsigned char edid[MAX_EDID_BLOCKS * 128];
+ unsigned char edid_tmp[MAX_EDID_BLOCKS * 128];
+ unsigned int edid_blocks;
+ bool read_edid;
+ struct extron *extron;
+ struct work_struct irq_work;
+ struct completion cmd_done;
+ const char *response;
+ unsigned int cmd_error;
+ struct cec_msg rx_msg[NUM_MSGS];
+ unsigned int rx_msg_cur_idx, rx_msg_num;
+ /* protect rx_msg_cur_idx and rx_msg_num */
+ spinlock_t msg_lock;
+ u32 tx_done_status;
+ bool update_phys_addr;
+ u16 phys_addr;
+ bool cec_was_registered;
+ bool disconnected;
+ bool update_has_signal;
+ bool has_signal;
+ bool update_has_edid;
+ bool has_edid;
+ bool has_4kp30;
+ bool has_4kp60;
+ bool has_qy;
+ bool has_qs;
+ u8 est_i, est_ii;
+
+ /* locks access to the video_device */
+ struct mutex video_lock;
+};
+
+struct extron {
+ struct cec_splitter splitter;
+ struct device *dev;
+ struct serio *serio;
+ /* locks access to serio */
+ struct mutex serio_lock;
+ unsigned int num_ports;
+ unsigned int num_in_ports;
+ unsigned int num_out_ports;
+ char unit_name[32];
+ char unit_type[64];
+ char unit_fw_version[32];
+ char unit_cec_engine_version[32];
+ struct extron_port *ports[MAX_PORTS];
+ struct cec_splitter_port *splitter_ports[MAX_PORTS];
+ struct v4l2_device v4l2_dev;
+ bool hpd_never_low;
+ struct task_struct *kthread_setup;
+ struct delayed_work work_update_edid;
+
+ /* serializes EDID reading */
+ struct mutex edid_lock;
+ unsigned int edid_bytes_read;
+ struct extron_port *edid_port;
+ struct completion edid_completion;
+ bool edid_reading;
+ bool is_ready;
+
+ struct completion cmd_done;
+ const char *response;
+ unsigned int cmd_error;
+ char data[DATA_SIZE];
+ unsigned int len;
+ char reply[DATA_SIZE];
+ char buf[DATA_SIZE];
+ unsigned int idx;
+};
+
+#endif
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index b6f1eb5dbbdf..3732367e0c62 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1132,8 +1132,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
* return: 0 on success, <0 on error.
*/
static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
- int mode,
- loadfirmware_t loadfirmware_handler)
+ int mode)
{
int rc = -ENOENT;
u8 *fw_buf;
@@ -1147,8 +1146,7 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
}
pr_debug("Firmware name: %s\n", fw_filename);
- if (!loadfirmware_handler &&
- !(coredev->device_flags & SMS_DEVICE_FAMILY2))
+ if (!(coredev->device_flags & SMS_DEVICE_FAMILY2))
return -EINVAL;
rc = request_firmware(&fw, fw_filename, coredev->device);
@@ -1166,10 +1164,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
memcpy(fw_buf, fw->data, fw->size);
fw_buf_size = fw->size;
- rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ?
- smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size)
- : loadfirmware_handler(coredev->context, fw_buf,
- fw_buf_size);
+ rc = smscore_load_firmware_family2(coredev, fw_buf,
+ fw_buf_size);
}
kfree(fw_buf);
@@ -1353,8 +1349,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
}
if (!(coredev->modes_supported & (1 << mode))) {
- rc = smscore_load_firmware_from_file(coredev,
- mode, NULL);
+ rc = smscore_load_firmware_from_file(coredev, mode);
if (rc >= 0)
pr_debug("firmware download success\n");
} else {
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index 82d9f8a64d99..d945a2d6d624 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -97,7 +97,6 @@ typedef int (*hotplug_t)(struct smscore_device_t *coredev,
typedef int (*setmode_t)(void *context, int mode);
typedef void (*detectmode_t)(void *context, int *mode);
typedef int (*sendrequest_t)(void *context, void *buffer, size_t size);
-typedef int (*loadfirmware_t)(void *context, void *buffer, size_t size);
typedef int (*preload_t)(void *context);
typedef int (*postload_t)(void *context);
@@ -1102,9 +1101,6 @@ extern int smscore_register_device(struct smsdevice_params_t *params,
extern void smscore_unregister_device(struct smscore_device_t *coredev);
extern int smscore_start_device(struct smscore_device_t *coredev);
-extern int smscore_load_firmware(struct smscore_device_t *coredev,
- char *filename,
- loadfirmware_t loadfirmware_handler);
extern int smscore_set_device_mode(struct smscore_device_t *coredev, int mode);
extern int smscore_get_device_mode(struct smscore_device_t *coredev);
@@ -1119,12 +1115,6 @@ extern int smsclient_sendrequest(struct smscore_client_t *client,
extern void smscore_onresponse(struct smscore_device_t *coredev,
struct smscore_buffer_t *cb);
-extern int smscore_get_common_buffer_size(struct smscore_device_t *coredev);
-extern int smscore_map_common_buffer(struct smscore_device_t *coredev,
- struct vm_area_struct *vma);
-extern int smscore_send_fw_file(struct smscore_device_t *coredev,
- u8 *ufwbuf, int size);
-
extern
struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
extern void smscore_putbuffer(struct smscore_device_t *coredev,
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 0217392fcc0d..29a8d876e6c2 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -303,14 +303,22 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
if (!p->mem_priv)
return;
- if (p->dbuf_mapped)
- call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+ if (!p->dbuf_duplicated) {
+ if (p->dbuf_mapped)
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
+ }
- call_void_memop(vb, detach_dmabuf, p->mem_priv);
dma_buf_put(p->dbuf);
p->mem_priv = NULL;
p->dbuf = NULL;
p->dbuf_mapped = 0;
+ p->bytesused = 0;
+ p->length = 0;
+ p->m.fd = 0;
+ p->data_offset = 0;
+ p->dbuf_duplicated = false;
}
/*
@@ -319,9 +327,15 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
*/
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
- unsigned int plane;
+ int plane;
- for (plane = 0; plane < vb->num_planes; ++plane)
+ /*
+ * When multiple planes share the same DMA buffer attachment, the plane
+ * with the lowest index owns the mem_priv.
+ * Put planes in reverse order so that we don't leave invalid
+ * mem_priv behind.
+ */
+ for (plane = vb->num_planes - 1; plane >= 0; --plane)
__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}
@@ -1369,7 +1383,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
- unsigned int plane;
+ unsigned int plane, i;
int ret = 0;
bool reacquired = vb->planes[0].mem_priv == NULL;
@@ -1383,11 +1397,13 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+ planes[plane].dbuf = dbuf;
+
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
plane);
ret = -EINVAL;
- goto err;
+ goto err_put_planes;
}
/* use DMABUF size if length is not provided */
@@ -1398,80 +1414,86 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
planes[plane].length, plane,
vb->planes[plane].min_length);
- dma_buf_put(dbuf);
ret = -EINVAL;
- goto err;
+ goto err_put_planes;
}
/* Skip the plane if already verified */
if (dbuf == vb->planes[plane].dbuf &&
- vb->planes[plane].length == planes[plane].length) {
- dma_buf_put(dbuf);
+ vb->planes[plane].length == planes[plane].length)
continue;
- }
dprintk(q, 3, "buffer for plane %d changed\n", plane);
- if (!reacquired) {
- reacquired = true;
+ reacquired = true;
+ }
+
+ if (reacquired) {
+ if (vb->planes[0].mem_priv) {
vb->copied_timestamp = 0;
call_void_vb_qop(vb, buf_cleanup, vb);
+ __vb2_buf_dmabuf_put(vb);
}
- /* Release previously acquired memory if present */
- __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
- vb->planes[plane].bytesused = 0;
- vb->planes[plane].length = 0;
- vb->planes[plane].m.fd = 0;
- vb->planes[plane].data_offset = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /*
+ * This is an optimization to reduce dma_buf attachment/mapping.
+ * When the same dma_buf is used for multiple planes, there is no need
+ * to create duplicated attachments.
+ */
+ for (i = 0; i < plane; ++i) {
+ if (planes[plane].dbuf == vb->planes[i].dbuf &&
+ q->alloc_devs[plane] == q->alloc_devs[i]) {
+ vb->planes[plane].dbuf_duplicated = true;
+ vb->planes[plane].dbuf = vb->planes[i].dbuf;
+ vb->planes[plane].mem_priv = vb->planes[i].mem_priv;
+ break;
+ }
+ }
- /* Acquire each plane's memory */
- mem_priv = call_ptr_memop(attach_dmabuf,
- vb,
- q->alloc_devs[plane] ? : q->dev,
- dbuf,
- planes[plane].length);
- if (IS_ERR(mem_priv)) {
- dprintk(q, 1, "failed to attach dmabuf\n");
- ret = PTR_ERR(mem_priv);
- dma_buf_put(dbuf);
- goto err;
- }
+ if (vb->planes[plane].dbuf_duplicated)
+ continue;
- vb->planes[plane].dbuf = dbuf;
- vb->planes[plane].mem_priv = mem_priv;
- }
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].dbuf,
+ planes[plane].length);
+ if (IS_ERR(mem_priv)) {
+ dprintk(q, 1, "failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ goto err_put_vb2_buf;
+ }
- /*
- * This pins the buffer(s) with dma_buf_map_attachment()). It's done
- * here instead just before the DMA, while queueing the buffer(s) so
- * userspace knows sooner rather than later if the dma-buf map fails.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->planes[plane].dbuf_mapped)
- continue;
+ vb->planes[plane].dbuf = planes[plane].dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
- ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
- if (ret) {
- dprintk(q, 1, "failed to map dmabuf for plane %d\n",
- plane);
- goto err;
+ /*
+ * This pins the buffer(s) with dma_buf_map_attachment(). It's done
+ * here instead of just before the DMA, while queueing the buffer(s) so
+ * userspace knows sooner rather than later if the dma-buf map fails.
+ */
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(q, 1, "failed to map dmabuf for plane %d\n",
+ plane);
+ goto err_put_vb2_buf;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
}
- vb->planes[plane].dbuf_mapped = 1;
- }
- /*
- * Now that everything is in order, copy relevant information
- * provided by userspace.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->planes[plane].bytesused = planes[plane].bytesused;
- vb->planes[plane].length = planes[plane].length;
- vb->planes[plane].m.fd = planes[plane].m.fd;
- vb->planes[plane].data_offset = planes[plane].data_offset;
- }
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.fd = planes[plane].m.fd;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
- if (reacquired) {
/*
* Call driver-specific initialization on the newly acquired buffer,
* if provided.
@@ -1479,19 +1501,28 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
dprintk(q, 1, "buffer initialization failed\n");
- goto err;
+ goto err_put_vb2_buf;
}
+ } else {
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ dma_buf_put(planes[plane].dbuf);
}
ret = call_vb_qop(vb, buf_prepare, vb);
if (ret) {
dprintk(q, 1, "buffer preparation failed\n");
call_void_vb_qop(vb, buf_cleanup, vb);
- goto err;
+ goto err_put_vb2_buf;
}
return 0;
-err:
+
+err_put_planes:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (!IS_ERR_OR_NULL(planes[plane].dbuf))
+ dma_buf_put(planes[plane].dbuf);
+ }
+err_put_vb2_buf:
/* In case of errors, release planes that were already acquired */
__vb2_buf_dmabuf_put(vb);
@@ -2602,13 +2633,6 @@ int vb2_core_queue_init(struct vb2_queue *q)
return -EINVAL;
/*
- * The minimum requirement is 2: one buffer is used
- * by the hardware while the other is being processed by userspace.
- */
- if (q->min_reqbufs_allocation < 2)
- q->min_reqbufs_allocation = 2;
-
- /*
* If the driver needs 'min_queued_buffers' in the queue before
* calling start_streaming() then the minimum requirement is
* 'min_queued_buffers + 1' to keep at least one buffer available
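
The __prepare_dmabuf() rework above hinges on one sharing rule: a plane whose dma-buf and allocation device match an earlier plane borrows that plane's attachment instead of creating its own, and only the owning (lowest-index) plane maps and detaches it, which is why __vb2_buf_dmabuf_put() now walks the planes in reverse. A standalone sketch of just the sharing rule, with a hypothetical reduced plane type:

#include <stdbool.h>

struct plane {
	void *dbuf;		/* dma-buf the plane was queued with */
	void *alloc_dev;	/* device an attachment would target */
	void *mem_priv;		/* attachment, owned by the lowest index */
	bool dbuf_duplicated;	/* true when mem_priv is borrowed */
};

static void share_attachments(struct plane *p, unsigned int num_planes)
{
	for (unsigned int plane = 0; plane < num_planes; plane++)
		for (unsigned int i = 0; i < plane; i++)
			if (p[plane].dbuf == p[i].dbuf &&
			    p[plane].alloc_dev == p[i].alloc_dev) {
				p[plane].dbuf_duplicated = true;
				p[plane].mem_priv = p[i].mem_priv;
				break;
			}
}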
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 3d4fd4ef5310..bb0b7fa67b53 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
return -ENODEV;
}
if (dma_get_max_seg_size(dev) < size)
- return dma_set_max_seg_size(dev, size);
-
+ dma_set_max_seg_size(dev, size);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index f39887c04978..bf2773c5b97a 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -256,7 +256,7 @@ static void a8293_remove(struct i2c_client *client)
}
static const struct i2c_device_id a8293_id_table[] = {
- {"a8293", 0},
+ { "a8293" },
{}
};
MODULE_DEVICE_TABLE(i2c, a8293_id_table);
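
This and the many identical id-table conversions below rely on C's partial-initializer rule: members without an explicit initializer are zero-filled, so dropping the trailing 0 leaves .driver_data at 0 either way. A minimal sketch of the equivalence, with a trimmed stand-in for struct i2c_device_id:

#include <assert.h>

struct id {
	char name[20];
	unsigned long driver_data;
};

int main(void)
{
	struct id old_style = { "a8293", 0 };
	struct id new_style = { "a8293" };	/* driver_data zero-filled */

	assert(old_style.driver_data == new_style.driver_data);
	return 0;
}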
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 5afdbe244596..befd6a4eafd9 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1553,7 +1553,7 @@ static void af9013_remove(struct i2c_client *client)
}
static const struct i2c_device_id af9013_id_table[] = {
- {"af9013", 0},
+ { "af9013" },
{}
};
MODULE_DEVICE_TABLE(i2c, af9013_id_table);
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 49b7b04a7899..eed2ea4da8fa 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1173,7 +1173,7 @@ static void af9033_remove(struct i2c_client *client)
}
static const struct i2c_device_id af9033_id_table[] = {
- {"af9033", 0},
+ { "af9033" },
{}
};
MODULE_DEVICE_TABLE(i2c, af9033_id_table);
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index d02a92a81c60..58c4c489bf97 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -767,7 +767,7 @@ static void au8522_remove(struct i2c_client *client)
}
static const struct i2c_device_id au8522_id[] = {
- {"au8522", 0},
+ { "au8522" },
{}
};
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index 3f3b85743666..5e6e18819a0d 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -672,7 +672,7 @@ static void cxd2099_remove(struct i2c_client *client)
}
static const struct i2c_device_id cxd2099_id[] = {
- {"cxd2099", 0},
+ { "cxd2099" },
{}
};
MODULE_DEVICE_TABLE(i2c, cxd2099_id);
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 7feb08dccfa1..c3d8ced6c3ba 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -723,7 +723,7 @@ static void cxd2820r_remove(struct i2c_client *client)
}
static const struct i2c_device_id cxd2820r_id_table[] = {
- {"cxd2820r", 0},
+ { "cxd2820r" },
{}
};
MODULE_DEVICE_TABLE(i2c, cxd2820r_id_table);
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index b25d11be8611..6ab9d4de65ce 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2244,7 +2244,7 @@ static void lgdt3306a_remove(struct i2c_client *client)
}
static const struct i2c_device_id lgdt3306a_id_table[] = {
- {"lgdt3306a", 0},
+ { "lgdt3306a" },
{}
};
MODULE_DEVICE_TABLE(i2c, lgdt3306a_id_table);
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 081d6ad3ce72..cab442a350a5 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -983,7 +983,7 @@ static void lgdt330x_remove(struct i2c_client *client)
}
static const struct i2c_device_id lgdt330x_id_table[] = {
- {"lgdt330x", 0},
+ { "lgdt330x" },
{}
};
MODULE_DEVICE_TABLE(i2c, lgdt330x_id_table);
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 73d1e52de569..729751671c3d 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -708,7 +708,7 @@ static void mn88472_remove(struct i2c_client *client)
}
static const struct i2c_device_id mn88472_id_table[] = {
- {"mn88472", 0},
+ { "mn88472" },
{}
};
MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index eb50591c0e7a..fefc640d8afb 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -743,7 +743,7 @@ static void mn88473_remove(struct i2c_client *client)
}
static const struct i2c_device_id mn88473_id_table[] = {
- {"mn88473", 0},
+ { "mn88473" },
{}
};
MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index 2a31bde2630f..bbc2bc778225 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -1346,7 +1346,7 @@ static void mxl692_remove(struct i2c_client *client)
}
static const struct i2c_device_id mxl692_id_table[] = {
- {"mxl692", 0},
+ { "mxl692" },
{}
};
MODULE_DEVICE_TABLE(i2c, mxl692_id_table);
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 30d10fe4b33e..aa4ef9aedf17 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on
index, pid, onoff);
/* skip invalid PIDs (0x2000) */
- if (pid > 0x1fff || index > 32)
+ if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)
@@ -876,7 +876,7 @@ static void rtl2830_remove(struct i2c_client *client)
}
static const struct i2c_device_id rtl2830_id_table[] = {
- {"rtl2830", 0},
+ { "rtl2830" },
{}
};
MODULE_DEVICE_TABLE(i2c, rtl2830_id_table);
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 5142820b1b3d..3b4e46dac1bf 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */
- if (pid > 0x1fff || index > 32)
+ if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)
@@ -1125,7 +1125,7 @@ static void rtl2832_remove(struct i2c_client *client)
}
static const struct i2c_device_id rtl2832_id_table[] = {
- {"rtl2832", 0},
+ { "rtl2832" },
{}
};
MODULE_DEVICE_TABLE(i2c, rtl2832_id_table);
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 013d423d3263..f87c9357cee3 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1281,7 +1281,7 @@ static void si2165_remove(struct i2c_client *client)
}
static const struct i2c_device_id si2165_id_table[] = {
- {"si2165", 0},
+ { "si2165" },
{}
};
MODULE_DEVICE_TABLE(i2c, si2165_id_table);
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 26828fd41e68..d6b6b8bc7d4e 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -788,7 +788,7 @@ static void si2168_remove(struct i2c_client *client)
}
static const struct i2c_device_id si2168_id_table[] = {
- {"si2168", 0},
+ { "si2168" },
{}
};
MODULE_DEVICE_TABLE(i2c, si2168_id_table);
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 4d7d0b8b51b4..75adf2a4589f 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -407,7 +407,7 @@ static void sp2_remove(struct i2c_client *client)
}
static const struct i2c_device_id sp2_id[] = {
- {"sp2", 0},
+ { "sp2" },
{}
};
MODULE_DEVICE_TABLE(i2c, sp2_id);
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 3b02d504941f..f273efa330cf 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5079,7 +5079,7 @@ error:
EXPORT_SYMBOL_GPL(stv090x_attach);
static const struct i2c_device_id stv090x_id_table[] = {
- {"stv090x", 0},
+ { "stv090x" },
{}
};
MODULE_DEVICE_TABLE(i2c, stv090x_id_table);
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index c678f47d2449..33c8105da1c3 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -470,7 +470,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(stv6110x_attach);
static const struct i2c_device_id stv6110x_id_table[] = {
- {"stv6110x", 0},
+ { "stv6110x" },
{}
};
MODULE_DEVICE_TABLE(i2c, stv6110x_id_table);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 6640851d8bbc..e23794b821cd 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1230,7 +1230,7 @@ static void tda10071_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda10071_id_table[] = {
- {"tda10071_cx24118", 0},
+ { "tda10071_cx24118" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda10071_id_table);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index a5ebce57f35e..a5baca2449c7 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -710,8 +710,8 @@ static void ts2020_remove(struct i2c_client *client)
}
static const struct i2c_device_id ts2020_id_table[] = {
- {"ts2020", 0},
- {"ts2022", 0},
+ { "ts2020" },
+ { "ts2022" },
{}
};
MODULE_DEVICE_TABLE(i2c, ts2020_id_table);
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 1543d24f522c..f60271082fb5 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -347,8 +347,8 @@ static void ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { "ad5820", 0 },
- { "ad5821", 0 },
+ { "ad5820" },
+ { "ad5821" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 5ace7b5804d4..391bc75bfcd0 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -522,7 +522,7 @@ static void adp1653_remove(struct i2c_client *client)
}
static const struct i2c_device_id adp1653_id_table[] = {
- { ADP1653_NAME, 0 },
+ { ADP1653_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 4a2b9fd9e2da..ef8682b980b4 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -377,8 +377,8 @@ static void adv7170_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7170_id[] = {
- { "adv7170", 0 },
- { "adv7171", 0 },
+ { "adv7170" },
+ { "adv7171" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index e454cba4b026..384da1ec5bf9 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -432,8 +432,8 @@ static void adv7175_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7175_id[] = {
- { "adv7175", 0 },
- { "adv7176", 0 },
+ { "adv7175" },
+ { "adv7176" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7175_id);
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 2a2cace4a153..25a31a6dd456 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -619,8 +619,8 @@ static void adv7183_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7183_id[] = {
- {"adv7183", 0},
- {},
+ { "adv7183" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7183_id);
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 4fbe4e18570e..b96443404a26 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -502,8 +502,8 @@ static void adv7343_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7343_id[] = {
- {"adv7343", 0},
- {},
+ { "adv7343" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7343_id);
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 7638af455cef..c7994bd0bbd4 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -446,8 +446,8 @@ static void adv7393_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7393_id[] = {
- {"adv7393", 0},
- {},
+ { "adv7393" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7393_id);
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 261871be833f..e9406d552699 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1949,7 +1949,7 @@ static void adv7511_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7511_id[] = {
- { "adv7511-v4l2", 0 },
+ { "adv7511-v4l2" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_id);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index f2d4217310e7..014fc913225c 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3617,7 +3617,7 @@ static void adv7842_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7842_id[] = {
- { "adv7842", 0 },
+ { "adv7842" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7842_id);
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index ce840adc2aa7..ee575d01a676 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -304,8 +304,8 @@ static void ak881x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ak881x_id[] = {
- { "ak8813", 0 },
- { "ak8814", 0 },
+ { "ak8813" },
+ { "ak8814" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ak881x_id);
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 09331cf95c62..fc27238dd4d3 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -835,21 +835,30 @@ static const struct initial_reg {
be(0x0707)), /* 3F44: couple k factor 2 */
};
-static int ar0521_power_off(struct device *dev)
+static void __ar0521_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ar0521_dev *sensor = to_ar0521_dev(sd);
int i;
- clk_disable_unprepare(sensor->extclk);
-
if (sensor->reset_gpio)
- gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */
+ /* assert RESET signal */
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) {
if (sensor->supplies[i])
regulator_disable(sensor->supplies[i]);
}
+}
+
+static int ar0521_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ clk_disable_unprepare(sensor->extclk);
+ __ar0521_power_off(dev);
+
return 0;
}
@@ -878,7 +887,7 @@ static int ar0521_power_on(struct device *dev)
if (sensor->reset_gpio)
/* deassert RESET signal */
- gpiod_set_value(sensor->reset_gpio, 0);
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
usleep_range(4500, 5000); /* min 45000 clocks */
for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
@@ -908,7 +917,8 @@ static int ar0521_power_on(struct device *dev)
return 0;
off:
- ar0521_power_off(dev);
+ clk_disable_unprepare(sensor->extclk);
+ __ar0521_power_off(dev);
return ret;
}
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index b4a25cc996dc..f97245f91f88 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -457,9 +457,9 @@ static void bt819_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id bt819_id[] = {
- { "bt819a", 0 },
- { "bt817a", 0 },
- { "bt815a", 0 },
+ { "bt819a" },
+ { "bt817a" },
+ { "bt815a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt819_id);
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 814acbd6a5a8..6852aa47cafb 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -230,7 +230,7 @@ static void bt856_remove(struct i2c_client *client)
}
static const struct i2c_device_id bt856_id[] = {
- { "bt856", 0 },
+ { "bt856" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt856_id);
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index dada059cbce4..a2cc34d35ed2 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -197,7 +197,7 @@ static void bt866_remove(struct i2c_client *client)
}
static const struct i2c_device_id bt866_id[] = {
- { "bt866", 0 },
+ { "bt866" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt866_id);
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.h b/drivers/media/i2c/ccs/ccs-reg-access.h
index 78c43f92d99a..4b56b21a26b5 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.h
+++ b/drivers/media/i2c/ccs/ccs-reg-access.h
@@ -21,16 +21,13 @@
struct ccs_sensor;
-int ccs_read_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr_8only(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val);
-int ccs_write_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 val);
int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val);
int ccs_write_data_regs(struct ccs_sensor *sensor, struct ccs_reg *regs,
size_t num_regs);
-unsigned int ccs_reg_width(u32 reg);
u32 ccs_reg_conv(struct ccs_sensor *sensor, u32 reg, u32 val);
#define ccs_read(sensor, reg_name, val) \
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index 61afa3d799d2..078e0066ce4b 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -109,7 +109,7 @@ static void cs3308_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id cs3308_id[] = {
- { "cs3308", 0 },
+ { "cs3308" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs3308_id);
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index 3019a132e079..3a9797a50e82 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -189,7 +189,7 @@ static void cs5345_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id cs5345_id[] = {
- { "cs5345", 0 },
+ { "cs5345" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs5345_id);
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 82881b79e730..c4cad3293905 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -200,7 +200,7 @@ static void cs53l32a_remove(struct i2c_client *client)
}
static const struct i2c_device_id cs53l32a_id[] = {
- { "cs53l32a", 0 },
+ { "cs53l32a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 04461c893d90..a90a9e5705a0 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -3964,7 +3964,7 @@ static void cx25840_remove(struct i2c_client *client)
}
static const struct i2c_device_id cx25840_id[] = {
- { "cx25840", 0 },
+ { "cx25840" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index ca9bb29dab89..8eed4a200fd8 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -877,7 +877,10 @@ static void ub913_remove(struct i2c_client *client)
ub913_gpiochip_remove(priv);
}
-static const struct i2c_device_id ub913_id[] = { { "ds90ub913a-q1", 0 }, {} };
+static const struct i2c_device_id ub913_id[] = {
+ { "ds90ub913a-q1" },
+ {}
+};
MODULE_DEVICE_TABLE(i2c, ub913_id);
static const struct of_device_id ub913_dt_ids[] = {
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 0e88ce0ef8d7..2ddd7daa79e2 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -279,8 +279,8 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
}
static const struct i2c_device_id dw9714_id_table[] = {
- { DW9714_NAME, 0 },
- { { 0 } }
+ { DW9714_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index e932d25ca7b3..7519863d77b1 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1501,7 +1501,7 @@ static const struct of_device_id et8ek8_of_table[] = {
MODULE_DEVICE_TABLE(of, et8ek8_of_table);
static const struct i2c_device_id et8ek8_id_table[] = {
- { ET8EK8_NAME, 0 },
+ { ET8EK8_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c
index dcba29ee725c..0413c557e594 100644
--- a/drivers/media/i2c/gc05a2.c
+++ b/drivers/media/i2c/gc05a2.c
@@ -65,7 +65,7 @@
static const char *const gc05a2_test_pattern_menu[] = {
"No Pattern", "Fade_to_gray_Color Bar", "Color Bar",
- "PN9", "Horizental_gradient", "Checkboard Pattern",
+ "PN9", "Horizontal_gradient", "Checkboard Pattern",
"Slant", "Resolution", "Solid Black",
"Solid White",
};
diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c
index 7680d807e7a5..84de5cff958d 100644
--- a/drivers/media/i2c/gc08a3.c
+++ b/drivers/media/i2c/gc08a3.c
@@ -948,7 +948,7 @@ static int gc08a3_start_streaming(struct gc08a3 *gc08a3)
ret = cci_write(gc08a3->regmap, GC08A3_STREAMING_REG, 1, NULL);
if (ret < 0) {
- dev_err(gc08a3->dev, "write STRAEMING_REG failed: %d\n", ret);
+ dev_err(gc08a3->dev, "write STREAMING_REG failed: %d\n", ret);
goto err_rpm_put;
}
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 3800de974e8a..a2b824986027 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1949,7 +1949,7 @@ static const struct of_device_id imx274_of_id_table[] = {
MODULE_DEVICE_TABLE(of, imx274_of_id_table);
static const struct i2c_device_id imx274_id[] = {
- { "IMX274", 0 },
+ { "IMX274" },
{ }
};
MODULE_DEVICE_TABLE(i2c, imx274_id);
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index 8490618c5071..94276f4f2d83 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -472,6 +472,39 @@ static const struct imx283_mode supported_modes_12bit[] = {
.height = 3648,
},
},
+ {
+ /*
+ * Readout mode 3 : 3/3 binned mode (1824x1216)
+ */
+ .mode = IMX283_MODE_3,
+ .bpp = 12,
+ .width = 1824,
+ .height = 1216,
+ .min_hmax = 1894, /* Pixels (284 * 480MHz/72MHz + padding) */
+ .min_vmax = 4200, /* Lines */
+
+ /* 60.00 fps */
+ .default_hmax = 1900, /* 285 @ 480MHz/72MHz */
+ .default_vmax = 4200,
+
+ .veff = 1234,
+ .vst = 0,
+ .vct = 0,
+
+ .hbin_ratio = 3,
+ .vbin_ratio = 3,
+
+ .min_shr = 16,
+ .horizontal_ob = 32,
+ .vertical_ob = 4,
+
+ .crop = {
+ .top = 40,
+ .left = 108,
+ .width = 5472,
+ .height = 3648,
+ },
+ },
};
static const struct imx283_mode supported_modes_10bit[] = {
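A quick check of the 60 fps figure in the new mode entry, assuming hmax counts 480 MHz pixel-clock cycles as the inline comments suggest: 285 cycles at 72 MHz scale to 285 * 480/72 = 1900 at 480 MHz, and the frame rate works out to 480,000,000 / (default_hmax * default_vmax) = 480,000,000 / (1900 * 4200) ≈ 60.2 frames per second, consistent with the /* 60.00 fps */ annotation.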
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 990d74214cc2..54a1de53d497 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -997,7 +997,7 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
/* Request optional reset pin */
imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(imx335->reset_gpio)) {
dev_err(imx335->dev, "failed to get reset gpio %ld\n",
PTR_ERR(imx335->reset_gpio));
@@ -1110,8 +1110,7 @@ static int imx335_power_on(struct device *dev)
usleep_range(500, 550); /* Tlow */
- /* Set XCLR */
- gpiod_set_value_cansleep(imx335->reset_gpio, 1);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 0);
ret = clk_prepare_enable(imx335->inclk);
if (ret) {
@@ -1124,7 +1123,7 @@ static int imx335_power_on(struct device *dev)
return 0;
error_reset:
- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
return ret;
@@ -1141,7 +1140,7 @@ static int imx335_power_off(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx335 *imx335 = to_imx335(sd);
- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
clk_disable_unprepare(imx335->inclk);
regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
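The imx335 hunks above invert every reset-GPIO value, which only reads correctly with gpiod's logical polarity in mind: 1 means "asserted" (sensor held in reset), and the active-low translation to a physical level happens via the device tree flags. A minimal sketch of that convention, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_sensor_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* GPIOD_OUT_HIGH = request already asserted, i.e. held in reset */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 0);	/* deassert: release reset */
	/* ... enable clocks and program the sensor ... */
	gpiod_set_value_cansleep(reset, 1);	/* assert: back into reset */

	return 0;
}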
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 7e9c2f65fa08..0dd25eeea60b 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1520,6 +1520,7 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
static int imx355_init_controls(struct imx355 *imx355)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -1531,7 +1532,7 @@ static int imx355_init_controls(struct imx355 *imx355)
int ret;
ctrl_hdlr = &imx355->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
if (ret)
return ret;
@@ -1603,6 +1604,15 @@ static int imx355_init_controls(struct imx355 *imx355)
goto error;
}
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx355_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
imx355->sd.ctrl_handler = ctrl_hdlr;
return 0;
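The two extra controls also explain the 10 to 12 handler-size bump above: v4l2_fwnode_device_parse() reads the standard rotation/orientation firmware properties, and v4l2_ctrl_new_fwnode_properties() registers them as the camera rotation and orientation controls. The same pattern condensed into a sketch, with a hypothetical foo_ prefix:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>

static int foo_register_fwnode_ctrls(struct device *dev,
				     struct v4l2_ctrl_handler *hdl,
				     const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_fwnode_device_properties props;
	int ret;

	/* Parse "rotation" and "orientation" from DT/ACPI */
	ret = v4l2_fwnode_device_parse(dev, &props);
	if (ret)
		return ret;

	/* Expose them as read-only V4L2 controls on the handler */
	return v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
}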
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index c7089035bbc1..5ffd53e005ee 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1561,8 +1561,8 @@ static const struct of_device_id isl7998x_of_match[] = {
MODULE_DEVICE_TABLE(of, isl7998x_of_match);
static const struct i2c_device_id isl7998x_id[] = {
- { "isl79987", 0 },
- { /* sentinel */ },
+ { "isl79987" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, isl7998x_id);
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 9d0a763cd503..f3fba9179684 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -677,9 +677,9 @@ static void ks0127_remove(struct i2c_client *client)
}
static const struct i2c_device_id ks0127_id[] = {
- { "ks0127", 0 },
- { "ks0127b", 0 },
- { "ks0122s", 0 },
+ { "ks0127" },
+ { "ks0127b" },
+ { "ks0122s" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ks0127_id);
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 05283ac68f2d..f4cc844f4e3c 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -455,8 +455,8 @@ static void lm3560_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3560_id_table[] = {
- {LM3559_NAME, 0},
- {LM3560_NAME, 0},
+ { LM3559_NAME },
+ { LM3560_NAME },
{}
};
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index fab3a7e05f92..2d16e42ec224 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -386,7 +386,7 @@ static void lm3646_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3646_id_table[] = {
- {LM3646_NAME, 0},
+ { LM3646_NAME },
{}
};
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index f8a69142aae9..9e1ecfd01e2a 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -163,7 +163,7 @@ static void m52790_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id m52790_id[] = {
- { "m52790", 0 },
+ { "m52790" },
{ }
};
MODULE_DEVICE_TABLE(i2c, m52790_id);
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index cd73d2096ae4..bf02ca23a284 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1413,8 +1413,8 @@ static void max2175_remove(struct i2c_client *client)
}
static const struct i2c_device_id max2175_id[] = {
- { DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, max2175_id);
diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c
index c97de66631e0..159753b13777 100644
--- a/drivers/media/i2c/max96714.c
+++ b/drivers/media/i2c/max96714.c
@@ -25,6 +25,7 @@
#define MAX96714_NPORTS 2
#define MAX96714_PAD_SINK 0
#define MAX96714_PAD_SOURCE 1
+#define MAX96714_CSI_NLANES 4
/* DEV */
#define MAX96714_REG13 CCI_REG8(0x0d)
@@ -52,9 +53,9 @@
#define MAX96714_PATGEN_V2D CCI_REG24(0x254)
#define MAX96714_PATGEN_DE_HIGH CCI_REG16(0x257)
#define MAX96714_PATGEN_DE_LOW CCI_REG16(0x259)
-#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25B)
+#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25b)
#define MAX96714_PATGEN_GRAD_INC CCI_REG8(0x25d)
-#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25E)
+#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25e)
#define MAX96714_PATGEN_CHKB_COLOR_B CCI_REG24(0x261)
#define MAX96714_PATGEN_CHKB_RPT_CNT_A CCI_REG8(0x264)
#define MAX96714_PATGEN_CHKB_RPT_CNT_B CCI_REG8(0x265)
@@ -724,8 +725,9 @@ static int max96714_init_tx_port(struct max96714_priv *priv)
* Unused lanes need to be mapped as well to not have
* the same lanes mapped twice.
*/
- for (; lane < 4; lane++) {
- unsigned int idx = find_first_zero_bit(&lanes_used, 4);
+ for (; lane < MAX96714_CSI_NLANES; lane++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+ MAX96714_CSI_NLANES);
val |= idx << (lane * 2);
lanes_used |= BIT(idx);
@@ -757,9 +759,7 @@ static int max96714_rxport_disable_poc(struct max96714_priv *priv)
static int max96714_parse_dt_txport(struct max96714_priv *priv)
{
struct device *dev = &priv->client->dev;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
- };
+ struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct fwnode_handle *ep_fwnode;
u32 num_data_lanes;
int ret;
@@ -791,14 +791,14 @@ static int max96714_parse_dt_txport(struct max96714_priv *priv)
}
num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
- if (num_data_lanes < 1 || num_data_lanes > 4) {
+ if (num_data_lanes < 1 || num_data_lanes > MAX96714_CSI_NLANES) {
dev_err(dev,
"tx: invalid number of data lanes must be 1 to 4\n");
ret = -EINVAL;
goto err_free_vep;
}
- memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2));
+ priv->mipi_csi2 = vep.bus.mipi_csi2;
err_free_vep:
v4l2_fwnode_endpoint_free(&vep);
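The loop touched above implements the rule its comment states: all four 2-bit lane-map slots must hold distinct indexes, even for lanes the endpoint does not use, or a physical lane would be mapped twice. Pulled out as a standalone sketch (same logic, hypothetical helper name):

#include <linux/bitops.h>
#include <linux/types.h>

static u32 example_complete_lane_map(u32 val, unsigned long lanes_used,
				     unsigned int lane)
{
	/* Fill the remaining 2-bit map slots with still-unused indexes. */
	for (; lane < 4; lane++) {
		unsigned int idx = find_first_zero_bit(&lanes_used, 4);

		val |= idx << (lane * 2);
		lanes_used |= BIT(idx);
	}

	return val;
}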
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 949306485873..4e85b8eb1e77 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
@@ -24,6 +25,7 @@
#define MAX96717_PORTS 2
#define MAX96717_PAD_SINK 0
#define MAX96717_PAD_SOURCE 1
+#define MAX96717_CSI_NLANES 4
#define MAX96717_DEFAULT_CLKOUT_RATE 24000000UL
@@ -38,9 +40,35 @@
#define MAX96717_DEV_REV_MASK GENMASK(3, 0)
/* VID_TX Z */
+#define MAX96717_VIDEO_TX0 CCI_REG8(0x110)
+#define MAX96717_VIDEO_AUTO_BPP BIT(3)
#define MAX96717_VIDEO_TX2 CCI_REG8(0x112)
#define MAX96717_VIDEO_PCLKDET BIT(7)
+/* VTX_Z */
+#define MAX96717_VTX0 CCI_REG8(0x24e)
+#define MAX96717_VTX1 CCI_REG8(0x24f)
+#define MAX96717_PATTERN_CLK_FREQ GENMASK(3, 1)
+#define MAX96717_VTX_VS_DLY CCI_REG24(0x250)
+#define MAX96717_VTX_VS_HIGH CCI_REG24(0x253)
+#define MAX96717_VTX_VS_LOW CCI_REG24(0x256)
+#define MAX96717_VTX_V2H CCI_REG24(0x259)
+#define MAX96717_VTX_HS_HIGH CCI_REG16(0x25c)
+#define MAX96717_VTX_HS_LOW CCI_REG16(0x25e)
+#define MAX96717_VTX_HS_CNT CCI_REG16(0x260)
+#define MAX96717_VTX_V2D CCI_REG24(0x262)
+#define MAX96717_VTX_DE_HIGH CCI_REG16(0x265)
+#define MAX96717_VTX_DE_LOW CCI_REG16(0x267)
+#define MAX96717_VTX_DE_CNT CCI_REG16(0x269)
+#define MAX96717_VTX29 CCI_REG8(0x26b)
+#define MAX96717_VTX_MODE GENMASK(1, 0)
+#define MAX96717_VTX_GRAD_INC CCI_REG8(0x26c)
+#define MAX96717_VTX_CHKB_COLOR_A CCI_REG24(0x26d)
+#define MAX96717_VTX_CHKB_COLOR_B CCI_REG24(0x270)
+#define MAX96717_VTX_CHKB_RPT_CNT_A CCI_REG8(0x273)
+#define MAX96717_VTX_CHKB_RPT_CNT_B CCI_REG8(0x274)
+#define MAX96717_VTX_CHKB_ALT CCI_REG8(0x275)
+
/* GPIO */
#define MAX96717_NUM_GPIO 11
#define MAX96717_GPIO_REG_A(gpio) CCI_REG8(0x2be + (gpio) * 3)
@@ -82,6 +110,12 @@
/* MISC */
#define PIO_SLEW_1 CCI_REG8(0x570)
+enum max96717_vpg_mode {
+ MAX96717_VPG_DISABLED = 0,
+ MAX96717_VPG_CHECKERBOARD = 1,
+ MAX96717_VPG_GRADIENT = 2,
+};
+
struct max96717_priv {
struct i2c_client *client;
struct regmap *regmap;
@@ -89,6 +123,7 @@ struct max96717_priv {
struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
struct v4l2_subdev sd;
struct media_pad pads[MAX96717_PORTS];
+ struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *source_sd;
u16 source_sd_pad;
@@ -96,6 +131,7 @@ struct max96717_priv {
u8 pll_predef_index;
struct clk_hw clk_hw;
struct gpio_chip gpio_chip;
+ enum max96717_vpg_mode pattern;
};
static inline struct max96717_priv *sd_to_max96717(struct v4l2_subdev *sd)
@@ -131,6 +167,118 @@ static inline int max96717_start_csi(struct max96717_priv *priv, bool start)
start ? MAX96717_START_PORT_B : 0, NULL);
}
+static int max96717_apply_patgen_timing(struct max96717_priv *priv,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_state_get_format(state, MAX96717_PAD_SOURCE);
+ const u32 h_active = fmt->width;
+ const u32 h_fp = 88;
+ const u32 h_sw = 44;
+ const u32 h_bp = 148;
+ u32 h_tot;
+ const u32 v_active = fmt->height;
+ const u32 v_fp = 4;
+ const u32 v_sw = 5;
+ const u32 v_bp = 36;
+ u32 v_tot;
+ int ret = 0;
+
+ h_tot = h_active + h_fp + h_sw + h_bp;
+ v_tot = v_active + v_fp + v_sw + v_bp;
+
+ /* 75 MHz pixel clock */
+ cci_update_bits(priv->regmap, MAX96717_VTX1,
+ MAX96717_PATTERN_CLK_FREQ, 0xa, &ret);
+
+ dev_info(&priv->client->dev, "height: %d width: %d\n", fmt->height,
+ fmt->width);
+
+ cci_write(priv->regmap, MAX96717_VTX_VS_DLY, 0, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_VS_HIGH, v_sw * h_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_VS_LOW,
+ (v_active + v_fp + v_bp) * h_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_HIGH, h_sw, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_LOW, h_active + h_fp + h_bp,
+ &ret);
+ cci_write(priv->regmap, MAX96717_VTX_V2D,
+ h_tot * (v_sw + v_bp) + (h_sw + h_bp), &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_CNT, v_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_HIGH, h_active, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_LOW, h_fp + h_sw + h_bp,
+ &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_CNT, v_active, &ret);
+ /* B G R */
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_A, 0xfecc00, &ret);
+ /* B G R */
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_B, 0x006aa7, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_A, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_B, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_ALT, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_GRAD_INC, 0x10, &ret);
+
+ return ret;
+}
+
+static int max96717_apply_patgen(struct max96717_priv *priv,
+ struct v4l2_subdev_state *state)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (priv->pattern)
+ ret = max96717_apply_patgen_timing(priv, state);
+
+ cci_write(priv->regmap, MAX96717_VTX0, priv->pattern ? 0xfb : 0,
+ &ret);
+
+ val = FIELD_PREP(MAX96717_VTX_MODE, priv->pattern);
+ cci_update_bits(priv->regmap, MAX96717_VTX29, MAX96717_VTX_MODE,
+ val, &ret);
+ return ret;
+}
+
+static int max96717_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max96717_priv *priv =
+ container_of(ctrl->handler, struct max96717_priv, ctrl_handler);
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ if (priv->enabled_source_streams)
+ return -EBUSY;
+ priv->pattern = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Use bpp from bpp register */
+ ret = cci_update_bits(priv->regmap, MAX96717_VIDEO_TX0,
+ MAX96717_VIDEO_AUTO_BPP,
+ priv->pattern ? 0 : MAX96717_VIDEO_AUTO_BPP,
+ NULL);
+
+ /*
+ * Pattern generator doesn't work with tunnel mode.
+ * It needs an RGB color format, and the deserializer's tunnel mode must be disabled.
+ */
+ return cci_update_bits(priv->regmap, MAX96717_MIPI_RX_EXT11,
+ MAX96717_TUN_MODE,
+ priv->pattern ? 0 : MAX96717_TUN_MODE, &ret);
+}
+
+static const char * const max96717_test_pattern[] = {
+ "Disabled",
+ "Checkerboard",
+ "Gradient"
+};
+
+static const struct v4l2_ctrl_ops max96717_ctrl_ops = {
+ .s_ctrl = max96717_s_ctrl,
+};
+
static int max96717_gpiochip_get(struct gpio_chip *gpiochip,
unsigned int offset)
{
@@ -348,24 +496,28 @@ static int max96717_enable_streams(struct v4l2_subdev *sd,
u64 streams_mask)
{
struct max96717_priv *priv = sd_to_max96717(sd);
- struct device *dev = &priv->client->dev;
u64 sink_streams;
int ret;
- sink_streams = v4l2_subdev_state_xlate_streams(state,
- MAX96717_PAD_SOURCE,
- MAX96717_PAD_SINK,
- &streams_mask);
-
if (!priv->enabled_source_streams)
max96717_start_csi(priv, true);
- ret = v4l2_subdev_enable_streams(priv->source_sd, priv->source_sd_pad,
- sink_streams);
- if (ret) {
- dev_err(dev, "Fail to start streams:%llu on remote subdev\n",
- sink_streams);
+ ret = max96717_apply_patgen(priv, state);
+ if (ret)
goto stop_csi;
+
+ if (!priv->pattern) {
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state,
+ MAX96717_PAD_SOURCE,
+ MAX96717_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_enable_streams(priv->source_sd,
+ priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ goto stop_csi;
}
priv->enabled_source_streams |= streams_mask;
@@ -375,6 +527,7 @@ static int max96717_enable_streams(struct v4l2_subdev *sd,
stop_csi:
if (!priv->enabled_source_streams)
max96717_start_csi(priv, false);
+
return ret;
}
@@ -394,13 +547,23 @@ static int max96717_disable_streams(struct v4l2_subdev *sd,
if (!priv->enabled_source_streams)
max96717_start_csi(priv, false);
- sink_streams = v4l2_subdev_state_xlate_streams(state,
- MAX96717_PAD_SOURCE,
- MAX96717_PAD_SINK,
- &streams_mask);
+ if (!priv->pattern) {
+ int ret;
+
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state,
+ MAX96717_PAD_SOURCE,
+ MAX96717_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_disable_streams(priv->source_sd,
+ priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+ }
- return v4l2_subdev_disable_streams(priv->source_sd, priv->source_sd_pad,
- sink_streams);
+ return 0;
}
static const struct v4l2_subdev_pad_ops max96717_pad_ops = {
@@ -513,6 +676,19 @@ static int max96717_subdev_init(struct max96717_priv *priv)
v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96717_subdev_ops);
priv->sd.internal_ops = &max96717_internal_ops;
+ v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler,
+ &max96717_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(max96717_test_pattern) - 1,
+ 0, 0, max96717_test_pattern);
+ if (priv->ctrl_handler.error) {
+ ret = priv->ctrl_handler.error;
+ goto err_free_ctrl;
+ }
+
priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
priv->sd.entity.ops = &max96717_entity_ops;
@@ -552,6 +728,8 @@ err_free_state:
v4l2_subdev_cleanup(&priv->sd);
err_entity_cleanup:
media_entity_cleanup(&priv->sd.entity);
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
return ret;
}
@@ -563,6 +741,7 @@ static void max96717_subdev_uninit(struct max96717_priv *priv)
v4l2_async_nf_cleanup(&priv->notifier);
v4l2_subdev_cleanup(&priv->sd);
media_entity_cleanup(&priv->sd.entity);
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
}
struct max96717_pll_predef_freq {
@@ -588,11 +767,8 @@ max96717_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static unsigned int max96717_clk_find_best_index(struct max96717_priv *priv,
unsigned long rate)
{
- unsigned int i, idx;
- unsigned long diff_new, diff_old;
-
- diff_old = U32_MAX;
- idx = 0;
+ unsigned int i, idx = 0;
+ unsigned long diff_new, diff_old = U32_MAX;
for (i = 0; i < ARRAY_SIZE(max96717_predef_freqs); i++) {
diff_new = abs(rate - max96717_predef_freqs[i].freq);
@@ -679,8 +855,7 @@ static int max96717_register_clkout(struct max96717_priv *priv)
struct clk_init_data init = { .ops = &max96717_clk_ops };
int ret;
- init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out",
- dev_name(dev));
+ init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out", dev_name(dev));
if (!init.name)
return -ENOMEM;
@@ -763,8 +938,9 @@ static int max96717_init_csi_lanes(struct max96717_priv *priv)
* Unused lanes need to be mapped as well to not have
* the same lanes mapped twice.
*/
- for (; lane < 4; lane++) {
- unsigned int idx = find_first_zero_bit(&lanes_used, 4);
+ for (; lane < MAX96717_CSI_NLANES; lane++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+ MAX96717_CSI_NLANES);
val |= idx << (lane * 2);
lanes_used |= BIT(idx);
@@ -818,9 +994,7 @@ static int max96717_hw_init(struct max96717_priv *priv)
static int max96717_parse_dt(struct max96717_priv *priv)
{
struct device *dev = &priv->client->dev;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
- };
+ struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct fwnode_handle *ep_fwnode;
unsigned char num_data_lanes;
int ret;
@@ -838,11 +1012,11 @@ static int max96717_parse_dt(struct max96717_priv *priv)
return dev_err_probe(dev, ret, "Failed to parse sink endpoint");
num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
- if (num_data_lanes < 1 || num_data_lanes > 4)
+ if (num_data_lanes < 1 || num_data_lanes > MAX96717_CSI_NLANES)
return dev_err_probe(dev, -EINVAL,
"Invalid data lanes must be 1 to 4\n");
- memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2));
+ priv->mipi_csi2 = vep.bus.mipi_csi2;
return 0;
}
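The long register sequences in max96717_apply_patgen_timing() above lean on the v4l2-cci error-accumulation idiom: each cci_write()/cci_update_bits() takes an optional int *err and becomes a no-op once *err is non-zero, so a whole sequence needs only one final check. A minimal sketch with made-up register addresses:

#include <linux/bits.h>
#include <media/v4l2-cci.h>

static int example_cci_sequence(struct regmap *map)
{
	int ret = 0;

	/* Each call skips itself if a previous one already failed */
	cci_write(map, CCI_REG8(0x10), 0x01, &ret);
	cci_write(map, CCI_REG16(0x20), 0x1234, &ret);
	cci_update_bits(map, CCI_REG8(0x30), BIT(3), BIT(3), &ret);

	return ret;	/* first failure, if any, is reported here */
}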
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 5b72d4434224..57ba3693649a 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -424,8 +424,8 @@ static void ml86v7667_remove(struct i2c_client *client)
}
static const struct i2c_device_id ml86v7667_id[] = {
- {DRV_NAME, 0},
- {},
+ { DRV_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ml86v7667_id);
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 599a5bc7cbb3..4c0b0ad68c08 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -874,7 +874,7 @@ static const struct dev_pm_ops msp3400_pm_ops = {
};
static const struct i2c_device_id msp_id[] = {
- { "msp3400", 0 },
+ { "msp3400" },
{ }
};
MODULE_DEVICE_TABLE(i2c, msp_id);
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ad1a3ab77411..12d3e86bdc0f 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -854,7 +854,7 @@ static void mt9m001_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9m001_id[] = {
- { "mt9m001", 0 },
+ { "mt9m001" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9m001_id);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index ceeeb94c38d5..9aa5dcda3805 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1383,7 +1383,7 @@ static const struct of_device_id mt9m111_of_match[] = {
MODULE_DEVICE_TABLE(of, mt9m111_of_match);
static const struct i2c_device_id mt9m111_id[] = {
- { "mt9m111", 0 },
+ { "mt9m111" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9m111_id);
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index f4b481212356..d8735c246e52 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -112,11 +113,6 @@
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
-enum mt9p031_model {
- MT9P031_MODEL_COLOR,
- MT9P031_MODEL_MONOCHROME,
-};
-
struct mt9p031 {
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -129,7 +125,7 @@ struct mt9p031 {
struct clk *clk;
struct regulator_bulk_data regulators[3];
- enum mt9p031_model model;
+ u32 code;
struct aptina_pll pll;
unsigned int clk_div;
bool use_pll;
@@ -712,12 +708,7 @@ static int mt9p031_init_state(struct v4l2_subdev *subdev,
crop->height = MT9P031_WINDOW_HEIGHT_DEF;
format = __mt9p031_get_pad_format(mt9p031, sd_state, 0, which);
-
- if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- format->code = MEDIA_BUS_FMT_Y12_1X12;
- else
- format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
-
+ format->code = mt9p031->code;
format->width = MT9P031_WINDOW_WIDTH_DEF;
format->height = MT9P031_WINDOW_HEIGHT_DEF;
format->field = V4L2_FIELD_NONE;
@@ -1102,7 +1093,6 @@ done:
static int mt9p031_probe(struct i2c_client *client)
{
- const struct i2c_device_id *did = i2c_client_get_device_id(client);
struct mt9p031_platform_data *pdata = mt9p031_get_pdata(client);
struct i2c_adapter *adapter = client->adapter;
struct mt9p031 *mt9p031;
@@ -1127,7 +1117,7 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->pdata = pdata;
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
- mt9p031->model = did->driver_data;
+ mt9p031->code = (uintptr_t)i2c_get_match_data(client);
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
@@ -1224,26 +1214,24 @@ static void mt9p031_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9p031_id[] = {
- { "mt9p006", MT9P031_MODEL_COLOR },
- { "mt9p031", MT9P031_MODEL_COLOR },
- { "mt9p031m", MT9P031_MODEL_MONOCHROME },
- { }
+ { "mt9p006", MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { "mt9p031", MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { "mt9p031m", MEDIA_BUS_FMT_Y12_1X12 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mt9p031_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id mt9p031_of_match[] = {
- { .compatible = "aptina,mt9p006", },
- { .compatible = "aptina,mt9p031", },
- { .compatible = "aptina,mt9p031m", },
- { /* sentinel */ },
+ { .compatible = "aptina,mt9p006", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { .compatible = "aptina,mt9p031", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { .compatible = "aptina,mt9p031m", .data = (void *)MEDIA_BUS_FMT_Y12_1X12 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mt9p031_of_match);
-#endif
static struct i2c_driver mt9p031_i2c_driver = {
.driver = {
- .of_match_table = of_match_ptr(mt9p031_of_match),
+ .of_match_table = mt9p031_of_match,
.name = "mt9p031",
},
.probe = mt9p031_probe,
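A sketch of the match-data pattern the mt9p031 change switches to, using a hypothetical "foo" driver: the same media bus code is stashed in both the I2C and OF tables, and i2c_get_match_data() returns whichever table matched, so probe no longer needs i2c_client_get_device_id() or the of_match_ptr() wrapper:

#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>

static const struct i2c_device_id foo_id[] = {
	{ "foo", MEDIA_BUS_FMT_SGRBG12_1X12 },
	{ "foom", MEDIA_BUS_FMT_Y12_1X12 },
	{ /* sentinel */ }
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 },
	{ .compatible = "vendor,foom", .data = (void *)MEDIA_BUS_FMT_Y12_1X12 },
	{ /* sentinel */ }
};

static int foo_probe(struct i2c_client *client)
{
	/* Returns the matched table's data, I2C and OF alike */
	u32 code = (uintptr_t)i2c_get_match_data(client);

	return code ? 0 : -ENODEV;
}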
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index fb1588c57cc8..878dff9b7577 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1109,7 +1109,7 @@ static void mt9t112_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9t112_id[] = {
- { "mt9t112", 0 },
+ { "mt9t112" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9t112_id);
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 8834ff8786e5..055b7915260a 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -582,7 +582,7 @@ static void mt9v011_remove(struct i2c_client *c)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id mt9v011_id[] = {
- { "mt9v011", 0 },
+ { "mt9v011" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b0b98ed3c150..723fe138e7bc 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1263,8 +1263,9 @@ static void mt9v111_remove(struct i2c_client *client)
static const struct of_device_id mt9v111_of_match[] = {
{ .compatible = "aptina,mt9v111", },
- { /* sentinel */ },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mt9v111_of_match);
static struct i2c_driver mt9v111_driver = {
.driver = {
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index bac9597faf68..e906435fc49a 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -3,10 +3,13 @@
#include <asm/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -418,6 +421,12 @@ static const struct og01a1b_mode supported_modes[] = {
};
struct og01a1b {
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator *avdd;
+ struct regulator *dovdd;
+ struct regulator *dvdd;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -898,8 +907,10 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
return 0;
}
-static int og01a1b_check_hwcfg(struct device *dev)
+static int og01a1b_check_hwcfg(struct og01a1b *og01a1b)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ struct device *dev = &client->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -913,10 +924,13 @@ static int og01a1b_check_hwcfg(struct device *dev)
return -ENXIO;
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
-
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ if (!og01a1b->xvclk) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ mclk = clk_get_rate(og01a1b->xvclk);
}
if (mclk != OG01A1B_MCLK) {
@@ -967,6 +981,83 @@ check_hwcfg_error:
return ret;
}
+/* Power/clock management functions */
+static int og01a1b_power_on(struct device *dev)
+{
+ unsigned long delay = DIV_ROUND_UP(8192UL * USEC_PER_SEC, OG01A1B_MCLK);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+ int ret;
+
+ if (og01a1b->avdd) {
+ ret = regulator_enable(og01a1b->avdd);
+ if (ret)
+ return ret;
+ }
+
+ if (og01a1b->dovdd) {
+ ret = regulator_enable(og01a1b->dovdd);
+ if (ret)
+ goto avdd_disable;
+ }
+
+ if (og01a1b->dvdd) {
+ ret = regulator_enable(og01a1b->dvdd);
+ if (ret)
+ goto dovdd_disable;
+ }
+
+ ret = clk_prepare_enable(og01a1b->xvclk);
+ if (ret)
+ goto dvdd_disable;
+
+ gpiod_set_value_cansleep(og01a1b->reset_gpio, 0);
+
+ if (og01a1b->reset_gpio)
+ usleep_range(5 * USEC_PER_MSEC, 6 * USEC_PER_MSEC);
+ else if (og01a1b->xvclk)
+ usleep_range(delay, 2 * delay);
+
+ return 0;
+
+dvdd_disable:
+ if (og01a1b->dvdd)
+ regulator_disable(og01a1b->dvdd);
+dovdd_disable:
+ if (og01a1b->dovdd)
+ regulator_disable(og01a1b->dovdd);
+avdd_disable:
+ if (og01a1b->avdd)
+ regulator_disable(og01a1b->avdd);
+
+ return ret;
+}
+
+static int og01a1b_power_off(struct device *dev)
+{
+ unsigned long delay = DIV_ROUND_UP(512 * USEC_PER_SEC, OG01A1B_MCLK);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ if (og01a1b->xvclk)
+ usleep_range(delay, 2 * delay);
+
+ clk_disable_unprepare(og01a1b->xvclk);
+
+ gpiod_set_value_cansleep(og01a1b->reset_gpio, 1);
+
+ if (og01a1b->dvdd)
+ regulator_disable(og01a1b->dvdd);
+
+ if (og01a1b->dovdd)
+ regulator_disable(og01a1b->dovdd);
+
+ if (og01a1b->avdd)
+ regulator_disable(og01a1b->avdd);
+
+ return 0;
+}
+
static void og01a1b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -984,22 +1075,78 @@ static int og01a1b_probe(struct i2c_client *client)
struct og01a1b *og01a1b;
int ret;
- ret = og01a1b_check_hwcfg(&client->dev);
+ og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
+ if (!og01a1b)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
+
+ og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(og01a1b->xvclk)) {
+ ret = PTR_ERR(og01a1b->xvclk);
+ dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = og01a1b_check_hwcfg(og01a1b);
if (ret) {
dev_err(&client->dev, "failed to check HW configuration: %d",
ret);
return ret;
}
- og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
- if (!og01a1b)
- return -ENOMEM;
+ og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(og01a1b->reset_gpio)) {
+ dev_err(&client->dev, "cannot get reset GPIO\n");
+ return PTR_ERR(og01a1b->reset_gpio);
+ }
+
+ og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd");
+ if (IS_ERR(og01a1b->avdd)) {
+ ret = PTR_ERR(og01a1b->avdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'avdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->avdd = NULL;
+ }
+
+ og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd");
+ if (IS_ERR(og01a1b->dovdd)) {
+ ret = PTR_ERR(og01a1b->dovdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'dovdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->dovdd = NULL;
+ }
+
+ og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd");
+ if (IS_ERR(og01a1b->dvdd)) {
+ ret = PTR_ERR(og01a1b->dvdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'dvdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->dvdd = NULL;
+ }
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = og01a1b_power_on(&client->dev);
+ if (ret)
+ return ret;
- v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
ret = og01a1b_identify_module(og01a1b);
if (ret) {
dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+ goto power_off;
}
mutex_init(&og01a1b->mutex);
@@ -1028,10 +1175,7 @@ static int og01a1b_probe(struct i2c_client *client)
goto probe_error_media_entity_cleanup;
}
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
+ /* Enable runtime PM and turn off the device */
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1045,9 +1189,16 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(og01a1b->sd.ctrl_handler);
mutex_destroy(&og01a1b->mutex);
+power_off:
+ og01a1b_power_off(&client->dev);
+
return ret;
}
+static const struct dev_pm_ops og01a1b_pm_ops = {
+ SET_RUNTIME_PM_OPS(og01a1b_power_off, og01a1b_power_on, NULL)
+};
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id og01a1b_acpi_ids[] = {
{"OVTI01AC"},
@@ -1057,10 +1208,18 @@ static const struct acpi_device_id og01a1b_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids);
#endif
+static const struct of_device_id og01a1b_of_match[] = {
+ { .compatible = "ovti,og01a1b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, og01a1b_of_match);
+
static struct i2c_driver og01a1b_i2c_driver = {
.driver = {
.name = "og01a1b",
+ .pm = &og01a1b_pm_ops,
.acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
+ .of_match_table = og01a1b_of_match,
},
.probe = og01a1b_probe,
.remove = og01a1b_remove,
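The three regulator lookups added above share one idiom, condensed here into a hypothetical helper: devm_regulator_get_optional() returns -ENODEV when the supply simply is not described in firmware, and that case is mapped to NULL so the power helpers can skip it, while any other error still fails the probe:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev, const char *name,
				       struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, name);

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) != -ENODEV)
			return dev_err_probe(dev, PTR_ERR(reg),
					     "Failed to get '%s' regulator\n",
					     name);
		reg = NULL;	/* supply not wired up: legal for this sensor */
	}

	*out = reg;
	return 0;
}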
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 09387e335d80..7a3fc1d28514 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1740,8 +1740,8 @@ static void ov13858_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov13858_id_table[] = {
- {"ov13858", 0},
- {},
+ { "ov13858" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ov13858_id_table);
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 67c4bd2916e8..d27fc2df64e6 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1271,7 +1271,7 @@ static void ov2640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov2640_id[] = {
- { "ov2640", 0 },
+ { "ov2640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov2640_id);
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index d1653d7431d0..06b7896c3eaf 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1551,8 +1551,8 @@ static const struct dev_pm_ops ov2659_pm_ops = {
};
static const struct i2c_device_id ov2659_id[] = {
- { "ov2659", 0 },
- { /* sentinel */ },
+ { "ov2659" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov2659_id);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5162d45fe73b..c1d3fce4a7d3 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -4003,8 +4003,8 @@ static const struct dev_pm_ops ov5640_pm_ops = {
};
static const struct i2c_device_id ov5640_id[] = {
- {"ov5640", 0},
- {},
+ { "ov5640" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ov5640_id);
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 3b22b9e12787..0c32bd2940ec 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -635,7 +635,7 @@ static int ov5645_set_register_array(struct ov5645 *ov5645,
return 0;
}
-static int ov5645_set_power_off(struct device *dev)
+static void __ov5645_set_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -643,8 +643,16 @@ static int ov5645_set_power_off(struct device *dev)
ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
gpiod_set_value_cansleep(ov5645->rst_gpio, 1);
gpiod_set_value_cansleep(ov5645->enable_gpio, 0);
- clk_disable_unprepare(ov5645->xclk);
regulator_bulk_disable(OV5645_NUM_SUPPLIES, ov5645->supplies);
+}
+
+static int ov5645_set_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5645 *ov5645 = to_ov5645(sd);
+
+ __ov5645_set_power_off(dev);
+ clk_disable_unprepare(ov5645->xclk);
return 0;
}
@@ -686,7 +694,8 @@ static int ov5645_set_power_on(struct device *dev)
return 0;
exit:
- ov5645_set_power_off(dev);
+ __ov5645_set_power_off(dev);
+ clk_disable_unprepare(ov5645->xclk);
return ret;
}
@@ -1272,7 +1281,7 @@ static void ov5645_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov5645_id[] = {
- { "ov5645", 0 },
+ { "ov5645" },
{}
};
MODULE_DEVICE_TABLE(i2c, ov5645_id);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 0fb4d7bff9d1..a727beb9d57e 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1487,7 +1487,7 @@ static const struct dev_pm_ops ov5647_pm_ops = {
};
static const struct i2c_device_id ov5647_id[] = {
- { "ov5647", 0 },
+ { "ov5647" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov5647_id);
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 3641911bc73f..5b5127f8953f 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -972,12 +972,10 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
static int ov5675_power_off(struct device *dev)
{
- /* 512 xvclk cycles after the last SCCB transation or MIPI frame end */
- u32 delay_us = DIV_ROUND_UP(512, OV5675_XVCLK_19_2 / 1000 / 1000);
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5675 *ov5675 = to_ov5675(sd);
- usleep_range(delay_us, delay_us * 2);
+ usleep_range(90, 100);
clk_disable_unprepare(ov5675->xvclk);
gpiod_set_value_cansleep(ov5675->reset_gpio, 1);
@@ -988,7 +986,6 @@ static int ov5675_power_off(struct device *dev)
static int ov5675_power_on(struct device *dev)
{
- u32 delay_us = DIV_ROUND_UP(8192, OV5675_XVCLK_19_2 / 1000 / 1000);
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5675 *ov5675 = to_ov5675(sd);
int ret;
@@ -1014,8 +1011,11 @@ static int ov5675_power_on(struct device *dev)
gpiod_set_value_cansleep(ov5675->reset_gpio, 0);
- /* 8192 xvclk cycles prior to the first SCCB transation */
- usleep_range(delay_us, delay_us * 2);
+ /* Worst case quiescence gap is 1.365 milliseconds @ 6MHz XVCLK
+ * Add an additional threshold grace period to ensure reset
+ * completion before initiating our first I2C transaction.
+ */
+ usleep_range(1500, 1600);
return 0;
}
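The fixed 1500 µs floor above is the worst case spelled out in the new comment: 8192 XVCLK cycles at the slowest supported 6 MHz clock is 8192 / 6,000,000 ≈ 1.365 ms, so usleep_range(1500, 1600) leaves roughly 135 µs of margin; at the 19.2 MHz XVCLK this driver actually configures, the same gap would be only 8192 / 19,200,000 ≈ 427 µs.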
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index b65befb22a79..9c7627161142 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1128,7 +1128,7 @@ static void ov6650_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov6650_id[] = {
- { "ov6650", 0 },
+ { "ov6650" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov6650_id);
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index 293f5f404358..9f68d89936eb 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -77,7 +77,7 @@ static void ov7640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov7640_id[] = {
- { "ov7640", 0 },
+ { "ov7640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov7640_id);
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 3e36a55274ef..3b0fdb3c70c0 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1546,7 +1546,7 @@ static void ov772x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov772x_id[] = {
- { "ov772x", 0 },
+ { "ov772x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov772x_id);
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 47b1b14d8796..0830676e5d5a 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1152,7 +1152,7 @@ static int __maybe_unused ov7740_runtime_resume(struct device *dev)
}
static const struct i2c_device_id ov7740_id[] = {
- { "ov7740", 0 },
+ { "ov7740" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov7740_id);
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index e9a52a8a9dc0..01dbc0ba89c8 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -751,7 +751,7 @@ static void ov9640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov9640_id[] = {
- { "ov9640", 0 },
+ { "ov9640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov9640_id);
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 66cd0e9ddc9a..56df97c9886b 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1566,8 +1566,8 @@ static void ov965x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov965x_id[] = {
- { "OV9650", 0 },
- { "OV9652", 0 },
+ { "OV9650" },
+ { "OV9652" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov965x_id);
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index a59db10153cd..b7ca39f63dba 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1410,7 +1410,7 @@ static void rj54n1_remove(struct i2c_client *client)
}
static const struct i2c_device_id rj54n1_id[] = {
- { "rj54n1cb0c", 0 },
+ { "rj54n1cb0c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rj54n1_id);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index cf6be509af33..7716dfe2b8c9 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1392,6 +1392,16 @@ err_reg_dis:
return ret;
}
+/*
+ * This function has been created just to avoid a smatch warning,
+ * please do not merge it into __s5c73m3_power_off() until you have
+ * confirmed that it does not introduce a new warning.
+ */
+static void s5c73m3_enable_clk(struct s5c73m3 *state)
+{
+ clk_prepare_enable(state->clock);
+}
+
static int __s5c73m3_power_off(struct s5c73m3 *state)
{
int i, ret;
@@ -1421,7 +1431,8 @@ err:
state->supplies[i].supply, r);
}
- clk_prepare_enable(state->clock);
+ s5c73m3_enable_clk(state);
+
return ret;
}
@@ -1724,7 +1735,7 @@ static void s5c73m3_remove(struct i2c_client *client)
}
static const struct i2c_device_id s5c73m3_id[] = {
- { DRIVER_NAME, 0 },
+ { DRIVER_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, s5c73m3_id);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 6b11039c3579..24f399cd2124 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -2018,8 +2018,8 @@ static void s5k5baf_remove(struct i2c_client *c)
}
static const struct i2c_device_id s5k5baf_id[] = {
- { S5K5BAF_DRIVER_NAME, 0 },
- { },
+ { S5K5BAF_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, s5k5baf_id);
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index dea9fc09356f..fb09e4560d8a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -496,7 +496,7 @@ static void saa6588_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa6588_id[] = {
- { "saa6588", 0 },
+ { "saa6588" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa6588_id);
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 897eaa669b86..1ed8b5edb3fb 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -770,7 +770,7 @@ static void saa6752hs_remove(struct i2c_client *client)
}
static const struct i2c_device_id saa6752hs_id[] = {
- { "saa6752hs", 0 },
+ { "saa6752hs" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa6752hs_id);
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 1520790338ce..942aeeb40c52 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -439,7 +439,7 @@ static void saa7110_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa7110_id[] = {
- { "saa7110", 0 },
+ { "saa7110" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa7110_id);
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 933ec0171430..b0793bb0c02a 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1334,7 +1334,7 @@ static void saa717x_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa717x_id[] = {
- { "saa717x", 0 },
+ { "saa717x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa717x_id);
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index 5535d71f4860..c04e452a332b 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -334,7 +334,7 @@ static void saa7185_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa7185_id[] = {
- { "saa7185", 0 },
+ { "saa7185" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa7185_id);
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index 0f53834f3ae4..16072a9f8247 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -366,7 +366,7 @@ static void sony_btf_mpx_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id sony_btf_mpx_id[] = {
- { "sony-btf-mpx", 0 },
+ { "sony-btf-mpx" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sony_btf_mpx_id);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 0307fee3cce9..65d58ddf0287 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2197,7 +2197,7 @@ static void tc358743_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358743_id[] = {
- {"tc358743", 0},
+ { "tc358743" },
{}
};
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index edf79107adc5..389582420ba7 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -1616,6 +1616,16 @@ static void tc358746_remove(struct i2c_client *client)
pm_runtime_dont_use_autosuspend(sd->dev);
}
+/*
+ * This function has been created just to avoid a smatch warning,
+ * please do not merge it into tc358746_suspend until you have
+ * confirmed that it does not introduce a new warning.
+ */
+static void tc358746_clk_enable(struct tc358746 *tc358746)
+{
+ clk_prepare_enable(tc358746->refclk);
+}
+
static int tc358746_suspend(struct device *dev)
{
struct tc358746 *tc358746 = dev_get_drvdata(dev);
@@ -1626,7 +1636,7 @@ static int tc358746_suspend(struct device *dev)
err = regulator_bulk_disable(ARRAY_SIZE(tc358746_supplies),
tc358746->supplies);
if (err)
- clk_prepare_enable(tc358746->refclk);
+ tc358746_clk_enable(tc358746);
return err;
}
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 58ce8fec3041..3b7e5ff5b010 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2514,7 +2514,7 @@ static void tda1997x_codec_remove(struct snd_soc_component *component)
{
}
-static struct snd_soc_component_driver tda1997x_codec_driver = {
+static const struct snd_soc_component_driver tda1997x_codec_driver = {
.probe = tda1997x_codec_probe,
.remove = tda1997x_codec_remove,
.idle_bias_on = 1,
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 6ecdc8e2e0c6..76ef0fdddf76 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -400,7 +400,7 @@ static void tda7432_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda7432_id[] = {
- { "tda7432", 0 },
+ { "tda7432" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda7432_id);
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 1911ef2126be..d61da811c9da 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -182,7 +182,7 @@ static void tda9840_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda9840_id[] = {
- { "tda9840", 0 },
+ { "tda9840" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda9840_id);
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 3ed6e441d515..4aaf66353610 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -141,7 +141,7 @@ static void tea6415c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tea6415c_id[] = {
- { "tea6415c", 0 },
+ { "tea6415c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tea6415c_id);
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 63f23784bb41..5c5ea3973251 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -123,7 +123,7 @@ static void tea6420_remove(struct i2c_client *client)
}
static const struct i2c_device_id tea6420_id[] = {
- { "tea6420", 0 },
+ { "tea6420" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tea6420_id);
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 19bd923a7315..75225ff5eff6 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -1503,7 +1503,7 @@ static int __thp7312_flash_reg_read(struct thp7312_device *thp7312,
msgs[0].addr = client->addr;
msgs[0].flags = 0;
- msgs[0].len = sizeof(thp7312_cmd_read_reg),
+ msgs[0].len = sizeof(thp7312_cmd_read_reg);
msgs[0].buf = (u8 *)thp7312_cmd_read_reg;
msgs[1].addr = client->addr;
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 49ed83a0ac94..7526fabc7ee4 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -369,9 +369,9 @@ static void ths7303_remove(struct i2c_client *client)
}
static const struct i2c_device_id ths7303_id[] = {
- {"ths7303", 0},
- {"ths7353", 0},
- {},
+ { "ths7303" },
+ { "ths7353" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ths7303_id);
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index ce0a7f809f19..686f10641c7a 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -487,8 +487,8 @@ static void ths8200_remove(struct i2c_client *client)
}
static const struct i2c_device_id ths8200_id[] = {
- { "ths8200", 0 },
- {},
+ { "ths8200" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ths8200_id);
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index d800ff8af1ff..b7b31b6192af 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -188,7 +188,7 @@ static void tlv320aic23b_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tlv320aic23b_id[] = {
- { "tlv320aic23b", 0 },
+ { "tlv320aic23b" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index ba20f35cafd5..654725dfafac 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2086,7 +2086,7 @@ static void tvaudio_remove(struct i2c_client *client)
detect which device is present. So rather than listing all supported
devices here, we pretend to support a single, fake device type. */
static const struct i2c_device_id tvaudio_id[] = {
- { "tvaudio", 0 },
+ { "tvaudio" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvaudio_id);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 64b91aa3c82a..e3675c744d9e 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -514,7 +514,7 @@ struct i2c_vbi_ram_value {
* and so on. There are 16 possible locations from 0 to 15.
*/
-static struct i2c_vbi_ram_value vbi_ram_default[] = {
+static const struct i2c_vbi_ram_value vbi_ram_default[] = {
/*
* FIXME: Current api doesn't handle all VBI types, those not
@@ -1812,7 +1812,7 @@ static const struct regmap_access_table tvp5150_readable_table = {
.n_yes_ranges = ARRAY_SIZE(tvp5150_readable_ranges),
};
-static struct regmap_config tvp5150_config = {
+static const struct regmap_config tvp5150_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
@@ -2265,7 +2265,7 @@ static const struct dev_pm_ops tvp5150_pm_ops = {
};
static const struct i2c_device_id tvp5150_id[] = {
- { "tvp5150", 0 },
+ { "tvp5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvp5150_id);
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index ea01bd86450e..c09a5bd71fd0 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1070,7 +1070,7 @@ static void tvp7002_remove(struct i2c_client *c)
/* I2C Device ID table */
static const struct i2c_device_id tvp7002_id[] = {
- { "tvp7002", 0 },
+ { "tvp7002" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvp7002_id);
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index 6a2521e3a25c..3d154f4fb5f9 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -414,7 +414,7 @@ static void tw2804_remove(struct i2c_client *client)
}
static const struct i2c_device_id tw2804_id[] = {
- { "tw2804", 0 },
+ { "tw2804" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw2804_id);
diff --git a/drivers/media/i2c/tw9900.c b/drivers/media/i2c/tw9900.c
index bc7623ec46e5..53efdeaed1db 100644
--- a/drivers/media/i2c/tw9900.c
+++ b/drivers/media/i2c/tw9900.c
@@ -753,7 +753,7 @@ static const struct dev_pm_ops tw9900_pm_ops = {
};
static const struct i2c_device_id tw9900_id[] = {
- { "tw9900", 0 },
+ { "tw9900" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9900_id);
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index 996be3960af3..b996a05e56f2 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -245,7 +245,7 @@ static void tw9903_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tw9903_id[] = {
- { "tw9903", 0 },
+ { "tw9903" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9903_id);
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 25c625f6d6e4..6220f4fddbab 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -213,7 +213,7 @@ static void tw9906_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tw9906_id[] = {
- { "tw9906", 0 },
+ { "tw9906" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9906_id);
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 6dffaaa9ed56..f3e400304e04 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -996,7 +996,7 @@ static void tw9910_remove(struct i2c_client *client)
}
static const struct i2c_device_id tw9910_id[] = {
- { "tw9910", 0 },
+ { "tw9910" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9910_id);
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index abd052a44bd7..2e4540ee2df2 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -79,7 +79,7 @@ static void uda1342_remove(struct i2c_client *client)
}
static const struct i2c_device_id uda1342_id[] = {
- { "uda1342", 0 },
+ { "uda1342" },
{ }
};
MODULE_DEVICE_TABLE(i2c, uda1342_id);
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 54c2ba0ba375..9d0b72a213be 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -219,7 +219,7 @@ static void upd64031a_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id upd64031a_id[] = {
- { "upd64031a", 0 },
+ { "upd64031a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, upd64031a_id);
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 2a820589a4cb..2e99ed5da42c 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -190,7 +190,7 @@ static void upd64083_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id upd64083_id[] = {
- { "upd64083", 0 },
+ { "upd64083" },
{ }
};
MODULE_DEVICE_TABLE(i2c, upd64083_id);
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 0ba3c2b68037..06fd46a63c72 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -172,7 +172,7 @@ static void vp27smpx_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id vp27smpx_id[] = {
- { "vp27smpx", 0 },
+ { "vp27smpx" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vp27smpx_id);
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 1eaae886f217..5f1a22284168 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -535,9 +535,9 @@ static void vpx3220_remove(struct i2c_client *client)
}
static const struct i2c_device_id vpx3220_id[] = {
- { "vpx3220a", 0 },
- { "vpx3216b", 0 },
- { "vpx3214c", 0 },
+ { "vpx3220a" },
+ { "vpx3216b" },
+ { "vpx3214c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vpx3220_id);
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index 19bf7a00dff9..c091b78a5b41 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -243,7 +243,7 @@ static void wm8739_remove(struct i2c_client *client)
}
static const struct i2c_device_id wm8739_id[] = {
- { "wm8739", 0 },
+ { "wm8739" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8739_id);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index d1b716fd6f11..619b2988577c 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -289,7 +289,7 @@ static void wm8775_remove(struct i2c_client *client)
}
static const struct i2c_device_id wm8775_id[] = {
- { "wm8775", 0 },
+ { "wm8775" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8775_id);
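Note: the id-table cleanups above all drop the explicit 0 because C's partial aggregate
initialization zero-fills the remaining members, so the two spellings produce identical
entries. Using the wm8775 table as the example:

    static const struct i2c_device_id wm8775_id[] = {
    	{ "wm8775" },	/* .driver_data is implicitly 0 */
    	{ }		/* zero-filled sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(i2c, wm8775_id);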
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index addb8f2d8939..e064914c476e 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -254,12 +254,12 @@ media_request_get_by_fd(struct media_device *mdev, int request_fd)
return ERR_PTR(-EBADR);
f = fdget(request_fd);
- if (!f.file)
+ if (!fd_file(f))
goto err_no_req_fd;
- if (f.file->f_op != &request_fops)
+ if (fd_file(f)->f_op != &request_fops)
goto err_fput;
- req = f.file->private_data;
+ req = fd_file(f)->private_data;
if (req->mdev != mdev)
goto err_fput;
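Note: the mc-request.c change tracks the struct fd rework; callers now go through the
fd_file() accessor instead of touching f.file directly, so the fd representation can
change underneath them. The consumer pattern, roughly (error paths condensed; fdput()
still releases the reference):

    struct fd f = fdget(request_fd);

    if (!fd_file(f))				/* no file behind this fd */
    	return -EBADR;
    if (fd_file(f)->f_op != &request_fops) {	/* not a request fd */
    	fdput(f);
    	return -EINVAL;
    }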
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
index 40e20f0aa5ae..49e4fb696573 100644
--- a/drivers/media/pci/intel/ipu6/Kconfig
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -4,8 +4,13 @@ config VIDEO_INTEL_IPU6
depends on VIDEO_DEV
depends on X86 && X86_64 && HAS_DMA
depends on IPU_BRIDGE || !IPU_BRIDGE
+ #
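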
+ # This driver incorrectly tries to override the dma_ops. It should
+ # never have done that, but for now keep it working on architectures
+ # that use dma ops.
+ #
+ depends on ARCH_HAS_DMA_OPS
select AUXILIARY_BUS
- select DMA_OPS
select IOMMU_IOVA
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index bbd646378ab3..7fb707d35309 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -390,20 +390,18 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
IPU6_ISYS_NAME);
if (IS_ERR(isys_adev)) {
- dev_err_probe(dev, PTR_ERR(isys_adev),
- "ipu6_bus_initialize_device isys failed\n");
kfree(pdata);
- return ERR_CAST(isys_adev);
+ return dev_err_cast_probe(dev, isys_adev,
+ "ipu6_bus_initialize_device isys failed\n");
}
isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
&ipdata->hw_variant);
if (IS_ERR(isys_adev->mmu)) {
- dev_err_probe(dev, PTR_ERR(isys_adev->mmu),
- "ipu6_mmu_init(isys_adev->mmu) failed\n");
put_device(&isys_adev->auxdev.dev);
kfree(pdata);
- return ERR_CAST(isys_adev->mmu);
+ return dev_err_cast_probe(dev, isys_adev->mmu,
+ "ipu6_mmu_init(isys_adev->mmu) failed\n");
}
isys_adev->mmu->dev = &isys_adev->auxdev.dev;
@@ -436,20 +434,18 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
IPU6_PSYS_NAME);
if (IS_ERR(psys_adev)) {
- dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
- "ipu6_bus_initialize_device psys failed\n");
kfree(pdata);
- return ERR_CAST(psys_adev);
+ return dev_err_cast_probe(&pdev->dev, psys_adev,
+ "ipu6_bus_initialize_device psys failed\n");
}
psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
&ipdata->hw_variant);
if (IS_ERR(psys_adev->mmu)) {
- dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu),
- "ipu6_mmu_init(psys_adev->mmu) failed\n");
put_device(&psys_adev->auxdev.dev);
kfree(pdata);
- return ERR_CAST(psys_adev->mmu);
+ return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
+ "ipu6_mmu_init(psys_adev->mmu) failed\n");
}
psys_adev->mmu->dev = &psys_adev->auxdev.dev;
@@ -576,9 +572,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
- ret = dma_set_max_seg_size(dev, UINT_MAX);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+ dma_set_max_seg_size(dev, UINT_MAX);
ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
if (ret)
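Note: dev_err_cast_probe() folds the old dev_err_probe() + ERR_CAST() pair into the
return statement: it logs the error carried by the passed ERR_PTR and evaluates to that
same pointer, cast for the caller's return type. A sketch of the helper's contract (not
its exact definition):

    if (IS_ERR(isys_adev)) {
    	kfree(pdata);
    	/* logs PTR_ERR(isys_adev) via dev_err_probe(), returns ERR_CAST(isys_adev) */
    	return dev_err_cast_probe(dev, isys_adev, "... failed\n");
    }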
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index ab4f07e2e560..2819bbdab484 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -302,7 +302,7 @@ static int init_i2c(struct mgb4_dev *mgbdev)
/* create dummy clock required by the xiic-i2c adapter */
snprintf(clk_name, sizeof(clk_name), "xiic-i2c.%d", id);
mgbdev->i2c_clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL,
- 0, 125000000);
+ 0, MGB4_HW_FREQ);
if (IS_ERR(mgbdev->i2c_clk)) {
dev_err(dev, "failed to register I2C clock\n");
return PTR_ERR(mgbdev->i2c_clk);
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index 2a946e46aec1..b52cd67270b5 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -13,6 +13,8 @@
#include <linux/dmaengine.h>
#include "mgb4_regs.h"
+#define MGB4_HW_FREQ 125000000
+
#define MGB4_VIN_DEVICES 2
#define MGB4_VOUT_DEVICES 2
diff --git a/drivers/media/pci/mgb4/mgb4_io.h b/drivers/media/pci/mgb4/mgb4_io.h
index 8698db1be4a9..dd8696d7df31 100644
--- a/drivers/media/pci/mgb4/mgb4_io.h
+++ b/drivers/media/pci/mgb4/mgb4_io.h
@@ -7,11 +7,9 @@
#ifndef __MGB4_IO_H__
#define __MGB4_IO_H__
+#include <linux/math64.h>
#include <media/v4l2-dev.h>
-
-#define MGB4_DEFAULT_WIDTH 1280
-#define MGB4_DEFAULT_HEIGHT 640
-#define MGB4_DEFAULT_PERIOD (125000000 / 60)
+#include "mgb4_core.h"
/* Register access error indication */
#define MGB4_ERR_NO_REG 0xFFFFFFFE
@@ -20,6 +18,9 @@
#define MGB4_ERR_QUEUE_EMPTY 0xFFFFFFFC
#define MGB4_ERR_QUEUE_FULL 0xFFFFFFFB
+#define MGB4_PERIOD(numerator, denominator) \
+ ((u32)div_u64((MGB4_HW_FREQ * (u64)(numerator)), (denominator)))
+
struct mgb4_frame_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
@@ -30,4 +31,24 @@ static inline struct mgb4_frame_buffer *to_frame_buffer(struct vb2_v4l2_buffer *
return container_of(vbuf, struct mgb4_frame_buffer, vb);
}
+static inline bool has_yuv_and_timeperframe(struct mgb4_regs *video)
+{
+ u32 status = mgb4_read_reg(video, 0xD0);
+
+ return (status & (1U << 8));
+}
+
+#define has_yuv(video) has_yuv_and_timeperframe(video)
+#define has_timeperframe(video) has_yuv_and_timeperframe(video)
+
+static inline u32 pixel_size(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ u32 height = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch;
+ u32 width = bt->width + bt->hfrontporch + bt->hsync + bt->hbackporch;
+
+ return width * height;
+}
+
#endif
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_out.c b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
index 9f6e81c57726..573aa61c69d4 100644
--- a/drivers/media/pci/mgb4/mgb4_sysfs_out.c
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
@@ -229,9 +229,9 @@ static ssize_t frame_rate_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 period = mgb4_read_reg(&voutdev->mgbdev->video,
- voutdev->config->regs.frame_period);
+ voutdev->config->regs.frame_limit);
- return sprintf(buf, "%u\n", 125000000 / period);
+ return sprintf(buf, "%u\n", period ? MGB4_HW_FREQ / period : 0);
}
/*
@@ -245,14 +245,15 @@ static ssize_t frame_rate_store(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
- int ret;
+ int limit, ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
+ limit = val ? MGB4_HW_FREQ / val : 0;
mgb4_write_reg(&voutdev->mgbdev->video,
- voutdev->config->regs.frame_period, 125000000 / val);
+ voutdev->config->regs.frame_limit, limit);
return count;
}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 2cd78c539889..e9332abb3172 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/align.h>
#include <linux/dma/amd_xdma.h>
+#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
@@ -34,8 +35,8 @@ ATTRIBUTE_GROUPS(mgb4_fpdl3_in);
ATTRIBUTE_GROUPS(mgb4_gmsl_in);
static const struct mgb4_vin_config vin_cfg[] = {
- {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28}},
- {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58}}
+ {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28, 0xE8}},
+ {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58, 0xEC}}
};
static const struct i2c_board_info fpdl3_deser_info[] = {
@@ -76,6 +77,9 @@ static const struct v4l2_dv_timings_cap video_timings_cap = {
},
};
+/* Dummy timings when no signal present */
+static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60;
+
/*
* Returns the video output connected with the given video input if the input
* is in loopback mode.
@@ -186,8 +190,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
unsigned int size = (vindev->timings.bt.width + vindev->padding)
- * vindev->timings.bt.height * 4;
+ * vindev->timings.bt.height * pixelsize;
/*
* If I/O reconfiguration is in process, do not allow to start
@@ -220,9 +227,12 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
struct device *dev = &vindev->mgbdev->pdev->dev;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
unsigned int size = (vindev->timings.bt.width + vindev->padding)
- * vindev->timings.bt.height * 4;
+ * vindev->timings.bt.height * pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dev_err(dev, "buffer too small (%lu < %u)\n",
@@ -312,7 +322,8 @@ static int fh_open(struct file *file)
if (!v4l2_fh_is_singular_file(file))
goto out;
- get_timings(vindev, &vindev->timings);
+ if (get_timings(vindev, &vindev->timings) < 0)
+ vindev->timings = cea1080p60;
set_loopback_padding(vindev, vindev->padding);
out:
@@ -359,33 +370,42 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
- return 0;
+ if (f->index == 0) {
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ return 0;
+ } else if (f->index == 1 && has_yuv(video)) {
+ f->pixelformat = V4L2_PIX_FMT_YUYV;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *ival)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
if (ival->index != 0)
return -EINVAL;
- if (ival->pixel_format != V4L2_PIX_FMT_ABGR32)
+ if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ ((has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV))))
return -EINVAL;
if (ival->width != vindev->timings.bt.width ||
ival->height != vindev->timings.bt.height)
return -EINVAL;
- ival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
- ival->stepwise.min.denominator = 60;
- ival->stepwise.min.numerator = 1;
- ival->stepwise.max.denominator = 1;
- ival->stepwise.max.numerator = 1;
- ival->stepwise.step = ival->stepwise.max;
+ ival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ ival->stepwise.max.denominator = MGB4_HW_FREQ;
+ ival->stepwise.max.numerator = 0xFFFFFFFF;
+ ival->stepwise.min.denominator = vindev->timings.bt.pixelclock;
+ ival->stepwise.min.numerator = pixel_size(&vindev->timings);
+ ival->stepwise.step.denominator = MGB4_HW_FREQ;
+ ival->stepwise.step.numerator = 1;
return 0;
}
@@ -393,13 +413,29 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = vindev->timings.bt.width;
f->fmt.pix.height = vindev->timings.bt.height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+
+ if (config & (1U << 16)) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ if (config & (1U << 20)) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else {
+ if (config & (1U << 19))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 2;
+ } else {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+ }
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -408,14 +444,30 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 pixelsize;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = vindev->timings.bt.width;
f->fmt.pix.height = vindev->timings.bt.height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
- ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+
+ if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 ||
+ f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ pixelsize = 4;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ }
+
+ if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize &&
+ f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2)
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ pixelsize);
+ else
+ f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -425,13 +477,36 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config, pixelsize;
if (vb2_is_busy(&vindev->queue))
return -EBUSY;
vidioc_try_fmt(file, priv, f);
- vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ config = mgb4_read_reg(video, vindev->config->regs.config);
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ config |= 1U << 16;
+
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) {
+ config |= 1U << 20;
+ config |= 1U << 19;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) {
+ config &= ~(1U << 20);
+ config |= 1U << 19;
+ } else {
+ config &= ~(1U << 20);
+ config &= ~(1U << 19);
+ }
+ } else {
+ pixelsize = 4;
+ config &= ~(1U << 16);
+ }
+ mgb4_write_reg(video, vindev->config->regs.config, config);
+
+ vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
+ * pixelsize)) / pixelsize;
mgb4_write_reg(video, vindev->config->regs.padding, vindev->padding);
set_loopback_padding(vindev, vindev->padding);
@@ -467,7 +542,8 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
- if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_ABGR32)
+ if (fsize->index != 0 || !(fsize->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ fsize->pixel_format == V4L2_PIX_FMT_YUYV))
return -EINVAL;
fsize->discrete.width = vindev->timings.bt.width;
@@ -488,27 +564,56 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
struct mgb4_regs *video = &vindev->mgbdev->video;
- const struct mgb4_vin_regs *regs = &vindev->config->regs;
- struct v4l2_fract timeperframe = {
- .numerator = mgb4_read_reg(video, regs->frame_period),
- .denominator = 125000000,
- };
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
+ struct v4l2_fract *tpf = &parm->parm.capture.timeperframe;
+ u32 timer;
parm->parm.capture.readbuffers = 2;
- parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- parm->parm.capture.timeperframe = timeperframe;
+
+ if (has_timeperframe(video)) {
+ timer = mgb4_read_reg(video, vindev->config->regs.timer);
+ if (timer < 0xFFFF) {
+ tpf->numerator = pixel_size(&vindev->timings);
+ tpf->denominator = vindev->timings.bt.pixelclock;
+ } else {
+ tpf->numerator = timer;
+ tpf->denominator = MGB4_HW_FREQ;
+ }
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
return 0;
}
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.capture.timeperframe;
+ u32 period, timer;
+
+ if (has_timeperframe(video)) {
+ timer = tpf->denominator ?
+ MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0;
+ if (timer) {
+ period = MGB4_PERIOD(pixel_size(&vindev->timings),
+ vindev->timings.bt.pixelclock);
+ if (timer < period)
+ timer = 0;
+ }
+
+ mgb4_write_reg(video, vindev->config->regs.timer, timer);
+ }
+
+ return vidioc_g_parm(file, priv, parm);
+}
+
static int vidioc_s_dv_timings(struct file *file, void *fh,
struct v4l2_dv_timings *timings)
{
@@ -592,8 +697,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_g_parm = vidioc_parm,
- .vidioc_s_parm = vidioc_parm,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
.vidioc_dv_timings_cap = vidioc_dv_timings_cap,
.vidioc_enum_dv_timings = vidioc_enum_dv_timings,
.vidioc_g_dv_timings = vidioc_g_dv_timings,
@@ -776,10 +881,16 @@ static void debugfs_init(struct mgb4_vin_dev *vindev)
vindev->regs[7].offset = vindev->config->regs.signal2;
vindev->regs[8].name = "PADDING_PIXELS";
vindev->regs[8].offset = vindev->config->regs.padding;
+ if (has_timeperframe(video)) {
+ vindev->regs[9].name = "TIMER";
+ vindev->regs[9].offset = vindev->config->regs.timer;
+ vindev->regset.nregs = 10;
+ } else {
+ vindev->regset.nregs = 9;
+ }
vindev->regset.base = video->membase;
vindev->regset.regs = vindev->regs;
- vindev->regset.nregs = ARRAY_SIZE(vindev->regs);
debugfs_create_regset32("registers", 0444, vindev->debugfs,
&vindev->regset);
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
index 0249b400ad4d..9693bd0ce180 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.h
+++ b/drivers/media/pci/mgb4/mgb4_vin.h
@@ -25,6 +25,7 @@ struct mgb4_vin_regs {
u32 signal;
u32 signal2;
u32 padding;
+ u32 timer;
};
struct mgb4_vin_config {
@@ -59,7 +60,7 @@ struct mgb4_vin_dev {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
struct debugfs_regset32 regset;
- struct debugfs_reg32 regs[9];
+ struct debugfs_reg32 regs[sizeof(struct mgb4_vin_regs) / 4];
#endif
};
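Note: for orientation, the mgb4 hunks above hard-code the FPGA CONFIG register layout:
bit 16 selects 2-byte YUYV over 4-byte ABGR32, and bits 19/20 pick the YUV encoding. A
sketch with illustrative names (the bit numbers come from the patch; these macros do not
exist in the driver):

    #define MGB4_CFG_YUYV	BIT(16)	/* 2 bytes/pixel YUYV, else ABGR32 */
    #define MGB4_CFG_BT601	BIT(19)	/* SMPTE170M when bit 20 is clear */
    #define MGB4_CFG_BT709	BIT(20)	/* REC709 (set together with bit 19) */

    static inline u32 mgb4_pixelsize(u32 config)
    {
    	return (config & MGB4_CFG_YUYV) ? 2 : 4;
    }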
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 241353ee77a5..998edcbd9723 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-dv-timings.h>
#include "mgb4_core.h"
#include "mgb4_dma.h"
#include "mgb4_sysfs.h"
@@ -23,12 +24,16 @@
#include "mgb4_cmt.h"
#include "mgb4_vout.h"
+#define DEFAULT_WIDTH 1280
+#define DEFAULT_HEIGHT 640
+#define DEFAULT_PERIOD (MGB4_HW_FREQ / 60)
+
ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
ATTRIBUTE_GROUPS(mgb4_gmsl_out);
static const struct mgb4_vout_config vout_cfg[] = {
- {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7c}},
- {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8c, 0x90, 0x9c}}
+ {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7C, 0xE0}},
+ {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8C, 0x90, 0x9C, 0xE4}}
};
static const struct i2c_board_info fpdl3_ser_info[] = {
@@ -40,6 +45,49 @@ static const struct mgb4_i2c_kv fpdl3_i2c[] = {
{0x05, 0xFF, 0x04}, {0x06, 0xFF, 0x01}, {0xC2, 0xFF, 0x80}
};
+static const struct v4l2_dv_timings_cap video_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = 320,
+ .max_width = 4096,
+ .min_height = 240,
+ .max_height = 2160,
+ .min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
+ .max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static void get_timings(struct mgb4_vout_dev *voutdev,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+
+ u32 hsync = mgb4_read_reg(video, regs->hsync);
+ u32 vsync = mgb4_read_reg(video, regs->vsync);
+ u32 resolution = mgb4_read_reg(video, regs->resolution);
+
+ memset(timings, 0, sizeof(*timings));
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = resolution >> 16;
+ timings->bt.height = resolution & 0xFFFF;
+ if (hsync & (1U << 31))
+ timings->bt.polarities |= V4L2_DV_HSYNC_POS_POL;
+ if (vsync & (1U << 31))
+ timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
+ timings->bt.pixelclock = voutdev->freq * 1000;
+ timings->bt.hsync = (hsync & 0x00FF0000) >> 16;
+ timings->bt.vsync = (vsync & 0x00FF0000) >> 16;
+ timings->bt.hbackporch = (hsync & 0x0000FF00) >> 8;
+ timings->bt.hfrontporch = hsync & 0x000000FF;
+ timings->bt.vbackporch = (vsync & 0x0000FF00) >> 8;
+ timings->bt.vfrontporch = vsync & 0x000000FF;
+}
+
static void return_all_buffers(struct mgb4_vout_dev *voutdev,
enum vb2_buffer_state state)
{
@@ -59,7 +107,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
- unsigned int size;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
+ unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height
+ * pixelsize;
/*
* If I/O reconfiguration is in process, do not allow to start
@@ -69,8 +121,6 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
if (test_bit(0, &voutdev->mgbdev->io_reconfig))
return -EBUSY;
- size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
-
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
@@ -93,9 +143,11 @@ static int buffer_prepare(struct vb2_buffer *vb)
{
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &voutdev->mgbdev->pdev->dev;
- unsigned int size;
-
- size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
+ unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height
+ * pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dev_err(dev, "buffer too small (%lu < %u)\n",
@@ -194,24 +246,47 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
- return 0;
+ if (f->index == 0) {
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ return 0;
+ } else if (f->index == 1 && has_yuv(video)) {
+ f->pixelformat = V4L2_PIX_FMT_YUYV;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = voutdev->width;
f->fmt.pix.height = voutdev->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+
+ if (config & (1U << 16)) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ if (config & (1U << 20)) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else {
+ if (config & (1U << 19))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 2;
+ } else {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+ }
+
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -220,14 +295,30 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 pixelsize;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = voutdev->width;
f->fmt.pix.height = voutdev->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
- ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+
+ if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 ||
+ f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ pixelsize = 4;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ }
+
+ if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize &&
+ f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2)
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ pixelsize);
+ else
+ f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -237,13 +328,39 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config, pixelsize;
+ int ret;
if (vb2_is_busy(&voutdev->queue))
return -EBUSY;
- vidioc_try_fmt(file, priv, f);
+ ret = vidioc_try_fmt(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ config = mgb4_read_reg(video, voutdev->config->regs.config);
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ config |= 1U << 16;
+
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) {
+ config |= 1U << 20;
+ config |= 1U << 19;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) {
+ config &= ~(1U << 20);
+ config |= 1U << 19;
+ } else {
+ config &= ~(1U << 20);
+ config &= ~(1U << 19);
+ }
+ } else {
+ pixelsize = 4;
+ config &= ~(1U << 16);
+ }
+ mgb4_write_reg(video, voutdev->config->regs.config, config);
- voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
+ * pixelsize)) / pixelsize;
mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
return 0;
@@ -267,11 +384,128 @@ static int vidioc_enum_output(struct file *file, void *priv,
return -EINVAL;
out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ out->capabilities = V4L2_OUT_CAP_DV_TIMINGS;
strscpy(out->name, "MGB4", sizeof(out->name));
return 0;
}
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *ival)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_dv_timings timings;
+
+ if (ival->index != 0)
+ return -EINVAL;
+ if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ ((has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV))))
+ return -EINVAL;
+ if (ival->width != voutdev->width || ival->height != voutdev->height)
+ return -EINVAL;
+
+ get_timings(voutdev, &timings);
+
+ ival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ ival->stepwise.max.denominator = MGB4_HW_FREQ;
+ ival->stepwise.max.numerator = 0xFFFFFFFF;
+ ival->stepwise.min.denominator = timings.bt.pixelclock;
+ ival->stepwise.min.numerator = pixel_size(&timings);
+ ival->stepwise.step.denominator = MGB4_HW_FREQ;
+ ival->stepwise.step.numerator = 1;
+
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.output.timeperframe;
+ struct v4l2_dv_timings timings;
+ u32 timer;
+
+ parm->parm.output.writebuffers = 2;
+
+ if (has_timeperframe(video)) {
+ timer = mgb4_read_reg(video, voutdev->config->regs.timer);
+ if (timer < 0xFFFF) {
+ get_timings(voutdev, &timings);
+ tpf->numerator = pixel_size(&timings);
+ tpf->denominator = timings.bt.pixelclock;
+ } else {
+ tpf->numerator = timer;
+ tpf->denominator = MGB4_HW_FREQ;
+ }
+
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.output.timeperframe;
+ struct v4l2_dv_timings timings;
+ u32 timer, period;
+
+ if (has_timeperframe(video)) {
+ timer = tpf->denominator ?
+ MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0;
+ if (timer) {
+ get_timings(voutdev, &timings);
+ period = MGB4_PERIOD(pixel_size(&timings),
+ timings.bt.pixelclock);
+ if (timer < period)
+ timer = 0;
+ }
+
+ mgb4_write_reg(video, voutdev->config->regs.timer, timer);
+ }
+
+ return vidioc_g_parm(file, priv, parm);
+}
+
+static int vidioc_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ get_timings(voutdev, timings);
+
+ return 0;
+}
+
+static int vidioc_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ get_timings(voutdev, timings);
+
+ return 0;
+}
+
+static int vidioc_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+}
+
+static int vidioc_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = video_timings_cap;
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
@@ -279,8 +513,15 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
.vidioc_g_fmt_vid_out = vidioc_g_fmt,
.vidioc_enum_output = vidioc_enum_output,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
.vidioc_g_output = vidioc_g_output,
.vidioc_s_output = vidioc_s_output,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+ .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+ .vidioc_g_dv_timings = vidioc_g_dv_timings,
+ .vidioc_s_dv_timings = vidioc_s_dv_timings,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
@@ -423,13 +664,13 @@ static void fpga_init(struct mgb4_vout_dev *voutdev)
mgb4_write_reg(video, regs->config, 0x00000011);
mgb4_write_reg(video, regs->resolution,
- (MGB4_DEFAULT_WIDTH << 16) | MGB4_DEFAULT_HEIGHT);
- mgb4_write_reg(video, regs->hsync, 0x00102020);
- mgb4_write_reg(video, regs->vsync, 0x40020202);
- mgb4_write_reg(video, regs->frame_period, MGB4_DEFAULT_PERIOD);
+ (DEFAULT_WIDTH << 16) | DEFAULT_HEIGHT);
+ mgb4_write_reg(video, regs->hsync, 0x00283232);
+ mgb4_write_reg(video, regs->vsync, 0x40141F1E);
+ mgb4_write_reg(video, regs->frame_limit, DEFAULT_PERIOD);
mgb4_write_reg(video, regs->padding, 0x00000000);
- voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 70000 >> 1) << 1;
+ voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 61150 >> 1) << 1;
mgb4_write_reg(video, regs->config,
(voutdev->config->id + MGB4_VIN_DEVICES) << 2 | 1 << 4);
@@ -455,14 +696,20 @@ static void debugfs_init(struct mgb4_vout_dev *voutdev)
voutdev->regs[3].offset = voutdev->config->regs.hsync;
voutdev->regs[4].name = "VIDEO_PARAMS_2";
voutdev->regs[4].offset = voutdev->config->regs.vsync;
- voutdev->regs[5].name = "FRAME_PERIOD";
- voutdev->regs[5].offset = voutdev->config->regs.frame_period;
- voutdev->regs[6].name = "PADDING";
+ voutdev->regs[5].name = "FRAME_LIMIT";
+ voutdev->regs[5].offset = voutdev->config->regs.frame_limit;
+ voutdev->regs[6].name = "PADDING_PIXELS";
voutdev->regs[6].offset = voutdev->config->regs.padding;
+ if (has_timeperframe(video)) {
+ voutdev->regs[7].name = "TIMER";
+ voutdev->regs[7].offset = voutdev->config->regs.timer;
+ voutdev->regset.nregs = 8;
+ } else {
+ voutdev->regset.nregs = 7;
+ }
voutdev->regset.base = video->membase;
voutdev->regset.regs = voutdev->regs;
- voutdev->regset.nregs = ARRAY_SIZE(voutdev->regs);
debugfs_create_regset32("registers", 0444, voutdev->debugfs,
&voutdev->regset);
diff --git a/drivers/media/pci/mgb4/mgb4_vout.h b/drivers/media/pci/mgb4/mgb4_vout.h
index b163dee711fd..adc8fe1e7ae6 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.h
+++ b/drivers/media/pci/mgb4/mgb4_vout.h
@@ -19,10 +19,11 @@ struct mgb4_vout_regs {
u32 config;
u32 status;
u32 resolution;
- u32 frame_period;
+ u32 frame_limit;
u32 hsync;
u32 vsync;
u32 padding;
+ u32 timer;
};
struct mgb4_vout_config {
@@ -55,7 +56,7 @@ struct mgb4_vout_dev {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
struct debugfs_regset32 regset;
- struct debugfs_reg32 regs[7];
+ struct debugfs_reg32 regs[sizeof(struct mgb4_vout_regs) / 4];
#endif
};
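Note: the timeperframe plumbing above reduces to fixed-point conversion against the
125 MHz reference clock: MGB4_PERIOD() computes timer ticks as freq * numerator /
denominator in 64-bit, and the minimum interval is one frame's worth of pixels at the
current pixel clock. The same arithmetic as standalone C (names ours):

    #include <stdint.h>

    #define MGB4_HW_FREQ 125000000		/* 125 MHz reference clock */

    /* V4L2 timeperframe fraction -> hardware timer ticks */
    static uint32_t mgb4_period_ticks(uint32_t numerator, uint32_t denominator)
    {
    	return (uint32_t)(((uint64_t)MGB4_HW_FREQ * numerator) / denominator);
    }

    /* e.g. 1/60 s per frame -> 125000000 / 60 = 2083333 ticks */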
diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
index ca70a864a3ef..5f100e5e03d9 100644
--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
@@ -57,7 +57,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
int desc_cnt)
{
struct solo_p2m_dev *p2m_dev;
- unsigned int timeout;
+ unsigned long time_left;
unsigned int config = 0;
int ret = 0;
unsigned int p2m_id = 0;
@@ -99,12 +99,12 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
desc[1].ctrl);
}
- timeout = wait_for_completion_timeout(&p2m_dev->completion,
- solo_dev->p2m_jiffies);
+ time_left = wait_for_completion_timeout(&p2m_dev->completion,
+ solo_dev->p2m_jiffies);
if (WARN_ON_ONCE(p2m_dev->error))
ret = -EIO;
- else if (timeout == 0) {
+ else if (time_left == 0) {
solo_dev->p2m_timeouts++;
ret = -EAGAIN;
}
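Note: this rename (and the matching ones in the allegro, atmel-isi and tegra-vde patches
below) tracks the return-value semantics: wait_for_completion_timeout() returns the
remaining jiffies (at least 1) on completion and 0 on timeout, so the variable holds
time left, not a timeout. The canonical shape:

    unsigned long time_left;

    time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(500));
    if (time_left == 0)
    	return -ETIMEDOUT;	/* the completion never fired */
    /* otherwise time_left jiffies of the budget remained */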
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index da61f9beb6b4..73606cee586e 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -179,7 +179,7 @@ struct allegro_dev {
struct list_head channels;
};
-static struct regmap_config allegro_regmap_config = {
+static const struct regmap_config allegro_regmap_config = {
.name = "regmap",
.reg_bits = 32,
.val_bits = 32,
@@ -188,7 +188,7 @@ static struct regmap_config allegro_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static struct regmap_config allegro_sram_config = {
+static const struct regmap_config allegro_sram_config = {
.name = "sram",
.reg_bits = 32,
.val_bits = 32,
@@ -1415,11 +1415,11 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev,
unsigned long timeout_ms)
{
- unsigned long tmo;
+ unsigned long time_left;
- tmo = wait_for_completion_timeout(&dev->init_complete,
- msecs_to_jiffies(timeout_ms));
- if (tmo == 0)
+ time_left = wait_for_completion_timeout(&dev->init_complete,
+ msecs_to_jiffies(timeout_ms));
+ if (time_left == 0)
return -ETIMEDOUT;
reinit_completion(&dev->init_complete);
@@ -2481,14 +2481,14 @@ static void allegro_mcu_interrupt(struct allegro_dev *dev)
static void allegro_destroy_channel(struct allegro_channel *channel)
{
struct allegro_dev *dev = channel->dev;
- unsigned long timeout;
+ unsigned long time_left;
if (channel_exists(channel)) {
reinit_completion(&channel->completion);
allegro_mcu_send_destroy_channel(dev, channel);
- timeout = wait_for_completion_timeout(&channel->completion,
- msecs_to_jiffies(5000));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (time_left == 0)
v4l2_warn(&dev->v4l2_dev,
"channel %d: timeout while destroying\n",
channel->mcu_channel_id);
@@ -2544,7 +2544,7 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
static int allegro_create_channel(struct allegro_channel *channel)
{
struct allegro_dev *dev = channel->dev;
- unsigned long timeout;
+ unsigned long time_left;
if (channel_exists(channel)) {
v4l2_warn(&dev->v4l2_dev,
@@ -2595,9 +2595,9 @@ static int allegro_create_channel(struct allegro_channel *channel)
reinit_completion(&channel->completion);
allegro_mcu_send_create_channel(dev, channel);
- timeout = wait_for_completion_timeout(&channel->completion,
- msecs_to_jiffies(5000));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (time_left == 0)
channel->error = -ETIMEDOUT;
if (channel->error)
goto err;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index c1108df72dd5..5c823d3f9cc0 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -242,7 +242,7 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id)
#define WAIT_ISI_DISABLE 0
static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
{
- unsigned long timeout;
+ unsigned long time_left;
/*
* The reset or disable will only succeed if we have a
* pixel clock from the camera.
@@ -257,9 +257,9 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
}
- timeout = wait_for_completion_timeout(&isi->complete,
- msecs_to_jiffies(500));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(500));
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
diff --git a/drivers/media/platform/chips-media/coda/coda-bit.c b/drivers/media/platform/chips-media/coda/coda-bit.c
index ed47d5bd8d61..84ded154adfe 100644
--- a/drivers/media/platform/chips-media/coda/coda-bit.c
+++ b/drivers/media/platform/chips-media/coda/coda-bit.c
@@ -585,7 +585,7 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx,
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
- size = (DIV_ROUND_UP(q_data->rect.width, 16) *
+ size = (unsigned long)(DIV_ROUND_UP(q_data->rect.width, 16) *
DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
diff --git a/drivers/media/platform/imagination/Kconfig b/drivers/media/platform/imagination/Kconfig
index 7139ae22219b..a302c955483d 100644
--- a/drivers/media/platform/imagination/Kconfig
+++ b/drivers/media/platform/imagination/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_E5010_JPEG_ENC
tristate "Imagination E5010 JPEG Encoder Driver"
depends on VIDEO_DEV
+ depends on ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
index 11ca2c2fbaad..e62c1c18758b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
@@ -595,7 +595,7 @@ static void mtk_init_vdec_params(struct mtk_vcodec_dec_ctx *ctx)
}
}
-static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+static const struct vb2_ops mtk_vdec_frame_vb2_ops = {
.queue_setup = vb2ops_vdec_queue_setup,
.buf_prepare = vb2ops_vdec_buf_prepare,
.wait_prepare = vb2_ops_wait_prepare,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index b903e39fee89..3307dc15fc1d 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -854,7 +854,7 @@ static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb)
return 0;
}
-static struct vb2_ops mtk_vdec_request_vb2_ops = {
+static const struct vb2_ops mtk_vdec_request_vb2_ops = {
.queue_setup = vb2ops_vdec_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
index 73d5cef33b2a..1e1b32faac77 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
@@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
+
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
inst->num_nalu, y_fb_dma, c_fb_dma, fb);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index 2d4611e7fa0b..1ed0ccec5665 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
+
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
index e27e728f392e..232ef3bd246a 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
@@ -334,14 +334,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
- dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ y_fb_dma = fb->base_y.dma_addr;
if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
c_fb_dma = y_fb_dma +
inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
else
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ c_fb_dma = fb->base_c.dma_addr;
inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
inst->vsi->dec.bs_sz = bs->size;
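Note: all three MediaTek decoder paths gain the same guard: get_cap_buffer() can return
NULL when no capture buffer is available, and the old fallback only zeroed the DMA
addresses while still feeding the NULL fb through container_of(), leaving a bogus
dst_buf_info for later use. The hardened shape (names abbreviated):

    fb = get_cap_buffer(ctx);
    if (!fb)
    	return -ENOMEM;		/* no capture buffer to decode into */

    dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
    y_fb_dma = fb->base_y.dma_addr;	/* safe: fb checked above */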
diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c
index f3a5cbacadbe..28e56f6a695d 100644
--- a/drivers/media/platform/microchip/microchip-isc-base.c
+++ b/drivers/media/platform/microchip/microchip-isc-base.c
@@ -902,8 +902,11 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
return 0;
}
-static int isc_validate(struct isc_device *isc)
+static int isc_link_validate(struct media_link *link)
{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct isc_device *isc = video_get_drvdata(vdev);
int ret;
int i;
struct isc_format *sd_fmt = NULL;
@@ -1906,20 +1909,6 @@ int microchip_isc_pipeline_init(struct isc_device *isc)
}
EXPORT_SYMBOL_GPL(microchip_isc_pipeline_init);
-static int isc_link_validate(struct media_link *link)
-{
- struct video_device *vdev =
- media_entity_to_video_device(link->sink->entity);
- struct isc_device *isc = video_get_drvdata(vdev);
- int ret;
-
- ret = v4l2_subdev_link_validate(link);
- if (ret)
- return ret;
-
- return isc_validate(isc);
-}
-
static const struct media_entity_operations isc_entity_operations = {
.link_validate = isc_link_validate,
};
diff --git a/drivers/media/platform/microchip/microchip-sama5d2-isc.c b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
index 5ac149cf3647..60b6d922d764 100644
--- a/drivers/media/platform/microchip/microchip-sama5d2-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
@@ -353,33 +353,29 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
INIT_LIST_HEAD(&isc->subdev_entities);
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
-
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
+ int ret;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
- ret = -EINVAL;
+ of_node_put(epn);
dev_err(dev, "Could not parse the endpoint\n");
- break;
+ return -EINVAL;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
- ret = -ENOMEM;
- break;
+ of_node_put(epn);
+ return -ENOMEM;
}
subdev_entity->epn = epn;
@@ -400,9 +396,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
- of_node_put(epn);
- return ret;
+ return 0;
}
static int microchip_isc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/microchip/microchip-sama7g5-isc.c b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
index 73445f33d26b..e97abe3e35af 100644
--- a/drivers/media/platform/microchip/microchip-sama7g5-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
@@ -336,36 +336,32 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
bool mipi_mode;
INIT_LIST_HEAD(&isc->subdev_entities);
mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
-
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
+ int ret;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
- ret = -EINVAL;
+ of_node_put(epn);
dev_err(dev, "Could not parse the endpoint\n");
- break;
+ return -EINVAL;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
- ret = -ENOMEM;
- break;
+ of_node_put(epn);
+ return -ENOMEM;
}
subdev_entity->epn = epn;
@@ -389,9 +385,8 @@ static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
- of_node_put(epn);
- return ret;
+ return 0;
}
static int microchip_xisc_probe(struct platform_device *pdev)
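Note: both ISC parsers move from an open-coded of_graph_get_next_endpoint() loop to
for_each_endpoint_of_node(), which also simplifies the reference counting: the iterator
holds a reference on the current endpoint, so only early exits must drop it. The
resulting shape:

    for_each_endpoint_of_node(np, epn) {
    	ret = parse_one_endpoint(epn);	/* stands in for the body above */
    	if (ret) {
    		of_node_put(epn);	/* early exit: drop the iterator's ref */
    		return ret;
    	}
    	/* on success the reference is kept in subdev_entity->epn */
    }
    return 0;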
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index d8812fc06c67..0e56a4331b0d 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -623,14 +623,14 @@ static int tegra_vde_decode_end(struct tegra_vde *vde)
unsigned int read_bytes, macroblocks_nb;
struct device *dev = vde->dev;
dma_addr_t bsev_ptr;
- long timeout;
+ long time_left;
int ret;
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&vde->decode_completion, msecs_to_jiffies(1000));
- if (timeout < 0) {
- ret = timeout;
- } else if (timeout == 0) {
+ if (time_left < 0) {
+ ret = time_left;
+ } else if (time_left == 0) {
bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
read_bytes = bsev_ptr ? bsev_ptr - vde->bitstream_data_addr : 0;
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index b9729a8883d6..44e1402e8be1 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -861,18 +861,21 @@ static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_error
{
unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
: MIPI_CSIS_NUM_EVENTS - 8;
+ unsigned int counters[MIPI_CSIS_NUM_EVENTS];
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&csis->slock, flags);
+ for (i = 0; i < num_events; ++i)
+ counters[i] = csis->events[i].counter;
+ spin_unlock_irqrestore(&csis->slock, flags);
for (i = 0; i < num_events; ++i) {
- if (csis->events[i].counter > 0 || csis->debug.enable)
+ if (counters[i] > 0 || csis->debug.enable)
dev_info(csis->dev, "%s events: %d\n",
csis->events[i].name,
- csis->events[i].counter);
+ counters[i]);
}
- spin_unlock_irqrestore(&csis->slock, flags);
}
static int mipi_csis_dump_regs(struct mipi_csis_device *csis)
@@ -1344,7 +1347,7 @@ err_parse:
* Suspend/resume
*/
-static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
+static int mipi_csis_runtime_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
@@ -1359,7 +1362,7 @@ static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
+static int mipi_csis_runtime_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
@@ -1379,8 +1382,8 @@ static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mipi_csis_pm_ops = {
- SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
+ NULL)
};
/* -----------------------------------------------------------------------------
@@ -1571,7 +1574,7 @@ static struct platform_driver mipi_csis_driver = {
.driver = {
.of_match_table = mipi_csis_of_match,
.name = CSIS_DRIVER_NAME,
- .pm = &mipi_csis_pm_ops,
+ .pm = pm_ptr(&mipi_csis_pm_ops),
},
};
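Note: the PM conversion follows the current idiom: RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS()
plus pm_ptr() let the compiler discard both the callbacks and the ops pointer when
CONFIG_PM is off, making the __maybe_unused annotations unnecessary. In outline:

    static const struct dev_pm_ops foo_pm_ops = {
    	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    };

    /* in the platform_driver: */
    	.pm = pm_ptr(&foo_pm_ops),	/* NULL when !CONFIG_PM */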
diff --git a/drivers/media/platform/nxp/imx-pxp.h b/drivers/media/platform/nxp/imx-pxp.h
index 44f95c749d2e..476f2042fa6f 100644
--- a/drivers/media/platform/nxp/imx-pxp.h
+++ b/drivers/media/platform/nxp/imx-pxp.h
@@ -594,12 +594,17 @@
(((v) << 18) & BM_PXP_CSC1_COEF0_C0)
#define BP_PXP_CSC1_COEF0_UV_OFFSET 9
#define BM_PXP_CSC1_COEF0_UV_OFFSET 0x0003FE00
+
+/*
+ * We use v * (1 << 9) instead of v << 9 to work around a gcc5 bug:
+ * the compiler cannot understand that the expression is constant.
+ */
#define BF_PXP_CSC1_COEF0_UV_OFFSET(v) \
- (((v) << 9) & BM_PXP_CSC1_COEF0_UV_OFFSET)
+ (((v) * (1 << 9)) & BM_PXP_CSC1_COEF0_UV_OFFSET)
#define BP_PXP_CSC1_COEF0_Y_OFFSET 0
#define BM_PXP_CSC1_COEF0_Y_OFFSET 0x000001FF
#define BF_PXP_CSC1_COEF0_Y_OFFSET(v) \
- (((v) << 0) & BM_PXP_CSC1_COEF0_Y_OFFSET)
+ ((v) & BM_PXP_CSC1_COEF0_Y_OFFSET)
#define HW_PXP_CSC1_COEF1 (0x000001b0)
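Note: the workaround trades a shift for an equivalent multiply: (v) * (1 << 9) and
(v) << 9 encode the same value for the field widths involved, but the affected gcc 5
builds only accepted the multiply form as a constant expression in this context. A
standalone illustration:

    #define BM_FIELD	0x0003FE00
    #define BF_SHIFT(v)	(((v) << 9) & BM_FIELD)
    #define BF_MUL(v)	(((v) * (1 << 9)) & BM_FIELD)

    /* both macros yield identical encodings */
    _Static_assert(BF_MUL(3) == BF_SHIFT(3), "equivalent");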
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index ba2e81f24965..d4a6c5532969 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -693,7 +693,7 @@ unlock:
return ret ? -EAGAIN : 0;
}
-static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
+static int imx8mq_mipi_csi_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -705,7 +705,7 @@ static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
+static int imx8mq_mipi_csi_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -716,7 +716,7 @@ static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
return imx8mq_mipi_csi_pm_resume(dev);
}
-static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
+static int imx8mq_mipi_csi_runtime_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -731,7 +731,7 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
+static int imx8mq_mipi_csi_runtime_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -747,10 +747,9 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
- SET_RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
- imx8mq_mipi_csi_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
+ RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
+ imx8mq_mipi_csi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
};
/* -----------------------------------------------------------------------------
@@ -958,7 +957,7 @@ static struct platform_driver imx8mq_mipi_csi_driver = {
.driver = {
.of_match_table = imx8mq_mipi_csi_of_match,
.name = MIPI_CSI2_DRIVER_NAME,
- .pm = &imx8mq_mipi_csi_pm_ops,
+ .pm = pm_ptr(&imx8mq_mipi_csi_pm_ops),
},
};
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index cd72feca618c..3b8fc31d957c 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -297,12 +297,6 @@ static void video_stop_streaming(struct vb2_queue *q)
ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (entity->use_count > 1) {
- /* Don't stop if other instances of the pipeline are still running */
- dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n");
- return;
- }
-
if (ret) {
dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret);
return;
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 51b1d3550421..d64985ca6e88 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -2283,6 +2283,8 @@ static int camss_probe(struct platform_device *pdev)
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
+ pm_runtime_enable(dev);
+
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
ret = num_subdevs;
@@ -2323,8 +2325,6 @@ static int camss_probe(struct platform_device *pdev)
}
}
- pm_runtime_enable(dev);
-
return 0;
err_register_subdevs:
@@ -2332,6 +2332,7 @@ err_register_subdevs:
err_v4l2_device_unregister:
v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
+ pm_runtime_disable(dev);
err_genpd_cleanup:
camss_genpd_cleanup(camss);
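The camss reordering follows the usual probe rule: enable runtime PM before any step that may runtime-resume the device (registering subdevs can do so through the async notifier), and disable it on the error path so the unwind mirrors the setup. A hedged sketch of the rule, with a hypothetical foo_register_subdevs():

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_register_subdevs(struct device *dev);	/* hypothetical */

    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	/* Enable before anything that may trigger a runtime resume. */
    	pm_runtime_enable(&pdev->dev);

    	ret = foo_register_subdevs(&pdev->dev);
    	if (ret)
    		goto err_pm_disable;

    	return 0;

    err_pm_disable:
    	/* Error paths unwind in reverse order of the setup steps. */
    	pm_runtime_disable(&pdev->dev);
    	return ret;
    }

Where it fits, devm_pm_runtime_enable() removes the need for the manual disable altogether.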
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 165c947a6703..84e95a46dfc9 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -430,6 +430,7 @@ static void venus_remove(struct platform_device *pdev)
struct device *dev = core->dev;
int ret;
+ cancel_delayed_work_sync(&core->work);
ret = pm_runtime_get_sync(dev);
WARN_ON(ret < 0);
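The venus fix applies the standard teardown rule for deferred work: cancel it, waiting for any running instance, before dismantling the state it touches. A minimal sketch, assuming a hypothetical foo_core with an embedded delayed work:

    #include <linux/workqueue.h>

    struct foo_core {			/* hypothetical device state */
    	struct delayed_work work;
    };

    static void foo_remove(struct foo_core *core)
    {
    	/* Cancels a pending timer and waits for a running handler. */
    	cancel_delayed_work_sync(&core->work);
    	/* ...now safe to power down and free core... */
    }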
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index fe7da2b30482..66a18830e66d 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -316,10 +316,10 @@ int venus_firmware_init(struct venus_core *core)
core->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(core->fw.dev);
+ if (IS_ERR(iommu_dom)) {
dev_err(core->fw.dev, "Failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
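iommu_paging_domain_alloc() reports failure as an ERR_PTR() value rather than NULL, which is why the error path above switches from a hardcoded -ENOMEM to PTR_ERR(). A hedged sketch of the allocate/attach flow, with a hypothetical foo_setup_iommu():

    #include <linux/err.h>
    #include <linux/iommu.h>

    static int foo_setup_iommu(struct device *dev)
    {
    	struct iommu_domain *dom;
    	int ret;

    	/* Allocates a paging domain suited to this device's IOMMU. */
    	dom = iommu_paging_domain_alloc(dev);
    	if (IS_ERR(dom))
    		return PTR_ERR(dom);

    	ret = iommu_attach_device(dom, dev);
    	if (ret) {
    		iommu_domain_free(dom);
    		return ret;
    	}

    	return 0;
    }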
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 3418d2dd9371..3ae063094e3e 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -156,7 +156,7 @@ void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
pkt->num_properties = 1;
- pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+ pkt->data = HFI_PROPERTY_SYS_IMAGE_VERSION;
}
int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
@@ -331,7 +331,7 @@ int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
pkt->alloc_len = out_frame->alloc_len;
pkt->filled_len = out_frame->filled_len;
pkt->offset = out_frame->offset;
- pkt->data[0] = out_frame->extradata_size;
+ pkt->data = out_frame->extradata_size;
return 0;
}
@@ -402,7 +402,7 @@ static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
- pkt->data[0] = ptype;
+ pkt->data = ptype;
return 0;
}
@@ -1110,7 +1110,7 @@ pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
switch (ptype) {
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
- pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
+ pkt->data = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
break;
default:
ret = pkt_session_get_property_1x(pkt, cookie, ptype);
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index 20acd412ee7b..a83125bc17aa 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -74,7 +74,7 @@ struct hfi_sys_set_property_pkt {
struct hfi_sys_get_property_pkt {
struct hfi_pkt_hdr hdr;
u32 num_properties;
- u32 data[1];
+ u32 data;
};
struct hfi_sys_set_buffers_pkt {
@@ -82,7 +82,7 @@ struct hfi_sys_set_buffers_pkt {
u32 buffer_type;
u32 buffer_size;
u32 num_buffers;
- u32 buffer_addr[1];
+ u32 buffer_addr[];
};
struct hfi_sys_ping_pkt {
@@ -151,7 +151,7 @@ struct hfi_session_empty_buffer_compressed_pkt {
u32 input_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
@@ -168,7 +168,7 @@ struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
u32 input_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
@@ -177,7 +177,7 @@ struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer2;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
@@ -186,7 +186,7 @@ struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer3;
- u32 data[1];
+ u32 data;
};
struct hfi_session_fill_buffer_pkt {
@@ -198,7 +198,7 @@ struct hfi_session_fill_buffer_pkt {
u32 output_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_flush_pkt {
@@ -217,7 +217,7 @@ struct hfi_session_resume_pkt {
struct hfi_session_get_property_pkt {
struct hfi_session_hdr_pkt shdr;
u32 num_properties;
- u32 data[1];
+ u32 data;
};
struct hfi_session_release_buffer_pkt {
@@ -227,7 +227,7 @@ struct hfi_session_release_buffer_pkt {
u32 extradata_size;
u32 response_req;
u32 num_buffers;
- u32 buffer_info[1];
+ u32 buffer_info[] __counted_by(num_buffers);
};
struct hfi_session_release_resources_pkt {
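The hfi_cmds.h hunks retire the old one-element-array idiom: members that only ever held a single value become plain scalars, and genuine variable-length tails become C99 flexible array members, optionally annotated with __counted_by() so fortified bounds checks know the element count. A sketch of the annotated pattern, using a hypothetical foo_release_pkt:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct foo_release_pkt {
    	u32 num_buffers;
    	u32 buffer_info[] __counted_by(num_buffers);
    };

    static struct foo_release_pkt *foo_alloc_pkt(u32 n)
    {
    	struct foo_release_pkt *pkt;

    	/* struct_size() computes sizeof(*pkt) + n * sizeof(u32)
    	 * without risking integer overflow. */
    	pkt = kzalloc(struct_size(pkt, buffer_info, n), GFP_KERNEL);
    	if (!pkt)
    		return NULL;

    	/* Set the counter before touching the array so
    	 * __counted_by-aware bounds checks see the right length. */
    	pkt->num_buffers = n;
    	return pkt;
    }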
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index e4c05d62cfc7..f44059f19505 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -761,7 +761,7 @@ struct hfi_multi_stream_3x {
struct hfi_multi_view_format {
u32 views;
- u32 view_order[1];
+ u32 view_order[];
};
#define HFI_MULTI_SLICE_OFF 0x1
@@ -1005,13 +1005,13 @@ struct hfi_uncompressed_plane_constraints {
struct hfi_uncompressed_plane_info {
u32 format;
u32 num_planes;
- struct hfi_uncompressed_plane_constraints plane_constraints[1];
+ struct hfi_uncompressed_plane_constraints plane_constraints;
};
struct hfi_uncompressed_format_supported {
u32 buffer_type;
u32 format_entries;
- struct hfi_uncompressed_plane_info plane_info[1];
+ struct hfi_uncompressed_plane_info plane_info;
};
struct hfi_uncompressed_plane_actual {
@@ -1038,7 +1038,7 @@ struct hfi_codec_supported {
struct hfi_properties_supported {
u32 num_properties;
- u32 properties[1];
+ u32 properties[];
};
struct hfi_max_sessions_supported {
@@ -1085,12 +1085,12 @@ struct hfi_resource_ocmem_requirement {
struct hfi_resource_ocmem_requirement_info {
u32 num_entries;
- struct hfi_resource_ocmem_requirement requirements[1];
+ struct hfi_resource_ocmem_requirement requirements[];
};
struct hfi_property_sys_image_version_info_type {
u32 string_size;
- u8 str_image_version[1];
+ u8 str_image_version[];
};
struct hfi_codec_mask_supported {
@@ -1141,7 +1141,7 @@ struct hfi_extradata_header {
u32 port_index;
u32 type;
u32 data_size;
- u8 data[1];
+ u8 data[];
};
struct hfi_batch_info {
@@ -1236,7 +1236,7 @@ static inline void hfi_bufreq_set_count_min_host(struct hfi_buffer_requirements
struct hfi_data_payload {
u32 size;
- u8 data[1];
+ u8 data[];
};
struct hfi_enable_picture {
@@ -1264,12 +1264,12 @@ struct hfi_interlace_format_supported {
struct hfi_buffer_alloc_mode_supported {
u32 buffer_type;
u32 num_entries;
- u32 data[1];
+ u32 data[];
};
struct hfi_mb_error_map {
u32 error_map_size;
- u8 error_map[1];
+ u8 error_map[];
};
struct hfi_metadata_pass_through {
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index c43839539d4d..3df241dc3a11 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -157,7 +157,7 @@ static void
parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_uncompressed_format_supported *fmt = data;
- struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
+ struct hfi_uncompressed_plane_info *pinfo = &fmt->plane_info;
struct hfi_uncompressed_plane_constraints *constr;
struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
u32 entries = fmt->format_entries;
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index f5a655973c08..6289166786ec 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1063,51 +1063,51 @@ struct enc_bufsize_ops {
u32 (*persist)(void);
};
-static struct dec_bufsize_ops dec_h264_ops = {
+static const struct dec_bufsize_ops dec_h264_ops = {
.scratch = h264d_scratch_size,
.scratch1 = h264d_scratch1_size,
.persist1 = h264d_persist1_size,
};
-static struct dec_bufsize_ops dec_h265_ops = {
+static const struct dec_bufsize_ops dec_h265_ops = {
.scratch = h265d_scratch_size,
.scratch1 = h265d_scratch1_size,
.persist1 = h265d_persist1_size,
};
-static struct dec_bufsize_ops dec_vp8_ops = {
+static const struct dec_bufsize_ops dec_vp8_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp8d_scratch1_size,
.persist1 = vp8d_persist1_size,
};
-static struct dec_bufsize_ops dec_vp9_ops = {
+static const struct dec_bufsize_ops dec_vp9_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp9d_scratch1_size,
.persist1 = vp9d_persist1_size,
};
-static struct dec_bufsize_ops dec_mpeg2_ops = {
+static const struct dec_bufsize_ops dec_mpeg2_ops = {
.scratch = mpeg2d_scratch_size,
.scratch1 = mpeg2d_scratch1_size,
.persist1 = mpeg2d_persist1_size,
};
-static struct enc_bufsize_ops enc_h264_ops = {
+static const struct enc_bufsize_ops enc_h264_ops = {
.scratch = h264e_scratch_size,
.scratch1 = h264e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
-static struct enc_bufsize_ops enc_h265_ops = {
+static const struct enc_bufsize_ops enc_h265_ops = {
.scratch = h265e_scratch_size,
.scratch1 = h265e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
-static struct enc_bufsize_ops enc_vp8_ops = {
+static const struct enc_bufsize_ops enc_vp8_ops = {
.scratch = vp8e_scratch_size,
.scratch1 = vp8e_scratch1_size,
.scratch2 = enc_scratch2_size,
@@ -1186,7 +1186,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
u32 codec = params->codec;
u32 width = params->width, height = params->height, out_min_count;
u32 out_width = params->out_width, out_height = params->out_height;
- struct dec_bufsize_ops *dec_ops;
+ const struct dec_bufsize_ops *dec_ops;
bool is_secondary_output = params->dec.is_secondary_output;
bool is_interlaced = params->dec.is_interlaced;
u32 max_mbs_per_frame = params->dec.max_mbs_per_frame;
@@ -1260,7 +1260,7 @@ static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
struct hfi_buffer_requirements *bufreq)
{
enum hfi_version version = params->version;
- struct enc_bufsize_ops *enc_ops;
+ const struct enc_bufsize_ops *enc_ops;
u32 width = params->width;
u32 height = params->height;
bool is_tenbit = params->enc.is_tenbit;
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 4ce76ce6dd4d..ea8a2bd9419e 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -876,7 +876,7 @@ static int vcodec_domains_get(struct venus_core *core)
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
- ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains);
+ ret = devm_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains);
if (ret < 0)
return ret;
@@ -902,14 +902,11 @@ skip_pmdomains:
return 0;
opp_attach_err:
- dev_pm_domain_detach_list(core->pmdomains);
return ret;
}
static void vcodec_domains_put(struct venus_core *core)
{
- dev_pm_domain_detach_list(core->pmdomains);
-
if (!core->has_opp_table)
return;
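devm_pm_domain_attach_list() is the device-managed variant of dev_pm_domain_attach_list(): attached domains are detached automatically on driver unbind, which is what lets the explicit detach calls above be deleted. A hedged sketch with hypothetical domain names:

    #include <linux/kernel.h>
    #include <linux/pm_domain.h>

    static int foo_attach_domains(struct device *dev,
    			      struct dev_pm_domain_list **pds)
    {
    	/* Hypothetical power-domain names. */
    	static const char * const pd_names[] = { "venus", "vcodec0" };
    	const struct dev_pm_domain_attach_data pd_data = {
    		.pd_names = pd_names,
    		.num_pd_names = ARRAY_SIZE(pd_names),
    	};
    	int ret;

    	ret = devm_pm_domain_attach_list(dev, &pd_data, pds);
    	return ret < 0 ? ret : 0;	/* returns a count on success */
    }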
diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
index 38c0f8305d62..46765a2e4c4d 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig
+++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_RASPBERRYPI_PISP_BE
tristate "Raspberry Pi PiSP Backend (BE) ISP driver"
depends on V4L_PLATFORM_DRIVERS
depends on VIDEO_DEV
+ depends on ARCH_BCM2835 || COMPILE_TEST
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 809c3a38cc4a..695d884a22d1 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -1274,16 +1274,7 @@ static const struct rvin_info rcar_info_r8a77995 = {
.scaler = rvin_scaler_gen3,
};
-static const struct rvin_info rcar_info_r8a779a0 = {
- .model = RCAR_GEN3,
- .use_mc = true,
- .use_isp = true,
- .nv12 = true,
- .max_width = 4096,
- .max_height = 4096,
-};
-
-static const struct rvin_info rcar_info_r8a779g0 = {
+static const struct rvin_info rcar_info_gen4 = {
.model = RCAR_GEN3,
.use_mc = true,
.use_isp = true,
@@ -1354,12 +1345,18 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a77995,
},
{
+		/* Kept for compatibility with old DTS files. */
.compatible = "renesas,vin-r8a779a0",
- .data = &rcar_info_r8a779a0,
+ .data = &rcar_info_gen4,
},
{
+		/* Kept for compatibility with old DTS files. */
.compatible = "renesas,vin-r8a779g0",
- .data = &rcar_info_r8a779g0,
+ .data = &rcar_info_gen4,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-vin",
+ .data = &rcar_info_gen4,
},
{ /* Sentinel */ },
};
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index e68fcdaea207..c7fdee347ac8 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -865,6 +865,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = {
{ .compatible = "renesas,rzg2l-csi2", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
static struct platform_driver rzg2l_csi2_pdrv = {
.remove_new = rzg2l_csi2_remove,
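Without MODULE_DEVICE_TABLE(), a module built from this driver carries no alias for its OF compatibles, so udev cannot autoload it when a matching device-tree node turns into a platform device; the one added line fixes exactly that. The general shape, with a hypothetical compatible string:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id foo_of_table[] = {
    	{ .compatible = "vendor,foo" },	/* hypothetical */
    	{ /* sentinel */ }
    };
    /* Emits an "of:N*T*Cvendor,foo" alias into the module so userspace
     * can match it against the device's uevent MODALIAS. */
    MODULE_DEVICE_TABLE(of, foo_of_table);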
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index fdb46ec0c872..e728f9f5160e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -1082,6 +1082,27 @@ static const struct v4l2_file_operations vsp1_video_fops = {
};
/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+static int vsp1_video_link_validate(struct media_link *link)
+{
+ /*
+ * Ideally, link validation should be implemented here instead of
+ * calling vsp1_video_verify_format() in vsp1_video_streamon()
+	 * manually. That would however break userspace that starts one video
+	 * device before configuring formats on the other video devices in
+	 * the pipeline. This operation is just a no-op to silence the
+	 * warnings from v4l2_subdev_link_validate().
+ */
+ return 0;
+}
+
+static const struct media_entity_operations vsp1_video_media_ops = {
+ .link_validate = vsp1_video_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
* Suspend and Resume
*/
@@ -1215,6 +1236,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
/* ... and the video node... */
video->video.v4l2_dev = &video->vsp1->v4l2_dev;
+ video->video.entity.ops = &vsp1_video_media_ops;
video->video.fops = &vsp1_video_fops;
snprintf(video->video.name, sizeof(video->video.name), "%s %s",
rwpf->entity.subdev.name, direction);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
index f956b90a407a..60c97bb7b18b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
@@ -178,3 +178,17 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
rkisp1_sd_adjust_crop_rect(crop, &crop_bounds);
}
+
+void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern,
+ const u32 input[4], u32 output[4])
+{
+ static const unsigned int swap[4][4] = {
+ [RKISP1_RAW_RGGB] = { 0, 1, 2, 3 },
+ [RKISP1_RAW_GRBG] = { 1, 0, 3, 2 },
+ [RKISP1_RAW_GBRG] = { 2, 3, 0, 1 },
+ [RKISP1_RAW_BGGR] = { 3, 2, 1, 0 },
+ };
+
+ for (unsigned int i = 0; i < 4; ++i)
+ output[i] = input[swap[pattern][i]];
+}
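rkisp1_bls_swap_regs() centralizes the Bayer-pattern-dependent mapping between the r/gr/gb/b components and the A/B/C/D registers that rkisp1-params.c previously open-coded as a four-way switch (see the BLS hunk later in this series). A usage sketch with hypothetical register offsets and a hypothetical foo_write() MMIO helper:

    /* Hypothetical stand-ins for the real BLS register offsets. */
    enum { REG_A = 0x10, REG_B = 0x14, REG_C = 0x18, REG_D = 0x1c };

    static void foo_write(u32 reg, u32 val);	/* hypothetical */

    static void foo_write_bls(enum rkisp1_fmt_raw_pat_type pattern,
    			  u32 r, u32 gr, u32 gb, u32 b)
    {
    	static const u32 regs[4] = { REG_A, REG_B, REG_C, REG_D };
    	const u32 vals[4] = { r, gr, gb, b };
    	u32 swapped[4];
    	unsigned int i;

    	rkisp1_bls_swap_regs(pattern, regs, swapped);

    	/* For RKISP1_RAW_GRBG this yields { REG_B, REG_A, REG_D, REG_C },
    	 * matching the switch statement it replaces. */
    	for (i = 0; i < 4; i++)
    		foo_write(swapped[i], vals[i]);
    }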
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 26573f6ae575..ca952fd0829b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -33,9 +33,10 @@ struct regmap;
#define RKISP1_ISP_SD_SRC BIT(0)
#define RKISP1_ISP_SD_SINK BIT(1)
-/* min and max values for the widths and heights of the entities */
-#define RKISP1_ISP_MAX_WIDTH 4032
-#define RKISP1_ISP_MAX_HEIGHT 3024
+/*
+ * Minimum values for the width and height of entities. The maximum values are
+ * model-specific and stored in the rkisp1_info structure.
+ */
#define RKISP1_ISP_MIN_WIDTH 32
#define RKISP1_ISP_MIN_HEIGHT 32
@@ -115,6 +116,8 @@ enum rkisp1_isp_pad {
* @RKISP1_FEATURE_SELF_PATH: The ISP has a self path
* @RKISP1_FEATURE_DUAL_CROP: The ISP has the dual crop block at the resizer input
* @RKISP1_FEATURE_DMA_34BIT: The ISP uses 34-bit DMA addresses
+ * @RKISP1_FEATURE_BLS: The ISP has a dedicated BLS block
+ * @RKISP1_FEATURE_COMPAND: The ISP has a companding block
*
* The ISP features are stored in a bitmask in &rkisp1_info.features and allow
* the driver to implement support for features present in some ISP versions
@@ -126,6 +129,8 @@ enum rkisp1_feature {
RKISP1_FEATURE_SELF_PATH = BIT(2),
RKISP1_FEATURE_DUAL_CROP = BIT(3),
RKISP1_FEATURE_DMA_34BIT = BIT(4),
+ RKISP1_FEATURE_BLS = BIT(5),
+ RKISP1_FEATURE_COMPAND = BIT(6),
};
#define rkisp1_has_feature(rkisp1, feature) \
@@ -140,6 +145,8 @@ enum rkisp1_feature {
* @isr_size: number of entries in the @isrs array
* @isp_ver: ISP version
* @features: bitmask of rkisp1_feature features implemented by the ISP
+ * @max_width: maximum input frame width
+ * @max_height: maximum input frame height
*
* This structure contains information about the ISP specific to a particular
* ISP model, version, or integration in a particular SoC.
@@ -151,6 +158,8 @@ struct rkisp1_info {
unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
unsigned int features;
+ unsigned int max_width;
+ unsigned int max_height;
};
/*
@@ -232,7 +241,7 @@ struct rkisp1_vdev_node {
/*
* struct rkisp1_buffer - A container for the vb2 buffers used by the video devices:
- * params, stats, mainpath, selfpath
+ * stats, mainpath, selfpath
*
* @vb: vb2 buffer
* @queue: entry of the buffer in the queue
@@ -245,6 +254,26 @@ struct rkisp1_buffer {
};
/*
+ * struct rkisp1_params_buffer - A container for the vb2 buffers used by the
+ * params video device
+ *
+ * @vb: vb2 buffer
+ * @queue: entry of the buffer in the queue
+ * @cfg: scratch buffer used for caching the ISP configuration parameters
+ */
+struct rkisp1_params_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ void *cfg;
+};
+
+static inline struct rkisp1_params_buffer *
+to_rkisp1_params_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct rkisp1_params_buffer, vb);
+}
+
+/*
* struct rkisp1_dummy_buffer - A buffer to write the next frame to in case
* there are no vb2 buffers available.
*
@@ -372,9 +401,11 @@ struct rkisp1_params_ops {
* @ops: pointer to the variant-specific operations
* @config_lock: locks the buffer list 'params'
* @params: queue of rkisp1_buffer
- * @vdev_fmt: v4l2_format of the metadata format
+ * @metafmt: the currently enabled metadata format
* @quantization: the quantization configured on the isp's src pad
+ * @ycbcr_encoding: the YCbCr encoding
* @raw_type: the bayer pattern on the isp video sink pad
+ * @enabled_blocks: bitmask of enabled ISP blocks
*/
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
@@ -383,11 +414,14 @@ struct rkisp1_params {
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
- struct v4l2_format vdev_fmt;
+
+ const struct v4l2_meta_format *metafmt;
enum v4l2_quantization quantization;
enum v4l2_ycbcr_encoding ycbcr_encoding;
enum rkisp1_fmt_raw_pat_type raw_type;
+
+ u32 enabled_blocks;
};
/*
@@ -573,6 +607,9 @@ void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
const struct v4l2_mbus_framefmt *bounds);
+void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern,
+ const u32 input[4], u32 output[4]);
+
/*
* rkisp1_mbus_info_get_by_code - get the isp info of the media bus code
*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index 4202642e0523..841e58c20f7f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -307,6 +307,7 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
@@ -326,10 +327,10 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
sink_fmt->width = clamp_t(u32, fmt->format.width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ csi->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, fmt->format.height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ csi->rkisp1->info->max_height);
fmt->format = *sink_fmt;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index bb0202386c70..dd114ab77800 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -509,7 +509,10 @@ static const struct rkisp1_info px30_isp_info = {
.isp_ver = RKISP1_V12,
.features = RKISP1_FEATURE_MIPI_CSI2
| RKISP1_FEATURE_SELF_PATH
- | RKISP1_FEATURE_DUAL_CROP,
+ | RKISP1_FEATURE_DUAL_CROP
+ | RKISP1_FEATURE_BLS,
+ .max_width = 3264,
+ .max_height = 2448,
};
static const char * const rk3399_isp_clks[] = {
@@ -530,7 +533,10 @@ static const struct rkisp1_info rk3399_isp_info = {
.isp_ver = RKISP1_V10,
.features = RKISP1_FEATURE_MIPI_CSI2
| RKISP1_FEATURE_SELF_PATH
- | RKISP1_FEATURE_DUAL_CROP,
+ | RKISP1_FEATURE_DUAL_CROP
+ | RKISP1_FEATURE_BLS,
+ .max_width = 4416,
+ .max_height = 3312,
};
static const char * const imx8mp_isp_clks[] = {
@@ -550,7 +556,10 @@ static const struct rkisp1_info imx8mp_isp_info = {
.isr_size = ARRAY_SIZE(imx8mp_isp_isrs),
.isp_ver = RKISP1_V_IMX8MP,
.features = RKISP1_FEATURE_MAIN_STRIDE
- | RKISP1_FEATURE_DMA_34BIT,
+ | RKISP1_FEATURE_DMA_34BIT
+ | RKISP1_FEATURE_COMPAND,
+ .max_width = 4096,
+ .max_height = 3072,
};
static const struct of_device_id rkisp1_of_match[] = {
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 91301d17d356..d94917211828 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -517,6 +517,7 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
const struct rkisp1_mbus_info *mbus_info;
if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS ||
@@ -539,9 +540,9 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
return -EINVAL;
fse->min_width = RKISP1_ISP_MIN_WIDTH;
- fse->max_width = RKISP1_ISP_MAX_WIDTH;
+ fse->max_width = isp->rkisp1->info->max_width;
fse->min_height = RKISP1_ISP_MIN_HEIGHT;
- fse->max_height = RKISP1_ISP_MAX_HEIGHT;
+ fse->max_height = isp->rkisp1->info->max_height;
return 0;
}
@@ -772,10 +773,10 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ isp->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ isp->rkisp1->info->max_height);
/*
* Adjust the color space fields. Accept any color primaries and
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 173d1ea41874..320581a9f866 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -5,6 +5,9 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
+#include <linux/math.h>
+#include <linux/string.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
@@ -33,6 +36,59 @@
#define RKISP1_ISP_CC_COEFF(n) \
(RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4)
+#define RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS BIT(0)
+#define RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC BIT(1)
+
+union rkisp1_ext_params_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_ext_params_bls_config bls;
+ struct rkisp1_ext_params_dpcc_config dpcc;
+ struct rkisp1_ext_params_sdg_config sdg;
+ struct rkisp1_ext_params_lsc_config lsc;
+ struct rkisp1_ext_params_awb_gain_config awbg;
+ struct rkisp1_ext_params_flt_config flt;
+ struct rkisp1_ext_params_bdm_config bdm;
+ struct rkisp1_ext_params_ctk_config ctk;
+ struct rkisp1_ext_params_goc_config goc;
+ struct rkisp1_ext_params_dpf_config dpf;
+ struct rkisp1_ext_params_dpf_strength_config dpfs;
+ struct rkisp1_ext_params_cproc_config cproc;
+ struct rkisp1_ext_params_ie_config ie;
+ struct rkisp1_ext_params_awb_meas_config awbm;
+ struct rkisp1_ext_params_hst_config hst;
+ struct rkisp1_ext_params_aec_config aec;
+ struct rkisp1_ext_params_afc_config afc;
+ struct rkisp1_ext_params_compand_bls_config compand_bls;
+ struct rkisp1_ext_params_compand_curve_config compand_curve;
+};
+
+enum rkisp1_params_formats {
+ RKISP1_PARAMS_FIXED,
+ RKISP1_PARAMS_EXTENSIBLE,
+};
+
+static const struct v4l2_meta_format rkisp1_params_formats[] = {
+ [RKISP1_PARAMS_FIXED] = {
+ .dataformat = V4L2_META_FMT_RK_ISP1_PARAMS,
+ .buffersize = sizeof(struct rkisp1_params_cfg),
+ },
+ [RKISP1_PARAMS_EXTENSIBLE] = {
+ .dataformat = V4L2_META_FMT_RK_ISP1_EXT_PARAMS,
+ .buffersize = sizeof(struct rkisp1_ext_params_cfg),
+ },
+};
+
+static const struct v4l2_meta_format *
+rkisp1_params_get_format_info(u32 dataformat)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(rkisp1_params_formats); i++) {
+ if (rkisp1_params_formats[i].dataformat == dataformat)
+ return &rkisp1_params_formats[i];
+ }
+
+ return &rkisp1_params_formats[RKISP1_PARAMS_FIXED];
+}
+
static inline void
rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
{
@@ -112,54 +168,20 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
new_control &= RKISP1_CIF_ISP_BLS_ENA;
/* fixed subtraction values */
if (!arg->enable_auto) {
- const struct rkisp1_cif_isp_bls_fixed_val *pval =
- &arg->fixed_val;
-
- switch (params->raw_type) {
- case RKISP1_RAW_BGGR:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_GBRG:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_GRBG:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_RGGB:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->b);
- break;
- default:
- break;
- }
-
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_BLS_A_FIXED,
+ RKISP1_CIF_ISP_BLS_B_FIXED,
+ RKISP1_CIF_ISP_BLS_C_FIXED,
+ RKISP1_CIF_ISP_BLS_D_FIXED,
+ };
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(params->raw_type, regs, swapped);
+
+ rkisp1_write(params->rkisp1, swapped[0], arg->fixed_val.r);
+ rkisp1_write(params->rkisp1, swapped[1], arg->fixed_val.gr);
+ rkisp1_write(params->rkisp1, swapped[2], arg->fixed_val.gb);
+ rkisp1_write(params->rkisp1, swapped[3], arg->fixed_val.b);
} else {
if (arg->en_windows & BIT(1)) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_START,
@@ -1239,6 +1261,93 @@ rkisp1_dpf_strength_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_R, arg->r);
}
+static void rkisp1_compand_write_px_curve(struct rkisp1_params *params,
+ unsigned int addr, const u8 *curve)
+{
+ const unsigned int points_per_reg = 6;
+ const unsigned int num_regs =
+ DIV_ROUND_UP(RKISP1_CIF_ISP_COMPAND_NUM_POINTS,
+ points_per_reg);
+
+ /*
+ * The compand curve is specified as a piecewise linear function with
+ * 64 points. X coordinates are stored as a log2 of the displacement
+ * from the previous point, in 5 bits, with 6 values per register. The
+ * last register stores 4 values.
+ */
+ for (unsigned int reg = 0; reg < num_regs; ++reg) {
+ unsigned int num_points =
+ min(RKISP1_CIF_ISP_COMPAND_NUM_POINTS -
+ reg * points_per_reg, points_per_reg);
+ u32 val = 0;
+
+ for (unsigned int i = 0; i < num_points; i++)
+ val |= (*curve++ & 0x1f) << (i * 5);
+
+ rkisp1_write(params->rkisp1, addr, val);
+ addr += 4;
+ }
+}
+
+static void
+rkisp1_compand_write_curve_mem(struct rkisp1_params *params,
+ unsigned int reg_addr, unsigned int reg_data,
+ const u32 curve[RKISP1_CIF_ISP_COMPAND_NUM_POINTS])
+{
+ for (unsigned int i = 0; i < RKISP1_CIF_ISP_COMPAND_NUM_POINTS; i++) {
+ rkisp1_write(params->rkisp1, reg_addr, i);
+ rkisp1_write(params->rkisp1, reg_data, curve[i]);
+ }
+}
+
+static void
+rkisp1_compand_bls_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_bls_config *arg)
+{
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED,
+ };
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(params->raw_type, regs, swapped);
+
+ rkisp1_write(params->rkisp1, swapped[0], arg->r);
+ rkisp1_write(params->rkisp1, swapped[1], arg->gr);
+ rkisp1_write(params->rkisp1, swapped[2], arg->gb);
+ rkisp1_write(params->rkisp1, swapped[3], arg->b);
+}
+
+static void
+rkisp1_compand_expand_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_curve_config *arg)
+{
+ rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(0),
+ arg->px);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR,
+ RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA,
+ arg->y);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR,
+ RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA,
+ arg->x);
+}
+
+static void
+rkisp1_compand_compress_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_curve_config *arg)
+{
+ rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(0),
+ arg->px);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR,
+ RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA,
+ arg->y);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR,
+ RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA,
+ arg->x);
+}
+
static void
rkisp1_isp_isr_other_config(struct rkisp1_params *params,
const struct rkisp1_params_cfg *new_params)
@@ -1249,6 +1358,12 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
module_cfg_update = new_params->module_cfg_update;
module_ens = new_params->module_ens;
+ if (!rkisp1_has_feature(params->rkisp1, BLS)) {
+ module_en_update &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ module_cfg_update &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ module_ens &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ }
+
/* update dpc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC)
rkisp1_dpcc_config(params,
@@ -1501,21 +1616,551 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
}
}
-static bool rkisp1_params_get_buffer(struct rkisp1_params *params,
- struct rkisp1_buffer **buf,
- struct rkisp1_params_cfg **cfg)
+/* -----------------------------------------------------------------------------
+ * Extensible parameters format handling
+ */
+
+static void
+rkisp1_ext_params_bls(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_bls_config *bls = &block->bls;
+
+ if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+ return;
+ }
+
+ rkisp1_bls_config(params, &bls->config);
+
+ if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bls->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+}
+
+static void
+rkisp1_ext_params_dpcc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
{
- if (list_empty(&params->params))
- return false;
+ const struct rkisp1_ext_params_dpcc_config *dpcc = &block->dpcc;
+
+ if (dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
+ return;
+ }
- *buf = list_first_entry(&params->params, struct rkisp1_buffer, queue);
- *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ rkisp1_dpcc_config(params, &dpcc->config);
- return true;
+ if ((dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(dpcc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
+}
+
+static void
+rkisp1_ext_params_sdg(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_sdg_config *sdg = &block->sdg;
+
+ if (sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+ return;
+ }
+
+ rkisp1_sdg_config(params, &sdg->config);
+
+ if ((sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(sdg->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+}
+
+static void
+rkisp1_ext_params_lsc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_lsc_config *lsc = &block->lsc;
+
+ if (lsc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ return;
+ }
+
+ rkisp1_lsc_config(params, &lsc->config);
+
+ if ((lsc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(lsc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+}
+
+static void
+rkisp1_ext_params_awbg(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_awb_gain_config *awbg = &block->awbg;
+
+ if (awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ return;
+ }
+
+ params->ops->awb_gain_config(params, &awbg->config);
+
+ if ((awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(awbg->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+}
+
+static void
+rkisp1_ext_params_flt(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_flt_config *flt = &block->flt;
+
+ if (flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+ return;
+ }
+
+ rkisp1_flt_config(params, &flt->config);
+
+ if ((flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(flt->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+}
+
+static void
+rkisp1_ext_params_bdm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_bdm_config *bdm = &block->bdm;
+
+ if (bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+ return;
+ }
+
+ rkisp1_bdm_config(params, &bdm->config);
+
+ if ((bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bdm->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+}
+
+static void
+rkisp1_ext_params_ctk(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_ctk_config *ctk = &block->ctk;
+
+ if (ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_ctk_enable(params, false);
+ return;
+ }
+
+ rkisp1_ctk_config(params, &ctk->config);
+
+ if ((ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(ctk->header.type)))
+ rkisp1_ctk_enable(params, true);
+}
+
+static void
+rkisp1_ext_params_goc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_goc_config *goc = &block->goc;
+
+ if (goc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ return;
+ }
+
+ params->ops->goc_config(params, &goc->config);
+
+ /*
+ * Unconditionally re-enable the GOC module which gets disabled by
+ * goc_config().
+ */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+}
+
+static void
+rkisp1_ext_params_dpf(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_dpf_config *dpf = &block->dpf;
+
+ if (dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+ return;
+ }
+
+ rkisp1_dpf_config(params, &dpf->config);
+
+ if ((dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(dpf->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+}
+
+static void
+rkisp1_ext_params_dpfs(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_dpf_strength_config *dpfs = &block->dpfs;
+
+ rkisp1_dpf_strength_config(params, &dpfs->config);
+}
+
+static void
+rkisp1_ext_params_cproc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_cproc_config *cproc = &block->cproc;
+
+ if (cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+ return;
+ }
+
+ rkisp1_cproc_config(params, &cproc->config);
+
+ if ((cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(cproc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+}
+
+static void
+rkisp1_ext_params_ie(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_ie_config *ie = &block->ie;
+
+ if (ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_ie_enable(params, false);
+ return;
+ }
+
+ rkisp1_ie_config(params, &ie->config);
+
+ if ((ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(ie->header.type)))
+ rkisp1_ie_enable(params, true);
+}
+
+static void
+rkisp1_ext_params_awbm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_awb_meas_config *awbm = &block->awbm;
+
+ if (awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ params->ops->awb_meas_enable(params, &awbm->config,
+ false);
+ return;
+ }
+
+ params->ops->awb_meas_config(params, &awbm->config);
+
+ if ((awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(awbm->header.type)))
+ params->ops->awb_meas_enable(params, &awbm->config,
+ true);
+}
+
+static void
+rkisp1_ext_params_hstm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_hst_config *hst = &block->hst;
+
+ if (hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ params->ops->hst_enable(params, &hst->config, false);
+ return;
+ }
+
+ params->ops->hst_config(params, &hst->config);
+
+ if ((hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(hst->header.type)))
+ params->ops->hst_enable(params, &hst->config, true);
+}
+
+static void
+rkisp1_ext_params_aecm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_aec_config *aec = &block->aec;
+
+ if (aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+ return;
+ }
+
+ params->ops->aec_config(params, &aec->config);
+
+ if ((aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(aec->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+}
+
+static void
+rkisp1_ext_params_afcm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_afc_config *afc = &block->afc;
+
+ if (afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+ return;
+ }
+
+ params->ops->afm_config(params, &afc->config);
+
+ if ((afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(afc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+}
+
+static void rkisp1_ext_params_compand_bls(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_bls_config *bls =
+ &block->compand_bls;
+
+ if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_bls_config(params, &bls->config);
+
+ if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bls->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE);
+}
+
+static void rkisp1_ext_params_compand_expand(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_curve_config *curve =
+ &block->compand_curve;
+
+ if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_expand_config(params, &curve->config);
+
+ if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(curve->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE);
+}
+
+static void rkisp1_ext_params_compand_compress(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_curve_config *curve =
+ &block->compand_curve;
+
+ if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_compress_config(params, &curve->config);
+
+ if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(curve->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE);
+}
+
+typedef void (*rkisp1_block_handler)(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *config);
+
+static const struct rkisp1_ext_params_handler {
+ size_t size;
+ rkisp1_block_handler handler;
+ unsigned int group;
+ unsigned int features;
+} rkisp1_ext_params_handlers[] = {
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS] = {
+ .size = sizeof(struct rkisp1_ext_params_bls_config),
+ .handler = rkisp1_ext_params_bls,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_BLS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC] = {
+ .size = sizeof(struct rkisp1_ext_params_dpcc_config),
+ .handler = rkisp1_ext_params_dpcc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG] = {
+ .size = sizeof(struct rkisp1_ext_params_sdg_config),
+ .handler = rkisp1_ext_params_sdg,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN] = {
+ .size = sizeof(struct rkisp1_ext_params_awb_gain_config),
+ .handler = rkisp1_ext_params_awbg,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT] = {
+ .size = sizeof(struct rkisp1_ext_params_flt_config),
+ .handler = rkisp1_ext_params_flt,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM] = {
+ .size = sizeof(struct rkisp1_ext_params_bdm_config),
+ .handler = rkisp1_ext_params_bdm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK] = {
+ .size = sizeof(struct rkisp1_ext_params_ctk_config),
+ .handler = rkisp1_ext_params_ctk,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC] = {
+ .size = sizeof(struct rkisp1_ext_params_goc_config),
+ .handler = rkisp1_ext_params_goc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF] = {
+ .size = sizeof(struct rkisp1_ext_params_dpf_config),
+ .handler = rkisp1_ext_params_dpf,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH] = {
+ .size = sizeof(struct rkisp1_ext_params_dpf_strength_config),
+ .handler = rkisp1_ext_params_dpfs,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC] = {
+ .size = sizeof(struct rkisp1_ext_params_cproc_config),
+ .handler = rkisp1_ext_params_cproc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_IE] = {
+ .size = sizeof(struct rkisp1_ext_params_ie_config),
+ .handler = rkisp1_ext_params_ie,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC] = {
+ .size = sizeof(struct rkisp1_ext_params_lsc_config),
+ .handler = rkisp1_ext_params_lsc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_awb_meas_config),
+ .handler = rkisp1_ext_params_awbm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_hst_config),
+ .handler = rkisp1_ext_params_hstm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_aec_config),
+ .handler = rkisp1_ext_params_aecm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_afc_config),
+ .handler = rkisp1_ext_params_afcm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_bls_config),
+ .handler = rkisp1_ext_params_compand_bls,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_curve_config),
+ .handler = rkisp1_ext_params_compand_expand,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_curve_config),
+ .handler = rkisp1_ext_params_compand_compress,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+};
+
+static void rkisp1_ext_params_config(struct rkisp1_params *params,
+ struct rkisp1_ext_params_cfg *cfg,
+ u32 block_group_mask)
+{
+ size_t block_offset = 0;
+
+ if (WARN_ON(!cfg))
+ return;
+
+ /* Walk the list of parameter blocks and process them. */
+ while (block_offset < cfg->data_size) {
+ const struct rkisp1_ext_params_handler *block_handler;
+ const union rkisp1_ext_params_config *block;
+
+ block = (const union rkisp1_ext_params_config *)
+ &cfg->data[block_offset];
+ block_offset += block->header.size;
+
+ /*
+ * Make sure the block is supported by the platform and in the
+ * list of groups to configure.
+ */
+ block_handler = &rkisp1_ext_params_handlers[block->header.type];
+ if (!(block_handler->group & block_group_mask))
+ continue;
+
+ if ((params->rkisp1->info->features & block_handler->features) !=
+ block_handler->features)
+ continue;
+
+ block_handler->handler(params, block);
+
+ if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)
+ params->enabled_blocks &= ~BIT(block->header.type);
+ else if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE)
+ params->enabled_blocks |= BIT(block->header.type);
+ }
}
static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
- struct rkisp1_buffer *buf,
+ struct rkisp1_params_buffer *buf,
unsigned int frame_sequence)
{
list_del(&buf->queue);
@@ -1527,17 +2172,24 @@ static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
void rkisp1_params_isr(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
spin_lock(&params->config_lock);
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_other_config(params, new_params);
- rkisp1_isp_isr_lsc_config(params, new_params);
- rkisp1_isp_isr_meas_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ rkisp1_isp_isr_other_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_lsc_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_meas_config(params, cur_buf->cfg);
+ } else {
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS |
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC);
+ }
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1603,8 +2255,7 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params,
enum v4l2_ycbcr_encoding ycbcr_encoding)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
params->quantization = quantization;
params->ycbcr_encoding = ycbcr_encoding;
@@ -1633,11 +2284,18 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params,
/* apply the first buffer if there is one already */
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_other_config(params, new_params);
- rkisp1_isp_isr_meas_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ rkisp1_isp_isr_other_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_meas_config(params, cur_buf->cfg);
+ } else {
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS);
+ }
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1649,8 +2307,7 @@ unlock:
void rkisp1_params_post_configure(struct rkisp1_params *params)
{
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
spin_lock_irq(&params->config_lock);
@@ -1662,11 +2319,16 @@ void rkisp1_params_post_configure(struct rkisp1_params *params)
* ordering doesn't affect other ISP versions negatively, do so
* unconditionally.
*/
-
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_lsc_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS)
+ rkisp1_isp_isr_lsc_config(params, cur_buf->cfg);
+ else
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC);
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1742,12 +2404,12 @@ static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct video_device *video = video_devdata(file);
- struct rkisp1_params *params = video_get_drvdata(video);
- if (f->index > 0 || f->type != video->queue->type)
+ if (f->index >= ARRAY_SIZE(rkisp1_params_formats) ||
+ f->type != video->queue->type)
return -EINVAL;
- f->pixelformat = params->vdev_fmt.fmt.meta.dataformat;
+ f->pixelformat = rkisp1_params_formats[f->index].dataformat;
return 0;
}
@@ -1762,9 +2424,40 @@ static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh,
if (f->type != video->queue->type)
return -EINVAL;
- memset(meta, 0, sizeof(*meta));
- meta->dataformat = params->vdev_fmt.fmt.meta.dataformat;
- meta->buffersize = params->vdev_fmt.fmt.meta.buffersize;
+ *meta = *params->metafmt;
+
+ return 0;
+}
+
+static int rkisp1_params_try_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ *meta = *rkisp1_params_get_format_info(meta->dataformat);
+
+ return 0;
+}
+
+static int rkisp1_params_s_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_params *params = video_get_drvdata(video);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ if (vb2_is_busy(video->queue))
+ return -EBUSY;
+
+ params->metafmt = rkisp1_params_get_format_info(meta->dataformat);
+ *meta = *params->metafmt;
return 0;
}
@@ -1794,8 +2487,8 @@ static const struct v4l2_ioctl_ops rkisp1_params_ioctl = {
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out,
.vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
- .vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
- .vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = rkisp1_params_s_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = rkisp1_params_try_fmt_meta_out,
.vidioc_querycap = rkisp1_params_querycap,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -1807,22 +2500,46 @@ static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
unsigned int sizes[],
struct device *alloc_devs[])
{
+ struct rkisp1_params *params = vq->drv_priv;
+
*num_buffers = clamp_t(u32, *num_buffers,
RKISP1_ISP_PARAMS_REQ_BUFS_MIN,
RKISP1_ISP_PARAMS_REQ_BUFS_MAX);
*num_planes = 1;
- sizes[0] = sizeof(struct rkisp1_params_cfg);
+ sizes[0] = params->metafmt->buffersize;
return 0;
}
+static int rkisp1_params_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ struct rkisp1_params *params = vb->vb2_queue->drv_priv;
+
+ params_buf->cfg = kvmalloc(params->metafmt->buffersize,
+ GFP_KERNEL);
+ if (!params_buf->cfg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void rkisp1_params_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+
+ kvfree(params_buf->cfg);
+ params_buf->cfg = NULL;
+}
+
static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct rkisp1_buffer *params_buf =
- container_of(vbuf, struct rkisp1_buffer, vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_params *params = vq->drv_priv;
@@ -1831,12 +2548,133 @@ static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&params->config_lock);
}
+static int rkisp1_params_prepare_ext_params(struct rkisp1_params *params,
+ struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ size_t header_size = offsetof(struct rkisp1_ext_params_cfg, data);
+ struct rkisp1_ext_params_cfg *cfg = params_buf->cfg;
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+ struct rkisp1_ext_params_cfg *usr_cfg =
+ vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ size_t block_offset = 0;
+ size_t cfg_size;
+
+	/*
+	 * Validate the buffer payload size before copying the parameters. The
+	 * payload must not exceed the destination buffer size and must be at
+	 * least as large as the header.
+	 */
+ if (payload_size > params->metafmt->buffersize) {
+ dev_dbg(params->rkisp1->dev,
+ "Too large buffer payload size %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ if (payload_size < header_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Buffer payload %zu smaller than header size %zu\n",
+ payload_size, header_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the parameters buffer to the internal scratch buffer to avoid
+ * userspace modifying the buffer content while the driver processes it.
+ */
+ memcpy(cfg, usr_cfg, payload_size);
+
+ /* Only v1 is supported at the moment. */
+ if (cfg->version != RKISP1_EXT_PARAM_BUFFER_V1) {
+ dev_dbg(params->rkisp1->dev,
+ "Unsupported extensible format version: %u\n",
+ cfg->version);
+ return -EINVAL;
+ }
+
+ /* Validate the size reported in the parameters buffer header. */
+ cfg_size = header_size + cfg->data_size;
+ if (cfg_size != payload_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Data size %zu different than buffer payload size %zu\n",
+ cfg_size, payload_size);
+ return -EINVAL;
+ }
+
+ /* Walk the list of parameter blocks and validate them. */
+ cfg_size = cfg->data_size;
+ while (cfg_size >= sizeof(struct rkisp1_ext_params_block_header)) {
+ const struct rkisp1_ext_params_block_header *block;
+ const struct rkisp1_ext_params_handler *handler;
+
+ block = (const struct rkisp1_ext_params_block_header *)
+ &cfg->data[block_offset];
+
+ if (block->type >= ARRAY_SIZE(rkisp1_ext_params_handlers)) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block type\n");
+ return -EINVAL;
+ }
+
+ if (block->size > cfg_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Premature end of parameters data\n");
+ return -EINVAL;
+ }
+
+ if ((block->flags & (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) ==
+ (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block flags\n");
+ return -EINVAL;
+ }
+
+ handler = &rkisp1_ext_params_handlers[block->type];
+ if (block->size != handler->size) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block size\n");
+ return -EINVAL;
+ }
+
+ block_offset += block->size;
+ cfg_size -= block->size;
+ }
+
+ if (cfg_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Unexpected data after the parameters buffer end\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
{
- if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg))
+ struct rkisp1_params *params = vb->vb2_queue->drv_priv;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ struct rkisp1_params_cfg *cfg = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ size_t payload = vb2_get_plane_payload(vb, 0);
+
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS)
+ return rkisp1_params_prepare_ext_params(params, vb);
+
+ /*
+ * For the fixed parameters format the payload size must be exactly the
+ * size of the parameters structure.
+ */
+ if (payload != sizeof(*cfg))
return -EINVAL;
- vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg));
+ /*
+ * Copy the parameters buffer to the internal scratch buffer to avoid
+ * userspace modifying the buffer content while the driver processes it.
+ */
+ memcpy(params_buf->cfg, cfg, payload);
return 0;
}
@@ -1844,7 +2682,7 @@ static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
{
struct rkisp1_params *params = vq->drv_priv;
- struct rkisp1_buffer *buf;
+ struct rkisp1_params_buffer *buf;
LIST_HEAD(tmp_list);
/*
@@ -1858,16 +2696,19 @@ static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
list_for_each_entry(buf, &tmp_list, queue)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+
+ params->enabled_blocks = 0;
}
static const struct vb2_ops rkisp1_params_vb2_ops = {
.queue_setup = rkisp1_params_vb2_queue_setup,
+ .buf_init = rkisp1_params_vb2_buf_init,
+ .buf_cleanup = rkisp1_params_vb2_buf_cleanup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_queue = rkisp1_params_vb2_buf_queue,
.buf_prepare = rkisp1_params_vb2_buf_prepare,
.stop_streaming = rkisp1_params_vb2_stop_streaming,
-
};
static const struct v4l2_file_operations rkisp1_params_fops = {
@@ -1890,26 +2731,13 @@ static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
q->drv_priv = params;
q->ops = &rkisp1_params_vb2_ops;
q->mem_ops = &vb2_vmalloc_memops;
- q->buf_struct_size = sizeof(struct rkisp1_buffer);
+ q->buf_struct_size = sizeof(struct rkisp1_params_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
return vb2_queue_init(q);
}
-static void rkisp1_init_params(struct rkisp1_params *params)
-{
- params->vdev_fmt.fmt.meta.dataformat =
- V4L2_META_FMT_RK_ISP1_PARAMS;
- params->vdev_fmt.fmt.meta.buffersize =
- sizeof(struct rkisp1_params_cfg);
-
- if (params->rkisp1->info->isp_ver == RKISP1_V12)
- params->ops = &rkisp1_v12_params_ops;
- else
- params->ops = &rkisp1_v10_params_ops;
-}
-
int rkisp1_params_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
@@ -1938,7 +2766,14 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1)
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
vdev->vfl_dir = VFL_DIR_TX;
rkisp1_params_init_vb2_queue(vdev->queue, params);
- rkisp1_init_params(params);
+
+ params->metafmt = &rkisp1_params_formats[RKISP1_PARAMS_FIXED];
+
+ if (params->rkisp1->info->isp_ver == RKISP1_V12)
+ params->ops = &rkisp1_v12_params_ops;
+ else
+ params->ops = &rkisp1_v10_params_ops;
+
video_set_drvdata(vdev, params);
node->pad.flags = MEDIA_PAD_FL_SOURCE;
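
The handlers above define the userspace contract for the extensible
parameters format: select it with VIDIOC_S_FMT while the queue is idle,
then fill each OUTPUT buffer with a v1 header followed by fixed-size
blocks. A minimal sketch of that sequence (not part of the patch; the
BLS block type constant and its config struct are placeholder names,
while the header fields, flags and sizing rules come from the hunks
above):

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/rkisp1-config.h>

/* Hypothetical userspace sketch; see the note above for assumptions. */
static size_t fill_ext_params(int fd, void *plane_vaddr)
{
	struct v4l2_format fmt = {
		.type = V4L2_BUF_TYPE_META_OUTPUT,
		.fmt.meta.dataformat = V4L2_META_FMT_RK_ISP1_EXT_PARAMS,
	};
	struct rkisp1_ext_params_cfg *cfg = plane_vaddr;
	struct rkisp1_ext_params_block_header *block;

	/* Fails with -EBUSY once buffers are allocated, see s_fmt above. */
	ioctl(fd, VIDIOC_S_FMT, &fmt);

	cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
	cfg->data_size = 0;

	block = (struct rkisp1_ext_params_block_header *)&cfg->data[0];
	block->type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;	/* placeholder */
	block->flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
	/* A block must use the exact size its handler declares. */
	block->size = sizeof(struct rkisp1_ext_params_bls_config); /* placeholder */
	cfg->data_size += block->size;

	/* buf_prepare rejects the buffer unless bytesused equals this. */
	return offsetof(struct rkisp1_ext_params_cfg, data) + cfg->data_size;
}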
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index fccf4c17ee8d..bf0260600a19 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -704,6 +704,12 @@
#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1f
#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3ff
+/* COMPAND */
+#define RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE BIT(0)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE BIT(1)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_SOFT_RESET_FLAG BIT(2)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE BIT(3)
+
/* =================================================================== */
/* CIF Registers */
/* =================================================================== */
@@ -1394,6 +1400,23 @@
#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_COMPAND_BASE 0x00003200
+#define RKISP1_CIF_ISP_COMPAND_CTRL (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000000c)
+#define RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000014 + (n) * 4)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000040 + (n) * 4)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000007c)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000088)
+
#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
#define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index 1fa991227fa9..f073e72a0d37 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -494,10 +494,10 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ rsz->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ rsz->rkisp1->info->max_height);
/*
* Adjust the color space fields. Accept any color primaries and
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index 2795eef91bdd..a502719e916a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -304,48 +304,25 @@ static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_BLS_A_MEASURED,
+ RKISP1_CIF_ISP_BLS_B_MEASURED,
+ RKISP1_CIF_ISP_BLS_C_MEASURED,
+ RKISP1_CIF_ISP_BLS_D_MEASURED,
+ };
struct rkisp1_device *rkisp1 = stats->rkisp1;
const struct rkisp1_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
struct rkisp1_cif_isp_bls_meas_val *bls_val;
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(in_fmt->bayer_pat, regs, swapped);
bls_val = &pbuf->params.ae.bls_val;
- if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) {
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) {
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) {
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) {
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- }
+
+ bls_val->meas_r = rkisp1_read(rkisp1, swapped[0]);
+ bls_val->meas_gr = rkisp1_read(rkisp1, swapped[1]);
+ bls_val->meas_gb = rkisp1_read(rkisp1, swapped[2]);
+ bls_val->meas_b = rkisp1_read(rkisp1, swapped[3]);
}
static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
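
rkisp1_bls_swap_regs() is introduced elsewhere in this series; its
definition is not part of this hunk. A plausible shape for it,
reconstructed from the if/else chain deleted above (a sketch, assuming
the driver's existing rkisp1_fmt_raw_pat_type enum; the output order
matches the R, Gr, Gb, B reads following the call):

/* Permute the four BLS measurement registers so that callers can always
 * read them back in R, Gr, Gb, B order, whatever the sensor's Bayer
 * pattern. The permutation rows mirror the deleted branches.
 */
static void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern,
				 const u32 input[4], u32 output[4])
{
	static const unsigned int swap[4][4] = {
		[RKISP1_RAW_RGGB] = { 0, 1, 2, 3 },
		[RKISP1_RAW_GRBG] = { 1, 0, 3, 2 },
		[RKISP1_RAW_GBRG] = { 2, 3, 0, 1 },
		[RKISP1_RAW_BGGR] = { 3, 2, 1, 0 },
	};
	unsigned int i;

	for (i = 0; i < 4; i++)
		output[i] = input[swap[pattern][i]];
}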
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
index 618ae55fe396..f45f5c8612a6 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
@@ -1225,7 +1225,7 @@ static void gsc_remove(struct platform_device *pdev)
static int gsc_m2m_suspend(struct gsc_dev *gsc)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&gsc->slock, flags);
if (!gsc_m2m_pending(gsc)) {
@@ -1236,12 +1236,12 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
set_bit(ST_M2M_SUSPENDING, &gsc->state);
spin_unlock_irqrestore(&gsc->slock, flags);
- timeout = wait_event_timeout(gsc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &gsc->state),
- GSC_SHUTDOWN_TIMEOUT);
+ time_left = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &gsc->state);
- return timeout == 0 ? -EAGAIN : 0;
+ return time_left == 0 ? -EAGAIN : 0;
}
static void gsc_m2m_resume(struct gsc_dev *gsc)
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index aae74b501a42..adfc2d73d04b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -822,7 +822,7 @@ err:
static int fimc_m2m_suspend(struct fimc_dev *fimc)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&fimc->slock, flags);
if (!fimc_m2m_pending(fimc)) {
@@ -833,12 +833,12 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
set_bit(ST_M2M_SUSPENDING, &fimc->state);
spin_unlock_irqrestore(&fimc->slock, flags);
- timeout = wait_event_timeout(fimc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &fimc->state),
- FIMC_SHUTDOWN_TIMEOUT);
+ time_left = wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &fimc->state);
- return timeout == 0 ? -EAGAIN : 0;
+ return time_left == 0 ? -EAGAIN : 0;
}
static int fimc_m2m_resume(struct fimc_dev *fimc)
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 1328b4eb6b9f..c7ee6e1a4451 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -1160,7 +1160,7 @@ static void bdisp_irq_timeout(struct work_struct *ptr)
static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&bdisp->slock, flags);
if (!test_bit(ST_M2M_RUNNING, &bdisp->state)) {
@@ -1171,13 +1171,13 @@ static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
set_bit(ST_M2M_SUSPENDING, &bdisp->state);
spin_unlock_irqrestore(&bdisp->slock, flags);
- timeout = wait_event_timeout(bdisp->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &bdisp->state),
- BDISP_WORK_TIMEOUT);
+ time_left = wait_event_timeout(bdisp->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &bdisp->state),
+ BDISP_WORK_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &bdisp->state);
- if (!timeout) {
+ if (!time_left) {
dev_err(bdisp->dev, "%s IRQ timeout\n", __func__);
return -EAGAIN;
}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 097a3a08ef7d..d07e980aba61 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -35,7 +35,18 @@ struct sun4i_csi_traits {
bool has_isp;
};
+static int sun4i_csi_video_link_validate(struct media_link *link)
+{
+ dev_warn_once(link->graph_obj.mdev->dev,
+ "Driver bug: link validation not implemented\n");
+ return 0;
+}
+
static const struct media_entity_operations sun4i_csi_video_entity_ops = {
+ .link_validate = sun4i_csi_video_link_validate,
+};
+
+static const struct media_entity_operations sun4i_csi_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -214,6 +225,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
subdev->internal_ops = &sun4i_csi_subdev_internal_ops;
subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ subdev->entity.ops = &sun4i_csi_subdev_entity_ops;
subdev->owner = THIS_MODULE;
snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0");
v4l2_set_subdevdata(subdev, csi);
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 77e12457d149..009ff68a2b43 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -2287,7 +2287,7 @@ static const struct v4l2_async_notifier_operations vpfe_async_ops = {
static struct vpfe_config *
vpfe_get_pdata(struct vpfe_device *vpfe)
{
- struct device_node *endpoint = NULL;
+ struct device_node *endpoint;
struct device *dev = vpfe->pdev;
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *pdata;
@@ -2306,14 +2306,11 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
if (!pdata)
return NULL;
- for (i = 0; ; i++) {
+ i = 0;
+ for_each_endpoint_of_node(dev->of_node, endpoint) {
struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
struct device_node *rem;
- endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint);
- if (!endpoint)
- break;
-
sdinfo = &pdata->sub_devs[i];
sdinfo->grp_id = 0;
@@ -2371,9 +2368,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
+
+ i++;
}
- of_node_put(endpoint);
return pdata;
cleanup:
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 4afc2ad00330..42dfe08b765f 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -798,7 +798,7 @@ static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = {
.init_state = cal_camerarx_sd_init_state,
};
-static struct media_entity_operations cal_camerarx_media_ops = {
+static const struct media_entity_operations cal_camerarx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 528909ae4bd6..5c2c04142aee 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -549,7 +549,7 @@ void cal_ctx_start(struct cal_ctx *ctx)
void cal_ctx_stop(struct cal_ctx *ctx)
{
struct cal_camerarx *phy = ctx->phy;
- long timeout;
+ long time_left;
WARN_ON(phy->vc_enable_count[ctx->vc] == 0);
@@ -565,9 +565,9 @@ void cal_ctx_stop(struct cal_ctx *ctx)
ctx->dma.state = CAL_DMA_STOP_REQUESTED;
spin_unlock_irq(&ctx->dma.lock);
- timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
- msecs_to_jiffies(500));
- if (!timeout) {
+ time_left = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
+ msecs_to_jiffies(500));
+ if (!time_left) {
ctx_err(ctx, "failed to disable dma cleanly\n");
cal_ctx_wr_dma_disable(ctx);
}
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index c28794b6677b..16326437767f 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -1487,7 +1487,7 @@ static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev,
struct v4l2_device *v4l2_dev)
{
- struct device_node *endpoint = NULL;
+ struct device_node *endpoint;
struct device_node *rem = NULL;
struct vpif_capture_config *pdata;
struct vpif_subdev_info *sdinfo;
@@ -1517,16 +1517,12 @@ vpif_capture_get_pdata(struct platform_device *pdev,
if (!pdata->subdev_info)
return NULL;
- for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
+ i = 0;
+ for_each_endpoint_of_node(pdev->dev.of_node, endpoint) {
struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
unsigned int flags;
int err;
- endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
- endpoint);
- if (!endpoint)
- break;
-
rem = of_graph_get_remote_port_parent(endpoint);
if (!rem) {
dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
@@ -1577,6 +1573,10 @@ vpif_capture_get_pdata(struct platform_device *pdev,
goto err_cleanup;
of_node_put(rem);
+
+ i++;
+ if (i >= VPIF_CAPTURE_NUM_CHANNELS)
+ break;
}
done:
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 1cda23244c7b..91101ba88ef0 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1965,7 +1965,7 @@ static int isp_attach_iommu(struct isp_device *isp)
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
*/
- mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+ mapping = arm_iommu_create_mapping(isp->dev, SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
return PTR_ERR(mapping);
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
index 149d0b32c324..3272a24db71d 100644
--- a/drivers/media/platform/verisilicon/Kconfig
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -21,6 +21,14 @@ config VIDEO_HANTRO
To compile this driver as a module, choose M here: the module
will be called hantro-vpu.
+config VIDEO_HANTRO_HEVC_RFC
+ bool "Use reference frame compression for HEVC"
+ depends on VIDEO_HANTRO
+ default n
+ help
+ Enable the reference frame compression feature for the HEVC codec.
+	  It uses more memory but saves bandwidth on the memory bus.
+
config VIDEO_HANTRO_IMX8M
bool "Hantro VPU i.MX8M support"
depends on VIDEO_HANTRO
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
index eb38a1833b02..f6f019d04ff0 100644
--- a/drivers/media/platform/verisilicon/Makefile
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -14,13 +14,6 @@ hantro-vpu-y += \
hantro_g2.o \
hantro_g2_hevc_dec.o \
hantro_g2_vp9_dec.o \
- rockchip_vpu2_hw_jpeg_enc.o \
- rockchip_vpu2_hw_h264_dec.o \
- rockchip_vpu2_hw_mpeg2_dec.o \
- rockchip_vpu2_hw_vp8_dec.o \
- rockchip_vpu981_hw_av1_dec.o \
- rockchip_av1_filmgrain.o \
- rockchip_av1_entropymode.o \
hantro_jpeg.o \
hantro_h264.o \
hantro_hevc.o \
@@ -35,6 +28,13 @@ hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
sama5d4_vdec_hw.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rockchip_vpu2_hw_jpeg_enc.o \
+ rockchip_vpu2_hw_h264_dec.o \
+ rockchip_vpu2_hw_mpeg2_dec.o \
+ rockchip_vpu2_hw_vp8_dec.o \
+ rockchip_vpu981_hw_av1_dec.o \
+ rockchip_av1_filmgrain.o \
+ rockchip_av1_entropymode.o \
rockchip_vpu_hw.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 34b123dafd89..05bbac853c4f 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -722,6 +722,7 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
+ { .compatible = "rockchip,rk3588-vepu121", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
@@ -992,6 +993,49 @@ static const struct media_device_ops hantro_m2m_media_ops = {
.req_queue = v4l2_m2m_request_queue,
};
+/*
+ * Some SoCs, like the RK3588, have multiple identical Hantro cores, but the
+ * kernel currently lacks multi-core handling. Exposing a separate device for
+ * each core to userspace is undesirable, since it prevents proper task
+ * scheduling (and would create ABI). With this workaround the driver probes
+ * only the first core and exits early for the other cores. Once the driver
+ * gains multi-core support, the same technique for detecting the main core
+ * can be used to cluster all cores together.
+ */
+static int hantro_disable_multicore(struct hantro_dev *vpu)
+{
+ struct device_node *node = NULL;
+ const char *compatible;
+ bool is_main_core;
+ int ret;
+
+ /* Intentionally ignores the fallback strings */
+ ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible);
+ if (ret)
+ return ret;
+
+ /* The first compatible and available node found is considered the main core */
+ do {
+ node = of_find_compatible_node(node, NULL, compatible);
+ if (of_device_is_available(node))
+ break;
+ } while (node);
+
+ if (!node)
+ return -EINVAL;
+
+ is_main_core = (vpu->dev->of_node == node);
+
+ of_node_put(node);
+
+ if (!is_main_core) {
+ dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int hantro_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1011,6 +1055,10 @@ static int hantro_probe(struct platform_device *pdev)
match = of_match_node(of_hantro_match, pdev->dev.of_node);
vpu->variant = match->data;
+ ret = hantro_disable_multicore(vpu);
+ if (ret)
+ return ret;
+
/*
* Support for nxp,imx8mq-vpu is kept for backwards compatibility
* but it's deprecated. Please update your DTS file to use
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index b880a6849d58..5c1d799d8618 100644
--- a/drivers/media/platform/verisilicon/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -56,3 +56,32 @@ size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx)
return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
}
+
+static size_t hantro_g2_mv_size(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
+ unsigned int max_log2_ctb_size;
+
+ max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
+ (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
+ pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
+ >> max_log2_ctb_size;
+
+ return pic_width_in_ctbs * pic_height_in_ctbs * (1 << (2 * (max_log2_ctb_size - 4))) * 16;
+}
+
+size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx)
+{
+ return hantro_g2_motion_vectors_offset(ctx) +
+ hantro_g2_mv_size(ctx);
+}
+
+size_t hantro_g2_chroma_compress_offset(struct hantro_ctx *ctx)
+{
+ return hantro_g2_luma_compress_offset(ctx) +
+ hantro_hevc_luma_compressed_size(ctx->dst_fmt.width, ctx->dst_fmt.height);
+}
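
Together with hantro_g2_chroma_offset() and
hantro_g2_motion_vectors_offset(), these helpers imply a fixed layout
for each G2 capture buffer when reference frame compression is enabled.
A sketch (the byte count is a worked example for 1920x1088 with 64x64
CTBs, not a value from the patch):

/*
 * 0                                    luma plane
 * hantro_g2_chroma_offset()            chroma plane
 * hantro_g2_motion_vectors_offset()    motion vectors
 * hantro_g2_luma_compress_offset()     luma compression table
 * hantro_g2_chroma_compress_offset()   chroma compression table
 *
 * At 1920x1088 with 64x64 CTBs, hantro_g2_mv_size() evaluates to
 * 30 * 17 * 16 * 16 = 130560 bytes, the gap between the motion-vector
 * and luma-table offsets.
 */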
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index d3f8c33eb16c..85a44143b378 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -367,11 +367,14 @@ static int set_ref(struct hantro_ctx *ctx)
const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ dma_addr_t compress_luma_addr, compress_chroma_addr = 0;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
struct hantro_decoded_buffer *dst;
size_t cr_offset = hantro_g2_chroma_offset(ctx);
size_t mv_offset = hantro_g2_motion_vectors_offset(ctx);
+ size_t compress_luma_offset = hantro_g2_luma_compress_offset(ctx);
+ size_t compress_chroma_offset = hantro_g2_chroma_compress_offset(ctx);
u32 max_ref_frames;
u16 dpb_longterm_e;
static const struct hantro_reg cur_poc[] = {
@@ -445,6 +448,8 @@ static int set_ref(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ compress_luma_addr = luma_addr + compress_luma_offset;
+ compress_chroma_addr = luma_addr + compress_chroma_offset;
if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
@@ -452,6 +457,8 @@ static int set_ref(struct hantro_ctx *ctx)
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), compress_chroma_addr);
}
vb2_dst = hantro_get_dst_buf(ctx);
@@ -465,19 +472,27 @@ static int set_ref(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ compress_luma_addr = luma_addr + compress_luma_offset;
+ compress_chroma_addr = luma_addr + compress_chroma_offset;
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
- hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i++), compress_chroma_addr);
hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);
+ hantro_write_addr(vpu, G2_OUT_COMP_LUMA_ADDR, compress_luma_addr);
+ hantro_write_addr(vpu, G2_OUT_COMP_CHROMA_ADDR, compress_chroma_addr);
for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), 0);
}
hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
@@ -594,8 +609,7 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
/* Don't disable output */
hantro_reg_write(vpu, &g2_out_dis, 0);
- /* Don't compress buffers */
- hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
+ hantro_reg_write(vpu, &g2_ref_compress_bypass, !ctx->hevc_dec.use_compression);
/* Bus width and max burst */
hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h
index 82606783591a..b943b1816db7 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h
@@ -318,6 +318,10 @@
#define G2_TILE_BSD_ADDR (G2_SWREG(183))
#define G2_DS_DST (G2_SWREG(186))
#define G2_DS_DST_CHR (G2_SWREG(188))
+#define G2_OUT_COMP_LUMA_ADDR (G2_SWREG(190))
+#define G2_REF_COMP_LUMA_ADDR(i) (G2_SWREG(192) + ((i) * 0x8))
+#define G2_OUT_COMP_CHROMA_ADDR (G2_SWREG(224))
+#define G2_REF_COMP_CHROMA_ADDR(i) (G2_SWREG(226) + ((i) * 0x8))
#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
index 2c14330bc562..83cd12b0ddd6 100644
--- a/drivers/media/platform/verisilicon/hantro_hevc.c
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
@@ -25,6 +25,11 @@
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
+static bool hevc_use_compression = IS_ENABLED(CONFIG_VIDEO_HANTRO_HEVC_RFC);
+module_param_named(hevc_use_compression, hevc_use_compression, bool, 0644);
+MODULE_PARM_DESC(hevc_use_compression,
+ "Use reference frame compression for HEVC");
+
void hantro_hevc_ref_init(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
@@ -275,5 +280,8 @@ int hantro_hevc_dec_init(struct hantro_ctx *ctx)
hantro_hevc_ref_init(ctx);
+ hevc_dec->use_compression =
+		hevc_use_compression && hantro_needs_postproc(ctx, ctx->vpu_dst_fmt);
+
return 0;
}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index 7737320cc8cc..c9b6556f8b2b 100644
--- a/drivers/media/platform/verisilicon/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -42,6 +42,13 @@
#define MAX_POSTPROC_BUFFERS 64
+#define CBS_SIZE 16 /* compression table size in bytes */
+#define CBS_LUMA 8 /* luminance CBS is composed of 1 8x8 coded block */
+#define CBS_CHROMA_W (8 * 2) /* chrominance CBS is composed of two 8x4 coded
+				 * blocks, with the Cb CB first, followed by the Cr CB
+ */
+#define CBS_CHROMA_H 4
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -144,6 +151,7 @@ struct hantro_hevc_dec_ctrls {
* @ref_bufs_used: Bitfield of used reference buffers
* @ctrls: V4L2 controls attached to a run
* @num_tile_cols_allocated: number of allocated tiles
+ * @use_compression: use reference buffer compression
*/
struct hantro_hevc_dec_hw_ctx {
struct hantro_aux_buf tile_sizes;
@@ -156,6 +164,7 @@ struct hantro_hevc_dec_hw_ctx {
u32 ref_bufs_used;
struct hantro_hevc_dec_ctrls ctrls;
unsigned int num_tile_cols_allocated;
+ bool use_compression;
};
/**
@@ -510,6 +519,33 @@ hantro_hevc_mv_size(unsigned int width, unsigned int height)
return width * height / 16;
}
+static inline size_t
+hantro_hevc_luma_compressed_size(unsigned int width, unsigned int height)
+{
+ u32 pic_width_in_cbsy =
+ round_up((width + CBS_LUMA - 1) / CBS_LUMA, CBS_SIZE);
+ u32 pic_height_in_cbsy = (height + CBS_LUMA - 1) / CBS_LUMA;
+
+ return round_up(pic_width_in_cbsy * pic_height_in_cbsy, CBS_SIZE);
+}
+
+static inline size_t
+hantro_hevc_chroma_compressed_size(unsigned int width, unsigned int height)
+{
+ u32 pic_width_in_cbsc =
+ round_up((width + CBS_CHROMA_W - 1) / CBS_CHROMA_W, CBS_SIZE);
+ u32 pic_height_in_cbsc = (height / 2 + CBS_CHROMA_H - 1) / CBS_CHROMA_H;
+
+ return round_up(pic_width_in_cbsc * pic_height_in_cbsc, CBS_SIZE);
+}
+
+static inline size_t
+hantro_hevc_compressed_size(unsigned int width, unsigned int height)
+{
+ return hantro_hevc_luma_compressed_size(width, height) +
+ hantro_hevc_chroma_compressed_size(width, height);
+}
+
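+/*
+ * For example, at 1920x1088 the luma table spans 240x136 CBSs = 32640
+ * bytes and the chroma table 128x136 CBSs = 17408 bytes, so compression
+ * costs about 49 KiB of extra memory per reference buffer.
+ */
+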
static inline unsigned short hantro_av1_num_sbs(unsigned short dimension)
{
return DIV_ROUND_UP(dimension, 64);
@@ -525,6 +561,8 @@ hantro_av1_mv_size(unsigned int width, unsigned int height)
size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx);
size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx);
+size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx);
+size_t hantro_g2_chroma_compress_offset(struct hantro_ctx *ctx);
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 41e93176300b..232c93eea7ee 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -213,9 +213,13 @@ static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx)
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
buf_size += hantro_vp9_mv_size(pix_mp.width,
pix_mp.height);
- else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) {
buf_size += hantro_hevc_mv_size(pix_mp.width,
pix_mp.height);
+ if (ctx->hevc_dec.use_compression)
+ buf_size += hantro_hevc_compressed_size(pix_mp.width,
+ pix_mp.height);
+ }
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
buf_size += hantro_av1_mv_size(pix_mp.width,
pix_mp.height);
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index df6f2536263b..62d3962c18d9 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -303,11 +303,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
coded = capture == ctx->is_encoder;
- vpu_debug(4, "trying format %c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
+ vpu_debug(4, "trying format %p4cc\n", &pix_mp->pixelformat);
fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
index cc4483857489..65e8f2d07400 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -257,7 +257,8 @@ static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx)
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
- unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col;
+ const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info;
+ unsigned int num_tile_cols = tile_info->tile_cols;
unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64);
unsigned int height_in_sb = height / 64;
unsigned int stripe_num = ((height + 8) + 63) / 64;
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
index 850ff0f84424..e4008da64f19 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
@@ -327,7 +327,7 @@
#define av1_apf_threshold AV1_DEC_REG(55, 0, 0xffff)
#define av1_apf_single_pu_mode AV1_DEC_REG(55, 30, 0x1)
-#define av1_apf_disable AV1_DEC_REG(55, 30, 0x1)
+#define av1_apf_disable AV1_DEC_REG(55, 31, 0x1)
#define av1_dec_max_burst AV1_DEC_REG(58, 0, 0xff)
#define av1_dec_buswidth AV1_DEC_REG(58, 8, 0x7)
@@ -337,10 +337,10 @@
#define av1_dec_mc_polltime AV1_DEC_REG(58, 17, 0x3ff)
#define av1_dec_mc_pollmode AV1_DEC_REG(58, 27, 0x3)
-#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x3f)
-#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x3f)
-#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x3f)
-#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x3f)
+#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x7f)
+#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x7f)
+#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x7f)
+#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x7f)
#define av1_ref0_sign_bias AV1_DEC_REG(59, 28, 0x1)
#define av1_ref1_sign_bias AV1_DEC_REG(59, 29, 0x1)
#define av1_ref2_sign_bias AV1_DEC_REG(59, 30, 0x1)
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index f97527670783..964122e7c355 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -82,7 +82,6 @@ static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
- .match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 996684a73038..bfe48cc0ab52 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -199,18 +199,13 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
struct media_pad *sink_pad;
struct xvip_graph_entity *ent;
struct v4l2_fwnode_link link;
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct xvip_dma *dma;
int ret = 0;
dev_dbg(xdev->dev, "creating links for DMA engines\n");
- while (1) {
- /* Get the next endpoint and parse its link. */
- ep = of_graph_get_next_endpoint(node, ep);
- if (ep == NULL)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 14e7dd3889ff..dd85b0b1bcd9 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -502,7 +502,7 @@ static void tea5764_i2c_remove(struct i2c_client *client)
/* I2C subsystem interface */
static const struct i2c_device_id tea5764_id[] = {
- { "radio-tea5764", 0 },
+ { "radio-tea5764" },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(i2c, tea5764_id);
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 91345198bbf1..d9eecddffd91 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -395,8 +395,8 @@ static void saa7706h_remove(struct i2c_client *client)
}
static const struct i2c_device_id saa7706h_id[] = {
- {DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, saa7706h_id);
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index fd449e42c191..cdd2ac198f2c 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -28,7 +28,7 @@
/* I2C Device ID List */
static const struct i2c_device_id si470x_i2c_id[] = {
/* Generic Entry */
- { "si470x", 0 },
+ { "si470x" },
/* Terminating entry */
{ }
};
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index ddaf7a60b7d0..e71272c6de37 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1639,8 +1639,8 @@ static void si4713_remove(struct i2c_client *client)
/* si4713_i2c_driver - i2c driver interface */
static const struct i2c_device_id si4713_id[] = {
- { "si4713" , 0 },
- { },
+ { "si4713" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, si4713_id);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 215168aa1588..b00ccf651922 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -173,8 +173,8 @@ static void tef6862_remove(struct i2c_client *client)
}
static const struct i2c_device_id tef6862_id[] = {
- {DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, tef6862_id);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 11ee21a7db8f..67722e2e47ff 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -451,9 +451,6 @@ select_timeout:
dev->rdev->max_timeout = 200000;
}
- if (dev->hw_learning_and_tx_capable)
- dev->rdev->tx_resolution = sample_period;
-
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
if (dev->rdev->timeout < dev->rdev->min_timeout)
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index fcfadd7ea31c..2bacecb02262 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1380,7 +1380,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
- rdev->tx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
/* set up transmitter related values */
rdev->tx_ir = ite_tx_ir;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 717c441b4a86..b8dfd530fab7 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -820,20 +820,20 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
struct lirc_fh *fh;
struct rc_dev *dev;
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &lirc_fops) {
+ if (fd_file(f)->f_op != &lirc_fops) {
fdput(f);
return ERR_PTR(-EINVAL);
}
- if (write && !(f.file->f_mode & FMODE_WRITE)) {
+ if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) {
fdput(f);
return ERR_PTR(-EPERM);
}
- fh = f.file->private_data;
+ fh = fd_file(f)->private_data;
dev = fh->rc;
get_device(&dev->dev);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 5303e6da5809..9cdb45821ecc 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -567,6 +567,32 @@ static void meson_ir_shutdown(struct platform_device *pdev)
spin_unlock_irqrestore(&ir->lock, flags);
}
+static __maybe_unused int meson_ir_resume(struct device *dev)
+{
+ struct meson_ir *ir = dev_get_drvdata(dev);
+
+ if (ir->param->support_hw_decoder)
+ meson_ir_hw_decoder_init(ir->rc, &ir->rc->enabled_protocols);
+ else
+ meson_ir_sw_decoder_init(ir->rc);
+
+ return 0;
+}
+
+static __maybe_unused int meson_ir_suspend(struct device *dev)
+{
+ struct meson_ir *ir = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ir->lock, flags);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
+ spin_unlock_irqrestore(&ir->lock, flags);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(meson_ir_pm_ops, meson_ir_suspend, meson_ir_resume);
+
static const struct meson_ir_param meson6_ir_param = {
.support_hw_decoder = false,
.max_register = IR_DEC_REG1,
@@ -607,6 +633,7 @@ static struct platform_driver meson_ir_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_ir_match,
+ .pm = pm_ptr(&meson_ir_pm_ops),
},
};
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index b356041c5c00..8288366f891f 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -230,7 +230,6 @@ static int __init loop_init(void)
rc->min_timeout = 1;
rc->max_timeout = IR_MAX_TIMEOUT;
rc->rx_resolution = 1;
- rc->tx_resolution = 1;
rc->s_tx_mask = loop_set_tx_mask;
rc->s_tx_carrier = loop_set_tx_carrier;
rc->s_tx_duty_cycle = loop_set_tx_duty_cycle;
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 3e011fe62ae1..846e90c06291 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1215,8 +1215,7 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
- !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec);
@@ -1250,8 +1249,7 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
- !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index 7a0cd9601917..505f96fccbf3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -407,7 +407,7 @@ static const struct dvb_frontend_ops vidtv_demod_ops = {
};
static const struct i2c_device_id vidtv_demod_i2c_id_table[] = {
- {"dvb_vidtv_demod", 0},
+ { "dvb_vidtv_demod" },
{}
};
MODULE_DEVICE_TABLE(i2c, vidtv_demod_i2c_id_table);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index a748737d47f3..4ba302d569d6 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -385,7 +385,7 @@ static const struct dvb_tuner_ops vidtv_tuner_ops = {
};
static const struct i2c_device_id vidtv_tuner_i2c_id_table[] = {
- {"dvb_vidtv_tuner", 0},
+ { "dvb_vidtv_tuner" },
{}
};
MODULE_DEVICE_TABLE(i2c, vidtv_tuner_i2c_id_table);
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 941ef4263214..356a988dd6a1 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -316,15 +316,16 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
struct vivid_dev *dev = cec_get_drvdata(adap);
struct cec_msg reply;
u8 dest = cec_msg_destination(msg);
- u8 disp_ctl;
- char osd[14];
if (cec_msg_is_broadcast(msg))
dest = adap->log_addrs.log_addr[0];
cec_msg_init(&reply, dest, cec_msg_initiator(msg));
switch (cec_msg_opcode(msg)) {
- case CEC_MSG_SET_OSD_STRING:
+ case CEC_MSG_SET_OSD_STRING: {
+ u8 disp_ctl;
+ char osd[14];
+
if (!cec_is_sink(adap))
return -ENOMSG;
cec_ops_set_osd_string(msg, &disp_ctl, osd);
@@ -348,6 +349,47 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
break;
}
break;
+ }
+ case CEC_MSG_VENDOR_COMMAND_WITH_ID: {
+ u32 vendor_id;
+ u8 size;
+ const u8 *vendor_cmd;
+
+ /*
+ * If we receive <Vendor Command With ID> with our vendor ID
+ * and with a payload of size 1, and the payload value is odd,
+ * then we reply with the same message, but with the payload
+ * byte incremented by 1.
+ *
+ * If the size is 1 and the payload value is even, then we
+ * ignore the message.
+ *
+ * The reason we reply to odd instead of even payload values
+ * is that it allows for testing of the corner case where the
+	 * reply value is 0 ((0xff + 1) % 256).
+ *
+ * For other sizes we Feature Abort.
+ *
+ * This is added for the specific purpose of testing the
+ * CEC_MSG_FL_REPLY_VENDOR_ID flag using vivid.
+ */
+ cec_ops_vendor_command_with_id(msg, &vendor_id, &size, &vendor_cmd);
+ if (vendor_id != adap->log_addrs.vendor_id)
+ break;
+ if (size == 1) {
+ // Ignore even op values
+ if (!(vendor_cmd[0] & 1))
+ break;
+ reply.len = msg->len;
+ memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1);
+ reply.msg[msg->len - 1]++;
+ } else {
+ cec_msg_feature_abort(&reply, cec_msg_opcode(msg),
+ CEC_OP_ABORT_INVALID_OP);
+ }
+ cec_transmit_msg(adap, &reply, false);
+ break;
+ }
default:
return -ENOMSG;
}
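
The odd-payload convention above is easy to exercise from userspace. A
minimal sketch, assuming fd is an open /dev/cecX handle for the vivid
adapter and vendor_id matches the adapter's configured vendor ID:

#include <sys/ioctl.h>
#include <linux/cec.h>
#include <linux/cec-funcs.h>

/* Send <Vendor Command With ID> with a one-byte odd payload; vivid
 * replies with the same message, payload incremented (0xff wraps to 0).
 */
static void test_vendor_reply(int fd, __u32 vendor_id)
{
	struct cec_msg msg;
	__u8 cmd = 0xff;	/* odd, so a reply is generated */

	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	cec_msg_vendor_command_with_id(&msg, vendor_id, 1, &cmd);
	ioctl(fd, CEC_TRANSMIT, &msg);
}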
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 3893a00c18ce..549b2009f974 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -719,7 +719,7 @@ static void e4000_remove(struct i2c_client *client)
}
static const struct i2c_device_id e4000_id_table[] = {
- {"e4000", 0},
+ { "e4000" },
{}
};
MODULE_DEVICE_TABLE(i2c, e4000_id_table);
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index f6613dcf40a3..046389896dc5 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -600,7 +600,7 @@ static void fc2580_remove(struct i2c_client *client)
}
static const struct i2c_device_id fc2580_id_table[] = {
- {"fc2580", 0},
+ { "fc2580" },
{}
};
MODULE_DEVICE_TABLE(i2c, fc2580_id_table);
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 2cd7f0e0c70d..cc57980ed417 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -709,7 +709,7 @@ static void m88rs6000t_remove(struct i2c_client *client)
}
static const struct i2c_device_id m88rs6000t_id[] = {
- {"m88rs6000t", 0},
+ { "m88rs6000t" },
{}
};
MODULE_DEVICE_TABLE(i2c, m88rs6000t_id);
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 4205ed4cf467..4b9dca2f17cc 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -514,7 +514,7 @@ static void mt2060_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt2060_id_table[] = {
- {"mt2060", 0},
+ { "mt2060" },
{}
};
MODULE_DEVICE_TABLE(i2c, mt2060_id_table);
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index 9b2b237745ae..7c03d4132763 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -317,7 +317,7 @@ static void mxl301rf_remove(struct i2c_client *client)
static const struct i2c_device_id mxl301rf_id[] = {
- {"mxl301rf", 0},
+ { "mxl301rf" },
{}
};
MODULE_DEVICE_TABLE(i2c, mxl301rf_id);
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index af2d3618b9d5..c53aeb558413 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -243,7 +243,7 @@ static void qm1d1b0004_remove(struct i2c_client *client)
static const struct i2c_device_id qm1d1b0004_id[] = {
- {"qm1d1b0004", 0},
+ { "qm1d1b0004" },
{}
};
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index ce7223315b0c..c58f5b6526f1 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -434,7 +434,7 @@ static void qm1d1c0042_remove(struct i2c_client *client)
static const struct i2c_device_id qm1d1c0042_id[] = {
- {"qm1d1c0042", 0},
+ { "qm1d1c0042" },
{}
};
MODULE_DEVICE_TABLE(i2c, qm1d1c0042_id);
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 8d742bd61df0..39f2dc9c2845 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -254,7 +254,7 @@ static void tda18212_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda18212_id[] = {
- {"tda18212", 0},
+ { "tda18212" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda18212_id);
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 32ea473f3f49..68d0275f29e1 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -868,7 +868,7 @@ static void tda18250_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda18250_id_table[] = {
- {"tda18250", 0},
+ { "tda18250" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda18250_id_table);
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 03a3a022b0a8..562a7a5c26f5 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -245,7 +245,7 @@ static void tua9001_remove(struct i2c_client *client)
}
static const struct i2c_device_id tua9001_id_table[] = {
- {"tua9001", 0},
+ { "tua9001" },
{}
};
MODULE_DEVICE_TABLE(i2c, tua9001_id_table);
diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h
index 07aeead0644a..724952e001cd 100644
--- a/drivers/media/tuners/tuner-i2c.h
+++ b/drivers/media/tuners/tuner-i2c.h
@@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props,
} \
if (0 == __ret) { \
state = kzalloc(sizeof(type), GFP_KERNEL); \
- if (!state) { \
- __ret = -ENOMEM; \
+ if (NULL == state) \
goto __fail; \
- } \
state->i2c_props.addr = i2caddr; \
state->i2c_props.adap = i2cadap; \
state->i2c_props.name = devname; \
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index db1fab96d529..a155b987282f 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -611,7 +611,7 @@ static void s2250_remove(struct i2c_client *client)
}
static const struct i2c_device_id s2250_id[] = {
- { "s2250", 0 },
+ { "s2250" },
{ }
};
MODULE_DEVICE_TABLE(i2c, s2250_id);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index be2ba7ca5de2..570ba00e00b3 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -557,6 +557,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING;
+ bool is_edid = vdev->device_caps & V4L2_CAP_EDID;
bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
@@ -784,6 +785,20 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
}
+ if (is_edid) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
+ if (is_tx) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ }
+ if (is_rx) {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
+ }
+ }
bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
BASE_VIDIOC_PRIVATE);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 5eb4d797d259..e14db67be97c 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -484,7 +484,7 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
- pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u",
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ",
p->index, p->count, prt_names(p->memory, v4l2_memory_names),
p->capabilities, p->max_num_buffers);
v4l_print_format(&p->format, write_only);
@@ -1458,6 +1458,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break;
case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 7c5812d55315..3a4ba08810d2 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -1443,16 +1443,53 @@ int v4l2_subdev_link_validate(struct media_link *link)
bool states_locked;
int ret;
- if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
- !is_media_entity_v4l2_subdev(link->source->entity)) {
- pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
- !is_media_entity_v4l2_subdev(link->sink->entity) ?
- "sink" : "source",
- link->source->entity->name, link->source->index,
- link->sink->entity->name, link->sink->index);
- return 0;
+ /*
+ * Links are validated in the context of the sink entity. Usage of this
+ * helper on a sink that is not a subdev is a clear driver bug.
+ */
+ if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
+ return -EINVAL;
+
+ /*
+ * If the source is a video device, delegate link validation to it. This
+ * allows usage of this helper for a subdev connected to a video output
+ * device, provided that the driver implements the video output device's
+ * .link_validate() operation.
+ */
+ if (is_media_entity_v4l2_video_device(link->source->entity)) {
+ struct media_entity *source = link->source->entity;
+
+ if (!source->ops || !source->ops->link_validate) {
+ /*
+ * Many existing drivers do not implement the required
+ * .link_validate() operation for their video devices.
+ * Print a warning to get the drivers fixed, and return
+ * 0 to avoid breaking userspace. This should
+ * eventually be turned into a WARN_ON() once all
+ * drivers have been fixed.
+ */
+ pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
+ source->name);
+ return 0;
+ }
+
+ /*
+ * Avoid infinite loops in case a video device incorrectly uses
+ * this helper function as its .link_validate() handler.
+ */
+ if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
+ return -EINVAL;
+
+ return source->ops->link_validate(link);
}
+ /*
+ * If the source is still not a subdev, usage of this helper is a clear
+ * driver bug.
+ */
+ if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
+ return -EINVAL;
+
sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
source_sd = media_entity_to_v4l2_subdev(link->source->entity);
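For the delegation above to work, a video output device must install a .link_validate() entity operation. A minimal sketch under that assumption; the demo_* names are hypothetical:

	static int demo_vdev_link_validate(struct media_link *link)
	{
		/* Compare the upstream subdev format with the video device format. */
		return 0;
	}

	static const struct media_entity_operations demo_vdev_entity_ops = {
		.link_validate = demo_vdev_link_validate,
	};

	/* At registration time: vdev->entity.ops = &demo_vdev_entity_ops; */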
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index e8bb5f37f5cb..8db970da9af9 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -6,6 +6,7 @@
* Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -517,7 +518,7 @@ static int atmel_ebi_dev_disable(struct atmel_ebi *ebi, struct device_node *np)
static int atmel_ebi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *np = dev->of_node, *smc_np;
+ struct device_node *np = dev->of_node;
struct atmel_ebi *ebi;
int ret, reg_cells;
struct clk *clk;
@@ -541,30 +542,24 @@ static int atmel_ebi_probe(struct platform_device *pdev)
ebi->clk = clk;
- smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
+ struct device_node *smc_np __free(device_node) =
+ of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
- if (IS_ERR(ebi->smc.regmap)) {
- ret = PTR_ERR(ebi->smc.regmap);
- goto put_node;
- }
+ if (IS_ERR(ebi->smc.regmap))
+ return PTR_ERR(ebi->smc.regmap);
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
- if (IS_ERR(ebi->smc.layout)) {
- ret = PTR_ERR(ebi->smc.layout);
- goto put_node;
- }
+ if (IS_ERR(ebi->smc.layout))
+ return PTR_ERR(ebi->smc.layout);
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
- if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
- ret = PTR_ERR(ebi->smc.clk);
- goto put_node;
- }
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT)
+ return PTR_ERR(ebi->smc.clk);
ebi->smc.clk = NULL;
}
- of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -597,7 +592,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
reg_cells += val;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
if (!of_property_present(child, "reg"))
continue;
@@ -607,18 +602,12 @@ static int atmel_ebi_probe(struct platform_device *pdev)
child);
ret = atmel_ebi_dev_disable(ebi, child);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
}
return of_platform_populate(np, NULL, NULL, dev);
-
-put_node:
- of_node_put(smc_np);
- return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)
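The atmel-ebi rewrite leans on the scope-based cleanup helpers from <linux/cleanup.h>: a node declared with __free(device_node) has of_node_put() run on it automatically when it leaves scope, which is what makes the bare early returns above safe. A minimal sketch of the idiom, with a hypothetical phandle name:

	static int demo_get_phandle(struct device *dev)
	{
		struct device_node *np __free(device_node) =
			of_parse_phandle(dev->of_node, "demo,phandle", 0);

		if (!np)
			return -ENOENT;

		/* use np; of_node_put(np) runs on every return path */
		return 0;
	}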
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 8c5ad5c025fa..99eb7d1baa5f 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -7,6 +7,7 @@
* Aneesh V <aneesh@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
@@ -57,7 +58,6 @@ struct emif_data {
u8 temperature_level;
u8 lpmode;
struct list_head node;
- unsigned long irq_state;
void __iomem *base;
struct device *dev;
struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES];
@@ -69,7 +69,6 @@ struct emif_data {
static struct emif_data *emif1;
static DEFINE_SPINLOCK(emif_lock);
-static unsigned long irq_state;
static LIST_HEAD(device_list);
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
@@ -523,18 +522,18 @@ out:
static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
u32 old_temp_level;
- irqreturn_t ret = IRQ_HANDLED;
+ irqreturn_t ret;
struct emif_custom_configs *custom_configs;
- spin_lock_irqsave(&emif_lock, irq_state);
+ guard(spinlock_irqsave)(&emif_lock);
old_temp_level = emif->temperature_level;
get_temperature_level(emif);
if (unlikely(emif->temperature_level == old_temp_level)) {
- goto out;
+ return IRQ_HANDLED;
} else if (!emif->curr_regs) {
dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
- goto out;
+ return IRQ_HANDLED;
}
custom_configs = emif->plat_data->custom_configs;
@@ -554,8 +553,7 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
* from thread context
*/
emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
- ret = IRQ_WAKE_THREAD;
- goto out;
+ return IRQ_WAKE_THREAD;
}
}
@@ -571,10 +569,9 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
/* Temperature is going up - handle immediately */
setup_temperature_sensitive_regs(emif, emif->curr_regs);
do_freq_update();
+ ret = IRQ_HANDLED;
}
-out:
- spin_unlock_irqrestore(&emif_lock, irq_state);
return ret;
}
@@ -617,6 +614,7 @@ static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
struct emif_data *emif = dev_id;
+ unsigned long irq_state;
if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
@@ -864,7 +862,7 @@ static void of_get_custom_configs(struct device_node *np_emif,
be32_to_cpup(poll_intvl);
}
- if (of_find_property(np_emif, "extended-temp-part", &len))
+ if (of_property_read_bool(np_emif, "extended-temp-part"))
cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;
if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
@@ -880,13 +878,9 @@ static void of_get_ddr_info(struct device_node *np_emif,
struct ddr_device_info *dev_info)
{
u32 density = 0, io_width = 0;
- int len;
- if (of_find_property(np_emif, "cs1-used", &len))
- dev_info->cs1_used = true;
-
- if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
- dev_info->cal_resistors_per_cs = true;
+ dev_info->cs1_used = of_property_read_bool(np_emif, "cs1-used");
+ dev_info->cal_resistors_per_cs = of_property_read_bool(np_emif, "cal-resistor-per-cs");
if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
dev_info->type = DDR_TYPE_LPDDR2_S4;
@@ -916,7 +910,6 @@ static struct emif_data *of_get_memory_device_details(
struct ddr_device_info *dev_info = NULL;
struct emif_platform_data *pd = NULL;
struct device_node *np_ddr;
- int len;
np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
if (!np_ddr)
@@ -944,7 +937,7 @@ static struct emif_data *of_get_memory_device_details(
of_property_read_u32(np_emif, "phy-type", &pd->phy_type);
- if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
+ if (of_property_read_bool(np_emif, "hw-caps-ll-interface"))
pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
of_get_ddr_info(np_emif, np_ddr, dev_info);
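The of_find_property() calls replaced above only tested for a property's presence, which is exactly what of_property_read_bool() expresses, so the scratch len variables can go. The shape of the conversion:

	/* presence-only checks read naturally as booleans */
	dev_info->cs1_used = of_property_read_bool(np_emif, "cs1-used");
	if (of_property_read_bool(np_emif, "hw-caps-ll-interface"))
		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;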
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index fbe52ecc0eca..2bc034dff691 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -771,13 +771,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->smi_ao_base))
return PTR_ERR(common->smi_ao_base);
- common->clk_async = devm_clk_get(dev, "async");
+ common->clk_async = devm_clk_get_enabled(dev, "async");
if (IS_ERR(common->clk_async))
return PTR_ERR(common->clk_async);
-
- ret = clk_prepare_enable(common->clk_async);
- if (ret)
- return ret;
} else {
common->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->base))
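devm_clk_get_enabled(), used here and in several conversions below, combines devm_clk_get() with clk_prepare_enable() and registers the matching disable/unprepare with devres, so the manual unwind path disappears. The shape of the conversion, sketched:

	clk = devm_clk_get_enabled(dev, "async");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* clk is prepared and enabled; devres disables it on unbind */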
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 80d038884207..c8a0d82f9c27 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -9,6 +9,7 @@
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/cpu_pm.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -989,18 +990,18 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
if (size > (1 << GPMC_SECTION_SHIFT))
return -ENOMEM;
- spin_lock(&gpmc_mem_lock);
- if (gpmc_cs_reserved(cs)) {
- r = -EBUSY;
- goto out;
- }
+ guard(spinlock)(&gpmc_mem_lock);
+
+ if (gpmc_cs_reserved(cs))
+ return -EBUSY;
+
if (gpmc_cs_mem_enabled(cs))
r = adjust_resource(res, res->start & ~(size - 1), size);
if (r < 0)
r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
size, NULL, NULL);
if (r < 0)
- goto out;
+ return r;
/* Disable CS while changing base address and size mask */
gpmc_cs_disable_mem(cs);
@@ -1008,16 +1009,15 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
if (r < 0) {
release_resource(res);
- goto out;
+ return r;
}
/* Enable CS */
gpmc_cs_enable_mem(cs);
*base = res->start;
gpmc_cs_set_reserved(cs, 1);
-out:
- spin_unlock(&gpmc_mem_lock);
- return r;
+
+ return 0;
}
EXPORT_SYMBOL(gpmc_cs_request);
@@ -1026,10 +1026,9 @@ void gpmc_cs_free(int cs)
struct gpmc_cs_data *gpmc;
struct resource *res;
- spin_lock(&gpmc_mem_lock);
+ guard(spinlock)(&gpmc_mem_lock);
if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
WARN(1, "Trying to free non-reserved GPMC CS%d\n", cs);
- spin_unlock(&gpmc_mem_lock);
return;
}
gpmc = &gpmc_cs[cs];
@@ -1039,7 +1038,6 @@ void gpmc_cs_free(int cs)
if (res->flags)
release_resource(res);
gpmc_cs_set_reserved(cs, 0);
- spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);
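gpmc_cs_request() and gpmc_cs_free() now use the guard() lock scopes from <linux/cleanup.h> (as does handle_temp_alert() in emif above, with the irqsave variant): the lock is released automatically on every return, so the out: unlock labels go away. A minimal sketch with hypothetical names:

	static DEFINE_SPINLOCK(demo_lock);

	static int demo_reserve(int cs)
	{
		guard(spinlock)(&demo_lock);

		if (demo_is_reserved(cs))	/* hypothetical helper */
			return -EBUSY;		/* unlocks automatically */

		demo_set_reserved(cs);
		return 0;			/* ...and here */
	}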
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index 9eb8cc7de494..be7ba599cccf 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -187,6 +187,13 @@ static int pl172_parse_cs_config(struct amba_device *adev,
return -EINVAL;
}
+static void pl172_amba_release_regions(void *data)
+{
+ struct amba_device *adev = data;
+
+ amba_release_regions(adev);
+}
+
static const char * const pl172_revisions[] = {"r1", "r2", "r2p3", "r2p4"};
static const char * const pl175_revisions[] = {"r1"};
static const char * const pl176_revisions[] = {"r0"};
@@ -216,38 +223,30 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id)
if (!pl172)
return -ENOMEM;
- pl172->clk = devm_clk_get(dev, "mpmcclk");
- if (IS_ERR(pl172->clk)) {
- dev_err(dev, "no mpmcclk provided clock\n");
- return PTR_ERR(pl172->clk);
- }
-
- ret = clk_prepare_enable(pl172->clk);
- if (ret) {
- dev_err(dev, "unable to mpmcclk enable clock\n");
- return ret;
- }
+ pl172->clk = devm_clk_get_enabled(dev, "mpmcclk");
+ if (IS_ERR(pl172->clk))
+ return dev_err_probe(dev, PTR_ERR(pl172->clk),
+ "no mpmcclk provided clock\n");
pl172->rate = clk_get_rate(pl172->clk) / MSEC_PER_SEC;
- if (!pl172->rate) {
- dev_err(dev, "unable to get mpmcclk clock rate\n");
- ret = -EINVAL;
- goto err_clk_enable;
- }
+ if (!pl172->rate)
+ return dev_err_probe(dev, -EINVAL,
+ "unable to get mpmcclk clock rate\n");
ret = amba_request_regions(adev, NULL);
if (ret) {
dev_err(dev, "unable to request AMBA regions\n");
- goto err_clk_enable;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, pl172_amba_release_regions, adev);
+ if (ret)
+ return ret;
+
pl172->base = devm_ioremap(dev, adev->res.start,
resource_size(&adev->res));
- if (!pl172->base) {
- dev_err(dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_no_ioremap;
- }
+ if (!pl172->base)
+ return dev_err_probe(dev, -ENOMEM, "ioremap failed\n");
amba_set_drvdata(adev, pl172);
@@ -265,20 +264,6 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id)
}
return 0;
-
-err_no_ioremap:
- amba_release_regions(adev);
-err_clk_enable:
- clk_disable_unprepare(pl172->clk);
- return ret;
-}
-
-static void pl172_remove(struct amba_device *adev)
-{
- struct pl172_data *pl172 = amba_get_drvdata(adev);
-
- clk_disable_unprepare(pl172->clk);
- amba_release_regions(adev);
}
static const struct amba_id pl172_ids[] = {
@@ -306,7 +291,6 @@ static struct amba_driver pl172_driver = {
.name = "memory-pl172",
},
.probe = pl172_probe,
- .remove = pl172_remove,
.id_table = pl172_ids,
};
module_amba_driver(pl172_driver);
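The one resource in pl172 without a devm variant, amba_request_regions(), is covered by devm_add_action_or_reset(): the callback runs on driver detach, or immediately if registering it fails, which is what lets the .remove() handler be deleted. Sketched:

	static void demo_release_regions(void *data)
	{
		amba_release_regions(data);
	}

	/* in probe, right after a successful amba_request_regions(adev, NULL) */
	ret = devm_add_action_or_reset(dev, demo_release_regions, adev);
	if (ret)
		return ret;	/* the callback has already run */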
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 56e51737c81f..28a8cc56003c 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -74,73 +74,39 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
struct device_node *of_node = adev->dev.of_node;
const struct of_device_id *match = NULL;
struct pl353_smc_data *pl353_smc;
- struct device_node *child;
- int err;
pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL);
if (!pl353_smc)
return -ENOMEM;
- pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk");
- if (IS_ERR(pl353_smc->aclk)) {
- dev_err(&adev->dev, "aclk clock not found.\n");
- return PTR_ERR(pl353_smc->aclk);
- }
-
- pl353_smc->memclk = devm_clk_get(&adev->dev, "memclk");
- if (IS_ERR(pl353_smc->memclk)) {
- dev_err(&adev->dev, "memclk clock not found.\n");
- return PTR_ERR(pl353_smc->memclk);
- }
+ pl353_smc->aclk = devm_clk_get_enabled(&adev->dev, "apb_pclk");
+ if (IS_ERR(pl353_smc->aclk))
+ return dev_err_probe(&adev->dev, PTR_ERR(pl353_smc->aclk),
+ "aclk clock not found.\n");
- err = clk_prepare_enable(pl353_smc->aclk);
- if (err) {
- dev_err(&adev->dev, "Unable to enable AXI clock.\n");
- return err;
- }
-
- err = clk_prepare_enable(pl353_smc->memclk);
- if (err) {
- dev_err(&adev->dev, "Unable to enable memory clock.\n");
- goto disable_axi_clk;
- }
+ pl353_smc->memclk = devm_clk_get_enabled(&adev->dev, "memclk");
+ if (IS_ERR(pl353_smc->memclk))
+ return dev_err_probe(&adev->dev, PTR_ERR(pl353_smc->memclk),
+ "memclk clock not found.\n");
amba_set_drvdata(adev, pl353_smc);
/* Find compatible children. Only a single child is supported */
- for_each_available_child_of_node(of_node, child) {
+ for_each_available_child_of_node_scoped(of_node, child) {
match = of_match_node(pl353_smc_supported_children, child);
if (!match) {
dev_warn(&adev->dev, "unsupported child node\n");
continue;
}
+ of_platform_device_create(child, NULL, &adev->dev);
break;
}
if (!match) {
- err = -ENODEV;
dev_err(&adev->dev, "no matching children\n");
- goto disable_mem_clk;
+ return -ENODEV;
}
- of_platform_device_create(child, NULL, &adev->dev);
- of_node_put(child);
-
return 0;
-
-disable_mem_clk:
- clk_disable_unprepare(pl353_smc->memclk);
-disable_axi_clk:
- clk_disable_unprepare(pl353_smc->aclk);
-
- return err;
-}
-
-static void pl353_smc_remove(struct amba_device *adev)
-{
- struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
-
- clk_disable_unprepare(pl353_smc->memclk);
- clk_disable_unprepare(pl353_smc->aclk);
}
static const struct amba_id pl353_ids[] = {
@@ -159,7 +125,6 @@ static struct amba_driver pl353_smc_driver = {
},
.id_table = pl353_ids,
.probe = pl353_smc_probe,
- .remove = pl353_smc_remove,
};
module_amba_driver(pl353_smc_driver);
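dev_err_probe(), used in the pl172 and pl353 conversions, returns the error it is given and stays quiet for -EPROBE_DEFER (recording the deferral reason for debugfs instead), so each log-then-return pair collapses to one statement:

	clk = devm_clk_get_enabled(&adev->dev, "memclk");
	if (IS_ERR(clk))
		return dev_err_probe(&adev->dev, PTR_ERR(clk),
				     "memclk clock not found\n");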
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 3167826b236a..7fbd36fa1a1b 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -367,7 +367,7 @@ int rpcif_hw_init(struct device *dev, bool hyperflash)
regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
RPCIF_CMNCR_BSZ(3),
- RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) |
+ RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(3) |
RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
else
regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index da7ecd921c72..7d80322754fa 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -4,6 +4,7 @@
* Author: Lukasz Luba <l.luba@partner.samsung.com>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
@@ -339,19 +340,20 @@ static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
struct devfreq_dev_profile *profile)
{
+ struct device *dev = dmc->dev;
int i, ret;
int idx;
unsigned long freq;
- ret = devm_pm_opp_of_add_table(dmc->dev);
+ ret = devm_pm_opp_of_add_table(dev);
if (ret < 0) {
- dev_err(dmc->dev, "Failed to get OPP table\n");
+ dev_err(dev, "Failed to get OPP table\n");
return ret;
}
- dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev);
+ dmc->opp_count = dev_pm_opp_get_opp_count(dev);
- dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count,
+ dmc->opp = devm_kmalloc_array(dev, dmc->opp_count,
sizeof(struct dmc_opp_table), GFP_KERNEL);
if (!dmc->opp)
return -ENOMEM;
@@ -360,7 +362,7 @@ static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
struct dev_pm_opp *opp;
- opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq);
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
if (IS_ERR(opp))
return PTR_ERR(opp);
@@ -1175,51 +1177,44 @@ static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
static int of_get_dram_timings(struct exynos5_dmc *dmc)
{
int ret = 0;
+ struct device *dev = dmc->dev;
int idx;
- struct device_node *np_ddr;
u32 freq_mhz, clk_period_ps;
- np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0);
+ struct device_node *np_ddr __free(device_node) =
+ of_parse_phandle(dev->of_node, "device-handle", 0);
if (!np_ddr) {
- dev_warn(dmc->dev, "could not find 'device-handle' in DT\n");
+ dev_warn(dev, "could not find 'device-handle' in DT\n");
return -EINVAL;
}
- dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ dmc->timing_row = devm_kmalloc_array(dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_row) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!dmc->timing_row)
+ return -ENOMEM;
- dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ dmc->timing_data = devm_kmalloc_array(dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_data) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!dmc->timing_data)
+ return -ENOMEM;
- dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ dmc->timing_power = devm_kmalloc_array(dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_power) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!dmc->timing_power)
+ return -ENOMEM;
- dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dev,
DDR_TYPE_LPDDR3,
&dmc->timings_arr_size);
if (!dmc->timings) {
- dev_warn(dmc->dev, "could not get timings from DT\n");
- ret = -EINVAL;
- goto put_node;
+ dev_warn(dev, "could not get timings from DT\n");
+ return -EINVAL;
}
- dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dev);
if (!dmc->min_tck) {
- dev_warn(dmc->dev, "could not get tck from DT\n");
- ret = -EINVAL;
- goto put_node;
+ dev_warn(dev, "could not get tck from DT\n");
+ return -EINVAL;
}
/* Sorted array of OPPs with frequency ascending */
@@ -1239,8 +1234,6 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
dmc->bypass_timing_data = dmc->timing_data[idx - 1];
dmc->bypass_timing_power = dmc->timing_power[idx - 1];
-put_node:
- of_node_put(np_ddr);
return ret;
}
@@ -1254,34 +1247,34 @@ put_node:
static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
{
int ret;
+ struct device *dev = dmc->dev;
unsigned long target_volt = 0;
unsigned long target_rate = 0;
unsigned int tmp;
- dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
+ dmc->fout_spll = devm_clk_get(dev, "fout_spll");
if (IS_ERR(dmc->fout_spll))
return PTR_ERR(dmc->fout_spll);
- dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
+ dmc->fout_bpll = devm_clk_get(dev, "fout_bpll");
if (IS_ERR(dmc->fout_bpll))
return PTR_ERR(dmc->fout_bpll);
- dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
+ dmc->mout_mclk_cdrex = devm_clk_get(dev, "mout_mclk_cdrex");
if (IS_ERR(dmc->mout_mclk_cdrex))
return PTR_ERR(dmc->mout_mclk_cdrex);
- dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
+ dmc->mout_bpll = devm_clk_get(dev, "mout_bpll");
if (IS_ERR(dmc->mout_bpll))
return PTR_ERR(dmc->mout_bpll);
- dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
- "mout_mx_mspll_ccore");
+ dmc->mout_mx_mspll_ccore = devm_clk_get(dev, "mout_mx_mspll_ccore");
if (IS_ERR(dmc->mout_mx_mspll_ccore))
return PTR_ERR(dmc->mout_mx_mspll_ccore);
- dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
+ dmc->mout_spll = devm_clk_get(dev, "ff_dout_spll2");
if (IS_ERR(dmc->mout_spll)) {
- dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
+ dmc->mout_spll = devm_clk_get(dev, "mout_sclk_spll");
if (IS_ERR(dmc->mout_spll))
return PTR_ERR(dmc->mout_spll);
}
@@ -1329,38 +1322,37 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
*/
static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
{
+ struct device *dev = dmc->dev;
int ret, i;
- dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
- "devfreq-events");
+ dmc->num_counters = devfreq_event_get_edev_count(dev, "devfreq-events");
if (dmc->num_counters < 0) {
- dev_err(dmc->dev, "could not get devfreq-event counters\n");
+ dev_err(dev, "could not get devfreq-event counters\n");
return dmc->num_counters;
}
- dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ dmc->counter = devm_kcalloc(dev, dmc->num_counters,
sizeof(*dmc->counter), GFP_KERNEL);
if (!dmc->counter)
return -ENOMEM;
for (i = 0; i < dmc->num_counters; i++) {
dmc->counter[i] =
- devfreq_event_get_edev_by_phandle(dmc->dev,
- "devfreq-events", i);
+ devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
if (IS_ERR_OR_NULL(dmc->counter[i]))
return -EPROBE_DEFER;
}
ret = exynos5_counters_enable_edev(dmc);
if (ret < 0) {
- dev_err(dmc->dev, "could not enable event counter\n");
+ dev_err(dev, "could not enable event counter\n");
return ret;
}
ret = exynos5_counters_set_event(dmc);
if (ret < 0) {
exynos5_counters_disable_edev(dmc);
- dev_err(dmc->dev, "could not set event counter\n");
+ dev_err(dev, "could not set event counter\n");
return ret;
}
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
index 1c63eeacd071..566c225f71c0 100644
--- a/drivers/memory/stm32-fmc2-ebi.c
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -1573,29 +1573,22 @@ static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
{
struct device *dev = ebi->dev;
- struct device_node *child;
bool child_found = false;
u32 bank;
int ret;
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &bank);
- if (ret) {
- dev_err(dev, "could not retrieve reg property: %d\n",
- ret);
- of_node_put(child);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "could not retrieve reg property\n");
if (bank >= FMC2_MAX_BANKS) {
dev_err(dev, "invalid reg value: %d\n", bank);
- of_node_put(child);
return -EINVAL;
}
if (ebi->bank_assigned & BIT(bank)) {
dev_err(dev, "bank already assigned: %d\n", bank);
- of_node_put(child);
return -EINVAL;
}
@@ -1603,19 +1596,15 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
ret = ebi->data->check_rif(ebi, bank + 1);
if (ret) {
dev_err(dev, "bank access failed: %d\n", bank);
- of_node_put(child);
return ret;
}
}
if (bank < FMC2_MAX_EBI_CE) {
ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank);
- if (ret) {
- dev_err(dev, "setup chip select %d failed: %d\n",
- bank, ret);
- of_node_put(child);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "setup chip select %d failed\n", bank);
}
ebi->bank_assigned |= BIT(bank);
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 224b488794e5..bd5b58f1fd42 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -450,7 +450,6 @@ static int load_one_timing(struct tegra_mc *mc,
static int load_timings(struct tegra_mc *mc, struct device_node *node)
{
- struct device_node *child;
struct tegra_mc_timing *timing;
int child_count = of_get_child_count(node);
int i = 0, err;
@@ -462,14 +461,12 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node)
mc->num_timings = child_count;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node_scoped(node, child) {
timing = &mc->timings[i++];
err = load_one_timing(mc, timing, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -477,7 +474,6 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node)
static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
- struct device_node *node;
u32 ram_code, node_ram_code;
int err;
@@ -485,14 +481,13 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc)
mc->num_timings = 0;
- for_each_child_of_node(mc->dev->of_node, node) {
+ for_each_child_of_node_scoped(mc->dev->of_node, node) {
err = of_property_read_u32(node, "nvidia,ram-code",
&node_ram_code);
if (err || (node_ram_code != ram_code))
continue;
err = load_timings(mc, node);
- of_node_put(node);
if (err)
return err;
break;
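for_each_child_of_node_scoped() (and the _available_ variant used in the earlier hunks) declares the iterator with automatic of_node_put() on scope exit, so an early return from the loop body no longer leaks a node reference. A minimal sketch; load_one() is hypothetical:

	for_each_child_of_node_scoped(node, child) {
		err = load_one(child);
		if (err)
			return err;	/* no of_node_put(child) needed */
	}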
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 47c0c19e13fd..03f1daa2d132 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -992,7 +992,6 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
struct device_node *node)
{
int child_count = of_get_child_count(node);
- struct device_node *child;
struct emc_timing *timing;
unsigned int i = 0;
int err;
@@ -1004,14 +1003,12 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
emc->num_timings = child_count;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node_scoped(node, child) {
timing = &emc->timings[i++];
err = load_one_timing_from_dt(emc, timing, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 57d9ae12fcfe..33d67d251719 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -35,11 +35,6 @@ struct tegra186_emc {
struct icc_provider provider;
};
-static inline struct tegra186_emc *to_tegra186_emc(struct icc_provider *provider)
-{
- return container_of(provider, struct tegra186_emc, provider);
-}
-
/*
* debugfs interface
*
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 97cf59523b0b..7193f848d17e 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -410,7 +410,6 @@ static int cmp_timings(const void *_a, const void *_b)
static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
struct device_node *node)
{
- struct device_node *child;
struct emc_timing *timing;
int child_count;
int err;
@@ -428,15 +427,13 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
timing = emc->timings;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node_scoped(node, child) {
if (of_node_name_eq(child, "lpddr2"))
continue;
err = load_one_timing_from_dt(emc, timing++, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
emc->num_timings++;
}
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
index 4cb608c71ead..a30a646ec468 100644
--- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -75,29 +75,29 @@ enum {
* The division portion of the average operation.
*/
#define __AVERAGE_PTFV(dev) \
- ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
- next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ ({ next->ptfv_list[(dev)] = \
+ next->ptfv_list[(dev)] / \
next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
/*
* Convert val to fixed point and add it to the temporary average.
*/
#define __INCREMENT_PTFV(dev, val) \
- ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
+ ({ next->ptfv_list[(dev)] += \
((val) * MOVAVG_PRECISION_FACTOR); })
/*
* Convert a moving average back to integral form and return the value.
*/
#define __MOVAVG_AC(timing, dev) \
- ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ ((timing)->ptfv_list[(dev)] / \
MOVAVG_PRECISION_FACTOR)
/* Weighted update. */
#define __WEIGHTED_UPDATE_PTFV(dev, nval) \
do { \
int w = PTFV_MOVAVG_WEIGHT_INDEX; \
- int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
+ int dqs = (dev); \
\
next->ptfv_list[dqs] = \
((nval * MOVAVG_PRECISION_FACTOR) + \
@@ -105,315 +105,91 @@ enum {
next->ptfv_list[w])) / \
(next->ptfv_list[w] + 1); \
\
- emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
+ emc_dbg(emc, EMA_UPDATES, "%s: (s=%u) EMA: %u\n", \
__stringify(dev), nval, next->ptfv_list[dqs]); \
} while (0)
/* Access a particular average. */
#define __MOVAVG(timing, dev) \
- ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
+ ((timing)->ptfv_list[(dev)])
-static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+static bool tegra210_emc_compare_update_delay(struct tegra210_emc_timing *timing,
+ u32 measured, u32 idx)
{
- bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
- struct tegra210_emc_timing *last = emc->last;
- struct tegra210_emc_timing *next = emc->next;
- u32 last_timing_rate_mhz = last->rate / 1000;
- u32 next_timing_rate_mhz = next->rate / 1000;
- bool dvfs_update = type == DVFS_UPDATE;
- s32 tdel = 0, tmdel = 0, adel = 0;
- bool dvfs_pt1 = type == DVFS_PT1;
- unsigned long cval = 0;
- u32 temp[2][2], value;
- unsigned int i;
-
- /*
- * Dev0 MSB.
- */
- if (dvfs_pt1 || periodic_training_update) {
- value = tegra210_emc_mrr_read(emc, 2, 19);
-
- for (i = 0; i < emc->num_channels; i++) {
- temp[i][0] = (value & 0x00ff) << 8;
- temp[i][1] = (value & 0xff00) << 0;
- value >>= 16;
- }
-
- /*
- * Dev0 LSB.
- */
- value = tegra210_emc_mrr_read(emc, 2, 18);
-
- for (i = 0; i < emc->num_channels; i++) {
- temp[i][0] |= (value & 0x00ff) >> 0;
- temp[i][1] |= (value & 0xff00) >> 8;
- value >>= 16;
- }
- }
-
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[0][0];
- }
+ u32 *curr = &timing->current_dram_clktree[idx];
+ u32 rate_mhz = timing->rate / 1000;
+ u32 tmdel;
- if (dvfs_pt1)
- __INCREMENT_PTFV(C0D0U0, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C0D0U0);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C0D0U0] -
- __MOVAVG_AC(next, C0D0U0);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C0D0U0] =
- __MOVAVG_AC(next, C0D0U0);
- }
+ tmdel = abs(*curr - measured);
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ if (tmdel * 128 * rate_mhz / 1000000 > timing->tree_margin) {
+ *curr = measured;
+ return true;
}
- if (dvfs_pt1)
- __INCREMENT_PTFV(C0D0U1, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C0D0U1);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C0D0U1] -
- __MOVAVG_AC(next, C0D0U1);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C0D0U1] =
- __MOVAVG_AC(next, C0D0U1);
- }
-
- if (emc->num_channels > 1) {
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[1][0];
- }
-
- if (dvfs_pt1)
- __INCREMENT_PTFV(C1D0U0, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C1D0U0);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C1D0U0] -
- __MOVAVG_AC(next, C1D0U0);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C1D0U0] =
- __MOVAVG_AC(next, C1D0U0);
- }
-
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[1][1];
- }
-
- if (dvfs_pt1)
- __INCREMENT_PTFV(C1D0U1, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C1D0U1);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C1D0U1] -
- __MOVAVG_AC(next, C1D0U1);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C1D0U1] =
- __MOVAVG_AC(next, C1D0U1);
- }
- }
-
- if (emc->num_devices < 2)
- goto done;
-
- /*
- * Dev1 MSB.
- */
- if (dvfs_pt1 || periodic_training_update) {
- value = tegra210_emc_mrr_read(emc, 1, 19);
+ return false;
+}
- for (i = 0; i < emc->num_channels; i++) {
- temp[i][0] = (value & 0x00ff) << 8;
- temp[i][1] = (value & 0xff00) << 0;
- value >>= 16;
- }
+static void tegra210_emc_get_clktree_delay(struct tegra210_emc *emc,
+ u32 delay[DRAM_CLKTREE_NUM])
+{
+ struct tegra210_emc_timing *curr = emc->last;
+ u32 rate_mhz = curr->rate / 1000;
+ u32 msb, lsb, dqsosc, delay_us;
+ unsigned int c, d, idx;
+ unsigned long clocks;
- /*
- * Dev1 LSB.
- */
- value = tegra210_emc_mrr_read(emc, 1, 18);
+ clocks = tegra210_emc_actual_osc_clocks(curr->run_clocks);
+ delay_us = 2 + (clocks / rate_mhz);
- for (i = 0; i < emc->num_channels; i++) {
- temp[i][0] |= (value & 0x00ff) >> 0;
- temp[i][1] |= (value & 0xff00) >> 8;
- value >>= 16;
- }
- }
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay_us);
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[0][0];
- }
+ for (d = 0; d < emc->num_devices; d++) {
+ /* Read DQSOSC from MRR18/19 */
+ msb = tegra210_emc_mrr_read(emc, 2 - d, 19);
+ lsb = tegra210_emc_mrr_read(emc, 2 - d, 18);
- if (dvfs_pt1)
- __INCREMENT_PTFV(C0D1U0, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C0D1U0);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C0D1U0] -
- __MOVAVG_AC(next, C0D1U0);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C0D1U0] =
- __MOVAVG_AC(next, C0D1U0);
- }
+ for (c = 0; c < emc->num_channels; c++) {
+ /* C[c]D[d]U[0] */
+ idx = c * 4 + d * 2;
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[0][1];
- }
+ dqsosc = (msb & 0x00ff) << 8;
+ dqsosc |= (lsb & 0x00ff) >> 0;
- if (dvfs_pt1)
- __INCREMENT_PTFV(C0D1U1, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C0D1U1);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C0D1U1] -
- __MOVAVG_AC(next, C0D1U1);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C0D1U1] =
- __MOVAVG_AC(next, C0D1U1);
- }
+ /* Check for unpopulated channels */
+ if (dqsosc)
+ delay[idx] = (clocks * 1000000) /
+ (rate_mhz * 2 * dqsosc);
- if (emc->num_channels > 1) {
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[1][0];
- }
+ /* C[c]D[d]U[1] */
+ idx++;
- if (dvfs_pt1)
- __INCREMENT_PTFV(C1D1U0, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C1D1U0);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C1D1U0] -
- __MOVAVG_AC(next, C1D1U0);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C1D1U0] =
- __MOVAVG_AC(next, C1D1U0);
- }
+ dqsosc = (msb & 0xff00) << 0;
+ dqsosc |= (lsb & 0xff00) >> 8;
- if (dvfs_pt1 || periodic_training_update) {
- cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
- cval *= 1000000;
- cval /= last_timing_rate_mhz * 2 * temp[1][1];
- }
+ /* Check for unpopulated channels */
+ if (dqsosc)
+ delay[idx] = (clocks * 1000000) /
+ (rate_mhz * 2 * dqsosc);
- if (dvfs_pt1)
- __INCREMENT_PTFV(C1D1U1, cval);
- else if (dvfs_update)
- __AVERAGE_PTFV(C1D1U1);
- else if (periodic_training_update)
- __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
-
- if (dvfs_update || periodic_training_update) {
- tdel = next->current_dram_clktree[C1D1U1] -
- __MOVAVG_AC(next, C1D1U1);
- tmdel = (tdel < 0) ? -1 * tdel : tdel;
-
- if (tmdel > adel)
- adel = tmdel;
-
- if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
- next->tree_margin)
- next->current_dram_clktree[C1D1U1] =
- __MOVAVG_AC(next, C1D1U1);
+ msb >>= 16;
+ lsb >>= 16;
}
}
-
-done:
- return adel;
}
-static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
- struct tegra210_emc_timing *last,
- struct tegra210_emc_timing *next)
+static bool periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+ struct tegra210_emc_timing *last,
+ struct tegra210_emc_timing *next)
{
#define __COPY_EMA(nt, lt, dev) \
({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
(nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
- u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
- u32 delay;
-
- delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
- delay *= 1000;
- delay = 2 + (delay / last->rate);
+ u32 i, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+ u32 delay[DRAM_CLKTREE_NUM], idx;
+ bool over = false;
if (!next->periodic_training)
return 0;
@@ -427,57 +203,46 @@ static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
* calibration then we can reuse the previous
* frequencies EMA data.
*/
- __COPY_EMA(next, last, C0D0U0);
- __COPY_EMA(next, last, C0D0U1);
- __COPY_EMA(next, last, C1D0U0);
- __COPY_EMA(next, last, C1D0U1);
- __COPY_EMA(next, last, C0D1U0);
- __COPY_EMA(next, last, C0D1U1);
- __COPY_EMA(next, last, C1D1U0);
- __COPY_EMA(next, last, C1D1U1);
+ for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
+ __COPY_EMA(next, last, idx);
} else {
/* Reset the EMA.*/
- __MOVAVG(next, C0D0U0) = 0;
- __MOVAVG(next, C0D0U1) = 0;
- __MOVAVG(next, C1D0U0) = 0;
- __MOVAVG(next, C1D0U1) = 0;
- __MOVAVG(next, C0D1U0) = 0;
- __MOVAVG(next, C0D1U1) = 0;
- __MOVAVG(next, C1D1U0) = 0;
- __MOVAVG(next, C1D1U1) = 0;
+ for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
+ __MOVAVG(next, idx) = 0;
for (i = 0; i < samples; i++) {
- tegra210_emc_start_periodic_compensation(emc);
- udelay(delay);
+ /* Generate next sample of data. */
+ tegra210_emc_get_clktree_delay(emc, delay);
- /*
- * Generate next sample of data.
- */
- adel = update_clock_tree_delay(emc, DVFS_PT1);
+ for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
+ __INCREMENT_PTFV(idx, delay[idx]);
}
}
- /*
- * Seems like it should be part of the
- * 'if (last_timing->periodic_training)' conditional
- * since is already done for the else clause.
- */
- adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+ for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) {
+ /* Do the division part of the moving average */
+ __AVERAGE_PTFV(idx);
+ over |= tegra210_emc_compare_update_delay(next,
+ __MOVAVG_AC(next, idx), idx);
+ }
}
if (type == PERIODIC_TRAINING_SEQUENCE) {
- tegra210_emc_start_periodic_compensation(emc);
- udelay(delay);
+ tegra210_emc_get_clktree_delay(emc, delay);
- adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+ for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) {
+ __WEIGHTED_UPDATE_PTFV(idx, delay[idx]);
+ over |= tegra210_emc_compare_update_delay(next,
+ __MOVAVG_AC(next, idx), idx);
+ }
}
- return adel;
+ return over;
}
static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
{
- u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+ u32 emc_cfg, emc_cfg_o, emc_cfg_update, value;
static const u32 list[] = {
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
@@ -492,7 +257,6 @@ static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
};
struct tegra210_emc_timing *last = emc->last;
unsigned int items = ARRAY_SIZE(list), i;
- unsigned long delay;
if (last->periodic_training) {
emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
@@ -530,30 +294,18 @@ static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
/*
* 2. osc kick off - this assumes training and dvfs have set
* correct MR23.
- */
- tegra210_emc_start_periodic_compensation(emc);
-
- /*
+ *
* 3. Let dram capture its clock tree delays.
- */
- delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
- delay *= 1000;
- delay /= last->rate + 1;
- udelay(delay);
-
- /*
+ *
* 4. Check delta wrt previous values (save value if margin
* exceeds what is set in table).
*/
- del = periodic_compensation_handler(emc,
- PERIODIC_TRAINING_SEQUENCE,
- last, last);
-
+ if (periodic_compensation_handler(emc, PERIODIC_TRAINING_SEQUENCE,
+ last, last)) {
/*
* 5. Apply compensation w.r.t. trained values (if clock tree
* has drifted more than the set margin).
*/
- if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
for (i = 0; i < items; i++) {
value = tegra210_emc_compensate(last, list[i]);
emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
@@ -734,16 +486,7 @@ static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
0);
- tegra210_emc_start_periodic_compensation(emc);
-
- delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
- udelay((delay / last->rate) + 2);
-
- value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
- next);
- value = (value * 128 * next->rate / 1000) / 1000000;
-
- if (next->periodic_training && value > next->tree_margin)
+ if (periodic_compensation_handler(emc, DVFS_SEQUENCE, fake, next))
compensate_trimmer_applicable = true;
}
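With the token pasting gone, the PTFV macros above take a plain array index, so the eight hand-unrolled C[c]D[d]U[u] blocks collapse into loops over DRAM_CLKTREE_NUM; one accumulation pass per sample, then a single division pass:

	for (i = 0; i < samples; i++) {
		tegra210_emc_get_clktree_delay(emc, delay);
		for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
			__INCREMENT_PTFV(idx, delay[idx]);	/* accumulate */
	}
	for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
		__AVERAGE_PTFV(idx);				/* divide once */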
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index d7b0a23c2d7d..921dce1b8bc6 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -979,7 +979,6 @@ static int emc_check_mc_timings(struct tegra_emc *emc)
static int emc_load_timings_from_dt(struct tegra_emc *emc,
struct device_node *node)
{
- struct device_node *child;
struct emc_timing *timing;
int child_count;
int err;
@@ -998,12 +997,10 @@ static int emc_load_timings_from_dt(struct tegra_emc *emc,
emc->num_timings = child_count;
timing = emc->timings;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node_scoped(node, child) {
err = load_one_timing_from_dt(emc, timing++, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index e192db9e0e4b..d54dc3cfff73 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/ti-aemif.h>
#define TA_SHIFT 2
#define RHOLD_SHIFT 4
@@ -330,42 +329,27 @@ static int aemif_probe(struct platform_device *pdev)
int ret = -ENODEV;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child_np;
struct aemif_device *aemif;
- struct aemif_platform_data *pdata;
- struct of_dev_auxdata *dev_lookup;
aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL);
if (!aemif)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- dev_lookup = pdata ? pdata->dev_lookup : NULL;
-
platform_set_drvdata(pdev, aemif);
- aemif->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(aemif->clk)) {
- dev_err(dev, "cannot get clock 'aemif'\n");
- return PTR_ERR(aemif->clk);
- }
-
- ret = clk_prepare_enable(aemif->clk);
- if (ret)
- return ret;
+ aemif->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(aemif->clk))
+ return dev_err_probe(dev, PTR_ERR(aemif->clk),
+ "cannot get clock 'aemif'\n");
aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
if (np && of_device_is_compatible(np, "ti,da850-aemif"))
aemif->cs_offset = 2;
- else if (pdata)
- aemif->cs_offset = pdata->cs_offset;
aemif->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(aemif->base)) {
- ret = PTR_ERR(aemif->base);
- goto error;
- }
+ if (IS_ERR(aemif->base))
+ return PTR_ERR(aemif->base);
if (np) {
/*
@@ -374,17 +358,10 @@ static int aemif_probe(struct platform_device *pdev)
* functions iterate over these nodes and update the cs data
* array.
*/
- for_each_available_child_of_node(np, child_np) {
+ for_each_available_child_of_node_scoped(np, child_np) {
ret = of_aemif_parse_abus_config(pdev, child_np);
- if (ret < 0) {
- of_node_put(child_np);
- goto error;
- }
- }
- } else if (pdata && pdata->num_abus_data > 0) {
- for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
- aemif->cs_data[i].cs = pdata->abus_data[i].cs;
- aemif_get_hw_params(pdev, i);
+ if (ret < 0)
+ return ret;
}
}
@@ -393,7 +370,7 @@ static int aemif_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(dev, "Error configuring chip select %d\n",
aemif->cs_data[i].cs);
- goto error;
+ return ret;
}
}
@@ -402,41 +379,18 @@ static int aemif_probe(struct platform_device *pdev)
* child will be probed after the AEMIF timing parameters are set.
*/
if (np) {
- for_each_available_child_of_node(np, child_np) {
- ret = of_platform_populate(child_np, NULL,
- dev_lookup, dev);
- if (ret < 0) {
- of_node_put(child_np);
- goto error;
- }
- }
- } else if (pdata) {
- for (i = 0; i < pdata->num_sub_devices; i++) {
- pdata->sub_devices[i].dev.parent = dev;
- ret = platform_device_register(&pdata->sub_devices[i]);
- if (ret) {
- dev_warn(dev, "Error register sub device %s\n",
- pdata->sub_devices[i].name);
- }
+ for_each_available_child_of_node_scoped(np, child_np) {
+ ret = of_platform_populate(child_np, NULL, NULL, dev);
+ if (ret < 0)
+ return ret;
}
}
return 0;
-error:
- clk_disable_unprepare(aemif->clk);
- return ret;
-}
-
-static void aemif_remove(struct platform_device *pdev)
-{
- struct aemif_device *aemif = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(aemif->clk);
}
static struct platform_driver aemif_driver = {
.probe = aemif_probe,
- .remove_new = aemif_remove,
.driver = {
.name = "ti-aemif",
.of_match_table = of_match_ptr(aemif_of_match),
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 3770cb1cff7d..1167a16d8fb4 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1018,14 +1018,6 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
#define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE (0x08)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
-#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_2
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -1034,7 +1026,7 @@ typedef struct _CONFIG_PAGE_IOC_2
U8 MaxVolumes; /* 09h */
U8 NumActivePhysDisks; /* 0Ah */
U8 MaxPhysDisks; /* 0Bh */
- CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
+ CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[] __counted_by(NumActiveVolumes); /* 0Ch */
} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
IOCPage2_t, MPI_POINTER pIOCPage2_t;
@@ -1064,21 +1056,13 @@ typedef struct _IOC_3_PHYS_DISK
} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
-#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_3
{
CONFIG_PAGE_HEADER Header; /* 00h */
U8 NumPhysDisks; /* 04h */
U8 Reserved1; /* 05h */
U16 Reserved2; /* 06h */
- IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
+ IOC_3_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 08h */
} CONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
IOCPage3_t, MPI_POINTER pIOCPage3_t;
@@ -1093,21 +1077,13 @@ typedef struct _IOC_4_SEP
} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_4_SEP_MAX
-#define MPI_IOC_PAGE_4_SEP_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_4
{
CONFIG_PAGE_HEADER Header; /* 00h */
U8 ActiveSEP; /* 04h */
U8 MaxSEP; /* 05h */
U16 Reserved1; /* 06h */
- IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */
+ IOC_4_SEP SEP[] __counted_by(ActiveSEP); /* 08h */
} CONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
IOCPage4_t, MPI_POINTER pIOCPage4_t;
@@ -2295,14 +2271,6 @@ typedef struct _RAID_VOL0_SETTINGS
#define MPI_RAID_HOT_SPARE_POOL_6 (0x40)
#define MPI_RAID_HOT_SPARE_POOL_7 (0x80)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
-#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_RAID_VOL_0
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -2321,7 +2289,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
U8 DataScrubRate; /* 25h */
U8 ResyncRate; /* 26h */
U8 InactiveStatus; /* 27h */
- RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
+ RAID_VOL0_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 28h */
} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
@@ -2455,14 +2423,6 @@ typedef struct _RAID_PHYS_DISK1_PATH
#define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhysDiskPaths at runtime.
- */
-#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
-#define MPI_RAID_PHYS_DISK1_PATH_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -2470,7 +2430,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
U8 PhysDiskNum; /* 05h */
U16 Reserved2; /* 06h */
U32 Reserved1; /* 08h */
- RAID_PHYS_DISK1_PATH Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
+ RAID_PHYS_DISK1_PATH Path[] __counted_by(NumPhysDiskPaths);/* 0Ch */
} CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
@@ -2555,14 +2515,6 @@ typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
} MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_SAS_IOUNIT0_PHY_MAX
-#define MPI_SAS_IOUNIT0_PHY_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
{
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
@@ -2571,7 +2523,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
U8 NumPhys; /* 0Ch */
U8 Reserved2; /* 0Dh */
U16 Reserved3; /* 0Eh */
- MPI_SAS_IO_UNIT0_PHY_DATA PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */
+ MPI_SAS_IO_UNIT0_PHY_DATA PhyData[] __counted_by(NumPhys); /* 10h */
} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
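Each __counted_by() annotation above ties a flexible array to the field holding its element count, so FORTIFY_SOURCE and UBSAN_BOUNDS builds can bounds-check accesses at runtime. A minimal sketch with hypothetical types:

	struct demo_page {
		u8		num_phys;
		struct demo_phy	phys[] __counted_by(num_phys);
	};

	/* num_phys must be assigned before phys[] is indexed */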
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 4bf669c55649..738bc4e60a18 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1856,10 +1856,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
- snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
- "mpt_poll_%d", ioc->id);
- ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
- WQ_MEM_RECLAIM, 0);
+ ioc->reset_work_q =
+ alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
@@ -1986,9 +1984,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
- snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
- ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
- WQ_MEM_RECLAIM, 0);
+ ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
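alloc_workqueue() and alloc_ordered_workqueue() take a printf-style format with varargs, so the name no longer has to be pre-rendered into a buffer held in the adapter structure; that is what allows the *_q_name fields to be dropped from mptbase.h below:

	wq = alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
	if (!wq)
		return -ENOMEM;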
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 8031173c3655..b406fd676da0 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -729,7 +729,6 @@ typedef struct _MPT_ADAPTER
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
- char fw_event_q_name[MPT_KOBJ_NAME_LEN];
struct mutex sas_discovery_mutex;
u8 sas_discovery_runtime;
@@ -764,7 +763,6 @@ typedef struct _MPT_ADAPTER
u8 fc_link_speed[2];
spinlock_t fc_rescan_work_lock;
struct work_struct fc_rescan_work;
- char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
/* driver forced bus resets count */
@@ -778,7 +776,6 @@ typedef struct _MPT_ADAPTER
spinlock_t scsi_lookup_lock;
u64 dma_mask;
u32 broadcast_aen_busy;
- char reset_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a3c17c4fe69c..91242f26defb 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1349,11 +1349,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* initialize workqueue */
- snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
- "mptfc_wq_%d", sh->host_no);
- ioc->fc_rescan_work_q =
- alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
- WQ_MEM_RECLAIM);
+ ioc->fc_rescan_work_q = alloc_ordered_workqueue(
+ "mptfc_wq_%d", WQ_MEM_RECLAIM, sh->host_no);
if (!ioc->fc_rescan_work_q) {
error = -ENOMEM;
goto out_mptfc_host;
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 384ecf5301d2..e9941da58b18 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -391,7 +391,7 @@ static void device_irq_exit_800(struct pm80x_chip *chip)
regmap_del_irq_chip(chip->irq, chip->irq_data);
}
-static struct regmap_irq_chip pm800_irq_chip = {
+static const struct regmap_irq_chip pm800_irq_chip = {
.name = "88pm800",
.irqs = pm800_irqs,
.num_irqs = ARRAY_SIZE(pm800_irqs),
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 205f0762a928..f5d6663172ee 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -73,7 +73,7 @@ static const struct mfd_cell codec_devs[] = {
},
};
-static struct regmap_irq pm805_irqs[] = {
+static const struct regmap_irq pm805_irqs[] = {
/* INT0 */
[PM805_IRQ_LDO_OFF] = {
.mask = PM805_INT1_HP1_SHRT,
@@ -163,7 +163,7 @@ static void device_irq_exit_805(struct pm80x_chip *chip)
regmap_del_irq_chip(chip->irq, chip->irq_data);
}
-static struct regmap_irq_chip pm805_irq_chip = {
+static const struct regmap_irq_chip pm805_irq_chip = {
.name = "88pm805",
.irqs = pm805_irqs,
.num_irqs = ARRAY_SIZE(pm805_irqs),
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 7f003f71e1af..8e68b64bd7f8 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -916,7 +916,7 @@ static void device_power_init(struct pm860x_chip *chip,
power_devs[0].platform_data = pdata->power;
power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata);
power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
- power_devs[0].resources = &battery_resources[0],
+ power_devs[0].resources = &battery_resources[0];
ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
&battery_resources[0], chip->irq_base, NULL);
if (ret < 0)
@@ -925,7 +925,7 @@ static void device_power_init(struct pm860x_chip *chip,
power_devs[1].platform_data = pdata->power;
power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata);
power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
- power_devs[1].resources = &charger_resources[0],
+ power_devs[1].resources = &charger_resources[0];
ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
&charger_resources[0], chip->irq_base, NULL);
if (ret < 0)
@@ -942,7 +942,7 @@ static void device_power_init(struct pm860x_chip *chip,
pdata->chg_desc->charger_regulators =
&chg_desc_regulator_data[0];
pdata->chg_desc->num_charger_regulators =
- ARRAY_SIZE(chg_desc_regulator_data),
+ ARRAY_SIZE(chg_desc_regulator_data);
power_devs[3].platform_data = pdata->chg_desc;
power_devs[3].pdata_size = sizeof(*pdata->chg_desc);
ret = mfd_add_devices(chip->dev, 0, &power_devs[3], 1,
@@ -958,7 +958,7 @@ static void device_onkey_init(struct pm860x_chip *chip,
int ret;
onkey_devs[0].num_resources = ARRAY_SIZE(onkey_resources);
- onkey_devs[0].resources = &onkey_resources[0],
+ onkey_devs[0].resources = &onkey_resources[0];
ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
ARRAY_SIZE(onkey_devs), &onkey_resources[0],
chip->irq_base, NULL);
@@ -972,7 +972,7 @@ static void device_codec_init(struct pm860x_chip *chip,
int ret;
codec_devs[0].num_resources = ARRAY_SIZE(codec_resources);
- codec_devs[0].resources = &codec_resources[0],
+ codec_devs[0].resources = &codec_resources[0];
ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
ARRAY_SIZE(codec_devs), &codec_resources[0], 0,
NULL);
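
The `,` to `;` changes here (the same fix recurs in atc260x-core.c below) are form fixes rather than behavior changes: the comma operator evaluates its left operand and then its right, so the stray commas merely fused two assignments into a single expression statement that still executed both. A sketch of why the original compiled cleanly, in plain C:

	x = a, y = b;	/* one statement: comma operator, both assignments run */
	x = a; y = b;	/* two statements: what was intended */

The fused form is fragile (inserting code between the two halves changes its meaning) and trips static analyzers, hence the cleanup.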
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index bc8be2e593b6..f9325bcce1b9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -20,6 +20,18 @@ config MFD_CS5535
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
+config MFD_ADP5585
+ tristate "Analog Devices ADP5585 keypad decoder and I/O expander driver"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C
+ depends on OF || COMPILE_TEST
+ help
+ Say yes here to add support for the Analog Devices ADP5585 GPIO
+ expander, PWM and keypad controller. This includes the I2C driver and
+ the core APIs _only_; you have to select individual components like
+ the GPIO and PWM functions under the corresponding menus.
+
config MFD_ALTERA_A10SR
bool "Altera Arria10 DevKit System Resource chip"
depends on ARCH_INTEL_SOCFPGA && SPI_MASTER=y && OF
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 02b651cd7535..2a9f91e81af8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -193,6 +193,7 @@ obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
+obj-$(CONFIG_MFD_ADP5585) += adp5585.o
obj-$(CONFIG_MFD_KEMPLD) += kempld-core.o
obj-$(CONFIG_MFD_INTEL_QUARK_I2C_GPIO) += intel_quark_i2c_gpio.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/adp5585.c b/drivers/mfd/adp5585.c
new file mode 100644
index 000000000000..160e0b38106a
--- /dev/null
+++ b/drivers/mfd/adp5585.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mfd/adp5585.h>
+#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+static const struct mfd_cell adp5585_devs[] = {
+ { .name = "adp5585-gpio", },
+ { .name = "adp5585-pwm", },
+};
+
+static const struct regmap_range adp5585_volatile_ranges[] = {
+ regmap_reg_range(ADP5585_ID, ADP5585_GPI_STATUS_B),
+};
+
+static const struct regmap_access_table adp5585_volatile_regs = {
+ .yes_ranges = adp5585_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(adp5585_volatile_ranges),
+};
+
+/*
+ * Chip variants differ in the default configuration of pull-up and pull-down
+ * resistors, and therefore have different default register values:
+ *
+ * - The -00, -01 and -03 variants (collectively referred to as
+ * ADP5585_REGMAP_00) have pull-up on all GPIO pins by default.
+ * - The -02 variant has no default pull-up or pull-down resistors.
+ * - The -04 variant has default pull-down resistors on all GPIO pins.
+ */
+
+static const u8 adp5585_regmap_defaults_00[ADP5585_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 adp5585_regmap_defaults_02[ADP5585_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3,
+ /* 0x18 */ 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 adp5585_regmap_defaults_04[ADP5585_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ /* 0x18 */ 0x05, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+enum adp5585_regmap_type {
+ ADP5585_REGMAP_00,
+ ADP5585_REGMAP_02,
+ ADP5585_REGMAP_04,
+};
+
+static const struct regmap_config adp5585_regmap_configs[] = {
+ [ADP5585_REGMAP_00] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADP5585_MAX_REG,
+ .volatile_table = &adp5585_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults_raw = adp5585_regmap_defaults_00,
+ .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_00),
+ },
+ [ADP5585_REGMAP_02] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADP5585_MAX_REG,
+ .volatile_table = &adp5585_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults_raw = adp5585_regmap_defaults_02,
+ .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_02),
+ },
+ [ADP5585_REGMAP_04] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADP5585_MAX_REG,
+ .volatile_table = &adp5585_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults_raw = adp5585_regmap_defaults_04,
+ .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_04),
+ },
+};
+
+static int adp5585_i2c_probe(struct i2c_client *i2c)
+{
+ const struct regmap_config *regmap_config;
+ struct adp5585_dev *adp5585;
+ unsigned int id;
+ int ret;
+
+ adp5585 = devm_kzalloc(&i2c->dev, sizeof(*adp5585), GFP_KERNEL);
+ if (!adp5585)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, adp5585);
+
+ regmap_config = i2c_get_match_data(i2c);
+ adp5585->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+ if (IS_ERR(adp5585->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(adp5585->regmap),
+ "Failed to initialize register map\n");
+
+ ret = regmap_read(adp5585->regmap, ADP5585_ID, &id);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret,
+ "Failed to read device ID\n");
+
+ if ((id & ADP5585_MAN_ID_MASK) != ADP5585_MAN_ID_VALUE)
+ return dev_err_probe(&i2c->dev, -ENODEV,
+ "Invalid device ID 0x%02x\n", id);
+
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
+ adp5585_devs, ARRAY_SIZE(adp5585_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret,
+ "Failed to add child devices\n");
+
+ return 0;
+}
+
+static int adp5585_suspend(struct device *dev)
+{
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev);
+
+ regcache_cache_only(adp5585->regmap, true);
+
+ return 0;
+}
+
+static int adp5585_resume(struct device *dev)
+{
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev);
+
+ regcache_cache_only(adp5585->regmap, false);
+ regcache_mark_dirty(adp5585->regmap);
+
+ return regcache_sync(adp5585->regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume);
+
+static const struct of_device_id adp5585_of_match[] = {
+ {
+ .compatible = "adi,adp5585-00",
+ .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ }, {
+ .compatible = "adi,adp5585-01",
+ .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ }, {
+ .compatible = "adi,adp5585-02",
+ .data = &adp5585_regmap_configs[ADP5585_REGMAP_02],
+ }, {
+ .compatible = "adi,adp5585-03",
+ .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ }, {
+ .compatible = "adi,adp5585-04",
+ .data = &adp5585_regmap_configs[ADP5585_REGMAP_04],
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, adp5585_of_match);
+
+static struct i2c_driver adp5585_i2c_driver = {
+ .driver = {
+ .name = "adp5585",
+ .of_match_table = adp5585_of_match,
+ .pm = pm_sleep_ptr(&adp5585_pm),
+ },
+ .probe = adp5585_i2c_probe,
+};
+module_i2c_driver(adp5585_i2c_driver);
+
+MODULE_DESCRIPTION("ADP5585 core driver");
+MODULE_AUTHOR("Haibo Chen <haibo.chen@nxp.com>");
+MODULE_LICENSE("GPL");
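
The core registers two child cells and leaves all function-specific work to them; by the usual MFD convention, a cell driver reaches the shared state through the parent device's drvdata, which i2c_set_clientdata() stores above. A sketch of a hypothetical cell probe under that assumption:

	static int adp5585_gpio_probe(struct platform_device *pdev)
	{
		/* the MFD core parents each cell to the I2C device */
		struct adp5585_dev *adp5585 =
			dev_get_drvdata(pdev->dev.parent);

		/* register access then goes through adp5585->regmap */
		return 0;
	}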
diff --git a/drivers/mfd/atc260x-core.c b/drivers/mfd/atc260x-core.c
index 67473b58b03d..6b6d5f1b9d76 100644
--- a/drivers/mfd/atc260x-core.c
+++ b/drivers/mfd/atc260x-core.c
@@ -235,8 +235,8 @@ int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_c
mutex_init(atc260x->regmap_mutex);
- regmap_cfg->lock = regmap_lock_mutex,
- regmap_cfg->unlock = regmap_unlock_mutex,
+ regmap_cfg->lock = regmap_lock_mutex;
+ regmap_cfg->unlock = regmap_unlock_mutex;
regmap_cfg->lock_arg = atc260x->regmap_mutex;
return 0;
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index dacd3c96c9f5..4051551757f2 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -209,13 +209,23 @@ static const struct regmap_access_table axp313a_volatile_table = {
};
static const struct regmap_range axp717_writeable_ranges[] = {
+ regmap_reg_range(AXP717_PMU_FAULT, AXP717_MODULE_EN_CONTROL_1),
+ regmap_reg_range(AXP717_MIN_SYS_V_CONTROL, AXP717_BOOST_CONTROL),
+ regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF),
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
+ regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
+ regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL),
+ regmap_reg_range(AXP717_ADC_DATA_SEL, AXP717_ADC_DATA_SEL),
};
static const struct regmap_range axp717_volatile_ranges[] = {
+ regmap_reg_range(AXP717_ON_INDICATE, AXP717_PMU_FAULT),
regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
+ regmap_reg_range(AXP717_BATT_PERCENT_DATA, AXP717_BATT_PERCENT_DATA),
+ regmap_reg_range(AXP717_BATT_V_H, AXP717_BATT_CHRG_I_L),
+ regmap_reg_range(AXP717_ADC_DATA_H, AXP717_ADC_DATA_L),
};
static const struct regmap_access_table axp717_writeable_table = {
@@ -308,6 +318,12 @@ static const struct resource axp22x_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
};
+static const struct resource axp717_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_OVER_V, "VBUS_OVER_V"),
+ DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+};
+
/* AXP803 and AXP813/AXP818 share the same interrupts */
static const struct resource axp803_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
@@ -422,7 +438,7 @@ static const struct regmap_config axp717_regmap_config = {
.val_bits = 8,
.wr_table = &axp717_writeable_table,
.volatile_table = &axp717_volatile_table,
- .max_register = AXP717_CPUSLDO_CONTROL,
+ .max_register = AXP717_ADC_DATA_L,
.cache_type = REGCACHE_MAPLE,
};
@@ -1024,6 +1040,13 @@ static struct mfd_cell axp313a_cells[] = {
static struct mfd_cell axp717_cells[] = {
MFD_CELL_NAME("axp20x-regulator"),
MFD_CELL_RES("axp20x-pek", axp717_pek_resources),
+ MFD_CELL_OF("axp717-adc",
+ NULL, NULL, 0, 0, "x-powers,axp717-adc"),
+ MFD_CELL_OF("axp20x-usb-power-supply",
+ axp717_usb_power_supply_resources, NULL, 0, 0,
+ "x-powers,axp717-usb-power-supply"),
+ MFD_CELL_OF("axp20x-battery-power-supply",
+ NULL, NULL, 0, 0, "x-powers,axp717-battery-power-supply"),
};
static const struct resource axp288_adc_resources[] = {
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index e7c2ac74d998..db8c2963fb48 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -93,7 +93,7 @@ static const struct regmap_irq bd9571mwv_irqs[] = {
BD9571MWV_INT_INTREQ_BKUP_TRG_INT),
};
-static struct regmap_irq_chip bd9571mwv_irq_chip = {
+static const struct regmap_irq_chip bd9571mwv_irq_chip = {
.name = "bd9571mwv",
.status_base = BD9571MWV_INT_INTREQ,
.mask_base = BD9571MWV_INT_INTMASK,
@@ -159,7 +159,7 @@ static const struct regmap_config bd9574mwf_regmap_config = {
.max_register = 0xff,
};
-static struct regmap_irq_chip bd9574mwf_irq_chip = {
+static const struct regmap_irq_chip bd9574mwf_irq_chip = {
.name = "bd9574mwf",
.status_base = BD9571MWV_INT_INTREQ,
.mask_base = BD9571MWV_INT_INTMASK,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index e2aae8918679..f3dc812b359f 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
+ * ChromeOS Embedded Controller
*
* Copyright (C) 2014 Google, Inc.
*/
@@ -353,22 +353,17 @@ static int __init cros_ec_dev_init(void)
{
int ret;
- ret = class_register(&cros_class);
+ ret = class_register(&cros_class);
if (ret) {
pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
return ret;
}
- /* Register the driver */
ret = platform_driver_register(&cros_ec_dev_driver);
- if (ret < 0) {
+ if (ret) {
pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
- goto failed_devreg;
+ class_unregister(&cros_class);
}
- return 0;
-
-failed_devreg:
- class_unregister(&cros_class);
return ret;
}
@@ -382,6 +377,6 @@ module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
-MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
+MODULE_DESCRIPTION("ChromeOS Embedded Controller");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index dbbc4779170a..637c5f47a4b0 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -25,7 +25,7 @@
#define DA9062_IRQ_LOW 0
#define DA9062_IRQ_HIGH 1
-static struct regmap_irq da9061_irqs[] = {
+static const struct regmap_irq da9061_irqs[] = {
/* EVENT A */
[DA9061_IRQ_ONKEY] = {
.reg_offset = DA9062_REG_EVENT_A_OFFSET,
@@ -79,7 +79,7 @@ static struct regmap_irq da9061_irqs[] = {
},
};
-static struct regmap_irq_chip da9061_irq_chip = {
+static const struct regmap_irq_chip da9061_irq_chip = {
.name = "da9061-irq",
.irqs = da9061_irqs,
.num_irqs = DA9061_NUM_IRQ,
@@ -89,7 +89,7 @@ static struct regmap_irq_chip da9061_irq_chip = {
.ack_base = DA9062AA_EVENT_A,
};
-static struct regmap_irq da9062_irqs[] = {
+static const struct regmap_irq da9062_irqs[] = {
/* EVENT A */
[DA9062_IRQ_ONKEY] = {
.reg_offset = DA9062_REG_EVENT_A_OFFSET,
@@ -151,7 +151,7 @@ static struct regmap_irq da9062_irqs[] = {
},
};
-static struct regmap_irq_chip da9062_irq_chip = {
+static const struct regmap_irq_chip da9062_irq_chip = {
.name = "da9062-irq",
.irqs = da9062_irqs,
.num_irqs = DA9062_NUM_IRQ,
@@ -470,7 +470,7 @@ static const struct regmap_range_cfg da9061_range_cfg[] = {
}
};
-static struct regmap_config da9061_regmap_config = {
+static const struct regmap_config da9061_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.ranges = da9061_range_cfg,
@@ -576,7 +576,7 @@ static const struct regmap_range_cfg da9062_range_cfg[] = {
}
};
-static struct regmap_config da9062_regmap_config = {
+static const struct regmap_config da9062_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.ranges = da9062_range_cfg,
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 74f38bf3778f..2e4ab2404154 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static struct regmap_config mx25_tsadc_regmap_config = {
+static const struct regmap_config mx25_tsadc_regmap_config = {
.fast_io = true,
.max_register = 8,
.reg_bits = 32,
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index b02bfdc871e9..6ca867b8f5f1 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -160,7 +160,7 @@ static const struct of_device_id gsc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gsc_of_match);
-static struct regmap_bus gsc_regmap_bus = {
+static const struct regmap_bus gsc_regmap_bus = {
.reg_read = gsc_read,
.reg_write = gsc_write,
};
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 042109304db4..5f61909c85e9 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -41,7 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = {
.mask_base = HI655X_IRQ_MASK_BASE,
};
-static struct regmap_config hi655x_regmap_config = {
+static const struct regmap_config hi655x_regmap_config = {
.reg_bits = 32,
.reg_stride = HI655X_STRIDE,
.val_bits = 8,
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 1362b3f64ade..1d8cdc4d5819 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -424,6 +424,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info },
+ /* ARL-H */
+ { PCI_VDEVICE(INTEL, 0x7725), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7726), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7727), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7730), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7746), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7750), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7751), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7752), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7778), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7779), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x777a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x777b), (kernel_ulong_t)&bxt_i2c_info },
/* RPL-S */
{ PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info },
@@ -594,6 +607,32 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ /* PTL-H */
+ { PCI_VDEVICE(INTEL, 0xe325), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe326), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe327), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe330), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe346), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe350), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe351), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe352), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe378), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe379), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe37a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe37b), (kernel_ulong_t)&ehl_i2c_info },
+ /* PTL-P */
+ { PCI_VDEVICE(INTEL, 0xe425), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe426), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe427), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe430), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe446), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe450), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe451), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe452), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe478), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe479), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe47a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe47b), (kernel_ulong_t)&ehl_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c
index 698c5933938b..4fa9d380c62b 100644
--- a/drivers/mfd/intel-m10-bmc-pmci.c
+++ b/drivers/mfd/intel-m10-bmc-pmci.c
@@ -336,7 +336,7 @@ static const struct regmap_access_table m10bmc_pmci_access_table = {
.n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range),
};
-static struct regmap_config m10bmc_pmci_regmap_config = {
+static const struct regmap_config m10bmc_pmci_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c
index d64d28199df6..36f631ef7a67 100644
--- a/drivers/mfd/intel-m10-bmc-spi.c
+++ b/drivers/mfd/intel-m10-bmc-spi.c
@@ -24,7 +24,7 @@ static const struct regmap_access_table m10bmc_access_table = {
.n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
};
-static struct regmap_config intel_m10bmc_regmap_config = {
+static const struct regmap_config intel_m10bmc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/mfd/intel_pmc_bxt.c b/drivers/mfd/intel_pmc_bxt.c
index 9f01d38acc7f..e405d7513ca1 100644
--- a/drivers/mfd/intel_pmc_bxt.c
+++ b/drivers/mfd/intel_pmc_bxt.c
@@ -23,8 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
-
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
/* Residency with clock rate at 19.2MHz to usecs */
#define S0IX_RESIDENCY_IN_USECS(d, s) \
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index ba32cacfc499..ccd76800d8e4 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -15,8 +15,7 @@
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_bxtwc.h>
#include <linux/module.h>
-
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
/* PMIC device registers */
#define REG_ADDR_MASK GENMASK(15, 8)
@@ -138,7 +137,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.name = "bxtwc_irq_chip",
.status_base = BXTWC_IRQLVL1,
.mask_base = BXTWC_MIRQLVL1,
@@ -147,7 +146,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
.name = "bxtwc_irq_chip_pwrbtn",
.status_base = BXTWC_PWRBTNIRQ,
.mask_base = BXTWC_MPWRBTNIRQ,
@@ -156,7 +155,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.name = "bxtwc_irq_chip_tmu",
.status_base = BXTWC_TMUIRQ,
.mask_base = BXTWC_MTMUIRQ,
@@ -165,7 +164,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
.name = "bxtwc_irq_chip_bcu",
.status_base = BXTWC_BCUIRQ,
.mask_base = BXTWC_MBCUIRQ,
@@ -174,7 +173,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
.name = "bxtwc_irq_chip_adc",
.status_base = BXTWC_ADCIRQ,
.mask_base = BXTWC_MADCIRQ,
@@ -183,7 +182,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
.name = "bxtwc_irq_chip_chgr",
.status_base = BXTWC_CHGR0IRQ,
.mask_base = BXTWC_MCHGR0IRQ,
@@ -192,7 +191,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
.num_regs = 2,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
.name = "bxtwc_irq_chip_crit",
.status_base = BXTWC_CRITIRQ,
.mask_base = BXTWC_MCRITIRQ,
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 7fce3ef7ab45..2a83f540d4c9 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -178,7 +178,6 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
.driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
},
diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c
index 71da861e8c27..77121775c1a3 100644
--- a/drivers/mfd/intel_soc_pmic_mrfld.c
+++ b/drivers/mfd/intel_soc_pmic_mrfld.c
@@ -12,11 +12,10 @@
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_mrfld.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <asm/intel_scu_ipc.h>
-
/*
* Level 2 IRQs
*
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 67bf4de4c0c1..6fce79ec2dc6 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -143,6 +143,7 @@ static const struct of_device_id max14577_dt_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, max14577_dt_match);
static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg)
{
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 74ef3f6d576c..89b30ef91f4f 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -400,7 +400,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
static int max77620_initialise_fps(struct max77620_chip *chip)
{
struct device *dev = chip->dev;
- struct device_node *fps_np, *fps_child;
+ struct device_node *fps_np;
u8 config;
int fps_id;
int ret;
@@ -414,10 +414,9 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
if (!fps_np)
goto skip_fps;
- for_each_child_of_node(fps_np, fps_child) {
+ for_each_child_of_node_scoped(fps_np, fps_child) {
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
- of_node_put(fps_child);
of_node_put(fps_np);
return ret;
}
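
for_each_child_of_node_scoped() declares the child pointer with a scope-based cleanup handler, so its reference is dropped automatically whenever the loop body exits; the manual of_node_put(child) on the error path becomes unnecessary (the parent fps_np still needs an explicit put). A minimal sketch of the pattern, with a hypothetical helper:

	for_each_child_of_node_scoped(parent, child) {
		ret = configure_child(child);	/* hypothetical */
		if (ret)
			return ret;	/* child's reference dropped here */
	}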
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index c973e2579bdf..9f438d5d4326 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -116,7 +116,7 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
* single transfer.
*/
-static struct regmap_bus regmap_mc13xxx_bus = {
+static const struct regmap_bus regmap_mc13xxx_bus = {
.write = mc13xxx_spi_write,
.read = mc13xxx_spi_read,
};
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index 2685efa5c9e2..b9b1036c8ff4 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -5,6 +5,7 @@
* Author: Gene Chen <gene_chen@richtek.com>
*/
+#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -404,7 +405,6 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
u8 reg_addr = *(u8 *)(reg + 1);
struct i2c_client *i2c;
bool crc_needed = false;
- u8 *buf;
int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
int read_size = val_size;
u8 crc;
@@ -423,7 +423,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
read_size += MT6360_CRC_CRC8_SIZE;
}
- buf = kzalloc(buf_len, GFP_KERNEL);
+ u8 *buf __free(kfree) = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -433,24 +433,19 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
ret = i2c_smbus_read_i2c_block_data(i2c, reg_addr, read_size,
buf + MT6360_CRC_PREDATA_OFFSET);
if (ret < 0)
- goto out;
- else if (ret != read_size) {
- ret = -EIO;
- goto out;
- }
+ return ret;
+ else if (ret != read_size)
+ return -EIO;
if (crc_needed) {
crc = crc8(ddata->crc8_tbl, buf, val_size + MT6360_CRC_PREDATA_OFFSET, 0);
- if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET]) {
- ret = -EIO;
- goto out;
- }
+ if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET])
+ return -EIO;
}
memcpy(val, buf + MT6360_CRC_PREDATA_OFFSET, val_size);
-out:
- kfree(buf);
- return (ret < 0) ? ret : 0;
+
+ return 0;
}
static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
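
Declaring the buffer as u8 *buf __free(kfree) attaches a scope-based destructor from <linux/cleanup.h>, so every return path frees it automatically; the out: label and the final (ret < 0) ? ret : 0 translation disappear. The general shape, as a sketch with a hypothetical I/O helper:

	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (read_block(buf, len) < 0)	/* hypothetical */
		return -EIO;		/* buf freed on this path */
	memcpy(out, buf, len);
	return 0;			/* ... and on this one */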
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index eab5bf6cff10..b4b178caf754 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -84,7 +84,6 @@ static const struct of_device_id pmic_spmi_id_table[] = {
static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
{
struct device_node *spmi_bus;
- struct device_node *child;
int function_parent_usid, ret;
u32 pmic_addr;
@@ -108,10 +107,9 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str
*/
spmi_bus = of_get_parent(sdev->dev.of_node);
sdev = ERR_PTR(-ENODATA);
- for_each_child_of_node(spmi_bus, child) {
+ for_each_child_of_node_scoped(spmi_bus, child) {
ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
if (ret) {
- of_node_put(child);
sdev = ERR_PTR(ret);
break;
}
@@ -125,7 +123,6 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str
*/
sdev = ERR_PTR(-EPROBE_DEFER);
}
- of_node_put(child);
break;
}
}
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 9184e553fafd..1d43458b4938 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -65,13 +65,13 @@ static const struct mfd_cell retu_devs[] = {
}
};
-static struct regmap_irq retu_irqs[] = {
+static const struct regmap_irq retu_irqs[] = {
[RETU_INT_PWR] = {
.mask = 1 << RETU_INT_PWR,
}
};
-static struct regmap_irq_chip retu_irq_chip = {
+static const struct regmap_irq_chip retu_irq_chip = {
.name = "RETU",
.irqs = retu_irqs,
.num_irqs = ARRAY_SIZE(retu_irqs),
@@ -101,13 +101,13 @@ static const struct mfd_cell tahvo_devs[] = {
},
};
-static struct regmap_irq tahvo_irqs[] = {
+static const struct regmap_irq tahvo_irqs[] = {
[TAHVO_INT_VBUS] = {
.mask = 1 << TAHVO_INT_VBUS,
}
};
-static struct regmap_irq_chip tahvo_irq_chip = {
+static const struct regmap_irq_chip tahvo_irq_chip = {
.name = "TAHVO",
.irqs = tahvo_irqs,
.num_irqs = ARRAY_SIZE(tahvo_irqs),
@@ -120,7 +120,7 @@ static struct regmap_irq_chip tahvo_irq_chip = {
static const struct retu_data {
char *chip_name;
char *companion_name;
- struct regmap_irq_chip *irq_chip;
+ const struct regmap_irq_chip *irq_chip;
const struct mfd_cell *children;
int nchildren;
} retu_data[] = {
@@ -216,7 +216,7 @@ static int retu_regmap_write(void *context, const void *data, size_t count)
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus retu_bus = {
+static const struct regmap_bus retu_bus = {
.read = retu_regmap_read,
.write = retu_regmap_write,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index 5eda3c0dbbdf..39ab114ea669 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -531,7 +531,7 @@ static const struct regmap_irq rk817_irqs[RK817_IRQ_END] = {
REGMAP_IRQ_REG_LINE(23, 8)
};
-static struct regmap_irq_chip rk805_irq_chip = {
+static const struct regmap_irq_chip rk805_irq_chip = {
.name = "rk805",
.irqs = rk805_irqs,
.num_irqs = ARRAY_SIZE(rk805_irqs),
@@ -542,7 +542,7 @@ static struct regmap_irq_chip rk805_irq_chip = {
.init_ack_masked = true,
};
-static struct regmap_irq_chip rk806_irq_chip = {
+static const struct regmap_irq_chip rk806_irq_chip = {
.name = "rk806",
.irqs = rk806_irqs,
.num_irqs = ARRAY_SIZE(rk806_irqs),
@@ -578,7 +578,7 @@ static const struct regmap_irq_chip rk816_irq_chip = {
.init_ack_masked = true,
};
-static struct regmap_irq_chip rk817_irq_chip = {
+static const struct regmap_irq_chip rk817_irq_chip = {
.name = "rk817",
.irqs = rk817_irqs,
.num_irqs = ARRAY_SIZE(rk817_irqs),
diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c
index 69a6b297d723..37287b06dab0 100644
--- a/drivers/mfd/rk8xx-i2c.c
+++ b/drivers/mfd/rk8xx-i2c.c
@@ -21,6 +21,17 @@ struct rk8xx_i2c_platform_data {
int variant;
};
+static bool rk806_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RK806_POWER_EN0 ... RK806_POWER_EN5:
+ case RK806_DVS_START_CTRL ... RK806_INT_MSK1:
+ return true;
+ }
+
+ return false;
+}
+
static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg)
{
/*
@@ -121,6 +132,14 @@ static const struct regmap_config rk805_regmap_config = {
.volatile_reg = rk808_is_volatile_reg,
};
+static const struct regmap_config rk806_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RK806_BUCK_RSERVE_REG5,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = rk806_is_volatile_reg,
+};
+
static const struct regmap_config rk808_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -150,6 +169,11 @@ static const struct rk8xx_i2c_platform_data rk805_data = {
.variant = RK805_ID,
};
+static const struct rk8xx_i2c_platform_data rk806_data = {
+ .regmap_cfg = &rk806_regmap_config,
+ .variant = RK806_ID,
+};
+
static const struct rk8xx_i2c_platform_data rk808_data = {
.regmap_cfg = &rk808_regmap_config,
.variant = RK808_ID,
@@ -201,6 +225,7 @@ static SIMPLE_DEV_PM_OPS(rk8xx_i2c_pm_ops, rk8xx_suspend, rk8xx_resume);
static const struct of_device_id rk8xx_i2c_of_match[] = {
{ .compatible = "rockchip,rk805", .data = &rk805_data },
+ { .compatible = "rockchip,rk806", .data = &rk806_data },
{ .compatible = "rockchip,rk808", .data = &rk808_data },
{ .compatible = "rockchip,rk809", .data = &rk809_data },
{ .compatible = "rockchip,rk816", .data = &rk816_data },
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 5b4290f116fc..39f7514aa3d8 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -316,7 +316,7 @@ static const struct regmap_irq bd71815_irqs[] = {
REGMAP_IRQ_REG(BD71815_INT_RTC2, 11, BD71815_INT_RTC2_MASK),
};
-static struct regmap_irq bd71828_irqs[] = {
+static const struct regmap_irq bd71828_irqs[] = {
REGMAP_IRQ_REG(BD71828_INT_BUCK1_OCP, 0, BD71828_INT_BUCK1_OCP_MASK),
REGMAP_IRQ_REG(BD71828_INT_BUCK2_OCP, 0, BD71828_INT_BUCK2_OCP_MASK),
REGMAP_IRQ_REG(BD71828_INT_BUCK3_OCP, 0, BD71828_INT_BUCK3_OCP_MASK),
@@ -407,7 +407,7 @@ static struct regmap_irq bd71828_irqs[] = {
REGMAP_IRQ_REG(BD71828_INT_RTC2, 11, BD71828_INT_RTC2_MASK),
};
-static struct regmap_irq_chip bd71828_irq_chip = {
+static const struct regmap_irq_chip bd71828_irq_chip = {
.name = "bd71828_irq",
.main_status = BD71828_REG_INT_MAIN,
.irqs = &bd71828_irqs[0],
@@ -423,7 +423,7 @@ static struct regmap_irq_chip bd71828_irq_chip = {
.irq_reg_stride = 1,
};
-static struct regmap_irq_chip bd71815_irq_chip = {
+static const struct regmap_irq_chip bd71815_irq_chip = {
.name = "bd71815_irq",
.main_status = BD71815_REG_INT_STAT,
.irqs = &bd71815_irqs[0],
@@ -491,7 +491,7 @@ static int bd71828_i2c_probe(struct i2c_client *i2c)
int ret;
struct regmap *regmap;
const struct regmap_config *regmap_config;
- struct regmap_irq_chip *irqchip;
+ const struct regmap_irq_chip *irqchip;
unsigned int chip_type;
struct mfd_cell *mfd;
int cells;
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 7755a4c073bf..25e494a93d48 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -60,7 +60,7 @@ static const struct regmap_irq bd718xx_irqs[] = {
REGMAP_IRQ_REG(BD718XX_INT_STBY_REQ, 0, BD718XX_INT_STBY_REQ_MASK),
};
-static struct regmap_irq_chip bd718xx_irq_chip = {
+static const struct regmap_irq_chip bd718xx_irq_chip = {
.name = "bd718xx-irq",
.irqs = bd718xx_irqs,
.num_irqs = ARRAY_SIZE(bd718xx_irqs),
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index 3a9f61961721..17323ae39803 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -57,7 +57,7 @@ static const struct regmap_access_table volatile_regs = {
.n_yes_ranges = ARRAY_SIZE(volatile_ranges),
};
-static struct regmap_config bd957x_regmap = {
+static const struct regmap_config bd957x_regmap = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &volatile_regs,
@@ -65,7 +65,7 @@ static struct regmap_config bd957x_regmap = {
.cache_type = REGCACHE_MAPLE,
};
-static struct regmap_irq bd9576_irqs[] = {
+static const struct regmap_irq bd9576_irqs[] = {
REGMAP_IRQ_REG(BD9576_INT_THERM, 0, BD957X_MASK_INT_MAIN_THERM),
REGMAP_IRQ_REG(BD9576_INT_OVP, 0, BD957X_MASK_INT_MAIN_OVP),
REGMAP_IRQ_REG(BD9576_INT_SCP, 0, BD957X_MASK_INT_MAIN_SCP),
@@ -76,7 +76,7 @@ static struct regmap_irq bd9576_irqs[] = {
REGMAP_IRQ_REG(BD9576_INT_SYS, 0, BD957X_MASK_INT_MAIN_SYS),
};
-static struct regmap_irq_chip bd9576_irq_chip = {
+static const struct regmap_irq_chip bd9576_irq_chip = {
.name = "bd9576_irq",
.irqs = &bd9576_irqs[0],
.num_irqs = ARRAY_SIZE(bd9576_irqs),
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 81e517cdfb27..7186e2108108 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -135,7 +135,7 @@ static int sprd_pmic_spi_read(void *context,
return 0;
}
-static struct regmap_bus sprd_pmic_regmap = {
+static const struct regmap_bus sprd_pmic_regmap = {
.write = sprd_pmic_spi_write,
.read = sprd_pmic_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 33f1e07ab24d..2ce15f60eb10 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -8,6 +8,7 @@
* Author: Dong Aisheng <dong.aisheng@linaro.org>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
@@ -45,7 +46,6 @@ static const struct regmap_config syscon_regmap_config = {
static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
{
struct clk *clk;
- struct syscon *syscon;
struct regmap *regmap;
void __iomem *base;
u32 reg_io_width;
@@ -54,20 +54,16 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct resource res;
struct reset_control *reset;
- syscon = kzalloc(sizeof(*syscon), GFP_KERNEL);
+ struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
if (!syscon)
return ERR_PTR(-ENOMEM);
- if (of_address_to_resource(np, 0, &res)) {
- ret = -ENOMEM;
- goto err_map;
- }
+ if (of_address_to_resource(np, 0, &res))
+ return ERR_PTR(-ENOMEM);
base = of_iomap(np, 0);
- if (!base) {
- ret = -ENOMEM;
- goto err_map;
- }
+ if (!base)
+ return ERR_PTR(-ENOMEM);
/* Parse the device's DT node for an endianness specification */
if (of_property_read_bool(np, "big-endian"))
@@ -152,7 +148,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
list_add_tail(&syscon->list, &syscon_list);
spin_unlock(&syscon_list_slock);
- return syscon;
+ return_ptr(syscon);
err_reset:
reset_control_put(reset);
@@ -163,8 +159,6 @@ err_clk:
regmap_exit(regmap);
err_regmap:
iounmap(base);
-err_map:
- kfree(syscon);
return ERR_PTR(ret);
}
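
syscon gets the same scope-based cleanup, with one addition: on success the allocation must outlive the function, so return_ptr() disarms the __free(kfree) handler and hands ownership to the caller, while every error return still frees the object. The ownership-transfer idiom, sketched under the same <linux/cleanup.h> assumption:

	struct syscon *syscon __free(kfree) =
		kzalloc(sizeof(*syscon), GFP_KERNEL);

	if (!syscon)
		return ERR_PTR(-ENOMEM);
	if (setup_failed)			/* hypothetical condition */
		return ERR_PTR(-ENOMEM);	/* syscon freed here */

	return_ptr(syscon);	/* cleanup disarmed; the caller owns it now */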
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index db28eb0c8995..ef953ee73145 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -312,8 +312,6 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
}
static const struct of_device_id tc3589x_match[] = {
- /* Legacy compatible string */
- { .compatible = "tc3589x", .data = (void *) TC3589X_UNKNOWN },
{ .compatible = "toshiba,tc35890", .data = (void *) TC3589X_TC35890 },
{ .compatible = "toshiba,tc35892", .data = (void *) TC3589X_TC35892 },
{ .compatible = "toshiba,tc35893", .data = (void *) TC3589X_TC35893 },
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 0da1cecb5af6..e2f6858d101e 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -23,7 +23,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
-static struct regmap_config tps6105x_regmap_config = {
+static const struct regmap_config tps6105x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TPS6105X_REG_3,
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 5ef0a7e0d61d..54832e9321b9 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -45,7 +45,7 @@ static const struct regmap_irq tps65086_irqs[] = {
REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK),
};
-static struct regmap_irq_chip tps65086_irq_chip = {
+static const struct regmap_irq_chip tps65086_irq_chip = {
.name = "tps65086",
.status_base = TPS65086_IRQ,
.mask_base = TPS65086_IRQ_MASK,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index b82cd484ac85..24f42175a9b4 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -120,7 +120,7 @@ static const struct regmap_irq tps65090_irqs[] = {
},
};
-static struct regmap_irq_chip tps65090_irq_chip = {
+static const struct regmap_irq_chip tps65090_irq_chip = {
.name = "tps65090",
.irqs = tps65090_irqs,
.num_irqs = ARRAY_SIZE(tps65090_irqs),
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 427a2b97f117..4f3e632f726f 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -186,7 +186,7 @@ static const struct regmap_irq tps65218_irqs[] = {
},
};
-static struct regmap_irq_chip tps65218_irq_chip = {
+static const struct regmap_irq_chip tps65218_irq_chip = {
.name = "tps65218",
.irqs = tps65218_irqs,
.num_irqs = ARRAY_SIZE(tps65218_irqs),
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 0e0c42e4fdfc..57ff5cb294a6 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -159,7 +159,7 @@ static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
#define TPS65219_REGMAP_IRQ_REG(int_name, register_position) \
REGMAP_IRQ_REG(int_name, register_position, int_name##_MASK)
-static struct regmap_irq tps65219_irqs[] = {
+static const struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_SCG, TPS65219_REG_INT_LDO_3_4_POS),
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_OC, TPS65219_REG_INT_LDO_3_4_POS),
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_UV, TPS65219_REG_INT_LDO_3_4_POS),
@@ -211,7 +211,7 @@ static struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_REG_INT_PB_POS),
};
-static struct regmap_irq_chip tps65219_irq_chip = {
+static const struct regmap_irq_chip tps65219_irq_chip = {
.name = "tps65219_irq",
.main_status = TPS65219_REG_INT_SOURCE,
.num_main_regs = 1,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 8fb0384d5a8e..6a7b7a697fb7 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -197,7 +197,7 @@ static const struct regmap_irq tps65910_irqs[] = {
},
};
-static struct regmap_irq_chip tps65911_irq_chip = {
+static const struct regmap_irq_chip tps65911_irq_chip = {
.name = "tps65910",
.irqs = tps65911_irqs,
.num_irqs = ARRAY_SIZE(tps65911_irqs),
@@ -208,7 +208,7 @@ static struct regmap_irq_chip tps65911_irq_chip = {
.ack_base = TPS65910_INT_STS,
};
-static struct regmap_irq_chip tps65910_irq_chip = {
+static const struct regmap_irq_chip tps65910_irq_chip = {
.name = "tps65910",
.irqs = tps65910_irqs,
.num_irqs = ARRAY_SIZE(tps65910_irqs),
@@ -223,7 +223,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata)
{
int ret;
- static struct regmap_irq_chip *tps6591x_irqs_chip;
+ static const struct regmap_irq_chip *tps6591x_irqs_chip;
if (!irq) {
dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 87ee6aac3763..a9dcd7f0d9e3 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -57,7 +57,7 @@ static const struct regmap_irq tps65912_irqs[] = {
REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO10, 3, TPS65912_INT_STS4_PGOOD_LDO10),
};
-static struct regmap_irq_chip tps65912_irq_chip = {
+static const struct regmap_irq_chip tps65912_irq_chip = {
.name = "tps65912",
.irqs = tps65912_irqs,
.num_irqs = ARRAY_SIZE(tps65912_irqs),
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c184e8bfab7c..218d6195fad2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -620,7 +620,7 @@ static const struct regmap_irq twl6040_irqs[] = {
{ .reg_offset = 0, .mask = TWL6040_READYINT, },
};
-static struct regmap_irq_chip twl6040_irq_chip = {
+static const struct regmap_irq_chip twl6040_irq_chip = {
.name = "twl6040",
.irqs = twl6040_irqs,
.num_irqs = ARRAY_SIZE(twl6040_irqs),
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index 7b9873b72c37..fcd182d51981 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -109,7 +109,7 @@ static const struct regmap_range_cfg wcd934x_ranges[] = {
},
};
-static struct regmap_config wcd934x_regmap_config = {
+static const struct regmap_config wcd934x_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.cache_type = REGCACHE_MAPLE,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 41c54051347a..3fe7e2a9bd29 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -104,6 +104,16 @@ config PHANTOM
If you choose to build module, its name will be phantom. If unsure,
say N here.
+config RPMB
+ tristate "RPMB partition interface"
+ depends on MMC
+ help
+ Unified RPMB unit interface for RPMB-capable devices such as eMMC and
+ UFS. Provides an interface for in-kernel security controllers to access
+ the RPMB unit.
+
+ If unsure, select N.
+
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c2f990862d2b..a9f94525e181 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_LKDTM) += lkdtm/
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
+obj-$(CONFIG_RPMB) += rpmb-core.o
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
diff --git a/drivers/misc/rpmb-core.c b/drivers/misc/rpmb-core.c
new file mode 100644
index 000000000000..bc68cde1a8bf
--- /dev/null
+++ b/drivers/misc/rpmb-core.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2015 - 2019 Intel Corporation. All rights reserved.
+ * Copyright(c) 2021 - 2024 Linaro Ltd.
+ */
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rpmb.h>
+#include <linux/slab.h>
+
+static DEFINE_IDA(rpmb_ida);
+static DEFINE_MUTEX(rpmb_mutex);
+
+/**
+ * rpmb_dev_get() - increase rpmb device ref counter
+ * @rdev: rpmb device
+ */
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+ if (rdev)
+ get_device(&rdev->dev);
+ return rdev;
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_get);
+
+/**
+ * rpmb_dev_put() - decrease rpmb device ref counter
+ * @rdev: rpmb device
+ */
+void rpmb_dev_put(struct rpmb_dev *rdev)
+{
+ if (rdev)
+ put_device(&rdev->dev);
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_put);
+
+/**
+ * rpmb_route_frames() - route rpmb frames to rpmb device
+ * @rdev: rpmb device
+ * @req: rpmb request frames
+ * @req_len: length of rpmb request frames in bytes
+ * @rsp: rpmb response frames
+ * @rsp_len: length of rpmb response frames in bytes
+ *
+ * Returns: < 0 on failure
+ */
+int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *rsp, unsigned int rsp_len)
+{
+ if (!req || !req_len || !rsp || !rsp_len)
+ return -EINVAL;
+
+ return rdev->descr.route_frames(rdev->dev.parent, req, req_len,
+ rsp, rsp_len);
+}
+EXPORT_SYMBOL_GPL(rpmb_route_frames);
+
+static void rpmb_dev_release(struct device *dev)
+{
+ struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+ mutex_lock(&rpmb_mutex);
+ ida_simple_remove(&rpmb_ida, rdev->id);
+ mutex_unlock(&rpmb_mutex);
+ kfree(rdev->descr.dev_id);
+ kfree(rdev);
+}
+
+static struct class rpmb_class = {
+ .name = "rpmb",
+ .dev_release = rpmb_dev_release,
+};
+
+/**
+ * rpmb_dev_find_device() - return first matching rpmb device
+ * @start: rpmb device to begin with
+ * @data: data for the match function
+ * @match: the matching function
+ *
+ * Iterate over registered RPMB devices, and call @match() for each, passing
+ * it the RPMB device and @data.
+ *
+ * The return value of @match() is checked for each call. If it returns
+ * anything other than 0, break and return the found RPMB device.
+ *
+ * It's the caller's responsibility to call rpmb_dev_put() on the returned
+ * device when it's done with it.
+ *
+ * Returns: a matching rpmb device or NULL on failure
+ */
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+ const struct rpmb_dev *start,
+ int (*match)(struct device *dev,
+ const void *data))
+{
+ struct device *dev;
+ const struct device *start_dev = NULL;
+
+ if (start)
+ start_dev = &start->dev;
+ dev = class_find_device(&rpmb_class, start_dev, data, match);
+
+ return dev ? to_rpmb_dev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_find_device);
+
+int rpmb_interface_register(struct class_interface *intf)
+{
+ intf->class = &rpmb_class;
+
+ return class_interface_register(intf);
+}
+EXPORT_SYMBOL_GPL(rpmb_interface_register);
+
+void rpmb_interface_unregister(struct class_interface *intf)
+{
+ class_interface_unregister(intf);
+}
+EXPORT_SYMBOL_GPL(rpmb_interface_unregister);
+
+/**
+ * rpmb_dev_unregister() - unregister RPMB partition from the RPMB subsystem
+ * @rdev: the rpmb device to unregister
+ *
+ * This function should be called from the release function of the
+ * underlying device used when the RPMB device was registered.
+ *
+ * Returns: < 0 on failure
+ */
+int rpmb_dev_unregister(struct rpmb_dev *rdev)
+{
+ if (!rdev)
+ return -EINVAL;
+
+ device_del(&rdev->dev);
+
+ rpmb_dev_put(rdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_unregister);
+
+/**
+ * rpmb_dev_register - register RPMB partition with the RPMB subsystem
+ * @dev: storage device of the rpmb device
+ * @descr: RPMB device description
+ *
+ * While registering the RPMB partition, extract the needed device
+ * information while the needed resources are still available.
+ *
+ * Returns: a pointer to a 'struct rpmb_dev' or an ERR_PTR on failure
+ */
+struct rpmb_dev *rpmb_dev_register(struct device *dev,
+ struct rpmb_descr *descr)
+{
+ struct rpmb_dev *rdev;
+ int ret;
+
+ if (!dev || !descr || !descr->route_frames || !descr->dev_id ||
+ !descr->dev_id_len)
+ return ERR_PTR(-EINVAL);
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+ rdev->descr = *descr;
+ rdev->descr.dev_id = kmemdup(descr->dev_id, descr->dev_id_len,
+ GFP_KERNEL);
+ if (!rdev->descr.dev_id) {
+ ret = -ENOMEM;
+ goto err_free_rdev;
+ }
+
+ mutex_lock(&rpmb_mutex);
+ ret = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL);
+ mutex_unlock(&rpmb_mutex);
+ if (ret < 0)
+ goto err_free_dev_id;
+ rdev->id = ret;
+
+ dev_set_name(&rdev->dev, "rpmb%d", rdev->id);
+ rdev->dev.class = &rpmb_class;
+ rdev->dev.parent = dev;
+
+ ret = device_register(&rdev->dev);
+ if (ret) {
+ put_device(&rdev->dev);
+ return ERR_PTR(ret);
+ }
+
+ dev_dbg(&rdev->dev, "registered device\n");
+
+ return rdev;
+
+err_free_dev_id:
+ kfree(rdev->descr.dev_id);
+err_free_rdev:
+ kfree(rdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_register);
+
+static int __init rpmb_init(void)
+{
+ int ret;
+
+ ret = class_register(&rpmb_class);
+ if (ret) {
+ pr_err("couldn't create class\n");
+ return ret;
+ }
+ ida_init(&rpmb_ida);
+ return 0;
+}
+
+static void __exit rpmb_exit(void)
+{
+ ida_destroy(&rpmb_ida);
+ class_unregister(&rpmb_class);
+}
+
+subsys_initcall(rpmb_init);
+module_exit(rpmb_exit);
+
+MODULE_AUTHOR("Jens Wiklander <jens.wiklander@linaro.org>");
+MODULE_DESCRIPTION("RPMB class");
+MODULE_LICENSE("GPL");
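
A storage driver plugs into this class by filling in a struct rpmb_descr and calling rpmb_dev_register() with its own struct device; rpmb_route_frames() then hands frames to the descriptor's callback along with that parent device. A sketch using only the API above (helper names are hypothetical, and only the descriptor fields visible in this file are assumed):

	static int foo_route_frames(struct device *dev, u8 *req,
				    unsigned int req_len, u8 *rsp,
				    unsigned int rsp_len)
	{
		/* translate RPMB frames into device commands (hypothetical) */
		return 0;
	}

	/* at probe time, in the storage driver: */
	struct rpmb_descr descr = {
		.route_frames = foo_route_frames,
		.dev_id = serial,		/* copied via kmemdup() */
		.dev_id_len = serial_len,
	};
	struct rpmb_dev *rdev = rpmb_dev_register(storage_dev, &descr);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);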
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
index 73c6da7d0963..734fdfac19ef 100644
--- a/drivers/misc/xilinx_tmr_inject.c
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -12,6 +12,7 @@
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/fault-inject.h>
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bf4e29ef023c..14d2ecbb04d3 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -37,6 +37,7 @@ config PWRSEQ_SIMPLE
config MMC_BLOCK
tristate "MMC block device driver"
depends on BLOCK
+ depends on RPMB || !RPMB
imply IOSCHED_BFQ
default y
help
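
The added "depends on RPMB || !RPMB" is the standard Kconfig idiom for an
optional dependency: it allows every combination except MMC_BLOCK=y with
RPMB=m, which would leave a built-in block driver unable to reach RPMB symbols
living in a module. Concretely:

    RPMB=n or RPMB=y  ->  MMC_BLOCK may be y or m
    RPMB=m            ->  MMC_BLOCK is restricted to m (or n)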
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2c9963248fcb..f58bea534004 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -33,6 +33,7 @@
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
@@ -40,6 +41,7 @@
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
+#include <linux/rpmb.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -76,6 +78,48 @@ MODULE_ALIAS("mmc:block");
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
+/**
+ * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51)
+ *
+ * @stuff : stuff bytes
+ * @key_mac : The authentication key or the message authentication
+ * code (MAC) depending on the request/response type.
+ * The MAC will be delivered in the last (or the only)
+ * block of data.
+ * @data : Data to be written or read by signed access.
+ * @nonce : Random number generated by the host for the requests
+ * and copied to the response by the RPMB engine.
+ * @write_counter: Counter value for the total amount of the successful
+ * authenticated data write requests made by the host.
+ * @addr : Address of the data to be programmed to or read
+ * from the RPMB. Address is the serial number of
+ * the accessed block (half sector 256B).
+ * @block_count : Number of blocks (half sectors, 256B) requested to be
+ * read/programmed.
+ * @result : Includes information about the status of the write counter
+ * (valid, expired) and result of the access made to the RPMB.
+ * @req_resp : Defines the type of request and response to/from the memory.
+ *
+ * The stuff bytes and big-endian properties are modeled to fit the spec.
+ */
+struct rpmb_frame {
+ u8 stuff[196];
+ u8 key_mac[32];
+ u8 data[256];
+ u8 nonce[16];
+ __be32 write_counter;
+ __be16 addr;
+ __be16 block_count;
+ __be16 result;
+ __be16 req_resp;
+} __packed;
+
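/*
 * Size check (worked arithmetic, not part of the patch):
 * 196 + 32 + 256 + 16 + 4 + 2 + 2 + 2 + 2 = 512 bytes, matching the
 * BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512) in set_idata() below.
 */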
+#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */
+#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */
+#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */
+#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -155,6 +199,7 @@ static const struct bus_type mmc_rpmb_bus_type = {
* @id: unique device ID number
* @part_index: partition index (0 on first)
* @md: parent MMC block device
+ * @rdev: registered RPMB device
* @node: list item, so we can put this device on a list
*/
struct mmc_rpmb_data {
@@ -163,6 +208,7 @@ struct mmc_rpmb_data {
int id;
unsigned int part_index;
struct mmc_blk_data *md;
+ struct rpmb_dev *rdev;
struct list_head node;
};
@@ -307,10 +353,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
- char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- unsigned long set = simple_strtoul(buf, &end, 0);
- if (end == buf) {
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set)) {
ret = -EINVAL;
goto out;
}
@@ -2484,7 +2530,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
return ERR_PTR(devidx);
}
- md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
@@ -2670,7 +2716,6 @@ static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
get_device(&rpmb->dev);
filp->private_data = rpmb;
- mmc_blk_get(rpmb->md->disk);
return nonseekable_open(inode, filp);
}
@@ -2680,7 +2725,6 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- mmc_blk_put(rpmb->md);
put_device(&rpmb->dev);
return 0;
@@ -2701,10 +2745,165 @@ static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+ rpmb_dev_unregister(rpmb->rdev);
+ mmc_blk_put(rpmb->md);
ida_free(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
+static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count)
+{
+ unsigned int n;
+
+ for (n = 0; n < cmd_count; n++)
+ kfree(idata[n]);
+ kfree(idata);
+}
+
+static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb,
+ unsigned int cmd_count)
+{
+ struct mmc_blk_ioc_data **idata;
+ unsigned int n;
+
+ idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return NULL;
+
+ for (n = 0; n < cmd_count; n++) {
+ idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL);
+ if (!idata[n]) {
+ free_idata(idata, n);
+ return NULL;
+ }
+ idata[n]->rpmb = rpmb;
+ }
+
+ return idata;
+}
+
+static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
+ int write_flag, u8 *buf, unsigned int buf_bytes)
+{
+ /*
+ * The size of an RPMB frame must match what's expected by the
+ * hardware.
+ */
+ BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512);
+
+ idata->ic.opcode = opcode;
+ idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ idata->ic.write_flag = write_flag;
+ idata->ic.blksz = sizeof(struct rpmb_frame);
+ idata->ic.blocks = buf_bytes / idata->ic.blksz;
+ idata->buf = buf;
+ idata->buf_bytes = buf_bytes;
+}
+
+static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ struct rpmb_frame *frm = (struct rpmb_frame *)req;
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+ struct mmc_blk_data *md = rpmb->md;
+ struct mmc_blk_ioc_data **idata;
+ struct mmc_queue_req *mq_rq;
+ unsigned int cmd_count;
+ struct request *rq;
+ u16 req_type;
+ bool write;
+ int ret;
+
+ if (IS_ERR(md->queue.card))
+ return PTR_ERR(md->queue.card);
+
+ if (req_len < sizeof(*frm))
+ return -EINVAL;
+
+ req_type = be16_to_cpu(frm->req_resp);
+ switch (req_type) {
+ case RPMB_PROGRAM_KEY:
+ if (req_len != sizeof(struct rpmb_frame) ||
+ resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_GET_WRITE_COUNTER:
+ if (req_len != sizeof(struct rpmb_frame) ||
+ resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ write = false;
+ break;
+ case RPMB_WRITE_DATA:
+ if (req_len % sizeof(struct rpmb_frame) ||
+ resp_len != sizeof(struct rpmb_frame))
+ return -EINVAL;
+ write = true;
+ break;
+ case RPMB_READ_DATA:
+ if (req_len != sizeof(struct rpmb_frame) ||
+ resp_len % sizeof(struct rpmb_frame))
+ return -EINVAL;
+ write = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (write)
+ cmd_count = 3;
+ else
+ cmd_count = 2;
+
+ idata = alloc_idata(rpmb, cmd_count);
+ if (!idata)
+ return -ENOMEM;
+
+ if (write) {
+ struct rpmb_frame *frm = (struct rpmb_frame *)resp;
+
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
+ 1 | MMC_CMD23_ARG_REL_WR, req, req_len);
+
+ /* Send result request frame */
+ memset(frm, 0, sizeof(*frm));
+ frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
+ resp_len);
+
+ /* Read response frame */
+ set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ } else {
+ /* Send write request frame(s) */
+ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len);
+
+ /* Read response frame */
+ set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len);
+ }
+
+ rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ mq_rq = req_to_mmc_queue_req(rq);
+ mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB;
+ mq_rq->drv_op_result = -EIO;
+ mq_rq->drv_op_data = idata;
+ mq_rq->ioc_count = cmd_count;
+ blk_execute_rq(rq, false);
+ ret = req_to_mmc_queue_req(rq)->drv_op_result;
+
+ blk_mq_free_request(rq);
+
+out:
+ free_idata(idata, cmd_count);
+ return ret;
+}
+
static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_blk_data *md,
unsigned int part_index,
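
To sketch how a consumer drives mmc_route_rpmb_frames() through the RPMB core,
assuming the core exposes a helper along the lines of rpmb_route_frames() that
ends up calling descr.route_frames (the helper name is an assumption for
illustration):

/* Hypothetical caller: read the RPMB write counter. */
static int read_write_counter(struct rpmb_dev *rdev, const u8 nonce[16],
			      u32 *counter)
{
	/* Two 512-byte frames on the stack suit a sketch; a real driver
	 * would kzalloc() them. */
	struct rpmb_frame req = {}, resp = {};
	int ret;

	req.req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER);
	memcpy(req.nonce, nonce, sizeof(req.nonce));

	ret = rpmb_route_frames(rdev, (u8 *)&req, sizeof(req),
				(u8 *)&resp, sizeof(resp));
	if (ret)
		return ret;

	/* A real caller must verify the nonce and MAC in resp first. */
	*counter = be32_to_cpu(resp.write_counter);
	return 0;
}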
@@ -2739,6 +2938,7 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
rpmb->dev.release = mmc_blk_rpmb_device_release;
device_initialize(&rpmb->dev);
dev_set_drvdata(&rpmb->dev, rpmb);
+ mmc_blk_get(md->disk);
rpmb->md = md;
cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
@@ -3000,6 +3200,42 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
#endif /* CONFIG_DEBUG_FS */
+static void mmc_blk_rpmb_add(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct mmc_rpmb_data *rpmb;
+ struct rpmb_dev *rdev;
+ unsigned int n;
+ u32 cid[4];
+ struct rpmb_descr descr = {
+ .type = RPMB_TYPE_EMMC,
+ .route_frames = mmc_route_rpmb_frames,
+ .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
+ 2 : 32,
+ .capacity = card->ext_csd.raw_rpmb_size_mult,
+ .dev_id = (void *)cid,
+ .dev_id_len = sizeof(cid),
+ };
+
+ /*
+	 * Provide the CID as an octet array. The CID needs to be interpreted
+ * when used as input to derive the RPMB key since some fields
+ * will change due to firmware updates.
+ */
+ for (n = 0; n < 4; n++)
+ cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]);
+
+ list_for_each_entry(rpmb, &md->rpmbs, node) {
+ rdev = rpmb_dev_register(&rpmb->dev, &descr);
+ if (IS_ERR(rdev)) {
+ pr_warn("%s: could not register RPMB device\n",
+ dev_name(&rpmb->dev));
+ continue;
+ }
+ rpmb->rdev = rdev;
+ }
+}
+
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
@@ -3045,6 +3281,8 @@ static int mmc_blk_probe(struct mmc_card *card)
pm_runtime_enable(&card->dev);
}
+ mmc_blk_rpmb_add(card);
+
return 0;
out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5b2f7c285461..6a23be214543 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -51,20 +51,6 @@ static const unsigned int taac_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
-#define UNSTUFF_BITS(resp,start,size) \
- ({ \
- const int __size = size; \
- const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
- const int __off = 3 - ((start) / 32); \
- const int __shft = (start) & 31; \
- u32 __res; \
- \
- __res = resp[__off] >> __shft; \
- if (__size + __shft > 32) \
- __res |= resp[__off-1] << ((32 - __shft) % 32); \
- __res & __mask; \
- })
-
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -85,36 +71,36 @@ static int mmc_decode_cid(struct mmc_card *card)
switch (card->csd.mmca_vsn) {
case 0: /* MMC v1.0 - v1.2 */
case 1: /* MMC v1.4 */
- card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
- card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
- card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
- card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
- card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
- card->cid.month = UNSTUFF_BITS(resp, 12, 4);
- card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ card->cid.manfid = unstuff_bits(resp, 104, 24);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.prod_name[5] = unstuff_bits(resp, 56, 8);
+ card->cid.prod_name[6] = unstuff_bits(resp, 48, 8);
+ card->cid.hwrev = unstuff_bits(resp, 44, 4);
+ card->cid.fwrev = unstuff_bits(resp, 40, 4);
+ card->cid.serial = unstuff_bits(resp, 16, 24);
+ card->cid.month = unstuff_bits(resp, 12, 4);
+ card->cid.year = unstuff_bits(resp, 8, 4) + 1997;
break;
case 2: /* MMC v2.0 - v2.2 */
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
- card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
- card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
- card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
- card->cid.month = UNSTUFF_BITS(resp, 12, 4);
- card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ card->cid.manfid = unstuff_bits(resp, 120, 8);
+ card->cid.oemid = unstuff_bits(resp, 104, 16);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.prod_name[5] = unstuff_bits(resp, 56, 8);
+ card->cid.prv = unstuff_bits(resp, 48, 8);
+ card->cid.serial = unstuff_bits(resp, 16, 32);
+ card->cid.month = unstuff_bits(resp, 12, 4);
+ card->cid.year = unstuff_bits(resp, 8, 4) + 1997;
break;
default:
@@ -161,43 +147,43 @@ static int mmc_decode_csd(struct mmc_card *card)
* v1.2 has extra information in bits 15, 11 and 10.
* We also support eMMC v4.4 & v4.41.
*/
- csd->structure = UNSTUFF_BITS(resp, 126, 2);
+ csd->structure = unstuff_bits(resp, 126, 2);
if (csd->structure == 0) {
pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd->structure);
return -EINVAL;
}
- csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
- m = UNSTUFF_BITS(resp, 115, 4);
- e = UNSTUFF_BITS(resp, 112, 3);
+ csd->mmca_vsn = unstuff_bits(resp, 122, 4);
+ m = unstuff_bits(resp, 115, 4);
+ e = unstuff_bits(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
- csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+ csd->taac_clks = unstuff_bits(resp, 104, 8) * 100;
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
- e = UNSTUFF_BITS(resp, 47, 3);
- m = UNSTUFF_BITS(resp, 62, 12);
+ e = unstuff_bits(resp, 47, 3);
+ m = unstuff_bits(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
- csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
- csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
- csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
- csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
- csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
- csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
- csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
- csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+ csd->read_blkbits = unstuff_bits(resp, 80, 4);
+ csd->read_partial = unstuff_bits(resp, 79, 1);
+ csd->write_misalign = unstuff_bits(resp, 78, 1);
+ csd->read_misalign = unstuff_bits(resp, 77, 1);
+ csd->dsr_imp = unstuff_bits(resp, 76, 1);
+ csd->r2w_factor = unstuff_bits(resp, 26, 3);
+ csd->write_blkbits = unstuff_bits(resp, 22, 4);
+ csd->write_partial = unstuff_bits(resp, 21, 1);
if (csd->write_blkbits >= 9) {
- a = UNSTUFF_BITS(resp, 42, 5);
- b = UNSTUFF_BITS(resp, 37, 5);
+ a = unstuff_bits(resp, 42, 5);
+ b = unstuff_bits(resp, 37, 5);
csd->erase_size = (a + 1) * (b + 1);
csd->erase_size <<= csd->write_blkbits - 9;
- csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5);
+ csd->wp_grp_size = unstuff_bits(resp, 32, 5);
}
return 0;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 92d4194c7893..06017110e1b0 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -56,5 +56,19 @@ int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms);
+static inline u32 unstuff_bits(const u32 *resp, int start, int size)
+{
+ const int __size = size;
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;
+ const int __off = 3 - (start / 32);
+ const int __shft = start & 31;
+ u32 __res = resp[__off] >> __shft;
+
+ if (__size + __shft > 32)
+ __res |= resp[__off - 1] << ((32 - __shft) % 32);
+
+ return __res & __mask;
+}
+
#endif
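
As a worked example of the helper above (not part of the patch), extracting
the 8-bit manufacturer ID from CID bits 127:120:

/*
 * unstuff_bits(resp, 120, 8):
 *   __off  = 3 - 120 / 32 = 0        (the u32 holding the top bits)
 *   __shft = 120 & 31     = 24
 *   __mask = (1 << 8) - 1 = 0xff
 *   result = (resp[0] >> 24) & 0xff
 * With an example value resp[0] = 0x15010038, the manufacturer ID is 0x15.
 */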
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
index 005247a49e51..01747ab1024e 100644
--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -255,7 +255,9 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
if (IS_ERR(mmc->supply.vmmc)) {
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "vmmc regulator not available\n");
+
dev_dbg(dev, "No vmmc regulator found\n");
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
@@ -267,7 +269,9 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
if (IS_ERR(mmc->supply.vqmmc)) {
if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "vqmmc regulator not available\n");
+
dev_dbg(dev, "No vqmmc regulator found\n");
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ee37ad14e79e..12fe282bea77 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -56,20 +56,6 @@ static const unsigned int sd_au_size[] = {
SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};
-#define UNSTUFF_BITS(resp,start,size) \
- ({ \
- const int __size = size; \
- const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
- const int __off = 3 - ((start) / 32); \
- const int __shft = (start) & 31; \
- u32 __res; \
- \
- __res = resp[__off] >> __shft; \
- if (__size + __shft > 32) \
- __res |= resp[__off-1] << ((32 - __shft) % 32); \
- __res & __mask; \
- })
-
#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
@@ -95,18 +81,18 @@ void mmc_decode_cid(struct mmc_card *card)
* SD doesn't currently have a version field so we will
* have to assume we can parse this.
*/
- card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
- card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
- card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
- card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
- card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
- card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
- card->cid.year = UNSTUFF_BITS(resp, 12, 8);
- card->cid.month = UNSTUFF_BITS(resp, 8, 4);
+ card->cid.manfid = unstuff_bits(resp, 120, 8);
+ card->cid.oemid = unstuff_bits(resp, 104, 16);
+ card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
+ card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
+ card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
+ card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
+ card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
+ card->cid.hwrev = unstuff_bits(resp, 60, 4);
+ card->cid.fwrev = unstuff_bits(resp, 56, 4);
+ card->cid.serial = unstuff_bits(resp, 24, 32);
+ card->cid.year = unstuff_bits(resp, 12, 8);
+ card->cid.month = unstuff_bits(resp, 8, 4);
card->cid.year += 2000; /* SD cards year offset */
}
@@ -120,41 +106,41 @@ static int mmc_decode_csd(struct mmc_card *card)
unsigned int e, m, csd_struct;
u32 *resp = card->raw_csd;
- csd_struct = UNSTUFF_BITS(resp, 126, 2);
+ csd_struct = unstuff_bits(resp, 126, 2);
switch (csd_struct) {
case 0:
- m = UNSTUFF_BITS(resp, 115, 4);
- e = UNSTUFF_BITS(resp, 112, 3);
+ m = unstuff_bits(resp, 115, 4);
+ e = unstuff_bits(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
- csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+ csd->taac_clks = unstuff_bits(resp, 104, 8) * 100;
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
- e = UNSTUFF_BITS(resp, 47, 3);
- m = UNSTUFF_BITS(resp, 62, 12);
+ e = unstuff_bits(resp, 47, 3);
+ m = unstuff_bits(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
- csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
- csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
- csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
- csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
- csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
- csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
- csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
- csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+ csd->read_blkbits = unstuff_bits(resp, 80, 4);
+ csd->read_partial = unstuff_bits(resp, 79, 1);
+ csd->write_misalign = unstuff_bits(resp, 78, 1);
+ csd->read_misalign = unstuff_bits(resp, 77, 1);
+ csd->dsr_imp = unstuff_bits(resp, 76, 1);
+ csd->r2w_factor = unstuff_bits(resp, 26, 3);
+ csd->write_blkbits = unstuff_bits(resp, 22, 4);
+ csd->write_partial = unstuff_bits(resp, 21, 1);
- if (UNSTUFF_BITS(resp, 46, 1)) {
+ if (unstuff_bits(resp, 46, 1)) {
csd->erase_size = 1;
} else if (csd->write_blkbits >= 9) {
- csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ csd->erase_size = unstuff_bits(resp, 39, 7) + 1;
csd->erase_size <<= csd->write_blkbits - 9;
}
- if (UNSTUFF_BITS(resp, 13, 1))
+ if (unstuff_bits(resp, 13, 1))
mmc_card_set_readonly(card);
break;
case 1:
@@ -169,17 +155,17 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->taac_ns = 0; /* Unused */
csd->taac_clks = 0; /* Unused */
- m = UNSTUFF_BITS(resp, 99, 4);
- e = UNSTUFF_BITS(resp, 96, 3);
+ m = unstuff_bits(resp, 99, 4);
+ e = unstuff_bits(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
- csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
- csd->c_size = UNSTUFF_BITS(resp, 48, 22);
+ csd->cmdclass = unstuff_bits(resp, 84, 12);
+ csd->c_size = unstuff_bits(resp, 48, 22);
/* SDXC cards have a minimum C_SIZE of 0x00FFFF */
if (csd->c_size >= 0xFFFF)
mmc_card_set_ext_capacity(card);
- m = UNSTUFF_BITS(resp, 48, 22);
+ m = unstuff_bits(resp, 48, 22);
csd->capacity = (1 + m) << 10;
csd->read_blkbits = 9;
@@ -191,7 +177,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_partial = 0;
csd->erase_size = 1;
- if (UNSTUFF_BITS(resp, 13, 1))
+ if (unstuff_bits(resp, 13, 1))
mmc_card_set_readonly(card);
break;
default:
@@ -217,33 +203,33 @@ static int mmc_decode_scr(struct mmc_card *card)
resp[3] = card->raw_scr[1];
resp[2] = card->raw_scr[0];
- scr_struct = UNSTUFF_BITS(resp, 60, 4);
+ scr_struct = unstuff_bits(resp, 60, 4);
if (scr_struct != 0) {
pr_err("%s: unrecognised SCR structure version %d\n",
mmc_hostname(card->host), scr_struct);
return -EINVAL;
}
- scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
- scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+ scr->sda_vsn = unstuff_bits(resp, 56, 4);
+ scr->bus_widths = unstuff_bits(resp, 48, 4);
if (scr->sda_vsn == SCR_SPEC_VER_2)
/* Check if Physical Layer Spec v3.0 is supported */
- scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+ scr->sda_spec3 = unstuff_bits(resp, 47, 1);
if (scr->sda_spec3) {
- scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
- scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
+ scr->sda_spec4 = unstuff_bits(resp, 42, 1);
+ scr->sda_specx = unstuff_bits(resp, 38, 4);
}
- if (UNSTUFF_BITS(resp, 55, 1))
+ if (unstuff_bits(resp, 55, 1))
card->erased_byte = 0xFF;
else
card->erased_byte = 0x0;
if (scr->sda_spec4)
- scr->cmds = UNSTUFF_BITS(resp, 32, 4);
+ scr->cmds = unstuff_bits(resp, 32, 4);
else if (scr->sda_spec3)
- scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+ scr->cmds = unstuff_bits(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
@@ -289,17 +275,17 @@ static int mmc_read_ssr(struct mmc_card *card)
kfree(raw_ssr);
/*
- * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * unstuff_bits only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
- au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
+ au = unstuff_bits(card->raw_ssr, 428 - 384, 4);
if (au) {
if (au <= 9 || card->scr.sda_spec3) {
card->ssr.au = sd_au_size[au];
- es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
- et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
+ es = unstuff_bits(card->raw_ssr, 408 - 384, 16);
+ et = unstuff_bits(card->raw_ssr, 402 - 384, 6);
if (es && et) {
- eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
+ eo = unstuff_bits(card->raw_ssr, 400 - 384, 2);
card->ssr.erase_timeout = (et * 1000) / es;
card->ssr.erase_offset = eo * 1000;
}
@@ -313,7 +299,7 @@ static int mmc_read_ssr(struct mmc_card *card)
* starting SD5.1 discard is supported if DISCARD_SUPPORT (b313) is set
*/
resp[3] = card->raw_ssr[6];
- discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
+ discard_support = unstuff_bits(resp, 313 - 288, 1);
card->erase_arg = (card->scr.sda_specx && discard_support) ?
SD_DISCARD_ARG : SD_ERASE_ARG;
@@ -346,7 +332,7 @@ static int mmc_read_switch(struct mmc_card *card)
* The argument does not matter, as the support bits do not
* change with the arguments.
*/
- err = mmc_sd_switch(card, 0, 0, 0, status);
+ err = mmc_sd_switch(card, SD_SWITCH_CHECK, 0, 0, status);
if (err) {
/*
* If the host or the card can't do the switch,
@@ -402,7 +388,8 @@ int mmc_sd_switch_hs(struct mmc_card *card)
if (!status)
return -ENOMEM;
- err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 0,
+ HIGH_SPEED_BUS_SPEED, status);
if (err)
goto out;
@@ -434,7 +421,8 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
card_drv_type, &drv_type);
if (drive_strength) {
- err = mmc_sd_switch(card, 1, 2, drive_strength, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 2,
+ drive_strength, status);
if (err)
return err;
if ((status[15] & 0xF) != drive_strength) {
@@ -514,7 +502,7 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return 0;
}
- err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 0, card->sd_bus_speed, status);
if (err)
return err;
@@ -605,7 +593,8 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
current_limit = SD_SET_CURRENT_LIMIT_200;
if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
- err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ err = mmc_sd_switch(card, SD_SWITCH_SET, 3,
+ current_limit, status);
if (err)
return err;
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 8b9b34286ef3..f93c392040ae 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -336,14 +336,13 @@ int mmc_app_send_scr(struct mmc_card *card)
return 0;
}
-int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
u8 value, u8 *resp)
{
u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
- mode = !!mode;
value &= 0xF;
cmd_args = mode << 31 | 0x00FFFFFF;
cmd_args &= ~(0xF << (group * 4));
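
A worked example of the argument encoding (the OR of the value nibble follows
just past the end of this hunk):

/*
 * mmc_sd_switch(card, SD_SWITCH_SET, 0, 1, resp) -- select high-speed
 * in function group 1 (group index 0):
 *   cmd_args  = 1 << 31 | 0x00FFFFFF;  -> 0x80ffffff (all groups "no change")
 *   cmd_args &= ~(0xF << 0);           -> 0x80fffff0 (clear group 0 nibble)
 *   cmd_args |= 1 << 0;                -> 0x80fffff1 (request function 1)
 */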
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index eb3ecfe05591..7199cb0bd0b9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -252,6 +252,18 @@ config MMC_SDHCI_OF_SPARX5
If unsure, say N.
+config MMC_SDHCI_OF_MA35D1
+ tristate "SDHCI OF support for the MA35D1 SDHCI controller"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the MA35D1 Secure Digital Host Controller Interface.
+ The controller supports SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_CADENCE
tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
depends on MMC_SDHCI_PLTFM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f53f86d200ac..3ccffebbe59b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o
obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o
+obj-$(CONFIG_MMC_SDHCI_OF_MA35D1) += sdhci-of-ma35d1.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_NPCM) += sdhci-npcm.o
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index a02da26a1efd..178277d90c31 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -33,6 +33,11 @@ struct cqhci_slot {
#define CQHCI_HOST_OTHER BIT(4)
};
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
return cq_host->desc_base + (tag * cq_host->slot_sz);
@@ -282,7 +287,7 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+ if (cqhci_halted(cq_host))
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
@@ -617,7 +622,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
- if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ if (cqhci_halted(cq_host)) {
pr_err("%s: cqhci: CQE failed to exit halt state\n",
mmc_hostname(mmc));
}
@@ -953,11 +958,6 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
return ret;
}
-static bool cqhci_halted(struct cqhci_host *cq_host)
-{
- return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
-}
-
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
struct cqhci_host *cq_host = mmc->cqe_private;
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index b07190ba4b7a..f96260fd143b 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -15,7 +15,17 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-#define RK3288_CLKGEN_DIV 2
+#define RK3288_CLKGEN_DIV 2
+#define SDMMC_TIMING_CON0 0x130
+#define SDMMC_TIMING_CON1 0x134
+#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+#define ROCKCHIP_MMC_DEGREE_MASK 0x3
+#define ROCKCHIP_MMC_DEGREE_OFFSET 1
+#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
+#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
@@ -24,8 +34,143 @@ struct dw_mci_rockchip_priv_data {
struct clk *sample_clk;
int default_sample_phase;
int num_phases;
+ bool internal_phase;
};
+/*
+ * Each fine delay element is between 44ps and 77ps. Assume 60ps per element
+ * to simplify the calculations, so a nominal 45 degrees could actually land
+ * anywhere between 33 and 57.8 degrees.
+ */
+static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample)
+{
+ unsigned long rate = clk_get_rate(host->ciu_clk);
+ u32 raw_value;
+ u16 degrees;
+ u32 delay_num = 0;
+
+ /* Constant signal, no measurable phase shift */
+ if (!rate)
+ return 0;
+
+ if (sample)
+ raw_value = mci_readl(host, TIMING_CON1);
+ else
+ raw_value = mci_readl(host, TIMING_CON0);
+
+ raw_value >>= ROCKCHIP_MMC_DEGREE_OFFSET;
+ degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+
+ if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
+ /* degrees/delaynum * 1000000 */
+ unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
+ 36 * (rate / 10000);
+
+ delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
+ delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
+ }
+
+ return degrees % 360;
+}
+
+static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample)
+{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ struct clk *clock = sample ? priv->sample_clk : priv->drv_clk;
+
+ if (priv->internal_phase)
+ return rockchip_mmc_get_internal_phase(host, sample);
+ else
+ return clk_get_phase(clock);
+}
+
+static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees)
+{
+ unsigned long rate = clk_get_rate(host->ciu_clk);
+ u8 nineties, remainder;
+ u8 delay_num;
+ u32 raw_value;
+ u32 delay;
+
+ /*
+	 * The calculation below is based on the output clock from the
+	 * MMC host to the card, and expects that the phase clock inherits
+	 * its rate from its parent, namely the output clock provider of
+	 * the MMC host. However, things may go wrong if
+	 * (1) it is an orphan, or
+	 * (2) it is assigned to the wrong parent.
+	 *
+	 * This check helps debug case (1), which seems to be the most
+	 * common problem in practice and makes unstable mmc tuning
+	 * results difficult to debug.
+ */
+ if (!rate) {
+ dev_err(host->dev, "%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
+ nineties = degrees / 90;
+ remainder = (degrees % 90);
+
+ /*
+ * Due to the inexact nature of the "fine" delay, we might
+	 * actually go non-monotonic. We don't go _too_ non-monotonic
+	 * though, so we should be OK. Here is how the sequence of
+	 * phases may work out:
+	 *
+	 * Ideally we end up with:
+	 * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
+	 *
+	 * On one extreme (if the delay is actually 44ps):
+	 * 0.73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
+	 * On the other (if the delay is actually 77ps):
+	 * 1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90.0
+ *
+ * It's possible we might make a delay that is up to 25
+ * degrees off from what we think we're making. That's OK
+ * though because we should be REALLY far from any bad range.
+ */
+
+ /*
+ * Convert to delay; do a little extra work to make sure we
+ * don't overflow 32-bit / 64-bit numbers.
+ */
+ delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
+ delay *= remainder;
+ delay = DIV_ROUND_CLOSEST(delay,
+ (rate / 1000) * 36 *
+ (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
+
+ delay_num = (u8) min_t(u32, delay, 255);
+
+ raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
+ raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ raw_value |= nineties;
+
+ if (sample)
+ mci_writel(host, TIMING_CON1, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+ else
+ mci_writel(host, TIMING_CON0, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+
+ dev_dbg(host->dev, "set %s_phase(%d) delay_nums=%u actual_degrees=%d\n",
+ sample ? "sample" : "drv", degrees, delay_num,
+		rockchip_mmc_get_phase(host, sample));
+
+ return 0;
+}
+
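/*
 * Worked sketch of the arithmetic above, assuming the nominal 60ps element
 * (not part of the patch). At rate = 150 MHz, degrees = 135:
 *   nineties = 1, remainder = 45
 *   one period = 1e12 / 150e6 ps ~= 6667 ps, so 45 deg ~= 833 ps
 *   delay = 10000000 * 45 / ((150000000 / 1000) * 36 * 6)
 *         = 450000000 / 32400000 -> DIV_ROUND_CLOSEST -> 14
 *   14 elements * 60 ps = 840 ps, i.e. ~45.4 deg of fine delay on top
 *   of the 90 deg coarse step.
 */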
+static int rockchip_mmc_set_phase(struct dw_mci *host, bool sample, int degrees)
+{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ struct clk *clock = sample ? priv->sample_clk : priv->drv_clk;
+
+ if (priv->internal_phase)
+ return rockchip_mmc_set_internal_phase(host, sample, degrees);
+ else
+ return clk_set_phase(clock, degrees);
+}
+
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_rockchip_priv_data *priv = host->priv;
@@ -64,7 +209,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
/* Make sure we use phases which we can enumerate with */
if (!IS_ERR(priv->sample_clk) && ios->timing <= MMC_TIMING_SD_HS)
- clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+ rockchip_mmc_set_phase(host, true, priv->default_sample_phase);
/*
* Set the drive phase offset based on speed mode to achieve hold times.
@@ -127,7 +272,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
break;
}
- clk_set_phase(priv->drv_clk, phase);
+ rockchip_mmc_set_phase(host, false, phase);
}
}
@@ -151,6 +296,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
int longest_range_len = -1;
int longest_range = -1;
int middle_phase;
+ int phase;
if (IS_ERR(priv->sample_clk)) {
dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
@@ -164,8 +310,10 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
/* Try each phase and extract good ranges */
for (i = 0; i < priv->num_phases; ) {
- clk_set_phase(priv->sample_clk,
- TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
+		rockchip_mmc_set_phase(host, true,
+				       TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
v = !mmc_send_tuning(mmc, opcode, NULL);
@@ -211,7 +359,8 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
}
if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) {
- clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+ rockchip_mmc_set_phase(host, true, priv->default_sample_phase);
+
dev_info(host->dev, "All phases work, using default phase %d.",
priv->default_sample_phase);
goto free;
@@ -248,19 +397,17 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
middle_phase = ranges[longest_range].start + longest_range_len / 2;
middle_phase %= priv->num_phases;
- dev_info(host->dev, "Successfully tuned phase to %d\n",
- TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases));
+ phase = TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases);
+ dev_info(host->dev, "Successfully tuned phase to %d\n", phase);
- clk_set_phase(priv->sample_clk,
- TUNING_ITERATION_TO_PHASE(middle_phase,
- priv->num_phases));
+ rockchip_mmc_set_phase(host, true, phase);
free:
kfree(ranges);
return ret;
}
-static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+static int dw_mci_common_parse_dt(struct dw_mci *host)
{
struct device_node *np = host->dev->of_node;
struct dw_mci_rockchip_priv_data *priv;
@@ -270,13 +417,29 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
return -ENOMEM;
if (of_property_read_u32(np, "rockchip,desired-num-phases",
- &priv->num_phases))
+ &priv->num_phases))
priv->num_phases = 360;
if (of_property_read_u32(np, "rockchip,default-sample-phase",
- &priv->default_sample_phase))
+ &priv->default_sample_phase))
priv->default_sample_phase = 0;
+ host->priv = priv;
+
+ return 0;
+}
+
+static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_rockchip_priv_data *priv;
+ int err;
+
+ err = dw_mci_common_parse_dt(host);
+ if (err)
+ return err;
+
+ priv = host->priv;
+
priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
if (IS_ERR(priv->drv_clk))
dev_dbg(host->dev, "ciu-drive not available\n");
@@ -285,7 +448,21 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
if (IS_ERR(priv->sample_clk))
dev_dbg(host->dev, "ciu-sample not available\n");
- host->priv = priv;
+ priv->internal_phase = false;
+
+ return 0;
+}
+
+static int dw_mci_rk3576_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_rockchip_priv_data *priv;
+	int err;
+
+	err = dw_mci_common_parse_dt(host);
+ if (err)
+ return err;
+
+ priv = host->priv;
+
+ priv->internal_phase = true;
return 0;
}
@@ -331,11 +508,21 @@ static const struct dw_mci_drv_data rk3288_drv_data = {
.init = dw_mci_rockchip_init,
};
+static const struct dw_mci_drv_data rk3576_drv_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .set_ios = dw_mci_rk3288_set_ios,
+ .execute_tuning = dw_mci_rk3288_execute_tuning,
+ .parse_dt = dw_mci_rk3576_parse_dt,
+ .init = dw_mci_rockchip_init,
+};
+
static const struct of_device_id dw_mci_rockchip_match[] = {
{ .compatible = "rockchip,rk2928-dw-mshc",
.data = &rk2928_drv_data },
{ .compatible = "rockchip,rk3288-dw-mshc",
.data = &rk3288_drv_data },
+ { .compatible = "rockchip,rk3576-dw-mshc",
+ .data = &rk3576_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match);
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index f5da7f9baa52..9dc51859c2e5 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_seg_size = host->mmc->max_req_size;
}
- return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ return 0;
}
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index e386f78e3267..89018b6c97b9 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -795,14 +795,13 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
struct mmc_host *mmc = mmc_from_priv(host);
- u64 timeout, clk_ns;
- u32 mode = 0;
+ u64 timeout;
+ u32 clk_ns, mode = 0;
if (mmc->actual_clock == 0) {
timeout = 0;
} else {
- clk_ns = 1000000000ULL;
- do_div(clk_ns, mmc->actual_clock);
+ clk_ns = 1000000000U / mmc->actual_clock;
timeout = ns + clk_ns - 1;
do_div(timeout, clk_ns);
timeout += clks;
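
A quick numeric check of the new integer math:

/*
 * With actual_clock = 50 MHz: clk_ns = 1000000000U / 50000000 = 20 ns.
 * For ns = 100000 (100 us) and clks = 0:
 *   timeout = (100000 + 20 - 1) / 20 = 5000 clock cycles (rounded up)
 * which is then clamped to the SDC_CFG field width by the callers below.
 */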
@@ -831,7 +830,7 @@ static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
- (u32)(timeout > 255 ? 255 : timeout));
+ min_t(u32, timeout, 255));
}
static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
@@ -840,7 +839,7 @@ static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
- (u32)(timeout > 8191 ? 8191 : timeout));
+ min_t(u32, timeout, 8191));
}
static void msdc_gate_clock(struct msdc_host *host)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index caf1d2e23343..1dcaa050f264 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -285,6 +285,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
{ .compatible = "renesas,sdhi-r9a09g011", .data = &of_rzg2l_compatible, },
+ { .compatible = "renesas,sdhi-r9a09g057", .data = &of_rzg2l_compatible, },
{ .compatible = "renesas,rzg2l-sdhi", .data = &of_rzg2l_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index e79aa4b3b6c3..8999b97263af 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
@@ -108,18 +109,20 @@
#define DLL_LOCK_WO_TMOUT(x) \
((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
-#define RK35xx_MAX_CLKS 3
/* PHY register area pointer */
#define DWC_MSHC_PTR_PHY_R 0x300
/* PHY general configuration */
-#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00)
-#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */
-#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */
-#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */
-#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */
-#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */
+#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00)
+#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */
+#define PHY_CNFG_PHY_PWRGOOD_MASK BIT_MASK(1) /* bit [1] */
+#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */
+#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */
+#define PHY_CNFG_PAD_SP_SG2042 0x09 /* PMOS TX drive strength for SG2042 */
+#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */
+#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */
+#define PHY_CNFG_PAD_SN_SG2042 0x08 /* NMOS TX drive strength for SG2042 */
/* PHY command/response pad settings */
#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04)
@@ -148,10 +151,12 @@
#define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */
#define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */
#define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */
+#define PHY_PAD_TXSLEW_CTRL_N_SG2042 0x2 /* Slew control for N-Type pad TX for SG2042 */
/* PHY CLK delay line settings */
#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d)
-#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */
+#define PHY_SDCLKDL_CNFG_EXTDLY_EN BIT(0)
+#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */
/* PHY CLK delay line delay code */
#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e)
@@ -159,10 +164,14 @@
#define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */
#define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */
+#define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20)
+#define PHY_SMPLDL_CNFG_BYPASS_EN BIT(1)
+
/* PHY drift_cclk_rx delay line configuration setting */
#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21)
#define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */
#define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */
+#define PHY_ATDL_CNFG_INPSEL_SG2042 0x2 /* delay line input source for SG2042 */
/* PHY DLL control settings */
#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24)
@@ -193,29 +202,69 @@
SDHCI_TRNS_BLK_CNT_EN | \
SDHCI_TRNS_DMA)
+/* SMC call for BlueField-3 eMMC RST_N */
+#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007
+
enum dwcmshc_rk_type {
DWCMSHC_RK3568,
DWCMSHC_RK3588,
};
struct rk35xx_priv {
- /* Rockchip specified optional clocks */
- struct clk_bulk_data rockchip_clks[RK35xx_MAX_CLKS];
struct reset_control *reset;
enum dwcmshc_rk_type devtype;
u8 txclk_tapnum;
};
+#define DWCMSHC_MAX_OTHER_CLKS 3
+
struct dwcmshc_priv {
struct clk *bus_clk;
int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
+ int num_other_clks;
+ struct clk_bulk_data other_clks[DWCMSHC_MAX_OTHER_CLKS];
+
void *priv; /* pointer to SoC private stuff */
u16 delay_line;
u16 flags;
};
+struct dwcmshc_pltfm_data {
+ const struct sdhci_pltfm_data pdata;
+ int (*init)(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
+ void (*postinit)(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
+};
+
+static int dwcmshc_get_enable_other_clks(struct device *dev,
+ struct dwcmshc_priv *priv,
+ int num_clks,
+ const char * const clk_ids[])
+{
+ int err;
+
+ if (num_clks > DWCMSHC_MAX_OTHER_CLKS)
+ return -EINVAL;
+
+ for (int i = 0; i < num_clks; i++)
+ priv->other_clks[i].id = clk_ids[i];
+
+ err = devm_clk_bulk_get_optional(dev, num_clks, priv->other_clks);
+ if (err) {
+ dev_err(dev, "failed to get clocks %d\n", err);
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(num_clks, priv->other_clks);
+ if (err)
+ dev_err(dev, "failed to enable clocks %d\n", err);
+
+ priv->num_other_clks = num_clks;
+
+ return err;
+}
+
/*
* If DMA addr spans 128MB boundary, we split the DMA transfer into two
* so that each DMA transfer doesn't exceed the boundary.
@@ -681,6 +730,63 @@ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+static int dwcmshc_rk35xx_init(struct device *dev, struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ static const char * const clk_ids[] = {"axi", "block", "timer"};
+ struct rk35xx_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(dev->of_node, "rockchip,rk3588-dwcmshc"))
+ priv->devtype = DWCMSHC_RK3588;
+ else
+ priv->devtype = DWCMSHC_RK3568;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
+ return err;
+ }
+
+ err = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
+ ARRAY_SIZE(clk_ids), clk_ids);
+ if (err)
+ return err;
+
+ if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
+ &priv->txclk_tapnum))
+ priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
+
+ /* Disable cmd conflict check */
+ sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
+ /* Reset previous settings */
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
+
+ dwc_priv->priv = priv;
+
+ return 0;
+}
+
+static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ /*
+	 * Don't support high-speed bus modes with a low clk speed, as the
+	 * DLL cannot be used in this condition.
+ */
+ if (host->mmc->f_max <= 52000000) {
+ dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
+ host->mmc->f_max);
+ host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
+ host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
+ }
+}
+
static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -755,6 +861,35 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
}
}
+static int th1520_init(struct device *dev,
+ struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ dwc_priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
+
+ if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs200-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ dwc_priv->flags |= FLAG_IO_FIXED_1V8;
+ else
+ dwc_priv->flags &= ~FLAG_IO_FIXED_1V8;
+
+ /*
+ * start_signal_voltage_switch() will try 3.3V first
+ * then 1.8V. Use SDHCI_SIGNALING_180 rather than
+ * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
+ * in sdhci_start_signal_voltage_switch().
+ */
+ if (dwc_priv->flags & FLAG_IO_FIXED_1V8) {
+ host->flags &= ~SDHCI_SIGNALING_330;
+ host->flags |= SDHCI_SIGNALING_180;
+ }
+
+ sdhci_enable_v4_mode(host);
+
+ return 0;
+}
+
static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -891,6 +1026,85 @@ static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return ret;
}
+static inline void sg2042_sdhci_phy_init(struct sdhci_host *host)
+{
+ u32 val;
+
+	/* Assert PHY reset and set TX drive strength */
+ val = sdhci_readl(host, PHY_CNFG_R);
+ val &= ~PHY_CNFG_RSTN_DEASSERT;
+ val |= FIELD_PREP(PHY_CNFG_PHY_PWRGOOD_MASK, 1);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP_SG2042);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN_SG2042);
+ sdhci_writel(host, val, PHY_CNFG_R);
+
+ /* Configure phy pads */
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+
+ /* Configure delay line */
+ /* Enable fixed delay */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_EXTDLY_EN, PHY_SDCLKDL_CNFG_R);
+ /*
+ * Set delay line.
+	 * It's recommended that bit UPDATE_DC[4] is 1 while SDCLKDL_DC is being written.
+ * Ensure UPDATE_DC[4] is '0' when not updating code.
+ */
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val |= PHY_SDCLKDL_CNFG_UPDATE;
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+ /* Add 10 * 70ps = 0.7ns for output delay */
+ sdhci_writeb(host, 10, PHY_SDCLKDL_DC_R);
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+
+	/* Set SMPLDL_CNFG to bypass mode */
+ sdhci_writeb(host, PHY_SMPLDL_CNFG_BYPASS_EN, PHY_SMPLDL_CNFG_R);
+
+	/* Set ATDL_CNFG; the tuning clock is not used during init */
+ val = FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL_SG2042);
+ sdhci_writeb(host, val, PHY_ATDL_CNFG_R);
+
+	/* Deassert PHY reset */
+ val = sdhci_readl(host, PHY_CNFG_R);
+ val |= PHY_CNFG_RSTN_DEASSERT;
+ sdhci_writel(host, val, PHY_CNFG_R);
+}
+
+static void sg2042_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ if (mask & SDHCI_RESET_ALL)
+ sg2042_sdhci_phy_init(host);
+}
+
+static int sg2042_init(struct device *dev, struct sdhci_host *host,
+ struct dwcmshc_priv *dwc_priv)
+{
+ static const char * const clk_ids[] = {"timer"};
+
+ return dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
+ ARRAY_SIZE(clk_ids), clk_ids);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -901,6 +1115,29 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
.irq = dwcmshc_cqe_irq_handler,
};
+#ifdef CONFIG_ACPI
+static void dwcmshc_bf3_hw_reset(struct sdhci_host *host)
+{
+ struct arm_smccc_res res = { 0 };
+
+ arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ pr_err("%s: RST_N failed.\n", mmc_hostname(host->mmc));
+}
+
+static const struct sdhci_ops sdhci_dwcmshc_bf3_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .irq = dwcmshc_cqe_irq_handler,
+ .hw_reset = dwcmshc_bf3_hw_reset,
+};
+#endif
+
static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.set_clock = dwcmshc_rk3568_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -932,39 +1169,71 @@ static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
.platform_execute_tuning = cv18xx_sdhci_execute_tuning,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
- .ops = &sdhci_dwcmshc_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+static const struct sdhci_ops sdhci_dwcmshc_sg2042_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = sg2042_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .platform_execute_tuning = th1520_execute_tuning,
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
};
#ifdef CONFIG_ACPI
-static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
- .ops = &sdhci_dwcmshc_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
- SDHCI_QUIRK2_ACMD23_BROKEN,
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_bf3_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+ },
};
#endif
-static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
- .ops = &sdhci_dwcmshc_rk35xx_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
- SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+ },
+ .init = dwcmshc_rk35xx_init,
+ .postinit = dwcmshc_rk35xx_postinit,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = {
- .ops = &sdhci_dwcmshc_th1520_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_th1520_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+ .init = th1520_init,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
- .ops = &sdhci_dwcmshc_cv18xx_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_cv18xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+};
+
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_sg2042_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_sg2042_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ },
+ .init = sg2042_init,
};
static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
@@ -1034,61 +1303,6 @@ dsbl_cqe_caps:
host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
}
-static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
-{
- int err;
- struct rk35xx_priv *priv = dwc_priv->priv;
-
- priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
- if (IS_ERR(priv->reset)) {
- err = PTR_ERR(priv->reset);
- dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
- return err;
- }
-
- priv->rockchip_clks[0].id = "axi";
- priv->rockchip_clks[1].id = "block";
- priv->rockchip_clks[2].id = "timer";
- err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK35xx_MAX_CLKS,
- priv->rockchip_clks);
- if (err) {
- dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
- return err;
- }
-
- err = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, priv->rockchip_clks);
- if (err) {
- dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
- return err;
- }
-
- if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
- &priv->txclk_tapnum))
- priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
-
- /* Disable cmd conflict check */
- sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
- /* Reset previous settings */
- sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
- sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
-
- return 0;
-}
-
-static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
-{
- /*
- * Don't support highspeed bus mode with low clk speed as we
- * cannot use DLL for this condition.
- */
- if (host->mmc->f_max <= 52000000) {
- dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
- host->mmc->f_max);
- host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
- host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
- }
-}
-
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{
.compatible = "rockchip,rk3588-dwcmshc",
@@ -1114,6 +1328,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
.compatible = "thead,th1520-dwcmshc",
.data = &sdhci_dwcmshc_th1520_pdata,
},
+ {
+ .compatible = "sophgo,sg2042-dwcmshc",
+ .data = &sdhci_dwcmshc_sg2042_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
@@ -1135,8 +1353,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct dwcmshc_priv *priv;
- struct rk35xx_priv *rk_priv = NULL;
- const struct sdhci_pltfm_data *pltfm_data;
+ const struct dwcmshc_pltfm_data *pltfm_data;
int err;
u32 extra, caps;
@@ -1146,7 +1363,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
return -ENODEV;
}
- host = sdhci_pltfm_init(pdev, pltfm_data,
+ host = sdhci_pltfm_init(pdev, &pltfm_data->pdata,
sizeof(struct dwcmshc_priv));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -1191,49 +1408,12 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;
- if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
- rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
- if (!rk_priv) {
- err = -ENOMEM;
- goto err_clk;
- }
-
- if (of_device_is_compatible(pdev->dev.of_node, "rockchip,rk3588-dwcmshc"))
- rk_priv->devtype = DWCMSHC_RK3588;
- else
- rk_priv->devtype = DWCMSHC_RK3568;
-
- priv->priv = rk_priv;
-
- err = dwcmshc_rk35xx_init(host, priv);
+ if (pltfm_data->init) {
+ err = pltfm_data->init(&pdev->dev, host, priv);
if (err)
goto err_clk;
}
- if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) {
- priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
-
- if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
- device_property_read_bool(dev, "mmc-hs200-1_8v") ||
- device_property_read_bool(dev, "mmc-hs400-1_8v"))
- priv->flags |= FLAG_IO_FIXED_1V8;
- else
- priv->flags &= ~FLAG_IO_FIXED_1V8;
-
- /*
- * start_signal_voltage_switch() will try 3.3V first
- * then 1.8V. Use SDHCI_SIGNALING_180 rather than
- * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
- * in sdhci_start_signal_voltage_switch().
- */
- if (priv->flags & FLAG_IO_FIXED_1V8) {
- host->flags &= ~SDHCI_SIGNALING_330;
- host->flags |= SDHCI_SIGNALING_180;
- }
-
- sdhci_enable_v4_mode(host);
- }
-
#ifdef CONFIG_ACPI
if (pltfm_data == &sdhci_dwcmshc_bf3_pdata)
sdhci_enable_v4_mode(host);
@@ -1261,8 +1441,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
dwcmshc_cqhci_init(host, pdev);
}
- if (rk_priv)
- dwcmshc_rk35xx_postinit(host, priv);
+ if (pltfm_data->postinit)
+ pltfm_data->postinit(host, priv);
err = __sdhci_add_host(host);
if (err)
@@ -1280,9 +1460,7 @@ err_rpm:
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
- rk_priv->rockchip_clks);
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
free_pltfm:
sdhci_pltfm_free(pdev);
return err;
@@ -1304,7 +1482,6 @@ static void dwcmshc_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk35xx_priv *rk_priv = priv->priv;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1316,9 +1493,7 @@ static void dwcmshc_remove(struct platform_device *pdev)
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
- rk_priv->rockchip_clks);
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
sdhci_pltfm_free(pdev);
}
@@ -1328,7 +1503,6 @@ static int dwcmshc_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk35xx_priv *rk_priv = priv->priv;
int ret;
pm_runtime_resume(dev);
@@ -1347,9 +1521,7 @@ static int dwcmshc_suspend(struct device *dev)
if (!IS_ERR(priv->bus_clk))
clk_disable_unprepare(priv->bus_clk);
- if (rk_priv)
- clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
- rk_priv->rockchip_clks);
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
return ret;
}
@@ -1359,7 +1531,6 @@ static int dwcmshc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
@@ -1372,29 +1543,24 @@ static int dwcmshc_resume(struct device *dev)
goto disable_clk;
}
- if (rk_priv) {
- ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS,
- rk_priv->rockchip_clks);
- if (ret)
- goto disable_bus_clk;
- }
+ ret = clk_bulk_prepare_enable(priv->num_other_clks, priv->other_clks);
+ if (ret)
+ goto disable_bus_clk;
ret = sdhci_resume_host(host);
if (ret)
- goto disable_rockchip_clks;
+ goto disable_other_clks;
if (host->mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_resume(host->mmc);
if (ret)
- goto disable_rockchip_clks;
+ goto disable_other_clks;
}
return 0;
-disable_rockchip_clks:
- if (rk_priv)
- clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
- rk_priv->rockchip_clks);
+disable_other_clks:
+ clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
disable_bus_clk:
if (!IS_ERR(priv->bus_clk))
clk_disable_unprepare(priv->bus_clk);
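The hunks above replace per-variant branching in dwcmshc_probe() with optional init/postinit hooks carried by a dwcmshc-specific wrapper around sdhci_pltfm_data. A minimal sketch of the pattern, with the struct layout inferred from the hunks (its definition sits outside this excerpt):

    struct dwcmshc_pltfm_data {
    	struct sdhci_pltfm_data pdata;
    	int (*init)(struct device *dev, struct sdhci_host *host,
    		    struct dwcmshc_priv *dwc_priv);
    	void (*postinit)(struct sdhci_host *host,
    			 struct dwcmshc_priv *dwc_priv);
    };

    /* Probe-side dispatch: run a hook only if the variant provides one. */
    if (pltfm_data->init) {
    	err = pltfm_data->init(&pdev->dev, host, priv);
    	if (err)
    		goto err_clk;
    }

New variants such as sg2042 then opt in by filling .init/.postinit instead of adding another pointer comparison to the probe path; the shared other_clks/num_other_clks bookkeeping in dwcmshc_priv absorbs what used to be Rockchip-only clock handling.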
diff --git a/drivers/mmc/host/sdhci-of-ma35d1.c b/drivers/mmc/host/sdhci-of-ma35d1.c
new file mode 100644
index 000000000000..b84c2927bd4a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-ma35d1.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ *
+ * Author: Shan-Chun Hung <shanchun1218@gmail.com>
+ */
+
+#include <linux/align.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci.h"
+
+#define MA35_SYS_MISCFCR0 0x070
+#define MA35_SDHCI_MSHCCTL 0x508
+#define MA35_SDHCI_MBIUCTL 0x510
+
+#define MA35_SDHCI_CMD_CONFLICT_CHK BIT(0)
+#define MA35_SDHCI_INCR_MSK GENMASK(3, 0)
+#define MA35_SDHCI_INCR16 BIT(3)
+#define MA35_SDHCI_INCR8 BIT(2)
+
+struct ma35_priv {
+ struct reset_control *rst;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_default;
+};
+
+struct ma35_restore_data {
+ u32 reg;
+ u32 width;
+};
+
+static const struct ma35_restore_data restore_data[] = {
+ { SDHCI_CLOCK_CONTROL, sizeof(u32)},
+ { SDHCI_BLOCK_SIZE, sizeof(u32)},
+ { SDHCI_INT_ENABLE, sizeof(u32)},
+ { SDHCI_SIGNAL_ENABLE, sizeof(u32)},
+ { SDHCI_AUTO_CMD_STATUS, sizeof(u32)},
+ { SDHCI_HOST_CONTROL, sizeof(u32)},
+ { SDHCI_TIMEOUT_CONTROL, sizeof(u8) },
+ { MA35_SDHCI_MSHCCTL, sizeof(u16)},
+ { MA35_SDHCI_MBIUCTL, sizeof(u16)},
+};
+
+/*
+ * If a DMA address range spans a 128MB boundary, split the transfer in
+ * two so that neither part crosses the boundary.
+ */
+static void ma35_adma_write_desc(struct sdhci_host *host, void **desc, dma_addr_t addr, int len,
+ unsigned int cmd)
+{
+ int tmplen, offset;
+
+ if (likely(!len || (ALIGN(addr, SZ_128M) == ALIGN(addr + len - 1, SZ_128M)))) {
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ return;
+ }
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
+
+ addr += tmplen;
+ len -= tmplen;
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+}
+
+static void ma35_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u32 ctl;
+
+ /*
+ * If the clock frequency exceeds MMC_HIGH_52_MAX_DTR,
+ * disable command conflict check.
+ */
+ ctl = sdhci_readw(host, MA35_SDHCI_MSHCCTL);
+ if (clock > MMC_HIGH_52_MAX_DTR)
+ ctl &= ~MA35_SDHCI_CMD_CONFLICT_CHK;
+ else
+ ctl |= MA35_SDHCI_CMD_CONFLICT_CHK;
+ sdhci_writew(host, ctl, MA35_SDHCI_MSHCCTL);
+
+ sdhci_set_clock(host, clock);
+}
+
+static int ma35_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_uhs))
+ pinctrl_select_state(priv->pinctrl, priv->pins_uhs);
+ break;
+ case MMC_SIGNAL_VOLTAGE_330:
+ if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_default))
+ pinctrl_select_state(priv->pinctrl, priv->pins_default);
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "Unsupported signal voltage!\n");
+ return -EINVAL;
+ }
+
+ return sdhci_start_signal_voltage_switch(mmc, ios);
+}
+
+static void ma35_voltage_switch(struct sdhci_host *host)
+{
+	/* Wait 5 ms after setting the 1.8V signal enable bit */
+ fsleep(5000);
+}
+
+static int ma35_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int idx;
+ u32 regs[ARRAY_SIZE(restore_data)] = {};
+
+ /*
+	 * Controller limitations require resetting the SD/eMMC block before
+	 * tuning, so the affected registers are saved beforehand and
+	 * restored once the reset completes.
+ */
+ for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) {
+ if (restore_data[idx].width == sizeof(u32))
+ regs[idx] = sdhci_readl(host, restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u16))
+ regs[idx] = sdhci_readw(host, restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u8))
+ regs[idx] = sdhci_readb(host, restore_data[idx].reg);
+ }
+
+ reset_control_assert(priv->rst);
+ reset_control_deassert(priv->rst);
+
+ for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) {
+ if (restore_data[idx].width == sizeof(u32))
+ sdhci_writel(host, regs[idx], restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u16))
+ sdhci_writew(host, regs[idx], restore_data[idx].reg);
+ else if (restore_data[idx].width == sizeof(u8))
+ sdhci_writeb(host, regs[idx], restore_data[idx].reg);
+ }
+
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
+static const struct sdhci_ops sdhci_ma35_ops = {
+ .set_clock = ma35_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .reset = sdhci_reset,
+ .adma_write_desc = ma35_adma_write_desc,
+ .voltage_switch = ma35_voltage_switch,
+};
+
+static const struct sdhci_pltfm_data sdhci_ma35_pdata = {
+ .ops = &sdhci_ma35_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_BROKEN_DDR50 |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+
+static int ma35_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct ma35_priv *priv;
+ int err;
+ u32 extra, ctl;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_ma35_pdata, sizeof(struct ma35_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+	/* Extra ADMA descriptor count for handling 128M boundary crossings. */
+ extra = DIV_ROUND_UP_ULL(dma_get_required_mask(dev), SZ_128M);
+ extra = min(extra, SDHCI_MAX_SEGS);
+
+ host->adma_table_cnt += extra;
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+
+ pltfm_host->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ err = dev_err_probe(dev, PTR_ERR(pltfm_host->clk), "failed to get clk\n");
+ goto err_sdhci;
+ }
+
+ err = mmc_of_parse(host->mmc);
+ if (err)
+ goto err_sdhci;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = dev_err_probe(dev, PTR_ERR(priv->rst), "failed to get reset control\n");
+ goto err_sdhci;
+ }
+
+ sdhci_get_of_property(pdev);
+
+ priv->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(priv->pinctrl)) {
+ priv->pins_default = pinctrl_lookup_state(priv->pinctrl, "default");
+ priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl, "state_uhs");
+ pinctrl_select_state(priv->pinctrl, priv->pins_default);
+ }
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)) {
+ struct regmap *regmap;
+ u32 reg;
+
+ regmap = syscon_regmap_lookup_by_phandle(dev_of_node(dev), "nuvoton,sys");
+ if (!IS_ERR(regmap)) {
+ /* Enable SDHCI voltage stable for 1.8V */
+ regmap_read(regmap, MA35_SYS_MISCFCR0, &reg);
+ reg |= BIT(17);
+ regmap_write(regmap, MA35_SYS_MISCFCR0, reg);
+ }
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ ma35_start_signal_voltage_switch;
+ }
+
+ host->mmc_host_ops.execute_tuning = ma35_execute_tuning;
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_sdhci;
+
+ /*
+ * Split data into chunks of 16 or 8 bytes for transmission.
+ * Each chunk transfer is guaranteed to be uninterrupted on the bus.
+ * This likely corresponds to the AHB bus DMA burst size.
+ */
+ ctl = sdhci_readw(host, MA35_SDHCI_MBIUCTL);
+ ctl &= ~MA35_SDHCI_INCR_MSK;
+ ctl |= MA35_SDHCI_INCR16 | MA35_SDHCI_INCR8;
+ sdhci_writew(host, ctl, MA35_SDHCI_MBIUCTL);
+
+ return 0;
+
+err_sdhci:
+ sdhci_pltfm_free(pdev);
+ return err;
+}
+
+static void ma35_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static void ma35_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, 0);
+ ma35_disable_card_clk(host);
+ sdhci_pltfm_free(pdev);
+}
+
+static const struct of_device_id sdhci_ma35_dt_ids[] = {
+ { .compatible = "nuvoton,ma35d1-sdhci" },
+ {}
+};
+
+static struct platform_driver sdhci_ma35_driver = {
+ .driver = {
+ .name = "sdhci-ma35",
+ .of_match_table = sdhci_ma35_dt_ids,
+ },
+ .probe = ma35_probe,
+ .remove_new = ma35_remove,
+};
+module_platform_driver(sdhci_ma35_driver);
+
+MODULE_DESCRIPTION("SDHCI platform driver for Nuvoton MA35");
+MODULE_AUTHOR("Shan-Chun Hung <shanchun1218@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index b75cbea88b40..7b957f6d5588 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -126,7 +126,7 @@ static void pxav1_request_done(struct sdhci_host *host, struct mmc_request *mrq)
struct sdhci_pxav2_host *pxav2_host;
/* If this is an SDIO command, perform errata workaround for silicon bug */
- if (mrq->cmd && !mrq->cmd->error &&
+ if (!mrq->cmd->error &&
(mrq->cmd->opcode == SD_IO_RW_DIRECT ||
mrq->cmd->opcode == SD_IO_RW_EXTENDED)) {
/* Reset data port */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 64e10f7c9faa..0aa3c40ea6ed 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -86,6 +86,7 @@
#define CLOCK_TOO_SLOW_HZ 50000000
#define SDHCI_AM654_AUTOSUSPEND_DELAY -1
+#define RETRY_TUNING_MAX 10
/* Command Queue Host Controller Interface Base address */
#define SDHCI_AM654_CQE_BASE_ADDR 0x200
@@ -151,6 +152,7 @@ struct sdhci_am654_data {
u32 flags;
u32 quirks;
bool dll_enable;
+ u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
};
@@ -443,7 +445,7 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
#define ITAPDLY_LENGTH 32
#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
-static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
+static int sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
*fail_window, u8 num_fails, bool circular_buffer)
{
u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
@@ -453,12 +455,16 @@ static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
int prev_fail_end = -1;
u8 i;
- if (!num_fails)
- return ITAPDLY_LAST_INDEX >> 1;
+ if (!num_fails) {
+ /* Retry tuning */
+ dev_dbg(dev, "No failing region found, retry tuning\n");
+ return -1;
+ }
if (fail_window->length == ITAPDLY_LENGTH) {
- dev_err(dev, "No passing ITAPDLY, return 0\n");
- return 0;
+ /* Retry tuning */
+ dev_dbg(dev, "No passing itapdly, retry tuning\n");
+ return -1;
}
first_fail_start = fail_window->start;
@@ -494,13 +500,14 @@ static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
}
-static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
- u32 opcode)
+static int sdhci_am654_do_tuning(struct sdhci_host *host,
+ u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
struct window fail_window[ITAPDLY_LENGTH];
+ struct device *dev = mmc_dev(host->mmc);
u8 curr_pass, itap;
u8 fail_index = 0;
u8 prev_pass = 1;
@@ -521,6 +528,7 @@ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
if (!curr_pass) {
fail_window[fail_index].end = itap;
fail_window[fail_index].length++;
+ dev_dbg(dev, "Failed itapdly=%d\n", itap);
}
if (curr_pass && !prev_pass)
@@ -532,13 +540,34 @@ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
if (fail_window[fail_index].length != 0)
fail_index++;
- itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
- sdhci_am654->dll_enable);
+ return sdhci_am654_calculate_itap(host, fail_window, fail_index,
+ sdhci_am654->dll_enable);
+}
- sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ unsigned char timing = host->mmc->ios.timing;
+ struct device *dev = mmc_dev(host->mmc);
+ int itapdly;
+
+ do {
+ itapdly = sdhci_am654_do_tuning(host, opcode);
+ if (itapdly >= 0)
+ break;
+ } while (++sdhci_am654->tuning_loop < RETRY_TUNING_MAX);
+
+ if (itapdly < 0) {
+ dev_err(dev, "Failed to find itapdly, fail tuning\n");
+ return -1;
+ }
+ dev_dbg(dev, "Passed tuning, final itapdly=%d\n", itapdly);
+ sdhci_am654_write_itapdly(sdhci_am654, itapdly, sdhci_am654->itap_del_ena[timing]);
/* Save ITAPDLY */
- sdhci_am654->itap_del_sel[timing] = itap;
+ sdhci_am654->itap_del_sel[timing] = itapdly;
return 0;
}
@@ -742,6 +771,9 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK,
TUNINGFORSDR50_MASK);
+	/* Reset the counter used to bound tuning re-execution */
+ sdhci_am654->tuning_loop = 0;
+
ret = sdhci_setup_host(host);
if (ret)
return ret;
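One subtlety in the retry loop above: the bound uses the persistent sdhci_am654->tuning_loop counter, which is reset only in sdhci_am654_init(), so failed attempts accumulate across execute_tuning() invocations rather than being counted per call. A strictly per-call bound would instead look like this sketch:

    int itapdly, tries = 0;

    do {
    	itapdly = sdhci_am654_do_tuning(host, opcode);
    } while (itapdly < 0 && ++tries < RETRY_TUNING_MAX);

    if (itapdly < 0)
    	return -1;	/* no passing ITAPDLY window was found */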
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index b359876cc33d..45a474ccab1c 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -895,8 +895,8 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* It seems, VccQ should be switched on after Vcc, this is also what the
* omap_hsmmc.c driver does.
*/
- if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
- ret = regulator_enable(mmc->supply.vqmmc);
+ if (!ret) {
+ ret = mmc_regulator_enable_vqmmc(mmc);
usleep_range(200, 300);
}
@@ -909,8 +909,7 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
- if (!IS_ERR(mmc->supply.vqmmc))
- regulator_disable(mmc->supply.vqmmc);
+ mmc_regulator_disable_vqmmc(mmc);
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
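The conversion to the core vqmmc helpers drops the open-coded IS_ERR() guards because the helpers carry them internally and additionally track enable state, which makes unbalanced power-off calls safe. A simplified sketch of what the enable side replaces (not the exact core implementation; see drivers/mmc/core/regulator.c):

    static int enable_vqmmc_open_coded(struct mmc_host *mmc)
    {
    	int ret = 0;

    	if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
    		ret = regulator_enable(mmc->supply.vqmmc);
    		if (!ret)
    			mmc->vqmmc_enabled = true;
    	}
    	return ret;
    }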
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 66044f4f5bad..10cd1d9b4885 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
* get them
*/
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 28131a127d06..8297b366a066 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -296,10 +296,12 @@ static int __init init_slram(void)
T("slram: devname = %s\n", devname);
if ((!map) || (!(devstart = strsep(&map, ",")))) {
E("slram: No devicestart specified.\n");
+ break;
}
T("slram: devstart = %s\n", devstart);
if ((!map) || (!(devlength = strsep(&map, ",")))) {
E("slram: No devicelength / -end specified.\n");
+ break;
}
T("slram: devlength = %s\n", devlength);
if (parse_cmdline(devname, devstart, devlength) != 0) {
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 193428de6a4b..f56f44aa8625 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -204,7 +204,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
- vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+ vecs_copy = kmemdup_array(vecs, count, sizeof(struct kvec), GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
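kmemdup_array(src, count, element_size, gfp) is the overflow-safe spelling of the kmemdup() call it replaces: the count/size multiplication is checked, so an absurd count fails the allocation instead of silently wrapping. To a first approximation the helper behaves like:

    /* Sketch of the helper's effect (see mm/util.c for the real one). */
    vecs_copy = kmemdup(vecs, size_mul(sizeof(struct kvec), count),
    		    GFP_KERNEL);

where size_mul() saturates to SIZE_MAX on overflow, guaranteeing the allocation fails.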
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 2f11585b5613..7bf3777e1f13 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -298,14 +298,14 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
struct kmsg_dump_iter iter;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (detail->reason == KMSG_DUMP_OOPS && !dump_oops)
return;
kmsg_dump_rewind(&iter);
@@ -317,7 +317,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
record_size - sizeof(struct mtdoops_hdr), NULL);
clear_bit(0, &cxt->oops_buf_busy);
- if (reason != KMSG_DUMP_OOPS) {
+ if (detail->reason != KMSG_DUMP_OOPS) {
/* Panics must be written immediately */
mtdoops_write(cxt, 1);
} else {
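The dumper callback change follows a kernel-wide API update: the bare enum kmsg_dump_reason argument is wrapped in a descriptor so extra context can travel with it. Judging from the usage above, the struct looks approximately like this (layout assumed; it is not shown in this diff):

    struct kmsg_dump_detail {
    	enum kmsg_dump_reason reason;
    	/* further fields, e.g. a human-readable description */
    };

Callbacks therefore read detail->reason where they previously used the reason parameter directly.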
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 2ff1d2b13e3c..5436ec4a8fde 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -1360,7 +1360,7 @@ static void anfc_chips_cleanup(struct arasan_nfc *nfc)
static int anfc_chips_init(struct arasan_nfc *nfc)
{
- struct device_node *np = nfc->dev->of_node, *nand_np;
+ struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1370,10 +1370,9 @@ static int anfc_chips_init(struct arasan_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = anfc_chip_init(nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
anfc_chips_cleanup(nfc);
break;
}
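This hunk and the matching ones below (cadence, marvell, meson, mtk, pl35x, renesas, rockchip, stm32, sunxi) apply the same conversion: for_each_child_of_node_scoped() declares the iterator with scope-based cleanup, so the child reference is dropped on every exit path and the manual of_node_put() before break/return becomes redundant. Conceptually the macro expands to something like:

    for (struct device_node *child __free(device_node) =
    		of_get_next_child(np, NULL);
         child != NULL;
         child = of_get_next_child(np, child)) {
    	/* use child; no of_node_put() needed on early exit */
    }

of_get_next_child() already releases the previous node on each step; __free(device_node) covers whichever node is still held when the scope ends.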
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index dc75d50d52e8..f9ccfd02e804 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2049,7 +2049,10 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
dma_cap_set(DMA_MEMCPY, mask);
nc->dmac = dma_request_channel(mask, NULL, NULL);
- if (!nc->dmac)
+ if (nc->dmac)
+ dev_info(nc->dev, "using %s for DMA transfers\n",
+ dma_chan_name(nc->dmac));
+ else
dev_err(nc->dev, "Failed to request DMA channel\n");
}
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index ff92c17def83..3bc89b356963 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2836,7 +2836,6 @@ static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
struct device_node *np = cdns_ctrl->dev->of_node;
- struct device_node *nand_np;
int max_cs = cdns_ctrl->caps2.max_banks;
int nchips, ret;
@@ -2849,10 +2848,9 @@ static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
if (ret) {
- of_node_put(nand_np);
cadence_nand_chips_cleanup(cdns_ctrl);
return ret;
}
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 051deea768db..392678143a36 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -20,8 +20,71 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mtd-davinci-aemif.h>
+#define NRCSR_OFFSET 0x00
+#define NANDFCR_OFFSET 0x60
+#define NANDFSR_OFFSET 0x64
+#define NANDF1ECC_OFFSET 0x70
+
+/* 4-bit ECC syndrome registers */
+#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
+#define NAND_4BIT_ECC1_OFFSET 0xc0
+#define NAND_4BIT_ECC2_OFFSET 0xc4
+#define NAND_4BIT_ECC3_OFFSET 0xc8
+#define NAND_4BIT_ECC4_OFFSET 0xcc
+#define NAND_ERR_ADD1_OFFSET 0xd0
+#define NAND_ERR_ADD2_OFFSET 0xd4
+#define NAND_ERR_ERRVAL1_OFFSET 0xd8
+#define NAND_ERR_ERRVAL2_OFFSET 0xdc
+
+/* NOTE: boards don't need to use these address bits
+ * for ALE/CLE unless they support booting from NAND.
+ * They're used unless platform data overrides them.
+ */
+#define MASK_ALE 0x08
+#define MASK_CLE 0x10
+
+struct davinci_nand_pdata {
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ /*
+ * 0-indexed chip-select number of the asynchronous
+ * interface to which the NAND device has been connected.
+ *
+ * So, if you have NAND connected to CS3 of DA850, you
+	 * will pass '1' here, since the asynchronous interface
+ * on DA850 starts from CS2.
+ */
+ uint32_t core_chipsel;
+
+ /* for packages using two chipselects */
+ uint32_t mask_chipsel;
+
+ /* board's default static partition info */
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+
+ /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+	 * Newer ones also support 4-bit ECC, but using it with
+	 * large page chips is awkward.
+ */
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement ecc_placement;
+ u8 ecc_bits;
+
+ /* e.g. NAND_BUSWIDTH_16 */
+ unsigned int options;
+ /* e.g. NAND_BBT_USE_FLASH */
+ unsigned int bbt_options;
+
+ /* Main and mirror bbt descriptor overrides */
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+};
/*
* This is a device driver for the NAND flash controller found on the
@@ -54,8 +117,6 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
-
- struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -775,7 +836,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
- info->timing = pdata->timing;
info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index edac8749bb93..2f5666511fda 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -145,15 +145,15 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- dt->clk = devm_clk_get(dev, "nand");
+ dt->clk = devm_clk_get_enabled(dev, "nand");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
- dt->clk_x = devm_clk_get(dev, "nand_x");
+ dt->clk_x = devm_clk_get_enabled(dev, "nand_x");
if (IS_ERR(dt->clk_x))
return PTR_ERR(dt->clk_x);
- dt->clk_ecc = devm_clk_get(dev, "ecc");
+ dt->clk_ecc = devm_clk_get_enabled(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
@@ -165,18 +165,6 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->rst_reg))
return PTR_ERR(dt->rst_reg);
- ret = clk_prepare_enable(dt->clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(dt->clk_x);
- if (ret)
- goto out_disable_clk;
-
- ret = clk_prepare_enable(dt->clk_ecc);
- if (ret)
- goto out_disable_clk_x;
-
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
@@ -187,7 +175,7 @@ static int denali_dt_probe(struct platform_device *pdev)
*/
ret = reset_control_deassert(dt->rst_reg);
if (ret)
- goto out_disable_clk_ecc;
+ return ret;
ret = reset_control_deassert(dt->rst);
if (ret)
@@ -222,12 +210,6 @@ out_assert_rst:
reset_control_assert(dt->rst);
out_assert_rst_reg:
reset_control_assert(dt->rst_reg);
-out_disable_clk_ecc:
- clk_disable_unprepare(dt->clk_ecc);
-out_disable_clk_x:
- clk_disable_unprepare(dt->clk_x);
-out_disable_clk:
- clk_disable_unprepare(dt->clk);
return ret;
}
@@ -239,9 +221,6 @@ static void denali_dt_remove(struct platform_device *pdev)
denali_remove(&dt->controller);
reset_control_assert(dt->rst);
reset_control_assert(dt->rst_reg);
- clk_disable_unprepare(dt->clk_ecc);
- clk_disable_unprepare(dt->clk_x);
- clk_disable_unprepare(dt->clk);
}
static struct platform_driver denali_dt_driver = {
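devm_clk_get_enabled() bundles devm_clk_get() with clk_prepare_enable() and registers a devres action that disables and unprepares the clock on driver detach, which is what allows both the probe error unwinding and the explicit disables in denali_dt_remove() to be deleted. A hand-rolled equivalent of one such call, where disable_clk is a hypothetical helper wrapping clk_disable_unprepare():

    dt->clk = devm_clk_get(dev, "nand");
    if (IS_ERR(dt->clk))
    	return PTR_ERR(dt->clk);

    ret = clk_prepare_enable(dt->clk);
    if (ret)
    	return ret;

    ret = devm_add_action_or_reset(dev, disable_clk, dt->clk);
    if (ret)
    	return ret;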
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index de7e722d3826..e22094e39546 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -77,18 +77,20 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto regions_release;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto regions_release;
}
ret = denali_init(denali);
if (ret)
- return ret;
+ goto regions_release;
nsels = denali->nbanks;
@@ -116,6 +118,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
+regions_release:
+ pci_release_regions(dev);
return ret;
}
@@ -123,6 +127,7 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
+ pci_release_regions(dev);
denali_remove(denali);
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 5b0f5a9cef81..26648b72e691 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2771,7 +2771,6 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int max_cs = nfc->caps->max_cs_nb;
int nchips;
int ret;
@@ -2798,20 +2797,15 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
return ret;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
- goto cleanup_chips;
+ marvell_nand_chips_cleanup(nfc);
+ return ret;
}
}
return 0;
-
-cleanup_chips:
- marvell_nand_chips_cleanup(nfc);
-
- return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 9eb5470344d0..fbb06aa305cb 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1475,7 +1475,7 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chips_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
@@ -1495,14 +1495,12 @@ static int meson_nfc_nand_chips_init(struct device *dev,
struct meson_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- meson_nfc_nand_chip_cleanup(nfc);
- of_node_put(nand_np);
+ meson_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
@@ -1616,7 +1614,7 @@ static void meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- meson_nfc_nand_chip_cleanup(nfc);
+ meson_nfc_nand_chips_cleanup(nfc);
meson_nfc_disable_clk(nfc);
}
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 17477bb2d48f..586868b4139f 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return 0;
}
+static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
+{
+ struct mtk_nfc_nand_chip *mtk_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ mtk_chip = list_first_entry(&nfc->chips,
+ struct mtk_nfc_nand_chip, node);
+ chip = &mtk_chip->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&mtk_chip->node);
+ }
+}
+
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
+ mtk_nfc_nand_chips_cleanup(nfc);
return ret;
}
}
@@ -1570,20 +1586,8 @@ release_ecc:
static void mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
- struct mtk_nfc_nand_chip *mtk_chip;
- struct nand_chip *chip;
- int ret;
-
- while (!list_empty(&nfc->chips)) {
- mtk_chip = list_first_entry(&nfc->chips,
- struct mtk_nfc_nand_chip, node);
- chip = &mtk_chip->nand;
- ret = mtd_device_unregister(nand_to_mtd(chip));
- WARN_ON(ret);
- nand_cleanup(chip);
- list_del(&mtk_chip->node);
- }
+ mtk_nfc_nand_chips_cleanup(nfc);
mtk_ecc_release(nfc->ecc);
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 179b28459b4b..df48b7d01d16 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1381,7 +1381,7 @@ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
}
/*
- * Retuns a pointer to the current byte, within the current page.
+ * Returns a pointer to the current byte, within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 1c76ee98efb7..2570fd0beea0 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -1111,7 +1111,7 @@ static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
- struct device_node *np = nfc->dev->of_node, *nand_np;
+ struct device_node *np = nfc->dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1121,10 +1121,9 @@ static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = pl35x_nand_chip_init(nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
pl35x_nand_chips_cleanup(nfc);
break;
}
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
index c9a01feff8df..0e92d50c5249 100644
--- a/drivers/mtd/nand/raw/renesas-nand-controller.c
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -1297,23 +1297,17 @@ static void rnandc_chips_cleanup(struct rnandc *rnandc)
static int rnandc_chips_init(struct rnandc *rnandc)
{
- struct device_node *np;
int ret;
- for_each_child_of_node(rnandc->dev->of_node, np) {
+ for_each_child_of_node_scoped(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
- of_node_put(np);
- goto cleanup_chips;
+ rnandc_chips_cleanup(rnandc);
+ return ret;
}
}
return 0;
-
-cleanup_chips:
- rnandc_chips_cleanup(rnandc);
-
- return ret;
}
static int rnandc_probe(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 55580447633b..51c9cf9013dc 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -1211,7 +1211,7 @@ static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
{
- struct device_node *np = dev->of_node, *nand_np;
+ struct device_node *np = dev->of_node;
int nchips = of_get_child_count(np);
int ret;
@@ -1221,10 +1221,9 @@ static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
rk_nfc_chips_cleanup(nfc);
return ret;
}
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 264556939a00..0f67e96cc240 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1851,7 +1851,6 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
struct device_node *dn = nfc->dev->of_node;
- struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
@@ -1865,12 +1864,10 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
return -EINVAL;
}
- for_each_child_of_node(dn, child) {
+ for_each_child_of_node_scoped(dn, child) {
ret = stm32_fmc2_nfc_parse_child(nfc, child);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
}
return ret;
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 4ec17c8bce5a..c28634e20abf 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -2025,13 +2025,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
struct device_node *np = dev->of_node;
- struct device_node *nand_np;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = sunxi_nand_chip_init(dev, nfc, nand_np);
if (ret) {
- of_node_put(nand_np);
sunxi_nand_chips_cleanup(nfc);
return ret;
}
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index e0b6715e5dfe..4d76f9f71a0e 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -34,7 +34,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
return 0;
}
-static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
spinand->scratchbuf);
@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand->set_cont_read(spinand, enable);
+}
+
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -311,10 +317,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
/* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG) {
mtd->ecc_stats.failed++;
- else if (ret > 0)
- mtd->ecc_stats.corrected += ret;
+ } else if (ret > 0) {
+ unsigned int pages;
+
+ /*
+		 * Continuous reads don't report per-page detail, so we may
+		 * exaggerate the actual number of corrected bitflips.
+ */
+ if (!req->continuous)
+ pages = 1;
+ else
+ pages = req->datalen / nanddev_page_size(nand);
+
+ mtd->ecc_stats.corrected += ret * pages;
+ }
return ret;
}
@@ -369,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
if (req->datalen) {
buf = spinand->databuf;
- nbytes = nanddev_page_size(nand);
+ if (!req->continuous)
+ nbytes = nanddev_page_size(nand);
+ else
+ nbytes = round_up(req->dataoffs + req->datalen,
+ nanddev_page_size(nand));
column = 0;
}
@@ -386,6 +408,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
else
rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+ if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
+
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
if (ret < 0)
@@ -397,6 +422,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
nbytes -= ret;
column += ret;
buf += ret;
+
+ /*
+ * Dirmap accesses are allowed to toggle the CS.
+ * Toggling the CS during a continuous read is forbidden.
+ */
+ if (nbytes && req->continuous)
+ return -EIO;
}
if (req->datalen)
@@ -460,6 +492,9 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
+ if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
+ column |= req->pos.plane << fls(nanddev_page_size(nand));
+
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
if (ret < 0)
@@ -630,25 +665,20 @@ static int spinand_write_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
- struct mtd_ecc_stats old_stats;
- unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
- int ret = 0;
+ int ret;
- if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
- mutex_lock(&spinand->lock);
-
- old_stats = mtd->ecc_stats;
-
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
@@ -664,13 +694,155 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret == -EBADMSG)
ecc_failed = true;
else
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret;
+}
+
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ unsigned int *max_bitflips)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ u8 status;
+ int ret;
+
+ ret = spinand_cont_read_enable(spinand, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The cache is divided into two halves. While one half of the cache has
+ * the requested data, the other half is loaded with the next chunk of data.
+ * Therefore, the host can read out the data continuously from page to page.
+	 * Each data read must be a multiple of 4 bytes and full pages should be read;
+ * otherwise, the data output might get out of sequence from one read command
+ * to another.
+ */
+ nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ goto end_cont_read;
+
+ ret = nand_ecc_prepare_io_req(nand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_load_page_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US, NULL);
+ if (ret < 0)
+ goto end_cont_read;
+
+ ret = spinand_read_from_cache_op(spinand, &iter.req);
+ if (ret)
+ goto end_cont_read;
+
+ ops->retlen += iter.req.datalen;
+
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ goto end_cont_read;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = nand_ecc_finish_io_req(nand, &iter.req);
+ if (ret < 0)
+ goto end_cont_read;
+
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ ret = 0;
+ }
+
+end_cont_read:
+ /*
+ * Once all the data has been read out, the host can either pull CS#
+ * high and wait for tRST or manually clear the bit in the configuration
+ * register to terminate the continuous read operation. We have no
+ * guarantee the SPI controller drivers will effectively deassert the CS
+ * when we expect them to, so take the register based approach.
+ */
+ spinand_cont_read_enable(spinand, false);
+
+ return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
+
+	/* OOBs cannot be retrieved, so external/on-host ECC engines won't work */
+ if (spinand->set_cont_read &&
+ (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
+ engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
+ spinand->cont_read_possible = true;
+ }
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos start_pos, end_pos;
+
+ if (!spinand->cont_read_possible)
+ return false;
+
+ /* OOBs won't be retrieved */
+ if (ops->ooblen || ops->oobbuf)
+ return false;
+
+ nanddev_offs_to_pos(nand, from, &start_pos);
+ nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+ /*
+	 * Continuous reads never cross LUN boundaries. Some devices don't
+	 * support crossing plane boundaries, and some don't even support
+	 * crossing block boundaries. As the common case is reading through
+	 * UBI, we will very rarely read two consecutive blocks or more, so
+	 * it is safer and easier (this can be improved) to only enable
+	 * continuous reads when reading within the same erase block.
+ */
+ if (start_pos.target != end_pos.target ||
+ start_pos.plane != end_pos.plane ||
+ start_pos.eraseblock != end_pos.eraseblock)
+ return false;
+
+ return start_pos.page < end_pos.page;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct mtd_ecc_stats old_stats;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ mutex_lock(&spinand->lock);
+
+ old_stats = mtd->ecc_stats;
+
+ if (spinand_use_cont_read(mtd, from, ops))
+ ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+ else
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
@@ -680,9 +852,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_unlock(&spinand->lock);
- if (ecc_failed && !ret)
- ret = -EBADMSG;
-
return ret ? ret : max_bitflips;
}
@@ -862,6 +1031,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
};
struct spi_mem_dirmap_desc *desc;
+ if (spinand->cont_read_possible)
+ info.length = nanddev_eraseblock_size(nand);
+
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
@@ -1095,6 +1267,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->set_cont_read = table[i].set_cont_read;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1236,9 +1409,8 @@ static int spinand_init(struct spinand_device *spinand)
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
- spinand->databuf = kzalloc(nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- GFP_KERNEL);
+ spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+ GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
@@ -1267,6 +1439,12 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_cleanup_nanddev;
+ /*
+ * Continuous read can only be enabled with an on-die ECC engine, so the
+ * ECC initialization must have happened previously.
+ */
+ spinand_cont_read_init(spinand);
+
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
@@ -1287,6 +1465,7 @@ static int spinand_init(struct spinand_device *spinand)
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
ret = spinand_create_dirmaps(spinand);
if (ret) {
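The new bitflip_threshold default is three quarters of the ECC strength; without it the MTD core defaults the threshold to the full strength. A worked example for an 8-bit-per-step engine:

    mtd->bitflip_threshold = DIV_ROUND_UP(8 * 3, 4);	/* == 6 */

so users such as UBI are advised to scrub a block once 6 of the 8 correctable bitflips appear, rather than waiting until the correction capability is nearly exhausted.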
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3f9e9c572854..d277c3220fdc 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -5,12 +5,25 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
-#define MACRONIX_ECCSR_MASK 0x0F
+#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
+#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
+#define MACRONIX_CFG_CONT_READ BIT(2)
+
+#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
+
+/* Bitflip threshold configuration register */
+#define REG_CFG_BFT 0x10
+#define CFG_BFT(x) FIELD_PREP(GENMASK(7, 4), (x))
+
+struct macronix_priv {
+ bool cont_read;
+};
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -49,8 +62,9 @@ static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.free = mx35lfxge4ab_ooblayout_free,
};
-static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+static int macronix_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
+ struct macronix_priv *priv = spinand->priv;
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
@@ -60,12 +74,21 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
if (ret)
return ret;
- *eccsr &= MACRONIX_ECCSR_MASK;
+ /*
+ * ECCSR exposes the number of bitflips for the last read page in bits [3:0].
+ * Continuous read compatible chips also expose the maximum number of
+ * bitflips for the whole (continuous) read operation in bits [7:4].
+ */
+ if (!priv->cont_read)
+ *eccsr = MACRONIX_ECCSR_BF_LAST_PAGE(*eccsr);
+ else
+ *eccsr = MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(*eccsr);
+
return 0;
}
-static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int macronix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
@@ -83,16 +106,14 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
- if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
+ if (macronix_get_eccsr(spinand, spinand->scratchbuf))
return nanddev_get_ecc_conf(nand)->strength;
eccsr = *spinand->scratchbuf;
- if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
- !eccsr))
+ if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
-
default:
break;
}
@@ -100,6 +121,21 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
+{
+ struct macronix_priv *priv = spinand->priv;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, MACRONIX_CFG_CONT_READ,
+ enable ? MACRONIX_CFG_CONT_READ : 0);
+ if (ret)
+ return ret;
+
+ priv->cont_read = enable;
+
+ return 0;
+}
+
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -110,7 +146,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -118,7 +154,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03),
@@ -129,7 +167,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
@@ -139,7 +178,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -156,7 +196,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
@@ -174,7 +215,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
@@ -194,7 +236,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -204,7 +246,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35LF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
@@ -213,9 +255,11 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
@@ -223,9 +267,10 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -235,7 +280,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -245,7 +290,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -253,9 +299,11 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT |
+ SPINAND_HAS_READ_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
@@ -263,9 +311,10 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ SPINAND_HAS_QE_BIT |
+ SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -275,7 +324,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -285,7 +334,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -295,7 +345,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -305,7 +356,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -315,7 +366,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -325,7 +376,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -335,8 +387,8 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
-
+ macronix_ecc_get_status),
+ SPINAND_CONT_READ(macronix_set_cont_read)),
SPINAND_INFO("MX31LF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -346,7 +398,7 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
SPINAND_INFO("MX3UF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -356,10 +408,30 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- mx35lf1ge4ab_ecc_get_status)),
+ macronix_ecc_get_status)),
};
+static int macronix_spinand_init(struct spinand_device *spinand)
+{
+ struct macronix_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spinand->priv = priv;
+
+ return 0;
+}
+
+static void macronix_spinand_cleanup(struct spinand_device *spinand)
+{
+ kfree(spinand->priv);
+}
+
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .init = macronix_spinand_init,
+ .cleanup = macronix_spinand_cleanup,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
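The struct macronix_priv and the MACRONIX_ECCSR_BF_* helpers used above are defined earlier in macronix.c and are not part of this hunk. A minimal sketch of what they plausibly look like, assuming the bit layout described in the ECCSR comment (bits [3:0] carry the bitflips of the last read page, bits [7:4] the maximum across a continuous read):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical reconstruction -- not shown in this hunk. */
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) \
	FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) \
	FIELD_GET(GENMASK(7, 4), eccsr)

struct macronix_priv {
	/* set by macronix_set_cont_read(), read by macronix_get_eccsr() */
	bool cont_read;
};

Keeping the flag in spinand->priv is what lets the shared macronix_ecc_get_status() decode the right nibble without knowing which kind of read preceded it.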
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index ba7c813b9542..f3bb81d7e460 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -76,6 +76,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
+static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 64 + (8 * section);
+ region->length = 7;
+
+ return 0;
+}
+
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -100,6 +112,11 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
+ .ecc = w25n01kv_ooblayout_ecc,
+ .free = w25n02kv_ooblayout_free,
+};
+
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
@@ -163,6 +180,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W25N01KV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
+ NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
SPINAND_INFO("W25N02KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
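The W25N01KV entry above pairs a 96-byte OOB with four 7-byte ECC chunks placed every 8 bytes from offset 64, so sections 0..3 land at offsets 64, 72, 80 and 88. Consumers never hard-code that; they walk the layout through the generic MTD iterator. A small sketch (dump_ecc_regions() is a hypothetical helper, not kernel API):

#include <linux/mtd/mtd.h>

static void dump_ecc_regions(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0;

	/* mtd_ooblayout_ecc() returns -ERANGE past the last section
	 * (section > 3 for w25n01kv_ooblayout above).
	 */
	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ECC section %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}
}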
diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c
index 13daf9bffd08..49c8e7f27f21 100644
--- a/drivers/mtd/parsers/bcm47xxpart.c
+++ b/drivers/mtd/parsers/bcm47xxpart.c
@@ -95,7 +95,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t blocksize = master->erasesize;
int trx_parts[2]; /* Array with indexes of TRX partitions */
int trx_num = 0; /* Number of found TRX partitions */
- int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ static const int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
int err;
/*
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index e7b8e9d0a910..abfa68798918 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -157,10 +157,10 @@ static int parse_fixed_partitions(struct mtd_info *master,
partname = of_get_property(pp, "name", &len);
parts[i].name = partname;
- if (of_get_property(pp, "read-only", &len))
+ if (of_property_read_bool(pp, "read-only"))
parts[i].mask_flags |= MTD_WRITEABLE;
- if (of_get_property(pp, "lock", &len))
+ if (of_property_read_bool(pp, "lock"))
parts[i].mask_flags |= MTD_POWERUP_LOCK;
if (of_property_read_bool(pp, "slc-mode"))
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index e0c4efc424f4..9d6e85bf227b 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -3281,7 +3281,8 @@ static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
- if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ if (manufacturers[i]->parts[j].name &&
+ !strcmp(name, manufacturers[i]->parts[j].name)) {
nor->manufacturer = manufacturers[i];
return &manufacturers[i]->parts[j];
}
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 3c6499fdb712..e6bab2d00c92 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -436,6 +436,8 @@ static const struct flash_info st_nor_parts[] = {
.id = SNOR_ID(0x20, 0xbb, 0x17),
.name = "n25q064a",
.size = SZ_8M,
+ .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x18),
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 6cc237c24e07..d6c92595f6bc 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -967,6 +967,10 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
+ .id = SNOR_ID(0x34, 0x5b, 0x19),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5b, 0x1a),
.name = "s28hs512t",
.mfr_flags = USE_CLPEF,
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 180b7390690c..b5ad7118c49a 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -167,6 +167,21 @@ static const struct flash_info sst_nor_parts[] = {
}
};
+static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ u8 op = (len == 1) ? SPINOR_OP_BP : SPINOR_OP_AAI_WP;
+ int ret;
+
+ nor->program_opcode = op;
+ ret = spi_nor_write_data(nor, to, len, buf);
+ if (ret < 0)
+ return ret;
+ WARN(ret != len, "While writing %zu bytes, wrote %i bytes\n", len, ret);
+
+ return spi_nor_wait_till_ready(nor);
+}
+
static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
@@ -188,16 +203,10 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (to % 2) {
- nor->program_opcode = SPINOR_OP_BP;
-
/* write one byte. */
- ret = spi_nor_write_data(nor, to, 1, buf);
+ ret = sst_nor_write_data(nor, to, 1, buf);
if (ret < 0)
goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
to++;
actual++;
@@ -205,16 +214,11 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
- nor->program_opcode = SPINOR_OP_AAI_WP;
-
/* write two bytes. */
- ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ ret = sst_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
goto out;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
+
to += 2;
nor->sst_write_second = true;
}
@@ -234,14 +238,9 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
goto out;
- nor->program_opcode = SPINOR_OP_BP;
- ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ ret = sst_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
actual += 1;
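With sst_nor_write_data() factored out, the SST AAI sequence reads as three calls to one helper: a byte-program (BP) to reach an even address, word-programs (AAI_WP) for the bulk, and a final BP for a trailing odd byte. A condensed sketch of that shape, with the sst_write_second bookkeeping and unlock/lock handling elided:

static int sst_aai_write_sketch(struct spi_nor *nor, loff_t to,
				size_t len, const u_char *buf)
{
	size_t actual = 0;
	int ret = 0;

	if (!len)
		return 0;

	if (to % 2) {			/* align to an even address */
		ret = sst_nor_write_data(nor, to++, 1, buf);
		if (ret)
			return ret;
		actual++;
	}

	for (; actual < len - 1; actual += 2) {	/* two bytes per AAI cycle */
		ret = sst_nor_write_data(nor, to, 2, buf + actual);
		if (ret)
			return ret;
		to += 2;
	}

	if (actual != len)		/* trailing odd byte, if any */
		ret = sst_nor_write_data(nor, to, 1, buf + actual);

	return ret;
}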
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index e065e4fd42a3..9f7ce5763e71 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -18,6 +18,31 @@
SPI_MEM_OP_DATA_OUT(1, buf, 0))
static int
+w25q128_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * Zetta ZD25Q128C is a clone of the Winbond device. But the encoded
+ * size is really wrong. It seems that they confused Mbit with MiB.
+ * Thus the flash is discovered as a 2MiB device.
+ */
+ if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+ bfpt_header->minor == SFDP_JESD216_MINOR &&
+ nor->params->size == SZ_2M &&
+ nor->params->erase_map.regions[0].size == SZ_2M) {
+ nor->params->size = SZ_16M;
+ nor->params->erase_map.regions[0].size = SZ_16M;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_fixups w25q128_fixups = {
+ .post_bfpt = w25q128_post_bfpt_fixups,
+};
+
+static int
w25q256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
@@ -108,6 +133,7 @@ static const struct flash_info winbond_nor_parts[] = {
.size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &w25q128_fixups,
}, {
.id = SNOR_ID(0xef, 0x40, 0x19),
.name = "w25q256",
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 6d15ab3bfbbc..0433a0f36d1b 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3098,9 +3098,9 @@ static void amt_link_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
+ dev->netns_local = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 7aca0544fb29..e057526448d7 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -68,6 +68,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
__be16 proto;
void *oiph;
int err;
+ int nh;
bareudp = rcu_dereference_sk_user_data(sk);
if (!bareudp)
@@ -148,10 +149,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
}
skb_dst_set(skb, &tun_dst->dst);
skb->dev = bareudp->dev;
- oiph = skb_network_header(skb);
- skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
+ skb_reset_network_header(skb);
+
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(bareudp->dev, rx_length_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
@@ -301,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be32 saddr;
int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -368,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
@@ -553,7 +575,6 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
@@ -566,6 +587,7 @@ static void bareudp_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
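The bareudp RX change applies a general skb rule: any pskb_*() helper may reallocate skb->head, so a raw header pointer taken before the call can dangle afterwards. Cache the header position as an offset and rebuild the pointer once the pull succeeds. A minimal sketch of the pattern (outer_hdr_after_pull() is illustrative, not the driver code):

#include <linux/ip.h>
#include <linux/skbuff.h>

static const struct iphdr *outer_hdr_after_pull(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head; /* offset, not pointer */

	/* The pull may reallocate skb->head. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return NULL;

	/* Re-derive the pointer from the saved offset. */
	return (const struct iphdr *)(skb->head + nh);
}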
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bb9c3d6ef435..b560644ee1b1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -419,6 +419,41 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
#ifdef CONFIG_XFRM_OFFLOAD
/**
+ * bond_ipsec_dev - Get active device for IPsec offload
+ * @xs: pointer to transformer state struct
+ *
+ * Context: caller must hold rcu_read_lock.
+ *
+ * Return: the device for IPsec offload, or NULL if none exists.
+ **/
+static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
+{
+ struct net_device *bond_dev = xs->xso.dev;
+ struct bonding *bond;
+ struct slave *slave;
+
+ if (!bond_dev)
+ return NULL;
+
+ bond = netdev_priv(bond_dev);
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ return NULL;
+
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ return NULL;
+
+ if (!xs->xso.real_dev)
+ return NULL;
+
+ if (xs->xso.real_dev != slave->dev)
+ pr_warn_ratelimited("%s: (slave %s): not same with IPsec offload real dev %s\n",
+ bond_dev->name, slave->dev->name, xs->xso.real_dev->name);
+
+ return slave->dev;
+}
+
+/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
@@ -640,23 +675,12 @@ out:
**/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- struct slave *curr_active;
- struct bonding *bond;
bool ok = false;
- bond = netdev_priv(bond_dev);
rcu_read_lock();
- curr_active = rcu_dereference(bond->curr_active_slave);
- if (!curr_active)
- goto out;
- real_dev = curr_active->dev;
-
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
- goto out;
-
- if (!xs->xso.real_dev)
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
goto out;
if (!real_dev->xfrmdev_ops ||
@@ -670,11 +694,61 @@ out:
return ok;
}
+/**
+ * bond_advance_esn_state - ESN support for IPSec HW offload
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_advance_esn_state(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name);
+ goto out;
+ }
+
+ real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs);
+out:
+ rcu_read_unlock();
+}
+
+/**
+ * bond_xfrm_update_stats - Update xfrm state statistics
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_xfrm_update_stats(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name);
+ goto out;
+ }
+
+ real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs);
+out:
+ rcu_read_unlock();
+}
+
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
.xdo_dev_state_free = bond_ipsec_free_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = bond_advance_esn_state,
+ .xdo_dev_state_update_stats = bond_xfrm_update_stats,
};
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -2300,7 +2374,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2436,7 +2510,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -5626,7 +5700,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5658,7 +5732,7 @@ err:
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
@@ -5812,9 +5886,6 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
- info->phc_index = -1;
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
@@ -5928,7 +5999,10 @@ void bond_setup(struct net_device *bond_dev)
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
- bond_dev->features |= NETIF_F_LLTX;
+ bond_dev->lltx = true;
+
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->netns_local = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
@@ -5937,9 +6011,6 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- /* Don't allow bond devices to change network namespaces. */
- bond_dev->features |= NETIF_F_NETNS_LOCAL;
-
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -6384,7 +6455,8 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
+ bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ bond_dev->name);
if (!bond->wq)
return -ENOMEM;
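The alloc_ordered_workqueue() change above closes a format-string hazard: the function's first argument is a printf-style format, so passing the interface name directly would interpret any '%' it contains. A sketch of the safe form (make_bond_wq() is a hypothetical wrapper; the patch makes the same change inline in bond_init()):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

static struct workqueue_struct *make_bond_wq(const struct net_device *dev)
{
	/* BAD:  alloc_ordered_workqueue(dev->name, WQ_MEM_RECLAIM);
	 * a '%' sequence in the ifname would be parsed as a conversion.
	 */
	return alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev->name);
}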
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 7f9b60a42d29..cf989bea9aa3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -225,6 +225,7 @@ source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/peak_canfd/Kconfig"
source "drivers/net/can/rcar/Kconfig"
+source "drivers/net/can/rockchip/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 4669cd51e7bf..a71db2cfe990 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += esd/
obj-y += rcar/
+obj-y += rockchip/
obj-y += spi/
obj-y += usb/
obj-y += softing/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 11f434d708b3..191707d7e3da 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1191,7 +1191,7 @@ MODULE_DEVICE_TABLE(platform, at91_can_id_table);
static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
- .remove_new = at91_can_remove,
+ .remove = at91_can_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(at91_can_dt_ids),
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index 49cf9682b925..bfc60eb33dc3 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -1092,7 +1092,7 @@ static struct platform_driver bxcan_driver = {
.of_match_table = bxcan_of_match,
},
.probe = bxcan_probe,
- .remove_new = bxcan_remove,
+ .remove = bxcan_remove,
};
module_platform_driver(bxcan_driver);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e2ec69aa46e5..6cba9717a6d8 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -476,7 +476,7 @@ static struct platform_driver c_can_plat_driver = {
.of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
- .remove_new = c_can_plat_remove,
+ .remove = c_can_plat_remove,
.suspend = c_can_suspend,
.resume = c_can_resume,
.id_table = c_can_id_table,
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 22009440a983..d06762817153 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -307,7 +307,7 @@ static void cc770_isa_remove(struct platform_device *pdev)
static struct platform_driver cc770_isa_driver = {
.probe = cc770_isa_probe,
- .remove_new = cc770_isa_remove,
+ .remove = cc770_isa_remove,
.driver = {
.name = KBUILD_MODNAME,
},
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 13bcfba05f18..b6c4f02ffb97 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
static int cc770_get_of_node_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
+ u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0;
struct device_node *np = pdev->dev.of_node;
- const u32 *prop;
- int prop_size;
- u32 clkext;
-
- prop = of_get_property(np, "bosch,external-clock-frequency",
- &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- clkext = *prop;
- else
- clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+
+ of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
priv->can.clock.freq = clkext;
/* The system clock may not exceed 10 MHz */
@@ -98,7 +91,7 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
- if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ if (!of_property_read_bool(np, "bosch,no-comperator-bypass"))
priv->bus_config |= BUSCFG_CBY;
if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
@@ -109,25 +102,22 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
- prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
- u32 cdv = clkext / *prop;
- int slew;
+ of_property_read_u32(np, "bosch,clock-out-frequency", &clkout);
+ if (clkout > 0) {
+ u32 cdv = clkext / clkout;
if (cdv > 0 && cdv < 16) {
+ u32 slew;
+
priv->cpu_interface |= CPUIF_CEN;
priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
- prop = of_get_property(np, "bosch,slew-rate",
- &prop_size);
- if (prop && (prop_size == sizeof(u32))) {
- slew = *prop;
- } else {
+ if (of_property_read_u32(np, "bosch,slew-rate", &slew)) {
/* Determine default slew rate */
slew = (CLKOUT_SL_MASK >>
CLKOUT_SL_SHIFT) -
((cdv * clkext - 1) / 8000000);
- if (slew < 0)
+ if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT))
slew = 0;
}
priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
@@ -257,7 +247,7 @@ static struct platform_driver cc770_platform_driver = {
.of_match_table = cc770_platform_table,
},
.probe = cc770_platform_probe,
- .remove_new = cc770_platform_remove,
+ .remove = cc770_platform_remove,
};
module_platform_driver(cc770_platform_driver);
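The cc770 conversion leans on a documented property of of_property_read_u32(): on any failure the output variable is left untouched. Presetting the default therefore replaces the old of_get_property() pointer-and-size dance, and the return value only needs checking when absence must be told apart from a default, as with "bosch,slew-rate" above. Sketch (read_clock_or_default() is hypothetical; CC770_PLATFORM_CAN_CLOCK comes from the driver):

#include <linux/of.h>

static u32 read_clock_or_default(const struct device_node *np)
{
	u32 clkext = CC770_PLATFORM_CAN_CLOCK;	/* default, kept on failure */

	of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);

	return clkext;
}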
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 55bb10b157b4..70e2577c8541 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, ctucan_of_match);
static struct platform_driver ctucanfd_driver = {
.probe = ctucan_platform_probe,
- .remove_new = ctucan_platform_remove,
+ .remove = ctucan_platform_remove,
.driver = {
.name = DRV_NAME,
.pm = &ctucan_platform_pm_ops,
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 87828f953073..6792c14fd7eb 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -380,12 +380,9 @@ int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index dfdc039d92a6..01aacdcda260 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -65,15 +65,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
- }
-
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -114,6 +105,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (is_can_fd) {
if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
return -EOPNOTSUPP;
@@ -195,48 +195,6 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming &&
- !priv->bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
- bt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm;
u32 ctrlstatic;
@@ -284,6 +242,48 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bitrate(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt,
+ extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
index b7cdcffd0e45..5d6d2828cd04 100644
--- a/drivers/net/can/esd/esd_402_pci-core.c
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -369,12 +369,13 @@ static int pci402_init_cores(struct pci_dev *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
priv = netdev_priv(netdev);
+ priv->can.clock.freq = card->ov.core_frequency;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC;
-
- priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
priv->can.bittiming_const = &pci402_bittiming_const_canfd;
else
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index 121cbbf81458..c80032bc1a52 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -17,6 +17,9 @@
/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
+
+/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG BIT(5)
/* ecc value of esdACC equals SJA1000's ECC register */
@@ -43,8 +46,8 @@
static void acc_resetmode_enter(struct acc_core *core)
{
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_set_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
@@ -52,14 +55,14 @@ static void acc_resetmode_enter(struct acc_core *core)
static void acc_resetmode_leave(struct acc_core *core)
{
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
-static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
const void *data)
{
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
@@ -172,7 +175,7 @@ int acc_open(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
u32 tx_fifo_status;
- u32 ctrl_mode;
+ u32 ctrl;
int err;
/* Retry to enter RESET mode if out of sync. */
@@ -187,19 +190,19 @@ int acc_open(struct net_device *netdev)
if (err)
return err;
- ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS;
+ ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+ ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+ ctrl |= ACC_REG_CTRL_MASK_LOM;
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+ acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
acc_resetmode_leave(core);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -218,13 +221,13 @@ int acc_close(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS |
- ACC_REG_CONTROL_MASK_IE_BUSERR);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS |
+ ACC_REG_CTRL_MASK_IE_BUSERR);
netif_stop_queue(netdev);
acc_resetmode_enter(core);
@@ -233,9 +236,9 @@ int acc_close(struct net_device *netdev)
/* Mark pending TX requests to be aborted after controller restart. */
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
- /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_LOM);
+ /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_LOM);
close_candev(netdev);
return 0;
@@ -249,7 +252,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 tx_fifo_head = core->tx_fifo_head;
int fifo_usage;
u32 acc_id;
- u8 acc_dlc;
+ u32 acc_dlc;
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -274,6 +277,8 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
acc_dlc |= ACC_DLC_RTR_FLAG;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ acc_dlc |= ACC_DLC_SSTX_FLAG;
if (cf->can_id & CAN_EFF_FLAG) {
acc_id = cf->can_id & CAN_EFF_MASK;
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
index a70488b25d39..6b7ebd8c91b2 100644
--- a/drivers/net/can/esd/esdacc.h
+++ b/drivers/net/can/esd/esdacc.h
@@ -35,6 +35,7 @@
*/
#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)
#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
@@ -50,7 +51,7 @@
#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
/* esdACC CAN Core Module */
-#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_CTRL 0x0000
#define ACC_CORE_OF_STATUS_IRQ 0x0008
#define ACC_CORE_OF_BRP 0x000c
#define ACC_CORE_OF_BTR 0x0010
@@ -66,21 +67,22 @@
#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
-#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
-#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
-#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
-#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
-#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
-#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
-
-#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
-#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
-#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
-#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
-#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
-#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
-#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
-#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+/* CTRL register layout */
+#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
+#define ACC_REG_CTRL_MASK_LOM BIT(1)
+#define ACC_REG_CTRL_MASK_STM BIT(2)
+#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
+#define ACC_REG_CTRL_MASK_TS BIT(6)
+#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
+
+#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)
/* BRP and BTR register layout for CAN-Classic version */
#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
@@ -300,9 +302,9 @@ static inline void acc_clear_bits(struct acc_core *core,
static inline int acc_resetmode_entered(struct acc_core *core)
{
- u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);
- return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+ return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
}
static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 8ea7f2795551..ac1a860986df 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -354,6 +354,14 @@ static struct flexcan_devtype_data fsl_imx93_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data fsl_imx95_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
@@ -544,6 +552,13 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) {
+ /* In SCMI mode the driver does nothing here: after Linux suspends,
+ * ATF sends a request to the SM (System Manager, M33 core) via the
+ * SCMI protocol. Once the SM receives it, it asserts the IPG_STOP
+ * signal to FlexCAN, putting the controller into STOP mode.
+ */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -555,7 +570,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
u32 reg_mcr;
int ret;
- /* remove stop request */
+ /* Remove the stop request. For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI
+ * there is nothing to do here: ATF already sent the request to the
+ * SM before Linux resumed, and on receiving it the SM deasserts the
+ * IPG_STOP signal to FlexCAN.
+ */
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
@@ -1983,6 +2002,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
ret = flexcan_setup_stop_mode_scfw(pdev);
else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)
+ /* ATF will handle all STOP_IPG related work */
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
@@ -2009,6 +2031,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
+ { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2309,9 +2332,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- err = pm_runtime_force_suspend(device);
- if (err)
- return err;
+ /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, ATF has to send a request
+ * to the SM over the SCMI protocol and the SM then asserts the
+ * IPG_STOP signal; all of this needs the CAN clocks to stay on.
+ * Once the CAN module sees IPG_STOP and switches to STOP mode,
+ * whether the clocks stay on or are gated off depends on the
+ * hardware design.
+ */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -2325,9 +2358,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
@@ -2349,7 +2385,7 @@ static struct platform_driver flexcan_driver = {
.of_match_table = flexcan_of_match,
},
.probe = flexcan_probe,
- .remove_new = flexcan_remove,
+ .remove = flexcan_remove,
.id_table = flexcan_id_table,
};
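The two PM hunks above gate pm_runtime_force_suspend()/resume() on the same compound condition. Pulled out as a predicate the intent is easier to see: the SCMI stop-mode handshake needs the CAN clocks running, but only when the port is armed as a wakeup source (flexcan_keep_clocks_on() is a hypothetical helper, not in the patch):

static bool flexcan_keep_clocks_on(struct device *dev,
				   const struct flexcan_priv *priv)
{
	/* The IPG_STOP handshake driven over SCMI only works while
	 * the CAN clocks are on.
	 */
	return device_may_wakeup(dev) &&
	       (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI);
}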
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 025c3417031f..4933d8c7439e 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -68,6 +68,8 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* Setup stop mode with ATF SCMI protocol to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6d3ba71a6a73..cdf0ec9fa7f3 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1725,7 +1725,7 @@ static struct platform_driver grcan_driver = {
.of_match_table = grcan_match,
},
.probe = grcan_probe,
- .remove_new = grcan_remove,
+ .remove = grcan_remove,
};
module_platform_driver(grcan_driver);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 72307297d75e..d32b10900d2f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -1033,7 +1033,7 @@ static struct platform_driver ifi_canfd_plat_driver = {
.of_match_table = ifi_canfd_of_table,
},
.probe = ifi_canfd_plat_probe,
- .remove_new = ifi_canfd_plat_remove,
+ .remove = ifi_canfd_plat_remove,
};
module_platform_driver(ifi_canfd_plat_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index d048ea565b89..60c7b83b4539 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -2049,7 +2049,7 @@ static struct platform_driver ican3_driver = {
.name = DRV_NAME,
},
.probe = ican3_probe,
- .remove_new = ican3_remove,
+ .remove = ican3_remove,
};
module_platform_driver(ican3_driver);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 9ffc3ffb4e8f..fee012b57f33 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1053,13 +1053,13 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
void __iomem *serdes_base;
u32 word1, word2;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
- word2 = addr >> 32;
-#else
- word1 = addr;
- word2 = 0;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
+ word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
+ word2 = upper_32_bits(addr);
+ } else {
+ word1 = addr;
+ word2 = 0;
+ }
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(word1, serdes_base);
iowrite32(word2, serdes_base + 0x4);
@@ -1072,9 +1072,9 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
iowrite32(lsb, serdes_base);
iowrite32(msb, serdes_base + 0x4);
@@ -1087,9 +1087,9 @@ static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
u32 msb = 0x0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- msb = addr >> 32;
-#endif
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
iowrite32(msb, serdes_base);
iowrite32(lsb, serdes_base + 0x4);
@@ -1104,6 +1104,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
/* Disable the DMA */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+
+ dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
KVASER_PCIEFD_DMA_SIZE,
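The kvaser hunks trade #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT for IS_ENABLED(), which keeps both branches compiled and type-checked while the dead one is eliminated, and use lower_32_bits()/upper_32_bits() so a 32-bit dma_addr_t never triggers an oversized shift. The combined idiom, sketched (write_dma_addr() is illustrative):

#include <linux/io.h>
#include <linux/kernel.h>

static void write_dma_addr(void __iomem *base, dma_addr_t addr)
{
	iowrite32(lower_32_bits(addr), base);
	/* On a 32-bit dma_addr_t, upper_32_bits() is simply 0. */
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		iowrite32(upper_32_bits(addr), base + 0x4);
	else
		iowrite32(0, base + 0x4);
}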
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 012c3d22b01d..a978b960f1f1 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1434,7 +1434,8 @@ static int m_can_chip_config(struct net_device *dev)
/* Disable unused interrupts */
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
- IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
+ IR_TSW);
err = m_can_config_enable(cdev);
if (err)
@@ -1763,11 +1764,7 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral)
- napi_disable(&cdev->napi);
-
m_can_stop(dev);
- m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
m_can_clean(dev);
@@ -1776,10 +1773,13 @@ static int m_can_close(struct net_device *dev)
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
+ } else {
+ napi_disable(&cdev->napi);
}
close_candev(dev);
+ m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
return 0;
@@ -2030,6 +2030,8 @@ static int m_can_open(struct net_device *dev)
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
+ else
+ napi_enable(&cdev->napi);
/* register interrupt handler */
if (cdev->is_peripheral) {
@@ -2063,9 +2065,6 @@ static int m_can_open(struct net_device *dev)
if (err)
goto exit_start_fail;
- if (!cdev->is_peripheral)
- napi_enable(&cdev->napi);
-
netif_start_queue(dev);
return 0;
@@ -2079,6 +2078,8 @@ exit_irq_fail:
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ else
+ napi_disable(&cdev->napi);
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
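The m_can open/close reordering enforces a simple invariant: NAPI is enabled before the controller can raise interrupts and disabled while the handler can still schedule it, and the clocks are gated only after every consumer is gone. The resulting close-path order, sketched as a comment:

/* Close path after this change (cf. m_can_close() above):
 *
 *   netif_stop_queue()        stop feeding the TX path
 *   m_can_stop()              quiesce the controller
 *   free_irq()                no handler runs past this point
 *   m_can_clean()             drop the pending TX skb
 *   tx_wq / rx_offload teardown (peripheral) or napi_disable()
 *   close_candev()
 *   m_can_clk_stop()          clocks off only once nothing touches HW
 *   phy_power_off()
 */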
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 983ab80260dd..b832566efda0 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -231,7 +231,7 @@ static struct platform_driver m_can_plat_driver = {
.pm = &m_can_pmops,
},
.probe = m_can_plat_probe,
- .remove_new = m_can_plat_remove,
+ .remove = m_can_plat_remove,
};
module_platform_driver(m_can_plat_driver);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b3d69c3b6b6..0080c39ee182 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -435,7 +435,7 @@ static struct platform_driver mpc5xxx_can_driver = {
.of_match_table = mpc5xxx_can_table,
},
.probe = mpc5xxx_can_probe,
- .remove_new = mpc5xxx_can_remove,
+ .remove = mpc5xxx_can_remove,
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index b50005397463..28f3fd805273 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -781,11 +781,8 @@ static int peak_get_ts_info(struct net_device *dev,
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index f5aa5dbacaf2..2b7dd359f27b 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -907,7 +907,7 @@ static struct platform_driver rcar_can_driver = {
.pm = &rcar_can_pm_ops,
},
.probe = rcar_can_probe,
- .remove_new = rcar_can_remove,
+ .remove = rcar_can_remove,
};
module_platform_driver(rcar_can_driver);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index c919668bbe7a..df1a5d0b37b2 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -2118,7 +2118,7 @@ static struct platform_driver rcar_canfd_driver = {
.pm = &rcar_canfd_pm_ops,
},
.probe = rcar_canfd_probe,
- .remove_new = rcar_canfd_remove,
+ .remove = rcar_canfd_remove,
};
module_platform_driver(rcar_canfd_driver);
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
new file mode 100644
index 000000000000..e029e2a3ca4b
--- /dev/null
+++ b/drivers/net/can/rockchip/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CAN_ROCKCHIP_CANFD
+ tristate "Rockchip CAN-FD controller"
+ depends on OF || COMPILE_TEST
+ select CAN_RX_OFFLOAD
+ help
+ Say Y here if you want to use the CAN-FD controller found on
+ Rockchip SoCs.
diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile
new file mode 100644
index 000000000000..3760d3e1baa3
--- /dev/null
+++ b/drivers/net/can/rockchip/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o
+
+rockchip_canfd-objs :=
+rockchip_canfd-objs += rockchip_canfd-core.o
+rockchip_canfd-objs += rockchip_canfd-ethtool.o
+rockchip_canfd-objs += rockchip_canfd-rx.o
+rockchip_canfd-objs += rockchip_canfd-timestamp.o
+rockchip_canfd-objs += rockchip_canfd-tx.o
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
new file mode 100644
index 000000000000..df18c85fc078
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// Rockchip CANFD driver
+//
+// Copyright (c) 2020 Rockchip Electronics Co. Ltd.
+//
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+
+#include "rockchip_canfd.h"
+
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = {
+ .model = RKCANFD_MODEL_RK3568V2,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by erratum 5, but tests
+ * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. In contrast to the errata sheet, mark the
+ * rk3568v3 as affected by erratum 5, too.
+ */
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = {
+ .model = RKCANFD_MODEL_RK3568V3,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+static const char *__rkcanfd_get_model_str(enum rkcanfd_model model)
+{
+ switch (model) {
+ case RKCANFD_MODEL_RK3568V2:
+ return "rk3568v2";
+ case RKCANFD_MODEL_RK3568V3:
+ return "rk3568v3";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+rkcanfd_get_model_str(const struct rkcanfd_priv *priv)
+{
+ return __rkcanfd_get_model_str(priv->devtype_data.model);
+}
+
+/* Note:
+ *
+ * The formula to calculate the CAN System Clock is:
+ *
+ * Tsclk = 2 x Tclk x (brp + 1)
+ *
+ * Double the data sheet's brp_min, brp_max and brp_inc values (both
+ * for the arbitration and data bit timing) to take the "2 x" into
+ * account.
+ */
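+/* Worked example (illustrative numbers, not taken from the data
+ * sheet): with Tclk = 1 / 300 MHz and a data sheet brp of 1, the
+ * effective divider is 2 x (1 + 1) = 4, i.e. Tsclk = 75 MHz. With
+ * the doubled constants below, the bittiming framework computes
+ * with the effective divider (here 4) directly, so no extra "2 x"
+ * factor is needed anywhere else.
+ */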
+static const struct can_bittiming_const rkcanfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static const struct can_bittiming_const rkcanfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv)
+{
+ reset_control_assert(priv->reset);
+ udelay(2);
+ reset_control_deassert(priv->reset);
+
+ rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0);
+}
+
+static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_nbt, reg_dbt, reg_tdc;
+ u32 tdco;
+
+ reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW,
+ bt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP,
+ (bt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
+ bt->prop_seg + bt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt);
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW,
+ dbt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP,
+ (dbt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1,
+ dbt->prop_seg + dbt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt);
+
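+	/* Transmitter Delay Compensation: place the TDC offset at
+	 * roughly 2/3 of a data-bit time, expressed in CAN clock
+	 * cycles, and clamp it to the width of the register field.
+	 */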
+ tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3;
+ tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET));
+
+ reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) |
+ RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION,
+ reg_tdc);
+
+ return 0;
+}
+
+static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv,
+ struct can_berr_counter *bec)
+{
+ struct can_berr_counter bec_raw;
+ u32 reg_state;
+
+ bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT);
+ bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT);
+ bec_raw = *bec;
+
+ /* Tests show that sometimes both CAN bus error counters read
+ * 0x0, even if the controller is in warning mode
+ * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE
+ * set).
+ *
+ * In case both error counters read 0x0, use the struct
+ * priv->bec, otherwise save the read value to priv->bec.
+ *
+ * rkcanfd_handle_rx_int_one() handles the decrementing of
+ * priv->bec.rxerr for successfully RX'ed CAN frames.
+ *
+ * Luckily the controller doesn't decrement the RX CAN bus
+ * error counter in hardware for self received TX'ed CAN
+ * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't
+ * interfere with proper RX CAN bus error counters.
+ *
+ * rkcanfd_handle_tx_done_one() handles the decrementing of
+ * priv->bec.txerr for successfully TX'ed CAN frames.
+ */
+ if (!bec->rxerr && !bec->txerr)
+ *bec = priv->bec;
+ else
+ priv->bec = *bec;
+
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+ netdev_vdbg(priv->ndev,
+ "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n",
+ __func__,
+ bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr,
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE));
+}
+
+static int rkcanfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ return err;
+
+ rkcanfd_get_berr_counter_corrected(priv, bec);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default);
+
+ netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_INT_MASK));
+}
+
+static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL);
+}
+
+static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+	/* TXE FIFO */
+	reg = rkcanfd_read(priv, RKCANFD_REG_TXEVENT_FIFO_CTRL);
+	reg |= RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE;
+	rkcanfd_write(priv, RKCANFD_REG_TXEVENT_FIFO_CTRL, reg);
+
+ /* RX FIFO */
+ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+ reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
+
+ WRITE_ONCE(priv->tx_head, 0);
+ WRITE_ONCE(priv->tx_tail, 0);
+ netdev_reset_queue(priv->ndev);
+}
+
+static void rkcanfd_chip_start(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ rkcanfd_chip_set_reset_mode(priv);
+
+ /* Receiving Filter: accept all */
+ rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0);
+ rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID);
+
+ /* enable:
+ * - CAN_FD: enable CAN-FD
+ * - AUTO_RETX_MODE: auto retransmission on TX error
+ * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames
+ * - RXSTX_MODE: Receive Self Transmit data mode
+ * - WORK_MODE: transition from reset to working mode
+ */
+ reg = rkcanfd_read(priv, RKCANFD_REG_MODE);
+ priv->reg_mode_default = reg |
+ RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE |
+ RKCANFD_REG_MODE_AUTO_RETX_MODE |
+ RKCANFD_REG_MODE_COVER_MODE |
+ RKCANFD_REG_MODE_RXSTX_MODE |
+ RKCANFD_REG_MODE_WORK_MODE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE |
+ RKCANFD_REG_MODE_SILENT_MODE |
+ RKCANFD_REG_MODE_SELF_TEST;
+
+ /* mask, i.e. ignore:
+ * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt
+ * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt
+ * - OVERLOAD_INT - CAN bus overload interrupt
+ * - TX_FINISH_INT - Transmit finish interrupt
+ */
+ priv->reg_int_mask_default =
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT |
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT |
+ RKCANFD_REG_INT_OVERLOAD_INT |
+ RKCANFD_REG_INT_TX_FINISH_INT;
+
+ /* Do not mask the bus error interrupt if the bus error
+ * reporting is requested.
+ */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT;
+
+ memset(&priv->bec, 0x0, sizeof(priv->bec));
+
+ rkcanfd_chip_fifo_setup(priv);
+ rkcanfd_timestamp_init(priv);
+ rkcanfd_timestamp_start(priv);
+
+ rkcanfd_set_bittiming(priv);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ rkcanfd_chip_set_work_mode(priv);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_MODE));
+}
+
+static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_chip_set_reset_mode(priv);
+ rkcanfd_chip_interrupts_disable(priv);
+}
+
+static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop_sync(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static int rkcanfd_set_mode(struct net_device *ndev,
+ enum can_mode mode)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ rkcanfd_chip_start(priv);
+ rkcanfd_chip_interrupts_enable(priv);
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ struct sk_buff *skb;
+
+ *timestamp = rkcanfd_get_timestamp(priv);
+
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, *timestamp);
+
+ return skb;
+}
+
+static const char *rkcanfd_get_error_type_str(unsigned int type)
+{
+ switch (type) {
+ case RKCANFD_REG_ERROR_CODE_TYPE_BIT:
+ return "Bit";
+ case RKCANFD_REG_ERROR_CODE_TYPE_STUFF:
+ return "Stuff";
+ case RKCANFD_REG_ERROR_CODE_TYPE_FORM:
+ return "Form";
+ case RKCANFD_REG_ERROR_CODE_TYPE_ACK:
+ return "ACK";
+ case RKCANFD_REG_ERROR_CODE_TYPE_CRC:
+ return "CRC";
+ }
+
+ return "<unknown>";
+}
+
+#define RKCAN_ERROR_CODE(reg_ec, code) \
+ ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "")
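+/* e.g. RKCAN_ERROR_CODE(reg_ec, TX_ACK) evaluates to "TX_ACK " if
+ * RKCANFD_REG_ERROR_CODE_TX_ACK is set in reg_ec, and to "" otherwise.
+ */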
+
+static void
+rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf,
+ const u32 reg_ec)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int type;
+ u32 reg_state, reg_cmd;
+
+ type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec);
+ reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD);
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+
+ netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n",
+ rkcanfd_get_error_type_str(type),
+ reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX",
+ reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? "Data" : "Arbitration",
+ RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD),
+ RKCAN_ERROR_CODE(reg_ec, TX_ERROR),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF),
+ RKCAN_ERROR_CODE(reg_ec, TX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, TX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC),
+ RKCAN_ERROR_CODE(reg_ec, TX_IDLE),
+ RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT),
+ RKCAN_ERROR_CODE(reg_ec, RX_SPACE),
+ RKCAN_ERROR_CODE(reg_ec, RX_EOF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, RX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, RX_DLC),
+ RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI),
+ RKCAN_ERROR_CODE(reg_ec, RX_RES),
+ RKCAN_ERROR_CODE(reg_ec, RX_FDF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR),
+ RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE),
+ RKCAN_ERROR_CODE(reg_ec, RX_IDLE),
+ reg_ec, reg_cmd,
+ !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE));
+
+ priv->can.can_stats.bus_error++;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX)
+ stats->rx_errors++;
+ else
+ stats->tx_errors++;
+
+ if (!cf)
+ return;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) {
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR)
+ cf->data[3] = CAN_ERR_PROT_LOC_RTR;
+ /* RKCANFD_REG_ERROR_CODE_RX_FDF */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES)
+ cf->data[3] = CAN_ERR_PROT_LOC_RES0;
+ /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_DLC;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT)
+ cf->data[3] = CAN_ERR_PROT_LOC_INTERM;
+ } else {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ /* RKCANFD_REG_ERROR_CODE_TX_ERROR */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) {
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_BIT):
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_STUFF):
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_FORM):
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK):
+ cf->can_id |= CAN_ERR_ACK;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_CRC):
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+}
+
+static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf = NULL;
+ u32 reg_ec, timestamp;
+ struct sk_buff *skb;
+ int err;
+
+ reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE);
+
+ if (!reg_ec)
+ return 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (cf) {
+ struct can_berr_counter bec;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ }
+
+ rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec);
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ enum can_state new_state, rx_state, tx_state;
+ struct net_device *ndev = priv->ndev;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+	if (!skb)
+		return 0;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+#define rkcanfd_handle(priv, irq, ...) \
+({ \
+ struct rkcanfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \
+ __stringify(irq), ERR_PTR(err)); \
+ err; \
+})
+
+static irqreturn_t rkcanfd_irq(int irq, void *dev_id)
+{
+ struct rkcanfd_priv *priv = dev_id;
+ u32 reg_int_unmasked, reg_int;
+
+ reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT);
+ reg_int = reg_int_unmasked & ~priv->reg_int_mask_default;
+
+ if (!reg_int)
+ return IRQ_NONE;
+
+	/* First ACK then handle, to avoid a lost-IRQ race condition on
+	 * fast re-occurring interrupts: an event that fires between the
+	 * handling and a late ACK would be cleared without ever being
+	 * processed.
+	 */
+ rkcanfd_write(priv, RKCANFD_REG_INT, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT)
+ rkcanfd_handle(priv, rx_int);
+
+ if (reg_int & RKCANFD_REG_INT_ERROR_INT)
+ rkcanfd_handle(priv, error_int);
+
+ if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT |
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT |
+ RKCANFD_REG_INT_ERROR_WARNING_INT) ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE)
+ rkcanfd_handle(priv, state_error_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT)
+ rkcanfd_handle(priv, rx_fifo_overflow_int);
+
+ if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR |
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT |
+ RKCANFD_REG_INT_RX_FINISH_INT))
+ netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_WAKEUP_INT)
+ netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT)
+ netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT)
+ netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__);
+
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int rkcanfd_open(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = open_candev(ndev);
+ if (err)
+ return err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_close_candev;
+
+ rkcanfd_chip_start(priv);
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv);
+ if (err)
+ goto out_rkcanfd_chip_stop;
+
+ rkcanfd_chip_interrupts_enable(priv);
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_rkcanfd_chip_stop:
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+out_close_candev:
+ close_candev(ndev);
+ return err;
+}
+
+static int rkcanfd_stop(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops rkcanfd_netdev_ops = {
+ .ndo_open = rkcanfd_open,
+ .ndo_stop = rkcanfd_stop,
+ .ndo_start_xmit = rkcanfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(priv->clks_num, priv->clks);
+
+ return 0;
+}
+
+static int __maybe_unused rkcanfd_runtime_resume(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(priv->clks_num, priv->clks);
+}
+
+static void rkcanfd_register_done(const struct rkcanfd_priv *priv)
+{
+ u32 dev_id;
+
+ dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION);
+
+ netdev_info(priv->ndev,
+ "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n",
+ rkcanfd_get_model_str(priv),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id),
+ priv->devtype_data.quirks);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 &&
+ priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN)
+ netdev_info(priv->ndev,
+ "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n",
+ priv->can.clock.freq / MEGA,
+ RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA);
+}
+
+static int rkcanfd_register(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ pm_runtime_enable(ndev->dev.parent);
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_pm_runtime_disable;
+
+ rkcanfd_ethtool_init(priv);
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put_sync;
+
+ rkcanfd_register_done(priv);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+out_pm_runtime_put_sync:
+ pm_runtime_put_sync(ndev->dev.parent);
+out_pm_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+
+ return err;
+}
+
+static inline void rkcanfd_unregister(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id rkcanfd_of_match[] = {
+ {
+ .compatible = "rockchip,rk3568v2-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v2,
+ }, {
+ .compatible = "rockchip,rk3568v3-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v3,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, rkcanfd_of_match);
+
+static int rkcanfd_probe(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv;
+ struct net_device *ndev;
+ const void *match;
+ int err;
+
+ ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
+ goto out_free_candev;
+ }
+
+ priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks);
+ if (priv->clks_num < 0) {
+ err = priv->clks_num;
+ goto out_free_candev;
+ }
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_free_candev;
+ }
+
+ priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(priv->reset)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "Failed to get reset line\n");
+ goto out_free_candev;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->netdev_ops = &rkcanfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+
+	platform_set_drvdata(pdev, priv);
+
+	/* Assign devtype_data before it is checked for
+	 * RKCANFD_QUIRK_CANFD_BROKEN below.
+	 */
+	match = device_get_match_data(&pdev->dev);
+	if (match)
+		priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+
+	priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
+	priv->can.bittiming_const = &rkcanfd_bittiming_const;
+	priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+		CAN_CTRLMODE_BERR_REPORTING;
+	if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+	priv->can.do_set_mode = rkcanfd_set_mode;
+	priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
+	priv->ndev = ndev;
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ RKCANFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = rkcanfd_register(priv);
+ if (err)
+ goto out_can_rx_offload_del;
+
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+
+ return err;
+}
+
+static void rkcanfd_remove(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ can_rx_offload_del(&priv->offload);
+ rkcanfd_unregister(priv);
+ free_candev(ndev);
+}
+
+static const struct dev_pm_ops rkcanfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend,
+ rkcanfd_runtime_resume, NULL)
+};
+
+static struct platform_driver rkcanfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &rkcanfd_pm_ops,
+ .of_match_table = rkcanfd_of_match,
+ },
+ .probe = rkcanfd_probe,
+ .remove = rkcanfd_remove,
+};
+module_platform_driver(rkcanfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Rockchip CAN-FD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
new file mode 100644
index 000000000000..5aeeef64a67a
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "rockchip_canfd.h"
+
+enum rkcanfd_stats_type {
+ RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS,
+ RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS,
+};
+
+static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = {
+ [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors",
+ [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors",
+};
+
+static void
+rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, rkcanfd_stats_strings,
+ sizeof(rkcanfd_stats_strings));
+ }
+}
+
+static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rkcanfd_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ struct rkcanfd_stats *rkcanfd_stats;
+ unsigned int start;
+
+ rkcanfd_stats = &priv->stats;
+
+ do {
+ start = u64_stats_fetch_begin(&rkcanfd_stats->syncp);
+
+ data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors);
+ data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors);
+ } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start));
+}
+
+static const struct ethtool_ops rkcanfd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .get_strings = rkcanfd_ethtool_get_strings,
+ .get_sset_count = rkcanfd_ethtool_get_sset_count,
+ .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats,
+};
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv)
+{
+ priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops;
+
+ u64_stats_init(&priv->stats.syncp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c
new file mode 100644
index 000000000000..475c0409e215
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1,
+ const struct canfd_frame *const cfd2,
+ const bool is_canfd)
+{
+ const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF;
+ canid_t mask = CAN_EFF_FLAG;
+
+ if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len))
+ return false;
+
+ if (!is_canfd)
+ mask |= CAN_RTR_FLAG;
+
+ if (cfd1->can_id & CAN_EFF_FLAG)
+ mask |= CAN_EFF_MASK;
+ else
+ mask |= CAN_SFF_MASK;
+
+ if ((cfd1->can_id & mask) != (cfd2->can_id & mask))
+ return false;
+
+ if (is_canfd &&
+ (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags))
+ return false;
+
+ return true;
+}
+
+static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1,
+ const struct canfd_frame *cfd2,
+ const bool is_canfd)
+{
+ u8 len;
+
+ if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG))
+ return true;
+
+ len = canfd_sanitize_len(cfd1->len);
+
+ return !memcmp(cfd1->data, cfd2->data, len);
+}
+
+static unsigned int
+rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv,
+ const struct rkcanfd_fifo_header *header,
+ struct canfd_frame *cfd)
+{
+ unsigned int len = sizeof(*cfd) - sizeof(cfd->data);
+ u8 dlc;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT)
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) |
+ CAN_EFF_FLAG;
+ else
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id);
+
+ dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ header->frameinfo);
+
+ /* CAN-FD */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) {
+ cfd->len = can_fd_dlc2len(dlc);
+
+ /* The cfd is not allocated by alloc_canfd_skb(), so
+ * set CANFD_FDF here.
+ */
+ cfd->flags |= CANFD_FDF;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = can_cc_dlc2len(dlc);
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) {
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ return len;
+ }
+ }
+
+ return len + cfd->len;
+}
+
+static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv,
+ const struct canfd_frame *cfd_rx, const u32 ts,
+ bool *tx_done)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+ const struct canfd_frame *cfd_nominal;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return -ENOMSG;
+ }
+ cfd_nominal = (struct canfd_frame *)skb->data;
+
+ /* We RX'ed a frame identical to our pending TX frame. */
+ if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF) &&
+ rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF)) {
+ unsigned int frame_len;
+
+ rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
+
+ WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1);
+ netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_START_THRESHOLD);
+
+ *tx_done = true;
+
+ return 0;
+ }
+
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6))
+ return 0;
+
+	/* Erratum 6: Extended frames may be sent as standard frames.
+ *
+ * Not affected if:
+ * - TX'ed a standard frame -or-
+ * - RX'ed an extended frame
+ */
+ if (!(cfd_nominal->can_id & CAN_EFF_FLAG) ||
+ (cfd_rx->can_id & CAN_EFF_FLAG))
+ return 0;
+
+ /* Not affected if:
+	 * - the standard part and RTR flag of the TX'ed frame
+	 *   are not equal to the CAN-ID and RTR flag of the RX'ed frame.
+ */
+ if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) !=
+ (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)))
+ return 0;
+
+ /* Not affected if:
+ * - length is not the same
+ */
+ if (cfd_nominal->len != cfd_rx->len)
+ return 0;
+
+ /* Not affected if:
+ * - the data of non RTR frames is different
+ */
+ if (!(cfd_nominal->can_id & CAN_RTR_FLAG) &&
+ memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len))
+ return 0;
+
+ /* Affected by Erratum 6 */
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ /* Manual handling of CAN Bus Error counters. See
+	 * rkcanfd_get_berr_counter_corrected() for a detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ *tx_done = true;
+
+ stats->tx_packets++;
+ stats->tx_errors++;
+
+ rkcanfd_xmit_retry(priv);
+
+ return 0;
+}
+
+static inline bool
+rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+{
+ /* Erratum 5: If the FIFO is empty, we read the same value for
+ * all elements.
+ */
+ return header->frameinfo == header->id &&
+ header->frameinfo == header->ts;
+}
+
+static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame cfd[1] = { }, *skb_cfd;
+ struct rkcanfd_fifo_header header[1] = { };
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ /* read header into separate struct and convert it later */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ header, sizeof(*header));
+ /* read data directly into cfd */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ cfd->data, sizeof(cfd->data));
+
+ /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */
+ if (rkcanfd_fifo_header_empty(header)) {
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ return 0;
+ }
+
+ len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd);
+
+ /* Drop any received CAN-FD frames if CAN-FD mode is not
+ * requested.
+ */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ if (rkcanfd_get_tx_pending(priv)) {
+ bool tx_done = false;
+
+ err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done);
+ if (err)
+ return err;
+ if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK))
+ return 0;
+ }
+
+ /* Manual handling of CAN Bus Error counters. See
+	 * rkcanfd_get_berr_counter_corrected() for a detailed
+ * explanation.
+ */
+ if (priv->bec.rxerr)
+ priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD,
+ priv->bec.rxerr) - 1;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &skb_cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ memcpy(skb_cfd, cfd, len);
+ rkcanfd_skb_set_timestamp(priv, skb, header->ts);
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline unsigned int
+rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv)
+{
+ const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+
+ return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg);
+}
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv)
+{
+ unsigned int len;
+ int err;
+
+ while ((len = rkcanfd_rx_fifo_get_len(priv))) {
+ err = rkcanfd_handle_rx_int_one(priv);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
new file mode 100644
index 000000000000..43d4b5721812
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/clocksource.h>
+
+#include "rockchip_canfd.h"
+
+static u64 rkcanfd_timestamp_read(const struct cyclecounter *cc)
+{
+ const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);
+
+ return rkcanfd_get_timestamp(priv);
+}
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, timestamp);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void rkcanfd_timestamp_work(struct work_struct *work)
+{
+ const struct delayed_work *delayed_work = to_delayed_work(work);
+ struct rkcanfd_priv *priv;
+
+ priv = container_of(delayed_work, struct rkcanfd_priv, timestamp);
+ timecounter_read(&priv->tc);
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ struct cyclecounter *cc = &priv->cc;
+ u32 bitrate, div, reg, rate;
+ u64 work_delay_ns;
+ u64 max_cycles;
+
+	/* At the standard clock rate of 300 MHz on the rk3568, the 32
+	 * bit timer overflows every 14s. This means that we have to
+	 * poll it quite often to avoid missing a wraparound.
+ *
+ * Divide it down to a reasonable rate, at least twice the bit
+ * rate.
+ */
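+	/* Sanity check of the 14s figure: 2^32 / 300 MHz =
+	 * 4294967296 / 300000000 Hz ~= 14.3 s between wraparounds at
+	 * the full clock rate.
+	 */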
+ bitrate = max(bt->bitrate, dbt->bitrate);
+ div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2),
+ FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1);
+
+ reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE,
+ div - 1) |
+ RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg);
+
+ cc->read = rkcanfd_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+
+ rate = priv->can.clock.freq / div;
+ clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC,
+ RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC);
+
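+	/* Largest time span the mult/shift arithmetic can convert
+	 * without overflow, capped at one counter wrap. Schedule the
+	 * refresh work at a third of that period (the "3u" below), so
+	 * a single delayed work run cannot miss a wraparound.
+	 */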
+ max_cycles = div_u64(ULLONG_MAX, cc->mult);
+ max_cycles = min(max_cycles, cc->mask);
+ work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift);
+ priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ);
+ INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);
+
+ netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n",
+ priv->can.clock.freq / MEGA,
+ priv->can.clock.freq % MEGA / KILO / 10,
+ bitrate / MEGA,
+		   bitrate % MEGA / KILO / 10,
+ div,
+ rate / MEGA,
+ rate % MEGA / KILO / 10,
+ cc->mult, cc->shift,
+ priv->work_delay_jiffies / HZ);
+}
+
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work(&priv->timestamp);
+}
+
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->timestamp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
new file mode 100644
index 000000000000..865a15e033a9
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv)
+{
+ const struct canfd_frame *cfd;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ if (!rkcanfd_get_tx_pending(priv))
+ return false;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return false;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ return cfd->can_id & CAN_EFF_FLAG;
+}
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 &&
+ rkcanfd_tx_tail_is_eff(priv))
+ return 0;
+
+ return rkcanfd_get_tx_free(priv);
+}
+
+static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv,
+ const u32 reg_cmd)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default |
+ RKCANFD_REG_MODE_SPACE_RX_MODE);
+
+ rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv)
+{
+ const unsigned int tx_head = rkcanfd_get_tx_head(priv);
+ const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+}
+
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ u32 reg_frameinfo, reg_id, reg_cmd;
+ unsigned int tx_head, frame_len;
+ const struct canfd_frame *cfd;
+ int err;
+ u8 i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (!netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD)) {
+ if (net_ratelimit())
+ netdev_info(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n",
+ priv->tx_head, priv->tx_tail,
+ rkcanfd_get_tx_pending(priv));
+
+ return NETDEV_TX_BUSY;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id);
+ } else {
+ reg_frameinfo = 0;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id);
+ }
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR;
+
+ if (can_is_canfd_skb(skb)) {
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS;
+
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ can_fd_len2dlc(cfd->len));
+ } else {
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ cfd->len);
+ }
+
+ tx_head = rkcanfd_get_tx_head(priv);
+ reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo);
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id);
+ for (i = 0; i < cfd->len; i += 4)
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i,
+ *(u32 *)(cfd->data + i));
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ WRITE_ONCE(priv->tx_head, priv->tx_head + 1);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+
+ netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD);
+
+ return NETDEV_TX_OK;
+}
+
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int tx_tail;
+ struct sk_buff *skb;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+
+ /* Manual handling of CAN Bus Error counters. See
+ * rkcanfd_get_corrected_berr_counter() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tx_tail, ts,
+ frame_len_p);
+ stats->tx_packets++;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h
new file mode 100644
index 000000000000..93131c7d7f54
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2023, 2024 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _ROCKCHIP_CANFD_H
+#define _ROCKCHIP_CANFD_H
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/skbuff.h>
+#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/units.h>
+
+#define RKCANFD_REG_MODE 0x000
+#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15)
+#define RKCANFD_REG_MODE_DPEE BIT(14)
+#define RKCANFD_REG_MODE_BRSD BIT(13)
+#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12)
+#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11)
+#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10)
+#define RKCANFD_REG_MODE_OVLD_MODE BIT(9)
+#define RKCANFD_REG_MODE_COVER_MODE BIT(8)
+#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7)
+#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6)
+#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5)
+#define RKCANFD_REG_MODE_LBACK_MODE BIT(4)
+#define RKCANFD_REG_MODE_SILENT_MODE BIT(3)
+#define RKCANFD_REG_MODE_SELF_TEST BIT(2)
+#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1)
+#define RKCANFD_REG_MODE_WORK_MODE BIT(0)
+
+#define RKCANFD_REG_CMD 0x004
+#define RKCANFD_REG_CMD_TX1_REQ BIT(1)
+#define RKCANFD_REG_CMD_TX0_REQ BIT(0)
+#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i))
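+/* e.g. RKCANFD_REG_CMD_TX_REQ(0) == RKCANFD_REG_CMD_TX0_REQ and
+ * RKCANFD_REG_CMD_TX_REQ(1) == RKCANFD_REG_CMD_TX1_REQ.
+ */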
+
+#define RKCANFD_REG_STATE 0x008
+#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6)
+#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5)
+#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4)
+#define RKCANFD_REG_STATE_TX_PERIOD BIT(3)
+#define RKCANFD_REG_STATE_RX_PERIOD BIT(2)
+#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1)
+#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0)
+
+#define RKCANFD_REG_INT 0x00c
+#define RKCANFD_REG_INT_WAKEUP_INT BIT(14)
+#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13)
+#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12)
+#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11)
+#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10)
+#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9)
+#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8)
+#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7)
+#define RKCANFD_REG_INT_ERROR_INT BIT(6)
+#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5)
+#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4)
+#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3)
+#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2)
+#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1)
+#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0)
+
+#define RKCANFD_REG_INT_ALL \
+ (RKCANFD_REG_INT_WAKEUP_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_OV_INT | \
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \
+ RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \
+ RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \
+ RKCANFD_REG_INT_RX_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_OVERLOAD_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT | \
+ RKCANFD_REG_INT_TX_FINISH_INT | \
+ RKCANFD_REG_INT_RX_FINISH_INT)
+
+#define RKCANFD_REG_INT_ALL_ERROR \
+ (RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT)
+
+#define RKCANFD_REG_INT_MASK 0x010
+
+#define RKCANFD_REG_DMA_CTL 0x014
+#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1)
+#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(0)
+
+#define RKCANFD_REG_BITTIMING 0x018
+#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16)
+#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14)
+#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8)
+#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4)
+#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0)
+
+#define RKCANFD_REG_ARBITFAIL 0x028
+#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0)
+
+/* Register seems to be clear or read */
+#define RKCANFD_REG_ERROR_CODE 0x02c
+#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29)
+#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26)
+#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0
+#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1
+#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2
+#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3
+#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4
+#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25)
+#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16)
+#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24)
+#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21)
+#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20)
+#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19)
+#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18)
+#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17)
+#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16)
+#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0)
+#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15)
+#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14)
+#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9)
+#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8)
+#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7)
+#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6)
+#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5)
+#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4)
+#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3)
+#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2)
+#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1)
+#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0)
+
+#define RKCANFD_REG_ERROR_CODE_NOACK \
+ (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \
+ RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \
+ RKCANFD_REG_ERROR_CODE_RX_ACK)
+
+#define RKCANFD_REG_RXERRORCNT 0x034
+#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0)
+
+#define RKCANFD_REG_TXERRORCNT 0x038
+#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0)
+
+#define RKCANFD_REG_IDCODE 0x03c
+#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0)
+#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_IDMASK 0x040
+
+#define RKCANFD_REG_TXFRAMEINFO 0x050
+#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_TXID 0x054
+#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_TXDATA0 0x058
+#define RKCANFD_REG_TXDATA1 0x05C
+#define RKCANFD_REG_RXFRAMEINFO 0x060
+#define RKCANFD_REG_RXID 0x064
+#define RKCANFD_REG_RXDATA0 0x068
+#define RKCANFD_REG_RXDATA1 0x06c
+
+#define RKCANFD_REG_RTL_VERSION 0x070
+#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4)
+#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0)
+
+#define RKCANFD_REG_FD_DATA_BITTIMING 0x104
+#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21)
+#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17)
+#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0)
+
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1)
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c
+/* datasheet says 6:1, which is wrong */
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1)
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP 0x110
+
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_RX_FIFO_CTRL 0x118
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_AFC_CTRL 0x11c
+#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4)
+#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3)
+#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2)
+#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1)
+#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0)
+
+#define RKCANFD_REG_IDCODE0 0x120
+#define RKCANFD_REG_IDMASK0 0x124
+#define RKCANFD_REG_IDCODE1 0x128
+#define RKCANFD_REG_IDMASK1 0x12c
+#define RKCANFD_REG_IDCODE2 0x130
+#define RKCANFD_REG_IDMASK2 0x134
+#define RKCANFD_REG_IDCODE3 0x138
+#define RKCANFD_REG_IDMASK3 0x13c
+#define RKCANFD_REG_IDCODE4 0x140
+#define RKCANFD_REG_IDMASK4 0x144
+
+#define RKCANFD_REG_FD_TXFRAMEINFO 0x200
+#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5)
+#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4)
+#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_TXID 0x204
+#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0)
+#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0)
+
+#define RKCANFD_REG_FD_TXDATA0 0x208
+#define RKCANFD_REG_FD_TXDATA1 0x20c
+#define RKCANFD_REG_FD_TXDATA2 0x210
+#define RKCANFD_REG_FD_TXDATA3 0x214
+#define RKCANFD_REG_FD_TXDATA4 0x218
+#define RKCANFD_REG_FD_TXDATA5 0x21c
+#define RKCANFD_REG_FD_TXDATA6 0x220
+#define RKCANFD_REG_FD_TXDATA7 0x224
+#define RKCANFD_REG_FD_TXDATA8 0x228
+#define RKCANFD_REG_FD_TXDATA9 0x22c
+#define RKCANFD_REG_FD_TXDATA10 0x230
+#define RKCANFD_REG_FD_TXDATA11 0x234
+#define RKCANFD_REG_FD_TXDATA12 0x238
+#define RKCANFD_REG_FD_TXDATA13 0x23c
+#define RKCANFD_REG_FD_TXDATA14 0x240
+#define RKCANFD_REG_FD_TXDATA15 0x244
+
+#define RKCANFD_REG_FD_RXFRAMEINFO 0x300
+#define RKCANFD_REG_FD_RXID 0x304
+#define RKCANFD_REG_FD_RXTIMESTAMP 0x308
+#define RKCANFD_REG_FD_RXDATA0 0x30c
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from a
+ * FIFO that is already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To work around this problem, check for an empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ *
+ * To reproduce:
+ * host:
+ * canfdtest -evx -g can0
+ * candump any,0:80000000 -cexdtA
+ * dut:
+ * canfdtest -evx can0
+ * ethtool -S can0
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5)
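+
+/* Illustrative decision flow for the erratum 6 workaround described
+ * above (an assumed sketch, not the actual rkcanfd_rxstx_filter()
+ * body):
+ *
+ *	if (!rkcanfd_get_tx_pending(priv))
+ *		hand the frame to the stack as a regular RX frame;
+ *	else if (the frame matches the oldest pending echo skb)
+ *		call rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
+ *	else if (it matches except for a mangled CAN-ID)
+ *		re-send the original extended frame;
+ */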
+
+/* Erratum 7: In the passive error state, the CAN controller's
+ * interframe space segment counting is inaccurate.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6)
+
+/* Erratum 8: The Format-Error error flag is transmitted one bit
+ * later.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7)
+
+/* Erratum 9: In the arbitration segment, the CAN controller will
+ * identify stuffing errors as arbitration failures.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8)
+
+/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9)
+
+/* Erratum 11: Arbitration error. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10)
+
+/* Erratum 12: A dominant bit at the third bit of the intermission may
+ * cause a transmission error.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11)
+
+/* Tests on the rk3568v2 and rk3568v3 show that receiving certain
+ * CAN-FD frames triggers an Error Interrupt.
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##01f
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##07217010000000000
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ */
+#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12)
+
+/* Known issues with the rk3568v3:
+ *
+ * - Overload situation during high bus load
+ * To reproduce:
+ * host:
+ * # add a 2nd CAN adapter to the CAN bus
+ * cangen can0 -I 1 -Li -Di -p10 -g 0.3
+ * cansequence -rve
+ * DUT:
+ * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e
+ * cansequence -rv -i 1
+ *
+ * - TX starvation after repeated Bus-Off
+ * To reproduce:
+ * host:
+ * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0
+ * DUT:
+ * cangen can0 -I2 -Li -Di -p10 -g 0.05
+ */
+
+enum rkcanfd_model {
+ RKCANFD_MODEL_RK3568V2 = 0x35682,
+ RKCANFD_MODEL_RK3568V3 = 0x35683,
+};
+
+struct rkcanfd_devtype_data {
+ enum rkcanfd_model model;
+ u32 quirks;
+};
+
+struct rkcanfd_fifo_header {
+ u32 frameinfo;
+ u32 id;
+ u32 ts;
+};
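+
+/* A minimal sketch of the empty-FIFO check named in the erratum 5
+ * comment above (the helper name comes from that comment; the body
+ * here is an assumed illustration): on an empty FIFO all header
+ * words read back as the same value.
+ */
+static inline bool
+rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+{
+	return header->frameinfo == header->id &&
+		header->frameinfo == header->ts;
+}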
+
+struct rkcanfd_stats {
+ struct u64_stats_sync syncp;
+
+ /* Erratum 5 */
+ u64_stats_t rx_fifo_empty_errors;
+
+ /* Erratum 6 */
+ u64_stats_t tx_extended_as_standard_errors;
+};
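+
+/* Illustrative update of the counters above, using the standard
+ * u64_stats_sync API (a sketch, not taken from the original patch):
+ *
+ *	u64_stats_update_begin(&priv->stats.syncp);
+ *	u64_stats_inc(&priv->stats.rx_fifo_empty_errors);
+ *	u64_stats_update_end(&priv->stats.syncp);
+ */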
+
+struct rkcanfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ void __iomem *regs;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ u32 reg_mode_default;
+ u32 reg_int_mask_default;
+ struct rkcanfd_devtype_data devtype_data;
+
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work timestamp;
+ unsigned long work_delay_jiffies;
+
+ struct can_berr_counter bec;
+
+ struct rkcanfd_stats stats;
+
+ struct reset_control *reset;
+ struct clk_bulk_data *clks;
+ int clks_num;
+};
+
+static inline u32
+rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void
+rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg,
+ void *buf, unsigned int len)
+{
+ readsl(priv->regs + reg, buf, len / sizeof(u32));
+}
+
+static inline void
+rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+static inline u32
+rkcanfd_get_timestamp(const struct rkcanfd_priv *priv)
+{
+ return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_head(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_free(const struct rkcanfd_priv *priv)
+{
+ return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv);
+}
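+
+/* tx_head and tx_tail only ever increase, so the unsigned subtraction
+ * in rkcanfd_get_tx_pending() stays correct across wraparound. An
+ * illustrative consumer, assuming the stop threshold defined earlier:
+ *
+ *	if (rkcanfd_get_tx_free(priv) <= RKCANFD_TX_STOP_THRESHOLD)
+ *		netif_stop_queue(ndev);
+ */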
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv);
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv);
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp);
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv);
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv);
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv);
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p);
+
+#endif
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index fca5a9a1d857..2d1f715459d7 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -245,7 +245,7 @@ static void sja1000_isa_remove(struct platform_device *pdev)
static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
- .remove_new = sja1000_isa_remove,
+ .remove = sja1000_isa_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 33f0e46ab1c2..c42ebe9da55a 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -329,7 +329,7 @@ static void sp_remove(struct platform_device *pdev)
static struct platform_driver sp_driver = {
.probe = sp_probe,
- .remove_new = sp_remove,
+ .remove = sp_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sp_of_table,
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index bd25137062c5..278ee8722770 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -854,7 +854,7 @@ static struct platform_driver softing_driver = {
.name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
- .remove_new = softing_pdev_remove,
+ .remove = softing_pdev_remove,
};
module_platform_driver(softing_driver);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index ab8d01784686..360158c295d3 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -914,7 +914,7 @@ static struct platform_driver sun4i_can_driver = {
.of_match_table = sun4ican_of_match,
},
.probe = sun4ican_probe,
- .remove_new = sun4ican_remove,
+ .remove = sun4ican_remove,
};
module_platform_driver(sun4i_can_driver);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5aab440074c6..644e8b8eb91e 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1025,7 +1025,7 @@ static struct platform_driver ti_hecc_driver = {
.of_match_table = ti_hecc_dt_ids,
},
.probe = ti_hecc_probe,
- .remove_new = ti_hecc_remove,
+ .remove = ti_hecc_remove,
.suspend = ti_hecc_suspend,
.resume = ti_hecc_resume,
};
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 3e1fba12c0c3..9dae0c71a2e1 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -17,11 +17,12 @@ config CAN_EMS_USB
config CAN_ESD_USB
tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver adds supports for several CAN/USB interfaces
+ This driver adds support for several CAN/USB interfaces
from esd electronics gmbh (https://www.esd.eu).
The drivers supports the following devices:
- esd CAN-USB/2
+ - esd CAN-USB/3-FD
- esd CAN-USB/Micro
To compile this driver as a module, choose M here: the module
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 41a0e4261d15..03ad10b01867 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -3,7 +3,7 @@
* CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
*
* Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
#include <linux/can.h>
@@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
-
baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
@@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
case ESD_USB_CANUSB3_PRODUCT_ID:
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index ff10b3790d84..078496d9b7ba 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -22,6 +22,8 @@
*/
#include <linux/completion.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
@@ -39,7 +41,6 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
-#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -68,6 +69,7 @@ struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
struct kvaser_usb_dev_card_data_hydra hydra;
+ u32 usbcan_timestamp_msb;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -216,4 +218,26 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev);
extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ u64 ticks)
+{
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ const __le16 *timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp[0]) |
+ (u64)(le16_to_cpu(timestamp[1])) << 16 |
+ (u64)(le16_to_cpu(timestamp[2])) << 32;
+
+ return kvaser_usb_ticks_to_ktime(cfg, ticks);
+}
+
+static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ __le64 timestamp)
+{
+ return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
+}
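+
+/* Worked example for the conversion above, assuming timestamp_freq is
+ * given in MHz (the leaf dev_cfg tables in this series use 16/24/32):
+ * at 24 MHz, 48000 ticks * 1000 / 24 = 2000000 ns, i.e. 2 ms.
+ */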
+
#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index daa34b532aa8..7d12776ab63e 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -94,7 +94,7 @@
#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .quirks = 0,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -756,23 +756,12 @@ freeurb:
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
- .get_ts_info = ethtool_op_get_ts_info,
-};
-
-static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
@@ -859,13 +848,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
- if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
- netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
- } else {
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
- }
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
@@ -915,10 +898,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
ops = driver_info->ops;
err = ops->dev_setup_endpoints(dev);
- if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
dev->udev = interface_to_usbdev(intf);
@@ -929,26 +910,20 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
err = ops->dev_init_card(dev);
- if (err) {
- dev_err(&intf->dev,
- "Failed to initialize card, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
err = ops->dev_get_software_info(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software info, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
if (ops->dev_get_software_details) {
err = ops->dev_get_software_details(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software details, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
}
if (WARN_ON(!dev->cfg))
@@ -962,18 +937,16 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = ops->dev_get_card_info(dev);
- if (err) {
- dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
if (ops->dev_get_capabilities) {
err = ops->dev_get_capabilities(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get capabilities, error %d\n", err);
kvaser_usb_remove_interfaces(dev);
- return err;
+ return dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
}
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c7ba768dfe17..3764b263add3 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -10,7 +10,6 @@
* - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
* reported after a call to do_get_berr_counter(), since firmware does not
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
- * - Hardware timestamps are not set for CAN Tx frames.
*/
#include <linux/completion.h>
@@ -261,6 +260,15 @@ struct kvaser_cmd_tx_can {
u8 reserved[11];
} __packed;
+struct kvaser_cmd_tx_ack {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 reserved0[8];
+} __packed;
+
struct kvaser_cmd_header {
u8 cmd_no;
/* The destination HE address is stored in 0..5 of he_addr.
@@ -297,6 +305,7 @@ struct kvaser_cmd {
struct kvaser_cmd_rx_can rx_can;
struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_tx_ack tx_ack;
} __packed;
} __packed;
@@ -522,23 +531,25 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
return priv;
}
-static ktime_t
-kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
- const struct kvaser_cmd *cmd)
+static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
{
- u64 ticks;
+ ktime_t hwtstamp = 0;
if (cmd->header.cmd_no == CMD_EXTENDED) {
struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
- ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
- } else {
- ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp);
+ else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp);
+ } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp);
+ } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp);
}
- return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+ return hwtstamp;
}
static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
@@ -1175,6 +1186,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
bool one_shot_fail = false;
bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct sk_buff *skb;
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
if (!priv)
@@ -1201,6 +1213,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
@@ -1234,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
flags = cmd->rx_can.flags;
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1302,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
KVASER_USB_KCAN_DATA_DLC_SHIFT;
flags = le32_to_cpu(cmd->rx_can.flags);
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 23bd7574b1c7..6b9122ab1464 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -119,6 +119,10 @@
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* USBCanII timestamp */
+#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16)
+#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10
+
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
@@ -235,6 +239,20 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
+struct leaf_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 padding[2];
+} __packed;
+
+struct usbcan_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time;
+ u8 padding[2];
+} __packed;
+
struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
@@ -281,6 +299,12 @@ struct usbcan_cmd_error_event {
__le16 padding;
} __packed;
+struct usbcan_cmd_clk_overflow_event {
+ u8 tid;
+ u8 padding;
+ __le32 time;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -347,6 +371,7 @@ struct kvaser_cmd {
struct leaf_cmd_error_event error_event;
struct kvaser_cmd_cap_req cap_req;
struct kvaser_cmd_cap_res cap_res;
+ struct leaf_cmd_tx_acknowledge tx_ack;
} __packed leaf;
union {
@@ -355,6 +380,8 @@ struct kvaser_cmd {
struct usbcan_cmd_chip_state_event chip_state_event;
struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
+ struct usbcan_cmd_tx_acknowledge tx_ack;
+ struct usbcan_cmd_clk_overflow_event clk_overflow_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
@@ -370,7 +397,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
@@ -388,15 +415,14 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
- /* ignored events: */
- [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -463,11 +489,27 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
@@ -475,7 +517,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -483,7 +525,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 24,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -491,10 +533,19 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev,
+ __le16 timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp) |
+ dev->card_data.usbcan_timestamp_msb;
+
+ return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR);
+}
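+
+/* Worked example for the reconstruction above (illustrative values):
+ * with usbcan_timestamp_msb = 0x00010000 latched from a clock-overflow
+ * event and a 16-bit hardware timestamp of 0x1234, ticks = 0x00011234,
+ * which the factor of 10 then rescales before the ktime conversion.
+ */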
+
static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -678,8 +729,19 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
+ * However, the reported frequency is used for timestamps
*/
- dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
+ break;
+ }
} else {
switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
@@ -880,6 +942,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
+ struct sk_buff *skb;
+ ktime_t hwtstamp = 0;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
@@ -901,14 +965,14 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
- struct sk_buff *skb;
+ struct sk_buff *err_skb;
struct can_frame *cf;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
+ err_skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (err_skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx(err_skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -919,9 +983,20 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time);
+ break;
+ case KVASER_USBCAN:
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time);
+ break;
+ }
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->tx_packets++;
stats->tx_bytes += can_get_echo_skb(priv->netdev,
context->echo_index, NULL);
@@ -1299,6 +1374,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
+ ktime_t hwtstamp = 0;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
@@ -1329,9 +1405,11 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time);
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time);
break;
}
@@ -1375,6 +1453,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
memcpy(cf->data, &rx_data[6], cf->len);
}
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
@@ -1508,7 +1587,7 @@ static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
complete(&priv->get_busparams_comp);
}
-static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
@@ -1554,12 +1633,15 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_get_busparams_reply(dev, cmd);
break;
- /* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
+ dev->card_data.usbcan_timestamp_msb =
+ le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) &
+ KVASER_USB_USBCAN_CLK_OVERFLOW_MASK;
break;
+ /* Ignored commands */
case CMD_FLUSH_QUEUE_REPLY:
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 3d68fef46ded..59f7cd8ceb39 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -901,11 +901,8 @@ int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index d944911d7f05..436c0e4b0344 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2103,7 +2103,7 @@ static void xcan_remove(struct platform_device *pdev)
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
- .remove_new = xcan_remove,
+ .remove = xcan_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &xcan_dev_pm_ops,
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 897e5e8b3d69..31d070bf161a 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -343,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
- if (ret) {
- dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&mdiodev->dev, ret,
+ "failed to register switch\n");
return ret;
}
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c1b906c05a02..12a86585a77f 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,14 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
- tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
+ tristate "Microchip KSZ8XXX/KSZ9XXX/LAN937X series switch support"
depends on NET_DSA
select NET_DSA_TAG_KSZ
select NET_DSA_TAG_NONE
select NET_IEEE8021Q_HELPERS
select DCB
help
- This driver adds support for Microchip KSZ9477 series switch and
- KSZ8795/KSZ88x3 switch chips.
+ This driver adds support for Microchip KSZ8, KSZ9 and
+ LAN937X series switch chips, namely KSZ8863/8873,
+ KSZ8895/8864, KSZ8794/8795/8765,
+ KSZ9477/9897/9896/9567/8567, KSZ9893/9563/8563 and
+ LAN9370/9371/9372/9373/9374.
config NET_DSA_MICROCHIP_KSZ9477_I2C
tristate "KSZ series I2C connected switch driver"
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 1cfba1ec9355..9347cfb3d0b5 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
ksz_switch-objs := ksz_common.o ksz_dcb.o
ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
-ksz_switch-objs += ksz8795.o
+ksz_switch-objs += ksz8.o
ksz_switch-objs += lan937x_main.o
ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8.c
index d27b9c36d73f..da7110d67558 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -1,6 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Microchip KSZ8795 switch driver
+ * Microchip KSZ8XXX series switch driver
+ *
+ * It supports the following switches:
+ * - KSZ8863, KSZ8873 aka KSZ88X3
+ * - KSZ8895, KSZ8864 aka KSZ8895 family
+ * - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
+ * Note that it does NOT support:
+ * - KSZ8563, KSZ8567 - see KSZ9477 driver
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
@@ -23,7 +30,7 @@
#include <linux/phylink.h>
#include "ksz_common.h"
-#include "ksz8795_reg.h"
+#include "ksz8_reg.h"
#include "ksz8.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
@@ -38,6 +45,20 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
+/**
+ * ksz8_ind_write8 - EEE/ACL/PME indirect register write
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @data: The data to be written.
+ *
+ * This function performs an indirect register write for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
const u16 *regs;
@@ -58,6 +79,59 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
+/**
+ * ksz8_ind_read8 - EEE/ACL/PME indirect register read
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @val: The value read.
+ *
+ * This function performs an indirect register read for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_read8(dev, regs[REG_IND_BYTE], val);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value);
+}
+
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_read8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_write8(dev, table, (u8)(offset), data);
+}
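+
+/* In the per-port variants above, the high byte of @offset selects the
+ * indirect function table, (port + 1) selects the port within it, and
+ * the low byte is the register offset; this mirrors the @table
+ * parameter of ksz8_ind_write8() (an assumed reading of the encoding).
+ */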
+
int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -120,7 +194,9 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return ksz8863_change_mtu(dev, frame_size);
}
@@ -317,7 +393,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
else
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -325,7 +401,7 @@ void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
return;
/* enable the port for flush/freeze function */
@@ -343,7 +419,8 @@ void ksz8_port_init_cnt(struct ksz_device *dev, int port)
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
/* flush all enabled port MIB counters */
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
@@ -542,11 +619,11 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
shifts[STATIC_MAC_FWD_PORTS];
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+ /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and
* STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
* static MAC table compared to doing write.
*/
- if (ksz_is_ksz87xx(dev))
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
data_hi >>= 1;
alu->is_static = true;
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
@@ -1545,6 +1622,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
int queues;
@@ -1575,6 +1653,13 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
member = BIT(dsa_upstream_port(ds, port));
ksz8_cfg_port_member(dev, port, member);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ * CPU port 4 has no WoL functionality.
+ */
+ if (ksz_is_ksz87xx(dev) && !cpu_port)
+ ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
@@ -1617,7 +1702,8 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- if (!ksz_is_ksz88x3(dev)) {
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
@@ -1790,7 +1876,8 @@ int ksz8_enable_stp_addr(struct ksz_device *dev)
int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- int i;
+ const u16 *regs = dev->info->regs;
+ int i, ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1829,7 +1916,21 @@ int ksz8_setup(struct dsa_switch *ds)
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- return ksz8_handle_global_errata(ds);
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+	 * do not like PME event changes before shutdown. PME is only
+	 * available on the KSZ87xx family.
+ */
+ if (ksz_is_ksz87xx(dev)) {
+ ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0);
+ if (!ret)
+ ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0);
+ }
+
+	if (ret)
+		return ret;
+
+	return ksz8_handle_global_errata(ds);
}
void ksz8_get_caps(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index ae43077e76c3..e1c79ff97123 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -54,6 +54,9 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value);
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data);
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data);
void ksz8_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5711a59e2ac9..a8bfcd917bf7 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -199,11 +199,11 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id ksz8863_dt_ids[] = {
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
},
{ },
};
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 69566a5d9cda..329688603a58 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -1,13 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Microchip KSZ8795 register definitions
+ * Microchip KSZ8XXX series register definitions
+ *
+ * The base for these definitions is KSZ8795 but unless indicated
+ * differently by their prefix, they apply to all KSZ8 series
+ * devices. Registers and masks that do change are defined in
+ * dedicated structures in ksz_common.c.
*
* Copyright (c) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#ifndef __KSZ8795_REG_H
-#define __KSZ8795_REG_H
+#ifndef __KSZ8_REG_H
+#define __KSZ8_REG_H
#define KS_PORT_M 0x1F
@@ -359,8 +364,6 @@
#define REG_IND_DATA_1 0x77
#define REG_IND_DATA_0 0x78
-#define REG_IND_DATA_PME_EEE_ACL 0xA0
-
#define REG_INT_STATUS 0x7C
#define REG_INT_ENABLE 0x7D
@@ -704,8 +707,6 @@
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
-#define KSZ8795_SW_ID 0x8795
-
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 425e20daf1e9..0ba658a72d8f 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -56,187 +56,6 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
REG_SW_MTU_MASK, frame_size);
}
-/**
- * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
- * @dev: The device structure.
- * @port: The port number.
- *
- * This function reads the PME (Power Management Event) status register of a
- * specified port to determine the wake reason. If there is no wake event, it
- * returns early. Otherwise, it logs the wake reason which could be due to a
- * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
- * is then cleared to acknowledge the handling of the wake event.
- *
- * Return: 0 on success, or an error code on failure.
- */
-static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
-{
- u8 pme_status;
- int ret;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
- if (ret)
- return ret;
-
- if (!pme_status)
- return 0;
-
- dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
- pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
- pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
- pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
-
- return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
-}
-
-/**
- * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function checks the PME Pin Control Register to see if PME Pin Output
- * Enable is set, indicating PME is enabled. If enabled, it sets the supported
- * and active WoL flags.
- */
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl;
- int ret;
-
- if (!dev->wakeup_source)
- return;
-
- wol->supported = WAKE_PHY;
-
- /* Check if the current MAC address on this port can be set
- * as global for WAKE_MAGIC support. The result may vary
- * dynamically based on other ports configurations.
- */
- if (ksz_is_port_mac_global_usable(dev->ds, port))
- wol->supported |= WAKE_MAGIC;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
- if (ret)
- return;
-
- if (pme_ctrl & PME_WOL_MAGICPKT)
- wol->wolopts |= WAKE_MAGIC;
- if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
- wol->wolopts |= WAKE_PHY;
-}
-
-/**
- * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function configures Wake-on-LAN (WoL) settings for a specified port.
- * It validates the provided WoL options, checks if PME is enabled via the
- * switch's PME Pin Control Register, clears any previous wake reasons,
- * and sets the Magic Packet flag in the port's PME control register if
- * specified.
- *
- * Return: 0 on success, or other error codes on failure.
- */
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl = 0, pme_ctrl_old = 0;
- bool magic_switched_off;
- bool magic_switched_on;
- int ret;
-
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
-
- if (!dev->wakeup_source)
- return -EOPNOTSUPP;
-
- ret = ksz9477_handle_wake_reason(dev, port);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- pme_ctrl |= PME_WOL_MAGICPKT;
- if (wol->wolopts & WAKE_PHY)
- pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
- if (ret)
- return ret;
-
- if (pme_ctrl_old == pme_ctrl)
- return 0;
-
- magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
- !(pme_ctrl & PME_WOL_MAGICPKT);
- magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
- (pme_ctrl & PME_WOL_MAGICPKT);
-
- /* To keep reference count of MAC address, we should do this
- * operation only on change of WOL settings.
- */
- if (magic_switched_on) {
- ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
- if (ret)
- return ret;
- } else if (magic_switched_off) {
- ksz_switch_macaddr_put(dev->ds);
- }
-
- ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
- if (ret) {
- if (magic_switched_on)
- ksz_switch_macaddr_put(dev->ds);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
- * considering Wake-on-LAN (WoL) settings.
- * @dev: The switch device structure.
- * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
- * enabled on any port.
- *
- * This function prepares the switch device for a safe shutdown while taking
- * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
- * the wol_enabled flag accordingly to reflect whether WoL is active on any
- * port.
- */
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
-{
- struct dsa_port *dp;
- int ret;
-
- *wol_enabled = false;
-
- if (!dev->wakeup_source)
- return;
-
- dsa_switch_for_each_user_port(dp, dev->ds) {
- u8 pme_ctrl = 0;
-
- ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
- if (!ret && pme_ctrl)
- *wol_enabled = true;
-
- /* make sure there are no pending wake events which would
- * prevent the device from going to sleep/shutdown.
- */
- ksz9477_handle_wake_reason(dev, dp->index);
- }
-
- /* Now we are save to enable PME pin. */
- if (*wol_enabled)
- ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
-}
-
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -427,54 +246,70 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-int ksz9477_errata_monitor(struct ksz_device *dev, int port,
- u64 tx_late_col)
+static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
{
+ u8 lue_ctrl;
u32 pmavbc;
- u8 status;
u16 pqm;
int ret;
- ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states. If you see this message, please read the
+ * errata-sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To workaround this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
if (ret)
return ret;
- if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
- !(status & PORT_INTF_FULL_DUPLEX)) {
- /* Errata DS80000754 recommends monitoring potential faults in
- * half-duplex mode. The switch might not be able to communicate anymore
- * in these states.
- * If you see this message, please read the errata-sheet for more information:
- * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
- * To workaround this issue, half-duplex mode should be avoided.
- * A software reset could be implemented to recover from this state.
- */
- dev_warn_once(dev->dev,
- "Half-duplex detected on port %d, transmission halt may occur\n",
- port);
- if (tx_late_col != 0) {
- /* Transmission halt with late collisions */
- dev_crit_once(dev->dev,
- "TX late collisions detected, transmission may be halted on port %d\n",
- port);
- }
- ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (lue_ctrl & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
if (ret)
return ret;
- if (status & SW_VLAN_ENABLE) {
- ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
- if (ret)
- return ret;
- ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
- if (ret)
- return ret;
- if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
- (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
- /* Transmission halt with Half-Duplex and VLAN */
- dev_crit_once(dev->dev,
- "resources out of limits, transmission may be halted\n");
- }
+
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
}
}
+
+ return ret;
+}
+
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+
+	if (FIELD_GET(PORT_INTF_SPEED_MASK, status) != PORT_INTF_SPEED_NONE &&
+	    !(status & PORT_INTF_FULL_DUPLEX)) {
+ ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
+ }
+
return ret;
}
@@ -1188,6 +1023,7 @@ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
u16 data16;
u8 member;
@@ -1232,12 +1068,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_port_acl_init(dev, port);
/* clear pending wake flags */
- ksz9477_handle_wake_reason(dev, port);
+ ksz_handle_wake_reason(dev, port);
/* Disable all WoL options by default. Otherwise
* ksz_switch_macaddr_get/put logic will not work properly.
*/
- ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
+ ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1334,6 +1170,7 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
int ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1364,13 +1201,11 @@ int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* Make sure PME (WoL) is not enabled. If requested, it will be
- * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
- * like PME events changes before shutdown.
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+	 * do not like PME event changes before shutdown.
*/
- ksz_write8(dev, REG_SW_PME_CTRL, 0);
-
- return 0;
+ return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
u32 ksz9477_get_port_addr(int port, int offset)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 239a281da10b..d2166b0d881e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -60,11 +60,6 @@ void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled);
int ksz9477_port_acl_init(struct ksz_device *dev, int port);
void ksz9477_port_acl_free(struct ksz_device *dev, int port);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index d5354c600ea1..04235c22bf40 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -38,11 +38,6 @@
#define SWITCH_REVISION_S 4
#define SWITCH_RESET 0x01
-#define REG_SW_PME_CTRL 0x0006
-
-#define PME_ENABLE BIT(1)
-#define PME_POLARITY BIT(0)
-
#define REG_GLOBAL_OPTIONS 0x000F
#define SW_GIGABIT_ABLE BIT(6)
@@ -807,13 +802,6 @@
#define REG_PORT_AVB_SR_1_TYPE 0x0008
#define REG_PORT_AVB_SR_2_TYPE 0x000A
-#define REG_PORT_PME_STATUS 0x0013
-#define REG_PORT_PME_CTRL 0x0017
-
-#define PME_WOL_MAGICPKT BIT(2)
-#define PME_WOL_LINKUP BIT(1)
-#define PME_WOL_ENERGY BIT(0)
-
#define REG_PORT_INT_STATUS 0x001B
#define REG_PORT_INT_MASK 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1491099528be..4e8710c7cb7b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -246,16 +246,16 @@ static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
{ SW_DRIVE_STRENGTH_28MA, 28000 },
};
-/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+/* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
* variants.
* These values are documented in the KSZ8873 and KSZ8863 datasheets.
*/
-static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
+static const struct ksz_drive_strength ksz88x3_drive_strengths[] = {
{ 0, 8000 },
{ KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
};
-static void ksz8830_phylink_mac_config(struct phylink_config *config,
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state);
static void ksz_phylink_mac_config(struct phylink_config *config,
@@ -265,8 +265,8 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
-static const struct phylink_mac_ops ksz8830_phylink_mac_ops = {
- .mac_config = ksz8830_phylink_mac_config,
+static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
+ .mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
};
@@ -277,7 +277,7 @@ static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_link_up = ksz8_phylink_mac_link_up,
};
-static const struct ksz_dev_ops ksz8_dev_ops = {
+static const struct ksz_dev_ops ksz88xx_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
.cfg_port_member = ksz8_cfg_port_member,
@@ -307,6 +307,44 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
.change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static const struct ksz_dev_ops ksz87xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
};
static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
@@ -348,9 +386,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .get_wol = ksz9477_get_wol,
- .set_wol = ksz9477_set_wol,
- .wol_pre_shutdown = ksz9477_wol_pre_shutdown,
+ .pme_write8 = ksz_write8,
+ .pme_pread8 = ksz_pread8,
+ .pme_pwrite8 = ksz_pwrite8,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -423,6 +461,9 @@ static const u16 ksz8795_regs[] = {
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
[P_XMII_CTRL_1] = 0x06,
+ [REG_SW_PME_CTRL] = 0x8003,
+ [REG_PORT_PME_STATUS] = 0x8003,
+ [REG_PORT_PME_CTRL] = 0x8007,
};
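
Editorial note: REG_SW_PME_CTRL and the two port PME registers become enum indices resolved through the per-chip regs[] table, so the same logical register lands at 0x8003/0x8007 on KSZ87xx but 0x0006/0x0013/0x0017 on KSZ9477 (see the ksz9477_regs hunk further down). A minimal standalone C sketch of that indirection, with the driver's types simplified to plain structs; names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

enum pme_reg { SW_PME_CTRL, PORT_PME_STATUS, PORT_PME_CTRL, PME_REG_NUM };

/* offsets copied from the ksz8795_regs/ksz9477_regs hunks in this patch */
static const uint16_t ksz8795_pme[PME_REG_NUM] = { 0x8003, 0x8003, 0x8007 };
static const uint16_t ksz9477_pme[PME_REG_NUM] = { 0x0006, 0x0013, 0x0017 };

struct chip {
	const char *name;
	const uint16_t *regs;
};

int main(void)
{
	const struct chip chips[] = {
		{ "ksz8795", ksz8795_pme },
		{ "ksz9477", ksz9477_pme },
	};

	/* same logical register, chip-specific offset */
	for (unsigned int i = 0; i < 2; i++)
		printf("%s: PORT_PME_CTRL at 0x%04x\n",
		       chips[i].name, chips[i].regs[PORT_PME_CTRL]);
	return 0;
}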
static const u32 ksz8795_masks[] = {
@@ -531,6 +572,61 @@ static u8 ksz8863_shifts[] = {
[DYNAMIC_MAC_SRC_PORT] = 20,
};
+static const u16 ksz8895_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x75,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8895_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8895_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 13,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
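
The masks[] and shifts[] tables above serve the same purpose for field extraction: chip-generic code combines a per-chip mask with a per-chip shift to pull fields such as the source port out of a dynamic MAC table word. A hedged sketch of that pattern, with the GENMASK(26, 24)/shift-24 pair from the KSZ8895 tables hard-coded and the table word invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define DYN_MAC_SRC_PORT_MASK	(0x7u << 24)	/* GENMASK(26, 24) */
#define DYN_MAC_SRC_PORT_SHIFT	24

static unsigned int dyn_mac_src_port(uint32_t entry)
{
	/* mask first, then shift down, as the driver does with its tables */
	return (entry & DYN_MAC_SRC_PORT_MASK) >> DYN_MAC_SRC_PORT_SHIFT;
}

int main(void)
{
	uint32_t entry = 0x05000000;	/* hypothetical dynamic MAC table word */

	printf("src port: %u\n", dyn_mac_src_port(entry));	/* -> 5 */
	return 0;
}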
+
static const u16 ksz9477_regs[] = {
[REG_SW_MAC_ADDR] = 0x0302,
[P_STP_CTRL] = 0x0B04,
@@ -539,6 +635,9 @@ static const u16 ksz9477_regs[] = {
[S_MULTICAST_CTRL] = 0x0331,
[P_XMII_CTRL_0] = 0x0300,
[P_XMII_CTRL_1] = 0x0301,
+ [REG_SW_PME_CTRL] = 0x0006,
+ [REG_PORT_PME_STATUS] = 0x0013,
+ [REG_PORT_PME_CTRL] = 0x0017,
};
static const u32 ksz9477_masks[] = {
@@ -1253,12 +1352,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8795",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1294,12 +1393,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8794",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1321,12 +1420,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8765",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1343,8 +1442,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true, false},
},
- [KSZ8830] = {
- .chip_id = KSZ8830_CHIP_ID,
+ [KSZ88X3] = {
+ .chip_id = KSZ88X3_CHIP_ID,
.dev_name = "KSZ8863/KSZ8873",
.num_vlans = 16,
.num_alus = 0,
@@ -1353,8 +1452,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3,
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
- .phylink_mac_ops = &ksz8830_phylink_mac_ops,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1368,6 +1467,61 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.rd_table = &ksz8873_register_set,
},
+ [KSZ8864] = {
+ /* WARNING
+ * =======
+ * KSZ8864 is similar to KSZ8895, except the first port
+ * does not exist.
+ * external cpu
+ * KSZ8864 1,2,3 4
+ * KSZ8895 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8864_CHIP_ID,
+ .dev_name = "KSZ8864",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {false, true, true, true, false},
+ },
+
+ [KSZ8895] = {
+ .chip_id = KSZ8895_CHIP_ID,
+ .dev_name = "KSZ8895",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
[KSZ9477] = {
.chip_id = KSZ9477_CHIP_ID,
.dev_name = "KSZ9477",
@@ -2570,7 +2724,7 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
struct ksz_device *dev = ds->priv;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
@@ -2893,12 +3047,10 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
struct ksz_device *dev = ds->priv;
enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
- if (dev->chip_id == KSZ8795_CHIP_ID ||
- dev->chip_id == KSZ8794_CHIP_ID ||
- dev->chip_id == KSZ8765_CHIP_ID)
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
proto = DSA_TAG_PROTO_KSZ8795;
- if (dev->chip_id == KSZ8830_CHIP_ID ||
+ if (dev->chip_id == KSZ88X3_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
@@ -3010,7 +3162,9 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
- case KSZ8830_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
case KSZ8567_CHIP_ID:
@@ -3180,7 +3334,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
return interface;
}
-static void ksz8830_phylink_mac_config(struct phylink_config *config,
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
@@ -3364,10 +3518,22 @@ static int ksz_switch_detect(struct ksz_device *dev)
break;
case KSZ88_FAMILY_ID:
if (id2 == KSZ88_CHIP_ID_63)
- dev->chip_id = KSZ8830_CHIP_ID;
+ dev->chip_id = KSZ88X3_CHIP_ID;
else
return -ENODEV;
break;
+ case KSZ8895_FAMILY_ID:
+ if (id2 == KSZ8895_CHIP_ID_95 ||
+ id2 == KSZ8895_CHIP_ID_95R)
+ dev->chip_id = KSZ8895_CHIP_ID;
+ else
+ return -ENODEV;
+ ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4);
+ if (ret)
+ return ret;
+ if (id4 & SW_KSZ8864)
+ dev->chip_id = KSZ8864_CHIP_ID;
+ break;
default:
ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
if (ret)
@@ -3742,24 +3908,214 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason which could be due to a
+ * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int ksz_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_status;
+ int ret;
+
+ ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS],
+ &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
+ pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS],
+ pme_status);
+}
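
The decode in the dev_dbg() above is just three independent bit tests against the status byte. A self-contained sketch of the same decoding, with the register read replaced by a hard-coded sample value:

#include <stdint.h>
#include <stdio.h>

#define PME_WOL_MAGICPKT	(1u << 2)
#define PME_WOL_LINKUP		(1u << 1)
#define PME_WOL_ENERGY		(1u << 0)

static void decode_wake_reason(int port, uint8_t pme_status)
{
	if (!pme_status)
		return;	/* no wake event pending */

	printf("Wake event on port %d due to:%s%s%s\n", port,
	       pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
	       pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
	       pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
}

int main(void)
{
	decode_wake_reason(1, PME_WOL_MAGICPKT | PME_WOL_ENERGY);	/* sample */
	return 0;
}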
+
+/**
+ * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function checks the device PME wakeup_source flag and chip_id.
+ * If enabled and supported, it sets the supported and active WoL
+ * flags.
+ */
static void ksz_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ u8 pme_ctrl;
+ int ret;
- if (dev->dev_ops->get_wol)
- dev->dev_ops->get_wol(dev, port, wol);
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ /* Check if the current MAC address on this port can be set
+ * as global for WAKE_MAGIC support. The result may vary
+ * dynamically based on the other ports' configurations.
+ */
+ if (ksz_is_port_mac_global_usable(dev->ds, port))
+ wol->supported |= WAKE_MAGIC;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl);
+ if (ret)
+ return;
+
+ if (pme_ctrl & PME_WOL_MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
}
+/**
+ * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified
+ * port. It validates the provided WoL options, checks if PME is
+ * enabled and supported, clears any previous wake reasons, and sets
+ * the Magic Packet flag in the port's PME control register if
+ * specified.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
static int ksz_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
+ u8 pme_ctrl = 0, pme_ctrl_old = 0;
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ bool magic_switched_off;
+ bool magic_switched_on;
+ int ret;
- if (dev->dev_ops->set_wol)
- return dev->dev_ops->set_wol(dev, port, wol);
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
- return -EOPNOTSUPP;
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return -EOPNOTSUPP;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pme_ctrl |= PME_WOL_MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl_old);
+ if (ret)
+ return ret;
+
+ if (pme_ctrl_old == pme_ctrl)
+ return 0;
+
+ magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ !(pme_ctrl & PME_WOL_MAGICPKT);
+ magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ (pme_ctrl & PME_WOL_MAGICPKT);
+
+ /* To keep the MAC address reference count balanced, do this
+ * operation only when the WoL settings actually change.
+ */
+ if (magic_switched_on) {
+ ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
+ if (ret)
+ return ret;
+ } else if (magic_switched_off) {
+ ksz_switch_macaddr_put(dev->ds);
+ }
+
+ ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL],
+ pme_ctrl);
+ if (ret) {
+ if (magic_switched_on)
+ ksz_switch_macaddr_put(dev->ds);
+ return ret;
+ }
+
+ return 0;
+}
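
The wolopts-to-PME mapping and the magic_switched_on/off edge detection can be exercised in isolation. A sketch under the assumption that WAKE_PHY and WAKE_MAGIC carry the bit values from include/uapi/linux/ethtool.h (bits 0 and 5); the register round-trip is replaced by local variables:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAKE_PHY	(1u << 0)	/* as in include/uapi/linux/ethtool.h */
#define WAKE_MAGIC	(1u << 5)

#define PME_WOL_MAGICPKT	(1u << 2)
#define PME_WOL_LINKUP		(1u << 1)
#define PME_WOL_ENERGY		(1u << 0)

static uint8_t wolopts_to_pme(uint32_t wolopts)
{
	uint8_t pme = 0;

	if (wolopts & WAKE_MAGIC)
		pme |= PME_WOL_MAGICPKT;
	if (wolopts & WAKE_PHY)
		pme |= PME_WOL_LINKUP | PME_WOL_ENERGY;
	return pme;
}

int main(void)
{
	uint8_t old = wolopts_to_pme(WAKE_PHY);
	uint8_t cur = wolopts_to_pme(WAKE_PHY | WAKE_MAGIC);
	bool magic_on = !(old & PME_WOL_MAGICPKT) && (cur & PME_WOL_MAGICPKT);
	bool magic_off = (old & PME_WOL_MAGICPKT) && !(cur & PME_WOL_MAGICPKT);

	/* magic_on triggers the macaddr get(); magic_off triggers the put() */
	printf("magic_switched_on=%d magic_switched_off=%d\n", magic_on, magic_off);
	return 0;
}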
+
+/**
+ * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
+ * considering Wake-on-LAN (WoL) settings.
+ * @dev: The switch device structure.
+ * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
+ * enabled on any port.
+ *
+ * This function prepares the switch device for a safe shutdown while taking
+ * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
+ * the wol_enabled flag accordingly to reflect whether WoL is active on any
+ * port.
+ */
+static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_pin_en = PME_ENABLE;
+ struct dsa_port *dp;
+ int ret;
+
+ *wol_enabled = false;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ u8 pme_ctrl = 0;
+
+ ret = ops->pme_pread8(dev, dp->index,
+ regs[REG_PORT_PME_CTRL], &pme_ctrl);
+ if (!ret && pme_ctrl)
+ *wol_enabled = true;
+
+ /* make sure there are no pending wake events which would
+ * prevent the device from going to sleep/shutdown.
+ */
+ ksz_handle_wake_reason(dev, dp->index);
+ }
+
+ /* Now it is safe to enable the PME pin. */
+ if (*wol_enabled) {
+ if (dev->pme_active_high)
+ pme_pin_en |= PME_POLARITY;
+ ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en);
+ if (ksz_is_ksz87xx(dev))
+ ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK);
+ }
}
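
ksz_wol_pre_shutdown() follows an aggregate-then-arm shape: scan every user port's PME control byte, and only if any is non-zero program the global PME pin, honoring the polarity flag. A standalone sketch of that decision with the register I/O stubbed out; ports[] is a hypothetical snapshot of per-port PME_CTRL values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PME_ENABLE	(1u << 1)
#define PME_POLARITY	(1u << 0)

int main(void)
{
	uint8_t ports[] = { 0x00, 0x04, 0x00 };	/* port 1 has a WoL bit set */
	bool wol_enabled = false;
	bool pme_active_high = true;

	for (unsigned int i = 0; i < sizeof(ports); i++)
		if (ports[i])
			wol_enabled = true;

	if (wol_enabled) {
		uint8_t pme_pin_en = PME_ENABLE;

		if (pme_active_high)
			pme_pin_en |= PME_POLARITY;
		printf("arm PME pin: 0x%02x\n", pme_pin_en);	/* -> 0x03 */
	}
	return 0;
}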
static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
@@ -4072,8 +4428,7 @@ void ksz_switch_shutdown(struct ksz_device *dev)
{
bool wol_enabled = false;
- if (dev->dev_ops->wol_pre_shutdown)
- dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled);
+ ksz_wol_pre_shutdown(dev, &wol_enabled);
if (dev->dev_ops->reset && !wol_enabled)
dev->dev_ops->reset(dev);
@@ -4237,24 +4592,24 @@ static int ksz9477_drive_strength_write(struct ksz_device *dev,
}
/**
- * ksz8830_drive_strength_write() - Set the drive strength configuration for
- * KSZ8830 compatible chip variants.
+ * ksz88x3_drive_strength_write() - Set the drive strength configuration for
+ * KSZ8863 compatible chip variants.
* @dev: ksz device
* @props: Array of drive strength properties to be set
* @num_props: Number of properties in the array
*
- * This function applies the specified drive strength settings to KSZ8830 chip
+ * This function applies the specified drive strength settings to KSZ88X3 chip
* variants (KSZ8873, KSZ8863).
* It ensures the configurations align with what the chip variant supports and
* warns or errors out on unsupported settings.
*
* Return: 0 on success, error code otherwise
*/
-static int ksz8830_drive_strength_write(struct ksz_device *dev,
+static int ksz88x3_drive_strength_write(struct ksz_device *dev,
struct ksz_driver_strength_prop *props,
int num_props)
{
- size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths);
+ size_t array_size = ARRAY_SIZE(ksz88x3_drive_strengths);
int microamp;
int i, ret;
@@ -4267,10 +4622,10 @@ static int ksz8830_drive_strength_write(struct ksz_device *dev,
}
microamp = props[KSZ_DRIVER_STRENGTH_IO].value;
- ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size,
+ ret = ksz_drive_strength_to_reg(ksz88x3_drive_strengths, array_size,
microamp);
if (ret < 0) {
- ksz_drive_strength_error(dev, ksz8830_drive_strengths,
+ ksz_drive_strength_error(dev, ksz88x3_drive_strengths,
array_size, microamp);
return ret;
}
@@ -4330,8 +4685,8 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
switch (dev->chip_id) {
- case KSZ8830_CHIP_ID:
- return ksz8830_drive_strength_write(dev, of_props,
+ case KSZ88X3_CHIP_ID:
+ return ksz88x3_drive_strength_write(dev, of_props,
ARRAY_SIZE(of_props));
case KSZ8795_CHIP_ID:
case KSZ8794_CHIP_ID:
@@ -4362,7 +4717,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
- struct device_node *port, *ports;
+ struct device_node *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
@@ -4448,12 +4803,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
if (ports) {
- for_each_available_child_of_node(ports, port) {
+ for_each_available_child_of_node_scoped(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
- of_node_put(port);
of_node_put(ports);
return -EINVAL;
}
@@ -4475,6 +4829,8 @@ int ksz_switch_register(struct ksz_device *dev)
dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
"wakeup-source");
+ dev->pme_active_high = of_property_read_bool(dev->dev->of_node,
+ "microchip,pme-active-high");
}
ret = dsa_register_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 5f0a628b9849..bec846e20682 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -174,6 +174,7 @@ struct ksz_device {
bool synclko_125;
bool synclko_disable;
bool wakeup_source;
+ bool pme_active_high;
struct vlan_table *vlan_cache;
@@ -199,7 +200,9 @@ enum ksz_model {
KSZ8795,
KSZ8794,
KSZ8765,
- KSZ8830,
+ KSZ88X3,
+ KSZ8864,
+ KSZ8895,
KSZ9477,
KSZ9896,
KSZ9897,
@@ -235,6 +238,9 @@ enum ksz_regs {
S_MULTICAST_CTRL,
P_XMII_CTRL_0,
P_XMII_CTRL_1,
+ REG_SW_PME_CTRL,
+ REG_PORT_PME_STATUS,
+ REG_PORT_PME_CTRL,
};
enum ksz_masks {
@@ -354,6 +360,11 @@ struct ksz_dev_ops {
void (*get_caps)(struct ksz_device *dev, int port,
struct phylink_config *config);
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*pme_pread8)(struct ksz_device *dev, int port, int offset,
+ u8 *data);
+ int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset,
+ u8 data);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
@@ -363,11 +374,6 @@ struct ksz_dev_ops {
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
- void (*get_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- int (*set_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
@@ -391,6 +397,7 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_switch_macaddr_put(struct dsa_switch *ds);
void ksz_switch_shutdown(struct ksz_device *dev);
+int ksz_handle_wake_reason(struct ksz_device *dev, int port);
/* Common register access functions */
static inline struct regmap *ksz_regmap_8(struct ksz_device *dev)
@@ -621,12 +628,29 @@ static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
{
- return dev->chip_id == KSZ8830_CHIP_ID;
+ return dev->chip_id == KSZ88X3_CHIP_ID;
+}
+
+static inline bool ksz_is_8895_family(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8895_CHIP_ID ||
+ dev->chip_id == KSZ8864_CHIP_ID;
}
static inline bool is_ksz8(struct ksz_device *dev)
{
- return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev);
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
+ ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz88xx(struct ksz_device *dev)
+{
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz9477(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ9477_CHIP_ID;
}
static inline int is_lan937x(struct ksz_device *dev)
@@ -655,6 +679,7 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_FAMILY_ID_M GENMASK(15, 8)
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
+#define KSZ8895_FAMILY_ID 0x95
#define KSZ8_PORT_STATUS_0 0x08
#define KSZ8_PORT_FIBER_MODE BIT(7)
@@ -663,6 +688,12 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define KSZ87_CHIP_ID_94 0x6
#define KSZ87_CHIP_ID_95 0x9
#define KSZ88_CHIP_ID_63 0x3
+#define KSZ8895_CHIP_ID_95 0x4
+#define KSZ8895_CHIP_ID_95R 0x6
+
+/* KSZ8895 specific register */
+#define REG_KSZ8864_CHIP_ID 0xFE
+#define SW_KSZ8864 BIT(7)
#define SW_REV_ID_M GENMASK(7, 4)
@@ -695,6 +726,17 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define KSZ87XX_REG_INT_EN 0x7D
+#define KSZ87XX_INT_PME_MASK BIT(4)
+
/* Interrupt */
#define REG_SW_PORT_INT_STATUS__1 0x001B
#define REG_SW_PORT_INT_MASK__1 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 086bc9b3cf53..30b4a6186e38 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -113,7 +113,7 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
int *per_reg, u8 *mask)
{
- if (ksz_is_ksz87xx(dev)) {
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) {
*reg = KSZ8765_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 8e8d83213b04..e3e341431f09 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -2,7 +2,7 @@
/*
* Microchip ksz series register access through SPI
*
- * Copyright (C) 2017 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
@@ -54,12 +54,15 @@ static int ksz_spi_probe(struct spi_device *spi)
if (!chip)
return -EINVAL;
- if (chip->chip_id == KSZ8830_CHIP_ID)
+ if (chip->chip_id == KSZ88X3_CHIP_ID)
regmap_config = ksz8863_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else if (chip->chip_id == KSZ8895_CHIP_ID ||
+ chip->chip_id == KSZ8864_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
else
regmap_config = ksz9477_regmap_config;
@@ -134,11 +137,19 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8863",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8864",
+ .data = &ksz_switch_chips[KSZ8864]
},
{
.compatible = "microchip,ksz8873",
- .data = &ksz_switch_chips[KSZ8830]
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8895",
+ .data = &ksz_switch_chips[KSZ8895]
},
{
.compatible = "microchip,ksz9477",
@@ -201,7 +212,9 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
+ { "ksz8864" },
{ "ksz8873" },
+ { "ksz8895" },
{ "ksz9477" },
{ "ksz9896" },
{ "ksz9897" },
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index b74a230a3f13..10dc49961f15 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,6 +11,7 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ec18e68bf3a8..d84ee1b419a6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1152,7 +1152,8 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
*/
- if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
+ priv->id == ID_EN7581)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -2207,7 +2208,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
return priv->irq ? : -EINVAL;
}
- if (priv->id == ID_MT7988)
+ if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
&mt7988_irq_domain_ops,
priv);
@@ -2438,8 +2439,10 @@ mt7530_setup(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7530_FORCE_MODE, MT7530_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2550,8 +2553,10 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2783,6 +2788,28 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
+}
+
static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
@@ -3220,6 +3247,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = mt7988_mac_port_get_caps,
},
+ [ID_EN7581] = {
+ .id = ID_EN7581,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 28592123070b..6ad33a9f6b1d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -19,6 +19,7 @@ enum mt753x_id {
ID_MT7621 = 1,
ID_MT7531 = 2,
ID_MT7988 = 3,
+ ID_EN7581 = 4,
};
#define NUM_TRGMII_CTRL 5
@@ -64,25 +65,30 @@ enum mt753x_id {
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_EN : MT7530_MIRROR_EN)
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -355,6 +361,10 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_MODE_TX_FC | \
MT7531_FORCE_MODE_EEE100 | \
MT7531_FORCE_MODE_EEE1G)
+#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_FORCE_MODE_MASK : \
+ MT7530_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
PMCR_FORCE_EEE1G | \
PMCR_FORCE_EEE100 | \
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index 61ab6cc4fbfc..53a6d3ed63b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
* @chip: chip private data
* @pin: gpio index
*
- * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ * Return: 0 for output, 1 for input.
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 4a705f7333f4..3aa9c997018a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1370,9 +1370,8 @@ static int felix_parse_ports_node(struct felix *felix,
phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
@@ -1381,7 +1380,6 @@ static int felix_parse_ports_node(struct felix *felix,
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1391,7 +1389,6 @@ static int felix_parse_ports_node(struct felix *felix,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
- of_node_put(child);
return -ENODEV;
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index ba37a566da39..0102a82e88cc 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
/* Hardware errata - Admin config could not be overwritten if
* config is pending, need reset the TAS module
*/
- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
- ret = -EBUSY;
- goto err_reset_tc;
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (val & QSYS_TAG_CONFIG_ENABLE) {
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
}
ocelot_rmw_rix(ocelot,
@@ -1733,7 +1736,7 @@ struct felix_stream_gate {
u64 cycletime;
u64 cycletime_ext;
u32 num_entries;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
struct felix_stream_gate_entry {
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b9674f68b756..ad7044b295ec 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1740,7 +1740,7 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
}
/* Configure chip interrupt signal polarity */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 9e821b42e5f3..c7a8cd060587 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -599,7 +599,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
}
/* Fetch IRQ edge information from the descriptor */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
@@ -1009,8 +1009,8 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
static int rtl8366rb_setup_leds(struct realtek_priv *priv)
{
- struct device_node *leds_np, *led_np;
struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
struct dsa_port *dp;
int ret = 0;
@@ -1025,13 +1025,11 @@ static int rtl8366rb_setup_leds(struct realtek_priv *priv)
continue;
}
- for_each_child_of_node(leds_np, led_np) {
+ for_each_child_of_node_scoped(leds_np, led_np) {
ret = rtl8366rb_setup_led(priv, dp,
of_fwnode_handle(led_np));
- if (ret) {
- of_node_put(led_np);
+ if (ret)
break;
- }
}
of_node_put(leds_np);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index 35709a1756ae..3c5018d5e1f9 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -185,11 +185,9 @@ rtl83xx_probe(struct device *dev,
/* TODO: if power is software controlled, set up any regulators here */
priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(priv->reset_ctl)) {
- ret = PTR_ERR(priv->reset_ctl);
- dev_err_probe(dev, ret, "failed to get reset control\n");
- return ERR_CAST(priv->reset_ctl);
- }
+ if (IS_ERR(priv->reset_ctl))
+ return dev_err_cast_probe(dev, priv->reset_ctl,
+ "failed to get reset control\n");
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c7282ce3d11c..bc7e50dcb57c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1188,9 +1188,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1200,7 +1199,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1210,7 +1208,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
- of_node_put(child);
return -ENODEV;
}
@@ -1219,7 +1216,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
- of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1233,10 +1229,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 212421e9d42e..e4b98fd51643 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
@@ -45,6 +46,8 @@
#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
#define CPU_PORT 6 /* CPU port */
+#define VSC73XX_NUM_FDB_ROWS 2048
+#define VSC73XX_NUM_BUCKETS 4
/* MAC Block registers */
#define VSC73XX_MAC_CFG 0x00
@@ -196,6 +199,40 @@
#define VSC73XX_SRCMASKS_MIRROR BIT(26)
#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+#define VSC73XX_MACHDATA_VID GENMASK(27, 16)
+#define VSC73XX_MACHDATA_MAC0 GENMASK(15, 8)
+#define VSC73XX_MACHDATA_MAC1 GENMASK(7, 0)
+#define VSC73XX_MACLDATA_MAC2 GENMASK(31, 24)
+#define VSC73XX_MACLDATA_MAC3 GENMASK(23, 16)
+#define VSC73XX_MACLDATA_MAC4 GENMASK(15, 8)
+#define VSC73XX_MACLDATA_MAC5 GENMASK(7, 0)
+
+#define VSC73XX_HASH0_VID_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH0_MAC0_FROM_MASK GENMASK(7, 4)
+#define VSC73XX_HASH1_MAC0_FROM_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC1_FROM_MASK GENMASK(7, 1)
+#define VSC73XX_HASH2_MAC1_FROM_MASK BIT(0)
+#define VSC73XX_HASH2_MAC2_FROM_MASK GENMASK(7, 0)
+#define VSC73XX_HASH2_MAC3_FROM_MASK GENMASK(7, 6)
+#define VSC73XX_HASH3_MAC3_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH3_MAC4_FROM_MASK GENMASK(7, 3)
+#define VSC73XX_HASH4_MAC4_FROM_MASK GENMASK(2, 0)
+
+#define VSC73XX_HASH0_VID_TO_MASK GENMASK(9, 4)
+#define VSC73XX_HASH0_MAC0_TO_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC0_TO_MASK GENMASK(10, 7)
+#define VSC73XX_HASH1_MAC1_TO_MASK GENMASK(6, 0)
+#define VSC73XX_HASH2_MAC1_TO_MASK BIT(10)
+#define VSC73XX_HASH2_MAC2_TO_MASK GENMASK(9, 2)
+#define VSC73XX_HASH2_MAC3_TO_MASK GENMASK(1, 0)
+#define VSC73XX_HASH3_MAC3_TO_MASK GENMASK(10, 5)
+#define VSC73XX_HASH3_MAC4_TO_MASK GENMASK(4, 0)
+#define VSC73XX_HASH4_MAC4_TO_MASK GENMASK(10, 8)
+
+#define VSC73XX_MACTINDX_SHADOW BIT(13)
+#define VSC73XX_MACTINDX_BUCKET_MSK GENMASK(12, 11)
+#define VSC73XX_MACTINDX_INDEX_MSK GENMASK(10, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
@@ -225,9 +262,27 @@
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
-#define VSC73XX_MII_STAT 0x0
-#define VSC73XX_MII_CMD 0x1
-#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_MPRES 0x3
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+#define VSC73XX_MII_STAT_READ BIT(2)
+#define VSC73XX_MII_STAT_WRITE BIT(1)
+
+#define VSC73XX_MII_CMD_SCAN BIT(27)
+#define VSC73XX_MII_CMD_OPERATION BIT(26)
+#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21)
+#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16)
+#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_DATA_FAILURE BIT(16)
+#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6)
+#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0)
+#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */
#define VSC73XX_MII_STAT_BUSY BIT(3)
@@ -313,6 +368,13 @@ struct vsc73xx_counter {
const char *name;
};
+struct vsc73xx_fdb {
+ u16 vid;
+ u8 port;
+ u8 mac[ETH_ALEN];
+ bool valid;
+};
+
/* Counters are named according to the MIB standards where applicable.
* Some counters are custom, non-standard. The standard counters are
* named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
@@ -568,8 +630,11 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
return ret;
/* Setting bit 26 means "read" */
- cmd = BIT(26) | (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = VSC73XX_MII_CMD_OPERATION |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -577,15 +642,16 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
if (ret)
return ret;
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_DATA, &val);
if (ret)
return ret;
- if (val & BIT(16)) {
+ if (val & VSC73XX_MII_DATA_FAILURE) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
- val &= 0xFFFFU;
+ val &= VSC73XX_MII_DATA_READ_DATA;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
@@ -604,8 +670,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
if (ret)
return ret;
- cmd = (phy << 21) | (regnum << 16) | val;
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) |
+ FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -714,15 +783,77 @@ vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
}
+static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds)
+{
+ /* Keep 2.0 ns delay for backward complatibility */
+ u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
+ u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS;
+ struct dsa_port *dp = dsa_to_port(ds, CPU_PORT);
+ struct device_node *port_dn = dp->dn;
+ struct vsc73xx *vsc = ds->priv;
+ u32 delay;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
+ break;
+ case 1400:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
+ break;
+ case 1700:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Transmit Clock Delay\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE;
+ break;
+ case 1400:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS;
+ break;
+ case 1700:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Receive Clock Delay value\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ /* MII delay, set both GTX and RX delay */
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ tx_delay | rx_delay);
+}
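
The two switch statements above accept only the four delay steps the GMIIDELAY register supports (0, 1400, 1700, 2000 ps). An alternative table-driven sketch of the same picosecond-to-setting mapping; the numeric settings stand in for the VSC73XX_GMIIDELAY_* constants and are placeholders, not the real register encodings:

#include <stdint.h>
#include <stdio.h>

struct delay_map { uint32_t ps; uint32_t setting; };

static const struct delay_map tx_map[] = {
	{ 0,    0 },	/* stands in for ..._GTXDELAY_NONE */
	{ 1400, 1 },	/* stands in for ..._GTXDELAY_1_4_NS */
	{ 1700, 2 },	/* stands in for ..._GTXDELAY_1_7_NS */
	{ 2000, 3 },	/* stands in for ..._GTXDELAY_2_0_NS */
};

static int ps_to_setting(uint32_t ps, uint32_t *out)
{
	for (unsigned int i = 0; i < sizeof(tx_map) / sizeof(tx_map[0]); i++) {
		if (tx_map[i].ps == ps) {
			*out = tx_map[i].setting;
			return 0;
		}
	}
	return -1;	/* unsupported delay, mirrors the -EINVAL path */
}

int main(void)
{
	uint32_t setting;

	if (!ps_to_setting(1700, &setting))
		printf("1700 ps -> setting %u\n", setting);
	return 0;
}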
+
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i, ret;
+ int i, ret, val;
dev_info(vsc->dev, "set up the switch\n");
ds->untag_bridge_pvid = true;
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+ ds->fdb_isolation = true;
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
@@ -776,10 +907,11 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
- /* MII delay, set both GTX and RX delay to 2 ns */
- vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
- VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
- VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Configure RGMII delay */
+ ret = vsc73xx_configure_rgmii_port_delay(ds);
+ if (ret)
+ return ret;
+
/* Ingress VLAN reception mask (table 145) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
0xff);
@@ -789,6 +921,15 @@ static int vsc73xx_setup(struct dsa_switch *ds)
mdelay(50);
+ /* Disable preamble and use maximum allowed clock for the internal
+ * mdio bus, used for communication with internal PHYs only.
+ */
+ val = VSC73XX_MII_MPRES_NOPREAMBLE |
+ FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL,
+ VSC73XX_MII_PRESCALEVAL_MIN);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_MPRES, val);
+
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
@@ -1763,6 +1904,312 @@ static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
vsc73xx_refresh_fwd_map(ds, port, state);
}
+static u16 vsc73xx_calc_hash(const unsigned char *addr, u16 vid)
+{
+ /* VID 5-0, MAC 47-44 */
+ u16 hash = FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid)) |
+ FIELD_PREP(VSC73XX_HASH0_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_MAC0_FROM_MASK, addr[0]));
+ /* MAC 43-33 */
+ hash ^= FIELD_PREP(VSC73XX_HASH1_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC0_FROM_MASK, addr[0])) |
+ FIELD_PREP(VSC73XX_HASH1_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC1_FROM_MASK, addr[1]));
+ /* MAC 32-22 */
+ hash ^= FIELD_PREP(VSC73XX_HASH2_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC1_FROM_MASK, addr[1])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC2_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC2_FROM_MASK, addr[2])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC3_FROM_MASK, addr[3]));
+ /* MAC 21-11 */
+ hash ^= FIELD_PREP(VSC73XX_HASH3_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC3_FROM_MASK, addr[3])) |
+ FIELD_PREP(VSC73XX_HASH3_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC4_FROM_MASK, addr[4]));
+ /* MAC 10-0 */
+ hash ^= FIELD_PREP(VSC73XX_HASH4_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH4_MAC4_FROM_MASK, addr[4])) |
+ addr[5];
+
+ return hash;
+}
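
The FIELD_PREP/FIELD_GET chains above fold the 54-bit {VID[5:0], MAC[47:0]} key into an 11-bit row index: a 10-bit top slice followed by four 11-bit slices, XOR-ed together. An equivalent shift-based sketch (illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>

static uint16_t calc_hash(const uint8_t *mac, uint16_t vid)
{
	uint64_t key = (uint64_t)(vid & 0x3f) << 48;	/* VID[5:0] above MAC[47:0] */
	uint16_t hash;
	int i;

	for (i = 0; i < 6; i++)
		key |= (uint64_t)mac[i] << (8 * (5 - i));

	hash = (key >> 44) & 0x3ff;	/* VID 5-0, MAC 47-44 */
	hash ^= (key >> 33) & 0x7ff;	/* MAC 43-33 */
	hash ^= (key >> 22) & 0x7ff;	/* MAC 32-22 */
	hash ^= (key >> 11) & 0x7ff;	/* MAC 21-11 */
	hash ^= key & 0x7ff;		/* MAC 10-0 */
	return hash;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("row index: 0x%03x\n", calc_hash(mac, 1));
	return 0;
}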
+
+static int
+vsc73xx_port_wait_for_mac_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_MACACCESS_CMD_MASK) ==
+ VSC73XX_MACACCESS_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int vsc73xx_port_read_mac_table_row(struct vsc73xx *vsc, u16 index,
+ struct vsc73xx_fdb *fdb)
+{
+ int ret, i;
+ u32 val;
+
+ if (!fdb)
+ return -EINVAL;
+ if (index >= VSC73XX_NUM_FDB_ROWS)
+ return -EINVAL;
+
+ for (i = 0; i < VSC73XX_NUM_BUCKETS; i++) {
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACTINDX,
+ (i ? 0 : VSC73XX_MACTINDX_SHADOW) |
+ FIELD_PREP(VSC73XX_MACTINDX_BUCKET_MSK, i) |
+ index);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_READ_ENTRY);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].valid = FIELD_GET(VSC73XX_MACACCESS_VALID, val);
+ if (!fdb[i].valid)
+ continue;
+
+ fdb[i].port = FIELD_GET(VSC73XX_MACACCESS_DEST_IDX_MASK, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACHDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].vid = FIELD_GET(VSC73XX_MACHDATA_VID, val);
+ fdb[i].mac[0] = FIELD_GET(VSC73XX_MACHDATA_MAC0, val);
+ fdb[i].mac[1] = FIELD_GET(VSC73XX_MACHDATA_MAC1, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACLDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].mac[2] = FIELD_GET(VSC73XX_MACLDATA_MAC2, val);
+ fdb[i].mac[3] = FIELD_GET(VSC73XX_MACLDATA_MAC3, val);
+ fdb[i].mac[4] = FIELD_GET(VSC73XX_MACLDATA_MAC4, val);
+ fdb[i].mac[5] = FIELD_GET(VSC73XX_MACLDATA_MAC5, val);
+ }
+
+ return ret;
+}
+
+static int
+vsc73xx_fdb_operation(struct vsc73xx *vsc, const unsigned char *addr, u16 vid,
+ u16 hash, u16 cmd_mask, u16 cmd_val)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(VSC73XX_MACHDATA_VID, vid) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC0, addr[0]) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC1, addr[1]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACHDATA,
+ val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(VSC73XX_MACLDATA_MAC2, addr[2]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC3, addr[3]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC4, addr[4]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC5, addr[5]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACLDATA,
+ val);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACTINDX,
+ hash);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, cmd_mask, cmd_val);
+ if (ret)
+ return ret;
+
+ return vsc73xx_port_wait_for_mac_table_cmd(vsc);
+}
+
+static int vsc73xx_fdb_del_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (fdb[bucket].valid && fdb[bucket].port == port &&
+ ether_addr_equal(addr, fdb[bucket].mac))
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Can't find MAC in MAC table */
+ ret = -ENODATA;
+ goto err;
+ }
+
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_FORGET);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+ u32 val;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid)
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Bucket is full */
+ ret = -EOVERFLOW;
+ goto err;
+ }
+
+ val = VSC73XX_MACACCESS_VALID | VSC73XX_MACACCESS_LOCKED |
+ FIELD_PREP(VSC73XX_MACACCESS_DEST_IDX_MASK, port) |
+ VSC73XX_MACACCESS_CMD_LEARN;
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_VALID |
+ VSC73XX_MACACCESS_LOCKED |
+ VSC73XX_MACACCESS_DEST_IDX_MASK |
+ VSC73XX_MACACCESS_CMD_MASK, val);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_add_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_del_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_port_fdb_dump(struct dsa_switch *ds,
+ int port, dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ struct vsc73xx *vsc = ds->priv;
+ u16 i, bucket;
+ int err = 0;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ for (i = 0; i < VSC73XX_NUM_FDB_ROWS; i++) {
+ err = vsc73xx_port_read_mac_table_row(vsc, i, fdb);
+ if (err)
+ goto unlock;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid || fdb[bucket].port != port)
+ continue;
+
+ /* We need to hide dsa_8021q VLANs from the user */
+ if (vid_is_dsa_8021q(fdb[bucket].vid))
+ fdb[bucket].vid = 0;
+
+ err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data);
+ if (err)
+ goto unlock;
+ }
+ }
+unlock:
+ mutex_unlock(&vsc->fdb_lock);
+ return err;
+}
+
static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
.mac_config = vsc73xx_mac_config,
.mac_link_down = vsc73xx_mac_link_down,
@@ -1785,6 +2232,9 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
.port_bridge_join = dsa_tag_8021q_bridge_join,
.port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
+ .port_fdb_add = vsc73xx_fdb_add,
+ .port_fdb_del = vsc73xx_fdb_del,
+ .port_fdb_dump = vsc73xx_port_fdb_dump,
.port_max_mtu = vsc73xx_get_max_mtu,
.port_stp_state_set = vsc73xx_port_stp_state_set,
.port_vlan_filtering = vsc73xx_port_vlan_filtering,
@@ -1915,6 +2365,8 @@ int vsc73xx_probe(struct vsc73xx *vsc)
return -ENODEV;
}
+ mutex_init(&vsc->fdb_lock);
+
eth_random_addr(vsc->addr);
dev_info(vsc->dev,
"MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 3ca579acc798..3c30e143c14f 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -45,6 +45,7 @@ struct vsc73xx_portinfo {
* @vlans: List of configured vlans. Contains port mask and untagged status of
* every vlan configured in port vlan operation. It doesn't cover tag_8021q
* vlans.
+ * @fdb_lock: Mutex protecting FDB access
*/
struct vsc73xx {
struct device *dev;
@@ -57,6 +58,7 @@ struct vsc73xx {
void *priv;
struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
struct list_head vlans;
+ struct mutex fdb_lock;
};
/**
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d29b5d7af0d7..e9c5e1e11fa0 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -109,9 +109,10 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0baac25db4f8..9a542e3c9b05 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -158,6 +158,17 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config OA_TC6
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ depends on SPI
+ select PHYLIB
+ help
+ This library implements the OPEN Alliance TC6 10BASE-T1x MAC-PHY
+ Serial Interface protocol for supporting 10BASE-T1x MAC-PHYs.
+
+ For implementation details, refer to the documentation in
+ <file:Documentation/networking/oa-tc6-framework.rst>.
+
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c03203439c0e..99fa180dedb8 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_OA_TC6) += oa_tc6.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 0713f1e2c7f3..3431a7e62b0d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 78231c85234d..f62851708d4f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1678,17 +1678,15 @@ static int slic_init(struct slic_device *sdev)
slic_card_reset(sdev);
err = slic_load_firmware(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware\n");
/* we need the shared memory to read EEPROM so set it up temporarily */
err = slic_init_shmem(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to init shared memory\n");
err = slic_read_eeprom(sdev);
if (err) {
@@ -1741,10 +1739,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable PCI device\n");
pci_set_master(pdev);
pci_try_set_mwi(pdev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 3d8ac63132fb..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
{
- struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct ace_private *ap = from_work(ap, work, ace_bh_work);
struct net_device *dev = ap->ndev;
int cur_size;
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
- ap->tasklet_pending = 0;
+ ap->bh_work_pending = 0;
}
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
*/
if (netif_running(dev)) {
int cur_size;
- int run_tasklet = 0;
+ int run_bh_work = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
- if (run_tasklet && !ap->tasklet_pending) {
- ap->tasklet_pending = 1;
- tasklet_schedule(&ap->ace_tasklet);
+ if (run_bh_work && !ap->bh_work_pending) {
+ ap->bh_work_pending = 1;
+ queue_work(system_bh_wq, &ap->ace_bh_work);
}
}
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+ INIT_WORK(&ap->ace_bh_work, ace_bh_work);
return 0;
}
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
- tasklet_kill(&ap->ace_tasklet);
+ cancel_work_sync(&ap->ace_bh_work);
/*
* Make sure one CPU is not processing packets while
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index ca5ce0cbbad1..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
/*
* Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
- int tasklet_pending, jumbo;
- struct tasklet_struct ace_tasklet;
+ int bh_work_pending, jumbo;
+ struct work_struct ace_bh_work;
struct event *evt_ring;
@@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 6de0d590be34..9d9fa6559354 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -7,6 +7,21 @@
#define ENA_ADMIN_RSS_KEY_PARTS 10
+#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
+#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
+
+/* Customer metrics - in correlation with
+ * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ */
+enum ena_admin_customer_metrics_id {
+ ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0,
+ ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1,
+ ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3,
+ ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5,
+};
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -51,6 +66,9 @@ enum ena_admin_aq_feature_id {
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
+ /* ENA SRD customer metrics */
+ ENA_ADMIN_ENA_SRD_INFO = 1,
+ ENA_ADMIN_CUSTOMER_METRICS = 2,
};
enum ena_admin_placement_policy_type {
@@ -99,6 +117,9 @@ enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
/* extra HW stats for specific network interface */
ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
+ /* extra HW stats for ENA SRD */
+ ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3,
+ ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4,
};
enum ena_admin_get_stats_scope {
@@ -106,6 +127,16 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+/* ENA SRD configuration for ENI */
+enum ena_admin_ena_srd_flags {
+ /* Feature enabled */
+ ENA_ADMIN_ENA_SRD_ENABLED = BIT(0),
+ /* UDP support enabled */
+ ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1),
+ /* Bypass Rx UDP ordering */
+ ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
+};
+
struct ena_admin_aq_common_desc {
/* 11:0 : command_id
* 15:12 : reserved12
@@ -363,6 +394,9 @@ struct ena_admin_aq_get_stats_cmd {
* stats of other device
*/
u16 device_id;
+
+ /* a bitmap representing the requested metric values */
+ u64 requested_metrics;
};
/* Basic Statistics Command. */
@@ -419,6 +453,40 @@ struct ena_admin_eni_stats {
u64 linklocal_allowance_exceeded;
};
+struct ena_admin_ena_srd_stats {
+ /* Number of packets transmitted over ENA SRD */
+ u64 ena_srd_tx_pkts;
+
+ /* Number of packets that were transmitted, or could have been
+ * transmitted, over ENA SRD
+ */
+ u64 ena_srd_eligible_tx_pkts;
+
+ /* Number of packets received over ENA SRD */
+ u64 ena_srd_rx_pkts;
+
+ /* Percentage of the ENA SRD resources that are in use */
+ u64 ena_srd_resource_utilization;
+};
+
+/* ENA SRD Statistics Command */
+struct ena_admin_ena_srd_info {
+ /* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
+ * details
+ */
+ u64 flags;
+
+ struct ena_admin_ena_srd_stats ena_srd_stats;
+};
+
+/* Customer Metrics Command. */
+struct ena_admin_customer_metrics {
+ /* A bitmap representing the reported customer metrics, in the
+ * order they are reported
+ */
+ u64 reported_metrics;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -428,6 +496,10 @@ struct ena_admin_acq_get_stats_resp {
struct ena_admin_basic_stats basic_stats;
struct ena_admin_eni_stats eni_stats;
+
+ struct ena_admin_ena_srd_info ena_srd_info;
+
+ struct ena_admin_customer_metrics customer_metrics;
} u;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 713a595370bf..d958cda9e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1881,6 +1881,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ customer_metrics = &ena_dev->customer_metrics;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
+ return;
+ }
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ customer_metrics->supported_metrics =
+ ctx.get_resp.u.customer_metrics.reported_metrics;
+ else
+ netdev_err(ena_dev->net_device,
+ "Failed to query customer metrics support. error: %d\n", ret);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -1960,6 +2010,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ ena_com_set_supported_customer_metrics(ena_dev);
+
return 0;
}
@@ -2104,50 +2156,44 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
{
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
+ struct ena_com_stats_ctx ctx;
int ret;
- admin_queue = &ena_dev->admin_queue;
-
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
-
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
- if (unlikely(ret))
- netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats)
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info)
{
struct ena_com_stats_ctx ctx;
int ret;
- if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
- ENA_ADMIN_ENI_STATS);
+ ENA_ADMIN_ENA_SRD_INFO);
return -EOPNOTSUPP;
}
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.eni_stats,
- sizeof(ctx.get_resp.u.eni_stats));
+ memcpy(info, &ctx.get_resp.u.ena_srd_info,
+ sizeof(ctx.get_resp.u.ena_srd_info));
return ret;
}
@@ -2167,6 +2213,50 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
return ret;
}
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
+ netdev_err(ena_dev->net_device,
+ "Invalid buffer size %u. The given buffer is too big.\n", len);
+ return -EINVAL;
+ }
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
+ ENA_ADMIN_CUSTOMER_METRICS);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ena_dev->customer_metrics.supported_metrics) {
+ netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
+ return -EOPNOTSUPP;
+ }
+
+ get_cmd = &ctx.get_cmd;
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ ena_dev->customer_metrics.buffer_dma_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device, "Memory address set failed.\n");
+ return ret;
+ }
+
+ get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
+ get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
+ else
+ netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
+
+ return ret;
+}
+
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
struct ena_com_admin_queue *admin_queue;
@@ -2706,6 +2796,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
return 0;
}
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
+ customer_metrics->buffer_virt_addr = NULL;
+
+ customer_metrics->buffer_virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ &customer_metrics->buffer_dma_addr, GFP_KERNEL);
+ if (!customer_metrics->buffer_virt_addr) {
+ customer_metrics->buffer_len = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2728,6 +2836,19 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
}
}
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ if (customer_metrics->buffer_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ customer_metrics->buffer_virt_addr,
+ customer_metrics->buffer_dma_addr);
+ customer_metrics->buffer_virt_addr = NULL;
+ customer_metrics->buffer_len = 0;
+ }
+}
+
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 924f03f5a6c7..a372c5e768a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -42,6 +42,8 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
@@ -278,6 +280,16 @@ struct ena_rss {
};
+struct ena_customer_metrics {
+ /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ * and ena_admin_customer_metrics_id
+ */
+ u64 supported_metrics;
+ dma_addr_t buffer_dma_addr;
+ void *buffer_virt_addr;
+ u32 buffer_len;
+};
+
struct ena_host_attribute {
/* Debug area */
u8 *debug_area_virt_addr;
@@ -327,6 +339,8 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+
+ struct ena_customer_metrics customer_metrics;
};
struct ena_com_dev_get_features_ctx {
@@ -595,6 +609,24 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_admin_eni_stats *stats);
+/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @info: ENA SRD stats and flags
+ *
+ * @return: 0 on success, negative value otherwise.
+ */
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info);
+
+/* ena_com_get_customer_metrics - Get customer metrics for network interface
+ * @ena_dev: ENA communication layer struct
+ * @buffer: buffer for returned customer metrics
+ * @len: size of the buffer
+ *
+ * @return: 0 on success, negative value otherwise.
+ */
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
@@ -805,6 +837,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
+/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on success, negative value otherwise.
+ */
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
@@ -819,6 +858,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated customer metrics area.
+ */
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
@@ -975,6 +1021,28 @@ static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
return !!(ena_dev->capabilities & BIT(cap_id));
}
+/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
+ * @ena_dev: ENA communication layer struct
+ * @metric_id: enum value representing the customer metric
+ *
+ * @return - true if the customer metric is supported, false otherwise
+ */
+static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
+ enum ena_admin_customer_metrics_id metric_id)
+{
+ return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
+}
+
+/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - the number of supported customer metrics
+ */
+static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
+{
+ return hweight64(ena_dev->customer_metrics.supported_metrics);
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
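Taken together, the declarations above form a small lifecycle: allocate the DMA buffer once, query the supported-metrics count, copy the values out on demand, and free the buffer at teardown. A hedged sketch of a caller using only the functions declared above (assumes a fully initialized ena_dev; error reporting trimmed):

#include <linux/slab.h>

static int read_customer_metrics_sketch(struct ena_com_dev *ena_dev)
{
	u32 count, len;
	u64 *vals;
	int rc;

	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
	if (rc)
		return rc;

	count = ena_com_get_customer_metric_count(ena_dev);
	len = count * sizeof(u64);

	vals = kcalloc(count, sizeof(*vals), GFP_KERNEL);
	if (!vals) {
		rc = -ENOMEM;
		goto out;
	}

	/* One u64 per supported metric, in supported-bitmap order */
	rc = ena_com_get_customer_metrics(ena_dev, (char *)vals, len);

	kfree(vals);
out:
	ena_com_delete_customer_metrics_buffer(ena_dev);
	return rc;
}

In the driver itself the allocation happens once in probe and the free in remove; the sketch compresses both ends into one function purely for illustration.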
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b24cc3f05248..60fb35ec4b15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -14,6 +14,10 @@ struct ena_stats {
int stat_offset;
};
+struct ena_hw_metrics {
+ char name[ETH_GSTRING_LEN];
+};
+
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
@@ -41,6 +45,18 @@ struct ena_stats {
#define ENA_STAT_ENI_ENTRY(stat) \
ENA_STAT_HW_ENTRY(stat, eni_stats)
+#define ENA_STAT_ENA_SRD_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, ena_srd_stats)
+
+#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \
+}
+
+#define ENA_METRIC_ENI_ENTRY(stat) { \
+ .name = #stat \
+}
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -52,6 +68,9 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
+/* A partial list of HW stats, used when the admin command of type
+ * ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported
+ */
static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
@@ -60,6 +79,23 @@ static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};
+static const struct ena_hw_metrics ena_hw_stats_strings[] = {
+ ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_available),
+};
+
+static const struct ena_stats ena_srd_info_strings[] = {
+ ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization)
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -112,7 +148,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings)
+#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -125,6 +163,57 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
} while (u64_stats_fetch_retry(syncp, start));
}
+static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ u32 supported_metrics_count;
+ int len;
+
+ supported_metrics_count = ena_com_get_customer_metric_count(dev);
+ len = supported_metrics_count * sizeof(u64);
+
+ /* Fill the data buffer, and advance its pointer */
+ ena_com_get_customer_metrics(dev, (char *)(*data), len);
+ (*data) += supported_metrics_count;
+
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ ena_com_get_eni_stats(dev, &adapter->eni_stats);
+ /* Update regardless of rc - once we have told ethtool how many
+ * stats we have, it will print that many stats. We can't leave
+ * holes in the stats
+ */
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info);
+ /* Get ENA SRD mode */
+ ptr = (u64 *)&adapter->ena_srd_info;
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ /* Stats are wrapped in an outer struct - add an extra offset to
+ * skip the ENA SRD mode field that was already processed
+ */
+ ptr = (u64 *)&adapter->ena_srd_info +
+ ena_stats->stat_offset + 1;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+}
+
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
const struct ena_stats *ena_stats;
@@ -179,7 +268,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
static void ena_get_stats(struct ena_adapter *adapter,
u64 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
u64 *ptr;
@@ -193,17 +282,8 @@ static void ena_get_stats(struct ena_adapter *adapter,
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
- if (eni_stats_needed) {
- ena_update_hw_stats(adapter);
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
-
- ptr = (u64 *)&adapter->eni_stats +
- ena_stats->stat_offset;
-
- ena_safe_update_stat(ptr, data++, &adapter->syncp);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats(adapter, &data);
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
@@ -214,9 +294,8 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_stats(adapter, data, true);
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -228,9 +307,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+ struct ena_com_dev *dev = adapter->ena_dev;
+ int count;
+
+ count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO);
- return ENA_STATS_ARRAY_ENI(adapter) * supported;
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
+ count += ena_com_get_customer_metric_count(dev);
+ else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
+ count += ENA_STATS_ARRAY_ENI;
+
+ return count;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -246,6 +333,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
return -EOPNOTSUPP;
}
+static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_hw_metrics *ena_metrics;
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) {
+ if (ena_com_get_customer_metric_support(dev, i)) {
+ ena_metrics = &ena_hw_stats_strings[i];
+ ethtool_puts(data, ena_metrics->name);
+ }
+ }
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+}
+
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
@@ -291,7 +407,7 @@ static void ena_com_dev_strings(u8 **data)
static void ena_get_strings(struct ena_adapter *adapter,
u8 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
int i;
@@ -301,12 +417,8 @@ static void ena_get_strings(struct ena_adapter *adapter,
ethtool_puts(&data, ena_stats->name);
}
- if (eni_stats_needed) {
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
- ethtool_puts(&data, ena_stats->name);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats_strings(adapter, &data);
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
@@ -317,11 +429,10 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_strings(adapter, data, true);
break;
}
}
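The string and value paths above have to stay in lockstep: ethtool first asks the driver how many stats exist, then for exactly that many names, then for exactly that many u64 values, so every capability branch must contribute the same count in all three places. A minimal sketch of that contract for a generic driver, with my_stat_names and my_read_stat() as hypothetical placeholders:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char * const my_stat_names[] = { "rx_ok", "tx_ok" };

static int my_get_sset_count(struct net_device *ndev, int sset)
{
	return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stat_names) : -EOPNOTSUPP;
}

static void my_get_strings(struct net_device *ndev, u32 sset, u8 *data)
{
	int i;

	if (sset != ETH_SS_STATS)
		return;
	/* Emit exactly get_sset_count() names, one slot each */
	for (i = 0; i < ARRAY_SIZE(my_stat_names); i++)
		ethtool_puts(&data, my_stat_names[i]);
}

static void my_get_ethtool_stats(struct net_device *ndev,
				 struct ethtool_stats *stats, u64 *data)
{
	int i;

	/* Emit exactly the same number of u64 values, in the same order */
	for (i = 0; i < ARRAY_SIZE(my_stat_names); i++)
		*data++ = my_read_stat(ndev, i);	/* hypothetical */
}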
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 184b6e6cbed4..c5b50cfa935a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2798,19 +2798,6 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-int ena_update_hw_stats(struct ena_adapter *adapter)
-{
- int rc;
-
- rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
- if (rc) {
- netdev_err(adapter->netdev, "Failed to get ENI stats\n");
- return rc;
- }
-
- return 0;
-}
-
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3944,10 +3931,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
+ if (rc) {
+ netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
@@ -3955,7 +3948,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4076,6 +4069,8 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+err_metrics_destroy:
+ ena_com_delete_customer_metrics_buffer(ena_dev);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4139,6 +4134,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_com_delete_host_info(ena_dev);
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
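The probe rework above follows the usual kernel unwind discipline: each newly acquired resource gets a label, a later failure jumps to the label that frees everything acquired so far, and the labels run in reverse order of acquisition. A condensed, hypothetical sketch of the shape; step_*() and undo_*() are placeholders:

static int probe_sketch(void)
{
	int rc;

	rc = step_a();		/* e.g. allocate a metrics buffer */
	if (rc)
		return rc;

	rc = step_b();		/* e.g. map a BAR */
	if (rc)
		goto err_undo_a;

	rc = step_c();		/* e.g. initialize the device */
	if (rc)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();		/* frees what step_a() acquired */
	return rc;
}

This is why the hunk retargets the existing failure paths from err_netdev_destroy to the new err_metrics_destroy label: the metrics buffer is now acquired before them, so it must be freed on their failure.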
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index d59509747d1a..6e12ae3b12e5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -373,6 +373,7 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
+ struct ena_admin_ena_srd_info ena_srd_info;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -390,7 +391,6 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
-int ena_update_hw_stats(struct ena_adapter *adapter);
int ena_update_queue_params(struct ena_adapter *adapter,
u32 new_tx_size,
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 6bdd02b7aa6d..ac37a4e738ae 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -112,7 +112,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
@@ -123,7 +123,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
q_dentry = debugfs_create_dir("q", qcq->dentry);
- if (IS_ERR_OR_NULL(q_dentry))
+ if (IS_ERR(q_dentry))
return;
debugfs_create_u32("index", 0400, q_dentry, &q->index);
@@ -135,7 +135,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
cq_dentry = debugfs_create_dir("cq", qcq->dentry);
- if (IS_ERR_OR_NULL(cq_dentry))
+ if (IS_ERR(cq_dentry))
return;
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
@@ -148,7 +148,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
- if (IS_ERR_OR_NULL(intr_dentry))
+ if (IS_ERR(intr_dentry))
return;
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c4a4e316683f..5475867708f4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
unsigned int ecc_isr;
bool stop = false;
@@ -465,17 +465,17 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_ecc);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->ecc_bh_work);
else
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +604,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_dev);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->dev_bh_work);
else
- xgbe_isr_task(&pdata->tasklet_dev);
+ xgbe_isr_bh_work(&pdata->dev_bh_work);
return IRQ_HANDLED;
}
@@ -1007,8 +1007,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
- tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+ INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+ INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
@@ -1078,8 +1078,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- tasklet_kill(&pdata->tasklet_dev);
- tasklet_kill(&pdata->tasklet_ecc);
+ cancel_work_sync(&pdata->dev_bh_work);
+ cancel_work_sync(&pdata->ecc_bh_work);
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 21407a26f806..5fc94c2f638e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -582,16 +582,12 @@ static int xgbe_get_ts_info(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pdata->ptp_clock)
ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index a9ccc4258ee5..7a833894f52a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -321,10 +321,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_i2c);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->i2c_bh_work);
else
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -449,7 +449,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
- tasklet_kill(&pdata->tasklet_i2c);
+ cancel_work_sync(&pdata->i2c_bh_work);
}
}
@@ -464,7 +464,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+ INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4a2dc705b528..07f4f3418d01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -703,9 +703,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -727,17 +727,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_an);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->an_bh_work);
else
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
@@ -1454,7 +1454,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
- tasklet_kill(&pdata->tasklet_an);
+ cancel_work_sync(&pdata->an_bh_work);
}
pdata->phy_if.phy_impl.stop(pdata);
@@ -1477,7 +1477,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+ INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c5e5fac49779..c636999a6a84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,7 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = 1;
+ pdata->isr_as_bh_work = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -176,7 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f01a1e566da6..d85386cac8d1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1298,11 +1298,11 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
- unsigned int isr_as_tasklet;
- struct tasklet_struct tasklet_dev;
- struct tasklet_struct tasklet_ecc;
- struct tasklet_struct tasklet_i2c;
- struct tasklet_struct tasklet_an;
+ unsigned int isr_as_bh_work;
+ struct work_struct dev_bh_work;
+ struct work_struct ecc_bh_work;
+ struct work_struct i2c_bh_work;
+ struct work_struct an_bh_work;
struct dentry *xgbe_debugfs;
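The xgbe conversion (like the acenic one earlier) is the same mechanical recipe in four parts: the handler takes a work_struct, the containing object is recovered with from_work(), scheduling becomes queue_work(system_bh_wq, ...), and teardown becomes cancel_work_sync(), which waits for a running instance to finish much as tasklet_kill() did. A minimal self-contained sketch of the idiom, with my_device as a hypothetical driver struct:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_device {
	struct work_struct bh_work;
};

static void my_bh_work(struct work_struct *work)
{
	struct my_device *mydev = from_work(mydev, work, bh_work);

	/* deferred bottom-half processing for mydev goes here */
}

static void my_open(struct my_device *mydev)
{
	INIT_WORK(&mydev->bh_work, my_bh_work);
}

static irqreturn_t my_hard_irq(int irq, void *dev_id)
{
	struct my_device *mydev = dev_id;

	/* defer the heavy lifting out of hard IRQ context */
	queue_work(system_bh_wq, &mydev->bh_work);
	return IRQ_HANDLED;
}

static void my_close(struct my_device *mydev)
{
	cancel_work_sync(&mydev->bh_work);
}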
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 292b1f9cd9e7..785f4b4ff758 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1317,7 +1317,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev);
if (ret) {
printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
goto err_out_iounmap_rx;
@@ -1336,7 +1336,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Mask chip interrupts and disable chip, will be
* re-enabled on open()
*/
- disable_irq(dev->irq);
pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
if (register_netdev(dev) != 0) {
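The bmac change folds request_irq() plus a later disable_irq() into a single request_irq() with IRQF_NO_AUTOEN, which registers the handler while leaving the line masked and so removes the window in which the interrupt could fire before the driver is ready. A small sketch of the pairing, with my_isr() and priv as hypothetical names:

#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *dev_id)	/* hypothetical */
{
	return IRQ_HANDLED;
}

static int my_setup_irq_sketch(int irq, void *priv)
{
	int ret;

	/* Handler registered, but the line stays masked: no early IRQs */
	ret = request_irq(irq, my_isr, IRQF_NO_AUTOEN, "my-dev", priv);
	if (ret)
		return ret;

	/* ... finish bringing the device up ... */

	enable_irq(irq);	/* typically done in ndo_open() */
	return 0;
}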
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index d0aecd1d7357..440ff4616fec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -266,7 +266,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
- int tc;
+ unsigned int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
@@ -275,22 +275,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
- snprintf(tc_string, 8, "TC%d ", tc);
+ snprintf(tc_string, 8, "TC%u ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
}
}
@@ -305,20 +303,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -338,9 +334,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsc_stat_names[si], i);
- p += ETH_GSTRING_LEN;
}
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
@@ -349,10 +344,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -369,10 +363,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_rxsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
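The aquantia conversion replaces manual snprintf() calls plus hand-maintained cursor bumps with ethtool_sprintf(), which formats into the current ETH_GSTRING_LEN slot and advances the cursor in one call. A short sketch of the two equivalent forms; the function and string names are hypothetical:

#include <linux/ethtool.h>

static void fill_queue_strings_sketch(u8 *p, int nr_queues)
{
	int i;

	for (i = 0; i < nr_queues; i++) {
		/* old style: format, then bump the cursor by hand */
		snprintf(p, ETH_GSTRING_LEN, "queue[%d]_rx_packets", i);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < nr_queues; i++)
		/* new style: ethtool_sprintf() formats and advances p */
		ethtool_sprintf(&p, "queue[%d]_tx_packets", i);
}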
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f7433abd6591..f21de0c21e52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -557,7 +557,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
}
frag_cnt++;
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(self->sw_head,
@@ -583,7 +583,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
err = -EIO;
goto err_exit;
}
- next_ = buff_->next,
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
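The one-character aq_ring fix swaps a trailing comma for a semicolon. Because "a = x, b = y" is a single valid C expression statement that still evaluates both assignments left to right, the typo was functionally harmless and invisible to the compiler; the fix is purely for readability. Shown in both spellings:

/* comma operator: one statement, both assignments still execute */
next_ = buff_->next, buff_ = &self->buff_ring[next_];

/* intended form: two statements */
next_ = buff_->next;
buff_ = &self->buff_ring[next_];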
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on (PCI || ATH79)
+ depends on PCI || ATH79 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
- depends on ATH79
+ depends on ATH79 || COMPILE_TEST
select PHYLINK
imply NET_SELFTESTS
help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index a38be924cdaa..9586b6894f7e 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -149,11 +149,11 @@
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
-#define FIFO_CFG4_LE BIT(11) /* Long Event */
-#define FIFO_CFG4_CF BIT(12) /* Control Frame */
-#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
-#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
-#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_CF BIT(11) /* Control Frame */
+#define FIFO_CFG4_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG4_LE BIT(15) /* Long Event */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -168,28 +168,28 @@
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
-#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
-#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
-#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
-#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
-#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
-#define FIFO_CFG5_DR BIT(9) /* Dribble */
-#define FIFO_CFG5_CF BIT(10) /* Control Frame */
-#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
-#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
-#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
-#define FIFO_CFG5_LE BIT(14) /* Long Event */
-#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
-#define FIFO_CFG5_16 BIT(16) /* unknown */
-#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_CR BIT(4) /* CRC error */
+#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(10) /* Dribble */
+#define FIFO_CFG5_CF BIT(11) /* Control Frame */
+#define FIFO_CFG5_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(15) /* Long Event */
+#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
+ FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+ FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+ FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+ FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+ FIFO_CFG5_UC | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */
@@ -379,10 +379,7 @@ struct ag71xx {
u32 fifodata[3];
int mac_idx;
- struct reset_control *mdio_reset;
- struct mii_bus *mii_bus;
struct clk *clk_mdio;
- struct clk *clk_eth;
};
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
@@ -447,6 +444,13 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_mii_ioctl(ag->phylink, ifr, cmd);
+}
+
static void ag71xx_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -504,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ag71xx_statistics[i].name);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -685,36 +688,27 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
{
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
+ struct reset_control *mdio_reset;
static struct mii_bus *mii_bus;
struct device_node *np, *mnp;
int err;
np = dev->of_node;
- ag->mii_bus = NULL;
- ag->clk_mdio = devm_clk_get(dev, "mdio");
+ ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
if (IS_ERR(ag->clk_mdio)) {
netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
return PTR_ERR(ag->clk_mdio);
}
- err = clk_prepare_enable(ag->clk_mdio);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
- return err;
- }
-
mii_bus = devm_mdiobus_alloc(dev);
- if (!mii_bus) {
- err = -ENOMEM;
- goto mdio_err_put_clk;
- }
+ if (!mii_bus)
+ return -ENOMEM;
- ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
- if (IS_ERR(ag->mdio_reset)) {
+ mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+ if (IS_ERR(mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- err = PTR_ERR(ag->mdio_reset);
- goto mdio_err_put_clk;
+ return PTR_ERR(mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -725,33 +719,18 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
mii_bus->parent = dev;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
- if (!IS_ERR(ag->mdio_reset)) {
- reset_control_assert(ag->mdio_reset);
- msleep(100);
- reset_control_deassert(ag->mdio_reset);
- msleep(200);
- }
+ reset_control_assert(mdio_reset);
+ msleep(100);
+ reset_control_deassert(mdio_reset);
+ msleep(200);
mnp = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(mii_bus, mnp);
+ err = devm_of_mdiobus_register(dev, mii_bus, mnp);
of_node_put(mnp);
if (err)
- goto mdio_err_put_clk;
-
- ag->mii_bus = mii_bus;
+ return err;
return 0;
-
-mdio_err_put_clk:
- clk_disable_unprepare(ag->clk_mdio);
- return err;
-}
-
-static void ag71xx_mdio_remove(struct ag71xx *ag)
-{
- if (ag->mii_bus)
- mdiobus_unregister(ag->mii_bus);
- clk_disable_unprepare(ag->clk_mdio);
}
static void ag71xx_hw_stop(struct ag71xx *ag)
@@ -1637,7 +1616,6 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
- int err = 0;
if (ag71xx_desc_empty(desc))
break;
@@ -1660,6 +1638,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
+ ndev->stats.rx_errors++;
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
}
@@ -1667,14 +1646,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb_reserve(skb, offset);
skb_put(skb, pktlen);
- if (err) {
- ndev->stats.rx_dropped++;
- kfree_skb(skb);
- } else {
- skb->dev = ndev;
- skb->ip_summed = CHECKSUM_NONE;
- list_add_tail(&skb->list, &rx_list);
- }
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ list_add_tail(&skb->list, &rx_list);
next:
ring->buf[i].rx.rx_buf = NULL;
@@ -1799,7 +1773,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = ag71xx_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1816,6 +1790,7 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
+ struct clk *clk_eth;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1846,10 +1821,10 @@ static int ag71xx_probe(struct platform_device *pdev)
return -EINVAL;
}
- ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
- if (IS_ERR(ag->clk_eth)) {
+ clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
+ if (IS_ERR(clk_eth)) {
netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(ag->clk_eth);
+ return PTR_ERR(clk_eth);
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1870,6 +1845,12 @@ static int ag71xx_probe(struct platform_device *pdev)
if (!ag->mac_base)
return -ENOMEM;
+ /* Ensure that the HW is in manual polling mode before interrupts are
+ * activated. Otherwise ag71xx_interrupt might call napi_schedule
+ * before NAPI is initialized by netif_napi_add.
+ */
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
+
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
0x0, dev_name(&pdev->dev), ndev);
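The AG71XX_INT_POLL masking added above closes a probe-time race: the IRQ is requested before netif_napi_add() runs, so a stray poll interrupt could call napi_schedule() on an uninitialized NAPI context. A minimal sketch of the safe ordering, with hypothetical foo_* mask helpers (devm_request_irq() and netif_napi_add() are the real kernel APIs):

static int foo_probe_irq(struct foo *priv, struct net_device *ndev, int irq)
{
	int err;

	foo_int_disable(priv, FOO_INT_POLL);	/* 1: mask at the device */
	err = devm_request_irq(priv->dev, irq, foo_interrupt, 0,
			       dev_name(priv->dev), ndev);	/* 2: handler armed */
	if (err)
		return err;
	netif_napi_add(ndev, &priv->napi, foo_poll);	/* 3: NAPI ready */
	foo_int_enable(priv, FOO_INT_POLL);	/* 4: now safe to unmask */
	return 0;
}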
@@ -1912,6 +1893,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->next = (u32)ag->stop_desc_dma;
err = of_get_ethdev_address(np, ndev);
+ if (err == -EPROBE_DEFER)
+ return err;
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_hw_addr_random(ndev);
@@ -1926,33 +1909,27 @@ static int ag71xx_probe(struct platform_device *pdev)
netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
AG71XX_NAPI_WEIGHT);
- err = clk_prepare_enable(ag->clk_eth);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- return err;
- }
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
ag71xx_hw_init(ag);
err = ag71xx_mdio_probe(ag);
if (err)
- goto err_put_clk;
+ return err;
platform_set_drvdata(pdev, ndev);
err = ag71xx_phylink_setup(ag);
if (err) {
netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- goto err_mdio_remove;
+ return err;
}
- err = register_netdev(ndev);
+ err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
platform_set_drvdata(pdev, NULL);
- goto err_mdio_remove;
+ return err;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
@@ -1960,27 +1937,6 @@ static int ag71xx_probe(struct platform_device *pdev)
phy_modes(ag->phy_if_mode));
return 0;
-
-err_mdio_remove:
- ag71xx_mdio_remove(ag);
-err_put_clk:
- clk_disable_unprepare(ag->clk_eth);
- return err;
-}
-
-static void ag71xx_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ag71xx *ag;
-
- if (!ndev)
- return;
-
- ag = netdev_priv(ndev);
- unregister_netdev(ndev);
- ag71xx_mdio_remove(ag);
- clk_disable_unprepare(ag->clk_eth);
- platform_set_drvdata(pdev, NULL);
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2064,10 +2020,10 @@ static const struct of_device_id ag71xx_match[] = {
{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
{}
};
+MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
@@ -2075,4 +2031,5 @@ static struct platform_driver ag71xx_driver = {
};
module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
MODULE_LICENSE("GPL v2");
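The ag71xx conversion above leans on device-managed helpers so the error labels, ag71xx_mdio_remove() and ag71xx_remove() can all be deleted: devm_clk_get_enabled() returns a clock that is already prepared and enabled and is disabled and put automatically on detach, and devm_of_mdiobus_register() likewise unregisters the bus for us. A minimal sketch of the pattern, assuming a hypothetical foo driver:

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mii_bus *bus;
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "mdio");  /* get + prepare_enable in one call */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;
	bus->name = "foo_mdio";

	/* unregistered, then freed, automatically when the device detaches */
	return devm_of_mdiobus_register(dev, bus, dev->of_node);
}

Because devm actions run in reverse order of registration, the bus is unregistered before its clock is disabled, matching the hand-written unwind that was removed.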
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 20c6529ec135..297c2682a9cf 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1300,9 +1300,9 @@ static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
static int bcmasp_probe(struct platform_device *pdev)
{
- struct device_node *ports_node, *intf_node;
const struct bcmasp_plat_data *pdata;
struct device *dev = &pdev->dev;
+ struct device_node *ports_node;
struct bcmasp_priv *priv;
struct bcmasp_intf *intf;
int ret = 0, count = 0;
@@ -1374,12 +1374,11 @@ static int bcmasp_probe(struct platform_device *pdev)
}
i = 0;
- for_each_available_child_of_node(ports_node, intf_node) {
+ for_each_available_child_of_node_scoped(ports_node, intf_node) {
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
- of_node_put(intf_node);
ret = -ENOMEM;
goto of_put_exit;
}
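for_each_available_child_of_node_scoped() ties the child reference to the loop variable's scope via the kernel's cleanup machinery, so every exit from the loop body, including the early error return above, drops the reference without an explicit of_node_put(). A small sketch, assuming a hypothetical "broken" property:

static int foo_count_ports(struct device_node *ports)
{
	int n = 0;

	for_each_available_child_of_node_scoped(ports, child) {
		if (of_property_read_bool(child, "broken"))
			return -ENODEV;	/* no of_node_put() needed here */
		n++;
	}
	return n;	/* nor here: each child ref was auto-dropped */
}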
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c7b56a5e5425..adf7b6b94941 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3640,16 +3640,12 @@ static int bnx2x_get_ts_info(struct net_device *dev,
if (bp->flags & PTP_SUPPORTED) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (bp->ptp_clock)
info->phc_index = ptp_clock_index(bp->ptp_clock);
- else
- info->phc_index = -1;
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
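Both get_ts_info() hunks in this series drop the same boilerplate: the ethtool core now pre-initializes the software RX timestamping capabilities (SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE) and phc_index = -1 before calling the driver, so drivers only declare what they add on top, software TX plus any hardware support. A minimal sketch of the resulting callback shape (the kernel_ethtool_ts_info struct name is assumed here, as used by recent kernels):

static int foo_get_ts_info(struct net_device *dev,
			   struct kernel_ethtool_ts_info *info)
{
	/* core defaults already cover RX software timestamps and phc_index */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	return 0;
}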
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4e9215bce4ad..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 77d4cb4ad782..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so it's in the bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2bb133ae61c3..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04a623b3eee2..6e422e24750a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -301,10 +302,6 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
-#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-
-#define BNXT_CP_DB_IRQ_DIS(db) \
- writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
@@ -2853,34 +2850,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
-static irqreturn_t bnxt_inta(int irq, void *dev_instance)
-{
- struct bnxt_napi *bnapi = dev_instance;
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 cons = RING_CMP(cpr->cp_raw_cons);
- u32 int_status;
-
- prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
-
- if (!bnxt_has_work(bp, cpr)) {
- int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
- /* return if erroneous interrupt */
- if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
- return IRQ_NONE;
- }
-
- /* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
-
- /* Return here if interrupt is shared and is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- napi_schedule(&bnapi->napi);
- return IRQ_HANDLED;
-}
-
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
@@ -6579,7 +6548,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -6715,6 +6685,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6872,15 +6844,14 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
- } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ } else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -8943,6 +8914,80 @@ skip_rdma:
return 0;
}
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ if (BNXT_PAGE_SIZE == 0x2000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
+ else if (BNXT_PAGE_SIZE == 0x10000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
+ else
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_mem = NULL;
+ }
+}
+
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ /* keep and use the existing pages */
+ if (bp->fw_crash_mem &&
+ mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
+ goto alloc_done;
+
+ if (bp->fw_crash_mem)
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ else
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
+ GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+alloc_done:
+ bp->fw_crash_len = mem_size;
+ return 0;
+}
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -9118,6 +9163,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
@@ -10089,6 +10136,26 @@ vnic_setup_err:
return rc;
}
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc;
@@ -10553,22 +10620,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
-static void bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp);
+
+static int bnxt_change_msix(struct bnxt *bp, int total)
{
- const int len = sizeof(bp->irq_tbl[0].name);
+ struct msi_map map;
+ int i;
- if (bp->num_tc) {
- netdev_reset_tc(bp->dev);
- bp->num_tc = 0;
+ /* add MSIX to the end if needed */
+ for (i = bp->total_irqs; i < total; i++) {
+ map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
+ if (map.index < 0)
+ return bp->total_irqs;
+ bp->irq_tbl[i].vector = map.virq;
+ bp->total_irqs++;
}
- snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
- 0);
- bp->irq_tbl[0].handler = bnxt_inta;
+ /* trim MSIX from the end if needed */
+ for (i = bp->total_irqs; i > total; i--) {
+ map.index = i - 1;
+ map.virq = bp->irq_tbl[i - 1].vector;
+ pci_msix_free_irq(bp->pdev, map);
+ bp->total_irqs--;
+ }
+ return bp->total_irqs;
}
-static int bnxt_init_int_mode(struct bnxt *bp);
-
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -10579,10 +10656,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- bnxt_setup_msix(bp);
- else
- bnxt_setup_inta(bp);
+ bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
@@ -10670,10 +10744,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
-static int bnxt_init_msix(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
- struct msix_entry *msix_ent;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -10683,29 +10756,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
- msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!msix_ent)
- return -ENOMEM;
-
- for (i = 0; i < total_vecs; i++) {
- msix_ent[i].entry = i;
- msix_ent[i].vector = 0;
- }
-
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
- total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+ total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
+ PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
- bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ tbl_size = total_vecs;
+ if (pci_msix_can_alloc_dyn(bp->pdev))
+ tbl_size = max;
+ bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
- bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -10723,61 +10791,28 @@ static int bnxt_init_msix(struct bnxt *bp)
rc = -ENOMEM;
goto msix_setup_exit;
}
- bp->flags |= BNXT_FLAG_USING_MSIX;
- kfree(msix_ent);
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- pci_disable_msix(bp->pdev);
- kfree(msix_ent);
- return rc;
-}
-
-static int bnxt_init_inta(struct bnxt *bp)
-{
- bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl)
- return -ENOMEM;
-
- bp->total_irqs = 1;
- bp->rx_nr_rings = 1;
- bp->tx_nr_rings = 1;
- bp->cp_nr_rings = 1;
- bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->irq_tbl[0].vector = bp->pdev->irq;
- return 0;
-}
-
-static int bnxt_init_int_mode(struct bnxt *bp)
-{
- int rc = -ENODEV;
-
- if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_init_msix(bp);
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
- /* fallback to INTA */
- rc = bnxt_init_inta(bp);
- }
+ pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
+ pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
+ bool irq_change = false;
int tcs = bp->num_tc;
int irqs_required;
int rc;
@@ -10796,15 +10831,21 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
}
if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
- bnxt_ulp_irq_stop(bp);
- bnxt_clear_int_mode(bp);
- irq_cleared = true;
+ irq_change = true;
+ if (!pci_msix_can_alloc_dyn(bp->pdev)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ irq_cleared = true;
+ }
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
+ } else if (irq_change && !rc) {
+ if (bnxt_change_msix(bp, irqs_required) != irqs_required)
+ rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
@@ -10870,9 +10911,6 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- flags = IRQF_SHARED;
-
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -10937,29 +10975,22 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
- int i;
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
+ int i;
- if (bp->flags & BNXT_FLAG_USING_MSIX) {
- int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- poll_fn = bnxt_poll_p5;
- else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- cp_nr_rings--;
- for (i = 0; i < cp_nr_rings; i++) {
- bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
- }
- if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0);
- }
- } else {
- bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
+ for (i = 0; i < cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
+ }
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11947,20 +11978,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
-/* Common routine to pre-map certain register block to different GRC window.
- * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
- * in PF and 3 windows in VF that can be customized to map in different
- * register blocks.
- */
-static void bnxt_preset_reg_win(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- /* CAG registers map to GRC window #4 */
- writel(BNXT_CAG_REG_BASE,
- bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
- }
-}
-
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -12065,7 +12082,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -12078,12 +12094,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_USING_MSIX)) {
- /* disable RFS if falling back to INTA */
- bp->dev->hw_features &= ~NETIF_F_NTUPLE;
- bp->flags &= ~BNXT_FLAG_RFS;
- }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
@@ -12810,7 +12820,7 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
!BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
+ if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
hwr.grp = bp->rx_nr_rings;
@@ -13793,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13825,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hwr.cp_p5 = hwr.tx + rx;
- return bnxt_hwrm_check_rings(bp, &hwr);
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
+ }
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13963,6 +13990,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
@@ -15154,7 +15194,8 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
- int rc;
+ struct bnxt_vnic_info *vnic;
+ int i, rc;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15179,11 +15220,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- napi_enable(&rxr->bnapi->napi);
-
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
+
return 0;
err_free_hwrm_rx_ring:
@@ -15195,9 +15241,17 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = 0;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
rxr = &bp->rx_ring[idx];
- napi_disable(&rxr->bnapi->napi);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -15257,6 +15311,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -15644,6 +15699,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
+ return -ENODEV;
+ }
+
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -15670,9 +15730,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
- if (pdev->msix_cap)
- bp->flags |= BNXT_FLAG_MSIX_CAP;
-
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
@@ -15681,7 +15738,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
- dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -15862,6 +15918,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
rc = register_netdev(dev);
if (rc)
@@ -15895,6 +15953,7 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15986,6 +16045,8 @@ static int bnxt_resume(struct device *device)
rc = -ENODEV;
goto resume_exit;
}
+ if (bp->fw_crash_mem)
+ bnxt_hwrm_crash_dump_mem_cfg(bp);
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
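The bulk of the bnxt.c changes retire the legacy INTA path and move interrupt setup to pci_alloc_irq_vectors(..., PCI_IRQ_MSIX), with one new capability: on devices where pci_msix_can_alloc_dyn() is true, bnxt_change_msix() grows or trims the vector set in place with pci_msix_alloc_irq_at() / pci_msix_free_irq() instead of tearing down and rebuilding the whole interrupt mode. A condensed sketch of that resize pattern, assuming a hypothetical foo device:

/* Grow or shrink an MSI-X allocation in place; returns the count we end at. */
static int foo_resize_msix(struct pci_dev *pdev, int have, int want)
{
	struct msi_map map;

	while (have < want) {		/* grow: append one vector at a time */
		map = pci_msix_alloc_irq_at(pdev, have, NULL);
		if (map.index < 0)
			break;		/* keep what we managed to get */
		have++;
	}
	while (have > want) {		/* shrink: free from the tail */
		map.index = have - 1;
		map.virq = pci_irq_vector(pdev, have - 1);
		pci_msix_free_irq(pdev, map);
		have--;
	}
	return have;
}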
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 059a6f81c1a8..69231e85140b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1217,12 +1217,15 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
};
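One consistent accounting for BNXT_IRQ_NAME_EXTRA = 17, given the "%s-%s-%d" format used for vector names (e.g. "eth0-TxRx-123"): 4 bytes for "TxRx" + 2 hyphens + 10 digits for the largest int (2147483647) + 1 NUL terminator = 17, on top of the IFNAMSIZ already reserved for the interface name itself.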
@@ -1250,6 +1253,7 @@ struct bnxt_vnic_info {
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
@@ -1355,7 +1359,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
-#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1755,8 +1758,6 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -2199,8 +2200,6 @@ struct bnxt {
#define BNXT_FLAG_STRIP_VLAN 0x20
#define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
BNXT_FLAG_LRO)
- #define BNXT_FLAG_USING_MSIX 0x40
- #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2437,6 +2436,7 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
u32 fw_dbg_cap;
@@ -2449,6 +2449,9 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2644,6 +2647,9 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2837,6 +2843,8 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index c06789882036..4e2b938ed1f7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -372,20 +372,81 @@ err:
return rc;
}
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+ u32 dump_len)
+{
+ u32 data_copied = 0;
+ u32 data_len;
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ data_len = rmem->page_size;
+ if (data_copied + data_len > dump_len)
+ data_len = dump_len - data_copied;
+ memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+ data_copied += data_len;
+ if (data_copied >= dump_len)
+ break;
+ }
+ return data_copied;
+}
+
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+ struct bnxt_ring_mem_info *rmem;
+ u32 offset = 0;
+
+ if (!bp->fw_crash_mem)
+ return -ENOENT;
+
+ rmem = &bp->fw_crash_mem->ring_mem;
+
+ if (rmem->depth > 1) {
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+ offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
+ buf + offset,
+ dump_len - offset);
+ if (offset >= dump_len)
+ break;
+ }
+ } else {
+ bnxt_copy_crash_data(rmem, buf, dump_len);
+ }
+
+ return 0;
+}
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+ u32 sig = 0;
+
+ /* The first 4 bytes (signature) of the crash dump are always non-zero */
+ bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+ return !!sig;
+}
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+ return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, *dump_len);
-#else
- return -EOPNOTSUPP;
+ else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
+ else
+ return -EOPNOTSUPP;
} else {
return __bnxt_get_coredump(bp, buf, dump_len);
}
}
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +456,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
- !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +465,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
- if (dump_type == BNXT_DUMP_CRASH)
- req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +478,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
- *dump_len = le32_to_cpu(resp->crashdump_size);
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
@@ -434,10 +503,17 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
+ if (dump_type == BNXT_DUMP_CRASH &&
+ bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+ bp->fw_crash_mem) {
+ if (!bnxt_crash_dump_avail(bp))
+ return 0;
+
+ return bp->fw_crash_len;
+ }
+
if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type == BNXT_DUMP_CRASH)
- len = BNXT_CRASH_DUMP_LEN;
- else
+ if (dump_type != BNXT_DUMP_CRASH)
__bnxt_get_coredump(bp, NULL, &len);
}
return len;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index b1a1b2fffb19..a76d5c281413 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
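bnxt_copy_crash_data() above linearizes a dump that firmware scattered across a table of fixed-size pages (with one extra indirection level when rmem->depth > 1). The core walk is independent of the driver; a self-contained sketch:

#include <stdint.h>
#include <string.h>

/* Copy up to dump_len bytes from an array of fixed-size pages into one
 * linear buffer; returns how many bytes were actually copied.
 */
static uint32_t copy_paged(void *const *pg_arr, int nr_pages,
			   uint32_t page_size, void *buf, uint32_t dump_len)
{
	uint32_t copied = 0;

	for (int i = 0; i < nr_pages && copied < dump_len; i++) {
		uint32_t chunk = page_size;

		if (copied + chunk > dump_len)
			chunk = dump_len - copied;	/* clamp the final page */
		memcpy((char *)buf + copied, pg_arr[i], chunk);
		copied += chunk;
	}
	return copied;
}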
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..127b7015f676 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
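The debugfs fix above is the standard snprintf sizing rule: always pass sizeof(buf) rather than a hand-counted constant, and size the buffer for the worst case of the format. For "%d" that worst case is INT_MIN, "-2147483648": 11 characters plus the terminating NUL, so qname[12] is the smallest always-safe size, whereas the old qname[16] with a hard-coded length of 10 could silently truncate very large indices.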
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 4cf9bf8b01b0..f71cc8188b4e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -4157,7 +4158,7 @@ static void bnxt_get_pkgver(struct net_device *dev)
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
"/pkg %s", buf);
}
}
@@ -4989,9 +4990,16 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
return -EINVAL;
}
- if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
- netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
- return -EOPNOTSUPP;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+ (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+ netdev_info(dev,
+ "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+ netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+ return -EOPNOTSUPP;
+ }
}
bp->dump_flag = dump->flag;
@@ -5036,11 +5044,8 @@ static int bnxt_get_ts_info(struct net_device *dev,
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
@@ -5285,7 +5290,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
- .cap_rss_ctx_supported = 1,
+ .rxfh_per_ctx_key = 1,
.rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
.rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
.rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f219709f9563..f8ef6f1a1964 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -403,6 +403,9 @@ struct cmd_nums {
#define HWRM_FUNC_LAG_UPDATE 0x1b1UL
#define HWRM_FUNC_LAG_FREE 0x1b2UL
#define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -430,6 +433,9 @@ struct cmd_nums {
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
#define HWRM_UDCC_CFG 0x259UL
#define HWRM_UDCC_QCFG 0x25aUL
@@ -439,6 +445,9 @@ struct cmd_nums {
#define HWRM_UDCC_COMP_CFG 0x25eUL
#define HWRM_UDCC_COMP_QCFG 0x25fUL
#define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -500,10 +509,8 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -533,6 +540,9 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
#define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
#define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
@@ -582,6 +592,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -613,8 +624,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 44
-#define HWRM_VERSION_STR "1.10.3.44"
+#define HWRM_VERSION_RSVD 68
+#define HWRM_VERSION_STR "1.10.3.68"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -850,7 +861,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1691,7 +1705,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:1088b/136B) */
+/* hwrm_func_qcaps_output (size:1152b/144B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1824,6 +1838,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1845,7 +1862,9 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_qp;
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
- u8 unused_3[3];
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ u8 unused_3[7];
u8 valid;
};
@@ -2021,7 +2040,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3[2];
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
@@ -3671,33 +3691,38 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3772,6 +3797,11 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3785,29 +3815,34 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3883,6 +3918,13 @@ struct ts_split_entries {
__le32 rsvd2[2];
};
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3891,33 +3933,38 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3928,39 +3975,45 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -4410,6 +4463,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -4941,7 +4995,9 @@ struct hwrm_port_qstats_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
u8 valid;
};
@@ -5074,6 +5130,7 @@ struct hwrm_port_qstats_ext_output {
__le16 total_active_cos_queues;
u8 flags;
#define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
u8 valid;
};
@@ -6510,6 +6567,43 @@ struct hwrm_vnic_alloc_output {
u8 valid;
};
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
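The pair of structures just added defines the new HWRM_VNIC_UPDATE request/response. A minimal sketch of driving it through bnxt's hwrm_req_* message API, assuming only the MRU field is being changed (bnxt_hwrm_vnic_update() is a hypothetical helper name, not taken from this diff):

static int bnxt_hwrm_vnic_update(struct bnxt *bp, u32 vnic_id, u16 mru)
{
	struct hwrm_vnic_update_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
	if (rc)
		return rc;

	/* firmware only consumes fields whose enables bit is set */
	req->vnic_id = cpu_to_le32(vnic_id);
	req->enables = cpu_to_le32(VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
	req->mru = cpu_to_le16(mru);
	return hwrm_req_send(bp, req);
}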
/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
__le16 req_type;
@@ -6640,6 +6734,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
#define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
#define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -7484,23 +7579,24 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -8766,7 +8862,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8776,13 +8872,16 @@ struct hwrm_stat_ctx_alloc_input {
__le64 stats_dma_addr;
__le32 update_period_ms;
u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
u8 unused_0;
__le16 stats_dma_length;
__le16 flags;
#define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 steering_tag;
- __le32 unused_1;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -9650,10 +9749,13 @@ struct hwrm_dbg_qcaps_output {
__le32 coredump_component_disable_caps;
#define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
__le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
u8 unused_1[3];
u8 valid;
};
@@ -10092,16 +10194,19 @@ struct hwrm_nvm_erase_dir_entry_output {
u8 valid;
};
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
struct hwrm_nvm_get_dev_info_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -10135,6 +10240,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 netctrl_fw_minor;
__le16 netctrl_fw_build;
__le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
u8 unused_0[7];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 22898d3d088b..7bb8a5d74430 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
+#include <net/dcbnl.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
- ivi->vlan = vf->vlan;
- if (vf->flags & BNXT_VF_QOS)
- ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
- else
- ivi->qos = 0;
+ ivi->vlan = vf->vlan & VLAN_VID_MASK;
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (vlan_proto != htons(ETH_P_8021Q) &&
+ (vlan_proto != htons(ETH_P_8021AD) ||
+ !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
- /* TODO: needed to implement proper handling of user priority,
- * currently fail the command if there is valid priority
- */
- if (vlan_id > 4095 || qos)
+ if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+ (!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
- vlan_tag = vlan_id;
+ vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+ req->tpid = vlan_proto;
+ }
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
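The new vlan_tag assembly mirrors the 802.1Q TCI layout: with VLAN_PRIO_SHIFT being 13, the priority lands in bits 15:13 and the VLAN ID in bits 11:0. A worked example with illustrative values:

/* vlan_id already validated < VLAN_N_VID, qos < IEEE_8021Q_MAX_PRIORITIES */
u16 vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;	/* 100, 5 -> 0xa064 */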
@@ -900,11 +902,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
- netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
- return 0;
- }
-
rtnl_lock();
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e7b15d..fdd6356f21ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, up to the default.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
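Worked through with the limits defined further down in bnxt_ulp.h, assuming 16 online CPUs:

/* PF:      min(BNXT_MAX_ROCE_MSIX = 64,        16 + 1) = 17 vectors
 * VF:      min(BNXT_MAX_ROCE_MSIX_VF = 2,      16 + 1) = 2
 * NPAR PF: min(BNXT_MAX_ROCE_MSIX_NPAR_PF = 5, 16 + 1) = 5
 */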
int bnxt_send_msg(struct bnxt_en_dev *edev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6ec0abf..4f4914f5c84c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c2b4188a1ef1..a9040c42d2ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4428,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4441,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@@ -4464,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4873,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index fedc84ada937..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -268,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
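The cnic conversion above, like the macb one below, is the stock tasklet-to-BH-workqueue migration. The mapping, in outline (names are placeholders):

/*   struct tasklet_struct t;      ->  struct work_struct w;
 *   tasklet_setup(&t, fn);        ->  INIT_WORK(&w, fn);
 *   tasklet_schedule(&t);         ->  queue_work(system_bh_wq, &w);
 *   tasklet_disable(&t);          ->  disable_work_sync(&w);
 *   tasklet_kill(&t);             ->  cancel_work_sync(&w);
 *   from_tasklet(p, t, member)    ->  from_work(p, work, member)
 *
 * Work queued on system_bh_wq still runs in softirq (BH) context, so the
 * handler bodies and their locking are unchanged.
 */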
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0ec5f01551f9..378815917741 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6145,9 +6145,7 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6157,8 +6155,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
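This tg3 hunk and the matching deletions in macb, liquidio, nicvf, cxgb4 and enic below all lean on the same core-side change: the ethtool layer now pre-initializes kernel_ethtool_ts_info with the software RX/report timestamping flags and phc_index = -1, so a get_ts_info() callback only states what the core cannot infer. Under that assumption the minimal shape is:

info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;	/* driver stamps TX */
if (tp->ptp_clock)					/* otherwise core's -1 stands */
	info->phc_index = ptp_clock_index(tp->ptp_clock);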
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ea71612f6b36..5740c98d8c9f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -1330,7 +1331,7 @@ struct macb {
spinlock_t rx_fs_lock;
unsigned int max_tuples;
- struct tasklet_struct hresp_err_tasklet;
+ struct work_struct hresp_err_bh_work;
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dcd3f54ed0cf..f06babec04a0 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1792,9 +1792,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_hresp_error_task(struct work_struct *work)
{
- struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+ struct macb *bp = from_work(bp, work, hresp_err_bh_work);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1994,7 +1994,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- tasklet_schedule(&bp->hresp_err_tasklet);
+ queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -3410,8 +3410,6 @@ static int gem_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3423,7 +3421,8 @@ static int gem_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
return 0;
}
@@ -4184,6 +4183,8 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ netdev_sw_irq_coalesce_default_on(dev);
+
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Set features */
@@ -5119,12 +5120,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
- /* MTU range: 68 - 1500 or 10240 */
+ /* MTU range: 68 - 1518 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
- dev->max_mtu = ETH_DATA_LEN;
+ dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
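The new bound matches the updated comment: with ETH_HLEN = 14 and ETH_FCS_LEN = 4,

	dev->max_mtu = 1536 - 14 - 4;	/* = 1518, the upper end of "68 - 1518" */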
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
@@ -5172,7 +5173,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+ INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -5216,7 +5217,7 @@ static void macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- tasklet_kill(&bp->hresp_err_tasklet);
+ cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index f66d22de5168..fc4f5aee6ab3 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -19,8 +19,7 @@
#define PCI_DRIVER_NAME "macb_pci"
#define PLAT_DRIVER_NAME "macb"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0xe007
+#define PCI_DEVICE_ID_CDNS_MACB 0xe007
#define GEM_PCLK_RATE 50000000
#define GEM_HCLK_RATE 50000000
@@ -117,7 +116,7 @@ static void macb_remove(struct pci_dev *pdev)
}
static const struct pci_device_id dev_id_table[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) },
{ 0, }
};
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 2d06097d3f61..40f529d0bc4c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-
-void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 8ed57134ee0c..129c8b84f549 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -86,7 +86,6 @@ u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
-void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 5835965dbc32..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2496,37 +2496,31 @@ ret_intrmod:
return ret;
}
+#ifdef PTP_HARDWARE_TIMESTAMPING
static int lio_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
- else
- info->phc_index = -1;
-#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
return 0;
}
+#endif
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
@@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index fb380b4f3e02..d26364c2ac81 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -804,13 +804,6 @@ int octeon_init_consoles(struct octeon_device *oct);
int octeon_add_console(struct octeon_device *oct, u32 console_num,
char *dbg_enb);
-/** write or read from a console */
-int octeon_console_write(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 write_request_size, u32 flags);
-int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-
-int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
-
/** Removes all attached consoles. */
void octeon_remove_consoles(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index c9b19e624dce..232ae72c0e37 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
void *octeon_get_dispatch_arg(struct octeon_device *oct,
u16 opcode, u16 subcode);
-void octeon_droq_print_stats(void);
-
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index bebf3bd349c6..a04f36a0e1a0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
-void octeon_dump_soft_command(struct octeon_device *oct,
- struct octeon_soft_command *sc);
-
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 6a04d2530176..d0ff0c170b1a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -844,8 +844,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..b7531041c56d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
-void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
-u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..84f16ababaee 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -219,9 +219,7 @@
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
-void octeon_mdiobus_force_mod_depencency(void);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
-void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index e56eff701395..304bb282ab03 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -329,8 +329,6 @@ irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
-const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
- unsigned short ssid);
int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 7d7d3e0098df..3b7068832f95 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1034,7 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ netdev->lltx = true;
if (vlan_tso_capable(adapter)) {
netdev->features |=
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index ba15675d56df..64f93dcc676b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -65,9 +65,7 @@ void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
-void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
-int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index f04e81f33795..a08fc762a438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -106,6 +106,4 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
return &e->t3c_tid;
}
-int attach_t3cdev(struct t3cdev *dev);
-void detach_t3cdev(struct t3cdev *dev);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fca9533bc011..bbf7641a0fc7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1958,11 +1958,6 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr);
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable);
-
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 80c6627fe981..c80a93347a8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -122,7 +122,6 @@ void cxgb4_dcb_version_init(struct net_device *);
void cxgb4_dcb_reset(struct net_device *dev);
void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
-void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
static inline __u8 bitswap_1(unsigned char val)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 3d091947ae00..7f3f5afa864f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1556,12 +1556,9 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts
struct adapter *adapter = pi->adapter;
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1575,8 +1572,6 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts
if (adapter->ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- ts_info->phc_index = -1;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 33b2c0c45509..f6f745f5c022 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -81,8 +81,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
netdev->features |= NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FCOE_MTU;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
@@ -112,8 +111,7 @@ int cxgb_fcoe_disable(struct net_device *netdev)
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index 9050568a034c..64663112cad8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -242,7 +242,7 @@ struct cxgb4_next_header {
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- struct tc_u32_sel sel;
+ struct tc_u32_sel_hdr sel;
struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
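Background for this one-line change: the uapi struct tc_u32_sel now ends in a flexible array of keys, which makes it illegal to embed anywhere but at the tail of a containing struct; tc_u32_sel_hdr is its fixed-size header part. Roughly (abridged from the uapi header):

/* struct tc_u32_sel {
 *	struct tc_u32_sel_hdr hdr;	// fixed-size part, safe to embed
 *	struct tc_u32_key keys[];	// flexible tail
 * };
 */

cxgb4_next_header needs exactly one key, which it already carries in its separate tc_u32_key member, so only the type of 'sel' changes.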
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a9599ba26975..d8cafaa7ddb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -508,7 +508,6 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 854d87e1125c..2e3973a32d9d 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
EXPORT_SYMBOL(cxgbi_ppm_release);
-static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
- unsigned int *pcpu_ppmax)
+static struct cxgbi_ppm_pool __percpu *
+ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax)
{
- struct cxgbi_ppm_pool *pools;
+ struct cxgbi_ppm_pool __percpu *pools;
unsigned int ppmax = (*total) / num_possible_cpus();
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
unsigned int bmap;
@@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- struct cxgbi_ppm_pool *pool = NULL;
+ struct cxgbi_ppm_pool __percpu *pool = NULL;
unsigned int pool_index_max = 0;
unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
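The __percpu annotation threaded through this file is a sparse type marker: such pointers come from the percpu allocator and must be dereferenced via the per-CPU accessors rather than directly. A minimal sketch (alloc_sz is a placeholder):

struct cxgbi_ppm_pool __percpu *pools;
struct cxgbi_ppm_pool *p;

pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
p = per_cpu_ptr(pools, smp_processor_id());	/* this CPU's instance */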
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 300ad05ee05b..0cc3644ee855 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -128,6 +128,40 @@ struct vxlan_offload {
u8 flags;
};
+struct enic_wq_stats {
+ u64 packets; /* pkts queued for Tx */
+ u64 stopped; /* Tx ring almost full, queue stopped */
+ u64 wake; /* Tx ring no longer full, queue woken up */
+ u64 tso; /* non-encap tso pkt */
+ u64 encap_tso; /* encap tso pkt */
+ u64 encap_csum; /* encap HW csum */
+ u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */
+ u64 csum_none; /* HW csum not required */
+ u64 bytes; /* bytes queued for Tx */
+ u64 add_vlan; /* HW adds vlan tag */
+ u64 cq_work; /* Tx completions processed */
+ u64 cq_bytes; /* Tx bytes processed */
+ u64 null_pkt; /* skb length <= 0 */
+ u64 skb_linear_fail; /* linearize failures */
+ u64 desc_full_awake; /* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+ u64 packets; /* pkts received */
+ u64 bytes; /* bytes received */
+ u64 l4_rss_hash; /* hashed on l4 */
+ u64 l3_rss_hash; /* hashed on l3 */
+ u64 csum_unnecessary; /* HW verified csum */
+ u64 csum_unnecessary_encap; /* HW verified csum on encap packet */
+ u64 vlan_stripped; /* HW stripped vlan */
+ u64 napi_complete; /* napi complete intr reenabled */
+ u64 napi_repoll; /* napi poll again */
+ u64 bad_fcs; /* bad pkts */
+ u64 pkt_truncated; /* truncated pkts */
+ u64 no_skb; /* out of skbs */
+ u64 desc_skip; /* Rx pkt went into later buffer */
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -162,16 +196,16 @@ struct enic {
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
spinlock_t wq_lock[ENIC_WQ_MAX];
+ struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+ struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
unsigned int rq_count;
struct vxlan_offload vxlan;
- u64 rq_truncated_pkts;
- u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
/* interrupt resource cache line section */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f2f1055880b2..f7986f2b6a17 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -32,6 +32,41 @@ struct enic_stat {
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
+#define ENIC_PER_RQ_STAT(stat) { \
+ .name = "rq[%d]_"#stat, \
+ .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_PER_WQ_STAT(stat) { \
+ .name = "wq[%d]_"#stat, \
+ .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_per_rq_stats[] = {
+ ENIC_PER_RQ_STAT(l4_rss_hash),
+ ENIC_PER_RQ_STAT(l3_rss_hash),
+ ENIC_PER_RQ_STAT(csum_unnecessary_encap),
+ ENIC_PER_RQ_STAT(vlan_stripped),
+ ENIC_PER_RQ_STAT(napi_complete),
+ ENIC_PER_RQ_STAT(napi_repoll),
+ ENIC_PER_RQ_STAT(no_skb),
+ ENIC_PER_RQ_STAT(desc_skip),
+};
+
+#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
+
+static const struct enic_stat enic_per_wq_stats[] = {
+ ENIC_PER_WQ_STAT(encap_tso),
+ ENIC_PER_WQ_STAT(encap_csum),
+ ENIC_PER_WQ_STAT(add_vlan),
+ ENIC_PER_WQ_STAT(cq_work),
+ ENIC_PER_WQ_STAT(cq_bytes),
+ ENIC_PER_WQ_STAT(null_pkt),
+ ENIC_PER_WQ_STAT(skb_linear_fail),
+ ENIC_PER_WQ_STAT(desc_full_awake),
+};
+
+#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_tso),
};
+#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
+
static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_ok),
ENIC_RX_STAT(rx_frames_total),
@@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
+
static const struct enic_stat enic_gen_stats[] = {
ENIC_GEN_STAT(dma_map_error),
};
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
+#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
@@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev,
static void enic_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct enic *enic = netdev_priv(netdev);
unsigned int i;
+ unsigned int j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_rx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_gen_stats; i++) {
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic->rq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_rq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_wq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
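Each per-queue name declared above is a printf template that enic_get_strings() expands once per ring; for example, with i = 0:

/* ENIC_PER_RQ_STAT(napi_complete) sets .name = "rq[%d]_napi_complete" */
snprintf(data, ETH_GSTRING_LEN, "rq[%d]_napi_complete", 0);
/* data now holds "rq[0]_napi_complete" */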
@@ -242,9 +295,19 @@ err_out:
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int n_per_rq_stats;
+ unsigned int n_per_wq_stats;
+ unsigned int n_stats;
+
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
+ n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
+ n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
+ n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
+ NUM_ENIC_GEN_STATS +
+ n_per_rq_stats + n_per_wq_stats;
+ return n_stats;
default:
return -EOPNOTSUPP;
}
@@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+ unsigned int j;
int err;
err = enic_dev_stats_dump(enic, &vstats);
@@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
if (err == -ENOMEM)
return;
- for (i = 0; i < enic_n_tx_stats; i++)
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
- for (i = 0; i < enic_n_rx_stats; i++)
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
- for (i = 0; i < enic_n_gen_stats; i++)
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ index = enic_per_rq_stats[j].index;
+ *(data++) = ((u64 *)rqstats)[index];
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ index = enic_per_wq_stats[j].index;
+ *(data++) = ((u64 *)wqstats)[index];
+ }
+ }
}
static u32 enic_get_msglevel(struct net_device *netdev)
@@ -601,9 +683,7 @@ static int enic_set_rxfh(struct net_device *netdev,
static int enic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5f26fc3ad655..ffed14b63d41 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -46,6 +46,7 @@
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/netdev_queues.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -339,6 +340,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
static void enic_wq_free_buf(struct vnic_wq *wq,
struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq_stats[wq->index].cq_work++;
+ enic->wq_stats[wq->index].cq_bytes += buf->len;
enic_free_wq_buf(wq, buf);
}
@@ -355,8 +360,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
vnic_wq_desc_avail(&enic->wq[q_number]) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq_stats[q_number].wake++;
+ }
spin_unlock(&enic->wq_lock[q_number]);
@@ -590,6 +597,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ /* The enic_queue_wq_desc() above does not do HW checksum */
+ enic->wq_stats[wq->index].csum_none++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -622,6 +634,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq_stats[wq->index].csum_partial++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -676,15 +692,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
unsigned int offset = 0;
unsigned int hdr_len;
dma_addr_t dma_addr;
+ unsigned int pkts;
unsigned int len;
skb_frag_t *frag;
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
+ enic->wq_stats[wq->index].encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
+ enic->wq_stats[wq->index].tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -705,7 +724,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
if (eop)
- return 0;
+ goto tso_out_stats;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -732,6 +751,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
}
+tso_out_stats:
+ /* calculate how many packets tso sent */
+ len = skb->len - hdr_len;
+ pkts = len / mss;
+ if ((len % mss) > 0)
+ pkts++;
+ enic->wq_stats[wq->index].packets += pkts;
+ enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+
return 0;
}
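The open-coded count in tso_out_stats is a ceiling division; an equivalent form with the kernel's DIV_ROUND_UP (a sketch, not what the patch uses) would be:

pkts = DIV_ROUND_UP(skb->len - hdr_len, mss);	/* e.g. 9000 payload, mss 1448 -> 7 */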
@@ -764,6 +792,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq_stats[wq->index].encap_csum++;
+ enic->wq_stats[wq->index].packets++;
+ enic->wq_stats[wq->index].bytes += skb->len;
+
return err;
}
@@ -780,6 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
+ enic->wq_stats[wq->index].add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -792,7 +825,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
else if (skb->encapsulation)
err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
else
@@ -825,13 +858,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
struct netdev_queue *txq;
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map];
+
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
+ enic->wq_stats[wq->index].null_pkt++;
return NETDEV_TX_OK;
}
- txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -843,6 +878,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
+ enic->wq_stats[wq->index].skb_linear_fail++;
return NETDEV_TX_OK;
}
@@ -854,14 +890,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
spin_unlock(&enic->wq_lock[txq_map]);
+ enic->wq_stats[wq->index].desc_full_awake++;
return NETDEV_TX_BUSY;
}
if (enic_queue_wq_skb(enic, wq, skb))
goto error;
- if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
+ enic->wq_stats[wq->index].stopped++;
+ }
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
@@ -878,7 +917,10 @@ static void enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ u64 pkt_truncated = 0;
+ u64 bad_fcs = 0;
int err;
+ int i;
err = enic_dev_stats_dump(enic, &stats);
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -897,8 +939,17 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_bytes = stats->rx.rx_bytes_ok;
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- net_stats->rx_over_errors = enic->rq_truncated_pkts;
- net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+ for (i = 0; i < ENIC_RQ_MAX; i++) {
+ struct enic_rq_stats *rqs = &enic->rq_stats[i];
+
+ if (!enic->rq[i].ctrl)
+ break;
+ pkt_truncated += rqs->pkt_truncated;
+ bad_fcs += rqs->bad_fcs;
+ }
+ net_stats->rx_over_errors = pkt_truncated;
+ net_stats->rx_crc_errors = bad_fcs;
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}
@@ -1261,8 +1312,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb)
+ if (!skb) {
+ enic->rq_stats[rq->index].no_skb++;
return -ENOMEM;
+ }
dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
DMA_FROM_DEVICE);
@@ -1313,6 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1323,8 +1377,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
u32 rss_hash;
bool outer_csum_ok = true, encap = false;
- if (skipped)
+ rqstats->packets++;
+ if (skipped) {
+ rqstats->desc_skip++;
return;
+ }
skb = buf->os_buf;
@@ -1342,9 +1399,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
if (!fcs_ok) {
if (bytes_written > 0)
- enic->rq_bad_fcs++;
+ rqstats->bad_fcs++;
else if (bytes_written == 0)
- enic->rq_truncated_pkts++;
+ rqstats->pkt_truncated++;
}
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
@@ -1359,7 +1416,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Good receive
*/
-
+ rqstats->bytes += bytes_written;
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
buf->os_buf = NULL;
dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
@@ -1377,11 +1434,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
break;
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
break;
}
}
@@ -1418,11 +1477,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
(ipv4_csum_ok || ipv6)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
}
- if (vlan_stripped)
+ if (vlan_stripped) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
+ rqstats->vlan_stripped++;
+ }
skb_mark_napi_id(skb, &enic->napi[rq->index]);
if (!(netdev->features & NETIF_F_GRO))
netif_receive_skb(skb);
@@ -1435,7 +1499,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
-
+ rqstats->pkt_truncated++;
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1568,6 +1632,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq_stats[0].napi_complete++;
+ } else {
+ enic->rq_stats[0].napi_repoll++;
}
return rq_work_done;
@@ -1693,6 +1760,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq_stats[rq].napi_complete++;
+ } else {
+ enic->rq_stats[rq].napi_repoll++;
}
return work_done;
@@ -2502,6 +2572,54 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rxs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+
+ rxs->bytes = rqstats->bytes;
+ rxs->packets = rqstats->packets;
+ rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
+ rxs->hw_drop_overruns = rqstats->pkt_truncated;
+ rxs->csum_unnecessary = rqstats->csum_unnecessary +
+ rqstats->csum_unnecessary_encap;
+}
+
+static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *txs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+
+ txs->bytes = wqstats->bytes;
+ txs->packets = wqstats->packets;
+ txs->csum_none = wqstats->csum_none;
+ txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
+ wqstats->tso;
+ txs->hw_gso_packets = wqstats->tso;
+ txs->stop = wqstats->stopped;
+ txs->wake = wqstats->wake;
+}
+
+static void enic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rxs,
+ struct netdev_queue_stats_tx *txs)
+{
+ rxs->bytes = 0;
+ rxs->packets = 0;
+ rxs->hw_drops = 0;
+ rxs->hw_drop_overruns = 0;
+ rxs->csum_unnecessary = 0;
+ txs->bytes = 0;
+ txs->packets = 0;
+ txs->csum_none = 0;
+ txs->needs_csum = 0;
+ txs->hw_gso_packets = 0;
+ txs->stop = 0;
+ txs->wake = 0;
+}
+
static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
@@ -2550,6 +2668,12 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_features_check = enic_features_check,
};
+static const struct netdev_stat_ops enic_netdev_stat_ops = {
+ .get_queue_stats_rx = enic_get_queue_stats_rx,
+ .get_queue_stats_tx = enic_get_queue_stats_tx,
+ .get_base_stats = enic_get_base_stats,
+};
+
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2892,6 +3016,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
+ netdev->stat_ops = &enic_netdev_stat_ops;
netdev->watchdog_timeo = 2 * HZ;
enic_set_ethtool_ops(netdev);
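
The per-queue counters collected above are exported through the generic netdev_stat_ops interface (queried via the netdev netlink family's qstats-get dump) rather than through ethtool -S strings. Below is a minimal sketch of the same wiring for a hypothetical driver; the foo_* names are invented, and only netdev_stat_ops and the netdev_queue_stats_rx/tx structures (include/net/netdev_queues.h) are real kernel interfaces. Fields a driver never writes appear to be treated as unsupported by the core, which is why enic_get_base_stats() above explicitly zeroes everything the per-queue callbacks report — device totals then come out as exactly the sum over the queues.

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct foo_rxq_stats { u64 packets, bytes; };
struct foo_priv { struct foo_rxq_stats rxq[8]; };

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *rxs)
{
	struct foo_priv *fp = netdev_priv(dev);

	rxs->packets = fp->rxq[idx].packets;
	rxs->bytes = fp->rxq[idx].bytes;
}

static void foo_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rxs,
			       struct netdev_queue_stats_tx *txs)
{
	/* zero base stats so totals == sum of per-queue counters */
	rxs->packets = 0;
	rxs->bytes = 0;
	txs->packets = 0;
	txs->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_rx = foo_get_queue_stats_rx,
	.get_base_stats = foo_get_base_stats,
};

/* in probe: netdev->stat_ops = &foo_stat_ops; (tx side omitted for brevity) */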
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index bcfe52c11804..59ea48d4c9de 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -1235,6 +1235,7 @@ static const struct of_device_id dm9051_match_table[] = {
{ .compatible = "davicom,dm9051" },
{}
};
+MODULE_DEVICE_TABLE(of, dm9051_match_table);
static const struct spi_device_id dm9051_id_table[] = {
{ "dm9051", 0 },
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7bfeae04b52b..d0ea92607870 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1842,7 +1842,7 @@ static int rio_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS (&rio_pm_ops)
#else
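
DEFINE_SIMPLE_DEV_PM_OPS() differs from the old SIMPLE_DEV_PM_OPS() in that it always references the callbacks and lets the compiler constant-fold them to NULL when CONFIG_PM_SLEEP is off, so the callbacks no longer need __maybe_unused annotations. dl2k keeps its existing #ifdef around the table, but the usual companion is pm_sleep_ptr(); a sketch under that assumption, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* evaluates to NULL when CONFIG_PM_SLEEP is off */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};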
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 61fe9625bed1..e48b861e4ce1 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -966,9 +966,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
-bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
-u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
void be_eqd_update(struct be_adapter *adapter, bool force_update);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index e2085c68c0ee..d70818f06be7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2381,7 +2381,6 @@ struct be_cmd_req_manage_iface_filters {
} __packed;
u16 be_POST_stage_get(struct be_adapter *adapter);
-int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
@@ -2406,7 +2405,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
u8 *link_status, u32 dom);
-int be_cmd_reset(struct be_adapter *adapter);
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -2488,7 +2486,6 @@ int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
int lancer_initiate_dump(struct be_adapter *adapter);
int lancer_delete_dump(struct be_adapter *adapter);
bool dump_present(struct be_adapter *adapter);
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 9aa286ba1f00..228a638eae16 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -310,16 +310,12 @@ static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
struct tsnep_adapter *adapter = netdev_priv(netdev);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
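
This hunk (and the matching enetc and fec ones further down) leans on newer ethtool core behavior: the core appears to pre-set phc_index to -1 and to report the always-available software RX timestamping itself, so a driver's get_ts_info() only lists what it actually implements — software TX timestamping (because it calls skb_tx_timestamp()) plus its hardware capabilities. A hedged sketch with hypothetical foo_* names, using the callback signature of recent kernels (struct kernel_ethtool_ts_info; older trees use struct ethtool_ts_info):

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

static int foo_get_ts_info(struct net_device *ndev,
			   struct kernel_ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(ndev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	if (priv->ptp_clock)	/* else the core's phc_index = -1 stands */
		info->phc_index = ptp_clock_index(priv->ptp_clock);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}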
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4c546c3aef0f..f3cc14cc757d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,6 +24,7 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
+#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -50,6 +51,15 @@
#define FTGMAC_100MHZ 100000000
#define FTGMAC_25MHZ 25000000
+/* For NC-SI to register a fixed-link phy device */
+static struct fixed_phy_status ncsi_phy_status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .pause = 0,
+ .asym_pause = 0
+};
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -1541,7 +1551,8 @@ static int ftgmac100_open(struct net_device *netdev)
if (netdev->phydev) {
/* If we have a PHY, start polling */
phy_start(netdev->phydev);
- } else if (priv->use_ncsi) {
+ }
+ if (priv->use_ncsi) {
/* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
@@ -1554,6 +1565,7 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
err_ncsi:
+ phy_stop(netdev->phydev);
napi_disable(&priv->napi);
netif_stop_queue(netdev);
err_alloc:
@@ -1587,7 +1599,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
- else if (priv->use_ncsi)
+ if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
@@ -1725,6 +1737,9 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
phy_disconnect(netdev->phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
+
+ if (priv->use_ncsi)
+ fixed_phy_unregister(netdev->phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1802,6 +1817,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct resource *res;
int irq;
struct net_device *netdev;
+ struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
@@ -1889,6 +1905,14 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+
+ phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL);
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_phy_connect;
+ }
} else if (np && of_phy_is_fixed_link(np)) {
struct phy_device *phy;
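
With NC-SI there is no MDIO-managed PHY, so the patch registers a fixed-link PHY whose status mirrors the NC-SI channel (100M full duplex) and connects it directly; the matching fixed_phy_unregister() in ftgmac100_phy_disconnect() tears it down again. One caveat: the probe hunk above hands the fixed_phy_register() result straight to phy_connect_direct() without checking for an error pointer. A defensive version of the same sequence — foo_adjust_link() and the other foo_* names are hypothetical — would be:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct fixed_phy_status foo_ncsi_phy_status = {
	.link = 1,
	.speed = SPEED_100,
	.duplex = DUPLEX_FULL,
};

static void foo_adjust_link(struct net_device *netdev)
{
	/* hypothetical link-change handler */
}

static int foo_connect_ncsi_phy(struct net_device *netdev)
{
	struct phy_device *phydev;
	int err;

	phydev = fixed_phy_register(PHY_POLL, &foo_ncsi_phy_status, NULL);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	err = phy_connect_direct(netdev, phydev, foo_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err)
		fixed_phy_unregister(phydev);
	return err;
}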
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
FTGMAC100_INT_RPKT_BUF)
/* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
FTGMAC100_INT_BAD)
/*
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index cfe6b57b1da0..e15dd3d858df 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -229,7 +229,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->max_mtu = dpaa_get_max_mtu();
net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX | NETIF_F_RXHASH);
+ NETIF_F_RXHASH);
net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
/* The kernels enables GSO automatically, if we declare NETIF_F_SG.
@@ -239,6 +239,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ net_dev->lltx = true;
/* we do not want shared skbs on TX */
net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
@@ -2272,12 +2273,12 @@ static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
+ bool nonlinear;
int offset = 0;
int err = 0;
@@ -2287,6 +2288,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
qm_fd_clear_fd(&fd);
+ /* Packet data is always read as 32-bit words, so zero out any part of
+ * the skb which might be sent if we have to pad the packet
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
if (!nonlinear) {
/* We're going to store the skb backpointer at the beginning
* of the data buffer, so we need a privately owned skb
@@ -3156,8 +3164,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_del(&percpu_priv->np.napi);
+ __netif_napi_del(&percpu_priv->np.napi);
}
+ synchronize_net();
}
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
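
Two things happen in the dpaa hunks above. The padding fix relies on __skb_put_padto(): frames shorter than ETH_ZLEN (60 bytes) are extended with zeroes before DMA mapping, so the hardware's word-sized reads cannot put stale memory on the wire. The third argument is false, meaning the helper does not consume the skb on failure — the caller's own error path keeps ownership. A sketch of the call inside a hypothetical foo_start_xmit():

	/* zero-pad runt frames; helper intentionally does not free
	 * the skb on failure, so do it in our own error path
	 */
	if (__skb_put_padto(skb, ETH_ZLEN, false)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

The dpaa_napi_del() change is separate hygiene: __netif_napi_del() only unlinks each NAPI instance, so the single synchronize_net() after the loop replaces the RCU wait that netif_napi_del() would otherwise have performed once per CPU.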
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 6866807973da..29886a8ba73f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4594,12 +4594,13 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->priv_flags |= supported;
net_dev->priv_flags &= ~not_supported;
+ net_dev->lltx = true;
/* Features */
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
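
Here (and in the dpaa hunk above) NETIF_F_LLTX stops being a feature bit and becomes the net_device::lltx flag. Behavior is unchanged — the driver still provides its own TX locking — but the flag no longer occupies a slot in the ethtool-visible feature mask it was never meaningfully togglable through. The conversion pattern, for a driver that previously ORed NETIF_F_LLTX into its features:

	/* was: net_dev->hw_features |= ... | NETIF_F_LLTX; */
	net_dev->lltx = true;	/* lockless TX: driver does its own locking */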
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 5c45f42232d3..032d8eadd003 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -977,7 +977,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -1001,7 +1000,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
}
}
-#endif
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
@@ -1041,10 +1039,9 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
+ (priv->active_offloads & ENETC_F_RX_TSTAMP))
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
-#endif
}
/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
@@ -2305,12 +2302,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
- err = request_irq(irq, enetc_msix, 0, v->name, v);
+ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
- disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
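
The request_irq() change above uses IRQF_NO_AUTOEN so the line comes back disabled, closing the window in which a freshly requested interrupt could fire before the vector is fully initialized (the old code requested first and called disable_irq() afterwards, leaving that window open). The pattern in isolation, with foo_isr() and the cookie hypothetical:

#include <linux/interrupt.h>

	/* excerpt from a hypothetical setup function */
	err = request_irq(irq, foo_isr, IRQF_NO_AUTOEN, "foo-rxtx", data);
	if (err)
		goto irq_err;
	/* ... finish per-vector setup ... */
	enable_irq(irq);	/* e.g. from the open path */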
@@ -2882,7 +2878,6 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2951,17 +2946,17 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
-#endif
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
-#endif
+
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
+ if (cmd == SIOCSHWTSTAMP)
+ return enetc_hwtstamp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return enetc_hwtstamp_get(ndev, rq);
+ }
if (!priv->phylink)
return -EOPNOTSUPP;
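
The #ifdef-to-IS_ENABLED() conversions in this driver trade preprocessor exclusion for a compile-time constant condition: both branches are parsed and type-checked in every configuration, and the compiler discards the dead one. The price is that everything referenced must always compile, which is why the #ifdef guards around enetc_get_rx_tstamp() and the hwtstamp ioctl helpers are removed as well. The pattern in isolation, with hypothetical names:

#include <linux/kconfig.h>

	/* works for both =y and =m; the dead branch is eliminated as
	 * constant-false, but foo_feature_work() must always compile
	 */
	if (IS_ENABLED(CONFIG_FOO_FEATURE))
		foo_feature_work(priv);	/* hypothetical */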
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index a9c2ff22431c..97524dfa234c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -184,10 +184,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (rx_ring->ext_en)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
hw_idx = 2 * i;
-#endif
+
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
@@ -199,10 +198,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (rx_ring->ext_en)
+ if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
new_rxbd++;
-#endif
if (unlikely(++new_index == rx_ring->bd_count)) {
new_rxbd = rx_ring->bd_base;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 5e684b23c5f5..2563eb8ac7b6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -849,28 +849,26 @@ static int enetc_get_ts_info(struct net_device *ndev,
if (phc_idx) {
info->phc_index = *phc_idx;
symbol_put(enetc_phc_index);
- } else {
- info->phc_index = -1;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ return 0;
+ }
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
(1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
-#else
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-#endif
+
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a923cb95cdc6..acbb627d51bf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2775,15 +2775,11 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
if (fep->bufdesc_ex) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (fep->ptp_clock)
info->phc_index = ptp_clock_index(fep->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -4606,7 +4602,7 @@ fec_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4659,7 +4655,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4714,7 +4710,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4725,7 +4721,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4746,14 +4742,14 @@ failed_clk_ipg:
}
static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &fec_pm_ops,
+ .pm = pm_ptr(&fec_pm_ops),
.of_match_table = fec_dt_ids,
.suppress_bind_attrs = true,
},
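
fec's PM table mixes system-sleep and runtime callbacks, so the guard moves from per-function __maybe_unused annotations to pm_ptr() on the table: SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() always reference the callbacks (no unused-function warnings) while pm_ptr() evaluates to NULL when CONFIG_PM is off. pm_sleep_ptr() would be the wrong guard here, since it only covers CONFIG_PM_SLEEP. A minimal sketch with stubbed, hypothetical foo_* callbacks:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* NULL without CONFIG_PM */
	},
};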
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 2e4f3e1782a2..4cffda363a14 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -91,6 +91,30 @@
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static u64 fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
@@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = fep->cc.read(&fep->cc);
+ tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
timecounter_read(&fep->tc);
/* Get the current ptp hardware time counter */
- temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
- temp_val |= FEC_T_CTRL_CAPTURE;
- writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- ptp_hc = readl(fep->hwp + FEC_ATIME);
+ ptp_hc = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
@@ -272,30 +290,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
}
/**
- * fec_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc: the cyclecounter structure
- *
- * this function reads the cyclecounter registers and is called by the
- * cyclecounter structure used to construct a ns counter from the
- * arbitrary fixed point registers
- */
-static u64 fec_ptp_read(const struct cyclecounter *cc)
-{
- struct fec_enet_private *fep =
- container_of(cc, struct fec_enet_private, cc);
- u32 tempval;
-
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- return readl(fep->hwp + FEC_ATIME);
-}
-
-/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device
*
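
Moving fec_ptp_read() above fec_ptp_enable_pps() lets both the PPS and the perout paths share the one capture sequence (latch via FEC_T_CTRL_CAPTURE, optional quirk delay, read FEC_ATIME) instead of open-coding it, and calling the helper directly avoids an indirect call through fep->cc.read. Converting a capture into a 1588 timestamp stays the usual two-step timecounter idiom; a sketch using the names from the diff:

	u64 cycles, ns;

	cycles = fec_ptp_read(&fep->cc);		/* raw counter capture */
	ns = timecounter_cyc2time(&fep->tc, cycles);	/* to 1588 time */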
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 406e75e9e5ea..f17a4e511510 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct resource res;
struct resource *dev_res;
u32 val;
- int err = 0, lenp;
+ int err = 0;
enum fman_port_type port_type;
u16 port_speed;
u8 port_id;
@@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev)
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
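
The of_find_property() calls above only ever tested for presence, so converting to of_property_read_bool() drops the length out-parameter (and the now-unused lenp local). For a presence-only devicetree boolean the idiomatic form is:

#include <linux/of.h>

	/* true iff the property exists; there is no value to read back */
	if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
		port_speed = 10000;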
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
index 7f20840fde07..57013bf14d7c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Kconfig
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -3,7 +3,7 @@ config FS_ENET
tristate "Freescale Ethernet Driver"
depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
select MII
- select PHYLIB
+ select PHYLINK
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index cf392faa6105..3425c4a6abcb 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -9,10 +10,6 @@
*
* Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
* and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -29,17 +26,18 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>
+#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <asm/irq.h>
@@ -72,6 +70,13 @@ static void fs_set_multicast_list(struct net_device *dev)
(*fep->ops->set_multicast_list)(dev);
}
+static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_mii_ioctl(fep->phylink, ifr, cmd);
+}
+
static void skb_align(struct sk_buff *skb, int align)
{
int off = ((unsigned long)skb->data) & (align - 1);
@@ -84,15 +89,14 @@ static void skb_align(struct sk_buff *skb, int align)
static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
- struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
+ struct net_device *dev = fep->ndev;
+ int curidx, dirtyidx, received = 0;
+ int do_wake = 0, do_restart = 0;
+ int tx_left = TX_RING_SIZE;
struct sk_buff *skb, *skbn;
- int received = 0;
+ cbd_t __iomem *bdp;
u16 pkt_len, sc;
- int curidx;
- int dirtyidx, do_wake, do_restart;
- int tx_left = TX_RING_SIZE;
spin_lock(&fep->tx_lock);
bdp = fep->dirty_tx;
@@ -100,7 +104,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
/* clear status bits for napi*/
(*fep->ops->napi_clear_event)(dev);
- do_wake = do_restart = 0;
while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
dirtyidx = bdp - fep->tx_bd_base;
@@ -109,12 +112,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb = fep->tx_skbuff[dirtyidx];
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
if (sc & BD_ENET_TX_HB) /* No heartbeat */
dev->stats.tx_heartbeat_errors++;
if (sc & BD_ENET_TX_LC) /* Late collision */
@@ -130,16 +130,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dev->stats.tx_errors++;
do_restart = 1;
}
- } else
+ } else {
dev->stats.tx_packets++;
+ }
if (sc & BD_ENET_TX_READY) {
dev_warn(fep->dev,
"HEY! Enet xmit interrupt and TX_READY.\n");
}
- /*
- * Deferred means some collisions occurred during transmit,
+ /* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
if (sc & BD_ENET_TX_DEF)
@@ -153,25 +153,20 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- /*
- * Free the sk buffer associated with this last transmit.
- */
+ /* Free the sk buffer associated with this last transmit. */
if (skb) {
dev_kfree_skb(skb);
fep->tx_skbuff[dirtyidx] = NULL;
}
- /*
- * Update pointer to next buffer descriptor to be transmitted.
+ /* Update pointer to next buffer descriptor to be transmitted.
*/
if ((sc & BD_ENET_TX_WRAP) == 0)
bdp++;
else
bdp = fep->tx_bd_base;
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
+ /* Since we have freed up a buffer, the ring is no longer full.
*/
if (++fep->tx_free == MAX_SKB_FRAGS)
do_wake = 1;
@@ -188,8 +183,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (do_wake)
netif_wake_queue(dev);
- /*
- * First, grab all of the stats for the incoming packet.
+ /* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
bdp = fep->cur_rx;
@@ -198,16 +192,13 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
received < budget) {
curidx = bdp - fep->rx_bd_base;
- /*
- * Since we have allocated space to hold a complete frame,
+ /* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
dev_warn(fep->dev, "rcv is not +last\n");
- /*
- * Check for errors.
- */
+ /* Check for errors. */
if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
dev->stats.rx_errors++;
@@ -228,9 +219,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
} else {
skb = fep->rx_skbuff[curidx];
- /*
- * Process the incoming frame.
- */
+ /* Process the incoming frame */
dev->stats.rx_packets++;
pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
dev->stats.rx_bytes += pkt_len + 4;
@@ -238,15 +227,15 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (pkt_len <= fpi->rx_copybreak) {
/* +2 to make IP header L1 cache aligned */
skbn = netdev_alloc_skb(dev, pkt_len + 2);
- if (skbn != NULL) {
+ if (skbn) {
skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
+ skb_copy_from_linear_data(skb, skbn->data,
+ pkt_len);
swap(skb, skbn);
dma_sync_single_for_cpu(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(pkt_len),
- DMA_FROM_DEVICE);
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
@@ -256,20 +245,18 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
skb_align(skbn, ENET_RX_ALIGN);
- dma_unmap_single(fep->dev,
- CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
- dma = dma_map_single(fep->dev,
- skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ dma = dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
CBDW_BUFADDR(bdp, dma);
}
}
- if (skbn != NULL) {
+ if (skbn) {
skb_put(skb, pkt_len); /* Make room */
skb->protocol = eth_type_trans(skb, dev);
received++;
@@ -284,9 +271,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
- /*
- * Update BD pointer to next entry.
- */
+ /* Update BD pointer to next entry */
if ((sc & BD_ENET_RX_WRAP) == 0)
bdp++;
else
@@ -308,19 +293,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
return budget;
}
-/*
- * The interrupt handler.
+/* The interrupt handler.
* This is called from the MPC core interrupt.
*/
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
+ u32 int_events, int_clr_events;
struct fs_enet_private *fep;
- u32 int_events;
- u32 int_clr_events;
- int nr, napi_ok;
- int handled;
+ int nr, napi_ok, handled;
fep = netdev_priv(dev);
@@ -342,12 +324,12 @@ fs_enet_interrupt(int irq, void *dev_id)
(*fep->ops->napi_disable)(dev);
(*fep->ops->clear_int_events)(dev, fep->ev_napi);
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
+ /* NOTE: it is possible for FCCs in NAPI mode
+ * to submit a spurious interrupt while in poll
+ */
if (napi_ok)
__napi_schedule(&fep->napi);
}
-
}
handled = nr > 0;
@@ -357,45 +339,40 @@ fs_enet_interrupt(int irq, void *dev_id)
void fs_init_bds(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- cbd_t __iomem *bdp;
struct sk_buff *skb;
+ cbd_t __iomem *bdp;
int i;
fs_cleanup_bds(dev);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->dirty_tx = fep->tx_bd_base;
+ fep->cur_tx = fep->tx_bd_base;
fep->tx_free = fep->tx_ring;
fep->cur_rx = fep->rx_bd_base;
- /*
- * Initialize the receive buffer descriptors.
- */
+ /* Initialize the receive buffer descriptors */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL)
+ if (!skb)
break;
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
- CBDW_BUFADDR(bdp,
- dma_map_single(fep->dev, skb->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0); /* zero */
CBDW_SC(bdp, BD_ENET_RX_EMPTY |
((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
}
- /*
- * if we failed, fillup remainder
- */
+
+ /* if we failed, fill up the remainder */
for (; i < fep->rx_ring; i++, bdp++) {
fep->rx_skbuff[i] = NULL;
CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
}
- /*
- * ...and the same for transmit.
- */
+ /* ...and the same for transmit. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
fep->tx_skbuff[i] = NULL;
CBDW_BUFADDR(bdp, 0);
@@ -411,32 +388,30 @@ void fs_cleanup_bds(struct net_device *dev)
cbd_t __iomem *bdp;
int i;
- /*
- * Reset SKB transmit buffers.
- */
+ /* Reset SKB transmit buffers. */
for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
- if ((skb = fep->tx_skbuff[i]) == NULL)
+ skb = fep->tx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- skb->len, DMA_TO_DEVICE);
+ skb->len, DMA_TO_DEVICE);
fep->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- /*
- * Reset SKB receive buffers
- */
+ /* Reset SKB receive buffers */
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
- if ((skb = fep->rx_skbuff[i]) == NULL)
+ skb = fep->rx_skbuff[i];
+ if (!skb)
continue;
/* unmap */
dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
fep->rx_skbuff[i] = NULL;
@@ -444,12 +419,8 @@ void fs_cleanup_bds(struct net_device *dev)
}
}
-/**********************************************************************************/
-
#ifdef CONFIG_FS_ENET_MPC5121_FEC
-/*
- * MPC5121 FEC requeries 4-byte alignment for TX data buffer!
- */
+/* MPC5121 FEC requires 4-byte alignment for TX data buffer! */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
@@ -481,15 +452,12 @@ static netdev_tx_t
fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ int curidx, nr_frags, len;
cbd_t __iomem *bdp;
- int curidx;
- u16 sc;
- int nr_frags;
skb_frag_t *frag;
- int len;
+ u16 sc;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
- int is_aligned = 1;
- int i;
+ int i, is_aligned = 1;
if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
is_aligned = 0;
@@ -507,8 +475,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_aligned) {
skb = tx_skb_align_workaround(dev, skb);
if (!skb) {
- /*
- * We have lost packet due to memory allocation error
+ /* We have lost a packet due to a memory allocation error
* in tx_skb_align_workaround(). Hopefully original
* skb is still valid, so try transmit it later.
*/
@@ -519,9 +486,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&fep->tx_lock);
- /*
- * Fill in a Tx ring entry
- */
+ /* Fill in a Tx ring entry */
bdp = fep->cur_tx;
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -529,8 +494,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
- /*
- * Ooops. All transmit buffers are full. Bail out.
+ /* Oops. All transmit buffers are full. Bail out.
* This should not happen, since the tx queue should be stopped.
*/
dev_warn(fep->dev, "tx queue full!.\n");
@@ -543,12 +507,12 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += len;
if (nr_frags)
len -= skb->data_len;
+
fep->tx_free -= nr_frags + 1;
- /*
- * Push the data cache so the CPM does not get stale memory data.
+ /* Push the data cache so the CPM does not get stale memory data.
*/
CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
- skb->data, len, DMA_TO_DEVICE));
+ skb->data, len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, len);
fep->mapped_as_page[curidx] = 0;
@@ -585,9 +549,11 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* note that while FEC does not have this bit
* it marks it as available for software use
- * yay for hw reuse :) */
+ * yay for hw reuse :)
+ */
if (skb->len <= 60)
sc |= BD_ENET_TX_PAD;
+
CBDC_SC(bdp, BD_ENET_TX_STATS);
CBDS_SC(bdp, sc);
@@ -599,6 +565,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bdp++;
else
bdp = fep->tx_bd_base;
+
fep->cur_tx = bdp;
if (fep->tx_free < MAX_SKB_FRAGS)
@@ -623,15 +590,21 @@ static void fs_timeout_work(struct work_struct *work)
dev->stats.tx_errors++;
- spin_lock_irqsave(&fep->lock, flags);
+ /* In the event a timeout was detected, but the netdev is brought down
+ * shortly after, it no longer makes sense to try to recover from the
+ * timeout. netif_running() will return false when called from the
+ * .ndo_close() callback. Calling the following recovery code while
+ * called from .ndo_close() could deadlock on rtnl.
+ */
+ if (!netif_running(dev))
+ return;
- if (dev->flags & IFF_UP) {
- phy_stop(dev->phydev);
- (*fep->ops->stop)(dev);
- (*fep->ops->restart)(dev);
- }
+ rtnl_lock();
+ phylink_stop(fep->phylink);
+ phylink_start(fep->phylink);
+ rtnl_unlock();
- phy_start(dev->phydev);
+ spin_lock_irqsave(&fep->lock, flags);
wake = fep->tx_free >= MAX_SKB_FRAGS &&
!(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -647,82 +620,37 @@ static void fs_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&fep->timeout_work);
}
-/*-----------------------------------------------------------------------------
- * generic link-change handler - should be sufficient for most cases
- *-----------------------------------------------------------------------------*/
-static void generic_adjust_link(struct net_device *dev)
+static void fs_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- if (phydev->link) {
- /* adjust to duplex mode */
- if (phydev->duplex != fep->oldduplex) {
- new_state = 1;
- fep->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != fep->oldspeed) {
- new_state = 1;
- fep->oldspeed = phydev->speed;
- }
-
- if (!fep->oldlink) {
- new_state = 1;
- fep->oldlink = 1;
- }
-
- if (new_state)
- fep->ops->restart(dev);
- } else if (fep->oldlink) {
- new_state = 1;
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
- }
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
- if (new_state && netif_msg_link(fep))
- phy_print_status(phydev);
+ spin_lock_irqsave(&fep->lock, flags);
+ fep->ops->restart(ndev, interface, speed, duplex);
+ spin_unlock_irqrestore(&fep->lock, flags);
}
-
-static void fs_adjust_link(struct net_device *dev)
+static void fs_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
unsigned long flags;
spin_lock_irqsave(&fep->lock, flags);
-
- if(fep->ops->adjust_link)
- fep->ops->adjust_link(dev);
- else
- generic_adjust_link(dev);
-
+ fep->ops->stop(ndev);
spin_unlock_irqrestore(&fep->lock, flags);
}
-static int fs_init_phy(struct net_device *dev)
+static void fs_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev;
- phy_interface_t iface;
-
- fep->oldlink = 0;
- fep->oldspeed = 0;
- fep->oldduplex = -1;
-
- iface = fep->fpi->use_rmii ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
-
- phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
- iface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
- }
-
- return 0;
+ /* Nothing to do */
}
static int fs_enet_open(struct net_device *dev)
@@ -731,8 +659,9 @@ static int fs_enet_open(struct net_device *dev)
int r;
int err;
- /* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_napi */
+ /* to initialize fep->cur_rx, etc.; not doing this will cause
+ * a crash in fs_enet_napi
+ */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
@@ -746,13 +675,13 @@ static int fs_enet_open(struct net_device *dev)
return -EINVAL;
}
- err = fs_init_phy(dev);
+ err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0);
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
return err;
}
- phy_start(dev->phydev);
+ phylink_start(fep->phylink);
netif_start_queue(dev);
@@ -765,28 +694,25 @@ static int fs_enet_close(struct net_device *dev)
unsigned long flags;
netif_stop_queue(dev);
- netif_carrier_off(dev);
napi_disable(&fep->napi);
- cancel_work_sync(&fep->timeout_work);
- phy_stop(dev->phydev);
+ cancel_work(&fep->timeout_work);
+ phylink_stop(fep->phylink);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
(*fep->ops->stop)(dev);
spin_unlock(&fep->tx_lock);
spin_unlock_irqrestore(&fep->lock, flags);
+ phylink_disconnect_phy(fep->phylink);
/* release any irqs */
- phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
}
-/*************************************************************************/
-
static void fs_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
@@ -799,7 +725,7 @@ static int fs_get_regs_len(struct net_device *dev)
}
static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *p)
+ void *p)
{
struct fs_enet_private *fep = netdev_priv(dev);
unsigned long flags;
@@ -818,12 +744,14 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static u32 fs_get_msglevel(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
return fep->msg_enable;
}
static void fs_set_msglevel(struct net_device *dev, u32 value)
{
struct fs_enet_private *fep = netdev_priv(dev);
+
fep->msg_enable = value;
}
@@ -865,6 +793,22 @@ static int fs_set_tunable(struct net_device *dev,
return ret;
}
+static int fs_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(fep->phylink, cmd);
+}
+
+static int fs_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(fep->phylink, cmd);
+}
+
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -874,14 +818,12 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = fs_ethtool_get_link_ksettings,
+ .set_link_ksettings = fs_ethtool_set_link_ksettings,
.get_tunable = fs_get_tunable,
.set_tunable = fs_set_tunable,
};
-/**************************************************************************************/
-
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
@@ -894,7 +836,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = fs_eth_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -902,17 +844,23 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
+static const struct phylink_mac_ops fs_enet_phylink_mac_ops = {
+ .mac_config = fs_mac_config,
+ .mac_link_down = fs_mac_link_down,
+ .mac_link_up = fs_mac_link_up,
+};
+
static int fs_enet_probe(struct platform_device *ofdev)
{
+ int privsize, len, ret = -ENODEV;
+ struct fs_platform_info *fpi;
+ struct fs_enet_private *fep;
+ phy_interface_t phy_mode;
const struct fs_ops *ops;
struct net_device *ndev;
- struct fs_enet_private *fep;
- struct fs_platform_info *fpi;
+ struct phylink *phylink;
const u32 *data;
struct clk *clk;
- int err;
- const char *phy_connection_type;
- int privsize, len, ret = -ENODEV;
ops = device_get_match_data(&ofdev->dev);
if (!ops)
@@ -930,51 +878,36 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
+ ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode);
+ if (ret) {
+ /* For compatibility, if the mode isn't specified in DT,
+ * assume MII
+ */
+ phy_mode = PHY_INTERFACE_MODE_MII;
+ }
+
fpi->rx_ring = RX_RING_SIZE;
fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
- err = of_phy_register_fixed_link(ofdev->dev.of_node);
- if (err)
- goto out_free_fpi;
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- fpi->phy_node = of_node_get(ofdev->dev.of_node);
- }
-
- if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
- phy_connection_type = of_get_property(ofdev->dev.of_node,
- "phy-connection-type", NULL);
- if (phy_connection_type && !strcmp("rmii", phy_connection_type))
- fpi->use_rmii = 1;
- }
/* make clock lookup non-fatal (the driver is shared among platforms),
* but require enable to succeed when a clock was specified/found,
* keep a reference to the clock upon successful acquisition
*/
- clk = devm_clk_get(&ofdev->dev, "per");
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_deregister_fixed_link;
-
- fpi->clk_per = clk;
- }
+ clk = devm_clk_get_optional_enabled(&ofdev->dev, "per");
+ if (IS_ERR(clk))
+ goto out_free_fpi;
privsize = sizeof(*fep) +
- sizeof(struct sk_buff **) *
+ sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring) +
sizeof(char) * fpi->tx_ring;
ndev = alloc_etherdev(privsize);
if (!ndev) {
ret = -ENOMEM;
- goto out_put;
+ goto out_free_fpi;
}
SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -986,9 +919,29 @@ static int fs_enet_probe(struct platform_device *ofdev)
fep->fpi = fpi;
fep->ops = ops;
+ fep->phylink_config.dev = &ndev->dev;
+ fep->phylink_config.type = PHYLINK_NETDEV;
+ fep->phylink_config.mac_capabilities = MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ fep->phylink_config.supported_interfaces);
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec"))
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ fep->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev),
+ phy_mode, &fs_enet_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ ret = PTR_ERR(phylink);
+ goto out_free_dev;
+ }
+
+ fep->phylink = phylink;
+
ret = fep->ops->setup_data(ndev);
if (ret)
- goto out_free_dev;
+ goto out_phylink;
fep->rx_skbuff = (struct sk_buff **)&fep[1];
fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
@@ -1018,8 +971,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->ethtool_ops = &fs_ethtool_ops;
- netif_carrier_off(ndev);
-
ndev->features |= NETIF_F_SG;
ret = register_netdev(ndev);
@@ -1034,14 +985,10 @@ out_free_bd:
fep->ops->free_bd(ndev);
out_cleanup_data:
fep->ops->cleanup_data(ndev);
+out_phylink:
+ phylink_destroy(fep->phylink);
out_free_dev:
free_netdev(ndev);
-out_put:
- clk_disable_unprepare(fpi->clk_per);
-out_deregister_fixed_link:
- of_node_put(fpi->phy_node);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1057,10 +1004,7 @@ static void fs_enet_remove(struct platform_device *ofdev)
fep->ops->free_bd(ndev);
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
- of_node_put(fep->fpi->phy_node);
- clk_disable_unprepare(fep->fpi->clk_per);
- if (of_phy_is_fixed_link(ofdev->dev.of_node))
- of_phy_deregister_fixed_link(ofdev->dev.of_node);
+ phylink_destroy(fep->phylink);
free_netdev(ndev);
}
@@ -1114,9 +1058,9 @@ static struct platform_driver fs_enet_driver = {
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
- disable_irq(dev->irq);
- fs_enet_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
}
#endif
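
The fs_enet conversion above follows the standard phylink shape: a phylink_mac_ops with mac_config()/mac_link_up()/mac_link_down(), phylink_create() at probe time, and connect/start/stop/disconnect from the ndo_open/ndo_stop paths. phylink owns carrier state — hence the deleted netif_carrier_off() calls — and its start/stop entry points must run under RTNL, which is what the rtnl_lock() in the reworked fs_timeout_work() is about (ndo_open/ndo_stop already hold it). A condensed open/close sketch with hypothetical foo_* names:

#include <linux/netdevice.h>
#include <linux/phylink.h>

struct foo_priv {
	struct device *dev;
	struct phylink *phylink;
};

static int foo_open(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);
	int err;

	err = phylink_of_phy_connect(priv->phylink,
				     priv->dev->of_node, 0);
	if (err)
		return err;

	phylink_start(priv->phylink);	/* carrier handled by phylink */
	netif_start_queue(ndev);
	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);
	return 0;
}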
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 21c07ac05225..36e4fcc29e36 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -3,11 +3,11 @@
#define FS_ENET_H
#include <linux/clk.h>
-#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_CPM1
@@ -77,8 +77,8 @@ struct fs_ops {
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
- void (*adjust_link)(struct net_device *dev);
- void (*restart)(struct net_device *dev);
+ void (*restart)(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex);
void (*stop)(struct net_device *dev);
void (*napi_clear_event)(struct net_device *dev);
void (*napi_enable)(struct net_device *dev);
@@ -93,14 +93,6 @@ struct fs_ops {
void (*tx_restart)(struct net_device *dev);
};
-struct phy_info {
- unsigned int id;
- const char *name;
- void (*startup) (struct net_device * dev);
- void (*shutdown) (struct net_device * dev);
- void (*ack_int) (struct net_device * dev);
-};
-
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
@@ -122,15 +114,9 @@ struct fs_platform_info {
u32 dpram_offset;
- struct device_node *phy_node;
-
int rx_ring, tx_ring; /* number of buffers on rx */
int rx_copybreak; /* limit we copy small frames */
int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
-
- struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_enet_private {
@@ -154,14 +140,11 @@ struct fs_enet_private {
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
- const struct phy_info *phy;
u32 msg_enable;
- struct mii_if_info mii_if;
- unsigned int last_mii_status;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
int interrupt;
- int oldduplex, oldspeed, oldlink; /* current settings */
-
/* event masks */
u32 ev_napi; /* mask of NAPI events */
u32 ev; /* event mask */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index e2ffac9eb2ad..be63293511d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* FCC driver for Motorola MPC82xx (PQ2).
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -239,7 +235,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
@@ -363,8 +360,8 @@ static void restart(struct net_device *dev)
fs_init_bds(dev);
/* adjust to speed (for RMII mode) */
- if (fpi->use_rmii) {
- if (dev->phydev->speed == 100)
+ if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -386,11 +383,11 @@ static void restart(struct net_device *dev)
W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
- if (fpi->use_rmii)
+ if (interface == PHY_INTERFACE_MODE_RMII)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index cdc89d83cf07..f2ecd20027cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale Ethernet controllers
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -26,7 +23,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -224,7 +220,8 @@ static void set_multicast_list(struct net_device *dev)
set_promiscuous_mode(dev);
}
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
@@ -306,13 +303,13 @@ static void restart(struct net_device *dev)
* Only set MII/RMII mode - do not touch maximum frame length
* configured before.
*/
- FS(fecp, r_cntrl, fpi->use_rmii ?
- FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
+ FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
/*
* adjust to duplex mode
*/
- if (dev->phydev->duplex) {
+ if (duplex == DUPLEX_FULL) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index a64cb6270515..6c97191649de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -25,7 +22,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
@@ -131,15 +127,14 @@ static int setup_data(struct net_device *dev)
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
- if (IS_ERR_VALUE(fep->ring_mem_addr))
+ fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fpi->dpram_offset))
return -ENOMEM;
- fep->ring_base = (void __iomem __force*)
- cpm_muram_addr(fep->ring_mem_addr);
+ fep->ring_base = cpm_muram_addr(fpi->dpram_offset);
return 0;
}
@@ -147,9 +142,10 @@ static int allocate_bd(struct net_device *dev)
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
- cpm_muram_free(fep->ring_mem_addr);
+ cpm_muram_free(fpi->dpram_offset);
}
static void cleanup_data(struct net_device *dev)
@@ -230,7 +226,8 @@ static void set_multicast_list(struct net_device *dev)
* change. This only happens when switching between half and full
* duplex.
*/
-static void restart(struct net_device *dev)
+static void restart(struct net_device *dev, phy_interface_t interface,
+ int speed, int duplex)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
@@ -247,9 +244,9 @@ static void restart(struct net_device *dev)
__fs_out8((u8 __iomem *)ep + i, 0);
/* point to bds */
- W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
W16(ep, sen_genscc.scc_tbase,
- fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+ fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);
/* Initialize function code registers for big-endian.
*/
@@ -341,7 +338,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (dev->phydev->duplex)
+ if (duplex == DUPLEX_FULL)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index f965a2329055..2e210a003558 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7bb69727952a..93d91e8ad0de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
@@ -6,10 +7,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2baef59f741d..ecb1703ea150 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_grp_init;
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index f581402ad740..a99b95c4bcfb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1455,12 +1455,8 @@ static int gfar_get_ts_info(struct net_device *dev,
struct device_node *ptp_node;
struct ptp_qoriq *ptp = NULL;
- info->phc_index = -1;
-
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1478,9 +1474,7 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index a7fbd4cd560a..ce97b76f9ae0 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -546,17 +546,14 @@ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
unsigned int id0, enum fun_admin_bind_type type1,
unsigned int id1)
{
- struct {
- struct fun_admin_bind_req req;
- struct fun_admin_bind_entry entry[2];
- } cmd = {
- .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
- sizeof(cmd)),
- .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
- .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
- };
+ DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2);
+
+ cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ __struct_size(cmd));
+ cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0);
+ cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1);
- return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+ return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);
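DEFINE_RAW_FLEX() (from <linux/overflow.h>) declares zero-initialized on-stack storage for a structure that ends in a flexible array, sized for an element count fixed at compile time, and __struct_size() recovers the total size. A small demonstration with a hypothetical struct (not from this driver):

#include <linux/overflow.h>

struct demo_req {                       /* hypothetical, for illustration */
        u32 count;
        u32 entries[];                  /* flexible array member */
};

static void demo(void)
{
        /* Storage for the header plus two trailing entries */
        DEFINE_RAW_FLEX(struct demo_req, req, entries, 2);

        req->count = 2;
        req->entries[0] = 1;
        req->entries[1] = 2;
        /* __struct_size(req) == sizeof(struct demo_req) + 2 * sizeof(u32) */
}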
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 7f081e6e8c87..ba83dbf4ed22 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1042,12 +1042,9 @@ static int fun_set_rxfh(struct net_device *netdev,
static int fun_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
return 0;
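This hunk, like the gianfar and hclge_ptp ones in this series, drops the software RX timestamping flags and the phc_index = -1 assignment because the ethtool core now presets phc_index to -1 and reports the generic software receive timestamping capability itself. A reduced sketch of what a .get_ts_info() callback is left to declare (illustrative only):

static int demo_get_ts_info(struct net_device *dev,
                            struct kernel_ethtool_ts_info *info)
{
        /* The core has already set phc_index = -1 and the generic
         * software RX capability; report only what the driver adds.
         */
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
        return 0;
}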
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 84ac004d3953..301fa1ea4f51 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -784,6 +784,8 @@ struct gve_priv {
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
u32 adminq_cfg_flow_rule_cnt;
+ u32 adminq_cfg_rss_cnt;
+ u32 adminq_query_rss_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -831,6 +833,9 @@ struct gve_priv {
u32 num_flow_rules;
struct gve_flow_rules_cache flow_rules_cache;
+
+ u16 rss_key_size;
+ u16 rss_lut_size;
};
enum gve_service_task_flags_bit {
@@ -1148,7 +1153,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg);
-int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c5bbc1b7524e..e44e8b139633 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -45,6 +45,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -207,6 +208,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"Flow Steering");
*dev_op_flow_steering = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_RSS_CONFIG:
+ if (option_length < sizeof(**dev_op_rss_config) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "RSS config",
+ (int)sizeof(**dev_op_rss_config),
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_rss_config))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "RSS config");
+ *dev_op_rss_config = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -227,6 +245,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -249,7 +268,8 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
- dev_op_flow_steering, dev_op_modify_ring);
+ dev_op_flow_steering, dev_op_rss_config,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -289,6 +309,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
+ priv->adminq_cfg_rss_cnt = 0;
+ priv->adminq_query_rss_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -534,6 +556,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
priv->adminq_cfg_flow_rule_cnt++;
break;
+ case GVE_ADMINQ_CONFIGURE_RSS:
+ priv->adminq_cfg_rss_cnt++;
+ break;
+ case GVE_ADMINQ_QUERY_RSS:
+ priv->adminq_query_rss_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -867,6 +895,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_buffer_sizes,
const struct gve_device_option_flow_steering
*dev_op_flow_steering,
+ const struct gve_device_option_rss_config
+ *dev_op_rss_config,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -931,6 +961,14 @@ static void gve_enable_supported_features(struct gve_priv *priv,
priv->max_flow_rules);
}
}
+
+ if (dev_op_rss_config &&
+ (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
+ priv->rss_key_size =
+ be16_to_cpu(dev_op_rss_config->hash_key_size);
+ priv->rss_lut_size =
+ be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -939,6 +977,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
+ struct gve_device_option_rss_config *dev_op_rss_config = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -973,6 +1012,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_jumbo_frames, &dev_op_dqo_qpl,
&dev_op_buffer_sizes,
&dev_op_flow_steering,
+ &dev_op_rss_config,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1035,7 +1075,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_modify_ring);
+ dev_op_rss_config, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1248,6 +1288,81 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ dma_addr_t lut_bus = 0, key_bus = 0;
+ u16 key_size = 0, lut_size = 0;
+ union gve_adminq_command cmd;
+ __be32 *lut = NULL;
+ u8 hash_alg = 0;
+ u8 *key = NULL;
+ int err = 0;
+ u16 i;
+
+ switch (rxfh->hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ case ETH_RSS_HASH_TOP:
+ hash_alg = ETH_RSS_HASH_TOP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rxfh->indir) {
+ lut_size = priv->rss_lut_size;
+ lut = dma_alloc_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ &lut_bus, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ lut[i] = cpu_to_be32(rxfh->indir[i]);
+ }
+
+ if (rxfh->key) {
+ key_size = priv->rss_key_size;
+ key = dma_alloc_coherent(&priv->pdev->dev,
+ key_size, &key_bus, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(key, rxfh->key, key_size);
+ }
+
+ /* Zero-valued fields in the cmd.configure_rss instruct the device to
+ * not update those fields.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
+ cmd.configure_rss = (struct gve_adminq_configure_rss) {
+ .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
+ BIT(GVE_RSS_HASH_UDPV4) |
+ BIT(GVE_RSS_HASH_TCPV6) |
+ BIT(GVE_RSS_HASH_UDPV6)),
+ .hash_alg = hash_alg,
+ .hash_key_size = cpu_to_be16(key_size),
+ .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_addr = cpu_to_be64(key_bus),
+ .hash_lut_addr = cpu_to_be64(lut_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+out:
+ if (lut)
+ dma_free_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ lut, lut_bus);
+ if (key)
+ dma_free_coherent(&priv->pdev->dev,
+ key_size, key, key_bus);
+ return err;
+}
+
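Because zero-valued fields in the configure_rss command mean "leave unchanged", a caller can refresh one piece of RSS state at a time. An illustrative (not in-tree) use that updates only the indirection table while keeping the current key and hash function:

static int demo_update_lut_only(struct gve_priv *priv, u32 *indir)
{
        struct ethtool_rxfh_param rxfh = {
                .hfunc = ETH_RSS_HASH_NO_CHANGE,
                .indir = indir,         /* priv->rss_lut_size entries */
                .key = NULL,            /* key_size stays 0: key unchanged */
        };

        return gve_adminq_configure_rss(priv, &rxfh);
}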
/* In the DMA memory that the driver allocated for the device to query the flow rules, the device
* first writes a struct gve_query_flow_rules_descriptor. Next to it, the device
* writes an array of rules or rule IDs, with the count specified in the descriptor.
@@ -1325,3 +1440,66 @@ out:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
+
+static int gve_adminq_process_rss_query(struct gve_priv *priv,
+ struct gve_query_rss_descriptor *descriptor,
+ struct ethtool_rxfh_param *rxfh)
+{
+ u32 total_memory_length;
+ u16 hash_lut_length;
+ void *rss_info_addr;
+ __be32 *lut;
+ u16 i;
+
+ total_memory_length = be32_to_cpu(descriptor->total_length);
+ hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir);
+
+ if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) {
+ dev_err(&priv->dev->dev,
+ "rss query desc from device has invalid length parameter.\n");
+ return -EINVAL;
+ }
+
+ rxfh->hfunc = descriptor->hash_alg;
+
+ rss_info_addr = (void *)(descriptor + 1);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+
+ rss_info_addr += priv->rss_key_size;
+ lut = (__be32 *)rss_info_addr;
+ if (rxfh->indir) {
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rxfh->indir[i] = be32_to_cpu(lut[i]);
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_query_rss_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS);
+ cmd.query_rss = (struct gve_adminq_query_rss) {
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rss_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_rss_query(priv, descriptor, rxfh);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index ed1370c9b197..863683de9694 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -20,12 +20,14 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
+ GVE_ADMINQ_CONFIGURE_RSS = 0xA,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
@@ -164,6 +166,14 @@ struct gve_device_option_flow_steering {
static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+struct gve_device_option_rss_config {
+ __be32 supported_features_mask;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+};
+
+static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -182,6 +192,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
enum gve_dev_opt_req_feat_mask {
@@ -194,6 +205,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
};
enum gve_sup_feature_mask {
@@ -201,6 +213,7 @@ enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
+ GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -214,6 +227,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
gve_driver_capability_flexible_buffer_size = 5,
+ gve_driver_capability_flexible_rss_size = 6,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -226,7 +240,8 @@ enum gve_driver_capbility {
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
- GVE_CAP1(gve_driver_capability_flexible_buffer_size))
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
+ GVE_CAP1(gve_driver_capability_flexible_rss_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -509,6 +524,44 @@ struct gve_adminq_query_flow_rules {
static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+enum gve_rss_hash_type {
+ GVE_RSS_HASH_IPV4,
+ GVE_RSS_HASH_TCPV4,
+ GVE_RSS_HASH_IPV6,
+ GVE_RSS_HASH_IPV6_EX,
+ GVE_RSS_HASH_TCPV6,
+ GVE_RSS_HASH_TCPV6_EX,
+ GVE_RSS_HASH_UDPV4,
+ GVE_RSS_HASH_UDPV6,
+ GVE_RSS_HASH_UDPV6_EX,
+};
+
+struct gve_adminq_configure_rss {
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+ __be64 hash_key_addr;
+ __be64 hash_lut_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_rss) == 24);
+
+struct gve_query_rss_descriptor {
+ __be32 total_length;
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+};
+
+struct gve_adminq_query_rss {
+ __be64 available_length;
+ __be64 rss_descriptor_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_query_rss) == 16);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -530,6 +583,8 @@ union gve_adminq_command {
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_configure_rss configure_rss;
+ struct gve_adminq_query_rss query_rss;
struct gve_adminq_extended_command extended_command;
};
};
@@ -568,6 +623,8 @@ int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5a8b490ab3ad..bdfc6e77b2af 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -75,7 +75,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
- "adminq_query_flow_rules", "adminq_cfg_flow_rule",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
+ "adminq_query_rss_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -453,6 +454,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_get_ptype_map_cnt;
data[i++] = priv->adminq_query_flow_rules_cnt;
data[i++] = priv->adminq_cfg_flow_rule_cnt;
+ data[i++] = priv->adminq_cfg_rss_cnt;
+ data[i++] = priv->adminq_query_rss_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -838,6 +841,41 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
return err;
}
+static u32 gve_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_key_size;
+}
+
+static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_lut_size;
+}
+
+static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_query_rss_config(priv, rxfh);
+}
+
+static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_configure_rss(priv, rxfh);
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -851,6 +889,10 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
+ .get_rxfh_indir_size = gve_get_rxfh_indir_size,
+ .get_rxfh_key_size = gve_get_rxfh_key_size,
+ .get_rxfh = gve_get_rxfh,
+ .set_rxfh = gve_set_rxfh,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b91e7a06b97f..beb815e5289b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -947,6 +947,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->tx_coalesce_timer.function = tx_done;
priv->map = syscon_node_to_regmap(arg.np);
+ of_node_put(arg.np);
if (IS_ERR(priv->map)) {
dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
ret = PTR_ERR(priv->map);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f75668c47935..58baac7103b3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -734,7 +734,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return -ENODATA;
phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR_OR_NULL(phy))
return -EIO;
phy->irq = mdio->irq[addr];
@@ -933,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
mac_cb->cpld_ctrl = NULL;
} else {
syscon = syscon_node_to_regmap(cpld_args.np);
+ of_node_put(cpld_args.np);
if (IS_ERR_OR_NULL(syscon)) {
dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
mac_cb->cpld_ctrl = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6c33195a1168..bd86efd92a5a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,8 +13,9 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
-#include <net/ipv6.h>
+
#include <net/rtnetlink.h>
+
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
@@ -6290,15 +6291,15 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
@@ -6319,15 +6320,15 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -6756,21 +6757,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6789,19 +6788,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -7019,7 +7018,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
} else {
int i;
- for (i = 0; i < IPV6_SIZE; i++) {
+ for (i = 0; i < IPV6_ADDR_WORDS; i++) {
tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
}
@@ -7274,14 +7273,14 @@ static int hclge_get_cls_key_ip(const struct flow_rule *flow,
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(flow, &match);
- be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- match.mask->src.s6_addr32, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- match.mask->dst.s6_addr32, IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ match.key->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ match.key->dst.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32);
} else {
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
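The ipv6_addr_be32_to_cpu()/ipv6_addr_cpu_to_be32() helpers wrap be32_to_cpu_array()/cpu_to_be32_array() with the fixed IPV6_ADDR_WORDS length, so callers can no longer pass a wrong word count for an IPv6 address. A round-trip sketch (illustrative):

static void demo_roundtrip(const struct in6_addr *addr)
{
        u32 host[IPV6_ADDR_WORDS];
        __be32 wire[IPV6_ADDR_WORDS];

        ipv6_addr_be32_to_cpu(host, addr->s6_addr32);   /* wire -> host order */
        ipv6_addr_cpu_to_be32(wire, host);              /* host -> wire order */
}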
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b5178b0f88b3..b9fc719880bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,7 +8,9 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+
#include <net/devlink.h>
+#include <net/ipv6.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -718,15 +720,15 @@ struct hclge_fd_cfg {
};
#define IPV4_INDEX 3
-#define IPV6_SIZE 4
+
struct hclge_fd_rule_tuples {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
/* Compatible with both IPv4 and IPv6 addresses.
* An IPv4 address is stored in src/dst_ip[3].
*/
- u32 src_ip[IPV6_SIZE];
- u32 dst_ip[IPV6_SIZE];
+ u32 src_ip[IPV6_ADDR_WORDS];
+ u32 dst_ip[IPV6_ADDR_WORDS];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5fff8ed388f8..5505caea88e9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -389,16 +389,12 @@ int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (hdev->ptp->clock)
info->phc_index = ptp_clock_index(hdev->ptp->clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 65b9dcd38137..6db415d8b917 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -134,17 +134,17 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(common_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
for (i = 0; i < reg_um; i++)
@@ -153,7 +153,7 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
HCLGEVF_RING_REG_OFFSET * j);
}
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
for (i = 0; i < reg_um; i++)
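ARRAY_SIZE() divides by the element's own size, so the count stays correct if the element type ever changes, and it fails to build when applied to a pointer rather than an array; both are improvements over the open-coded sizeof division. For instance (illustrative):

static const u32 demo_reg_list[] = { 0x00, 0x04, 0x08 };

static u16 demo_reg_count(void)
{
        return ARRAY_SIZE(demo_reg_list);       /* 3 */
}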
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index ed73707176c1..8a047145f0c5 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
MDIO_SC_RESET_ST;
}
}
+ of_node_put(reg_args.np);
} else {
dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
mdio_dev->subctrl_vbase = NULL;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 0304f03d4093..c559dd4291d3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1471,7 +1471,6 @@ static void hinic_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- char *p = (char *)data;
u16 i, j;
switch (stringset) {
@@ -1479,31 +1478,19 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
- memcpy(p, hinic_function_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++)
+ ethtool_puts(&data, hinic_function_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
- memcpy(p, hinic_port_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++)
+ ethtool_puts(&data, hinic_port_stats[i].name);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
- sprintf(p, hinic_tx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
- sprintf(p, hinic_rx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i);
return;
default:
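ethtool_puts() copies one ETH_GSTRING_LEN-sized string slot and advances the cursor, and ethtool_sprintf() does the same through a format string, replacing the manual memcpy()/sprintf() plus pointer arithmetic removed above. A condensed sketch (names illustrative):

static void demo_fill_strings(u8 **data)
{
        static const char * const names[] = { "rx_bytes", "tx_bytes" };
        unsigned int i, q;

        for (i = 0; i < ARRAY_SIZE(names); i++)
                ethtool_puts(data, names[i]);

        for (q = 0; q < 4; q++)
                ethtool_sprintf(data, "queue%u_packets", q);
}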
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..c41c3f1cc506 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
int i = 0;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
@@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
u32 logical_port_id)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (dn_log_port_id)
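for_each_child_of_node() takes a reference on each child as it iterates and drops it when moving to the next, so the manual of_get_next_child() bookkeeping disappears; only a child returned out of the loop still carries a reference the caller must drop. Illustrative sketch:

static struct device_node *demo_find_hea_port(struct device_node *parent)
{
        struct device_node *child;

        for_each_child_of_node(parent, child) {
                if (of_property_present(child, "ibm,hea-port-no"))
                        return child;   /* caller must of_node_put() */
        }

        return NULL;
}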
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a19d098f2e2b..dac570f3c110 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -32,7 +32,6 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
-#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -96,11 +95,6 @@ MODULE_LICENSE("GPL");
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
-/* This is the wait queue used to wait on any event related to probe, that
- * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
- */
-static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
-
/* Having stable interface names is a doomed idea. However, it would be nice
* if we didn't have completely random interface names at boot too :-) It's
* just a matter of making everybody's life easier. Since we are doing
@@ -116,9 +110,6 @@ static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
#define EMAC_BOOT_LIST_SIZE 4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
-/* How long should I wait for dependent devices ? */
-#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
-
/* I don't want to litter system log with timeout errors
* when we have brain-damaged PHY.
*/
@@ -418,8 +409,8 @@ do_retry:
static void emac_hash_mc(struct emac_instance *dev)
{
+ u32 __iomem *gaht_base = emac_gaht_base(dev);
const int regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
@@ -973,8 +964,6 @@ static void __emac_set_multicast_list(struct emac_instance *dev)
* we need is just to stop RX channel. This seems to work on all
* tested SoCs. --ebs
*
- * If we need the full reset, we might just trigger the workqueue
- * and do it async... a bit nasty but should work --BenH
*/
dev->mcast_pending = 0;
emac_rx_disable(dev);
@@ -1228,18 +1217,10 @@ static void emac_print_link_status(struct emac_instance *dev)
static int emac_open(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- int err, i;
+ int i;
DBG(dev, "open" NL);
- /* Setup error IRQ handler */
- err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
- if (err) {
- printk(KERN_ERR "%s: failed to request IRQ %d\n",
- ndev->name, dev->emac_irq);
- return err;
- }
-
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
if (emac_alloc_rx_skb(dev, i)) {
@@ -1293,8 +1274,6 @@ static int emac_open(struct net_device *ndev)
return 0;
oom:
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
return -ENOMEM;
}
@@ -1408,8 +1387,6 @@ static int emac_close(struct net_device *ndev)
emac_clean_tx_ring(dev);
emac_clean_rx_ring(dev);
- free_irq(dev->emac_irq, dev);
-
netif_carrier_off(ndev);
return 0;
@@ -2390,7 +2367,9 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].drvdata != NULL)
there++;
}
- return there == EMAC_DEP_COUNT;
+ if (there != EMAC_DEP_COUNT)
+ return -EPROBE_DEFER;
+ return 0;
}
static void emac_put_deps(struct emac_instance *dev)
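Returning -EPROBE_DEFER hands the retry problem to the driver core, which re-runs the probe after other devices bind; this is what lets the bus notifier and probe wait queue be deleted below. The usual shape of the pattern (illustrative, not from this driver):

static int demo_probe(struct platform_device *pdev)
{
        struct clk *clk = devm_clk_get(&pdev->dev, NULL);

        if (IS_ERR(clk))        /* may be -EPROBE_DEFER: core retries later */
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "clock not available\n");

        return 0;
}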
@@ -2402,19 +2381,6 @@ static void emac_put_deps(struct emac_instance *dev)
platform_device_put(dev->tah_dev);
}
-static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- /* We are only intereted in device addition */
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- wake_up_all(&emac_probe_wait);
- return 0;
-}
-
-static struct notifier_block emac_of_bus_notifier = {
- .notifier_call = emac_of_bus_notify
-};
-
static int emac_wait_deps(struct emac_instance *dev)
{
struct emac_depentry deps[EMAC_DEP_COUNT];
@@ -2431,18 +2397,13 @@ static int emac_wait_deps(struct emac_instance *dev)
deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
if (dev->blist && dev->blist > emac_boot_list)
deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
- wait_event_timeout(emac_probe_wait,
- emac_check_deps(dev, deps),
- EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
- err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+ err = emac_check_deps(dev, deps);
for (i = 0; i < EMAC_DEP_COUNT; i++) {
of_node_put(deps[i].node);
if (err)
platform_device_put(deps[i].ofdev);
}
- if (err == 0) {
+ if (!err) {
dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
@@ -2456,22 +2417,21 @@ static int emac_wait_deps(struct emac_instance *dev)
static int emac_read_uint_prop(struct device_node *np, const char *name,
u32 *val, int fatal)
{
- int len;
- const u32 *prop = of_get_property(np, name, &len);
- if (prop == NULL || len < sizeof(u32)) {
+ int err;
+
+ err = of_property_read_u32(np, name, val);
+ if (err) {
if (fatal)
- printk(KERN_ERR "%pOF: missing %s property\n",
- np, name);
- return -ENODEV;
+ pr_err("%pOF: missing %s property", np, name);
+ return err;
}
- *val = *prop;
return 0;
}
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy = dev->phy_dev;
+ struct phy_device *phy = ndev->phydev;
dev->phy.autoneg = phy->autoneg;
dev->phy.speed = phy->speed;
@@ -2522,22 +2482,20 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_ENABLE;
phy->advertising = advertise;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
- return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+ return emac_mdio_phy_start_aneg(phy, ndev->phydev);
}
static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2546,20 +2504,19 @@ static int emac_mdio_poll_link(struct mii_phy *phy)
struct emac_instance *dev = netdev_priv(ndev);
int res;
- res = phy_read_status(dev->phy_dev);
+ res = phy_read_status(ndev->phydev);
if (res) {
dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
return ethtool_op_get_link(ndev);
}
- return dev->phy_dev->link;
+ return ndev->phydev->link;
}
static int emac_mdio_read_link(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- struct phy_device *phy_dev = dev->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int res;
res = phy_read_status(phy_dev);
@@ -2576,10 +2533,9 @@ static int emac_mdio_read_link(struct mii_phy *phy)
static int emac_mdio_init_phy(struct mii_phy *phy)
{
struct net_device *ndev = phy->dev;
- struct emac_instance *dev = netdev_priv(ndev);
- phy_start(dev->phy_dev);
- return phy_init_hw(dev->phy_dev);
+ phy_start(ndev->phydev);
+ return phy_init_hw(ndev->phydev);
}
static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
@@ -2593,6 +2549,7 @@ static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
static int emac_dt_mdio_probe(struct emac_instance *dev)
{
struct device_node *mii_np;
+ struct mii_bus *bus;
int res;
mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
@@ -2606,23 +2563,23 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
goto put_node;
}
- dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
- if (!dev->mii_bus) {
+ bus = devm_mdiobus_alloc(&dev->ofdev->dev);
+ if (!bus) {
res = -ENOMEM;
goto put_node;
}
- dev->mii_bus->priv = dev->ndev;
- dev->mii_bus->parent = dev->ndev->dev.parent;
- dev->mii_bus->name = "emac_mdio";
- dev->mii_bus->read = &emac_mii_bus_read;
- dev->mii_bus->write = &emac_mii_bus_write;
- dev->mii_bus->reset = &emac_mii_bus_reset;
- snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
- res = of_mdiobus_register(dev->mii_bus, mii_np);
+ bus->priv = dev->ndev;
+ bus->parent = dev->ndev->dev.parent;
+ bus->name = "emac_mdio";
+ bus->read = &emac_mii_bus_read;
+ bus->write = &emac_mii_bus_write;
+ bus->reset = &emac_mii_bus_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
+ res = devm_of_mdiobus_register(&dev->ofdev->dev, bus, mii_np);
if (res) {
dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
- dev->mii_bus->name, res);
+ bus->name, res);
}
put_node:
@@ -2633,26 +2590,28 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
+ struct phy_device *phy_dev;
+
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
return -ENOMEM;
- dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
- 0, dev->phy_mode);
- if (!dev->phy_dev) {
+ phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, 0,
+ dev->phy_mode);
+ if (!phy_dev) {
dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
return -ENODEV;
}
- dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
- dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
- dev->phy.def->name = dev->phy_dev->drv->name;
+ dev->phy.def->phy_id = phy_dev->drv->phy_id;
+ dev->phy.def->phy_id_mask = phy_dev->drv->phy_id_mask;
+ dev->phy.def->name = phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
- dev->phy_dev->supported);
- dev->phy.address = dev->phy_dev->mdio.addr;
- dev->phy.mode = dev->phy_dev->interface;
+ phy_dev->supported);
+ dev->phy.address = phy_dev->mdio.addr;
+ dev->phy.mode = phy_dev->interface;
return 0;
}
@@ -2668,8 +2627,6 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
res = emac_dt_mdio_probe(dev);
if (!res) {
res = emac_dt_phy_connect(dev, phy_handle);
- if (res)
- mdiobus_unregister(dev->mii_bus);
}
}
@@ -2708,13 +2665,11 @@ static int emac_init_phy(struct emac_instance *dev)
return res;
res = of_phy_register_fixed_link(np);
- dev->phy_dev = of_phy_find_device(np);
- if (res || !dev->phy_dev) {
- mdiobus_unregister(dev->mii_bus);
+ ndev->phydev = of_phy_find_device(np);
+ if (res || !ndev->phydev)
return res ? res : -EINVAL;
- }
emac_adjust_link(dev->ndev);
- put_device(&dev->phy_dev->mdio.dev);
+ put_device(&ndev->phydev->mdio.dev);
}
return 0;
}
@@ -3053,7 +3008,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Allocate our net_device structure */
err = -ENOMEM;
- ndev = alloc_etherdev(sizeof(struct emac_instance));
+ ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(struct emac_instance));
if (!ndev)
goto err_gone;
@@ -3072,35 +3027,40 @@ static int emac_probe(struct platform_device *ofdev)
/* Init various config data based on device-tree */
err = emac_init_config(dev);
if (err)
- goto err_free;
+ goto err_gone;
- /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
+ /* Get interrupts. EMAC irq is mandatory */
dev->emac_irq = irq_of_parse_and_map(np, 0);
- dev->wol_irq = irq_of_parse_and_map(np, 1);
if (!dev->emac_irq) {
printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
err = -ENODEV;
- goto err_free;
+ goto err_gone;
+ }
+
+ /* Setup error IRQ handler */
+ err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC",
+ dev);
+ if (err) {
+ dev_err_probe(&ofdev->dev, err, "failed to request IRQ %d",
+ dev->emac_irq);
+ goto err_gone;
}
+
ndev->irq = dev->emac_irq;
/* Map EMAC regs */
// TODO : platform_get_resource() and devm_ioremap_resource()
- dev->emacp = of_iomap(np, 0);
- if (dev->emacp == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
+ dev->emacp = devm_of_iomap(&ofdev->dev, np, 0, NULL);
+ if (!dev->emacp) {
+ dev_err(&ofdev->dev, "can't map device registers");
err = -ENOMEM;
- goto err_irq_unmap;
+ goto err_gone;
}
/* Wait for dependent devices */
err = emac_wait_deps(dev);
- if (err) {
- printk(KERN_ERR
- "%pOF: Timeout waiting for dependent devices\n", np);
- /* display more info about what's missing ? */
- goto err_reg_unmap;
- }
+ if (err)
+ goto err_gone;
dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
@@ -3187,7 +3147,7 @@ static int emac_probe(struct platform_device *ofdev)
netif_carrier_off(ndev);
- err = register_netdev(ndev);
+ err = devm_register_netdev(&ofdev->dev, ndev);
if (err) {
printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
np, err);
@@ -3200,10 +3160,6 @@ static int emac_probe(struct platform_device *ofdev)
wmb();
platform_set_drvdata(ofdev, dev);
- /* There's a new kid in town ! Let's tell everybody */
- wake_up_all(&emac_probe_wait);
-
-
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
@@ -3232,24 +3188,9 @@ static int emac_probe(struct platform_device *ofdev)
mal_unregister_commac(dev->mal, &dev->commac);
err_rel_deps:
emac_put_deps(dev);
- err_reg_unmap:
- iounmap(dev->emacp);
- err_irq_unmap:
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
- err_free:
- free_netdev(ndev);
err_gone:
- /* if we were on the bootlist, remove us as we won't show up and
- * wake up all waiters to notify them in case they were waiting
- * on us
- */
- if (blist) {
+ if (blist)
*blist = NULL;
- wake_up_all(&emac_probe_wait);
- }
return err;
}
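The probe path now leans on devm_*: resources registered against the device are released automatically, in reverse order, on probe failure or removal, which is why the err_reg_unmap/err_irq_unmap/err_free labels collapse into err_gone. The general shape of such a probe, with hypothetical names:

struct demo_priv { int unused; };       /* hypothetical private struct */

static irqreturn_t demo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        void __iomem *regs;
        int irq, err;

        ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct demo_priv));
        if (!ndev)
                return -ENOMEM;         /* devres unwinds anything above */

        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_irq(&pdev->dev, irq, demo_isr, 0, "demo", ndev);
        if (err)
                return err;

        return devm_register_netdev(&pdev->dev, ndev);
}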
@@ -3259,8 +3200,6 @@ static void emac_remove(struct platform_device *ofdev)
DBG(dev, "remove" NL);
- unregister_netdev(dev->ndev);
-
cancel_work_sync(&dev->reset_work);
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
@@ -3270,26 +3209,11 @@ static void emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
- if (dev->phy_dev)
- phy_disconnect(dev->phy_dev);
-
- if (dev->mii_bus)
- mdiobus_unregister(dev->mii_bus);
-
busy_phy_map &= ~(1 << dev->phy.address);
DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
-
- iounmap(dev->emacp);
-
- if (dev->wol_irq)
- irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq)
- irq_dispose_mapping(dev->emac_irq);
-
- free_netdev(dev->ndev);
}
/* XXX Features in here should be replaced by properties... */
@@ -3328,16 +3252,15 @@ static void __init emac_make_bootlist(void)
/* Collect EMACs */
while((np = of_find_all_nodes(np)) != NULL) {
- const u32 *idx;
+ u32 idx;
if (of_match_node(emac_match, np) == NULL)
continue;
if (of_property_read_bool(np, "unused"))
continue;
- idx = of_get_property(np, "cell-index", NULL);
- if (idx == NULL)
+ if (of_property_read_u32(np, "cell-index", &idx))
continue;
- cell_indices[i] = *idx;
+ cell_indices[i] = idx;
emac_boot_list[i++] = of_node_get(np);
if (i >= EMAC_BOOT_LIST_SIZE) {
of_node_put(np);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..89fa1683ec3c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -188,10 +188,6 @@ struct emac_instance {
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
- /* Device-tree based phy configuration */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
-
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
@@ -400,7 +396,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int offset;
@@ -413,10 +409,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
else
offset = offsetof(struct emac_regs, u0.emac4.iaht1);
- return (u32 *)((ptrdiff_t)p + offset);
+ return (u32 __iomem *)((__force ptrdiff_t)p + offset);
}
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
{
/* GAHT registers always come after an identical number of
* IAHT registers.
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4c9d9badd698..b619a3ec245b 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,7 +39,8 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -226,6 +227,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
+ free_index = pool->consumer_index;
+ index = pool->free_map[free_index];
+ skb = NULL;
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+
+ /* are we allocating a new buffer or recycling an old one */
+ if (pool->skbuff[index])
+ goto reuse;
+
skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
@@ -235,46 +246,46 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
break;
}
- free_index = pool->consumer_index;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
- index = pool->free_map[free_index];
-
- BUG_ON(index == IBM_VETH_INVALID_MAP);
- BUG_ON(pool->skbuff[index] != NULL);
-
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)skb->data = correlator;
-
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
if (rx_flush) {
unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
ibmveth_flush_buffer(skb->data, len);
}
+reuse:
+ dma_addr = pool->dma_addr[index];
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlator;
+
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
desc.desc);
if (lpar_rc != H_SUCCESS) {
+ netdev_warn(adapter->netdev,
+ "%sadd_logical_lan failed %lu\n",
+ skb ? "" : "When recycling: ", lpar_rc);
goto failure;
- } else {
- buffers_added++;
- adapter->replenish_add_buff_success++;
}
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
}
mb();
@@ -282,17 +293,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
return;
failure:
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+
+ if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
adapter->replenish_add_buff_failure++;
mb();
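
Both the replenish and harvest paths above rely on the 64-bit correlator stored in each posted buffer: the pool index lives in the upper 32 bits and the buffer index in the lower 32. A tiny sketch of that encoding, mirroring the expressions in the driver:

#include <linux/types.h>

/* Pack the correlator written into the buffer at post time and
 * unpack it when the hypervisor hands the buffer back.
 */
static inline u64 veth_corr_pack(u32 pool, u32 index)
{
	return ((u64)pool << 32) | index;
}

static inline void veth_corr_unpack(u64 corr, u32 *pool, u32 *index)
{
	*pool = corr >> 32;
	*index = corr & 0xffffffffUL;
}
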
@@ -365,7 +372,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator)
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
@@ -376,15 +383,23 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
skb = adapter->rx_buff_pool[pool].skbuff[index];
-
BUG_ON(skb == NULL);
- adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+ /* if we are going to reuse the buffer then keep the pointers around
+ * but mark index as available. replenish will see the skb pointer and
+ * assume it is to be recycled.
+ */
+ if (!reuse) {
+ /* remove the skb pointer to mark free. actual freeing is done
+	 * by upper level networking after gro_receive
+ */
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
- dma_unmap_single(&adapter->vdev->dev,
- adapter->rx_buff_pool[pool].dma_addr[index],
- adapter->rx_buff_pool[pool].buff_size,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+ }
free_index = adapter->rx_buff_pool[pool].producer_index;
adapter->rx_buff_pool[pool].producer_index++;
@@ -411,51 +426,13 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
return adapter->rx_buff_pool[pool].skbuff[index];
}
-/* recycle the current buffer on the rx queue */
-static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
- u32 q_index = adapter->rx_queue.index;
- u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
- unsigned int pool = correlator >> 32;
- unsigned int index = correlator & 0xffffffffUL;
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- int ret = 1;
-
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
-
- if (!adapter->rx_buff_pool[pool].active) {
- ibmveth_rxq_harvest_buffer(adapter);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- goto out;
- }
-
- desc.fields.flags_len = IBMVETH_BUF_VALID |
- adapter->rx_buff_pool[pool].buff_size;
- desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
-
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
- if (lpar_rc != H_SUCCESS) {
- netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
- "during recycle rc=%ld", lpar_rc);
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- ret = 0;
- }
-
- if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
- adapter->rx_queue.index = 0;
- adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
- }
-
-out:
- return ret;
-}
+ u64 cor;
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
-{
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
@@ -1337,6 +1314,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
+restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1346,7 +1324,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_recycle_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, true);
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1379,11 +1357,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- if (!ibmveth_rxq_recycle_buffer(adapter))
- kfree_skb(skb);
+ ibmveth_rxq_harvest_buffer(adapter, true);
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, false);
skb_reserve(skb, offset);
}
@@ -1420,24 +1397,25 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_replenish_task(adapter);
- if (frames_processed < budget) {
- napi_complete_done(napi, frames_processed);
+ if (frames_processed == budget)
+ goto out;
- /* We think we are done - reenable interrupts,
- * then check once more to make sure we are done.
- */
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_ENABLE);
+ if (!napi_complete_done(napi, frames_processed))
+ goto out;
- BUG_ON(lpar_rc != H_SUCCESS);
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
- if (ibmveth_rxq_pending_buffer(adapter) &&
- napi_schedule(napi)) {
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_DISABLE);
- }
+ if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
}
+out:
return frames_processed;
}
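
The reworked poll above follows the canonical NAPI shape: exhaust the budget, complete, re-enable the interrupt, then re-check for late work and loop back if napi_schedule() wins the race. A condensed, driver-agnostic sketch of that control flow (rx_pending/process_one_frame/irq_* are stand-ins for driver specifics):

#include <linux/netdevice.h>

static bool rx_pending(void);
static int process_one_frame(void);
static void irq_enable(void);
static void irq_disable(void);

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

restart_poll:
	while (done < budget && rx_pending())
		done += process_one_frame();

	if (done == budget)
		return done;		/* budget spent: stay scheduled */

	if (!napi_complete_done(napi, done))
		return done;		/* someone already rescheduled us */

	irq_enable();			/* we believe we are idle */

	/* late arrivals: reclaim the poll and keep the irq masked */
	if (rx_pending() && napi_schedule(napi)) {
		irq_disable();
		goto restart_poll;
	}

	return done;
}
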
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 23ebeb143987..87e693a81433 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -2140,63 +2141,49 @@ static int ibmvnic_close(struct net_device *netdev)
}
/**
- * build_hdr_data - creates L2/L3/L4 header data buffer
+ * get_hdr_lens - fills list of L2/L3/L4 hdr lens
* @hdr_field: bitfield determining needed headers
* @skb: socket buffer
- * @hdr_len: array of header lengths
- * @hdr_data: buffer to write the header to
+ * @hdr_len: array of header lengths to be filled
*
* Reads hdr_field to determine which headers are needed by firmware.
- * Builds a buffer containing these headers. Saves individual header
- * lengths and total buffer length to be used to build descriptors.
+ * Fills hdr_len with the length of each requested header; these
+ * lengths and the returned total are later used to build descriptors.
+ *
+ * Return: total len of all headers
*/
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
- int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len)
{
int len = 0;
- u8 *hdr;
- if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
- hdr_len[0] = sizeof(struct vlan_ethhdr);
- else
- hdr_len[0] = sizeof(struct ethhdr);
+
+ if ((hdr_field >> 6) & 1) {
+ hdr_len[0] = skb_mac_header_len(skb);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr_len[1] = skb_network_header_len(skb);
+ len += hdr_len[1];
+ }
+
+ if (!((hdr_field >> 4) & 1))
+ return len;
if (skb->protocol == htons(ETH_P_IP)) {
- hdr_len[1] = ip_hdr(skb)->ihl * 4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hdr_len[1] = sizeof(struct ipv6hdr);
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
- } else if (skb->protocol == htons(ETH_P_ARP)) {
- hdr_len[1] = arp_hdr_len(skb->dev);
- hdr_len[2] = 0;
}
- memset(hdr_data, 0, 120);
- if ((hdr_field >> 6) & 1) {
- hdr = skb_mac_header(skb);
- memcpy(hdr_data, hdr, hdr_len[0]);
- len += hdr_len[0];
- }
-
- if ((hdr_field >> 5) & 1) {
- hdr = skb_network_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[1]);
- len += hdr_len[1];
- }
-
- if ((hdr_field >> 4) & 1) {
- hdr = skb_transport_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[2]);
- len += hdr_len[2];
- }
- return len;
+ return len + hdr_len[2];
}
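
get_hdr_lens() above decodes hdr_field as a bitmask: bit 6 requests the L2 header length, bit 5 the L3 length and bit 4 the L4 length. A small illustration of the decoding, assuming that layout (macro names hypothetical):

#include <linux/types.h>

#define WANTS_L2(f)	(((f) >> 6) & 1)
#define WANTS_L3(f)	(((f) >> 5) & 1)
#define WANTS_L4(f)	(((f) >> 4) & 1)

/* Example: hdr_field == 0x70 requests L2+L3+L4, while 0x60 requests
 * only L2+L3, so get_hdr_lens() returns before examining the
 * transport header.
 */
static bool wants_all_headers(u8 hdr_field)
{
	return WANTS_L2(hdr_field) && WANTS_L3(hdr_field) &&
	       WANTS_L4(hdr_field);
}
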
/**
@@ -2209,12 +2196,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
+ *
+ * Return: Number of header descs
*/
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
union sub_crq *scrq_arr)
{
- union sub_crq hdr_desc;
+ union sub_crq *hdr_desc;
int tmp_len = len;
int num_descs = 0;
u8 *data, *cur;
@@ -2223,28 +2212,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
while (tmp_len > 0) {
cur = hdr_data + len - tmp_len;
- memset(&hdr_desc, 0, sizeof(hdr_desc));
- if (cur != hdr_data) {
- data = hdr_desc.hdr_ext.data;
+ hdr_desc = &scrq_arr[num_descs];
+ if (num_descs) {
+ data = hdr_desc->hdr_ext.data;
tmp = tmp_len > 29 ? 29 : tmp_len;
- hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
- hdr_desc.hdr_ext.len = tmp;
+ hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc->hdr_ext.len = tmp;
} else {
- data = hdr_desc.hdr.data;
+ data = hdr_desc->hdr.data;
tmp = tmp_len > 24 ? 24 : tmp_len;
- hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
- hdr_desc.hdr.len = tmp;
- hdr_desc.hdr.l2_len = (u8)hdr_len[0];
- hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
- hdr_desc.hdr.l4_len = (u8)hdr_len[2];
- hdr_desc.hdr.flag = hdr_field << 1;
+ hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc->hdr.len = tmp;
+ hdr_desc->hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc->hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc->hdr.flag = hdr_field << 1;
}
memcpy(data, cur, tmp);
tmp_len -= tmp;
- *scrq_arr = hdr_desc;
- scrq_arr++;
num_descs++;
}
@@ -2267,13 +2254,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- u8 hdr_data[140] = {0};
int tot_len;
- tot_len = build_hdr_data(hdr_field, skb, hdr_len,
- hdr_data);
- *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- indir_arr + 1);
+ tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
+ *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
+ tot_len, hdr_len, indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -2350,8 +2335,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
}
}
+static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 *entry)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ dma_wmb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
+ cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
+
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
+
+ return rc;
+}
+
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *tx_scrq)
+ struct ibmvnic_sub_crq_queue *tx_scrq,
+ bool indirect)
{
struct ibmvnic_ind_xmit_queue *ind_bufp;
u64 dma_addr;
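
send_subcrq_direct() above skips the indirect-descriptor DMA buffer entirely when one descriptor suffices: the four 64-bit descriptor words travel in the hcall arguments themselves, with dma_wmb() ordering the descriptor stores first. A simplified sketch of the resulting dispatch in the flush path (types and helper names are stand-ins):

#include <linux/types.h>

struct example_txq {
	int num_entries;
	u64 handle;
	u64 indir_dma_addr;	/* DMA address of the descriptor array */
	u64 *indir_arr;		/* CPU view of the same array */
};

static int send_indirect(u64 handle, u64 dma_addr, int n);
static int send_direct(u64 handle, u64 *four_words);

static int example_flush(struct example_txq *q, bool indirect)
{
	if (!q->num_entries)
		return 0;

	if (indirect)
		/* batched path: hand the hypervisor the DMA address */
		return send_indirect(q->handle, q->indir_dma_addr,
				     q->num_entries);

	/* single descriptor: pass its four words by value; the real
	 * send_subcrq_direct() issues dma_wmb() before the hcall so
	 * the descriptor writes are visible first
	 */
	return send_direct(q->handle, q->indir_arr);
}
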
@@ -2366,7 +2372,13 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
if (!entries)
return 0;
- rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+
+ if (indirect)
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ else
+ rc = send_subcrq_direct(adapter, handle,
+ (u64 *)ind_bufp->indir_arr);
+
if (rc)
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
@@ -2397,6 +2409,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ bool use_scrq_send_direct = false;
int num_entries = 1;
unsigned char *dst;
int bufidx = 0;
@@ -2424,7 +2437,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2442,7 +2455,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2456,6 +2469,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memset(dst, 0, tx_pool->buf_size);
data_dma_addr = ltb->addr + offset;
+	/* if we are going to send this via send_subcrq_direct then we need
+	 * to update the checksum before copying the data into ltb. Essentially
+ * these packets force disable CSO so that we can guarantee that
+ * FW does not need header info and we can send direct.
+ */
+ if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) {
+ use_scrq_send_direct = true;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))
+ use_scrq_send_direct = false;
+ }
+
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2475,9 +2500,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
- /* post changes to long_term_buff *dst before VIOS accessing it */
- dma_wmb();
-
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -2540,6 +2562,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
+ } else if (use_scrq_send_direct) {
+ /* See above comment, CSO disabled with direct xmit */
+ tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
+ ind_bufp->index = 1;
+ tx_buff->num_entries = 1;
+ netdev_tx_sent_queue(txq, skb->len);
+ ind_bufp->indir_arr[0] = tx_crq;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
+
+ goto early_exit;
}
if ((*hdrs >> 7) & 1)
@@ -2549,7 +2583,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
}
@@ -2557,15 +2591,17 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
indir_arr[0] = tx_crq;
memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
num_entries * sizeof(struct ibmvnic_generic_scrq));
+
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
}
+early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
@@ -3527,9 +3563,8 @@ restart_poll:
}
if (adapter->state != VNIC_CLOSING &&
- ((atomic_read(&adapter->rx_pool[scrq_num].available) <
- adapter->req_rx_add_entries_per_subcrq / 2) ||
- frames_processed < budget))
+ (atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
if (napi_complete_done(napi, frames_processed)) {
@@ -4169,20 +4204,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
struct device *dev = &adapter->vdev->dev;
+ int num_packets = 0, total_bytes = 0;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
struct netdev_queue *txq;
union sub_crq *next;
- int index;
- int i;
+ int index, i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
- int total_bytes = 0;
- int num_packets = 0;
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
index = be32_to_cpu(next->tx_comp.correlators[i]);
@@ -4218,8 +4250,6 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
- netdev_tx_completed_queue(txq, num_packets, total_bytes);
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
@@ -4244,6 +4274,9 @@ restart_loop:
goto restart_loop;
}
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
return 0;
}
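
Moving netdev_tx_completed_queue() out of the per-completion loop means byte-queue-limits accounting now happens once per handler invocation with the summed totals, pairing with the netdev_tx_sent_queue()/__netdev_tx_sent_queue() calls on the transmit side. A minimal sketch of the required pairing (driver details elided):

#include <linux/netdevice.h>

/* BQL pairing sketch: every byte/packet reported sent on a txq must
 * eventually be reported completed on the same txq, or the stack
 * will throttle the queue indefinitely.
 */
static void example_tx_side(struct netdev_queue *txq, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, bytes);	/* at descriptor post time */
}

static void example_completion_side(struct netdev_queue *txq,
				    unsigned int pkts, unsigned int bytes)
{
	/* once per completion pass, with accumulated totals */
	netdev_tx_completed_queue(txq, pkts, bytes);
}
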
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 360ee26557f7..f103249b12fa 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6671,8 +6671,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable wakeup\n");
+ goto skip_phy_configurations;
+ }
} else {
/* enable wakeup by the MAC */
ew32(WUFC, wufc);
@@ -6693,8 +6695,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
- if (retval)
- return retval;
+ if (retval) {
+ e_err("Failed to enable ULP\n");
+ goto skip_phy_configurations;
+ }
}
}
@@ -6726,6 +6730,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
hw->phy.ops.release(hw);
}
+skip_phy_configurations:
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -6968,15 +6973,13 @@ static int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc) {
- e1000e_pm_thaw(dev);
- } else {
+ if (!rc) {
/* Introduce S0ix implementation */
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_entry_flow(adapter);
}
- return rc;
+ return 0;
}
static int e1000e_pm_resume(struct device *dev)
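
The e1000e change above replaces early returns with a jump to a shared label so the tail of the shutdown sequence (releasing hardware control back to firmware) always runs, even when an optional wakeup step fails. A generic sketch of that error-path shape (helpers are stand-ins):

static int enable_phy_wakeup(void);
static void configure_phy(void);
static void release_hw_control(void);

static int example_shutdown(void)
{
	int err;

	err = enable_phy_wakeup();
	if (err)
		/* wakeup is best-effort: skip the PHY work, but the
		 * common tail below must still run
		 */
		goto skip_phy_configurations;

	configure_phy();	/* only meaningful when wakeup worked */

skip_phy_configurations:
	release_hw_control();	/* always executed */
	return 0;
}
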
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d546567e0286..2089a0e172bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -4,6 +4,7 @@
#ifndef _I40E_H_
#define _I40E_H_
+#include <linux/linkmode.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1d0d2e526adb..f2506511bbff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2555,16 +2555,12 @@ static int i40e_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pf->ptp_clock)
info->phc_index = ptp_clock_index(pf->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -5641,6 +5637,26 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static void i40e_eee_capability_to_kedata_supported(__le16 eee_capability_,
+ unsigned long *supported)
+{
+ const int eee_capability = le16_to_cpu(eee_capability_);
+ static const int lut[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ };
+
+ linkmode_zero(supported);
+ for (unsigned int i = ARRAY_SIZE(lut); i--; )
+ if (eee_capability & BIT(i + 1))
+ linkmode_set_bit(lut[i], supported);
+}
+
static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
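
The lookup table above maps firmware EEE capability bits to ethtool link modes; note the indexing skips bit 0, so lut[0] (100baseT_Full) corresponds to capability BIT(1). A small usage sketch under that assumption:

#include <linux/ethtool.h>

/* e.g. a firmware eee_capability of 0x06 (bits 1 and 2 set) would
 * yield 100baseT_Full and 1000baseT_Full in the resulting mask.
 */
static void example_decode_eee(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);

	i40e_eee_capability_to_kedata_supported(cpu_to_le16(0x06),
						supported);
}
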
@@ -5648,7 +5664,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int status = 0;
+ int status;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5661,11 +5677,18 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
+ i40e_eee_capability_to_kedata_supported(phy_cfg.eee_capability,
+ edata->supported);
+ linkmode_copy(edata->lp_advertised, edata->supported);
+
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
+ linkmode_zero(edata->advertised);
+ if (phy_cfg.eee_capability)
+ linkmode_copy(edata->advertised, edata->supported);
edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
@@ -5681,10 +5704,11 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ethtool_not_used {
- u32 value;
+ bool value;
const char *name;
} param[] = {
- {edata->tx_lpi_timer, "tx-timer"},
+ {!!(edata->advertised[0] & ~edata->supported[0]), "advertise"},
+ {!!edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
int i;
@@ -5710,7 +5734,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
__le16 eee_capability;
- int status = 0;
+ int status;
/* Deny parameters we don't support */
if (i40e_is_eee_param_supported(netdev, edata))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbcfada7b357..03205eb9f925 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7264,6 +7264,26 @@ out:
}
#endif /* CONFIG_I40E_DCB */
+static void i40e_print_link_message_eee(struct i40e_vsi *vsi,
+ const char *speed, const char *fc)
+{
+ struct ethtool_keee kedata;
+
+ memzero_explicit(&kedata, sizeof(kedata));
+ if (vsi->netdev->ethtool_ops->get_eee)
+ vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata);
+
+ if (!linkmode_empty(kedata.supported))
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
+ speed, fc,
+ kedata.eee_enabled ? "Enabled" : "Disabled");
+ else
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
+
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
@@ -7395,9 +7415,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
} else {
- netdev_info(vsi->netdev,
- "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ i40e_print_link_message_eee(vsi, speed, fc);
}
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 23a6557fc3db..48cd1d06761c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -33,6 +33,7 @@
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_skbedit.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
@@ -393,6 +394,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC)
+#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_TC_U32)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -437,6 +440,7 @@ struct iavf_adapter {
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
+ u16 raw_fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
@@ -444,6 +448,32 @@ struct iavf_adapter {
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
+/* Must be called with fdir_fltr_lock held */
+static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
+{
+ return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
+ IAVF_MAX_FDIR_FILTERS;
+}
+
+static inline void
+iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr++;
+ else
+ adapter->fdir_active_fltr++;
+}
+
+static inline void
+iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr--;
+ else
+ adapter->fdir_active_fltr--;
+}
/* Ethtool Private Flags */
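
With two filter populations (ethtool and raw tc u32) sharing one hardware limit, the capacity check sums both counters and, like the inc/dec helpers above, must run under fdir_fltr_lock. A condensed sketch of the add-side discipline, assuming the iavf definitions above (it mirrors iavf_fdir_add_fltr() later in this series):

/* Capacity check, list insertion and counter bump share one
 * fdir_fltr_lock critical section, so the combined ethtool + raw
 * count can never overshoot the limit.
 */
static int example_add_locked(struct iavf_adapter *adapter,
			      struct iavf_fdir_fltr *fltr)
{
	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_fdir_max_reached(adapter)) {
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		return -ENOSPC;
	}
	list_add(&fltr->list, &adapter->fdir_list_head);
	iavf_inc_fdir_active_fltr(adapter, fltr);
	spin_unlock_bh(&adapter->fdir_fltr_lock);
	return 0;
}
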
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 52273f7eab2c..74a1e9fe1821 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -927,7 +927,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
- rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
@@ -1072,6 +1072,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
@@ -1263,15 +1266,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return -EINVAL;
spin_lock_bh(&adapter->fdir_fltr_lock);
- if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
- spin_unlock_bh(&adapter->fdir_fltr_lock);
- dev_err(&adapter->pdev->dev,
- "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
- IAVF_MAX_FDIR_FILTERS);
- return -ENOSPC;
- }
-
- if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
@@ -1291,23 +1286,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
}
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
- if (err)
- goto ret;
-
- spin_lock_bh(&adapter->fdir_fltr_lock);
- iavf_fdir_list_add_fltr(adapter, fltr);
- adapter->fdir_active_fltr++;
-
- if (adapter->link_up)
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- else
- fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!err)
+ err = iavf_fdir_add_fltr(adapter, fltr);
- if (adapter->link_up)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
-ret:
- if (err && fltr)
+ if (err)
kfree(fltr);
mutex_unlock(&adapter->crit_lock);
@@ -1324,34 +1306,11 @@ ret:
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct iavf_fdir_fltr *fltr = NULL;
- int err = 0;
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->fdir_fltr_lock);
- fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
- if (fltr) {
- if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
- fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
- list_del(&fltr->list);
- kfree(fltr);
- adapter->fdir_active_fltr--;
- fltr = NULL;
- } else {
- err = -EBUSY;
- }
- } else if (adapter->fdir_active_fltr) {
- err = -EINVAL;
- }
- spin_unlock_bh(&adapter->fdir_fltr_lock);
-
- if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
-
- return err;
+ return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 2d47b0b4640e..a1b3b44cc14a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
}
/**
- * iavf_find_fdir_fltr_by_loc - find filter with location
+ * iavf_find_fdir_fltr - find FDIR filter
* @adapter: pointer to the VF adapter structure
- * @loc: location to find.
+ * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
+ * @data: data to ID the filter, type dependent
*
- * Returns pointer to Flow Director filter if found or null
+ * Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
*/
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data)
{
struct iavf_fdir_fltr *rule;
- list_for_each_entry(rule, &adapter->fdir_list_head, list)
- if (rule->loc == loc)
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if ((is_raw && rule->cls_u32_handle == data) ||
+ (!is_raw && rule->loc == data))
return rule;
+ }
return NULL;
}
/**
- * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
+ * iavf_fdir_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
+ *
+ * Return: 0 on success or negative errno on failure.
*/
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_fdir_max_reached(adapter)) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter (limit (%u) reached)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ break;
+
if (rule->loc >= fltr->loc)
break;
parent = rule;
@@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
+
+ iavf_inc_fdir_active_fltr(adapter, fltr);
+
+ if (adapter->link_up)
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ else
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (adapter->link_up)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
+
+ return 0;
+}
+
+/**
+ * iavf_fdir_del_fltr - delete a flow director filter from the list
+ * @adapter: pointer to the VF adapter structure
+ * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
+ * @data: data to ID the filter, type dependent
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
+{
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr(adapter, is_raw, data);
+
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ iavf_dec_fdir_active_fltr(adapter, fltr);
+ kfree(fltr);
+ fltr = NULL;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
+
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return err;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index d31bd923ba8c..e84a5351162f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -117,17 +117,26 @@ struct iavf_fdir_fltr {
u32 flow_id;
+ u32 cls_u32_handle; /* for FDIR added via tc u32 */
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
+static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr)
+{
+ return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count;
+}
+
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr);
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data);
#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ff11bafb3b4f..f782402cd789 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4013,7 +4013,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
+ * @adapter: pointer to iavf adapter structure
* @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4032,6 +4032,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
}
/**
+ * iavf_add_cls_u32 - Add U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_add_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ struct netlink_ext_ack *extack = cls_u32->common.extack;
+ struct virtchnl_fdir_rule *rule_cfg;
+ struct virtchnl_filter_action *vact;
+ struct virtchnl_proto_hdrs *hdrs;
+ struct ethhdr *spec_h, *mask_h;
+ const struct tc_action *act;
+ struct iavf_fdir_fltr *fltr;
+ struct tcf_exts *exts;
+ unsigned int q_index;
+ int i, status = 0;
+ int off_base = 0;
+
+ if (cls_u32->knode.link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
+ return -EOPNOTSUPP;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ rule_cfg = &fltr->vc_add_msg.rule_cfg;
+ hdrs = &rule_cfg->proto_hdrs;
+ hdrs->count = 0;
+
+ /* The parser lib at the PF expects the packet starting with MAC hdr */
+ switch (ntohs(cls_u32->common.protocol)) {
+ case ETH_P_802_3:
+ break;
+ case ETH_P_IP:
+ spec_h = (struct ethhdr *)hdrs->raw.spec;
+ mask_h = (struct ethhdr *)hdrs->raw.mask;
+ spec_h->h_proto = htons(ETH_P_IP);
+ mask_h->h_proto = htons(0xFFFF);
+ off_base += ETH_HLEN;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
+ __be32 val, mask;
+ int off;
+
+ off = off_base + cls_u32->knode.sel->keys[i].off;
+ val = cls_u32->knode.sel->keys[i].val;
+ mask = cls_u32->knode.sel->keys[i].mask;
+
+ if (off >= sizeof(hdrs->raw.spec)) {
+ NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
+ memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
+ hdrs->raw.pkt_len = off + sizeof(val);
+ }
+
+ /* Only one action is allowed */
+ rule_cfg->action_set.count = 1;
+ vact = &rule_cfg->action_set.actions[0];
+ exts = cls_u32->knode.exts;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ /* FDIR queue */
+ if (is_tcf_skbedit_rx_queue_mapping(act)) {
+ q_index = tcf_skbedit_rx_queue_mapping(act);
+ if (q_index >= adapter->num_active_queues) {
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ vact->type = VIRTCHNL_ACTION_QUEUE;
+ vact->act_conf.queue.index = q_index;
+ break;
+ }
+
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ vact->type = VIRTCHNL_ACTION_DROP;
+ break;
+ }
+
+ /* Unsupported action */
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ fltr->vc_add_msg.vsi_id = adapter->vsi.id;
+ fltr->cls_u32_handle = cls_u32->knode.handle;
+ return iavf_fdir_add_fltr(adapter, fltr);
+
+free_alloc:
+ kfree(fltr);
+ return status;
+}
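
Each u32 key above is a 32-bit value/mask pair at a byte offset into the raw match buffer; for ETH_P_IP filters the offsets shift by ETH_HLEN because a synthetic Ethernet header is prepended so the PF parser always sees a full frame. (On the tc side, a rule of the rough form "tc filter add dev <vf> ingress protocol ip u32 match ip dst ... action skbedit queue_mapping N skip_sw" would exercise this path; the exact syntax here is illustrative.) A reduced sketch of the packing step, with the bounds check made explicit and names hypothetical:

#include <linux/if_ether.h>
#include <linux/string.h>

/* Copy one value/mask pair into the spec/mask buffers at the given
 * offset; returns the running packet length, or -EINVAL when the
 * key would overflow the buffers.
 */
static int example_pack_key(u8 *spec, u8 *mask, size_t buflen,
			    int base, int key_off, __be32 val, __be32 msk)
{
	int off = base + key_off;

	if (off < 0 || off + sizeof(val) > buflen)
		return -EINVAL;

	memcpy(spec + off, &val, sizeof(val));
	memcpy(mask + off, &msk, sizeof(msk));

	return off + sizeof(val);
}
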
+
+/**
+ * iavf_del_cls_u32 - Delete U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_del_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
+}
+
+/**
+ * iavf_setup_tc_cls_u32 - U32 filter offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return iavf_add_cls_u32(adapter, cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return iavf_del_cls_u32(adapter, cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
* @type_data: offload data
@@ -4050,6 +4198,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSU32:
+ return iavf_setup_tc_cls_u32(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4332,8 +4482,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4843,9 +4993,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
- /* Enable cloud filter if ADQ is supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ /* Enable HW TC offload if ADQ or tc U32 is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
+ TC_U32_SUPPORT(adapter))
hw_features |= NETIF_F_HW_TC;
+
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1e543f6a7c30..7e810b65380c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
@@ -1961,8 +1962,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
* list on PF is already cleared after a reset
*/
list_del(&f->list);
+ iavf_dec_fdir_active_fltr(adapter, f);
kfree(f);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2135,8 +2136,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2451,8 +2452,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
@@ -2460,8 +2465,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
}
@@ -2479,11 +2484,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 03500e28ac99..3307d551f431 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -28,9 +28,13 @@ ice-y := ice_main.o \
ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_parser.o \
+ ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \
+ ice_sf_eth.o \
+ ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 810a901d7afd..415445cefdb2 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,9 +6,11 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
+#include "devlink_port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
+#include "ice_sf_eth.h"
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_sched_node *tc_node, struct ice_pf *pf)
{
struct devlink_rate *rate_node = NULL;
+ struct ice_dynamic_port *sf;
struct ice_vf *vf;
int i;
@@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
} else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
pf->vsi[node->vsi_handle]->vf) {
vf = pf->vsi[node->vsi_handle]->vf;
if (!vf->devlink_port.devlink_rate)
@@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
*/
devl_rate_leaf_create(&vf->devlink_port, node,
node->parent->rate_node);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+ pf->vsi[node->vsi_handle]->sf) {
+ sf = pf->vsi[node->vsi_handle]->sf;
+ if (!sf->devlink_port.devlink_rate)
+			/* leaf nodes don't have children
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&sf->devlink_port, node,
+ node->parent->rate_node);
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
node->parent->rate_node) {
rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = {
.rate_leaf_parent_set = ice_devlink_set_parent,
.rate_node_parent_set = ice_devlink_set_parent,
+
+ .port_new = ice_devlink_port_new,
};
+static const struct devlink_ops ice_sf_devlink_ops;
+
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
@@ -1562,6 +1580,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
}
/**
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
+ *
+ * Allocate a devlink instance for SF.
+ *
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
+ */
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+ dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
+ return devlink_priv(devlink);
+}
+
+/**
* ice_devlink_register - Register devlink interface for this PF
* @pf: the PF to register the devlink for.
*
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 62ef8e2fb5f1..928c8bdb6649 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -5,6 +5,9 @@
#include "ice.h"
#include "devlink.h"
+#include "devlink_port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
static int ice_active_port_option = -1;
@@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devl_rate_leaf_destroy(&vf->devlink_port);
devl_port_unregister(&vf->devlink_port);
}
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ struct devlink_port_attrs attrs = {};
+ struct ice_dynamic_port *dyn_port;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+
+ dyn_port = sf_dev->dyn_port;
+ vsi = dyn_port->vsi;
+
+ devlink_port = &sf_dev->priv->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(sf_dev->priv);
+
+ return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to destroy the devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (dyn_port->active)
+ return 0;
+
+ err = ice_sf_eth_activate(dyn_port, extack);
+ if (err)
+ return err;
+
+ dyn_port->active = true;
+
+ return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ if (!dyn_port->active)
+ return;
+
+ ice_sf_eth_deactivate(dyn_port);
+ dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it's currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port *devlink_port = &dyn_port->devlink_port;
+ struct ice_pf *pf = dyn_port->pf;
+
+ ice_deactivate_dynamic_port(dyn_port);
+
+ xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+ ice_eswitch_detach_sf(pf, dyn_port);
+ ice_vsi_free(dyn_port->vsi);
+ xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+ kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+ struct ice_dynamic_port *dyn_port;
+ unsigned long index;
+
+ xa_for_each(&pf->dyn_ports, index, dyn_port)
+ ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->controller_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->pfnum != pf->hw.pf_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+ return -EINVAL;
+ }
+
+ if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "Dynamic MSI-X interrupt allocation is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes the devlink port and deallocates all resources associated
+ * with the created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+ ice_dealloc_dynamic_port(dyn_port);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Sets the MAC address for the port: verifies the arguments and copies
+ * the address to the subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->attached) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "Ethernet address can be changed only in detached state");
+ return -EBUSY;
+ }
+
+ if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: on return, the hw address of the port
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Returns the MAC address of the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ ether_addr_copy(hw_addr, dyn_port->hw_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return ice_activate_dynamic_port(dyn_port, extack);
+
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ ice_deactivate_dynamic_port(dyn_port);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->active)
+ *state = DEVLINK_PORT_FN_STATE_ACTIVE;
+ else
+ *state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ if (dyn_port->attached)
+ *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ else
+ *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+ return 0;
+}
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+ .port_del = ice_devlink_port_del,
+ .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+ .port_fn_state_get = ice_devlink_port_fn_state_get,
+ .port_fn_state_set = ice_devlink_port_fn_state_set,
+};
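These ops back the standard devlink port-function commands. As a usage sketch
(the PCI address and port index below are illustrative, not taken from the
patch), a subfunction port on this driver would typically be driven with the
stock devlink tool:

	# set the MAC while the SF is still detached, then activate it
	devlink port function set pci/0000:03:00.0/1 hw_addr 00:11:22:33:44:55
	devlink port function set pci/0000:03:00.0/1 state active

	# invokes ice_devlink_port_del() and tears the dynamic port down
	devlink port del pci/0000:03:00.0/1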
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack, u32 *sfnum)
+{
+ int err;
+
+ /* If user didn't request an explicit number, pick one */
+ if (!new_attr->sfnum_valid)
+ return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+ GFP_KERNEL);
+
+ /* Otherwise, check and use the number provided */
+ err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+ return err;
+ }
+
+ *sfnum = new_attr->sfnum;
+
+ return 0;
+}
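The two branches above lean on different xarray primitives: xa_alloc() finds
and returns a free index, while xa_insert() claims a caller-chosen index and
fails with -EBUSY when it is already occupied (storing NULL merely reserves
the slot). A minimal kernel-style sketch of the same pattern, using an
illustrative xarray rather than pf->sf_nums:

	struct xarray nums;
	u32 picked;
	int err;

	xa_init_flags(&nums, XA_FLAGS_ALLOC);

	/* auto-assign: the xarray picks any free 32-bit index */
	err = xa_alloc(&nums, &picked, NULL, xa_limit_32b, GFP_KERNEL);

	/* user-assigned: -EBUSY if index 5 is already reserved */
	err = xa_insert(&nums, 5, NULL, GFP_KERNEL);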
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = dyn_port->vsi;
+ pf = dyn_port->pf;
+
+ devlink_port = &dyn_port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+ attrs.pci_sf.pf = pf->hw.pf_id;
+ attrs.pci_sf.sf = dyn_port->sfnum;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ devl_rate_leaf_destroy(&dyn_port->devlink_port);
+ devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: on success, set to the newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_dynamic_port *dyn_port;
+ struct ice_vsi *vsi;
+ u32 sfnum;
+ int err;
+
+ err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+ if (err)
+ return err;
+
+ dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+ if (!dyn_port) {
+ err = -ENOMEM;
+ goto unroll_reserve_sf_num;
+ }
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+ err = -ENOMEM;
+ goto unroll_dyn_port_alloc;
+ }
+
+ dyn_port->vsi = vsi;
+ dyn_port->pf = pf;
+ dyn_port->sfnum = sfnum;
+ eth_random_addr(dyn_port->hw_addr);
+
+ err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+ goto unroll_vsi_alloc;
+ }
+
+ err = ice_eswitch_attach_sf(pf, dyn_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+ goto unroll_xa_insert;
+ }
+
+ *devlink_port = &dyn_port->devlink_port;
+
+ return 0;
+
+unroll_xa_insert:
+ xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+ ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+ kfree(dyn_port);
+unroll_reserve_sf_num:
+ xa_erase(&pf->sf_nums, sfnum);
+
+ return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Creates a new devlink port: checks the new port attributes, rejects any
+ * unsupported parameters, and allocates a new subfunction for that port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+ if (err)
+ return err;
+
+ return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
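This is the entry point behind the devlink "port add" netlink command. As a
hedged usage example (bus address and sfnum are made up), creating a
subfunction that passes the checks in ice_devlink_port_new_check_attr() would
look like:

	devlink port add pci/0000:03:00.0 flavour pcisf pfnum 0 sfnum 106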
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
index 9223bcdb6444..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -4,9 +4,55 @@
#ifndef _DEVLINK_PORT_H_
#define _DEVLINK_PORT_H_
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true if the port is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port. Each port flavour keeps
+ * its implementation-specific data in the union at the end of the structure.
+ */
+struct ice_dynamic_port {
+ u8 hw_addr[ETH_ALEN];
+ u8 active: 1;
+ u8 attached: 1;
+ struct devlink_port devlink_port;
+ struct ice_pf *pf;
+ struct ice_vsi *vsi;
+ unsigned long repr_id;
+ u32 sfnum;
+ /* Flavour-specific implementation data */
+ union {
+ struct ice_sf_dev *sf_dev;
+ };
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+ container_of(port, struct ice_dynamic_port, devlink_port)
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
#endif /* _DEVLINK_PORT_H_ */
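The ice_devlink_port_to_dyn() macro above is plain container_of() arithmetic:
given a pointer to the embedded devlink_port member, it steps back to the
enclosing struct ice_dynamic_port. A self-contained userspace sketch of the
same trick (the demo struct and values are invented for illustration):

	#include <stddef.h>
	#include <stdio.h>

	/* userspace re-creation of the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct devlink_port_demo { int index; };

	struct dyn_port_demo {              /* stand-in for ice_dynamic_port */
		unsigned int sfnum;
		struct devlink_port_demo devlink_port;
	};

	int main(void)
	{
		struct dyn_port_demo dyn = { .sfnum = 7 };
		struct devlink_port_demo *port = &dyn.devlink_port;

		/* recover the wrapper, as ice_devlink_port_to_dyn() does */
		struct dyn_port_demo *back =
			container_of(port, struct dyn_port_demo, devlink_port);

		printf("sfnum = %u\n", back->sfnum); /* prints: sfnum = 7 */
		return 0;
	}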
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ce8b5505b16d..d6f80da30dec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -451,7 +451,12 @@ struct ice_vsi {
struct_group_tagged(ice_vsi_cfg_params, params,
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_channel *ch; /* VSI's channel structure, may be NULL */
- struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ union {
+ /* VF associated with this VSI, may be NULL */
+ struct ice_vf *vf;
+ /* SF associated with this VSI, may be NULL */
+ struct ice_dynamic_port *sf;
+ };
u32 flags; /* VSI flags used for rebuild and configuration */
enum ice_vsi_type type; /* the type of the VSI */
);
@@ -652,6 +657,9 @@ struct ice_pf {
struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
+ struct xarray dyn_ports;
+ struct xarray sf_nums;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -918,6 +926,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -1003,6 +1012,14 @@ void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 66f02988d549..0be1a98d7cc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -2632,12 +2632,16 @@ struct ice_aq_desc {
/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
#define ICE_AQ_LG_BUF 512
+#define ICE_AQ_FLAG_DD_S 0
+#define ICE_AQ_FLAG_CMP_S 1
#define ICE_AQ_FLAG_ERR_S 2
#define ICE_AQ_FLAG_LB_S 9
#define ICE_AQ_FLAG_RD_S 10
#define ICE_AQ_FLAG_BUF_S 12
#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
+#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c158749a80e0..4a9a6899fc45 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -325,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
@@ -540,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 66f29bac783a..27208a60cece 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -10,6 +10,7 @@
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
+#include "ice_parser.h"
#include <linux/avf/virtchnl.h>
#include "ice_switch.h"
#include "ice_fdir.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffaa6511c455..e3959ad442a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return -ENOMEM;
cq->sq.desc_buf.size = size;
- cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
- sizeof(struct ice_sq_cd), GFP_KERNEL);
- if (!cq->sq.cmd_buf) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
- return -ENOMEM;
- }
-
return 0;
}
@@ -188,7 +177,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq_buf_size > ICE_AQ_LG_BUF)
desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
desc->opcode = 0;
- /* This is in accordance with Admin queue design, there is no
+ /* This is in accordance with control queue design; there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16(bi->size);
@@ -338,8 +327,6 @@ do { \
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
- /* free the buffer info list */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
@@ -405,11 +392,11 @@ init_ctrlq_exit:
}
/**
- * ice_init_rq - initialize ARQ
+ * ice_init_rq - initialize the receive side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * The main initialization routine for the Admin Receive (Event) Queue.
+ * The main initialization routine for the receive side of a control queue.
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
@@ -465,7 +452,7 @@ init_ctrlq_exit:
}
/**
- * ice_shutdown_sq - shutdown the Control ATQ
+ * ice_shutdown_sq - shutdown the transmit side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto shutdown_sq_out;
}
- /* Stop firmware AdminQ processing */
+ /* Stop processing of the control queue */
wr32(hw, cq->sq.head, 0);
wr32(hw, cq->sq.tail, 0);
wr32(hw, cq->sq.len, 0);
@@ -501,7 +488,7 @@ shutdown_sq_out:
}
/**
- * ice_aq_ver_check - Check the reported AQ API version.
+ * ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@@ -521,14 +508,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
@@ -855,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_clean_sq - cleans Admin send queue (ATQ)
+ * ice_clean_sq - cleans the send side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -865,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_sq_cd *details;
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
- memset(details, 0, sizeof(*details));
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
@@ -888,18 +877,43 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_ctl_q_str - Convert control queue type to string
+ * @qtype: the control queue type
+ *
+ * Return: A string name for the given control queue type.
+ */
+static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
+{
+ switch (qtype) {
+ case ICE_CTL_Q_UNKNOWN:
+ return "Unknown CQ";
+ case ICE_CTL_Q_ADMIN:
+ return "AQ";
+ case ICE_CTL_Q_MAILBOX:
+ return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
+ default:
+ return "Unrecognized CQ";
+ }
+}
+
+/**
* ice_debug_cq
* @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
+ * @response: true if this is the writeback response
*
* Dumps debug log about control command with descriptor contents.
*/
-static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ void *desc, void *buf, u16 buf_len, bool response)
{
struct ice_aq_desc *cq_desc = desc;
- u16 len;
+ u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
@@ -908,48 +922,63 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (!desc)
return;
- len = le16_to_cpu(cq_desc->datalen);
+ datalen = le16_to_cpu(cq_desc->datalen);
+ flags = le16_to_cpu(cq_desc->flags);
- ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n",
+ ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
+ le16_to_cpu(cq_desc->opcode), flags, datalen,
+ le16_to_cpu(cq_desc->retval),
le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_low),
le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param1),
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
+ /* Dump buffer iff 1) one exists and 2) is either a response indicated
+ * by the DD and/or CMP flag set or a command with the RD flag set.
+ */
+ if (buf && cq_desc->datalen &&
+ (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
+ char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
+
+ sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
+ buf,
+ min_t(u16, buf_len, datalen));
}
}
/**
- * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * ice_sq_done - poll until the last send on a control queue has completed
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
+ * Use read_poll_timeout to poll the control queue head, checking until it
+ * matches next_to_use. According to the control queue designers, this has
+ * better timing reliability than the DD bit.
+ *
+ * Return: true if all the descriptors on the send side of a control queue
+ * are finished processing, false otherwise.
*/
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
+ u32 head;
+
+ /* Wait a short time before the initial check, to allow hardware time
+ * for completion.
*/
- return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+ udelay(5);
+
+ return !rd32_poll_timeout(hw, cq->sq.head,
+ head, head == cq->sq.next_to_use,
+ 20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
}
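rd32_poll_timeout() is the ice register-read wrapper around read_poll_timeout()
from <linux/iopoll.h>. The call above is roughly equivalent to the open-coded
loop below (a simplification for illustration, not the actual macro expansion):

	u32 head;
	u64 waited_us = 0;

	for (;;) {
		head = rd32(hw, cq->sq.head);
		if (head == cq->sq.next_to_use)
			return true;                 /* queue drained */
		if (waited_us >= ICE_CTL_Q_SQ_CMD_TIMEOUT)
			return false;                /* 1s elapsed */
		usleep_range(20, 40);                /* ~20us poll step */
		waited_us += 20;
	}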
/**
- * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * ice_sq_send_cmd - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
@@ -957,8 +986,9 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the queue,
- * cleans the queue, etc.
+ * The main send routine for the transmit side of a control queue. It puts the command
+ * on the queue, bumps the tail, waits for processing of the command, captures
+ * command status and results, etc.
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -968,8 +998,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- struct ice_sq_cd *details;
- unsigned long timeout;
int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -1013,12 +1041,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
- if (cd)
- *details = *cd;
- else
- memset(details, 0, sizeof(*details));
-
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
@@ -1055,7 +1077,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -1063,20 +1085,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
- /* Wait a short time before initial ice_sq_done() check, to allow
- * hardware time for completion.
+ /* Wait for the command to complete. If it finishes within the
+ * timeout, copy the descriptor back to temp.
*/
- udelay(5);
-
- timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
- do {
- if (ice_sq_done(hw, cq))
- break;
-
- usleep_range(100, 150);
- } while (time_before(jiffies, timeout));
-
- /* if ready, copy the desc back to temp */
if (ice_sq_done(hw, cq)) {
memcpy(desc, desc_on_ring, sizeof(*desc));
if (buf) {
@@ -1108,12 +1119,11 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
- if (details->wb_desc)
- memcpy(details->wb_desc, desc_on_ring,
- sizeof(*details->wb_desc));
+ if (cd && cd->wb_desc)
+ memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
/* update the error if time out occurred */
if (!cmd_completed) {
@@ -1154,9 +1164,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'.
+ * Clean one element from the receive side of a control queue. On return 'e'
+ * contains the contents of the message, and 'pending' contains the number of
+ * events left to process.
*/
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -1212,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 1d54b1cdb1c5..ca97b7365a1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -43,14 +43,13 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
- void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
@@ -80,8 +79,6 @@ struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
-#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
-
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index f182179529b7..953262b88a58 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -289,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
-static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state, u32 sect_type,
- u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
+void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
{
void *entry;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 622543f08b43..97f272317475 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -261,10 +261,17 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -276,6 +283,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
@@ -451,6 +459,11 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index e92be6f130a3..cd95705d1e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -9,6 +9,7 @@
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
+#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -30,6 +31,10 @@ static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
};
+static const struct dpll_pin_frequency ice_esync_range[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
+};
+
/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
@@ -394,8 +399,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
switch (pin_type) {
case ICE_DPLL_PIN_TYPE_INPUT:
- ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
- NULL, &pin->flags[0],
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status,
+ NULL, NULL, &pin->flags[0],
&pin->freq, &pin->phase_adjust);
if (ret)
goto err;
@@ -430,7 +435,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
goto err;
parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
pin->state[pf->dplls.eec.dpll_idx] =
parent == pf->dplls.eec.dpll_idx ?
DPLL_PIN_STATE_CONNECTED :
@@ -1099,6 +1104,214 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_output_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on the output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on the output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on the input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on the input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
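All four esync callbacks report the same way once their capability check
passes: 1 Hz with a 25% pulse-high duty cycle when embedded sync is enabled,
zero otherwise. Condensed into one hypothetical helper (not present in the
patch), the shared rule is:

	static void ice_dpll_fill_esync(struct dpll_pin_esync *esync, bool en)
	{
		esync->freq = en ? DPLL_PIN_FREQUENCY_1_HZ : 0;
		esync->pulse = en ? ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT : 0;
	}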
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1222,6 +1435,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_input_phase_adjust_set,
.phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_input_esync_set,
+ .esync_get = ice_dpll_input_esync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1232,6 +1447,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
.direction_get = ice_dpll_output_direction,
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_output_phase_adjust_set,
+ .esync_set = ice_dpll_output_esync_set,
+ .esync_get = ice_dpll_output_esync_get,
};
static const struct dpll_device_ops ice_dpll_ops = {
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index 93172e93995b..c320f1bf7d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -31,6 +31,7 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ u8 status;
};
/** ice_dpll - store info required for DPLL control
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 3cfa071e3718..c0b3e70a7ea3 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
ice_eswitch_start_all_tx_queues(pf);
}
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_repr *repr;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
- devl_lock(devlink);
- repr = ice_repr_add_vf(vf);
- devl_unlock(devlink);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
+ err = repr->ops.add(repr);
+ if (err)
goto err_create_repr;
- }
err = ice_eswitch_setup_repr(pf, repr);
if (err)
@@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_xa_alloc;
- vf->repr_id = repr->id;
+ *id = repr->id;
ice_eswitch_start_reprs(pf);
@@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
- devl_unlock(devlink);
+ repr->ops.rem(repr);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -506,14 +498,59 @@ err_create_repr:
return err;
}
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * During attach, a port representor for the VF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ int err;
- if (!repr)
- return;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ devl_lock(devlink);
+ err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+ devl_unlock(devlink);
+
+ return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * During attach, a port representor for the SF is created.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create_sf(sf);
+ int err;
+
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+
+ return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
ice_eswitch_stop_reprs(pf);
xa_erase(&pf->eswitch.reprs, repr->id);
@@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
+ repr->ops.rem(repr);
+ ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
+ struct devlink *devlink = priv_to_devlink(pf);
+
/* since all port representors are destroyed, there is
* no point in keeping the nodes
*/
@@ -533,10 +572,42 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
} else {
ice_eswitch_start_reprs(pf);
}
+}
+
+/**
+ * ice_eswitch_detach_vf - detach VF from the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
+ */
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
+ return;
+
+ devl_lock(devlink);
+ ice_eswitch_detach(pf, repr);
devl_unlock(devlink);
}
/**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
+
+ if (!repr)
+ return;
+
+ ice_eswitch_detach(pf, repr);
+}
+
+/**
* ice_eswitch_get_target - get netdev based on src_vsi from descriptor
* @rx_ring: ring used to receive the packet
* @rx_desc: descriptor used to get src_vsi value
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 78fd39a6935d..20ce32dda69c 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,11 +5,13 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
+#include "devlink/devlink_port.h"
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
+
+static inline int
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bc79ba974e49..d5cc934d1359 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3792,8 +3792,6 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -4414,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4426,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct ice_repr *repr = ice_netdev_to_repr(netdev);
/* for port representors only ETH_SS_STATS is supported */
- if (ice_check_vf_ready_for_cfg(repr->vf) ||
- stringset != ETH_SS_STATS)
+ if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
return;
__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4440,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
@@ -4725,6 +4722,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.cap_rss_sym_xor_supported = true,
+ .rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 20d5db88c99f..ed95072ca6e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
}
/**
+ * ice_disable_fd_swap - set registers appropriately to disable FD SWAP
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ */
+static void
+ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
+{
+ u16 swap_val, fvw_num;
+ unsigned int i;
+
+ swap_val = ICE_SWAP_VALID;
+ fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
+
+ /* Since the SWAP flag in the programming descriptor doesn't work,
+ * disable the SWAP option by programming the relevant SWAP and
+ * INSET register sets instead.
+ */
+ for (i = 0; i < fvw_num; i++) {
+ u32 raw_swap, raw_in;
+ unsigned int j;
+
+ raw_swap = 0;
+ raw_in = 0;
+
+ for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
+ raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
+ raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
+ }
+
+ /* write the FDIR swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ /* write the FDIR inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
+ }
+}
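Each GLQF_FDSWAP register packs ICE_FDIR_REG_SET_SIZE (4) byte-wide indexes,
one per byte lane. A small sketch of the packing arithmetic, with 0x40 assumed
as the starting swap_val purely for illustration:

	u32 raw_swap = 0;
	u16 swap_val = 0x40;                    /* illustrative start value */
	int j;

	for (j = 0; j < 4; j++)                 /* ICE_FDIR_REG_SET_SIZE */
		raw_swap |= (u32)swap_val++ << (j * BITS_PER_BYTE);

	/* raw_swap == 0x43424140: lanes 0..3 hold indexes 0x40..0x43 */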
+
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
+ * @fd_swap: enable/disable FDIR paired src/dst fields swap option
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm)
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
+ if (blk == ICE_BLK_FD && fd_swap) {
/* For Flow Director block, the extraction sequence may
* need to be altered in the case where there are paired
* fields that have no match. This is necessary because
@@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_update_fd_swap(hw, prof_id, es);
if (status)
goto err_ice_add_prof;
+ } else if (blk == ICE_BLK_FD) {
+ ice_disable_fd_swap(hw, prof_id);
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
@@ -4099,6 +4146,54 @@ err_ice_add_prof_id_flow:
}
/**
+ * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
+ * @hw: pointer to the HW struct
+ * @blk: HW block
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @hdl: profile handle
+ *
+ * Update the hardware tables to enable the FDIR profile indicated by @hdl for
+ * the VSI specified by @dest_vsi. On success, the flow will be enabled.
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl)
+{
+ u16 vsi_num;
+ int status;
+
+ if (blk != ICE_BLK_FD)
+ return -EINVAL;
+
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
+ status);
+ return status;
+ }
+
+ vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
+ status);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
+
+ return status;
+}
+
+/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct
* @lst: list to remove the profile from
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index b39d7cdc381f..90b9b0993122 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,6 +6,8 @@
#include "ice_type.h"
+#define ICE_FDIR_REG_SET_SIZE 4
+
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
@@ -42,13 +44,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm);
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fc2b58f56279..d97b751052f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = {
};
/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+};
+
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
@@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask, symm);
+ params->mask, symm, true);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
+#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
+#define FLAG_GTP_EH_PDU BIT_ULL(14)
+
+#define HI_BYTE_IN_WORD GENMASK(15, 8)
+#define LO_BYTE_IN_WORD GENMASK(7, 0)
+
+#define FLAG_GTPU_MSK \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_UP \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_DW FLAG_GTP_EH_PDU
+
+/**
+ * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
+ * @hw: pointer to the HW struct
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @prof: stores parsed profile info from raw flow
+ * @blk: classification blk
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk)
+{
+ u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ struct ice_flow_prof_params *params __free(kfree);
+ u8 fv_words = hw->blk[blk].es.fvw;
+ int status;
+ int i, idx;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ for (i = 0; i < prof->fv_num; i++) {
+ if (hw->blk[blk].es.reverse)
+ idx = fv_words - i - 1;
+ else
+ idx = i;
+ params->es[idx].prot_id = prof->fv[i].proto_id;
+ params->es[idx].off = prof->fv[i].offset;
+ params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
+ HI_BYTE_IN_WORD) |
+ (((prof->fv[i].msk) >> BITS_PER_BYTE) &
+ LO_BYTE_IN_WORD);
+ }
+
+ switch (prof->flags) {
+ case FLAG_GTPU_DW:
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ break;
+ case FLAG_GTPU_UP:
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ break;
+ default:
+ if (prof->flags_msk & FLAG_GTPU_MSK) {
+ params->attr = ice_attr_gtpu_session;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
+ }
+ break;
+ }
+
+ status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ params->attr, params->attr_cnt,
+ params->es, params->mask, false, false);
+ if (status)
+ return status;
+
+ status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
+ if (status)
+ ice_rem_prof(hw, blk, id);
+
+ return status;
+}
+
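An aside on the mask handling above: the parser reports each field-vector word's mask in the opposite byte order from what the hardware field vector expects, so ice_flow_set_parser_prof() exchanges the high and low bytes of every 16-bit mask. A minimal standalone sketch of that swap, assuming <linux/bits.h> for GENMASK() and BITS_PER_BYTE (the helper name is illustrative, not part of the patch):

	static u16 example_swap_fv_mask(u16 msk)
	{
		/* e.g. 0x12ff (parser byte order) becomes 0xff12 */
		return ((msk << BITS_PER_BYTE) & GENMASK(15, 8)) |
		       ((msk >> BITS_PER_BYTE) & GENMASK(7, 0));
	}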
/**
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
* @hw: pointer to the HW struct
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2fd2e0cb483d..6cb7bb879c98 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -5,6 +5,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
+#include "ice_parser.h"
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type {
ICE_RSS_ANY_HEADERS
};
+struct ice_vsi;
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
@@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 737c00b02dd0..06e712cdc3d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
@@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
+ break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -423,7 +433,7 @@ err_out:
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
*/
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -559,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -595,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -889,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
+ case ICE_VSI_SF:
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
+ break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1136,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
 /* VF VSI will get a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1214,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2095,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2264,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2413,13 +2433,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
-
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
@@ -2648,7 +2661,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2676,7 +2690,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2747,6 +2762,26 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
}
/**
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
+ *
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g. resume,
+ * reset/rebuild, etc.)
+ */
+void ice_napi_add(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -2764,6 +2799,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_rss_clean(vsi);
ice_vsi_close(vsi);
+
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
ice_vsi_decfg(vsi);
/* retain SW VSI data structure since it is needed to unregister and
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 36d86535695d..1a6cfc8693ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -45,6 +45,7 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
@@ -59,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c7db88b517da..eeb48cc48e08 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -15,6 +15,7 @@
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
+#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -2974,6 +2975,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -3089,14 +3093,14 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
* @dev: netdevice
* @xdp: XDP command
*/
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
int ret;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
@@ -3556,26 +3560,6 @@ skip_req_irq:
}
/**
- * ice_napi_add - register NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be registered
- *
- * This function is only called in the driver's load path. Registering the NAPI
- * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
- * reset/rebuild, etc.)
- */
-static void ice_napi_add(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
-}
-
-/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @vsi: the VSI associated with the new netdev
*/
@@ -3608,7 +3592,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
-static void ice_set_netdev_features(struct net_device *netdev)
+void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
@@ -3790,8 +3774,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding VLAN IDs
*/
-static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3853,8 +3836,7 @@ finish:
*
* net_device_ops implementation for removing VLAN IDs
*/
-static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -4023,6 +4005,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+
+ xa_destroy(&pf->dyn_ports);
+ xa_destroy(&pf->sf_nums);
}
/**
@@ -4116,6 +4101,9 @@ static int ice_init_pf(struct ice_pf *pf)
hash_init(pf->vfs.table);
ice_mbx_init_snapshot(&pf->hw);
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+
return 0;
}
@@ -5363,7 +5351,6 @@ err_load:
ice_deinit(pf);
err_init:
ice_adapter_put(pdev);
- pci_disable_device(pdev);
return err;
}
@@ -5458,6 +5445,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
devl_lock(priv_to_devlink(pf));
+ ice_dealloc_all_dynamic_ports(pf);
ice_deinit_devlink(pf);
ice_unload(pf);
@@ -5470,7 +5458,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf);
ice_adapter_put(pdev);
- pci_disable_device(pdev);
}
/**
@@ -5946,8 +5933,16 @@ static int __init ice_module_init(void)
goto err_dest_lag_wq;
}
+ status = ice_sf_driver_register();
+ if (status) {
+ pr_err("Failed to register SF driver, err %d\n", status);
+ goto err_sf_driver;
+ }
+
return 0;
+err_sf_driver:
+ pci_unregister_driver(&ice_driver);
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
ice_debugfs_exit();
@@ -5965,6 +5960,7 @@ module_init(ice_module_init);
*/
static void __exit ice_module_exit(void)
{
+ ice_sf_driver_unregister();
pci_unregister_driver(&ice_driver);
ice_debugfs_exit();
destroy_workqueue(ice_wq);
@@ -6766,7 +6762,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev && vsi->type == ICE_VSI_PF) {
+ ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)))) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -7124,7 +7121,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
-static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -7465,7 +7461,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
- if (vsi->type == ICE_VSI_PF) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
@@ -7801,7 +7797,7 @@ clear_recovery:
*
* Returns 0 on success, negative on failure
*/
-static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -8225,7 +8221,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
-static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *tx_ring = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index a2562f04267f..b9f383494b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -12,6 +12,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/iopoll.h>
#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -23,6 +24,9 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr)
+
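The new wrapper simply binds rd32() as the read op of read_poll_timeout() from <linux/iopoll.h>, with no sleep before the first read. A hedged usage sketch, where the register offset EXAMPLE_REG and the polled bit are illustrative only:

	u32 val;
	int err;

	/* poll until bit 0 is set, ~100 us between reads, 10 ms budget */
	err = rd32_poll_timeout(hw, EXAMPLE_REG, val, val & BIT(0), 100, 10000);
	if (err)
		return err;	/* -ETIMEDOUT if the bit never rose */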
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m ## U) << (s))
@@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw);
#define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
- print_hex_dump_debug(KBUILD_MODNAME " ", \
- DUMP_PREFIX_OFFSET, rowsize, \
- groupsize, buf, len, false)
-#else
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \
+ rowsize, groupsize, buf, len, false)
+#else /* CONFIG_DYNAMIC_DEBUG */
#define ice_debug(hw, type, fmt, args...) \
do { \
if ((type) & (hw)->debug_mask) \
@@ -51,16 +54,15 @@ do { \
} while (0)
#ifdef DEBUG
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
if ((type) & (hw)->debug_mask) \
- print_hex_dump_debug(KBUILD_MODNAME, \
- DUMP_PREFIX_OFFSET, \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\
rowsize, groupsize, buf, \
len, false); \
} while (0)
-#else
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#else /* DEBUG */
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
struct ice_hw *hw_l = hw; \
if ((type) & (hw_l)->debug_mask) { \
@@ -78,4 +80,10 @@ do { \
#endif /* DEBUG */
#endif /* CONFIG_DYNAMIC_DEBUG */
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+ _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len)
+
+#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \
+ _ice_debug_array(hw, type, prefix, 16, 1, buf, len)
+
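Both macros now funnel into _ice_debug_array(), differing only in the prefix and in the fixed 16-byte row / 1-byte group layout of the *_w_prefix variant. A brief usage sketch (buffer, length and debug-mask values are illustrative):

	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, buf, len);
	ice_debug_array_w_prefix(hw, ICE_DBG_PARSER, "rx pkt: ", buf, len);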
#endif /* _ICE_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c
new file mode 100644
index 000000000000..664beb64f557
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.c
@@ -0,0 +1,2430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+struct ice_pkg_sect_hdr {
+ __le16 count;
+ __le16 offset;
+};
+
+/**
+ * ice_parser_sect_item_get - parse an item from a section
+ * @sect_type: section type
+ * @section: section object
+ * @index: index of the item to get
+ * @offset: dummy, present only to match the handler prototype used by ice_pkg_enum_entry
+ *
+ * Return: a pointer to the item or NULL.
+ */
+static void *ice_parser_sect_item_get(u32 sect_type, void *section,
+ u32 index, u32 __maybe_unused *offset)
+{
+ size_t data_off = ICE_SEC_DATA_OFFSET;
+ struct ice_pkg_sect_hdr *hdr;
+ size_t size;
+
+ if (!section)
+ return NULL;
+
+ switch (sect_type) {
+ case ICE_SID_RXPARSER_IMEM:
+ size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_METADATA_INIT:
+ size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_CAM:
+ size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PG_SPILL:
+ size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_CAM:
+ size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_SPILL:
+ size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_BOOST_TCAM:
+ size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_LBL_RXPARSER_TMEM:
+ data_off = ICE_SEC_LBL_DATA_OFFSET;
+ size = ICE_SID_LBL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_PTYPE:
+ size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_GRP:
+ size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PROTO_GRP:
+ size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_FLAG_REDIR:
+ size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE;
+ break;
+ default:
+ return NULL;
+ }
+
+ hdr = section;
+ if (index >= le16_to_cpu(hdr->count))
+ return NULL;
+
+ return section + data_off + index * size;
+}
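The lookup is plain offset arithmetic with a bounds check against the section header. As a concrete (illustrative) case: if an IMEM section's header reports count = 48, a request for index 48 returns NULL, while index 2 resolves to section + ICE_SEC_DATA_OFFSET + 2 * ICE_SID_RXPARSER_IMEM_ENTRY_SIZE; only the label section rebases at ICE_SEC_LBL_DATA_OFFSET instead.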
+
+/**
+ * ice_parser_create_table - create an item table from a section
+ * @hw: pointer to the hardware structure
+ * @sect_type: section type
+ * @item_size: item size in bytes
+ * @length: number of items in the table to create
+ * @parse_item: the function to parse the item
+ * @no_offset: ignore header offset, calculate index from 0
+ *
+ * Return: a pointer to the allocated table or ERR_PTR.
+ */
+static void *
+ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
+ u32 item_size, u32 length,
+ void (*parse_item)(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int size), bool no_offset)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ void *table, *data, *item;
+ u16 idx = 0;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ table = kzalloc(item_size * length, GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ data = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
+ ice_parser_sect_item_get);
+ seg = NULL;
+ if (data) {
+ struct ice_pkg_sect_hdr *hdr = state.sect;
+
+ if (!no_offset)
+ idx = le16_to_cpu(hdr->offset) +
+ state.entry_idx;
+
+ item = (void *)((uintptr_t)table + idx * item_size);
+ parse_item(hw, idx, item, data, item_size);
+
+ if (no_offset)
+ idx++;
+ }
+ } while (data);
+
+ return table;
+}
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost main:\n");
+ dev_info(dev, "\talu0 = %d\n", bm->alu0);
+ dev_info(dev, "\talu1 = %d\n", bm->alu1);
+ dev_info(dev, "\talu2 = %d\n", bm->alu2);
+ dev_info(dev, "\tpg = %d\n", bm->pg);
+}
+
+static void ice_imem_bst_kb_dump(struct ice_hw *hw,
+ struct ice_bst_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost key builder:\n");
+ dev_info(dev, "\tpriority = %d\n", kb->prio);
+ dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl);
+}
+
+static void ice_imem_np_kb_dump(struct ice_hw *hw,
+ struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_imem_pg_kb_dump(struct ice_hw *hw,
+ struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_imem_alu_dump(struct ice_hw *hw,
+ struct ice_alu *alu, int index)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", index);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_imem_dump - dump an imem item info
+ * @hw: pointer to the hardware structure
+ * @item: imem item to dump
+ */
+static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ ice_imem_bst_bm_dump(hw, &item->b_m);
+ ice_imem_bst_kb_dump(hw, &item->b_kb);
+ dev_info(dev, "pg priority = %d\n", item->pg_prio);
+ ice_imem_np_kb_dump(hw, &item->np_kb);
+ ice_imem_pg_kb_dump(hw, &item->pg_kb);
+ ice_imem_alu_dump(hw, &item->alu0, 0);
+ ice_imem_alu_dump(hw, &item->alu1, 1);
+ ice_imem_alu_dump(hw, &item->alu2, 2);
+}
+
+#define ICE_IM_BM_ALU0 BIT(0)
+#define ICE_IM_BM_ALU1 BIT(1)
+#define ICE_IM_BM_ALU2 BIT(2)
+#define ICE_IM_BM_PG BIT(3)
+
+/**
+ * ice_imem_bm_init - parse 4 bits of Boost Main
+ * @bm: pointer to the Boost Main structure
+ * @data: Boost Main data to be parsed
+ */
+static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data)
+{
+ bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data);
+ bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data);
+ bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data);
+ bm->pg = FIELD_GET(ICE_IM_BM_PG, data);
+}
+
+#define ICE_IM_BKB_PRIO GENMASK(7, 0)
+#define ICE_IM_BKB_TSR_CTRL BIT(8)
+
+/**
+ * ice_imem_bkb_init - parse 10 bits of Boost Key Builder
+ * @bkb: pointer to the Boost Key Builder structure
+ * @data: Boost Key Builder data to be parsed
+ */
+static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
+{
+ bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data);
+ bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data);
+}
+
+#define ICE_IM_NPKB_OPC GENMASK(1, 0)
+#define ICE_IM_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_IM_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data);
+}
+
+#define ICE_IM_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_IM_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_IM_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_IM_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data);
+}
+
+#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_IM_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_IM_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_IM_ALU_SXS BIT_ULL(19)
+#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_IM_ALU_INC0 BIT_ULL(38)
+#define ICE_IM_ALU_INC1 BIT_ULL(39)
+#define ICE_IM_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_IM_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bit field */
+#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \
+ 50 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \
+ 58 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \
+ 75 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \
+ 81 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \
+ 88 - ICE_IM_ALU_BA_S)
+
+/**
+ * ice_imem_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64);
+
+ idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64);
+}
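To make the two-step extraction concrete, here is the arithmetic for ALU0 (a worked example, not new code): ALU0 starts at bit 69 of the 384-bit IMEM entry, so the caller passes &buf[69 / 8] = &buf[8] with off = 69 % 8 = 5. Inside the helper, the second 64-bit read rebases at internal bit ICE_IM_ALU_BA_S = 50: idd = (50 + 5) / 8 = 6 and off = (50 + 5) % 8 = 7, i.e. the second u64 is fetched 6 bytes further on and shifted right by 7 bits before the ICE_IM_ALU_BA..ICE_IM_ALU_FSI masks, which are already expressed relative to bit 50, are applied.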
+
+#define ICE_IMEM_BM_S 0
+#define ICE_IMEM_BKB_S 4
+#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGP GENMASK(15, 14)
+#define ICE_IMEM_NPKB_S 16
+#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_S 34
+#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_S 69
+#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_S 165
+#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_S 357
+#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_imem_parse_item - parse 384 bits of IMEM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of IMEM entry
+ * @item: item of IMEM entry
+ * @data: IMEM entry data to be parsed
+ * @size: size of IMEM entry
+ */
+static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_imem_item *ii = item;
+ u8 *buf = data;
+
+ ii->idx = idx;
+
+ ice_imem_bm_init(&ii->b_m, *(u8 *)buf);
+ ice_imem_bkb_init(&ii->b_kb,
+ *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >>
+ ICE_IMEM_BKB_OFF);
+
+ ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf);
+
+ ice_imem_npkb_init(&ii->np_kb,
+ *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >>
+ ICE_IMEM_NPKB_OFF);
+ ice_imem_pgkb_init(&ii->pg_kb,
+ *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >>
+ ICE_IMEM_PGKB_OFF);
+
+ ice_imem_alu_init(&ii->alu0,
+ &buf[ICE_IMEM_ALU0_IDD],
+ ICE_IMEM_ALU0_OFF);
+ ice_imem_alu_init(&ii->alu1,
+ &buf[ICE_IMEM_ALU1_IDD],
+ ICE_IMEM_ALU1_OFF);
+ ice_imem_alu_init(&ii->alu2,
+ &buf[ICE_IMEM_ALU2_IDD],
+ ICE_IMEM_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_imem_dump(hw, ii);
+}
+
+/**
+ * ice_imem_table_get - create an imem table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated IMEM table.
+ */
+static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
+ sizeof(struct ice_imem_item),
+ ICE_IMEM_TABLE_SIZE,
+ ice_imem_parse_item, false);
+}
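Since ice_parser_create_table() encodes failure as an ERR_PTR(), callers of these *_table_get() helpers are expected to test the result with IS_ERR() rather than against NULL. A hedged sketch:

	struct ice_imem_item *imem = ice_imem_table_get(hw);

	if (IS_ERR(imem))
		return PTR_ERR(imem);
	/* ... use imem[idx] ...; kfree(imem) when done */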
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+/**
+ * ice_metainit_dump - dump a metainit item info
+ * @hw: pointer to the hardware structure
+ * @item: metainit item to dump
+ */
+static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "tsr = %d\n", item->tsr);
+ dev_info(dev, "ho = %d\n", item->ho);
+ dev_info(dev, "pc = %d\n", item->pc);
+ dev_info(dev, "pg_rn = %d\n", item->pg_rn);
+ dev_info(dev, "cd = %d\n", item->cd);
+
+ dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl);
+ dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid);
+ dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start);
+ dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len);
+ dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id);
+
+ dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl);
+ dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid);
+ dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start);
+ dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len);
+ dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id);
+
+ dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl);
+ dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid);
+ dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start);
+ dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len);
+ dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id);
+
+ dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl);
+ dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid);
+ dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start);
+ dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len);
+ dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id);
+
+ dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags));
+}
+
+#define ICE_MI_TSR GENMASK_ULL(7, 0)
+#define ICE_MI_HO GENMASK_ULL(16, 8)
+#define ICE_MI_PC GENMASK_ULL(24, 17)
+#define ICE_MI_PGRN GENMASK_ULL(35, 25)
+#define ICE_MI_CD GENMASK_ULL(38, 36)
+#define ICE_MI_GAC BIT_ULL(39)
+#define ICE_MI_GADM GENMASK_ULL(44, 40)
+#define ICE_MI_GADS GENMASK_ULL(48, 45)
+#define ICE_MI_GADL GENMASK_ULL(53, 49)
+#define ICE_MI_GAI GENMASK_ULL(59, 56)
+#define ICE_MI_GBC BIT_ULL(60)
+#define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bit field */
+#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE)
+#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE)
+
+#define ICE_MI_GBDM_GENMASK_ULL(high, low) \
+ GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S)
+#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61)
+#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66)
+#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70)
+#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77)
+#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S)
+#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82)
+#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87)
+#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91)
+#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98)
+#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S)
+#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103)
+#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108)
+#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112)
+#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119)
+#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bit field */
+#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE)
+#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE)
+#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \
+ 123 - ICE_MI_FLAG_S)
+
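The rebasing trick above deserves a concrete case: the second u64 of a Metadata Init entry is read starting at bit ICE_MI_GBDM_S = 61, so a field occupying absolute bits 69:66 such as ICE_MI_GBDS becomes GENMASK_ULL(69 - 61, 66 - 61) = GENMASK_ULL(8, 5) relative to the shifted word, which is exactly what ICE_MI_GBDM_GENMASK_ULL(69, 66) expands to.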
+/**
+ * ice_metainit_parse_item - parse 192 bits of Metadata Init entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Metadata Init entry
+ * @item: item of Metadata Init entry
+ * @data: Metadata Init entry data to be parsed
+ * @size: size of Metadata Init entry
+ */
+static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_metainit_item *mi = item;
+ u8 *buf = data;
+ u64 d64;
+
+ mi->idx = idx;
+
+ d64 = *(u64 *)buf;
+
+ mi->tsr = FIELD_GET(ICE_MI_TSR, d64);
+ mi->ho = FIELD_GET(ICE_MI_HO, d64);
+ mi->pc = FIELD_GET(ICE_MI_PC, d64);
+ mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64);
+ mi->cd = FIELD_GET(ICE_MI_CD, d64);
+
+ mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64);
+ mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64);
+ mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64);
+ mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64);
+ mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64);
+
+ mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF;
+
+ mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64);
+ mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64);
+ mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64);
+ mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64);
+
+ mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64);
+ mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64);
+ mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64);
+ mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64);
+ mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64);
+
+ mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64);
+ mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64);
+ mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64);
+ mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64);
+ mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF;
+
+ mi->flags = FIELD_GET(ICE_MI_FLAG, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_metainit_dump(hw, mi);
+}
+
+/**
+ * ice_metainit_table_get - create a metainit table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Metadata initialization table.
+ */
+static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT,
+ sizeof(struct ice_metainit_item),
+ ICE_METAINIT_TABLE_SIZE,
+ ice_metainit_parse_item, false);
+}
+
+/**
+ * ice_bst_tcam_search - find a TCAM item with specific type
+ * @tcam_table: the TCAM table
+ * @lbl_table: the lbl table to search
+ * @type: the type we need to match against
+ * @start: start searching from this index
+ *
+ * Return: a pointer to the matching BOOST TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start)
+{
+ u16 i = *start;
+
+ for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ if (lbl_table[i].type == type) {
+ *start = i;
+ return &tcam_table[lbl_table[i].idx];
+ }
+ }
+
+ return NULL;
+}
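Because the hit index is written back through @start, a caller can resume the scan past the previous hit and so enumerate every label of one type. A hedged sketch ('type' and the two table pointers are illustrative):

	struct ice_bst_tcam_item *item;
	u16 start = 0;

	while ((item = ice_bst_tcam_search(tcam_table, lbl_table,
					   type, &start))) {
		/* ... handle item ... */
		start++;	/* step past the current hit */
	}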
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+ dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto);
+}
+
+static void ice_pg_nm_cam_key_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+}
+
+static void ice_pg_cam_action_dump(struct ice_hw *hw,
+ struct ice_pg_cam_action *action)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "action:\n");
+ dev_info(dev, "\tnext_node = %d\n", action->next_node);
+ dev_info(dev, "\tnext_pc = %d\n", action->next_pc);
+ dev_info(dev, "\tis_pg = %d\n", action->is_pg);
+ dev_info(dev, "\tproto_id = %d\n", action->proto_id);
+ dev_info(dev, "\tis_mg = %d\n", action->is_mg);
+ dev_info(dev, "\tmarker_id = %d\n", action->marker_id);
+ dev_info(dev, "\tis_last_round = %d\n", action->is_last_round);
+ dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity);
+ dev_info(dev, "\tho_inc = %d\n", action->ho_inc);
+}
+
+/**
+ * ice_pg_cam_dump - dump a parse graph cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph cam to dump
+ */
+static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+/**
+ * ice_pg_nm_cam_dump - dump a parse graph no match cam info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph no match cam to dump
+ */
+static void ice_pg_nm_cam_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_nm_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+#define ICE_PGCA_NN GENMASK_ULL(10, 0)
+#define ICE_PGCA_NPC GENMASK_ULL(18, 11)
+#define ICE_PGCA_IPG BIT_ULL(19)
+#define ICE_PGCA_PID GENMASK_ULL(30, 23)
+#define ICE_PGCA_IMG BIT_ULL(31)
+#define ICE_PGCA_MID GENMASK_ULL(39, 32)
+#define ICE_PGCA_ILR BIT_ULL(40)
+#define ICE_PGCA_HOP BIT_ULL(41)
+#define ICE_PGCA_HOI GENMASK_ULL(50, 42)
+
+/**
+ * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action
+ * @action: pointer to the Parse Graph CAM Action structure
+ * @data: Parse Graph CAM Action data to be parsed
+ */
+static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data)
+{
+ action->next_node = FIELD_GET(ICE_PGCA_NN, data);
+ action->next_pc = FIELD_GET(ICE_PGCA_NPC, data);
+ action->is_pg = FIELD_GET(ICE_PGCA_IPG, data);
+ action->proto_id = FIELD_GET(ICE_PGCA_PID, data);
+ action->is_mg = FIELD_GET(ICE_PGCA_IMG, data);
+ action->marker_id = FIELD_GET(ICE_PGCA_MID, data);
+ action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data);
+ action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data);
+ action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data);
+}
+
+#define ICE_PGNCK_VLD BIT_ULL(0)
+#define ICE_PGNCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGNCK_F0 BIT_ULL(12)
+#define ICE_PGNCK_F1 BIT_ULL(13)
+#define ICE_PGNCK_F2 BIT_ULL(14)
+#define ICE_PGNCK_F3 BIT_ULL(15)
+#define ICE_PGNCK_BH BIT_ULL(16)
+#define ICE_PGNCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGNCK_AR GENMASK_ULL(40, 25)
+
+/**
+ * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key
+ * @key: pointer to the Parse Graph NoMatch CAM Key structure
+ * @data: Parse Graph NoMatch CAM Key data to be parsed
+ */
+static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data)
+{
+ key->valid = FIELD_GET(ICE_PGNCK_VLD, data);
+ key->node_id = FIELD_GET(ICE_PGNCK_NID, data);
+ key->flag0 = FIELD_GET(ICE_PGNCK_F0, data);
+ key->flag1 = FIELD_GET(ICE_PGNCK_F1, data);
+ key->flag2 = FIELD_GET(ICE_PGNCK_F2, data);
+ key->flag3 = FIELD_GET(ICE_PGNCK_F3, data);
+
+ if (FIELD_GET(ICE_PGNCK_BH, data))
+ key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data);
+}
+
+#define ICE_PGCK_VLD BIT_ULL(0)
+#define ICE_PGCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGCK_F0 BIT_ULL(12)
+#define ICE_PGCK_F1 BIT_ULL(13)
+#define ICE_PGCK_F2 BIT_ULL(14)
+#define ICE_PGCK_F3 BIT_ULL(15)
+#define ICE_PGCK_BH BIT_ULL(16)
+#define ICE_PGCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGCK_AR GENMASK_ULL(40, 25)
+#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bit field */
+#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE)
+#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE)
+#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \
+ 41 - ICE_PGCK_NPK_S)
+
+/**
+ * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key
+ * @key: pointer to the Parse Graph CAM Key structure
+ * @data: Parse Graph CAM Key data to be parsed
+ */
+static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data)
+{
+ u64 d64 = *(u64 *)data;
+
+ key->valid = FIELD_GET(ICE_PGCK_VLD, d64);
+ key->node_id = FIELD_GET(ICE_PGCK_NID, d64);
+ key->flag0 = FIELD_GET(ICE_PGCK_F0, d64);
+ key->flag1 = FIELD_GET(ICE_PGCK_F1, d64);
+ key->flag2 = FIELD_GET(ICE_PGCK_F2, d64);
+ key->flag3 = FIELD_GET(ICE_PGCK_F3, d64);
+
+ if (FIELD_GET(ICE_PGCK_BH, d64))
+ key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64);
+
+ d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF;
+
+ key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64);
+}
+
+#define ICE_PG_CAM_ACT_S 73
+#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph CAM Entry
+ * @item: item of Parse Graph CAM Entry
+ * @data: Parse Graph CAM Entry data to be parsed
+ * @size: size of Parse Graph CAM Entry
+ */
+static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ ice_pg_cam_key_init(&ci->key, buf);
+
+ d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_SP_CAM_KEY_S 56
+#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE)
+
+/**
+ * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph Spill CAM Entry
+ * @item: item of Parse Graph Spill CAM Entry
+ * @data: Parse Graph Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph Spill CAM Entry
+ */
+static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_CAM_ACT_S 41
+#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch CAM Entry
+ * @item: item of Parse Graph NoMatch CAM Entry
+ * @data: Parse Graph NoMatch CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch CAM Entry
+ */
+static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_SP_CAM_ACT_S 56
+#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill
+ * CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch Spill CAM Entry
+ * @item: item of Parse Graph NoMatch Spill CAM Entry
+ * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch Spill CAM Entry
+ */
+static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >>
+ ICE_PG_NM_SP_CAM_ACT_OFF;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+/**
+ * ice_pg_cam_table_get - create a parse graph cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_CAM_TABLE_SIZE,
+ ice_pg_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_sp_cam_table_get - create a parse graph spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph Spill CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_SP_CAM_TABLE_SIZE,
+ ice_pg_sp_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_cam_table_get - create a parse graph no match cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_CAM_TABLE_SIZE,
+ ice_pg_nm_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match Spill CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ ice_pg_nm_sp_cam_parse_item, false);
+}
+
+static bool __ice_pg_cam_match(struct ice_pg_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(key->val)));
+}
+
+static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(item->key.val)));
+}
+
+/**
+ * ice_pg_cam_match - search parse graph cam table by key
+ * @table: parse graph cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG CAM item or NULL.
+ */
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_cam_item *item = &table[i];
+
+ if (__ice_pg_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_pg_nm_cam_match - search parse graph no match cam table by key
+ * @table: parse graph no match cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG No Match CAM item or NULL.
+ */
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_nm_cam_item *item = &table[i];
+
+ if (__ice_pg_nm_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** Ternary match ***/
+/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv
+ * Rules (per bit):
+ * Key == 0 and Key_inv == 0 : Never match
+ * Key == 0 and Key_inv == 1 : Match on bit == 1
+ * Key == 1 and Key_inv == 0 : Match on bit == 0
+ * Key == 1 and Key_inv == 1 : Always match (Don't care)
+ *
+ * Return: true if all bits match, false otherwise.
+ */
+static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat)
+{
+ u8 bit_key, bit_key_inv, bit_pat;
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ bit_key = key & BIT(i);
+ bit_key_inv = key_inv & BIT(i);
+ bit_pat = pat & BIT(i);
+
+ if (bit_key != 0 && bit_key_inv != 0)
+ continue;
+
+ if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat)
+ return false;
+ }
+
+ return true;
+}
+
+static bool ice_ternary_match(const u8 *key, const u8 *key_inv,
+ const u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i]))
+ return false;
+
+ return true;
+}
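A worked example of the per-bit rules above (values chosen for illustration): key = 0xF3 with key_inv = 0x0F encodes "bits 7..4 must be 0, bits 3..2 must be 1, bits 1..0 don't care", so ice_ternary_match_byte() returns true exactly for the patterns 0x0C, 0x0D, 0x0E and 0x0F; any position where both key and key_inv are 0 makes the byte unmatchable.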
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", idx);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_bst_tcam_dump - dump a boost tcam info
+ * @hw: pointer to the hardware structure
+ * @item: boost tcam to dump
+ */
+static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "addr = %d\n", item->addr);
+
+ dev_info(dev, "key : ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv: ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp);
+ dev_info(dev, "pg_prio = %d\n", item->pg_prio);
+
+ ice_bst_np_kb_dump(hw, &item->np_kb);
+ ice_bst_pg_kb_dump(hw, &item->pg_kb);
+
+ ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX);
+ ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX);
+ ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX);
+}
+
+static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %u\n", item->idx);
+ dev_info(dev, "type = %u\n", item->type);
+ dev_info(dev, "label = %s\n", item->label);
+}
+
+#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_BST_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_BST_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_BST_ALU_SXS BIT_ULL(19)
+#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_BST_ALU_INC0 BIT_ULL(38)
+#define ICE_BST_ALU_INC1 BIT_ULL(39)
+#define ICE_BST_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_BST_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_BST_ALU_BA_S	50	/* offset for the 2nd 64-bit field */
+#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \
+ 50 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \
+ 58 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \
+ 75 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \
+ 81 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \
+ 88 - ICE_BST_ALU_BA_S)
+
+/**
+ * ice_bst_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: bit offset of the ALU entry within @data
+ */
+static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64);
+
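+	/* The remaining fields start at bit ICE_BST_ALU_BA_S of the entry;
+	 * reload a 64-bit window from that point so the masks, which are
+	 * defined relative to ICE_BST_ALU_BA_S, line up with the data.
+	 */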
+ idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_BST_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64);
+}
+
+#define ICE_BST_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_BST_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_BST_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_BST_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data);
+}
+
+#define ICE_BST_NPKB_OPC GENMASK(1, 0)
+#define ICE_BST_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_BST_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data);
+}
+
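+/* Layout of a Boost TCAM entry: *_S is a field's bit offset within the
+ * 704-bit entry, *_IDD the byte index it falls in, and *_OFF the residual
+ * bit offset within that byte.
+ */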
+#define ICE_BT_KEY_S 32
+#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE)
+#define ICE_BT_KIV_S 192
+#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE)
+#define ICE_BT_HIG_S 352
+#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_S 360
+#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S)
+#define ICE_BT_NPKB_S 362
+#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE)
+#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE)
+#define ICE_BT_PGKB_S 380
+#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE)
+#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE)
+#define ICE_BT_ALU0_S 415
+#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE)
+#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE)
+#define ICE_BT_ALU1_S 511
+#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE)
+#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE)
+#define ICE_BT_ALU2_S 607
+#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE)
+#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_bst_parse_item - parse 704 bits of Boost TCAM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Boost TCAM entry
+ * @item: item of Boost TCAM entry
+ * @data: Boost TCAM entry data to be parsed
+ * @size: size of Boost TCAM entry
+ */
+static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_bst_tcam_item *ti = item;
+ u8 *buf = (u8 *)data;
+ int i;
+
+ ti->addr = *(u16 *)buf;
+
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) {
+ ti->key[i] = buf[ICE_BT_KEY_IDD + i];
+ ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i];
+ }
+ ti->hit_idx_grp = buf[ICE_BT_HIG_IDD];
+ ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M;
+
+ ice_bst_npkb_init(&ti->np_kb,
+ *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >>
+ ICE_BT_NPKB_OFF);
+ ice_bst_pgkb_init(&ti->pg_kb,
+ *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >>
+ ICE_BT_PGKB_OFF);
+
+ ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF);
+ ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF);
+ ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_bst_tcam_dump(hw, ti);
+}
+
+/**
+ * ice_bst_tcam_table_get - create a boost tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost TCAM table.
+ */
+static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(struct ice_bst_tcam_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_bst_parse_item, true);
+}
+
+static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_lbl_item *lbl_item = item;
+ struct ice_lbl_item *lbl_data = data;
+
+ lbl_item->idx = lbl_data->idx;
+ memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label));
+
+ if (strstarts(lbl_item->label, ICE_LBL_BST_DVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_DVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_SVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN))
+ lbl_item->type = ICE_LBL_BST_TYPE_VXLAN;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE))
+ lbl_item->type = ICE_LBL_BST_TYPE_GENEVE;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI))
+ lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_lbl_dump(hw, lbl_item);
+}
+
+/**
+ * ice_bst_lbl_table_get - create a boost label table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost label table.
+ */
+static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
+ sizeof(struct ice_lbl_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_parse_lbl_item, true);
+}
+
+/**
+ * ice_bst_tcam_match - match a pattern on the boost tcam table
+ * @tcam_table: boost tcam table to search
+ * @pat: pattern to match
+ *
+ * Return: a pointer to the matching Boost TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
+{
+ int i;
+
+ for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ struct ice_bst_tcam_item *item = &tcam_table[i];
+
+ if (item->hit_idx_grp == 0)
+ continue;
+ if (ice_ternary_match(item->key, item->key_inv, pat,
+ ICE_BST_TCAM_KEY_SIZE))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+/**
+ * ice_ptype_mk_tcam_dump - dump a ptype marker tcam item's info
+ * @hw: pointer to the hardware structure
+ * @item: ptype marker tcam to dump
+ */
+static void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
+ struct ice_ptype_mk_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "address = %d\n", item->address);
+ dev_info(dev, "ptype = %d\n", item->ptype);
+
+ dev_info(dev, "key :");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv:");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data, int size)
+{
+ memcpy(item, data, size);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_ptype_mk_tcam_dump(hw,
+ (struct ice_ptype_mk_tcam_item *)item);
+}
+
+/**
+ * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker PType TCAM table.
+ */
+static
+struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE,
+ sizeof(struct ice_ptype_mk_tcam_item),
+ ICE_PTYPE_MK_TCAM_TABLE_SIZE,
+ ice_parse_ptype_mk_tcam_item, true);
+}
+
+/**
+ * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table
+ * @table: ptype marker tcam table to search
+ * @pat: pattern to match
+ * @len: length of the pattern
+ *
+ * Return: a pointer to the matching Marker PType item or NULL.
+ */
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) {
+ struct ice_ptype_mk_tcam_item *item = &table[i];
+
+ if (ice_ternary_match(item->key, item->key_inv, pat, len))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+/**
+ * ice_mk_grp_dump - dump a marker group item's info
+ * @hw: pointer to the hardware structure
+ * @item: marker group item to dump
+ */
+static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "markers: ");
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ dev_info(dev, "%d ", item->markers[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_mk_grp_item *grp = item;
+ u8 *buf = data;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ grp->markers[i] = buf[i];
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_mk_grp_dump(hw, grp);
+}
+
+/**
+ * ice_mk_grp_table_get - create a marker group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker Group ID table.
+ */
+static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
+ sizeof(struct ice_mk_grp_item),
+ ICE_MK_GRP_TABLE_SIZE,
+ ice_mk_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+static void ice_proto_off_dump(struct ice_hw *hw,
+ struct ice_proto_off *po, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "proto %d\n", idx);
+ dev_info(dev, "\tpolarity = %d\n", po->polarity);
+ dev_info(dev, "\tproto_id = %d\n", po->proto_id);
+ dev_info(dev, "\toffset = %d\n", po->offset);
+}
+
+/**
+ * ice_proto_grp_dump - dump a proto group item info
+ * @hw: pointer to the hardware structure
+ * @item: proto group item to dump
+ */
+static void ice_proto_grp_dump(struct ice_hw *hw,
+ struct ice_proto_grp_item *item)
+{
+ int i;
+
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
+ ice_proto_off_dump(hw, &item->po[i], i);
+}
+
+#define ICE_PO_POL BIT(0)
+#define ICE_PO_PID GENMASK(8, 1)
+#define ICE_PO_OFF GENMASK(21, 12)
+
+/**
+ * ice_proto_off_parse - parse 22 bits of Protocol entry
+ * @po: pointer to the Protocol entry structure
+ * @data: Protocol entry data to be parsed
+ */
+static void ice_proto_off_parse(struct ice_proto_off *po, u32 data)
+{
+ po->polarity = FIELD_GET(ICE_PO_POL, data);
+ po->proto_id = FIELD_GET(ICE_PO_PID, data);
+ po->offset = FIELD_GET(ICE_PO_OFF, data);
+}
+
+/**
+ * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Protocol Group Table entry
+ * @item: item of Protocol Group Table entry
+ * @data: Protocol Group Table entry data to be parsed
+ * @size: size of Protocol Group Table entry
+ */
+static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_proto_grp_item *grp = item;
+ u8 *buf = (u8 *)data;
+ u8 idd, off;
+ u32 d32;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE;
+ off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE;
+ d32 = *((u32 *)&buf[idd]) >> off;
+ ice_proto_off_parse(&grp->po[i], d32);
+ }
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_proto_grp_dump(hw, grp);
+}
+
+/**
+ * ice_proto_grp_table_get - create a proto group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Protocol Group table.
+ */
+static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
+ sizeof(struct ice_proto_grp_item),
+ ICE_PROTO_GRP_TABLE_SIZE,
+ ice_proto_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+/**
+ * ice_flg_rd_dump - dump a flag redirect item info
+ * @hw: pointer to the hardware structure
+ * @item: flag redirect item to dump
+ */
+static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ dev_info(dev, "expose = %d\n", item->expose);
+ dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id);
+}
+
+#define ICE_FRT_EXPO BIT(0)
+#define ICE_FRT_IFID GENMASK(6, 1)
+
+/**
+ * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Flag Redirect Table entry
+ * @item: item of Flag Redirect Table entry
+ * @data: Flag Redirect Table entry data to be parsed
+ * @size: size of Flag Redirect Table entry
+ */
+static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_flg_rd_item *rdi = item;
+ u8 d8 = *(u8 *)data;
+
+ rdi->idx = idx;
+ rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8);
+ rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_flg_rd_dump(hw, rdi);
+}
+
+/**
+ * ice_flg_rd_table_get - create a flag redirect table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Flags Redirection table.
+ */
+static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
+ sizeof(struct ice_flg_rd_item),
+ ICE_FLG_RD_TABLE_SIZE,
+ ice_flg_rd_parse_item, false);
+}
+
+/**
+ * ice_flg_redirect - redirect a parser flag to packet flag
+ * @table: flag redirect table
+ * @psr_flg: parser flag to redirect
+ *
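+ * Each exposed table entry i maps internal parser flag bit
+ * @table[i].intr_flg_id to bit i of the returned packet flags.
+ *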
+ * Return: redirected packet flags, or 0 if @psr_flg is 0.
+ */
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
+{
+ u64 flg = 0;
+ int i;
+
+ for (i = 0; i < ICE_FLG_RDT_SIZE; i++) {
+ struct ice_flg_rd_item *item = &table[i];
+
+ if (!item->expose)
+ continue;
+
+ if (psr_flg & BIT(item->intr_flg_id))
+ flg |= BIT(i);
+ }
+
+ return flg;
+}
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+static void ice_xlt_kb_entry_dump(struct ice_hw *hw,
+ struct ice_xlt_kb_entry *entry, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "key builder entry %d\n", idx);
+ dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel);
+ dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel);
+
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++)
+ dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]);
+
+ dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel);
+ dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel);
+}
+
+/**
+ * ice_xlt_kb_dump - dump an xlt key build info
+ * @hw: pointer to the hardware structure
+ * @kb: key build to dump
+ */
+static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm);
+ dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm);
+ dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm);
+ dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15);
+ dev_info(dev, "flag15 hi = 0x%08x\n",
+ (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE)));
+
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_xlt_kb_entry_dump(hw, &kb->entries[i], i);
+}
+
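+/* The 192-bit key builder entry is decoded through three 64-bit windows;
+ * each *_S value is a window's starting bit, and the field masks below are
+ * defined relative to their window's start.
+ */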
+#define ICE_XLT_KB_X1AS_S	32	/* offset for the 1st 64-bit field */
+#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \
+ 32 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \
+ 35 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \
+ 38 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \
+ 47 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \
+ 56 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \
+ 65 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \
+ 74 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \
+ 83 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL06_S	92	/* offset for the 2nd 64-bit field */
+#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \
+ 92 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \
+ 101 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \
+ 110 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \
+ 119 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \
+ 128 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \
+ 137 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL12_S	146	/* offset for the 3rd 64-bit field */
+#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12 GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \
+ 146 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \
+ 155 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \
+ 164 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \
+ 182 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \
+ 187 - ICE_XLT_KB_FL12_S)
+
+/**
+ * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry
+ * @entry: pointer to the XLT Key Builder entry structure
+ * @data: XLT Key Builder entry data to be parsed
+ */
+static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data)
+{
+ u8 i = 0;
+ u64 d64;
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF;
+
+ entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64);
+ entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64);
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64);
+ entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64);
+
+ entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64);
+ entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64);
+}
+
+#define ICE_XLT_KB_X1PM_OFF 0
+#define ICE_XLT_KB_X2PM_OFF 1
+#define ICE_XLT_KB_PIPM_OFF 2
+#define ICE_XLT_KB_FL15_OFF 4
+#define ICE_XLT_KB_TBL_OFF 12
+
+/**
+ * ice_parse_kb_data - parse 204 bits of XLT Key Build Table
+ * @hw: pointer to the hardware structure
+ * @kb: pointer to the XLT Key Build Table structure
+ * @data: XLT Key Build Table data to be parsed
+ */
+static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb,
+ void *data)
+{
+ u8 *buf = data;
+ int i;
+
+ kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF];
+ kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF];
+ kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF];
+
+ kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF];
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_kb_entry_init(&kb->entries[i],
+ &buf[ICE_XLT_KB_TBL_OFF +
+ i * ICE_XLT_KB_TBL_ENTRY_SIZE]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_xlt_kb_dump(hw, kb);
+}
+
+static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ struct ice_xlt_kb *kb;
+ void *data;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ kb = kzalloc(sizeof(*kb), GFP_KERNEL);
+ if (!kb)
+ return ERR_PTR(-ENOMEM);
+
+ data = ice_pkg_enum_section(seg, &state, sect_type);
+ if (!data) {
+ ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n",
+ sect_type);
+ kfree(kb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ice_parse_kb_data(hw, kb, data);
+
+ return kb;
+}
+
+/**
+ * ice_xlt_kb_get_sw - create switch xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Switch.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW);
+}
+
+/**
+ * ice_xlt_kb_get_acl - create acl xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for ACL.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL);
+}
+
+/**
+ * ice_xlt_kb_get_fd - create fdir xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Flow Director.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD);
+}
+
+/**
+ * ice_xlt_kb_get_rss - create rss xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for RSS.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS);
+}
+
+#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0)
+
+/**
+ * ice_xlt_kb_flag_get - aggregate 64 packet flag bits into a 16-bit XLT flag
+ * @kb: xlt key build
+ * @pkt_flag: 64-bit packet flag
+ *
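+ * Bit 15 of the result is set when @pkt_flag intersects @kb->flag15;
+ * bits 0-14 are set when the packet flag selected by the corresponding
+ * flg0_14_sel entry of the first key builder entry is set.
+ *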
+ * Return: XLT flag, or 0 if @pkt_flag is 0.
+ */
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
+{
+ struct ice_xlt_kb_entry *entry = &kb->entries[0];
+ u16 flag = 0;
+ int i;
+
+ /* check flag 15 */
+ if (kb->flag15 & pkt_flag)
+ flag = BIT(ICE_XLT_KB_FLAG0_14_CNT);
+
+	/* check flags 0 - 14 */
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) {
+ /* only check first entry */
+ u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK;
+
+ if (pkt_flag & BIT(idx))
+ flag |= (u16)BIT(i);
+ }
+
+ return flag;
+}
+
+/*** Parser API ***/
+/**
+ * ice_parser_create - create a parser instance
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated parser instance or ERR_PTR
+ * in case of error.
+ */
+struct ice_parser *ice_parser_create(struct ice_hw *hw)
+{
+ struct ice_parser *p;
+ void *err;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ p->hw = hw;
+ p->rt.psr = p;
+
+ p->imem_table = ice_imem_table_get(hw);
+ if (IS_ERR(p->imem_table)) {
+ err = p->imem_table;
+ goto err;
+ }
+
+ p->mi_table = ice_metainit_table_get(hw);
+ if (IS_ERR(p->mi_table)) {
+ err = p->mi_table;
+ goto err;
+ }
+
+ p->pg_cam_table = ice_pg_cam_table_get(hw);
+ if (IS_ERR(p->pg_cam_table)) {
+ err = p->pg_cam_table;
+ goto err;
+ }
+
+ p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_sp_cam_table)) {
+ err = p->pg_sp_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_cam_table)) {
+ err = p->pg_nm_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_sp_cam_table)) {
+ err = p->pg_nm_sp_cam_table;
+ goto err;
+ }
+
+ p->bst_tcam_table = ice_bst_tcam_table_get(hw);
+ if (IS_ERR(p->bst_tcam_table)) {
+ err = p->bst_tcam_table;
+ goto err;
+ }
+
+ p->bst_lbl_table = ice_bst_lbl_table_get(hw);
+ if (IS_ERR(p->bst_lbl_table)) {
+ err = p->bst_lbl_table;
+ goto err;
+ }
+
+ p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw);
+ if (IS_ERR(p->ptype_mk_tcam_table)) {
+ err = p->ptype_mk_tcam_table;
+ goto err;
+ }
+
+ p->mk_grp_table = ice_mk_grp_table_get(hw);
+ if (IS_ERR(p->mk_grp_table)) {
+ err = p->mk_grp_table;
+ goto err;
+ }
+
+ p->proto_grp_table = ice_proto_grp_table_get(hw);
+ if (IS_ERR(p->proto_grp_table)) {
+ err = p->proto_grp_table;
+ goto err;
+ }
+
+ p->flg_rd_table = ice_flg_rd_table_get(hw);
+ if (IS_ERR(p->flg_rd_table)) {
+ err = p->flg_rd_table;
+ goto err;
+ }
+
+ p->xlt_kb_sw = ice_xlt_kb_get_sw(hw);
+ if (IS_ERR(p->xlt_kb_sw)) {
+ err = p->xlt_kb_sw;
+ goto err;
+ }
+
+ p->xlt_kb_acl = ice_xlt_kb_get_acl(hw);
+ if (IS_ERR(p->xlt_kb_acl)) {
+ err = p->xlt_kb_acl;
+ goto err;
+ }
+
+ p->xlt_kb_fd = ice_xlt_kb_get_fd(hw);
+ if (IS_ERR(p->xlt_kb_fd)) {
+ err = p->xlt_kb_fd;
+ goto err;
+ }
+
+ p->xlt_kb_rss = ice_xlt_kb_get_rss(hw);
+ if (IS_ERR(p->xlt_kb_rss)) {
+ err = p->xlt_kb_rss;
+ goto err;
+ }
+
+ return p;
+err:
+ ice_parser_destroy(p);
+ return err;
+}
+
+/**
+ * ice_parser_destroy - destroy a parser instance
+ * @psr: pointer to a parser instance
+ */
+void ice_parser_destroy(struct ice_parser *psr)
+{
+ kfree(psr->imem_table);
+ kfree(psr->mi_table);
+ kfree(psr->pg_cam_table);
+ kfree(psr->pg_sp_cam_table);
+ kfree(psr->pg_nm_cam_table);
+ kfree(psr->pg_nm_sp_cam_table);
+ kfree(psr->bst_tcam_table);
+ kfree(psr->bst_lbl_table);
+ kfree(psr->ptype_mk_tcam_table);
+ kfree(psr->mk_grp_table);
+ kfree(psr->proto_grp_table);
+ kfree(psr->flg_rd_table);
+ kfree(psr->xlt_kb_sw);
+ kfree(psr->xlt_kb_acl);
+ kfree(psr->xlt_kb_fd);
+ kfree(psr->xlt_kb_rss);
+
+ kfree(psr);
+}
+
+/**
+ * ice_parser_run - parse a raw packet buffer and return the result
+ * @psr: pointer to a parser instance
+ * @pkt_buf: packet data
+ * @pkt_len: packet length
+ * @rslt: input/output parameter to save parser result.
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt)
+{
+ ice_parser_rt_reset(&psr->rt);
+ ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);
+
+ return ice_parser_rt_execute(&psr->rt, rslt);
+}
+
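+/* Example usage (illustrative sketch; @hw, @pkt_buf and @pkt_len are
+ * caller-provided, error handling abbreviated):
+ *
+ *	struct ice_parser_result rslt;
+ *	struct ice_parser *psr;
+ *	int err;
+ *
+ *	psr = ice_parser_create(hw);
+ *	if (IS_ERR(psr))
+ *		return PTR_ERR(psr);
+ *	err = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
+ *	if (!err)
+ *		ice_parser_result_dump(hw, &rslt);
+ *	ice_parser_destroy(psr);
+ */
+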
+/**
+ * ice_parser_result_dump - dump a parser result info
+ * @hw: pointer to the hardware structure
+ * @rslt: parser result info to dump
+ */
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "ptype = %d\n", rslt->ptype);
+ for (i = 0; i < rslt->po_num; i++)
+ dev_info(dev, "proto = %d, offset = %d\n",
+ rslt->po[i].proto_id, rslt->po[i].offset);
+
+ dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr);
+ dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt);
+ dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw);
+ dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd);
+ dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss);
+}
+
+#define ICE_BT_VLD_KEY 0xFF
+#define ICE_BT_INV_KEY 0xFE
+
+static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type,
+ bool on)
+{
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+ u8 key;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY;
+ item->key[ICE_BT_VM_OFF] = key;
+ item->key_inv[ICE_BT_VM_OFF] = key;
+ i++;
+ }
+}
+
+/**
+ * ice_parser_dvm_set - configure double vlan mode for parser
+ * @psr: pointer to a parser instance
+ * @on: true to turn on; false to turn off
+ */
+void ice_parser_dvm_set(struct ice_parser *psr, bool on)
+{
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on);
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on);
+}
+
+static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type,
+ u16 udp_port, bool on)
+{
+ u8 *buf = (u8 *)&udp_port;
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+		/* found an empty slot to add to */
+ if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY &&
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) {
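+			/* An exact match on a port byte b is encoded as
+			 * key = ICE_BT_VLD_KEY - b (i.e. ~b) with
+			 * key_inv = b: each key/key_inv bit pair then
+			 * selects "match on 1" where b has a 1 and
+			 * "match on 0" where b has a 0.
+			 */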
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] =
+ buf[ICE_UDP_PORT_OFF_L];
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] =
+ buf[ICE_UDP_PORT_OFF_H];
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L];
+ item->key[ICE_BT_TUN_PORT_OFF_H] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H];
+
+ return 0;
+		/* found a matching slot to delete */
+ } else if (!on &&
+ (item->key_inv[ICE_BT_TUN_PORT_OFF_L] ==
+ buf[ICE_UDP_PORT_OFF_L] ||
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] ==
+ buf[ICE_UDP_PORT_OFF_H])) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: vxlan tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on);
+}
+
+/**
+ * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: geneve tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on);
+}
+
+/**
+ * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: ecpri tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI,
+ udp_port, on);
+}
+
+/**
+ * ice_nearest_proto_id - find nearest protocol ID
+ * @rslt: pointer to a parser result instance
+ * @offset: offset into the packet of the field being matched
+ * @proto_id: the protocol ID (output)
+ * @proto_off: the field's offset relative to the protocol header (output)
+ *
+ * From the protocols in @rslt, find the one whose header offset is closest
+ * to, but not greater than, @offset.
+ *
+ * Return: true if such a protocol is found at an even distance from
+ * @offset, in which case @proto_id and @proto_off are set; false otherwise.
+ */
+static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
+ u8 *proto_id, u16 *proto_off)
+{
+ u16 dist = U16_MAX;
+ u8 proto = 0;
+ int i;
+
+ for (i = 0; i < rslt->po_num; i++) {
+ if (offset < rslt->po[i].offset)
+ continue;
+ if (offset - rslt->po[i].offset < dist) {
+ proto = rslt->po[i].proto_id;
+ dist = offset - rslt->po[i].offset;
+ }
+ }
+
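+	/* field vectors are extracted as 16-bit words, so the offset from
+	 * the protocol header must be even
+	 */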
+ if (dist % 2)
+ return false;
+
+ *proto_id = proto;
+ *proto_off = dist;
+
+ return true;
+}
+
+/* Default flag masks to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2.
+ * In the future, the flag masks should be learned from the DDP package.
+ */
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010
+
+/**
+ * ice_parser_profile_init - initialize an FXP profile based on a parser result
+ * @rslt: an instance of a parser result
+ * @pkt_buf: packet data buffer
+ * @msk_buf: packet mask buffer
+ * @buf_len: packet length
+ * @blk: FXP pipeline stage
+ * @prof: input/output parameter to save the profile
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof)
+{
+ u8 proto_id = U8_MAX;
+ u16 proto_off = 0;
+ u16 off;
+
+ memset(prof, 0, sizeof(*prof));
+ set_bit(rslt->ptype, prof->ptypes);
+ if (blk == ICE_BLK_SW) {
+ prof->flags = rslt->flags_sw;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
+ } else if (blk == ICE_BLK_ACL) {
+ prof->flags = rslt->flags_acl;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
+ } else if (blk == ICE_BLK_FD) {
+ prof->flags = rslt->flags_fd;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
+ } else if (blk == ICE_BLK_RSS) {
+ prof->flags = rslt->flags_rss;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
+ } else {
+ return -EINVAL;
+ }
+
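+	/* Scan the mask buffer in overlapping 16-bit windows; each window
+	 * with any mask bit set becomes one field vector entry, anchored to
+	 * the protocol header found by ice_nearest_proto_id().
+	 */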
+ for (off = 0; off < buf_len - 1; off++) {
+ if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
+ continue;
+ if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off))
+ continue;
+ if (prof->fv_num >= ICE_PARSER_FV_MAX)
+ return -EINVAL;
+
+ prof->fv[prof->fv_num].proto_id = proto_id;
+ prof->fv[prof->fv_num].offset = proto_off;
+ prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
+ prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
+ prof->fv_num++;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_parser_profile_dump - dump an FXP profile info
+ * @hw: pointer to the hardware structure
+ * @prof: profile info to dump
+ */
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ u16 i;
+
+ dev_info(dev, "ptypes:\n");
+ for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
+ if (test_bit(i, prof->ptypes))
+ dev_info(dev, "\t%u\n", i);
+
+ for (i = 0; i < prof->fv_num; i++)
+ dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n",
+ prof->fv[i].proto_id, prof->fv[i].offset,
+ prof->fv[i].spec, prof->fv[i].msk);
+
+ dev_info(dev, "flags = 0x%04x\n", prof->flags);
+ dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
new file mode 100644
index 000000000000..6509d807627c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _ICE_PARSER_H_
+#define _ICE_PARSER_H_
+
+#define ICE_SEC_DATA_OFFSET 4
+#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
+#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
+#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
+#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
+#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
+#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
+#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
+#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
+
+#define ICE_SEC_LBL_DATA_OFFSET 2
+#define ICE_SID_LBL_ENTRY_SIZE 66
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+#define ICE_IMEM_TABLE_SIZE 192
+
+/* Boost TCAM main control; if a bit is set and the TCAM hits, the TCAM
+ * output overrides the iMEM output.
+ */
+struct ice_bst_main {
+ bool alu0;
+ bool alu1;
+ bool alu2;
+ bool pg;
+};
+
+struct ice_bst_keybuilder {
+ u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */
+ bool tsr_ctrl; /* TCAM Search Register control */
+};
+
+/* Next protocol Key builder */
+struct ice_np_keybuilder {
+ u8 opc;
+ u8 start_reg0;
+ u8 len_reg1;
+};
+
+enum ice_np_keybuilder_opcode {
+ ICE_NPKB_OPC_EXTRACT = 0,
+ ICE_NPKB_OPC_BUILD = 1,
+ ICE_NPKB_OPC_BYPASS = 2,
+};
+
+/* Parse Graph Key builder */
+struct ice_pg_keybuilder {
+ bool flag0_ena;
+ bool flag1_ena;
+ bool flag2_ena;
+ bool flag3_ena;
+ u8 flag0_idx;
+ u8 flag1_idx;
+ u8 flag2_idx;
+ u8 flag3_idx;
+ u8 alu_reg_idx;
+};
+
+enum ice_alu_idx {
+ ICE_ALU0_IDX = 0,
+ ICE_ALU1_IDX = 1,
+ ICE_ALU2_IDX = 2,
+};
+
+enum ice_alu_opcode {
+ ICE_ALU_PARK = 0,
+ ICE_ALU_MOV_ADD = 1,
+ ICE_ALU_ADD = 2,
+ ICE_ALU_MOV_AND = 4,
+ ICE_ALU_AND = 5,
+ ICE_ALU_AND_IMM = 6,
+ ICE_ALU_MOV_OR = 7,
+ ICE_ALU_OR = 8,
+ ICE_ALU_MOV_XOR = 9,
+ ICE_ALU_XOR = 10,
+ ICE_ALU_NOP = 11,
+ ICE_ALU_BR = 12,
+ ICE_ALU_BREQ = 13,
+ ICE_ALU_BRNEQ = 14,
+ ICE_ALU_BRGT = 15,
+ ICE_ALU_BRLT = 16,
+ ICE_ALU_BRGEQ = 17,
+ ICE_ALU_BRLEG = 18,
+ ICE_ALU_SETEQ = 19,
+ ICE_ALU_ANDEQ = 20,
+ ICE_ALU_OREQ = 21,
+ ICE_ALU_SETNEQ = 22,
+ ICE_ALU_ANDNEQ = 23,
+ ICE_ALU_ORNEQ = 24,
+ ICE_ALU_SETGT = 25,
+ ICE_ALU_ANDGT = 26,
+ ICE_ALU_ORGT = 27,
+ ICE_ALU_SETLT = 28,
+ ICE_ALU_ANDLT = 29,
+ ICE_ALU_ORLT = 30,
+ ICE_ALU_MOV_SUB = 31,
+ ICE_ALU_SUB = 32,
+ ICE_ALU_INVALID = 64,
+};
+
+enum ice_proto_off_opcode {
+ ICE_PO_OFF_REMAIN = 0,
+ ICE_PO_OFF_HDR_ADD = 1,
+ ICE_PO_OFF_HDR_SUB = 2,
+};
+
+struct ice_alu {
+ enum ice_alu_opcode opc;
+ u8 src_start;
+ u8 src_len;
+ bool shift_xlate_sel;
+ u8 shift_xlate_key;
+ u8 src_reg_id;
+ u8 dst_reg_id;
+ bool inc0;
+ bool inc1;
+ u8 proto_offset_opc;
+ u8 proto_offset;
+ u8 branch_addr;
+ u16 imm;
+ bool dedicate_flags_ena;
+ u8 dst_start;
+ u8 dst_len;
+ bool flags_extr_imm;
+ u8 flags_start_imm;
+};
+
+/* Parser program code (iMEM) */
+struct ice_imem_item {
+ u16 idx;
+ struct ice_bst_main b_m;
+ struct ice_bst_keybuilder b_kb;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+#define ICE_METAINIT_TABLE_SIZE 16
+
+/* Metadata Initialization item */
+struct ice_metainit_item {
+ u16 idx;
+
+ u8 tsr; /* TCAM Search key Register */
+ u16 ho; /* Header Offset register */
+ u16 pc; /* Program Counter register */
+ u16 pg_rn; /* Parse Graph Root Node */
+ u8 cd; /* Control Domain ID */
+
+ /* General Purpose Registers */
+ bool gpr_a_ctrl;
+ u8 gpr_a_data_mdid;
+ u8 gpr_a_data_start;
+ u8 gpr_a_data_len;
+ u8 gpr_a_id;
+
+ bool gpr_b_ctrl;
+ u8 gpr_b_data_mdid;
+ u8 gpr_b_data_start;
+ u8 gpr_b_data_len;
+ u8 gpr_b_id;
+
+ bool gpr_c_ctrl;
+ u8 gpr_c_data_mdid;
+ u8 gpr_c_data_start;
+ u8 gpr_c_data_len;
+ u8 gpr_c_id;
+
+ bool gpr_d_ctrl;
+ u8 gpr_d_data_mdid;
+ u8 gpr_d_data_start;
+ u8 gpr_d_data_len;
+ u8 gpr_d_id;
+
+ u64 flags; /* Initial value for all flags */
+};
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+#define ICE_PG_CAM_TABLE_SIZE 2048
+#define ICE_PG_SP_CAM_TABLE_SIZE 128
+#define ICE_PG_NM_CAM_TABLE_SIZE 1024
+#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
+
+struct ice_pg_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id; /* Node ID of protocol in parse graph */
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx; /* Boost TCAM match index */
+ u16 alu_reg;
+ u32 next_proto; /* next Protocol value (must be last) */
+ );
+};
+
+struct ice_pg_nm_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id;
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx;
+ u16 alu_reg;
+ );
+};
+
+struct ice_pg_cam_action {
+ u16 next_node; /* Parser Node ID for the next round */
+ u8 next_pc; /* next Program Counter */
+ bool is_pg; /* is protocol group */
+ u8 proto_id; /* protocol ID or proto group ID */
+ bool is_mg; /* is marker group */
+ u8 marker_id; /* marker ID or marker group ID */
+ bool is_last_round;
+ bool ho_polarity; /* header offset polarity */
+ u16 ho_inc;
+};
+
+/* Parse Graph item */
+struct ice_pg_cam_item {
+ u16 idx;
+ struct ice_pg_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+/* Parse Graph No Match item */
+struct ice_pg_nm_cam_item {
+ u16 idx;
+ struct ice_pg_nm_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key);
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key);
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+#define ICE_BST_TCAM_TABLE_SIZE 256
+#define ICE_BST_TCAM_KEY_SIZE 20
+#define ICE_BST_KEY_TCAM_SIZE 19
+
+/* Boost TCAM item */
+struct ice_bst_tcam_item {
+ u16 addr;
+ u8 key[ICE_BST_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_BST_TCAM_KEY_SIZE];
+ u8 hit_idx_grp;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+#define ICE_LBL_LEN 64
+#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM"
+#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM"
+#define ICE_LBL_TNL_VXLAN "TNL_VXLAN"
+#define ICE_LBL_TNL_GENEVE "TNL_GENEVE"
+#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI"
+
+enum ice_lbl_type {
+ ICE_LBL_BST_TYPE_UNKNOWN,
+ ICE_LBL_BST_TYPE_DVM,
+ ICE_LBL_BST_TYPE_SVM,
+ ICE_LBL_BST_TYPE_VXLAN,
+ ICE_LBL_BST_TYPE_GENEVE,
+ ICE_LBL_BST_TYPE_UDP_ECPRI,
+};
+
+struct ice_lbl_item {
+ u16 idx;
+ char label[ICE_LBL_LEN];
+
+ /* must be at the end, not part of the DDP section */
+ enum ice_lbl_type type;
+};
+
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start);
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024
+#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10
+
+struct ice_ptype_mk_tcam_item {
+ u16 address;
+ u16 ptype;
+ u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+} __packed;
+
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len);
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+#define ICE_MK_GRP_TABLE_SIZE 128
+#define ICE_MK_COUNT_PER_GRP 8
+
+/* Marker Group item */
+struct ice_mk_grp_item {
+ int idx;
+ u8 markers[ICE_MK_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+#define ICE_PROTO_COUNT_PER_GRP 8
+#define ICE_PROTO_GRP_TABLE_SIZE 192
+#define ICE_PROTO_GRP_ITEM_SIZE 22
+struct ice_proto_off {
+ bool polarity; /* true: positive, false: negative */
+ u8 proto_id;
+	u16 offset; /* 10-bit protocol offset */
+};
+
+/* Protocol Group item */
+struct ice_proto_grp_item {
+ u16 idx;
+ struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+#define ICE_FLG_RD_TABLE_SIZE 64
+#define ICE_FLG_RDT_SIZE 64
+
+/* Flags Redirection item */
+struct ice_flg_rd_item {
+ u16 idx;
+ bool expose;
+ u8 intr_flg_id; /* Internal Flag ID */
+};
+
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+#define ICE_XLT_KB_FLAG0_14_CNT 15
+#define ICE_XLT_KB_TBL_CNT 8
+#define ICE_XLT_KB_TBL_ENTRY_SIZE 24
+
+struct ice_xlt_kb_entry {
+ u8 xlt1_ad_sel;
+ u8 xlt2_ad_sel;
+ u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT];
+ u8 xlt1_md_sel;
+ u8 xlt2_md_sel;
+};
+
+/* XLT Key Builder */
+struct ice_xlt_kb {
+ u8 xlt1_pm; /* XLT1 Partition Mode */
+ u8 xlt2_pm; /* XLT2 Partition Mode */
+ u8 prof_id_pm; /* Profile ID Partition Mode */
+ u64 flag15;
+
+ struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT];
+};
+
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+
+/*** Parser API ***/
+#define ICE_GPR_HV_IDX 64
+#define ICE_GPR_HV_SIZE 32
+#define ICE_GPR_ERR_IDX 84
+#define ICE_GPR_FLG_IDX 104
+#define ICE_GPR_FLG_SIZE 16
+
+#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */
+#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */
+#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */
+#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */
+
+#define ICE_PARSER_MAX_PKT_LEN 504
+#define ICE_PARSER_PKT_REV 32
+#define ICE_PARSER_GPR_NUM 128
+#define ICE_PARSER_FLG_NUM 64
+#define ICE_PARSER_ERR_NUM 16
+#define ICE_BST_KEY_SIZE 10
+#define ICE_MARKER_ID_SIZE 9
+#define ICE_MARKER_MAX_SIZE \
+ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
+#define ICE_MARKER_ID_NUM 8
+#define ICE_PO_PAIR_SIZE 256
+
+struct ice_gpr_pu {
+	/* array of flags to indicate if a GPR needs to be updated */
+ bool gpr_val_upd[ICE_PARSER_GPR_NUM];
+ u16 gpr_val[ICE_PARSER_GPR_NUM];
+ u64 flg_msk;
+ u64 flg_val;
+ u16 err_msk;
+ u16 err_val;
+};
+
+enum ice_pg_prio {
+ ICE_PG_P0 = 0,
+ ICE_PG_P1 = 1,
+ ICE_PG_P2 = 2,
+ ICE_PG_P3 = 3,
+};
+
+struct ice_parser_rt {
+ struct ice_parser *psr;
+ u16 gpr[ICE_PARSER_GPR_NUM];
+ u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ u16 pkt_len;
+ u16 po;
+ u8 bst_key[ICE_BST_KEY_SIZE];
+ struct ice_pg_cam_key pg_key;
+ struct ice_alu *alu0;
+ struct ice_alu *alu1;
+ struct ice_alu *alu2;
+ struct ice_pg_cam_action *action;
+ u8 pg_prio;
+ struct ice_gpr_pu pu;
+ u8 markers[ICE_MARKER_ID_SIZE];
+ bool protocols[ICE_PO_PAIR_SIZE];
+ u16 offsets[ICE_PO_PAIR_SIZE];
+};
+
+struct ice_parser_proto_off {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+};
+
+#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16
+#define ICE_PARSER_FLAG_PSR_SIZE 8
+#define ICE_PARSER_FV_SIZE 48
+#define ICE_PARSER_FV_MAX 24
+#define ICE_BT_TUN_PORT_OFF_H 16
+#define ICE_BT_TUN_PORT_OFF_L 15
+#define ICE_BT_VM_OFF 0
+#define ICE_UDP_PORT_OFF_H 1
+#define ICE_UDP_PORT_OFF_L 0
+
+struct ice_parser_result {
+	u16 ptype; /* 16-bit hardware PTYPE */
+ /* array of protocol and header offset pairs */
+ struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE];
+	int po_num; /* number of protocol-offset pairs (must be <= 16) */
+ u64 flags_psr; /* parser flags */
+ u64 flags_pkt; /* packet flags */
+ u16 flags_sw; /* key builder flags for SW */
+ u16 flags_acl; /* key builder flags for ACL */
+ u16 flags_fd; /* key builder flags for FD */
+ u16 flags_rss; /* key builder flags for RSS */
+};
+
+void ice_parser_rt_reset(struct ice_parser_rt *rt);
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len);
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt);
+
+struct ice_parser {
+ struct ice_hw *hw; /* pointer to the hardware structure */
+
+ struct ice_imem_item *imem_table;
+ struct ice_metainit_item *mi_table;
+
+ struct ice_pg_cam_item *pg_cam_table;
+ struct ice_pg_cam_item *pg_sp_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
+
+ struct ice_bst_tcam_item *bst_tcam_table;
+ struct ice_lbl_item *bst_lbl_table;
+ struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
+ struct ice_mk_grp_item *mk_grp_table;
+ struct ice_proto_grp_item *proto_grp_table;
+ struct ice_flg_rd_item *flg_rd_table;
+
+ struct ice_xlt_kb *xlt_kb_sw;
+ struct ice_xlt_kb *xlt_kb_acl;
+ struct ice_xlt_kb *xlt_kb_fd;
+ struct ice_xlt_kb *xlt_kb_rss;
+
+ struct ice_parser_rt rt;
+};
+
+struct ice_parser *ice_parser_create(struct ice_hw *hw);
+void ice_parser_destroy(struct ice_parser *psr);
+void ice_parser_dvm_set(struct ice_parser *psr, bool on);
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt);
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
+
+struct ice_parser_fv {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+ u16 spec; /* pattern to match */
+ u16 msk; /* pattern mask */
+};
+
+struct ice_parser_profile {
+ /* array of field vectors */
+ struct ice_parser_fv fv[ICE_PARSER_FV_SIZE];
+	int fv_num; /* number of field vectors (must be <= 48) */
+ u16 flags; /* key builder flags */
+ u16 flags_msk; /* key builder flag mask */
+
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */
+};
+
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof);
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof);
+#endif /* _ICE_PARSER_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
new file mode 100644
index 000000000000..dedf5e854e4b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
+{
+ rt->gpr[ICE_GPR_TSR_IDX] = tsr;
+}
+
+static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho)
+{
+ rt->gpr[ICE_GPR_HO_IDX] = ho;
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc)
+{
+ rt->gpr[ICE_GPR_NP_IDX] = pc;
+}
+
+static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
+{
+ rt->gpr[ICE_GPR_NN_IDX] = node;
+}
+
+static void
+ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+ unsigned int word, id;
+
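+	/* the 64 parser flags span four consecutive 16-bit flag GPRs */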
+ word = idx / ICE_GPR_FLG_SIZE;
+ id = idx % ICE_GPR_FLG_SIZE;
+
+ if (set) {
+ rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx);
+ }
+}
+
+static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (idx == ICE_GPR_HO_IDX)
+ ice_rt_ho_set(rt, val);
+ else
+ rt->gpr[idx] = val;
+
+ ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val);
+}
+
+static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (set) {
+ rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx);
+ }
+}
+
+/**
+ * ice_parser_rt_reset - reset the parser runtime
+ * @rt: pointer to the parser runtime
+ */
+void ice_parser_rt_reset(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_metainit_item *mi;
+ unsigned int i;
+
+ mi = &psr->mi_table[0];
+
+ memset(rt, 0, sizeof(*rt));
+ rt->psr = psr;
+
+ ice_rt_tsr_set(rt, mi->tsr);
+ ice_rt_ho_set(rt, mi->ho);
+ ice_rt_np_set(rt, mi->pc);
+ ice_rt_nn_set(rt, mi->pg_rn);
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (mi->flags & BIT(i))
+ ice_rt_flag_set(rt, i, true);
+ }
+}
+
+/**
+ * ice_parser_rt_pktbuf_set - load a packet into the parser runtime
+ * @rt: pointer to the parser runtime
+ * @pkt_buf: buffer with packet data
+ * @pkt_len: packet buffer length
+ */
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len)
+{
+ int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+
+ memcpy(rt->pkt_buf, pkt_buf, len);
+ rt->pkt_len = pkt_len;
+
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_bst_key_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX];
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+ u8 *key = rt->bst_key;
+ int idd, i;
+
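+	/* The last key byte holds either the TCAM Search Register value or
+	 * the key builder priority; the first ICE_BST_KEY_TCAM_SIZE bytes
+	 * are the packet bytes at the current header offset, stored in
+	 * reverse order.
+	 */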
+ idd = ICE_BST_TCAM_KEY_SIZE - 1;
+ if (imem->b_kb.tsr_ctrl)
+ key[idd] = tsr;
+ else
+ key[idd] = imem->b_kb.prio;
+
+ idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ for (i = idd; i >= 0; i--) {
+ int j;
+
+ j = ho + idd - i;
+ if (j < ICE_PARSER_MAX_PKT_LEN)
+ key[i] = rt->pkt_buf[ho + idd - i];
+ else
+ key[i] = 0;
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ key[0], key[1], key[2], key[3], key[4],
+ key[5], key[6], key[7], key[8], key[9]);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+}
+
+static u16 ice_bit_rev_u16(u16 v, int len)
+{
+ return bitrev16(v) >> (BITS_PER_TYPE(v) - len);
+}
+
+static u32 ice_bit_rev_u32(u32 v, int len)
+{
+ return bitrev32(v) >> (BITS_PER_TYPE(v) - len);
+}
+
+static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
+{
+ int offset;
+ u32 buf[2];
+ u64 val;
+
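+	/* Select @len bits starting at bit @start of the header-value GPRs;
+	 * the per-byte bit reversals below compensate for the hardware's
+	 * MSB-first bit numbering within each byte.
+	 */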
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(buf, &rt->gpr[offset], sizeof(buf));
+
+ buf[0] = bitrev8x4(buf[0]);
+ buf[1] = bitrev8x4(buf[1]);
+
+ val = *(u64 *)buf;
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u32(val, len);
+}
+
+static u32 ice_pk_build(struct ice_parser_rt *rt,
+ struct ice_np_keybuilder *kb)
+{
+ if (kb->opc == ICE_NPKB_OPC_EXTRACT)
+ return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1);
+ else if (kb->opc == ICE_NPKB_OPC_BUILD)
+ return rt->gpr[kb->start_reg0] |
+ ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16));
+ else if (kb->opc == ICE_NPKB_OPC_BYPASS)
+ return 0;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n",
+ kb->opc);
+ return U32_MAX;
+}
+
+static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index)
+{
+ int word = index / ICE_GPR_FLG_SIZE;
+ int id = index % ICE_GPR_FLG_SIZE;
+
+ return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id));
+}
+
+static int ice_imem_pgk_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (imem->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx);
+ if (imem->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx);
+ if (imem->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx);
+ if (imem->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_imem_alu0_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu0 = &imem->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu1_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu1 = &imem->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu2_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu2 = &imem->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_pgp_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->pg_prio = imem->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
+ rt->pg_prio, imem->idx);
+}
+
+static int ice_bst_pgk_init(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.boost_idx = bst->hit_idx_grp;
+ rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (bst->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx);
+ if (bst->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx);
+ if (bst->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx);
+ if (bst->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_bst_alu0_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu0 = &bst->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu1_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu1 = &bst->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu2_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu2 = &bst->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_pgp_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->pg_prio = bst->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
+ rt->pg_prio, bst->addr);
+}
+
+static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *item;
+
+ item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ if (!item)
+ item = ice_pg_cam_match(psr->pg_sp_cam_table,
+ ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key);
+ return item;
+}
+
+static
+struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_nm_cam_item *item;
+
+ item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
+ ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
+
+ if (!item)
+ item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ return item;
+}
+
+static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
+{
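+	/* Stage the write instead of applying it: all pending GPR, flag and
+	 * error updates are committed together by ice_pu_exe() once the
+	 * current round's ALUs have run.
+	 */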
+ rt->pu.gpr_val_upd[idx] = true;
+ rt->pu.gpr_val[idx] = val;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
+ idx, val);
+}
+
+static void ice_pg_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
+
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc);
+ ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
+}
+
+static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.flg_msk |= BIT_ULL(idx);
+ if (val)
+ rt->pu.flg_val |= BIT_ULL(idx);
+ else
+ rt->pu.flg_val &= ~BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
+ idx, val);
+}
+
+static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u32 hv_bit_sel;
+ int i;
+
+ if (!alu->dedicate_flags_ena)
+ return;
+
+ if (alu->flags_extr_imm) {
+ for (i = 0; i < alu->dst_len; i++)
+ ice_flg_add(rt, alu->dst_start + i,
+ !!(alu->flags_start_imm & BIT(i)));
+ } else {
+ for (i = 0; i < alu->dst_len; i++) {
+ hv_bit_sel = ice_hv_bit_sel(rt,
+ alu->flags_start_imm + i,
+ 1);
+ ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
+ }
+ }
+}
+
+static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
+ rt->po = rt->gpr[ICE_GPR_HO_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
+ rt->po);
+}
+
+static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
+ int start, int len)
+{
+ int offset;
+ u32 val;
+
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(&val, &rt->gpr[offset], sizeof(val));
+
+ val = bitrev8x4(val);
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u16(val, len);
+}
+
+static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.err_msk |= (u16)BIT(idx);
+ if (val)
+		rt->pu.err_val |= (u16)BIT(idx);
+	else
+		rt->pu.err_val &= ~(u16)BIT(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
+ idx, val);
+}
+
+static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
+ bool val)
+{
+ u16 flg_idx;
+
+ if (alu->dedicate_flags_ena) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
+ alu->opc);
+ return;
+ }
+
+ if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
+ if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
+ alu->dst_start);
+ return;
+ }
+ ice_err_add(rt, alu->dst_start, val);
+ } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
+ flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
+ alu->dst_start);
+
+ if (flg_idx >= ICE_PARSER_FLG_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
+ flg_idx);
+ return;
+ }
+ ice_flg_add(rt, flg_idx, val);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
+ alu->dst_reg_id, alu->dst_start);
+ }
+}
+
+static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u16 dst, src, shift, imm;
+
+ if (alu->shift_xlate_sel) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
+ return;
+ }
+
+ ice_po_update(rt, alu);
+ ice_flg_update(rt, alu);
+
+ dst = rt->gpr[alu->dst_reg_id];
+ src = ice_reg_bit_sel(rt, alu->src_reg_id,
+ alu->src_start, alu->src_len);
+ shift = alu->shift_xlate_key;
+ imm = alu->imm;
+
+ switch (alu->opc) {
+ case ICE_ALU_PARK:
+ break;
+ case ICE_ALU_MOV_ADD:
+ dst = (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ADD:
+ dst += (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ORLT:
+ if (src < imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_OREQ:
+ if (src == imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_SETEQ:
+ ice_dst_reg_bit_set(rt, alu, src == imm);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_MOV_XOR:
+ dst = (src << shift) ^ imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ default:
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
+ alu->opc);
+ break;
+ }
+}
+
+static void ice_alu0_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
+ ice_alu_exe(rt, rt->alu0);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
+}
+
+static void ice_alu1_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
+ ice_alu_exe(rt, rt->alu1);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
+}
+
+static void ice_alu2_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
+ ice_alu_exe(rt, rt->alu2);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
+}
+
+static void ice_pu_exe(struct ice_parser_rt *rt)
+{
+ struct ice_gpr_pu *pu = &rt->pu;
+ unsigned int i;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
+
+ for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
+ if (pu->gpr_val_upd[i])
+ ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
+ }
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (pu->flg_msk & BIT(i))
+ ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
+ }
+
+ for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
+ if (pu->err_msk & BIT(i))
+ ice_rt_err_set(rt, i, pu->err_val & BIT(i));
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
+}
+
+static void ice_alu_pg_exe(struct ice_parser_rt *rt)
+{
+ memset(&rt->pu, 0, sizeof(rt->pu));
+
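+	/* pg_prio picks where the ParseGraph action runs relative to the
+	 * ALUs: priority N executes the PG action after N ALUs.
+	 */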
+ switch (rt->pg_prio) {
+ case (ICE_PG_P0):
+ ice_pg_exe(rt);
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P1):
+ ice_alu0_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P2):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P3):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ ice_pg_exe(rt);
+ break;
+ }
+
+ ice_pu_exe(rt);
+
+ if (rt->action->ho_inc == 0)
+ return;
+
+ if (rt->action->ho_polarity)
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
+ else
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
+}
+
+static void ice_proto_off_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
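+	/* A protocol-group action records every protocol of the group at its
+	 * own polarity-adjusted offset; a plain action records one protocol
+	 * at the offset computed by ice_po_update().
+	 */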
+ if (rt->action->is_pg) {
+ struct ice_proto_grp_item *proto_grp =
+ &psr->proto_grp_table[rt->action->proto_id];
+ u16 po;
+ int i;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ struct ice_proto_off *entry = &proto_grp->po[i];
+
+ if (entry->proto_id == U8_MAX)
+ break;
+
+ if (!entry->polarity)
+ po = rt->po + entry->offset;
+ else
+ po = rt->po - entry->offset;
+
+ rt->protocols[entry->proto_id] = true;
+ rt->offsets[entry->proto_id] = po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ entry->proto_id, po);
+ }
+ } else {
+ rt->protocols[rt->action->proto_id] = true;
+ rt->offsets[rt->action->proto_id] = rt->po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ rt->action->proto_id, rt->po);
+ }
+}
+
+static void ice_marker_set(struct ice_parser_rt *rt, int idx)
+{
+ unsigned int byte = idx / BITS_PER_BYTE;
+ unsigned int bit = idx % BITS_PER_BYTE;
+
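+	/* e.g. marker 10 sets bit 2 of markers[1] */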
+ rt->markers[byte] |= (u8)BIT(bit);
+}
+
+static void ice_marker_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_mg) {
+ struct ice_mk_grp_item *mk_grp =
+ &psr->mk_grp_table[rt->action->marker_id];
+ int i;
+
+ for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
+ u8 marker = mk_grp->markers[i];
+
+ if (marker == ICE_MARKER_MAX_SIZE)
+ break;
+
+ ice_marker_set(rt, marker);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ marker);
+ }
+	} else {
+		if (rt->action->marker_id != ICE_MARKER_MAX_SIZE) {
+			ice_marker_set(rt, rt->action->marker_id);
+			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+				  rt->action->marker_id);
+		}
+	}
+}
+
+static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
+{
+ struct ice_ptype_mk_tcam_item *item;
+ struct ice_parser *psr = rt->psr;
+
+ item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
+ rt->markers, ICE_MARKER_ID_SIZE);
+ if (item)
+ return item->ptype;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
+ return U16_MAX;
+}
+
+static void ice_proto_off_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ int i;
+
+ for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
+ if (rt->protocols[i]) {
+ rslt->po[rslt->po_num].proto_id = (u8)i;
+ rslt->po[rslt->po_num].offset = rt->offsets[i];
+ rslt->po_num++;
+ }
+ }
+}
+
+static void ice_result_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ memset(rslt, 0, sizeof(*rslt));
+
+ memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
+ ICE_PARSER_FLAG_PSR_SIZE);
+ rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
+ rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
+ rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
+ rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
+
+ ice_proto_off_resolve(rt, rslt);
+ rslt->ptype = ice_ptype_resolve(rt);
+}
+
+/**
+ * ice_parser_rt_execute - parser execution routine
+ * @rt: pointer to the parser runtime
+ * @rslt: output parameter used to return the parser result
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_pg_nm_cam_item *pg_nm_cam;
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *pg_cam;
+ int status = 0;
+ u16 node;
+ u16 pc;
+
+ node = rt->gpr[ICE_GPR_NN_IDX];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
+
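+	/* Each round: fetch the imem entry at the current program counter,
+	 * let a Boost TCAM hit selectively override its key builder and
+	 * ALUs, match the parse-graph key against the CAMs, then execute
+	 * the action and ALUs and advance the header offset.
+	 */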
+ while (true) {
+ struct ice_bst_tcam_item *bst;
+ struct ice_imem_item *imem;
+
+ pc = rt->gpr[ICE_GPR_NP_IDX];
+ imem = &psr->imem_table[pc];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
+ pc);
+
+ ice_bst_key_init(rt, imem);
+ bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
+ if (!bst) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_alu0_set(rt, imem);
+ ice_imem_alu1_set(rt, imem);
+ ice_imem_alu2_set(rt, imem);
+ ice_imem_pgp_set(rt, imem);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
+ bst->addr);
+ if (imem->b_m.pg) {
+ status = ice_bst_pgk_init(rt, bst);
+ if (status)
+ break;
+ ice_bst_pgp_set(rt, bst);
+ } else {
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_pgp_set(rt, imem);
+ }
+
+ if (imem->b_m.alu0)
+ ice_bst_alu0_set(rt, bst);
+ else
+ ice_imem_alu0_set(rt, imem);
+
+ if (imem->b_m.alu1)
+ ice_bst_alu1_set(rt, bst);
+ else
+ ice_imem_alu1_set(rt, imem);
+
+ if (imem->b_m.alu2)
+ ice_bst_alu2_set(rt, bst);
+ else
+ ice_imem_alu2_set(rt, imem);
+ }
+
+ rt->action = NULL;
+ pg_cam = ice_rt_pg_cam_match(rt);
+ if (!pg_cam) {
+ pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
+ if (pg_nm_cam) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
+ pg_nm_cam->idx);
+ rt->action = &pg_nm_cam->action;
+ }
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
+ pg_cam->idx);
+ rt->action = &pg_cam->action;
+ }
+
+ if (!rt->action) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
+ status = -EINVAL;
+ break;
+ }
+
+ ice_alu_pg_exe(rt);
+ ice_marker_update(rt);
+ ice_proto_off_update(rt);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
+ rt->action->next_node);
+
+ if (rt->action->is_last_round) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
+ break;
+ }
+
+ if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
+			ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) reached packet length (%u), stop parsing\n",
+ rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
+ break;
+ }
+ }
+
+ ice_result_resolve(rt, rslt);
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index bdda3401e343..970a99a52bf1 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -59,12 +59,13 @@ static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
- if (ice_is_vf_disabled(np->repr->vf))
+ if (repr->ops.ready(repr))
return;
- vsi = np->repr->src_vsi;
+ vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
@@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
}
/**
- * ice_repr_open - Enable port representor's network interface
+ * ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
@@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_open(struct net_device *netdev)
+static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_open(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
/**
- * ice_repr_stop - Disable port representor's network interface
+ * ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
@@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_stop(struct net_device *netdev)
+static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_stop(struct net_device *netdev)
+{
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
@@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
-static const struct net_device_ops ice_repr_netdev_ops = {
+static const struct net_device_ops ice_repr_vf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
- .ndo_open = ice_repr_open,
- .ndo_stop = ice_repr_stop,
+ .ndo_open = ice_repr_vf_open,
+ .ndo_stop = ice_repr_vf_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
+};
+
+static const struct net_device_ops ice_repr_sf_netdev_ops = {
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_sf_open,
+ .ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
@@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = {
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
- return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+ return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
+ netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
+ * @ops: new ops for netdev
*/
static int
-ice_repr_reg_netdev(struct net_device *netdev)
+ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
- netdev->netdev_ops = &ice_repr_netdev_ops;
+ netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
@@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
-static void ice_repr_remove_node(struct devlink_port *devlink_port)
+static int ice_repr_ready_vf(struct ice_repr *repr)
+{
+ return !ice_check_vf_ready_for_cfg(repr->vf);
+}
+
+static int ice_repr_ready_sf(struct ice_repr *repr)
{
- devl_rate_leaf_destroy(devlink_port);
+ return !repr->sf->active;
}
/**
- * ice_repr_rem - remove representor from VF
+ * ice_repr_destroy - free a port representor's resources
* @repr: pointer to representor structure
*/
-static void ice_repr_rem(struct ice_repr *repr)
+void ice_repr_destroy(struct ice_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
-/**
- * ice_repr_rem_vf - remove representor from VF
- * @repr: pointer to representor structure
- */
-void ice_repr_rem_vf(struct ice_repr *repr)
+static void ice_repr_rem_vf(struct ice_repr *repr)
{
- ice_repr_remove_node(&repr->vf->devlink_port);
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
- ice_repr_rem(repr);
}
-static void ice_repr_set_tx_topology(struct ice_pf *pf)
+static void ice_repr_rem_sf(struct ice_repr *repr)
{
- struct devlink *devlink;
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_sf_port(repr->sf);
+}
+static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
+{
	/* only export if ADQ and DCB are disabled and eswitch is enabled */
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
- devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
- * ice_repr_add - add representor for generic VSI
- * @pf: pointer to PF structure
+ * ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
- * @parent_mac: device MAC address
*/
-static struct ice_repr *
-ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
struct ice_netdev_priv *np;
struct ice_repr *repr;
@@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
np = netdev_priv(repr->netdev);
np->repr = repr;
- ether_addr_copy(repr->parent_mac, parent_mac);
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
@@ -371,34 +402,18 @@ err_alloc:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+static int ice_repr_add_vf(struct ice_repr *repr)
{
- struct ice_repr *repr;
- struct ice_vsi *vsi;
+ struct ice_vf *vf = repr->vf;
+ struct devlink *devlink;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return ERR_PTR(-ENOENT);
-
err = ice_devlink_create_vf_port(vf);
if (err)
- return ERR_PTR(err);
-
- repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
- goto err_repr_add;
- }
-
- repr->vf = vf;
-
- repr->netdev->min_mtu = ETH_MIN_MTU;
- repr->netdev->max_mtu = ICE_MAX_MTU;
+ return err;
- SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
- err = ice_repr_reg_netdev(repr->netdev);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
if (err)
goto err_netdev;
@@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
goto err_cfg_vsi;
ice_virtchnl_set_repr_ops(vf);
- ice_repr_set_tx_topology(vf->pf);
- return repr;
+ devlink = priv_to_devlink(vf->pf);
+ ice_repr_set_tx_topology(vf->pf, devlink);
+
+ return 0;
err_cfg_vsi:
unregister_netdev(repr->netdev);
err_netdev:
- ice_repr_rem(repr);
-err_repr_add:
ice_devlink_destroy_vf_port(vf);
- return ERR_PTR(err);
+ return err;
+}
+
+/**
+ * ice_repr_create_vf - add representor for VF VSI
+ * @vf: VF to create port representor on
+ *
+ * Set the representor type for the VF and wire up its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_repr *repr;
+
+ if (!vsi)
+ return ERR_PTR(-EINVAL);
+
+ repr = ice_repr_create(vsi);
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_VF;
+ repr->vf = vf;
+ repr->ops.add = ice_repr_add_vf;
+ repr->ops.rem = ice_repr_rem_vf;
+ repr->ops.ready = ice_repr_ready_vf;
+
+ ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
+
+ return repr;
+}
+
+static int ice_repr_add_sf(struct ice_repr *repr)
+{
+ struct ice_dynamic_port *sf = repr->sf;
+ int err;
+
+ err = ice_devlink_create_sf_port(sf);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
+ if (err)
+ goto err_netdev;
+
+ ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_sf_port(sf);
+ return err;
+}
+
+/**
+ * ice_repr_create_sf - add representor for SF VSI
+ * @sf: SF to create port representor on
+ *
+ * Set the representor type for the SF and wire up its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create(sf->vsi);
+
+ if (IS_ERR(repr))
+ return repr;
+
+ repr->type = ICE_REPR_TYPE_SF;
+ repr->sf = sf;
+ repr->ops.add = ice_repr_add_sf;
+ repr->ops.rem = ice_repr_rem_sf;
+ repr->ops.ready = ice_repr_ready_sf;
+
+ ether_addr_copy(repr->parent_mac, sf->hw_addr);
+
+ return repr;
}
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 488661b2900b..35bd93165e1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats {
u64 tx_drops;
};
+enum ice_repr_type {
+ ICE_REPR_TYPE_VF,
+ ICE_REPR_TYPE_SF,
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
- struct ice_vf *vf;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
+ enum ice_repr_type type;
+ union {
+ struct ice_vf *vf;
+ struct ice_dynamic_port *sf;
+ };
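+	/* per-type callbacks: add/rem manage the devlink port and netdev
+	 * registration; ready gates stats collection in
+	 * ice_repr_get_stats64()
+	 */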
+ struct {
+ int (*add)(struct ice_repr *repr);
+ void (*rem)(struct ice_repr *repr);
+ int (*ready)(struct ice_repr *repr);
+ } ops;
};
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
-void ice_repr_rem_vf(struct ice_repr *repr);
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
+
+void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ecf8f5d60292..6ca13c5dcb14 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return -ENOMEM;
- /* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
- sizeof(*root), GFP_KERNEL);
+ sizeof(*root->children), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
return -ENOMEM;
@@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return -ENOMEM;
if (hw->max_children[layer]) {
- /* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
hw->max_children[layer],
- sizeof(*node), GFP_KERNEL);
+ sizeof(*node->children), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
new file mode 100644
index 000000000000..75d7147e1c01
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_txrx.h"
+#include "ice_fltr.h"
+#include "ice_sf_eth.h"
+#include "devlink/devlink_port.h"
+#include "devlink/devlink.h"
+
+static const struct net_device_ops ice_sf_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
+};
+
+/**
+ * ice_sf_cfg_netdev - Allocate, configure and register a netdev
+ * @dyn_port: subfunction associated with configured netdev
+ * @devlink_port: subfunction devlink port to be linked with netdev
+ *
+ * Return: 0 on success, negative value on failure
+ */
+static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
+ struct devlink_port *devlink_port)
+{
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
+
+ eth_hw_addr_set(netdev, dyn_port->hw_addr);
+ ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
+ netdev->netdev_ops = &ice_sf_netdev_ops;
+ SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ vsi->netdev = NULL;
+		return err;
+ }
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static void ice_sf_decfg_netdev(struct ice_vsi *vsi)
+{
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+}
+
+/**
+ * ice_sf_dev_probe - subfunction driver probe function
+ * @adev: pointer to the auxiliary device
+ * @id: pointer to the auxiliary_device id
+ *
+ * Configure VSI and netdev resources for the subfunction device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_pf *pf = dyn_port->pf;
+ struct device *dev = &adev->dev;
+ struct ice_sf_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ vsi->type = ICE_VSI_SF;
+ vsi->port_info = pf->hw.port_info;
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ priv = ice_allocate_sf(&adev->dev, pf);
+ if (IS_ERR(priv)) {
+		dev_err(dev, "Subfunction devlink alloc failed\n");
+ return PTR_ERR(priv);
+ }
+
+ priv->dev = sf_dev;
+ sf_dev->priv = priv;
+ devlink = priv_to_devlink(priv);
+
+ devl_lock(devlink);
+
+ err = ice_vsi_cfg(vsi);
+ if (err) {
+		dev_err(dev, "Subfunction vsi config failed\n");
+ goto err_free_devlink;
+ }
+ vsi->sf = dyn_port;
+
+ ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+ err = ice_devlink_create_sf_dev_port(sf_dev);
+ if (err) {
+		dev_err(dev, "Cannot add ice virtual devlink port for subfunction\n");
+ goto err_vsi_decfg;
+ }
+
+ err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+ if (err) {
+		dev_err(dev, "Subfunction netdev config failed\n");
+ goto err_devlink_destroy;
+ }
+
+ err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+ if (err) {
+		dev_err(dev, "Can't link devlink instance to SF devlink port\n");
+ goto err_netdev_decfg;
+ }
+
+ ice_napi_add(vsi);
+
+ devl_register(devlink);
+ devl_unlock(devlink);
+
+ dyn_port->attached = true;
+
+ return 0;
+
+err_netdev_decfg:
+ ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+ ice_vsi_decfg(vsi);
+err_free_devlink:
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->priv);
+ devl_lock(devlink);
+
+ ice_vsi_close(vsi);
+
+ ice_sf_decfg_netdev(vsi);
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ ice_vsi_decfg(vsi);
+
+ dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+ { .name = "ice.sf", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+ .name = "sf",
+ .probe = ice_sf_dev_probe,
+ .remove = ice_sf_dev_remove,
+ .id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register the auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ */
+void ice_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Since most of the subfunction deactivation is handled in the remove
+ * handler, only the tracking resources are freed here.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(device);
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+ xa_erase(&ice_sf_aux_id, adev->id);
+ kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Set up the
+ * associated netdev resources and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = dyn_port->pf;
+ struct ice_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ u32 id;
+
+ err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+ return err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ err = -ENOMEM;
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+ goto xa_erase;
+ }
+ pdev = pf->pdev;
+
+ sf_dev->dyn_port = dyn_port;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = "sf";
+ sf_dev->adev.dev.release = ice_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+ goto sf_dev_free;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+ goto aux_dev_uninit;
+ }
+
+ dyn_port->sf_dev = sf_dev;
+
+ return 0;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+ kfree(sf_dev);
+xa_erase:
+ xa_erase(&ice_sf_aux_id, id);
+
+ return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+ struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _ICE_SF_ETH_H_
+#define _ICE_SF_ETH_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ice.h"
+
+struct ice_sf_dev {
+ struct auxiliary_device adev;
+ struct ice_dynamic_port *dyn_port;
+ struct ice_sf_priv *priv;
+};
+
+struct ice_sf_priv {
+ struct ice_sf_dev *dev;
+ struct devlink_port devlink_port;
+};
+
+static inline struct ice_sf_dev *
+ice_adev_to_sf_dev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct ice_sf_dev, adev);
+}
+
+int ice_sf_driver_register(void);
+void ice_sf_driver_unregister(void);
+
+int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack);
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port);
+#endif /* _ICE_SF_ETH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..3d7e96721cf9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_sf_vsi_vlan_ops.h"
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
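+	/* SF VSIs only need VLAN filter add/del; whether the outer or inner
+	 * ops table applies depends on double VLAN mode.
+	 */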
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ vlan_ops = &vsi->outer_vlan_ops;
+ else
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..8c44eafceea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _ICE_SF_VSI_VLAN_OPS_H_
+#define _ICE_SF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 55ef33208456..e34fe2516ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
- retval = ice_eswitch_attach(pf, vf);
+ retval = ice_eswitch_attach_vf(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fe8847184cb1..79d91e95358c 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_list_info) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c9bc3f1add5d..8208055d6e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2368,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
+ if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 96037bef3e78..45768796691f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -61,6 +61,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
+#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)
@@ -158,6 +159,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5635e9da2212..a69e91f88d81 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -766,7 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -782,7 +782,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
- ice_eswitch_attach(pf, vf);
+ ice_eswitch_attach_vf(pf, vf);
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index fec16919ec19..be4266899690 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -12,6 +12,7 @@
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
+#include "ice_flow.h"
#include "ice_virtchnl_fdir.h"
#include "ice_vsi_vlan_ops.h"
@@ -52,6 +53,12 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+/* Structure to store fdir fv entry */
+struct ice_fdir_prof_info {
+ struct ice_parser_profile prof;
+ u64 fdir_active_cnt;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -91,6 +98,7 @@ struct ice_vf {
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
+ struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1c6ce0c4ed4e..59f62306b9cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
+ vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index b4feb0927687..14e3f0f89c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+ ICE_FDIR_TUNNEL_TYPE_ECPRI,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};
struct virtchnl_fdir_fltr_conf {
@@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf {
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
+
+ struct ice_parser_profile *prof;
+ bool parser_ena;
+ u8 *pkt_buf;
+ u8 pkt_len;
};
struct virtchnl_fdir_inset_map {
@@ -787,6 +801,107 @@ err_exit:
}
/**
+ * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
+ * @proto: virtchnl protocol headers
+ *
+ * Check if the FDIR rule is a raw (protocol-agnostic) flow. A regular FDIR
+ * rule always has a non-zero proto->count, so proto's tunnel_level and
+ * count serve as the indicators: if both are zero, the rule is treated as
+ * a raw flow.
+ *
+ * Return: true if the headers describe a raw flow, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
+{
+ return (proto->tunnel_level == 0 && proto->count == 0);
+}
+
+/**
+ * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's raw flow and store it in @conf
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ struct virtchnl_proto_hdrs *proto,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ u8 *pkt_buf, *msk_buf __free(kfree);
+ struct ice_parser_result rslt;
+ struct ice_pf *pf = vf->pf;
+ struct ice_parser *psr;
+ int status = -ENOMEM;
+ struct ice_hw *hw;
+ u16 udp_port = 0;
+
+ pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf)
+ goto err_mem_alloc;
+
+ memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
+ memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+
+ hw = &pf->hw;
+
+ /* Get raw profile info via Parser Lib */
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ status = PTR_ERR(psr);
+ goto err_mem_alloc;
+ }
+
+ ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
+
+ if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
+ ice_parser_vxlan_tunnel_set(psr, udp_port, true);
+
+ status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ if (status)
+ goto err_parser_destroy;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_result_dump(hw, &rslt);
+
+ conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
+ if (!conf->prof) {
+ status = -ENOMEM;
+ goto err_parser_destroy;
+ }
+
+ status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ proto->raw.pkt_len, ICE_BLK_FD,
+ conf->prof);
+ if (status)
+ goto err_parser_profile_init;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_profile_dump(hw, conf->prof);
+
+ /* Store raw flow info into @conf */
+ conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_buf = pkt_buf;
+ conf->parser_ena = true;
+
+ ice_parser_destroy(psr);
+ return 0;
+
+err_parser_profile_init:
+ kfree(conf->prof);
+err_parser_destroy:
+ ice_parser_destroy(psr);
+err_mem_alloc:
+ kfree(pkt_buf);
+ return status;
+}
+
+/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -813,6 +928,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
return -EINVAL;
}
+ /* For raw FDIR filters created by the parser */
+ if (ice_vc_fdir_is_raw_flow(proto))
+ return ice_vc_fdir_parse_raw(vf, proto, conf);
+
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
@@ -1101,8 +1220,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- if (!ice_vc_validate_pattern(vf, proto))
- return -EINVAL;
+ /* For raw FDIR filters created by the parser */
+ if (!ice_vc_fdir_is_raw_flow(proto))
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1295,11 +1416,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (ret) {
- dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
- vf->vf_id, input->flow_type);
- goto err_free_pkt;
+ if (conf->parser_ena) {
+ memcpy(pkt, conf->pkt_buf, conf->pkt_len);
+ } else {
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@@ -1521,6 +1646,16 @@ err_exit:
return ret;
}
+static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
+{
+ return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
+}
+
/**
* ice_vc_add_fdir_fltr_post
* @vf: pointer to the VF structure
@@ -1782,6 +1917,158 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
}
/**
+ * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context
+ * @fv_a: struct of parsed FDIR profile field vector
+ * @fv_b: struct of parsed FDIR profile field vector
+ *
+ * Check if the two parsed FDIR profile field vector contexts differ in
+ * proto_id, offset or mask.
+ *
+ * Return: true if they differ, false otherwise.
+ */
+static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
+ struct ice_parser_fv *fv_b)
+{
+ return (fv_a->proto_id != fv_b->proto_id ||
+ fv_a->offset != fv_b->offset ||
+ fv_a->msk != fv_b->msk);
+}
+
+/**
+ * ice_vc_parser_fv_save - save parsed FDIR profile fv context
+ * @fv: struct of parsed FDIR profile field vector
+ * @fv_src: parsed FDIR profile field vector context to save
+ *
+ * Save the parsed FDIR profile field vector context, including proto_id,
+ * offset and mask.
+ */
+static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
+ struct ice_parser_fv *fv_src)
+{
+ fv->proto_id = fv_src->proto_id;
+ fv->offset = fv_src->offset;
+ fv->msk = fv_src->msk;
+ fv->spec = 0;
+}
+
+/**
+ * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_add_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_add *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ int ret, ptg, id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+ bool fv_found;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
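+	/* XLT1 translates the first parsed ptype to its packet type group;
+	 * raw-flow profile bookkeeping below is kept per PTG.
+	 */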
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ fv_found = false;
+
+ /* Check if profile info already exists, then update the counter */
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+ if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
+ &conf->prof->fv[i]))
+ break;
+ if (i == ICE_MAX_FV_WORDS) {
+ fv_found = true;
+ pi->fdir_active_cnt++;
+ }
+ }
+
+ /* HW profile setting is only required for the first time */
+ if (!fv_found) {
+ ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
+ ctrl_vsi->idx, conf->prof,
+ ICE_BLK_FD);
+
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert hw prof failed\n",
+ vf->vf_id);
+ return ret;
+ }
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf,
+ VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ dev_dbg(dev, "VF %d: set FDIR context failed\n",
+ vf->vf_id);
+ goto err_rem_entry;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
+ if (ret) {
+ dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_clr_irq;
+ }
+
+ /* Save parsed profile fv info of the FDIR rule for the first time */
+ if (!fv_found) {
+ for (i = 0; i < conf->prof->fv_num; i++)
+ ice_vc_parser_fv_save(&pi->prof.fv[i],
+ &conf->prof->fv[i]);
+ pi->prof.fv_num = conf->prof->fv_num;
+ pi->fdir_active_cnt = 1;
+ }
+
+ return 0;
+
+err_clr_irq:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ return ret;
+}
+
+/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1846,7 +2133,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
- v_ret = VIRTCHNL_STATUS_SUCCESS;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
@@ -1861,6 +2148,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_free_conf;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1922,6 +2218,78 @@ err_exit:
}
/**
+ * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER reply buffer
+ * @len: length of the reply buffer
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_del_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_del *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ enum ice_block blk = ICE_BLK_FD;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ struct ice_hw *hw;
+ unsigned long id;
+ u16 vsi_num;
+ int ptg;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
+ if (ret) {
+ dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
+ vf->vf_id, ret);
+ return ret;
+ }
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ pi->fdir_active_cnt--;
+ /* Remove the profile ID flow if no active FDIR rules are left */
+ if (!pi->fdir_active_cnt) {
+ vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+ }
+ }
+
+ conf->parser_ena = false;
+ return 0;
+}
+
+/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1933,7 +2301,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_vf_fdir *fdir = &vf->fdir;
enum virtchnl_status_code v_ret;
+ struct ice_fdir_fltr *input;
+ enum ice_fltr_ptype flow;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1983,6 +2354,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_del_tmr;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1992,6 +2372,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_del_tmr;
}
+ /* Remove unused profiles to avoid unexpected behaviors */
+ input = &conf->input;
+ flow = input->flow_type;
+ if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
+ ice_vc_fdir_rem_prof(vf, flow, is_tun);
+
+exit:
kfree(stat);
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 7aae7fdcfcdb..8c7a9b41fb63 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -3,6 +3,7 @@
#include "ice_pf_vsi_vlan_ops.h"
#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sf_vsi_vlan_ops.h"
#include "ice_lib.h"
#include "ice.h"
@@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
case ICE_VSI_VF:
ice_vf_vsi_init_vlan_ops(vsi);
break;
+ case ICE_VSI_SF:
+ ice_sf_vsi_init_vlan_ops(vsi);
+ break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
ice_vsi_type_str(vsi->type));
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5dee829bfc47..334ae945d640 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -289,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3df9935685e9..6c913a703df6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -97,8 +97,10 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 0b6c8fd5bc90..4f20343e49a9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -357,24 +357,11 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_msix;
}
- if (adapter->req_vec_chunks) {
- struct virtchnl2_vector_chunks *vchunks;
- struct virtchnl2_alloc_vectors *ac;
-
- ac = adapter->req_vec_chunks;
- vchunks = &ac->vchunks;
-
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
- vchunks);
- if (num_vec_ids < v_actual) {
- err = -EINVAL;
- goto free_vecids;
- }
- } else {
- int i;
-
- for (i = 0; i < v_actual; i++)
- vecids[i] = i;
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ &adapter->req_vec_chunks->vchunks);
+ if (num_vec_ids < v_actual) {
+ err = -EINVAL;
+ goto free_vecids;
}
for (vector = 0; vector < v_actual; vector++) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index fe64febf7436..dfd7cf1d9aa0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -237,14 +239,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
offsets,
max_data,
td_tag);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
+ tx_buf->type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
@@ -257,12 +262,14 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -270,8 +277,6 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
skb_tx_timestamp(first->skb);
@@ -282,13 +287,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -306,8 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
@@ -371,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
IDPF_TX_DESCS_FOR_CTX)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
return NETDEV_TX_BUSY;
}
@@ -396,11 +404,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
@@ -420,10 +428,15 @@ out_drop:
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
@@ -441,47 +454,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
break;
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
/* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -495,13 +487,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -521,11 +507,11 @@ fetch_next_txq_desc:
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
@@ -533,7 +519,7 @@ fetch_next_txq_desc:
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -1134,8 +1120,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
&work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -1144,6 +1132,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return work_done;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 585c3dadd9bf..d4e6f0e10487 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,10 +2,19 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct libeth_sqe buf;
+};
+
+#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(u32);
+
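The completion tag now rides in the generic SQE's private word: the accessor reinterprets priv as a u32, and LIBETH_SQE_CHECK_PRIV presumably expands to a compile-time guard along these lines (assumed expansion, shown for illustration):

static_assert(sizeof(u32) <= sizeof_field(struct libeth_sqe, priv),
	      "tag type must fit in the libeth_sqe private area");

This is what lets struct idpf_tx_buf become a plain alias for struct libeth_sqe later in this patch (see idpf_txrx.h) while splitq-specific code keeps its tag handling.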
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
@@ -61,41 +70,20 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf)
-{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
-
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct libeth_sq_napi_stats ss = { };
struct idpf_buf_lifo *buf_stack;
- u16 i;
+ struct idpf_tx_stash *stash;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+ struct hlist_node *tmp;
+ u32 i, tag;
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
@@ -103,7 +91,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
@@ -115,6 +103,20 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
if (!buf_stack->bufs)
return;
+ /*
+ * If a Tx timeout occurred, there may still be bufs in the
+ * hash table; free them here.
+ */
+ hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
+ hlist) {
+ if (!stash)
+ continue;
+
+ libeth_tx_complete(&stash->buf, &cp);
+ hash_del(&stash->hlist);
+ idpf_buf_lifo_push(buf_stack, stash);
+ }
+
for (i = 0; i < buf_stack->size; i++)
kfree(buf_stack->bufs[i]);
@@ -131,6 +133,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
idpf_tx_buf_rel_all(txq);
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
if (!txq->desc_ring)
return;
@@ -203,10 +206,6 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
@@ -1656,37 +1655,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
-{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
-/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1696,33 +1664,28 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
*/
static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
struct idpf_tx_stash *stash;
struct hlist_node *tmp_buf;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
/* Buffer completion */
hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
+ hash_del(&stash->hlist);
+ libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
-
- hash_del(&stash->hlist);
}
}
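Each stashed out-of-order buffer cycles through two structures: a LIFO of preallocated shadow entries and a hashtable keyed by completion tag. A simplified lifecycle sketch (the real code copies fields individually rather than whole-struct):

/* on stash: reserve a shadow entry and park the buffer by its tag */
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
stash->buf = *tx_buf;			/* shadow the SQE state */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
	 idpf_tx_buf_compl_tag(&stash->buf));

/* on completion: unlink first, then complete and recycle the entry */
hash_del(&stash->hlist);
libeth_tx_complete(&stash->buf, &cp);
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);

Note the reordering in this hunk: hash_del() now runs before the entry is pushed back onto the stack, so a recycled entry can never remain reachable from the hash.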
@@ -1737,8 +1700,7 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
{
struct idpf_tx_stash *stash;
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
return 0;
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
@@ -1751,29 +1713,27 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
/* Store buffer params in shadow buffer */
stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
+ stash->buf.bytes = tx_buf->bytes;
+ stash->buf.packets = tx_buf->packets;
+ stash->buf.type = tx_buf->type;
+ stash->buf.nr_frags = tx_buf->nr_frags;
dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
+ idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
/* Add buffer to buf_hash table to be freed later */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ idpf_tx_buf_compl_tag(&stash->buf));
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf->type = LIBETH_SQE_EMPTY;
return 0;
}
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
- (ntc)++; \
- if (unlikely(!(ntc))) { \
- ntc -= (txq)->desc_count; \
+ if (unlikely(++(ntc) == (txq)->desc_count)) { \
+ ntc = 0; \
buf = (txq)->tx_buf; \
desc = &(txq)->flex_tx[0]; \
} else { \
@@ -1797,69 +1757,71 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
+ *
+ * Furthermore, in flow scheduling mode, check to make sure there are enough
+ * reserve buffers to stash the packet. If there are not, return early, which
+ * will leave next_to_clean pointing to the packet that failed to be stashed.
+ *
+ * Return: false in the scenario above, true otherwise.
*/
-static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
- s16 ntc = tx_q->next_to_clean;
+ u32 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
+ bool clean_complete = true;
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
- ntc -= tx_q->desc_count;
while (tx_desc != next_pending_desc) {
- union idpf_tx_flex_desc *eop_desc;
+ u32 eop_idx;
/* If this entry in the ring was used as a context descriptor,
- * it's corresponding entry in the buffer ring will have an
- * invalid completion tag since no buffer was used. We can
- * skip this descriptor since there is no buffer to clean.
+ * its corresponding entry in the buffer ring is reserved. We
+ * can skip this descriptor since there is no buffer to clean.
*/
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (tx_buf->type <= LIBETH_SQE_CTX)
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
+ break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ eop_idx = tx_buf->rs_idx;
if (descs_only) {
- if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
+ if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
+ clean_complete = false;
goto tx_splitq_clean_out;
+ }
+
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
-
- if (dma_unmap_len(tx_buf, len)) {
- if (idpf_stash_flow_sch_buffers(tx_q,
- tx_buf))
- goto tx_splitq_clean_out;
- }
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
}
} else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
}
@@ -1868,8 +1830,9 @@ fetch_next_txq_desc:
}
tx_splitq_clean_out:
- ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
+
+ return clean_complete;
}
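From the caller's side, the new boolean contract reads: a false return means the stash ran out of reserve entries, and next_to_clean was deliberately left at the packet that could not be stashed so cleaning resumes there on the next completion event. A minimal sketch of a caller honoring that contract (shape assumed):

if (!idpf_tx_splitq_clean(txq, end, budget, cleaned, true)) {
	/* next_to_clean is parked at the unstashed packet; the next
	 * completion will retry the stash from this point
	 */
	return;
}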
#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
@@ -1895,57 +1858,68 @@ do { \
* this completion tag.
*/
static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
- u16 ntc = txq->next_to_clean;
- u16 num_descs_cleaned = 0;
- u16 orig_idx = idx;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
+ u16 ntc, orig_idx = idx;
tx_buf = &txq->tx_buf[idx];
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
+ idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
+ return false;
+
+ if (tx_buf->type == LIBETH_SQE_SKB)
+ libeth_tx_complete(tx_buf, &cp);
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
- num_descs_cleaned++;
+ while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ libeth_tx_complete(tx_buf, &cp);
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
}
- /* If we didn't clean anything on the ring for this completion, there's
- * nothing more to do.
- */
- if (unlikely(!num_descs_cleaned))
- return false;
-
- /* Otherwise, if we did clean a packet on the ring directly, it's safe
- * to assume that the descriptors starting from the original
- * next_to_clean up until the previously cleaned packet can be reused.
- * Therefore, we will go back in the ring and stash any buffers still
- * in the ring into the hash table to be cleaned later.
+ /*
+ * It's possible the packet we just cleaned was an out of order
+ * completion, which means we can stash the buffers starting from
+ * the original next_to_clean and reuse the descriptors. We need
+ * to compare the descriptor ring next_to_clean packet's "first" buffer
+ * to the "first" buffer of the packet we just cleaned to determine if
+ * this is the case. However, next_to_clean can point to either a
+ * reserved buffer that corresponds to a context descriptor used for the
+ * next_to_clean packet (TSO packet) or the "first" buffer (single
+ * packet). The orig_idx from the packet we just cleaned will always
+ * point to the "first" buffer. If next_to_clean points to a reserved
+ * buffer, let's bump ntc once and start the comparison from there.
*/
+ ntc = txq->next_to_clean;
tx_buf = &txq->tx_buf[ntc];
- while (tx_buf != &txq->tx_buf[orig_idx]) {
- idpf_stash_flow_sch_buffers(txq, tx_buf);
+
+ if (tx_buf->type == LIBETH_SQE_CTX)
idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
- }
- /* Finally, update next_to_clean to reflect the work that was just done
- * on the ring, if any. If the packet was only cleaned from the hash
- * table, the ring will not be impacted, therefore we should not touch
- * next_to_clean. The updated idx is used here
+ /*
+ * If ntc still points to a different "first" buffer, clean the
+ * descriptor ring and stash all of the buffers for later cleaning. If
+ * we cannot stash all of the buffers, next_to_clean will point to the
+ * "first" buffer of the packet that could not be stashed and cleaning
+ * will start there next time.
+ */
+ if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
+ !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
+ true)))
+ return true;
+
+ /*
+ * Otherwise, update next_to_clean to reflect the cleaning that was
+ * done above.
*/
txq->next_to_clean = idx;
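The ntc-versus-orig_idx test above can be restated as a predicate. A hypothetical helper using the same fields as this diff:

static bool idpf_ntc_is_same_packet(struct idpf_tx_queue *txq, u16 orig_idx)
{
	u32 ntc = txq->next_to_clean;

	/* a reserved context-descriptor entry (e.g. for TSO) sits in
	 * front of the packet's "first" buffer; step over it
	 */
	if (txq->tx_buf[ntc].type == LIBETH_SQE_CTX)
		ntc = (ntc + 1) % txq->desc_count;

	return ntc == orig_idx;
}

When the predicate is false, the ring still holds earlier packets, and idpf_tx_splitq_clean(..., true) stashes them so their descriptors can be reused.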
@@ -1965,7 +1939,7 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
*/
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 compl_tag;
@@ -1973,7 +1947,8 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
- return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ return;
}
compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
@@ -2008,7 +1983,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
+ struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
@@ -2158,29 +2133,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
- struct netdev_queue *nq;
-
- if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
- return 0;
-
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
-
- nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
- return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
-/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
@@ -2191,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto splitq_stop;
+ goto out;
/* If there are too many outstanding completions expected on the
* completion queue, stop the TX queue to give the device some time to
@@ -2210,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
return 0;
splitq_stop:
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2236,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+ if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -2307,6 +2265,12 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
@@ -2316,7 +2280,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *tx_buf;
tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
if (tx_buf == first)
break;
if (idx == 0)
@@ -2395,6 +2359,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
tx_buf = first;
+ first->nr_frags = 0;
params->compl_tag =
(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
@@ -2405,7 +2370,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
- tx_buf->compl_tag = params->compl_tag;
+ first->nr_frags++;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -2459,14 +2426,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
max_data);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
/* Since this packet has a buffer that is going to span
@@ -2479,8 +2447,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* simply pass over these holes and finish cleaning the
* rest of the packet.
*/
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_EMPTY;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
@@ -2504,13 +2471,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
break;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -2518,26 +2487,24 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -2737,8 +2704,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ txq->tx_buf[i].type = LIBETH_SQE_CTX;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
@@ -2822,12 +2788,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
} else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
@@ -3749,6 +3715,7 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ q_vector->wb_on_itr = false;
intval = idpf_vport_intr_buildreg_itr(q_vector,
IDPF_NO_ITR_UPDATE_IDX, 0);
@@ -4051,8 +4018,10 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -4061,6 +4030,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
/* Switch to poll mode in the tear-down path after sending disable
* queues virtchnl message, as the interrupts will be disabled after
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6215dbee5546..f0537826f840 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -127,11 +127,10 @@ do { \
*/
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
- 0 : U64_MAX) + \
+ 0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
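Restating IDPF_TX_COMPLQ_PENDING as a function makes the wrap handling explicit: if the pending counter has wrapped while the completion counter has not yet caught up, pending compares as smaller, and adding U32_MAX (the wrap span this diff switches to) compensates for the wrap. A sketch:

static u64 idpf_complq_outstanding(u64 pending, u64 completed)
{
	/* pending < completed can only mean pending wrapped first */
	return (pending >= completed ? 0 : U32_MAX) + pending - completed;
}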
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
@@ -149,47 +148,7 @@ union idpf_tx_flex_desc {
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
-/**
- * struct idpf_tx_buf
- * @next_to_watch: Next descriptor to clean
- * @skb: Pointer to the skb
- * @dma: DMA address
- * @len: DMA length
- * @bytecount: Number of bytes
- * @gso_segs: Number of GSO segments
- * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
- * with completion tag returned in buffer completion event.
- * Because the completion tag is expected to be the same in all
- * data descriptors for a given packet, and a single packet can
- * span multiple buffers, we need this field to track all
- * buffers associated with this completion tag independently of
- * the buf_id. The tag consists of a N bit buf_id and M upper
- * order "generation bits". See compl_tag_bufid_m and
- * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
- * to indicate the tag is not valid.
- * @ctx_entry: Singleq only. Used to indicate the corresponding entry
- * in the descriptor ring was used for a context descriptor and
- * this buffer entry should be skipped.
- */
-struct idpf_tx_buf {
- void *next_to_watch;
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- unsigned int bytecount;
- unsigned short gso_segs;
-
- union {
- int compl_tag;
-
- bool ctx_entry;
- };
-};
-
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct idpf_tx_buf buf;
-};
+#define idpf_tx_buf libeth_sqe
/**
* struct idpf_buf_lifo - LIFO for managing OOO completions
@@ -390,9 +349,11 @@ struct idpf_vec_regs {
* struct idpf_intr_reg
* @dyn_ctl: Dynamic control interrupt register
* @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
* @dyn_ctl_itridx_s: Register bit offset for ITR index
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -401,9 +362,11 @@ struct idpf_vec_regs {
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_intena_msk_m;
u32 dyn_ctl_itridx_s;
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
+ u32 dyn_ctl_wb_on_itr_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -424,6 +387,7 @@ struct idpf_intr_reg {
* @intr_reg: See struct idpf_intr_reg
* @napi: napi handler
* @total_events: Number of interrupts processed
+ * @wb_on_itr: whether WB on ITR is enabled
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -454,6 +418,7 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(read_write);
struct napi_struct napi;
u16 total_events;
+ bool wb_on_itr;
struct dim tx_dim;
u16 tx_itr_value;
@@ -472,7 +437,7 @@ struct idpf_q_vector {
cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+libeth_cacheline_set_assert(struct idpf_q_vector, 112,
424 + 2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
@@ -496,11 +461,6 @@ struct idpf_tx_queue_stats {
u64_stats_t dma_map_errs;
};
-struct idpf_cleaned_stats {
- u32 packets;
- u32 bytes;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -688,7 +648,7 @@ struct idpf_tx_queue {
void *desc_ring;
};
- struct idpf_tx_buf *tx_buf;
+ struct libeth_sqe *tx_buf;
struct idpf_txq_group *txq_grp;
struct device *dev;
void __iomem *tail;
@@ -831,7 +791,7 @@ struct idpf_compl_queue {
u32 next_to_use;
u32 next_to_clean;
- u32 num_completions;
+ aligned_u64 num_completions;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
@@ -963,7 +923,7 @@ struct idpf_txq_group {
struct idpf_compl_queue *complq;
- u32 num_completions_pending;
+ aligned_u64 num_completions_pending;
};
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
@@ -1033,6 +993,25 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
+/**
+ * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback while interrupts are disabled
+ * @q_vector: pointer to queue vector struct
+ */
+static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
+{
+ struct idpf_intr_reg *reg;
+
+ if (q_vector->wb_on_itr)
+ return;
+
+ q_vector->wb_on_itr = true;
+ reg = &q_vector->intr_reg;
+
+ writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
+ (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
+ reg->dyn_ctl);
+}
+
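Usage follows the same pattern in both napi_poll paths touched by this patch: whenever the vector stays in polling mode, completions keep flowing via descriptor writebacks instead of the interrupt being re-armed.

/* poll tail, as applied in idpf_vport_{singleq,splitq}_napi_poll */
if (!clean_complete) {
	idpf_vport_intr_set_wb_on_itr(q_vector);
	return budget;					/* stay in polling mode */
}

if (likely(napi_complete_done(napi, work_done)))
	idpf_vport_intr_update_itr_ena_irq(q_vector);	/* re-arm the IRQ */
else
	idpf_vport_intr_set_wb_on_itr(q_vector);	/* stay in WB mode */

idpf_vport_intr_update_itr_ena_irq() clears wb_on_itr again, so the two states stay mutually exclusive.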
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1064,7 +1043,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
@@ -1073,4 +1051,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+ u32 needed)
+{
+ return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed);
+}
+
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 629cb5cb7c9f..99b8dbaf4225 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -97,7 +97,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+ intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 06b9970dffad..ca6ccbc13954 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2387,15 +2387,11 @@ static int igb_get_ts_info(struct net_device *dev,
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
case e1000_82576:
case e1000_82580:
@@ -2405,8 +2401,6 @@ static int igb_get_ts_info(struct net_device *dev,
case e1000_i211:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9dc7c60838ed..1ef4cb871452 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,6 +33,7 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+/* This function assumes __netif_tx_lock is held by the caller. */
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++;
}
- __netif_tx_unlock(nq);
-
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+
return nxmit;
}
@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
+ int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
+ struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
}
u64_stats_update_begin(&rx_ring->rx_syncp);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7b83678ba83a..6ad35a00a287 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -282,7 +282,6 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
-void igbvf_check_options(struct igbvf_adapter *);
void igbvf_set_ethtool_ops(struct net_device *);
int igbvf_up(struct igbvf_adapter *);
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index e5b31818d565..7637d21445bf 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -49,7 +49,6 @@
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index c38b4d0f00ce..eac0f966e0e4 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -259,6 +259,10 @@ struct igc_adapter {
*/
spinlock_t qbv_tx_lock;
+ bool strict_priority_enable;
+ u8 num_tc;
+ u16 queue_per_tc[IGC_MAX_TX_QUEUES];
+
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
@@ -382,9 +386,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
+#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
-#define IGC_FLAG_TSN_ANY_ENABLED \
- (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)
+#define IGC_FLAG_TSN_ANY_ENABLED \
+ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
+ IGC_FLAG_TSN_LEGACY_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -681,6 +687,7 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_TX_DETECT_HANG,
IGC_RING_FLAG_AF_XDP_ZC,
IGC_RING_FLAG_TX_HWTSTAMP,
+ IGC_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 511384f3ec5c..8e449904aa7d 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -4,6 +4,8 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
+#include <linux/bitfield.h>
+
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -176,7 +178,6 @@
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
-#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
@@ -553,6 +554,15 @@
#define IGC_MAX_SR_QUEUES 2
+#define IGC_TXARB_TXQ_PRIO_0_MASK GENMASK(1, 0)
+#define IGC_TXARB_TXQ_PRIO_1_MASK GENMASK(3, 2)
+#define IGC_TXARB_TXQ_PRIO_2_MASK GENMASK(5, 4)
+#define IGC_TXARB_TXQ_PRIO_3_MASK GENMASK(7, 6)
+#define IGC_TXARB_TXQ_PRIO_0(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_0_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_1(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_1_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_2(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_2_MASK, (x))
+#define IGC_TXARB_TXQ_PRIO_3(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_3_MASK, (x))
+
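How these helpers compose into a TXARB value is decided in igc_tsn.c, outside this excerpt, so the TC-to-slot mapping below is an assumption; the sketch only illustrates the FIELD_PREP-based packing:

/* illustrative only: one queue index per priority slot */
u32 txarb = IGC_TXARB_TXQ_PRIO_0(adapter->queue_per_tc[0]) |
	    IGC_TXARB_TXQ_PRIO_1(adapter->queue_per_tc[1]) |
	    IGC_TXARB_TXQ_PRIO_2(adapter->queue_per_tc[2]) |
	    IGC_TXARB_TXQ_PRIO_3(adapter->queue_per_tc[3]);

wr32(IGC_TXARB, txarb);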
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -641,6 +651,16 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_ERROR 0x40000000
+/* EEE Link Ability */
+#define IGC_EEE_2500BT_MASK BIT(0)
+#define IGC_EEE_1000BT_MASK BIT(2)
+#define IGC_EEE_100BT_MASK BIT(1)
+
+/* EEE Link-Partner Ability */
+#define IGC_LP_EEE_2500BT_MASK BIT(0)
+#define IGC_LP_EEE_1000BT_MASK BIT(2)
+#define IGC_LP_EEE_100BT_MASK BIT(1)
+
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 3d3ef4e1547c..5b0c6f433767 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1540,6 +1540,10 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
+ /* Do not allow channel reconfiguration when mqprio is enabled */
+ if (adapter->strict_priority_enable)
+ return -EINVAL;
+
/* Verify the number of channels doesn't exceed hw limits */
max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
@@ -1565,15 +1569,11 @@ static int igc_ethtool_get_ts_info(struct net_device *dev,
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
switch (adapter->hw.mac.type) {
case igc_i225:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1627,8 +1627,11 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- u32 eeer;
+ struct igc_phy_info *phy = &hw->phy;
+ u16 eee_advert, eee_lp_advert;
+ u32 eeer, ret_val;
+ /* EEE supported */
linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
edata->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -1636,6 +1639,74 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->supported);
+ /* EEE Advertisement 1 - reg 7.60 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB1,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.60 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
+
+ if (eee_advert & IGC_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Advertisement 2 - reg 7.62 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_AB2,
+ &eee_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.62 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_advert & IGC_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->advertised);
+
+ /* EEE Link-Partner Ability 1 - reg 7.61 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB1,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.61 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised);
+
+ if (eee_lp_advert & IGC_LP_EEE_100BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->lp_advertised);
+
+ /* EEE Link-Partner Ability 2 - reg 7.63 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ IGC_ANEG_EEE_LP_AB2,
+ &eee_lp_advert);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Failed to read IEEE 7.63 register\n");
+ return -EINVAL;
+ }
+
+ if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->lp_advertised);
+
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
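The four MMD reads above all repeat the same address computation; a possible refactor (hypothetical helper, signature mirroring phy->ops.read_reg as used in this file):

static s32 igc_read_an_mmd_reg(struct igc_hw *hw, u32 reg, u16 *val)
{
	/* device 7 (Auto-Negotiation) MMD access, as in the reads above */
	return hw->phy.ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					 MMD_DEVADDR_SHIFT) | reg, val);
}

so that, e.g., ret_val = igc_read_an_mmd_reg(hw, IGC_ANEG_EEE_AB1, &eee_advert); replaces each open-coded block.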
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0a095cdea4fb..6e70bca15db1 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2191,6 +2191,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2207,6 +2208,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
__free_page(page);
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2658,6 +2660,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -2738,6 +2741,7 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
skb = igc_construct_skb_zc(ring, xdp);
if (!skb) {
ring->rx_stats.alloc_failed++;
+ set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
@@ -5807,11 +5811,29 @@ no_wait:
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(IGC_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+ struct igc_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(IGC_EICS, eics);
} else {
- wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ struct igc_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ }
}
igc_ptp_tx_hang(adapter);
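Condensed, the pattern this hunk establishes is: the RX hot path only flags the ring on an allocation failure, and the watchdog then fires a software interrupt solely for vectors whose rings flagged, instead of kicking every vector unconditionally. An equivalent per-ring check (test_and_clear_bit() folding the diff's test_bit()/clear_bit() pair):

if (test_and_clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags))
	eics |= q_vector->eims_value;	/* kick only this vector */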
@@ -6515,6 +6537,13 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -6532,6 +6561,65 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
}
}
+static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
+ u16 *offset)
+{
+ int i;
+
+ adapter->strict_priority_enable = true;
+ adapter->num_tc = num_tc;
+
+ for (i = 0; i < num_tc; i++)
+ adapter->queue_per_tc[i] = offset[i];
+}
+
+static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ if (!mqprio->qopt.num_tc) {
+ adapter->strict_priority_enable = false;
+ goto apply;
+ }
+
+ /* There must be as many TCs as Tx queues. */
+ if (mqprio->qopt.num_tc != adapter->num_tx_queues) {
+ NL_SET_ERR_MSG_FMT_MOD(mqprio->extack,
+ "Only %d traffic classes supported",
+ adapter->num_tx_queues);
+ return -EOPNOTSUPP;
+ }
+
+ /* Only one queue per TC is supported. */
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (mqprio->qopt.count[i] != 1) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "Only one queue per TC supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Preemption is not supported yet. */
+ if (mqprio->preemptible_tcs) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "Preemption is not supported yet");
+ return -EOPNOTSUPP;
+ }
+
+ igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
+ mqprio->qopt.offset);
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+apply:
+ return igc_tsn_offload_apply(adapter);
+}
+
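igc_tsn_enable_mqprio() accepts an mqprio offload only in its strictest shape: one TC per Tx queue, exactly one queue per TC, and no preemptible TCs; for example 'tc qdisc replace dev eth0 parent root handle 100 mqprio num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1' would pass on a four-queue i225 (command shown for illustration). A standalone sketch of just those checks, with hypothetical, flattened stand-ins for the tc_mqprio_qopt_offload fields:

#include <stdio.h>

struct mqprio_req {
	int num_tc;
	int count[8];			/* queues per TC */
	unsigned long preemptible_tcs;	/* bitmap of preemptible TCs */
};

static int validate_mqprio(const struct mqprio_req *req, int num_tx_queues)
{
	int i;

	if (req->num_tc != num_tx_queues)
		return -1;	/* one TC per Tx queue required */
	for (i = 0; i < req->num_tc; i++)
		if (req->count[i] != 1)
			return -1;	/* exactly one queue per TC */
	if (req->preemptible_tcs)
		return -1;	/* frame preemption not supported yet */
	return 0;
}

int main(void)
{
	struct mqprio_req ok  = { 4, { 1, 1, 1, 1 }, 0 };
	struct mqprio_req bad = { 4, { 2, 1, 1 }, 0 };

	/* prints "0 -1" */
	printf("%d %d\n", validate_mqprio(&ok, 4), validate_mqprio(&bad, 4));
	return 0;
}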
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -6551,6 +6639,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_CBS:
return igc_tsn_enable_cbs(adapter, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return igc_tsn_enable_mqprio(adapter, type_data);
+
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 861f37076861..2801e5f24df9 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -240,7 +240,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
@@ -380,7 +380,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
- ANEG_MULTIGBT_AN_CTRL,
+ IGC_ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e5b893fc5b66..12ddc5793651 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -238,6 +238,8 @@
#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+#define IGC_TXARB 0x3354 /* Tx Arbitration Control TxARB - RW */
+
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
@@ -308,6 +310,16 @@
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define IGC_EEE_SU 0x0E34 /* EEE Setup */
+/* MULTI GBT AN Control Register - reg. 7.32 */
+#define IGC_ANEG_MULTIGBT_AN_CTRL 0x0020
+
+/* EEE ANeg Advertisement Register - reg 7.60 and reg 7.62 */
+#define IGC_ANEG_EEE_AB1 0x003c
+#define IGC_ANEG_EEE_AB2 0x003e
+/* EEE ANeg Link-Partner Advertisement Register - reg 7.61 and reg 7.63 */
+#define IGC_ANEG_EEE_LP_AB1 0x003d
+#define IGC_ANEG_EEE_LP_AB2 0x003f
+
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index d68fa7f3d5f0..1e44374ca1ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -46,6 +46,9 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
+ if (adapter->strict_priority_enable)
+ new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+
return new_flags;
}
@@ -102,11 +105,32 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
adapter->taprio_offload_enable;
}
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 txarb;
+
+ txarb = rd32(IGC_TXARB);
+
+ txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
+ IGC_TXARB_TXQ_PRIO_1_MASK |
+ IGC_TXARB_TXQ_PRIO_2_MASK |
+ IGC_TXARB_TXQ_PRIO_3_MASK);
+
+ txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
+
+ wr32(IGC_TXARB, txarb);
+}
+
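igc_tsn_tx_arb() fills the TXARB priority fields in reverse: PRIO_0, the highest hardware transmit priority, is driven by queue_per_tc[3], the queue serving the highest traffic class. A standalone sketch of the packing, assuming each TXQ_PRIO field is a two-bit queue number at consecutive offsets, as the mask names suggest:

#include <stdio.h>

/* Assumed layout: four 2-bit queue-number fields, PRIO_0 at bits 1:0. */
#define TXQ_PRIO(prio, q)	((unsigned int)(q) << (2 * (prio)))

int main(void)
{
	/* The reset default from igc_tsn_disable_offload(). */
	unsigned int queue_per_tc[4] = { 3, 2, 1, 0 };
	unsigned int txarb = 0;

	/* PRIO_0 is the highest hardware priority; it takes the queue
	 * mapped to the highest traffic class, queue_per_tc[3].
	 */
	txarb |= TXQ_PRIO(0, queue_per_tc[3]);
	txarb |= TXQ_PRIO(1, queue_per_tc[2]);
	txarb |= TXQ_PRIO(2, queue_per_tc[1]);
	txarb |= TXQ_PRIO(3, queue_per_tc[0]);

	printf("TXARB = 0x%02x\n", txarb);	/* 0xe4 for the default map */
	return 0;
}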
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
+ u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
@@ -133,7 +157,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+ /* Reset mqprio TC configuration. */
+ netdev_reset_tc(adapter->netdev);
+
+ /* Restore the default Tx arbitration: Priority 0 has the highest
+ * priority and is assigned to queue 0, and so on.
+ */
+ igc_tsn_tx_arb(adapter, queue_per_tc);
+
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
+ adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -172,6 +205,40 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
+ if (adapter->strict_priority_enable) {
+ int err;
+
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
+ /* Configure queue priorities according to the user provided
+ * mapping.
+ */
+ igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
+
+ /* Enable legacy TSN mode which will do strict priority without
+ * any other TSN features.
+ */
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
+ tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ return 0;
+ }
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index e85f7d2e8810..f2709b10c2e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -317,7 +317,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4cac76254966..9482e0cca8b7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3196,16 +3196,12 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types =
BIT(HWTSTAMP_TX_OFF) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 18d63c8c2ff4..955dced844a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -858,7 +858,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
/* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- netdev->features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
/* release existing queues and reallocate them */
@@ -898,7 +898,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
/* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- netdev->features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0ee943db3dc9..16fa621ce0ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -981,7 +981,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
+ if (adapter->netdev->fcoe_mtu) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((rxr_idx >= f->offset) &&
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8057cef61f39..8b8404d8c946 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5079,7 +5079,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
netif_set_tso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ if (adapter->netdev->fcoe_mtu)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
@@ -5136,8 +5136,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -5197,8 +5196,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if ((dev->features & NETIF_F_FCOE_MTU) &&
- (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE &&
(pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
@@ -11096,8 +11094,7 @@ skip_sriov:
NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FSO |
- NETIF_F_FCOE_CRC |
- NETIF_F_FCOE_MTU;
+ NETIF_F_FCOE_CRC;
}
#endif /* IXGBE_FCOE */
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index fcfd0a075eee..e71715f5da22 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -495,7 +495,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
int err = 0;
#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -857,7 +857,7 @@ static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
int pf_max_frame = dev->mtu + ETH_HLEN;
#if IS_ENABLED(CONFIG_FCOE)
- if (dev->features & NETIF_F_FCOE_MTU)
+ if (dev->fcoe_mtu)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 9e6984815386..3c289bfe0a09 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -95,7 +95,6 @@ struct ltq_etop_priv {
struct mii_bus *mii_bus;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
- int tx_free[MAX_DMA_CHAN >> 1];
int tx_burst_len;
int rx_burst_len;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f35ae2c88091..9e80899546d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2802,7 +2802,7 @@ port_err:
static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
struct mv643xx_eth_shared_platform_data *pd;
- struct device_node *pnp, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
int ret;
/* bail out if not registered from DT */
@@ -2816,10 +2816,9 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
- for_each_available_child_of_node(np, pnp) {
+ for_each_available_child_of_node_scoped(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
- of_node_put(pnp);
mv643xx_eth_shared_of_remove();
return ret;
}
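for_each_available_child_of_node_scoped() lets the error path above drop the explicit of_node_put(): the scoped iterator releases the child reference automatically when the loop variable goes out of scope. The pattern rests on the compiler cleanup attribute (which the kernel wraps in helpers such as __free()); a minimal standalone illustration, using malloc/free in place of OF node refcounts:

#include <stdio.h>
#include <stdlib.h>

static void putref(int **p)
{
	if (*p) {
		printf("put ref %d\n", **p);	/* stand-in for of_node_put() */
		free(*p);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		/* GNU/Clang extension used by the kernel's cleanup helpers */
		__attribute__((cleanup(putref))) int *ref =
			malloc(sizeof(*ref));

		if (!ref)
			return 1;
		*ref = i;
		if (i == 1)
			break;	/* ref is still released on early exit */
	}
	return 0;
}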
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9190eff6c0bb..e1d003fdbc2e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -104,7 +104,7 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
return 0;
} else {
/* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
+ * least one whole jiffy, so timeout must be no less
* than two.
*/
timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 41894834fb53..d72b2d5f96db 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1781,7 +1781,7 @@ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
-static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index e809f91c08fb..9e02e4367bec 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1088,7 +1088,7 @@ struct mvpp2 {
unsigned int max_port_rxqs;
/* Workqueue to gather hardware statistics */
- char queue_name[30];
+ char queue_name[31];
struct workqueue_struct *stats_queue;
/* Debugfs root entry */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739..1641791a2d5b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1522,29 +1522,19 @@ static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
return 0;
}
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx)
{
u32 rss_ctx;
- int ret, i;
+ int ret;
ret = mvpp22_rss_context_create(port, &rss_ctx);
if (ret)
return ret;
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
+ if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0))
return -EINVAL;
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
+ port->rss_ctx[port_ctx] = rss_ctx;
return 0;
}
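The signature change inverts the ownership of context IDs: previously the driver scanned rss_ctx[] for a free slot and reported the chosen index back, whereas now the ethtool core picks port_ctx and the driver merely binds it to a hardware RSS table, warning if the slot is unexpectedly busy. A toy model of the new contract (names hypothetical):

#include <stdio.h>

#define N_RSS_TABLES 8

static int port_rss_ctx[N_RSS_TABLES];	/* ctx id -> hw table, -1 if free */

/* New contract: the core picks 'port_ctx'; the driver only binds it to
 * a hardware RSS table instead of scanning for a free slot itself.
 */
static int rss_ctx_create(unsigned int port_ctx, int hw_table)
{
	if (port_rss_ctx[port_ctx] >= 0)
		return -1;	/* WARN_ON_ONCE() case: slot already bound */
	port_rss_ctx[port_ctx] = hw_table;
	return 0;
}

int main(void)
{
	int i, a, b;

	for (i = 0; i < N_RSS_TABLES; i++)
		port_rss_ctx[i] = -1;

	a = rss_ctx_create(1, 5);
	b = rss_ctx_create(1, 6);
	printf("%d %d\n", a, b);	/* 0 -1: second bind is rejected */
	return 0;
}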
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 663157dc8062..85c9c6e80678 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -264,7 +264,7 @@ int mvpp22_port_rss_init(struct mvpp2_port *port);
int mvpp22_port_rss_enable(struct mvpp2_port *port);
int mvpp22_port_rss_disable(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0d62a33afa80..3880dcc0418b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5268,8 +5268,6 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -5696,40 +5694,82 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
return ret;
}
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
+ const struct ethtool_rxfh_param *rxfh)
{
- struct mvpp2_port *port = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- int ret = 0;
-
if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
+ return false;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
+ return false;
if (rxfh->key)
+ return false;
+
+ return true;
+}
+
+static int mvpp2_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
return -EOPNOTSUPP;
- if (*rss_context && rxfh->rss_delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ ctx->hfunc = ETH_RSS_HASH_CRC32;
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
- }
+ ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
+ if (ret)
+ return ret;
- if (rxfh->indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ if (!rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx));
+ else
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
rxfh->indir);
+ return ret;
+}
+static int mvpp2_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
+ rxfh->indir);
return ret;
}
+static int mvpp2_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_port_rss_ctx_delete(port, rss_context);
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5749,7 +5789,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .cap_rss_ctx_supported = true,
+ .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5772,6 +5812,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .create_rxfh_context = mvpp2_create_rxfh_context,
+ .modify_rxfh_context = mvpp2_modify_rxfh_context,
+ .remove_rxfh_context = mvpp2_remove_rxfh_context,
};
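With create_rxfh_context/modify_rxfh_context/remove_rxfh_context wired up and rxfh_max_num_contexts announcing the MVPP22_N_RSS_TABLES limit, the ethtool core allocates context IDs and tracks their state, while .set_rxfh only ever handles the default context (hence the NULL ctx delegation above). Typical use from userspace would be 'ethtool -X eth0 context new', then 'ethtool -X eth0 context 1 delete'. A toy dispatch model of the split (names hypothetical):

#include <stdio.h>

/* Toy model: the ethtool core owns context bookkeeping and calls the
 * driver ops only to program hardware.
 */
struct rxfh_ctx_ops {
	int (*create)(unsigned int id);
	int (*modify)(unsigned int id);
	int (*remove)(unsigned int id);
};

static int hw_create(unsigned int id) { printf("create %u\n", id); return 0; }
static int hw_modify(unsigned int id) { printf("modify %u\n", id); return 0; }
static int hw_remove(unsigned int id) { printf("remove %u\n", id); return 0; }

int main(void)
{
	const struct rxfh_ctx_ops ops = { hw_create, hw_modify, hw_remove };

	ops.create(1);	/* ethtool -X eth0 context new */
	ops.modify(1);	/* ethtool -X eth0 context 1 equal 4 */
	ops.remove(1);	/* ethtool -X eth0 context 1 delete */
	return 0;
}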
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -7417,8 +7460,6 @@ static int mvpp2_get_sram(struct platform_device *pdev,
static int mvpp2_probe(struct platform_device *pdev)
{
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -7591,7 +7632,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
priv->port_map |= BIT(i);
}
@@ -7614,7 +7655,7 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
/* Initialize ports */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
if (err < 0)
goto err_port_probe;
@@ -7653,14 +7694,8 @@ static int mvpp2_probe(struct platform_device *pdev)
return 0;
err_port_probe:
- fwnode_handle_put(port_fwnode);
-
- i = 0;
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i])
- mvpp2_port_remove(priv->port_list[i]);
- i++;
- }
+ for (i = 0; i < priv->port_count; i++)
+ mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
@@ -7677,18 +7712,13 @@ err_pp_clk:
static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
- struct fwnode_handle *port_fwnode;
+ int i, poolnum = MVPP2_BM_POOLS_NUM;
mvpp2_dbgfs_cleanup(priv);
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i]) {
- mutex_destroy(&priv->port_list[i]->gather_stats_lock);
- mvpp2_port_remove(priv->port_list[i]);
- }
- i++;
+ for (i = 0; i < priv->port_count; i++) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
}
destroy_workqueue(priv->stats_queue);
@@ -7711,7 +7741,7 @@ static void mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
- if (is_acpi_node(port_fwnode))
+ if (!dev_of_node(&pdev->dev))
return;
clk_disable_unprepare(priv->axi_clk);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ed2160cc5acb..6ea2f3071fe8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1856,8 +1856,9 @@ struct cpt_flt_eng_info_req {
struct cpt_flt_eng_info_rsp {
struct mbox_msghdr hdr;
- u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
- u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+#define CPT_AF_MAX_FLT_INT_VECS 3
+ u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS];
+ u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS];
u64 rsvd;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ac7ee3f3598c..1a97fb9032fa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2479,9 +2479,9 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
}
- mw->mbox_wq = alloc_workqueue(name,
+ mw->mbox_wq = alloc_workqueue("%s",
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- num);
+ num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
goto unmap_regions;
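alloc_workqueue() treats its first argument as a printf-style format, so passing the caller-built name directly would misinterpret any '%' it happens to contain; the fix pins the format to "%s" and passes the name as data. The same hazard in plain C, shown with snprintf() and a hypothetical name:

#include <stdio.h>

int main(void)
{
	const char name[] = "rvu_afpf%d";	/* hypothetical mbox wq name */
	char buf[32];

	/* Wrong: using the name itself as the format string would make
	 * the '%d' consume a nonexistent argument (undefined behavior):
	 *
	 *	snprintf(buf, sizeof(buf), name);
	 *
	 * Right: a fixed "%s" format treats the name as plain data.
	 */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("%s\n", buf);
	return 0;
}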
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 03ee93fd9e94..5016ba82e142 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -319,6 +319,7 @@ struct nix_mark_format {
/* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx {
+ u16 schq;
u64 cir_off;
u64 cir_val;
u64 pir_off;
@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
/* smq flush context */
struct nix_smq_flush_ctx {
int smq;
- u16 tl1_schq;
- u16 tl2_schq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
};
@@ -400,6 +399,7 @@ struct hw_cap {
bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
bool npc_hash_extract; /* Hash extract enabled ? */
bool npc_exact_match_enabled; /* Exact match supported ? */
+ bool cpt_rxc; /* Is CPT-RXC supported */
};
struct rvu_hwinfo {
@@ -690,6 +690,35 @@ static inline bool is_cnf10ka_a0(struct rvu *rvu)
return false;
}
+static inline bool is_cn10ka_a0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x0)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10ka_a1(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0x0F) == 0x1)
+ return true;
+ return false;
+}
+
+static inline bool is_cn10kb(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ return true;
+ return false;
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index daf4b951e905..3c5bbaf12e59 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -19,6 +19,12 @@
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 1ULL
+/* Interrupt vector count of CPT RVU and RAS interrupts */
+#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2
+
+/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */
+#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL
+
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
u64 free_sts = 0, busy_sts = 0; \
@@ -37,6 +43,41 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+#define MAX_AE GENMASK_ULL(47, 32)
+#define MAX_IE GENMASK_ULL(31, 16)
+#define MAX_SE GENMASK_ULL(15, 0)
+
+static u16 cpt_max_engines_get(struct rvu *rvu)
+{
+ u16 max_ses, max_ies, max_aes;
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1);
+ max_ses = FIELD_GET(MAX_SE, reg);
+ max_ies = FIELD_GET(MAX_IE, reg);
+ max_aes = FIELD_GET(MAX_AE, reg);
+
+ return max_ses + max_ies + max_aes;
+}
+
+/* The number of flt interrupt vectors depends on the number of engines
+ * that the chip has. Each flt vector represents 64 engines.
+ */
+static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
+{
+ int flt_vecs;
+
+ flt_vecs = DIV_ROUND_UP(max_engs, 64);
+
+ if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
+ dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
+ flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX);
+ flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX;
+ }
+
+ return flt_vecs;
+}
+
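cpt_10k_flt_nvecs_get() derives the FLT vector count from the engine total read out of CPT_AF_CONSTANTS1, one vector per 64 engines, and the register/unregister paths below then build a per-vector enable mask covering only the engines that vector actually serves. A standalone sketch of that arithmetic, assuming INTR_MASK(nr) means "low nr bits set" as in the RVU headers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* Assumed to match the RVU INTR_MASK(): low 'nr' bits set. */
#define INTR_MASK(nr)		((nr) < 64 ? (1ULL << (nr)) - 1 : ~0ULL)

int main(void)
{
	unsigned int max_engs = 144;	/* e.g. SE + IE + AE totals */
	unsigned int flt_vecs = DIV_ROUND_UP(max_engs, 64);	/* 3 */
	unsigned int i, nr;

	for (i = 0; i < flt_vecs; i++) {
		nr = max_engs > 64 ? 64 : max_engs;
		max_engs -= nr;
		/* FLT0/FLT1 get ~0ULL, FLT2 gets 0xffff on this part,
		 * matching the old hard-coded per-vector writes.
		 */
		printf("FLT%u mask 0x%016llx\n", i, INTR_MASK(nr));
	}
	return 0;
}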
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
@@ -150,17 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- int i;
+ int i, flt_vecs;
+ u16 max_engs;
+ u8 nr;
+
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
/* Disable all CPT AF interrupts */
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i),
+ INTR_MASK(nr));
+ }
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
- for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ /* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */
+ for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++)
if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
@@ -206,12 +256,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu)
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
+ int rvu_intr_vec, ras_intr_vec;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
irq_handler_t flt_fn;
- int i, ret;
+ int i, ret, flt_vecs;
+ u16 max_engs;
+ u8 nr;
- for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
@@ -229,20 +285,24 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- if (i == CPT_10K_AF_INT_VEC_FLT2)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
- else
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+
+ nr = (max_engs > 64) ? 64 : max_engs;
+ max_engs -= nr;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i),
+ INTR_MASK(nr));
}
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_intr_vec = flt_vecs;
+ ras_intr_vec = rvu_intr_vec + 1;
+
+ ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
- ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
@@ -680,6 +740,7 @@ static bool validate_and_update_reg_offset(struct rvu *rvu,
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
case CPT_AF_CTX_FLUSH_TIMER:
+ case CPT_AF_RXC_CFG1:
return true;
}
@@ -732,6 +793,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+
if (is_rvu_otx2(rvu))
return;
@@ -755,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+ if (!hw->cap.cpt_rxc)
+ return;
rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
- rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
- rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
@@ -921,13 +986,17 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
+ int flt_vecs;
+ u16 max_engs;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
- for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ max_engs = cpt_max_engines_get(rvu);
+ flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
+ for (vec = 0; vec < flt_vecs; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
@@ -943,10 +1012,11 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req, prev;
+ struct rvu_hwinfo *hw = rvu->hw;
int timeout = 2000;
u64 reg;
- if (is_rvu_otx2(rvu))
+ if (!hw->cap.cpt_rxc)
return;
/* Set time limit to minimum values, so that rxc entries will be
@@ -1219,10 +1289,30 @@ unlock:
return 0;
}
+#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32)
+
int rvu_cpt_init(struct rvu *rvu)
{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg_val;
+
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) &&
+ !is_cn10kb(rvu))
+ hw->cap.cpt_rxc = true;
+
+ if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) {
+ /* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to affect
+ * inline inbound peak performance.
+ */
+ reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1);
+ reg_val &= ~MAX_RXC_ICB_CNT;
+ reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT,
+ CPT_DFLT_MAX_RXC_ICB_CNT);
+ rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val);
+ }
+
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
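The new CPT_AF_RXC_CFG1 programming above is a plain read-modify-write of bits 40:32 (max_rxc_icb_cnt) to the 0xC0 default. The same field update in standalone form, with the bit layout taken from the MAX_RXC_ICB_CNT definition in the hunk:

#include <stdio.h>

/* Assumed layout from MAX_RXC_ICB_CNT: bits 40:32 of RXC_CFG1. */
#define MAX_RXC_ICB_CNT_SHIFT	32
#define MAX_RXC_ICB_CNT_MASK	(0x1FFULL << MAX_RXC_ICB_CNT_SHIFT)
#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL

int main(void)
{
	unsigned long long reg = 0x123456789abcdefULL;	/* pretend readout */

	reg &= ~MAX_RXC_ICB_CNT_MASK;
	reg |= (CPT_DFLT_MAX_RXC_ICB_CNT << MAX_RXC_ICB_CNT_SHIFT) &
	       MAX_RXC_ICB_CNT_MASK;
	printf("CPT_AF_RXC_CFG1 = 0x%016llx\n", reg);
	return 0;
}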
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 4a4ef5bd9e0b..87ba77e5026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -838,10 +838,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
+ char cgx[10], lmac[10], chan[10];
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
struct mac_ops *mac_ops;
- char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
u8 cgx_id, lmac_id;
@@ -852,7 +852,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
/* There can be no CGX devices at all */
if (!mac_ops)
return 0;
- seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
@@ -876,8 +876,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
&lmac_id);
sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
- seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
- dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ sprintf(chan, "%d",
+ rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
+ chan);
pci_dev_put(pdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 222f9e00b836..82832a24fbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
schq = smq;
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ smq_tree_ctx->schq = schq;
if (lvl == NIX_TXSCH_LVL_TL1) {
- smq_flush_ctx->tl1_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
smq_tree_ctx->pir_off = 0;
smq_tree_ctx->pir_val = 0;
parent_off = 0;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- smq_flush_ctx->tl2_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ int tl2, tl2_schq;
u64 regoff;
- int tl2;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
/* loop through all TL2s with matching PF_FUNC */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
/* skip the smq(flush) TL2 */
- if (tl2 == smq_flush_ctx->tl2_schq)
+ if (tl2 == tl2_schq)
continue;
/* skip unused TL2s */
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
continue;
/* skip if PF_FUNC doesn't match */
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
~RVU_PFVF_FUNC_MASK)))
continue;
/* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
struct nix_smq_flush_ctx *smq_flush_ctx;
+ int err, restore_tx_en = 0, i;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- int err, restore_tx_en = 0;
- u64 cfg;
+ u16 tl2_tl3_link_schq;
+ u8 link, link_level;
+ u64 cfg, bmap = 0;
if (!is_rvu_otx2(rvu)) {
/* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+ /* SMQ set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Clear all NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+ bmap |= (1 << i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
+ /* Do SMQ flush and set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
nixlf, smq);
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ if (!(bmap & (1 << i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ cfg |= BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
/* clear XOFF on TL2s */
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
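The reworked flush sequence quiesces the Tx path in stages: set enqueue XOFF on the SMQ, clear the ENA bit (bit 12) of the TL3/TL2-to-link config for every link that had it set while remembering those links in a bitmap, run the flush, then restore exactly the links that were enabled before. A standalone model of that save/disable/restore bookkeeping:

#include <stdio.h>

#define NUM_LINKS 4
#define LINK_ENA  (1ULL << 12)	/* ENA bit of NIX_AF_TL3_TL2_LINKX_CFG */

int main(void)
{
	/* modeled per-link config; links 0, 2 and 3 start out enabled */
	unsigned long long link_cfg[NUM_LINKS] = {
		LINK_ENA, 0, LINK_ENA, LINK_ENA
	};
	unsigned long long bmap = 0;
	int i;

	/* disable every enabled link, remembering which ones were on */
	for (i = 0; i < NUM_LINKS; i++) {
		if (!(link_cfg[i] & LINK_ENA))
			continue;
		bmap |= 1ULL << i;
		link_cfg[i] &= ~LINK_ENA;
	}

	/* ... the SMQ flush would run here with the links quiesced ... */

	/* restore exactly the links that were enabled before the flush */
	for (i = 0; i < NUM_LINKS; i++)
		if (bmap & (1ULL << i))
			link_cfg[i] |= LINK_ENA;

	printf("restored links bitmap = 0x%llx\n", bmap);	/* 0xd */
	return 0;
}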
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index d56be5fb7eb4..2b299fa85159 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -545,6 +545,7 @@
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_CFG1 (0x50000ull)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5ef406c7e8a4..fc8da2090657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -71,13 +71,11 @@ enum cpt_af_int_vec_e {
CPT_AF_INT_VEC_CNT = 0x4,
};
-enum cpt_10k_af_int_vec_e {
+enum cpt_cn10k_flt_int_vec_e {
CPT_10K_AF_INT_VEC_FLT0 = 0x0,
CPT_10K_AF_INT_VEC_FLT1 = 0x1,
CPT_10K_AF_INT_VEC_FLT2 = 0x2,
- CPT_10K_AF_INT_VEC_RVU = 0x3,
- CPT_10K_AF_INT_VEC_RAS = 0x4,
- CPT_10K_AF_INT_VEC_CNT = 0x5,
+ CPT_10K_AF_INT_VEC_FLT_MAX = 0x3,
};
/* NPA Admin function Interrupt Vector Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 0db62eb0dab3..32468c663605 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -962,8 +962,6 @@ static int otx2_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3eb85949677a..933e18ba2fb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -687,7 +687,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
- u16 iplen;
+ __be16 iplen;
ext->lso_sb = skb_transport_offset(skb) +
sizeof(struct udphdr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 63ae01954dfc..22ca6ee9665e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -633,7 +633,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_dl_port_register;
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_TC;
+ dev->netns_local = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 1c5b85a86df1..930f180688e5 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -18,6 +18,7 @@
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
@@ -66,9 +67,11 @@
#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
#define FE_RST_CORE_MASK BIT(0)
+#define REG_FE_WAN_MAC_H 0x0030
#define REG_FE_LAN_MAC_H 0x0040
-#define REG_FE_LAN_MAC_LMIN 0x0044
-#define REG_FE_LAN_MAC_LMAX 0x0048
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
#define REG_FE_CDM1_OQ_MAP0 0x0050
#define REG_FE_CDM1_OQ_MAP1 0x0054
@@ -727,7 +730,7 @@ struct airoha_queue_entry {
};
struct airoha_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
/* protect concurrent queue accesses */
spinlock_t lock;
@@ -746,7 +749,7 @@ struct airoha_queue {
};
struct airoha_tx_irq_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
struct napi_struct napi;
u32 *q;
@@ -782,9 +785,30 @@ struct airoha_hw_stats {
u64 rx_len[7];
};
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
struct net_device *dev;
- struct airoha_eth *eth;
int id;
struct airoha_hw_stats stats;
@@ -794,31 +818,15 @@ struct airoha_eth {
struct device *dev;
unsigned long state;
-
- void __iomem *qdma_regs;
void __iomem *fe_regs;
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
-
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
struct net_device *napi_dev;
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};
static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -850,60 +858,72 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
#define airoha_fe_clear(eth, offset, val) \
airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-#define airoha_qdma_rr(eth, offset) \
- airoha_rr((eth)->qdma_regs, (offset))
-#define airoha_qdma_wr(eth, offset, val) \
- airoha_wr((eth)->qdma_regs, (offset), (val))
-#define airoha_qdma_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-#define airoha_qdma_set(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-#define airoha_qdma_clear(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
-
-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
return;
- spin_lock_irqsave(&eth->irq_lock, flags);
+ spin_lock_irqsave(&qdma->irq_lock, flags);
- eth->irqmask[index] &= ~clear;
- eth->irqmask[index] |= set;
- airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+ qdma->irqmask[index] &= ~clear;
+ qdma->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_unlock_irqrestore(&qdma->irq_lock, flags);
}
-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, 0, mask);
+ airoha_qdma_set_irqmask(qdma, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, mask, 0);
+ airoha_qdma_set_irqmask(qdma, index, mask, 0);
}
-static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
{
- u32 val;
+ /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+ * GDM{2,3,4} can be used as WAN ports connected to an external
+ * PHY module.
+ */
+ return port->id == 1;
+}
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, reg;
+
+ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+ : REG_FE_WAN_MAC_H;
val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
+ airoha_fe_wr(eth, reg, val);
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
}
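The REG_FE_MAC_LMIN/LMAX helpers generalize the old LAN-only constants: the low halves of a MAC address always live at base+0x04 and base+0x08, so the same code programs either the LAN (GDM1) or WAN (GDM2-4) block from the chosen _MAC_H base. A standalone sketch of the resulting register/value pairs for an example address:

#include <stdio.h>

#define REG_FE_WAN_MAC_H	0x0030
#define REG_FE_LAN_MAC_H	0x0040
#define REG_FE_MAC_LMIN(_n)	((_n) + 0x04)
#define REG_FE_MAC_LMAX(_n)	((_n) + 0x08)

int main(void)
{
	const unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int base = REG_FE_LAN_MAC_H;	/* GDM1 -> LAN block */
	unsigned int hi = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	unsigned int lo = (addr[3] << 16) | (addr[4] << 8) | addr[5];

	/* H:0x0040=0x001122 LMIN:0x0044=0x334455 LMAX:0x0048=0x334455 */
	printf("H:0x%04x=0x%06x LMIN:0x%04x=0x%06x LMAX:0x%04x=0x%06x\n",
	       base, hi, REG_FE_MAC_LMIN(base), lo,
	       REG_FE_MAC_LMAX(base), lo);
	return 0;
}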
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -1383,8 +1403,9 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int nframes = 0;
while (q->queued < q->ndesc - 1) {
@@ -1420,7 +1441,8 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
WRITE_ONCE(desc->msg2, 0);
WRITE_ONCE(desc->msg3, 0);
- airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+ RX_RING_CPU_IDX_MASK,
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
}
@@ -1450,8 +1472,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int done = 0;
while (done < budget) {
@@ -1513,7 +1536,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
- struct airoha_eth *eth = q->eth;
int cur, done = 0;
do {
@@ -1522,14 +1544,14 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
return done;
}
-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int ndesc)
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
{
const struct page_pool_params pp_params = {
.order = 0,
@@ -1538,15 +1560,16 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
.dma_dir = DMA_FROM_DEVICE,
.max_len = PAGE_SIZE,
.nid = NUMA_NO_NODE,
- .dev = eth->dev,
+ .dev = qdma->eth->dev,
.napi = &q->napi,
};
- int qid = q - &eth->q_rx[0], thr;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
dma_addr_t dma_addr;
q->buf_size = PAGE_SIZE / 2;
q->ndesc = ndesc;
- q->eth = eth;
+ q->qdma = qdma;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
GFP_KERNEL);
@@ -1568,14 +1591,15 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
- airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
thr = clamp(ndesc >> 3, 1, 32);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
FIELD_PREP(RX_RING_THR_MASK, thr));
- airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
airoha_qdma_fill_rx_queue(q);
@@ -1585,7 +1609,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
while (q->queued) {
struct airoha_queue_entry *e = &q->entry[q->tail];
@@ -1599,11 +1623,11 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
}
}
-static int airoha_qdma_init_rx(struct airoha_eth *eth)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
int err;
if (!(RX_DONE_INT_MASK & BIT(i))) {
@@ -1611,7 +1635,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
continue;
}
- err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
RX_DSCP_NUM(i));
if (err)
return err;
@@ -1623,12 +1647,14 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ struct airoha_qdma *qdma;
struct airoha_eth *eth;
int id, done = 0;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
- eth = irq_q->eth;
- id = irq_q - &eth->q_tx_irq[0];
+ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
+ eth = qdma->eth;
while (irq_q->queued > 0 && done < budget) {
u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1645,10 +1671,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
- if (qid >= ARRAY_SIZE(eth->q_tx))
+ if (qid >= ARRAY_SIZE(qdma->q_tx))
continue;
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (!q->ndesc)
continue;
@@ -1697,28 +1723,29 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
int i, len = done >> 7;
for (i = 0; i < len; i++)
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, 0x80);
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, (done & 0x7f));
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
}
-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int size)
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
{
- int i, qid = q - &eth->q_tx[0];
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
dma_addr_t dma_addr;
spin_lock_init(&q->lock);
q->ndesc = size;
- q->eth = eth;
+ q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1738,20 +1765,20 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
- airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
- airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
return 0;
}
-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
- struct airoha_tx_irq_queue *irq_q,
- int size)
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
{
- int id = irq_q - &eth->q_tx_irq[0];
+ int id = irq_q - &qdma->q_tx_irq[0];
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1763,30 +1790,30 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
memset(irq_q->q, 0xff, size * sizeof(u32));
irq_q->size = size;
- irq_q->eth = eth;
+ irq_q->qdma = qdma;
- airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
FIELD_PREP(TX_IRQ_THR_MASK, 1));
return 0;
}
-static int airoha_qdma_init_tx(struct airoha_eth *eth)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
int i, err;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
IRQ_QUEUE_LEN(i));
if (err)
return err;
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
TX_DSCP_NUM);
if (err)
return err;
@@ -1797,7 +1824,7 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
spin_lock_bh(&q->lock);
while (q->queued) {
@@ -1814,34 +1841,35 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
spin_unlock_bh(&q->lock);
}
-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
u32 status;
int size;
size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.desc)
+ qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.desc)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.q)
+ qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.q)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
- airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
- airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
- airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
@@ -1849,87 +1877,87 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
- 30 * USEC_PER_MSEC, true, eth,
+ 30 * USEC_PER_MSEC, true, qdma,
REG_LMGR_INIT_CFG);
}
-static void airoha_qdma_init_qos(struct airoha_eth *eth)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
- airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
- airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
- airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
PSE_BUF_ESTIMATE_EN_MASK);
- airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_EN_MASK |
EGRESS_RATE_METER_EQ_RATE_EN_MASK);
/* 2047us x 31 = 63.457ms */
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_WINDOW_SZ_MASK,
FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_TIMESLICE_MASK,
FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
/* ratelimit init */
- airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
/* fast-tick 25us */
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
FIELD_PREP(GLB_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
EGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
- airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_TRTCM_MODE_MASK);
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
- airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
FIELD_PREP(SLA_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
-static int airoha_qdma_hw_init(struct airoha_eth *eth)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
/* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
- airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
/* setup irqs */
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
/* setup irq binding */
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
- airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
else
- airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
}
- airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
@@ -1940,18 +1968,18 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
GLOBAL_CFG_TX_WB_DONE_MASK |
FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
- airoha_qdma_init_qos(eth);
+ airoha_qdma_init_qos(qdma);
/* disable qdma rx delay interrupt */
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
RX_DELAY_INT_MASK);
}
- airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
return 0;
@@ -1959,150 +1987,180 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_eth *eth = dev_instance;
- u32 intr[ARRAY_SIZE(eth->irqmask)];
+ struct airoha_qdma *qdma = dev_instance;
+ u32 intr[ARRAY_SIZE(qdma->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
- intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
- intr[i] &= eth->irqmask[i];
- airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= qdma->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
- if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
if (intr[1] & BIT(i))
- napi_schedule(&eth->q_rx[i].napi);
+ napi_schedule(&qdma->q_rx[i].napi);
}
}
if (intr[0] & INT_TX_MASK) {
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
u32 status, head;
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
- status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
irq_q->head = head % irq_q->size;
irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
- napi_schedule(&eth->q_tx_irq[i].napi);
+ napi_schedule(&qdma->q_tx_irq[i].napi);
}
}
return IRQ_HANDLED;
}
-static int airoha_qdma_init(struct airoha_eth *eth)
+static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
{
- int err;
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
- err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, eth);
- if (err)
- return err;
+ spin_lock_init(&qdma->irq_lock);
+ qdma->eth = eth;
- err = airoha_qdma_init_rx(eth);
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+
+ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+ if (IS_ERR(qdma->regs))
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+ qdma->irq = platform_get_irq(pdev, 4 * id);
+ if (qdma->irq < 0)
+ return qdma->irq;
+
+ err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, qdma);
if (err)
return err;
- err = airoha_qdma_init_tx(eth);
+ err = airoha_qdma_init_rx(qdma);
if (err)
return err;
- err = airoha_qdma_init_hfwd_queues(eth);
+ err = airoha_qdma_init_tx(qdma);
if (err)
return err;
- err = airoha_qdma_hw_init(eth);
+ err = airoha_qdma_init_hfwd_queues(qdma);
if (err)
return err;
- set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
- return 0;
+ return airoha_qdma_hw_init(qdma);
}
-static int airoha_hw_init(struct airoha_eth *eth)
+static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
{
- int err;
+ int err, i;
/* disable xsi */
- reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
+ eth->xsi_rsts);
+ if (err)
+ return err;
+
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
- reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
- msleep(20);
- reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
msleep(20);
+ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+ msleep(20);
err = airoha_fe_init(eth);
if (err)
return err;
- return airoha_qdma_init(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+ if (err)
+ return err;
+ }
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
}
-static void airoha_hw_cleanup(struct airoha_eth *eth)
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&eth->q_rx[i].napi);
- netif_napi_del(&eth->q_rx[i].napi);
- airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
- if (eth->q_rx[i].page_pool)
- page_pool_destroy(eth->q_rx[i].page_pool);
+ napi_disable(&qdma->q_rx[i].napi);
+ netif_napi_del(&qdma->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ if (qdma->q_rx[i].page_pool)
+ page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- napi_disable(&eth->q_tx_irq[i].napi);
- netif_napi_del(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ napi_disable(&qdma->q_tx_irq[i].napi);
+ netif_napi_del(&qdma->q_tx_irq[i].napi);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
- airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
}
}
-static void airoha_qdma_start_napi(struct airoha_eth *eth)
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
- napi_enable(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_enable(&qdma->q_tx_irq[i].napi);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_enable(&eth->q_rx[i].napi);
+ napi_enable(&qdma->q_rx[i].napi);
}
}
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
u32 val, i = 0;
spin_lock(&port->stats.lock);
@@ -2247,23 +2305,24 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(eth, true);
+ err = airoha_set_gdm_ports(qdma->eth, true);
if (err)
return err;
if (netdev_uses_dsa(dev))
- airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
else
- airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2271,16 +2330,17 @@ static int airoha_dev_open(struct net_device *dev)
static int airoha_dev_stop(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(eth, false);
+ err = airoha_set_gdm_ports(qdma->eth, false);
if (err)
return err;
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2294,7 +2354,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
if (err)
return err;
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2303,7 +2363,7 @@ static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2337,7 +2397,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct airoha_gdm_port *port = netdev_priv(dev);
u32 msg0 = 0, msg1, len = skb_headlen(skb);
int i, qid = skb_get_queue_mapping(skb);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
@@ -2367,7 +2427,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (WARN_ON_ONCE(!q->ndesc))
goto error;
@@ -2411,7 +2471,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
e->dma_addr = addr;
e->dma_len = len;
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
data = skb_frag_address(frag);
@@ -2448,7 +2509,7 @@ static void airoha_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
@@ -2529,6 +2590,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
+ struct airoha_qdma *qdma;
struct net_device *dev;
int err, index;
u32 id;
@@ -2558,6 +2620,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
return -ENOMEM;
}
+ qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
dev->netdev_ops = &airoha_netdev_ops;
dev->ethtool_ops = &airoha_ethtool_ops;
dev->max_mtu = AIROHA_MAX_MTU;
@@ -2567,6 +2630,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
+ dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
err = of_get_ethdev_address(np, dev);
@@ -2582,8 +2646,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port = netdev_priv(dev);
u64_stats_init(&port->stats.syncp);
spin_lock_init(&port->stats.lock);
+ port->qdma = qdma;
port->dev = dev;
- port->eth = eth;
port->id = id;
eth->ports[index] = port;
@@ -2613,11 +2677,6 @@ static int airoha_probe(struct platform_device *pdev)
return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
"failed to iomap fe regs\n");
- eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
- if (IS_ERR(eth->qdma_regs))
- return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
- "failed to iomap qdma regs\n");
-
eth->rsts[0].id = "fe";
eth->rsts[1].id = "pdma";
eth->rsts[2].id = "qdma";
@@ -2642,11 +2701,6 @@ static int airoha_probe(struct platform_device *pdev)
return err;
}
- spin_lock_init(&eth->irq_lock);
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0)
- return eth->irq;
-
eth->napi_dev = alloc_netdev_dummy(0);
if (!eth->napi_dev)
return -ENOMEM;
@@ -2656,11 +2710,13 @@ static int airoha_probe(struct platform_device *pdev)
strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
platform_set_drvdata(pdev, eth);
- err = airoha_hw_init(eth);
+ err = airoha_hw_init(pdev, eth);
if (err)
goto error;
- airoha_qdma_start_napi(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_start_napi(&eth->qdma[i]);
+
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -2678,7 +2734,9 @@ static int airoha_probe(struct platform_device *pdev)
return 0;
error:
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2696,7 +2754,9 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2715,6 +2775,7 @@ static const struct of_device_id of_airoha_match[] = {
{ .compatible = "airoha,en7581-eth" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_airoha_match);
static struct platform_driver airoha_driver = {
.probe = airoha_probe,
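Throughout the airoha hunks above, each queue keeps a back-pointer to its owning struct airoha_qdma, and its index is recovered by pointer subtraction against the embedded array (id = irq_q - &qdma->q_tx_irq[0]) rather than being stored separately. A minimal userspace sketch of that idiom — hypothetical demo_* names, not the driver's real types:

#include <stdio.h>

struct demo_qdma;

struct demo_queue {
        struct demo_qdma *qdma;         /* back-pointer to the owner */
        int ndesc;
};

struct demo_qdma {
        struct demo_queue q_tx[8];
};

/* Recover a queue's index from its address: well-defined C pointer
 * subtraction as long as q points into qdma->q_tx[]. */
static int demo_queue_id(const struct demo_queue *q)
{
        return (int)(q - &q->qdma->q_tx[0]);
}

int main(void)
{
        struct demo_qdma qdma = { 0 };
        int i;

        for (i = 0; i < 8; i++)
                qdma.q_tx[i].qdma = &qdma;

        printf("queue 5 id = %d\n", demo_queue_id(&qdma.q_tx[5]));
        return 0;
}

Storing only the back-pointer keeps the queue struct small and makes the index impossible to get out of sync with the queue's position in the array.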
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eb1708b43aa3..0d5225f1d3ee 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -724,12 +724,8 @@ enum mtk_clks_map {
MTK_CLK_ETHWARP_WOCPU2,
MTK_CLK_ETHWARP_WOCPU1,
MTK_CLK_ETHWARP_WOCPU0,
- MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
- MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
MTK_CLK_TOP_SGM_0_SEL,
MTK_CLK_TOP_SGM_1_SEL,
- MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
- MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
MTK_CLK_TOP_ETH_GMII_SEL,
MTK_CLK_TOP_ETH_REFCK_50M_SEL,
MTK_CLK_TOP_ETH_SYS_200M_SEL,
@@ -800,19 +796,9 @@ enum mtk_clks_map {
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
BIT_ULL(MTK_CLK_CRYPTO) | \
- BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 0acee405a749..ada852adc5f7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,8 +8,11 @@
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+
#include <net/dst_metadata.h>
#include <net/dsa.h>
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
@@ -338,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
- int i;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
@@ -359,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
return -EINVAL;
}
- for (i = 0; i < 4; i++)
- src[i] = be32_to_cpu(src_addr[i]);
- for (i = 0; i < 4; i++)
- dest[i] = be32_to_cpu(dest_addr[i]);
+ ipv6_addr_be32_to_cpu(src, src_addr);
+ ipv6_addr_be32_to_cpu(dest, dest_addr);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 1a97feca77f2..570ebf91f693 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -3,6 +3,9 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
+
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
struct mtk_flow_addr_info
@@ -47,16 +50,14 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- __be32 n_addr[4];
- int i;
+ __be32 n_addr[IPV6_ADDR_WORDS];
if (!ipv6) {
seq_printf(m, "%pI4h", addr);
return;
}
- for (i = 0; i < ARRAY_SIZE(n_addr); i++)
- n_addr[i] = htonl(addr[i]);
+ ipv6_addr_cpu_to_be32(n_addr, addr);
seq_printf(m, "%pI6", n_addr);
}
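The mtk_ppe.c and mtk_ppe_debugfs.c hunks above replace open-coded four-word loops with the shared ipv6_addr_be32_to_cpu()/ipv6_addr_cpu_to_be32() helpers and the IPV6_ADDR_WORDS constant. A rough userspace equivalent of what such a helper does (demo_* names are hypothetical; IPV6_ADDR_WORDS is assumed to be 4):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* ntohl()/htonl() */

#define IPV6_ADDR_WORDS 4       /* assumed value of the kernel constant */

/* Convert the four 32-bit words of an IPv6 address from big-endian
 * wire order to host byte order. */
static void demo_ipv6_addr_be32_to_cpu(uint32_t *dst, const uint32_t *src)
{
        int i;

        for (i = 0; i < IPV6_ADDR_WORDS; i++)
                dst[i] = ntohl(src[i]);
}

int main(void)
{
        uint32_t wire[IPV6_ADDR_WORDS] = { htonl(0x20010db8), 0, 0, htonl(1) };
        uint32_t host[IPV6_ADDR_WORDS];

        demo_ipv6_addr_be32_to_cpu(host, wire);
        printf("first word: 0x%08x\n", host[0]);        /* 0x20010db8 */
        return 0;
}

Centralizing the conversion avoids each caller re-deriving the four-word loop and its index bookkeeping.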
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 943d6918c2ec..cd17a3f4faf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2036,20 +2036,20 @@ static int mlx4_en_get_module_info(struct net_device *dev,
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 685335832a93..ea6070180c96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -172,6 +172,16 @@ config MLX5_SW_STEERING
help
Build support for software-managed steering in the NIC.
+config MLX5_HW_STEERING
+ bool "Mellanox Technologies hardware-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for Hardware-Managed Flow Steering (HMFS) in the NIC.
+ HMFS is a new approach to managing steering rules where STEs are
+ written to ICM by HW (as opposed to SW in software-managed steering),
+	  which allows a higher rate of rule insertion.
+
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1289475e7be7..5912f7e614f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_action.o steering/fs_dr.o \
steering/dr_definer.o steering/dr_ptrn.o \
steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+
+#
+# HW Steering
+#
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
+ steering/hws/mlx5hws_context.o \
+ steering/hws/mlx5hws_pat_arg.o \
+ steering/hws/mlx5hws_buddy.o \
+ steering/hws/mlx5hws_pool.o \
+ steering/hws/mlx5hws_table.o \
+ steering/hws/mlx5hws_action.o \
+ steering/hws/mlx5hws_rule.o \
+ steering/hws/mlx5hws_matcher.o \
+ steering/hws/mlx5hws_send.o \
+ steering/hws/mlx5hws_definer.o \
+ steering/hws/mlx5hws_bwc.o \
+ steering/hws/mlx5hws_debug.o \
+ steering/hws/mlx5hws_vport.o \
+ steering/hws/mlx5hws_bwc_complex.o
+
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 20768ef2e9d2..a64d96effb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
+ case MLX5_CMD_STAT_NOT_READY:
+ return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
@@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
@@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
+ u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
- opcode != MLX5_CMD_OP_CREATE_UCTX)
+ opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
@@ -1882,10 +1887,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (callback) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem))
+ return -EBUSY;
+ } else {
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
@@ -2091,10 +2098,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ u16 opcode;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ opcode = work->opcode;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (mlx5_cmd_is_throttle_opcode(opcode))
+ up(&dev->cmd.vars.throttle_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
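This cmd.c change lets throttled commands be issued from contexts that may not sleep: asynchronous callers grab the throttle semaphore with a non-blocking down_trylock(), and the slot is released from the completion handler, keyed off the opcode saved before the callback can free the work item. A loose userspace analogy using POSIX semaphores (demo_* names are hypothetical; the kernel path uses struct semaphore and command completion callbacks):

#include <stdio.h>
#include <errno.h>
#include <semaphore.h>

static sem_t throttle_sem;

/* Blocking path: a sleeping caller simply waits its turn. */
static int demo_exec_sync(void)
{
        sem_wait(&throttle_sem);
        /* ... issue command, wait for completion ... */
        sem_post(&throttle_sem);
        return 0;
}

/* Non-sleeping path: try once and bail out with -EBUSY, mirroring
 * down_trylock() in the patch. The matching sem_post() would run
 * in the completion callback. */
static int demo_exec_async(void)
{
        if (sem_trywait(&throttle_sem))
                return -EBUSY;
        /* ... queue command; the callback does sem_post() ... */
        return 0;
}

int main(void)
{
        sem_init(&throttle_sem, 0, 1);
        printf("async #1: %d\n", demo_exec_async());    /* 0: slot taken */
        printf("async #2: %d\n", demo_exec_async());    /* -EBUSY */
        sem_post(&throttle_sem);                        /* "completion" fires */
        printf("sync: %d\n", demo_exec_sync());         /* 0 */
        sem_destroy(&throttle_sem);
        return 0;
}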
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index ddf1b87f1bc0..9aed29fa4900 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
- __entry->action = fte->action.action;
+ __entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->flow_context.flow_tag;
- __entry->flow_source = fte->flow_context.flow_source;
+ __entry->flow_tag = fte->act_dests.flow_context.flow_tag;
+ __entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
@@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
- __entry->index = __entry->fte->dests_size - 1;
+ __entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d9e241423bc5..da0a1c65ec4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1173,14 +1173,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal);
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index bb6b1a979ba1..62b3f7ff5562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops {
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule);
void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+ int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr);
size_t priv_size;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
index ae4f55be48ce..64a82aafaaca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(dmfs_rule);
}
+static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+ mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr);
+
+ dmfs_rule->rule = rule;
+ dmfs_rule->attr = attr;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops dmfs_ops = {
.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update,
.init = mlx5_ct_fs_dmfs_init,
.destroy = mlx5_ct_fs_dmfs_destroy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 8c531f4ec912..1c062a2e8996 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(smfs_rule);
}
+static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. */
+ struct mlx5dr_rule *rule;
+
+ actions[0] = smfs_rule->count_action;
+ actions[1] = attr->modify_hdr->action.dr_action;
+ actions[2] = fs_smfs->fwd_action;
+
+ rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
+ ARRAY_SIZE(actions), actions, spec->flow_context.flow_source);
+ if (!rule)
+ return -EINVAL;
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ smfs_rule->rule = rule;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops fs_smfs_ops = {
.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update,
.init = mlx5_ct_fs_smfs_init,
.destroy = mlx5_ct_fs_smfs_destroy,
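Both the dmfs and smfs ct_rule_update callbacks follow a make-before-break pattern: insert the replacement rule first, and destroy the old one only once the insert has succeeded, so a failed update leaves the original rule offloaded. A generic sketch of the pattern with hypothetical demo_* helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct demo_rule { int spec; };

static struct demo_rule *demo_rule_insert(int spec)
{
        struct demo_rule *r = malloc(sizeof(*r));

        if (r)
                r->spec = spec;
        return r;
}

static void demo_rule_delete(struct demo_rule *r)
{
        free(r);
}

/* Make-before-break: on failure the old rule keeps working. */
static int demo_rule_update(struct demo_rule **slot, int new_spec)
{
        struct demo_rule *new_rule = demo_rule_insert(new_spec);

        if (!new_rule)
                return -ENOMEM;
        demo_rule_delete(*slot);
        *slot = new_rule;
        return 0;
}

int main(void)
{
        struct demo_rule *rule = demo_rule_insert(1);

        if (!rule || demo_rule_update(&rule, 2))
                return 1;
        printf("rule spec now %d\n", rule->spec);       /* 2 */
        demo_rule_delete(rule);
        return 0;
}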
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 71a168746ebe..dcfccaaa8d91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -876,15 +876,14 @@ err_attr:
}
static int
-mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- bool nat, u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_ct_fs_rule *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
- ct_dbg("Failed to create ct entry mod hdr");
+ ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
goto err_mod_hdr;
}
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
+ if (err) {
+ ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
goto err_rule;
}
- ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
- zone_rule->rule = rule;
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
zone_rule->mh = mh;
mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
kfree(old_attr);
kvfree(spec);
- ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+ ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -1141,23 +1137,23 @@ err_orig:
}
static int
-mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
{
int err = 0;
if (mlx5_tc_ct_entry_in_ct_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
if (err)
return err;
}
if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
if (err && mlx5_tc_ct_entry_in_ct_table(entry))
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
}
@@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
}
static int
-mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry, unsigned long cookie)
+mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
{
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
int err;
- err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
if (!err)
return 0;
@@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
- err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
+ err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 6cc23af66b5b..efb34de4cb7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
+ struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs;
u32 chain_mapping;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 797db853de36..53cfa39188cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->lft.soft_packet_limit != XFRM_INF) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 36845872ae94..1966736f98b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
- unsigned int i, bit, idx; \
+ unsigned int i; \
cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
- bit = modes[i] % 64; \
- idx = modes[i] / 64; \
- __set_bit(bit, &cfg->supported[idx]); \
- __set_bit(bit, &cfg->advertised[idx]); \
+ bitmap_set(cfg->supported, modes[i], 1); \
+ bitmap_set(cfg->advertised, modes[i], 1); \
} \
})
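The macro rewrite here (and the mlx5e_ethtool2ptys_*_adver_link() hunks further down) drops manual `% 64` / `/ 64` word arithmetic in favor of the bitmap API — bitmap_set(), bitmap_empty(), bitmap_equal() — which keeps the code independent of the word size behind the link-mode masks. A small userspace rendition of what those helpers do, with hypothetical demo_* names:

#include <stdio.h>
#include <string.h>

#define NBITS           128
#define BITS_PER_WORD   (8 * sizeof(unsigned long))
#define NWORDS          ((NBITS + BITS_PER_WORD - 1) / BITS_PER_WORD)

/* One helper hides the word/bit split entirely. */
static void demo_bitmap_set(unsigned long *map, unsigned int bit)
{
        map[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static int demo_bitmap_empty(const unsigned long *map)
{
        unsigned int i;

        for (i = 0; i < NWORDS; i++)
                if (map[i])
                        return 0;
        return 1;
}

int main(void)
{
        unsigned long advertised[NWORDS];

        memset(advertised, 0, sizeof(advertised));
        printf("empty: %d\n", demo_bitmap_empty(advertised));  /* 1 */
        demo_bitmap_set(advertised, 70);        /* lands in word 1 on LP64 */
        printf("empty: %d\n", demo_bitmap_empty(advertised));  /* 0 */
        return 0;
}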
@@ -139,6 +137,10 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
@@ -204,6 +206,12 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
@@ -354,35 +362,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
- if (param->rx_jumbo_pending) {
- netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
- __func__);
- return -EINVAL;
- }
- if (param->rx_mini_pending) {
- netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
- __func__);
- return -EINVAL;
- }
-
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
- __func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
+ param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)",
+ param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
@@ -418,7 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -445,7 +443,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
- int rss_cnt;
bool opened;
int err = 0;
@@ -499,17 +496,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- /* Don't allow changing the number of channels if non-default RSS contexts exist,
- * the kernel doesn't protect against set_channels operations that break them.
- */
- rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
- if (rss_cnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
- __func__, rss_cnt);
- goto out;
- }
-
/* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
* because it defines a partition over the channels queues.
*/
@@ -557,12 +543,15 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal)
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
- if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec;
@@ -586,7 +575,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
@@ -708,26 +697,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
- !MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
- netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
- __func__, MLX5E_MAX_COAL_TIME);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)",
+ MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs,
+ coal->rx_coalesce_usecs);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
- netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
- __func__, MLX5E_MAX_COAL_FRAMES);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)",
+ MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames,
+ coal->rx_max_coalesced_frames);
return -ERANGE;
}
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
- NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device");
return -EOPNOTSUPP;
}
@@ -1299,7 +1296,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (*ptys2legacy_ethtool_table[i].advertised == 0)
+ if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
@@ -1313,18 +1311,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
- unsigned long modes[2];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
- if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
- ptys2ext_ethtool_table[i].advertised[1] == 0)
+ if (bitmap_empty(ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
- memset(modes, 0, sizeof(modes));
+ bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
- modes[1] == ptys2ext_ethtool_table[i].advertised[1])
+ if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
@@ -2015,8 +2013,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read == -EINVAL)
return -EINVAL;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
- __func__, size_read);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Query module eeprom by page failed, read %u bytes, err %d\n",
+ i, size_read);
return i;
}
@@ -2605,6 +2605,7 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
+ .rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 16b67c457b60..a5659c0c4236 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1016,30 +1016,31 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
- struct bpf_prog *old_prog;
-
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
- }
+ kvfree(rq->dim);
+ page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
- mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
- kvfree(rq->dim);
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
@@ -4414,9 +4415,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- features |= NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = true;
} else {
- features &= ~NETIF_F_NETNS_LOCAL;
+ netdev->netns_local = false;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8790d57dc6db..92094bf60d59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5e_rep_get_channels(struct net_device *dev,
@@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -898,7 +898,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ netdev->netns_local = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index de9d01036c28..8e24ba96c779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nodata_packets++;
stats->hds_nodata_bytes += head_size;
}
+ } else {
+ stats->hds_nosplit_packets++;
+ stats->hds_nosplit_bytes += data_bcnt;
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e7a3290a708a..611ec4b6f370 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
+ s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
+ s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4c5858c1dd82..5961c569cfe0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -156,6 +156,8 @@ struct mlx5e_sw_stats {
u64 rx_gro_large_hds;
u64 rx_hds_nodata_packets;
u64 rx_hds_nodata_bytes;
+ u64 rx_hds_nosplit_packets;
+ u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -356,6 +358,8 @@ struct mlx5e_rq_stats {
u64 gro_large_hds;
u64 hds_nodata_packets;
u64 hds_nodata_bytes;
+ u64 hds_nosplit_packets;
+ u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 30673292e15f..6b3b1afe8312 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1740,10 +1740,118 @@ has_encap_dests(struct mlx5_flow_attr *attr)
}
static int
+extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ bool int_dest = false, ext_dest = false;
+ struct mlx5_esw_flow_attr *esw_attr;
+ int i;
+
+ if (flow->attr != attr ||
+ !list_is_first(&attr->list, &flow->attrs))
+ return 0;
+
+ if (flow_flag_test(flow, SLOW))
+ return 0;
+
+ esw_attr = attr->esw_attr;
+ if (!esw_attr->split_count ||
+ esw_attr->split_count == esw_attr->out_count - 1)
+ return 0;
+
+ if (esw_attr->dest_int_port &&
+ (esw_attr->dests[esw_attr->split_count].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return esw_attr->split_count + 1;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+ /* an external dest with encap is considered internal by firmware */
+ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ ext_dest = true;
+ else
+ int_dest = true;
+
+ if (ext_dest && int_dest)
+ return esw_attr->split_count;
+ }
+
+ return 0;
+}
+
+static int
+extra_split_attr_dests(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr, int split_count)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
+ struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *attr2;
+ int i, j, err;
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
+ parse_attr2 = kvzalloc(sizeof(*parse_attr2), GFP_KERNEL);
+ if (!attr2 || !parse_attr2) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ attr2->parse_attr = parse_attr2;
+
+ handle = mlx5e_tc_post_act_add(post_act, attr2);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_free;
+ }
+
+ esw_attr = attr->esw_attr;
+ esw_attr2 = attr2->esw_attr;
+ esw_attr2->in_rep = esw_attr->in_rep;
+
+ parse_attr = attr->parse_attr;
+ parse_attr2->filter_dev = parse_attr->filter_dev;
+
+ for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
+ esw_attr2->dests[j] = esw_attr->dests[i];
+
+ esw_attr2->out_count = j;
+ attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ err = mlx5e_tc_post_act_offload(post_act, handle);
+ if (err)
+ goto err_post_act_offload;
+
+ err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
+ &parse_attr->mod_hdr_acts);
+ if (err)
+ goto err_post_act_set_handle;
+
+ esw_attr->out_count = split_count;
+ attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
+ flow->extra_split_attr = attr2;
+
+ attr2->post_act_handle = handle;
+
+ return 0;
+
+err_post_act_set_handle:
+ mlx5e_tc_post_act_unoffload(post_act, handle);
+err_post_act_offload:
+ mlx5e_tc_post_act_del(post_act, handle);
+err_free:
+ kvfree(parse_attr2);
+ kfree(attr2);
+ return err;
+}
+
+static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
+ int extra_split;
bool vf_tun;
int err = 0;
@@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out;
}
+ extra_split = extra_split_attr_dests_needed(flow, attr);
+ if (extra_split > 0) {
+ err = extra_split_attr_dests(flow, attr, extra_split);
+ if (err)
+ goto err_out;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
+ if (flow->extra_split_attr) {
+ mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
+ kvfree(flow->extra_split_attr->parse_attr);
+ kfree(flow->extra_split_attr);
+ }
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c24bda56b2b5..e1b8cb78369f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
+ struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level;
u8 outer_match_level;
u8 tun_ip_version;
@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
-#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+#define MLX5E_TC_MAX_INT_PORT_NUM (32)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cb7e7e4104af..2505f90c0b39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
mlx5_irq_release_vector(irq);
}
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
- const struct cpumask *prev = cpu_none_mask;
- const struct cpumask *mask;
- int found_cpu = 0;
- int i = 0;
- int cpu;
-
- rcu_read_lock();
- for_each_numa_hop_mask(mask, numa_node) {
- for_each_cpu_andnot(cpu, mask, prev) {
- if (i++ == index) {
- found_cpu = cpu;
- goto spread_done;
- }
- }
- prev = mask;
- }
-
-spread_done:
- rcu_read_unlock();
- return found_cpu;
+ return cpumask_local_spread(index, dev->priv.numa_node);
}
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
int cpu;
rmap = mlx5_eq_table_get_pci_rmap(dev);
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ cpu = mlx5_cpumask_default_spread(dev, vecidx);
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -915,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = 1;
+ af_desc.is_managed = false;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
@@ -1145,7 +1126,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
if (mask)
cpu = cpumask_first(mask);
else
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+ cpu = mlx5_cpumask_default_spread(dev, vector);
return cpu;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 255bc8b749f9..8587cd572da5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
if (!mlx5_esw_allowed(esw))
return -EPERM;
- if (esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 20146a2dc7f4..02a3563f51ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
return err;
}
+static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+{
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TSAR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
void *vport_elem;
int err;
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
+ return -EOPNOTSUPP;
+
parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
@@ -421,6 +443,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
+ __be32 *attr;
u32 divider;
int err;
@@ -428,6 +451,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
if (!group)
return ERR_PTR(-ENOMEM);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
+
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
esw->qos.root_tsar_ix);
err = mlx5_create_scheduling_element_cmd(esw->dev,
@@ -526,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
return err;
}
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
- switch (type) {
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TSAR;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
- }
- return false;
-}
-
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -555,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
+ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 578466d69f21..f44b4c7ebcfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -887,9 +887,6 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
-void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
-
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 768199d2255a..f24f91d213f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+ if (attr->extra_split_ft) {
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[*i].ft = attr->extra_split_ft;
+ (*i)++;
+ }
+
out:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9b8599c200e2..676005854dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
- fte->action.exe_aso.object_id);
+ fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
- fte->action.exe_aso.return_reg_id);
+ fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
- fte->action.exe_aso.type);
+ fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
- fte->action.exe_aso.flow_meter.init_color);
+ fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
- fte->action.exe_aso.flow_meter.meter_idx);
+ fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
- inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
@@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
- fte->flow_context.flow_tag);
+ fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
- !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+ !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
- action = fte->action.action;
+ action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
- if (!extended_dest && fte->action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
+ if (!extended_dest && fte->act_dests.action.pkt_reformat) {
+ struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
@@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
goto err_out;
}
} else {
- reformat_id = fte->action.pkt_reformat->id;
+ reformat_id = fte->act_dests.action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
- if (fte->action.modify_hdr) {
- if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ if (fte->act_dests.action.modify_hdr) {
+ if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_hdr->id);
+ fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
- fte->action.crypto.type);
+ fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
- fte->action.crypto.obj_id);
+ fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
@@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- return 0;
+ return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 53e0e5137d3f..7eb7b3ffe3d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
+
+static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
+{
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a47d6419160d..8505d5e241e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
- fte->modify_mask = 0;
+ fte->act_dests.modify_mask = 0;
+}
+
+static void del_sw_hw_dup_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
+ trace_mlx5_fs_del_rule(rule);
+
+ if (is_fwd_next_action(rule->sw_action)) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+
+ /* If a pending rule is being deleted, it is a NO APPEND rule,
+ * so there are no partial deletions: all the rules of the
+ * mlx5_flow_handle are deleted together and aren't shared with
+ * any other mlx5_flow_handle instance, so none of the
+ * bookkeeping done in del_sw_hw_rule() is needed.
+ */
+
+ kfree(rule);
}
static void del_sw_hw_rule(struct fs_node *node)
@@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node)
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
- --fte->dests_size;
- fte->modify_mask |=
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
goto out;
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
- --fte->dests_size;
- fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
if (is_fwd_dest_type(rule->dest_attr.type)) {
- --fte->dests_size;
- --fte->fwd_dests;
+ --fte->act_dests.dests_size;
+ --fte->act_dests.fwd_dests;
- if (!fte->fwd_dests)
- fte->action.action &=
+ if (!fte->act_dests.fwd_dests)
+ fte->act_dests.action.action &=
~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte->modify_mask |=
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
goto out;
}
@@ -658,12 +683,33 @@ out:
kfree(rule);
}
+static void switch_to_pending_act_dests(struct fs_fte *fte)
+{
+ struct fs_node *iter;
+
+ memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests));
+
+ list_bulk_move_tail(&fte->node.children,
+ fte->dup->children.next,
+ fte->dup->children.prev);
+
+ list_for_each_entry(iter, &fte->node.children, list)
+ iter->del_sw_func = del_sw_hw_rule;
+
+ /* Take a reference so the fte isn't deleted when
+ * mlx5_del_flow_rules() decreases its refcount to
+ * trigger deletion.
+ */
+ tree_get_node(&fte->node);
+}
+
static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
+ bool pending_used = false;
struct fs_fte *fte;
int err;
@@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
- WARN_ON(fte->dests_size);
+ WARN_ON(fte->act_dests.dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+
+ if (fte->dup && !list_empty(&fte->dup->children)) {
+ switch_to_pending_act_dests(fte);
+ pending_used = true;
+ } else {
+ /* Avoid double call to del_hw_fte */
+ node->del_hw_func = NULL;
+ }
+
if (node->active) {
- err = root->cmds->delete_fte(root, ft, fte);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
- node->active = false;
+ if (pending_used) {
+ err = root->cmds->update_fte(root, ft, fg,
+ fte->act_dests.modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't update to pending rule in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ fte->act_dests.modify_mask = 0;
+ } else {
+ err = root->cmds->delete_fte(root, ft, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ node->active = false;
+ }
}
}
@@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_free(&fg->fte_allocator, fte->index - fg->start_index);
+ kvfree(fte->dup);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->action = *flow_act;
- fte->flow_context = spec->flow_context;
+ fte->act_dests.action = *flow_act;
+ fte->act_dests.flow_context = spec->flow_context;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
+static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *tmp_rule;
+ struct fs_node *iter;
+
+ if (!fte->dup || list_empty(&fte->dup->children))
+ return false;
+
+ list_for_each_entry(iter, &fte->dup->children, list) {
+ tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
+
+ if (tmp_rule == rule)
+ return true;
+ }
+
+ return false;
+}
+
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_root_namespace *root;
+ struct fs_fte_action *act_dests;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
+ bool pending = false;
struct fs_fte *fte;
int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+
+ pending = rule_is_pending(fte, rule);
+ if (pending)
+ act_dests = &fte->dup->act_dests;
+ else
+ act_dests = &fte->act_dests;
+
+ if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
@@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg,
- modify_mask, fte);
+ if (!pending)
+ err = root->cmds->update_fte(root, ft, fg,
+ modify_mask, fte);
up_write_ref_node(&fte->node, false);
return err;
@@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
return handle;
}
+static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle,
+ int i)
+{
+ for (; --i >= 0;) {
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ kfree(handle);
+}
+
static void destroy_flow_handle(struct fs_fte *fte,
struct mlx5_flow_handle *handle,
struct mlx5_flow_destination *dest,
@@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
{
for (; --i >= 0;) {
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
- fte->dests_size--;
+ fte->act_dests.dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
@@ -1469,6 +1573,61 @@ static void destroy_flow_handle(struct fs_fte *fte,
}
static struct mlx5_flow_handle *
+create_flow_handle_dup(struct list_head *children,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ struct fs_fte_action *act_dests)
+{
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_handle *handle;
+ int i = 0;
+ int type;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return NULL;
+
+ do {
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add dest to the dests list - flow tables need to be at the
+ * end of the list for forward-to-next-prio rules.
+ */
+ tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, children);
+ else
+ list_add_tail(&rule->node.list, children);
+
+ if (dest) {
+ act_dests->dests_size++;
+
+ if (is_fwd_dest_type(dest[i].type))
+ act_dests->fwd_dests++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ act_dests->modify_mask |= type ? count : dst;
+ }
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle_dup(handle, i);
+ act_dests->dests_size = 0;
+ act_dests->fwd_dests = 0;
+
+ return NULL;
+}
+
+static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte,
else
list_add_tail(&rule->node.list, &fte->node.children);
if (dest) {
- fte->dests_size++;
+ fte->act_dests.dests_size++;
if (is_fwd_dest_type(dest[i].type))
- fte->fwd_dests++;
+ fte->act_dests.fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act, &fte->action)) {
+ if (check_conflicting_actions(flow_act, &fte->act_dests.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
- fte->flow_context.flow_tag != flow_context->flow_tag) {
+ fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->flow_context.flow_tag,
+ fte->act_dests.flow_context.flow_tag,
flow_context->flow_tag);
return -EEXIST;
}
@@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
- old_action = fte->action.action;
- fte->action.action |= flow_act->action;
+ old_action = fte->act_dests.action.action;
+ fte->act_dests.action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
- fte->action.action = old_action;
+ fte->act_dests.action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@@ -1961,6 +2120,62 @@ out:
return fte_tmp;
}
+/* The native steering capability does not support adding an additional
+ * match with the same value to the same flow group. To accommodate the
+ * NO APPEND flag in these scenarios, we include the new rule in the
+ * existing flow table entry (fte) without immediate hardware commitment.
+ * When a request is made to delete the corresponding hardware rule,
+ * we then commit the pending rule to hardware.
+ */
+static struct mlx5_flow_handle *
+add_rule_dup_match_fte(struct fs_fte *fte,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
+{
+ struct mlx5_flow_handle *handle;
+ struct fs_fte_dup *dup;
+ int i = 0;
+
+ if (!fte->dup) {
+ dup = kvzalloc(sizeof(*dup), GFP_KERNEL);
+ if (!dup)
+ return ERR_PTR(-ENOMEM);
+ /* dup is freed when the fte is freed; this way we don't
+ * allocate and free dup on every rule creation or deletion.
+ */
+ INIT_LIST_HEAD(&dup->children);
+ fte->dup = dup;
+ }
+
+ if (!list_empty(&fte->dup->children)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Can have only a single duplicate rule\n");
+
+ return ERR_PTR(-EEXIST);
+ }
+
+ fte->dup->act_dests.action = *flow_act;
+ fte->dup->act_dests.flow_context = spec->flow_context;
+ fte->dup->act_dests.dests_size = 0;
+ fte->dup->act_dests.fwd_dests = 0;
+ fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+ handle = create_flow_handle_dup(&fte->dup->children,
+ dest, dest_num,
+ &fte->dup->act_dests);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < handle->num_rules; i++) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+
+ return handle;
+}
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
int ft_version)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
struct match_list *iter;
@@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ if (flow_act->flags & FLOW_ACT_NO_APPEND &&
+ (root->cmds->get_capabilities(root, root->table_type) &
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH))
goto skip_search;
version = matched_fgs_get_version(match_head);
/* Try to find an fte with identical match value and attempt update its
@@ -1997,7 +2215,10 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
- rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num);
+ else
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
@@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
tree_remove_node(&handle->rule[i]->node, true);
if (list_empty(&fte->node.children)) {
fte->node.del_hw_func(&fte->node);
- /* Avoid double call to del_hw_fte */
- fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
- } else if (fte->dests_size) {
- if (fte->modify_mask)
+ } else if (fte->act_dests.dests_size) {
+ if (fte->act_dests.modify_mask)
modify_fte(fte);
up_write_ref_node(&fte->node, false);
} else {
@@ -3590,8 +3809,8 @@ out:
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
-static struct mlx5_flow_root_namespace
-*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_namespace *ns;
@@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, modify_hdr->ns_type);
+ root = mlx5_get_root_namespace(dev, modify_hdr->ns_type);
if (WARN_ON(!root))
return;
root->cmds->modify_header_dealloc(root, modify_hdr);
@@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_flow_root_namespace *root;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, pkt_reformat->ns_type);
+ root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type);
if (WARN_ON(!root))
return;
root->cmds->packet_reformat_dealloc(root, pkt_reformat);
@@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer;
int id;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, definer->ns_type);
+ root = mlx5_get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 78eb6b7097e1..964937f17cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -110,7 +110,9 @@ enum fs_flow_table_type {
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_PORT_SEL = 0X9,
- FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_MAX_TYPE = FS_FT_FDB_TX,
};
enum fs_flow_table_op_mod {
@@ -131,6 +133,7 @@ enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
@@ -228,20 +231,29 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
+struct fs_fte_action {
+ int modify_mask;
+ u32 dests_size;
+ u32 fwd_dests;
+ struct mlx5_flow_context flow_context;
+ struct mlx5_flow_act action;
+};
+
+struct fs_fte_dup {
+ struct list_head children;
+ struct fs_fte_action act_dests;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 dests_size;
- u32 fwd_dests;
+ struct fs_fte_action act_dests;
+ struct fs_fte_dup *dup;
u32 index;
- struct mlx5_flow_context flow_context;
- struct mlx5_flow_act action;
enum fs_fte_status status;
- struct mlx5_fc *counter;
struct rhash_head hash;
- int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
@@ -368,7 +380,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b61b7d966114..76ad46bf477d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -224,6 +224,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9180_0x91FF);
}
if (MLX5_CAP_GEN(dev, qcam_reg))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b43ca0b762c3..4f55e55ecb55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -26,6 +26,7 @@ struct mlx5_fw_reset {
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
unsigned long reset_flags;
+ u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
@@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
- u8 *reset_type, u8 *reset_state)
+ u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
+ if (reset_method)
+ *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
+}
+
+static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
+ u8 *reset_method)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
+ *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
+ return 0;
+ }
+
+ return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
@@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
- if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
@@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
-static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
+ u8 reset_method)
{
u16 dev_id;
int err;
@@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
- err = mlx5_check_hotplug_interrupt(dev);
- if (err)
- return false;
+ if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+ }
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
@@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
- !mlx5_is_reset_now_capable(dev)) {
+ err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
+ if (err)
+ mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
+
+ if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
+ !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
- u16 reg16, dev_id;
int cap, err;
+ u16 reg16;
- err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
- if (err)
- return pcibios_err_to_errno(err);
- err = mlx5_check_dev_ids(dev, dev_id);
- if (err)
- return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@@ -528,6 +543,44 @@ restore:
return err;
}
+static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
+ return -EOPNOTSUPP;
+
+ return pci_reset_bus(dev->pdev);
+}
+
+static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
+{
+ u16 dev_id;
+ int err;
+
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return pcibios_err_to_errno(err);
+ err = mlx5_check_dev_ids(dev, dev_id);
+ if (err)
+ return err;
+
+ switch (reset_method) {
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
+ err = mlx5_pci_link_toggle(dev, dev_id);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
+ break;
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
+ err = mlx5_pci_reset_bus(dev);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
goto done;
}
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
@@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
fw_reset->ret = err;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 26f8a11b8906..9772327d5124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5i_get_ringparam(struct net_device *dev,
@@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cf8045b92689..8577db3308cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -445,6 +445,34 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
+static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!ldev)
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev)
+ ndev = ldev->pf[ldev->ports - 1].netdev;
+
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -477,9 +505,18 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->mode == MLX5_LAG_MODE_ROCE))
- mlx5_lag_drop_rule_setup(ldev, tracker);
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0);
+
+ if (ldev->mode != MLX5_LAG_MODE_ROCE)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+ /* Only SR-IOV and RoCE LAG should have tracker->tx_type set,
+ * so there is no need to check the mode.
+ */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ }
}
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
@@ -613,6 +650,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
mlx5_core_err(dev0,
"Failed to deactivate RoCE LAG; driver restart required\n");
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh);
return err;
}
@@ -1492,38 +1530,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
mlx5_queue_bond_work(ldev, 0);
}
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
- struct net_device *ndev = NULL;
- struct mlx5_lag *ldev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lag_lock, flags);
- ldev = mlx5_lag_dev(dev);
-
- if (!(ldev && __mlx5_lag_is_roce(ldev)))
- goto unlock;
-
- if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- for (i = 0; i < ldev->ports; i++)
- if (ldev->tracker.netdev_state[i].tx_enabled)
- ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
- } else {
- ndev = ldev->pf[MLX5_LAG_P1].netdev;
- }
- if (ndev)
- dev_hold(ndev);
-
-unlock:
- spin_unlock_irqrestore(&lag_lock, flags);
-
- return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0361741632a6..b306ae79bf97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -38,6 +38,10 @@
#include "lib/eq.h"
#include "en.h"
#include "clock.h"
+#ifdef CONFIG_X86
+#include <linux/timekeeping.h>
+#include <linux/cpufeature.h>
+#endif /* CONFIG_X86 */
enum {
MLX5_PIN_MODE_IN = 0x0,
@@ -148,6 +152,87 @@ static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
MLX5_REG_MTUTC, 0, 1);
}
+#ifdef CONFIG_X86
+static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
+ return false;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
+ 0, 0);
+ if (err)
+ return false;
+
+ return !!MLX5_GET(mtptm_reg, out, psta);
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 host, device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
+ MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
+ real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
+ 0, 0);
+ if (err)
+ return err;
+
+ if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
+ !MLX5_GET(mtctr_reg, out, second_clock_valid))
+ return -EINVAL;
+
+ host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
+ *sys_counterval = (struct system_counterval_t) {
+ .cycles = host,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = true,
+ };
+
+ device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+ if (real_time_mode)
+ *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
+ else
+ *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+
+ return 0;
+}
+
+static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev))
+ return -EBUSY;
+
+ ktime_get_snapshot(&history_begin);
+
+ return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+}
+#endif /* CONFIG_X86 */
+
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts,
bool real_time)
@@ -1034,6 +1119,12 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+#ifdef CONFIG_X86
+ if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+#endif /* CONFIG_X86 */
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5b7e6f4b5c7e..220a9ac75c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false, mem_page_fault = false;
void *set_hca_cap;
- bool do_set = false;
int err;
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
+ /* For best performance, enable memory scheme ODP only when
+ * it has page prefetch enabled.
+ */
+ if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+ MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+ mem_page_fault = true;
+ do_set = true;
+ MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+ goto set;
+ }
+
#define ODP_CAP_SET_MAX(dev, field) \
do { \
u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -479,25 +490,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
} \
} while (0)
- ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
-
- if (!do_set)
- return 0;
-
- return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
+
+set:
+ if (do_set)
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+ mem_page_fault ? "memory" : "transport");
+ return err;
}
static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
@@ -619,6 +633,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
+ MLX5_SET(cmd_hca_cap, set_hca_cap,
+ pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
@@ -923,6 +940,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
+
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+
return 0;
err_clr_master:
@@ -939,6 +961,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
* before removing the pci bars
*/
mlx5_drain_health_wq(dev);
+ pci_disable_ptm(dev->pdev);
iounmap(dev->iseg);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
@@ -2217,6 +2240,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
+ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d894a88fa9f2..972e8e9df585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -608,6 +608,11 @@ enum {
RELEASE_ALL_PAGES_MASK = 0x4000,
};
+/* This limit is based on the capability of the firmware: it cannot release
+ * more than 50000 pages back to the host in one go.
+ */
+#define MAX_RECLAIM_NPAGES (-50000)
+
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -639,7 +644,16 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
- req->npages = npages;
+
+ /* npages > 0 means the HCA is asking the host to allocate/give pages,
+ * npages < 0 means the HCA is asking the host to reclaim pages it
+ * previously gave. Here we restrict the maximum number of pages that
+ * can be reclaimed to MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES
+ * is a negative value; because it is negative, max() (and not min())
+ * is what bounds req->npages.
+ */
+ req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
req->ec_function = ec_function;
req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
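
Since the sign convention above is easy to get backwards, here is a standalone sketch of the clamp in plain C (illustrative only; the kernel code uses max_t(s32, ...)):

#include <stdio.h>

#define MAX_RECLAIM_NPAGES (-50000)

/* For a reclaim request (negative npages), max() bounds how many pages
 * can be reclaimed in one go; allocation requests (positive npages)
 * pass through unchanged.
 */
static int clamp_npages(int npages)
{
	return npages > MAX_RECLAIM_NPAGES ? npages : MAX_RECLAIM_NPAGES;
}

int main(void)
{
	printf("%d\n", clamp_npages(-80000)); /* -50000: reclaim clamped */
	printf("%d\n", clamp_npages(-1000));  /* -1000: small reclaim kept */
	printf("%d\n", clamp_npages(4096));   /* 4096: allocation untouched */
	return 0;
}
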
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 8bce730b5c5b..db2bd3ad63ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr;
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
+ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
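
With these checks in place, callers can distinguish missing hardware support from real failures. An illustrative sketch of a hypothetical caller, assuming the existing mlx5_qos_create_leaf_node() parameter order (the wrapper name and fallback policy are assumptions, not part of this patch):

/* Hypothetical caller: treat -EOPNOTSUPP as "QoS unavailable" and
 * continue without it, but propagate any other error.
 */
static int example_try_qos(struct mlx5_core_dev *mdev, u32 parent_id)
{
	u32 node_id;
	int err;

	err = mlx5_qos_create_leaf_node(mdev, parent_id, 0, 0, &node_id);
	if (err == -EOPNOTSUPP)
		return 0; /* device lacks queue-group elements; skip QoS */
	return err;
}
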
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 8c2a34a0d6be..baefb9a3fa05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_1);
+ flow_table_context.sws.sw_owner_icm_root_1);
output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_0);
+ flow_table_context.sws.sw_owner_icm_root_0);
return 0;
}
@@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
*/
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 50c2554c9ccf..833cb68c744f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -9,14 +9,6 @@
#include "fs_dr.h"
#include "dr_types.h"
-static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
-{
- if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
- return true;
-
- return false;
-}
-
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
@@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
match_sz = sizeof(fte->val);
/* Drop reformat action bit if destination vport set with reformat */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
@@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
* TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
- if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
- is_decap = fte->action.pkt_reformat->reformat_type ==
+ is_decap = fte->act_dests.action.pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
else
delay_encap_set = true;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
actions[num_actions++] =
- fte->action.modify_hdr->action.dr_action;
+ fte->act_dests.action.modify_hdr->action.dr_action;
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (delay_encap_set)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
/* The order of the actions below is not important */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
@@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions++].dest = tmp_action;
}
- if (fte->flow_context.flow_tag) {
+ if (fte->act_dests.flow_context.flow_tag) {
tmp_action =
- mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
@@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;
@@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ struct mlx5_flow_act *action = &fte->act_dests.action;
+
+ if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
- fte->action.exe_aso.object_id,
- fte->action.exe_aso.return_reg_id,
- fte->action.exe_aso.type,
- fte->action.exe_aso.flow_meter.init_color,
- fte->action.exe_aso.flow_meter.meter_idx);
+ action->exe_aso.object_id,
+ action->exe_aso.return_reg_id,
+ action->exe_aso.type,
+ action->exe_aso.flow_meter.init_color,
+ action->exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
- u32 flow_source = fte->flow_context.flow_source;
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
&params,
num_actions,
actions,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
@@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- u32 steering_caps = 0;
+ u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
- return 0;
+ return steering_caps;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
new file mode 100644
index 000000000000..f39d636ff39a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_H_
+#define MLX5HWS_H_
+
+struct mlx5hws_context;
+struct mlx5hws_table;
+struct mlx5hws_matcher;
+struct mlx5hws_rule;
+
+enum mlx5hws_table_type {
+ MLX5HWS_TABLE_TYPE_FDB,
+ MLX5HWS_TABLE_TYPE_MAX,
+};
+
+enum mlx5hws_matcher_resource_mode {
+ /* Allocate resources based on number of rules with minimal failure probability */
+ MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ /* Allocate fixed size hash table based on given column and rows */
+ MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE,
+};
+
+enum mlx5hws_action_type {
+ MLX5HWS_ACTION_TYP_LAST,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ MLX5HWS_ACTION_TYP_DROP,
+ MLX5HWS_ACTION_TYP_MISS,
+ MLX5HWS_ACTION_TYP_TBL,
+ MLX5HWS_ACTION_TYP_CTR,
+ MLX5HWS_ACTION_TYP_TAG,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ MLX5HWS_ACTION_TYP_VPORT,
+ MLX5HWS_ACTION_TYP_POP_VLAN,
+ MLX5HWS_ACTION_TYP_PUSH_VLAN,
+ MLX5HWS_ACTION_TYP_ASO_METER,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ MLX5HWS_ACTION_TYP_REMOVE_HEADER,
+ MLX5HWS_ACTION_TYP_RANGE,
+ MLX5HWS_ACTION_TYP_SAMPLER,
+ MLX5HWS_ACTION_TYP_DEST_ARRAY,
+ MLX5HWS_ACTION_TYP_MAX,
+};
+
+enum mlx5hws_action_flags {
+ MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0,
+ /* A shared action can be used across multiple threads, since the
+ * data is written only once, at the creation of the action.
+ */
+ MLX5HWS_ACTION_FLAG_SHARED = 1 << 1,
+};
+
+enum mlx5hws_action_aso_meter_color {
+ MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0,
+ MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1,
+ MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2,
+ MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum mlx5hws_send_queue_actions {
+ /* Start executing all pending queued rules */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+ /* Start executing all pending queued rules and wait till completion */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
+};
+
+struct mlx5hws_context_attr {
+ u16 queues;
+ u16 queue_size;
+ bool bwc; /* Add support for the backward-compatible API */
+};
+
+struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
+};
+
+enum mlx5hws_matcher_flow_src {
+ MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0,
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1,
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2,
+};
+
+enum mlx5hws_matcher_insert_mode {
+ MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1,
+};
+
+enum mlx5hws_matcher_distribute_mode {
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
+};
+
+struct mlx5hws_matcher_attr {
+ /* Processing priority inside table */
+ u32 priority;
+ /* Provide all rules with unique rule_idx in num_log range to reduce locking */
+ bool optimize_using_rule_idx;
+ /* Resource mode and corresponding size */
+ enum mlx5hws_matcher_resource_mode mode;
+ /* Optimize insertion in case packet origin is the same for all rules */
+ enum mlx5hws_matcher_flow_src optimize_flow_src;
+ /* Define the insertion and distribution modes for this matcher */
+ enum mlx5hws_matcher_insert_mode insert_mode;
+ enum mlx5hws_matcher_distribute_mode distribute_mode;
+ /* Define whether the created matcher supports resizing into a bigger matcher */
+ bool resizable;
+ union {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+ };
+ /* Optional AT attach configuration - Max number of additional AT */
+ u8 max_num_of_at_attach;
+};
+
+struct mlx5hws_rule_attr {
+ void *user_data;
+ /* Valid if matcher optimize_using_rule_idx is set or
+ * if matcher is configured to insert rules by index.
+ */
+ u32 rule_idx;
+ u32 flow_source;
+ u16 queue_id;
+ u32 burst:1;
+};
+
+/* In actions that take an offset, the offset is unique, pointing to a single
+ * resource; the user should not reuse the same index, because updating the
+ * data is not atomic.
+ */
+struct mlx5hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct {
+ u32 value;
+ } tag;
+
+ struct {
+ u32 offset;
+ } counter;
+
+ struct {
+ u32 offset;
+ u8 *data;
+ } modify_header;
+
+ struct {
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ } reformat;
+
+ struct {
+ __be32 vlan_hdr;
+ } push_vlan;
+
+ struct {
+ u32 offset;
+ enum mlx5hws_action_aso_meter_color init_color;
+ } aso_meter;
+ };
+};
+
+struct mlx5hws_action_reformat_header {
+ size_t sz;
+ void *data;
+};
+
+struct mlx5hws_action_insert_header {
+ struct mlx5hws_action_reformat_header hdr;
+ /* PRM start anchor to which header will be inserted */
+ u8 anchor;
+ /* Header insertion offset in bytes, from the start
+ * anchor to the location where new header will be inserted.
+ */
+ u8 offset;
+ /* Indicates this header insertion adds encapsulation header to the packet,
+ * requiring device to update offloaded fields (for example IPv4 total length).
+ */
+ bool encap;
+};
+
+struct mlx5hws_action_remove_header_attr {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+};
+
+struct mlx5hws_action_mh_pattern {
+ /* Byte size of modify actions provided by "data" */
+ size_t sz;
+ /* PRM format modify actions pattern */
+ __be64 *data;
+};
+
+struct mlx5hws_action_dest_attr {
+ /* Required destination action to forward the packet */
+ struct mlx5hws_action *dest;
+ /* Optional reformat action */
+ struct mlx5hws_action *reformat;
+};
+
+/**
+ * mlx5hws_is_supported - Check whether HWS is supported
+ *
+ * @mdev: The device to check.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev)
+{
+ u8 ignore_flow_level_rtc_valid;
+ u8 wqe_based_flow_table_update;
+
+ wqe_based_flow_table_update =
+ MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap);
+ ignore_flow_level_rtc_valid =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ return wqe_based_flow_table_update && ignore_flow_level_rtc_valid;
+}
+
+/**
+ * mlx5hws_context_open - Open a context used for direct rule insertion
+ * using hardware steering.
+ *
+ * @mdev: The device to be used for HWS.
+ * @attr: Attributes used for context open.
+ *
+ * Return: pointer to mlx5hws_context on success, NULL otherwise.
+ */
+struct mlx5hws_context *
+mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr);
+
+/**
+ * mlx5hws_context_close - Close a context used for direct hardware steering.
+ *
+ * @ctx: mlx5hws context to close.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx);
+
+/**
+ * mlx5hws_context_set_peer - Set a peer context.
+ * Each context can have multiple contexts as peers.
+ *
+ * @ctx: The context to which peer_ctx will be peered.
+ * @peer_ctx: The peer context.
+ * @peer_vhca_id: The peer context vhca id.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id);
+
+/**
+ * mlx5hws_table_create - Create a new direct rule table.
+ * Each table can contain multiple matchers.
+ *
+ * @ctx: The context in which the new table will be opened.
+ * @attr: Attributes used for table creation.
+ *
+ * Return: pointer to mlx5hws_table on success, NULL otherwise.
+ */
+struct mlx5hws_table *
+mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr);
+
+/**
+ * mlx5hws_table_destroy - Destroy direct rule table.
+ *
+ * @tbl: Table to destroy.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_get_id - Get the ID of the flow table.
+ *
+ * @tbl: Table to get the ID of.
+ *
+ * Return: ID of the table.
+ */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table
+ * by using another mlx5hws_table.
+ * Traffic that misses all the table's matchers will be forwarded to the miss table.
+ *
+ * @tbl: Source table
+ * @miss_tbl: Target (miss) table, or NULL to remove current miss table
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl);
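
Taken together, the declarations above imply the following setup/teardown order. A minimal sketch of a hypothetical consumer (the attribute values are illustrative assumptions, not requirements of the API):

/* Hypothetical bring-up: open a context, create an FDB table, and tear
 * both down in reverse order. Error handling is kept minimal.
 */
static int example_hws_setup(struct mlx5_core_dev *mdev)
{
	struct mlx5hws_context_attr ctx_attr = { .queues = 1, .queue_size = 256 };
	struct mlx5hws_table_attr tbl_attr = {
		.type = MLX5HWS_TABLE_TYPE_FDB,
		.level = 0,
	};
	struct mlx5hws_context *ctx;
	struct mlx5hws_table *tbl;
	int ret = 0;

	if (!mlx5hws_is_supported(mdev))
		return -EOPNOTSUPP;

	ctx = mlx5hws_context_open(mdev, &ctx_attr);
	if (!ctx)
		return -EINVAL;

	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		ret = -EINVAL;
		goto close_ctx;
	}

	/* ... create matchers and rules here ... */

	mlx5hws_table_destroy(tbl);
close_ctx:
	mlx5hws_context_close(ctx);
	return ret;
}
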
+
+/**
+ * mlx5hws_match_template_create - Create a new match template based on items mask.
+ * The match template will be used for matcher creation.
+ *
+ * @ctx: The context in which the new template will be created.
+ * @match_param: Describe the mask based on PRM match parameters.
+ * @match_param_sz: Size of match param buffer.
+ * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer.
+ *
+ * Return: Pointer to mlx5hws_match_template on success, NULL otherwise.
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable);
+
+/**
+ * mlx5hws_match_template_destroy - Destroy a match template.
+ *
+ * @mt: Match template to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/**
+ * mlx5hws_action_template_create - Create a new action template based on an action_type array.
+ *
+ * @action_type: An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ *
+ * Return: Pointer to mlx5hws_action_template on success, NULL otherwise.
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
+
+/**
+ * mlx5hws_action_template_destroy - Destroy action template.
+ *
+ * @at: Action template to destroy.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_create - Create a new direct rule matcher.
+ *
+ * Each matcher can contain multiple rules. Matchers on the table will be
+ * processed by priority. Matching fields and mask are described by the
+ * match template. In some cases, multiple match templates can be used on
+ * the same matcher.
+ *
+ * @table: The table in which the new matcher will be opened.
+ * @mt: Array of match templates to be used on matcher.
+ * @num_of_mt: Number of match templates in mt array.
+ * @at: Array of action templates to be used on matcher.
+ * @num_of_at: Number of action templates in at array.
+ * @attr: Attributes used for matcher creation.
+ *
+ * Return: Pointer to mlx5hws_matcher on success, NULL otherwise.
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr);
+
+/**
+ * mlx5hws_matcher_destroy - Destroy a direct rule matcher.
+ *
+ * @matcher: Matcher to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
+
+/**
+ * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher.
+ *
+ * @matcher: Matcher to attach the action template to.
+ * @at: Action template to be attached to the matcher.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules.
+ *
+ * Both matchers must be in the same table type, must be created with the
+ * 'resizable' property, and should have the same characteristics (e.g., same
+ * match templates and action templates). It is the user's responsibility to
+ * ensure that the destination matcher is allocated with the appropriate size.
+ *
+ * Once the function is completed, the user is:
+ * - Allowed to move rules from the source into the destination matcher.
+ * - No longer allowed to insert rules into the source matcher.
+ *
+ * The user is always allowed to insert rules into the destination matcher and
+ * to delete rules from any matcher.
+ *
+ * @src_matcher: Source matcher for moving rules from.
+ * @dst_matcher: Destination matcher for moving rules to.
+ *
+ * Return: Zero on successful move, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher);
+
+/**
+ * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation.
+ *
+ * This function enqueues the operation of moving a rule from the source
+ * matcher to the destination matcher.
+ *
+ * @src_matcher: Matcher that the rule belongs to.
+ * @rule: The rule to move.
+ * @attr: Rule attributes.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
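
An illustrative sketch of the two-step resize protocol described above (hypothetical caller; both matchers are assumed to be resizable with identical templates):

/* Hypothetical grow path: link the matchers, then enqueue moving one
 * rule. In practice every rule in the source matcher is moved this way.
 */
static int example_matcher_grow(struct mlx5hws_matcher *src,
				struct mlx5hws_matcher *dst,
				struct mlx5hws_rule *rule,
				struct mlx5hws_rule_attr *attr)
{
	int ret;

	/* After this call, no new rules may be inserted into src */
	ret = mlx5hws_matcher_resize_set_target(src, dst);
	if (ret)
		return ret;

	/* Asynchronous: completion is reported on the queue named in attr */
	return mlx5hws_matcher_resize_rule_move(src, rule, attr);
}
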
+
+/**
+ * mlx5hws_rule_create - Enqueue create rule operation.
+ *
+ * @matcher: The matcher in which the new rule will be created.
+ * @mt_idx: Match template index to create the match with.
+ * @match_param: The match parameter PRM buffer used for value matching.
+ * @at_idx: Action template index to apply the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule creation attributes.
+ * @rule_handle: A valid rule handle. The handle doesn't require any initialization.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle);
+
+/**
+ * mlx5hws_rule_destroy - Enqueue destroy rule operation.
+ *
+ * @rule: The rule to enqueue for destruction.
+ * @attr: Rule destruction attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_rule_action_update - Enqueue update actions on an existing rule.
+ *
+ * @rule: A valid rule handle to update.
+ * @at_idx: Action template index to update the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule update attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_action_get_type - Get action type.
+ *
+ * @action: The action to get the type of.
+ *
+ * Return: action type.
+ */
+enum mlx5hws_action_type
+mlx5hws_action_get_type(struct mlx5hws_action *action);
+
+/**
+ * mlx5hws_action_create_dest_drop - Create a direct rule drop action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_default_miss - Create a direct rule default miss action.
+ * Defaults are RX: Drop, TX: Wire.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table - Create direct rule goto table action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl: Destination table.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl_num: Destination table number.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 tbl_num, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_match_range - Create direct rule range match action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @field: Field to compare the value against.
+ * @hit_ft: Flow table to go to on hit.
+ * @miss_ft: Flow table to go to on miss.
+ * @min: Minimal value of the field to be considered as hit.
+ * @max: Maximal value of the field to be considered as hit.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags);
+
+/**
+ * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @sampler_id: Flow sampler object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_vport - Create direct rule goto vport action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @vport_num: Destination vport number.
+ * @vhca_id_valid: Tells if the vhca_id parameter is valid.
+ * @vhca_id: VHCA ID of the destination vport.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_tag - Create direct rule TAG action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_counter - Create direct rule counter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: Direct rule counter object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_reformat - Create direct rule reformat action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this reformat.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_modify_header - Create direct rule modify header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_patterns: Number of provided patterns in "patterns" array.
+ * @patterns: Patterns array containing pattern information.
+ * @log_bulk_size: Number of unique values used with this pattern.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: ASO object ID.
+ * @return_reg_c: Copy the ASO object value into this reg_c,
+ * after a packet hits a rule with this ASO object.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_c,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_push_vlan - Create direct rule push vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_array - Create a dest array action; this action can
+ * duplicate packets and forward them to multiple destinations in the destination list.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_dest: The number of dest attributes.
+ * @dests: The destination array. Each contains a destination action and can
+ * have additional actions.
+ * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
+ * @flow_source: Source port of the traffic for this action.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_insert_header - Create insert header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this insert header.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_remove_header - Create remove header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @attr: Attributes that specify the remove-header type: either the PRM start
+ *        anchor and the PRM end anchor, or the PRM start anchor and the remove
+ *        size in bytes.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_last - Create direct rule LAST action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_destroy - Destroy direct rule action.
+ *
+ * @action: The action to destroy.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action);
+
+enum mlx5hws_flow_op_status {
+ MLX5HWS_FLOW_OP_SUCCESS,
+ MLX5HWS_FLOW_OP_ERROR,
+};
+
+struct mlx5hws_flow_op_result {
+ enum mlx5hws_flow_op_status status;
+ void *user_data;
+};
+
+/**
+ * mlx5hws_send_queue_poll - Poll a queue for rule creation and deletion completions.
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to poll.
+ * @res: Completion array.
+ * @res_nb: Maximum number of results to return.
+ *
+ * Return: negative number on failure, the number of completions otherwise.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb);
+
+/**
+ * mlx5hws_send_queue_action - Perform an action on the queue
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to perform the action on.
+ * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions).
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
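
The rule APIs above are asynchronous; an illustrative sketch of the enqueue/drain/poll cycle follows (queue 0 and the template indices are assumptions for the example, not requirements):

/* Hypothetical synchronous wrapper around the async rule-create flow. */
static int example_rule_create_sync(struct mlx5hws_context *ctx,
				    struct mlx5hws_matcher *matcher,
				    u32 *match_param,
				    struct mlx5hws_rule_action actions[],
				    struct mlx5hws_rule *rule)
{
	struct mlx5hws_flow_op_result res[8];
	struct mlx5hws_rule_attr attr = { .queue_id = 0 };
	int ret;

	ret = mlx5hws_rule_create(matcher, 0, match_param, 0, actions,
				  &attr, rule);
	if (ret)
		return ret;

	/* Flush the queue and wait until the rule is actually written */
	ret = mlx5hws_send_queue_action(ctx, 0,
					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
	if (ret)
		return ret;

	/* Reap the completion; a negative return indicates failure */
	ret = mlx5hws_send_queue_poll(ctx, 0, res, ARRAY_SIZE(res));
	return ret < 0 ? ret : 0;
}
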
+
+/**
+ * mlx5hws_debug_dump - Dump HWS info
+ *
+ * @ctx: The context from which to dump the info.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_debug_dump(struct mlx5hws_context *ctx);
+
+struct mlx5hws_bwc_matcher;
+struct mlx5hws_bwc_rule;
+
+struct mlx5hws_match_parameters {
+ size_t match_sz;
+ u32 *match_buf; /* Device spec format */
+};
+
+/**
+ * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher.
+ *
+ * This function does the following:
+ * - creates match template based on flow items
+ * - creates an empty action template
+ * - creates a regular mlx5hws_matcher with these mt and at, setting
+ * its size to minimal
+ * Notes:
+ * - table->ctx must have BWC support
+ * - complex rules are not supported
+ *
+ * @table: The table in which the new matcher will be opened
+ * @priority: Priority for this BWC matcher
+ * @match_criteria_enable: Bitmask that defines matching criteria
+ * @mask: Match parameters
+ *
+ * Return: pointer to mlx5hws_bwc_matcher on success, NULL otherwise.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+/**
+ * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher.
+ *
+ * @bwc_matcher: Matcher to destroy
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+/**
+ * mlx5hws_bwc_rule_create - Create a new BWC rule.
+ *
+ * Unlike the usual rule creation function, this one is blocking: when the
+ * function returns, the rule is written to its place (no need to poll).
+ * This function does the following:
+ * - finds a matching action template based on the provided rule_actions, or
+ *   creates a new action template if a matching one doesn't exist
+ * - updates corresponding BWC matcher stats
+ * - if needed, the function performs rehash:
+ * - creates a new matcher based on mt, at, new_sz
+ * - moves all the existing matcher rules to the new matcher
+ * - removes the old matcher
+ * - inserts new rule
+ * - polls till completion is received
+ * Notes:
+ * - matcher->tbl->ctx must have BWC support
+ * - separate BWC ctx queues are used
+ *
+ * @bwc_matcher: The BWC matcher in which the new rule will be created.
+ * @params: Match parameters
+ * @flow_source: Flow source for this rule
+ * @rule_actions: Rule actions to be executed on match
+ *
+ * Return: valid BWC rule handle on success, NULL otherwise.
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[]);
+
+/**
+ * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule.
+ *
+ * @bwc_rule: Rule to destroy.
+ *
+ * Return: zero on success, non-zero otherwise.
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
+
+/**
+ * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule.
+ *
+ * @bwc_rule: Rule to update
+ * @rule_actions: Rule actions to update with
+ *
+ * Return: zero on successful update, non-zero otherwise.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[]);
+
+#endif
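
Because the BWC path is blocking, its use is much simpler than the queued flow. An illustrative sketch (priority 0 is arbitrary; the criteria bit shown is the usual outer-headers flag from mlx5 flow steering, and this code is a hypothetical consumer, not part of the patch):

/* Hypothetical blocking insert via the BWC API: no queues or polling. */
static int example_bwc_insert(struct mlx5hws_table *table,
			      struct mlx5hws_match_parameters *mask,
			      struct mlx5hws_match_parameters *value,
			      struct mlx5hws_rule_action actions[])
{
	struct mlx5hws_bwc_matcher *matcher;
	struct mlx5hws_bwc_rule *rule;

	matcher = mlx5hws_bwc_matcher_create(table, 0 /* priority */,
					     MLX5_MATCH_OUTER_HEADERS, mask);
	if (!matcher)
		return -EINVAL;

	/* Blocking: on return the rule is in place, no need to poll */
	rule = mlx5hws_bwc_rule_create(matcher, value, 0, actions);
	if (!rule) {
		mlx5hws_bwc_matcher_destroy(matcher);
		return -EINVAL;
	}

	mlx5hws_bwc_rule_destroy(rule);
	mlx5hws_bwc_matcher_destroy(matcher);
	return 0;
}
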
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
new file mode 100644
index 000000000000..b27bb4106532
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
@@ -0,0 +1,2604 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
+ [MLX5HWS_TABLE_TYPE_FDB] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
+ },
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+ [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+ [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+ [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+ [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+ [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+ [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+ [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+ [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+ [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+ [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+ [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+ [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+ [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+ [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+ [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+ "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+ return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+ return action->type;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+ enum mlx5hws_context_shared_stc_type stc_type,
+ u8 tbl_type)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_shared_stc *shared_stc;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+ }
+
+ shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+ if (!shared_stc) {
+ ret = -ENOMEM;
+ goto unlock_and_out;
+ }
+ switch (stc_type) {
+ case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.remove_header.decap = 0;
+ stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+ break;
+ case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ default:
+ mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+ ret = -EINVAL;
+ goto free_shared_stc;
+ }
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &shared_stc->stc_chunk);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate shared STC (type %d)\n", stc_type);
+ goto free_shared_stc;
+ }
+
+ ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+free_shared_stc:
+ kfree(shared_stc);
+unlock_and_out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+ pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+ return -EINVAL;
+ }
+
+ ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+ stc_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+ struct mlx5hws_action_shared_stc *shared_stc;
+ struct mlx5hws_context *ctx = action->ctx;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ mutex_unlock(&ctx->ctrl_lock);
+ return;
+ }
+
+ shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+ kfree(shared_stc);
+ ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions)
+{
+ mlx5hws_err(ctx, "Invalid action_type sequence");
+ while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+ mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+ user_actions++;
+ }
+ mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type)
+{
+ const u32 *order_arr = action_order_arr[table_type];
+ u8 order_idx = 0;
+ u8 user_idx = 0;
+ bool valid_combo;
+
+ if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+ mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+ return false;
+ }
+
+ while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+ /* User action order validated, move to next user action */
+ if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+ user_idx++;
+
+ /* Iterate to the next supported action in the order */
+ order_idx++;
+ }
+
+ /* Combination is valid if all user actions were processed */
+ valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+ if (!valid_combo)
+ hws_action_print_combo(ctx, user_actions);
+
+ return valid_combo;
+}
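
For instance, the following sequence passes the FDB order check, since the decap, counter, and table-destination slots appear in the same relative order as action_order_arr (a hypothetical check, not code from this patch):

static bool example_valid_fdb_combo(struct mlx5hws_context *ctx)
{
	/* DECAP -> CTR -> goto-table, terminated by LAST */
	enum mlx5hws_action_type seq[] = {
		MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
		MLX5HWS_ACTION_TYP_CTR,
		MLX5HWS_ACTION_TYP_TBL,
		MLX5HWS_ACTION_TYP_LAST,
	};

	return mlx5hws_action_check_combo(ctx, seq, MLX5HWS_TABLE_TYPE_FDB);
}
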
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+ enum mlx5hws_table_type table_type,
+ bool is_mirror)
+{
+ bool use_fixup = false;
+ u32 fw_tbl_type;
+ u32 base_id;
+
+ fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ if (is_mirror && stc_attr->ste_table.ignore_tx) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ break;
+ }
+ if (!is_mirror)
+ base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+ else
+ base_id =
+ mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+
+ *fixup_stc_attr = *stc_attr;
+ fixup_stc_attr->ste_table.ste_obj_id = base_id;
+ use_fixup = true;
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ if (fw_tbl_type == FS_FT_FDB_TX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+ fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ ctx->caps->merged_eswitch;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+ break;
+
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.vport_num = 0;
+ fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ stc_attr->vport.eswitch_owner_vhca_id_valid;
+ }
+ use_fixup = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ bool use_fixup;
+ u32 obj_0_id;
+ int ret;
+
+ ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+ return ret;
+ }
+
+ stc_attr->stc_offset = stc->offset;
+
+ /* Dynamic reparse not supported, overwrite and use default */
+ if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+ stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+
+ /* Adjust the stc_attr according to table/action limitations */
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto free_chunk;
+ }
+
+ /* Modify the FDB peer */
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ u32 obj_1_id;
+
+ obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+ &fixup_stc_attr,
+ table_type, true);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to modify peer STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto clean_obj_0;
+ }
+ }
+
+ return 0;
+
+clean_obj_0:
+ cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ cleanup_stc_attr.stc_offset = stc->offset;
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+ return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ u32 obj_id;
+
+ /* Modify the STC not to point to an object */
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.stc_offset = stc->offset;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+ }
+
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+ __be64 pattern)
+{
+ u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+ switch (action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ return MLX5_IFC_STC_ACTION_TYPE_SET;
+ case MLX5_MODIFICATION_TYPE_ADD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ return MLX5_IFC_STC_ACTION_TYPE_COPY;
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+ default:
+ mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+ return MLX5_IFC_STC_ACTION_TYPE_NOP;
+ }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+ u32 obj_id,
+ struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_TAG:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ break;
+ case MLX5HWS_ACTION_TYP_DROP:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_MISS:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_CTR:
+ attr->id = obj_id;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ if (action->modify_header.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+ if (action->modify_header.num_of_actions == 1) {
+ attr->modify_action.data = action->modify_header.single_action;
+ attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+ attr->modify_action.data);
+
+ if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+ attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+ MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+ } else {
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+ attr->modify_header.arg_id = action->modify_header.arg_id;
+ attr->modify_header.pattern_id = action->modify_header.pat_id;
+ }
+ break;
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj_id;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_header.decap = 1;
+ attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ if (!action->reformat.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->insert_header.encap = action->reformat.encap;
+ attr->insert_header.insert_anchor = action->reformat.anchor;
+ attr->insert_header.arg_id = action->reformat.arg_id;
+ attr->insert_header.header_size = action->reformat.header_size;
+ attr->insert_header.insert_offset = action->reformat.offset;
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+ attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+ attr->aso.devx_obj_id = obj_id;
+ attr->aso.return_reg_id = action->aso.return_reg_id;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ attr->vport.vport_num = action->vport.vport_num;
+ attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+ attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+ break;
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->insert_header.encap = 0;
+ attr->insert_header.is_inline = 1;
+ attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+ attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->remove_header.decap = 0; /* decap mode 0 is the only supported mode */
+ attr->remove_words.start_anchor = action->remove_header.anchor;
+ /* the size is already in words */
+ attr->remove_words.num_of_words = action->remove_header.size;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+ }
+}
+
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate STC for FDB */
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto out_err;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+ return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+}
+
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+ return false;
+ }
+
+ if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
+ mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type,
+ u8 bulk_sz)
+{
+ struct mlx5hws_action *action;
+ int i;
+
+ if (!hws_action_is_flag_hws_fdb(flags)) {
+ mlx5hws_err(ctx,
+ "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+ return NULL;
+ }
+
+ if (!hws_action_validate_hws_action(ctx, flags))
+ return NULL;
+
+ action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
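+ /* Every action in the bulk shares the same context, flags and type;
+ * per-action resources are filled in by the specific create function.
+ */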
+ for (i = 0; i < bulk_sz; i++) {
+ action[i].ctx = ctx;
+ action[i].flags = flags;
+ action[i].type = action_type;
+ }
+
+ return action;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type)
+{
+ return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, table_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_obj.obj_id = table_id;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags)
+{
+ return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
+}
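+
+/* A minimal usage sketch (hypothetical caller; the surrounding context,
+ * table setup and error handling are assumed to exist elsewhere):
+ *
+ * struct mlx5hws_action *fwd;
+ *
+ * fwd = mlx5hws_action_create_dest_table(ctx, tbl,
+ * MLX5HWS_ACTION_FLAG_HWS_FDB);
+ * if (!fwd)
+ * return -EINVAL;
+ * ... attach fwd to rule actions ...
+ * mlx5hws_action_destroy(fwd);
+ */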
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type action_type,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, action_type);
+ if (!action)
+ return NULL;
+
+ action->aso.obj_id = obj_id;
+ action->aso.return_reg_id = return_reg_id;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
+ obj_id, return_reg_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+ mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+ mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+ goto free_action;
+ }
+
+ action->vport.vport_num = vport_num;
+ action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+ if (vhca_id_valid)
+ action->vport.esw_owner_vhca_id = vhca_id;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+ goto free_shared;
+ }
+
+ return action;
+
+free_shared:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ size_t max_sz = 0;
+ u32 arg_id;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz % W_SIZE != 0) {
+ mlx5hws_err(action->ctx,
+ "Header data size should be in WORD granularity\n");
+ return -EINVAL;
+ }
+ max_sz = max(hdrs[i].sz, max_sz);
+ }
+
+ /* Allocate single shared arg object for all headers */
+ ret = mlx5hws_arg_create(action->ctx,
+ hdrs->data,
+ max_sz,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
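+ /* The single argument object is shared across the whole bulk; each
+ * action only records its own header size and anchors below.
+ */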
+ for (i = 0; i < num_of_hdrs; i++) {
+ action[i].reformat.arg_id = arg_id;
+ action[i].reformat.header_size = hdrs[i].sz;
+ action[i].reformat.num_of_hdrs = num_of_hdrs;
+ action[i].reformat.max_hdr_sz = max_sz;
+ action[i].reformat.require_reparse = true;
+
+ if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+ action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+ action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ action[i].reformat.offset = 0;
+ action[i].reformat.encap = 1;
+ }
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+ goto free_stc;
+ }
+ }
+
+ return 0;
+
+free_stc:
+ while (i--)
+ hws_action_destroy_stcs(&action[i]);
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ int ret;
+
+ /* The action is remove-l2-header + insert-l3-header */
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+ return ret;
+ }
+
+ /* Reuse the insert with pointer for the L2L3 header */
+ ret = hws_action_handle_insert_with_ptr(action,
+ num_of_hdrs,
+ hdrs,
+ log_bulk_sz);
+ if (ret)
+ goto put_shared_stc;
+
+ return 0;
+
+put_shared_stc:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ return ret;
+}
+
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+ u8 *mh_data,
+ int *num_of_actions)
+{
+ int actions;
+ u32 i;
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+ actions = 1;
+
+ /* Add the new header using inline action 4Byte at a time, the header
+ * is added in reversed order to the beginning of the packet to avoid
+ * incorrect parsing by the HW. Since the header is 14B or 18B, an
+ * extra two bytes are padded and later removed.
+ */
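+ /* For example, a 14B header takes four 4B inline inserts (16B in
+ * total), and the remove-words action below strips the 2 pad bytes.
+ */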
+ for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+ MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+ actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+ actions++;
+
+ *num_of_actions = actions;
+}
+
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ u32 arg_id, pat_id;
+ int num_of_actions;
+ int mh_data_size;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+ hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+ mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Create a full modify header action list for the shared case */
+ hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+ /* All DecapL3 cases require the same max arg size */
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ (__be64 *)mh_data,
+ num_of_actions,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+ hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+ mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.max_num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_patterns = num_of_hdrs;
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 bulk_size)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ ret = hws_action_create_stcs(action, 0);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
+ mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+ goto free_action;
+ }
+
+ ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *pattern,
+ u32 log_bulk_size)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ u16 num_actions, max_mh_actions = 0;
+ int i, ret, size_in_bytes;
+ u32 pat_id, arg_id = 0;
+ __be64 *new_pattern;
+ size_t pat_max_sz;
+
+ pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ size_in_bytes = pat_max_sz * sizeof(__be64);
+ new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+ if (!new_pattern)
+ return -ENOMEM;
+
+ /* Calculate maximum number of mh actions for shared arg allocation */
+ for (i = 0; i < num_of_patterns; i++) {
+ size_t new_num_actions;
+ size_t cur_num_actions;
+ u32 nope_location;
+
+ cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+ mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
+ pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
+ &new_num_actions, &nope_location,
+ &new_pattern[i * pat_max_sz]);
+
+ action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.num_of_actions = new_num_actions;
+
+ max_mh_actions = max(max_mh_actions, new_num_actions);
+ }
+
+ if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+ max_mh_actions);
+ ret = -EINVAL;
+ goto free_new_pat;
+ }
+
+ /* Allocate single shared arg for all patterns based on the max size */
+ if (max_mh_actions > 1) {
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ pattern->data,
+ max_mh_actions,
+ log_bulk_size,
+ action->flags &
+ MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ goto free_new_pat;
+ }
+
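+ /* Per pattern: verify the modify actions, then either inline a single
+ * action or allocate a shared pattern object for longer action lists.
+ */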
+ for (i = 0; i < num_of_patterns; i++) {
+ if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+ mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+ ret = -EINVAL;
+ goto free_stc_and_pat;
+ }
+ num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ action[i].modify_header.num_of_patterns = num_of_patterns;
+ action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+ if (num_actions == 1) {
+ pat_id = 0;
+ /* Optimize single modify action to be used inline */
+ action[i].modify_header.single_action = pattern[i].data[0];
+ action[i].modify_header.single_action_type =
+ MLX5_GET(set_action_in, pattern[i].data, action_type);
+ } else {
+ /* Multiple modify actions require a pattern */
+ if (unlikely(action[i].modify_header.nope_locations)) {
+ size_t pattern_sz;
+
+ pattern_sz = action[i].modify_header.num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE;
+ ret =
+ mlx5hws_pat_get_pattern(ctx,
+ &new_pattern[i * pat_max_sz],
+ pattern_sz, &pat_id);
+ } else {
+ ret = mlx5hws_pat_get_pattern(ctx,
+ pattern[i].data,
+ pattern[i].sz,
+ &pat_id);
+ }
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate pattern for modify header\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ }
+ /* Allocate STC for each action representing a pattern */
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ if (pat_id)
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ kfree(new_pattern);
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.pat_id)
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ if (arg_id)
+ mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+ kfree(new_pattern);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_patterns) {
+ mlx5hws_err(ctx, "Invalid number of patterns\n");
+ return NULL;
+ }
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ num_of_patterns);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
+ mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_modify_header_hws(action,
+ num_of_patterns,
+ patterns,
+ log_bulk_size);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags)
+{
+ struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_action *action;
+ u32 i;
+ int ret;
+
+ if (num_dest <= 1) {
+ mlx5hws_err(ctx, "Action must have multiple dests\n");
+ return NULL;
+ }
+
+ if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ } else {
+ mlx5hws_err(ctx, "Action flags not supported\n");
+ return NULL;
+ }
+
+ dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+ if (!dest_list)
+ return NULL;
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5hws_action_type action_type = dests[i].dest->type;
+ struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+ switch (action_type) {
+ case MLX5HWS_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = ignore_flow_level;
+ /* TODO: In SW steering, the upper layer handles a 'go to WIRE'
+ * destination here by setting the 'is_wire_ft' flag when the
+ * destination is the wire, since the uplink must be the last
+ * dest in the list.
+ */
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ default:
+ mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+ goto free_dest_list;
+ }
+
+ if (reformat_action) {
+ mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+ goto free_dest_list;
+ }
+ }
+
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+ dest_list[i].ext_reformat_id);
+ }
+ kfree(dest_list);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action_reformat_header *reformat_hdrs;
+ struct mlx5hws_action *action;
+ int ret;
+ int i;
+
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+ if (!reformat_hdrs)
+ goto free_action;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].offset % W_SIZE != 0) {
+ mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+ goto free_reformat_hdrs;
+ }
+
+ action[i].reformat.anchor = hdrs[i].anchor;
+ action[i].reformat.encap = hdrs[i].encap;
+ action[i].reformat.offset = hdrs[i].offset;
+
+ reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+ reformat_hdrs[i].data = hdrs[i].hdr.data;
+ }
+
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+ reformat_hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_reformat_hdrs;
+ }
+
+ kfree(reformat_hdrs);
+
+ return action;
+
+free_reformat_hdrs:
+ kfree(reformat_hdrs);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+ if (!action)
+ return NULL;
+
+ /* Only header removal by anchor with an explicit size is supported */
+ if (attr->size % W_SIZE != 0) {
+ mlx5hws_err(ctx,
+ "Invalid size, HW supports header remove in WORD granularity\n");
+ goto free_action;
+ }
+
+ if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+ mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+ MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+ goto free_action;
+ }
+
+ action->remove_header.anchor = attr->anchor;
+ action->remove_header.size = attr->size / W_SIZE;
+
+ if (hws_action_create_stcs(action, 0))
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_definer *definer;
+ __be32 *tag;
+ int ret;
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
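+ /* Match on the outer packet length in DW0 (the /4 converts the
+ * byte-based format offset to a dword selector index).
+ */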
+ definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+ /* Set DW0 tag mask */
+ tag = (__force __be32 *)definer->mask.jumbo;
+ tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0) {
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(definer);
+ return NULL;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+ definer->obj_id = ret;
+
+ return definer;
+}
+
+static struct mlx5hws_matcher_action_ste *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ /* Check if STE range is supported */
+ if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+ mlx5hws_err(ctx, "Range STE format not supported\n");
+ return NULL;
+ }
+
+ table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+ if (!table_ste)
+ return NULL;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = 1;
+ table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!table_ste->pool) {
+ mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+ goto free_ste;
+ }
+
+ /* Allocate RTC */
+ rtc_0_id = &table_ste->rtc_0_id;
+ rtc_1_id = &table_ste->rtc_1_id;
+ ste_pool = table_ste->pool;
+ ste = &table_ste->ste;
+ ste->order = 1;
+
+ rtc_attr.log_size = 0;
+ rtc_attr.log_depth = 0;
+ rtc_attr.miss_ft_id = miss_ft_id;
+ rtc_attr.num_hash_definer = 1;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.fw_gen_wqe = true;
+ rtc_attr.is_scnd_range = true;
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
+ default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create RTC");
+ goto pool_destroy;
+ }
+
+ /* Create mirror RTC */
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror RTC");
+ goto destroy_rtc_0;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return table_ste;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+ mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(table_ste);
+ return NULL;
+}
+
+static void
+hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+ mlx5hws_pool_destroy(table_ste->pool);
+ kfree(table_ste);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static int
+hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer,
+ u32 min, u32 max)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ u32 no_use, used_rtc_0_id, used_rtc_1_id;
+ struct mlx5hws_context_common_res *common_res;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ __be32 *wqe_data_arr;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Init default send STE attributes */
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.user_data = &no_use;
+ ste_attr.send_attr.rule = NULL;
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = true;
+ ste_attr.rtc_0 = table_ste->rtc_0_id;
+ ste_attr.rtc_1 = table_ste->rtc_1_id;
+ ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+ ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+ common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+
+ /* Init an empty match STE which will always hit */
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe_data;
+ ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+ /* Fill WQE control data */
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(common_res->default_stc->nop_ctr.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(common_res->default_stc->nop_dw5.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(common_res->default_stc->nop_dw6.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(common_res->default_stc->nop_dw7.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+ wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+ ste_attr.range_wqe_data = &range_wqe_data;
+ ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+ /* Fill the range matching fields:
+ * min/max_value_2 correspond to match_dw_0 in the definer;
+ * min_value_2 is set in DW0 of the STE and max_value_2 in DW1.
+ */
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+ /* Send WQEs to FW */
+ mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to drain control queue");
+ goto error;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_action *action;
+ u32 miss_ft_id = miss_ft->id;
+ u32 hit_ft_id = hit_ft->id;
+ int ret;
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5hws_err(ctx, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ definer = hws_action_create_dest_match_range_definer(ctx);
+ if (!definer)
+ goto free_action;
+
+ table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+ if (!table_ste)
+ goto destroy_definer;
+
+ hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+ if (!hit_ft_action)
+ goto destroy_table_ste;
+
+ ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+ hit_ft_action,
+ definer, min, max);
+ if (ret)
+ goto destroy_hit_ft_action;
+
+ action->range.table_ste = table_ste;
+ action->range.definer = definer;
+ action->range.hit_ft_action = hit_ft_action;
+
+ /* Allocate STC for jumps to STE */
+ mutex_lock(&ctx->ctrl_lock);
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = table_ste->ste;
+ stc_attr.ste_table.ste_pool = table_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return action;
+
+error_unlock:
+ mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+ mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+ hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+ mlx5hws_definer_free(ctx, definer);
+free_action:
+ kfree(action);
+ mlx5hws_err(ctx, "Failed to create action dest match range");
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+ return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags)
+{
+ mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ return NULL;
+}
+
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+ u32 ext_reformat_id;
+ bool shared_arg;
+ u32 obj_id;
+ u32 i;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_MISS:
+ case MLX5HWS_ACTION_TYP_TAG:
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_CTR:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ hws_action_destroy_stcs(action);
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ hws_action_destroy_stcs(action);
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ break;
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+ if (ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+ ext_reformat_id);
+ }
+ kfree(action->dest_array.dest_list);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ shared_arg = false;
+ for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.num_of_actions > 1) {
+ mlx5hws_pat_put_pattern(action[i].ctx,
+ action[i].modify_header.pat_id);
+ /* Save the shared arg object to be freed afterwards */
+ obj_id = action[i].modify_header.arg_id;
+ shared_arg = true;
+ }
+ }
+ if (shared_arg)
+ mlx5hws_arg_destroy(action->ctx, obj_id);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_RANGE:
+ hws_action_destroy_stcs(action);
+ hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+ mlx5hws_definer_free(action->ctx, action->range.definer);
+ mlx5hws_action_destroy(action->range.hit_ft_action);
+ break;
+ case MLX5HWS_ACTION_TYP_LAST:
+ break;
+ default:
+ pr_warn("HWS: Invalid action type: %d\n", action->type);
+ }
+}
+
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+ hws_action_destroy_hws(action);
+
+ kfree(action);
+ return 0;
+}
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ int ret;
+
+ if (ctx->common_res[tbl_type].default_stc) {
+ ctx->common_res[tbl_type].default_stc->refcount++;
+ return 0;
+ }
+
+ default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+ if (!default_stc)
+ return -ENOMEM;
+
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_ctr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+ goto free_default_stc;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw5);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+ goto free_nop_ctr;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw6);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+ goto free_nop_dw5;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw7);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+ goto free_nop_dw6;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->default_hit);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+ goto free_nop_dw7;
+ }
+
+ ctx->common_res[tbl_type].default_stc = default_stc;
+ ctx->common_res[tbl_type].default_stc->refcount++;
+
+ return 0;
+
+free_nop_dw7:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+ kfree(default_stc);
+ return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_action_default_stc *default_stc;
+
+ default_stc = ctx->common_res[tbl_type].default_stc;
+ if (--default_stc->refcount)
+ return;
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+ kfree(default_stc);
+ ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions,
+ u32 nope_locations)
+{
+ u8 *new_arg_data = NULL;
+ int i, j;
+
+ if (unlikely(nope_locations)) {
+ new_arg_data = kcalloc(num_of_actions,
+ MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (unlikely(!new_arg_data))
+ return;
+
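+ /* Expand the action list so every NOPE location gets its own slot:
+ * j tracks the destination index and skips one extra slot for each
+ * bit set in nope_locations.
+ */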
+ for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+ memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
+ if (BIT(i) & nope_locations)
+ j++;
+ }
+ }
+
+ mlx5hws_arg_write(queue, NULL, arg_idx,
+ new_arg_data ? new_arg_data : arg_data,
+ num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+ kfree(new_arg_data);
+}
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+ u8 *e_src;
+ int i;
+
+ /* num_of_actions = remove L2L3 + 4/5 inserts + remove extra 2 bytes.
+ * Copy from the end of src to the start of dst; e_src starts at the
+ * end of the header, and the trailing 2 bytes are the leftover from
+ * the 14B or 18B header.
+ */
+ if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+ else
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+ /* Move dst over the first remove action + zero data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ /* Move dst over the first insert ctrl action */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+ /* Actions:
+ * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * the loop is without the last insertion.
+ */
+ for (i = 0; i < num_of_actions - 3; i++) {
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+ memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ }
+ /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ memcpy(dst, e_src, 2);
+}
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+ u8 req_flags)
+{
+ /* Use a new setter if requested flags are taken */
+ while (setter->flags & req_flags)
+ setter++;
+
+ /* Use the current setter if the required flags are not taken */
+ return setter;
+}
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+ enum mlx5hws_action_stc_idx stc_idx,
+ u8 action_idx)
+{
+ struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+ apply->wqe_ctrl->stc_ix[stc_idx] =
+ htonl(action->stc[apply->tbl_type].offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ u8 *single_action;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action;
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
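+ /* A single SET/ADD action is passed inline via DW7; COPY and
+ * ADD_FIELD carry no inline data, so DW7 is left zero.
+ */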
+ if (action->modify_header.num_of_actions == 1) {
+ if (action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_COPY ||
+ action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ return;
+ }
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ single_action = (u8 *)&action->modify_header.single_action;
+ else
+ single_action = rule_action->modify_header.data;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+ *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+ } else {
+ /* The argument index scales the rule offset by the args per action list */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nope_locations);
+ }
+ }
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_idx, arg_sz;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* The argument index scales the rule offset by the args required for the header size */
+ arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_write(apply->queue, NULL,
+ action->reformat.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->reformat.header_size);
+ }
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+ /* The argument index scales the rule offset by the args required for the number of actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_decapl3_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->modify_header.num_of_actions);
+ }
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ u32 exe_aso_ctrl;
+ u32 offset;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+
+ switch (rule_action->action->type) {
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* exe_aso_ctrl format:
+ * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+ */
+ offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+ MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+ break;
+ default:
+ mlx5hws_err(rule_action->action->ctx,
+ "Unsupported ASO action type: %d\n", rule_action->action->type);
+ return;
+ }
+
+	/* aso_object_offset format: [24 bits] */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_single];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_ctr];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ /* Always jump to index zero */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+ enum mlx5hws_action_type *action_type = at->action_type_arr;
+ struct mlx5hws_actions_wqe_setter *setter = at->setters;
+ struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+ struct mlx5hws_actions_wqe_setter *last_setter;
+ int i;
+
+ /* Note: Given action combination must be valid */
+
+	/* Check if actions were already processed */
+ if (at->num_of_action_stes)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+ setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+	/* The same action template setters can be used with either a jumbo
+	 * or a match STE. To support both cases we reserve the first setter
+	 * for the jumbo STE case, allowing a jump to the first action STE.
+	 * This extra setter can be elided in some cases on rule creation.
+	 */
+ setter = start_setter;
+ last_setter = start_setter;
+
+ for (i = 0; i < at->num_actions; i++) {
+ switch (action_type[i]) {
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ case MLX5HWS_ACTION_TYP_MISS:
+ /* Hit action */
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_hit;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_RANGE:
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_range;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ /* Single remove header to header */
+ if (pop_setter) {
+ /* We have 2 pops, use the shared */
+ pop_setter->set_single = &hws_action_setter_single_double_pop;
+ break;
+ }
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY |
+ ASF_INSERT);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ pop_setter = setter;
+ break;
+
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ /* Double insert inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_push_vlan;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ /* Double modify header list */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+ setter->set_double = &hws_action_setter_modify_header;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* Double ASO action */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+ setter->flags |= ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_aso;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ /* Single remove header to header */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ /* Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ /* Single remove + Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_DOUBLE);
+ setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ setter->set_single = &hws_action_setter_common_decap;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ /* Double modify header list with remove and push inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+ setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_TAG:
+ /* Single TAG action, search for any room from the start */
+ setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+ setter->flags |= ASF_SINGLE1;
+ setter->set_single = &hws_action_setter_tag;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_CTR:
+			/* Control counter action
+			 * TODO: Currently the counter is executed first. Support
+			 * is needed for a single-action counter that is done last.
+			 * Example: Decap + CTR
+			 */
+ setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+ setter->flags |= ASF_CTR;
+ setter->set_ctr = &hws_action_setter_ctrl_ctr;
+ setter->idx_ctr = i;
+ break;
+ default:
+			pr_warn("HWS: Invalid action type while processing action template: action_type[%d]=%d\n",
+ i, action_type[i]);
+ return -EOPNOTSUPP;
+ }
+
+ last_setter = max(setter, last_setter);
+ }
+
+ /* Set default hit on the last STE if no hit action provided */
+ if (!(last_setter->flags & ASF_HIT))
+ last_setter->set_hit = &hws_action_setter_default_hit;
+
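+	/* The number of action STEs is the span of setters actually used,
+	 * not counting the reserved jumbo setter at index 0.
+	 */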
+ at->num_of_action_stes = last_setter - start_setter + 1;
+
+ /* Check if action template doesn't require any action DWs */
+ at->only_term = (at->num_of_action_stes == 1) &&
+ !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+ return 0;
+}
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+ struct mlx5hws_action_template *at;
+ u8 num_actions = 0;
+ int i;
+
+ at = kzalloc(sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return NULL;
+
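+	/* Count the actions, including the terminating TYP_LAST entry */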
+ while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+ ;
+
+ at->num_actions = num_actions - 1;
+ at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+ if (!at->action_type_arr)
+ goto free_at;
+
+ for (i = 0; i < num_actions; i++)
+ at->action_type_arr[i] = action_type[i];
+
+ return at;
+
+free_at:
+ kfree(at);
+ return NULL;
+}
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+ kfree(at->action_type_arr);
+ kfree(at);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
new file mode 100644
index 000000000000..bf5c1b241006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_ACTION_H_
+#define MLX5HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+ MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+ MLX5HWS_ACTION_STC_IDX_HIT = 1,
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+	/* STC jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+	/* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
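+	/* A double action consumes both single slots 2 and 3;
+	 * a triple consumes all three single slots.
+	 */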
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nope_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+ struct {
+ u32 obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ bool esw_owner_vhca_id_valid;
+ } vport;
+ struct {
+ u32 obj_id;
+ } dest_obj;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5hws_cmd_set_fte_dest *dest_list;
+ } dest_array;
+ struct {
+ u8 type;
+ u8 start_anchor;
+ u8 end_anchor;
+ u8 num_of_words;
+ bool decap;
+ } insert_hdr;
+ struct {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+ } remove_header;
+ struct {
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ } range;
+ };
+ };
+
+ struct ibv_flow_action *flow_action;
+ u32 obj_id;
+ struct ibv_qp *qp;
+ };
+};
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+ u16 num_of_actions);
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type);
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(apply->common_res->default_stc->nop_dw6.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter,
+ bool is_jumbo)
+{
+ u8 num_of_actions;
+
+ /* Set control counter */
+ if (setter->set_ctr)
+ setter->set_ctr(apply, setter);
+ else
+ mlx5hws_action_setter_default_ctr(apply, setter);
+
+ if (!is_jumbo) {
+ if (unlikely(setter->set_triple)) {
+ /* Set triple on match */
+ setter->set_triple(apply, setter);
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+ } else {
+ /* Set single and double on match */
+ if (setter->set_single)
+ setter->set_single(apply, setter);
+ else
+ mlx5hws_action_setter_default_single(apply, setter);
+
+ if (setter->set_double)
+ setter->set_double(apply, setter);
+ else
+ mlx5hws_action_setter_default_double(apply, setter);
+
+ num_of_actions = setter->set_double ?
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+ }
+ } else {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+ }
+
+ /* Set next/final hit action */
+ setter->set_hit(apply, setter);
+
+ /* Set number of actions */
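+	/* The count is encoded in the 3 MSBs of the control STC word */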
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(num_of_actions << 29);
+}
+
+#endif /* MLX5HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
new file mode 100644
index 000000000000..e6ed66202a40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+ int i, s, ret = 0;
+
+ buddy->max_order = max_order;
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ if (!buddy->bitmap)
+ return -ENOMEM;
+
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+ if (!buddy->num_free) {
+ ret = -ENOMEM;
+ goto err_out_free_bits;
+ }
+
+ for (i = 0; i <= (int)buddy->max_order; ++i) {
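+		/* Order i needs one bit per block of size 2^i,
+		 * i.e. 2^(max_order - i) bits.
+		 */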
+ s = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+ if (!buddy->bitmap[i]) {
+ ret = -ENOMEM;
+ goto err_out_free_num_free;
+ }
+ }
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_num_free:
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+
+err_out_free_bits:
+ kfree(buddy->bitmap);
+ return ret;
+}
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ return NULL;
+
+ if (hws_buddy_init(buddy, max_order))
+ goto free_buddy;
+
+ return buddy;
+
+free_buddy:
+ kfree(buddy);
+ return NULL;
+}
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+ int i;
+
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+ u32 start_order,
+ u32 *segment,
+ u32 *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+	u32 seg, order_iter;
+	int err;
+
+ err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
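+	/* Split the block down to the requested order, marking the freed
+	 * upper buddy (seg ^ 1) at each intermediate order.
+	 */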
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+
+ return seg;
+}
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+ seg >>= order;
+
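+	/* Coalesce with the buddy block (seg ^ 1) for as long as it is
+	 * also free, moving one order up on each merge.
+	 */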
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ bitmap_set(buddy->bitmap[order], seg, 1);
+ ++buddy->num_free[order];
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
new file mode 100644
index 000000000000..338c44bbedaf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BUDDY_H_
+#define MLX5HWS_BUDDY_H_
+
+struct mlx5hws_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+};
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
+
+#endif /* MLX5HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
new file mode 100644
index 000000000000..bd52b05db367
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+ /* assign random queue */
+ return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+ return min(ctx->send_queue[queue_id].num_entries / 2,
+ MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+ return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_lock(queue_lock);
+ }
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i = bwc_queues;
+
+ while (i--) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_unlock(queue_lock);
+ }
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+ u32 priority,
+ u8 size_log)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->priority = priority;
+ attr->optimize_using_rule_idx = 0;
+ attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+ attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+ attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+ attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+ attr->rule.num_log = size_log;
+ attr->resizable = true;
+ attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[])
+{
+ enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+ struct mlx5hws_context *ctx = table->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_matcher_attr attr = {0};
+ int i;
+
+ bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+ if (!bwc_matcher->rules)
+ goto err;
+
+ for (i = 0; i < bwc_queues; i++)
+ INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+ hws_bwc_matcher_init_attr(&attr,
+ priority,
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+ bwc_matcher->priority = priority;
+ bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ /* create dummy action template */
+ bwc_matcher->at[0] =
+ mlx5hws_action_template_create(action_types ?
+ action_types : init_action_types);
+ if (!bwc_matcher->at[0]) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+ goto free_bwc_matcher_rules;
+ }
+
+ bwc_matcher->num_of_at = 1;
+
+ bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!bwc_matcher->mt) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+ goto free_at;
+ }
+
+ bwc_matcher->matcher = mlx5hws_matcher_create(table,
+ &bwc_matcher->mt, 1,
+ &bwc_matcher->at[0],
+ bwc_matcher->num_of_at,
+ &attr);
+ if (!bwc_matcher->matcher) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+ goto free_mt;
+ }
+
+ return 0;
+
+free_mt:
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+ mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+ kfree(bwc_matcher->rules);
+err:
+ return -EINVAL;
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ bool is_complex;
+ int ret;
+
+ if (!mlx5hws_context_bwc_supported(table->ctx)) {
+ mlx5hws_err(table->ctx,
+ "BWC matcher: context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!bwc_matcher)
+ return NULL;
+
+	/* Check if the required match params can all be matched
+	 * in a single STE, otherwise a complex matcher is needed.
+	 */
+
+ is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+ if (is_complex)
+ ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask);
+ else
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret)
+ goto free_bwc_matcher;
+
+ return bwc_matcher;
+
+free_bwc_matcher:
+ kfree(bwc_matcher);
+
+ return NULL;
+}
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ mlx5hws_matcher_destroy(bwc_matcher->matcher);
+ bwc_matcher->matcher = NULL;
+
+ for (i = 0; i < bwc_matcher->num_of_at; i++)
+ mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+ kfree(bwc_matcher->rules);
+
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ if (bwc_matcher->num_of_rules)
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "BWC matcher destroy: matcher still has %d rules\n",
+ bwc_matcher->num_of_rules);
+
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+ kfree(bwc_matcher);
+ return 0;
+}
+
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
+{
+ struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+ u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+ bool got_comp = *pending_rules >= burst_th;
+ bool queue_full;
+ int err = 0;
+ int ret;
+ int i;
+
+ /* Check if there are any completions at all */
+ if (!got_comp && !drain)
+ return 0;
+
+ queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
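+	/* Poll while the queue is full, or, when bursting or draining,
+	 * until all the pending rules have completed.
+	 */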
+ while (queue_full || ((got_comp || drain) && *pending_rules)) {
+ ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+ if (unlikely(ret < 0)) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+ queue_id, ret);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ (*pending_rules) -= ret;
+ for (i = 0; i < ret; i++) {
+ if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+ mlx5hws_err(ctx,
+ "BWC poll error: polling queue %d returned completion with error\n",
+ queue_id);
+ err = -EINVAL;
+ }
+ }
+ queue_full = false;
+ }
+
+ got_comp = !!ret;
+ }
+
+ return err;
+}
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ /* no use of INSERT_BY_INDEX in bwc rule */
+ rule_attr->rule_idx = 0;
+
+ /* notify HW at each rule insertion/deletion */
+ rule_attr->burst = 0;
+
+ /* We don't need user data, but the API requires it to exist */
+ rule_attr->user_data = (void *)0xFACADE;
+
+ rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+ rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_rule *bwc_rule;
+
+ bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule))
+ goto out_err;
+
+ bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule->rule))
+ goto free_rule;
+
+ bwc_rule->bwc_matcher = bwc_matcher;
+ return bwc_rule;
+
+free_rule:
+ kfree(bwc_rule);
+out_err:
+ return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	kfree(bwc_rule->rule);
+ kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules++;
+ bwc_rule->bwc_queue_idx = idx;
+ list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules--;
+ list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_flow_op_result completion;
+ int ret;
+
+ ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ do {
+ ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+ } while (ret != 1);
+
+ if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+ (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+ completion.status, bwc_rule->rule->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 idx = bwc_rule->bwc_queue_idx;
+ struct mlx5hws_rule_attr attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ hws_bwc_rule_list_remove(bwc_rule);
+
+ mutex_unlock(queue_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ int ret;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+ 0, /* only one match template supported */
+ match_param,
+ at_idx,
+ rule_actions,
+ rule_attr,
+ bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+ at_idx, rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+
+ return ret;
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = mlx5hws_rule_action_update(bwc_rule->rule,
+ at_idx,
+ rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+ return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+ return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
+ caps->ste_alloc_log_max - 1;
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u32 num_of_rules)
+{
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ return false;
+
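+	/* Rehash if the rules occupy at least REHASH_PERCENT_TH percent
+	 * of the current matcher size (2^size_log).
+	 */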
+ if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+ (1UL << bwc_matcher->size_log)))
+ return true;
+
+ return false;
+}
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+ enum mlx5hws_action_type action_types[])
+{
+ int i = 0;
+
+ for (i = 0;
+ rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+ i++) {
+ action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+ }
+
+ action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+
+ hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+ bwc_matcher->at[bwc_matcher->num_of_at] =
+ mlx5hws_action_template_create(action_types);
+
+ if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+ return -ENOMEM;
+
+ bwc_matcher->num_of_at++;
+ return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -ENOMEM;
+ }
+
+ bwc_matcher->size_log =
+ min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type *action_type_arr;
+ int i, j;
+
+ /* start from index 1 - first action template is a dummy */
+ for (i = 1; i < bwc_matcher->num_of_at; i++) {
+ j = 0;
+ action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+ while (rule_actions[j].action &&
+ rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+ if (action_type_arr[j] != rule_actions[j].action->type)
+ break;
+ j++;
+ }
+
+ if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+ (!rule_actions[j].action ||
+ rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+ return i;
+ }
+
+ return -1;
+}
+
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule **bwc_rules;
+ struct mlx5hws_rule_attr rule_attr;
+ u32 *pending_rules;
+ int i, j, ret = 0;
+ bool all_done;
+ u16 burst_th;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+ if (!pending_rules)
+ return -ENOMEM;
+
+ bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+ if (!bwc_rules) {
+ ret = -ENOMEM;
+ goto free_pending_rules;
+ }
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ bwc_rules[i] = NULL;
+ else
+ bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+ struct mlx5hws_bwc_rule,
+ list_node);
+ }
+
+ do {
+ all_done = true;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+ for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+ rule_attr.burst = !!((j + 1) % burst_th);
+ ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+ bwc_rules[i]->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
+ goto free_bwc_rules;
+ }
+
+ all_done = false;
+ pending_rules[i]++;
+ bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+ &bwc_matcher->rules[i]) ?
+ NULL : list_next_entry(bwc_rules[i], list_node);
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+ &pending_rules[i], false);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+ } while (!all_done);
+
+ /* drain all the bwc queues */
+ for (i = 0; i < bwc_queues; i++) {
+ if (pending_rules[i]) {
+ u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+ ret = hws_bwc_queue_poll(ctx, queue_id,
+ &pending_rules[i], true);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+
+free_bwc_rules:
+ kfree(bwc_rules);
+free_pending_rules:
+ kfree(pending_rules);
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(&matcher_attr,
+ bwc_matcher->priority,
+ bwc_matcher->size_log);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+ return -ENOMEM;
+ }
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ u32 num_of_rules;
+ int ret;
+
+	/* If the current matcher is already at its max size, we can't
+	 * do the rehash. Skip it and try adding the rule again - perhaps
+	 * there was some change.
+	 */
+ if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+ return 0;
+
+	/* It is possible that another rule has already performed the rehash.
+	 * Need to check again whether rehash is really needed.
+	 * If the reason for the rehash was size, but it no longer is - skip it.
+	 */
+ num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+ return 0;
+
+	/* Now we're done with all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+
+ ret = hws_bwc_matcher_extend_size(bwc_matcher);
+ if (ret)
+ return ret;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* Rehash by action template doesn't require any additional checking.
+ * The bwc_matcher already contains the new action template.
+ * Just do the usual rehash:
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ u32 num_of_rules;
+ int ret = 0;
+ int at_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ return ret;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+			/* Action template attach failed, possibly due to
+			 * requiring more action STEs.
+			 * Need to attempt creating a new matcher with all
+			 * the action templates, including the new one.
+			 */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash AT failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+	/* check if the number of rules requires rehash */
+ num_of_rules = bwc_matcher->num_of_rules;
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+ bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ bwc_matcher->size_log,
+ ret);
+ return ret;
+ }
+
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ if (likely(!ret)) {
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ return 0; /* rule inserted successfully */
+ }
+
+	/* At this point the rule wasn't added.
+	 * It could be because there was a collision, or some other problem.
+	 * Without diving deeper than the API, all we know is that the
+	 * completion status is MLX5HWS_FLOW_OP_ERROR.
+	 * Try rehashing by size and inserting the rule again - last chance.
+	 */
+
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Rehash done, but we still have that pesky rule to add */
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ return ret;
+ }
+
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ u16 bwc_queue_idx;
+ int ret;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+ if (unlikely(!bwc_rule))
+ return NULL;
+
+ bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ if (unlikely(ret)) {
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return NULL;
+ }
+
+ return bwc_rule;
+}
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+ u16 idx;
+
+ idx = bwc_rule->bwc_queue_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+		/* check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx < 0)) {
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+				mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+				/* Action template attach failed, possibly due to
+				 * requiring more action STEs.
+				 * Need to attempt creating a new matcher with all
+				 * the action templates, including the new one.
+				 */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule update: rehash AT failed (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_update_sync(bwc_rule,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ mutex_unlock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return -EINVAL;
+ }
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
new file mode 100644
index 000000000000..4fe8c32d8fbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_H_
+#define MLX5HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+struct mlx5hws_bwc_matcher {
+ struct mlx5hws_matcher *matcher;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u8 num_of_at;
+ u16 priority;
+ u8 size_log;
+ u32 num_of_rules; /* atomically accessed */
+ struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct mlx5hws_rule *rule;
+ u16 bwc_queue_idx;
+ struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+	/* Besides the control queue, half of the queues are
+	 * regular HWS queues, and the other half are BWC queues.
+	 */
+ return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
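+	/* BWC queues occupy the second half of the queue range */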
+ return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..bb563f50ef09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+		/* The only case that we're interested in is -E2BIG,
+		 * which means that the match parameters need to be
+		 * split into a complex matcher.
+		 * For all other cases (good or bad) - just return false
+		 * and let the usual matcher creation path handle it,
+		 * both for good and bad flows.
+		 */
+		if (ret == -E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+ "Complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "Moving complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
new file mode 100644
index 000000000000..2c7b14172049
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ default:
+ pr_warn("HWS: unknown flow dest type %d\n", type);
+ return 0;
+ }
+}
+
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+ u32 object_type,
+ u32 object_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+ MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+ MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
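+/*
+ * Usage sketch for the helper above (editorial illustration; "fw_ft_type"
+ * and the level are hypothetical - a real caller derives them from its
+ * table attributes):
+ *
+ *	struct mlx5hws_cmd_ft_create_attr ft_attr = {
+ *		.type = fw_ft_type,
+ *		.level = 1,
+ *		.rtc_valid = true,
+ *	};
+ *	u32 table_id;
+ *
+ *	if (!mlx5hws_cmd_flow_table_create(mdev, &ft_attr, &table_id)) {
+ *		// ... attach RTCs via mlx5hws_cmd_flow_table_modify() ...
+ *		mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, table_id);
+ *	}
+ */
+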
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ void *ft_ctx;
+
+ MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+ MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+ ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+ return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+ MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+ *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+ *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+ return ret;
+}
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
+}
+
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_fg_attr *fg_attr,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int ret;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+ if (ret)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kvfree(in);
+ return ret;
+}
+
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+ u32 ft_id, u32 fg_id, u8 ft_type)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ void *in_flow_context;
+ u32 dest_entry_sz;
+ u32 total_dest_sz;
+ u32 action_flags;
+ u8 *in_dests;
+ u32 inlen;
+ u32 *in;
+ int ret;
+ u32 i;
+
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+ }
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+ enum mlx5_ifc_flow_destination_type ifc_dest_type =
+ hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ fallthrough;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat_id);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+ kfree(in);
+ return ret;
+}
+
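+/*
+ * Usage sketch (hypothetical IDs): forwarding an FTE to a single flow
+ * table destination via the helper above; dests may hold several entries,
+ * with dests_num reflecting the count:
+ *
+ *	struct mlx5hws_cmd_set_fte_dest dest = {
+ *		.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ *		.destination_id = next_ft_id,
+ *	};
+ *	struct mlx5hws_cmd_set_fte_attr fte_attr = {
+ *		.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ *		.dests_num = 1,
+ *		.dests = &dest,
+ *	};
+ *
+ *	ret = mlx5hws_cmd_set_fte(mdev, table_type, table_id, group_id,
+ *				  &fte_attr);
+ */
+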
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ struct mlx5hws_cmd_fg_attr fg_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *tbl;
+ int ret;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FT\n");
+ goto free_tbl;
+ }
+
+ fg_attr.table_id = tbl->ft_id;
+ fg_attr.table_type = ft_attr->type;
+
+ ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FG\n");
+ goto free_ft;
+ }
+
+ ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+ tbl->ft_id, tbl->fg_id, fte_attr);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FTE\n");
+ goto free_fg;
+ }
+
+ tbl->type = ft_attr->type;
+ return tbl;
+
+free_fg:
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl)
+{
+ mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+ mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+ kfree(tbl);
+}
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+ u32 default_miss_tbl;
+
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr->type = fw_ft_type;
+ ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+ default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ if (!default_miss_tbl) {
+ pr_warn("HWS: no flow table ID for default miss\n");
+ return;
+ }
+
+ ft_attr->table_miss_id = default_miss_tbl;
+}
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
+ MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+ MLX5_IFC_RTC_STE_FORMAT_11DW :
+ MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+ if (rtc_attr->is_scnd_range) {
+ MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+ MLX5_SET(rtc, attr, num_match_ste, 2);
+ }
+
+ MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+ MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+ MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+ MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+ MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+ MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+ MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+ MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+ MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+ MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+ MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
+ MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+ MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create RTC\n");
+ goto out;
+ }
+
+ *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STC\n");
+ goto out;
+ }
+
+ *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ void *stc_param)
+{
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+ MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+ MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_argument_id, stc_attr->modify_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+ MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, stc_param, decap,
+ stc_attr->remove_header.decap);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+ stc_attr->remove_header.start_anchor);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+ stc_attr->remove_header.end_anchor);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+ MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, stc_param, encap,
+ stc_attr->insert_header.encap);
+ MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+ stc_attr->insert_header.is_inline);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+ stc_attr->insert_header.insert_anchor);
+ /* HW gets the next 2 sizes in words */
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+ stc_attr->insert_header.header_size / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+ stc_attr->insert_header.insert_offset / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+ stc_attr->insert_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_COPY:
+ case MLX5_IFC_STC_ACTION_TYPE_SET:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+ *(__be64 *)stc_param = stc_attr->modify_action.data;
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+ MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+ stc_attr->vport.vport_num);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+ stc_attr->vport.esw_owner_vhca_id);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+ stc_attr->vport.eswitch_owner_vhca_id_valid);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_DROP:
+ case MLX5_IFC_STC_ACTION_TYPE_NOP:
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ASO:
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+ stc_attr->aso.devx_obj_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+ stc_attr->aso.return_reg_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+ stc_attr->aso.aso_type);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+ stc_attr->ste_table.ste_obj_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+ stc_attr->ste_table.match_definer_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+ stc_attr->ste_table.log_hash_size);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+ MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+ stc_attr->remove_words.start_anchor);
+ MLX5_SET(stc_ste_param_remove_words, stc_param,
+ remove_size, stc_attr->remove_words.num_of_words);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+ MLX5_SET(stc_ste_param_trailer, stc_param, command,
+ stc_attr->reformat_trailer.op);
+ MLX5_SET(stc_ste_param_trailer, stc_param, type,
+ stc_attr->reformat_trailer.type);
+ MLX5_SET(stc_ste_param_trailer, stc_param, length,
+ stc_attr->reformat_trailer.size);
+ break;
+ default:
+ mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *stc_param;
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in,
+ op_param.query.obj_offset, stc_attr->stc_offset);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+ MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+ MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+ MLX5_SET64(stc, attr, modify_field_select,
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+ /* Set destination TIRN, TAG, FT ID, STE ID */
+ stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+ ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+ if (ret)
+ return ret;
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+ stc_attr->action_type);
+
+ return ret;
+}
+
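+/*
+ * Sketch (hypothetical IDs and offsets): programming a counter action into
+ * one slot of a previously created STC range:
+ *
+ *	struct mlx5hws_cmd_stc_modify_attr stc_attr = {
+ *		.action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER,
+ *		.action_offset = 0,
+ *		.stc_offset = chunk_offset,
+ *		.id = flow_counter_id,
+ *	};
+ *
+ *	ret = mlx5hws_cmd_stc_modify(mdev, stc_id, &stc_attr);
+ */
+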
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, arg);
+ MLX5_SET(arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ARG\n");
+ goto out;
+ }
+
+ *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ int num_of_actions;
+ u64 *pattern_data;
+ void *pattern;
+ void *attr;
+ int ret;
+ int i;
+
+ if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+ mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+ pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+ return -EINVAL;
+ }
+
+ attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+ pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+ /* Pattern_length is in ddwords */
+ MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+ pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+ memcpy(pattern_data, actions, pattern_length);
+
+ num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+ for (i = 0; i < num_of_actions; i++) {
+ int type;
+
+ type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+ if (type != MLX5_MODIFICATION_TYPE_COPY &&
+ type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+			/* Copy and add_field actions use all bytes for control */
+ MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+ goto out;
+ }
+
+ *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
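+/*
+ * Sketch: each modify-header action occupies MLX5HWS_MODIFY_ACTION_SIZE
+ * (8 bytes, as implied by the u64 indexing above), so pattern_length is a
+ * multiple of 8. A hypothetical single-action pattern:
+ *
+ *	u8 actions[MLX5HWS_MODIFY_ACTION_SIZE] = {};
+ *
+ *	MLX5_SET(set_action_in, actions, action_type, MLX5_ACTION_TYPE_SET);
+ *	MLX5_SET(set_action_in, actions, field,
+ *		 MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16);
+ *	ret = mlx5hws_cmd_header_modify_pattern_create(mdev, sizeof(actions),
+ *						       actions, &ptrn_id);
+ */
+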
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STE);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+ MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STE\n");
+ goto out;
+ }
+
+ *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+ void *ptr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+ MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+ MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+ MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+ MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+ MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+ MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+ MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+ MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+ MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+ MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+ MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+ MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+ MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+ MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+ MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+ MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+ MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+ MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+ ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+ memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create Definer\n");
+ goto out;
+ }
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
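+/*
+ * Sketch: a SELECT-format definer is fully described by the nine DW
+ * selectors, eight byte selectors and the match mask set above. A caller
+ * typically fills them from a computed match layout (contents hypothetical):
+ *
+ *	u8 dw_selector[9], byte_selector[8];
+ *	u8 match_mask[MLX5_FLD_SZ_BYTES(definer, match_mask)];
+ *	struct mlx5hws_cmd_definer_create_attr def_attr = {
+ *		.dw_selector = dw_selector,
+ *		.byte_selector = byte_selector,
+ *		.match_mask = match_mask,
+ *	};
+ *
+ *	ret = mlx5hws_cmd_definer_create(mdev, &def_attr, &definer_id);
+ */
+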
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int ret;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = kzalloc(insz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create packet reformat\n");
+ goto out;
+ }
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ int ret;
+
+ MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_in, in,
+ packet_reformat_id, reformat_id);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ int ret;
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
+ void *key;
+ int ret;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
+ goto out;
+ }
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
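+/*
+ * Sketch: cross-vHCA object sharing pairs the two helpers above - the
+ * owner grants access with a shared key, then the peer creates an alias
+ * with the same key (identifiers hypothetical):
+ *
+ *	struct mlx5hws_cmd_allow_other_vhca_access_attr allow_attr = {
+ *		.obj_type = obj_type,
+ *		.obj_id = owner_obj_id,
+ *	};
+ *	struct mlx5hws_cmd_alias_obj_create_attr alias_attr = {
+ *		.obj_type = obj_type,
+ *		.obj_id = owner_obj_id,
+ *		.vhca_id = owner_vhca_id,
+ *	};
+ *	u32 alias_id;
+ *
+ *	get_random_bytes(allow_attr.access_key, ACCESS_KEY_LEN);
+ *	ret = mlx5hws_cmd_allow_other_vhca_access(owner_mdev, &allow_attr);
+ *
+ *	memcpy(alias_attr.access_key, allow_attr.access_key, ACCESS_KEY_LEN);
+ *	ret = mlx5hws_cmd_alias_obj_create(peer_mdev, &alias_attr, &alias_id);
+ */
+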
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id)
+{
+ return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
+}
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe)
+{
+ u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+ u8 status;
+ void *ptr;
+ int ret;
+
+ MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+ MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+ memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+ memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+ memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+ if (attr->gta_data_1) {
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+ memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+ return ret;
+ }
+
+ status = MLX5_GET(generate_wqe_out, out, status);
+ if (status) {
+ mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+ return -EINVAL;
+ }
+
+ ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+ memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+ return ret;
+}
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ u32 out_size;
+ u32 *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps\n");
+ goto out;
+ }
+
+ caps->wqe_based_update =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+ caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.eswitch_manager);
+
+ caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_protocols);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+ caps->log_header_modify_argument_granularity =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+ caps->log_header_modify_argument_granularity -=
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+ caps->definer_format_sup =
+ MLX5_GET64(query_hca_cap_out, out,
+ capability.cmd_hca_cap.match_definer_format_supported);
+
+ caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.sq_ts_format);
+
+ caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.ipsec_offload);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps 2\n");
+ goto out;
+ }
+
+ caps->full_dw_jumbo_support =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+ caps->format_select_gtpu_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+ caps->format_select_gtpu_dw_1 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+ caps->format_select_gtpu_dw_2 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+ caps->format_select_gtpu_ext_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+ caps->supp_type_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.generate_wqe_type);
+
+ caps->flow_table_hash_type =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.flow_table_hash_type);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table caps\n");
+ goto out;
+ }
+
+ caps->nic_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->nic_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ caps->nic_ft.ignore_flow_level_rtc_valid =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ caps->flex_parser_ok_bits_supp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+ if (caps->wqe_based_update) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+ goto out;
+ }
+
+ caps->rtc_reparse_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+ caps->ste_format =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format);
+
+ caps->rtc_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+ caps->rtc_log_depth_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+ caps->ste_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+ caps->ste_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+ caps->trivial_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+ caps->stc_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+ caps->stc_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+ caps->rtc_hash_split_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+ caps->rtc_linear_lookup_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+ caps->access_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.access_index_mode);
+
+ caps->linear_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+ caps->rtc_max_hash_def_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+ caps->supp_ste_format_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+ caps->fdb_tir_stc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+ }
+
+ if (caps->eswitch_manager) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+ goto out;
+ }
+
+ caps->fdb_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->fdb_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+ goto out;
+ }
+
+ if (MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number_valid))
+ caps->eswitch_manager_vport_number =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device attributes\n");
+ goto out;
+ }
+
+ snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+ kfree(out);
+ return ret;
+}
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi)
+{
+ bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_function);
+ MLX5_SET(query_hca_cap_in, in, function_id,
+ mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
new file mode 100644
index 000000000000..2fbcf4ff571a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CMD_H_
+#define MLX5HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+ MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+ u8 destination_type;
+ u32 destination_id;
+ enum mlx5hws_cmd_ext_dest_flags ext_flags;
+ u32 ext_reformat_id;
+ u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+ u32 action_flags;
+ bool ignore_flow_level;
+ u8 flow_source;
+ u8 extended_dest;
+ u8 encrypt_decrypt_type;
+ u32 encrypt_decrypt_obj_id;
+ u32 packet_reformat_id;
+ u32 dests_num;
+ struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+ u8 type;
+ u32 rtc_id_0;
+ u32 rtc_id_1;
+ u32 table_miss_id;
+ u8 table_miss_action;
+ u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+ u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+ u32 table_id;
+ u32 table_type;
+};
+
+struct mlx5hws_cmd_forward_tbl {
+ u8 type;
+ u32 ft_id;
+ u32 fg_id;
+ u32 refcount;
+};
+
+struct mlx5hws_cmd_rtc_create_attr {
+ u32 pd;
+ u32 stc_base;
+ u32 ste_base;
+ u32 ste_offset;
+ u32 miss_ft_id;
+ bool fw_gen_wqe;
+ u8 update_index_mode;
+ u8 access_index_mode;
+ u8 num_hash_definer;
+ u8 log_depth;
+ u8 log_size;
+ u8 table_type;
+ u8 match_definer_0;
+ u8 match_definer_1;
+ u8 reparse_mode;
+ bool is_frst_jumbo;
+ bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_stc_modify_attr {
+ u32 stc_offset;
+ u8 action_offset;
+ u8 reparse_mode;
+ enum mlx5_ifc_stc_action_type action_type;
+ union {
+ u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+ struct {
+ u8 decap;
+ u16 start_anchor;
+ u16 end_anchor;
+ } remove_header;
+ struct {
+ u32 arg_id;
+ u32 pattern_id;
+ } modify_header;
+ struct {
+ __be64 data;
+ } modify_action;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u8 is_inline;
+ u8 encap;
+ u16 insert_anchor;
+ u16 insert_offset;
+ } insert_header;
+ struct {
+ u8 aso_type;
+ u32 devx_obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u8 eswitch_owner_vhca_id_valid;
+ } vport;
+ struct {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 ste_obj_id; /* Internal */
+ u32 match_definer_id;
+ u8 log_hash_size;
+ bool ignore_tx;
+ } ste_table;
+ struct {
+ u16 start_anchor;
+ u16 num_of_words;
+ } remove_words;
+ struct {
+ u8 type;
+ u8 op;
+ u8 size;
+ } reformat_trailer;
+
+ u32 dest_table_id;
+ u32 dest_tir_num;
+ };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+ u8 *dw_selector;
+ u8 *byte_selector;
+ u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+ u8 type;
+ size_t data_sz;
+ void *data;
+ u8 reformat_param_0;
+};
+
+struct mlx5hws_cmd_query_ft_caps {
+ u8 max_level;
+ u8 reparse;
+ u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+ u8 *wqe_ctrl;
+ u8 *gta_ctrl;
+ u8 *gta_data_0;
+ u8 *gta_data_1;
+ u32 pdn;
+};
+
+struct mlx5hws_cmd_query_caps {
+ u32 flex_protocols;
+ u8 wqe_based_update;
+ u8 rtc_reparse_mode;
+ u16 ste_format;
+ u8 rtc_index_mode;
+ u8 ste_alloc_log_max;
+ u8 ste_alloc_log_gran;
+ u8 stc_alloc_log_max;
+ u8 stc_alloc_log_gran;
+ u8 rtc_log_depth_max;
+ u8 format_select_gtpu_dw_0;
+ u8 format_select_gtpu_dw_1;
+ u8 flow_table_hash_type;
+ u8 format_select_gtpu_dw_2;
+ u8 format_select_gtpu_ext_dw_0;
+ u8 access_index_mode;
+ u32 linear_match_definer;
+ bool full_dw_jumbo_support;
+ bool rtc_hash_split_table;
+ bool rtc_linear_lookup_table;
+ u32 supp_type_gen_wqe;
+ u8 rtc_max_hash_def_gen_wqe;
+ u16 supp_ste_format_gen_wqe;
+ struct mlx5hws_cmd_query_ft_caps nic_ft;
+ struct mlx5hws_cmd_query_ft_caps fdb_ft;
+ bool eswitch_manager;
+ bool merged_eswitch;
+ u32 eswitch_manager_vport_number;
+ u8 log_header_modify_argument_granularity;
+ u8 log_header_modify_argument_max_alloc;
+ u8 sq_ts_format;
+ u8 fdb_tir_stc;
+ u64 definer_format_sup;
+ u32 trivial_match_definer;
+ u32 vhca_id;
+ u32 shared_vhca_id;
+ char fw_ver[64];
+ bool ipsec_offload;
+ bool is_ecpf;
+ u8 flex_parser_ok_bits_supp;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+				 u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id);
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi);
+
+#endif /* MLX5HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
new file mode 100644
index 000000000000..00e4fdf4a558
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+ return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+ /* Prefer to use dynamic reparse, reparse only specific actions */
+ if (mlx5hws_context_cap_dynamic_reparse(ctx))
+ return MLX5_IFC_RTC_REPARSE_NEVER;
+
+	/* Otherwise fall back to the less efficient static reparse */
+ return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool_attr pool_attr = {0};
+ u8 max_log_sz;
+ int ret;
+ int i;
+
+ ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+ if (ret)
+ goto uninit_pat_cache;
+
+ /* Create an STC pool per FT type */
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
+ max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+ pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ pool_attr.table_type = i;
+ ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool[i]) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
+ ret = -ENOMEM;
+ goto free_stc_pools;
+ }
+ }
+
+ return 0;
+
+free_stc_pools:
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+ return ret;
+}
+
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+ }
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+ int ret = 0;
+
+ ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate PD\n");
+ return ret;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+ return 0;
+}
+
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+ if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+ mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+ return 0;
+}
+
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ /* HWS not supported on device / FW */
+ if (!caps->wqe_based_update) {
+ mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+ return;
+ }
+
+ if (!caps->eswitch_manager) {
+ mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+ return;
+ }
+
+ /* Current solution requires all rules to set reparse bit */
+ if ((!caps->nic_ft.reparse ||
+ (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
+ !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+ mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+ return;
+ }
+
+ /* FW/HW must support 8DW STE */
+ if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+ mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+ return;
+ }
+
+	/* Rule insertion both by hash and by offset is required */
+ if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+ !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+ mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+ return;
+ }
+
+ /* Support for SELECT definer ID is required */
+ if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+ mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+ return;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+ struct mlx5hws_context_attr *attr)
+{
+ int ret;
+
+ hws_context_check_hws_supp(ctx);
+
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return 0;
+
+ ret = hws_context_init_pd(ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_context_pools_init(ctx);
+ if (ret)
+ goto uninit_pd;
+
+ if (attr->bwc)
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+ ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+ if (ret)
+ goto pools_uninit;
+
+ INIT_LIST_HEAD(&ctx->tbl_list);
+
+ return 0;
+
+pools_uninit:
+ hws_context_pools_uninit(ctx);
+uninit_pd:
+ hws_context_uninit_pd(ctx);
+ return ret;
+}
+
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return;
+
+ mlx5hws_send_queues_close(ctx);
+ hws_context_pools_uninit(ctx);
+ hws_context_uninit_pd(ctx);
+}
+
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr)
+{
+ struct mlx5hws_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->mdev = mdev;
+
+ mutex_init(&ctx->ctrl_lock);
+ xa_init(&ctx->peer_ctx_xa);
+
+ ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+ if (!ctx->caps)
+ goto free_ctx;
+
+ ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+ if (ret)
+ goto free_caps;
+
+ ret = mlx5hws_vport_init_vports(ctx);
+ if (ret)
+ goto free_caps;
+
+ ret = hws_context_init_hws(ctx, attr);
+ if (ret)
+ goto uninit_vports;
+
+ mlx5hws_debug_init_dump(ctx);
+
+ return ctx;
+
+uninit_vports:
+ mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+ kfree(ctx->caps);
+free_ctx:
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return NULL;
+}
+
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+ mlx5hws_debug_uninit_dump(ctx);
+ hws_context_uninit_hws(ctx);
+ mlx5hws_vport_uninit_vports(ctx);
+ kfree(ctx->caps);
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return 0;
+}
+
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+		pr_warn("HWS: failed to store peer context in peer xarray\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
new file mode 100644
index 000000000000..e5a7ce604334
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CONTEXT_H_
+#define MLX5HWS_CONTEXT_H_
+
+enum mlx5hws_context_flags {
+ MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+ MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+ MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+};
+
+enum mlx5hws_context_shared_stc_type {
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+ MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+ u16 esw_manager_gvmi;
+ u16 uplink_gvmi;
+ struct xarray vport_gvmi_xa;
+};
+
+struct mlx5hws_context {
+ struct mlx5_core_dev *mdev;
+ struct mlx5hws_cmd_query_caps *caps;
+ u32 pd_num;
+ struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pattern_cache *pattern_cache;
+ struct mlx5hws_definer_cache *definer_cache;
+ struct mutex ctrl_lock; /* control lock to protect the whole context */
+ enum mlx5hws_context_flags flags;
+ struct mlx5hws_send_engine *send_queue;
+ size_t queues;
+ struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+ struct list_head tbl_list;
+ struct mlx5hws_context_debug_info debug_info;
+ struct xarray peer_ctx_xa;
+ struct mlx5hws_context_vports vports;
+};
+
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
new file mode 100644
index 000000000000..2b8c5a4e1c4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "mlx5hws_internal.h"
+
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+ void *parent_obj,
+ struct mlx5hws_definer *definer,
+ enum mlx5hws_debug_res_type type)
+{
+ int i;
+
+ if (!definer)
+ return 0;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+ type,
+ HWS_PTR_TO_ID(definer),
+ HWS_PTR_TO_ID(parent_obj),
+ definer->obj_id,
+ definer->type);
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->dw_selector[i],
+ (i == DW_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->byte_selector[i],
+ (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+ seq_puts(f, "\n");
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_debug_res_type type;
+ int i, ret;
+
+ for (i = 0; i < matcher->num_of_mt; i++) {
+ struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+ HWS_PTR_TO_ID(mt),
+ HWS_PTR_TO_ID(matcher),
+ mt->fc_sz,
+ 0, 0);
+
+ type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+ ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_action_type action_type;
+ int i, j;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ HWS_PTR_TO_ID(at),
+ HWS_PTR_TO_ID(matcher),
+ at->only_term,
+ at->num_of_action_stes,
+ at->num_actions);
+
+ for (j = 0; j < at->num_actions; j++) {
+ action_type = at->action_type_arr[j];
+ seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+ }
+
+ seq_puts(f, "\n");
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+ HWS_PTR_TO_ID(matcher),
+ attr->priority,
+ attr->mode,
+ attr->table.sz_row_log,
+ attr->table.sz_col_log,
+ attr->optimize_using_rule_idx,
+ attr->optimize_flow_src,
+ attr->insert_mode,
+ attr->distribute_mode);
+
+ return 0;
+}
+
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ u32 ste_0_id = -1;
+ u32 ste_1_id = -1;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+ HWS_PTR_TO_ID(matcher),
+ HWS_PTR_TO_ID(matcher->tbl),
+ matcher->num_of_mt,
+ matcher->end_ft_id,
+ matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+ ste = &matcher->match_ste.ste;
+ ste_pool = matcher->match_ste.pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ }
+
+ seq_printf(f, ",%d,%d,%d,%d",
+ matcher->match_ste.rtc_0_id,
+ (int)ste_0_id,
+ matcher->match_ste.rtc_1_id,
+ (int)ste_1_id);
+
+ ste = &matcher->action_ste[0].ste;
+ ste_pool = matcher->action_ste[0].pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ else
+ ste_1_id = -1;
+ } else {
+ ste_0_id = -1;
+ ste_1_id = -1;
+ }
+
+ ft_attr.type = matcher->tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+ matcher->end_ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
+ matcher->action_ste[0].rtc_0_id,
+ (int)ste_0_id,
+ matcher->action_ste[0].rtc_1_id,
+ (int)ste_1_id,
+ 0,
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+ ret = hws_debug_dump_matcher_attr(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_match_template(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_action_template(f, matcher);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_matcher *matcher;
+ u64 local_icm_addr_0 = 0;
+ u64 local_icm_addr_1 = 0;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_TABLE,
+ HWS_PTR_TO_ID(tbl),
+ HWS_PTR_TO_ID(tbl->ctx),
+ tbl->ft_id,
+ MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+ tbl->fw_ft_type,
+ tbl->level,
+ 0);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+ tbl->ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+ HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+ list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+ ret = hws_debug_dump_matcher(f, matcher);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_send_engine *send_queue;
+ struct mlx5hws_send_ring *send_ring;
+ struct mlx5hws_send_ring_cq *cq;
+ struct mlx5hws_send_ring_sq *sq;
+ int i;
+
+ for (i = 0; i < (int)ctx->queues; i++) {
+ send_queue = &ctx->send_queue[i];
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+ HWS_PTR_TO_ID(ctx),
+ i,
+ send_queue->used_entries,
+ send_queue->num_entries,
+ 1, /* one send ring per queue */
+ send_queue->num_entries,
+ send_queue->err,
+ send_queue->completed.ci,
+ send_queue->completed.pi,
+ send_queue->completed.mask);
+
+ send_ring = &send_queue->send_ring;
+ cq = &send_ring->send_cq;
+ sq = &send_ring->send_sq;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+ HWS_PTR_TO_ID(ctx),
+ 0, /* one send ring per send queue */
+ i,
+ cq->mcq.cqn,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ cq->mcq.cqe_sz,
+ sq->sqn,
+ 0,
+ 0,
+ 0);
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+ HWS_PTR_TO_ID(ctx),
+ caps->fw_ver,
+ caps->wqe_based_update,
+ caps->ste_format,
+ caps->ste_alloc_log_max,
+ caps->log_header_modify_argument_max_alloc);
+
+ seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+ caps->flex_protocols,
+ caps->rtc_reparse_mode,
+ caps->rtc_index_mode,
+ caps->ste_alloc_log_gran,
+ caps->stc_alloc_log_max,
+ caps->stc_alloc_log_gran,
+ caps->rtc_log_depth_max,
+ caps->format_select_gtpu_dw_0,
+ caps->format_select_gtpu_dw_1,
+ caps->format_select_gtpu_dw_2,
+ caps->format_select_gtpu_ext_dw_0,
+ caps->nic_ft.max_level,
+ caps->nic_ft.reparse,
+ caps->fdb_ft.max_level,
+ caps->fdb_ft.reparse,
+ caps->log_header_modify_argument_granularity,
+ caps->linear_match_definer,
+ "regc_3");
+
+ return 0;
+}
+
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+ HWS_PTR_TO_ID(ctx),
+ ctx->pd_num,
+ ctx->queues,
+ ctx->send_queue->num_entries,
+ "None", /* no shared gvmi */
+ ctx->caps->vhca_id,
+ 0xffff); /* no shared gvmi */
+
+ return 0;
+}
+
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+ HWS_PTR_TO_ID(ctx),
+ ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+ pci_name(dev->pdev),
+ HWS_DEBUG_FORMAT_VERSION,
+ LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
+
+ ret = hws_debug_dump_context_attr(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_caps(f, ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+ struct mlx5hws_context *ctx,
+ u32 tbl_type,
+ struct mlx5hws_pool_resource *resource)
+{
+ seq_printf(f, "%d,0x%llx,%u,%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+ HWS_PTR_TO_ID(ctx),
+ tbl_type,
+ resource->base_id);
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool *stc_pool;
+ u32 table_type;
+ int ret;
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ stc_pool = ctx->stc_pool[i];
+ table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+
+ if (!stc_pool)
+ continue;
+
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ ret = hws_debug_dump_context_info(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_send_engine(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_stc(f, ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+ ret = hws_debug_dump_table(f, tbl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!f || !ctx)
+ return -EINVAL;
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = hws_debug_dump_context(f, ctx);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+ return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
+
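+/*
+ * Expose a dump file under the device's debugfs root, typically
+ * /sys/kernel/debug/mlx5/<pci-id>/steering/fdb/ctx_<hashed-ptr>;
+ * reading it emits the CSV-style records built by hws_debug_dump().
+ */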
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ char file_name[128];
+
+ ctx->debug_info.steering_debugfs =
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+ ctx->debug_info.fdb_debugfs =
+ debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+ sprintf(file_name, "ctx_%p", ctx);
+ debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+ ctx, &hws_dump_fops);
+}
+
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+ debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
new file mode 100644
index 000000000000..b93a536035d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEBUG_H_
+#define MLX5HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
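+/* Compact a kernel pointer into a dump ID; only the low 32 bits are kept,
+ * so IDs are not guaranteed unique, merely stable for a given object.
+ */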
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum mlx5hws_debug_res_type {
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+ MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+};
+
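+/* Convert an ICM byte address into a 64-byte-granule index (addr >> 6),
+ * truncated to 32 bits for the dump format.
+ */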
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
new file mode 100644
index 000000000000..3bdb5c90efff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+/* Pattern tunnel layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
+#define MLX5_FLOW_LAYER_GRE BIT(14)
+#define MLX5_FLOW_LAYER_MPLS BIT(15)
+
+/* Pattern tunnel layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC 0x85
+#define BAD_PORT 0xBAD
+#define ETH_TYPE_IPV4_VXLAN 0x0800
+#define ETH_TYPE_IPV6_VXLAN 0x86DD
+#define UDP_GTPU_PORT 2152
+#define UDP_PORT_MPLS 6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT 4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN 0x0
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_ICMP 0x3
+#define STE_ESP 0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for 32bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + ((byte_off) / 4)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+ ((byte_off) / 4))) & \
+ (~((mask) << (bit_off)))) | \
+ (((_v) & (mask)) << \
+ (bit_off))); \
+ } while (0)
+
+/* Setter function based on bit offset and mask, for unaligned 32bit DW */
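+/* A negative bit_off means the field straddles a DW boundary: the upper bits
+ * of the value are written at bit 0 of this DW and the low -bit_off bits are
+ * written into the next DW.
+ */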
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ if (unlikely((bit_off) < 0)) { \
+ u32 _bit_off = -1 * (bit_off); \
+ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+ (bit_off) % BITS_IN_DW, second_dw_mask); \
+ } else { \
+ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ } \
+ } while (0)
+
+/* Getter for up to aligned 32bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+ ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
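+
+/* e.g. HWS_SET32(tag, 0xAB, 4, 8, 0xff) sets bits 15:8 of the DW stored
+ * big-endian at bytes 4..7 to 0xAB; HWS_GET32(tag, 4, 8, 0xff) then reads
+ * back 0xAB.
+ */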
+
+#define HWS_CALC_FNAME(field, inner) \
+ ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+ MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+ (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
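+/* Scan a field wider than 32 bits one DW at a time; nonzero if any is set. */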
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+ BUILD_BUG_ON((sz_in_bits) % 32); \
+ u32 sz = sz_in_bits; \
+ u32 res = 0; \
+ u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+ while (!res && sz >= 32) { \
+ res = *((match_param) + (dw_off++)); \
+ sz -= 32; \
+ } \
+ res; \
+ })
+
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+ (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+ !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+ (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+ do { \
+ (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+ (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+ (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+ do { \
+ (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+ (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+ (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+ do { \
+ HWS_CALC_HDR_SRC(fc, s_hdr); \
+ HWS_CALC_HDR_DST(fc, d_hdr); \
+ (fc)->tag_set = &hws_definer_generic_set; \
+ } while (0)
+
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+ do { \
+ if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+ HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+ } while (0)
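+
+/*
+ * These helpers populate one mlx5hws_definer_fc entry per matched field:
+ * HWS_CALC_HDR_SRC records where the value lives inside fte_match_param,
+ * HWS_CALC_HDR_DST records where it lands in the definer header layout,
+ * and tag_set is the callback that copies it when a rule tag is built.
+ * HWS_SET_HDR only wires a field if it is actually set in the match mask.
+ */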
+
+struct mlx5hws_definer_sel_ctrl {
+ u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+ u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
+ u8 allowed_bytes; /* Bytes selectors, up to offset 255 */
+ u8 used_full_dw;
+ u8 used_lim_dw;
+ u8 used_bytes;
+ u8 full_dw_selector[DW_SELECTORS];
+ u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+ u8 byte_selector[BYTE_SELECTORS];
+};
+
+struct mlx5hws_definer_conv_data {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_definer_fc *fc;
+ /* enum mlx5hws_definer_match_flag */
+ u32 match_flags;
+};
+
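+/* Write all-ones into the field; used as the tag_mask_set callback so the
+ * mask covers the full field regardless of the match value.
+ */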
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ /* Can be optimized */
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ bool inner)
+{
+ u32 second_cvlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+ u32 second_svlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+ if (second_cvlan_tag)
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (second_svlan_tag)
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
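+/* Pack ICMP type and code into the single definer DW, per header_icmp layout. */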
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ if (val == IPV4)
+ HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (val == IPV6)
+ HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ struct mlx5hws_context *peer_ctx)
+{
+ u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+ u16 vport_gvmi = 0;
+ int ret;
+
+ ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+ if (ret) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+ return;
+ }
+
+ if (vport_gvmi)
+ HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
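+/*
+ * Resolve which context owns the given eswitch vhca_id: the local context,
+ * or a peer previously registered via mlx5hws_context_set_peer(); then set
+ * that context's vport gvmi as the source.
+ */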
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+ int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+ struct mlx5hws_context *peer_ctx;
+
+ if (id == fc->ctx->caps->vhca_id)
+ peer_ctx = fc->ctx;
+ else
+ peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+ if (!peer_ctx) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+ return;
+ }
+
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+ bool *parser_is_used,
+ u32 id,
+ u32 value)
+{
+ if (id || value) {
+ if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+ mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+ return NULL;
+ }
+
+ if (parser_is_used[id]) {
+			mlx5hws_err(cd->ctx, "Parser ID has already been used\n");
+ return NULL;
+ }
+ }
+
+ parser_is_used[id] = true;
+
+ return hws_definer_flex_parser_handler(cd, id);
+}
+
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+ u32 flags;
+
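+	/* In each group below the flags are mutually exclusive:
+	 * flags & (flags - 1) is nonzero iff more than one bit is set.
+	 */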
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
+
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ return 0;
+
+err_conflict:
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ return -EINVAL;
+}
+
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ outer_headers.ethertype,
+ eth_l2_outer.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+ outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+ outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+ outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+ outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+ outer_headers.first_prio, eth_l2_outer.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+ outer_headers.first_cfi, eth_l2_outer.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+ outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ outer_headers.ip_protocol,
+ eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ outer_headers.ttl_hoplimit,
+ eth_l3_outer.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+	/* Treat the address as IPv6 if any of the upper 96 bits are set;
+	 * the lowest DW aliases the IPv4 address in this layout.
+	 */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.tcp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.tcp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.udp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.udp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+ outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+ outer_headers.ip_dscp, eth_l3_outer.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_O,
+ outer_headers.ip_ecn, eth_l3_outer.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+ outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+ smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l4_outer.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ inner_headers.ethertype,
+ eth_l2_inner.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+ inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+ inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+ inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+ inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+ inner_headers.first_prio, eth_l2_inner.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+ inner_headers.first_cfi, eth_l2_inner.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+ inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+ inner_headers.ip_protocol,
+ eth_l3_inner.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+ inner_headers.ip_version,
+ eth_l3_inner.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_I,
+ inner_headers.ttl_hoplimit,
+ eth_l3_inner.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+	/* Treat the address as IPv6 if any of the upper 96 bits are set;
+	 * the lowest DW aliases the IPv4 address in this layout.
+	 */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.tcp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.tcp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.udp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.udp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+ inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+ inner_headers.ip_dscp, eth_l3_inner.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_I,
+ inner_headers.ip_ecn, eth_l3_inner.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+ inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_inner.ip_fragmented);
+ } else {
+ smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l4_inner.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+ }
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+ return -EINVAL;
+ }
+
+ /* Check GRE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_c_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_k_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_s_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+ HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+ misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+ }
+
+ /* Check GENEVE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_opt_len,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_protocol_type,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_oam,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+ }
+
+ HWS_SET_HDR(fc, match_param, SOURCE_QP,
+ misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+ misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+ misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+ /* L2 Second VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+ misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+ misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+ misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+ misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+ misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+ misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+ /* L2 Second CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* VXLAN VNI */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+ }
+
+ /* Flex protocol steering ok bits */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ if (!caps->flex_parser_ok_bits_supp) {
+ mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+ cd, caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters.geneve_tlv_option_0_exist);
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+ HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+ misc_parameters.source_eswitch_owner_vhca_id) ?
+ &hws_definer_set_source_gvmi_vhca_id :
+ &hws_definer_set_source_gvmi;
+ } else {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported source_eswitch_owner_vhca_id field usage\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
+ return -EINVAL;
+ }
+
+ HWS_SET_HDR(fc, match_param, MPLS0_O,
+ misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+ HWS_SET_HDR(fc, match_param, MPLS0_I,
+ misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+ HWS_SET_HDR(fc, match_param, REG_0,
+ misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+ HWS_SET_HDR(fc, match_param, REG_1,
+ misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+ HWS_SET_HDR(fc, match_param, REG_2,
+ misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+ HWS_SET_HDR(fc, match_param, REG_3,
+ misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+ HWS_SET_HDR(fc, match_param, REG_4,
+ misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+ HWS_SET_HDR(fc, match_param, REG_5,
+ misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+ HWS_SET_HDR(fc, match_param, REG_6,
+ misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+ HWS_SET_HDR(fc, match_param, REG_7,
+ misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+ HWS_SET_HDR(fc, match_param, REG_A,
+ misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+ if (!curr_fc)
+ return -EINVAL;
+
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_gre);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+ if (!curr_fc)
+ return -EINVAL;
+
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_2.outer_first_mpls_over_udp);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ bool vxlan_gpe_flex_parser_enabled;
+
+ /* Check reserved and unsupported fields */
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ curr_fc =
+ hws_definer_flex_parser_handler(cd,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.geneve_tlv_option_0_data);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+		curr_fc->tag_set = &hws_definer_generic_set;
+		curr_fc->bit_mask = __mlx5_mask(header_gtp, teid);
+		curr_fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_teid);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+		curr_fc->tag_set = &hws_definer_generic_set;
+		curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+		curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+		curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+		curr_fc->tag_set = &hws_definer_generic_set;
+		curr_fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+		curr_fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+		curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_msg_flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_2);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_first_ext_dw_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+		HWS_CALC_HDR_SRC(curr_fc, misc_parameters_3.gtpu_dw_0);
+ }
+
+ return 0;
+}
+
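+/* Convert misc_parameters_4: four programmable sample (id, value) pairs.
+ * Each pair is resolved to a flex parser field copy entry by
+ * hws_definer_misc4_fields_handler(), with parser_is_used[] tracking
+ * which flex parsers were already claimed.
+ */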
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+ struct mlx5hws_definer_fc *fc;
+ u32 id, value;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+ return -EINVAL;
+ }
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_definer_fc *fc = cd->fc;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+ misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+ misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+ misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+ }
+
+ HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+ misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+ return 0;
+}
+
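+/* Number of field copy entries actually in use, i.e. those with a
+ * tag_set callback. This is the size of the compressed fc array.
+ */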
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+ u32 fc_sz = 0;
+ int i;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (fc == ZERO_SIZE_PTR)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
+ if (fc[i].tag_set)
+ fc_sz++;
+ return fc_sz;
+}
+
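+/* Compress the sparse fc array (indexed by field name) into a dense
+ * array holding only the used entries. The original index is kept in
+ * fc->fname since it is lost by the compression.
+ */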
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ u32 definer_size = hws_definer_get_fc_size(fc);
+ u32 fc_sz = 0;
+ int i;
+
+ compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
+ if (!compressed_fc)
+ return NULL;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (!definer_size)
+ return compressed_fc;
+
+ for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ fc[i].fname = i;
+ memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
+ }
+
+ return compressed_fc;
+}
+
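+/* Mark in the header layout (hl) every bit the template must match on.
+ * The selector search only cares which bits are used, not their values.
+ */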
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+ int i;
+
+ /* nothing to do for empty matcher */
+ if (fc == ZERO_SIZE_PTR)
+ return;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+ }
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+ size_t len)
+{
+ struct mlx5hws_definer_fc *fc;
+ int i;
+
+ fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ fc[i].ctx = ctx;
+
+ return fc;
+}
+
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ u8 *hl)
+{
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return -ENOMEM;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+ mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+	/* Check that no conflicting fields are set together */
+ ret = hws_definer_check_match_flags(&cd);
+ if (ret)
+ goto err_free_fc;
+
+ /* Allocate fc array on mt */
+ mt->fc = hws_definer_alloc_compressed_fc(fc);
+ if (!mt->fc) {
+ mlx5hws_err(ctx,
+ "Convert match params: failed to set field copy to match template\n");
+ ret = -ENOMEM;
+ goto err_free_fc;
+ }
+ mt->fc_sz = hws_definer_get_fc_size(fc);
+
+ /* Fill in headers layout */
+ hws_definer_set_hl(hl, fc);
+
+ kfree(fc);
+ return 0;
+
+err_free_fc:
+ kfree(fc);
+ return ret;
+}
+
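+/* Same conversion as above, but for a caller-provided criteria and
+ * match_param rather than a match template: only the compressed fc
+ * array is produced.
+ */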
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return NULL;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+	/* Allocate the compressed fc array */
+ compressed_fc = hws_definer_alloc_compressed_fc(fc);
+ if (!compressed_fc) {
+ mlx5hws_err(ctx,
+ "Convert to compressed fc: failed to set field copy to match template\n");
+ goto err_free_fc;
+ }
+ *fc_sz = hws_definer_get_fc_size(fc);
+
+err_free_fc:
+ kfree(fc);
+ return compressed_fc;
+}
+
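+/* Translate a header layout byte offset to its byte offset inside the
+ * match tag: the tag holds the selected DWs first (in reverse selector
+ * order) followed by the individually selected bytes (also reversed).
+ */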
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+ u32 hl_byte_off,
+ u32 *tag_byte_off)
+{
+ int i, dw_to_scan;
+ u8 byte_offset;
+
+ /* Avoid accessing unused DW selectors */
+ dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
+ DW_SELECTORS : DW_SELECTORS_MATCH;
+
+ /* Add offset since each DW covers multiple BYTEs */
+ byte_offset = hl_byte_off % DW_SIZE;
+ for (i = 0; i < dw_to_scan; i++) {
+ if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+ *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ /* Add offset to skip DWs in definer */
+ byte_offset = DW_SIZE * DW_SELECTORS;
+ /* Iterate in reverse since the code uses bytes from 7 -> 0 */
+ for (i = BYTE_SELECTORS; i-- > 0 ;) {
+ if (definer->byte_selector[i] == hl_byte_off) {
+ *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
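+/* Rewrite each field copy destination from header layout offsets to the
+ * tag offsets implied by the definer's DW/byte selectors.
+ */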
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz)
+{
+ u32 tag_offset = 0;
+ int ret, byte_diff;
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ /* Map header layout byte offset to byte offset in tag */
+ ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
+ if (ret)
+ return ret;
+
+ /* Move setter based on the location in the definer */
+ byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
+ fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
+
+ /* Update offset in headers layout to offset in tag */
+ fc->byte_off = tag_offset;
+ fc++;
+ }
+
+ return 0;
+}
+
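+/* Recursive backtracking search: cover every non-zero DW of the header
+ * layout with a limited DW selector (DWs 0..63 only), a full DW selector,
+ * or per-byte selectors (byte offsets up to 255 only). Returns true once
+ * a complete assignment is found, undoing its choices on backtrack.
+ */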
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+ u32 cur_dw,
+ u32 *data)
+{
+ u8 bytes_set;
+ int byte_idx;
+ bool ret;
+ int i;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+
+ /* No data set, can skip to next DW */
+ while (!*data) {
+ cur_dw++;
+ data++;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+ }
+
+ /* Used all DW selectors and Byte selectors, no possible solution */
+ if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+ ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+ ctrl->allowed_bytes == ctrl->used_bytes)
+ return false;
+
+ /* Try to use limited DW selectors */
+ if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+ ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+ }
+
+ /* Try to use DW selectors */
+ if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+ ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+ }
+
+ /* No byte selector for offset bigger than 255 */
+ if (cur_dw * DW_SIZE > 255)
+ return false;
+
+ bytes_set = !!(0x000000ff & *data) +
+ !!(0x0000ff00 & *data) +
+ !!(0x00ff0000 & *data) +
+ !!(0xff000000 & *data);
+
+ /* Check if there are enough byte selectors left */
+ if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+ return false;
+
+ /* Try to use Byte selectors */
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ /* Use byte selectors high to low */
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+ ctrl->used_bytes++;
+ }
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ ctrl->used_bytes--;
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = 0;
+ }
+
+ return false;
+}
+
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+ struct mlx5hws_definer *definer)
+{
+ memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+ memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+ memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+ ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u8 *hl)
+{
+ struct mlx5hws_definer_sel_ctrl ctrl = {0};
+ bool found;
+
+ /* Try to create a match definer */
+ ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = 0;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+ return 0;
+ }
+
+ /* Try to create a full/limited jumbo definer */
+ ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+ DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+ DW_SELECTORS_LIMITED;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+ return 0;
+ }
+
+	return -E2BIG;
+}
+
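+/* Build the mask tag used for definer creation. A field can override the
+ * way its mask is written via tag_mask_set; otherwise its regular tag_set
+ * is applied to the mask.
+ */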
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ if (fc->tag_mask_set)
+ fc->tag_mask_set(fc, match_param, tag);
+ else
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+ return definer->obj_id;
+}
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b)
+{
+ int i;
+
+ /* Future: Optimize by comparing selectors with valid mask only */
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+ return 1;
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+ return 1;
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+ return 1;
+
+ return 0;
+}
+
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer)
+{
+ u8 *match_hl;
+ int ret;
+
+	/* The union header layout (hl) is used to create a single definer
+	 * field layout that is then reused with different bitmasks for hash
+	 * and match.
+	 */
+ match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+ if (!match_hl)
+ return -ENOMEM;
+
+	/* Convert all mt items to header layout (hl)
+	 * and allocate the match field copy array (fc).
+	 */
+ ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+ goto free_fc;
+ }
+
+ /* Find the match definer layout for header layout match union */
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ if (ret) {
+		if (ret == -E2BIG)
+ mlx5hws_dbg(ctx,
+ "Failed to create match definer from header layout - E2BIG\n");
+ else
+ mlx5hws_err(ctx,
+ "Failed to create match definer from header layout (%d)\n",
+ ret);
+ goto free_fc;
+ }
+
+ kfree(match_hl);
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+
+ kfree(match_hl);
+ return ret;
+}
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+ struct mlx5hws_definer_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->list_head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+ kfree(cache);
+}
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+ struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+ struct mlx5hws_definer_cache_item *cached_definer;
+ u32 obj_id;
+ int ret;
+
+ /* Search definer cache for requested definer */
+ list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+ if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+		/* Reuse the definer and move it to the list head (most recently used) */
+ list_del_init(&cached_definer->list_node);
+ list_add(&cached_definer->list_node, &cache->list_head);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj_id;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+ if (ret)
+ return -1;
+
+ cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+ if (!cached_definer)
+ goto free_definer_obj;
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj_id = obj_id;
+ cached_definer->refcount = 1;
+ list_add(&cached_definer->list_node, &cache->list_head);
+
+ return obj_id;
+
+free_definer_obj:
+ mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+ return -1;
+}
+
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+ struct mlx5hws_definer_cache_item *cached_definer;
+
+ list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+ if (cached_definer->definer.obj_id != obj_id)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ list_del_init(&cached_definer->list_node);
+ mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+ kfree(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ pr_warn("HWS: failed putting definer object\n");
+}
+
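+/* Duplicate the calculated layout, optionally rebase the fc array onto it,
+ * build the mask tag and then acquire a matching definer object, reusing a
+ * cached one when possible.
+ */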
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 *match_param,
+ struct mlx5hws_definer *layout,
+ bool bind_fc)
+{
+ struct mlx5hws_definer *definer;
+ int ret;
+
+ definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ /* Align field copy array based on given layout */
+ if (bind_fc) {
+ ret = hws_definer_fc_bind(definer, fc, fc_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+ goto free_definer;
+ }
+ }
+
+ /* Create the tag mask used for definer creation */
+ hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0)
+ goto free_definer;
+
+ definer->obj_id = ret;
+ return definer;
+
+free_definer:
+ kfree(definer);
+ return NULL;
+}
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ hws_definer_put_obj(ctx, definer->obj_id);
+ kfree(definer);
+}
+
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_layout)
+{
+ /* Create mandatory match definer */
+ mt->definer = hws_definer_alloc(ctx,
+ mt->fc,
+ mt->fc_sz,
+ mt->match_param,
+ match_layout,
+ true);
+ if (!mt->definer) {
+ mlx5hws_err(ctx, "Failed to create match definer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ mlx5hws_definer_free(ctx, mt->definer);
+}
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ struct mlx5hws_definer match_layout = {0};
+ int ret;
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ return ret;
+ }
+
+ /* Calculate definers needed for exact match */
+ ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to init match definers\n");
+ goto free_fc;
+ }
+
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+ return ret;
+}
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ hws_definer_mt_match_uninit(ctx, mt);
+ kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
new file mode 100644
index 000000000000..2f6a7df4021c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEFINER_H_
+#define MLX5HWS_DEFINER_H_
+
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+enum mlx5hws_definer_fname {
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+ MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+ MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+ MLX5HWS_DEFINER_FNAME_GTP_TEID,
+ MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+ MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+ MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+ MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+ MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+ MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+ MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+ MLX5HWS_DEFINER_FNAME_REG_0,
+ MLX5HWS_DEFINER_FNAME_REG_1,
+ MLX5HWS_DEFINER_FNAME_REG_2,
+ MLX5HWS_DEFINER_FNAME_REG_3,
+ MLX5HWS_DEFINER_FNAME_REG_4,
+ MLX5HWS_DEFINER_FNAME_REG_5,
+ MLX5HWS_DEFINER_FNAME_REG_6,
+ MLX5HWS_DEFINER_FNAME_REG_7,
+ MLX5HWS_DEFINER_FNAME_REG_8,
+ MLX5HWS_DEFINER_FNAME_REG_9,
+ MLX5HWS_DEFINER_FNAME_REG_10,
+ MLX5HWS_DEFINER_FNAME_REG_11,
+ MLX5HWS_DEFINER_FNAME_REG_A,
+ MLX5HWS_DEFINER_FNAME_REG_B,
+ MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+ MLX5HWS_DEFINER_FNAME_GRE_C,
+ MLX5HWS_DEFINER_FNAME_GRE_K,
+ MLX5HWS_DEFINER_FNAME_GRE_S,
+ MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+ MLX5HWS_DEFINER_FNAME_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+ MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+ MLX5HWS_DEFINER_FNAME_IB_L4_A,
+ MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+ MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+enum mlx5hws_definer_match_criteria {
+ MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+enum mlx5hws_definer_type {
+ MLX5HWS_DEFINER_TYPE_MATCH,
+ MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
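+/* Flags accumulated while converting match parameters. They are later
+ * checked for combinations that cannot be matched together, such as
+ * multiple tunnel types in a single definer.
+ */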
+enum mlx5hws_definer_match_flag {
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
+
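+/* A field copy (fc) entry describes how a single field is copied from the
+ * match parameters (source offsets) into the definer tag (destination
+ * offsets) by the tag_set/tag_mask_set callbacks.
+ */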
+struct mlx5hws_definer_fc {
+ struct mlx5hws_context *ctx;
+ /* Source */
+ u32 s_byte_off;
+ int s_bit_off;
+ u32 s_bit_mask;
+ /* Destination */
+ u32 byte_off;
+ int bit_off;
+ u32 bit_mask;
+ enum mlx5hws_definer_fname fname;
+ void (*tag_set)(struct mlx5hws_definer_fc *fc,
+			void *match_param,
+ u8 *tag);
+ void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+			void *match_param,
+ u8 *tag);
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+ u8 reserved_at_40[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encap_type[0x2];
+ u8 port_number[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 l4_type[0x4];
+ u8 reserved_at_64[0x2];
+ u8 ipsec_layer[0x2];
+ u8 l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 ip_fragmented[0x1];
+ u8 functional_lb[0x1];
+};
+
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 reserved_at_3[0x3];
+ u8 port_number[0x2];
+ u8 sl[0x4];
+ u8 qp_type[0x2];
+ u8 lnh[0x2];
+ u8 dlid[0x10];
+ u8 vl[0x4];
+ u8 lrh_packet_length[0xc];
+ u8 slid[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+ u8 ip_version[0x4];
+ u8 ihl[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 time_to_live_hop_limit[0x8];
+ u8 protocol_next_header[0x8];
+ u8 identification[0x10];
+ union {
+ u8 ipv4_frag[0x10];
+ struct {
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+ };
+ };
+ u8 ipv4_total_length[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_60[0xc];
+ u8 flow_label[0x14];
+ u8 packet_length[0x10];
+ u8 ipv6_payload_length[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+ u8 data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 ip_fragmented[0x1];
+ u8 tcp_ns[0x1];
+ union {
+ u8 tcp_flags[0x8];
+ struct {
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ };
+ };
+ u8 first_fragment[0x1];
+ u8 reserved_at_31[0xf];
+};
+
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_e[0x1];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+ u8 force_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+};
+
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+ u8 opcode[0x8];
+ u8 qp[0x18];
+ u8 se[0x1];
+ u8 migreq[0x1];
+ u8 ackreq[0x1];
+ u8 fecn[0x1];
+ u8 becn[0x1];
+ u8 bth[0x1];
+ u8 deth[0x1];
+ u8 dcceth[0x1];
+ u8 reserved_at_28[0x2];
+ u8 pad_count[0x2];
+ u8 tver[0x4];
+ u8 p_key[0x10];
+ u8 reserved_at_40[0x8];
+ u8 deth_source_qp[0x18];
+};
+
+enum mlx5hws_integrity_ok1_bits {
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+ MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+ MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+ MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+ MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+struct mlx5_ifc_definer_hl_oks1_bits {
+ union {
+ u8 oks1_bits[0x20];
+ struct {
+ u8 second_ipv4_checksum_ok[0x1];
+ u8 second_l4_checksum_ok[0x1];
+ u8 first_ipv4_checksum_ok[0x1];
+ u8 first_l4_checksum_ok[0x1];
+ u8 second_l3_ok[0x1];
+ u8 second_l4_ok[0x1];
+ u8 first_l3_ok[0x1];
+ u8 first_l4_ok[0x1];
+ u8 flex_parser7_steering_ok[0x1];
+ u8 flex_parser6_steering_ok[0x1];
+ u8 flex_parser5_steering_ok[0x1];
+ u8 flex_parser4_steering_ok[0x1];
+ u8 flex_parser3_steering_ok[0x1];
+ u8 flex_parser2_steering_ok[0x1];
+ u8 flex_parser1_steering_ok[0x1];
+ u8 flex_parser0_steering_ok[0x1];
+ u8 second_ipv6_extension_header_vld[0x1];
+ u8 first_ipv6_extension_header_vld[0x1];
+ u8 l3_tunneling_ok[0x1];
+ u8 l2_tunneling_ok[0x1];
+ u8 second_tcp_ok[0x1];
+ u8 second_udp_ok[0x1];
+ u8 second_ipv4_ok[0x1];
+ u8 second_ipv6_ok[0x1];
+ u8 second_l2_ok[0x1];
+ u8 vxlan_ok[0x1];
+ u8 gre_ok[0x1];
+ u8 first_tcp_ok[0x1];
+ u8 first_udp_ok[0x1];
+ u8 first_ipv4_ok[0x1];
+ u8 first_ipv6_ok[0x1];
+ u8 first_l2_ok[0x1];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_oks2_bits {
+ u8 reserved_at_0[0xa];
+ u8 second_mpls_ok[0x1];
+ u8 second_mpls4_s_bit[0x1];
+ u8 second_mpls4_qualifier[0x1];
+ u8 second_mpls3_s_bit[0x1];
+ u8 second_mpls3_qualifier[0x1];
+ u8 second_mpls2_s_bit[0x1];
+ u8 second_mpls2_qualifier[0x1];
+ u8 second_mpls1_s_bit[0x1];
+ u8 second_mpls1_qualifier[0x1];
+ u8 second_mpls0_s_bit[0x1];
+ u8 second_mpls0_qualifier[0x1];
+ u8 first_mpls_ok[0x1];
+ u8 first_mpls4_s_bit[0x1];
+ u8 first_mpls4_qualifier[0x1];
+ u8 first_mpls3_s_bit[0x1];
+ u8 first_mpls3_qualifier[0x1];
+ u8 first_mpls2_s_bit[0x1];
+ u8 first_mpls2_qualifier[0x1];
+ u8 first_mpls1_s_bit[0x1];
+ u8 first_mpls1_qualifier[0x1];
+ u8 first_mpls0_s_bit[0x1];
+ u8 first_mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_definer_hl_voq_bits {
+ u8 reserved_at_0[0x18];
+ u8 ecn_ok[0x1];
+ u8 congestion[0x1];
+ u8 profile[0x2];
+ u8 internal_prio[0x4];
+};
+
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+ u8 source_address[0x20];
+ u8 destination_address[0x20];
+};
+
+struct mlx5_ifc_definer_hl_random_number_bits {
+ u8 random_number[0x10];
+ u8 reserved[0x10];
+};
+
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+ u8 ipv6_address_127_96[0x20];
+ u8 ipv6_address_95_64[0x20];
+ u8 ipv6_address_63_32[0x20];
+ u8 ipv6_address_31_0[0x20];
+};
+
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+ union {
+ struct {
+ u8 icmp_dw1[0x20];
+ u8 icmp_dw2[0x20];
+ u8 icmp_dw3[0x20];
+ };
+ struct {
+ u8 tcp_seq[0x20];
+ u8 tcp_ack[0x20];
+ u8 tcp_win_urg[0x20];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+ u8 tunnel_header_1[0x20];
+ u8 tunnel_header_2[0x20];
+ u8 tunnel_header_3[0x20];
+};
+
+struct mlx5_ifc_definer_hl_ipsec_bits {
+ u8 spi[0x20];
+ u8 sequence_number[0x20];
+ u8 reserved[0x10];
+ u8 ipsec_syndrome[0x8];
+ u8 next_header[0x8];
+};
+
+struct mlx5_ifc_definer_hl_metadata_bits {
+ u8 metadata_to_cqe[0x20];
+ u8 general_purpose[0x20];
+ u8 acomulated_hash[0x20];
+};
+
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+ u8 flex_parser_7[0x20];
+ u8 flex_parser_6[0x20];
+ u8 flex_parser_5[0x20];
+ u8 flex_parser_4[0x20];
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parser_1[0x20];
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_definer_hl_registers_bits {
+ u8 register_c_10[0x20];
+ u8 register_c_11[0x20];
+ u8 register_c_8[0x20];
+ u8 register_c_9[0x20];
+ u8 register_c_6[0x20];
+ u8 register_c_7[0x20];
+ u8 register_c_4[0x20];
+ u8 register_c_5[0x20];
+ u8 register_c_2[0x20];
+ u8 register_c_3[0x20];
+ u8 register_c_0[0x20];
+ u8 register_c_1[0x20];
+};
+
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
+struct mlx5_ifc_definer_hl_bits {
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+ struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+ struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+ struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+ struct mlx5_ifc_definer_hl_oks1_bits oks1;
+ struct mlx5_ifc_definer_hl_oks2_bits oks2;
+ struct mlx5_ifc_definer_hl_voq_bits voq;
+ u8 reserved_at_480[0x380];
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+ u8 unsupported_dest_ib_l3[0x80];
+ u8 unsupported_source_ib_l3[0x80];
+ u8 unsupported_udp_misc_outer[0x20];
+ u8 unsupported_udp_misc_inner[0x20];
+ struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+ struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+ u8 unsupported_config_headers_outer[0x80];
+ u8 unsupported_config_headers_inner[0x80];
+ struct mlx5_ifc_definer_hl_random_number_bits random_number;
+ struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+ struct mlx5_ifc_definer_hl_metadata_bits metadata;
+ u8 unsupported_utc_timestamp[0x40];
+ u8 unsupported_free_running_timestamp[0x40];
+ struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+ struct mlx5_ifc_definer_hl_registers_bits registers;
+	/* Reserved in case of header layout changes on future HW */
+ u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+ MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+struct mlx5_ifc_header_gtp_bits {
+ u8 version[0x3];
+ u8 proto_type[0x1];
+ u8 reserved1[0x1];
+ union {
+ u8 msg_flags[0x3];
+ struct {
+ u8 ext_hdr_flag[0x1];
+ u8 seq_num_flag[0x1];
+ u8 pdu_flag[0x1];
+ };
+ };
+ u8 msg_type[0x8];
+ u8 msg_len[0x8];
+ u8 teid[0x20];
+};
+
+struct mlx5_ifc_header_opt_gtp_bits {
+ u8 seq_num[0x10];
+ u8 pdu_num[0x8];
+ u8 next_ext_hdr_type[0x8];
+};
+
+struct mlx5_ifc_header_gtp_psc_bits {
+ u8 len[0x8];
+ u8 pdu_type[0x4];
+ u8 flags[0x4];
+ u8 qfi[0x8];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_ipv6_vtc_bits {
+ u8 version[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+ u8 next_hdr[0x8];
+ u8 hdr_len[0x8];
+ u8 type[0x8];
+ u8 segments_left[0x8];
+ union {
+ u8 flags[0x20];
+ struct {
+ u8 last_entry[0x8];
+ u8 flag[0x8];
+ u8 tag[0x10];
+ };
+ };
+};
+
+struct mlx5_ifc_header_vxlan_bits {
+ u8 flags[0x8];
+ u8 reserved1[0x18];
+ u8 vni[0x18];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_vxlan_gpe_bits {
+ u8 flags[0x8];
+ u8 rsvd0[0x10];
+ u8 protocol[0x8];
+ u8 vni[0x18];
+ u8 rsvd1[0x8];
+};
+
+struct mlx5_ifc_header_gre_bits {
+ union {
+ u8 c_rsvd0_ver[0x10];
+ struct {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 reserved_at_4[0x9];
+ u8 version[0x3];
+ };
+ };
+ u8 gre_protocol[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_30[0x10];
+};
+
+struct mlx5_ifc_header_geneve_bits {
+ union {
+ u8 ver_opt_len_o_c_rsvd[0x10];
+ struct {
+ u8 version[0x2];
+ u8 opt_len[0x6];
+ u8 o_flag[0x1];
+ u8 c_flag[0x1];
+ u8 reserved_at_a[0x6];
+ };
+ };
+ u8 protocol_type[0x10];
+ u8 vni[0x18];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_header_geneve_opt_bits {
+ u8 class[0x10];
+ u8 type[0x8];
+ u8 reserved[0x3];
+ u8 len[0x5];
+};
+
+struct mlx5_ifc_header_icmp_bits {
+ union {
+ u8 icmp_dw1[0x20];
+ struct {
+ u8 type[0x8];
+ u8 code[0x8];
+ u8 cksum[0x10];
+ };
+ };
+ union {
+ u8 icmp_dw2[0x20];
+ struct {
+ u8 ident[0x10];
+ u8 seq_nb[0x10];
+ };
+ };
+};
+
+struct mlx5hws_definer {
+ enum mlx5hws_definer_type type;
+ u8 dw_selector[DW_SELECTORS];
+ u8 byte_selector[BYTE_SELECTORS];
+ struct mlx5hws_rule_match_tag mask;
+ u32 obj_id;
+};
+
+struct mlx5hws_definer_cache {
+ struct list_head list_head;
+};
+
+struct mlx5hws_definer_cache_item {
+ struct mlx5hws_definer definer;
+ u32 refcount;
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+ return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag);
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b);
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer);
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz);
+
+#endif /* MLX5HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
new file mode 100644
index 000000000000..5643be1cd5bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_INTERNAL_H_
+#define MLX5HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "mlx5hws_prm.h"
+#include "mlx5hws.h"
+#include "mlx5hws_pool.h"
+#include "mlx5hws_vport.h"
+#include "mlx5hws_context.h"
+#include "mlx5hws_table.h"
+#include "mlx5hws_send.h"
+#include "mlx5hws_rule.h"
+#include "mlx5hws_cmd.h"
+#include "mlx5hws_action.h"
+#include "mlx5hws_definer.h"
+#include "mlx5hws_matcher.h"
+#include "mlx5hws_debug.h"
+#include "mlx5hws_pat_arg.h"
+#include "mlx5hws_bwc.h"
+#include "mlx5hws_bwc_complex.h"
+
+#define W_SIZE 2
+#define DW_SIZE 4
+#define BITS_IN_BYTE 8
+#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+#define MLX5HWS_ACTION_STE_IDX_ANY 0
+
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+ if (unlikely(!size)) {
+ pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+ return true;
+ }
+
+ return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* MLX5HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
new file mode 100644
index 000000000000..33d2b31e4b46
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_matcher_rtc_type {
+ HWS_MATCHER_RTC_TYPE_MATCH,
+ HWS_MATCHER_RTC_TYPE_STE_ARRAY,
+ HWS_MATCHER_RTC_TYPE_MAX,
+};
+
+static const char * const mlx5hws_matcher_rtc_type_str[] = {
+ [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
+ [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
+ [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
+};
+
+static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
+{
+ if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
+ rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
+ return mlx5hws_matcher_rtc_type_str[rtc_type];
+}
+
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+ /* Collision table concatenation is done only for large rule tables */
+ return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+ if (hws_matcher_requires_col_tbl(log_num_of_rules))
+ return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+ /* For small rule tables we use a single deep table to assure insertion */
+ return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
+
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+ return ret;
+ }
+ return 0;
+}
+
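+/* Insert the matcher into the table's priority-ordered matcher list and
+ * rewire the steering chain: the previous flow table points at the new
+ * matcher's RTCs, and the new matcher's end FT points at the next matcher
+ * or at the table's default miss.
+ */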
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *prev = NULL;
+ struct mlx5hws_matcher *next = NULL;
+ struct mlx5hws_matcher *tmp_matcher;
+ int ret;
+
+ /* Find location in matcher list */
+ if (list_empty(&tbl->matchers_list)) {
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ goto connect;
+ }
+
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+ if (tmp_matcher->attr.priority > matcher->attr.priority) {
+ next = tmp_matcher;
+ break;
+ }
+ prev = tmp_matcher;
+ }
+
+ if (next)
+ /* insert before next */
+ list_add_tail(&matcher->list_node, &next->list_node);
+ else
+ /* insert after prev */
+ list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+ if (next) {
+ /* Connect to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ matcher->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+ goto remove_from_list;
+ }
+ } else {
+		/* Connect the last matcher to the next miss_tbl, if one exists */
+		ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+		if (ret) {
+			mlx5hws_err(ctx, "Failed to connect new matcher to miss_tbl\n");
+ goto remove_from_list;
+ }
+ }
+
+ /* Connect to previous FT */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ prev ? prev->end_ft_id : tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+ goto remove_from_list;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+ goto remove_from_list;
+ }
+
+ if (!prev) {
+		/* This matcher is now first: update tables that miss to this table */
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+ goto remove_from_list;
+ }
+ }
+
+ return 0;
+
+remove_from_list:
+ list_del_init(&matcher->list_node);
+ return ret;
+}
+
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *next = NULL, *prev = NULL;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 prev_ft_id = tbl->ft_id;
+ int ret;
+
+ if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+ prev = list_prev_entry(matcher, list_node);
+ prev_ft_id = prev->end_ft_id;
+ }
+
+ if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (next) {
+ /* Connect previous end FT to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ prev_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+ goto matcher_reconnect;
+ }
+ } else {
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+ goto matcher_reconnect;
+ }
+ }
+
+	/* Removing the first matcher - update connected miss tables, if any */
+ if (prev_ft_id == tbl->ft_id) {
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+ goto matcher_reconnect;
+ }
+
+ return 0;
+
+matcher_reconnect:
+ if (list_empty(&tbl->matchers_list) || !prev)
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ else
+ /* insert after prev matcher */
+ list_add(&matcher->list_node, &prev->list_node);
+
+ return ret;
+}
+
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ bool is_mirror)
+{
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+ bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
+
+ if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+ (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+ /* Optimize FDB RTC */
+ rtc_attr->log_size = 0;
+ rtc_attr->log_depth = 0;
+ } else {
+ /* Keep original values */
+ rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
+ rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
+ }
+}
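+
+/* Example (illustrative): for an FDB matcher created with
+ * optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT, the non-mirror
+ * RTC is allocated with log_size/log_depth 0, presumably because that
+ * copy sees no traffic; with FLOW_SRC_WIRE it is the mirror RTC that
+ * shrinks instead.
+ */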
+
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_match_template *mt = matcher->mt;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = &matcher->match_ste.rtc_0_id;
+ rtc_1_id = &matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
+ }
+ }
+
+ /* Match pool requires implicit allocation */
+ ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
+ if (ret) {
+			mlx5hws_err(ctx, "Failed to allocate STE for %s RTC\n",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ return ret;
+ }
+ break;
+
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ rtc_0_id = &action_ste->rtc_0_id;
+ rtc_1_id = &action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ attr->table.sz_row_log;
+ rtc_attr.log_size = ste->order;
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* The action STEs use the default always hit definer */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ break;
+
+ default:
+ mlx5hws_err(ctx, "HWS Invalid RTC type\n");
+ return -EINVAL;
+ }
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[tbl->type];
+ default_stc = ctx->common_res[tbl->type].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+		mlx5hws_err(ctx, "Failed to create matcher RTC of type %s\n",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto free_ste;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+			mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s\n",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto destroy_rtc_0;
+ }
+ }
+
+ return 0;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+free_ste:
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+ return ret;
+}
+
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 rtc_0_id, rtc_1_id;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = matcher->match_ste.rtc_0_id;
+ rtc_1_id = matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ break;
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+ rtc_0_id = action_ste->rtc_0_id;
+ rtc_1_id = action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ break;
+ default:
+ return;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+}
+
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
+ struct mlx5hws_matcher *matcher)
+{
+ switch (matcher->attr.optimize_flow_src) {
+ case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
+ break;
+ case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ bool valid;
+ int ret;
+
+ valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+ if (!valid) {
+ mlx5hws_err(ctx, "Invalid combination in action template\n");
+ return -EINVAL;
+ }
+
+ /* Process action template to setters */
+ ret = mlx5hws_action_template_process(at);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to process action template\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
+ if (!resize_data)
+ return -ENOMEM;
+
+ resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+
+ resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
+ resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
+ resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
+ resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
+ src_matcher->action_ste[0].pool :
+ NULL;
+ resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
+ resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
+ resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
+ resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
+ src_matcher->action_ste[1].pool :
+ NULL;
+
+ /* Place the new resized matcher on the dst matcher's list */
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+
+ /* Move all the previous resized matchers to the dst matcher's list */
+ while (!list_empty(&src_matcher->resize_data)) {
+ resize_data = list_first_entry(&src_matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+ }
+
+ return 0;
+}
+
+static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ if (!mlx5hws_matcher_is_resizable(matcher))
+ return;
+
+ while (!list_empty(&matcher->resize_data)) {
+ resize_data = list_first_entry(&matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+
+ if (resize_data->max_stes) {
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[1].stc);
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[0].stc);
+
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_1_id);
+ }
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_0_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_0_id);
+ if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
+ mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
+ mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
+ }
+ }
+
+ kfree(resize_data);
+ }
+}
+
+static int
+hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ /* Allocate action STE mempool */
+ pool_attr.table_type = tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+ action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_ste->pool) {
+ mlx5hws_err(ctx, "Failed to create action ste pool\n");
+ return -EINVAL;
+ }
+
+ /* Allocate action RTC */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action RTC\n");
+ goto free_ste_pool;
+ }
+
+ /* Allocate STC for jumps to STE */
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = action_ste->ste;
+ stc_attr.ste_table.ste_pool = action_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
+ &action_ste->stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
+ goto free_rtc;
+ }
+
+ return 0;
+
+free_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+free_ste_pool:
+ mlx5hws_pool_destroy(action_ste->pool);
+ return ret;
+}
+
+static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ if (!action_ste->max_stes ||
+ matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
+ mlx5hws_matcher_is_in_resize(matcher))
+ return;
+
+ mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ mlx5hws_pool_destroy(action_ste->pool);
+}
+
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+			mlx5hws_err(ctx, "Invalid action template %d\n", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+	/* No action STEs are required for this matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste[0].max_stes = max_stes;
+ matcher->action_ste[1].max_stes = max_stes;
+
+ ret = hws_matcher_bind_at_idx(matcher, 0);
+ if (ret)
+ return ret;
+
+ ret = hws_matcher_bind_at_idx(matcher, 1);
+ if (ret)
+ goto free_at_0;
+
+ return 0;
+
+free_at_0:
+ hws_matcher_unbind_at_idx(matcher, 0);
+ return ret;
+}
+
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_unbind_at_idx(matcher, 1);
+ hws_matcher_unbind_at_idx(matcher, 0);
+}
+
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ int ret;
+
+ /* Calculate match, range and hash definers */
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+ ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+ if (ret) {
+			if (ret == -E2BIG)
+ mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+ return ret;
+ }
+ }
+
+	/* Create an STE pool per matcher */
+ pool_attr.table_type = matcher->tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
+ pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+
+ matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!matcher->match_ste.pool) {
+ mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
+ ret = -EOPNOTSUPP;
+ goto uninit_match_definer;
+ }
+
+ return 0;
+
+uninit_match_definer:
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+ return ret;
+}
+
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_pool_destroy(matcher->match_ste.pool);
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+}
+
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ switch (attr->insert_mode) {
+ case MLX5HWS_MATCHER_INSERT_BY_HASH:
+ if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+ if (attr->table.sz_col_log) {
+ mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ if (!caps->rtc_hash_split_table) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ if (!caps->rtc_linear_lookup_table ||
+ !IS_BIT_SET(caps->access_index_mode,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+				mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d\n",
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (hws_matcher_validate_insert_mode(caps, matcher))
+ return -EOPNOTSUPP;
+
+ if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+ mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Convert number of rules to the required depth */
+ if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
+ attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+
+ matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+
+ return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+ int ret;
+
+ /* Select and create the definers for current matcher */
+ ret = hws_matcher_bind_mt(matcher);
+ if (ret)
+ return ret;
+
+ /* Calculate and verify action combination */
+ ret = hws_matcher_bind_at(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Create matcher end flow table anchor */
+ ret = hws_matcher_create_end_ft(matcher);
+ if (ret)
+ goto unbind_at;
+
+ /* Allocate the RTC for the new matcher */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ if (ret)
+ goto destroy_end_ft;
+
+ /* Connect the matcher to the matcher list */
+ ret = hws_matcher_connect(matcher);
+ if (ret)
+ goto destroy_rtc;
+
+ return 0;
+
+destroy_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+destroy_end_ft:
+ hws_matcher_destroy_end_ft(matcher);
+unbind_at:
+ hws_matcher_unbind_at(matcher);
+unbind_mt:
+ hws_matcher_unbind_mt(matcher);
+ return ret;
+}
+
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_resize_uninit(matcher);
+ hws_matcher_disconnect(matcher);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_end_ft(matcher);
+ hws_matcher_unbind_at(matcher);
+ hws_matcher_unbind_mt(matcher);
+}
+
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_matcher *col_matcher;
+ int ret;
+
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return 0;
+
+ if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ return 0;
+
+	col_matcher = kzalloc(sizeof(*col_matcher), GFP_KERNEL);
+ if (!col_matcher)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&col_matcher->resize_data);
+
+ col_matcher->tbl = matcher->tbl;
+ col_matcher->mt = matcher->mt;
+ col_matcher->at = matcher->at;
+ col_matcher->num_of_at = matcher->num_of_at;
+ col_matcher->num_of_mt = matcher->num_of_mt;
+ col_matcher->attr.priority = matcher->attr.priority;
+ col_matcher->flags = matcher->flags;
+ col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+ col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+ col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+ col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
+ col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+
+ col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+
+ ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ ret = hws_matcher_create_and_connect(col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ matcher->col_matcher = col_matcher;
+
+ return 0;
+
+free_col_matcher:
+ kfree(col_matcher);
+ mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+ return ret;
+}
+
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return;
+
+ if (matcher->col_matcher) {
+ hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+ kfree(matcher->col_matcher);
+ }
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret;
+
+ INIT_LIST_HEAD(&matcher->resize_data);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate matcher resource and connect to the packet pipe */
+ ret = hws_matcher_create_and_connect(matcher);
+ if (ret)
+ goto unlock_err;
+
+ /* Create additional matcher for collision handling */
+ ret = hws_matcher_create_col_matcher(matcher);
+ if (ret)
+		goto destroy_and_disconnect;
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+destroy_and_disconnect:
+ hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mutex_lock(&ctx->ctrl_lock);
+ hws_matcher_destroy_col_matcher(matcher);
+ hws_matcher_destroy_and_disconnect(matcher);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u32 required_stes;
+ int ret;
+
+ if (!matcher->attr.max_num_of_at_attach) {
+		mlx5hws_dbg(ctx, "Number of attached action templates (%d) has reached the allowed limit\n",
+ matcher->num_of_at);
+ return -EOPNOTSUPP;
+ }
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret)
+ return ret;
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
+ required_stes,
+ matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ return -ENOMEM;
+ }
+
+ matcher->at[matcher->num_of_at] = *at;
+ matcher->num_of_at += 1;
+ matcher->attr.max_num_of_at_attach -= 1;
+
+ if (matcher->col_matcher)
+ matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+ return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret = 0;
+ int i;
+
+ if (!num_of_mt || !num_of_at) {
+		mlx5hws_err(ctx, "Number of action/match templates cannot be zero\n");
+ return -EOPNOTSUPP;
+ }
+
+ matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+ if (!matcher->mt)
+ return -ENOMEM;
+
+ matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
+ if (!matcher->at) {
+ mlx5hws_err(ctx, "Failed to allocate action template array\n");
+ ret = -ENOMEM;
+ goto free_mt;
+ }
+
+ for (i = 0; i < num_of_mt; i++)
+ matcher->mt[i] = *mt[i];
+
+ for (i = 0; i < num_of_at; i++)
+ matcher->at[i] = *at[i];
+
+ matcher->num_of_mt = num_of_mt;
+ matcher->num_of_at = num_of_at;
+
+ return 0;
+
+free_mt:
+ kfree(matcher->mt);
+ return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+ kfree(matcher->at);
+ kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *matcher;
+ int ret;
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ return NULL;
+
+ matcher->tbl = tbl;
+ matcher->attr = *attr;
+
+ ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_init(matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+ goto unset_templates;
+ }
+
+ return matcher;
+
+unset_templates:
+ hws_matcher_unset_templates(matcher);
+free_matcher:
+ kfree(matcher);
+ return NULL;
+}
+
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_uninit(matcher);
+ hws_matcher_unset_templates(matcher);
+ kfree(matcher);
+ return 0;
+}
+
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable)
+{
+ struct mlx5hws_match_template *mt;
+
+ mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return NULL;
+
+ mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!mt->match_param)
+ goto free_template;
+
+ memcpy(mt->match_param, match_param, match_param_sz);
+ mt->match_criteria_enable = match_criteria_enable;
+
+ return mt;
+
+free_template:
+ kfree(mt);
+ return NULL;
+}
+
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+ kfree(mt->match_param);
+ kfree(mt);
+ return 0;
+}
+
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+ int i;
+
+ if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+ mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+ return -EINVAL;
+ }
+
+ if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+ !mlx5hws_matcher_is_resizable(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+ mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+ mlx5hws_matcher_is_in_resize(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+ return -EINVAL;
+ }
+
+ /* Compare match templates - make sure the definers are equivalent */
+ if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+ mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+ return -EINVAL;
+ }
+
+ if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
+ dst_matcher->action_ste[0].max_stes) {
+ mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < src_matcher->num_of_mt; i++) {
+ if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+ dst_matcher->mt[i].definer)) {
+ mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ int ret = 0;
+
+ mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+ ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+ if (ret)
+ goto out;
+
+ src_matcher->resize_dst = dst_matcher;
+
+ ret = hws_matcher_resize_init(src_matcher);
+ if (ret)
+ src_matcher->resize_dst = NULL;
+
+out:
+ mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+ return ret;
+}
+
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+ mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(src_matcher != rule->matcher)) {
+ mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+ return -EINVAL;
+ }
+
+ return mlx5hws_rule_move_hws_add(rule, attr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
new file mode 100644
index 000000000000..125391d1a114
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_MATCHER_H_
+#define MLX5HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table with 3% of the
+ * main table's rows to the main table provides enough resources for a
+ * high insertion-success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
+ */
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
+/* Threshold to determine whether the number of rules requires a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
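+
+/* Example (illustrative): a main table of 2^20 rows gets a collision
+ * table of 2^(20 - 5) = 2^15 rows, i.e. 1/32 ~ 3% of the main table,
+ * matching the ratio computed above.
+ */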
+
+enum mlx5hws_matcher_offset {
+ MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+ MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+enum mlx5hws_matcher_flags {
+ MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+ MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+};
+
+struct mlx5hws_match_template {
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_definer_fc *fc;
+ u32 *match_param;
+ u8 match_criteria_enable;
+ u16 fc_sz;
+};
+
+struct mlx5hws_matcher_match_ste {
+ struct mlx5hws_pool_chunk ste;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_action_ste {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+};
+
+struct mlx5hws_matcher_resize_data_node {
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_resize_data {
+ struct mlx5hws_matcher_resize_data_node action_ste[2];
+ u8 max_stes;
+ struct list_head list_node;
+};
+
+struct mlx5hws_matcher {
+ struct mlx5hws_table *tbl;
+ struct mlx5hws_matcher_attr attr;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at;
+ u8 num_of_at;
+ u8 num_of_mt;
+ /* enum mlx5hws_matcher_flags */
+ u8 flags;
+ u32 end_ft_id;
+ struct mlx5hws_matcher *col_matcher;
+ struct mlx5hws_matcher *resize_dst;
+ struct mlx5hws_matcher_match_ste match_ste;
+ struct mlx5hws_matcher_action_ste action_ste[2];
+ struct list_head list_node;
+ struct list_head resize_data;
+};
+
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+ return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+ return !!matcher->resize_dst;
+}
+
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+ return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+#endif /* MLX5HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
new file mode 100644
index 000000000000..e084a5cbf81f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+ /* Return the roundup of log2(data_size) */
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+ return MLX5HWS_ARG_CHUNK_SIZE_1;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+ return MLX5HWS_ARG_CHUNK_SIZE_2;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+ return MLX5HWS_ARG_CHUNK_SIZE_3;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+ return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+ return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
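+
+/* Example (illustrative): with MLX5HWS_ARG_DATA_SIZE == 64, a data_size
+ * of 100 bytes rounds up to MLX5HWS_ARG_CHUNK_SIZE_2, i.e. two 64B
+ * chunks (128 bytes) - the rounded-up log2 of the request in 64B units.
+ */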
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+ return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+ return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+ return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+ u16 i, field;
+ u8 action_id;
+
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+ switch (action_id) {
+ case MLX5_MODIFICATION_TYPE_NOP:
+ field = MLX5_MODI_OUT_NONE;
+ break;
+
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ field = MLX5_GET(set_action_in, &actions[i], field);
+ break;
+
+ case MLX5_MODIFICATION_TYPE_COPY:
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+ break;
+
+ default:
+ /* Insert/Remove/Unknown actions require reparse */
+ return true;
+ }
+
+		/* The fields below can change the packet structure and require a reparse */
+ if (field == MLX5_MODI_OUT_ETHERTYPE ||
+ field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+ return true;
+ }
+
+ return false;
+}
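+
+/* Example (illustrative): a pattern of plain SET/ADD/COPY actions does
+ * not force a reparse, but any insert/remove action, or a write to
+ * ETHERTYPE or IPV6_NEXT_HDR (either of which changes how the packet
+ * parses), makes mlx5hws_pat_require_reparse() return true.
+ */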
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+ struct mlx5hws_pattern_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->ptrn_list);
+ mutex_init(&new_cache->lock);
+
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+ mutex_destroy(&cache->lock);
+ kfree(cache);
+}
+
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+ __be64 cur_actions[],
+ int num_of_actions,
+ __be64 actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(set_action_in, &actions[i], action_type);
+
+ if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+ action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ if (actions[i] != cur_actions[i])
+ return false;
+ } else {
+ /* Compare just the control, not the values */
+ if ((__force __be32)actions[i] !=
+ (__force __be32)cur_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
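+
+/* Note: for SET/ADD actions only the control dword (action type, field,
+ * offset, length) is compared above, not the immediate data, so two
+ * patterns that set the same fields to different values can share one
+ * cached pattern; the per-rule immediate values are expected to be
+ * supplied via modify-header arguments instead.
+ */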
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+ list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+ if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+ (__be64 *)cached_pat->mh_data.data,
+ num_of_actions,
+ actions))
+ return cached_pat;
+ }
+
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+ if (cached_pattern) {
+ /* LRU: move it to be first in the list */
+		list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount++;
+ }
+
+ return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+ u32 pattern_id,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+ if (!cached_pattern)
+ return NULL;
+
+ cached_pattern->mh_data.num_of_actions = num_of_actions;
+ cached_pattern->mh_data.pattern_id = pattern_id;
+ cached_pattern->mh_data.data =
+ kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!cached_pattern->mh_data.data)
+ goto free_cached_obj;
+
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount = 1;
+
+ return cached_pattern;
+
+free_cached_obj:
+ kfree(cached_pattern);
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+ u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+ list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+ if (cached_pattern->mh_data.pattern_id == ptrn_id)
+ return cached_pattern;
+ }
+
+ return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+ list_del_init(&cached_pattern->ptrn_list_node);
+
+ kfree(cached_pattern->mh_data.data);
+ kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ mutex_lock(&cache->lock);
+ cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+ pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+ goto out;
+ }
+
+ if (--cached_pattern->refcount)
+ goto out;
+
+ mlx5hws_pat_remove_pattern(cached_pattern);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+ mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern, size_t pattern_sz,
+ u32 *pattern_id)
+{
+ u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+ u32 ptrn_id = 0;
+ int ret = 0;
+
+ mutex_lock(&ctx->pattern_cache->lock);
+
+ cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+ num_of_actions,
+ pattern);
+ if (cached_pattern) {
+ *pattern_id = cached_pattern->mh_data.pattern_id;
+ goto out_unlock;
+ }
+
+ ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+ pattern_sz,
+ (u8 *)pattern,
+ &ptrn_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+ goto out_unlock;
+ }
+
+ cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+ ptrn_id,
+ num_of_actions,
+ pattern);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+ ret = -EINVAL;
+ goto clean_pattern;
+ }
+
+ mutex_unlock(&ctx->pattern_cache->lock);
+ *pattern_id = ptrn_id;
+
+ return ret;
+
+clean_pattern:
+	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+out_unlock:
+ mutex_unlock(&ctx->pattern_cache->lock);
+ return ret;
+}
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+ void *comp_data,
+ u32 arg_idx)
+{
+ send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+ send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ send_attr->id = arg_idx;
+ send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+ num_of_actions);
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ int i, full_iter, leftover;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+	/* Each WQE can hold 64B of data, so large writes take multiple iterations */
+ full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+ leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+ for (i = 0; i < full_iter; i++) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, wqe_len);
+ send_attr.id = arg_idx++;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+ /* Move to next argument data */
+ arg_data += MLX5HWS_ARG_DATA_SIZE;
+ }
+
+ if (leftover) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, leftover);
+ send_attr.id = arg_idx;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+ }
+}
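+
+/* Example (illustrative): writing 150 bytes of argument data posts two
+ * full 64B WQEs plus one 22-byte leftover WQE, with send_attr.id
+ * advancing from arg_idx to arg_idx + 2 so that each WQE targets the
+ * next argument chunk.
+ */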
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+
+ mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ret)
+ mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size)
+{
+	return arg_size >= ctx->caps->log_header_modify_argument_granularity &&
+	       arg_size <= ctx->caps->log_header_modify_argument_max_alloc;
+}
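+
+/* Note: despite the name, 'arg_size' above is a log2 size; it is
+ * compared against FW caps that are themselves expressed as log2
+ * values (see the log_header_modify_argument_* cap names).
+ */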
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ u16 single_arg_log_sz;
+ u16 multi_arg_log_sz;
+ int ret;
+ u32 id;
+
+ single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+ multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+ if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+ mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ /* Alloc bulk of args */
+ ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+ return ret;
+ }
+
+ if (write_data) {
+ ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+ data, data_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed writing arg data\n");
+ mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+ return ret;
+ }
+ }
+
+ *arg_id = id;
+ return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+ mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+ int ret;
+
+ ret = mlx5hws_arg_create(ctx,
+ (u8 *)data,
+ data_sz,
+ log_bulk_sz,
+ write_data,
+ arg_id);
+ if (ret)
+ mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+ return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+ /* Need to check field limitation here, but for now - return OK */
+ return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+ u16 *src_field, u16 *dst_field)
+{
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ case MLX5_ACTION_TYPE_ADD:
+ *src_field = MLX5_GET(set_action_in, pattern, field);
+ *dst_field = INVALID_FIELD;
+ break;
+ case MLX5_ACTION_TYPE_COPY:
+ *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+ *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+ break;
+ default:
+ pr_warn("HWS: invalid modify header action type %d\n", action_type);
+ }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+ size_t i;
+
+ for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+ u8 action_type =
+ MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+ mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+ return false;
+ }
+ if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+ mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nope_location, __be64 *new_pat)
+{
+ u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 src_field, dst_field;
+ u8 action_type;
+ size_t i, j;
+
+ *new_size = num_actions;
+ *nope_location = 0;
+
+ if (num_actions == 1)
+ return;
+
+ for (i = 0, j = 0; i < num_actions; i++, j++) {
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+ hws_action_modify_get_target_fields(action_type, &pattern[i],
+ &src_field, &dst_field);
+ if (i % 2) {
+ if (action_type == MLX5_ACTION_TYPE_COPY &&
+ (prev_src_field == src_field ||
+ prev_dst_field == dst_field)) {
+				/* need NOP */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ } else if (prev_src_field == src_field) {
+				/* need NOP */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ }
+ }
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+ /* check if no more space */
+ if (j > max_actions) {
+ *new_size = num_actions;
+ *nope_location = 0;
+ return;
+ }
+
+ prev_src_field = src_field;
+ prev_dst_field = dst_field;
+ }
+}
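+
+/* Example (illustrative): for the pattern [SET(field A), SET(field A)],
+ * the second action lands at odd index 1 and touches the same field as
+ * its predecessor, so a NOP is inserted between them: *new_size becomes
+ * 3 and bit 1 of *nope_location is set.
+ */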
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
new file mode 100644
index 000000000000..27ca93385b08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+ MLX5HWS_ARG_CHUNK_SIZE_1,
+ /* Keep MIN updated when changing */
+ MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+ MLX5HWS_ARG_CHUNK_SIZE_2,
+ MLX5HWS_ARG_CHUNK_SIZE_3,
+ MLX5HWS_ARG_CHUNK_SIZE_4,
+ MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+ MLX5HWS_MODIFY_ACTION_SIZE = 8,
+ MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+struct mlx5hws_pattern_cache {
+ struct mutex lock; /* Protect pattern list */
+ struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+ struct {
+ u32 pattern_id;
+ u8 *data;
+ u16 num_of_actions;
+ } mh_data;
+ u32 refcount;
+ struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *modify_hdr_arg_id);
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern,
+ size_t pattern_sz,
+ u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+ u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
+ size_t *new_size, u32 *nope_location, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
new file mode 100644
index 000000000000..a8a63e3278be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool,
+ int resource_idx)
+{
+ hws_pool_free_one_resource(pool->resource[resource_idx]);
+ pool->resource[resource_idx] = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+ pool->mirror_resource[resource_idx] = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ goto free_resource;
+ }
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource[idx] = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource[idx] = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource[idx] = mirror_resource;
+ }
+
+ return 0;
+}
+
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *cur_bmp;
+
+ cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!cur_bmp)
+ return NULL;
+
+ bitmap_fill(cur_bmp, 1 << log_range);
+
+ return cur_bmp;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+ u32 order, bool *is_new_buddy)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ u32 new_buddy_size;
+
+ buddy = pool->db.buddy_manager->buddies[idx];
+ if (buddy)
+ return buddy;
+
+ new_buddy_size = max(pool->alloc_log_sz, order);
+ *is_new_buddy = true;
+ buddy = mlx5hws_buddy_create(new_buddy_size);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+ new_buddy_size, idx);
+ return NULL;
+ }
+
+ if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, new_buddy_size, idx);
+ mlx5hws_buddy_cleanup(buddy);
+ return NULL;
+ }
+
+ pool->db.buddy_manager->buddies[idx] = buddy;
+
+ return buddy;
+}
+
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+ int order,
+ u32 *buddy_idx,
+ int *seg)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ bool new_mem = false;
+ int ret = 0;
+ int i;
+
+ *seg = -1;
+
+ /* Find the next free place from the buddy array */
+ while (*seg == -1) {
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = hws_pool_buddy_get_next_buddy(pool, i,
+ order,
+ &new_mem);
+ if (!buddy) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ *seg = mlx5hws_buddy_alloc_mem(buddy, order);
+ if (*seg != -1)
+ goto found;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+ mlx5hws_err(pool->ctx,
+ "Failed to allocate seg for one-resource pool\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (new_mem) {
+ /* We just created a new memory pool, so there should have been room */
+ mlx5hws_err(pool->ctx,
+ "No memory for order: %d with buddy no: %d\n",
+ order, i);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+found:
+ *buddy_idx = i;
+out:
+ return ret;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over the buddies and find next free slot */
+ ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = pool->db.buddy_manager->buddies[i];
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy_manager->buddies[i] = NULL;
+ }
+ }
+
+ kfree(pool->db.buddy_manager);
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+ pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+ if (!pool->db.buddy_manager)
+ return -ENOMEM;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+ bool new_buddy;
+
+ if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+ mlx5hws_err(pool->ctx,
+ "Failed allocating memory on create log_sz: %d\n", log_range);
+ kfree(pool->db.buddy_manager);
+ return -ENOMEM;
+ }
+ }
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
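As a rough illustration of the buddy-backed path just initialized, a chunk request of order n consumes a contiguous 2^n block from one of the buddies; the pool pointer below stands in for a pool created with buddy-managed flags:

    /* Hedged sketch: allocate 8 (2^3) contiguous elements, then release them */
    struct mlx5hws_pool_chunk chunk = { .order = 3 };

    if (!mlx5hws_pool_chunk_alloc(pool, &chunk)) {
            /* chunk.resource_idx selects the backing buddy/resource and
             * chunk.offset is the first element within that resource
             */
            mlx5hws_pool_chunk_free(pool, &chunk);
    }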
+
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+ u32 alloc_size, int idx)
+{
+ int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+ struct mlx5hws_pool_elements *elem;
+ u32 alloc_size;
+
+ alloc_size = pool->alloc_log_sz;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+
+ /* Sharing the same resource also means that all the elements are of size 1 */
+ if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+ !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+ /* Currently all chunks are of size 1 */
+ elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+ if (!elem->bitmap) {
+ mlx5hws_err(pool->ctx,
+ "Failed to create bitmap type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_elem;
+ }
+
+ elem->log_size = alloc_size - order;
+ }
+
+ if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_db;
+ }
+
+ pool->db.element_manager->elements[idx] = elem;
+
+ return elem;
+
+free_db:
+ bitmap_free(elem->bitmap);
+free_elem:
+ kfree(elem);
+ return NULL;
+}
+
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+ unsigned int segment, size;
+
+ size = 1 << elem->log_size;
+
+ segment = find_first_bit(elem->bitmap, size);
+ if (segment >= size) {
+ elem->is_full = true;
+ return -ENOMEM;
+ }
+
+ bitmap_clear(elem->bitmap, segment, 1);
+ *seg = segment;
+ return 0;
+}
+
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ elem = pool->db.element_manager->elements[0];
+ if (!elem)
+ elem = hws_pool_element_create_new_elem(pool, order, 0);
+ if (!elem)
+ goto err_no_elem;
+
+ if (hws_pool_element_find_seg(elem, seg) != 0) {
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+ }
+
+ *idx = 0;
+ elem->num_of_elements++;
+ return 0;
+
+err_no_elem:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ int ret, i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ if (!pool->resource[i]) {
+ ret = hws_pool_create_resource_on_index(pool, order, i);
+ if (ret)
+ goto err_no_res;
+ *idx = i;
+ *seg = 0; /* One memory slot in that element */
+ return 0;
+ }
+ }
+
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+
+err_no_res:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+ hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ (void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new request for a chunk arrives,
+ *   a resource is allocated and handed out.
+ * - When that chunk is freed,
+ *   the resource is freed as well.
+ */
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+ pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+ return 0;
+}
+
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_elements *elem,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ hws_pool_resource_free(pool, chunk->resource_idx);
+ kfree(elem);
+ pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ if (unlikely(chunk->resource_idx))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ elem = pool->db.element_manager->elements[chunk->resource_idx];
+ if (!elem) {
+ mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ bitmap_set(elem->bitmap, chunk->offset, 1);
+ elem->is_full = false;
+ elem->num_of_elements--;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+ !elem->num_of_elements)
+ hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over all memory elements and find/allocate free slot */
+ ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_elements *elem;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ elem = pool->db.element_manager->elements[i];
+ if (elem) {
+ bitmap_free(elem->bitmap);
+ kfree(elem);
+ pool->db.element_manager->elements[i] = NULL;
+ }
+ }
+ kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new request for a chunk arrives,
+ *   the first and only slot of memory/resource is allocated;
+ *   once it is exhausted, an error is returned.
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+ if (!pool->db.element_manager)
+ return -ENOMEM;
+
+ pool->p_db_uninit = &hws_onesize_element_db_uninit;
+ pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+ pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+ ret = hws_pool_general_element_db_init(pool);
+ else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+ ret = hws_pool_onesize_element_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init db type %d (ret: %d)\n", db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_uninit(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+ /* Select the resource DB type according to the pool flags */
+ if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+ else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ int i;
+
+ mutex_destroy(&pool->lock);
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+ if (pool->resource[i])
+ hws_pool_resource_free(pool, i);
+
+ hws_pool_db_uninit(pool);
+
+ kfree(pool);
+ return 0;
+}
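Putting the file together, a hedged end-to-end sketch of the pool API implemented above; the context pointer and attribute values are illustrative, not from a real caller:

    struct mlx5hws_pool_attr attr = {
            .pool_type = MLX5HWS_POOL_TYPE_STE,
            .table_type = MLX5HWS_TABLE_TYPE_FDB,
            .flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL,
            .alloc_log_sz = 10, /* grow in resources of 2^10 elements */
    };
    struct mlx5hws_pool_chunk chunk = { .order = 0 }; /* one element */
    struct mlx5hws_pool *pool;

    pool = mlx5hws_pool_create(ctx, &attr); /* 'ctx' assumed to exist */
    if (pool) {
            if (!mlx5hws_pool_chunk_alloc(pool, &chunk)) {
                    /* ... use chunk.resource_idx / chunk.offset ... */
                    mlx5hws_pool_chunk_free(pool, &chunk);
            }
            mlx5hws_pool_destroy(pool);
    }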
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
new file mode 100644
index 000000000000..621298b352b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ u32 resource_idx;
+ /* Internal offset, relative to base index */
+ int offset;
+ int order;
+};
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+ /* Only one resource in the pool */
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+ /* Resources are not shared between chunks */
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+ /* All objects are of the same size */
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+ /* Managed by buddy allocator */
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+ /* Allocate pool_type memory on pool creation */
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+ /* These values should be used by the caller */
+ MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+ MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+ MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Log of the allocation size used once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+ /* Used for allocating chunks of big memory; each element has its own resource in the FW */
+ MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+ /* One resource only; all elements are of the same single size */
+ MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+ /* Many resources; the memory is allocated with the buddy mechanism */
+ MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+ struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+ u32 num_of_elements;
+ unsigned long *bitmap;
+ u32 log_size;
+ bool is_full;
+};
+
+struct mlx5hws_element_manager {
+ struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ struct mlx5hws_element_manager *element_manager;
+ struct mlx5hws_buddy_manager *buddy_manager;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_uninit_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ /* DB */
+ struct mlx5hws_pool_db db;
+ /* Functions */
+ mlx5hws_pool_uninit_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32
+mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->resource[chunk->resource_idx]->base_id;
+}
+
+static inline u32
+mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->mirror_resource[chunk->resource_idx]->base_id;
+}
+#endif /* MLX5HWS_POOL_H_ */
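The two inline helpers above are typically combined with the chunk offset to address a concrete FW object; a one-line sketch, assuming a chunk allocated as in the pool code:

    /* Absolute object index = base ID of the backing resource + chunk offset */
    u32 ste_idx = mlx5hws_pool_chunk_get_base_id(pool, &chunk) + chunk.offset;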
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
new file mode 100644
index 000000000000..de92cecbeb92
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+ MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+ MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+ MLX5_MODIFICATION_TYPE_NOP = 0x6,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+ MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+ MLX5_MODIFICATION_TYPE_MAX,
+};
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+ MLX5_MODI_GTP_TEID = 0x6E,
+ MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+ MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_HASH_RESULT = 0x81,
+ MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+ MLX5_MODI_IN_MPLS_LABEL_1,
+ MLX5_MODI_IN_MPLS_LABEL_2,
+ MLX5_MODI_IN_MPLS_LABEL_3,
+ MLX5_MODI_IN_MPLS_LABEL_4,
+ MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+ MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+ MLX5_MODI_META_REG_C_8 = 0x8F,
+ MLX5_MODI_META_REG_C_9 = 0x90,
+ MLX5_MODI_META_REG_C_10 = 0x91,
+ MLX5_MODI_META_REG_C_11 = 0x92,
+ MLX5_MODI_META_REG_C_12 = 0x93,
+ MLX5_MODI_META_REG_C_13 = 0x94,
+ MLX5_MODI_META_REG_C_14 = 0x95,
+ MLX5_MODI_META_REG_C_15 = 0x96,
+ MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+ MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+ MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+ MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+ MLX5_MODI_OUT_ESP_SPI = 0x5E,
+ MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+ MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+ MLX5_MODI_INVALID = INT_MAX,
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+enum mlx5_ifc_rtc_update_mode {
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+enum mlx5_ifc_rtc_access_mode {
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+enum mlx5_ifc_rtc_ste_format {
+ MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+ MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+ MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+enum mlx5_ifc_rtc_reparse_mode {
+ MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+ MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+ MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+struct mlx5_ifc_rtc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 update_index_mode[0x2];
+ u8 reparse_mode[0x2];
+ u8 num_match_ste[0x4];
+ u8 pd[0x18];
+ u8 reserved_at_a0[0x9];
+ u8 access_index_mode[0x3];
+ u8 num_hash_definer[0x4];
+ u8 update_method[0x1];
+ u8 reserved_at_b1[0x2];
+ u8 log_depth[0x5];
+ u8 log_hash_size[0x8];
+ u8 ste_format_0[0x8];
+ u8 table_type[0x8];
+ u8 ste_format_1[0x8];
+ u8 reserved_at_d8[0x8];
+ u8 match_definer_0[0x20];
+ u8 stc_id[0x20];
+ u8 ste_table_base_id[0x20];
+ u8 ste_table_offset[0x20];
+ u8 reserved_at_160[0x8];
+ u8 miss_flow_table_id[0x18];
+ u8 match_definer_1[0x20];
+ u8 reserved_at_1a0[0x260];
+};
+
+enum mlx5_ifc_stc_action_type {
+ MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+ MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+ MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+ MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+ MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+ MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+ MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+ MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+ MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+ MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+ MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+ MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+ MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+enum mlx5_ifc_stc_reparse_mode {
+ MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+ MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+ MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+ u8 ste_obj_id[0x20];
+ u8 match_definer_id[0x20];
+ u8 reserved_at_40[0x3];
+ u8 log_hash_size[0x5];
+ u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+ u8 reserved_at_0[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+ u8 reserved_at_0[0x8];
+ u8 table_id[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+ u8 flow_counter_id[0x20];
+};
+
+enum {
+ MLX5_ASO_CT_NUM_PER_OBJ = 1,
+ MLX5_ASO_METER_NUM_PER_OBJ = 2,
+ MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+ MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+ u8 aso_object_id[0x20];
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+ u8 reserved_at_0[0x8];
+ u8 command[0x4];
+ u8 reserved_at_c[0x2];
+ u8 type[0x2];
+ u8 reserved_at_10[0xa];
+ u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+ u8 header_modify_pattern_id[0x20];
+ u8 header_modify_argument_id[0x20];
+};
+
+enum mlx5_ifc_header_anchors {
+ MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+ MLX5_HEADER_ANCHOR_MAC = 0x1,
+ MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+ MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_ESP = 0x08,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+ MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+ MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+ MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+ MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+ MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+struct mlx5_ifc_stc_ste_param_remove_bits {
+ u8 action_type[0x4];
+ u8 decap[0x1];
+ u8 reserved_at_5[0x5];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 remove_end_anchor[0x6];
+ u8 reserved_at_18[0x8];
+};
+
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+ u8 action_type[0x4];
+ u8 reserved_at_4[0x6];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 remove_offset[0x7];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_insert_bits {
+ u8 action_type[0x4];
+ u8 encap[0x1];
+ u8 inline_data[0x1];
+ u8 reserved_at_6[0x4];
+ u8 insert_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 insert_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 insert_size[0x7];
+ u8 insert_argument[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_vport_bits {
+ u8 eswitch_owner_vhca_id[0x10];
+ u8 vport_number[0x10];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_21[0x5f];
+};
+
+union mlx5_ifc_stc_param_bits {
+ struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+ struct mlx5_ifc_stc_ste_param_tir_bits tir;
+ struct mlx5_ifc_stc_ste_param_table_bits table;
+ struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+ struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+ struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+ struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+ struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+ struct mlx5_ifc_set_action_in_bits add;
+ struct mlx5_ifc_set_action_in_bits set;
+ struct mlx5_ifc_copy_action_in_bits copy;
+ struct mlx5_ifc_stc_ste_param_vport_bits vport;
+ struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+ struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+ struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+struct mlx5_ifc_stc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x46];
+ u8 reparse_mode[0x2];
+ u8 table_type[0x8];
+ u8 ste_action_offset[0x8];
+ u8 action_type[0x8];
+ u8 reserved_at_a0[0x60];
+ union mlx5_ifc_stc_param_bits stc_param;
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_ste_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x48];
+ u8 table_type[0x8];
+ u8 reserved_at_90[0x370];
+};
+
+struct mlx5_ifc_definer_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x50];
+ u8 format_id[0x10];
+ u8 reserved_at_60[0x60];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+ u8 reserved_at_120[0x20];
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+ u8 reserved_at_180[0x40];
+ u8 ctrl[0xa0];
+ u8 match_mask[0x160];
+};
+
+struct mlx5_ifc_arg_bits {
+ u8 rsvd0[0x88];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_header_modify_pattern_in_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 pattern_length[0x8];
+ u8 reserved_at_88[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+struct mlx5_ifc_create_rtc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_arg_bits arg;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+struct mlx5_ifc_generate_wqe_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mode[0x10];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x8];
+ u8 pdn[0x18];
+ u8 reserved_at_a0[0x160];
+ u8 wqe_ctrl[0x80];
+ u8 wqe_gta_ctrl[0x180];
+ u8 wqe_gta_data_0[0x200];
+ u8 wqe_gta_data_1[0x200];
+};
+
+struct mlx5_ifc_generate_wqe_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x1c0];
+ u8 cqe_data[0x200];
+};
+
+enum mlx5_access_aso_opc_mod {
+ ASO_OPC_MOD_IPSEC = 0x0,
+ ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+ ASO_OPC_MOD_POLICER = 0x2,
+ ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+ ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+ MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+#endif /* MLX5_PRM_H_ */
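The u8 field[bits] layouts above follow the usual mlx5 PRM convention, so they are accessed with the driver's existing MLX5_SET/MLX5_GET/MLX5_ADDR_OF macros rather than by direct member access; a hedged sketch of filling part of a create_rtc command (the buffer name and values are illustrative):

    u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {};
    void *rtc = MLX5_ADDR_OF(create_rtc_in, in, rtc);

    MLX5_SET(rtc, rtc, update_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH);
    MLX5_SET(rtc, rtc, log_hash_size, 10);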
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
new file mode 100644
index 000000000000..8a011b958b43
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static void hws_rule_skip(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt,
+ u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
+{
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
+ *skip_rx = true;
+ } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ *skip_tx = true;
+ } else {
+ /* If no flow source was set for current rule,
+ * check for flow source in matcher attributes.
+ */
+ if (matcher->attr.optimize_flow_src) {
+ *skip_tx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+ return;
+ }
+ }
+}
+
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ bool is_jumbo)
+{
+ struct mlx5hws_rule_match_tag *tag;
+
+ if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+ tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ }
+
+ if (is_jumbo)
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = attr->user_data;
+ dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0_id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1_id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+ } else {
+ pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+ }
+}
+
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+ if (rule->resize_info->rtc_0) {
+ ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+ ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+ }
+ if (rule->resize_info->rtc_1) {
+ ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+ ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+ }
+}
+
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5hws_rule_status rule_status_on_succ)
+{
+ enum mlx5hws_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = MLX5HWS_FLOW_OP_ERROR;
+ rule->status = MLX5HWS_RULE_STATUS_FAILED;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+ mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ bool is_update)
+{
+ if (!mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (likely(!is_update)) {
+ rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+ if (unlikely(!rule->resize_info)) {
+ pr_warn("HWS: resize info isn't allocated for rule\n");
+ return;
+ }
+
+ rule->resize_info->max_stes =
+ rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+ rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
+ rule->matcher->action_ste[0].pool :
+ NULL;
+ rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
+ rule->matcher->action_ste[1].pool :
+ NULL;
+ }
+
+ memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+ sizeof(rule->resize_info->ctrl_seg));
+ memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+ sizeof(rule->resize_info->data_seg));
+}
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+ if (mlx5hws_matcher_is_resizable(rule->matcher) &&
+ rule->resize_info) {
+ kfree(rule->resize_info);
+ rule->resize_info = NULL;
+ }
+}
+
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_match_template *mt = rule->matcher->mt;
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+ if (mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (is_jumbo)
+ memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+ /* nothing to do here */
+}
+
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+ ste_attr->wqe_tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ struct mlx5hws_rule_match_tag *tag =
+ (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ ste_attr->wqe_tag = tag;
+ }
+}
+
+static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_pool_chunk ste = {0};
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+ ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
+ ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
+ if (unlikely(ret)) {
+ mlx5hws_err(matcher->tbl->ctx,
+ "Failed to allocate STE for rule actions\n");
+ return ret;
+ }
+ rule->action_ste_idx = ste.offset;
+
+ return 0;
+}
+
+static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_pool_chunk ste = {0};
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+
+ if (mlx5hws_matcher_is_resizable(matcher)) {
+ /* Free the original action pool if rule was resized */
+ max_stes = rule->resize_info->max_stes;
+ pool = rule->resize_info->action_ste_pool[action_ste_selector];
+ } else {
+ max_stes = matcher->action_ste[action_ste_selector].max_stes;
+ pool = matcher->action_ste[action_ste_selector].pool;
+ }
+
+ /* This release is safe only when the rule match part was deleted */
+ ste.order = ilog2(roundup_pow_of_two(max_stes));
+ ste.offset = rule->action_ste_idx;
+
+ mlx5hws_pool_chunk_free(pool, &ste);
+}
+
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int action_ste_idx;
+ int ret;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 0);
+ if (unlikely(ret))
+ return ret;
+
+ action_ste_idx = rule->action_ste_idx;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 1);
+ if (unlikely(ret)) {
+ hws_rule_free_action_ste_idx(rule, 0);
+ return ret;
+ }
+
+ /* Both pools have to return the same index */
+ if (unlikely(rule->action_ste_idx != action_ste_idx)) {
+ pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
+{
+ if (rule->action_ste_idx > -1) {
+ hws_rule_free_action_ste_idx(rule, 1);
+ hws_rule_free_action_ste_idx(rule, 0);
+ }
+}
+
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ struct mlx5hws_actions_apply_data *apply,
+ bool is_update)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+ /* Init rule before reuse */
+ if (!is_update) {
+ /* In update we reuse these RTCs, so only reset them for a new rule */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+ rule->action_ste_selector = 0;
+ } else {
+ rule->action_ste_selector = !rule->action_ste_selector;
+ }
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res[tbl->type];
+ apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->require_dep = 0;
+}
+
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ /* Save the old RTC IDs to be later used in match STE delete */
+ rule->resize_info->rtc_0 = rule->rtc_0;
+ rule->resize_info->rtc_1 = rule->rtc_1;
+ rule->resize_info->rule_idx = attr->rule_idx;
+
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->action_ste_selector = 0;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+ return mlx5hws_matcher_is_in_resize(rule->matcher) &&
+ rule->resize_info &&
+ rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ struct mlx5hws_actions_wqe_setter *setter;
+ struct mlx5hws_actions_apply_data apply;
+ struct mlx5hws_send_engine *queue;
+ u8 total_stes, action_stes;
+ bool is_update;
+ int i, ret;
+
+ is_update = !match_param;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5hws_send_engine_err(queue)))
+ return -EIO;
+
+ hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+ hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ if (action_stes) {
+ /* Allocate action STEs for rules that need more than match STE */
+ if (!is_update) {
+ ret = hws_rule_alloc_action_ste(rule, attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d\n", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ }
+ /* Skip RX/TX based on the dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ /* Action STEs are written to a specific index, from last to first */
+ ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
+ for (i = total_stes; i-- > 0;) {
+ mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE.
+ * For hash split / linear lookup RTCs, packets reaching any STE
+ * will always match and perform the specified actions, which
+ * makes the tag irrelevant.
+ */
+ if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+ mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+ (u8 *)dep_wqe->wqe_data.action);
+ else if (is_update)
+ hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.direct_index = dep_wqe->direct_index;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ }
+
+ /* Backup TAG on the rule for deletion and resize info for
+ * moving rules to a new matcher, only after insertion.
+ */
+ if (!is_update)
+ hws_rule_save_delete_info(rule, &ste_attr);
+
+ hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ /* Rule failed now we can safely release action STEs */
+ mlx5hws_rule_free_action_ste(rule);
+
+ /* Clear complex tag */
+ hws_rule_clear_delete_info(rule);
+
+ /* Clear info that was saved for resizing */
+ mlx5hws_rule_clear_resize_info(rule);
+
+ /* If the failed rule was not marked as burst (i.e. it needs to trigger
+ * the HW), nothing was written to the WQ, so there is no doorbell to
+ * ring. In that case flush the last WQE and ring the HW with that work.
+ */
+ if (attr->burst)
+ return;
+
+ mlx5hws_send_all_dep_wqe(queue);
+ mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ return -EBUSY;
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (rule->skip_delete) {
+ /* Rule shouldn't be deleted in HW.
+ * Generate completion as if write succeeded, and we can
+ * safely release action STEs and clear resize info.
+ */
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_clear_resize_info(rule);
+ return 0;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = attr->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+ hws_rule_clear_delete_info(rule);
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data))
+ return -EINVAL;
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EINVAL;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+ /* Matcher in resize - new rules are not allowed */
+ return -EAGAIN;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+ !matcher->attr.optimize_using_rule_idx &&
+ !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EBUSY;
+
+ return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue_ptr,
+ void *user_data)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_engine *queue = queue_ptr;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.send_attr.user_data = user_data;
+ ste_attr.rtc_0 = rule->resize_info->rtc_0;
+ ste_attr.rtc_1 = rule->resize_info->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+ ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = rule->resize_info->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_move(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ ret = mlx5hws_send_engine_err(queue);
+ if (ret)
+ return ret;
+
+ hws_rule_move_init(rule, attr);
+ hws_rule_move_get_rtc(rule, &ste_attr);
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+ ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+ ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
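Taken together with mlx5hws_rule_move_hws_remove() above, a resize-driven rule move is a two-step, per-rule sequence; a rough sketch of the order the matcher code is expected to drive it (queue and attr are illustrative):

    /* 1. Write the rule into the resize-destination matcher's RTCs */
    mlx5hws_rule_move_hws_add(rule, &attr);

    /* 2. Once that write completes, deactivate the match STE in the old RTCs */
    mlx5hws_rule_move_hws_remove(rule, queue, attr.user_data);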
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle)
+{
+ int ret;
+
+ rule_handle->matcher = matcher;
+
+ ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(mt_idx >= matcher->num_of_mt ||
+ at_idx >= matcher->num_of_at ||
+ !match_param)) {
+ pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+ return -EINVAL;
+ }
+
+ ret = hws_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ match_param,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_destroy_hws(rule, attr);
+
+ return ret;
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_update(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_create_hws(rule,
+ attr,
+ 0,
+ NULL,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
new file mode 100644
index 000000000000..495cdd17e9f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+ MLX5HWS_STE_CTRL_SZ = 20,
+ MLX5HWS_ACTIONS_SZ = 12,
+ MLX5HWS_MATCH_TAG_SZ = 32,
+ MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+ MLX5HWS_RULE_STATUS_UNKNOWN,
+ MLX5HWS_RULE_STATUS_CREATING,
+ MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_DELETING,
+ MLX5HWS_RULE_STATUS_DELETED,
+ MLX5HWS_RULE_STATUS_FAILING,
+ MLX5HWS_RULE_STATUS_FAILED,
+};
+
+enum mlx5hws_rule_move_state {
+ MLX5HWS_RULE_RESIZE_STATE_IDLE,
+ MLX5HWS_RULE_RESIZE_STATE_WRITING,
+ MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+ MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+ union {
+ u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+ struct {
+ u8 reserved[MLX5HWS_ACTIONS_SZ];
+ u8 match[MLX5HWS_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5hws_rule_resize_info {
+ struct mlx5hws_pool *action_ste_pool[2];
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 rule_idx;
+ u8 state;
+ u8 max_stes;
+ u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+ struct mlx5hws_matcher *matcher;
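+	/* Either the match tag of an installed rule, or, while the rule is
+	 * being moved between matchers, the resize info used for the move.
+	 */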
+ union {
+ struct mlx5hws_rule_match_tag tag;
+ struct mlx5hws_rule_resize_info *resize_info;
+ };
+ u32 rtc_0; /* The RTC into which the STE was inserted */
+ u32 rtc_1; /* The RTC into which the STE was inserted */
+ int action_ste_idx; /* STE array index */
+ u8 status; /* enum mlx5hws_rule_status */
+ u8 action_ste_selector; /* For rule update - which action STE is in use */
+ u8 pending_wqes;
+ bool skip_delete; /* For complex rules - another rule with same tag
+ * still exists, so don't actually delete this rule.
+ */
+};
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
new file mode 100644
index 000000000000..a1adbb48735c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
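+/* Dependent WQEs are staged in a shadow ring (head/tail indices) and are
+ * only written to the SQ when mlx5hws_send_all_dep_wqe() flushes them.
+ */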
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
+ memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+ return &send_sq->dep_wqe[idx];
+}
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ queue->send_ring.send_sq.head_dep_idx--;
+}
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+ /* Fence first from previous depend WQEs */
+ ste_attr.send_attr.fence = 1;
+
+ while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+ dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+ /* Notify HW on the last WQE */
+ ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ ste_attr.direct_index = dep_wqe->direct_index;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ /* Fencing is done only on the first WQE */
+ ste_attr.send_attr.fence = 0;
+ }
+}
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+
+ ctrl.queue = queue;
+ /* Currently only one send ring is supported */
+ ctrl.send_ring = &queue->send_ring;
+ ctrl.num_wqebbs = 0;
+
+ return ctrl;
+}
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+ unsigned int idx;
+
+ idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+	/* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used
+	 * as a buffer for more than one WQE_BB, since two consecutive
+	 * MLX5_SEND_WQE_BBs may reside on different kernel memory pages.
+	 */
+ *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+ *len = MLX5_SEND_WQE_BB;
+
+ if (!ctrl->num_wqebbs) {
+ *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+ *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+ }
+
+ ctrl->num_wqebbs++;
+}
+
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ struct mlx5hws_rule_match_tag *tag,
+ bool is_jumbo)
+{
+ if (is_jumbo) {
+ /* Clear previous possibly dirty control */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ } else {
+ /* Clear previous possibly dirty control and actions */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+ }
+}
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr)
+{
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_ring_sq *sq;
+ unsigned int idx;
+ u32 flags = 0;
+
+ sq = &ctrl->send_ring->send_sq;
+ idx = sq->cur_post & sq->buf_mask;
+ sq->last_idx = idx;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
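+	/* Fill the WQE ctrl segment: opmod in bits 31:24, the low 16 bits of
+	 * the SQ producer index in bits 23:8 and the opcode in bits 7:0.
+	 * qpn_ds carries the WQE size in 16-byte units and the SQ number.
+	 */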
+ wqe_ctrl->opmod_idx_opcode =
+ cpu_to_be32((attr->opmod << 24) |
+ ((sq->cur_post & 0xffff) << 8) |
+ attr->opcode);
+ wqe_ctrl->qpn_ds =
+ cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+ sq->sqn << 8);
+ wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+ flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+ wqe_ctrl->flags = cpu_to_be32(flags);
+
+ sq->wr_priv[idx].id = attr->id;
+ sq->wr_priv[idx].retry_id = attr->retry_id;
+
+ sq->wr_priv[idx].rule = attr->rule;
+ sq->wr_priv[idx].user_data = attr->user_data;
+ sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+ if (attr->rule) {
+ sq->wr_priv[idx].rule->pending_wqes++;
+ sq->wr_priv[idx].used_id = attr->used_id;
+ }
+
+ sq->cur_post += ctrl->num_wqebbs;
+
+ if (attr->notify_hw)
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_data,
+ void *send_wqe_tag,
+ bool is_jumbo,
+ u8 gta_opcode,
+ u32 direct_index)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+ memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+ sizeof(send_wqe_ctrl->stc_ix));
+
+ if (send_wqe_data)
+ memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+ else
+ hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+ mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ u8 notify_hw = send_attr->notify_hw;
+ u8 fence = send_attr->fence;
+
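+	/* A rule may need to be written to two RTCs (e.g. RX and TX in FDB).
+	 * Only the first posted WQE carries the fence, and only the last one
+	 * requests a HW completion.
+	 */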
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ send_attr->fence = fence;
+ send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ send_attr->fence = fence && !ste_attr->rtc_1;
+ send_attr->notify_hw = notify_hw;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ /* Restore to original requested values */
+ send_attr->notify_hw = notify_hw;
+ send_attr->fence = fence;
+}
+
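+/* On a failed completion, re-post the same GTA WQE towards the retry RTC.
+ * The original ctrl and data segments are copied back out of the SQ buffer
+ * at the failed WQE's index.
+ */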
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_send_ring_sq *send_sq;
+ unsigned int idx;
+ size_t wqe_len;
+ char *p;
+
+ send_attr.rule = priv->rule;
+ send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+ send_attr.notify_hw = 1;
+ send_attr.fence = 0;
+ send_attr.user_data = priv->user_data;
+ send_attr.id = priv->retry_id;
+ send_attr.used_id = priv->used_id;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ send_sq = &ctrl.send_ring->send_sq;
+ idx = wqe_cnt & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta ctrl */
+ memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+ idx = (wqe_cnt + 1) & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta data */
+ memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+ wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ enum mlx5hws_flow_op_status *status)
+{
+ switch (priv->rule->resize_info->state) {
+ case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ /* Backup original RTCs */
+ u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+ u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+ /* Delete partially failed move rule using resize_info */
+ priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+ priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+ /* Move rule to original RTC for future delete */
+ priv->rule->rtc_0 = orig_rtc_0;
+ priv->rule->rtc_1 = orig_rtc_1;
+ }
+ /* Clean leftovers */
+ mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+ break;
+
+ case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ priv->rule->matcher = priv->rule->matcher->resize_dst;
+ }
+ priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt,
+ enum mlx5hws_flow_op_status *status)
+{
+ priv->rule->pending_wqes--;
+
+ if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (priv->retry_id) {
+ hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+ return;
+ }
+ /* Some part of the rule failed */
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+ *priv->used_id = 0;
+ } else {
+ *priv->used_id = priv->id;
+ }
+
+ /* Update rule status for the last completion */
+ if (!priv->rule->pending_wqes) {
+ if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+ hws_send_engine_update_rule_resize(queue, priv, status);
+ return;
+ }
+
+ if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+ /* Rule completely failed and doesn't require cleanup */
+ if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+			/* Advance the status; this only works on the good flow,
+			 * as the enum is ordered:
+			 * creating -> created -> deleting -> deleted
+			 */
+ priv->rule->status++;
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ /* Rule was deleted now we can safely release action STEs
+ * and clear resize info
+ */
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+ mlx5hws_rule_free_action_ste(priv->rule);
+ mlx5hws_rule_clear_resize_info(priv->rule);
+ }
+ }
+ }
+}
+
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5hws_flow_op_result res[],
+ s64 *i,
+ u32 res_nb,
+ u16 wqe_cnt)
+{
+ enum mlx5hws_flow_op_status status;
+
+ if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+ likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+ status = MLX5HWS_FLOW_OP_SUCCESS;
+ } else {
+ status = MLX5HWS_FLOW_OP_ERROR;
+ }
+
+ if (priv->user_data) {
+ if (priv->rule) {
+ hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ /* Completion is provided on the last rule WQE */
+ if (priv->rule->pending_wqes)
+ return;
+ }
+
+ if (*i < res_nb) {
+ res[*i].user_data = priv->user_data;
+ res[*i].status = status;
+ (*i)++;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+ }
+ }
+}
+
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+ struct mlx5_cqe64 *cqe64)
+{
+ if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+ mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ return CQ_POLL_ERR;
+ }
+
+ return CQ_OK;
+}
+
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe64) {
+ if (unlikely(cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
+ return CQ_EMPTY;
+ }
+
+ mlx5_cqwq_pop(&cq->wq);
+ err = mlx5hws_parse_cqe(cq, cqe64);
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ return err;
+}
+
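+/* A single CQE can complete a batch of WQEs. WQEs that precede the one
+ * reported by the CQE completed successfully without their own CQE and
+ * are processed here with a NULL cqe.
+ */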
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+ struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+ struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+ struct mlx5hws_send_ring_priv *priv;
+ struct mlx5_cqe64 *cqe;
+ u8 cqe_opcode;
+ u16 wqe_cnt;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return;
+
+ cqe_opcode = get_cqe_opcode(cqe);
+ if (cqe_opcode == MLX5_CQE_INVALID)
+ return;
+
+ if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+ queue->err = true;
+
+ wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
+ while (cq->poll_wqe != wqe_cnt) {
+ priv = &sq->wr_priv[cq->poll_wqe];
+ hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+ cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+ }
+
+ priv = &sq->wr_priv[wqe_cnt];
+ cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+ hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+ mlx5hws_cq_poll_one(cq);
+}
+
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ while (comp->ci != comp->pi) {
+ if (*polled < res_nb) {
+ res[*polled].status =
+ comp->entries[comp->ci].status;
+ res[*polled].user_data =
+ comp->entries[comp->ci].user_data;
+ (*polled)++;
+ comp->ci = (comp->ci + 1) & comp->mask;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ return;
+ }
+ }
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ s64 polled = 0;
+
+ hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+ if (polled >= res_nb)
+ return polled;
+
+ hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+ return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ void *sqc_data)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ size_t buf_sz;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->mdev = mdev;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+ if (!sq->dep_wqe) {
+ err = -ENOMEM;
+ goto destroy_wq_cyc;
+ }
+
+	sq->wr_priv = kcalloc(buf_sz, sizeof(*sq->wr_priv), GFP_KERNEL);
+ if (!sq->wr_priv) {
+ err = -ENOMEM;
+ goto free_dep_wqe;
+ }
+
+ sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+ return 0;
+
+free_dep_wqe:
+ kfree(sq->dep_wqe);
+destroy_wq_cyc:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ if (!sq)
+ return;
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+}
+
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ int err;
+
+ err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ hws_send_ring_close_sq(sq);
+
+ return err;
+}
+
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ size_t buf_sz, sq_log_buf_sz;
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, ctx->pd_num);
+ MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+ err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+ if (err)
+ goto err_free_sqc;
+
+ err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+ queue, sq, cq);
+ if (err)
+ goto err_free_sq;
+
+ kvfree(sqc_data);
+
+ return 0;
+err_free_sq:
+ hws_send_ring_free_sq(sq);
+err_free_sqc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void hws_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+	pr_err("Unexpected CQ completion on CQ #%u\n", mcq->cqn);
+}
+
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ struct mlx5_cqe64 *cqe;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ mcq->comp = hws_cq_complete;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ int numa_node,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+ if (err)
+ goto err_out;
+
+ err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+ if (err)
+ goto err_free_cq;
+
+ kvfree(cqc_data);
+
+ return 0;
+
+err_free_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close_sq(&queue->send_ring.send_sq);
+ hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+ struct mlx5hws_send_ring *ring = &queue->send_ring;
+ int err;
+
+ err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+ &ring->send_cq);
+ if (err)
+ goto close_cq;
+
+ return err;
+
+close_cq:
+ hws_send_ring_close_cq(&ring->send_cq);
+ return err;
+}
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close(queue);
+ kfree(queue->completed.entries);
+}
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
+{
+ int err;
+
+ mutex_init(&queue->lock);
+
+ queue->num_entries = roundup_pow_of_two(queue_size);
+ queue->used_entries = 0;
+
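+	/* The completed ring buffers completions that are generated internally
+	 * (e.g. by the FW path) or that overflow the caller's result array
+	 * during poll.
+	 */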
+ queue->completed.entries = kcalloc(queue->num_entries,
+ sizeof(queue->completed.entries[0]),
+ GFP_KERNEL);
+ if (!queue->completed.entries)
+ return -ENOMEM;
+
+ queue->completed.pi = 0;
+ queue->completed.ci = 0;
+ queue->completed.mask = queue->num_entries - 1;
+ err = mlx5hws_send_ring_open(ctx, queue);
+ if (err)
+ goto free_completed_entries;
+
+ return 0;
+
+free_completed_entries:
+ kfree(queue->completed.entries);
+ return err;
+}
+
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+ while (queues--)
+ mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+ kfree(ctx->bwc_send_queue_locks);
+}
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+ hws_send_queues_bwc_locks_destroy(ctx);
+ __hws_send_queues_close(ctx, ctx->queues);
+ kfree(ctx->send_queue);
+}
+
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+	/* The number of BWC queues matches the number of regular HWS queues,
+	 * i.e. all the queues except the control queue.
+	 */
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return 0;
+
+ ctx->queues += bwc_queues;
+
+ ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_send_queue_locks),
+ GFP_KERNEL);
+
+ if (!ctx->bwc_send_queue_locks)
+ return -ENOMEM;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_init(&ctx->bwc_send_queue_locks[i]);
+
+ return 0;
+}
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size)
+{
+ int err = 0;
+ u32 i;
+
+ /* Open one extra queue for control path */
+ ctx->queues = queues + 1;
+
+ /* open a separate set of queues and locks for bwc API */
+ err = hws_bwc_send_queues_init(ctx);
+ if (err)
+ return err;
+
+ ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+ if (!ctx->send_queue) {
+ err = -ENOMEM;
+ goto free_bwc_locks;
+ }
+
+ for (i = 0; i < ctx->queues; i++) {
+ err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err)
+ goto close_send_queues;
+ }
+
+ return 0;
+
+close_send_queues:
+ __hws_send_queues_close(ctx, i);
+
+ kfree(ctx->send_queue);
+
+free_bwc_locks:
+ hws_send_queues_bwc_locks_destroy(ctx);
+
+ return err;
+}
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions)
+{
+ struct mlx5hws_send_ring_sq *send_sq;
+ struct mlx5hws_send_engine *queue;
+ bool wait_comp = false;
+ s64 polled = 0;
+
+ queue = &ctx->send_queue[queue_id];
+ send_sq = &queue->send_ring.send_sq;
+
+ switch (actions) {
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+ wait_comp = true;
+ fallthrough;
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+ if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+ /* Send dependent WQEs to drain the queue */
+ mlx5hws_send_all_dep_wqe(queue);
+ else
+ /* Signal on the last posted WQE */
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll queue until empty */
+ while (wait_comp && !mlx5hws_send_engine_empty(queue))
+ hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
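+/* FW path: instead of posting to the SQ, build the WQE segments in memory
+ * and execute them synchronously through the GENERATE_WQE command, which
+ * returns the resulting CQE inline.
+ */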
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+ u32 pd_num,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_match_data,
+ void *send_wqe_match_tag,
+ void *send_wqe_range_data,
+ void *send_wqe_range_tag,
+ bool is_jumbo,
+ u8 gta_opcode)
+{
+ bool has_range = send_wqe_range_data || send_wqe_range_tag;
+ bool has_match = send_wqe_match_data || send_wqe_match_tag;
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+ struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+ struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+ struct mlx5_cqe64 cqe;
+ u32 flags = 0;
+ int ret;
+
+ /* Set WQE control */
+ wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+ wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+ flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wqe_ctrl.flags = cpu_to_be32(flags);
+ wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+ /* Set GTA WQE CTRL */
+ memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+ gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+ /* Set GTA match WQE DATA */
+ if (has_match) {
+ if (send_wqe_match_data)
+ memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+ gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+ attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+ }
+
+ /* Set GTA range WQE DATA */
+ if (has_range) {
+ if (send_wqe_range_data)
+ memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+ gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+ attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+ }
+
+ attr.pdn = pd_num;
+ attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+ attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+ ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+ if (ret) {
+		mlx5_core_err(mdev, "Failed to write WQE using command\n");
+ return ret;
+ }
+
+ if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+ (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+ *send_attr->used_id = send_attr->id;
+ return 0;
+ }
+
+ /* Retry if rule failed */
+ if (send_attr->retry_id) {
+ wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+ send_attr->id = send_attr->retry_id;
+ send_attr->retry_id = 0;
+ goto send_wqe;
+ }
+
+ return -1;
+}
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ struct mlx5hws_rule *rule = send_attr->rule;
+ struct mlx5_core_dev *mdev;
+ u16 queue_id;
+ u32 pdn;
+ int ret;
+
+ queue_id = queue - ctx->send_queue;
+ mdev = ctx->mdev;
+ pdn = ctx->pd_num;
+
+	/* Writes that go through FW cannot be HW-fenced, therefore drain the queue first */
+ if (send_attr->fence)
+ mlx5hws_send_queue_action(ctx,
+ queue_id,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+	/* Advance the status; this only works on the good flow, as the enum
+	 * is ordered: creating -> created -> deleting -> deleted
+	 */
+ if (likely(rule))
+ rule->status++;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+ return;
+
+fail_rule:
+ if (likely(rule))
+ rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+ MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
new file mode 100644
index 000000000000..b50825d6dc53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs, so a ring of
+ * 32 WQEBBs allows a maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+ MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+ MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+ MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+ MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+ MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+ MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+ MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+ MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+ MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ __be32 flags;
+ __be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+ __be32 op_dirix;
+ __be32 stc_ix[5];
+ __be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+ __be32 rsvd0_ctr_id;
+ __be32 rsvd1_definer;
+ __be32 rsvd2[3];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+ __be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+ struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+ union {
+ struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+ struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+ };
+};
+
+struct mlx5hws_send_ring_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+ struct mlx5hws_rule *rule;
+ void *user_data;
+ u32 num_wqebbs;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+ struct mlx5hws_rule *rule;
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 direct_index;
+ void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+ struct mlx5_core_dev *mdev;
+ u16 cur_post;
+ u16 buf_mask;
+ struct mlx5hws_send_ring_priv *wr_priv;
+ unsigned int last_idx;
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ unsigned int head_dep_idx;
+ unsigned int tail_dep_idx;
+ u32 sqn;
+ struct mlx5_wq_cyc wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+ struct mlx5hws_send_ring_cq send_cq;
+ struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+ void *user_data;
+ enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+ struct mlx5hws_completed_poll_entry *entries;
+ u16 ci;
+ u16 pi;
+ u16 mask;
+};
+
+struct mlx5hws_send_engine {
+ struct mlx5hws_send_ring send_ring;
+ struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+ struct mlx5hws_completed_poll completed;
+ u16 used_entries;
+ u16 num_entries;
+ bool err;
+ struct mutex lock; /* Protects the send engine */
+};
+
+struct mlx5hws_send_engine_post_ctrl {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_send_ring *send_ring;
+ size_t num_wqebbs;
+};
+
+struct mlx5hws_send_engine_post_attr {
+ u8 opcode;
+ u8 opmod;
+ u8 notify_hw;
+ u8 fence;
+ u8 match_definer_id;
+ u8 range_definer_id;
+ size_t len;
+ struct mlx5hws_rule *rule;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+ void *user_data;
+};
+
+struct mlx5hws_send_ste_attr {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 *used_id_rtc_0;
+ u32 *used_id_rtc_1;
+ bool wqe_tag_is_jumbo;
+ u8 gta_opcode;
+ u32 direct_index;
+ struct mlx5hws_send_engine_post_attr send_attr;
+ struct mlx5hws_rule_match_tag *wqe_tag;
+ struct mlx5hws_rule_match_tag *range_wqe_tag;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr);
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
+ return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+ return queue->used_entries >= queue->num_entries;
+}
+
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries++;
+}
+
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries--;
+}
+
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+ void *user_data,
+ int comp_status)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ comp->entries[comp->pi].status = comp_status;
+ comp->entries[comp->pi].user_data = user_data;
+
+ comp->pi = (comp->pi + 1) & comp->mask;
+}
+
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+ return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
new file mode 100644
index 000000000000..8c063a8d87d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+ return tbl->ft_id;
+}
+
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ ft_attr->type = tbl->fw_ft_type;
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+ ft_attr->rtc_valid = true;
+}
+
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ /* Enabling reformat_en or decap_en for the first flow table
+ * must be done when all VFs are down.
+ * However, HWS doesn't know when it is required to create the first FT.
+ * On the other hand, HWS doesn't use all these FT capabilities at all
+ * (the API doesn't even provide a way to specify these flags), so we'll
+ * just set these caps on all the flow tables.
+ * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
+ */
+ if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+ ft_attr->reformat_en = true;
+ ft_attr->decap_en = true;
+ }
+}
+
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_cmd_set_fte_dest dest = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return 0;
+
+ if (ctx->common_res[tbl_type].default_miss) {
+ ctx->common_res[tbl_type].default_miss->refcount++;
+ return 0;
+ }
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+ ft_attr.rtc_valid = false;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+
+ default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!default_miss) {
+		mlx5hws_err(ctx, "Failed to create default miss table, type: 0x%x\n", tbl_type);
+ return -EINVAL;
+ }
+
+ /* ctx->ctrl_lock must be held here */
+ ctx->common_res[tbl_type].default_miss = default_miss;
+ ctx->common_res[tbl_type].default_miss->refcount++;
+
+ return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ default_miss = ctx->common_res[tbl_type].default_miss;
+ if (--default_miss->refcount)
+ return;
+
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+ ctx->common_res[tbl_type].default_miss = NULL;
+}
+
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+ mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+ tbl->fw_ft_type,
+ tbl->type,
+ &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+ hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ /* Take/create ref over the default miss */
+ ret = hws_table_up_default_fdb_miss_tbl(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+ goto free_ft_obj;
+ }
+ ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+ goto down_miss_tbl;
+ }
+ }
+
+ return 0;
+
+down_miss_tbl:
+ hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+ return ret;
+}
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id)
+{
+ mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+ hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ ret = hws_table_init_check_hws_support(ctx, tbl);
+ if (ret)
+ return ret;
+
+ if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+ }
+
+ ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+ if (ret)
+ goto tbl_destroy;
+
+ INIT_LIST_HEAD(&tbl->matchers_list);
+ INIT_LIST_HEAD(&tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+tbl_destroy:
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+ mutex_lock(&tbl->ctx->ctrl_lock);
+ mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+ return NULL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise table\n");
+ goto free_tbl;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return tbl;
+
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (!list_empty(&tbl->matchers_list)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ if (!list_empty(&tbl->default_miss.head)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ list_del_init(&tbl->tbl_list_node);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ hws_table_uninit(tbl);
+ kfree(tbl);
+
+ return 0;
+
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_matcher *matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return tbl->ft_id;
+
+ matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+ return matcher->end_ft_id;
+}
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ /* Due to FW limitation, resetting the flow table to default action will
+ * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+ */
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+ return 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+ ft_attr.type = fw_ft_type;
+ ft_attr.rtc_id_0 = rtc_0_id;
+ ft_attr.rtc_id_1 = rtc_1_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+ ft_attr.type = fw_ft_type;
+ ft_attr.table_miss_id = next_ft_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_table *src_tbl;
+ int ret;
+
+ if (list_empty(&dst_tbl->default_miss.head))
+ return 0;
+
+ list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+ ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+ if (ret) {
+ mlx5hws_err(dst_tbl->ctx,
+ "Failed to update source miss table, unexpected behavior\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_matcher *matcher;
+ u32 last_ft_id;
+ int ret;
+
+ last_ft_id = hws_table_get_last_ft(src_tbl);
+
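+	/* Connecting the miss path means pointing the last FT of src_tbl
+	 * either at dst_tbl's anchor FT (when it has no matchers) or directly
+	 * at the RTCs of dst_tbl's first matcher. A NULL dst_tbl resets the
+	 * miss path to the default behavior.
+	 */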
+ if (dst_tbl) {
+ if (list_empty(&dst_tbl->matchers_list)) {
+ /* Connect src_tbl last_ft to dst_tbl start anchor */
+ ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ /* Connect src_tbl last_ft to first matcher RTC */
+ matcher = list_first_entry(&dst_tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret)
+ return ret;
+
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ src_tbl->default_miss.miss_tbl = dst_tbl;
+
+ return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+ mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+	if (miss_tbl && miss_tbl->type != tbl->type) {
+ mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_table *old_miss_tbl;
+ int ret;
+
+ ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+ if (ret)
+ goto out;
+
+ if (old_miss_tbl)
+ list_del_init(&tbl->default_miss.next);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ if (old_miss_tbl)
+ list_del_init(&old_miss_tbl->default_miss.head);
+
+ if (miss_tbl)
+ list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
new file mode 100644
index 000000000000..dd50420eec9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+	/* The table that this table's default miss points to */
+ struct mlx5hws_table *miss_tbl;
+ struct list_head next;
+	/* Tables whose default miss points to this table */
+ struct list_head head;
+};
+
+struct mlx5hws_table {
+ struct mlx5hws_context *ctx;
+ u32 ft_id;
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+int mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+ u8 *ret_type)
+{
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return -EOPNOTSUPP;
+
+ *ret_type = FS_FT_FDB;
+
+ return 0;
+}
+
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+ bool is_mirror)
+{
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
new file mode 100644
index 000000000000..faf42421c43f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return 0;
+
+ xa_init(&ctx->vports.vport_gvmi_xa);
+
+ /* Set gvmi for the eswitch manager and uplink vports only. The rest of
+ * the vports (vport 0 of other functions, VFs and SFs) will be queried
+ * dynamically.
+ */
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+ if (ret)
+ return ret;
+
+ ctx->vports.uplink_gvmi = 0;
+ return 0;
+}
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+ if (ctx->caps->eswitch_manager)
+ xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+ u16 vport_gvmi;
+ int ret;
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+ if (ret)
+ return -EINVAL;
+
+ ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+ xa_mk_value(vport_gvmi), GFP_KERNEL);
+ if (ret)
+ mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+ return ret;
+}
+
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+ return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+ vport == MLX5_VPORT_PF;
+}
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+ void *entry;
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return -EINVAL;
+
+ if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+ *vport_gvmi = ctx->vports.esw_manager_gvmi;
+ return 0;
+ }
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ *vport_gvmi = ctx->vports.uplink_gvmi;
+ return 0;
+ }
+
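+ /* The xarray caches GVMIs queried from firmware. xa_insert() returns
+ * -EBUSY if another thread raced us and already inserted this vport's
+ * entry, in which case it is simply reloaded.
+ */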
+load_entry:
+ entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+ if (!xa_is_value(entry)) {
+ ret = hws_vport_add_gvmi(ctx, vport);
+ if (ret && ret != -EBUSY)
+ return ret;
+ goto load_entry;
+ }
+
+ *vport_gvmi = (u16)xa_to_value(entry);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
new file mode 100644
index 000000000000..0912fc166b3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d61478c0c632..e746cd9c68ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -165,52 +165,22 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
return -ENODEV;
}
-static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i, err;
+ const struct mlxsw_cooling_states *state = trip->priv;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ return false;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0) {
- dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
- return err;
- }
- }
- return 0;
-}
-
-static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct device *dev = thermal->bus_info->dev;
- int i;
- int err;
-
- /* If the cooling device is our one unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- if (err < 0) {
- dev_err(dev, "Failed to unbind cooling device\n");
- return err;
- }
- }
- return 0;
+ return true;
}
static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
@@ -240,57 +210,27 @@ static struct thermal_zone_params mlxsw_thermal_params = {
};
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
- .bind = mlxsw_thermal_bind,
- .unbind = mlxsw_thermal_unbind,
+ .should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
-static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
+static bool mlxsw_thermal_module_should_bind(struct thermal_zone_device *tzdev,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
+ const struct mlxsw_cooling_states *state = trip->priv;
struct mlxsw_thermal *thermal = tz->parent;
- int i, j, err;
/* If the cooling device is one of ours bind it */
if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
-
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
-
- err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- state->max_state,
- state->min_state,
- THERMAL_WEIGHT_DEFAULT);
- if (err < 0)
- goto err_thermal_zone_bind_cooling_device;
- }
- return 0;
-
-err_thermal_zone_bind_cooling_device:
- for (j = i - 1; j >= 0; j--)
- thermal_zone_unbind_cooling_device(tzdev, j, cdev);
- return err;
-}
-
-static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
- struct thermal_cooling_device *cdev)
-{
- struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev);
- struct mlxsw_thermal *thermal = tz->parent;
- int i;
- int err;
+ return false;
- /* If the cooling device is one of ours unbind it */
- if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
- return 0;
+ c->upper = state->max_state;
+ c->lower = state->min_state;
- for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
- WARN_ON(err);
- }
- return err;
+ return true;
}
static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
@@ -313,8 +253,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -342,8 +281,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
}
static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
- .bind = mlxsw_thermal_module_bind,
- .unbind = mlxsw_thermal_module_unbind,
+ .should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
@@ -411,7 +349,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[THERMAL_NAME_LENGTH];
+ char tz_name[40];
int err;
if (module_tz->slot_index)
@@ -445,17 +383,14 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
thermal_zone_device_unregister(tzdev);
}
-static void
-mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal,
+static int
+mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int i;
module_tz = &area->tz_module_arr[module];
- /* Skip if parent is already set (case of port split). */
- if (module_tz->parent)
- return;
module_tz->module = module;
module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
@@ -465,15 +400,15 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(module_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ module_tz->trips[i].priv = &module_tz->cooling_states[i];
+
+ return mlxsw_thermal_module_tz_init(module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
{
- if (module_tz && module_tz->tzdev) {
- mlxsw_thermal_module_tz_fini(module_tz->tzdev);
- module_tz->tzdev = NULL;
- module_tz->parent = NULL;
- }
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
}
static int
@@ -481,7 +416,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area)
{
- struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
@@ -503,22 +437,16 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < area->tz_module_num; i++)
- mlxsw_thermal_module_init(dev, core, thermal, area, i);
-
for (i = 0; i < area->tz_module_num; i++) {
- module_tz = &area->tz_module_arr[i];
- if (!module_tz->parent)
- continue;
- err = mlxsw_thermal_module_tz_init(module_tz);
+ err = mlxsw_thermal_module_init(thermal, area, i);
if (err)
- goto err_thermal_module_tz_init;
+ goto err_thermal_module_init;
}
return 0;
-err_thermal_module_tz_init:
- for (i = area->tz_module_num - 1; i >= 0; i--)
+err_thermal_module_init:
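+ /* Unwind only the zones initialized so far: i points at the module
+ * that failed, so start from the previous one.
+ */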
+ for (i--; i >= 0; i--)
mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
kfree(area->tz_module_arr);
return err;
@@ -579,7 +507,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 gbox_num;
- int i;
+ int i, j;
int err;
mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
@@ -606,6 +534,9 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(gearbox_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+ for (j = 0; j < MLXSW_THERMAL_NUM_TRIPS; j++)
+ gearbox_tz->trips[j].priv = &gearbox_tz->cooling_states[j];
+
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
gearbox_tz->slot_index = area->slot_index;
@@ -722,6 +653,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++)
+ thermal->trips[i].priv = &thermal->cooling_states[i];
+
thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
@@ -821,10 +755,7 @@ err_linecards_event_ops_register:
err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
@@ -845,10 +776,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal);
mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f064789f3240..3f5e5d99251b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1676,9 +1676,11 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ dev->lltx = true;
+ dev->netns_local = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2784,7 +2786,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp1_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
@@ -2801,7 +2805,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
@@ -2818,7 +2824,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+#endif
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 769095d4932d..c8aa1452fbb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,14 +11,6 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info)
-{
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- return 0;
-}
-
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
struct mlxsw_sp_ptp_clock *
@@ -151,12 +143,6 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
{
}
-static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct kernel_ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
static inline int mlxsw_sp1_get_stats_count(void)
{
return 0;
@@ -226,12 +212,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct kernel_ethtool_ts_info *info)
-{
- return mlxsw_sp_ptp_get_ts_info_noptp(info);
-}
-
static inline int
mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index c002ede36402..85519690b837 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -23,6 +23,8 @@ config FBNIC
depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
+ select NET_DEVLINK
+ select PAGE_POOL
select PHYLINK
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 9373b558fdc9..ed4533a73c57 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -8,7 +8,9 @@
obj-$(CONFIG_FBNIC) += fbnic.o
fbnic-y := fbnic_devlink.o \
+ fbnic_ethtool.o \
fbnic_fw.o \
+ fbnic_hw_stats.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index ad2689bfd6cb..0f9e8d79461c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -11,6 +11,7 @@
#include "fbnic_csr.h"
#include "fbnic_fw.h"
+#include "fbnic_hw_stats.h"
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
@@ -47,6 +48,9 @@ struct fbnic_dev {
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
+
+ /* Local copy of hardware statistics */
+ struct fbnic_hw_stats hw_stats;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -132,6 +136,9 @@ void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
void fbnic_free_irqs(struct fbnic_dev *fbd);
int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index a64360de0552..21db509acbc1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -660,6 +660,43 @@ enum {
#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
+#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \
+ 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \
+ 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
+#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \
+ 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \
+ 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
+ 0x11a14 /* 0x46850 */
+#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
+ 0x11a15 /* 0x46854 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_L 0x11a18 /* 0x46860 */
+#define FBNIC_MAC_STAT_RX_IFINERRORS_H 0x11a19 /* 0x46864 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_L 0x11a1c /* 0x46870 */
+#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
+#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
+#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
+ 0x11a42 /* 0x46908 */
+#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
+ 0x11a43 /* 0x4690c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \
+ 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \
+ 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
+#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
+#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
+#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index e87049dfd223..ef05ae8f5039 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -10,6 +10,56 @@
#define FBNIC_SN_STR_LEN 24
+static int fbnic_version_running_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, running_ver);
+ err = devlink_info_version_running_put(req, ver_name, running_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_running_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_version_stored_put(struct devlink_info_req *req,
+ struct fbnic_fw_ver *fw_ver,
+ char *ver_name)
+{
+ char stored_ver[FBNIC_FW_VER_MAX_SIZE];
+ int err;
+
+ fbnic_mk_fw_ver_str(fw_ver->version, stored_ver);
+ err = devlink_info_version_stored_put(req, ver_name, stored_ver);
+ if (err)
+ return err;
+
+ if (strlen(fw_ver->commit) > 0) {
+ char commit_name[FBNIC_SN_STR_LEN];
+
+ snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name);
+ err = devlink_info_version_stored_put(req, commit_name,
+ fw_ver->commit);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int fbnic_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -17,6 +67,31 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
struct fbnic_dev *fbd = devlink_priv(devlink);
int err;
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt,
+ DEVLINK_INFO_VERSION_GENERIC_FW);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER);
+ if (err)
+ return err;
+
+ err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI);
+ if (err)
+ return err;
+
if (fbd->dsn) {
unsigned char serial[FBNIC_SN_STR_LEN];
u8 dsn[8];
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
new file mode 100644
index 000000000000..5d980e178941
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -0,0 +1,75 @@
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_tlv.h"
+
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
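+ /* Ethtool pre-fills the stats structs with ETHTOOL_STAT_NOT_SET; only
+ * overwrite a field once the hardware has actually reported the counter.
+ */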
+ if (counter->reported)
+ *stat = counter->value;
+}
+
+static void
+fbnic_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *eth_mac_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ const struct fbnic_mac *mac;
+
+ mac_stats = &fbd->hw_stats.mac;
+ mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
+
+ fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
+ &mac_stats->eth_mac.FramesTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
+ &mac_stats->eth_mac.FramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
+ &mac_stats->eth_mac.FrameCheckSequenceErrors);
+ fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
+ &mac_stats->eth_mac.AlignmentErrors);
+ fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
+ &mac_stats->eth_mac.OctetsTransmittedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
+ fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
+ &mac_stats->eth_mac.OctetsReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
+ &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
+ &mac_stats->eth_mac.MulticastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
+ &mac_stats->eth_mac.BroadcastFramesXmittedOK);
+ fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
+ &mac_stats->eth_mac.MulticastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
+ &mac_stats->eth_mac.BroadcastFramesReceivedOK);
+ fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
+ &mac_stats->eth_mac.FrameTooLongErrors);
+}
+
+static const struct ethtool_ops fbnic_ethtool_ops = {
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+};
+
+void fbnic_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &fbnic_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 0c6e1b4c119b..8f7a2a19ddf8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -789,3 +789,16 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
}
+
+void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+ const size_t str_sz)
+{
+ struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
+ const char *delim = "";
+
+ if (mgmt->commit[0])
+ delim = "_";
+
+ fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
+ fw_version, str_sz);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index c65bca613665..221faf8c6756 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -53,10 +53,10 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
-#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
const u32 __rev_id = _rev_id; \
- snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \
+ snprintf(_str, _str_sz, "%02lu.%02lu.%02lu-%03lu%s%s", \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
@@ -65,7 +65,7 @@ do { \
} while (0)
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
- fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str)
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
#define FW_HEARTBEAT_PERIOD (10 * HZ)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
new file mode 100644
index 000000000000..a0acc7606aa1
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -0,0 +1,27 @@
+#include "fbnic.h"
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
+{
+ u32 prev_upper, upper, lower, diff;
+
+ prev_upper = rd32(fbd, reg + offset);
+ lower = rd32(fbd, reg);
+ upper = rd32(fbd, reg + offset);
+
+ diff = upper - prev_upper;
+ if (!diff)
+ return ((u64)upper << 32) | lower;
+
+ if (diff > 1)
+ dev_warn_once(fbd->dev,
+ "Stats inconsistent, upper 32b of %#010x updating too quickly\n",
+ reg * 4);
+
+ /* Return only the upper bits as we cannot guarantee
+ * the accuracy of the lower bits. We will add them in
+ * when the counter slows down enough that we can get
+ * a snapshot with both upper values being the same
+ * between reads.
+ */
+ return ((u64)upper << 32);
+}
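+
+/* Usage sketch (hypothetical caller): the _L/_H CSR pairs occupy adjacent
+ * 32-bit registers, so the high word lives one register above the low word
+ * and the offset argument is 1:
+ *
+ *	u64 rx_bytes = fbnic_stat_rd64(fbd, FBNIC_MAC_STAT_RX_BYTE_COUNT_L, 1);
+ */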
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
new file mode 100644
index 000000000000..30348904b510
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -0,0 +1,40 @@
+#include <linux/ethtool.h>
+
+#include "fbnic_csr.h"
+
+struct fbnic_stat_counter {
+ u64 value;
+ union {
+ u32 old_reg_value_32;
+ u64 old_reg_value_64;
+ } u;
+ bool reported;
+};
+
+struct fbnic_eth_mac_stats {
+ struct fbnic_stat_counter FramesTransmittedOK;
+ struct fbnic_stat_counter FramesReceivedOK;
+ struct fbnic_stat_counter FrameCheckSequenceErrors;
+ struct fbnic_stat_counter AlignmentErrors;
+ struct fbnic_stat_counter OctetsTransmittedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACXmitError;
+ struct fbnic_stat_counter OctetsReceivedOK;
+ struct fbnic_stat_counter FramesLostDueToIntMACRcvError;
+ struct fbnic_stat_counter MulticastFramesXmittedOK;
+ struct fbnic_stat_counter BroadcastFramesXmittedOK;
+ struct fbnic_stat_counter MulticastFramesReceivedOK;
+ struct fbnic_stat_counter BroadcastFramesReceivedOK;
+ struct fbnic_stat_counter FrameTooLongErrors;
+};
+
+struct fbnic_mac_stats {
+ struct fbnic_eth_mac_stats eth_mac;
+};
+
+struct fbnic_hw_stats {
+ struct fbnic_mac_stats mac;
+};
+
+u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
+
+void fbnic_get_hw_stats(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7920e7af82d9..7b654d0a6dac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -403,6 +403,21 @@ static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
fbnic_mac_init_txb(fbd);
}
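+
+/* Fold a fresh 64-bit register snapshot into a running counter. The hardware
+ * counter is free-running, so only the delta since the previous snapshot is
+ * accumulated; on reset the baseline is re-armed instead.
+ */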
+static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
+ if (!reset)
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+ stat->reported = true;
+}
+
+#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
+ __fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))
+
static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
{
u32 rxb_pause_ctrl;
@@ -637,12 +652,47 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}
+static void
+fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
+ MAC_STAT_RX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
+ MAC_STAT_RX_ALIGN_ERROR);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
+ MAC_STAT_RX_TOOLONG);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
+ MAC_STAT_RX_RECEIVED_OK);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
+ MAC_STAT_RX_PACKET_BAD_FCS);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACRcvError,
+ MAC_STAT_RX_IFINERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
+ MAC_STAT_RX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
+ MAC_STAT_RX_BROADCAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
+ MAC_STAT_TX_BYTE_COUNT);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
+ MAC_STAT_TX_TRANSMITTED_OK);
+ fbnic_mac_stat_rd64(fbd, reset,
+ mac_stats->FramesLostDueToIntMACXmitError,
+ MAC_STAT_TX_IFOUTERRORS);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
+ MAC_STAT_TX_MULTICAST);
+ fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
+ MAC_STAT_TX_BROADCAST);
+}
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
.pcs_disable = fbnic_pcs_disable_asic,
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index f53be6e6aef9..476239a9d381 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -78,6 +78,9 @@ struct fbnic_mac {
bool (*pcs_get_link)(struct fbnic_dev *fbd);
int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_mac_stats *mac_stats);
+
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index b7ce6da68543..a400616a24d4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
+#include <net/netdev_queues.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -316,6 +317,74 @@ void fbnic_clear_rx_mode(struct net_device *netdev)
__dev_mc_unsync(netdev, NULL);
}
+static void fbnic_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ u64 tx_bytes, tx_packets, tx_dropped = 0;
+ u64 rx_bytes, rx_packets, rx_dropped = 0;
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_queue_stats *stats;
+ unsigned int start, i;
+
+ stats = &fbn->tx_stats;
+
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
+
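+ /* Per-ring counters are read under u64_stats_fetch_begin()/retry() so
+ * that 32-bit hosts see a consistent packets/bytes/dropped triple even
+ * while the ring is actively updating them.
+ */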
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ struct fbnic_ring *txr = fbn->tx[i];
+
+ if (!txr)
+ continue;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+
+ stats = &fbn->rx_stats;
+
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+
+ stats64->rx_bytes = rx_bytes;
+ stats64->rx_packets = rx_packets;
+ stats64->rx_dropped = rx_dropped;
+
+ for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *rxr = fbn->rx[i];
+
+ if (!rxr)
+ continue;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->rx_bytes += rx_bytes;
+ stats64->rx_packets += rx_packets;
+ stats64->rx_dropped += rx_dropped;
+ }
+}
+
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
@@ -324,6 +393,72 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
+ .ndo_get_stats64 = fbnic_get_stats64,
+};
+
+static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!rxr)
+ return;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ rx->bytes = bytes;
+ rx->packets = packets;
+}
+
+static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *txr = fbn->tx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!txr)
+ return;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes = bytes;
+ tx->packets = packets;
+}
+
+static void fbnic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+
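+ /* The base stats hold counters accumulated from rings that have since
+ * been destroyed (see fbnic_aggregate_ring_*_counters()).
+ */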
+ tx->bytes = fbn->tx_stats.bytes;
+ tx->packets = fbn->tx_stats.packets;
+
+ rx->bytes = fbn->rx_stats.bytes;
+ rx->packets = fbn->rx_stats.packets;
+}
+
+static const struct netdev_stat_ops fbnic_stat_ops = {
+ .get_queue_stats_rx = fbnic_get_queue_stats_rx,
+ .get_queue_stats_tx = fbnic_get_queue_stats_tx,
+ .get_base_stats = fbnic_get_base_stats,
};
void fbnic_reset_queues(struct fbnic_net *fbn,
@@ -384,6 +519,9 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbd->netdev = netdev;
netdev->netdev_ops = &fbnic_netdev_ops;
+ netdev->stat_ops = &fbnic_stat_ops;
+
+ fbnic_set_ethtool_ops(netdev);
fbn = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 6bc0ebeb8182..6c27da09a612 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -40,6 +40,9 @@ struct fbnic_net {
u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+ /* Storage for stats after ring destruction */
+ struct fbnic_queue_stats tx_stats;
+ struct fbnic_queue_stats rx_stats;
u64 link_down_events;
struct list_head napis;
@@ -55,6 +58,7 @@ int fbnic_netdev_register(struct net_device *netdev);
void fbnic_netdev_unregister(struct net_device *netdev);
void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
+void fbnic_set_ethtool_ops(struct net_device *dev);
void __fbnic_set_rx_mode(struct net_device *netdev);
void fbnic_clear_rx_mode(struct net_device *netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 0ed4c9fff5d8..6a6d7e22f1a7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -273,6 +273,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
err_free:
dev_kfree_skb_any(skb);
err_count:
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
return NETDEV_TX_OK;
}
@@ -363,10 +366,19 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
txq = txring_txq(nv->napi.dev, ring);
if (unlikely(discard)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netdev_tx_completed_queue(txq, total_packets, total_bytes);
return;
}
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netif_txq_completed_wake(txq, total_packets, total_bytes,
fbnic_desc_unused(ring),
FBNIC_TX_DESC_WAKEUP);
@@ -730,12 +742,12 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
+ unsigned int packets = 0, bytes = 0, dropped = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
- u64 packets = 0;
done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
raw_rcd = &rcq->desc[head & rcq->size_mask];
@@ -780,9 +792,11 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
fbnic_populate_skb_fields(nv, rcd, skb, qt);
packets++;
+ bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
+ dropped++;
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -799,6 +813,14 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
}
}
+ u64_stats_update_begin(&rcq->stats.syncp);
+ rcq->stats.packets += packets;
+ rcq->stats.bytes += bytes;
+ /* Re-add Ethernet header length (removed in fbnic_build_skb) */
+ rcq->stats.bytes += ETH_HLEN * packets;
+ rcq->stats.dropped += dropped;
+ u64_stats_update_end(&rcq->stats.syncp);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
@@ -865,12 +887,36 @@ static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct fbnic_queue_stats *stats = &rxr->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+}
+
+static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct fbnic_queue_stats *stats = &txr->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+ fbn->tx_stats.dropped += stats->dropped;
+}
+
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
if (!(txr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_tx_counters(fbn, txr);
+
/* Remove pointer to the Tx ring */
WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
fbn->tx[txr->q_idx] = NULL;
@@ -882,6 +928,8 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
if (!(rxr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_rx_counters(fbn, rxr);
+
/* Remove pointer to the Rx ring */
WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
fbn->rx[rxr->q_idx] = NULL;
@@ -974,6 +1022,7 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
int q_idx, u8 flags)
{
+ u64_stats_init(&ring->stats.syncp);
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
@@ -1012,14 +1061,14 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->fbd = fbd;
nv->v_idx = v_idx;
- /* Record IRQ to NAPI struct */
- netif_napi_set_irq(&nv->napi,
- pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
-
/* Tie napi to netdev */
list_add(&nv->napis, &fbn->napis);
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
/* Tie nv back to PCIe dev */
nv->dev = fbd->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 4a206c0e7192..2f91f68d11d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
#include <net/xdp.h>
struct fbnic_net;
@@ -51,6 +52,13 @@ struct fbnic_pkt_buff {
u16 nr_frags;
};
+struct fbnic_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 dropped;
+ struct u64_stats_sync syncp;
+};
+
/* Pagecnt bias is long max to reserve the last bit to catch overflow
* cases where if we overcharge the bias it will flip over to be negative.
*/
@@ -77,6 +85,8 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ struct fbnic_queue_stats stats;
+
/* Slow path fields follow */
dma_addr_t dma; /* Phys addr of descriptor memory */
size_t size; /* Size of descriptor ring in memory */
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 43ba71e82260..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,18 +46,21 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
+ select PHYLINK
help
- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
+ Support for the Microchip LAN743x and PCI11x1x families of PCI
+ Express Ethernet devices.
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
+source "drivers/net/ethernet/microchip/fdma/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index bbd349264e6f..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
+obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/fdma/Kconfig b/drivers/net/ethernet/microchip/fdma/Kconfig
new file mode 100644
index 000000000000..59159ad6701a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip FDMA API configuration
+#
+
+if NET_VENDOR_MICROCHIP
+
+config FDMA
+ bool "FDMA API"
+ help
+ Provides the basic FDMA functionality for multiple Microchip
+ switchcores.
+
+ Say Y here if you want to build the FDMA API that provides a common
+ set of functions and data structures for interacting with the Frame
+ DMA engine in multiple Microchip switchcores.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/fdma/Makefile b/drivers/net/ethernet/microchip/fdma/Makefile
new file mode 100644
index 000000000000..cc9a736be357
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Microchip FDMA
+#
+
+obj-$(CONFIG_FDMA) += fdma.o
+fdma-y += fdma_api.o
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.c b/drivers/net/ethernet/microchip/fdma/fdma_api.c
new file mode 100644
index 000000000000..e78c3590da9e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "fdma_api.h"
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* Add a DB to a DCB, providing a callback for getting the DB dataptr. */
+static int __fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status,
+ int (*cb)(struct fdma *fdma, int dcb_idx,
+ int db_idx, u64 *dataptr))
+{
+ struct fdma_db *db = fdma_db_get(fdma, dcb_idx, db_idx);
+
+ db->status = status;
+
+ return cb(fdma, dcb_idx, db_idx, &db->dataptr);
+}
+
+/* Add a DB to a DCB, using the callback set in the fdma_ops struct. */
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status)
+{
+ return __fdma_db_add(fdma,
+ dcb_idx,
+ db_idx,
+ status,
+ fdma->ops.dataptr_cb);
+}
+
+/* Add a DCB with callbacks for getting the DB dataptr and the DCB nextptr. */
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr))
+{
+ struct fdma_dcb *dcb = fdma_dcb_get(fdma, dcb_idx);
+ int i, err;
+
+ for (i = 0; i < fdma->n_dbs; i++) {
+ err = __fdma_db_add(fdma, dcb_idx, i, status, db_cb);
+ if (unlikely(err))
+ return err;
+ }
+
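+ /* Chain the previous tail DCB to this one, then make this DCB the new
+ * tail with an invalid nextptr.
+ */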
+ err = dcb_cb(fdma, dcb_idx, &fdma->last_dcb->nextptr);
+ if (unlikely(err))
+ return err;
+
+ fdma->last_dcb = dcb;
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = info;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fdma_dcb_add);
+
+/* Add a DCB, using the preset callbacks in the fdma_ops struct. */
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status)
+{
+ return __fdma_dcb_add(fdma,
+ dcb_idx,
+ info, status,
+ fdma->ops.nextptr_cb,
+ fdma->ops.dataptr_cb);
+}
+EXPORT_SYMBOL_GPL(fdma_dcb_add);
+
+/* Initialize the DCBs and DBs. */
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status)
+{
+ int i, err;
+
+ fdma->last_dcb = fdma->dcbs;
+ fdma->db_index = 0;
+ fdma->dcb_index = 0;
+
+ for (i = 0; i < fdma->n_dcbs; i++) {
+ err = fdma_dcb_add(fdma, i, info, status);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_dcbs_init);
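+
+/* Minimal setup sketch for a client driver (my_dataptr_cb is hypothetical):
+ * fill in the geometry and callbacks, allocate the memory, then initialize
+ * all DCBs:
+ *
+ *	fdma->n_dcbs = 64;
+ *	fdma->n_dbs = 1;
+ *	fdma->db_size = 2048;
+ *	fdma->ops.nextptr_cb = fdma_nextptr_cb;
+ *	fdma->ops.dataptr_cb = my_dataptr_cb;
+ *	fdma->size = fdma_get_size(fdma);
+ *	err = fdma_alloc_coherent(dev, fdma);
+ *	if (!err)
+ *		err = fdma_dcbs_init(fdma, info, status);
+ */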
+
+/* Allocate coherent DMA memory for FDMA. */
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma)
+{
+ fdma->dcbs = dma_alloc_coherent(dev,
+ fdma->size,
+ &fdma->dma,
+ GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_coherent);
+
+/* Allocate physical memory for FDMA. */
+int fdma_alloc_phys(struct fdma *fdma)
+{
+ fdma->dcbs = kzalloc(fdma->size, GFP_KERNEL);
+ if (!fdma->dcbs)
+ return -ENOMEM;
+
+ fdma->dma = virt_to_phys(fdma->dcbs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdma_alloc_phys);
+
+/* Free coherent DMA memory. */
+void fdma_free_coherent(struct device *dev, struct fdma *fdma)
+{
+ dma_free_coherent(dev, fdma->size, fdma->dcbs, fdma->dma);
+}
+EXPORT_SYMBOL_GPL(fdma_free_coherent);
+
+/* Free virtual memory. */
+void fdma_free_phys(struct fdma *fdma)
+{
+ kfree(fdma->dcbs);
+}
+EXPORT_SYMBOL_GPL(fdma_free_phys);
+
+/* Get the size of the FDMA DCB memory. */
+u32 fdma_get_size(struct fdma *fdma)
+{
+ return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size);
+
+/* Get the size of the FDMA memory, including the data buffers. This function
+ * is only applicable if the dataptr addresses and DCBs are in contiguous
+ * memory.
+ */
+u32 fdma_get_size_contiguous(struct fdma *fdma)
+{
+ return ALIGN(fdma->n_dcbs * sizeof(struct fdma_dcb) +
+ fdma->n_dcbs * fdma->n_dbs * fdma->db_size,
+ PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(fdma_get_size_contiguous);
diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.h b/drivers/net/ethernet/microchip/fdma/fdma_api.h
new file mode 100644
index 000000000000..d91affe8bd98
--- /dev/null
+++ b/drivers/net/ethernet/microchip/fdma/fdma_api.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _FDMA_API_H_
+#define _FDMA_API_H_
+
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+/* This provides a common set of functions and data structures for interacting
+ * with the Frame DMA engine on multiple Microchip switchcores.
+ *
+ * Frame DMA DCB format:
+ *
+ * +---------------------------+
+ * |         Next Ptr          |
+ * +---------------------------+
+ * |   Reserved  |    Info     |
+ * +---------------------------+
+ * |         Data0 Ptr         |
+ * +---------------------------+
+ * |   Reserved  |   Status0   |
+ * +---------------------------+
+ * |         Data1 Ptr         |
+ * +---------------------------+
+ * |   Reserved  |   Status1   |
+ * +---------------------------+
+ * |         Data2 Ptr         |
+ * +---------------------------+
+ * |   Reserved  |   Status2   |
+ * +---------------------------+
+ * |                           |
+ * |            ...            |
+ * |                           |
+ * +---------------------------+
+ * |         Data14 Ptr        |
+ * +---------------------------+
+ * |   Reserved  |   Status14  |
+ * +---------------------------+
+ *
+ * The data pointers point to the actual frame data to be received or sent.
+ * The addresses of the data pointers can, as of writing, be either a DMA
+ * address, a physical address or a mapped address.
+ *
+ */
+
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_INFO_TOKEN BIT(17)
+#define FDMA_DCB_INFO_INTR BIT(18)
+#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_DB_MAX 15 /* Max number of DBs on Sparx5 */
+
+struct fdma;
+
+struct fdma_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct fdma_dcb {
+ u64 nextptr;
+ u64 info;
+ struct fdma_db db[FDMA_DB_MAX];
+};
+
+struct fdma_ops {
+ /* User-provided callback to set the dataptr */
+ int (*dataptr_cb)(struct fdma *fdma, int dcb_idx, int db_idx, u64 *ptr);
+ /* User-provided callback to set the nextptr */
+ int (*nextptr_cb)(struct fdma *fdma, int dcb_idx, u64 *ptr);
+};
+
+struct fdma {
+ void *priv;
+
+ /* Virtual addresses */
+ struct fdma_dcb *dcbs;
+ struct fdma_dcb *last_dcb;
+
+ /* DMA address */
+ dma_addr_t dma;
+
+ /* Size of DCB + DB memory */
+ int size;
+
+ /* Indexes used to access the next-to-be-used DCB or DB */
+ int db_index;
+ int dcb_index;
+
+ /* Number of DCBs and DBs */
+ u32 n_dcbs;
+ u32 n_dbs;
+
+ /* Size of DBs */
+ u32 db_size;
+
+ /* Channel id this FDMA object operates on */
+ u32 channel_id;
+
+ struct fdma_ops ops;
+};
+
+/* Advance the DCB index and wrap if required. */
+static inline void fdma_dcb_advance(struct fdma *fdma)
+{
+ fdma->dcb_index++;
+ if (fdma->dcb_index >= fdma->n_dcbs)
+ fdma->dcb_index = 0;
+}
+
+/* Advance the DB index. */
+static inline void fdma_db_advance(struct fdma *fdma)
+{
+ fdma->db_index++;
+}
+
+/* Reset the db index to zero. */
+static inline void fdma_db_reset(struct fdma *fdma)
+{
+ fdma->db_index = 0;
+}
+
+/* Check if a DCB can be reused in case of multiple DBs per DCB. */
+static inline bool fdma_dcb_is_reusable(struct fdma *fdma)
+{
+ return fdma->db_index != fdma->n_dbs;
+}
+
+/* Check if the FDMA has marked this DB as done. */
+static inline bool fdma_db_is_done(struct fdma_db *db)
+{
+ return db->status & FDMA_DCB_STATUS_DONE;
+}
+
+/* Get the length of a DB. */
+static inline int fdma_db_len_get(struct fdma_db *db)
+{
+ return FDMA_DCB_STATUS_BLOCKL(db->status);
+}
+
+/* Set the data length of a DCB. */
+static inline void fdma_dcb_len_set(struct fdma_dcb *dcb, u32 len)
+{
+ dcb->info = FDMA_DCB_INFO_DATAL(len);
+}
+
+/* Get a DB by index. */
+static inline struct fdma_db *fdma_db_get(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return &fdma->dcbs[dcb_idx].db[db_idx];
+}
+
+/* Get the next DB. */
+static inline struct fdma_db *fdma_db_next_get(struct fdma *fdma)
+{
+ return fdma_db_get(fdma, fdma->dcb_index, fdma->db_index);
+}
+
+/* Get a DCB by index. */
+static inline struct fdma_dcb *fdma_dcb_get(struct fdma *fdma, int dcb_idx)
+{
+ return &fdma->dcbs[dcb_idx];
+}
+
+/* Get the next DCB. */
+static inline struct fdma_dcb *fdma_dcb_next_get(struct fdma *fdma)
+{
+ return fdma_dcb_get(fdma, fdma->dcb_index);
+}
+
+/* Check if the FDMA has frames ready for extraction. */
+static inline bool fdma_has_frames(struct fdma *fdma)
+{
+ return fdma_db_is_done(fdma_db_next_get(fdma));
+}
+
+/* Get a nextptr by index */
+static inline int fdma_nextptr_cb(struct fdma *fdma, int dcb_idx, u64 *nextptr)
+{
+ *nextptr = fdma->dma + (sizeof(struct fdma_dcb) * dcb_idx);
+ return 0;
+}
+
+/* Get the DMA address of a dataptr, by index. This function is only applicable
+ * if the dataptr addresses and DCBs are in contiguous memory and the driver
+ * supports XDP.
+ */
+static inline u64 fdma_dataptr_get_contiguous(struct fdma *fdma, int dcb_idx,
+ int db_idx)
+{
+ return fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
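+
+/* The contiguous layout assumed above:
+ *
+ *	[ n_dcbs * struct fdma_dcb | n_dcbs * n_dbs * db_size data buffers ]
+ *
+ * with each buffer's payload starting XDP_PACKET_HEADROOM bytes in.
+ */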
+
+/* Get the virtual address of a dataptr, by index. This function is only
+ * applicable if the dataptr addresses and DCBs are in contiguous memory and
+ * the driver supports XDP.
+ */
+static inline void *fdma_dataptr_virt_get_contiguous(struct fdma *fdma,
+ int dcb_idx, int db_idx)
+{
+ return (u8 *)fdma->dcbs + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size +
+ XDP_PACKET_HEADROOM;
+}
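
The two *_contiguous helpers above assume a single allocation that places every DCB first and every data block after them, each block reserving XDP_PACKET_HEADROOM before the payload. A sketch of that assumed layout (illustrative comment only; the actual sizes depend on FDMA_DB_MAX and the configured n_dcbs/n_dbs/db_size):

/*
 * Offset 0 (fdma->dcbs / fdma->dma)
 *   struct fdma_dcb[n_dcbs]                  <- DCB ring
 * Offset sizeof(struct fdma_dcb) * n_dcbs
 *   [XDP headroom][data] DB 0 of DCB 0       <- each slot is db_size bytes;
 *   [XDP headroom][data] DB 1 of DCB 0          dataptr points just past
 *   ...                                         XDP_PACKET_HEADROOM
 *   [XDP headroom][data] DB n_dbs-1 of DCB n_dcbs-1
 */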
+
+/* Check if this DCB is the last used DCB. */
+static inline bool fdma_is_last(struct fdma *fdma, struct fdma_dcb *dcb)
+{
+ return dcb == fdma->last_dcb;
+}
+
+int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status);
+int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status);
+int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status);
+int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status,
+ int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr),
+ int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx,
+ u64 *dataptr));
+
+int fdma_alloc_coherent(struct device *dev, struct fdma *fdma);
+int fdma_alloc_phys(struct fdma *fdma);
+
+void fdma_free_coherent(struct device *dev, struct fdma *fdma);
+void fdma_free_phys(struct fdma *fdma);
+
+u32 fdma_get_size(struct fdma *fdma);
+u32 fdma_get_size_contiguous(struct fdma *fdma);
+
+#endif
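
Taken together, the header defines a small library: a driver embeds struct fdma, fills in the geometry and the two callbacks, and lets the library own the DCB/DB bookkeeping. Below is a minimal, hypothetical TX-side setup; struct mydrv and its fields are invented for illustration, while the fdma_* calls are the ones declared above (the lan966x conversion further down is a real user):

/* Hypothetical driver glue (struct mydrv and tx_buf_dma are invented). */
static int mydrv_tx_dataptr_cb(struct fdma *fdma, int dcb_idx, int db_idx,
			       u64 *dataptr)
{
	struct mydrv *priv = fdma->priv;

	*dataptr = priv->tx_buf_dma[dcb_idx];	/* per-DCB buffer, invented */
	return 0;
}

static int mydrv_tx_init(struct device *dev, struct mydrv *priv)
{
	struct fdma *fdma = &priv->fdma;
	int err;

	fdma->priv = priv;
	fdma->channel_id = 0;
	fdma->n_dcbs = 64;		/* power of two, as the users assume */
	fdma->n_dbs = 1;
	fdma->db_size = 2048;
	fdma->ops.nextptr_cb = fdma_nextptr_cb;	/* default from this header */
	fdma->ops.dataptr_cb = mydrv_tx_dataptr_cb;

	err = fdma_alloc_coherent(dev, fdma);	/* allocates dcbs and dma */
	if (err)
		return err;

	/* Chain all DCBs; info/status of 0 leaves every DB idle */
	return fdma_dcbs_init(fdma, 0, 0);
}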
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 3a63ec091413..1a1cbd034eda 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1034,16 +1034,12 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp.ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON) |
@@ -1058,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 buf;
- int ret;
-
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
- ret = phy_ethtool_get_eee(phydev, eee);
- if (ret < 0)
- return ret;
+ eee->tx_lpi_timer = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
- buf = lan743x_csr_read(adapter, MAC_CR);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- eee->tx_lpi_timer = buf;
- } else {
- eee->tx_lpi_timer = 0;
- }
-
- return 0;
+ return phylink_ethtool_get_eee(adapter->phylink, eee);
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
- struct lan743x_adapter *adapter;
- struct phy_device *phydev;
- u32 buf = 0;
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 tx_lpi_timer;
- if (!netdev)
- return -EINVAL;
- adapter = netdev_priv(netdev);
- if (!adapter)
- return -EINVAL;
- phydev = netdev->phydev;
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
+ tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ if (tx_lpi_timer != eee->tx_lpi_timer) {
+ u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared.
+	 * This sequence triggers an autonegotiation restart, and EEE
+	 * will be re-enabled during link-up if it was negotiated.
+ */
+ lan743x_mac_eee_enable(adapter, false);
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
+ eee->tx_lpi_timer);
- if (eee->eee_enabled) {
- buf = (u32)eee->tx_lpi_timer;
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+ if (mac_cr & MAC_CR_EEE_EN_)
+ lan743x_mac_eee_enable(adapter, true);
}
- return phy_ethtool_set_eee(phydev, eee);
+ return phylink_ethtool_set_eee(adapter->phylink, eee);
+}
+
+static int
+lan743x_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(adapter->phylink, cmd);
+}
+
+static int
+lan743x_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(adapter->phylink, cmd);
}
#ifdef CONFIG_PM
@@ -1124,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (netdev->phydev)
- phy_ethtool_get_wol(netdev->phydev, wol);
+ phylink_ethtool_get_wol(adapter->phylink, wol);
if (wol->supported != adapter->phy_wol_supported)
netif_warn(adapter, drv, adapter->netdev,
@@ -1166,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
!(adapter->phy_wol_supported & WAKE_MAGICSECURE))
phy_wol.wolopts &= ~WAKE_MAGIC;
- ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ ret = phylink_ethtool_set_wol(adapter->phylink, wol);
if (ret && (ret != -EOPNOTSUPP))
return ret;
@@ -1355,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct lan743x_phy *phy = &adapter->phy;
- if (phy->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
- if (phy->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
- pause->autoneg = phy->fc_autoneg;
+ phylink_ethtool_get_pauseparam(adapter->phylink, pause);
}
static int lan743x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- struct lan743x_phy *phy = &adapter->phy;
-
- if (!phydev)
- return -ENODEV;
-
- if (!phy_validate_pause(phydev, pause))
- return -EINVAL;
-
- phy->fc_request_control = 0;
- if (pause->rx_pause)
- phy->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- phy->fc_request_control |= FLOW_CTRL_TX;
-
- phy->fc_autoneg = pause->autoneg;
-
- if (pause->autoneg == AUTONEG_DISABLE)
- lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
- pause->rx_pause);
- else
- phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(adapter->phylink, pause);
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1417,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = lan743x_ethtool_get_link_ksettings,
+ .set_link_ksettings = lan743x_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
.get_pauseparam = lan743x_get_pauseparam,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e418539565b1..4dc5adcda6a3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
+#include <linux/phylink.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
@@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
return ret;
}
+static int lan743x_get_lsd(int speed, int duplex, u8 mss)
+{
+ int lsd;
+
+ switch (speed) {
+ case SPEED_2500:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_2500_SLAVE;
+ else
+ lsd = LINK_2500_MASTER;
+ break;
+ case SPEED_1000:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_1000_SLAVE;
+ else
+ lsd = LINK_1000_MASTER;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ lsd = -EINVAL;
+ }
+
+ return lsd;
+}
+
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
u16 baud)
{
@@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
VR_MII_BAUD_RATE_1P25GBPS);
}
-static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
- bool *status)
-{
- int ret;
-
- ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
- VR_MII_GEN2_4_MPLL_CTRL1);
- if (ret < 0)
- return ret;
-
- if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
- ret == VR_MII_MPLL_MULTIPLIER_50)
- *status = true;
- else
- *status = false;
-
- return 0;
-}
-
-static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
int mii_ctrl;
@@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
return 0;
}
-static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
- enum lan743x_sgmii_lsd lsd = POWER_DOWN;
int mii_ctl;
- bool status;
int ret;
- switch (phydev->speed) {
- case SPEED_2500:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_2500_MASTER;
- else
- lsd = LINK_2500_SLAVE;
- break;
- case SPEED_1000:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_1000_MASTER;
- else
- lsd = LINK_1000_SLAVE;
- break;
- case SPEED_100:
- if (phydev->duplex)
- lsd = LINK_100FD;
- else
- lsd = LINK_100HD;
- break;
- case SPEED_10:
- if (phydev->duplex)
- lsd = LINK_10FD;
- else
- lsd = LINK_10HD;
- break;
- default:
- netif_err(adapter, drv, adapter->netdev,
- "Invalid speed %d\n", phydev->speed);
- return -EINVAL;
- }
-
- adapter->sgmii_lsd = lsd;
- ret = lan743x_sgmii_aneg_update(adapter);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII cfg failed\n", ret);
- return ret;
- }
-
- ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII get mode failed\n", ret);
- return ret;
- }
-
- if (status)
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 2.5G mode enable\n");
- else
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 1G mode enable\n");
-
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
if (mii_ctl < 0)
@@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
if (ret < 0)
return ret;
- ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
@@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u16 local_adv, u16 remote_adv)
-{
- struct lan743x_phy *phy = &adapter->phy;
- u8 cap;
-
- if (phy->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
- else
- cap = phy->fc_request_control;
-
- lan743x_mac_flow_ctrl_set_enables(adapter,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
return lan743x_phy_reset(adapter);
}
-static void lan743x_phy_link_status_change(struct net_device *netdev)
-{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 data;
-
- phy_print_status(phydev);
- if (phydev->state == PHY_RUNNING) {
- int remote_advertisement = 0;
- int local_advertisement = 0;
-
- data = lan743x_csr_read(adapter, MAC_CR);
-
- /* set duplex mode */
- if (phydev->duplex)
- data |= MAC_CR_DPX_;
- else
- data &= ~MAC_CR_DPX_;
-
- /* set bus speed */
- switch (phydev->speed) {
- case SPEED_10:
- data &= ~MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_100:
- data &= ~MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- case SPEED_1000:
- data |= MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_2500:
- data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- }
- lan743x_csr_write(adapter, MAC_CR, data);
-
- local_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->advertising);
- remote_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
-
- lan743x_phy_update_flowcontrol(adapter, local_advertisement,
- remote_advertisement);
- lan743x_ptp_update_latency(adapter, phydev->speed);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
- lan743x_sgmii_config(adapter);
-
- data = lan743x_csr_read(adapter, MAC_CR);
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, data);
- }
-}
-
-static void lan743x_phy_close(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
-
- phy_stop(netdev->phydev);
- phy_disconnect(netdev->phydev);
-
- /* using phydev here as phy_disconnect NULLs netdev->phydev */
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
-
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
adapter->phy_interface = PHY_INTERFACE_MODE_MII;
else
adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int lan743x_phy_open(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct lan743x_phy *phy = &adapter->phy;
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- };
- struct phy_device *phydev;
- int ret = -EIO;
-
- /* try devicetree phy, or fixed link */
- phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
- lan743x_phy_link_status_change);
-
- if (!phydev) {
- /* try internal phy */
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev) {
- if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
- ID_REV_ID_LAN7431_) {
- phydev = fixed_phy_register(PHY_POLL,
- &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return PTR_ERR(phydev);
- }
- } else {
- goto return_error;
- }
- }
-
- lan743x_phy_interface_select(adapter);
-
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- adapter->phy_interface);
- if (ret)
- goto return_error;
- }
-
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- phy_support_asym_pause(phydev);
- phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phy->fc_autoneg = phydev->autoneg;
-
- phy_start(phydev);
- phy_start_aneg(phydev);
- phy_attached_info(phydev);
- return 0;
-return_error:
- return ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "selected phy interface: 0x%X\n", adapter->phy_interface);
}
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
@@ -3061,6 +2870,336 @@ return_error:
return ret;
}
+static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from the External PHY via SGMII */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d sgmii aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 1000basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 2500basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+{
+ u32 mac_cr;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+ else
+ mac_cr &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+}
+
+static void lan743x_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ret = lan743x_phylink_2500basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "2500BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "2500BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = lan743x_phylink_1000basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "1000BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "1000BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = lan743x_phylink_sgmii_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "SGMII config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII mode selected and configured\n");
+ break;
+ default:
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII/GMII/MII(0x%X) mode enable\n",
+ state->interface);
+ break;
+ }
+}
+
+static void lan743x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+	netif_tx_stop_all_queues(netdev);
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static void lan743x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int mac_cr;
+ u8 cap;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ /* Pre-initialize register bits.
+	 * The resulting value corresponds to SPEED_10.
+ */
+ mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
+ if (speed == SPEED_2500)
+ mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
+ else if (speed == SPEED_1000)
+ mac_cr |= MAC_CR_CFG_H_;
+ else if (speed == SPEED_100)
+ mac_cr |= MAC_CR_CFG_L_;
+
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+
+ lan743x_ptp_update_latency(adapter, speed);
+
+ /* Flow Control operation */
+ cap = 0;
+ if (tx_pause)
+ cap |= FLOW_CTRL_TX;
+ if (rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+
+ if (phydev)
+ lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
+
+ netif_tx_wake_all_queues(netdev);
+}
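
For reference, the two MAC_CR bits set above encode link speed as follows (derived from the assignments in lan743x_phylink_mac_link_up and the legacy link-status code removed earlier):

/*
 * MAC_CR_CFG_H_  MAC_CR_CFG_L_   speed
 *      0              0          10 Mb/s  (pre-initialized default)
 *      0              1          100 Mb/s
 *      1              0          1000 Mb/s
 *      1              1          2500 Mb/s
 */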
+
+static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
+ .mac_config = lan743x_phylink_mac_config,
+ .mac_link_down = lan743x_phylink_mac_link_down,
+ .mac_link_up = lan743x_phylink_mac_link_up,
+};
+
+static int lan743x_phylink_create(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phylink *pl;
+
+ adapter->phylink_config.dev = &netdev->dev;
+ adapter->phylink_config.type = PHYLINK_NETDEV;
+ adapter->phylink_config.mac_managed_pm = false;
+
+ adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ lan743x_phy_interface_select(adapter);
+
+ switch (adapter->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ adapter->phylink_config.supported_interfaces);
+ adapter->phylink_config.mac_capabilities |= MAC_2500FD;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ default:
+ phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
+ }
+
+ pl = phylink_create(&adapter->phylink_config, NULL,
+ adapter->phy_interface, &lan743x_phylink_mac_ops);
+
+ if (IS_ERR(pl)) {
+ netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
+ return PTR_ERR(pl);
+ }
+
+ adapter->phylink = pl;
+	netdev_dbg(netdev, "lan743x phylink created\n");
+
+ return 0;
+}
+
+static bool lan743x_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
+static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
+{
+ struct device_node *dn = adapter->pdev->dev.of_node;
+ struct net_device *dev = adapter->netdev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (dn)
+ ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
+
+ if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (phydev) {
+			/* Attach the MAC to the PHY */
+ ret = phylink_connect_phy(adapter->phylink, phydev);
+ } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
+ struct phylink_link_state state;
+ unsigned long caps;
+
+ caps = adapter->phylink_config.mac_capabilities;
+ if (caps & MAC_2500FD) {
+ state.speed = SPEED_2500;
+ state.duplex = DUPLEX_FULL;
+ } else if (caps & MAC_1000FD) {
+ state.speed = SPEED_1000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ state.speed = SPEED_UNKNOWN;
+ state.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ret = phylink_set_fixed_link(adapter->phylink, &state);
+ if (ret) {
+ netdev_err(dev, "Could not set fixed link\n");
+ return ret;
+ }
+ } else {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
+
+ phylink_start(adapter->phylink);
+
+ return 0;
+}
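
The fallback ladder implemented above, summarized (illustrative comment only):

/*
 * PHY attach order in lan743x_phylink_connect():
 *   1. phylink_of_phy_connect()  - DT phy-handle, when an OF node exists
 *   2. phylink_connect_phy()     - first PHY on the internal MDIO bus, when
 *                                  there is no OF node or the OF connect
 *                                  failed without an explicit phy-handle
 *   3. phylink_set_fixed_link()  - LAN7431/PCI11x1x without a PHY; the fixed
 *                                  speed is picked from mac_capabilities
 *                                  (2500FD, else 1000FD, else unknown)
 *   otherwise the open fails with -ENXIO
 */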
+
+static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
+{
+ phylink_stop(adapter->phylink);
+ phylink_disconnect_phy(adapter->phylink);
+}
+
static int lan743x_netdev_close(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev)
lan743x_ptp_close(adapter);
- lan743x_phy_close(adapter);
+ lan743x_phylink_disconnect(adapter);
lan743x_mac_close(adapter);
@@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_intr;
- ret = lan743x_phy_open(adapter);
+ ret = lan743x_phylink_connect(adapter);
if (ret)
goto close_mac;
ret = lan743x_ptp_open(adapter);
if (ret)
- goto close_phy;
+ goto close_mac;
lan743x_rfe_open(adapter);
@@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_tx;
}
+ if (netdev->phydev)
+ phy_support_eee(netdev->phydev);
+
#ifdef CONFIG_PM
if (adapter->netdev->phydev) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -3143,9 +3285,8 @@ close_rx:
lan743x_rx_close(&adapter->rx[index]);
}
lan743x_ptp_close(adapter);
-
-close_phy:
- lan743x_phy_close(adapter);
+ if (adapter->phylink)
+ lan743x_phylink_disconnect(adapter);
close_mac:
lan743x_mac_close(adapter);
@@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
static int lan743x_netdev_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
if (!netif_running(netdev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return lan743x_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+
+ return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
static void lan743x_netdev_set_multicast(struct net_device *netdev)
@@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
mdiobus_unregister(adapter->mdiobus);
}
+static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
+{
+ phylink_destroy(adapter->phylink);
+ adapter->phylink = NULL;
+}
+
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
unregister_netdev(adapter->netdev);
+ lan743x_destroy_phylink(adapter);
lan743x_mdiobus_cleanup(adapter);
lan743x_hardware_cleanup(adapter);
lan743x_pci_cleanup(adapter);
@@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
+ ret = lan743x_phylink_create(adapter);
+ if (ret < 0) {
+ netif_err(adapter, probe, netdev,
+ "failed to setup phylink (%d)\n", ret);
+ goto cleanup_mdiobus;
+ }
ret = register_netdev(adapter->netdev);
if (ret < 0)
- goto cleanup_mdiobus;
+ goto cleanup_phylink;
return 0;
+cleanup_phylink:
+ lan743x_destroy_phylink(adapter);
+
cleanup_mdiobus:
lan743x_mdiobus_cleanup(adapter);
@@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev)
MAC_WK_SRC_WK_FR_SAVED_;
lan743x_csr_write(adapter, MAC_WK_SRC, data);
+ rtnl_lock();
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakesup after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b2585a384e2..8ef897c114d3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -5,6 +5,7 @@
#define _LAN743X_H
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -1083,6 +1084,8 @@ struct lan743x_adapter {
u32 flags;
u32 hw_cfg;
phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan865x/Kconfig b/drivers/net/ethernet/microchip/lan865x/Kconfig
new file mode 100644
index 000000000000..7f2a4e7e1915
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip LAN865x Driver Support
+#
+
+if NET_VENDOR_MICROCHIP
+
+config LAN865X
+ tristate "LAN865x support"
+ depends on SPI
+ select OA_TC6
+ help
+ Support for the Microchip LAN8650/1 Rev.B0/B1 MACPHY Ethernet chip. It
+	  uses the OPEN Alliance 10BASE-T1x Serial Interface specification.
+
+ To compile this driver as a module, choose M here. The module will be
+ called lan865x.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/lan865x/Makefile b/drivers/net/ethernet/microchip/lan865x/Makefile
new file mode 100644
index 000000000000..9f5dd89c1eb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip LAN865x Driver
+#
+
+obj-$(CONFIG_LAN865X) += lan865x.o
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
new file mode 100644
index 000000000000..dd436bdff0f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip's LAN865x 10BASE-T1S MAC-PHY driver
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+#define DRV_NAME "lan8650"
+
+/* MAC Network Control Register */
+#define LAN865X_REG_MAC_NET_CTL 0x00010000
+#define MAC_NET_CTL_TXEN BIT(3) /* Transmit Enable */
+#define MAC_NET_CTL_RXEN BIT(2) /* Receive Enable */
+
+/* MAC Network Configuration Reg */
+#define LAN865X_REG_MAC_NET_CFG 0x00010001
+#define MAC_NET_CFG_PROMISCUOUS_MODE BIT(4)
+#define MAC_NET_CFG_MULTICAST_MODE BIT(6)
+#define MAC_NET_CFG_UNICAST_MODE BIT(7)
+
+/* MAC Hash Register Bottom */
+#define LAN865X_REG_MAC_L_HASH 0x00010020
+/* MAC Hash Register Top */
+#define LAN865X_REG_MAC_H_HASH 0x00010021
+/* MAC Specific Addr 1 Bottom Reg */
+#define LAN865X_REG_MAC_L_SADDR1 0x00010022
+/* MAC Specific Addr 1 Top Reg */
+#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
+struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct oa_tc6 *tc6;
+};
+
+static int lan865x_set_hw_macaddr_low_bytes(struct oa_tc6 *tc6, const u8 *mac)
+{
+ u32 regval;
+
+ regval = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
+
+ return oa_tc6_write_register(tc6, LAN865X_REG_MAC_L_SADDR1, regval);
+}
+
+static int lan865x_set_hw_macaddr(struct lan865x_priv *priv, const u8 *mac)
+{
+ int restore_ret;
+ u32 regval;
+ int ret;
+
+ /* Configure MAC address low bytes */
+ ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, mac);
+ if (ret)
+ return ret;
+
+ /* Prepare and configure MAC address high bytes */
+ regval = (mac[5] << 8) | mac[4];
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_SADDR1,
+ regval);
+ if (!ret)
+ return 0;
+
+ /* Restore the old MAC address low bytes from netdev if the new MAC
+ * address high bytes setting failed.
+ */
+ restore_ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6,
+ priv->netdev->dev_addr);
+ if (restore_ret)
+ return restore_ret;
+
+ return ret;
+}
+
+static const struct ethtool_ops lan865x_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int lan865x_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ struct sockaddr *address = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(address->sa_data, netdev->dev_addr))
+ return 0;
+
+ ret = lan865x_set_hw_macaddr(priv, address->sa_data);
+ if (ret)
+ return ret;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+static u32 get_address_bit(u8 addr[ETH_ALEN], u32 bit)
+{
+ return ((addr[bit / 8]) >> (bit % 8)) & 1;
+}
+
+static u32 lan865x_hash(u8 addr[ETH_ALEN])
+{
+ u32 hash_index = 0;
+
+ for (int i = 0; i < 6; i++) {
+ u32 hash = 0;
+
+ for (int j = 0; j < 8; j++)
+ hash ^= get_address_bit(addr, (j * 6) + i);
+
+ hash_index |= (hash << i);
+ }
+
+ return hash_index;
+}
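
The hash folds the 48 address bits into a 6-bit index by XOR-ing every sixth bit (column i collects bits i, i+6, ..., i+42). A self-contained, hypothetical user-space check that mirrors the two functions above (not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint32_t get_address_bit(const uint8_t addr[ETH_ALEN], uint32_t bit)
{
	return (addr[bit / 8] >> (bit % 8)) & 1;
}

static uint32_t lan865x_hash(const uint8_t addr[ETH_ALEN])
{
	uint32_t hash_index = 0;

	for (int i = 0; i < 6; i++) {
		uint32_t hash = 0;

		for (int j = 0; j < 8; j++)
			hash ^= get_address_bit(addr, (j * 6) + i);

		hash_index |= (hash << i);
	}

	return hash_index;
}

int main(void)
{
	/* IPv4 all-hosts multicast group maps to MAC 01:00:5e:00:00:01 */
	const uint8_t mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t bit = lan865x_hash(mc);

	/* Bits 0-31 select MAC_L_HASH, bits 32-63 select MAC_H_HASH */
	printf("hash bit %u -> %s\n", bit,
	       bit >= 32 ? "LAN865X_REG_MAC_H_HASH" : "LAN865X_REG_MAC_L_HASH");
	return 0;
}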
+
+static int lan865x_set_specific_multicast_addr(struct lan865x_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash_lo = 0;
+ u32 hash_hi = 0;
+ int ret;
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 bit_num = lan865x_hash(ha->addr);
+
+ if (bit_num >= BIT(5))
+ hash_hi |= (1 << (bit_num - BIT(5)));
+ else
+ hash_lo |= (1 << bit_num);
+ }
+
+ /* Enabling specific multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, hash_hi);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, hash_lo);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_set_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ /* Enabling all multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH,
+ 0xffffffff);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH,
+ 0xffffffff);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_clear_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, 0);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, 0);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void lan865x_multicast_work_handler(struct work_struct *work)
+{
+ struct lan865x_priv *priv = container_of(work, struct lan865x_priv,
+ multicast_work);
+ u32 regval = 0;
+ int ret;
+
+ if (priv->netdev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ regval |= MAC_NET_CFG_PROMISCUOUS_MODE;
+ regval &= (~MAC_NET_CFG_MULTICAST_MODE);
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (priv->netdev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ if (lan865x_set_all_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (!netdev_mc_empty(priv->netdev)) {
+ /* Enabling specific multicast mode */
+ if (lan865x_set_specific_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else {
+		/* Enabling local MAC address only */
+ if (lan865x_clear_all_multicast_addr(priv))
+ return;
+ }
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CFG, regval);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to enable promiscuous/multicast/normal mode: %d\n",
+ ret);
+}
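
The work handler above programs one of four RX filter modes; summarized for reference (MAC_NET_CFG is written last in every case):

/*
 * netdev flags          MAC_NET_CFG bits set    hash registers
 * IFF_PROMISC           PROMISCUOUS_MODE        left untouched
 * IFF_ALLMULTI          MULTICAST_MODE          both written 0xffffffff
 * mc list non-empty     MULTICAST_MODE          per-address hash bits
 * otherwise             none                    both cleared to 0
 */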
+
+static void lan865x_set_multicast_list(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ schedule_work(&priv->multicast_work);
+}
+
+static netdev_tx_t lan865x_send_packet(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ return oa_tc6_start_xmit(priv->tc6, skb);
+}
+
+static int lan865x_hw_disable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval &= ~(MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN);
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_close(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ netif_stop_queue(netdev);
+ phy_stop(netdev->phydev);
+ ret = lan865x_hw_disable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to disable the hardware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan865x_hw_enable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval |= MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN;
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_open(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = lan865x_hw_enable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to enable hardware: %d\n", ret);
+ return ret;
+ }
+
+ phy_start(netdev->phydev);
+
+ return 0;
+}
+
+static const struct net_device_ops lan865x_netdev_ops = {
+ .ndo_open = lan865x_net_open,
+ .ndo_stop = lan865x_net_close,
+ .ndo_start_xmit = lan865x_send_packet,
+ .ndo_set_rx_mode = lan865x_set_multicast_list,
+ .ndo_set_mac_address = lan865x_set_mac_address,
+};
+
+static int lan865x_probe(struct spi_device *spi)
+{
+ struct net_device *netdev;
+ struct lan865x_priv *priv;
+ int ret;
+
+ netdev = alloc_etherdev(sizeof(struct lan865x_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->spi = spi;
+ spi_set_drvdata(spi, priv);
+ INIT_WORK(&priv->multicast_work, lan865x_multicast_work_handler);
+
+ priv->tc6 = oa_tc6_init(spi, netdev);
+ if (!priv->tc6) {
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+
+	/* As per point s3 in the errata below, SPI receive Ethernet frame
+ * transfer may halt when starting the next frame in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+ * configured to 01b or 10b for proper operation. In these modes, only
+ * one receive Ethernet frame will be placed in a single data block.
+ * When the RFA field is written to 01b, received frames will be forced
+ * to only start in the first word of the data block payload (SWO=0). As
+ * recommended, enable zero align receive frame feature for proper
+ * operation.
+ *
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/Errata/LAN8650-1-Errata-80001075.pdf
+ */
+ ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to set ZARFE: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ /* Get the MAC address from the SPI device tree node */
+ if (device_get_ethdev_address(&spi->dev, netdev))
+ eth_hw_addr_random(netdev);
+
+ ret = lan865x_set_hw_macaddr(priv, netdev->dev_addr);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to configure MAC: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->irq = spi->irq;
+ netdev->netdev_ops = &lan865x_netdev_ops;
+ netdev->ethtool_ops = &lan865x_ethtool_ops;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&spi->dev, "Register netdev failed (ret = %d)", ret);
+ goto oa_tc6_exit;
+ }
+
+ return 0;
+
+oa_tc6_exit:
+ oa_tc6_exit(priv->tc6);
+free_netdev:
+ free_netdev(priv->netdev);
+ return ret;
+}
+
+static void lan865x_remove(struct spi_device *spi)
+{
+ struct lan865x_priv *priv = spi_get_drvdata(spi);
+
+ cancel_work_sync(&priv->multicast_work);
+ unregister_netdev(priv->netdev);
+ oa_tc6_exit(priv->tc6);
+ free_netdev(priv->netdev);
+}
+
+static const struct spi_device_id spidev_spi_ids[] = {
+ { .name = "lan8650" },
+ {},
+};
+
+static const struct of_device_id lan865x_dt_ids[] = {
+ { .compatible = "microchip,lan8650" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
+
+static struct spi_driver lan865x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lan865x_dt_ids,
+ },
+ .probe = lan865x_probe,
+ .remove = lan865x_remove,
+ .id_table = spidev_spi_ids,
+};
+module_spi_driver(lan865x_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " 10BASE-T1S MACPHY Ethernet Driver");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index f9ebffc04eb8..f663b6e12466 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -8,6 +8,7 @@ config LAN966X_SWITCH
select PHYLINK
select PAGE_POOL
select VCAP
+ select FDMA
help
This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 3b6ac331691d..4cdbe263502c 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index aec7066d83b3..2474dfd330f4 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -549,16 +549,13 @@ static int lan966x_get_ts_info(struct net_device *dev,
phc = &lan966x->phc[LAN966X_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 3960534ac2ad..502670718104 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -6,31 +6,55 @@
#include "lan966x_main.h"
-static int lan966x_fdma_channel_active(struct lan966x *lan966x)
-{
- return lan_rd(lan966x, FDMA_CH_ACTIVE);
-}
-
-static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
- struct lan966x_db *db)
+static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+ struct lan966x_rx *rx = &lan966x->rx;
struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
- return NULL;
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+ *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
- db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
- return page;
+ return 0;
+}
+
+static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct lan966x *lan966x = (struct lan966x *)fdma->priv;
+
+ *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
int i, j;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ for (j = 0; j < fdma->n_dbs; ++j)
page_pool_put_full_page(rx->page_pool,
rx->page[i][j], false);
}
@@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
struct page *page;
- page = rx->page[rx->dcb_index][rx->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return;
page_pool_recycle_direct(rx->page_pool, page);
}
-static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
- struct lan966x_rx_dcb *dcb,
- u64 nextptr)
-{
- struct lan966x_db *db;
- int i;
-
- for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
- db = &dcb->db[i];
- db->status = FDMA_DCB_STATUS_INTR;
- }
-
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
-
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
-}
-
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
struct page_pool_params pp_params = {
.order = rx->page_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .pool_size = FDMA_DCB_MAX,
+ .pool_size = rx->fdma.n_dcbs,
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
@@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_rx_dcb *dcb;
- struct lan966x_db *db;
- struct page *page;
- int i, j;
- int size;
+ struct fdma *fdma = &rx->fdma;
+ int err;
if (lan966x_fdma_rx_alloc_page_pool(rx))
return PTR_ERR(rx->page_pool);
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
-
- rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
- if (!rx->dcbs)
- return -ENOMEM;
-
- rx->last_entry = rx->dcbs;
- rx->db_index = 0;
- rx->dcb_index = 0;
-
- /* Now for each dcb allocate the dbs */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &rx->dcbs[i];
- dcb->info = 0;
-
- /* For each db allocate a page and map it to the DB dataptr. */
- for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (!page)
- return -ENOMEM;
-
- db->status = 0;
- rx->page[i][j] = page;
- }
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
+ return err;
- lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
- }
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
return 0;
}
-static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
-{
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
-}
-
-static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
-{
- struct lan966x *lan966x = rx->lan966x;
- u32 size;
-
- /* Now it is possible to do the cleanup of dcb */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
-}
-
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 mask;
	/* When activating a channel, the first DCB address must be written
	 * before the channel can be activated
*/
- lan_wr(lower_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP(rx->channel_id));
- lan_wr(upper_32_bits((u64)rx->dma), lan966x,
- FDMA_DCB_LLP1(rx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(rx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
@@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(rx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(rx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
}
@@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
struct lan966x *lan966x = rx->lan966x;
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
-static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
- struct lan966x_tx_dcb *dcb)
-{
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = 0;
-}
-
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
- struct lan966x_db *db;
- int size;
- int i, j;
+ struct fdma *fdma = &tx->fdma;
+ int err;
- tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf),
GFP_KERNEL);
if (!tx->dcbs_buf)
return -ENOMEM;
- /* calculate how many pages are needed to allocate the dcbs */
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
- if (!tx->dcbs)
+ err = fdma_alloc_coherent(lan966x->dev, fdma);
+ if (err)
goto out;
- /* Now for each dcb allocate the db */
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
- dcb = &tx->dcbs[i];
-
- for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
- db = &dcb->db[j];
- db->dataptr = 0;
- db->status = 0;
- }
-
- lan966x_fdma_tx_add_dcb(tx, dcb);
- }
+ fdma_dcbs_init(fdma, 0, 0);
return 0;
@@ -280,33 +221,30 @@ out:
static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- int size;
kfree(tx->dcbs_buf);
-
- size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+ fdma_free_coherent(lan966x->dev, &tx->fdma);
}
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 mask;
	/* When activating a channel, the first DCB address must be written
	 * before the channel can be activated
*/
- lan_wr(lower_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP(tx->channel_id));
- lan_wr(upper_32_bits((u64)tx->dma), lan966x,
- FDMA_DCB_LLP1(tx->channel_id));
+ lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP(fdma->channel_id));
+ lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
+ FDMA_DCB_LLP1(fdma->channel_id));
- lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
FDMA_CH_CFG_CH_MEM_SET(1),
- lan966x, FDMA_CH_CFG(tx->channel_id));
+ lan966x, FDMA_CH_CFG(fdma->channel_id));
/* Start fdma */
lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
@@ -316,13 +254,13 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
/* Enable interrupts */
mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
- mask |= BIT(tx->channel_id);
+ mask |= BIT(fdma->channel_id);
lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
FDMA_INTR_DB_ENA_INTR_DB_ENA,
lan966x, FDMA_INTR_DB_ENA);
/* Activate the channel */
- lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
FDMA_CH_ACTIVATE_CH_ACTIVATE,
lan966x, FDMA_CH_ACTIVATE);
}
@@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
+ struct fdma *fdma = &tx->fdma;
u32 val;
/* Disable the channel */
- lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)),
FDMA_CH_DISABLE_CH_DISABLE,
lan966x, FDMA_CH_DISABLE);
readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
- val, !(val & BIT(tx->channel_id)),
+ val, !(val & BIT(fdma->channel_id)),
READL_SLEEP_US, READL_TIMEOUT_US);
- lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)),
FDMA_CH_DB_DISCARD_DB_DISCARD,
lan966x, FDMA_CH_DB_DISCARD);
tx->activated = false;
- tx->last_in_use = -1;
}
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
struct lan966x *lan966x = tx->lan966x;
/* Write the registers to reload the channel */
- lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)),
FDMA_CH_RELOAD_CH_RELOAD,
lan966x, FDMA_CH_RELOAD);
}
@@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_rx *rx = &lan966x->rx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
struct xdp_frame_bulk bq;
- struct lan966x_db *db;
unsigned long flags;
bool clear = false;
+ struct fdma_db *db;
int i;
xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags);
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
if (!dcb_buf->used)
continue;
- db = &tx->dcbs[i].db[0];
- if (!(db->status & FDMA_DCB_STATUS_DONE))
+ db = fdma_db_get(fdma, i, 0);
+ if (!fdma_db_is_done(db))
continue;
dcb_buf->dev->stats.tx_packets++;
@@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
-static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
-{
- struct lan966x_db *db;
-
- /* Check if there is any data */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
- return false;
-
- return true;
-}
-
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
struct lan966x *lan966x = rx->lan966x;
+ struct fdma *fdma = &rx->fdma;
struct lan966x_port *port;
- struct lan966x_db *db;
+ struct fdma_db *db;
struct page *page;
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
if (unlikely(!page))
return FDMA_ERROR;
@@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
u64 src_port)
{
struct lan966x *lan966x = rx->lan966x;
- struct lan966x_db *db;
+ struct fdma *fdma = &rx->fdma;
struct sk_buff *skb;
+ struct fdma_db *db;
struct page *page;
u64 timestamp;
/* Get the received frame and unmap it */
- db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
- page = rx->page[rx->dcb_index][rx->db_index];
+ db = fdma_db_next_get(fdma);
+ page = rx->page[fdma->dcb_index][fdma->db_index];
- skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ skb = build_skb(page_address(page), fdma->db_size);
if (unlikely(!skb))
goto free_page;
@@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
struct lan966x_rx *rx = &lan966x->rx;
- int dcb_reload = rx->dcb_index;
- struct lan966x_rx_dcb *old_dcb;
- struct lan966x_db *db;
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
bool redirect = false;
struct sk_buff *skb;
- struct page *page;
- int counter = 0;
u64 src_port;
- u64 nextptr;
+
+ dcb_reload = fdma->dcb_index;
lan966x_fdma_tx_clear_buf(lan966x, weight);
/* Get all received skb */
while (counter < weight) {
- if (!lan966x_fdma_rx_more_frames(rx))
+ if (!fdma_has_frames(fdma))
break;
counter++;
@@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
break;
case FDMA_ERROR:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
goto allocate_new;
case FDMA_REDIRECT:
redirect = true;
fallthrough;
case FDMA_TX:
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
continue;
}
skb = lan966x_fdma_rx_get_frame(rx, src_port);
- lan966x_fdma_rx_advance_dcb(rx);
+ fdma_dcb_advance(fdma);
if (!skb)
goto allocate_new;
@@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
allocate_new:
/* Allocate new pages and map them */
- while (dcb_reload != rx->dcb_index) {
- db = &rx->dcbs[dcb_reload].db[rx->db_index];
- page = lan966x_fdma_rx_alloc_page(rx, db);
- if (unlikely(!page))
- break;
- rx->page[dcb_reload][rx->db_index] = page;
-
- old_dcb = &rx->dcbs[dcb_reload];
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
dcb_reload++;
- dcb_reload &= FDMA_DCB_MAX - 1;
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
- nextptr = rx->dma + ((unsigned long)old_dcb -
- (unsigned long)rx->dcbs);
- lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
lan966x_fdma_rx_reload(rx);
}
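Note the wrap in `dcb_reload &= fdma->n_dcbs - 1`: masking is only a correct modulo when n_dcbs is a power of two, as FDMA_DCB_MAX is. A tiny userspace demo of the wraparound, with hypothetical values:

#include <stdio.h>

/* Index wrap via `& (n_dcbs - 1)`, valid only for power-of-two sizes */
int main(void)
{
	int n_dcbs = 8, idx = 6;

	for (int i = 0; i < 4; i++) {
		printf("%d ", idx);
		idx = (idx + 1) & (n_dcbs - 1);
	}
	printf("\n");	/* prints: 6 7 0 1 */
	return 0;
}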
@@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct fdma *fdma = &tx->fdma;
int i;
- for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (i = 0; i < fdma->n_dcbs; ++i) {
dcb_buf = &tx->dcbs_buf[i];
- if (!dcb_buf->used && i != tx->last_in_use)
+ if (!dcb_buf->used &&
+ !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
return i;
}
return -1;
}
-static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
- int next_to_use, int len,
- dma_addr_t dma_addr)
-{
- struct lan966x_tx_dcb *next_dcb;
- struct lan966x_db *next_db;
-
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(len);
-}
-
-static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx)
{
struct lan966x *lan966x = tx->lan966x;
- struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
lan966x_fdma_tx_reload(tx);
} else {
/* Because it is the first time, just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
}
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
@@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- xdpf->len + IFH_LEN_BYTES,
- dma_addr);
} else {
page = ptr;
@@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->data.page = page;
next_dcb_buf->len = len + IFH_LEN_BYTES;
-
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use,
- len + IFH_LEN_BYTES,
- dma_addr + XDP_PACKET_HEADROOM);
}
/* Fill up the buffer */
@@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
+ __fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len),
+ &fdma_nextptr_cb,
+ &lan966x_fdma_xdp_tx_dataptr_cb);
+
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
out:
spin_unlock(&lan966x->tx_lock);
@@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
goto release;
}
- /* Setup next dcb */
- lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
-
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = true;
@@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
next_dcb_buf->ptp = false;
next_dcb_buf->dev = dev;
+ fdma_dcb_add(&tx->fdma,
+ next_to_use,
+ 0,
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len));
+
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
/* Start the transmission */
- lan966x_fdma_tx_start(tx, next_to_use);
+ lan966x_fdma_tx_start(tx);
return NETDEV_TX_OK;
@@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
struct page_pool *page_pool;
- dma_addr_t rx_dma;
- void *rx_dcbs;
- u32 size;
+ struct fdma fdma_rx_old;
int err;
/* Store these for later to free them */
- rx_dma = lan966x->rx.dma;
- rx_dcbs = lan966x->rx.dcbs;
+ memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
page_pool = lan966x->rx.page_pool;
napi_synchronize(&lan966x->napi);
@@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
goto restore;
lan966x_fdma_rx_start(&lan966x->rx);
- size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+ fdma_free_coherent(lan966x->dev, &fdma_rx_old);
page_pool_destroy(page_pool);
@@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
return err;
restore:
lan966x->rx.page_pool = page_pool;
- lan966x->rx.dma = rx_dma;
- lan966x->rx.dcbs = rx_dcbs;
+ memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
lan966x_fdma_rx_start(&lan966x->rx);
return err;
@@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x)
return 0;
lan966x->rx.lan966x = lan966x;
- lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
+ lan966x->rx.fdma.priv = lan966x;
+ lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
+ lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
lan966x->tx.lan966x = lan966x;
- lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
- lan966x->tx.last_in_use = -1;
+ lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
+ lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
+ lan966x->tx.fdma.priv = lan966x;
+ lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
+ lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
+ lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
+ lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
err = lan966x_fdma_rx_alloc(&lan966x->rx);
if (err)
@@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
err = lan966x_fdma_tx_alloc(&lan966x->tx);
if (err) {
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
return err;
}
@@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
napi_disable(&lan966x->napi);
lan966x_fdma_rx_free_pages(&lan966x->rx);
- lan966x_fdma_rx_free(&lan966x->rx);
+ fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
page_pool_destroy(lan966x->rx.page_pool);
lan966x_fdma_tx_free(&lan966x->tx);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index ec672af12e25..534d4716d5f7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -816,7 +816,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
- dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
+ dev->see_all_hwtstamp_requests = true;
dev->needed_headroom = IFH_LEN_BYTES;
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index f8bebbcf77b2..25cb2f61986f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -16,6 +16,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
+#include <fdma_api.h>
#include <vcap_api.h>
#include <vcap_api_client.h>
@@ -76,15 +77,6 @@
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
@@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt {
struct lan966x_port;
-struct lan966x_db {
- u64 dataptr;
- u64 status;
-};
-
-struct lan966x_rx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct lan966x_tx_dcb {
- u64 nextptr;
- u64 info;
- struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
-};
-
struct lan966x_rx {
struct lan966x *lan966x;
- /* Pointer to the array of hardware dcbs. */
- struct lan966x_rx_dcb *dcbs;
-
- /* Pointer to the last address in the dcbs. */
- struct lan966x_rx_dcb *last_entry;
+ struct fdma fdma;
/* For each DB, there is a page */
struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- /* Represents the db_index, it can have a value between 0 and
- * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS
- * it means that the DCB can be reused.
- */
- int db_index;
-
- /* Represents the index in the dcbs. It has a value between 0 and
- * FDMA_DCB_MAX
- */
- int dcb_index;
-
- /* Represents the dma address to the dcbs array */
- dma_addr_t dma;
-
/* Represents the page order that is used to allocate the pages for the
* RX buffers. This value is calculated based on max MTU of the devices.
*/
@@ -252,8 +209,6 @@ struct lan966x_rx {
*/
u32 max_mtu;
- u8 channel_id;
-
struct page_pool *page_pool;
};
@@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf {
struct lan966x_tx {
struct lan966x *lan966x;
- /* Pointer to the dcb list */
- struct lan966x_tx_dcb *dcbs;
- u16 last_in_use;
-
- /* Represents the DMA address to the first entry of the dcb entries. */
- dma_addr_t dma;
+ struct fdma fdma;
/* Array of dcbs that are given to the HW */
struct lan966x_tx_dcb_buf *dcbs_buf;
- u8 channel_id;
-
bool activated;
};
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index f58c506bda22..3f04992eace6 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -10,6 +10,7 @@ config SPARX5_SWITCH
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
select VCAP
+ select FDMA
help
This driver supports the Sparx5 network switch device.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index b68fe9c9a656..288de95add18 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -18,3 +18,4 @@ sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 4f800c1a435d..d898a7238b48 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1194,16 +1194,13 @@ static int sparx5_get_ts_info(struct net_device *dev,
phc = &sparx5->phc[SPARX5_PHC_PORT];
- info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (phc->clock) {
+ info->phc_index = ptp_clock_index(phc->clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 1915998f6079..61df874b7623 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -21,107 +21,51 @@
#define FDMA_XTR_CHANNEL 6
#define FDMA_INJ_CHANNEL 0
-#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_INFO_TOKEN BIT(17)
-#define FDMA_DCB_INFO_INTR BIT(18)
-#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24))
-
-#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
-#define FDMA_DCB_STATUS_SOF BIT(16)
-#define FDMA_DCB_STATUS_EOF BIT(17)
-#define FDMA_DCB_STATUS_INTR BIT(18)
-#define FDMA_DCB_STATUS_DONE BIT(19)
-#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
-#define FDMA_DCB_INVALID_DATA 0x1
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
-/* Frame DMA DCB format
- *
- * +---------------------------+
- * | Next Ptr |
- * +---------------------------+
- * | Reserved | Info |
- * +---------------------------+
- * | Data0 Ptr |
- * +---------------------------+
- * | Reserved | Status0 |
- * +---------------------------+
- * | Data1 Ptr |
- * +---------------------------+
- * | Reserved | Status1 |
- * +---------------------------+
- * | Data2 Ptr |
- * +---------------------------+
- * | Reserved | Status2 |
- * |-------------|-------------|
- * | |
- * | |
- * | |
- * | |
- * | |
- * |---------------------------|
- * | Data14 Ptr |
- * +-------------|-------------+
- * | Reserved | Status14 |
- * +-------------|-------------+
- */
-
-/* For each hardware DB there is an entry in this list and when the HW DB
- * entry is used, this SW DB entry is moved to the back of the list
- */
-struct sparx5_db {
- struct list_head list;
- void *cpu_addr;
-};
-
-static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
- struct sparx5_rx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
-
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ *dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((dcb * fdma->n_dbs + db) * fdma->db_size);
- db->status = FDMA_DCB_STATUS_INTR;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
- rx->last_entry->nextptr = nextptr;
- rx->last_entry = dcb;
+ return 0;
}
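The TX dataptr callback assumes one contiguous allocation: the DCB array first, then the data buffers packed per DCB. A standalone sketch of the same offset arithmetic, with made-up sizes, shows where each DB lands:

#include <stdint.h>
#include <stdio.h>

/* Same offset math as sparx5_fdma_tx_dataptr_cb(), hypothetical sizes:
 * the DCB array comes first, then n_dbs buffers per DCB. */
int main(void)
{
	uint64_t dma = 0x10000000;	/* base of the allocation */
	uint64_t dcb_sz = 128;		/* sizeof(struct fdma_dcb), say */
	uint64_t db_size = 2048;	/* per-DB buffer size */
	int n_dcbs = 64, n_dbs = 1;

	for (int dcb = 0; dcb < 2; dcb++)
		for (int db = 0; db < n_dbs; db++)
			printf("dcb %d db %d -> 0x%llx\n", dcb, db,
			       (unsigned long long)(dma + dcb_sz * n_dcbs +
						    (dcb * n_dbs + db) * db_size));
	return 0;
}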
-static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb,
- u64 nextptr)
+static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
{
- int idx = 0;
+ struct sparx5 *sparx5 = fdma->priv;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sk_buff *skb;
- /* Reset the status of the DB */
- for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
- struct sparx5_db_hw *db = &dcb->db[idx];
+ skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
- db->status = FDMA_DCB_STATUS_DONE;
- }
- dcb->nextptr = FDMA_DCB_INVALID_DATA;
- dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
+ *dataptr = virt_to_phys(skb->data);
+
+ rx->skb[dcb][db] = skb;
+
+ return 0;
}
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(rx->channel_id));
- spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
- sparx5, FDMA_CH_CFG(rx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Set the RX Watermark to max */
spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
@@ -133,22 +77,24 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
sparx5, FDMA_PORT_CTRL(0));
/* Enable RX channel DB interrupt */
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Activate the RX channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
+ struct fdma *fdma = &rx->fdma;
+
/* Deactivate the RX channel */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
/* Disable RX channel DB interrupt */
- spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
/* Stop RX fdma */
@@ -158,75 +104,55 @@ static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *r
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
+ struct fdma *fdma = &tx->fdma;
+
/* Write the buffer address in the LLP and LLP1 regs */
- spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
- FDMA_DCB_LLP(tx->channel_id));
- spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));
+ spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
+ FDMA_DCB_LLP(fdma->channel_id));
+ spx5_wr(((u64)fdma->dma) >> 32, sparx5,
+ FDMA_DCB_LLP1(fdma->channel_id));
/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
- spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
- sparx5, FDMA_CH_CFG(tx->channel_id));
+ sparx5, FDMA_CH_CFG(fdma->channel_id));
/* Start TX fdma */
spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
sparx5, FDMA_PORT_CTRL(0));
/* Activate the channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}
static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
/* Disable the channel */
- spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
+static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the channel */
- spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
-{
- /* Reload the TX channel */
- spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
-}
-
-static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
-{
- return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
- GFP_ATOMIC);
+ spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- struct sparx5_db_hw *db_hw;
- unsigned int packet_size;
+ struct fdma *fdma = &rx->fdma;
struct sparx5_port *port;
- struct sk_buff *new_skb;
+ struct fdma_db *db_hw;
struct frame_info fi;
struct sk_buff *skb;
- dma_addr_t dma_addr;
/* Check if the DCB is done */
- db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
- if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
- return false;
- skb = rx->skb[rx->dcb_index][rx->db_index];
- /* Replace the DB entry with a new SKB */
- new_skb = sparx5_fdma_rx_alloc_skb(rx);
- if (unlikely(!new_skb))
+ db_hw = fdma_db_next_get(fdma);
+ if (unlikely(!fdma_db_is_done(db_hw)))
return false;
- /* Map the new skb data and set the new skb */
- dma_addr = virt_to_phys(new_skb->data);
- rx->skb[rx->dcb_index][rx->db_index] = new_skb;
- db_hw->dataptr = dma_addr;
- packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
- skb_put(skb, packet_size);
+ skb = rx->skb[fdma->dcb_index][fdma->db_index];
+ skb_put(skb, fdma_db_len_get(db_hw));
/* Now do the normal processing of the skb */
sparx5_ifh_parse((u32 *)skb->data, &fi);
/* Map to port netdev */
@@ -259,84 +185,62 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ struct fdma *fdma = &rx->fdma;
int counter = 0;
while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
- struct sparx5_rx_dcb_hw *old_dcb;
-
- rx->db_index++;
+ fdma_db_advance(fdma);
counter++;
/* Check if the DCB can be reused */
- if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
+ if (fdma_dcb_is_reusable(fdma))
continue;
- /* As the DCB can be reused, just advance the dcb_index
- * pointer and set the nextptr in the DCB
- */
- rx->db_index = 0;
- old_dcb = &rx->dcb_entries[rx->dcb_index];
- rx->dcb_index++;
- rx->dcb_index &= FDMA_DCB_MAX - 1;
- sparx5_fdma_rx_add_dcb(rx, old_dcb,
- rx->dma +
- ((unsigned long)old_dcb -
- (unsigned long)rx->dcb_entries));
+ fdma_dcb_add(fdma, fdma->dcb_index,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
}
if (counter < weight) {
napi_complete_done(&rx->napi, counter);
- spx5_rmw(BIT(rx->channel_id),
- BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ spx5_rmw(BIT(fdma->channel_id),
+ BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
sparx5, FDMA_INTR_DB_ENA);
}
if (counter)
- sparx5_fdma_rx_reload(sparx5, rx);
+ sparx5_fdma_reload(sparx5, fdma);
return counter;
}
-static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
- struct sparx5_tx_dcb_hw *dcb)
-{
- struct sparx5_tx_dcb_hw *next_dcb;
-
- next_dcb = dcb;
- next_dcb++;
- /* Handle wrap-around */
- if ((unsigned long)next_dcb >=
- ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
- next_dcb = tx->first_entry;
- return next_dcb;
-}
-
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
- struct sparx5_tx_dcb_hw *next_dcb_hw;
struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
static bool first_time = true;
- struct sparx5_db_hw *db_hw;
- struct sparx5_db *db;
+ void *virt_addr;
- next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
- db_hw = &next_dcb_hw->db[0];
- if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
+ fdma_dcb_advance(fdma);
+ if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
return -EINVAL;
- db = list_first_entry(&tx->db_list, struct sparx5_db, list);
- list_move_tail(&db->list, &tx->db_list);
- next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
- tx->curr_entry->nextptr = tx->dma +
- ((unsigned long)next_dcb_hw -
- (unsigned long)tx->first_entry);
- tx->curr_entry = next_dcb_hw;
- memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
- memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
- memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
- db_hw->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
+
+ /* Get the virtual address of the dataptr for the next DB */
+ virt_addr = ((u8 *)fdma->dcbs +
+ (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));
+
+ memcpy(virt_addr, ifh, IFH_LEN * 4);
+ memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);
+
+ fdma_dcb_add(fdma, fdma->dcb_index, 0,
+ FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
+
if (first_time) {
sparx5_fdma_tx_activate(sparx5, tx);
first_time = false;
} else {
- sparx5_fdma_tx_reload(sparx5, tx);
+ sparx5_fdma_reload(sparx5, fdma);
}
return NETDEV_TX_OK;
}
@@ -344,43 +248,16 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
struct sparx5_rx *rx = &sparx5->rx;
- struct sparx5_rx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!rx->dcb_entries)
- return -ENOMEM;
- rx->dma = virt_to_phys(rx->dcb_entries);
- rx->last_entry = rx->dcb_entries;
- rx->db_index = 0;
- rx->dcb_index = 0;
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &rx->dcb_entries[idx];
- dcb->info = 0;
- /* For each db allocate an skb and map skb data pointer to the DB
- * dataptr. In this way when the frame is received the skb->data
- * will contain the frame, so no memcpy is needed
- */
- for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- dma_addr_t dma_addr;
- struct sk_buff *skb;
-
- skb = sparx5_fdma_rx_alloc_skb(rx);
- if (!skb)
- return -ENOMEM;
-
- dma_addr = virt_to_phys(skb->data);
- db_hw->dataptr = dma_addr;
- db_hw->status = 0;
- rx->skb[idx][jdx] = skb;
- }
- sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
- }
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
FDMA_WEIGHT);
napi_enable(&rx->napi);
@@ -391,57 +268,33 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
struct sparx5_tx *tx = &sparx5->tx;
- struct sparx5_tx_dcb_hw *dcb;
- int idx, jdx;
- int size;
-
- size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
- size = ALIGN(size, PAGE_SIZE);
- tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
- if (!tx->curr_entry)
- return -ENOMEM;
- tx->dma = virt_to_phys(tx->curr_entry);
- tx->first_entry = tx->curr_entry;
- INIT_LIST_HEAD(&tx->db_list);
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
- dcb = &tx->curr_entry[idx];
- dcb->info = 0;
- /* TX databuffers must be 16byte aligned */
- for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
- struct sparx5_db_hw *db_hw = &dcb->db[jdx];
- struct sparx5_db *db;
- dma_addr_t phys;
- void *cpu_addr;
-
- cpu_addr = devm_kzalloc(sparx5->dev,
- FDMA_XTR_BUFFER_SIZE,
- GFP_KERNEL);
- if (!cpu_addr)
- return -ENOMEM;
- phys = virt_to_phys(cpu_addr);
- db_hw->dataptr = phys;
- db_hw->status = 0;
- db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
- db->cpu_addr = cpu_addr;
- list_add_tail(&db->list, &tx->db_list);
- }
- sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
- /* Let the curr_entry to point to the last allocated entry */
- if (idx == FDMA_DCB_MAX - 1)
- tx->curr_entry = dcb;
- }
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ err = fdma_alloc_phys(fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
return 0;
}
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
struct sparx5_rx *rx, int channel)
{
+ struct fdma *fdma = &rx->fdma;
int idx;
- rx->channel_id = channel;
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size(&sparx5->rx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
/* Fetch a netdev for SKB and NAPI use, any will do */
for (idx = 0; idx < SPX5_PORTS; ++idx) {
struct sparx5_port *port = sparx5->ports[idx];
@@ -456,7 +309,16 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
struct sparx5_tx *tx, int channel)
{
- tx->channel_id = channel;
+ struct fdma *fdma = &tx->fdma;
+
+ fdma->channel_id = channel;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
+ fdma->priv = sparx5;
+ fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
+ fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
+ fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
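RX sizes its allocation with fdma_get_size() while TX uses fdma_get_size_contiguous(): RX data buffers are skb-backed, whereas TX carries its buffers in the same allocation as the DCBs. The helpers themselves are not shown in this diff; plausibly they amount to something like this sketch (names and shapes inferred from the call sites):

/* Sketch only: not the library implementation, just its apparent shape */
static inline u32 fdma_get_size(struct fdma *fdma)
{
	/* DCB array only; the DBs live elsewhere (skbs, pages) */
	return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE);
}

static inline u32 fdma_get_size_contiguous(struct fdma *fdma)
{
	/* DCB array followed by all DB buffers in one allocation */
	return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs +
		     fdma->n_dcbs * fdma->n_dbs * fdma->db_size, PAGE_SIZE);
}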
irqreturn_t sparx5_fdma_handler(int irq, void *args)
@@ -594,5 +456,7 @@ int sparx5_fdma_stop(struct sparx5 *sparx5)
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 1982ae03b4fe..3309060b1e4c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -20,6 +20,8 @@
#include <linux/debugfs.h>
#include <net/flow_offload.h>
+#include <fdma_api.h>
+
#include "sparx5_main_regs.h"
/* Target chip type */
@@ -100,23 +102,6 @@ enum sparx5_vlan_port_type {
struct sparx5;
-struct sparx5_db_hw {
- u64 dataptr;
- u64 status;
-};
-
-struct sparx5_rx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
-};
-
-struct sparx5_tx_dcb_hw {
- u64 nextptr;
- u64 info;
- struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
-};
-
/* Frame DMA receive state:
* For each DB, there is a SKB, and the skb data pointer is mapped in
* the DB. Once a frame is received the skb is given to the upper layers
@@ -124,14 +109,10 @@ struct sparx5_tx_dcb_hw {
* When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused.
*/
struct sparx5_rx {
- struct sparx5_rx_dcb_hw *dcb_entries;
- struct sparx5_rx_dcb_hw *last_entry;
+ struct fdma fdma;
struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
- int db_index;
- int dcb_index;
dma_addr_t dma;
struct napi_struct napi;
- u32 channel_id;
struct net_device *ndev;
u64 packets;
};
@@ -140,11 +121,7 @@ struct sparx5_rx {
* DCBs are chained using the DCBs nextptr field.
*/
struct sparx5_tx {
- struct sparx5_tx_dcb_hw *curr_entry;
- struct sparx5_tx_dcb_hw *first_entry;
- struct list_head db_list;
- dma_addr_t dma;
- u32 channel_id;
+ struct fdma fdma;
u64 packets;
u64 dropped;
};
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ddb8f68d80a2..ca4ed58f1206 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
- err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma device segment size\n");
- goto release_region;
- }
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 3d151700f658..c47266d1c7c2 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -511,7 +511,7 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
/* Release pre-allocated RX buffers */
-static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
struct device *dev;
int i;
@@ -608,7 +608,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*datasize = mtu + ETH_HLEN;
}
-static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
{
struct device *dev;
struct page *page;
@@ -622,7 +622,7 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
dev = mpc->ac->gdma_dev->gdma_context->dev;
- num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+ num_rxb = num_queues * mpc->rx_queue_size;
WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
@@ -682,7 +682,7 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
int err;
/* Pre-allocate buffers to prevent failure in mana_attach later */
- err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+ err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
if (err) {
netdev_err(ndev, "Insufficient memory for new MTU\n");
return err;
@@ -1911,15 +1911,17 @@ static int mana_create_txq(struct mana_port_context *apc,
return -ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
- * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * apc->tx_queue_size represents the maximum number of WQEs
* the SQ can store. This value is then used to size other queues
* to prevent overflow.
+ * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
+ * value of apc->tx_queue_size is 128, which makes txq_size
+ * 128 * 32 = 4096, and all larger values of apc->tx_queue_size are
+ * powers of two.
*/
- txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
+ txq_size = apc->tx_queue_size * 32;
- cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = MANA_PAGE_ALIGN(cq_size);
+ cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
gc = gd->gdma_context;
@@ -2159,10 +2161,11 @@ static int mana_push_wqe(struct mana_rxq *rxq)
static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
{
+ struct mana_port_context *mpc = netdev_priv(rxq->ndev);
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.pool_size = mpc->rx_queue_size;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
@@ -2194,13 +2197,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc = gd->gdma_context;
- rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
+ rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
GFP_KERNEL);
if (!rxq)
return NULL;
rxq->ndev = ndev;
- rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->num_rx_buf = apc->rx_queue_size;
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
@@ -2748,6 +2751,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
+ apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
+ apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
apc->port_handle = INVALID_MANA_HANDLE;
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 146d5db1792f..dc3864377538 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -345,27 +345,101 @@ static int mana_set_channels(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int new_count = channels->combined_count;
unsigned int old_count = apc->num_queues;
- int err, err2;
+ int err;
+
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations\n");
+ return err;
+ }
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
- return err;
+ goto out;
}
apc->num_queues = new_count;
err = mana_attach(ndev);
- if (!err)
- return 0;
+ if (err) {
+ apc->num_queues = old_count;
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ }
+
+out:
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
+}
+
+static void mana_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ ring->rx_pending = apc->rx_queue_size;
+ ring->tx_pending = apc->tx_queue_size;
+ ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
+ ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
+}
+
+static int mana_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 new_tx, new_rx;
+ u32 old_tx, old_rx;
+ int err;
- netdev_err(ndev, "mana_attach failed: %d\n", err);
+ old_tx = apc->tx_queue_size;
+ old_rx = apc->rx_queue_size;
- /* Try to roll it back to the old configuration. */
- apc->num_queues = old_count;
- err2 = mana_attach(ndev);
- if (err2)
- netdev_err(ndev, "mana re-attach failed: %d\n", err2);
+ if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
+ MIN_TX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
+ MIN_RX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ new_rx = roundup_pow_of_two(ring->rx_pending);
+ new_tx = roundup_pow_of_two(ring->tx_pending);
+ netdev_info(ndev, "Rounding up to power of 2 values Txq:%d Rxq:%d\n",
+ new_tx, new_rx);
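roundup_pow_of_two() rounds the requested ring sizes up to the next power of two so the index-masking tricks used elsewhere keep working. A userspace stand-in (not the kernel helper) illustrating the resulting values:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of roundup_pow_of_two() semantics for x >= 1 */
static uint32_t roundup_p2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	printf("%u -> %u\n", 300u, roundup_p2(300));	/* 300 -> 512 */
	printf("%u -> %u\n", 1024u, roundup_p2(1024));	/* 1024 -> 1024 */
	return 0;
}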
+
+ /* pre-allocating new buffers to prevent failures in mana_attach() later */
+ apc->rx_queue_size = new_rx;
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ apc->rx_queue_size = old_rx;
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations\n");
+ return err;
+ }
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto out;
+ }
+
+ apc->tx_queue_size = new_tx;
+ apc->rx_queue_size = new_rx;
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ apc->tx_queue_size = old_tx;
+ apc->rx_queue_size = old_rx;
+ }
+out:
+ mana_pre_dealloc_rxbufs(apc);
return err;
}
@@ -380,4 +454,6 @@ const struct ethtool_ops mana_ethtool_ops = {
.set_rxfh = mana_set_rxfh,
.get_channels = mana_get_channels,
.set_channels = mana_set_channels,
+ .get_ringparam = mana_get_ringparam,
+ .set_ringparam = mana_set_ringparam,
};
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index b3c28260adf8..e172638b0601 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -582,17 +582,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct kernel_ethtool_ts_info *info)
{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ if (ocelot->ptp_clock) {
+ info->phc_index = ptp_clock_index(ocelot->ptp_clock);
+ } else {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index df2ab5cbd49b..3a02eef58cc6 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4537,8 +4537,8 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
u64 *prog;
int err;
- prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
- GFP_KERNEL);
+ prog = kmemdup_array(nfp_prog->prog, nfp_prog->prog_len, sizeof(u64),
+ GFP_KERNEL);
if (!prog)
return ERR_PTR(-ENOMEM);
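kmemdup_array() takes the element count and element size separately so the multiplication is overflow-checked, unlike the open-coded `prog_len * sizeof(u64)` it replaces. A hypothetical userspace demo of the wrap the checked form guards against:

#include <stdint.h>
#include <stdio.h>

/* With 32-bit size arithmetic, 0x20000001 * 8 wraps to 8, so an
 * unchecked duplicate would be allocated far too small. */
int main(void)
{
	uint32_t n = 0x20000001u, size = 8;
	uint32_t bytes = n * size;	/* wraps modulo 2^32 */

	printf("%u * %u = %u bytes\n", n, size, bytes);	/* prints 8 */
	return 0;
}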
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 182ba0a8b095..6e0929af0f72 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -821,14 +821,13 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
- err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
- r_vec);
+ err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
+ r_vec->name, r_vec);
if (err) {
nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
- disable_irq(r_vec->irq_vector);
irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
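IRQF_NO_AUTOEN requests the IRQ in a disabled state, closing the window in the old request_irq()-then-disable_irq() sequence where the handler could fire before the vector was ready. A sketch of the resulting pattern (identifiers generic, not taken from the driver):

/* IRQ is born disabled, so the handler cannot run against
 * half-initialised state; enable it once setup is complete. */
err = request_irq(irq, handler, IRQF_NO_AUTOEN, name, data);
if (err)
	return err;
/* ... finish per-vector setup ... */
enable_irq(irq);	/* when the datapath is ready */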
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 2dd37557185e..7276e44a21d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -41,6 +41,8 @@ struct nfp_dump_tl {
);
char data[];
};
+static_assert(offsetof(struct nfp_dump_tl, data) == sizeof(struct nfp_dump_tl_hdr),
+ "struct member likely outside of struct_group_tagged()");
/* NFP CPP parameters */
struct nfp_dumpspec_cpp_isl_id {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index eee0bfc41074..227e7a5d712e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -248,7 +248,6 @@ nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
- features |= NETIF_F_LLTX;
return features;
}
@@ -386,7 +385,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
- netdev->features |= NETIF_F_LLTX;
+ netdev->lltx = true;
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 7136bc48530b..df0234a338a8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -278,7 +278,7 @@ struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
if (IS_ERR(res))
- return (void *)res;
+ return ERR_CAST(res);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
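ERR_CAST() replaces the bare `(void *)` cast when propagating an error pointer across pointer types; it documents intent and keeps const-correctness. The ERR_PTR convention it builds on, modelled in userspace (a simplified restatement, not the kernel macros):

#include <stdio.h>

/* Errors ride in the top 4095 pointer values (simplified model) */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

int main(void)
{
	void *res = ERR_PTR(-12);	/* -ENOMEM */

	if (IS_ERR(res))
		printf("propagated error: %ld\n", PTR_ERR(res)); /* -12 */
	return 0;
}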
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
new file mode 100644
index 000000000000..f9c0dcd965c2
--- /dev/null
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface framework
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+/* OPEN Alliance TC6 registers */
+/* Standard Capabilities Register */
+#define OA_TC6_REG_STDCAP 0x0002
+#define STDCAP_DIRECT_PHY_REG_ACCESS BIT(8)
+
+/* Reset Control and Status Register */
+#define OA_TC6_REG_RESET 0x0003
+#define RESET_SWRESET BIT(0) /* Software Reset */
+
+/* Configuration Register #0 */
+#define OA_TC6_REG_CONFIG0 0x0004
+#define CONFIG0_SYNC BIT(15)
+#define CONFIG0_ZARFE_ENABLE BIT(12)
+
+/* Status Register #0 */
+#define OA_TC6_REG_STATUS0 0x0008
+#define STATUS0_RESETC BIT(6) /* Reset Complete */
+#define STATUS0_HEADER_ERROR BIT(5)
+#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4)
+#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3)
+#define STATUS0_TX_PROTOCOL_ERROR BIT(0)
+
+/* Buffer Status Register */
+#define OA_TC6_REG_BUFFER_STATUS 0x000B
+#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8)
+#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0)
+
+/* Interrupt Mask Register #0 */
+#define OA_TC6_REG_INT_MASK0 0x000C
+#define INT_MASK0_HEADER_ERR_MASK BIT(5)
+#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK BIT(4)
+#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK BIT(3)
+#define INT_MASK0_TX_PROTOCOL_ERR_MASK BIT(0)
+
+/* PHY Clause 22 registers base address and mask */
+#define OA_TC6_PHY_STD_REG_ADDR_BASE 0xFF00
+#define OA_TC6_PHY_STD_REG_ADDR_MASK 0x1F
+
+/* Control command header */
+#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ BIT(29)
+#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR GENMASK(27, 24)
+#define OA_TC6_CTRL_HEADER_ADDR GENMASK(23, 8)
+#define OA_TC6_CTRL_HEADER_LENGTH GENMASK(7, 1)
+#define OA_TC6_CTRL_HEADER_PARITY BIT(0)
+
+/* Data header */
+#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_DATA_HEADER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_HEADER_START_VALID BIT(20)
+#define OA_TC6_DATA_HEADER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_HEADER_END_VALID BIT(14)
+#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_HEADER_PARITY BIT(0)
+
+/* Data footer */
+#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31)
+#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30)
+#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29)
+#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24)
+#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_FOOTER_START_VALID BIT(20)
+#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_FOOTER_END_VALID BIT(14)
+#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1)
+
+/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
+ * OPEN Alliance specification.
+ */
+#define OA_TC6_PHY_C45_PCS_MMS2 2 /* MMD 3 */
+#define OA_TC6_PHY_C45_PMA_PMD_MMS3 3 /* MMD 1 */
+#define OA_TC6_PHY_C45_VS_PLCA_MMS4 4 /* MMD 31 */
+#define OA_TC6_PHY_C45_AUTO_NEG_MMS5 5 /* MMD 7 */
+#define OA_TC6_PHY_C45_POWER_UNIT_MMS6 6 /* MMD 13 */
+
+#define OA_TC6_CTRL_HEADER_SIZE 4
+#define OA_TC6_CTRL_REG_VALUE_SIZE 4
+#define OA_TC6_CTRL_IGNORED_SIZE 4
+#define OA_TC6_CTRL_MAX_REGISTERS 128
+#define OA_TC6_CTRL_SPI_BUF_SIZE (OA_TC6_CTRL_HEADER_SIZE +\
+ (OA_TC6_CTRL_MAX_REGISTERS *\
+ OA_TC6_CTRL_REG_VALUE_SIZE) +\
+ OA_TC6_CTRL_IGNORED_SIZE)
+#define OA_TC6_CHUNK_PAYLOAD_SIZE 64
+#define OA_TC6_DATA_HEADER_SIZE 4
+#define OA_TC6_CHUNK_SIZE (OA_TC6_DATA_HEADER_SIZE +\
+ OA_TC6_CHUNK_PAYLOAD_SIZE)
+#define OA_TC6_MAX_TX_CHUNKS 48
+#define OA_TC6_SPI_DATA_BUF_SIZE (OA_TC6_MAX_TX_CHUNKS *\
+ OA_TC6_CHUNK_SIZE)
+#define STATUS0_RESETC_POLL_DELAY 1000
+#define STATUS0_RESETC_POLL_TIMEOUT 1000000
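The buffer sizes above are pure arithmetic; a quick userspace check of the derived constants (restated as plain numbers, assuming the macro values in this file):

#include <assert.h>

#define CTRL_SPI_BUF_SIZE (4 + 128 * 4 + 4)	/* header + 128 regs + ignored */
#define CHUNK_SIZE	  (4 + 64)		/* data header + payload */
#define DATA_BUF_SIZE	  (48 * CHUNK_SIZE)	/* OA_TC6_MAX_TX_CHUNKS */

static_assert(CTRL_SPI_BUF_SIZE == 520, "control buffer size");
static_assert(CHUNK_SIZE == 68, "chunk size");
static_assert(DATA_BUF_SIZE == 3264, "data buffer size");

int main(void) { return 0; }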
+
+/* Internal structure for MAC-PHY drivers */
+struct oa_tc6 {
+ struct device *dev;
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ struct mii_bus *mdiobus;
+ struct spi_device *spi;
+ struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ void *spi_ctrl_tx_buf;
+ void *spi_ctrl_rx_buf;
+ void *spi_data_tx_buf;
+ void *spi_data_rx_buf;
+ struct sk_buff *ongoing_tx_skb;
+ struct sk_buff *waiting_tx_skb;
+ struct sk_buff *rx_skb;
+ struct task_struct *spi_thread;
+ wait_queue_head_t spi_wq;
+ u16 tx_skb_offset;
+ u16 spi_data_tx_buf_offset;
+ u16 tx_credits;
+ u8 rx_chunks_available;
+ bool rx_buf_overflow;
+ bool int_flag;
+};
+
+enum oa_tc6_header_type {
+ OA_TC6_CTRL_HEADER,
+ OA_TC6_DATA_HEADER,
+};
+
+enum oa_tc6_register_op {
+ OA_TC6_CTRL_REG_READ = 0,
+ OA_TC6_CTRL_REG_WRITE = 1,
+};
+
+enum oa_tc6_data_valid_info {
+ OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_VALID,
+};
+
+enum oa_tc6_data_start_valid_info {
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_START_VALID,
+};
+
+enum oa_tc6_data_end_valid_info {
+ OA_TC6_DATA_END_INVALID,
+ OA_TC6_DATA_END_VALID,
+};
+
+static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
+ enum oa_tc6_header_type header_type, u16 length)
+{
+ struct spi_transfer xfer = { 0 };
+ struct spi_message msg;
+
+ if (header_type == OA_TC6_DATA_HEADER) {
+ xfer.tx_buf = tc6->spi_data_tx_buf;
+ xfer.rx_buf = tc6->spi_data_rx_buf;
+ } else {
+ xfer.tx_buf = tc6->spi_ctrl_tx_buf;
+ xfer.rx_buf = tc6->spi_ctrl_rx_buf;
+ }
+ xfer.len = length;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(tc6->spi, &msg);
+}
+
+static int oa_tc6_get_parity(u32 p)
+{
+ /* Public domain code snippet, lifted from
+ * http://www-graphics.stanford.edu/~seander/bithacks.html
+ */
+ p ^= p >> 1;
+ p ^= p >> 2;
+ p = (p & 0x11111111U) * 0x11111111U;
+
+ /* Odd parity is used here */
+ return !((p >> 28) & 1);
+}
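The fold above computes the parity of all 32 bits and returns the value the parity bit must take to make the whole header odd-parity. A userspace cross-check against a naive popcount (GCC/Clang builtin), restating the function verbatim:

#include <stdio.h>

static int get_parity_bit(unsigned int p)
{
	p ^= p >> 1;
	p ^= p >> 2;
	p = (p & 0x11111111u) * 0x11111111u;
	return !((p >> 28) & 1);	/* 1 iff the word has even parity */
}

int main(void)
{
	for (unsigned int v = 0; v < (1u << 16); v++)
		if (get_parity_bit(v) != !(__builtin_popcount(v) & 1))
			printf("mismatch at 0x%x\n", v);
	return 0;	/* a silent run means the bit hack checks out */
}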
+
+static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ u32 header;
+
+ header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
+ OA_TC6_CTRL_HEADER) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
+ header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
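As a worked example of the packing: a single-register read of address 0x0008 (STATUS0, MMS 0) yields header 0x00000800, which already has an odd bit count, so the parity bit stays 0. A userspace re-derivation with the field positions written out by hand (a sketch mirroring the FIELD_PREP() masks above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x0008, length = 1, header = 0;

	header |= 0u << 31;			/* DNC = 0: control */
	header |= 0u << 29;			/* WNR = 0: read */
	header |= ((addr >> 16) & 0xfu) << 24;	/* MMS */
	header |= (addr & 0xffffu) << 8;	/* ADDR */
	header |= ((length - 1) & 0x7fu) << 1;	/* LEN = length - 1 */
	/* bit count is odd already, so P (bit 0) stays 0 */
	printf("header = 0x%08x\n", header);	/* 0x00000800 */
	return 0;
}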
+
+static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ *tx_buf++ = cpu_to_be32(value[i]);
+}
+
+static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
+{
+ /* A control command consists of a 4-byte header, a 4-byte value for
+ * each register, and 4 ignored bytes.
+ */
+ return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
+ OA_TC6_CTRL_IGNORED_SIZE;
+}
+
+static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
+ u32 value[], u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ *tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);
+
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ oa_tc6_update_ctrl_write_data(tc6, value, length);
+}
+
+static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u8 *tx_buf = tc6->spi_ctrl_tx_buf;
+ u8 *rx_buf = tc6->spi_ctrl_rx_buf;
+
+ rx_buf += OA_TC6_CTRL_IGNORED_SIZE;
+
+ /* The echoed control write must match the one that was
+ * transmitted.
+ */
+ if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
+ u32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ /* The echoed control read header must match the one that was
+ * transmitted.
+ */
+ if (*tx_buf != *rx_buf)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
+ OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ value[i] = be32_to_cpu(*rx_buf++);
+}
+
+static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length, enum oa_tc6_register_op reg_op)
+{
+ u16 size;
+ int ret;
+
+ /* Prepare control command and copy to SPI control buffer */
+ oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);
+
+ size = oa_tc6_calculate_ctrl_buf_size(length);
+
+ /* Perform SPI transfer */
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Check echoed/received control write command reply for errors */
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ return oa_tc6_check_ctrl_write_reply(tc6, size);
+
+ /* Check echoed/received control read command reply for errors */
+ ret = oa_tc6_check_ctrl_read_reply(tc6, size);
+ if (ret)
+ return ret;
+
+ oa_tc6_copy_ctrl_read_data(tc6, value, length);
+
+ return 0;
+}
+
+/**
+ * oa_tc6_read_registers - function for reading multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be read in the MAC-PHY.
+ * @value: values to be read from the starting register address @address.
+ * @length: number of consecutive registers to be read from @address.
+ *
+ * Maximum of 128 consecutive registers can be read starting at @address.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_READ);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
+
+/**
+ * oa_tc6_read_register - function for reading a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be read.
+ * @value: value read from the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
+{
+ return oa_tc6_read_registers(tc6, address, value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_register);
+
+/**
+ * oa_tc6_write_registers - function for writing multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be written in the MAC-PHY.
+ * @value: values to be written from the starting register address @address.
+ * @length: number of consecutive registers to be written from @address.
+ *
+ * Maximum of 128 consecutive registers can be written starting at @address.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_WRITE);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_registers);
+
+/**
+ * oa_tc6_write_register - function for writing a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be written.
+ * @value: value to be written to the @address register of the MAC-PHY.
+ *
+ * Return: 0 on success, otherwise a negative error code on failure.
+ */
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
+{
+ return oa_tc6_write_registers(tc6, address, &value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_register);
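+
+/* Illustrative read-modify-write sketch using the two single-register helpers
+ * above; the CONFIG0 register and ZARFE bit are taken from this file, but the
+ * surrounding caller is hypothetical.
+ *
+ *	u32 regval;
+ *	int ret;
+ *
+ *	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
+ *	if (ret)
+ *		return ret;
+ *	regval |= CONFIG0_ZARFE_ENABLE;
+ *	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
+ */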
+
+static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void oa_tc6_handle_link_change(struct net_device *netdev)
+{
+ phy_print_status(netdev->phydev);
+}
+
+static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+	int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+
+ return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ val);
+}
+
+static int oa_tc6_get_phy_c45_mms(int devnum)
+{
+ switch (devnum) {
+ case MDIO_MMD_PCS:
+ return OA_TC6_PHY_C45_PCS_MMS2;
+ case MDIO_MMD_PMAPMD:
+ return OA_TC6_PHY_C45_PMA_PMD_MMS3;
+ case MDIO_MMD_VEND2:
+ return OA_TC6_PHY_C45_VS_PLCA_MMS4;
+ case MDIO_MMD_AN:
+ return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
+ case MDIO_MMD_POWER_UNIT:
+ return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
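+
+/* Worked example of the mapping above (illustrative values): a C45 access to
+ * the PCS device (devnum = MDIO_MMD_PCS) maps to MMS 2, so PCS register
+ * 0x08F0 is addressed in the MAC-PHY as (2 << 16) | 0x08F0 = 0x208F0, which
+ * is the address composition used by the C45 read/write helpers below.
+ */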
+
+static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
+}
+
+static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ tc6->mdiobus = mdiobus_alloc();
+ if (!tc6->mdiobus) {
+ netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ tc6->mdiobus->priv = tc6;
+ tc6->mdiobus->read = oa_tc6_mdiobus_read;
+ tc6->mdiobus->write = oa_tc6_mdiobus_write;
+	/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45
+	 * register spaces. If the PHY is discovered via the C22 bus protocol,
+	 * phylib assumes it is a C22 PHY and always uses C22 indirect register
+	 * access to reach the C45 registers. This is because there is no clean
+	 * separation between the C22/C45 register spaces and the C22/C45 MDIO
+	 * bus protocols. As a result, direct C45 register access, which would
+	 * save multiple SPI bus accesses, can't be used by default. To support
+	 * it, PHY drivers can set .read_mmd/.write_mmd in the PHY driver to
+	 * call .read_c45/.write_c45 (see the illustrative sketch after this
+	 * function). Ex: drivers/net/phy/microchip_t1s.c
+	 */
+ tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
+ tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
+ tc6->mdiobus->name = "oa-tc6-mdiobus";
+ tc6->mdiobus->parent = tc6->dev;
+
+ snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
+ dev_name(&tc6->spi->dev));
+
+ ret = mdiobus_register(tc6->mdiobus);
+ if (ret) {
+ netdev_err(tc6->netdev, "Could not register MDIO bus\n");
+ mdiobus_free(tc6->mdiobus);
+ return ret;
+ }
+
+ return 0;
+}
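+
+/* Illustrative sketch of the PHY driver hook mentioned in
+ * oa_tc6_mdiobus_register() above; the function and structure names are
+ * assumptions, modelled loosely on drivers/net/phy/microchip_t1s.c. Routing
+ * .read_mmd to the bus .read_c45 turns each MMD access into one direct SPI
+ * register access instead of a C22 indirect sequence.
+ *
+ *	static int example_phy_read_mmd(struct phy_device *phydev, int devnum,
+ *					u16 regnum)
+ *	{
+ *		return __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
+ *					  devnum, regnum);
+ *	}
+ */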
+
+static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
+{
+ mdiobus_unregister(tc6->mdiobus);
+ mdiobus_free(tc6->mdiobus);
+}
+
+static int oa_tc6_phy_init(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
+ if (ret) {
+ netdev_err(tc6->netdev,
+ "Direct PHY register access is not supported by the MAC-PHY\n");
+ return ret;
+ }
+
+ ret = oa_tc6_mdiobus_register(tc6);
+ if (ret)
+ return ret;
+
+ tc6->phydev = phy_find_first(tc6->mdiobus);
+ if (!tc6->phydev) {
+ netdev_err(tc6->netdev, "No PHY found\n");
+ oa_tc6_mdiobus_unregister(tc6);
+ return -ENODEV;
+ }
+
+ tc6->phydev->is_internal = true;
+ ret = phy_connect_direct(tc6->netdev, tc6->phydev,
+ &oa_tc6_handle_link_change,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
+ tc6->mdiobus->id);
+ oa_tc6_mdiobus_unregister(tc6);
+ return ret;
+ }
+
+ phy_attached_info(tc6->netdev->phydev);
+
+ return 0;
+}
+
+static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
+{
+ phy_disconnect(tc6->phydev);
+ oa_tc6_mdiobus_unregister(tc6);
+}
+
+static int oa_tc6_read_status0(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
+ ret);
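+		/* Returning 0 keeps the readx_poll_timeout() caller polling
+		 * until it times out, since STATUS0_RESETC will not be seen.
+		 */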
+ return 0;
+ }
+
+ return regval;
+}
+
+static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
+{
+ u32 regval = RESET_SWRESET;
+ int ret;
+
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
+ if (ret)
+ return ret;
+
+	/* Poll every 1ms for the soft reset to complete, up to a 1s timeout */
+ ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
+ regval & STATUS0_RESETC,
+ STATUS0_RESETC_POLL_DELAY,
+ STATUS0_RESETC_POLL_TIMEOUT);
+ if (ret)
+ return -ENODEV;
+
+ /* Clear the reset complete status */
+ return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
+}
+
+static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
+ INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
+ INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
+ INT_MASK0_HEADER_ERR_MASK);
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
+}
+
+static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
+ if (ret)
+ return ret;
+
+ /* Enable configuration synchronization for data transfer */
+ value |= CONFIG0_SYNC;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
+}
+
+static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ kfree_skb(tc6->rx_skb);
+ tc6->rx_skb = NULL;
+ }
+}
+
+static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->ongoing_tx_skb) {
+ tc6->netdev->stats.tx_dropped++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+}
+
+static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Clear the error interrupts status */
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
+ tc6->rx_buf_overflow = true;
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ net_err_ratelimited("%s: Receive buffer overflow error\n",
+ tc6->netdev->name);
+ return -EAGAIN;
+ }
+ if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
+ netdev_err(tc6->netdev, "Transmit protocol error\n");
+ return -ENODEV;
+ }
+ /* TODO: Currently loss of frame and header errors are treated as
+ * non-recoverable errors. They will be handled in the next version.
+ */
+ if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
+ netdev_err(tc6->netdev, "Loss of frame error\n");
+ return -ENODEV;
+ }
+ if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
+ netdev_err(tc6->netdev, "Header error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
+{
+	/* Process the rx chunk footer for the following:
+	 * 1. tx credits
+	 * 2. errors, if any, from the MAC-PHY
+	 * 3. receive chunks available
+ */
+ tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
+ tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
+ footer);
+
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
+ int ret = oa_tc6_process_extended_status(tc6);
+
+ if (ret)
+ return ret;
+ }
+
+ /* TODO: Currently received header bad and configuration unsync errors
+ * are treated as non-recoverable errors. They will be handled in the
+ * next version.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
+ netdev_err(tc6->netdev, "Rxd header bad error\n");
+ return -ENODEV;
+ }
+
+ if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
+ netdev_err(tc6->netdev, "Config unsync error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
+ tc6->netdev->stats.rx_packets++;
+ tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;
+
+ netif_rx(tc6->rx_skb);
+
+ tc6->rx_skb = NULL;
+}
+
+static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
+{
+ memcpy(skb_put(tc6->rx_skb, length), payload, length);
+}
+
+static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+ if (!tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+
+ return 0;
+}
+
+static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ return 0;
+}
+
+static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+}
+
+static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u32 footer)
+{
+ oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
+}
+
+static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
+ u32 footer)
+{
+ u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
+ footer) * sizeof(u32);
+ u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
+ footer);
+ bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
+ bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
+ u16 size;
+
+	/* Restart a new rx frame after an rx buffer overflow error is received */
+ if (start_valid && tc6->rx_buf_overflow)
+ tc6->rx_buf_overflow = false;
+
+ if (tc6->rx_buf_overflow)
+ return 0;
+
+ /* Process the chunk with complete rx frame */
+ if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
+ size = end_byte_offset + 1 - start_byte_offset;
+ return oa_tc6_prcs_complete_rx_frame(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame start */
+ if (start_valid && !end_valid) {
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame end */
+ if (end_valid && !start_valid) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ return 0;
+ }
+
+ /* Process the chunk with previous rx frame end and next rx frame
+ * start.
+ */
+ if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
+ /* After rx buffer overflow error received, there might be a
+ * possibility of getting an end valid of a previously
+ * incomplete rx frame along with the new rx frame start valid.
+ */
+ if (tc6->rx_skb) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ }
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with ongoing rx frame data */
+ oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);
+
+ return 0;
+}
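+
+/* Worked example of the footer decoding above (illustrative values): a
+ * footer with start valid, start word offset 3, end valid and end byte
+ * offset 61 describes a complete frame within this chunk spanning payload
+ * bytes 12 (3 * sizeof(u32)) through 61, i.e. size = 61 + 1 - 12 = 50 bytes.
+ */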
+
+static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
+{
+ u8 *rx_buf = tc6->spi_data_rx_buf;
+ __be32 footer;
+
+ footer = *((__be32 *)&rx_buf[footer_offset]);
+
+ return be32_to_cpu(footer);
+}
+
+static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
+{
+ u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
+ u32 footer;
+ int ret;
+
+ /* All the rx chunks in the receive SPI data buffer are examined here */
+ for (int i = 0; i < no_of_rx_chunks; i++) {
+		/* The last 4 bytes of each received chunk contain the footer info */
+ footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
+ OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
+ if (ret)
+ return ret;
+
+		/* If the chunk contains valid data, then process it for the
+ * information needed to determine the validity and the location
+ * of the receive frame data.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
+ u8 *payload = tc6->spi_data_rx_buf + i *
+ OA_TC6_CHUNK_SIZE;
+
+ ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
+ footer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
+ bool end_valid, u8 end_byte_offset)
+{
+ u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
+ OA_TC6_DATA_HEADER) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
+ end_byte_offset);
+
+ header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
+
+static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
+{
+ enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
+ __be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
+ u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
+ u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
+ enum oa_tc6_data_start_valid_info start_valid;
+ u8 end_byte_offset = 0;
+ u16 length_to_copy;
+
+	/* The initial value is assigned here to keep the declaration line
+	 * within 80 characters.
+	 */
+ start_valid = OA_TC6_DATA_START_INVALID;
+
+ /* Set start valid if the current tx chunk contains the start of the tx
+ * ethernet frame.
+ */
+ if (!tc6->tx_skb_offset)
+ start_valid = OA_TC6_DATA_START_VALID;
+
+	/* If the remaining tx skb length is more than the chunk payload size
+	 * of 64 bytes, then copy only 64 bytes and leave the rest of the
+	 * ongoing tx skb for the next tx chunk.
+ */
+ length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ /* Copy the tx skb data to the tx chunk payload buffer */
+ memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
+ tc6->tx_skb_offset += length_to_copy;
+
+ /* Set end valid if the current tx chunk contains the end of the tx
+ * ethernet frame.
+ */
+ if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
+ end_valid = OA_TC6_DATA_END_VALID;
+ end_byte_offset = length_to_copy - 1;
+ tc6->tx_skb_offset = 0;
+ tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
+ tc6->netdev->stats.tx_packets++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+
+ *tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
+ end_valid, end_byte_offset);
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+}
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+{
+ u16 used_tx_credits;
+
+ /* Get tx skbs and convert them into tx chunks based on the tx credits
+ * available.
+ */
+ for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ used_tx_credits++) {
+ if (!tc6->ongoing_tx_skb) {
+ tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ tc6->waiting_tx_skb = NULL;
+ }
+ if (!tc6->ongoing_tx_skb)
+ break;
+ oa_tc6_add_tx_skb_to_spi_buf(tc6);
+ }
+
+ return used_tx_credits * OA_TC6_CHUNK_SIZE;
+}
+
+static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
+ u16 needed_empty_chunks)
+{
+ __be32 header;
+
+ header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_END_INVALID, 0);
+
+ while (needed_empty_chunks--) {
+ __be32 *tx_buf = tc6->spi_data_tx_buf +
+ tc6->spi_data_tx_buf_offset;
+
+ *tx_buf = header;
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+ }
+}
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
+{
+ u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
+ u16 needed_empty_chunks;
+
+ /* If there are more chunks to receive than to transmit, we need to add
+ * enough empty tx chunks to allow the reception of the excess rx
+ * chunks.
+ */
+ if (tx_chunks >= tc6->rx_chunks_available)
+ return len;
+
+ needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;
+
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);
+
+ return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
+}
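+
+/* Worked example of the padding above (illustrative numbers, assuming
+ * OA_TC6_CHUNK_SIZE is 68 bytes): with 2 tx chunks prepared (len = 136) and
+ * 5 rx chunks available, 3 empty tx chunks are appended so the full-duplex
+ * transfer of 5 * 68 = 340 bytes clocks in all 5 rx chunks.
+ */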
+
+static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ while (true) {
+ u16 spi_len = 0;
+
+ tc6->spi_data_tx_buf_offset = 0;
+
+ if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
+
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);
+
+ if (tc6->int_flag) {
+ tc6->int_flag = false;
+ if (spi_len == 0) {
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
+ spi_len = OA_TC6_CHUNK_SIZE;
+ }
+ }
+
+ if (spi_len == 0)
+ break;
+
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
+ if (ret) {
+ netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
+ if (ret) {
+ if (ret == -EAGAIN)
+ continue;
+
+ oa_tc6_cleanup_ongoing_tx_skb(tc6);
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ netdev_err(tc6->netdev, "Device error: %d\n", ret);
+ return ret;
+ }
+
+ if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
+ netif_wake_queue(tc6->netdev);
+ }
+
+ return 0;
+}
+
+static int oa_tc6_spi_thread_handler(void *data)
+{
+ struct oa_tc6 *tc6 = data;
+ int ret;
+
+ while (likely(!kthread_should_stop())) {
+		/* This kthread will be woken up when there is a tx skb or a
+		 * MAC-PHY interrupt, to perform the SPI transfer with tx
+		 * chunks.
+ */
+ wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
+ tc6->int_flag ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ ret = oa_tc6_try_spi_transfer(tc6);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+	/* Initially, tx credits and rx chunks available are updated from the
+	 * register, as no data transfer has been performed yet. Later they
+	 * will be updated from the rx footer.
+ */
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
+ if (ret)
+ return ret;
+
+ tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
+ tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
+ value);
+
+ return 0;
+}
+
+static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
+{
+ struct oa_tc6 *tc6 = data;
+
+	/* The MAC-PHY interrupt can occur for the following reasons:
+ * - availability of tx credits if it was 0 before and not reported in
+ * the previous rx footer.
+ * - availability of rx chunks if it was 0 before and not reported in
+ * the previous rx footer.
+ * - extended status event not reported in the previous rx footer.
+ */
+ tc6->int_flag = true;
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * oa_tc6_zero_align_receive_frame_enable - function to enable zero align
+ * receive frame feature.
+ * @tc6: oa_tc6 struct.
+ *
+ * Return: 0 on success, otherwise a negative error code on failure.
+ */
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
+ if (ret)
+ return ret;
+
+ /* Set Zero-Align Receive Frame Enable */
+ regval |= CONFIG0_ZARFE_ENABLE;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
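+
+/* Illustrative placement (hypothetical caller): a MAC driver would typically
+ * enable this feature right after oa_tc6_init() succeeds, e.g.
+ *
+ *	ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
+ *	if (ret)
+ *		goto err_macphy_exit;
+ */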
+
+/**
+ * oa_tc6_start_xmit - function for sending the tx skb which contains an
+ * ethernet frame.
+ * @tc6: oa_tc6 struct.
+ * @skb: socket buffer in which the ethernet frame is stored.
+ *
+ * Return: NETDEV_TX_OK if the skb is accepted as the waiting tx skb,
+ * otherwise NETDEV_TX_BUSY.
+ */
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+{
+ if (tc6->waiting_tx_skb) {
+ netif_stop_queue(tc6->netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ tc6->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ tc6->waiting_tx_skb = skb;
+
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
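+
+/* Illustrative sketch of the expected caller (names are assumptions): the
+ * MAC driver forwards its ndo_start_xmit handler to this helper, e.g.
+ *
+ *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
+ *					      struct net_device *netdev)
+ *	{
+ *		struct example_priv *priv = netdev_priv(netdev);
+ *
+ *		return oa_tc6_start_xmit(priv->tc6, skb);
+ *	}
+ */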
+
+/**
+ * oa_tc6_init - allocates and initializes the oa_tc6 structure.
+ * @spi: device with which data will be exchanged.
+ * @netdev: network device interface structure.
+ *
+ * Return: pointer to the oa_tc6 structure if the MAC-PHY initialization
+ * succeeds, otherwise NULL.
+ */
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+{
+ struct oa_tc6 *tc6;
+ int ret;
+
+ tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
+ if (!tc6)
+ return NULL;
+
+ tc6->spi = spi;
+ tc6->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ mutex_init(&tc6->spi_ctrl_lock);
+
+ /* Set the SPI controller to pump at realtime priority */
+ tc6->spi->rt = true;
+ spi_setup(tc6->spi);
+
+ tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_tx_buf)
+ return NULL;
+
+ tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_rx_buf)
+ return NULL;
+
+ tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_tx_buf)
+ return NULL;
+
+ tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_rx_buf)
+ return NULL;
+
+ ret = oa_tc6_sw_reset_macphy(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY software reset failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY error interrupts unmask failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_phy_init(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC internal PHY initialization failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_enable_data_transfer(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
+ ret);
+ goto phy_exit;
+ }
+
+ ret = oa_tc6_update_buffer_status_from_register(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "Failed to update buffer status: %d\n", ret);
+ goto phy_exit;
+ }
+
+ init_waitqueue_head(&tc6->spi_wq);
+
+ tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
+ "oa-tc6-spi-thread");
+ if (IS_ERR(tc6->spi_thread)) {
+ dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
+ goto phy_exit;
+ }
+
+ sched_set_fifo(tc6->spi_thread);
+
+ ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
+ IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
+ tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
+ ret);
+ goto kthread_stop;
+ }
+
+	/* The oa_tc6_sw_reset_macphy() function resets the MAC-PHY and clears
+	 * the reset complete status. The IRQ is also asserted on reset
+	 * completion and remains asserted until the MAC-PHY receives a data
+	 * chunk. So performing an empty data chunk transmission will deassert
+	 * the IRQ. Refer to sections 7.7 and 9.2.8.8 in the OPEN Alliance
+	 * specification for more details.
+ */
+ tc6->int_flag = true;
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return tc6;
+
+kthread_stop:
+ kthread_stop(tc6->spi_thread);
+phy_exit:
+ oa_tc6_phy_exit(tc6);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_init);
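+
+/* Illustrative probe-time usage (hypothetical SPI MAC driver, with "priv" as
+ * its private data): pair oa_tc6_init() in probe with oa_tc6_exit() in
+ * remove.
+ *
+ *	priv->tc6 = oa_tc6_init(spi, netdev);
+ *	if (!priv->tc6)
+ *		return -ENODEV;
+ */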
+
+/**
+ * oa_tc6_exit - exit function to release the oa_tc6 library resources.
+ * @tc6: oa_tc6 struct.
+ */
+void oa_tc6_exit(struct oa_tc6 *tc6)
+{
+ oa_tc6_phy_exit(tc6);
+ kthread_stop(tc6->spi_thread);
+ dev_kfree_skb_any(tc6->ongoing_tx_skb);
+ dev_kfree_skb_any(tc6->waiting_tx_skb);
+ dev_kfree_skb_any(tc6->rx_skb);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_exit);
+
+MODULE_DESCRIPTION("OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface Lib");
+MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 62ba269da902..cb4e12df7719 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1699,8 +1699,9 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
- dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_GSO;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_GSO;
+ dev->lltx = true;
mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!mac->dma_pdev) {
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 3f7519e435b8..01fe76786f77 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -23,6 +23,7 @@ config IONIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select DIMLIB
+ select PAGE_POOL
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 59e5a9f21105..c98b4e75e288 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -123,7 +123,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_cq *cq = &qcq->cq;
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
- if (IS_ERR_OR_NULL(qcq_dentry))
+ if (IS_ERR(qcq_dentry))
return;
qcq->dentry = qcq_dentry;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f2f07bf88545..c8c710cfe70c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -181,10 +181,7 @@ struct ionic_queue;
struct ionic_qcq;
#define IONIC_MAX_BUF_LEN ((u16)-1)
-#define IONIC_PAGE_SIZE PAGE_SIZE
-#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
-#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
- __GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)
#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
(VLAN_ETH_HLEN + \
@@ -238,9 +235,8 @@ struct ionic_queue {
unsigned int index;
unsigned int num_descs;
unsigned int max_sg_elems;
+
u64 features;
- unsigned int type;
- unsigned int hw_index;
unsigned int hw_type;
bool xdp_flush;
union {
@@ -250,18 +246,23 @@ struct ionic_queue {
struct ionic_admin_cmd *adminq;
};
union {
- void __iomem *cmb_base;
- struct ionic_txq_desc __iomem *cmb_txq;
- struct ionic_rxq_desc __iomem *cmb_rxq;
- };
- union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
struct xdp_rxq_info *xdp_rxq_info;
+ struct bpf_prog *xdp_prog;
+ struct page_pool *page_pool;
struct ionic_queue *partner;
+
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
+ unsigned int type;
+ unsigned int hw_index;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 4619fd74f3e3..dda22fa4448c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -989,8 +989,6 @@ static int ionic_get_ts_info(struct net_device *netdev,
info->phc_index = ptp_clock_index(lif->phc->ptp);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 86774d9922d8..40496587b2b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -46,8 +47,9 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
-static int ionic_xdp_queues_config(struct ionic_lif *lif);
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif);
+static void ionic_unregister_rxq_info(struct ionic_queue *q);
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
static void ionic_dim_work(struct work_struct *work)
{
@@ -380,6 +382,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
+ ionic_unregister_rxq_info(&qcq->q);
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -437,9 +440,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
- ionic_xdp_unregister_rxq_info(&qcq->q);
- ionic_qcq_intr_free(lif, qcq);
+ page_pool_destroy(qcq->q.page_pool);
+ qcq->q.page_pool = NULL;
+ ionic_qcq_intr_free(lif, qcq);
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
unsigned int desc_info_size,
- unsigned int pid, struct ionic_qcq **qcq)
+ unsigned int pid, struct bpf_prog *xdp_prog,
+ struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
goto err_out_free_qcq;
}
+ if (type == IONIC_QTYPE_RXQ) {
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = num_descs,
+ .nid = NUMA_NO_NODE,
+ .dev = lif->ionic->dev,
+ .napi = &new->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .netdev = lif->netdev,
+ };
+
+ if (xdp_prog)
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ new->q.page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(new->q.page_pool)) {
+ netdev_err(lif->netdev, "Cannot create page_pool\n");
+ err = PTR_ERR(new->q.page_pool);
+ new->q.page_pool = NULL;
+ goto err_out_free_q_info;
+ }
+ }
+
new->q.type = type;
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
@@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
}
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out_free_q_info;
+ goto err_out_free_page_pool;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
@@ -712,6 +742,8 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
+err_out_free_page_pool:
+ page_pool_destroy(new->q.page_pool);
err_out_free_q_info:
vfree(new->q.info);
err_out_free_qcq:
@@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(struct ionic_admin_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->adminqcq);
+ lif->kern_pid, NULL, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0,
sizeof(struct ionic_admin_desc_info),
- lif->kern_pid, &lif->notifyqcq);
+ lif->kern_pid, NULL, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -925,6 +957,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ err = ionic_register_rxq_info(q, qcq->napi.napi_id);
+ if (err) {
+ netif_napi_del(&qcq->napi);
+ return err;
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -960,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &txq);
+ lif->kern_pid, NULL, &txq);
if (err)
goto err_qcq_alloc;
@@ -1020,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rxq);
+ lif->kern_pid, NULL, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1037,7 +1074,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
goto err_qcq_init;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- ionic_rx_fill(&rxq->q);
+ ionic_rx_fill(&rxq->q, NULL);
err = ionic_qcq_enable(rxq);
if (err)
goto err_qcq_enable;
@@ -2046,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2078,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, lif->xdp_prog,
+ &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2143,9 +2181,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
- err = ionic_xdp_queues_config(lif);
- if (err)
- return err;
+ ionic_xdp_rxqs_prog_update(lif);
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
@@ -2154,7 +2190,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
goto err_out;
}
- ionic_rx_fill(&lif->rxqcqs[i]->q);
+ ionic_rx_fill(&lif->rxqcqs[i]->q,
+ READ_ONCE(lif->rxqcqs[i]->q.xdp_prog));
err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2167,7 +2204,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
}
if (lif->hwstamp_rxq) {
- ionic_rx_fill(&lif->hwstamp_rxq->q);
+ ionic_rx_fill(&lif->hwstamp_rxq->q, NULL);
err = ionic_qcq_enable(lif->hwstamp_rxq);
if (err)
goto err_out_hwstamp_rx;
@@ -2192,7 +2229,7 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
- ionic_xdp_queues_config(lif);
+ ionic_xdp_rxqs_prog_update(lif);
return err;
}
@@ -2651,7 +2688,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
-static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+static void ionic_unregister_rxq_info(struct ionic_queue *q)
{
struct xdp_rxq_info *xi;
@@ -2665,7 +2702,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
kfree(xi);
}
-static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
{
struct xdp_rxq_info *rxq_info;
int err;
@@ -2676,15 +2713,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_
err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
goto err_out;
}
- err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
if (err) {
- dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
- q->index, err);
+ netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
xdp_rxq_info_unreg(rxq_info);
goto err_out;
}
@@ -2698,44 +2735,20 @@ err_out:
return err;
}
-static int ionic_xdp_queues_config(struct ionic_lif *lif)
+static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif)
{
+ struct bpf_prog *xdp_prog;
unsigned int i;
- int err;
if (!lif->rxqcqs)
- return 0;
-
- /* There's no need to rework memory if not going to/from NULL program.
- * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
- * This way we don't need to keep an *xdp_prog in every queue struct.
- */
- if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
- return 0;
+ return;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
struct ionic_queue *q = &lif->rxqcqs[i]->q;
- if (q->xdp_rxq_info) {
- ionic_xdp_unregister_rxq_info(q);
- continue;
- }
-
- err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
- if (err) {
- dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
- i, err);
- goto err_out;
- }
+ WRITE_ONCE(q->xdp_prog, xdp_prog);
}
-
- return 0;
-
-err_out:
- for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
- ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
-
- return err;
}
static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
@@ -2765,11 +2778,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
if (!netif_running(netdev)) {
old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else if (lif->xdp_prog && bpf->prog) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_xdp_rxqs_prog_update(lif);
} else {
+ struct ionic_queue_params qparams;
+
+ ionic_init_queue_params(lif, &qparams);
+ qparams.xdp_prog = bpf->prog;
mutex_lock(&lif->queue_lock);
- ionic_stop_queues_reconfig(lif);
+ ionic_reconfigure_queues(lif, &qparams);
old_prog = xchg(&lif->xdp_prog, bpf->prog);
- ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
}
@@ -2871,13 +2890,23 @@ err_out:
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
- /* only swapping the queues, not the napi, flags, or other stuff */
+ /* only swapping the queues and napi, not flags or other stuff */
+ swap(a->napi, b->napi);
+
+ if (a->q.type == IONIC_QTYPE_RXQ) {
+ swap(a->q.page_pool, b->q.page_pool);
+ a->q.page_pool->p.napi = &a->napi;
+ if (b->q.page_pool) /* is NULL when increasing queue count */
+ b->q.page_pool->p.napi = &b->napi;
+ }
+
swap(a->q.features, b->q.features);
swap(a->q.num_descs, b->q.num_descs);
swap(a->q.desc_size, b->q.desc_size);
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_prog, b->q.xdp_prog);
swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
@@ -2928,7 +2957,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
- qparam->rxq_features != lif->rxq_features) {
+ qparam->rxq_features != lif->rxq_features ||
+ qparam->xdp_prog != lif->xdp_prog) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!rx_qcqs) {
@@ -2959,7 +2989,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &lif->txqcqs[i]);
+ lif->kern_pid, NULL, &lif->txqcqs[i]);
if (err)
goto err_out;
}
@@ -2968,7 +2998,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
- lif->kern_pid, &tx_qcqs[i]);
+ lif->kern_pid, NULL, &tx_qcqs[i]);
if (err)
goto err_out;
}
@@ -2990,7 +3020,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &lif->rxqcqs[i]);
+ lif->kern_pid, NULL, &lif->rxqcqs[i]);
if (err)
goto err_out;
}
@@ -2999,11 +3029,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
- lif->kern_pid, &rx_qcqs[i]);
+ lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]);
if (err)
goto err_out;
rx_qcqs[i]->q.features = qparam->rxq_features;
+ rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog;
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 3e1005293c4a..e01756fb7fdd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -268,6 +268,7 @@ struct ionic_queue_params {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u64 rxq_features;
+ struct bpf_prog *xdp_prog;
bool intr_split;
bool cmb_tx;
bool cmb_rx;
@@ -280,6 +281,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
qparam->rxq_features = lif->rxq_features;
+ qparam->xdp_prog = lif->xdp_prog;
qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 1ee2f285cb42..528114877677 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -312,8 +312,8 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
int err = 0;
ctx.cmd.rx_filter_add = *ac;
- ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD,
- ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index),
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
spin_lock_bh(&lif->rx_filters.lock);
f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index fc79baad4561..0eeda7e502db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include "ionic.h"
#include "ionic_lif.h"
@@ -118,108 +119,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
- return buf_info->dma_addr + buf_info->page_offset;
+ return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
}
-static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+static void __ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ bool recycle_direct)
{
- return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
-}
-
-static int ionic_rx_page_alloc(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
- dma_addr_t dma_addr;
- struct page *page;
-
- page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: %s page alloc failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->alloc_err++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
- __free_pages(page, 0);
- net_err_ratelimited("%s: %s dma map failed\n",
- dev_name(dev), q->name);
- q_to_rx_stats(q)->dma_map_err++;
- return -EIO;
- }
-
- buf_info->dma_addr = dma_addr;
- buf_info->page = page;
- buf_info->page_offset = 0;
-
- return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
- struct ionic_buf_info *buf_info)
-{
- struct device *dev = q->dev;
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in free\n",
- dev_name(dev), q->name);
- return;
- }
-
if (!buf_info->page)
return;
- dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(buf_info->page, 0);
+ page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
-static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 len)
-{
- u32 size;
-
- /* don't re-use pages allocated in low-mem condition */
- if (page_is_pfmemalloc(buf_info->page))
- return false;
-
- /* don't re-use buffers from non-local numa nodes */
- if (page_to_nid(buf_info->page) != numa_mem_id())
- return false;
-
- size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
- buf_info->page_offset += size;
- if (buf_info->page_offset >= IONIC_PAGE_SIZE)
- return false;
- get_page(buf_info->page);
+static void ionic_rx_put_buf(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, false);
+}
- return true;
+static void ionic_rx_put_buf_direct(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info)
+{
+ __ionic_rx_put_buf(q, buf_info, true);
}
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
struct sk_buff *skb,
struct ionic_buf_info *buf_info,
- u32 off, u32 len,
+ u32 headroom, u32 len,
bool synced)
{
if (!synced)
- dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
- off, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset + off,
- len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ len, buf_info->len);
- if (!ionic_rx_buf_recycle(q, buf_info, len)) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ /* napi_gro_frags() will release/recycle the
+ * page_pool buffers from the frags list
+ */
+ buf_info->page = NULL;
+ buf_info->len = 0;
+ buf_info->page_offset = 0;
}
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
@@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
+ skb_mark_for_recycle(skb);
if (headroom)
frag_len = min_t(u16, len,
IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
else
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
@@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
for (i = 0; i < num_sg_elems; i++, buf_info++) {
if (unlikely(!buf_info->page))
goto err_bad_buf_page;
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ frag_len = min_t(u16, len, buf_info->len);
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
len -= frag_len;
}
@@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
struct ionic_rx_desc_info *desc_info,
unsigned int headroom,
unsigned int len,
+ unsigned int num_sg_elems,
bool synced)
{
struct ionic_buf_info *buf_info;
struct device *dev = q->dev;
struct sk_buff *skb;
+ int i;
buf_info = &desc_info->bufs[0];
@@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
q_to_rx_stats(q)->alloc_err++;
return NULL;
}
-
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
+ skb_mark_for_recycle(skb);
if (!synced)
- dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
+ page_pool_dma_sync_for_cpu(q->page_pool,
+ buf_info->page,
+ buf_info->page_offset + headroom,
+ len);
+
skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
- dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
- headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, netdev);
+ /* recycle the Rx buffer now that we're done with it */
+ ionic_rx_put_buf_direct(q, buf_info);
+ buf_info++;
+ for (i = 0; i < num_sg_elems; i++, buf_info++)
+ ionic_rx_put_buf_direct(q, buf_info);
+
return skb;
}
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info,
+ bool in_napi)
{
- unsigned int nbufs = desc_info->nbufs;
- struct ionic_buf_info *buf_info;
- struct device *dev = q->dev;
- int i;
+ struct xdp_frame_bulk bq;
- if (!nbufs)
+ if (!desc_info->nbufs)
return;
- buf_info = desc_info->bufs;
- dma_unmap_single(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
- buf_info++;
- for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
- dma_unmap_page(dev, buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- if (desc_info->act == XDP_TX)
- __free_pages(buf_info->page, 0);
- buf_info->page = NULL;
+ if (desc_info->act == XDP_TX) {
+ if (likely(in_napi))
+ xdp_return_frame_rx_napi(desc_info->xdpf);
+ else
+ xdp_return_frame(desc_info->xdpf);
+ } else if (desc_info->act == XDP_REDIRECT) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ xdp_return_frame_bulk(desc_info->xdpf, &bq);
}
- if (desc_info->act == XDP_REDIRECT)
- xdp_return_frame(desc_info->xdpf);
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
desc_info->nbufs = 0;
desc_info->xdpf = NULL;
@@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
buf_info = desc_info->bufs;
stats = q_to_tx_stats(q);
- dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
- return -EIO;
+ if (act == XDP_TX) {
+ dma_addr = page_pool_get_dma_addr(page) +
+ off + XDP_PACKET_HEADROOM;
+ dma_sync_single_for_device(q->dev, dma_addr,
+ len, DMA_TO_DEVICE);
+ } else /* XDP_REDIRECT */ {
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ }
+
buf_info->dma_addr = dma_addr;
buf_info->len = len;
buf_info->page = page;
@@ -387,10 +346,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
frag = sinfo->frags;
elem = ionic_tx_sg_elems(q);
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
- dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr) {
- ionic_tx_desc_unmap_bufs(q, desc_info);
- return -EIO;
+ if (act == XDP_TX) {
+ struct page *pg = skb_frag_page(frag);
+
+ dma_addr = page_pool_get_dma_addr(pg) +
+ skb_frag_off(frag);
+ dma_sync_single_for_device(q->dev, dma_addr,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ } else {
+ dma_addr = ionic_tx_map_frag(q, frag, 0,
+ skb_frag_size(frag));
+ if (dma_mapping_error(q->dev, dma_addr)) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
}
bi->dma_addr = dma_addr;
bi->len = skb_frag_size(frag);
@@ -481,15 +451,13 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
-static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
- struct ionic_buf_info *buf_info,
- int nbufs)
+static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
{
int i;
for (i = 0; i < nbufs; i++) {
- dma_unmap_page(q->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
buf_info++;
}
@@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
XDP_PACKET_HEADROOM, frag_len, false);
-
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, frag_len,
- DMA_FROM_DEVICE);
-
+ page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
+ buf_info->page_offset + XDP_PACKET_HEADROOM,
+ frag_len);
prefetchw(&xdp_buf.data_hard_start);
/* We limit MTU size to one buffer if !xdp_has_frags, so
@@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
do {
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
err = -ENOSPC;
- goto out_xdp_abort;
+ break;
}
frag = &sinfo->frags[sinfo->nr_frags];
sinfo->nr_frags++;
bi++;
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
- dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
- 0, frag_len, DMA_FROM_DEVICE);
+ frag_len = min_t(u16, remain_len, bi->len);
+ page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
+ buf_info->page_offset,
+ frag_len);
skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
sinfo->xdp_frags_size += frag_len;
remain_len -= frag_len;
@@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
return false; /* false = we didn't consume the packet */
case XDP_DROP:
- ionic_rx_page_free(rxq, buf_info);
+ ionic_rx_put_buf_direct(rxq, buf_info);
stats->xdp_drop++;
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp_buf);
- if (!xdpf)
- goto out_xdp_abort;
+ if (!xdpf) {
+ err = -ENOSPC;
+ break;
+ }
txq = rxq->partner;
nq = netdev_get_tx_queue(netdev, txq->index);
@@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
ionic_q_space_avail(txq),
1, 1)) {
__netif_tx_unlock(nq);
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
@@ -598,49 +568,47 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
__netif_tx_unlock(nq);
if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
-
- /* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
- goto out_xdp_abort;
+ break;
}
- ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
case XDP_ABORTED:
default:
- goto out_xdp_abort;
+ err = -EIO;
+ break;
}
- return true;
-
-out_xdp_abort:
- trace_xdp_exception(netdev, xdp_prog, xdp_action);
- ionic_rx_page_free(rxq, buf_info);
- stats->xdp_aborted++;
+ if (err) {
+ ionic_rx_put_buf_direct(rxq, buf_info);
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ stats->xdp_aborted++;
+ }
return true;
}
static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_rx_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ struct ionic_rxq_comp *comp,
+ struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct bpf_prog *xdp_prog;
- unsigned int headroom;
+ unsigned int headroom = 0;
struct sk_buff *skb;
bool synced = false;
bool use_copybreak;
@@ -648,7 +616,14 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats = q_to_rx_stats(q);
- if (comp->status) {
+ if (unlikely(comp->status)) {
+ /* Most likely status==2 and the pkt received was bigger
+ * than the buffer available: comp->len will show the
+ * pkt size received that didn't fit the advertised desc.len
+ */
+ dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
+ q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
+
stats->dropped++;
return;
}
@@ -657,18 +632,18 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->pkts++;
stats->bytes += len;
- xdp_prog = READ_ONCE(q->lif->xdp_prog);
if (xdp_prog) {
if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
return;
synced = true;
+ headroom = XDP_PACKET_HEADROOM;
}
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
use_copybreak = len <= q->lif->rx_copybreak;
if (use_copybreak)
skb = ionic_rx_copybreak(netdev, q, desc_info,
- headroom, len, synced);
+ headroom, len,
+ comp->num_sg_elems, synced);
else
skb = ionic_rx_build_skb(q, desc_info, headroom, len,
comp->num_sg_elems, synced);
@@ -744,7 +719,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq)
+static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog)
{
struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -766,11 +741,16 @@ bool ionic_rx_service(struct ionic_cq *cq)
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
- ionic_rx_clean(q, desc_info, comp);
+ ionic_rx_clean(q, desc_info, comp, xdp_prog);
return true;
}
+bool ionic_rx_service(struct ionic_cq *cq)
+{
+ return __ionic_rx_service(cq, NULL);
+}
+
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void *desc)
{
@@ -781,7 +761,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q,
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
-void ionic_rx_fill(struct ionic_queue *q)
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_desc_info *desc_info;
@@ -789,6 +769,9 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
struct ionic_rxq_desc *desc;
+ unsigned int first_frag_len;
+ unsigned int first_buf_len;
+ unsigned int headroom = 0;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
@@ -806,35 +789,43 @@ void ionic_rx_fill(struct ionic_queue *q)
len = netdev->mtu + VLAN_ETH_HLEN;
- for (i = n_fill; i; i--) {
- unsigned int headroom;
- unsigned int buf_len;
+ if (xdp_prog) {
+ /* Always alloc the full size buffer, but only need
+ * the actual frag_len in the descriptor.
+ * XDP uses space in the first buffer, so account for
+ * head room, tail room, and ip header in the first frag size.
+ */
+ headroom = XDP_PACKET_HEADROOM;
+ first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
+ first_frag_len = min_t(u16, len + headroom, first_buf_len);
+ } else {
+ /* Use MTU size if smaller than max buffer size */
+ first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
+ first_buf_len = first_frag_len;
+ }
+ for (i = n_fill; i; i--) {
+ /* fill main descriptor - buf[0] */
nfrags = 0;
remain_len = len;
desc = &q->rxq[q->head_idx];
desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
- if (!buf_info->page) { /* alloc a new buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- desc->addr = 0;
- desc->len = 0;
- return;
- }
+ buf_info->len = first_buf_len;
+ frag_len = first_frag_len - headroom;
+
+ /* get a new buffer if we can't reuse one */
+ if (!buf_info->page)
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
+ return;
}
- /* fill main descriptor - buf[0]
- * XDP uses space in the first buffer, so account for
- * head room, tail room, and ip header in the first frag size.
- */
- headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
- if (q->xdp_rxq_info)
- buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
- else
- buf_len = ionic_rx_buf_size(buf_info);
- frag_len = min_t(u16, len, buf_len);
-
desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
@@ -844,16 +835,26 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill sg descriptors - buf[1..n] */
sg_elem = q->rxq_sgl[q->head_idx].elems;
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
- if (!buf_info->page) { /* alloc a new sg buffer? */
- if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
- sg_elem->addr = 0;
- sg_elem->len = 0;
+ frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
+
+ /* Recycle any leftover buffers that are too small to reuse */
+ if (unlikely(buf_info->page && buf_info->len < frag_len))
+ ionic_rx_put_buf_direct(q, buf_info);
+
+ /* Get new buffer if needed */
+ if (!buf_info->page) {
+ buf_info->len = frag_len;
+ buf_info->page = page_pool_alloc(q->page_pool,
+ &buf_info->page_offset,
+ &buf_info->len,
+ GFP_ATOMIC);
+ if (unlikely(!buf_info->page)) {
+ buf_info->len = 0;
return;
}
}
sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
- frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -883,17 +884,12 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_rx_desc_info *desc_info;
- struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
desc_info = &q->rx_info[i];
- for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
- buf_info = &desc_info->bufs[j];
- if (buf_info->page)
- ionic_rx_page_free(q, buf_info);
- }
-
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
+ ionic_rx_put_buf(q, &desc_info->bufs[j]);
desc_info->nbufs = 0;
}
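
With ionic_rx_page_alloc()/ionic_rx_page_free() gone, the fill and empty paths above reduce per-buffer management to a get/put pair on the queue's page_pool. A minimal sketch of that cycle, assuming a pool created with PP_FLAG_DMA_MAP; struct rx_buf and the helper names are illustrative, not driver symbols:

#include <net/page_pool/helpers.h>

struct rx_buf {				/* illustrative, not the driver's struct */
	struct page *page;
	unsigned int page_offset;
	unsigned int len;
};

static int rx_buf_get(struct page_pool *pp, struct rx_buf *b,
		      unsigned int want)
{
	b->len = want;			/* the pool may round this up */
	b->page = page_pool_alloc(pp, &b->page_offset, &b->len,
				  GFP_ATOMIC);
	if (unlikely(!b->page)) {
		b->len = 0;
		return -ENOMEM;
	}
	return 0;
}

static void rx_buf_put(struct page_pool *pp, struct rx_buf *b)
{
	if (b->page)			/* return the whole page to the pool */
		page_pool_put_full_page(pp, b->page, false);
	b->page = NULL;
	b->len = 0;
}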
@@ -974,6 +970,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq)
}
}
+static unsigned int ionic_rx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do)
+{
+ struct ionic_queue *q = cq->bound_q;
+ unsigned int work_done = 0;
+ struct bpf_prog *xdp_prog;
+
+ if (work_to_do == 0)
+ return 0;
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ while (__ionic_rx_service(cq, xdp_prog)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+ ionic_rx_fill(q, xdp_prog);
+ ionic_xdp_do_flush(cq);
+
+ return work_done;
+}
+
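A detail worth noting in ionic_rx_cq_service() above: the XDP program is sampled once per service pass with READ_ONCE() and the same pointer is handed to every completion in the batch, so a concurrent program swap cannot split one NAPI poll between two programs. In sketch form (service_one_completion() is an illustrative stand-in):

	struct bpf_prog *xdp_prog = READ_ONCE(q->xdp_prog);	/* sample once */

	while (service_one_completion(cq, xdp_prog))	/* same prog all batch */
		;
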
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -984,12 +1006,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- work_done = ionic_cq_service(cq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(cq->bound_q);
+ work_done = ionic_rx_cq_service(cq, budget);
- ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1030,12 +1048,8 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (unlikely(!budget))
return budget;
- rx_work_done = ionic_cq_service(rxcq, budget,
- ionic_rx_service, NULL, NULL);
-
- ionic_rx_fill(rxcq->bound_q);
+ rx_work_done = ionic_rx_cq_service(rxcq, budget);
- ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -1166,7 +1180,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
struct sk_buff *skb;
if (desc_info->xdpf) {
- ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
stats->clean++;
if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 9e73e324e7a1..b2b9a2dc9eb8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,9 +4,11 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
+struct bpf_prog;
+
void ionic_tx_flush(struct ionic_cq *cq);
-void ionic_rx_fill(struct ionic_queue *q);
+void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ed24d6af7487..9cff0a8ffb2c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3185,8 +3185,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
struct list_head *head;
bool ret = false;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ dev = ifa->ifa_dev->dev;
if (dev == NULL)
goto out;
@@ -3379,7 +3378,7 @@ netxen_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
unsigned long ip_event;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 63e3dac4d5f7..9d6399a5c780 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -326,25 +326,18 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *i
struct qede_ptp *ptp = edev->ptp;
if (!ptp) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (ptp->clock)
info->phc_index = ptp_clock_index(ptp->clock);
- else
- info->phc_index = -1;
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
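
For context on the qede hunk above: the ethtool core now pre-seeds struct kernel_ethtool_ts_info with the software timestamping flags and phc_index = -1 before invoking the driver callback, so drivers only add what the hardware provides. Assuming that core behavior, the resulting callback is roughly this condensed sketch (not a verbatim copy of the file):

/* Condensed sketch: the core has already set the SOF_TIMESTAMPING
 * software defaults and phc_index = -1 before this runs.
 */
int qede_ptp_get_ts_info_sketch(struct qede_dev *edev,
				struct kernel_ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp) {
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
		return 0;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);

	return 0;
}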
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b25102fded7b..3d0b5cd978cb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1608,7 +1608,6 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
-void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
void qlcnic_set_multi(struct net_device *netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bcef8ab715bf..d7cdea8f604d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
- int err;
- u32 word;
struct qlcnic_cmd_args cmd;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+ u32 word;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 23cd47d588e5..a55fe6ac06c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -539,7 +539,6 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
-int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -577,8 +576,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
-void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
- struct qlcnic_info *);
int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
@@ -590,7 +587,6 @@ irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
@@ -602,8 +598,6 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
-int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
-int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
@@ -616,13 +610,9 @@ void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
-int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
-int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
- struct qlcnic_info *, u8);
-int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 90df4a0909fa..b3588a1ebc25 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4146,7 +4146,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index f1e40aade127..4f0ddcedfa97 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -286,7 +286,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
- rmnet_dev->features |= NETIF_F_LLTX;
+ rmnet_dev->lltx = true;
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 03015b665f4e..8a8ea51c639e 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -120,4 +120,23 @@ config R8169_LEDS
Optional support for controlling the NIC LEDs with the netdev
LED trigger.
+config RTASE
+ tristate "Realtek Automotive Switch 9054/9068/9072/9075/9068/9071 PCIe Interface support"
+ depends on PCI
+ select CRC32
+ select PAGE_POOL
+ help
+	  Say Y here if you have a Realtek Ethernet adapter belonging
+	  to one of the following families; the driver will then be
+	  compiled and linked with the kernel:
+ RTL9054 5GBit Ethernet
+ RTL9068 5GBit Ethernet
+ RTL9072 5GBit Ethernet
+ RTL9075 5GBit Ethernet
+ RTL9071 5GBit Ethernet
+
+ To compile this driver as a module, choose M here: the module
+ will be called rtase. This is recommended.
+
endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 635491d8826e..046adf503ff4 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ATP) += atp.o
r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_RTASE) += rtase/
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 00882ffc7a02..e2db944e6fa8 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -69,6 +69,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_65,
+ RTL_GIGA_MAC_VER_66,
RTL_GIGA_MAC_NONE
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3507c2e28110..45ac8befba29 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -56,6 +56,7 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
+#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -138,6 +139,7 @@ static const struct {
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
[RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
+ [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -576,7 +578,7 @@ struct rtl8169_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rtl8169_tc_offsets {
@@ -1201,7 +1203,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1216,7 +1218,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1425,7 +1427,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1592,7 +1594,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1841,7 +1843,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
+ data[12] = le16_to_cpu(counters->tx_underrun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2071,6 +2073,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2199,6 +2202,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
enum mac_version ver;
} mac_info[] = {
/* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
{ 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
/* 8125B family. */
@@ -2470,6 +2474,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2656,7 +2661,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2899,7 +2904,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2913,7 +2918,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2940,6 +2945,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2950,7 +2956,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2962,7 +2968,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2971,6 +2977,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3690,10 +3697,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3711,7 +3720,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_66)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3825,6 +3835,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3845,6 +3856,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4073,7 +4085,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4224,7 +4236,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5257,7 +5269,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
rtl_hw_init_8125(tp);
break;
default:
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 1f74317beb88..cf29b1208482 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
rtl8168g_enable_gphy_10m(phydev);
+ rtl8168g_disable_aldps(phydev);
rtl8125a_config_eee_phy(phydev);
}
@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
rtl8125b_config_eee_phy(phydev);
}
@@ -1159,6 +1161,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/Makefile b/drivers/net/ethernet/realtek/rtase/Makefile
new file mode 100644
index 000000000000..ba3d8550f9e6
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# Copyright(c) 2024 Realtek Semiconductor Corp. All rights reserved.
+
+#
+# Makefile for the Realtek PCIe driver
+#
+
+obj-$(CONFIG_RTASE) += rtase.o
+
+rtase-objs := rtase_main.o
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
new file mode 100644
index 000000000000..583c33930f88
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ */
+
+#ifndef RTASE_H
+#define RTASE_H
+
+#define RTASE_HW_VER_MASK 0x7C800000
+
+#define RTASE_RX_DMA_BURST_256 4
+#define RTASE_TX_DMA_BURST_UNLIMITED 7
+
+#define RTASE_RX_BUF_SIZE (PAGE_SIZE - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RTASE_MAX_JUMBO_SIZE (RTASE_RX_BUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)
+
+/* 3 means InterFrameGap = the shortest one */
+#define RTASE_INTERFRAMEGAP 0x03
+
+#define RTASE_REGS_SIZE 256
+#define RTASE_PCI_REGS_SIZE 0x100
+
+#define RTASE_MULTICAST_FILTER_MASK GENMASK(30, 26)
+
+#define RTASE_VLAN_FILTER_ENTRY_NUM 32
+#define RTASE_NUM_TX_QUEUE 8
+#define RTASE_NUM_RX_QUEUE 4
+
+#define RTASE_TXQ_CTRL 1
+#define RTASE_FUNC_TXQ_NUM 1
+#define RTASE_FUNC_RXQ_NUM 1
+#define RTASE_INTERRUPT_NUM 1
+
+#define RTASE_MITI_TIME_COUNT_MASK GENMASK(3, 0)
+#define RTASE_MITI_TIME_UNIT_MASK GENMASK(7, 4)
+#define RTASE_MITI_DEFAULT_TIME 128
+#define RTASE_MITI_MAX_TIME 491520
+#define RTASE_MITI_PKT_NUM_COUNT_MASK GENMASK(11, 8)
+#define RTASE_MITI_PKT_NUM_UNIT_MASK GENMASK(13, 12)
+#define RTASE_MITI_DEFAULT_PKT_NUM 64
+#define RTASE_MITI_MAX_PKT_NUM_IDX 3
+#define RTASE_MITI_MAX_PKT_NUM_UNIT 16
+#define RTASE_MITI_MAX_PKT_NUM 240
+#define RTASE_MITI_COUNT_BIT_NUM 4
+
+#define RTASE_NUM_MSIX 4
+
+#define RTASE_DWORD_MOD 16
+
+/*****************************************************************************/
+enum rtase_registers {
+ RTASE_MAC0 = 0x0000,
+ RTASE_MAC4 = 0x0004,
+ RTASE_MAR0 = 0x0008,
+ RTASE_MAR1 = 0x000C,
+ RTASE_DTCCR0 = 0x0010,
+ RTASE_DTCCR4 = 0x0014,
+#define RTASE_COUNTER_RESET BIT(0)
+#define RTASE_COUNTER_DUMP BIT(3)
+
+ RTASE_FCR = 0x0018,
+#define RTASE_FCR_RXQ_MASK GENMASK(5, 4)
+
+ RTASE_LBK_CTRL = 0x001A,
+#define RTASE_LBK_ATLD BIT(1)
+#define RTASE_LBK_CLR BIT(0)
+
+ RTASE_TX_DESC_ADDR0 = 0x0020,
+ RTASE_TX_DESC_ADDR4 = 0x0024,
+ RTASE_TX_DESC_COMMAND = 0x0028,
+#define RTASE_TX_DESC_CMD_CS BIT(15)
+#define RTASE_TX_DESC_CMD_WE BIT(14)
+
+ RTASE_BOOT_CTL = 0x6004,
+ RTASE_CLKSW_SET = 0x6018,
+
+ RTASE_CHIP_CMD = 0x0037,
+#define RTASE_STOP_REQ BIT(7)
+#define RTASE_STOP_REQ_DONE BIT(6)
+#define RTASE_RE BIT(3)
+#define RTASE_TE BIT(2)
+
+ RTASE_IMR0 = 0x0038,
+ RTASE_ISR0 = 0x003C,
+#define RTASE_TOK7 BIT(30)
+#define RTASE_TOK6 BIT(28)
+#define RTASE_TOK5 BIT(26)
+#define RTASE_TOK4 BIT(24)
+#define RTASE_FOVW BIT(6)
+#define RTASE_RDU BIT(4)
+#define RTASE_TOK BIT(2)
+#define RTASE_ROK BIT(0)
+
+ RTASE_IMR1 = 0x0800,
+ RTASE_ISR1 = 0x0802,
+#define RTASE_Q_TOK BIT(4)
+#define RTASE_Q_RDU BIT(1)
+#define RTASE_Q_ROK BIT(0)
+
+ RTASE_EPHY_ISR = 0x6014,
+ RTASE_EPHY_IMR = 0x6016,
+
+ RTASE_TX_CONFIG_0 = 0x0040,
+#define RTASE_TX_INTER_FRAME_GAP_MASK GENMASK(25, 24)
	/* DMA burst value (0-7) is shifted left by this many bits */
+#define RTASE_TX_DMA_MASK GENMASK(10, 8)
+
+ RTASE_RX_CONFIG_0 = 0x0044,
+#define RTASE_RX_SINGLE_FETCH BIT(14)
+#define RTASE_RX_SINGLE_TAG BIT(13)
+#define RTASE_RX_MX_DMA_MASK GENMASK(10, 8)
+#define RTASE_ACPT_FLOW BIT(7)
+#define RTASE_ACCEPT_ERR BIT(5)
+#define RTASE_ACCEPT_RUNT BIT(4)
+#define RTASE_ACCEPT_BROADCAST BIT(3)
+#define RTASE_ACCEPT_MULTICAST BIT(2)
+#define RTASE_ACCEPT_MYPHYS BIT(1)
+#define RTASE_ACCEPT_ALLPHYS BIT(0)
+#define RTASE_ACCEPT_MASK (RTASE_ACPT_FLOW | RTASE_ACCEPT_ERR | \
+ RTASE_ACCEPT_RUNT | RTASE_ACCEPT_BROADCAST | \
+ RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_MYPHYS | \
+ RTASE_ACCEPT_ALLPHYS)
+
+ RTASE_RX_CONFIG_1 = 0x0046,
+#define RTASE_RX_MAX_FETCH_DESC_MASK GENMASK(15, 11)
+#define RTASE_RX_NEW_DESC_FORMAT_EN BIT(8)
+#define RTASE_OUTER_VLAN_DETAG_EN BIT(7)
+#define RTASE_INNER_VLAN_DETAG_EN BIT(6)
+#define RTASE_PCIE_NEW_FLOW BIT(2)
+#define RTASE_PCIE_RELOAD_EN BIT(0)
+
+ RTASE_EEM = 0x0050,
+#define RTASE_EEM_UNLOCK 0xC0
+
+ RTASE_TDFNR = 0x0057,
+ RTASE_TPPOLL = 0x0090,
+ RTASE_PDR = 0x00B0,
+ RTASE_FIFOR = 0x00D3,
+#define RTASE_TX_FIFO_EMPTY BIT(5)
+#define RTASE_RX_FIFO_EMPTY BIT(4)
+
+ RTASE_RMS = 0x00DA,
+ RTASE_CPLUS_CMD = 0x00E0,
+#define RTASE_FORCE_RXFLOW_EN BIT(11)
+#define RTASE_FORCE_TXFLOW_EN BIT(10)
+#define RTASE_RX_CHKSUM BIT(5)
+
+ RTASE_Q0_RX_DESC_ADDR0 = 0x00E4,
+ RTASE_Q0_RX_DESC_ADDR4 = 0x00E8,
+ RTASE_Q1_RX_DESC_ADDR0 = 0x4000,
+ RTASE_Q1_RX_DESC_ADDR4 = 0x4004,
+ RTASE_MTPS = 0x00EC,
+#define RTASE_TAG_NUM_SEL_MASK GENMASK(10, 8)
+
+ RTASE_MISC = 0x00F2,
+#define RTASE_RX_DV_GATE_EN BIT(3)
+
+ RTASE_TFUN_CTRL = 0x0400,
+#define RTASE_TX_NEW_DESC_FORMAT_EN BIT(0)
+
+ RTASE_TX_CONFIG_1 = 0x203E,
+#define RTASE_TC_MODE_MASK GENMASK(11, 10)
+
+ RTASE_TOKSEL = 0x2046,
+ RTASE_RFIFONFULL = 0x4406,
+ RTASE_INT_MITI_TX = 0x0A00,
+ RTASE_INT_MITI_RX = 0x0A80,
+
+ RTASE_VLAN_ENTRY_0 = 0xAC80,
+};
+
+enum rtase_desc_status_bit {
+ RTASE_DESC_OWN = BIT(31), /* Descriptor is owned by NIC */
+ RTASE_RING_END = BIT(30), /* End of descriptor ring */
+};
+
+enum rtase_sw_flag_content {
+ RTASE_SWF_MSI_ENABLED = BIT(1),
+ RTASE_SWF_MSIX_ENABLED = BIT(2),
+};
+
+#define RSVD_MASK 0x3FFFC000
+
+struct rtase_tx_desc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+ __le32 opts3;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+} __packed;
+
+/*------ offset 0 of tx descriptor ------*/
+#define RTASE_TX_FIRST_FRAG BIT(29) /* Tx First segment of a packet */
+#define RTASE_TX_LAST_FRAG BIT(28) /* Tx Final segment of a packet */
+#define RTASE_GIANT_SEND_V4 BIT(26) /* TCP Giant Send Offload V4 (GSOv4) */
+#define RTASE_GIANT_SEND_V6 BIT(25) /* TCP Giant Send Offload V6 (GSOv6) */
+#define RTASE_TX_VLAN_TAG BIT(17) /* Add VLAN tag */
+
+/*------ offset 4 of tx descriptor ------*/
+#define RTASE_TX_UDPCS_C BIT(31) /* Calculate UDP/IP checksum */
+#define RTASE_TX_TCPCS_C BIT(30) /* Calculate TCP/IP checksum */
+#define RTASE_TX_IPCS_C BIT(29) /* Calculate IP checksum */
+#define RTASE_TX_IPV6F_C BIT(28) /* Indicate it is an IPv6 packet */
+
+union rtase_rx_desc {
+ struct {
+ __le64 header_buf_addr;
+ __le32 reserved1;
+ __le32 opts_header_len;
+ __le64 addr;
+ __le32 reserved2;
+ __le32 opts1;
+ } __packed desc_cmd;
+
+ struct {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 rss;
+ __le32 opts4;
+ __le32 reserved3;
+ __le32 opts3;
+ __le32 opts2;
+ __le32 opts1;
+ } __packed desc_status;
+} __packed;
+
+/*------ offset 28 of rx descriptor ------*/
+#define RTASE_RX_FIRST_FRAG BIT(25) /* Rx First segment of a packet */
+#define RTASE_RX_LAST_FRAG BIT(24) /* Rx Final segment of a packet */
+#define RTASE_RX_RES BIT(20)
+#define RTASE_RX_RUNT BIT(19)
+#define RTASE_RX_RWT BIT(18)
+#define RTASE_RX_CRC BIT(16)
+#define RTASE_RX_V6F BIT(31)
+#define RTASE_RX_V4F BIT(30)
+#define RTASE_RX_UDPT BIT(29)
+#define RTASE_RX_TCPT BIT(28)
+#define RTASE_RX_IPF BIT(26) /* IP checksum failed */
+#define RTASE_RX_UDPF BIT(25) /* UDP/IP checksum failed */
+#define RTASE_RX_TCPF BIT(24) /* TCP/IP checksum failed */
+#define RTASE_RX_VLAN_TAG BIT(16) /* VLAN tag available */
+
+#define RTASE_NUM_DESC 1024
+#define RTASE_TX_BUDGET_DEFAULT 256
+#define RTASE_TX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(struct rtase_tx_desc))
+#define RTASE_RX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(union rtase_rx_desc))
+#define RTASE_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
+#define RTASE_TX_START_THRS (2 * RTASE_TX_STOP_THRS)
+#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
+#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+
+#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
+
+struct rtase_int_vector {
+ struct rtase_private *tp;
+ unsigned int irq;
+ char name[RTASE_IVEC_NAME_SIZE];
+ u16 index;
+ u16 imr_addr;
+ u16 isr_addr;
+ u32 imr;
+ struct list_head ring_list;
+ struct napi_struct napi;
+ int (*poll)(struct napi_struct *napi, int budget);
+};
+
+struct rtase_ring {
+ struct rtase_int_vector *ivec;
+ void *desc;
+ dma_addr_t phy_addr;
+ u32 cur_idx;
+ u32 dirty_idx;
+ u16 index;
+
+ struct sk_buff *skbuff[RTASE_NUM_DESC];
+ void *data_buf[RTASE_NUM_DESC];
+ union {
+ u32 len[RTASE_NUM_DESC];
+ dma_addr_t data_phy_addr[RTASE_NUM_DESC];
+ } mis;
+
+ struct list_head ring_entry;
+ int (*ring_handler)(struct rtase_ring *ring, int budget);
+ u64 alloc_fail;
+};
+
+struct rtase_stats {
+ u64 tx_dropped;
+ u64 rx_dropped;
+ u64 multicast;
+ u64 rx_errors;
+ u64 rx_length_errors;
+ u64 rx_crc_errors;
+};
+
+struct rtase_private {
+ void __iomem *mmio_addr;
+ u32 sw_flag;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ u32 rx_buf_sz;
+
+ struct page_pool *page_pool;
+ struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
+ struct rtase_counters *tally_vaddr;
+ dma_addr_t tally_paddr;
+
+ u32 vlan_filter_ctrl;
+ u16 vlan_filter_vid[RTASE_VLAN_FILTER_ENTRY_NUM];
+
+ struct msix_entry msix_entry[RTASE_NUM_MSIX];
+ struct rtase_int_vector int_vector[RTASE_NUM_MSIX];
+
+ struct rtase_stats stats;
+
+ u16 tx_queue_ctrl;
+ u16 func_tx_queue_num;
+ u16 func_rx_queue_num;
+ u16 int_nums;
+ u16 tx_int_mit;
+ u16 rx_int_mit;
+};
+
+#define RTASE_LSO_64K 64000
+
+#define RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2 (16 * 4)
+
+#define RTASE_TCPHO_MASK GENMASK(24, 18)
+
+#define RTASE_MSS_MASK GENMASK(28, 18)
+
+#endif /* RTASE_H */
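
A quick worked example of the buffer sizing at the top of rtase.h: each receive buffer is one page minus the aligned skb_shared_info footer that build_skb() later places at the end of the buffer, and the jumbo limit further subtracts the VLAN+Ethernet header and the FCS. With 4 KiB pages and a typical x86_64 configuration (struct sizes vary by config and architecture, so these numbers are illustrative):

/* PAGE_SIZE                                        = 4096
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info))  ~=  320
 * RTASE_RX_BUF_SIZE    = 4096 - 320                = 3776
 * RTASE_MAX_JUMBO_SIZE = 3776 - VLAN_ETH_HLEN (18)
 *                             - ETH_FCS_LEN   (4)  = 3754
 */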
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
new file mode 100644
index 000000000000..f8777b7663d3
--- /dev/null
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -0,0 +1,2288 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * rtase is the Linux device driver released for Realtek Automotive Switch
+ * controllers with PCI-Express interface.
+ *
+ * Copyright(c) 2024 Realtek Semiconductor Corp.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * *************************
+ * * *
+ * * CPU network device *
+ * * *
+ * * +-------------+ *
+ * * | PCIE Host | *
+ * ***********++************
+ * ||
+ * PCIE
+ * ||
+ * ********************++**********************
+ * * | PCIE Endpoint | *
+ * * +---------------+ *
+ * * | GMAC | *
+ * * +--++--+ Realtek *
+ * * || RTL90xx Series *
+ * * || *
+ * * +-------------++----------------+ *
+ * * | | MAC | | *
+ * * | +-----+ | *
+ * * | | *
+ * * | Ethernet Switch Core | *
+ * * | | *
+ * * | +-----+ +-----+ | *
+ * * | | MAC |...........| MAC | | *
+ * * +---+-----+-----------+-----+---+ *
+ * * | PHY |...........| PHY | *
+ * * +--++-+ +--++-+ *
+ * *************||****************||***********
+ *
+ * The Realtek RTL90xx series block above is the entire chip: the GMAC
+ * connects directly to the switch core, with no PHY in between. This
+ * driver controls only the GMAC, not the switch core, so it is not a
+ * DSA driver; Linux merely acts as a normal leaf node in this model.
+ */
+
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/rtnetlink.h>
+#include <linux/tcp.h>
+#include <asm/irq.h>
+#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+
+#include "rtase.h"
+
+#define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF
+#define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD
+
+static const struct pci_device_id rtase_pci_tbl[] = {
+ {PCI_VDEVICE(REALTEK, 0x906A)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, rtase_pci_tbl);
+
+MODULE_AUTHOR("Realtek ARD Software Team");
+MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct rtase_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underrun;
+} __packed;
+
+static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
+{
+ writeb(val8, tp->mmio_addr + reg);
+}
+
+static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16)
+{
+ writew(val16, tp->mmio_addr + reg);
+}
+
+static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32)
+{
+ writel(val32, tp->mmio_addr + reg);
+}
+
+static u8 rtase_r8(const struct rtase_private *tp, u16 reg)
+{
+ return readb(tp->mmio_addr + reg);
+}
+
+static u16 rtase_r16(const struct rtase_private *tp, u16 reg)
+{
+ return readw(tp->mmio_addr + reg);
+}
+
+static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
+{
+ return readl(tp->mmio_addr + reg);
+}
+
+static void rtase_free_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ if (!tp->tx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
+ tp->tx_ring[i].desc,
+ tp->tx_ring[i].phy_addr);
+ tp->tx_ring[i].desc = NULL;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ if (!tp->rx_ring[i].desc)
+ continue;
+
+ dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
+ tp->rx_ring[i].desc,
+ tp->rx_ring[i].phy_addr);
+ tp->rx_ring[i].desc = NULL;
+ }
+}
+
+static int rtase_alloc_desc(struct rtase_private *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+	/* rx and tx descriptors need 256-byte alignment.
+	 * dma_alloc_coherent provides more than that.
+ */
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ tp->tx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_TX_RING_DESC_SIZE,
+ &tp->tx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->tx_ring[i].desc)
+ goto err_out;
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ tp->rx_ring[i].desc =
+ dma_alloc_coherent(&pdev->dev,
+ RTASE_RX_RING_DESC_SIZE,
+ &tp->rx_ring[i].phy_addr,
+ GFP_KERNEL);
+ if (!tp->rx_ring[i].desc)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rtase_free_desc(tp);
+ return -ENOMEM;
+}
+
+static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len,
+ struct rtase_tx_desc *desc)
+{
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+ DMA_TO_DEVICE);
+ desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE);
+ desc->opts2 = 0x00;
+ desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+}
+
+static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n)
+{
+ struct rtase_tx_desc *desc_base = ring->desc;
+ struct rtase_private *tp = ring->ivec->tp;
+ u32 i;
+
+ for (i = 0; i < n; i++) {
+ u32 entry = (start + i) % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = desc_base + entry;
+ u32 len = ring->mis.len[entry];
+ struct sk_buff *skb;
+
+ if (len == 0)
+ continue;
+
+ rtase_unmap_tx_skb(tp->pdev, len, desc);
+ ring->mis.len[entry] = 0;
+ skb = ring->skbuff[entry];
+ if (!skb)
+ continue;
+
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ ring->skbuff[entry] = NULL;
+ }
+}
+
+static void rtase_tx_clear(struct rtase_private *tp)
+{
+ struct rtase_ring *ring;
+ u16 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+ rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC);
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ }
+}
+
+static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
+
+ desc->desc_status.opts2 = 0;
+ /* force memory writes to complete before releasing descriptor */
+ dma_wmb();
+ WRITE_ONCE(desc->desc_cmd.opts1,
+ cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
+}
+
+static u32 rtase_tx_avail(struct rtase_ring *ring)
+{
+ return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC -
+ READ_ONCE(ring->cur_idx);
+}
+
+static int tx_handler(struct rtase_ring *ring, int budget)
+{
+ const struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ u32 dirty_tx, tx_left;
+ u32 bytes_compl = 0;
+ u32 pkts_compl = 0;
+ int workdone = 0;
+
+ dirty_tx = ring->dirty_idx;
+ tx_left = READ_ONCE(ring->cur_idx) - dirty_tx;
+
+ while (tx_left > 0) {
+ u32 entry = dirty_tx % RTASE_NUM_DESC;
+ struct rtase_tx_desc *desc = ring->desc +
+ sizeof(struct rtase_tx_desc) * entry;
+ u32 status;
+
+ status = le32_to_cpu(desc->opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc);
+ ring->mis.len[entry] = 0;
+ if (ring->skbuff[entry]) {
+ pkts_compl++;
+ bytes_compl += ring->skbuff[entry]->len;
+ napi_consume_skb(ring->skbuff[entry], budget);
+ ring->skbuff[entry] = NULL;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ workdone++;
+
+ if (workdone == RTASE_TX_BUDGET_DEFAULT)
+ break;
+ }
+
+ if (ring->dirty_idx != dirty_tx) {
+ dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
+ WRITE_ONCE(ring->dirty_idx, dirty_tx);
+
+ netif_subqueue_completed_wake(dev, ring->index, pkts_compl,
+ bytes_compl,
+ rtase_tx_avail(ring),
+ RTASE_TX_START_THRS);
+
+ if (ring->cur_idx != dirty_tx)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+ }
+
+ return 0;
+}
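
tx_handler() above is the completion half of a stop/wake handshake: it reports the freed packets and bytes through netif_subqueue_completed_wake(), which re-enables the subqueue once rtase_tx_avail() climbs back above RTASE_TX_START_THRS. The transmit half, sketched below with the same helpers (this is not the driver's xmit function), stops the subqueue when the ring runs low:

/* Sketch of the xmit-side half of the stop/wake pairing. */
static void rtase_tx_maybe_stop_sketch(struct net_device *dev,
				       struct rtase_ring *ring)
{
	/* stops the subqueue if fewer than RTASE_TX_STOP_THRS
	 * descriptors remain; the completion path wakes it again
	 */
	netif_subqueue_maybe_stop(dev, ring->index,
				  rtase_tx_avail(ring),
				  RTASE_TX_STOP_THRS,
				  RTASE_TX_START_THRS);
}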
+
+static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->tx_ring[idx];
+ struct rtase_tx_desc *desc;
+ u32 i;
+
+ memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
+ memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ ring->mis.len[i] = 0;
+ if ((RTASE_NUM_DESC - 1) == i) {
+ desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
+ desc->opts1 = cpu_to_le32(RTASE_RING_END);
+ }
+ }
+
+ ring->ring_handler = tx_handler;
+ if (idx < 4) {
+ ring->ivec = &tp->int_vector[idx];
+ list_add_tail(&ring->ring_entry,
+ &tp->int_vector[idx].ring_list);
+ } else {
+ ring->ivec = &tp->int_vector[0];
+ list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
+ }
+}
+
+static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->desc_cmd.addr = cpu_to_le64(mapping);
+
+ rtase_mark_to_asic(desc, rx_buf_sz);
+}
+
+static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+ desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
+}
+
+static int rtase_alloc_rx_data_buf(struct rtase_ring *ring,
+ void **p_data_buf,
+ union rtase_rx_desc *desc,
+ dma_addr_t *rx_phy_addr)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+ const struct rtase_private *tp = ivec->tp;
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(tp->page_pool);
+ if (!page) {
+ ring->alloc_fail++;
+ goto err_out;
+ }
+
+ *p_data_buf = page_address(page);
+ mapping = page_pool_get_dma_addr(page);
+ *rx_phy_addr = mapping;
+ rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
+
+ return 0;
+
+err_out:
+ rtase_make_unusable_by_asic(desc);
+
+ return -ENOMEM;
+}
+
+static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
+ u32 ring_end)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 cur;
+
+ for (cur = ring_start; ring_end - cur > 0; cur++) {
+ u32 i = cur % RTASE_NUM_DESC;
+ union rtase_rx_desc *desc = desc_base + i;
+ int ret;
+
+ if (ring->data_buf[i])
+ continue;
+
+ ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc,
+ &ring->mis.data_phy_addr[i]);
+ if (ret)
+ break;
+ }
+
+ return cur - ring_start;
+}
+
+static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
+{
+ desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
+}
+
+static void rtase_rx_ring_clear(struct page_pool *page_pool,
+ struct rtase_ring *ring)
+{
+ union rtase_rx_desc *desc;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++) {
+ desc = ring->desc + sizeof(union rtase_rx_desc) * i;
+ page = virt_to_head_page(ring->data_buf[i]);
+
+ if (ring->data_buf[i])
+ page_pool_put_full_page(page_pool, page, true);
+
+ rtase_make_unusable_by_asic(desc);
+ }
+}
+
+static int rtase_fragmented_frame(u32 status)
+{
+ return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
+ (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
+}
+
+static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
+ const union rtase_rx_desc *desc)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ /* rx csum offload */
+ if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
+ (opts2 & RTASE_RX_V6F)) {
+ if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
+ ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+ if (!(opts2 & RTASE_RX_VLAN_TAG))
+ return;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ swab16(opts2 & RTASE_VLAN_TAG_MASK));
+}
+
+static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
+{
+ struct rtase_int_vector *ivec = ring->ivec;
+
+ napi_gro_receive(&ivec->napi, skb);
+}
+
+static int rx_handler(struct rtase_ring *ring, int budget)
+{
+ union rtase_rx_desc *desc_base = ring->desc;
+ u32 pkt_size, cur_rx, delta, entry, status;
+ struct rtase_private *tp = ring->ivec->tp;
+ struct net_device *dev = tp->dev;
+ union rtase_rx_desc *desc;
+ struct sk_buff *skb;
+ int workdone = 0;
+
+ cur_rx = ring->cur_idx;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = &desc_base[entry];
+
+ while (workdone < budget) {
+ status = le32_to_cpu(desc->desc_status.opts1);
+
+ if (status & RTASE_DESC_OWN)
+ break;
+
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the rx descriptor until
+ * we know the status of RTASE_DESC_OWN
+ */
+ dma_rmb();
+
+ if (unlikely(status & RTASE_RX_RES)) {
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
+
+ tp->stats.rx_errors++;
+
+ if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
+ tp->stats.rx_length_errors++;
+
+ if (status & RTASE_RX_CRC)
+ tp->stats.rx_crc_errors++;
+
+ if (dev->features & NETIF_F_RXALL)
+ goto process_pkt;
+
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+process_pkt:
+ pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size -= ETH_FCS_LEN;
+
+ /* The driver does not support incoming fragmented frames.
+	 * They are seen as a symptom of over-MTU-sized frames.
+ */
+ if (unlikely(rtase_fragmented_frame(status))) {
+ tp->stats.rx_dropped++;
+ tp->stats.rx_length_errors++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ ring->mis.data_phy_addr[entry],
+ tp->rx_buf_sz, DMA_FROM_DEVICE);
+
+ skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
+ if (!skb) {
+ tp->stats.rx_dropped++;
+ rtase_mark_to_asic(desc, tp->rx_buf_sz);
+ goto skip_process_pkt;
+ }
+ ring->data_buf[entry] = NULL;
+
+ if (dev->features & NETIF_F_RXCSUM)
+ rtase_rx_csum(tp, skb, desc);
+
+ skb_put(skb, pkt_size);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ tp->stats.multicast++;
+
+ rtase_rx_vlan_skb(desc, skb);
+ rtase_rx_skb(ring, skb);
+
+ dev_sw_netstats_rx_add(dev, pkt_size);
+
+skip_process_pkt:
+ workdone++;
+ cur_rx++;
+ entry = cur_rx % RTASE_NUM_DESC;
+ desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
+ }
+
+ ring->cur_idx = cur_rx;
+ delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
+ ring->dirty_idx += delta;
+
+ return workdone;
+}
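
rx_handler() above and rtase_mark_to_asic() earlier are the two halves of the usual descriptor ownership handshake: the producer publishes all descriptor fields with dma_wmb() before flipping the OWN bit, and the consumer checks OWN first and issues dma_rmb() before reading any other field. In sketch form (DESC_OWN and the field names here are illustrative):

	/* producer: publish all fields, then hand ownership to hardware */
	desc->addr = cpu_to_le64(mapping);
	dma_wmb();				/* fields land before OWN */
	WRITE_ONCE(desc->opts1, cpu_to_le32(DESC_OWN | buf_len));

	/* consumer: check ownership first, then read the other fields */
	if (le32_to_cpu(desc->opts1) & DESC_OWN)
		return;				/* hardware still owns it */
	dma_rmb();				/* order OWN check before reads */
	opts2 = le32_to_cpu(desc->opts2);	/* now safe to read */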
+
+static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
+{
+ struct rtase_ring *ring = &tp->rx_ring[idx];
+ u16 i;
+
+ memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
+ memset(ring->data_buf, 0x0, sizeof(ring->data_buf));
+ ring->cur_idx = 0;
+ ring->dirty_idx = 0;
+ ring->index = idx;
+ ring->alloc_fail = 0;
+
+ for (i = 0; i < RTASE_NUM_DESC; i++)
+ ring->mis.data_phy_addr[i] = 0;
+
+ ring->ring_handler = rx_handler;
+ ring->ivec = &tp->int_vector[idx];
+ list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
+}
+
+static void rtase_rx_clear(struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]);
+
+ page_pool_destroy(tp->page_pool);
+ tp->page_pool = NULL;
+}
+
+static int rtase_init_ring(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *page_pool;
+ u32 num;
+ u16 i;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
+ pp_params.nid = dev_to_node(&tp->pdev->dev);
+ pp_params.dev = &tp->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.offset = 0;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool)) {
+ netdev_err(tp->dev, "failed to create page pool\n");
+ return -ENOMEM;
+ }
+
+ tp->page_pool = page_pool;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_tx_desc_init(tp, i);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ rtase_rx_desc_init(tp, i);
+
+ num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC);
+ if (num != RTASE_NUM_DESC)
+ goto err_out;
+
+ rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
+ sizeof(union rtase_rx_desc) *
+ (RTASE_NUM_DESC - 1));
+ }
+
+ return 0;
+
+err_out:
+ rtase_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static void rtase_interrupt_mitigation(const struct rtase_private *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit);
+
+ for (i = 0; i < tp->func_rx_queue_num; i++)
+ rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit);
+}
+
+static void rtase_tally_counter_addr_fill(const struct rtase_private *tp)
+{
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, lower_32_bits(tp->tally_paddr));
+}
+
+static void rtase_tally_counter_clear(const struct rtase_private *tp)
+{
+ u32 cmd = lower_32_bits(tp->tally_paddr);
+
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
+}
+
+static void rtase_desc_addr_fill(const struct rtase_private *tp)
+{
+ const struct rtase_ring *ring;
+ u16 i, cmd, val;
+ int err;
+
+ for (i = 0; i < tp->func_tx_queue_num; i++) {
+ ring = &tp->tx_ring[i];
+
+ rtase_w32(tp, RTASE_TX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_TX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+
+ cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS;
+ rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd);
+
+ err = read_poll_timeout(rtase_r16, val,
+ !(val & RTASE_TX_DESC_CMD_CS), 10,
+ 1000, false, tp,
+ RTASE_TX_DESC_COMMAND);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev,
+ "error occurred in fill tx descriptor\n");
+ }
+
+ for (i = 0; i < tp->func_rx_queue_num; i++) {
+ ring = &tp->rx_ring[i];
+
+ if (i == 0) {
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0,
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4,
+ upper_32_bits(ring->phy_addr));
+ } else {
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)),
+ lower_32_bits(ring->phy_addr));
+ rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)),
+ upper_32_bits(ring->phy_addr));
+ }
+ }
+}
+
+static void rtase_hw_set_features(const struct net_device *dev,
+ netdev_features_t features)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config, val;
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ if (features & NETIF_F_RXALL)
+ rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+ else
+ rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+
+ val = rtase_r16(tp, RTASE_CPLUS_CMD);
+ if (features & NETIF_F_RXCSUM)
+ rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM);
+ else
+ rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM);
+
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rx_config |= (RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+ else
+ rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN |
+ RTASE_OUTER_VLAN_DETAG_EN);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config);
+}
+
+static void rtase_hw_set_rx_packet_filter(struct net_device *dev)
+{
+ u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF };
+ struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_mode;
+
+ rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK;
+ rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS;
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ } else {
+ struct netdev_hw_addr *hw_addr;
+
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
+
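+		/* hash each multicast address: CRC bit 31 selects the filter
+		 * word, the masked low bits select the bit within it
+		 */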
+ netdev_for_each_mc_addr(hw_addr, dev) {
+ u32 bit_nr = eth_hw_addr_crc(hw_addr);
+ u32 idx = u32_get_bits(bit_nr, BIT(31));
+ u32 bit = u32_get_bits(bit_nr,
+ RTASE_MULTICAST_FILTER_MASK);
+
+ mc_filter[idx] |= BIT(bit);
+ rx_mode |= RTASE_ACCEPT_MULTICAST;
+ }
+ }
+
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT;
+
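+	/* the MAR registers take the filter words byte-swapped and in
+	 * reverse order
+	 */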
+ rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1]));
+ rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0]));
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode);
+}
+
+static void rtase_irq_dis_and_clear(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 val1;
+ u16 val2;
+ u8 i;
+
+ rtase_w32(tp, ivec->imr_addr, 0);
+ val1 = rtase_r32(tp, ivec->isr_addr);
+ rtase_w32(tp, ivec->isr_addr, val1);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, 0);
+ val2 = rtase_r16(tp, ivec->isr_addr);
+ rtase_w16(tp, ivec->isr_addr, val2);
+ }
+}
+
+static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond,
+ u32 sleep_us, u64 timeout_us, u16 reg)
+{
+ int err;
+ u8 val;
+
+ err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us,
+ timeout_us, false, tp, reg);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "poll reg 0x00%x timeout\n", reg);
+}
+
+static void rtase_nic_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rx_config;
+ u8 val;
+
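+	/* quiesce the NIC: stop accepting packets, gate RX, request a DMA
+	 * stop, then wait for both FIFOs to drain before clearing TE/RE
+	 */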
+ rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ);
+ mdelay(2);
+
+ rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000,
+ RTASE_CHIP_CMD);
+
+ rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000,
+ RTASE_FIFOR);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE));
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ);
+
+ rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config);
+}
+
+static void rtase_hw_reset(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_irq_dis_and_clear(tp);
+
+ rtase_nic_reset(dev);
+}
+
+static void rtase_set_rx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_FCR);
+ switch (tp->func_rx_queue_num) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK);
+ break;
+ case 4:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_FCR, reg_data);
+}
+
+static void rtase_set_tx_queue(const struct rtase_private *tp)
+{
+ u16 reg_data;
+
+ reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1);
+ switch (tp->tx_queue_ctrl) {
+ case 1:
+ u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK);
+ break;
+ case 2:
+ u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK);
+ break;
+ case 3:
+ case 4:
+ u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK);
+ break;
+ default:
+ u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK);
+ break;
+ }
+ rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data);
+}
+
+static void rtase_hw_config(struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u32 reg_data32;
+ u16 reg_data16;
+
+ rtase_hw_reset(dev);
+
+ /* set rx dma burst */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0);
+ reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH);
+ u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256,
+ RTASE_RX_MX_DMA_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16);
+
+	/* new rx descriptor */
+ reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW;
+ u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16);
+
+ rtase_set_rx_queue(tp);
+
+ rtase_interrupt_mitigation(tp);
+
+ /* set tx dma burst size and interframe gap time */
+ reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0);
+ u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED,
+ RTASE_TX_DMA_MASK);
+ u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP,
+ RTASE_TX_INTER_FRAME_GAP_MASK);
+ rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32);
+
+ /* new tx descriptor */
+ reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL);
+ rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 |
+ RTASE_TX_NEW_DESC_FORMAT_EN);
+
+ /* tx fetch desc number */
+ rtase_w8(tp, RTASE_TDFNR, 0x10);
+
+ /* tag num select */
+ reg_data16 = rtase_r16(tp, RTASE_MTPS);
+ u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK);
+ rtase_w16(tp, RTASE_MTPS, reg_data16);
+
+ rtase_set_tx_queue(tp);
+
+ rtase_w16(tp, RTASE_TOKSEL, 0x5555);
+
+ rtase_tally_counter_addr_fill(tp);
+ rtase_desc_addr_fill(tp);
+ rtase_hw_set_features(dev, dev->features);
+
+ /* enable flow control */
+ reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD);
+ reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+ rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16);
+	/* set near-full fifo threshold to work around the rx missed issue */
+ rtase_w16(tp, RTASE_RFIFONFULL, 0x190);
+
+ rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz);
+
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
+static void rtase_nic_enable(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
+ u8 val;
+
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
+ rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
+
+ val = rtase_r8(tp, RTASE_CHIP_CMD);
+ rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
+
+ val = rtase_r8(tp, RTASE_MISC);
+ rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
+}
+
+static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
+{
+ const struct rtase_int_vector *ivec = &tp->int_vector[0];
+ u32 i;
+
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+}
+
+static void rtase_hw_start(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_nic_enable(dev);
+ rtase_enable_hw_interrupt(tp);
+}
+
+/* the interrupt handler handles the interrupt status of RXQ0, TXQ0 and
+ * TXQ4~7
+ */
+static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u32 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r32(tp, ivec->isr_addr);
+
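+	/* mask this vector and ack the pending status (all but RTASE_FOVW)
+	 * before handing the work to NAPI
+	 */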
+ rtase_w32(tp, ivec->imr_addr, 0x0);
+ rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* the interrupt handler handles the interrupt status of RXQ1&TXQ1, RXQ2&TXQ2
+ * or RXQ3&TXQ3, depending on the interrupt vector
+ */
+static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
+{
+ const struct rtase_private *tp;
+ struct rtase_int_vector *ivec;
+ u16 status;
+
+ ivec = dev_instance;
+ tp = ivec->tp;
+ status = rtase_r16(tp, ivec->isr_addr);
+
+ rtase_w16(tp, ivec->imr_addr, 0x0);
+ rtase_w16(tp, ivec->isr_addr, status);
+
+ if (napi_schedule_prep(&ivec->napi))
+ __napi_schedule(&ivec->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int rtase_poll(struct napi_struct *napi, int budget)
+{
+ const struct rtase_int_vector *ivec;
+ const struct rtase_private *tp;
+ struct rtase_ring *ring;
+ int total_workdone = 0;
+
+ ivec = container_of(napi, struct rtase_int_vector, napi);
+ tp = ivec->tp;
+
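+	/* service every ring attached to this vector; each handler reports
+	 * how much of the budget it consumed
+	 */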
+ list_for_each_entry(ring, &ivec->ring_list, ring_entry)
+ total_workdone += ring->ring_handler(ring, budget);
+
+ if (total_workdone >= budget)
+ return budget;
+
+ if (napi_complete_done(napi, total_workdone)) {
+ if (!ivec->index)
+ rtase_w32(tp, ivec->imr_addr, ivec->imr);
+ else
+ rtase_w16(tp, ivec->imr_addr, ivec->imr);
+ }
+
+ return total_workdone;
+}
+
+static int rtase_open(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ struct rtase_int_vector *ivec;
+ u16 i = 0, j;
+ int ret;
+
+ ivec = &tp->int_vector[0];
+ tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
+
+ ret = rtase_alloc_desc(tp);
+ if (ret)
+ return ret;
+
+ ret = rtase_init_ring(dev);
+ if (ret)
+ goto err_free_all_allocated_mem;
+
+ rtase_hw_config(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ ret = request_irq(ivec->irq, rtase_interrupt, 0,
+ dev->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+
+ /* request other interrupts to handle multiqueue */
+ for (i = 1; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ tp->dev->name, i);
+ ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
+ ivec->name, ivec);
+ if (ret)
+ goto err_free_all_allocated_irq;
+ }
+ } else {
+ ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
+ ivec);
+ if (ret)
+ goto err_free_all_allocated_mem;
+ }
+
+ rtase_hw_start(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+
+ return 0;
+
+err_free_all_allocated_irq:
+ for (j = 0; j < i; j++)
+ free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
+
+err_free_all_allocated_mem:
+ rtase_free_desc(tp);
+
+ return ret;
+}
+
+static void rtase_down(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ struct rtase_ring *ring, *tmp;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_disable(&ivec->napi);
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
+ netif_tx_disable(dev);
+
+ netif_carrier_off(dev);
+
+ rtase_hw_reset(dev);
+
+ rtase_tx_clear(tp);
+
+ rtase_rx_clear(tp);
+}
+
+static int rtase_close(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct pci_dev *pdev = tp->pdev;
+ u32 i;
+
+ rtase_down(dev);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+ for (i = 0; i < tp->int_nums; i++)
+ free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
+
+ } else {
+ free_irq(pdev->irq, &tp->int_vector[0]);
+ }
+
+ rtase_free_desc(tp);
+
+ return 0;
+}
+
+static u32 rtase_tx_vlan_tag(const struct rtase_private *tp,
+ const struct sk_buff *skb)
+{
+ return (skb_vlan_tag_present(skb)) ?
+ (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00;
+}
+
+static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev)
+{
+ u32 csum_cmd = 0;
+ u8 ip_protocol;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ csum_cmd = RTASE_TX_IPCS_C;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ csum_cmd = RTASE_TX_IPV6F_C;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP)
+ csum_cmd |= RTASE_TX_TCPCS_C;
+ else if (ip_protocol == IPPROTO_UDP)
+ csum_cmd |= RTASE_TX_UDPCS_C;
+
+ csum_cmd |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+
+ return csum_cmd;
+}
+
+static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb,
+ u32 opts1, u32 opts2)
+{
+ const struct skb_shared_info *info = skb_shinfo(skb);
+ const struct rtase_private *tp = ring->ivec->tp;
+ const u8 nr_frags = info->nr_frags;
+ struct rtase_tx_desc *txd = NULL;
+ u32 cur_frag, entry;
+
+ entry = ring->cur_idx;
+ for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
+ const skb_frag_t *frag = &info->frags[cur_frag];
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % RTASE_NUM_DESC;
+
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+ len = skb_frag_size(frag);
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(&tp->pdev->dev, addr, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(tp->dev,
+ "Failed to map TX fragments DMA!\n");
+
+ goto err_out;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ status = (opts1 | len | RTASE_RING_END);
+ else
+ status = opts1 | len;
+
+ if (cur_frag == (nr_frags - 1)) {
+ ring->skbuff[entry] = skb;
+ status |= RTASE_TX_LAST_FRAG;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+ txd->opts1 = cpu_to_le32(status);
+ }
+
+ return cur_frag;
+
+err_out:
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag);
+ return -EIO;
+}
+
+static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct rtase_private *tp = netdev_priv(dev);
+ u32 q_idx, entry, len, opts1, opts2;
+ struct netdev_queue *tx_queue;
+ bool stop_queue, door_bell;
+ u32 mss = shinfo->gso_size;
+ struct rtase_tx_desc *txd;
+ struct rtase_ring *ring;
+ dma_addr_t mapping;
+ int frags;
+
+ /* multiqueues */
+ q_idx = skb_get_queue_mapping(skb);
+ ring = &tp->tx_ring[q_idx];
+ tx_queue = netdev_get_tx_queue(dev, q_idx);
+
+ if (unlikely(!rtase_tx_avail(ring))) {
+ if (net_ratelimit())
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
+
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = ring->cur_idx % RTASE_NUM_DESC;
+ txd = ring->desc + sizeof(struct rtase_tx_desc) * entry;
+
+ opts1 = RTASE_DESC_OWN;
+ opts2 = rtase_tx_vlan_tag(tp, skb);
+
+ /* tcp segmentation offload (or tcp large send) */
+ if (mss) {
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
+ opts1 |= RTASE_GIANT_SEND_V4;
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
+ if (skb_cow_head(skb, 0))
+ goto err_dma_0;
+
+ tcp_v6_gso_csum_prep(skb);
+ opts1 |= RTASE_GIANT_SEND_V6;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ opts1 |= u32_encode_bits(skb_transport_offset(skb),
+ RTASE_TCPHO_MASK);
+ opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ opts2 |= rtase_tx_csum(skb, dev);
+ }
+
+ frags = rtase_xmit_frags(ring, skb, opts1, opts2);
+ if (unlikely(frags < 0))
+ goto err_dma_0;
+
+ if (frags) {
+ len = skb_headlen(skb);
+ opts1 |= RTASE_TX_FIRST_FRAG;
+ } else {
+ len = skb->len;
+ ring->skbuff[entry] = skb;
+ opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG;
+ }
+
+ if (((entry + 1) % RTASE_NUM_DESC) == 0)
+ opts1 |= (len | RTASE_RING_END);
+ else
+ opts1 |= len;
+
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(dev, "Failed to map TX DMA!\n");
+
+ goto err_dma_1;
+ }
+
+ ring->mis.len[entry] = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts2);
+ txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN);
+
+ /* make sure the operating fields have been updated */
+ dma_wmb();
+
+ door_bell = __netdev_tx_sent_queue(tx_queue, skb->len,
+ netdev_xmit_more());
+
+ txd->opts1 = cpu_to_le32(opts1);
+
+ skb_tx_timestamp(skb);
+
+	/* the tx path needs to see the descriptor changes before the
+	 * updated cur_idx
+	 */
+ smp_wmb();
+
+ WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1);
+
+ stop_queue = !netif_subqueue_maybe_stop(dev, ring->index,
+ rtase_tx_avail(ring),
+ RTASE_TX_STOP_THRS,
+ RTASE_TX_START_THRS);
+
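+	/* kick the hardware only at the end of an xmit_more batch or when
+	 * the queue is about to stop
+	 */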
+ if (door_bell || stop_queue)
+ rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index));
+
+ return NETDEV_TX_OK;
+
+err_dma_1:
+ ring->skbuff[entry] = NULL;
+ rtase_tx_clear_range(ring, ring->cur_idx + 1, frags);
+
+err_dma_0:
+ tp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void rtase_set_rx_mode(struct net_device *dev)
+{
+ rtase_hw_set_rx_packet_filter(dev);
+}
+
+static void rtase_enable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK);
+}
+
+static void rtase_disable_eem_write(const struct rtase_private *tp)
+{
+ u8 val;
+
+ val = rtase_r8(tp, RTASE_EEM);
+ rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK);
+}
+
+static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
+{
+ u32 rar_low, rar_high;
+
+ rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
+
+ rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
+
+ rtase_enable_eem_write(tp);
+ rtase_w32(tp, RTASE_MAC0, rar_low);
+ rtase_w32(tp, RTASE_MAC4, rar_high);
+ rtase_disable_eem_write(tp);
+ rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
+}
+
+static int rtase_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
+
+ rtase_rar_set(tp, dev->dev_addr);
+
+ return 0;
+}
+
+static int rtase_change_mtu(struct net_device *dev, int new_mtu)
+{
+	WRITE_ONCE(dev->mtu, new_mtu);
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static void rtase_wait_for_quiescence(const struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ synchronize_irq(ivec->irq);
+ /* wait for any pending NAPI task to complete */
+ napi_disable(&ivec->napi);
+ }
+
+ rtase_irq_dis_and_clear(tp);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ napi_enable(&ivec->napi);
+ }
+}
+
+static void rtase_sw_reset(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ rtase_hw_reset(dev);
+
+	/* wait for any pending (async) irq and NAPI work to finish */
+ rtase_wait_for_quiescence(dev);
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return;
+ }
+
+ rtase_hw_config(dev);
+	/* the link is always up, so start transmitting & receiving */
+ rtase_hw_start(dev);
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+}
+
+static void rtase_dump_tally_counter(const struct rtase_private *tp)
+{
+ dma_addr_t paddr = tp->tally_paddr;
+ u32 cmd = lower_32_bits(paddr);
+ u32 val;
+ int err;
+
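+	/* point the hardware at the tally buffer, trigger a dump and poll
+	 * until the dump bit is cleared
+	 */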
+ rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
+ rtase_w32(tp, RTASE_DTCCR0, cmd);
+ rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
+
+ err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
+ 10, 250, false, tp, RTASE_DTCCR0);
+
+ if (err == -ETIMEDOUT)
+ netdev_err(tp->dev, "error occurred in dump tally counter\n");
+}
+
+static void rtase_dump_state(const struct net_device *dev)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ int max_reg_size = RTASE_PCI_REGS_SIZE;
+ const struct rtase_counters *counters;
+ const struct rtase_ring *ring;
+ u32 dword_rd;
+ int n = 0;
+
+ ring = &tp->tx_ring[0];
+ netdev_err(dev, "Tx descriptor info:\n");
+ netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
+
+ ring = &tp->rx_ring[0];
+ netdev_err(dev, "Rx descriptor info:\n");
+ netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
+ netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
+ netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
+
+ netdev_err(dev, "Device Registers:\n");
+ netdev_err(dev, "Chip Command = 0x%02x\n",
+ rtase_r8(tp, RTASE_CHIP_CMD));
+ netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
+ netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
+ netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
+ rtase_r16(tp, RTASE_BOOT_CTL));
+ netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_ISR));
+ netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
+ rtase_r16(tp, RTASE_EPHY_IMR));
+ netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
+ rtase_r16(tp, RTASE_CLKSW_SET));
+
+ netdev_err(dev, "Dump PCI Registers:\n");
+
+ while (n < max_reg_size) {
+ if ((n % RTASE_DWORD_MOD) == 0)
+ netdev_err(tp->dev, "0x%03x:\n", n);
+
+ pci_read_config_dword(tp->pdev, n, &dword_rd);
+ netdev_err(tp->dev, "%08x\n", dword_rd);
+ n += 4;
+ }
+
+ netdev_err(dev, "Dump tally counter:\n");
+ counters = tp->tally_vaddr;
+ rtase_dump_tally_counter(tp);
+
+ netdev_err(dev, "tx_packets %lld\n",
+ le64_to_cpu(counters->tx_packets));
+ netdev_err(dev, "rx_packets %lld\n",
+ le64_to_cpu(counters->rx_packets));
+ netdev_err(dev, "tx_errors %lld\n",
+ le64_to_cpu(counters->tx_errors));
+ netdev_err(dev, "rx_errors %d\n",
+ le32_to_cpu(counters->rx_errors));
+ netdev_err(dev, "rx_missed %d\n",
+ le16_to_cpu(counters->rx_missed));
+ netdev_err(dev, "align_errors %d\n",
+ le16_to_cpu(counters->align_errors));
+ netdev_err(dev, "tx_one_collision %d\n",
+ le32_to_cpu(counters->tx_one_collision));
+ netdev_err(dev, "tx_multi_collision %d\n",
+ le32_to_cpu(counters->tx_multi_collision));
+ netdev_err(dev, "rx_unicast %lld\n",
+ le64_to_cpu(counters->rx_unicast));
+ netdev_err(dev, "rx_broadcast %lld\n",
+ le64_to_cpu(counters->rx_broadcast));
+ netdev_err(dev, "rx_multicast %d\n",
+ le32_to_cpu(counters->rx_multicast));
+ netdev_err(dev, "tx_aborted %d\n",
+ le16_to_cpu(counters->tx_aborted));
+ netdev_err(dev, "tx_underrun %d\n",
+ le16_to_cpu(counters->tx_underrun));
+}
+
+static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ rtase_dump_state(dev);
+ rtase_sw_reset(dev);
+}
+
+static void rtase_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ dev_fetch_sw_netstats(stats, dev->tstats);
+
+	/* fetch, from the hardware tally counter, the additional values
+	 * missing from the stats collected by the driver
+	 */
+ rtase_dump_tally_counter(tp);
+ stats->rx_errors = tp->stats.rx_errors;
+ stats->tx_errors = le64_to_cpu(counters->tx_errors);
+ stats->rx_dropped = tp->stats.rx_dropped;
+ stats->tx_dropped = tp->stats.tx_dropped;
+ stats->multicast = tp->stats.multicast;
+ stats->rx_length_errors = tp->stats.rx_length_errors;
+}
+
+static netdev_features_t rtase_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_fix = features;
+
+	/* TSO is not supported for jumbo frames */
+ if (dev->mtu > ETH_DATA_LEN)
+ features_fix &= ~NETIF_F_ALL_TSO;
+
+ return features_fix;
+}
+
+static int rtase_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t features_set = features;
+
+ features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (features_set ^ dev->features)
+ rtase_hw_set_features(dev, features_set);
+
+ return 0;
+}
+
+static const struct net_device_ops rtase_netdev_ops = {
+ .ndo_open = rtase_open,
+ .ndo_stop = rtase_close,
+ .ndo_start_xmit = rtase_start_xmit,
+ .ndo_set_rx_mode = rtase_set_rx_mode,
+ .ndo_set_mac_address = rtase_set_mac_address,
+ .ndo_change_mtu = rtase_change_mtu,
+ .ndo_tx_timeout = rtase_tx_timeout,
+ .ndo_get_stats64 = rtase_get_stats64,
+ .ndo_fix_features = rtase_fix_features,
+ .ndo_set_features = rtase_set_features,
+};
+
+static void rtase_get_mac_address(struct net_device *dev)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
+ u32 i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i);
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ eth_hw_addr_random(dev);
+ netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr);
+ } else {
+ eth_hw_addr_set(dev, mac_addr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
+ }
+
+ rtase_rar_set(tp, dev->dev_addr);
+}
+
+static int rtase_get_settings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
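+	/* the link is fixed at 5Gbps full duplex and autonegotiation is not
+	 * supported, so report static settings
+	 */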
+ u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ cmd->base.speed = SPEED_5000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void rtase_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN);
+ pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN);
+}
+
+static int rtase_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+ u16 value = rtase_r16(tp, RTASE_CPLUS_CMD);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN);
+
+ if (pause->tx_pause)
+ value |= RTASE_FORCE_TXFLOW_EN;
+
+ if (pause->rx_pause)
+ value |= RTASE_FORCE_RXFLOW_EN;
+
+ rtase_w16(tp, RTASE_CPLUS_CMD, value);
+ return 0;
+}
+
+static void rtase_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+ const struct rtase_counters *counters;
+
+ counters = tp->tally_vaddr;
+
+ rtase_dump_tally_counter(tp);
+
+ stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets);
+ stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets);
+ stats->FramesLostDueToIntMACXmitError =
+ le64_to_cpu(counters->tx_errors);
+ stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast);
+}
+
+static const struct ethtool_ops rtase_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = rtase_get_settings,
+ .get_pauseparam = rtase_get_pauseparam,
+ .set_pauseparam = rtase_set_pauseparam,
+ .get_eth_mac_stats = rtase_get_eth_mac_stats,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void rtase_init_netdev_ops(struct net_device *dev)
+{
+ dev->netdev_ops = &rtase_netdev_ops;
+ dev->ethtool_ops = &rtase_ethtool_ops;
+}
+
+static void rtase_reset_interrupt(struct pci_dev *pdev,
+ const struct rtase_private *tp)
+{
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+}
+
+static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
+{
+ int ret, irq;
+ u16 i;
+
+ memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX *
+ sizeof(struct msix_entry));
+
+ for (i = 0; i < RTASE_NUM_MSIX; i++)
+ tp->msix_entry[i].entry = i;
+
+ ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ irq = pci_irq_vector(pdev, i);
+ if (!irq) {
+ pci_disable_msix(pdev);
+ return irq;
+ }
+
+ tp->int_vector[i].irq = irq;
+ }
+
+ return 0;
+}
+
+static int rtase_alloc_interrupt(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ int ret;
+
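+	/* prefer MSI-X; fall back to a single MSI vector if it cannot be
+	 * enabled
+	 */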
+ ret = rtase_alloc_msix(pdev, tp);
+ if (ret) {
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to alloc interrupt.(MSI)\n");
+ return ret;
+ }
+
+ tp->sw_flag |= RTASE_SWF_MSI_ENABLED;
+ } else {
+ tp->sw_flag |= RTASE_SWF_MSIX_ENABLED;
+ }
+
+ return 0;
+}
+
+static void rtase_init_hardware(const struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++)
+ rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0);
+}
+
+static void rtase_init_int_vector(struct rtase_private *tp)
+{
+ u16 i;
+
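+	/* vector 0 aggregates RXQ0/TXQ0 plus TXQ4~7; vectors 1~3 each serve
+	 * one RXQ/TXQ pair
+	 */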
+ /* interrupt vector 0 */
+ tp->int_vector[0].tp = tp;
+ tp->int_vector[0].index = 0;
+ tp->int_vector[0].imr_addr = RTASE_IMR0;
+ tp->int_vector[0].isr_addr = RTASE_ISR0;
+ tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK |
+ RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 |
+ RTASE_TOK7;
+ tp->int_vector[0].poll = rtase_poll;
+
+ memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
+
+ netif_napi_add(tp->dev, &tp->int_vector[0].napi,
+ tp->int_vector[0].poll);
+
+ /* interrupt vector 1 ~ 3 */
+ for (i = 1; i < tp->int_nums; i++) {
+ tp->int_vector[i].tp = tp;
+ tp->int_vector[i].index = i;
+ tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4;
+ tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4;
+ tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU |
+ RTASE_Q_TOK;
+ tp->int_vector[i].poll = rtase_poll;
+
+ memset(tp->int_vector[i].name, 0x0,
+ sizeof(tp->int_vector[0].name));
+ INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
+
+ netif_napi_add(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll);
+ }
+}
+
+static u16 rtase_calc_time_mitigation(u32 time_us)
+{
+ u8 msb, time_count, time_unit;
+ u16 int_miti;
+
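+	/* encode time_us as count << unit: the top bits form the count and
+	 * the required shift becomes the unit
+	 */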
+ time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+
+ msb = fls(time_us);
+ if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ time_unit = 0;
+ time_count = time_us;
+ }
+
+ int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) |
+ u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK);
+
+ return int_miti;
+}
+
+static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
+{
+ u8 msb, pkt_num_count, pkt_num_unit;
+ u16 int_miti;
+
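+	/* same count << unit encoding as the time mitigation; counts above
+	 * 60 use the maximum unit
+	 */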
+ pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+
+ if (pkt_num > 60) {
+ pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
+ pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT;
+ } else {
+ msb = fls(pkt_num);
+ if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
+ pkt_num_count = pkt_num >> (msb -
+ RTASE_MITI_COUNT_BIT_NUM);
+ } else {
+ pkt_num_unit = 0;
+ pkt_num_count = pkt_num;
+ }
+ }
+
+ int_miti = u16_encode_bits(pkt_num_count,
+ RTASE_MITI_PKT_NUM_COUNT_MASK) |
+ u16_encode_bits(pkt_num_unit,
+ RTASE_MITI_PKT_NUM_UNIT_MASK);
+
+ return int_miti;
+}
+
+static void rtase_init_software_variable(struct pci_dev *pdev,
+ struct rtase_private *tp)
+{
+ u16 int_miti;
+
+ tp->tx_queue_ctrl = RTASE_TXQ_CTRL;
+ tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM;
+ tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM;
+ tp->int_nums = RTASE_INTERRUPT_NUM;
+
+ int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) |
+ rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM);
+ tp->tx_int_mit = int_miti;
+ tp->rx_int_mit = int_miti;
+
+ tp->sw_flag = 0;
+
+ rtase_init_int_vector(tp);
+
+ /* MTU range: 60 - hw-specific max */
+ tp->dev->min_mtu = ETH_ZLEN;
+ tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
+}
+
+static bool rtase_check_mac_version_valid(struct rtase_private *tp)
+{
+ u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+ bool known_ver = false;
+
+ switch (hw_ver) {
+ case 0x00800000:
+ case 0x04000000:
+ case 0x04800000:
+ known_ver = true;
+ break;
+ }
+
+ return known_ver;
+}
+
+static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+ void __iomem **ioaddr_out)
+{
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int ret = -ENOMEM;
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev_mq(sizeof(struct rtase_private),
+ RTASE_FUNC_TXQ_NUM);
+ if (!dev)
+ goto err_out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ goto err_out_free_dev;
+
+	/* make sure the PCI region at bar 2 is MMIO */
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) {
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret < 0)
+ goto err_out_disable;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "no usable dma addressing method\n");
+ goto err_out_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!ioaddr) {
+ ret = -EIO;
+ goto err_out_free_res;
+ }
+
+ *ioaddr_out = ioaddr;
+ *dev_out = dev;
+
+ return ret;
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ *ioaddr_out = NULL;
+ *dev_out = NULL;
+
+ return ret;
+}
+
+static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ const struct rtase_private *tp = netdev_priv(dev);
+
+ rtase_rar_set(tp, tp->dev->perm_addr);
+ iounmap(ioaddr);
+
+ if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED)
+ pci_disable_msix(pdev);
+ else
+ pci_disable_msi(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static int rtase_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtase_int_vector *ivec;
+ void __iomem *ioaddr = NULL;
+ struct rtase_private *tp;
+ int ret, i;
+
+ if (!pdev->is_physfn && pdev->is_virtfn) {
+ dev_err(&pdev->dev,
+ "This module does not support a virtual function.");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
+
+ ret = rtase_init_board(pdev, &dev, &ioaddr);
+ if (ret != 0)
+ return ret;
+
+ tp = netdev_priv(dev);
+ tp->mmio_addr = ioaddr;
+ tp->dev = dev;
+ tp->pdev = pdev;
+
+ /* identify chip attached to board */
+ if (!rtase_check_mac_version_valid(tp))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n");
+
+ rtase_init_software_variable(pdev, tp);
+ rtase_init_hardware(tp);
+
+ ret = rtase_alloc_interrupt(pdev, tp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
+ goto err_out_1;
+ }
+
+ rtase_init_netdev_ops(dev);
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_IP_CSUM | NETIF_F_HIGHDMA |
+ NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO6;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXALL | NETIF_F_RXFCS |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_set_tso_max_size(dev, RTASE_LSO_64K);
+ netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2);
+
+ rtase_get_mac_address(dev);
+
+ tp->tally_vaddr = dma_alloc_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ &tp->tally_paddr,
+ GFP_KERNEL);
+ if (!tp->tally_vaddr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ rtase_tally_counter_clear(tp);
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret != 0)
+ goto err_out;
+
+ netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
+
+ return 0;
+
+err_out:
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+
+ tp->tally_vaddr = NULL;
+ }
+
+err_out_1:
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+ rtase_release_board(pdev, dev, ioaddr);
+
+ return ret;
+}
+
+static void rtase_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_int_vector *ivec;
+ u32 i;
+
+ unregister_netdev(dev);
+
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ netif_napi_del(&ivec->napi);
+ }
+
+ rtase_reset_interrupt(pdev, tp);
+ if (tp->tally_vaddr) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(*tp->tally_vaddr),
+ tp->tally_vaddr,
+ tp->tally_paddr);
+ tp->tally_vaddr = NULL;
+ }
+
+ rtase_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rtase_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ const struct rtase_private *tp;
+
+ tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ rtase_close(dev);
+
+ rtase_reset_interrupt(pdev, tp);
+}
+
+static int rtase_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rtase_hw_reset(dev);
+ }
+
+ return 0;
+}
+
+static int rtase_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct rtase_private *tp = netdev_priv(dev);
+ int ret;
+
+ /* restore last modified mac address */
+ rtase_rar_set(tp, dev->dev_addr);
+
+ if (!netif_running(dev))
+ goto out;
+
+ rtase_wait_for_quiescence(dev);
+
+ rtase_tx_clear(tp);
+ rtase_rx_clear(tp);
+
+ ret = rtase_init_ring(dev);
+ if (ret) {
+ netdev_err(dev, "unable to init ring\n");
+ rtase_free_desc(tp);
+ return -ENOMEM;
+ }
+
+ rtase_hw_config(dev);
+	/* the link is always up, so start transmitting & receiving */
+ rtase_hw_start(dev);
+
+ netif_device_attach(dev);
+out:
+
+ return 0;
+}
+
+static const struct dev_pm_ops rtase_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume)
+};
+
+static struct pci_driver rtase_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtase_pci_tbl,
+ .probe = rtase_init_one,
+ .remove = rtase_remove_one,
+ .shutdown = rtase_shutdown,
+ .driver.pm = pm_ptr(&rtase_pm_ops),
+};
+
+module_pci_driver(rtase_pci_driver);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c02fb296bf7d..c7ec23688d56 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1744,8 +1744,6 @@ static int ravb_get_ts_info(struct net_device *ndev,
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1756,6 +1754,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_ALL);
if (hw_info->gptp || hw_info->ccc_gac)
info->phc_index = ptp_clock_index(priv->ptp.clock);
+ else
+ info->phc_index = 0;
return 0;
}
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index ff50e20856ec..b80aa27a7214 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1815,8 +1815,6 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts
info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 0e6cea42f007..f9f63c61d792 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1219,8 +1219,6 @@ static int rtsn_get_ts_info(struct net_device *ndev,
info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index e097ce3e69ea..84fa911c78db 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2575,7 +2575,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+ dev->features |= NETIF_F_SG;
+ dev->netns_local = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7d69302ffa0a..de131fc5fa0b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sensor_event = efx_mcdi_sensor_event,
.rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
+
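+/* the X4 family is driven through the same EF10/MCDI callbacks as the
+ * existing EF10 adapters
+ */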
+const struct efx_nic_type efx_x4_nic_type = {
+ .is_vf = false,
+ .mem_bar = efx_ef10_pf_mem_bar,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_pf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_ef10_fini_nic,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_pf,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
+ .ev_probe = efx_mcdi_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+#ifdef CONFIG_SFC_SRIOV
+ /* currently set to the VF versions of these functions
+ * because SRIOV will be reimplemented later.
+ */
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
+
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
+ .revision = EFX_REV_X4,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .option_descriptors = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = EF10_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
+};
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 896ffca4aee2..5c2551369812 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,7 +37,6 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -59,6 +58,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 0b3083ef0ead..e923e1796369 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -233,8 +233,8 @@ static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
- net_dev->features |= NETIF_F_LLTX;
- net_dev->hw_features |= NETIF_F_LLTX;
+ net_dev->lltx = true;
+
return efv;
fail1:
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6f1a01ded7d4..36b3b57e2055 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 7c887160e2ef..bb1930818beb 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -230,17 +230,11 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -268,6 +262,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1db64fc6e909..9fa5c4c713ab 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+extern const struct efx_nic_type efx_x4_nic_type;
+
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 466df5348b29..7ec4ac7b7ff5 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -21,6 +21,7 @@ enum {
*/
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
+ EFX_REV_X4 = 6,
};
static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 6fd2fdbaa418..aaacdcfa54ae 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -884,7 +884,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index cf195162e270..a0966f879664 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -725,7 +725,6 @@ void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
efx->type->fini(efx);
}
@@ -786,9 +785,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
" VFs may not function\n", rc);
#endif
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
if (efx->type->sriov_reset)
@@ -806,7 +802,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1016,9 +1011,7 @@ int efx_siena_init_struct(struct efx_nic *efx,
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 4c182d4edfc2..c5ad84db9613 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -230,17 +230,11 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* Software capabilities */
- ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE);
- ts_info->phc_index = -1;
-
efx_siena_ptp_get_ts_info(efx, ts_info);
return 0;
}
const struct ethtool_ops efx_siena_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 5f0a8127e967..075fef64de68 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -820,27 +820,16 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
__u64 data;
- mutex_lock(&efx->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_siena_find_rss_context_entry(efx,
- info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
- switch (info->flow_type & ~FLOW_RSS) {
+ switch (info->flow_type) {
case UDP_V4_FLOW:
case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
+ if (efx->rss_context.rx_hash_udp_4tuple)
data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
RXH_IP_SRC | RXH_IP_DST);
else
@@ -862,10 +851,8 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
default:
break;
}
-out_setdata_unlock:
+out_setdata:
info->data = data;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
return rc;
}
@@ -1164,47 +1151,12 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
-
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
return rc;
@@ -1219,70 +1171,6 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
-
- if (!efx->type->rx_push_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_siena_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_siena_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_siena_free_rss_context_entry(ctx);
- goto out_unlock;
- }
-
- if (!key)
- key = ctx->rx_hash_key;
- if (!indir)
- indir = ctx->rx_indir_table;
-
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_siena_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1296,9 +1184,6 @@ int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
-
if (!indir && !key)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 94152f595acd..3fa7c652ae9b 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -707,20 +707,14 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
- * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * struct efx_rss_context - An RSS context for filtering
+ * @context_id: 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
u32 context_id;
- u32 user_id;
bool rx_hash_udp_4tuple;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
@@ -851,9 +845,7 @@ enum efx_xdp_tx_queues_mode {
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -1018,7 +1010,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1220,10 +1211,6 @@ struct efx_udp_tunnel {
* @tx_enqueue: Add an SKB to TX queue
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
- * @rx_push_rss_context_config: Write RSS hash key and indirection table for
- * user RSS context to the NIC
- * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user
- * RSS context back from the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1366,13 +1353,6 @@ struct efx_nic_type {
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
- int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key);
- int (*rx_pull_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx);
- void (*rx_restore_rss_contexts)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index c473a4b6dd44..85005196b4c5 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -897,7 +897,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
- timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
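
The one-character ptp.c fix above turns a stray comma operator back into a statement terminator. Both assignments still executed with the comma in place, so the bug was latent rather than functional, but the pattern silently swallows a missing semicolon. A minimal standalone sketch (hypothetical values) of how the comma version parses:

#include <stdio.h>

int main(void)
{
	int host_end, wait;

	/* Comma operator: one expression statement, both assignments run,
	 * and the value of the whole expression is the right-hand operand.
	 */
	host_end = 1, wait = 2;

	printf("%d %d\n", host_end, wait);	/* prints "1 2" */
	return 0;
}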
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 219fb358a646..082e35c6caaa 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -558,62 +558,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx)
{
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h
index 6b37f83ecb30..f90a8320d396 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.h
+++ b/drivers/net/ethernet/sfc/siena/rx_common.h
@@ -78,10 +78,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
- u32 id);
-void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index c44088424323..a421b0123506 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -249,7 +249,7 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
&ctr->linkage,
efx_tc_counter_id_ht_params);
kfree(ctr);
- return (void *)cnt; /* it's an ERR_PTR */
+ return ERR_CAST(cnt);
}
ctr->cnt = cnt;
refcount_set(&ctr->ref, 1);
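
The tc_counters.c change replaces an open-coded cast of an ERR_PTR-encoded pointer with ERR_CAST(), which keeps the intent explicit and lets static checkers track the error encoding. A minimal userspace model of the pattern, assuming the usual <linux/err.h> semantics:

#include <stdio.h>

#define MAX_ERRNO	4095
#define DEMO_ENOMEM	12	/* hypothetical errno value for the demo */

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* ERR_CAST(): re-type an error pointer without losing the errno. */
static inline void *ERR_CAST(const void *p) { return (void *)p; }

int main(void)
{
	void *cnt = ERR_PTR(-DEMO_ENOMEM);	/* e.g. a failed lookup */
	void *ctr = IS_ERR(cnt) ? ERR_CAST(cnt) : NULL;

	if (IS_ERR(ctr))
		printf("propagated errno: %ld\n", -PTR_ERR(ctr));
	return 0;
}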
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 907498848028..a5e23e2da90f 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2355,7 +2355,7 @@ static int smc_drv_probe(struct platform_device *pdev)
* the resource supplies a trigger, override the irqflags with
* the trigger flags from the resource.
*/
- irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ irq_resflags = irq_get_trigger_type(ndev->irq);
if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
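
The smc91x change swaps an open-coded lookup for the irq_get_trigger_type() helper, which wraps the same irq_get_irq_data()/irqd_get_trigger_type() pair and also tolerates a missing irq_data. A rough userspace model of the equivalence (the accessor stand-ins here are demo stubs, not the real genirq API):

#include <stdio.h>

typedef unsigned int u32;

struct irq_data { u32 trigger_type; };

static struct irq_data demo_irq = { .trigger_type = 0x8 /* LEVEL_LOW */ };

static struct irq_data *irq_get_irq_data(unsigned int irq)
{
	return irq == 42 ? &demo_irq : NULL;
}

static u32 irqd_get_trigger_type(struct irq_data *d)
{
	return d->trigger_type;
}

/* The helper folds the two calls and a NULL check into one. */
static u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irqd_get_trigger_type(d) : 0;
}

int main(void)
{
	printf("trigger: 0x%x\n", irq_get_trigger_type(42));
	return 0;
}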
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cd36ff4da68c..684489156dce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,6 +29,7 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_3_70 0x37
#define DWMAC_CORE_4_00 0x40
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 9e40c28d453a..bfe6e2d631bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,15 +8,90 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
+#include "dwmac1000.h"
+
+/* Normal Loongson Tx Summary */
+#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Summary */
+#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000
+
+#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \
+ DMA_INTR_ENA_NIE_RX_LOONGSON | \
+ DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)
+
+/* Abnormal Loongson Tx Summary */
+#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Summary */
+#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000
+
+#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \
+ DMA_INTR_ENA_AIE_RX_LOONGSON | \
+ DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)
+
+#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \
+ DMA_INTR_ABNORMAL_LOONGSON)
+
+/* Normal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000
+
+/* Abnormal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000
+
+/* Fatal Loongson Tx Bus Error Interrupt */
+#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000
+/* Fatal Loongson Rx Bus Error Interrupt */
+#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000
+
+#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \
+ DMA_STATUS_NIS_RX_LOONGSON | \
+ DMA_STATUS_AIS_TX_LOONGSON | \
+ DMA_STATUS_AIS_RX_LOONGSON | \
+ DMA_STATUS_FBI_TX_LOONGSON | \
+ DMA_STATUS_FBI_RX_LOONGSON)
+
+#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | DMA_STATUS_RU | \
+ DMA_STATUS_RI | DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | DMA_STATUS_TU | \
+ DMA_STATUS_TPS | DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
+#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
+#define CHANNEL_NUM 8
+
+struct loongson_data {
+ u32 loongson_id;
+ struct device *dev;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
-static int loongson_default_data(struct plat_stmmacenet_data *plat)
+static void loongson_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
+	/* Get the bus_id; it can be overwritten later */
+ plat->bus_id = pci_dev_id(pdev);
+
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
/* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat->multicast_filter_bins = 256;
+
+ plat->mac_interface = PHY_INTERFACE_MODE_NA;
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
@@ -24,10 +99,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
/* Disable Priority config by default */
plat->tx_queues_cfg[0].use_prio = false;
plat->rx_queues_cfg[0].use_prio = false;
@@ -35,30 +106,424 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Disable RX queues routing by default */
plat->rx_queues_cfg[0].pkt_route = 0x0;
+ plat->clk_ref_rate = 125000000;
+ plat->clk_ptp_rate = 125000000;
+
/* Default to phy auto-detection */
plat->phy_addr = -1;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
+
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum,
+ * so turn off checksum to enable multiple channels.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+ }
+
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
- plat->multicast_filter_bins = 256;
return 0;
}
-static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct stmmac_pci_info loongson_gmac_pci_info = {
+ .setup = loongson_gmac_data,
+};
+
+static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
+ unsigned int mode)
{
- struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- struct device_node *np;
- int ret, i, phy_mode;
+ struct loongson_data *ld = (struct loongson_data *)priv;
+ struct net_device *ndev = dev_get_drvdata(ld->dev);
+ struct stmmac_priv *ptr = netdev_priv(ndev);
+
+	/* The integrated PHY has a weird problem switching from the lower
+	 * speeds to 1000Mbps mode: the speed-up procedure requires a
+	 * PHY link re-negotiation.
+	 */
+ if (speed == SPEED_1000) {
+ if (readl(ptr->ioaddr + MAC_CTRL_REG) &
+ GMAC_CONTROL_PS)
+			/* Work around hardware bug, restart autoneg */
+ phy_restart_aneg(ndev->phydev);
+ }
+}
- np = dev_of_node(&pdev->dev);
+static int loongson_gnet_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
- if (!np) {
- pr_info("dwmac_loongson_pci: No OF node\n");
- return -ENODEV;
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum,
+ * so turn off checksum to enable multiple channels.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
}
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
+ plat->fix_mac_speed = loongson_gnet_fix_speed;
+
+ return 0;
+}
+
+static struct stmmac_pci_info loongson_gnet_pci_info = {
+ .setup = loongson_gnet_data,
+};
+
+static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 chan)
+{
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_BUS_MODE_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_BUS_MODE_MB;
+
+ if (dma_cfg->atds)
+ value |= DMA_BUS_MODE_ATDS;
+
+ if (dma_cfg->aal)
+ value |= DMA_BUS_MODE_AAL;
+
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
+ DMA_CHAN_INTR_ENA(chan));
+}
+
+static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
+ u32 chan, u32 dir)
+{
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ u32 abnor_intr_status;
+ u32 nor_intr_status;
+ u32 fb_intr_status;
+ u32 intr_status;
+ int ret = 0;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX_LOONGSON;
+
+ nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
+ DMA_STATUS_NIS_RX_LOONGSON);
+ abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
+ DMA_STATUS_AIS_RX_LOONGSON);
+ fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
+ DMA_STATUS_FBI_RX_LOONGSON);
+
+ /* ABNORMAL interrupts */
+ if (unlikely(abnor_intr_status)) {
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT))
+ x->tx_jabber_irq++;
+ if (unlikely(intr_status & DMA_STATUS_OVF))
+ x->rx_overflow_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(fb_intr_status)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(nor_intr_status)) {
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+	/* Optional hardware blocks; interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[19-0] */
+ writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));
+
+ return ret;
+}
+
+static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+{
+ struct stmmac_priv *priv = apriv;
+ struct mac_device_info *mac;
+ struct stmmac_dma_ops *dma;
+ struct loongson_data *ld;
+ struct pci_dev *pdev;
+
+ ld = priv->plat->bsp_priv;
+ pdev = to_pci_dev(priv->device);
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+	/* The Loongson GMAC and GNET devices are based on the DW GMAC
+	 * v3.50a and v3.73a IP-cores, but the HW designers changed the
+	 * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
+	 * controllers with the multi-channel feature to emphasize the
+	 * differences: multiple DMA channels, the AV feature and the
+	 * GMAC_INT_STATUS CSR flags layout. Restore the original value
+	 * so that the correct HW interface is selected.
+	 */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ priv->synopsys_id = DWMAC_CORE_3_70;
+ *dma = dwmac1000_dma_ops;
+ dma->init_chan = loongson_dwmac_dma_init_channel;
+ dma->dma_interrupt = loongson_dwmac_dma_interrupt;
+ mac->dma = dma;
+ }
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Pre-initialize the respective "mac" fields as it's done in
+ * dwmac1000_setup()
+ */
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+	/* The Loongson GMAC doesn't support flow control. The LS2K2000
+	 * GNET doesn't support half-duplex link modes.
+	 */
+ if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
+ } else {
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+ else
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+ }
+
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+ return mac;
+}
+
+static int loongson_dwmac_msi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int i, ret, vecs;
+
+ vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
+ return ret;
+ }
+
+ res->irq = pci_irq_vector(pdev, 0);
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ res->rx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 1 + i * 2);
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ res->tx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 2 + i * 2);
+ }
+
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+
+ return 0;
+}
+
+static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
+
+static int loongson_dwmac_dt_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ int ret;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ ret = of_alias_get_id(np, "ethernet");
+ if (ret >= 0)
+ plat->bus_id = ret;
+
+ res->irq = of_irq_get_byname(np, "macirq");
+ if (res->irq < 0) {
+ dev_err(&pdev->dev, "IRQ macirq not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res->wol_irq < 0) {
+ dev_info(&pdev->dev,
+ "IRQ eth_wake_irq not found, using macirq\n");
+ res->wol_irq = res->irq;
+ }
+
+ res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res->lpi_irq < 0) {
+ dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ ret = device_get_phy_mode(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ plat->phy_interface = ret;
+
+ return 0;
+
+err_put_node:
+ of_node_put(plat->mdio_node);
+
+ return ret;
+}
+
+static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ of_node_put(plat->mdio_node);
+}
+
+static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ if (!pdev->irq)
+ return -EINVAL;
+
+ res->irq = pdev->irq;
+
+ return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_pci_info *info;
+ struct stmmac_resources res;
+ struct loongson_data *ld;
+ int ret, i;
+
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
@@ -69,25 +534,23 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (!plat->mdio_bus_data)
return -ENOMEM;
- plat->mdio_node = of_get_child_by_name(np, "mdio");
- if (plat->mdio_node) {
- dev_info(&pdev->dev, "Found MDIO subnode\n");
- plat->mdio_bus_data->needs_reset = true;
- }
-
plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg) {
- ret = -ENOMEM;
- goto err_put_node;
- }
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
+ if (!ld)
+ return -ENOMEM;
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- goto err_put_node;
+ return ret;
}
+ pci_set_master(pdev);
+
/* Get the base address of device */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
@@ -98,59 +561,43 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
break;
}
- plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = pci_dev_id(pdev);
-
- phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0) {
- dev_err(&pdev->dev, "phy_mode not found\n");
- ret = phy_mode;
- goto err_disable_device;
- }
-
- plat->phy_interface = phy_mode;
- plat->mac_interface = PHY_INTERFACE_MODE_GMII;
-
- pci_set_master(pdev);
-
- loongson_default_data(plat);
- pci_enable_msi(pdev);
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- res.irq = of_irq_get_byname(np, "macirq");
- if (res.irq < 0) {
- dev_err(&pdev->dev, "IRQ macirq not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ plat->bsp_priv = ld;
+ plat->setup = loongson_dwmac_setup;
+ ld->dev = &pdev->dev;
+ ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
- res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
- if (res.wol_irq < 0) {
- dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
- res.wol_irq = res.irq;
- }
+ info = (struct stmmac_pci_info *)id->driver_data;
+ ret = info->setup(pdev, plat);
+ if (ret)
+ goto err_disable_device;
- res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res.lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ if (dev_of_node(&pdev->dev))
+ ret = loongson_dwmac_dt_config(pdev, plat, &res);
+ else
+ ret = loongson_dwmac_acpi_config(pdev, plat, &res);
+ if (ret)
+ goto err_disable_device;
+
+	/* Use the common MAC IRQ if per-channel MSI allocation failed */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret)
- goto err_disable_msi;
+ goto err_plat_clear;
- return ret;
+ return 0;
-err_disable_msi:
- pci_disable_msi(pdev);
+err_plat_clear:
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, plat);
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_put_node:
- of_node_put(plat->mdio_node);
return ret;
}
@@ -158,11 +605,18 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct loongson_data *ld;
int i;
- of_node_put(priv->plat->mdio_node);
+ ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, priv->plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
+
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -170,7 +624,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
break;
}
- pci_disable_msi(pdev);
pci_disable_device(pdev);
}
@@ -213,7 +666,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
@@ -232,4 +686,5 @@ module_pci_driver(loongson_dwmac_driver);
MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");
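
For the multi-channel parts, loongson_dwmac_msi_config() above requests roundup_pow_of_two(8 * 2 + 1) = 32 MSI vectors and hands them out in an interleaved pattern: vector 0 is the common MAC IRQ, RX channel i gets vector 1 + 2*i, TX channel i gets vector 2 + 2*i, and the results are written into res->rx_irq[]/res->tx_irq[] in reverse channel order. A standalone sketch printing that layout:

#include <stdio.h>

#define CHANNEL_NUM 8

int main(void)
{
	printf("common MAC IRQ -> vector 0\n");
	for (int i = 0; i < CHANNEL_NUM; i++) {
		/* Same index arithmetic as loongson_dwmac_msi_config() */
		printf("rx_irq[%d] -> vector %2d, tx_irq[%d] -> vector %2d\n",
		       CHANNEL_NUM - 1 - i, 1 + i * 2,
		       CHANNEL_NUM - 1 - i, 2 + i * 2);
	}
	return 0;
}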
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7ae04d8d291c..50073bdade46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1116,6 +1116,161 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* VCCIO0_1_3_IOC */
+#define RK3576_VCCIO0_1_3_IOC_CON2 0X6408
+#define RK3576_VCCIO0_1_3_IOC_CON3 0X640c
+#define RK3576_VCCIO0_1_3_IOC_CON4 0X6410
+#define RK3576_VCCIO0_1_3_IOC_CON5 0X6414
+
+#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* SDGMAC_GRF */
+#define RK3576_GRF_GMAC_CON0 0X0020
+#define RK3576_GRF_GMAC_CON1 0X0024
+
+#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
+#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
+
+#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
+#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
+#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+
+#define RK3576_GMAC_CLK_RGMII_DIV1 \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV5 \
+ (GRF_BIT(6) | GRF_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV50 \
+ (GRF_BIT(6) | GRF_CLR_BIT(5))
+
+#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
+#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+
+static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
+
+ offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
+ RK3576_VCCIO0_1_3_IOC_CON2;
+
+ /* m0 && m1 delay enabled */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+
+ /* m0 && m1 delay value */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+}
+
+static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
+}
+
+static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, offset_con;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV20;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV50;
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV2;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV5;
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RGMII_DIV1;
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+}
+
+static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
+ RK3576_GMAC_CLK_SELECT_CRU;
+ unsigned int offset_con;
+
+ val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
+ RK3576_GMAC_CLK_RMII_GATE;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+}
+
+static const struct rk_gmac_ops rk3576_ops = {
+ .set_to_rgmii = rk3576_set_to_rgmii,
+ .set_to_rmii = rk3576_set_to_rmii,
+ .set_rgmii_speed = rk3576_set_gmac_speed,
+ .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_clock_selection = rk3576_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0x2a220000, /* gmac0 */
+ 0x2a230000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
/* sys_grf */
#define RK3588_GRF_GMAC_CON7 0X031c
#define RK3588_GRF_GMAC_CON8 0X0320
@@ -1141,8 +1296,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
-#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
@@ -1240,8 +1395,8 @@ err:
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
bool enable)
{
- unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
@@ -1908,6 +2063,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
{ .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e1b761dcfa1d..4a0ae92b3055 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
@@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -774,8 +774,8 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
static int get_ephy_nodes(struct stmmac_priv *priv)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *mdio_mux, *iphynode;
struct device_node *mdio_internal;
+ struct device_node *mdio_mux;
int ret;
mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
@@ -793,7 +793,7 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
}
/* Seek for internal PHY */
- for_each_child_of_node(mdio_internal, iphynode) {
+ for_each_child_of_node_scoped(mdio_internal, iphynode) {
gmac->ephy_clk = of_clk_get(iphynode, 0);
if (IS_ERR(gmac->ephy_clk))
continue;
@@ -801,14 +801,12 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
if (ret == -EPROBE_DEFER) {
- of_node_put(iphynode);
of_node_put(mdio_internal);
return ret;
}
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
- of_node_put(iphynode);
of_node_put(mdio_internal);
return 0;
}
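
The sun8i conversion to for_each_child_of_node_scoped() removes the manual of_node_put(iphynode) calls because the scoped iterator drops the child reference automatically when the loop variable goes out of scope, including on break or early return. A rough userspace model of the underlying cleanup-attribute mechanism (GCC/Clang extension; the names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void put_node(char **node)
{
	if (*node) {
		printf("put(%s)\n", *node);	/* refcount-drop stand-in */
		free(*node);
	}
}

int main(void)
{
	const char *children[] = { "phy@0", "ephy", "phy@2" };

	for (int i = 0; i < 3; i++) {
		/* Cleanup runs when "node" leaves scope, even via break. */
		char *node __attribute__((cleanup(put_node))) =
			strdup(children[i]);

		if (strcmp(node, "ephy") == 0)
			break;	/* no manual put needed on this path */
	}
	return 0;
}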
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index adccdd816ea9..118a22406a2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
-static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
- /*
- * Set the DMA PBL (Programmable Burst Length) mode.
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Set the DMA PBL (Programmable Burst Length) mode.
*
* Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
@@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
if (dma_cfg->mixed_burst)
value |= DMA_BUS_MODE_MB;
- if (atds)
+ if (dma_cfg->atds)
value |= DMA_BUS_MODE_ATDS;
if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
- writel(value, ioaddr + DMA_BUS_MODE);
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
- writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}
static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
- writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
@@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
/* Configure flow control based on rx fifo size */
csr6 = dwmac1000_configure_fc(csr6, fifosz);
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
csr6 |= DMA_CONTROL_TTC_256;
}
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 riwt, u32 queue)
{
- writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
- .init = dwmac1000_dma_init,
+ .init_chan = dwmac1000_dma_init_channel,
.init_rx_chan = dwmac1000_dma_init_rx,
.init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
@@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.get_hw_feature = dwmac1000_get_hw_feature,
.rx_watchdog = dwmac1000_rx_watchdog,
};
+EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index b402fb54f613..82957db47c99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -19,7 +19,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 31c387cc5f26..a1858f083eef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -58,10 +58,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
- /* Enable FPE interrupt */
- if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
- value |= GMAC_INT_FPE_EN;
-
writel(value, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
@@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 1c5802e0d7f4..e99401bcc1f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -186,10 +186,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+ u32 flags = (RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ flags |= RDES3_INT_ON_COMPLETION_EN;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
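
Both set_rx_owner() conversions in this series follow the same shape: build the flag word in a local variable, then publish it to the DMA-visible descriptor with a single OR. The original code set RDES3_OWN first and RDES3_INT_ON_COMPLETION_EN in a second access, which at least in principle leaves a window where the device already owns the descriptor but the IOC bit is not yet visible. A condensed sketch of the pattern (hypothetical bit values):

#include <stdint.h>

#define OWN	(1u << 31)	/* descriptor handed to the device */
#define IOC	(1u << 30)	/* interrupt on completion */

static void set_rx_owner(volatile uint32_t *des3, int disable_rx_ic)
{
	uint32_t flags = OWN;

	if (!disable_rx_ic)
		flags |= IOC;

	/* One update instead of two: OWN and IOC land together. */
	*des3 |= flags;
}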
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 84d3a8551b03..e0165358c4ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..08add508db84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable)
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (enable) {
+ if (tx_enable) {
cfg->fpe_csr = EFPE;
value = readl(ioaddr + GMAC_RXQ_CTRL1);
value &= ~GMAC_RXQCTRL_FPRQ;
@@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+
+ value = readl(ioaddr + GMAC_INT_EN);
+
+ if (pmac_enable) {
+ if (!(value & GMAC_INT_FPE_EN)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value |= GMAC_INT_FPE_EN;
+ }
+ } else {
+ value &= ~GMAC_INT_FPE_EN;
+ }
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -605,22 +620,22 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
if (value & TRSP) {
status |= FPE_EVENT_TRSP;
- netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
}
if (value & TVER) {
status |= FPE_EVENT_TVER;
- netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
}
if (value & RRSP) {
status |= FPE_EVENT_RRSP;
- netdev_info(dev, "FPE: Respond mPacket is received\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
}
if (value & RVER) {
status |= FPE_EVENT_RVER;
- netdev_info(dev, "FPE: Verify mPacket is received\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
}
return status;
@@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+ return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
+}
+
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
+{
+ u32 value;
+
+ value = readl(ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
+ ioaddr + MTL_FPE_CTRL_STS);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 num_tc = ndev->num_tc;
+
+ if (!pclass)
+ goto update_mapping;
+
+	/* DWMAC CORE4+ cannot program TC:TXQ mapping to hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ /* This is 1:1 mapping, go to next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
+ priv->ioaddr + MTL_FPE_CTRL_STS);
+
+ return 0;
+}
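
dwmac5_fpe_map_preemption_class() above folds each preemptible traffic class's queue range into a bitmask: with tc_to_txq[tc] = {offset, count}, GENMASK(offset + count - 1, offset) marks exactly those queues. A worked standalone sketch with hypothetical mapping values:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define BIT(n)		(1u << (n))

int main(void)
{
	/* Hypothetical 3-TC layout: TC0 -> queues 0..1, TC1 -> 2..3,
	 * TC2 -> 4..7.
	 */
	struct { unsigned int offset, count; } tc_to_txq[] = {
		{ 0, 2 }, { 2, 2 }, { 4, 4 },
	};
	uint32_t pclass = BIT(1) | BIT(2);	/* TC1 and TC2 preemptible */
	uint32_t preemptible_txqs = 0;

	for (unsigned int tc = 0; tc < 3; tc++)
		if (pclass & BIT(tc))
			preemptible_txqs |=
				GENMASK(tc_to_txq[tc].offset +
					tc_to_txq[tc].count - 1,
					tc_to_txq[tc].offset);

	printf("0x%02x\n", preemptible_txqs);	/* 0xfc: queues 2..7 */
	return 0;
}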
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..6c6eb6790e83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,6 +39,12 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_FPE_CTRL_STS 0x00000c90
+/* Preemption Classification */
+#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0)
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
u32 sub_second_inc, u32 systime_flags);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 72672391675f..5d9c18f5bbf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,31 @@
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_OFFSET 0x100
+
+static inline u32 dma_chan_base_addr(u32 base, u32 chan)
+{
+ return base + chan * DMA_CHAN_BASE_OFFSET;
+}
+
+#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan)
+#define DMA_CHAN_XMT_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
+#define DMA_CHAN_TX_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
+#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
+#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
+#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
+#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
+ dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
+#define DMA_CHAN_RX_WATCHDOG(chan) \
+ dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
+
/* SW Reset */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
@@ -152,7 +177,7 @@
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
-void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
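
The new DMA_CHAN_*() wrappers keep channel 0 at the legacy single-channel offsets and stride each further channel by DMA_CHAN_BASE_OFFSET, which is what lets dwmac_lib.c and dwmac1000_dma.c switch to per-channel accesses without changing behaviour on existing single-channel parts. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define DMA_INTR_ENA		0x0000101c
#define DMA_CHAN_BASE_OFFSET	0x100

static inline uint32_t dma_chan_base_addr(uint32_t base, uint32_t chan)
{
	return base + chan * DMA_CHAN_BASE_OFFSET;
}

int main(void)
{
	/* Channel 0 keeps the legacy offset; channel 2 lands 0x200 later. */
	printf("0x%x 0x%x\n",
	       dma_chan_base_addr(DMA_INTR_ENA, 0),	/* 0x101c */
	       dma_chan_base_addr(DMA_INTR_ENA, 2));	/* 0x121c */
	return 0;
}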
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 85e18f9a22f9..4846bf49c576 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr)
}
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
- writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+ writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value |= DMA_INTR_DEFAULT_RX;
if (tx)
value |= DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value &= ~DMA_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
#ifdef DWMAC_DMA_DEBUG
@@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
#ifdef DWMAC_DMA_DEBUG
/* Enable it to monitor DMA rx/tx status in case of critical problems */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f196cd99d510..f519d43738b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -846,42 +846,41 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
-#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
-#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
-
+static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
+static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
- { true, "TDPES0", DPP_TX_ERR },
- { true, "TDPES1", DPP_TX_ERR },
- { true, "TDPES2", DPP_TX_ERR },
- { true, "TDPES3", DPP_TX_ERR },
- { true, "TDPES4", DPP_TX_ERR },
- { true, "TDPES5", DPP_TX_ERR },
- { true, "TDPES6", DPP_TX_ERR },
- { true, "TDPES7", DPP_TX_ERR },
- { true, "TDPES8", DPP_TX_ERR },
- { true, "TDPES9", DPP_TX_ERR },
- { true, "TDPES10", DPP_TX_ERR },
- { true, "TDPES11", DPP_TX_ERR },
- { true, "TDPES12", DPP_TX_ERR },
- { true, "TDPES13", DPP_TX_ERR },
- { true, "TDPES14", DPP_TX_ERR },
- { true, "TDPES15", DPP_TX_ERR },
- { true, "RDPES0", DPP_RX_ERR },
- { true, "RDPES1", DPP_RX_ERR },
- { true, "RDPES2", DPP_RX_ERR },
- { true, "RDPES3", DPP_RX_ERR },
- { true, "RDPES4", DPP_RX_ERR },
- { true, "RDPES5", DPP_RX_ERR },
- { true, "RDPES6", DPP_RX_ERR },
- { true, "RDPES7", DPP_RX_ERR },
- { true, "RDPES8", DPP_RX_ERR },
- { true, "RDPES9", DPP_RX_ERR },
- { true, "RDPES10", DPP_RX_ERR },
- { true, "RDPES11", DPP_RX_ERR },
- { true, "RDPES12", DPP_RX_ERR },
- { true, "RDPES13", DPP_RX_ERR },
- { true, "RDPES14", DPP_RX_ERR },
- { true, "RDPES15", DPP_RX_ERR },
+ { true, "TDPES0", dpp_tx_err },
+ { true, "TDPES1", dpp_tx_err },
+ { true, "TDPES2", dpp_tx_err },
+ { true, "TDPES3", dpp_tx_err },
+ { true, "TDPES4", dpp_tx_err },
+ { true, "TDPES5", dpp_tx_err },
+ { true, "TDPES6", dpp_tx_err },
+ { true, "TDPES7", dpp_tx_err },
+ { true, "TDPES8", dpp_tx_err },
+ { true, "TDPES9", dpp_tx_err },
+ { true, "TDPES10", dpp_tx_err },
+ { true, "TDPES11", dpp_tx_err },
+ { true, "TDPES12", dpp_tx_err },
+ { true, "TDPES13", dpp_tx_err },
+ { true, "TDPES14", dpp_tx_err },
+ { true, "TDPES15", dpp_tx_err },
+ { true, "RDPES0", dpp_rx_err },
+ { true, "RDPES1", dpp_rx_err },
+ { true, "RDPES2", dpp_rx_err },
+ { true, "RDPES3", dpp_rx_err },
+ { true, "RDPES4", dpp_rx_err },
+ { true, "RDPES5", dpp_rx_err },
+ { true, "RDPES6", dpp_rx_err },
+ { true, "RDPES7", dpp_rx_err },
+ { true, "RDPES8", dpp_rx_err },
+ { true, "RDPES9", dpp_rx_err },
+ { true, "RDPES10", dpp_rx_err },
+ { true, "RDPES11", dpp_rx_err },
+ { true, "RDPES12", dpp_rx_err },
+ { true, "RDPES13", dpp_rx_err },
+ { true, "RDPES14", dpp_rx_err },
+ { true, "RDPES15", dpp_rx_err },
};
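Replacing the DPP_RX_ERR/DPP_TX_ERR macros with named static const char arrays makes the sharing explicit: all sixteen TDPES (and RDPES) entries store a pointer to a single string object instead of relying on the compiler to merge sixteen pasted copies of the literal. A minimal illustration of the difference:

	static const char shared[] = "shared message";
	static const char *tab[2] = { shared, shared };
	/* tab[0] == tab[1] is guaranteed here; with a macro that pastes a
	 * string literal twice, pointer equality depends on the compiler
	 * merging identical literals.
	 */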
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
@@ -1505,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq,
- u32 num_rxq, bool enable)
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (!enable) {
+ if (!tx_enable) {
value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
value &= ~XGMAC_EFPE;
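The former single enable flag splits into tx_enable (start preempting in the TX direction) and pmac_enable (let the pMAC receive). The link-state handler later in this patch uses the combination below to bring the pMAC up while verification is still pending:

	/* e.g. on link-up, before verification has succeeded: */
	dwxgmac3_fpe_configure(ioaddr, cfg, num_txq, num_rxq,
			       /* tx_enable */ false, /* pmac_enable */ true);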
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index fc82862a612c..389aad7b5c1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -56,10 +56,12 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
+ u32 flags = XGMAC_RDES3_OWN;
if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+ flags |= XGMAC_RDES3_IOC;
+
+ p->des3 |= cpu_to_le32(flags);
}
static int dwxgmac2_get_tx_ls(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index dd2ab6185c40..7840bc403788 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr)
}
static void dwxgmac2_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..88cce28b2f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwmac4_setup,
@@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxlgmac2_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e53c32362774..d5a9f01ecac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
+#include <net/pkt_cls.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -28,6 +29,8 @@
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
@@ -175,8 +178,7 @@ struct dma_features;
struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- int atds);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg);
void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -198,7 +200,7 @@ struct stmmac_dma_ops {
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -420,11 +422,16 @@ struct stmmac_ops {
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void (*fpe_send_mpacket)(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
+ void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
+ int (*fpe_map_preemption_class)(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass);
};
#define stmmac_core_init(__priv, __args...) \
@@ -529,6 +536,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
#define stmmac_fpe_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
+#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
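These wrappers expand through the stmmac_do_callback()/stmmac_do_void_callback() macros defined near the top of hwif.h, which take roughly this shape (a simplified sketch, not the verbatim macro):

	#define stmmac_do_callback(__priv, __module, __cname, __args...)       \
	({                                                                     \
		int __result = -EINVAL;                                        \
		if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
			__result = (__priv)->hw->__module->__cname(__args);    \
		__result;                                                      \
	})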
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -616,6 +629,8 @@ struct stmmac_tc_ops {
struct tc_etf_qopt_offload *qopt);
int (*query_caps)(struct stmmac_priv *priv,
struct tc_query_caps_base *base);
+ int (*setup_mqprio)(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -632,6 +647,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_mqprio, __args)
struct stmmac_counters;
@@ -675,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_tc_ops dwxgmac_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b23b920eedb1..ea135203ff2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,6 +146,32 @@ struct stmmac_channel {
u32 index;
};
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
+
+struct stmmac_fpe_cfg {
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates.
+ */
+ spinlock_t lock;
+
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
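With the defaults above, a failed negotiation is strictly bounded: the verify timer (added later in this patch) re-arms at verify_time intervals until the retries are exhausted.

	/* Worst-case verification latency with these defaults (sketch):
	 *   STMMAC_FPE_MM_MAX_VERIFY_RETRIES * STMMAC_FPE_MM_MAX_VERIFY_TIME_MS
	 *   = 3 * 128 ms ~= 384 ms before the status drops to
	 *   ETHTOOL_MM_VERIFY_STATUS_FAILED.
	 */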
struct stmmac_tc_entry {
bool in_use;
bool in_hw;
@@ -339,11 +365,8 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
- /* Workqueue for handling FPE hand-shaking */
- unsigned long fpe_task_state;
- struct workqueue_struct *fpe_wq;
- struct work_struct fpe_task;
- char wq_name[IFNAMSIZ + 4];
+ /* Frame Preemption feature (FPE) */
+ struct stmmac_fpe_cfg fpe_cfg;
/* TC Handling */
unsigned int tc_entries_max;
@@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 7008219fd88d..2a37592a6281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -19,6 +19,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
+#include "dwmac5.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
@@ -438,13 +439,6 @@ static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
}
-static int stmmac_check_if_running(struct net_device *dev)
-{
- if (!netif_running(dev))
- return -EBUSY;
- return 0;
-}
-
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1207,13 +1201,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
info->phc_index = ptp_clock_index(priv->ptp_clock);
+ else
+ info->phc_index = 0;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -1270,10 +1264,101 @@ static int stmmac_set_tunable(struct net_device *dev,
return ret;
}
+static int stmmac_get_mm(struct net_device *ndev,
+ struct ethtool_mm_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 frag_size;
+
+ if (!priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
+
+ state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ state->verify_enabled = priv->fpe_cfg.verify_enabled;
+ state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
+ state->verify_time = priv->fpe_cfg.verify_time;
+ state->tx_enabled = priv->fpe_cfg.tx_enabled;
+ state->verify_status = priv->fpe_cfg.status;
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ /* FPE is active if tx_enabled is set and verification has either
+ * succeeded or is disabled (forced).
+ */
+ if (state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
+ state->tx_active = true;
+ else
+ state->tx_active = false;
+
+ frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
+
+ spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+
+ return 0;
+}
+
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
+ u32 frag_size;
+ int err;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &frag_size, extack);
+ if (err)
+ return err;
+
+ /* Wait for the verification that's currently in progress to finish */
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ fpe_cfg->verify_enabled = cfg->verify_enabled;
+ fpe_cfg->pmac_enabled = cfg->pmac_enabled;
+ fpe_cfg->verify_time = cfg->verify_time;
+ fpe_cfg->tx_enabled = cfg->tx_enabled;
+
+ if (!cfg->verify_enabled)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+
+ stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+ stmmac_fpe_apply(priv);
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+
+ return 0;
+}
+
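Note the ordering in stmmac_set_mm(): the timer is shut down before fpe_cfg->lock is taken, because the timer callback acquires the same lock; reversing the order could deadlock. From user space this path is exercised through ethtool's MAC Merge interface, e.g. "ethtool --set-mm eth0 verify-enabled on tx-enabled on pmac-enabled on".

	/* Ordering in stmmac_set_mm() (sketch of why it matters):
	 *   timer_shutdown_sync(&fpe_cfg->verify_timer);    <- must be first:
	 *   spin_lock_irqsave(&fpe_cfg->lock, flags);          the callback
	 *   ...update and apply the new configuration...      takes the same
	 *   spin_unlock_irqrestore(&fpe_cfg->lock, flags);     lock
	 */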
+static void stmmac_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_counters *mmc = &priv->mmc;
+
+ if (!priv->dma_cap.rmon)
+ return;
+
+ stmmac_mmc_read(priv, priv->mmcaddr, mmc);
+
+ s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+ s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+ s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+ s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+ s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+ s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
- .begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
.set_msglevel = stmmac_ethtool_setmsglevel,
@@ -1309,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ .get_mm = stmmac_get_mm,
+ .set_mm = stmmac_set_mm,
+ .get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f3a1b179aaea..d3895d7eecfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_VERIFY);
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ if (is_up && fpe_cfg->pmac_enabled) {
+ /* VERIFY process requires pmac enabled when NIC comes up */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, true);
+
+ /* New link => maybe new partner => new verification process */
+ stmmac_fpe_apply(priv);
} else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
+ /* No link => turn off EFPE */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, false);
}
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -2367,9 +2380,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
- /* Adjust for real per queue fifo size */
- rxfifosz /= rx_channels_count;
- txfifosz /= tx_channels_count;
+ /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+ }
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2553,7 +2568,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
true, priv->mode, true, true,
xdp_desc.len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
@@ -3003,7 +3018,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
u32 chan = 0;
- int atds = 0;
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
@@ -3012,7 +3026,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
- atds = 1;
+ priv->plat->dma_cfg->atds = 1;
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
@@ -3021,7 +3035,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
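The atds flag (alternate/extended descriptor size) now rides inside the DMA configuration instead of being a separate init argument, so the per-channel init paths can see it too. A hedged sketch of the assumed struct change (field name taken from the usage above; surrounding fields abbreviated):

	/* sketch, not the verbatim include/linux/stmmac.h */
	struct stmmac_dma_cfg {
		int	pbl;
		/* ... */
		bool	atds;	/* alternate (extended) descriptor size */
	};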
@@ -3357,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -3532,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_set_hw_vlan_mode(priv, priv->hw);
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
-
return 0;
}
@@ -4035,18 +4021,6 @@ static int stmmac_open(struct net_device *dev)
return ret;
}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- if (priv->fpe_wq) {
- destroy_workqueue(priv->fpe_wq);
- priv->fpe_wq = NULL;
- }
-
- netdev_info(priv->dev, "FPE workqueue stop");
-}
-
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
@@ -4094,10 +4068,10 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
-
if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+
+ pm_runtime_put(priv->device);
return 0;
}
@@ -4754,7 +4728,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4981,7 +4955,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
u64_stats_update_end(&txq_stats->q_syncp);
}
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
@@ -5981,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev,
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
+ /* This is interrupt context, just spin_lock() */
+ spin_lock(&fpe_cfg->lock);
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
+ if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+ goto unlock_out;
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_RESPONSE);
- }
-
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
+ /* LP has sent verify mPacket */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_RESPONSE);
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
+ /* Local has sent verify mPacket */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
+ /* LP has sent response mPacket */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+ fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
+unlock_out:
+ spin_unlock(&fpe_cfg->lock);
}
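The rewrite maps the raw FPE interrupt events onto the ethtool MAC Merge verification states. The exchange it implements, in sketch form:

	/* Simplified verification exchange (sketch):
	 *
	 *   local                              link partner
	 *     |---- Verify mPacket ---------->|   TVER: status -> VERIFYING
	 *     |<--- Verify mPacket -----------|   RVER: reply MPACKET_RESPONSE
	 *     |<--- Response mPacket ---------|   RRSP: status -> SUCCEEDED
	 */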
static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -6256,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7375,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction, by
+ * transmitting Verify mPackets up to 3 times, waiting for the link
+ * partner to respond with a Response mPacket; fail otherwise.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
+ struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+ struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+ fpe_cfg);
+ unsigned long flags;
+ bool rearm = false;
- netdev_info(priv->dev, "configured FPE\n");
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
-
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
+ switch (fpe_cfg->status) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ if (fpe_cfg->verify_retries != 0) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_VERIFY);
+ fpe_cfg, MPACKET_VERIFY);
+ rearm = true;
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
}
- /* Sleep then retry */
- msleep(500);
+
+ fpe_cfg->verify_retries--;
+ break;
+
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ true, true);
+ break;
+
+ default:
+ break;
+ }
+
+ if (rearm) {
+ mod_timer(&fpe_cfg->verify_timer,
+ jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
}
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
+ if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+ fpe_cfg->verify_enabled &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+ timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+ mod_timer(&fpe_cfg->verify_timer, jiffies);
+ }
+}
- priv->plat->fpe_cfg->hs_enable = enable;
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* If verification is disabled, configure FPE right away.
+ * Otherwise let the timer code do it.
+ */
+ if (!fpe_cfg->verify_enabled) {
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ fpe_cfg->tx_enabled,
+ fpe_cfg->pmac_enabled);
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+
+ if (netif_running(priv->dev))
+ stmmac_fpe_verify_timer_arm(fpe_cfg);
}
}
@@ -7554,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
-
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7721,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
+ priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+ priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+ spin_lock_init(&priv->fpe_cfg.lock);
+
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and is fixed. Otherwise the driver will try to
@@ -7894,16 +7878,8 @@ int stmmac_suspend(struct device *dev)
}
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
-
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->dma_cap.fpesel)
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
priv->speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 996f2bcd07a2..832998bc020b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
if (ret)
return -ENOMEM;
- if (!priv->plat->fpe_cfg) {
- priv->plat->fpe_cfg = devm_kzalloc(priv->device,
- sizeof(*priv->plat->fpe_cfg),
- GFP_KERNEL);
- if (!priv->plat->fpe_cfg)
- return -ENOMEM;
- } else {
- memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
- }
-
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -941,9 +931,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct netlink_ext_ack *extack = qopt->mqprio.extack;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
- bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -1028,16 +1018,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
switch (qopt->entries[i].command) {
case TC_TAPRIO_CMD_SET_GATES:
- if (fpe)
- return -EINVAL;
break;
case TC_TAPRIO_CMD_SET_AND_HOLD:
gates |= BIT(0);
- fpe = true;
break;
case TC_TAPRIO_CMD_SET_AND_RELEASE:
gates &= ~BIT(0);
- fpe = true;
break;
default:
return -EOPNOTSUPP;
@@ -1068,16 +1054,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
tc_taprio_map_maxsdu_txq(priv, qopt);
- if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->est_lock);
- return -EOPNOTSUPP;
- }
-
- /* Actual FPE register configuration will be done after FPE handshake
- * is success.
- */
- priv->plat->fpe_cfg->enable = fpe;
-
ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->est_lock);
@@ -1086,12 +1062,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
goto disable;
}
- netdev_info(priv->dev, "configured EST\n");
-
- if (fpe) {
- stmmac_fpe_handshake(priv, true);
- netdev_info(priv->dev, "start FPE handshake\n");
- }
+ ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+ qopt->mqprio.preemptible_tcs);
+ if (ret)
+ goto disable;
return 0;
@@ -1109,16 +1083,7 @@ disable:
mutex_unlock(&priv->est_lock);
}
- priv->plat->fpe_cfg->enable = false;
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false);
- netdev_info(priv->dev, "disabled FPE\n");
-
- stmmac_fpe_handshake(priv, false);
- netdev_info(priv->dev, "stop FPE handshake\n");
+ stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
return ret;
}
@@ -1174,6 +1139,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return err;
}
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ if (!qopt->mqprio.preemptible_tcs)
+ return tc_setup_taprio(priv, qopt);
+
+ NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+ "taprio with FPE is not implemented for this MAC");
+
+ return -EOPNOTSUPP;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1198,6 +1175,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1214,6 +1198,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
}
}
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+ stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ u32 offset, count, num_stack_tx_queues = 0;
+ struct net_device *ndev = priv->dev;
+ u32 num_tc = qopt->num_tc;
+ int err;
+
+ if (!num_tc) {
+ stmmac_reset_tc_mqprio(ndev, extack);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
+
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+ mqprio->preemptible_tcs);
+ if (err)
+ goto err_reset_tc;
+
+ return 0;
+
+err_reset_tc:
+ stmmac_reset_tc_mqprio(ndev, extack);
+
+ return err;
+}
+
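From user space this is driven by the mqprio qdisc, roughly "tc qdisc add dev eth0 root handle 100: mqprio num_tc 2 map 0 1 queues 2@0 2@2 fp E P hw 1" (exact iproute2 syntax may vary). A worked example of the mapping the function performs:

	/* Assumed offload values: num_tc = 2,
	 *   qopt->count  = {2, 2}, qopt->offset = {0, 2}
	 * -> tc0 maps to TX queues 0..1, tc1 to TX queues 2..3,
	 *    netif_set_real_num_tx_queues(ndev, 4), and
	 *    preemptible_tcs bit 1 marks tc1 preemptible (fp E P).
	 */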
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "mqprio HW offload is not implemented for this MAC");
+ return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1222,4 +1281,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
+};
+
+const struct stmmac_tc_ops dwxgmac_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
};
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2f30715e9b67..1e887d951a04 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -114,37 +114,23 @@ static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct vnet *vp = (struct vnet *)netdev_priv(dev);
struct vnet_port *port;
- char *p = (char *)buf;
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- p += sizeof(ethtool_stats_keys);
+ buf += sizeof(ethtool_stats_keys);
rcu_read_lock();
list_for_each_entry_rcu(port, &vp->port_list, list) {
- snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
- port->q_index, port->switch_port ? "s" : "q",
- port->raddr);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
- port->q_index);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index,
+ port->switch_port ? "s" : "q",
+ port->raddr);
+ ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_up", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_reset", port->q_index);
}
rcu_read_unlock();
break;
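The conversion leans on ethtool_sprintf() advancing the string cursor itself, replacing the manual p += ETH_GSTRING_LEN bookkeeping. Its semantics are approximately this (a sketch, not the verbatim kernel helper):

	void ethtool_sprintf(u8 **data, const char *fmt, ...)
	{
		va_list args;

		/* format into the current ETH_GSTRING_LEN-sized slot ... */
		va_start(args, fmt);
		vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
		va_end(args);

		/* ... then advance the cursor one slot */
		*data += ETH_GSTRING_LEN;
	}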
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ede5f7890fb4..fc77f424f90b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1671,7 +1671,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* dev->lltx driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -2019,7 +2019,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* set multicast list callback has to use priv->tx_lock.
*/
#ifdef BDX_LLTX
- ndev->features |= NETIF_F_LLTX;
+ ndev->lltx = true;
#endif
/* MTU range: 60 - 16384 */
ndev->min_mtu = ETH_ZLEN;
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 909e7296cecf..47a2d3e5f8ed 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -260,7 +260,7 @@ struct bdx_priv {
int tx_update_mark;
int tx_noupd;
#endif
- spinlock_t tx_lock; /* NETIF_F_LLTX mode */
+ spinlock_t tx_lock; /* dev->lltx mode */
/* rarely used */
u8 port;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index b60976947da5..9032444435e9 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
- ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
- ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_rx = AM65_CPSW_MAX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_QUEUES;
+ ch->rx_count = common->rx_ch_num_flows;
ch->tx_count = common->tx_ch_num;
}
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
if (common->usage_count)
return -EBUSY;
- am65_cpsw_nuss_remove_tx_chns(common);
-
- return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+ return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+ chs->rx_count);
}
static void
@@ -714,8 +713,6 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
@@ -915,80 +912,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
}
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
- coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
- return 0;
-}
-
static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
return 0;
}
-static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
- return -EINVAL;
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
- return -EINVAL;
-
- common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
- tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
- return 0;
+ return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
}
static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
- dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
- queue);
- coal->tx_coalesce_usecs = 20;
- }
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
return 0;
}
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
+}
+
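The device-global coalesce handlers now just delegate to queue 0, keeping the legacy and per-queue ethtool interfaces consistent:

	/* Post-refactor equivalence (sketch):
	 *   ethtool -C eth0 rx-usecs 50
	 * behaves like
	 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50
	 */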
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b06b8872b4eb..cbe99017cbfa 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -138,7 +138,7 @@
AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
-/* Number of TX/RX descriptors */
+/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC 500
#define AM65_CPSW_MAX_RX_DESC 500
@@ -150,6 +150,7 @@
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1
/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
@@ -331,7 +332,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct page *page)
+ struct page *page, u32 flow_idx)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
@@ -364,7 +365,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = page_address(page);
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
+ desc_rx, desc_dma);
}
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
@@ -399,22 +401,27 @@ static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int i;
+ int id, port;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
- rxq = &common->ports[i].xdp_rxq;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ rxq = &common->ports[port].xdp_rxq[id];
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (rx_chn->page_pool) {
- page_pool_destroy(rx_chn->page_pool);
- rx_chn->page_pool = NULL;
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
+ }
}
}
@@ -428,31 +435,44 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
.nid = dev_to_node(common->dev),
.dev = common->dev,
.dma_dir = DMA_BIDIRECTIONAL,
- .napi = &common->napi_rx,
+ /* .napi set dynamically */
};
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int i, ret;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ int id, port, ret;
+
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto err;
+ }
- rx_chn->page_pool = pool;
+ flow->page_pool = pool;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ /* Sharing one page pool across ndevs is safe: the RX handlers of
+ * both ndevs never run simultaneously on the same flow.
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[i].xdp_rxq;
+ rxq = &common->ports[port].xdp_rxq[id];
- ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
}
return 0;
@@ -497,25 +517,27 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
desc_idx);
}
-static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
struct page *page,
bool allow_direct,
int desc_idx)
{
- page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
- rx_chn->pages[desc_idx] = NULL;
+ page_pool_put_full_page(flow->page_pool, page, allow_direct);
+ flow->pages[desc_idx] = NULL;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
- struct am65_cpsw_rx_chn *rx_chn = data;
+ struct am65_cpsw_rx_flow *flow = data;
struct cppi5_host_desc_t *desc_rx;
+ struct am65_cpsw_rx_chn *rx_chn;
dma_addr_t buf_dma;
u32 buf_dma_len;
void *page_addr;
void **swdata;
int desc_idx;
+ rx_chn = &flow->common->rx_chns;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
page_addr = *swdata;
@@ -526,7 +548,7 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
rx_chn->dsize_log2);
- am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
+ am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -602,7 +624,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx;
+ int port_idx, i, ret, tx, flow_idx;
+ struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
struct page *page;
@@ -670,27 +693,26 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
return ret;
}
- for (i = 0; i < rx_chn->descs_num; i++) {
- page = page_pool_dev_alloc_pages(rx_chn->page_pool);
- if (!page) {
- ret = -ENOMEM;
- if (i)
+ for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ flow_idx);
+ ret = -ENOMEM;
goto fail_rx;
+ }
+ flow->pages[i] = page;
- return ret;
- }
- rx_chn->pages[i] = page;
-
- ret = am65_cpsw_nuss_rx_push(common, page);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to channel rx: %d\n",
- ret);
- am65_cpsw_put_page(rx_chn, page, false, i);
- if (i)
+ ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ flow_idx, ret);
+ am65_cpsw_put_page(flow, page, false, i);
goto fail_rx;
-
- return ret;
+ }
}
}
@@ -700,6 +722,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
goto fail_rx;
}
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ napi_enable(&rx_chn->flows[i].napi_rx);
+ if (rx_chn->flows[i].irq_disabled) {
+ rx_chn->flows[i].irq_disabled = false;
+ enable_irq(rx_chn->flows[i].irq);
+ }
+ }
+
for (tx = 0; tx < common->tx_ch_num; tx++) {
ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
@@ -711,12 +741,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
napi_enable(&tx_chn[tx].napi_tx);
}
- napi_enable(&common->napi_rx);
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- enable_irq(rx_chn->irq);
- }
-
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
@@ -727,11 +751,24 @@ fail_tx:
tx--;
}
+ for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ if (!flow->irq_disabled) {
+ disable_irq(flow->irq);
+ flow->irq_disabled = true;
+ }
+ napi_disable(&flow->napi_rx);
+ }
+
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
- am65_cpsw_nuss_rx_cleanup, 0);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+
+ am65_cpsw_destroy_xdp_rxqs(common);
+
return ret;
}
@@ -780,12 +817,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
dev_err(common->dev, "rx teardown timeout\n");
}
- napi_disable(&common->napi_rx);
- hrtimer_cancel(&common->rx_hrtimer);
-
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ napi_disable(&rx_chn->flows[i].napi_rx);
+ hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+ }
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -794,10 +831,6 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- for (i = 0; i < rx_chn->descs_num; i++) {
- if (rx_chn->pages[i])
- am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
- }
am65_cpsw_destroy_xdp_rxqs(common);
dev_dbg(common->dev, "cpsw_nuss stopped\n");
@@ -868,7 +901,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
goto runtime_put;
}
- ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
if (ret) {
dev_err(common->dev, "cannot set real number of rx queues\n");
goto runtime_put;
@@ -992,12 +1025,12 @@ pool_free:
return ret;
}
-static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
struct xdp_buff *xdp,
int desc_idx, int cpu, int *len)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
struct net_device *ndev = port->ndev;
struct am65_cpsw_ndev_stats *stats;
@@ -1026,7 +1059,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
ret = AM65_CPSW_XDP_PASS;
goto out;
case XDP_TX:
- tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -1068,7 +1101,8 @@ drop:
}
page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+ am65_cpsw_put_page(flow, page, true, desc_idx);
+
out:
return ret;
}
@@ -1106,11 +1140,12 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
}
-static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx, int cpu, int *xdp_state)
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ int cpu, int *xdp_state)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
@@ -1120,6 +1155,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_port *port;
int headroom, desc_idx, ret;
struct net_device *ndev;
+ u32 flow_idx = flow->id;
struct sk_buff *skb;
struct xdp_buff xdp;
void *page_addr;
@@ -1174,10 +1210,10 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
}
if (port->xdp_prog) {
- xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
cpu, &pkt_len);
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1195,7 +1231,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb_mark_for_recycle(skb);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
+ napi_gro_receive(&flow->napi_rx, skb);
stats = this_cpu_ptr(ndev_priv->stats);
@@ -1205,24 +1241,24 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
u64_stats_update_end(&stats->syncp);
allocate:
- new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ new_page = page_pool_dev_alloc_pages(flow->page_pool);
if (unlikely(!new_page)) {
dev_err(dev, "page alloc failed\n");
return -ENOMEM;
}
- rx_chn->pages[desc_idx] = new_page;
+ flow->pages[desc_idx] = new_page;
if (netif_dormant(ndev)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
requeue:
- ret = am65_cpsw_nuss_rx_push(common, new_page);
+ ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -1232,38 +1268,32 @@ requeue:
static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
- struct am65_cpsw_common *common =
- container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+ struct am65_cpsw_rx_flow *flow = container_of(timer,
+ struct am65_cpsw_rx_flow,
+ rx_hrtimer);
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
return HRTIMER_NORESTART;
}
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
- int flow = AM65_CPSW_MAX_RX_FLOWS;
+ struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
+ struct am65_cpsw_common *common = flow->common;
int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
int num_rx = 0;
- /* process every flow */
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
- &xdp_state);
- xdp_state_or |= xdp_state;
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
+ /* process only this flow */
+ cur_budget = budget;
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
break;
+ num_rx++;
}
if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
@@ -1272,14 +1302,14 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- if (unlikely(common->rx_pace_timeout)) {
- hrtimer_start(&common->rx_hrtimer,
- ns_to_ktime(common->rx_pace_timeout),
+ if (flow->irq_disabled) {
+ flow->irq_disabled = false;
+ if (unlikely(flow->rx_pace_timeout)) {
+ hrtimer_start(&flow->rx_hrtimer,
+ ns_to_ktime(flow->rx_pace_timeout),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
}
}
}
@@ -1527,11 +1557,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
- struct am65_cpsw_common *common = dev_id;
+ struct am65_cpsw_rx_flow *flow = dev_id;
- common->rx_irq_disabled = true;
+ flow->irq_disabled = true;
disable_irq_nosync(irq);
- napi_schedule(&common->napi_rx);
+ napi_schedule(&flow->napi_rx);
return IRQ_HANDLED;
}
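
The hunks above convert the RX path from a single shared NAPI context to one NAPI context, IRQ and pacing timer per flow. A minimal sketch of the resulting IRQ/NAPI/pacing cycle, with rx_flow standing in for struct am65_cpsw_rx_flow (illustrative only, not the driver's exact code):

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Stand-in for struct am65_cpsw_rx_flow; only the fields used here */
struct rx_flow {
	struct napi_struct napi_rx;
	struct hrtimer rx_hrtimer;
	unsigned long rx_pace_timeout;	/* ns; 0 = re-enable at once */
	bool irq_disabled;
	int irq;
};

static irqreturn_t demo_rx_irq(int irq, void *dev_id)
{
	struct rx_flow *flow = dev_id;	/* dev_id is the flow itself */

	flow->irq_disabled = true;
	disable_irq_nosync(irq);	/* mask only this flow's line */
	napi_schedule(&flow->napi_rx);
	return IRQ_HANDLED;
}

/* Poller side, once napi_complete_done() succeeded under budget */
static void demo_rx_done(struct rx_flow *flow)
{
	if (!flow->irq_disabled)
		return;

	flow->irq_disabled = false;
	if (flow->rx_pace_timeout)	/* pace IRQs: unmask later */
		hrtimer_start(&flow->rx_hrtimer,
			      ns_to_ktime(flow->rx_pace_timeout),
			      HRTIMER_MODE_REL_PINNED);
	else
		enable_irq(flow->irq);
}

Because each flow is its own dev_id, the hard-IRQ half needs no lookup at all; the NAPI half recovers its flow through the am65_cpsw_napi_to_rx_flow() container_of() macro added to the header further down.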
@@ -2176,7 +2206,7 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
}
}
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
int i;
@@ -2191,15 +2221,9 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
-
- if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- memset(tx_chn, 0, sizeof(*tx_chn));
}
+
+ am65_cpsw_nuss_free_tx_chns(common);
}
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
@@ -2331,19 +2355,22 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
-static void am65_cpsw_nuss_remove_rx_chns(void *data)
+static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
- struct am65_cpsw_common *common = data;
struct device *dev = common->dev;
struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_rx_flow *flows;
+ int i;
rx_chn = &common->rx_chns;
+ flows = rx_chn->flows;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (!(rx_chn->irq < 0))
- devm_free_irq(dev, rx_chn->irq, common);
-
- netif_napi_del(&common->napi_rx);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+		if (flows[i].irq >= 0)
+ devm_free_irq(dev, flows[i].irq, &flows[i]);
+ netif_napi_del(&flows[i].napi_rx);
+ }
am65_cpsw_nuss_free_rx_chns(common);
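
With channels now destroyed and rebuilt at runtime, the devm action registered at init time is detached before the manual free so that cleanup runs exactly once. The handoff pattern in isolation (a sketch, not driver code):

#include <linux/device.h>

static void demo_teardown(struct device *dev, void (*free_fn)(void *),
			  void *res)
{
	/* Detach the cleanup registered via devm_add_action() so it
	 * cannot fire a second time when the device goes away...
	 */
	devm_remove_action(dev, free_fn, res);

	/* ...then run it by hand; a later re-init (e.g. an ethtool
	 * channel-count change) registers the action again.
	 */
	free_fn(res);
}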
@@ -2356,6 +2383,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_flow *flow;
u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -2364,12 +2392,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
AM65_CPSW_NAV_SW_DATA_SIZE);
rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_num = common->rx_ch_num_flows;
rx_cfg.flow_id_base = common->rx_flow_id_base;
/* init all flows */
rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
+ rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
+
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ flow = &rx_chn->flows[i];
+ flow->page_pool = NULL;
+ flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
+ sizeof(*flow->pages), GFP_KERNEL);
+ if (!flow->pages)
+ return -ENOMEM;
+ }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -2392,13 +2429,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->dsize_log2 = __fls(hdesc_size_out);
WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
- rx_chn->page_pool = NULL;
-
- rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
- sizeof(*rx_chn->pages), GFP_KERNEL);
- if (!rx_chn->pages)
- return -ENOMEM;
-
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2422,6 +2452,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
};
+ flow = &rx_chn->flows[i];
+ flow->id = i;
+ flow->common = common;
+
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
@@ -2438,30 +2472,37 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-
- if (rx_chn->irq < 0) {
+ flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (flow->irq <= 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
- rx_chn->irq);
- ret = rx_chn->irq;
+ flow->irq);
+ ret = flow->irq;
goto err;
}
- }
-
- netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll);
- hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
- ret = devm_request_irq(dev, rx_chn->irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- rx_chn->irq, ret);
- goto err;
+ snprintf(flow->name,
+ sizeof(flow->name), "%s-rx%d",
+ dev_name(dev), i);
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+ hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+
+ ret = devm_request_irq(dev, flow->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ flow->name, flow);
+ if (ret) {
+ dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ i, flow->irq, ret);
+ goto err;
+ }
}
+ /* setup classifier to route priorities to flows */
+ cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
@@ -2705,8 +2746,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
/* alloc netdev */
port->ndev = devm_alloc_etherdev_mqs(common->dev,
sizeof(struct am65_cpsw_ndev_priv),
- AM65_CPSW_MAX_TX_QUEUES,
- AM65_CPSW_MAX_RX_QUEUES);
+ AM65_CPSW_MAX_QUEUES,
+ AM65_CPSW_MAX_QUEUES);
if (!port->ndev) {
dev_err(dev, "error allocating slave net_device %u\n",
port->port_id);
@@ -2777,7 +2818,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.port_np),
+ of_fwnode_handle(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
@@ -3303,9 +3344,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
}
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
+ &rx_chan->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -3346,12 +3388,21 @@ err_cleanup_ndev:
return ret;
}
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx)
{
int ret;
+ am65_cpsw_nuss_remove_tx_chns(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+
common->tx_ch_num = num_tx;
+ common->rx_ch_num_flows = num_rx;
ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ ret = am65_cpsw_nuss_init_rx_chns(common);
return ret;
}
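
The combined update helper must tear both directions down before the counts change, because the per-flow IRQs, NAPI contexts and page arrays are sized by the old rx_ch_num_flows. A hedged sketch of the expected ethtool .set_channels caller (the handler name and the am65_ndev_to_common() accessor are assumptions, not taken from this patch):

static int demo_set_channels(struct net_device *ndev,
			     struct ethtool_channels *ch)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

	/* counts may only change while the interface is down: the
	 * helper frees and re-requests IRQs and DMA channels
	 */
	if (netif_running(ndev))
		return -EBUSY;

	if (!ch->tx_count || !ch->rx_count ||
	    ch->tx_count > AM65_CPSW_MAX_QUEUES ||
	    ch->rx_count > AM65_CPSW_MAX_QUEUES)
		return -EINVAL;

	return am65_cpsw_nuss_update_tx_rx_chns(common, ch->tx_count,
						ch->rx_count);
}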
@@ -3481,6 +3532,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
@@ -3672,8 +3724,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
return ret;
/* If RX IRQ was disabled before suspend, keep it disabled */
- if (common->rx_irq_disabled)
- disable_irq(common->rx_chns.irq);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (common->rx_chns.flows[i].irq_disabled)
+ disable_irq(common->rx_chns.flows[i].irq);
+ }
am65_cpts_resume(common->cpts);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e2ce2be320bd..dc8d544230dc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -21,9 +21,7 @@ struct am65_cpts;
#define HOST_PORT_NUM 0
-#define AM65_CPSW_MAX_TX_QUEUES 8
-#define AM65_CPSW_MAX_RX_QUEUES 1
-#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_MAX_QUEUES 8 /* both TX & RX */
#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
@@ -58,7 +56,7 @@ struct am65_cpsw_port {
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
struct bpf_prog *xdp_prog;
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq[AM65_CPSW_MAX_QUEUES];
/* Only for suspend resume context */
u32 vid_context;
};
@@ -94,16 +92,27 @@ struct am65_cpsw_tx_chn {
u32 rate_mbps;
};
+struct am65_cpsw_rx_flow {
+ u32 id;
+ struct napi_struct napi_rx;
+ struct am65_cpsw_common *common;
+ int irq;
+ bool irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
+ struct page_pool *page_pool;
+ struct page **pages;
+ char name[32];
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
- struct page_pool *page_pool;
- struct page **pages;
u32 descs_num;
unsigned char dsize_log2;
- int irq;
+ struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
@@ -145,16 +154,12 @@ struct am65_cpsw_common {
u32 tx_ch_rate_msk;
u32 rx_flow_id_base;
- struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES];
struct completion tdown_complete;
atomic_t tdown_cnt;
+ int rx_ch_num_flows;
struct am65_cpsw_rx_chn rx_chns;
- struct napi_struct napi_rx;
-
- bool rx_irq_disabled;
- struct hrtimer rx_hrtimer;
- unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -203,8 +208,8 @@ struct am65_cpsw_ndev_priv {
#define am65_common_get_host(common) (&(common)->host)
#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
-#define am65_cpsw_napi_to_common(pnapi) \
- container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_rx_flow(pnapi) \
+ container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx)
#define am65_cpsw_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
@@ -215,8 +220,8 @@ struct am65_cpsw_ndev_priv {
extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx);
bool am65_cpsw_port_dev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 64bf22cd860c..0d5d8917c70b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -45,6 +46,24 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define ALE_POLICER_PORT_OUI 0x100
+#define ALE_POLICER_DA_SA 0x104
+#define ALE_POLICER_VLAN 0x108
+#define ALE_POLICER_ETHERTYPE_IPSA 0x10c
+#define ALE_POLICER_IPDA 0x110
+#define ALE_POLICER_PIR 0x118
+#define ALE_POLICER_CIR 0x11c
+#define ALE_POLICER_TBL_CTL 0x120
+#define ALE_POLICER_CTL 0x124
+#define ALE_POLICER_TEST_CTL 0x128
+#define ALE_POLICER_HIT_STATUS 0x12c
+#define ALE_THREAD_DEF 0x134
+#define ALE_THREAD_CTL 0x138
+#define ALE_THREAD_VAL 0x13c
+
+#define ALE_POLICER_TBL_WRITE_ENABLE BIT(31)
+#define ALE_POLICER_TBL_INDEX_MASK GENMASK(4, 0)
+
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
/* ALE_AGING_TIMER */
@@ -76,7 +95,7 @@ enum {
* @dev_id: ALE version/SoC id
* @features: features supported by ALE
* @tbl_entries: number of ALE entries
- * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @reg_fields: pointer to array of register field configuration
* @nu_switch_ale: NU Switch ALE
* @vlan_entry_tbl: ALE vlan entry fields description tbl
*/
@@ -84,7 +103,7 @@ struct cpsw_ale_dev_id {
const char *dev_id;
u32 features;
u32 tbl_entries;
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
bool nu_switch_ale;
const struct ale_entry_fld *vlan_entry_tbl;
};
@@ -102,7 +121,7 @@ struct cpsw_ale_dev_id {
#define ALE_UCAST_TOUCHED 3
#define ALE_TABLE_SIZE_MULTIPLIER 1024
-#define ALE_STATUS_SIZE_MASK 0x1f
+#define ALE_POLICER_SIZE_MULTIPLIER 8
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
@@ -1292,25 +1311,108 @@ void cpsw_ale_stop(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct reg_field ale_fields_cpsw[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15),
+};
+
+static const struct reg_field ale_fields_cpsw_nu[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10),
+ /* CPSW_ALE_STATUS_REG */
+ [ALE_ENTRIES] = REG_FIELD(ALE_STATUS, 0, 7),
+ [ALE_POLICERS] = REG_FIELD(ALE_STATUS, 8, 15),
+ /* CPSW_ALE_POLICER_PORT_OUI_REG */
+ [POL_PORT_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31),
+ [POL_TRUNK_ID] = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30),
+ [POL_PORT_NUM] = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25),
+ [POL_PRI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19),
+ [POL_PRI_VAL] = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18),
+ [POL_OUI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15),
+ [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5),
+
+ /* CPSW_ALE_POLICER_DA_SA_REG */
+ [POL_DST_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 31, 31),
+ [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21),
+ [POL_SRC_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 15, 15),
+ [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5),
+
+ /* CPSW_ALE_POLICER_VLAN_REG */
+ [POL_OVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 31, 31),
+ [POL_OVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 16, 21),
+ [POL_IVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 15, 15),
+ [POL_IVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 0, 5),
+
+ /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */
+ [POL_ETHERTYPE_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31),
+ [POL_ETHERTYPE_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21),
+ [POL_IPSRC_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15),
+ [POL_IPSRC_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5),
+
+ /* CPSW_ALE_POLICER_IPDA_REG */
+ [POL_IPDST_MEN] = REG_FIELD(ALE_POLICER_IPDA, 31, 31),
+ [POL_IPDST_INDEX] = REG_FIELD(ALE_POLICER_IPDA, 16, 21),
+
+ /* CPSW_ALE_POLICER_TBL_CTL_REG */
+	/* No REG_FIELD entries are defined for this register: its fields
+	 * cannot be used independently and are driven as a whole via
+	 * cpsw_ale_policer_read_idx()/cpsw_ale_policer_write_idx().
+	 */
+
+ /* CPSW_ALE_POLICER_CTL_REG */
+ [POL_EN] = REG_FIELD(ALE_POLICER_CTL, 31, 31),
+ [POL_RED_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 29, 29),
+ [POL_YELLOW_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 28, 28),
+ [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26),
+ [POL_POL_MATCH_MODE] = REG_FIELD(ALE_POLICER_CTL, 22, 23),
+ [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21),
+ [POL_MAC_ONLY_DEF_DIS] = REG_FIELD(ALE_POLICER_CTL, 20, 20),
+
+ /* CPSW_ALE_POLICER_TEST_CTL_REG */
+ [POL_TEST_CLR] = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31),
+ [POL_TEST_CLR_RED] = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30),
+ [POL_TEST_CLR_YELLOW] = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29),
+ [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28),
+ [POL_TEST_ENTRY] = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4),
+
+ /* CPSW_ALE_POLICER_HIT_STATUS_REG */
+ [POL_STATUS_HIT] = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31),
+ [POL_STATUS_HIT_RED] = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30),
+ [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29),
+
+ /* CPSW_ALE_THREAD_DEF_REG */
+ [ALE_DEFAULT_THREAD_EN] = REG_FIELD(ALE_THREAD_DEF, 15, 15),
+ [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5),
+
+ /* CPSW_ALE_THREAD_CTL_REG */
+ [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4),
+
+ /* CPSW_ALE_THREAD_VAL_REG */
+ [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15),
+ [ALE_THREAD_VALUE] = REG_FIELD(ALE_THREAD_VAL, 0, 5),
+};
+
static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
{
/* am3/4/5, dra7. dm814x, 66ak2hk-gbe */
.dev_id = "cpsw",
.tbl_entries = 1024,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
/* 66ak2h_xgbe */
.dev_id = "66ak2h-xgbe",
.tbl_entries = 2048,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
.dev_id = "66ak2el",
.features = CPSW_ALE_F_STATUS_REG,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1318,7 +1420,7 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "66ak2g",
.features = CPSW_ALE_F_STATUS_REG,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1326,20 +1428,20 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "am65x-cpsw2g",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
{
.dev_id = "j721e-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
{
.dev_id = "am64-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
.tbl_entries = 512,
},
@@ -1361,47 +1463,80 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
return NULL;
}
+static const struct regmap_config ale_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "cpsw-ale",
+};
+
+static int cpsw_ale_regfield_init(struct cpsw_ale *ale)
+{
+ const struct reg_field *reg_fields = ale->params.reg_fields;
+ struct device *dev = ale->params.dev;
+ struct regmap *regmap = ale->regmap;
+ int i;
+
+ for (i = 0; i < ALE_FIELDS_MAX; i++) {
+ ale->fields[i] = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[i]);
+ if (IS_ERR(ale->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap field %d\n", i);
+ return PTR_ERR(ale->fields[i]);
+ }
+ }
+
+ return 0;
+}
+
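
The conversion replaces open-coded mask/shift arithmetic with named regmap fields: each table entry carries its register offset and bit range, and reads become lookups. The same pattern reduced to a standalone sketch (field names here are illustrative, not the ALE's):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

enum { DEMO_MINOR_VER, DEMO_MAJOR_VER, DEMO_FIELDS_MAX };

static const struct reg_field demo_fields[DEMO_FIELDS_MAX] = {
	[DEMO_MINOR_VER] = REG_FIELD(0x0, 0, 7),	/* reg 0x0, bits 0..7 */
	[DEMO_MAJOR_VER] = REG_FIELD(0x0, 8, 15),	/* reg 0x0, bits 8..15 */
};

static int demo_read_version(struct device *dev, struct regmap *map)
{
	struct regmap_field *f[DEMO_FIELDS_MAX];
	u32 major, minor;
	int i;

	for (i = 0; i < DEMO_FIELDS_MAX; i++) {
		f[i] = devm_regmap_field_alloc(dev, map, demo_fields[i]);
		if (IS_ERR(f[i]))
			return PTR_ERR(f[i]);
	}

	/* the offset, mask and shift are applied by the regmap core */
	regmap_field_read(f[DEMO_MAJOR_VER], &major);
	regmap_field_read(f[DEMO_MINOR_VER], &minor);
	dev_info(dev, "version %u.%u\n", major, minor);
	return 0;
}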
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ u32 ale_entries, rev_major, rev_minor, policers;
const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
- u32 rev, ale_entries;
+ int ret;
ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
if (!ale_dev_id)
return ERR_PTR(-EINVAL);
params->ale_entries = ale_dev_id->tbl_entries;
- params->major_ver_mask = ale_dev_id->major_ver_mask;
params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+ params->reg_fields = ale_dev_id->reg_fields;
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
+ ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs,
+ &ale_regmap_cfg);
+ if (IS_ERR(ale->regmap)) {
+ dev_err(params->dev, "Couldn't create CPSW ALE regmap\n");
+		return ERR_CAST(ale->regmap);
+ }
+
+ ale->params = *params;
+ ret = cpsw_ale_regfield_init(ale);
+ if (ret)
+ return ERR_PTR(ret);
ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
GFP_KERNEL);
if (!ale->p0_untag_vid_mask)
return ERR_PTR(-ENOMEM);
- ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
ale->features = ale_dev_id->features;
ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
- rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- ale->version =
- (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
- ALE_VERSION_MINOR(rev);
+ regmap_field_read(ale->fields[MINOR_VER], &rev_minor);
+ regmap_field_read(ale->fields[MAJOR_VER], &rev_major);
+ ale->version = rev_major << 8 | rev_minor;
dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
- ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
- ALE_VERSION_MINOR(rev));
+ rev_major, rev_minor);
if (ale->features & CPSW_ALE_F_STATUS_REG &&
!ale->params.ale_entries) {
- ale_entries =
- readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries);
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -1415,8 +1550,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
+
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.num_policers) {
+ regmap_field_read(ale->fields[ALE_POLICERS], &policers);
+ if (!policers)
+ return ERR_PTR(-EINVAL);
+
+ policers *= ALE_POLICER_SIZE_MULTIPLIER;
+ ale->params.num_policers = policers;
+ }
+
dev_info(ale->params.dev,
- "ALE Table size %ld\n", ale->params.ale_entries);
+ "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries,
+ ale->params.num_policers);
/* set default bits for existing h/w */
ale->port_mask_bits = ale->params.ale_ports;
@@ -1480,3 +1627,97 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
}
+
+/* Reads the specified policer index into ALE POLICER registers */
+static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* Writes the ALE POLICER registers into the specified policer index */
+static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ idx |= ALE_POLICER_TBL_WRITE_ENABLE;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
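
The policer table is accessed indirectly: writing an index to ALE_POLICER_TBL_CTL loads that entry into the ALE_POLICER_* registers, and writing the index back with bit 31 (ALE_POLICER_TBL_WRITE_ENABLE) set commits the registers to the entry. A read-modify-write of one entry therefore looks like this sketch, built on the two helpers above:

static void demo_policer_set_pri(struct cpsw_ale *ale, u32 idx, u32 pri)
{
	/* 1) mirror entry @idx into the ALE_POLICER_* registers */
	cpsw_ale_policer_read_idx(ale, idx);

	/* 2) edit the mirrored registers through the regmap fields */
	regmap_field_write(ale->fields[POL_PRI_VAL], pri);
	regmap_field_write(ale->fields[POL_PRI_MEN], 1);

	/* 3) commit the registers back to entry @idx */
	cpsw_ale_policer_write_idx(ale, idx);
}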
+/* Enables/disables the custom thread value for the specified policer index */
+static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx,
+ u32 thread_id, bool enable)
+{
+ regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx);
+ regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id);
+ regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0);
+}
+
+/* Disable all policer entries and thread mappings */
+static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
+{
+ int i;
+
+	for (i = 0; i < ale->params.num_policers; i++) {
+ cpsw_ale_policer_read_idx(ale, i);
+ regmap_field_write(ale->fields[POL_PORT_MEN], 0);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 0);
+ regmap_field_write(ale->fields[POL_OUI_MEN], 0);
+ regmap_field_write(ale->fields[POL_DST_MEN], 0);
+ regmap_field_write(ale->fields[POL_SRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_OVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_IVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPSRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPDST_MEN], 0);
+ regmap_field_write(ale->fields[POL_EN], 0);
+ regmap_field_write(ale->fields[POL_RED_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0);
+
+ cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0);
+ }
+}
+
+/* Default classifier is to map 8 user priorities to N receive channels */
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
+{
+ int pri, idx;
+	/* IEEE 802.1D-2004, Standard for Local and metropolitan area networks
+	 * Table G-2 - Traffic type acronyms
+	 * Table G-3 - Defining traffic types
+	 * User priority values 1 and 2 effectively communicate a lower
+	 * priority than 0. In the table below, 0 is therefore assigned to a
+	 * higher-priority thread than 1 and 2 wherever possible.
+	 * The table maps each user priority to the thread it should be
+	 * sent to for a given number of threads (RX channels). Higher
+	 * threads have higher priority.
+	 * e.g. with 8 threads, user priority 0 maps to
+	 * pri_thread_map[8-1][0], i.e. thread 2.
+	 */
+ int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 1, 1, 1, 1, },
+ { 0, 0, 0, 0, 1, 1, 2, 2, },
+ { 1, 0, 0, 1, 2, 2, 3, 3, },
+ { 1, 0, 0, 1, 2, 3, 4, 4, },
+ { 1, 0, 0, 2, 3, 4, 5, 5, },
+ { 1, 0, 0, 2, 3, 4, 5, 6, },
+ { 2, 0, 1, 3, 4, 5, 6, 7, } };
+
+ cpsw_ale_policer_reset(ale);
+
+ /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */
+ for (pri = 0; pri < 8; pri++) {
+ idx = pri;
+
+ /* Classifier 'idx' match on priority 'pri' */
+ cpsw_ale_policer_read_idx(ale, idx);
+ regmap_field_write(ale->fields[POL_PRI_VAL], pri);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 1);
+ cpsw_ale_policer_write_idx(ale, idx);
+
+ /* Map Classifier 'idx' to thread provided by the map */
+ cpsw_ale_policer_thread_idx_enable(ale, idx,
+ pri_thread_map[num_rx_ch - 1][pri],
+ 1);
+ }
+}
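
To make the table concrete, the row index is num_rx_ch - 1 and the column is the 802.1p user priority:

/* Worked examples against pri_thread_map[][] above:
 *   2 RX channels -> row { 0, 0, 0, 0, 1, 1, 1, 1 }:
 *       priorities 0..3 -> thread 0, priorities 4..7 -> thread 1
 *   8 RX channels -> row { 2, 0, 1, 3, 4, 5, 6, 7 }:
 *       priority 0 -> thread 2, while priorities 1 and 2 land on
 *       threads 0 and 1, below priority 0, matching the IEEE 802.1D
 *       note that priorities 1 and 2 rank under 0
 */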
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6779ee111d57..1e4e9a3dd234 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -8,11 +8,14 @@
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
+struct reg_field;
+
struct cpsw_ale_params {
struct device *dev;
void __iomem *ale_regs;
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
+ unsigned long num_policers;
unsigned long ale_ports;
/* NU Switch has specific handling as number of bits in ALE entries
* are different than other versions of ALE. Also there are specific
@@ -20,19 +23,69 @@ struct cpsw_ale_params {
* to identify this hardware.
*/
bool nu_switch_ale;
- /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
- * pass it from caller.
- */
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
const char *dev_id;
unsigned long bus_freq;
};
struct ale_entry_fld;
+struct regmap;
+
+enum ale_fields {
+ MINOR_VER,
+ MAJOR_VER,
+ ALE_ENTRIES,
+ ALE_POLICERS,
+ POL_PORT_MEN,
+ POL_TRUNK_ID,
+ POL_PORT_NUM,
+ POL_PRI_MEN,
+ POL_PRI_VAL,
+ POL_OUI_MEN,
+ POL_OUI_INDEX,
+ POL_DST_MEN,
+ POL_DST_INDEX,
+ POL_SRC_MEN,
+ POL_SRC_INDEX,
+ POL_OVLAN_MEN,
+ POL_OVLAN_INDEX,
+ POL_IVLAN_MEN,
+ POL_IVLAN_INDEX,
+ POL_ETHERTYPE_MEN,
+ POL_ETHERTYPE_INDEX,
+ POL_IPSRC_MEN,
+ POL_IPSRC_INDEX,
+ POL_IPDST_MEN,
+ POL_IPDST_INDEX,
+ POL_EN,
+ POL_RED_DROP_EN,
+ POL_YELLOW_DROP_EN,
+ POL_YELLOW_THRESH,
+ POL_POL_MATCH_MODE,
+ POL_PRIORITY_THREAD_EN,
+ POL_MAC_ONLY_DEF_DIS,
+ POL_TEST_CLR,
+ POL_TEST_CLR_RED,
+ POL_TEST_CLR_YELLOW,
+ POL_TEST_CLR_SELECTED,
+ POL_TEST_ENTRY,
+ POL_STATUS_HIT,
+ POL_STATUS_HIT_RED,
+ POL_STATUS_HIT_YELLOW,
+ ALE_DEFAULT_THREAD_EN,
+ ALE_DEFAULT_THREAD_VAL,
+ ALE_THREAD_CLASS_INDEX,
+ ALE_THREAD_ENABLE,
+ ALE_THREAD_VALUE,
+ /* terminator */
+ ALE_FIELDS_MAX,
+};
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
+ struct regmap *regmap;
+ struct regmap_field *fields[ALE_FIELDS_MAX];
unsigned long ageout;
u32 version;
u32 features;
@@ -140,5 +193,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch);
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 53ed23d68722..21d55a180ef6 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -725,8 +725,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *inf
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = cpsw->cpts->phc_index;
info->tx_types =
@@ -741,10 +739,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *inf
int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = 0;
info->rx_filters = 0;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2baa198ebfa0..557cc71b9dd2 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1407,7 +1407,8 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
+ ndev->netns_local = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 75c294ce6fb6..5d6d1cf78e93 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -53,78 +53,6 @@
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
-enum {
- ICSS_IEP_GLOBAL_CFG_REG,
- ICSS_IEP_GLOBAL_STATUS_REG,
- ICSS_IEP_COMPEN_REG,
- ICSS_IEP_SLOW_COMPEN_REG,
- ICSS_IEP_COUNT_REG0,
- ICSS_IEP_COUNT_REG1,
- ICSS_IEP_CAPTURE_CFG_REG,
- ICSS_IEP_CAPTURE_STAT_REG,
-
- ICSS_IEP_CAP6_RISE_REG0,
- ICSS_IEP_CAP6_RISE_REG1,
-
- ICSS_IEP_CAP7_RISE_REG0,
- ICSS_IEP_CAP7_RISE_REG1,
-
- ICSS_IEP_CMP_CFG_REG,
- ICSS_IEP_CMP_STAT_REG,
- ICSS_IEP_CMP0_REG0,
- ICSS_IEP_CMP0_REG1,
- ICSS_IEP_CMP1_REG0,
- ICSS_IEP_CMP1_REG1,
-
- ICSS_IEP_CMP8_REG0,
- ICSS_IEP_CMP8_REG1,
- ICSS_IEP_SYNC_CTRL_REG,
- ICSS_IEP_SYNC0_STAT_REG,
- ICSS_IEP_SYNC1_STAT_REG,
- ICSS_IEP_SYNC_PWIDTH_REG,
- ICSS_IEP_SYNC0_PERIOD_REG,
- ICSS_IEP_SYNC1_DELAY_REG,
- ICSS_IEP_SYNC_START_REG,
- ICSS_IEP_MAX_REGS,
-};
-
-/**
- * struct icss_iep_plat_data - Plat data to handle SoC variants
- * @config: Regmap configuration data
- * @reg_offs: register offsets to capture offset differences across SoCs
- * @flags: Flags to represent IEP properties
- */
-struct icss_iep_plat_data {
- const struct regmap_config *config;
- u32 reg_offs[ICSS_IEP_MAX_REGS];
- u32 flags;
-};
-
-struct icss_iep {
- struct device *dev;
- void __iomem *base;
- const struct icss_iep_plat_data *plat_data;
- struct regmap *map;
- struct device_node *client_np;
- unsigned long refclk_freq;
- int clk_tick_time; /* one refclk tick time in ns */
- struct ptp_clock_info ptp_info;
- struct ptp_clock *ptp_clock;
- struct mutex ptp_clk_mutex; /* PHC access serializer */
- u32 def_inc;
- s16 slow_cmp_inc;
- u32 slow_cmp_count;
- const struct icss_iep_clockops *ops;
- void *clockops_data;
- u32 cycle_time_ns;
- u32 perout_enabled;
- bool pps_enabled;
- int cap_cmp_irq;
- u64 period;
- u32 latch_enable;
- struct work_struct work;
-};
-
/**
* icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
* @iep: Pointer to structure representing IEP.
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
index 803a4b714893..0bdca0155abd 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.h
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -12,7 +12,78 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
-struct icss_iep;
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ const struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+ struct work_struct work;
+};
+
extern const struct icss_iep_clockops prueth_iep_clockops;
/* Firmware specific clock operations */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 9ec504d976d6..833ca86d0b71 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -290,6 +290,7 @@ void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_host_mac_addr);
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index b9d8a93d1680..fdebeb2f84e0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -660,14 +660,15 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
{
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
+ u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
void **swdata;
- u32 pkt_len;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -712,9 +713,20 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
/* set dst tag to indicate internal qid at the firmware which is at
* bit8..bit15. bit0..bit7 indicates port num for directed
- * packets in case of switch mode operation
+ * packets in case of switch mode operation and port num 0
+ * for undirected packets in case of HSR offload mode
*/
- cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ dst_tag_id = emac->port_id | (q_idx << 8);
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_DUP))
+ dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;
+
+ if (prueth->is_hsr_offload_mode &&
+ (ndev->features & NETIF_F_HW_HSR_TAG_INS))
+ epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;
+
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
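
The destination tag packs the firmware queue into bits 8..15 and the destination port into bits 0..7, with port 0 marking undirected traffic in HSR offload mode. The layout expressed as illustrative helpers (names not from the driver, which open-codes the same arithmetic above):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_DST_TAG_PORT	GENMASK(7, 0)	/* 0 = undirected (HSR) */
#define DEMO_DST_TAG_QID	GENMASK(15, 8)	/* firmware-internal queue */

static u32 demo_dst_tag(u32 port_id, u32 q_idx)
{
	return FIELD_PREP(DEMO_DST_TAG_QID, q_idx) |
	       FIELD_PREP(DEMO_DST_TAG_PORT, port_id);
}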
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index dae52a83a378..72ace151d8e9 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -107,7 +107,7 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
},
};
-static void icssg_config_mii_init_switch(struct prueth_emac *emac)
+static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int mii = prueth_emac_slice(emac);
@@ -278,7 +278,7 @@ static int emac_r30_is_done(struct prueth_emac *emac)
return 1;
}
-static int prueth_switch_buffer_setup(struct prueth_emac *emac)
+static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
struct icssg_rxq_ctx __iomem *rxq_ctx;
@@ -424,7 +424,7 @@ static void icssg_init_emac_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
-static void icssg_init_switch_mode(struct prueth *prueth)
+static void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
@@ -455,8 +455,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode)
- icssg_init_switch_mode(prueth);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
else
icssg_init_emac_mode(prueth);
@@ -472,8 +472,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
- if (prueth->is_switch_mode)
- icssg_config_mii_init_switch(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_config_mii_init_fw_offload(emac);
else
icssg_config_mii_init(emac);
icssg_config_ipg(emac);
@@ -498,8 +498,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
writeb(0, config + QUEUE_NUM_UNTAGGED);
- if (prueth->is_switch_mode)
- ret = prueth_switch_buffer_setup(emac);
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ ret = prueth_fw_offload_buffer_setup(emac);
else
ret = prueth_emac_buffer_setup(emac);
if (ret)
@@ -531,7 +531,9 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = {
{{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/
{{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE*/
{{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/
- {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/
+	{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE */
+ {{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* HSR_RX_OFFLOAD_ENABLE */
+ {{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}} /* HSR_RX_OFFLOAD_DISABLE */
};
int icssg_set_port_state(struct prueth_emac *emac,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 1ac60283923b..92c2deaa3068 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -80,6 +80,8 @@ enum icssg_port_state_cmd {
ICSSG_EMAC_PORT_PREMPT_TX_DISABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE,
ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE,
+ ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE,
ICSSG_EMAC_PORT_MAX_COMMANDS
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 5688f054cec5..b715af21d23a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev)
static int emac_get_sset_count(struct net_device *ndev, int stringset)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
switch (stringset) {
case ETH_SS_STATS:
- return ICSSG_NUM_ETHTOOL_STATS;
+ if (emac->prueth->pa_stats)
+ return ICSSG_NUM_ETHTOOL_STATS;
+ else
+ return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS;
default:
return -EOPNOTSUPP;
}
@@ -78,18 +82,18 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset)
static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct prueth_emac *emac = netdev_priv(ndev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!icssg_all_stats[i].standard_stats) {
- memcpy(p, icssg_all_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
+ ethtool_puts(&p, icssg_all_miig_stats[i].name);
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ ethtool_puts(&p, icssg_all_pa_stats[i].name);
break;
default:
break;
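
ethtool_puts() copies a string into one ETH_GSTRING_LEN-sized slot and advances the cursor, replacing the memcpy()/pointer-bump pair deleted above. A minimal sketch of the pattern:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static void demo_fill_strings(u8 *data)
{
	static const char * const names[] = { "demo_stat0", "demo_stat1" };
	u8 *p = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		ethtool_puts(&p, names[i]);	/* copies and advances p */
}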
@@ -104,9 +108,13 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
emac_update_hardware_stats(emac);
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++)
- if (!icssg_all_stats[i].standard_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
*(data++) = emac->stats[i];
+
+ if (emac->prueth->pa_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ *(data++) = emac->pa_stats[i];
}
static int emac_get_ts_info(struct net_device *ndev,
@@ -118,8 +126,6 @@ static int emac_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index e3451beed323..5fd9902ab181 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -13,6 +13,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
+#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -40,6 +41,11 @@
#define DEFAULT_PORT_MASK 1
#define DEFAULT_UNTAG_MASK 1
+#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \
+ NETIF_F_HW_HSR_DUP | \
+ NETIF_F_HW_HSR_TAG_INS | \
+ NETIF_F_HW_HSR_TAG_RM)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -118,6 +124,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static struct icssg_firmwares icssg_hsr_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
+ }
+};
+
static struct icssg_firmwares icssg_switch_firmwares[] = {
{
.pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
@@ -152,6 +171,8 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode)
+ firmwares = icssg_hsr_firmwares;
else
firmwares = icssg_emac_firmwares;
@@ -365,7 +386,8 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
sc_desc.iepcount_set = ns % cycletime;
- sc_desc.CMP0_current = cycletime - 4; //Count from 0 to (cycle time)-4
+ /* Count from 0 to (cycle time) - emac->iep->def_inc */
+ sc_desc.CMP0_current = cycletime - emac->iep->def_inc;
memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
@@ -470,6 +492,36 @@ static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
return 0;
}
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK, true);
+
+ icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
+ BIT(emac->port_id), true);
+ return 0;
+}
+
+static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK, false);
+
+ return 0;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -630,7 +682,10 @@ static int emac_ndo_stop(struct net_device *ndev)
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
- __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
+ else
+ __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
@@ -708,7 +763,12 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast);
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __dev_mc_sync(ndev, icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
}
/**
@@ -725,6 +785,29 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
+static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+	/* HSR tag insertion offload and HSR dup offload are tightly coupled
+	 * in the firmware implementation, so both features must be enabled
+	 * or disabled together.
+	 */
+ if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS)))
+ if ((features & NETIF_F_HW_HSR_DUP) ||
+ (features & NETIF_F_HW_HSR_TAG_INS))
+ features |= NETIF_F_HW_HSR_DUP |
+ NETIF_F_HW_HSR_TAG_INS;
+
+ if ((ndev->features & NETIF_F_HW_HSR_DUP) ||
+ (ndev->features & NETIF_F_HW_HSR_TAG_INS))
+ if (!(features & NETIF_F_HW_HSR_DUP) ||
+ !(features & NETIF_F_HW_HSR_TAG_INS))
+ features &= ~(NETIF_F_HW_HSR_DUP |
+ NETIF_F_HW_HSR_TAG_INS);
+
+ return features;
+}
+
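
The net effect of the hook is an invariant: after fixup, NETIF_F_HW_HSR_DUP and NETIF_F_HW_HSR_TAG_INS are always both set or both clear. A sketch that would assert this around the hook (assumed to live in the same file, since emac_ndo_fix_features() is static):

static netdev_features_t demo_fix_features_checked(struct net_device *ndev,
						   netdev_features_t req)
{
	netdev_features_t out = emac_ndo_fix_features(ndev, req);

	/* the two coupled offload bits must leave the hook equal */
	WARN_ON(!!(out & NETIF_F_HW_HSR_DUP) !=
		!!(out & NETIF_F_HW_HSR_TAG_INS));
	return out;
}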
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -736,6 +819,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_eth_ioctl = icssg_ndo_ioctl,
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
+ .ndo_fix_features = emac_ndo_fix_features,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -857,12 +941,14 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &icssg_ethtool_ops;
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features;
+ ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
@@ -951,7 +1037,7 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_attach(emac1->ndev);
}
-static void icssg_enable_switch_mode(struct prueth *prueth)
+static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
int mac;
@@ -960,6 +1046,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth)
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
+
if (netif_running(emac->ndev)) {
icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
@@ -971,8 +1064,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth)
BIT(emac->port_id) | DEFAULT_PORT_MASK,
BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
}
}
}
@@ -1010,7 +1108,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
prueth->is_switch_mode = true;
prueth->default_vlan = 1;
emac->port_vlan = prueth->default_vlan;
- icssg_enable_switch_mode(prueth);
+ icssg_change_mode(prueth);
}
}
@@ -1038,6 +1136,61 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
prueth->hw_bridge_dev = NULL;
}
+static int prueth_hsr_port_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *emac0;
+ struct prueth_emac *emac1;
+
+ emac0 = prueth->emac[PRUETH_MAC0];
+ emac1 = prueth->emac[PRUETH_MAC1];
+
+ if (prueth->is_switch_mode)
+ return -EOPNOTSUPP;
+
+ prueth->hsr_members |= BIT(emac->port_id);
+ if (!prueth->is_hsr_offload_mode) {
+ if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) &&
+ prueth->hsr_members & BIT(PRUETH_PORT_MII1)) {
+ if (!(emac0->ndev->features &
+ NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
+ !(emac1->ndev->features &
+ NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
+ return -EOPNOTSUPP;
+ prueth->is_hsr_offload_mode = true;
+ prueth->default_vlan = 1;
+ emac0->port_vlan = prueth->default_vlan;
+ emac1->port_vlan = prueth->default_vlan;
+ icssg_change_mode(prueth);
+ netdev_dbg(ndev, "Enabling HSR offload mode\n");
+ }
+ }
+
+ return 0;
+}
+
+static void prueth_hsr_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *emac0;
+ struct prueth_emac *emac1;
+
+ emac0 = prueth->emac[PRUETH_MAC0];
+ emac1 = prueth->emac[PRUETH_MAC1];
+
+ prueth->hsr_members &= ~BIT(emac->port_id);
+ if (prueth->is_hsr_offload_mode) {
+ prueth->is_hsr_offload_mode = false;
+ emac0->port_vlan = 0;
+ emac1->port_vlan = 0;
+ prueth->hsr_dev = NULL;
+ prueth_emac_restart(prueth);
+ netdev_dbg(ndev, "Disabling HSR Offload mode\n");
+ }
+}
+
/* netdev notifier */
static int prueth_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
@@ -1045,6 +1198,8 @@ static int prueth_netdevice_event(struct notifier_block *unused,
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
int ret = NOTIFY_DONE;
if (ndev->netdev_ops != &emac_netdev_ops)
@@ -1054,6 +1209,25 @@ static int prueth_netdevice_event(struct notifier_block *unused,
case NETDEV_CHANGEUPPER:
info = ptr;
+ if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
+ is_hsr_master(info->upper_dev)) {
+ if (info->linking) {
+ if (!prueth->hsr_dev) {
+ prueth->hsr_dev = info->upper_dev;
+ icssg_class_set_host_mac_addr(prueth->miig_rt,
+ prueth->hsr_dev->dev_addr);
+ } else {
+ if (prueth->hsr_dev != info->upper_dev) {
+					netdev_dbg(ndev, "Both interfaces must be linked to the same upper device\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ prueth_hsr_port_link(ndev);
+ } else {
+ prueth_hsr_port_unlink(ndev);
+ }
+ }
+
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
@@ -1181,6 +1355,12 @@ static int prueth_probe(struct platform_device *pdev)
return -ENODEV;
}
+ prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
+ if (IS_ERR(prueth->pa_stats)) {
+ dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
+ prueth->pa_stats = NULL;
+ }
+
if (eth0_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
@@ -1271,8 +1451,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1285,8 +1465,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f678d656a3ed..bba6da2e6bd8 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -50,13 +50,18 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
+#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
-#define ICSSG_NUM_STATS 60
+#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
#define ICSSG_NUM_STANDARD_STATS 31
#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+#define PRUETH_UNDIRECTED_PKT_DST_TAG 0
+#define PRUETH_UNDIRECTED_PKT_TAG_INS BIT(30)
+
/* Firmware status codes */
#define ICSS_HS_FW_READY 0x55555555
#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
@@ -190,7 +195,8 @@ struct prueth_emac {
int port_vlan;
struct delayed_work stats_work;
- u64 stats[ICSSG_NUM_STATS];
+ u64 stats[ICSSG_NUM_MIIG_STATS];
+ u64 pa_stats[ICSSG_NUM_PA_STATS];
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
@@ -230,6 +236,7 @@ struct icssg_firmwares {
* @registered_netdevs: list of registered netdevs
* @miig_rt: regmap to mii_g_rt block
* @mii_rt: regmap to mii_rt block
+ * @pa_stats: regmap to pa_stats block
* @pru_id: ID for each of the PRUs
* @pdev: pointer to ICSSG platform device
* @pdata: pointer to platform data for ICSSG driver
@@ -239,11 +246,14 @@ struct icssg_firmwares {
* @iep1: pointer to IEP1 device
* @vlan_tbl: VLAN-FID table pointer
* @hw_bridge_dev: pointer to HW bridge net device
+ * @hsr_dev: pointer to the HSR net device
* @br_members: bitmask of bridge member ports
+ * @hsr_members: bitmask of hsr member ports
* @prueth_netdevice_nb: netdevice notifier block
* @prueth_switchdev_nb: switchdev notifier block
* @prueth_switchdev_bl_nb: switchdev blocking notifier block
* @is_switch_mode: flag to indicate if device is in Switch mode
+ * @is_hsr_offload_mode: flag to indicate if device is in hsr offload mode
* @is_switchmode_supported: indicates platform support for switch mode
* @switch_id: ID for mapping switch ports to bridge
* @default_vlan: Default VLAN for host
@@ -263,6 +273,7 @@ struct prueth {
struct net_device *registered_netdevs[PRUETH_NUM_MACS];
struct regmap *miig_rt;
struct regmap *mii_rt;
+ struct regmap *pa_stats;
enum pruss_pru_id pru_id[PRUSS_NUM_PRUS];
struct platform_device *pdev;
@@ -274,11 +285,14 @@ struct prueth {
struct prueth_vlan_tbl *vlan_tbl;
struct net_device *hw_bridge_dev;
+ struct net_device *hsr_dev;
u8 br_members;
+ u8 hsr_members;
struct notifier_block prueth_netdevice_nb;
struct notifier_block prueth_switchdev_nb;
struct notifier_block prueth_switchdev_bl_nb;
bool is_switch_mode;
+ bool is_hsr_offload_mode;
bool is_switchmode_supported;
unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
int default_vlan;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index e180c1166170..292f04d29f4f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -847,6 +847,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1045,8 +1046,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1059,8 +1060,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
}
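
Both probe hunks above switch flag-style devicetree properties over to of_property_read_bool(), which returns true exactly when the property is present, the same predicate the old of_find_property(...) != NULL spelled out. An illustrative kernel-style fragment of the idiom, not part of the driver:

	#include <linux/of.h>

	/* of_property_read_bool() is the canonical test for a boolean
	 * (presence-only) DT property. */
	static bool example_half_duplex_capable(const struct device_node *np)
	{
		return of_property_read_bool(np, "ti,half-duplex-capable");
	}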
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 2fb150c13078..8800bd3a8d07 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,6 +11,7 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
+#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -22,24 +23,34 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
int slice = prueth_emac_slice(emac);
u32 base = stats_base[slice];
u32 tx_pkt_cnt = 0;
- u32 val;
+ u32 val, reg;
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
&val);
regmap_write(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
val);
- if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
tx_pkt_cnt = val;
emac->stats[i] += val;
- if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
emac->stats[i] -= tx_pkt_cnt * 8;
}
+
+ if (prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ reg = ICSSG_FW_STATS_BASE +
+ icssg_all_pa_stats[i].offset *
+ PRUETH_NUM_MACS + slice * sizeof(u32);
+ regmap_read(prueth->pa_stats, reg, &val);
+ emac->pa_stats[i] += val;
+ }
+ }
}
void icssg_stats_work_handler(struct work_struct *work)
@@ -57,9 +68,16 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!strcmp(icssg_all_stats[i].name, stat_name))
- return emac->stats[icssg_all_stats[i].offset / sizeof(u32)];
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ if (!strcmp(icssg_all_miig_stats[i].name, stat_name))
+ return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)];
+ }
+
+ if (emac->prueth->pa_stats) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
+ return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ }
}
netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
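
The firmware PA counters are interleaved by slice: each struct pa_stats_regs field is stored once per MAC, so the slice 0 and slice 1 copies of a counter sit in adjacent 32-bit words. A runnable model of the address computation in emac_update_hardware_stats() above, assuming PRUETH_NUM_MACS is 2 (its value is not shown in this hunk):

	#include <stdint.h>
	#include <stdio.h>

	#define ICSSG_FW_STATS_BASE	0x0248
	#define PRUETH_NUM_MACS		2	/* assumption: two slices */

	/* Same arithmetic as the driver: consecutive fields of one slice
	 * are 8 bytes apart; the other slice's copy is the word between. */
	static uint32_t pa_stat_reg(uint32_t offset, int slice)
	{
		return ICSSG_FW_STATS_BASE + offset * PRUETH_NUM_MACS +
		       slice * (uint32_t)sizeof(uint32_t);
	}

	int main(void)
	{
		for (int slice = 0; slice < PRUETH_NUM_MACS; slice++)
			for (uint32_t off = 0; off < 16; off += 4)
				printf("slice %d, field 0x%02x -> reg 0x%03x\n",
				       slice, off, pa_stat_reg(off, slice));
		return 0;
	}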
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index 999a4a91276c..e88b919f532c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -77,82 +77,114 @@ struct miig_stats_regs {
u32 tx_bytes;
};
-#define ICSSG_STATS(field, stats_type) \
+#define ICSSG_MIIG_STATS(field, stats_type) \
{ \
#field, \
offsetof(struct miig_stats_regs, field), \
stats_type \
}
-struct icssg_stats {
+struct icssg_miig_stats {
char name[ETH_GSTRING_LEN];
u32 offset;
bool standard_stats;
};
-static const struct icssg_stats icssg_all_stats[] = {
+static const struct icssg_miig_stats icssg_all_miig_stats[] = {
/* Rx */
- ICSSG_STATS(rx_packets, true),
- ICSSG_STATS(rx_broadcast_frames, false),
- ICSSG_STATS(rx_multicast_frames, true),
- ICSSG_STATS(rx_crc_errors, true),
- ICSSG_STATS(rx_mii_error_frames, false),
- ICSSG_STATS(rx_odd_nibble_frames, false),
- ICSSG_STATS(rx_frame_max_size, true),
- ICSSG_STATS(rx_max_size_error_frames, false),
- ICSSG_STATS(rx_frame_min_size, true),
- ICSSG_STATS(rx_min_size_error_frames, false),
- ICSSG_STATS(rx_over_errors, true),
- ICSSG_STATS(rx_class0_hits, false),
- ICSSG_STATS(rx_class1_hits, false),
- ICSSG_STATS(rx_class2_hits, false),
- ICSSG_STATS(rx_class3_hits, false),
- ICSSG_STATS(rx_class4_hits, false),
- ICSSG_STATS(rx_class5_hits, false),
- ICSSG_STATS(rx_class6_hits, false),
- ICSSG_STATS(rx_class7_hits, false),
- ICSSG_STATS(rx_class8_hits, false),
- ICSSG_STATS(rx_class9_hits, false),
- ICSSG_STATS(rx_class10_hits, false),
- ICSSG_STATS(rx_class11_hits, false),
- ICSSG_STATS(rx_class12_hits, false),
- ICSSG_STATS(rx_class13_hits, false),
- ICSSG_STATS(rx_class14_hits, false),
- ICSSG_STATS(rx_class15_hits, false),
- ICSSG_STATS(rx_smd_frags, false),
- ICSSG_STATS(rx_bucket1_size, true),
- ICSSG_STATS(rx_bucket2_size, true),
- ICSSG_STATS(rx_bucket3_size, true),
- ICSSG_STATS(rx_bucket4_size, true),
- ICSSG_STATS(rx_64B_frames, true),
- ICSSG_STATS(rx_bucket1_frames, true),
- ICSSG_STATS(rx_bucket2_frames, true),
- ICSSG_STATS(rx_bucket3_frames, true),
- ICSSG_STATS(rx_bucket4_frames, true),
- ICSSG_STATS(rx_bucket5_frames, true),
- ICSSG_STATS(rx_bytes, true),
- ICSSG_STATS(rx_tx_total_bytes, false),
+ ICSSG_MIIG_STATS(rx_packets, true),
+ ICSSG_MIIG_STATS(rx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(rx_multicast_frames, true),
+ ICSSG_MIIG_STATS(rx_crc_errors, true),
+ ICSSG_MIIG_STATS(rx_mii_error_frames, false),
+ ICSSG_MIIG_STATS(rx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_max_size, true),
+ ICSSG_MIIG_STATS(rx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_min_size, true),
+ ICSSG_MIIG_STATS(rx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_over_errors, true),
+ ICSSG_MIIG_STATS(rx_class0_hits, false),
+ ICSSG_MIIG_STATS(rx_class1_hits, false),
+ ICSSG_MIIG_STATS(rx_class2_hits, false),
+ ICSSG_MIIG_STATS(rx_class3_hits, false),
+ ICSSG_MIIG_STATS(rx_class4_hits, false),
+ ICSSG_MIIG_STATS(rx_class5_hits, false),
+ ICSSG_MIIG_STATS(rx_class6_hits, false),
+ ICSSG_MIIG_STATS(rx_class7_hits, false),
+ ICSSG_MIIG_STATS(rx_class8_hits, false),
+ ICSSG_MIIG_STATS(rx_class9_hits, false),
+ ICSSG_MIIG_STATS(rx_class10_hits, false),
+ ICSSG_MIIG_STATS(rx_class11_hits, false),
+ ICSSG_MIIG_STATS(rx_class12_hits, false),
+ ICSSG_MIIG_STATS(rx_class13_hits, false),
+ ICSSG_MIIG_STATS(rx_class14_hits, false),
+ ICSSG_MIIG_STATS(rx_class15_hits, false),
+ ICSSG_MIIG_STATS(rx_smd_frags, false),
+ ICSSG_MIIG_STATS(rx_bucket1_size, true),
+ ICSSG_MIIG_STATS(rx_bucket2_size, true),
+ ICSSG_MIIG_STATS(rx_bucket3_size, true),
+ ICSSG_MIIG_STATS(rx_bucket4_size, true),
+ ICSSG_MIIG_STATS(rx_64B_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(rx_bytes, true),
+ ICSSG_MIIG_STATS(rx_tx_total_bytes, false),
/* Tx */
- ICSSG_STATS(tx_packets, true),
- ICSSG_STATS(tx_broadcast_frames, false),
- ICSSG_STATS(tx_multicast_frames, false),
- ICSSG_STATS(tx_odd_nibble_frames, false),
- ICSSG_STATS(tx_underflow_errors, false),
- ICSSG_STATS(tx_frame_max_size, true),
- ICSSG_STATS(tx_max_size_error_frames, false),
- ICSSG_STATS(tx_frame_min_size, true),
- ICSSG_STATS(tx_min_size_error_frames, false),
- ICSSG_STATS(tx_bucket1_size, true),
- ICSSG_STATS(tx_bucket2_size, true),
- ICSSG_STATS(tx_bucket3_size, true),
- ICSSG_STATS(tx_bucket4_size, true),
- ICSSG_STATS(tx_64B_frames, true),
- ICSSG_STATS(tx_bucket1_frames, true),
- ICSSG_STATS(tx_bucket2_frames, true),
- ICSSG_STATS(tx_bucket3_frames, true),
- ICSSG_STATS(tx_bucket4_frames, true),
- ICSSG_STATS(tx_bucket5_frames, true),
- ICSSG_STATS(tx_bytes, true),
+ ICSSG_MIIG_STATS(tx_packets, true),
+ ICSSG_MIIG_STATS(tx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(tx_multicast_frames, false),
+ ICSSG_MIIG_STATS(tx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(tx_underflow_errors, false),
+ ICSSG_MIIG_STATS(tx_frame_max_size, true),
+ ICSSG_MIIG_STATS(tx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_frame_min_size, true),
+ ICSSG_MIIG_STATS(tx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_bucket1_size, true),
+ ICSSG_MIIG_STATS(tx_bucket2_size, true),
+ ICSSG_MIIG_STATS(tx_bucket3_size, true),
+ ICSSG_MIIG_STATS(tx_bucket4_size, true),
+ ICSSG_MIIG_STATS(tx_64B_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(tx_bytes, true),
+};
+
+/**
+ * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
+ * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
+ * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
+ * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
+ * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
+ */
+struct pa_stats_regs {
+ u32 fw_rx_cnt;
+ u32 fw_tx_cnt;
+ u32 fw_tx_pre_overflow;
+ u32 fw_tx_exp_overflow;
+};
+
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ offsetof(struct pa_stats_regs, field), \
+}
+
+struct icssg_pa_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 offset;
+};
+
+static const struct icssg_pa_stats icssg_all_pa_stats[] = {
+ ICSSG_PA_STATS(fw_rx_cnt),
+ ICSSG_PA_STATS(fw_tx_cnt),
+ ICSSG_PA_STATS(fw_tx_pre_overflow),
+ ICSSG_PA_STATS(fw_tx_exp_overflow),
};
#endif /* __NET_TI_ICSSG_STATS_H */
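
The renamed macros keep the usual stringize-plus-offsetof table trick: one line per counter produces both the ethtool string and the register offset, so the name list and the register layout cannot drift apart. A self-contained userspace rendering of the same pattern:

	#include <stddef.h>
	#include <stdio.h>

	struct regs {
		unsigned int fw_rx_cnt;
		unsigned int fw_tx_cnt;
	};

	struct stat_desc {
		const char *name;
		size_t offset;
	};

	/* #field stringizes the member name; offsetof() pins its offset */
	#define STAT(field) { #field, offsetof(struct regs, field) }

	static const struct stat_desc stats[] = {
		STAT(fw_rx_cnt),
		STAT(fw_tx_cnt),
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
			printf("%s @ %zu\n", stats[i].name, stats[i].offset);
		return 0;
	}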
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d286709ca3b9..63e686f0b119 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2012,8 +2012,6 @@ static int keystone_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
info->tx_types =
@@ -2030,10 +2028,7 @@ static int keystone_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = 0;
info->rx_filters = 0;
return 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 87e67121477c..a4937c18d7cb 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2277,10 +2277,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
if (SPIDER_NET_RX_CSUM_DEFAULT)
netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
+ netdev->features |= NETIF_F_IP_CSUM;
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER
*/
+ netdev->lltx = true;
/* MTU range: 64 - 2294 */
netdev->min_mtu = SPIDER_NET_MIN_MTU;
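
This is the first of several hunks in this section (geneve, gtp, bpqether, ipvlan, loopback, macsec, macvlan and net_failover below follow suit) migrating lockless TX from the NETIF_F_LLTX feature bit to the private dev->lltx field. A minimal, illustrative sketch of the new convention:

	#include <linux/netdevice.h>

	/* lltx is now a plain netdev flag rather than a user-visible
	 * feature bit: the core skips netif_tx_lock when it is set. */
	static void example_setup(struct net_device *dev)
	{
		dev->lltx = true;	/* TX path serializes itself */
	}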
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index edd8b59680e5..a04d4073def9 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -377,8 +377,8 @@ static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
int ret;
bool first = true;
- if (txb->len < 60)
- pad = 60 - txb->len;
+ if (txb->len < ETH_ZLEN)
+ pad = ETH_ZLEN - txb->len;
while (1) {
mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
@@ -451,7 +451,7 @@ static void mse102x_tx_work(struct work_struct *work)
if (ret == -ETIMEDOUT) {
if (netif_msg_timer(mse))
- netdev_err(mse->ndev, "tx work timeout\n");
+ netdev_err_once(mse->ndev, "tx work timeout\n");
mse->stats.tx_timeout++;
}
@@ -485,8 +485,8 @@ static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
if (ret) {
eth_hw_addr_random(ndev);
- netdev_err(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_warn(ndev->dev.parent, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
}
}
@@ -622,8 +622,6 @@ static const struct ethtool_ops mse102x_ethtool_ops = {
/* driver bus management functions */
-#ifdef CONFIG_PM_SLEEP
-
static int mse102x_suspend(struct device *dev)
{
struct mse102x_net *mse = dev_get_drvdata(dev);
@@ -649,9 +647,8 @@ static int mse102x_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
static int mse102x_probe_spi(struct spi_device *spi)
{
@@ -736,9 +733,6 @@ static void mse102x_remove_spi(struct spi_device *spi)
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
- if (netif_msg_drv(mse))
- dev_info(&spi->dev, "remove\n");
-
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
}
@@ -761,7 +755,7 @@ static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = mse102x_match_table,
- .pm = &mse102x_pm_ops,
+ .pm = pm_sleep_ptr(&mse102x_pm_ops),
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
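
DEFINE_SIMPLE_DEV_PM_OPS() with pm_sleep_ptr() replaces the old #ifdef CONFIG_PM_SLEEP guards: the ops table is always compiled, and the pointer collapses to NULL when sleep support is configured out, letting the optimizer discard the callbacks. A sketch with hypothetical callback names:

	#include <linux/pm.h>

	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev)  { return 0; }

	static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
					example_suspend, example_resume);

	/* in the driver struct: .pm = pm_sleep_ptr(&example_pm_ops) */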
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 85cdbdd44fec..e46ccebcfd22 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -41,10 +41,9 @@ config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
+ depends on I2C_DESIGNWARE_PLATFORM
select MARVELL_10G_PHY
select REGMAP
- select I2C
- select I2C_DESIGNWARE_PLATFORM
select PHYLINK
select HWMON if TXGBE=y
select SFP
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 1eecba984f3b..2b3d6586f44a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -251,10 +251,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1d57b047817b..b54bffda027b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -426,9 +426,9 @@ enum WX_MSCA_CMD_value {
#define WX_MIN_RXD 128
#define WX_MIN_TXD 128
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Transmit and Receive Descriptors must be a multiple of 128 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 128
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index d6b2b3c781b6..cd1372da92a9 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -103,8 +103,7 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
- if (eeprom_ptrs)
- kvfree(eeprom_ptrs);
+ kvfree(eeprom_ptrs);
*checksum = TXGBE_EEPROM_SUM - *checksum;
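
The dropped NULL check is redundant: kvfree(), like kfree() and userspace free(), is defined as a no-op on a NULL pointer. The trivial runnable analog:

	#include <stdlib.h>

	int main(void)
	{
		void *p = NULL;

		free(p);	/* defined no-op on NULL, same contract as kvfree() */
		return 0;
	}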
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5f502265f0a6..67b61afdde96 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -688,8 +688,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
mii_bus->parent = &pdev->dev;
mii_bus->phy_mask = GENMASK(31, 1);
mii_bus->priv = wx;
- snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret) {
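
pci_dev_id() packs the bus number and devfn into one ID exactly as the open-coded expression it replaces. A runnable demonstration of the bit layout:

	#include <stdio.h>

	/* mirrors PCI_DEVID(): bus in the high byte, devfn in the low */
	static unsigned int pci_devid(unsigned int bus, unsigned int devfn)
	{
		return (bus << 8) | devfn;
	}

	int main(void)
	{
		/* e.g. bus 0x03, device 0, function 0 */
		printf("txgbe-%x\n", pci_devid(0x03, 0x00));	/* txgbe-300 */
		return 0;
	}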
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 1223fcc1a8da..d64b8abcf018 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -29,26 +29,26 @@
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
-#define XAE_OPTION_PROMISC (1 << 0)
+#define XAE_OPTION_PROMISC BIT(0)
/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
-#define XAE_OPTION_JUMBO (1 << 1)
+#define XAE_OPTION_JUMBO BIT(1)
/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
-#define XAE_OPTION_VLAN (1 << 2)
+#define XAE_OPTION_VLAN BIT(2)
/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
-#define XAE_OPTION_FLOW_CONTROL (1 << 4)
+#define XAE_OPTION_FLOW_CONTROL BIT(4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
* stripped. Default: disabled (set)
*/
-#define XAE_OPTION_FCS_STRIP (1 << 5)
+#define XAE_OPTION_FCS_STRIP BIT(5)
/* Generate FCS field and add PAD automatically for outgoing frames.
* Default: enabled (set)
*/
-#define XAE_OPTION_FCS_INSERT (1 << 6)
+#define XAE_OPTION_FCS_INSERT BIT(6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
@@ -56,13 +56,13 @@
* types of frames are encountered. When this option is cleared, the MAC will
* allow these types of frames to be received. Default: enabled (set)
*/
-#define XAE_OPTION_LENTYPE_ERR (1 << 7)
+#define XAE_OPTION_LENTYPE_ERR BIT(7)
/* Enable the transmitter. Default: enabled (set) */
-#define XAE_OPTION_TXEN (1 << 11)
+#define XAE_OPTION_TXEN BIT(11)
/* Enable the receiver. Default: enabled (set) */
-#define XAE_OPTION_RXEN (1 << 12)
+#define XAE_OPTION_RXEN BIT(12)
/* Default options set when device is initialized or reset */
#define XAE_OPTION_DEFAULTS \
@@ -156,6 +156,7 @@
#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
+#define XAE_STATS_OFFSET 0x00000200 /* Statistics counters */
#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
@@ -163,6 +164,7 @@
#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
+#define XAE_ABILITY_OFFSET 0x000004FC /* Ability Register offset */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
@@ -173,6 +175,8 @@
#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
+#define XAE_AM0_OFFSET 0x00000750 /* Frame Filter Mask Value Bytes 3-0 */
+#define XAE_AM1_OFFSET 0x00000754 /* Frame Filter Mask Value Bytes 7-4 */
#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
@@ -284,6 +288,16 @@
#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+/* Bit masks for Axi Ethernet ability register */
+#define XAE_ABILITY_PFC BIT(16)
+#define XAE_ABILITY_FRAME_FILTER BIT(10)
+#define XAE_ABILITY_HALF_DUPLEX BIT(9)
+#define XAE_ABILITY_STATS BIT(8)
+#define XAE_ABILITY_2_5G BIT(3)
+#define XAE_ABILITY_1G BIT(2)
+#define XAE_ABILITY_100M BIT(1)
+#define XAE_ABILITY_10M BIT(0)
+
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
@@ -327,11 +341,12 @@
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
-#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
-#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
-#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
-#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
-#define XAE_FEATURE_DMA_64BIT (1 << 4)
+#define XAE_FEATURE_PARTIAL_RX_CSUM BIT(0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM BIT(1)
+#define XAE_FEATURE_FULL_RX_CSUM BIT(2)
+#define XAE_FEATURE_FULL_TX_CSUM BIT(3)
+#define XAE_FEATURE_DMA_64BIT BIT(4)
+#define XAE_FEATURE_STATS BIT(5)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -345,6 +360,61 @@
#define XLNX_MII_STD_SELECT_REG 0x11
#define XLNX_MII_STD_SELECT_SGMII BIT(0)
+/* enum temac_stat - TEMAC statistics counters
+ *
+ * Index of statistics counters within the TEMAC. This must match the
+ * order/offset of hardware registers exactly.
+ */
+enum temac_stat {
+ STAT_RX_BYTES = 0,
+ STAT_TX_BYTES,
+ STAT_UNDERSIZE_FRAMES,
+ STAT_FRAGMENT_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTE_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_MAX_BYTE_FRAMES,
+ STAT_RX_OVERSIZE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTE_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_MAX_BYTE_FRAMES,
+ STAT_TX_OVERSIZE_FRAMES,
+ STAT_RX_GOOD_FRAMES,
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_CONTROL_FRAMES,
+ STAT_RX_LENGTH_ERRORS,
+ STAT_RX_VLAN_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_CONTROL_OPCODE_ERRORS,
+ STAT_TX_GOOD_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_UNDERRUN_ERRORS,
+ STAT_TX_CONTROL_FRAMES,
+ STAT_TX_VLAN_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_SINGLE_COLLISION_FRAMES,
+ STAT_TX_MULTIPLE_COLLISION_FRAMES,
+ STAT_TX_DEFERRED_FRAMES,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_EXCESS_COLLISIONS,
+ STAT_TX_EXCESS_DEFERRAL,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_TX_PFC_FRAMES,
+ STAT_RX_PFC_FRAMES,
+ STAT_USER_DEFINED0,
+ STAT_USER_DEFINED1,
+ STAT_USER_DEFINED2,
+ STAT_COUNT,
+};
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -435,6 +505,16 @@ struct skbuf_dma_descriptor {
* @tx_packets: TX packet count for statistics
* @tx_bytes: TX byte count for statistics
* @tx_stat_sync: Synchronization object for TX stats
+ * @hw_stat_base: Base offset for statistics counters. This may be nonzero if
+ * the statistics counters were reset or wrapped around.

+ * @hw_last_counter: Last-seen value of each statistic counter
+ * @reset_in_progress: Set while we are performing a reset and statistics
+ * counters may be invalid
+ * @hw_stats_seqcount: Sequence counter for @hw_stat_base, @hw_last_counter,
+ * and @reset_in_progress.
+ * @stats_lock: Lock for @hw_stats_seqcount
+ * @stats_work: Work for reading the hardware statistics counters often enough
+ * to catch overflows.
* @dma_err_task: Work structure to process Axi DMA errors
* @stopping: Set when @dma_err_task shouldn't do anything because we are
* about to stop the device.
@@ -449,8 +529,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
@@ -508,6 +586,13 @@ struct axienet_local {
u64_stats_t tx_bytes;
struct u64_stats_sync tx_stat_sync;
+ u64 hw_stat_base[STAT_COUNT];
+ u32 hw_last_counter[STAT_COUNT];
+ seqcount_mutex_t hw_stats_seqcount;
+ struct mutex stats_lock;
+ struct delayed_work stats_work;
+ bool reset_in_progress;
+
struct work_struct dma_err_task;
bool stopping;
@@ -522,9 +607,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- int csum_offload_on_tx_path;
- int csum_offload_on_rx_path;
-
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9eb300fc3590..ea7d7c03f48e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -415,6 +415,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -436,24 +437,27 @@ static void axienet_set_multicast_list(struct net_device *ndev)
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
- /* We must make the kernel realize we had to move into
- * promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it.
- */
- ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg &= ~XAE_FMI_PM_MASK;
+ if (ndev->flags & IFF_PROMISC)
reg |= XAE_FMI_PM_MASK;
+ else
+ reg &= ~XAE_FMI_PM_MASK;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
+ if (ndev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
+ reg &= 0xFFFFFF00;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
- dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
+ axienet_iow(lp, XAE_AF1_OFFSET, 0);
+ axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
+ axienet_iow(lp, XAE_AM1_OFFSET, 0);
+ axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ i = 1;
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
-
netdev_for_each_mc_addr(ha, ndev) {
if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
break;
@@ -466,25 +470,21 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg &= 0xFFFFFF00;
reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+ axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
+ axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
axienet_iow(lp, XAE_FFE_OFFSET, 1);
i++;
}
- } else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
-
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg &= 0xFFFFFF00;
reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_FFE_OFFSET, 0);
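
For reference, the address-filter words programmed above pack the MAC address little-endian: bytes 4-5 into AF1 as shown here, bytes 0-3 into AF0 in the context just above, with AM0/AM1 carrying the match mask for the same byte lanes. A runnable sketch of the packing:

	#include <stdio.h>

	int main(void)
	{
		unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
		unsigned int af0reg, af1reg;

		af0reg = addr[0] | (addr[1] << 8) | (addr[2] << 16) |
			 ((unsigned int)addr[3] << 24);
		af1reg = addr[4] | (addr[5] << 8);

		printf("AF0 = 0x%08x, AF1 = 0x%08x\n", af0reg, af1reg);
		return 0;
	}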
@@ -519,11 +519,55 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
+static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
+{
+ u32 counter;
+
+ if (lp->reset_in_progress)
+ return lp->hw_stat_base[stat];
+
+ counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+ return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
+}
+
+static void axienet_stats_update(struct axienet_local *lp, bool reset)
+{
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = reset;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+}
+
+static void axienet_refresh_stats(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ stats_work.work);
+
+ mutex_lock(&lp->stats_lock);
+ axienet_stats_update(lp, false);
+ mutex_unlock(&lp->stats_lock);
+
+ /* Just less than 2^32 bytes at 2.5 GBit/s */
+ schedule_delayed_work(&lp->stats_work, 13 * HZ);
+}
+
static int __axienet_device_reset(struct axienet_local *lp)
{
u32 value;
int ret;
+ /* Save statistics counters in case they will be reset */
+ mutex_lock(&lp->stats_lock);
+ if (lp->features & XAE_FEATURE_STATS)
+ axienet_stats_update(lp, true);
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -538,7 +582,7 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAXIDMA_TX_CR_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
- return ret;
+ goto out;
}
/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
@@ -548,10 +592,29 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAE_IS_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
- return ret;
+ goto out;
}
- return 0;
+ /* Update statistics counters with new values */
+ if (lp->features & XAE_FEATURE_STATS) {
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = false;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter =
+ axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] +=
+ lp->hw_last_counter[stat] - counter;
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+ }
+
+out:
+ mutex_unlock(&lp->stats_lock);
+ return ret;
}
/**
@@ -614,8 +677,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
- if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -1126,9 +1188,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -1297,7 +1357,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
ndev->stats.rx_missed_errors++;
if (pending & XAE_INT_RXRJECT_MASK)
- ndev->stats.rx_frame_errors++;
+ ndev->stats.rx_dropped++;
axienet_iow(lp, XAE_IS_OFFSET, pending);
return IRQ_HANDLED;
@@ -1516,8 +1576,6 @@ static int axienet_open(struct net_device *ndev)
int ret;
struct axienet_local *lp = netdev_priv(ndev);
- dev_dbg(&ndev->dev, "%s\n", __func__);
-
/* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
@@ -1534,6 +1592,9 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
+ /* Start the statistics refresh work */
+ schedule_delayed_work(&lp->stats_work, 0);
+
if (lp->use_dmaengine) {
/* Enable interrupts for Axi Ethernet core (if defined) */
if (lp->eth_irq > 0) {
@@ -1558,6 +1619,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
return ret;
@@ -1578,8 +1640,6 @@ static int axienet_stop(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
int i;
- dev_dbg(&ndev->dev, "axienet_close()\n");
-
if (!lp->use_dmaengine) {
WRITE_ONCE(lp->stopping, true);
flush_work(&lp->dma_err_task);
@@ -1588,6 +1648,8 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_delayed_work_sync(&lp->stats_work);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1662,6 +1724,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
+
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
axienet_rx_irq(lp->tx_irq, ndev);
@@ -1700,6 +1763,35 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = u64_stats_read(&lp->tx_packets);
stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ stats->rx_length_errors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ stats->rx_frame_errors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
+ stats->rx_length_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+
+ stats->tx_aborted_errors =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ stats->tx_fifo_errors =
+ axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
+ stats->tx_window_errors =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
static const struct net_device_ops axienet_netdev_ops = {
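
The hardware counters behind axienet_stat() are 32 bits wide; the driver widens them by keeping a 64-bit base plus the last raw sample, so the unsigned subtraction (raw - last) gives the true increment provided at most one wrap happened in between. That is also what the 13 s cadence of the refresh work buys: at 2.5 Gbit/s a byte counter wraps in about 2^32 / 312.5 MB/s, roughly 13.7 s, so sampling every 13 s catches every wrap, and readers retry through the seqcount instead of blocking the worker. A standalone model of the scheme:

	#include <stdint.h>
	#include <stdio.h>

	struct ext_counter {
		uint64_t base;	/* folded-in history */
		uint32_t last;	/* last raw hardware sample */
	};

	/* Unsigned 32-bit subtraction makes the delta wrap-safe as long
	 * as the counter is sampled at least once per wrap. */
	static uint64_t ext_read(struct ext_counter *c, uint32_t raw)
	{
		c->base += raw - c->last;
		c->last = raw;
		return c->base;
	}

	int main(void)
	{
		struct ext_counter c = { 0, 0 };

		ext_read(&c, 0xfffffff0u);		/* near the top */
		printf("%llu\n",
		       (unsigned long long)ext_read(&c, 0x10u));
		/* prints 4294967296: the wrap to 0x10 is accounted for */
		return 0;
	}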
@@ -1992,6 +2084,213 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(lp->phylink);
}
+static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ data[0] = axienet_stat(lp, STAT_RX_BYTES);
+ data[1] = axienet_stat(lp, STAT_TX_BYTES);
+ data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
+ data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
+ data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
+ data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
+ data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
+ data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
+ data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
+ "Received bytes",
+ "Transmitted bytes",
+ "RX Good VLAN Tagged Frames",
+ "TX Good VLAN Tagged Frames",
+ "TX Good PFC Frames",
+ "RX Good PFC Frames",
+ "User Defined Counter 0",
+ "User Defined Counter 1",
+ "User Defined Counter 2",
+};
+
+static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, axienet_ethtool_stats_strings,
+ sizeof(axienet_ethtool_stats_strings));
+ break;
+ }
+}
+
+static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (lp->features & XAE_FEATURE_STATS)
+ return ARRAY_SIZE(axienet_ethtool_stats_strings);
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+axienet_ethtools_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ pause_stats->tx_pause_frames =
+ axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
+ pause_stats->rx_pause_frames =
+ axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ mac_stats->FramesTransmittedOK =
+ axienet_stat(lp, STAT_TX_GOOD_FRAMES);
+ mac_stats->SingleCollisionFrames =
+ axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
+ mac_stats->MultipleCollisionFrames =
+ axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
+ mac_stats->FramesReceivedOK =
+ axienet_stat(lp, STAT_RX_GOOD_FRAMES);
+ mac_stats->FrameCheckSequenceErrors =
+ axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ mac_stats->AlignmentErrors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ mac_stats->FramesWithDeferredXmissions =
+ axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
+ mac_stats->LateCollisions =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ mac_stats->FramesAbortedDueToXSColls =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ mac_stats->MulticastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
+ mac_stats->FramesWithExcessiveDeferral =
+ axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
+ mac_stats->MulticastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
+ mac_stats->InRangeLengthErrors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ ctrl_stats->MACControlFramesTransmitted =
+ axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
+ ctrl_stats->MACControlFramesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void
+axienet_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ rmon_stats->undersize_pkts =
+ axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
+ rmon_stats->oversize_pkts =
+ axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
+ rmon_stats->fragments =
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES);
+
+ rmon_stats->hist[0] =
+ axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
+ rmon_stats->hist[1] =
+ axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
+ rmon_stats->hist[2] =
+ axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
+ rmon_stats->hist[3] =
+ axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
+ rmon_stats->hist[4] =
+ axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist[5] =
+ axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist[6] =
+ rmon_stats->oversize_pkts;
+
+ rmon_stats->hist_tx[0] =
+ axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
+ rmon_stats->hist_tx[1] =
+ axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
+ rmon_stats->hist_tx[2] =
+ axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
+ rmon_stats->hist_tx[3] =
+ axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
+ rmon_stats->hist_tx[4] =
+ axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist_tx[5] =
+ axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist_tx[6] =
+ axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+
+ *ranges = axienet_rmon_ranges;
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS,
@@ -2008,6 +2307,13 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
.nway_reset = axienet_ethtools_nway_reset,
+ .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
+ .get_strings = axienet_ethtools_get_strings,
+ .get_sset_count = axienet_ethtools_get_sset_count,
+ .get_pause_stats = axienet_ethtools_get_pause_stats,
+ .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
@@ -2280,6 +2586,10 @@ static int axienet_probe(struct platform_device *pdev)
u64_stats_init(&lp->rx_stat_sync);
u64_stats_init(&lp->tx_stat_sync);
+ mutex_init(&lp->stats_lock);
+ seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
+ INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2320,42 +2630,35 @@ static int axienet_probe(struct platform_device *pdev)
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
+ lp->features |= XAE_FEATURE_STATS;
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
- /* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ /* Can checksum any contiguous range */
+ ndev->features |= NETIF_F_HW_CSUM;
break;
case 2:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
break;
- default:
- lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_PARTIAL_RX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
case 2:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_FULL_RX_CSUM;
lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
- default:
- lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
@@ -2405,7 +2708,7 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
}
- if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+ if (!of_property_present(pdev->dev.of_node, "dmas")) {
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 56df37f8d50a..aef316278eb4 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1026,9 +1026,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
if (info->phc_index < 0) {
info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
info->so_timestamping =
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 838e85ddec67..7f611c74eb62 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1194,7 +1194,6 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -1215,6 +1214,7 @@ static void geneve_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
eth_hw_addr_random(dev);
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 2e94d10348cc..a60bfb1abb7f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1356,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 83a16d10eedb..bac1bb69d63a 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -458,7 +458,7 @@ static void bpq_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->flags = 0;
- dev->features = NETIF_F_LLTX; /* Allow recursion */
+ dev->lltx = true; /* Allow recursion */
#if IS_ENABLED(CONFIG_AX25)
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 810977952f95..e690b95b1bbb 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -882,7 +882,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
-#define VRSS_CHANNEL_DEFAULT 8
+#define VRSS_CHANNEL_DEFAULT 16
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 4a9522689fa4..e01c5997a551 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+ ret = dev_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 44142245343d..153b97f8ec0d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -987,7 +987,8 @@ struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
dev_info->bprog = prog;
}
} else {
- dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT,
+ netif_get_num_default_rss_queues());
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 65fd14da0f86..c572da9e9bc4 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -242,11 +242,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
int ret;
clk = clk_get(dev, "core");
- if (IS_ERR(clk)) {
- dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
-
- return ERR_CAST(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_cast_probe(dev, clk, "error getting core clock\n");
ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fef4eff7753a..b1afcb8740de 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
+#include <net/inet_dscp.h>
+
#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;
@@ -420,7 +422,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
int err, ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_tos = ip4h->tos & INET_DSCP_MASK,
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
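
flowi4_tos is now masked with INET_DSCP_MASK, which keeps the full 6-bit DSCP field and drops the 2 ECN bits, whereas the old RT_TOS() kept only the legacy TOS bits. A runnable comparison, with the mask values taken from the UAPI/net headers:

	#include <stdio.h>

	#define IPTOS_TOS_MASK	0x1e	/* legacy RFC 1349 TOS bits */
	#define RT_TOS(tos)	((tos) & IPTOS_TOS_MASK)
	#define INET_DSCP_MASK	0xfc	/* 6 DSCP bits, ECN dropped */

	int main(void)
	{
		unsigned char tos = 0xb9;	/* DSCP 46 (EF), ECN 01 */

		printf("RT_TOS: 0x%02x, DSCP mask: 0x%02x\n",
		       RT_TOS(tos), tos & INET_DSCP_MASK);	/* 0x18 vs 0xb8 */
		return 0;
	}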
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 094f44dac5c8..ee2c3cf4df36 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -114,7 +114,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
#define IPVLAN_ALWAYS_ON \
- (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_VLAN_CHALLENGED)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
@@ -141,6 +141,7 @@ static int ipvlan_init(struct net_device *dev)
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
+ dev->lltx = true;
netif_inherit_tso_max(dev, phy_dev);
dev->hard_header_len = phy_dev->hard_header_len;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2b486e7c749c..1993b90b1a5f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -171,6 +171,8 @@ static void gen_lo_setup(struct net_device *dev,
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -179,8 +181,6 @@ static void gen_lo_setup(struct net_device *dev,
| NETIF_F_RXCSUM
| NETIF_F_SCTP_CRC
| NETIF_F_HIGHDMA
- | NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL
| NETIF_F_VLAN_CHALLENGED
| NETIF_F_LOOPBACK;
dev->ethtool_ops = eth_ops;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2da70bc3dd86..12d1b205f6d1 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3550,7 +3550,8 @@ static int macsec_dev_init(struct net_device *dev)
return err;
dev->features = real_dev->features & MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->lltx = true;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3581,7 +3582,6 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
features &= (real_dev->features & MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
- features |= NETIF_F_LLTX;
return features;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 24298a33e0e9..cf18e66de142 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -900,7 +900,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
-#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
+#define ALWAYS_ON_FEATURES ALWAYS_ON_OFFLOADS
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
@@ -932,6 +932,7 @@ static int macvlan_init(struct net_device *dev)
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->vlan_features |= ALWAYS_ON_OFFLOADS;
dev->hw_enc_features |= dev->features;
+ dev->lltx = true;
netif_inherit_tso_max(dev, lowerdev);
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -1213,7 +1214,8 @@ void macvlan_common_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->change_proto_down = true;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = macvlan_dev_free;
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index fd02f5cbc853..b156493d7084 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dev_printk.h>
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
@@ -104,7 +105,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
+ dev_dbg(&mdio->dev, "registered phy fwnode %pfw at address %i\n",
child, addr);
return 0;
}
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index de08419d0c98..b70e6d1ad429 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -96,7 +96,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
{
- struct device_node *np2, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct mdio_mux_mmioreg_state *s;
struct resource res;
const __be32 *iprop;
@@ -109,52 +109,42 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
return -ENOMEM;
ret = of_address_to_resource(np, 0, &res);
- if (ret) {
- dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n",
- np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not obtain memory map for node %pOF\n", np);
s->phys = res.start;
s->iosize = resource_size(&res);
if (s->iosize != sizeof(uint8_t) &&
s->iosize != sizeof(uint16_t) &&
- s->iosize != sizeof(uint32_t)) {
- dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
- return -EINVAL;
- }
+ s->iosize != sizeof(uint32_t))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "only 8/16/32-bit registers are supported\n");
iprop = of_get_property(np, "mux-mask", &len);
- if (!iprop || len != sizeof(uint32_t)) {
- dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
- return -ENODEV;
- }
- if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) {
- dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
- return -EINVAL;
- }
+ if (!iprop || len != sizeof(uint32_t))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "missing or invalid mux-mask property\n");
+ if (be32_to_cpup(iprop) >= BIT(s->iosize * 8))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "only 8/16/32-bit registers are supported\n");
s->mask = be32_to_cpup(iprop);
/*
* Verify that the 'reg' property of each child MDIO bus does not
* set any bits outside of the 'mask'.
*/
- for_each_available_child_of_node(np, np2) {
+ for_each_available_child_of_node_scoped(np, np2) {
u64 reg;
- if (of_property_read_reg(np2, 0, &reg, NULL)) {
- dev_err(&pdev->dev, "mdio-mux child node %pOF is "
- "missing a 'reg' property\n", np2);
- of_node_put(np2);
- return -ENODEV;
- }
- if ((u32)reg & ~s->mask) {
- dev_err(&pdev->dev, "mdio-mux child node %pOF has "
- "a 'reg' value with unmasked bits\n",
- np2);
- of_node_put(np2);
- return -ENODEV;
- }
+ if (of_property_read_reg(np2, 0, &reg, NULL))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "mdio-mux child node %pOF is missing a 'reg' property\n",
+ np2);
+ if ((u32)reg & ~s->mask)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "mdio-mux child node %pOF has a 'reg' value with unmasked bits\n",
+ np2);
}
ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
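
The _scoped iterator releases the child node reference automatically when the loop variable goes out of scope, which is what lets the early dev_err_probe() returns above drop their of_node_put() calls. An illustrative fragment of the combined idiom, with hypothetical names:

	#include <linux/of.h>
	#include <linux/dev_printk.h>

	static int example_check_children(struct device *dev,
					  struct device_node *np)
	{
		/* child is put automatically on every exit path */
		for_each_available_child_of_node_scoped(np, child) {
			if (!of_property_present(child, "reg"))
				return dev_err_probe(dev, -ENODEV,
						     "child %pOF lacks 'reg'\n",
						     child);
		}
		return 0;
	}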
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 08e607f62e10..2f4fc664d2e1 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -390,7 +390,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect);
bool of_phy_is_fixed_link(struct device_node *np)
{
struct device_node *dn;
- int len, err;
+ int err;
const char *managed;
/* New binding */
@@ -405,8 +405,7 @@ bool of_phy_is_fixed_link(struct device_node *np)
return true;
/* Old binding */
- if (of_get_property(np, "fixed-link", &len) &&
- len == (5 * sizeof(__be32)))
+ if (of_property_count_u32_elems(np, "fixed-link") == 5)
return true;
return false;
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 963d8b4af28d..54c8b9d5b5fc 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -731,10 +731,10 @@ struct failover *net_failover_create(struct net_device *standby_dev)
IFF_TX_SKB_SHARING);
/* don't acquire failover netdev's netif_tx_lock when transmitting */
- failover_dev->features |= NETIF_F_LLTX;
+ failover_dev->lltx = true;
/* Don't allow failover devices to change network namespaces. */
- failover_dev->features |= NETIF_F_NETNS_LOCAL;
+ failover_dev->netns_local = true;
failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 9c09293b5258..01cf33fa7503 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -37,8 +37,9 @@
#include <linux/configfs.h>
#include <linux/etherdevice.h>
#include <linux/utsname.h>
+#include <linux/rtnetlink.h>
-MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
+MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
@@ -72,9 +73,16 @@ __setup("netconsole=", option_setup);
/* Linked list of all configured targets */
static LIST_HEAD(target_list);
+/* target_cleanup_list is used to track targets that need to be cleaned up
+ * outside of target_list_lock. It should be drained in the same function
+ * that populates it.
+ */
+static LIST_HEAD(target_cleanup_list);
/* This needs to be a spinlock because write_msg() cannot sleep */
static DEFINE_SPINLOCK(target_list_lock);
+/* This needs to be a mutex because netpoll_cleanup might sleep */
+static DEFINE_MUTEX(target_cleanup_list_lock);
/*
* Console driver for extended netconsoles. Registered on the first use to
@@ -210,6 +218,33 @@ static struct netconsole_target *alloc_and_init(void)
return nt;
}
+/* Clean up every target in the cleanup_list and move the cleaned targets
+ * back to the main target_list.
+ */
+static void netconsole_process_cleanups_core(void)
+{
+ struct netconsole_target *nt, *tmp;
+ unsigned long flags;
+
+ /* The cleanup needs RTNL locked */
+ ASSERT_RTNL();
+
+ mutex_lock(&target_cleanup_list_lock);
+ list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) {
+ /* all entries in the cleanup_list need to be disabled */
+ WARN_ON_ONCE(nt->enabled);
+ do_netpoll_cleanup(&nt->np);
+ /* move the cleaned target back to target_list; both locks must
+ * be held
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_move(&nt->list, &target_list);
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ }
+ WARN_ON_ONCE(!list_empty(&target_cleanup_list));
+ mutex_unlock(&target_cleanup_list_lock);
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -246,6 +281,19 @@ static struct netconsole_target *to_target(struct config_item *item)
struct netconsole_target, group);
}
+/* Do the list cleanup with the rtnl lock held. The rtnl lock is necessary
+ * because the netdev might be cleaned up by calling __netpoll_cleanup().
+ */
+static void netconsole_process_cleanups(void)
+{
+ /* rtnl lock is taken here because it ranks above the
+ * target_cleanup_list_lock mutex in the locking order
+ */
+ rtnl_lock();
+ netconsole_process_cleanups_core();
+ rtnl_unlock();
+}
+
/* Get rid of possible trailing newline, returning the new length */
static void trim_newline(char *s, size_t maxlen)
{
@@ -336,14 +384,14 @@ static ssize_t enabled_store(struct config_item *item,
struct netconsole_target *nt = to_target(item);
unsigned long flags;
bool enabled;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
- err = kstrtobool(buf, &enabled);
- if (err)
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
goto out_unlock;
- err = -EINVAL;
+ ret = -EINVAL;
if (enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
@@ -365,8 +413,8 @@ static ssize_t enabled_store(struct config_item *item,
*/
netpoll_print_options(&nt->np);
- err = netpoll_setup(&nt->np);
- if (err)
+ ret = netpoll_setup(&nt->np);
+ if (ret)
goto out_unlock;
nt->enabled = true;
@@ -376,17 +424,23 @@ static ssize_t enabled_store(struct config_item *item,
* otherwise we might end up in write_msg() with
* nt->np.dev == NULL and nt->enabled == true
*/
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
nt->enabled = false;
+ /* Remove the target from the list, while holding
+ * target_list_lock
+ */
+ list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- netpoll_cleanup(&nt->np);
+ mutex_unlock(&target_cleanup_list_lock);
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
+ /* Deferred cleanup */
+ netconsole_process_cleanups();
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t release_store(struct config_item *item, const char *buf,
@@ -394,27 +448,26 @@ static ssize_t release_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool release;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &release);
- if (err)
+ ret = kstrtobool(buf, &release);
+ if (ret)
goto out_unlock;
nt->release = release;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t extended_store(struct config_item *item, const char *buf,
@@ -422,27 +475,25 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool extended;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &extended);
- if (err)
+ ret = kstrtobool(buf, &extended);
+ if (ret)
goto out_unlock;
nt->extended = extended;
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t dev_name_store(struct config_item *item, const char *buf,
@@ -469,7 +520,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -478,21 +529,20 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.local_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.local_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t remote_port_store(struct config_item *item,
const char *buf, size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -501,20 +551,20 @@ static ssize_t remote_port_store(struct config_item *item,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.remote_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.remote_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t local_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -541,17 +591,17 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -578,11 +628,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
@@ -590,6 +639,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
u8 remote_mac[ETH_ALEN];
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -604,11 +654,10 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
struct userdatum {
@@ -685,7 +734,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdatum *udm = to_userdatum(item);
struct netconsole_target *nt;
struct userdata *ud;
- int ret;
+ ssize_t ret;
if (count > MAX_USERDATA_VALUE_LENGTH)
return -EMSGSIZE;
@@ -700,9 +749,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ud = to_userdata(item->ci_parent);
nt = userdata_to_target(ud);
update_userdata(nt);
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return count;
+ ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
return ret;
@@ -778,7 +825,7 @@ static struct configfs_group_operations userdata_ops = {
.drop_item = userdatum_drop,
};
-static struct config_item_type userdata_type = {
+static const struct config_item_type userdata_type = {
.ct_item_ops = &userdatum_ops,
.ct_group_ops = &userdata_ops,
.ct_attrs = userdata_attrs,
@@ -950,7 +997,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned long flags;
- struct netconsole_target *nt;
+ struct netconsole_target *nt, *tmp;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
bool stopped = false;
@@ -958,9 +1005,9 @@ static int netconsole_netdev_event(struct notifier_block *this,
event == NETDEV_RELEASE || event == NETDEV_JOIN))
goto done;
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
-restart:
- list_for_each_entry(nt, &target_list, list) {
+ list_for_each_entry_safe(nt, tmp, &target_list, list) {
netconsole_target_get(nt);
if (nt->np.dev == dev) {
switch (event) {
@@ -970,25 +1017,16 @@ restart:
case NETDEV_RELEASE:
case NETDEV_JOIN:
case NETDEV_UNREGISTER:
- /* rtnl_lock already held
- * we might sleep in __netpoll_cleanup()
- */
nt->enabled = false;
- spin_unlock_irqrestore(&target_list_lock, flags);
-
- __netpoll_cleanup(&nt->np);
-
- spin_lock_irqsave(&target_list_lock, flags);
- netdev_put(nt->np.dev, &nt->np.dev_tracker);
- nt->np.dev = NULL;
+ list_move(&nt->list, &target_cleanup_list);
stopped = true;
- netconsole_target_put(nt);
- goto restart;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ mutex_unlock(&target_cleanup_list_lock);
+
if (stopped) {
const char *msg = "had an event";
@@ -1007,6 +1045,11 @@ restart:
dev->name, msg);
}
+ /* Process target_cleanup_list entries. By the end, target_cleanup_list
+ * should be empty
+ */
+ netconsole_process_cleanups_core();
+
done:
return NOTIFY_DONE;
}
@@ -1215,11 +1258,18 @@ static struct netconsole_target *alloc_param_target(char *target_config,
goto fail;
err = netpoll_setup(&nt->np);
- if (err)
- goto fail;
-
+ if (err) {
+ pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n",
+ NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
+ if (!IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC))
+ /* only fail when dynamic reconfiguration is not enabled;
+ * otherwise, keep the target in the list, but disabled.
+ */
+ goto fail;
+ } else {
+ nt->enabled = true;
+ }
populate_configfs_item(nt, cmdline_count);
- nt->enabled = true;
return nt;
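
The netconsole changes above keep a single lock order throughout: rtnl_lock,
then the target_cleanup_list_lock mutex (cleanup may sleep), then the
target_list_lock spinlock (write_msg() runs in atomic context). A minimal
sketch of the deferred-cleanup pattern, with a hypothetical helper name:

	static void detach_target(struct netconsole_target *nt)
	{
		unsigned long flags;

		mutex_lock(&target_cleanup_list_lock);
		spin_lock_irqsave(&target_list_lock, flags);
		nt->enabled = false;
		/* no sleeping work is done under the spinlock */
		list_move(&nt->list, &target_cleanup_list);
		spin_unlock_irqrestore(&target_list_lock, flags);
		mutex_unlock(&target_cleanup_list_lock);
	}

	/* later, from a context that may sleep */
	rtnl_lock();
	netconsole_process_cleanups_core();
	rtnl_unlock();
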
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 16789cd446e9..059269557d92 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -65,6 +65,7 @@ static struct netkit *netkit_priv(const struct net_device *dev)
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct netkit *nk = netkit_priv(dev);
enum netkit_action ret = READ_ONCE(nk->policy);
netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
@@ -72,6 +73,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
int len = skb->len;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (unlikely(!peer || !(peer->flags & IFF_UP) ||
@@ -110,6 +112,7 @@ drop_stats:
break;
}
rcu_read_unlock();
+ bpf_net_ctx_clear(bpf_net_ctx);
return ret_dev;
}
@@ -255,11 +258,13 @@ static void netkit_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ dev->lltx = true;
dev->ethtool_ops = &netkit_ethtool_ops;
dev->netdev_ops = &netkit_netdev_ops;
- dev->features |= netkit_features | NETIF_F_LLTX;
+ dev->features |= netkit_features;
dev->hw_features = netkit_features;
dev->hw_enc_features = netkit_features;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index e5a0987a263e..8bfd4ee5a8c4 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -63,13 +63,13 @@ static void nlmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_NETLINK;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->netdev_ops = &nlmon_ops;
dev->ethtool_ops = &nlmon_ethtool_ops;
dev->needs_free_netdev = true;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
dev->flags = IFF_NOARP;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 7fddc8306d82..01b235b3bb7e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -44,6 +44,9 @@ config LED_TRIGGER_PHY
<Speed in megabits>Mbps OR <Speed in gigabits>Gbps OR link
for any speed known to the PHY.
+config OPEN_ALLIANCE_HELPERS
+ bool
+
config PHYLIB_LEDS
def_bool OF
depends on LEDS_CLASS=y || LEDS_CLASS=PHYLIB
@@ -109,6 +112,13 @@ config ADIN1100_PHY
Currently supports the:
- ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY
+config AMCC_QT2025_PHY
+ tristate "AMCC QT2025 PHY"
+ depends on RUST_PHYLIB_ABSTRACTIONS
+ depends on RUST_FW_LOADER_ABSTRACTIONS
+ help
+ Adds support for the Applied Micro Circuits Corporation QT2025 PHY.
+
source "drivers/net/phy/aquantia/Kconfig"
config AX88796B_PHY
@@ -414,6 +424,7 @@ config DP83TD510_PHY
config DP83TG720_PHY
tristate "Texas Instruments DP83TG720 Ethernet 1000Base-T1 PHY"
+ select OPEN_ALLIANCE_HELPERS
help
The DP83TG720S-Q1 is an automotive Ethernet physical layer
transceiver compliant with IEEE 802.3bp and Open Alliance
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 202ed7f450da..90f886844381 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,7 +2,7 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o
+ linkmode.o phy_link_topology.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -22,6 +22,7 @@ endif
obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
+libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
+obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index 5c92572962dc..8c7eb009d9fc 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -6,7 +6,7 @@
//! C version of this driver: [`drivers/net/phy/ax88796b.c`](./ax88796b.c)
use kernel::{
c_str,
- net::phy::{self, DeviceId, Driver},
+ net::phy::{self, reg::C22, DeviceId, Driver},
prelude::*,
uapi,
};
@@ -24,7 +24,6 @@ kernel::module_phy_driver! {
license: "GPL",
}
-const MII_BMCR: u16 = uapi::MII_BMCR as u16;
const BMCR_SPEED100: u16 = uapi::BMCR_SPEED100 as u16;
const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16;
@@ -33,7 +32,7 @@ const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16;
// Toggle BMCR_RESET bit off to accommodate broken AX8796B PHY implementation
// such as used on the Individual Computers' X-Surf 100 Zorro card.
fn asix_soft_reset(dev: &mut phy::Device) -> Result {
- dev.write(uapi::MII_BMCR as u16, 0)?;
+ dev.write(C22::BMCR, 0)?;
dev.genphy_soft_reset()
}
@@ -55,7 +54,7 @@ impl Driver for PhyAX88772A {
}
// If MII_LPA is 0, phy_resolve_aneg_linkmode() will fail to resolve
// linkmode so use MII_BMCR as default values.
- let ret = dev.read(MII_BMCR)?;
+ let ret = dev.read(C22::BMCR)?;
if ret & BMCR_SPEED100 != 0 {
dev.set_speed(uapi::SPEED_100);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index efeb643c1373..fc247f479257 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -271,8 +271,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -292,8 +291,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -691,10 +689,9 @@ static int dp83822_read_straps(struct phy_device *phydev)
return 0;
}
-static int dp83822_probe(struct phy_device *phydev)
+static int dp8382x_probe(struct phy_device *phydev)
{
struct dp83822_private *dp83822;
- int ret;
dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
GFP_KERNEL);
@@ -703,6 +700,20 @@ static int dp83822_probe(struct phy_device *phydev)
phydev->priv = dp83822;
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
+
+ dp83822 = phydev->priv;
+
ret = dp83822_read_straps(phydev);
if (ret)
return ret;
@@ -717,14 +728,11 @@ static int dp83822_probe(struct phy_device *phydev)
static int dp83826_probe(struct phy_device *phydev)
{
- struct dp83822_private *dp83822;
-
- dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
- GFP_KERNEL);
- if (!dp83822)
- return -ENOMEM;
+ int ret;
- phydev->priv = dp83822;
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
dp83826_of_init(phydev);
@@ -795,6 +803,7 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp8382x_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp8382x_config_init, \
.get_wol = dp83822_get_wol, \
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 551e37786c2d..92aa3a2b9744 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -58,6 +58,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_priv {
+ bool alcd_test_active;
+};
+
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
*
* I assume that this PHY is using a variation of Spread Spectrum Time Domain
@@ -169,6 +173,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_ALCD_STAT 0xa9f
+#define DP83TD510E_ALCD_COMPLETE BIT(15)
+#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -325,8 +333,23 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev)
*/
static int dp83td510_cable_test_start(struct phy_device *phydev)
{
+ struct dp83td510_priv *priv = phydev->priv;
int ret;
+ /* If link partner is active, we won't be able to use TDR, since
+ * we can't force link partner to be silent. The autonegotiation
+ * pulses will be too frequent and the TDR sequence will be
+ * too long. So, TDR will always fail. Since the link is established
+ * we already know that the cable is working, so we can get some
+ * extra information, like the cable length, using ALCD.
+ */
+ if (phydev->link) {
+ priv->alcd_test_active = true;
+ return 0;
+ }
+
+ priv->alcd_test_active = false;
+
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
DP83TD510E_CTRL_HW_RESET);
if (ret)
@@ -402,8 +425,8 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
}
/**
- * dp83td510_cable_test_get_status - Get the status of the cable test for the
- * DP83TD510 PHY.
+ * dp83td510_cable_test_get_tdr_status - Get the status of the TDR test for the
+ * DP83TD510 PHY.
* @phydev: Pointer to the phy_device structure.
* @finished: Pointer to a boolean that indicates whether the test is finished.
*
@@ -411,13 +434,11 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
*
* Returns: 0 on success or a negative error code on failure.
*/
-static int dp83td510_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
+static int dp83td510_cable_test_get_tdr_status(struct phy_device *phydev,
+ bool *finished)
{
int ret, stat;
- *finished = false;
-
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG);
if (ret < 0)
return ret;
@@ -459,6 +480,77 @@ static int dp83td510_cable_test_get_status(struct phy_device *phydev,
return phy_init_hw(phydev);
}
+/**
+ * dp83td510_cable_test_get_alcd_status - Get the status of the ALCD test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ * The function reads the cable length and reports it to the user.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_alcd_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned int location;
+ int ret, phy_sts;
+
+ phy_sts = phy_read(phydev, DP83TD510E_PHY_STS);
+
+ if (!(phy_sts & DP83TD510E_LINK_STATUS)) {
+ /* If the link is down, we can't do anything useful now */
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+ return 0;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_ALCD_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & DP83TD510E_ALCD_COMPLETE))
+ return 0;
+
+ location = FIELD_GET(DP83TD510E_ALCD_CABLE_LENGTH, ret) * 100;
+
+ ethnl_cable_test_fault_length_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ location,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+
+ return 0;
+}
+
+/**
+ * dp83td510_cable_test_get_status - Get the status of the cable test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ *finished = false;
+
+ if (priv->alcd_test_active)
+ return dp83td510_cable_test_get_alcd_status(phydev, finished);
+
+ return dp83td510_cable_test_get_tdr_status(phydev, finished);
+}
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -477,12 +569,27 @@ static int dp83td510_get_features(struct phy_device *phydev)
return 0;
}
+static int dp83td510_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83td510_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83td510_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83td510_probe,
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
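
A hedged note on units in the ALCD path above: the multiply by 100 suggests
the register field in bits 10:0 holds whole meters, while ethtool's
cable-test fault-length attribute is expressed in centimeters.

	location = FIELD_GET(DP83TD510E_ALCD_CABLE_LENGTH, ret) * 100;
	/* e.g. a raw field value of 42 becomes 4200 cm, shown as 42 m */
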
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index c706429b225a..0ef4d7dba065 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -3,10 +3,13 @@
* Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include "open_alliance_helpers.h"
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -14,6 +17,17 @@
#define DP83TG720S_STS_MII_INT BIT(7)
#define DP83TG720S_LINK_STATUS BIT(0)
+/* TDR Configuration Register (0x1E) */
+#define DP83TG720S_TDR_CFG 0x1e
+/* 1b = TDR start, 0b = No TDR */
+#define DP83TG720S_TDR_START BIT(15)
+/* 1b = TDR auto on link down, 0b = Manual TDR start */
+#define DP83TG720S_CFG_TDR_AUTO_RUN BIT(14)
+/* 1b = TDR done, 0b = TDR in progress */
+#define DP83TG720S_TDR_DONE BIT(1)
+/* 1b = TDR fail, 0b = TDR success */
+#define DP83TG720S_TDR_FAIL BIT(0)
+
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
@@ -22,18 +36,155 @@
/* Power Mode 0 is Normal mode */
#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+/* Open Alliance 1000BaseT1 compatible HDD.TDR Fault Status Register */
+#define DP83TG720S_TDR_FAULT_STATUS 0x30f
+
+/* Register 0x0301: TDR Configuration 2 */
+#define DP83TG720S_TDR_CFG2 0x301
+
+/* Register 0x0303: TDR Configuration 3 */
+#define DP83TG720S_TDR_CFG3 0x303
+
+/* Register 0x0304: TDR Configuration 4 */
+#define DP83TG720S_TDR_CFG4 0x304
+
+/* Register 0x0405: Unknown Register */
+#define DP83TG720S_UNKNOWN_0405 0x405
+
+/* Register 0x0576: TDR Master Link Down Control */
+#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/* Register 0x083F: Unknown Register */
+#define DP83TG720S_UNKNOWN_083F 0x83f
+
#define DP83TG720S_SQI_REG_1 0x871
#define DP83TG720S_SQI_OUT_WORST GENMASK(7, 5)
#define DP83TG720S_SQI_OUT GENMASK(3, 1)
#define DP83TG720_SQI_MAX 7
+/**
+ * dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This sequence is based on the documented procedure for the DP83TG720 PHY.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int dp83tg720_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Initialize the PHY to run the TDR test as described in the
+ * "DP83TG720S-Q1: Configuring for Open Alliance Specification
+ * Compliance (Rev. B)" application note.
+ * Most of the registers are not documented. Some of the register names
+ * are guessed by comparing the register offsets with the DP83TD510E.
+ */
+
+ /* Force master link down */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_MASTER_LINK_DOWN, 0x0400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG2,
+ 0xa008);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG3,
+ 0x0928);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG4,
+ 0x0004);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_0405,
+ 0x6400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_083F,
+ 0x3003);
+ if (ret)
+ return ret;
+
+ /* Start the TDR */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG,
+ DP83TG720S_TDR_START);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * dp83tg720_cable_test_get_status - Get the status of the cable test for the
+ * DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, stat;
+
+ *finished = false;
+
+ /* Read the TDR status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG);
+ if (ret < 0)
+ return ret;
+
+ /* Check if the TDR test is done */
+ if (!(ret & DP83TG720S_TDR_DONE))
+ return 0;
+
+ /* Check for TDR test failure */
+ if (!(ret & DP83TG720S_TDR_FAIL)) {
+ int location;
+
+ /* Read fault status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_FAULT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* Get fault type */
+ stat = oa_1000bt1_get_ethtool_cable_result_code(ret);
+
+ /* Determine fault location */
+ location = oa_1000bt1_get_tdr_distance(ret);
+ if (location > 0)
+ ethnl_cable_test_fault_length(phydev,
+ ETHTOOL_A_CABLE_PAIR_A,
+ location);
+ } else {
+ /* Active link partner or other issues */
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+
+ *finished = true;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+
+ return phy_init_hw(phydev);
+}
+
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
int ret;
@@ -195,12 +346,15 @@ static struct phy_driver dp83tg720_driver[] = {
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
+ .flags = PHY_POLL_CABLE_TEST,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
.config_init = dp83tg720_config_init,
.get_sqi = dp83tg720_get_sqi,
.get_sqi_max = dp83tg720_get_sqi_max,
+ .cable_test_start = dp83tg720_cable_test_start,
+ .cable_test_get_status = dp83tg720_cable_test_get_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index b88398e6872b..0b777cdd7078 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -553,6 +553,8 @@ static const struct sfp_upstream_ops sfp_phy_ops = {
.link_down = mv2222_sfp_link_down,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int mv2222_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b89fbffa6a93..9964bf3dea2f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3613,6 +3613,8 @@ static const struct sfp_upstream_ops m88e1510_sfp_ops = {
.module_remove = m88e1510_sfp_remove,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int m88e1510_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index ad43e280930c..6642eb642d4b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -503,6 +503,8 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
static const struct sfp_upstream_ops mv3310_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
.module_insert = mv3310_sfp_insert,
};
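
The three Marvell hunks above wire the same pair of generic helpers into
each driver's sfp_upstream_ops, so a PHY sitting on an SFP module is
connected to, and disconnected from, the upstream PHY's netdev, presumably
feeding the phy_link_topology tracking that the Makefile hunk above adds to
libphy. The resulting ops block looks the same everywhere (sketch, with a
hypothetical name):

	static const struct sfp_upstream_ops foo_sfp_ops = {
		.attach		= phy_sfp_attach,
		.detach		= phy_sfp_detach,
		.connect_phy	= phy_sfp_connect_phy,
		.disconnect_phy	= phy_sfp_disconnect_phy,
	};
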
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a35528497a57..a5ef8fe50704 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -12,6 +12,7 @@
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
+#define PHY_ID_LAN887X 0x0007c1f0
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
@@ -94,8 +95,155 @@
/* SQI defines */
#define LAN87XX_MAX_SQI 0x07
+/* Chiptop registers */
+#define LAN887X_PMA_EXT_ABILITY_2 0x12
+#define LAN887X_PMA_EXT_ABILITY_2_1000T1 BIT(1)
+#define LAN887X_PMA_EXT_ABILITY_2_100T1 BIT(0)
+
+/* DSP 100M registers */
+#define LAN887x_CDR_CONFIG1_100 0x0405
+#define LAN887x_LOCK1_EQLSR_CONFIG_100 0x0411
+#define LAN887x_SLV_HD_MUFAC_CONFIG_100 0x0417
+#define LAN887x_PLOCK_MUFAC_CONFIG_100 0x041c
+#define LAN887x_PROT_DISABLE_100 0x0425
+#define LAN887x_KF_LOOP_SAT_CONFIG_100 0x0454
+
+/* DSP 1000M registers */
+#define LAN887X_LOCK1_EQLSR_CONFIG 0x0811
+#define LAN887X_LOCK3_EQLSR_CONFIG 0x0813
+#define LAN887X_PROT_DISABLE 0x0825
+#define LAN887X_FFE_GAIN6 0x0843
+#define LAN887X_FFE_GAIN7 0x0844
+#define LAN887X_FFE_GAIN8 0x0845
+#define LAN887X_FFE_GAIN9 0x0846
+#define LAN887X_ECHO_DELAY_CONFIG 0x08ec
+#define LAN887X_FFE_MAX_CONFIG 0x08ee
+
+/* PCS 1000M registers */
+#define LAN887X_SCR_CONFIG_3 0x8043
+#define LAN887X_INFO_FLD_CONFIG_5 0x8048
+
+/* T1 afe registers */
+#define LAN887X_ZQCAL_CONTROL_1 0x8080
+#define LAN887X_AFE_PORT_TESTBUS_CTRL2 0x8089
+#define LAN887X_AFE_PORT_TESTBUS_CTRL4 0x808b
+#define LAN887X_AFE_PORT_TESTBUS_CTRL6 0x808d
+#define LAN887X_TX_AMPLT_1000T1_REG 0x80b0
+#define LAN887X_INIT_COEFF_DFE1_100 0x0422
+
+/* PMA registers */
+#define LAN887X_DSP_PMA_CONTROL 0x810e
+#define LAN887X_DSP_PMA_CONTROL_LNK_SYNC BIT(4)
+
+/* PCS 100M registers */
+#define LAN887X_IDLE_ERR_TIMER_WIN 0x8204
+#define LAN887X_IDLE_ERR_CNT_THRESH 0x8213
+
+/* Misc registers */
+#define LAN887X_REG_REG26 0x001a
+#define LAN887X_REG_REG26_HW_INIT_SEQ_EN BIT(8)
+
+/* Mis registers */
+#define LAN887X_MIS_CFG_REG0 0xa00
+#define LAN887X_MIS_CFG_REG0_RCLKOUT_DIS BIT(5)
+#define LAN887X_MIS_CFG_REG0_MAC_MODE_SEL GENMASK(1, 0)
+
+#define LAN887X_MAC_MODE_RGMII 0x01
+#define LAN887X_MAC_MODE_SGMII 0x03
+
+#define LAN887X_MIS_DLL_CFG_REG0 0xa01
+#define LAN887X_MIS_DLL_CFG_REG1 0xa02
+
+#define LAN887X_MIS_DLL_DELAY_EN BIT(15)
+#define LAN887X_MIS_DLL_EN BIT(0)
+#define LAN887X_MIS_DLL_CONF (LAN887X_MIS_DLL_DELAY_EN |\
+ LAN887X_MIS_DLL_EN)
+
+#define LAN887X_MIS_CFG_REG2 0xa03
+#define LAN887X_MIS_CFG_REG2_FE_LPBK_EN BIT(2)
+
+#define LAN887X_MIS_PKT_STAT_REG0 0xa06
+#define LAN887X_MIS_PKT_STAT_REG1 0xa07
+#define LAN887X_MIS_PKT_STAT_REG3 0xa09
+#define LAN887X_MIS_PKT_STAT_REG4 0xa0a
+#define LAN887X_MIS_PKT_STAT_REG5 0xa0b
+#define LAN887X_MIS_PKT_STAT_REG6 0xa0c
+
+/* Chiptop common registers */
+#define LAN887X_COMMON_LED3_LED2 0xc05
+#define LAN887X_COMMON_LED2_MODE_SEL_MASK GENMASK(4, 0)
+#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0
+
+/* MX chip top registers */
+#define LAN887X_CHIP_HARD_RST 0xf03e
+#define LAN887X_CHIP_HARD_RST_RESET BIT(0)
+
+#define LAN887X_CHIP_SOFT_RST 0xf03f
+#define LAN887X_CHIP_SOFT_RST_RESET BIT(0)
+
+#define LAN887X_SGMII_CTL 0xf01a
+#define LAN887X_SGMII_CTL_SGMII_MUX_EN BIT(0)
+
+#define LAN887X_SGMII_PCS_CFG 0xf034
+#define LAN887X_SGMII_PCS_CFG_PCS_ENA BIT(9)
+
+#define LAN887X_EFUSE_READ_DAT9 0xf209
+#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9)
+#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0)
+
+#define LAN887X_CALIB_CONFIG_100 0x437
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL BIT(5)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE BIT(4)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE BIT(3)
+#define LAN887X_CALIB_CONFIG_100_VAL \
+ (LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL)
+
+#define LAN887X_MAX_PGA_GAIN_100 0x44f
+#define LAN887X_MIN_PGA_GAIN_100 0x450
+#define LAN887X_START_CBL_DIAG_100 0x45a
+#define LAN887X_CBL_DIAG_DONE BIT(1)
+#define LAN887X_CBL_DIAG_START BIT(0)
+#define LAN887X_CBL_DIAG_STOP 0x0
+
+#define LAN887X_CBL_DIAG_TDR_THRESH_100 0x45b
+#define LAN887X_CBL_DIAG_AGC_THRESH_100 0x45c
+#define LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100 0x45d
+#define LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100 0x45e
+#define LAN887X_CBL_DIAG_CYC_CONFIG_100 0x45f
+#define LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100 0x460
+#define LAN887X_CBL_DIAG_MIN_PGA_GAIN_100 0x462
+#define LAN887X_CBL_DIAG_AGC_GAIN_100 0x497
+#define LAN887X_CBL_DIAG_POS_PEAK_VALUE_100 0x499
+#define LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100 0x49a
+#define LAN887X_CBL_DIAG_POS_PEAK_TIME_100 0x49c
+#define LAN887X_CBL_DIAG_NEG_PEAK_TIME_100 0x49d
+
+#define MICROCHIP_CABLE_NOISE_MARGIN 20
+#define MICROCHIP_CABLE_TIME_MARGIN 89
+#define MICROCHIP_CABLE_MIN_TIME_DIFF 96
+#define MICROCHIP_CABLE_MAX_TIME_DIFF \
+ (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver"
+
+/* TEST_MODE_NORMAL: Non-hybrid results to calculate cable status (open/short/ok)
+ * TEST_MODE_HYBRID: Hybrid results to calculate distance to fault
+ */
+enum cable_diag_mode {
+ TEST_MODE_NORMAL,
+ TEST_MODE_HYBRID
+};
+
+/* CD_TEST_INIT: Cable test is initiated
+ * CD_TEST_DONE: Cable test is done
+ */
+enum cable_diag_state {
+ CD_TEST_INIT,
+ CD_TEST_DONE
+};
struct access_ereg_val {
u8 mode;
@@ -105,6 +253,32 @@ struct access_ereg_val {
u16 mask;
};
+struct lan887x_hw_stat {
+ const char *string;
+ u8 mmd;
+ u16 reg;
+ u8 bits;
+};
+
+static const struct lan887x_hw_stat lan887x_hw_stats[] = {
+ { "TX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG0, 14},
+ { "RX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG1, 14},
+ { "RX ERR Count detected by PCS", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG3, 16},
+ { "TX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG4, 8},
+ { "RX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG5, 8},
+ { "RX ERR Count for SGMII MII2GMII", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG6, 8},
+};
+
+struct lan887x_regwr_map {
+ u8 mmd;
+ u16 reg;
+ u16 val;
+};
+
+struct lan887x_priv {
+ u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+};
+
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
{
u8 prev_bank;
@@ -860,6 +1034,802 @@ static int lan87xx_get_sqi_max(struct phy_device *phydev)
return LAN87XX_MAX_SQI;
}
+static int lan887x_rgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux disable */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as RGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_RGMII);
+ if (ret < 0)
+ return ret;
+
+ /* Disable PCS */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+ if (ret < 0)
+ return ret;
+
+ /* LAN887x Errata: the RGMII rx clock is active in SGMII mode.
+ * It is disabled for SGMII operation, so re-enable it here for
+ * RGMII mode
+ */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+}
+
+static int lan887x_sgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux enable */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as SGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_SGMII);
+ if (ret < 0)
+ return ret;
+
+ /* LAN887x Errata: the RGMII rx clock is active in SGMII mode,
+ * so disable it for SGMII operation
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* Enable PCS */
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+}
+
+static int lan887x_config_rgmii_en(struct phy_device *phydev)
+{
+ int txc;
+ int rxc;
+ int ret;
+
+ ret = lan887x_rgmii_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Control bit to enable/disable TX DLL delay line in signal path */
+ txc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0);
+ if (txc < 0)
+ return txc;
+
+ /* Control bit to enable/disable RX DLL delay line in signal path */
+ rxc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1);
+ if (rxc < 0)
+ return rxc;
+
+ /* Configure the PHY to enable the RX/TX delays:
+ * RGMII - TX and RX delays are either added by the MAC or not
+ * needed; the PHY must not add them
+ * RGMII_ID - the PHY enables both TX and RX delays; the MAC
+ * must not add them
+ * RGMII_RX_ID - the PHY enables the RX delay; the MAC must not
+ * add the RX delay
+ * RGMII_TX_ID - the PHY enables the TX delay; the MAC must not
+ * add the TX delay
+ */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid phydev interface %d\n", phydev->interface);
+ return 0;
+ }
+
+ /* Configures the PHY to enable/disable RX delay in signal path */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1,
+ LAN887X_MIS_DLL_CONF, rxc);
+ if (ret < 0)
+ return ret;
+
+ /* Configures the PHY to enable/disable the TX delay in signal path */
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0,
+ LAN887X_MIS_DLL_CONF, txc);
+}
+
+static int lan887x_config_phy_interface(struct phy_device *phydev)
+{
+ int interface_mode;
+ int sgmii_dis;
+ int ret;
+
+ /* Read sku efuse data for interfaces supported by sku */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_EFUSE_READ_DAT9);
+ if (ret < 0)
+ return ret;
+
+ /* If interface_mode is 1 then efuse sets RGMII operations.
+ * If interface_mode is 3 then efuse sets SGMII operations.
+ */
+ interface_mode = ret & LAN887X_EFUSE_READ_DAT9_MAC_MODE;
+ /* SGMII disable is set for RGMII operations */
+ sgmii_dis = ret & LAN887X_EFUSE_READ_DAT9_SGMII_DIS;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* Reject RGMII settings for SGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!((interface_mode & LAN887X_MAC_MODE_SGMII) ==
+ LAN887X_MAC_MODE_SGMII))
+ ret = lan887x_config_rgmii_en(phydev);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ /* Reject SGMII setting for RGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!sgmii_dis)
+ ret = lan887x_sgmii_init(phydev);
+ break;
+ default:
+ /* Reject setting for unsupported interfaces */
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int lan887x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable twisted pair */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+
+ /* First patch only supports 100Mbps and 1000Mbps force-mode.
+ * T1 Auto-Negotiation (Clause 98 of IEEE 802.3) will be added later.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int lan887x_phy_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear loopback */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG2,
+ LAN887X_MIS_CFG_REG2_FE_LPBK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure default behavior of led to link and activity for any
+ * speed
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_COMMON_LED3_LED2,
+ LAN887X_COMMON_LED2_MODE_SEL_MASK,
+ LAN887X_LED_LINK_ACT_ANY_SPEED);
+ if (ret < 0)
+ return ret;
+
+ /* PHY interface setup */
+ return lan887x_config_phy_interface(phydev);
+}
+
+static int lan887x_phy_config(struct phy_device *phydev,
+ const struct lan887x_regwr_map *reg_map, int cnt)
+{
+ int ret;
+
+ for (int i = 0; i < cnt; i++) {
+ ret = phy_write_mmd(phydev, reg_map[i].mmd,
+ reg_map[i].reg, reg_map[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan887x_phy_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ /* PORT_AFE writes */
+ {MDIO_MMD_PMAPMD, LAN887X_ZQCAL_CONTROL_1, 0x4008},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL2, 0x0000},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL6, 0x0040},
+ /* 100T1_PCS_VENDOR writes */
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_CNT_THRESH, 0x0008},
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_TIMER_WIN, 0x800d},
+ /* 100T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887x_CDR_CONFIG1_100, 0x0ab1},
+ {MDIO_MMD_VEND1, LAN887x_LOCK1_EQLSR_CONFIG_100, 0x5274},
+ {MDIO_MMD_VEND1, LAN887x_SLV_HD_MUFAC_CONFIG_100, 0x0d74},
+ {MDIO_MMD_VEND1, LAN887x_PLOCK_MUFAC_CONFIG_100, 0x0aea},
+ {MDIO_MMD_VEND1, LAN887x_PROT_DISABLE_100, 0x0360},
+ {MDIO_MMD_VEND1, LAN887x_KF_LOOP_SAT_CONFIG_100, 0x0c30},
+ /* 1000T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887X_LOCK1_EQLSR_CONFIG, 0x2a78},
+ {MDIO_MMD_VEND1, LAN887X_LOCK3_EQLSR_CONFIG, 0x1368},
+ {MDIO_MMD_VEND1, LAN887X_PROT_DISABLE, 0x1354},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN6, 0x3C84},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN7, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN8, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN9, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_ECHO_DELAY_CONFIG, 0x0024},
+ {MDIO_MMD_VEND1, LAN887X_FFE_MAX_CONFIG, 0x227f},
+ /* 1000T1 PCS writes */
+ {MDIO_MMD_PCS, LAN887X_SCR_CONFIG_3, 0x1e00},
+ {MDIO_MMD_PCS, LAN887X_INFO_FLD_CONFIG_5, 0x0fa1},
+ };
+
+ return lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+}
+
+static int lan887x_100M_setup(struct phy_device *phydev)
+{
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ if (phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_FORCE ||
+ phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_PREFERRED) {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x000f},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ } else {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x0014},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ }
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+}
+
+static int lan887x_1000M_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x003f},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ };
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+}
+
+static int lan887x_link_setup(struct phy_device *phydev)
+{
+ int ret = -EINVAL;
+
+ if (phydev->speed == SPEED_1000)
+ ret = lan887x_1000M_setup(phydev);
+ else if (phydev->speed == SPEED_100)
+ ret = lan887x_100M_setup(phydev);
+
+ return ret;
+}
+
+/* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+static int lan887x_phy_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Clear 1000M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+ if (ret < 0)
+ return ret;
+
+ /* Clear 100M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Chiptop soft-reset to allow the speed/mode change */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_SOFT_RST,
+ LAN887X_CHIP_SOFT_RST_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* CL22 soft-reset to let the link re-train */
+ ret = phy_modify(phydev, MII_BMCR, BMCR_RESET, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for reset to complete, or time out after 10ms */
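+ /* Assuming the usual phy_read_poll_timeout() argument order:
+ * poll every 5000 us, give up after 10000 us total, and sleep
+ * once before the first read.
+ */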
+ return phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 5000, 10000, true);
+}
+
+static int lan887x_phy_reconfig(struct phy_device *phydev)
+{
+ int ret;
+
+ linkmode_zero(phydev->advertising);
+
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_link_setup(phydev);
+}
+
+static int lan887x_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+ ret = lan887x_phy_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_phy_reconfig(phydev);
+}
+
+static int lan887x_probe(struct phy_device *phydev)
+{
+ struct lan887x_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return lan887x_phy_setup(phydev);
+}
+
+static u64 lan887x_get_stat(struct phy_device *phydev, int i)
+{
+ struct lan887x_hw_stat stat = lan887x_hw_stats[i];
+ struct lan887x_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.mmd)
+ val = phy_read_mmd(phydev, stat.mmd, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
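+ /* keep only the low stat.bits bits, e.g. (1 << 14) - 1 = 0x3fff
+ * for a 14-bit counter; the u64 running sum in priv->stats[]
+ * then survives hardware counter wrap-around
+ */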
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void lan887x_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ data[i] = lan887x_get_stat(phydev, i);
+}
+
+static int lan887x_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(lan887x_hw_stats);
+}
+
+static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ ethtool_puts(&data, lan887x_hw_stats[i].string);
+}
+
+static int lan887x_cd_reset(struct phy_device *phydev,
+ enum cable_diag_state cd_done)
+{
+ u16 val;
+ int rc;
+
+ /* Chip hard-reset */
+ rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_HARD_RST,
+ LAN887X_CHIP_HARD_RST_RESET);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for reset to complete */
+ rc = phy_read_poll_timeout(phydev, MII_PHYSID2, val,
+ ((val & GENMASK(15, 4)) ==
+ (PHY_ID_LAN887X & GENMASK(15, 4))),
+ 5000, 50000, true);
+ if (rc < 0)
+ return rc;
+
+ if (cd_done == CD_TEST_DONE) {
+ /* Cable diagnostics complete. Restore PHY. */
+ rc = lan887x_phy_setup(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_init(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_reconfig(phydev);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_prep(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ static const struct lan887x_regwr_map values[] = {
+ {MDIO_MMD_VEND1, LAN887X_MAX_PGA_GAIN_100, 0x1f},
+ {MDIO_MMD_VEND1, LAN887X_MIN_PGA_GAIN_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TDR_THRESH_100, 0x1},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_AGC_THRESH_100, 0x3c},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100, 0x46},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_CYC_CONFIG_100, 0x5a},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100, 0x44d5},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_PGA_GAIN_100, 0x0},
+
+ };
+ int rc;
+
+ rc = lan887x_cd_reset(phydev, CD_TEST_INIT);
+ if (rc < 0)
+ return rc;
+
+ /* Forcing DUT to master mode, as we don't care about
+ * mode during diagnostics
+ */
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x80b0, 0x0038);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CALIB_CONFIG_100, 0,
+ LAN887X_CALIB_CONFIG_100_VAL);
+ if (rc < 0)
+ return rc;
+
+ for (int i = 0; i < ARRAY_SIZE(values); i++) {
+ rc = phy_write_mmd(phydev, values[i].mmd, values[i].reg,
+ values[i].val);
+ if (rc < 0)
+ return rc;
+
+ if (mode &&
+ values[i].reg == LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100) {
+ rc = phy_write_mmd(phydev, values[i].mmd,
+ values[i].reg, 0xa);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (mode == TEST_MODE_HYBRID) {
+ rc = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN887X_AFE_PORT_TESTBUS_CTRL4,
+ BIT(0), BIT(0));
+ if (rc < 0)
+ return rc;
+ }
+
+ /* HW_INIT 100T1, Get DUT running in 100T1 mode */
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Cable diag requires a hard reset and is sensitive to delays.
+ * A hard reset is expected going into and out of cable diag.
+ * Wait for 50ms
+ */
+ msleep(50);
+
+ /* Start cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_START);
+}
+
+static int lan887x_cable_test_chk(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ int val;
+ int rc;
+
+ if (mode == TEST_MODE_HYBRID) {
+ /* Cable diag requires a hard reset and is sensitive to delays.
+ * A hard reset is expected going into and out of cable diag.
+ * Wait for cable diag to complete.
+ * The minimum wait between polls is 50ms while the condition is not met.
+ */
+ rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100, val,
+ ((val & LAN887X_CBL_DIAG_DONE) ==
+ LAN887X_CBL_DIAG_DONE),
+ 50000, 500000, false);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & LAN887X_CBL_DIAG_DONE) != LAN887X_CBL_DIAG_DONE)
+ return -EAGAIN;
+ }
+
+ /* Stop cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+}
+
+static int lan887x_cable_test_start(struct phy_device *phydev)
+{
+ int rc, ret;
+
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_report(struct phy_device *phydev)
+{
+ int pos_peak_cycle, pos_peak_cycle_hybrid, pos_peak_in_phases;
+ int pos_peak_time, pos_peak_time_hybrid, neg_peak_time;
+ int neg_peak_cycle, neg_peak_in_phases;
+ int pos_peak_in_phases_hybrid;
+ int gain_idx, gain_idx_hybrid;
+ int pos_peak_phase_hybrid;
+ int pos_peak, neg_peak;
+ int distance;
+ int detect;
+ int length;
+ int ret;
+ int rc;
+
+ /* Read non-hybrid results */
+ gain_idx = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx < 0) {
+ rc = gain_idx;
+ goto error;
+ }
+
+ pos_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_VALUE_100);
+ if (pos_peak < 0) {
+ rc = pos_peak;
+ goto error;
+ }
+
+ neg_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100);
+ if (neg_peak < 0) {
+ rc = neg_peak;
+ goto error;
+ }
+
+ pos_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time < 0) {
+ rc = pos_peak_time;
+ goto error;
+ }
+
+ neg_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_TIME_100);
+ if (neg_peak_time < 0) {
+ rc = neg_peak_time;
+ goto error;
+ }
+
+ /* Calculate non-hybrid values */
+ pos_peak_cycle = (pos_peak_time >> 7) & 0x7f;
+ pos_peak_in_phases = (pos_peak_cycle * 96) + (pos_peak_time & 0x7f);
+ neg_peak_cycle = (neg_peak_time >> 7) & 0x7f;
+ neg_peak_in_phases = (neg_peak_cycle * 96) + (neg_peak_time & 0x7f);
+
+	/* Derive the status of the cable */
+ if (pos_peak > MICROCHIP_CABLE_NOISE_MARGIN &&
+ neg_peak > MICROCHIP_CABLE_NOISE_MARGIN && gain_idx >= 0) {
+ if (pos_peak_in_phases > neg_peak_in_phases &&
+ ((pos_peak_in_phases - neg_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((pos_peak_in_phases - neg_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ pos_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_SAME_SHORT;
+ } else if (neg_peak_in_phases > pos_peak_in_phases &&
+ ((neg_peak_in_phases - pos_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((neg_peak_in_phases - pos_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ neg_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_OPEN;
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+
+ if (detect == LAN87XX_CABLE_TEST_OK) {
+ distance = 0;
+ goto get_len;
+ }
+
+ /* Re-initialize PHY and start cable diag test */
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Wait for cable diag test completion */
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Read hybrid results */
+ gain_idx_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx_hybrid < 0) {
+ rc = gain_idx_hybrid;
+ goto error;
+ }
+
+ pos_peak_time_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time_hybrid < 0) {
+ rc = pos_peak_time_hybrid;
+ goto error;
+ }
+
+ /* Calculate hybrid values to derive cable length to fault */
+ pos_peak_cycle_hybrid = (pos_peak_time_hybrid >> 7) & 0x7f;
+ pos_peak_phase_hybrid = pos_peak_time_hybrid & 0x7f;
+ pos_peak_in_phases_hybrid = pos_peak_cycle_hybrid * 96 +
+ pos_peak_phase_hybrid;
+
+	/* Distance to fault calculation.
+	 * distance = (peak_in_phases - peak_in_phases_hybrid) *
+	 *            propagation constant
+	 * The propagation constant converts the number of phases to meters:
+	 * propagation constant = 0.015953
+	 *                        (0.6811 * 2.9979 * 156.2499 * 0.0001 * 0.5)
+	 * It is applied as 1.5953 because ethtool further divides by 100 to
+	 * convert to meters.
+	 */
+ if (detect == LAN87XX_CABLE_TEST_OPEN) {
+ distance = (((pos_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else if (detect == LAN87XX_CABLE_TEST_SAME_SHORT) {
+ distance = (((neg_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else {
+ distance = 0;
+ }
+
+get_len:
+ rc = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (rc < 0)
+ return rc;
+
+ length = ((u32)distance & GENMASK(15, 0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, length);
+
+ return 0;
+
+cd_stop:
+ /* Stop cable diag */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+ if (ret < 0)
+ return ret;
+
+error:
+ /* Cable diag test failed */
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ /* Return error in failure case */
+ return rc;
+}
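
As a concrete check of the arithmetic above, a minimal sketch (the helper name and sample value are hypothetical, not part of the patch): a measured delta of 627 phases gives 627 * 15953 / 10000 = 1000, which ethtool then divides by 100 and reports as a 10.00 m fault distance.

/* Hypothetical helper illustrating the phases-to-centimeters conversion
 * used above; 15953/10000 applies the 0.015953 m/phase propagation
 * constant scaled by 100, since ethtool divides the reported length by
 * 100 to get meters.
 */
static int lan887x_phases_to_cm(int delta_phases)
{
	return (delta_phases * 15953) / 10000;
}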
+
+static int lan887x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int rc;
+
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+		/* Let the PHY state machine poll again */
+ if (rc == -EAGAIN)
+ return 0;
+ return rc;
+ }
+
+ /* Cable diag test complete */
+ *finished = true;
+
+ /* Retrieve test status and cable length to fault */
+ return lan887x_cable_test_report(phydev);
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -894,6 +1864,23 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN887X),
+ .name = "Microchip LAN887x T1 PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = lan887x_probe,
+ .get_features = lan887x_get_features,
+ .config_init = lan887x_phy_init,
+ .config_aneg = lan887x_config_aneg,
+ .get_stats = lan887x_get_stats,
+ .get_sset_count = lan887x_get_sset_count,
+ .get_strings = lan887x_get_strings,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = genphy_c45_read_status,
+ .cable_test_start = lan887x_cable_test_start,
+ .cable_test_get_status = lan887x_cable_test_get_status,
}
};
@@ -902,6 +1889,7 @@ module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN887X) },
{ }
};
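
With the .cable_test_start/.cable_test_get_status callbacks and the PHY_POLL_CABLE_TEST flag registered above, the LAN887x diagnostics are reachable through the standard ethtool netlink cable-test interface: on a typical system, a single "ethtool --cable-test <iface>" invocation (interface name assumed) drives the prep/poll/report sequence implemented here and prints the pair status and fault length.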
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 534ca7d1b061..3614839a8e51 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -268,6 +268,34 @@ static int lan86xx_read_status(struct phy_device *phydev)
return 0;
}
+/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45 register
+ * spaces. If the PHY is discovered via the C22 bus protocol, the framework
+ * assumes the C22 protocol and always uses C22 indirect register access to
+ * reach the C45 registers. This is because there is no clean separation
+ * between the C22/C45 register spaces and the C22/C45 MDIO bus protocols.
+ * As a result, direct access to the C45 registers, which would save multiple
+ * SPI bus accesses, cannot be used. To support this feature, set
+ * .read_mmd/.write_mmd in the PHY driver to call .read_c45/.write_c45 in the
+ * OPEN Alliance framework drivers/net/ethernet/oa_tc6.c
+ */
+static int lan865x_phy_read_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_read(bus, addr, devnum, regnum);
+}
+
+static int lan865x_phy_write_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum, u16 val)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_write(bus, addr, devnum, regnum, val);
+}
+
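
For contrast, a minimal sketch of the Clause 22 indirect MMD access that the PHY framework performs by default (modeled on mmd_phy_indirect() in drivers/net/phy/phy-core.c; the wrapper name here is hypothetical). Pointing .read_mmd/.write_mmd at the direct C45 accessors avoids these four bus transactions per register access:

static int c22_indirect_read_mmd_sketch(struct mii_bus *bus, int addr,
					int devnum, u16 regnum)
{
	/* Select the MMD and load the target register address... */
	__mdiobus_write(bus, addr, MII_MMD_CTRL, devnum);
	__mdiobus_write(bus, addr, MII_MMD_DATA, regnum);
	/* ...then switch the MMD to data mode and read the value */
	__mdiobus_write(bus, addr, MII_MMD_CTRL, devnum | MII_MMD_CTRL_NOINCR);
	return __mdiobus_read(bus, addr, MII_MMD_DATA);
}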
static struct phy_driver microchip_t1s_driver[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1),
@@ -285,6 +313,8 @@ static struct phy_driver microchip_t1s_driver[] = {
.features = PHY_BASIC_T1S_P2MP_FEATURES,
.config_init = lan865x_revb0_config_init,
.read_status = lan86xx_read_status,
+ .read_mmd = lan865x_phy_read_mmd,
+ .write_mmd = lan865x_phy_write_mmd,
.get_plca_cfg = genphy_c45_plca_get_cfg,
.set_plca_cfg = genphy_c45_plca_set_cfg,
.get_plca_status = genphy_c45_plca_get_status,
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 7a11fdb687cc..0e91f5d1a4fd 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521/8531/8531S PHY driver.
+ * Motorcomm 8511/8521/8531/8531S/8821 PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
* Author: Frank <Frank.Sae@motor-comm.com>
@@ -16,8 +16,8 @@
#define PHY_ID_YT8521 0x0000011a
#define PHY_ID_YT8531 0x4f51e91b
#define PHY_ID_YT8531S 0x4f51e91a
-
-/* YT8521/YT8531S Register Overview
+#define PHY_ID_YT8821 0x4f51ea19
+/* YT8521/YT8531S/YT8821 Register Overview
* UTP Register space | FIBER Register space
* ------------------------------------------------------------
* | UTP MII | FIBER MII |
@@ -46,12 +46,12 @@
/* Specific Status Register */
#define YTPHY_SPECIFIC_STATUS_REG 0x11
-#define YTPHY_SSR_SPEED_MODE_OFFSET 14
-
-#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14))
-#define YTPHY_SSR_SPEED_10M 0x0
-#define YTPHY_SSR_SPEED_100M 0x1
-#define YTPHY_SSR_SPEED_1000M 0x2
+#define YTPHY_SSR_SPEED_MASK ((0x3 << 14) | BIT(9))
+#define YTPHY_SSR_SPEED_10M ((0x0 << 14))
+#define YTPHY_SSR_SPEED_100M ((0x1 << 14))
+#define YTPHY_SSR_SPEED_1000M ((0x2 << 14))
+#define YTPHY_SSR_SPEED_10G ((0x3 << 14))
+#define YTPHY_SSR_SPEED_2500M ((0x0 << 14) | BIT(9))
#define YTPHY_SSR_DUPLEX_OFFSET 13
#define YTPHY_SSR_DUPLEX BIT(13)
#define YTPHY_SSR_PAGE_RECEIVED BIT(12)
@@ -270,12 +270,89 @@
#define YT8531_SCR_CLK_SRC_REF_25M 4
#define YT8531_SCR_CLK_SRC_SSC_25M 5
+#define YT8821_SDS_EXT_CSR_CTRL_REG 0x23
+#define YT8821_SDS_EXT_CSR_VCO_LDO_EN BIT(15)
+#define YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN BIT(8)
+
+#define YT8821_UTP_EXT_PI_CTRL_REG 0x56
+#define YT8821_UTP_EXT_PI_RST_N_FIFO BIT(5)
+#define YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE BIT(4)
+#define YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE BIT(3)
+#define YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE BIT(2)
+#define YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE BIT(1)
+#define YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE BIT(0)
+
+#define YT8821_UTP_EXT_VCT_CFG6_CTRL_REG 0x97
+#define YT8821_UTP_EXT_FECHO_AMP_TH_HUGE GENMASK(15, 8)
+
+#define YT8821_UTP_EXT_ECHO_CTRL_REG 0x336
+#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000 GENMASK(14, 8)
+
+#define YT8821_UTP_EXT_GAIN_CTRL_REG 0x340
+#define YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_RPDN_CTRL_REG 0x34E
+#define YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 BIT(15)
+#define YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 BIT(7)
+#define YT8821_UTP_EXT_RPDN_IPR_SHT_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG 0x36A
+#define YT8821_UTP_EXT_TH_20DB_2500 GENMASK(15, 0)
+
+#define YT8821_UTP_EXT_TRACE_CTRL_REG 0x372
+#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 GENMASK(14, 8)
+#define YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG 0x374
+#define YT8821_UTP_EXT_ALPHA_SHT_2500 GENMASK(14, 8)
+#define YT8821_UTP_EXT_IPR_LNG_2500 GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_PLL_CTRL_REG 0x450
+#define YT8821_UTP_EXT_PLL_SPARE_CFG GENMASK(7, 0)
+
+#define YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG 0x466
+#define YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG 0x467
+#define YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG 0x468
+#define YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG 0x469
+#define YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG GENMASK(14, 8)
+#define YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG GENMASK(6, 0)
+
+#define YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG 0x4B3
+#define YT8821_UTP_EXT_MU_COARSE_FR_F_FFE GENMASK(14, 12)
+#define YT8821_UTP_EXT_MU_COARSE_FR_F_FBE GENMASK(10, 8)
+
+#define YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG 0x4B5
+#define YT8821_UTP_EXT_MU_FINE_FR_F_FFE GENMASK(14, 12)
+#define YT8821_UTP_EXT_MU_FINE_FR_F_FBE GENMASK(10, 8)
+
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG 0x4D2
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER GENMASK(7, 4)
+#define YT8821_UTP_EXT_VGA_LPF1_CAP_2500 GENMASK(3, 0)
+
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG 0x4D3
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER GENMASK(7, 4)
+#define YT8821_UTP_EXT_VGA_LPF2_CAP_2500 GENMASK(3, 0)
+
+#define YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG 0x660
+#define YT8821_UTP_EXT_NFR_TX_ABILITY BIT(3)
/* Extended Register end */
#define YTPHY_DTS_OUTPUT_CLK_DIS 0
#define YTPHY_DTS_OUTPUT_CLK_25M 25000000
#define YTPHY_DTS_OUTPUT_CLK_125M 125000000
+#define YT8821_CHIP_MODE_AUTO_BX2500_SGMII 0
+#define YT8821_CHIP_MODE_FORCE_BX2500 1
+
struct yt8521_priv {
/* combo_advertising is used for case of YT8521 in combo mode,
* this means that yt8521 may work in utp or fiber mode which depends
@@ -1187,8 +1264,7 @@ static int yt8521_adjust_status(struct phy_device *phydev, int status,
else
duplex = DUPLEX_FULL; /* for fiber, it always DUPLEX_FULL */
- speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >>
- YTPHY_SSR_SPEED_MODE_OFFSET;
+ speed_mode = status & YTPHY_SSR_SPEED_MASK;
switch (speed_mode) {
case YTPHY_SSR_SPEED_10M:
@@ -2252,6 +2328,572 @@ static int yt8521_get_features(struct phy_device *phydev)
return ret;
}
+/**
+ * yt8821_get_features - read mmd register to get 2.5G capability
+ * @phydev: target phy_device struct
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_ext_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_abilities(phydev);
+}
+
+/**
+ * yt8821_get_rate_matching - read register to get phy chip mode
+ * @phydev: target phy_device struct
+ * @iface: PHY data interface type
+ *
+ * Returns: rate matching type or negative errno code
+ */
+static int yt8821_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int val;
+
+ val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
+ YT8821_CHIP_MODE_FORCE_BX2500)
+ return RATE_MATCH_PAUSE;
+
+ return RATE_MATCH_NONE;
+}
+
+/**
+ * yt8821_aneg_done() - determines the auto negotiation result
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 (no link), 1 (utp link), or a negative errno code
+ */
+static int yt8821_aneg_done(struct phy_device *phydev)
+{
+ return yt8521_aneg_done_paged(phydev, YT8521_RSSR_UTP_SPACE);
+}
+
+/**
+ * yt8821_serdes_init() - serdes init
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_serdes_init(struct phy_device *phydev)
+{
+ int old_page;
+ int ret = 0;
+ u16 mask;
+ u16 set;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_FIBER_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_SDS_EXT_CSR_VCO_LDO_EN |
+ YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN;
+ set = YT8821_SDS_EXT_CSR_VCO_LDO_EN;
+ ret = ytphy_modify_ext(phydev, YT8821_SDS_EXT_CSR_CTRL_REG, mask,
+ set);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_utp_init() - utp init
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_utp_init(struct phy_device *phydev)
+{
+ int old_page;
+ int ret = 0;
+ u16 mask;
+ u16 save;
+ u16 set;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ mask = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
+ YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 |
+ YT8821_UTP_EXT_RPDN_IPR_SHT_2500;
+ set = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
+ YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500;
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_RPDN_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER |
+ YT8821_UTP_EXT_VGA_LPF1_CAP_2500;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER |
+ YT8821_UTP_EXT_VGA_LPF2_CAP_2500;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 |
+ YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500, 0x5a) |
+ FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500, 0x3c);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_TRACE_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_IPR_LNG_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_IPR_LNG_2500, 0x6c);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000, 0x2a);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_ECHO_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000;
+ set = FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000, 0x22);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_GAIN_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_TH_20DB_2500;
+ set = FIELD_PREP(YT8821_UTP_EXT_TH_20DB_2500, 0x8000);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_MU_COARSE_FR_F_FFE |
+ YT8821_UTP_EXT_MU_COARSE_FR_F_FBE;
+ set = FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FFE, 0x7) |
+ FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FBE, 0x7);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_MU_FINE_FR_F_FFE |
+ YT8821_UTP_EXT_MU_FINE_FR_F_FBE;
+ set = FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FFE, 0x2) |
+ FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FBE, 0x2);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* save YT8821_UTP_EXT_PI_CTRL_REG's val for use later */
+ ret = ytphy_read_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG);
+ if (ret < 0)
+ goto err_restore_page;
+
+ save = ret;
+
+ mask = YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE |
+ YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE;
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG,
+ mask, 0);
+ if (ret < 0)
+ goto err_restore_page;
+
+ /* restore YT8821_UTP_EXT_PI_CTRL_REG's val */
+ ret = ytphy_write_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, save);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_FECHO_AMP_TH_HUGE;
+ set = FIELD_PREP(YT8821_UTP_EXT_FECHO_AMP_TH_HUGE, 0x38);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_VCT_CFG6_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_NFR_TX_ABILITY;
+ set = YT8821_UTP_EXT_NFR_TX_ABILITY;
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_PLL_SPARE_CFG;
+ set = FIELD_PREP(YT8821_UTP_EXT_PLL_SPARE_CFG, 0xe9);
+ ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PLL_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG |
+ YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG |
+ YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG |
+ YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG,
+ mask, set);
+ if (ret < 0)
+ goto err_restore_page;
+
+ mask = YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG |
+ YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG;
+ set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG, 0x64) |
+ FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG, 0x64);
+ ret = ytphy_modify_ext(phydev,
+ YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG,
+ mask, set);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_auto_sleep_config() - phy auto sleep config
+ * @phydev: a pointer to a &struct phy_device
+ * @enable: true to enable auto sleep, false to disable auto sleep
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_auto_sleep_config(struct phy_device *phydev,
+ bool enable)
+{
+ int old_page;
+ int ret = 0;
+
+ old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
+ if (old_page < 0) {
+ phydev_err(phydev, "Failed to select page: %d\n",
+ old_page);
+ goto err_restore_page;
+ }
+
+ ret = ytphy_modify_ext(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW,
+ enable ? 1 : 0);
+
+err_restore_page:
+ return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8821_soft_reset() - soft reset utp and serdes
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_soft_reset(struct phy_device *phydev)
+{
+ return ytphy_modify_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_SW_RST, 0);
+}
+
+/**
+ * yt8821_config_init() - phy initialization
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_config_init(struct phy_device *phydev)
+{
+ u8 mode = YT8821_CHIP_MODE_AUTO_BX2500_SGMII;
+ int ret;
+ u16 set;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mode = YT8821_CHIP_MODE_FORCE_BX2500;
+
+ set = FIELD_PREP(YT8521_CCR_MODE_SEL_MASK, mode);
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_MODE_SEL_MASK,
+ set);
+ if (ret < 0)
+ return ret;
+
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ phydev->possible_interfaces);
+
+ if (mode == YT8821_CHIP_MODE_AUTO_BX2500_SGMII) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ phydev->possible_interfaces);
+
+ phydev->rate_matching = RATE_MATCH_NONE;
+ } else if (mode == YT8821_CHIP_MODE_FORCE_BX2500) {
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ }
+
+ ret = yt8821_serdes_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = yt8821_utp_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* disable auto sleep */
+ ret = yt8821_auto_sleep_config(phydev, false);
+ if (ret < 0)
+ return ret;
+
+ /* soft reset */
+ return yt8821_soft_reset(phydev);
+}
+
+/**
+ * yt8821_adjust_status() - update speed and duplex to phydev
+ * @phydev: a pointer to a &struct phy_device
+ * @val: read from YTPHY_SPECIFIC_STATUS_REG
+ */
+static void yt8821_adjust_status(struct phy_device *phydev, int val)
+{
+ int speed, duplex;
+ int speed_mode;
+
+ duplex = FIELD_GET(YTPHY_SSR_DUPLEX, val);
+ speed_mode = val & YTPHY_SSR_SPEED_MASK;
+ switch (speed_mode) {
+ case YTPHY_SSR_SPEED_10M:
+ speed = SPEED_10;
+ break;
+ case YTPHY_SSR_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ case YTPHY_SSR_SPEED_1000M:
+ speed = SPEED_1000;
+ break;
+ case YTPHY_SSR_SPEED_2500M:
+ speed = SPEED_2500;
+ break;
+ default:
+ speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+}
+
+/**
+ * yt8821_update_interface() - update interface per current speed
+ * @phydev: a pointer to a &struct phy_device
+ */
+static void yt8821_update_interface(struct phy_device *phydev)
+{
+ if (!phydev->link)
+ return;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ default:
+		phydev_warn(phydev, "phy speed err: %d\n", phydev->speed);
+ break;
+ }
+}
+
+/**
+ * yt8821_read_status() - determines the negotiated speed and duplex
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_read_status(struct phy_device *phydev)
+{
+ int link;
+ int ret;
+ int val;
+
+ ret = ytphy_write_ext_with_lock(phydev,
+ YT8521_REG_SPACE_SELECT_REG,
+ YT8521_RSSR_UTP_SPACE);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
+ link = val & YTPHY_SSR_LINK;
+ if (link)
+ yt8821_adjust_status(phydev, val);
+
+ if (link) {
+ if (phydev->link == 0)
+ phydev_dbg(phydev,
+ "%s, phy addr: %d, link up\n",
+ __func__, phydev->mdio.addr);
+ phydev->link = 1;
+ } else {
+ if (phydev->link == 1)
+ phydev_dbg(phydev,
+ "%s, phy addr: %d, link down\n",
+ __func__, phydev->mdio.addr);
+ phydev->link = 0;
+ }
+
+ val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
+ YT8821_CHIP_MODE_AUTO_BX2500_SGMII)
+ yt8821_update_interface(phydev);
+
+ return 0;
+}
+
+/**
+ * yt8821_modify_utp_fiber_bmcr - modify bits of a PHY's BMCR register
+ * @phydev: the phy_device struct
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_modify_utp_fiber_bmcr(struct phy_device *phydev,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE,
+ mask, set);
+ if (ret < 0)
+ return ret;
+
+ return yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE,
+ mask, set);
+}
+
+/**
+ * yt8821_suspend() - suspend the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_suspend(struct phy_device *phydev)
+{
+ int wol_config;
+
+ wol_config = ytphy_read_ext_with_lock(phydev,
+ YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if wol is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8821_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN);
+}
+
+/**
+ * yt8821_resume() - resume the hardware
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Returns: 0 or negative errno code
+ */
+static int yt8821_resume(struct phy_device *phydev)
+{
+ int wol_config;
+ int ret;
+
+ /* disable auto sleep */
+ ret = yt8821_auto_sleep_config(phydev, false);
+ if (ret < 0)
+ return ret;
+
+ wol_config = ytphy_read_ext_with_lock(phydev,
+ YTPHY_WOL_CONFIG_REG);
+ if (wol_config < 0)
+ return wol_config;
+
+	/* if wol is enabled, do nothing */
+ if (wol_config & YTPHY_WCR_ENABLE)
+ return 0;
+
+ return yt8821_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0);
+}
+
static struct phy_driver motorcomm_phy_drvs[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8511),
@@ -2307,11 +2949,28 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.suspend = yt8521_suspend,
.resume = yt8521_resume,
},
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8821),
+ .name = "YT8821 2.5Gbps PHY",
+ .get_features = yt8821_get_features,
+ .read_page = yt8521_read_page,
+ .write_page = yt8521_write_page,
+ .get_wol = ytphy_get_wol,
+ .set_wol = ytphy_set_wol,
+ .config_aneg = genphy_config_aneg,
+ .aneg_done = yt8821_aneg_done,
+ .config_init = yt8821_config_init,
+ .get_rate_matching = yt8821_get_rate_matching,
+ .read_status = yt8821_read_status,
+ .soft_reset = yt8821_soft_reset,
+ .suspend = yt8821_suspend,
+ .resume = yt8821_resume,
+ },
};
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S/8821 PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -2321,6 +2980,7 @@ static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8821) },
{ /* sentinel */ }
};
diff --git a/drivers/net/phy/open_alliance_helpers.c b/drivers/net/phy/open_alliance_helpers.c
new file mode 100644
index 000000000000..36a70451d7da
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * open_alliance_helpers.c - OPEN Alliance specific PHY diagnostic helpers
+ *
+ * This file contains helper functions for implementing advanced diagnostic
+ * features as specified by the OPEN Alliance for automotive Ethernet PHYs.
+ * These helpers include functionality for Time Delay Reflection (TDR), dynamic
+ * channel quality assessment, and other PHY diagnostics.
+ *
+ * For more information on the specifications, refer to the OPEN Alliance
+ * documentation: https://opensig.org/automotive-ethernet-specifications/
+ * Currently, the following specifications are partially or fully implemented:
+ * - Advanced diagnostic features for 1000BASE-T1 automotive Ethernet PHYs.
+ * TC12 - advanced PHY features.
+ * https://opensig.org/wp-content/uploads/2024/03/Advanced_PHY_features_for_automotive_Ethernet_v2.0_fin.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
+
+#include "open_alliance_helpers.h"
+
+/**
+ * oa_1000bt1_get_ethtool_cable_result_code - Convert TDR status to ethtool
+ * result code
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and converts
+ * the TDR status to the corresponding ethtool cable test result code.
+ *
+ * Return: The appropriate ethtool result code based on the TDR status
+ */
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value)
+{
+ u8 tdr_status = FIELD_GET(OA_1000BT1_HDD_TDR_STATUS_MASK, reg_value);
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ switch (tdr_status) {
+ case OA_1000BT1_HDD_TDR_STATUS_CABLE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case OA_1000BT1_HDD_TDR_STATUS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case OA_1000BT1_HDD_TDR_STATUS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case OA_1000BT1_HDD_TDR_STATUS_NOISE:
+ return ETHTOOL_A_CABLE_RESULT_CODE_NOISE;
+ default:
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return ETHTOOL_A_CABLE_RESULT_CODE_RESOLUTION_NOT_POSSIBLE;
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_ethtool_cable_result_code);
+
+/**
+ * oa_1000bt1_get_tdr_distance - Get distance to the main fault from TDR
+ * register value
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and extracts
+ * the distance to the main fault detected by the TDR feature. The distance is
+ * measured in centimeters and ranges from 0 to 3100 centimeters. If the
+ * distance is not available (0x3f), the function returns -ERANGE.
+ *
+ * Return: The distance to the main fault in centimeters, or -ERANGE if the
+ * resolution is not possible.
+ */
+int oa_1000bt1_get_tdr_distance(u16 reg_value)
+{
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return -ERANGE;
+
+ return dist_val * 100;
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_tdr_distance);
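
A hypothetical caller, sketching how a driver's cable-test handler could combine the two helpers above (the wrapper and the source of the raw register value are assumptions for illustration):

static void oa_tdr_report_sketch(struct phy_device *phydev, u16 tdr_reg)
{
	int code = oa_1000bt1_get_ethtool_cable_result_code(tdr_reg);
	int dist_cm = oa_1000bt1_get_tdr_distance(tdr_reg);

	/* Report the pair status, and the fault length only when the
	 * distance could be resolved (dist_cm is -ERANGE otherwise).
	 */
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, code);
	if (dist_cm >= 0)
		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
					      dist_cm);
}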
diff --git a/drivers/net/phy/open_alliance_helpers.h b/drivers/net/phy/open_alliance_helpers.h
new file mode 100644
index 000000000000..8b7d97bc6f18
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef OPEN_ALLIANCE_HELPERS_H
+#define OPEN_ALLIANCE_HELPERS_H
+
+/*
+ * These defines reflect the TDR (Time Delay Reflection) diagnostic feature
+ * for 1000BASE-T1 automotive Ethernet PHYs as specified by the OPEN Alliance.
+ *
+ * The register values are part of the HDD.TDR register, which provides
+ * information about the cable status and faults. The exact register offset
+ * is device-specific and should be provided by the driver.
+ */
+#define OA_1000BT1_HDD_TDR_ACTIVATION_MASK GENMASK(1, 0)
+#define OA_1000BT1_HDD_TDR_ACTIVATION_OFF 1
+#define OA_1000BT1_HDD_TDR_ACTIVATION_ON 2
+
+#define OA_1000BT1_HDD_TDR_STATUS_MASK GENMASK(7, 4)
+#define OA_1000BT1_HDD_TDR_STATUS_SHORT 3
+#define OA_1000BT1_HDD_TDR_STATUS_OPEN 6
+#define OA_1000BT1_HDD_TDR_STATUS_NOISE 5
+#define OA_1000BT1_HDD_TDR_STATUS_CABLE_OK 7
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_IN_PROGRESS 8
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_NOT_POSSIBLE 13
+
+/*
+ * OA_1000BT1_HDD_TDR_DISTANCE_MASK:
+ * This mask is used to extract the distance to the first/main fault
+ * detected by the TDR feature. Each increment represents an approximate
+ * distance of 1 meter, ranging from 0 to 31 meters. The exact interpretation
+ * of the values may vary, but generally:
+ * 000000 = no error
+ * 000001 = error about 0-1m away
+ * 000010 = error between 1-2m away
+ * ...
+ * 011111 = error about 30-31m away
+ * 111111 = resolution not possible / out of distance
+ */
+#define OA_1000BT1_HDD_TDR_DISTANCE_MASK GENMASK(13, 8)
+#define OA_1000BT1_HDD_TDR_DISTANCE_NO_ERROR 0
+#define OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE 0x3f
+
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value);
+int oa_1000bt1_get_tdr_distance(u16 reg_value);
+
+#endif /* OPEN_ALLIANCE_HELPERS_H */
+
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 785182fa5fe0..4f3e742907cb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -342,14 +342,19 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- mii_data->val_out = mdiobus_c45_read(
- phydev->mdio.bus, prtad, devad,
- mii_data->reg_num);
+ ret = mdiobus_c45_read(phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num);
+
} else {
- mii_data->val_out = mdiobus_read(
- phydev->mdio.bus, mii_data->phy_id,
- mii_data->reg_num);
+ ret = mdiobus_read(phydev->mdio.bus, mii_data->phy_id,
+ mii_data->reg_num);
}
+
+ if (ret < 0)
+ return ret;
+
+ mii_data->val_out = ret;
+
return 0;
case SIOCSMIIREG:
@@ -1089,7 +1094,10 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+ if (autoneg == AUTONEG_ENABLE &&
+ (linkmode_empty(advertising) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported)))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6bb2793de0a9..560e338b307a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/phylib_stubs.h>
#include <linux/phy_led_triggers.h>
+#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/rtnetlink.h>
@@ -279,6 +280,15 @@ static struct phy_driver genphy_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static bool phy_drv_wol_enabled(struct phy_device *phydev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(phydev, &wol);
+
+ return wol.wolopts != 0;
+}
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -288,6 +298,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!drv || !phydrv->suspend)
return false;
+	/* If the PHY on the mdio bus is not attached but has WOL enabled,
+	 * we cannot suspend the PHY.
+	 */
+ if (!netdev && phy_drv_wol_enabled(phydev))
+ return false;
+
/* PHY not attached? May suspend if the PHY has not already been
* suspended as part of a prior call to phy_disconnect() ->
* phy_detach() -> phy_suspend() because the parent netdev might be the
@@ -1370,6 +1386,48 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It adds the
+ * SFP module's phy to the phy namespace of the upstream phy
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_PHY, phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_connect_phy);
+
+/**
+ * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It removes the
+ * SFP module's phy from the phy namespace of the upstream phy. As the module phy
+ * will be destroyed, re-inserting the same module will add a new phy with a
+ * new index.
+ */
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ phy_link_topo_del_phy(dev, phy);
+}
+EXPORT_SYMBOL(phy_sfp_disconnect_phy);
+
+/**
* phy_sfp_attach - attach the SFP bus to the PHY upstream network device
* @upstream: pointer to the phy device
* @bus: sfp bus representing cage being attached
@@ -1511,6 +1569,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->sfp_bus_attached)
dev->sfp_bus = phydev->sfp_bus;
+
+ err = phy_link_topo_add_phy(dev, phydev, PHY_UPSTREAM_MAC, dev);
+ if (err)
+ goto error;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1938,6 +2000,7 @@ void phy_detach(struct phy_device *phydev)
if (dev) {
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_link_topo_del_phy(dev, phydev);
}
phydev->phylink = NULL;
@@ -1975,7 +2038,6 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
const struct phy_driver *phydrv = phydev->drv;
int ret;
@@ -1983,8 +2045,7 @@ int phy_suspend(struct phy_device *phydev)
if (phydev->suspended || !phydrv)
return 0;
- phy_ethtool_get_wol(phydev, &wol);
- phydev->wol_enabled = wol.wolopts ||
+ phydev->wol_enabled = phy_drv_wol_enabled(phydev) ||
(netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
@@ -2095,22 +2156,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
/**
* genphy_config_advert - sanitize and advertise auto-negotiation parameters
* @phydev: target phy_device struct
+ * @advert: auto-negotiation parameters to advertise
*
* Description: Writes MII_ADVERTISE with the appropriate values,
* after sanitizing the values to make sure we only advertise
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-static int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev,
+ const unsigned long *advert)
{
int err, bmsr, changed = 0;
u32 adv;
- /* Only allow advertising what this PHY supports */
- linkmode_and(phydev->advertising, phydev->advertising,
- phydev->supported);
-
- adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_adv_t(advert);
/* Setup standard advertisement */
err = phy_modify_changed(phydev, MII_ADVERTISE,
@@ -2133,7 +2192,7 @@ static int genphy_config_advert(struct phy_device *phydev)
if (!(bmsr & BMSR_ESTATEN))
return changed;
- adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_ctrl1000_t(advert);
err = phy_modify_changed(phydev, MII_CTRL1000,
ADVERTISE_1000FULL | ADVERTISE_1000HALF,
@@ -2357,6 +2416,9 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
*/
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
+ const struct phy_setting *set;
+ unsigned long *advert;
int err;
err = genphy_c45_an_config_eee_aneg(phydev);
@@ -2371,10 +2433,25 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (AUTONEG_ENABLE != phydev->autoneg)
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ advert = phydev->advertising;
+ } else if (phydev->speed < SPEED_1000) {
return genphy_setup_forced(phydev);
+ } else {
+ linkmode_zero(fixed_advert);
+
+ set = phy_lookup_setting(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (set)
+ linkmode_set_bit(set->bit, fixed_advert);
+
+ advert = fixed_advert;
+ }
- err = genphy_config_advert(phydev);
+ err = genphy_config_advert(phydev, advert);
if (err < 0) /* error */
return err;
else if (err)
@@ -3330,7 +3407,7 @@ static int of_phy_led(struct phy_device *phydev,
static int of_phy_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- struct device_node *leds, *led;
+ struct device_node *leds;
int err;
if (!IS_ENABLED(CONFIG_OF_MDIO))
@@ -3343,10 +3420,9 @@ static int of_phy_leds(struct phy_device *phydev)
if (!leds)
return 0;
- for_each_available_child_of_node(leds, led) {
+ for_each_available_child_of_node_scoped(leds, led) {
err = of_phy_led(phydev, led);
if (err) {
- of_node_put(led);
of_node_put(leds);
phy_leds_unregister(phydev);
return err;
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
new file mode 100644
index 000000000000..4a5d73002a1a
--- /dev/null
+++ b/drivers/net/phy/phy_link_topology.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Infrastructure to handle all PHY devices connected to a given netdev,
+ * either directly or indirectly attached.
+ *
+ * Copyright (c) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/phy_link_topology.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include <linux/xarray.h>
+
+static int netdev_alloc_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo;
+
+ topo = kzalloc(sizeof(*topo), GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1);
+ topo->next_phy_index = 1;
+
+ dev->link_topo = topo;
+
+ return 0;
+}
+
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+ int ret;
+
+ if (!topo) {
+ ret = netdev_alloc_phy_link_topology(dev);
+ if (ret)
+ return ret;
+
+ topo = dev->link_topo;
+ }
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn)
+ return -ENOMEM;
+
+ pdn->phy = phy;
+ switch (upt) {
+ case PHY_UPSTREAM_MAC:
+ pdn->upstream.netdev = (struct net_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus;
+ break;
+ case PHY_UPSTREAM_PHY:
+ pdn->upstream.phydev = (struct phy_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ pdn->upstream_type = upt;
+
+ /* Attempt to re-use a previously allocated phy_index */
+ if (phy->phyindex)
+ ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL);
+ else
+ ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn,
+ xa_limit_32b, &topo->next_phy_index,
+ GFP_KERNEL);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(pdn);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_add_phy);
+
+void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return;
+
+ pdn = xa_erase(&topo->phys, phy->phyindex);
+
+ /* We delete the PHY from the topology, however we don't re-set the
+ * phy->phyindex field. If the PHY isn't gone, we can re-assign it the
+ * same index next time it's added back to the topology
+ */
+
+ kfree(pdn);
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_del_phy);
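
For illustration, a minimal sketch of walking the resulting topology, assuming only the fields used above (the dump helper itself is hypothetical, not part of the patch):

static void phy_topo_dump_sketch(struct net_device *dev)
{
	struct phy_link_topology *topo = dev->link_topo;
	struct phy_device_node *pdn;
	unsigned long index;

	if (!topo)
		return;

	/* Every PHY currently tracked for this netdev, keyed by phyindex */
	xa_for_each(&topo->phys, index, pdn)
		netdev_info(dev, "phy %lu: %s\n", index,
			    phydev_name(pdn->phy));
}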
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 51c526d227fa..4309317de3d1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1636,6 +1636,48 @@ static int phylink_register_sfp(struct phylink *pl,
}
/**
+ * phylink_set_fixed_link() - set the fixed link
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @state: a pointer to a struct phylink_link_state.
+ *
+ * This function is used when the link parameters are known and do not change,
+ * making it suitable for certain types of network connections.
+ *
+ * Returns: zero on success or negative error code.
+ */
+int phylink_set_fixed_link(struct phylink *pl,
+ const struct phylink_link_state *state)
+{
+ const struct phy_setting *s;
+ unsigned long *adv;
+
+ if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
+ return -EINVAL;
+
+ s = phy_lookup_setting(state->speed, state->duplex,
+ pl->supported, true);
+ if (!s)
+ return -EINVAL;
+
+ adv = pl->link_config.advertising;
+ linkmode_zero(adv);
+ linkmode_set_bit(s->bit, adv);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
+
+ pl->link_config.speed = state->speed;
+ pl->link_config.duplex = state->duplex;
+ pl->link_config.link = 1;
+ pl->link_config.an_complete = 1;
+
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_set_fixed_link);
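
A hypothetical caller, showing the intended calling convention (the surrounding driver context is assumed; as checked above, phylink must be in MLO_AN_PHY mode and stopped):

static int example_switch_to_fixed_link(struct phylink *pl)
{
	struct phylink_link_state state = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	/* From here on, phylink treats the link as fixed 1G/full */
	return phylink_set_fixed_link(pl, &state);
}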
+
+/**
* phylink_create() - create a phylink instance
* @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
@@ -3423,7 +3465,8 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
return ret;
}
-static void phylink_sfp_disconnect_phy(void *upstream)
+static void phylink_sfp_disconnect_phy(void *upstream,
+ struct phy_device *phydev)
{
phylink_disconnect_phy(upstream);
}
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index c8f83e5f78ab..105602581a03 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -770,6 +770,8 @@ static const struct sfp_upstream_ops at8031_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
.module_insert = at8031_sfp_insert,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int at8031_parse_dt(struct phy_device *phydev)
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 672c6929119a..bd8a51ec0ecd 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -699,6 +699,8 @@ static const struct sfp_upstream_ops qca807x_sfp_ops = {
.detach = phy_sfp_detach,
.module_insert = qca807x_sfp_insert,
.module_remove = qca807x_sfp_remove,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int qca807x_probe(struct phy_device *phydev)
@@ -733,16 +735,6 @@ static int qca807x_probe(struct phy_device *phydev)
"qcom,dac-disable-bias-current-tweak");
#if IS_ENABLED(CONFIG_GPIOLIB)
- /* Make sure we don't have mixed leds node and gpio-controller
- * to prevent registering leds and having gpio-controller usage
- * conflicting with them.
- */
- if (of_find_property(node, "leds", NULL) &&
- of_find_property(node, "gpio-controller", NULL)) {
- phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
- return -EINVAL;
- }
-
/* Do not register a GPIO controller unless flagged for it */
if (of_property_read_bool(node, "gpio-controller")) {
ret = qca807x_gpio(phydev);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 5d083ef0250e..a05d0df6fa16 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -15,7 +15,6 @@
#define QCA8327_A_PHY_ID 0x004dd033
#define QCA8327_B_PHY_ID 0x004dd034
#define QCA8337_PHY_ID 0x004dd036
-#define QCA8K_PHY_ID_MASK 0xffffffff
#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
@@ -216,8 +215,7 @@ static int qca8327_suspend(struct phy_device *phydev)
static struct phy_driver qca83xx_driver[] = {
{
/* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID),
.name = "Qualcomm Atheros 8337 internal PHY",
/* PHY_GBIT_FEATURES */
.probe = qca83xx_probe,
@@ -231,8 +229,7 @@ static struct phy_driver qca83xx_driver[] = {
.resume = qca83xx_resume,
}, {
/* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID),
.name = "Qualcomm Atheros 8327-A internal PHY",
/* PHY_GBIT_FEATURES */
.link_change_notify = qca83xx_link_change_notify,
@@ -247,8 +244,7 @@ static struct phy_driver qca83xx_driver[] = {
.resume = qca83xx_resume,
}, {
/* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
+ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID),
.name = "Qualcomm Atheros 8327-B internal PHY",
/* PHY_GBIT_FEATURES */
.link_change_notify = qca83xx_link_change_notify,
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
new file mode 100644
index 000000000000..28d8981f410b
--- /dev/null
+++ b/drivers/net/phy/qt2025.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) Tehuti Networks Ltd.
+// Copyright (C) 2024 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! Applied Micro Circuits Corporation QT2025 PHY driver
+//!
+//! This driver is based on the vendor driver `QT2025_phy.c`. This source
+//! and firmware can be downloaded on the EN-9320SFP+ support site.
+//!
+//! The QT2025 PHY integrates an Intel 8051 micro-controller.
+
+use kernel::c_str;
+use kernel::error::code;
+use kernel::firmware::Firmware;
+use kernel::net::phy::{
+ self,
+ reg::{Mmd, C45},
+ DeviceId, Driver,
+};
+use kernel::prelude::*;
+use kernel::sizes::{SZ_16K, SZ_8K};
+
+kernel::module_phy_driver! {
+ drivers: [PhyQT2025],
+ device_table: [
+ DeviceId::new_with_driver::<PhyQT2025>(),
+ ],
+ name: "qt2025_phy",
+ author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ description: "AMCC QT2025 PHY driver",
+ license: "GPL",
+ firmware: ["qt2025-2.0.3.3.fw"],
+}
+
+struct PhyQT2025;
+
+#[vtable]
+impl Driver for PhyQT2025 {
+    const NAME: &'static CStr = c_str!("QT2025 10Gbps SFP+");
+ const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x0043a400);
+
+ fn probe(dev: &mut phy::Device) -> Result<()> {
+ // Check the hardware revision code.
+ // Only 0x3b works with this driver and firmware.
+ let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?;
+ if (hw_rev >> 8) != 0xb3 {
+ return Err(code::ENODEV);
+ }
+
+ // `MICRO_RESETN`: hold the micro-controller in reset while configuring.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0000)?;
+ // `SREFCLK_FREQ`: configure clock frequency of the micro-controller.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc302), 0x0004)?;
+ // Non loopback mode.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc319), 0x0038)?;
+ // `CUS_LAN_WAN_CONFIG`: select between LAN and WAN (WIS) mode.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc31a), 0x0098)?;
+ // The following writes use standardized registers (3.38 through
+ // 3.41 5/10/25GBASE-R PCS test pattern seed B) for something else.
+ // We don't know what.
+ dev.write(C45::new(Mmd::PCS, 0x0026), 0x0e00)?;
+ dev.write(C45::new(Mmd::PCS, 0x0027), 0x0893)?;
+ dev.write(C45::new(Mmd::PCS, 0x0028), 0xa528)?;
+ dev.write(C45::new(Mmd::PCS, 0x0029), 0x0003)?;
+ // Configure transmit and recovered clock.
+ dev.write(C45::new(Mmd::PMAPMD, 0xa30a), 0x06e1)?;
+ // `MICRO_RESETN`: release the micro-controller from the reset state.
+ dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0002)?;
+ // The micro-controller will start running from the boot ROM.
+ dev.write(C45::new(Mmd::PCS, 0xe854), 0x00c0)?;
+
+ let fw = Firmware::request(c_str!("qt2025-2.0.3.3.fw"), dev.as_ref())?;
+ if fw.data().len() > SZ_16K + SZ_8K {
+ return Err(code::EFBIG);
+ }
+
+ // The 24kB of program memory space is accessible by MDIO.
+ // The first 16kB of memory is located in the address range 3.8000h - 3.BFFFh.
+ // The next 8kB of memory is located at 4.8000h - 4.9FFFh.
+ let mut dst_offset = 0;
+ let mut dst_mmd = Mmd::PCS;
+ for (src_idx, val) in fw.data().iter().enumerate() {
+ if src_idx == SZ_16K {
+ // Start writing to the next register with no offset
+ dst_offset = 0;
+ dst_mmd = Mmd::PHYXS;
+ }
+
+ dev.write(C45::new(dst_mmd, 0x8000 + dst_offset), (*val).into())?;
+
+ dst_offset += 1;
+ }
+ // The micro-controller will start running from SRAM.
+ dev.write(C45::new(Mmd::PCS, 0xe854), 0x0040)?;
+
+ // TODO: sleep here until the hw becomes ready.
+ Ok(())
+ }
+
+ fn read_status(dev: &mut phy::Device) -> Result<u16> {
+ dev.genphy_read_status::<C45>()
+ }
+}
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 2f44fc51848f..f13c00b5b449 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -487,7 +487,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
}
bus->registered = false;
}
@@ -722,6 +722,28 @@ void sfp_bus_del_upstream(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
+/**
+ * sfp_get_name() - Get the SFP device name
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Gets the SFP device's name, if @bus has a registered socket. Callers must
+ * hold RTNL, and the returned name is only valid until RTNL is released.
+ *
+ * Returns:
+ * - The name of the SFP device registered with sfp_register_socket()
+ * - %NULL if no device was registered on @bus
+ */
+const char *sfp_get_name(struct sfp_bus *bus)
+{
+ ASSERT_RTNL();
+
+ if (bus->sfp_dev)
+ return dev_name(bus->sfp_dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sfp_get_name);
+
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
{
@@ -743,7 +765,7 @@ void sfp_remove_phy(struct sfp_bus *bus)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
if (ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
bus->phydev = NULL;
}
EXPORT_SYMBOL_GPL(sfp_remove_phy);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 3b5fcaf0dd36..2377179de017 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -10,8 +10,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/bitfield.h>
/* Vitesse Extended Page Magic Register(s) */
+#define MII_VSC73XX_EXT_PAGE_1E 0x01
#define MII_VSC82X4_EXT_PAGE_16E 0x10
#define MII_VSC82X4_EXT_PAGE_17E 0x11
#define MII_VSC82X4_EXT_PAGE_18E 0x12
@@ -60,6 +62,28 @@
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC73XX Extended Control Register */
+#define MII_VSC73XX_PHY_CTRL_EXT3 0x14
+
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN BIT(4)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT GENMASK(3, 2)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_STA BIT(1)
+#define MII_VSC73XX_DOWNSHIFT_MAX 5
+#define MII_VSC73XX_DOWNSHIFT_INVAL 1
+
+/* VSC73XX PHY_BYPASS_CTRL register */
+#define MII_VSC73XX_PHY_BYPASS_CTRL MII_DCOUNTER
+#define MII_VSC73XX_PBC_TX_DIS BIT(15)
+#define MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS BIT(7)
+#define MII_VSC73XX_PBC_PAIR_SWAP_DIS BIT(5)
+#define MII_VSC73XX_PBC_POL_INV_DIS BIT(4)
+#define MII_VSC73XX_PBC_PARALLEL_DET_DIS BIT(3)
+#define MII_VSC73XX_PBC_AUTO_NP_EXCHANGE_DIS BIT(1)
+
+/* VSC73XX PHY_AUX_CTRL_STAT register */
+#define MII_VSC73XX_PHY_AUX_CTRL_STAT MII_NCONFIG
+#define MII_VSC73XX_PACS_NO_MDI_X_IND BIT(13)
+
/* Vitesse VSC8601 Extended PHY Control Register 1 */
#define MII_VSC8601_EPHY_CTL 0x17
#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
@@ -128,6 +152,74 @@ static int vsc73xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
}
+static int vsc73xx_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, enable, cnt;
+
+ val = phy_read_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT, val) + 2;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int vsc73xx_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, val;
+ int ret;
+
+ if (cnt > MII_VSC73XX_DOWNSHIFT_MAX)
+ return -E2BIG;
+ else if (cnt == MII_VSC73XX_DOWNSHIFT_INVAL)
+ return -EINVAL;
+
+ mask = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN;
+
+ if (!cnt) {
+ val = 0;
+ } else {
+ mask |= MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT;
+ val = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN |
+ FIELD_PREP(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT,
+ cnt - 2);
+ }
+
+ ret = phy_modify_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3, mask, val);
+ if (ret < 0)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int vsc73xx_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int vsc73xx_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void vsc73xx_config_init(struct phy_device *phydev)
{
/* Receiver init */
@@ -137,6 +229,12 @@ static void vsc73xx_config_init(struct phy_device *phydev)
/* Config LEDs 0x61 */
phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+
+ /* Enable downshift by default */
+ vsc73xx_set_downshift(phydev, MII_VSC73XX_DOWNSHIFT_MAX);
+
+ /* Set Auto MDI-X by default */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
}
static int vsc738x_config_init(struct phy_device *phydev)
@@ -237,6 +335,75 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
+static int vsc73xx_mdix_set(struct phy_device *phydev, u8 mdix)
+{
+ int ret;
+ u16 val;
+
+ val = phy_read(phydev, MII_VSC73XX_PHY_BYPASS_CTRL);
+
+ switch (mdix) {
+ case ETH_TP_MDI:
+ val |= MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS;
+ break;
+ case ETH_TP_MDI_X:
+ /* When MDI-X auto configuration is disabled, it is only
+ * possible to force MDI mode. Let's use autoconfig for
+ * forced MDI-X mode.
+ */
+ case ETH_TP_MDI_AUTO:
+ val &= ~(MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_write(phydev, MII_VSC73XX_PHY_BYPASS_CTRL, val);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static int vsc73xx_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = vsc73xx_mdix_set(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int vsc73xx_mdix_get(struct phy_device *phydev, u8 *mdix)
+{
+ u16 reg_val;
+
+ reg_val = phy_read(phydev, MII_VSC73XX_PHY_AUX_CTRL_STAT);
+ if (reg_val & MII_VSC73XX_PACS_NO_MDI_X_IND)
+ *mdix = ETH_TP_MDI;
+ else
+ *mdix = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int vsc73xx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = vsc73xx_mdix_get(phydev, &phydev->mdix);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@@ -434,32 +601,48 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7388,
.name = "Vitesse VSC7388",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7395,
.name = "Vitesse VSC7395",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7398,
.name = "Vitesse VSC7398",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb9acfcaeb09..4b2971e2bf48 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1631,7 +1631,7 @@ static void ppp_setup(struct net_device *dev)
dev->netdev_ops = &ppp_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &ppp_type);
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->hard_header_len = PPP_HDRLEN;
dev->mtu = PPP_MRU;
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 2ea75686a319..5c4e88be46ee 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -737,6 +738,7 @@ static int tps23881_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps23881_priv *priv;
+ struct gpio_desc *reset;
int ret;
u8 val;
@@ -749,6 +751,25 @@ static int tps23881_i2c_probe(struct i2c_client *client)
if (!priv)
return -ENOMEM;
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return dev_err_probe(&client->dev, PTR_ERR(reset), "Failed to get reset GPIO\n");
+
+ if (reset) {
+ /* TPS23880 datasheet (Rev G) indicates minimum reset pulse is 5us */
+ usleep_range(5, 10);
+ gpiod_set_value_cansleep(reset, 0); /* De-assert reset */
+
+ /* TPS23880 datasheet indicates the minimum time after power on reset
+ * should be 20ms, but the document describing how to load SRAM ("How
+ * to Load TPS2388x SRAM and Parity Code over I2C" (Rev E))
+ * indicates we should delay that programming by at least 50ms. So
+ * we'll wait the entire 50ms here to ensure we're safe to go to the
+ * SRAM loading procedure.
+ */
+ msleep(50);
+ }
+
ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID);
if (ret < 0)
return ret;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 4eececc94513..318a0ef1af50 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -515,7 +515,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* MTU range: 68 - 4082 */
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = RIONET_MAX_MTU;
- ndev->features = NETIF_F_LLTX;
+ ndev->lltx = true;
SET_NETDEV_DEV(ndev, &mport->dev);
ndev->ethtool_ops = &rionet_ethtool_ops;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index d591e33268e5..55aa8d0c8e1f 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -893,7 +893,7 @@ static const struct mii_phy_ops bcm5201_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5201_phy_def = {
+static const struct mii_phy_def bcm5201_phy_def = {
.phy_id = 0x00406210,
.phy_id_mask = 0xfffffff0,
.name = "BCM5201",
@@ -912,7 +912,7 @@ static const struct mii_phy_ops bcm5221_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5221_phy_def = {
+static const struct mii_phy_def bcm5221_phy_def = {
.phy_id = 0x004061e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5221",
@@ -930,7 +930,8 @@ static const struct mii_phy_ops bcm5241_phy_ops = {
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5241_phy_def = {
+
+static const struct mii_phy_def bcm5241_phy_def = {
.phy_id = 0x0143bc30,
.phy_id_mask = 0xfffffff0,
.name = "BCM5241",
@@ -949,7 +950,7 @@ static const struct mii_phy_ops bcm5400_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5400_phy_def = {
+static const struct mii_phy_def bcm5400_phy_def = {
.phy_id = 0x00206040,
.phy_id_mask = 0xfffffff0,
.name = "BCM5400",
@@ -968,7 +969,7 @@ static const struct mii_phy_ops bcm5401_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5401_phy_def = {
+static const struct mii_phy_def bcm5401_phy_def = {
.phy_id = 0x00206050,
.phy_id_mask = 0xfffffff0,
.name = "BCM5401",
@@ -987,7 +988,7 @@ static const struct mii_phy_ops bcm5411_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5411_phy_def = {
+static const struct mii_phy_def bcm5411_phy_def = {
.phy_id = 0x00206070,
.phy_id_mask = 0xfffffff0,
.name = "BCM5411",
@@ -1007,7 +1008,7 @@ static const struct mii_phy_ops bcm5421_phy_ops = {
.enable_fiber = bcm5421_enable_fiber,
};
-static struct mii_phy_def bcm5421_phy_def = {
+static const struct mii_phy_def bcm5421_phy_def = {
.phy_id = 0x002060e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421",
@@ -1026,7 +1027,7 @@ static const struct mii_phy_ops bcm5421k2_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5421k2_phy_def = {
+static const struct mii_phy_def bcm5421k2_phy_def = {
.phy_id = 0x002062e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421-K2",
@@ -1045,7 +1046,7 @@ static const struct mii_phy_ops bcm5461_phy_ops = {
.enable_fiber = bcm5461_enable_fiber,
};
-static struct mii_phy_def bcm5461_phy_def = {
+static const struct mii_phy_def bcm5461_phy_def = {
.phy_id = 0x002060c0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5461",
@@ -1064,7 +1065,7 @@ static const struct mii_phy_ops bcm5462V_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5462V_phy_def = {
+static const struct mii_phy_def bcm5462V_phy_def = {
.phy_id = 0x002060d0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5462-Vesta",
@@ -1094,7 +1095,7 @@ static const struct mii_phy_ops marvell88e1111_phy_ops = {
/* two revs in darwin for the 88e1101 ... I could use a datasheet
* to get the proper names...
*/
-static struct mii_phy_def marvell88e1101v1_phy_def = {
+static const struct mii_phy_def marvell88e1101v1_phy_def = {
.phy_id = 0x01410c20,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v1",
@@ -1102,7 +1103,8 @@ static struct mii_phy_def marvell88e1101v1_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1101v2_phy_def = {
+
+static const struct mii_phy_def marvell88e1101v2_phy_def = {
.phy_id = 0x01410c60,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v2",
@@ -1110,7 +1112,8 @@ static struct mii_phy_def marvell88e1101v2_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1111_phy_def = {
+
+static const struct mii_phy_def marvell88e1111_phy_def = {
.phy_id = 0x01410cc0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1111",
@@ -1127,7 +1130,7 @@ static const struct mii_phy_ops generic_phy_ops = {
.read_link = genmii_read_link
};
-static struct mii_phy_def genmii_phy_def = {
+static const struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
@@ -1136,7 +1139,7 @@ static struct mii_phy_def genmii_phy_def = {
.ops = &generic_phy_ops
};
-static struct mii_phy_def* mii_phy_table[] = {
+static const struct mii_phy_def *mii_phy_table[] = {
&bcm5201_phy_def,
&bcm5221_phy_def,
&bcm5241_phy_def,
@@ -1156,9 +1159,9 @@ static struct mii_phy_def* mii_phy_table[] = {
int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
+ const struct mii_phy_def *def;
int rc;
u32 id;
- struct mii_phy_def* def;
int i;
/* We do not reset the mii_phy structure as the driver
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index ab1935a4aa2c..18191d5a8bd4 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -2189,12 +2189,12 @@ static void team_setup(struct net_device *dev)
* Let this up to underlay drivers.
*/
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
-
- dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_GRO;
+ dev->lltx = true;
/* Don't allow team devices to change network namespaces. */
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
+
+ dev->features |= NETIF_F_GRO;
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d06c560c5e6..5f77faef0ff1 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -990,10 +990,11 @@ static int tun_net_init(struct net_device *dev)
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->features = dev->hw_features;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+ dev->lltx = true;
tun->flags = (tun->flags & ~TUN_FEATURES) |
(ifr->ifr_flags & TUN_FEATURES);
@@ -1129,7 +1130,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- /* NETIF_F_LLTX requires to do our own update of trans_start */
+ /* dev->lltx requires us to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
txq_trans_cond_update(queue);
@@ -3451,6 +3452,12 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
struct tun_file *tfile = file->private_data;
int ret;
+ if (on) {
+ ret = file_f_owner_allocate(file);
+ if (ret)
+ goto out;
+ }
+
if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
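
The trans_start comment above is the practical cost of dev->lltx: the stack skips the per-queue TX lock for such devices, and with it the timestamp update the TX watchdog relies on. A hedged sketch of the resulting pattern, with placeholder foo_* names:

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	/* lltx devices must refresh trans_start themselves so the TX
	 * watchdog does not see a stale timestamp
	 */
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	txq_trans_cond_update(txq);

	/* ... hand the skb to the device; consumed here as a placeholder */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void foo_setup(struct net_device *dev)
{
	dev->lltx = true;	/* replaces dev->features |= NETIF_F_LLTX */
}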
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 6d61052353f0..a6469235d904 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -418,7 +418,8 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
event->wValue ? "on" : "off");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 426e68a95067..18148e068aa0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1696,11 +1696,12 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ dev->lltx = true;
dev->netdev_ops = &veth_netdev_ops;
dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
dev->ethtool_ops = &veth_ethtool_ops;
- dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c6af18948092..6f4781ec2b36 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -356,6 +356,9 @@ struct receive_queue {
struct xdp_rxq_info xsk_rxq_info;
struct xdp_buff **xsk_buffs;
+
+ /* Do DMA mapping ourselves */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf)
+ if (buf && rq->do_dma)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
head = page_address(rq->alloc_frag.page);
offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- dma = head;
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->len = alloc_frag->size - sizeof(*dma);
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ rq->last_dma = dma;
+ }
- rq->last_dma = dma;
+ ++dma->ref;
}
- ++dma->ref;
-
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
return buf;
}
-static void virtnet_rq_set_premapped(struct virtnet_info *vi)
-{
- int i;
-
- /* disable for big mode */
- if (!vi->mergeable_rx_bufs && vi->big_packets)
- return;
-
- for (i = 0; i < vi->max_queue_pairs; i++)
- /* error should never happen */
- BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
-}
-
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
return;
}
- if (!vi->big_packets || vi->mergeable_rx_bufs)
+ if (rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -2430,7 +2427,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2544,7 +2542,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2701,7 +2700,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
}
} else {
while (packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
@@ -2885,6 +2884,25 @@ static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
net_dim_work_cancel(dim);
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2903,6 +2921,15 @@ static int virtnet_open(struct net_device *dev)
goto err_enable_qp;
}
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ if (vi->status & VIRTIO_NET_S_LINK_UP)
+ netif_carrier_on(vi->dev);
+ virtio_config_driver_enable(vi->vdev);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
+
return 0;
err_enable_qp:
@@ -3381,12 +3408,22 @@ static int virtnet_close(struct net_device *dev)
disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
+ /* Prevent the config change callback from changing carrier
+ * after close
+ */
+ virtio_config_driver_disable(vi->vdev);
+ /* Stop getting status/speed updates: we don't care until next
+ * open
+ */
+ cancel_work_sync(&vi->config_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
+ netif_carrier_off(dev);
+
return 0;
}
@@ -5095,25 +5132,6 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
-static void virtnet_update_settings(struct virtnet_info *vi)
-{
- u32 speed;
- u8 duplex;
-
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
- return;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
-
- if (ethtool_validate_speed(speed))
- vi->speed = speed;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
-
- if (ethtool_validate_duplex(duplex))
- vi->duplex = duplex;
-}
-
static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{
return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
@@ -5892,7 +5910,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].last_dma)
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -6090,8 +6108,6 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_rq_set_premapped(vi);
-
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
@@ -6524,6 +6540,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_failover;
}
+ /* Disable config change notification until ndo_open. */
+ virtio_config_driver_disable(vi->vdev);
+
virtio_device_ready(vdev);
virtnet_set_queues(vi, vi->curr_queue_pairs);
@@ -6573,19 +6592,11 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->device_stats_cap = le64_to_cpu(v);
}
- rtnl_unlock();
-
- err = virtnet_cpu_notif_add(vi);
- if (err) {
- pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_unregister_netdev;
- }
-
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
- schedule_work(&vi->config_work);
+ virtnet_config_changed_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_settings(vi);
@@ -6597,6 +6608,14 @@ static int virtnet_probe(struct virtio_device *vdev)
set_bit(guest_offloads[i], &vi->guest_offloads);
vi->guest_offloads_capable = vi->guest_offloads;
+ rtnl_unlock();
+
+ err = virtnet_cpu_notif_add(vi);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_unregister_netdev;
+ }
+
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
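
Taken together, the probe/open/close hunks above implement one rule: the config-change callback may only run while the device is open. A hedged condensation of the lifecycle, with vi/vdev/dev as in the driver:

	/* probe: callbacks masked before the device goes live */
	virtio_config_driver_disable(vi->vdev);
	virtio_device_ready(vdev);

	/* open: resync carrier from the cached status, then unmask */
	if (vi->status & VIRTIO_NET_S_LINK_UP)
		netif_carrier_on(vi->dev);
	virtio_config_driver_enable(vi->vdev);

	/* close: mask first, then flush queued work so no stale update
	 * can flip the carrier after close
	 */
	virtio_config_driver_disable(vi->vdev);
	cancel_work_sync(&vi->config_work);
	netif_carrier_off(dev);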
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 040f0bb36c0e..4d8ccaf9a2b4 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -37,6 +37,7 @@
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/inet_dscp.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -520,7 +521,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
/* needed to match OIF rule */
fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.flowi4_tos = RT_TOS(ip4h->tos);
+ fl4.flowi4_tos = ip4h->tos & INET_DSCP_MASK;
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
@@ -1634,10 +1635,10 @@ static void vrf_setup(struct net_device *dev)
eth_hw_addr_random(dev);
/* don't acquire vrf device's netif_tx_lock when transmitting */
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
/* don't allow vrf devices to change network namespaces. */
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
/* does not make sense for a VLAN to be added to a vrf device */
dev->features |= NETIF_F_VLAN_CHALLENGED;
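
The flowi4_tos change is not cosmetic: RT_TOS() keeps only the legacy 4-bit TOS field, while INET_DSCP_MASK (0xfc) keeps the full 6-bit DSCP and strips the two ECN bits. A hedged worked example:

	u8 tos = 0xb9;			/* DSCP 46 (EF) with an ECN bit set */

	u8 legacy = RT_TOS(tos);		/* 0x18: legacy 4-bit TOS only */
	u8 dscp = tos & INET_DSCP_MASK;	/* 0xb8: full DSCP, ECN cleared */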
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 4c260074c091..53fb76d574c6 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -83,13 +83,13 @@ static void vsockmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_VSOCKMON;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->netdev_ops = &vsockmon_ops;
dev->ethtool_ops = &vsockmon_ethtool_ops;
dev->needs_free_netdev = true;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
- NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
dev->flags = IFF_NOARP;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index ba59e92ab941..53dcb9fffc04 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -277,8 +277,7 @@ static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
@@ -2690,11 +2689,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
- bool did_rsc = false;
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
u32 nhid = 0;
+ bool did_rsc;
info = skb_tunnel_info(skb);
@@ -3322,7 +3321,6 @@ static void vxlan_setup(struct net_device *dev)
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -3332,7 +3330,9 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->change_proto_down = true;
+ dev->lltx = true;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 3feb36ee5bfb..45e9b908dbfb 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -289,7 +289,7 @@ static void wg_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->priv_flags |= IFF_NO_QUEUE;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->features |= WG_NETDEV_FEATURES;
dev->hw_features |= WG_NETDEV_FEATURES;
dev->hw_enc_features |= WG_NETDEV_FEATURES;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index b93a64bf8190..35bfe7232e95 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1774,7 +1774,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
if (!arvif->is_started)
return -EINVAL;
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a5da32e87106..646e1737d4c4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1437,7 +1437,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
* by indicating that radar was detected.
*/
ath10k_warn(ar, "failed to start CAC: %d\n", ret);
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index fe2344598364..4861179b2217 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3990,7 +3990,7 @@ static void ath10k_radar_detected(struct ath10k *ar)
if (ar->dfs_block_radar_events)
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
static void ath10k_radar_confirmation_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index b655967a465b..09c37e19a168 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -399,6 +399,7 @@ struct ath11k_vif {
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
struct delayed_work connection_loss_work;
+ struct work_struct bcn_tx_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
@@ -406,11 +407,17 @@ struct ath11k_vif {
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
- struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
struct ath11k_reg_tpc_power_info reg_tpc_info;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
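
The reason chanctx must move to the end of ath11k_vif (and of ath12k_vif below) is that struct ieee80211_chanctx_conf ends in a flexible-array member, so any field placed after an embedded copy would overlap the array's storage. A hedged illustration; GCC accepts the bad layout, which is exactly what makes it dangerous:

struct inner {
	int n;
	int data[];		/* flexible-array member */
};

struct bad {
	struct inner i;		/* writes to i.data[] clobber 'after' */
	int after;
};

struct ok {
	int before;
	struct inner i;		/* safe only as the last member */
};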
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 2f6dd69d3be2..65d2bc0687c8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1305,18 +1305,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- u32 tx_success_bytes;
- u32 tx_retry_bytes;
- u32 tx_failed_bytes;
- u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- u16 tx_success_msdus;
- u16 tx_retry_msdus;
- u16 tx_failed_msdus;
- u16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1364,17 +1352,6 @@ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
u32 success_bytes;
} __packed;
-struct htt_ppdu_stats_usr_cmn_array {
- struct htt_tlv tlv_hdr;
- u32 num_ppdu_stats;
- /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
- * elements.
- * tx_ppdu_stats_info is variable length, with length =
- * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
- */
- struct htt_tx_ppdu_stats_info tx_ppdu_info[];
-} __packed;
-
struct htt_ppdu_user_stats {
u16 peer_id;
u32 tlv_flags;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 86485580dd89..c087d8a0f5b2 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2697,7 +2697,7 @@ try_again:
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->soc_stats.hal_reo_error[ring_id]++;
continue;
}
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 7c0ef6916dd2..f8068d2e848c 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -6599,6 +6599,16 @@ static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
return ret;
}
+static void ath11k_mac_bcn_tx_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ bcn_tx_work);
+
+ mutex_lock(&arvif->ar->conf_mutex);
+ ath11k_mac_bcn_tx_event(arvif);
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -6637,6 +6647,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work);
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath11k_mac_vif_sta_connection_loss_work);
@@ -6879,6 +6890,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
int i;
cancel_delayed_work_sync(&arvif->connection_loss_work);
+ cancel_work_sync(&arvif->bcn_tx_work);
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 38f175dd1557..87abfa547529 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -7404,7 +7404,9 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
return;
}
- ath11k_mac_bcn_tx_event(arvif);
+
+ queue_work(ab->workqueue, &arvif->bcn_tx_work);
+
rcu_read_unlock();
}
@@ -8356,7 +8358,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
exit:
rcu_read_unlock();
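
The workqueue indirection above exists because ath11k_bcn_tx_status_event() runs under rcu_read_lock(), where taking the sleeping conf_mutex is illegal; queue_work() defers the real handling to process context. Reduced to its essentials:

	/* atomic event path: only schedule */
	queue_work(ab->workqueue, &arvif->bcn_tx_work);

	/* worker, process context: sleeping locks are fine */
	mutex_lock(&arvif->ar->conf_mutex);
	ath11k_mac_bcn_tx_event(arvif);
	mutex_unlock(&arvif->ar->conf_mutex);

The cancel_work_sync() added to ath11k_mac_op_remove_interface() then guarantees the worker cannot outlive the vif.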
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index cdfd43a7321a..7f2e9a9b4097 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -287,7 +287,6 @@ struct ath12k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u32 key_cipher;
u8 tx_encap_type;
u8 vdev_stats_id;
@@ -295,6 +294,13 @@ struct ath12k_vif {
bool ps;
struct ath12k_vif_cache *cache;
struct ath12k_rekey_data rekey_data;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath12k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index ce80e7b5175b..f1b7e74aefe4 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1117,6 +1117,336 @@ ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ le32_to_cpu(htt_stats_buf->not_to_fw));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev_vdev_peer));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_invalid_addrx));
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->wbm2fw_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_pdev));
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_res_addrx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev));
+ len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_tcl_exp_frame_desc));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id_mismatch_count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->vdev_id_mismatch_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m1_packets));
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m2_packets));
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m3_packets));
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->m4_packets));
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g1_packets));
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->g2_packets));
+ len += scnprintf(buf + len, buf_len - len, "rc4_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->rc4_packets));
+ len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eap_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_start_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_logoff_packets));
+ len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_encap_asf_packets));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->arp_packets));
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->igmp_packets));
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ le32_to_cpu(htt_stats_buf->dhcp_packets));
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ le32_to_cpu(htt_stats_buf->host_inspected));
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_included));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_mcs));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_nss));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_preamble_type));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_chainmask));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_guard_interval));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_retries));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_bw_info));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_power));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_key_flags));
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_valid_no_encryption));
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_entry_count));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_high));
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_priority_low));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_be));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_over_sub));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_bursty));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_interactive));
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_periodic));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_alloc));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_created));
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_hwqueue_send_to_host));
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->mcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->bcast_entry));
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_update_peer_cache));
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->htt_learning_frame));
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_invalid_peer));
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mec_notify));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bss_peer_not_found));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_no_peer));
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->sta_delete_in_progress));
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ibss_no_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_vdev_type));
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_ast_peer_entry));
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->peer_entry_invalid));
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ le32_to_cpu(htt_stats_buf->ethertype_not_ip));
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ le32_to_cpu(htt_stats_buf->qpeer_not_allow_data));
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_tid_override));
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ le32_to_cpu(htt_stats_buf->ipv6_jumbogram_zero_length));
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_to_non_qos_in_prog));
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_bcast_mcast_eapol));
+ len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->unicast_on_ap_bss_peer));
+ len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n",
+ le32_to_cpu(htt_stats_buf->ap_vdev_invalid));
+ len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n",
+ le32_to_cpu(htt_stats_buf->incomplete_llc));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m3));
+ len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->eapol_duplicate_m4));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ le32_to_cpu(htt_stats_buf->eok));
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ le32_to_cpu(htt_stats_buf->classify_done));
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->lookup_failed));
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_dhcp));
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_mcast));
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host_unknown_dest));
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ le32_to_cpu(htt_stats_buf->send_host));
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ le32_to_cpu(htt_stats_buf->status_invalid));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueued_pkts));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->to_tqm));
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ le32_to_cpu(htt_stats_buf->to_tqm_bypass));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ le32_to_cpu(htt_stats_buf->discarded_pkts));
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ le32_to_cpu(htt_stats_buf->local_frames));
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ le32_to_cpu(htt_stats_buf->is_ext_msdu));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tcl_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_dummy_frame));
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_notify_frame));
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ le32_to_cpu(htt_stats_buf->fw2wbm_enq));
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_bypass_frame));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -1198,6 +1528,30 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_TQM_PDEV_TAG:
ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_DE_CMN_TAG:
+ ath12k_htt_print_tx_de_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ ath12k_htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ ath12k_htt_print_tx_de_classify_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ ath12k_htt_print_tx_de_classify_failed_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ ath12k_htt_print_tx_de_classify_status_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
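
Each of the new printers follows the same defensive pattern: reject the TLV if the firmware-supplied tag length is shorter than the expected struct, then append with scnprintf() so the output can never overrun ATH12K_HTT_STATS_BUF_SIZE. A hedged distillation of that skeleton; the example TLV struct is hypothetical:

struct ath12k_htt_example_tlv {	/* hypothetical TLV payload */
	__le32 example;
} __packed;

static void
ath12k_htt_print_example_tlv(const void *tag_buf, u16 tag_len,
			     struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_example_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;

	/* firmware controls tag_len; never trust it to cover the struct */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	/* scnprintf() returns what was written, not what was wanted,
	 * so len can approach but never exceed buf_len
	 */
	len += scnprintf(buf + len, buf_len - len, "example = %u\n",
			 le32_to_cpu(htt_stats_buf->example));

	stats_req->buf_len = len;
}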
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index 6294a082cf8a..d52b26b23e65 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -128,6 +128,7 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -143,6 +144,13 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
HTT_STATS_TX_TQM_CMN_TAG = 14,
HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
@@ -150,6 +158,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
HTT_STATS_HW_INTR_MISC_TAG = 54,
HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
HTT_STATS_WHAL_TX_TAG = 66,
HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
@@ -564,4 +573,121 @@ struct ath12k_htt_tx_tqm_pdev_stats_tlv {
__le32 sched_nonudp_notify2;
} __packed;
+struct ath12k_htt_tx_de_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 tcl2fw_entry_count;
+ __le32 not_to_fw;
+ __le32 invalid_pdev_vdev_peer;
+ __le32 tcl_res_invalid_addrx;
+ __le32 wbm2fw_entry_count;
+ __le32 invalid_pdev;
+ __le32 tcl_res_addrx_timeout;
+ __le32 invalid_vdev;
+ __le32 invalid_tcl_exp_frame_desc;
+ __le32 vdev_id_mismatch_cnt;
+} __packed;
+
+struct ath12k_htt_tx_de_eapol_packets_stats_tlv {
+ __le32 m1_packets;
+ __le32 m2_packets;
+ __le32 m3_packets;
+ __le32 m4_packets;
+ __le32 g1_packets;
+ __le32 g2_packets;
+ __le32 rc4_packets;
+ __le32 eap_packets;
+ __le32 eapol_start_packets;
+ __le32 eapol_logoff_packets;
+ __le32 eapol_encap_asf_packets;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_stats_tlv {
+ __le32 arp_packets;
+ __le32 igmp_packets;
+ __le32 dhcp_packets;
+ __le32 host_inspected;
+ __le32 htt_included;
+ __le32 htt_valid_mcs;
+ __le32 htt_valid_nss;
+ __le32 htt_valid_preamble_type;
+ __le32 htt_valid_chainmask;
+ __le32 htt_valid_guard_interval;
+ __le32 htt_valid_retries;
+ __le32 htt_valid_bw_info;
+ __le32 htt_valid_power;
+ __le32 htt_valid_key_flags;
+ __le32 htt_valid_no_encryption;
+ __le32 fse_entry_count;
+ __le32 fse_priority_be;
+ __le32 fse_priority_high;
+ __le32 fse_priority_low;
+ __le32 fse_traffic_ptrn_be;
+ __le32 fse_traffic_ptrn_over_sub;
+ __le32 fse_traffic_ptrn_bursty;
+ __le32 fse_traffic_ptrn_interactive;
+ __le32 fse_traffic_ptrn_periodic;
+ __le32 fse_hwqueue_alloc;
+ __le32 fse_hwqueue_created;
+ __le32 fse_hwqueue_send_to_host;
+ __le32 mcast_entry;
+ __le32 bcast_entry;
+ __le32 htt_update_peer_cache;
+ __le32 htt_learning_frame;
+ __le32 fse_invalid_peer;
+ __le32 mec_notify;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_failed_stats_tlv {
+ __le32 ap_bss_peer_not_found;
+ __le32 ap_bcast_mcast_no_peer;
+ __le32 sta_delete_in_progress;
+ __le32 ibss_no_bss_peer;
+ __le32 invalid_vdev_type;
+ __le32 invalid_ast_peer_entry;
+ __le32 peer_entry_invalid;
+ __le32 ethertype_not_ip;
+ __le32 eapol_lookup_failed;
+ __le32 qpeer_not_allow_data;
+ __le32 fse_tid_override;
+ __le32 ipv6_jumbogram_zero_length;
+ __le32 qos_to_non_qos_in_prog;
+ __le32 ap_bcast_mcast_eapol;
+ __le32 unicast_on_ap_bss_peer;
+ __le32 ap_vdev_invalid;
+ __le32 incomplete_llc;
+ __le32 eapol_duplicate_m3;
+ __le32 eapol_duplicate_m4;
+} __packed;
+
+struct ath12k_htt_tx_de_classify_status_stats_tlv {
+ __le32 eok;
+ __le32 classify_done;
+ __le32 lookup_failed;
+ __le32 send_host_dhcp;
+ __le32 send_host_mcast;
+ __le32 send_host_unknown_dest;
+ __le32 send_host;
+ __le32 status_invalid;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_packets_stats_tlv {
+ __le32 enqueued_pkts;
+ __le32 to_tqm;
+ __le32 to_tqm_bypass;
+} __packed;
+
+struct ath12k_htt_tx_de_enqueue_discard_stats_tlv {
+ __le32 discarded_pkts;
+ __le32 local_frames;
+ __le32 is_ext_msdu;
+} __packed;
+
+struct ath12k_htt_tx_de_compl_stats_tlv {
+ __le32 tcl_dummy_frame;
+ __le32 tqm_dummy_frame;
+ __le32 tqm_notify_frame;
+ __le32 fw2wbm_enq;
+ __le32 tqm_bypass_frame;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index b77497c14ac4..2fb18b83b3ee 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1495,18 +1495,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- __le32 tx_success_bytes;
- __le32 tx_retry_bytes;
- __le32 tx_failed_bytes;
- __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- __le16 tx_success_msdus;
- __le16 tx_retry_msdus;
- __le16 tx_failed_msdus;
- __le16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 14236d0a0c89..91e3393f7b5f 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -2681,7 +2681,7 @@ try_again:
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ ab->soc_stats.hal_reo_error[ring_id]++;
continue;
}
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 7b0b6a7f4701..ec1bda95e555 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -926,6 +926,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_dynamic_smps_6ghz = true,
.iova_mask = 0,
+
+ .supports_aspm = false,
},
{
.name = "wcn7850 hw2.0",
@@ -1004,6 +1006,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_dynamic_smps_6ghz = false,
.iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
+
+ .supports_aspm = true,
},
{
.name = "qcn9274 hw2.0",
@@ -1078,6 +1082,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_dynamic_smps_6ghz = true,
.iova_mask = 0,
+
+ .supports_aspm = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index b1d302c48326..8d52182e28ae 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -189,6 +189,7 @@ struct ath12k_hw_params {
bool tcl_ring_retry:1;
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
+ bool supports_aspm:1;
u32 num_tcl_banks;
u32 max_tx_ring;
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index ce41c8153080..137394c36460 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -2196,9 +2196,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
* request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
* length.
*/
- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
if (sta->deflink.vht_cap.vht_supported)
@@ -3664,7 +3663,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct ath12k_wmi_scan_req_arg arg = {};
+ struct ath12k_wmi_scan_req_arg *arg = NULL;
int ret;
int i;
bool create = true;
@@ -3746,42 +3745,47 @@ scan:
if (ret)
goto exit;
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++)
- arg.ssid[i] = req->ssids[i];
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++)
+ arg->ssid[i] = req->ssids[i];
} else {
- arg.scan_f_passive = 1;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
-
- if (!arg.chan_list) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
- ret = ath12k_start_scan(ar, &arg);
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3791,14 +3795,15 @@ scan:
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
+ msecs_to_jiffies(arg->max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
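
The hw_scan hunk above moves struct ath12k_wmi_scan_req_arg off the kernel stack, presumably because its embedded SSID and channel bookkeeping makes the on-stack frame uncomfortably large; the struct is now kzalloc'd and released through a single exit path (kfree(NULL) is a no-op, which is also why the old "if (req->ie_len)" guard around the extraie free could be dropped). A minimal userspace sketch of the same pattern, with illustrative names rather than the driver's real types:

/* Sketch only: scan_arg and start_scan() are hypothetical stand-ins. */
#include <errno.h>
#include <stdlib.h>

struct scan_arg {
	unsigned int vdev_id;
	unsigned int num_chan;
	unsigned int *chan_list;	/* separately allocated */
};

static int start_scan(const struct scan_arg *arg) { (void)arg; return 0; }

static int do_scan(unsigned int vdev_id, unsigned int n_chan)
{
	struct scan_arg *arg;
	int ret;

	arg = calloc(1, sizeof(*arg));	/* kzalloc(..., GFP_KERNEL) in-kernel */
	if (!arg)
		return -ENOMEM;

	arg->vdev_id = vdev_id;
	arg->num_chan = n_chan;
	arg->chan_list = calloc(n_chan, sizeof(*arg->chan_list));
	if (!arg->chan_list) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = start_scan(arg);
exit:
	free(arg->chan_list);	/* free(NULL) is a no-op, like kfree() */
	free(arg);
	return ret;
}
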
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 9e0b9e329bda..bd269aa1740b 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -954,7 +954,8 @@ static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
- if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ if (ab_pci->ab->hw_params->supports_aspm &&
+ test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
ab_pci->link_ctl &
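
The new supports_aspm hw_param gates the ASPM restore write per chip, so hardware for which ASPM was never enabled is left untouched. The shape of the check, sketched with stand-in types (the real code uses test_and_clear_bit() and writes PCI_EXP_LNKCTL):

/* Illustrative capability-flag gating; names are not the driver's. */
#include <stdbool.h>

struct hw_params { bool supports_aspm; };
struct pci_ctx {
	const struct hw_params *hw;
	unsigned long flags;
};

#define ASPM_RESTORE	0x1UL

static void aspm_restore(struct pci_ctx *p)
{
	/* short-circuit: chips without ASPM never clear or act on the bit */
	if (p->hw->supports_aspm && (p->flags & ASPM_RESTORE)) {
		p->flags &= ~ASPM_RESTORE;
		/* the real driver rewrites the PCIe link control word here */
	}
}
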
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9f6be557365e..2cd3ff9b0164 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1538,6 +1538,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
sizeof(*cmd));
cmd->req_type = cpu_to_le32(type);
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -6788,7 +6789,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar));
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index f1f52175a52b..6a913f9b8315 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -3121,6 +3121,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
__le32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
__le32 req_type;
+ __le32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -4085,7 +4086,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- __le32 pdev_id;
__le32 freq; /* Units in MHz */
__le32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4103,6 +4103,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
__le32 rx_bss_cycle_count_low;
__le32 rx_bss_cycle_count_high;
+ __le32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index a5eb43f30320..004ca5f536be 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -65,7 +65,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
dev_info(&pdev->dev, "fixup device configuration\n");
- mem = pcim_iomap(pdev, 0, 0);
+ mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "ioremap error\n");
return -EINVAL;
@@ -103,7 +103,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
pci_write_config_word(pdev, PCI_COMMAND, cmd);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0);
- pcim_iounmap(pdev, mem);
+ pci_iounmap(pdev, mem);
pci_disable_device(pdev);
@@ -200,11 +200,9 @@ static int owl_probe(struct pci_dev *pdev,
const char *eeprom_name;
int err = 0;
- if (pcim_enable_device(pdev))
+ if (pci_enable_device(pdev))
return -EIO;
- pcim_pin_device(pdev);
-
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index fb270df75eb2..4b331c85509c 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -32,11 +32,8 @@ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
- if (sort[j] > sort[j - 1]) {
- nfval = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = nfval;
- }
+ if (sort[j] > sort[j - 1])
+ swap(sort[j], sort[j - 1]);
}
}
nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
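
The calib.c change replaces the open-coded three-assignment exchange in the bubble sort with the kernel's swap() helper, which is essentially:

/* typeof-based exchange, as in the kernel's swap() macro */
#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
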
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index d84e3ee7b5d9..51abc470125b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1325,11 +1325,11 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
int i = 0;
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
@@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
sc->hw->wiphy->debugfsdir);
- if (IS_ERR(sc->debug.debugfs_phy))
- return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG
debugfs_create_file("debug", 0600, sc->debug.debugfs_phy,
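
The (u64) casts above fix a silent truncation: the per-queue counters are u32, and without a widening cast the four-way sums are computed in 32-bit arithmetic, wrapping before the result is stored into the 64-bit ethtool slot. Casting the first operand promotes the whole addition chain. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t a = 0xC0000000u, b = 0xC0000000u;
	uint64_t wrong = a + b;            /* 32-bit add wraps to 0x80000000 */
	uint64_t right = (uint64_t)a + b;  /* widened add: 0x180000000 */

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
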
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 11349218bc21..3689e12db9f7 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -280,7 +280,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
if (!pd->add_pulse(pd, pe, NULL))
return;
DFS_STAT_INC(sc, radar_detected);
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 8e18e9b4ef48..426caa057396 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -116,7 +116,7 @@ static ssize_t write_file_simulate_radar(struct file *file,
{
struct ath_softc *sc = file->private_data;
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 0c7841f95228..a3733c9b484e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
}
resubmit:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ESHUTDOWN:
goto free_skb;
default:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
goto resubmit;
}
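
Both URB completion paths reset the receive skb to zero length before resubmitting; __skb_set_length(skb, 0) performs the length update and the tail-pointer reset that the skb_reset_tail_pointer()/skb_trim() pair did in two steps. For a linear skb it reduces to roughly:

/* Toy model of __skb_set_length() on a linear buffer; the real
 * struct sk_buff has many more fields and a non-linear WARN check. */
struct toy_skb {
	unsigned char *data;
	unsigned char *tail;
	unsigned int len;
};

static void toy_skb_set_length(struct toy_skb *skb, unsigned int len)
{
	skb->len = len;
	skb->tail = skb->data + len;	/* skb_set_tail_pointer() */
}
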
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index f7c6d9bc9311..9437d69877cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
priv->hw->wiphy->debugfsdir);
- if (IS_ERR(priv->debug.debugfs_phy))
- return -ENOMEM;
ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5982e0db45f9..04a4b9ea61c3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2732,7 +2732,7 @@ static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
if (ah->caps.gpio_requested & BIT(gpio))
return;
- err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
+ err = devm_gpio_request_one(ah->dev, gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
if (err) {
ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
gpio, err);
@@ -2801,10 +2801,8 @@ void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
WARN_ON(gpio >= ah->caps.num_gpio_pins);
- if (ah->caps.gpio_requested & BIT(gpio)) {
- gpio_free(gpio);
+ if (ah->caps.gpio_requested & BIT(gpio))
ah->caps.gpio_requested &= ~BIT(gpio);
- }
}
EXPORT_SYMBOL(ath9k_hw_gpio_free);
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index 71a7cd8dc787..fb70a1e3544b 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -1066,7 +1066,7 @@ static const u32 lpphy_papd_mult_table[] = {
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28,
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
@@ -1197,7 +1197,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
@@ -1328,7 +1328,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
@@ -1459,7 +1459,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
@@ -1599,7 +1599,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, },
@@ -1730,7 +1730,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
@@ -1861,7 +1861,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, },
@@ -1992,7 +1992,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
{ .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, },
@@ -2123,7 +2123,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, },
};
-static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = {
+static const struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, },
@@ -2392,7 +2392,7 @@ void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
}
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
- struct lpphy_tx_gain_table_entry *table)
+ const struct lpphy_tx_gain_table_entry *table)
{
int i;
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.h b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
index 62002098bbda..1971ccccabf8 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.h
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
@@ -36,7 +36,7 @@ struct lpphy_tx_gain_table_entry {
void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
struct lpphy_tx_gain_table_entry data);
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
- struct lpphy_tx_gain_table_entry *table);
+ const struct lpphy_tx_gain_table_entry *table);
void lpphy_rev0_1_table_init(struct b43_wldev *dev);
void lpphy_rev2plus_table_init(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 0c3d119d1219..1e8495f50c16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
{
*data = addr;
- return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
+ return brcmf_fil_iovar_int_query(ifp, "btc_params", data);
}
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d4cc5fa92341..349aa3439502 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
/* interface_create version 3+ */
/* get supported version from firmware side */
iface_create_ver = 0;
- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
- &iface_create_ver);
+ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
+ &iface_create_ver);
if (err) {
brcmf_err("fail to get supported version, err=%d\n", err);
return -EOPNOTSUPP;
@@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
/* interface_create version 3+ */
/* get supported version from firmware side */
iface_create_ver = 0;
- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
- &iface_create_ver);
+ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
+ &iface_create_ver);
if (err) {
brcmf_err("fail to get supported version, err=%d\n", err);
return -EOPNOTSUPP;
@@ -1135,7 +1135,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
- length += sizeof(ssid_le) * n_ssids,
+ length += sizeof(ssid_le) * n_ssids;
ptr = (char *)params_le + offset;
for (i = 0; i < n_ssids; i++) {
memset(&ssid_le, 0, sizeof(ssid_le));
@@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
if (!sme->crypto.n_akm_suites)
return 0;
- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
bphy_err(drvr, "could not get wpa_auth (%d)\n", err);
return err;
@@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
struct brcmf_pub *drvr = cfg->pub;
- s32 qdbm = 0;
+ s32 qdbm;
s32 err;
brcmf_dbg(TRACE, "Enter\n");
@@ -3067,7 +3068,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate = 0;
+ u32 rate;
u32 rssi;
/* Get the current tx rate */
@@ -7046,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
- err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
- &chaninfo);
+ err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info",
+ &chaninfo);
if (!err) {
if (chaninfo & WL_CHAN_RADAR)
channel->flags |=
@@ -7081,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
/* verify support for bw_cap command */
val = WLC_BAND_5G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
if (!err) {
/* only set 2G bandwidth using bw_cap command */
@@ -7157,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
int err;
band = WLC_BAND_2G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
if (!err) {
bw_cap[NL80211_BAND_2GHZ] = band;
band = WLC_BAND_5G;
- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
if (!err) {
bw_cap[NL80211_BAND_5GHZ] = band;
return;
@@ -7170,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
return;
}
brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
- mimo_bwcap = 0;
err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
if (err)
/* assume 20MHz if firmware does not give a clue */
@@ -7266,10 +7266,10 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- u32 nmode = 0;
+ u32 nmode;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain = 0;
+ u32 rxchain;
u32 nchain;
int err;
s32 i;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bf91b1e1368f..df53dd1d7e74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor = 0;
+ u32 monitor;
int err;
brcmf_dbg(TRACE, "Enter\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index ea76b8d33401..39226b9c0fa8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -48,20 +48,20 @@
/**
* struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
*
- * @pktslots: dynamic allocated array for ordering AMPDU packets.
* @flow_id: AMPDU flow identifier.
* @cur_idx: last AMPDU index from firmware.
* @exp_idx: expected next AMPDU index.
* @max_idx: maximum amount of packets per AMPDU.
* @pend_pkts: number of packets currently in @pktslots.
+ * @pktslots: array for ordering AMPDU packets.
*/
struct brcmf_ampdu_rx_reorder {
- struct sk_buff **pktslots;
u8 flow_id;
u8 cur_idx;
u8 exp_idx;
u8 max_idx;
u8 pend_pkts;
+ struct sk_buff *pktslots[];
};
/* Forward decls for struct brcmf_pub (see below) */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index f23310a77a5d..0d9ae197fa1e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -184,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data = 0;
+ u32 data;
int err;
/* we need to know firmware error */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index a315a7fac6a0..31e080e4da66 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -96,15 +96,22 @@ static inline
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
{
s32 err;
- __le32 data_le = cpu_to_le32(*data);
- err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
return err;
}
+static inline
+s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_cmd_int_get(ifp, cmd, data);
+}
s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
const void *data, u32 len);
@@ -120,14 +127,21 @@ s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
static inline
s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
- __le32 data_le = cpu_to_le32(*data);
s32 err;
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
return err;
}
+static inline
+s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_iovar_int_get(ifp, name, data);
+}
s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
@@ -145,15 +159,21 @@ s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
static inline
s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
- __le32 data_le = cpu_to_le32(*data);
s32 err;
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
+ err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data));
if (err == 0)
- *data = le32_to_cpu(data_le);
+ *data = le32_to_cpu(*(__le32 *)data);
return err;
}
+static inline
+s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 *data_le = (__le32 *)data;
+
+ *data_le = cpu_to_le32(*data);
+ return brcmf_fil_bsscfg_int_get(ifp, name, data);
+}
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
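
The fwil.h rework splits each int accessor in two: *_int_get() is now a plain read that treats the caller's u32 purely as an output buffer, while the new *_int_query() preserves the old semantics for iovars such as "bw_cap" and "per_chan_info" that take an input selector, endian-converting the caller's value in place before the read. A compilable userspace sketch of the split, where data_get() stands in for the firmware transport:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* little-endian host stand-ins for cpu_to_le32()/le32_to_cpu() */
static uint32_t to_le32(uint32_t v)   { return v; }
static uint32_t from_le32(uint32_t v) { return v; }

/* stand-in for brcmf_fil_*_data_get(): the buffer carries any input
 * selector down to "firmware" and is overwritten with the answer */
static int data_get(const char *name, void *buf, size_t len)
{
	uint32_t answer = to_le32(3);

	(void)name;
	memcpy(buf, &answer, len);
	return 0;
}

/* pure read: *data is output only */
static int int_get(const char *name, uint32_t *data)
{
	int err = data_get(name, data, sizeof(*data));

	if (err == 0)
		*data = from_le32(*data);
	return err;
}

/* read keyed on an input, e.g. a band selector for "bw_cap" */
static int int_query(const char *name, uint32_t *data)
{
	*data = to_le32(*data);		/* encode selector in place */
	return int_get(name, data);
}
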
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 36af81975855..0949e7975ff1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1673,7 +1673,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
struct sk_buff_head reorder_list;
struct sk_buff *pnext;
u8 flags;
- u32 buf_size;
reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
@@ -1708,15 +1707,13 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
}
/* from here on we need a flow reorder instance */
if (rfi == NULL) {
- buf_size = sizeof(*rfi);
max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
- buf_size += (max_idx + 1) * sizeof(pkt);
-
/* allocate space for flow reorder info */
brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
flow_id, max_idx);
- rfi = kzalloc(buf_size, GFP_ATOMIC);
+ rfi = kzalloc(struct_size(rfi, pktslots, max_idx + 1),
+ GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
brcmf_netif_rx(ifp, pkt);
@@ -1724,7 +1721,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
}
ifp->drvr->reorder_flows[flow_id] = rfi;
- rfi->pktslots = (struct sk_buff **)(rfi + 1);
rfi->max_idx = max_idx;
}
if (flags & BRCMF_RXREORDER_NEW_HOLE) {
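
Together with the core.h change that turns pktslots into a flexible array member, the allocation above collapses into a single struct_size() call, and the manual rfi->pktslots = (struct sk_buff **)(rfi + 1) fixup disappears because the slots now live inside the struct itself. The pattern in miniature:

#include <stdlib.h>

struct reorder_info {
	unsigned char max_idx;
	unsigned char pend_pkts;
	void *pktslots[];	/* flexible array member, must come last */
};

/* simplified struct_size(); the kernel macro also saturates on overflow */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (size_t)(n) * sizeof((p)->member[0]))

static struct reorder_info *alloc_rfi(unsigned char max_idx)
{
	struct reorder_info *rfi;

	rfi = calloc(1, struct_size(rfi, pktslots, max_idx + 1));
	if (rfi)
		rfi->max_idx = max_idx;	/* no pointer fixup needed */
	return rfi;
}
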
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index 33d17b779201..a767cbb79185 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -351,9 +351,7 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
struct ampdu_info *ampdu = wlc->ampdu;
u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
- u32 txunfl_ratio;
u8 max_mpdu;
- u32 current_ampdu_cnt = 0;
u16 max_pld_size;
u32 new_txunfl;
struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
@@ -389,26 +387,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
if (fifo->accum_txfunfl < 10)
return 0;
- brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n",
- current_ampdu_cnt, fifo->accum_txfunfl);
+ brcms_dbg_ht(wlc->hw->d11core, "tx_underflows %d\n", fifo->accum_txfunfl);
- /*
- compute the current ratio of tx unfl per ampdu.
- When the current ampdu count becomes too
- big while the ratio remains small, we reset
- the current count in order to not
- introduce too big of a latency in detecting a
- large amount of tx underflows later.
- */
-
- txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
-
- if (txunfl_ratio > ampdu->tx_max_funl) {
- if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
- fifo->accum_txfunfl = 0;
-
- return 0;
- }
max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
AMPDU_NUM_MPDU_LEGACY);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index d86f28b8bc60..5b1d35601bbd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1611,10 +1611,9 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kvmalloc(len, GFP_KERNEL);
+ *pbuf = kvmemdup(pdata, len, GFP_KERNEL);
if (*pbuf == NULL)
- goto fail;
- memcpy(*pbuf, pdata, len);
+ return -ENOMEM;
return 0;
}
}
@@ -1622,7 +1621,6 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
brcms_err(wl->wlc->hw->d11core,
"ERROR: ucode buf tag:%d can not be found!\n", idx);
*pbuf = NULL;
-fail:
return -ENODATA;
}
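
kvmemdup() folds the kvmalloc()+memcpy() pair into one call and lets the error path drop its label; like kvmalloc() it falls back from kmalloc to vmalloc for large buffers. In plain userspace terms the helper amounts to:

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;	/* NULL on allocation failure, as with kvmemdup() */
}
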
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 9065ca5b0208..bad080d33c07 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -345,14 +345,19 @@ struct libipw_hdr_2addr {
} __packed;
struct libipw_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(libipw_hdr_3addr_hdr, hdr, __packed,
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct libipw_hdr_3addr, payload) == sizeof(struct libipw_hdr_3addr_hdr),
+ "struct member likely outside of __struct_group()");
struct libipw_hdr_4addr {
__le16 frame_ctl;
@@ -400,7 +405,7 @@ struct libipw_info_element {
*/
struct libipw_auth {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 algorithm;
__le16 transaction;
__le16 status;
@@ -417,7 +422,7 @@ struct libipw_channel_switch {
} __packed;
struct libipw_action {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
u8 category;
u8 action;
union {
@@ -430,7 +435,7 @@ struct libipw_action {
} __packed;
struct libipw_disassoc {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 reason;
} __packed;
@@ -438,13 +443,13 @@ struct libipw_disassoc {
#define libipw_deauth libipw_disassoc
struct libipw_probe_request {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
/* SSID, supported rates */
u8 variable[];
} __packed;
struct libipw_probe_response {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
@@ -456,16 +461,8 @@ struct libipw_probe_response {
/* Alias beacon for probe_response */
#define libipw_beacon libipw_probe_response
-struct libipw_assoc_request {
- struct libipw_hdr_3addr header;
- __le16 capability;
- __le16 listen_interval;
- /* SSID, supported rates, RSN */
- u8 variable[];
-} __packed;
-
struct libipw_reassoc_request {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
@@ -473,7 +470,7 @@ struct libipw_reassoc_request {
} __packed;
struct libipw_assoc_response {
- struct libipw_hdr_3addr header;
+ struct libipw_hdr_3addr_hdr header;
__le16 capability;
__le16 status;
__le16 aid;
@@ -588,13 +585,6 @@ struct libipw_channel_map {
u8 map;
} __packed;
-struct libipw_ibss_dfs {
- struct libipw_info_element ie;
- u8 owner[ETH_ALEN];
- u8 recovery_interval;
- struct libipw_channel_map channel_map[];
-};
-
struct libipw_csa {
u8 mode;
u8 channel;
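
The libipw conversions all follow one recipe: __struct_group() wraps the fixed header fields so they exist both inline and as a separately nameable struct libipw_hdr_3addr_hdr, which the management-frame structs can embed; the original libipw_hdr_3addr cannot be embedded because its trailing payload[] flexible array is only legal at the end of a struct. The static_assert pins the layout so a field added outside the group is caught at build time. A hand-rolled analogue of the macro (C11, packing attributes omitted):

#include <assert.h>
#include <stddef.h>

#define STRUCT_GROUP(TAG, NAME, ...)			\
	union {						\
		struct { __VA_ARGS__ };			\
		struct TAG { __VA_ARGS__ } NAME;	\
	}

struct frame {
	STRUCT_GROUP(frame_hdr, hdr,
		unsigned short frame_ctl;
		unsigned short duration_id;
	);
	unsigned char payload[];	/* forbids embedding struct frame */
};

struct auth_frame {
	struct frame_hdr header;	/* embeds just the fixed part */
	unsigned short algorithm;
};

static_assert(offsetof(struct frame, payload) == sizeof(struct frame_hdr),
	      "struct member likely outside of STRUCT_GROUP()");

Fields remain reachable both directly (f.frame_ctl) and through the named group (f.hdr.frame_ctl), so existing users need no changes.
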
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index 903de34028ef..dbc7153d0a3d 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -509,7 +509,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
int i, idx, ret = 0;
int group_key = 0;
const char *alg, *module;
- struct lib80211_crypto_ops *ops;
+ const struct lib80211_crypto_ops *ops;
struct lib80211_crypt_data **crypt;
struct libipw_security sec = {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 1fab7849f56d..e95800b77f6b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -527,7 +527,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status = {};
struct il_rx_pkt *pkt = rxb_addr(rxb);
- struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_stats_hdr *rx_stats = IL_RX_STATS(pkt);
struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
index 82e4a4878bc2..ffbe11902628 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.h
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -157,8 +157,10 @@ struct il3945_ibss_seq {
};
#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
+ container_of(&x->u.rx_frame.stats, \
+ struct il3945_rx_frame_stats, \
+ hdr)->payload + \
+ x->u.rx_frame.stats.phy_count))
#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
IL_RX_HDR(x)->payload + \
le16_to_cpu(IL_RX_HDR(x)->len)))
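
IL_RX_HDR() needs the payload[] that only exists on the full struct il3945_rx_frame_stats, but the copy embedded in the rx frame is now the payload-less _hdr group; container_of() converts a pointer to the embedded header back into the payload-bearing type. Reduced to essentials:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct stats_hdr { unsigned char phy_count; };

struct stats {
	struct stats_hdr hdr;	/* stands in for the __struct_group() */
	unsigned char payload[];
};

static unsigned char *payload_of(struct stats_hdr *h)
{
	/* hdr sits at offset 0, so this recovers the outer struct and
	 * the payload[] that follows its fixed fields in memory */
	return container_of(h, struct stats, hdr)->payload;
}

As with the driver macro, the result is only meaningful when h really points at a header embedded in a buffer laid out like the outer struct.
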
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 1600c344edbb..fcccde7bb659 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1769,7 +1769,7 @@ il4965_tx_skb(struct il_priv *il,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
+ tx_cmd = container_of(&out_cmd->cmd.tx, struct il_tx_cmd, __hdr);
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 28cf4e832152..4a9fa8b83f0f 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -201,9 +201,6 @@ struct il_cmd_header {
* 15 unsolicited RX or uCode-originated notification
*/
__le16 sequence;
-
- /* command or response/notification data follows immediately */
- u8 data[];
} __packed;
/**
@@ -1160,23 +1157,33 @@ struct il_wep_cmd {
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
struct il3945_rx_frame_stats {
- u8 phy_count;
- u8 id;
- u8 rssi;
- u8 agc;
- __le16 sig_avg;
- __le16 noise_diff;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_rx_frame_stats_hdr, hdr, __packed,
+ u8 phy_count;
+ u8 id;
+ u8 rssi;
+ u8 agc;
+ __le16 sig_avg;
+ __le16 noise_diff;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct il3945_rx_frame_stats, payload) == sizeof(struct il3945_rx_frame_stats_hdr),
+ "struct member likely outside of __struct_group()");
struct il3945_rx_frame_hdr {
- __le16 channel;
- __le16 phy_flags;
- u8 reserved1;
- u8 rate;
- __le16 len;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_rx_frame_hdr_hdr, hdr, __packed,
+ __le16 channel;
+ __le16 phy_flags;
+ u8 reserved1;
+ u8 rate;
+ __le16 len;
+ );
u8 payload[];
} __packed;
+static_assert(offsetof(struct il3945_rx_frame_hdr, payload) == sizeof(struct il3945_rx_frame_hdr_hdr),
+ "struct member likely outside of __struct_group()");
struct il3945_rx_frame_end {
__le32 status;
@@ -1193,8 +1200,8 @@ struct il3945_rx_frame_end {
* stats.phy_count
*/
struct il3945_rx_frame {
- struct il3945_rx_frame_stats stats;
- struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_stats_hdr stats;
+ struct il3945_rx_frame_hdr_hdr hdr;
struct il3945_rx_frame_end end;
} __packed;
@@ -1352,67 +1359,69 @@ struct il_rx_mpdu_res_start {
*/
struct il3945_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- u8 rate;
-
- /* Index of recipient station in uCode's station table */
- u8 sta_id;
- u8 tid_tspec;
- u8 sec_ctl;
- u8 key[16];
- union {
- u8 byte[8];
- __le16 word[4];
- __le32 dw[2];
- } tkip_mic;
- __le32 next_frame_info;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
- u8 supp_rates[2];
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il3945_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ u8 rate;
+
+ /* Index of recipient station in uCode's station table */
+ u8 sta_id;
+ u8 tid_tspec;
+ u8 sec_ctl;
+ u8 key[16];
+ union {
+ u8 byte[8];
+ __le16 word[4];
+ __le32 dw[2];
+ } tkip_mic;
+ __le32 next_frame_info;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+ u8 supp_rates[2];
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- union {
- DECLARE_FLEX_ARRAY(u8, payload);
- DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
- };
+ struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct il3945_tx_cmd, hdr) == sizeof(struct il3945_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
/*
* C_TX = 0x1c (response)
@@ -1438,83 +1447,87 @@ struct il_dram_scratch {
} __packed;
struct il_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- /* uCode may modify this field of the Tx command (in host DRAM!).
- * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct il_dram_scratch scratch;
-
- /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
- __le32 rate_n_flags; /* RATE_MCS_* */
-
- /* Index of destination station in uCode's station table */
- u8 sta_id;
-
- /* Type of security encryption: CCM or TKIP */
- u8 sec_ctl; /* TX_CMD_SEC_* */
-
- /*
- * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
- * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
- * data frames, this field may be used to selectively reduce initial
- * rate (via non-0 value) for special frames (e.g. management), while
- * still supporting rate scaling for all frames.
- */
- u8 initial_rate_idx;
- u8 reserved;
- u8 key[16];
- __le16 next_frame_flags;
- __le16 reserved2;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
-
- /* Host DRAM physical address pointer to "scratch" in this command.
- * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
- __le32 dram_lsb_ptr;
- u8 dram_msb_ptr;
-
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- u8 tid_tspec;
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(il_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct il_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_idx;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct il_tx_cmd, hdr) == sizeof(struct il_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
/* TX command response is sent after *3945* transmission attempts.
*
@@ -2502,7 +2515,7 @@ struct il3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct il3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
@@ -2546,7 +2559,7 @@ struct il_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct il_tx_cmd tx_cmd;
+ struct il_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2662,7 +2675,7 @@ struct il4965_beacon_notif {
*/
struct il3945_tx_beacon_cmd {
- struct il3945_tx_cmd tx;
+ struct il3945_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2670,7 +2683,7 @@ struct il3945_tx_beacon_cmd {
} __packed;
struct il_tx_beacon_cmd {
- struct il_tx_cmd tx;
+ struct il_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 69687fcf963f..2147781b5fff 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -553,7 +553,7 @@ struct il_device_cmd {
u8 val8;
u16 val16;
u32 val32;
- struct il_tx_cmd tx;
+ struct il_tx_cmd_hdr tx;
u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed cmd;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 3b6b8b410be5..fa1be8c54d3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 92
+#define IWL_BZ_UCODE_API_MAX 93
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 90
@@ -148,6 +148,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_BZ,
+ .base_params = &iwl_bz_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .gen2 = true,
+ .umac_prph_offset = 0x300000,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+};
+
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 4ccb0b7bdc20..f1dd1c29f305 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 92
+#define IWL_SC_UCODE_API_MAX 93
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 90
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 65b7c68e5ca7..e0b14be25b02 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1325,8 +1325,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
iwlwifi_mod_params.amsdu_size);
}
- trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
-
trans_cfg.command_groups = iwl_dvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 8c8880b44827..a7cea0a55b35 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -357,6 +357,11 @@ int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
}
mcc_val = wifi_pkg->package.elements[1].integer.value;
+ if (mcc_val != BIOS_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "ACPI WRDD is supported only for CN\n");
+ goto out_free;
+ }
mcc[0] = (mcc_val >> 8) & 0xff;
mcc[1] = mcc_val & 0xff;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index b97a43353779..ddc84430d895 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -95,7 +95,7 @@ enum iwl_bt_ci_compliance {
}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * struct iwl_bt_coex_prof_old_notif - notification about BT coex
* @mbox_msg: message from BT to WiFi
* @msg_idx: the index of the message
* @bt_ci_compliance: enum %iwl_bt_ci_compliance
@@ -110,7 +110,7 @@ enum iwl_bt_ci_compliance {
* @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
* BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
-struct iwl_bt_coex_profile_notif {
+struct iwl_bt_coex_prof_old_notif {
__le32 mbox_msg[4];
__le32 msg_idx;
__le32 bt_ci_compliance;
@@ -126,4 +126,29 @@ struct iwl_bt_coex_profile_notif {
* BT_COEX_PROFILE_NTFY_API_S_VER_5
*/
+/**
+ * enum iwl_bt_coex_subcmd_ids - coex configuration command IDs
+ */
+enum iwl_bt_coex_subcmd_ids {
+ /**
+	 * @PROFILE_NOTIF: &struct iwl_bt_coex_profile_notif
+ */
+ PROFILE_NOTIF = 0xFF,
+};
+
+#define COEX_NUM_BAND 3
+#define COEX_NUM_CHAINS 2
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
+ */
+struct iwl_bt_coex_profile_notif {
+ u8 wifi_loss_low_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS];
+ u8 wifi_loss_mid_high_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS];
+} __packed; /* BT_COEX_BT_PROFILE_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7544c4cb1a30..2f40e69db318 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2022, 2024 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -25,6 +25,8 @@
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
+ * @BT_COEX_GROUP: bt coex group, uses command IDs from
+ * &enum iwl_bt_coex_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
@@ -43,6 +45,7 @@ enum iwl_mvm_command_groups {
SCAN_GROUP = 0x6,
NAN_GROUP = 0x7,
LOCATION_GROUP = 0x8,
+ BT_COEX_GROUP = 0x9,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
@@ -144,8 +147,8 @@ enum iwl_legacy_cmds {
/**
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
* &struct iwl_tx_cmd_gen3,
- * response in &struct iwl_mvm_tx_resp or
- * &struct iwl_mvm_tx_resp_v3
+ * response in &struct iwl_tx_resp or
+ * &struct iwl_tx_resp_v3
*/
TX_CMD = 0x1c,
@@ -398,7 +401,7 @@ enum iwl_legacy_cmds {
REDUCE_TX_POWER_CMD = 0x9f,
/**
- * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
+ * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif_v4
*/
MISSED_BEACONS_NOTIFICATION = 0xa2,
@@ -467,7 +470,7 @@ enum iwl_legacy_cmds {
MARKER_CMD = 0xcb,
/**
- * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+ * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_prof_old_notif
*/
BT_PROFILE_NOTIFICATION = 0xce,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 855cd13a181e..550de6db1834 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -476,6 +476,8 @@ enum iwl_fw_ini_region_device_memory_subtype {
* @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset
* request
* @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list
+ * @IWL_FW_INI_TIME_ESR_LINK_UP: EMLSR is active (several links are activated)
+ * @IWL_FW_INI_TIME_ESR_LINK_DOWN: EMLSR is inactive (only one active link left)
* @IWL_FW_INI_TIME_POINT_NUM: number of time points
*/
enum iwl_fw_ini_time_point {
@@ -509,6 +511,8 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ,
IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START,
IWL_FW_INI_TIME_SCAN_FAILURE,
+ IWL_FW_INI_TIME_ESR_LINK_UP,
+ IWL_FW_INI_TIME_ESR_LINK_DOWN,
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ca6fa66d1917..c46e24fc6a1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -42,7 +42,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
LINK_CONFIG_CMD = 0x9,
/**
- * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd
+ * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd
*/
STA_CONFIG_CMD = 0xA,
/**
@@ -50,7 +50,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
AUX_STA_CMD = 0xB,
/**
- * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd
+ * @STA_REMOVE_CMD: &struct iwl_remove_sta_cmd
*/
STA_REMOVE_CMD = 0xC,
/**
@@ -62,6 +62,14 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_CMD = 0xE,
/**
+ * @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
+ */
+ MISSED_BEACONS_NOTIF = 0xF6,
+ /**
+ * @EMLSR_TRANS_FAIL_NOTIF: &struct iwl_esr_trans_fail_notif
+ */
+ EMLSR_TRANS_FAIL_NOTIF = 0xF7,
+ /**
* @ROC_NOTIF: &struct iwl_roc_notif
*/
ROC_NOTIF = 0xF8,
@@ -446,6 +454,9 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @block_tx: tell the firmware that this link can't Tx. This should be used
+ * only when a link is de-activated because of CSA with mode = 1.
+ * Available since version 5.
* @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
@@ -472,7 +483,9 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved2: alignment
+ * @ul_mu_data_disable: OM Control UL MU Data Disable RX Support (bit 44) in
+ * HE MAC Capabilities information field as defined in figure 9-897 in
+ * IEEE802.11REVme-D5.0
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
* @reserved3: reserved for future use
@@ -488,7 +501,10 @@ struct iwl_link_config_cmd {
__le32 active;
union {
__le32 listen_lmac;
- __le32 reserved1;
+ struct {
+ u8 block_tx;
+ u8 reserved1[3];
+ };
};
__le32 cck_rates;
__le32 ofdm_rates;
@@ -515,17 +531,17 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved2;
+ u8 ul_mu_data_disable;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
__le32 reserved3[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
*/
-#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2
-#define IWL_MVM_FW_MAX_LINK_ID 3
+#define IWL_FW_MAX_ACTIVE_LINKS_NUM 2
+#define IWL_FW_MAX_LINK_ID 3
/**
* enum iwl_fw_sta_type - FW station types
@@ -547,7 +563,7 @@ enum iwl_fw_sta_type {
}; /* STATION_TYPE_E_VER_1 */
/**
- * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
+ * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
*
@@ -579,7 +595,7 @@ enum iwl_fw_sta_type {
* capa
* @htc_flags: which features are supported in HTC
*/
-struct iwl_mvm_sta_cfg_cmd {
+struct iwl_sta_cfg_cmd {
__le32 sta_id;
__le32 link_id;
u8 peer_mld_address[ETH_ALEN];
@@ -620,13 +636,13 @@ struct iwl_mvm_aux_sta_cmd {
} __packed; /* AUX_STA_CMD_API_S_VER_1 */
/**
- * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by
+ * struct iwl_remove_sta_cmd - a cmd structure to remove a sta added by
* STA_CONFIG_CMD or AUX_STA_CONFIG_CMD
* ( STA_REMOVE_CMD = 0xC )
*
* @sta_id: index of station to remove
*/
-struct iwl_mvm_remove_sta_cmd {
+struct iwl_remove_sta_cmd {
__le32 sta_id;
} __packed; /* REMOVE_STA_API_S_VER_1 */
@@ -663,4 +679,51 @@ struct iwl_mvm_esr_mode_notif {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
+/**
 * struct iwl_missed_beacons_notif - sent by the firmware upon beacon loss
+ * ( MISSED_BEACONS_NOTIF = 0xF6 )
+ * @link_id: fw link ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @other_link_id: used in EMLSR only. The fw link ID for
+ * &consec_missed_beacons_other_link. IWL_MVM_FW_LINK_ID_INVALID (0xff) if
+ * invalid.
+ * @consec_missed_beacons_other_link: number of consecutive missed beacons on
+ * &other_link_id.
+ */
+struct iwl_missed_beacons_notif {
+ __le32 link_id;
+ __le32 consec_missed_beacons_since_last_rx;
+ __le32 consec_missed_beacons;
+ __le32 other_link_id;
+ __le32 consec_missed_beacons_other_link;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_5 */
+
+/**
+ * enum iwl_esr_trans_fail_code - error codes used to parse the notification below
+ *
+ * @ESR_TRANS_FAILED_TX_STATUS_ERROR: failed to TX EML OMN frame
+ * @ESR_TRANSITION_FAILED_TX_TIMEOUT: timeout on the EML OMN frame
+ * @ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD: can't get a beacon on the new link
+ */
+enum iwl_esr_trans_fail_code {
+ ESR_TRANS_FAILED_TX_STATUS_ERROR,
+ ESR_TRANSITION_FAILED_TX_TIMEOUT,
+ ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD,
+};
+
+/**
+ * struct iwl_esr_trans_fail_notif - FW reports a failure in EMLSR transition
+ *
+ * @link_id: the link_id that still works after the failure
+ * @activation: true if the link was activated, false otherwise
+ * @err_code: see &enum iwl_esr_trans_fail_code
+ */
+struct iwl_esr_trans_fail_notif {
+ __le32 link_id;
+ __le32 activation;
+ __le32 err_code;
+} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
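A rough sketch of consuming the new EMLSR transition-failure notification (the handler name is hypothetical; a real handler would be wired into the driver's RX dispatch table):

/* Hypothetical consumer of EMLSR_TRANS_FAIL_NOTIF; all fields arrive
 * little-endian per the struct above.
 */
static void iwl_handle_esr_trans_fail(const struct iwl_esr_trans_fail_notif *notif)
{
	u32 link_id = le32_to_cpu(notif->link_id);
	bool activation = le32_to_cpu(notif->activation);
	u32 err = le32_to_cpu(notif->err_code);

	pr_debug("EMLSR %s failed on link %u, err %u (see enum iwl_esr_trans_fail_code)\n",
		 activation ? "activation" : "deactivation", link_id, err);
}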
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index bcbbf8c4a297..977ca4ac166d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -16,7 +16,7 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT_MAX 16
+#define IWL_STATION_COUNT_MAX 16
#define IWL_MVM_INVALID_STA 0xFF
enum iwl_ac {
@@ -378,7 +378,7 @@ struct iwl_missed_beacons_notif_ver_3 {
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
- * struct iwl_missed_beacons_notif - information on missed beacons
+ * struct iwl_missed_beacons_notif_v4 - information on missed beacons
* ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
* @link_id: fw link ID
* @consec_missed_beacons_since_last_rx: number of consecutive missed
@@ -387,7 +387,7 @@ struct iwl_missed_beacons_notif_ver_3 {
* @num_expected_beacons: number of expected beacons
* @num_recvd_beacons: number of received beacons
*/
-struct iwl_missed_beacons_notif {
+struct iwl_missed_beacons_notif_v4 {
__le32 link_id;
__le32 consec_missed_beacons_since_last_rx;
__le32 consec_missed_beacons;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 6e6a92d173cc..df0680eae30c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -285,18 +285,12 @@ enum iwl_dev_tx_power_cmd_mode {
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
- * @dev_24: device TX power restriction in 1/8 dBms
- * @dev_52_low: device TX power restriction upper band - low
- * @dev_52_high: device TX power restriction upper band - high
*/
struct iwl_dev_tx_power_common {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
- __le16 dev_24;
- __le16 dev_52_low;
- __le16 dev_52_high;
-};
+} __packed;
/**
* struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3
@@ -412,8 +406,20 @@ struct iwl_dev_tx_power_cmd_v8 {
__le32 tpc_vlp_backoff_level;
} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+/*
+ * @dev_24: device TX power restriction in 1/8 dBms
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_cmd_per_band {
+ __le16 dev_24;
+ __le16 dev_52_low;
+ __le16 dev_52_high;
+} __packed;
+
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * struct iwl_dev_tx_power_cmd_v3_v8 - TX power reduction command (multiversion)
+ * @per_band: per band restrictions
* @common: common part of the command
* @v3: version 3 part of the command
* @v4: version 4 part of the command
@@ -422,8 +428,9 @@ struct iwl_dev_tx_power_cmd_v8 {
* @v7: version 7 part of the command
* @v8: version 8 part of the command
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v3_v8 {
struct iwl_dev_tx_power_common common;
+ struct iwl_dev_tx_power_cmd_per_band per_band;
union {
struct iwl_dev_tx_power_cmd_v3 v3;
struct iwl_dev_tx_power_cmd_v4 v4;
@@ -434,6 +441,60 @@ struct iwl_dev_tx_power_cmd {
};
};
+/**
+ * struct iwl_dev_tx_power_cmd_v9 - TX power reduction cmd
+ * @reserved: reserved (padding)
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: whether the per-chain restriction has
+ *	changed since the last command. Used if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *	Note: if not changed, the command is used for keep-alive only.
+ * @reserved1: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW reverts to the
+ *	default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v9 {
+ __le16 reserved;
+ __le16 per_chain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ u8 per_chain_restriction_changed;
+ u8 reserved1[3];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v10 - TX power reduction cmd
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: whether the per-chain restriction has
+ *	changed since the last command. Used if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *	Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW reverts to the
+ *	default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v10 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 per_chain_restriction_changed;
+ u8 reserved;
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_10 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v9: version 9 part of the command
+ * @v10: version 10 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v9 v9;
+ struct iwl_dev_tx_power_cmd_v10 v10;
+ };
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */
+
#define IWL_NUM_GEO_PROFILES 3
#define IWL_NUM_GEO_PROFILES_V3 8
#define IWL_NUM_BANDS_PER_CHAIN_V1 2
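Because only the selected union member follows the common header on the wire, the payload length is assembled per version; a condensed sketch of the pattern (mirroring the fw.c changes later in this patch; versions below 9 additionally append the per_band part):

/* Sketch: REDUCE_TX_POWER_CMD payload length for versions 9/10. */
static u16 iwl_tx_power_cmd_len(u8 cmd_ver)
{
	u16 len = sizeof(struct iwl_dev_tx_power_common);

	if (cmd_ver == 10)
		len += sizeof(struct iwl_dev_tx_power_cmd_v10);
	else if (cmd_ver == 9)
		len += sizeof(struct iwl_dev_tx_power_cmd_v9);

	return len;
}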
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 8598031567bb..f486d624500b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -731,39 +731,46 @@ enum iwl_umac_scan_general_params_flags2 {
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
- * @band: band of channel: 0 for 2GHz, 1 for 5GHz
- * @iter_count: repetition count for the channel.
- * @iter_interval: interval between two scan iterations on one channel.
+ * @v1: command version 1
+ * @v1.iter_count: repetition count for the channel.
+ * @v1.iter_interval: interval between two scan iterations on one channel.
+ * @v2: command versions 2-4
+ * @v2.band: band of channel: 0 for 2GHz, 1 for 5GHz
+ * @v2.iter_count: repetition count for the channel.
+ * @v2.iter_interval: interval between two scan iterations on one channel.
+ * @v5: command versions 5 and up
+ * @v5.iter_count: repetition count for the channel.
+ * @v5.iter_interval: interval between two scan iterations on one channel.
+ * @v5.psd_20: highest PSD value for all APs known so far
+ * on this channel.
*/
struct iwl_scan_channel_cfg_umac {
#define IWL_CHAN_CFG_FLAGS_BAND_POS 30
__le32 flags;
+ u8 channel_num;
/* All versions are of the same size, so use a union without adjusting
* the command size later
*/
union {
struct {
- u8 channel_num;
u8 iter_count;
__le16 iter_interval;
- } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
+ } __packed v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
struct {
- u8 channel_num;
u8 band;
u8 iter_count;
u8 iter_interval;
- } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
- * SCAN_CHANNEL_CONFIG_API_S_VER_3
- * SCAN_CHANNEL_CONFIG_API_S_VER_4
- */
+ } __packed v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
+ * SCAN_CHANNEL_CONFIG_API_S_VER_3
+ * SCAN_CHANNEL_CONFIG_API_S_VER_4
+ */
struct {
- u8 channel_num;
u8 psd_20;
u8 iter_count;
u8 iter_interval;
- } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */
- };
+ } __packed v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */
+ } __packed;
} __packed;
/**
@@ -1133,6 +1140,19 @@ struct iwl_umac_scan_abort {
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
/**
+ * enum iwl_umac_scan_abort_status - status of a scan abort request
+ *
+ * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted
+ * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress
+ * @IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND: nothing to abort
+ */
+enum iwl_umac_scan_abort_status {
+ IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0,
+ IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS,
+ IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND,
+};
+
+/**
* struct iwl_umac_scan_complete
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @last_schedule: last scheduling line
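With channel_num hoisted out of the union, filling any version sets the shared field first; a hedged example for a version 5 entry (the helper name and values are placeholders, not part of the patch):

/* Sketch: populate a v5 UMAC scan channel entry; psd_20 exists only in
 * SCAN_CHANNEL_CONFIG_API_S_VER_5 and later.
 */
static void iwl_fill_chan_cfg_v5(struct iwl_scan_channel_cfg_umac *cfg,
				 u8 channel, u8 psd_20)
{
	cfg->flags = cpu_to_le32(0);	/* no directed-scan SSID bits */
	cfg->channel_num = channel;	/* shared across all versions */
	cfg->v5.psd_20 = psd_20;
	cfg->v5.iter_count = 1;
	cfg->v5.iter_interval = 0;
}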
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 2271b19213fa..0a9f14fb04be 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -327,14 +327,14 @@ struct mvm_statistics_load {
__le32 air_time[MAC_INDEX_AUX];
__le32 byte_count[MAC_INDEX_AUX];
__le32 pkt_count[MAC_INDEX_AUX];
- u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 avg_energy[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
struct mvm_statistics_load_v1 {
__le32 air_time[NUM_MAC_INDEX];
__le32 byte_count[NUM_MAC_INDEX];
__le32 pkt_count[NUM_MAC_INDEX];
- u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 avg_energy[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_rx {
@@ -594,7 +594,7 @@ struct iwl_stats_ntfy_per_sta {
} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
#define IWL_STATS_MAX_PHY_OPERATIONAL 3
-#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1)
+#define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1)
/**
* struct iwl_system_statistics_notif_oper
@@ -608,7 +608,7 @@ struct iwl_system_statistics_notif_oper {
__le32 time_stamp;
struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS];
struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
- struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */
/**
@@ -651,7 +651,7 @@ struct iwl_statistics_operational_ntfy {
__le32 flags;
struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX];
struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
- struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX];
__le64 rx_time;
__le64 tx_time;
__le64 on_time_rf;
@@ -699,7 +699,7 @@ struct iwl_statistics_operational_ntfy_ver_14 {
__le64 tx_time;
__le64 on_time_rf;
__le64 on_time_scan;
- __le32 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 average_energy[IWL_STATION_COUNT_MAX];
__le32 reserved;
} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index c5277e2f8cd4..f3bf2e087a40 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -486,7 +486,7 @@ struct agg_tx_status {
#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
/**
- * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
+ * struct iwl_tx_resp_v3 - notifies that fw is TXing a packet
* ( REPLY_TX = 0x1c )
* @frame_count: 1 no aggregation, >1 aggregation
* @bt_kill_count: num of times blocked by bluetooth (unused for agg)
@@ -517,7 +517,7 @@ struct agg_tx_status {
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
*/
-struct iwl_mvm_tx_resp_v3 {
+struct iwl_tx_resp_v3 {
u8 frame_count;
u8 bt_kill_count;
u8 failure_rts;
@@ -543,7 +543,7 @@ struct iwl_mvm_tx_resp_v3 {
} __packed; /* TX_RSP_API_S_VER_3 */
/**
- * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * struct iwl_tx_resp - notifies that fw is TXing a packet
* ( REPLY_TX = 0x1c )
* @frame_count: 1 no aggregation, >1 aggregation
* @bt_kill_count: num of times blocked by bluetooth (unused for agg)
@@ -575,7 +575,7 @@ struct iwl_mvm_tx_resp_v3 {
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
*/
-struct iwl_mvm_tx_resp {
+struct iwl_tx_resp {
u8 frame_count;
u8 bt_kill_count;
u8 failure_rts;
@@ -823,7 +823,7 @@ struct iwl_mac_beacon_cmd {
*/
struct iwl_beacon_notif {
- struct iwl_mvm_tx_resp beacon_notify_hdr;
+ struct iwl_tx_resp beacon_notify_hdr;
__le64 tsf;
__le32 ibss_mgr_status;
} __packed;
@@ -836,7 +836,7 @@ struct iwl_beacon_notif {
* @gp2: last beacon time in gp2
*/
struct iwl_extended_beacon_notif_v5 {
- struct iwl_mvm_tx_resp beacon_notify_hdr;
+ struct iwl_tx_resp beacon_notify_hdr;
__le64 tsf;
__le32 ibss_mgr_status;
__le32 gp2;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 560a91998cc4..4d9a1f83ef8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -634,3 +634,19 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
GET_BIOS_TABLE(dsm, fwrt, func, value);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
+
+bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
+{
+ /* Some kind of regulatory mess means we need to currently disallow
+ * puncturing in the US and Canada unless enabled in BIOS.
+ */
+ switch (mcc) {
+ case IWL_MCC_US:
+ return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK;
+ case IWL_MCC_CANADA:
+ return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;
+ default:
+ return true;
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index e2c056f483c1..81787501d4a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -45,6 +45,8 @@
#define IWL_WTAS_ENABLE_IEC_MSK 0x4
#define IWL_WTAS_USA_UHB_MSK BIT(16)
+#define BIOS_MCC_CHINA 0x434e
+
/*
* The profile for revision 2 is a superset of revision 1, which is in
* turn a superset of revision 0. So we can store all revisions
@@ -217,4 +219,6 @@ static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
IWL_PPAG_REV3_MASK);
}
+
+bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
#endif /* __fw_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index fb982d4fe851..091fb6fd7c78 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -638,7 +638,7 @@ int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
goto out;
}
- if (data->mcc != UEFI_MCC_CHINA) {
+ if (data->mcc != BIOS_MCC_CHINA) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
goto out;
@@ -729,3 +729,32 @@ out:
kfree(data);
return ret;
}
+
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_puncturing_data *data;
+	/* default to disabled if there is any issue reading the UEFI
+	 * variable or the revision is not supported
+	 */
+ int puncturing = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans,
+ IWL_UEFI_PUNCTURING_NAME,
+ "UefiCnvWlanPuncturing",
+ sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return puncturing;
+
+ if (data->revision != IWL_UEFI_PUNCTURING_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PUNCTURING rev:%d\n",
+ data->revision);
+ } else {
+ puncturing = data->puncturing & IWL_UEFI_PUNCTURING_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded puncturing bits from UEFI: %d\n",
+ puncturing);
+ }
+
+ kfree(data);
+ return puncturing;
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing);
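Putting the two new helpers together, the intended flow is roughly the following sketch (the wrapper name is hypothetical; the driver itself caches the UEFI value in mvm->bios_enable_puncturing, as seen in mac80211.c below):

/* Sketch: puncturing stays disallowed in the US/Canada unless the BIOS
 * bits enable it; iwl_uefi_get_puncturing() returns 0 on any read error.
 */
static bool iwl_allow_puncturing(struct iwl_fw_runtime *fwrt, u16 mcc)
{
	u32 bios_bits = iwl_uefi_get_puncturing(fwrt);

	return iwl_puncturing_is_allowed_in_bios(bios_bits, mcc);
}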
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 1f8884ca8997..e525d449e656 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -22,6 +22,7 @@
#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
+#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
#define IWL_SGOM_MAP_SIZE 339
@@ -38,6 +39,7 @@
#define IWL_UEFI_ECKV_REVISION 0
#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
+#define IWL_UEFI_PUNCTURING_REVISION 0
struct pnvm_sku_package {
u8 rev;
@@ -149,8 +151,6 @@ struct uefi_cnv_var_splc {
u32 default_pwr_limit;
} __packed;
-#define UEFI_MCC_CHINA 0x434e
-
/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
* @revision: the revision of the table
* @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code
@@ -194,6 +194,25 @@ struct uefi_cnv_wlan_wbem_data {
u32 wbem_320mhz_per_mcc;
} __packed;
+enum iwl_uefi_cnv_puncturing_flags {
+ IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK = BIT(0),
+ IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK = BIT(1),
+};
+
+#define IWL_UEFI_PUNCTURING_REV0_MASK (IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK | \
+ IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK)
+/**
+ * struct uefi_cnv_var_puncturing_data - controlling channel
+ *	puncturing for a few countries.
+ * @revision: the revision of the table
+ * @puncturing: enablement of channel puncturing per mcc
+ * see &enum iwl_uefi_cnv_puncturing_flags.
+ */
+struct uefi_cnv_var_puncturing_data {
+ u8 revision;
+ u32 puncturing;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -224,6 +243,7 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -320,5 +340,11 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
+
+static inline
+int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
+{
+ return 0;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index b2abd4fd1944..34c91deca57b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -504,6 +504,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index aaaabd67f959..2abfc986701f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1205,7 +1205,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
if (le32_to_cpup((const __le32 *)tlv_data) >
- IWL_MVM_STATION_COUNT_MAX) {
+ IWL_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
le32_to_cpup((const __le32 *)tlv_data));
@@ -1479,7 +1479,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
+ fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
fw->ucode_capa.num_beacons = 1;
/* dump all fw memory areas by default */
fw->dbg.dump_mask = 0xffffffff;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0ef48effeefb..e95ffe303547 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -386,7 +386,6 @@ struct iwl_dump_sanitize_ops {
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
- * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@@ -412,7 +411,6 @@ struct iwl_trans_config {
u8 cmd_queue;
u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
const u8 *no_reclaim_cmds;
unsigned int n_no_reclaim_cmds;
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
index 4900de3cc0d3..2081775e0ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -283,6 +283,16 @@ struct iwl_mei_colloc_info {
u8 bssid[ETH_ALEN];
};
+/**
+ * enum iwl_mei_sap_version - SAP version
+ * @IWL_MEI_SAP_VERSION_3: SAP version 3
+ * @IWL_MEI_SAP_VERSION_4: SAP version 4
+ */
+enum iwl_mei_sap_version {
+ IWL_MEI_SAP_VERSION_3 = 3,
+ IWL_MEI_SAP_VERSION_4 = 4,
+};
+
/*
* struct iwl_mei_ops - driver's operations called by iwlmei
* Operations will not be called more than once concurrently.
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index 1dd9106c6513..dce0b7cf7b26 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -58,7 +58,6 @@ bool iwl_mei_is_connected(void)
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
-#define SAP_VERSION 3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
struct iwl_sap_q_ctrl_blk {
@@ -110,16 +109,19 @@ struct iwl_sap_shared_mem_ctrl_blk {
#define SAP_H2M_DATA_Q_SZ 48256
#define SAP_M2H_DATA_Q_SZ 24128
-#define SAP_H2M_NOTIF_Q_SZ 2240
+#define SAP_H2M_NOTIF_Q_SZ_VER3 2240
+#define SAP_H2M_NOTIF_Q_SZ_VER4 32768
#define SAP_M2H_NOTIF_Q_SZ 62720
-#define _IWL_MEI_SAP_SHARED_MEM_SZ \
+#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER3 \
(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
- SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
+ SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER3 + \
SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
-#define IWL_MEI_SAP_SHARED_MEM_SZ \
- (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
+#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 \
+ (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
+ SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER4 + \
+ SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
struct iwl_mei_shared_mem_ptrs {
struct iwl_sap_shared_mem_ctrl_blk *ctrl;
@@ -206,6 +208,7 @@ struct iwl_mei {
* @mac_address: interface MAC address.
* @nvm_address: NVM MAC address.
* @priv: A pointer to iwlwifi.
+ * @sap_version: The SAP version to use, see &enum iwl_mei_sap_version.
*
 * This is used to cache the configuration coming from iwlwifi. The data
* is cached here so that we can buffer the configuration even if we don't have
@@ -220,6 +223,7 @@ struct iwl_mei_cache {
u16 mcc;
u8 mac_address[6];
u8 nvm_address[6];
+ enum iwl_mei_sap_version sap_version;
void *priv;
};
@@ -238,14 +242,17 @@ static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
#define HBM_DMA_BUF_ID_WLAN 1
-static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
+static int iwl_mei_alloc_mem_for_version(struct mei_cl_device *cldev,
+ enum iwl_mei_sap_version version)
{
struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
+ u32 mem_size = roundup(version == IWL_MEI_SAP_VERSION_4 ?
+ _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 :
+ _IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE);
- mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
- IWL_MEI_SAP_SHARED_MEM_SZ);
-
+ iwl_mei_cache.sap_version = version;
+ mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, mem_size);
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
@@ -254,11 +261,30 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
return ret;
}
- memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
+ memset(mem->ctrl, 0, mem_size);
return 0;
}
+static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ /*
+ * SAP version 4 uses a larger Host to MEI notif queue.
+ * Since it is unknown at this stage which SAP version is used by the
+	 * CSME firmware on this platform, try the version 4 allocation first.
+ * If the CSME firmware uses version 3, this allocation is expected to
+ * fail because the CSME firmware allocated less memory for our driver.
+ */
+ ret = iwl_mei_alloc_mem_for_version(cldev, IWL_MEI_SAP_VERSION_4);
+ if (ret)
+ ret = iwl_mei_alloc_mem_for_version(cldev,
+ IWL_MEI_SAP_VERSION_3);
+
+ return ret;
+}
+
static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
{
struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
@@ -277,7 +303,9 @@ static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
cpu_to_le32(SAP_H2M_DATA_Q_SZ);
h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
- cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
+ iwl_mei_cache.sap_version == IWL_MEI_SAP_VERSION_3 ?
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER3) :
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER4);
m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
cpu_to_le32(SAP_M2H_DATA_Q_SZ);
m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
@@ -647,7 +675,7 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
return;
}
- if (rsp->supported_version != SAP_VERSION) {
+ if (rsp->supported_version != iwl_mei_cache.sap_version) {
dev_err(&cldev->dev,
"didn't get the expected version: got %d\n",
rsp->supported_version);
@@ -1281,7 +1309,7 @@ static int iwl_mei_send_start(struct mei_cl_device *cldev)
.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
.hdr.len = cpu_to_le32(sizeof(msg)),
- .supported_versions[0] = SAP_VERSION,
+ .supported_versions[0] = iwl_mei_cache.sap_version,
.init_data_seq_num = cpu_to_le16(0x100),
.init_notif_seq_num = cpu_to_le16(0x800),
};
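For reference, the two candidate shared-memory sizes differ only in the host-to-MEI notif queue (2240 vs 32768 bytes); a condensed sketch of the size computation used by the fallback above:

/* Sketch: SAP shared-memory size per version, page-rounded as in
 * iwl_mei_alloc_mem_for_version().
 */
static u32 iwl_mei_shared_mem_sz(enum iwl_mei_sap_version version)
{
	u32 sz = version == IWL_MEI_SAP_VERSION_4 ?
		 _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 :
		 _IWL_MEI_SAP_SHARED_MEM_SZ_VER3;

	return roundup(sz, PAGE_SIZE);
}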
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 8873904f51ec..59751f123571 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -13,7 +13,7 @@ iwlmvm-y += ptp.o
iwlmvm-y += time-sync.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-$(CONFIG_PM) += d3.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index ad3e14a0d043..b607961970e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -208,7 +208,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
}
struct iwl_bt_iterator_data {
- struct iwl_bt_coex_profile_notif *notif;
+ struct iwl_bt_coex_prof_old_notif *notif;
struct iwl_mvm *mvm;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
@@ -266,10 +266,26 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool have_wifi_loss_rate =
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- BT_PROFILE_NOTIFICATION, 0) > 4;
+ BT_PROFILE_NOTIFICATION, 0) > 4 ||
+ iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1;
+ u8 wifi_loss_mid_high_rssi;
+ u8 wifi_loss_low_rssi;
u8 wifi_loss_rate;
- if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1) {
+ /* For now, we consider 2.4 GHz band / ANT_A only */
+ wifi_loss_mid_high_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
+ wifi_loss_low_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
+ } else {
+ wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
+ }
+
+ if (wifi_loss_low_rssi == BT_OFF)
return true;
if (primary)
@@ -286,20 +302,20 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
* we will get an update on this and exit eSR.
*/
if (!link_rssi)
- wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_rate = wifi_loss_mid_high_rssi;
else if (mvmvif->esr_active)
/* RSSI needs to get really low to disable eSR... */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
- mvm->last_bt_notif.wifi_loss_low_rssi :
- mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
else
/* ...And really high before we enable it back */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
- mvm->last_bt_notif.wifi_loss_low_rssi :
- mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
@@ -509,6 +525,35 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ for (int link_id = 0;
+ link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+		struct ieee80211_bss_conf *link_conf =
+			rcu_dereference_check(vif->link_conf[link_id],
+					      lockdep_is_held(&mvm->mutex));
+		struct ieee80211_chanctx_conf *chanctx_conf;
+
+		if (!link_conf)
+			continue;
+
+		chanctx_conf =
+			rcu_dereference_check(link_conf->chanctx_conf,
+					      lockdep_is_held(&mvm->mutex));
+
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
+ continue;
+
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+ }
+}
+
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
@@ -591,11 +636,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data;
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -612,6 +657,22 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
iwl_mvm_bt_coex_notif_handle(mvm);
}
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ const struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->last_bt_wifi_loss = *notif;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_coex_notif_iterator,
+ mvm);
+}
+
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
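The enable/disable hysteresis above reduces to a small predicate; a minimal sketch (the helper name is hypothetical; the thresholds are the IWL_MVM_BT_COEX_*_ESR_THRESH constants from constants.h):

/* Sketch: when eSR is active, only RSSI below the disable threshold selects
 * the low-RSSI loss table; when inactive, the higher enable threshold must
 * be crossed first.
 */
static u8 iwl_bt_coex_pick_loss(bool esr_active, int link_rssi,
				u8 loss_low, u8 loss_mid_high)
{
	int thresh = esr_active ? IWL_MVM_BT_COEX_DISABLE_ESR_THRESH :
				  IWL_MVM_BT_COEX_ENABLE_ESR_THRESH;

	return link_rssi <= -thresh ? loss_low : loss_mid_high;
}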
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c4c1e67b9ac7..ddf484027d4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -16,6 +16,8 @@
#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 11
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -109,7 +111,7 @@
#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
#define IWL_MVM_FTM_RESP_NDP_SUPPORT true
#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
-#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5
+#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
@@ -125,7 +127,6 @@
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
-#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index afd90a52d4ec..55245f913286 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -772,6 +772,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_ftm_iter_data target;
target.bssid = bssid;
+ target.cipher = cipher;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 08c4898c8f1a..08546e673cf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -863,7 +863,10 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
- struct iwl_dev_tx_power_cmd cmd = {
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ struct iwl_dev_tx_power_cmd cmd_v9_v10 = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
__le16 *per_chain;
@@ -871,8 +874,19 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
u16 len = 0;
u32 n_subbands;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ void *cmd_data = &cmd;
- if (cmd_ver >= 7) {
+ if (cmd_ver == 10) {
+ len = sizeof(cmd_v9_v10.v10);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = &cmd_v9_v10.v10.per_chain[0][0][0];
+ cmd_v9_v10.v10.flags =
+ cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 9) {
+ len = sizeof(cmd_v9_v10.v9);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = &cmd_v9_v10.v9.per_chain[0][0];
+ } else if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
@@ -899,9 +913,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
per_chain = cmd.v3.per_chain[0][0];
}
- /* all structs have the same common part, add it */
+ /* all structs have the same common part, add its length */
len += sizeof(cmd.common);
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+ else
+ cmd_data = &cmd_v9_v10;
+
ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
IWL_NUM_CHAIN_TABLES,
n_subbands, prof_a, prof_b);
@@ -913,7 +932,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
- return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1464,7 +1483,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
}
- for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
+ for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index a9929aa49913..2b0652168002 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -17,7 +17,8 @@
HOW(EXIT_COEX) \
HOW(EXIT_BANDWIDTH) \
HOW(EXIT_CSA) \
- HOW(EXIT_LINK_USAGE)
+ HOW(EXIT_LINK_USAGE) \
+ HOW(EXIT_FAIL_ENTRY)
static const char *const iwl_mvm_esr_states_names[] = {
#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
@@ -233,10 +234,15 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
WARN_ON_ONCE(active == link_info->active);
/* When deactivating a link session protection should
- * be stopped
+ * be stopped. Also let the firmware know if we can't Tx.
*/
- if (!active && vif->type == NL80211_IFTYPE_STATION)
+ if (!active && vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_stop_session_protection(mvm, vif);
+ if (link_info->csa_block_tx) {
+ cmd.block_tx = 1;
+ link_info->csa_block_tx = false;
+ }
+ }
}
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
@@ -258,7 +264,7 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf,
+ iwl_mvm_set_fw_basic_rates(mvm, vif, link_info,
&cmd.cck_rates, &cmd.ofdm_rates);
cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble);
@@ -293,6 +299,17 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(link_conf->uora_ocw_range >> 3) & 0x7;
}
+ /* ap_sta may be NULL if we're disconnecting */
+ if (changes & LINK_CONTEXT_MODIFY_HE_PARAMS && mvmvif->ap_sta) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(mvmvif->ap_sta, link_id);
+
+ if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+ link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+ cmd.ul_mu_data_disable = 1;
+ }
+
/* TODO how to set ndp_fdbk_buff_th_exp? */
if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index dfcc96f18b4f..a7a10e716e65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -413,19 +413,18 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
}
void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf,
+ struct iwl_mvm_vif_link_info *link_info,
__le32 *cck_rates, __le32 *ofdm_rates)
{
- struct ieee80211_chanctx_conf *chanctx;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
u8 cck_ack_rates = 0, ofdm_ack_rates = 0;
+ enum nl80211_band band = NL80211_BAND_2GHZ;
- rcu_read_lock();
- chanctx = rcu_dereference(link_conf->chanctx_conf);
- iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
- : NL80211_BAND_2GHZ,
- &cck_ack_rates, &ofdm_ack_rates);
+ phy_ctxt = link_info->phy_ctxt;
+ if (phy_ctxt && phy_ctxt->channel)
+ band = phy_ctxt->channel->band;
- rcu_read_unlock();
+ iwl_mvm_ack_rates(mvm, vif, band, &cck_ack_rates, &ofdm_ack_rates);
*cck_rates = cpu_to_le32((u32)cck_ack_rates);
*ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
@@ -563,7 +562,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
else
eth_broadcast_addr(cmd->bssid_addr);
- iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates,
+ iwl_mvm_set_fw_basic_rates(mvm, vif, &mvmvif->deflink, &cmd->cck_rates,
&cmd->ofdm_rates);
cmd->cck_short_preamble =
@@ -1528,7 +1527,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) {
- struct iwl_mvm_tx_resp *beacon_notify_hdr =
+ struct iwl_tx_resp *beacon_notify_hdr =
&beacon_v5->beacon_notify_hdr;
if (unlikely(pkt_len < sizeof(*beacon_v5)))
@@ -1586,11 +1585,11 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
}
}
-void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
+static void
+iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
+ const struct iwl_missed_beacons_notif *mb,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
@@ -1604,6 +1603,16 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
+ u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ MISSED_BEACONS_NOTIF, 0);
+
+ /* If the firmware uses the new notification (from MAC_CONF_GROUP),
+ * refer to that notification's version.
+ * Note that the new notification from MAC_CONF_GROUP starts from
+ * version 5.
+ */
+ if (new_notif_ver)
+ notif_ver = new_notif_ver;
/* before version four the ID in the notification refers to mac ID */
if (notif_ver < 4) {
@@ -1620,13 +1629,11 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
}
IWL_DEBUG_INFO(mvm,
- "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n",
+ "missed bcn %s_id=%u, consecutive=%u (%u)\n",
notif_ver < 4 ? "mac" : "link",
id,
le32_to_cpu(mb->consec_missed_beacons),
- le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
- le32_to_cpu(mb->num_recvd_beacons),
- le32_to_cpu(mb->num_expected_beacons));
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx));
if (!vif)
return;
@@ -1656,10 +1663,27 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
- } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH &&
- link_id >= 0 && hweight16(vif->active_links) > 1) {
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON,
- iwl_mvm_get_other_link(vif, link_id));
+ } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
+ u32 scnd_lnk_bcn_lost = 0;
+
+ if (notif_ver >= 5 &&
+ !IWL_FW_CHECK(mvm,
+ le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
+ "No data for other link id but we are in EMLSR active_links: 0x%x\n",
+ vif->active_links))
+ scnd_lnk_bcn_lost =
+ le32_to_cpu(mb->consec_missed_beacons_other_link);
+
+		/* Exit EMLSR if we lost more than
+		 * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links
+		 * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link.
+ */
+ if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+ scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+ rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH)
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_primary_link(vif));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
@@ -1687,6 +1711,31 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
}
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, (const void *)pkt->data, pkt);
+}
+
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_missed_beacons_notif_v4 *mb_v4 =
+ (const void *)pkt->data;
+ struct iwl_missed_beacons_notif mb = {
+ .link_id = mb_v4->link_id,
+ .consec_missed_beacons = mb_v4->consec_missed_beacons,
+ .consec_missed_beacons_since_last_rx =
+ mb_v4->consec_missed_beacons_since_last_rx,
+ .other_link_id = cpu_to_le32(IWL_MVM_FW_LINK_ID_INVALID),
+ };
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, &mb, pkt);
+}
+
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
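To make the two beacon-loss thresholds concrete, the exit condition above is equivalent to this condensed predicate (constants 5 and 11 as defined in constants.h earlier in the patch):

/* Sketch: leave EMLSR when both links lost >= 5 consecutive beacons, or
 * the reporting link alone lost >= 11.
 */
static bool iwl_should_exit_emlsr(u32 missed_this_link, u32 missed_other_link)
{
	return (missed_this_link >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
		missed_other_link >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
	       missed_this_link >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH;
}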
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 625ccf566e1c..a327893c6dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -165,12 +165,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
- /* Some kind of regulatory mess means we need to currently disallow
- * puncturing in the US and Canada. Do that here, at least until we
- * figure out the new chanctx APIs for puncturing.
- */
- if (resp->mcc == cpu_to_le16(IWL_MCC_US) ||
- resp->mcc == cpu_to_le16(IWL_MCC_CANADA))
+ if (!iwl_puncturing_is_allowed_in_bios(mvm->bios_enable_puncturing,
+ le16_to_cpu(resp->mcc)))
ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING);
else
__clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags);
@@ -639,10 +635,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
- NL80211_FEATURE_DYNAMIC_SMPS |
- NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
+ /* when firmware supports RLC/SMPS offload, do not set these
+ * driver features, since it's no longer supported by driver.
+ */
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_DYNAMIC_SMPS;
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
@@ -838,20 +839,10 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
if (ieee80211_is_mgmt(hdr->frame_control))
sta = NULL;
- /* If there is no sta, and it's not offchannel - send through AP */
+ /* this shouldn't even happen: just drop */
if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
- !offchannel) {
- struct iwl_mvm_vif *mvmvif =
- iwl_mvm_vif_from_mac80211(info->control.vif);
- u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id);
-
- if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
- /* mac80211 holds rcu read lock */
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
- if (IS_ERR_OR_NULL(sta))
- goto drop;
- }
- }
+ !offchannel)
+ goto drop;
if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED &&
!ieee80211_is_probe_resp(hdr->frame_control)) {
@@ -1241,7 +1232,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* fast_resume will be cleared by iwl_mvm_fast_resume */
fast_resume = mvm->fast_resume;
@@ -1263,7 +1254,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
@@ -1480,19 +1471,33 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
- struct iwl_dev_tx_power_cmd cmd = {
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
.common.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
-
- if (cmd_ver == 8)
+ struct iwl_dev_tx_power_cmd cmd_v9_v10;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power;
+ void *cmd_data = &cmd;
+
+ cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);
+
+ if (cmd_ver > 8) {
+ /* Those fields sit on the same place for v9 and v10 */
+ cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
+ cmd_v9_v10.common.mac_context_id =
+ cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id);
+ cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
+ cmd_data = &cmd_v9_v10;
+ }
+
+ if (cmd_ver == 10)
+ len = sizeof(cmd_v9_v10.v10);
+ else if (cmd_ver == 9)
+ len = sizeof(cmd_v9_v10.v9);
+ else if (cmd_ver == 8)
len = sizeof(cmd.v8);
else if (cmd_ver == 7)
len = sizeof(cmd.v7);
@@ -1507,10 +1512,14 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
else
len = sizeof(cmd.v3);
- /* all structs have the same common part, add it */
+ /* all structs have the same common part, add its length */
len += sizeof(cmd.common);
- return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
+
}
static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index 8a38fc4b0b0f..455f5f417506 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -144,7 +144,7 @@ static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw,
if (sta != data->sta || key->link_id >= 0)
return;
- err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd);
+ err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
if (err)
data->err = err;
@@ -162,8 +162,8 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
.new_sta_mask = new_sta_mask,
};
- ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
- &data);
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
+ &data);
return data.err;
}
@@ -402,7 +402,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
if (!sec_key_ver)
return;
- ieee80211_iter_keys_rcu(mvm->hw, vif,
- iwl_mvm_sec_key_remove_ap_iter,
- (void *)(uintptr_t)link_id);
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_sec_key_remove_ap_iter,
+ (void *)(uintptr_t)link_id);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 3c99396ad369..f2378e0fb2fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -223,7 +223,7 @@ static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
- IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
}
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
@@ -269,6 +269,9 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
*/
iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+
return ret;
}
@@ -456,6 +459,9 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
/* Start a new counting window */
iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
+ NULL);
+
return ret;
}
@@ -1335,6 +1341,22 @@ iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
else
selected = primary;
+ /*
+ * Remember to tell the firmware that this link can't TX.
+ * Note that this logic seems to be unrelated to esr, but it
+ * really is needed only when esr is active. When we have a
+ * single link, the firmware will handle all this on its own.
+ * In multi-link scenarios, we can learn about the CSA from
+ * another link and this logic is too complex for the firmware
+ * to track.
+ * Since we want to de-activate the link that got a CSA, we
+ * need to tell the firmware not to send any frame on that link
+ * as the firmware may not be aware that the link is under a CSA
+ * with mode=1 (no Tx allowed).
+ */
+ if (chsw->block_tx && mvmvif->link[chsw->link_id])
+ mvmvif->link[chsw->link_id]->csa_block_tx = true;
+
iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index d5a204e52076..28a9d90ad1cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -46,7 +46,7 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
- struct iwl_mvm_sta_cfg_cmd *cmd)
+ struct iwl_sta_cfg_cmd *cmd)
{
int ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
@@ -63,7 +63,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
const u8 *addr, int link_id)
{
- struct iwl_mvm_sta_cfg_cmd cmd;
+ struct iwl_sta_cfg_cmd cmd;
lockdep_assert_held(&mvm->mutex);
@@ -94,7 +94,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
*/
static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id)
{
- struct iwl_mvm_remove_sta_cmd rm_sta_cmd = {
+ struct iwl_remove_sta_cmd rm_sta_cmd = {
.sta_id = cpu_to_le32(sta_id),
};
int ret;
@@ -216,7 +216,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
u16 *queue;
lockdep_assert_held(&mvm->mutex);
@@ -254,7 +254,7 @@ int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta;
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
- unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
lockdep_assert_held(&mvm->mutex);
@@ -438,7 +438,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif_link_info *link_info =
mvm_vif->link[link_conf->link_id];
- struct iwl_mvm_sta_cfg_cmd cmd = {
+ struct iwl_sta_cfg_cmd cmd = {
.sta_id = cpu_to_le32(mvm_link_sta->sta_id),
.station_type = cpu_to_le32(mvm_sta->sta_type),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 22f48b66d79c..ef07cff203b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -299,6 +299,7 @@ struct iwl_probe_resp_data {
* @active: indicates the link is active in FW (for sanity checking)
* @cab_queue: content-after-beacon (multicast) queue
* @listen_lmac: indicates this link is allocated to the listen LMAC
+ * @csa_block_tx: we got a CSA with mode=1 (no TX allowed)
* @mcast_sta: multicast station
* @phy_ctxt: phy context allocated to this link, if any
* @bf_data: beacon filtering data
@@ -324,6 +325,7 @@ struct iwl_mvm_vif_link_info {
bool he_ru_2mhz_block;
bool active;
bool listen_lmac;
+ bool csa_block_tx;
u16 cab_queue;
/* Assigned while mac80211 has the link in a channel context,
@@ -368,6 +370,7 @@ struct iwl_mvm_vif_link_info {
* preventing the enablement of EMLSR
* @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
* @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ * @IWL_MVM_ESR_EXIT_FAIL_ENTRY: Exit EMLSR due to entry failure
*/
enum iwl_mvm_esr_state {
IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
@@ -382,6 +385,7 @@ enum iwl_mvm_esr_state {
IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
IWL_MVM_ESR_EXIT_CSA = 0x100000,
IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+ IWL_MVM_ESR_EXIT_FAIL_ENTRY = 0x400000,
};
#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
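The enum above packs two kinds of reasons into a single bit space: the low 16 bits, masked by IWL_MVM_BLOCK_ESR_REASONS, are blocking reasons, while the higher bits, including the new IWL_MVM_ESR_EXIT_FAIL_ENTRY, are exit reasons. A toy sketch of that split, with made-up values:

#include <stdio.h>
#include <stdint.h>

#define BLOCK_REASONS_MASK 0xffffu      /* stand-in for the real mask */

static const char *kind(uint32_t reason)
{
        return (reason & BLOCK_REASONS_MASK) ? "blocking" : "exit";
}

int main(void)
{
        printf("0x000001 is a %s reason\n", kind(0x1));
        printf("0x400000 is an %s reason\n", kind(0x400000));
        return 0;
}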
@@ -508,7 +512,7 @@ struct iwl_mvm_vif {
bool bf_enabled;
bool ba_enabled;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* WoWLAN GTK rekey data */
struct {
u8 kck[NL80211_KCK_EXT_LEN];
@@ -770,7 +774,6 @@ struct iwl_mvm_tcm {
* @num_stored: number of mpdus stored in the buffer
* @queue: queue of this reorder buffer
* @last_amsdu: track last AMSDU SN for duplication detection
- * @last_sub_index: track ASMDU sub frame index for duplication detection
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
*/
@@ -779,7 +782,6 @@ struct iwl_mvm_reorder_buffer {
u16 num_stored;
int queue;
u16 last_amsdu;
- u8 last_sub_index;
bool valid;
spinlock_t lock;
} ____cacheline_aligned_in_smp;
@@ -1074,8 +1076,8 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
- struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
- struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX];
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX];
+ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -1164,7 +1166,7 @@ struct iwl_mvm {
struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
- struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1];
+ struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1];
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
@@ -1176,7 +1178,7 @@ struct iwl_mvm {
struct ieee80211_vif *p2p_device_vif;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
@@ -1200,8 +1202,11 @@ struct iwl_mvm {
wait_queue_head_t rx_sync_waitq;
- /* BT-Coex */
- struct iwl_bt_coex_profile_notif last_bt_notif;
+ /* BT-Coex - only one of these will be used */
+ union {
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
+ struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
+ };
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u8 bt_tx_prio;
@@ -1365,6 +1370,7 @@ struct iwl_mvm {
struct iwl_mvm_acs_survey *acs_survey;
bool statistics_clear;
+ u32 bios_enable_puncturing;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1700,9 +1706,9 @@ static inline struct agg_tx_status *
iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
- return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
+ return &((struct iwl_tx_resp *)tx_resp)->status;
else
- return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
+ return ((struct iwl_tx_resp_v3 *)tx_resp)->status;
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1745,7 +1751,7 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
if (iwl_mvm_is_esr_supported(trans) ||
(CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
- return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
}
@@ -1764,6 +1770,13 @@ static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
return iwl_mvm_ac_to_tx_fifo[ac];
}
+static inline bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD),
+ 0) >= 3;
+}
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
@@ -2003,7 +2016,7 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf,
+ struct iwl_mvm_vif_link_info *link_info,
__le32 *cck_rates, __le32 *ofdm_rates);
void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -2062,6 +2075,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
@@ -2291,7 +2306,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
@@ -2322,6 +2337,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2570,8 +2587,7 @@ u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool tdls, bool cmd_q);
+ struct ieee80211_vif *vif);
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const char *errmsg);
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 836ca22597bc..80ec59c58ae4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -611,6 +611,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
char mcc[3];
struct ieee80211_regdomain *regd;
int wgds_tbl_idx;
+ bool changed = false;
lockdep_assert_held(&mvm->mutex);
@@ -630,10 +631,15 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
IWL_DEBUG_LAR(mvm,
"RX: received chub update mcc cmd (mcc '%s' src %d)\n",
mcc, src);
- regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed);
if (IS_ERR_OR_NULL(regd))
return;
+ if (!changed) {
+ IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n");
+ goto out;
+ }
+
wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
if (wgds_tbl_idx < 1)
IWL_DEBUG_INFO(mvm,
@@ -644,5 +650,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
wgds_tbl_idx);
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+out:
kfree(regd);
}
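The nvm.c hunk above threads a "changed" flag through the regdomain lookup so that unchanged regulatory data skips the update yet still reaches the new "out" label that frees regd. The same compute/check/cleanup shape in a self-contained sketch (compute_regd() is invented):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static int *compute_regd(bool *changed)
{
        int *regd = malloc(sizeof(*regd));

        if (regd) {
                *regd = 42;
                *changed = false;       /* pretend nothing changed */
        }
        return regd;
}

int main(void)
{
        bool changed = true;
        int *regd = compute_regd(&changed);

        if (!regd)
                return 1;
        if (!changed) {
                printf("no change, skipping the update\n");
                goto out;
        }
        printf("applying regd %d\n", *regd);
out:
        free(regd);     /* freed on both paths, as in the patch */
        return 0;
}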
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index b9daaffd9c7f..4dd4a9d5c71f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -159,6 +159,43 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
iwl_mvm_get_primary_link(vif));
}
+static void iwl_mvm_rx_esr_trans_fail_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ u8 fw_link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ IWL_DEBUG_INFO(mvm, "Failed to %s eSR on link %d, reason %d\n",
+ le32_to_cpu(notif->activation) ? "enter" : "exit",
+ le32_to_cpu(notif->link_id),
+ le32_to_cpu(notif->err_code));
+
+ /* we couldn't go back to single link, disconnect */
+ if (!le32_to_cpu(notif->activation)) {
+ iwl_mvm_connection_loss(mvm, vif, "emlsr exit failed");
+ return;
+ }
+
+ bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, false);
+ if (IWL_FW_CHECK(mvm, !bss_conf,
+ "FW reported failure to activate EMLSR on a non-existing link: %d\n",
+ fw_link_id))
+ return;
+
+ /*
+ * We failed to activate the second link and enter EMLSR, so we
+ * need to go back to a single link.
+ */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_FAIL_ENTRY,
+ bss_conf->link_id);
+}
+
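The new handler above reads every __le32 field through le32_to_cpu() before use, since firmware notifications are little-endian regardless of the host. A portable userspace sketch of pulling LE32 fields out of a raw payload, with an invented packet layout:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* activation=0, link_id=1, err_code=7, all little-endian */
        const uint8_t pkt[12] = { 0, 0, 0, 0, 1, 0, 0, 0, 7, 0, 0, 0 };

        printf("activation=%u link=%u err=%u\n",
               get_le32(pkt), get_le32(pkt + 4), get_le32(pkt + 8));
        return 0;
}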
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -261,6 +298,12 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+ /* the firmware is expected to handle this in RLC offload mode */
+ if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm),
+ "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n",
+ req->event))
+ return;
+
/*
* We could pass it to the iterator data, but also need to remember
* it for new interfaces that are added while in this state.
@@ -325,7 +368,7 @@ struct iwl_rx_handlers {
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
- struct iwl_mvm_tx_resp),
+ struct iwl_tx_resp),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
struct iwl_mvm_ba_notif),
@@ -333,9 +376,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
struct iwl_tlc_update_notif),
- RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_bt_coex_profile_notif),
+ struct iwl_bt_coex_prof_old_notif),
+ RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -385,10 +431,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
- RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER(MISSED_BEACONS_NOTIFICATION,
+ iwl_mvm_rx_missed_beacons_notif_legacy,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_missed_beacons_notif),
+ struct iwl_missed_beacons_notif_v4),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+ iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
@@ -472,6 +523,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_umac_scan_channel_survey_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
+ iwl_mvm_rx_esr_trans_fail_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_esr_trans_fail_notif),
};
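Each RX_HANDLER*() entry above registers not only a callback but also the notification struct it expects, so the core can drop packets shorter than that struct before the handler ever runs. A compact sketch of such a size-checked dispatch table (ids, sizes and names are illustrative):

#include <stdio.h>
#include <stdint.h>

struct handler {
        uint16_t cmd_id;
        size_t min_len;
        void (*fn)(const void *data, size_t len);
};

static void on_notif(const void *data, size_t len)
{
        (void)data;
        printf("handled %zu-byte notification\n", len);
}

static const struct handler handlers[] = {
        { .cmd_id = 0x10, .min_len = 8, .fn = on_notif },
};

static void dispatch(uint16_t cmd, const void *data, size_t len)
{
        for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
                if (handlers[i].cmd_id != cmd)
                        continue;
                if (len < handlers[i].min_len) {        /* size check first */
                        printf("short packet dropped\n");
                        return;
                }
                handlers[i].fn(data, len);
                return;
        }
}

int main(void)
{
        uint8_t payload[8] = { 0 };

        dispatch(0x10, payload, sizeof(payload));
        dispatch(0x10, payload, 4);
        return 0;
}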
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -602,6 +657,7 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(STA_REMOVE_CMD),
HCMD_NAME(STA_DISABLE_TX_CMD),
HCMD_NAME(ROC_CMD),
+ HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
HCMD_NAME(ROC_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
HCMD_NAME(MISSED_VAP_NOTIF),
@@ -713,6 +769,13 @@ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(TAS_CONFIG),
};
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search.
+ */
+static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = {
+ HCMD_NAME(PROFILE_NOTIF),
+};
+
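The comment above is not decorative: lookup really is a binary search, so an unsorted array silently breaks command-name resolution. A sketch of the same lookup using the C library's bsearch(), with invented ids and names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct hcmd_name { uint8_t id; const char *name; };

static const struct hcmd_name names[] = {       /* sorted by id */
        { 0x01, "PROFILE_NOTIF" },
        { 0x10, "STA_CONFIG_CMD" },
        { 0xf0, "DEBUG_NOTIF" },
};

static int cmp_id(const void *key, const void *elem)
{
        uint8_t id = *(const uint8_t *)key;
        const struct hcmd_name *e = elem;

        return (id > e->id) - (id < e->id);
}

int main(void)
{
        uint8_t id = 0x10;
        const struct hcmd_name *hit =
                bsearch(&id, names, sizeof(names) / sizeof(names[0]),
                        sizeof(names[0]), cmp_id);

        printf("0x%02x -> %s\n", id, hit ? hit->name : "UNKNOWN");
        return 0;
}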
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@@ -722,6 +785,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
+ [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@@ -1223,12 +1287,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
/*
- * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
+ * We use IWL_STATION_COUNT_MAX to check the validity of the station
* index all over the driver - check that its value corresponds to the
* array size.
*/
BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
- IWL_MVM_STATION_COUNT_MAX);
+ IWL_STATION_COUNT_MAX);
/********************************
* 1. Allocating and configuring HW data
@@ -1287,6 +1351,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
+ mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt);
if (iwl_mvm_has_new_tx_api(mvm)) {
/*
@@ -1386,10 +1451,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
- /* Set a short watchdog for the command queue */
- trans_cfg.cmd_q_wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
-
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
"%.31s", fw->fw_version);
@@ -2094,6 +2155,7 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+#ifdef CONFIG_PM_SLEEP
static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -2102,11 +2164,13 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_stop_device(mvm);
-#ifdef CONFIG_PM
mvm->fast_resume = false;
-#endif
mutex_unlock(&mvm->mutex);
}
+#else
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
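Guarding the whole handler under CONFIG_PM_SLEEP and supplying an empty stub in the #else branch keeps every call site free of its own #ifdefs. The pattern in miniature (HAVE_PM is a stand-in for the real config symbol):

#include <stdio.h>

#define HAVE_PM 0       /* flip to 1 for the real implementation */

#if HAVE_PM
static void device_powered_off(void)
{
        printf("tearing down for power-off\n");
}
#else
static void device_powered_off(void) {}
#endif

int main(void)
{
        device_powered_off();   /* always safe to call */
        return 0;
}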
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index ce264b386029..7cab5373c8ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -159,7 +159,11 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
.phy_id = cpu_to_le32(ctxt->id),
};
- if (ctxt->rlc_disabled)
+ /* From version 3 on, RLC is offloaded to the firmware, so the
+ * driver no longer needs to send cmd.rlc. Since we are not using
+ * any other fields in the command, don't send it at all.
+ */
+ if (iwl_mvm_has_rlc_offload(mvm) || ctxt->rlc_disabled)
return 0;
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 151289e13308..047c020f8efa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -738,8 +738,8 @@ static void iwl_mvm_stats_energy_iter(void *_data,
u8 *energy = _data;
u32 sta_id = mvmsta->deflink.sta_id;
- if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d",
- sta_id, IWL_MVM_STATION_COUNT_MAX))
+ if (WARN_ONCE(sta_id >= IWL_STATION_COUNT_MAX, "sta_id %d >= %d",
+ sta_id, IWL_STATION_COUNT_MAX))
return;
if (energy[sta_id])
@@ -991,7 +991,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
/* The link IDs that don't exist will contain 0 */
- for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) {
+ for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
}
@@ -1009,8 +1009,8 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
}
- IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
- total_tx, total_rx);
+ IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
/* If we don't have enough MPDUs - exit EMLSR */
if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
@@ -1020,6 +1020,9 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
return;
}
+ IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link, sec_link_tx, sec_link_rx);
+
/* Calculate the percentage of the secondary link TX/RX */
sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
@@ -1039,7 +1042,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
- u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 average_energy[IWL_STATION_COUNT_MAX];
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_system_statistics_notif_oper *stats;
int i;
@@ -1098,7 +1101,7 @@ static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ u8 average_energy[IWL_STATION_COUNT_MAX];
__le32 air_time[MAC_INDEX_AUX];
__le32 rx_bytes[MAC_INDEX_AUX];
__le32 flags = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1a210d0c22b3..65f8933c34b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -729,8 +729,6 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
bool last_subframe =
desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
u8 tid = ieee80211_get_tid(hdr);
- u8 sub_frame_idx = desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
struct iwl_mvm_reorder_buf_entry *entries;
u32 sta_mask;
int index;
@@ -843,10 +841,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
__skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
- if (amsdu) {
+ if (amsdu)
buffer->last_amsdu = sn;
- buffer->last_sub_index = sub_frame_idx;
- }
/*
* We cannot trust NSSN for AMSDU sub-frames that are not the last.
@@ -2542,7 +2538,7 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
goto out;
}
- if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX ||
+ if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX ||
!(baid_data->sta_mask & BIT(sta_id)),
"baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
baid, baid_data->sta_mask, baid_data->tid, sta_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 1cc9c426bb15..3ce9150213a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1594,7 +1594,7 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
for (i = 0; i < n_channels; i++) {
channel_cfg[i].flags = cpu_to_le32(flags);
- channel_cfg[i].v1.channel_num = channels[i]->hw_value;
+ channel_cfg[i].channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
enum nl80211_band band = channels[i]->band;
@@ -1626,13 +1626,13 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
&cp->channel_config[i];
cfg->flags = cpu_to_le32(flags);
- cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->channel_num = channels[i]->hw_value;
cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
cfg->v2.iter_count = 1;
cfg->v2.iter_interval = 0;
iwl_mvm_scan_ch_add_n_aps_override(vif_type,
- cfg->v2.channel_num,
+ cfg->channel_num,
cfg->v2.band, bitmap,
bitmap_n_entries);
}
@@ -1656,7 +1656,7 @@ iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
cfg->flags = cpu_to_le32(flags | n_aps_flag);
- cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->channel_num = channels[i]->hw_value;
if (cfg80211_channel_is_psc(channels[i]))
cfg->flags = 0;
@@ -1789,7 +1789,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
!params->n_6ghz_params && params->n_ssids)
continue;
- cfg->v1.channel_num = params->channels[i]->hw_value;
+ cfg->channel_num = params->channels[i]->hw_value;
if (version < 17)
cfg->v2.band = PHY_BAND_6;
else
@@ -2477,7 +2477,7 @@ iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
if (!cfg80211_channel_is_psc(channel))
continue;
- cfg->v5.channel_num = channel->hw_value;
+ cfg->channel_num = channel->hw_value;
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
@@ -3313,13 +3313,23 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
mvm->scan_start);
}
-static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait)
{
- struct iwl_umac_scan_abort cmd = {};
+ struct iwl_umac_scan_abort abort_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+ .len = { sizeof(abort_cmd), },
+ .data = { &abort_cmd, },
+ .flags = CMD_SEND_IN_RFKILL,
+ };
+
int uid, ret;
+ u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND;
lockdep_assert_held(&mvm->mutex);
+ *wait = true;
+
/* We should always get a valid index here, because we already
* checked that this type of scan was running in the generic
* code.
@@ -3328,17 +3338,28 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
if (WARN_ON_ONCE(uid < 0))
return uid;
- cmd.uid = cpu_to_le32(uid);
+ abort_cmd.uid = cpu_to_le32(uid);
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
- IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
+ /* Handle the case where the FW is no longer familiar with the scan
+ * that is to be stopped: the scan complete notification was most
+ * likely already received but not yet processed. There is then no
+ * need to wait for a scan complete notification, and the flow should
+ * continue as if the scan had really been aborted.
+ */
+ if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) {
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ *wait = false;
+ }
+
return ret;
}
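The reworked abort above reads a status word back from the firmware; IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND means the scan already completed on its own, so the caller may skip waiting for a completion notification. A sketch of that decision, with invented status codes:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define STATUS_NOT_FOUND 2u     /* invented value */

static int send_abort(uint32_t *status)
{
        *status = STATUS_NOT_FOUND;     /* pretend FW forgot the scan */
        return 0;
}

static int stop_scan(void)
{
        uint32_t status;
        bool wait = true;
        int ret = send_abort(&status);

        if (ret)
                return ret;
        if (status == STATUS_NOT_FOUND)
                wait = false;   /* completion already happened */
        if (!wait) {
                printf("no need to wait for completion\n");
                return 0;
        }
        printf("waiting for the scan-complete notification\n");
        return 0;
}

int main(void)
{
        return stop_scan();
}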
@@ -3348,6 +3369,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
SCAN_OFFLOAD_COMPLETE, };
int ret;
+ bool wait = true;
lockdep_assert_held(&mvm->mutex);
@@ -3359,7 +3381,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- ret = iwl_mvm_umac_scan_abort(mvm, type);
+ ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
else
ret = iwl_mvm_lmac_scan_abort(mvm);
@@ -3367,6 +3389,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
+ } else if (!wait) {
+ IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return 0;
}
return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 15e64d94d6ea..b6c99cd6d9e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -29,7 +29,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
int sta_id;
u32 reserved_ids = 0;
- BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
+ BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
@@ -900,7 +900,7 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
int queue = -1;
lockdep_assert_held(&mvm->mutex);
@@ -1080,7 +1080,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
@@ -1330,7 +1330,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
int queue = -1;
u16 queue_tmp;
unsigned long disable_agg_tids = 0;
@@ -1622,7 +1622,7 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg =
- iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvm_sta->deflink.sta_id,
@@ -2359,7 +2359,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int queue;
int ret;
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_VO,
.sta_id = mvmvif->deflink.bcast_sta.sta_id,
@@ -2568,7 +2568,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
- unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3207,7 +3207,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
int queue, ret;
bool alloc_queue = true;
enum iwl_mvm_queue_status queue_status;
@@ -4326,7 +4326,7 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ iwl_mvm_get_wd_timeout(mvm, vif);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;
@@ -4455,10 +4455,10 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
sizeof(queue_counter->per_link));
queue_counter->window_start = jiffies;
- IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
}
- for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
+ for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
total_mpdus += tx ? queue_counter->per_link[i].tx :
queue_counter->per_link[i].rx;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0dc83d6afb3c..4a3799ae7c18 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -12,7 +12,7 @@
#include <linux/wait.h>
#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
-#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
+#include "fw-api.h" /* IWL_STATION_COUNT_MAX */
#include "rs.h"
struct iwl_mvm;
@@ -361,7 +361,7 @@ struct iwl_mvm_mpdu_counter {
*/
struct iwl_mvm_tpt_counter {
spinlock_t lock;
- struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
+ struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
unsigned long window_start;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index a8c42ce3b630..72fa7ac86516 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -114,16 +114,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
- if (mvm->mld_api_is_used) {
- iwl_mvm_mld_rm_aux_sta(mvm);
- mutex_unlock(&mvm->mutex);
- return;
- }
-
/* In newer version of this command an aux station is added only
* in cases of dedicated tx queue and need to be removed in end
- * of use */
- if (iwl_mvm_has_new_station_api(mvm->fw))
+ * of use. For the even newer MLD API, use the appropriate
+ * function.
+ */
+ if (mvm->mld_api_is_used)
+ iwl_mvm_mld_rm_aux_sta(mvm);
+ else if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 7ff5ea5e7aca..ca026b5256ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1203,6 +1203,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
bool is_ampdu = false;
int hdrlen;
+ if (WARN_ON_ONCE(!sta))
+ return -1;
+
mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
hdrlen = ieee80211_hdrlen(fc);
@@ -1210,9 +1213,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
return -1;
- if (WARN_ON_ONCE(!mvmsta))
- return -1;
-
if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
return -1;
@@ -1343,7 +1343,7 @@ drop:
int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvmsta;
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
struct ieee80211_vif *vif;
@@ -1352,9 +1352,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff *orig_skb = skb;
const u8 *addr3;
- if (WARN_ON_ONCE(!mvmsta))
+ if (WARN_ON_ONCE(!sta))
return -1;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
return -1;
@@ -1678,7 +1680,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
- struct iwl_mvm_tx_resp *tx_resp)
+ struct iwl_tx_resp *tx_resp)
{
u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count);
@@ -1694,8 +1696,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
- /* struct iwl_mvm_tx_resp_v3 is almost the same */
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ /* struct iwl_tx_resp_v3 is almost the same */
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
struct agg_tx_status *agg_status =
@@ -1952,7 +1954,7 @@ static const char *iwl_get_agg_tx_status(u16 status)
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
@@ -1986,7 +1988,7 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -2027,7 +2029,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
if (tx_resp->frame_count == 1)
iwl_mvm_rx_tx_cmd_single(mvm, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 0e5fa8374103..1d1364d03f02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -297,6 +297,10 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type != NL80211_IFTYPE_STATION)
return;
+ /* SMPS is handled by firmware */
+ if (iwl_mvm_has_rlc_offload(mvm))
+ return;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
@@ -743,58 +747,20 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
}
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool tdls, bool cmd_q)
+ struct ieee80211_vif *vif)
{
- struct iwl_fw_dbg_trigger_tlv *trigger;
- struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
- unsigned int default_timeout = cmd_q ?
- IWL_DEF_WD_TIMEOUT :
+ unsigned int default_timeout =
mvm->trans->trans_cfg->base_params->wd_timeout;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
- /*
- * We can't know when the station is asleep or awake, so we
- * must disable the queue hang detection.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
- vif && vif->type == NL80211_IFTYPE_AP)
- return IWL_WATCHDOG_DISABLED;
- return default_timeout;
- }
-
- trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
- txq_timer = (void *)trigger->data;
-
- if (tdls)
- return le32_to_cpu(txq_timer->tdls);
-
- if (cmd_q)
- return le32_to_cpu(txq_timer->command_queue);
-
- if (WARN_ON(!vif))
- return default_timeout;
-
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_ADHOC:
- return le32_to_cpu(txq_timer->ibss);
- case NL80211_IFTYPE_STATION:
- return le32_to_cpu(txq_timer->bss);
- case NL80211_IFTYPE_AP:
- return le32_to_cpu(txq_timer->softap);
- case NL80211_IFTYPE_P2P_CLIENT:
- return le32_to_cpu(txq_timer->p2p_client);
- case NL80211_IFTYPE_P2P_GO:
- return le32_to_cpu(txq_timer->p2p_go);
- case NL80211_IFTYPE_P2P_DEVICE:
- return le32_to_cpu(txq_timer->p2p_device);
- case NL80211_IFTYPE_MONITOR:
- return default_timeout;
- default:
- WARN_ON(1);
- return mvm->trans->trans_cfg->base_params->wd_timeout;
- }
+ /*
+ * We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+ vif->type == NL80211_IFTYPE_AP)
+ return IWL_WATCHDOG_DISABLED;
+ return default_timeout;
}
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 84fd93278450..805fb249a0c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -500,9 +500,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
- {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 18dda89b7985..8903a5692dfb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -526,6 +526,8 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
keep_ram_busy = !iwl_pcie_set_ltr(trans);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
+ iwl_read32(trans, CSR_FUNC_SCRATCH));
iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 719ddc4b72c5..3b9943eb6934 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1967,7 +1967,6 @@ void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
@@ -3567,6 +3566,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
+ /* Set a short watchdog for the command queue */
+ trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT;
+
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 3c193074662b..d7be232f5739 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -116,11 +116,6 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
int8_t p2, int usesnr);
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
- uint16_t cmd_action);
-
int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 631b5da09f86..a5d4c09fb918 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -484,12 +484,9 @@ void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
void lbtf_cmd_response_rx(struct lbtf_private *priv);
/* main.c */
-struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
- int *cfp_no);
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
const struct lbtf_ops *ops);
int lbtf_remove_card(struct lbtf_private *priv);
-int lbtf_start_card(struct lbtf_private *priv);
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
void lbtf_bcn_sent(struct lbtf_private *priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index b90f922f1cdc..032b93a41d99 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -117,12 +117,12 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
dfs_cac_work);
chandef = priv->dfs_chandef;
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, MSG,
"CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
}
@@ -174,7 +174,7 @@ int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
*/
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
mwifiex_dbg(priv->adapter, ERROR,
"failed to stop CAC in FW\n");
@@ -182,7 +182,8 @@ void mwifiex_abort_cac(struct mwifiex_private *priv)
"Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL,
+ 0);
}
}
@@ -221,7 +222,7 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
cfg80211_cac_event(priv->netdev,
&priv->dfs_chandef,
NL80211_RADAR_DETECTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
break;
default:
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index c0c635e74bc5..66f0f5377ac1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -881,8 +881,6 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv;
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
tx_win_size = priv->add_ba_param.tx_win_size;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 7738ebe1fec1..773bd5c0f007 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -108,9 +108,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- ba_stream_num += list_count_nodes(
- &priv->tx_ba_stream_tbl_ptr);
+ ba_stream_num += list_count_nodes(&priv->tx_ba_stream_tbl_ptr);
}
if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 10690e82358b..cb948ca34373 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -810,8 +810,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
@@ -834,8 +832,6 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "Update rxwinsize %d\n", coex_flag);
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
rx_win_size = priv->add_ba_param.rx_win_size;
if (coex_flag) {
@@ -882,17 +878,16 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
u8 count = 0;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
- if (priv->media_connected)
- count++;
- }
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
- if (priv->bss_started)
- count++;
- }
+ priv = adapter->priv[i];
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+ if (priv->media_connected)
+ count++;
}
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ if (priv->bss_started)
+ count++;
+ }
+
if (count >= MWIFIEX_BSS_COEX_COUNT)
break;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index bf35c92f91d7..fca3eea7ee84 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -221,6 +221,26 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return 0;
}
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ if (ieee80211_is_auth(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send auth to %pM\n", mgmt->da);
+ if (ieee80211_is_deauth(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send deauth to %pM\n", mgmt->da);
+ if (ieee80211_is_disassoc(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send disassoc to %pM\n", mgmt->da);
+ if (ieee80211_is_assoc_resp(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send assoc resp to %pM\n",
+ mgmt->da);
+ if (ieee80211_is_reassoc_resp(mgmt->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: send reassoc resp to %pM\n",
+ mgmt->da);
+ }
+
pkt_len = len + ETH_ALEN;
skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
MWIFIEX_MGMT_FRAME_HEADER_SIZE +
@@ -268,6 +288,8 @@ mwifiex_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
if (mask != priv->mgmt_frame_mask) {
priv->mgmt_frame_mask = mask;
+ if (priv->host_mlme_reg)
+ priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK;
mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false);
@@ -503,6 +525,9 @@ mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index);
+ if (priv->adapter->host_mlme_enabled)
+ return 0;
+
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = WLAN_KEY_LEN_CCMP;
encrypt_key.key_index = key_index;
@@ -848,6 +873,7 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
unsigned long flags;
+ priv->host_mlme_reg = false;
priv->mgmt_frame_mask = 0;
if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
@@ -1880,7 +1906,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- if (!priv->bss_started && priv->wdev.cac_started) {
+ if (!priv->bss_started && priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
mwifiex_abort_cac(priv);
}
@@ -3482,7 +3508,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv->netdev)
netif_device_detach(priv->netdev);
}
@@ -3554,7 +3580,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv->netdev)
netif_device_attach(priv->netdev);
}
@@ -3633,6 +3659,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info))
return -EOPNOTSUPP;
+ if (priv->adapter->host_mlme_enabled)
+ return 0;
+
return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
HostCmd_ACT_GEN_SET, 0, data, true);
}
@@ -3948,11 +3977,42 @@ mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
}
static int
+mwifiex_cfg80211_uap_add_station(struct mwifiex_private *priv, const u8 *mac,
+ struct station_parameters *params)
+{
+ struct mwifiex_sta_info add_sta;
+ int ret;
+
+ memcpy(add_sta.peer_mac, mac, ETH_ALEN);
+ add_sta.params = params;
+
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_ADD_NEW_STATION,
+ HostCmd_ACT_ADD_STA, 0, (void *)&add_sta, true);
+
+ if (!ret) {
+ struct station_info *sinfo;
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
+ cfg80211_new_sta(priv->netdev, mac, sinfo, GFP_KERNEL);
+ kfree(sinfo);
+ }
+
+ return ret;
+}
+
+static int
mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP))
+ return mwifiex_cfg80211_uap_add_station(priv, mac, params);
+
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
return -EOPNOTSUPP;
@@ -3978,7 +4038,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return -EBUSY;
}
- if (priv->wdev.cac_started)
+ if (priv->wdev.links[0].cac_started)
return -EBUSY;
if (cfg80211_chandef_identical(&params->chandef,
@@ -4145,7 +4205,7 @@ static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_radar_params radar_params;
@@ -4190,6 +4250,10 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP))
+ return 0;
+
/* we support change_station handler only for TDLS peers*/
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
return -EOPNOTSUPP;
@@ -4206,8 +4270,307 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int
+mwifiex_cfg80211_authenticate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_auth_request *req)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct sk_buff *skb;
+ u16 pkt_len, auth_alg;
+ int ret;
+ struct mwifiex_ieee80211_mgmt *mgmt;
+ struct mwifiex_txinfo *tx_info;
+ u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
+ u8 trans = 1, status_code = 0;
+ u8 *varptr = NULL;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ mwifiex_dbg(priv->adapter, ERROR, "Interface role is AP\n");
+ return -EFAULT;
+ }
+
+ if (priv->wdev.iftype != NL80211_IFTYPE_STATION) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Interface type is not correct (type %d)\n",
+ priv->wdev.iftype);
+ return -EINVAL;
+ }
+
+ if (priv->auth_alg != WLAN_AUTH_SAE &&
+ (priv->auth_flag & HOST_MLME_AUTH_PENDING)) {
+ mwifiex_dbg(priv->adapter, ERROR, "Pending auth ongoing\n");
+ return -EBUSY;
+ }
+
+ if (!priv->host_mlme_reg) {
+ priv->host_mlme_reg = true;
+ priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK;
+ mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false);
+ }
+
+ switch (req->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ auth_alg = WLAN_AUTH_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ auth_alg = WLAN_AUTH_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_FT:
+ auth_alg = WLAN_AUTH_FT;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ auth_alg = WLAN_AUTH_LEAP;
+ break;
+ case NL80211_AUTHTYPE_SAE:
+ auth_alg = WLAN_AUTH_SAE;
+ break;
+ default:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported auth type=%d\n", req->auth_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (!priv->auth_flag) {
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET,
+ req->bss->channel,
+ AUTH_TX_DEFAULT_WAIT_TIME);
+
+ if (!ret) {
+ priv->roc_cfg.cookie = get_random_u32() | 1;
+ priv->roc_cfg.chan = *req->bss->channel;
+ } else {
+ return -EFAULT;
+ }
+ }
+
+ priv->sec_info.authentication_mode = auth_alg;
+
+ mwifiex_cancel_scan(adapter);
+
+ pkt_len = (u16)req->ie_len + req->auth_data_len +
+ MWIFIEX_MGMT_HEADER_LEN + MWIFIEX_AUTH_BODY_LEN;
+ if (req->auth_data_len >= 4)
+ pkt_len -= 4;
+
+ skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ pkt_len + sizeof(pkt_len));
+ if (!skb) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = pkt_len;
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
+ memcpy(skb_push(skb, sizeof(tx_control)),
+ &tx_control, sizeof(tx_control));
+ memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
+
+ mgmt = (struct mwifiex_ieee80211_mgmt *)skb_put(skb, pkt_len);
+ memset(mgmt, 0, pkt_len);
+ mgmt->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
+ memcpy(mgmt->da, req->bss->bssid, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, req->bss->bssid, ETH_ALEN);
+ eth_broadcast_addr(mgmt->addr4);
+
+ if (req->auth_data_len >= 4) {
+ if (req->auth_type == NL80211_AUTHTYPE_SAE) {
+ __le16 *pos = (__le16 *)req->auth_data;
+
+ trans = le16_to_cpu(pos[0]);
+ status_code = le16_to_cpu(pos[1]);
+ }
+ memcpy((u8 *)(&mgmt->auth.variable), req->auth_data + 4,
+ req->auth_data_len - 4);
+ varptr = (u8 *)&mgmt->auth.variable +
+ (req->auth_data_len - 4);
+ }
+
+ mgmt->auth.auth_alg = cpu_to_le16(auth_alg);
+ mgmt->auth.auth_transaction = cpu_to_le16(trans);
+ mgmt->auth.status_code = cpu_to_le16(status_code);
+
+ if (req->ie && req->ie_len) {
+ if (!varptr)
+ varptr = (u8 *)&mgmt->auth.variable;
+ memcpy((u8 *)varptr, req->ie, req->ie_len);
+ }
+
+ priv->auth_flag = HOST_MLME_AUTH_PENDING;
+ priv->auth_alg = auth_alg;
+
+ skb->priority = WMM_HIGHEST_PRIORITY;
+ __net_timestamp(skb);
+
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: send authentication to %pM\n", req->bss->bssid);
+
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
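A note on the length math above: the auth frame is the 30-byte FW 4-address management header plus the 6-byte fixed auth body (algorithm, transaction, status), the caller's IEs, and any auth_data beyond its first four bytes; those four bytes carry the transaction and status words, which already live in the fixed auth body, hence the pkt_len -= 4. A minimal userspace sketch of that arithmetic (the constant values mirror decl.h below; not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define MGMT_HEADER_LEN 30  /* FW 4-address management header */
    #define AUTH_BODY_LEN    6  /* auth alg + transaction + status */

    static uint16_t auth_pkt_len(uint16_t ie_len, uint16_t auth_data_len)
    {
        uint16_t len = ie_len + auth_data_len +
                       MGMT_HEADER_LEN + AUTH_BODY_LEN;

        /* The first 4 bytes of auth_data duplicate the transaction and
         * status fields of the fixed auth body, so drop them here. */
        if (auth_data_len >= 4)
            len -= 4;
        return len;
    }

    int main(void)
    {
        /* SAE commit: 32 bytes of auth_data, no extra IEs gives
         * 30 + 6 + (32 - 4) = 64 bytes of management frame. */
        assert(auth_pkt_len(0, 32) == 64);
        return 0;
    }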
+static int
+mwifiex_cfg80211_associate(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_assoc_request *req)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int ret;
+ struct cfg80211_ssid req_ssid;
+ const u8 *ssid_ie;
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: reject infra assoc request in non-STA role\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Ignore association.\t"
+ "Card removed or FW in bad state\n",
+ dev->name);
+ return -EFAULT;
+ }
+
+ if (priv->auth_alg == WLAN_AUTH_SAE)
+ priv->auth_flag = HOST_MLME_AUTH_DONE;
+
+ if (priv->auth_flag && !(priv->auth_flag & HOST_MLME_AUTH_DONE))
+ return -EBUSY;
+
+ if (priv->roc_cfg.cookie) {
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
+ &priv->roc_cfg.chan, 0);
+ if (!ret)
+ memset(&priv->roc_cfg, 0,
+ sizeof(struct mwifiex_roc_cfg));
+ else
+ return -EFAULT;
+ }
+
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0);
+
+ memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
+ rcu_read_lock();
+ ssid_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+
+ if (!ssid_ie)
+ goto ssid_err;
+
+ req_ssid.ssid_len = ssid_ie[1];
+ if (req_ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
+ mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n");
+ goto ssid_err;
+ }
+
+ memcpy(req_ssid.ssid, ssid_ie + 2, req_ssid.ssid_len);
+ if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
+ mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n");
+ goto ssid_err;
+ }
+ rcu_read_unlock();
+
+	/* As this is a new association, clear locally stored
+	 * keys and security-related flags
+ */
+ priv->sec_info.wpa_enabled = false;
+ priv->sec_info.wpa2_enabled = false;
+ priv->wep_key_curr_index = 0;
+ priv->sec_info.encryption_mode = 0;
+ priv->sec_info.is_authtype_auto = 0;
+ if (mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1)) {
+ mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
+ return -EFAULT;
+ }
+
+ if (req->crypto.n_ciphers_pairwise)
+ priv->sec_info.encryption_mode =
+ req->crypto.ciphers_pairwise[0];
+
+ if (req->crypto.cipher_group)
+ priv->sec_info.encryption_mode = req->crypto.cipher_group;
+
+ if (req->ie)
+ mwifiex_set_gen_ie(priv, req->ie, req->ie_len);
+
+ memcpy(priv->cfg_bssid, req->bss->bssid, ETH_ALEN);
+
+ mwifiex_dbg(adapter, MSG,
+ "assoc: send association to %pM\n", req->bss->bssid);
+
+ cfg80211_ref_bss(adapter->wiphy, req->bss);
+ ret = mwifiex_bss_start(priv, req->bss, &req_ssid);
+ if (ret) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ eth_zero_addr(priv->cfg_bssid);
+ }
+
+ if (priv->assoc_rsp_size) {
+ priv->req_bss = req->bss;
+ adapter->assoc_resp_received = true;
+ queue_work(adapter->host_mlme_workqueue,
+ &adapter->host_mlme_work);
+ }
+
+ cfg80211_put_bss(priv->adapter->wiphy, req->bss);
+
+ return 0;
+
+ssid_err:
+ rcu_read_unlock();
+ return -EFAULT;
+}
+
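Note that the SSID IE returned by ieee80211_bss_get_ie() points into RCU-protected BSS data, so mwifiex_cfg80211_associate() copies it into the local req_ssid before dropping the read lock; both the success path and the ssid_err path call rcu_read_unlock() exactly once.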
+static int
+mwifiex_cfg80211_deauthenticate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_deauth_request *req)
+{
+ return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code);
+}
+
+static int
+mwifiex_cfg80211_disassociate(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_disassoc_request *req)
+{
+ return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code);
+}
+
+static int
+mwifiex_cfg80211_probe_client(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *peer,
+ u64 *cookie)
+{
+ /* hostapd looks for NL80211_CMD_PROBE_CLIENT support; otherwise,
+ * it requires monitor-mode support (which mwifiex doesn't support).
+ * Provide fake probe_client support to work around this.
+ */
+ return -EOPNOTSUPP;
+}
+
/* station cfg80211 operations */
-static struct cfg80211_ops mwifiex_cfg80211_ops = {
+static const struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
.del_virtual_intf = mwifiex_del_virtual_intf,
.change_virtual_intf = mwifiex_cfg80211_change_virtual_intf,
@@ -4342,24 +4705,58 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
u32 thr, retry;
+ struct cfg80211_ops *ops;
+
+ ops = devm_kmemdup(adapter->dev, &mwifiex_cfg80211_ops,
+ sizeof(mwifiex_cfg80211_ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
/* create a new wiphy for use with cfg80211 */
- wiphy = wiphy_new(&mwifiex_cfg80211_ops,
- sizeof(struct mwifiex_adapter *));
+ wiphy = wiphy_new(ops, sizeof(struct mwifiex_adapter *));
if (!wiphy) {
mwifiex_dbg(adapter, ERROR,
"%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
+ if (adapter->host_mlme_enabled) {
+ ops->auth = mwifiex_cfg80211_authenticate;
+ ops->assoc = mwifiex_cfg80211_associate;
+ ops->deauth = mwifiex_cfg80211_deauthenticate;
+ ops->disassoc = mwifiex_cfg80211_disassociate;
+ ops->disconnect = NULL;
+ ops->connect = NULL;
+ ops->probe_client = mwifiex_cfg80211_probe_client;
+ }
wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
- wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+ if (adapter->host_mlme_enabled) {
+ memcpy(adapter->mwifiex_mgmt_stypes,
+ mwifiex_mgmt_stypes,
+ NUM_NL80211_IFTYPES *
+ sizeof(struct ieee80211_txrx_stypes));
+
+ adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].tx = 0xffff;
+ adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].rx =
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4);
+ wiphy->mgmt_stypes = adapter->mwifiex_mgmt_stypes;
+ } else {
+ wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+ }
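The rx mask built above relies on management subtypes occupying bits 4-7 of the 802.11 frame control word, so stype >> 4 yields a compact per-subtype bit index. A standalone sketch (frame control values per IEEE 802.11; not driver code):

    #include <assert.h>

    #define BIT(n) (1U << (n))
    #define STYPE_AUTH   0x00b0  /* management subtype 11 */
    #define STYPE_DEAUTH 0x00c0  /* management subtype 12 */

    int main(void)
    {
        /* Subtype sits in bits 4-7, so >> 4 gives the bit index. */
        assert(BIT(STYPE_AUTH >> 4) == BIT(11));
        assert(BIT(STYPE_DEAUTH >> 4) == BIT(12));
        return 0;
    }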
wiphy->max_remain_on_channel_duration = 5000;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
+ wiphy->max_num_akm_suites = CFG80211_MAX_NUM_AKM_SUITES;
+
if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
@@ -4411,14 +4808,18 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
- WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (adapter->host_mlme_enabled)
+ wiphy->flags |= WIPHY_FLAG_REPORTS_OBSS;
+ else
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
@@ -4448,6 +4849,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (adapter->host_mlme_enabled)
+ wiphy->features |= NL80211_FEATURE_SAE;
+
if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_HT_IBSS;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9eff29a25544..7894102f03eb 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -482,7 +482,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && mwifiex_is_11h_active(priv)) {
+ if (mwifiex_is_11h_active(priv)) {
adapter->event_cause |=
((priv->bss_num & 0xff) << 16) |
((priv->bss_type & 0xff) << 24);
@@ -635,6 +635,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
case HostCmd_CMD_UAP_STA_DEAUTH:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
+ case HostCmd_CMD_CHAN_REPORT_REQUEST:
+ case HostCmd_CMD_ADD_NEW_STATION:
ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
cmd_oid, data_buf,
cmd_ptr);
@@ -924,6 +926,24 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return ret;
}
+void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter)
+{
+ struct cfg80211_rx_assoc_resp_data assoc_resp = {
+ .uapsd_queues = -1,
+ };
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ if (priv->assoc_rsp_size) {
+ assoc_resp.links[0].bss = priv->req_bss;
+ assoc_resp.buf = priv->assoc_rsp_buf;
+ assoc_resp.len = priv->assoc_rsp_size;
+ cfg80211_rx_assoc_resp(priv->netdev,
+ &assoc_resp);
+ priv->assoc_rsp_size = 0;
+ }
+}
+
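mwifiex_process_assoc_resp() hands the buffered association response to cfg80211 from process context. uapsd_queues is set to -1, which (as far as the cfg80211 API is concerned) marks the U-APSD queue bitmap as not available, and only links[0] is filled in since mwifiex does not support MLO.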
/*
* This function handles the timeout of command sending.
*
@@ -1672,6 +1692,13 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
if (adapter->fw_api_ver == MWIFIEX_FW_V15)
adapter->scan_chan_gap_enabled = true;
+ if (adapter->key_api_major_ver != KEY_API_VER_MAJOR_V2)
+ adapter->host_mlme_enabled = false;
+
+ mwifiex_dbg(adapter, MSG, "host_mlme: %s, key_api: %d\n",
+		    adapter->host_mlme_enabled ? "enabled" : "disabled",
+ adapter->key_api_major_ver);
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 326ffb05d791..84603f1e7f6e 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -31,6 +31,29 @@
* + sizeof(tx_control)
*/
+#define FRMCTL_LEN 2
+#define DURATION_LEN 2
+#define SEQCTL_LEN 2
+/* special FW 4-address management header */
+#define MWIFIEX_MGMT_HEADER_LEN (FRMCTL_LEN + DURATION_LEN + ETH_ALEN + \
+ ETH_ALEN + ETH_ALEN + SEQCTL_LEN + ETH_ALEN)
+
+#define AUTH_ALG_LEN 2
+#define AUTH_TRANSACTION_LEN 2
+#define AUTH_STATUS_LEN 2
+#define MWIFIEX_AUTH_BODY_LEN (AUTH_ALG_LEN + AUTH_TRANSACTION_LEN + \
+ AUTH_STATUS_LEN)
+
+#define HOST_MLME_AUTH_PENDING BIT(0)
+#define HOST_MLME_AUTH_DONE BIT(1)
+
+#define HOST_MLME_MGMT_MASK (BIT(IEEE80211_STYPE_AUTH >> 4) | \
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) | \
+ BIT(IEEE80211_STYPE_DISASSOC >> 4))
+#define AUTH_TX_DEFAULT_WAIT_TIME 2400
+
+#define WLAN_AUTH_NONE 0xFFFF
+
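Spelled out, MWIFIEX_MGMT_HEADER_LEN is 2 + 2 + 6 + 6 + 6 + 2 + 6 = 30 bytes (frame control, duration, three addresses, sequence control, and the extra fourth address the firmware expects), and MWIFIEX_AUTH_BODY_LEN is 3 * 2 = 6 bytes; these are the constants behind the pkt_len math in mwifiex_cfg80211_authenticate() above. WLAN_AUTH_NONE is a driver-local sentinel: 0xFFFF lies outside the IEEE auth algorithm number space used by the WLAN_AUTH_* values.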
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
#define MWIFIEX_MAX_TDLS_PEER_SUPPORTED 8
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 3adc447b715f..d03129d5d24e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -210,6 +210,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
#define TLV_TYPE_MAX_CONN (PROPRIETARY_TLV_BASE_ID + 279)
+#define TLV_TYPE_HOST_MLME (PROPRIETARY_TLV_BASE_ID + 307)
+#define TLV_TYPE_UAP_STA_FLAGS (PROPRIETARY_TLV_BASE_ID + 313)
+#define TLV_TYPE_SAE_PWE_MODE (PROPRIETARY_TLV_BASE_ID + 339)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -405,6 +408,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_STA_CONFIGURE 0x023f
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
+#define HostCmd_CMD_ADD_NEW_STATION 0x025f
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -415,6 +419,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define KEY_MGMT_NONE 0x04
#define KEY_MGMT_PSK 0x02
#define KEY_MGMT_EAP 0x01
+#define KEY_MGMT_PSK_SHA256 0x100
+#define KEY_MGMT_SAE 0x400
#define CIPHER_TKIP 0x04
#define CIPHER_AES_CCMP 0x08
#define VALID_CIPHER_BITMAP 0x0c
@@ -500,6 +506,9 @@ enum mwifiex_channel_flags {
#define HostCmd_ACT_GET_TX 0x0008
#define HostCmd_ACT_GET_BOTH 0x000c
+#define HostCmd_ACT_REMOVE_STA 0x0
+#define HostCmd_ACT_ADD_STA 0x1
+
#define RF_ANTENNA_AUTO 0xFFFF
#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
@@ -744,6 +753,25 @@ struct uap_rxpd {
u8 flags;
} __packed;
+struct mwifiex_auth {
+ __le16 auth_alg;
+ __le16 auth_transaction;
+ __le16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[];
+} __packed;
+
+struct mwifiex_ieee80211_mgmt {
+ __le16 frame_control;
+ __le16 duration;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ struct mwifiex_auth auth;
+} __packed;
+
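This layout must stay in step with MWIFIEX_MGMT_HEADER_LEN from decl.h, since the auth body is expected to start right after the 4-address header. A compile-time check in the style of the mwl8k assertion later in this series could pin that down (a sketch only, assuming both definitions are in scope):

    /* Sketch only: tie the 4-address header layout to the length macro. */
    static_assert(offsetof(struct mwifiex_ieee80211_mgmt, auth) ==
                  MWIFIEX_MGMT_HEADER_LEN,
                  "4-address mgmt header length mismatch");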
struct mwifiex_fw_chan_stats {
u8 chan_num;
u8 bandcfg;
@@ -803,6 +831,11 @@ struct mwifiex_ie_types_ssid_param_set {
u8 ssid[];
} __packed;
+struct mwifiex_ie_types_host_mlme {
+ struct mwifiex_ie_types_header header;
+ u8 host_mlme;
+} __packed;
+
struct mwifiex_ie_types_num_probes {
struct mwifiex_ie_types_header header;
__le16 num_probes;
@@ -906,6 +939,13 @@ struct mwifiex_ie_types_tdls_idle_timeout {
__le16 value;
} __packed;
+#define MWIFIEX_AUTHTYPE_SAE 6
+
+struct mwifiex_ie_types_sae_pwe_mode {
+ struct mwifiex_ie_types_header header;
+ u8 pwe[];
+} __packed;
+
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
u8 rsn_ie[];
@@ -1587,7 +1627,7 @@ struct host_cmd_ds_802_11_scan_rsp {
struct host_cmd_ds_802_11_scan_ext {
u32 reserved;
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
struct mwifiex_ie_types_bss_mode {
@@ -2298,6 +2338,20 @@ struct host_cmd_ds_sta_configure {
u8 tlv_buffer[];
} __packed;
+struct mwifiex_ie_types_sta_flag {
+ struct mwifiex_ie_types_header header;
+ __le32 sta_flags;
+} __packed;
+
+struct host_cmd_ds_add_station {
+ __le16 action;
+ __le16 aid;
+ u8 peer_mac[ETH_ALEN];
+ __le32 listen_interval;
+ __le16 cap_info;
+ u8 tlv[];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2376,6 +2430,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_chan_region_cfg reg_cfg;
struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl;
struct host_cmd_ds_sta_configure sta_cfg;
+ struct host_cmd_ds_add_station sta_info;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index c9c58419c37b..8b61e45cd667 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -81,6 +81,9 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR;
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+
priv->sec_info.wep_enabled = 0;
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
priv->sec_info.encryption_mode = 0;
@@ -220,6 +223,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->cmd_resp_received = false;
adapter->event_received = false;
adapter->data_received = false;
+ adapter->assoc_resp_received = false;
+ adapter->priv_link_lost = NULL;
+ adapter->host_mlme_link_lost = false;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
@@ -362,15 +368,13 @@ static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter)
list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- for (j = 0; j < MAX_NUM_TID; ++j)
- list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
- list_del(&priv->tx_ba_stream_tbl_ptr);
- list_del(&priv->rx_reorder_tbl_ptr);
- list_del(&priv->sta_list);
- list_del(&priv->auto_tdls_list);
- }
+ priv = adapter->priv[i];
+ for (j = 0; j < MAX_NUM_TID; ++j)
+ list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
+ list_del(&priv->tx_ba_stream_tbl_ptr);
+ list_del(&priv->rx_reorder_tbl_ptr);
+ list_del(&priv->sta_list);
+ list_del(&priv->auto_tdls_list);
}
}
@@ -419,13 +423,11 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->mwifiex_cmd_lock);
spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
- spin_lock_init(&priv->wmm.ra_list_spinlock);
- spin_lock_init(&priv->curr_bcn_buf_lock);
- spin_lock_init(&priv->sta_list_spinlock);
- spin_lock_init(&priv->auto_tdls_lock);
- }
+ priv = adapter->priv[i];
+ spin_lock_init(&priv->wmm.ra_list_spinlock);
+ spin_lock_init(&priv->curr_bcn_buf_lock);
+ spin_lock_init(&priv->sta_list_spinlock);
+ spin_lock_init(&priv->auto_tdls_lock);
}
/* Initialize cmd_free_q */
@@ -449,8 +451,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i])
- continue;
priv = adapter->priv[i];
for (j = 0; j < MAX_NUM_TID; ++j)
INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list);
@@ -500,31 +500,24 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
mwifiex_init_adapter(adapter);
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
+ priv = adapter->priv[i];
- /* Initialize private structure */
- ret = mwifiex_init_priv(priv);
- if (ret)
- return -1;
- }
+ /* Initialize private structure */
+ ret = mwifiex_init_priv(priv);
+ if (ret)
+ return -1;
}
if (adapter->mfg_mode) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
ret = -EINPROGRESS;
} else {
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- ret = mwifiex_sta_init_cmd(adapter->priv[i],
- first_sta, true);
- if (ret == -1)
- return -1;
-
- first_sta = false;
- }
-
-
+ ret = mwifiex_sta_init_cmd(adapter->priv[i],
+ first_sta, true);
+ if (ret == -1)
+ return -1;
+ first_sta = false;
}
}
@@ -631,13 +624,11 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
/* Clean up Tx/Rx queues and delete BSS priority table */
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- priv = adapter->priv[i];
+ priv = adapter->priv[i];
- mwifiex_clean_auto_tdls(priv);
- mwifiex_abort_cac(priv);
- mwifiex_free_priv(priv);
- }
+ mwifiex_clean_auto_tdls(priv);
+ mwifiex_abort_cac(priv);
+ mwifiex_free_priv(priv);
}
atomic_set(&adapter->tx_queued, 0);
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index e8825f302de8..516159b721d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -158,6 +158,11 @@ struct mwifiex_bss_info {
u8 bssid[ETH_ALEN];
};
+struct mwifiex_sta_info {
+ u8 peer_mac[ETH_ALEN];
+ struct station_parameters *params;
+};
+
#define MAX_NUM_TID 8
#define MAX_RX_WINSIZE 64
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 9d98a1908dd6..6d8f1d1d7ca4 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -382,7 +382,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_ie_types_ss_param_set *ss_tlv;
struct mwifiex_ie_types_rates_param_set *rates_tlv;
struct mwifiex_ie_types_auth_type *auth_tlv;
+ struct mwifiex_ie_types_sae_pwe_mode *sae_pwe_tlv;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+ struct mwifiex_ie_types_host_mlme *host_mlme_tlv;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
u16 tmp_cap;
@@ -460,6 +462,24 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);
+ if (priv->sec_info.authentication_mode == WLAN_AUTH_SAE) {
+ auth_tlv->auth_type = cpu_to_le16(MWIFIEX_AUTHTYPE_SAE);
+ if (bss_desc->bcn_rsnx_ie &&
+ bss_desc->bcn_rsnx_ie->ieee_hdr.len &&
+ (bss_desc->bcn_rsnx_ie->data[0] &
+ WLAN_RSNX_CAPA_SAE_H2E)) {
+ sae_pwe_tlv =
+ (struct mwifiex_ie_types_sae_pwe_mode *)pos;
+ sae_pwe_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_SAE_PWE_MODE);
+ sae_pwe_tlv->header.len =
+ cpu_to_le16(sizeof(sae_pwe_tlv->pwe[0]));
+ sae_pwe_tlv->pwe[0] = bss_desc->bcn_rsnx_ie->data[0];
+ pos += sizeof(sae_pwe_tlv->header) +
+ sizeof(sae_pwe_tlv->pwe[0]);
+ }
+ }
+
if (IS_SUPPORT_MULTI_BANDS(priv->adapter) &&
!(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(!bss_desc->disable_11n) &&
@@ -491,6 +511,16 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set);
}
+ if (priv->adapter->host_mlme_enabled) {
+ host_mlme_tlv = (struct mwifiex_ie_types_host_mlme *)pos;
+ host_mlme_tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME);
+ host_mlme_tlv->header.len =
+ cpu_to_le16(sizeof(host_mlme_tlv->host_mlme));
+ host_mlme_tlv->host_mlme = 1;
+ pos += sizeof(host_mlme_tlv->header) +
+ sizeof(host_mlme_tlv->host_mlme);
+ }
+
if (!priv->wps.session_enable) {
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
@@ -641,7 +671,21 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
goto done;
}
- assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
+ if (adapter->host_mlme_enabled) {
+ struct ieee80211_mgmt *hdr;
+
+ hdr = (struct ieee80211_mgmt *)&resp->params;
+ if (!memcmp(hdr->bssid,
+ priv->attempted_bss_desc->mac_address,
+ ETH_ALEN))
+ assoc_rsp = (struct ieee_types_assoc_rsp *)
+ &hdr->u.assoc_resp;
+ else
+ assoc_rsp =
+ (struct ieee_types_assoc_rsp *)&resp->params;
+ } else {
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&resp->params;
+ }
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
status_code = le16_to_cpu(assoc_rsp->status_code);
@@ -680,6 +724,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR,
"ASSOC_RESP: UNSPECIFIED failure\n");
}
+
+ if (priv->adapter->host_mlme_enabled)
+ priv->assoc_rsp_size = 0;
} else {
ret = status_code;
}
@@ -778,7 +825,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->adapter->dbg.num_cmd_assoc_success++;
- mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
+ mwifiex_dbg(priv->adapter, MSG, "assoc: associated with %pM\n",
+ priv->attempted_bss_desc->mac_address);
/* Add the ra_list here for infra mode as there will be only 1 ra
always */
@@ -1491,6 +1539,20 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
if (!priv->media_connected)
return 0;
+ if (priv->adapter->host_mlme_enabled) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ priv->host_mlme_reg = false;
+ priv->mgmt_frame_mask = 0;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "could not unregister mgmt frame rx\n");
+ return -1;
+ }
+ }
+
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
@@ -1520,8 +1582,7 @@ void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- mwifiex_deauthenticate(priv, NULL);
+ mwifiex_deauthenticate(priv, NULL);
}
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index d99127dc466e..96d1f6039fbc 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -127,10 +127,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- mwifiex_free_curr_bcn(adapter->priv[i]);
- kfree(adapter->priv[i]);
- }
+ mwifiex_free_curr_bcn(adapter->priv[i]);
+ kfree(adapter->priv[i]);
}
if (adapter->nd_info) {
@@ -530,6 +528,11 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
destroy_workqueue(adapter->rx_workqueue);
adapter->rx_workqueue = NULL;
}
+
+ if (adapter->host_mlme_workqueue) {
+ destroy_workqueue(adapter->host_mlme_workqueue);
+ adapter->host_mlme_workqueue = NULL;
+ }
}
/*
@@ -802,6 +805,10 @@ mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
"bypass txqueue; eth type %#x, mgmt %d\n",
ntohs(eth_hdr->h_proto),
mwifiex_is_skb_mgmt_frame(skb));
+ if (eth_hdr->h_proto == htons(ETH_P_PAE))
+ mwifiex_dbg(priv->adapter, MSG,
+ "key: send EAPOL to %pM\n",
+ eth_hdr->h_dest);
return true;
}
@@ -1162,7 +1169,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ if (!adapter->priv[i]->netdev)
continue;
priv = adapter->priv[i];
p += sprintf(p, "\n[interface : \"%s\"]\n",
@@ -1201,7 +1208,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
for (i = 0; i < adapter->priv_num; i++) {
- if (!adapter->priv[i] || !adapter->priv[i]->netdev)
+ if (!adapter->priv[i]->netdev)
continue;
priv = adapter->priv[i];
mwifiex_get_debug_info(priv, debug_info);
@@ -1384,6 +1391,35 @@ int is_command_pending(struct mwifiex_adapter *adapter)
return !is_cmd_pend_q_empty;
}
+/* This is the host MLME work queue function.
+ * It handles deferred host MLME operations: reporting link loss and
+ * delivering received association responses to cfg80211.
+ */
+static void mwifiex_host_mlme_work_queue(struct work_struct *work)
+{
+ struct mwifiex_adapter *adapter =
+ container_of(work, struct mwifiex_adapter, host_mlme_work);
+
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
+ return;
+
+ /* Check for host mlme disconnection */
+ if (adapter->host_mlme_link_lost) {
+ if (adapter->priv_link_lost) {
+ mwifiex_reset_connect_state(adapter->priv_link_lost,
+ WLAN_REASON_DEAUTH_LEAVING,
+ true);
+ adapter->priv_link_lost = NULL;
+ }
+ adapter->host_mlme_link_lost = false;
+ }
+
+ /* Check for host mlme Assoc Resp */
+ if (adapter->assoc_resp_received) {
+ mwifiex_process_assoc_resp(adapter);
+ adapter->assoc_resp_received = false;
+ }
+}
+
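The flags polled here are set elsewhere in this series: the EVENT_LINK_LOST path in sta_event.c records priv_link_lost, sets host_mlme_link_lost and queues this work, and mwifiex_cfg80211_associate() sets assoc_resp_received after a successful mwifiex_bss_start(). Deferring to a dedicated workqueue presumably gives these cfg80211 notifications a process context instead of running them from the command/event path.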
/*
* This is the RX work queue function.
*
@@ -1434,7 +1470,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
/* Stop data */
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev) {
+ if (priv->netdev) {
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -1459,8 +1495,6 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
rtnl_lock();
if (priv->netdev &&
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
@@ -1558,6 +1592,18 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
}
+ if (adapter->host_mlme_enabled) {
+ adapter->host_mlme_workqueue =
+ alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 0);
+ if (!adapter->host_mlme_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->host_mlme_work,
+ mwifiex_host_mlme_work_queue);
+ }
+
/* Register the device. Fill up the private data structure with
* relevant information from the card. Some code extracted from
* mwifiex_register_dev()
@@ -1721,6 +1767,18 @@ mwifiex_add_card(void *card, struct completion *fw_done,
goto err_registerdev;
}
+ if (adapter->host_mlme_enabled) {
+ adapter->host_mlme_workqueue =
+ alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 0);
+ if (!adapter->host_mlme_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->host_mlme_work,
+ mwifiex_host_mlme_work_queue);
+ }
+
if (mwifiex_init_hw_fw(adapter, true)) {
pr_err("%s: firmware init failed\n", __func__);
goto err_init_fw;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index c5164ae41b54..566adce3413c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -424,6 +424,8 @@ struct mwifiex_bssdescriptor {
u16 wpa_offset;
struct ieee_types_generic *bcn_rsn_ie;
u16 rsn_offset;
+ struct ieee_types_generic *bcn_rsnx_ie;
+ u16 rsnx_offset;
struct ieee_types_generic *bcn_wapi_ie;
u16 wapi_offset;
u8 *beacon_buf;
@@ -525,6 +527,8 @@ struct mwifiex_private {
u8 bss_priority;
u8 bss_num;
u8 bss_started;
+ u8 auth_flag;
+ u16 auth_alg;
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
@@ -608,6 +612,7 @@ struct mwifiex_private {
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
+ struct cfg80211_bss *req_bss;
#define MWIFIEX_GENIE_BUF_SIZE 256
u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE];
@@ -647,6 +652,7 @@ struct mwifiex_private {
u16 gen_idx;
u8 ap_11n_enabled;
u8 ap_11ac_enabled;
+ bool host_mlme_reg;
u32 mgmt_frame_mask;
struct mwifiex_roc_cfg roc_cfg;
bool scan_aborting;
@@ -793,7 +799,7 @@ struct mwifiex_auto_tdls_peer {
u8 mac_addr[ETH_ALEN];
u8 tdls_status;
int rssi;
- long rssi_jiffies;
+ unsigned long rssi_jiffies;
u8 failure_count;
u8 do_discover;
u8 do_setup;
@@ -876,6 +882,8 @@ struct mwifiex_adapter {
struct work_struct main_work;
struct workqueue_struct *rx_workqueue;
struct work_struct rx_work;
+ struct workqueue_struct *host_mlme_workqueue;
+ struct work_struct host_mlme_work;
bool rx_work_enabled;
bool rx_processing;
bool delay_main_work;
@@ -907,6 +915,9 @@ struct mwifiex_adapter {
u8 cmd_resp_received;
u8 event_received;
u8 data_received;
+ u8 assoc_resp_received;
+ struct mwifiex_private *priv_link_lost;
+ u8 host_mlme_link_lost;
u16 seq_num;
struct cmd_ctrl_node *cmd_pool;
struct cmd_ctrl_node *curr_cmd;
@@ -996,6 +1007,8 @@ struct mwifiex_adapter {
bool is_up;
bool ext_scan;
+ bool host_mlme_enabled;
+ struct ieee80211_txrx_stypes mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES];
u8 fw_api_ver;
u8 key_api_major_ver, key_api_minor_ver;
u8 max_p2p_conn, max_sta_conn;
@@ -1061,6 +1074,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
+void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv,
+ u16 reason_code, u8 *sa);
+
int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
@@ -1092,6 +1108,7 @@ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter);
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter);
+void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter);
int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb);
int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
@@ -1286,14 +1303,12 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
int i;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
- continue;
+ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+ continue;
- if ((adapter->priv[i]->bss_num == bss_num) &&
- (adapter->priv[i]->bss_type == bss_type))
- break;
- }
+ if ((adapter->priv[i]->bss_num == bss_num) &&
+ (adapter->priv[i]->bss_type == bss_type))
+ break;
}
return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
}
@@ -1309,11 +1324,9 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter,
int i;
for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
- GET_BSS_ROLE(adapter->priv[i]) == bss_role)
- break;
- }
+ if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
+ GET_BSS_ROLE(adapter->priv[i]) == bss_role)
+ break;
}
return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
@@ -1331,12 +1344,10 @@ mwifiex_get_unused_bss_num(struct mwifiex_adapter *adapter, u8 bss_type)
memset(index, 0, sizeof(index));
for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i]) {
- if (adapter->priv[i]->bss_type == bss_type &&
- !(adapter->priv[i]->bss_mode ==
- NL80211_IFTYPE_UNSPECIFIED)) {
- index[adapter->priv[i]->bss_num] = 1;
- }
+ if (adapter->priv[i]->bss_type == bss_type &&
+ !(adapter->priv[i]->bss_mode ==
+ NL80211_IFTYPE_UNSPECIFIED)) {
+ index[adapter->priv[i]->bss_num] = 1;
}
for (j = 0; j < MWIFIEX_MAX_BSS_NUM; j++)
if (!index[j])
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0326b121747c..cab889af4c4a 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1371,6 +1371,12 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->rsn_offset = (u16) (current_ptr -
bss_entry->beacon_buf);
break;
+ case WLAN_EID_RSNX:
+ bss_entry->bcn_rsnx_ie =
+ (struct ieee_types_generic *)current_ptr;
+ bss_entry->rsnx_offset =
+ (u16)(current_ptr - bss_entry->beacon_buf);
+ break;
case WLAN_EID_BSS_AC_ACCESS_DELAY:
bss_entry->bcn_wapi_ie =
(struct ieee_types_generic *) current_ptr;
@@ -2045,8 +2051,6 @@ void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
spin_unlock_bh(&adapter->mwifiex_cmd_lock);
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (priv->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -2530,8 +2534,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
ext_scan_resp = &resp->params.ext_scan;
tlv = (void *)ext_scan_resp->tlv_buffer;
- buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
- - 1);
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
type = le16_to_cpu(tlv->type);
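The dropped "- 1" pairs with the fw.h change above that turns tlv_buffer into a flexible array member: with u8 tlv_buffer[1], sizeof(*ext_scan_resp) counted one TLV byte that had to be subtracted back out, while with u8 tlv_buffer[] it no longer does, so keeping the correction would over-count buf_left by one.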
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bda9b2b8a1f3..490ffd981164 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -332,6 +332,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.can_auto_tdls = false,
.can_ext_scan = false,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -348,6 +349,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -364,6 +366,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -380,6 +383,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
@@ -397,6 +401,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
@@ -414,6 +419,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = true,
+ .host_mlme = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
@@ -432,6 +438,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -448,6 +455,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_auto_tdls = true,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
@@ -465,6 +473,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.can_auto_tdls = true,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
@@ -481,6 +490,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
+ .host_mlme = false,
};
static struct memory_type_mapping generic_mem_type_map[] = {
@@ -574,6 +584,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
card->fw_ready_extra_delay = data->fw_ready_extra_delay;
+ card->host_mlme = data->host_mlme;
INIT_WORK(&card->work, mwifiex_sdio_work);
}
@@ -2511,6 +2522,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
}
+ adapter->host_mlme_enabled = card->host_mlme;
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index cb63ad55d675..65d142286c46 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -256,6 +256,7 @@ struct sdio_mmc_card {
bool can_auto_tdls;
bool can_ext_scan;
bool fw_ready_extra_delay;
+ bool host_mlme;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -280,6 +281,7 @@ struct mwifiex_sdio_device {
bool can_auto_tdls;
bool can_ext_scan;
bool fw_ready_extra_delay;
+ bool host_mlme;
};
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 7b69d27e0c0e..9c53825f222d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1398,6 +1398,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
break;
+ case HostCmd_CMD_ADD_NEW_STATION:
+ break;
case HOST_CMD_APCMD_SYS_RESET:
break;
case HostCmd_CMD_MEF_CFG:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index df9cdd10a494..b5f3821a6a8f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -135,6 +135,9 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
priv->media_connected = false;
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+
priv->scan_block = false;
priv->port_open = false;
@@ -222,8 +225,12 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
priv->cfg_bssid, reason_code);
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
- cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- !from_ap, GFP_KERNEL);
+ if (adapter->host_mlme_enabled && adapter->host_mlme_link_lost)
+ mwifiex_host_mlme_disconnect(adapter->priv_link_lost,
+ reason_code, NULL);
+ else
+ cfg80211_disconnected(priv->netdev, reason_code, NULL,
+ 0, !from_ap, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
@@ -746,7 +753,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
get_unaligned_le16(adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code, true);
+ if (adapter->host_mlme_enabled) {
+ adapter->priv_link_lost = priv;
+ adapter->host_mlme_link_lost = true;
+ queue_work(adapter->host_mlme_workqueue,
+ &adapter->host_mlme_work);
+ } else {
+ mwifiex_reset_connect_state(priv, reason_code,
+ true);
+ }
}
break;
@@ -999,10 +1014,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_REMAIN_ON_CHAN_EXPIRED:
mwifiex_dbg(adapter, EVENT,
"event: Remain on channel expired\n");
- cfg80211_remain_on_channel_expired(&priv->wdev,
- priv->roc_cfg.cookie,
- &priv->roc_cfg.chan,
- GFP_ATOMIC);
+
+ if (adapter->host_mlme_enabled &&
+ (priv->auth_flag & HOST_MLME_AUTH_PENDING)) {
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ cfg80211_remain_on_channel_expired(&priv->wdev,
+ priv->roc_cfg.cookie,
+ &priv->roc_cfg.chan,
+ GFP_ATOMIC);
+ }
memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 32a27fad7b79..d3cba6895f8c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -339,7 +339,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_associate(priv, bss_desc);
}
- if (bss)
+ if (bss && !priv->adapter->host_mlme_enabled)
cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
/* Adhoc mode */
@@ -503,8 +503,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (disconnect_on_suspend) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- mwifiex_deauthenticate(priv, NULL);
+ mwifiex_deauthenticate(priv, NULL);
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 70c2790b8e35..9d0ef04ebe02 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -36,7 +36,7 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
unsigned int pad;
- u16 pkt_type, pkt_offset;
+ u16 pkt_type, pkt_length, pkt_offset;
int hroom = adapter->intf_hdr_len;
pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
@@ -49,9 +49,10 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
memset(local_tx_pd, 0, sizeof(struct txpd));
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
- local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
- (sizeof(struct txpd) +
- pad)));
+ pkt_length = (u16)(skb->len - (sizeof(struct txpd) + pad));
+ if (pkt_type == PKT_TYPE_MGMT)
+ pkt_length -= MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+ local_tx_pd->tx_pkt_length = cpu_to_le16(pkt_length);
local_tx_pd->priority = (u8) skb->priority;
local_tx_pd->pkt_delay_2ms =
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 6c60621b6ccc..7823e67694e8 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1439,8 +1439,8 @@ void mwifiex_check_auto_tdls(struct timer_list *t)
spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
- if ((jiffies - tdls_peer->rssi_jiffies) >
- (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
+ if (time_after(jiffies, tdls_peer->rssi_jiffies +
+ MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
tdls_peer->rssi = 0;
tdls_peer->do_discover = true;
priv->check_tdls_tx = true;
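The time_after() conversion is more than style: a direct jiffies - t > TIMEOUT comparison goes wrong when jiffies wraps around, while time_after() compares through signed arithmetic and stays correct across the wrap. A standalone illustration (userspace re-statement of the kernel macro; not driver code):

    #include <assert.h>

    /* Same idea as the kernel's time_after(a, b): true if a is after b,
     * correct across wraparound because the difference is evaluated as
     * a signed quantity. */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long before_wrap = (unsigned long)-100; /* 100 ticks to wrap */
        unsigned long after_wrap = 50;                   /* 50 ticks past wrap */

        /* Only 150 ticks elapsed, though the unsigned difference is huge. */
        assert(time_after(after_wrap, before_wrap));
        assert(!time_after(before_wrap, after_wrap));
        return 0;
    }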
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 491e36611909..1c0ceac6b27f 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -46,31 +46,26 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
+ bss_config->protocol = 0;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->protocol |= PROTOCOL_WPA;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->protocol |= PROTOCOL_WPA2;
+
+ bss_config->key_mgmt = 0;
for (i = 0; i < params->crypto.n_akm_suites; i++) {
switch (params->crypto.akm_suites[i]) {
case WLAN_AKM_SUITE_8021X:
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_1) {
- bss_config->protocol = PROTOCOL_WPA;
- bss_config->key_mgmt = KEY_MGMT_EAP;
- }
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_2) {
- bss_config->protocol |= PROTOCOL_WPA2;
- bss_config->key_mgmt = KEY_MGMT_EAP;
- }
+ bss_config->key_mgmt |= KEY_MGMT_EAP;
break;
case WLAN_AKM_SUITE_PSK:
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_1) {
- bss_config->protocol = PROTOCOL_WPA;
- bss_config->key_mgmt = KEY_MGMT_PSK;
- }
- if (params->crypto.wpa_versions &
- NL80211_WPA_VERSION_2) {
- bss_config->protocol |= PROTOCOL_WPA2;
- bss_config->key_mgmt = KEY_MGMT_PSK;
- }
+ bss_config->key_mgmt |= KEY_MGMT_PSK;
+ break;
+ case WLAN_AKM_SUITE_PSK_SHA256:
+ bss_config->key_mgmt |= KEY_MGMT_PSK_SHA256;
+ break;
+ case WLAN_AKM_SUITE_SAE:
+ bss_config->key_mgmt |= KEY_MGMT_SAE;
break;
default:
break;
@@ -751,6 +746,28 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
return 0;
}
+/* This function prepares the AP start-up command, appending the host
+ * MLME TLV when host MLME is enabled.
+ */
+static void mwifiex_cmd_uap_bss_start(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd)
+{
+ struct mwifiex_ie_types_host_mlme *tlv;
+ int size;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_UAP_BSS_START);
+ size = S_DS_GEN;
+
+ if (priv->adapter->host_mlme_enabled) {
+ tlv = (struct mwifiex_ie_types_host_mlme *)((u8 *)cmd + size);
+ tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME);
+ tlv->header.len = cpu_to_le16(sizeof(tlv->host_mlme));
+ tlv->host_mlme = 1;
+ size += sizeof(struct mwifiex_ie_types_host_mlme);
+ }
+
+ cmd->size = cpu_to_le16(size);
+}
+
/* This function prepares the AP-specific deauth command with the MAC
 * address supplied as a function parameter.
*/
@@ -768,6 +785,144 @@ static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
return 0;
}
+/* This function prepares the AP-specific add-station command.
+ */
+static int mwifiex_cmd_uap_add_station(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_add_station *new_sta = &cmd->params.sta_info;
+ struct mwifiex_sta_info *add_sta = (struct mwifiex_sta_info *)data_buf;
+ struct station_parameters *params = add_sta->params;
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *pos;
+ u8 qos_capa;
+ u16 header_len = sizeof(struct mwifiex_ie_types_header);
+ u16 tlv_len;
+ int size;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_sta_flag *sta_flag;
+ int i;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_ADD_NEW_STATION);
+ new_sta->action = cpu_to_le16(cmd_action);
+ size = sizeof(struct host_cmd_ds_add_station) + S_DS_GEN;
+
+ if (cmd_action == HostCmd_ACT_ADD_STA)
+ sta_ptr = mwifiex_add_sta_entry(priv, add_sta->peer_mac);
+ else
+ sta_ptr = mwifiex_get_sta_entry(priv, add_sta->peer_mac);
+
+ if (!sta_ptr)
+ return -1;
+
+ memcpy(new_sta->peer_mac, add_sta->peer_mac, ETH_ALEN);
+
+ if (cmd_action == HostCmd_ACT_REMOVE_STA) {
+ cmd->size = cpu_to_le16(size);
+ return 0;
+ }
+
+ new_sta->aid = cpu_to_le16(params->aid);
+ new_sta->listen_interval = cpu_to_le32(params->listen_interval);
+ new_sta->cap_info = cpu_to_le16(params->capability);
+
+ pos = new_sta->tlv;
+
+ if (params->sta_flags_set & NL80211_STA_FLAG_WME)
+ sta_ptr->is_wmm_enabled = 1;
+ sta_flag = (struct mwifiex_ie_types_sta_flag *)pos;
+ sta_flag->header.type = cpu_to_le16(TLV_TYPE_UAP_STA_FLAGS);
+ sta_flag->header.len = cpu_to_le16(sizeof(__le32));
+ sta_flag->sta_flags = cpu_to_le32(params->sta_flags_set);
+ pos += sizeof(struct mwifiex_ie_types_sta_flag);
+ size += sizeof(struct mwifiex_ie_types_sta_flag);
+
+ if (params->ext_capab_len) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ tlv_len = params->ext_capab_len;
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->ext_capab, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ if (params->link_sta_params.supported_rates_len) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_len = params->link_sta_params.supported_rates_len;
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data,
+ params->link_sta_params.supported_rates, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ if (params->uapsd_queues || params->max_sp) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ tlv_len = sizeof(qos_capa);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ qos_capa = params->uapsd_queues | (params->max_sp << 5);
+ memcpy(tlv->data, &qos_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_wmm_enabled = 1;
+ }
+
+ if (params->link_sta_params.ht_capa) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ tlv_len = sizeof(struct ieee80211_ht_cap);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->link_sta_params.ht_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_11n_enabled = 1;
+ sta_ptr->max_amsdu =
+ le16_to_cpu(params->link_sta_params.ht_capa->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ }
+
+ if (params->link_sta_params.vht_capa) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ tlv_len = sizeof(struct ieee80211_vht_cap);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, params->link_sta_params.vht_capa, tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ sta_ptr->is_11ac_enabled = 1;
+ }
+
+ if (params->link_sta_params.opmode_notif_used) {
+ tlv = (struct mwifiex_ie_types_data *)pos;
+ tlv->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF);
+ tlv_len = sizeof(u8);
+ tlv->header.len = cpu_to_le16(tlv_len);
+ memcpy(tlv->data, &params->link_sta_params.opmode_notif,
+ tlv_len);
+ pos += (header_len + tlv_len);
+ size += (header_len + tlv_len);
+ }
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (sta_ptr->is_11n_enabled)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ else
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
+
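mwifiex_cmd_uap_add_station() repeats one pattern for every TLV: fill a type/length header, copy the payload, and advance both the write cursor and the running command size. A hypothetical helper capturing that pattern (a sketch only; mwifiex defines no such helper):

    /* Hypothetical helper, not part of mwifiex: append one TLV, then
     * advance the write cursor and the running command size. */
    static u8 *mwifiex_append_tlv(u8 *pos, int *size, u16 type,
                                  const void *data, u16 len)
    {
        struct mwifiex_ie_types_header *hdr =
            (struct mwifiex_ie_types_header *)pos;

        hdr->type = cpu_to_le16(type);
        hdr->len = cpu_to_le16(len);
        memcpy(pos + sizeof(*hdr), data, len);
        *size += sizeof(*hdr) + len;
        return pos + sizeof(*hdr) + len;
    }

With such a helper the extended-capabilities block, for example, would reduce to pos = mwifiex_append_tlv(pos, &size, WLAN_EID_EXT_CAPABILITY, params->ext_capab, params->ext_capab_len);.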
/* This function prepares the AP specific commands before sending them
* to the firmware.
* This is a generic function which calls specific command preparation
@@ -785,6 +940,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
+ mwifiex_cmd_uap_bss_start(priv, cmd);
+ break;
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
@@ -800,6 +957,11 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
data_buf))
return -1;
break;
+ case HostCmd_CMD_ADD_NEW_STATION:
+ if (mwifiex_cmd_uap_add_station(priv, cmd, cmd_action,
+ data_buf))
+ return -1;
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 515e6db410f2..6085cd50970d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -745,8 +745,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
if (adapter->usb_mc_status) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
!priv->bss_started) ||
(priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
@@ -758,8 +756,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
} else {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
priv->bss_started) ||
(priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
@@ -770,8 +766,7 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
}
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv)
- priv->usb_port = active_port;
+ priv->usb_port = active_port;
}
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
if (active_port == card->port[i].tx_data_ep)
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 745b1d925b21..42c04bf858da 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -370,6 +370,45 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
return 0;
}
+
+/* This function reports a deauth frame to the kernel (cfg80211). */
+void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv,
+ u16 reason_code, u8 *sa)
+{
+ u8 frame_buf[100];
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)frame_buf;
+
+ memset(frame_buf, 0, sizeof(frame_buf));
+ mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_DEAUTH);
+ mgmt->duration = 0;
+ mgmt->seq_ctrl = 0;
+ mgmt->u.deauth.reason_code = cpu_to_le16(reason_code);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
+ eth_broadcast_addr(mgmt->da);
+ memcpy(mgmt->sa,
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ memcpy(mgmt->da, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sa, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->curr_addr, ETH_ALEN);
+ }
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
+ wiphy_lock(priv->wdev.wiphy);
+ cfg80211_rx_mlme_mgmt(priv->netdev, frame_buf, 26);
+ wiphy_unlock(priv->wdev.wiphy);
+ } else {
+ cfg80211_rx_mgmt(&priv->wdev,
+ priv->bss_chandef.chan->center_freq,
+ 0, frame_buf, 26, 0);
+ }
+}
+
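The hard-coded length 26 is the 24-byte three-address management header plus the 2-byte deauth reason code (frame_buf itself is simply over-sized at 100 bytes). A compile-time sketch of that arithmetic, assuming struct ieee80211_mgmt from <linux/ieee80211.h>:

    /* Sketch only: 24-byte mgmt header + 2-byte reason code == 26. */
    static_assert(offsetof(struct ieee80211_mgmt, u.deauth.reason_code) +
                  sizeof_field(struct ieee80211_mgmt, u.deauth.reason_code) == 26,
                  "deauth frame length mismatch");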
/*
* This function processes the received management packet and sends it
* to the kernel.
@@ -417,6 +456,71 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
pkt_len -= ETH_ALEN;
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+ if (priv->host_mlme_reg &&
+ (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) &&
+ (ieee80211_is_auth(ieee_hdr->frame_control) ||
+ ieee80211_is_deauth(ieee_hdr->frame_control) ||
+ ieee80211_is_disassoc(ieee_hdr->frame_control))) {
+ if (ieee80211_is_auth(ieee_hdr->frame_control)) {
+ if (priv->auth_flag & HOST_MLME_AUTH_PENDING) {
+ if (priv->auth_alg != WLAN_AUTH_SAE) {
+ priv->auth_flag &=
+ ~HOST_MLME_AUTH_PENDING;
+ priv->auth_flag |=
+ HOST_MLME_AUTH_DONE;
+ }
+ } else {
+ return 0;
+ }
+
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive authentication from %pM\n",
+ ieee_hdr->addr3);
+ } else {
+ if (!priv->wdev.connected)
+ return 0;
+
+ if (ieee80211_is_deauth(ieee_hdr->frame_control)) {
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive deauth from %pM\n",
+ ieee_hdr->addr3);
+ priv->auth_flag = 0;
+ priv->auth_alg = WLAN_AUTH_NONE;
+ } else {
+ mwifiex_dbg
+ (priv->adapter, MSG,
+ "assoc: receive disassoc from %pM\n",
+ ieee_hdr->addr3);
+ }
+ }
+
+ cfg80211_rx_mlme_mgmt(priv->netdev, skb->data, pkt_len);
+ }
+
+ if (priv->adapter->host_mlme_enabled &&
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)) {
+ if (ieee80211_is_auth(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive auth from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_deauth(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "auth: receive deauth from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_disassoc(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive disassoc from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_assoc_req(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive assoc req from %pM\n",
+ ieee_hdr->addr2);
+ if (ieee80211_is_reassoc_req(ieee_hdr->frame_control))
+ mwifiex_dbg(priv->adapter, MSG,
+ "assoc: receive reassoc req from %pM\n",
+ ieee_hdr->addr2);
+ }
+
cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
0);
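
The hard-coded length 26 handed to cfg80211 in the hunk above is the 24-byte IEEE 802.11 management header plus the 2-byte deauthentication reason code. A standalone sketch, with illustrative struct names rather than the kernel's, of where that number comes from:

#include <assert.h>
#include <stdint.h>

/* Illustrative layout of a deauthentication frame: the generic
 * 24-byte management header followed by the 2-byte reason code. */
struct mgmt_hdr {
	uint16_t frame_control;
	uint16_t duration;
	uint8_t  da[6];		/* destination address */
	uint8_t  sa[6];		/* source address */
	uint8_t  bssid[6];
	uint16_t seq_ctrl;
} __attribute__((packed));	/* 24 bytes */

struct deauth_frame {
	struct mgmt_hdr hdr;
	uint16_t reason_code;	/* 2 bytes */
} __attribute__((packed));

/* 24 + 2 = 26, the length passed to cfg80211 above. */
static_assert(sizeof(struct deauth_frame) == 26, "deauth frame is 26 bytes");
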
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 8558995e8fc7..bcb61dab7dc8 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -454,8 +454,6 @@ int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))
continue;
@@ -477,8 +475,6 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
- if (!priv)
- continue;
if (!priv->port_open &&
(priv->bss_mode != NL80211_IFTYPE_ADHOC))
continue;
@@ -1491,9 +1487,6 @@ void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
- if (!priv)
- continue;
-
if (adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))
continue;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index b130e057370f..bab9ef37a1ab 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -587,6 +587,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
}
struct mwl8k_cmd_pkt {
+ /* New members MUST be added within the __struct_group() macro below. */
__struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed,
__le16 code;
__le16 length;
@@ -596,6 +597,8 @@ struct mwl8k_cmd_pkt {
);
char payload[];
} __packed;
+static_assert(offsetof(struct mwl8k_cmd_pkt, payload) == sizeof(struct mwl8k_cmd_pkt_hdr),
+ "struct member likely outside of __struct_group()");
/*
* Firmware loading.
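
The __struct_group()/static_assert() pairing added above gives the command header a standalone tag while keeping its members flattened in the parent, and then checks at compile time that nothing drifted outside the group. A minimal user-space sketch with a simplified stand-in for the kernel macro (field set is illustrative, not mwl8k's full header):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's __struct_group(): the real macro
 * defines the members once, visible both as a named header struct and
 * inline in the parent. */
struct cmd_hdr {
	uint16_t code;
	uint16_t length;	/* remaining header fields elided here */
} __attribute__((packed));

struct cmd_pkt {
	struct cmd_hdr hdr;
	char payload[];		/* flexible array must start right after hdr */
} __attribute__((packed));

/* The guard the patch adds: if someone appends a member to cmd_pkt
 * instead of the header group, payload no longer starts at
 * sizeof(cmd_hdr) and the build fails. */
static_assert(offsetof(struct cmd_pkt, payload) == sizeof(struct cmd_hdr),
	      "struct member likely outside of the header group");
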
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index bb291fe314fb..9d5561f44134 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -929,14 +929,19 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_phy *phy)
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
- struct ieee80211_hw *hw = phy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
+ int ret;
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ set_bit(MT76_RESET, &phy->state);
+
+ mt76_worker_disable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
@@ -946,14 +951,34 @@ void mt76_set_channel(struct mt76_phy *phy)
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
+ phy->offchannel = offchannel;
if (!offchannel)
phy->main_chan = chandef->chan;
if (chandef->chan != phy->main_chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+ mt76_worker_enable(&dev->tx_worker);
+
+ ret = dev->drv->set_channel(phy);
+
+ clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_schedule(&dev->tx_worker);
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_update_channel(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ return mt76_set_channel(phy, chandef, offchannel);
+}
+EXPORT_SYMBOL_GPL(mt76_update_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
@@ -1484,21 +1509,32 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ enum mt76_sta_event ev;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
- if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- dev->drv->sta_assoc)
- dev->drv->sta_assoc(dev, vif, sta);
-
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
- return 0;
+ if (!dev->drv->sta_event)
+ return 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
@@ -1521,6 +1557,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid)
{
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
+ skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
@@ -1529,7 +1566,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
+ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
struct ieee80211_hw *hw;
struct sk_buff_head list;
struct sk_buff *skb;
@@ -1697,14 +1734,15 @@ int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
+ bool is_2g = sband->band == NL80211_BAND_2GHZ;
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband != &dev->phy.sband_2g.sband)
+ if (!is_2g)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->phy.sband_2g.sband) {
+ } else if (is_2g) {
offset = 4;
}
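
The sta_state rework above replaces the single sta_assoc callback with one sta_event hook, so the mapping from mac80211 state pairs to driver events reduces to a small lookup. A sketch of that mapping (enum values mirror the patch; the helper name is illustrative):

#include <stdio.h>

/* Mirrors the relevant ieee80211_sta_state ordering. */
enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

enum sta_event { EV_NONE, EV_ASSOC, EV_AUTHORIZE, EV_DISASSOC };

/* Only three transitions generate a driver event, exactly as in the
 * patched mt76_sta_state(). */
static enum sta_event sta_transition_event(enum sta_state old, enum sta_state new)
{
	if (old == AUTH && new == ASSOC)
		return EV_ASSOC;
	if (old == ASSOC && new == AUTHORIZED)
		return EV_AUTHORIZE;
	if (old == ASSOC && new == AUTH)
		return EV_DISASSOC;
	return EV_NONE;
}

int main(void)
{
	printf("%d\n", sta_transition_event(ASSOC, AUTHORIZED)); /* EV_AUTHORIZE */
	return 0;
}
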
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index a8cafa39a56d..98da82b74094 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -73,6 +73,8 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp,
struct sk_buff **ret_skb)
{
+ unsigned int retry = 0;
+ struct sk_buff *orig_skb = NULL;
unsigned long expires;
int ret, seq;
@@ -81,6 +83,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);
+ if (dev->mcu_ops->mcu_skb_prepare_msg) {
+ ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
+ if (ret < 0)
+ goto out;
+ }
+
+retry:
+ orig_skb = skb_get(skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
@@ -94,6 +104,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
do {
skb = mt76_mcu_get_response(dev, expires);
+ if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
+ retry++ < dev->mcu_ops->max_retry) {
+ dev_err(dev->dev, "Retry message %08x (seq %d)\n",
+ cmd, seq);
+ skb = orig_skb;
+ goto retry;
+ }
+
ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq);
if (!ret && ret_skb)
*ret_skb = skb;
@@ -101,7 +119,9 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
+
out:
+ dev_kfree_skb(orig_skb);
mutex_unlock(&dev->mcu.mutex);
return ret;
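
The retry path above works because skb_get() takes an extra reference before each send, so the buffer survives the transmit path consuming it, and a single dev_kfree_skb() in the out label drops whatever reference remains. A sketch of that refcount discipline with a hypothetical buffer type standing in for sk_buff:

#include <stdlib.h>

/* Hypothetical refcounted buffer standing in for struct sk_buff. */
struct buf { int refs; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
	if (--b->refs == 0)
		free(b);
}

/* send_msg() consumes one reference, as the MCU transmit path does;
 * it fails while the fail flag is set. */
static int send_msg(struct buf *b, int fail) { buf_put(b); return fail ? -1 : 0; }

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	int tries = 0;

	b->refs = 1;
	/* Take a reference before each attempt so a failed send can retry
	 * with the same buffer instead of rebuilding the message. */
	while (send_msg(buf_get(b), tries++ < 2) && tries < 3)
		;
	buf_put(b);	/* drop the caller's original reference */
	return 0;
}
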
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4a58a78d5ed2..0b75a45ad2e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,8 +162,8 @@ enum mt76_dfs_state {
struct mt76_queue_buf {
dma_addr_t addr;
- u16 len;
- bool skip_unmap;
+ u16 len:15,
+ skip_unmap:1;
};
struct mt76_tx_info {
@@ -230,11 +230,14 @@ struct mt76_queue {
};
struct mt76_mcu_ops {
+ unsigned int max_retry;
u32 headroom;
u32 tailroom;
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *seq);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, int *seq);
int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
@@ -347,6 +350,7 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
+ u8 sta_disabled:1;
u8 amsdu:1;
u8 phy_idx:2;
u8 link_id:4;
@@ -361,6 +365,7 @@ struct mt76_wcid {
struct list_head tx_list;
struct sk_buff_head tx_pending;
+ struct sk_buff_head tx_offchannel;
struct list_head list;
struct idr pktid;
@@ -466,6 +471,12 @@ enum {
MT76_STATE_WED_RESET,
};
+enum mt76_sta_event {
+ MT76_STA_EVENT_ASSOC,
+ MT76_STA_EVENT_AUTHORIZE,
+ MT76_STA_EVENT_DISASSOC,
+};
+
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
@@ -487,6 +498,7 @@ struct mt76_driver_ops {
u8 mcs_rates;
void (*update_survey)(struct mt76_phy *phy);
+ int (*set_channel)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -511,8 +523,8 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -768,6 +780,7 @@ struct mt76_phy {
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ bool offchannel;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -1370,7 +1383,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
enum ieee80211_frame_release_type reason,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
-void mt76_set_channel(struct mt76_phy *phy);
+int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
@@ -1484,6 +1497,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
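
The mt76_queue_buf change above folds skip_unmap into bit 15 of the length, so the extra flag no longer needs its own padded bool. A standalone sketch of the before/after layouts (names mirror the patch; exact sizes are ABI-dependent):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* illustrative; width varies by platform */

struct buf_before {
	dma_addr_t addr;
	uint16_t len;
	bool skip_unmap;	/* occupies its own padded byte */
};

struct buf_after {
	dma_addr_t addr;
	uint16_t len:15,	/* DMA lengths here fit in 15 bits */
		 skip_unmap:1;	/* flag packed into the spare top bit */
};

/* Packing must never grow the struct; on a typical LP64 ABI both forms
 * end up at 16 bytes, but the packed one leaves no stray flag byte. */
static_assert(sizeof(struct buf_after) <= sizeof(struct buf_before),
	      "bitfield packing should not grow the struct");
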
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index c223f7c19e6d..6457ee06bb5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -107,7 +107,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i, nframes;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
data.dev = dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index ea017f22fff2..863e5770df51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
- u8 tid = 0, hwq = 0;
+ u8 qid, tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
@@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TAG1D_MASK;
- u8 qid = tid_to_ac[tid];
+ qid = tid_to_ac[tid];
hwq = wmm_queue_map[qid];
skb_set_queue_mapping(skb, qid);
} else if (ieee80211_is_data(hdr->frame_control)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index d951cb81df83..f5a6b03bc61d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -181,6 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
is_mt7688(dev))
dev->mphy.antenna_mask = 1;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
mt76_eeprom_override(&dev->mphy);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 6c55c72f28a2..86617a3e4328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -15,9 +15,10 @@ const struct mt76_driver_ops mt7603_drv_ops = {
.rx_poll_complete = mt7603_rx_poll_complete,
.sta_ps = mt7603_sta_ps,
.sta_add = mt7603_sta_add,
- .sta_assoc = mt7603_sta_assoc,
+ .sta_event = mt7603_sta_event,
.sta_remove = mt7603_sta_remove,
.update_survey = mt7603_update_channel,
+ .set_channel = mt7603_set_channel,
};
static void
@@ -456,11 +457,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+ u8 ext_pa_pwr;
int max_offset, cur_offset;
int i;
- if (ext_pa && is_mt7603(dev))
- target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+ ext_pa_pwr = eeprom[MT_EE_TX_POWER_TSSI_OFF];
+ if (ext_pa && is_mt7603(dev) && ext_pa_pwr != 0 && ext_pa_pwr != 0xff)
+ target_power = ext_pa_pwr & ~BIT(7);
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index f35fa643c0da..574f74ad325d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -133,30 +133,24 @@ void mt7603_init_edcca(struct mt7603_dev *dev)
mt7603_edcca_set_strict(dev, false);
}
-static int
-mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
+int mt7603_set_channel(struct mt76_phy *mphy)
{
- struct mt7603_dev *dev = hw->priv;
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
+ struct cfg80211_chan_def *def = &mphy->chandef;
+
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
- ieee80211_stop_queues(hw);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -180,10 +174,6 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_txq_schedule_all(&dev->mphy);
-
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -199,17 +189,14 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
- if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ if (!mphy->offchannel)
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
- mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
if (failed)
mt7603_mac_work(&dev->mphy.mac_work.work);
- ieee80211_wake_queues(hw);
-
return ret;
}
@@ -227,7 +214,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
if (err)
return err;
- return mt7603_set_channel(hw, &mphy->chandef);
+ return mt76_update_channel(mphy);
}
static int
@@ -238,7 +225,7 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
- ret = mt7603_set_channel(hw, &hw->conf.chandef);
+ ret = mt76_update_channel(&dev->mphy);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
@@ -368,13 +355,19 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return ret;
}
-void
-mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int
+mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
- mt7603_wtbl_update_cap(dev, sta);
+ if (ev == MT76_STA_EVENT_ASSOC) {
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_wtbl_update_cap(dev, sta);
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return 0;
}
void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 9e58df7042ad..55a034ccbacd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -213,6 +213,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
+int mt7603_set_channel(struct mt76_phy *mphy);
int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);
@@ -245,8 +246,8 @@ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index f7722f67db57..66ba3be27343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
+
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
mt7615_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
@@ -319,7 +322,7 @@ void mt7615_init_work(struct mt7615_dev *dev)
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
mt7615_check_offload_capability(dev);
}
EXPORT_SYMBOL_GPL(mt7615_init_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 50e262c1622f..376975388007 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -282,19 +282,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
}
-int mt7615_set_channel(struct mt7615_phy *phy)
+int mt7615_set_channel(struct mt76_phy *mphy)
{
+ struct mt7615_phy *phy = mphy->priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt7615_mutex_acquire(dev);
-
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
+ mt76_connac_pm_wake(mphy, &dev->pm);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
ret = mt7615_mcu_apply_rx_dcoc(phy);
@@ -325,11 +320,8 @@ int mt7615_set_channel(struct mt7615_phy *phy)
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt7615_mutex_release(dev);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76)) {
unsigned long timeout = mt7615_get_macwork_timeout(dev);
@@ -339,6 +331,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_set_channel);
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -425,11 +418,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
if (mt7615_firmware_offload(phy->dev))
return mt76_connac_mcu_set_rate_txpower(phy->mt76);
- ieee80211_stop_queues(hw);
- err = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
-
- return err;
+ return mt76_update_channel(phy->mt76);
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@@ -448,9 +437,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt7615_mutex_release(dev);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
+ ret = mt76_update_channel(phy->mt76);
}
mt7615_mutex_acquire(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index d50d967828be..96e34277fece 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -394,7 +394,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
return;
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -847,6 +847,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
bool new_entry = true;
+ int conn_state;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -863,8 +864,9 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- enable, new_entry);
+ conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1878,16 +1880,6 @@ out:
sizeof(req), true);
}
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
-{
- struct wtbl_req_hdr req = {
- .operation = WTBL_RESET_ALL,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2151,7 +2143,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 87a956ea3ad7..dbb2c82407df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -182,6 +182,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
struct mt76_bus_ops *bus_ops;
struct ieee80211_ops *ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index a20322aae967..530da48ce3ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -399,7 +399,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *rates);
void mt7615_pm_wake_work(struct work_struct *work);
void mt7615_pm_power_save_work(struct work_struct *work);
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
@@ -457,7 +456,7 @@ void mt7615_roc_work(struct work_struct *work);
void mt7615_roc_timer(struct timer_list *timer);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
-int mt7615_set_channel(struct mt7615_phy *phy);
+int mt7615_set_channel(struct mt76_phy *mphy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 9692890ba51b..aebfc4576aa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -87,6 +87,7 @@ static int mt7663s_probe(struct sdio_func *func,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index a3d1cfa729ed..03f5af84424b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -141,7 +141,7 @@ mt7615_tm_init(struct mt7615_phy *phy)
mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF);
mutex_unlock(&dev->mt76.mutex);
- mt7615_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 9335ca0776fe..5020af52c68c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -123,6 +123,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static struct mt76_bus_ops bus_ops = {
.rr = mt7663u_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 5f132115ebfc..eb4765365b8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -355,4 +355,11 @@ enum tx_port_idx {
MT_TX_PORT_IDX_MCU
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#endif /* __MT76_CONNAC2_MAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 353e66069840..db0c29e65185 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -28,8 +28,6 @@ enum {
#define MT_RXD0_MESH BIT(18)
#define MT_RXD0_MHCP BIT(19)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
-#define MT_RXD0_NORMAL_IP_SUM BIT(23)
-#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16)
#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F
@@ -80,6 +78,8 @@ enum {
#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
#define MT_RXD3_NORMAL_CO_ANT BIT(22)
#define MT_RXD3_NORMAL_FCS_ERR BIT(24)
+#define MT_RXD3_NORMAL_IP_SUM BIT(26)
+#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27)
#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD DW4 */
@@ -197,6 +197,13 @@ enum tx_mgnt_type {
MT_TX_ADDBA,
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#define MT_CT_INFO_APPLY_TXD BIT(0)
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
#define MT_CT_INFO_MGMT_FRAME BIT(2)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index b841bf628d02..a3db65254e37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -391,6 +391,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control;
+ __le16 sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -432,6 +433,13 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+
txwi[2] |= cpu_to_le32(val);
if (ieee80211_is_beacon(fc)) {
@@ -440,7 +448,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
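
The new TXD2 fragment field above is derived purely from the More Fragments flag in frame control and the fragment number in sequence control. The classification reduces to a small truth table, sketched here with an illustrative helper (the bit masks are the standard 802.11 field definitions):

#include <stdint.h>

enum tx_frag_idx { FRAG_NONE, FRAG_FIRST, FRAG_MID, FRAG_LAST };

/* Illustrative reimplementation of the mapping in the patch:
 *   morefrags && frag 0  -> FIRST
 *   morefrags && frag >0 -> MID
 *  !morefrags && frag >0 -> LAST
 *  !morefrags && frag 0  -> NONE (unfragmented)
 * fc is the 802.11 frame control, sc the sequence control, host order. */
static enum tx_frag_idx classify_frag(uint16_t fc, uint16_t sc)
{
	int morefrags = fc & 0x0400;	/* IEEE80211_FCTL_MOREFRAGS */
	int first = (sc & 0x000f) == 0;	/* fragment number bits */

	if (morefrags)
		return first ? FRAG_FIRST : FRAG_MID;
	return first ? FRAG_NONE : FRAG_LAST;
}
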
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 4dce03ddbfa4..864246f94088 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (wcid && !wcid->sta)
+ if (wcid && !wcid->sta && !wcid->sta_disabled)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly)
+ int conn_state, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@@ -382,13 +382,9 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic = (struct sta_rec_basic *)tlv;
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
- if (enable) {
- if (newly)
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
+ if (newly && conn_state != CONN_STATE_DISCONNECT)
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = conn_state;
if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
@@ -1051,15 +1047,18 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
+ int conn_state;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
- link_sta, info->enable,
+ link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
@@ -2850,6 +2849,17 @@ int mt76_connac_mcu_restart(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev)
+{
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(WTBL_UPDATE),
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_del_wtbl_all);
+
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 4242d436de26..1b0e80dfc346 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -115,21 +115,26 @@ struct mt76_connac2_mcu_uni_txd {
} __packed __aligned(4);
struct mt76_connac2_mcu_rxd {
- __le32 rxd[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(mt76_connac2_mcu_rxd_hdr, hdr,
+ __le32 rxd[6];
- __le16 len;
- __le16 pkt_type_id;
+ __le16 len;
+ __le16 pkt_type_id;
- u8 eid;
- u8 seq;
- u8 option;
- u8 rsv;
- u8 ext_eid;
- u8 rsv1[2];
- u8 s2d_index;
+ u8 eid;
+ u8 seq;
+ u8 option;
+ u8 rsv;
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+ );
u8 tlv[];
};
+static_assert(offsetof(struct mt76_connac2_mcu_rxd, tlv) == sizeof(struct mt76_connac2_mcu_rxd_hdr),
+ "struct member likely outside of struct_group_tagged()");
struct mt76_connac2_patch_hdr {
char build_date[16];
@@ -1898,7 +1903,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly);
+ int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -2032,6 +2037,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 07380cce8755..4aa2dcedc874 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -8,16 +8,15 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
-static void
-mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x0_set_channel(struct mt76_phy *mphy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
+
mt76x02_pre_tbtt_enable(dev, false);
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mphy);
- mt76x0_phy_set_channel(dev, chandef);
+ mt76x0_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
@@ -28,8 +27,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_set_channel);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
@@ -61,13 +61,10 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x0_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
struct mt76_phy *mphy = &dev->mphy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 99dcb8feb9f7..50f755344968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -49,6 +49,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_set_channel(struct mt76_phy *mphy);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 2ecee7c5c80d..1eb955f3ca13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -159,6 +159,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 390f502e97f0..b031c500b741 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -217,6 +217,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 024a5c0a5a57..7a07636d09c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -630,7 +630,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) {
/* sw detector rx radar pattern */
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
@@ -658,7 +658,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 35b7ebc2c9c6..4a49a3036a46 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,7 +22,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 29b9a15f8dbe..0e1ede9314d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -188,10 +188,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
struct sk_buff *skb;
int nbeacons;
- if (!dev->mt76.beacon_mask)
- return;
-
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (!dev->mt76.beacon_mask || dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index be1217329a77..f051721bb00e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -47,6 +47,8 @@ void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+int mt76x2e_set_channel(struct mt76_phy *phy);
+int mt76x2u_set_channel(struct mt76_phy *phy);
void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
int mt76x2_phy_start(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 30959746e924..67c9d1caa0bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -25,6 +25,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2e_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 6accea551319..eb70130d2711 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -32,33 +32,25 @@ mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2_stop_hardware(dev);
}
-static void
-mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x2e_set_channel(struct mt76_phy *phy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(phy->dev, struct mt76x02_dev, mt76);
+
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, true);
- mt76x2_phy_set_channel(dev, chandef);
+ mt76x2_phy_set_channel(dev, &phy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
static int
@@ -95,11 +87,8 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x2_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e92bb871f231..e832ad53e239 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -32,6 +32,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2u_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ba0241c36672..83e7061b10e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -31,32 +31,20 @@ static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2u_stop_hw(dev);
}
-static int
-mt76x2u_set_channel(struct mt76x02_dev *dev,
- struct cfg80211_chan_def *chandef)
+int mt76x2u_set_channel(struct mt76_phy *mphy)
{
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
int err;
- cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, false);
- err = mt76x2u_phy_set_channel(dev, chandef);
+ err = mt76x2u_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -93,11 +81,8 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- err = mt76x2u_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index a978f434dc5e..6bef96e3d2a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
@@ -398,6 +400,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
hw->max_tx_fragments = 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 8008ce3fa6c7..cf77ce0c8759 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1448,6 +1448,7 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
dev->recovery.hw_full_reset = true;
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
@@ -1462,26 +1463,27 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (!mt7915_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (ext_phy)
- ieee80211_restart_hw(ext_phy->hw);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ while (!list_empty(&dev->mt76.sta_poll_list))
+ list_del_init(dev->mt76.sta_poll_list.next);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
- ieee80211_wake_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_wake_queues(ext_phy->hw);
+ memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ dev->mt76.vif_mask = 0;
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7915_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
if (ext_phy)
- ieee80211_queue_delayed_work(ext_phy->hw,
- &ext_phy->mac_work,
- MT7915_WATCHDOG_TIME);
+ ieee80211_restart_hw(ext_phy->hw);
}
/* system error recovery */
@@ -1537,12 +1539,14 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
- mutex_lock(&dev->mt76.mutex);
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_stop(&dev->mt76.mmio.wed);
@@ -1692,6 +1696,11 @@ void mt7915_reset(struct mt7915_dev *dev)
return;
}
+ if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ }
+
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
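
Setting MT76_MCU_RESET before wake_up() in the hunks above lets any thread blocked on an MCU response notice the reset and bail out instead of sleeping until its timeout. The generic flag-plus-waitqueue pattern, sketched with pthreads (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for the MCU wait queue and reset flag. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
static bool response_ready, mcu_reset;

/* Waiter: the analogue of blocking for an MCU response. */
static int wait_for_response(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	while (!response_ready && !mcu_reset)
		pthread_cond_wait(&wait_q, &lock);
	if (mcu_reset)
		ret = -1;	/* give up: device is being reset */
	pthread_mutex_unlock(&lock);
	return ret;
}

/* Reset path: set the flag first, then wake every waiter. */
static void begin_reset(void)
{
	pthread_mutex_lock(&lock);
	mcu_reset = true;
	pthread_cond_broadcast(&wait_q);
	pthread_mutex_unlock(&lock);
}
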
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 049223df9beb..d75e8dea1fbd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -245,7 +245,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+ if (idx < 0)
+ return -ENOSPC;
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
@@ -272,7 +274,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
memset(&mvif->cap, -1, sizeof(mvif->cap));
mt7915_mcu_add_bss_info(phy, vif, true);
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -291,7 +293,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
int idx = msta->wcid.idx;
mt7915_mcu_add_bss_info(phy, vif, false);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, mvif->sta.wcid.idx);
mutex_lock(&dev->mt76.mutex);
mt76_testmode_reset(phy->mt76, true);
@@ -317,18 +320,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7915_set_channel(struct mt7915_phy *phy)
+int mt7915_set_channel(struct mt76_phy *mphy)
{
+ struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
@@ -347,11 +344,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@@ -374,6 +366,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
+
/* The hardware does not support per-STA RX GTK; fall back
* to software mode for these.
*/
@@ -464,11 +459,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7915_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
@@ -564,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
rxfilter = phy->rxfilter;
@@ -633,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
if (set_sta == 1)
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
@@ -668,7 +660,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 0)
mt7915_mcu_add_bss_info(phy, vif, false);
if (set_sta == 0)
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -706,7 +698,7 @@ mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = mt7915_mcu_add_bss_info(phy, vif, true);
if (err)
goto out;
- err = mt7915_mcu_add_sta(dev, vif, NULL, true);
+ err = mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -720,7 +712,7 @@ mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -743,8 +735,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
- int ret, idx;
- u32 addr;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -753,25 +744,61 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
INIT_LIST_HEAD(&msta->rc_list);
INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = ext_phy;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, true);
- ret = mt7915_mcu_add_sta(dev, vif, sta, true);
- if (ret)
- return ret;
+ return 0;
+}
- addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
- mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i, ret;
+ u32 addr;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
+ ret = mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 0;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+ return 0;
+ }
+
+ return 0;
}
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -779,16 +806,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- int i;
-
- mt7915_mcu_add_sta(dev, vif, sta, false);
mt7915_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7915_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -896,22 +917,6 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7915_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -1089,8 +1094,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (is_mt7915(&phy->dev->mt76) &&
- !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1164,6 +1168,10 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (!msta->wcid.sta)
+ return;
mt7915_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1207,6 +1215,9 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1223,6 +1234,9 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1579,6 +1593,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
}
static int
+mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ return 0;
+}
+
+static int
mt7915_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
@@ -1660,6 +1680,17 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
}
#endif
+static void
+mt7915_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
const struct ieee80211_ops mt7915_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1676,8 +1707,7 @@ const struct ieee80211_ops mt7915_ops = {
.bss_info_changed = mt7915_bss_info_changed,
.start_ap = mt7915_start_ap,
.stop_ap = mt7915_stop_ap,
- .sta_add = mt7915_sta_add,
- .sta_remove = mt7915_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.sta_rc_update = mt7915_sta_rc_update,
.set_key = mt7915_set_key,
@@ -1708,6 +1738,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,
.twt_teardown_request = mt7915_twt_teardown_request,
+ .set_frag_threshold = mt7915_set_frag_threshold,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1718,4 +1749,5 @@ const struct ieee80211_ops mt7915_ops = {
.net_fill_forward_path = mt7915_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .reconfig_complete = mt7915_reconfig_complete,
};
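The mt7915_ops change from .sta_add/.sta_remove to .sta_state means the mt76 core now sees every mac80211 station state transition and forwards the interesting ones to the new .sta_event driver hook handled above. A sketch of the assumed mapping (the authoritative dispatch lives in mt76_sta_state() in the mt76 core, not in this hunk):

/* Sketch: how mac80211 station-state transitions are assumed to map onto
 * the MT76_STA_EVENT_* values consumed by mt7915_mac_sta_event() above.
 */
static enum mt76_sta_event
sta_state_to_event(enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		return MT76_STA_EVENT_ASSOC;

	if (old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTHORIZED)
		return MT76_STA_EVENT_AUTHORIZE;

	/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH tears the entry down */
	return MT76_STA_EVENT_DISASSOC;
}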
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2185cd24e2e1..87d0dd040001 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -157,12 +157,21 @@ static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
+
+ if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) {
+ dev->recovery.restart = true;
+ wake_up(&dev->mt76.mcu.wait);
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+
return -ETIMEDOUT;
}
@@ -191,11 +200,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
enum mt76_mcuq_id qid;
- int ret;
-
- ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
- if (ret)
- return ret;
if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
@@ -293,7 +297,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -690,13 +694,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
+ int ret;
+ mt76_worker_disable(&dev->mt76.tx_worker);
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
+ ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
+ mt76_worker_enable(&dev->mt76.tx_worker);
- return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- MCU_EXT_CMD(STA_REC_UPDATE),
- enable, true);
+ return ret;
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
@@ -1653,7 +1661,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_link_sta *link_sta;
@@ -1670,13 +1678,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
- if (!enable)
- goto out;
-
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ conn_state, newly);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (sta && conn_state != CONN_STATE_DISCONNECT) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -1687,12 +1692,17 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
- ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret) {
- dev_kfree_skb(skb);
- return ret;
+ if (newly || conn_state != CONN_STATE_DISCONNECT) {
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
+ if (conn_state == CONN_STATE_DISCONNECT)
+ goto out;
+
if (sta) {
/* starec amsdu */
mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
@@ -2352,6 +2362,8 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
+
if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
is_mt7915(&dev->mt76)) ||
!mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
@@ -2376,7 +2388,9 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .max_retry = 3,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
};
@@ -2747,7 +2761,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
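Splitting message construction out of mt7915_mcu_send_message() into the new .mcu_skb_prepare_msg hook, together with .max_retry = 3, suggests the core can now rebuild and resend a request whose response timed out. A rough sketch of that assumed flow (simplified: error handling and the skb cloning a real implementation would need before each attempt are omitted):

/* Assumed retry flow enabled by the prepare/send split. */
static int mcu_send_with_retry(struct mt76_dev *dev, struct sk_buff *skb,
			       int cmd, int *seq)
{
	const struct mt76_mcu_ops *ops = dev->mcu_ops;
	int i, ret = -ETIMEDOUT;

	for (i = 0; i <= ops->max_retry; i++) {
		ret = ops->mcu_skb_prepare_msg(dev, skb, cmd, seq);
		if (ret)
			break;

		ret = ops->mcu_skb_send_msg(dev, skb, cmd, seq);
		if (ret != -ETIMEDOUT)
			break;	/* success or a non-retryable error */
	}

	return ret;
}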
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index b41ac4aaced7..49476a4182fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -29,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -37,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 omac_idx;
u8 csa_count;
@@ -46,7 +46,7 @@ struct mt7915_mcu_csa_notify {
} __packed;
struct mt7915_mcu_bcc_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 omac_idx;
@@ -55,7 +55,7 @@ struct mt7915_mcu_bcc_notify {
} __packed;
struct mt7915_mcu_rdd_report {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 long_detected;
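All four event structures above switch from embedding struct mt76_connac2_mcu_rxd to the new mt76_connac2_mcu_rxd_hdr, presumably because the full RXD structure ends in a flexible-array member, and a struct with a flexible array must not sit in the middle of another struct. A generic illustration of the pattern (names here are illustrative, not the mt76 definitions):

/* Illustrative only -- not the mt76 definitions. */
struct rxd_hdr {
	__le32 len;
	u8 seq, pkt_type;
} __packed;

struct rxd {
	struct rxd_hdr hdr;
	u8 tlv[];	/* flexible array: must stay at the end */
} __packed;

struct thermal_event {
	struct rxd_hdr rxd;	/* OK: fixed-size header only */
	__le32 temperature;	/* event payload overlays the TLV area */
} __packed;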
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index d6ecd698cdcd..44e112b8b5b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -927,8 +927,10 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_add = mt7915_mac_sta_add,
+ .sta_event = mt7915_mac_sta_event,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
+ .set_channel = mt7915_set_channel,
};
struct mt7915_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index a30d08eb0656..ac0b1f0eb27c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -444,7 +444,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -463,7 +463,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt7915_set_channel(struct mt7915_phy *phy);
+int mt7915_set_channel(struct mt76_phy *mphy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
@@ -560,6 +560,8 @@ void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 0d76ae31b376..d534fff5c952 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -404,6 +404,7 @@ static void
mt7915_tm_init(struct mt7915_phy *phy, bool en)
{
struct mt7915_dev *dev = phy->dev;
+ int state;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
@@ -415,7 +416,8 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
- mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+ state = en ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, state, true);
if (!en)
mt7915_tm_set_tam_arb(phy, en, 0);
@@ -425,7 +427,7 @@ static void
mt7915_tm_update_channel(struct mt7915_phy *phy)
{
mutex_unlock(&phy->dev->mt76.mutex);
- mt7915_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mutex_lock(&phy->dev->mt76.mutex);
mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
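The testmode hunk shows the general shape of the conversion applied throughout this series: the old bool 'enable' argument of *_mcu_add_sta() becomes a tri-state connection state. A sketch of the assumed values (the authoritative definition lives in the mt76_connac headers; the order shown is illustrative):

/* Assumed tri-state replacing the old bool 'enable'. */
enum {
	CONN_STATE_DISCONNECT,	/* tear the STA record down */
	CONN_STATE_CONNECT,	/* associated, 802.1X port still closed */
	CONN_STATE_PORT_SECURE,	/* authorized, data may flow */
};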
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ef0c721d26e3..d1d64fa7d35d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7921_hwmon_groups);
@@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
}
/* UNII-4 */
- if (IS_UNII_INVALID(0, 5850, 5925))
+ if (IS_UNII_INVALID(0, 5845, 5925))
ch->flags |= IEEE80211_CHAN_DISABLED;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 23b228804289..a7f5bfbc02ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -455,37 +455,30 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
return mt7921_abort_roc(phy, mvif);
}
-static int mt7921_set_channel(struct mt792x_phy *phy)
+int mt7921_set_channel(struct mt76_phy *mphy)
{
+ struct mt792x_phy *phy = mphy->priv;
struct mt792x_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt792x_mutex_acquire(dev);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
+ mt76_connac_pm_wake(mphy, &dev->pm);
ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
mt792x_mac_set_timeing(phy);
-
mt792x_mac_reset_counters(phy);
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mt792x_mutex_release(dev);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt76_worker_schedule(&dev->mt76.tx_worker);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
return ret;
}
+EXPORT_SYMBOL_GPL(mt7921_set_channel);
static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -620,11 +613,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7921_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
mt792x_mutex_acquire(dev);
@@ -831,13 +822,16 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_add);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -853,8 +847,10 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_event);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
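Note how much boilerplate the new .set_channel callback removes from the driver: mt7921_set_channel() no longer stops queues, takes the mutex, sets MT76_RESET, or calls mt76_set_channel() itself. The shape of the core helper can be inferred from the deleted driver lines; an illustrative sketch (the authoritative implementation lives in the mt76 core, not in this patch):

/* Illustrative shape of mt76_update_channel(), inferred from the
 * driver code deleted above.
 */
int mt76_update_channel(struct mt76_phy *phy)
{
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);
	ieee80211_stop_queues(phy->hw);

	mutex_lock(&phy->dev->mutex);
	set_bit(MT76_RESET, &phy->state);

	mt76_set_channel(phy);			/* common channel bookkeeping */
	ret = phy->dev->drv->set_channel(phy);	/* driver-specific programming */

	clear_bit(MT76_RESET, &phy->state);
	mutex_unlock(&phy->dev->mutex);

	ieee80211_wake_queues(phy->hw);
	mt76_worker_schedule(&phy->dev->tx_worker);

	return ret;
}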
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 394fcd799345..02c1de8620a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -890,7 +890,7 @@ int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 6c5392c5d207..16c89815c0b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -186,6 +186,7 @@ int __mt7921_start(struct mt792x_phy *phy);
int mt7921_register_device(struct mt792x_dev *dev);
void mt7921_unregister_device(struct mt792x_dev *dev);
int mt7921_run_firmware(struct mt792x_dev *dev);
+int mt7921_set_channel(struct mt76_phy *mphy);
int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
@@ -244,8 +245,8 @@ int mt7921_mac_init(struct mt792x_dev *dev);
bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_reset_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index a7430216a80d..67723c22aea6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -244,9 +244,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 004d942ee11a..95f526f7bb99 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -100,9 +100,10 @@ static int mt7921s_probe(struct sdio_func *func,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt76_bus_ops mt7921s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8b7c03c47598..8aa4f0203208 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -151,9 +151,10 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops hif_ops = {
.mcu_init = mt7921u_mcu_init,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index cf36750cf709..634c42bbf23f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
bool hdr_trans, unicast, insert_ccmp_hdr = false;
u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
@@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt792x_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
u32 csum_status = *(u32 *)skb->cb;
- u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
@@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8c0768bf9343..791c8b00e112 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
+void mt7925_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync);
+
void mt7925_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1078,23 +1091,26 @@ static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
mt792x_mutex_release(dev);
}
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
+ struct ieee80211_link_sta *link_sta = &sta->deflink;
+
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
if (ieee80211_vif_is_mld(vif)) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct ieee80211_link_sta *link_sta;
link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
-
mt7925_mac_set_links(mdev, vif);
-
- mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
- } else {
- mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
}
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_event);
static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct ieee80211_vif *vif,
@@ -1109,6 +1125,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_id);
+ mt7925_roc_abort_sync(dev);
+
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 9dc22fbe25d3..748ea6adbc6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
+ if (clc->idx >= ARRAY_SIZE(phy->clc))

+ break;
+
/* do not init buf again if chip reset triggered */
if (phy->clc[clc->idx])
continue;
@@ -1770,16 +1773,19 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
+ int conn_state;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
if (info->link_sta)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
info->link_sta,
- info->enable, info->newly);
+ conn_state, info->newly);
if (info->link_sta && info->enable) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
@@ -2171,12 +2177,12 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
req = (struct bss_rlm_tlv *)tlv;
- req->control_channel = chandef->chan->hw_value,
- req->center_chan = ieee80211_frequency_to_channel(freq1),
- req->center_chan2 = ieee80211_frequency_to_channel(freq2),
- req->tx_streams = hweight8(phy->antenna_mask),
- req->ht_op_info = 4, /* set HT 40M allowed */
- req->rx_streams = hweight8(phy->antenna_mask),
+ req->control_channel = chandef->chan->hw_value;
+ req->center_chan = ieee80211_frequency_to_channel(freq1);
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->tx_streams = hweight8(phy->antenna_mask);
+ req->ht_op_info = 4; /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask);
req->band = band;
switch (chandef->width) {
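The bss_rlm hunk above replaces trailing commas with semicolons. With the comma operator, all six assignments still compile as one expression statement, so the bug is silent; it only bites once someone wraps a subset of the lines in a conditional or inserts a statement between them. A minimal standalone illustration:

#include <stdio.h>

struct rlm { int control_channel, center_chan, band; };

int main(void)
{
	struct rlm req;

	/* Comma operator: all three assignments execute as one statement,
	 * behaving the same as three semicolon-terminated lines -- for now.
	 */
	req.control_channel = 1,
	req.center_chan = 42,
	req.band = 2;

	printf("%d %d %d\n", req.control_channel, req.center_chan, req.band);
	return 0;
}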
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 669f3a079d04..f5c02e5f5066 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -219,8 +219,8 @@ int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7925_mac_reset_work(struct work_struct *work);
@@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
enum mt7925_roc_req type, u8 token_id);
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
+void mt7925_roc_abort_sync(struct mt792x_dev *dev);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 6e4f4e78c350..9aec675450f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -279,7 +279,7 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7925_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
@@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7925_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 1e0f094fc905..682db1bab21c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -142,7 +142,7 @@ static int mt7925u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7925_queue_rx_skb,
.rx_check = mt7925_rx_check,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 7fa74d59cc48..ab12616ec2b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -68,7 +68,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
- MT792x_CLC_CHAN,
+ MT792x_CLC_POWER_EXT,
MT792x_CLC_MAX_NUM,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 283df84f1b43..5e96973226bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -42,6 +42,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
}
};
@@ -941,8 +942,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if (is_mt7996(phy->mt76->dev))
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -987,9 +992,15 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
- c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
+
+ if (is_mt7996(phy->mt76->dev))
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@@ -1011,8 +1022,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- if (vif == NL80211_IFTYPE_AP)
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
@@ -1020,6 +1029,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
sts - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
@@ -1179,12 +1193,12 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] =
- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
- val = max_t(u8, sts - 1, 3);
+ /* Set the maximum capability regardless of the antenna configuration. */
+ val = is_mt7992(phy->mt76->dev) ? 4 : 3;
eht_cap_elem->phy_cap_info[0] |=
u8_encode_bits(u8_get_bits(val, BIT(0)),
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
@@ -1193,30 +1207,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+
+ if (band == NL80211_BAND_6GHZ) {
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ }
eht_cap_elem->phy_cap_info[3] =
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
@@ -1230,14 +1250,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] =
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
-
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
#define SET_EHT_MAX_NSS(_bw, _val) do { \
@@ -1248,8 +1260,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val);
SET_EHT_MAX_NSS(160, val);
- SET_EHT_MAX_NSS(320, val);
+ if (band == NL80211_BAND_6GHZ)
+ SET_EHT_MAX_NSS(320, val);
#undef SET_EHT_MAX_NSS
+
+ if (iftype != NL80211_IFTYPE_AP)
+ return;
+
+ eht_cap_elem->phy_cap_info[3] |=
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
+
+ if (band != NL80211_BAND_6GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index bc7111a71f98..0d21414e2c88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
u32 csum_status = *(u32 *)skb->cb;
u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
@@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask &&
+ if ((rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -746,7 +746,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
+ __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -780,6 +780,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+ else
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
+
txwi[2] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
@@ -789,7 +798,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
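The TXD2 fragment hunk above classifies each frame from two bits of 802.11 header state: the More Fragments flag and whether the fragment number is zero. The same logic as a small truth-table helper (names follow the hunk; this is a restatement, not additional driver code):

/* First/middle/last/none classification used when filling MT_TXD2_FRAG. */
static u32 frag_type(bool morefrags, bool first_frag)
{
	if (morefrags && first_frag)
		return MT_TX_FRAG_FIRST;
	if (morefrags && !first_frag)
		return MT_TX_FRAG_MID;
	if (!morefrags && !first_frag)
		return MT_TX_FRAG_LAST;
	return MT_TX_FRAG_NONE;	/* unfragmented frame */
}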
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index bce082038219..39f071ece35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true);
if (ret)
@@ -291,22 +291,19 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7996_set_channel(struct mt7996_phy *phy)
+int mt7996_set_channel(struct mt76_phy *mphy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_phy *phy = mphy->priv;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
if (ret)
goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -314,13 +311,7 @@ int mt7996_set_channel(struct mt7996_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
- ieee80211_queue_delayed_work(phy->mt76->hw,
- &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7996_WATCHDOG_TIME);
return ret;
@@ -360,14 +351,14 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- fallthrough;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7)
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
+ }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -411,11 +402,9 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7996_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2e4fa9f48dfb..6c445a9dbc03 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -371,7 +371,7 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv = skb_put(skb, len);
+ struct tlv *ptlv = skb_put_zero(skb, len);
ptlv->tag = cpu_to_le16(tag);
ptlv->len = cpu_to_le16(len);
@@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
+ if (!vif->bss_conf.bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
if (bfee)
return vif->bss_conf.eht_su_beamformee &&
- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
return vif->bss_conf.eht_su_beamformer &&
- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
if (sta->deflink.he_cap.has_he) {
@@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
u8 snd_dim, sts;
+ if (!vc)
+ return;
+
bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf);
@@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
static const u8 matrix[4][4] = {
@@ -2160,6 +2163,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
+ int conn_state;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
@@ -2172,8 +2176,9 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
- enable, newly);
+ conn_state, newly);
if (!enable)
goto out;
@@ -3460,7 +3465,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
@@ -3923,8 +3928,9 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
req_mod_en = (struct bf_mod_en_ctrl *)tlv;
- req_mod_en->bf_num = 3;
- req_mod_en->bf_bitmap = GENMASK(2, 0);
+ req_mod_en->bf_num = mt7996_band_valid(dev, MT_BAND2) ? 3 : 2;
+ req_mod_en->bf_bitmap = mt7996_band_valid(dev, MT_BAND2) ?
+ GENMASK(2, 0) : GENMASK(1, 0);
break;
}
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 928a9663b49e..40e45fb2b626 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -620,6 +620,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.sta_add = mt7996_mac_sta_add,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
+ .set_channel = mt7996_set_channel,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 177cfff31120..ab8c9070630b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -468,7 +468,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd);
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
-int mt7996_set_channel(struct mt7996_phy *phy);
+int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5cf6edee4d13..ce193e625666 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -313,6 +313,9 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!wcid->sta)
+ return idx;
+
q->entry[idx].wcid = wcid->idx;
if (!non_aql)
@@ -330,6 +333,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff_head *head;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +349,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
- spin_lock_bh(&wcid->tx_pending.lock);
- __skb_queue_tail(&wcid->tx_pending, skb);
- spin_unlock_bh(&wcid->tx_pending.lock);
+ if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+ (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ head = &wcid->tx_offchannel;
+ else
+ head = &wcid->tx_pending;
+
+ spin_lock_bh(&head->lock);
+ __skb_queue_tail(head, skb);
+ spin_unlock_bh(&head->lock);
spin_lock_bh(&phy->tx_lock);
if (list_empty(&wcid->tx_list))
@@ -478,7 +488,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@@ -522,7 +532,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +578,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4)
+ if (qid >= 4 || phy->offchannel)
return;
local_bh_disable();
@@ -586,7 +596,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ struct sk_buff_head *head)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_sta *sta;
@@ -594,8 +605,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
struct sk_buff *skb;
int ret = 0;
- spin_lock(&wcid->tx_pending.lock);
- while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+ spin_lock(&head->lock);
+ while ((skb = skb_peek(head)) != NULL) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int qid = skb_get_queue_mapping(skb);
@@ -607,13 +618,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
- if (mt76_txq_stopped(q)) {
+ if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
ret = -1;
break;
}
- __skb_unlink(skb, &wcid->tx_pending);
- spin_unlock(&wcid->tx_pending.lock);
+ __skb_unlink(skb, head);
+ spin_unlock(&head->lock);
sta = wcid_to_sta(wcid);
spin_lock(&q->lock);
@@ -621,15 +632,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
dev->queue_ops->kick(dev, q);
spin_unlock(&q->lock);
- spin_lock(&wcid->tx_pending.lock);
+ spin_lock(&head->lock);
}
- spin_unlock(&wcid->tx_pending.lock);
+ spin_unlock(&head->lock);
return ret;
}
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
+ LIST_HEAD(tx_list);
+
if (list_empty(&phy->tx_list))
return;
@@ -637,22 +650,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
rcu_read_lock();
spin_lock(&phy->tx_lock);
- while (!list_empty(&phy->tx_list)) {
- struct mt76_wcid *wcid = NULL;
+ list_splice_init(&phy->tx_list, &tx_list);
+ while (!list_empty(&tx_list)) {
+ struct mt76_wcid *wcid;
int ret;
- wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+ wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
list_del_init(&wcid->tx_list);
spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ if (ret >= 0 && !phy->offchannel)
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
spin_lock(&phy->tx_lock);
- if (ret) {
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
+ if (!skb_queue_empty(&wcid->tx_pending) &&
+ !skb_queue_empty(&wcid->tx_offchannel) &&
+ list_empty(&wcid->tx_list))
+ list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+ if (ret < 0)
break;
- }
}
spin_unlock(&phy->tx_lock);
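mt76_txq_schedule_pending() now claims the whole station list with list_splice_init() before doing any transmit work, so phy->tx_lock can be dropped around the heavy per-station part without other CPUs re-feeding the same iteration; new arrivals land on phy->tx_list and are picked up on the next pass. The skeleton of that pattern, distilled from the hunk above:

/* Skeleton of the splice-and-drop-lock pattern used above. */
static void schedule_pending_sketch(struct mt76_phy *phy)
{
	LIST_HEAD(tx_list);

	spin_lock(&phy->tx_lock);
	list_splice_init(&phy->tx_list, &tx_list);	/* claim all work */

	while (!list_empty(&tx_list)) {
		struct mt76_wcid *wcid;

		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
		list_del_init(&wcid->tx_list);

		/* heavy per-station work runs without the list lock held;
		 * concurrent submitters touch phy->tx_list, not our local list
		 */
		spin_unlock(&phy->tx_lock);
		/* ... drain wcid->tx_offchannel, then wcid->tx_pending ... */
		spin_lock(&phy->tx_lock);
	}

	spin_unlock(&phy->tx_lock);
}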
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 3c48e1a57b24..bba53307b960 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct wilc_join_bss_param *param;
u8 rates_len = 0;
int ies_len;
+ u64 ies_tsf;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
return NULL;
}
ies_len = ies->len;
+ ies_tsf = ies->tsf;
rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval);
@@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
- param->tsf_lo = cpu_to_le32(ies->tsf);
+ param->tsf_lo = cpu_to_le32(ies_tsf);
param->noa_enabled = 1;
param->idx = noa_attr.index;
if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
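The hif.c fix copies ies->tsf into a local while the RCU read lock is still held; dereferencing ies after rcu_read_unlock() would be a use-after-free if the BSS IEs were replaced concurrently. The general shape of the pattern:

/* Copy everything you need out of an RCU-protected object before
 * dropping the read lock; return plain values, never the pointer.
 */
static u64 read_bss_tsf(struct cfg80211_bss *bss)
{
	const struct cfg80211_bss_ies *ies;
	u64 tsf = 0;

	rcu_read_lock();
	ies = rcu_dereference(bss->ies);
	if (ies)
		tsf = ies->tsf;	/* copied while the pointer is guaranteed live */
	rcu_read_unlock();

	return tsf;		/* safe: no dangling pointer escapes */
}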
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 0043f7a0fdf9..b4da05d5a498 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -174,19 +174,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->bus_data = sdio_priv;
wilc->dev = &func->dev;
- wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&func->card->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto dispose_irq;
}
- clk_prepare_enable(wilc->rtc_clk);
wilc_sdio_init(wilc, false);
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
wilc_sdio_deinit(wilc);
@@ -195,14 +194,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
-clk_disable_unprepare:
- clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -217,7 +214,6 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc *wilc = sdio_get_drvdata(func);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
@@ -977,6 +973,9 @@ static int wilc_sdio_suspend(struct device *dev)
dev_info(dev, "sdio suspend\n");
+ if (!wilc->initialized)
+ return 0;
+
if (!IS_ERR(wilc->rtc_clk))
clk_disable_unprepare(wilc->rtc_clk);
@@ -999,6 +998,13 @@ static int wilc_sdio_resume(struct device *dev)
struct wilc *wilc = sdio_get_drvdata(func);
dev_info(dev, "sdio resume\n");
+
+ if (!wilc->initialized)
+ return 0;
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
wilc_sdio_init(wilc, true);
wilc_sdio_enable_interrupt(wilc);
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 5ff940c53ad9..05b577b1068e 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -228,12 +228,11 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret < 0)
goto netdev_cleanup;
- wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto netdev_cleanup;
}
- clk_prepare_enable(wilc->rtc_clk);
dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
@@ -266,7 +265,6 @@ static int wilc_bus_probe(struct spi_device *spi)
return 0;
power_down:
- clk_disable_unprepare(wilc->rtc_clk);
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
@@ -280,7 +278,6 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
}
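Both wilc1000 bus drivers benefit from the same property of devm_clk_get_optional_enabled(): the returned clock is already prepared and enabled, and devres disables and unprepares it automatically on driver detach. That is why every manual clk_disable_unprepare() in the probe error paths and in remove() can simply be deleted. A minimal probe sketch:

/* Minimal probe using the managed-and-enabled clock getter; no
 * error-path or remove() cleanup is needed for the clock itself.
 */
static int example_probe(struct spi_device *spi)
{
	struct clk *rtc_clk;

	rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc");
	if (IS_ERR(rtc_clk))
		return PTR_ERR(rtc_clk);	/* nothing to unwind */

	/* ... rest of probe; devres disables the clock on detach ... */
	return 0;
}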
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 663d77770fce..8b97accf6638 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -837,7 +837,7 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 76b07db284f8..71840f41b73c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -520,21 +520,21 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
break;
case QLINK_RADAR_CAC_FINISHED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_ABORTED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_STARTED:
- if (vif->wdev.cac_started)
+ if (vif->wdev.links[0].cac_started)
break;
if (!wiphy_ext_feature_isset(wiphy,
@@ -542,7 +542,7 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_STARTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_STARTED, GFP_KERNEL, 0);
break;
default:
pr_warn("%s: unhandled radar event %u\n",
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index 44ad94757a03..14d0343368ac 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -20,9 +20,8 @@ config RTL8XXXU
memory footprint than the vendor drivers and benefits
from the in kernel mac80211 stack.
- It can coexist with drivers from drivers/staging/rtl8723au,
- drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
- but you will need to control which module you wish to load.
+ It can coexist with the rtlwifi driver, but you will need
+ to control which module you wish to load.
To compile this driver as a module, choose M here: the module will
be called rtl8xxxu. If unsure, say N.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 043fa364e701..7891c988dd5f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -8110,6 +8110,12 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x819a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8754, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Tested by Larry Finger */
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index 22838ede03cd..02b0d698413b 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -12,6 +12,7 @@ if RTW88
config RTW88_CORE
tristate
+ select WANT_DEV_COREDUMP
config RTW88_PCI
tristate
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index de3332eb7a22..a99776af56c2 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 table_case, tdma_case;
- bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
- if (wl_cpt_test) {
- if (coex_stat->wl_gl_busy) {
- table_case = 20;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 15;
- }
- } else if (bt_cpt_test) {
- table_case = 26;
- tdma_case = 26;
- } else {
- if (coex_stat->wl_gl_busy &&
- coex_stat->wl_noisy_level == 0)
- table_case = 14;
- else
- table_case = 10;
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
- if (coex_stat->wl_gl_busy)
- tdma_case = 15;
- else
- tdma_case = 20;
- }
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
} else {
/* Non-Shared-Ant */
table_case = 112;
@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
tdma_case = 120;
}
- if (wl_cpt_test)
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]);
- else
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
-
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 5b2036798159..c26a6905fd15 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -43,6 +43,62 @@ struct rtw_debugfs_priv {
};
};
+struct rtw_debugfs {
+ struct rtw_debugfs_priv mac_0;
+ struct rtw_debugfs_priv mac_1;
+ struct rtw_debugfs_priv mac_2;
+ struct rtw_debugfs_priv mac_3;
+ struct rtw_debugfs_priv mac_4;
+ struct rtw_debugfs_priv mac_5;
+ struct rtw_debugfs_priv mac_6;
+ struct rtw_debugfs_priv mac_7;
+ struct rtw_debugfs_priv mac_10;
+ struct rtw_debugfs_priv mac_11;
+ struct rtw_debugfs_priv mac_12;
+ struct rtw_debugfs_priv mac_13;
+ struct rtw_debugfs_priv mac_14;
+ struct rtw_debugfs_priv mac_15;
+ struct rtw_debugfs_priv mac_16;
+ struct rtw_debugfs_priv mac_17;
+ struct rtw_debugfs_priv bb_8;
+ struct rtw_debugfs_priv bb_9;
+ struct rtw_debugfs_priv bb_a;
+ struct rtw_debugfs_priv bb_b;
+ struct rtw_debugfs_priv bb_c;
+ struct rtw_debugfs_priv bb_d;
+ struct rtw_debugfs_priv bb_e;
+ struct rtw_debugfs_priv bb_f;
+ struct rtw_debugfs_priv bb_18;
+ struct rtw_debugfs_priv bb_19;
+ struct rtw_debugfs_priv bb_1a;
+ struct rtw_debugfs_priv bb_1b;
+ struct rtw_debugfs_priv bb_1c;
+ struct rtw_debugfs_priv bb_1d;
+ struct rtw_debugfs_priv bb_1e;
+ struct rtw_debugfs_priv bb_1f;
+ struct rtw_debugfs_priv bb_2c;
+ struct rtw_debugfs_priv bb_2d;
+ struct rtw_debugfs_priv bb_40;
+ struct rtw_debugfs_priv bb_41;
+ struct rtw_debugfs_priv rf_dump;
+ struct rtw_debugfs_priv tx_pwr_tbl;
+ struct rtw_debugfs_priv write_reg;
+ struct rtw_debugfs_priv h2c;
+ struct rtw_debugfs_priv rf_write;
+ struct rtw_debugfs_priv rf_read;
+ struct rtw_debugfs_priv read_reg;
+ struct rtw_debugfs_priv fix_rate;
+ struct rtw_debugfs_priv dump_cam;
+ struct rtw_debugfs_priv rsvd_page;
+ struct rtw_debugfs_priv phy_info;
+ struct rtw_debugfs_priv coex_enable;
+ struct rtw_debugfs_priv coex_info;
+ struct rtw_debugfs_priv edcca_enable;
+ struct rtw_debugfs_priv fw_crash;
+ struct rtw_debugfs_priv force_lowest_basic_rate;
+ struct rtw_debugfs_priv dm_cap;
+};
+
static const char * const rtw_dm_cap_strs[] = {
[RTW_DM_CAP_NA] = "NA",
[RTW_DM_CAP_TXGAPK] = "TXGAPK",
@@ -524,7 +580,7 @@ static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
return 0;
}
-static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
+static int rtw_debugfs_get_rf_dump(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
@@ -1074,139 +1130,102 @@ static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v)
return 0;
}
-#define rtw_debug_impl_mac(page, addr) \
-static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
+#define rtw_debug_priv_mac(addr) \
+{ \
.cb_read = rtw_debug_get_mac_page, \
.cb_data = addr, \
}
-rtw_debug_impl_mac(0, 0x0000);
-rtw_debug_impl_mac(1, 0x0100);
-rtw_debug_impl_mac(2, 0x0200);
-rtw_debug_impl_mac(3, 0x0300);
-rtw_debug_impl_mac(4, 0x0400);
-rtw_debug_impl_mac(5, 0x0500);
-rtw_debug_impl_mac(6, 0x0600);
-rtw_debug_impl_mac(7, 0x0700);
-rtw_debug_impl_mac(10, 0x1000);
-rtw_debug_impl_mac(11, 0x1100);
-rtw_debug_impl_mac(12, 0x1200);
-rtw_debug_impl_mac(13, 0x1300);
-rtw_debug_impl_mac(14, 0x1400);
-rtw_debug_impl_mac(15, 0x1500);
-rtw_debug_impl_mac(16, 0x1600);
-rtw_debug_impl_mac(17, 0x1700);
-
-#define rtw_debug_impl_bb(page, addr) \
-static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \
+#define rtw_debug_priv_bb(addr) \
+{ \
.cb_read = rtw_debug_get_bb_page, \
.cb_data = addr, \
}
-rtw_debug_impl_bb(8, 0x0800);
-rtw_debug_impl_bb(9, 0x0900);
-rtw_debug_impl_bb(a, 0x0a00);
-rtw_debug_impl_bb(b, 0x0b00);
-rtw_debug_impl_bb(c, 0x0c00);
-rtw_debug_impl_bb(d, 0x0d00);
-rtw_debug_impl_bb(e, 0x0e00);
-rtw_debug_impl_bb(f, 0x0f00);
-rtw_debug_impl_bb(18, 0x1800);
-rtw_debug_impl_bb(19, 0x1900);
-rtw_debug_impl_bb(1a, 0x1a00);
-rtw_debug_impl_bb(1b, 0x1b00);
-rtw_debug_impl_bb(1c, 0x1c00);
-rtw_debug_impl_bb(1d, 0x1d00);
-rtw_debug_impl_bb(1e, 0x1e00);
-rtw_debug_impl_bb(1f, 0x1f00);
-rtw_debug_impl_bb(2c, 0x2c00);
-rtw_debug_impl_bb(2d, 0x2d00);
-rtw_debug_impl_bb(40, 0x4000);
-rtw_debug_impl_bb(41, 0x4100);
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
- .cb_read = rtw_debug_get_rf_dump,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = {
- .cb_read = rtw_debugfs_get_tx_pwr_tbl,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
- .cb_write = rtw_debugfs_set_write_reg,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_h2c = {
- .cb_write = rtw_debugfs_set_h2c,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
- .cb_write = rtw_debugfs_set_rf_write,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rf_read = {
- .cb_write = rtw_debugfs_set_rf_read,
- .cb_read = rtw_debugfs_get_rf_read,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
- .cb_write = rtw_debugfs_set_read_reg,
- .cb_read = rtw_debugfs_get_read_reg,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = {
- .cb_write = rtw_debugfs_set_fix_rate,
- .cb_read = rtw_debugfs_get_fix_rate,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
- .cb_write = rtw_debugfs_set_single_input,
- .cb_read = rtw_debugfs_get_dump_cam,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
- .cb_write = rtw_debugfs_set_rsvd_page,
- .cb_read = rtw_debugfs_get_rsvd_page,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
- .cb_read = rtw_debugfs_get_phy_info,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = {
- .cb_write = rtw_debugfs_set_coex_enable,
- .cb_read = rtw_debugfs_get_coex_enable,
-};
-
-static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
- .cb_read = rtw_debugfs_get_coex_info,
-};
+#define rtw_debug_priv_get(name) \
+{ \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = {
- .cb_write = rtw_debugfs_set_edcca_enable,
- .cb_read = rtw_debugfs_get_edcca_enable,
-};
+#define rtw_debug_priv_set(name) \
+{ \
+ .cb_write = rtw_debugfs_set_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
- .cb_write = rtw_debugfs_set_fw_crash,
- .cb_read = rtw_debugfs_get_fw_crash,
-};
+#define rtw_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw_debugfs_set_ ##name, \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = {
- .cb_write = rtw_debugfs_set_force_lowest_basic_rate,
- .cb_read = rtw_debugfs_get_force_lowest_basic_rate,
-};
+#define rtw_debug_priv_set_single_and_get(name) \
+{ \
+ .cb_write = rtw_debugfs_set_single_input, \
+ .cb_read = rtw_debugfs_get_ ##name, \
+}
-static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
- .cb_write = rtw_debugfs_set_dm_cap,
- .cb_read = rtw_debugfs_get_dm_cap,
+static const struct rtw_debugfs rtw_debugfs_templ = {
+ .mac_0 = rtw_debug_priv_mac(0x0000),
+ .mac_1 = rtw_debug_priv_mac(0x0100),
+ .mac_2 = rtw_debug_priv_mac(0x0200),
+ .mac_3 = rtw_debug_priv_mac(0x0300),
+ .mac_4 = rtw_debug_priv_mac(0x0400),
+ .mac_5 = rtw_debug_priv_mac(0x0500),
+ .mac_6 = rtw_debug_priv_mac(0x0600),
+ .mac_7 = rtw_debug_priv_mac(0x0700),
+ .mac_10 = rtw_debug_priv_mac(0x1000),
+ .mac_11 = rtw_debug_priv_mac(0x1100),
+ .mac_12 = rtw_debug_priv_mac(0x1200),
+ .mac_13 = rtw_debug_priv_mac(0x1300),
+ .mac_14 = rtw_debug_priv_mac(0x1400),
+ .mac_15 = rtw_debug_priv_mac(0x1500),
+ .mac_16 = rtw_debug_priv_mac(0x1600),
+ .mac_17 = rtw_debug_priv_mac(0x1700),
+ .bb_8 = rtw_debug_priv_bb(0x0800),
+ .bb_9 = rtw_debug_priv_bb(0x0900),
+ .bb_a = rtw_debug_priv_bb(0x0a00),
+ .bb_b = rtw_debug_priv_bb(0x0b00),
+ .bb_c = rtw_debug_priv_bb(0x0c00),
+ .bb_d = rtw_debug_priv_bb(0x0d00),
+ .bb_e = rtw_debug_priv_bb(0x0e00),
+ .bb_f = rtw_debug_priv_bb(0x0f00),
+ .bb_18 = rtw_debug_priv_bb(0x1800),
+ .bb_19 = rtw_debug_priv_bb(0x1900),
+ .bb_1a = rtw_debug_priv_bb(0x1a00),
+ .bb_1b = rtw_debug_priv_bb(0x1b00),
+ .bb_1c = rtw_debug_priv_bb(0x1c00),
+ .bb_1d = rtw_debug_priv_bb(0x1d00),
+ .bb_1e = rtw_debug_priv_bb(0x1e00),
+ .bb_1f = rtw_debug_priv_bb(0x1f00),
+ .bb_2c = rtw_debug_priv_bb(0x2c00),
+ .bb_2d = rtw_debug_priv_bb(0x2d00),
+ .bb_40 = rtw_debug_priv_bb(0x4000),
+ .bb_41 = rtw_debug_priv_bb(0x4100),
+ .rf_dump = rtw_debug_priv_get(rf_dump),
+ .tx_pwr_tbl = rtw_debug_priv_get(tx_pwr_tbl),
+ .write_reg = rtw_debug_priv_set(write_reg),
+ .h2c = rtw_debug_priv_set(h2c),
+ .rf_write = rtw_debug_priv_set(rf_write),
+ .rf_read = rtw_debug_priv_set_and_get(rf_read),
+ .read_reg = rtw_debug_priv_set_and_get(read_reg),
+ .fix_rate = rtw_debug_priv_set_and_get(fix_rate),
+ .dump_cam = rtw_debug_priv_set_single_and_get(dump_cam),
+ .rsvd_page = rtw_debug_priv_set_and_get(rsvd_page),
+ .phy_info = rtw_debug_priv_get(phy_info),
+ .coex_enable = rtw_debug_priv_set_and_get(coex_enable),
+ .coex_info = rtw_debug_priv_get(coex_info),
+ .edcca_enable = rtw_debug_priv_set_and_get(edcca_enable),
+ .fw_crash = rtw_debug_priv_set_and_get(fw_crash),
+ .force_lowest_basic_rate = rtw_debug_priv_set_and_get(force_lowest_basic_rate),
+ .dm_cap = rtw_debug_priv_set_and_get(dm_cap),
};
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
- rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+ struct rtw_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
if (IS_ERR(debugfs_create_file(#name, mode, \
- parent, &rtw_debug_priv_ ##name,\
+ parent, priv, \
&file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", \
#name); \
@@ -1219,12 +1238,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
#define rtw_debugfs_add_r(name) \
rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw_debugfs_init(struct rtw_dev *rtwdev)
+static
+void rtw_debugfs_add_basic(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw88",
- rtwdev->hw->wiphy->debugfsdir);
rtw_debugfs_add_w(write_reg);
rtw_debugfs_add_rw(read_reg);
rtw_debugfs_add_w(rf_write);
@@ -1236,6 +1252,17 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(coex_info);
rtw_debugfs_add_rw(coex_enable);
rtw_debugfs_add_w(h2c);
+ rtw_debugfs_add_r(rf_dump);
+ rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(edcca_enable);
+ rtw_debugfs_add_rw(fw_crash);
+ rtw_debugfs_add_rw(force_lowest_basic_rate);
+ rtw_debugfs_add_rw(dm_cap);
+}
+
+static
+void rtw_debugfs_add_sec0(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
@@ -1252,6 +1279,11 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_d);
rtw_debugfs_add_r(bb_e);
rtw_debugfs_add_r(bb_f);
+}
+
+static
+void rtw_debugfs_add_sec1(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw_debugfs_add_r(mac_10);
rtw_debugfs_add_r(mac_11);
rtw_debugfs_add_r(mac_12);
@@ -1274,14 +1306,29 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_40);
rtw_debugfs_add_r(bb_41);
}
- rtw_debugfs_add_r(rf_dump);
- rtw_debugfs_add_r(tx_pwr_tbl);
- rtw_debugfs_add_rw(edcca_enable);
- rtw_debugfs_add_rw(fw_crash);
- rtw_debugfs_add_rw(force_lowest_basic_rate);
- rtw_debugfs_add_rw(dm_cap);
}
+void rtw_debugfs_init(struct rtw_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw_debugfs_templ, sizeof(rtw_debugfs_templ),
+ GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw88",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw_debugfs_add_basic(rtwdev, debugfs_topdir);
+ rtw_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw_debugfs_deinit(struct rtw_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
#endif /* CONFIG_RTW88_DEBUGFS */
#ifdef CONFIG_RTW88_DEBUG
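The debugfs rework above replaces dozens of file-scope rtw_debug_priv_* variables with one const template that is kmemdup()'d per device, so two adapters no longer overwrite each other's ->rtwdev back-pointer. A standalone sketch of the template-copy pattern, with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct priv { void *owner; unsigned int cb_data; };
struct table { struct priv mac_0, mac_1; /* ... one member per file ... */ };

static const struct table templ = {
	.mac_0 = { .cb_data = 0x0000 },
	.mac_1 = { .cb_data = 0x0100 },
};

static struct table *table_alloc(void *owner)
{
	/* Every device gets a private, writable copy of the const template,
	 * so storing the back-pointer no longer clobbers other devices.
	 */
	struct table *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	memcpy(t, &templ, sizeof(templ));
	t->mac_0.owner = owner;
	t->mac_1.owner = owner;
	return t;
}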
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index eb69006c463e..6570e84d8d24 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -25,6 +25,7 @@ enum rtw_debug_mask {
RTW_DBG_HW_SCAN = 0x00010000,
RTW_DBG_STATE = 0x00020000,
RTW_DBG_SDIO = 0x00040000,
+ RTW_DBG_USB = 0x00080000,
RTW_DBG_UNEXP = 0x80000000,
RTW_DBG_ALL = 0xffffffff
@@ -33,11 +34,13 @@ enum rtw_debug_mask {
#ifdef CONFIG_RTW88_DEBUGFS
void rtw_debugfs_init(struct rtw_dev *rtwdev);
+void rtw_debugfs_deinit(struct rtw_dev *rtwdev);
void rtw_debugfs_get_simple_phy_info(struct seq_file *m);
#else
static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
+static inline void rtw_debugfs_deinit(struct rtw_dev *rtwdev) {}
#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index ab7d414d0ba6..b9b0114e253b 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
- bckp[1] = val;
- val &= ~(BIT_EN_BCNQ_DL >> 16);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
if (ret) {
@@ -1496,7 +1498,8 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret;
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 830d7532f2a3..96aeda26014e 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -18,6 +18,7 @@ struct rtw_hci_ops {
void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
void (*interface_cfg)(struct rtw_dev *rtwdev);
+ void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -72,6 +73,12 @@ static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
rtwdev->hci.ops->interface_cfg(rtwdev);
}
+static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
+{
+ if (rtwdev->hci.ops->dynamic_rx_agg)
+ rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
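dynamic_rx_agg is the first rtw_hci_ops member that may legitimately be NULL, which is why the inline wrapper above tests it before calling; the PCI and SDIO backends set it to NULL further below. The general idiom, reduced to a standalone sketch with made-up names:

struct hci_like_ops {
	void (*required_cb)(void *ctx);
	void (*optional_cb)(void *ctx, int arg);	/* may be NULL */
};

static inline void call_optional(const struct hci_like_ops *ops,
				 void *ctx, int arg)
{
	if (ops->optional_cb)	/* tolerate backends that leave it unset */
		ops->optional_cb(ctx, arg);
}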
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 63326b352738..b39e90fb66b4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -167,6 +167,12 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtwvif->mac_id = rtw_acquire_macid(rtwdev);
+ if (rtwvif->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ mutex_unlock(&rtwdev->mutex);
+ return -ENOSPC;
+ }
+
port = find_first_zero_bit(rtwdev->hw_port, RTW_PORT_NUM);
if (port >= RTW_PORT_NUM) {
mutex_unlock(&rtwdev->mutex);
@@ -214,7 +220,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
return 0;
}
@@ -225,7 +232,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,6 +250,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
clear_bit(rtwvif->port, rtwdev->hw_port);
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 7ab7a988b123..bbdef38c7e34 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -212,6 +212,7 @@ static void rtw_watch_dog_work(struct work_struct *work)
struct rtw_traffic_stats *stats = &rtwdev->stats;
struct rtw_watch_dog_iter_data data = {};
bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ u32 tx_unicast_mbps, rx_unicast_mbps;
bool ps_active;
mutex_lock(&rtwdev->mutex);
@@ -236,10 +237,11 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
ps_active = false;
- ewma_tp_add(&stats->tx_ewma_tp,
- (u32)(stats->tx_unicast >> RTW_TP_SHIFT));
- ewma_tp_add(&stats->rx_ewma_tp,
- (u32)(stats->rx_unicast >> RTW_TP_SHIFT));
+ tx_unicast_mbps = stats->tx_unicast >> RTW_TP_SHIFT;
+ rx_unicast_mbps = stats->rx_unicast >> RTW_TP_SHIFT;
+
+ ewma_tp_add(&stats->tx_ewma_tp, tx_unicast_mbps);
+ ewma_tp_add(&stats->rx_ewma_tp, rx_unicast_mbps);
stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
@@ -259,6 +261,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
rtw_phy_dynamic_mechanism(rtwdev);
+ rtw_hci_dynamic_rx_agg(rtwdev,
+ tx_unicast_mbps >= 1 || rx_unicast_mbps >= 1);
+
data.rtwdev = rtwdev;
/* rtw_iterate_vifs internally uses an atomic iterator which is needed
* to avoid taking local->iflist_mtx mutex
@@ -306,17 +311,6 @@ static void rtw_ips_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
static void rtw_sta_rc_work(struct work_struct *work)
{
struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
@@ -335,12 +329,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
int i;
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
- return -ENOSPC;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ si->mac_id = rtwvif->mac_id;
+ } else {
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+ }
- if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc == 0)
- rtwvif->mac_id = si->mac_id;
si->rtwdev = rtwdev;
si->sta = sta;
si->vif = vif;
@@ -365,11 +361,13 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist)
{
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct ieee80211_vif *vif = si->vif;
int i;
cancel_work_sync(&si->rc_work);
- rtw_release_macid(rtwdev, si->mac_id);
+ if (vif->type != NL80211_IFTYPE_STATION)
+ rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
@@ -609,6 +607,8 @@ static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw_bf_disassoc(rtwdev, vif, NULL);
rtw_vif_assoc_changed(rtwvif, NULL);
rtw_txq_cleanup(rtwdev, vif->txq);
+
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
}
void rtw_fw_recovery(struct rtw_dev *rtwdev)
@@ -1313,20 +1313,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
+ int ret = 0;
fw = &rtwdev->fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
- return -EINVAL;
+ ret = -EINVAL;
if (chip->wow_fw_name) {
fw = &rtwdev->wow_fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
@@ -2005,7 +2006,7 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
- efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+ efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
if (!is_valid_ether_addr(efuse->addr)) {
eth_random_addr(efuse->addr);
@@ -2133,7 +2134,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
rtwdev->dm_info.fix_rate = U8_MAX;
- set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
rtw_stats_init(rtwdev);
@@ -2299,6 +2299,7 @@ void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
+ rtw_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw_unregister_hw);
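The watchdog change keeps the shifted Mbps values in locals because they are reused for the new RX-aggregation decision; ewma_tp_add()/ewma_tp_read() themselves are generated by DECLARE_EWMA() from <linux/average.h>. A plain-C model of what those helpers compute, assuming a weight-1/2, 10-bit-precision declaration like DECLARE_EWMA(tp, 10, 2):

struct ewma_tp { unsigned long internal; };	/* sample << 10, fixed point */

static void ewma_tp_add(struct ewma_tp *e, unsigned long mbps)
{
	unsigned long sample = mbps << 10;

	/* new = (old + sample) / 2: a weight-1/2 exponential average */
	e->internal = e->internal ? (e->internal + sample) >> 1 : sample;
}

static unsigned long ewma_tp_read(const struct ewma_tp *e)
{
	return e->internal >> 10;
}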
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 49a3fd4fb7dc..945117afe143 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -50,6 +50,7 @@ extern const struct ieee80211_ops rtw_ops;
#define RTW_MAX_CHANNEL_NUM_5G 49
struct rtw_dev;
+struct rtw_debugfs;
enum rtw_hci_type {
RTW_HCI_TYPE_PCIE,
@@ -622,6 +623,7 @@ struct rtw_rx_pkt_stat {
bool crc_err;
bool decrypted;
bool is_c2h;
+ bool channel_invalid;
s32 signal_power;
u16 pkt_len;
@@ -740,7 +742,6 @@ struct rtw_txq {
unsigned long flags;
};
-#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
@@ -803,7 +804,7 @@ struct rtw_bf_info {
struct rtw_vif {
enum rtw_net_type net_type;
u16 aid;
- u8 mac_id; /* for STA mode only */
+ u8 mac_id;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
@@ -1785,6 +1786,8 @@ struct rtw_efuse {
bool share_ant;
u8 bt_setting;
+ u8 usb_mode_switch;
+
struct {
u8 hci;
u8 bw;
@@ -2051,7 +2054,7 @@ struct rtw_dev {
bool beacon_loss;
struct completion lps_leave_check;
- struct dentry *debugfs;
+ struct rtw_debugfs *debugfs;
u8 sta_cnt;
u32 rts_threshold;
@@ -2127,6 +2130,17 @@ static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev)
return rtwdev->chip->tx_stbc;
}
+static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
{
clear_bit(mac_id, rtwdev->mac_id_map);
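rtw_acquire_macid()/rtw_release_macid() implement a lowest-free-bit ID allocator over mac_id_map; the mac80211 callbacks that use them run under rtwdev->mutex, so no extra locking appears in the helpers. A userspace analogue of the same allocator:

#define MAX_IDS 32
static unsigned long id_map;		/* bit n set => id n in use */

static int id_acquire(void)
{
	for (int id = 0; id < MAX_IDS; id++) {
		if (!(id_map & (1UL << id))) {
			id_map |= 1UL << id;	/* claim the lowest free id */
			return id;
		}
	}
	return MAX_IDS;		/* mirrors mac_id >= RTW_MAX_MAC_ID_NUM */
}

static void id_release(int id)
{
	id_map &= ~(1UL << id);
}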
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a5b9d6c7be37..0b9b8807af2c 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1088,6 +1088,7 @@ static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
/* remove rx_desc */
skb_pull(new, pkt_offset);
+ rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
rtw_rx_stats(rtwdev, pkt_stat.vif, new);
memcpy(new->cb, &rx_status, sizeof(rx_status));
ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
@@ -1600,6 +1601,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.deep_ps = rtw_pci_deep_ps,
.link_ps = rtw_pci_link_ps,
.interface_cfg = rtw_pci_interface_cfg,
+ .dynamic_rx_agg = NULL,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 02ef9a77316b..4d9b8668e8b0 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -15,6 +15,7 @@
#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
#define BIT_PFM_WOWL BIT(3)
+#define BIT_APFM_OFFMAC BIT(9)
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
@@ -133,6 +134,14 @@
#define REG_PMC_DBG_CTRL1 0xa8
#define BITS_PMC_BT_IQK_STS GENMASK(22, 21)
+#define REG_PAD_CTRL2 0x00C4
+#define BIT_RSM_EN_V1 BIT(16)
+#define BIT_NO_PDN_CHIPOFF_V1 BIT(17)
+#define BIT_MASK_USB23_SW_MODE_V1 GENMASK(19, 18)
+#define BIT_USB3_USB2_TRANSITION BIT(20)
+#define BIT_USB_MODE_U2 1
+#define BIT_USB_MODE_U3 2
+
#define REG_EFUSE_ACCESS 0x00CF
#define EFUSE_ACCESS_ON 0x69
#define EFUSE_ACCESS_OFF 0x00
@@ -313,6 +322,12 @@
#define REG_RXDMA_DPR 0x028C
#define REG_RXDMA_MODE 0x0290
#define BIT_DMA_MODE BIT(1)
+#define BIT_DMA_BURST_CNT GENMASK(3, 2)
+#define BIT_DMA_BURST_SIZE GENMASK(5, 4)
+#define BIT_DMA_BURST_SIZE_64 2
+#define BIT_DMA_BURST_SIZE_512 1
+#define BIT_DMA_BURST_SIZE_1024 0
+
#define REG_RXPKTNUM 0x02B0
#define REG_INT_MIG 0x0304
@@ -568,6 +583,8 @@
#define BIT_WL_SECURITY_CLK BIT(15)
#define BIT_DDMA_EN BIT(8)
+#define REG_SW_MDIO 0x10C0
+
#define REG_H2C_PKT_READADDR 0x10D0
#define REG_H2C_PKT_WRITEADDR 0x10D4
#define REG_FW_DBG6 0x10F8
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index e2c7d9f87683..a019f4085e73 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 2456ff242818..6edb17aea90e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -46,6 +46,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822b_efuse *)log_map;
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
efuse->rfe_option = map->rfe_option;
efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 2dc3a6660f06..cf85e63966a1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -72,7 +72,9 @@ struct rtw8822bs_efuse {
struct rtw8822b_efuse {
__le16 rtl_id;
- u8 res0[0x0e];
+ u8 res0[4];
+ u8 usb_mode;
+ u8 res1[0x09];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 62376d1cca22..1dbe1cdbc3fd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -49,6 +49,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822c_efuse *)log_map;
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7));
efuse->rfe_option = map->rfe_option;
efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k & XCAP_MASK;
@@ -2575,9 +2576,10 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
rx_power[RF_PATH_B] -= 110;
channel = GET_PHY_STAT_P0_CHANNEL(phy_status);
- if (channel == 0)
- channel = rtwdev->hal.current_channel;
- rtw_set_rx_freq_band(pkt_stat, channel);
+ if (channel != 0)
+ rtw_set_rx_freq_band(pkt_stat, channel);
+ else
+ pkt_stat->channel_invalid = true;
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
@@ -2611,12 +2613,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
else
rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
- if (rxsc >= 9 && rxsc <= 12)
+ if (rxsc == 0)
+ bw = rtwdev->hal.current_band_width;
+ else if (rxsc >= 1 && rxsc <= 8)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if (rxsc >= 9 && rxsc <= 12)
bw = RTW_CHANNEL_WIDTH_40;
- else if (rxsc >= 13)
- bw = RTW_CHANNEL_WIDTH_80;
else
- bw = RTW_CHANNEL_WIDTH_20;
+ bw = RTW_CHANNEL_WIDTH_80;
channel = GET_PHY_STAT_P1_CHANNEL(phy_status);
rtw_set_rx_freq_band(pkt_stat, channel);
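The rewritten classifier decodes the RX subchannel (rxsc) field as: 0 means the frame occupied the full configured bandwidth, 1-8 a 20 MHz subchannel, 9-12 a 40 MHz one, and anything larger 80 MHz. The same mapping restated as a standalone function:

enum bw { BW_OF_CURRENT_CHANNEL, BW_20, BW_40, BW_80 };

static enum bw rxsc_to_bw(unsigned int rxsc)
{
	if (rxsc == 0)
		return BW_OF_CURRENT_CHANNEL;	/* frame spanned the whole channel */
	if (rxsc <= 8)
		return BW_20;
	if (rxsc <= 12)
		return BW_40;
	return BW_80;
}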
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 1bc0e7f5d6bb..e2b383d633cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -59,16 +59,18 @@ struct rtw8822ce_efuse {
struct rtw8822c_efuse {
__le16 rtl_id;
- u8 res0[0x0e];
+ u8 res0[4];
+ u8 usb_mode;
+ u8 res1[0x09];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
- u8 res1;
+ u8 res2;
u8 iqk_lck;
- u8 res2[5]; /* 0xbc */
+ u8 res3[5]; /* 0xbc */
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
@@ -80,21 +82,21 @@ struct rtw8822c_efuse {
u8 rf_antenna_option; /* 0xc9 */
u8 rfe_option;
u8 country_code[2];
- u8 res3[3];
+ u8 res4[3];
u8 path_a_thermal; /* 0xd0 */
u8 path_b_thermal;
- u8 res4[2];
+ u8 res5[2];
u8 rx_gain_gap_2g_ofdm;
- u8 res5;
- u8 rx_gain_gap_2g_cck;
u8 res6;
- u8 rx_gain_gap_5gl;
+ u8 rx_gain_gap_2g_cck;
u8 res7;
- u8 rx_gain_gap_5gm;
+ u8 rx_gain_gap_5gl;
u8 res8;
- u8 rx_gain_gap_5gh;
+ u8 rx_gain_gap_5gm;
u8 res9;
- u8 res10[0x42];
+ u8 rx_gain_gap_5gh;
+ u8 res10;
+ u8 res11[0x42];
union {
struct rtw8822ce_efuse e;
struct rtw8822cu_efuse u;
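Both efuse maps are edited the same way: res0 shrinks from 0x0e to 4 bytes so usb_mode lands at offset 6, and the renumbered res fields keep every later member (txpwr table at 0x10, channel_plan at 0xb8) at its documented offset. A hypothetical compile-time check for such layout invariants, assuming a 2-byte rtl_id and no padding:

#include <stddef.h>
#include <assert.h>

struct efuse_map {
	unsigned short rtl_id;			/* 0x00 */
	unsigned char res0[4];			/* 0x02 */
	unsigned char usb_mode;			/* 0x06 */
	unsigned char res1[0x09];		/* 0x07 */
	unsigned char txpwr[0xa8];		/* 0x10, stand-in for the table */
	unsigned char channel_plan;		/* 0xb8 */
};

static_assert(offsetof(struct efuse_map, usb_mode) == 0x06, "usb_mode moved");
static_assert(offsetof(struct efuse_map, txpwr) == 0x10, "txpwr table moved");
static_assert(offsetof(struct efuse_map, channel_plan) == 0xb8, "layout drifted");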
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 84aedabdf285..66f9419588cf 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -146,6 +146,47 @@ static void rtw_set_rx_freq_by_pktstat(struct rtw_rx_pkt_stat *pkt_stat,
rx_status->band = pkt_stat->band;
}
+void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ int channel = rtwdev->hal.current_channel;
+ size_t hdr_len, ielen;
+ int channel_number;
+ u8 *variable;
+
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ goto fill_rx_status;
+
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ variable = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ variable = mgmt->u.probe_resp.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ goto fill_rx_status;
+ }
+
+ if (skb->len > hdr_len)
+ ielen = skb->len - hdr_len;
+ else
+ goto fill_rx_status;
+
+ channel_number = cfg80211_get_ies_channel_number(variable, ielen,
+ NL80211_BAND_2GHZ);
+ if (channel_number != -1)
+ channel = channel_number;
+
+fill_rx_status:
+ rtw_set_rx_freq_band(pkt_stat, channel);
+ rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status);
+}
+EXPORT_SYMBOL(rtw_update_rx_freq_from_ie);
+
void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr,
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index d3668c4efc24..9f0019112987 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -41,7 +41,7 @@ enum rtw_rx_desc_enc {
#define GET_RX_DESC_TSFL(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
#define GET_RX_DESC_BW(rxdesc) \
- (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24)))
+ (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(5, 4)))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb);
@@ -50,5 +50,18 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u8 *phy_status);
+void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat);
+
+static inline
+void rtw_update_rx_freq_for_invalid(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ if (pkt_stat->channel_invalid)
+ rtw_update_rx_freq_from_ie(rtwdev, skb, rx_status, pkt_stat);
+}
+
#endif
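rtw_update_rx_freq_from_ie() recovers the channel from beacon or probe-response IEs only while scanning, since that is when the PHY status channel may be invalid; cfg80211_get_ies_channel_number() does the IE walk and returns -1 when no DS/HT channel IE is present. A standalone sketch of how the variable-IE region is located, mirroring the offsetof() arithmetic above:

#include <stddef.h>

struct beacon_frame {
	unsigned char hdr[24];		/* 802.11 management header */
	unsigned char fixed[12];	/* timestamp + interval + capability */
	unsigned char variable[];	/* information elements start here */
};

static int beacon_ie_region(const unsigned char *frame, size_t frame_len,
			    const unsigned char **ies, size_t *ielen)
{
	size_t hdr_len = offsetof(struct beacon_frame, variable);

	if (frame_len <= hdr_len)	/* no room for any IE */
		return -1;
	*ies = frame + hdr_len;
	*ielen = frame_len - hdr_len;
	return 0;
}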
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 0cae5746f540..21d0754dd7f6 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -948,6 +948,7 @@ static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb,
skb_put(skb, pkt_stat->pkt_len);
skb_reserve(skb, pkt_offset);
+ rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat);
rtw_rx_stats(rtwdev, pkt_stat->vif, skb);
ieee80211_rx_irqsafe(rtwdev->hw, skb);
@@ -1156,6 +1157,7 @@ static struct rtw_hci_ops rtw_sdio_ops = {
.deep_ps = rtw_sdio_deep_ps,
.link_ps = rtw_sdio_link_ps,
.interface_cfg = rtw_sdio_interface_cfg,
+ .dynamic_rx_agg = NULL,
.read8 = rtw_sdio_read8,
.read16 = rtw_sdio_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index c02ac673be32..dae7ca148865 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -46,7 +46,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
le32_encode_bits(pkt_info->ls, RTW_TX_DESC_W0_LS) |
le32_encode_bits(pkt_info->dis_qselseq, RTW_TX_DESC_W0_DISQSELSEQ);
- tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
+ tx_desc->w1 = le32_encode_bits(pkt_info->mac_id, RTW_TX_DESC_W1_MACID) |
+ le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
le32_encode_bits(pkt_info->rate_id, RTW_TX_DESC_W1_RATE_ID) |
le32_encode_bits(pkt_info->sec_type, RTW_TX_DESC_W1_SEC_TYPE) |
le32_encode_bits(pkt_info->pkt_offset, RTW_TX_DESC_W1_PKT_OFFSET) |
@@ -401,14 +402,18 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
struct rtw_sta_info *si;
- struct ieee80211_vif *vif = NULL;
+ struct rtw_vif *rtwvif;
__le16 fc = hdr->frame_control;
bool bmc;
if (sta) {
si = (struct rtw_sta_info *)sta->drv_priv;
- vif = si->vif;
+ pkt_info->mac_id = si->mac_id;
+ } else if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ pkt_info->mac_id = rtwvif->mac_id;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 324189606257..3d544fd7f60f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -27,6 +27,7 @@ struct rtw_tx_desc {
#define RTW_TX_DESC_W0_BMC BIT(24)
#define RTW_TX_DESC_W0_LS BIT(26)
#define RTW_TX_DESC_W0_DISQSELSEQ BIT(31)
+#define RTW_TX_DESC_W1_MACID GENMASK(7, 0)
#define RTW_TX_DESC_W1_QSEL GENMASK(12, 8)
#define RTW_TX_DESC_W1_RATE_ID GENMASK(20, 16)
#define RTW_TX_DESC_W1_SEC_TYPE GENMASK(23, 22)
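Word 1 of the TX descriptor now carries the MAC ID in bits 7:0 next to QSEL in bits 12:8; le32_encode_bits() masks, shifts, and converts to little-endian in one step. The same packing restated with plain shifts (byte-order conversion omitted):

#include <stdint.h>

#define W1_MACID_SHIFT	0	/* GENMASK(7, 0) */
#define W1_QSEL_SHIFT	8	/* GENMASK(12, 8) */

static uint32_t pack_txdesc_w1(uint32_t mac_id, uint32_t qsel)
{
	/* Callers must keep each value within its field width. */
	return ((mac_id & 0xffu) << W1_MACID_SHIFT) |
	       ((qsel & 0x1fu) << W1_QSEL_SHIFT);
}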
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index a55ca5a24227..e83ab6fb83f5 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -14,6 +14,11 @@
#include "ps.h"
#include "usb.h"
+static bool rtw_switch_usb_mode = true;
+module_param_named(switch_usb_mode, rtw_switch_usb_mode, bool, 0644);
+MODULE_PARM_DESC(switch_usb_mode,
+ "Set to N to disable switching to USB 3 mode to avoid potential interference in the 2.4 GHz band (default: Y)");
+
#define RTW_USB_MAX_RXQ_LEN 512
struct rtw_usb_txcb {
@@ -541,11 +546,12 @@ static void rtw_usb_rx_handler(struct work_struct *work)
struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
struct rtw_dev *rtwdev = rtwusb->rtwdev;
const struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_rx_pkt_stat pkt_stat;
+ u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
struct ieee80211_rx_status rx_status;
+ u32 pkt_offset, next_pkt, urb_len;
+ struct rtw_rx_pkt_stat pkt_stat;
+ struct sk_buff *next_skb;
struct sk_buff *skb;
- u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
- u32 pkt_offset;
u8 *rx_desc;
int limit;
@@ -554,28 +560,48 @@ static void rtw_usb_rx_handler(struct work_struct *work)
if (!skb)
break;
- rx_desc = skb->data;
- chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat,
- &rx_status);
- pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
- pkt_stat.shift;
-
- if (pkt_stat.is_c2h) {
- skb_put(skb, pkt_stat.pkt_len + pkt_offset);
- rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
- continue;
- }
-
if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
dev_kfree_skb_any(skb);
continue;
}
- skb_put(skb, pkt_stat.pkt_len);
- skb_reserve(skb, pkt_offset);
- memcpy(skb->cb, &rx_status, sizeof(rx_status));
- ieee80211_rx_irqsafe(rtwdev->hw, skb);
+ urb_len = skb->len;
+
+ do {
+ rx_desc = skb->data;
+ chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat,
+ &rx_status);
+ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
+ pkt_stat.shift;
+
+ next_pkt = round_up(pkt_stat.pkt_len + pkt_offset, 8);
+
+ if (urb_len >= next_pkt + pkt_desc_sz)
+ next_skb = skb_clone(skb, GFP_KERNEL);
+ else
+ next_skb = NULL;
+
+ if (pkt_stat.is_c2h) {
+ skb_trim(skb, pkt_stat.pkt_len + pkt_offset);
+ rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
+ } else {
+ skb_pull(skb, pkt_offset);
+ skb_trim(skb, pkt_stat.pkt_len);
+ rtw_update_rx_freq_for_invalid(rtwdev, skb,
+ &rx_status,
+ &pkt_stat);
+ rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ memcpy(skb->cb, &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(rtwdev->hw, skb);
+ }
+
+ skb = next_skb;
+ if (skb)
+ skb_pull(skb, next_pkt);
+
+ urb_len -= next_pkt;
+ } while (skb);
}
}
@@ -619,6 +645,7 @@ static void rtw_usb_read_port_complete(struct urb *urb)
if (skb)
dev_kfree_skb_any(skb);
} else {
+ skb_put(skb, urb->actual_length);
skb_queue_tail(&rtwusb->rx_queue, skb);
queue_work(rtwusb->rxwq, &rtwusb->rx_work);
}
@@ -713,9 +740,69 @@ static void rtw_usb_link_ps(struct rtw_dev *rtwdev, bool enter)
/* empty function for rtw_hci_ops */
}
+static void rtw_usb_init_burst_pkt_len(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ enum usb_device_speed speed = rtwusb->udev->speed;
+ u8 rxdma, burst_size;
+
+ rxdma = BIT_DMA_BURST_CNT | BIT_DMA_MODE;
+
+ if (speed == USB_SPEED_SUPER)
+ burst_size = BIT_DMA_BURST_SIZE_1024;
+ else if (speed == USB_SPEED_HIGH)
+ burst_size = BIT_DMA_BURST_SIZE_512;
+ else
+ burst_size = BIT_DMA_BURST_SIZE_64;
+
+ u8p_replace_bits(&rxdma, burst_size, BIT_DMA_BURST_SIZE);
+
+ rtw_write8(rtwdev, REG_RXDMA_MODE, rxdma);
+ rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
+}
+
static void rtw_usb_interface_cfg(struct rtw_dev *rtwdev)
{
- /* empty function for rtw_hci_ops */
+ rtw_usb_init_burst_pkt_len(rtwdev);
+}
+
+static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 size, timeout;
+ u16 val16;
+
+ rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);
+ rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
+ rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
+
+ if (enable) {
+ size = 0x5;
+ timeout = 0x20;
+ } else {
+ size = 0x0;
+ timeout = 0x1;
+ }
+ val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
+ u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);
+
+ rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
+}
+
+static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
+{
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8822C:
+ case RTW_CHIP_TYPE_8822B:
+ case RTW_CHIP_TYPE_8821C:
+ rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
+ break;
+ case RTW_CHIP_TYPE_8723D:
+ /* Doesn't like aggregation. */
+ break;
+ case RTW_CHIP_TYPE_8703B:
+ /* Likely not found in USB devices. */
+ break;
+ }
}
static struct rtw_hci_ops rtw_usb_ops = {
@@ -727,6 +814,7 @@ static struct rtw_hci_ops rtw_usb_ops = {
.deep_ps = rtw_usb_deep_ps,
.link_ps = rtw_usb_link_ps,
.interface_cfg = rtw_usb_interface_cfg,
+ .dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
.write8 = rtw_usb_write8,
.write16 = rtw_usb_write16,
@@ -841,6 +929,77 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
+static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
+{
+ enum usb_device_speed cur_speed;
+ u8 id = rtwdev->chip->id;
+ bool can_switch;
+ u32 pad_ctrl2;
+
+ if (rtw_read8(rtwdev, REG_SYS_CFG2 + 3) == 0x20)
+ cur_speed = USB_SPEED_SUPER;
+ else
+ cur_speed = USB_SPEED_HIGH;
+
+ if (cur_speed == USB_SPEED_SUPER)
+ return 0;
+
+ pad_ctrl2 = rtw_read32(rtwdev, REG_PAD_CTRL2);
+
+ can_switch = !!(pad_ctrl2 & (BIT_MASK_USB23_SW_MODE_V1 |
+ BIT_USB3_USB2_TRANSITION));
+
+ if (!can_switch) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode unsupported by the chip\n");
+ return 0;
+ }
+
+ /* At this point cur_speed is USB_SPEED_HIGH. If we already tried
+ * to switch, don't try again - it's a USB 2 port.
+ */
+ if (u32_get_bits(pad_ctrl2, BIT_MASK_USB23_SW_MODE_V1) == BIT_USB_MODE_U3)
+ return 0;
+
+ /* Enable IO wrapper timeout */
+ if (id == RTW_CHIP_TYPE_8822B || id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SW_MDIO + 3, BIT(0));
+
+ u32p_replace_bits(&pad_ctrl2, BIT_USB_MODE_U3, BIT_MASK_USB23_SW_MODE_V1);
+ pad_ctrl2 |= BIT_RSM_EN_V1;
+
+ rtw_write32(rtwdev, REG_PAD_CTRL2, pad_ctrl2);
+ rtw_write8(rtwdev, REG_PAD_CTRL2 + 1, 4);
+
+ rtw_write16_set(rtwdev, REG_SYS_PW_CTRL, BIT_APFM_OFFMAC);
+ usleep_range(1000, 1001);
+ rtw_write32_set(rtwdev, REG_PAD_CTRL2, BIT_NO_PDN_CHIPOFF_V1);
+
+ return 1;
+}
+
+static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
+{
+ u8 id = rtwdev->chip->id;
+
+ if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
+ return 0;
+
+ if (!rtwdev->efuse.usb_mode_switch) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode disabled by chip's efuse\n");
+ return 0;
+ }
+
+ if (!rtw_switch_usb_mode) {
+ rtw_dbg(rtwdev, RTW_DBG_USB,
+ "Switching to USB 3 mode disabled by module parameter\n");
+ return 0;
+ }
+
+ return rtw_usb_switch_mode_new(rtwdev);
+}
+
int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct rtw_dev *rtwdev;
@@ -896,6 +1055,14 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ ret = rtw_usb_switch_mode(rtwdev);
+ if (ret) {
+ /* Not a failure, but we do need to skip rtw_register_hw. */
+ rtw_dbg(rtwdev, RTW_DBG_USB, "switching to USB 3 mode\n");
+ ret = 0;
+ goto err_destroy_rxwq;
+ }
+
ret = rtw_register_hw(rtwdev, rtwdev->hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
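The reworked RX path accepts URBs holding several packets: each record is the descriptor plus payload rounded up to 8 bytes, and the skb is cloned before the head packet is consumed so the tail can still be walked. A standalone sketch of that walk, with parse() standing in for chip->ops->query_rx_desc():

#include <stddef.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

struct rxpkt { size_t hdr_len, data_len; };	/* decoded from the RX descriptor */

static void walk_rx_urb(const unsigned char *buf, size_t urb_len, size_t desc_sz,
			struct rxpkt (*parse)(const unsigned char *),
			void (*deliver)(const unsigned char *, size_t))
{
	for (;;) {
		struct rxpkt p = parse(buf);
		size_t next = ALIGN8(p.hdr_len + p.data_len);

		deliver(buf + p.hdr_len, p.data_len);

		/* Stop unless a complete next descriptor still fits. */
		if (urb_len < next + desc_sz)
			break;
		buf += next;
		urb_len -= next;
	}
}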
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 3c9f864805b1..d2a3361669d7 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -12,6 +12,7 @@ if RTW89
config RTW89_CORE
tristate
+ select WANT_DEV_COREDUMP
config RTW89_PCI
tristate
@@ -28,6 +29,9 @@ config RTW89_8852B_COMMON
config RTW89_8852B
tristate
+config RTW89_8852BT
+ tristate
+
config RTW89_8852C
tristate
@@ -68,6 +72,18 @@ config RTW89_8852BE
802.11ax PCIe wireless network (Wi-Fi 6) adapter
+config RTW89_8852BTE
+ tristate "Realtek 8852BE-VT PCI wireless network (Wi-Fi 6) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8852BT
+ select RTW89_8852B_COMMON
+ help
+ Selecting this option will enable support for the 8852BE-VT chipset
+
+ 802.11ax PCIe wireless network (Wi-Fi 6) adapter
+
config RTW89_8852CE
tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter"
depends on PCI
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 1f1050a7a89d..c751013e811e 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -52,6 +52,14 @@ rtw89_8852b-objs := rtw8852b.o \
obj-$(CONFIG_RTW89_8852BE) += rtw89_8852be.o
rtw89_8852be-objs := rtw8852be.o
+obj-$(CONFIG_RTW89_8852BT) += rtw89_8852bt.o
+rtw89_8852bt-objs := rtw8852bt.o \
+ rtw8852bt_rfk.o \
+ rtw8852bt_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852BTE) += rtw89_8852bte.o
+rtw89_8852bte-objs := rtw8852bte.o
+
obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o
rtw89_8852c-objs := rtw8852c.o \
rtw8852c_table.o \
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 4557c6e035a9..4476fc7e53db 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -384,20 +384,24 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
break;
case WLAN_CIPHER_SUITE_CCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_CCMP_256:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP256;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
case WLAN_CIPHER_SUITE_GCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_GCMP128;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
hw_key_type = RTW89_SEC_KEY_TYPE_GCMP256;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (!chip->hw_mgmt_tx_encrypt)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
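Chips that can encrypt unicast management frames in hardware now skip IEEE80211_KEY_FLAG_SW_MGMT_TX, leaving software encryption in mac80211 as the fallback only where hw_mgmt_tx_encrypt is unset. The gate, reduced to a standalone sketch:

#include <stdbool.h>

struct chip_caps { bool hw_mgmt_tx_encrypt; };

static unsigned int key_flags_for(const struct chip_caps *caps,
				  unsigned int flags,
				  unsigned int sw_mgmt_tx_flag)
{
	/* Fall back to software encryption only when hardware can't do it. */
	if (!caps->hw_mgmt_tx_encrypt)
		flags |= sw_mgmt_tx_flag;
	return flags;
}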
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 7f90d93dcdc0..7070c85e2c28 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -124,12 +124,12 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
}
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct rtw89_chan *new)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_chan *chan = &hal->sub[idx].chan;
- struct rtw89_chan_rcd *rcd = &hal->sub[idx].rcd;
+ struct rtw89_chan *chan = &hal->chanctx[idx].chan;
+ struct rtw89_chan_rcd *rcd = &hal->chanctx[idx].rcd;
bool band_changed;
rcd->prev_primary_channel = chan->primary_channel;
@@ -153,7 +153,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
lockdep_assert_held(&rtwdev->mutex);
- for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
chan = rtw89_chan_get(rtwdev, idx);
ret = iterator(chan, data);
if (ret)
@@ -164,36 +164,36 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
}
static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef,
bool from_stack)
{
struct rtw89_hal *hal = &rtwdev->hal;
- hal->sub[idx].chandef = *chandef;
+ hal->chanctx[idx].chandef = *chandef;
if (from_stack)
set_bit(idx, hal->entity_map);
}
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef)
{
__rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
}
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_sub_entity_idx cur;
+ enum rtw89_chanctx_idx cur;
if (chandef) {
- cur = atomic_cmpxchg(&hal->roc_entity_idx,
- RTW89_SUB_ENTITY_IDLE, idx);
- if (cur != RTW89_SUB_ENTITY_IDLE) {
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx,
+ RTW89_CHANCTX_IDLE, idx);
+ if (cur != RTW89_CHANCTX_IDLE) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"ROC still processing on entity %d\n", idx);
return;
@@ -201,12 +201,12 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
hal->roc_chandef = *chandef;
} else {
- cur = atomic_cmpxchg(&hal->roc_entity_idx, idx,
- RTW89_SUB_ENTITY_IDLE);
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
+ RTW89_CHANCTX_IDLE);
if (cur == idx)
return;
- if (cur == RTW89_SUB_ENTITY_IDLE)
+ if (cur == RTW89_CHANCTX_IDLE)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"ROC already finished on entity %d\n", idx);
else
@@ -220,7 +220,7 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
struct cfg80211_chan_def chandef = {0};
rtw89_get_default_chandef(&chandef);
- __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &chandef, false);
}
void rtw89_entity_init(struct rtw89_dev *rtwdev)
@@ -228,9 +228,9 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
hal->entity_pause = false;
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE);
+ atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE);
rtw89_config_default_chandef(rtwdev);
}
@@ -242,8 +242,8 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif;
int idx;
- for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
- cfg = hal->sub[idx].cfg;
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
+ cfg = hal->chanctx[idx].cfg;
if (!cfg) {
/* doesn't run with chanctx ops; one channel at most */
w->active_chanctxs = 1;
@@ -262,7 +262,7 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
- DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_CHANCTX) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
struct rtw89_entity_weight w = {};
@@ -272,23 +272,23 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
lockdep_assert_held(&rtwdev->mutex);
- bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX);
rtw89_entity_calculate_weight(rtwdev, &w);
switch (w.active_chanctxs) {
default:
rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
w.active_chanctxs);
- bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_CHANCTX);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
- set_bit(RTW89_SUB_ENTITY_0, recalc_map);
+ set_bit(RTW89_CHANCTX_0, recalc_map);
fallthrough;
case 1:
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ case 2 ... NUM_OF_RTW89_CHANCTX:
if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"unhandled ent: %d chanctxs %d roles\n",
@@ -304,7 +304,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_CHANCTX) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -650,7 +650,7 @@ static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev,
role->duration = role->beacon_interval / 2;
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
role->is_2ghz = chan->band_type == RTW89_BAND_2G;
role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO;
role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -678,10 +678,10 @@ static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev)
}
struct rtw89_mcc_fill_role_selector {
- struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_vif *bind_vif[NUM_OF_RTW89_CHANCTX];
};
-static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES);
+static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES);
static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
@@ -719,14 +719,14 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
if (!rtwvif->chanctx_assigned)
continue;
- if (sel.bind_vif[rtwvif->sub_entity_idx]) {
+ if (sel.bind_vif[rtwvif->chanctx_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
- rtwvif->mac_id, rtwvif->sub_entity_idx);
+ rtwvif->mac_id, rtwvif->chanctx_idx);
continue;
}
- sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif;
+ sel.bind_vif[rtwvif->chanctx_idx] = rtwvif;
}
ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
@@ -1390,7 +1390,7 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
const struct rtw89_chan *chan;
int ret;
- chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
req.central_ch_seg0 = chan->channel;
req.primary_ch = chan->primary_channel;
req.bandwidth = chan->band_width;
@@ -1448,7 +1448,7 @@ void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
slot_arg->duration = role->duration;
slot_arg->role_num = 1;
- chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx);
slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
slot_arg->roles[0].is_master = role == ref;
@@ -1934,22 +1934,53 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
return 0;
}
+struct rtw89_mcc_stop_sel {
+ u8 mac_id;
+ u8 slot_idx;
+};
+
+static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel,
+ const struct rtw89_mcc_role *mcc_role)
+{
+ sel->mac_id = mcc_role->rtwvif->mac_id;
+ sel->slot_idx = mcc_role->slot_idx;
+}
+
+static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *mcc_role,
+ unsigned int ordered_idx,
+ void *data)
+{
+ struct rtw89_mcc_stop_sel *sel = data;
+
+ if (!mcc_role->rtwvif->chanctx_assigned)
+ return 0;
+
+ rtw89_mcc_stop_sel_fill(sel, mcc_role);
+ return 1; /* break iteration */
+}
+
static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_stop_sel sel;
int ret;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
+ /* by default, stop at ref */
+ rtw89_mcc_stop_sel_fill(&sel, ref);
+ rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel);
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at <macid %d>\n", sel.mac_id);
if (rtw89_concurrent_via_mrc(rtwdev)) {
- ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group, sel.slot_idx);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MRC h2c failed to trigger del: %d\n", ret);
} else {
ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
+ sel.mac_id, true);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to trigger stop: %d\n", ret);
@@ -2339,9 +2370,9 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
-static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx1,
- enum rtw89_sub_entity_idx idx2)
+static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx idx1,
+ enum rtw89_chanctx_idx idx2)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_vif *rtwvif;
@@ -2350,25 +2381,25 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
if (idx1 == idx2)
return;
- hal->sub[idx1].cfg->idx = idx2;
- hal->sub[idx2].cfg->idx = idx1;
+ hal->chanctx[idx1].cfg->idx = idx2;
+ hal->chanctx[idx2].cfg->idx = idx1;
- swap(hal->sub[idx1], hal->sub[idx2]);
+ swap(hal->chanctx[idx1], hal->chanctx[idx2]);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (!rtwvif->chanctx_assigned)
continue;
- if (rtwvif->sub_entity_idx == idx1)
- rtwvif->sub_entity_idx = idx2;
- else if (rtwvif->sub_entity_idx == idx2)
- rtwvif->sub_entity_idx = idx1;
+ if (rtwvif->chanctx_idx == idx1)
+ rtwvif->chanctx_idx = idx2;
+ else if (rtwvif->chanctx_idx == idx2)
+ rtwvif->chanctx_idx = idx1;
}
- cur = atomic_read(&hal->roc_entity_idx);
+ cur = atomic_read(&hal->roc_chanctx_idx);
if (cur == idx1)
- atomic_set(&hal->roc_entity_idx, idx2);
+ atomic_set(&hal->roc_chanctx_idx, idx2);
else if (cur == idx2)
- atomic_set(&hal->roc_entity_idx, idx1);
+ atomic_set(&hal->roc_chanctx_idx, idx1);
}
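[Editor's note] rtw89_swap_chanctx() must do more than exchange the two hal->chanctx[] slots: every stored index that referred to either slot (the cfg->idx back-pointers, each vif's chanctx_idx, and the atomic roc_chanctx_idx) has to be remapped, or it would silently point at the wrong context after the swap. A self-contained sketch of the swap-and-remap pattern (slot and refs are illustrative, not driver types):

#include <stdio.h>

struct slot { int payload; };

static void swap_slots(struct slot *tbl, int *refs, int nrefs, int a, int b)
{
	struct slot tmp;
	int i;

	if (a == b)
		return;

	tmp = tbl[a];
	tbl[a] = tbl[b];
	tbl[b] = tmp;

	/* remap every stored index that pointed at either slot */
	for (i = 0; i < nrefs; i++) {
		if (refs[i] == a)
			refs[i] = b;
		else if (refs[i] == b)
			refs[i] = a;
	}
}

int main(void)
{
	struct slot tbl[2] = { { 100 }, { 200 } };
	int vif_idx[2] = { 0, 1 };

	swap_slots(tbl, vif_idx, 2, 0, 1);
	printf("%d %d\n", tbl[0].payload, vif_idx[0]); /* prints "200 1" */
	return 0;
}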
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
@@ -2379,14 +2410,14 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 idx;
- idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX);
if (idx >= chip->support_chanctx_num)
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
cfg->idx = idx;
cfg->ref_count = 0;
- hal->sub[idx].cfg = cfg;
+ hal->chanctx[idx].cfg = cfg;
return 0;
}
@@ -2419,19 +2450,19 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
struct rtw89_entity_weight w = {};
- rtwvif->sub_entity_idx = cfg->idx;
+ rtwvif->chanctx_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
cfg->ref_count++;
- if (cfg->idx == RTW89_SUB_ENTITY_0)
+ if (cfg->idx == RTW89_CHANCTX_0)
goto out;
rtw89_entity_calculate_weight(rtwdev, &w);
if (w.active_chanctxs != 1)
goto out;
- /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
- rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+ /* put the first active chanctx at RTW89_CHANCTX_0 */
+ rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0);
out:
return rtw89_set_channel(rtwdev);
@@ -2443,47 +2474,60 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_entity_weight w = {};
- enum rtw89_sub_entity_idx roll;
+ enum rtw89_chanctx_idx roll;
enum rtw89_entity_mode cur;
+ enum rtw89_entity_mode new;
+ int ret;
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+ rtwvif->chanctx_idx = RTW89_CHANCTX_0;
rtwvif->chanctx_assigned = false;
cfg->ref_count--;
if (cfg->ref_count != 0)
goto out;
- if (cfg->idx != RTW89_SUB_ENTITY_0)
+ if (cfg->idx != RTW89_CHANCTX_0)
goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX,
cfg->idx + 1);
/* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ if (roll == NUM_OF_RTW89_CHANCTX)
goto out;
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
+ /* RTW89_CHANCTX_0 is going to release, and another exists.
+ * Make another roll down to RTW89_CHANCTX_0 to replace.
*/
- rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+ rtw89_swap_chanctx(rtwdev, cfg->idx, roll);
out:
- rtw89_entity_calculate_weight(rtwdev, &w);
+ if (!hal->entity_pause) {
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ rtw89_mcc_stop(rtwdev);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = rtw89_set_channel(rtwdev);
+ if (ret)
+ return;
+
+ if (hal->entity_pause)
+ return;
- cur = rtw89_get_entity_mode(rtwdev);
- switch (cur) {
+ new = rtw89_get_entity_mode(rtwdev);
+ switch (new) {
case RTW89_ENTITY_MODE_MCC:
- /* If still multi-roles, re-plan MCC for chanctx changes.
- * Otherwise, just stop MCC.
- */
- rtw89_mcc_stop(rtwdev);
- if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
- rtw89_mcc_start(rtwdev);
+ /* re-plan MCC for chanctx changes. */
+ ret = rtw89_mcc_start(rtwdev);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
break;
default:
break;
}
-
- rtw89_set_channel(rtwdev);
}
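[Editor's note] The rework above changes the unassign ordering: MCC is stopped first, while the old chanctx layout is still live, then rtw89_set_channel() recalculates the entity state, and only then is MCC re-planned if the new mode still needs it; both steps are skipped while entity_pause is set. A runnable distillation of that ordering (the stubs stand in for the driver calls and are not its API):

#include <stdbool.h>
#include <stdio.h>

enum mode { MODE_SCC, MODE_MCC };
static enum mode get_mode(void) { return MODE_MCC; }
static void mcc_stop(void) { puts("mcc_stop"); }
static int set_channel(void) { puts("set_channel"); return 0; }
static int mcc_start(void) { puts("mcc_start"); return 0; }

static void unassign_flow(bool entity_pause)
{
	if (!entity_pause && get_mode() == MODE_MCC)
		mcc_stop(); /* stop while the old chanctx layout is live */

	if (set_channel()) /* recalc entity state, apply the new channel */
		return;

	if (entity_pause) /* paused: leave re-planning for later */
		return;

	if (get_mode() == MODE_MCC)
		mcc_start(); /* still multi-channel: re-plan MCC */
}

int main(void) { unassign_flow(false); return 0; }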
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 5278ff8c513b..c6d31984e575 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -76,17 +76,17 @@ static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev,
void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
enum rtw89_band band, enum rtw89_bandwidth bandwidth);
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct rtw89_chan *new);
int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
int (*iterator)(const struct rtw89_chan *chan,
void *data),
void *data);
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx,
+ enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 24929ef534e0..df51b29142aa 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,6 +129,13 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
+ .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1351,6 +1358,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
break;
+ } else if (ver->fcxbtcrpt == 7) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
+ break;
} else {
goto err;
}
@@ -1655,6 +1666,38 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 7) {
+ prpt->v7 = pfwinfo->rpt_ctrl.finfo.v7;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v7.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else if (ver->fcxbtcrpt == 8) {
prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
@@ -2397,7 +2440,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- if (btc->ver->fcxbtcrpt == 8) {
+ if (btc->ver->fcxbtcrpt == 7 || btc->ver->fcxbtcrpt == 8) {
r.v8.type = SET_REPORT_EN;
r.v8.fver = btc->ver->fcxbtcrpt;
r.v8.len = sizeof(r.v8.map);
@@ -2567,6 +2610,10 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
+ else if (ver->fwlrole == 7)
+ rtw89_fw_h2c_cxdrv_role_v7(rtwdev, type);
+ else if (ver->fwlrole == 8)
+ rtw89_fw_h2c_cxdrv_role_v8(rtwdev, type);
break;
case CXDRVINFO_CTRL:
if (ver->drvinfo_type == 1)
@@ -2748,7 +2795,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
}
-#define BTC_TDMA_WLROLE_MAX 2
+#define BTC_TDMA_WLROLE_MAX 3
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
{
@@ -2998,10 +3045,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_active_role_v7 *r7;
struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -3018,6 +3067,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 7) {
+ mode = wl_rinfo_v7->link_mode;
+ connect_cnt = wl_rinfo_v7->connect_cnt;
} else if (ver->fwlrole == 8) {
mode = wl_rinfo_v8->link_mode;
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -3036,6 +3088,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3056,6 +3109,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ (r7->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r7->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
(rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
@@ -3071,6 +3130,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3088,6 +3148,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ r7->connected && r7->band == RTW89_BAND_2G) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
ch = rlink->ch;
@@ -3146,6 +3211,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -3164,6 +3230,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
connect_cnt = wl_rinfo_v1->connect_cnt;
else if (ver->fwlrole == 2)
connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 7)
+ connect_cnt = wl_rinfo_v7->connect_cnt;
else if (ver->fwlrole == 8)
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -4082,6 +4150,8 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
dbcc_chg = wl->role_info_v1.dbcc_chg;
else if (btc->ver->fwlrole == 2)
dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 7)
+ dbcc_chg = wl->role_info_v7.dbcc_chg;
else if (btc->ver->fwlrole == 8)
dbcc_chg = wl->role_info_v8.dbcc_chg;
@@ -4754,6 +4824,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
@@ -4775,6 +4846,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ wl_rinfo.link_mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
@@ -4790,6 +4863,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 7) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy;
} else if (ver->fwlrole == 8) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
@@ -4835,37 +4910,56 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- u8 is_preagc, val;
+ u8 is_preagc, val, link_mode, dbcc_2g_phy;
+ u8 role_ver = btc->ver->fwlrole;
+ bool dbcc_en;
if (btc->manual_ctrl)
return;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (role_ver == 2) {
+ dbcc_en = rinfo_v2->dbcc_en;
+ link_mode = rinfo_v2->link_mode;
+ dbcc_2g_phy = rinfo_v2->dbcc_2g_phy;
+ } else if (role_ver == 7) {
+ dbcc_en = rinfo_v7->dbcc_en;
+ link_mode = rinfo_v7->link_mode;
+ dbcc_2g_phy = rinfo_v7->dbcc_2g_phy;
+ } else if (role_ver == 8) {
+ dbcc_en = rinfo_v8->dbcc_en;
+ link_mode = rinfo_v8->link_mode;
+ dbcc_2g_phy = rinfo_v8->dbcc_2g_phy;
+ } else {
+ return;
+ }
+
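+ /* decision chain: 2.5G MCC hands pre-AGC over to FW control; any
+ * other matching condition disables it, and it is only enabled when
+ * nothing matches and the antenna is dedicated (not shared)
+ */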
+ if (link_mode == BTC_WLINK_25G_MCC) {
is_preagc = BTC_PREAGC_BB_FWCTRL;
- else if (!(bt->run_patch_code && bt->enable.now))
+ } else if (!(bt->run_patch_code && bt->enable.now)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_5G)
+ } else if (link_mode == BTC_WLINK_5G) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- btc->cx.bt.link_info.profile_cnt.now == 0)
+ } else if (link_mode == BTC_WLINK_NOLINK ||
+ btc->cx.bt.link_info.profile_cnt.now == 0) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (dm->tdma_now.type != CXTDMA_OFF &&
+ } else if (dm->tdma_now.type != CXTDMA_OFF &&
!bt_linfo->hfp_desc.exist &&
!bt_linfo->hid_desc.exist &&
- dm->fddt_train == BTC_FDDT_DISABLE)
+ dm->fddt_train == BTC_FDDT_DISABLE) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
- wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
+ } else if (dbcc_en && (dbcc_2g_phy != RTW89_PHY_1)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->ant_type == BTC_ANT_SHARED)
+ } else if (btc->ant_type == BTC_ANT_SHARED) {
is_preagc = BTC_PREAGC_DISABLE;
- else
+ } else {
is_preagc = BTC_PREAGC_ENABLE;
+ }
if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
@@ -4968,6 +5062,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
@@ -4984,6 +5079,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5043,6 +5140,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
@@ -5054,6 +5152,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5359,15 +5459,26 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ u32 dur, mrole_type, mrole_noa_duration;
u16 policy_type = BTC_CXP_OFF_BT;
- u32 dur;
+
+ if (btc->ver->fwlrole == 2) {
+ mrole_type = rinfo_v2->mrole_type;
+ mrole_noa_duration = rinfo_v2->mrole_noa_duration;
+ } else if (btc->ver->fwlrole == 7) {
+ mrole_type = rinfo_v7->mrole_type;
+ mrole_noa_duration = rinfo_v7->mrole_noa_duration;
+ } else {
+ return;
+ }
if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
- switch (wl_rinfo->mrole_type) {
+ switch (mrole_type) {
case BTC_WLMROLE_STA_GC:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -5385,7 +5496,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
case BTC_WLMROLE_STA_GO_NOA:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
- dur = wl_rinfo->mrole_noa_duration;
+ dur = mrole_noa_duration;
if (wl->status.map._4way) {
dm->wl_scc.ebt_null = 0;
@@ -5567,6 +5678,14 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
return next_state;
}
+static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
+{
+ if (mac == RTW89_MAC_0)
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+ else
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+}
+
static
void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -6065,12 +6184,20 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
u8 *phy, u8 *role, u8 *dbcc_2g_phy)
{
struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
- u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt;
+
+ if (rtwdev->btc.ver->fwlrole == 7)
+ connect_cnt = rinfo_v7->connect_cnt;
+ else if (rtwdev->btc.ver->fwlrole == 8)
+ connect_cnt = rinfo_v8->connect_cnt;
+ else
+ return BTC_WLINK_NOLINK;
/* find out the 2G-PHY by connect-id ->ch */
- for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ for (j = 0; j < connect_cnt; j++) {
if (ch[j].center_ch <= 14) {
is_2g_ch_exist = true;
break;
@@ -6085,11 +6212,11 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
*dbcc_2g_phy = phy[dbcc_2g_cid];
/* connect_cnt <= 2 */
- if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ if (connect_cnt < BTC_TDMA_WLROLE_MAX)
return (_get_role_link_mode((role[dbcc_2g_cid])));
/* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */
- for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ for (k = 0; k < connect_cnt; k++) {
if (k == dbcc_2g_cid)
continue;
@@ -6116,29 +6243,54 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
static void _update_role_link_mode(struct rtw89_dev *rtwdev,
bool client_joined, u32 noa)
{
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &rtwdev->btc.cx.wl.role_info_v7;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
u32 type = BTC_WLMROLE_NONE, dur = 0;
- u32 wl_role = wl_rinfo->role_map;
+ u8 link_mode, connect_cnt;
+ u32 wl_role;
+
+ if (role_ver == 7) {
+ wl_role = rinfo_v7->role_map;
+ link_mode = rinfo_v7->link_mode;
+ connect_cnt = rinfo_v7->connect_cnt;
+ } else if (role_ver == 8) {
+ wl_role = rinfo_v8->role_map;
+ link_mode = rinfo_v8->link_mode;
+ connect_cnt = rinfo_v8->connect_cnt;
+ } else {
+ return;
+ }
/* if no client_joined, don't care P2P-GO/AP role */
if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
- if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
- wl_rinfo->link_mode = BTC_WLINK_2G_STA;
- wl_rinfo->connect_cnt--;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
- wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
- wl_rinfo->link_mode = BTC_WLINK_NOLINK;
- wl_rinfo->connect_cnt--;
+ if (link_mode == BTC_WLINK_2G_SCC) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v8->connect_cnt--;
+ }
+ } else if (link_mode == BTC_WLINK_2G_GO ||
+ link_mode == BTC_WLINK_2G_AP) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v8->connect_cnt--;
+ }
}
}
/* Identify 2-Role type */
- if (wl_rinfo->connect_cnt >= 2 &&
- (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if (connect_cnt >= 2 &&
+ (link_mode == BTC_WLINK_2G_SCC ||
+ link_mode == BTC_WLINK_2G_MCC ||
+ link_mode == BTC_WLINK_25G_MCC ||
+ link_mode == BTC_WLINK_5G)) {
if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP)))
type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
@@ -6150,8 +6302,167 @@ static void _update_role_link_mode(struct rtw89_dev *rtwdev,
dur = noa;
}
- wl_rinfo->mrole_type = type;
- wl_rinfo->mrole_noa_duration = dur;
+ if (role_ver == 7) {
+ rinfo_v7->mrole_type = type;
+ rinfo_v7->mrole_noa_duration = dur;
+ } else if (role_ver == 8) {
+ rinfo_v8->mrole_type = type;
+ rinfo_v8->mrole_noa_duration = dur;
+ }
+}
+
+static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
+{
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo = &wl->role_info_v7;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
+ u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 mac = RTW89_MAC_0, dbcc_2g_phy = RTW89_PHY_0;
+ u32 noa_duration = 0;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ continue;
+
+ act_role = &wl_rinfo->active_role[i];
+ act_role->role = wl_linfo[i].role;
+
+ /* check whether this role is connected */
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ act_role->connected = 0;
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ continue;
+ }
+
+ cnt++;
+ act_role->connected = 1;
+ act_role->pid = wl_linfo[i].pid;
+ act_role->phy = wl_linfo[i].phy;
+ act_role->band = wl_linfo[i].band;
+ act_role->ch = wl_linfo[i].ch;
+ act_role->bw = wl_linfo[i].bw;
+ act_role->noa = wl_linfo[i].noa;
+ act_role->noa_dur = wl_linfo[i].noa_duration;
+ cid_ch[cnt - 1] = wl_linfo[i].chdef;
+ cid_phy[cnt - 1] = wl_linfo[i].phy;
+ cid_role[cnt - 1] = wl_linfo[i].role;
+ wl_rinfo->role_map |= BIT(wl_linfo[i].role);
+
+ if (rid == i)
+ phy_now = act_role->phy;
+
+ if (wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) {
+ if (wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ if (client_cnt_last[i] < wl_linfo[i].client_cnt &&
+ wl_linfo[i].chdef.band == RTW89_BAND_2G)
+ client_inc_2g = true;
+ act_role->client_cnt = wl_linfo[i].client_cnt;
+ } else {
+ act_role->client_cnt = 0;
+ }
+
+ if (act_role->noa && act_role->noa_dur > 0)
+ noa_duration = act_role->noa_dur;
+
+ if (rtwdev->dbcc_en) {
+ phy_dbcc = wl_linfo[i].phy;
+ wl_dinfo->role[phy_dbcc] |= BIT(wl_linfo[i].role);
+ wl_dinfo->op_band[phy_dbcc] = wl_linfo[i].chdef.band;
+ }
+
+ if (wl_linfo[i].chdef.band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ client_joined) ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ wl_rinfo->p2p_2g = 1;
+
+ if ((wl_linfo[i].mode & BIT(BTC_WL_MODE_11B)) ||
+ (wl_linfo[i].mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+ else if (wl_linfo[i].mode & BIT(BTC_WL_MODE_HE))
+ wl->he_mode = true;
+
+ cnt_2g++;
+ b2g = true;
+ }
+
+ if (act_role->band == RTW89_BAND_5G && act_role->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+ else
+ wl->is_5g_hi_channel = 0;
+ }
+
+ wl_rinfo->connect_cnt = cnt;
+ wl->client_cnt_inc_2g = client_inc_2g;
+
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (rtwdev->dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
+
+ /* correct 2G-located PHY band for gnt ctrl */
+ if (dbcc_2g_phy < RTW89_PHY_MAX)
+ wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ } else {
+ mode = BTC_WLINK_NOLINK;
+ }
+
+ wl_rinfo->link_mode = mode;
+ _update_role_link_mode(rtwdev, client_joined, noa_duration);
+
+ /* TODO: handle DBCC-related events */
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
+
+ if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_chg = 1;
+ wl_rinfo->dbcc_en = rtwdev->dbcc_en;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ }
+
+ if (rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+
+ if (dbcc_2g_phy == RTW89_PHY_1)
+ mac = RTW89_MAC_1;
+
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+ _wl_req_mac(rtwdev, mac);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
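[Editor's note] _update_wl_info_v7() classifies the link mode from the aggregate role counts, and the order of the tests matters (5G-only is checked before NAN, DBCC before the 2G SCC/MCC split). A standalone sketch of that decision order over the same inputs (classify() and its enum are hypothetical; the constants mirror the driver):

#include <stdio.h>

enum mode { NOLINK, M_5G, NAN_2G, OTHER, DBCC, MCC_25G, TWO_ON_2G, SINGLE };

static enum mode classify(int cnt, int b2g, int b5g, int has_nan,
			  int dbcc_en, int max_tdma_roles)
{
	if (cnt == 0)
		return NOLINK;
	if (!b2g && b5g)
		return M_5G;
	if (has_nan)
		return NAN_2G;
	if (cnt > max_tdma_roles)
		return OTHER; /* too many roles to plan TDMA */
	if (dbcc_en)
		return DBCC; /* refined further by _chk_dbcc() */
	if (b2g && b5g && cnt == 2)
		return MCC_25G;
	if (!b5g && cnt == 2)
		return TWO_ON_2G; /* SCC if same channel group, else MCC */
	if (!b5g && cnt == 1)
		return SINGLE; /* per-role link mode */
	return NOLINK;
}

int main(void)
{
	printf("%d\n", classify(2, 1, 1, 0, 0, 3)); /* MCC_25G */
	return 0;
}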
static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
@@ -6496,6 +6807,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
@@ -6511,6 +6823,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -6657,7 +6971,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc(rtwdev);
else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ else if (ver->fwlrole == 2 || ver->fwlrole == 7)
_action_wl_2g_scc_v2(rtwdev);
else if (ver->fwlrole == 8)
_action_wl_2g_scc_v8(rtwdev);
@@ -7169,7 +7483,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
@@ -7250,6 +7564,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
} else if (ver->fwlrole == 2) {
*wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 7) {
+ *wlinfo = r;
+ _update_wl_info_v7(rtwdev, r.pid);
} else if (ver->fwlrole == 8) {
wlinfo = &wl->rlink_info[r.pid][rlink_id];
*wlinfo = r;
@@ -7856,6 +8173,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
@@ -7870,6 +8188,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -10288,6 +10608,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
@@ -10440,6 +10862,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 7)
+ _show_summary_v7(rtwdev, m);
else if (ver->fcxbtcrpt == 8)
_show_summary_v8(rtwdev, m);
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 0e5f268616f7..de53b56632f7 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -193,6 +193,8 @@ enum btc_wa_type {
BTC_WA_5G_HI_CH_RX = BIT(0),
BTC_WA_NULL_AP = BIT(1),
BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 define issue */
+ BTC_WA_HFP_LAG = BIT(3), /* 52BT WL break BT Rx lag issue */
+ BTC_WA_INIT_SCAN = BIT(4) /* 52A/C/D init scan move to wl slot WA */
};
enum btc_3cx_type {
@@ -289,9 +291,10 @@ void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
- enum rtw89_rf_path_bit paths)
+ enum rtw89_rf_path_bit paths,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 phy_map;
phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
@@ -303,9 +306,10 @@ static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path));
+ return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path), chanctx_idx);
}
/* return bt req len in TU */
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7019f7d482a8..4553810634c6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -346,8 +346,8 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan *chan;
- enum rtw89_sub_entity_idx sub_entity_idx;
- enum rtw89_sub_entity_idx roc_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
+ enum rtw89_chanctx_idx roc_idx;
enum rtw89_phy_idx phy_idx;
enum rtw89_entity_mode mode;
bool entity_active;
@@ -360,22 +360,22 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_SCC:
case RTW89_ENTITY_MODE_MCC:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
return;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
- if (roc_idx != RTW89_SUB_ENTITY_IDLE)
- sub_entity_idx = roc_idx;
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
+ if (roc_idx != RTW89_CHANCTX_IDLE)
+ chanctx_idx = roc_idx;
phy_idx = RTW89_PHY_0;
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
@@ -385,8 +385,8 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan_rcd *chan_rcd;
const struct rtw89_chan *chan;
- enum rtw89_sub_entity_idx sub_entity_idx;
- enum rtw89_sub_entity_idx roc_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
+ enum rtw89_chanctx_idx roc_idx;
enum rtw89_mac_idx mac_idx;
enum rtw89_phy_idx phy_idx;
struct rtw89_channel_help_params bak;
@@ -399,25 +399,25 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_SCC:
case RTW89_ENTITY_MODE_MCC:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
return -EINVAL;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
- if (roc_idx != RTW89_SUB_ENTITY_IDLE)
- sub_entity_idx = roc_idx;
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
+ if (roc_idx != RTW89_CHANCTX_IDLE)
+ chanctx_idx = roc_idx;
mac_idx = RTW89_MAC_0;
phy_idx = RTW89_PHY_0;
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
- chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ chan_rcd = rtw89_chan_rcd_get(rtwdev, chanctx_idx);
rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);
@@ -429,7 +429,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
if (!entity_active || chan_rcd->band_changed) {
rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
- rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
}
rtw89_set_entity_state(rtwdev, true);
@@ -441,7 +441,7 @@ void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
{
const struct cfg80211_chan_def *chandef;
- chandef = rtw89_chandef_get(rtwdev, rtwvif->sub_entity_idx);
+ chandef = rtw89_chandef_get(rtwdev, rtwvif->chanctx_idx);
rtw89_get_channel_params(chandef, chan);
}
@@ -602,15 +602,28 @@ static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
return rtwsta->mac_id;
}
+static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
+ desc_info->hdr_llc_len >>= 1; /* in units of 2 bytes */
+}
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
+ struct sk_buff *skb = tx_req->skb;
u8 qsel, ch_dma;
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
@@ -629,6 +642,11 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->dis_data_fb = true;
desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan);
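+ /* for chips that encrypt mgmt frames in HW, fill key info and the
+ * header+LLC length (in 2-byte units); presumably this tells HW where
+ * the protected payload begins
+ */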
+ if (chip->hw_mgmt_tx_encrypt && IEEE80211_SKB_CB(skb)->control.hw_key) {
+ rtw89_core_tx_update_sec_key(rtwdev, tx_req);
+ rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
desc_info->data_rate, chan->channel, chan->band_type,
@@ -769,7 +787,7 @@ static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
- enum rtw89_sub_entity_idx idx = rtwvif->sub_entity_idx;
+ enum rtw89_chanctx_idx idx = rtwvif->chanctx_idx;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx);
u16 lowest_rate;
@@ -862,17 +880,6 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
return PACKET_MAX;
}
-static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
- struct rtw89_tx_desc_info *desc_info,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (void *)skb->data;
- __le16 fc = hdr->frame_control;
-
- desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
- desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
-}
-
static void
rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -1449,16 +1456,20 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
return -EINVAL;
}
- /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set by hardware,
- * so update mac_id by rxinfo_user[].mac_id.
- */
- for (i = 0; i < usr_num && chip_gen == RTW89_CHIP_BE; i++) {
+ for (i = 0; i < usr_num; i++) {
user = &rxinfo->user[i];
if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID))
continue;
-
- phy_ppdu->mac_id =
- le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
+ /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set
+ * by hardware, so update mac_id by rxinfo_user[].mac_id.
+ */
+ if (chip_gen == RTW89_CHIP_BE)
+ phy_ppdu->mac_id =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
+ phy_ppdu->has_data =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA);
+ phy_ppdu->has_bcn =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN);
break;
}
@@ -1480,6 +1491,26 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
return 0;
}
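+/* Map a data rate to its spatial-stream count: non-HT is always 1SS,
+ * HT derives NSS from the MCS index (presumably mcs >> 3 inside
+ * rtw89_get_data_ht_nss(), e.g. HT MCS 11 -> (11 >> 3) + 1 = 2SS),
+ * and VHT/HE/EHT rates carry an explicit NSS field.
+ */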
+static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate)
+{
+ u8 data_rate_mode;
+
+ data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
+ switch (data_rate_mode) {
+ case DATA_RATE_MODE_NON_HT:
+ return 1;
+ case DATA_RATE_MODE_HT:
+ return rtw89_get_data_ht_nss(rtwdev, data_rate) + 1;
+ case DATA_RATE_MODE_VHT:
+ case DATA_RATE_MODE_HE:
+ case DATA_RATE_MODE_EHT:
+ return rtw89_get_data_nss(rtwdev, data_rate) + 1;
+ default:
+ rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
+ return 0;
+ }
+}
+
static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -1509,10 +1540,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
}
- if (phy_ppdu->ofdm.has) {
+ if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) {
ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr);
- ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
- ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
+ if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) {
+ ewma_evm_add(&rtwsta->evm_1ss, phy_ppdu->ofdm.evm_min);
+ } else {
+ ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
+ ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
+ }
}
}
@@ -1548,11 +1583,27 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
return ie_len;
}
+static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie01_v2 *ie;
+ u8 *rpl_fd = phy_ppdu->rpl_fd;
+
+ ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr;
+ rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
+ rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B);
+ rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C);
+ rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D);
+
+ phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
+}
+
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
const struct rtw89_phy_sts_iehdr *iehdr,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr;
+ const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr;
s16 cfo;
u32 t;
@@ -1563,12 +1614,17 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
}
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN);
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
if (!phy_ppdu->to_self)
return;
+ phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD);
phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
@@ -1584,6 +1640,39 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
}
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu);
+}
+
+static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr;
+ u16 tmp_rpl;
+
+ tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
+ phy_ppdu->rpl_avg = tmp_rpl >> 1;
+}
+
+static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00_v2 *ie;
+ u8 *rpl_path = phy_ppdu->rpl_path;
+ u16 tmp_rpl[RF_PATH_MAX];
+ u8 i;
+
+ ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr;
+ tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A);
+ tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B);
+ tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C);
+ tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D);
+
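+ /* raw RPL values appear to be in half-dB steps, hence the >> 1 */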
+ for (i = 0; i < RF_PATH_MAX; i++)
+ rpl_path[i] = tmp_rpl[i] >> 1;
}
static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
@@ -1595,6 +1684,11 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
switch (ie) {
+ case RTW89_PHYSTS_IE00_CMN_CCK:
+ rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu);
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu);
+ break;
case RTW89_PHYSTS_IE01_CMN_OFDM:
rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
break;
@@ -1605,6 +1699,13 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN;
+
+ phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN);
+}
+
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
@@ -1616,6 +1717,10 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
+
+ phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN);
+ if (phy_ppdu->hdr_2_en)
+ rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
@@ -1668,6 +1773,7 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
}
}
+ rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu);
rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
return 0;
@@ -1959,7 +2065,7 @@ static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
const struct rtw89_chan_rcd *rcd =
- rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
+ rtw89_chan_rcd_get(rtwdev, RTW89_CHANCTX_0);
u16 chan = rcd->prev_primary_channel;
u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type);
@@ -2363,7 +2469,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *rx_status)
{
const struct cfg80211_chan_def *chandef =
- rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
+ rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0);
u16 data_rate;
u8 data_rate_mode;
bool eht = false;
@@ -2856,7 +2962,7 @@ static void rtw89_core_sta_pending_tx_iter(void *data,
struct sk_buff *skb, *tmp;
int qsel, ret;
- if (rtwvif->sub_entity_idx != rtwvif_target->sub_entity_idx)
+ if (rtwvif->chanctx_idx != rtwvif_target->chanctx_idx)
return;
if (skb_queue_len(&rtwsta->roc_queue) == 0)
@@ -2950,11 +3056,11 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
"roc send null-1 failed: %d\n", ret);
rtw89_for_each_rtwvif(rtwdev, tmp)
- if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
+ if (tmp->chanctx_idx == rtwvif->chanctx_idx)
tmp->offchan = true;
cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
- rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan);
+ rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, &roc_chan);
rtw89_set_channel(rtwdev);
rtw89_write32_clr(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
@@ -2987,7 +3093,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwdev->hal.rx_fltr);
roc->state = RTW89_ROC_IDLE;
- rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL);
+ rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, NULL);
rtw89_chanctx_proceed(rtwdev);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false);
if (ret)
@@ -2995,7 +3101,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
"roc send null-0 failed: %d\n", ret);
rtw89_for_each_rtwvif(rtwdev, tmp)
- if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
+ if (tmp->chanctx_idx == rtwvif->chanctx_idx)
tmp->offchan = false;
rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif);
@@ -3189,6 +3295,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_phy_edcca_track(rtwdev);
rtw89_tas_track(rtwdev);
rtw89_chanctx_track(rtwdev);
+ rtw89_core_rfkill_poll(rtwdev, false);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -3367,6 +3474,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ewma_rssi_init(&rtwsta->avg_rssi);
ewma_snr_init(&rtwsta->avg_snr);
+ ewma_evm_init(&rtwsta->evm_1ss);
for (i = 0; i < ant_num; i++) {
ewma_rssi_init(&rtwsta->rssi[i]);
ewma_evm_init(&rtwsta->evm_min[i]);
@@ -3384,7 +3492,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
- rtw89_chip_rfk_channel(rtwdev);
+ rtw89_chip_rfk_channel(rtwdev, rtwvif);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
@@ -3491,7 +3599,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
@@ -4226,9 +4334,14 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id_num;
u8 mac_id;
+ if (rtwdev->support_mlo)
+ mac_id_num = chip->support_macid_num / chip->support_link_num;
+ else
+ mac_id_num = chip->support_macid_num;
+
mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
if (mac_id == mac_id_num)
return RTW89_MAX_MAC_ID_NUM;
@@ -4278,6 +4391,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->mcc.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
+ rtw89_init_wait(&rtwdev->wow.wait);
+ rtw89_init_wait(&rtwdev->mac.ps_wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
@@ -4333,7 +4448,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -4342,7 +4457,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
ether_addr_copy(rtwvif->mac_addr, mac_addr);
rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
- rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_chip_rfk_scan(rtwdev, rtwvif, true);
rtw89_hci_recalc_int_mit(rtwdev);
rtw89_phy_config_edcca(rtwdev, true);
@@ -4360,7 +4475,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
ether_addr_copy(rtwvif->mac_addr, vif->addr);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_chip_rfk_scan(rtwdev, rtwvif, false);
rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
rtw89_phy_config_edcca(rtwdev, false);
@@ -4470,6 +4585,70 @@ static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
return 0;
}
+static bool rtw89_chip_has_rfkill(struct rtw89_dev *rtwdev)
+{
+ return !!rtwdev->chip->rfkill_init;
+}
+
+static void rtw89_core_rfkill_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_rfkill_regs *regs = rtwdev->chip->rfkill_init;
+
+ rtw89_write16_mask(rtwdev, regs->pinmux.addr,
+ regs->pinmux.mask, regs->pinmux.data);
+ rtw89_write16_mask(rtwdev, regs->mode.addr,
+ regs->mode.mask, regs->mode.data);
+}
+
+static bool rtw89_core_rfkill_get(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get;
+
+ return !rtw89_read8_mask(rtwdev, reg->addr, reg->mask);
+}
+
+static void rtw89_rfkill_polling_init(struct rtw89_dev *rtwdev)
+{
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ rtw89_core_rfkill_init(rtwdev);
+ rtw89_core_rfkill_poll(rtwdev, true);
+ wiphy_rfkill_start_polling(rtwdev->hw->wiphy);
+}
+
+static void rtw89_rfkill_polling_deinit(struct rtw89_dev *rtwdev)
+{
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ wiphy_rfkill_stop_polling(rtwdev->hw->wiphy);
+}
+
+void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force)
+{
+ bool prev, blocked;
+
+ if (!rtw89_chip_has_rfkill(rtwdev))
+ return;
+
+ prev = test_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+ blocked = rtw89_core_rfkill_get(rtwdev);
+
+ if (!force && prev == blocked)
+ return;
+
+ rtw89_info(rtwdev, "rfkill hardware state changed to %s\n",
+ blocked ? "disable" : "enable");
+
+ if (blocked)
+ set_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+ else
+ clear_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
+
+ wiphy_rfkill_set_hw_state(rtwdev->hw->wiphy, blocked);
+}
+
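
Note: wiphy_rfkill_start_polling() asks cfg80211 to invoke the driver's rfkill_poll op periodically, while the track work calls rtw89_core_rfkill_poll(rtwdev, false) so transitions are also caught while the device is active. A sketch of the mac80211 callback one would expect to pair with this (assumed wiring, abbreviated):

    static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
    {
        struct rtw89_dev *rtwdev = hw->priv;

        mutex_lock(&rtwdev->mutex);
        /* force=false: only report on actual state transitions */
        rtw89_core_rfkill_poll(rtwdev, false);
        mutex_unlock(&rtwdev->mutex);
    }
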
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
@@ -4580,6 +4759,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ if (rtwdev->support_mlo)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
@@ -4587,6 +4769,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+ hw->wiphy->max_sched_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
#endif
hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
@@ -4625,6 +4808,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
goto err_unregister_hw;
}
+ rtw89_rfkill_polling_init(rtwdev);
+
return 0;
err_unregister_hw:
@@ -4639,6 +4824,7 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ rtw89_rfkill_polling_deinit(rtwdev);
ieee80211_unregister_hw(hw);
rtw89_core_clr_supported_band(rtwdev);
}
@@ -4662,6 +4848,8 @@ EXPORT_SYMBOL(rtw89_core_register);
void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
rtw89_core_unregister_hw(rtwdev);
+
+ rtw89_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
@@ -4676,6 +4864,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
struct ieee80211_ops *ops;
u32 driver_data_size;
int fw_format = -1;
+ bool support_mlo;
bool no_chanctx;
firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);
@@ -4704,6 +4893,14 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
+ /* TODO: Once the driver's MLO architecture is done, determine whether
+ * to support MLO according to the following conditions:
+ * 1. runs with chanctx_ops
+ * 2. chip->support_link_num != 0
+ * 3. FW feature supports AP_LINK_PS
+ */
+ support_mlo = false;
+
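
Note: per the TODO above, the assignment is expected to become a conjunction of those three conditions. A hypothetical sketch only; the firmware-feature flag name below is illustrative, not a defined enum value:

    /* illustrative only; the real condition is TBD per the TODO above */
    support_mlo = !no_chanctx && chip->support_link_num != 0 &&
                  RTW89_CHK_FW_FEATURE(AP_LINK_PS, &early_fw); /* hypothetical flag */
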
hw->wiphy->iface_combinations = rtw89_iface_combs;
if (no_chanctx || chip->support_chanctx_num == 1)
@@ -4718,9 +4915,12 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->chip = chip;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
+ rtwdev->support_mlo = support_mlo;
- rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
no_chanctx ? "without" : "with");
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
+ support_mlo ? "with" : "without");
return rtwdev;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 11fa003a9788..4ed9034fdb46 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -21,6 +21,7 @@ struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
+struct rtw89_debugfs;
extern const struct ieee80211_ops rtw89_ops;
@@ -796,16 +797,24 @@ struct rtw89_rx_phy_ppdu {
u8 chan_idx;
u8 ie;
u16 rate;
+ u8 rpl_avg;
+ u8 rpl_path[RF_PATH_MAX];
+ u8 rpl_fd[RF_PATH_MAX];
+ u8 bw_idx;
+ u8 rx_path_en;
struct {
bool has;
u8 avg_snr;
u8 evm_max;
u8 evm_min;
} ofdm;
+ bool has_data;
+ bool has_bcn;
bool ldpc;
bool stbc;
bool to_self;
bool valid;
+ bool hdr_2_en;
};
enum rtw89_mac_idx {
@@ -820,12 +829,12 @@ enum rtw89_phy_idx {
RTW89_PHY_MAX
};
-enum rtw89_sub_entity_idx {
- RTW89_SUB_ENTITY_0 = 0,
- RTW89_SUB_ENTITY_1 = 1,
+enum rtw89_chanctx_idx {
+ RTW89_CHANCTX_0 = 0,
+ RTW89_CHANCTX_1 = 1,
- NUM_OF_RTW89_SUB_ENTITY,
- RTW89_SUB_ENTITY_IDLE = NUM_OF_RTW89_SUB_ENTITY,
+ NUM_OF_RTW89_CHANCTX,
+ RTW89_CHANCTX_IDLE = NUM_OF_RTW89_CHANCTX,
};
enum rtw89_rf_path {
@@ -925,10 +934,12 @@ enum rtw89_sc_offset {
RTW89_SC_40_LOWER = 10,
};
+/* only managed-mode (mgd) WoWLAN features can be added to this enum */
enum rtw89_wow_flags {
RTW89_WOW_FLAG_EN_MAGIC_PKT,
RTW89_WOW_FLAG_EN_REKEY_PKT,
RTW89_WOW_FLAG_EN_DISCONNECT,
+ RTW89_WOW_FLAG_EN_PATTERN,
RTW89_WOW_FLAG_NUM,
};
@@ -1585,6 +1596,23 @@ struct rtw89_btc_wl_active_role_v2 {
u32 noa_duration; /* ms */
};
+struct rtw89_btc_wl_active_role_v7 {
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 band;
+ u8 client_ps;
+ u8 bw;
+ u8 role;
+
+ u8 ch;
+ u8 noa_dur;
+ u8 client_cnt;
+ u8 rsvd2;
+} __packed;
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1666,6 +1694,22 @@ struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
} __packed;
#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v7 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+ u32 dbcc_en;
+ u32 dbcc_chg;
+ u32 dbcc_2g_phy; /* which phy operate in 2G, HW_PHY_0 or HW_PHY_1 */
+} __packed;
+
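
Note: the "struct size must be n*4 bytes" contract lives in comments only. A build-time check one could add to enforce it (not part of this patch):

    static_assert(sizeof(struct rtw89_btc_wl_active_role_v7) % 4 == 0);
    static_assert(sizeof(struct rtw89_btc_wl_role_info_v7) % 4 == 0);
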
struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
@@ -1829,6 +1873,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 role_info_v7;
struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
@@ -1846,8 +1891,10 @@ struct rtw89_btc_wl_info {
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
bool bg_mode;
+ bool he_mode;
bool scbd_change;
bool fw_ver_mismatch;
+ bool client_cnt_inc_2g;
u32 scbd;
};
@@ -2198,6 +2245,19 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 fver;
u8 rsvd0;
@@ -2216,6 +2276,7 @@ union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 v7;
struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
@@ -3306,6 +3367,7 @@ struct rtw89_sta {
struct ewma_rssi avg_rssi;
struct ewma_rssi rssi[RF_PATH_MAX];
struct ewma_snr avg_snr;
+ struct ewma_evm evm_1ss;
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
@@ -3403,7 +3465,7 @@ struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct rtw89_roc roc;
bool chanctx_assigned; /* only valid when running with chanctx_ops */
- enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
@@ -3537,10 +3599,12 @@ struct rtw89_chip_ops {
void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
void (*rfk_init_late)(struct rtw89_dev *rtwdev);
- void (*rfk_channel)(struct rtw89_dev *rtwdev);
+ void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx);
- void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+ void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
void (*power_trim)(struct rtw89_dev *rtwdev);
void (*set_txpwr)(struct rtw89_dev *rtwdev,
@@ -3555,11 +3619,15 @@ struct rtw89_chip_ops {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ void (*digital_pwr_comp)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
void (*query_rxdesc)(struct rtw89_dev *rtwdev,
@@ -3671,6 +3739,7 @@ struct rtw89_scan_option {
u16 slow_pd;
u16 norm_cy;
u8 opch_end;
+ u16 delay;
u64 prohib_chan;
enum rtw89_phy_idx band;
enum rtw89_scan_be_operation operation;
@@ -3909,16 +3978,22 @@ struct rtw89_txpwr_conf {
const void *data;
};
+static inline bool rtw89_txpwr_entcpy(void *entry, const void *cursor, u8 size,
+ const struct rtw89_txpwr_conf *conf)
+{
+ u8 valid_size = min(size, conf->ent_sz);
+
+ memcpy(entry, cursor, valid_size);
+ return true;
+}
+
#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data)
#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \
- for (typecheck(const void *, cursor), (cursor) = (conf)->data, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)); \
+ for (typecheck(const void *, cursor), (cursor) = (conf)->data; \
(cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \
- (cursor) += (conf)->ent_sz, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)))
+ (cursor) += (conf)->ent_sz) \
+ if (rtw89_txpwr_entcpy(&(entry), cursor, sizeof(entry), conf))
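
Note: the rework keeps the per-entry copy inside the iteration statement: rtw89_txpwr_entcpy() always returns true, so the trailing if runs the loop body exactly once per entry while bounding the memcpy to min(sizeof(entry), ent_sz); break and continue in the body behave as before. A minimal usage sketch, with illustrative entry/conf names:

    const struct rtw89_txpwr_conf *conf = &byrate_data->conf; /* illustrative source */
    struct rtw89_txpwr_byrate_entry entry;                    /* illustrative entry type */
    const void *cursor;

    rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
        /* entry now holds a size-bounded copy of *cursor */
        handle_entry(&entry); /* hypothetical consumer */
    }
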
struct rtw89_txpwr_byrate_data {
struct rtw89_txpwr_conf conf;
@@ -4066,6 +4141,11 @@ struct rtw89_rrsr_cfgs {
struct rtw89_reg3_def rsc;
};
+struct rtw89_rfkill_regs {
+ struct rtw89_reg3_def pinmux;
+ struct rtw89_reg3_def mode;
+};
+
struct rtw89_dig_regs {
u32 seg0_pd_reg;
u32 pd_lower_bound_mask;
@@ -4166,6 +4246,7 @@ struct rtw89_chip_info {
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
u8 support_macid_num;
+ u8 support_link_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
@@ -4174,6 +4255,7 @@ struct rtw89_chip_info {
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
+ bool hw_mgmt_tx_encrypt;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4257,6 +4339,8 @@ struct rtw89_chip_info {
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
struct rtw89_reg_def bss_clr_vld;
u32 bss_clr_map_reg;
+ const struct rtw89_rfkill_regs *rfkill_init;
+ struct rtw89_reg_def rfkill_get;
u32 dma_ch_mask;
const struct rtw89_edcca_regs *edcca_regs;
const struct wiphy_wowlan_support *wowlan_stub;
@@ -4327,6 +4411,8 @@ struct rtw89_mac_info {
/* see RTW89_FW_OFLD_WAIT_COND series for wait condition */
struct rtw89_wait_info fw_ofld_wait;
+ /* see RTW89_PS_WAIT_COND series for wait condition */
+ struct rtw89_wait_info ps_wait;
};
enum rtw89_fwdl_check_type {
@@ -4356,7 +4442,9 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
+ RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
};
struct rtw89_fw_suit {
@@ -4519,7 +4607,7 @@ struct rtw89_tas_info {
};
struct rtw89_chanctx_cfg {
- enum rtw89_sub_entity_idx idx;
+ enum rtw89_chanctx_idx idx;
int ref_count;
};
@@ -4544,7 +4632,7 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
-struct rtw89_sub_entity {
+struct rtw89_chanctx {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
@@ -4577,11 +4665,11 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
- atomic_t roc_entity_idx;
+ atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
- struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY];
+ DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
+ struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX];
struct cfg80211_chan_def roc_chandef;
bool entity_active;
@@ -4615,6 +4703,7 @@ enum rtw89_flags {
RTW89_FLAG_WOWLAN,
RTW89_FLAG_FORBIDDEN_TRACK_WROK,
RTW89_FLAG_CHANGING_INTERFACE,
+ RTW89_FLAG_HW_RFKILL_STATE,
NUM_OF_RTW89_FLAGS,
};
@@ -5293,6 +5382,13 @@ struct rtw89_wow_param {
u8 gtk_alg;
u8 ptk_keyidx;
u8 akm;
+
+ /* see RTW89_WOW_WAIT_COND series for wait condition */
+ struct rtw89_wait_info wait;
+
+ bool pno_inited;
+ struct list_head pno_pkt_list;
+ struct cfg80211_sched_scan_request *nd_config;
};
struct rtw89_mcc_limit {
@@ -5399,6 +5495,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ bool support_mlo;
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
@@ -5505,6 +5602,8 @@ struct rtw89_dev {
struct napi_struct napi;
int napi_budget_countdown;
+ struct rtw89_debugfs *debugfs;
+
/* HCI related data, keep last */
u8 priv[] __aligned(sizeof(void *));
};
@@ -6028,33 +6127,33 @@ void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
static inline
const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_sub_entity_idx roc_idx = atomic_read(&hal->roc_entity_idx);
+ enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx == idx)
return &hal->roc_chandef;
- return &hal->sub[idx].chandef;
+ return &hal->chanctx[idx].chandef;
}
static inline
const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->sub[idx].chan;
+ return &hal->chanctx[idx].chan;
}
static inline
const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
- enum rtw89_sub_entity_idx idx)
+ enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- return &hal->sub[idx].rcd;
+ return &hal->chanctx[idx].rcd;
}
static inline
@@ -6064,9 +6163,9 @@ const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev)
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
if (rtwvif)
- return rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ return rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
else
- return rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
}
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -6140,29 +6239,32 @@ static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
chip->ops->rfk_init_late(rtwdev);
}
-static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
+static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_channel)
- chip->ops->rfk_channel(rtwdev);
+ chip->ops->rfk_channel(rtwdev, rtwvif);
}
static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev, phy_idx);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx, chan);
}
-static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool start)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_scan)
- chip->ops->rfk_scan(rtwdev, start);
+ chip->ops->rfk_scan(rtwdev, rtwvif, start);
}
static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev)
@@ -6219,6 +6321,15 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
chip->ops->query_ppdu(rtwdev, phy_ppdu, status);
}
+static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->convert_rpl_to_rssi)
+ chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6250,6 +6361,15 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
}
+static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->digital_pwr_comp)
+ chip->ops->digital_pwr_comp(rtwdev, phy_idx);
+}
+
static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
@@ -6503,6 +6623,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct cfg80211_tid_config *tid_config);
+void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force);
void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 9e1353cce9cc..29f85210f919 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -52,6 +52,27 @@ struct rtw89_debugfs_priv {
};
};
+struct rtw89_debugfs {
+ struct rtw89_debugfs_priv read_reg;
+ struct rtw89_debugfs_priv write_reg;
+ struct rtw89_debugfs_priv read_rf;
+ struct rtw89_debugfs_priv write_rf;
+ struct rtw89_debugfs_priv rf_reg_dump;
+ struct rtw89_debugfs_priv txpwr_table;
+ struct rtw89_debugfs_priv mac_reg_dump;
+ struct rtw89_debugfs_priv mac_mem_dump;
+ struct rtw89_debugfs_priv mac_dbg_port_dump;
+ struct rtw89_debugfs_priv send_h2c;
+ struct rtw89_debugfs_priv early_h2c;
+ struct rtw89_debugfs_priv fw_crash;
+ struct rtw89_debugfs_priv btc_info;
+ struct rtw89_debugfs_priv btc_manual;
+ struct rtw89_debugfs_priv fw_log_manual;
+ struct rtw89_debugfs_priv phy_info;
+ struct rtw89_debugfs_priv stations;
+ struct rtw89_debugfs_priv disable_dm;
+};
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -851,7 +872,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
- chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
@@ -3463,9 +3484,9 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
{
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
@@ -3505,7 +3526,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_hal *hal = &rtwdev->hal;
u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
- u8 evm_min, evm_max;
+ u8 evm_min, evm_max, evm_1ss;
u8 rssi;
u8 snr;
int i;
@@ -3574,7 +3595,8 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
}
seq_puts(m, "]\n");
- seq_puts(m, "EVM: [");
+ evm_1ss = ewma_evm_read(&rtwsta->evm_1ss);
+ seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25);
for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
evm_min = ewma_evm_read(&rtwsta->evm_min[i]);
evm_max = ewma_evm_read(&rtwsta->evm_max[i]);
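
Note: the EVM averages are stored in quarter-dB fixed point, hence the >> 2 / & 0x3 split: the low two bits select a .00/.25/.50/.75 fraction. Worked example:

    u8 evm_1ss = 42;                 /* example raw quarter-dB value */
    u8 whole = evm_1ss >> 2;         /* 10 */
    u8 frac  = (evm_1ss & 0x3) * 25; /* 50 -> printed as "10.50" */
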
@@ -3853,92 +3875,55 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
- .cb_read = rtw89_debug_priv_read_reg_get,
- .cb_write = rtw89_debug_priv_read_reg_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = {
- .cb_write = rtw89_debug_priv_write_reg_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = {
- .cb_read = rtw89_debug_priv_read_rf_get,
- .cb_write = rtw89_debug_priv_read_rf_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = {
- .cb_write = rtw89_debug_priv_write_rf_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = {
- .cb_read = rtw89_debug_priv_rf_reg_dump_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = {
- .cb_read = rtw89_debug_priv_txpwr_table_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = {
- .cb_read = rtw89_debug_priv_mac_reg_dump_get,
- .cb_write = rtw89_debug_priv_mac_reg_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = {
- .cb_read = rtw89_debug_priv_mac_mem_dump_get,
- .cb_write = rtw89_debug_priv_mac_mem_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = {
- .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get,
- .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = {
- .cb_write = rtw89_debug_priv_send_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
- .cb_read = rtw89_debug_priv_early_h2c_get,
- .cb_write = rtw89_debug_priv_early_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
- .cb_read = rtw89_debug_priv_fw_crash_get,
- .cb_write = rtw89_debug_priv_fw_crash_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
- .cb_read = rtw89_debug_priv_btc_info_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = {
- .cb_write = rtw89_debug_priv_btc_manual_set,
-};
+#define rtw89_debug_priv_get(name) \
+{ \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = {
- .cb_write = rtw89_debug_fw_log_manual_set,
-};
+#define rtw89_debug_priv_set(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
- .cb_read = rtw89_debug_priv_phy_info_get,
-};
+#define rtw89_debug_priv_select_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _select, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
- .cb_read = rtw89_debug_priv_stations_get,
-};
+#define rtw89_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
- .cb_read = rtw89_debug_priv_disable_dm_get,
- .cb_write = rtw89_debug_priv_disable_dm_set,
+static const struct rtw89_debugfs rtw89_debugfs_templ = {
+ .read_reg = rtw89_debug_priv_select_and_get(read_reg),
+ .write_reg = rtw89_debug_priv_set(write_reg),
+ .read_rf = rtw89_debug_priv_select_and_get(read_rf),
+ .write_rf = rtw89_debug_priv_set(write_rf),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .send_h2c = rtw89_debug_priv_set(send_h2c),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
+ .btc_info = rtw89_debug_priv_get(btc_info),
+ .btc_manual = rtw89_debug_priv_set(btc_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .phy_info = rtw89_debug_priv_get(phy_info),
+ .stations = rtw89_debug_priv_get(stations),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
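
Note: each initializer macro expands to a designated struct initializer, so the template stays declarative. For instance, the expansion of the first field (shown for illustration):

    .read_reg = rtw89_debug_priv_select_and_get(read_reg)
    /* expands to: */
    .read_reg = {
        .cb_write = rtw89_debug_priv_read_reg_select,
        .cb_read  = rtw89_debug_priv_read_reg_get,
    },
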
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
- rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtw89_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
+ struct rtw89_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
+ if (IS_ERR(debugfs_create_file(#name, mode, parent, priv, \
+ &file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", #name); \
} while (0)
@@ -3949,13 +3934,9 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
#define rtw89_debugfs_add_r(name) \
rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+static
+void rtw89_debugfs_add_sec0(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw89",
- rtwdev->hw->wiphy->debugfsdir);
-
rtw89_debugfs_add_rw(read_reg);
rtw89_debugfs_add_w(write_reg);
rtw89_debugfs_add_rw(read_rf);
@@ -3965,6 +3946,11 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_reg_dump);
rtw89_debugfs_add_rw(mac_mem_dump);
rtw89_debugfs_add_rw(mac_dbg_port_dump);
+}
+
+static
+void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
rtw89_debugfs_add_rw(fw_crash);
@@ -3975,6 +3961,27 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
}
+
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw89_debugfs_templ,
+ sizeof(rtw89_debugfs_templ), GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw89",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw89_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw89_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
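
Note: duplicating the template with kmemdup() gives every rtw89_dev its own rtw89_debugfs_priv instances, so the rtwdev back-pointer set in rtw89_debugfs_add() is per device rather than shared static state. The file ops then recover the device from their private data, as the handlers above already do:

    struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
    struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
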
#endif
#ifdef CONFIG_RTW89_DEBUGMSG
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 800ea59873a1..fc690f7c55dc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -49,8 +49,10 @@ enum rtw89_debug_mac_reg_sel {
#ifdef CONFIG_RTW89_DEBUGFS
void rtw89_debugfs_init(struct rtw89_dev *rtwdev);
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev);
#else
static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
+static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index fbe08c162b93..d9b0e7ebe619 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -670,6 +670,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@@ -679,8 +683,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2491,7 +2497,7 @@ fail:
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
@@ -2810,7 +2816,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
@@ -2943,9 +2949,9 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
u8 pads[RTW89_PPE_BW_NUM];
@@ -3206,7 +3212,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
@@ -3285,7 +3291,7 @@ EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct rtw89_h2c_bcn_upd_be *h2c;
struct sk_buff *skb_beacon;
@@ -4319,6 +4325,52 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
+ struct rtw89_h2c_cxrole_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+ h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
+ h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
+ h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4337,6 +4389,7 @@ int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
h2c->_u32.role_map = cpu_to_le32(role->role_map);
@@ -4417,7 +4470,7 @@ int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
return -ENOMEM;
}
skb_put(skb, len);
@@ -4802,16 +4855,20 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return 0;
}
-int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
- struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif)
+#define RTW89_SCAN_DELAY_TSF_UNIT 104800
+int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif,
+ bool wowlan)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
+ enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
struct rtw89_h2c_scanofld *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
unsigned int cond;
+ u64 tsf = 0;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -4822,6 +4879,17 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_scanofld *)skb->data;
+ if (option->delay) {
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
+ scan_mode = RTW89_SCAN_IMMEDIATE;
+ } else {
+ scan_mode = RTW89_SCAN_DELAY;
+ tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
+ }
+ }
+
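
Note: the delayed start is programmed as an absolute TSF value: the current port TSF is read and advanced by option->delay units of RTW89_SCAN_DELAY_TSF_UNIT (104800 TSF ticks; at the standard 1 MHz TSF rate that is roughly 104.8 ms per unit). Worked example, assuming option->delay == 10:

    u64 tsf;                /* from rtw89_mac_port_get_tsf() */
    tsf += 10 * 104800ULL;  /* start ~1.048 s in the future */
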
h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
@@ -4830,9 +4898,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
le32_encode_bits(option->target_ch_mode,
RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
- le32_encode_bits(RTW89_SCAN_IMMEDIATE,
- RTW89_H2C_SCANOFLD_W1_START_MODE) |
- le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
+ le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
+
+ h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
+ le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
if (option->target_ch_mode) {
h2c->w1 |= le32_encode_bits(op->band_width,
@@ -4845,6 +4915,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
}
+ h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
+ RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
+ h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
+ RTW89_H2C_SCANOFLD_W4_TSF_LOW);
+
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_SCANOFLD, 1, 1,
@@ -4888,7 +4963,8 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ bool wowlan)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
@@ -4902,6 +4978,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
u8 opch_size = sizeof(*opch) * option->num_opch;
u8 probe_id[NUM_NL80211_BANDS];
+ u8 cfg_len = sizeof(*h2c);
unsigned int cond;
void *ptr;
int ret;
@@ -4910,7 +4987,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
- len = sizeof(*h2c) + macc_role_size + opch_size;
+ len = cfg_len + macc_role_size + opch_size;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
@@ -4923,11 +5000,13 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
- list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
- if (pkt_info->wildcard_6ghz) {
- /* Provide wildcard as template */
- probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
- break;
+ if (!wowlan) {
+ list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
+ if (pkt_info->wildcard_6ghz) {
+ /* Provide wildcard as template */
+ probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
+ break;
+ }
}
}
@@ -4958,7 +5037,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
- le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+ le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
@@ -4966,7 +5045,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
- if (req->no_cck) {
+ if (!wowlan && req->no_cck) {
h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
@@ -4975,10 +5054,24 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
le32_encode_bits(RTW89_HW_RATE_OFDM6,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
}
- ptr += sizeof(*h2c);
+
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
+ cfg_len = offsetofend(typeof(*h2c), w8);
+ goto flex_member;
+ }
+
+ h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
+ le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
+ le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
+ RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
+
+flex_member:
+ ptr += cfg_len;
for (i = 0; i < option->num_macc_role; i++) {
- macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role = ptr;
macc_role->w0 =
le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
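
Note: on firmware with SCAN_OFFLOAD_BE_V0, the fixed config region ends at w8, so the flexible macc_role/opch records are appended earlier. offsetofend() yields the byte offset just past a member, which makes the truncation explicit:

    /* cfg_len on V0 firmware: everything up to and including w8 */
    cfg_len = offsetofend(typeof(*h2c), w8);
    /* == offsetof(typeof(*h2c), w8) + sizeof(h2c->w8); w9 is skipped */
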
@@ -5132,14 +5225,21 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel = rfk_mcc->table_idx;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u8 ver = U8_MAX;
u8 tbl, path;
u32 val32;
int ret;
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
+ len = sizeof(*h2c_v0);
+ ver = 0;
+ }
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
@@ -5148,41 +5248,53 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
- h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
- h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ h2c->common.dbcc.ch[path][tbl] =
+ cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->common.dbcc.band[path][tbl] =
+ cpu_to_le32(rfk_mcc->band[tbl]);
}
}
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
- h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
}
- h2c->phy_idx = cpu_to_le32(phy_idx);
- h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
- h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
- h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
-
- val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
- h2c->ktbl_sel0 = cpu_to_le32(val32);
- val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
- h2c->ktbl_sel1 = cpu_to_le32(val32);
- val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
- h2c->rfmod0 = cpu_to_le32(val32);
- val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
- h2c->rfmod1 = cpu_to_le32(val32);
+ h2c->common.phy_idx = cpu_to_le32(phy_idx);
- if (rtw89_is_mlo_1_1(rtwdev))
- h2c->mlo_1_1 = cpu_to_le32(1);
+ if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
+ h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
+
+ h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c_v0->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c_v0->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c_v0->mlo_1_1 = cpu_to_le32(1);
+
+ h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
- h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+ goto done;
+ }
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
@@ -5202,10 +5314,8 @@ fail:
}
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode)
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
u32 len = sizeof(*h2c);
@@ -5249,7 +5359,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_iqk *h2c;
u32 len = sizeof(*h2c);
@@ -5284,10 +5395,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_h2c_rf_dpk *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5327,10 +5437,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_txgapk *h2c;
u32 len = sizeof(*h2c);
@@ -5371,7 +5480,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_dack *h2c;
u32 len = sizeof(*h2c);
@@ -5407,10 +5517,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_SUB_ENTITY_0);
struct rtw89_h2c_rf_rxdck *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5926,6 +6035,56 @@ out:
return ret;
}
+static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
+ int chan_type, int ssid_num,
+ struct rtw89_mac_chinfo *ch_info)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_pktofld_info *info;
+ u8 probe_count = 0;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_pkt = true;
+ ch_info->cfg_tx_pwr = false;
+ ch_info->tx_pwr_idx = 0;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ else if (info->channel_6ghz && probe_count != 0)
+ ch_info->period += RTW89_CHANNEL_TIME_6G;
+
+ if (info->wildcard_6ghz)
+ continue;
+
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ ch_info->num_pkt = probe_count;
+ }
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period = max_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_err(rtwdev, "Channel type out of bound\n");
+ }
+}
+
static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
int ssid_num,
struct rtw89_mac_chinfo *ch_info)
@@ -6004,6 +6163,45 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
+static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_pktofld_info *info;
+ u8 probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bounds\n");
+ break;
+ }
+}
+
static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
int ssid_num,
struct rtw89_mac_chinfo_be *ch_info)
@@ -6066,8 +6264,58 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
}
}
-int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ int list_len;
+ enum rtw89_chan_type type;
+ int ret = 0;
+ u32 idx;
+
+ INIT_LIST_HEAD(&chan_list);
+ for (idx = 0, list_len = 0;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = nd_config->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = RTW89_CHANNEL_TIME;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+
+ rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+ ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -6143,6 +6391,58 @@ out:
return ret;
}
+int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ u32 idx;
+
+ INIT_LIST_HEAD(&chan_list);
+
+ for (idx = 0, list_len = 0;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = nd_config->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = RTW89_CHANNEL_TIME;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+
+ rtw89_pno_scan_add_chan_be(rtwdev, type,
+ nd_config->n_match_sets, ch_info);
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
@@ -6352,7 +6652,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
}
- ret = mac->scan_offload(rtwdev, &opt, rtwvif);
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif, false);
out:
return ret;
}
@@ -6602,6 +6902,57 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ struct rtw89_h2c_cfg_nlo *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret, i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
+
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
+ le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID);
+
+ if (enable) {
+ h2c->nlo_cnt = nd_config->n_match_sets;
+ for (i = 0; i < nd_config->n_match_sets; i++) {
+ h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
+ memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ }
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_NLO, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
@@ -6816,20 +7167,46 @@ hdr:
goto fail;
}
return 0;
-
fail:
dev_kfree_skb_any(skb);
return ret;
}
+int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
+ struct rtw89_h2c_fwips *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_fwips *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
+ le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_PS,
+ H2C_FUNC_IPS_CFG, 0, 1,
+ len);
+
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
+}
+
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtwdev->wow.wait;
struct rtw89_h2c_wow_aoac *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- unsigned int cond;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -6848,8 +7225,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
len);
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}
/* Return < 0, if failures happen during waiting for the condition.
@@ -7341,7 +7717,7 @@ int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
-int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
struct rtw89_h2c_mrc_del *h2c;
@@ -7358,7 +7734,8 @@ int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
skb_put(skb, len);
h2c = (struct rtw89_h2c_mrc_del *)skb->data;
- h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
+ le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index c3b4324c621c..ad47e77d740b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -1898,6 +1898,24 @@ struct rtw89_h2c_wow_global {
#define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16)
#define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24)
+#define RTW89_MAX_SUPPORT_NL_NUM 16
+struct rtw89_h2c_cfg_nlo {
+ __le32 w0;
+ u8 nlo_cnt;
+ u8 rsvd[3];
+ __le32 patterncheck;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 ssid_len[RTW89_MAX_SUPPORT_NL_NUM];
+ u8 chiper[RTW89_MAX_SUPPORT_NL_NUM];
+ u8 rsvd3[24];
+ u8 ssid[RTW89_MAX_SUPPORT_NL_NUM][IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+#define RTW89_H2C_NLO_W0_ENABLE BIT(0)
+#define RTW89_H2C_NLO_W0_IGNORE_CIPHER BIT(2)
+#define RTW89_H2C_NLO_W0_MACID GENMASK(31, 24)
+
static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, BIT(0));
@@ -2089,10 +2107,15 @@ enum rtw89_btc_cxdrvinfo {
enum rtw89_scan_mode {
RTW89_SCAN_IMMEDIATE,
+ RTW89_SCAN_DELAY,
};
enum rtw89_scan_type {
RTW89_SCAN_ONCE,
+ RTW89_SCAN_NORMAL,
+ RTW89_SCAN_NORMAL_SLOW,
+ RTW89_SCAN_SEAMLESS,
+ RTW89_SCAN_MAX,
};
static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
@@ -2124,6 +2147,30 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v7_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v7_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+ __le32 dbcc_en;
+ __le32 dbcc_chg;
+ __le32 dbcc_2g_phy;
+} __packed;
+
+struct rtw89_h2c_cxrole_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_wl_role_info_v7_u8 _u8;
+ struct rtw89_btc_wl_role_info_v7_u32 _u32;
+} __packed;
+
struct rtw89_btc_wl_role_info_v8_u8 {
u8 connect_cnt;
u8 link_mode;
@@ -2145,7 +2192,7 @@ struct rtw89_btc_wl_role_info_v8_u32 {
} __packed;
struct rtw89_h2c_cxrole_v8 {
- struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_h2c_cxhdr_v7 hdr;
struct rtw89_btc_wl_role_info_v8_u8 _u8;
struct rtw89_btc_wl_role_info_v8_u32 _u32;
} __packed;
@@ -2664,6 +2711,8 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W1_PROBE_REQ_PKT_ID GENMASK(31, 24)
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_W3_TSF_HIGH GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_W4_TSF_LOW GENMASK(31, 0)
struct rtw89_h2c_scanofld_be_macc_role {
__le32 w0;
@@ -2711,7 +2760,9 @@ struct rtw89_h2c_scanofld_be {
__le32 w6;
__le32 w7;
__le32 w8;
- struct rtw89_h2c_scanofld_be_macc_role role[];
+ __le32 w9; /* Added after SCAN_OFFLOAD_BE_V1 */
+ /* struct rtw89_h2c_scanofld_be_macc_role (flexible number) */
+ /* struct rtw89_h2c_scanofld_be_opch (flexible number) */
} __packed;
#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
@@ -2742,6 +2793,16 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16)
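+/* w9 (present since SCAN_OFFLOAD_BE_V1) reports the sizes of the fixed command
+ * and of each trailing macc_role/opch entry
+ */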
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP GENMASK(23, 16)
+
+struct rtw89_h2c_fwips {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8)
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
@@ -3741,17 +3802,28 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_NUM,
};
-#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \
+#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ \
(BIT(RTW89_FW_ELEMENT_ID_TXPWR_BYRATE) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ) | \
- BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ) | \
- BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ) | \
BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT) | \
BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU))
+#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \
+ (BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ))
+
+#define RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ \
+ (BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \
+ BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \
+ BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \
+ BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_TRK) | \
+ BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ)
+
#define RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS (BIT(RTW89_FW_ELEMENT_ID_BBMCU0) | \
BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \
BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \
@@ -3935,6 +4007,7 @@ enum rtw89_wow_h2c_func {
H2C_FUNC_WOW_GLOBAL = 0x2,
H2C_FUNC_GTK_OFLD = 0x3,
H2C_FUNC_ARP_OFLD = 0x4,
+ H2C_FUNC_NLO = 0x7,
H2C_FUNC_WAKEUP_CTRL = 0x8,
H2C_FUNC_WOW_CAM_UPD = 0xC,
H2C_FUNC_AOAC_REPORT_REQ = 0xD,
@@ -3942,13 +4015,27 @@ enum rtw89_wow_h2c_func {
NUM_OF_RTW89_WOW_H2C_FUNC,
};
-#define RTW89_WOW_WAIT_COND(func) \
- (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
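+/* fold a (tag, func) pair into a unique wait-condition value */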
+#define RTW89_WOW_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+
+#define RTW89_WOW_WAIT_COND_AOAC \
+ RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ)
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
-#define H2C_FUNC_MAC_LPS_PARM 0x0
-#define H2C_FUNC_P2P_ACT 0x1
+enum rtw89_ps_h2c_func {
+ H2C_FUNC_MAC_LPS_PARM = 0x0,
+ H2C_FUNC_P2P_ACT = 0x1,
+ H2C_FUNC_IPS_CFG = 0x3,
+
+ NUM_OF_RTW89_PS_H2C_FUNC,
+};
+
+#define RTW89_PS_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_PS_H2C_FUNC + (func))
+
+#define RTW89_PS_WAIT_COND_IPS_CFG \
+ RTW89_PS_WAIT_COND(0 /* don't care */, H2C_FUNC_IPS_CFG)
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -4095,7 +4182,7 @@ struct rtw89_fw_h2c_rf_get_mccch {
#define NUM_OF_RTW89_FW_RFK_PATH 2
#define NUM_OF_RTW89_FW_RFK_TBL 3
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_common {
struct {
__le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
__le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
@@ -4108,6 +4195,11 @@ struct rtw89_fw_h2c_rfk_pre_info {
} __packed tbl;
__le32 phy_idx;
+} __packed;
+
+struct rtw89_fw_h2c_rfk_pre_info_v0 {
+ struct rtw89_fw_h2c_rfk_pre_info_common common;
+
__le32 cur_band;
__le32 cur_bw;
__le32 cur_center_ch;
@@ -4127,6 +4219,11 @@ struct rtw89_fw_h2c_rfk_pre_info {
} __packed mlo;
} __packed;
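+/* trimmed layout: only the common block plus the mlo_1_1 flag; older firmware
+ * keeps using the _v0 variant above
+ */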
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct rtw89_fw_h2c_rfk_pre_info_common common;
+ __le32 mlo_1_1;
+} __packed;
+
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4366,6 +4463,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
@@ -4378,12 +4476,14 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
-int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
- struct rtw89_scan_option *opt,
- struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif,
+ bool wowlan);
int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
- struct rtw89_vif *vif);
+ struct rtw89_vif *vif,
+ bool wowlan);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
@@ -4391,12 +4491,17 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode);
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -4420,6 +4525,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -4434,10 +4541,14 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
-int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected);
+int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -4450,6 +4561,8 @@ int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
+int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable);
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev,
@@ -4488,7 +4601,7 @@ int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_add_arg *arg);
int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_start_arg *arg);
-int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx);
int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_req_tsf_arg *arg,
struct rtw89_mac_mrc_tsf_rpt *rpt);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index e2399796aeb1..c70a23a763b0 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,6 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
/* 8852C PCIE SCC */
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
+ .wde_size23 = {RTW89_WDE_PG_64, 1022, 2,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
.ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
@@ -1635,6 +1636,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
/* DLFW */
.ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
+ .ple_size9 = {RTW89_PLE_PG_128, 2288, 16,},
/* 8852C DLFW */
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
@@ -1652,6 +1654,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
+ .wde_qt23 = {958, 48, 0, 16,},
.ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
.ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
@@ -1671,12 +1674,16 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
/* 8852C PCIE SCC */
.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
+ .ple_qt57 = {147, 0, 16, 20, 13, 13, 178, 0, 32, 14, 8, 0,},
/* PCIE 64 */
.ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
+ .ple_qt59 = {147, 0, 32, 20, 1860, 13, 2025, 0, 1879, 14, 24, 0,},
/* 8852A PCIE WOW */
.ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,},
/* 8852B PCIE WOW */
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
+ /* 8852BT PCIE WOW */
+ .ple_qt_52bt_wow = {147, 0, 32, 20, 1860, 13, 1929, 0, 1879, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
.ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
@@ -2025,11 +2032,16 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow)
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
return;
+	/* 8852C enables B_AX_UC_MGNT_DEC by default */
+ if (chip->chip_id == RTL8852C)
+ msk32 = B_AX_BMC_MGNT_DEC;
+
if (enable)
rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
else
@@ -2254,6 +2266,8 @@ static int sec_eng_init_ax(struct rtw89_dev *rtwdev)
/* init TX encryption */
val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
val |= (B_AX_MC_DEC | B_AX_BC_DEC);
+ if (chip->chip_id == RTL8852C)
+ val |= B_AX_UC_MGNT_DEC;
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
chip->chip_id == RTL8851B)
val &= ~B_AX_TX_PARTIAL_MODE;
@@ -2728,6 +2742,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2766,6 +2781,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
}
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80);
+ }
+
return 0;
}
@@ -3781,7 +3802,7 @@ static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason,
rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
- if (rtwdev->chip->chip_id == RTL8852B)
+ if (rtw89_is_rtl885xb(rtwdev))
rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL,
B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2);
@@ -4774,14 +4795,14 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
- rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
+ rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
&rtwdev->scan_info.op_chan);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
ieee80211_wake_queues(rtwdev->hw);
} else {
rtw89_chan_create(&new, chan, chan, band,
RTW89_CHANNEL_WIDTH_20);
- rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
+ rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx,
&new);
}
break;
@@ -4866,6 +4887,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
{
/* N.B. This will run in interrupt context. */
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
const struct rtw89_c2h_done_ack *c2h =
(const struct rtw89_c2h_done_ack *)skb_c2h->data;
u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT);
@@ -4886,6 +4908,18 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
switch (h2c_class) {
default:
return;
+ case H2C_CL_MAC_PS:
+ switch (h2c_func) {
+ default:
+ return;
+ case H2C_FUNC_IPS_CFG:
+ cond = RTW89_PS_WAIT_COND_IPS_CFG;
+ break;
+ }
+
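+		/* wake the IPS waiter; a nonzero H2C return code is reported as an error */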
+ data.err = !!h2c_return;
+ rtw89_complete_cond(ps_wait, cond, &data);
+ return;
case H2C_CL_MAC_FW_OFLD:
switch (h2c_func) {
default:
@@ -5144,11 +5178,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtw_wow->wait;
const struct rtw89_c2h_wow_aoac_report *c2h =
(const struct rtw89_c2h_wow_aoac_report *)skb->data;
struct rtw89_completion_data data = {};
- unsigned int cond;
aoac_rpt->rpt_ver = c2h->rpt_ver;
aoac_rpt->sec_type = c2h->sec_type;
@@ -5166,8 +5199,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- rtw89_complete_cond(wait, cond, &data);
+ rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data);
}
static void
@@ -6513,8 +6545,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.is_txq_empty = mac_is_txq_empty_ax,
- .add_chan_list = rtw89_hw_scan_add_chan_list,
- .scan_offload = rtw89_fw_h2c_scan_offload,
+ .add_chan_list = rtw89_hw_scan_add_chan_list_ax,
+ .add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax,
+ .scan_offload = rtw89_fw_h2c_scan_offload_ax,
.wow_config_mac = rtw89_wow_config_mac_ax,
};
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index d5895516b3ed..67c2a4507124 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func {
enum rtw89_mac_c2h_wow_func {
RTW89_MAC_C2H_FUNC_AOAC_REPORT,
- RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
@@ -885,12 +884,14 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
+ const struct rtw89_dle_size wde_size23;
const struct rtw89_dle_size ple_size0;
const struct rtw89_dle_size ple_size0_v1;
const struct rtw89_dle_size ple_size3_v1;
const struct rtw89_dle_size ple_size4;
const struct rtw89_dle_size ple_size6;
const struct rtw89_dle_size ple_size8;
+ const struct rtw89_dle_size ple_size9;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
const struct rtw89_wde_quota wde_qt0;
@@ -900,6 +901,7 @@ struct rtw89_mac_size_set {
const struct rtw89_wde_quota wde_qt7;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_wde_quota wde_qt23;
const struct rtw89_ple_quota ple_qt0;
const struct rtw89_ple_quota ple_qt1;
const struct rtw89_ple_quota ple_qt4;
@@ -911,9 +913,12 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt45;
const struct rtw89_ple_quota ple_qt46;
const struct rtw89_ple_quota ple_qt47;
+ const struct rtw89_ple_quota ple_qt57;
const struct rtw89_ple_quota ple_qt58;
+ const struct rtw89_ple_quota ple_qt59;
const struct rtw89_ple_quota ple_qt_52a_wow;
const struct rtw89_ple_quota ple_qt_52b_wow;
+ const struct rtw89_ple_quota ple_qt_52bt_wow;
const struct rtw89_ple_quota ple_qt_51b_wow;
const struct rtw89_rsvd_quota ple_rsvd_qt0;
const struct rtw89_rsvd_quota ple_rsvd_qt1;
@@ -1000,9 +1005,12 @@ struct rtw89_mac_gen_def {
int (*add_chan_list)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected);
+ int (*add_chan_list_pno)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int (*scan_offload)(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ bool wowlan);
int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
};
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 1508693032cb..48ad0d0f76bf 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -90,7 +90,7 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw89_leave_ips(rtwdev);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0,
&hw->conf.chandef);
rtw89_set_channel(rtwdev);
}
@@ -126,7 +126,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->rtwdev = rtwdev;
rtwvif->roc.state = RTW89_ROC_IDLE;
rtwvif->offchan = false;
- list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
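+	/* avoid double-insertion if the vif is already on the list */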
+ if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
+ list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+
INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
rtw89_leave_ps_mode(rtwdev);
@@ -144,7 +146,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->bcn_hit_cond = 0;
rtwvif->mac_idx = RTW89_MAC_0;
rtwvif->phy_idx = RTW89_PHY_0;
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+ rtwvif->chanctx_idx = RTW89_CHANCTX_0;
rtwvif->chanctx_assigned = false;
rtwvif->hit_rule = 0;
rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
@@ -313,7 +315,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
u8 slot_time;
u8 sifs;
@@ -503,7 +505,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type == RTW89_BAND_6G) {
mutex_unlock(&rtwdev->mutex);
return -EOPNOTSUPP;
@@ -519,7 +521,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_channel(rtwdev);
+ rtw89_chip_rfk_channel(rtwdev, rtwvif);
rtw89_queue_chanctx_work(rtwdev);
mutex_unlock(&rtwdev->mutex);
@@ -783,7 +785,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
rtwsta->use_cfg_mask = true;
rtwsta->mask = *br_data->mask;
- rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
+ rtw89_phy_ra_update_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev,
@@ -925,7 +927,7 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
+ rtw89_phy_ra_update_sta(rtwdev, sta, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@@ -1147,6 +1149,22 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
}
#endif
+static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+	/* the wl_disable GPIO floats when entering LPS, so skip polling while running */
+ if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ goto out;
+
+ rtw89_core_rfkill_poll(rtwdev, false);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -1193,5 +1211,6 @@ const struct ieee80211_ops rtw89_ops = {
.set_wakeup = rtw89_ops_set_wakeup,
.set_rekey_data = rtw89_set_rekey_data,
#endif
+ .rfkill_poll = rtw89_ops_rfkill_poll,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f212b67771d5..31f0a5225b11 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -2599,6 +2599,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.is_txq_empty = mac_is_txq_empty_be,
.add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .add_chan_list_pno = rtw89_pno_scan_add_chan_list_be,
.scan_offload = rtw89_fw_h2c_scan_offload_be,
.wow_config_mac = rtw89_wow_config_mac_be,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index ad11d1414874..c7165e757842 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -302,7 +302,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta->ra;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
@@ -341,8 +341,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
mode |= RTW89_RA_MODE_VHT;
csi_mode = RTW89_RA_RPT_MODE_VHT;
- /* MCS9, MCS8, MCS7 */
- ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
+ /* MCS9 (non-20MHz), MCS8, MCS7 */
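+	/* VHT MCS9 is not valid at 20 MHz for 1 or 2 spatial streams, so cap the mask at MCS8 there */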
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
+ else
+ ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
high_rate_masks = rtw89_ra_mask_vht_rates;
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = 1;
@@ -353,8 +356,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
csi_mode = RTW89_RA_RPT_MODE_HT;
ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
- (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
- (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
@@ -462,7 +465,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -528,7 +531,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern next_pattern = {0};
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
@@ -610,17 +613,17 @@ out:
rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
-static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
+static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
+ rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
ieee80211_iterate_stations_atomic(rtwdev->hw,
- rtw89_phy_ra_updata_sta_iter,
+ rtw89_phy_ra_update_sta_iter,
rtwdev);
}
@@ -3081,6 +3084,7 @@ EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms)
{
@@ -3088,7 +3092,7 @@ int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
if (ret)
return ret;
@@ -3098,13 +3102,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3114,13 +3119,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3130,13 +3136,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3146,13 +3153,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3162,13 +3170,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -4285,7 +4294,7 @@ void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif->sub_entity_idx);
+ rtwvif->chanctx_idx);
struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
if (!chip->ul_tb_waveform_ctrl)
@@ -5367,7 +5376,7 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
@@ -5392,7 +5401,7 @@ static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}
-static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
+static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
@@ -5611,7 +5620,7 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
@@ -5690,38 +5699,47 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
}
#define IGI_RSSI_MIN 10
+#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
+ u8 igi_min;
if (unlikely(dig->bypass_dig)) {
dig->bypass_dig = false;
return;
}
+ rtw89_phy_dig_update_rssi_info(rtwdev);
+
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
rtw89_phy_dig_igi_offset_by_env(rtwdev);
- rtw89_phy_dig_update_rssi_info(rtwdev);
- dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
- dig->igi_rssi - IGI_RSSI_MIN : 0;
- dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
- dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;
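+	/* bound the dynamic IGI window: floor at ABS_IGI_MIN, cap at the performance-mode maximum */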
+ igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
+ dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
+ dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
- dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
- dig->dyn_igi_max);
+ if (dig->dyn_igi_max >= dig->dyn_igi_min) {
+ dig->igi_fa_rssi += dig->fa_rssi_ofst;
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+ } else {
+ dig->igi_fa_rssi = dig->dyn_igi_max;
+ }
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
+ "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index d8df553b9cb0..6dd8ec46939a 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -894,7 +894,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
@@ -907,22 +907,28 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
unsigned int ms);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 92074b73ebeb..aebd6404f802 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -98,10 +98,10 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
-static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
.lastrpwm = RTW89_LAST_RPWM_ACTIVE,
};
@@ -109,6 +109,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
rtw89_fw_leave_lps_check(rtwdev, 0);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
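+	/* run digital power compensation on this vif's PHY after leaving LPS */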
+ rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx);
}
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -137,7 +138,7 @@ static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_leave_lps(rtwdev, rtwvif);
}
void rtw89_leave_lps(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 7df36f3bff0b..69678eab2309 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -107,6 +107,15 @@
#define B_AX_DBG_SEL0_16BIT BIT(11)
#define B_AX_DBG_SEL0 GENMASK(7, 0)
+#define R_AX_GPIO_EXT_CTRL 0x0060
+#define B_AX_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24)
+#define B_AX_GPIO_MOD_9 BIT(25)
+#define B_AX_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16)
+#define B_AX_GPIO_IO_SEL_9 BIT(17)
+#define B_AX_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8)
+#define B_AX_GPIO_IN_15_TO_8_MASK GENMASK(7, 0)
+#define B_AX_GPIO_IN_9 BIT(1)
+
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
@@ -267,6 +276,9 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_GPIO8_15_FUNC_SEL 0x02D4
+#define B_AX_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4)
+
#define R_AX_EECS_EESK_FUNC_SEL 0x02D8
#define B_AX_PINMUX_EESK_FUNC_SEL_MASK GENMASK(7, 4)
@@ -706,6 +718,14 @@
B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
B_AX_HDT_DMA_PROCESS_ERR_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET_V01 (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN)
#define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
#define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30)
@@ -1096,6 +1116,7 @@
#define B_AX_WDE_BUFMGN_FRZTMR_MODE BIT(0)
#define R_AX_WDE_ERR_IMR 0x8C38
+#define B_AX_WDE_DATCHN_UAPG_ERR_INT_EN BIT(30)
#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
@@ -1135,6 +1156,29 @@
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_CLR_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_UAPG_ERR_INT_EN)
#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
@@ -1154,6 +1198,28 @@
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -2440,6 +2506,10 @@
#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_AX_AGG_LEN_VHT_0 0xC618
+#define R_AX_AGG_LEN_VHT_0_C1 0xE618
+#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0)
+
#define S_AX_CTS2S_TH_SEC_256B 1
#define R_AX_SIFS_SETTING 0xC624
#define R_AX_SIFS_SETTING_C1 0xE624
@@ -3098,7 +3168,9 @@
B_AX_OFDM_CCA_TIMEOUT_INT_EN | \
B_AX_DATA_ON_TIMEOUT_INT_EN | \
B_AX_STS_ON_TIMEOUT_INT_EN | \
- B_AX_CSI_ON_TIMEOUT_INT_EN)
+ B_AX_CSI_ON_TIMEOUT_INT_EN | \
+ B_AX_PHYINTF_TIMEOUT_THR_MSAK)
+#define B_AX_PHYINFO_IMR_SET (B_AX_PHY_TXON_TIMEOUT_INT_EN | 0x7)
#define R_AX_PHYINFO_ERR_ISR 0xCCFC
#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC
@@ -3854,6 +3926,15 @@
#define R_BE_EFUSE_CTRL_1_V1 0x0034
#define B_BE_EF_DATA_MASK GENMASK(31, 0)
+#define R_BE_GPIO_EXT_CTRL 0x0060
+#define B_BE_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24)
+#define B_BE_GPIO_MOD_9 BIT(25)
+#define B_BE_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16)
+#define B_BE_GPIO_IO_SEL_9 BIT(17)
+#define B_BE_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8)
+#define B_BE_GPIO_IN_15_TO_8_MASK GENMASK(7, 0)
+#define B_BE_GPIO_IN_9 BIT(1)
+
#define R_BE_WL_BT_PWR_CTRL 0x0068
#define B_BE_ISO_BD2PP BIT(31)
#define B_BE_LDOV12B_EN BIT(30)
@@ -4299,6 +4380,9 @@
#define B_BE_REG_CK40M_EN BIT(1)
#define B_BE_REG_CK640M_EN BIT(0)
+#define R_BE_GPIO8_15_FUNC_SEL 0x02D4
+#define B_BE_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4)
+
#define R_BE_WLAN_XTAL_SI_CTRL 0x0270
#define B_BE_WL_XTAL_SI_CMD_POLL BIT(31)
#define B_BE_WL_XTAL_SI_CHIPID_MASK GENMASK(30, 28)
@@ -5964,6 +6048,9 @@
#define R_BE_WP_PAGE_INFO1 0xB7AC
#define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16)
+#define R_BE_LTPC_T0_PATH0 0xBA28
+#define R_BE_LTPC_T0_PATH1 0xBB28
+
#define R_BE_CMAC_SHARE_FUNC_EN 0x0E000
#define B_BE_CMAC_SHARE_CRPRT BIT(31)
#define B_BE_CMAC_SHARE_EN BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index a251b0e3b16e..a7720a1f17a7 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -800,7 +800,7 @@ static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
const struct rtw89_reg_6ghz_tpe *tmp;
const struct rtw89_chan *chan;
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type != RTW89_BAND_6G)
continue;
@@ -872,7 +872,7 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
u8 index;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
if (chan->band_type != RTW89_BAND_6G)
continue;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 40cf84a79c46..1679bd408ef3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -185,6 +185,15 @@ static const struct rtw89_rrsr_cfgs rtw8851b_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
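+/* pinmux GPIO9 as a plain GPIO input so the hardware rfkill (wl_disable) line can be sampled */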
+static const struct rtw89_rfkill_regs rtw8851b_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8851b_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V1,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1578,28 +1587,31 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev)
rtw8851b_aack(rtwdev);
rtw8851b_rck(rtwdev);
rtw8851b_dack(rtwdev);
- rtw8851b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
-static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8851b_rx_dck(rtwdev, phy_idx);
- rtw8851b_iqk(rtwdev, phy_idx);
- rtw8851b_tssi(rtwdev, phy_idx, true);
- rtw8851b_dpk(rtwdev, phy_idx);
+ rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8851b_tssi_scan(rtwdev, phy_idx);
+ rtw8851b_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8851b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev)
@@ -1801,7 +1813,7 @@ rtw8851b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8851b_btc_preagc_en_defs_tbl :
&rtw8851b_btc_preagc_dis_defs_tbl);
@@ -1824,7 +1836,7 @@ static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
if (en) {
rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
@@ -1869,7 +1881,7 @@ static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static void rtw8851b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
enum rtw89_rf_path_bit rx_path)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 rst_mask0;
if (rx_path == RF_A) {
@@ -2375,9 +2387,11 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.get_thermal = rtw8851b_get_thermal,
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8851b_pwr_on_func,
.pwr_off_func = rtw8851b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2452,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2463,6 +2478,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
@@ -2524,6 +2540,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.rrsr_cfgs = &rtw8851b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8851b_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index a221f94627f5..364e36354225 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -521,9 +521,10 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
}
static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path,
@@ -574,7 +575,8 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf
_rxbb_ofst_swap(rtwdev, path, rf_mode);
}
-static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 rf_reg5;
u8 path;
@@ -584,7 +586,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_af
0x2, rtwdev->hal.cv);
for (path = 0; path < RF_PATH_NUM_8851B; path++) {
- _rx_dck_info(rtwdev, phy, path, is_afe);
+ _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx);
rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
@@ -1481,9 +1483,9 @@ static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx = 0;
@@ -1586,10 +1588,11 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -1602,7 +1605,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8851B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1618,9 +1621,10 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
}
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg,
@@ -1746,9 +1750,9 @@ static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2449,7 +2453,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {};
@@ -2465,7 +2470,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
continue;
_dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path);
_dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
@@ -2505,13 +2510,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n",
DPK_VER_8851B, rtwdev->hal.cv);
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2617,9 +2623,8 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl);
@@ -2650,7 +2655,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8851B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2664,7 +2669,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2755,9 +2759,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2766,9 +2769,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2944,10 +2947,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 gidx, gidx_1st, gidx_2nd;
u8 ch = chan->channel;
s8 de_1st;
@@ -2980,10 +2982,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 tgidx, tgidx_1st, tgidx_2nd;
u8 ch = chan->channel;
s8 tde_1st;
@@ -3017,10 +3018,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3033,7 +3034,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3049,8 +3050,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3096,10 +3097,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3255,9 +3256,10 @@ void rtw8851b_dack(struct rtw89_dev *rtwdev)
_dac_cal(rtwdev, false);
}
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3265,30 +3267,32 @@ void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, false);
+ _rx_dck(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3297,7 +3301,7 @@ void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3308,9 +3312,11 @@ void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -3319,26 +3325,26 @@ void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u32 i;
@@ -3348,20 +3354,21 @@ void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3379,7 +3386,7 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n",
@@ -3391,12 +3398,13 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
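
Taken together, the hunks above all apply one mechanical transformation: helpers that used to call rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0) themselves now receive either the chanctx index or the resolved chan pointer from their caller. A minimal, self-contained sketch of the before/after shape (stand-in types, not the real rtw89 structures):

#include <stdio.h>

/* Stand-ins for illustration only; the real types live in core.h. */
enum chanctx_idx { CHANCTX_0, CHANCTX_1 };
struct chan { int channel; };
struct dev { struct chan chans[2]; };

static const struct chan *chan_get(struct dev *d, enum chanctx_idx idx)
{
	return &d->chans[idx];
}

/* Old shape: every helper re-derived the channel from the first,
 * hard-coded context, which goes wrong once a second context exists. */
static void helper_old(struct dev *d)
{
	printf("cal on ch %d\n", chan_get(d, CHANCTX_0)->channel);
}

/* New shape: the caller resolves the context once and threads the
 * resolved chan (or the chanctx index) down the call chain. */
static void helper_new(struct dev *d, const struct chan *chan)
{
	printf("cal on ch %d\n", chan->channel);
}

int main(void)
{
	struct dev d = { .chans = { { 6 }, { 36 } } };

	helper_old(&d);				/* always channel 6 */
	helper_new(&d, chan_get(&d, CHANCTX_1));	/* channel 36 */
	return 0;
}
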
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
index b66a23d6d367..ea7df628256b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
@@ -12,15 +12,21 @@ void rtw8851b_lck_init(struct rtw89_dev *rtwdev);
void rtw8851b_lck_track(struct rtw89_dev *rtwdev);
void rtw8851b_rck(struct rtw89_dev *rtwdev);
void rtw8851b_dack(struct rtw89_dev *rtwdev);
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
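
With the 8851B header updated, a call site for the new entry points would look like the sketch below. This caller is hypothetical (the 8851B front end is not part of this excerpt) and mirrors the rtw8852a/b rfk_channel bodies shown later in the patch:

#include "rtw8851b_rfk.h"

/* Hypothetical channel-calibration hook, for illustration: phy and
 * chanctx come from the vif being calibrated. */
static void example_8851b_rfk_channel(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy_idx,
				      enum rtw89_chanctx_idx chanctx_idx)
{
	rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
	rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
	rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
	rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
}
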
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 08e148328c62..dde96bd63021 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -337,6 +337,11 @@ static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = {
PWR_INTF_MSK_PCIE,
PWR_BASE_MAC,
PWR_CMD_WRITE, BIT(0), 0},
+ {0x0092,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0005,
PWR_CV_MSK_ALL,
PWR_INTF_MSK_PCIE,
@@ -478,6 +483,15 @@ static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852a_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852a_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1332,29 +1346,32 @@ static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
rtwdev->is_tssi_mode[RF_PATH_B] = false;
rtw8852a_rck(rtwdev);
- rtw8852a_dack(rtwdev);
- rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true);
+ rtw8852a_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0);
}
-static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852a_rx_dck(rtwdev, phy_idx, true);
- rtw8852a_iqk(rtwdev, phy_idx);
- rtw8852a_tssi(rtwdev, phy_idx);
- rtw8852a_dpk(rtwdev, phy_idx);
+ rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852a_tssi_scan(rtwdev, phy_idx);
+ rtw8852a_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852a_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852a_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
}
static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev)
@@ -1534,10 +1551,8 @@ static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1559,7 +1574,7 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852a_bb_pmac_info tx_info = {0};
@@ -1569,7 +1584,7 @@ void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.tx_cnt = tx_cnt;
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
@@ -2098,9 +2113,11 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.get_thermal = rtw8852a_get_thermal,
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2167,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2178,6 +2196,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2240,6 +2259,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
+ .rfkill_init = &rtw8852a_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852a_edcca_regs,
#ifdef CONFIG_PM
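
The new rtw8852a_rfkill_regs block above wires up the hardware rfkill pin (GPIO9). The .mode entry is worth a note: the mode/direction bits live in the upper half of the 32-bit R_AX_GPIO_EXT_CTRL register, so the table addresses it at byte offset +2 and shifts the 32-bit masks right by 16 to fit the narrower access. A standalone sketch of that arithmetic; the address and bit positions here are assumed for illustration, the real ones are in reg.h:

#include <stdint.h>
#include <stdio.h>

#define GPIO_EXT_CTRL	0x0054		/* assumed address */
#define GPIO_MOD_9	(1u << 25)	/* assumed bit positions */
#define GPIO_IO_SEL_9	(1u << 17)

int main(void)
{
	/* Bits 16..31 of the 32-bit register are reachable as a 16-bit
	 * access at byte offset +2, so the masks shift right by 16. */
	uint32_t addr = GPIO_EXT_CTRL + 2;
	uint16_t mask = (GPIO_MOD_9 | GPIO_IO_SEL_9) >> 16;

	printf("addr=0x%04x mask=0x%04x\n", addr, mask);	/* 0x0202 */
	return 0;
}
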
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index ea82fed7b7be..d6c1acd09238 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -97,10 +97,10 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
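
The PMAC helpers above gain a chan argument because they sit under the TSSI pre-TX measurement path, which already knows its channel context. A minimal sketch of the forwarding shape with stand-in types (compare the rtw8852bx version later in this patch; the tx_cnt/period values match the _tssi_hw_tx() call):

#include <stdio.h>

struct chan { int channel; };
struct pmac_info { int en_pmac_tx; int tx_cnt, period, tx_time; };

static void set_pmac_tx(const struct pmac_info *info, const struct chan *chan)
{
	printf("pmac tx en=%d cnt=%d on ch %d\n",
	       info->en_pmac_tx, info->tx_cnt, chan->channel);
}

/* The packet-TX wrapper builds the descriptor and forwards the
 * caller-supplied chan instead of looking it up itself. */
static void set_pmac_pkt_tx(int enable, int tx_cnt, int period, int tx_time,
			    const struct chan *chan)
{
	struct pmac_info info = {
		.en_pmac_tx = enable, .tx_cnt = tx_cnt,
		.period = period, .tx_time = tx_time,
	};

	set_pmac_tx(&info, chan);
}

int main(void)
{
	struct chan chan = { .channel = 36 };

	set_pmac_pkt_tx(1, 20, 5000, 0, &chan);
	return 0;
}
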
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index d86429e4a35f..9db8713ac99b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -493,11 +493,12 @@ static void _dack(struct rtw89_dev *rtwdev)
_dack_s1(rtwdev);
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -799,12 +800,13 @@ static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
}
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool fail = false;
u32 iqk_cmd = 0x0;
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx);
u32 addr_rfc_ctl = 0x0;
if (path == RF_PATH_A)
@@ -888,7 +890,8 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
}
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
@@ -927,7 +930,7 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
}
@@ -952,7 +955,8 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x0;
@@ -991,7 +995,7 @@ static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
B_CFIR_LUT_GP, group);
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx);
switch (iqk_info->iqk_band[path]) {
case RTW89_BAND_2G:
@@ -1040,7 +1044,8 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
}
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
@@ -1083,7 +1088,7 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
B_CFIR_LUT_GP, gp);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
}
@@ -1098,7 +1103,8 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x2;
@@ -1131,7 +1137,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx);
if (!fail) {
tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
iqk_info->nb_txcfir[path] = tmp | 0x2;
@@ -1179,7 +1185,8 @@ static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
}
static bool _iqk_lok(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 rf0 = 0x0;
@@ -1210,11 +1217,11 @@ static bool _iqk_lok(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx);
iqk_info->lok_cor_fail[0][path] = tmp;
fsleep(10);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx);
iqk_info->lok_fin_fail[0][path] = tmp;
fail = _lok_finetune_check(rtwdev, path);
return fail;
@@ -1321,7 +1328,8 @@ static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
}
static
-void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool lok_is_fail = false;
@@ -1333,30 +1341,35 @@ void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
for (i = 0; i < 3; i++) {
_lok_res_table(rtwdev, path, ibias++);
_iqk_txk_setting(rtwdev, path);
- lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx);
if (!lok_is_fail)
break;
}
if (iqk_info->is_nbiqk)
- iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_rxclk_setting(rtwdev, path);
_iqk_rxk_setting(rtwdev, path);
if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
- iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_info_iqk(rtwdev, phy_idx, path);
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1413,9 +1426,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
}
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- _iqk_by_path(rtwdev, phy_idx, path);
+ _iqk_by_path(rtwdev, phy_idx, path, chanctx_idx);
}
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
@@ -1513,7 +1526,8 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw89_rfk_parser(rtwdev, tbl);
}
-static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
+static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 phy_idx = 0x0;
@@ -1525,10 +1539,10 @@ static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
else
phy_idx = RTW89_PHY_1;
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
}
@@ -1607,12 +1621,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1622,12 +1637,12 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852A_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
@@ -1635,18 +1650,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1656,9 +1672,10 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
#define RXDCK_VER_8852A 0xe
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u32 ori_val;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -1704,7 +1721,7 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u8 path, kpath, dck_tune;
u32 rf_reg5;
@@ -1732,7 +1749,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
@@ -1800,9 +1817,10 @@ static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
}
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
+ enum rtw89_rf_path path, enum rtw8852a_dpk_id id,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u16 dpk_cmd = 0x0;
u32 val;
int ret;
@@ -1841,18 +1859,19 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
- _set_rx_dck(rtwdev, phy, path, false);
+ _set_rx_dck(rtwdev, phy, path, false, chanctx_idx);
}
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 kidx = dpk->cur_idx[path];
dpk->bp[path][kidx].band = chan->band_type;
@@ -1967,7 +1986,8 @@ static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 cur_rxbb;
@@ -1997,7 +2017,7 @@ static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
- _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
@@ -2186,10 +2206,11 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
}
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx)
+ enum rtw89_rf_path path, u8 kidx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_tpg_sel(rtwdev, path, kidx);
- _dpk_one_shot(rtwdev, phy, path, SYNC);
+ _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx);
return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
@@ -2242,10 +2263,10 @@ static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, enum rtw89_rf_path path,
- u8 kidx)
+ u8 kidx, enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx);
}
#define DPK_TXAGC_LOWER 0x2e
@@ -2322,7 +2343,7 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
@@ -2330,7 +2351,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2344,7 +2365,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
do {
switch (step) {
case DPK_AGC_STEP_SYNC_DGAIN:
- if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) {
tmp_txagc = DPK_TXAGC_INVAL;
goout = true;
break;
@@ -2380,7 +2401,8 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
- _dpk_lbk_rxiqk(rtwdev, phy, path);
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ chanctx_idx);
}
if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
step = DPK_AGC_STEP_SYNC_DGAIN;
@@ -2391,7 +2413,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
break;
case DPK_AGC_STEP_GAIN_LOSS_IDX:
- _dpk_gainloss(rtwdev, phy, path, kidx);
+ _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx);
tmp_gl_idx = _dpk_gainloss_read(rtwdev);
if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
@@ -2475,11 +2497,12 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
}
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx, u8 gain)
+ enum rtw89_rf_path path, u8 kidx, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_set_mdpd_para(rtwdev, 0x0);
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx);
}
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
@@ -2518,10 +2541,10 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2545,7 +2568,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0, kidx = dpk->cur_idx[path];
@@ -2558,16 +2582,16 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_rf_direct_cntrl(rtwdev, path, false);
txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
_dpk_rf_setting(rtwdev, gain, path, kidx);
- _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_rx_dck(rtwdev, phy, path, chanctx_idx);
_dpk_kip_setting(rtwdev, path, kidx);
_dpk_manual_txcfir(rtwdev, path, true);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
if (txagc == DPK_TXAGC_INVAL)
is_fail = true;
_dpk_get_thermal(rtwdev, kidx, path);
- _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
_dpk_manual_txcfir(rtwdev, path, false);
@@ -2584,7 +2608,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -2599,7 +2624,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2624,7 +2650,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_tssi_pause(rtwdev, path, true);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
}
_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
@@ -2633,7 +2659,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)) || reloaded[path])
continue;
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
@@ -2652,10 +2678,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -2682,17 +2709,19 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852A_DPK_VER, rtwdev->hal.cv,
RTW8852A_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy),
+ chanctx_idx);
}
static void _dpk_onoff(struct rtw89_dev *rtwdev,
@@ -2815,9 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2826,9 +2854,9 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}
-static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
@@ -2838,9 +2866,9 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
@@ -2869,7 +2897,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define __get_val(ptr, idx) \
({ \
@@ -2883,7 +2911,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3076,9 +3103,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
}
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 subband = chan->subband_type;
switch (subband) {
@@ -3252,10 +3278,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
@@ -3290,10 +3315,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
@@ -3328,11 +3352,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3352,7 +3375,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = 0; i < RF_PATH_NUM_8852A; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3368,8 +3391,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
__DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3458,10 +3481,10 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
}
}
-static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
@@ -3497,24 +3520,25 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path, s16 pwr_dbm, u8 enable)
+ u8 path, s16 pwr_dbm, u8 enable, const struct rtw89_chan *chan)
{
rtw8852a_bb_set_plcp_tx(rtwdev);
rtw8852a_bb_cfg_tx_path(rtwdev, path);
rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
- rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
+ rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan);
}
-static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx);
s8 power;
s16 xdbm;
u32 i, tx_counter = 0;
@@ -3546,9 +3570,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan);
mdelay(15);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan);
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
tx_counter;
@@ -3600,19 +3624,21 @@ void rtw8852a_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852a_dack(struct rtw89_dev *rtwdev)
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3620,34 +3646,35 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_iqk_init(rtwdev);
if (rtwdev->dbcc_en)
- _iqk_dbcc(rtwdev, phy_idx);
+ _iqk_dbcc(rtwdev, phy_idx, chanctx_idx);
else
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, is_afe);
+ _rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3655,7 +3682,7 @@ void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3666,8 +3693,10 @@ void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
@@ -3676,26 +3705,27 @@ void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
_tssi_slope_cal_org(rtwdev, phy, i);
_tssi_set_rf_gap_tbl(rtwdev, phy, i);
_tssi_set_slope(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
- _tssi_high_power(rtwdev, phy);
- _tssi_pre_tx(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
+ _tssi_high_power(rtwdev, phy, chan);
+ _tssi_pre_tx(rtwdev, phy, chanctx_idx);
}
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u8 i;
@@ -3710,14 +3740,14 @@ void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
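
Among the functions touched above, _dpk_agc() is the most intricate: a bounded step machine (DPK_AGC_ADJ_LMT caps the rounds) that alternates sync/digital-gain measurement, RX gain adjustment, and gain-loss one-shots until the TX AGC converges or is declared invalid. A simplified, runnable model of that control flow; the step names approximate the driver's and the measurement bodies are stubs:

#include <stdbool.h>
#include <stdio.h>

#define AGC_ADJ_LMT	6	/* mirrors DPK_AGC_ADJ_LMT */

enum agc_step {
	STEP_SYNC_DGAIN,	/* one-shot SYNC, read digital gain */
	STEP_GAIN_ADJ,		/* nudge RX BB gain into its window */
	STEP_GAIN_LOSS_IDX,	/* one-shot gain-loss measurement */
	STEP_SET_TX_GAIN,	/* apply TX AGC correction, re-check */
};

int main(void)
{
	enum agc_step step = STEP_SYNC_DGAIN;
	int txagc = 0x30, agc_cnt = 0;
	bool goout = false;

	do {
		switch (step) {
		case STEP_SYNC_DGAIN:
			/* the real code bails out with DPK_TXAGC_INVAL on
			 * sync failure; this stub always passes */
			step = STEP_GAIN_ADJ;
			break;
		case STEP_GAIN_ADJ:
			step = STEP_GAIN_LOSS_IDX;
			break;
		case STEP_GAIN_LOSS_IDX:
			step = STEP_SET_TX_GAIN;
			break;
		case STEP_SET_TX_GAIN:
			txagc--;	/* pretend gain loss said "too hot" */
			if (++agc_cnt >= AGC_ADJ_LMT)
				goout = true;
			step = STEP_SYNC_DGAIN;
			break;
		}
	} while (!goout);

	printf("final txagc=0x%x after %d rounds\n", txagc, agc_cnt);
	return 0;
}
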
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
index fa058ccc8616..8761f2cc9359 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
@@ -8,14 +8,19 @@
#include "core.h"
void rtw8852a_rck(struct rtw89_dev *rtwdev);
-void rtw8852a_dack(struct rtw89_dev *rtwdev);
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe);
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev);
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index a22847a311ad..12be52f76427 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -150,6 +150,15 @@ static const struct rtw89_rrsr_cfgs rtw8852b_rrsr_cfgs = {
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852b_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852b_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V1,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -549,29 +558,32 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
rtw8852b_dpk_init(rtwdev);
rtw8852b_rck(rtwdev);
- rtw8852b_dack(rtwdev);
- rtw8852b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852b_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
-static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852b_rx_dck(rtwdev, phy_idx);
- rtw8852b_iqk(rtwdev, phy_idx);
- rtw8852b_tssi(rtwdev, phy_idx, true);
- rtw8852b_dpk(rtwdev, phy_idx);
+ rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852b_tssi_scan(rtwdev, phy_idx);
+ rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
@@ -729,9 +741,11 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -807,6 +821,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -818,6 +833,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -880,6 +896,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rrsr_cfgs = &rtw8852b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8852b_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
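
Two call patterns coexist in the 8852B hunks above: rtw8852b_rfk_init() still pins RTW89_CHANCTX_0, since it runs at power-up before any vif (and thus any channel context) exists, while rtw8852b_rfk_channel() and rtw8852b_rfk_scan() derive both phy_idx and chanctx_idx from the requesting vif. A toy model of that split, with stand-in types:

#include <stdio.h>

enum chanctx_idx { CHANCTX_0, CHANCTX_1 };
struct vif { int phy_idx; enum chanctx_idx chanctx_idx; };

static void rx_dck(int phy_idx, enum chanctx_idx idx)
{
	printf("rx_dck phy=%d chanctx=%d\n", phy_idx, idx);
}

/* Boot-time: no vif exists yet, so the first context is assumed. */
static void rfk_init(void)
{
	rx_dck(0, CHANCTX_0);
}

/* Runtime: calibration is keyed off the vif that owns the channel. */
static void rfk_channel(const struct vif *vif)
{
	rx_dck(vif->phy_idx, vif->chanctx_idx);
}

int main(void)
{
	struct vif vif = { .phy_idx = 1, .chanctx_idx = CHANCTX_1 };

	rfk_init();
	rfk_channel(&vif);
	return 0;
}
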
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 1745c2882acf..ede0ca5426ae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1445,10 +1445,8 @@ static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
static
void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852bx_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1473,7 +1471,7 @@ void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
static
void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852bx_bb_pmac_info tx_info = {0};
@@ -1484,7 +1482,7 @@ void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
static
@@ -1623,9 +1621,9 @@ static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static
void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 rst_mask0;
u32 rst_mask1;
@@ -1713,9 +1711,10 @@ static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
if (rtwdev->hal.rx_nss == 1) {
@@ -1948,6 +1947,19 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void __rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u8 delta = phy_ppdu->rpl_avg - phy_ppdu->rssi_avg;
+ u8 *rssi = phy_ppdu->rssi;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rssi[i] += delta;
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -2030,6 +2042,7 @@ const struct rtw8852bx_info rtw8852bx_info = {
.ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
.ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = __rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi,
.read_efuse = __rtw8852bx_read_efuse,
.read_phycap = __rtw8852bx_read_phycap,
.power_trim = __rtw8852bx_power_trim,
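
The new __rtw8852bx_convert_rpl_to_rssi() above rebases the per-path RSSI samples onto the RX power level (RPL) average: it computes the delta between the two averages, shifts every path's RSSI by that delta, and adopts rpl_avg as the new rssi_avg. A standalone illustration with made-up readings (u8 arithmetic wraps modulo 256, which keeps the shift correct even when rpl_avg < rssi_avg):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t rssi[2] = {56, 58};		/* per-path RSSI */
		uint8_t rssi_avg = 57, rpl_avg = 61;	/* made-up averages */
		uint8_t delta = rpl_avg - rssi_avg;	/* 4 */
		int i;

		for (i = 0; i < 2; i++)
			rssi[i] += delta;		/* 60, 62 */
		rssi_avg = rpl_avg;			/* 61 */

		printf("rssi: %u %u, avg: %u\n",
		       (unsigned)rssi[0], (unsigned)rssi[1], (unsigned)rssi_avg);
		return 0;
	}
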
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
index 801e7ab9f4fa..3dce5422f41e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -121,13 +121,14 @@ struct rtw8852bx_info {
void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan);
void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
struct rtw8852bx_bb_tssi_bak *bak);
void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
@@ -145,6 +146,8 @@ struct rtw8852bx_info {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
@@ -207,9 +210,10 @@ void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
static inline
void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path, chan);
}
static inline
@@ -228,9 +232,10 @@ void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
static inline
void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx,
+ chan);
}
static inline
@@ -291,6 +296,13 @@ void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
}
static inline
+void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
+static inline
int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block)
{
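
The header hunks above extend the shared rtw8852bx_info ops table and its static inline forwarders with the widened bb_ctrl_rx_path/bb_set_pmac_pkt_tx signatures and the new convert_rpl_to_rssi hook, so both the 8852B and 8852BT modules keep sharing one _common implementation. A miniature of that dispatch pattern, with illustrative names only:

	/* Illustrative only: one const ops table per chip family, thin
	 * static inline forwarders on top; callers never see which
	 * variant filled the table.
	 */
	struct demo_ops {
		int (*query)(int arg);
	};

	static int demo_query_impl(int arg)
	{
		return arg * 2;
	}

	static const struct demo_ops demo_info = {
		.query = demo_query_impl,
	};

	static inline int demo_query(int arg)
	{
		return demo_info.query(arg);
	}
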
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 12354612441c..ef47a5facc83 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -1382,9 +1382,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
_iqk_info_iqk(rtwdev, phy_idx, path);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 reg_rf18;
u32 reg_35c;
@@ -1608,12 +1609,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1623,7 +1625,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1638,20 +1640,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1761,9 +1764,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -1786,9 +1789,10 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
@@ -1803,9 +1807,10 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
@@ -2217,9 +2222,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 step = DPK_AGC_STEP_SYNC_DGAIN;
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
@@ -2416,9 +2421,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2443,7 +2448,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2464,7 +2470,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
if (txagc == 0xff) {
@@ -2491,7 +2497,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
@@ -2503,7 +2510,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2519,19 +2527,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
- _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
- _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
@@ -2543,9 +2551,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2577,17 +2586,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852B_DPK_VER, rtwdev->hal.cv,
RTW8852B_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, RF_AB);
+ _dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2722,9 +2732,8 @@ static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2734,9 +2743,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
@@ -2778,7 +2786,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2792,7 +2800,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2944,9 +2951,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -2960,9 +2966,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3231,10 +3237,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3267,10 +3272,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3304,10 +3308,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3320,7 +3324,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3336,8 +3340,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3383,10 +3387,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3420,7 +3424,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3436,11 +3440,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3494,7 +3498,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3513,9 +3517,10 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3546,14 +3551,14 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
tssi_cw_rpt[j] =
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3567,14 +3572,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, 4};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
@@ -3635,7 +3639,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3755,18 +3759,19 @@ void rtw8852b_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852b_dack(struct rtw89_dev *rtwdev)
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3774,15 +3779,16 @@ void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3795,9 +3801,10 @@ void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3806,7 +3813,7 @@ void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3817,9 +3824,11 @@ void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 tx_en;
u8 i;
@@ -3829,34 +3838,34 @@ void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3879,24 +3888,25 @@ void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3904,7 +3914,7 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
if (enable) {
if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
- rtw8852b_tssi(rtwdev, phy, true);
+ rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
return;
}
@@ -3921,8 +3931,8 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -3935,12 +3945,13 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
index f52832065600..c31ba446e6e0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852b_rck(struct rtw89_dev *rtwdev);
-void rtw8852b_dack(struct rtw89_dev *rtwdev);
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
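
With the rtw8852b_rfk.h prototypes above, every calibration entry point now receives an explicit enum rtw89_chanctx_idx (or an already-resolved struct rtw89_chan) instead of hard-coding the first context; note that RTW89_CHANCTX_0 is the renamed RTW89_SUB_ENTITY_0 seen in the removed lines. A sketch of a caller after this change, mirroring what the new 8852BT glue below does with rtwvif->chanctx_idx:

	/* Illustrative caller only; the real 8852BT version appears in
	 * rtw8852bt.c further down as rtw8852bt_rfk_channel().
	 */
	static void demo_rfk_channel(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif)
	{
		enum rtw89_chanctx_idx idx = rtwvif->chanctx_idx;
		enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;

		rtw8852b_rx_dck(rtwdev, phy_idx, idx);
		rtw8852b_iqk(rtwdev, phy_idx, idx);
		rtw8852b_tssi(rtwdev, phy_idx, true, idx);
		rtw8852b_dpk(rtwdev, phy_idx, idx);
	}
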
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
new file mode 100644
index 000000000000..7dfdcb5964e1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+#include "rtw8852bt_rfk.h"
+#include "rtw8852b_common.h"
+
+#define RTW8852BT_FW_FORMAT_MAX 0
+#define RTW8852BT_FW_BASENAME "rtw89/rtw8852bt_fw"
+#define RTW8852BT_MODULE_FIRMWARE \
+ RTW8852BT_FW_BASENAME ".bin"
+
+static const struct rtw89_hfc_ch_cfg rtw8852bt_hfc_chcfg_pcie[] = {
+ {16, 742, grp_0}, /* ACH 0 */
+ {16, 742, grp_0}, /* ACH 1 */
+ {16, 742, grp_0}, /* ACH 2 */
+ {16, 742, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {15, 743, grp_0}, /* B0MGQ */
+ {15, 743, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852bt_hfc_pubcfg_pcie = {
+ 958, /* Group 0 */
+ 0, /* Group 1 */
+ 958, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852bt_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852bt_hfc_chcfg_pcie, &rtw8852bt_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8852bt_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size23,
+ &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23,
+ &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57,
+ &rtw89_mac_size.ple_qt59},
+ [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size23,
+ &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23,
+ &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57,
+ &rtw89_mac_size.ple_qt_52bt_wow},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4,
+ &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const u32 rtw8852bt_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852bt_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const u32 rtw8852bt_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
+static const struct rtw89_page_regs rtw8852bt_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852bt_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
+static const struct rtw89_imr_info rtw8852bt_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET,
+ .wsec_imr_reg = R_AX_SEC_DEBUG,
+ .wsec_imr_set = B_AX_IMR_ERROR,
+ .mpdu_tx_imr_set = 0,
+ .mpdu_rx_imr_set = 0,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR_V01,
+ .wde_imr_set = B_AX_WDE_IMR_SET_V01,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR,
+ .ple_imr_set = B_AX_PLE_IMR_SET,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V01,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
+ .other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
+ .bbrpt_err_imr_set = 0,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_ALL,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET,
+ .cdma_imr_0_reg = R_AX_DLE_CTRL,
+ .cdma_imr_0_clr = B_AX_DLE_IMR_CLR,
+ .cdma_imr_0_set = B_AX_DLE_IMR_SET,
+ .cdma_imr_1_reg = 0,
+ .cdma_imr_1_clr = 0,
+ .cdma_imr_1_set = 0,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR,
+ .phy_intf_imr_clr = B_AX_PHYINFO_IMR_EN_ALL,
+ .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET,
+ .rmac_imr_reg = R_AX_RMAC_ERR_ISR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET,
+ .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET,
+};
+
+static const struct rtw89_rrsr_cfgs rtw8852bt_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_rfkill_regs rtw8852bt_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
+static const struct rtw89_dig_regs rtw8852bt_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V1,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V1,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL_V1,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+	{255, 1, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+	{255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+	{255, 1, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852bt_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4aa4),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4778),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x476c),
+};
+
+static const u8 rtw89_btc_8852bt_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40};
+static const u8 rtw89_btc_8852bt_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+
+static int rtw8852bt_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_OCP_L1_MASK, 7);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_SRAM_CTRL, 0, XTAL_SI_SRAM_DIS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+
+ if (!rtwdev->efuse.valid || rtwdev->efuse.power_k_valid)
+ goto func_en;
+
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VOL_L1_MASK, 0x9);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VREFPFM_L_MASK, 0xA);
+
+func_en:
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_DMACREG_GCKEN);
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN |
+ B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN | B_AX_TMAC_EN |
+ B_AX_RMAC_EN);
+
+ rtw89_write32_mask(rtwdev, R_AX_EECS_EESK_FUNC_SEL,
+ B_AX_PINMUX_EESK_FUNC_SEL_MASK, 0x1);
+
+ return 0;
+}
+
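
The power-on sequence above leans on the read_poll_timeout() macro from <linux/iopoll.h>: it re-reads a register into val every sleep_us microseconds until the condition holds, returning 0 on success or -ETIMEDOUT once timeout_us elapses. A rough open-coded equivalent of the first wait, for illustration only (the macro additionally re-checks the condition once after the deadline, and the demo_ name is hypothetical):

	static int demo_wait_syspwr_ready(struct rtw89_dev *rtwdev)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(20000);
		u32 val32;

		for (;;) {
			val32 = rtw89_read32(rtwdev, R_AX_SYS_PW_CTRL);
			if (val32 & B_AX_RDY_SYSPWR)
				return 0;
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			usleep_range(1000, 1100);	/* sleep_us == 1000 above */
		}
	}
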
+static int rtw8852bt_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x3);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
+static void rtw8852bt_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static void rtw8852bt_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ rtw8852bx_bb_reset_all(rtwdev, phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+			       B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x3);
+	rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+			       B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI, 0x3);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+}
+
+static void rtw8852bt_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852bt_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8852bt_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {R_P0_TSSI_TRK, R_P1_TSSI_TRK};
+
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x1);
+}
+
+static void rtw8852bt_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
+ u8 phy_idx, const struct rtw89_chan *chan)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static void rtw8852bt_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0xf);
+}
+
+static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0, chan);
+ rtw8852bt_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw8852bt_adc_en(rtwdev, true);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan);
+ rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ }
+}
+
+static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
+{
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+
+ rtw8852bt_dpk_init(rtwdev);
+ rtw8852bt_rck(rtwdev);
+ rtw8852bt_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+}
+
+static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
+
+ rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
+}
+
+static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
+{
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
+}
+
+static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
+{
+ rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
+}
+
+static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852bt_dpk_track(rtwdev);
+}
+
+static void rtw8852bt_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
+
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.kt_ver_adie = rtwdev->hal.acv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
+ md->md_v7.wa_type = 0;
+
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.ant.num = 2;
+ md->md_v7.ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ /* WL 1-stream+1-Ant is located at 0:s0(path-A) or 1:s1(path-B) */
+ md->md_v7.ant.single_pos = RF_PATH_A;
+ md->md_v7.ant.btg_pos = RF_PATH_B;
+
+ if (md->md_v7.rfe_type == 0) {
+ rtwdev->btc.dm.error.map.rfe_type0 = true;
+ return;
+ }
+
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2) ? 2 : 3;
+ md->md_v7.ant.stream_cnt = 2;
+ md->md_v7.wa_type |= BTC_WA_INIT_SCAN;
+
+ if (md->md_v7.ant.num == 2) {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.wa_type |= BTC_WA_HFP_LAG;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ }
+ } else {
+ return;
+ }
+}
+
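
rtw8852bt_btc_set_rfe() above derives the antenna topology from the efuse RFE type: an odd type means two shared antennas (BT sits in the BTG path), an even type means three antennas with a dedicated BT antenna, and type 0 is flagged as a config error. A standalone decode of that rule with made-up inputs:

	#include <stdio.h>

	int main(void)
	{
		int rfe_type;

		for (rfe_type = 1; rfe_type <= 4; rfe_type++) {
			int ant_num = (rfe_type % 2) ? 2 : 3;

			printf("rfe_type=%d -> %d antennas (%s)\n",
			       rfe_type, ant_num,
			       ant_num == 2 ? "shared" : "dedicated");
		}
		return 0;
	}
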
+static void
+rtw8852bt_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0));
+ u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16));
+
+ switch (ctrl_all_time) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_VALUE_MASK,
+ ctrl_all_time);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL,
+ B_AX_FORCE_PWR_BY_RATE_EN, 0x1);
+ break;
+ }
+
+ switch (ctrl_gnt_bt) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_MASK, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_MASK, ctrl_gnt_bt);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL,
+ B_AX_TXAGC_BT_EN, 0x1);
+ break;
+ }
+}
+
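
rtw8852bt_btc_set_wl_txpwr_ctrl() above unpacks one u32 into two force-control fields: bits 15:0 force the by-rate TX power and bits 31:16 force the TX AGC value under GNT_BT, with 0xffff in either field meaning "release the force". A standalone illustration of the split (plain shifts stand in for the kernel's u32_get_bits()/GENMASK()):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t txpwr_val = 0x00140016;		/* made-up input */
		uint16_t ctrl_all_time = txpwr_val & 0xffff;	/* 0x0016 */
		uint16_t ctrl_gnt_bt = txpwr_val >> 16;		/* 0x0014 */

		printf("all_time=0x%04x gnt_bt=0x%04x\n",
		       ctrl_all_time, ctrl_gnt_bt);
		return 0;
	}
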
+static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
+ .enable_bb_rf = rtw8852bx_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852bx_mac_disable_bb_rf,
+ .bb_preinit = NULL,
+ .bb_postinit = NULL,
+ .bb_reset = rtw8852bt_bb_reset,
+ .bb_sethw = rtw8852bx_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_channel = rtw8852bt_set_channel,
+ .set_channel_help = rtw8852bt_set_channel_help,
+ .read_efuse = rtw8852bx_read_efuse,
+ .read_phycap = rtw8852bx_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
+ .rfk_init = rtw8852bt_rfk_init,
+ .rfk_init_late = NULL,
+ .rfk_channel = rtw8852bt_rfk_channel,
+ .rfk_band_changed = rtw8852bt_rfk_band_changed,
+ .rfk_scan = rtw8852bt_rfk_scan,
+ .rfk_track = rtw8852bt_rfk_track,
+ .power_trim = rtw8852bx_power_trim,
+ .set_txpwr = rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852bx_init_txpwr_unit,
+ .get_thermal = rtw8852bx_get_thermal,
+ .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
+ .pwr_on_func = rtw8852bt_pwr_on_func,
+ .pwr_off_func = rtw8852bt_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
+ .h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
+
+ .btc_set_rfe = rtw8852bt_btc_set_rfe,
+ .btc_init_cfg = rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852bt_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
+};
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8852bt = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
+const struct rtw89_chip_info rtw8852bt_chip_info = {
+ .chip_id = RTL8852BT,
+ .chip_gen = RTW89_CHIP_AX,
+ .ops = &rtw8852bt_chip_ops,
+ .mac_def = &rtw89_mac_gen_ax,
+ .phy_def = &rtw89_phy_gen_ax,
+ .fw_basename = RTW8852BT_FW_BASENAME,
+ .fw_format_max = RTW8852BT_FW_FORMAT_MAX,
+ .try_ce_fw = true,
+ .bbmcu_nr = 0,
+ .needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
+ .fifo_size = 458752,
+ .small_fifo_size = true,
+ .dle_scc_rsvd_size = 98304,
+ .max_amsdu_limit = 5000,
+ .dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x6f800,
+ .hfc_param_ini = rtw8852bt_hfc_param_ini_pcie,
+ .dle_mem = rtw8852bt_dle_mem_pcie,
+ .wde_qempty_acq_grpnum = 4,
+ .wde_qempty_mgq_grpsel = 4,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = NULL,
+ .bb_gain_table = NULL,
+ .rf_table = {},
+ .nctl_table = NULL,
+ .nctl_post_table = NULL,
+ .dflt_parms = NULL,
+ .rfe_parms_conf = NULL,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .dig_regs = &rtw8852bt_dig_regs,
+ .tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
+ .support_chanctx_num = 1,
+ .support_rnr = false,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ .support_unii4 = true,
+ .ul_tb_waveform_ctrl = true,
+ .ul_tb_pwr_diff = false,
+ .hw_sec_hdr = false,
+ .hw_mgmt_tx_encrypt = false,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 10,
+ .scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_ver = RTW89_BACAM_V0,
+ .ppdu_max_usr = 4,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .efuse_blocks = NULL,
+ .phycap_addr = 0x580,
+ .phycap_size = 128,
+ .para_ver = 0,
+ .wlcx_desired = 0x070e0000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852bt_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852bt_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852bt_mon_reg),
+ .mon_reg = rtw89_btc_8852bt_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852bt_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852bt_rf_dl,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
+ .txwd_info_size = sizeof(struct rtw89_txwd_info),
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8852bt_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8852bt_c2h_regs,
+ .page_regs = &rtw8852bt_page_regs,
+ .wow_reason_reg = rtw8852bt_wow_wakeup_regs,
+ .cfo_src_fd = true,
+ .cfo_hw_comp = true,
+ .dcfo_comp = &rtw8852bt_dcfo_comp,
+ .dcfo_comp_sft = 10,
+ .imr_info = &rtw8852bt_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
+ .rrsr_cfgs = &rtw8852bt_rrsr_cfgs,
+ .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .rfkill_init = &rtw8852bt_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+ .edcca_regs = &rtw8852bt_edcca_regs,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8852bt,
+#endif
+ .xtal_info = NULL,
+};
+EXPORT_SYMBOL(rtw8852bt_chip_info);
+
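The h2c_counter_reg and c2h_counter_reg initializers above pair a rebased address with a rebased mask: R_AX_UDM1 + 1 selects the second byte of the 32-bit UDM1 register, and shifting the field mask right by 8 keeps it aligned with that byte-wide access. A standalone sketch of the idea, with a hypothetical base address:

/* Editor's sketch of the {REG + 1, MASK >> 8} rebasing used for
 * h2c_counter_reg above.  The 0x1c4 base address is hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define REG_UDM1	 0x1c4u        /* hypothetical 32-bit register */
#define H2C_DEQ_CNT_MASK 0x0000ff00u   /* counter lives in bits [15:8] */

struct reg_def {
	uint32_t addr;
	uint8_t mask;
};

int main(void)
{
	/* Byte 1 of the register, mask shifted down into byte 0. */
	struct reg_def cr = { REG_UDM1 + 1, H2C_DEQ_CNT_MASK >> 8 };
	uint8_t readback = 0x5a;  /* pretend 8-bit read at cr.addr */

	printf("h2c deq cnt = %u (addr 0x%x)\n",
	       (unsigned int)(readback & cr.mask), (unsigned int)cr.addr);
	return 0;
}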
+MODULE_FIRMWARE(RTW8852BT_MODULE_FIRMWARE);
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BT driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
index 6177f36ad667..b76b36aaf025 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
@@ -10,4 +10,6 @@
#define RF_PATH_NUM_8852BT 2
#define BB_PATH_NUM_8852BT 2
+extern const struct rtw89_chip_info rtw8852bt_chip_info;
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index fa0e49d58112..336a83e1d46b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1525,9 +1525,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
lok_result, txk_result, rxk_result);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 get_empty_table = false;
u32 reg_rf18;
@@ -1755,12 +1756,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1770,7 +1772,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852BT_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
@@ -1785,20 +1787,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1824,7 +1827,7 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o
BIT(24), val);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
- kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable");
+ kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
}
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@@ -1863,7 +1866,7 @@ static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
id == 0x14 ? "PWR_CAL" :
id == 0x15 ? "DPK_RXAGC" :
id == 0x16 ? "KIP_PRESET" :
- id == 0x17 ? "KIP_RESOTRE" :
+ id == 0x17 ? "KIP_RESTORE" :
"DPK_TXAGC", dpk_cmd);
}
@@ -1879,9 +1882,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2277,9 +2280,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
@@ -2504,9 +2507,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 idx, cur_band, cur_ch;
bool is_reload = false;
@@ -2549,7 +2552,8 @@ void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool i
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2569,7 +2573,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
_rfk_get_thermal(rtwdev, kidx, path);
@@ -2601,7 +2605,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_kip_val[BACKUP_KIP_REGS_NR];
@@ -2611,7 +2616,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
u8 path;
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2623,7 +2628,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
@@ -2631,7 +2636,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
_rfk_bb_afe_setting(rtwdev, phy, path, kpath);
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
- _dpk_main(rtwdev, phy, path, 1);
+ _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_rfk_bb_afe_restore(rtwdev, phy, path, kpath);
@@ -2646,9 +2651,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2817,9 +2823,8 @@ static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2829,9 +2834,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
@@ -2878,7 +2882,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2893,7 +2897,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
})
struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3047,9 +3050,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -3063,9 +3065,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3310,10 +3312,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3346,10 +3347,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3383,10 +3383,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3399,7 +3399,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3415,8 +3415,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3463,10 +3463,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 channel = chan->channel;
u8 band;
@@ -3500,7 +3500,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3516,11 +3516,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3574,7 +3574,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3593,9 +3593,11 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
+ chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3626,7 +3628,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
@@ -3634,7 +3636,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3648,14 +3650,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, -8};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
u8 channel = chan->channel;
@@ -3701,7 +3702,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3833,18 +3834,19 @@ void rtw8852bt_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3852,15 +3854,16 @@ void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3873,15 +3876,16 @@ void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
- if (_dpk_bypass_check(rtwdev, phy_idx))
+ if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
_dpk_force_bypass(rtwdev, phy_idx);
else
- _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
}
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
@@ -3889,10 +3893,12 @@ void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 reg_backup[2] = {};
u32 tx_en;
u8 i;
@@ -3905,36 +3911,36 @@ void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3957,24 +3963,25 @@ void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3996,8 +4003,8 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -4010,10 +4017,237 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
+}
+
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool dav)
+{
+ u32 rf_reg18;
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ if (rf_reg18 == INV_RF_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set BW\n");
+ }
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, 0x%x = 0x%x\n",
+ bw, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
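_bw_setting() performs a classic read-modify-write on RF register 0x18: read the word, clear the bandwidth field, OR in a value positioned with FIELD_PREP(), then write it back. A self-contained sketch of the arithmetic, with field_prep() reimplemented locally since this example does not pull in the kernel header:

/* Editor's sketch of the read-modify-write bitfield update that
 * _bw_setting() performs with FIELD_PREP().  Mask and value below
 * are stand-ins, not the real RR_CFGCH_BW definitions. */
#include <stdint.h>
#include <stdio.h>

#define BW_MASK	0x00c00000u  /* stand-in for RR_CFGCH_BW   */
#define BW_40M	0x1u         /* stand-in for CFGCH_BW_40M  */

/* Shift a field value into position under @mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t reg18 = 0x12345678u;  /* pretend RF 0x18 readback */

	reg18 &= ~BW_MASK;                    /* clear the BW field   */
	reg18 |= field_prep(BW_MASK, BW_40M); /* insert the new value */

	printf("reg18 = 0x%08x\n", reg18);
	return 0;
}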
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ _bw_setting(rtwdev, RF_PATH_A, bw, true);
+ _bw_setting(rtwdev, RF_PATH_B, bw, true);
+ _bw_setting(rtwdev, RF_PATH_A, bw, false);
+ _bw_setting(rtwdev, RF_PATH_B, bw, false);
+}
+
+static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
+{
+ u32 tmp;
+ int ret;
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
+ false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ return !!ret;
+}
+
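_set_s0_arfc18() arms the LCK trigger, writes the channel word, and then relies on read_poll_timeout_atomic() to busy-poll the RR_LPF busy flag until it clears or 1000 us elapse. A plain-C sketch of that poll-with-deadline idiom, with a fake register read standing in for the RF access:

/* Editor's sketch of the read_poll_timeout_atomic() idiom used in
 * _set_s0_arfc18(): poll a busy flag until it clears or a deadline
 * passes.  fake_read_busy() stands in for the RF register read. */
#include <stdio.h>

static int busy_polls_left = 3;

static unsigned int fake_read_busy(void)
{
	return busy_polls_left-- > 0 ? 1 : 0;  /* busy 3 times, then idle */
}

/* Returns 0 on success, -1 on timeout (the kernel macro returns
 * -ETIMEDOUT and also delays between reads). */
static int poll_until_idle(unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if (fake_read_busy() == 0)
			return 0;  /* condition met: synthesizer idle */
	}
	return -1;                 /* deadline expired */
}

int main(void)
{
	if (poll_until_idle(1000))
		printf("[LCK]LCK timeout\n");
	else
		printf("locked\n");
	return 0;
}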
+static void _lck_check(struct rtw89_dev *rtwdev)
+{
+ u32 tmp;
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
+ }
+
+ udelay(10);
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
+
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
+ }
+}
+
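_lck_check() recovers a failed synthesizer lock with escalating remedies, re-reading the RR_SYNFB lock bit between stages: first an MMD reset, then re-programming RF 0x18, finally power-cycling the synthesizer. A condensed sketch of that staged-recovery shape, with stub remedies:

/* Editor's sketch of the staged lock recovery in _lck_check(): try
 * progressively heavier remedies, re-checking lock in between. */
#include <stdbool.h>
#include <stdio.h>

static int checks_until_lock = 2;

static bool syn_locked(void)
{
	return checks_until_lock-- <= 0;  /* lock reads set on 3rd check */
}

static void mmd_reset(void)       { printf("stage 1: SYN MMD reset\n"); }
static void rewrite_rf18(void)    { printf("stage 2: re-set RF 0x18\n"); }
static void syn_power_cycle(void) { printf("stage 3: SYN off/on\n"); }

int main(void)
{
	if (!syn_locked())
		mmd_reset();
	if (!syn_locked())
		rewrite_rf18();
	if (!syn_locked())
		syn_power_cycle();
	return 0;
}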
+static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
+{
+ bool timeout;
+ u32 bak;
+
+ bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
+ timeout = _set_s0_arfc18(rtwdev, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
+ if (!timeout)
+ _lck_check(rtwdev);
+}
+
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, bool dav)
+{
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+ bool is_2g_ch = central_ch <= 14;
+ u32 rf_reg18;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
+ RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ if (!is_2g_ch)
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
+ FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+
+ if (path == RF_PATH_A && dav)
+ _set_ch(rtwdev, rf_reg18);
+ else
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
+ central_ch, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
+{
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
+ _ch_setting(rtwdev, RF_PATH_B, central_ch, false);
+}
+
+static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
+
+ if (bw == RTW89_CHANNEL_WIDTH_20)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
+ else if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n",
+ path, rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+}
+
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath, path;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ _set_rxbb_bw(rtwdev, bw, path);
+ }
+}
+
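_rxbb_bw() expands the path bitmask returned by _kpath() and applies the RX baseband filter setting only to the paths whose bit is set. A tiny sketch of the bitmask walk:

/* Editor's sketch of the path-bitmask iteration in _rxbb_bw():
 * visit each path index and skip those not selected in kpath. */
#include <stdio.h>

#define RF_PATH_NUM 2

int main(void)
{
	unsigned int kpath = 0x3;  /* RF_A | RF_B, as _kpath() may return */
	unsigned int path;

	for (path = 0; path < RF_PATH_NUM; path++) {
		if (!(kpath & (1u << path)))
			continue;  /* path not selected for calibration */
		printf("set RXBB bandwidth on path %u\n", path);
	}
	return 0;
}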
+static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 central_ch,
+ enum rtw89_band band, enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, central_ch);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
index 09918835c6e8..e34560b4905f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -8,15 +8,24 @@
#include "core.h"
void rtw8852bt_rck(struct rtw89_dev *rtwdev);
-void rtw8852bt_dack(struct rtw89_dev *rtwdev);
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
new file mode 100644
index 000000000000..702948119646
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+
+static const struct rtw89_pci_info rtw8852bt_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = 0,
+ .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO},
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
+ .mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_single,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
+};
+
+static const struct rtw89_driver_info rtw89_8852bte_info = {
+ .chip = &rtw8852bt_chip_info,
+ .quirks = NULL,
+ .bus = {
+ .pci = &rtw8852bt_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852bte_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb520),
+ .driver_data = (kernel_ulong_t)&rtw89_8852bte_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852bte_id_table);
+
+static struct pci_driver rtw89_8852bte_driver = {
+ .name = "rtw89_8852bte",
+ .id_table = rtw89_8852bte_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852bte_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE-VT driver");
+MODULE_LICENSE("Dual BSD/GPL");
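rtw8852bte.c above is deliberately thin bus glue: one PCI ID mapped through driver_data to the chip and bus descriptors, with probe/remove supplied by the shared rtw89 PCI core. The skeleton below shows the same registration pattern in isolation; the names and the 0x1234 device ID are hypothetical:

/* Editor's sketch of the per-chip PCI glue pattern rtw8852bte.c
 * follows: an ID table whose driver_data points at a chip descriptor,
 * plus module_pci_driver() to generate init/exit boilerplate. */
#include <linux/module.h>
#include <linux/pci.h>

struct demo_info {
	const char *name;
};

static const struct demo_info demo_chip_info = {
	.name = "demo-chip",
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct demo_info *info = (const struct demo_info *)id->driver_data;

	dev_info(&pdev->dev, "probed %s\n", info->name);
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id demo_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x1234), /* hypothetical ID */
		.driver_data = (kernel_ulong_t)&demo_chip_info,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, demo_id_table);

static struct pci_driver demo_driver = {
	.name = "demo_pci_glue",
	.id_table = demo_id_table,
	.probe = demo_probe,
	.remove = demo_remove,
};
module_pci_driver(demo_driver);

MODULE_DESCRIPTION("Editor's sketch of a per-chip PCI glue module");
MODULE_LICENSE("GPL");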
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 193168dc7b6c..1c6e89ab0f4b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -14,10 +14,10 @@
#include "rtw8852c_table.h"
#include "util.h"
-#define RTW8852C_FW_FORMAT_MAX 0
+#define RTW8852C_FW_FORMAT_MAX 1
#define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw"
#define RTW8852C_MODULE_FIRMWARE \
- RTW8852C_FW_BASENAME ".bin"
+ RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin"
static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = {
{13, 1614, grp_0}, /* ACH 0 */
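The firmware-name change in the hunk above works through __stringify(): because the macro expands its argument before stringizing, RTW8852C_FW_FORMAT_MAX becomes the literal "1", and adjacent string literals then concatenate into rtw89/rtw8852c_fw-1.bin. A standalone demonstration, with the two-level stringify macro reimplemented locally:

/* Editor's sketch of the __stringify() + literal-pasting trick used
 * to build the versioned firmware filename above. */
#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...)   __stringify_1(x)

#define FW_FORMAT_MAX 1
#define FW_BASENAME   "rtw89/rtw8852c_fw"
/* Double expansion stringizes the value 1, not the macro name;
 * adjacent literals then concatenate at compile time. */
#define MODULE_FW     FW_BASENAME "-" __stringify(FW_FORMAT_MAX) ".bin"

int main(void)
{
	printf("%s\n", MODULE_FW);  /* prints rtw89/rtw8852c_fw-1.bin */
	return 0;
}

The same double expansion is why bumping the single integer at the top of the file is enough to retarget the firmware path the module requests.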
@@ -147,6 +147,15 @@ static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = {
.rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8852c_rfkill_regs = {
+ .pinmux = {R_AX_GPIO8_15_FUNC_SEL,
+ B_AX_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_AX_GPIO_EXT_CTRL + 2,
+ (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8852c_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1808,7 +1817,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx, chan);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
@@ -1816,7 +1825,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx, chan);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
@@ -1833,31 +1842,34 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
rtw8852c_dpk_init(rtwdev);
rtw8852c_rck(rtwdev);
- rtw8852c_dack(rtwdev);
+ rtw8852c_dack(rtwdev, RTW89_CHANCTX_0);
rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
}
-static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
rtw8852c_rx_dck(rtwdev, phy_idx, false);
- rtw8852c_iqk(rtwdev, phy_idx);
- rtw8852c_tssi(rtwdev, phy_idx);
- rtw8852c_dpk(rtwdev, phy_idx);
+ rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852c_tssi_scan(rtwdev, phy_idx);
+ rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
}
-static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
- rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+ rtw8852c_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
}
static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
@@ -2108,7 +2120,7 @@ rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 band = chan->band_type;
u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
@@ -2840,10 +2852,12 @@ static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2876,9 +2890,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.get_thermal = rtw8852c_get_thermal,
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2946,6 +2962,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2959,6 +2976,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.hw_sec_hdr = true,
+ .hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -3022,6 +3040,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
+ .rfkill_init = &rtw8852c_rfkill_regs,
+ .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8852c_edcca_regs,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 743f7014bf3e..211c051c2967 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -5,6 +5,7 @@
#include "chan.h"
#include "coex.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
@@ -584,11 +585,12 @@ static void _drck(struct rtw89_dev *rtwdev)
rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -1321,9 +1323,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
@@ -1516,12 +1519,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1531,7 +1535,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852C_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
@@ -1544,18 +1548,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1901,9 +1906,9 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2495,9 +2500,9 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2689,7 +2694,8 @@ static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_byb
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
@@ -2705,7 +2711,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2722,7 +2729,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
path, dpk->cur_idx[path]);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
@@ -2755,10 +2762,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 band = chan->band_type;
if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
@@ -2790,17 +2798,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852C_DPK_VER, rtwdev->hal.cv,
RTW8852C_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
rtw8852c_rx_dck(rtwdev, phy, false);
@@ -2891,9 +2900,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_bandwidth bw = chan->band_width;
enum rtw89_band band = chan->band_type;
u32 clk = 0x0;
@@ -2945,9 +2953,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
}
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -2972,7 +2979,7 @@ static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2985,8 +2992,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
} \
__val; \
})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3001,56 +3008,88 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
switch (subband) {
default:
case RTW89_CH_2G:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
break;
case RTW89_CH_5G_BAND_1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
break;
case RTW89_CH_5G_BAND_3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
break;
case RTW89_CH_5G_BAND_4:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX0:
case RTW89_CH_6G_BAND_IDX1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
break;
case RTW89_CH_6G_BAND_IDX2:
case RTW89_CH_6G_BAND_IDX3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
break;
case RTW89_CH_6G_BAND_IDX4:
case RTW89_CH_6G_BAND_IDX5:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX6:
case RTW89_CH_6G_BAND_IDX7:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
break;
}
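
The repeated ternaries above prefer the thermal-delta tables carried in the loaded firmware element (trk) and fall back to the driver's static rtw89_8852c_trk_cfg tables when the firmware file does not provide one. A minimal userspace sketch of the same select-with-fallback idiom (all names below are illustrative, not the driver's):

    #include <stdio.h>

    #define N_THM 4 /* hypothetical table length */

    static const signed char static_up[N_THM]   = { 1, 2, 3, 4 };
    static const signed char static_down[N_THM] = { -1, -2, -3, -4 };

    /* fw_tbl is NULL when no firmware tracking element was loaded */
    static const signed char *pick_tbl(const signed char *fw_tbl,
                                       const signed char *fallback)
    {
            return fw_tbl ? fw_tbl : fallback;
    }

    int main(void)
    {
            const signed char fw_up[N_THM] = { 5, 6, 7, 8 };
            const signed char *up   = pick_tbl(fw_up, static_up);   /* fw wins */
            const signed char *down = pick_tbl(NULL,  static_down); /* fallback */

            printf("up[0]=%d down[0]=%d\n", up[0], down[0]);
            return 0;
    }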
@@ -3158,9 +3197,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -3175,9 +3213,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
@@ -3586,10 +3624,9 @@ static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
@@ -3650,10 +3687,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
@@ -3715,10 +3751,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3741,7 +3776,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = path; i < path_max; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3757,8 +3792,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3781,7 +3816,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
}
static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 tssi_trk[2] = {0x5818, 0x7818};
static const u32 tssi_en[2] = {0x5820, 0x7820};
@@ -3790,25 +3825,26 @@ static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
if (rtwdev->dbcc_en && path == RF_PATH_B)
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan);
else
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan);
} else {
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
}
}
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
} else {
if (phy_idx == RTW89_PHY_0)
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
else
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
}
}
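
The pattern running through this series replaces an internal rtw89_chan_get() lookup tied to a fixed context index with an explicit chan argument supplied by the caller, so the same helper can serve any channel context. Sketched in miniature (hypothetical names, not the driver's API):

    #include <stdio.h>

    struct chan { int channel; };

    static struct chan chan_tbl[2] = { { 6 }, { 36 } };

    /* before: the helper resolved the channel itself from a fixed index */
    static void helper_old(void)
    {
            struct chan *c = &chan_tbl[0]; /* always context 0 */
            printf("old: ch=%d\n", c->channel);
    }

    /* after: the caller picks the context and passes the channel down */
    static void helper_new(const struct chan *c)
    {
            printf("new: ch=%d\n", c->channel);
    }

    int main(void)
    {
            helper_old();
            helper_new(&chan_tbl[1]); /* now usable for a second context */
            return 0;
    }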
@@ -4079,10 +4115,10 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
- chan_idx = RTW89_SUB_ENTITY_1;
+ chan_idx = RTW89_CHANCTX_1;
break;
default:
- chan_idx = RTW89_SUB_ENTITY_0;
+ chan_idx = RTW89_CHANCTX_0;
break;
}
@@ -4112,26 +4148,27 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852c_dack(struct rtw89_dev *rtwdev)
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
@@ -4202,10 +4239,11 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u8 dck_channel;
u8 cur_thermal;
u32 tx_en;
@@ -4259,16 +4297,17 @@ void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
dpk->is_dpk_reload_en = false;
}
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -4279,8 +4318,10 @@ void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -4298,23 +4339,24 @@ void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i, chan);
_tssi_set_bbgain_split(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
_tssi_set_slope(rtwdev, phy, i);
_tssi_run_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
@@ -4339,15 +4381,15 @@ void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_dck(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
@@ -4422,7 +4464,7 @@ void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
dpk->is_dpk_enable = true;
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
_dpk_onoff(rtwdev, path, false);
- rtw8852c_dpk(rtwdev, RTW89_PHY_0);
+ rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 6605137e61aa..306dd0a0be73 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -9,16 +9,21 @@
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_rck(struct rtw89_dev *rtwdev);
-void rtw8852c_dack(struct rtw89_dev *rtwdev);
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
void rtw8852c_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan);
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 2af568a3264d..63b1ff2f98ed 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -13,10 +13,10 @@
#include "rtw8922a_rfk.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 0
+#define RTW8922A_FW_FORMAT_MAX 1
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
- RTW8922A_FW_BASENAME ".bin"
+ RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
#define HE_N_USER_MAX_8922A 4
@@ -165,6 +165,15 @@ static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
.rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
};
+static const struct rtw89_rfkill_regs rtw8922a_rfkill_regs = {
+ .pinmux = {R_BE_GPIO8_15_FUNC_SEL,
+ B_BE_PINMUX_GPIO9_FUNC_SEL_MASK,
+ 0xf},
+ .mode = {R_BE_GPIO_EXT_CTRL + 2,
+ (B_BE_GPIO_MOD_9 | B_BE_GPIO_IO_SEL_9) >> 16,
+ 0x0},
+};
+
static const struct rtw89_dig_regs rtw8922a_dig_regs = {
.seg0_pd_reg = R_SEG0R_PD_V2,
.pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
@@ -1680,9 +1689,63 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
return 0;
}
+#define DIGITAL_PWR_COMP_REG_NUM 22
+static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303,
+ 0x05030302, 0x06060605, 0x06050300, 0x0A090807, 0x02000B0B,
+ 0x09080604, 0x0D0D0C0B, 0x08060400, 0x110F0C0B, 0x05001111,
+ 0x0D0C0907, 0x12121210},
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x04030201, 0x05050505, 0x01000505,
+ 0x07060504, 0x09090908, 0x09070400, 0x0E0D0C0B, 0x03000E0E,
+ 0x0D0B0907, 0x1010100F, 0x0B080500, 0x1512100D, 0x05001515,
+ 0x100D0B08, 0x15151512},
+};
+
+static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ bool enable, u8 nss,
+ enum rtw89_rf_path path)
+{
+ static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1};
+ const u32 *digital_pwr_comp;
+ u32 addr, val;
+ u32 i;
+
+ if (nss == 1)
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+
+ addr = ltpc_t0[path];
+ for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) {
+ val = enable ? digital_pwr_comp[i] : 0;
+ rtw89_phy_write32(rtwdev, addr, val);
+ }
+}
+
+static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ bool enable = chan->band_type != RTW89_BAND_2G;
+ u8 path;
+
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ path = RF_PATH_A;
+ else
+ path = RF_PATH_B;
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path);
+ } else {
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A);
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B);
+ }
+}
+
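
rtw8922a_set_digital_pwr_comp() above programs DIGITAL_PWR_COMP_REG_NUM consecutive 32-bit registers per path, stepping the address by 4 and writing zeros to disable compensation. A compilable sketch of that stride-4 loop against an in-memory stand-in for the register space (names invented here):

    #include <stdint.h>
    #include <stdio.h>

    #define NREG 22

    static uint32_t regfile[0x1000 / 4]; /* stand-in for MMIO space */

    static void phy_write32(uint32_t addr, uint32_t val)
    {
            regfile[addr / 4] = val; /* the driver does a real MMIO write */
    }

    static void set_pwr_comp(uint32_t base, const uint32_t *tbl, int enable)
    {
            uint32_t addr = base;
            int i;

            for (i = 0; i < NREG; i++, addr += 4)
                    phy_write32(addr, enable ? tbl[i] : 0);
    }

    int main(void)
    {
            static const uint32_t tbl[NREG] = { 0x012C0096, 0x044C02BC };

            set_pwr_comp(0x100, tbl, 1);
            printf("reg[0x100]=0x%08x\n", regfile[0x100 / 4]);
            return 0;
    }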
static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
@@ -1801,11 +1864,13 @@ static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
}
static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
- enum rtw89_mlo_dbcc_mode mode)
+ enum rtw89_mlo_dbcc_mode mode,
+ enum rtw89_phy_idx phy_idx)
{
if (!rtwdev->dbcc_en)
return;
+ rtw8922a_digital_pwr_comp(rtwdev, phy_idx);
rtw8922a_ctrl_mlo(rtwdev, mode);
}
@@ -1912,7 +1977,7 @@ static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
if (!enter) {
- rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode, phy_idx);
rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
}
}
@@ -1928,10 +1993,12 @@ static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
- rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, chan, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
}
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
@@ -1953,10 +2020,12 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
}
}
-static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
@@ -1964,23 +2033,25 @@ static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
_wait_rx_mode(rtwdev, RF_AB);
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
- rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
- rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
- rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, chan, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
}
static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6);
}
-static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool start)
{
}
@@ -2109,7 +2180,7 @@ static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
struct rtw89_hal *hal = &rtwdev->hal;
u8 ntx_path = RF_PATH_AB;
@@ -2426,6 +2497,38 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ /* Mapping to BW: 5, 10, 20, 40, 80, 160, 80_80 */
+ static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};
+ u8 *rssi = phy_ppdu->rssi;
+ u8 compensate = 0;
+ u16 rpl_tmp;
+ u8 i;
+
+ if (phy_ppdu->bw_idx < ARRAY_SIZE(bw_compensate))
+ compensate = bw_compensate[phy_ppdu->bw_idx];
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ if (!(phy_ppdu->rx_path_en & BIT(i))) {
+ rssi[i] = 0;
+ phy_ppdu->rpl_path[i] = 0;
+ phy_ppdu->rpl_fd[i] = 0;
+ }
+ if (phy_ppdu->rate >= RTW89_HW_RATE_OFDM6) {
+ rpl_tmp = phy_ppdu->rpl_fd[i];
+ if (rpl_tmp)
+ rpl_tmp += compensate;
+
+ phy_ppdu->rpl_path[i] = rpl_tmp;
+ }
+ rssi[i] = phy_ppdu->rpl_path[i];
+ }
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
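
The conversion above substitutes the reported path loss (RPL) for RSSI, adding a per-bandwidth compensation only for OFDM rates and only when the path actually carried the frame. A small standalone sketch of the bounds-checked table compensation (values and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* index order assumed: 5, 10, 20, 40, 80, 160, 80+80 MHz */
    static const uint8_t bw_compensate[] = { 0, 0, 0, 6, 12, 18, 0 };

    static uint16_t compensate_rpl(uint16_t rpl_fd, uint8_t bw_idx)
    {
            uint8_t comp = 0;

            if (bw_idx < ARRAY_SIZE(bw_compensate)) /* guard unknown BW codes */
                    comp = bw_compensate[bw_idx];

            return rpl_fd ? rpl_fd + comp : 0; /* zero means "no measurement" */
    }

    int main(void)
    {
            printf("40MHz: %u\n", compensate_rpl(100, 3));  /* 106 */
            printf("bad idx: %u\n", compensate_rpl(100, 9)); /* 100 */
            return 0;
    }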
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2445,10 +2548,12 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2481,9 +2586,11 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.get_thermal = rtw8922a_get_thermal,
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
+ .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
+ .digital_pwr_comp = rtw8922a_digital_pwr_comp,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc_v2,
@@ -2549,6 +2656,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = 32,
+ .support_link_num = 2,
.support_chanctx_num = 2,
.support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
@@ -2562,6 +2670,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = true,
+ .hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2624,6 +2733,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
+ .rfkill_init = &rtw8922a_rfkill_regs,
+ .rfkill_get = {R_BE_GPIO_EXT_CTRL, B_BE_GPIO_IN_9},
.dma_ch_mask = 0,
.edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index 0ebcb06ae848..28907df7407d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -256,7 +256,7 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
- enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_chanctx_idx chanctx_idx;
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
u8 s0_tbl, s1_tbl;
@@ -265,14 +265,14 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
- sub_entity_idx = RTW89_SUB_ENTITY_1;
+ chanctx_idx = RTW89_CHANCTX_1;
break;
default:
- sub_entity_idx = RTW89_SUB_ENTITY_0;
+ chanctx_idx = RTW89_CHANCTX_0;
break;
}
- chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 1b2a400406ae..27826d909785 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -366,7 +366,7 @@ static void rtw89_tas_state_update(struct rtw89_dev *rtwdev)
if (src == RTW89_SAR_SOURCE_NONE)
return;
- chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg);
if (ret)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 3882938c0893..b2e47829983f 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -14,6 +14,7 @@
#define DATA_RATE_HT_IDX_MASK GENMASK(4, 0)
#define DATA_RATE_HT_IDX_MASK_V1 GENMASK(4, 0)
#define DATA_RATE_MODE_HT 0x1
+#define DATA_RATE_HT_NSS_MASK GENMASK(4, 3)
#define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4)
#define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0)
#define DATA_RATE_NSS_MASK_V1 GENMASK(7, 5)
@@ -51,6 +52,11 @@ static inline u8 rtw89_get_data_mcs(struct rtw89_dev *rtwdev, u16 hw_rate)
return u16_get_bits(hw_rate, DATA_RATE_VHT_HE_IDX_MASK);
}
+static inline u8 rtw89_get_data_ht_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
+{
+ return u16_get_bits(hw_rate, DATA_RATE_HT_NSS_MASK);
+}
+
static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
{
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
@@ -408,7 +414,7 @@ struct rtw89_rxinfo_user {
#define RTW89_RXINFO_USER_DATA BIT(1)
#define RTW89_RXINFO_USER_CTRL BIT(2)
#define RTW89_RXINFO_USER_MGMT BIT(3)
-#define RTW89_RXINFO_USER_BCM BIT(4)
+#define RTW89_RXINFO_USER_BCN BIT(4)
#define RTW89_RXINFO_USER_MACID GENMASK(15, 8)
struct rtw89_rxinfo {
@@ -435,6 +441,7 @@ struct rtw89_phy_sts_hdr {
} __packed;
#define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0)
+#define RTW89_PHY_STS_HDR_W0_HDR_2_EN BIT(5)
#define RTW89_PHY_STS_HDR_W0_VALID BIT(7)
#define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8)
#define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24)
@@ -443,6 +450,13 @@ struct rtw89_phy_sts_hdr {
#define RTW89_PHY_STS_HDR_W1_RSSI_C GENMASK(23, 16)
#define RTW89_PHY_STS_HDR_W1_RSSI_D GENMASK(31, 24)
+struct rtw89_phy_sts_hdr_v2 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_PHY_STS_HDR_V2_W0_PATH_EN GENMASK(20, 16)
+
struct rtw89_phy_sts_iehdr {
__le32 w0;
};
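
The *_W*_ masks above are decoded with GENMASK-style field extraction from little-endian status words. A self-contained sketch of that extraction, reimplementing the helpers locally (the field layout here is invented; __builtin_ctz assumes GCC/Clang):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    /* invented example field: bits 20..16 of word 0 */
    #define HDR_W0_PATH_EN GENMASK(20, 16)

    static uint32_t field_get(uint32_t mask, uint32_t word)
    {
            return (word & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
            uint32_t w0 = 0x000b0000; /* path_en = 0xb */

            printf("path_en=0x%x\n", field_get(HDR_W0_PATH_EN, w0));
            return 0;
    }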
@@ -546,13 +560,43 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
-struct rtw89_phy_sts_ie0 {
+struct rtw89_phy_sts_ie00 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+
+struct rtw89_phy_sts_ie00_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A GENMASK(8, 0)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B GENMASK(17, 9)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C GENMASK(26, 18)
+#define RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D GENMASK(8, 0)
+
+struct rtw89_phy_sts_ie01 {
__le32 w0;
__le32 w1;
__le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
} __packed;
#define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD GENMASK(15, 8)
+#define RTW89_PHY_STS_IE01_W0_RX_PATH_EN GENMASK(31, 28)
#define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8)
#define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20)
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
@@ -561,6 +605,25 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
+struct rtw89_phy_sts_ie01_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+} __packed;
+
+#define RTW89_PHY_STS_IE01_V2_W5_BW_IDX GENMASK(31, 29)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D GENMASK(23, 16)
+
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
RTW89_TXCH_ACH1 = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e82e7df052d8..e669544cafd3 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -16,6 +16,24 @@
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+/* Before adding rtwvif to the list, we need to check whether it already
+ * exists, because in some cases, such as an SER L2 event happening during
+ * the WoWLAN flow, calling reconfig twice would add it to the list twice.
+ */
+static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *new)
+{
+ struct rtw89_vif *rtwvif;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ if (rtwvif == new)
+ return true;
+
+ return false;
+}
+
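
rtw89_rtwvif_in_list() walks the vif list under the lock to guard against the double-add described in the comment. The same membership check in plain C with a singly linked list (standalone; names invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct vif {
            int id;
            struct vif *next;
    };

    static bool in_list(struct vif *head, struct vif *new)
    {
            struct vif *v;

            for (v = head; v; v = v->next)
                    if (v == new) /* pointer identity, as in the driver */
                            return true;
            return false;
    }

    int main(void)
    {
            struct vif a = { 1, NULL }, b = { 2, &a };

            printf("a in list: %d\n", in_list(&b, &a)); /* 1 */
            printf("b in a:    %d\n", in_list(&a, &b)); /* 0 */
            return 0;
    }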
/* The result of negative dividend and positive divisor is undefined, but it
* should be one case of round-down or round-up. So, make it round-down if the
* result is round-up.
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 9882064ef68d..86e24e07780d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -687,17 +687,30 @@ static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
-static void rtw89_wow_enter_lps(struct rtw89_dev *rtwdev)
+static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
{
struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
- rtw89_enter_lps(rtwdev, rtwvif, false);
+ if (rtw89_wow_mgd_linked(rtwdev))
+ rtw89_enter_lps(rtwdev, rtwvif, false);
+ else if (rtw89_wow_no_link(rtwdev))
+ rtw89_fw_h2c_fwips(rtwdev, rtwvif, true);
}
-static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
+static void rtw89_wow_leave_ps(struct rtw89_dev *rtwdev, bool enable_wow)
{
- rtw89_leave_lps(rtwdev);
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ if (rtw89_wow_mgd_linked(rtwdev)) {
+ rtw89_leave_lps(rtwdev);
+ } else if (rtw89_wow_no_link(rtwdev)) {
+ if (enable_wow)
+ rtw89_leave_ips(rtwdev);
+ else
+ rtw89_fw_h2c_fwips(rtwdev, rtwvif, false);
+ }
}
static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
@@ -781,17 +794,22 @@ static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- /* Current wowlan function support setting of only one STATION vif.
- * So when one suitable vif is found, stop the iteration.
+	/* The current WoWLAN function supports setting only one vif, in
+	 * infra mode or in no-link mode. When one suitable vif is found,
+	 * stop the iteration.
*/
if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
return;
switch (rtwvif->net_type) {
case RTW89_NET_TYPE_INFRA:
- rtw_wow->wow_vif = vif;
+ if (rtw_wow_has_mgd_features(rtwdev))
+ rtw_wow->wow_vif = vif;
break;
case RTW89_NET_TYPE_NO_LINK:
+ if (rtw_wow->pno_inited)
+ rtw_wow->wow_vif = vif;
+ break;
default:
break;
}
@@ -1025,6 +1043,23 @@ static void rtw89_wow_clear_wakeups(struct rtw89_dev *rtwdev)
rtw_wow->wow_vif = NULL;
rtw89_core_release_all_bits_map(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
rtw_wow->pattern_cnt = 0;
+ rtw_wow->pno_inited = false;
+}
+
+static void rtw89_wow_init_pno(struct rtw89_dev *rtwdev,
+ struct cfg80211_sched_scan_request *nd_config)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ if (!nd_config->n_match_sets || !nd_config->n_channels)
+ return;
+
+ rtw_wow->nd_config = nd_config;
+ rtw_wow->pno_inited = true;
+
+ INIT_LIST_HEAD(&rtw_wow->pno_pkt_list);
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: net-detect is enabled\n");
}
static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
@@ -1037,6 +1072,11 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
set_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
if (wowlan->magic_pkt)
set_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+ if (wowlan->n_patterns && wowlan->patterns)
+ set_bit(RTW89_WOW_FLAG_EN_PATTERN, rtw_wow->flags);
+
+ if (wowlan->nd_config)
+ rtw89_wow_init_pno(rtwdev, wowlan->nd_config);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_wow_vif_iter(rtwdev, rtwvif);
@@ -1048,6 +1088,34 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
return rtw89_wow_parse_patterns(rtwdev, rtwvif, wowlan);
}
+static int rtw89_wow_cfg_wake_pno(struct rtw89_dev *rtwdev, bool wow)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int ret;
+
+ ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to config pno\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to fw wow global\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
@@ -1308,100 +1376,239 @@ static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
return ret;
}
-static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ struct list_head *pkt_list = &rtw_wow->pno_pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+
+ list_for_each_entry_safe(info, tmp, pkt_list, list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+}
+
+static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
+ u8 num = nd_config->n_match_sets, i;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *skb;
int ret;
- rtw89_wow_pattern_write(rtwdev);
- rtw89_wow_construct_key_info(rtwdev);
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len,
+ nd_config->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, nd_config->ie, nd_config->ie_len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ kfree_skb(skb);
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ return -ENOMEM;
+ }
- ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
- return ret;
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
+ if (ret) {
+ kfree_skb(skb);
+ kfree(info);
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ return ret;
+ }
+
+ list_add_tail(&info->list, &rtw_wow->pno_pkt_list);
+ kfree_skb(skb);
}
- ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
- goto out;
+ return 0;
+}
+
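
On any allocation or offload failure, the loop above frees what it just built and then releases every packet already queued on pno_pkt_list, so the caller never sees a half-populated list. The unwind idiom in miniature (standalone C, hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; };

    static void release_all(struct pkt **head)
    {
            while (*head) {
                    struct pkt *p = *head;

                    *head = p->next;
                    free(p);
            }
    }

    static int build_list(struct pkt **head, int n)
    {
            for (int i = 0; i < n; i++) {
                    struct pkt *p = malloc(sizeof(*p));

                    if (!p) {            /* roll back everything on failure */
                            release_all(head);
                            return -1;
                    }
                    p->next = *head;
                    *head = p;
            }
            return 0;
    }

    int main(void)
    {
            struct pkt *head = NULL;

            if (build_list(&head, 3) == 0)
                    puts("built 3 pkts");
            release_all(&head);
            return 0;
    }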
+static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int interval = rtw_wow->nd_config->scan_plans[0].interval;
+ struct rtw89_scan_option opt = {};
+ int ret;
+
+ if (enable) {
+ ret = rtw89_pno_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update probe request failed\n");
+ return ret;
+ }
+
+ ret = mac->add_chan_list_pno(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update channel list failed\n");
+ return ret;
+ }
}
- ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
- goto out;
+ opt.enable = enable;
+ opt.repeat = RTW89_SCAN_NORMAL;
+ opt.norm_pd = max(interval, 1) * 10; /* in unit of 100ms */
+ opt.delay = max(rtw_wow->nd_config->delay, 1);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = 0;
+ opt.opch_end = RTW89_CHAN_INVALID;
}
- ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
- if (ret)
- rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+ mac->scan_offload(rtwdev, &opt, rtwvif, true);
- ret = rtw89_wow_cfg_wake(rtwdev, true);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to config wake\n");
- goto out;
+ return 0;
+}
+
+static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int ret;
+
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_pno_scan_offload(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n");
+ return ret;
+ }
+
+ ret = rtw89_pno_scan_offload(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable pno scan offload\n");
+ return ret;
+ }
+ } else {
+ rtw89_wow_pattern_write(rtwdev);
+ rtw89_wow_construct_key_info(rtwdev);
+
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable keep alive\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+ }
+
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_wow_cfg_wake_pno(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to config wake PNO\n");
+ return ret;
+ }
+ } else {
+ ret = rtw89_wow_cfg_wake(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to config wake\n");
+ return ret;
+ }
}
ret = rtw89_wow_check_fw_status(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check enable fw ready\n");
- goto out;
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
int ret;
- rtw89_wow_pattern_clear(rtwdev);
+ if (rtw89_wow_no_link(rtwdev)) {
+ ret = rtw89_pno_scan_offload(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
- goto out;
- }
+ ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable pno\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
- goto out;
- }
+ rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif);
+ } else {
+ rtw89_wow_pattern_clear(rtwdev);
- ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
- if (ret) {
- rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
- goto out;
- }
+ ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable keep alive\n");
+ return ret;
+ }
- ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
- if (ret)
- rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+ ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+
+ rtw89_wow_key_clear(rtwdev);
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+ }
- rtw89_wow_key_clear(rtwdev);
- rtw89_fw_release_general_pkt_list(rtwdev, true);
ret = rtw89_wow_cfg_wake(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable config wake\n");
- goto out;
+ return ret;
}
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
- goto out;
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
@@ -1430,7 +1637,7 @@ static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_enter_lps(rtwdev);
+ rtw89_wow_enter_ps(rtwdev);
ret = rtw89_wow_enable_trx_post(rtwdev);
if (ret) {
@@ -1455,7 +1662,7 @@ static int rtw89_wow_disable(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_leave_lps(rtwdev);
+ rtw89_wow_leave_ps(rtwdev, false);
ret = rtw89_wow_fw_stop(rtwdev);
if (ret) {
@@ -1480,6 +1687,12 @@ out:
return ret;
}
+static void rtw89_wow_restore_ps(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_wow_no_link(rtwdev))
+ rtw89_enter_ips(rtwdev);
+}
+
int rtw89_wow_resume(struct rtw89_dev *rtwdev)
{
int ret;
@@ -1504,6 +1717,7 @@ int rtw89_wow_resume(struct rtw89_dev *rtwdev)
if (ret)
rtw89_err(rtwdev, "failed to disable wow\n");
+ rtw89_wow_restore_ps(rtwdev);
out:
rtw89_wow_clear_wakeups(rtwdev);
return ret;
@@ -1519,7 +1733,7 @@ int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan)
return ret;
}
- rtw89_wow_leave_lps(rtwdev);
+ rtw89_wow_leave_ps(rtwdev, true);
ret = rtw89_wow_enable(rtwdev);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index 0d90add0e88d..3fbc2b87c058 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -95,6 +95,29 @@ static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
}
#ifdef CONFIG_PM
+static inline bool rtw89_wow_mgd_linked(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ return rtwvif->net_type == RTW89_NET_TYPE_INFRA;
+}
+
+static inline bool rtw89_wow_no_link(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+
+ return rtwvif->net_type == RTW89_NET_TYPE_NO_LINK;
+}
+
+static inline bool rtw_wow_has_mgd_features(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ return !bitmap_empty(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
+}
+
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
index a6a28640ad40..bbc1200dbb62 100644
--- a/drivers/net/wireless/rsi/rsi_debugfs.h
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -39,7 +39,6 @@ struct rsi_dbg_files {
struct rsi_debugfs {
struct dentry *subdir;
- struct rsi_dbg_ops *dfs_get_ops;
struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
};
int rsi_init_dbgfs(struct rsi_hw *adapter);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 34d95f458e1a..a9f090e15cbb 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -142,7 +142,7 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_radar_type_decode(mbox->radar_type));
if (!wl->radar_debug_mode)
- ieee80211_radar_detected(wl->hw);
+ ieee80211_radar_detected(wl->hw, NULL);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index d86e6ff4523d..f0e528abb1b4 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(mlo, "Support MLO");
static bool multi_radio;
module_param(multi_radio, bool, 0444);
-MODULE_PARM_DESC(mlo, "Support Multiple Radios per wiphy");
+MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy");
/**
* enum hwsim_regtest - the type of regulatory tests we offer
@@ -1146,7 +1146,7 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
{
struct mac80211_hwsim_data *data = dat;
- ieee80211_radar_detected(data->hw);
+ ieee80211_radar_detected(data->hw, NULL);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 8d864d4ed77f..79f17100f70b 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -53,6 +53,7 @@
#define RGU_RESET_DELAY_MS 10
#define PORT_RESET_DELAY_MS 2000
+#define FASTBOOT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS 5000
#define EX_HS_POLL_DELAY_MS 10
@@ -167,19 +168,52 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
}
kfree(buffer.pointer);
+#else
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+ ret = pci_reset_function(t7xx_dev->pdev);
+ if (ret) {
+ dev_err(dev, "Failed to reset device, error:%d\n", ret);
+ return ret;
+ }
#endif
return 0;
}
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
+static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
{
- return t7xx_acpi_reset(t7xx_dev, "_RST");
+ u32 value;
+
+ value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
{
- return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ int ret = 0;
+
+ pci_save_state(t7xx_dev->pdev);
+ t7xx_pci_reprobe_early(t7xx_dev);
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
+
+ if (type == FLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "_RST");
+ } else if (type == PLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ } else if (type == FASTBOOT) {
+ t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ msleep(FASTBOOT_RESET_DELAY_MS);
+ }
+
+ pci_restore_state(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
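
t7xx_host_event_notify() above is a classic read-modify-write of one field in a shared status register: clear the field with its mask, then OR in the new value shifted into position (FIELD_PREP in the driver). A standalone sketch (the mask layout is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define HOST_EVENT_MASK  0x0000ff00u
    #define HOST_EVENT_SHIFT 8

    static uint32_t dev_status; /* stand-in for the MMIO status register */

    static void host_event_notify(unsigned int event_id)
    {
            uint32_t value = dev_status;           /* ioread32() in the driver */

            value &= ~HOST_EVENT_MASK;             /* clear the event field */
            value |= (event_id << HOST_EVENT_SHIFT) & HOST_EVENT_MASK;
            dev_status = value;                    /* iowrite32() in the driver */
    }

    int main(void)
    {
            dev_status = 0xdeadbe01;
            host_event_notify(0x42);
            printf("status=0x%08x\n", dev_status); /* 0xdead4201 */
            return 0;
    }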
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
@@ -188,16 +222,15 @@ static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
if (val & MISC_RESET_TYPE_PLDR)
- t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ t7xx_reset_device(t7xx_dev, PLDR);
else if (val & MISC_RESET_TYPE_FLDR)
- t7xx_acpi_fldr_func(t7xx_dev);
+ t7xx_reset_device(t7xx_dev, FLDR);
}
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
- t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index b39e945a92e0..39ed0000fbba 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -78,14 +78,19 @@ struct t7xx_modem {
spinlock_t exp_lock; /* Protects exception events */
};
+enum reset_type {
+ FLDR,
+ PLDR,
+ FASTBOOT,
+};
+
void t7xx_md_exception_handshake(struct t7xx_modem *md);
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 10a8c1080b10..e556e5bd49ab 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -69,6 +69,7 @@ static ssize_t t7xx_mode_store(struct device *dev,
{
struct t7xx_pci_dev *t7xx_dev;
struct pci_dev *pdev;
+ enum t7xx_mode mode;
int index = 0;
pdev = to_pci_dev(dev);
@@ -76,12 +77,22 @@ static ssize_t t7xx_mode_store(struct device *dev,
if (!t7xx_dev)
return -ENODEV;
+ mode = READ_ONCE(t7xx_dev->mode);
+
index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == mode)
+ return -EBUSY;
+
if (index == T7XX_FASTBOOT_SWITCHING) {
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ return count;
+
WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, FASTBOOT);
} else if (index == T7XX_RESET) {
- WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
- t7xx_acpi_pldr_func(t7xx_dev);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, PLDR);
}
return count;
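
The store handler above maps the written string onto the mode name table and rejects a write that matches the current mode. A sketch of that string-table dispatch, with a minimal stand-in for sysfs_match_string (the table contents are invented):

    #include <stdio.h>
    #include <string.h>

    static const char *const mode_names[] = {
            "unknown", "ready", "reset", "fastboot_switching",
    };

    /* returns the matching index or -1, like a simplified sysfs_match_string() */
    static int match_string(const char *const *tbl, int n, const char *buf)
    {
            for (int i = 0; i < n; i++)
                    if (!strcmp(tbl[i], buf))
                            return i;
            return -1;
    }

    int main(void)
    {
            int cur = 1; /* current mode: "ready" */
            int idx = match_string(mode_names, 4, "reset");

            if (idx < 0)
                    puts("invalid mode");
            else if (idx == cur)
                    puts("busy: already in that mode");
            else
                    printf("switch to %s\n", mode_names[idx]);
            return 0;
    }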
@@ -446,7 +457,7 @@ static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
if (is_d3) {
t7xx_mhccif_init(t7xx_dev);
- return t7xx_pci_pm_reinit(t7xx_dev);
+ t7xx_pci_pm_reinit(t7xx_dev);
}
return 0;
@@ -481,6 +492,33 @@ static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
return ret;
}
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
+{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
+ int ret;
+
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
+{
+ int ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, boot);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+}
+
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
struct t7xx_pci_dev *t7xx_dev;
@@ -507,16 +545,11 @@ static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
if (prev_state == PM_RESUME_REG_STATE_L3 ||
(prev_state == PM_RESUME_REG_STATE_INIT &&
atr_reg_val == ATR_SRC_ADDR_INVALID)) {
- ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
- if (ret)
- return ret;
-
- ret = t7xx_pcie_reinit(t7xx_dev, true);
+ ret = t7xx_pci_reprobe_early(t7xx_dev);
if (ret)
return ret;
- t7xx_clear_rgu_irq(t7xx_dev);
- return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
if (prev_state == PM_RESUME_REG_STATE_EXP ||
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index 49a11586d8d8..cd8ea17c2644 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -133,4 +133,7 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot);
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev);
+
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 7d6388bf1d7c..35743e7de0c3 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -553,7 +553,6 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md)
md->port_prox = port_prox;
port_prox->dev = dev;
- t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
index 6a3f36385865..4ed8b4e29bf1 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_trace.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -59,6 +59,7 @@ static void t7xx_trace_port_uninit(struct t7xx_port *port)
relay_close(relaych);
debugfs_remove_recursive(debugfs_dir);
+ port->log.relaych = NULL;
}
static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 9889ca4621cf..3931c7a13f5a 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -213,16 +213,6 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
-static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
-{
- u32 value;
-
- value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
- value &= ~HOST_EVENT_MASK;
- value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
- iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
-}
-
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
struct t7xx_modem *md = ctl->md;
@@ -264,8 +254,14 @@ static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
+ enum t7xx_mode mode;
+
ctl->curr_state = FSM_STATE_STOPPED;
+ mode = READ_ONCE(ctl->md->t7xx_dev->mode);
+ if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP)
+ return 0;
+
t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
return t7xx_md_reset(ctl->md->t7xx_dev);
}
@@ -284,8 +280,6 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
{
struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
- enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
- int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
fsm_finish_command(ctl, cmd, -EINVAL);
@@ -296,21 +290,10 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (mode == T7XX_FASTBOOT_SWITCHING)
- t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
-
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
/* Wait for the DRM disable to take effect */
msleep(FSM_DRM_DISABLE_DELAY_MS);
- if (mode == T7XX_FASTBOOT_SWITCHING) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- } else {
- err = t7xx_acpi_fldr_func(t7xx_dev);
- if (err)
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- }
-
fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}
@@ -414,7 +397,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
case T7XX_DEV_STAGE_LK:
dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
t7xx_lk_stage_event_handling(ctl, status);
+
break;
case T7XX_DEV_STAGE_LINUX:
@@ -436,6 +421,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
}
finish_command:
+ if (ret)
+ t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN);
+
fsm_finish_command(ctl, cmd, ret);
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index ff96f22648ef..45ddce35f6d2 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -95,7 +95,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
static void xenvif_flush_hash(struct xenvif *vif)
{
- struct xenvif_hash_cache_entry *entry;
+ struct xenvif_hash_cache_entry *entry, *n;
unsigned long flags;
if (xenvif_hash_cache_size == 0)
@@ -103,8 +103,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
- lockdep_is_held(&vif->hash.cache.lock)) {
+ list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index a187f0e0b0f7..ffd7367ce119 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -254,7 +254,6 @@ struct pn533_acr122_ccid_hdr {
* byte for response msg
*/
u8 params[3];
- u8 data[]; /* payload */
} __packed;
struct pn533_acr122_apdu_hdr {
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index d702bee78082..ed6f4adc6130 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -72,7 +72,7 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
-static struct bus_type ntb_bus;
+static const struct bus_type ntb_bus;
static void ntb_dev_release(struct device *dev);
int __ntb_register_client(struct ntb_client *client, struct module *mod,
@@ -298,7 +298,7 @@ static void ntb_dev_release(struct device *dev)
complete(&ntb->released);
}
-static struct bus_type ntb_bus = {
+static const struct bus_type ntb_bus = {
.name = "ntb",
.probe = ntb_probe,
.remove = ntb_remove,
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b640aa0bf45e..00f0e78f685b 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Host side endpoint driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 48dfb1a69a77..6fc9dfe82474 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2547,7 +2547,7 @@ static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
*/
/*
- * idt_check_setup() - Check whether the IDT PCIe-swtich is properly
+ * idt_check_setup() - Check whether the IDT PCIe-switch is properly
* pre-initialized
* @pdev: Pointer to the PCI device descriptor
*
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 9ab836d0d4f1..079b8cd79785 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
- if (!ndev->debugfs_dir)
+ if (IS_ERR(ndev->debugfs_dir))
ndev->debugfs_info = NULL;
else
ndev->debugfs_info =
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 31946387badf..ad1786be2554 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1554,6 +1554,7 @@ static void switchtec_ntb_remove(struct device *dev)
switchtec_ntb_deinit_db_msg_irq(sndev);
switchtec_ntb_deinit_shared_mw(sndev);
switchtec_ntb_deinit_crosslink(sndev);
+ cancel_work_sync(&sndev->check_link_status_work);
kfree(sndev);
dev_info(dev, "ntb device unregistered\n");
}
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 77e55debeed6..a22ea4a4b202 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -314,7 +314,7 @@ static void ntb_transport_bus_remove(struct device *dev)
put_device(dev);
}
-static struct bus_type ntb_transport_bus = {
+static const struct bus_type ntb_transport_bus = {
.name = "ntb_transport",
.match = ntb_transport_bus_match,
.probe = ntb_transport_bus_probe,
@@ -377,6 +377,8 @@ EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
+ *
+ * Returns: %0 on success or -errno code on error
*/
int ntb_transport_register_client_dev(char *device_name)
{
@@ -807,16 +809,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
- struct device *dma_dev, size_t align)
+ struct device *ntb_dev, size_t align)
{
dma_addr_t dma_addr;
void *alloc_addr, *virt_addr;
int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
- &dma_addr, GFP_KERNEL);
+ /*
+ * The buffer here is allocated against the NTB device. The reason to
+ * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer
+ * backing the NTB BAR for the remote host to write to. During receive
+ * processing, the data is being copied out of the receive buffer to
+ * the kernel skbuff. When a DMA device is being used, dma_map_page()
+ * is called on the kvaddr of the receive buffer (from dma_alloc_*())
+ * and remapped against the DMA device. It appears to be a double
+ * DMA mapping of buffers, but the first mapping is to the NTB device
+ * and the second to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary
+ * in order for the later dma_map_page() to not fail.
+ */
+ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
+ &dma_addr, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!alloc_addr) {
- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
mw->alloc_size);
return -ENOMEM;
}
@@ -845,7 +860,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
return 0;
err:
- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc;
}
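
The comment above carries the reasoning; a minimal sketch of the double-mapping pattern it describes, assuming caller-supplied device pointers and a size (this is an illustration, not the driver's actual allocation path):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static void *mw_double_map_sketch(struct device *ntb_dev,
					  struct device *dma_dev,
					  size_t size, dma_addr_t *ntb_addr)
	{
		dma_addr_t dma;
		void *buf;

		/* One IOVA- and physically-contiguous buffer backing the NTB BAR. */
		buf = dma_alloc_attrs(ntb_dev, size, ntb_addr, GFP_KERNEL,
				      DMA_ATTR_FORCE_CONTIGUOUS);
		if (!buf)
			return NULL;

		/*
		 * Second mapping of the same pages, now against the DMA engine;
		 * mapping 'size' bytes through a single page pointer only works
		 * because DMA_ATTR_FORCE_CONTIGUOUS guaranteed contiguity.
		 */
		dma = dma_map_page(dma_dev, virt_to_page(buf), 0, size,
				   DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma)) {
			dma_free_attrs(ntb_dev, size, buf, *ntb_addr,
				       DMA_ATTR_FORCE_CONTIGUOUS);
			return NULL;
		}
		return buf;
	}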
@@ -1966,9 +1981,9 @@ static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
/**
* ntb_transport_create_queue - Create a new NTB transport layer queue
- * @rx_handler: receive callback function
- * @tx_handler: transmit callback function
- * @event_handler: event callback function
+ * @data: pointer for callback data
+ * @client_dev: &struct device pointer
+ * @handlers: pointer to various ntb queue (callback) handlers
*
* Create a new NTB transport layer queue and provide the queue with a callback
* routine for both transmit and receive. The receive callback routine will be
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 553f1f46bc66..72bc1d017a46 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
"\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos,
- "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
+ "\tOut buff phys addr %pap\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer size %pa\n", &peer->outbuf_size);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index d6d558f94d6b..55cfbf1e0a95 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1612,9 +1612,6 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
int i;
- if (!pmem_id)
- return -ENODEV;
-
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1790,9 +1787,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
case -EINVAL:
dev_dbg(&nd_region->dev, "invalid label(s)\n");
break;
- case -ENODEV:
- dev_dbg(&nd_region->dev, "label not found\n");
- break;
default:
dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
break;
@@ -1937,12 +1931,16 @@ static int cmp_dpa(const void *a, const void *b)
static struct device **scan_labels(struct nd_region *nd_region)
{
int i, count = 0;
- struct device *dev, **devs = NULL;
+ struct device *dev, **devs;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+ devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
+ if (!devs)
+ return NULL;
+
/* "safe" because create_namespace_pmem() might list_move() label_ent */
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
@@ -1961,12 +1959,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
goto err;
if (i < count)
continue;
- __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
- if (!__devs)
- goto err;
- memcpy(__devs, devs, sizeof(dev) * count);
- kfree(devs);
- devs = __devs;
+ if (count) {
+ __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
+ if (!__devs)
+ goto err;
+ memcpy(__devs, devs, sizeof(dev) * count);
+ kfree(devs);
+ devs = __devs;
+ }
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
if (IS_ERR(dev)) {
@@ -1974,9 +1974,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
case -EAGAIN:
/* skip invalid labels */
continue;
- case -ENODEV:
- /* fallthrough to seed creation */
- break;
default:
goto err;
}
@@ -1993,11 +1990,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
/* Publish a zero-sized namespace for userspace to configure. */
nd_mapping_free_labels(nd_mapping);
-
- devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
- if (!devs)
- goto err;
-
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
if (!nspm)
goto err;
@@ -2036,11 +2028,10 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
err:
- if (devs) {
- for (i = 0; devs[i]; i++)
- namespace_pmem_release(devs[i]);
- kfree(devs);
- }
+ for (i = 0; devs[i]; i++)
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
+
return NULL;
}
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 35c8fbbba10e..f55d60922b87 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
unsigned long flags;
int err, err1;
+ /*
+ * Don't bother to submit the request to the device if the device is
+ * not activated.
+ */
+ if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
+ dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
+ return -EIO;
+ }
+
might_sleep();
req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
if (!req_data)
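
The new guard costs one config-space status read through the standard accessor; restated as a standalone predicate (the helper name is illustrative, not part of the patch):

	#include <linux/virtio.h>
	#include <linux/virtio_config.h>

	/* A device flagged NEEDS_RESET will not service requests until reset. */
	static bool virtio_pmem_usable(struct virtio_device *vdev)
	{
		return !(vdev->config->get_status(vdev) &
			 VIRTIO_CONFIG_S_NEEDS_RESET);
	}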
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 403384f25ce3..b4a1cf70e8b7 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -47,7 +47,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, priv);
- is_volatile = !!of_find_property(np, "volatile", NULL);
+ is_volatile = of_property_read_bool(np, "volatile");
dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
is_volatile ? "volatile" : "non-volatile", np);
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index 6f7e7a8fa5ae..ed5167f942d8 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -20,6 +20,28 @@ key_serial_t nvme_keyring_id(void)
}
EXPORT_SYMBOL_GPL(nvme_keyring_id);
+static bool nvme_tls_psk_revoked(struct key *psk)
+{
+ return test_bit(KEY_FLAG_REVOKED, &psk->flags) ||
+ test_bit(KEY_FLAG_INVALIDATED, &psk->flags);
+}
+
+struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ struct key *key = key_lookup(key_id);
+
+ if (IS_ERR(key)) {
+ pr_err("key id %08x not found\n", key_id);
+ return key;
+ }
+ if (nvme_tls_psk_revoked(key)) {
+ pr_err("key id %08x revoked\n", key_id);
+ return ERR_PTR(-EKEYREVOKED);
+ }
+ return key;
+}
+EXPORT_SYMBOL_GPL(nvme_tls_key_lookup);
+
static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
@@ -36,14 +58,12 @@ static bool nvme_tls_psk_match(const struct key *key,
pr_debug("%s: no key description\n", __func__);
return false;
}
- match_len = strlen(key->description);
- pr_debug("%s: id %s len %zd\n", __func__, key->description, match_len);
-
if (!match_data->raw_data) {
pr_debug("%s: no match data\n", __func__);
return false;
}
match_id = match_data->raw_data;
+ match_len = strlen(match_id);
pr_debug("%s: match '%s' '%s' len %zd\n",
__func__, match_id, key->description, match_len);
return !memcmp(key->description, match_id, match_len);
@@ -71,7 +91,7 @@ static struct key_type nvme_tls_psk_key_type = {
static struct key *nvme_tls_psk_lookup(struct key *keyring,
const char *hostnqn, const char *subnqn,
- int hmac, bool generated)
+ u8 hmac, u8 psk_ver, bool generated)
{
char *identity;
size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11;
@@ -82,8 +102,8 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
if (!identity)
return ERR_PTR(-ENOMEM);
- snprintf(identity, identity_len, "NVMe0%c%02d %s %s",
- generated ? 'G' : 'R', hmac, hostnqn, subnqn);
+ snprintf(identity, identity_len, "NVMe%u%c%02u %s %s",
+ psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn);
if (!keyring)
keyring = nvme_keyring;
@@ -107,21 +127,38 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
/*
* NVMe PSK priority list
*
- * 'Retained' PSKs (ie 'generated == false')
- * should be preferred to 'generated' PSKs,
- * and SHA-384 should be preferred to SHA-256.
+ * 'Retained' PSKs (i.e. 'generated == false') should be preferred to 'generated'
+ * PSKs, PSKs with hash (psk_ver 1) should be preferred to PSKs without hash
+ * (psk_ver 0), and SHA-384 should be preferred to SHA-256.
*/
static struct nvme_tls_psk_priority_list {
bool generated;
+ u8 psk_ver;
enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {
{ .generated = false,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = false,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+ { .generated = false,
+ .psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = false,
+ .psk_ver = 0,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+ { .generated = true,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = true,
+ .psk_ver = 1,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
{ .generated = true,
+ .psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA384, },
{ .generated = true,
+ .psk_ver = 0,
.cipher = NVME_TCP_TLS_CIPHER_SHA256, },
};
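
For reference, the identity string that nvme_tls_psk_lookup() formats encodes exactly the three axes ranked above. A hedged example with made-up NQNs, assuming the cipher enum values track the spec's HMAC identifiers (SHA-256 = 1, SHA-384 = 2):

	char identity[NVMF_NQN_SIZE * 2 + 11];

	/* psk_ver 1, retained ('R'), HMAC 02: the highest-priority form. */
	snprintf(identity, sizeof(identity), "NVMe%u%c%02u %s %s",
		 1, 'R', 2, "nqn.2014-08.example:host",
		 "nqn.2014-08.example:subsys");
	/* -> "NVMe1R02 nqn.2014-08.example:host nqn.2014-08.example:subsys" */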
@@ -137,10 +174,11 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
for (prio = 0; prio < ARRAY_SIZE(nvme_tls_psk_prio); prio++) {
bool generated = nvme_tls_psk_prio[prio].generated;
+ u8 ver = nvme_tls_psk_prio[prio].psk_ver;
enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[prio].cipher;
tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn,
- cipher, generated);
+ cipher, ver, generated);
if (!IS_ERR(tls_key)) {
tls_key_id = tls_key->serial;
key_put(tls_key);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index a3caef75aa0a..486afe598184 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -41,6 +41,7 @@ config NVME_HWMON
config NVME_FABRICS
select NVME_CORE
+ select NVME_KEYRING if NVME_TCP_TLS
tristate
config NVME_RDMA
@@ -94,7 +95,6 @@ config NVME_TCP
config NVME_TCP_TLS
bool "NVMe over Fabrics TCP TLS encryption support"
depends on NVME_TCP
- select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
help
@@ -109,6 +109,7 @@ config NVME_HOST_AUTH
bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
select NVME_AUTH
+ select NVME_KEYRING if NVME_TCP_TLS
help
This provides support for NVMe over Fabrics In-Band Authentication in
host side.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 983909a600ad..ba6508455e18 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
+#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
@@ -987,8 +988,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
cmnd->rw.reftag = 0;
- cmnd->rw.apptag = 0;
- cmnd->rw.appmask = 0;
+ cmnd->rw.lbat = 0;
+ cmnd->rw.lbatm = 0;
if (ns->head->ms) {
/*
@@ -2467,11 +2468,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- /* Flush write to device (required if transport is PCI) */
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
- if (ret)
- return ret;
-
/* CAP value may change after initial CC write */
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
if (ret)
@@ -4040,6 +4036,35 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
}
+/**
+ * struct async_scan_info - keeps track of controller & NSIDs to scan
+ * @ctrl: Controller on which namespaces are being scanned
+ * @next_nsid: Index of next NSID to scan in ns_list
+ * @ns_list: Pointer to list of NSIDs to scan
+ *
+ * Note: There is a single async_scan_info structure shared by all instances
+ * of nvme_scan_ns_async() scanning a given controller, so the atomic
+ * operations on next_nsid are critical to ensure each instance scans a unique
+ * NSID.
+ */
+struct async_scan_info {
+ struct nvme_ctrl *ctrl;
+ atomic_t next_nsid;
+ __le32 *ns_list;
+};
+
+static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
+{
+ struct async_scan_info *scan_info = data;
+ int idx;
+ u32 nsid;
+
+ idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
+ nsid = le32_to_cpu(scan_info->ns_list[idx]);
+
+ nvme_scan_ns(scan_info->ctrl, nsid);
+}
+
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid)
{
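
Condensed, the scan loop now fans every NSID out to the async domain while the shared atomic index hands each worker a distinct slot; a sketch of the flow with error handling elided:

	ASYNC_DOMAIN(domain);
	struct async_scan_info scan_info = { .ctrl = ctrl, .ns_list = ns_list };

	atomic_set(&scan_info.next_nsid, 0);
	for (i = 0; i < nr_entries; i++)
		async_schedule_domain(nvme_scan_ns_async, &scan_info, &domain);
	/* Each worker claims its slot via atomic_fetch_inc(), so no NSID repeats. */
	async_synchronize_full_domain(&domain);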
@@ -4066,11 +4091,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
__le32 *ns_list;
u32 prev = 0;
int ret = 0, i;
+ ASYNC_DOMAIN(domain);
+ struct async_scan_info scan_info;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
+ scan_info.ctrl = ctrl;
+ scan_info.ns_list = ns_list;
for (;;) {
struct nvme_command cmd = {
.identify.opcode = nvme_admin_identify,
@@ -4086,19 +4115,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
goto free;
}
+ atomic_set(&scan_info.next_nsid, 0);
for (i = 0; i < nr_entries; i++) {
u32 nsid = le32_to_cpu(ns_list[i]);
if (!nsid) /* end of the list? */
goto out;
- nvme_scan_ns(ctrl, nsid);
+ async_schedule_domain(nvme_scan_ns_async, &scan_info,
+ &domain);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
+ async_synchronize_full_domain(&domain);
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
free:
+ async_synchronize_full_domain(&domain);
kfree(ns_list);
return ret;
}
@@ -4568,7 +4601,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
- set->cmd_size = cmd_size,
+ set->cmd_size = cmd_size;
set->driver_data = ctrl;
set->nr_hw_queues = ctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
@@ -4678,7 +4711,6 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
- key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
cleanup_srcu_struct(&ctrl->srcu);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index f5f545fa0103..432efcbf9e2f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -665,7 +665,7 @@ static struct key *nvmf_parse_key(int key_id)
return ERR_PTR(-EINVAL);
}
- key = key_lookup(key_id);
+ key = nvme_tls_key_lookup(key_id);
if (IS_ERR(key))
pr_err("key id %08x not found\n", key_id);
else
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1d1b6441a339..105d6cb41c72 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include "nvme.h"
static DECLARE_FAULT_ATTR(fail_default_attr);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index f1d58e70933f..b9b79ccfabf8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,7 +3,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
-#include <linux/bio-integrity.h>
+#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
@@ -119,9 +119,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+ bool has_metadata = meta_buffer && meta_len;
struct bio *bio = NULL;
int ret;
+ if (has_metadata && !supports_metadata)
+ return -EINVAL;
+
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
@@ -143,15 +148,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
goto out;
bio = req->bio;
- if (bdev) {
+ if (bdev)
bio_set_dev(bio, bdev);
- if (meta_buffer && meta_len) {
- ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
- meta_seed);
- if (ret)
- goto out_unmap;
- req->cmd_flags |= REQ_INTEGRITY;
- }
+
+ if (has_metadata) {
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
+ meta_seed);
+ if (ret)
+ goto out_unmap;
}
return ret;
@@ -260,8 +264,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
+ c.rw.lbat = cpu_to_le16(io.apptag);
+ c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
meta_len, lower_32_bits(io.slba), NULL, 0, 0);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 518e22dd4f9b..48e7a8906d01 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -421,6 +421,9 @@ static bool nvme_available_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns;
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ return NULL;
+
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
@@ -648,7 +651,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
rc = device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_attr_groups);
if (rc) {
- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
return;
}
nvme_add_ns_head_cdev(head);
@@ -969,11 +972,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- kblockd_schedule_work(&head->requeue_work);
- if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
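
The ordering is the substance of this hunk: clear the LIVE bit first, wait out the srcu readers, and only then kick the requeue work. Spelled out with the guarantee as a comment (same calls as above, no new driver logic):

	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
	/*
	 * Every reader that could still have seen LIVE set has finished once
	 * synchronize_srcu() returns, so requeued I/O is guaranteed to hit
	 * nvme_available_path() with the bit clear and fail instead of
	 * parking forever.
	 */
	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);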
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index da57947130cc..313a4f978a2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -91,6 +91,11 @@ enum nvme_quirks {
NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
/*
+ * Problems seen with concurrent commands
+ */
+ NVME_QUIRK_QDEPTH_ONE = (1 << 6),
+
+ /*
* Set MEDIUM priority on SQ creation
*/
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
@@ -372,7 +377,7 @@ struct nvme_ctrl {
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
#endif
- struct key *tls_key;
+ key_serial_t tls_pskid;
/* Power saving configuration */
u64 ps_max_latency_us;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c0533f3f64cb..7990c3f22ecf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2563,15 +2563,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
else
dev->io_sqes = NVME_NVM_IOSQES;
- /*
- * Temporary fix for the Apple controller found in the MacBook8,1 and
- * some MacBook7,1 to avoid controller resets and data loss.
- */
- if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+ if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
dev->q_depth = 2;
- dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
- "set queue depth=%u to work around controller resets\n",
- dev->q_depth);
} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
(pdev->device == 0xa821 || pdev->device == 0xa822) &&
NVME_CAP_MQES(dev->ctrl.cap) == 0) {
@@ -3442,6 +3435,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+ .driver_data = NVME_QUIRK_QDEPTH_ONE },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
@@ -3576,7 +3571,12 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
- .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+ /*
+ * Fix for the Apple controller found in the MacBook8,1 and
+ * some MacBook7,1 to avoid controller resets and data loss.
+ */
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+ NVME_QUIRK_QDEPTH_ONE },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2eb33842f971..c8fd0e8f0237 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1363,8 +1363,8 @@ static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
- domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
- domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
@@ -1496,7 +1496,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
req->metadata_sgl->sg_table.sgl =
(struct scatterlist *)(req->metadata_sgl + 1);
ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
- blk_rq_count_integrity_sg(rq->q, rq->bio),
+ rq->nr_integrity_segments,
req->metadata_sgl->sg_table.sgl,
NVME_INLINE_METADATA_SG_CNT);
if (unlikely(ret)) {
@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
goto out_unmap_sg;
}
- req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
- rq->bio, req->metadata_sgl->sg_table.sgl);
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+ req->metadata_sgl->sg_table.sgl);
*pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents,
@@ -1876,6 +1876,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
*/
priv.hrqsize = cpu_to_le16(queue->queue_size);
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ /* cntlid should only be set when creating an I/O queue */
+ priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid);
}
ret = rdma_connect_locked(queue->cm_id, &param);
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index ba05faaac562..b68a9e5f1ea3 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -664,19 +664,6 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif
-#ifdef CONFIG_NVME_TCP_TLS
-static ssize_t tls_key_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (!ctrl->tls_key)
- return 0;
- return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
-}
-static DEVICE_ATTR_RO(tls_key);
-#endif
-
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -704,9 +691,6 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_dhchap_secret.attr,
&dev_attr_dhchap_ctrl_secret.attr,
#endif
-#ifdef CONFIG_NVME_TCP_TLS
- &dev_attr_tls_key.attr,
-#endif
&dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
@@ -737,11 +721,6 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
return 0;
#endif
-#ifdef CONFIG_NVME_TCP_TLS
- if (a == &dev_attr_tls_key.attr &&
- (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
- return 0;
-#endif
return a->mode;
}
@@ -752,8 +731,78 @@ const struct attribute_group nvme_dev_attrs_group = {
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+#ifdef CONFIG_NVME_TCP_TLS
+static ssize_t tls_key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (!ctrl->tls_pskid)
+ return 0;
+ return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
+}
+static DEVICE_ATTR_RO(tls_key);
+
+static ssize_t tls_configured_key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct key *key = ctrl->opts->tls_key;
+
+ return sysfs_emit(buf, "%08x\n", key_serial(key));
+}
+static DEVICE_ATTR_RO(tls_configured_key);
+
+static ssize_t tls_keyring_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct key *keyring = ctrl->opts->keyring;
+
+ return sysfs_emit(buf, "%s\n", keyring->description);
+}
+static DEVICE_ATTR_RO(tls_keyring);
+
+static struct attribute *nvme_tls_attrs[] = {
+ &dev_attr_tls_key.attr,
+ &dev_attr_tls_configured_key.attr,
+ &dev_attr_tls_keyring.attr,
+ NULL,
+};
+
+static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
+ return 0;
+
+ if (a == &dev_attr_tls_key.attr &&
+ !ctrl->opts->tls)
+ return 0;
+ if (a == &dev_attr_tls_configured_key.attr &&
+ !ctrl->opts->tls_key)
+ return 0;
+ if (a == &dev_attr_tls_keyring.attr &&
+ !ctrl->opts->keyring)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group nvme_tls_attrs_group = {
+ .attrs = nvme_tls_attrs,
+ .is_visible = nvme_tls_attrs_are_visible,
+};
+#endif
+
const struct attribute_group *nvme_dev_attr_groups[] = {
&nvme_dev_attrs_group,
+#ifdef CONFIG_NVME_TCP_TLS
+ &nvme_tls_attrs_group,
+#endif
NULL,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a2a47d3ab99f..89c44413c593 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -165,6 +165,7 @@ struct nvme_tcp_queue {
bool hdr_digest;
bool data_digest;
+ bool tls_enabled;
struct ahash_request *rcv_hash;
struct ahash_request *snd_hash;
__le32 exp_ddgst;
@@ -213,7 +214,21 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
-static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
+/*
+ * Check if the queue is TLS encrypted
+ */
+static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return 0;
+
+ return queue->tls_enabled;
+}
+
+/*
+ * Check if TLS is configured for the controller.
+ */
+static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
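
The split separates two questions answered at different times: whether TLS is configured is a controller-level connect option, while whether a queue's handshake actually completed is per-queue runtime state. A condensed sketch of how the call sites below divide up:

	/* Connect path: the options decide whether to start a handshake. */
	if (nvme_tcp_tls_configured(nctrl) && pskid)
		ret = nvme_tcp_start_tls(nctrl, queue, pskid);

	/* Data path: only a queue whose handshake succeeded handles TLS records. */
	if (nvme_tcp_queue_tls(queue)) {
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);
	}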
@@ -368,7 +383,7 @@ static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
- return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+ return !nvme_tcp_queue_tls(queue) &&
nvme_tcp_queue_has_pending(queue);
}
@@ -1051,7 +1066,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
else
msg.msg_flags |= MSG_MORE;
- if (!sendpage_ok(page))
+ if (!sendpages_ok(page, len, offset))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page, len, offset);
@@ -1427,7 +1442,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
- if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
+ if (nvme_tcp_queue_tls(queue)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
@@ -1439,7 +1454,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
- if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
+ if (nvme_tcp_queue_tls(queue)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
@@ -1581,13 +1596,16 @@ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
goto out_complete;
}
- tls_key = key_lookup(pskid);
+ tls_key = nvme_tls_key_lookup(pskid);
if (IS_ERR(tls_key)) {
dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
qid, pskid);
queue->tls_err = -ENOKEY;
} else {
- ctrl->ctrl.tls_key = tls_key;
+ queue->tls_enabled = true;
+ if (qid == 0)
+ ctrl->ctrl.tls_pskid = key_serial(tls_key);
+ key_put(tls_key);
queue->tls_err = 0;
}
@@ -1768,7 +1786,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
- if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
+ if (nvme_tcp_tls_configured(nctrl) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
@@ -1829,6 +1847,8 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
+ /* Stopping the queue will disable TLS */
+ queue->tls_enabled = false;
mutex_unlock(&queue->queue_lock);
}
@@ -1925,16 +1945,17 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
- if (nvme_tcp_tls(ctrl)) {
+ if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
- else
+ else {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
- if (!pskid) {
- dev_err(ctrl->device, "no valid PSK found\n");
- return -ENOKEY;
+ if (!pskid) {
+ dev_err(ctrl->device, "no valid PSK found\n");
+ return -ENOKEY;
+ }
}
}
@@ -1957,13 +1978,14 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
+ if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
+
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_alloc_queue(ctrl, i,
- key_serial(ctrl->tls_key));
+ ctrl->tls_pskid);
if (ret)
goto out_free_queues;
}
@@ -2144,6 +2166,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
nvme_unquiesce_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
+ if (ctrl->tls_pskid) {
+ dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
+ ctrl->tls_pskid);
+ ctrl->tls_pskid = 0;
+ }
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 85006b2df8ae..954d4c074770 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1015,8 +1015,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_admin_cmd(req);
- if (unlikely(!nvmet_check_auth_status(req)))
- return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 8bc3f431c77f..7897d02c681d 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -25,6 +25,18 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
unsigned char key_hash;
char *dhchap_secret;
+ if (!strlen(secret)) {
+ if (set_ctrl) {
+ kfree(host->dhchap_ctrl_secret);
+ host->dhchap_ctrl_secret = NULL;
+ host->dhchap_ctrl_key_hash = 0;
+ } else {
+ kfree(host->dhchap_secret);
+ host->dhchap_secret = NULL;
+ host->dhchap_key_hash = 0;
+ }
+ return 0;
+ }
if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
return -EINVAL;
if (key_hash > 3) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1eff8ca6a5f1..1b6264fa5803 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -578,8 +578,8 @@ static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
- domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
- domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 77a4119efea8..65d39e19f6ec 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -123,7 +123,7 @@ static int nvmem_layout_bus_populate(struct nvmem_device *nvmem,
int ret;
/* Make sure it has a compatible property */
- if (!of_get_property(layout_dn, "compatible", NULL)) {
+ if (!of_property_present(layout_dn, "compatible")) {
pr_debug("%s() - skipping %pOF, no compatible prop\n",
__func__, layout_dn);
return 0;
diff --git a/drivers/of/.kunitconfig b/drivers/of/.kunitconfig
index 5a8fee11978c..4c53d2c7a275 100644
--- a/drivers/of/.kunitconfig
+++ b/drivers/of/.kunitconfig
@@ -1,3 +1,4 @@
CONFIG_KUNIT=y
CONFIG_OF=y
CONFIG_OF_KUNIT_TEST=y
+CONFIG_OF_OVERLAY_KUNIT_TEST=y
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index dd726c7056bf..0e2d608c3e20 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -107,6 +107,16 @@ config OF_OVERLAY
While this option is selected automatically when needed, you can
enable it manually to improve device tree unit test coverage.
+config OF_OVERLAY_KUNIT_TEST
+ tristate "Device Tree overlay KUnit tests" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select OF_OVERLAY
+ help
+ This option builds KUnit unit tests for the device tree overlay code.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_NUMA
bool
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 251d33532148..379a0afcbdc0 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -19,6 +19,9 @@ obj-y += kexec.o
endif
endif
+obj-$(CONFIG_KUNIT) += of_kunit_helpers.o
obj-$(CONFIG_OF_KUNIT_TEST) += of_test.o
+obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay-test.o
+overlay-test-y := overlay_test.o kunit_overlay_test.dtbo.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d669ce25b5f9..286f0c161e33 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -8,6 +8,7 @@
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
@@ -197,6 +198,23 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
#endif /* CONFIG_PCI */
+static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+{
+ u64 end = start;
+
+ if (overflows_type(start, r->start))
+ return -EOVERFLOW;
+ if (size && check_add_overflow(end, size - 1, &end))
+ return -EOVERFLOW;
+ if (overflows_type(end, r->end))
+ return -EOVERFLOW;
+
+ r->start = start;
+ r->end = end;
+
+ return 0;
+}
+
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
* @range: the PCI range that describes the resource
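
The new helper catches two distinct failure modes: a start address that does not fit in resource_size_t at all, and an end computation that wraps. Illustrative calls, assuming a 32-bit resource_size_t (the function is static, so these are for reading, not linking):

	struct resource r;

	__of_address_resource_bounds(&r, 0x100000000ULL, 0x1000); /* start too wide: -EOVERFLOW */
	__of_address_resource_bounds(&r, U64_MAX, 2);             /* end wraps u64: -EOVERFLOW */
	__of_address_resource_bounds(&r, 0x1000, 0x1000);         /* ok: start 0x1000, end 0x1fff */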
@@ -215,6 +233,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
int of_pci_range_to_resource(struct of_pci_range *range,
struct device_node *np, struct resource *res)
{
+ u64 start;
int err;
res->flags = range->flags;
res->parent = res->child = res->sibling = NULL;
@@ -231,18 +250,11 @@ int of_pci_range_to_resource(struct of_pci_range *range,
err = -EINVAL;
goto invalid_range;
}
- res->start = port;
+ start = port;
} else {
- if ((sizeof(resource_size_t) < 8) &&
- upper_32_bits(range->cpu_addr)) {
- err = -EINVAL;
- goto invalid_range;
- }
-
- res->start = range->cpu_addr;
+ start = range->cpu_addr;
}
- res->end = res->start + range->size - 1;
- return 0;
+ return __of_address_resource_bounds(res, start, range->size);
invalid_range:
res->start = (resource_size_t)OF_BAD_ADDR;
@@ -258,8 +270,8 @@ EXPORT_SYMBOL(of_pci_range_to_resource);
* @res: pointer to a valid resource that will be updated to
* reflect the values contained in the range.
*
- * Returns ENOENT if the entry is not found or EINVAL if the range cannot be
- * converted to resource.
+ * Returns -ENOENT if the entry is not found or -EOVERFLOW if the range
+ * cannot be converted to resource.
*/
int of_range_to_resource(struct device_node *np, int index, struct resource *res)
{
@@ -1061,12 +1073,10 @@ static int __of_address_to_resource(struct device_node *dev, int index, int bar_
if (of_mmio_is_nonposted(dev))
flags |= IORESOURCE_MEM_NONPOSTED;
- r->start = taddr;
- r->end = taddr + size - 1;
r->flags = flags;
r->name = name ? name : dev->full_name;
- return 0;
+ return __of_address_resource_bounds(r, taddr, size);
}
/**
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 68103ad230ee..4d528c10df3a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -34,7 +34,7 @@
/*
* __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
- * cmd_dt_S_dtb in scripts/Makefile.lib
+ * cmd_wrap_S_dtb in scripts/Makefile.dtbs
*/
extern uint8_t __dtb_empty_root_begin[];
extern uint8_t __dtb_empty_root_end[];
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 8fd63100ba8f..a494f56a0d0e 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -357,8 +357,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
addr = of_get_property(device, "reg", &addr_len);
/* Prevent out-of-bounds read in case of longer interrupt parent address size */
- if (addr_len > (3 * sizeof(__be32)))
- addr_len = 3 * sizeof(__be32);
+ if (addr_len > sizeof(addr_buf))
+ addr_len = sizeof(addr_buf);
if (addr)
memcpy(addr_buf, addr, addr_len);
@@ -429,9 +429,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
of_property_read_string_index(dev, "interrupt-names", index,
&name);
- r->start = r->end = irq;
- r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
- r->name = name ? name : of_node_full_name(dev);
+ *r = DEFINE_RES_IRQ_NAMED(irq, name ?: of_node_full_name(dev));
+ r->flags |= irq_get_trigger_type(irq);
}
return irq;
@@ -716,8 +715,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
* @np: device node for @dev
* @token: bus type for this domain
*
- * Parse the msi-parent property (both the simple and the complex
- * versions), and returns the corresponding MSI domain.
+ * Parse the msi-parent property and return the corresponding MSI domain.
*
* Returns: the MSI domain for this device (or NULL on failure).
*/
@@ -725,33 +723,14 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
{
- struct device_node *msi_np;
+ struct of_phandle_iterator it;
struct irq_domain *d;
+ int err;
- /* Check for a single msi-parent property */
- msi_np = of_parse_phandle(np, "msi-parent", 0);
- if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
- d = irq_find_matching_host(msi_np, token);
- if (!d)
- of_node_put(msi_np);
- return d;
- }
-
- if (token == DOMAIN_BUS_PLATFORM_MSI) {
- /* Check for the complex msi-parent version */
- struct of_phandle_args args;
- int index = 0;
-
- while (!of_parse_phandle_with_args(np, "msi-parent",
- "#msi-cells",
- index, &args)) {
- d = irq_find_matching_host(args.np, token);
- if (d)
- return d;
-
- of_node_put(args.np);
- index++;
- }
+ of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) {
+ d = irq_find_matching_host(it.node, token);
+ if (d)
+ return d;
}
return NULL;
diff --git a/drivers/of/kunit_overlay_test.dtso b/drivers/of/kunit_overlay_test.dtso
new file mode 100644
index 000000000000..85f20b4b4c16
--- /dev/null
+++ b/drivers/of/kunit_overlay_test.dtso
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-test {
+ compatible = "test,empty";
+ };
+};
diff --git a/drivers/of/of_kunit_helpers.c b/drivers/of/of_kunit_helpers.c
new file mode 100644
index 000000000000..287d6c91bb37
--- /dev/null
+++ b/drivers/of/of_kunit_helpers.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test managed DeviceTree APIs
+ */
+
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <kunit/of.h>
+#include <kunit/test.h>
+#include <kunit/resource.h>
+
+#if defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+static void of_overlay_fdt_apply_kunit_exit(void *ovcs_id)
+{
+ of_overlay_remove(ovcs_id);
+}
+
+/**
+ * of_overlay_fdt_apply_kunit() - Test managed of_overlay_fdt_apply()
+ * @test: test context
+ * @overlay_fdt: device tree overlay to apply
+ * @overlay_fdt_size: size in bytes of @overlay_fdt
+ * @ovcs_id: identifier of overlay, used to remove the overlay
+ *
+ * Just like of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ int ret;
+ int *copy_id;
+
+ copy_id = kunit_kmalloc(test, sizeof(*copy_id), GFP_KERNEL);
+ if (!copy_id)
+ return -ENOMEM;
+
+ ret = of_overlay_fdt_apply(overlay_fdt, overlay_fdt_size,
+ ovcs_id, NULL);
+ if (ret)
+ return ret;
+
+ *copy_id = *ovcs_id;
+
+ return kunit_add_action_or_reset(test, of_overlay_fdt_apply_kunit_exit,
+ copy_id);
+}
+EXPORT_SYMBOL_GPL(of_overlay_fdt_apply_kunit);
+
+#endif
+
+KUNIT_DEFINE_ACTION_WRAPPER(of_node_put_wrapper, of_node_put, struct device_node *);
+
+/**
+ * of_node_put_kunit() - Test managed of_node_put()
+ * @test: test context
+ * @node: node to pass to `of_node_put()`
+ *
+ * Just like of_node_put(), except the node is managed by the test case and is
+ * automatically put with of_node_put() after the test case concludes.
+ */
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ if (kunit_add_action(test, of_node_put_wrapper, node)) {
+ KUNIT_FAIL(test,
+ "Can't allocate a kunit resource to put of_node\n");
+ }
+}
+EXPORT_SYMBOL_GPL(of_node_put_kunit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test managed DeviceTree APIs");
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 5949829a1b00..2ec20886d176 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -10,6 +10,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/nodemask.h>
+#include <linux/numa_memblks.h>
#include <asm/numa.h>
@@ -44,7 +45,7 @@ static int __init of_numa_parse_memory_nodes(void)
struct device_node *np = NULL;
struct resource rsrc;
u32 nid;
- int i, r;
+ int i, r = -EINVAL;
for_each_node_by_type(np, "memory") {
r = of_property_read_u32(np, "numa-node-id", &nid);
@@ -71,7 +72,7 @@ static int __init of_numa_parse_memory_nodes(void)
}
}
- return 0;
+ return r;
}
static int __init of_numa_parse_distance_map_v1(struct device_node *map)
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 4d861a75d694..cbdecccca097 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -472,7 +472,6 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
static int build_changeset_next_level(struct overlay_changeset *ovcs,
struct target *target, const struct device_node *overlay_node)
{
- struct device_node *child;
struct property *prop;
int ret;
@@ -485,12 +484,11 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
}
}
- for_each_child_of_node(overlay_node, child) {
+ for_each_child_of_node_scoped(overlay_node, child) {
ret = add_changeset_node(ovcs, target, child);
if (ret) {
pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
target->np, child, ret);
- of_node_put(child);
return ret;
}
}
@@ -1078,16 +1076,12 @@ EXPORT_SYMBOL_GPL(of_overlay_fdt_apply);
*/
static int find_node(struct device_node *tree, struct device_node *np)
{
- struct device_node *child;
-
if (tree == np)
return 1;
- for_each_child_of_node(tree, child) {
- if (find_node(child, np)) {
- of_node_put(child);
+ for_each_child_of_node_scoped(tree, child) {
+ if (find_node(child, np))
return 1;
- }
}
return 0;
diff --git a/drivers/of/overlay_test.c b/drivers/of/overlay_test.c
new file mode 100644
index 000000000000..19a292cdeee3
--- /dev/null
+++ b/drivers/of/overlay_test.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for device tree overlays
+ */
+#include <linux/device/bus.h>
+#include <linux/kconfig.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <kunit/of.h>
+#include <kunit/test.h>
+
+static const char * const kunit_node_name = "kunit-test";
+static const char * const kunit_compatible = "test,empty";
+
+/* Test that of_overlay_apply_kunit() adds a node to the live tree */
+static void of_overlay_apply_kunit_apply(struct kunit *test)
+{
+ struct device_node *np;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ of_node_put(np);
+}
+
+/*
+ * Test that of_overlay_apply_kunit() creates platform devices with the
+ * expected device_node
+ */
+static void of_overlay_apply_kunit_platform_device(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ of_node_put_kunit(test, np);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+
+ pdev = of_find_device_by_node(np);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, pdev);
+ if (pdev)
+ put_device(&pdev->dev);
+}
+
+static int of_overlay_bus_match_compatible(struct device *dev, const void *data)
+{
+ return of_device_is_compatible(dev->of_node, data);
+}
+
+/* Test that of_overlay_apply_kunit() cleans up after the test is finished */
+static void of_overlay_apply_kunit_cleanup(struct kunit *test)
+{
+ struct kunit fake;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE))
+ kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE for root node");
+
+ kunit_init_test(&fake, "fake test", NULL);
+ KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(&fake, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ pdev = of_find_device_by_node(np);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ put_device(&pdev->dev); /* Not dereferencing 'pdev' after this */
+
+ /* Remove overlay */
+ kunit_cleanup(&fake);
+
+ /* The node and device should be removed */
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, np);
+ of_node_put(np);
+
+ dev = bus_find_device(&platform_bus_type, NULL, kunit_compatible,
+ of_overlay_bus_match_compatible);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, dev);
+ put_device(dev);
+}
+
+static struct kunit_case of_overlay_apply_kunit_test_cases[] = {
+ KUNIT_CASE(of_overlay_apply_kunit_apply),
+ KUNIT_CASE(of_overlay_apply_kunit_platform_device),
+ KUNIT_CASE(of_overlay_apply_kunit_cleanup),
+ {}
+};
+
+/*
+ * Test suite for test managed device tree overlays.
+ */
+static struct kunit_suite of_overlay_apply_kunit_suite = {
+ .name = "of_overlay_apply_kunit",
+ .test_cases = of_overlay_apply_kunit_test_cases,
+};
+
+kunit_test_suites(
+ &of_overlay_apply_kunit_suite,
+);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for device tree overlays");
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ef622d41eb5b..9bafcff3e628 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -338,7 +338,6 @@ static int of_platform_bus_create(struct device_node *bus,
struct device *parent, bool strict)
{
const struct of_dev_auxdata *auxdata;
- struct device_node *child;
struct platform_device *dev;
const char *bus_id = NULL;
void *platform_data = NULL;
@@ -382,13 +381,11 @@ static int of_platform_bus_create(struct device_node *bus,
if (!dev || !of_match_node(matches, bus))
return 0;
- for_each_child_of_node(bus, child) {
+ for_each_child_of_node_scoped(bus, child) {
pr_debug(" create child: %pOF\n", child);
rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict);
- if (rc) {
- of_node_put(child);
+ if (rc)
break;
- }
}
of_node_set_flag(bus, OF_POPULATED_BUS);
return rc;
@@ -459,7 +456,6 @@ int of_platform_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
- struct device_node *child;
int rc = 0;
root = root ? of_node_get(root) : of_find_node_by_path("/");
@@ -470,12 +466,10 @@ int of_platform_populate(struct device_node *root,
pr_debug(" starting at: %pOF\n", root);
device_links_supplier_sync_state_pause();
- for_each_child_of_node(root, child) {
+ for_each_child_of_node_scoped(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
- if (rc) {
- of_node_put(child);
+ if (rc)
break;
- }
}
device_links_supplier_sync_state_resume();
@@ -732,11 +726,14 @@ static int of_platform_notify(struct notifier_block *nb,
struct of_reconfig_data *rd = arg;
struct platform_device *pdev_parent, *pdev;
bool children_left;
+ struct device_node *parent;
switch (of_reconfig_get_state_change(action, rd)) {
case OF_RECONFIG_CHANGE_ADD:
- /* verify that the parent is a bus */
- if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
+ parent = rd->dn->parent;
+ /* verify that the parent is a bus (or the root node) */
+ if (!of_node_is_root(parent) &&
+ !of_node_check_flag(parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
/* already populated? (driver using of_populate manually) */
@@ -749,7 +746,7 @@ static int of_platform_notify(struct notifier_block *nb,
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
- pdev_parent = of_find_device_by_node(rd->dn->parent);
+ pdev_parent = of_find_device_by_node(parent);
pdev = of_platform_device_create(rd->dn, NULL,
pdev_parent ? &pdev_parent->dev : NULL);
platform_device_put(pdev_parent);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 164d77cb9445..11b922fde7af 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -452,12 +452,17 @@ EXPORT_SYMBOL_GPL(of_property_read_string);
/**
* of_property_match_string() - Find string in a list and return index
- * @np: pointer to node containing string list property
+ * @np: pointer to the node containing the string list property
* @propname: string list property name
- * @string: pointer to string to search for in string list
+ * @string: pointer to the string to search for in the string list
*
- * This function searches a string list property and returns the index
- * of a specific string value.
+ * Search for an exact match of string in a device node property which is a
+ * list of strings.
+ *
+ * Return: the index of the first occurrence of the string on success, -EINVAL
+ * if the property does not exist, -ENODATA if the property does not have a
+ * value, and -EILSEQ if the string is not null-terminated within the length of
+ * the property data.
*/
int of_property_match_string(const struct device_node *np, const char *propname,
const char *string)
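[Editor's note: given the return values documented above, callers can propagate the negative errno directly. A short sketch, with hypothetical property and string names:

        /* Index of "refclk" in "clock-names", or -EINVAL/-ENODATA/-EILSEQ. */
        static int refclk_index(const struct device_node *np)
        {
                return of_property_match_string(np, "clock-names", "refclk");
        }
]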
@@ -773,16 +778,11 @@ EXPORT_SYMBOL(of_graph_get_port_parent);
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
- struct device_node *np, *pp;
-
/* Get remote endpoint node. */
- np = of_graph_get_remote_endpoint(node);
-
- pp = of_graph_get_port_parent(np);
+ struct device_node *np __free(device_node) =
+ of_graph_get_remote_endpoint(node);
- of_node_put(np);
-
- return pp;
+ return of_graph_get_port_parent(np);
}
EXPORT_SYMBOL(of_graph_get_remote_port_parent);
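[Editor's note: the __free(device_node) annotation (from linux/cleanup.h) schedules of_node_put() when the variable leaves scope, which is what lets the rewritten function above drop its explicit put. A minimal sketch of the same pattern under an assumed helper name:

        #include <linux/cleanup.h>
        #include <linux/of.h>

        static bool parent_is_available(const struct device_node *np)
        {
                /* The reference from of_get_parent() is put automatically. */
                struct device_node *parent __free(device_node) =
                        of_get_parent(np);

                return parent && of_device_is_available(parent);
        }
]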
@@ -1064,19 +1064,15 @@ static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np,
u8 flags)
{
- struct device_node *tmp_np = of_node_get(sup_np);
+ struct device_node *tmp_np __free(device_node) = of_node_get(sup_np);
/* Check that sup_np and its ancestors are available. */
while (tmp_np) {
- if (of_fwnode_handle(tmp_np)->dev) {
- of_node_put(tmp_np);
+ if (of_fwnode_handle(tmp_np)->dev)
break;
- }
- if (!of_device_is_available(tmp_np)) {
- of_node_put(tmp_np);
+ if (!of_device_is_available(tmp_np))
return;
- }
tmp_np = of_get_next_parent(tmp_np);
}
@@ -1440,16 +1436,13 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
}
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
- struct device_node *con_dev_np;
+ struct device_node *con_dev_np __free(device_node) =
+ s->get_con_dev ? s->get_con_dev(con_np) : of_node_get(con_np);
- con_dev_np = s->get_con_dev
- ? s->get_con_dev(con_np)
- : of_node_get(con_np);
matched = true;
i++;
of_link_to_phandle(con_dev_np, phandle, s->fwlink_flags);
of_node_put(phandle);
- of_node_put(con_dev_np);
}
s++;
}
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 2780928764a4..5cf96776dd7d 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -150,7 +150,7 @@ static int node_name_cmp(const struct device_node *dn1,
static int adjust_local_phandle_references(struct device_node *local_fixups,
struct device_node *overlay, int phandle_delta)
{
- struct device_node *child, *overlay_child;
+ struct device_node *overlay_child;
struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
@@ -194,7 +194,7 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
* The roots of the subtrees are the overlay's __local_fixups__ node
* and the overlay's root node.
*/
- for_each_child_of_node(local_fixups, child) {
+ for_each_child_of_node_scoped(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
if (!node_name_cmp(child, overlay_child)) {
@@ -202,17 +202,13 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
break;
}
- if (!overlay_child) {
- of_node_put(child);
+ if (!overlay_child)
return -EINVAL;
- }
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index c830f346df45..daf9a2dddd7e 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -900,8 +900,8 @@ static void __init of_unittest_changeset(void)
unittest(!of_find_node_by_path("/testcase-data/changeset/n2/n21"),
"'%pOF' still present after revert\n", n21);
- ppremove = of_find_property(parent, "prop-remove", NULL);
- unittest(ppremove, "failed to find removed prop after revert\n");
+ unittest(of_property_present(parent, "prop-remove"),
+ "failed to find removed prop after revert\n");
ret = of_property_read_string(parent, "prop-update", &propstr);
unittest(!ret, "failed to find updated prop after revert\n");
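[Editor's note: of_property_present() returns a bool and avoids exposing the struct property pointer, which is all the existence check above needs. A one-line sketch with a hypothetical property name:

        /* true iff the node carries a "dma-coherent" property */
        bool coherent = of_property_present(np, "dma-coherent");
]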
@@ -1861,7 +1861,7 @@ static int __init unittest_data_add(void)
struct device_node *unittest_data_node = NULL, *np;
/*
* __dtbo_testcases_begin[] and __dtbo_testcases_end[] are magically
- * created by cmd_dt_S_dtbo in scripts/Makefile.lib
+ * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs
*/
extern uint8_t __dtbo_testcases_begin[];
extern uint8_t __dtbo_testcases_end[];
@@ -3525,7 +3525,7 @@ out_skip_tests:
/*
* __dtbo_##overlay_name##_begin[] and __dtbo_##overlay_name##_end[] are
- * created by cmd_dt_S_dtbo in scripts/Makefile.lib
+ * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs
*/
#define OVERLAY_INFO_EXTERN(overlay_name) \
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index ec0056a4bb13..5f0fb3ea385b 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -405,7 +405,7 @@ static struct platform_driver ti_opp_supply_driver = {
.probe = ti_opp_supply_probe,
.driver = {
.name = "ti_opp_supply",
- .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ .of_match_table = ti_opp_supply_of_match,
},
};
module_platform_driver(ti_opp_supply_driver);
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 633266447e2f..16f4496bca95 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -483,7 +483,7 @@ static struct attribute *paths_subsys_attrs[] = {
ATTRIBUTE_GROUPS(paths_subsys);
/* Specific kobject type for our PDC paths */
-static struct kobj_type ktype_pdcspath = {
+static const struct kobj_type ktype_pdcspath = {
.sysfs_ops = &pdcspath_attr_ops,
.default_groups = paths_subsys_groups,
};
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index aa4d1833f442..0d94e4a967d8 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -143,6 +143,15 @@ config PCI_IOV
If unsure, say N.
+config PCI_NPEM
+ bool "Native PCIe Enclosure Management"
+ depends on LEDS_CLASS=y
+ help
+ Support for Native PCIe Enclosure Management. It allows managing LED
+ indications in storage enclosures. The enclosure must support the
+ following indications: OK, Locate, Fail, and Rebuild; other
+ indications are optional.
+
config PCI_PRI
bool "PCI PRI support"
select PCI_ATS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8ddad57934a6..374c5c06d92f 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
+obj-$(CONFIG_PCI_NPEM) += npem.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index c570892b2090..6afff1f1b143 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -48,6 +48,39 @@ bool pci_ats_supported(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_ats_supported);
/**
+ * pci_prepare_ats - Set up the page shift (PS) for ATS
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * This must be done by the IOMMU driver on the PF before any VFs are created to
+ * ensure that the VF can have ATS enabled.
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_prepare_ats(struct pci_dev *dev, int ps)
+{
+ u16 ctrl;
+
+ if (!pci_ats_supported(dev))
+ return -EINVAL;
+
+ if (WARN_ON(dev->ats_enabled))
+ return -EBUSY;
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ dev->ats_stu = ps;
+ ctrl = PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_prepare_ats);
+
+/**
* pci_enable_ats - enable the ATS capability
* @dev: the PCI device
* @ps: the IOMMU page shift
@@ -455,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
* pci_pasid_features - Check which PASID features are supported
* @pdev: PCI device structure
*
- * Returns a negative value when no PASI capability is present.
- * Otherwise is returns a bitmask with supported features. Current
+ * Return a negative value when no PASID capability is present.
+ * Otherwise return a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
* PCI_PASID_CAP_PRIV - Privileged mode supported
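[Editor's note: the intended caller of the new pci_prepare_ats() above is an IOMMU driver configuring the PF before SR-IOV VFs exist. A hedged sketch, assuming the IOMMU's page shift is simply PAGE_SHIFT and the hook name is hypothetical:

        #include <linux/pci.h>

        /* Program the PF's ATS Smallest Translation Unit before any
         * VFs are created, per the helper above. */
        static int my_iommu_prepare_pf(struct pci_dev *pf)
        {
                return pci_prepare_ats(pf, PAGE_SHIFT);
        }
]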
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 4d2c188f5835..9800b7681054 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -196,7 +196,7 @@ config PCIE_MEDIATEK
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 1d5a70c9055e..8a0044bb3989 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 85718246016b..284f2e0e4d26 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,6 +7,8 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -22,6 +24,8 @@
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
@@ -44,6 +48,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -52,6 +57,7 @@ struct j721e_pcie {
u32 mode;
u32 num_lanes;
u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -482,20 +524,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
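[Editor's note: dev_err_probe() returns the error it is given and demotes -EPROBE_DEFER to a deferred-probe log entry, so the conversions above collapse the old "save ret, maybe dev_err(), goto" sequence into one statement. A minimal probe-path sketch:

        #include <linux/gpio/consumer.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)
        {
                struct device *dev = &pdev->dev;
                struct gpio_desc *gpiod;

                gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
                if (IS_ERR(gpiod))
                        /* Logs (unless deferring) and returns the errno. */
                        return dev_err_probe(dev, PTR_ERR(gpiod),
                                             "Failed to get reset GPIO\n");
                return 0;
        }
]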
@@ -505,42 +547,40 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_RC:
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
goto err_get_sync;
}
+ pcie->reset_gpio = gpiod;
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
clk = devm_clk_get_optional(dev, "pcie_refclk");
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
+ ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
goto err_pcie_setup;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * The "Power Sequencing and Reset Signal Timings" table of the
+ * PCI Express Card Electromechanical Specification, Revision
+ * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+ * should be deasserted after minimum of 100us once REFCLK is
+ * stable. The REFCLK to the connector in RC mode is selected
+ * while enabling the PHY. So deassert PERST# after 100 us.
*/
if (gpiod) {
- usleep_range(100, 200);
+ fsleep(PCIE_T_PERST_CLK_US);
gpiod_set_value_cansleep(gpiod, 1);
}
@@ -554,7 +594,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -589,6 +629,87 @@ static void j721e_pcie_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * This is not called explicitly in the probe; it is called by
+ * cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The "Power Sequencing and Reset Signal Timings" table of the
+ * PCI Express Card Electromechanical Specification, Revision
+ * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+ * should be deasserted after minimum of 100us once REFCLK is
+ * stable. The REFCLK to the connector in RC mode is selected
+ * while enabling the PHY. So deassert PERST# after 100 us.
+ */
+ if (pcie->reset_gpio) {
+ fsleep(PCIE_T_PERST_CLK_US);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
.remove_new = j721e_pcie_remove,
@@ -596,6 +717,7 @@ static struct platform_driver j721e_pcie_driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
builtin_platform_driver(j721e_pcie_driver);
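[Editor's note: DEFINE_NOIRQ_DEV_PM_OPS() binds the two callbacks to the noirq phase of system sleep, and pm_sleep_ptr() lets the compiler discard the ops when CONFIG_PM_SLEEP is off. A skeletal sketch with hypothetical names:

        #include <linux/platform_device.h>
        #include <linux/pm.h>

        static int my_suspend_noirq(struct device *dev) { return 0; }
        static int my_resume_noirq(struct device *dev) { return 0; }

        static DEFINE_NOIRQ_DEV_PM_OPS(my_pm_ops, my_suspend_noirq,
                                       my_resume_noirq);

        static struct platform_driver my_driver = {
                .driver = {
                        .name = "my-driver",
                        /* Compiles to NULL without CONFIG_PM_SLEEP */
                        .pm = pm_sleep_ptr(&my_pm_ops),
                },
        };
]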
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..8af95e9da7ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
@@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 7a66a2f815dc..f5eeff834ec1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -314,7 +314,6 @@ struct cdns_pcie {
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
}
#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 4c38181acffa..b6d6778b0698 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -265,12 +265,16 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCI_DW_PLAT_EP must be
selected.
+config PCIE_QCOM_COMMON
+ bool
+
config PCIE_QCOM
bool "Qualcomm PCIe controller (host mode)"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
+ select PCIE_QCOM_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,6 +285,7 @@ config PCIE_QCOM_EP
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
help
Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
to work in endpoint mode. The PCIe controller uses the DesignWare core
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index ec215b3d6191..a8308d9ea986 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4fe3b0cb72ec..5c62e1a3ba52 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -850,14 +850,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
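[Editor's note: the IRQF_ONESHOT addition above matters because a threaded interrupt with a NULL primary handler is rejected with -EINVAL unless IRQF_ONESHOT is set; the flag keeps the line masked until the thread function returns. The shape of the call, with my_irq_thread and priv as hypothetical names:

        ret = devm_request_threaded_irq(dev, irq, NULL /* no hard handler */,
                                        my_irq_thread,
                                        IRQF_SHARED | IRQF_ONESHOT,
                                        "my-irq", priv);
]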
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 964d67756eb2..808d1f105417 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -54,9 +55,9 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
-enum imx6_pcie_variants {
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -64,6 +65,7 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
IMX95,
IMX8MQ_EP,
IMX8MM_EP,
@@ -71,25 +73,25 @@ enum imx6_pcie_variants {
IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
-#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
-#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
-#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
-#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
-#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
-#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX6_PCIE_MAX_CLKS 6
+#define IMX_PCIE_MAX_CLKS 6
+#define IMX_PCIE_MAX_INSTANCES 2
-#define IMX6_PCIE_MAX_INSTANCES 2
+struct imx_pcie;
-struct imx6_pcie;
-
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
@@ -98,17 +100,19 @@ struct imx6_pcie_drvdata {
const u32 clks_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
- const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
- const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
const struct pci_epc_features *epc_features;
- int (*init_phy)(struct imx6_pcie *pcie);
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
bool link_is_up;
- struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
+ struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -129,7 +133,7 @@ struct imx6_pcie {
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -184,28 +188,28 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MQ_EP &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MM_EP &&
- imx6_pcie->drvdata->variant != IMX8MP &&
- imx6_pcie->drvdata->variant != IMX8MP_EP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
-static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
IMX95_PCIE_PHY_CR_PARA_SEL);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_PHY_GEN_CTRL,
IMX95_PCIE_REF_USE_PAD, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_REF_CLKEN,
IMX95_PCIE_REF_CLKEN);
@@ -213,9 +217,9 @@ static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
return 0;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
unsigned int mask, val, mode, id;
if (drvdata->mode == DW_PCIE_EP_TYPE)
@@ -223,7 +227,11 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- id = imx6_pcie->controller_id;
+ id = imx_pcie->controller_id;
+
+ /* If mode_mask[0] is 0, the generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
/* If mode_mask[id] is zero, each controller has its own GPR */
if (!drvdata->mode_mask[id])
@@ -232,12 +240,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
mask = drvdata->mode_mask[id];
val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -256,9 +264,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -268,24 +276,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -293,7 +301,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -302,18 +310,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -324,7 +332,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -333,7 +341,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -342,7 +350,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -351,7 +359,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -360,74 +368,74 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
/* TODO: Currently this code assumes external oscillator is being used */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
* According to the datasheet, PCIE_VPH should be 1.8V. If PCIE_VPH is
* supplied at 3.3V, VREG_BYPASS should be cleared to zero.
*/
- if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_VREG_BYPASS,
0);
return 0;
}
-static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
return 0;
}
-static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
+ imx_pcie->tx_swing_low << 25);
return 0;
}
-static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
- return imx6_pcie_init_phy(imx6_pcie);
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -435,19 +443,19 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
int mult, div;
u16 val;
int i;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
+ if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(imx_pcie->clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -465,46 +473,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -543,22 +551,22 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
@@ -567,11 +575,11 @@ static int imx6_pcie_attach_pd(struct device *dev)
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
@@ -583,21 +591,20 @@ static int imx6_pcie_attach_pd(struct device *dev)
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- unsigned int offset;
- int ret = 0;
+ if (enable)
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+ return 0;
+}
+
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
* the async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
@@ -605,71 +612,51 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
* add one ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- case IMX95:
- case IMX95_EP:
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- default:
- break;
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
+ if (enable) {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
}
+
+ return 0;
+}
+
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (!enable)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
if (ret)
return ret;
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -677,99 +664,120 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- default:
- break;
- }
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
+}
- /* Some boards don't have PCIe reset GPIO. */
- gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1);
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
+
+ return 0;
}
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
+ if (!assert)
+ return 0;
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+ return 0;
+}
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
- usleep_range(200, 500);
- break;
- default:
- break;
+ if (assert)
+ return 0;
+
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data "0x29" to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data "0x48" to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data "0xbc" to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
+
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
}
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
+
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+ reset_control_assert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpiod) {
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
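[Editor's note: the rework above trades the per-variant switch statements for optional enable_ref_clk and core_reset hooks in imx_pcie_drvdata, which the common paths call only when non-NULL. A hypothetical table entry showing how a variant would wire them up (the real tables are not part of this hunk):

        static const struct imx_pcie_drvdata imx7d_sketch = {
                .variant        = IMX7D,
                /* NULL hooks are simply skipped by the common code */
                .enable_ref_clk = imx7d_pcie_enable_ref_clk,
                .core_reset     = imx7d_pcie_core_reset,
        };
]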
@@ -777,9 +785,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -796,33 +804,38 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
drvdata->ltssm_mask);
- reset_control_deassert(imx6_pcie->apps_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
}
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ phy_set_speed(imx_pcie->phy, 0);
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
drvdata->ltssm_mask, 0);
- reset_control_assert(imx6_pcie->apps_reset);
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
@@ -841,18 +854,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
- if (pci->link_gen > 1) {
+ if (pci->max_link_speed > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -864,8 +877,8 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ if (imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -875,7 +888,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* failure.
*/
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -890,37 +903,37 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
+ imx_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
+ imx_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -928,83 +941,105 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
- if (imx6_pcie->drvdata->init_phy)
- imx6_pcie->drvdata->init_phy(imx6_pcie);
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- imx6_pcie_configure_type(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .init = imx6_pcie_host_init,
- .deinit = imx6_pcie_host_exit,
+static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(pcie);
+ struct dw_pcie_rp *pp = &pcie->pp;
+ struct resource_entry *entry;
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP))
+ return cpu_addr;
+
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return cpu_addr;
+
+ return cpu_addr - entry->offset;
+}
+
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
- .stop_link = imx6_pcie_stop_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
+ .cpu_addr_fixup = imx_pcie_cpu_addr_fixup,
};
-static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1013,7 +1048,7 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1060,35 +1095,35 @@ static const struct pci_epc_features imx95_pcie_epc_features = {
};
static const struct pci_epc_features*
-imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- return imx6_pcie->drvdata->epc_features;
+ return imx_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = imx6_pcie_ep_init,
- .raise_irq = imx6_pcie_ep_raise_irq,
- .get_features = imx6_pcie_ep_get_features,
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
};
-static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- imx6_pcie_host_init(pp);
+ imx_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ_EP:
case IMX8MM_EP:
case IMX8MP_EP:
@@ -1110,9 +1145,11 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
pci->dbi_base2 = NULL;
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
@@ -1129,30 +1166,30 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
pci_epc_init_notify(ep->epc);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
return 0;
}
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
{
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
/* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
+ if (imx_pcie->turnoff_reset) {
+ reset_control_assert(imx_pcie->turnoff_reset);
+ reset_control_deassert(imx_pcie->turnoff_reset);
goto pm_turnoff_sleep;
}
/* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX6SX:
case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
break;
default:
@@ -1171,73 +1208,73 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
u16 val;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
if (pci_msi_enabled()) {
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
if (save) {
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- imx6_pcie->msi_ctrl = val;
+ imx_pcie->msi_ctrl = val;
} else {
dw_pcie_dbi_ro_wr_en(pci);
- val = imx6_pcie->msi_ctrl;
+ val = imx_pcie->msi_ctrl;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
}
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static int imx_pcie_suspend_noirq(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_msi_save_restore(imx6_pcie, true);
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ imx_pcie_pm_turnoff(imx_pcie);
+ imx_pcie_stop_link(imx_pcie->pci);
+ imx_pcie_host_exit(pp);
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
+ ret = imx_pcie_host_init(pp);
if (ret)
return ret;
- imx6_pcie_msi_save_restore(imx6_pcie, false);
+ imx_pcie_msi_save_restore(imx_pcie, false);
dw_pcie_setup_rc(pp);
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ if (imx_pcie->link_is_up)
+ imx_pcie_start_link(imx_pcie->pci);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
@@ -1245,8 +1282,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
u16 val;
int i;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1255,10 +1292,10 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
+ pci->pp.ops = &imx_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1270,9 +1307,9 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
@@ -1280,72 +1317,72 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pci->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(imx6_pcie->reset_gpiod))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod),
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
"unable to get reset gpio\n");
- gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
- if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
+ if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS)
return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
+ for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
+ imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
if (ret)
return ret;
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
"failed to get pcie phy\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
"Failed to get PCIEPHY reset control\n");
}
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ imx_pcie->controller_id = 1;
break;
default:
break;
}
/* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx_pcie->turnoff_reset)) {
dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
+ return PTR_ERR(imx_pcie->turnoff_reset);
}
- if (imx6_pcie->drvdata->gpr) {
+ if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(off))
@@ -1358,59 +1395,59 @@ static int imx6_pcie_probe(struct platform_device *pdev)
.reg_stride = 4,
};
- imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
- ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
} else {
@@ -1430,24 +1467,25 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
+static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6q_clks,
@@ -1456,13 +1494,15 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6sx_clks,
.clks_cnt = ARRAY_SIZE(imx6sx_clks),
@@ -1471,12 +1511,14 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6q_clks,
@@ -1485,24 +1527,28 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
.clk_names = imx6q_clks,
.clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx7d_pcie_init_phy,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
@@ -1511,32 +1557,42 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
.clk_names = imx8mm_clks,
.clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
.clk_names = imx8mm_clks,
.clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP,
+ .clk_names = imx8q_clks,
+ .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES,
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
@@ -1547,8 +1603,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
@@ -1559,10 +1615,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
.clk_names = imx8mm_clks,
@@ -1570,10 +1628,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
.clk_names = imx8mm_clks,
@@ -1581,11 +1641,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX95_EP] = {
.variant = IMX95_EP,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES |
- IMX6_PCIE_FLAG_SUPPORT_64BIT,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
@@ -1598,7 +1659,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1606,6 +1667,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
@@ -1614,19 +1676,19 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{},
};
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1636,33 +1698,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
struct device_node *np;
- np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
if (!np)
return -ENODEV;
of_node_put(np);
@@ -1678,6 +1740,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
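The IMX8Q entry and imx_pcie_cpu_addr_fixup() above handle controllers whose outbound window sits at a different address in the CPU view than on the bus. A minimal standalone sketch of the address arithmetic, with illustrative names and addresses (not kernel code):

#include <stdint.h>
#include <stdio.h>

struct mem_window {
	uint64_t cpu_start;	/* window start as seen by the CPU */
	uint64_t bus_start;	/* the same window as seen on the PCI bus */
};

/* plays the role of imx_pcie_cpu_addr_fixup(): bus = cpu - offset */
static uint64_t cpu_addr_fixup(const struct mem_window *w, uint64_t cpu_addr)
{
	uint64_t offset = w->cpu_start - w->bus_start;	/* resource offset */

	return cpu_addr - offset;
}

int main(void)
{
	struct mem_window w = { .cpu_start = 0x70000000, .bus_start = 0x40000000 };

	/* 0x70100000 in the CPU view maps to 0x40100000 on the bus */
	printf("bus addr = 0x%llx\n",
	       (unsigned long long)cpu_addr_fixup(&w, 0x70100000));
	return 0;
}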
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 52c6420ae200..2219b1a866fa 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -189,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -247,7 +241,6 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
@@ -577,7 +570,7 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(am6_pci_devids, bridge)) {
bridge_dev = pci_get_host_bridge_device(dev);
- if (!bridge_dev && !bridge_dev->parent)
+ if (!bridge_dev || !bridge_dev->parent)
return;
ks_pcie = dev_get_drvdata(bridge_dev->parent);
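The ks_pcie_quirk() hunk above fixes a short-circuit bug: with &&, a NULL bridge_dev is dereferenced by the second operand; with || the check bails out before the dereference. A standalone sketch of the corrected pattern, with illustrative types (not the keystone structures):

#include <stddef.h>
#include <stdio.h>

struct dev {
	struct dev *parent;
};

static int should_bail(const struct dev *bridge_dev)
{
	/* buggy form: !bridge_dev && !bridge_dev->parent (NULL deref) */
	return !bridge_dev || !bridge_dev->parent;
}

int main(void)
{
	printf("%d\n", should_bail(NULL));	/* 1: bails out safely */
	return 0;
}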
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index a0822d5371bc..3e41865c7290 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = {
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &dw_pcie_msi_irq_chip,
};
@@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
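This hunk, like the keystone one, drops a per-driver .irq_set_affinity stub that only returned -EINVAL, relying on the core MSI_FLAG_NO_AFFINITY flag instead. A standalone sketch of why a core-level flag can replace the stubs, with illustrative names (not the kernel's irq_chip or msi_domain_info API):

#include <errno.h>
#include <stdio.h>

#define FLAG_NO_AFFINITY	(1u << 0)

struct msi_info {
	unsigned int flags;
	int (*set_affinity)(unsigned int cpu);	/* optional per-driver hook */
};

static int core_set_affinity(const struct msi_info *info, unsigned int cpu)
{
	if (info->flags & FLAG_NO_AFFINITY)
		return -EINVAL;			/* refused once, in core code */
	if (info->set_affinity)
		return info->set_affinity(cpu);
	return 0;
}

int main(void)
{
	struct msi_info dw_msi = { .flags = FLAG_NO_AFFINITY };

	printf("set_affinity -> %d\n", core_set_affinity(&dw_msi, 2));
	return 0;
}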
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 1b5aba1f0c92..6d6cbc8b5b2c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -134,6 +135,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -166,8 +168,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -687,16 +689,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+	 * Even if the platform doesn't want to limit the maximum link speed,
+	 * cache the hardware default value so that the vendor drivers can
+	 * use it for any link-specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -1058,8 +1071,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
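dw_pcie_link_set_max_speed() now also caches the hardware default when no limit was requested, so pci->max_link_speed is always valid afterwards. A standalone sketch of that contract, with an illustrative register model (not the real DBI accessors or field layout):

#include <stdio.h>

#define LNKCAP_SLS_MASK	0xf	/* Supported Link Speeds field */

struct fake_pci {
	int max_link_speed;	/* < 1 means "no limit requested" */
	unsigned int lnkcap;	/* stands in for a DBI read of PCI_EXP_LNKCAP */
};

static void set_max_speed(struct fake_pci *pci)
{
	if (pci->max_link_speed < 1) {
		/* cache the hardware default, program nothing */
		pci->max_link_speed = pci->lnkcap & LNKCAP_SLS_MASK;
		return;
	}
	/* the real code would program PCI_EXP_LNKCTL2_TLS here */
}

int main(void)
{
	struct fake_pci pci = { .max_link_speed = 0, .lnkcap = 0x4 };

	set_max_speed(&pci);
	printf("cached max_link_speed = %d\n", pci.max_link_speed);	/* 4 */
	return 0;
}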
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 53c4c8f399c8..347ab74ac35a 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -125,6 +125,19 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
+
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -198,6 +211,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
 * From core version 4.80, address translation is done by the unroll mechanism
*/
@@ -407,8 +438,10 @@ struct dw_pcie_ops {
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
@@ -421,7 +454,7 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index acbe4f6d3291..676d2aba4fbd 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -132,7 +132,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -252,7 +252,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 0a29136491b8..85a2c77b1835 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -420,11 +420,11 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
"unable to get a valid reset gpio\n");
}
- pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
dev_err(dev, "Too many PCI slots!\n");
return -EINVAL;
}
+ pcie->num_slots++;
ret = of_pci_get_devfn(child);
if (ret < 0) {
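The kirin hunk above moves the bound check before the increment, so the counter can never pass the limit even when the error path is taken. A standalone sketch of the check-before-increment pattern (the MAX_PCI_SLOTS value is illustrative):

#include <stdio.h>

#define MAX_PCI_SLOTS	3

static int add_slot(int *num_slots)
{
	if (*num_slots + 1 >= MAX_PCI_SLOTS)
		return -1;	/* too many slots; counter left unchanged */
	(*num_slots)++;
	return 0;
}

int main(void)
{
	int n = 0;

	while (add_slot(&n) == 0)
		;
	printf("slots used = %d (cap %d)\n", n, MAX_PCI_SLOTS);
	return 0;
}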
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..3aad19b56da8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /*
+	 * The GEN3_RELATED_OFF register is repurposed to apply equalization
+	 * settings at various data transmission rates through the GEN3_EQ_*
+	 * registers. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+	 * determines the data rate for which these equalization settings
+	 * are applied.
+ */
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
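The helpers above lean on the GENMASK()/FIELD_PREP()/FIELD_GET() read-modify-write idiom: clear a field with its mask, then OR in the shifted value. A standalone sketch with local reimplementations of those macros (not <linux/bitfield.h>; the field is one of the new GEN3_EQ_FMDC fields, reused purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define EQ_FMDC_N_EVALS		GENMASK(9, 5)

int main(void)
{
	uint32_t reg = 0xffffffff;	/* stands in for a DBI register read */

	/* clear the field, then program a new value, as the driver does */
	reg &= ~EQ_FMDC_N_EVALS;
	reg |= FIELD_PREP(EQ_FMDC_N_EVALS, 0xd);

	printf("reg=0x%08x n_evals=%u\n", reg,
	       FIELD_GET(EQ_FMDC_N_EVALS, reg));	/* n_evals=13 */
	return 0;
}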
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7d88d29e4766
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index a9b263f749b6..e588fcc54589 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -25,6 +25,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -498,6 +499,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
goto err_disable_resources;
}
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -659,11 +665,9 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
@@ -693,7 +697,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_err(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -724,8 +729,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -733,18 +745,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -858,21 +875,15 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -889,8 +900,8 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6f953e32d990..ef44a82be058 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -35,6 +35,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -45,15 +46,24 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
-#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -108,7 +118,7 @@
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
/* PARF_SLV_ADDR_SPACE_SIZE register value */
-#define SLV_ADDR_SPACE_SZ 0x10000000
+#define SLV_ADDR_SPACE_SZ 0x80000000
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
@@ -121,6 +131,9 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+
/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
@@ -284,6 +297,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -325,6 +343,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
+		 * needs to be programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
+		 * are in the CPU domain and need to be programmed with CPU
+		 * physical addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
+
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -541,8 +603,7 @@ err_assert_reset:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
@@ -629,8 +690,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -812,13 +872,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
@@ -914,8 +972,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -1124,14 +1181,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
@@ -1489,6 +1543,29 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
qcom_pcie_link_transition_count);
}
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1499,7 +1576,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
struct dw_pcie_rp *pp;
struct resource *res;
struct dw_pcie *pci;
- int ret;
+ int ret, irq;
+ char *name;
pcie_cfg = of_device_get_match_data(dev);
if (!pcie_cfg || !pcie_cfg->ops) {
@@ -1620,6 +1698,27 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK);
+ }
+
qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
@@ -1627,6 +1726,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
phy_exit(pcie->phy);
err_pm_runtime_put:
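qcom_pcie_global_irq_thread() above uses the usual latched-status sequence: read the status register, write the same value back to the clear register, then dispatch on the bits that were set. A standalone sketch of that sequence with a fake register (not the PARF layout):

#include <stdint.h>
#include <stdio.h>

#define INT_LINK_UP	(1u << 13)

static uint32_t fake_status = INT_LINK_UP;	/* stands in for MMIO */

static void irq_thread(void)
{
	uint32_t status = fake_status;	/* readl(parf + INT_ALL_STATUS) */

	fake_status &= ~status;		/* writel(status, INT_ALL_CLEAR) */

	if (status & INT_LINK_UP)
		printf("link up: rescan the bus\n");
	else
		printf("unknown event: 0x%08x\n", status);
}

int main(void)
{
	irq_thread();
	return 0;
}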
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index f0f3ebd1a033..3a5511c3f7d9 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -141,10 +141,10 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
}
/*
- * Require direct speed change with retrying here if the link_gen is
- * PCIe Gen2 or higher.
+ * Require direct speed change with retrying here if the max_link_speed
+ * is PCIe Gen2 or higher.
*/
- changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1;
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
/*
* Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
@@ -606,7 +606,12 @@ static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
{
/* The check_addr values are magical numbers in the datasheet */
- const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121};
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
struct dw_pcie *dw = &rcar->dw;
const struct firmware *fw;
unsigned int i, timeout;
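The rcar-gen4 hunk derives the retry count as min_not_zero(requested, cap) - 1: the smaller nonzero of the requested speed and the controller cap, minus one, is the number of speed transitions left to retry. A standalone sketch with a local min_not_zero() (not <linux/minmax.h>; the cap value is illustrative):

#include <stdio.h>

#define RCAR_MAX_LINK_SPEED	4	/* illustrative Gen4 cap */

static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_link_speed = 0;	/* 0: no speed set in DT */
	unsigned int changes = min_not_zero(max_link_speed,
					    RCAR_MAX_LINK_SPEED) - 1;

	printf("speed-change retries = %u\n", changes);	/* 3 */
	return 0;
}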
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 201dced209f0..ff986ced56b2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 4bf7b433417a..c1394f2ab63f 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -177,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -861,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -872,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -907,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
/* Clear Slot Clock Configuration bit if SRNS configuration */
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 32951f7d6d6d..0e088e74155d 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = {
};
static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mobiveil_msi_irq_chip,
};
@@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
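
Editor's note: the mobiveil hunks above are the first instance of a pattern
repeated across the drivers below: the per-driver .irq_set_affinity stub
that returned -EINVAL is removed, and MSI_FLAG_NO_AFFINITY is set so the
MSI core itself rejects affinity changes. A hedged sketch of the shape
(all names here are illustrative, not from any one driver):

#include <linux/irq.h>
#include <linux/msi.h>

static struct irq_chip example_msi_irq_chip = {
	.name = "example MSI",
};

static struct msi_domain_info example_msi_domain_info = {
	/* MSI_FLAG_NO_AFFINITY replaces a stub returning -EINVAL */
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.chip	= &example_msi_irq_chip,
};
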
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 8b3e1a079cf3..a598a98247ce 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -50,7 +50,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -262,7 +262,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
@@ -649,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -669,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -694,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -706,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -729,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -920,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1075,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
	/* Indicates support for Completion Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1141,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1153,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+	 * Configuration Request Retry Status (RRS) can be returned only
+	 * when reading both bytes of PCI_VENDOR_ID at once and the
+	 * RRS_SVE flag on the Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1189,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1207,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+ * If it is possible, return Configuration Request Retry Status so
+ * that caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
@@ -1304,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1353,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d)
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1451,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = {
static struct msi_domain_info advk_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX,
.chip = &advk_msi_irq_chip,
};
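
Editor's note: the RRS Software Visibility rule the aardvark comments cite
boils down to one special case. A stripped-down sketch, assuming a helper
that models what the Root Complex hands back during a retry (the names and
the 0 fall-through are illustrative, not driver code):

#include <linux/pci.h>

#define EXAMPLE_RD_RRS_VAL	0xffff0001	/* "exists but not ready" */

static u32 example_rrs_read(bool rrs_sv_enabled, int where, int size)
{
	/* Only a Vendor ID read may surface the retry status to software */
	if (rrs_sv_enabled && where == PCI_VENDOR_ID && size >= 2)
		return EXAMPLE_RD_RRS_VAL;
	/* Otherwise the Root Complex must re-issue the request itself */
	return 0;
}
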
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 038d974a318e..d7517c3976e7 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1629,11 +1629,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
@@ -1648,7 +1643,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1697,8 +1691,8 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
};
static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &tegra_msi_top_chip,
};
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 8e457fa450a2..1e2ebbfa36d1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
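
Editor's note: the '<< 16' in the xgene fixup follows from config-space
layout: Root Capabilities occupies the upper 16 bits of the dword that also
holds Root Control, so hiding the RRS SV capability bit in a 32-bit read
means shifting the 16-bit mask up. A sketch:

#include <linux/pci.h>

static void example_hide_rrs_sv(u32 *val)
{
	/* Root Control in bits 15:0, Root Capabilities in bits 31:16 */
	*val &= ~((u32)PCI_EXP_RTCAP_RRS_SV << 16);
}
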
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 16336a525c16..e36a6e158d23 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = {
};
static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &altera_msi_irq_chip,
};
@@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index ef73baefaeb9..650b2dd81c48 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -55,12 +55,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
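
Editor's note: the altera change swaps a private TLP_REQ_ID macro for the
generic PCI_DEVID() helper, which packs bus and devfn exactly the way a TLP
requester ID does. A quick sketch of the equivalence (values illustrative):

#include <linux/pci.h>

static u16 example_requester_id(u8 bus, u8 devfn)
{
	/* PCI_DEVID(bus, devfn) == ((bus) << 8) | (devfn) */
	return PCI_DEVID(bus, devfn);	/* bus 1, devfn 0 -> 0x0100 */
}
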
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index c08683febdd4..9321280f6edb 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -75,15 +75,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC), and wherever a
+ * register name contains "BAR" it refers to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -122,7 +126,6 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
@@ -131,9 +134,13 @@
(PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
-#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
+
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets relative to the INTR2_CPU and MSI_INTR2 base offsets */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
@@ -184,9 +191,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -205,27 +214,33 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
struct subdev_regulators {
@@ -262,21 +277,25 @@ struct brcm_pcie {
u64 msi_target_addr;
struct brcm_msi *msi;
const int *reg_offsets;
- enum pcie_type type;
+ enum pcie_soc_base soc_base;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ bool has_phy;
+ u8 num_inbound_wins;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
}
/*
@@ -394,7 +413,7 @@ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
}
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -445,8 +464,8 @@ static struct irq_chip brcm_msi_irq_chip = {
};
static struct msi_domain_info brcm_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &brcm_msi_irq_chip,
};
@@ -484,12 +503,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -502,7 +515,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -649,7 +661,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -730,17 +742,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -748,20 +776,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -769,34 +806,77 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
+}
+
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+	 * The HW registers (and PCIe) use 1-based numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+	 * STB chips other than the 7712 disable the first inbound window by
+	 * default. Rather than being mapped to system memory, it is mapped
+	 * to the internal registers of the SoC. This feature is deprecated,
+	 * has security considerations, and is not implemented in our modern
+	 * SoCs.
+ */
+ if (pcie->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
+
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+		 * That being said, each BAR's size must still be a power of
+		 * two.
+ */
+ if (pcie->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ if (n > pcie->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -804,13 +884,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+	 * that enables multiple memory controllers. As such, we can return
+	 * now without doing any special configuration.
+ */
+ if (pcie->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -819,10 +906,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -857,44 +949,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+	 * Disable inbound window 3. On some chips it presents the same
+	 * window as #2, but the data appears in a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
+
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
+
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+ * Most STB chips:
+ * Do nothing.
+ * 7712:
+ * All of their BARs need to be set.
+ */
+ if (pcie->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->type == BCM2711)
- pcie->perst_set(pcie, 1);
+ if (pcie->soc_base == BCM2711) {
+ ret = pcie->perst_set(pcie, 1);
+ if (ret) {
+ pcie->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -905,9 +1072,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -924,17 +1091,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -956,25 +1122,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
aspm_support = PCIE_LINK_STATE_L1;
@@ -1025,7 +1178,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
@@ -1045,6 +1198,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->soc_base == BCM7712)
+ return;
+
/* Each unit in timeout register is 1/216,000,000 seconds */
writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
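
Editor's note: at 216 timer ticks per microsecond, the 4 s setting above
writes 216 * 4,000,000 = 864,000,000 ticks, which fits comfortably in a
32-bit register.
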
@@ -1063,7 +1220,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
}
/* Start out assuming safe mode (both mode bits cleared) */
- clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
clkreq_cntl &= ~PCIE_CLKREQ_MASK;
if (strcmp(mode, "no-l1ss") == 0) {
@@ -1106,7 +1263,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
dev_err(pcie->dev, err_msg);
mode = "safe";
}
- writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
@@ -1120,7 +1277,9 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
int ret, i;
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->perst_set(pcie, 0);
+ if (ret)
+ return ret;
/*
* Wait for 100ms after PERST# deassertion; see PCIe CEM specification
@@ -1304,23 +1463,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1328,12 +1489,14 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->bridge_sw_init_set(pcie, 1);
+
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1351,9 +1514,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1382,7 +1548,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+				dev_err(dev, "failed to reset 'rescal' controller, err=%d\n",
+ rret);
return ret;
}
}
@@ -1397,7 +1566,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1416,9 +1585,9 @@ static int brcm_pcie_resume_noirq(struct device *dev)
pcie->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1459,7 +1628,9 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1487,74 +1658,111 @@ static void brcm_pcie_remove(struct platform_device *pdev)
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const int pcie_offsets_bcm7712[] = {
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
+static const struct pcie_cfg_data bcm2711_cfg = {
.offsets = pcie_offsets,
- .type = BCM7435,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1596,9 +1804,11 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->dev = &pdev->dev;
pcie->np = np;
pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
+ pcie->soc_base = data->soc_base;
pcie->perst_set = data->perst_set;
pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->has_phy = data->has_phy;
+ pcie->num_inbound_wins = data->num_inbound_wins;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1613,25 +1823,52 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1645,7 +1882,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
@@ -1660,7 +1897,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1678,6 +1915,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
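
Editor's note: several brcmstb hunks round an inbound window up to a power
of two with '1ULL << fls64(size - 1)'. A minimal sketch of why that works,
assuming size > 0:

#include <linux/bitops.h>

static u64 example_pow2_roundup(u64 v)
{
	/*
	 * fls64(v - 1) is the 1-based index of the highest set bit of
	 * v - 1, so shifting 1 past it covers v itself, e.g.
	 * v = 0x6000 -> fls64(0x5fff) = 15 -> 1ULL << 15 = 0x8000,
	 * while an exact power of two maps to itself.
	 */
	return 1ULL << fls64(v - 1);
}
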
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 97f739a2c9f8..22134e95574b 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -54,7 +54,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+	 * when it receives an RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+	 * an RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
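
Editor's note: the iproc workaround above is a bounded polling loop. A
stripped-down sketch of its shape (the constants and the status check are
stand-ins, not the driver's):

#include <linux/delay.h>
#include <linux/io.h>

#define EXAMPLE_RETRY_STATUS	0xffff0001
#define EXAMPLE_TIMEOUT_US	500000

static u32 example_cfg_retry(void __iomem *cfg_data_p)
{
	int timeout = EXAMPLE_TIMEOUT_US;
	u32 data = readl(cfg_data_p);

	/* Re-read while the value looks like RRS; give up after timeout */
	while (data == EXAMPLE_RETRY_STATUS && timeout--) {
		udelay(1);
		data = readl(cfg_data_p);
	}
	return data;
}
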
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index b7e8e24f6a40..66ce4b5d309b 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,7 +6,9 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -15,6 +17,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -29,6 +33,12 @@
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +78,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -100,6 +118,26 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: PCIe power_up callback
+ * @phy_resets: SoC-specific PHY reset line data
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+};
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,7 +156,7 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
@@ -131,13 +169,14 @@ struct mtk_msi_set {
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
@@ -151,6 +190,8 @@ struct mtk_gen3_pcie {
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -424,12 +465,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
@@ -450,8 +485,9 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &mtk_msi_irq_chip,
};
@@ -517,7 +553,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -618,7 +653,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -775,10 +809,10 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -791,12 +825,12 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -827,13 +861,96 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return 0;
}
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int err;
+ u32 val;
+
+ /*
+ * Wait for the time needed to complete the bulk assert in
+ * mtk_pcie_setup for EN7581 SoC.
+ */
+ mdelay(PCIE_EN7581_RESET_TIME_MS);
+
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+ * This time is specific for EN7581 SoC.
+	 * This time is specific to the EN7581 SoC.
+ mdelay(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare;
+ }
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+		dev_err(dev, "failed to enable clock\n");
+ goto err_clk_enable;
+ }
+
+ return 0;
+
+err_clk_enable:
+ clk_bulk_unprepare(pcie->num_clks, pcie->clks);
+err_clk_prepare:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -869,7 +986,7 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
return err;
}
@@ -884,7 +1001,7 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
}
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
@@ -896,15 +1013,21 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
return err;
/*
+	 * Deassert the lines here to avoid an unbalanced deassert_count,
+	 * since the bulk resets are shared.
+ */
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ /*
* The controller may have been left out of reset by the bootloader
* so make sure that we get a clean start by asserting resets here.
*/
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+
reset_control_assert(pcie->mac_reset);
usleep_range(10, 20);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -939,6 +1062,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -1054,7 +1178,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1074,8 +1198,27 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
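
Editor's note: the mediatek-gen3 rework is the usual per-SoC match-data
pattern: each compatible entry above carries a pdata with a power_up
callback and PHY reset-line names, and probe resolves it at runtime. A
hedged sketch of the lookup half (the function is illustrative; the types
mirror the patch):

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_soc_power_up(struct platform_device *pdev,
				struct mtk_gen3_pcie *pcie)
{
	/*
	 * Picks &mtk_pcie_soc_en7581 or &mtk_pcie_soc_mt8192,
	 * depending on which compatible string matched.
	 */
	pcie->soc = device_get_match_data(&pdev->dev);
	if (!pcie->soc)
		return -ENODEV;

	/* Dispatch to the SoC-specific bring-up callback */
	return pcie->soc->power_up(pcie);
}
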
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 7fc0d7709b7f..7f7d04c2ea57 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -211,7 +211,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -486,8 +478,8 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mtk_msi_irq_chip,
};
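The same conversion repeats in the rcar, xilinx, plda and vmd hunks below: each driver used to stub .irq_set_affinity with a function returning -EINVAL, and now drops the stub in favour of MSI_FLAG_NO_AFFINITY, letting the MSI core refuse affinity changes centrally. A condensed before/after, with hypothetical names:

#include <linux/irq.h>
#include <linux/msi.h>

/* Before: per-driver stub wired into the irq_chip */
static int example_msi_set_affinity(struct irq_data *d,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

/* After: no .irq_set_affinity callback; the flag does the refusing */
static struct irq_chip example_msi_chip = {
	.name = "example-MSI",
};

static struct msi_domain_info example_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY,
	.chip = &example_msi_chip,
};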
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index c01efc6ea64f..3dd653f3d784 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -658,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
@@ -678,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -725,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
};
static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &rcar_msi_top_chip,
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 5be5dfd8398f..dd117f07fc95 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -71,10 +71,24 @@
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 64
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
struct xilinx_msi {
struct irq_domain *msi_domain;
unsigned long *bitmap;
@@ -88,6 +102,7 @@ struct xilinx_msi {
* struct pl_dma_pcie - PCIe port information
* @dev: Device pointer
* @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
* @irq: Interrupt number
* @cfg: Holds mappings of config space window
* @phys_reg_base: Physical address of reg base
@@ -97,10 +112,12 @@ struct xilinx_msi {
* @msi: MSI information
* @intx_irq: INTx error interrupt number
* @lock: Lock protecting shared register access
+ * @variant: pointer to PL DMA PCIe variant information
*/
struct pl_dma_pcie {
struct device *dev;
void __iomem *reg_base;
+ void __iomem *cfg_base;
int irq;
struct pci_config_window *cfg;
phys_addr_t phys_reg_base;
@@ -110,16 +127,23 @@ struct pl_dma_pcie {
struct xilinx_msi msi;
int intx_irq;
raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
};
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
return readl(port->reg_base + reg);
}
static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
}
static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
@@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
return NULL;
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
@@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = {
};
static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &xilinx_msi_irq_chip,
};
@@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int xilinx_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip xilinx_irq_chip = {
.name = "pl_dma:MSI",
.irq_compose_msi_msg = xilinx_compose_msi_msg,
- .irq_set_affinity = xilinx_msi_set_affinity,
};
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
port->reg_base = port->cfg->win;
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
err = xilinx_request_msi_irq(port);
if (err) {
pci_ecam_free(port->cfg);
@@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -791,9 +822,22 @@ err_irq_domain:
return err;
}
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
{
.compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
},
{}
};
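The QDMA variant keeps the ECAM window where reg_base used to live and moves the bridge registers to a separate "breg" resource at a fixed offset, so the register accessors branch on the variant. The idiom, reduced to its essentials (names and offset value illustrative):

#include <linux/io.h>

#define EXAMPLE_ALT_BASE_OFF	0xcd8	/* offset of the alternate layout */

struct example_port {
	void __iomem *reg_base;
	bool alt_layout;	/* QDMA-style register placement */
};

static u32 example_read(struct example_port *p, u32 reg)
{
	/* Same register map, shifted wholesale for the alternate variant */
	if (p->alt_layout)
		return readl(p->reg_base + reg + EXAMPLE_ALT_BASE_OFF);

	return readl(p->reg_base + reg);
}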
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 0408f4d612b5..a8ae14474dd0 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +81,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +97,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
@@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
@@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = {
};
static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &nwl_msi_irq_chip,
};
#endif
@@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -521,6 +516,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
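nwl_pcie_phy_enable() above uses the standard partial-initialization unwind: on failure it tears down only the PHYs that were already brought up, in reverse order. Reduced to a generic sketch:

#include <linux/phy/phy.h>

static int example_enable_phys(struct phy **phys, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = phy_init(phys[i]);
		if (ret)
			goto err;

		ret = phy_power_on(phys[i]);
		if (ret) {
			phy_exit(phys[i]);
			goto err;
		}
	}

	return 0;

err:
	/* Undo only the PHYs that made it, newest first */
	while (i--) {
		phy_power_off(phys[i]);
		phy_exit(phys[i]);
	}

	return ret;
}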
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -732,6 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -759,6 +809,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
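The parse_dt hunk above treats -ENODEV from devm_of_phy_get_by_index() as "no more PHYs described" rather than as an error, which is the usual way to handle optional, variable-count PHY references. A hedged sketch of the same loop:

#include <linux/device.h>
#include <linux/phy/phy.h>

static int example_get_optional_phys(struct device *dev, struct phy **phys,
				     int max)
{
	int i;

	for (i = 0; i < max; i++) {
		phys[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);

		/* -ENODEV: the DT lists fewer PHYs; stop, don't fail */
		if (PTR_ERR(phys[i]) == -ENODEV) {
			phys[i] = NULL;
			break;
		}

		if (IS_ERR(phys[i]))
			return PTR_ERR(phys[i]);
	}

	return 0;
}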
@@ -779,6 +841,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
@@ -798,16 +861,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -817,11 +886,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -831,5 +916,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove_new = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..0b534f73a942 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = {
.irq_ack = xilinx_msi_top_irq_ack,
};
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
};
static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY,
.chip = &xilinx_msi_top_chip,
};
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index a18923d7cea6..8533dc618d45 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -76,17 +76,10 @@ static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int plda_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip plda_msi_bottom_irq_chip = {
.name = "PLDA MSI",
.irq_ack = plda_msi_bottom_irq_ack,
.irq_compose_msi_msg = plda_compose_msi_msg,
- .irq_set_affinity = plda_msi_set_affinity,
};
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -146,8 +139,8 @@ static struct irq_chip plda_msi_irq_chip = {
};
static struct msi_domain_info plda_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &plda_msi_irq_chip,
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a726de0af011..264a180403a0 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -204,22 +204,11 @@ static void vmd_irq_disable(struct irq_data *data)
raw_spin_unlock_irqrestore(&list_lock, flags);
}
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
.irq_enable = vmd_irq_enable,
.irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
@@ -326,7 +315,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = {
static struct msi_domain_info vmd_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.ops = &vmd_msi_domain_ops,
.chip = &vmd_msi_controller,
};
@@ -1053,9 +1042,9 @@ static void vmd_remove(struct pci_dev *dev)
static void vmd_shutdown(struct pci_dev *dev)
{
- struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_remove_irq_domain(vmd);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 3780a9f9ec00..b133967faef8 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -483,6 +483,8 @@ static void pcim_disable_device(void *pdev_raw)
if (!pdev->pinned)
pci_disable_device(pdev);
+
+ pdev->is_managed = false;
}
/**
@@ -728,7 +730,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* Mapping and region will get automatically released on driver detach. If
* desired, release manually only with pcim_iounmap_region().
*/
-static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
const char *name)
{
int ret;
@@ -761,6 +763,7 @@ err_region:
return IOMEM_ERR_PTR(ret);
}
+EXPORT_SYMBOL(pcim_iomap_region);
/**
* pcim_iounmap_region - Unmap and release a PCI BAR
@@ -783,7 +786,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
}
/**
- * pcim_iomap_regions - Request and iomap PCI BARs
+ * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
* @name: Name associated with the requests
@@ -791,6 +794,9 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
* Returns: 0 on success, negative error code on failure.
*
* Request and iomap regions specified by @mask.
+ *
+ * This function is DEPRECATED. Do not use it in new code.
+ * Use pcim_iomap_region() instead.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
@@ -863,6 +869,7 @@ int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
return _pcim_request_region(pdev, bar, name, 0);
}
+EXPORT_SYMBOL(pcim_request_region);
/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 84309dfe0c68..17f007109255 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -838,6 +838,10 @@ void pci_epc_destroy(struct pci_epc *epc)
{
pci_ep_cfs_remove_epc_group(epc->group);
device_unregister(&epc->dev);
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
+#endif
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
@@ -900,6 +904,16 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
epc->dev.release = pci_epc_release;
epc->ops = ops;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
+#else
+ /*
+ * TODO: If the architecture doesn't support generic PCI
+ * domains, then a custom implementation has to be used.
+ */
+ WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
+#endif
+
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
if (ret)
goto put_dev;
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index 9d428b0ea524..92e6e20e8595 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -51,11 +51,6 @@ ibmphp:
shpchp:
-* There is only a single implementation of struct hpc_ops. Can the struct be
- removed and its functions invoked directly? This has already been done in
- pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
- if there was a specific reason not to apply the same change to shpchp.
-
* The hardirq handler shpc_isr() queues events on a workqueue. It can be
simplified by converting it to threaded IRQ handling. Use pciehp as a
template.
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c94b40e64baf..47a3ed16159a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
} else {
/* Did not get a match on the target PCI device. Check
* if the current IRQ table entry is a PCI-to-PCI
- * bridge device. If so, and it's secondary bus
+ * bridge device. If so, and its secondary bus
* matches the bus number for the target device, I need
* to save the bridge's slot number. If I can not find
* an entry for the target device, I will have to
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index e9f1fb333a71..718bc6cf12cb 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -138,7 +138,7 @@ static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 o
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
return -1;
- if (vendID == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(vendID))
return -1;
return pci_bus_read_config_dword(bus, devfn, offset, value);
}
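Both cpqphp hunks swap an open-coded compare against 0xffffffff for PCI_POSSIBLE_ERROR(), which checks a config-space read against PCI_ERROR_RESPONSE (all ones) in a type-safe way. Illustrative use:

#include <linux/pci.h>

static bool example_function_present(struct pci_bus *bus, unsigned int devfn)
{
	u32 vendor;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor))
		return false;

	/* All ones means the read was not claimed by any device */
	return !PCI_POSSIBLE_ERROR(vendor);
}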
@@ -253,7 +253,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff))
+ if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0;
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 7333b305f2a5..055518ee354d 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -112,7 +112,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- /* if the slot exits it always contains a function */
+ /* if the slot exists it always contains a function */
*value = 1;
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3a97f455336e..f0e2d2d54d71 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -72,7 +72,6 @@ struct slot {
u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
- const struct hpc_ops *hpc_ops;
struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
@@ -94,7 +93,6 @@ struct controller {
int slot_num_inc; /* 1 or -1 */
struct pci_dev *pci_dev;
struct list_head slot_list;
- const struct hpc_ops *hpc_ops;
wait_queue_head_t queue; /* sleep & wake process */
u8 slot_device_offset;
u32 pcix_misc2_reg; /* for amd pogo errata */
@@ -300,24 +298,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
}
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*slot_enable)(struct slot *slot);
- int (*slot_disable)(struct slot *slot);
- int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_prog_int)(struct slot *slot, u8 *prog_int);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_cmd_status)(struct controller *ctrl);
-};
+int shpchp_power_on_slot(struct slot *slot);
+int shpchp_slot_enable(struct slot *slot);
+int shpchp_slot_disable(struct slot *slot);
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed);
+int shpchp_get_power_status(struct slot *slot, u8 *status);
+int shpchp_get_attention_status(struct slot *slot, u8 *status);
+int shpchp_set_attention_status(struct slot *slot, u8 status);
+int shpchp_get_latch_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed);
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int);
+int shpchp_query_power_fault(struct slot *slot);
+void shpchp_green_led_on(struct slot *slot);
+void shpchp_green_led_off(struct slot *slot);
+void shpchp_green_led_blink(struct slot *slot);
+void shpchp_release_ctlr(struct controller *ctrl);
+int shpchp_check_cmd_status(struct controller *ctrl);
#endif /* _SHPCHP_H */
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 56c7795ed890..a92e28b72908 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -81,7 +81,6 @@ static int init_slots(struct controller *ctrl)
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + i;
- slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
@@ -150,7 +149,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
__func__, slot_name(slot));
slot->attention_save = status;
- slot->hpc_ops->set_attention_status(slot, status);
+ shpchp_set_attention_status(slot, status);
return 0;
}
@@ -183,7 +182,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = shpchp_get_power_status(slot, value);
if (retval < 0)
*value = slot->pwr_save;
@@ -198,7 +197,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = shpchp_get_attention_status(slot, value);
if (retval < 0)
*value = slot->attention_save;
@@ -213,7 +212,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = shpchp_get_latch_status(slot, value);
if (retval < 0)
*value = slot->latch_save;
@@ -228,7 +227,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = shpchp_get_adapter_status(slot, value);
if (retval < 0)
*value = slot->presence_save;
@@ -293,7 +292,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_cleanup_slots:
cleanup_slots(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
err_out_free_ctrl:
kfree(ctrl);
err_out_none:
@@ -306,7 +305,7 @@ static void shpc_remove(struct pci_dev *dev)
dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
kfree(ctrl);
}
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6a6705e0cf17..e6c6f23bae27 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Attention button interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
/*
* Button pressed - See if need to TAKE ACTION!!!
@@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Switch interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
ctrl_dbg(ctrl, "Card present %x Power status %x\n",
p_slot->presence_save, p_slot->pwr_save);
@@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
/*
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
if (p_slot->presence_save) {
/*
* Card Present
@@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(shpchp_query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
int rc = 0;
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ rc = shpchp_set_bus_speed_mode(p_slot, speed);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot)
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
- rc = p_slot->hpc_ops->power_on_slot(p_slot);
+ rc = shpchp_power_on_slot(p_slot);
if (rc) {
ctrl_err(ctrl, "Failed to power on slot\n");
return -1;
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot)
}
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
}
- rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
+ rc = shpchp_get_adapter_speed(p_slot, &asp);
if (rc) {
ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
@@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot)
return rc;
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
@@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot)
p_slot->is_a_board = 0x01;
p_slot->pwr_save = 1;
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
return 0;
err_exit:
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
@@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot)
p_slot->status = 0x01;
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
return rc;
}
- rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ rc = shpchp_set_attention_status(p_slot, 0);
if (rc) {
ctrl_err(ctrl, "Issue of Set Attention command failed\n");
return rc;
@@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work)
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
if (shpchp_enable_slot(p_slot))
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
static void update_slot_info(struct slot *slot)
{
- slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
- slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
- slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
- slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
+ shpchp_get_power_status(slot, &slot->pwr_save);
+ shpchp_get_attention_status(slot, &slot->attention_save);
+ shpchp_get_latch_status(slot, &slot->latch_save);
+ shpchp_get_adapter_status(slot, &slot->presence_save);
}
/*
@@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ shpchp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
@@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
}
/* blink green LED and turn off amber */
- p_slot->hpc_ops->green_led_blink(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_blink(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
@@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
else
- p_slot->hpc_ops->green_led_off(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work)
break;
case INT_POWER_FAULT:
ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 1);
+ shpchp_green_led_off(p_slot);
break;
default:
update_slot_info(p_slot);
@@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power off) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
@@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot)
p_slot->is_a_board = 1;
/* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_power_status(p_slot, &p_slot->pwr_save);
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_latch_status(p_slot, &getstatus);
if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
@@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot)
retval = board_added(p_slot);
if (retval) {
- p_slot->hpc_ops->get_adapter_status(p_slot,
- &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
@@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power on) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 48e4daefc44a..012b9e3fe5b0 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -167,7 +167,6 @@
static irqreturn_t shpc_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
-static int hpc_check_cmd_status(struct controller *ctrl);
static inline u8 shpc_readb(struct controller *ctrl, int reg)
{
@@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
if (retval)
goto out;
- cmd_status = hpc_check_cmd_status(slot->ctrl);
+ cmd_status = shpchp_check_cmd_status(slot->ctrl);
if (cmd_status) {
ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
@@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
return retval;
}
-static int hpc_check_cmd_status(struct controller *ctrl)
+int shpchp_check_cmd_status(struct controller *ctrl)
{
int retval = 0;
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
@@ -357,7 +356,7 @@ static int hpc_check_cmd_status(struct controller *ctrl)
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int shpchp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int shpchp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int shpchp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int shpchp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int)
{
struct controller *ctrl = slot->ctrl;
@@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
return 0;
}
-static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
@@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
- retval = hpc_get_prog_int(slot, &pi);
+ retval = shpchp_get_prog_int(slot, &pi);
if (retval)
return retval;
@@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_query_power_fault(struct slot *slot)
+int shpchp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !(slot_reg & POWER_FAULT);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int shpchp_set_attention_status(struct slot *slot, u8 value)
{
u8 slot_cmd = 0;
@@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
}
-static void hpc_set_green_led_on(struct slot *slot)
+void shpchp_green_led_on(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void shpchp_green_led_off(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void shpchp_green_led_blink(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
}
-static void hpc_release_ctlr(struct controller *ctrl)
+void shpchp_release_ctlr(struct controller *ctrl)
{
int i;
u32 slot_reg, serr_int;
@@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot *slot)
+int shpchp_power_on_slot(struct slot *slot)
{
int retval;
@@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot)
return retval;
}
-static int hpc_slot_enable(struct slot *slot)
+int shpchp_slot_enable(struct slot *slot)
{
int retval;
@@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot)
return retval;
}
-static int hpc_slot_disable(struct slot *slot)
+int shpchp_slot_disable(struct slot *slot)
{
int retval;
@@ -681,7 +680,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
}
-static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl)
return retval;
}
-static const struct hpc_ops shpchp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .slot_enable = hpc_slot_enable,
- .slot_disable = hpc_slot_disable,
- .set_bus_speed_mode = hpc_set_bus_speed_mode,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_adapter_speed = hpc_get_adapter_speed,
- .get_prog_int = hpc_get_prog_int,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = hpc_release_ctlr,
-};
-
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
{
int rc = -1, num_slots = 0;
@@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Setup wait queue */
init_waitqueue_head(&ctrl->queue);
- ctrl->hpc_ops = &shpchp_hpc_ops;
-
/* Return PCI Controller Info */
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index a715a4803c95..9fb7cacc15cd 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -156,7 +156,7 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc);
* the different IOMAP ranges.
*
* But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
+ * it has _not_ defined its own private pci_iounmap function, we define
* it here.
*
* NOTE! This default implementation assumes that if the architecture
diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c
new file mode 100644
index 000000000000..97507e0df769
--- /dev/null
+++ b/drivers/pci/npem.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Enclosure Management driver, created for LED interfaces based on
+ * indications. It controls *which* indications blink but does not specify
+ * *how* they blink - that is hardware defined.
+ *
+ * The driver name refers to Native PCIe Enclosure Management, the first
+ * indication-oriented standard with a published specification.
+ *
+ * Native PCIe Enclosure Management (NPEM)
+ * PCIe Base Specification r6.1 sec 6.28, 7.9.19
+ *
+ * _DSM Definitions for PCIe SSD Status LED
+ * PCI Firmware Specification, r3.3 sec 4.7
+ *
+ * Two backends are supported to manipulate indications: Direct NPEM register
+ * access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops).
+ * _DSM is used if supported, else NPEM.
+ *
+ * Copyright (c) 2021-2022 Dell Inc.
+ * Copyright (c) 2023-2024 Intel Corporation
+ * Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+#include <linux/uleds.h>
+
+#include "pci.h"
+
+struct indication {
+ u32 bit;
+ const char *name;
+};
+
+static const struct indication npem_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"},
+ {PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"},
+ {PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"},
+ {PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"},
+ {PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"},
+ {PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"},
+ {PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"},
+ {PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"},
+ {0, NULL}
+};
+
+/* _DSM PCIe SSD LED States correspond to NPEM register values */
+static const struct indication dsm_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {0, NULL}
+};
+
+#define for_each_indication(ind, inds) \
+ for (ind = inds; ind->bit; ind++)
+
+/*
+ * The driver has an internal list of supported indications. Ideally, the
+ * driver should not touch bits that are not defined and for which no LED
+ * devices are exposed, but in practice it needs to be able to turn them off.
+ *
+ * Otherwise, there would be no way to turn off indications enabled by other
+ * utilities or enabled by default, which leads to a bad user experience.
+ *
+ * Additionally, this mask excludes NPEM commands like RESET and ENABLE.
+ */
+static u32 reg_to_indications(u32 caps, const struct indication *inds)
+{
+ const struct indication *ind;
+ u32 supported_indications = 0;
+
+ for_each_indication(ind, inds)
+ supported_indications |= ind->bit;
+
+ return caps & supported_indications;
+}
+
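A hypothetical consumer of the table and macro above, reusing the file-local npem_indications array to count how many LED class devices a given capability value would yield:

static int example_count_leds(u32 caps)
{
	const struct indication *ind;
	int cnt = 0;

	for_each_indication(ind, npem_indications)
		if (caps & ind->bit)
			cnt++;

	return cnt;
}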
+/**
+ * struct npem_led - LED details
+ * @indication: indication details
+ * @npem: NPEM device
+ * @name: LED name
+ * @led: LED device
+ */
+struct npem_led {
+ const struct indication *indication;
+ struct npem *npem;
+ char name[LED_MAX_NAME_SIZE];
+ struct led_classdev led;
+};
+
+/**
+ * struct npem_ops - backend specific callbacks
+ * @get_active_indications: get active indications
+ * npem: NPEM device
+ * inds: response buffer
+ * @set_active_indications: set new indications
+ * npem: NPEM device
+ * inds: bit mask to set
+ * @inds: supported indications array; the set of indications is backend specific
+ * @name: backend name
+ */
+struct npem_ops {
+ int (*get_active_indications)(struct npem *npem, u32 *inds);
+ int (*set_active_indications)(struct npem *npem, u32 inds);
+ const struct indication *inds;
+ const char *name;
+};
+
+/**
+ * struct npem - NPEM device properties
+ * @dev: PCI device this driver is attached to
+ * @ops: backend specific callbacks
+ * @lock: serializes concurrent access to NPEM device by multiple LED devices
+ * @pos: cached offset of NPEM Capability Register in Configuration Space;
+ * only used if NPEM registers are accessed directly and not through _DSM
+ * @supported_indications: cached bit mask of supported indications;
+ * non-indication and reserved bits in the NPEM Capability Register are
+ * cleared in this bit mask
+ * @active_indications: cached bit mask of active indications;
+ * non-indication and reserved bits in the NPEM Control Register are
+ * cleared in this bit mask
+ * @active_inds_initialized: whether @active_indications has been initialized;
+ * On Dell platforms, it is required that IPMI drivers are loaded before
+ * the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to
+ * get/set the active LEDs. By initializing @active_indications lazily
+ * (on first access to an LED), IPMI drivers are given a chance to load.
+ * If they are not loaded in time, users will see various errors on LED
+ * access in dmesg. Once they are loaded, the errors go away and LED
+ * access becomes possible.
+ * @led_cnt: size of @leds array
+ * @leds: array containing LED class devices of all supported LEDs
+ */
+struct npem {
+ struct pci_dev *dev;
+ const struct npem_ops *ops;
+ struct mutex lock;
+ u16 pos;
+ u32 supported_indications;
+ u32 active_indications;
+ unsigned int active_inds_initialized:1;
+ int led_cnt;
+ struct npem_led leds[];
+};
+
+static int npem_read_reg(struct npem *npem, u16 reg, u32 *val)
+{
+ int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_write_ctrl(struct npem *npem, u32 reg)
+{
+ int pos = npem->pos + PCI_NPEM_CTRL;
+ int ret = pci_write_config_dword(npem->dev, pos, reg);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_get_active_indications(struct npem *npem, u32 *inds)
+{
+ u32 ctrl;
+ int ret;
+
+ ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ /* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */
+ if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) {
+ *inds = 0;
+ return 0;
+ }
+
+ *inds = ctrl & npem->supported_indications;
+
+ return 0;
+}
+
+static int npem_set_active_indications(struct npem *npem, u32 inds)
+{
+ int ctrl, ret, ret_val;
+ u32 cc_status;
+
+ lockdep_assert_held(&npem->lock);
+
+ /* This bit is always required */
+ ctrl = inds | PCI_NPEM_CTRL_ENABLE;
+
+ ret = npem_write_ctrl(npem, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * For the case where a NPEM command has not completed immediately,
+ * it is recommended that software not continuously "spin" on polling
+ * the status register, but rather poll under interrupt at a reduced
+ * rate; for example at 10 ms intervals.
+ *
+ * PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM
+ * Command Completed"
+ */
+ ret = read_poll_timeout(npem_read_reg, ret_val,
+ ret_val || (cc_status & PCI_NPEM_STATUS_CC),
+ 10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem,
+ PCI_NPEM_STATUS, &cc_status);
+ if (ret)
+ return ret;
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * All writes to control register, including writes that do not change
+ * the register value, are NPEM commands and should eventually result
+ * in a command completion indication in the NPEM Status Register.
+ *
+ * PCIe Base Specification r6.1 sec 7.9.19.3
+ *
+	 * The register may not be updated, or other conflicting bits may be
+	 * cleared. The spec is not strict here. Read the NPEM Control register
+	 * back after the write to keep the cache in sync.
+ */
+ return npem_get_active_indications(npem, &npem->active_indications);
+}
+
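npem_set_active_indications() polls for command completion with read_poll_timeout() from <linux/iopoll.h>, which re-invokes the accessor until the condition holds or the timeout elapses. A self-contained variant of the same idiom using the readl flavour (register and bit are illustrative):

#include <linux/bits.h>
#include <linux/iopoll.h>

static int example_wait_for_ready(void __iomem *status_reg)
{
	u32 val;

	/* Re-read every 10 ms, give up with -ETIMEDOUT after one second */
	return readl_poll_timeout(status_reg, val, val & BIT(0),
				  10 * USEC_PER_MSEC, USEC_PER_SEC);
}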
+static const struct npem_ops npem_ops = {
+ .get_active_indications = npem_get_active_indications,
+ .set_active_indications = npem_set_active_indications,
+ .name = "Native PCIe Enclosure Management",
+ .inds = npem_indications,
+};
+
+#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\
+ 0xd5, 0x1e, 0x19, 0x4d)
+#define GET_SUPPORTED_STATES_DSM 1
+#define GET_STATE_DSM 2
+#define SET_STATE_DSM 3
+
+static const guid_t dsm_guid = DSM_GUID;
+
+static bool npem_has_dsm(struct pci_dev *pdev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &dsm_guid, 0x1,
+ BIT(GET_SUPPORTED_STATES_DSM) |
+ BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM));
+}
+
+struct dsm_output {
+ u16 status;
+ u8 function_specific_err;
+ u8 vendor_specific_err;
+ u32 state;
+};
+
+/**
+ * dsm_evaluate() - send DSM PCIe SSD Status LED command
+ * @pdev: PCI device
+ * @dsm_func: DSM LED Function
+ * @output: buffer to copy DSM Response
+ * @value_to_set: value for SET_STATE_DSM function
+ *
+ * To avoid burdening the caller with ACPI context handling, the returned
+ * _DSM Output Buffer is copied.
+ */
+static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func,
+ struct dsm_output *output, u32 value_to_set)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *out_obj, arg3[2];
+ union acpi_object *arg3_p = NULL;
+
+ if (dsm_func == SET_STATE_DSM) {
+ arg3[0].type = ACPI_TYPE_PACKAGE;
+ arg3[0].package.count = 1;
+ arg3[0].package.elements = &arg3[1];
+
+ arg3[1].type = ACPI_TYPE_BUFFER;
+ arg3[1].buffer.length = 4;
+ arg3[1].buffer.pointer = (u8 *)&value_to_set;
+
+ arg3_p = arg3;
+ }
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func,
+ arg3_p, ACPI_TYPE_BUFFER);
+ if (!out_obj)
+ return -EIO;
+
+ if (out_obj->buffer.length < sizeof(struct dsm_output)) {
+ ACPI_FREE(out_obj);
+ return -EIO;
+ }
+
+ memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output));
+
+ ACPI_FREE(out_obj);
+ return 0;
+}
+
+static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(pdev, dsm_func, &output, 0);
+
+ if (ret)
+ return ret;
+
+ if (output.status != 0)
+ return -EIO;
+
+ *buf = output.state;
+ return 0;
+}
+
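A minimal usage sketch of the helper above, querying which LED states the
platform advertises (the same call pci_npem_create() makes further down):

	u32 states;
	int err = dsm_get(pdev, GET_SUPPORTED_STATES_DSM, &states);

	if (!err)
		pci_info(pdev, "supported LED states: %#x\n", states);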
+static int dsm_get_active_indications(struct npem *npem, u32 *buf)
+{
+ int ret = dsm_get(npem->dev, GET_STATE_DSM, buf);
+
+	/* Filter out unsupported indications from the response */
+ *buf &= npem->supported_indications;
+ return ret;
+}
+
+static int dsm_set_active_indications(struct npem *npem, u32 value)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value);
+
+ if (ret)
+ return ret;
+
+ switch (output.status) {
+ case 4:
+		/*
+		 * A function-specific error of 1 means "not all bits were
+		 * set": the platform disregarded some or all of the requested
+		 * state changes. OSPM should check the resulting PCIe SSD
+		 * Status LED States to see what, if anything, has changed.
+		 *
+		 * PCI Firmware Specification, r3.3 Table 4-19.
+		 */
+ if (output.function_specific_err != 1)
+ return -EIO;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ return -EIO;
+ }
+
+ npem->active_indications = output.state;
+
+ return 0;
+}
+
+static const struct npem_ops dsm_ops = {
+ .get_active_indications = dsm_get_active_indications,
+ .set_active_indications = dsm_set_active_indications,
+ .name = "_DSM PCIe SSD Status LED Management",
+ .inds = dsm_indications,
+};
+
+static int npem_initialize_active_indications(struct npem *npem)
+{
+ int ret;
+
+ lockdep_assert_held(&npem->lock);
+
+ if (npem->active_inds_initialized)
+ return 0;
+
+ ret = npem->ops->get_active_indications(npem,
+ &npem->active_indications);
+ if (ret)
+ return ret;
+
+ npem->active_inds_initialized = true;
+ return 0;
+}
+
+/*
+ * The status of each indicator is cached on the first brightness_get()/
+ * brightness_set() call and updated at write time. brightness_get() is only
+ * responsible for reflecting the last written/cached value.
+ */
+static enum led_brightness brightness_get(struct led_classdev *led)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ int ret, val = 0;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (npem->active_indications & nled->indication->bit)
+ val = 1;
+
+out:
+ mutex_unlock(&npem->lock);
+ return val;
+}
+
+static int brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ u32 indications;
+ int ret;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (brightness == 0)
+ indications = npem->active_indications & ~(nled->indication->bit);
+ else
+ indications = npem->active_indications | nled->indication->bit;
+
+ ret = npem->ops->set_active_indications(npem, indications);
+
+out:
+ mutex_unlock(&npem->lock);
+ return ret;
+}
+
+static void npem_free(struct npem *npem)
+{
+ struct npem_led *nled;
+ int cnt;
+
+ if (!npem)
+ return;
+
+ for (cnt = 0; cnt < npem->led_cnt; cnt++) {
+ nled = &npem->leds[cnt];
+
+ if (nled->name[0])
+ led_classdev_unregister(&nled->led);
+ }
+
+ mutex_destroy(&npem->lock);
+ kfree(npem);
+}
+
+static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
+{
+ struct led_classdev *led = &nled->led;
+ struct led_init_data init_data = {};
+ char *name = nled->name;
+ int ret;
+
+ init_data.devicename = pci_name(npem->dev);
+ init_data.default_label = nled->indication->name;
+
+ ret = led_compose_name(&npem->dev->dev, &init_data, name);
+ if (ret)
+ return ret;
+
+ led->name = name;
+ led->brightness_set_blocking = brightness_set;
+ led->brightness_get = brightness_get;
+ led->max_brightness = 1;
+ led->default_trigger = "none";
+ led->flags = 0;
+
+ ret = led_classdev_register(&npem->dev->dev, led);
+ if (ret)
+ /* Clear the name to indicate that it is not registered. */
+ name[0] = 0;
+ return ret;
+}
+
+static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops,
+ int pos, u32 caps)
+{
+ u32 supported = reg_to_indications(caps, ops->inds);
+ int supported_cnt = hweight32(supported);
+ const struct indication *indication;
+ struct npem_led *nled;
+ struct npem *npem;
+ int led_idx = 0;
+ int ret;
+
+ npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL);
+ if (!npem)
+ return -ENOMEM;
+
+ npem->supported_indications = supported;
+ npem->led_cnt = supported_cnt;
+ npem->pos = pos;
+ npem->dev = dev;
+ npem->ops = ops;
+
+ mutex_init(&npem->lock);
+
+ for_each_indication(indication, npem_indications) {
+ if (!(npem->supported_indications & indication->bit))
+ continue;
+
+ nled = &npem->leds[led_idx++];
+ nled->indication = indication;
+ nled->npem = npem;
+
+ ret = pci_npem_set_led_classdev(npem, nled);
+ if (ret) {
+ npem_free(npem);
+ return ret;
+ }
+ }
+
+ dev->npem = npem;
+ return 0;
+}
+
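The allocation above uses struct_size() from <linux/overflow.h>, which sizes a
struct with a trailing flexible array member and checks for multiplication
overflow. A minimal sketch of the pattern, with illustrative names:

	struct demo {
		int led_cnt;
		struct npem_led leds[];		/* flexible array member */
	};

	/* sizeof(struct demo) + n * sizeof(struct npem_led), overflow-checked */
	struct demo *d = kzalloc(struct_size(d, leds, n), GFP_KERNEL);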
+void pci_npem_remove(struct pci_dev *dev)
+{
+ npem_free(dev->npem);
+}
+
+void pci_npem_create(struct pci_dev *dev)
+{
+ const struct npem_ops *ops = &npem_ops;
+ int pos = 0, ret;
+ u32 cap;
+
+ if (npem_has_dsm(dev)) {
+ /*
+ * OS should use the DSM for LED control if it is available
+ * PCI Firmware Spec r3.3 sec 4.7.
+ */
+ ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap);
+ if (ret)
+ return;
+
+ ops = &dsm_ops;
+ } else {
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM);
+ if (pos == 0)
+ return;
+
+ if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 ||
+ (cap & PCI_NPEM_CAP_CAPABLE) == 0)
+ return;
+ }
+
+ pci_info(dev, "Configuring %s\n", ops->name);
+
+ ret = pci_npem_init(dev, ops, pos, cap);
+ if (ret)
+ pci_err(dev, "Failed to register %s, err: %d\n", ops->name,
+ ret);
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 9cc447da9475..af370628e583 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -15,6 +15,7 @@
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
@@ -1541,3 +1542,184 @@ static int __init acpi_pci_init(void)
return 0;
}
arch_initcall(acpi_pci_init);
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/*
+ * Try to assign the IRQ number when probing a new device
+ */
+int pcibios_alloc_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
+
+ return 0;
+}
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_config_window *cfg;
+ struct acpi_device *adev;
+ struct device *bus_dev;
+
+ if (acpi_disabled)
+ return 0;
+
+ cfg = bridge->bus->sysdata;
+
+ /*
+ * On Hyper-V there is no corresponding ACPI device for a root bridge,
+ * therefore ->parent is set as NULL by the driver. And set 'adev' as
+ * NULL in this case because there is no proper ACPI device.
+ */
+ if (!cfg->parent)
+ adev = NULL;
+ else
+ adev = to_acpi_device(cfg->parent);
+
+ bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
+
+ return 0;
+}
+
+static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
+{
+ struct resource_entry *entry, *tmp;
+ int status;
+
+ status = acpi_pci_probe_root_resources(ci);
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ if (!(entry->res->flags & IORESOURCE_WINDOW))
+ resource_list_destroy_entry(entry);
+ }
+ return status;
+}
+
+/*
+ * Look up the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct device *dev = &root->device->dev;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ const struct pci_ecam_ops *ecam_ops;
+ struct resource cfgres;
+ struct acpi_device *adev;
+ struct pci_config_window *cfg;
+ int ret;
+
+ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+ if (ret) {
+ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+ return NULL;
+ }
+
+ adev = acpi_resource_consumer(&cfgres);
+ if (adev)
+ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
+ dev_name(&adev->dev));
+ else
+ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
+ &cfgres);
+
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+ PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ci->ops);
+ kfree(ri);
+}
+
+/* Interface called from ACPI code to setup PCI host controller */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+ struct acpi_pci_root_ops *root_ops;
+ struct pci_host_bridge *host;
+
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+ if (!root_ops) {
+ kfree(ri);
+ return NULL;
+ }
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ root_ops->release_info = pci_acpi_generic_release_info;
+ root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ if (!bus)
+ return NULL;
+
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
+#endif
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9334b2dd4764..6658c1edd464 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
- PCI_EXP_RTCTL_CRSSVE),
- .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ PCI_EXP_RTCTL_RRS_SVE),
+ .ro = PCI_EXP_RTCAP_RRS_SV << 16,
},
[PCI_EXP_RTSTA / 4] = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f412ef73a6e4..35270172c833 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,7 +1670,7 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
-struct bus_type pci_bus_type = {
+const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
.uevent = pci_uevent,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 40cfa716392f..5d0f4db1cab7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -31,6 +31,10 @@
#include <linux/aperture.h>
#include "pci.h"
+#ifndef ARCH_PCI_DEV_GROUPS
+#define ARCH_PCI_DEV_GROUPS
+#endif
+
static int sysfs_initialized; /* = 0 */
/* show configuration fields */
@@ -1624,6 +1628,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_acpi_attr_group,
#endif
&pci_dev_resource_resize_group,
+ ARCH_PCI_DEV_GROUPS
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ffaaca0978cb..7d85c04fbba2 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1283,7 +1283,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
int delay = 1;
bool retrain = false;
- struct pci_dev *bridge;
+ struct pci_dev *root, *bridge;
+
+ root = pcie_find_root_port(dev);
if (pci_is_pcie(dev)) {
bridge = pci_upstream_bridge(dev);
@@ -1292,16 +1294,23 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
}
/*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
- * the read (except when CRS SV is enabled and the read was for the
- * Vendor ID; in that case it synthesizes 0x0001 data).
+ * The caller has already waited long enough after a reset that the
+ * device should respond to config requests, but it may respond
+ * with Request Retry Status (RRS) if it needs more time to
+ * initialize.
*
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
+ * If the device is below a Root Port with Configuration RRS
+ * Software Visibility enabled, reading the Vendor ID returns a
+ * special data value if the device responded with RRS. Read the
+ * Vendor ID until we get non-RRS status.
+ *
+ * If there's no Root Port or Configuration RRS Software Visibility
+ * is not enabled, the device may still respond with RRS, but
+ * hardware may retry the config request. If no retries receive
+ * Successful Completion, hardware generally synthesizes ~0
+ * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+ * ID for VFs and non-existent devices also returns ~0, so read the
+ * Command register until it returns something other than ~0.
*/
for (;;) {
u32 id;
@@ -1311,9 +1320,15 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
return -ENOTTY;
}
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- if (!PCI_POSSIBLE_ERROR(id))
- break;
+ if (root && root->config_rrs_sv) {
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+ if (!pci_bus_rrs_vendor_id(id))
+ break;
+ } else {
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ if (!PCI_POSSIBLE_ERROR(id))
+ break;
+ }
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
@@ -1324,7 +1339,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT) {
if (retrain) {
retrain = false;
- if (pcie_failed_link_retrain(bridge)) {
+ if (pcie_failed_link_retrain(bridge) == 0) {
delay = 1;
continue;
}
@@ -4718,7 +4733,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
}
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+
+ /*
+ * Clear LBMS after a manual retrain so that the bit can be used
+ * to track link speed or width changes made by hardware itself
+	 * in an attempt to correct unreliable link operation.
+ */
+ pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+ return rc;
}
/**
@@ -5672,8 +5695,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "bus reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5707,8 +5732,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
if (!dev->slot || dev->slot != slot)
continue;
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "slot reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -6802,16 +6829,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}
-static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
- if (bus->domain_nr < 0)
+ if (domain_nr < 0)
return;
/* Release domain from IDA where it was allocated. */
- if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
- ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
+ if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+ ida_free(&pci_domain_nr_static_ida, domain_nr);
else
- ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
+ ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -6820,11 +6847,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
if (!acpi_disabled)
return;
- of_pci_bus_release_domain_nr(bus, parent);
+ of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 79c8398f3938..14d00ce45bfa 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -13,10 +13,25 @@
#define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000
-/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */
+/*
+ * Power stable to PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PVPERL".
+ */
#define PCIE_T_PVPERL_MS 100
/*
+ * REFCLK stable before PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PERST-CLK".
+ */
+#define PCIE_T_PERST_CLK_US 100
+
+/*
* End of conventional reset (PERST# de-asserted) to first configuration
* request (device able to respond with a "Request Retry Status" completion),
* from PCIe r6.0, sec 6.6.1.
@@ -124,7 +139,6 @@ void pcie_clear_device_status(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
@@ -139,6 +153,11 @@ bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
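A device that answers a config read with an RRS completion (and Configuration
RRS Software Visibility enabled on the Root Port) returns the reserved Vendor
ID 0x0001, PCI_VENDOR_ID_PCI_SIG, in the low 16 bits. A sketch of the polling
pattern this helper supports, as used by pci_dev_wait() and pci_bus_wait_rrs():

	u32 l;
	int delay = 1;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &l);
	while (pci_bus_rrs_vendor_id(l)) {	/* still an RRS completion */
		msleep(delay);
		delay *= 2;			/* exponential backoff */
		pci_read_config_dword(dev, PCI_VENDOR_ID, &l);
	}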
static inline void pci_wakeup_event(struct pci_dev *dev)
{
/* Wait 100 ms before the system can be put into a sleep state. */
@@ -169,7 +188,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
}
void pci_vpd_init(struct pci_dev *dev);
-void pci_vpd_release(struct pci_dev *dev);
extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
@@ -290,10 +308,10 @@ void pci_put_host_bridge_device(struct device *dev);
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
+ int rrs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+ int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -398,6 +416,14 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { }
static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_NPEM
+void pci_npem_create(struct pci_dev *dev);
+void pci_npem_remove(struct pci_dev *dev);
+#else
+static inline void pci_npem_create(struct pci_dev *dev) { }
+static inline void pci_npem_remove(struct pci_dev *dev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -606,7 +632,7 @@ void pci_acs_init(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
-bool pcie_failed_link_retrain(struct pci_dev *dev);
+int pcie_failed_link_retrain(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
@@ -621,9 +647,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
return -ENOTTY;
}
-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
+static inline int pcie_failed_link_retrain(struct pci_dev *dev)
{
- return false;
+ return -ENOTTY;
}
#endif
@@ -887,8 +913,6 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
#endif
int pcim_intx(struct pci_dev *dev, int enable);
-
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
const char *name);
void pcim_release_region(struct pci_dev *pdev, int bar);
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index f81b2303bf6a..91acc7b17f68 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj)
else
rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
rperr->source_id &= 0xffff0000;
- rperr->source_id |= (einj->bus << 8) | devfn;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn);
}
if (einj->uncor_status) {
if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
@@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj)
rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
rperr->source_id &= 0x0000ffff;
- rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
}
spin_unlock_irqrestore(&inject_lock, flags);
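PCI_DEVID() packs a bus number and devfn into the 16-bit Requester ID format,
replacing the open-coded shift-and-or. For reference, it is defined in
<linux/pci.h> as:

	#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))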
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b14b9876c030..4f68414c3086 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1061,7 +1061,7 @@ unregister:
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- pci_bus_release_domain_nr(bus, parent);
+ pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
kfree(bus);
return err;
@@ -1203,15 +1203,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_add_new_bus);
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
u16 root_cap = 0;
- /* Enable CRS Software Visibility if supported */
+ /* Enable Configuration RRS Software Visibility if supported */
pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
- if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
+ pdev->config_rrs_sv = 1;
+ }
}
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1326,7 +1328,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- pci_enable_crs(dev);
+ pci_enable_rrs_sv(dev);
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
@@ -2343,28 +2345,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_alloc_dev);
-static bool pci_bus_crs_vendor_id(u32 l)
-{
- return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
int delay = 1;
- if (!pci_bus_crs_vendor_id(*l))
- return true; /* not a CRS completion */
+ if (!pci_bus_rrs_vendor_id(*l))
+ return true; /* not a Configuration RRS completion */
if (!timeout)
- return false; /* CRS, but caller doesn't want to wait */
+ return false; /* RRS, but caller doesn't want to wait */
/*
* We got the reserved Vendor ID that indicates a completion with
- * Configuration Request Retry Status (CRS). Retry until we get a
+ * Configuration Request Retry Status (RRS). Retry until we get a
* valid Vendor ID or we time out.
*/
- while (pci_bus_crs_vendor_id(*l)) {
+ while (pci_bus_rrs_vendor_id(*l)) {
if (delay > timeout) {
pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
pci_domain_nr(bus), bus->number,
@@ -2403,8 +2400,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
*l == 0x0000ffff || *l == 0xffff0000)
return false;
- if (pci_bus_crs_vendor_id(*l))
- return pci_bus_wait_crs(bus, devfn, l, timeout);
+ if (pci_bus_rrs_vendor_id(*l))
+ return pci_bus_wait_rrs(bus, devfn, l, timeout);
return true;
}
@@ -2593,6 +2590,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+
+ pci_npem_create(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
index f07758c9edad..a23a4312574b 100644
--- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
+++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
@@ -67,6 +67,11 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = {
.data = "wlan",
},
{
+ /* ATH11K in WCN6855 package. */
+ .compatible = "pci17cb,1103",
+ .data = "wlan",
+ },
+ {
/* ATH12K in WCN7850 package. */
.compatible = "pci17cb,1107",
.data = "wlan",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a2ce4e08edf5..dccb60c1d9cc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -66,7 +66,7 @@
* apply this erratum workaround to any downstream ports as long as they
* support Link Active reporting and have the Link Control 2 register.
* Restrict the speed to 2.5GT/s then with the Target Link Speed field,
- * request a retrain and wait 200ms for the data link to go up.
+ * request a retrain and check the result.
*
* If this turns out successful and we know by the Vendor:Device ID it is
* safe to do so, then lift the restriction, letting the devices negotiate
@@ -74,33 +74,45 @@
* firmware may have already arranged and lift it with ports that already
* report their data link being up.
*
- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
+ * Otherwise revert the speed to the original setting and request a retrain
+ * again to remove any residual state, ignoring the result as it's supposed
+ * to fail anyway.
+ *
+ * Return 0 if the link has been successfully retrained. Return an error
+ * if retraining was not needed or we attempted a retrain and it failed.
*/
-bool pcie_failed_link_retrain(struct pci_dev *dev)
+int pcie_failed_link_retrain(struct pci_dev *dev)
{
static const struct pci_device_id ids[] = {
{ PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
{}
};
u16 lnksta, lnkctl2;
+ int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
- return false;
+ return ret;
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
PCI_EXP_LNKSTA_LBMS) {
+ u16 oldlnkctl2 = lnkctl2;
+
pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_retrain_link(dev, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2,
+ oldlnkctl2);
+ pcie_retrain_link(dev, true);
+ return ret;
}
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
@@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_retrain_link(dev, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ return ret;
}
}
- return true;
+ return ret;
}
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -3608,6 +3621,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
+ quirk_broken_intx_masking);
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -4246,6 +4261,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
+
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
@@ -5070,6 +5089,8 @@ static const struct pci_dev_acs_enabled {
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+ /* QCOM SA8775P root port */
+ { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 4770cb87e3f0..e4ce1145aa3e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -50,6 +50,8 @@ static void pci_destroy_dev(struct pci_dev *dev)
if (!dev->dev.kobj.parent)
return;
+ pci_npem_remove(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -179,7 +181,7 @@ void pci_remove_root_bus(struct pci_bus *bus)
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
- pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
+ pci_bus_release_domain_nr(host_bridge->dev.parent, bus->domain_nr);
#endif
pci_remove_bus(bus);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index aa9530b4064f..bab8ba64162f 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -48,6 +48,13 @@ config ARM_CMN
Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh
Network interconnect.
+config ARM_NI
+ tristate "Arm NI-700 PMU support"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for PMU events monitoring on the Arm NI-700 Network-on-Chip
+ interconnect and family.
+
config ARM_PMU
depends on ARM || ARM64
bool "ARM PMU framework"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index d43df81d52f7..8268f38e42c5 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o
obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
+obj-$(CONFIG_ARM_NI) += arm-ni.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 38a2947ae813..c6ff1bc7d336 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -400,7 +400,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
}
/* clear common counter intr status */
- clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
+ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
writel(clr_status,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
}
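The fix acknowledges exactly the overflow bits observed in `status` rather than
hard-coding a 1. FIELD_PREP() from <linux/bitfield.h> shifts a value into the
bit positions described by a mask; a self-contained illustration with a
hypothetical mask:

	#include <linux/bitfield.h>

	#define OV_INTR_MASK	GENMASK(23, 8)

	/* FIELD_PREP(OV_INTR_MASK, 0x3) == 0x3 << 8 == 0x300 */
	u32 clr = FIELD_PREP(OV_INTR_MASK, status);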
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index f322e5ca1114..1d4d01e1275e 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -47,46 +47,79 @@
* implementations, we'll have to introduce per cpu-type tables.
*/
enum m1_pmu_events {
- M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
- M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
- M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
- M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
- M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
- M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
- M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
- M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
- M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
- M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
- M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
- M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
- M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
- M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
- M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
- M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
- M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
- M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
- M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
- M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
- M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
- M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
- M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
- M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
- M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
- M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
- M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
- M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
- M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
- M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
- M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
- M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
- M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
+ M1_PMU_PERFCTR_RETIRE_UOP = 0x1,
+ M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE = 0x2,
+ M1_PMU_PERFCTR_L1I_TLB_FILL = 0x4,
+ M1_PMU_PERFCTR_L1D_TLB_FILL = 0x5,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION = 0x7,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA = 0x8,
+ M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION = 0xa,
+ M1_PMU_PERFCTR_L2_TLB_MISS_DATA = 0xb,
+ M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC = 0xd,
+ M1_PMU_PERFCTR_SCHEDULE_UOP = 0x52,
+ M1_PMU_PERFCTR_INTERRUPT_PENDING = 0x6c,
+ M1_PMU_PERFCTR_MAP_STALL_DISPATCH = 0x70,
+ M1_PMU_PERFCTR_MAP_REWIND = 0x75,
+ M1_PMU_PERFCTR_MAP_STALL = 0x76,
+ M1_PMU_PERFCTR_MAP_INT_UOP = 0x7c,
+ M1_PMU_PERFCTR_MAP_LDST_UOP = 0x7d,
+ M1_PMU_PERFCTR_MAP_SIMD_UOP = 0x7e,
+ M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC = 0x84,
+ M1_PMU_PERFCTR_INST_ALL = 0x8c,
+ M1_PMU_PERFCTR_INST_BRANCH = 0x8d,
+ M1_PMU_PERFCTR_INST_BRANCH_CALL = 0x8e,
+ M1_PMU_PERFCTR_INST_BRANCH_RET = 0x8f,
+ M1_PMU_PERFCTR_INST_BRANCH_TAKEN = 0x90,
+ M1_PMU_PERFCTR_INST_BRANCH_INDIR = 0x93,
+ M1_PMU_PERFCTR_INST_BRANCH_COND = 0x94,
+ M1_PMU_PERFCTR_INST_INT_LD = 0x95,
+ M1_PMU_PERFCTR_INST_INT_ST = 0x96,
+ M1_PMU_PERFCTR_INST_INT_ALU = 0x97,
+ M1_PMU_PERFCTR_INST_SIMD_LD = 0x98,
+ M1_PMU_PERFCTR_INST_SIMD_ST = 0x99,
+ M1_PMU_PERFCTR_INST_SIMD_ALU = 0x9a,
+ M1_PMU_PERFCTR_INST_LDST = 0x9b,
+ M1_PMU_PERFCTR_INST_BARRIER = 0x9c,
+ M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
+ M1_PMU_PERFCTR_L1D_TLB_ACCESS = 0xa0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS = 0xa1,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST = 0xa2,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD = 0xa3,
+ M1_PMU_PERFCTR_LD_UNIT_UOP = 0xa6,
+ M1_PMU_PERFCTR_ST_UNIT_UOP = 0xa7,
+ M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK = 0xa8,
+ M1_PMU_PERFCTR_LDST_X64_UOP = 0xb1,
+ M1_PMU_PERFCTR_LDST_XPG_UOP = 0xb2,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC = 0xb3,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL = 0xb4,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC = 0xbf,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC = 0xc0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC = 0xc1,
+ M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC = 0xc4,
+ M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC = 0xc5,
+ M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC = 0xc6,
+ M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC = 0xc8,
+ M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC = 0xca,
+ M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC = 0xcb,
+ M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND = 0xd4,
+ M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE = 0xd6,
+ M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND = 0xdb,
+ M1_PMU_PERFCTR_FETCH_RESTART = 0xde,
+ M1_PMU_PERFCTR_ST_NT_UOP = 0xe5,
+ M1_PMU_PERFCTR_LD_NT_UOP = 0xe6,
+ M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
+ M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
+ M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
+ M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
+ M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
+ M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
/*
* From this point onwards, these are not actual HW events,
* but attributes that get stored in hw->config_base.
*/
- M1_PMU_CFG_COUNT_USER = BIT(8),
- M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_USER = BIT(8),
+ M1_PMU_CFG_COUNT_KERNEL = BIT(9),
};
/*
@@ -96,46 +129,45 @@ enum m1_pmu_events {
* counters had strange affinities.
*/
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
- [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
- [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
- [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
- [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
- [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
- [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
+ [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
+ [M1_PMU_PERFCTR_RETIRE_UOP] = BIT(7),
+ [M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE] = ANY_BUT_0_1 | BIT(0),
+ [M1_PMU_PERFCTR_INST_ALL] = BIT(7) | BIT(1),
+ [M1_PMU_PERFCTR_INST_BRANCH] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_CALL] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_RET] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_TAKEN] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_INDIR] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_COND] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_ST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_INT_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_SIMD_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ST] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_LDST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_BARRIER] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
+ [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
};
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
- /* No idea about the rest yet */
+ [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
+ [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
};
/* sysfs definitions */
@@ -154,8 +186,8 @@ static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
static struct attribute *m1_pmu_event_attrs[] = {
- M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
- M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
+ M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
+ M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
NULL,
};
@@ -400,7 +432,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; idx++) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct perf_sample_data data;
@@ -560,7 +592,7 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
- cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
+ bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
return 0;
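Replacing the num_events count with a cntr_mask bitmap lets the core PMU code
describe sparse or non-contiguous counter sets. A sketch of the bitmap idiom
used here:

	DECLARE_BITMAP(cntr_mask, M1_PMU_NR_COUNTERS);
	unsigned int idx;

	bitmap_set(cntr_mask, 0, M1_PMU_NR_COUNTERS);	/* counters 0..N-1 */
	for_each_set_bit(idx, cntr_mask, M1_PMU_NR_COUNTERS)
		;	/* visit each implemented counter */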
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c932d9d355cf..397a46410f7c 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -24,14 +24,6 @@
#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
-#define CMN_NODEID_DEVID(reg) ((reg) & 3)
-#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
-#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
-#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
-#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
-#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
-#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
-
#define CMN_CHILD_INFO 0x0080
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
@@ -43,6 +35,9 @@
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+/* Currently XPs are the node type we can have the most of; others top out at 128 */
+#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS
+
/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01 0x0008
#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
@@ -50,24 +45,28 @@
#define CMN_CFGM_PERIPH_ID_23 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
-#define CMN_CFGM_INFO_GLOBAL 0x900
+#define CMN_CFGM_INFO_GLOBAL 0x0900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+#define CMN_INFO_DEVICE_ISO_ENABLE BIT_ULL(44)
-#define CMN_CFGM_INFO_GLOBAL_1 0x908
+#define CMN_CFGM_INFO_GLOBAL_1 0x0908
#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
-#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
+#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(5, 0)
#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
+/* ...except when they don't :( */
+#define CMN_S3_DTM_OFFSET 0xa000
+#define CMN_S3_PMU_OFFSET 0xd900
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
@@ -78,7 +77,8 @@
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
-/* HN-Ps are weird... */
+/* Some types are designed to coexist with another device in the same node */
+#define CMN_CCLA_PMU_EVENT_SEL 0x008
#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
@@ -123,27 +123,28 @@
/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
#define CMN_DT_DTC_CTL_DT_EN BIT(0)
+#define CMN_DT_DTC_CTL_CG_DISABLE BIT(10)
/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
-#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40)
+#define CMN_DT_PMEVCNT(dtc, n) ((dtc)->pmu_base + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTR(dtc) ((dtc)->pmu_base + 0x40)
-#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90)
+#define CMN_DT_PMEVCNTSR(dtc, n) ((dtc)->pmu_base + 0x50 + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTRSR(dtc) ((dtc)->pmu_base + 0x90)
-#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100)
+#define CMN_DT_PMCR(dtc) ((dtc)->pmu_base + 0x100)
#define CMN_DT_PMCR_PMU_EN BIT(0)
#define CMN_DT_PMCR_CNTR_RST BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)
-#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118)
-#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120)
+#define CMN_DT_PMOVSR(dtc) ((dtc)->pmu_base + 0x118)
+#define CMN_DT_PMOVSR_CLR(dtc) ((dtc)->pmu_base + 0x120)
-#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128)
+#define CMN_DT_PMSSR(dtc) ((dtc)->pmu_base + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)
-#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130)
+#define CMN_DT_PMSRR(dtc) ((dtc)->pmu_base + 0x130)
#define CMN_DT_PMSRR_SS_REQ BIT(0)
#define CMN_DT_NUM_COUNTERS 8
@@ -198,10 +199,11 @@ enum cmn_model {
CMN650 = 2,
CMN700 = 4,
CI700 = 8,
+ CMNS3 = 16,
/* ...and then we can use bitmap tricks for commonality */
CMN_ANY = -1,
NOT_CMN600 = -2,
- CMN_650ON = CMN650 | CMN700,
+ CMN_650ON = CMN650 | CMN700 | CMNS3,
};
/* Actual part numbers and revision IDs defined by the hardware */
@@ -210,6 +212,7 @@ enum cmn_part {
PART_CMN650 = 0x436,
PART_CMN700 = 0x43c,
PART_CI700 = 0x43a,
+ PART_CMN_S3 = 0x43e,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
@@ -261,6 +264,7 @@ enum cmn_node_type {
CMN_TYPE_HNS = 0x200,
CMN_TYPE_HNS_MPAM_S,
CMN_TYPE_HNS_MPAM_NS,
+ CMN_TYPE_APB = 0x1000,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
@@ -280,8 +284,11 @@ struct arm_cmn_node {
u16 id, logid;
enum cmn_node_type type;
+ /* XP properties really, but replicated to children for convenience */
u8 dtm;
s8 dtc;
+ u8 portid_bits:4;
+ u8 deviceid_bits:4;
/* DN/HN-F/CXHA */
struct {
u8 val : 4;
@@ -307,8 +314,9 @@ struct arm_cmn_dtm {
struct arm_cmn_dtc {
void __iomem *base;
+ void __iomem *pmu_base;
int irq;
- int irq_friend;
+ s8 irq_friend;
bool cc_active;
struct perf_event *counters[CMN_DT_NUM_COUNTERS];
@@ -357,49 +365,33 @@ struct arm_cmn {
static int arm_cmn_hp_state;
struct arm_cmn_nodeid {
- u8 x;
- u8 y;
u8 port;
u8 dev;
};
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
- return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
+ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1));
}
-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
+static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn)
{
struct arm_cmn_nodeid nid;
- if (cmn->num_xps == 1) {
- nid.x = 0;
- nid.y = 0;
- nid.port = CMN_NODEID_1x1_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- } else {
- int bits = arm_cmn_xyidbits(cmn);
-
- nid.x = CMN_NODEID_X(id, bits);
- nid.y = CMN_NODEID_Y(id, bits);
- if (cmn->ports_used & 0xc) {
- nid.port = CMN_NODEID_EXT_PID(id);
- nid.dev = CMN_NODEID_EXT_DEVID(id);
- } else {
- nid.port = CMN_NODEID_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- }
- }
+ nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1);
+ nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
return nid;
}
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
const struct arm_cmn_node *dn)
{
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
- int xp_idx = cmn->mesh_x * nid.y + nid.x;
+ int id = dn->id >> (dn->portid_bits + dn->deviceid_bits);
+ int bits = arm_cmn_xyidbits(cmn);
+ int x = id >> bits;
+ int y = id & ((1U << bits) - 1);
- return cmn->xps + xp_idx;
+ return cmn->xps + cmn->mesh_x * y + x;
}
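With the per-node portid_bits and deviceid_bits fields, a node ID decodes as
packed bit fields, high to low: x | y | port | dev, where the x/y width comes
from arm_cmn_xyidbits(). A worked example, assuming 3 coordinate bits, 2 port
bits and 2 device bits:

	u16 id = dn->id;
	unsigned int dev, port, xy, x, y;

	/* id = 0b101_011_10_01 -> x = 5, y = 3, port = 2, dev = 1 */
	dev  = id & ((1U << dn->deviceid_bits) - 1);
	port = (id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
	xy   = id >> (dn->portid_bits + dn->deviceid_bits);
	x    = xy >> bits;
	y    = xy & ((1U << bits) - 1);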
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
enum cmn_node_type type)
@@ -423,15 +415,27 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
return CMN700;
case PART_CI700:
return CI700;
+ case PART_CMN_S3:
+ return CMNS3;
default:
return 0;
};
}
+static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
+{
+ if (cmn->part == PART_CMN_S3) {
+ if (dn->type == CMN_TYPE_XP)
+ return CMN_S3_DTM_OFFSET;
+ return CMN_S3_PMU_OFFSET;
+ }
+ return CMN_PMU_OFFSET;
+}
+
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
- int offset = CMN_MXP__CONNECT_INFO(port);
+ int offset = CMN_MXP__CONNECT_INFO(port) - arm_cmn_pmu_offset(cmn, xp);
if (port >= 2) {
if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
@@ -444,7 +448,7 @@ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
- return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
+ return readl_relaxed(xp->pmu_base + offset);
}
static struct dentry *arm_cmn_debugfs;
@@ -478,20 +482,25 @@ static const char *arm_cmn_device_type(u8 type)
case 0x17: return "RN-F_C_E|";
case 0x18: return " RN-F_E |";
case 0x19: return "RN-F_E_E|";
+ case 0x1a: return " HN-S |";
+ case 0x1b: return " LCN |";
case 0x1c: return " MTSX |";
case 0x1d: return " HN-V |";
case 0x1e: return " CCG |";
+ case 0x20: return " RN-F_F |";
+ case 0x21: return "RN-F_F_E|";
+ case 0x22: return " SN-F_F |";
default: return " ???? |";
}
}
-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
+static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d)
{
struct arm_cmn *cmn = s->private;
struct arm_cmn_node *dn;
+ u16 id = xp->id | d | (p << xp->deviceid_bits);
for (dn = cmn->dns; dn->type; dn++) {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP)
@@ -500,7 +509,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
if (dn->type < CMN_TYPE_HNI)
continue;
- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
+ if (dn->id != id)
continue;
seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
@@ -521,6 +530,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
+ struct arm_cmn_node *xp = cmn->xps + xp_base;
u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++)
@@ -528,16 +538,14 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_printf(s, "\n%-2d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
- struct arm_cmn_node *xp = cmn->xps + xp_base + x;
-
for (p = 0; p < CMN_MAX_PORTS; p++)
- port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
+ port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p);
seq_printf(s, " XP #%-3d|", xp_base + x);
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++) {
- s8 dtc = cmn->xps[xp_base + x].dtc;
+ s8 dtc = xp[x].dtc;
if (dtc < 0)
seq_puts(s, " DTC ?? |");
@@ -554,10 +562,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, arm_cmn_device_type(port[p][x]));
seq_puts(s, "\n 0|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 0);
+ arm_cmn_show_logid(s, xp + x, p, 0);
seq_puts(s, "\n 1|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 1);
+ arm_cmn_show_logid(s, xp + x, p, 1);
}
seq_puts(s, "\n-----+");
}
@@ -585,7 +593,7 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
- u64 dtm_idx[4];
+ u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)];
s8 dtc_idx[CMN_MAX_DTCS];
u8 num_dns;
u8 dtm_offset;
@@ -599,6 +607,7 @@ struct arm_cmn_hw_event {
bool wide_sel;
enum cmn_filter_select filter_sel;
};
+static_assert(sizeof(struct arm_cmn_hw_event) <= offsetof(struct hw_perf_event, target));
#define for_each_hw_dn(hw, dn, i) \
for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
@@ -609,7 +618,6 @@ struct arm_cmn_hw_event {
static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
- BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
return (struct arm_cmn_hw_event *)&event->hw;
}
@@ -790,8 +798,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
-#define CMN_EVENT_CCHA(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
+#define CMN_EVENT_CCHA(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event) \
@@ -1149,42 +1157,43 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_CCRA(wdb_alloc, 0x59),
CMN_EVENT_CCRA(ssb_alloc, 0x5a),
- CMN_EVENT_CCHA(rddatbyp, 0x61),
- CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
- CMN_EVENT_CCHA(chidat_up_stall, 0x63),
- CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
- CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
- CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
- CMN_EVENT_CCHA(reqtrk_occ, 0x67),
- CMN_EVENT_CCHA(rdb_occ, 0x68),
- CMN_EVENT_CCHA(rdbyp_occ, 0x69),
- CMN_EVENT_CCHA(wdb_occ, 0x6a),
- CMN_EVENT_CCHA(snptrk_occ, 0x6b),
- CMN_EVENT_CCHA(sdb_occ, 0x6c),
- CMN_EVENT_CCHA(snphaz_occ, 0x6d),
- CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
- CMN_EVENT_CCHA(rdb_alloc, 0x6f),
- CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
- CMN_EVENT_CCHA(wdb_alloc, 0x71),
- CMN_EVENT_CCHA(snptrk_alloc, 0x72),
- CMN_EVENT_CCHA(sdb_alloc, 0x73),
- CMN_EVENT_CCHA(snphaz_alloc, 0x74),
- CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
- CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
- CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
- CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
- CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
- CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
- CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
- CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMN_ANY, rddatbyp, 0x61),
+ CMN_EVENT_CCHA(CMN_ANY, chirsp_up_stall, 0x62),
+ CMN_EVENT_CCHA(CMN_ANY, chidat_up_stall, 0x63),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link0_stall, 0x64),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link1_stall, 0x65),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link2_stall, 0x66),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_occ, 0x67),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_occ, 0x68),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_occ, 0x69),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_occ, 0x6a),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_occ, 0x6b),
+ CMN_EVENT_CCHA(CMN_ANY, sdb_occ, 0x6c),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_occ, 0x6d),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_alloc, 0x6e),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_alloc, 0x6f),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_alloc, 0x70),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_alloc, 0x71),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_alloc, 0x72),
+	CMN_EVENT_CCHA(CMN_ANY, sdb_alloc,			0x73),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_alloc, 0x74),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_occ, 0x75),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_alloc, 0x76),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_occ, 0x77),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_alloc, 0x78),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_occ, 0x79),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_alloc, 0x7a),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_occ, 0x7b),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_alloc, 0x7c),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_occ, 0x7d),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_alloc, 0x7e),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_occ, 0x7f),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_alloc, 0x80),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_occ, 0x81),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_alloc, 0x82),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_occ, 0x83),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMNS3, chirsp1_up_stall, 0x85),
CMN_EVENT_CCLA(rx_cxs, 0x21),
CMN_EVENT_CCLA(tx_cxs, 0x22),
@@ -1271,15 +1280,11 @@ static ssize_t arm_cmn_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
- int lo = __ffs(fmt->field), hi = __fls(fmt->field);
-
- if (lo == hi)
- return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
- return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
+ return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);
- return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
+ return sysfs_emit(buf, "config%d:%*pbl\n", fmt->config, 64, &fmt->field);
}
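
The switch from __ffs()/__fls() arithmetic to the kernel's "%*pbl" bitmap-list format lets a format field be printed correctly even when its set bits are not contiguous. As a rough userspace illustration of what that specifier emits (print_bitlist() is a hypothetical helper, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical userspace approximation of the kernel's "%*pbl" output:
     * print the set bits of a 64-bit field as comma-separated ranges. */
    static void print_bitlist(uint64_t field)
    {
        const char *sep = "";

        for (int bit = 0; bit < 64; bit++) {
            if (!(field & (1ULL << bit)))
                continue;
            int start = bit;

            while (bit + 1 < 64 && (field & (1ULL << (bit + 1))))
                bit++;
            if (bit > start)
                printf("%s%d-%d", sep, start, bit);
            else
                printf("%s%d", sep, start);
            sep = ",";
        }
        putchar('\n');
    }

    int main(void)
    {
        print_bitlist(0xf0);    /* "4-7": a contiguous field */
        print_bitlist(0x101);   /* "0,8": a split field the old lo/hi
                                 * arithmetic could not have described */
        return 0;
    }
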
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
@@ -1415,7 +1420,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx)
static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
if (!cmn->state)
- writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
+ writel_relaxed(0, CMN_DT_PMCR(&cmn->dtc[0]));
cmn->state |= state;
}
@@ -1424,7 +1429,7 @@ static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
cmn->state &= ~state;
if (!cmn->state)
writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
- cmn->dtc[0].base + CMN_DT_PMCR);
+ CMN_DT_PMCR(&cmn->dtc[0]));
}
static void arm_cmn_pmu_enable(struct pmu *pmu)
@@ -1459,18 +1464,19 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
- u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);
+ void __iomem *pmccntr = CMN_DT_PMCCNTR(dtc);
+ u64 val = readq_relaxed(pmccntr);
- writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
+ writeq_relaxed(CMN_CC_INIT, pmccntr);
return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
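
For reference, the CMN_CC_INIT scheme parks the free-running counter at mid-range so the overflow interrupt fires after half the counter width, while the modular subtraction above stays correct across a wrap. A minimal sketch, assuming a 40-bit counter and a hypothetical mid-range constant:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CC_INIT (1ULL << 39)           /* assumed mid-range value */
    #define CC_MASK ((CC_INIT << 1) - 1)   /* 40 bits of counter */

    static uint64_t read_and_reset(uint64_t *hw_counter)
    {
        uint64_t val = *hw_counter;

        *hw_counter = CC_INIT;             /* re-arm at mid-range */
        return (val - CC_INIT) & CC_MASK;  /* delta since last reset */
    }

    int main(void)
    {
        /* counter ticked 100 times since it was set to CC_INIT... */
        uint64_t hw = (CC_INIT + 100) & CC_MASK;

        assert(read_and_reset(&hw) == 100);

        /* ...and the maths still holds after the 40-bit value wraps */
        hw = (CC_INIT + CC_INIT + 10) & CC_MASK;   /* wrapped to 10 */
        assert(read_and_reset(&hw) == CC_INIT + 10);
        printf("ok\n");
        return 0;
    }
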
static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
- u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);
+ void __iomem *pmevcnt = CMN_DT_PMEVCNT(dtc, idx);
+ u32 val = readl_relaxed(pmevcnt);
- val = readl_relaxed(dtc->base + pmevcnt);
- writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
+ writel_relaxed(CMN_COUNTER_INIT, pmevcnt);
return val - CMN_COUNTER_INIT;
}
@@ -1481,7 +1487,7 @@ static void arm_cmn_init_counter(struct perf_event *event)
u64 count;
for_each_hw_dtc_idx(hw, i, idx) {
- writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx));
+ writel_relaxed(CMN_COUNTER_INIT, CMN_DT_PMEVCNT(&cmn->dtc[i], idx));
cmn->dtc[i].counters[idx] = event;
}
@@ -1564,9 +1570,12 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
int i;
if (type == CMN_TYPE_DTC) {
- i = hw->dtc_idx[0];
- writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
- cmn->dtc[i].cc_active = true;
+ struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];
+
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN | CMN_DT_DTC_CTL_CG_DISABLE,
+ dtc->base + CMN_DT_DTC_CTL);
+ writeq_relaxed(CMN_CC_INIT, CMN_DT_PMCCNTR(dtc));
+ dtc->cc_active = true;
} else if (type == CMN_TYPE_WP) {
u64 val = CMN_EVENT_WP_VAL(event);
u64 mask = CMN_EVENT_WP_MASK(event);
@@ -1595,8 +1604,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
int i;
if (type == CMN_TYPE_DTC) {
- i = hw->dtc_idx[0];
- cmn->dtc[i].cc_active = false;
+ struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];
+
+ dtc->cc_active = false;
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
} else if (type == CMN_TYPE_WP) {
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
@@ -1784,7 +1795,8 @@ static int arm_cmn_event_init(struct perf_event *event)
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
- } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
+ } else if (type == CMN_TYPE_XP &&
+ (cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
}
@@ -1815,10 +1827,7 @@ static int arm_cmn_event_init(struct perf_event *event)
}
if (!hw->num_dns) {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
-
- dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
- nodeid, nid.x, nid.y, nid.port, nid.dev, type);
+ dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type);
return -EINVAL;
}
@@ -1921,7 +1930,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i);
writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
} else {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ struct arm_cmn_nodeid nid = arm_cmn_nid(dn);
if (cmn->multi_dtm)
nid.port %= 2;
@@ -2010,7 +2019,7 @@ static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_nod
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
node = dev_to_node(cmn->dev);
- if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
+ if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
arm_cmn_migrate(cmn, cpu);
return 0;
}
@@ -2043,7 +2052,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
for (;;) {
- u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
+ u32 status = readl_relaxed(CMN_DT_PMOVSR(dtc));
u64 delta;
int i;
@@ -2065,7 +2074,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
}
}
- writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
+ writel_relaxed(status, CMN_DT_PMOVSR_CLR(dtc));
if (!dtc->irq_friend)
return ret;
@@ -2119,15 +2128,16 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
{
struct arm_cmn_dtc *dtc = cmn->dtc + idx;
- dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
+ dtc->pmu_base = dn->pmu_base;
+ dtc->base = dtc->pmu_base - arm_cmn_pmu_offset(cmn, dn);
dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
if (dtc->irq < 0)
return dtc->irq;
writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
- writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
- writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
- writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
+ writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, CMN_DT_PMCR(dtc));
+ writeq_relaxed(0, CMN_DT_PMCCNTR(dtc));
+ writel_relaxed(0x1ff, CMN_DT_PMOVSR_CLR(dtc));
return 0;
}
@@ -2168,10 +2178,12 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
continue;
xp = arm_cmn_node_to_xp(cmn, dn);
+ dn->portid_bits = xp->portid_bits;
+ dn->deviceid_bits = xp->deviceid_bits;
dn->dtc = xp->dtc;
dn->dtm = xp->dtm;
if (cmn->multi_dtm)
- dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
+ dn->dtm += arm_cmn_nid(dn).port / 2;
if (dn->type == CMN_TYPE_DTC) {
int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
@@ -2213,7 +2225,7 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c
node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
- node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
+ node->pmu_base = cmn->base + offset + arm_cmn_pmu_offset(cmn, node);
if (node->type == CMN_TYPE_CFG)
level = 0;
@@ -2271,7 +2283,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
+ /*
+ * With the device isolation feature, if firmware has neglected to enable
+ * an XP port then we risk locking up if we try to access anything behind
+ * it; however we also have no way to tell from Non-Secure whether any
+ * given port is disabled or not, so the only way to win is not to play...
+ */
reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
+ if (reg & CMN_INFO_DEVICE_ISO_ENABLE) {
+ dev_err(cmn->dev, "Device isolation enabled, not continuing due to risk of lockup\n");
+ return -ENODEV;
+ }
cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
@@ -2341,18 +2363,27 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
arm_cmn_init_dtm(dtm++, xp, 0);
/*
* Keeping track of connected ports will let us filter out
- * unnecessary XP events easily. We can also reliably infer the
- * "extra device ports" configuration for the node ID format
- * from this, since in that case we will see at least one XP
- * with port 2 connected, for the HN-D.
+ * unnecessary XP events easily, and also infer the per-XP
+ * part of the node ID format.
*/
for (int p = 0; p < CMN_MAX_PORTS; p++)
if (arm_cmn_device_connect_info(cmn, xp, p))
xp_ports |= BIT(p);
- if (cmn->multi_dtm && (xp_ports & 0xc))
+ if (cmn->num_xps == 1) {
+ xp->portid_bits = 3;
+ xp->deviceid_bits = 2;
+ } else if (xp_ports > 0x3) {
+ xp->portid_bits = 2;
+ xp->deviceid_bits = 1;
+ } else {
+ xp->portid_bits = 1;
+ xp->deviceid_bits = 2;
+ }
+
+ if (cmn->multi_dtm && (xp_ports > 0x3))
arm_cmn_init_dtm(dtm++, xp, 1);
- if (cmn->multi_dtm && (xp_ports & 0x30))
+ if (cmn->multi_dtm && (xp_ports > 0xf))
arm_cmn_init_dtm(dtm++, xp, 2);
cmn->ports_used |= xp_ports;
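
The portid_bits/deviceid_bits values chosen above determine how the low bits of a node ID split between a port index and a device index. A simplified decode, with the field layout assumed purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative decode of the per-XP node ID fields being sized above;
     * the exact bit layout here is an assumption, not the hardware spec. */
    struct nodeid_fields { unsigned int port, dev; };

    static struct nodeid_fields decode_nid(uint16_t id,
                                           unsigned int portid_bits,
                                           unsigned int deviceid_bits)
    {
        struct nodeid_fields f;

        f.dev = id & ((1U << deviceid_bits) - 1);
        f.port = (id >> deviceid_bits) & ((1U << portid_bits) - 1);
        return f;
    }

    int main(void)
    {
        /* an XP with >2 ports connected: 2 port bits, 1 device bit */
        struct nodeid_fields f = decode_nid(0x5, 2, 1);

        printf("port %u dev %u\n", f.port, f.dev);
        return 0;
    }
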
@@ -2407,10 +2438,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_CXHA:
case CMN_TYPE_CCRA:
case CMN_TYPE_CCHA:
- case CMN_TYPE_CCLA:
case CMN_TYPE_HNS:
dn++;
break;
+ case CMN_TYPE_CCLA:
+ dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL;
+ dn++;
+ break;
/* Nothing to see here */
case CMN_TYPE_MPAM_S:
case CMN_TYPE_MPAM_NS:
@@ -2418,6 +2452,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_CXLA:
case CMN_TYPE_HNS_MPAM_S:
case CMN_TYPE_HNS_MPAM_NS:
+ case CMN_TYPE_APB:
break;
/*
* Split "optimised" combination nodes into separate
@@ -2428,7 +2463,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_HNP:
case CMN_TYPE_CCLA_RNI:
dn[1] = dn[0];
- dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
+ dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL;
dn[1].type = arm_cmn_subtype(dn->type);
dn += 2;
break;
@@ -2603,6 +2638,7 @@ static const struct of_device_id arm_cmn_of_match[] = {
{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
{ .compatible = "arm,cmn-650" },
{ .compatible = "arm,cmn-700" },
+ { .compatible = "arm,cmn-s3" },
{ .compatible = "arm,ci-700" },
{}
};
diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
new file mode 100644
index 000000000000..90fcfe693439
--- /dev/null
+++ b/drivers/perf/arm-ni.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022-2024 Arm Limited
+// NI-700 Network-on-Chip PMU driver
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Common registers */
+#define NI_NODE_TYPE 0x000
+#define NI_NODE_TYPE_NODE_ID GENMASK(31, 16)
+#define NI_NODE_TYPE_NODE_TYPE GENMASK(15, 0)
+
+#define NI_CHILD_NODE_INFO 0x004
+#define NI_CHILD_PTR(n) (0x008 + (n) * 4)
+
+#define NI700_PMUSELA 0x00c
+
+/* Config node */
+#define NI_PERIPHERAL_ID0 0xfe0
+#define NI_PIDR0_PART_7_0 GENMASK(7, 0)
+#define NI_PERIPHERAL_ID1 0xfe4
+#define NI_PIDR1_PART_11_8 GENMASK(3, 0)
+#define NI_PERIPHERAL_ID2 0xfe8
+#define NI_PIDR2_VERSION GENMASK(7, 4)
+
+/* PMU node */
+#define NI_PMEVCNTR(n) (0x008 + (n) * 8)
+#define NI_PMCCNTR_L 0x0f8
+#define NI_PMCCNTR_U 0x0fc
+#define NI_PMEVTYPER(n) (0x400 + (n) * 4)
+#define NI_PMEVTYPER_NODE_TYPE GENMASK(12, 9)
+#define NI_PMEVTYPER_NODE_ID GENMASK(8, 0)
+#define NI_PMCNTENSET 0xc00
+#define NI_PMCNTENCLR 0xc20
+#define NI_PMINTENSET 0xc40
+#define NI_PMINTENCLR 0xc60
+#define NI_PMOVSCLR 0xc80
+#define NI_PMOVSSET 0xcc0
+#define NI_PMCFGR 0xe00
+#define NI_PMCR 0xe04
+#define NI_PMCR_RESET_CCNT BIT(2)
+#define NI_PMCR_RESET_EVCNT BIT(1)
+#define NI_PMCR_ENABLE BIT(0)
+
+#define NI_NUM_COUNTERS 8
+#define NI_CCNT_IDX 31
+
+/* Event attributes */
+#define NI_CONFIG_TYPE GENMASK_ULL(15, 0)
+#define NI_CONFIG_NODEID GENMASK_ULL(31, 16)
+#define NI_CONFIG_EVENTID GENMASK_ULL(47, 32)
+
+#define NI_EVENT_TYPE(event) FIELD_GET(NI_CONFIG_TYPE, (event)->attr.config)
+#define NI_EVENT_NODEID(event) FIELD_GET(NI_CONFIG_NODEID, (event)->attr.config)
+#define NI_EVENT_EVENTID(event) FIELD_GET(NI_CONFIG_EVENTID, (event)->attr.config)
+
+enum ni_part {
+ PART_NI_700 = 0x43b,
+ PART_NI_710AE = 0x43d,
+};
+
+enum ni_node_type {
+ NI_GLOBAL,
+ NI_VOLTAGE,
+ NI_POWER,
+ NI_CLOCK,
+ NI_ASNI,
+ NI_AMNI,
+ NI_PMU,
+ NI_HSNI,
+ NI_HMNI,
+ NI_PMNI,
+};
+
+struct arm_ni_node {
+ void __iomem *base;
+ enum ni_node_type type;
+ u16 id;
+ u32 num_components;
+};
+
+struct arm_ni_unit {
+ void __iomem *pmusela;
+ enum ni_node_type type;
+ u16 id;
+ bool ns;
+ union {
+ __le64 pmusel;
+ u8 event[8];
+ };
+};
+
+struct arm_ni_cd {
+ void __iomem *pmu_base;
+ u16 id;
+ int num_units;
+ int irq;
+ int cpu;
+ struct hlist_node cpuhp_node;
+ struct pmu pmu;
+ struct arm_ni_unit *units;
+ struct perf_event *evcnt[NI_NUM_COUNTERS];
+ struct perf_event *ccnt;
+};
+
+struct arm_ni {
+ struct device *dev;
+ void __iomem *base;
+ enum ni_part part;
+ int id;
+ int num_cds;
+ struct arm_ni_cd cds[] __counted_by(num_cds);
+};
+
+#define cd_to_ni(cd) container_of((cd), struct arm_ni, cds[(cd)->id])
+#define pmu_to_cd(p) container_of((p), struct arm_ni_cd, pmu)
+
+#define cd_for_each_unit(cd, u) \
+ for (struct arm_ni_unit *u = cd->units; u < cd->units + cd->num_units; u++)
+
+static int arm_ni_hp_state;
+
+struct arm_ni_event_attr {
+ struct device_attribute attr;
+ enum ni_node_type type;
+};
+
+#define NI_EVENT_ATTR(_name, _type) \
+ (&((struct arm_ni_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_ni_event_show, NULL), \
+ .type = _type, \
+ }})[0].attr.attr)
+
+static ssize_t arm_ni_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni_event_attr *eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == NI_PMU)
+ return sysfs_emit(buf, "type=0x%x\n", eattr->type);
+
+ return sysfs_emit(buf, "type=0x%x,eventid=?,nodeid=?\n", eattr->type);
+}
+
+static umode_t arm_ni_event_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_ni_cd *cd = pmu_to_cd(dev_get_drvdata(dev));
+ struct arm_ni_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+
+ cd_for_each_unit(cd, unit) {
+ if (unit->type == eattr->type && unit->ns)
+ return attr->mode;
+ }
+
+ return 0;
+}
+
+static struct attribute *arm_ni_event_attrs[] = {
+ NI_EVENT_ATTR(asni, NI_ASNI),
+ NI_EVENT_ATTR(amni, NI_AMNI),
+ NI_EVENT_ATTR(cycles, NI_PMU),
+ NI_EVENT_ATTR(hsni, NI_HSNI),
+ NI_EVENT_ATTR(hmni, NI_HMNI),
+ NI_EVENT_ATTR(pmni, NI_PMNI),
+ NULL
+};
+
+static const struct attribute_group arm_ni_event_attrs_group = {
+ .name = "events",
+ .attrs = arm_ni_event_attrs,
+ .is_visible = arm_ni_event_attr_is_visible,
+};
+
+struct arm_ni_format_attr {
+ struct device_attribute attr;
+ u64 field;
+};
+
+#define NI_FORMAT_ATTR(_name, _fld) \
+ (&((struct arm_ni_format_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_ni_format_show, NULL), \
+ .field = _fld, \
+ }})[0].attr.attr)
+
+static ssize_t arm_ni_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
+
+ return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);
+}
+
+static struct attribute *arm_ni_format_attrs[] = {
+ NI_FORMAT_ATTR(type, NI_CONFIG_TYPE),
+ NI_FORMAT_ATTR(nodeid, NI_CONFIG_NODEID),
+ NI_FORMAT_ATTR(eventid, NI_CONFIG_EVENTID),
+ NULL
+};
+
+static const struct attribute_group arm_ni_format_attrs_group = {
+ .name = "format",
+ .attrs = arm_ni_format_attrs,
+};
+
+static ssize_t arm_ni_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(cd->cpu));
+}
+
+static struct device_attribute arm_ni_cpumask_attr =
+ __ATTR(cpumask, 0444, arm_ni_cpumask_show, NULL);
+
+static ssize_t arm_ni_identifier_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni *ni = cd_to_ni(pmu_to_cd(dev_get_drvdata(dev)));
+ u32 reg = readl_relaxed(ni->base + NI_PERIPHERAL_ID2);
+ int version = FIELD_GET(NI_PIDR2_VERSION, reg);
+
+ return sysfs_emit(buf, "%03x%02x\n", ni->part, version);
+}
+
+static struct device_attribute arm_ni_identifier_attr =
+ __ATTR(identifier, 0444, arm_ni_identifier_show, NULL);
+
+static struct attribute *arm_ni_other_attrs[] = {
+ &arm_ni_cpumask_attr.attr,
+ &arm_ni_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group arm_ni_other_attr_group = {
+ .attrs = arm_ni_other_attrs,
+};
+
+static const struct attribute_group *arm_ni_attr_groups[] = {
+ &arm_ni_event_attrs_group,
+ &arm_ni_format_attrs_group,
+ &arm_ni_other_attr_group,
+ NULL
+};
+
+static void arm_ni_pmu_enable(struct pmu *pmu)
+{
+ writel_relaxed(NI_PMCR_ENABLE, pmu_to_cd(pmu)->pmu_base + NI_PMCR);
+}
+
+static void arm_ni_pmu_disable(struct pmu *pmu)
+{
+ writel_relaxed(0, pmu_to_cd(pmu)->pmu_base + NI_PMCR);
+}
+
+struct arm_ni_val {
+ unsigned int evcnt;
+ unsigned int ccnt;
+};
+
+static bool arm_ni_val_count_event(struct perf_event *evt, struct arm_ni_val *val)
+{
+ if (is_software_event(evt))
+ return true;
+
+ if (NI_EVENT_TYPE(evt) == NI_PMU) {
+ val->ccnt++;
+ return val->ccnt <= 1;
+ }
+
+ val->evcnt++;
+ return val->evcnt <= NI_NUM_COUNTERS;
+}
+
+static int arm_ni_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct arm_ni_val val = { 0 };
+
+ if (leader == event)
+ return 0;
+
+ arm_ni_val_count_event(event, &val);
+ if (!arm_ni_val_count_event(leader, &val))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (!arm_ni_val_count_event(sibling, &val))
+ return -EINVAL;
+ }
+ return 0;
+}
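
The validation logic amounts to dry-run counting: walk the group, charge each member against the single cycle counter or the eight event counters, and reject as soon as either budget is exceeded. A standalone sketch of the same scheme (names invented here):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_COUNTERS 8

    enum ev_kind { EV_SOFTWARE, EV_CYCLES, EV_GENERIC };

    struct val { unsigned int evcnt, ccnt; };

    /* charge one event against the budget; false once it no longer fits */
    static bool count_event(enum ev_kind kind, struct val *val)
    {
        if (kind == EV_SOFTWARE)
            return true;
        if (kind == EV_CYCLES)
            return ++val->ccnt <= 1;
        return ++val->evcnt <= NUM_COUNTERS;
    }

    int main(void)
    {
        struct val val = { 0 };
        enum ev_kind group[] = { EV_CYCLES, EV_GENERIC, EV_GENERIC };
        bool ok = true;

        for (unsigned int i = 0; i < sizeof(group) / sizeof(group[0]); i++)
            ok &= count_event(group[i], &val);
        printf("group %s\n", ok ? "fits" : "rejected");
        return 0;
    }
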
+
+static int arm_ni_event_init(struct perf_event *event)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ event->cpu = cd->cpu;
+ if (NI_EVENT_TYPE(event) == NI_PMU)
+ return arm_ni_validate_group(event);
+
+ cd_for_each_unit(cd, unit) {
+ if (unit->type == NI_EVENT_TYPE(event) &&
+ unit->id == NI_EVENT_NODEID(event) && unit->ns) {
+ event->hw.config_base = (unsigned long)unit;
+ return arm_ni_validate_group(event);
+ }
+ }
+ return -EINVAL;
+}
+
+static u64 arm_ni_read_ccnt(struct arm_ni_cd *cd)
+{
+ u64 l, u_old, u_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
+ u_new = readl_relaxed(cd->pmu_base + NI_PMCCNTR_U);
+ do {
+ u_old = u_new;
+ l = readl_relaxed(cd->pmu_base + NI_PMCCNTR_L);
+ u_new = readl_relaxed(cd->pmu_base + NI_PMCCNTR_U);
+ } while (u_new != u_old && --retries);
+ WARN_ON(!retries);
+
+ return (u_new << 32) | l;
+}
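
The retry loop above is the standard high-low-high idiom for reading a 64-bit counter exposed as two 32-bit registers: if the high half changed between reads, the low half may belong to either value, so re-read until the high half is stable. A userspace sketch, with reg_read() standing in for readl_relaxed():

    #include <stdint.h>
    #include <stdio.h>

    struct split_counter { volatile uint32_t lo, hi; };

    static uint32_t reg_read(volatile uint32_t *reg) { return *reg; }

    static uint64_t read_ccnt(struct split_counter *c)
    {
        uint64_t l, u_old, u_new;
        int retries = 3;    /* 1st unlucky, 2nd improbable, 3rd broken */

        u_new = reg_read(&c->hi);
        do {
            u_old = u_new;
            l = reg_read(&c->lo);
            u_new = reg_read(&c->hi);
        } while (u_new != u_old && --retries);

        return (u_new << 32) | l;
    }

    int main(void)
    {
        struct split_counter c = { .lo = 0x12345678, .hi = 0x9abc };

        printf("%#llx\n", (unsigned long long)read_ccnt(&c));
        return 0;
    }
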
+
+static void arm_ni_event_read(struct perf_event *event)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 count, prev;
+ bool ccnt = hw->idx == NI_CCNT_IDX;
+
+ do {
+ prev = local64_read(&hw->prev_count);
+ if (ccnt)
+ count = arm_ni_read_ccnt(cd);
+ else
+ count = readl_relaxed(cd->pmu_base + NI_PMEVCNTR(hw->idx));
+ } while (local64_cmpxchg(&hw->prev_count, prev, count) != prev);
+
+ count -= prev;
+ if (!ccnt)
+ count = (u32)count;
+ local64_add(count, &event->count);
+}
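
The read path uses the usual lock-free delta-accumulation pattern: snapshot prev_count, read the hardware, and only publish the new snapshot if nobody raced us; the event total is then credited with the difference. A self-contained sketch using C11 atomics in place of local64_t (read_hw() is a stand-in for the MMIO read):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t prev_count;
    static _Atomic uint64_t event_count;

    static uint64_t read_hw(void)
    {
        static uint64_t fake = 1000;

        return fake += 37;    /* pretend the counter keeps ticking */
    }

    static void event_read(void)
    {
        uint64_t prev, count;

        do {
            prev = atomic_load(&prev_count);
            count = read_hw();
        } while (!atomic_compare_exchange_strong(&prev_count, &prev, count));

        atomic_fetch_add(&event_count, count - prev);
    }

    int main(void)
    {
        atomic_store(&prev_count, read_hw());
        for (int i = 0; i < 3; i++)
            event_read();
        printf("accumulated %llu\n",
               (unsigned long long)atomic_load(&event_count));
        return 0;
    }
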
+
+static void arm_ni_event_start(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+
+ writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENSET);
+}
+
+static void arm_ni_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+
+ writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENCLR);
+ if (flags & PERF_EF_UPDATE)
+ arm_ni_event_read(event);
+}
+
+static void arm_ni_init_ccnt(struct arm_ni_cd *cd)
+{
+ local64_set(&cd->ccnt->hw.prev_count, S64_MIN);
+ lo_hi_writeq_relaxed(S64_MIN, cd->pmu_base + NI_PMCCNTR_L);
+}
+
+static void arm_ni_init_evcnt(struct arm_ni_cd *cd, int idx)
+{
+ local64_set(&cd->evcnt[idx]->hw.prev_count, S32_MIN);
+ writel_relaxed(S32_MIN, cd->pmu_base + NI_PMEVCNTR(idx));
+}
+
+static int arm_ni_event_add(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ni_unit *unit;
+ enum ni_node_type type = NI_EVENT_TYPE(event);
+ u32 reg;
+
+ if (type == NI_PMU) {
+ if (cd->ccnt)
+ return -ENOSPC;
+ hw->idx = NI_CCNT_IDX;
+ cd->ccnt = event;
+ arm_ni_init_ccnt(cd);
+ } else {
+ hw->idx = 0;
+ while (cd->evcnt[hw->idx]) {
+ if (++hw->idx == NI_NUM_COUNTERS)
+ return -ENOSPC;
+ }
+ cd->evcnt[hw->idx] = event;
+ unit = (void *)hw->config_base;
+ unit->event[hw->idx] = NI_EVENT_EVENTID(event);
+ arm_ni_init_evcnt(cd, hw->idx);
+ lo_hi_writeq_relaxed(le64_to_cpu(unit->pmusel), unit->pmusela);
+
+ reg = FIELD_PREP(NI_PMEVTYPER_NODE_TYPE, type) |
+ FIELD_PREP(NI_PMEVTYPER_NODE_ID, NI_EVENT_NODEID(event));
+ writel_relaxed(reg, cd->pmu_base + NI_PMEVTYPER(hw->idx));
+ }
+ if (flags & PERF_EF_START)
+ arm_ni_event_start(event, 0);
+ return 0;
+}
+
+static void arm_ni_event_del(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ arm_ni_event_stop(event, PERF_EF_UPDATE);
+
+ if (hw->idx == NI_CCNT_IDX)
+ cd->ccnt = NULL;
+ else
+ cd->evcnt[hw->idx] = NULL;
+}
+
+static irqreturn_t arm_ni_handle_irq(int irq, void *dev_id)
+{
+ struct arm_ni_cd *cd = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u32 reg = readl_relaxed(cd->pmu_base + NI_PMOVSCLR);
+
+ if (reg & (1U << NI_CCNT_IDX)) {
+ ret = IRQ_HANDLED;
+ if (!(WARN_ON(!cd->ccnt))) {
+ arm_ni_event_read(cd->ccnt);
+ arm_ni_init_ccnt(cd);
+ }
+ }
+ for (int i = 0; i < NI_NUM_COUNTERS; i++) {
+ if (!(reg & (1U << i)))
+ continue;
+ ret = IRQ_HANDLED;
+ if (!(WARN_ON(!cd->evcnt[i]))) {
+ arm_ni_event_read(cd->evcnt[i]);
+ arm_ni_init_evcnt(cd, i);
+ }
+ }
+ writel_relaxed(reg, cd->pmu_base + NI_PMOVSCLR);
+ return ret;
+}
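
The handler follows the common write-one-to-clear overflow protocol: read the status register once, service every counter whose bit is set, then write that same value back so only the overflows actually handled are acknowledged. Sketched below with the hardware register replaced by a plain variable:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_COUNTERS 8
    #define CCNT_BIT (1U << 31)

    /* stand-in for PMOVSCLR: counters 0, 2 and the cycle counter overflowed */
    static uint32_t pmovs = CCNT_BIT | 0x5;

    int main(void)
    {
        uint32_t status = pmovs;    /* single read of the status register */

        if (status & CCNT_BIT)
            printf("cycle counter overflowed\n");
        for (int i = 0; i < NUM_COUNTERS; i++)
            if (status & (1U << i))
                printf("counter %d overflowed\n", i);

        pmovs &= ~status;    /* write-back acts as write-1-to-clear */
        return 0;
    }
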
+
+static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_start)
+{
+ struct arm_ni_cd *cd = ni->cds + node->id;
+ const char *name;
+ int err;
+
+ cd->id = node->id;
+ cd->num_units = node->num_components;
+ cd->units = devm_kcalloc(ni->dev, cd->num_units, sizeof(*(cd->units)), GFP_KERNEL);
+ if (!cd->units)
+ return -ENOMEM;
+
+ for (int i = 0; i < cd->num_units; i++) {
+ u32 reg = readl_relaxed(node->base + NI_CHILD_PTR(i));
+ void __iomem *unit_base = ni->base + reg;
+ struct arm_ni_unit *unit = cd->units + i;
+
+ reg = readl_relaxed(unit_base + NI_NODE_TYPE);
+ unit->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg);
+ unit->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg);
+
+ switch (unit->type) {
+ case NI_PMU:
+ reg = readl_relaxed(unit_base + NI_PMCFGR);
+ if (!reg) {
+ dev_info(ni->dev, "No access to PMU %d\n", cd->id);
+ devm_kfree(ni->dev, cd->units);
+ return 0;
+ }
+ unit->ns = true;
+ cd->pmu_base = unit_base;
+ break;
+ case NI_ASNI:
+ case NI_AMNI:
+ case NI_HSNI:
+ case NI_HMNI:
+ case NI_PMNI:
+ unit->pmusela = unit_base + NI700_PMUSELA;
+ writel_relaxed(1, unit->pmusela);
+ if (readl_relaxed(unit->pmusela) != 1)
+ dev_info(ni->dev, "No access to node 0x%04x%04x\n", unit->id, unit->type);
+ else
+ unit->ns = true;
+ break;
+ default:
+ /*
+ * e.g. FMU - thankfully bits 3:2 of FMU_ERR_FR0 are RES0 so
+ * can't alias any of the leaf node types we're looking for.
+ */
+ dev_dbg(ni->dev, "Mystery node 0x%04x%04x\n", unit->id, unit->type);
+ break;
+ }
+ }
+
+ res_start += cd->pmu_base - ni->base;
+ if (!devm_request_mem_region(ni->dev, res_start, SZ_4K, dev_name(ni->dev))) {
+ dev_err(ni->dev, "Failed to request PMU region 0x%llx\n", res_start);
+ return -EBUSY;
+ }
+
+ writel_relaxed(NI_PMCR_RESET_CCNT | NI_PMCR_RESET_EVCNT,
+ cd->pmu_base + NI_PMCR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMCNTENCLR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMOVSCLR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENSET);
+
+ cd->irq = platform_get_irq(to_platform_device(ni->dev), cd->id);
+ if (cd->irq < 0)
+ return cd->irq;
+
+ err = devm_request_irq(ni->dev, cd->irq, arm_ni_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(ni->dev), cd);
+ if (err)
+ return err;
+
+ cd->cpu = cpumask_local_spread(0, dev_to_node(ni->dev));
+ cd->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .parent = ni->dev,
+ .attr_groups = arm_ni_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = arm_ni_pmu_enable,
+ .pmu_disable = arm_ni_pmu_disable,
+ .event_init = arm_ni_event_init,
+ .add = arm_ni_event_add,
+ .del = arm_ni_event_del,
+ .start = arm_ni_event_start,
+ .stop = arm_ni_event_stop,
+ .read = arm_ni_event_read,
+ };
+
+ name = devm_kasprintf(ni->dev, GFP_KERNEL, "arm_ni_%d_cd_%d", ni->id, cd->id);
+ if (!name)
+ return -ENOMEM;
+
+ err = cpuhp_state_add_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
+ if (err)
+ return err;
+
+ err = perf_pmu_register(&cd->pmu, name, -1);
+ if (err)
+ cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
+
+ return err;
+}
+
+static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node)
+{
+ u32 reg = readl_relaxed(base + NI_NODE_TYPE);
+
+ node->base = base;
+ node->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg);
+ node->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg);
+ node->num_components = readl_relaxed(base + NI_CHILD_NODE_INFO);
+}
+
+static int arm_ni_probe(struct platform_device *pdev)
+{
+ struct arm_ni_node cfg, vd, pd, cd;
+ struct arm_ni *ni;
+ struct resource *res;
+ void __iomem *base;
+ static atomic_t id;
+ int num_cds;
+ u32 reg, part;
+
+ /*
+ * We want to map the whole configuration space for ease of discovery,
+ * but the PMU pages are the only ones for which we can honestly claim
+ * exclusive ownership, so we'll request them explicitly once found.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ arm_ni_probe_domain(base, &cfg);
+ if (cfg.type != NI_GLOBAL)
+ return -ENODEV;
+
+ reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID0);
+ part = FIELD_GET(NI_PIDR0_PART_7_0, reg);
+ reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID1);
+ part |= FIELD_GET(NI_PIDR1_PART_11_8, reg) << 8;
+
+ switch (part) {
+ case PART_NI_700:
+ case PART_NI_710AE:
+ break;
+ default:
+ dev_WARN(&pdev->dev, "Unknown part number: 0x%03x, this may go badly\n", part);
+ break;
+ }
+
+ num_cds = 0;
+ for (int v = 0; v < cfg.num_components; v++) {
+ reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
+ arm_ni_probe_domain(base + reg, &vd);
+ for (int p = 0; p < vd.num_components; p++) {
+ reg = readl_relaxed(vd.base + NI_CHILD_PTR(p));
+ arm_ni_probe_domain(base + reg, &pd);
+ num_cds += pd.num_components;
+ }
+ }
+
+ ni = devm_kzalloc(&pdev->dev, struct_size(ni, cds, num_cds), GFP_KERNEL);
+ if (!ni)
+ return -ENOMEM;
+
+ ni->dev = &pdev->dev;
+ ni->base = base;
+ ni->num_cds = num_cds;
+ ni->part = part;
+ ni->id = atomic_fetch_inc(&id);
+
+ for (int v = 0; v < cfg.num_components; v++) {
+ reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
+ arm_ni_probe_domain(base + reg, &vd);
+ for (int p = 0; p < vd.num_components; p++) {
+ reg = readl_relaxed(vd.base + NI_CHILD_PTR(p));
+ arm_ni_probe_domain(base + reg, &pd);
+ for (int c = 0; c < pd.num_components; c++) {
+ int ret;
+
+ reg = readl_relaxed(pd.base + NI_CHILD_PTR(c));
+ arm_ni_probe_domain(base + reg, &cd);
+ ret = arm_ni_init_cd(ni, &cd, res->start);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void arm_ni_remove(struct platform_device *pdev)
+{
+ struct arm_ni *ni = platform_get_drvdata(pdev);
+
+ for (int i = 0; i < ni->num_cds; i++) {
+ struct arm_ni_cd *cd = ni->cds + i;
+
+ if (!cd->pmu_base)
+ continue;
+
+ writel_relaxed(0, cd->pmu_base + NI_PMCR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
+ perf_pmu_unregister(&cd->pmu);
+ cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
+ }
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id arm_ni_of_match[] = {
+ { .compatible = "arm,ni-700" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_ni_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id arm_ni_acpi_match[] = {
+ { "ARMHCB70" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, arm_ni_acpi_match);
+#endif
+
+static struct platform_driver arm_ni_driver = {
+ .driver = {
+ .name = "arm-ni",
+ .of_match_table = of_match_ptr(arm_ni_of_match),
+ .acpi_match_table = ACPI_PTR(arm_ni_acpi_match),
+ },
+ .probe = arm_ni_probe,
+ .remove = arm_ni_remove,
+};
+
+static void arm_ni_pmu_migrate(struct arm_ni_cd *cd, unsigned int cpu)
+{
+ perf_pmu_migrate_context(&cd->pmu, cd->cpu, cpu);
+ irq_set_affinity(cd->irq, cpumask_of(cpu));
+ cd->cpu = cpu;
+}
+
+static int arm_ni_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct arm_ni_cd *cd;
+ int node;
+
+ cd = hlist_entry_safe(cpuhp_node, struct arm_ni_cd, cpuhp_node);
+ node = dev_to_node(cd_to_ni(cd)->dev);
+ if (cpu_to_node(cd->cpu) != node && cpu_to_node(cpu) == node)
+ arm_ni_pmu_migrate(cd, cpu);
+ return 0;
+}
+
+static int arm_ni_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct arm_ni_cd *cd;
+ unsigned int target;
+ int node;
+
+ cd = hlist_entry_safe(cpuhp_node, struct arm_ni_cd, cpuhp_node);
+ if (cpu != cd->cpu)
+ return 0;
+
+ node = dev_to_node(cd_to_ni(cd)->dev);
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ arm_ni_pmu_migrate(cd, target);
+ return 0;
+}
+
+static int __init arm_ni_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/ni:online",
+ arm_ni_pmu_online_cpu,
+ arm_ni_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ arm_ni_hp_state = ret;
+
+ ret = platform_driver_register(&arm_ni_driver);
+ if (ret)
+ cpuhp_remove_multi_state(arm_ni_hp_state);
+ return ret;
+}
+
+static void __exit arm_ni_exit(void)
+{
+ platform_driver_unregister(&arm_ni_driver);
+ cpuhp_remove_multi_state(arm_ni_hp_state);
+}
+
+module_init(arm_ni_init);
+module_exit(arm_ni_exit);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm NI-700 PMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 8458fe2cebb4..398cce3d76fc 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -522,7 +522,7 @@ static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
@@ -742,7 +742,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
struct perf_event *event;
int idx;
- for (idx = 0; idx < armpmu->num_events; idx++) {
+ for_each_set_bit(idx, armpmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
event = hw_events->events[idx];
if (!event)
continue;
@@ -772,7 +772,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
{
struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return NOTIFY_DONE;
@@ -924,8 +924,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
goto out_destroy;
- pr_info("enabled with %s PMU driver, %d counters available%s\n",
- pmu->name, pmu->num_events,
+ pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n",
+ pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS),
+ ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask,
has_nmi ? ", using NMIs" : "");
kvm_host_pmu_init(pmu);
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 4b1a9a92ea11..118170a5cede 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -59,7 +59,7 @@ static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
static bool pmu_has_irq_affinity(struct device_node *node)
{
- return !!of_find_property(node, "interrupt-affinity", NULL);
+ return of_property_present(node, "interrupt-affinity");
}
static int pmu_parse_irq_affinity(struct device *dev, int i)
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index d246840797b6..0afe02f879b4 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -452,13 +452,6 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
};
/*
- * Perf Events' indices
- */
-#define ARMV8_IDX_CYCLE_COUNTER 0
-#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_CYCLE_COUNTER_USER 32
-
-/*
* We unconditionally enable ARMv8.5-PMU long event counter support
* (64-bit events) where supported. Indicate if this arm_pmu has long
* event counter support.
@@ -489,19 +482,12 @@ static bool armv8pmu_event_is_chained(struct perf_event *event)
return !armv8pmu_event_has_user_read(event) &&
armv8pmu_event_is_64bit(event) &&
!armv8pmu_has_long_event(cpu_pmu) &&
- (idx != ARMV8_IDX_CYCLE_COUNTER);
+ (idx < ARMV8_PMU_MAX_GENERAL_COUNTERS);
}
/*
* ARMv8 low level PMU access
*/
-
-/*
- * Perf Event to low level counters mapping
- */
-#define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-
static u64 armv8pmu_pmcr_read(void)
{
return read_pmcr();
@@ -514,21 +500,19 @@ static void armv8pmu_pmcr_write(u64 val)
write_pmcr(val);
}
-static int armv8pmu_has_overflowed(u32 pmovsr)
+static int armv8pmu_has_overflowed(u64 pmovsr)
{
- return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
+ return !!(pmovsr & ARMV8_PMU_OVERFLOWED_MASK);
}
-static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+static int armv8pmu_counter_has_overflowed(u64 pmnc, int idx)
{
- return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+ return !!(pmnc & BIT(idx));
}
static u64 armv8pmu_read_evcntr(int idx)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- return read_pmevcntrn(counter);
+ return read_pmevcntrn(idx);
}
static u64 armv8pmu_read_hw_counter(struct perf_event *event)
@@ -557,7 +541,7 @@ static bool armv8pmu_event_needs_bias(struct perf_event *event)
return false;
if (armv8pmu_has_long_event(cpu_pmu) ||
- idx == ARMV8_IDX_CYCLE_COUNTER)
+ idx >= ARMV8_PMU_MAX_GENERAL_COUNTERS)
return true;
return false;
@@ -585,8 +569,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
int idx = hwc->idx;
u64 value;
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
value = read_pmccntr();
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ value = read_pmicntr();
else
value = armv8pmu_read_hw_counter(event);
@@ -595,9 +581,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
static void armv8pmu_write_evcntr(int idx, u64 value)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- write_pmevcntrn(counter, value);
+ write_pmevcntrn(idx, value);
}
static void armv8pmu_write_hw_counter(struct perf_event *event,
@@ -620,15 +604,16 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
value = armv8pmu_bias_long_counter(event, value);
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccntr(value);
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ write_pmicntr(value);
else
armv8pmu_write_hw_counter(event, value);
}
static void armv8pmu_write_evtype(int idx, unsigned long val)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
unsigned long mask = ARMV8_PMU_EVTYPE_EVENT |
ARMV8_PMU_INCLUDE_EL2 |
ARMV8_PMU_EXCLUDE_EL0 |
@@ -638,7 +623,7 @@ static void armv8pmu_write_evtype(int idx, unsigned long val)
mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH;
val &= mask;
- write_pmevtypern(counter, val);
+ write_pmevtypern(idx, val);
}
static void armv8pmu_write_event_type(struct perf_event *event)
@@ -658,24 +643,26 @@ static void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccfiltr(hwc->config_base);
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ write_pmicfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
}
}
-static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
+static u64 armv8pmu_event_cnten_mask(struct perf_event *event)
{
- int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- u32 mask = BIT(counter);
+ int counter = event->hw.idx;
+ u64 mask = BIT(counter);
if (armv8pmu_event_is_chained(event))
mask |= BIT(counter - 1);
return mask;
}
-static void armv8pmu_enable_counter(u32 mask)
+static void armv8pmu_enable_counter(u64 mask)
{
/*
* Make sure event configuration register writes are visible before we
@@ -688,7 +675,7 @@ static void armv8pmu_enable_counter(u32 mask)
static void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
+ u64 mask = armv8pmu_event_cnten_mask(event);
kvm_set_pmu_events(mask, attr);
@@ -697,7 +684,7 @@ static void armv8pmu_enable_event_counter(struct perf_event *event)
armv8pmu_enable_counter(mask);
}
-static void armv8pmu_disable_counter(u32 mask)
+static void armv8pmu_disable_counter(u64 mask)
{
write_pmcntenclr(mask);
/*
@@ -710,7 +697,7 @@ static void armv8pmu_disable_counter(u32 mask)
static void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
+ u64 mask = armv8pmu_event_cnten_mask(event);
kvm_clr_pmu_events(mask);
@@ -719,18 +706,17 @@ static void armv8pmu_disable_event_counter(struct perf_event *event)
armv8pmu_disable_counter(mask);
}
-static void armv8pmu_enable_intens(u32 mask)
+static void armv8pmu_enable_intens(u64 mask)
{
write_pmintenset(mask);
}
static void armv8pmu_enable_event_irq(struct perf_event *event)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_enable_intens(BIT(counter));
+ armv8pmu_enable_intens(BIT(event->hw.idx));
}
-static void armv8pmu_disable_intens(u32 mask)
+static void armv8pmu_disable_intens(u64 mask)
{
write_pmintenclr(mask);
isb();
@@ -741,13 +727,12 @@ static void armv8pmu_disable_intens(u32 mask)
static void armv8pmu_disable_event_irq(struct perf_event *event)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_disable_intens(BIT(counter));
+ armv8pmu_disable_intens(BIT(event->hw.idx));
}
-static u32 armv8pmu_getreset_flags(void)
+static u64 armv8pmu_getreset_flags(void)
{
- u32 value;
+ u64 value;
/* Read */
value = read_pmovsclr();
@@ -786,9 +771,12 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
/* Clear any unused counters to avoid leaking their contents */
- for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
- if (i == ARMV8_IDX_CYCLE_COUNTER)
+ for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+ ARMPMU_MAX_HWEVENTS) {
+ if (i == ARMV8_PMU_CYCLE_IDX)
write_pmccntr(0);
+ else if (i == ARMV8_PMU_INSTR_IDX)
+ write_pmicntr(0);
else
armv8pmu_write_evcntr(i, 0);
}
@@ -842,7 +830,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
- u32 pmovsr;
+ u64 pmovsr;
struct perf_sample_data data;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
@@ -869,7 +857,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
* to prevent skews in group events.
*/
armv8pmu_stop(cpu_pmu);
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -908,7 +896,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
{
int idx;
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
@@ -924,7 +912,9 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
* Chaining requires two consecutive event counters, where
* the lower idx must be even.
*/
- for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) {
+ if (!(idx & 0x1))
+ continue;
if (!test_and_set_bit(idx, cpuc->used_mask)) {
/* Check if the preceding even counter is available */
if (!test_and_set_bit(idx - 1, cpuc->used_mask))
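
Chained allocation scans only odd indices: the odd counter is claimed first, then the preceding even one. A sketch of the pairing logic over plain bitmasks, including the rollback when the even half turns out to be busy:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_GENERAL 8

    static bool test_and_set(uint32_t *mask, int bit)
    {
        bool was = *mask & (1U << bit);

        *mask |= 1U << bit;
        return was;
    }

    static int get_chain_idx(uint32_t *used_mask, uint32_t cntr_mask)
    {
        for (int idx = 0; idx < NUM_GENERAL; idx++) {
            if (!(cntr_mask & (1U << idx)) || !(idx & 1))
                continue;
            if (!test_and_set(used_mask, idx)) {
                /* odd half claimed; now try the even lower half */
                if (!test_and_set(used_mask, idx - 1))
                    return idx;
                *used_mask &= ~(1U << idx);    /* roll back */
            }
        }
        return -1;
    }

    int main(void)
    {
        uint32_t used = 0x1;    /* counter 0 busy, so pair (2,3) wins */

        printf("chain upper idx = %d\n", get_chain_idx(&used, 0xff));
        return 0;
    }
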
@@ -946,8 +936,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
/* Always prefer to place a cycle counter into the cycle counter. */
if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
!armv8pmu_event_get_threshold(&event->attr)) {
- if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return ARMV8_IDX_CYCLE_COUNTER;
+ if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask))
+ return ARMV8_PMU_CYCLE_IDX;
else if (armv8pmu_event_is_64bit(event) &&
armv8pmu_event_want_user_access(event) &&
!armv8pmu_has_long_event(cpu_pmu))
@@ -955,6 +945,19 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
/*
+ * Always prefer to place an instruction counting event into the dedicated
+ * instruction counter, but don't expose that counter to userspace access,
+ * as userspace may not know how to handle it.
+ */
+ if ((evtype == ARMV8_PMUV3_PERFCTR_INST_RETIRED) &&
+ !armv8pmu_event_get_threshold(&event->attr) &&
+ test_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask) &&
+ !armv8pmu_event_want_user_access(event)) {
+ if (!test_and_set_bit(ARMV8_PMU_INSTR_IDX, cpuc->used_mask))
+ return ARMV8_PMU_INSTR_IDX;
+ }
+
+ /*
* Otherwise use events counters
*/
if (armv8pmu_event_is_chained(event))
@@ -978,15 +981,7 @@ static int armv8pmu_user_event_idx(struct perf_event *event)
if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
return 0;
- /*
- * We remap the cycle counter index to 32 to
- * match the offset applied to the rest of
- * the counter indices.
- */
- if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
- return ARMV8_IDX_CYCLE_COUNTER_USER;
-
- return event->hw.idx;
+ return event->hw.idx + 1;
}
/*
@@ -1061,14 +1056,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u64 pmcr;
+ u64 pmcr, mask;
+
+ bitmap_to_arr64(&mask, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS);
/* The counter and interrupt enable registers are unknown at reset. */
- armv8pmu_disable_counter(U32_MAX);
- armv8pmu_disable_intens(U32_MAX);
+ armv8pmu_disable_counter(mask);
+ armv8pmu_disable_intens(mask);
/* Clear the counters we flip at guest entry/exit */
- kvm_clr_pmu_events(U32_MAX);
+ kvm_clr_pmu_events(mask);
/*
* Initialize & Reset PMNC. Request overflow interrupt for
@@ -1089,14 +1086,14 @@ static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
if (event->attr.type == PERF_TYPE_HARDWARE &&
event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
- if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
- armpmu->pmceid_bitmap))
- return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
-
if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
armpmu->pmceid_bitmap))
return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
+ if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+ armpmu->pmceid_bitmap))
+ return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
+
return HW_OP_UNSUPPORTED;
}
@@ -1211,10 +1208,15 @@ static void __armv8pmu_probe_pmu(void *info)
probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
- cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read());
+ bitmap_set(cpu_pmu->cntr_mask,
+ 0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()));
/* Add the CPU cycles counter */
- cpu_pmu->num_events += 1;
+ set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask);
+
+ /* Add the CPU instructions counter */
+ if (pmuv3_has_icntr())
+ set_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask);
pmceid[0] = pmceid_raw[0] = read_pmceid0();
pmceid[1] = pmceid_raw[1] = read_pmceid1();
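
Replacing num_events with cntr_mask reflects that counters are no longer a contiguous run: the cycle counter sits at a fixed high index and, when FEAT_PMUv3_ICNTR is present, the instruction counter at the next one, so only a bitmap can describe the populated set. A small sketch (the index values 31 and 32 mirror the apparent ARMV8_PMU_*_IDX definitions used above):

    #include <stdint.h>
    #include <stdio.h>

    #define CYCLE_IDX 31
    #define INSTR_IDX 32

    int main(void)
    {
        uint64_t cntr_mask = 0;
        int n_general = 6;    /* pretend PMCR.N reported 6 */

        cntr_mask |= (1ULL << n_general) - 1;    /* general counters 0..5 */
        cntr_mask |= 1ULL << CYCLE_IDX;
        cntr_mask |= 1ULL << INSTR_IDX;    /* only if FEAT_PMUv3_ICNTR */

        for (int idx = 0; idx < 64; idx++)
            if (cntr_mask & (1ULL << idx))
                printf("counter %d present\n", idx);
        return 0;
    }
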
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 9100d82bfabc..3569050f9cf3 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -41,7 +41,7 @@
/*
* Cache if the event is allowed to trace Context information.
- * This allows us to perform the check, i.e, perfmon_capable(),
+ * This allows us to perform the check, i.e, perf_allow_kernel(),
* in the context of the event owner, once, during the event_init().
*/
#define SPE_PMU_HW_FLAGS_CX 0x00001
@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C
static void set_spe_event_has_cx(struct perf_event *event)
{
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}
@@ -745,9 +745,8 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
- if (!perfmon_capable() &&
- (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
- return -EACCES;
+ if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
+ return perf_allow_kernel(&event->attr);
return 0;
}
diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c
index 0bb685b4bac5..b09615bb2bb2 100644
--- a/drivers/perf/arm_v6_pmu.c
+++ b/drivers/perf/arm_v6_pmu.c
@@ -64,6 +64,7 @@ enum armv6_counters {
ARMV6_CYCLE_COUNTER = 0,
ARMV6_COUNTER0,
ARMV6_COUNTER1,
+ ARMV6_NUM_COUNTERS
};
/*
@@ -254,7 +255,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
*/
armv6_pmcr_write(pmcr);
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV6_NUM_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -391,7 +392,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
- cpu_pmu->num_events = 3;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, ARMV6_NUM_COUNTERS);
}
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 928ac3d626ed..420cadd108e7 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -649,25 +649,13 @@ static struct attribute_group armv7_pmuv2_events_attr_group = {
/*
* Perf Events' indices
*/
-#define ARMV7_IDX_CYCLE_COUNTER 0
-#define ARMV7_IDX_COUNTER0 1
-#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
-
-#define ARMV7_MAX_COUNTERS 32
-#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
-
+#define ARMV7_IDX_CYCLE_COUNTER 31
+#define ARMV7_IDX_COUNTER_MAX 31
/*
* ARMv7 low level PMNC access
*/
/*
- * Perf Event to low level counters mapping
- */
-#define ARMV7_IDX_TO_COUNTER(x) \
- (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
-
-/*
* Per-CPU PMNC: config reg
*/
#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
@@ -725,19 +713,17 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
- return idx >= ARMV7_IDX_CYCLE_COUNTER &&
- idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
+ return test_bit(idx, cpu_pmu->cntr_mask);
}
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
- return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
+ return pmnc & BIT(idx);
}
static inline void armv7_pmnc_select_counter(int idx)
{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx));
isb();
}
@@ -787,29 +773,25 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
static inline void armv7_pmnc_enable_counter(int idx)
{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(idx)));
}
static inline void armv7_pmnc_disable_counter(int idx)
{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(idx)));
}
static inline void armv7_pmnc_enable_intens(int idx)
{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(idx)));
}
static inline void armv7_pmnc_disable_intens(int idx)
{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(idx)));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(idx)));
isb();
}
@@ -853,15 +835,12 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
pr_info("CCNT =0x%08x\n", val);
- for (cnt = ARMV7_IDX_COUNTER0;
- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ for_each_set_bit(cnt, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- pr_info("CNT[%d] count =0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
+ pr_info("CNT[%d] count =0x%08x\n", cnt, val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- pr_info("CNT[%d] evtsel=0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
+ pr_info("CNT[%d] evtsel=0x%08x\n", cnt, val);
}
}
#endif
@@ -958,7 +937,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
*/
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1027,7 +1006,7 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
* For anything other than a cycle counter, try and use
* the events counters
*/
- for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
@@ -1073,7 +1052,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
static void armv7pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events, val;
+ u32 idx, val;
if (cpu_pmu->secure_access) {
asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
@@ -1082,7 +1061,7 @@ static void armv7pmu_reset(void *info)
}
/* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
armv7_pmnc_disable_counter(idx);
armv7_pmnc_disable_intens(idx);
}
@@ -1161,20 +1140,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
static void armv7_read_num_pmnc_events(void *info)
{
- int *nb_cnt = info;
+ int nb_cnt;
+ struct arm_pmu *cpu_pmu = info;
/* Read the nb of CNTx counters supported from PMNC */
- *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+ nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+ bitmap_set(cpu_pmu->cntr_mask, 0, nb_cnt);
/* Add the CPU cycles counter */
- *nb_cnt += 1;
+ set_bit(ARMV7_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask);
}
static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
return smp_call_function_any(&arm_pmu->supported_cpus,
armv7_read_num_pmnc_events,
- &arm_pmu->num_events, 1);
+ arm_pmu, 1);
}
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1524,7 +1505,7 @@ static void krait_pmu_reset(void *info)
{
u32 vval, fval;
struct arm_pmu *cpu_pmu = info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
+ u32 idx;
armv7pmu_reset(info);
@@ -1538,7 +1519,7 @@ static void krait_pmu_reset(void *info)
venum_post_pmresr(vval, fval);
/* Reset PMxEVNCTCR to sane default */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
armv7_pmnc_select_counter(idx);
asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
}
@@ -1562,7 +1543,7 @@ static int krait_event_to_bit(struct perf_event *event, unsigned int region,
* Lower bits are reserved for use by the counters (see
* armv7pmu_get_event_idx() for more info)
*/
- bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+ bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
return bit;
}
@@ -1845,7 +1826,7 @@ static void scorpion_pmu_reset(void *info)
{
u32 vval, fval;
struct arm_pmu *cpu_pmu = info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
+ u32 idx;
armv7pmu_reset(info);
@@ -1860,7 +1841,7 @@ static void scorpion_pmu_reset(void *info)
venum_post_pmresr(vval, fval);
/* Reset PMxEVNCTCR to sane default */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
armv7_pmnc_select_counter(idx);
asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
}
@@ -1883,7 +1864,7 @@ static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
* Lower bits are reserved for use by the counters (see
* armv7pmu_get_event_idx() for more info)
*/
- bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+ bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
return bit;
}
diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c
index 3d8b72d6b37f..638fea9b1263 100644
--- a/drivers/perf/arm_xscale_pmu.c
+++ b/drivers/perf/arm_xscale_pmu.c
@@ -53,6 +53,8 @@ enum xscale_counters {
XSCALE_COUNTER2,
XSCALE_COUNTER3,
};
+#define XSCALE1_NUM_COUNTERS 3
+#define XSCALE2_NUM_COUNTERS 5
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
@@ -168,7 +170,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE1_NUM_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -364,7 +366,8 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = xscale1pmu_start;
cpu_pmu->stop = xscale1pmu_stop;
cpu_pmu->map_event = xscale_map_event;
- cpu_pmu->num_events = 3;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE1_NUM_COUNTERS);
return 0;
}
@@ -500,7 +503,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE2_NUM_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -719,7 +722,8 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = xscale2pmu_start;
cpu_pmu->stop = xscale2pmu_stop;
cpu_pmu->map_event = xscale_map_event;
- cpu_pmu->num_events = 5;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE2_NUM_COUNTERS);
return 0;
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index c5e328f23841..4ca50f9b6dfe 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -107,6 +107,7 @@ struct dwc_pcie_vendor_id {
static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
{.vendor_id = PCI_VENDOR_ID_ALIBABA },
+ {.vendor_id = PCI_VENDOR_ID_QCOM },
{} /* terminator */
};
@@ -556,10 +557,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
{
struct platform_device *plat_dev;
struct dwc_pcie_dev_info *dev_info;
- u32 bdf;
+ u32 sbdf;
- bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
- plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf,
+ sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
pdev, sizeof(*pdev));
if (IS_ERR(plat_dev))
@@ -611,15 +612,15 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
struct pci_dev *pdev = plat_dev->dev.platform_data;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
- u32 bdf, val;
+ u32 sbdf, val;
u16 vsec;
int ret;
vsec = pci_find_vsec_capability(pdev, pdev->vendor,
DWC_PCIE_VSEC_RAS_DES_ID);
pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
- bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
- name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
+ sbdf = plat_dev->id;
+ name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
return -ENOMEM;
@@ -650,7 +651,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
&pcie_pmu->cpuhp_node);
if (ret) {
- pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf);
+ pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf);
return ret;
}
@@ -663,7 +664,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
if (ret) {
- pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf);
+ pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf);
return ret;
}
ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
@@ -726,7 +727,6 @@ static struct platform_driver dwc_pcie_pmu_driver = {
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
- bool found = false;
int ret;
for_each_pci_dev(pdev) {
@@ -738,11 +738,7 @@ static int __init dwc_pcie_pmu_init(void)
pci_dev_put(pdev);
return ret;
}
-
- found = true;
}
- if (!found)
- return -ENODEV;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/dwc_pcie_pmu:online",
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index f06027574a24..c5394d007b61 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -141,6 +141,22 @@ static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(bus);
+static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_min);
+}
+static DEVICE_ATTR_RO(bdf_min);
+
+static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_max);
+}
+static DEVICE_ATTR_RO(bdf_max);
+
static struct hisi_pcie_reg_pair
hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
{
@@ -208,7 +224,7 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
{
u64 port, trig_len, thr_len, len_mode;
- u64 reg = HISI_PCIE_INIT_SET;
+ u64 reg = 0;
/* Config HISI_PCIE_EVENT_CTRL according to event. */
reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
@@ -452,10 +468,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event)
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ u64 orig_cnt, cnt;
+
+ orig_cnt = hisi_pcie_pmu_read_counter(event);
local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
+
+ /*
+ * The counter may be unwritable if the target event is unsupported.
+ * Detect this by comparing the count before and after setting the
+ * period: if it stays unchanged, the reset write was ignored, so
+ * resync hwc->prev_count to the live value; without this, the final
+ * counts the user gets may be totally wrong.
+ */
+ cnt = hisi_pcie_pmu_read_counter(event);
+ if (orig_cnt == cnt)
+ local64_set(&hwc->prev_count, cnt);
}
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
@@ -749,6 +779,8 @@ static const struct attribute_group hisi_pcie_pmu_format_group = {
static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
&dev_attr_bus.attr,
+ &dev_attr_bdf_max.attr,
+ &dev_attr_bdf_min.attr,
NULL
};
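
The set_period fix above boils down to a read-write-read check: if the hardware silently ignores the reset write (the event is unsupported and the counter unwritable), prev_count is resynchronized to the live value so later deltas stay sane. The essential control flow, with read_cnt()/write_cnt() as hypothetical stand-ins for the driver's accessors:

u64 orig_cnt = read_cnt(event);			/* value before the reset write */

local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
write_cnt(event, HISI_PCIE_INIT_VAL);		/* may be silently ignored */

if (read_cnt(event) == orig_cnt)		/* the write did not stick */
	local64_set(&hwc->prev_count, orig_cnt);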
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0a02e85a8951..7644147d50b4 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -39,7 +39,6 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_short = 0;
userpg->cap_user_rdpmc = riscv_perf_user_access(event);
-#ifdef CONFIG_RISCV_PMU
/*
* The counters are 64-bit but the priv spec doesn't mandate all the
* bits to be implemented: that's why, counter width can vary based on
@@ -47,7 +46,6 @@ void arch_perf_update_userpage(struct perf_event *event,
*/
if (userpg->cap_user_rdpmc)
userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
-#endif
do {
rd = sched_clock_read_begin(&seq);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 31a17a56eb3b..5c39fbd8ed04 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -60,7 +60,7 @@ asm volatile(ALTERNATIVE( \
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
PMU_FORMAT_ATTR(event, "config:0-47");
-PMU_FORMAT_ATTR(firmware, "config:63");
+PMU_FORMAT_ATTR(firmware, "config:62-63");
static bool sbi_v2_available;
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
@@ -507,7 +507,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- int bSoftware;
u64 raw_config_val;
int ret;
@@ -528,18 +527,32 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
break;
case PERF_TYPE_RAW:
/*
- * As per SBI specification, the upper 16 bits must be unused for
- * a raw event. Use the MSB (63b) to distinguish between hardware
- * raw event and firmware events.
+ * As per the SBI specification, the upper 16 bits must be unused
+ * for a raw event.
+ * Bits 63:62 are used to distinguish between raw event types:
+ * 00 - Hardware raw event
+ * 10 - SBI firmware event
+ * 11 - RISC-V platform-specific firmware event
*/
- bSoftware = config >> 63;
raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
- if (bSoftware) {
+ switch (config >> 62) {
+ case 0:
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ *econfig = raw_config_val;
+ break;
+ case 2:
ret = (raw_config_val & 0xFFFF) |
(SBI_PMU_EVENT_TYPE_FW << 16);
- } else {
- ret = RISCV_PMU_RAW_EVENT_IDX;
+ break;
+ case 3:
+ /*
+ * For RISC-V platform-specific firmware events:
+ * Event code - 0xFFFF
+ * Event data - raw event encoding
+ */
+ ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
*econfig = raw_config_val;
+ break;
}
break;
default:
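
From user space, the 63:62 selector means a raw perf_event_attr.config is assembled by OR-ing the two selector bits above the event encoding. Hypothetical encodings, purely for illustration:

/* hypothetical raw encodings, following the 63:62 selector */
u64 hw_raw  = (0ULL << 62) | 0x0123;	/* 00: hardware raw event */
u64 sbi_fw  = (2ULL << 62) | 0x0005;	/* 10: SBI firmware event */
u64 plat_fw = (3ULL << 62) | 0xABCD;	/* 11: platform firmware event,
					 *     low bits carry the raw encoding */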
@@ -1373,11 +1386,15 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
/* SBI PMU snapshot is only available in SBI v2.0 */
if (sbi_v2_available) {
+ int cpu;
+
ret = pmu_sbi_snapshot_alloc(pmu);
if (ret)
goto out_unregister;
- ret = pmu_sbi_snapshot_setup(pmu, smp_processor_id());
+ cpu = get_cpu();
+
+ ret = pmu_sbi_snapshot_setup(pmu, cpu);
if (ret) {
/* Snapshot is an optional feature. Continue if not available */
pmu_sbi_snapshot_free(pmu);
@@ -1391,6 +1408,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
*/
static_branch_enable(&sbi_pmu_snapshot_available);
}
+ put_cpu();
}
register_sysctl("kernel", sbi_pmu_sysctl_table);
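
smp_processor_id() is only meaningful with preemption disabled, so the probe path now brackets the snapshot setup in get_cpu()/put_cpu(), guaranteeing the setup runs against the CPU it was asked to target. The shape of the fix, with snapshot_setup() as a hypothetical stand-in for pmu_sbi_snapshot_setup():

int cpu = get_cpu();		/* disables preemption; 'cpu' cannot change below */

ret = snapshot_setup(pmu, cpu);	/* hypothetical stand-in for the setup call */
/* ... error handling and static branch enable as in the hunk above ... */
put_cpu();			/* re-enables preemption */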
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dfab1c66b3e5..f73abff416be 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -95,6 +95,7 @@ source "drivers/phy/mediatek/Kconfig"
source "drivers/phy/microchip/Kconfig"
source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
+source "drivers/phy/nuvoton/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
source "drivers/phy/realtek/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 5fcbce5f9ab1..ebc399560da4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -25,6 +25,7 @@ obj-y += allwinner/ \
microchip/ \
motorola/ \
mscc/ \
+ nuvoton/ \
qualcomm/ \
ralink/ \
realtek/ \
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index cc29b08e49eb..462c61a24ec5 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -113,11 +113,10 @@ static const struct phy_ops cygnus_pcie_phy_ops = {
static int cygnus_pcie_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct cygnus_pcie_phy_core *core;
struct phy_provider *provider;
unsigned cnt = 0;
- int ret;
if (of_get_child_count(node) == 0) {
dev_err(dev, "PHY no child node\n");
@@ -136,35 +135,31 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
mutex_init(&core->lock);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
unsigned int id;
struct cygnus_pcie_phy *p;
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property for %pOFn\n",
child);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (id >= MAX_NUM_PHYS) {
dev_err(dev, "invalid PHY id: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (core->phys[id].phy) {
dev_err(dev, "duplicated PHY id: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
p = &core->phys[id];
p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
if (IS_ERR(p->phy)) {
dev_err(dev, "failed to create PHY\n");
- ret = PTR_ERR(p->phy);
- goto put_child;
+ return PTR_ERR(p->phy);
}
p->core = core;
@@ -184,9 +179,6 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
return 0;
-put_child:
- of_node_put(child);
- return ret;
}
static const struct of_device_id cygnus_pcie_phy_match_table[] = {
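
for_each_available_child_of_node_scoped() attaches scope-based cleanup to the child pointer, so every early return drops the OF node reference automatically and the put_child unwinding label disappears. A minimal sketch of the resulting pattern:

for_each_available_child_of_node_scoped(node, child) {
	u32 id;

	if (of_property_read_u32(child, "reg", &id))
		return -EINVAL;	/* reference on 'child' dropped automatically */
	/* ... */
}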
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index ed9e18791ec9..228100357054 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -751,11 +751,11 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
{
const char *rxaeq_mode;
struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node, *child;
+ struct device_node *dn = dev->of_node;
const struct of_device_id *of_id;
struct brcm_sata_phy *priv;
struct phy_provider *provider;
- int ret, count = 0;
+ int count = 0;
if (of_get_child_count(dn) == 0)
return -ENODEV;
@@ -782,26 +782,23 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
return PTR_ERR(priv->ctrl_base);
}
- for_each_available_child_of_node(dn, child) {
+ for_each_available_child_of_node_scoped(dn, child) {
unsigned int id;
struct brcm_sata_port *port;
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property in node %pOFn\n",
child);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (id >= MAX_PORTS) {
dev_err(dev, "invalid reg: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (priv->phys[id].phy) {
dev_err(dev, "already registered port %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
port = &priv->phys[id];
@@ -822,8 +819,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
if (IS_ERR(port->phy)) {
dev_err(dev, "failed to create PHY\n");
- ret = PTR_ERR(port->phy);
- goto put_child;
+ return PTR_ERR(port->phy);
}
phy_set_drvdata(port->phy, port);
@@ -839,9 +835,6 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
dev_info(dev, "registered %d port(s)\n", count);
return 0;
-put_child:
- of_node_put(child);
- return ret;
}
static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index d4eb93ce8232..aeec6eb6be23 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -310,7 +310,7 @@ static const struct clk_parent_data pll_mux_parent_data[][SIERRA_NUM_CMN_PLLC_PA
},
};
-static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+static const u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC] = { 0, 1 },
[CMN_PLLLC1] = { 1, 0 },
};
@@ -362,14 +362,14 @@ struct cdns_sierra_data {
u32 id_value;
u8 block_offset_shift;
u8 reg_offset_shift;
- struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
@@ -539,12 +539,12 @@ static int cdns_sierra_phy_init(struct phy *gphy)
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
const struct cdns_sierra_data *init_data = phy->init_data;
- struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ const struct cdns_sierra_vals *phy_pma_ln_vals;
enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
- struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_sierra_vals *pcs_cmn_vals;
const struct cdns_reg_pairs *reg_pairs;
- struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
u32 num_regs;
int i, j;
@@ -1244,12 +1244,12 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
{
+ const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
const struct cdns_sierra_data *init_data = sp->init_data;
- struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ const struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_sierra_vals *pcs_cmn_vals;
enum cdns_sierra_phy_type phy_t1, phy_t2;
- struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
- struct cdns_sierra_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
enum cdns_sierra_ssc_mode ssc;
struct regmap *regmap;
@@ -1366,7 +1366,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
unsigned int id_value;
int ret, node = 0;
void __iomem *base;
- struct device_node *dn = dev->of_node, *child;
+ struct device_node *dn = dev->of_node;
if (of_get_child_count(dn) == 0)
return -ENODEV;
@@ -1438,7 +1438,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->autoconf = of_property_read_bool(dn, "cdns,autoconf");
- for_each_available_child_of_node(dn, child) {
+ for_each_available_child_of_node_scoped(dn, child) {
struct phy *gphy;
if (!(of_node_name_eq(child, "phy") ||
@@ -1452,7 +1452,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- of_node_put(child);
goto put_control;
}
@@ -1461,7 +1460,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
@@ -1475,7 +1473,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
@@ -1544,11 +1541,11 @@ static void cdns_sierra_phy_remove(struct platform_device *pdev)
}
/* SGMII PHY PMA lane configuration */
-static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
-static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
+static const struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
.reg_pairs = sgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs),
};
@@ -1598,22 +1595,22 @@ static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_ln_regs[] = {
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
-static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
+static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs),
};
-static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
+static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs),
};
/* QSGMII PHY PMA lane configuration */
-static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
-static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+static const struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
.reg_pairs = qsgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
};
@@ -1664,22 +1661,22 @@ static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
-static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
};
-static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
};
/* PCIE PHY PCS common configuration */
-static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
{0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
};
-static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+static const struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
.reg_pairs = pcie_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
};
@@ -1745,12 +1742,12 @@ static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
};
@@ -1810,7 +1807,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
};
@@ -1886,12 +1883,12 @@ static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
};
@@ -1954,7 +1951,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
};
@@ -2024,12 +2021,12 @@ static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};
@@ -2092,7 +2089,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
};
@@ -2152,12 +2149,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
};
-static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
};
@@ -2227,12 +2224,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
};
-static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
};
@@ -2296,12 +2293,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
@@ -2413,12 +2410,12 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+static const struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_usb_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
};
-static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_usb_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
@@ -2443,7 +2440,7 @@ static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = {
{0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG},
};
-static struct cdns_sierra_vals sgmii_cmn_vals = {
+static const struct cdns_sierra_vals sgmii_cmn_vals = {
.reg_pairs = sgmii_pma_cmn_vals,
.num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals),
};
@@ -2489,7 +2486,7 @@ static const struct cdns_reg_pairs sgmii_ln_regs[] = {
{0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG},
};
-static struct cdns_sierra_vals sgmii_pma_ln_vals = {
+static const struct cdns_sierra_vals sgmii_pma_ln_vals = {
.reg_pairs = sgmii_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_ln_regs),
};
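
Const-qualifying these register tables lets them live in .rodata; the only knock-on change is propagating const through the struct members and locals that point at them, as the hunks above do. A minimal sketch using the same struct names:

static const struct cdns_reg_pairs regs[] = {
	{0x9010, SIERRA_PHY_PMA_XCVR_CTRL},
};

static const struct cdns_sierra_vals vals = {
	.reg_pairs = regs,	/* needs a 'const struct cdns_reg_pairs *' member */
	.num_regs  = ARRAY_SIZE(regs),
};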
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 56ce82a47f88..8bbbbb87bb22 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -285,7 +285,7 @@ static const int refclk_driver_parent_index[] = {
CDNS_TORRENT_RECEIVED_REFCLK
};
-static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
+static const u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
enum cdns_torrent_phy_type {
TYPE_NONE,
@@ -351,6 +351,7 @@ struct cdns_torrent_phy {
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
u32 dp_pll;
+ u32 protocol_bitmask;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
@@ -422,17 +423,17 @@ struct cdns_reg_pairs {
};
struct cdns_torrent_vals {
- struct cdns_reg_pairs *reg_pairs;
+ const struct cdns_reg_pairs *reg_pairs;
u32 num_regs;
};
struct cdns_torrent_vals_entry {
u32 key;
- struct cdns_torrent_vals *vals;
+ const struct cdns_torrent_vals *vals;
};
struct cdns_torrent_vals_table {
- struct cdns_torrent_vals_entry *entries;
+ const struct cdns_torrent_vals_entry *entries;
u32 num_entries;
};
@@ -454,12 +455,12 @@ struct cdns_regmap_cdb_context {
u8 reg_offset_shift;
};
-static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
- enum cdns_torrent_ref_clk refclk0,
- enum cdns_torrent_ref_clk refclk1,
- enum cdns_torrent_phy_type link0,
- enum cdns_torrent_phy_type link1,
- enum cdns_torrent_ssc_mode ssc)
+static const struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
+ enum cdns_torrent_ref_clk refclk0,
+ enum cdns_torrent_ref_clk refclk1,
+ enum cdns_torrent_phy_type link0,
+ enum cdns_torrent_phy_type link1,
+ enum cdns_torrent_ssc_mode ssc)
{
int i;
u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
@@ -2306,16 +2307,16 @@ static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
static int cdns_torrent_phy_init(struct phy *phy)
{
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
- struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
- struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
+ const struct cdns_torrent_vals *phy_pma_cmn_vals;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
- struct cdns_torrent_vals *phy_pma_cmn_vals;
- struct cdns_torrent_vals *pcs_cmn_vals;
- struct cdns_reg_pairs *reg_pairs;
+ const struct cdns_torrent_vals *pcs_cmn_vals;
+ const struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
u32 num_regs;
int i, j;
@@ -2463,166 +2464,216 @@ static const struct phy_ops cdns_torrent_phy_ops = {
static
int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
+ const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
- struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
- struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ const struct cdns_torrent_vals *phy_pma_cmn_vals;
+ const struct cdns_torrent_vals *pcs_cmn_vals;
enum cdns_torrent_phy_type phy_t1, phy_t2;
- struct cdns_torrent_vals *phy_pma_cmn_vals;
- struct cdns_torrent_vals *pcs_cmn_vals;
+ const struct cdns_reg_pairs *reg_pairs;
int i, j, node, mlane, num_lanes, ret;
- struct cdns_reg_pairs *reg_pairs;
+ struct device *dev = cdns_phy->dev;
enum cdns_torrent_ssc_mode ssc;
struct regmap *regmap;
- u32 num_regs;
+ u32 num_regs, num_protocols, protocol;
- /* Maximum 2 links (subnodes) are supported */
- if (cdns_phy->nsubnodes != 2)
+ num_protocols = hweight32(cdns_phy->protocol_bitmask);
+ /* Maximum 2 protocols are supported */
+ if (num_protocols > 2) {
+ dev_err(dev, "at most 2 protocols are supported\n");
return -EINVAL;
+ }
+
+ /**
+ * Get PHY types directly from subnodes if only 2 subnodes exist.
+ * It is possible for phy_t1 to be the same as phy_t2 for special
+ * configurations such as PCIe Multilink.
+ */
+ if (cdns_phy->nsubnodes == 2) {
+ phy_t1 = cdns_phy->phys[0].phy_type;
+ phy_t2 = cdns_phy->phys[1].phy_type;
+ } else {
+ /**
+ * The two PHY types / protocols must be distinct.
+ * If they are the same, it should be expressed with either
+ * a) Single-Link (1 Sub-node) - handled via PHY APIs
+ * OR
+ * b) Double-Link (2 Sub-nodes) - handled above
+ */
+ if (num_protocols != 2) {
+ dev_err(dev, "incorrect representation of link\n");
+ return -EINVAL;
+ }
- phy_t1 = cdns_phy->phys[0].phy_type;
- phy_t2 = cdns_phy->phys[1].phy_type;
+ phy_t1 = fns(cdns_phy->protocol_bitmask, 0);
+ phy_t2 = fns(cdns_phy->protocol_bitmask, 1);
+ }
/**
- * First configure the PHY for first link with phy_t1. Get the array
- * values as [phy_t1][phy_t2][ssc].
+ * Configure all links with the protocol phy_t1 first followed by
+ * configuring all links with the protocol phy_t2.
+ *
+ * When phy_t1 = phy_t2, it is a single protocol and configuration
+ * is performed with a single iteration of the protocol and multiple
+ * iterations over the sub-nodes (links).
+ *
+ * When phy_t1 != phy_t2, there are two protocols and configuration
+ * is performed by iterating over all sub-nodes matching the first
+ * protocol and configuring them first, followed by iterating over
+ * all sub-nodes matching the second protocol and configuring them
+ * next.
*/
- for (node = 0; node < cdns_phy->nsubnodes; node++) {
- if (node == 1) {
+ for (protocol = 0; protocol < num_protocols; protocol++) {
+ /**
+ * For the case where num_protocols is 1,
+ * phy_t1 = phy_t2 and the swap is unnecessary.
+ *
+ * Swapping phy_t1 and phy_t2 is only required when the
+ * number of protocols is 2 and there are 2 or more links.
+ */
+ if (protocol == 1) {
/**
- * If first link with phy_t1 is configured, then
- * configure the PHY for second link with phy_t2.
+ * If first protocol with phy_t1 is configured, then
+ * configure the PHY for second protocol with phy_t2.
* Get the array values as [phy_t2][phy_t1][ssc].
*/
swap(phy_t1, phy_t2);
swap(ref_clk, ref_clk1);
}
- mlane = cdns_phy->phys[node].mlane;
- ssc = cdns_phy->phys[node].ssc_mode;
- num_lanes = cdns_phy->phys[node].num_lanes;
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (cdns_phy->phys[node].phy_type != phy_t1)
+ continue;
- /**
- * PHY configuration specific registers:
- * link_cmn_vals depend on combination of PHY types being
- * configured and are common for both PHY types, so array
- * values should be same for [phy_t1][phy_t2][ssc] and
- * [phy_t2][phy_t1][ssc].
- * xcvr_diag_vals also depend on combination of PHY types
- * being configured, but these can be different for particular
- * PHY type and are per lane.
- */
- link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (link_cmn_vals) {
- reg_pairs = link_cmn_vals->reg_pairs;
- num_regs = link_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_common_cdb;
+ mlane = cdns_phy->phys[node].mlane;
+ ssc = cdns_phy->phys[node].ssc_mode;
+ num_lanes = cdns_phy->phys[node].num_lanes;
/**
- * First array value in link_cmn_vals must be of
- * PHY_PLL_CFG register
+ * PHY configuration specific registers:
+ * link_cmn_vals depend on combination of PHY types being
+ * configured and are common for both PHY types, so array
+ * values should be same for [phy_t1][phy_t2][ssc] and
+ * [phy_t2][phy_t1][ssc].
+ * xcvr_diag_vals also depend on combination of PHY types
+ * being configured, but these can be different for particular
+ * PHY type and are per lane.
*/
- regmap_field_write(cdns_phy->phy_pll_cfg,
- reg_pairs[0].val);
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+ /**
+ * First array value in link_cmn_vals must be of
+ * PHY_PLL_CFG register
+ */
+ regmap_field_write(cdns_phy->phy_pll_cfg,
+ reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- for (i = 1; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
- xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (xcvr_diag_vals) {
- reg_pairs = xcvr_diag_vals->reg_pairs;
- num_regs = xcvr_diag_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
}
- }
- /* PHY PCS common registers configurations */
- pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (pcs_cmn_vals) {
- reg_pairs = pcs_cmn_vals->reg_pairs;
- num_regs = pcs_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_phy_pcs_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals =
+ cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY, phy_t1, phy_t2,
+ ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- /* PHY PMA common registers configurations */
- phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (phy_pma_cmn_vals) {
- reg_pairs = phy_pma_cmn_vals->reg_pairs;
- num_regs = phy_pma_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_phy_pma_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PMA common registers configurations */
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- /* PMA common registers configurations */
- cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (cmn_vals) {
- reg_pairs = cmn_vals->reg_pairs;
- num_regs = cmn_vals->num_regs;
- regmap = cdns_phy->regmap_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
- /* PMA TX lane registers configurations */
- tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (tx_ln_vals) {
- reg_pairs = tx_ln_vals->reg_pairs;
- num_regs = tx_ln_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
}
- }
- /* PMA RX lane registers configurations */
- rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (rx_ln_vals) {
- reg_pairs = rx_ln_vals->reg_pairs;
- num_regs = rx_ln_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ if (phy_t1 == TYPE_DP) {
+ ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
+ if (ret)
+ return ret;
}
- }
- if (phy_t1 == TYPE_DP) {
- ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
- if (ret)
- return ret;
+ reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
-
- reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
/* Take the PHY out of reset */
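
With the per-subnode protocol bitmask collected at probe time, the multilink path derives its (at most two) protocols from the mask: hweight32() counts them and fns() picks the first and second set bits, which also covers the PCIe-multilink case where both links share one protocol. A worked example with hypothetical bit positions:

/* hypothetical: QSGMII on bit 2, PCIe on bit 4 of the protocol mask */
u32 protocol_bitmask = BIT(2) | BIT(4);

u32 num_protocols = hweight32(protocol_bitmask);	/* == 2 */
unsigned long phy_t1 = fns(protocol_bitmask, 0);	/* == 2, first set bit  */
unsigned long phy_t2 = fns(protocol_bitmask, 1);	/* == 4, second set bit */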
@@ -2826,6 +2877,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, cdns_phy);
cdns_phy->dev = dev;
cdns_phy->init_data = data;
+ cdns_phy->protocol_bitmask = 0;
cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
@@ -3010,6 +3062,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
}
cdns_phy->phys[node].phy = gphy;
+ cdns_phy->protocol_bitmask |= BIT(cdns_phy->phys[node].phy_type);
phy_set_drvdata(gphy, &cdns_phy->phys[node]);
node++;
@@ -3079,21 +3132,21 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
}
/* SGMII and QSGMII link configuration */
-static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG}
};
-static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = {
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x0113, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = {
.reg_pairs = sgmii_qsgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs),
};
@@ -3155,73 +3208,73 @@ static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops,
cdns_torrent_phy_resume_noirq);
/* USB and DP link configuration */
-static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals usb_dp_link_cmn_vals = {
.reg_pairs = usb_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
.reg_pairs = usb_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.reg_pairs = dp_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
/* USXGMII and SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
{0x0111, XCVR_DIAG_HSCLK_SEL},
{0x0103, XCVR_DIAG_HSCLK_DIV},
{0x0A9B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
.reg_pairs = usxgmii_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs),
};
/* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
{0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
{0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
@@ -3233,13 +3286,13 @@ static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL0_LOCK_PLLCNT_START}
};
-static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
.reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
};
/* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3248,13 +3301,13 @@ static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
.reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs),
};
/* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0028, CMN_PLLSM0_PLLPRE_TMR},
{0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3280,13 +3333,13 @@ static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL0_LOCK_PLLCNT_START}
};
-static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
.reg_pairs = j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
};
/* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PLLSM1_PLLPRE_TMR},
{0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
@@ -3297,42 +3350,42 @@ static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
.reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs),
};
/* PCIe and USXGMII link configuration */
-static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0089, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
.reg_pairs = pcie_usxgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
.reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs),
};
@@ -3340,7 +3393,7 @@ static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
/*
* Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC
*/
-static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3355,7 +3408,7 @@ static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3 },
@@ -3363,7 +3416,7 @@ static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x0000, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -3381,55 +3434,55 @@ static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
.reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
.reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
.reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs),
};
/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
-static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
+static const struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
{0x0040, PHY_PMA_CMN_CTRL1},
};
-static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
+static const struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
.reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
.num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
};
/* Single USXGMII link configuration */
-static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
.reg_pairs = sl_usxgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
};
/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0028, CMN_PLLSM0_PLLPRE_TMR},
{0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3467,7 +3520,7 @@ static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL1_LOCK_PLLCNT_START}
};
-static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -3476,7 +3529,7 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x0000, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -3498,68 +3551,68 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
.reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
};
/* PCIe and DP link configuration */
-static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
};
-static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
.reg_pairs = pcie_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
.reg_pairs = pcie_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
.reg_pairs = dp_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
};
/* DP Multilink, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3569,43 +3622,43 @@ static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
.reg_pairs = dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
};
/* Single DisplayPort(DP) link configuration */
-static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
};
-static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_link_cmn_vals = {
.reg_pairs = sl_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
.reg_pairs = sl_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs),
};
/* Single DP, 19.2 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0027, CMN_PLLSM0_PLLPRE_TMR},
{0x00A1, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3642,7 +3695,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
{0x0780, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
@@ -3650,7 +3703,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3660,23 +3713,23 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs),
};
/* Single DP, 25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
{0x0019, CMN_SSM_BIAS_TMR},
{0x0032, CMN_PLLSM0_PLLPRE_TMR},
{0x00D1, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3713,7 +3766,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
{0x09C4, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
@@ -3721,7 +3774,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3731,35 +3784,35 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs),
};
/* Single DP, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3769,92 +3822,92 @@ static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs),
};
/* USB and SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
.reg_pairs = usb_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = usb_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs),
};
/* PCIe and USB Unique SSC link configuration */
-static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x00C9, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
.reg_pairs = pcie_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
.reg_pairs = pcie_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
.reg_pairs = usb_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs),
};
/* USB 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -3907,47 +3960,47 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
.reg_pairs = usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs),
};
/* Single USB link configuration */
-static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_usb_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_link_cmn_vals = {
.reg_pairs = sl_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
.reg_pairs = sl_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs),
};
/* USB PHY PCS common configuration */
-static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
{0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0},
{0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0},
{0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1}
};
-static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
+static const struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
.reg_pairs = usb_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs),
};
/* USB 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3957,19 +4010,19 @@ static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
-static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x02FF, TX_PSC_A0},
{0x06AF, TX_PSC_A1},
{0x06AE, TX_PSC_A2},
@@ -3979,7 +4032,7 @@ static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x0003, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0D1D, RX_PSC_A0},
{0x0D1D, RX_PSC_A1},
{0x0D00, RX_PSC_A2},
@@ -4002,23 +4055,23 @@ static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0003, RX_CDRLF_CNFG3}
};
-static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
.reg_pairs = usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
.reg_pairs = usb_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
.reg_pairs = usb_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs),
};
/* Single link USB, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
@@ -4059,48 +4112,48 @@ static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
-static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs),
};
/* PCIe and SGMII/QSGMII Unique SSC link configuration */
-static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
.reg_pairs = pcie_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = pcie_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs),
};
/* SGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -4108,17 +4161,17 @@ static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4127,7 +4180,7 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x0002, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4137,7 +4190,7 @@ static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -4155,28 +4208,28 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG},
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
};
/* TI J7200, multilink SGMII */
-static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -4187,12 +4240,12 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -4214,13 +4267,13 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs),
};
/* SGMII 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4271,13 +4324,13 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
.reg_pairs = sgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs),
};
/* QSGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -4285,17 +4338,17 @@ static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4305,7 +4358,7 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x0002, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4316,7 +4369,7 @@ static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -4334,28 +4387,28 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG},
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
};
/* TI J7200, multilink QSGMII */
-static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -4367,12 +4420,12 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -4394,13 +4447,13 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs),
};
/* QSGMII 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4451,35 +4504,35 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs),
};
/* Single SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x0013, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
.reg_pairs = sl_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
};
/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4528,13 +4581,13 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
-static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs),
};
/* Single link PCIe, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4583,35 +4636,35 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
-static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
.reg_pairs = sl_pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs),
};
/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
-static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
};
-static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x0001, RX_DIAG_ACYA}
};
-static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = pcie_100_ext_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};
-static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
@@ -4647,7 +4700,7 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals},
};
-static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
+static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
@@ -4683,7 +4736,7 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
};
-static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
@@ -4691,7 +4744,7 @@ static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
};
-static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
@@ -4773,7 +4826,7 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
};
-static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -4855,7 +4908,7 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
};
-static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
@@ -4966,14 +5019,14 @@ static const struct cdns_torrent_data cdns_map_torrent = {
},
};
-static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals},
};
-static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -5089,7 +5142,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
};
/* TI J7200 (Torrent SD0805) */
-static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
@@ -5171,7 +5224,7 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
};
-static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -5253,7 +5306,7 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
};
-static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
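The hunks above are a mechanical const-ification of the Torrent register tables: marking the lookup tables const lets the compiler place them in .rodata and turns any accidental write into a build error, while their use is unchanged since they are only ever read. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>	/* u32 */

struct reg_pair {
	u32 val;
	u32 off;
};

struct reg_vals {
	const struct reg_pair *reg_pairs;	/* must be a const pointer */
	u32 num_regs;
};

static const struct reg_pair demo_regs[] = {
	{ 0x0000, 0x01c0 },	/* value, register offset */
};

static const struct reg_vals demo_vals = {
	.reg_pairs = demo_regs,
	.num_regs = ARRAY_SIZE(demo_regs),	/* sized from the array itself */
};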
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index c138cd4807d6..c843923252aa 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -138,7 +138,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
- struct device_node *child;
int i = 0;
int ret;
@@ -162,24 +161,20 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
priv->type = (uintptr_t) of_device_get_match_data(dev);
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
struct reset_control *rst;
struct phy *phy;
rst = of_reset_control_get_exclusive(child, NULL);
- if (IS_ERR(rst)) {
- of_node_put(child);
+ if (IS_ERR(rst))
return PTR_ERR(rst);
- }
priv->ports[i].utmi_rst = rst;
priv->ports[i].priv = priv;
phy = devm_phy_create(dev, child, &hisi_inno_phy_ops);
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
phy_set_bus_width(phy, 8);
phy_set_drvdata(phy, &priv->ports[i]);
@@ -187,7 +182,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (i >= INNO_PHY_PORT_NUM) {
dev_warn(dev, "Support %d ports in maximum\n", i);
- of_node_put(child);
break;
}
}
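The hisi-inno change above is the first of several identical conversions in this series: for_each_child_of_node_scoped() makes the loop variable own the child reference and drop it automatically when the variable goes out of scope, so early returns no longer need a manual of_node_put(). A minimal sketch of the before/after shape, assuming only the standard <linux/of.h> iterators:

#include <linux/errno.h>
#include <linux/of.h>

/* Old style: every early exit must drop the child reference by hand. */
static int count_children_legacy(struct device_node *np)
{
	struct device_node *child;
	int n = 0;

	for_each_available_child_of_node(np, child) {
		if (n >= 8) {
			of_node_put(child);	/* easy to forget */
			return -EINVAL;
		}
		n++;
	}
	return n;
}

/* Scoped style: the iterator owns the reference and puts it for us. */
static int count_children_scoped(struct device_node *np)
{
	int n = 0;

	for_each_available_child_of_node_scoped(np, child) {
		if (n >= 8)
			return -EINVAL;	/* of_node_put() runs automatically */
		n++;
	}
	return n;
}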
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index da5e8f405749..fefc02d921e6 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -244,8 +244,8 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 25b86bbb9cec..3f7095ec5978 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1577,12 +1577,11 @@ static int mtk_tphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child_np;
struct phy_provider *provider;
struct resource *sif_res;
struct mtk_tphy *tphy;
struct resource res;
- int port, retval;
+ int port;
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL);
if (!tphy)
@@ -1623,25 +1622,23 @@ static int mtk_tphy_probe(struct platform_device *pdev)
}
port = 0;
- for_each_child_of_node(np, child_np) {
+ for_each_child_of_node_scoped(np, child_np) {
struct mtk_phy_instance *instance;
struct clk_bulk_data *clks;
struct device *subdev;
struct phy *phy;
+ int retval;
instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
- if (!instance) {
- retval = -ENOMEM;
- goto put_child;
- }
+ if (!instance)
+ return -ENOMEM;
tphy->phys[port] = instance;
phy = devm_phy_create(dev, child_np, &mtk_tphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- retval = PTR_ERR(phy);
- goto put_child;
+ return PTR_ERR(phy);
}
subdev = &phy->dev;
@@ -1649,14 +1646,12 @@ static int mtk_tphy_probe(struct platform_device *pdev)
if (retval) {
dev_err(subdev, "failed to get address resource(id-%d)\n",
port);
- goto put_child;
+ return retval;
}
instance->port_base = devm_ioremap_resource(subdev, &res);
- if (IS_ERR(instance->port_base)) {
- retval = PTR_ERR(instance->port_base);
- goto put_child;
- }
+ if (IS_ERR(instance->port_base))
+ return PTR_ERR(instance->port_base);
instance->phy = phy;
instance->index = port;
@@ -1668,19 +1663,16 @@ static int mtk_tphy_probe(struct platform_device *pdev)
clks[1].id = "da_ref"; /* analog clock */
retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks);
if (retval)
- goto put_child;
+ return retval;
retval = phy_type_syscon_get(instance, child_np);
if (retval)
- goto put_child;
+ return retval;
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
-put_child:
- of_node_put(child_np);
- return retval;
}
static struct platform_driver mtk_tphy_driver = {
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index 064fd0941727..7c248f5cfca5 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -432,12 +432,11 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child_np;
struct phy_provider *provider;
struct resource *glb_res;
struct mtk_xsphy *xsphy;
struct resource res;
- int port, retval;
+ int port;
xsphy = devm_kzalloc(dev, sizeof(*xsphy), GFP_KERNEL);
if (!xsphy)
@@ -471,37 +470,34 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
device_property_read_u32(dev, "mediatek,src-coef", &xsphy->src_coef);
port = 0;
- for_each_child_of_node(np, child_np) {
+ for_each_child_of_node_scoped(np, child_np) {
struct xsphy_instance *inst;
struct phy *phy;
+ int retval;
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
- if (!inst) {
- retval = -ENOMEM;
- goto put_child;
- }
+ if (!inst)
+ return -ENOMEM;
xsphy->phys[port] = inst;
phy = devm_phy_create(dev, child_np, &mtk_xsphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- retval = PTR_ERR(phy);
- goto put_child;
+ return PTR_ERR(phy);
}
retval = of_address_to_resource(child_np, 0, &res);
if (retval) {
dev_err(dev, "failed to get address resource(id-%d)\n",
port);
- goto put_child;
+ return retval;
}
inst->port_base = devm_ioremap_resource(&phy->dev, &res);
if (IS_ERR(inst->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
- retval = PTR_ERR(inst->port_base);
- goto put_child;
+ return PTR_ERR(inst->port_base);
}
inst->phy = phy;
@@ -512,17 +508,12 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
inst->ref_clk = devm_clk_get(&phy->dev, "ref");
if (IS_ERR(inst->ref_clk)) {
dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
- retval = PTR_ERR(inst->ref_clk);
- goto put_child;
+ return PTR_ERR(inst->ref_clk);
}
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
-
-put_child:
- of_node_put(child_np);
- return retval;
}
static struct platform_driver mtk_xsphy_driver = {
diff --git a/drivers/phy/nuvoton/Kconfig b/drivers/phy/nuvoton/Kconfig
new file mode 100644
index 000000000000..d02cae2db315
--- /dev/null
+++ b/drivers/phy/nuvoton/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PHY drivers for Nuvoton MA35 platforms
+#
+config PHY_MA35_USB
+ tristate "Nuvoton MA35 USB2.0 PHY driver"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Enable this to support the USB2.0 PHY on the Nuvoton MA35
+ series SoCs.
diff --git a/drivers/phy/nuvoton/Makefile b/drivers/phy/nuvoton/Makefile
new file mode 100644
index 000000000000..2937e3921898
--- /dev/null
+++ b/drivers/phy/nuvoton/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PHY_MA35_USB) += phy-ma35d1-usb2.o
diff --git a/drivers/phy/nuvoton/phy-ma35d1-usb2.c b/drivers/phy/nuvoton/phy-ma35d1-usb2.c
new file mode 100644
index 000000000000..9a459b700ed4
--- /dev/null
+++ b/drivers/phy/nuvoton/phy-ma35d1-usb2.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* USB PHY Miscellaneous Control Register */
+#define MA35_SYS_REG_USBPMISCR 0x60
+#define PHY0POR BIT(0) /* PHY Power-On Reset Control Bit */
+#define PHY0SUSPEND   BIT(1)  /* PHY Suspend; 0: suspend, 1: operation */
+#define PHY0COMN BIT(2) /* PHY Common Block Power-Down Control */
+#define PHY0DEVCKSTB BIT(10) /* PHY 60 MHz UTMI clock stable bit */
+
+struct ma35_usb_phy {
+ struct clk *clk;
+ struct device *dev;
+ struct regmap *sysreg;
+};
+
+static int ma35_usb_phy_power_on(struct phy *phy)
+{
+ struct ma35_usb_phy *p_phy = phy_get_drvdata(phy);
+ unsigned int val;
+ int ret;
+
+ ret = clk_prepare_enable(p_phy->clk);
+ if (ret < 0) {
+ dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret);
+ return ret;
+ }
+
+ regmap_read(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, &val);
+ if (val & PHY0SUSPEND) {
+ /*
+ * USB PHY0 is in operation mode already
+		 * USB PHY0 is already in operation mode;
+		 * make sure the USB PHY 60 MHz UTMI interface clock is ready
+ ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val,
+ val & PHY0DEVCKSTB, 10, 1000);
+ if (ret == 0)
+ return 0;
+ }
+
+ /*
+	 * Reset USB PHY0, then wait for its 60 MHz UTMI interface
+	 * clock to become ready.
+ */
+ regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, (PHY0POR | PHY0SUSPEND));
+ udelay(20);
+
+ /* make USB PHY0 enter operation mode */
+ regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, PHY0SUSPEND);
+
+	/* make sure the USB PHY 60 MHz UTMI interface clock is ready */
+ ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val,
+ val & PHY0DEVCKSTB, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+		dev_err(p_phy->dev, "Timed out waiting for PHY clock: %d\n", ret);
+ clk_disable_unprepare(p_phy->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ma35_usb_phy_power_off(struct phy *phy)
+{
+ struct ma35_usb_phy *p_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(p_phy->clk);
+ return 0;
+}
+
+static const struct phy_ops ma35_usb_phy_ops = {
+ .power_on = ma35_usb_phy_power_on,
+ .power_off = ma35_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int ma35_usb_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct ma35_usb_phy *p_phy;
+ struct phy *phy;
+
+ p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL);
+ if (!p_phy)
+ return -ENOMEM;
+
+ p_phy->dev = &pdev->dev;
+ platform_set_drvdata(pdev, p_phy);
+
+ p_phy->sysreg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys");
+ if (IS_ERR(p_phy->sysreg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->sysreg),
+ "Failed to get SYS registers\n");
+
+ p_phy->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(p_phy->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->clk),
+ "failed to find usb_phy clock\n");
+
+ phy = devm_phy_create(&pdev->dev, NULL, &ma35_usb_phy_ops);
+ if (IS_ERR(phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy), "Failed to create PHY\n");
+
+ phy_set_drvdata(phy, p_phy);
+
+ provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return dev_err_probe(&pdev->dev, PTR_ERR(provider),
+ "Failed to register PHY provider\n");
+ return 0;
+}
+
+static const struct of_device_id ma35_usb_phy_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-usb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ma35_usb_phy_of_match);
+
+static struct platform_driver ma35_usb_phy_driver = {
+ .probe = ma35_usb_phy_probe,
+ .driver = {
+ .name = "ma35d1-usb2-phy",
+ .of_match_table = ma35_usb_phy_of_match,
+ },
+};
+module_platform_driver(ma35_usb_phy_driver);
+
+MODULE_DESCRIPTION("Nuvoton ma35d1 USB2.0 PHY driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
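For context, a controller driver would consume this PHY through the generic PHY framework. The sketch below is illustrative usage, not part of the patch; the "usb" phy name is a hypothetical binding detail:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int consumer_attach_phy(struct device *dev)
{
	struct phy *usb_phy;
	int ret;

	usb_phy = devm_phy_get(dev, "usb");	/* hypothetical phy-names entry */
	if (IS_ERR(usb_phy))
		return PTR_ERR(usb_phy);

	/*
	 * Ends up in ma35_usb_phy_power_on(): enable the clock, release
	 * POR, and poll PHY0DEVCKSTB for a stable 60 MHz UTMI clock.
	 */
	ret = phy_power_on(usb_phy);
	if (ret)
		return ret;

	phy_power_off(usb_phy);	/* balanced by ma35_usb_phy_power_off() */
	return 0;
}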
diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c
index bd3edaa986c8..1e410eb41058 100644
--- a/drivers/phy/phy-airoha-pcie.c
+++ b/drivers/phy/phy-airoha-pcie.c
@@ -18,6 +18,9 @@
#define LEQ_LEN_CTRL_MAX_VAL 7
#define FREQ_LOCK_MAX_ATTEMPT 10
+/* Time in ms the hw needs to complete PCIe-PHY initialization */
+#define PHY_HW_INIT_TIME_MS 30
+
enum airoha_pcie_port_gen {
PCIE_PORT_GEN1 = 1,
PCIE_PORT_GEN2,
@@ -1181,7 +1184,8 @@ static int airoha_pcie_phy_init(struct phy *phy)
airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
PCIE_DA_XPON_CDR_PR_PWDB);
- usleep_range(100, 200);
+ /* Wait for the PCIe PHY to complete initialization before returning */
+ msleep(PHY_HW_INIT_TIME_MS);
return 0;
}
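The usleep_range() to msleep() switch follows the usual kernel rule of thumb (per Documentation/timers/timers-howto.rst): hrtimer-backed usleep_range() suits waits up to roughly 20 ms, while longer waits such as the 30 ms settle time here are better served by the cheaper, jiffies-based msleep(). In sketch form:

#include <linux/delay.h>

static void delay_rules_of_thumb(void)
{
	udelay(5);		/* < ~10 us: busy-wait, safe in atomic context */
	usleep_range(100, 200);	/* ~10 us .. ~20 ms: hrtimer-backed sleep */
	msleep(30);		/* > ~20 ms: jiffies-based sleep is fine */
}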
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7b00945f7191..a8adc3214bfe 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -2190,24 +2190,25 @@ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
void __iomem *serdes = qmp->dp_serdes;
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- qmp_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, cfg->dp_serdes_tbl,
+ cfg->dp_serdes_tbl_num);
switch (dp_opts->link_rate) {
case 1620:
- qmp_configure(serdes, cfg->serdes_tbl_rbr,
- cfg->serdes_tbl_rbr_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_rbr,
+ cfg->serdes_tbl_rbr_num);
break;
case 2700:
- qmp_configure(serdes, cfg->serdes_tbl_hbr,
- cfg->serdes_tbl_hbr_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr,
+ cfg->serdes_tbl_hbr_num);
break;
case 5400:
- qmp_configure(serdes, cfg->serdes_tbl_hbr2,
- cfg->serdes_tbl_hbr2_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr2,
+ cfg->serdes_tbl_hbr2_num);
break;
case 8100:
- qmp_configure(serdes, cfg->serdes_tbl_hbr3,
- cfg->serdes_tbl_hbr3_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr3,
+ cfg->serdes_tbl_hbr3_num);
break;
default:
/* Other link rates aren't supported */
@@ -2807,8 +2808,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
qmp_combo_dp_serdes_init(qmp);
- qmp_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
- qmp_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
/* Configure special DP tx tunings */
cfg->configure_dp_tx(qmp);
@@ -2850,7 +2851,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
unsigned int val;
int ret;
- qmp_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
@@ -2859,16 +2860,17 @@ static int qmp_combo_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl,
+ cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-common.h b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
index 799384210509..b945fc14cece 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-common.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
@@ -9,6 +9,7 @@
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
+	const char *name;
/*
* mask of lanes for which this register is written
* for cases when second lane needs different values
@@ -20,6 +21,7 @@ struct qmp_phy_init_tbl {
{ \
.offset = o, \
.val = v, \
+ .name = #o, \
.lane_mask = 0xff, \
}
@@ -27,13 +29,13 @@ struct qmp_phy_init_tbl {
{ \
.offset = o, \
.val = v, \
+ .name = #o, \
.lane_mask = l, \
}
-static inline void qmp_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
+static inline void qmp_configure_lane(struct device *dev, void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num, u8 lane_mask)
{
int i;
const struct qmp_phy_init_tbl *t = tbl;
@@ -45,15 +47,16 @@ static inline void qmp_configure_lane(void __iomem *base,
if (!(t->lane_mask & lane_mask))
continue;
+ dev_dbg(dev, "Writing Reg: %s Offset: 0x%04x Val: 0x%02x\n",
+ t->name, t->offset, t->val);
writel(t->val, base + t->offset);
}
}
-static inline void qmp_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static inline void qmp_configure(struct device *dev, void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[], int num)
{
- qmp_configure_lane(base, tbl, num, 0xff);
+ qmp_configure_lane(dev, base, tbl, num, 0xff);
}
#endif
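The "#o" added to the QMP_PHY_INIT_CFG initializers is plain preprocessor stringification: it captures the register's macro name as a string at table-build time, which is what lets the new dev_dbg() in qmp_configure_lane() print a symbolic name next to each offset and value. A standalone sketch of the trick, with hypothetical names:

struct init_reg {
	unsigned int offset;
	unsigned int val;
	const char *name;
};

/* '#o' expands the macro argument into a string literal. */
#define INIT_CFG(o, v) { .offset = o, .val = v, .name = #o }

#define DEMO_PHY_CTRL	0x0040	/* hypothetical register offset */

static const struct init_reg demo_tbl[] = {
	INIT_CFG(DEMO_PHY_CTRL, 0x2a),	/* .name == "DEMO_PHY_CTRL" */
};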
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 0442b3120563..a7c65cfe31df 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -288,7 +288,7 @@ static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
unsigned int val;
int ret;
- qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num);
qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
@@ -431,9 +431,9 @@ static int qmp_pcie_msm8996_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
* Pull out PHY from POWER DOWN state.
@@ -725,7 +725,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
- struct device_node *child;
struct phy_provider *phy_provider;
void __iomem *serdes;
const struct qmp_phy_cfg *cfg = NULL;
@@ -773,13 +772,13 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
return -ENOMEM;
id = 0;
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
/* Create per-lane phy */
ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
- goto err_node_put;
+ return ret;
}
/*
@@ -790,7 +789,7 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
if (ret) {
dev_err(qmp->dev,
"failed to register pipe clock source\n");
- goto err_node_put;
+ return ret;
}
id++;
@@ -799,10 +798,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
-
-err_node_put:
- of_node_put(child);
- return ret;
}
static struct platform_driver qmp_pcie_msm8996_driver = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 06cd9787e700..f71787fb4d7e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -1242,6 +1242,10 @@ static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f),
};
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c),
+};
+
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88),
@@ -3654,6 +3658,41 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = {
.ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl,
.ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl),
},
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = sm8550_qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .regs = pciephy_v6_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+ .has_nocsr_reset = true,
+};
+
+static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = {
+ .lanes = 4,
+
+ .offsets = &qmp_pcie_offsets_v6_20,
+
+ .tbls = {
+ .serdes = x1e80100_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_serdes_tbl),
+ .tx = x1e80100_qmp_gen4x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_tx_tbl),
+ .rx = x1e80100_qmp_gen4x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_rx_tbl),
+ .pcs = x1e80100_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc = x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl),
+ },
+
+ .serdes_4ln_tbl = x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl,
+ .serdes_4ln_num = ARRAY_SIZE(x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl),
+
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
@@ -3669,18 +3708,30 @@ static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
const struct qmp_pcie_offsets *offs = cfg->offsets;
- void __iomem *tx3, *rx3, *tx4, *rx4;
+ void __iomem *serdes, *tx3, *rx3, *tx4, *rx4, *pcs, *pcs_misc, *ln_shrd;
+ serdes = qmp->port_b + offs->serdes;
tx3 = qmp->port_b + offs->tx;
rx3 = qmp->port_b + offs->rx;
tx4 = qmp->port_b + offs->tx2;
rx4 = qmp->port_b + offs->rx2;
+ pcs = qmp->port_b + offs->pcs;
+ pcs_misc = qmp->port_b + offs->pcs_misc;
+ ln_shrd = qmp->port_b + offs->ln_shrd;
- qmp_configure_lane(tx3, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx3, tbls->rx, tbls->rx_num, 1);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
- qmp_configure_lane(tx4, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(rx4, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, tx3, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx3, tbls->rx, tbls->rx_num, 1);
+
+ qmp_configure_lane(qmp->dev, tx4, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, rx4, tbls->rx, tbls->rx_num, 2);
+
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+
+ qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -3698,25 +3749,26 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
if (!tbls)
return;
- qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
- qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_configure_lane(tx2, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, rx2, tbls->rx, tbls->rx_num, 2);
}
- qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
- qmp_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) {
- qmp_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl,
+ cfg->serdes_4ln_num);
qmp_pcie_init_port_b(qmp, tbls);
}
- qmp_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
+ qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static int qmp_pcie_init(struct phy *phy)
@@ -4423,6 +4475,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
}, {
.compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy",
.data = &x1e80100_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy",
+ .data = &x1e80100_qmp_gen4x4_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index a57e8a4657f4..d964bdfe8700 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1527,7 +1527,7 @@ static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tb
{
void __iomem *serdes = qmp->serdes;
- qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
}
static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -1536,12 +1536,12 @@ static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbl
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
- qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->rx2, tbls->rx, tbls->rx_num, 2);
}
}
@@ -1549,7 +1549,7 @@ static void qmp_ufs_pcs_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls
{
void __iomem *pcs = qmp->pcs;
- qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
}
static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 9b0eb87b1680..2fd49355aa37 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1649,7 +1649,7 @@ static int qmp_usb_serdes_init(struct qmp_usb *qmp)
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num);
return 0;
}
@@ -1730,13 +1730,13 @@ static int qmp_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
index 5cbc5fd529eb..d4fa1063ea61 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -526,7 +526,8 @@ static int qmp_usbc_power_on(struct phy *phy)
unsigned int val;
int ret;
- qmp_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+ qmp_configure(qmp->dev, qmp->serdes, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
@@ -535,13 +536,13 @@ static int qmp_usbc_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qmp_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/* Pull PHY out of reset state */
qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 7594f64eb737..58e123305152 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -19,12 +19,14 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
#define USB2_INT_ENABLE 0x000
+#define USB2_AHB_BUS_CTR 0x008
#define USB2_USBCTR 0x00c
#define USB2_SPD_RSM_TIMSET 0x10c
#define USB2_OC_TIMSET 0x110
@@ -40,6 +42,10 @@
#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */
#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */
+/* AHB_BUS_CTR */
+#define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0)
+#define USB2_AHB_BUS_CTR_MBL_INCR4 2
+
/* USBCTR */
#define USB2_USBCTR_DIRPD BIT(2)
#define USB2_USBCTR_PLL_RST BIT(1)
@@ -111,6 +117,7 @@ struct rcar_gen3_chan {
struct extcon_dev *extcon;
struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
+ struct reset_control *rstc;
struct work_struct work;
struct mutex lock; /* protects rphys[...].powered */
enum usb_dr_mode dr_mode;
@@ -125,6 +132,7 @@ struct rcar_gen3_chan {
struct rcar_gen3_phy_drv_data {
const struct phy_ops *phy_usb2_ops;
bool no_adp_ctrl;
+ bool init_bus;
};
/*
@@ -575,6 +583,12 @@ static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = {
.no_adp_ctrl = true,
};
+static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = {
+ .phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
+ .no_adp_ctrl = true,
+ .init_bus = true,
+};
+
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
@@ -597,6 +611,10 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
.data = &rz_g2l_phy_usb2_data,
},
{
+ .compatible = "renesas,usb2-phy-r9a08g045",
+ .data = &rz_g3s_phy_usb2_data,
+ },
+ {
.compatible = "renesas,rcar-gen3-usb2-phy",
.data = &rcar_gen3_phy_usb2_data,
},
@@ -650,6 +668,35 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np)
return candidate;
}
+static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel)
+{
+ struct device *dev = channel->dev;
+ int ret;
+ u32 val;
+
+ channel->rstc = devm_reset_control_array_get_shared(dev);
+ if (IS_ERR(channel->rstc))
+ return PTR_ERR(channel->rstc);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(channel->rstc);
+ if (ret)
+ goto rpm_put;
+
+ val = readl(channel->base + USB2_AHB_BUS_CTR);
+ val &= ~USB2_AHB_BUS_CTR_MBL_MASK;
+ val |= USB2_AHB_BUS_CTR_MBL_INCR4;
+ writel(val, channel->base + USB2_AHB_BUS_CTR);
+
+rpm_put:
+ pm_runtime_put(dev);
+
+ return ret;
+}
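
The new bus-init helper programs the two-bit master burst length field down to INCR4. The read-modify-write in the middle is equivalent to the following sketch using the bitfield helpers, assuming <linux/bitfield.h> were included:

	val = readl(channel->base + USB2_AHB_BUS_CTR);
	val &= ~USB2_AHB_BUS_CTR_MBL_MASK;
	val |= FIELD_PREP(USB2_AHB_BUS_CTR_MBL_MASK, USB2_AHB_BUS_CTR_MBL_INCR4);
	writel(val, channel->base + USB2_AHB_BUS_CTR);
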
+
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
const struct rcar_gen3_phy_drv_data *phy_data;
@@ -703,6 +750,15 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
goto error;
}
+ platform_set_drvdata(pdev, channel);
+ channel->dev = dev;
+
+ if (phy_data->init_bus) {
+ ret = rcar_gen3_phy_usb2_init_bus(channel);
+ if (ret)
+ goto error;
+ }
+
channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl;
if (phy_data->no_adp_ctrl)
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
@@ -733,9 +789,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
- platform_set_drvdata(pdev, channel);
- channel->dev = dev;
-
provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
@@ -762,6 +815,7 @@ static void rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
+ reset_control_assert(channel->rstc);
pm_runtime_disable(&pdev->dev);
};
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 946c01210ac8..9f084697dd05 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -190,6 +192,8 @@
#define LN3_TX_SER_RATE_SEL_HBR2 BIT(3)
#define LN3_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define HDMI20_MAX_RATE 600000000
+
struct lcpll_config {
u32 bit_rate;
u8 lcvco_mode_en;
@@ -272,6 +276,12 @@ struct rk_hdptx_phy {
struct clk_bulk_data *clks;
int nr_clks;
struct reset_control_bulk_data rsts[RST_MAX];
+
+ /* clk provider */
+ struct clk_hw hw;
+ unsigned long rate;
+
+ atomic_t usage_count;
};
static const struct ropll_config ropll_tmds_cfg[] = {
@@ -759,6 +769,8 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
struct ropll_config rc = {0};
int i;
+ hdptx->rate = rate * 100;
+
for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
if (rate == ropll_tmds_cfg[i].bit_rate) {
cfg = &ropll_tmds_cfg[i];
@@ -822,19 +834,6 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
unsigned int rate)
{
- u32 val;
- int ret;
-
- ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
- if (ret)
- return ret;
-
- if (!(val & HDPTX_O_PLL_LOCK_DONE)) {
- ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
- if (ret)
- return ret;
- }
-
rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq);
regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06);
@@ -856,10 +855,68 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
return rk_hdptx_post_enable_lane(hdptx);
}
+static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
+ unsigned int rate)
+{
+ u32 status;
+ int ret;
+
+ if (atomic_inc_return(&hdptx->usage_count) > 1)
+ return 0;
+
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status);
+ if (ret)
+ goto dec_usage;
+
+ if (status & HDPTX_O_PLL_LOCK_DONE)
+ dev_warn(hdptx->dev, "PLL locked by unknown consumer!\n");
+
+ if (rate) {
+ ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
+ if (ret)
+ goto dec_usage;
+ }
+
+ return 0;
+
+dec_usage:
+ atomic_dec(&hdptx->usage_count);
+ return ret;
+}
+
+static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force)
+{
+ u32 status;
+ int ret;
+
+ ret = atomic_dec_return(&hdptx->usage_count);
+ if (ret > 0)
+ return 0;
+
+ if (ret < 0) {
+ dev_warn(hdptx->dev, "Usage count underflow!\n");
+ ret = -EINVAL;
+ } else {
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status);
+ if (!ret) {
+ if (status & HDPTX_O_PLL_LOCK_DONE)
+ rk_hdptx_phy_disable(hdptx);
+ return 0;
+ } else if (force) {
+ return 0;
+ }
+ }
+
+ atomic_inc(&hdptx->usage_count);
+ return ret;
+}
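
Together, consumer_get()/consumer_put() form a small refcount around the PLL so that the PHY ops and the clock provider added below can share it: the 0 to 1 transition configures the PLL (when a rate is known) and the 1 to 0 transition powers the PHY down, with @force papering over a failed status read on teardown. An illustrative pairing, error handling trimmed:

	ret = rk_hdptx_phy_consumer_get(hdptx, rate); /* 0 -> 1: PLL configured */
	ret = rk_hdptx_phy_consumer_get(hdptx, 0);    /* 1 -> 2: no-op          */
	rk_hdptx_phy_consumer_put(hdptx, false);      /* 2 -> 1: PLL kept       */
	rk_hdptx_phy_consumer_put(hdptx, false);      /* 1 -> 0: PHY disabled   */
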
+
static int rk_hdptx_phy_power_on(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
- int ret, bus_width = phy_get_bus_width(hdptx->phy);
+ int bus_width = phy_get_bus_width(hdptx->phy);
+ int ret;
+
/*
* FIXME: Temporary workaround to pass pixel_clk_rate
* from the HDMI bridge driver until phy_configure_opts_hdmi
@@ -870,15 +927,13 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n",
__func__, bus_width, rate);
- ret = pm_runtime_resume_and_get(hdptx->dev);
- if (ret) {
- dev_err(hdptx->dev, "Failed to resume phy: %d\n", ret);
+ ret = rk_hdptx_phy_consumer_get(hdptx, rate);
+ if (ret)
return ret;
- }
ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
if (ret)
- pm_runtime_put(hdptx->dev);
+ rk_hdptx_phy_consumer_put(hdptx, true);
return ret;
}
@@ -886,16 +941,8 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
static int rk_hdptx_phy_power_off(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
- u32 val;
- int ret;
- ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
- if (ret == 0 && (val & HDPTX_O_PLL_LOCK_DONE))
- rk_hdptx_phy_disable(hdptx);
-
- pm_runtime_put(hdptx->dev);
-
- return ret;
+ return rk_hdptx_phy_consumer_put(hdptx, false);
}
static const struct phy_ops rk_hdptx_phy_ops = {
@@ -904,6 +951,99 @@ static const struct phy_ops rk_hdptx_phy_ops = {
.owner = THIS_MODULE,
};
+static struct rk_hdptx_phy *to_rk_hdptx_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct rk_hdptx_phy, hw);
+}
+
+static int rk_hdptx_phy_clk_prepare(struct clk_hw *hw)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return rk_hdptx_phy_consumer_get(hdptx, hdptx->rate / 100);
+}
+
+static void rk_hdptx_phy_clk_unprepare(struct clk_hw *hw)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ rk_hdptx_phy_consumer_put(hdptx, true);
+}
+
+static unsigned long rk_hdptx_phy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return hdptx->rate;
+}
+
+static long rk_hdptx_phy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 bit_rate = rate / 100;
+ int i;
+
+ if (rate > HDMI20_MAX_RATE)
+ return rate;
+
+ for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
+ if (bit_rate == ropll_tmds_cfg[i].bit_rate)
+ break;
+
+ if (i == ARRAY_SIZE(ropll_tmds_cfg) &&
+ !rk_hdptx_phy_clk_pll_calc(bit_rate, NULL))
+ return -EINVAL;
+
+ return rate;
+}
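
round_rate() leans on the driver's convention that clock rates are kept in Hz while the ROPLL tables are keyed by bit_rate in units of 100 Hz, hence the rate / 100 conversions here and in prepare()/set_rate(). Worked numbers, under the assumption that the standard 4K@60 TMDS entry exists in ropll_tmds_cfg[]:

	/* clk_set_rate(clk, 594000000):
	 *   bit_rate = 594000000 / 100 = 5940000, matched against
	 *   ropll_tmds_cfg[].bit_rate (table hit assumed for 594 MHz);
	 * anything above HDMI20_MAX_RATE (600 MHz) is passed through untouched.
	 */
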
+
+static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return rk_hdptx_ropll_tmds_cmn_config(hdptx, rate / 100);
+}
+
+static const struct clk_ops hdptx_phy_clk_ops = {
+ .prepare = rk_hdptx_phy_clk_prepare,
+ .unprepare = rk_hdptx_phy_clk_unprepare,
+ .recalc_rate = rk_hdptx_phy_clk_recalc_rate,
+ .round_rate = rk_hdptx_phy_clk_round_rate,
+ .set_rate = rk_hdptx_phy_clk_set_rate,
+};
+
+static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx)
+{
+ struct device *dev = hdptx->dev;
+ const char *name, *pname;
+ struct clk *refclk;
+ int ret, id;
+
+ refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
+ "Failed to get ref clock\n");
+
+ id = of_alias_get_id(dev->of_node, "hdptxphy");
+ name = id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
+ pname = __clk_get_name(refclk);
+
+ hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops,
+ CLK_GET_RATE_NOCACHE);
+
+ ret = devm_clk_hw_register(dev, &hdptx->hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &hdptx->hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
+ return 0;
+}
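
From the consumer side this is an ordinary clock: clk_set_rate() lands in rk_hdptx_phy_clk_set_rate() and clk_prepare_enable() in rk_hdptx_phy_clk_prepare(), which takes a PLL reference via the usage count above. A sketch of a display-controller consumer; the connection id "hdmiphy_pixel" is an assumption for illustration:

	struct clk *phyclk;
	int ret;

	phyclk = devm_clk_get(dev, "hdmiphy_pixel"); /* name assumed, not from the driver */
	if (IS_ERR(phyclk))
		return PTR_ERR(phyclk);

	ret = clk_set_rate(phyclk, 594000000);  /* -> rk_hdptx_phy_clk_set_rate() */
	if (ret)
		return ret;

	ret = clk_prepare_enable(phyclk);       /* -> rk_hdptx_phy_clk_prepare() */
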
+
static int rk_hdptx_phy_runtime_suspend(struct device *dev)
{
struct rk_hdptx_phy *hdptx = dev_get_drvdata(dev);
@@ -976,6 +1116,10 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(hdptx->grf),
"Could not get GRF syscon\n");
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
hdptx->phy = devm_phy_create(dev, NULL, &rk_hdptx_phy_ops);
if (IS_ERR(hdptx->phy))
return dev_err_probe(dev, PTR_ERR(hdptx->phy),
@@ -985,10 +1129,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
phy_set_drvdata(hdptx->phy, hdptx);
phy_set_bus_width(hdptx->phy, 8);
- ret = devm_pm_runtime_enable(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
-
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return dev_err_probe(dev, PTR_ERR(phy_provider),
@@ -998,7 +1138,7 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
reset_control_deassert(hdptx->rsts[RST_CMN].rstc);
reset_control_deassert(hdptx->rsts[RST_INIT].rstc);
- return 0;
+ return rk_hdptx_phy_clk_register(hdptx);
}
static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 9cbf90142950..c421b495eb0f 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -607,7 +607,7 @@ exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd)
reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
reg &= ~SECPMACTL_PMA_REF_FREQ_SEL;
- reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1);
+ reg |= FIELD_PREP(SECPMACTL_PMA_REF_FREQ_SEL, 1);
/* SFR reset */
reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST);
reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL |
@@ -1123,19 +1123,19 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg &= ~SSPPLLCTL_FSEL;
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 7);
break;
case EXYNOS5_FSEL_26MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 6);
break;
case EXYNOS5_FSEL_24MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 2);
break;
case EXYNOS5_FSEL_20MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 1);
break;
case EXYNOS5_FSEL_19MHZ2:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 0);
break;
default:
dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n",
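
FIELD_PREP() and FIELD_PREP_CONST() produce the same shifted value for compile-time-constant arguments; the _CONST variant exists for contexts that require a C constant expression (static initializers, case labels) and does weaker checking, so plain FIELD_PREP() is the better default in ordinary statements like these. A sketch, with an illustrative three-bit mask standing in for the real SSPPLLCTL_FSEL definition:

	#define EXAMPLE_FSEL	GENMASK(2, 0)	/* illustrative field, not the driver's */

	u32 a = FIELD_PREP(EXAMPLE_FSEL, 7);       /* checked helper for runtime code   */
	u32 b = FIELD_PREP_CONST(EXAMPLE_FSEL, 7); /* usable in constant expressions    */
	/* a == b == 0x7 */
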
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 673449607c02..3bf3aff4b1c7 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -7,6 +7,7 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -644,7 +645,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
struct device_node *node = am654_phy->of_node;
struct device *dev = am654_phy->dev;
struct serdes_am654_clk_mux *mux;
- struct device_node *regmap_node;
const char **parent_names;
struct clk_init_data *init;
unsigned int num_parents;
@@ -652,7 +652,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
const __be32 *addr;
unsigned int reg;
struct clk *clk;
- int ret = 0;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -660,41 +659,30 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
init = &mux->clk_data;
- regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
- if (!regmap_node) {
- dev_err(dev, "Fail to get serdes-clk node\n");
- ret = -ENODEV;
- goto out_put_node;
- }
+ struct device_node *regmap_node __free(device_node) =
+ of_parse_phandle(node, "ti,serdes-clk", 0);
+ if (!regmap_node)
+ return dev_err_probe(dev, -ENODEV, "Failed to get serdes-clk node\n");
regmap = syscon_node_to_regmap(regmap_node->parent);
- if (IS_ERR(regmap)) {
- dev_err(dev, "Fail to get Syscon regmap\n");
- ret = PTR_ERR(regmap);
- goto out_put_node;
- }
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Fail to get Syscon regmap\n");
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 2) {
- dev_err(dev, "SERDES clock must have parents\n");
- ret = -EINVAL;
- goto out_put_node;
- }
+ if (num_parents < 2)
+ return dev_err_probe(dev, -EINVAL, "SERDES clock must have parents\n");
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
- if (!parent_names) {
- ret = -ENOMEM;
- goto out_put_node;
- }
+ if (!parent_names)
+ return -ENOMEM;
of_clk_parent_fill(node, parent_names, num_parents);
addr = of_get_address(regmap_node, 0, NULL, NULL);
- if (!addr) {
- ret = -EINVAL;
- goto out_put_node;
- }
+ if (!addr)
+ return -EINVAL;
reg = be32_to_cpu(*addr);
@@ -710,16 +698,12 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto out_put_node;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
am654_phy->clks[clock_num] = clk;
-out_put_node:
- of_node_put(regmap_node);
- return ret;
+ return 0;
}
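
The rewrite is enabled by the scope-based cleanup support from <linux/cleanup.h>: tagging the node pointer with __free(device_node) registers of_node_put() to run automatically when the variable leaves scope, so every early return above is leak-free without the old goto ladder. A minimal sketch of the pattern; the property name is illustrative:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static int example_get_ref(struct device_node *node)
	{
		struct device_node *ref __free(device_node) =
			of_parse_phandle(node, "some-phandle", 0);

		if (!ref)
			return -ENODEV;

		/* use ref; of_node_put(ref) runs on every return path */
		return 0;
	}
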
static const struct of_device_id serdes_am654_id_table[] = {
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index b30bf740e2e0..103b266fec77 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -468,11 +468,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
priv->regmap = device_node_to_regmap(node);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
- dev_err(dev, "Failed to get syscon %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ "Failed to get syscon\n");
priv->no_offset = true;
}
@@ -485,11 +483,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->phy_provider =
devm_of_phy_provider_register(dev,
phy_gmii_sel_of_xlate);
- if (IS_ERR(priv->phy_provider)) {
- ret = PTR_ERR(priv->phy_provider);
- dev_err(dev, "Failed to create phy provider %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->phy_provider))
+ return dev_err_probe(dev, PTR_ERR(priv->phy_provider),
+ "Failed to create phy provider\n");
return 0;
}
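
dev_err_probe() folds the log-and-return dance into one statement, prints the error code symbolically, and stays quiet for -EPROBE_DEFER (recording a deferral reason instead), which is why the numeric %d disappears from the messages. The shape of the conversion:

	/* before */
	ret = PTR_ERR(priv->regmap);
	dev_err(dev, "Failed to get syscon %d\n", ret);
	return ret;

	/* after: one statement, -EPROBE_DEFER aware */
	return dev_err_probe(dev, PTR_ERR(priv->regmap), "Failed to get syscon\n");
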
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 7f626c597025..a6c0c5607ffd 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1179,14 +1179,13 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
clk_mux_sel[i].table);
+ of_node_put(clk_node);
if (ret) {
dev_err_probe(dev, ret, "Failed to register %s clock\n",
node_name);
- of_node_put(clk_node);
goto err;
}
- of_node_put(clk_node);
}
for (i = 0; i < wiz->clk_div_sel_num; i++) {
@@ -1199,14 +1198,12 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
+ of_node_put(clk_node);
if (ret) {
dev_err_probe(dev, ret, "Failed to register %s clock\n",
node_name);
- of_node_put(clk_node);
goto err;
}
-
- of_node_put(clk_node);
}
return 0;
@@ -1407,7 +1404,7 @@ MODULE_DEVICE_TABLE(of, wiz_id_table);
static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
{
- struct device_node *serdes, *subnode;
+ struct device_node *serdes;
serdes = of_get_child_by_name(dev->of_node, "serdes");
if (!serdes) {
@@ -1415,7 +1412,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
return -EINVAL;
}
- for_each_child_of_node(serdes, subnode) {
+ for_each_child_of_node_scoped(serdes, subnode) {
u32 reg, num_lanes = 1, phy_type = PHY_NONE;
int ret, i;
@@ -1425,7 +1422,6 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
ret = of_property_read_u32(subnode, "reg", &reg);
if (ret) {
- of_node_put(subnode);
dev_err(dev,
"%s: Reading \"reg\" from \"%s\" failed: %d\n",
__func__, subnode->name, ret);
@@ -1578,8 +1574,8 @@ static int wiz_probe(struct platform_device *pdev)
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
- phy_reset_dev->ops = &wiz_phy_reset_ops,
- phy_reset_dev->owner = THIS_MODULE,
+ phy_reset_dev->ops = &wiz_phy_reset_ops;
+ phy_reset_dev->owner = THIS_MODULE;
phy_reset_dev->of_node = node;
/* Reset for each of the lane and one for the entire SERDES */
phy_reset_dev->nr_resets = num_lanes + 1;
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 751fecd466e3..c3ae9d7948d7 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -411,12 +411,6 @@ static int tusb1210_psy_get_prop(struct power_supply *psy,
return 0;
}
-static const enum power_supply_usb_type tusb1210_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
-};
-
static const enum power_supply_property tusb1210_psy_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
@@ -426,8 +420,9 @@ static const enum power_supply_property tusb1210_psy_props[] = {
static const struct power_supply_desc tusb1210_psy_desc = {
.name = "tusb1211-charger-detect",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = tusb1210_psy_usb_types,
- .num_usb_types = ARRAY_SIZE(tusb1210_psy_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = tusb1210_psy_props,
.num_properties = ARRAY_SIZE(tusb1210_psy_props),
.get_property = tusb1210_psy_get_prop,
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7e4f93a3bc7a..18306569ef50 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -213,6 +213,21 @@ config PINCTRL_EQUILIBRIUM
desired pin functions, configure GPIO attributes for LGM SoC pins.
Pin muxing and pin config settings are retrieved from device tree.
+config PINCTRL_EYEQ5
+ bool "Mobileye EyeQ5 pinctrl driver"
+ depends on OF
+ depends on MACH_EYEQ5 || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ select AUXILIARY_BUS
+ default MACH_EYEQ5
+ help
+ Pin controller driver for the Mobileye EyeQ5 platform. It handles
+ both pin configuration and pin muxing, but does not handle GPIO.
+
+ Pin muxing supports two functions per pin: the first is always GPIO,
+ the second is pin-dependent. Pin configuration covers bias and drive
+ strength.
+
config PINCTRL_GEMINI
bool
depends on ARCH_GEMINI
@@ -583,6 +598,7 @@ source "drivers/pinctrl/qcom/Kconfig"
source "drivers/pinctrl/realtek/Kconfig"
source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
+source "drivers/pinctrl/sophgo/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sprd/Kconfig"
source "drivers/pinctrl/starfive/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index cc809669405a..3c2355150961 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
+obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
@@ -73,6 +74,7 @@ obj-y += qcom/
obj-$(CONFIG_ARCH_REALTEK) += realtek/
obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
+obj-y += sophgo/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_SOC_STARFIVE) += starfive/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 184641e221d4..cc1fe0555e19 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1280,6 +1280,7 @@ static const struct of_device_id bcm2835_pinctrl_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, bcm2835_pinctrl_match);
static int bcm2835_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index 898b197c3738..2932d7aba725 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1063,12 +1063,9 @@ static int madera_pin_probe(struct platform_device *pdev)
if (pdata->gpio_configs) {
ret = pinctrl_register_mappings(pdata->gpio_configs,
pdata->n_gpio_configs);
- if (ret) {
- dev_err(priv->dev,
- "Failed to register pdata mappings (%d)\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to register pdata mappings\n");
}
ret = pinctrl_enable(priv->pctl);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 314ab93d7691..4061890a1748 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1971,7 +1971,7 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
static void pinctrl_init_debugfs(void)
{
debugfs_root = debugfs_create_dir("pinctrl", NULL);
- if (IS_ERR(debugfs_root) || !debugfs_root) {
+ if (IS_ERR(debugfs_root)) {
pr_warn("failed to create debugfs directory\n");
debugfs_root = NULL;
return;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
index 2991047535bc..8f15c4c4dc44 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
@@ -130,7 +130,7 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_DAISY_CFG, input_val);
}
- configs = kmemdup(cfg, ncfg * sizeof(unsigned long), GFP_KERNEL);
+ configs = kmemdup_array(cfg, ncfg, sizeof(unsigned long), GFP_KERNEL);
new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_id);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9c2680df082c..d05c2c478e79 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -804,14 +804,14 @@ int imx_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(imx_pinctrl_probe);
-static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
+static int imx_pinctrl_suspend(struct device *dev)
{
struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
return pinctrl_force_sleep(ipctl->pctl);
}
-static int __maybe_unused imx_pinctrl_resume(struct device *dev)
+static int imx_pinctrl_resume(struct device *dev)
{
struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
@@ -819,8 +819,7 @@ static int __maybe_unused imx_pinctrl_resume(struct device *dev)
}
const struct dev_pm_ops imx_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend,
- imx_pinctrl_resume)
+ LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend, imx_pinctrl_resume)
};
EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
index 529eebe46298..e59e4fc80193 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -341,7 +341,7 @@ static struct platform_driver imx8mq_pinctrl_driver = {
.driver = {
.name = "imx8mq-pinctrl",
.of_match_table = imx8mq_pinctrl_of_match,
- .pm = &imx_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&imx_pinctrl_pm_ops),
.suppress_bind_attrs = true,
},
.probe = imx8mq_pinctrl_probe,
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 4e87f5b875c0..4533c4d0a9e7 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -560,9 +560,10 @@ static DEFINE_RAW_SPINLOCK(byt_lock);
static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
int reg)
{
- struct intel_community *comm = intel_get_community(vg, offset);
+ const struct intel_community *comm;
u32 reg_offset;
+ comm = intel_get_community(vg, offset);
if (!comm)
return NULL;
@@ -1541,10 +1542,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
}
ret = devm_gpiochip_add_data(vg->dev, gc, vg);
- if (ret) {
+ if (ret)
dev_err(vg->dev, "failed adding byt-gpio chip\n");
- return ret;
- }
return ret;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 89bd7ce6711a..928607a21d36 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -70,6 +70,12 @@
#define PADCFG0_PMODE_SHIFT 10
#define PADCFG0_PMODE_MASK GENMASK(13, 10)
#define PADCFG0_PMODE_GPIO 0
+#define PADCFG0_GPIODIS_SHIFT 8
+#define PADCFG0_GPIODIS_MASK GENMASK(9, 8)
+#define PADCFG0_GPIODIS_NONE 0
+#define PADCFG0_GPIODIS_OUTPUT 1
+#define PADCFG0_GPIODIS_INPUT 2
+#define PADCFG0_GPIODIS_FULL 3
#define PADCFG0_GPIORXDIS BIT(9)
#define PADCFG0_GPIOTXDIS BIT(8)
#define PADCFG0_GPIORXSTATE BIT(1)
@@ -108,13 +114,30 @@ struct intel_community_context {
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
#define padgroup_offset(g, p) ((p) - (g)->base)
-struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin)
+#define for_each_intel_pin_community(pctrl, community) \
+ for (unsigned int __ci = 0; \
+ __ci < pctrl->ncommunities && (community = &pctrl->communities[__ci]); \
+ __ci++)
+
+#define for_each_intel_community_pad_group(community, grp) \
+ for (unsigned int __gi = 0; \
+ __gi < community->ngpps && (grp = &community->gpps[__gi]); \
+ __gi++)
+
+#define for_each_intel_pad_group(pctrl, community, grp) \
+ for_each_intel_pin_community(pctrl, community) \
+ for_each_intel_community_pad_group(community, grp)
+
+#define for_each_intel_gpio_group(pctrl, community, grp) \
+ for_each_intel_pad_group(pctrl, community, grp) \
+ if (grp->gpio_base == INTEL_GPIO_BASE_NOMAP) {} else
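
The `{} else` tail on for_each_intel_gpio_group() is the standard trick for a filtering iterator macro: the statement following the macro becomes the else branch, so groups with INTEL_GPIO_BASE_NOMAP are skipped while the call site still reads like a plain for loop. Usage sketch, mirroring the converted intel_gpio_ngpio() below:

	const struct intel_community *community;
	const struct intel_padgroup *grp;
	unsigned int ngpio = 0;

	for_each_intel_gpio_group(pctrl, community, grp)
		ngpio = max(ngpio, grp->gpio_base + grp->size);
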
+
+const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl,
+ unsigned int pin)
{
- struct intel_community *community;
- int i;
+ const struct intel_community *community;
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
+ for_each_intel_pin_community(pctrl, community) {
if (pin >= community->pin_base &&
pin < community->pin_base + community->npins)
return community;
@@ -129,11 +152,9 @@ static const struct intel_padgroup *
intel_community_get_padgroup(const struct intel_community *community,
unsigned int pin)
{
- int i;
-
- for (i = 0; i < community->ngpps; i++) {
- const struct intel_padgroup *padgrp = &community->gpps[i];
+ const struct intel_padgroup *padgrp;
+ for_each_intel_community_pad_group(community, padgrp) {
if (pin >= padgrp->base && pin < padgrp->base + padgrp->size)
return padgrp;
}
@@ -161,7 +182,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl,
return community->pad_regs + reg + padno * nregs * 4;
}
-static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_owned_by_host(const struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -186,7 +207,7 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pi
return !(readl(padown) & PADOWN_MASK(gpp_offset));
}
-static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_acpi_mode(const struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -212,7 +233,6 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
/**
* enum - Locking variants of the pad configuration
- *
* @PAD_UNLOCKED: pad is fully controlled by the configuration registers
* @PAD_LOCKED: pad configuration registers, except TX state, are locked
* @PAD_LOCKED_TX: pad configuration TX state is locked
@@ -229,9 +249,9 @@ enum {
PAD_LOCKED_FULL = PAD_LOCKED | PAD_LOCKED_TX,
};
-static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
+static int intel_pad_locked(const struct intel_pinctrl *pctrl, unsigned int pin)
{
- struct intel_community *community;
+ const struct intel_community *community;
const struct intel_padgroup *padgrp;
unsigned int offset, gpp_offset;
u32 value;
@@ -267,19 +287,19 @@ static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
return ret;
}
-static bool intel_pad_is_unlocked(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_is_unlocked(const struct intel_pinctrl *pctrl, unsigned int pin)
{
return (intel_pad_locked(pctrl, pin) & PAD_LOCKED) == PAD_UNLOCKED;
}
-static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_usable(const struct intel_pinctrl *pctrl, unsigned int pin)
{
return intel_pad_owned_by_host(pctrl, pin) && intel_pad_is_unlocked(pctrl, pin);
}
int intel_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->ngroups;
}
@@ -287,7 +307,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_groups_count, PINCTRL_INTEL);
const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->groups[group].grp.name;
}
@@ -296,7 +316,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_group_name, PINCTRL_INTEL);
int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
const unsigned int **pins, unsigned int *npins)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*pins = pctrl->soc->groups[group].grp.pins;
*npins = pctrl->soc->groups[group].grp.npins;
@@ -364,7 +384,7 @@ static const struct pinctrl_ops intel_pinctrl_ops = {
int intel_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->nfunctions;
}
@@ -372,7 +392,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_functions_count, PINCTRL_INTEL);
const char *intel_get_function_name(struct pinctrl_dev *pctldev, unsigned int function)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->functions[function].func.name;
}
@@ -381,7 +401,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_function_name, PINCTRL_INTEL);
int intel_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function,
const char * const **groups, unsigned int * const ngroups)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*groups = pctrl->soc->functions[function].func.groups;
*ngroups = pctrl->soc->functions[function].func.ngroups;
@@ -429,19 +449,49 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
-{
- u32 value;
+/**
+ * enum - Possible pad physical connections
+ * @PAD_CONNECT_NONE: pad is fully disconnected
+ * @PAD_CONNECT_INPUT: pad is in input only mode
+ * @PAD_CONNECT_OUTPUT: pad is in output only mode
+ * @PAD_CONNECT_FULL: pad is fully connected
+ */
+enum {
+ PAD_CONNECT_NONE = 0,
+ PAD_CONNECT_INPUT = 1,
+ PAD_CONNECT_OUTPUT = 2,
+ PAD_CONNECT_FULL = PAD_CONNECT_INPUT | PAD_CONNECT_OUTPUT,
+};
- value = readl(padcfg0);
- if (input) {
+static int __intel_gpio_get_direction(u32 value)
+{
+ switch ((value & PADCFG0_GPIODIS_MASK) >> PADCFG0_GPIODIS_SHIFT) {
+ case PADCFG0_GPIODIS_FULL:
+ return PAD_CONNECT_NONE;
+ case PADCFG0_GPIODIS_OUTPUT:
+ return PAD_CONNECT_INPUT;
+ case PADCFG0_GPIODIS_INPUT:
+ return PAD_CONNECT_OUTPUT;
+ case PADCFG0_GPIODIS_NONE:
+ return PAD_CONNECT_FULL;
+ default:
+ return -ENOTSUPP;
+ }
+}
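
Note the deliberate inversion: the register field records which buffers are disabled, while the enum describes what remains connected, so the two partial encodings swap places:

	/* PADCFG0 GPIODIS field    ->  pad connection
	 *   GPIODIS_NONE   (0b00)  ->  PAD_CONNECT_FULL
	 *   GPIODIS_OUTPUT (0b01)  ->  PAD_CONNECT_INPUT
	 *   GPIODIS_INPUT  (0b10)  ->  PAD_CONNECT_OUTPUT
	 *   GPIODIS_FULL   (0b11)  ->  PAD_CONNECT_NONE
	 */
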
+
+static u32 __intel_gpio_set_direction(u32 value, bool input, bool output)
+{
+ if (input)
value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
- } else {
- value &= ~PADCFG0_GPIOTXDIS;
+ else
value |= PADCFG0_GPIORXDIS;
- }
- writel(value, padcfg0);
+
+ if (output)
+ value &= ~PADCFG0_GPIOTXDIS;
+ else
+ value |= PADCFG0_GPIOTXDIS;
+
+ return value;
}
static int __intel_gpio_get_gpio_mode(u32 value)
@@ -465,8 +515,7 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value |= PADCFG0_PMODE_GPIO;
/* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
+ value = __intel_gpio_set_direction(value, true, false);
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
@@ -512,12 +561,18 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
+ u32 value;
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
guard(raw_spinlock_irqsave)(&pctrl->lock);
- __intel_gpio_set_direction(padcfg0, input);
+ value = readl(padcfg0);
+ if (input)
+ value = __intel_gpio_set_direction(value, true, false);
+ else
+ value = __intel_gpio_set_direction(value, false, true);
+ writel(value, padcfg0);
return 0;
}
@@ -612,6 +667,23 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return 0;
}
+static int intel_config_get_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin,
+ enum pin_config_param param, u32 *arg)
+{
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ scoped_guard(raw_spinlock_irqsave, &pctrl->lock)
+ value = readl(padcfg0);
+
+ if (__intel_gpio_get_direction(value) != PAD_CONNECT_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 *arg)
{
@@ -655,6 +727,12 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = intel_config_get_high_impedance(pctrl, pin, param, &arg);
+ if (ret)
+ return ret;
+ break;
+
case PIN_CONFIG_INPUT_DEBOUNCE:
ret = intel_config_get_debounce(pctrl, pin, param, &arg);
if (ret)
@@ -753,11 +831,34 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return 0;
}
+static void intel_gpio_set_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin)
+{
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
+ value = readl(padcfg0);
+ value = __intel_gpio_set_direction(value, false, false);
+ writel(value, padcfg0);
+}
+
static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
unsigned int pin, unsigned int debounce)
{
void __iomem *padcfg0, *padcfg2;
u32 value0, value2;
+ unsigned long v;
+
+ if (debounce) {
+ v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
+ if (v < 3 || v > 15)
+ return -EINVAL;
+ } else {
+ v = 0;
+ }
padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
if (!padcfg2)
@@ -770,21 +871,15 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
value0 = readl(padcfg0);
value2 = readl(padcfg2);
- /* Disable glitch filter and debouncer */
- value0 &= ~PADCFG0_PREGFRXSEL;
- value2 &= ~(PADCFG2_DEBEN | PADCFG2_DEBOUNCE_MASK);
-
- if (debounce) {
- unsigned long v;
-
- v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
- if (v < 3 || v > 15)
- return -EINVAL;
-
+ value2 = (value2 & ~PADCFG2_DEBOUNCE_MASK) | (v << PADCFG2_DEBOUNCE_SHIFT);
+ if (v) {
/* Enable glitch filter and debouncer */
value0 |= PADCFG0_PREGFRXSEL;
- value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
value2 |= PADCFG2_DEBEN;
+ } else {
+ /* Disable glitch filter and debouncer */
+ value0 &= ~PADCFG0_PREGFRXSEL;
+ value2 &= ~PADCFG2_DEBEN;
}
writel(value0, padcfg0);
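
Hoisting the range check means invalid debounce values are now rejected before any register is touched. Worked numbers, assuming the driver's usual 31.25 us debounce tick (DEBOUNCE_PERIOD_NSEC == 31250 is an assumption here):

	/* debounce = 1000 us:
	 *   1000 * NSEC_PER_USEC / 31250 = 32
	 *   v = order_base_2(32) = 5        (valid window: 3..15)
	 * -> PADCFG2_DEBOUNCE = 5, i.e. 2^5 ticks = 1 ms of debounce
	 */
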
@@ -812,6 +907,10 @@ static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ intel_gpio_set_high_impedance(pctrl, pin);
+ break;
+
case PIN_CONFIG_INPUT_DEBOUNCE:
ret = intel_config_set_debounce(pctrl, pin,
pinconf_to_config_argument(configs[i]));
@@ -854,34 +953,21 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
* Return: a pin number and pointers to the community and pad group, which
* the pin belongs to, or negative error code if translation can't be done.
*/
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
+static int intel_gpio_to_pin(const struct intel_pinctrl *pctrl, unsigned int offset,
const struct intel_community **community,
const struct intel_padgroup **padgrp)
{
- int i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *comm = &pctrl->communities[i];
- int j;
-
- for (j = 0; j < comm->ngpps; j++) {
- const struct intel_padgroup *pgrp = &comm->gpps[j];
+ const struct intel_community *comm;
+ const struct intel_padgroup *grp;
- if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
+ for_each_intel_gpio_group(pctrl, comm, grp) {
+ if (offset >= grp->gpio_base && offset < grp->gpio_base + grp->size) {
+ if (community)
+ *community = comm;
+ if (padgrp)
+ *padgrp = grp;
- if (offset >= pgrp->gpio_base &&
- offset < pgrp->gpio_base + pgrp->size) {
- int pin;
-
- pin = pgrp->base + offset - pgrp->gpio_base;
- if (community)
- *community = comm;
- if (padgrp)
- *padgrp = pgrp;
-
- return pin;
- }
+ return grp->base + offset - grp->gpio_base;
}
}
@@ -897,7 +983,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
*
* Return: a GPIO offset, or negative error code if translation can't be done.
*/
-static int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
+static int intel_pin_to_gpio(const struct intel_pinctrl *pctrl, int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -929,7 +1015,7 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset)
return -EINVAL;
padcfg0 = readl(reg);
- if (!(padcfg0 & PADCFG0_GPIOTXDIS))
+ if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT)
return !!(padcfg0 & PADCFG0_GPIOTXSTATE);
return !!(padcfg0 & PADCFG0_GPIORXSTATE);
@@ -982,10 +1068,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
- if (padcfg0 & PADCFG0_GPIOTXDIS)
- return GPIO_LINE_DIRECTION_IN;
+ if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1171,15 +1257,16 @@ static const struct irq_chip intel_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
+static irqreturn_t intel_gpio_irq(int irq, void *data)
{
- struct gpio_chip *gc = &pctrl->chip;
- unsigned int gpp;
+ const struct intel_community *community;
+ const struct intel_padgroup *padgrp;
+ struct intel_pinctrl *pctrl = data;
int ret = 0;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ /* Need to check all communities for pending interrupts */
+ for_each_intel_pad_group(pctrl, community, padgrp) {
+ struct gpio_chip *gc = &pctrl->chip;
unsigned long pending, enabled;
unsigned int gpp, gpp_offset;
void __iomem *reg, *is;
@@ -1203,36 +1290,17 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
ret += pending ? 1 : 0;
}
- return ret;
-}
-
-static irqreturn_t intel_gpio_irq(int irq, void *data)
-{
- const struct intel_community *community;
- struct intel_pinctrl *pctrl = data;
- unsigned int i;
- int ret = 0;
-
- /* Need to check all communities for pending interrupts */
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
- ret += intel_gpio_community_irq_handler(pctrl, community);
- }
-
return IRQ_RETVAL(ret);
}
static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
{
- int i;
+ const struct intel_community *community;
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *community;
+ for_each_intel_pin_community(pctrl, community) {
void __iomem *reg, *is;
unsigned int gpp;
- community = &pctrl->communities[i];
-
for (gpp = 0; gpp < community->ngpps; gpp++) {
reg = community->regs + community->ie_offset + gpp * 4;
is = community->regs + community->is_offset + gpp * 4;
@@ -1257,36 +1325,17 @@ static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
return 0;
}
-static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
-{
- int ret = 0, i;
-
- for (i = 0; i < community->ngpps; i++) {
- const struct intel_padgroup *gpp = &community->gpps[i];
-
- if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
-
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
- gpp->gpio_base, gpp->base,
- gpp->size);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int ret, i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- struct intel_community *community = &pctrl->communities[i];
+ const struct intel_community *community;
+ const struct intel_padgroup *grp;
+ int ret;
- ret = intel_gpio_add_community_ranges(pctrl, community);
+ for_each_intel_gpio_group(pctrl, community, grp) {
+ ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+ grp->gpio_base, grp->base,
+ grp->size);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
return ret;
@@ -1299,20 +1348,12 @@ static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
+ const struct intel_padgroup *grp;
unsigned int ngpio = 0;
- int i, j;
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
- for (j = 0; j < community->ngpps; j++) {
- const struct intel_padgroup *gpp = &community->gpps[j];
-
- if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
-
- if (gpp->gpio_base + gpp->size > ngpio)
- ngpio = gpp->gpio_base + gpp->size;
- }
+ for_each_intel_gpio_group(pctrl, community, grp) {
+ if (grp->gpio_base + grp->size > ngpio)
+ ngpio = grp->gpio_base + grp->size;
}
return ngpio;
@@ -1682,7 +1723,8 @@ EXPORT_SYMBOL_NS_GPL(intel_pinctrl_get_soc_data, PINCTRL_INTEL);
static bool __intel_gpio_is_direct_irq(u32 value)
{
- return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ return (value & PADCFG0_GPIROUTIOXAPIC) &&
+ (__intel_gpio_get_direction(value) == PAD_CONNECT_INPUT) &&
(__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 6981e2fab93f..4d4e1257afdf 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -264,7 +264,8 @@ int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
extern const struct dev_pm_ops intel_pinctrl_pm_ops;
-struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin);
+const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl,
+ unsigned int pin);
int intel_get_groups_count(struct pinctrl_dev *pctldev);
const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group);
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 1fb0bba8b386..bcce97f3b897 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -211,7 +211,7 @@ static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
int reg)
{
struct intel_pinctrl *lg = gpiochip_get_data(chip);
- struct intel_community *comm;
+ const struct intel_community *comm;
int reg_offset;
comm = intel_get_community(lg, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
index cc44890c6699..885fa3b0d6d9 100644
--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -584,6 +584,7 @@ static const struct intel_pinctrl_soc_data mtls_soc_data = {
};
static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
+ { "INTC105E", (kernel_ulong_t)&mtlp_soc_data },
{ "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
{ "INTC1082", (kernel_ulong_t)&mtls_soc_data },
{ }
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index e12316c42698..87e958d827bf 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -1044,11 +1044,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev)
hw->nbase = hw->soc->nbase_names;
- if (of_find_property(hw->dev->of_node,
- "mediatek,rsel-resistance-in-si-unit", NULL))
- hw->rsel_si_unit = true;
- else
- hw->rsel_si_unit = false;
+ hw->rsel_si_unit = of_property_read_bool(hw->dev->of_node,
+ "mediatek,rsel-resistance-in-si-unit");
spin_lock_init(&hw->lock);
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
index 04f1e87bae99..776d32465ab9 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
@@ -375,7 +375,7 @@ static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_3 };
static const unsigned int gen_clk_a4_pins[] = { GPIOA_4 };
static const unsigned int clk12_24_a_pins[] = { GPIOA_5 };
-static struct meson_pmx_group c3_periphs_groups[] = {
+static const struct meson_pmx_group c3_periphs_groups[] = {
GPIO_GROUP(GPIOE_0),
GPIO_GROUP(GPIOE_1),
GPIO_GROUP(GPIOE_2),
@@ -987,7 +987,7 @@ static const char * const lcd_groups[] = {
"lcd_clk_a", "lcd_clk_x", "lcd_hs", "lcd_vs",
};
-static struct meson_pmx_func c3_periphs_functions[] = {
+static const struct meson_pmx_func c3_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(uart_a),
FUNCTION(uart_b),
@@ -1036,7 +1036,7 @@ static struct meson_pmx_func c3_periphs_functions[] = {
FUNCTION(lcd),
};
-static struct meson_bank c3_periphs_banks[] = {
+static const struct meson_bank c3_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("X", GPIOX_0, GPIOX_13, 40, 53,
0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0),
@@ -1054,7 +1054,7 @@ static struct meson_bank c3_periphs_banks[] = {
0x73, 0, 0x74, 0, 0x72, 0, 0x71, 0, 0x70, 0, 0x77, 0),
};
-static struct meson_pmx_bank c3_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank c3_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("B", GPIOB_0, GPIOB_14, 0x00, 0),
BANK_PMX("X", GPIOX_0, GPIOX_13, 0x03, 0),
@@ -1065,12 +1065,12 @@ static struct meson_pmx_bank c3_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x02, 0),
};
-static struct meson_axg_pmx_data c3_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data c3_periphs_pmx_banks_data = {
.pmx_banks = c3_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(c3_periphs_pmx_banks),
};
-static struct meson_pinctrl_data c3_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data c3_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = c3_periphs_pins,
.groups = c3_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
index 0aed5de3f068..cfd98b9dcb68 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
@@ -535,7 +535,7 @@ static const unsigned int i2c0_sck_h_pins[] = { GPIOH_7 };
/* Bank H func3 */
static const unsigned int pcieck_reqn_h_pins[] = { GPIOH_2 };
-static struct meson_pmx_group t7_periphs_groups[] = {
+static const struct meson_pmx_group t7_periphs_groups[] = {
GPIO_GROUP(GPIOB_0),
GPIO_GROUP(GPIOB_1),
GPIO_GROUP(GPIOB_2),
@@ -1443,7 +1443,7 @@ static const char * const mic_mute_groups[] = {
"mic_mute_key", "mic_mute_led",
};
-static struct meson_pmx_func t7_periphs_functions[] = {
+static const struct meson_pmx_func t7_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -1524,7 +1524,7 @@ static struct meson_pmx_func t7_periphs_functions[] = {
FUNCTION(mic_mute),
};
-static struct meson_bank t7_periphs_banks[] = {
+static const struct meson_bank t7_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("D", GPIOD_0, GPIOD_12, 57, 69,
0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0),
@@ -1552,7 +1552,7 @@ static struct meson_bank t7_periphs_banks[] = {
0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0),
};
-static struct meson_pmx_bank t7_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank t7_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("D", GPIOD_0, GPIOD_12, 0x0a, 0),
BANK_PMX("E", GPIOE_0, GPIOE_6, 0x0c, 0),
@@ -1568,12 +1568,12 @@ static struct meson_pmx_bank t7_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x09, 0),
};
-static struct meson_axg_pmx_data t7_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data t7_periphs_pmx_banks_data = {
.pmx_banks = t7_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(t7_periphs_pmx_banks),
};
-static struct meson_pinctrl_data t7_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data t7_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = t7_periphs_pins,
.groups = t7_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index d2ac9ca72a3e..20c4323d4223 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -339,7 +339,7 @@ static const unsigned int tst_out11_pins[] = { GPIOA_11 };
static const unsigned int mute_key_pins[] = { GPIOA_4 };
static const unsigned int mute_en_pins[] = { GPIOA_5 };
-static struct meson_pmx_group meson_a1_periphs_groups[] = {
+static const struct meson_pmx_group meson_a1_periphs_groups[] = {
GPIO_GROUP(GPIOP_0),
GPIO_GROUP(GPIOP_1),
GPIO_GROUP(GPIOP_2),
@@ -832,7 +832,7 @@ static const char * const mute_groups[] = {
"mute_key", "mute_en",
};
-static struct meson_pmx_func meson_a1_periphs_functions[] = {
+static const struct meson_pmx_func meson_a1_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(psram),
FUNCTION(pwm_a),
@@ -875,7 +875,7 @@ static struct meson_pmx_func meson_a1_periphs_functions[] = {
FUNCTION(mute),
};
-static struct meson_bank meson_a1_periphs_banks[] = {
+static const struct meson_bank meson_a1_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
@@ -889,7 +889,7 @@ static struct meson_bank meson_a1_periphs_banks[] = {
0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
};
-static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
@@ -898,12 +898,12 @@ static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
};
-static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
.pmx_banks = meson_a1_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
};
-static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_a1_periphs_pins,
.groups = meson_a1_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
index cad411d90727..00c3829216d6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
@@ -27,10 +27,10 @@
static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc,
unsigned int pin,
- struct meson_pmx_bank **bank)
+ const struct meson_pmx_bank **bank)
{
int i;
- struct meson_axg_pmx_data *pmx = pc->data->pmx_data;
+ const struct meson_axg_pmx_data *pmx = pc->data->pmx_data;
for (i = 0; i < pmx->num_pmx_banks; i++)
if (pin >= pmx->pmx_banks[i].first &&
@@ -42,7 +42,7 @@ static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc,
return -EINVAL;
}
-static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank,
+static int meson_pmx_calc_reg_and_offset(const struct meson_pmx_bank *bank,
unsigned int pin, unsigned int *reg,
unsigned int *offset)
{
@@ -59,10 +59,10 @@ static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank,
static int meson_axg_pmx_update_function(struct meson_pinctrl *pc,
unsigned int pin, unsigned int func)
{
+ const struct meson_pmx_bank *bank;
int ret;
int reg;
int offset;
- struct meson_pmx_bank *bank;
ret = meson_axg_pmx_get_bank(pc, pin, &bank);
if (ret)
@@ -82,8 +82,8 @@ static int meson_axg_pmx_set_mux(struct pinctrl_dev *pcdev,
int i;
int ret;
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
+ const struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ const struct meson_pmx_group *group = &pc->data->groups[group_num];
struct meson_pmx_axg_data *pmx_data =
(struct meson_pmx_axg_data *)group->data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 67147ebaef1b..63b9d471e980 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -17,7 +17,7 @@ struct meson_pmx_bank {
};
struct meson_axg_pmx_data {
- struct meson_pmx_bank *pmx_banks;
+ const struct meson_pmx_bank *pmx_banks;
unsigned int num_pmx_banks;
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 8f4e7154b73f..fa2df4896390 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -352,7 +352,7 @@ static const unsigned int tdmb_dout2_pins[] = {GPIOA_12};
static const unsigned int tdmb_din3_pins[] = {GPIOA_13};
static const unsigned int tdmb_dout3_pins[] = {GPIOA_13};
-static struct meson_pmx_group meson_axg_periphs_groups[] = {
+static const struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -675,7 +675,7 @@ static const unsigned int jtag_ao_tms_pins[] = {GPIOAO_7};
/* gen_clk */
static const unsigned int gen_clk_ee_pins[] = {GPIOAO_13};
-static struct meson_pmx_group meson_axg_aobus_groups[] = {
+static const struct meson_pmx_group meson_axg_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -955,7 +955,7 @@ static const char * const gen_clk_ee_groups[] = {
"gen_clk_ee",
};
-static struct meson_pmx_func meson_axg_periphs_functions[] = {
+static const struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -987,7 +987,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(tdmc),
};
-static struct meson_pmx_func meson_axg_aobus_functions[] = {
+static const struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao_a),
FUNCTION(uart_ao_b),
@@ -1003,7 +1003,7 @@ static struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(gen_clk_ee),
};
-static struct meson_bank meson_axg_periphs_banks[] = {
+static const struct meson_bank meson_axg_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("Z", GPIOZ_0, GPIOZ_10, 14, 24, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
BANK("BOOT", BOOT_0, BOOT_14, 25, 39, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
@@ -1012,12 +1012,12 @@ static struct meson_bank meson_axg_periphs_banks[] = {
BANK("Y", GPIOY_0, GPIOY_15, 84, 99, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
};
-static struct meson_bank meson_axg_aobus_banks[] = {
+static const struct meson_bank meson_axg_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("Z", GPIOZ_0, GPIOZ_10, 0x2, 0),
BANK_PMX("BOOT", BOOT_0, BOOT_14, 0x0, 0),
@@ -1026,21 +1026,21 @@ static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
BANK_PMX("Y", GPIOY_0, GPIOY_15, 0x8, 0),
};
-static struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = {
.pmx_banks = meson_axg_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_axg_periphs_pmx_banks),
};
-static struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = {
+static const struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = {
BANK_PMX("AO", GPIOAO_0, GPIOAO_13, 0x0, 0),
};
-static struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = {
.pmx_banks = meson_axg_aobus_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_axg_aobus_pmx_banks),
};
-static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_axg_periphs_pins,
.groups = meson_axg_periphs_groups,
@@ -1054,7 +1054,7 @@ static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
.pmx_data = &meson_axg_periphs_pmx_banks_data,
};
-static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_axg_aobus_pins,
.groups = meson_axg_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 32830269a5b4..e2788bfc5874 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -436,7 +436,7 @@ static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 };
static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 };
static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 };
-static struct meson_pmx_group meson_g12a_periphs_groups[] = {
+static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -860,7 +860,7 @@ static const unsigned int tdm_ao_b_dout2_pins[] = { GPIOAO_6 };
/* mclk0_ao */
static const unsigned int mclk0_ao_pins[] = { GPIOAO_9 };
-static struct meson_pmx_group meson_g12a_aobus_groups[] = {
+static const struct meson_pmx_group meson_g12a_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -1253,7 +1253,7 @@ static const char * const mclk0_ao_groups[] = {
"mclk0_ao",
};
-static struct meson_pmx_func meson_g12a_periphs_functions[] = {
+static const struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -1295,7 +1295,7 @@ static struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(tdm_c),
};
-static struct meson_pmx_func meson_g12a_aobus_functions[] = {
+static const struct meson_pmx_func meson_g12a_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao_a),
FUNCTION(uart_ao_b),
@@ -1317,7 +1317,7 @@ static struct meson_pmx_func meson_g12a_aobus_functions[] = {
FUNCTION(mclk0_ao),
};
-static struct meson_bank meson_g12a_periphs_banks[] = {
+static const struct meson_bank meson_g12a_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("Z", GPIOZ_0, GPIOZ_15, IRQID_GPIOZ_0, IRQID_GPIOZ_15,
4, 0, 4, 0, 12, 0, 13, 0, 14, 0, 5, 0),
@@ -1333,7 +1333,7 @@ static struct meson_bank meson_g12a_periphs_banks[] = {
2, 0, 2, 0, 6, 0, 7, 0, 8, 0, 2, 0),
};
-static struct meson_bank meson_g12a_aobus_banks[] = {
+static const struct meson_bank meson_g12a_aobus_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("AO", GPIOAO_0, GPIOAO_11, IRQID_GPIOAO_0, IRQID_GPIOAO_11,
3, 0, 2, 0, 0, 0, 4, 0, 1, 0, 0, 0),
@@ -1342,7 +1342,7 @@ static struct meson_bank meson_g12a_aobus_banks[] = {
3, 16, 2, 16, 0, 16, 4, 16, 1, 16, 1, 0),
};
-static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("Z", GPIOZ_0, GPIOZ_15, 0x6, 0),
BANK_PMX("H", GPIOH_0, GPIOH_8, 0xb, 0),
@@ -1352,17 +1352,17 @@ static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
BANK_PMX("X", GPIOX_0, GPIOX_19, 0x3, 0),
};
-static struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = {
.pmx_banks = meson_g12a_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_g12a_periphs_pmx_banks),
};
-static struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = {
+static const struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = {
BANK_PMX("AO", GPIOAO_0, GPIOAO_11, 0x0, 0),
BANK_PMX("E", GPIOE_0, GPIOE_2, 0x1, 16),
};
-static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.pmx_banks = meson_g12a_aobus_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
@@ -1375,7 +1375,7 @@ static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
return 0;
}
-static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
.groups = meson_g12a_periphs_groups,
@@ -1389,7 +1389,7 @@ static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.pmx_data = &meson_g12a_periphs_pmx_banks_data,
};
-static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_g12a_aobus_pins,
.groups = meson_g12a_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 2867f397fec6..4e8b9d7c2e4b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -307,7 +307,7 @@ static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 };
static const unsigned int ao_cec_pins[] = { GPIOAO_12 };
static const unsigned int ee_cec_pins[] = { GPIOAO_12 };
-static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+static const struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -541,7 +541,7 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(sdcard_clk, 2, 11),
};
-static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
+static const struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -798,7 +798,7 @@ static const char * const cec_ao_groups[] = {
"ao_cec", "ee_cec",
};
-static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
+static const struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -829,7 +829,7 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(tsin_b),
};
-static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
+static const struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -845,7 +845,7 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(cec_ao),
};
-static struct meson_bank meson_gxbb_periphs_banks[] = {
+static const struct meson_bank meson_gxbb_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
@@ -857,12 +857,12 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
-static struct meson_bank meson_gxbb_aobus_banks[] = {
+static const struct meson_bank meson_gxbb_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
@@ -875,7 +875,7 @@ static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index a2f25fa02852..9171de657f97 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -301,7 +301,7 @@ static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 };
static const unsigned int ao_cec_pins[] = { GPIOAO_8 };
static const unsigned int ee_cec_pins[] = { GPIOAO_8 };
-static struct meson_pmx_group meson_gxl_periphs_groups[] = {
+static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -527,7 +527,7 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(pwm_f_clk, 8, 30),
};
-static struct meson_pmx_group meson_gxl_aobus_groups[] = {
+static const struct meson_pmx_group meson_gxl_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -763,7 +763,7 @@ static const char * const cec_ao_groups[] = {
"ao_cec", "ee_cec",
};
-static struct meson_pmx_func meson_gxl_periphs_functions[] = {
+static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -793,7 +793,7 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(tsin_b),
};
-static struct meson_pmx_func meson_gxl_aobus_functions[] = {
+static const struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -807,7 +807,7 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(cec_ao),
};
-static struct meson_bank meson_gxl_periphs_banks[] = {
+static const struct meson_bank meson_gxl_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
@@ -818,12 +818,12 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
-static struct meson_bank meson_gxl_aobus_banks[] = {
+static const struct meson_bank meson_gxl_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_gxl_periphs_pins,
.groups = meson_gxl_periphs_groups,
@@ -836,7 +836,7 @@ static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_gxl_aobus_pins,
.groups = meson_gxl_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c
index 60c7d5003e8a..872948699e9f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-s4.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c
@@ -411,7 +411,7 @@ static const unsigned int s2_demod_gpio0_pins[] = { GPIOZ_12 };
static const unsigned int gen_clk_z9_pins[] = { GPIOZ_9 };
static const unsigned int gen_clk_z12_pins[] = { GPIOZ_12 };
-static struct meson_pmx_group meson_s4_periphs_groups[] = {
+static const struct meson_pmx_group meson_s4_periphs_groups[] = {
GPIO_GROUP(GPIOE_0),
GPIO_GROUP(GPIOE_1),
@@ -1100,7 +1100,7 @@ static const char * const s2_demod_groups[] = {
"s2_demod_gpio3", "s2_demod_gpio2", "s2_demod_gpio1", "s2_demod_gpio0",
};
-static struct meson_pmx_func meson_s4_periphs_functions[] = {
+static const struct meson_pmx_func meson_s4_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(i2c0),
FUNCTION(i2c1),
@@ -1160,7 +1160,7 @@ static struct meson_pmx_func meson_s4_periphs_functions[] = {
FUNCTION(s2_demod),
};
-static struct meson_bank meson_s4_periphs_banks[] = {
+static const struct meson_bank meson_s4_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK_DS("B", GPIOB_0, GPIOB_13, 0, 13,
0x63, 0, 0x64, 0, 0x62, 0, 0x61, 0, 0x60, 0, 0x67, 0),
@@ -1180,7 +1180,7 @@ static struct meson_bank meson_s4_periphs_banks[] = {
0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0),
};
-static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("B", GPIOB_0, GPIOB_13, 0x00, 0),
BANK_PMX("C", GPIOC_0, GPIOC_7, 0x9, 0),
@@ -1192,12 +1192,12 @@ static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0xf, 0)
};
-static struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = {
.pmx_banks = meson_s4_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_s4_periphs_pmx_banks),
};
-static struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_s4_periphs_pins,
.groups = meson_s4_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index ef002b9dd464..253a0cc57e39 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -70,7 +70,7 @@ static const unsigned int meson_bit_strides[] = {
* Return: 0 on success, a negative value on error
*/
static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin,
- struct meson_bank **bank)
+ const struct meson_bank **bank)
{
int i;
@@ -94,11 +94,12 @@ static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin,
* @reg: the computed register offset
* @bit: the computed bit
*/
-static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin,
+static void meson_calc_reg_and_bit(const struct meson_bank *bank,
+ unsigned int pin,
enum meson_reg_type reg_type,
unsigned int *reg, unsigned int *bit)
{
- struct meson_reg_desc *desc = &bank->regs[reg_type];
+ const struct meson_reg_desc *desc = &bank->regs[reg_type];
*bit = (desc->bit + pin - bank->first) * meson_bit_strides[reg_type];
*reg = (desc->reg + (*bit / 32)) * 4;
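For reference, the register/bit arithmetic in meson_calc_reg_and_bit() works out as in this standalone sketch; the stride, descriptor and bank values are made up, and the bit index is taken modulo 32 here on the assumption that the driver masks it into the selected 32-bit word after this hunk:

	#include <stdio.h>

	int main(void)
	{
		unsigned int stride = 2;   /* e.g. drive strength: 2 bits per pin */
		unsigned int desc_reg = 1; /* hypothetical meson_reg_desc.reg     */
		unsigned int desc_bit = 0; /* hypothetical meson_reg_desc.bit     */
		unsigned int first = 10;   /* hypothetical bank->first            */
		unsigned int pin = 27;

		unsigned int bit = (desc_bit + pin - first) * stride; /* 34 */
		unsigned int reg = (desc_reg + bit / 32) * 4;         /* 8  */

		/* pin 27 lands in the second 32-bit word, at bit 2 */
		printf("reg=0x%02x bit=%u\n", reg, bit % 32);
		return 0;
	}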
@@ -181,7 +182,7 @@ static int meson_pinconf_set_gpio_bit(struct meson_pinctrl *pc,
unsigned int reg_type,
bool arg)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit;
int ret;
@@ -198,7 +199,7 @@ static int meson_pinconf_get_gpio_bit(struct meson_pinctrl *pc,
unsigned int pin,
unsigned int reg_type)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
int ret;
@@ -261,7 +262,7 @@ static int meson_pinconf_set_output_drive(struct meson_pinctrl *pc,
static int meson_pinconf_disable_bias(struct meson_pinctrl *pc,
unsigned int pin)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit = 0;
int ret;
@@ -280,7 +281,7 @@ static int meson_pinconf_disable_bias(struct meson_pinctrl *pc,
static int meson_pinconf_enable_bias(struct meson_pinctrl *pc, unsigned int pin,
bool pull_up)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val = 0;
int ret;
@@ -308,7 +309,7 @@ static int meson_pinconf_set_drive_strength(struct meson_pinctrl *pc,
unsigned int pin,
u16 drive_strength_ua)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, ds_val;
int ret;
@@ -399,7 +400,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
int ret, conf;
@@ -435,7 +436,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
unsigned int pin,
u16 *drive_strength_ua)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit;
unsigned int val;
int ret;
@@ -528,7 +529,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
unsigned long *configs, unsigned num_configs)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_group *group = &pc->data->groups[num_group];
+ const struct meson_pmx_group *group = &pc->data->groups[num_group];
int i;
dev_dbg(pc->dev, "set pinconf for group %s\n", group->name);
@@ -587,8 +588,8 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
- struct meson_bank *bank;
int ret;
ret = meson_get_bank(pc, gpio, &bank);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 34fc4e8612e4..7883ea31a001 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -110,15 +110,15 @@ struct meson_bank {
struct meson_pinctrl_data {
const char *name;
const struct pinctrl_pin_desc *pins;
- struct meson_pmx_group *groups;
- struct meson_pmx_func *funcs;
+ const struct meson_pmx_group *groups;
+ const struct meson_pmx_func *funcs;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
- struct meson_bank *banks;
+ const struct meson_bank *banks;
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
- void *pmx_data;
+ const void *pmx_data;
int (*parse_dt)(struct meson_pinctrl *pc);
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
index 7f22aa0f8e36..10adf52edda6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -32,7 +32,7 @@
static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc,
unsigned int pin, int sel_group)
{
- struct meson_pmx_group *group;
+ const struct meson_pmx_group *group;
struct meson8_pmx_data *pmx_data;
int i, j;
@@ -57,8 +57,8 @@ static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
unsigned group_num)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
+ const struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ const struct meson_pmx_group *group = &pc->data->groups[group_num];
struct meson8_pmx_data *pmx_data =
(struct meson8_pmx_data *)group->data;
int i, ret = 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index dd17100efdcf..3da7f3799c3f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -405,7 +405,7 @@ static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 };
-static struct meson_pmx_group meson8_cbus_groups[] = {
+static const struct meson_pmx_group meson8_cbus_groups[] = {
GPIO_GROUP(GPIOX_0),
GPIO_GROUP(GPIOX_1),
GPIO_GROUP(GPIOX_2),
@@ -745,7 +745,7 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(sdxc_cmd_b, 2, 4),
};
-static struct meson_pmx_group meson8_aobus_groups[] = {
+static const struct meson_pmx_group meson8_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -1015,7 +1015,7 @@ static const char * const hdmi_cec_ao_groups[] = {
"hdmi_cec_ao"
};
-static struct meson_pmx_func meson8_cbus_functions[] = {
+static const struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -1051,7 +1051,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(spdif),
};
-static struct meson_pmx_func meson8_aobus_functions[] = {
+static const struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(remote),
@@ -1063,7 +1063,7 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(hdmi_cec_ao),
};
-static struct meson_bank meson8_cbus_banks[] = {
+static const struct meson_bank meson8_cbus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
@@ -1074,12 +1074,12 @@ static struct meson_bank meson8_cbus_banks[] = {
BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
-static struct meson_bank meson8_aobus_banks[] = {
+static const struct meson_bank meson8_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
@@ -1092,7 +1092,7 @@ static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.name = "ao-bank",
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 6cd4b3ec1b40..a71e1f41358a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -349,7 +349,7 @@ static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
static const unsigned int eth_mdc_pins[] = { DIF_4_P };
static const unsigned int eth_mdio_en_pins[] = { DIF_4_N };
-static struct meson_pmx_group meson8b_cbus_groups[] = {
+static const struct meson_pmx_group meson8b_cbus_groups[] = {
GPIO_GROUP(GPIOX_0),
GPIO_GROUP(GPIOX_1),
GPIO_GROUP(GPIOX_2),
@@ -603,7 +603,7 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(eth_rxd2, 7, 23),
};
-static struct meson_pmx_group meson8b_aobus_groups[] = {
+static const struct meson_pmx_group meson8b_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -869,7 +869,7 @@ static const char * const tsin_b_groups[] = {
"tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
};
-static struct meson_pmx_func meson8b_cbus_functions[] = {
+static const struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -903,7 +903,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(clk_24m),
};
-static struct meson_pmx_func meson8b_aobus_functions[] = {
+static const struct meson_pmx_func meson8b_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -917,7 +917,7 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
FUNCTION(hdmi_cec),
};
-static struct meson_bank meson8b_cbus_banks[] = {
+static const struct meson_bank meson8b_cbus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X0..11", GPIOX_0, GPIOX_11, 97, 108, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("X16..21", GPIOX_16, GPIOX_21, 113, 118, 4, 16, 4, 16, 0, 16, 1, 16, 2, 16),
@@ -938,12 +938,12 @@ static struct meson_bank meson8b_cbus_banks[] = {
BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
-static struct meson_bank meson8b_aobus_banks[] = {
+static const struct meson_bank meson8b_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
@@ -956,7 +956,7 @@ static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 1947da73e512..dce601d99372 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -767,7 +767,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
struct resource fb_res;
struct mvebu_mpp_ctrl_data *mpp_data;
void __iomem *base;
- int i;
+ int i, ret;
pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
@@ -783,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_probe;
+ }
mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
sizeof(*mpp_data), GFP_KERNEL);
- if (!mpp_data)
- return -ENOMEM;
+ if (!mpp_data) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
dove_pinctrl_info.control_data = mpp_data;
for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
@@ -808,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
mpp4_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp4_base))
- return PTR_ERR(mpp4_base);
+ if (IS_ERR(mpp4_base)) {
+ ret = PTR_ERR(mpp4_base);
+ goto err_probe;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@@ -820,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
pmu_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmu_base))
- return PTR_ERR(pmu_base);
+ if (IS_ERR(pmu_base)) {
+ ret = PTR_ERR(pmu_base);
+ goto err_probe;
+ }
gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
if (IS_ERR(gconfmap)) {
@@ -831,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
adjust_resource(&fb_res,
(mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
- if (IS_ERR(gc_base))
- return PTR_ERR(gc_base);
+ if (IS_ERR(gc_base)) {
+ ret = PTR_ERR(gc_base);
+ goto err_probe;
+ }
+
gconfmap = devm_regmap_init_mmio(&pdev->dev,
gc_base, &gc_regmap_config);
- if (IS_ERR(gconfmap))
- return PTR_ERR(gconfmap);
+ if (IS_ERR(gconfmap)) {
+ ret = PTR_ERR(gconfmap);
+ goto err_probe;
+ }
}
/* Warn on any missing DT resource */
@@ -844,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
return mvebu_pinctrl_probe(pdev);
+err_probe:
+ clk_disable_unprepare(clk);
+ return ret;
}
static struct platform_driver dove_pinctrl_driver = {
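The pattern adopted above, in isolation: every failure path reached after clk_prepare_enable() must unwind the clock, which the earlier direct returns silently skipped. A minimal sketch with illustrative names, not the actual probe:

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* nothing to unwind yet */
		clk_prepare_enable(clk);

		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base)) {
			ret = PTR_ERR(base);
			goto err_probe;	/* must undo clk_prepare_enable() */
		}

		return 0;

	err_probe:
		clk_disable_unprepare(clk);
		return ret;
	}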
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 47f62c89955a..68750b6f8e57 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -716,8 +716,7 @@ static int abx500_dt_add_map_configs(struct pinctrl_map **map,
if (*num_maps == *reserved_maps)
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index fa78d5ecc685..f4f10c60c1d2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -601,8 +601,7 @@ static int nmk_dt_add_map_configs(struct pinctrl_map **map,
if (*num_maps == *reserved_maps)
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
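kmemdup_array(src, count, element_size, gfp) duplicates count elements of the given size; unlike the open-coded multiplication it replaces in the two hunks above, the count * size product is computed with overflow protection, so an overflow yields a failed allocation rather than an undersized buffer. Equivalent usage:

	unsigned long *dup_configs;

	/* count first, element size second; an overflowing product makes
	 * the allocation fail instead of silently truncating */
	dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs),
				    GFP_KERNEL);
	if (!dup_configs)
		return -ENOMEM;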
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index a377d36b0eb0..471f644c5eef 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -241,6 +241,7 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
@@ -315,8 +316,8 @@ static struct irq_chip npcmgpio_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static const int gpi36_pins[] = { 58 };
-static const int gpi35_pins[] = { 58 };
+static const int gpi36_pins[] = { 36 };
+static const int gpi35_pins[] = { 35 };
static const int tp_jtag3_pins[] = { 44, 62, 45, 46 };
static const int tp_uart_pins[] = { 50, 51 };
@@ -437,7 +438,6 @@ static const int smb4_pins[] = { 28, 29 };
static const int smb4b_pins[] = { 18, 19 };
static const int smb4c_pins[] = { 20, 21 };
static const int smb4d_pins[] = { 22, 23 };
-static const int smb4den_pins[] = { 17 };
static const int smb5_pins[] = { 26, 27 };
static const int smb5b_pins[] = { 13, 12 };
static const int smb5c_pins[] = { 15, 14 };
@@ -515,7 +515,7 @@ static const int rg2_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
static const int rg2mdio_pins[] = { 216, 217 };
static const int ddr_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
- 213, 214, 215, 216, 217 };
+ 213, 214, 215, 216, 217, 250 };
static const int iox1_pins[] = { 0, 1, 2, 3 };
static const int iox2_pins[] = { 4, 5, 6, 7 };
@@ -570,7 +570,6 @@ static const int spi3cs3_pins[] = { 189 };
static const int ddc_pins[] = { 204, 205, 206, 207 };
static const int lpc_pins[] = { 95, 161, 163, 164, 165, 166, 167 };
-static const int lpcclk_pins[] = { 168 };
static const int espi_pins[] = { 95, 161, 163, 164, 165, 166, 167, 168 };
static const int lkgpo0_pins[] = { 16 };
@@ -699,7 +698,6 @@ struct npcm8xx_pingroup {
NPCM8XX_GRP(smb4b), \
NPCM8XX_GRP(smb4c), \
NPCM8XX_GRP(smb4d), \
- NPCM8XX_GRP(smb4den), \
NPCM8XX_GRP(smb5), \
NPCM8XX_GRP(smb5b), \
NPCM8XX_GRP(smb5c), \
@@ -808,7 +806,6 @@ struct npcm8xx_pingroup {
NPCM8XX_GRP(spi3cs3), \
NPCM8XX_GRP(spi0cs1), \
NPCM8XX_GRP(lpc), \
- NPCM8XX_GRP(lpcclk), \
NPCM8XX_GRP(espi), \
NPCM8XX_GRP(lkgpo0), \
NPCM8XX_GRP(lkgpo1), \
@@ -948,7 +945,6 @@ NPCM8XX_SFUNC(smb4);
NPCM8XX_SFUNC(smb4b);
NPCM8XX_SFUNC(smb4c);
NPCM8XX_SFUNC(smb4d);
-NPCM8XX_SFUNC(smb4den);
NPCM8XX_SFUNC(smb5);
NPCM8XX_SFUNC(smb5b);
NPCM8XX_SFUNC(smb5c);
@@ -1056,7 +1052,6 @@ NPCM8XX_SFUNC(spi3cs2);
NPCM8XX_SFUNC(spi3cs3);
NPCM8XX_SFUNC(spi0cs1);
NPCM8XX_SFUNC(lpc);
-NPCM8XX_SFUNC(lpcclk);
NPCM8XX_SFUNC(espi);
NPCM8XX_SFUNC(lkgpo0);
NPCM8XX_SFUNC(lkgpo1);
@@ -1172,7 +1167,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
NPCM8XX_MKFUNC(smb4b),
NPCM8XX_MKFUNC(smb4c),
NPCM8XX_MKFUNC(smb4d),
- NPCM8XX_MKFUNC(smb4den),
NPCM8XX_MKFUNC(smb5),
NPCM8XX_MKFUNC(smb5b),
NPCM8XX_MKFUNC(smb5c),
@@ -1280,7 +1274,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
NPCM8XX_MKFUNC(spi3cs3),
NPCM8XX_MKFUNC(spi0cs1),
NPCM8XX_MKFUNC(lpc),
- NPCM8XX_MKFUNC(lpcclk),
NPCM8XX_MKFUNC(espi),
NPCM8XX_MKFUNC(lkgpo0),
NPCM8XX_MKFUNC(lkgpo1),
@@ -1347,7 +1340,7 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(16, lkgpo0, FLOCKR1, 0, smb7b, I2CSEGSEL, 27, tp_gpio2b, MFSEL7, 10, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, smb4den, I2CSEGSEL, 23, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(18, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(19, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(20, hgpio0, MFSEL2, 24, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1365,6 +1358,8 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(32, spi0cs1, MFSEL1, 3, smb14b, MFSEL7, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(33, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(34, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(35, gpi35, MFSEL5, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(36, gpi36, MFSEL5, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(37, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(38, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(39, smb3b, I2CSEGSEL, 11, smb22, MFSEL5, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1438,10 +1433,10 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(107, i3c5, MFSEL3, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(108, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(109, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(114, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(115, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(116, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
@@ -1490,13 +1485,13 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(161, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(162, serirq, MFSEL1, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(162, clkrun, MFSEL3, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM8XX_PINCFG(163, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(164, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(165, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(166, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(167, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(168, lpcclk, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(168, serirq, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(169, scipme, MFSEL3, 0, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(170, smi, MFSEL1, 22, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(171, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
@@ -1515,22 +1510,22 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(184, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(185, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(186, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
- NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(188, gpio1889, MFSEL7, 25, spi3cs2, MFSEL4, 18, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(189, gpio1889, MFSEL7, 25, spi3cs3, MFSEL4, 19, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(190, nprd_smi, FLOCKR1, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
- NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
- NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
+ NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */
+ NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */
NPCM8XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
NPCM8XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
- NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(203, faninx, MFSEL3, 3, spi1cs0, MFSEL3, 4, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM8XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
NPCM8XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
@@ -1553,10 +1548,10 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(226, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(227, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(228, spixcs1, MFSEL4, 28, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
- NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
- NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 12) | SLEW),
- NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEWLPC), /* slewlpc ? */
+ NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), /* slewlpc ? */
NPCM8XX_PINCFG(234, pwm10, MFSEL6, 13, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(235, pwm11, MFSEL6, 14, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(240, i3c0, MFSEL5, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1567,7 +1562,8 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(245, i3c2, MFSEL5, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(246, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(247, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(250, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
NPCM8XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
NPCM8XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
@@ -1610,6 +1606,8 @@ static const struct pinctrl_pin_desc npcm8xx_pins[] = {
PINCTRL_PIN(32, "GPIO32/SMB14B_SCL/SPI0_nCS1"),
PINCTRL_PIN(33, "GPIO33/I3C4_SCL"),
PINCTRL_PIN(34, "GPIO34/I3C4_SDA"),
+ PINCTRL_PIN(35, "MCBPCK/GPI35_AHB2PCI_DIS"),
+ PINCTRL_PIN(36, "SYSBPCK/GPI36"),
PINCTRL_PIN(37, "GPIO37/SMB3C_SDA/SMB23_SDA"),
PINCTRL_PIN(38, "GPIO38/SMB3C_SCL/SMB23_SCL"),
PINCTRL_PIN(39, "GPIO39/SMB3B_SDA/SMB22_SDA"),
@@ -2044,7 +2042,7 @@ static int npcm8xx_gpio_request_enable(struct pinctrl_dev *pctldev,
const unsigned int *pin = &offset;
int mode = fn_gpio;
- if (pin[0] >= 183 && pin[0] <= 189)
+ if ((pin[0] >= 183 && pin[0] <= 189) || pin[0] == 35 || pin[0] == 36)
mode = pincfg[pin[0]].fn0;
npcm8xx_setfunc(npcm->gcr_regmap, &offset, 1, mode);
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index f2609a35c312..501eb296c760 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -2,7 +2,7 @@
/*
* Core driver for the S32 CC (Common Chassis) pin controller
*
- * Copyright 2017-2022 NXP
+ * Copyright 2017-2022,2024 NXP
* Copyright (C) 2022 SUSE LLC
* Copyright 2015-2016 Freescale Semiconductor, Inc.
*/
@@ -39,6 +39,11 @@
#define S32_MSCR_ODE BIT(20)
#define S32_MSCR_OBE BIT(21)
+enum s32_write_type {
+ S32_PINCONF_UPDATE_ONLY,
+ S32_PINCONF_OVERWRITE,
+};
+
static struct regmap_config s32_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -431,16 +436,15 @@ static int s32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned int offset,
bool input)
{
- unsigned int config;
+ /* Always enable IBE for GPIOs. This allows us to read the
+ * actual line value and compare it with the one set.
+ */
+ unsigned int config = S32_MSCR_IBE;
unsigned int mask = S32_MSCR_IBE | S32_MSCR_OBE;
- if (input) {
- /* Disable output buffer and enable input buffer */
- config = S32_MSCR_IBE;
- } else {
- /* Disable input buffer and enable output buffer */
- config = S32_MSCR_OBE;
- }
+ /* Enable output buffer */
+ if (!input)
+ config |= S32_MSCR_OBE;
return s32_regmap_update(pctldev, offset, mask, config);
}
@@ -511,6 +515,10 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask,
*config |= S32_MSCR_ODE;
*mask |= S32_MSCR_ODE;
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ *config &= ~S32_MSCR_ODE;
+ *mask |= S32_MSCR_ODE;
+ break;
case PIN_CONFIG_OUTPUT_ENABLE:
if (arg)
*config |= S32_MSCR_OBE;
@@ -549,10 +557,11 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask,
return 0;
}
-static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev,
+static int s32_pinconf_mscr_write(struct pinctrl_dev *pctldev,
unsigned int pin_id,
unsigned long *configs,
- unsigned int num_configs)
+ unsigned int num_configs,
+ enum s32_write_type write_type)
{
struct s32_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
unsigned int config = 0, mask = 0;
@@ -571,10 +580,20 @@ static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev,
return ret;
}
+ /* If the MSCR configuration has to be written,
+ * the SSS field should not be touched.
+ */
+ if (write_type == S32_PINCONF_OVERWRITE)
+ mask = (unsigned int)~S32_MSCR_SSS_MASK;
+
if (!config && !mask)
return 0;
- dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id, config);
+ if (write_type == S32_PINCONF_OVERWRITE)
+ dev_dbg(ipctl->dev, "set: pin %u cfg 0x%x\n", pin_id, config);
+ else
+ dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id,
+ config);
return s32_regmap_update(pctldev, pin_id, mask, config);
}
@@ -590,8 +609,8 @@ static int s32_pinconf_set(struct pinctrl_dev *pctldev,
unsigned int pin_id, unsigned long *configs,
unsigned int num_configs)
{
- return s32_pinconf_mscr_update(pctldev, pin_id, configs,
- num_configs);
+ return s32_pinconf_mscr_write(pctldev, pin_id, configs,
+ num_configs, S32_PINCONF_UPDATE_ONLY);
}
static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selector,
@@ -604,8 +623,8 @@ static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selecto
grp = &info->groups[selector];
for (i = 0; i < grp->data.npins; i++) {
- ret = s32_pinconf_mscr_update(pctldev, grp->data.pins[i],
- configs, num_configs);
+ ret = s32_pinconf_mscr_write(pctldev, grp->data.pins[i],
+ configs, num_configs, S32_PINCONF_OVERWRITE);
if (ret)
return ret;
}
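In isolation, the two write modes introduced above differ only in the final mask: UPDATE_ONLY touches just the bits named by the parsed configs, while OVERWRITE rewrites every MSCR bit except the SSS (mux) field, so stale pinconf bits cannot survive a group reconfiguration. A standalone sketch, with a hypothetical mask value:

	#include <stdio.h>

	enum write_type { UPDATE_ONLY, OVERWRITE };

	#define SSS_MASK 0x7u	/* hypothetical mux-select field mask */

	static unsigned int effective_mask(enum write_type type,
					   unsigned int parsed_mask)
	{
		/* OVERWRITE: write everything but the mux selection */
		return type == OVERWRITE ? ~SSS_MASK : parsed_mask;
	}

	int main(void)
	{
		printf("update:    0x%08x\n", effective_mask(UPDATE_ONLY, 0x00200000u));
		printf("overwrite: 0x%08x\n", effective_mask(OVERWRITE, 0x00200000u));
		return 0;
	}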
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index a499b8af5c1f..0b13d7f17b32 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -44,6 +44,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
+ PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_UV, "input schmitt threshold", "uV", true),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
@@ -177,6 +178,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
+ { "input-schmitt-microvolts", PIN_CONFIG_INPUT_SCHMITT_UV, 0 },
{ "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 },
{ "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 9a92707d2525..5096ccdd459e 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -62,11 +62,11 @@
#define MAX_BANK 8
#define BANK_SZ 8
#define MAX_LINE (MAX_BANK * BANK_SZ)
-#define MUXED_STRIDE (CY8C95X0_DRV_HIZ - CY8C95X0_INTMASK)
+#define MUXED_STRIDE 16
#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
-#define CY8C95X0_VIRTUAL (CY8C95X0_COMMAND + 1)
+#define CY8C95X0_VIRTUAL 0x40
#define CY8C95X0_MUX_REGMAP_TO_OFFSET(x, p) \
- (CY8C95X0_VIRTUAL + (x) - CY8C95X0_INTMASK + (p) * MUXED_STRIDE)
+ (CY8C95X0_VIRTUAL + (x) - CY8C95X0_PORTSEL + (p) * MUXED_STRIDE)
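+/*
+ * Worked example (port and register choice arbitrary): with
+ * CY8C95X0_VIRTUAL = 0x40 and MUXED_STRIDE = 16, the register at
+ * CY8C95X0_PORTSEL + 1 for port 2 maps to 0x40 + 1 + 2 * 16 = 0x61 in the
+ * virtual regmap window.
+ */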
static const struct i2c_device_id cy8c95x0_id[] = {
{ "cy8c9520", 20, },
@@ -329,7 +329,11 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
- if (reg >= CY8C95X0_VIRTUAL)
+ /*
+ * Only 12 registers are present per port (see Table 6 in the
+ * datasheet).
+ */
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
return true;
switch (reg) {
@@ -444,7 +448,7 @@ static const struct regmap_range_cfg cy8c95x0_ranges[] = {
.selector_reg = CY8C95X0_PORTSEL,
.selector_mask = 0x07,
.selector_shift = 0x0,
- .window_start = CY8C95X0_INTMASK,
+ .window_start = CY8C95X0_PORTSEL,
.window_len = MUXED_STRIDE,
}
};
diff --git a/drivers/pinctrl/pinctrl-eyeq5.c b/drivers/pinctrl/pinctrl-eyeq5.c
new file mode 100644
index 000000000000..5f6af934a516
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-eyeq5.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pinctrl driver for the Mobileye EyeQ5 platform.
+ *
+ * The registers are located in a syscon region called OLB. There are two pin
+ * banks, each being controlled by 5 registers (see enum eq5p_regs) for
+ * pull-down, pull-up, drive strength and muxing.
+ *
+ * For each pin, muxing is between two functions: (0) GPIO or (1) another one
+ * that is pin-dependent. Functions are declared statically in this driver.
+ *
+ * We create pinctrl groups that are 1:1 equivalent to pins: each group has a
+ * single pin, and its index/selector is the pin number.
+ *
+ * We use eq5p_ as prefix, as in "EyeQ5 Pinctrl", but way shorter.
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+struct eq5p_pinctrl {
+ struct pinctrl_desc desc;
+ void __iomem *base;
+};
+
+enum eq5p_bank {
+ EQ5P_BANK_A,
+ EQ5P_BANK_B,
+
+ EQ5P_BANK_COUNT,
+};
+
+enum eq5p_regs {
+ EQ5P_PD,
+ EQ5P_PU,
+ EQ5P_DS_LOW,
+ EQ5P_DS_HIGH,
+ EQ5P_IOCR,
+
+ EQ5P_REG_COUNT,
+};
+
+static const unsigned int eq5p_regs[EQ5P_BANK_COUNT][EQ5P_REG_COUNT] = {
+ [EQ5P_BANK_A] = {0x0C0, 0x0C4, 0x0D0, 0x0D4, 0x0B0},
+ [EQ5P_BANK_B] = {0x0C8, 0x0CC, 0x0D8, 0x0DC, 0x0B4},
+};
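+
+/*
+ * For example, the pull-up register of bank B lives at OLB offset
+ * eq5p_regs[EQ5P_BANK_B][EQ5P_PU] == 0x0CC, and the mux (IOCR) register of
+ * bank A at 0x0B0.
+ */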
+
+/*
+ * Drive strength; two bits per pin.
+ */
+#define EQ5P_DS_MASK GENMASK(1, 0)
+
+/*
+ * Comments to the right of each pin are the "signal name" in the datasheet.
+ */
+static const struct pinctrl_pin_desc eq5p_pins[] = {
+ /* Bank A */
+ PINCTRL_PIN(0, "PA0"), /* A0_TIMER0_CK */
+ PINCTRL_PIN(1, "PA1"), /* A1_TIMER0_EOC */
+ PINCTRL_PIN(2, "PA2"), /* A2_TIMER1_CK */
+ PINCTRL_PIN(3, "PA3"), /* A3_TIMER1_EOC */
+ PINCTRL_PIN(4, "PA4"), /* A4_TIMER2_CK */
+ PINCTRL_PIN(5, "PA5"), /* A5_TIMER2_EOC */
+ PINCTRL_PIN(6, "PA6"), /* A6_TIMER5_EXT_INCAP1 */
+ PINCTRL_PIN(7, "PA7"), /* A7_TIMER5_EXT_INCAP2 */
+ PINCTRL_PIN(8, "PA8"), /* A8_TIMER5_EXT_OUTCMP1 */
+ PINCTRL_PIN(9, "PA9"), /* A9_TIMER5_EXT_OUTCMP2 */
+ PINCTRL_PIN(10, "PA10"), /* A10_UART_0_TX */
+ PINCTRL_PIN(11, "PA11"), /* A11_UART_0_RX */
+ PINCTRL_PIN(12, "PA12"), /* A12_UART_1_TX */
+ PINCTRL_PIN(13, "PA13"), /* A13_UART_1_RX */
+ PINCTRL_PIN(14, "PA14"), /* A14_CAN_0_TX */
+ PINCTRL_PIN(15, "PA15"), /* A15_CAN_0_RX */
+ PINCTRL_PIN(16, "PA16"), /* A16_CAN_1_TX */
+ PINCTRL_PIN(17, "PA17"), /* A17_CAN_1_RX */
+ PINCTRL_PIN(18, "PA18"), /* A18_SPI_0_DO */
+ PINCTRL_PIN(19, "PA19"), /* A19_SPI_0_DI */
+ PINCTRL_PIN(20, "PA20"), /* A20_SPI_0_CK */
+ PINCTRL_PIN(21, "PA21"), /* A21_SPI_0_CS0 */
+ PINCTRL_PIN(22, "PA22"), /* A22_SPI_0_CS1 */
+ PINCTRL_PIN(23, "PA23"), /* A23_SPI_1_DO */
+ PINCTRL_PIN(24, "PA24"), /* A24_SPI_1_DI */
+ PINCTRL_PIN(25, "PA25"), /* A25_SPI_1_CK */
+ PINCTRL_PIN(26, "PA26"), /* A26_SPI_1_CS0 */
+ PINCTRL_PIN(27, "PA27"), /* A27_SPI_1_CS1 */
+ PINCTRL_PIN(28, "PA28"), /* A28_REF_CLK0 */
+
+#define EQ5P_PIN_OFFSET_BANK_B 29
+
+ /* Bank B */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 0, "PB0"), /* B0_TIMER3_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 1, "PB1"), /* B1_TIMER3_EOC */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 2, "PB2"), /* B2_TIMER4_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 3, "PB3"), /* B3_TIMER4_EOC */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 4, "PB4"), /* B4_TIMER6_EXT_INCAP1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 5, "PB5"), /* B5_TIMER6_EXT_INCAP2 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 6, "PB6"), /* B6_TIMER6_EXT_OUTCMP1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 7, "PB7"), /* B7_TIMER6_EXT_OUTCMP2 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 8, "PB8"), /* B8_UART_2_TX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 9, "PB9"), /* B9_UART_2_RX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 10, "PB10"), /* B10_CAN_2_TX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 11, "PB11"), /* B11_CAN_2_RX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 12, "PB12"), /* B12_SPI_2_DO */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 13, "PB13"), /* B13_SPI_2_DI */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 14, "PB14"), /* B14_SPI_2_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 15, "PB15"), /* B15_SPI_2_CS0 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 16, "PB16"), /* B16_SPI_2_CS1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 17, "PB17"), /* B17_SPI_3_DO */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 18, "PB18"), /* B18_SPI_3_DI */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 19, "PB19"), /* B19_SPI_3_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 20, "PB20"), /* B20_SPI_3_CS0 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 21, "PB21"), /* B21_SPI_3_CS1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 22, "PB22"), /* B22_MCLK0 */
+};
+
+static const char * const gpio_groups[] = {
+ /* Bank A */
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PA8", "PA9", "PA10", "PA11", "PA12", "PA13", "PA14", "PA15",
+ "PA16", "PA17", "PA18", "PA19", "PA20", "PA21", "PA22", "PA23",
+ "PA24", "PA25", "PA26", "PA27", "PA28",
+
+ /* Bank B */
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+ "PB8", "PB9", "PB10", "PB11", "PB12", "PB13", "PB14", "PB15",
+ "PB16", "PB17", "PB18", "PB19", "PB20", "PB21", "PB22",
+};
+
+/* Groups of functions on bank A */
+static const char * const timer0_groups[] = { "PA0", "PA1" };
+static const char * const timer1_groups[] = { "PA2", "PA3" };
+static const char * const timer2_groups[] = { "PA4", "PA5" };
+static const char * const timer5_groups[] = { "PA6", "PA7", "PA8", "PA9" };
+static const char * const uart0_groups[] = { "PA10", "PA11" };
+static const char * const uart1_groups[] = { "PA12", "PA13" };
+static const char * const can0_groups[] = { "PA14", "PA15" };
+static const char * const can1_groups[] = { "PA16", "PA17" };
+static const char * const spi0_groups[] = { "PA18", "PA19", "PA20", "PA21", "PA22" };
+static const char * const spi1_groups[] = { "PA23", "PA24", "PA25", "PA26", "PA27" };
+static const char * const refclk0_groups[] = { "PA28" };
+
+/* Groups of functions on bank B */
+static const char * const timer3_groups[] = { "PB0", "PB1" };
+static const char * const timer4_groups[] = { "PB2", "PB3" };
+static const char * const timer6_groups[] = { "PB4", "PB5", "PB6", "PB7" };
+static const char * const uart2_groups[] = { "PB8", "PB9" };
+static const char * const can2_groups[] = { "PB10", "PB11" };
+static const char * const spi2_groups[] = { "PB12", "PB13", "PB14", "PB15", "PB16" };
+static const char * const spi3_groups[] = { "PB17", "PB18", "PB19", "PB20", "PB21" };
+static const char * const mclk0_groups[] = { "PB22" };
+
+static const struct pinfunction eq5p_functions[] = {
+	/* The "gpio" function relies on having a fixed index, see GPIO_FUNC_SELECTOR. */
+ PINCTRL_PINFUNCTION("gpio", gpio_groups, ARRAY_SIZE(gpio_groups)),
+#define GPIO_FUNC_SELECTOR 0
+
+ /* Bank A functions */
+ PINCTRL_PINFUNCTION("timer0", timer0_groups, ARRAY_SIZE(timer0_groups)),
+ PINCTRL_PINFUNCTION("timer1", timer1_groups, ARRAY_SIZE(timer1_groups)),
+ PINCTRL_PINFUNCTION("timer2", timer2_groups, ARRAY_SIZE(timer2_groups)),
+ PINCTRL_PINFUNCTION("timer5", timer5_groups, ARRAY_SIZE(timer5_groups)),
+ PINCTRL_PINFUNCTION("uart0", uart0_groups, ARRAY_SIZE(uart0_groups)),
+ PINCTRL_PINFUNCTION("uart1", uart1_groups, ARRAY_SIZE(uart1_groups)),
+ PINCTRL_PINFUNCTION("can0", can0_groups, ARRAY_SIZE(can0_groups)),
+ PINCTRL_PINFUNCTION("can1", can1_groups, ARRAY_SIZE(can1_groups)),
+ PINCTRL_PINFUNCTION("spi0", spi0_groups, ARRAY_SIZE(spi0_groups)),
+ PINCTRL_PINFUNCTION("spi1", spi1_groups, ARRAY_SIZE(spi1_groups)),
+ PINCTRL_PINFUNCTION("refclk0", refclk0_groups, ARRAY_SIZE(refclk0_groups)),
+
+ /* Bank B functions */
+ PINCTRL_PINFUNCTION("timer3", timer3_groups, ARRAY_SIZE(timer3_groups)),
+ PINCTRL_PINFUNCTION("timer4", timer4_groups, ARRAY_SIZE(timer4_groups)),
+ PINCTRL_PINFUNCTION("timer6", timer6_groups, ARRAY_SIZE(timer6_groups)),
+ PINCTRL_PINFUNCTION("uart2", uart2_groups, ARRAY_SIZE(uart2_groups)),
+ PINCTRL_PINFUNCTION("can2", can2_groups, ARRAY_SIZE(can2_groups)),
+ PINCTRL_PINFUNCTION("spi2", spi2_groups, ARRAY_SIZE(spi2_groups)),
+ PINCTRL_PINFUNCTION("spi3", spi3_groups, ARRAY_SIZE(spi3_groups)),
+ PINCTRL_PINFUNCTION("mclk0", mclk0_groups, ARRAY_SIZE(mclk0_groups)),
+};
+
+static void eq5p_update_bits(const struct eq5p_pinctrl *pctrl,
+ enum eq5p_bank bank, enum eq5p_regs reg,
+ u32 mask, u32 val)
+{
+ void __iomem *ptr = pctrl->base + eq5p_regs[bank][reg];
+
+ writel((readl(ptr) & ~mask) | (val & mask), ptr);
+}
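+
+/*
+ * Usage sketch (pin choice hypothetical): muxing pin PA3 away from GPIO is a
+ * read-modify-write of bit 3 in bank A's IOCR register:
+ *
+ *	eq5p_update_bits(pctrl, EQ5P_BANK_A, EQ5P_IOCR, BIT(3), BIT(3));
+ */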
+
+static bool eq5p_test_bit(const struct eq5p_pinctrl *pctrl,
+ enum eq5p_bank bank, enum eq5p_regs reg, int offset)
+{
+ u32 val = readl(pctrl->base + eq5p_regs[bank][reg]);
+
+ if (WARN_ON(offset > 31))
+ return false;
+
+ return (val & BIT(offset)) != 0;
+}
+
+static enum eq5p_bank eq5p_pin_to_bank(unsigned int pin)
+{
+ if (pin < EQ5P_PIN_OFFSET_BANK_B)
+ return EQ5P_BANK_A;
+ else
+ return EQ5P_BANK_B;
+}
+
+static unsigned int eq5p_pin_to_offset(unsigned int pin)
+{
+ if (pin < EQ5P_PIN_OFFSET_BANK_B)
+ return pin;
+ else
+ return pin - EQ5P_PIN_OFFSET_BANK_B;
+}
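+
+/*
+ * Example: with EQ5P_PIN_OFFSET_BANK_B == 29, global pin 30 ("PB1") decodes
+ * to EQ5P_BANK_B with in-bank offset 1, while pin 3 ("PA3") stays in
+ * EQ5P_BANK_A at offset 3.
+ */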
+
+static int eq5p_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(eq5p_pins);
+}
+
+static const char *eq5p_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return pctldev->desc->pins[selector].name;
+}
+
+static int eq5p_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &pctldev->desc->pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static int eq5p_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ u32 val_ds, arg;
+ bool pd, pu;
+
+ pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset);
+ pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = !(pd || pu);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pd;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pu;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ offset *= 2; /* two bits per pin */
+ if (offset >= 32) {
+ val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_HIGH]);
+ offset -= 32;
+ } else {
+ val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_LOW]);
+ }
+ arg = (val_ds >> offset) & EQ5P_DS_MASK;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
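+
+/*
+ * Drive-strength layout, worked through: pin PB20 has in-bank offset 20, so
+ * its two DS bits sit at bit position 40, i.e. bits [9:8] of EQ5P_DS_HIGH;
+ * pin PA5 (offset 5) uses bits [11:10] of EQ5P_DS_LOW.
+ */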
+
+static void eq5p_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *pin_name = pctrl->desc.pins[pin].name;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ const char *func_name, *bias;
+ unsigned long ds_config;
+ u32 drive_strength;
+ bool pd, pu;
+ int i, j;
+
+ /*
+ * First, let's get the function name. All pins have only two functions:
+ * GPIO (IOCR == 0) and something else (IOCR == 1).
+ */
+ if (eq5p_test_bit(pctrl, bank, EQ5P_IOCR, offset)) {
+ func_name = NULL;
+ for (i = 0; i < ARRAY_SIZE(eq5p_functions); i++) {
+ if (i == GPIO_FUNC_SELECTOR)
+ continue;
+
+ for (j = 0; j < eq5p_functions[i].ngroups; j++) {
+ /* Groups and pins are the same thing for us. */
+ const char *x = eq5p_functions[i].groups[j];
+
+ if (strcmp(x, pin_name) == 0) {
+ func_name = eq5p_functions[i].name;
+ break;
+ }
+ }
+
+ if (func_name)
+ break;
+ }
+
+ /*
+		 * We have not found the function attached to this pin; this
+		 * should never occur, as all pins have exactly two functions.
+ */
+ if (!func_name)
+ func_name = "unknown";
+ } else {
+ func_name = eq5p_functions[GPIO_FUNC_SELECTOR].name;
+ }
+
+ /* Second, we retrieve the bias. */
+ pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset);
+ pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset);
+ if (pd && pu)
+ bias = "both";
+ else if (pd && !pu)
+ bias = "pulldown";
+ else if (!pd && pu)
+ bias = "pullup";
+ else
+ bias = "none";
+
+ /* Third, we get the drive strength. */
+ ds_config = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 0);
+ eq5p_pinconf_get(pctldev, pin, &ds_config);
+ drive_strength = pinconf_to_config_argument(ds_config);
+
+ seq_printf(s, "function=%s bias=%s drive_strength=%d",
+ func_name, bias, drive_strength);
+}
+
+static const struct pinctrl_ops eq5p_pinctrl_ops = {
+ .get_groups_count = eq5p_pinctrl_get_groups_count,
+ .get_group_name = eq5p_pinctrl_get_group_name,
+ .get_group_pins = eq5p_pinctrl_get_group_pins,
+ .pin_dbg_show = eq5p_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int eq5p_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(eq5p_functions);
+}
+
+static const char *eq5p_pinmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return eq5p_functions[selector].name;
+}
+
+static int eq5p_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ *groups = eq5p_functions[selector].groups;
+ *num_groups = eq5p_functions[selector].ngroups;
+ return 0;
+}
+
+static int eq5p_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector, unsigned int pin)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *func_name = eq5p_functions[func_selector].name;
+ const char *group_name = pctldev->desc->pins[pin].name;
+ bool is_gpio = func_selector == GPIO_FUNC_SELECTOR;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ u32 mask, val;
+
+ dev_dbg(pctldev->dev, "func=%s group=%s\n", func_name, group_name);
+
+ mask = BIT(offset);
+ val = is_gpio ? 0 : mask;
+ eq5p_update_bits(pctrl, bank, EQ5P_IOCR, mask, val);
+ return 0;
+}
+
+static int eq5p_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ /* Pin numbers and group selectors are the same thing in our case. */
+ return eq5p_pinmux_set_mux(pctldev, GPIO_FUNC_SELECTOR, pin);
+}
+
+static const struct pinmux_ops eq5p_pinmux_ops = {
+ .get_functions_count = eq5p_pinmux_get_functions_count,
+ .get_function_name = eq5p_pinmux_get_function_name,
+ .get_function_groups = eq5p_pinmux_get_function_groups,
+ .set_mux = eq5p_pinmux_set_mux,
+ .gpio_request_enable = eq5p_pinmux_gpio_request_enable,
+ .strict = true,
+};
+
+static int eq5p_pinconf_set_drive_strength(struct pinctrl_dev *pctldev,
+ unsigned int pin, u32 arg)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ unsigned int reg;
+ u32 mask, val;
+
+ if (arg & ~EQ5P_DS_MASK) {
+ dev_err(pctldev->dev, "Unsupported drive strength: %u\n", arg);
+ return -EINVAL;
+ }
+
+ offset *= 2; /* two bits per pin */
+
+ if (offset >= 32) {
+ reg = EQ5P_DS_HIGH;
+ offset -= 32;
+ } else {
+ reg = EQ5P_DS_LOW;
+ }
+
+ mask = EQ5P_DS_MASK << offset;
+ val = arg << offset;
+ eq5p_update_bits(pctrl, bank, reg, mask, val);
+ return 0;
+}
+
+static int eq5p_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *pin_name = pctldev->desc->pins[pin].name;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ struct device *dev = pctldev->dev;
+ u32 val = BIT(offset);
+ unsigned int i;
+
+ for (i = 0; i < num_configs; i++) {
+ enum pin_config_param param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(dev, "pin=%s bias_disable\n", pin_name);
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ dev_dbg(dev, "pin=%s bias_pull_down arg=%u\n",
+ pin_name, arg);
+
+ if (arg == 0) /* cannot connect to GND */
+ return -ENOTSUPP;
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, val);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(dev, "pin=%s bias_pull_up arg=%u\n",
+ pin_name, arg);
+
+ if (arg == 0) /* cannot connect to VDD */
+ return -ENOTSUPP;
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ dev_dbg(dev, "pin=%s drive_strength arg=%u\n",
+ pin_name, arg);
+
+ eq5p_pinconf_set_drive_strength(pctldev, pin, arg);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported pinconf %u\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops eq5p_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eq5p_pinconf_get,
+ .pin_config_set = eq5p_pinconf_set,
+ /* Pins and groups are equivalent in this driver. */
+ .pin_config_group_get = eq5p_pinconf_get,
+ .pin_config_group_set = eq5p_pinconf_set,
+};
+
+static int eq5p_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct pinctrl_dev *pctldev;
+ struct eq5p_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->base = (void __iomem *)dev_get_platdata(dev);
+ pctrl->desc.name = dev_name(dev);
+ pctrl->desc.pins = eq5p_pins;
+ pctrl->desc.npins = ARRAY_SIZE(eq5p_pins);
+ pctrl->desc.pctlops = &eq5p_pinctrl_ops;
+ pctrl->desc.pmxops = &eq5p_pinmux_ops;
+ pctrl->desc.confops = &eq5p_pinconf_ops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ ret = devm_pinctrl_register_and_init(dev, &pctrl->desc, pctrl, &pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed registering pinctrl device\n");
+
+ ret = pinctrl_enable(pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed enabling pinctrl device\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id eq5p_id_table[] = {
+ { .name = "clk_eyeq.pinctrl" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, eq5p_id_table);
+
+static struct auxiliary_driver eq5p_driver = {
+ .probe = eq5p_probe,
+ .id_table = eq5p_id_table,
+};
+module_auxiliary_driver(eq5p_driver);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index a898e40451fe..0f6b55fec31d 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -925,7 +925,6 @@ static int k210_fpioa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct k210_fpioa_data *pdata;
- int ret;
dev_info(dev, "K210 FPIOA pin controller\n");
@@ -940,46 +939,28 @@ static int k210_fpioa_probe(struct platform_device *pdev)
if (IS_ERR(pdata->fpioa))
return PTR_ERR(pdata->fpioa);
- pdata->clk = devm_clk_get(dev, "ref");
+ pdata->clk = devm_clk_get_enabled(dev, "ref");
if (IS_ERR(pdata->clk))
return PTR_ERR(pdata->clk);
- ret = clk_prepare_enable(pdata->clk);
- if (ret)
- return ret;
-
- pdata->pclk = devm_clk_get_optional(dev, "pclk");
- if (!IS_ERR(pdata->pclk)) {
- ret = clk_prepare_enable(pdata->pclk);
- if (ret)
- goto disable_clk;
- }
+ pdata->pclk = devm_clk_get_optional_enabled(dev, "pclk");
+ if (IS_ERR(pdata->pclk))
+ return PTR_ERR(pdata->pclk);
pdata->sysctl_map =
syscon_regmap_lookup_by_phandle_args(np,
"canaan,k210-sysctl-power",
1, &pdata->power_offset);
- if (IS_ERR(pdata->sysctl_map)) {
- ret = PTR_ERR(pdata->sysctl_map);
- goto disable_pclk;
- }
+ if (IS_ERR(pdata->sysctl_map))
+ return PTR_ERR(pdata->sysctl_map);
k210_fpioa_init_ties(pdata);
pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
- if (IS_ERR(pdata->pctl)) {
- ret = PTR_ERR(pdata->pctl);
- goto disable_pclk;
- }
+ if (IS_ERR(pdata->pctl))
+ return PTR_ERR(pdata->pctl);
return 0;
-
-disable_pclk:
- clk_disable_unprepare(pdata->pclk);
-disable_clk:
- clk_disable_unprepare(pdata->clk);
-
- return ret;
}
static const struct of_device_id k210_fpioa_dt_ids[] = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 6878bc86faa2..5c1bc4d5b662 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -84,6 +84,27 @@
}, \
}
+#define PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(id, pins, label, iom0, \
+ iom1, iom2, iom3, \
+ offset0, offset1, \
+ offset2, offset3, pull0, \
+ pull1, pull2, pull3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = offset0 }, \
+ { .type = iom1, .offset = offset1 }, \
+ { .type = iom2, .offset = offset2 }, \
+ { .type = iom3, .offset = offset3 }, \
+ }, \
+ .pull_type[0] = pull0, \
+ .pull_type[1] = pull1, \
+ .pull_type[2] = pull2, \
+ .pull_type[3] = pull3, \
+ }
+
#define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \
{ \
.bank_num = id, \
@@ -1120,6 +1141,11 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
+ if (ctrl->type == RK3576) {
+ if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7))
+ reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */
+ }
+
if (ctrl->type == RK3588) {
if (bank->bank_num == 0) {
if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
@@ -1234,6 +1260,11 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
+ if (ctrl->type == RK3576) {
+ if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7))
+ reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */
+ }
+
if (ctrl->type == RK3588) {
if (bank->bank_num == 0) {
if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
@@ -2038,6 +2069,142 @@ static int rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3576_DRV_BITS_PER_PIN 4
+#define RK3576_DRV_PINS_PER_REG 4
+#define RK3576_DRV_GPIO0_AL_OFFSET 0x10
+#define RK3576_DRV_GPIO0_BH_OFFSET 0x2014
+#define RK3576_DRV_GPIO1_OFFSET 0x6020
+#define RK3576_DRV_GPIO2_OFFSET 0x6040
+#define RK3576_DRV_GPIO3_OFFSET 0x6060
+#define RK3576_DRV_GPIO4_AL_OFFSET 0x6080
+#define RK3576_DRV_GPIO4_CL_OFFSET 0xA090
+#define RK3576_DRV_GPIO4_DL_OFFSET 0xB098
+
+static int rk3576_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_DRV_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_DRV_GPIO0_BH_OFFSET - 0xc;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_DRV_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_DRV_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_DRV_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_DRV_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_DRV_GPIO4_CL_OFFSET - 0x10;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_DRV_GPIO4_DL_OFFSET - 0x18;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3576_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_DRV_PINS_PER_REG;
+ *bit *= RK3576_DRV_BITS_PER_PIN;
+
+ return 0;
+}
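+
+/*
+ * Worked example: GPIO0 pin 14 (RK_PB6) takes the GPIO0_BH branch, so
+ * *reg = 0x2014 - 0xc + (14 / 4) * 4 = 0x2014 and *bit = (14 % 4) * 4 = 8,
+ * i.e. its drive strength lives in bits [11:8] of that register.
+ */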
+
+#define RK3576_PULL_BITS_PER_PIN 2
+#define RK3576_PULL_PINS_PER_REG 8
+#define RK3576_PULL_GPIO0_AL_OFFSET 0x20
+#define RK3576_PULL_GPIO0_BH_OFFSET 0x2028
+#define RK3576_PULL_GPIO1_OFFSET 0x6110
+#define RK3576_PULL_GPIO2_OFFSET 0x6120
+#define RK3576_PULL_GPIO3_OFFSET 0x6130
+#define RK3576_PULL_GPIO4_AL_OFFSET 0x6140
+#define RK3576_PULL_GPIO4_CL_OFFSET 0xA148
+#define RK3576_PULL_GPIO4_DL_OFFSET 0xB14C
+
+static int rk3576_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_PULL_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_PULL_GPIO0_BH_OFFSET - 0x4;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_PULL_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_PULL_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_PULL_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_PULL_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_PULL_GPIO4_CL_OFFSET - 0x8;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_PULL_GPIO4_DL_OFFSET - 0xc;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3576_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_PULL_PINS_PER_REG;
+ *bit *= RK3576_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3576_SMT_BITS_PER_PIN 1
+#define RK3576_SMT_PINS_PER_REG 8
+#define RK3576_SMT_GPIO0_AL_OFFSET 0x30
+#define RK3576_SMT_GPIO0_BH_OFFSET 0x2040
+#define RK3576_SMT_GPIO1_OFFSET 0x6210
+#define RK3576_SMT_GPIO2_OFFSET 0x6220
+#define RK3576_SMT_GPIO3_OFFSET 0x6230
+#define RK3576_SMT_GPIO4_AL_OFFSET 0x6240
+#define RK3576_SMT_GPIO4_CL_OFFSET 0xA248
+#define RK3576_SMT_GPIO4_DL_OFFSET 0xB24C
+
+static int rk3576_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_SMT_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_SMT_GPIO0_BH_OFFSET - 0x4;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_SMT_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_SMT_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_SMT_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_SMT_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_SMT_GPIO4_CL_OFFSET - 0x8;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_SMT_GPIO4_DL_OFFSET - 0xc;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3576_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_SMT_PINS_PER_REG;
+ *bit *= RK3576_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
#define RK3588_PMU1_IOC_REG (0x0000)
#define RK3588_PMU2_IOC_REG (0x4000)
#define RK3588_BUS_IOC_REG (0x8000)
@@ -2332,6 +2499,10 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
+ } else if (ctrl->type == RK3576) {
+ rmask_bits = RK3576_DRV_BITS_PER_PIN;
+ ret = ((strength & BIT(2)) >> 2) | ((strength & BIT(0)) << 2) | (strength & BIT(1));
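+		/*
+		 * The line above swaps bits 0 and 2 of the requested level:
+		 * e.g. a strength of 0b001 is written to the register as 0b100.
+		 */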
+ goto config;
}
if (ctrl->type == RV1126) {
@@ -2469,6 +2640,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
pull_type = bank->pull_type[pin_num / 8];
data >>= bit;
@@ -2528,6 +2700,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
pull_type = bank->pull_type[pin_num / 8];
ret = -EINVAL;
@@ -2793,6 +2966,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -3949,6 +4123,37 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
.schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit,
};
+#define RK3576_PIN_BANK(ID, LABEL, OFFSET0, OFFSET1, OFFSET2, OFFSET3) \
+ PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(ID, 32, LABEL, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ OFFSET0, OFFSET1, \
+ OFFSET2, OFFSET3, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY)
+
+static struct rockchip_pin_bank rk3576_pin_banks[] = {
+ RK3576_PIN_BANK(0, "gpio0", 0, 0x8, 0x2004, 0x200C),
+ RK3576_PIN_BANK(1, "gpio1", 0x4020, 0x4028, 0x4030, 0x4038),
+ RK3576_PIN_BANK(2, "gpio2", 0x4040, 0x4048, 0x4050, 0x4058),
+ RK3576_PIN_BANK(3, "gpio3", 0x4060, 0x4068, 0x4070, 0x4078),
+ RK3576_PIN_BANK(4, "gpio4", 0x4080, 0x4088, 0xA390, 0xB398),
+};
+
+static struct rockchip_pin_ctrl rk3576_pin_ctrl __maybe_unused = {
+ .pin_banks = rk3576_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3576_pin_banks),
+ .label = "RK3576-GPIO",
+ .type = RK3576,
+ .pull_calc_reg = rk3576_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3576_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3576_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3588_pin_banks[] = {
RK3588_PIN_BANK_FLAGS(0, 32, "gpio0",
IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
@@ -4005,6 +4210,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3399_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
+ { .compatible = "rockchip,rk3576-pinctrl",
+ .data = &rk3576_pin_ctrl },
{ .compatible = "rockchip,rk3588-pinctrl",
.data = &rk3588_pin_ctrl },
{},
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 849266f8b191..6ebbb0a88ce7 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -197,6 +197,7 @@ enum rockchip_pinctrl_type {
RK3368,
RK3399,
RK3568,
+ RK3576,
RK3588,
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 4da3c3f422b6..2ec599e383e4 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1913,7 +1913,8 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
- if (pinctrl_enable(pcs->pctl))
+ ret = pinctrl_enable(pcs->pctl);
+ if (ret)
goto free;
return 0;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 6313be370eb7..d2c5321dd025 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
@@ -369,14 +370,14 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
return;
if (dir == GPIO_LINE_DIRECTION_OUT) {
- seq_printf(s, "output %s ", val ? "high" : "low");
+ seq_printf(s, "output %s ", str_high_low(val));
if (type)
seq_printf(s, "open drain %s internal pull-up ",
pupd ? "with" : "without");
else
seq_puts(s, "push pull no pull ");
} else {
- seq_printf(s, "input %s ", val ? "high" : "low");
+ seq_printf(s, "input %s ", str_high_low(val));
if (type)
seq_printf(s, "with internal pull-%s ",
pupd ? "up" : "down");
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index d81d7b46116c..b880e44b8221 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -70,8 +70,8 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
if (WARN_ON(*num_maps == *reserved_maps))
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs,
+ sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 0e8de27d0de8..caa8a2ca3e68 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1202,6 +1202,7 @@ static const struct of_device_id zynq_pinctrl_of_match[] = {
{ .compatible = "xlnx,pinctrl-zynq" },
{ }
};
+MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match);
static struct platform_driver zynq_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index aae71a37219b..02033ea1c643 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -442,8 +442,7 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
dev_err_probe(pctldev->dev, ret,
- "could not request pin %d (%s) from group %s "
- " on device %s\n",
+ "could not request pin %d (%s) from group %s on device %s\n",
pins[i], pname, gname,
pinctrl_dev_get_name(pctldev));
goto err_pin_request;
@@ -526,9 +525,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
dev_warn(pctldev->dev,
- "not freeing pin %d (%s) as part of "
- "deactivating group %s - it is already "
- "used for some other setting",
+ "not freeing pin %d (%s) as part of deactivating group %s - it is already used for some other setting",
pins[i], desc->name, gname);
}
}
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c
index 208896593b61..244060486332 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.c
@@ -533,7 +533,7 @@ static const struct pinconf_ops rtd_pinconf_ops = {
.pin_config_group_set = rtd_pin_config_group_set,
};
-static struct regmap_config rtd_pinctrl_regmap_config = {
+static const struct regmap_config rtd_pinctrl_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 632180570b70..5a403915fed2 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
@@ -51,17 +52,15 @@
#define PIN_CFG_IO_VMC_QSPI BIT(7)
#define PIN_CFG_IO_VMC_ETH0 BIT(8)
#define PIN_CFG_IO_VMC_ETH1 BIT(9)
-#define PIN_CFG_FILONOFF BIT(10)
-#define PIN_CFG_FILNUM BIT(11)
-#define PIN_CFG_FILCLKSEL BIT(12)
-#define PIN_CFG_IOLH_C BIT(13)
-#define PIN_CFG_SOFT_PS BIT(14)
-#define PIN_CFG_OEN BIT(15)
-#define PIN_CFG_NOGPIO_INT BIT(16)
-#define PIN_CFG_NOD BIT(17) /* N-ch Open Drain */
-#define PIN_CFG_SMT BIT(18) /* Schmitt-trigger input control */
-#define PIN_CFG_ELC BIT(19)
-#define PIN_CFG_IOLH_RZV2H BIT(20)
+#define PIN_CFG_NF BIT(10) /* Digital noise filter */
+#define PIN_CFG_IOLH_C BIT(11)
+#define PIN_CFG_SOFT_PS BIT(12)
+#define PIN_CFG_OEN BIT(13)
+#define PIN_CFG_NOGPIO_INT BIT(14)
+#define PIN_CFG_NOD BIT(15) /* N-ch Open Drain */
+#define PIN_CFG_SMT BIT(16) /* Schmitt-trigger input control */
+#define PIN_CFG_ELC BIT(17)
+#define PIN_CFG_IOLH_RZV2H BIT(18)
#define RZG2L_SINGLE_PIN BIT_ULL(63) /* Dedicated pin */
#define RZG2L_VARIABLE_CFG BIT_ULL(62) /* Variable cfg for port pins */
@@ -69,9 +68,7 @@
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
PIN_CFG_PUPD | \
- PIN_CFG_FILONOFF | \
- PIN_CFG_FILNUM | \
- PIN_CFG_FILCLKSEL)
+ PIN_CFG_NF)
#define RZG2L_MPXED_PIN_FUNCS (RZG2L_MPXED_COMMON_PIN_FUNCS(A) | \
PIN_CFG_SR)
@@ -84,10 +81,7 @@
PIN_CFG_SR | \
PIN_CFG_SMT)
-#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \
- PIN_CFG_FILONOFF | \
- PIN_CFG_FILNUM | \
- PIN_CFG_FILCLKSEL)
+#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | PIN_CFG_NF)
#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(61, 54)
#define PIN_CFG_PIN_REG_MASK GENMASK_ULL(53, 46)
@@ -394,13 +388,13 @@ static const u64 r9a09g057_variable_pin_cfg[] = {
#ifdef CONFIG_RISCV
static const u64 r9a07g043f_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(20, 0, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
@@ -431,7 +425,7 @@ static const u64 r9a07g043f_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(24, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(24, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_NOGPIO_INT),
};
#endif
@@ -528,8 +522,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (!cfgs)
return -ENOMEM;
@@ -1261,7 +1254,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
- if (!pctrl->data->oen_read || !(cfg & PIN_CFG_OEN))
+ if (!(cfg & PIN_CFG_OEN))
+ return -EINVAL;
+ if (!pctrl->data->oen_read)
return -EOPNOTSUPP;
arg = pctrl->data->oen_read(pctrl, _pin);
if (!arg)
@@ -1390,9 +1385,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(_configs[i]);
+ arg = pinconf_to_config_argument(_configs[i]);
switch (param) {
case PIN_CONFIG_INPUT_ENABLE:
- arg = pinconf_to_config_argument(_configs[i]);
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
@@ -1401,8 +1396,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
- arg = pinconf_to_config_argument(_configs[i]);
- if (!pctrl->data->oen_write || !(cfg & PIN_CFG_OEN))
+ if (!(cfg & PIN_CFG_OEN))
+ return -EINVAL;
+ if (!pctrl->data->oen_write)
return -EOPNOTSUPP;
ret = pctrl->data->oen_write(pctrl, _pin, !!arg);
if (ret)
@@ -1410,12 +1406,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_POWER_SOURCE:
- settings.power_source = pinconf_to_config_argument(_configs[i]);
+ settings.power_source = arg;
break;
case PIN_CONFIG_SLEW_RATE:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_SR) || arg > 1)
return -EINVAL;
@@ -1436,8 +1430,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
return -EINVAL;
@@ -1457,12 +1449,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
!hwcfg->drive_strength_ua)
return -EINVAL;
- settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]);
+ settings.drive_strength_ua = arg;
break;
case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
return -EINVAL;
@@ -1480,7 +1470,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IOLH_RZV2H))
return -EINVAL;
- arg = pinconf_to_config_argument(_configs[i]);
if (arg > 3)
return -EINVAL;
rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, arg);
@@ -1883,8 +1872,7 @@ static const u64 r9a07g043_gpio_configs[] = {
#ifdef CONFIG_RISCV
/* Below additional port pins (P19 - P28) are exclusively available on RZ/Five SoC only */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
+ PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x07), /* P20 */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */
@@ -1892,8 +1880,7 @@ static const u64 r9a07g043_gpio_configs[] = {
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */
RZG2L_GPIO_PORT_SPARSE_PACK_VARIABLE(0x3e, 0x0a), /* P23 */
RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x0b), /* P24 */
- RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF |
- PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NF |
PIN_CFG_NOGPIO_INT), /* P25 */
0x0, /* P26 */
0x0, /* P27 */
@@ -1971,8 +1958,7 @@ static const struct {
struct rzg2l_dedicated_configs rzg2l_pins[7];
} rzg2l_dedicated_pins = {
.common = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
- (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
(PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
@@ -2053,8 +2039,7 @@ static const struct {
};
static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
- PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, PIN_CFG_NF) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
PIN_CFG_SOFT_PS)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
@@ -2093,8 +2078,7 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
};
static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
- PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
{ "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
@@ -2596,16 +2580,13 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
return -EPROBE_DEFER;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
- if (ret) {
- dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n");
if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
- of_args.args[2] != pctrl->data->n_port_pins) {
- dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
- return -EINVAL;
- }
+ of_args.args[2] != pctrl->data->n_port_pins)
+ return dev_err_probe(pctrl->dev, -EINVAL,
+ "gpio-ranges does not match selected SOC\n");
chip->names = pctrl->data->port_pins;
chip->request = rzg2l_gpio_request;
@@ -2623,7 +2604,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip);
- girq->fwnode = of_node_to_fwnode(np);
+ girq->fwnode = dev_fwnode(pctrl->dev);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec;
@@ -2637,10 +2618,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
pctrl->gpio_range.name = chip->label;
pctrl->gpio_range.gc = chip;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO controller\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "failed to add GPIO controller\n");
dev_dbg(pctrl->dev, "Registered gpio controller\n");
@@ -2726,22 +2705,16 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
&pctrl->pctl);
- if (ret) {
- dev_err(pctrl->dev, "pinctrl registration failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "pinctrl registration failed\n");
ret = pinctrl_enable(pctrl->pctl);
- if (ret) {
- dev_err(pctrl->dev, "pinctrl enable failed\n");
- return ret;
- }
+ if (ret)
+ dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n");
ret = rzg2l_gpio_register(pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "failed to add GPIO chip\n");
return 0;
}
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 0cae5472ac67..4062c56619f5 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -196,8 +196,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (!cfgs)
return -ENOMEM;
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 03e9bdbc82b9..29d16c9c1bd1 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -83,8 +83,7 @@ static int sh_pfc_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (cfgs == NULL)
return -ENOMEM;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 85ddf49a5188..d3d8672f74dc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -40,6 +40,19 @@ static const struct samsung_pin_bank_type bank_type_alive = {
#define S5P_OTHERS_RET_MMC (1 << 29)
#define S5P_OTHERS_RET_UART (1 << 28)
+#define S5P_PIN_PULL_DISABLE 0
+#define S5P_PIN_PULL_DOWN 1
+#define S5P_PIN_PULL_UP 2
+
+static void s5pv210_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = S5P_PIN_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = S5P_PIN_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = S5P_PIN_PULL_UP;
+}
+
static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
{
void __iomem *clk_base = (void __iomem *)drvdata->retention_ctrl->priv;
@@ -133,6 +146,7 @@ static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .pud_value_init = s5pv210_pud_value_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
.retention_data = &s5pv210_retention_data,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ce5e6783b5b9..b79c211c0374 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -662,7 +662,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
__init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
{
struct device *dev = d->dev;
- struct device_node *wkup_np = NULL;
+ struct device_node *wkup_np __free(device_node) = NULL;
struct device_node *np;
struct samsung_pin_bank *bank;
struct exynos_weint_data *weint_data;
@@ -692,17 +692,14 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->irq_chip = devm_kmemdup(dev, irq_chip, sizeof(*irq_chip),
GFP_KERNEL);
- if (!bank->irq_chip) {
- of_node_put(wkup_np);
+ if (!bank->irq_chip)
return -ENOMEM;
- }
bank->irq_chip->chip.name = bank->name;
bank->irq_domain = irq_domain_create_linear(bank->fwnode,
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
- of_node_put(wkup_np);
return -ENXIO;
}
@@ -715,10 +712,8 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data) {
- of_node_put(wkup_np);
+ if (!weint_data)
return -ENOMEM;
- }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(to_of_node(bank->fwnode), idx);
@@ -735,13 +730,10 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks) {
- of_node_put(wkup_np);
+ if (!muxed_banks)
return 0;
- }
irq = irq_of_parse_and_map(wkup_np, 0);
- of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c5d92db4fdb1..68715c09baa9 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -63,6 +63,10 @@
#define EINT_CON_MASK 0xF
#define EINT_CON_LEN 4
+#define S3C_PIN_PULL_DISABLE 0
+#define S3C_PIN_PULL_DOWN 1
+#define S3C_PIN_PULL_UP 2
+
static const struct samsung_pin_bank_type bank_type_4bit_off = {
.fld_width = { 4, 1, 2, 0, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
@@ -255,6 +259,15 @@ static int s3c64xx_irq_get_trigger(unsigned int type)
return trigger;
}
+static void s3c64xx_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = S3C_PIN_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = S3C_PIN_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = S3C_PIN_PULL_UP;
+}
+
static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
{
/* Edge- and level-triggered interrupts need different handlers */
@@ -797,6 +810,7 @@ static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0),
.eint_gpio_init = s3c64xx_eint_gpio_init,
.eint_wkup_init = s3c64xx_eint_eint0_init,
+ .pud_value_init = s3c64xx_pud_value_init,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 623df65a5d6f..675efa5d86a9 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -122,8 +122,8 @@ static int add_map_configs(struct device *dev, struct pinctrl_map **map,
if (WARN_ON(*num_maps == *reserved_maps))
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs),
+ GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
@@ -251,7 +251,6 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct samsung_pinctrl_drv_data *drvdata;
unsigned reserved_maps;
- struct device_node *np;
int ret;
drvdata = pinctrl_dev_get_drvdata(pctldev);
@@ -266,12 +265,11 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps,
num_maps);
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
@@ -823,16 +821,16 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
struct device_node *func_np;
if (!of_get_child_count(cfg_np)) {
- if (!of_find_property(cfg_np,
- "samsung,pin-function", NULL))
+ if (!of_property_present(cfg_np,
+ "samsung,pin-function"))
continue;
++func_cnt;
continue;
}
for_each_child_of_node(cfg_np, func_np) {
- if (!of_find_property(func_np,
- "samsung,pin-function", NULL))
+ if (!of_property_present(func_np,
+ "samsung,pin-function"))
continue;
++func_cnt;
}
@@ -849,16 +847,12 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
* and create pin groups and pin function lists.
*/
func_cnt = 0;
- for_each_child_of_node(dev_np, cfg_np) {
- struct device_node *func_np;
-
+ for_each_child_of_node_scoped(dev_np, cfg_np) {
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0) {
- of_node_put(cfg_np);
+ if (ret < 0)
return ERR_PTR(ret);
- }
if (ret > 0) {
++func;
++func_cnt;
@@ -866,14 +860,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
continue;
}
- for_each_child_of_node(cfg_np, func_np) {
+ for_each_child_of_node_scoped(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0) {
- of_node_put(func_np);
- of_node_put(cfg_np);
+ if (ret < 0)
return ERR_PTR(ret);
- }
if (ret > 0) {
++func;
++func_cnt;
@@ -997,6 +988,77 @@ static int samsung_pinctrl_unregister(struct platform_device *pdev,
return 0;
}
+static void samsung_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = EXYNOS_PIN_PUD_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = EXYNOS_PIN_PUD_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = EXYNOS_PIN_PUD_PULL_UP;
+}
+
+/*
+ * Enable or disable the pull-down and pull-up for the GPIO pins via the
+ * PUD register.
+ */
+static void samsung_gpio_set_pud(struct gpio_chip *gc, unsigned int offset,
+ unsigned int value)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ const struct samsung_pin_bank_type *type = bank->type;
+ void __iomem *reg;
+ unsigned int data, mask;
+
+ reg = bank->pctl_base + bank->pctl_offset;
+ data = readl(reg + type->reg_offset[PINCFG_TYPE_PUD]);
+ mask = (1 << type->fld_width[PINCFG_TYPE_PUD]) - 1;
+ data &= ~(mask << (offset * type->fld_width[PINCFG_TYPE_PUD]));
+ data |= value << (offset * type->fld_width[PINCFG_TYPE_PUD]);
+ writel(data, reg + type->reg_offset[PINCFG_TYPE_PUD]);
+}
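+
+/*
+ * Field math sketch, assuming a 2-bit PUD field (fld_width == 2): for
+ * offset 5, mask == 0x3 and the value is shifted to bits [11:10] of the
+ * bank's PUD register before being written back.
+ */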
+
+/*
+ * Translate the gpiolib bias request (disable, pull-down or pull-up) into
+ * the matching PUD register value.
+ */
+static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
+ unsigned int value;
+ int ret = 0;
+ unsigned long flags;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value = drvdata->pud_val[PUD_PULL_DISABLE];
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = drvdata->pud_val[PUD_PULL_DOWN];
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = drvdata->pud_val[PUD_PULL_UP];
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ raw_spin_lock_irqsave(&bank->slock, flags);
+ samsung_gpio_set_pud(gc, offset, value);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(drvdata->pclk);
+
+ return ret;
+}
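+
+/*
+ * Consumer-side sketch (descriptor hypothetical): gpiolib routes a request
+ * such as
+ *
+ *	gpiod_set_config(desc, pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1));
+ *
+ * to this .set_config hook, which then programs the PUD field under the
+ * bank lock with the controller clock enabled.
+ */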
+
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
@@ -1006,6 +1068,7 @@ static const struct gpio_chip samsung_gpiolib_chip = {
.direction_output = samsung_gpio_direction_output,
.to_irq = samsung_gpio_to_irq,
.add_pin_ranges = samsung_add_pin_ranges,
+ .set_config = samsung_gpio_set_config,
.owner = THIS_MODULE,
};
@@ -1237,6 +1300,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ctrl->eint_wkup_init)
ctrl->eint_wkup_init(drvdata);
+ if (ctrl->pud_value_init)
+ ctrl->pud_value_init(drvdata);
+ else
+ samsung_pud_value_init(drvdata);
+
ret = samsung_gpiolib_register(pdev, drvdata);
if (ret)
goto err_unregister;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index d50ba6f07d5d..a1e7377bd890 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -61,6 +61,25 @@ enum pincfg_type {
#define PIN_CON_FUNC_INPUT 0x0
#define PIN_CON_FUNC_OUTPUT 0x1
+/* Values for the pin PUD register */
+#define EXYNOS_PIN_PUD_PULL_DISABLE 0x0
+#define EXYNOS_PIN_PUD_PULL_DOWN 0x1
+#define EXYNOS_PIN_PUD_PULL_UP 0x3
+
+/**
+ * enum pud_index - Possible index values to access the pud_val array.
+ * @PUD_PULL_DISABLE: Index of the pull disable value
+ * @PUD_PULL_DOWN: Index of the pull-down enable value
+ * @PUD_PULL_UP: Index of the pull-up enable value
+ * @PUD_MAX: Number of entries, i.e. the size of the pud_val array
+ */
+enum pud_index {
+ PUD_PULL_DISABLE,
+ PUD_PULL_DOWN,
+ PUD_PULL_UP,
+ PUD_MAX,
+};
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
@@ -261,6 +280,7 @@ struct samsung_pin_ctrl {
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+ void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata);
void (*suspend)(struct samsung_pinctrl_drv_data *);
void (*resume)(struct samsung_pinctrl_drv_data *);
};
@@ -307,6 +327,7 @@ struct samsung_pinctrl_drv_data {
struct samsung_pin_bank *pin_banks;
unsigned int nr_banks;
unsigned int nr_pins;
+ unsigned int pud_val[PUD_MAX];
struct samsung_retention_ctrl *retention_ctrl;
diff --git a/drivers/pinctrl/sophgo/Kconfig b/drivers/pinctrl/sophgo/Kconfig
new file mode 100644
index 000000000000..b14792ee46fc
--- /dev/null
+++ b/drivers/pinctrl/sophgo/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Sophgo SoC PINCTRL drivers
+#
+
+config PINCTRL_SOPHGO_CV18XX
+ bool
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+
+config PINCTRL_SOPHGO_CV1800B
+ tristate "Sophgo CV1800B SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for the CV1800B SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-cv1800b.
+
+config PINCTRL_SOPHGO_CV1812H
+ tristate "Sophgo CV1812H SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for the CV1812H SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-cv1812h.
+
+config PINCTRL_SOPHGO_SG2000
+ tristate "Sophgo SG2000 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for the SG2000 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2000.
+
+config PINCTRL_SOPHGO_SG2002
+ tristate "Sophgo SG2000 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for the SG2002 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2002.
diff --git a/drivers/pinctrl/sophgo/Makefile b/drivers/pinctrl/sophgo/Makefile
new file mode 100644
index 000000000000..4113a5c9191b
--- /dev/null
+++ b/drivers/pinctrl/sophgo/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PINCTRL_SOPHGO_CV18XX) += pinctrl-cv18xx.o
+obj-$(CONFIG_PINCTRL_SOPHGO_CV1800B) += pinctrl-cv1800b.o
+obj-$(CONFIG_PINCTRL_SOPHGO_CV1812H) += pinctrl-cv1812h.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2000) += pinctrl-sg2000.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2002) += pinctrl-sg2002.o
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1800b.c b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
new file mode 100644
index 000000000000..3322906689e7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV1800B SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv1800b.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum CV1800B_POWER_DOMAIN {
+ VDD18A_AUD = 0,
+ VDD18A_USB_PLL_ETH_CSI = 1,
+ VDD33A_ETH_USB_SD1 = 2,
+ VDDIO_RTC = 3,
+ VDDIO_SD0_SPI = 4
+};
+
+static const char *const cv1800b_power_domain_desc[] = {
+ [VDD18A_AUD] = "VDD18A_AUD",
+ [VDD18A_USB_PLL_ETH_CSI] = "VDD18A_USB_PLL_ETH_CSI",
+ [VDD33A_ETH_USB_SD1] = "VDD33A_ETH_USB_SD1",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0_SPI] = "VDDIO_SD0_SPI",
+};
+
+static int cv1800b_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int cv1800b_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1800b_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 cv1800b_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 cv1800b_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 cv1800b_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int cv1800b_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1800b_1v8_oc_map;
+ return ARRAY_SIZE(cv1800b_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1800b_18od33_1v8_oc_map;
+ return ARRAY_SIZE(cv1800b_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1800b_18od33_3v3_oc_map;
+ return ARRAY_SIZE(cv1800b_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = cv1800b_eth_oc_map;
+ return ARRAY_SIZE(cv1800b_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1800b_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 cv1800b_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 cv1800b_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int cv1800b_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1800b_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1800b_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1800b_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1800b_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1800b_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(cv1800b_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops cv1800b_vddio_cfg_ops = {
+ .get_pull_up = cv1800b_get_pull_up,
+ .get_pull_down = cv1800b_get_pull_down,
+ .get_oc_map = cv1800b_get_oc_map,
+ .get_schmitt_map = cv1800b_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc cv1800b_pins[] = {
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_SPINOR_HOLD_X, "SPINOR_HOLD_X"),
+ PINCTRL_PIN(PIN_SPINOR_SCK, "SPINOR_SCK"),
+ PINCTRL_PIN(PIN_SPINOR_MOSI, "SPINOR_MOSI"),
+ PINCTRL_PIN(PIN_SPINOR_WP_X, "SPINOR_WP_X"),
+ PINCTRL_PIN(PIN_SPINOR_MISO, "SPINOR_MISO"),
+ PINCTRL_PIN(PIN_SPINOR_CS_X, "SPINOR_CS_X"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+ PINCTRL_PIN(PIN_SD1_GPIO0, "SD1_GPIO0"),
+ PINCTRL_PIN(PIN_SD1_GPIO1, "SD1_GPIO1"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+};
+
+static const struct cv1800_pin cv1800b_pin_data[ARRAY_SIZE(cv1800b_pins)] = {
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_AUD,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 6),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x000, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x004, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x008, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x010, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x014, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_SPINOR_HOLD_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_SPINOR_SCK, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_SPINOR_MOSI, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_SPINOR_WP_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_SPINOR_MISO, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_SPINOR_CS_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 7,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x068, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x074, 0,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_SD1_GPIO0, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x088, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_SD1_GPIO1, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x084, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 6,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 6,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_AUD,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x120, 5),
+};
+
+static const struct cv1800_pinctrl_data cv1800b_pindata = {
+ .pins = cv1800b_pins,
+ .pindata = cv1800b_pin_data,
+ .pdnames = cv1800b_power_domain_desc,
+ .vddio_ops = &cv1800b_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(cv1800b_pins),
+ .npd = ARRAY_SIZE(cv1800b_power_domain_desc),
+};
+
+static const struct of_device_id cv1800b_pinctrl_ids[] = {
+ { .compatible = "sophgo,cv1800b-pinctrl", .data = &cv1800b_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800b_pinctrl_ids);
+
+static struct platform_driver cv1800b_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "cv1800b-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = cv1800b_pinctrl_ids,
+ },
+};
+module_platform_driver(cv1800b_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the CV1800B series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1812h.c b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
new file mode 100644
index 000000000000..5632290b46fa
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV1812H SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv1812h.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum CV1812H_POWER_DOMAIN {
+ VDD18A_EPHY = 0,
+ VDD18A_MIPI = 1,
+ VDDIO18_1 = 2,
+ VDDIO_EMMC = 3,
+ VDDIO_RTC = 4,
+ VDDIO_SD0 = 5,
+ VDDIO_SD1 = 6,
+ VDDIO_VIVO = 7
+};
+
+static const char *const cv1812h_power_domain_desc[] = {
+ [VDD18A_EPHY] = "VDD18A_EPHY",
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDDIO18_1] = "VDDIO18_1",
+ [VDDIO_EMMC] = "VDDIO_EMMC",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0] = "VDDIO_SD0",
+ [VDDIO_SD1] = "VDDIO_SD1",
+ [VDDIO_VIVO] = "VDDIO_VIVO",
+};
+
+static int cv1812h_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int cv1812h_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1812h_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 cv1812h_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 cv1812h_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 cv1812h_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int cv1812h_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1812h_1v8_oc_map;
+ return ARRAY_SIZE(cv1812h_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1812h_18od33_1v8_oc_map;
+ return ARRAY_SIZE(cv1812h_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1812h_18od33_3v3_oc_map;
+ return ARRAY_SIZE(cv1812h_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = cv1812h_eth_oc_map;
+ return ARRAY_SIZE(cv1812h_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1812h_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 cv1812h_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 cv1812h_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int cv1812h_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1812h_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1812h_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1812h_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1812h_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1812h_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(cv1812h_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops cv1812h_vddio_cfg_ops = {
+ .get_pull_up = cv1812h_get_pull_up,
+ .get_pull_down = cv1812h_get_pull_down,
+ .get_oc_map = cv1812h_get_oc_map,
+ .get_schmitt_map = cv1812h_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc cv1812h_pins[] = {
+ PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"),
+ PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"),
+ PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"),
+ PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"),
+ PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"),
+ PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"),
+ PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"),
+ PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"),
+ PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"),
+ PINCTRL_PIN(PIN_USB_ID, "USB_ID"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"),
+ PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"),
+ PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"),
+ PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"),
+ PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"),
+ PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"),
+ PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"),
+ PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"),
+ PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_ADC2, "ADC2"),
+ PINCTRL_PIN(PIN_ADC3, "ADC3"),
+ PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_RSTN, "RSTN"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"),
+ PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"),
+ PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_CLK32K, "CLK32K"),
+ PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"),
+ PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_CLK25M, "CLK25M"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+};
+
+static const struct cv1800_pin cv1812h_pin_data[ARRAY_SIZE(cv1812h_pins)] = {
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x194, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc60),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x154, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x150, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x134, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc00),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc6c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x19c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc68),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x198, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc64),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x164, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc30),
+ CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x158, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x148, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x140, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x138, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0fc, 3,
+ CV1800_PINCONF_AREA_SYS, 0x814),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x004, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb04),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x000, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb00),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x168, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc34),
+ CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x160, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc2c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x144, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x13c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x100, 3,
+ CV1800_PINCONF_AREA_SYS, 0x818),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb0c),
+ CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x014, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x14c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x010, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb10),
+ CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x008, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb08),
+ CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x15c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x80c),
+ CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c4, 5),
+ CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb18),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 0,
+ CV1800_PINCONF_AREA_SYS, 0x800),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c0, 6),
+ CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x050),
+ CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x054),
+ CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x06c, 6,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x024),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x01c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x048, 4,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+};
+
+static const struct cv1800_pinctrl_data cv1812h_pindata = {
+ .pins = cv1812h_pins,
+ .pindata = cv1812h_pin_data,
+ .pdnames = cv1812h_power_domain_desc,
+ .vddio_ops = &cv1812h_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(cv1812h_pins),
+ .npd = ARRAY_SIZE(cv1812h_power_domain_desc),
+};
+
+static const struct of_device_id cv1812h_pinctrl_ids[] = {
+ { .compatible = "sophgo,cv1812h-pinctrl", .data = &cv1812h_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1812h_pinctrl_ids);
+
+static struct platform_driver cv1812h_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "cv1812h-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = cv1812h_pinctrl_ids,
+ },
+};
+module_platform_driver(cv1812h_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the CV1812H series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
new file mode 100644
index 000000000000..d18fc5aa84f7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV18XX SoCs pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/bsearch.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "pinctrl-cv18xx.h"
+
+struct cv1800_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl_dev;
+ const struct cv1800_pinctrl_data *data;
+ struct pinctrl_desc pdesc;
+ u32 *power_cfg;
+
+ struct mutex mutex;
+ raw_spinlock_t lock;
+
+ void __iomem *regs[2];
+};
+
+struct cv1800_pin_mux_config {
+ struct cv1800_pin *pin;
+ u32 config;
+};
+
+static unsigned int cv1800_dt_get_pin(u32 value)
+{
+ return value & GENMASK(15, 0);
+}
+
+static unsigned int cv1800_dt_get_pin_mux(u32 value)
+{
+ return (value >> 16) & GENMASK(7, 0);
+}
+
+static unsigned int cv1800_dt_get_pin_mux2(u32 value)
+{
+ return (value >> 24) & GENMASK(7, 0);
+}
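+
+/*
+ * The helpers above decode one 32-bit "pinmux" cell laid out as
+ * mux2[31:24] | mux[23:16] | pin[15:0]; e.g. a hypothetical cell
+ * 0x05020010 selects pin 0x10 with mux 2 and mux2 5.
+ */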
+
+#define cv1800_pinctrl_get_component_addr(pctrl, _comp) \
+ ((pctrl)->regs[(_comp)->area] + (_comp)->offset)
+
+static int cv1800_cmp_pin(const void *key, const void *pivot)
+{
+ const struct cv1800_pin *pin = pivot;
+ int pin_id = (long)key;
+ int pivid = pin->pin;
+
+ return pin_id - pivid;
+}
+
+static int cv1800_set_power_cfg(struct cv1800_pinctrl *pctrl,
+ u8 domain, u32 cfg)
+{
+ if (domain >= pctrl->data->npd)
+ return -ENOTSUPP;
+
+ if (pctrl->power_cfg[domain] && pctrl->power_cfg[domain] != cfg)
+ return -EINVAL;
+
+ pctrl->power_cfg[domain] = cfg;
+
+ return 0;
+}
+
+static int cv1800_get_power_cfg(struct cv1800_pinctrl *pctrl,
+ u8 domain)
+{
+ return pctrl->power_cfg[domain];
+}
+
+static struct cv1800_pin *cv1800_get_pin(struct cv1800_pinctrl *pctrl,
+ unsigned long pin)
+{
+ return bsearch((void *)pin, pctrl->data->pindata, pctrl->data->npins,
+ sizeof(struct cv1800_pin), cv1800_cmp_pin);
+}
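+
+/*
+ * Note: cv1800_get_pin() relies on bsearch(), which assumes the
+ * generated per-SoC pin tables are sorted in ascending pin-number
+ * order; lookups on an unsorted table would silently fail.
+ */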
+
+#define PIN_BGA_ID_OFFSET 8
+#define PIN_BGA_ID_MASK 0xff
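+
+/*
+ * BGA-packaged pins encode their ball position in the pin number:
+ * the row in the high byte (1-based, so 1 maps to row 'A') and the
+ * column in the low byte. A hypothetical pin id of 0x0203 is thus
+ * printed as ball "B3" by cv1800_pctrl_dbg_show() below, while ids
+ * below 0x100 are printed as plain positions.
+ */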
+
+static const char *const io_type_desc[] = {
+ "1V8",
+ "18OD33",
+ "AUDIO",
+ "ETH"
+};
+
+static const char *cv1800_get_power_cfg_desc(struct cv1800_pinctrl *pctrl,
+ u8 domain)
+{
+ return pctrl->data->pdnames[domain];
+}
+
+static void cv1800_pctrl_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *seq, unsigned int pin_id)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 value;
+ void __iomem *reg;
+
+ if (pin->pin >> PIN_BGA_ID_OFFSET)
+ seq_printf(seq, "pos: %c%u ",
+ 'A' + (pin->pin >> PIN_BGA_ID_OFFSET) - 1,
+ pin->pin & PIN_BGA_ID_MASK);
+ else
+ seq_printf(seq, "pos: %u ", pin->pin);
+
+ seq_printf(seq, "power-domain: %s ",
+ cv1800_get_power_cfg_desc(pctrl, pin->power_domain));
+ seq_printf(seq, "type: %s ", io_type_desc[type]);
+
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
+ value = readl(reg);
+ seq_printf(seq, "mux: 0x%08x ", value);
+
+ if (pin->flags & CV1800_PIN_HAVE_MUX2) {
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
+ value = readl(reg);
+ seq_printf(seq, "mux2: 0x%08x ", value);
+ }
+
+ if (type == IO_TYPE_1V8_ONLY || type == IO_TYPE_1V8_OR_3V3) {
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+ value = readl(reg);
+ seq_printf(seq, "conf: 0x%08x ", value);
+ }
+}
+
+static int cv1800_verify_pinmux_config(const struct cv1800_pin_mux_config *config)
+{
+ unsigned int mux = cv1800_dt_get_pin_mux(config->config);
+ unsigned int mux2 = cv1800_dt_get_pin_mux2(config->config);
+
+ if (mux > config->pin->mux.max)
+ return -EINVAL;
+
+ if (config->pin->flags & CV1800_PIN_HAVE_MUX2) {
+ if (mux != config->pin->mux2.pfunc)
+ return -EINVAL;
+
+ if (mux2 > config->pin->mux2.max)
+ return -EINVAL;
+ } else {
+ if (mux2 != PIN_MUX_INVALD)
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cv1800_verify_pin_group(const struct cv1800_pin_mux_config *mux,
+ unsigned long npins)
+{
+ enum cv1800_pin_io_type type;
+ u8 power_domain;
+ int i;
+
+ if (npins == 1)
+ return 0;
+
+ type = cv1800_pin_io_type(mux[0].pin);
+ power_domain = mux[0].pin->power_domain;
+
+ for (i = 0; i < npins; i++) {
+ if (type != cv1800_pin_io_type(mux[i].pin) ||
+ power_domain != mux[i].pin->power_domain)
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cv1800_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **maps,
+ unsigned int *num_maps)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
+ struct device_node *child;
+ struct pinctrl_map *map;
+ const char **grpnames;
+ const char *grpname;
+ int ngroups = 0;
+ int nmaps = 0;
+ int ret;
+
+ for_each_available_child_of_node(np, child)
+ ngroups += 1;
+
+ grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL);
+ if (!grpnames)
+ return -ENOMEM;
+
+ map = devm_kcalloc(dev, ngroups * 2, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ ngroups = 0;
+ mutex_lock(&pctrl->mutex);
+ for_each_available_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ unsigned int *pins;
+ struct cv1800_pin_mux_config *pinmuxs;
+ u32 config, power;
+ int i;
+
+ if (npins < 1) {
+ dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n",
+ np, child);
+ ret = -EINVAL;
+ goto dt_failed;
+ }
+
+ grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn",
+ np, child);
+ if (!grpname) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ grpnames[ngroups++] = grpname;
+
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL);
+ if (!pinmuxs) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ for (i = 0; i < npins; i++) {
+ ret = of_property_read_u32_index(child, "pinmux",
+ i, &config);
+ if (ret)
+ goto dt_failed;
+
+ pins[i] = cv1800_dt_get_pin(config);
+ pinmuxs[i].config = config;
+ pinmuxs[i].pin = cv1800_get_pin(pctrl, pins[i]);
+
+ if (!pinmuxs[i].pin) {
+ dev_err(dev, "failed to get pin %d\n", pins[i]);
+ ret = -ENODEV;
+ goto dt_failed;
+ }
+
+ ret = cv1800_verify_pinmux_config(&pinmuxs[i]);
+ if (ret) {
+ dev_err(dev, "group %s pin %d is invalid\n",
+ grpname, i);
+ goto dt_failed;
+ }
+ }
+
+ ret = cv1800_verify_pin_group(pinmuxs, npins);
+ if (ret) {
+ dev_err(dev, "group %s is invalid\n", grpname);
+ goto dt_failed;
+ }
+
+ ret = of_property_read_u32(child, "power-source", &power);
+ if (ret)
+ goto dt_failed;
+
+ if (!(power == PIN_POWER_STATE_3V3 || power == PIN_POWER_STATE_1V8)) {
+ dev_err(dev, "group %s have unsupported power: %u\n",
+ grpname, power);
+ ret = -ENOTSUPP;
+ goto dt_failed;
+ }
+
+ ret = cv1800_set_power_cfg(pctrl, pinmuxs[0].pin->power_domain,
+ power);
+ if (ret)
+ goto dt_failed;
+
+ map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+ map[nmaps].data.mux.function = np->name;
+ map[nmaps].data.mux.group = grpname;
+ nmaps += 1;
+
+ ret = pinconf_generic_parse_dt_config(child, pctldev,
+ &map[nmaps].data.configs.configs,
+ &map[nmaps].data.configs.num_configs);
+ if (ret) {
+ dev_err(dev, "failed to parse pin config of group %s: %d\n",
+ grpname, ret);
+ goto dt_failed;
+ }
+
+ ret = pinctrl_generic_add_group(pctldev, grpname,
+ pins, npins, pinmuxs);
+ if (ret < 0) {
+ dev_err(dev, "failed to add group %s: %d\n", grpname, ret);
+ goto dt_failed;
+ }
+
+ /* don't create a map if there are no pinconf settings */
+ if (map[nmaps].data.configs.num_configs == 0)
+ continue;
+
+ map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ map[nmaps].data.configs.group_or_pin = grpname;
+ nmaps += 1;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, np->name,
+ grpnames, ngroups, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error adding function %s: %d\n", np->name, ret);
+ goto function_failed;
+ }
+
+ *maps = map;
+ *num_maps = nmaps;
+ mutex_unlock(&pctrl->mutex);
+
+ return 0;
+
+dt_failed:
+ of_node_put(child);
+function_failed:
+ pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&pctrl->mutex);
+ return ret;
+}
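+
+/*
+ * Sketch of the devicetree layout consumed above (node names and mux
+ * values are illustrative; PINMUX() and PIN_POWER_STATE_3V3 are
+ * assumed to come from the cv18xx dt-bindings headers):
+ *
+ * uart0_cfg: uart0-cfg {
+ * uart0-pins {
+ * pinmux = <PINMUX(PIN_UART0_TX, 0)>,
+ * <PINMUX(PIN_UART0_RX, 0)>;
+ * power-source = <PIN_POWER_STATE_3V3>;
+ * bias-pull-up;
+ * };
+ * };
+ *
+ * Each child node becomes one group named "<node>.<child>", muxed via
+ * a PIN_MAP_TYPE_MUX_GROUP map; generic pinconf properties, when
+ * present, add a PIN_MAP_TYPE_CONFIGS_GROUP map.
+ */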
+
+static const struct pinctrl_ops cv1800_pctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = cv1800_pctrl_dbg_show,
+ .dt_node_to_map = cv1800_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int cv1800_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct cv1800_pin_mux_config *configs;
+ unsigned int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ configs = group->data;
+
+ for (i = 0; i < group->grp.npins; i++) {
+ const struct cv1800_pin *pin = configs[i].pin;
+ u32 value = configs[i].config;
+ void __iomem *reg_mux;
+ void __iomem *reg_mux2;
+ unsigned long flags;
+ u32 mux;
+ u32 mux2;
+
+ reg_mux = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
+ reg_mux2 = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
+ mux = cv1800_dt_get_pin_mux(value);
+ mux2 = cv1800_dt_get_pin_mux2(value);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ writel_relaxed(mux, reg_mux);
+ if (mux2 != PIN_MUX_INVALD)
+ writel_relaxed(mux2, reg_mux2);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops cv1800_pmx_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = cv1800_pmx_set_mux,
+ .strict = true,
+};
+
+#define PIN_IO_PULLUP BIT(2)
+#define PIN_IO_PULLDOWN BIT(3)
+#define PIN_IO_DRIVE GENMASK(7, 5)
+#define PIN_IO_SCHMITT GENMASK(9, 8)
+#define PIN_IO_BUS_HOLD BIT(10)
+#define PIN_IO_OUT_FAST_SLEW BIT(11)
+
+static u32 cv1800_pull_down_typical_resistor(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin)
+{
+ return pctrl->data->vddio_ops->get_pull_down(pin, pctrl->power_cfg);
+}
+
+static u32 cv1800_pull_up_typical_resistor(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin)
+{
+ return pctrl->data->vddio_ops->get_pull_up(pin, pctrl->power_cfg);
+}
+
+static int cv1800_pinctrl_oc2reg(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] >= target)
+ return i;
+ }
+
+ return -EINVAL;
+}
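+
+/*
+ * Example: given the CV1800B 18OD33/1V8 map { 7800, 11700, 15500,
+ * 19200, ... } uA, a 19200 uA target maps to register index 3 (the
+ * first entry >= the target); a target above the last entry yields
+ * -EINVAL.
+ */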
+
+static int cv1800_pinctrl_reg2oc(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
+static int cv1800_pinctrl_schmitt2reg(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
+ &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] == target)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cv1800_pinctrl_reg2schmitt(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
+ &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
+static int cv1800_pconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin_id, unsigned long *config)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ int param = pinconf_to_config_param(*config);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ enum cv1800_pin_io_type type;
+ u32 value;
+ u32 arg;
+ bool enabled;
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ type = cv1800_pin_io_type(pin);
+ if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO)
+ return -ENOTSUPP;
+
+ value = readl(cv1800_pinctrl_get_component_addr(pctrl, &pin->conf));
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ enabled = FIELD_GET(PIN_IO_PULLDOWN, value);
+ arg = cv1800_pull_down_typical_resistor(pctrl, pin);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ enabled = FIELD_GET(PIN_IO_PULLUP, value);
+ arg = cv1800_pull_up_typical_resistor(pctrl, pin);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ enabled = true;
+ arg = FIELD_GET(PIN_IO_DRIVE, value);
+ ret = cv1800_pinctrl_reg2oc(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_UV:
+ arg = FIELD_GET(PIN_IO_SCHMITT, value);
+ ret = cv1800_pinctrl_reg2schmitt(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ enabled = arg != 0;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ enabled = true;
+ arg = cv1800_get_power_cfg(pctrl, pin->power_domain);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ enabled = true;
+ arg = FIELD_GET(PIN_IO_OUT_FAST_SLEW, value);
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ arg = FIELD_GET(PIN_IO_BUS_HOLD, value);
+ enabled = arg != 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return enabled ? 0 : -EINVAL;
+}
+
+static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin,
+ unsigned long *configs,
+ unsigned int num_configs,
+ u32 *value)
+{
+ int i;
+ u32 v = 0;
+ enum cv1800_pin_io_type type;
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ type = cv1800_pin_io_type(pin);
+ if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO)
+ return -ENOTSUPP;
+
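+	/* The value is rebuilt from zero, so any field not named in @configs is cleared. */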
+ for (i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ v &= ~PIN_IO_PULLDOWN;
+ v |= FIELD_PREP(PIN_IO_PULLDOWN, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ v &= ~PIN_IO_PULLUP;
+ v |= FIELD_PREP(PIN_IO_PULLUP, arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ v &= ~PIN_IO_DRIVE;
+ v |= FIELD_PREP(PIN_IO_DRIVE, ret);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_UV:
+ ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ v &= ~PIN_IO_SCHMITT;
+ v |= FIELD_PREP(PIN_IO_SCHMITT, ret);
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ /* Ignore power source as it is always fixed */
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ v &= ~PIN_IO_OUT_FAST_SLEW;
+ v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg);
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ v &= ~PIN_IO_BUS_HOLD;
+ v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ *value = v;
+
+ return 0;
+}
+
+static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
+ unsigned int pin_id,
+ u32 value)
+{
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ unsigned long flags;
+ void __iomem *addr;
+
+ if (!pin)
+ return -EINVAL;
+
+ addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+
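+	/* Serialize the full-register update against concurrent configuration writes. */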
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ writel(value, addr);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int cv1800_pconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin_id, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ u32 value;
+
+ if (!pin)
+ return -ENODEV;
+
+ if (cv1800_pinconf_compute_config(pctrl, pin,
+ configs, num_configs, &value))
+ return -ENOTSUPP;
+
+ return cv1800_pin_set_config(pctrl, pin_id, value);
+}
+
+static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int gsel,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct cv1800_pin_mux_config *pinmuxs;
+ u32 value;
+ int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ pinmuxs = group->data;
+
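+	/* Compute the value once from the group's first pin, then apply it to all pins. */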
+ if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin,
+ configs, num_configs, &value))
+ return -ENOTSUPP;
+
+ for (i = 0; i < group->grp.npins; i++)
+ cv1800_pin_set_config(pctrl, group->grp.pins[i], value);
+
+ return 0;
+}
+
+static const struct pinconf_ops cv1800_pconf_ops = {
+ .pin_config_get = cv1800_pconf_get,
+ .pin_config_set = cv1800_pconf_set,
+ .pin_config_group_set = cv1800_pconf_group_set,
+ .is_generic = true,
+};
+
+int cv1800_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_pinctrl *pctrl;
+ const struct cv1800_pinctrl_data *pctrl_data;
+ int ret;
+
+ pctrl_data = device_get_match_data(dev);
+ if (!pctrl_data)
+ return -ENODEV;
+
+ if (pctrl_data->npins == 0 || pctrl_data->npd == 0)
+ return dev_err_probe(dev, -EINVAL, "invalid pin data\n");
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->power_cfg = devm_kcalloc(dev, pctrl_data->npd,
+ sizeof(u32), GFP_KERNEL);
+ if (!pctrl->power_cfg)
+ return -ENOMEM;
+
+ pctrl->regs[0] = devm_platform_ioremap_resource_byname(pdev, "sys");
+ if (IS_ERR(pctrl->regs[0]))
+ return PTR_ERR(pctrl->regs[0]);
+
+ pctrl->regs[1] = devm_platform_ioremap_resource_byname(pdev, "rtc");
+ if (IS_ERR(pctrl->regs[1]))
+ return PTR_ERR(pctrl->regs[1]);
+
+ pctrl->pdesc.name = dev_name(dev);
+ pctrl->pdesc.pins = pctrl_data->pins;
+ pctrl->pdesc.npins = pctrl_data->npins;
+ pctrl->pdesc.pctlops = &cv1800_pctrl_ops;
+ pctrl->pdesc.pmxops = &cv1800_pmx_ops;
+ pctrl->pdesc.confops = &cv1800_pconf_ops;
+ pctrl->pdesc.owner = THIS_MODULE;
+
+ pctrl->data = pctrl_data;
+ pctrl->dev = dev;
+ raw_spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc,
+ pctrl, &pctrl->pctl_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+				     "failed to register pinctrl driver\n");
+
+ return pinctrl_enable(pctrl->pctl_dev);
+}
+EXPORT_SYMBOL_GPL(cv1800_pinctrl_probe);
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.h b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..1a9998abb3b7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _PINCTRL_SOPHGO_CV18XX_H
+#define _PINCTRL_SOPHGO_CV18XX_H
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+
+enum cv1800_pin_io_type {
+ IO_TYPE_1V8_ONLY = 0,
+ IO_TYPE_1V8_OR_3V3 = 1,
+ IO_TYPE_AUDIO = 2,
+ IO_TYPE_ETH = 3
+};
+
+#define CV1800_PINCONF_AREA_SYS 0
+#define CV1800_PINCONF_AREA_RTC 1
+
+struct cv1800_pinmux {
+ u16 offset;
+ u8 area;
+ u8 max;
+};
+
+struct cv1800_pinmux2 {
+ u16 offset;
+ u8 area;
+ u8 max;
+ u8 pfunc;
+};
+
+struct cv1800_pinconf {
+ u16 offset;
+ u8 area;
+};
+
+#define CV1800_PIN_HAVE_MUX2 BIT(0)
+#define CV1800_PIN_IO_TYPE GENMASK(2, 1)
+
+#define CV1800_PIN_FLAG_IO_TYPE(type)		\
+	FIELD_PREP_CONST(CV1800_PIN_IO_TYPE, type)
+
+struct cv1800_pin {
+ u16 pin;
+ u16 flags;
+ u8 power_domain;
+ struct cv1800_pinmux mux;
+ struct cv1800_pinmux2 mux2;
+ struct cv1800_pinconf conf;
+};
+
+#define PIN_POWER_STATE_1V8 1800
+#define PIN_POWER_STATE_3V3 3300
+
+/**
+ * struct cv1800_vddio_cfg_ops - pin VDDIO operations
+ *
+ * @get_pull_up: get the typical pull-up resistor value.
+ * @get_pull_down: get the typical pull-down resistor value.
+ * @get_oc_map: get the map from typical low-level output current values
+ *	to register values.
+ * @get_schmitt_map: get the map from register values to typical Schmitt
+ *	trigger thresholds.
+ */
+struct cv1800_vddio_cfg_ops {
+ int (*get_pull_up)(struct cv1800_pin *pin, const u32 *psmap);
+ int (*get_pull_down)(struct cv1800_pin *pin, const u32 *psmap);
+ int (*get_oc_map)(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map);
+ int (*get_schmitt_map)(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map);
+};
+
+struct cv1800_pinctrl_data {
+ const struct pinctrl_pin_desc *pins;
+ const struct cv1800_pin *pindata;
+ const char * const *pdnames;
+ const struct cv1800_vddio_cfg_ops *vddio_ops;
+ u16 npins;
+ u16 npd;
+};
+
+static inline enum cv1800_pin_io_type cv1800_pin_io_type(struct cv1800_pin *pin)
+{
+ return FIELD_GET(CV1800_PIN_IO_TYPE, pin->flags);
+}
+
+int cv1800_pinctrl_probe(struct platform_device *pdev);
+
+#define CV1800_FUNC_PIN(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ }
+
+#define CV1800_GENERAL_PIN(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max, \
+ _conf_area, _conf_offset) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ .conf = { \
+ .area = (_conf_area), \
+ .offset = (_conf_offset), \
+ }, \
+ }
+
+#define CV1800_GENERATE_PIN_MUX2(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max, \
+ _mux2_area, _mux2_offset, \
+ _mux2_func_max, \
+ _conf_area, _conf_offset) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type) | \
+ CV1800_PIN_HAVE_MUX2, \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ .mux2 = { \
+ .area = (_mux2_area), \
+ .offset = (_mux2_offset), \
+ .max = (_mux2_func_max), \
+ }, \
+ .conf = { \
+ .area = (_conf_area), \
+ .offset = (_conf_offset), \
+ }, \
+ }
+
+#endif
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2000.c b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
new file mode 100644
index 000000000000..63c05b4dd68f
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2000 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2000.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum SG2000_POWER_DOMAIN {
+ VDD18A_EPHY = 0,
+ VDD18A_MIPI = 1,
+ VDDIO18_1 = 2,
+ VDDIO_EMMC = 3,
+ VDDIO_RTC = 4,
+ VDDIO_SD0 = 5,
+ VDDIO_SD1 = 6,
+ VDDIO_VIVO = 7
+};
+
+static const char *const sg2000_power_domain_desc[] = {
+ [VDD18A_EPHY] = "VDD18A_EPHY",
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDDIO18_1] = "VDDIO18_1",
+ [VDDIO_EMMC] = "VDDIO_EMMC",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0] = "VDDIO_SD0",
+ [VDDIO_SD1] = "VDDIO_SD1",
+ [VDDIO_VIVO] = "VDDIO_VIVO",
+};
+
+static int sg2000_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
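+	/* Typical pull-up resistor value for the pad's IO type and supply voltage. */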
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sg2000_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
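+/* Typical low-level output current in uA, indexed by drive-strength register value. */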
+static const u32 sg2000_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 sg2000_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 sg2000_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 sg2000_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int sg2000_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2000_1v8_oc_map;
+ return ARRAY_SIZE(sg2000_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2000_18od33_1v8_oc_map;
+ return ARRAY_SIZE(sg2000_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2000_18od33_3v3_oc_map;
+ return ARRAY_SIZE(sg2000_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = sg2000_eth_oc_map;
+ return ARRAY_SIZE(sg2000_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
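+/* Typical Schmitt trigger thresholds in uV; entry 0 disables the trigger. */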
+static const u32 sg2000_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 sg2000_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 sg2000_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int sg2000_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2000_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2000_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2000_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2000_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2000_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(sg2000_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops sg2000_vddio_cfg_ops = {
+ .get_pull_up = sg2000_get_pull_up,
+ .get_pull_down = sg2000_get_pull_down,
+ .get_oc_map = sg2000_get_oc_map,
+ .get_schmitt_map = sg2000_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc sg2000_pins[] = {
+ PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"),
+ PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"),
+ PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"),
+ PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"),
+ PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"),
+ PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"),
+ PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"),
+ PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"),
+ PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"),
+ PINCTRL_PIN(PIN_USB_ID, "USB_ID"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"),
+ PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"),
+ PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"),
+ PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"),
+ PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"),
+ PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"),
+ PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"),
+ PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"),
+ PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_ADC2, "ADC2"),
+ PINCTRL_PIN(PIN_ADC3, "ADC3"),
+ PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_RSTN, "RSTN"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"),
+ PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"),
+ PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_CLK32K, "CLK32K"),
+ PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"),
+ PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_CLK25M, "CLK25M"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+};
+
+static const struct cv1800_pin sg2000_pin_data[ARRAY_SIZE(sg2000_pins)] = {
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x194, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc60),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x154, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x150, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x134, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc00),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc6c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x19c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc68),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x198, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc64),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x164, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc30),
+ CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x158, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x148, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x140, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x138, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0fc, 3,
+ CV1800_PINCONF_AREA_SYS, 0x814),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x004, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb04),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x000, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb00),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x168, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc34),
+ CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x160, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc2c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x144, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x13c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x100, 3,
+ CV1800_PINCONF_AREA_SYS, 0x818),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb0c),
+ CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x014, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x14c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x010, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb10),
+ CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x008, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb08),
+ CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x15c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x80c),
+ CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c4, 5),
+ CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb18),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 0,
+ CV1800_PINCONF_AREA_SYS, 0x800),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c0, 6),
+ CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x050),
+ CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x054),
+ CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x06c, 6,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x024),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x01c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x048, 4,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+};
+
+static const struct cv1800_pinctrl_data sg2000_pindata = {
+ .pins = sg2000_pins,
+ .pindata = sg2000_pin_data,
+ .pdnames = sg2000_power_domain_desc,
+ .vddio_ops = &sg2000_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(sg2000_pins),
+ .npd = ARRAY_SIZE(sg2000_power_domain_desc),
+};
+
+static const struct of_device_id sg2000_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2000-pinctrl", .data = &sg2000_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2000_pinctrl_ids);
+
+static struct platform_driver sg2000_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "sg2000-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2000_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2000_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2000 series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2002.c b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
new file mode 100644
index 000000000000..5c49208dcb59
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2002 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2002.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum SG2002_POWER_DOMAIN {
+ VDD18A_MIPI = 0,
+ VDD18A_USB_PLL_ETH = 1,
+ VDDIO_RTC = 2,
+ VDDIO_SD0_EMMC = 3,
+ VDDIO_SD1 = 4
+};
+
+static const char *const sg2002_power_domain_desc[] = {
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDD18A_USB_PLL_ETH] = "VDD18A_USB_PLL_ETH",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0_EMMC] = "VDDIO_SD0_EMMC",
+ [VDDIO_SD1] = "VDDIO_SD1",
+};
+
+static int sg2002_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sg2002_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 sg2002_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 sg2002_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 sg2002_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 sg2002_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int sg2002_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2002_1v8_oc_map;
+ return ARRAY_SIZE(sg2002_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2002_18od33_1v8_oc_map;
+ return ARRAY_SIZE(sg2002_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2002_18od33_3v3_oc_map;
+ return ARRAY_SIZE(sg2002_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = sg2002_eth_oc_map;
+ return ARRAY_SIZE(sg2002_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 sg2002_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 sg2002_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 sg2002_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int sg2002_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2002_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2002_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2002_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2002_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2002_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(sg2002_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops sg2002_vddio_cfg_ops = {
+ .get_pull_up = sg2002_get_pull_up,
+ .get_pull_down = sg2002_get_pull_down,
+ .get_oc_map = sg2002_get_oc_map,
+ .get_schmitt_map = sg2002_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc sg2002_pins[] = {
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+};
+
+static const struct cv1800_pin sg2002_pin_data[ARRAY_SIZE(sg2002_pins)] = {
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+};
+
+static const struct cv1800_pinctrl_data sg2002_pindata = {
+ .pins = sg2002_pins,
+ .pindata = sg2002_pin_data,
+ .pdnames = sg2002_power_domain_desc,
+ .vddio_ops = &sg2002_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(sg2002_pins),
+ .npd = ARRAY_SIZE(sg2002_power_domain_desc),
+};
+
+static const struct of_device_id sg2002_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2002-pinctrl", .data = &sg2002_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2002_pinctrl_ids);
+
+static struct platform_driver sg2002_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "sg2002-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2002_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2002_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2002 series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 73bcf806af0e..bde67ee31417 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1603,30 +1603,26 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
}
ret = of_clk_get_parent_count(node);
- clk = devm_clk_get(&pdev->dev, ret == 1 ? NULL : "apb");
+ clk = devm_clk_get_enabled(&pdev->dev, ret == 1 ? NULL : "apb");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto gpiochip_error;
}
- ret = clk_prepare_enable(clk);
- if (ret)
- goto gpiochip_error;
-
pctl->irq = devm_kcalloc(&pdev->dev,
pctl->desc->irq_banks,
sizeof(*pctl->irq),
GFP_KERNEL);
if (!pctl->irq) {
ret = -ENOMEM;
- goto clk_error;
+ goto gpiochip_error;
}
for (i = 0; i < pctl->desc->irq_banks; i++) {
pctl->irq[i] = platform_get_irq(pdev, i);
if (pctl->irq[i] < 0) {
ret = pctl->irq[i];
- goto clk_error;
+ goto gpiochip_error;
}
}
@@ -1637,7 +1633,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
if (!pctl->domain) {
dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
ret = -ENOMEM;
- goto clk_error;
+ goto gpiochip_error;
}
for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
@@ -1669,8 +1665,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
return 0;
-clk_error:
- clk_disable_unprepare(clk);
gpiochip_error:
gpiochip_remove(pctl->chip);
return ret;
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index f5e5a23d2226..019b302db2b0 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -82,7 +82,7 @@ struct ti_iodelay_reg_data {
u32 reg_start_offset;
u32 reg_nr_per_pin;
- struct regmap_config *regmap_config;
+ const struct regmap_config *regmap_config;
};
/**
@@ -274,6 +274,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
}
/**
+ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
+ * @data: IODelay device
+ *
+ * Deinitialize the IODelay device (basically just lock the region back up).
+ */
+static void ti_iodelay_pinconf_deinit_dev(void *data)
+{
+ struct ti_iodelay_device *iod = data;
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+
+ /* lock the iodelay region back again */
+ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_lock_val);
+}
+
+/**
* ti_iodelay_pinconf_init_dev() - Initialize IODelay device
* @iod: iodelay device
*
@@ -295,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
if (r)
return r;
+ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev,
+ iod);
+ if (r)
+ return r;
+
/* Read up Recalibration sequence done by bootloader */
r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val);
if (r)
@@ -354,21 +375,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
}
/**
- * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
- * @iod: IODelay device
- *
- * Deinitialize the IODelay device (basically just lock the region back up.
- */
-static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
-{
- const struct ti_iodelay_reg_data *reg = iod->reg_data;
-
- /* lock the iodelay region back again */
- regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
- reg->global_lock_mask, reg->global_lock_val);
-}
-
-/**
* ti_iodelay_get_pingroup() - Find the group mapped by a group selector
* @iod: iodelay device
* @selector: Group Selector
@@ -770,14 +776,14 @@ static int ti_iodelay_alloc_pins(struct device *dev,
return 0;
}
-static struct regmap_config dra7_iodelay_regmap_config = {
+static const struct regmap_config dra7_iodelay_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xd1c,
};
-static struct ti_iodelay_reg_data dra7_iodelay_data = {
+static const struct ti_iodelay_reg_data dra7_iodelay_data = {
.signature_mask = 0x0003f000,
.signature_value = 0x29,
.lock_mask = 0x00000400,
@@ -877,27 +883,11 @@ static int ti_iodelay_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, iod);
-
return pinctrl_enable(iod->pctl);
}
-/**
- * ti_iodelay_remove() - standard remove
- * @pdev: platform device
- */
-static void ti_iodelay_remove(struct platform_device *pdev)
-{
- struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
-
- ti_iodelay_pinconf_deinit_dev(iod);
-
- /* Expect other allocations to be freed by devm */
-}
-
static struct platform_driver ti_iodelay_driver = {
.probe = ti_iodelay_probe,
- .remove_new = ti_iodelay_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ti_iodelay_of_match,
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a2cdbfbaeae6..3ab668764383 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -749,10 +749,9 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (!src->num_i2c_peripherals)
return 0;
- i2c_peripherals = kmemdup(src->i2c_peripherals,
- src->num_i2c_peripherals *
- sizeof(*src->i2c_peripherals),
- GFP_KERNEL);
+ i2c_peripherals = kmemdup_array(src->i2c_peripherals,
+ src->num_i2c_peripherals,
+ sizeof(*i2c_peripherals), GFP_KERNEL);
if (!i2c_peripherals)
return -ENOMEM;
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index f0470248b109..c784119ab5dc 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -631,12 +631,12 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
-static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data __initconst = {
+static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = {
.quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
.quirk_mmio_memory_base = 0xE00,
};
-static const struct lpc_driver_data framework_laptop_11_lpc_driver_data __initconst = {
+static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initconst = {
.quirks = CROS_EC_LPC_QUIRK_ACPI_ID|CROS_EC_LPC_QUIRK_AML_MUTEX,
.quirk_acpi_id = "PNP0C09",
.quirk_aml_mutex_name = "ECMT",
@@ -696,21 +696,39 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
},
/* A small number of non-Chromebook/box machines also use the ChromeOS EC */
{
- /* the Framework Laptop 13 (AMD Ryzen) and 16 (AMD Ryzen) */
+ /* Framework Laptop (11th Gen Intel Core) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMD Ryzen"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop"),
+ },
+ .driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
+ },
+ {
+ /* Framework Laptop (12th Gen Intel Core) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "12th Gen Intel Core"),
+ },
+ .driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
+ },
+ {
+ /* Framework Laptop (13th Gen Intel Core) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "13th Gen Intel Core"),
},
- .driver_data = (void *)&framework_laptop_amd_lpc_driver_data,
+ .driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
},
{
- /* the Framework Laptop (Intel 11th, 12th, 13th Generation) */
+ /*
+ * All remaining Framework Laptop models (13 AMD Ryzen, 16 AMD
+ * Ryzen, Intel Core Ultra)
+ */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"),
},
- .driver_data = (void *)&framework_laptop_11_lpc_driver_data,
+ .driver_data = (void *)&framework_laptop_npcx_lpc_driver_data,
},
{ /* sentinel */ }
};
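
Since the lookup returns the first matching entry, the reshuffled table must keep the exact-match Framework entries ahead of the broad DMI_PRODUCT_FAMILY catch-all. A hedged sketch of the lookup side, assuming dmi_first_match() is how the table is consumed:

    const struct dmi_system_id *match;

    match = dmi_first_match(cros_ec_lpc_dmi_table);
    if (match)
            driver_data = match->driver_data;       /* most specific entry wins */
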
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 4d305876ec08..c7781aea0b88 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -1285,6 +1285,15 @@ unregister_ports:
return ret;
}
+static void cros_typec_remove(struct platform_device *pdev)
+{
+ struct cros_typec_data *typec = platform_get_drvdata(pdev);
+
+ cros_usbpd_unregister_notify(&typec->nb);
+ cancel_work_sync(&typec->port_work);
+ cros_unregister_ports(typec);
+}
+
static int __maybe_unused cros_typec_suspend(struct device *dev)
{
struct cros_typec_data *typec = dev_get_drvdata(dev);
@@ -1316,6 +1325,7 @@ static struct platform_driver cros_typec_driver = {
.pm = &cros_typec_pm_ops,
},
.probe = cros_typec_probe,
+ .remove_new = cros_typec_remove,
};
module_platform_driver(cros_typec_driver);
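
remove_new is the transitional name for the void-returning platform remove callback; any error code from the old int-returning variant was ignored by the driver core anyway. A hedged skeleton with hypothetical names:

    static void example_remove(struct platform_device *pdev)
    {
            struct example_data *data = platform_get_drvdata(pdev);

            /* tear down in reverse probe order; nothing can be returned */
            example_teardown(data);
    }

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .remove_new = example_remove,
    };
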
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index a111eca8ff57..49c383eb6785 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -70,7 +70,7 @@ config TURRIS_OMNIA_MCU_TRNG
bool "Turris Omnia MCU true random number generator"
default y
depends on TURRIS_OMNIA_MCU_GPIO
- depends on HW_RANDOM
+ depends on HW_RANDOM=y || HW_RANDOM=TURRIS_OMNIA_MCU
help
Say Y here to add support for the true random number generator
provided by CZ.NIC's Turris Omnia MCU.
diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c
index ad953fb3c37a..9a1d9292dc9a 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-trng.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c
@@ -70,8 +70,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu)
irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)];
irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
- if (!irq)
- return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n");
/*
* If someone else cleared the TRNG interrupt but did not read the
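
The fix above matters because gpiod_to_irq() reports failure as a negative errno, never as 0, so the old `if (!irq)` test could not fire. A hedged sketch of the full map-and-request pattern with illustrative names:

    int irq, ret;

    irq = gpiod_to_irq(desc);
    if (irq < 0)
            return irq;             /* negative errno signals failure */

    ret = devm_request_threaded_irq(dev, irq, NULL, example_handler,
                                    IRQF_ONESHOT, "example", priv);
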
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 4ed9c7fd2b62..9d18dfca6a67 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1774,6 +1774,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
/* "event_list" sysfs to list events supported by the block */
attr = &pmc->block[blk_num].attr_event_list;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0444;
attr->dev_attr.show = mlxbf_pmc_event_list_show;
attr->nr = blk_num;
@@ -1787,6 +1788,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
if (strstr(pmc->block_name[blk_num], "l3cache") ||
((pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE))) {
attr = &pmc->block[blk_num].attr_enable;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_enable_show;
attr->dev_attr.store = mlxbf_pmc_enable_store;
@@ -1814,6 +1816,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
/* "eventX" and "counterX" sysfs to program and read counter values */
for (j = 0; j < pmc->block[blk_num].counters; ++j) {
attr = &pmc->block[blk_num].attr_counter[j];
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
@@ -1826,6 +1829,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
attr = NULL;
attr = &pmc->block[blk_num].attr_event[j];
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_event_show;
attr->dev_attr.store = mlxbf_pmc_event_store;
@@ -1861,6 +1865,7 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, unsigned int blk_num)
while (count > 0) {
--count;
attr = &pmc->block[blk_num].attr_event[count];
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
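
sysfs_attr_init() gives each dynamically created attribute its own lockdep key; without it, lockdep complains about a key living in dynamic memory. A hedged sketch of the pattern for a one-off dynamic attribute:

    struct device_attribute *dattr;

    dattr = devm_kzalloc(dev, sizeof(*dattr), GFP_KERNEL);
    if (!dattr)
            return -ENOMEM;

    sysfs_attr_init(&dattr->attr);          /* required for dynamic attrs */
    dattr->attr.name = "example";
    dattr->attr.mode = 0444;
    dattr->show = example_show;

    ret = device_create_file(dev, dattr);
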
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 921520475ff6..48e9861bb571 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -332,9 +332,6 @@ static struct dentry *olpc_ec_setup_debugfs(void)
struct dentry *dbgfs_dir;
dbgfs_dir = debugfs_create_dir("olpc-ec", NULL);
- if (IS_ERR_OR_NULL(dbgfs_dir))
- return NULL;
-
debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops);
return dbgfs_dir;
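
The deleted check was redundant because the debugfs API is designed to be fire-and-forget: debugfs_create_dir() returns an ERR_PTR on failure, and passing that (or NULL) as a parent to later calls is handled gracefully. A hedged sketch:

    struct dentry *dir = debugfs_create_dir("example", NULL);

    /* no error check needed: a failed dir is tolerated as a parent */
    debugfs_create_file("cmd", 0600, dir, NULL, &example_fops);
    debugfs_create_u32("count", 0444, dir, &example_count);
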
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 62ccbcb15c74..fa7b3bda688a 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -536,7 +536,7 @@ static int olpc_xo175_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *resp,
dev_err(dev, "EC cmd error: timeout in STATE %d\n",
priv->cmd_state);
gpiod_set_value_cansleep(priv->gpio_cmd, 0);
- spi_slave_abort(priv->spi);
+ spi_target_abort(priv->spi);
olpc_xo175_ec_read_packet(priv);
return -ETIMEDOUT;
}
@@ -653,7 +653,7 @@ static void olpc_xo175_ec_remove(struct spi_device *spi)
if (pm_power_off == olpc_xo175_ec_power_off)
pm_power_off = NULL;
- spi_slave_abort(spi);
+ spi_target_abort(spi);
platform_device_unregister(olpc_ec);
olpc_ec = NULL;
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index af8d573aae93..d68d231e716e 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -441,6 +442,7 @@ static int ssam_add_client_device(struct device *parent, struct ssam_controller
sdev->dev.parent = parent;
sdev->dev.fwnode = fwnode_handle_get(node);
+ sdev->dev.of_node = to_of_node(node);
status = ssam_device_add(sdev);
if (status)
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 7e89f547999b..a265e667538c 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -1104,13 +1104,6 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle,
u64 funcs;
int status;
- /* Set defaults. */
- caps->ssh_power_profile = U32_MAX;
- caps->screen_on_sleep_idle_timeout = U32_MAX;
- caps->screen_off_sleep_idle_timeout = U32_MAX;
- caps->d3_closes_handle = false;
- caps->ssh_buffer_size = U32_MAX;
-
/* Pre-load supported DSM functions. */
status = ssam_dsm_get_functions(handle, &funcs);
if (status)
@@ -1150,6 +1143,52 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle,
}
/**
+ * ssam_controller_caps_load_from_of() - Load controller capabilities from OF/DT.
+ * @dev: A pointer to the controller device
+ * @caps: Where to store the capabilities in.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+static int ssam_controller_caps_load_from_of(struct device *dev, struct ssam_controller_caps *caps)
+{
+ /*
+	 * Every device from the Surface Pro X through the Surface Laptop 7
+	 * uses these identical values, which makes them good defaults.
+ */
+ caps->d3_closes_handle = true;
+ caps->screen_on_sleep_idle_timeout = 5000;
+ caps->screen_off_sleep_idle_timeout = 30;
+ caps->ssh_buffer_size = 48;
+ /* TODO: figure out power profile */
+
+ return 0;
+}
+
+/**
+ * ssam_controller_caps_load() - Load controller capabilities
+ * @dev: A pointer to the controller device
+ * @caps: Where to store the capabilities.
+ *
+ * Return: Returns zero on success, a negative error code on failure.
+ */
+static int ssam_controller_caps_load(struct device *dev, struct ssam_controller_caps *caps)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+
+ /* Set defaults. */
+ caps->ssh_power_profile = U32_MAX;
+ caps->screen_on_sleep_idle_timeout = U32_MAX;
+ caps->screen_off_sleep_idle_timeout = U32_MAX;
+ caps->d3_closes_handle = false;
+ caps->ssh_buffer_size = U32_MAX;
+
+ if (handle)
+ return ssam_controller_caps_load_from_acpi(handle, caps);
+ else
+ return ssam_controller_caps_load_from_of(dev, caps);
+}
+
+/**
* ssam_controller_init() - Initialize SSAM controller.
* @ctrl: The controller to initialize.
* @serdev: The serial device representing the underlying data transport.
@@ -1165,13 +1204,12 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle,
int ssam_controller_init(struct ssam_controller *ctrl,
struct serdev_device *serdev)
{
- acpi_handle handle = ACPI_HANDLE(&serdev->dev);
int status;
init_rwsem(&ctrl->lock);
kref_init(&ctrl->kref);
- status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
+ status = ssam_controller_caps_load(&serdev->dev, &ctrl->caps);
if (status)
return status;
@@ -2716,11 +2754,12 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
-
- irq = gpiod_to_irq(gpiod);
- gpiod_put(gpiod);
+ if (IS_ERR(gpiod)) {
+ irq = fwnode_irq_get(dev_fwnode(dev), 0);
+ } else {
+ irq = gpiod_to_irq(gpiod);
+ gpiod_put(gpiod);
+ }
if (irq < 0)
return irq;
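
Hoisting the defaults out of the ACPI loader into a common dispatcher lets both firmware backends inherit them, so the OF path only overrides what it knows. A condensed, hedged sketch of the shape, with hypothetical names:

    static int example_caps_load(struct device *dev, struct example_caps *caps)
    {
            acpi_handle handle = ACPI_HANDLE(dev);

            example_caps_set_defaults(caps);        /* shared by both paths */

            if (handle)
                    return example_caps_load_from_acpi(handle, caps);

            return example_caps_load_from_of(dev, caps);
    }
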
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 797d0645bd77..c58e1fdd1a5f 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -17,9 +17,12 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/serdev.h>
#include <linux/sysfs.h>
+#include <linux/units.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
@@ -299,7 +302,7 @@ static const struct attribute_group ssam_sam_group = {
};
-/* -- ACPI based device setup. ---------------------------------------------- */
+/* -- Serial device setup. -------------------------------------------------- */
static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
void *ctx)
@@ -352,13 +355,28 @@ static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
return AE_CTRL_TERMINATE;
}
-static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
- struct serdev_device *serdev)
+static int ssam_serdev_setup_via_acpi(struct serdev_device *serdev, acpi_handle handle)
{
- return acpi_walk_resources(handle, METHOD_NAME__CRS,
- ssam_serdev_setup_via_acpi_crs, serdev);
+ acpi_status status;
+
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ssam_serdev_setup_via_acpi_crs, serdev);
+
+ return status ? -ENXIO : 0;
}
+static int ssam_serdev_setup(struct acpi_device *ssh, struct serdev_device *serdev)
+{
+ if (ssh)
+ return ssam_serdev_setup_via_acpi(serdev, ssh->handle);
+
+ /* TODO: these values may differ per board/implementation */
+ serdev_device_set_baudrate(serdev, 4 * HZ_PER_MHZ);
+ serdev_device_set_flow_control(serdev, true);
+ serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+
+ return 0;
+}
/* -- Power management. ----------------------------------------------------- */
@@ -621,16 +639,17 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
struct device *dev = &serdev->dev;
struct acpi_device *ssh = ACPI_COMPANION(dev);
struct ssam_controller *ctrl;
- acpi_status astatus;
int status;
- status = gpiod_count(dev, NULL);
- if (status < 0)
- return dev_err_probe(dev, status, "no GPIO found\n");
+ if (ssh) {
+ status = gpiod_count(dev, NULL);
+ if (status < 0)
+ return dev_err_probe(dev, status, "no GPIO found\n");
- status = devm_acpi_dev_add_driver_gpios(dev, ssam_acpi_gpios);
- if (status)
- return status;
+ status = devm_acpi_dev_add_driver_gpios(dev, ssam_acpi_gpios);
+ if (status)
+ return status;
+ }
/* Allocate controller. */
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -655,9 +674,9 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
goto err_devopen;
}
- astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
- if (ACPI_FAILURE(astatus)) {
- status = dev_err_probe(dev, -ENXIO, "failed to setup serdev\n");
+ status = ssam_serdev_setup(ssh, serdev);
+ if (status) {
+ status = dev_err_probe(dev, status, "failed to setup serdev\n");
goto err_devinit;
}
@@ -717,7 +736,23 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
* For now let's thus default power/wakeup to false.
*/
device_set_wakeup_capable(dev, true);
- acpi_dev_clear_dependencies(ssh);
+
+ /*
+	 * When using DT, we have to register the platform hub driver manually,
+	 * as it can't be matched based on the top-level board compatible (as
+	 * is done in the ACPI case).
+ */
+ if (!ssh) {
+ struct platform_device *ph_pdev =
+ platform_device_register_simple("surface_aggregator_platform_hub",
+ 0, NULL, 0);
+ if (IS_ERR(ph_pdev))
+ return dev_err_probe(dev, PTR_ERR(ph_pdev),
+ "Failed to register the platform hub driver\n");
+ }
+
+ if (ssh)
+ acpi_dev_clear_dependencies(ssh);
return 0;
@@ -782,18 +817,27 @@ static void ssam_serial_hub_remove(struct serdev_device *serdev)
device_set_wakeup_capable(&serdev->dev, false);
}
-static const struct acpi_device_id ssam_serial_hub_match[] = {
+static const struct acpi_device_id ssam_serial_hub_acpi_match[] = {
{ "MSHW0084", 0 },
{ },
};
-MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
+MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_acpi_match);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ssam_serial_hub_of_match[] = {
+ { .compatible = "microsoft,surface-sam", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ssam_serial_hub_of_match);
+#endif
static struct serdev_device_driver ssam_serial_hub = {
.probe = ssam_serial_hub_probe,
.remove = ssam_serial_hub_remove,
.driver = {
.name = "surface_serial_hub",
- .acpi_match_table = ssam_serial_hub_match,
+ .acpi_match_table = ACPI_PTR(ssam_serial_hub_acpi_match),
+ .of_match_table = of_match_ptr(ssam_serial_hub_of_match),
.pm = &ssam_serial_hub_pm_ops,
.shutdown = ssam_serial_hub_shutdown,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
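
ACPI_PTR() and of_match_ptr() compile the table references down to NULL when CONFIG_ACPI or CONFIG_OF is disabled; that is also why the OF table above sits under #ifdef CONFIG_OF, which avoids a defined-but-unused table in builds without OF support. A hedged sketch:

    static struct serdev_device_driver example_driver = {
            .driver = {
                    .name = "example",
                    .acpi_match_table = ACPI_PTR(example_acpi_match),
                    .of_match_table = of_match_ptr(example_of_match),
            },
    };
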
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index a23dff35f8ca..25c8aa2131d6 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
@@ -291,6 +292,18 @@ static const struct software_node *ssam_node_group_sl6[] = {
NULL,
};
+/* Devices for Surface Laptop 7. */
+static const struct software_node *ssam_node_group_sl7[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_perf_profile_with_fan,
+ &ssam_node_fan_speed,
+ &ssam_node_hid_sam_keyboard,
+	/* TODO: evaluate thermal sensor devices when we get a driver for that */
+ NULL,
+};
+
/* Devices for Surface Laptop Studio 1. */
static const struct software_node *ssam_node_group_sls1[] = {
&ssam_node_root,
@@ -380,7 +393,7 @@ static const struct software_node *ssam_node_group_sp9[] = {
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
-static const struct acpi_device_id ssam_platform_hub_match[] = {
+static const struct acpi_device_id ssam_platform_hub_acpi_match[] = {
/* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
{ "MSHW0081", (unsigned long)ssam_node_group_gen5 },
@@ -446,18 +459,39 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
{ },
};
-MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_acpi_match);
+
+static const struct of_device_id ssam_platform_hub_of_match[] __maybe_unused = {
+ /* Surface Laptop 7 */
+ { .compatible = "microsoft,romulus13", (void *)ssam_node_group_sl7 },
+ { .compatible = "microsoft,romulus15", (void *)ssam_node_group_sl7 },
+ { },
+};
static int ssam_platform_hub_probe(struct platform_device *pdev)
{
const struct software_node **nodes;
+ const struct of_device_id *match;
+ struct device_node *fdt_root;
struct ssam_controller *ctrl;
struct fwnode_handle *root;
int status;
nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
- if (!nodes)
- return -ENODEV;
+ if (!nodes) {
+ fdt_root = of_find_node_by_path("/");
+ if (!fdt_root)
+ return -ENODEV;
+
+ match = of_match_node(ssam_platform_hub_of_match, fdt_root);
+ of_node_put(fdt_root);
+ if (!match)
+ return -ENODEV;
+
+ nodes = (const struct software_node **)match->data;
+ if (!nodes)
+ return -ENODEV;
+ }
/*
* As we're adding the SSAM client devices as children under this device
@@ -506,12 +540,13 @@ static struct platform_driver ssam_platform_hub_driver = {
.remove_new = ssam_platform_hub_remove,
.driver = {
.name = "surface_aggregator_platform_hub",
- .acpi_match_table = ssam_platform_hub_match,
+ .acpi_match_table = ssam_platform_hub_acpi_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(ssam_platform_hub_driver);
+MODULE_ALIAS("platform:surface_aggregator_platform_hub");
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
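
When a device has no per-node firmware match data, the probe above falls back to matching the board's top-level compatible. A hedged sketch of that pattern, with of_node_put() balancing the reference taken by of_find_node_by_path():

    struct device_node *root = of_find_node_by_path("/");
    const struct of_device_id *match;

    if (!root)
            return -ENODEV;

    match = of_match_node(example_of_match, root);
    of_node_put(root);              /* drop the ref taken by the lookup */
    if (!match)
            return -ENODEV;
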
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ddfccc226751..3875abba5a79 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1000,7 +1000,8 @@ config TOPSTAR_LAPTOP
config SERIAL_MULTI_INSTANTIATE
tristate "Serial bus multi instantiate pseudo device driver"
- depends on I2C && SPI && ACPI
+ depends on ACPI
+ depends on (I2C && !SPI) || (!I2C && SPI) || (I2C && SPI)
help
Some ACPI-based systems list multiple devices in a single ACPI
firmware-node. This driver will instantiate separate clients
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 38c932df6446..7169b84ccdb6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
@@ -1685,7 +1684,7 @@ static int acer_backlight_init(struct device *dev)
acer_backlight_device = bd;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.brightness = read_brightness(bd);
backlight_update_status(bd);
return 0;
@@ -2224,39 +2223,25 @@ static void acer_rfkill_exit(void)
}
}
-static void acer_wmi_notify(u32 value, void *context)
+static void acer_wmi_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
struct event_return_value return_value;
- acpi_status status;
u16 device_state;
const struct key_entry *key;
u32 scancode;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_warn("bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
-
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
pr_warn("Unknown response received %d\n", obj->type);
- kfree(obj);
return;
}
if (obj->buffer.length != 8) {
pr_warn("Unknown buffer length %d\n", obj->buffer.length);
- kfree(obj);
return;
}
return_value = *((struct event_return_value *)obj->buffer.pointer);
- kfree(obj);
switch (return_value.function) {
case WMID_HOTKEY_EVENT:
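
The conversion above follows the updated WMI notify prototype: the WMI core now fetches and frees the event data itself, so handlers receive the ACPI object directly and the wmi_get_event_data()/kfree() boilerplate disappears. A hedged skeleton with hypothetical names:

    static void example_wmi_notify(union acpi_object *obj, void *context)
    {
            if (!obj || obj->type != ACPI_TYPE_BUFFER)
                    return;         /* no kfree(obj): the core owns it */

            example_handle_event(obj->buffer.pointer, obj->buffer.length);
    }
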
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 018c48429616..4c3bb68e8fe4 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -378,33 +378,13 @@ static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
return 0;
}
-static int acerhdf_bind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+static bool acerhdf_should_bind(struct thermal_zone_device *thermal,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
/* if the cooling device is the one from acerhdf bind it */
- if (cdev != cl_dev)
- return 0;
-
- if (thermal_zone_bind_cooling_device(thermal, 0, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT)) {
- pr_err("error binding cooling dev\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static int acerhdf_unbind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- if (cdev != cl_dev)
- return 0;
-
- if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
- pr_err("error unbinding cooling dev\n");
- return -EINVAL;
- }
- return 0;
+ return cdev == cl_dev && trip->type == THERMAL_TRIP_ACTIVE;
}
static inline void acerhdf_revert_to_bios_mode(void)
@@ -447,8 +427,7 @@ static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
/* bind callback functions to thermalzone */
static struct thermal_zone_device_ops acerhdf_dev_ops = {
- .bind = acerhdf_bind,
- .unbind = acerhdf_unbind,
+ .should_bind = acerhdf_should_bind,
.get_temp = acerhdf_get_ec_temp,
.change_mode = acerhdf_change_mode,
.get_crit_temp = acerhdf_get_crit_temp,
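
The new .should_bind op only answers whether a cooling device belongs to a trip; the thermal core then performs the bind/unbind itself, which removes the error-prone manual thermal_zone_bind_cooling_device() calls. A hedged skeleton:

    static bool example_should_bind(struct thermal_zone_device *tz,
                                    const struct thermal_trip *trip,
                                    struct thermal_cooling_device *cdev,
                                    struct cooling_spec *c)
    {
            /* c->upper/lower/weight may be adjusted here if needed */
            return cdev == example_cdev && trip->type == THERMAL_TRIP_ACTIVE;
    }
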
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 1157ec148880..d5b496433d69 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -282,6 +282,29 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
return 0;
}
+static int apmf_notify_smart_pc_update(struct amd_pmf_dev *pdev, u32 val, u32 preq, u32 index)
+{
+ struct amd_pmf_notify_smart_pc_update args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+ args.pending_req = preq;
+ args.custom_bios[index] = val;
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ dev_dbg(pdev->dev, "Notify smart pc update, val: %u\n", val);
+
+ return 0;
+}
+
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
@@ -447,6 +470,14 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
return 0;
}
+int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx)
+{
+ if (!is_apmf_func_supported(dev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES))
+ return -EINVAL;
+
+ return apmf_notify_smart_pc_update(dev, val, preq, idx);
+}
+
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 8f1f719befa3..d6af0ca036f1 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -37,12 +37,6 @@
#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMF_RESULT_FAILED 0xFF
-/* List of supported CPU ids */
-#define AMD_CPU_ID_RMB 0x14b5
-#define AMD_CPU_ID_PS 0x14e8
-#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
-#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
-
#define PMF_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
@@ -261,7 +255,19 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
/* Get Metrics Table Address */
if (alloc_buffer) {
- dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PS:
+ case AMD_CPU_ID_RMB:
+ dev->mtable_size = sizeof(dev->m_table);
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ dev->mtable_size = sizeof(dev->m_table_v2);
+ break;
+ default:
+ dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
+ }
+
+ dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL);
if (!dev->buf)
return -ENOMEM;
}
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
index 48870ca52b41..7cde5733b9ca 100644
--- a/drivers/platform/x86/amd/pmf/pmf-quirks.c
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -37,6 +37,14 @@ static const struct dmi_system_id fwbug_list[] = {
},
.driver_data = &quirk_no_sps_bug,
},
+ {
+ .ident = "ASUS TUF Gaming A14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FA401W"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 753d5662c080..8ce8816da9c1 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -19,6 +19,12 @@
#define POLICY_SIGN_COOKIE 0x31535024
#define POLICY_COOKIE_OFFSET 0x10
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
+
struct cookie_header {
u32 sign;
u32 length;
@@ -35,6 +41,7 @@ struct cookie_header {
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
+#define APMF_FUNC_NOTIFY_SMART_PC_UPDATES 14
#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16
/* Message Definitions */
@@ -82,7 +89,17 @@ struct cookie_header {
#define PMF_POLICY_STT_SKINTEMP_APU 7
#define PMF_POLICY_STT_SKINTEMP_HS2 8
#define PMF_POLICY_SYSTEM_STATE 9
+#define PMF_POLICY_BIOS_OUTPUT_1 10
+#define PMF_POLICY_BIOS_OUTPUT_2 11
#define PMF_POLICY_P3T 38
+#define PMF_POLICY_BIOS_OUTPUT_3 57
+#define PMF_POLICY_BIOS_OUTPUT_4 58
+#define PMF_POLICY_BIOS_OUTPUT_5 59
+#define PMF_POLICY_BIOS_OUTPUT_6 60
+#define PMF_POLICY_BIOS_OUTPUT_7 61
+#define PMF_POLICY_BIOS_OUTPUT_8 62
+#define PMF_POLICY_BIOS_OUTPUT_9 63
+#define PMF_POLICY_BIOS_OUTPUT_10 64
/* TA macros */
#define PMF_TA_IF_VERSION_MAJOR 1
@@ -181,6 +198,53 @@ struct apmf_fan_idx {
u32 fan_ctl_idx;
} __packed;
+struct smu_pmf_metrics_v2 {
+ u16 core_frequency[16]; /* MHz */
+ u16 core_power[16]; /* mW */
+ u16 core_temp[16]; /* centi-C */
+ u16 gfx_temp; /* centi-C */
+ u16 soc_temp; /* centi-C */
+ u16 stapm_opn_limit; /* mW */
+ u16 stapm_cur_limit; /* mW */
+ u16 infra_cpu_maxfreq; /* MHz */
+ u16 infra_gfx_maxfreq; /* MHz */
+ u16 skin_temp; /* centi-C */
+ u16 gfxclk_freq; /* MHz */
+ u16 fclk_freq; /* MHz */
+ u16 gfx_activity; /* GFX busy % [0-100] */
+ u16 socclk_freq; /* MHz */
+ u16 vclk_freq; /* MHz */
+ u16 vcn_activity; /* VCN busy % [0-100] */
+ u16 vpeclk_freq; /* MHz */
+ u16 ipuclk_freq; /* MHz */
+ u16 ipu_busy[8]; /* NPU busy % [0-100] */
+ u16 dram_reads; /* MB/sec */
+ u16 dram_writes; /* MB/sec */
+ u16 core_c0residency[16]; /* C0 residency % [0-100] */
+ u16 ipu_power; /* mW */
+ u32 apu_power; /* mW */
+ u32 gfx_power; /* mW */
+ u32 dgpu_power; /* mW */
+ u32 socket_power; /* mW */
+ u32 all_core_power; /* mW */
+ u32 filter_alpha_value; /* time constant [us] */
+ u32 metrics_counter;
+ u16 memclk_freq; /* MHz */
+ u16 mpipuclk_freq; /* MHz */
+ u16 ipu_reads; /* MB/sec */
+ u16 ipu_writes; /* MB/sec */
+ u32 throttle_residency_prochot;
+ u32 throttle_residency_spl;
+ u32 throttle_residency_fppt;
+ u32 throttle_residency_sppt;
+ u32 throttle_residency_thm_core;
+ u32 throttle_residency_thm_gfx;
+ u32 throttle_residency_thm_soc;
+ u16 psys;
+ u16 spare1;
+ u32 spare[6];
+} __packed;
+
struct smu_pmf_metrics {
u16 gfxclk_freq; /* in MHz */
u16 socclk_freq; /* in MHz */
@@ -278,6 +342,7 @@ struct amd_pmf_dev {
int hb_interval; /* SBIOS heartbeat interval */
struct delayed_work heart_beat;
struct smu_pmf_metrics m_table;
+ struct smu_pmf_metrics_v2 m_table_v2;
struct delayed_work work_buffer;
ktime_t start_time;
int socket_power_history[AVG_SAMPLE_SIZE];
@@ -302,6 +367,7 @@ struct amd_pmf_dev {
bool smart_pc_enabled;
u16 pmf_if_version;
struct input_dev *pmf_idev;
+ size_t mtable_size;
};
struct apmf_sps_prop_granular_v2 {
@@ -344,6 +410,12 @@ struct os_power_slider {
u8 slider_event;
} __packed;
+struct amd_pmf_notify_smart_pc_update {
+ u16 size;
+ u32 pending_req;
+ u32 custom_bios[10];
+} __packed;
+
struct fan_table_control {
bool manual;
unsigned long fan_id;
@@ -717,6 +789,7 @@ extern const struct attribute_group cnqf_feature_attribute_group;
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev);
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev);
int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
+int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx);
/* Smart PC - TA interfaces */
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 3c153fb1425e..b5183969f9bf 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -53,30 +53,49 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
#endif
-static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
{
u16 max, avg = 0;
int i;
- memset(dev->buf, 0, sizeof(dev->m_table));
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
- memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
-
- in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
- in->ev_info.skin_temperature = dev->m_table.skin_temp;
-
/* Get the avg and max C0 residency of all the cores */
- max = dev->m_table.avg_core_c0residency[0];
- for (i = 0; i < ARRAY_SIZE(dev->m_table.avg_core_c0residency); i++) {
- avg += dev->m_table.avg_core_c0residency[i];
- if (dev->m_table.avg_core_c0residency[i] > max)
- max = dev->m_table.avg_core_c0residency[i];
+ max = *core_res;
+ for (i = 0; i < size; i++) {
+ avg += core_res[i];
+ if (core_res[i] > max)
+ max = core_res[i];
}
-
- avg = DIV_ROUND_CLOSEST(avg, ARRAY_SIZE(dev->m_table.avg_core_c0residency));
+ avg = DIV_ROUND_CLOSEST(avg, size);
in->ev_info.avg_c0residency = avg;
in->ev_info.max_c0residency = max;
- in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity;
+}
+
+static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ /* Get the updated metrics table data */
+ memset(dev->buf, 0, dev->mtable_size);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PS:
+ memcpy(&dev->m_table, dev->buf, dev->mtable_size);
+ in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+ in->ev_info.skin_temperature = dev->m_table.skin_temp;
+ in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity;
+ amd_pmf_get_c0_residency(dev->m_table.avg_core_c0residency,
+ ARRAY_SIZE(dev->m_table.avg_core_c0residency), in);
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size);
+ in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power;
+ in->ev_info.skin_temperature = dev->m_table_v2.skin_temp;
+ in->ev_info.gfx_busy = dev->m_table_v2.gfx_activity;
+ amd_pmf_get_c0_residency(dev->m_table_v2.core_c0residency,
+ ARRAY_SIZE(dev->m_table_v2.core_c0residency), in);
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported CPU id: 0x%x", dev->cpu_id);
+ }
}
static const char * const pmf_battery_supply_name[] = {
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index e246367aacee..19c27b6e4666 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -160,6 +160,46 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n",
amd_pmf_uevent_as_str(val));
break;
+
+ case PMF_POLICY_BIOS_OUTPUT_1:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(0), 0);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_2:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(1), 1);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_3:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(2), 2);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_4:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(3), 3);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_5:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(4), 4);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_6:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(5), 5);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_7:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(6), 6);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_8:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(7), 7);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_9:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(8), 8);
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_10:
+ amd_pmf_smartpc_apply_bios_output(dev, val, BIT(9), 9);
+ break;
}
}
}
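
The ten near-identical cases each map PMF_POLICY_BIOS_OUTPUT_n to (BIT(n-1), n-1). Since the policy numbers are non-contiguous (10, 11, then 57 through 64), a hedged table-driven alternative would normalise the index first; `policy`, `val`, `dev` and `i` are assumed from the surrounding switch context:

    static const u32 bios_output_policies[] = {
            PMF_POLICY_BIOS_OUTPUT_1, PMF_POLICY_BIOS_OUTPUT_2,
            PMF_POLICY_BIOS_OUTPUT_3, PMF_POLICY_BIOS_OUTPUT_4,
            PMF_POLICY_BIOS_OUTPUT_5, PMF_POLICY_BIOS_OUTPUT_6,
            PMF_POLICY_BIOS_OUTPUT_7, PMF_POLICY_BIOS_OUTPUT_8,
            PMF_POLICY_BIOS_OUTPUT_9, PMF_POLICY_BIOS_OUTPUT_10,
    };

    for (i = 0; i < ARRAY_SIZE(bios_output_policies); i++) {
            if (policy == bios_output_policies[i]) {
                    amd_pmf_smartpc_apply_bios_output(dev, val, BIT(i), i);
                    break;
            }
    }
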
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index ccb33d034e2a..9d7e6b712abf 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -28,7 +28,6 @@
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
@@ -818,7 +817,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
asus->backlight_device = bd;
bd->props.brightness = asus_read_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
return 0;
}
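
These conversions drop the last fbdev dependency from backlight users; the new constants were defined to keep the old numeric values (BACKLIGHT_POWER_ON matching FB_BLANK_UNBLANK, BACKLIGHT_POWER_OFF matching FB_BLANK_POWERDOWN), so behaviour is unchanged. Sketch of the idiom:

    bd->props.power = BACKLIGHT_POWER_ON;   /* was FB_BLANK_UNBLANK */
    bd->props.brightness = example_read_brightness(bd);
    backlight_update_status(bd);
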
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index ed3633c5955d..ef04d396f61c 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -7,12 +7,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/backlight.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/fb.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
@@ -538,7 +538,7 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
dmi_check_system(asus_quirks);
driver->quirks = quirks;
- driver->panel_power = FB_BLANK_UNBLANK;
+ driver->panel_power = BACKLIGHT_POWER_ON;
/* overwrite the wapf setting if the wapf parameter is specified */
if (wapf != -1)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 37636e5a38e3..7a48220b4f5a 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -18,7 +18,6 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
@@ -97,6 +96,12 @@ module_param(fnlock_default, bool, 0444);
#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO 0
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO 1
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO 2
+
+#define PLATFORM_PROFILE_MAX 2
+
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -300,8 +305,8 @@ struct asus_wmi {
u32 kbd_rgb_dev;
bool kbd_rgb_state_available;
- bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
+ u32 throttle_thermal_policy_dev;
bool cpu_fan_curve_available;
bool gpu_fan_curve_available;
@@ -349,20 +354,29 @@ static int asus_wmi_evaluate_method3(u32 method_id,
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
- if (ACPI_FAILURE(status))
+ pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x, 0x%08x\n",
+ __func__, method_id, arg0, arg1, arg2);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, -EIO);
return -EIO;
+ }
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32) obj->integer.value;
+ pr_debug("Result: 0x%08x\n", tmp);
if (retval)
*retval = tmp;
kfree(obj);
- if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
+ if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, -ENODEV);
return -ENODEV;
+ }
return 0;
}
@@ -392,20 +406,29 @@ static int asus_wmi_evaluate_method5(u32 method_id,
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
- if (ACPI_FAILURE(status))
+ pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ __func__, method_id, arg0, arg1, arg2, arg3, arg4);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, -EIO);
return -EIO;
+ }
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32) obj->integer.value;
+ pr_debug("Result: %x\n", tmp);
if (retval)
*retval = tmp;
kfree(obj);
- if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
+ if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, -ENODEV);
return -ENODEV;
+ }
return 0;
}
@@ -431,8 +454,13 @@ static int asus_wmi_evaluate_method_buf(u32 method_id,
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
- if (ACPI_FAILURE(status))
+ pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x\n",
+ __func__, method_id, arg0, arg1);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, -EIO);
return -EIO;
+ }
obj = (union acpi_object *)output.pointer;
@@ -468,8 +496,11 @@ static int asus_wmi_evaluate_method_buf(u32 method_id,
kfree(obj);
- if (err)
+ if (err) {
+ pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n",
+ __func__, method_id, arg0, err);
return err;
+ }
return 0;
}
@@ -557,6 +588,7 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
{
u32 retval;
int status = asus_wmi_get_devstate(asus, dev_id, &retval);
+ pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval);
return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
}
@@ -1722,7 +1754,8 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
- if (!kbd_led_read(asus, &led_val, NULL)) {
+ if (!kbd_led_read(asus, &led_val, NULL) && !dmi_check_system(asus_use_hid_led_dmi_ids)) {
+ pr_info("using asus-wmi for asus::kbd_backlight\n");
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
@@ -1793,6 +1826,16 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) {
+ /*
+ * Disable OOBE state, so that e.g. the keyboard backlight
+ * works.
+ */
+ rv = asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
@@ -3176,7 +3219,7 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
int err, fan_idx;
u8 mode = 0;
- if (asus->throttle_thermal_policy_available)
+ if (asus->throttle_thermal_policy_dev)
mode = asus->throttle_thermal_policy_mode;
/* DEVID_<C/G>PU_FAN_CURVE is switched for OVERBOOST vs SILENT */
if (mode == 2)
@@ -3383,7 +3426,7 @@ static ssize_t fan_curve_enable_store(struct device *dev,
* For machines with throttle this is the only way to reset fans
* to default mode of operation (does not erase curve data).
*/
- if (asus->throttle_thermal_policy_available) {
+ if (asus->throttle_thermal_policy_dev) {
err = throttle_thermal_policy_write(asus);
if (err)
return err;
@@ -3600,8 +3643,8 @@ static const struct attribute_group asus_fan_curve_attr_group = {
__ATTRIBUTE_GROUPS(asus_fan_curve_attr);
/*
- * Must be initialised after throttle_thermal_policy_check_present() as
- * we check the status of throttle_thermal_policy_available during init.
+ * Must be initialised after throttle_thermal_policy_dev is set as
+ * we check the status of throttle_thermal_policy_dev during init.
*/
static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
{
@@ -3611,18 +3654,27 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
err = fan_curve_check_present(asus, &asus->cpu_fan_curve_available,
ASUS_WMI_DEVID_CPU_FAN_CURVE);
- if (err)
+ if (err) {
+ pr_debug("%s, checked 0x%08x, failed: %d\n",
+ __func__, ASUS_WMI_DEVID_CPU_FAN_CURVE, err);
return err;
+ }
err = fan_curve_check_present(asus, &asus->gpu_fan_curve_available,
ASUS_WMI_DEVID_GPU_FAN_CURVE);
- if (err)
+ if (err) {
+ pr_debug("%s, checked 0x%08x, failed: %d\n",
+ __func__, ASUS_WMI_DEVID_GPU_FAN_CURVE, err);
return err;
+ }
err = fan_curve_check_present(asus, &asus->mid_fan_curve_available,
ASUS_WMI_DEVID_MID_FAN_CURVE);
- if (err)
+ if (err) {
+ pr_debug("%s, checked 0x%08x, failed: %d\n",
+ __func__, ASUS_WMI_DEVID_MID_FAN_CURVE, err);
return err;
+ }
if (!asus->cpu_fan_curve_available
&& !asus->gpu_fan_curve_available
@@ -3642,38 +3694,13 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
}
/* Throttle thermal policy ****************************************************/
-
-static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
-{
- u32 result;
- int err;
-
- asus->throttle_thermal_policy_available = false;
-
- err = asus_wmi_get_devstate(asus,
- ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
- &result);
- if (err) {
- if (err == -ENODEV)
- return 0;
- return err;
- }
-
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
- asus->throttle_thermal_policy_available = true;
-
- return 0;
-}
-
static int throttle_thermal_policy_write(struct asus_wmi *asus)
{
- int err;
- u8 value;
+ u8 value = asus->throttle_thermal_policy_mode;
u32 retval;
+ int err;
- value = asus->throttle_thermal_policy_mode;
-
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev,
value, &retval);
sysfs_notify(&asus->platform_device->dev.kobj, NULL,
@@ -3703,7 +3730,7 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
{
- if (!asus->throttle_thermal_policy_available)
+ if (!asus->throttle_thermal_policy_dev)
return 0;
asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
@@ -3715,7 +3742,7 @@ static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
u8 new_mode = asus->throttle_thermal_policy_mode + 1;
int err;
- if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ if (new_mode > PLATFORM_PROFILE_MAX)
new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
asus->throttle_thermal_policy_mode = new_mode;
@@ -3754,7 +3781,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
if (result < 0)
return result;
- if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ if (new_mode > PLATFORM_PROFILE_MAX)
return -EINVAL;
asus->throttle_thermal_policy_mode = new_mode;
@@ -3771,10 +3798,52 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
return count;
}
-// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+/*
+ * Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+ */
static DEVICE_ATTR_RW(throttle_thermal_policy);
/* Platform profile ***********************************************************/
+static int asus_wmi_platform_profile_to_vivo(struct asus_wmi *asus, int mode)
+{
+ bool vivo;
+
+ vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+
+ if (vivo) {
+ switch (mode) {
+ case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
+ return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO;
+ case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
+ return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO;
+ case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
+ return ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO;
+ }
+ }
+
+ return mode;
+}
+
+static int asus_wmi_platform_profile_mode_from_vivo(struct asus_wmi *asus, int mode)
+{
+ bool vivo;
+
+ vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+
+ if (vivo) {
+ switch (mode) {
+ case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO:
+ return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO:
+ return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST;
+ case ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO:
+ return ASUS_THROTTLE_THERMAL_POLICY_SILENT;
+ }
+ }
+
+ return mode;
+}
+
static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
@@ -3782,10 +3851,9 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
int tp;
asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
-
tp = asus->throttle_thermal_policy_mode;
- switch (tp) {
+ switch (asus_wmi_platform_profile_mode_from_vivo(asus, tp)) {
case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
*profile = PLATFORM_PROFILE_BALANCED;
break;
@@ -3824,7 +3892,7 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
return -EOPNOTSUPP;
}
- asus->throttle_thermal_policy_mode = tp;
+ asus->throttle_thermal_policy_mode = asus_wmi_platform_profile_to_vivo(asus, tp);
return throttle_thermal_policy_write(asus);
}
@@ -3837,7 +3905,7 @@ static int platform_profile_setup(struct asus_wmi *asus)
* Not an error if a component platform_profile relies on is unavailable
* so early return, skipping the setup of platform_profile.
*/
- if (!asus->throttle_thermal_policy_available)
+ if (!asus->throttle_thermal_policy_dev)
return 0;
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
@@ -3852,8 +3920,13 @@ static int platform_profile_setup(struct asus_wmi *asus)
asus->platform_profile_handler.choices);
err = platform_profile_register(&asus->platform_profile_handler);
- if (err)
+ if (err == -EEXIST) {
+ pr_warn("%s, a platform_profile handler is already registered\n", __func__);
+ return 0;
+ } else if (err) {
+ pr_err("%s, failed at platform_profile_register: %d\n", __func__, err);
return err;
+ }
asus->platform_profile_support = true;
return 0;
@@ -3874,7 +3947,7 @@ static int read_backlight_power(struct asus_wmi *asus)
if (ret < 0)
return ret;
- return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ return ret ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
}
static int read_brightness_max(struct asus_wmi *asus)
@@ -3933,7 +4006,7 @@ static int update_bl_status(struct backlight_device *bd)
power = read_backlight_power(asus);
if (power != -ENODEV && bd->props.power != power) {
- ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK);
+ ctrl_param = !!(bd->props.power == BACKLIGHT_POWER_ON);
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT,
ctrl_param, NULL);
if (asus->driver->quirks->store_backlight_power)
@@ -3992,7 +4065,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
power = read_backlight_power(asus);
if (power == -ENODEV)
- power = FB_BLANK_UNBLANK;
+ power = BACKLIGHT_POWER_ON;
else if (power < 0)
return power;
@@ -4050,7 +4123,7 @@ static int read_screenpad_backlight_power(struct asus_wmi *asus)
if (ret < 0)
return ret;
/* 1 == powered */
- return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ return ret ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
}
static int read_screenpad_brightness(struct backlight_device *bd)
@@ -4063,7 +4136,7 @@ static int read_screenpad_brightness(struct backlight_device *bd)
if (err < 0)
return err;
/* The device brightness can only be read if powered, so return the stored value */
- if (err == FB_BLANK_POWERDOWN)
+ if (err == BACKLIGHT_POWER_OFF)
return asus->driver->screenpad_brightness - ASUS_SCREENPAD_BRIGHT_MIN;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_SCREENPAD_LIGHT, &retval);
@@ -4084,7 +4157,7 @@ static int update_screenpad_bl_status(struct backlight_device *bd)
return power;
if (bd->props.power != power) {
- if (power != FB_BLANK_UNBLANK) {
+ if (power != BACKLIGHT_POWER_ON) {
/* Only brightness > 0 can power it back on */
ctrl_param = asus->driver->screenpad_brightness - ASUS_SCREENPAD_BRIGHT_MIN;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_LIGHT,
@@ -4092,7 +4165,7 @@ static int update_screenpad_bl_status(struct backlight_device *bd)
} else {
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_POWER, 0, NULL);
}
- } else if (power == FB_BLANK_UNBLANK) {
+ } else if (power == BACKLIGHT_POWER_ON) {
/* Only set brightness if powered on or we get an invalid/unsynced state */
ctrl_param = bd->props.brightness + ASUS_SCREENPAD_BRIGHT_MIN;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_LIGHT, ctrl_param, NULL);
@@ -4122,7 +4195,7 @@ static int asus_screenpad_init(struct asus_wmi *asus)
if (power < 0)
return power;
- if (power != FB_BLANK_POWERDOWN) {
+ if (power != BACKLIGHT_POWER_OFF) {
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_SCREENPAD_LIGHT, &brightness);
if (err < 0)
return err;
@@ -4179,28 +4252,15 @@ static void asus_wmi_fnlock_update(struct asus_wmi *asus)
/* WMI events *****************************************************************/
-static int asus_wmi_get_event_code(u32 value)
+static int asus_wmi_get_event_code(union acpi_object *obj)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
int code;
- status = wmi_get_event_data(value, &response);
- if (ACPI_FAILURE(status)) {
- pr_warn("Failed to get WMI notify code: %s\n",
- acpi_format_exception(status));
- return -EIO;
- }
-
- obj = (union acpi_object *)response.pointer;
-
if (obj && obj->type == ACPI_TYPE_INTEGER)
code = (int)(obj->integer.value & WMI_EVENT_MASK);
else
code = -EIO;
- kfree(obj);
return code;
}
@@ -4252,7 +4312,7 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) {
if (asus->fan_boost_mode_available)
fan_boost_mode_switch_next(asus);
- if (asus->throttle_thermal_policy_available)
+ if (asus->throttle_thermal_policy_dev)
throttle_thermal_policy_switch_next(asus);
return;
@@ -4266,10 +4326,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
pr_info("Unknown key code 0x%x\n", code);
}
-static void asus_wmi_notify(u32 value, void *context)
+static void asus_wmi_notify(union acpi_object *obj, void *context)
{
struct asus_wmi *asus = context;
- int code = asus_wmi_get_event_code(value);
+ int code = asus_wmi_get_event_code(obj);
if (code < 0) {
pr_warn("Failed to get notify code: %d\n", code);
@@ -4424,7 +4484,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
- ok = asus->throttle_thermal_policy_available;
+ ok = asus->throttle_thermal_policy_dev != 0;
else if (attr == &dev_attr_ppt_pl2_sppt.attr)
devid = ASUS_WMI_DEVID_PPT_PL2_SPPT;
else if (attr == &dev_attr_ppt_pl1_spl.attr)
@@ -4450,8 +4510,10 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_available_mini_led_mode.attr)
ok = asus->mini_led_dev_id != 0;
- if (devid != -1)
+ if (devid != -1) {
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
+ pr_debug("%s called 0x%08x, ok: %x\n", __func__, devid, ok);
+ }
return ok ? attr->mode : 0;
}
@@ -4716,16 +4778,15 @@ static int asus_wmi_add(struct platform_device *pdev)
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY))
+ asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO))
+ asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
- err = throttle_thermal_policy_check_present(asus);
- if (err)
- goto fail_throttle_thermal_policy;
- else
- throttle_thermal_policy_set_default(asus);
-
err = platform_profile_setup(asus);
if (err)
goto fail_platform_profile_setup;
@@ -4820,7 +4881,6 @@ fail_hwmon:
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
-fail_throttle_thermal_policy:
fail_custom_fan_curve:
fail_platform_profile_setup:
if (asus->platform_profile_support)
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 309236cecd5a..68a49788a396 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -49,6 +49,7 @@ config DELL_LAPTOP
default m
depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_BATTERY
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
depends on DELL_WMI || DELL_WMI = n
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 6552dfe491c6..a3cd0505f282 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -22,11 +22,13 @@
#include <linux/io.h>
#include <linux/rfkill.h>
#include <linux/power_supply.h>
+#include <linux/sysfs.h>
#include <linux/acpi.h>
#include <linux/mm.h>
#include <linux/i8042.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <acpi/battery.h>
#include <acpi/video.h>
#include "dell-rbtn.h"
#include "dell-smbios.h"
@@ -99,6 +101,20 @@ static bool force_rfkill;
static bool micmute_led_registered;
static bool mute_led_registered;
+struct battery_mode_info {
+ int token;
+ const char *label;
+};
+
+static const struct battery_mode_info battery_modes[] = {
+ { BAT_PRI_AC_MODE_TOKEN, "Trickle" },
+ { BAT_EXPRESS_MODE_TOKEN, "Fast" },
+ { BAT_STANDARD_MODE_TOKEN, "Standard" },
+ { BAT_ADAPTIVE_MODE_TOKEN, "Adaptive" },
+ { BAT_CUSTOM_MODE_TOKEN, "Custom" },
+};
+static u32 battery_supported_modes;
+
module_param(force_rfkill, bool, 0444);
MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
@@ -353,6 +369,32 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
{ }
};
+/* -1 is a sentinel value, telling us to use token->value */
+#define USE_TVAL ((u32) -1)
+static int dell_send_request_for_tokenid(struct calling_interface_buffer *buffer,
+ u16 class, u16 select, u16 tokenid,
+ u32 val)
+{
+ struct calling_interface_token *token;
+
+ token = dell_smbios_find_token(tokenid);
+ if (!token)
+ return -ENODEV;
+
+ if (val == USE_TVAL)
+ val = token->value;
+
+ dell_fill_request(buffer, token->location, val, 0, 0);
+ return dell_send_request(buffer, class, select);
+}
+
+static inline int dell_set_std_token_value(struct calling_interface_buffer *buffer,
+ u16 tokenid, u32 value)
+{
+ return dell_send_request_for_tokenid(buffer, CLASS_TOKEN_WRITE,
+ SELECT_TOKEN_STD, tokenid, value);
+}
+
/*
* Derived from information in smbios-wireless-ctl:
*
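
The helpers introduced above collapse the repeated find-token/fill/send sequence into one call, with USE_TVAL selecting the token's own stored value. A hedged usage sketch:

    struct calling_interface_buffer buffer;
    int ret;

    /* write the token's own value (USE_TVAL) to the standard selector */
    ret = dell_set_std_token_value(&buffer, GLOBAL_MUTE_ENABLE, USE_TVAL);
    if (ret)
            return ret;
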
@@ -895,43 +937,24 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
struct calling_interface_buffer buffer;
- struct calling_interface_token *token;
- int ret;
-
- token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
- if (!token)
- return -ENODEV;
-
- dell_fill_request(&buffer,
- token->location, bd->props.brightness, 0, 0);
- if (power_supply_is_system_supplied() > 0)
- ret = dell_send_request(&buffer,
- CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
- else
- ret = dell_send_request(&buffer,
- CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
+ u16 select;
- return ret;
+ select = power_supply_is_system_supplied() > 0 ?
+ SELECT_TOKEN_AC : SELECT_TOKEN_BAT;
+ return dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_WRITE,
+ select, BRIGHTNESS_TOKEN, bd->props.brightness);
}
static int dell_get_intensity(struct backlight_device *bd)
{
struct calling_interface_buffer buffer;
- struct calling_interface_token *token;
int ret;
+ u16 select;
- token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
- if (!token)
- return -ENODEV;
-
- dell_fill_request(&buffer, token->location, 0, 0, 0);
- if (power_supply_is_system_supplied() > 0)
- ret = dell_send_request(&buffer,
- CLASS_TOKEN_READ, SELECT_TOKEN_AC);
- else
- ret = dell_send_request(&buffer,
- CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
-
+ select = power_supply_is_system_supplied() > 0 ?
+ SELECT_TOKEN_AC : SELECT_TOKEN_BAT;
+ ret = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ,
+ select, BRIGHTNESS_TOKEN, 0);
if (ret == 0)
ret = buffer.output[1];
@@ -1355,20 +1378,11 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
static int kbd_set_token_bit(u8 bit)
{
struct calling_interface_buffer buffer;
- struct calling_interface_token *token;
- int ret;
if (bit >= ARRAY_SIZE(kbd_tokens))
return -EINVAL;
- token = dell_smbios_find_token(kbd_tokens[bit]);
- if (!token)
- return -EINVAL;
-
- dell_fill_request(&buffer, token->location, token->value, 0, 0);
- ret = dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
-
- return ret;
+ return dell_set_std_token_value(&buffer, kbd_tokens[bit], USE_TVAL);
}
static int kbd_get_token_bit(u8 bit)
@@ -1387,11 +1401,10 @@ static int kbd_get_token_bit(u8 bit)
dell_fill_request(&buffer, token->location, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD);
- val = buffer.output[1];
-
if (ret)
return ret;
+ val = buffer.output[1];
return (val == token->value);
}
@@ -1497,7 +1510,7 @@ static inline int kbd_init_info(void)
}
-static inline void kbd_init_tokens(void)
+static inline void __init kbd_init_tokens(void)
{
int i;
@@ -1506,7 +1519,7 @@ static inline void kbd_init_tokens(void)
kbd_token_bits |= BIT(i);
}
-static void kbd_init(void)
+static void __init kbd_init(void)
{
int ret;
@@ -2131,21 +2144,11 @@ static int micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct calling_interface_buffer buffer;
- struct calling_interface_token *token;
- int state = brightness != LED_OFF;
+ u32 tokenid;
- if (state == 0)
- token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE);
- else
- token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
-
- if (!token)
- return -ENODEV;
-
- dell_fill_request(&buffer, token->location, token->value, 0, 0);
- dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
-
- return 0;
+ tokenid = brightness == LED_OFF ?
+ GLOBAL_MIC_MUTE_DISABLE : GLOBAL_MIC_MUTE_ENABLE;
+ return dell_set_std_token_value(&buffer, tokenid, USE_TVAL);
}
static struct led_classdev micmute_led_cdev = {
@@ -2159,33 +2162,288 @@ static int mute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct calling_interface_buffer buffer;
+ u32 tokenid;
+
+ tokenid = brightness == LED_OFF ?
+ GLOBAL_MUTE_DISABLE : GLOBAL_MUTE_ENABLE;
+ return dell_set_std_token_value(&buffer, tokenid, USE_TVAL);
+}
+
+static struct led_classdev mute_led_cdev = {
+ .name = "platform::mute",
+ .max_brightness = 1,
+ .brightness_set_blocking = mute_led_set,
+ .default_trigger = "audio-mute",
+};
+
+static int dell_battery_set_mode(const u16 tokenid)
+{
+ struct calling_interface_buffer buffer;
+
+ return dell_set_std_token_value(&buffer, tokenid, USE_TVAL);
+}
+
+static int dell_battery_read(const u16 tokenid)
+{
+ struct calling_interface_buffer buffer;
+ int err;
+
+ err = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ,
+ SELECT_TOKEN_STD, tokenid, 0);
+ if (err)
+ return err;
+
+ if (buffer.output[1] > INT_MAX)
+ return -EIO;
+
+ return buffer.output[1];
+}
+
+static bool dell_battery_mode_is_active(const u16 tokenid)
+{
struct calling_interface_token *token;
- int state = brightness != LED_OFF;
+ int ret;
- if (state == 0)
- token = dell_smbios_find_token(GLOBAL_MUTE_DISABLE);
- else
- token = dell_smbios_find_token(GLOBAL_MUTE_ENABLE);
+ ret = dell_battery_read(tokenid);
+ if (ret < 0)
+ return false;
- if (!token)
+ token = dell_smbios_find_token(tokenid);
+ /* the token was already validated by dell_battery_read(), so it cannot be NULL */
+
+ return token->value == (u16) ret;
+}
+
+/*
+ * The rules: the minimum start charging value is 50%. The maximum
+ * start charging value is 95%. The minimum end charging value is
+ * 55%. The maximum end charging value is 100%. And finally, there
+ * has to be at least a 5% difference between start & end values.
+ */
+#define CHARGE_START_MIN 50
+#define CHARGE_START_MAX 95
+#define CHARGE_END_MIN 55
+#define CHARGE_END_MAX 100
+#define CHARGE_MIN_DIFF 5
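+
+/*
+ * Worked example (hypothetical values): with the end threshold at 60,
+ * writing 58 as the start threshold is first clamped into [50, 95] and
+ * then lowered to 55 so that the required 5% gap to the end value holds.
+ */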
+
+static int dell_battery_set_custom_charge_start(int start)
+{
+ struct calling_interface_buffer buffer;
+ int end;
+
+ start = clamp(start, CHARGE_START_MIN, CHARGE_START_MAX);
+ end = dell_battery_read(BAT_CUSTOM_CHARGE_END);
+ if (end < 0)
+ return end;
+ if ((end - start) < CHARGE_MIN_DIFF)
+ start = end - CHARGE_MIN_DIFF;
+
+ return dell_set_std_token_value(&buffer, BAT_CUSTOM_CHARGE_START,
+ start);
+}
+
+static int dell_battery_set_custom_charge_end(int end)
+{
+ struct calling_interface_buffer buffer;
+ int start;
+
+ end = clamp(end, CHARGE_END_MIN, CHARGE_END_MAX);
+ start = dell_battery_read(BAT_CUSTOM_CHARGE_START);
+ if (start < 0)
+ return start;
+ if ((end - start) < CHARGE_MIN_DIFF)
+ end = start + CHARGE_MIN_DIFF;
+
+ return dell_set_std_token_value(&buffer, BAT_CUSTOM_CHARGE_END, end);
+}
+
+static ssize_t charge_types_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t count = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
+ bool active;
+
+ if (!(battery_supported_modes & BIT(i)))
+ continue;
+
+ active = dell_battery_mode_is_active(battery_modes[i].token);
+ count += sysfs_emit_at(buf, count, active ? "[%s] " : "%s ",
+ battery_modes[i].label);
+ }
+
+ /* convert the last space to a newline */
+ if (count > 0)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+
+ return count;
+}
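+
+/*
+ * Example output (hypothetical set of supported modes):
+ *	Trickle Fast [Standard] Custom
+ * The active mode is bracketed; unsupported modes are omitted entirely.
+ */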
+
+static ssize_t charge_types_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ bool matched = false;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
+ if (!(battery_supported_modes & BIT(i)))
+ continue;
+
+ if (sysfs_streq(battery_modes[i].label, buf)) {
+ matched = true;
+ break;
+ }
+ }
+ if (!matched)
+ return -EINVAL;
+
+ err = dell_battery_set_mode(battery_modes[i].token);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int start;
+
+ start = dell_battery_read(BAT_CUSTOM_CHARGE_START);
+ if (start < 0)
+ return start;
+
+ if (start > CHARGE_START_MAX)
+ return -EIO;
+
+ return sysfs_emit(buf, "%d\n", start);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret, start;
+
+ ret = kstrtoint(buf, 10, &start);
+ if (ret)
+ return ret;
+ if (start < 0 || start > 100)
+ return -EINVAL;
+
+ ret = dell_battery_set_custom_charge_start(start);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int end;
+
+ end = dell_battery_read(BAT_CUSTOM_CHARGE_END);
+ if (end < 0)
+ return end;
+
+ if (end > CHARGE_END_MAX)
+ return -EIO;
+
+ return sysfs_emit(buf, "%d\n", end);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret, end;
+
+ ret = kstrtoint(buf, 10, &end);
+ if (ret)
+ return ret;
+ if (end < 0 || end > 100)
+ return -EINVAL;
+
+ ret = dell_battery_set_custom_charge_end(end);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_types);
+
+static struct attribute *dell_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
+ &dev_attr_charge_types.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dell_battery);
+
+static int dell_battery_add(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ /* this currently only supports the primary battery */
+ if (strcmp(battery->desc->name, "BAT0") != 0)
return -ENODEV;
- dell_fill_request(&buffer, token->location, token->value, 0, 0);
- dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
+ return device_add_groups(&battery->dev, dell_battery_groups);
+}
+static int dell_battery_remove(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ device_remove_groups(&battery->dev, dell_battery_groups);
return 0;
}
-static struct led_classdev mute_led_cdev = {
- .name = "platform::mute",
- .max_brightness = 1,
- .brightness_set_blocking = mute_led_set,
- .default_trigger = "audio-mute",
+static struct acpi_battery_hook dell_battery_hook = {
+ .add_battery = dell_battery_add,
+ .remove_battery = dell_battery_remove,
+ .name = "Dell Primary Battery Extension",
};
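+
+/*
+ * Once the hook is registered, the attributes appear under the ACPI
+ * battery's power_supply device, e.g. (illustrative path)
+ * /sys/class/power_supply/BAT0/charge_control_start_threshold.
+ */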
+static u32 __init battery_get_supported_modes(void)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
+ if (dell_smbios_find_token(battery_modes[i].token))
+ modes |= BIT(i);
+ }
+
+ return modes;
+}
+
+static void __init dell_battery_init(struct device *dev)
+{
+ battery_supported_modes = battery_get_supported_modes();
+
+ if (battery_supported_modes != 0)
+ battery_hook_register(&dell_battery_hook);
+}
+
+static void dell_battery_exit(void)
+{
+ if (battery_supported_modes != 0)
+ battery_hook_unregister(&dell_battery_hook);
+}
+
static int __init dell_init(void)
{
- struct calling_interface_token *token;
+ struct calling_interface_buffer buffer;
int max_intensity = 0;
int ret;
@@ -2219,6 +2477,7 @@ static int __init dell_init(void)
touchpad_led_init(&platform_device->dev);
kbd_led_init(&platform_device->dev);
+ dell_battery_init(&platform_device->dev);
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -2246,16 +2505,10 @@ static int __init dell_init(void)
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
- token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
- if (token) {
- struct calling_interface_buffer buffer;
-
- dell_fill_request(&buffer, token->location, 0, 0, 0);
- ret = dell_send_request(&buffer,
- CLASS_TOKEN_READ, SELECT_TOKEN_AC);
- if (ret == 0)
- max_intensity = buffer.output[3];
- }
+ ret = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ,
+ SELECT_TOKEN_AC, BRIGHTNESS_TOKEN, 0);
+ if (ret == 0)
+ max_intensity = buffer.output[3];
if (max_intensity) {
struct backlight_properties props;
@@ -2293,6 +2546,7 @@ fail_backlight:
if (mute_led_registered)
led_classdev_unregister(&mute_led_cdev);
fail_led:
+ dell_battery_exit();
dell_cleanup_rfkill();
fail_rfkill:
platform_device_del(platform_device);
@@ -2311,6 +2565,7 @@ static void __exit dell_exit(void)
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
kbd_led_exit();
+ dell_battery_exit();
backlight_device_unregister(dell_backlight_device);
if (micmute_led_registered)
led_classdev_unregister(&micmute_led_cdev);
diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
index ea0cc38642a2..77baa15eb523 100644
--- a/drivers/platform/x86/dell/dell-smbios.h
+++ b/drivers/platform/x86/dell/dell-smbios.h
@@ -33,6 +33,13 @@
#define KBD_LED_AUTO_50_TOKEN 0x02EB
#define KBD_LED_AUTO_75_TOKEN 0x02EC
#define KBD_LED_AUTO_100_TOKEN 0x02F6
+#define BAT_PRI_AC_MODE_TOKEN 0x0341
+#define BAT_ADAPTIVE_MODE_TOKEN 0x0342
+#define BAT_CUSTOM_MODE_TOKEN 0x0343
+#define BAT_STANDARD_MODE_TOKEN 0x0346
+#define BAT_EXPRESS_MODE_TOKEN 0x0347
+#define BAT_CUSTOM_CHARGE_START 0x0349
+#define BAT_CUSTOM_CHARGE_END 0x034A
#define GLOBAL_MIC_MUTE_ENABLE 0x0364
#define GLOBAL_MIC_MUTE_DISABLE 0x0365
#define GLOBAL_MUTE_ENABLE 0x058C
diff --git a/drivers/platform/x86/dell/dell-wmi-aio.c b/drivers/platform/x86/dell/dell-wmi-aio.c
index c7b7f1e403fb..54096495719b 100644
--- a/drivers/platform/x86/dell/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell/dell-wmi-aio.c
@@ -70,20 +70,10 @@ static bool dell_wmi_aio_event_check(u8 *buffer, int length)
return false;
}
-static void dell_wmi_aio_notify(u32 value, void *context)
+static void dell_wmi_aio_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
struct dell_wmi_event *event;
- acpi_status status;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
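+ /*
+ * The WMI core now passes the event data in directly and frees it
+ * after this handler returns, hence the wmi_get_event_data()/kfree()
+ * calls are removed.
+ */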
if (obj) {
unsigned int scancode = 0;
@@ -114,7 +104,6 @@ static void dell_wmi_aio_notify(u32 value, void *context)
break;
}
}
- kfree(obj);
}
static int __init dell_wmi_aio_input_setup(void)
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 447364bed249..03319a80e114 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
@@ -1137,7 +1136,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
}
eeepc->backlight_device = bd;
bd->props.brightness = read_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
return 0;
}
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 32d9f0ba6be3..37edb9ae67b8 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -13,13 +13,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/backlight.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/acpi.h>
#include "asus-wmi.h"
@@ -192,7 +192,7 @@ static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
driver->quirks = quirks;
driver->quirks->wapf = -1;
- driver->panel_power = FB_BLANK_UNBLANK;
+ driver->panel_power = BACKLIGHT_POWER_ON;
}
static struct asus_wmi_driver asus_wmi_driver = {
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 968fc91bd5e4..ae992ac1ab4a 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -43,7 +43,6 @@
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kfifo.h>
@@ -356,7 +355,7 @@ static int bl_get_brightness(struct backlight_device *b)
{
struct acpi_device *device = bl_get_data(b);
- return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level(device);
+ return b->props.power == BACKLIGHT_POWER_OFF ? 0 : get_lcd_level(device);
}
static int bl_update_status(struct backlight_device *b)
@@ -364,7 +363,7 @@ static int bl_update_status(struct backlight_device *b)
struct acpi_device *device = bl_get_data(b);
if (fext) {
- if (b->props.power == FB_BLANK_POWERDOWN)
+ if (b->props.power == BACKLIGHT_POWER_OFF)
call_fext_func(fext, FUNC_BACKLIGHT, 0x1,
BACKLIGHT_PARAM_POWER, BACKLIGHT_OFF);
else
@@ -933,9 +932,9 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
if (call_fext_func(fext, FUNC_BACKLIGHT, 0x2,
BACKLIGHT_PARAM_POWER, 0x0) == BACKLIGHT_OFF)
- fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
+ fujitsu_bl->bl_device->props.power = BACKLIGHT_POWER_OFF;
else
- fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
+ fujitsu_bl->bl_device->props.power = BACKLIGHT_POWER_ON;
}
ret = acpi_fujitsu_laptop_input_setup(device);
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 876e0a97cee1..8c05e0dd2a21 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -834,28 +834,16 @@ static struct attribute *hp_wmi_attrs[] = {
};
ATTRIBUTE_GROUPS(hp_wmi);
-static void hp_wmi_notify(u32 value, void *context)
+static void hp_wmi_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
u32 event_id, event_data;
- union acpi_object *obj;
- acpi_status status;
u32 *location;
int key_code;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
-
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
pr_info("Unknown response received %d\n", obj->type);
- kfree(obj);
return;
}
@@ -872,10 +860,8 @@ static void hp_wmi_notify(u32 value, void *context)
event_data = *(location + 2);
} else {
pr_info("Unknown buffer length %d\n", obj->buffer.length);
- kfree(obj);
return;
}
- kfree(obj);
switch (event_id) {
case HPWMI_DOCK_EVENT:
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 09d476dd832e..d81fd5df4a00 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -734,26 +734,14 @@ static void huawei_wmi_process_key(struct input_dev *idev, int code)
sparse_keymap_report_entry(idev, key, 1, true);
}
-static void huawei_wmi_input_notify(u32 value, void *context)
+static void huawei_wmi_input_notify(union acpi_object *obj, void *context)
{
struct input_dev *idev = (struct input_dev *)context;
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
- status = wmi_get_event_data(value, &response);
- if (ACPI_FAILURE(status)) {
- dev_err(&idev->dev, "Unable to get event data\n");
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
huawei_wmi_process_key(idev, obj->integer.value);
else
dev_err(&idev->dev, "Bad response type\n");
-
- kfree(response.pointer);
}
static int huawei_wmi_input_setup(struct device *dev, const char *guid)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 98ec30fce9fd..c64dfc56651d 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -17,11 +17,11 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/i8042.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
@@ -87,6 +87,34 @@ enum {
SALS_FNLOCK_OFF = 0xf,
};
+enum {
+ VPCCMD_R_VPC1 = 0x10,
+ VPCCMD_R_BL_MAX,
+ VPCCMD_R_BL,
+ VPCCMD_W_BL,
+ VPCCMD_R_WIFI,
+ VPCCMD_W_WIFI,
+ VPCCMD_R_BT,
+ VPCCMD_W_BT,
+ VPCCMD_R_BL_POWER,
+ VPCCMD_R_NOVO,
+ VPCCMD_R_VPC2,
+ VPCCMD_R_TOUCHPAD,
+ VPCCMD_W_TOUCHPAD,
+ VPCCMD_R_CAMERA,
+ VPCCMD_W_CAMERA,
+ VPCCMD_R_3G,
+ VPCCMD_W_3G,
+ VPCCMD_R_ODD, /* 0x21 */
+ VPCCMD_W_FAN,
+ VPCCMD_R_RF,
+ VPCCMD_W_RF,
+ VPCCMD_W_YMC = 0x2A,
+ VPCCMD_R_FAN = 0x2B,
+ VPCCMD_R_SPECIAL_BUTTONS = 0x31,
+ VPCCMD_W_BL_POWER = 0x33,
+};
+
/*
* These correspond to the number of supported states - 1
* Future keyboard types may need a new system, if there's a collision
@@ -237,6 +265,7 @@ static void ideapad_shared_exit(struct ideapad_private *priv)
/*
* ACPI Helpers
*/
+#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
{
@@ -252,6 +281,29 @@ static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
return 0;
}
+static int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg,
+ unsigned long *res)
+{
+ struct acpi_object_list params;
+ unsigned long long result;
+ union acpi_object in_obj;
+ acpi_status status;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = arg;
+
+ status = acpi_evaluate_integer(handle, (char *)name, &params, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (res)
+ *res = result;
+
+ return 0;
+}
+
static int exec_simple_method(acpi_handle handle, const char *name, unsigned long arg)
{
acpi_status status = acpi_execute_simple_method(handle, (char *)name, arg);
@@ -294,6 +346,89 @@ static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res)
return eval_int_with_arg(handle, "DYTC", cmd, res);
}
+static int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "VPCR", cmd, res);
+}
+
+static int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data)
+{
+ struct acpi_object_list params;
+ union acpi_object in_obj[2];
+ acpi_status status;
+
+ params.count = 2;
+ params.pointer = in_obj;
+ in_obj[0].type = ACPI_TYPE_INTEGER;
+ in_obj[0].integer.value = cmd;
+ in_obj[1].type = ACPI_TYPE_INTEGER;
+ in_obj[1].integer.value = data;
+
+ status = acpi_evaluate_object(handle, "VPCW", &params, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
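+
+/*
+ * EC mailbox flow used below: VPCW(1, cmd) starts the operation, VPCR(1)
+ * is then polled (with schedule() between reads) until it returns 0; for
+ * reads VPCR(0) yields the payload afterwards, while writes push the
+ * payload first via VPCW(0, data).
+ */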
+
+static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data)
+{
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
+
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+
+ while (time_before(jiffies, end_jiffies)) {
+ schedule();
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
+ if (val == 0)
+ return eval_vpcr(handle, 0, data);
+ }
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data)
+{
+ unsigned long end_jiffies, val;
+ int err;
+
+ err = eval_vpcw(handle, 0, data);
+ if (err)
+ return err;
+
+ err = eval_vpcw(handle, 1, cmd);
+ if (err)
+ return err;
+
+ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
+
+ while (time_before(jiffies, end_jiffies)) {
+ schedule();
+
+ err = eval_vpcr(handle, 1, &val);
+ if (err)
+ return err;
+
+ if (val == 0)
+ return 0;
+ }
+
+ acpi_handle_err(handle, "timeout in %s\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
/*
* debugfs
*/
@@ -419,13 +554,14 @@ static ssize_t camera_power_show(struct device *dev,
char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
- unsigned long result;
+ unsigned long result = 0;
int err;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
return sysfs_emit(buf, "%d\n", !!result);
}
@@ -442,10 +578,11 @@ static ssize_t camera_power_store(struct device *dev,
if (err)
return err;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
return count;
}
@@ -493,13 +630,14 @@ static ssize_t fan_mode_show(struct device *dev,
char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
- unsigned long result;
+ unsigned long result = 0;
int err;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
return sysfs_emit(buf, "%lu\n", result);
}
@@ -519,10 +657,11 @@ static ssize_t fan_mode_store(struct device *dev,
if (state > 4 || state == 3)
return -EINVAL;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
return count;
}
@@ -602,13 +741,14 @@ static ssize_t touchpad_show(struct device *dev,
char *buf)
{
struct ideapad_private *priv = dev_get_drvdata(dev);
- unsigned long result;
+ unsigned long result = 0;
int err;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
priv->r_touchpad_val = result;
@@ -627,10 +767,11 @@ static ssize_t touchpad_store(struct device *dev,
if (err)
return err;
- scoped_guard(mutex, &priv->vpc_mutex)
+ scoped_guard(mutex, &priv->vpc_mutex) {
err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
priv->r_touchpad_val = state;
@@ -1282,7 +1423,7 @@ static int ideapad_backlight_update_status(struct backlight_device *blightdev)
return err;
err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
- blightdev->props.power != FB_BLANK_POWERDOWN);
+ blightdev->props.power != BACKLIGHT_POWER_OFF);
if (err)
return err;
@@ -1332,7 +1473,7 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
priv->blightdev = blightdev;
blightdev->props.brightness = now;
- blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ blightdev->props.power = power ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
backlight_update_status(blightdev);
@@ -1358,7 +1499,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
- blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ blightdev->props.power = power ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
}
static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/ideapad-laptop.h
index 948cc61800a9..1e52f2aa0aac 100644
--- a/drivers/platform/x86/ideapad-laptop.h
+++ b/drivers/platform/x86/ideapad-laptop.h
@@ -9,9 +9,6 @@
#ifndef _IDEAPAD_LAPTOP_H_
#define _IDEAPAD_LAPTOP_H_
-#include <linux/acpi.h>
-#include <linux/jiffies.h>
-#include <linux/errno.h>
#include <linux/notifier.h>
enum ideapad_laptop_notifier_actions {
@@ -22,140 +19,4 @@ int ideapad_laptop_register_notifier(struct notifier_block *nb);
int ideapad_laptop_unregister_notifier(struct notifier_block *nb);
void ideapad_laptop_call_notifier(unsigned long action, void *data);
-enum {
- VPCCMD_R_VPC1 = 0x10,
- VPCCMD_R_BL_MAX,
- VPCCMD_R_BL,
- VPCCMD_W_BL,
- VPCCMD_R_WIFI,
- VPCCMD_W_WIFI,
- VPCCMD_R_BT,
- VPCCMD_W_BT,
- VPCCMD_R_BL_POWER,
- VPCCMD_R_NOVO,
- VPCCMD_R_VPC2,
- VPCCMD_R_TOUCHPAD,
- VPCCMD_W_TOUCHPAD,
- VPCCMD_R_CAMERA,
- VPCCMD_W_CAMERA,
- VPCCMD_R_3G,
- VPCCMD_W_3G,
- VPCCMD_R_ODD, /* 0x21 */
- VPCCMD_W_FAN,
- VPCCMD_R_RF,
- VPCCMD_W_RF,
- VPCCMD_W_YMC = 0x2A,
- VPCCMD_R_FAN = 0x2B,
- VPCCMD_R_SPECIAL_BUTTONS = 0x31,
- VPCCMD_W_BL_POWER = 0x33,
-};
-
-static inline int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, unsigned long *res)
-{
- struct acpi_object_list params;
- unsigned long long result;
- union acpi_object in_obj;
- acpi_status status;
-
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = arg;
-
- status = acpi_evaluate_integer(handle, (char *)name, &params, &result);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (res)
- *res = result;
-
- return 0;
-}
-
-static inline int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res)
-{
- return eval_int_with_arg(handle, "VPCR", cmd, res);
-}
-
-static inline int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data)
-{
- struct acpi_object_list params;
- union acpi_object in_obj[2];
- acpi_status status;
-
- params.count = 2;
- params.pointer = in_obj;
- in_obj[0].type = ACPI_TYPE_INTEGER;
- in_obj[0].integer.value = cmd;
- in_obj[1].type = ACPI_TYPE_INTEGER;
- in_obj[1].integer.value = data;
-
- status = acpi_evaluate_object(handle, "VPCW", &params, NULL);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return 0;
-}
-
-#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
-
-static inline int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data)
-{
- unsigned long end_jiffies, val;
- int err;
-
- err = eval_vpcw(handle, 1, cmd);
- if (err)
- return err;
-
- end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
-
- while (time_before(jiffies, end_jiffies)) {
- schedule();
-
- err = eval_vpcr(handle, 1, &val);
- if (err)
- return err;
-
- if (val == 0)
- return eval_vpcr(handle, 0, data);
- }
-
- acpi_handle_err(handle, "timeout in %s\n", __func__);
-
- return -ETIMEDOUT;
-}
-
-static inline int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data)
-{
- unsigned long end_jiffies, val;
- int err;
-
- err = eval_vpcw(handle, 0, data);
- if (err)
- return err;
-
- err = eval_vpcw(handle, 1, cmd);
- if (err)
- return err;
-
- end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
-
- while (time_before(jiffies, end_jiffies)) {
- schedule();
-
- err = eval_vpcr(handle, 1, &val);
- if (err)
- return err;
-
- if (val == 0)
- return 0;
- }
-
- acpi_handle_err(handle, "timeout in %s\n", __func__);
-
- return -ETIMEDOUT;
-}
-
-#undef IDEAPAD_EC_TIMEOUT
#endif /* !_IDEAPAD_LAPTOP_H_ */
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 10cd65497cc1..445e7a59beb4 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include "../dual_accel_detect.h"
@@ -331,10 +332,8 @@ static int intel_hid_set_enable(struct device *device, bool enable)
acpi_handle handle = ACPI_HANDLE(device);
/* Enable|disable features - power button is always enabled */
- if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN,
- enable)) {
- dev_warn(device, "failed to %sable hotkeys\n",
- enable ? "en" : "dis");
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, enable)) {
+ dev_warn(device, "failed to %s hotkeys\n", str_enable_disable(enable));
return -EIO;
}
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 33412a584836..bc252b883210 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -32,6 +32,7 @@ bool *ifs_pkg_auth;
static const struct ifs_test_caps scan_test = {
.integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
.test_num = IFS_TYPE_SAF,
+ .image_suffix = "scan",
};
static const struct ifs_test_caps array_test = {
@@ -39,9 +40,32 @@ static const struct ifs_test_caps array_test = {
.test_num = IFS_TYPE_ARRAY_BIST,
};
+static const struct ifs_test_msrs scan_msrs = {
+ .copy_hashes = MSR_COPY_SCAN_HASHES,
+ .copy_hashes_status = MSR_SCAN_HASHES_STATUS,
+ .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK,
+ .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS,
+ .test_ctrl = MSR_SAF_CTRL,
+};
+
+static const struct ifs_test_msrs sbaf_msrs = {
+ .copy_hashes = MSR_COPY_SBAF_HASHES,
+ .copy_hashes_status = MSR_SBAF_HASHES_STATUS,
+ .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK,
+ .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS,
+ .test_ctrl = MSR_SBAF_CTRL,
+};
+
+static const struct ifs_test_caps sbaf_test = {
+ .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT,
+ .test_num = IFS_TYPE_SBAF,
+ .image_suffix = "sbft",
+};
+
static struct ifs_device ifs_devices[] = {
[IFS_TYPE_SAF] = {
.test_caps = &scan_test,
+ .test_msrs = &scan_msrs,
.misc = {
.name = "intel_ifs_0",
.minor = MISC_DYNAMIC_MINOR,
@@ -56,6 +80,15 @@ static struct ifs_device ifs_devices[] = {
.groups = plat_ifs_array_groups,
},
},
+ [IFS_TYPE_SBAF] = {
+ .test_caps = &sbaf_test,
+ .test_msrs = &sbaf_msrs,
+ .misc = {
+ .name = "intel_ifs_2",
+ .minor = MISC_DYNAMIC_MINOR,
+ .groups = plat_ifs_groups,
+ },
+ },
};
#define IFS_NUMTESTS ARRAY_SIZE(ifs_devices)
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 56b9f3e3cf76..5c3c0dfa1bf8 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -126,11 +126,40 @@
* The driver does not make use of this, it only tests one core at a time.
*
* .. [#f1] https://github.com/intel/TBD
+ *
+ *
+ * Structural Based Functional Test at Field (SBAF):
+ * -------------------------------------------------
+ *
+ * SBAF is a new type of testing that provides comprehensive core test
+ * coverage complementing Scan at Field (SAF) testing. SBAF mimics the
+ * manufacturing screening environment and leverages the same test suite.
+ * It makes use of Design For Test (DFT) observation sites and features
+ * to maximize coverage in minimum time.
+ *
+ * Similar to the SAF test, SBAF isolates the core under test from the
+ * rest of the system during execution. Upon completion, the core
+ * seamlessly resets to its pre-test state and resumes normal operation.
+ * Any machine checks or hangs encountered during the test are confined to
+ * the isolated core, preventing disruption to the overall system.
+ *
+ * Like the SAF test, the SBAF test is also divided into multiple batches,
+ * and each batch test can take hundreds of milliseconds (100-200 ms) to
+ * complete. If such a lengthy interruption is undesirable, it is
+ * recommended to relocate the time-sensitive applications to other cores.
*/
#include <linux/device.h>
#include <linux/miscdevice.h>
#define MSR_ARRAY_BIST 0x00000105
+
+#define MSR_COPY_SBAF_HASHES 0x000002b8
+#define MSR_SBAF_HASHES_STATUS 0x000002b9
+#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba
+#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb
+#define MSR_ACTIVATE_SBAF 0x000002bc
+#define MSR_SBAF_STATUS 0x000002bd
+
#define MSR_COPY_SCAN_HASHES 0x000002c2
#define MSR_SCAN_HASHES_STATUS 0x000002c3
#define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4
@@ -140,6 +169,7 @@
#define MSR_ARRAY_TRIGGER 0x000002d6
#define MSR_ARRAY_STATUS 0x000002d7
#define MSR_SAF_CTRL 0x000004f0
+#define MSR_SBAF_CTRL 0x000004f8
#define SCAN_NOT_TESTED 0
#define SCAN_TEST_PASS 1
@@ -147,6 +177,7 @@
#define IFS_TYPE_SAF 0
#define IFS_TYPE_ARRAY_BIST 1
+#define IFS_TYPE_SBAF 2
#define ARRAY_GEN0 0
#define ARRAY_GEN1 1
@@ -196,7 +227,8 @@ union ifs_chunks_auth_status_gen2 {
u16 valid_chunks;
u16 total_chunks;
u32 error_code :8;
- u32 rsvd2 :24;
+ u32 rsvd2 :8;
+ u32 max_bundle :16;
};
};
@@ -253,6 +285,34 @@ union ifs_array {
};
};
+/* MSR_ACTIVATE_SBAF bit fields */
+union ifs_sbaf {
+ u64 data;
+ struct {
+ u32 bundle_idx :9;
+ u32 rsvd1 :5;
+ u32 pgm_idx :2;
+ u32 rsvd2 :16;
+ u32 delay :31;
+ u32 sigmce :1;
+ };
+};
+
+/* MSR_SBAF_STATUS bit fields */
+union ifs_sbaf_status {
+ u64 data;
+ struct {
+ u32 bundle_idx :9;
+ u32 rsvd1 :5;
+ u32 pgm_idx :2;
+ u32 rsvd2 :16;
+ u32 error_code :8;
+ u32 rsvd3 :21;
+ u32 test_fail :1;
+ u32 sbaf_status :2;
+ };
+};
+
/*
* Driver populated error-codes
* 0xFD: Test timed out before completing all the chunks.
@@ -261,9 +321,28 @@ union ifs_array {
#define IFS_SW_TIMEOUT 0xFD
#define IFS_SW_PARTIAL_COMPLETION 0xFE
+#define IFS_SUFFIX_SZ 5
+
struct ifs_test_caps {
int integrity_cap_bit;
int test_num;
+ char image_suffix[IFS_SUFFIX_SZ];
+};
+
+/**
+ * struct ifs_test_msrs - MSRs used in IFS tests
+ * @copy_hashes: Copy test hash data
+ * @copy_hashes_status: Status of copied test hash data
+ * @copy_chunks: Copy chunks of the test data
+ * @copy_chunks_status: Status of the copied test data chunks
+ * @test_ctrl: Control the test attributes
+ */
+struct ifs_test_msrs {
+ u32 copy_hashes;
+ u32 copy_hashes_status;
+ u32 copy_chunks;
+ u32 copy_chunks_status;
+ u32 test_ctrl;
};
/**
@@ -278,6 +357,7 @@ struct ifs_test_caps {
* @generation: IFS test generation enumerated by hardware
* @chunk_size: size of a test chunk
* @array_gen: test generation of array test
+ * @max_bundle: maximum bundle index
*/
struct ifs_data {
int loaded_version;
@@ -290,6 +370,7 @@ struct ifs_data {
u32 generation;
u32 chunk_size;
u32 array_gen;
+ u32 max_bundle;
};
struct ifs_work {
@@ -299,6 +380,7 @@ struct ifs_work {
struct ifs_device {
const struct ifs_test_caps *test_caps;
+ const struct ifs_test_msrs *test_msrs;
struct ifs_data rw_data;
struct miscdevice misc;
};
@@ -319,6 +401,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev)
return d->test_caps;
}
+static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev)
+{
+ struct miscdevice *m = dev_get_drvdata(dev);
+ struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+ return d->test_msrs;
+}
+
extern bool *ifs_pkg_auth;
int ifs_load_firmware(struct device *dev);
int do_core_test(int cpu, struct device *dev);
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 39f19cb51749..de54bd1a5970 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -118,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
union ifs_scan_hashes_status hashes_status;
union ifs_chunks_auth_status chunk_status;
struct device *dev = local_work->dev;
+ const struct ifs_test_msrs *msrs;
int i, num_chunks, chunk_size;
struct ifs_data *ifsd;
u64 linear_addr, base;
u32 err_code;
ifsd = ifs_get_data(dev);
+ msrs = ifs_get_test_msrs(dev);
/* run scan hash copy */
- wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
- rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+ wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrl(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
num_chunks = hashes_status.num_chunks;
@@ -147,8 +149,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
linear_addr = base + i * chunk_size;
linear_addr |= i;
- wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ wrmsrl(msrs->copy_chunks, linear_addr);
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
ifsd->valid_chunks = chunk_status.valid_chunks;
err_code = chunk_status.error_code;
@@ -180,6 +182,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
union ifs_scan_hashes_status_gen2 hashes_status;
union ifs_chunks_auth_status_gen2 chunk_status;
u32 err_code, valid_chunks, total_chunks;
+ const struct ifs_test_msrs *msrs;
int i, num_chunks, chunk_size;
union meta_data *ifs_meta;
int starting_chunk_nr;
@@ -189,10 +192,11 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
int retry_count;
ifsd = ifs_get_data(dev);
+ msrs = ifs_get_test_msrs(dev);
if (need_copy_scan_hashes(ifsd)) {
- wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
- rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+ wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrl(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
chunk_size = hashes_status.chunk_size * SZ_1K;
@@ -212,8 +216,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
}
if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) {
- wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE);
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE);
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
if (chunk_status.valid_chunks != 0) {
dev_err(dev, "Couldn't invalidate installed stride - %d\n",
chunk_status.valid_chunks);
@@ -234,9 +238,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
chunk_table[1] = linear_addr;
do {
local_irq_disable();
- wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table);
+ wrmsrl(msrs->copy_chunks, (u64)chunk_table);
local_irq_enable();
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
err_code = chunk_status.error_code;
} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
@@ -257,20 +261,22 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
return -EIO;
}
ifsd->valid_chunks = valid_chunks;
+ ifsd->max_bundle = chunk_status.max_bundle;
return 0;
}
static int validate_ifs_metadata(struct device *dev)
{
+ const struct ifs_test_caps *test = ifs_get_test_caps(dev);
struct ifs_data *ifsd = ifs_get_data(dev);
union meta_data *ifs_meta;
char test_file[64];
int ret = -EINVAL;
- snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan",
+ snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s",
boot_cpu_data.x86, boot_cpu_data.x86_model,
- boot_cpu_data.x86_stepping, ifsd->cur_batch);
+ boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
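+ /* e.g. (illustrative): "06-8f-06-01.scan" for SAF, "06-8f-06-01.sbft" for SBAF */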
ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS);
if (!ifs_meta) {
@@ -300,6 +306,12 @@ static int validate_ifs_metadata(struct device *dev)
return ret;
}
+ if (ifs_meta->test_type != test->test_num) {
+ dev_warn(dev, "Metadata test_type %d mismatches with device type\n",
+ ifs_meta->test_type);
+ return ret;
+ }
+
return 0;
}
@@ -387,9 +399,9 @@ int ifs_load_firmware(struct device *dev)
char scan_path[64];
int ret;
- snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
+ snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s",
test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
- boot_cpu_data.x86_stepping, ifsd->cur_batch);
+ boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
ret = request_firmware_direct(&fw, scan_path, dev);
if (ret) {
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index be3d51ed0e47..f978dd05d4d8 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -29,6 +29,13 @@ struct run_params {
union ifs_status status;
};
+struct sbaf_run_params {
+ struct ifs_data *ifsd;
+ int *retry_cnt;
+ union ifs_sbaf *activate;
+ union ifs_sbaf_status status;
+};
+
/*
* Number of TSC cycles that a logical CPU will wait for the other
* logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
@@ -146,6 +153,7 @@ static bool can_restart(union ifs_status status)
#define SPINUNIT 100 /* 100 nsec */
static atomic_t array_cpus_in;
static atomic_t scan_cpus_in;
+static atomic_t sbaf_cpus_in;
/*
* Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
@@ -387,6 +395,225 @@ static void ifs_array_test_gen1(int cpu, struct device *dev)
ifsd->status = SCAN_TEST_PASS;
}
+#define SBAF_STATUS_PASS 0
+#define SBAF_STATUS_SIGN_FAIL 1
+#define SBAF_STATUS_INTR 2
+#define SBAF_STATUS_TEST_FAIL 3
+
+enum sbaf_status_err_code {
+ IFS_SBAF_NO_ERROR = 0,
+ IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1,
+ IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2,
+ IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3,
+ IFS_SBAF_INVALID_BUNDLE_INDEX = 4,
+ IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5,
+ IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6,
+ IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7,
+ IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8,
+ IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9,
+ IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA,
+ IFS_SBAF_CORRUPTED_CHUNK = 0xB,
+ IFS_SBAF_DID_NOT_START = 0xC,
+};
+
+static const char * const sbaf_test_status[] = {
+ [IFS_SBAF_NO_ERROR] = "SBAF no error",
+ [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.",
+ [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.",
+ [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3",
+ [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. Reload test image",
+ [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.",
+ [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently",
+ [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7",
+ [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently",
+ [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred during SBAF execution",
+ [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid",
+ [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk",
+ [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start",
+};
+
+static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data)
+{
+ union ifs_sbaf_status status = (union ifs_sbaf_status)status_data;
+
+ if (status.error_code < ARRAY_SIZE(sbaf_test_status)) {
+ dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ sbaf_test_status[status.error_code]);
+ } else if (status.error_code == IFS_SW_TIMEOUT) {
+ dev_info(dev, "CPU(s) %*pbl: software timeout during SBAF\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) {
+ dev_info(dev, "CPU(s) %*pbl: %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ "Not all SBAF bundles executed. Maximum forward progress retries exceeded");
+ } else {
+ dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
+ }
+}
+
+static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status)
+{
+ /* Failed signature check is set when SBAF signature did not match the expected value */
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) {
+ dev_err(dev, "CPU(s) %*pbl: Failed signature check\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+
+ /* Failed to reach end of test */
+ if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) {
+ dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+}
+
+static bool sbaf_bundle_completed(union ifs_sbaf_status status)
+{
+ return !(status.sbaf_status || status.error_code);
+}
+
+static bool sbaf_can_restart(union ifs_sbaf_status status)
+{
+ enum sbaf_status_err_code err_code = status.error_code;
+
+ /* Chunk signature was bad, or the SBAF test itself failed */
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL ||
+ status.sbaf_status == SBAF_STATUS_TEST_FAIL)
+ return false;
+
+ switch (err_code) {
+ case IFS_SBAF_NO_ERROR:
+ case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN:
+ case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS:
+ case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT:
+ case IFS_SBAF_INTERRUPTED_DURING_EXECUTION:
+ return true;
+ case IFS_SBAF_UNASSIGNED_ERROR_CODE3:
+ case IFS_SBAF_INVALID_BUNDLE_INDEX:
+ case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS:
+ case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY:
+ case IFS_SBAF_UNASSIGNED_ERROR_CODE7:
+ case IFS_SBAF_INVALID_PROGRAM_INDEX:
+ case IFS_SBAF_CORRUPTED_CHUNK:
+ case IFS_SBAF_DID_NOT_START:
+ break;
+ }
+ return false;
+}
+
+/*
+ * Execute the SBAF test. Called "simultaneously" on all threads of a core
+ * at high priority using the stop_cpus mechanism.
+ */
+static int dosbaf(void *data)
+{
+ struct sbaf_run_params *run_params = data;
+ int cpu = smp_processor_id();
+ union ifs_sbaf_status status;
+ struct ifs_data *ifsd;
+ int first;
+
+ ifsd = run_params->ifsd;
+
+ /* Only the first logical CPU on a core reports result */
+ first = cpumask_first(cpu_smt_mask(cpu));
+ wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC);
+
+ /*
+ * This WRMSR will wait for other HT threads to also write
+ * to this MSR (at most for activate.delay cycles). Then it
+ * starts scan of each requested bundle. The core test happens
+ * during the "execution" of the WRMSR.
+ */
+ wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data);
+ rdmsrl(MSR_SBAF_STATUS, status.data);
+ trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status);
+
+ /* Pass back the result of the test */
+ if (cpu == first)
+ run_params->status = status;
+
+ return 0;
+}
+
+static void ifs_sbaf_test_core(int cpu, struct device *dev)
+{
+ struct sbaf_run_params run_params;
+ union ifs_sbaf_status status = {};
+ union ifs_sbaf activate;
+ unsigned long timeout;
+ struct ifs_data *ifsd;
+ int stop_bundle;
+ int retries;
+
+ ifsd = ifs_get_data(dev);
+
+ activate.data = 0;
+ activate.delay = IFS_THREAD_WAIT;
+
+ timeout = jiffies + 2 * HZ;
+ retries = MAX_IFS_RETRIES;
+ activate.bundle_idx = 0;
+ stop_bundle = ifsd->max_bundle;
+
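+ /* Walk bundles 0..max_bundle; retry a bundle only while it makes forward progress */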
+ while (activate.bundle_idx <= stop_bundle) {
+ if (time_after(jiffies, timeout)) {
+ status.error_code = IFS_SW_TIMEOUT;
+ break;
+ }
+
+ atomic_set(&sbaf_cpus_in, 0);
+
+ run_params.ifsd = ifsd;
+ run_params.activate = &activate;
+ run_params.retry_cnt = &retries;
+ stop_core_cpuslocked(cpu, dosbaf, &run_params);
+
+ status = run_params.status;
+
+ if (sbaf_bundle_completed(status)) {
+ activate.bundle_idx = status.bundle_idx + 1;
+ activate.pgm_idx = 0;
+ retries = MAX_IFS_RETRIES;
+ continue;
+ }
+
+ /* Some cases can be retried, give up for others */
+ if (!sbaf_can_restart(status))
+ break;
+
+ if (status.pgm_idx == activate.pgm_idx) {
+ /* No progress: consume a retry and give up once they run out */
+ if (--retries == 0) {
+ if (status.error_code == IFS_NO_ERROR)
+ status.error_code = IFS_SW_PARTIAL_COMPLETION;
+ break;
+ }
+ } else {
+ /* Some progress was made and more programs remain in this bundle: reset retries */
+ retries = MAX_IFS_RETRIES;
+ activate.bundle_idx = status.bundle_idx;
+ activate.pgm_idx = status.pgm_idx;
+ }
+ }
+
+ /* Update status for this core */
+ ifsd->scan_details = status.data;
+
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL ||
+ status.sbaf_status == SBAF_STATUS_TEST_FAIL) {
+ ifsd->status = SCAN_TEST_FAIL;
+ sbaf_message_fail(dev, cpu, status);
+ } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR ||
+ (activate.bundle_idx < stop_bundle)) {
+ ifsd->status = SCAN_NOT_TESTED;
+ sbaf_message_not_tested(dev, cpu, status.data);
+ } else {
+ ifsd->status = SCAN_TEST_PASS;
+ }
+}
+
/*
* Initiate per core test. It wakes up work queue threads on the target cpu and
* its sibling cpu. Once all sibling threads wake up, the scan test gets executed and
@@ -420,6 +647,12 @@ int do_core_test(int cpu, struct device *dev)
else
ifs_array_test_gen1(cpu, dev);
break;
+ case IFS_TYPE_SBAF:
+ if (!ifsd->loaded)
+ ret = -EPERM;
+ else
+ ifs_sbaf_test_core(cpu, dev);
+ break;
default:
ret = -EINVAL;
}
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index 9f16cb514397..a8aba07bf1dc 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
- intel_skl_int3472_tps68470.o
-intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o
-intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
+ intel_skl_int3472_tps68470.o \
+ intel_skl_int3472_common.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o
+intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o
+
+intel_skl_int3472_common-y += common.o
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index 9db2bb0bbba4..b3a2578e06c1 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -29,6 +29,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i
return obj;
}
+EXPORT_SYMBOL_GPL(skl_int3472_get_acpi_buffer);
int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
{
@@ -52,6 +53,7 @@ out_free_obj:
kfree(obj);
return ret;
}
+EXPORT_SYMBOL_GPL(skl_int3472_fill_cldb);
/* sensor_adev_ret may be NULL, name_ret must not be NULL */
int skl_int3472_get_sensor_adev_and_name(struct device *dev,
@@ -80,3 +82,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return ret;
}
+EXPORT_SYMBOL_GPL(skl_int3472_get_sensor_adev_and_name);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 07b302e09340..3de463c3d13b 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/uuid.h>
#include "common.h"
@@ -69,11 +70,7 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- table_entry->key = acpi_dev_name(adev);
- table_entry->chip_hwnum = agpio->pin_table[0];
- table_entry->con_id = func;
- table_entry->idx = 0;
- table_entry->flags = polarity;
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity);
return 0;
}
@@ -234,7 +231,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
agpio->resource_source.string_ptr, agpio->pin_table[0],
- (polarity == GPIO_ACTIVE_HIGH) ? "high" : "low");
+ str_high_low(polarity == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index 217630f40c3f..265cef327b4f 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -28,7 +28,6 @@
#include <linux/backlight.h>
#include <linux/dmi.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -250,7 +249,7 @@ static int oaktrail_backlight_init(void)
oaktrail_bl_device = bd;
bd->props.brightness = get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
return 0;
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
index e7878558fd90..9d9c07f44ff6 100644
--- a/drivers/platform/x86/intel/pmc/adl.c
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -295,6 +295,8 @@ const struct pmc_reg_map adl_reg_map = {
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
.lpm_num_modes = ADL_LPM_NUM_MODES,
.lpm_num_maps = ADL_LPM_NUM_MAPS,
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index dd72974bf71e..513c02670c5a 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -200,6 +200,8 @@ const struct pmc_reg_map cnp_reg_map = {
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
.etr3_offset = ETR3_OFFSET,
};
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 01ae71c6df59..ecb47f8b4f83 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi_pmtmr.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -714,6 +715,49 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
+static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ struct pmc *pmc;
+ u32 ltr_ign;
+
+ pmc = pmcdev->pmcs[i];
+ if (!pmc)
+ continue;
+
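+ /* scope-based lock (cleanup.h); dropped automatically at the end of each iteration */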
+ guard(mutex)(&pmcdev->lock);
+ pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
+
+ /* ltr_ignore_max is the max index value for LTR ignore register */
+ ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
+ pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
+ }
+
+ /*
+ * Ignoring ME during suspend is blocking platforms with ADL PCH to get to
+ * deeper S0ix substate.
+ */
+ pmc_core_send_ltr_ignore(pmcdev, 6, 0);
+}
+
+static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[i];
+ if (!pmc)
+ continue;
+
+ guard(mutex)(&pmcdev->lock);
+ pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
+ }
+}
+
static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
const int lpm_adj_x2)
{
@@ -728,12 +772,11 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
u32 offset = pmc->map->lpm_residency_offset;
- unsigned int i;
int mode;
seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
- pmc_for_each_mode(i, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
}
@@ -787,20 +830,21 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
int mode;
seq_printf(s, "%30s |", "Element");
- pmc_for_each_mode(i, mode, pmcdev)
+ pmc_for_each_mode(mode, pmcdev)
seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
- seq_printf(s, " %9s |\n", "Status");
+ seq_printf(s, " %9s |", "Status");
+ seq_printf(s, " %11s |\n", "Live Status");
}
static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
u32 sts_offset;
+ u32 sts_offset_live;
u32 *lpm_req_regs;
unsigned int mp, pmc_index;
int num_maps;
@@ -815,6 +859,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
maps = pmc->map->lpm_sts;
num_maps = pmc->map->lpm_num_maps;
sts_offset = pmc->map->lpm_status_offset;
+ sts_offset_live = pmc->map->lpm_live_status_offset;
lpm_req_regs = pmc->lpm_req_regs;
/*
@@ -832,20 +877,24 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
for (mp = 0; mp < num_maps; mp++) {
u32 req_mask = 0;
u32 lpm_status;
+ u32 lpm_status_live;
const struct pmc_bit_map *map;
- int mode, idx, i, len = 32;
+ int mode, i, len = 32;
/*
* Capture the requirements and create a mask so that we only
* show an element if it's required for at least one of the
* enabled low power modes
*/
- pmc_for_each_mode(idx, mode, pmcdev)
+ pmc_for_each_mode(mode, pmcdev)
req_mask |= lpm_req_regs[mp + (mode * num_maps)];
/* Get the last latched status for this map */
lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
+ /* Get the runtime status for this map */
+ lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));
+
/* Loop over elements in this map */
map = maps[mp];
for (i = 0; i < len && map[i].name; i++) {
@@ -863,7 +912,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
/* Loop over the enabled states and display if required */
- pmc_for_each_mode(idx, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
bool required = lpm_req_regs[mp + (mode * num_maps)] &
bit_mask;
seq_printf(s, " %9s |", required ? "Required" : " ");
@@ -872,6 +921,9 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
/* In Status column, show the last captured state of this agent */
seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
+ /* In Live status column, show the live state of this agent */
+ seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");
+
seq_puts(s, "\n");
}
}
@@ -925,7 +977,6 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- unsigned int idx;
bool c10;
u32 reg;
int mode;
@@ -939,7 +990,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
c10 = true;
}
- pmc_for_each_mode(idx, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
if ((BIT(mode) & reg) && !c10)
seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
else
@@ -960,7 +1011,6 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
bool clear = false, c10 = false;
unsigned char buf[8];
- unsigned int idx;
int m, mode;
u32 reg;
@@ -979,7 +1029,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
mode = sysfs_match_string(pmc_lpm_modes, buf);
/* Check string matches enabled mode */
- pmc_for_each_mode(idx, m, pmcdev)
+ pmc_for_each_mode(m, pmcdev)
if (mode == m)
break;
@@ -1208,6 +1258,38 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
return val == 1;
}
+/*
+ * Enable or disable ACPI PM Timer
+ *
+ * This function is intended as a callback for ACPI PM suspend/resume events.
+ * The ACPI PM Timer is re-enabled on resume only if it was enabled before suspend.
+ */
+static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend)
+{
+ struct pmc_dev *pmcdev = data;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+ bool enabled;
+ u32 reg;
+
+ if (!map->acpi_pm_tmr_ctl_offset)
+ return;
+
+ guard(mutex)(&pmcdev->lock);
+
+ if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume)
+ return;
+
+ reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset);
+ enabled = !(reg & map->acpi_pm_tmr_disable_bit);
+ if (suspend)
+ reg |= map->acpi_pm_tmr_disable_bit;
+ else
+ reg &= ~map->acpi_pm_tmr_disable_bit;
+ pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg);
+
+ pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled;
+}
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
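The callback above is a small two-state machine: on suspend it records whether the timer was running and forces it off; on resume it touches the register only when the timer has to come back on. A standalone sketch with a plain variable in place of the locked MMIO read-modify-write:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TMR_DISABLE_BIT (1u << 1)	/* SPT_PMC_BIT_ACPI_PM_TMR_DISABLE */

static uint32_t tmr_ctl_reg;		/* stands in for the PMC MMIO register */
static bool enable_on_resume;

static void pm_timer_suspend_resume(bool suspend)
{
	bool enabled;

	if (!suspend && !enable_on_resume)
		return;			/* timer was off before suspend: leave it off */

	enabled = !(tmr_ctl_reg & TMR_DISABLE_BIT);
	if (suspend)
		tmr_ctl_reg |= TMR_DISABLE_BIT;
	else
		tmr_ctl_reg &= ~TMR_DISABLE_BIT;

	enable_on_resume = suspend && enabled;
}

int main(void)
{
	pm_timer_suspend_resume(true);		/* suspend: timer forced off */
	printf("ctl 0x%x\n", tmr_ctl_reg);	/* 0x2 */
	pm_timer_suspend_resume(false);		/* resume: timer was on, re-enable */
	printf("ctl 0x%x\n", tmr_ctl_reg);	/* 0x0 */
	return 0;
}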
@@ -1404,6 +1486,7 @@ static int pmc_core_probe(struct platform_device *pdev)
struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
int (*core_init)(struct pmc_dev *pmcdev);
+ const struct pmc_reg_map *map;
struct pmc *primary_pmc;
int ret;
@@ -1462,6 +1545,11 @@ static int pmc_core_probe(struct platform_device *pdev)
pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
pmc_core_adjust_slp_s0_step(primary_pmc, 1));
+ map = primary_pmc->map;
+ if (map->acpi_pm_tmr_ctl_offset)
+ acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume,
+ pmcdev);
+
device_initialized = true;
dev_info(&pdev->dev, " initialized\n");
@@ -1471,6 +1559,12 @@ static int pmc_core_probe(struct platform_device *pdev)
static void pmc_core_remove(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+
+ if (map->acpi_pm_tmr_ctl_offset)
+ acpi_pmtmr_unregister_suspend_resume_callback();
+
pmc_core_dbgfs_unregister(pmcdev);
pmc_core_clean_structure(pdev);
}
@@ -1479,6 +1573,10 @@ static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+static bool ltr_ignore_all_suspend = true;
+module_param(ltr_ignore_all_suspend, bool, 0644);
+MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");
+
static __maybe_unused int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
@@ -1488,6 +1586,9 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
if (pmcdev->suspend)
pmcdev->suspend(pmcdev);
+ if (ltr_ignore_all_suspend)
+ pmc_core_ltr_ignore_all(pmcdev);
+
/* Check if the suspend will actually use S0ix */
if (pm_suspend_via_firmware())
return 0;
@@ -1594,6 +1695,9 @@ static __maybe_unused int pmc_core_resume(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ if (ltr_ignore_all_suspend)
+ pmc_core_ltr_restore_all(pmcdev);
+
if (pmcdev->resume)
return pmcdev->resume(pmcdev);
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index ea04de7eb9e8..75fd593a7b0f 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -68,6 +68,8 @@ struct telem_endpoint;
#define SPT_PMC_LTR_SCC 0x3A0
#define SPT_PMC_LTR_ISH 0x3A4
+#define SPT_PMC_ACPI_PM_TMR_CTL_OFFSET 0x18FC
+
/* Sunrise Point: PGD PFET Enable Ack Status Registers */
enum ppfear_regs {
SPT_PMC_XRAM_PPFEAR0A = 0x590,
@@ -148,6 +150,8 @@ enum ppfear_regs {
#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
+#define SPT_PMC_BIT_ACPI_PM_TMR_DISABLE BIT(1)
+
/* Cannonlake Power Management Controller register offsets */
#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
#define CNP_PMC_PM_CFG_OFFSET 0x1818
@@ -351,6 +355,8 @@ struct pmc_reg_map {
const u8 *lpm_reg_index;
const u32 pson_residency_offset;
const u32 pson_residency_counter_step;
+ const u32 acpi_pm_tmr_ctl_offset;
+ const u32 acpi_pm_tmr_disable_bit;
};
/**
@@ -372,6 +378,7 @@ struct pmc_info {
* @map: pointer to pmc_reg_map struct that contains platform
* specific attributes
* @lpm_req_regs: List of substate requirements
+ * @ltr_ign: Holds LTR ignore data while suspended
*
* pmc contains info about one power management controller device.
*/
@@ -380,6 +387,7 @@ struct pmc {
void __iomem *regbase;
const struct pmc_reg_map *map;
u32 *lpm_req_regs;
+ u32 ltr_ign;
};
/**
@@ -424,6 +432,8 @@ struct pmc_dev {
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
struct pmc_info *regmap_list;
+
+ bool enable_acpi_pm_timer_on_resume;
};
enum pmc_index {
@@ -604,10 +614,12 @@ int lnl_core_init(struct pmc_dev *pmcdev);
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
-#define pmc_for_each_mode(i, mode, pmcdev) \
- for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
- i < pmcdev->num_lpm_modes; \
- i++, mode = pmcdev->lpm_en_modes[i])
+#define pmc_for_each_mode(mode, pmcdev) \
+ for (unsigned int __i = 0, __cond; \
+ __cond = __i < (pmcdev)->num_lpm_modes, \
+ __cond && ((mode) = (pmcdev)->lpm_en_modes[__i]), \
+ __cond; \
+ __i++)
#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
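The rewritten pmc_for_each_mode() drops the caller-supplied index: the counter is declared inside the for statement, and the comma expression first computes the bounds check into __cond, then (only while in bounds) loads the current mode, and finally yields __cond as the loop condition. A compilable toy with illustrative names showing the same shape:

#include <stdio.h>

#define for_each_mode(mode, modes, n)				\
	for (unsigned int __i = 0, __cond;			\
	     __cond = __i < (n),				\
	     __cond && ((mode) = (modes)[__i]),			\
	     __cond;						\
	     __i++)

int main(void)
{
	int modes[] = { 2, 5, 7 };
	int mode;

	for_each_mode(mode, modes, 3)
		printf("mode %d\n", mode);	/* prints 2, 5, 7 */
	return 0;
}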
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index 1bde86c54eb9..c259c96b7dfd 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -9,11 +9,11 @@
*/
#include <linux/cleanup.h>
+#include <linux/intel_vsec.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "core.h"
-#include "../vsec.h"
#include "../pmt/telemetry.h"
#define SSRAM_HDR_SIZE 0x100
@@ -45,7 +45,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
struct telem_endpoint *ep;
const u8 *lpm_indices;
int num_maps, mode_offset = 0;
- int ret, mode, i;
+ int ret, mode;
int lpm_size;
u32 guid;
@@ -116,7 +116,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
*
*/
mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
- pmc_for_each_mode(i, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
int m;
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
index 71b0fd6cb7d8..cbbd44054468 100644
--- a/drivers/platform/x86/intel/pmc/icl.c
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -46,6 +46,8 @@ const struct pmc_reg_map icl_reg_map = {
.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
.etr3_offset = ETR3_OFFSET,
};
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index c7d15d864039..91f2fa728f5c 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -462,6 +462,8 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.lpm_num_maps = ADL_LPM_NUM_MAPS,
.ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED,
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c
index ab993a69e33e..2cd2b3c68e46 100644
--- a/drivers/platform/x86/intel/pmc/spt.c
+++ b/drivers/platform/x86/intel/pmc/spt.c
@@ -130,6 +130,8 @@ const struct pmc_reg_map spt_reg_map = {
.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index e0580de18077..371b4e30f142 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -197,6 +197,8 @@ const struct pmc_reg_map tgl_reg_map = {
.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
+ .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
.lpm_num_maps = TGL_LPM_NUM_MAPS,
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 4b53940a64e2..c04bb7f97a4d 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -9,12 +9,12 @@
*/
#include <linux/kernel.h>
+#include <linux/intel_vsec.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
-#include "../vsec.h"
#include "class.h"
#define PMT_XA_START 1
@@ -58,6 +58,22 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
return count;
}
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, u32 count)
+{
+ if (cb && cb->read_telem)
+ return cb->read_telem(pdev, guid, buf, count);
+
+ if (guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ return pmt_memcpy64_fromio(buf, addr, count);
+
+ memcpy_fromio(buf, addr, count);
+
+ return count;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, INTEL_PMT);
+
/*
* sysfs
*/
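pmt_telem_read_mmio() centralizes the read policy: a per-device callback, if registered, overrides everything; otherwise the SPR PUNIT GUID takes the aligned 64-bit copier; otherwise a plain memcpy_fromio(). A userspace sketch of that precedence, with placeholder names and a placeholder GUID value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GUID_QUIRK 0x1	/* placeholder; the real check is GUID_SPR_PUNIT */

typedef int (*read_telem_cb)(uint32_t guid, void *buf, uint32_t count);

/* stand-in for pmt_memcpy64_fromio(): whole 64-bit chunks only */
static int copy64(void *to, const void *from, uint32_t count)
{
	if (count % 8)
		return -1;
	memcpy(to, from, count);
	return (int)count;
}

static int telem_read(read_telem_cb cb, uint32_t guid,
		      void *buf, const void *mmio, uint32_t count)
{
	if (cb)
		return cb(guid, buf, count);	/* driver-provided override wins */
	if (guid == GUID_QUIRK)
		return copy64(buf, mmio, count);/* quirked device: aligned copy */
	memcpy(buf, mmio, count);		/* default byte copy */
	return (int)count;
}

int main(void)
{
	char src[8] = "telem", dst[8] = { 0 };

	telem_read(NULL, 0x2, dst, src, sizeof(src));
	printf("%s\n", dst);			/* "telem" */
	return 0;
}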
@@ -79,11 +95,8 @@ intel_pmt_read(struct file *filp, struct kobject *kobj,
if (count > entry->size - off)
count = entry->size - off;
- if (entry->guid == GUID_SPR_PUNIT)
- /* PUNIT on SPR only supports aligned 64-bit read */
- count = pmt_memcpy64_fromio(buf, entry->base + off, count);
- else
- memcpy_fromio(buf, entry->base + off, count);
+ count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,
+ entry->base + off, count);
return count;
}
@@ -239,6 +252,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
entry->guid = header->guid;
entry->size = header->size;
+ entry->cb = ivdev->priv_data;
return 0;
}
@@ -300,7 +314,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
goto fail_ioremap;
if (ns->pmt_add_endpoint) {
- ret = ns->pmt_add_endpoint(entry, ivdev->pcidev);
+ ret = ns->pmt_add_endpoint(ivdev, entry);
if (ret)
goto fail_add_endpoint;
}
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
index d23c63b73ab7..a267ac964423 100644
--- a/drivers/platform/x86/intel/pmt/class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -2,13 +2,13 @@
#ifndef _INTEL_PMT_CLASS_H
#define _INTEL_PMT_CLASS_H
+#include <linux/intel_vsec.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
-#include "../vsec.h"
#include "telemetry.h"
/* PMT access types */
@@ -24,6 +24,7 @@ struct pci_dev;
struct telem_endpoint {
struct pci_dev *pcidev;
struct telem_header header;
+ struct pmt_callbacks *cb;
void __iomem *base;
bool present;
struct kref kref;
@@ -43,6 +44,7 @@ struct intel_pmt_entry {
struct kobject *kobj;
void __iomem *disc_table;
void __iomem *base;
+ struct pmt_callbacks *cb;
unsigned long base_addr;
size_t size;
u32 guid;
@@ -55,10 +57,12 @@ struct intel_pmt_namespace {
const struct attribute_group *attr_grp;
int (*pmt_header_decode)(struct intel_pmt_entry *entry,
struct device *dev);
- int (*pmt_add_endpoint)(struct intel_pmt_entry *entry,
- struct pci_dev *pdev);
+ int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry);
};
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, u32 count);
bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 4014c02cafdb..9079d5dffc03 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -9,6 +9,7 @@
*/
#include <linux/auxiliary_bus.h>
+#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -16,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "../vsec.h"
#include "class.h"
/* Crashlog discovery header types */
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 09258564dfc4..c9feac859e57 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -9,6 +9,7 @@
*/
#include <linux/auxiliary_bus.h>
+#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -16,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "../vsec.h"
#include "class.h"
#define TELEM_SIZE_OFFSET 0x0
@@ -93,8 +93,8 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
return 0;
}
-static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
- struct pci_dev *pdev)
+static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry)
{
struct telem_endpoint *ep;
@@ -104,13 +104,14 @@ static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
return -ENOMEM;
ep = entry->ep;
- ep->pcidev = pdev;
+ ep->pcidev = ivdev->pcidev;
ep->header.access_type = entry->header.access_type;
ep->header.guid = entry->header.guid;
ep->header.base_offset = entry->header.base_offset;
ep->header.size = entry->header.size;
ep->base = entry->base;
ep->present = true;
+ ep->cb = ivdev->priv_data;
kref_init(&ep->kref);
@@ -218,7 +219,8 @@ int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
if (offset + NUM_BYTES_QWORD(count) > size)
return -EINVAL;
- memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count));
+ pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base + offset,
+ NUM_BYTES_QWORD(count));
return ep->present ? 0 : -EPIPE;
}
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 277e4f4b20ac..9d137621f0e6 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -12,6 +12,7 @@
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <linux/intel_vsec.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -22,8 +23,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "vsec.h"
-
#define ACCESS_TYPE_BARID 2
#define ACCESS_TYPE_LOCAL 3
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 10e21563fa46..9ad35fefea47 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -651,10 +651,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
-/* Lock to allow one shared misc device for all ISST interfaces */
-static DEFINE_MUTEX(punit_misc_dev_reg_lock);
-static int misc_usage_count;
-static int misc_device_ret;
static int misc_device_open;
static int isst_if_open(struct inode *inode, struct file *file)
@@ -720,39 +716,23 @@ static struct miscdevice isst_if_char_driver = {
static int isst_misc_reg(void)
{
- mutex_lock(&punit_misc_dev_reg_lock);
- if (misc_device_ret)
- goto unlock_exit;
-
- if (!misc_usage_count) {
- misc_device_ret = isst_if_cpu_info_init();
- if (misc_device_ret)
- goto unlock_exit;
-
- misc_device_ret = misc_register(&isst_if_char_driver);
- if (misc_device_ret) {
- isst_if_cpu_info_exit();
- goto unlock_exit;
- }
- }
- misc_usage_count++;
+ int ret;
-unlock_exit:
- mutex_unlock(&punit_misc_dev_reg_lock);
+ ret = isst_if_cpu_info_init();
+ if (ret)
+ return ret;
- return misc_device_ret;
+ ret = misc_register(&isst_if_char_driver);
+ if (ret)
+ isst_if_cpu_info_exit();
+
+ return ret;
}
static void isst_misc_unreg(void)
{
- mutex_lock(&punit_misc_dev_reg_lock);
- if (misc_usage_count)
- misc_usage_count--;
- if (!misc_usage_count && !misc_device_ret) {
- misc_deregister(&isst_if_char_driver);
- isst_if_cpu_info_exit();
- }
- mutex_unlock(&punit_misc_dev_reg_lock);
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
}
/**
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 83e8b1fe53b3..486ddc9b3592 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -51,6 +51,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
+#include <linux/intel_vsec.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -59,8 +60,6 @@
#include <linux/sizes.h>
#include <linux/string_helpers.h>
-#include "vsec.h"
-
/**
* struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
* @tpmi_id: TPMI feature identifier (what the feature is and its data format).
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 4e880585cbe4..e22b683a7a43 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -60,11 +60,16 @@ static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index
static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count,
enum uncore_index index)
{
- unsigned int input;
+ unsigned int input = 0;
int ret;
- if (kstrtouint(buf, 10, &input))
- return -EINVAL;
+ if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) {
+ if (kstrtobool(buf, (bool *)&input))
+ return -EINVAL;
+ } else {
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+ }
mutex_lock(&uncore_lock);
ret = uncore_write(data, input, index);
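A side note on the pre-initialized input above: kstrtobool() writes through a bool pointer, i.e. a single byte, which is presumably why input starts at 0 so its remaining bytes are already clean. A userspace demonstration of the one-byte write:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int input = 0xdeadbeef;
	bool b = true;

	memcpy(&input, &b, sizeof(b));		/* what writing via (bool *) does */
	printf("0x%x\n", input);		/* 0xdeadbe01 on little endian */

	input = 0;				/* the store_attr() approach */
	memcpy(&input, &b, sizeof(b));
	printf("%u\n", input);			/* 1 */
	return 0;
}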
@@ -103,6 +108,18 @@ show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ);
+store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD);
+store_uncore_attr(elc_high_threshold_enable,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE);
+store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ);
+
+show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+show_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD);
+show_uncore_attr(elc_high_threshold_enable,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE);
+show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ);
+
#define show_uncore_data(member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
@@ -146,7 +163,8 @@ show_uncore_data(initial_max_freq_khz);
static int create_attr_group(struct uncore_data *data, char *name)
{
- int ret, freq, index = 0;
+ int ret, index = 0;
+ unsigned int val;
init_attribute_rw(max_freq_khz);
init_attribute_rw(min_freq_khz);
@@ -168,10 +186,24 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
- ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ);
+ ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ);
if (!ret)
data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+ ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+ if (!ret) {
+ init_attribute_rw(elc_low_threshold_percent);
+ init_attribute_rw(elc_high_threshold_percent);
+ init_attribute_rw(elc_high_threshold_enable);
+ init_attribute_rw(elc_floor_freq_khz);
+
+ data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->elc_high_threshold_percent_kobj_attr.attr;
+ data->uncore_attrs[index++] =
+ &data->elc_high_threshold_enable_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr;
+ }
+
data->uncore_attrs[index] = NULL;
data->uncore_attr_group.name = name;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 4c245b945e4e..26c854cd5d97 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -34,6 +34,13 @@
* @domain_id_kobj_attr: Storage for kobject attribute domain_id
* @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
* @package_id_kobj_attr: Storage for kobject attribute package_id
+ * @elc_low_threshold_percent_kobj_attr:
+ *	Storage for kobject attribute elc_low_threshold_percent
+ * @elc_high_threshold_percent_kobj_attr:
+ *	Storage for kobject attribute elc_high_threshold_percent
+ * @elc_high_threshold_enable_kobj_attr:
+ *	Storage for kobject attribute elc_high_threshold_enable
+ * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
@@ -61,7 +68,11 @@ struct uncore_data {
struct kobj_attribute domain_id_kobj_attr;
struct kobj_attribute fabric_cluster_id_kobj_attr;
struct kobj_attribute package_id_kobj_attr;
- struct attribute *uncore_attrs[9];
+ struct kobj_attribute elc_low_threshold_percent_kobj_attr;
+ struct kobj_attribute elc_high_threshold_percent_kobj_attr;
+ struct kobj_attribute elc_high_threshold_enable_kobj_attr;
+ struct kobj_attribute elc_floor_freq_khz_kobj_attr;
+ struct attribute *uncore_attrs[13];
};
#define UNCORE_DOMAIN_ID_INVALID -1
@@ -70,6 +81,10 @@ enum uncore_index {
UNCORE_INDEX_MIN_FREQ,
UNCORE_INDEX_MAX_FREQ,
UNCORE_INDEX_CURRENT_FREQ,
+ UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE,
+ UNCORE_INDEX_EFF_LAT_CTRL_FREQ,
};
int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 9fa3037c03d1..0591053813a2 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -30,6 +30,7 @@
#define UNCORE_MAJOR_VERSION 0
#define UNCORE_MINOR_VERSION 2
+#define UNCORE_ELC_SUPPORTED_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -46,6 +47,7 @@ struct tpmi_uncore_struct;
/* Information for each cluster */
struct tpmi_uncore_cluster_info {
bool root_domain;
+ bool elc_supported;
u8 __iomem *cluster_base;
struct uncore_data uncore_data;
struct tpmi_uncore_struct *uncore_root;
@@ -75,6 +77,10 @@ struct tpmi_uncore_struct {
/* Bit definitions for CONTROL register */
#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15)
+#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22)
+#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32)
+#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39)
+#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40)
/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
@@ -89,6 +95,48 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
*value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}
+/* Helper function to read efficiency latency control values over MMIO */
+static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ u64 ctrl;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ if (cluster_info->root_domain)
+ return -ENODATA;
+
+ if (!cluster_info->elc_supported)
+ return -EOPNOTSUPP;
+
+ ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
+ *val *= 100;
+ *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
+ *val *= 100;
+ *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
+ break;
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK)
/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
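The ELC thresholds live in 7-bit hardware fields, so read_eff_lat_ctrl() rescales them to percent with raw * 100 / FIELD_MAX, rounding up so a nonzero hardware value never displays as 0%. A worked sketch of the arithmetic:

#include <stdio.h>

#define FIELD_MAX7 127u			/* FIELD_MAX() of a 7-bit threshold field */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int raw = 64;					/* hardware value */
	unsigned int pct = DIV_ROUND_UP(raw * 100u, FIELD_MAX7);

	printf("raw %u/127 -> %u%%\n", raw, pct);		/* 64/127 -> 51% */
	printf("raw 1/127 -> %u%%\n",
	       DIV_ROUND_UP(100u, FIELD_MAX7));			/* never rounds to 0 */
	return 0;
}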
@@ -137,6 +185,82 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
return 0;
}
+/* Helper function for writing efficiency latency control values over MMIO */
+static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ u64 control;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+
+ if (cluster_info->root_domain)
+ return -ENODATA;
+
+ if (!cluster_info->elc_supported)
+ return -EOPNOTSUPP;
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ if (val > 100)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ if (val > 100)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ if (val > 1)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ val /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
+ return -EINVAL;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
+ val /= 100;
+ control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
+ val /= 100;
+ control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
+ break;
+
+ default:
+ break;
+ }
+
+ writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ return 0;
+}
+
/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
unsigned int index)
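write_eff_lat_ctrl() performs the reverse scaling (percent floored into the field) followed by a clear-and-FIELD_PREP read-modify-write, leaving the rest of the 64-bit control word untouched. A standalone sketch using the low-threshold field layout from the masks above:

#include <stdint.h>
#include <stdio.h>

#define LOW_THRESH_SHIFT 32
#define LOW_THRESH_MASK (0x7fULL << LOW_THRESH_SHIFT)	/* GENMASK_ULL(38, 32) */

int main(void)
{
	uint64_t control = ~0ULL;		/* pretend control register */
	unsigned int pct = 51;
	uint64_t raw = pct * 0x7fULL / 100;	/* 51% -> 64, floor division */

	control &= ~LOW_THRESH_MASK;		/* clear the field */
	control |= raw << LOW_THRESH_SHIFT;	/* FIELD_PREP() equivalent */
	printf("raw %llu control 0x%016llx\n",
	       (unsigned long long)raw, (unsigned long long)control);
	return 0;
}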
@@ -156,7 +280,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un
writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
}
-/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
+/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
enum uncore_index index)
{
@@ -234,6 +358,33 @@ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncor
case UNCORE_INDEX_CURRENT_FREQ:
return uncore_read_freq(data, value);
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ return read_eff_lat_ctrl(data, value, index);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
+static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ return write_eff_lat_ctrl(data, value, index);
+
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_write_control_freq(data, value, index);
+
default:
break;
}
@@ -291,7 +442,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
return -EINVAL;
/* Register callbacks to uncore core */
- ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write);
if (ret)
return ret;
@@ -409,6 +560,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
cluster_info->uncore_root = tpmi_uncore;
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
+ cluster_info->elc_supported = true;
+
ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
if (ret) {
cluster_info->cluster_base = NULL;
@@ -427,6 +581,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
auxiliary_set_drvdata(auxdev, tpmi_uncore);
+ if (topology_max_dies_per_package() > 1)
+ return 0;
+
tpmi_uncore->root_cluster.root_domain = true;
tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;
@@ -450,7 +607,9 @@ static void uncore_remove(struct auxiliary_device *auxdev)
{
struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);
- uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+ if (tpmi_uncore->root_cluster.root_domain)
+ uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+
remove_cluster_entries(tpmi_uncore);
uncore_freq_common_exit();
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 0fdfaf3a4f5c..7b5cc9993974 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -17,14 +17,13 @@
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
#include <linux/idr.h>
+#include <linux/intel_vsec.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
-#include "vsec.h"
-
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
@@ -213,6 +212,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
intel_vsec_dev->num_resources = header->num_entries;
intel_vsec_dev->quirks = info->quirks;
intel_vsec_dev->base_addr = info->base_addr;
+ intel_vsec_dev->priv_data = info->priv_data;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -341,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
void intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info)
{
- if (!pdev || !info)
+ if (!pdev || !info || !info->headers)
return;
intel_vsec_walk_header(pdev, info);
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index a68df4133403..5b16d29c93d7 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
/* IPC defines the following message types */
#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 7d87cbd4b9c6..69b36ce41fa2 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -18,7 +18,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
static int major;
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index dbf0310448da..d7f72d6deb44 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
static int intel_scu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
diff --git a/drivers/platform/x86/intel_scu_pltdrv.c b/drivers/platform/x86/intel_scu_pltdrv.c
index 56ec6ae4c824..0892362acd7b 100644
--- a/drivers/platform/x86/intel_scu_pltdrv.c
+++ b/drivers/platform/x86/intel_scu_pltdrv.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
static int intel_scu_platform_probe(struct platform_device *pdev)
{
diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c
index d0b6637861d3..746d47d33406 100644
--- a/drivers/platform/x86/intel_scu_wdt.c
+++ b/drivers/platform/x86/intel_scu_wdt.c
@@ -9,13 +9,14 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/intel-mid_wdt.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/io_apic.h>
#include <asm/hw_irq.h>
+#include <linux/platform_data/x86/intel-mid_wdt.h>
+
#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c
index e0bbd6a14a89..bd9f95404c7c 100644
--- a/drivers/platform/x86/lenovo-ymc.c
+++ b/drivers/platform/x86/lenovo-ymc.c
@@ -43,6 +43,8 @@ struct lenovo_ymc_private {
};
static const struct key_entry lenovo_ymc_keymap[] = {
+ /* Ignore the uninitialized state */
+ { KE_IGNORE, 0x00 },
/* Laptop */
{ KE_SW, 0x01, { .sw = { SW_TABLET_MODE, 0 } } },
/* Tablet */
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 9c7857842caf..4b57102c7f62 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -8,6 +8,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
@@ -31,6 +34,26 @@ MODULE_AUTHOR("Matan Ziv-Av");
MODULE_DESCRIPTION("LG WMI Hotkey Driver");
MODULE_LICENSE("GPL");
+static bool fw_debug;
+module_param(fw_debug, bool, 0);
+MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
+
+#define LG_ADDRESS_SPACE_ID 0x8F
+
+#define LG_ADDRESS_SPACE_DEBUG_FLAG_ADR 0x00
+#define LG_ADDRESS_SPACE_FAN_MODE_ADR 0x03
+
+#define LG_ADDRESS_SPACE_DTTM_FLAG_ADR 0x20
+#define LG_ADDRESS_SPACE_CPU_TEMP_ADR 0x21
+#define LG_ADDRESS_SPACE_CPU_TRIP_LOW_ADR 0x22
+#define LG_ADDRESS_SPACE_CPU_TRIP_HIGH_ADR 0x23
+#define LG_ADDRESS_SPACE_MB_TEMP_ADR 0x24
+#define LG_ADDRESS_SPACE_MB_TRIP_LOW_ADR 0x25
+#define LG_ADDRESS_SPACE_MB_TRIP_HIGH_ADR 0x26
+
+#define LG_ADDRESS_SPACE_DEBUG_MSG_START_ADR 0x3E8
+#define LG_ADDRESS_SPACE_DEBUG_MSG_END_ADR 0x5E8
+
#define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248"
#define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2"
#define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6"
@@ -182,21 +205,11 @@ static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u
return (union acpi_object *)buffer.pointer;
}
-static void wmi_notify(u32 value, void *context)
+static void wmi_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
long data = (long)context;
pr_debug("event guid %li\n", data);
- status = wmi_get_event_data(value, &response);
- if (ACPI_FAILURE(status)) {
- pr_err("Bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
if (!obj)
return;
@@ -218,7 +231,6 @@ static void wmi_notify(u32 value, void *context)
pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type,
obj->integer.value);
- kfree(response.pointer);
}
static void wmi_input_setup(void)
@@ -646,6 +658,107 @@ static struct platform_driver pf_driver = {
}
};
+static acpi_status lg_laptop_address_space_write(struct device *dev, acpi_physical_address address,
+ size_t size, u64 value)
+{
+ u8 byte;
+
+ /* Ignore any debug messages */
+ if (address >= LG_ADDRESS_SPACE_DEBUG_MSG_START_ADR &&
+ address <= LG_ADDRESS_SPACE_DEBUG_MSG_END_ADR)
+ return AE_OK;
+
+ if (size != sizeof(byte))
+ return AE_BAD_PARAMETER;
+
+ byte = value & 0xFF;
+
+ switch (address) {
+ case LG_ADDRESS_SPACE_FAN_MODE_ADR:
+ /*
+ * The fan mode field is not affected by the DTTM flag, so we
+ * have to manually check fw_debug.
+ */
+ if (fw_debug)
+ dev_dbg(dev, "Fan mode set to mode %u\n", byte);
+
+ return AE_OK;
+ case LG_ADDRESS_SPACE_CPU_TEMP_ADR:
+ dev_dbg(dev, "CPU temperature is %u °C\n", byte);
+ return AE_OK;
+ case LG_ADDRESS_SPACE_CPU_TRIP_LOW_ADR:
+ dev_dbg(dev, "CPU lower trip point set to %u °C\n", byte);
+ return AE_OK;
+ case LG_ADDRESS_SPACE_CPU_TRIP_HIGH_ADR:
+ dev_dbg(dev, "CPU higher trip point set to %u °C\n", byte);
+ return AE_OK;
+ case LG_ADDRESS_SPACE_MB_TEMP_ADR:
+ dev_dbg(dev, "Motherboard temperature is %u °C\n", byte);
+ return AE_OK;
+ case LG_ADDRESS_SPACE_MB_TRIP_LOW_ADR:
+ dev_dbg(dev, "Motherboard lower trip point set to %u °C\n", byte);
+ return AE_OK;
+ case LG_ADDRESS_SPACE_MB_TRIP_HIGH_ADR:
+ dev_dbg(dev, "Motherboard higher trip point set to %u °C\n", byte);
+ return AE_OK;
+ default:
+ dev_notice_ratelimited(dev, "Ignoring write to unknown opregion address %llu\n",
+ address);
+ return AE_OK;
+ }
+}
+
+static acpi_status lg_laptop_address_space_read(struct device *dev, acpi_physical_address address,
+ size_t size, u64 *value)
+{
+ if (size != 1)
+ return AE_BAD_PARAMETER;
+
+ switch (address) {
+ case LG_ADDRESS_SPACE_DEBUG_FLAG_ADR:
+ /* Debug messages are already printed using the standard ACPI Debug object */
+ *value = 0x00;
+ return AE_OK;
+ case LG_ADDRESS_SPACE_DTTM_FLAG_ADR:
+ *value = fw_debug;
+ return AE_OK;
+ default:
+ dev_notice_ratelimited(dev, "Attempt to read unknown opregion address %llu\n",
+ address);
+ return AE_BAD_PARAMETER;
+ }
+}
+
+static acpi_status lg_laptop_address_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value, void *handler_context,
+ void *region_context)
+{
+ struct device *dev = handler_context;
+ size_t size;
+
+ if (bits % BITS_PER_BYTE)
+ return AE_BAD_PARAMETER;
+
+ size = bits / BITS_PER_BYTE;
+
+ switch (function) {
+ case ACPI_READ:
+ return lg_laptop_address_space_read(dev, address, size, value);
+ case ACPI_WRITE:
+ return lg_laptop_address_space_write(dev, address, size, *value);
+ default:
+ return AE_BAD_PARAMETER;
+ }
+}
+
+static void lg_laptop_remove_address_space_handler(void *data)
+{
+ struct acpi_device *device = data;
+
+ acpi_remove_address_space_handler(device->handle, LG_ADDRESS_SPACE_ID,
+ &lg_laptop_address_space_handler);
+}
+
static int acpi_add(struct acpi_device *device)
{
struct platform_device_info pdev_info = {
@@ -653,6 +766,7 @@ static int acpi_add(struct acpi_device *device)
.name = PLATFORM_NAME,
.id = PLATFORM_DEVID_NONE,
};
+ acpi_status status;
int ret;
const char *product;
int year = 2017;
@@ -660,6 +774,17 @@ static int acpi_add(struct acpi_device *device)
if (pf_device)
return 0;
+ status = acpi_install_address_space_handler(device->handle, LG_ADDRESS_SPACE_ID,
+ &lg_laptop_address_space_handler,
+ NULL, &device->dev);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ ret = devm_add_action_or_reset(&device->dev, lg_laptop_remove_address_space_handler,
+ device);
+ if (ret < 0)
+ return ret;
+
ret = platform_driver_register(&pf_driver);
if (ret)
return ret;
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index fd318cdfe313..4a7ac85c4db4 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -170,20 +170,9 @@ static const struct backlight_ops msi_backlight_ops = {
.update_status = bl_set_status,
};
-static void msi_wmi_notify(u32 value, void *context)
+static void msi_wmi_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
struct key_entry *key;
- union acpi_object *obj;
- acpi_status status;
-
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
@@ -192,7 +181,7 @@ static void msi_wmi_notify(u32 value, void *context)
eventcode);
if (!key) {
pr_info("Unknown key pressed - %x\n", eventcode);
- goto msi_wmi_notify_exit;
+ return;
}
if (event_wmi->quirk_last_pressed) {
@@ -204,7 +193,7 @@ static void msi_wmi_notify(u32 value, void *context)
pr_debug("Suppressed key event 0x%X - "
"Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
- goto msi_wmi_notify_exit;
+ return;
}
last_pressed = cur;
}
@@ -221,9 +210,6 @@ static void msi_wmi_notify(u32 value, void *context)
}
} else
pr_info("Unknown event received\n");
-
-msi_wmi_notify_exit:
- kfree(response.pointer);
}
static int __init msi_wmi_backlight_setup(void)
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index cf845ee1c7b1..2bf94d0ab324 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -121,6 +121,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/i8042.h>
#include <linux/init.h>
@@ -224,6 +225,17 @@ static const struct key_entry panasonic_keymap[] = {
{ KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */
{ KE_KEY, 9, { KEY_BATTERY } },
{ KE_KEY, 10, { KEY_SUSPEND } },
+ { KE_KEY, 21, { KEY_MACRO1 } },
+ { KE_KEY, 22, { KEY_MACRO2 } },
+ { KE_KEY, 24, { KEY_MACRO3 } },
+ { KE_KEY, 25, { KEY_MACRO4 } },
+ { KE_KEY, 34, { KEY_MACRO5 } },
+ { KE_KEY, 35, { KEY_MACRO6 } },
+ { KE_KEY, 36, { KEY_MACRO7 } },
+ { KE_KEY, 37, { KEY_MACRO8 } },
+ { KE_KEY, 41, { KEY_MACRO9 } },
+ { KE_KEY, 42, { KEY_MACRO10 } },
+ { KE_KEY, 43, { KEY_MACRO11 } },
{ KE_END, 0 }
};
@@ -337,7 +349,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
}
if (pcc->num_sifr < hkey->package.count) {
- pr_err("SQTY reports bad SINF length\n");
+ pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n",
+ pcc->num_sifr, hkey->package.count);
status = AE_ERROR;
goto end;
}
@@ -773,6 +786,24 @@ static DEVICE_ATTR_RW(dc_brightness);
static DEVICE_ATTR_RW(current_brightness);
static DEVICE_ATTR_RW(cdpower);
+static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (attr == &dev_attr_mute.attr)
+ return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0;
+
+ if (attr == &dev_attr_eco_mode.attr)
+ return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0;
+
+ if (attr == &dev_attr_current_brightness.attr)
+ return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0;
+
+ return attr->mode;
+}
+
static struct attribute *pcc_sysfs_entries[] = {
&dev_attr_numbatt.attr,
&dev_attr_lcdtype.attr,
@@ -787,8 +818,9 @@ static struct attribute *pcc_sysfs_entries[] = {
};
static const struct attribute_group pcc_attr_group = {
- .name = NULL, /* put in device directory */
- .attrs = pcc_sysfs_entries,
+ .name = NULL, /* put in device directory */
+ .attrs = pcc_sysfs_entries,
+ .is_visible = pcc_sysfs_is_visible,
};
@@ -810,8 +842,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
return;
}
- key = result & 0xf;
- updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */
+ key = result & GENMASK(6, 0);
+ updown = result & BIT(7); /* 0x80 == key down; 0x00 = key up */
/* hack: some firmware sends no key down for sleep / hibernate */
if (key == 7 || key == 10) {
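This widening pairs with the new macro-key entries above: codes now reach 43, which the old result & 0xf nibble would have truncated, while bit 7 remains the key-down flag. A quick decode check:

#include <stdio.h>

int main(void)
{
	unsigned int result = 0x80 | 42;	/* KEY_MACRO10 code 42, key-down */

	printf("key %u, %s\n", result & 0x7f,	/* GENMASK(6, 0) */
	       (result & 0x80) ? "down" : "up");/* BIT(7) */
	return 0;
}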
@@ -941,12 +973,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
if (!pcc)
return -EINVAL;
- acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
- acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ if (pcc->num_sifr > SINF_MUTE)
+ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+ if (pcc->num_sifr > SINF_ECO_MODE)
+ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
- acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+ if (pcc->num_sifr > SINF_CUR_BRIGHT)
+ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
return 0;
}
@@ -963,11 +998,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
num_sifr = acpi_pcc_get_sqty(device);
- if (num_sifr < 0 || num_sifr > 255) {
- pr_err("num_sifr out of range");
+ /*
+ * pcc->sinf is expected to at least have the AC+DC brightness entries.
+ * Accesses to higher SINF entries are checked against num_sifr.
+ */
+ if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) {
+ pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1);
return -ENODEV;
}
+ /*
+ * Some DSDTs have an off-by-one bug where the SINF package count is
+ * one higher than the SQTY-reported value, so allocate one extra entry.
+ */
+ num_sifr++;
+
pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
if (!pcc) {
pr_err("Couldn't allocate mem for pcc");
@@ -1020,11 +1065,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
pcc->sticky_key = 0;
- pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
- pcc->mute = pcc->sinf[SINF_MUTE];
pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
- pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+ if (pcc->num_sifr > SINF_MUTE)
+ pcc->mute = pcc->sinf[SINF_MUTE];
+ if (pcc->num_sifr > SINF_ECO_MODE)
+ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+ if (pcc->num_sifr > SINF_CUR_BRIGHT)
+ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
/* add sysfs attributes */
result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 3d2f8e758369..0d3e3ca20b1b 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -14,7 +14,6 @@
#include <linux/pci.h>
#include <linux/backlight.h>
#include <linux/leds.h>
-#include <linux/fb.h>
#include <linux/dmi.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
@@ -554,7 +553,7 @@ static int update_status(struct backlight_device *bd)
set_brightness(samsung, bd->props.brightness);
- if (bd->props.power == FB_BLANK_UNBLANK)
+ if (bd->props.power == BACKLIGHT_POWER_ON)
sabi_set_commandb(samsung, commands->set_backlight, 1);
else
sabi_set_commandb(samsung, commands->set_backlight, 0);
@@ -1189,7 +1188,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung)
samsung->backlight_device = bd;
samsung->backlight_device->props.brightness = read_brightness(samsung);
- samsung->backlight_device->props.power = FB_BLANK_UNBLANK;
+ samsung->backlight_device->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(samsung->backlight_device);
return 0;
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 3be016cfe601..7c04cc9e5891 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -83,11 +83,15 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
static void smi_devs_unregister(struct smi *smi)
{
+#if IS_REACHABLE(CONFIG_I2C)
while (smi->i2c_num--)
i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
+#endif
- while (smi->spi_num--)
- spi_unregister_device(smi->spi_devs[smi->spi_num]);
+ if (IS_REACHABLE(CONFIG_SPI)) {
+ while (smi->spi_num--)
+ spi_unregister_device(smi->spi_devs[smi->spi_num]);
+ }
}
/**
@@ -258,9 +262,15 @@ static int smi_probe(struct platform_device *pdev)
switch (node->bus_type) {
case SMI_I2C:
- return smi_i2c_probe(pdev, smi, node->instances);
+ if (IS_REACHABLE(CONFIG_I2C))
+ return smi_i2c_probe(pdev, smi, node->instances);
+
+ return -ENODEV;
case SMI_SPI:
- return smi_spi_probe(pdev, smi, node->instances);
+ if (IS_REACHABLE(CONFIG_SPI))
+ return smi_spi_probe(pdev, smi, node->instances);
+
+ return -ENODEV;
case SMI_AUTO_DETECT:
/*
* For backwards-compatibility with the existing nodes I2C
@@ -270,10 +280,16 @@ static int smi_probe(struct platform_device *pdev)
* SpiSerialBus nodes that were previously ignored, and this
* preserves that behavior.
*/
- ret = smi_i2c_probe(pdev, smi, node->instances);
- if (ret != -ENOENT)
- return ret;
- return smi_spi_probe(pdev, smi, node->instances);
+ if (IS_REACHABLE(CONFIG_I2C)) {
+ ret = smi_i2c_probe(pdev, smi, node->instances);
+ if (ret != -ENOENT)
+ return ret;
+ }
+
+ if (IS_REACHABLE(CONFIG_SPI))
+ return smi_spi_probe(pdev, smi, node->instances);
+
+ return -ENODEV;
default:
return -EINVAL;
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f269ca1ff771..4c1b0553f872 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7749,6 +7749,28 @@ static struct ibm_struct volume_driver_data = {
* EC 0x2f (HFSP) might be available *for reading*, but do not use
* it for writing.
*
+ * TPACPI_FAN_RD_ACPI_FANG:
+ * ACPI FANG method: returns fan control register
+ *
+ * Takes one parameter which is 0x8100 plus the offset to EC memory
+ * address 0xf500 and returns the byte at this address.
+ *
+ * 0xf500:
+ * When the value is less than 9, automatic mode is enabled
+ * 0xf502:
+ * Contains the current fan speed from 0-100%
+ * 0xf506:
+ * Bit 7 has to be set in order to enable manual control by
+ * writing a value >= 9 to 0xf500
+ *
+ * TPACPI_FAN_WR_ACPI_FANW:
+ * ACPI FANW method: sets fan control registers
+ *
+ * Takes 0x8100 plus the offset to EC memory address 0xf500 and the
+ * value to be written there as parameters.
+ *
+ * see TPACPI_FAN_RD_ACPI_FANG
+ *
* TPACPI_FAN_WR_TPEC:
* ThinkPad EC register 0x2f (HFSP): fan control loop mode
* Supported on almost all ThinkPads
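The FANG/FANW argument encoding described above is a fixed translation: the ACPI argument is 0x8100 plus the offset of the EC byte from 0xf500. A tiny illustrative helper making the mapping explicit:

#include <stdio.h>

/* illustrative only: EC byte at ec_addr -> FANG/FANW method argument */
static unsigned int fang_arg(unsigned int ec_addr)
{
	return 0x8100 + (ec_addr - 0xf500);
}

int main(void)
{
	printf("0x%x\n", fang_arg(0xf500));	/* 0x8100: mode byte */
	printf("0x%x\n", fang_arg(0xf502));	/* 0x8102: current speed, 0-100% */
	printf("0x%x\n", fang_arg(0xf506));	/* 0x8106: manual-control enable */
	return 0;
}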
@@ -7882,6 +7904,7 @@ enum { /* Fan control constants */
enum fan_status_access_mode {
TPACPI_FAN_NONE = 0, /* No fan status or control */
TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_ACPI_FANG, /* Use ACPI FANG */
TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */
};
@@ -7889,6 +7912,7 @@ enum fan_status_access_mode {
enum fan_control_access_mode {
TPACPI_FAN_WR_NONE = 0, /* No fan control */
TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
+ TPACPI_FAN_WR_ACPI_FANW, /* Use ACPI FANW */
TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
};
@@ -7922,9 +7946,13 @@ TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */
"\\FSPD", /* 600e/x, 770e, 770x */
); /* all others */
+TPACPI_HANDLE(fang, ec, "FANG", /* E531 */
+ ); /* all others */
TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
"JFNS", /* 770x-JL */
); /* all others */
+TPACPI_HANDLE(fanw, ec, "FANW", /* E531 */
+ ); /* all others */
/*
 * Uninitialized HFSP quirk: ACPI DSDT and EC fail to initialize the
@@ -8031,6 +8059,23 @@ static int fan_get_status(u8 *status)
break;
}
+ case TPACPI_FAN_RD_ACPI_FANG: {
+ /* E531 */
+ int mode, speed;
+
+ if (unlikely(!acpi_evalf(fang_handle, &mode, NULL, "dd", 0x8100)))
+ return -EIO;
+ if (unlikely(!acpi_evalf(fang_handle, &speed, NULL, "dd", 0x8102)))
+ return -EIO;
+
+ if (likely(status)) {
+ *status = speed * 7 / 100;
+ if (mode < 9)
+ *status |= TP_EC_FAN_AUTO;
+ }
+
+ break;
+ }
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
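Worked through, the FANG percentage-to-level mapping in the case above behaves as follows (assuming integer division):

/*
 * speed =  50 (%) -> *status = 50 * 7 / 100 = 3   (level 3 of 7)
 * speed = 100 (%) -> *status = 100 * 7 / 100 = 7  (top level)
 * mode < 9 additionally ORs in TP_EC_FAN_AUTO.
 */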
@@ -8145,6 +8190,17 @@ static int fan2_get_speed(unsigned int *speed)
if (speed)
*speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
break;
+ case TPACPI_FAN_RD_ACPI_FANG: {
+ /* E531 */
+ int speed_tmp;
+
+ if (unlikely(!acpi_evalf(fang_handle, &speed_tmp, NULL, "dd", 0x8102)))
+ return -EIO;
+
+ if (likely(speed))
+ *speed = speed_tmp * 65535 / 100;
+ break;
+ }
default:
return -ENXIO;
@@ -8204,6 +8260,32 @@ static int fan_set_level(int level)
tp_features.fan_ctrl_status_undef = 0;
break;
+ case TPACPI_FAN_WR_ACPI_FANW:
+ if (!(level & TP_EC_FAN_AUTO) && (level < 0 || level > 7))
+ return -EINVAL;
+ if (level & TP_EC_FAN_FULLSPEED)
+ return -EINVAL;
+
+ if (level & TP_EC_FAN_AUTO) {
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x05)) {
+ return -EIO;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0x00)) {
+ return -EIO;
+ }
+ } else {
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) {
+ return -EIO;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) {
+ return -EIO;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8102, level * 100 / 7)) {
+ return -EIO;
+ }
+ }
+ break;
+
default:
return -ENXIO;
}
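The manual/auto FANW sequences above could be condensed into a wrapper like the following hypothetical sketch (fanw_write() and fanw_set_manual_percent() are not part of the patch; they reuse the driver's acpi_evalf() with the same "vdd" format as the hunks):

static int fanw_write(u16 param, u8 value)
{
	if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", param, value))
		return -EIO;

	return 0;
}

static int fanw_set_manual_percent(u8 percent)
{
	/* 0x8106 <- 0x45 enables manual mode, 0x8100 <- 0xff arms it */
	return fanw_write(0x8106, 0x45) ?:
	       fanw_write(0x8100, 0xff) ?:
	       fanw_write(0x8102, percent);	/* duty in percent */
}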
@@ -8236,7 +8318,7 @@ static int fan_set_level_safe(int level)
static int fan_set_enable(void)
{
- u8 s;
+ u8 s = 0;
int rc;
if (!fan_control_allowed)
@@ -8282,6 +8364,19 @@ static int fan_set_enable(void)
rc = 0;
break;
+ case TPACPI_FAN_WR_ACPI_FANW:
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x05)) {
+ rc = -EIO;
+ break;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0x00)) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = 0;
+ break;
+
default:
rc = -ENXIO;
}
@@ -8324,6 +8419,22 @@ static int fan_set_disable(void)
fan_control_desired_level = 0;
break;
+ case TPACPI_FAN_WR_ACPI_FANW:
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) {
+ rc = -EIO;
+ break;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) {
+ rc = -EIO;
+ break;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8102, 0x00)) {
+ rc = -EIO;
+ break;
+ }
+ rc = 0;
+ break;
+
default:
rc = -ENXIO;
}
@@ -8357,6 +8468,23 @@ static int fan_set_speed(int speed)
rc = -EINVAL;
break;
+ case TPACPI_FAN_WR_ACPI_FANW:
+ if (speed >= 0 && speed <= 65535) {
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) {
+ rc = -EIO;
+ break;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) {
+ rc = -EIO;
+ break;
+ }
+ if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd",
+ 0x8102, speed * 100 / 65535))
+ rc = -EIO;
+ } else
+ rc = -EINVAL;
+ break;
+
default:
rc = -ENXIO;
}
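The speed-to-percent conversion in the FANW path above, worked through with integer math:

/*
 * speed = 32768 -> 32768 * 100 / 65535 = 50 (%), i.e. half duty
 * speed = 65535 -> exactly 100 (%)
 * Values outside 0..65535 are rejected with -EINVAL.
 */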
@@ -8699,6 +8827,10 @@ static int __init fan_init(struct ibm_init_struct *iibm)
TPACPI_ACPIHANDLE_INIT(gfan);
TPACPI_ACPIHANDLE_INIT(sfan);
}
+ if (tpacpi_is_lenovo()) {
+ TPACPI_ACPIHANDLE_INIT(fang);
+ TPACPI_ACPIHANDLE_INIT(fanw);
+ }
quirks = tpacpi_check_quirks(fan_quirk_table,
ARRAY_SIZE(fan_quirk_table));
@@ -8718,6 +8850,9 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+ } else if (fang_handle) {
+ /* E531 */
+ fan_status_access_mode = TPACPI_FAN_RD_ACPI_FANG;
} else {
/* all other ThinkPads: note that even old-style
 * ThinkPad ECs support the fan control register */
@@ -8764,6 +8899,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN;
fan_control_commands |=
TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE;
+ } else if (fanw_handle) {
+ /* E531 */
+ fan_control_access_mode = TPACPI_FAN_WR_ACPI_FANW;
+ fan_control_commands |=
+ TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_SPEED | TPACPI_FAN_CMD_ENABLE;
} else {
if (!gfan_handle) {
/* gfan without sfan means no fan control */
@@ -8915,6 +9055,7 @@ static int fan_read(struct seq_file *m)
case TPACPI_FAN_RD_TPEC_NS:
case TPACPI_FAN_RD_TPEC:
+ case TPACPI_FAN_RD_ACPI_FANG:
/* all except 570, 600e/x, 770e, 770x */
rc = fan_get_status_safe(&status);
if (rc)
@@ -8935,7 +9076,7 @@ static int fan_read(struct seq_file *m)
 * No other level settings are available
*/
seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto");
- } else {
+ } else if (fan_status_access_mode == TPACPI_FAN_RD_TPEC) {
if (status & TP_EC_FAN_FULLSPEED)
/* Disengaged mode takes precedence */
seq_printf(m, "level:\t\tdisengaged\n");
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index 77c35529ab6f..12c46455e8dc 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -32,26 +32,13 @@ static const struct key_entry toshiba_wmi_keymap[] __initconst = {
{ KE_END, 0 }
};
-static void toshiba_wmi_notify(u32 value, void *context)
+static void toshiba_wmi_notify(union acpi_object *obj, void *context)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
-
- status = wmi_get_event_data(value, &response);
- if (ACPI_FAILURE(status)) {
- pr_err("Bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
if (!obj)
return;
/* TODO: Add proper checks once we have data */
pr_debug("Unknown event received, obj type %x\n", obj->type);
-
- kfree(response.pointer);
}
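A minimal sketch of the new notify convention this hunk adopts (demo_wmi_notify() is illustrative only): the WMI core now evaluates _WED itself and hands the handler a ready-made ACPI object, or NULL when no event data exists, so drivers no longer allocate a buffer or free it.

#include <linux/acpi.h>
#include <linux/printk.h>

static void demo_wmi_notify(union acpi_object *obj, void *context)
{
	if (!obj)
		return;		/* event carried no _WED data */

	if (obj->type == ACPI_TYPE_INTEGER)
		pr_debug("event data: 0x%llx\n",
			 (unsigned long long)obj->integer.value);
	/* no acpi_buffer to manage and no kfree(): the core owns obj */
}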
static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index f74af0a689f2..0a39f68c641d 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -840,6 +840,21 @@ static const struct ts_dmi_data rwc_nanote_p8_data = {
.properties = rwc_nanote_p8_props,
};
+static const struct property_entry rwc_nanote_next_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1785),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1145),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-next.fw"),
+ { }
+};
+
+static const struct ts_dmi_data rwc_nanote_next_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rwc_nanote_next_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1590,6 +1605,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* RWC NANOTE NEXT */
+ .driver_data = (void *)&rwc_nanote_next_data,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+			/* Above matches are too generic, add a BIOS-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "S8A70R100-V005"),
+ },
+ },
+ {
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 1d0b2d6040d1..3cbe180c3fc0 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -166,22 +166,6 @@ static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wbloc
return ACPI_TYPE_BUFFER;
}
-static acpi_status get_event_data(const struct wmi_block *wblock, struct acpi_buffer *out)
-{
- union acpi_object param = {
- .integer = {
- .type = ACPI_TYPE_INTEGER,
- .value = wblock->gblock.notify_id,
- }
- };
- struct acpi_object_list input = {
- .count = 1,
- .pointer = &param,
- };
-
- return acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, out);
-}
-
static int wmidev_match_guid(struct device *dev, const void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
@@ -199,23 +183,6 @@ static int wmidev_match_guid(struct device *dev, const void *data)
return 0;
}
-static int wmidev_match_notify_id(struct device *dev, const void *data)
-{
- struct wmi_block *wblock = dev_to_wblock(dev);
- const u32 *notify_id = data;
-
- /* Legacy GUID-based functions are restricted to only see
- * a single WMI device for each GUID.
- */
- if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags))
- return 0;
-
- if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
- return 1;
-
- return 0;
-}
-
static const struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -235,17 +202,6 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
return dev_to_wdev(dev);
}
-static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
-{
- struct device *dev;
-
- dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- return to_wmi_device(dev);
-}
-
static void wmi_device_put(struct wmi_device *wdev)
{
put_device(&wdev->dev);
@@ -650,35 +606,6 @@ acpi_status wmi_remove_notify_handler(const char *guid)
EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
/**
- * wmi_get_event_data - Get WMI data associated with an event (deprecated)
- *
- * @event: Event to find
- * @out: Buffer to hold event data
- *
- * Get extra data associated with an WMI event, the caller needs to free @out.
- *
- * Return: acpi_status signaling success or error.
- */
-acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
-{
- struct wmi_block *wblock;
- struct wmi_device *wdev;
- acpi_status status;
-
- wdev = wmi_find_event_by_notify_id(event);
- if (IS_ERR(wdev))
- return AE_NOT_FOUND;
-
- wblock = container_of(wdev, struct wmi_block, dev);
- status = get_event_data(wblock, out);
-
- wmi_device_put(wdev);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(wmi_get_event_data);
-
-/**
* wmi_has_guid - Check if a GUID is available
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
@@ -1186,14 +1113,19 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
static int wmi_get_notify_data(struct wmi_block *wblock, union acpi_object **obj)
{
struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object param = {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = wblock->gblock.notify_id,
+ }
+ };
+ struct acpi_object_list input = {
+ .count = 1,
+ .pointer = &param,
+ };
acpi_status status;
- if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) {
- *obj = NULL;
- return 0;
- }
-
- status = get_event_data(wblock, &data);
+ status = acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, &data);
if (ACPI_FAILURE(status)) {
dev_warn(&wblock->dev.dev, "Failed to get event data\n");
return -EIO;
@@ -1220,47 +1152,40 @@ static void wmi_notify_driver(struct wmi_block *wblock, union acpi_object *obj)
static int wmi_notify_device(struct device *dev, void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
- union acpi_object *obj;
+ union acpi_object *obj = NULL;
u32 *event = data;
int ret;
if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
return 0;
- down_read(&wblock->notify_lock);
- /* The WMI driver notify handler conflicts with the legacy WMI handler.
- * Because of this the WMI driver notify handler takes precedence.
+ /* The ACPI WMI specification says that _WED should be
+	 * evaluated every time a notification is received, even
+ * if no consumers are present.
+ *
+ * Some firmware implementations actually depend on this
+ * by using a queue for events which will fill up if the
+ * WMI driver core stops evaluating _WED due to missing
+ * WMI event consumers.
*/
- if (wblock->dev.dev.driver && wblock->driver_ready) {
+ if (!test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) {
ret = wmi_get_notify_data(wblock, &obj);
- if (ret >= 0) {
- wmi_notify_driver(wblock, obj);
- kfree(obj);
- }
- } else {
- if (wblock->handler) {
- wblock->handler(*event, wblock->handler_data);
- } else {
- /* The ACPI WMI specification says that _WED should be
- * evaluated every time an notification is received, even
- * if no consumers are present.
- *
- * Some firmware implementations actually depend on this
- * by using a queue for events which will fill up if the
- * WMI driver core stops evaluating _WED due to missing
- * WMI event consumers.
- *
- * Because of this we need this seemingly useless call to
- * wmi_get_notify_data() which in turn evaluates _WED.
- */
- ret = wmi_get_notify_data(wblock, &obj);
- if (ret >= 0)
- kfree(obj);
- }
-
+ if (ret < 0)
+ return -EIO;
}
+
+ down_read(&wblock->notify_lock);
+
+ if (wblock->dev.dev.driver && wblock->driver_ready)
+ wmi_notify_driver(wblock, obj);
+
+ if (wblock->handler)
+ wblock->handler(obj, wblock->handler_data);
+
up_read(&wblock->notify_lock);
+ kfree(obj);
+
acpi_bus_generate_netlink_event("wmi", acpi_dev_name(wblock->acpi_device), *event, 0);
return -EBUSY;
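Condensed, the reworked wmi_notify_device() above now follows a single path (simplified sketch; locking and the netlink event are elided):

/*
 *	obj = NULL;
 *	if (!WMI_NO_EVENT_DATA)
 *		obj = _WED(notify_id);		// evaluated unconditionally,
 *						// -EIO aborts before handlers
 *	if (driver bound && driver_ready)
 *		wmi_notify_driver(wblock, obj);
 *	if (legacy handler registered)
 *		handler(obj, handler_data);	// same object, new signature
 *	kfree(obj);
 */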
diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
index b591419de80c..88d9e8f2ff24 100644
--- a/drivers/platform/x86/x86-android-tablets/Kconfig
+++ b/drivers/platform/x86/x86-android-tablets/Kconfig
@@ -20,4 +20,4 @@ config X86_ANDROID_TABLETS
are missing from the DSDT.
If you have a x86 Android tablet say Y or M here, for a generic x86
- distro config say M here.
+ distro configuration say M here.
diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c
index 227afbb51078..07fbeab2319a 100644
--- a/drivers/platform/x86/x86-android-tablets/asus.c
+++ b/drivers/platform/x86/x86-android-tablets/asus.c
@@ -37,7 +37,7 @@ static const struct x86_gpio_button asus_me176c_tf103c_lid __initconst = {
.pin = 12,
};
-/* Asus ME176C tablets have an Android factory img with everything hardcoded */
+/* Asus ME176C tablets have an Android factory image with everything hardcoded */
static const char * const asus_me176c_accel_mount_matrix[] = {
"-1", "0", "0",
"0", "1", "0",
@@ -112,7 +112,7 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst =
},
.adapter_path = "\\_SB_.I2C5",
}, {
- /* kxtj21009 accel */
+ /* kxtj21009 accelerometer */
.board_info = {
.type = "kxtj21009",
.addr = 0x0f,
@@ -181,7 +181,7 @@ const struct x86_dev_info asus_me176c_info __initconst = {
.modules = bq24190_modules,
};
-/* Asus TF103C tablets have an Android factory img with everything hardcoded */
+/* Asus TF103C tablets have an Android factory image with everything hardcoded */
static const char * const asus_tf103c_accel_mount_matrix[] = {
"0", "-1", "0",
"-1", "0", "0",
@@ -280,7 +280,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst =
},
.adapter_path = "\\_SB_.I2C5",
}, {
- /* kxtj21009 accel */
+ /* kxtj21009 accelerometer */
.board_info = {
.type = "kxtj21009",
.addr = 0x0f,
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index 919ef4471229..1427a9a39008 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -26,19 +26,19 @@
static struct platform_device *x86_android_tablet_device;
/*
- * This helper allows getting a gpio_desc *before* the actual device consuming
- * the GPIO has been instantiated. This function _must_ only be used to handle
- * this special case such as e.g. :
+ * This helper allows getting a GPIO descriptor *before* the actual device
+ * consuming it has been instantiated. This function MUST only be used to
+ * handle special cases such as:
*
* 1. Getting an IRQ from a GPIO for i2c_board_info.irq which is passed to
* i2c_client_new() to instantiate i2c_client-s; or
- * 2. Calling desc_to_gpio() to get an old style GPIO number for gpio_keys
+ * 2. Calling desc_to_gpio() to get an old style GPIO number for gpio-keys
* platform_data which still uses old style GPIO numbers.
*
- * Since the consuming device has not been instatiated yet a dynamic lookup
- * is generated using the special x86_android_tablet dev for dev_id.
+ * Since the consuming device has not been instantiated yet, a dynamic lookup
+ * is generated using the special x86_android_tablet device for dev_id.
*
- * For normal GPIO lookups a standard static gpiod_lookup_table _must_ be used.
+ * For normal GPIO lookups a standard static struct gpiod_lookup_table MUST be used.
*/
int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
bool active_low, enum gpiod_flags dflags,
@@ -87,7 +87,7 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
/*
* The DSDT may already reference the GSI in a device skipped by
* acpi_quirk_skip_i2c_client_enumeration(). Unregister the GSI
- * to avoid EBUSY errors in this case.
+ * to avoid -EBUSY errors in this case.
*/
acpi_unregister_gsi(data->index);
irq = acpi_register_gsi(NULL, data->index, data->trigger, data->polarity);
@@ -379,7 +379,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
}
}
- /* + 1 to make space for (optional) gpio_keys_button pdev */
+ /* + 1 to make space for the (optional) gpio_keys_button platform device */
pdevs = kcalloc(dev_info->pdev_count + 1, sizeof(*pdevs), GFP_KERNEL);
if (!pdevs) {
x86_android_tablet_remove(pdev);
@@ -432,7 +432,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
buttons[i] = dev_info->gpio_button[i].button;
buttons[i].gpio = desc_to_gpio(gpiod);
- /* Release gpiod so that gpio-keys can request it */
+ /* Release GPIO descriptor so that gpio-keys can request it */
devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
}
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 387dd092c4dd..17f6da96aa01 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -99,17 +99,17 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
- /* Non exact match to match F + L versions */
+ /* Inexact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lenovo_yogabook_x91_info,
},
{
/*
- * Lenovo Yoga Tablet 2 Pro 1380F/L (13") This has more or less
- * the same BIOS as the 830F/L or 1050F/L (8" and 10") below,
- * but unlike the 8" / 10" models which share the same mainboard
- * this model has a different mainboard.
+ * Lenovo Yoga Tablet 2 Pro 1380F/L (13")
+ * This has more or less the same BIOS as the 830F/L or 1050F/L
+ * (8" and 10") below, but unlike the 8"/10" models which share
+ * the same mainboard this model has a different mainboard.
* This match for the 13" model MUST come before the 8" + 10"
* match since that one will also match the 13" model!
*/
@@ -124,8 +124,8 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
},
{
/*
- * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10"
- * Lenovo Yoga Tablet 2 use the same mainboard)
+ * Lenovo Yoga Tablet 2 830F/L or 1050F/L
+ * The 8" and 10" Lenovo Yoga Tablet 2 use the same mainboard.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
@@ -163,7 +163,7 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&nextbook_ares8_info,
},
{
- /* Nextbook Ares 8A (CHT version)*/
+ /* Nextbook Ares 8A (CHT version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index 74f39b658d2c..ae087f1471c1 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -59,7 +59,7 @@ static struct lp855x_platform_data lenovo_lp8557_reg_only_pdata = {
.initial_brightness = 128,
};
-/* Lenovo Yoga Book X90F / X90L's Android factory img has everything hardcoded */
+/* Lenovo Yoga Book X90F / X90L's Android factory image has everything hardcoded */
static const struct property_entry lenovo_yb1_x90_wacom_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0001),
@@ -262,7 +262,7 @@ const struct x86_dev_info lenovo_yogabook_x90_info __initconst = {
.init = lenovo_yb1_x90_init,
};
-/* Lenovo Yoga Book X91F/L Windows tablet needs manual instantiation of the fg client */
+/* Lenovo Yoga Book X91F/L Windows tablet needs manual instantiation of the fuel-gauge client */
static const struct x86_i2c_client_info lenovo_yogabook_x91_i2c_clients[] __initconst = {
{
/* BQ27542 fuel-gauge */
@@ -281,7 +281,7 @@ const struct x86_dev_info lenovo_yogabook_x91_info __initconst = {
.i2c_client_count = ARRAY_SIZE(lenovo_yogabook_x91_i2c_clients),
};
-/* Lenovo Yoga Tablet 2 1050F/L's Android factory img has everything hardcoded */
+/* Lenovo Yoga Tablet 2 1050F/L's Android factory image has everything hardcoded */
static const struct property_entry lenovo_yoga_tab2_830_1050_bq24190_props[] = {
PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", tusb1211_chg_det_psy, 1),
PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
@@ -521,9 +521,9 @@ err_put_device:
}
/*
- * These tablet's DSDT does not set acpi_gbl_reduced_hardware, so acpi_power_off
+ * These tablets' DSDT does not set acpi_gbl_reduced_hardware, so acpi_power_off()
* gets used as pm_power_off handler. This causes "poweroff" on these tablets
- * to hang hard. Requiring pressing the powerbutton for 30 seconds *twice*
+ * to hang hard, requiring the power button to be pressed for 30 seconds *twice*
* followed by a normal 3 second press to recover. Avoid this by doing an EFI
* poweroff instead.
*/
@@ -546,7 +546,7 @@ static int __init lenovo_yoga_tab2_830_1050_init(struct device *dev)
if (ret)
return ret;
- /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off() */
lenovo_yoga_tab2_830_1050_sys_off_handler =
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
lenovo_yoga_tab2_830_1050_power_off, NULL);
@@ -742,7 +742,7 @@ static int __init lenovo_yoga_tab2_1380_init(struct device *dev)
if (ret)
return ret;
- /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off() */
lenovo_yoga_tab2_830_1050_sys_off_handler =
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
lenovo_yoga_tab2_830_1050_power_off, NULL);
@@ -799,7 +799,7 @@ static const struct software_node fg_bq25890_1_supply_node = {
.properties = fg_bq25890_1_supply_props,
};
-/* bq25892 charger settings for the flat lipo battery behind the screen */
+/* bq25892 charger settings for the flat LiPo battery behind the screen */
static const struct property_entry lenovo_yt3_bq25892_0_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_0_suppliers),
PROPERTY_ENTRY_U32("linux,iinlim-percentage", 40),
@@ -833,7 +833,7 @@ static const struct software_node lenovo_yt3_hideep_ts_node = {
static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
{
- /* bq27500 fuel-gauge for the flat lipo battery behind the screen */
+ /* bq27500 fuel-gauge for the flat LiPo battery behind the screen */
.board_info = {
.type = "bq27500",
.addr = 0x55,
@@ -842,7 +842,7 @@ static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
},
.adapter_path = "\\_SB_.PCI0.I2C1",
}, {
- /* bq25892 charger for the flat lipo battery behind the screen */
+ /* bq25892 charger for the flat LiPo battery behind the screen */
.board_info = {
.type = "bq25892",
.addr = 0x6b,
@@ -859,7 +859,7 @@ static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
.con_id = "bq25892_0_irq",
},
}, {
- /* bq27500 fuel-gauge for the round li-ion cells in the hinge */
+ /* bq27500 fuel-gauge for the round Li-ion cells in the hinge */
.board_info = {
.type = "bq27500",
.addr = 0x55,
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index eb0e55c69dfe..7db8aa58b907 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -20,7 +20,7 @@
#include "shared-psy-info.h"
#include "x86-android-tablets.h"
-/* Acer Iconia One 7 B1-750 has an Android factory img with everything hardcoded */
+/* Acer Iconia One 7 B1-750 has an Android factory image with everything hardcoded */
static const char * const acer_b1_750_mount_matrix[] = {
"-1", "0", "0",
"0", "1", "0",
@@ -98,7 +98,7 @@ const struct x86_dev_info acer_b1_750_info __initconst = {
* Advantech MICA-071
* This is a standard Windows tablet, but it has an extra "quick launch" button
 * which is not described in the ACPI tables in any way.
- * Use the x86-android-tablets infra to create a gpio-button device for this.
+ * Use the x86-android-tablets infra to create a gpio-keys device for this.
*/
static const struct x86_gpio_button advantech_mica_071_button __initconst = {
.button = {
@@ -209,7 +209,7 @@ const struct x86_dev_info chuwi_hi8_info __initconst = {
* This comes in both Windows and Android versions and even on Android
 * the DSDT is mostly sane. This tablet has two extra general-purpose buttons
 * in the button row with the power and volume buttons, labeled P and F.
- * Use the x86-android-tablets infra to create a gpio-button device for these.
+ * Use the x86-android-tablets infra to create a gpio-keys device for these.
*/
static const struct x86_gpio_button cyberbook_t116_buttons[] __initconst = {
{
@@ -276,7 +276,7 @@ const struct x86_dev_info czc_p10t __initconst = {
.init = czc_p10t_init,
};
-/* Medion Lifetab S10346 tablets have an Android factory img with everything hardcoded */
+/* Medion Lifetab S10346 tablets have an Android factory image with everything hardcoded */
static const char * const medion_lifetab_s10346_accel_mount_matrix[] = {
"0", "1", "0",
"1", "0", "0",
@@ -305,7 +305,7 @@ static const struct software_node medion_lifetab_s10346_touchscreen_node = {
static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __initconst = {
{
- /* kxtj21009 accel */
+ /* kxtj21009 accelerometer */
.board_info = {
.type = "kxtj21009",
.addr = 0x0f,
@@ -359,7 +359,7 @@ const struct x86_dev_info medion_lifetab_s10346_info __initconst = {
.gpiod_lookup_tables = medion_lifetab_s10346_gpios,
};
-/* Nextbook Ares 8 (BYT) tablets have an Android factory img with everything hardcoded */
+/* Nextbook Ares 8 (BYT) tablets have an Android factory image with everything hardcoded */
static const char * const nextbook_ares8_accel_mount_matrix[] = {
"0", "-1", "0",
"-1", "0", "0",
@@ -387,7 +387,7 @@ static const struct software_node nextbook_ares8_touchscreen_node = {
static const struct x86_i2c_client_info nextbook_ares8_i2c_clients[] __initconst = {
{
- /* Freescale MMA8653FC accel */
+ /* Freescale MMA8653FC accelerometer */
.board_info = {
.type = "mma8653",
.addr = 0x1d,
@@ -428,7 +428,7 @@ const struct x86_dev_info nextbook_ares8_info __initconst = {
.gpiod_lookup_tables = nextbook_ares8_gpios,
};
-/* Nextbook Ares 8A (CHT) tablets have an Android factory img with everything hardcoded */
+/* Nextbook Ares 8A (CHT) tablets have an Android factory image with everything hardcoded */
static const char * const nextbook_ares8a_accel_mount_matrix[] = {
"1", "0", "0",
"0", "-1", "0",
@@ -446,7 +446,7 @@ static const struct software_node nextbook_ares8a_accel_node = {
static const struct x86_i2c_client_info nextbook_ares8a_i2c_clients[] __initconst = {
{
- /* Freescale MMA8653FC accel */
+ /* Freescale MMA8653FC accelerometer */
.board_info = {
.type = "mma8653",
.addr = 0x1d,
@@ -497,7 +497,7 @@ const struct x86_dev_info nextbook_ares8a_info __initconst = {
* Peaq C1010
* This is a standard Windows tablet, but it has a special Dolby button.
* This button has a WMI interface, but that is broken. Instead of trying to
- * use the broken WMI interface, instantiate a gpio_keys device for this.
+ * use the broken WMI interface, instantiate a gpio-keys device for this.
*/
static const struct x86_gpio_button peaq_c1010_button __initconst = {
.button = {
@@ -521,7 +521,7 @@ const struct x86_dev_info peaq_c1010_info __initconst = {
* Whitelabel (sold as various brands) TM800A550L tablets.
* These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices
* (removed through acpi_quirk_skip_i2c_client_enumeration()) and
- * the touchscreen fwnode has the wrong GPIOs.
+ * the touchscreen firmware node has the wrong GPIOs.
*/
static const char * const whitelabel_tm800a550l_accel_mount_matrix[] = {
"-1", "0", "0",
@@ -566,7 +566,7 @@ static const struct x86_i2c_client_info whitelabel_tm800a550l_i2c_clients[] __in
.polarity = ACPI_ACTIVE_HIGH,
},
}, {
- /* kxcj91008 accel */
+ /* kxcj91008 accelerometer */
.board_info = {
.type = "kxcj91008",
.addr = 0x0f,
@@ -598,12 +598,12 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
};
/*
- * The fwnode for ktd2026 on Xaomi pad2. It composed of a RGB LED node
+ * The firmware node for ktd2026 on the Xiaomi pad2. It is composed of an RGB LED node
* with three subnodes for each color (B/G/R). The RGB LED node is named
* "multi-led" to align with the name in the device tree.
*/
-/* main fwnode for ktd2026 */
+/* Main firmware node for ktd2026 */
static const struct software_node ktd2026_node = {
.name = "ktd2026",
};
@@ -665,12 +665,12 @@ static const struct software_node *ktd2026_node_group[] = {
};
/*
- * For the LEDs which backlight the menu / home / back capacitive buttons on
+ * For the LEDs which backlight the Menu / Home / Back capacitive buttons on
* the bottom bezel. These are attached to a TPS61158 LED controller which
* is controlled by the "pwm_soc_lpss_2" PWM output.
*/
#define XIAOMI_MIPAD2_LED_PERIOD_NS 19200
-#define XIAOMI_MIPAD2_LED_DEFAULT_DUTY 6000 /* From Android kernel */
+#define XIAOMI_MIPAD2_LED_MAX_DUTY_NS 6000 /* From Android kernel */
static struct pwm_device *xiaomi_mipad2_led_pwm;
@@ -679,7 +679,7 @@ static int xiaomi_mipad2_brightness_set(struct led_classdev *led_cdev,
{
struct pwm_state state = {
.period = XIAOMI_MIPAD2_LED_PERIOD_NS,
- .duty_cycle = val,
+ .duty_cycle = XIAOMI_MIPAD2_LED_MAX_DUTY_NS * val / LED_FULL,
/* Always set PWM enabled to avoid the pin floating */
.enabled = true,
};
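Assuming LED_FULL is 255, the rescaled duty cycle above stays within the hardware's usable range:

/*
 * val = 255 (LED_FULL) -> 6000 * 255 / 255 = 6000 ns of 19200 ns (~31%)
 * val = 128            -> 6000 * 128 / 255 = 3011 ns (~16%)
 * The old code wrote val directly as nanoseconds, which is why
 * max_brightness previously had to equal the PWM period; brightness is
 * now a conventional 0-255 scale.
 */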
@@ -701,11 +701,11 @@ static int __init xiaomi_mipad2_init(struct device *dev)
return -ENOMEM;
led_cdev->name = "mipad2:white:touch-buttons-backlight";
- led_cdev->max_brightness = XIAOMI_MIPAD2_LED_PERIOD_NS;
- /* "input-events" trigger uses blink_brightness */
- led_cdev->blink_brightness = XIAOMI_MIPAD2_LED_DEFAULT_DUTY;
+ led_cdev->max_brightness = LED_FULL;
led_cdev->default_trigger = "input-events";
led_cdev->brightness_set_blocking = xiaomi_mipad2_brightness_set;
+ /* Turn LED off during suspend */
+ led_cdev->flags = LED_CORE_SUSPENDRESUME;
ret = devm_led_classdev_register(dev, led_cdev);
if (ret)
diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
index d2d0aa51bc3f..a46fa15acfb1 100644
--- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
+++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
@@ -39,7 +39,7 @@ const struct software_node fg_bq25890_supply_node = {
.properties = fg_bq25890_supply_props,
};
-/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV bat. */
+/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV battery */
static const struct property_entry generic_lipo_hv_4v35_battery_props[] = {
PROPERTY_ENTRY_STRING("compatible", "simple-battery"),
PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion"),
@@ -80,7 +80,7 @@ const char * const bq24190_modules[] __initconst = {
NULL
};
-/* Generic pdevs array and gpio-lookups for micro USB ID pin handling */
+/* Generic platform device array and GPIO lookup table for micro USB ID pin handling */
const struct platform_device_info int3496_pdevs[] __initconst = {
{
/* For micro USB ID pin handling */
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 86402b9b46a3..5517e438c7b6 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -61,7 +61,7 @@ struct x86_serdev_info {
const char *ctrl_uid;
const char *ctrl_devname;
/*
- * ATM the serdev core only supports of or ACPI matching; and sofar all
+	 * ATM the serdev core only supports OF or ACPI matching; and so far all
* Android x86 tablets DSDTs have usable serdev nodes, but sometimes
* under the wrong controller. So we just tie the existing serdev ACPI
* node to the right controller.
diff --git a/drivers/pmdomain/amlogic/Kconfig b/drivers/pmdomain/amlogic/Kconfig
index 2108729909b5..e72b664174af 100644
--- a/drivers/pmdomain/amlogic/Kconfig
+++ b/drivers/pmdomain/amlogic/Kconfig
@@ -1,17 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Amlogic PM Domains"
-config MESON_GX_PM_DOMAINS
- tristate "Amlogic Meson GX Power Domains driver"
- depends on ARCH_MESON || COMPILE_TEST
- depends on PM && OF
- default ARCH_MESON
- select PM_GENERIC_DOMAINS
- select PM_GENERIC_DOMAINS_OF
- help
- Say yes to expose Amlogic Meson GX Power Domains as
- Generic Power Domains.
-
config MESON_EE_PM_DOMAINS
tristate "Amlogic Meson Everything-Else Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/pmdomain/amlogic/Makefile b/drivers/pmdomain/amlogic/Makefile
index 3d58abd574f9..99f195f09957 100644
--- a/drivers/pmdomain/amlogic/Makefile
+++ b/drivers/pmdomain/amlogic/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
diff --git a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
deleted file mode 100644
index 6028e91664a4..000000000000
--- a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2017 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
- */
-
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/reset.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-
-/* AO Offsets */
-
-#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
-
-#define GEN_PWR_VPU_HDMI BIT(8)
-#define GEN_PWR_VPU_HDMI_ISO BIT(9)
-
-/* HHI Offsets */
-
-#define HHI_MEM_PD_REG0 (0x40 << 2)
-#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
-#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
-#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
-
-struct meson_gx_pwrc_vpu {
- struct generic_pm_domain genpd;
- struct regmap *regmap_ao;
- struct regmap *regmap_hhi;
- struct reset_control *rstc;
- struct clk *vpu_clk;
- struct clk *vapb_clk;
-};
-
-static inline
-struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
-{
- return container_of(d, struct meson_gx_pwrc_vpu, genpd);
-}
-
-static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
-{
- struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
- int i;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
- udelay(20);
-
- /* Power Down Memories */
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x3 << i, 0x3 << i);
- udelay(5);
- }
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x3 << i, 0x3 << i);
- udelay(5);
- }
- for (i = 8; i < 16; i++) {
- regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
- BIT(i), BIT(i));
- udelay(5);
- }
- udelay(20);
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
-
- msleep(20);
-
- clk_disable_unprepare(pd->vpu_clk);
- clk_disable_unprepare(pd->vapb_clk);
-
- return 0;
-}
-
-static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
-{
- struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
- int i;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
- udelay(20);
-
- /* Power Down Memories */
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x3 << i, 0x3 << i);
- udelay(5);
- }
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x3 << i, 0x3 << i);
- udelay(5);
- }
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
- 0x3 << i, 0x3 << i);
- udelay(5);
- }
- for (i = 8; i < 16; i++) {
- regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
- BIT(i), BIT(i));
- udelay(5);
- }
- udelay(20);
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
-
- msleep(20);
-
- clk_disable_unprepare(pd->vpu_clk);
- clk_disable_unprepare(pd->vapb_clk);
-
- return 0;
-}
-
-static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
-{
- int ret;
-
- ret = clk_prepare_enable(pd->vpu_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(pd->vapb_clk);
- if (ret)
- clk_disable_unprepare(pd->vpu_clk);
-
- return ret;
-}
-
-static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
-{
- struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
- int ret;
- int i;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI, 0);
- udelay(20);
-
- /* Power Up Memories */
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x3 << i, 0);
- udelay(5);
- }
-
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x3 << i, 0);
- udelay(5);
- }
-
- for (i = 8; i < 16; i++) {
- regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
- BIT(i), 0);
- udelay(5);
- }
- udelay(20);
-
- ret = reset_control_assert(pd->rstc);
- if (ret)
- return ret;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI_ISO, 0);
-
- ret = reset_control_deassert(pd->rstc);
- if (ret)
- return ret;
-
- ret = meson_gx_pwrc_vpu_setup_clk(pd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
-{
- struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
- int ret;
- int i;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI, 0);
- udelay(20);
-
- /* Power Up Memories */
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x3 << i, 0);
- udelay(5);
- }
-
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x3 << i, 0);
- udelay(5);
- }
-
- for (i = 0; i < 32; i += 2) {
- regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
- 0x3 << i, 0);
- udelay(5);
- }
-
- for (i = 8; i < 16; i++) {
- regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
- BIT(i), 0);
- udelay(5);
- }
- udelay(20);
-
- ret = reset_control_assert(pd->rstc);
- if (ret)
- return ret;
-
- regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VPU_HDMI_ISO, 0);
-
- ret = reset_control_deassert(pd->rstc);
- if (ret)
- return ret;
-
- ret = meson_gx_pwrc_vpu_setup_clk(pd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
-{
- u32 reg;
-
- regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
-
- return (reg & GEN_PWR_VPU_HDMI);
-}
-
-static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
- .genpd = {
- .name = "vpu_hdmi",
- .power_off = meson_gx_pwrc_vpu_power_off,
- .power_on = meson_gx_pwrc_vpu_power_on,
- },
-};
-
-static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = {
- .genpd = {
- .name = "vpu_hdmi",
- .power_off = meson_g12a_pwrc_vpu_power_off,
- .power_on = meson_g12a_pwrc_vpu_power_on,
- },
-};
-
-static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
-{
- const struct meson_gx_pwrc_vpu *vpu_pd_match;
- struct regmap *regmap_ao, *regmap_hhi;
- struct meson_gx_pwrc_vpu *vpu_pd;
- struct device_node *parent_np;
- struct reset_control *rstc;
- struct clk *vpu_clk;
- struct clk *vapb_clk;
- bool powered_off;
- int ret;
-
- vpu_pd_match = of_device_get_match_data(&pdev->dev);
- if (!vpu_pd_match) {
- dev_err(&pdev->dev, "failed to get match data\n");
- return -ENODEV;
- }
-
- vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL);
- if (!vpu_pd)
- return -ENOMEM;
-
- memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
-
- parent_np = of_get_parent(pdev->dev.of_node);
- regmap_ao = syscon_node_to_regmap(parent_np);
- of_node_put(parent_np);
- if (IS_ERR(regmap_ao)) {
- dev_err(&pdev->dev, "failed to get regmap\n");
- return PTR_ERR(regmap_ao);
- }
-
- regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "amlogic,hhi-sysctrl");
- if (IS_ERR(regmap_hhi)) {
- dev_err(&pdev->dev, "failed to get HHI regmap\n");
- return PTR_ERR(regmap_hhi);
- }
-
- rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
- if (IS_ERR(rstc))
- return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
- "failed to get reset lines\n");
-
- vpu_clk = devm_clk_get(&pdev->dev, "vpu");
- if (IS_ERR(vpu_clk)) {
- dev_err(&pdev->dev, "vpu clock request failed\n");
- return PTR_ERR(vpu_clk);
- }
-
- vapb_clk = devm_clk_get(&pdev->dev, "vapb");
- if (IS_ERR(vapb_clk)) {
- dev_err(&pdev->dev, "vapb clock request failed\n");
- return PTR_ERR(vapb_clk);
- }
-
- vpu_pd->regmap_ao = regmap_ao;
- vpu_pd->regmap_hhi = regmap_hhi;
- vpu_pd->rstc = rstc;
- vpu_pd->vpu_clk = vpu_clk;
- vpu_pd->vapb_clk = vapb_clk;
-
- platform_set_drvdata(pdev, vpu_pd);
-
- powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
-
- /* If already powered, sync the clock states */
- if (!powered_off) {
- ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd);
- if (ret)
- return ret;
- }
-
- vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
- pm_genpd_init(&vpu_pd->genpd, NULL, powered_off);
-
- return of_genpd_add_provider_simple(pdev->dev.of_node,
- &vpu_pd->genpd);
-}
-
-static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
-{
- struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev);
- bool powered_off;
-
- powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
- if (!powered_off)
- vpu_pd->genpd.power_off(&vpu_pd->genpd);
-}
-
-static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
- { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd },
- {
- .compatible = "amlogic,meson-g12a-pwrc-vpu",
- .data = &vpu_hdmi_pd_g12a
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
-
-static struct platform_driver meson_gx_pwrc_vpu_driver = {
- .probe = meson_gx_pwrc_vpu_probe,
- .shutdown = meson_gx_pwrc_vpu_shutdown,
- .driver = {
- .name = "meson_gx_pwrc_vpu",
- .of_match_table = meson_gx_pwrc_vpu_match_table,
- },
-};
-module_platform_driver(meson_gx_pwrc_vpu_driver);
-MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index d62a776c89a1..9467235110f4 100644
--- a/drivers/pmdomain/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
@@ -177,7 +177,7 @@ static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned
return !!(reg & APPLE_PMGR_RESET);
}
-const struct reset_control_ops apple_pmgr_reset_ops = {
+static const struct reset_control_ops apple_pmgr_reset_ops = {
.assert = apple_pmgr_reset_assert,
.deassert = apple_pmgr_reset_deassert,
.reset = apple_pmgr_reset_reset,
diff --git a/drivers/pmdomain/bcm/raspberrypi-power.c b/drivers/pmdomain/bcm/raspberrypi-power.c
index 06196ebfe03b..b87ea7adb7be 100644
--- a/drivers/pmdomain/bcm/raspberrypi-power.c
+++ b/drivers/pmdomain/bcm/raspberrypi-power.c
@@ -41,40 +41,46 @@ struct rpi_power_domains {
*/
struct rpi_power_domain_packet {
u32 domain;
- u32 on;
+ u32 state;
};
/*
* Asks the firmware to enable or disable power on a specific power
* domain.
*/
-static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on)
+static int rpi_firmware_set_power(struct generic_pm_domain *domain, bool on)
{
+ struct rpi_power_domain *rpi_domain =
+ container_of(domain, struct rpi_power_domain, base);
+ bool old_interface = rpi_domain->old_interface;
struct rpi_power_domain_packet packet;
+ int ret;
packet.domain = rpi_domain->domain;
- packet.on = on;
- return rpi_firmware_property(rpi_domain->fw,
- rpi_domain->old_interface ?
- RPI_FIRMWARE_SET_POWER_STATE :
- RPI_FIRMWARE_SET_DOMAIN_STATE,
- &packet, sizeof(packet));
+ packet.state = on;
+
+ ret = rpi_firmware_property(rpi_domain->fw, old_interface ?
+ RPI_FIRMWARE_SET_POWER_STATE :
+ RPI_FIRMWARE_SET_DOMAIN_STATE,
+ &packet, sizeof(packet));
+ if (ret)
+ dev_err(&domain->dev, "Failed to set %s to %u (%d)\n",
+ old_interface ? "power" : "domain", on, ret);
+ else
+ dev_dbg(&domain->dev, "Set %s to %u\n",
+ old_interface ? "power" : "domain", on);
+
+ return ret;
}
static int rpi_domain_off(struct generic_pm_domain *domain)
{
- struct rpi_power_domain *rpi_domain =
- container_of(domain, struct rpi_power_domain, base);
-
- return rpi_firmware_set_power(rpi_domain, false);
+ return rpi_firmware_set_power(domain, false);
}
static int rpi_domain_on(struct generic_pm_domain *domain)
{
- struct rpi_power_domain *rpi_domain =
- container_of(domain, struct rpi_power_domain, base);
-
- return rpi_firmware_set_power(rpi_domain, true);
+ return rpi_firmware_set_power(domain, true);
}
static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
@@ -85,6 +91,7 @@ static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
dom->fw = rpi_domains->fw;
dom->base.name = name;
+ dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP;
dom->base.power_on = rpi_domain_on;
dom->base.power_off = rpi_domain_off;
@@ -142,13 +149,13 @@ rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains)
int ret;
packet.domain = RPI_POWER_DOMAIN_ARM;
- packet.on = ~0;
+ packet.state = ~0;
ret = rpi_firmware_property(rpi_domains->fw,
RPI_FIRMWARE_GET_DOMAIN_STATE,
&packet, sizeof(packet));
- return ret == 0 && packet.on != ~0;
+ return ret == 0 && packet.state != ~0;
}
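The ~0 sentinel in rpi_has_new_domain_support() deserves a note (assumption: the firmware overwrites packet.state in place when it recognizes the request):

/*
 * packet.state = ~0;	// a value no firmware ever returns
 * If GET_DOMAIN_STATE succeeds *and* the sentinel was overwritten, the
 * new per-domain interface is available; otherwise fall back to the
 * legacy SET_POWER_STATE interface.
 */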
static int rpi_power_probe(struct platform_device *pdev)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 7a61aa88c061..5ede0f7eda09 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
.unlock = genpd_unlock_spin,
};
+static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+ genpd->raw_lock_flags = flags;
+}
+
+static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
+ genpd->raw_lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+ genpd->raw_lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->raw_slock)
+{
+ raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_raw_spin_ops = {
+ .lock = genpd_lock_raw_spin,
+ .lock_nested = genpd_lock_nested_raw_spin,
+ .lock_interruptible = genpd_lock_interruptible_raw_spin,
+ .unlock = genpd_unlock_raw_spin,
+};
+
#define genpd_lock(p) p->lock_ops->lock(p)
#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
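A note on why a raw-spinlock variant is being added (assumption: PREEMPT_RT semantics motivate it): on RT kernels a regular spinlock_t can sleep, but CPU power domains are taken from the cpuidle path with interrupts disabled, which only a raw_spinlock_t tolerates. The choice stays hidden behind the macros above:

/*
 *	genpd_lock(genpd);	// dispatches to genpd_raw_spin_ops for
 *	...			// CPU domains after genpd_lock_init()
 *	genpd_unlock(genpd);	// selects the ops table, see below
 */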
@@ -1758,7 +1800,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
genpd_lock(genpd);
genpd_set_cpumask(genpd, gpd_data->cpu);
- dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
if (gd)
@@ -1767,6 +1808,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
genpd_unlock(genpd);
+ dev_pm_domain_set(dev, &genpd->domain);
out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1823,12 +1865,13 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->gd->max_off_time_changed = true;
genpd_clear_cpumask(genpd, gpd_data->cpu);
- dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
+ dev_pm_domain_set(dev, NULL);
+
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
@@ -2143,7 +2186,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
- if (genpd_is_irq_safe(genpd)) {
+ if (genpd_is_cpu_domain(genpd)) {
+ raw_spin_lock_init(&genpd->raw_slock);
+ genpd->lock_ops = &genpd_raw_spin_ops;
+ } else if (genpd_is_irq_safe(genpd)) {
spin_lock_init(&genpd->slock);
genpd->lock_ops = &genpd_spin_ops;
} else {
@@ -3181,24 +3227,25 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
- seq_printf(s, "%-25s ", p);
+ seq_printf(s, "%-26s ", p);
}
-static void mode_status_str(struct seq_file *s, struct device *dev)
+static void perf_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW");
+ seq_printf(s, "%-10u ", gpd_data->performance_state);
}
-static void perf_status_str(struct seq_file *s, struct device *dev)
+static void mode_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- seq_put_decimal_ull(s, "", gpd_data->performance_state);
+
+ seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
}
static int genpd_summary_one(struct seq_file *s,
@@ -3209,7 +3256,6 @@ static int genpd_summary_one(struct seq_file *s,
[GENPD_STATE_OFF] = "off"
};
struct pm_domain_data *pm_data;
- const char *kobj_path;
struct gpd_link *link;
char state[16];
int ret;
@@ -3226,7 +3272,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
+ seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@@ -3242,17 +3288,10 @@ static int genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj,
- genpd_is_irq_safe(genpd) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (kobj_path == NULL)
- continue;
-
- seq_printf(s, "\n %-50s ", kobj_path);
+ seq_printf(s, "\n %-30s ", dev_name(pm_data->dev));
rtpm_status_str(s, pm_data->dev);
perf_status_str(s, pm_data->dev);
mode_status_str(s, pm_data->dev);
- kfree(kobj_path);
}
seq_puts(s, "\n");
@@ -3267,9 +3306,9 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status children performance\n");
- seq_puts(s, " /device runtime status managed by\n");
- seq_puts(s, "------------------------------------------------------------------------------------------------------------\n");
+ seq_puts(s, "domain status children performance\n");
+ seq_puts(s, " /device runtime status managed by\n");
+ seq_puts(s, "------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
@@ -3421,23 +3460,14 @@ static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
- const char *kobj_path;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj,
- genpd_is_irq_safe(genpd) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (kobj_path == NULL)
- continue;
-
- seq_printf(s, "%s\n", kobj_path);
- kfree(kobj_path);
- }
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node)
+ seq_printf(s, "%s\n", dev_name(pm_data->dev));
genpd_unlock(genpd);
return ret;
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 9517cce93d8a..80a4dcc77199 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -455,7 +455,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
} else {
struct imx_pm_domain *domain;
struct platform_device *pd_pdev;
- struct device_node *np;
struct clk *ipg_clk;
unsigned int ipg_rate_mhz;
int domain_index;
@@ -465,28 +464,24 @@ static int imx_gpc_probe(struct platform_device *pdev)
return PTR_ERR(ipg_clk);
ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000;
- for_each_child_of_node(pgc_node, np) {
+ for_each_child_of_node_scoped(pgc_node, np) {
ret = of_property_read_u32(np, "reg", &domain_index);
- if (ret) {
- of_node_put(np);
+ if (ret)
return ret;
- }
+
if (domain_index >= of_id_data->num_domains)
continue;
pd_pdev = platform_device_alloc("imx-pgc-power-domain",
domain_index);
- if (!pd_pdev) {
- of_node_put(np);
+ if (!pd_pdev)
return -ENOMEM;
- }
ret = platform_device_add_data(pd_pdev,
&imx_gpc_domains[domain_index],
sizeof(imx_gpc_domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
- of_node_put(np);
return ret;
}
domain = pd_pdev->dev.platform_data;
@@ -500,7 +495,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 856eaac0ec14..963d61c5af6d 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1458,7 +1458,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
.max_register = SZ_4K,
};
struct device *dev = &pdev->dev;
- struct device_node *pgc_np, *np;
+ struct device_node *pgc_np;
struct regmap *regmap;
void __iomem *base;
int ret;
@@ -1480,7 +1480,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
return ret;
}
- for_each_child_of_node(pgc_np, np) {
+ for_each_child_of_node_scoped(pgc_np, np) {
struct platform_device *pd_pdev;
struct imx_pgc_domain *domain;
u32 domain_index;
@@ -1491,7 +1491,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
dev_err(dev, "Failed to read 'reg' property\n");
- of_node_put(np);
return ret;
}
@@ -1506,7 +1505,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain_index);
if (!pd_pdev) {
dev_err(dev, "Failed to allocate platform device\n");
- of_node_put(np);
return -ENOMEM;
}
@@ -1515,7 +1513,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
sizeof(domain_data->domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
- of_node_put(np);
return ret;
}
@@ -1532,7 +1529,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
- of_node_put(np);
return ret;
}
}
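A self-contained sketch of the for_each_child_of_node_scoped() idiom both GPC conversions use (demo_walk() is illustrative only): the scoped variant owns the child reference itself, so early returns no longer leak a refcount and the explicit of_node_put() calls can go.

#include <linux/of.h>
#include <linux/errno.h>

static int demo_walk(struct device_node *parent)
{
	for_each_child_of_node_scoped(parent, np) {
		u32 reg;

		if (of_property_read_u32(np, "reg", &reg))
			return -EINVAL;	/* np is put automatically here */
	}

	return 0;
}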
diff --git a/drivers/pmdomain/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c
index d750a7dc58d2..25ab592945bd 100644
--- a/drivers/pmdomain/imx/imx93-pd.c
+++ b/drivers/pmdomain/imx/imx93-pd.c
@@ -28,7 +28,6 @@ struct imx93_power_domain {
void __iomem *addr;
struct clk_bulk_data *clks;
int num_clks;
- bool init_off;
};
#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
@@ -90,9 +89,6 @@ static void imx93_pd_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- if (!domain->init_off)
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
-
of_genpd_del_provider(np);
pm_genpd_remove(&domain->genpd);
}
@@ -102,6 +98,7 @@ static int imx93_pd_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx93_power_domain *domain;
+ bool init_off;
int ret;
domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
@@ -121,18 +118,17 @@ static int imx93_pd_probe(struct platform_device *pdev)
domain->genpd.power_on = imx93_pd_on;
domain->dev = dev;
- domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
+ init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
/* Just to sync the status of hardware */
- if (!domain->init_off) {
+ if (!init_off) {
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
- if (ret) {
- dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
- domain->genpd.name);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(domain->dev, ret,
+ "failed to enable clocks for domain: %s\n",
+ domain->genpd.name);
}
- ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off);
+ ret = pm_genpd_init(&domain->genpd, NULL, init_off);
if (ret)
goto err_clk_unprepare;
@@ -148,7 +144,7 @@ err_genpd_remove:
pm_genpd_remove(&domain->genpd);
err_clk_unprepare:
- if (!domain->init_off)
+ if (!init_off)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
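The imx93-pd hunk above also folds the error print into dev_err_probe(), which logs the message and returns the error in one statement while staying quiet for -EPROBE_DEFER (the reason is recorded for the deferred-probe report instead). A short sketch under those assumptions; the helper name is invented:

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch: log-and-return in one step, -EPROBE_DEFER handled quietly. */
static int sync_domain_clocks(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks)
{
	int ret = clk_bulk_prepare_enable(num_clks, clks);

	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to enable clocks\n");

	return 0;
}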
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index e274e3315fe7..88406e9ac63c 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -398,12 +398,10 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
scpsys->dev->of_node = node;
pd->supply = devm_regulator_get(scpsys->dev, "domain");
scpsys->dev->of_node = root_node;
- if (IS_ERR(pd->supply)) {
- dev_err_probe(scpsys->dev, PTR_ERR(pd->supply),
+ if (IS_ERR(pd->supply))
+ return dev_err_cast_probe(scpsys->dev, pd->supply,
"%pOF: failed to get power supply.\n",
node);
- return ERR_CAST(pd->supply);
- }
}
pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
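dev_err_cast_probe(), used in the mediatek hunk above, is the ERR_PTR-returning sibling of dev_err_probe(): it logs via dev_err_probe(dev, PTR_ERR(ptr), ...) and evaluates to ERR_CAST(ptr), so pointer-returning paths lose the separate ERR_CAST() return. A minimal sketch; the wrapper function is invented for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Sketch: log the failure and return the cast ERR_PTR in one step. */
static struct regulator *get_domain_supply(struct device *dev)
{
	struct regulator *supply = devm_regulator_get(dev, "domain");

	if (IS_ERR(supply))
		return dev_err_cast_probe(dev, supply,
					  "failed to get power supply\n");

	return supply;
}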
diff --git a/drivers/pmdomain/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c
index c64e84a27cc7..e1fca65b80be 100644
--- a/drivers/pmdomain/qcom/cpr.c
+++ b/drivers/pmdomain/qcom/cpr.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019, Linaro Limited
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
@@ -747,9 +748,9 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
struct corner *corner, *end;
enum voltage_change_dir dir;
- int ret = 0, new_uV;
+ int ret, new_uV;
- mutex_lock(&drv->lock);
+ guard(mutex)(&drv->lock);
dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
__func__, state, cpr_get_cur_perf_state(drv));
@@ -760,10 +761,8 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
*/
corner = drv->corners + state - 1;
end = &drv->corners[drv->num_corners - 1];
- if (corner > end || corner < drv->corners) {
- ret = -EINVAL;
- goto unlock;
- }
+ if (corner > end || corner < drv->corners)
+ return -EINVAL;
/* Determine direction */
if (drv->corner > corner)
@@ -783,7 +782,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
ret = cpr_scale_voltage(drv, corner, new_uV, dir);
if (ret)
- goto unlock;
+ return ret;
if (cpr_is_allowed(drv)) {
cpr_irq_clr(drv);
@@ -794,10 +793,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
drv->corner = corner;
-unlock:
- mutex_unlock(&drv->lock);
-
- return ret;
+ return 0;
}
static int
@@ -1040,36 +1036,30 @@ static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
struct device *cpu_dev)
{
- u64 rate = 0;
- struct device_node *ref_np;
- struct device_node *desc_np;
- struct device_node *child_np = NULL;
- struct device_node *child_req_np = NULL;
+ struct device_node *ref_np __free(device_node) = NULL;
+ struct device_node *desc_np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
- desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!desc_np)
return 0;
ref_np = dev_pm_opp_get_of_node(ref);
if (!ref_np)
- goto out_ref;
+ return 0;
- do {
- of_node_put(child_req_np);
- child_np = of_get_next_available_child(desc_np, child_np);
- child_req_np = of_parse_phandle(child_np, "required-opps", 0);
- } while (child_np && child_req_np != ref_np);
+ for_each_available_child_of_node_scoped(desc_np, child_np) {
+ struct device_node *child_req_np __free(device_node) =
+ of_parse_phandle(child_np, "required-opps", 0);
- if (child_np && child_req_np == ref_np)
- of_property_read_u64(child_np, "opp-hz", &rate);
+ if (child_req_np == ref_np) {
+ u64 rate;
- of_node_put(child_req_np);
- of_node_put(child_np);
- of_node_put(ref_np);
-out_ref:
- of_node_put(desc_np);
+ of_property_read_u64(child_np, "opp-hz", &rate);
+ return (unsigned long) rate;
+ }
+ }
- return (unsigned long) rate;
+ return 0;
}
static int cpr_corner_init(struct cpr_drv *drv)
@@ -1443,9 +1433,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
const struct acc_desc *acc_desc = drv->acc_desc;
- int ret = 0;
+ int ret;
- mutex_lock(&drv->lock);
+ guard(mutex)(&drv->lock);
dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
@@ -1457,7 +1447,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
* additional initialization when further CPUs get attached.
*/
if (drv->attached_cpu_dev)
- goto unlock;
+ return 0;
/*
* cpr_scale_voltage() requires the direction (if we are changing
@@ -1469,12 +1459,10 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
* the first time cpr_set_performance_state() is called.
*/
drv->cpu_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(drv->cpu_clk)) {
- ret = PTR_ERR(drv->cpu_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
- goto unlock;
- }
+ if (IS_ERR(drv->cpu_clk))
+ return dev_err_probe(drv->dev, PTR_ERR(drv->cpu_clk),
+ "could not get cpu clk\n");
+
drv->attached_cpu_dev = dev;
dev_dbg(drv->dev, "using cpu clk from: %s\n",
@@ -1491,42 +1479,39 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
if (ret < 0) {
dev_err(drv->dev, "could not get OPP count\n");
- goto unlock;
+ return ret;
}
drv->num_corners = ret;
if (drv->num_corners < 2) {
dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
sizeof(*drv->corners),
GFP_KERNEL);
- if (!drv->corners) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!drv->corners)
+ return -ENOMEM;
ret = cpr_corner_init(drv);
if (ret)
- goto unlock;
+ return ret;
cpr_set_loop_allowed(drv);
ret = cpr_init_parameters(drv);
if (ret)
- goto unlock;
+ return ret;
/* Configure CPR HW but keep it disabled */
ret = cpr_config(drv);
if (ret)
- goto unlock;
+ return ret;
ret = cpr_find_initial_corner(drv);
if (ret)
- goto unlock;
+ return ret;
if (acc_desc->config)
regmap_multi_reg_write(drv->tcsr, acc_desc->config,
@@ -1541,10 +1526,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
dev_info(drv->dev, "driver initialized with %u OPPs\n",
drv->num_corners);
-unlock:
- mutex_unlock(&drv->lock);
-
- return ret;
+ return 0;
}
static int cpr_debug_info_show(struct seq_file *s, void *unused)
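The cpr_get_opp_hz_for_req() rewrite above leans on <linux/cleanup.h> scope-based cleanup: a device_node pointer declared with __free(device_node) has of_node_put() called on it when it goes out of scope, so every return path is leak-free without the goto ladder. A condensed sketch of the idiom (function name hypothetical, modelled on the opp-hz lookup above):

#include <linux/cleanup.h>
#include <linux/of.h>

/*
 * Sketch: of_node_put() runs when 'child' leaves scope, including on
 * the early return; of_node_put(NULL) is a no-op, so a failed lookup
 * is safe too.
 */
static u64 read_first_child_rate(struct device_node *parent)
{
	struct device_node *child __free(device_node) =
		of_get_next_available_child(parent, NULL);
	u64 rate = 0;

	if (!child)
		return 0;

	of_property_read_u64(child, "opp-hz", &rate);
	return rate;		/* child reference dropped here */
}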
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index d2cb4271a1ca..65505e1e2219 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -775,9 +776,9 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
unsigned int level)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
- int ret = 0, i;
+ int ret, i;
- mutex_lock(&rpmhpd_lock);
+ guard(mutex)(&rpmhpd_lock);
for (i = 0; i < pd->level_count; i++)
if (level <= pd->level[i])
@@ -797,14 +798,12 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
- goto out;
+ return ret;
}
pd->corner = i;
-out:
- mutex_unlock(&rpmhpd_lock);
- return ret;
+ return 0;
}
static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
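rpmhpd above, and rpmpd below, drop their unlock labels by taking the lock with guard(mutex) from <linux/cleanup.h>, which releases it when the enclosing scope ends. A minimal sketch with an invented lock and state variable:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_corner;

/*
 * Sketch: guard(mutex)(&lock) locks immediately and registers a
 * scope-exit unlock, so every return below - early or final - leaves
 * the mutex released and the goto/unlock boilerplate disappears.
 */
static int set_example_corner(int corner, int max)
{
	guard(mutex)(&example_lock);

	if (corner < 0 || corner > max)
		return -EINVAL;		/* unlocked automatically */

	example_corner = corner;
	return 0;			/* unlocked here as well */
}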
diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 5e6280b4cf70..0be6b3026e3a 100644
--- a/drivers/pmdomain/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -1024,20 +1025,17 @@ static int rpmpd_power_on(struct generic_pm_domain *domain)
int ret;
struct rpmpd *pd = domain_to_rpmpd(domain);
- mutex_lock(&rpmpd_lock);
+ guard(mutex)(&rpmpd_lock);
ret = rpmpd_send_enable(pd, true);
if (ret)
- goto out;
+ return ret;
pd->enabled = true;
if (pd->corner)
ret = rpmpd_aggregate_corner(pd);
-out:
- mutex_unlock(&rpmpd_lock);
-
return ret;
}
@@ -1060,27 +1058,21 @@ static int rpmpd_power_off(struct generic_pm_domain *domain)
static int rpmpd_set_performance(struct generic_pm_domain *domain,
unsigned int state)
{
- int ret = 0;
struct rpmpd *pd = domain_to_rpmpd(domain);
if (state > pd->max_state)
state = pd->max_state;
- mutex_lock(&rpmpd_lock);
+ guard(mutex)(&rpmpd_lock);
pd->corner = state;
/* Always send updates for vfc and vfl */
if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) &&
pd->key != cpu_to_le32(KEY_FLOOR_LEVEL))
- goto out;
+ return 0;
- ret = rpmpd_aggregate_corner(pd);
-
-out:
- mutex_unlock(&rpmpd_lock);
-
- return ret;
+ return rpmpd_aggregate_corner(pd);
}
static int rpmpd_probe(struct platform_device *pdev)
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index 9b76b62869d0..cb0f93800138 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -33,6 +33,7 @@
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/power/rk3568-power.h>
+#include <dt-bindings/power/rockchip,rk3576-power.h>
#include <dt-bindings/power/rk3588-power.h>
struct rockchip_domain_info {
@@ -45,6 +46,7 @@ struct rockchip_domain_info {
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
+ int clk_ungate_mask;
int mem_status_mask;
int repair_status_mask;
u32 pwr_offset;
@@ -62,6 +64,7 @@ struct rockchip_pmu_info {
u32 chain_status_offset;
u32 mem_status_offset;
u32 repair_status_offset;
+ u32 clk_ungate_offset;
u32 core_pwrcnt_offset;
u32 gpu_pwrcnt_offset;
@@ -144,6 +147,25 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
+#define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \
+{ \
+ .name = _name, \
+ .pwr_offset = p_offset, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .mem_offset = m_offset, \
+ .mem_status_mask = (m_status), \
+ .repair_status_mask = (r_status), \
+ .req_offset = r_offset, \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .clk_ungate_mask = (g_mask), \
+ .ack_mask = (ack), \
+ .active_wakeup = wakeup, \
+}
+
#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
.name = _name, \
@@ -175,6 +197,9 @@ struct rockchip_pmu {
#define DOMAIN_RK3568(name, pwr, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
+#define DOMAIN_RK3576(name, p_offset, pwr, status, r_status, r_offset, req, idle, g_mask, wakeup) \
+ DOMAIN_M_O_R_G(name, p_offset, pwr, status, 0, r_status, r_status, r_offset, req, idle, idle, g_mask, wakeup)
+
/*
* Dynamic Memory Controller may need to coordinate with us -- see
* rockchip_pmu_block().
@@ -299,6 +324,26 @@ static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
return val;
}
+static int rockchip_pmu_ungate_clk(struct rockchip_pm_domain *pd, bool ungate)
+{
+ const struct rockchip_domain_info *pd_info = pd->info;
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+ int clk_ungate_w_mask = pd_info->clk_ungate_mask << 16;
+
+ if (!pd_info->clk_ungate_mask)
+ return 0;
+
+ if (!pmu->info->clk_ungate_offset)
+ return 0;
+
+ val = ungate ? (pd_info->clk_ungate_mask | clk_ungate_w_mask) :
+ clk_ungate_w_mask;
+ regmap_write(pmu->regmap, pmu->info->clk_ungate_offset, val);
+
+ return 0;
+}
+
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
@@ -539,6 +584,8 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
return ret;
}
+ rockchip_pmu_ungate_clk(pd, true);
+
if (!power_on) {
rockchip_pmu_save_qos(pd);
@@ -555,6 +602,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
rockchip_pmu_restore_qos(pd);
}
+ rockchip_pmu_ungate_clk(pd, false);
clk_bulk_disable(pd->num_clks, pd->clks);
}
@@ -712,12 +760,11 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
goto err_unprepare_clocks;
}
pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
+ of_node_put(qos_node);
if (IS_ERR(pd->qos_regmap[j])) {
error = -ENODEV;
- of_node_put(qos_node);
goto err_unprepare_clocks;
}
- of_node_put(qos_node);
}
}
@@ -800,11 +847,10 @@ static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
struct device_node *parent)
{
- struct device_node *np;
struct generic_pm_domain *child_domain, *parent_domain;
int error;
- for_each_child_of_node(parent, np) {
+ for_each_child_of_node_scoped(parent, np) {
u32 idx;
error = of_property_read_u32(parent, "reg", &idx);
@@ -812,7 +858,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
parent, error);
- goto err_out;
+ return error;
}
parent_domain = pmu->genpd_data.domains[idx];
@@ -820,7 +866,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
if (error) {
dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
np, error);
- goto err_out;
+ return error;
}
error = of_property_read_u32(np, "reg", &idx);
@@ -828,7 +874,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
np, error);
- goto err_out;
+ return error;
}
child_domain = pmu->genpd_data.domains[idx];
@@ -836,7 +882,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
if (error) {
dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
parent_domain->name, child_domain->name, error);
- goto err_out;
+ return error;
} else {
dev_dbg(pmu->dev, "%s add subdomain: %s\n",
parent_domain->name, child_domain->name);
@@ -846,17 +892,12 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
}
return 0;
-
-err_out:
- of_node_put(np);
- return error;
}
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *node;
struct device *parent;
struct rockchip_pmu *pmu;
const struct rockchip_pmu_info *pmu_info;
@@ -912,14 +953,13 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Prevent any rockchip_pmu_block() from racing with the remainder of
* setup (clocks, register initialization).
*/
- mutex_lock(&dmc_pmu_mutex);
+ guard(mutex)(&dmc_pmu_mutex);
- for_each_available_child_of_node(np, node) {
+ for_each_available_child_of_node_scoped(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
dev_err(dev, "failed to handle node %pOFn: %d\n",
node, error);
- of_node_put(node);
goto err_out;
}
@@ -927,7 +967,6 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (error < 0) {
dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
node, error);
- of_node_put(node);
goto err_out;
}
}
@@ -947,13 +986,10 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (!WARN_ON_ONCE(dmc_pmu))
dmc_pmu = pmu;
- mutex_unlock(&dmc_pmu_mutex);
-
return 0;
err_out:
rockchip_pm_domain_cleanup(pmu);
- mutex_unlock(&dmc_pmu_mutex);
return error;
}
@@ -1106,6 +1142,28 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
+static const struct rockchip_domain_info rk3576_pm_domains[] = {
+ [RK3576_PD_NPU] = DOMAIN_RK3576("npu", 0x0, BIT(0), BIT(0), 0, 0x0, 0, 0, 0, false),
+ [RK3576_PD_NVM] = DOMAIN_RK3576("nvm", 0x0, BIT(6), 0, BIT(6), 0x4, BIT(2), BIT(18), BIT(2), false),
+ [RK3576_PD_SDGMAC] = DOMAIN_RK3576("sdgmac", 0x0, BIT(7), 0, BIT(7), 0x4, BIT(1), BIT(17), 0x6, false),
+ [RK3576_PD_AUDIO] = DOMAIN_RK3576("audio", 0x0, BIT(8), 0, BIT(8), 0x4, BIT(0), BIT(16), BIT(0), false),
+ [RK3576_PD_PHP] = DOMAIN_RK3576("php", 0x0, BIT(9), 0, BIT(9), 0x0, BIT(15), BIT(15), BIT(15), false),
+ [RK3576_PD_SUBPHP] = DOMAIN_RK3576("subphp", 0x0, BIT(10), 0, BIT(10), 0x0, 0, 0, 0, false),
+ [RK3576_PD_VOP] = DOMAIN_RK3576("vop", 0x0, BIT(11), 0, BIT(11), 0x0, 0x6000, 0x6000, 0x6000, false),
+ [RK3576_PD_VO1] = DOMAIN_RK3576("vo1", 0x0, BIT(14), 0, BIT(14), 0x0, BIT(12), BIT(12), 0x7000, false),
+ [RK3576_PD_VO0] = DOMAIN_RK3576("vo0", 0x0, BIT(15), 0, BIT(15), 0x0, BIT(11), BIT(11), 0x6800, false),
+ [RK3576_PD_USB] = DOMAIN_RK3576("usb", 0x4, BIT(0), 0, BIT(16), 0x0, BIT(10), BIT(10), 0x6400, true),
+ [RK3576_PD_VI] = DOMAIN_RK3576("vi", 0x4, BIT(1), 0, BIT(17), 0x0, BIT(9), BIT(9), BIT(9), false),
+ [RK3576_PD_VEPU0] = DOMAIN_RK3576("vepu0", 0x4, BIT(2), 0, BIT(18), 0x0, BIT(7), BIT(7), 0x280, false),
+ [RK3576_PD_VEPU1] = DOMAIN_RK3576("vepu1", 0x4, BIT(3), 0, BIT(19), 0x0, BIT(8), BIT(8), BIT(8), false),
+ [RK3576_PD_VDEC] = DOMAIN_RK3576("vdec", 0x4, BIT(4), 0, BIT(20), 0x0, BIT(6), BIT(6), BIT(6), false),
+ [RK3576_PD_VPU] = DOMAIN_RK3576("vpu", 0x4, BIT(5), 0, BIT(21), 0x0, BIT(5), BIT(5), BIT(5), false),
+ [RK3576_PD_NPUTOP] = DOMAIN_RK3576("nputop", 0x4, BIT(6), 0, BIT(22), 0x0, 0x18, 0x18, 0x18, false),
+ [RK3576_PD_NPU0] = DOMAIN_RK3576("npu0", 0x4, BIT(7), 0, BIT(23), 0x0, BIT(1), BIT(1), 0x1a, false),
+ [RK3576_PD_NPU1] = DOMAIN_RK3576("npu1", 0x4, BIT(8), 0, BIT(24), 0x0, BIT(2), BIT(2), 0x1c, false),
+ [RK3576_PD_GPU] = DOMAIN_RK3576("gpu", 0x4, BIT(9), 0, BIT(25), 0x0, BIT(0), BIT(0), BIT(0), false),
+};
+
static const struct rockchip_domain_info rk3588_pm_domains[] = {
[RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
[RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
@@ -1284,6 +1342,22 @@ static const struct rockchip_pmu_info rk3568_pmu = {
.domain_info = rk3568_pm_domains,
};
+static const struct rockchip_pmu_info rk3576_pmu = {
+ .pwr_offset = 0x210,
+ .status_offset = 0x230,
+ .chain_status_offset = 0x248,
+ .mem_status_offset = 0x250,
+ .mem_pwr_offset = 0x300,
+ .req_offset = 0x110,
+ .idle_offset = 0x128,
+ .ack_offset = 0x120,
+ .repair_status_offset = 0x570,
+ .clk_ungate_offset = 0x140,
+
+ .num_domains = ARRAY_SIZE(rk3576_pm_domains),
+ .domain_info = rk3576_pm_domains,
+};
+
static const struct rockchip_pmu_info rk3588_pmu = {
.pwr_offset = 0x14c,
.status_offset = 0x180,
@@ -1360,6 +1434,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.data = (void *)&rk3568_pmu,
},
{
+ .compatible = "rockchip,rk3576-power-controller",
+ .data = (void *)&rk3576_pmu,
+ },
+ {
.compatible = "rockchip,rk3588-power-controller",
.data = (void *)&rk3588_pmu,
},
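rockchip_pmu_ungate_clk() above follows the Rockchip PMU write-mask convention: bits [31:16] of a register are write-enables for the corresponding bits [15:0], so a single write can set or clear only the masked bits without a read-modify-write. A small sketch of the encoding (helper name hypothetical):

#include <linux/types.h>

/*
 * Sketch: build a Rockchip-style write-masked value. Only bits whose
 * write-enable (upper half) is set are changed by the hardware.
 */
static u32 rk_write_masked(u16 mask, bool set)
{
	u32 val = (u32)mask << 16;	/* write-enable the masked bits */

	if (set)
		val |= mask;		/* drive the selected bits to 1 */

	return val;			/* otherwise they are driven to 0 */
}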
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
index 0f2944dc9355..b9c093f6064c 100644
--- a/drivers/power/reset/brcmstb-reboot.c
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -18,9 +18,6 @@
#include <linux/smp.h>
#include <linux/mfd/syscon.h>
-#define RESET_SOURCE_ENABLE_REG 1
-#define SW_MASTER_RESET_REG 2
-
static struct regmap *regmap;
static u32 rst_src_en;
static u32 sw_mstr_rst;
@@ -32,8 +29,7 @@ struct reset_reg_mask {
static const struct reset_reg_mask *reset_masks;
-static int brcmstb_restart_handler(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int brcmstb_restart_handler(struct sys_off_data *data)
{
int rc;
u32 tmp;
@@ -62,17 +58,9 @@ static int brcmstb_restart_handler(struct notifier_block *this,
return NOTIFY_DONE;
}
- while (1)
- ;
-
return NOTIFY_DONE;
}
-static struct notifier_block brcmstb_restart_nb = {
- .notifier_call = brcmstb_restart_handler,
- .priority = 128,
-};
-
static const struct reset_reg_mask reset_bits_40nm = {
.rst_src_en_mask = BIT(0),
.sw_mstr_rst_mask = BIT(0),
@@ -83,46 +71,28 @@ static const struct reset_reg_mask reset_bits_65nm = {
.sw_mstr_rst_mask = BIT(31),
};
-static const struct of_device_id of_match[] = {
- { .compatible = "brcm,brcmstb-reboot", .data = &reset_bits_40nm },
- { .compatible = "brcm,bcm7038-reboot", .data = &reset_bits_65nm },
- {},
-};
-
static int brcmstb_reboot_probe(struct platform_device *pdev)
{
int rc;
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id;
+ unsigned int args[2];
- of_id = of_match_node(of_match, np);
- if (!of_id) {
- pr_err("failed to look up compatible string\n");
+ reset_masks = device_get_match_data(&pdev->dev);
+ if (!reset_masks) {
+ pr_err("failed to get match data\n");
return -EINVAL;
}
- reset_masks = of_id->data;
- regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "syscon", ARRAY_SIZE(args), args);
if (IS_ERR(regmap)) {
pr_err("failed to get syscon phandle\n");
return -EINVAL;
}
+ rst_src_en = args[0];
+ sw_mstr_rst = args[1];
- rc = of_property_read_u32_index(np, "syscon", RESET_SOURCE_ENABLE_REG,
- &rst_src_en);
- if (rc) {
- pr_err("can't get rst_src_en offset (%d)\n", rc);
- return -EINVAL;
- }
-
- rc = of_property_read_u32_index(np, "syscon", SW_MASTER_RESET_REG,
- &sw_mstr_rst);
- if (rc) {
- pr_err("can't get sw_mstr_rst offset (%d)\n", rc);
- return -EINVAL;
- }
-
- rc = register_restart_handler(&brcmstb_restart_nb);
+ rc = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
+ 128, brcmstb_restart_handler, NULL);
if (rc)
dev_err(&pdev->dev,
"cannot register restart handler (err=%d)\n", rc);
@@ -130,6 +100,12 @@ static int brcmstb_reboot_probe(struct platform_device *pdev)
return rc;
}
+static const struct of_device_id of_match[] = {
+ { .compatible = "brcm,brcmstb-reboot", .data = &reset_bits_40nm },
+ { .compatible = "brcm,bcm7038-reboot", .data = &reset_bits_65nm },
+ {},
+};
+
static struct platform_driver brcmstb_reboot_driver = {
.probe = brcmstb_reboot_probe,
.driver = {
@@ -140,7 +116,6 @@ static struct platform_driver brcmstb_reboot_driver = {
static int __init brcmstb_reboot_init(void)
{
- return platform_driver_probe(&brcmstb_reboot_driver,
- brcmstb_reboot_probe);
+ return platform_driver_register(&brcmstb_reboot_driver);
}
subsys_initcall(brcmstb_reboot_init);
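The brcmstb-reboot rework swaps the open-coded restart notifier for devm_register_sys_off_handler(), whose callback takes a struct sys_off_data, and pulls both register offsets out of the syscon phandle arguments in one lookup. A hedged sketch of the handler shape (the callback body is invented):

#include <linux/platform_device.h>
#include <linux/reboot.h>

/* Sketch: sys_off callbacks get a sys_off_data instead of a
 * notifier_block; returning NOTIFY_DONE lets other handlers run.
 */
static int example_restart_handler(struct sys_off_data *data)
{
	/* kick the reset hardware here, e.g. via data->cb_data */
	return NOTIFY_DONE;
}

static int example_probe(struct platform_device *pdev)
{
	/* priority 128 matches the old notifier_block priority */
	return devm_register_sys_off_handler(&pdev->dev,
					     SYS_OFF_MODE_RESTART, 128,
					     example_restart_handler, NULL);
}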
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
index 1775b318d0ef..4f1cd1c0018c 100644
--- a/drivers/power/reset/pwr-mlxbf.c
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -18,7 +18,6 @@
struct pwr_mlxbf {
struct work_struct reboot_work;
- struct work_struct shutdown_work;
const char *hid;
};
@@ -27,22 +26,17 @@ static void pwr_mlxbf_reboot_work(struct work_struct *work)
acpi_bus_generate_netlink_event("button/reboot.*", "Reboot Button", 0x80, 1);
}
-static void pwr_mlxbf_shutdown_work(struct work_struct *work)
-{
- acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
-}
-
static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
{
const char *rst_pwr_hid = "MLNXBF24";
- const char *low_pwr_hid = "MLNXBF29";
+ const char *shutdown_hid = "MLNXBF29";
struct pwr_mlxbf *priv = ptr;
if (!strncmp(priv->hid, rst_pwr_hid, 8))
schedule_work(&priv->reboot_work);
- if (!strncmp(priv->hid, low_pwr_hid, 8))
- schedule_work(&priv->shutdown_work);
+ if (!strncmp(priv->hid, shutdown_hid, 8))
+ orderly_poweroff(true);
return IRQ_HANDLED;
}
@@ -70,10 +64,6 @@ static int pwr_mlxbf_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
- err = devm_work_autocancel(dev, &priv->shutdown_work, pwr_mlxbf_shutdown_work);
- if (err)
- return err;
-
err = devm_work_autocancel(dev, &priv->reboot_work, pwr_mlxbf_reboot_work);
if (err)
return err;
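Dropping the shutdown work item is safe because orderly_poweroff() only queues work internally, so it may be called from hard-IRQ context; passing true forces a power-off should the userspace shutdown path fail. A minimal sketch of the resulting handler (names hypothetical):

#include <linux/interrupt.h>
#include <linux/reboot.h>

/* Sketch: request an orderly shutdown straight from the IRQ handler. */
static irqreturn_t example_pwr_irq(int irq, void *ptr)
{
	orderly_poweroff(true);	/* queues work; safe in IRQ context */
	return IRQ_HANDLED;
}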
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
index 700879474abf..4fa129877d7e 100644
--- a/drivers/power/sequencing/pwrseq-qcom-wcn.c
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -198,6 +198,13 @@ static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
.gpio_enable_delay_ms = 100,
};
+static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn6855_of_data = {
+ .vregs = pwrseq_qca6390_vregs,
+ .num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs),
+ .pwup_delay_ms = 50,
+ .gpio_enable_delay_ms = 5,
+};
+
static const char *const pwrseq_wcn7850_vregs[] = {
"vdd",
"vddio",
@@ -322,6 +329,10 @@ static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
.data = &pwrseq_qca6390_of_data,
},
{
+ .compatible = "qcom,wcn6855-pmu",
+ .data = &pwrseq_wcn6855_of_data,
+ },
+ {
.compatible = "qcom,wcn7850-pmu",
.data = &pwrseq_wcn7850_of_data,
},
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 270874eeb934..a71903b1bf78 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2531,7 +2531,7 @@ static struct attribute *ab8500_fg_attrs[] = {
};
ATTRIBUTE_GROUPS(ab8500_fg);
-static struct kobj_type ab8500_fg_ktype = {
+static const struct kobj_type ab8500_fg_ktype = {
.sysfs_ops = &ab8500_fg_sysfs_ops,
.default_groups = ab8500_fg_groups,
};
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 6ac5c80cfda2..f71cc90fea12 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -17,6 +17,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -32,9 +33,19 @@
#include <linux/mfd/axp20x.h>
#define AXP20X_PWR_STATUS_BAT_CHARGING BIT(2)
+#define AXP717_PWR_STATUS_MASK GENMASK(6, 5)
+#define AXP717_PWR_STATUS_BAT_STANDBY 0
+#define AXP717_PWR_STATUS_BAT_CHRG 1
+#define AXP717_PWR_STATUS_BAT_DISCHRG 2
#define AXP20X_PWR_OP_BATT_PRESENT BIT(5)
#define AXP20X_PWR_OP_BATT_ACTIVATED BIT(3)
+#define AXP717_PWR_OP_BATT_PRESENT BIT(3)
+
+#define AXP717_BATT_PMU_FAULT_MASK GENMASK(2, 0)
+#define AXP717_BATT_UVLO_2_5V BIT(2)
+#define AXP717_BATT_OVER_TEMP BIT(1)
+#define AXP717_BATT_UNDER_TEMP BIT(0)
#define AXP209_FG_PERCENT GENMASK(6, 0)
#define AXP22X_FG_VALID BIT(7)
@@ -49,20 +60,51 @@
#define AXP22X_CHRG_CTRL1_TGT_4_22V (1 << 5)
#define AXP22X_CHRG_CTRL1_TGT_4_24V (3 << 5)
+#define AXP717_CHRG_ENABLE BIT(1)
+#define AXP717_CHRG_CV_VOLT_MASK GENMASK(2, 0)
+#define AXP717_CHRG_CV_4_0V 0
+#define AXP717_CHRG_CV_4_1V 1
+#define AXP717_CHRG_CV_4_2V 2
+#define AXP717_CHRG_CV_4_35V 3
+#define AXP717_CHRG_CV_4_4V 4
+/* Values 5 and 6 reserved. */
+#define AXP717_CHRG_CV_5_0V 7
+
#define AXP813_CHRG_CTRL1_TGT_4_35V (3 << 5)
#define AXP20X_CHRG_CTRL1_TGT_CURR GENMASK(3, 0)
+#define AXP717_ICC_CHARGER_LIM_MASK GENMASK(5, 0)
+
+#define AXP717_ITERM_CHG_LIM_MASK GENMASK(3, 0)
+#define AXP717_ITERM_CC_STEP 64000
#define AXP20X_V_OFF_MASK GENMASK(2, 0)
+#define AXP717_V_OFF_MASK GENMASK(6, 4)
+
+#define AXP717_BAT_VMIN_MIN_UV 2600000
+#define AXP717_BAT_VMIN_MAX_UV 3300000
+#define AXP717_BAT_VMIN_STEP 100000
+#define AXP717_BAT_CV_MIN_UV 4000000
+#define AXP717_BAT_CV_MAX_UV 5000000
+#define AXP717_BAT_CC_MIN_UA 0
+#define AXP717_BAT_CC_MAX_UA 3008000
struct axp20x_batt_ps;
struct axp_data {
- int ccc_scale;
- int ccc_offset;
- bool has_fg_valid;
+ int ccc_scale;
+ int ccc_offset;
+ unsigned int ccc_reg;
+ unsigned int ccc_mask;
+ bool has_fg_valid;
+ const struct power_supply_desc *bat_ps_desc;
int (*get_max_voltage)(struct axp20x_batt_ps *batt, int *val);
int (*set_max_voltage)(struct axp20x_batt_ps *batt, int val);
+ int (*cfg_iio_chan)(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt);
+ void (*set_bat_info)(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt,
+ struct power_supply_battery_info *info);
};
struct axp20x_batt_ps {
@@ -135,6 +177,39 @@ static int axp22x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
return 0;
}
+static int axp717_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
+ int *val)
+{
+ int ret, reg;
+
+ ret = regmap_read(axp20x_batt->regmap, AXP717_CV_CHG_SET, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg & AXP717_CHRG_CV_VOLT_MASK) {
+ case AXP717_CHRG_CV_4_0V:
+ *val = 4000000;
+ return 0;
+ case AXP717_CHRG_CV_4_1V:
+ *val = 4100000;
+ return 0;
+ case AXP717_CHRG_CV_4_2V:
+ *val = 4200000;
+ return 0;
+ case AXP717_CHRG_CV_4_35V:
+ *val = 4350000;
+ return 0;
+ case AXP717_CHRG_CV_4_4V:
+ *val = 4400000;
+ return 0;
+ case AXP717_CHRG_CV_5_0V:
+ *val = 5000000;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp813_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
int *val)
{
@@ -180,6 +255,21 @@ static int axp20x_get_constant_charge_current(struct axp20x_batt_ps *axp,
return 0;
}
+static int axp717_get_constant_charge_current(struct axp20x_batt_ps *axp,
+ int *val)
+{
+ int ret;
+
+ ret = regmap_read(axp->regmap, AXP717_ICC_CHG_SET, val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AXP717_ICC_CHARGER_LIM_MASK, *val) *
+ axp->data->ccc_scale;
+
+ return 0;
+}
+
static int axp20x_battery_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -303,11 +393,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
val->intval = reg & AXP209_FG_PERCENT;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
return axp20x_batt->data->get_max_voltage(axp20x_batt,
&val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, &reg);
if (ret)
return ret;
@@ -332,6 +422,171 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
return 0;
}
+static int axp717_battery_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
+ int ret = 0, reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = regmap_read(axp20x_batt->regmap, AXP717_ON_INDICATE,
+ &reg);
+ if (ret)
+ return ret;
+
+ val->intval = FIELD_GET(AXP717_PWR_OP_BATT_PRESENT, reg);
+ return 0;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_STATUS_2,
+ &reg);
+ if (ret)
+ return ret;
+
+ switch (FIELD_GET(AXP717_PWR_STATUS_MASK, reg)) {
+ case AXP717_PWR_STATUS_BAT_STANDBY:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+
+ case AXP717_PWR_STATUS_BAT_CHRG:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+
+ case AXP717_PWR_STATUS_BAT_DISCHRG:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ return 0;
+
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ return 0;
+ }
+
+ /*
+ * If a fault is detected it must also be cleared; if the
+ * condition persists it should reappear (this is an
+ * assumption, as the behavior is not documented). A restart
+ * was not sufficient to clear the bit in testing, despite
+ * the register being listed as POR.
+ */
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
+ &reg);
+ if (ret)
+ return ret;
+
+ switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
+ case AXP717_BATT_UVLO_2_5V:
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ regmap_update_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UVLO_2_5V,
+ AXP717_BATT_UVLO_2_5V);
+ return 0;
+
+ case AXP717_BATT_OVER_TEMP:
+ val->intval = POWER_SUPPLY_HEALTH_HOT;
+ regmap_update_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_OVER_TEMP,
+ AXP717_BATT_OVER_TEMP);
+ return 0;
+
+ case AXP717_BATT_UNDER_TEMP:
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ regmap_update_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UNDER_TEMP,
+ AXP717_BATT_UNDER_TEMP);
+ return 0;
+
+ default:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ }
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = axp717_get_constant_charge_current(axp20x_batt,
+ &val->intval);
+ if (ret)
+ return ret;
+ return 0;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ /*
+ * The offset of this value is currently unknown and is
+ * not documented in the datasheet. Based on observation it
+ * is assumed to be somewhere around 450 mA, so the value is
+ * left raw for now.
+ */
+ ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval);
+ if (ret)
+ return ret;
+ /* IIO framework gives mA but Power Supply framework gives uA */
+ val->intval *= 1000;
+ return 0;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = regmap_read(axp20x_batt->regmap, AXP717_ON_INDICATE,
+ &reg);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(AXP717_PWR_OP_BATT_PRESENT, reg))
+ return -ENODEV;
+
+ ret = regmap_read(axp20x_batt->regmap,
+ AXP717_BATT_PERCENT_DATA, &reg);
+ if (ret)
+ return ret;
+
+ /*
+ * Fuel Gauge data takes 7 bits but the stored value seems to be
+ * directly the raw percentage without any scaling to 7 bits.
+ */
+ val->intval = reg & AXP209_FG_PERCENT;
+ return 0;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return axp20x_batt->data->get_max_voltage(axp20x_batt,
+ &val->intval);
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ ret = regmap_read(axp20x_batt->regmap,
+ AXP717_VSYS_V_POWEROFF, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = AXP717_BAT_VMIN_MIN_UV + AXP717_BAT_VMIN_STEP *
+ (reg & AXP717_V_OFF_MASK);
+ return 0;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = iio_read_channel_processed(axp20x_batt->batt_v,
+ &val->intval);
+ if (ret)
+ return ret;
+
+ /* IIO framework gives mV but Power Supply framework gives uV */
+ val->intval *= 1000;
+ return 0;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = regmap_read(axp20x_batt->regmap,
+ AXP717_ITERM_CHG_SET, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = (reg & AXP717_ITERM_CHG_LIM_MASK) * AXP717_ITERM_CC_STEP;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp22x_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt,
int val)
{
@@ -388,6 +643,35 @@ static int axp20x_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt,
AXP20X_CHRG_CTRL1_TGT_VOLT, val);
}
+static int axp717_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt,
+ int val)
+{
+ switch (val) {
+ case 4000000:
+ val = AXP717_CHRG_CV_4_0V;
+ break;
+
+ case 4100000:
+ val = AXP717_CHRG_CV_4_1V;
+ break;
+
+ case 4200000:
+ val = AXP717_CHRG_CV_4_2V;
+ break;
+
+ default:
+ /*
+ * The AXP717 can go up to 4.35, 4.4, and 5.0 volts, which
+ * seems too high for lithium batteries, so do not allow them.
+ */
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(axp20x_batt->regmap,
+ AXP717_CV_CHG_SET,
+ AXP717_CHRG_CV_VOLT_MASK, val);
+}
+
static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt,
int charge_current)
{
@@ -404,6 +688,24 @@ static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt,
AXP20X_CHRG_CTRL1_TGT_CURR, charge_current);
}
+static int axp717_set_constant_charge_current(struct axp20x_batt_ps *axp,
+ int charge_current)
+{
+ int val;
+
+ if (charge_current > axp->max_ccc)
+ return -EINVAL;
+
+ if (charge_current > AXP717_BAT_CC_MAX_UA || charge_current < 0)
+ return -EINVAL;
+
+ val = (charge_current - axp->data->ccc_offset) /
+ axp->data->ccc_scale;
+
+ return regmap_update_bits(axp->regmap, AXP717_ICC_CHG_SET,
+ AXP717_ICC_CHARGER_LIM_MASK, val);
+}
+
static int axp20x_set_max_constant_charge_current(struct axp20x_batt_ps *axp,
int charge_current)
{
@@ -448,6 +750,19 @@ static int axp20x_set_voltage_min_design(struct axp20x_batt_ps *axp_batt,
AXP20X_V_OFF_MASK, val1);
}
+static int axp717_set_voltage_min_design(struct axp20x_batt_ps *axp_batt,
+ int min_voltage)
+{
+ int val1 = (min_voltage - AXP717_BAT_VMIN_MIN_UV) / AXP717_BAT_VMIN_STEP;
+
+ if (val1 < 0 || val1 > AXP717_V_OFF_MASK)
+ return -EINVAL;
+
+ return regmap_update_bits(axp_batt->regmap,
+ AXP717_VSYS_V_POWEROFF,
+ AXP717_V_OFF_MASK, val1);
+}
+
static int axp20x_battery_set_prop(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
@@ -455,10 +770,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
switch (psp) {
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_set_voltage_min_design(axp20x_batt, val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
@@ -484,6 +799,42 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
}
}
+static int axp717_battery_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return axp717_set_voltage_min_design(axp20x_batt, val->intval);
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return axp717_set_constant_charge_current(axp20x_batt,
+ val->intval);
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (val->intval) {
+ case POWER_SUPPLY_STATUS_CHARGING:
+ return regmap_update_bits(axp20x_batt->regmap,
+ AXP717_MODULE_EN_CONTROL_2,
+ AXP717_CHRG_ENABLE,
+ AXP717_CHRG_ENABLE);
+
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ return regmap_update_bits(axp20x_batt->regmap,
+ AXP717_MODULE_EN_CONTROL_2,
+ AXP717_CHRG_ENABLE, 0);
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
static enum power_supply_property axp20x_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -493,22 +844,45 @@ static enum power_supply_property axp20x_battery_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_CAPACITY,
};
+static enum power_supply_property axp717_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+};
+
static int axp20x_battery_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
return psp == POWER_SUPPLY_PROP_STATUS ||
- psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
- psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MAX ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
}
-static const struct power_supply_desc axp20x_batt_ps_desc = {
+static int axp717_battery_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_STATUS ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MAX ||
+ psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+}
+
+static const struct power_supply_desc axp209_batt_ps_desc = {
.name = "axp20x-battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = axp20x_battery_props,
@@ -518,27 +892,163 @@ static const struct power_supply_desc axp20x_batt_ps_desc = {
.set_property = axp20x_battery_set_prop,
};
+static const struct power_supply_desc axp717_batt_ps_desc = {
+ .name = "axp20x-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = axp717_battery_props,
+ .num_properties = ARRAY_SIZE(axp717_battery_props),
+ .property_is_writeable = axp717_battery_prop_writeable,
+ .get_property = axp717_battery_get_prop,
+ .set_property = axp717_battery_set_prop,
+};
+
+static int axp209_bat_cfg_iio_channels(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt)
+{
+ axp_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v");
+ if (IS_ERR(axp_batt->batt_v)) {
+ if (PTR_ERR(axp_batt->batt_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(axp_batt->batt_v);
+ }
+
+ axp_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev,
+ "batt_chrg_i");
+ if (IS_ERR(axp_batt->batt_chrg_i)) {
+ if (PTR_ERR(axp_batt->batt_chrg_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(axp_batt->batt_chrg_i);
+ }
+
+ axp_batt->batt_dischrg_i = devm_iio_channel_get(&pdev->dev,
+ "batt_dischrg_i");
+ if (IS_ERR(axp_batt->batt_dischrg_i)) {
+ if (PTR_ERR(axp_batt->batt_dischrg_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(axp_batt->batt_dischrg_i);
+ }
+
+ return 0;
+}
+
+static int axp717_bat_cfg_iio_channels(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt)
+{
+ axp_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v");
+ if (IS_ERR(axp_batt->batt_v)) {
+ if (PTR_ERR(axp_batt->batt_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(axp_batt->batt_v);
+ }
+
+ axp_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev,
+ "batt_chrg_i");
+ if (IS_ERR(axp_batt->batt_chrg_i)) {
+ if (PTR_ERR(axp_batt->batt_chrg_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(axp_batt->batt_chrg_i);
+ }
+
+ return 0;
+}
+
+static void axp209_set_battery_info(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt,
+ struct power_supply_battery_info *info)
+{
+ int vmin = info->voltage_min_design_uv;
+ int ccc = info->constant_charge_current_max_ua;
+
+ if (vmin > 0 && axp20x_set_voltage_min_design(axp_batt, vmin))
+ dev_err(&pdev->dev,
+ "couldn't set voltage_min_design\n");
+
+ /* Set max to unverified value to be able to set CCC */
+ axp_batt->max_ccc = ccc;
+
+ if (ccc <= 0 || axp20x_set_constant_charge_current(axp_batt, ccc)) {
+ dev_err(&pdev->dev,
+ "couldn't set ccc from DT: fallback to min value\n");
+ ccc = 300000;
+ axp_batt->max_ccc = ccc;
+ axp20x_set_constant_charge_current(axp_batt, ccc);
+ }
+}
+
+static void axp717_set_battery_info(struct platform_device *pdev,
+ struct axp20x_batt_ps *axp_batt,
+ struct power_supply_battery_info *info)
+{
+ int vmin = info->voltage_min_design_uv;
+ int vmax = info->voltage_max_design_uv;
+ int ccc = info->constant_charge_current_max_ua;
+ int val;
+
+ if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin))
+ dev_err(&pdev->dev,
+ "couldn't set voltage_min_design\n");
+
+ if (vmax > 0 && axp717_battery_set_max_voltage(axp_batt, vmax))
+ dev_err(&pdev->dev,
+ "couldn't set voltage_max_design\n");
+
+ axp717_get_constant_charge_current(axp_batt, &val);
+ axp_batt->max_ccc = ccc;
+ if (ccc <= 0 || axp717_set_constant_charge_current(axp_batt, ccc)) {
+ dev_err(&pdev->dev,
+ "couldn't set ccc from DT: current ccc is %d\n",
+ val);
+ }
+}
+
static const struct axp_data axp209_data = {
.ccc_scale = 100000,
.ccc_offset = 300000,
+ .ccc_reg = AXP20X_CHRG_CTRL1,
+ .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR,
+ .bat_ps_desc = &axp209_batt_ps_desc,
.get_max_voltage = axp20x_battery_get_max_voltage,
.set_max_voltage = axp20x_battery_set_max_voltage,
+ .cfg_iio_chan = axp209_bat_cfg_iio_channels,
+ .set_bat_info = axp209_set_battery_info,
};
static const struct axp_data axp221_data = {
.ccc_scale = 150000,
.ccc_offset = 300000,
+ .ccc_reg = AXP20X_CHRG_CTRL1,
+ .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR,
.has_fg_valid = true,
+ .bat_ps_desc = &axp209_batt_ps_desc,
.get_max_voltage = axp22x_battery_get_max_voltage,
.set_max_voltage = axp22x_battery_set_max_voltage,
+ .cfg_iio_chan = axp209_bat_cfg_iio_channels,
+ .set_bat_info = axp209_set_battery_info,
+};
+
+static const struct axp_data axp717_data = {
+ .ccc_scale = 64000,
+ .ccc_offset = 0,
+ .ccc_reg = AXP717_ICC_CHG_SET,
+ .ccc_mask = AXP717_ICC_CHARGER_LIM_MASK,
+ .bat_ps_desc = &axp717_batt_ps_desc,
+ .get_max_voltage = axp717_battery_get_max_voltage,
+ .set_max_voltage = axp717_battery_set_max_voltage,
+ .cfg_iio_chan = axp717_bat_cfg_iio_channels,
+ .set_bat_info = axp717_set_battery_info,
};
static const struct axp_data axp813_data = {
.ccc_scale = 200000,
.ccc_offset = 200000,
+ .ccc_reg = AXP20X_CHRG_CTRL1,
+ .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR,
.has_fg_valid = true,
+ .bat_ps_desc = &axp209_batt_ps_desc,
.get_max_voltage = axp813_battery_get_max_voltage,
.set_max_voltage = axp20x_battery_set_max_voltage,
+ .cfg_iio_chan = axp209_bat_cfg_iio_channels,
+ .set_bat_info = axp209_set_battery_info,
};
static const struct of_device_id axp20x_battery_ps_id[] = {
@@ -549,6 +1059,9 @@ static const struct of_device_id axp20x_battery_ps_id[] = {
.compatible = "x-powers,axp221-battery-power-supply",
.data = (void *)&axp221_data,
}, {
+ .compatible = "x-powers,axp717-battery-power-supply",
+ .data = (void *)&axp717_data,
+ }, {
.compatible = "x-powers,axp813-battery-power-supply",
.data = (void *)&axp813_data,
}, { /* sentinel */ },
@@ -561,6 +1074,7 @@ static int axp20x_power_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {};
struct power_supply_battery_info *info;
struct device *dev = &pdev->dev;
+ int ret;
if (!of_device_is_available(pdev->dev.of_node))
return -ENODEV;
@@ -572,29 +1086,6 @@ static int axp20x_power_probe(struct platform_device *pdev)
axp20x_batt->dev = &pdev->dev;
- axp20x_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v");
- if (IS_ERR(axp20x_batt->batt_v)) {
- if (PTR_ERR(axp20x_batt->batt_v) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(axp20x_batt->batt_v);
- }
-
- axp20x_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev,
- "batt_chrg_i");
- if (IS_ERR(axp20x_batt->batt_chrg_i)) {
- if (PTR_ERR(axp20x_batt->batt_chrg_i) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(axp20x_batt->batt_chrg_i);
- }
-
- axp20x_batt->batt_dischrg_i = devm_iio_channel_get(&pdev->dev,
- "batt_dischrg_i");
- if (IS_ERR(axp20x_batt->batt_dischrg_i)) {
- if (PTR_ERR(axp20x_batt->batt_dischrg_i) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(axp20x_batt->batt_dischrg_i);
- }
-
axp20x_batt->regmap = dev_get_regmap(pdev->dev.parent, NULL);
platform_set_drvdata(pdev, axp20x_batt);
@@ -603,8 +1094,12 @@ static int axp20x_power_probe(struct platform_device *pdev)
axp20x_batt->data = (struct axp_data *)of_device_get_match_data(dev);
+ ret = axp20x_batt->data->cfg_iio_chan(pdev, axp20x_batt);
+ if (ret)
+ return ret;
+
axp20x_batt->batt = devm_power_supply_register(&pdev->dev,
- &axp20x_batt_ps_desc,
+ axp20x_batt->data->bat_ps_desc,
&psy_cfg);
if (IS_ERR(axp20x_batt->batt)) {
dev_err(&pdev->dev, "failed to register power supply: %ld\n",
@@ -613,33 +1108,15 @@ static int axp20x_power_probe(struct platform_device *pdev)
}
if (!power_supply_get_battery_info(axp20x_batt->batt, &info)) {
- int vmin = info->voltage_min_design_uv;
- int ccc = info->constant_charge_current_max_ua;
-
- if (vmin > 0 && axp20x_set_voltage_min_design(axp20x_batt,
- vmin))
- dev_err(&pdev->dev,
- "couldn't set voltage_min_design\n");
-
- /* Set max to unverified value to be able to set CCC */
- axp20x_batt->max_ccc = ccc;
-
- if (ccc <= 0 || axp20x_set_constant_charge_current(axp20x_batt,
- ccc)) {
- dev_err(&pdev->dev,
- "couldn't set constant charge current from DT: fallback to minimum value\n");
- ccc = 300000;
- axp20x_batt->max_ccc = ccc;
- axp20x_set_constant_charge_current(axp20x_batt, ccc);
- }
+ axp20x_batt->data->set_bat_info(pdev, axp20x_batt, info);
+ power_supply_put_battery_info(axp20x_batt->batt, info);
}
/*
* Update max CCC to a valid value if battery info is present or set it
* to current register value by default.
*/
- axp20x_get_constant_charge_current(axp20x_batt,
- &axp20x_batt->max_ccc);
+ axp20x_get_constant_charge_current(axp20x_batt, &axp20x_batt->max_ccc);
return 0;
}
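The AXP717 paths above extract register fields with FIELD_GET() from <linux/bitfield.h>, which derives the shift from a GENMASK() at compile time, so the mask is stated once and no hand-written shift can drift out of sync with it. A minimal sketch with an invented status layout mirroring AXP717_PWR_STATUS_MASK:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_STATUS_MASK	GENMASK(6, 5)	/* two-bit charge state */

/*
 * Sketch: FIELD_GET() masks and shifts in one step, e.g.
 * FIELD_GET(GENMASK(6, 5), 0x40) == 2, with no explicit ">> 5".
 */
static unsigned int example_charge_state(unsigned int reg)
{
	return FIELD_GET(EXAMPLE_STATUS_MASK, reg);
}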
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index dae7e5cfc54e..2766352ab737 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -30,8 +30,13 @@
#define AXP20X_PWR_STATUS_VBUS_PRESENT BIT(5)
#define AXP20X_PWR_STATUS_VBUS_USED BIT(4)
+#define AXP717_PWR_STATUS_VBUS_GOOD BIT(5)
+
#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
+#define AXP717_PMU_FAULT_VBUS BIT(5)
+#define AXP717_PMU_FAULT_VSYS BIT(3)
+
#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
@@ -39,12 +44,20 @@
#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
+#define AXP717_INPUT_VOL_LIMIT_MASK GENMASK(3, 0)
+#define AXP717_INPUT_CUR_LIMIT_MASK GENMASK(5, 0)
+#define AXP717_ADC_DATA_MASK GENMASK(14, 0)
+
+#define AXP717_ADC_EN_VBUS_VOLT BIT(2)
+
/*
* Note do not raise the debounce time, we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
*/
#define DEBOUNCE_TIME msecs_to_jiffies(50)
+struct axp20x_usb_power;
+
struct axp_data {
const struct power_supply_desc *power_desc;
const char * const *irq_names;
@@ -58,6 +71,10 @@ struct axp_data {
struct reg_field usb_bc_det_fld;
struct reg_field vbus_disable_bit;
bool vbus_needs_polling: 1;
+ void (*axp20x_read_vbus)(struct work_struct *work);
+ int (*axp20x_cfg_iio_chan)(struct platform_device *pdev,
+ struct axp20x_usb_power *power);
+ int (*axp20x_cfg_adc_reg)(struct axp20x_usb_power *power);
};
struct axp20x_usb_power {
@@ -74,6 +91,7 @@ struct axp20x_usb_power {
struct iio_channel *vbus_v;
struct iio_channel *vbus_i;
struct delayed_work vbus_detect;
+ int max_input_cur;
unsigned int old_status;
unsigned int online;
unsigned int num_irqs;
@@ -136,6 +154,24 @@ out:
mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
+static void axp717_usb_power_poll_vbus(struct work_struct *work)
+{
+ struct axp20x_usb_power *power =
+ container_of(work, struct axp20x_usb_power, vbus_detect.work);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &val);
+ if (ret)
+ return;
+
+ val &= AXP717_PWR_STATUS_VBUS_GOOD;
+ if (val != power->old_status)
+ power_supply_changed(power->supply);
+
+ power->old_status = val;
+}
+
static int axp20x_get_usb_type(struct axp20x_usb_power *power,
union power_supply_propval *val)
{
@@ -281,6 +317,91 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
return 0;
}
+static int axp717_usb_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+ unsigned int v;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &v);
+ if (ret)
+ return ret;
+
+ if (!(v & AXP717_PWR_STATUS_VBUS_GOOD))
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ ret = regmap_read(power->regmap, AXP717_PMU_FAULT_VBUS, &v);
+ if (ret)
+ return ret;
+
+ v &= (AXP717_PMU_FAULT_VBUS | AXP717_PMU_FAULT_VSYS);
+ if (v) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ regmap_write(power->regmap, AXP717_PMU_FAULT_VBUS, v);
+ }
+
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = regmap_read(power->regmap, AXP717_INPUT_CUR_LIMIT_CTRL, &v);
+ if (ret)
+ return ret;
+
+ /* 50ma step size with 100ma offset. */
+ v &= AXP717_INPUT_CUR_LIMIT_MASK;
+ val->intval = (v * 50000) + 100000;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &v);
+ if (ret)
+ return ret;
+ val->intval = !!(v & AXP717_PWR_STATUS_VBUS_GOOD);
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return axp20x_get_usb_type(power, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ ret = regmap_read(power->regmap, AXP717_INPUT_VOL_LIMIT_CTRL, &v);
+ if (ret)
+ return ret;
+
+ /* 80mv step size with 3.88v offset. */
+ v &= AXP717_INPUT_VOL_LIMIT_MASK;
+ val->intval = (v * 80000) + 3880000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
+ ret = iio_read_channel_processed(power->vbus_v,
+ &val->intval);
+ if (ret)
+ return ret;
+
+ /*
+ * IIO framework gives mV but Power Supply framework
+ * gives uV.
+ */
+ val->intval *= 1000;
+ return 0;
+ }
+
+ ret = axp20x_read_variable_width(power->regmap,
+ AXP717_VBUS_V_H, 16);
+ if (ret < 0)
+ return ret;
+
+ val->intval = (ret % AXP717_ADC_DATA_MASK) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
int intval)
{
@@ -307,6 +428,22 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
+static int axp717_usb_power_set_voltage_min(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ /* Minimum value of 3.88v and maximum of 5.08v. */
+ if (intval < 3880000 || intval > 5080000)
+ return -EINVAL;
+
+ /* step size of 80mv with 3.88v offset. */
+ val = (intval - 3880000) / 80000;
+ return regmap_update_bits(power->regmap,
+ AXP717_INPUT_VOL_LIMIT_CTRL,
+ AXP717_INPUT_VOL_LIMIT_MASK, val);
+}
+
static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *power,
int intval)
{
@@ -317,6 +454,13 @@ static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *pow
if (intval == -1)
return -EINVAL;
+ if (power->max_input_cur && (intval > power->max_input_cur)) {
+ dev_warn(power->dev,
+ "requested current %d clamped to max current %d\n",
+ intval, power->max_input_cur);
+ intval = power->max_input_cur;
+ }
+
/*
* BC1.2 detection can cause a race condition if we try to set a current
* limit while it's in progress. When it finishes it will overwrite the
@@ -340,6 +484,29 @@ static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *pow
return regmap_field_write(power->curr_lim_fld, reg);
}
+static int axp717_usb_power_set_input_current_limit(struct axp20x_usb_power *power,
+ int intval)
+{
+ int tmp;
+
+ /* Minimum value of 100mA and maximum value of 3.25A. */
+ if (intval < 100000 || intval > 3250000)
+ return -EINVAL;
+
+ if (power->max_input_cur && (intval > power->max_input_cur)) {
+ dev_warn(power->dev,
+ "reqested current %d clamped to max current %d\n",
+ intval, power->max_input_cur);
+ intval = power->max_input_cur;
+ }
+
+ /* Minimum value of 100mA with step size of 50mA. */
+ tmp = (intval - 100000) / 50000;
+ return regmap_update_bits(power->regmap,
+ AXP717_INPUT_CUR_LIMIT_CTRL,
+ AXP717_INPUT_CUR_LIMIT_MASK, tmp);
+}
+
static int axp20x_usb_power_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
@@ -362,6 +529,24 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
default:
return -EINVAL;
}
+}
+
+static int axp717_usb_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return axp717_usb_power_set_input_current_limit(power, val->intval);
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return axp717_usb_power_set_voltage_min(power, val->intval);
+
+ default:
+ return -EINVAL;
+ }
return -EINVAL;
}
@@ -385,6 +570,64 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
}
+static int axp717_usb_power_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
+}
+
+static int axp20x_configure_iio_channels(struct platform_device *pdev,
+ struct axp20x_usb_power *power)
+{
+ power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(power->vbus_v)) {
+ if (PTR_ERR(power->vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->vbus_v);
+ }
+
+ power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i");
+ if (IS_ERR(power->vbus_i)) {
+ if (PTR_ERR(power->vbus_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->vbus_i);
+ }
+
+ return 0;
+}
+
+static int axp717_configure_iio_channels(struct platform_device *pdev,
+ struct axp20x_usb_power *power)
+{
+ power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(power->vbus_v)) {
+ if (PTR_ERR(power->vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->vbus_v);
+ }
+
+ return 0;
+}
+
+static int axp20x_configure_adc_registers(struct axp20x_usb_power *power)
+{
+ /* Enable vbus voltage and current measurement */
+ return regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
+ AXP20X_ADC_EN1_VBUS_CURR |
+ AXP20X_ADC_EN1_VBUS_VOLT,
+ AXP20X_ADC_EN1_VBUS_CURR |
+ AXP20X_ADC_EN1_VBUS_VOLT);
+}
+
+static int axp717_configure_adc_registers(struct axp20x_usb_power *power)
+{
+ /* Enable vbus voltage measurement */
+ return regmap_update_bits(power->regmap, AXP717_ADC_CH_EN_CONTROL,
+ AXP717_ADC_EN_VBUS_VOLT,
+ AXP717_ADC_EN_VBUS_VOLT);
+}
+
static enum power_supply_property axp20x_usb_power_properties[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
@@ -403,6 +646,16 @@ static enum power_supply_property axp22x_usb_power_properties[] = {
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
+static enum power_supply_property axp717_usb_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
static enum power_supply_property axp813_usb_power_properties[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
@@ -412,13 +665,6 @@ static enum power_supply_property axp813_usb_power_properties[] = {
POWER_SUPPLY_PROP_USB_TYPE,
};
-static enum power_supply_usb_type axp813_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
-};
-
static const struct power_supply_desc axp20x_usb_power_desc = {
.name = "axp20x-usb",
.type = POWER_SUPPLY_TYPE_USB,
@@ -439,6 +685,20 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.set_property = axp20x_usb_power_set_property,
};
+static const struct power_supply_desc axp717_usb_power_desc = {
+ .name = "axp20x-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = axp717_usb_power_properties,
+ .num_properties = ARRAY_SIZE(axp717_usb_power_properties),
+ .property_is_writeable = axp717_usb_power_prop_writeable,
+ .get_property = axp717_usb_power_get_property,
+ .set_property = axp717_usb_power_set_property,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
+};
+
static const struct power_supply_desc axp813_usb_power_desc = {
.name = "axp20x-usb",
.type = POWER_SUPPLY_TYPE_USB,
@@ -447,8 +707,10 @@ static const struct power_supply_desc axp813_usb_power_desc = {
.property_is_writeable = axp20x_usb_power_prop_writeable,
.get_property = axp20x_usb_power_get_property,
.set_property = axp20x_usb_power_set_property,
- .usb_types = axp813_usb_types,
- .num_usb_types = ARRAY_SIZE(axp813_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
};
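This hunk is one instance of a conversion repeated throughout this series: power_supply_desc loses the usb_types array and its num_usb_types count, and usb_types becomes a bitmask built from BIT(POWER_SUPPLY_USB_TYPE_*). Under that assumption, a consumer can test support with a mask instead of scanning an array, roughly:

	/* Sketch, assuming usb_types is now a bitmask of BIT(type) values. */
	static bool psy_supports_usb_type(const struct power_supply_desc *desc,
					  enum power_supply_usb_type type)
	{
		return desc->usb_types & BIT(type);
	}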
static const char * const axp20x_irq_names[] = {
@@ -463,6 +725,12 @@ static const char * const axp22x_irq_names[] = {
"VBUS_REMOVAL",
};
+static const char * const axp717_irq_names[] = {
+ "VBUS_PLUGIN",
+ "VBUS_REMOVAL",
+ "VBUS_OVER_V",
+};
+
static int axp192_usb_curr_lim_table[] = {
-1,
-1,
@@ -505,6 +773,9 @@ static const struct axp_data axp192_data = {
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP192_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
+ .axp20x_read_vbus = &axp20x_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp20x_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp20x_configure_adc_registers,
};
static const struct axp_data axp202_data = {
@@ -516,6 +787,9 @@ static const struct axp_data axp202_data = {
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_valid_bit = REG_FIELD(AXP20X_USB_OTG_STATUS, 2, 2),
.vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3),
+ .axp20x_read_vbus = &axp20x_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp20x_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp20x_configure_adc_registers,
};
static const struct axp_data axp221_data = {
@@ -526,6 +800,9 @@ static const struct axp_data axp221_data = {
.curr_lim_table_size = ARRAY_SIZE(axp221_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
+ .axp20x_read_vbus = &axp20x_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp20x_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp20x_configure_adc_registers,
};
static const struct axp_data axp223_data = {
@@ -536,6 +813,23 @@ static const struct axp_data axp223_data = {
.curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table),
.curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1),
.vbus_needs_polling = true,
+ .axp20x_read_vbus = &axp20x_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp20x_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp20x_configure_adc_registers,
+};
+
+static const struct axp_data axp717_data = {
+ .power_desc = &axp717_usb_power_desc,
+ .irq_names = axp717_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp717_irq_names),
+ .curr_lim_fld = REG_FIELD(AXP717_INPUT_CUR_LIMIT_CTRL, 0, 5),
+ .usb_bc_en_bit = REG_FIELD(AXP717_MODULE_EN_CONTROL_1, 4, 4),
+ .usb_bc_det_fld = REG_FIELD(AXP717_BC_DETECT, 5, 7),
+ .vbus_mon_bit = REG_FIELD(AXP717_ADC_CH_EN_CONTROL, 2, 2),
+ .vbus_needs_polling = false,
+ .axp20x_read_vbus = &axp717_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp717_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp717_configure_adc_registers,
};
static const struct axp_data axp813_data = {
@@ -549,6 +843,9 @@ static const struct axp_data axp813_data = {
.usb_bc_det_fld = REG_FIELD(AXP288_BC_DET_STAT, 5, 7),
.vbus_disable_bit = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 7, 7),
.vbus_needs_polling = true,
+ .axp20x_read_vbus = &axp20x_usb_power_poll_vbus,
+ .axp20x_cfg_iio_chan = axp20x_configure_iio_channels,
+ .axp20x_cfg_adc_reg = axp20x_configure_adc_registers,
};
#ifdef CONFIG_PM_SLEEP
@@ -590,36 +887,6 @@ static int axp20x_usb_power_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(axp20x_usb_power_pm_ops, axp20x_usb_power_suspend,
axp20x_usb_power_resume);
-static int configure_iio_channels(struct platform_device *pdev,
- struct axp20x_usb_power *power)
-{
- power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
- if (IS_ERR(power->vbus_v)) {
- if (PTR_ERR(power->vbus_v) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(power->vbus_v);
- }
-
- power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i");
- if (IS_ERR(power->vbus_i)) {
- if (PTR_ERR(power->vbus_i) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(power->vbus_i);
- }
-
- return 0;
-}
-
-static int configure_adc_registers(struct axp20x_usb_power *power)
-{
- /* Enable vbus voltage and current measurement */
- return regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
- AXP20X_ADC_EN1_VBUS_CURR |
- AXP20X_ADC_EN1_VBUS_VOLT,
- AXP20X_ADC_EN1_VBUS_CURR |
- AXP20X_ADC_EN1_VBUS_VOLT);
-}
-
static int axp20x_regmap_field_alloc_optional(struct device *dev,
struct regmap *regmap,
struct reg_field fdesc,
@@ -640,6 +907,18 @@ static int axp20x_regmap_field_alloc_optional(struct device *dev,
return 0;
}
+/* Optionally allow users to specify a maximum charging current. */
+static void axp20x_usb_power_parse_dt(struct device *dev,
+ struct axp20x_usb_power *power)
+{
+ int ret;
+
+ ret = device_property_read_u32(dev, "input-current-limit-microamp",
+ &power->max_input_cur);
+ if (ret)
+ dev_dbg(dev, "%s() no input-current-limit specified\n", __func__);
+}
+
static int axp20x_usb_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -676,6 +955,8 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (IS_ERR(power->curr_lim_fld))
return PTR_ERR(power->curr_lim_fld);
+ axp20x_usb_power_parse_dt(&pdev->dev, power);
+
ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap,
axp_data->vbus_valid_bit,
&power->vbus_valid_bit);
@@ -707,7 +988,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
ret = devm_delayed_work_autocancel(&pdev->dev, &power->vbus_detect,
- axp20x_usb_power_poll_vbus);
+ axp_data->axp20x_read_vbus);
if (ret)
return ret;
@@ -718,9 +999,9 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
if (IS_ENABLED(CONFIG_AXP20X_ADC))
- ret = configure_iio_channels(pdev, power);
+ ret = axp_data->axp20x_cfg_iio_chan(pdev, power);
else
- ret = configure_adc_registers(power);
+ ret = axp_data->axp20x_cfg_adc_reg(power);
if (ret)
return ret;
@@ -779,6 +1060,9 @@ static const struct of_device_id axp20x_usb_power_match[] = {
.compatible = "x-powers,axp223-usb-power-supply",
.data = &axp223_data,
}, {
+ .compatible = "x-powers,axp717-usb-power-supply",
+ .data = &axp717_data,
+ }, {
.compatible = "x-powers,axp813-usb-power-supply",
.data = &axp813_data,
}, { /* sentinel */ }
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index 1a935bc88510..5514d1896bb8 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -334,14 +334,6 @@ static const int bq25618_619_ichg_values[] = {
1290000, 1360000, 1430000, 1500000
};
-static enum power_supply_usb_type bq256xx_usb_type[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_ACA,
-};
-
static int bq256xx_array_parse(int array_size, int val, const int array[])
{
int i = 0;
@@ -1252,8 +1244,11 @@ static int bq256xx_property_is_writeable(struct power_supply *psy,
static const struct power_supply_desc bq256xx_power_supply_desc = {
.name = "bq256xx-charger",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = bq256xx_usb_type,
- .num_usb_types = ARRAY_SIZE(bq256xx_usb_type),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = bq256xx_power_supply_props,
.num_properties = ARRAY_SIZE(bq256xx_power_supply_props),
.get_property = bq256xx_get_charger_property,
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index cebca34ff872..91e7292d86bb 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -904,7 +904,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
psy_cfg.supplied_to = cpcap_charger_supplied_to;
- psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to),
+ psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to);
ddata->usb = devm_power_supply_register(ddata->dev,
&cpcap_charger_usb_desc,
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 8008e31c0c09..bed3e2e9bfea 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -73,17 +73,6 @@ static enum power_supply_property cros_usbpd_dedicated_charger_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
-static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_DRP,
- POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID
-};
-
/* Input voltage/current limit in mV/mA. Default to none. */
static u16 input_voltage_limit = EC_POWER_LIMIT_NONE;
static u16 input_current_limit = EC_POWER_LIMIT_NONE;
@@ -643,9 +632,14 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
psy_desc->properties = cros_usbpd_charger_props;
psy_desc->num_properties =
ARRAY_SIZE(cros_usbpd_charger_props);
- psy_desc->usb_types = cros_usbpd_charger_usb_types;
- psy_desc->num_usb_types =
- ARRAY_SIZE(cros_usbpd_charger_usb_types);
+ psy_desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) |
+ BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) |
+ BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID);
}
psy_desc->name = port->name;
diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c
index d4d422cc5353..f98f65e00831 100644
--- a/drivers/power/supply/lenovo_yoga_c630_battery.c
+++ b/drivers/power/supply/lenovo_yoga_c630_battery.c
@@ -353,15 +353,10 @@ static enum power_supply_property yoga_c630_psy_adpt_properties[] = {
POWER_SUPPLY_PROP_USB_TYPE,
};
-static const enum power_supply_usb_type yoga_c630_psy_adpt_usb_type[] = {
- POWER_SUPPLY_USB_TYPE_C,
-};
-
static const struct power_supply_desc yoga_c630_psy_adpt_psy_desc = {
.name = "yoga-c630-adapter",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = yoga_c630_psy_adpt_usb_type,
- .num_usb_types = ARRAY_SIZE(yoga_c630_psy_adpt_usb_type),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_C),
.properties = yoga_c630_psy_adpt_properties,
.num_properties = ARRAY_SIZE(yoga_c630_psy_adpt_properties),
.get_property = yoga_c630_psy_adpt_get_property,
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index e7d37e422c3f..496c3e1f2ee6 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
/* program interrupt thresholds such that we should
* get interrupt for every 'off' perc change in the soc
*/
- regmap_read(map, MAX17042_RepSOC, &soc);
+ if (chip->pdata->enable_current_sense)
+ regmap_read(map, MAX17042_RepSOC, &soc);
+ else
+ regmap_read(map, MAX17042_VFSOC, &soc);
soc >>= 8;
soc_tr = (soc + off) << 8;
if (off < soc)
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
index edc262f0a62f..2bc3dce963a3 100644
--- a/drivers/power/supply/max1720x_battery.c
+++ b/drivers/power/supply/max1720x_battery.c
@@ -10,13 +10,16 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/nvmem-provider.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
/* Nonvolatile registers */
+#define MAX1720X_NXTABLE0 0x80
#define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */
+#define MAX1720X_NDEVICE_NAME4 0xDF
/* ModelGauge m5 */
#define MAX172XX_STATUS 0x00 /* Status */
@@ -46,6 +49,8 @@ static const char *const max17205_model = "MAX17205";
struct max1720x_device_info {
struct regmap *regmap;
+ struct regmap *regmap_nv;
+ struct i2c_client *ancillary;
int rsense;
};
@@ -106,6 +111,134 @@ static const struct regmap_config max1720x_regmap_cfg = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_range max1720x_nvmem_allow[] = {
+ regmap_reg_range(MAX1720X_NXTABLE0, MAX1720X_NDEVICE_NAME4),
+};
+
+static const struct regmap_range max1720x_nvmem_deny[] = {
+ regmap_reg_range(0x00, 0x7F),
+ regmap_reg_range(0xE0, 0xFF),
+};
+
+static const struct regmap_access_table max1720x_nvmem_regs = {
+ .yes_ranges = max1720x_nvmem_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1720x_nvmem_allow),
+ .no_ranges = max1720x_nvmem_deny,
+ .n_no_ranges = ARRAY_SIZE(max1720x_nvmem_deny),
+};
+
+static const struct regmap_config max1720x_nvmem_regmap_cfg = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX1720X_NDEVICE_NAME4,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &max1720x_nvmem_regs,
+};
+
+static const struct nvmem_cell_info max1720x_nvmem_cells[] = {
+ { .name = "nXTable0", .offset = 0, .bytes = 2, },
+ { .name = "nXTable1", .offset = 2, .bytes = 2, },
+ { .name = "nXTable2", .offset = 4, .bytes = 2, },
+ { .name = "nXTable3", .offset = 6, .bytes = 2, },
+ { .name = "nXTable4", .offset = 8, .bytes = 2, },
+ { .name = "nXTable5", .offset = 10, .bytes = 2, },
+ { .name = "nXTable6", .offset = 12, .bytes = 2, },
+ { .name = "nXTable7", .offset = 14, .bytes = 2, },
+ { .name = "nXTable8", .offset = 16, .bytes = 2, },
+ { .name = "nXTable9", .offset = 18, .bytes = 2, },
+ { .name = "nXTable10", .offset = 20, .bytes = 2, },
+ { .name = "nXTable11", .offset = 22, .bytes = 2, },
+ { .name = "nUser18C", .offset = 24, .bytes = 2, },
+ { .name = "nUser18D", .offset = 26, .bytes = 2, },
+ { .name = "nODSCTh", .offset = 28, .bytes = 2, },
+ { .name = "nODSCCfg", .offset = 30, .bytes = 2, },
+
+ { .name = "nOCVTable0", .offset = 32, .bytes = 2, },
+ { .name = "nOCVTable1", .offset = 34, .bytes = 2, },
+ { .name = "nOCVTable2", .offset = 36, .bytes = 2, },
+ { .name = "nOCVTable3", .offset = 38, .bytes = 2, },
+ { .name = "nOCVTable4", .offset = 40, .bytes = 2, },
+ { .name = "nOCVTable5", .offset = 42, .bytes = 2, },
+ { .name = "nOCVTable6", .offset = 44, .bytes = 2, },
+ { .name = "nOCVTable7", .offset = 46, .bytes = 2, },
+ { .name = "nOCVTable8", .offset = 48, .bytes = 2, },
+ { .name = "nOCVTable9", .offset = 50, .bytes = 2, },
+ { .name = "nOCVTable10", .offset = 52, .bytes = 2, },
+ { .name = "nOCVTable11", .offset = 54, .bytes = 2, },
+ { .name = "nIChgTerm", .offset = 56, .bytes = 2, },
+ { .name = "nFilterCfg", .offset = 58, .bytes = 2, },
+ { .name = "nVEmpty", .offset = 60, .bytes = 2, },
+ { .name = "nLearnCfg", .offset = 62, .bytes = 2, },
+
+ { .name = "nQRTable00", .offset = 64, .bytes = 2, },
+ { .name = "nQRTable10", .offset = 66, .bytes = 2, },
+ { .name = "nQRTable20", .offset = 68, .bytes = 2, },
+ { .name = "nQRTable30", .offset = 70, .bytes = 2, },
+ { .name = "nCycles", .offset = 72, .bytes = 2, },
+ { .name = "nFullCapNom", .offset = 74, .bytes = 2, },
+ { .name = "nRComp0", .offset = 76, .bytes = 2, },
+ { .name = "nTempCo", .offset = 78, .bytes = 2, },
+ { .name = "nIAvgEmpty", .offset = 80, .bytes = 2, },
+ { .name = "nFullCapRep", .offset = 82, .bytes = 2, },
+ { .name = "nVoltTemp", .offset = 84, .bytes = 2, },
+ { .name = "nMaxMinCurr", .offset = 86, .bytes = 2, },
+ { .name = "nMaxMinVolt", .offset = 88, .bytes = 2, },
+ { .name = "nMaxMinTemp", .offset = 90, .bytes = 2, },
+ { .name = "nSOC", .offset = 92, .bytes = 2, },
+ { .name = "nTimerH", .offset = 94, .bytes = 2, },
+
+ { .name = "nConfig", .offset = 96, .bytes = 2, },
+ { .name = "nRippleCfg", .offset = 98, .bytes = 2, },
+ { .name = "nMiscCfg", .offset = 100, .bytes = 2, },
+ { .name = "nDesignCap", .offset = 102, .bytes = 2, },
+ { .name = "nHibCfg", .offset = 104, .bytes = 2, },
+ { .name = "nPackCfg", .offset = 106, .bytes = 2, },
+ { .name = "nRelaxCfg", .offset = 108, .bytes = 2, },
+ { .name = "nConvgCfg", .offset = 110, .bytes = 2, },
+ { .name = "nNVCfg0", .offset = 112, .bytes = 2, },
+ { .name = "nNVCfg1", .offset = 114, .bytes = 2, },
+ { .name = "nNVCfg2", .offset = 116, .bytes = 2, },
+ { .name = "nSBSCfg", .offset = 118, .bytes = 2, },
+ { .name = "nROMID0", .offset = 120, .bytes = 2, },
+ { .name = "nROMID1", .offset = 122, .bytes = 2, },
+ { .name = "nROMID2", .offset = 124, .bytes = 2, },
+ { .name = "nROMID3", .offset = 126, .bytes = 2, },
+
+ { .name = "nVAlrtTh", .offset = 128, .bytes = 2, },
+ { .name = "nTAlrtTh", .offset = 130, .bytes = 2, },
+ { .name = "nSAlrtTh", .offset = 132, .bytes = 2, },
+ { .name = "nIAlrtTh", .offset = 134, .bytes = 2, },
+ { .name = "nUser1C4", .offset = 136, .bytes = 2, },
+ { .name = "nUser1C5", .offset = 138, .bytes = 2, },
+ { .name = "nFullSOCThr", .offset = 140, .bytes = 2, },
+ { .name = "nTTFCfg", .offset = 142, .bytes = 2, },
+ { .name = "nCGain", .offset = 144, .bytes = 2, },
+ { .name = "nTCurve", .offset = 146, .bytes = 2, },
+ { .name = "nTGain", .offset = 148, .bytes = 2, },
+ { .name = "nTOff", .offset = 150, .bytes = 2, },
+ { .name = "nManfctrName0", .offset = 152, .bytes = 2, },
+ { .name = "nManfctrName1", .offset = 154, .bytes = 2, },
+ { .name = "nManfctrName2", .offset = 156, .bytes = 2, },
+ { .name = "nRSense", .offset = 158, .bytes = 2, },
+
+ { .name = "nUser1D0", .offset = 160, .bytes = 2, },
+ { .name = "nUser1D1", .offset = 162, .bytes = 2, },
+ { .name = "nAgeFcCfg", .offset = 164, .bytes = 2, },
+ { .name = "nDesignVoltage", .offset = 166, .bytes = 2, },
+ { .name = "nUser1D4", .offset = 168, .bytes = 2, },
+ { .name = "nRFastVShdn", .offset = 170, .bytes = 2, },
+ { .name = "nManfctrDate", .offset = 172, .bytes = 2, },
+ { .name = "nFirstUsed", .offset = 174, .bytes = 2, },
+ { .name = "nSerialNumber0", .offset = 176, .bytes = 2, },
+ { .name = "nSerialNumber1", .offset = 178, .bytes = 2, },
+ { .name = "nSerialNumber2", .offset = 180, .bytes = 2, },
+ { .name = "nDeviceName0", .offset = 182, .bytes = 2, },
+ { .name = "nDeviceName1", .offset = 184, .bytes = 2, },
+ { .name = "nDeviceName2", .offset = 186, .bytes = 2, },
+ { .name = "nDeviceName3", .offset = 188, .bytes = 2, },
+ { .name = "nDeviceName4", .offset = 190, .bytes = 2, },
+};
+
static const enum power_supply_property max1720x_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -249,30 +382,80 @@ static int max1720x_battery_get_property(struct power_supply *psy,
return ret;
}
-static int max1720x_probe_sense_resistor(struct i2c_client *client,
- struct max1720x_device_info *info)
+static
+int max1720x_nvmem_reg_read(void *priv, unsigned int off, void *val, size_t len)
+{
+ struct max1720x_device_info *info = priv;
+ unsigned int reg = MAX1720X_NXTABLE0 + (off / 2);
+
+ return regmap_bulk_read(info->regmap_nv, reg, val, len / 2);
+}
+
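Each nvmem cell in the table above is two bytes wide, so a byte offset into the cell table maps to the 16-bit register at MAX1720X_NXTABLE0 + off / 2, and regmap_bulk_read() fetches len / 2 registers. As a concrete check: the nRSense cell sits at byte offset 158, which resolves to 0x80 + 79 = 0xCF, matching MAX1720X_NRSENSE defined earlier in this file.

	/* Illustrative mapping, mirroring max1720x_nvmem_reg_read() above. */
	unsigned int off = 158;			/* "nRSense" cell offset */
	unsigned int reg = 0x80 + off / 2;	/* == 0xCF (MAX1720X_NRSENSE) */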
+static void max1720x_unregister_ancillary(void *data)
+{
+ struct max1720x_device_info *info = data;
+
+ i2c_unregister_device(info->ancillary);
+}
+
+static int max1720x_probe_nvmem(struct i2c_client *client,
+ struct max1720x_device_info *info)
{
struct device *dev = &client->dev;
- struct i2c_client *ancillary;
+ struct nvmem_config nvmem_config = {
+ .dev = dev,
+ .name = "max1720x_nvmem",
+ .cells = max1720x_nvmem_cells,
+ .ncells = ARRAY_SIZE(max1720x_nvmem_cells),
+ .read_only = true,
+ .root_only = true,
+ .reg_read = max1720x_nvmem_reg_read,
+ .size = ARRAY_SIZE(max1720x_nvmem_cells) * 2,
+ .word_size = 2,
+ .stride = 2,
+ .priv = info,
+ };
+ struct nvmem_device *nvmem;
+ unsigned int val;
int ret;
- ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb);
- if (IS_ERR(ancillary)) {
+ info->ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb);
+ if (IS_ERR(info->ancillary)) {
dev_err(dev, "Failed to initialize ancillary i2c device\n");
- return PTR_ERR(ancillary);
+ return PTR_ERR(info->ancillary);
}
- ret = i2c_smbus_read_word_data(ancillary, MAX1720X_NRSENSE);
- i2c_unregister_device(ancillary);
- if (ret < 0)
+ ret = devm_add_action_or_reset(dev, max1720x_unregister_ancillary, info);
+ if (ret) {
+ dev_err(dev, "Failed to add unregister callback\n");
return ret;
+ }
+
+ info->regmap_nv = devm_regmap_init_i2c(info->ancillary,
+ &max1720x_nvmem_regmap_cfg);
+ if (IS_ERR(info->regmap_nv)) {
+ dev_err(dev, "regmap initialization of nvmem failed\n");
+ return PTR_ERR(info->regmap_nv);
+ }
+
+ ret = regmap_read(info->regmap_nv, MAX1720X_NRSENSE, &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read sense resistor value\n");
+ return ret;
+ }
- info->rsense = ret;
+ info->rsense = val;
if (!info->rsense) {
dev_warn(dev, "RSense not calibrated, set 10 mOhms!\n");
info->rsense = 1000; /* in regs in 10^-5 */
}
+ nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "Could not register nvmem!");
+ return PTR_ERR(nvmem);
+ }
+
return 0;
}
@@ -299,15 +482,15 @@ static int max1720x_probe(struct i2c_client *client)
psy_cfg.drv_data = info;
psy_cfg.fwnode = dev_fwnode(dev);
+ i2c_set_clientdata(client, info);
info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg);
if (IS_ERR(info->regmap))
return dev_err_probe(dev, PTR_ERR(info->regmap),
"regmap initialization failed\n");
- ret = max1720x_probe_sense_resistor(client, info);
+ ret = max1720x_probe_nvmem(client, info);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to read sense resistor value\n");
+ return dev_err_probe(dev, ret, "Failed to probe nvmem\n");
bat = devm_power_supply_register(dev, &max1720x_bat_desc, &psy_cfg);
if (IS_ERR(bat))
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 2001e12c9f7d..4caac142c428 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -197,12 +197,58 @@ static int max77693_get_online(struct regmap *regmap, int *val)
return 0;
}
+/*
+ * There are *two* current limit registers:
+ * - CHGIN limit, which limits the input current from the external charger;
+ * - Fast charge current limit, which limits the current going to the battery.
+ */
+
+static int max77693_get_input_current_limit(struct regmap *regmap, int *val)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(regmap, MAX77693_CHG_REG_CHG_CNFG_09, &data);
+ if (ret < 0)
+ return ret;
+
+ data &= CHG_CNFG_09_CHGIN_ILIM_MASK;
+ data >>= CHG_CNFG_09_CHGIN_ILIM_SHIFT;
+
+ if (data <= 0x03)
+ /* The first four values (0x00..0x03) are 60mA */
+ *val = 60000;
+ else
+ *val = data * 20000; /* 20mA steps */
+
+ return 0;
+}
+
+static int max77693_get_fast_charge_current(struct regmap *regmap, int *val)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(regmap, MAX77693_CHG_REG_CHG_CNFG_02, &data);
+ if (ret < 0)
+ return ret;
+
+ data &= CHG_CNFG_02_CC_MASK;
+ data >>= CHG_CNFG_02_CC_SHIFT;
+
+ *val = data * 33300; /* 33.3mA steps */
+
+ return 0;
+}
+
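Both getters decode a raw register field straight into microamps: CHGIN values 0x00..0x03 all mean 60mA and each value above that is worth 20mA, while the fast-charge field is linear at 33.3mA per step. A hedged stand-alone decode using the constants from the code above:

	/* Illustrative decode of the two MAX77693 current-limit fields. */
	static int chgin_ilim_ua(unsigned int data)
	{
		return data <= 0x03 ? 60000 : data * 20000; /* 0x19 -> 500mA */
	}

	static int fast_charge_ua(unsigned int data)
	{
		return data * 33300;			    /* 0x0f -> ~500mA */
	}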
static enum power_supply_property max77693_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
@@ -231,6 +277,12 @@ static int max77693_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
ret = max77693_get_online(regmap, &val->intval);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = max77693_get_input_current_limit(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = max77693_get_fast_charge_current(regmap, &val->intval);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = max77693_charger_model;
break;
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index c26023b19f26..418b882b163d 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -191,6 +191,7 @@ static const struct platform_device_id max8998_battery_id[] = {
{ "max8998-battery", TYPE_MAX8998 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8998_battery_id);
static struct platform_driver max8998_battery_driver = {
.driver = {
diff --git a/drivers/power/supply/mp2629_charger.c b/drivers/power/supply/mp2629_charger.c
index 3a2a28fbba73..d281c1059629 100644
--- a/drivers/power/supply/mp2629_charger.c
+++ b/drivers/power/supply/mp2629_charger.c
@@ -94,14 +94,6 @@ struct mp2629_prop {
int shift;
};
-static enum power_supply_usb_type mp2629_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_PD_DRP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN
-};
-
static enum power_supply_property mp2629_charger_usb_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
@@ -487,8 +479,11 @@ unlock:
static const struct power_supply_desc mp2629_usb_desc = {
.name = "mp2629_usb",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = mp2629_usb_types,
- .num_usb_types = ARRAY_SIZE(mp2629_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = mp2629_charger_usb_props,
.num_properties = ARRAY_SIZE(mp2629_charger_usb_props),
.get_property = mp2629_charger_usb_get_prop,
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index aca123783efc..e99e55148976 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -154,13 +154,6 @@ enum mt6360_pmu_chg_type {
MT6360_CHG_TYPE_MAX,
};
-static enum power_supply_usb_type mt6360_charger_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
-};
-
static int mt6360_get_chrdet_ext_stat(struct mt6360_chg_info *mci,
bool *pwr_rdy)
{
@@ -574,8 +567,10 @@ static const struct power_supply_desc mt6360_charger_desc = {
.get_property = mt6360_charger_get_property,
.set_property = mt6360_charger_set_property,
.property_is_writeable = mt6360_charger_property_is_writeable,
- .usb_types = mt6360_charger_usb_types,
- .num_usb_types = ARRAY_SIZE(mt6360_charger_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
};
static const struct regulator_ops mt6360_chg_otg_ops = {
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index e24fce087d80..ad8793bf997e 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -624,13 +624,6 @@ static enum power_supply_property mt6370_chg_properties[] = {
POWER_SUPPLY_PROP_USB_TYPE,
};
-static enum power_supply_usb_type mt6370_chg_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_DCP,
-};
-
static const struct power_supply_desc mt6370_chg_psy_desc = {
.name = "mt6370-charger",
.type = POWER_SUPPLY_TYPE_USB,
@@ -639,8 +632,10 @@ static const struct power_supply_desc mt6370_chg_psy_desc = {
.get_property = mt6370_chg_get_property,
.set_property = mt6370_chg_set_property,
.property_is_writeable = mt6370_chg_property_is_writeable,
- .usb_types = mt6370_chg_usb_types,
- .num_usb_types = ARRAY_SIZE(mt6370_chg_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
};
static const struct regulator_ops mt6370_chg_otg_ops = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 8f6025acd10a..49534458a9f7 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -9,6 +9,7 @@
* Modified: 2004, Oct Szabolcs Gyurko
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -756,10 +757,10 @@ int power_supply_get_battery_info(struct power_supply *psy,
for (index = 0; index < len; index++) {
struct power_supply_battery_ocv_table *table;
- char *propname;
int i, tab_len, size;
- propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
+ char *propname __free(kfree) = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d",
+ index);
if (!propname) {
power_supply_put_battery_info(psy, info);
err = -ENOMEM;
@@ -768,13 +769,11 @@ int power_supply_get_battery_info(struct power_supply *psy,
list = of_get_property(battery_np, propname, &size);
if (!list || !size) {
dev_err(&psy->dev, "failed to get %s\n", propname);
- kfree(propname);
power_supply_put_battery_info(psy, info);
err = -EINVAL;
goto out_put_node;
}
- kfree(propname);
tab_len = size / (2 * sizeof(__be32));
info->ocv_table_size[index] = tab_len;
@@ -1232,11 +1231,7 @@ EXPORT_SYMBOL_GPL(power_supply_set_property);
int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- if (atomic_read(&psy->use_cnt) <= 0 ||
- !psy->desc->property_is_writeable)
- return -ENODEV;
-
- return psy->desc->property_is_writeable(psy, psp);
+ return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp);
}
EXPORT_SYMBOL_GPL(power_supply_property_is_writeable);
@@ -1296,7 +1291,7 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
return ret;
}
-static struct thermal_zone_device_ops psy_tzd_ops = {
+static const struct thermal_zone_device_ops psy_tzd_ops = {
.get_temp = power_supply_read_temp,
};
@@ -1361,10 +1356,6 @@ __power_supply_register(struct device *parent,
pr_warn("%s: Expected proper parent device for '%s'\n",
__func__, desc->name);
- if (psy_has_property(desc, POWER_SUPPLY_PROP_USB_TYPE) &&
- (!desc->usb_types || !desc->num_usb_types))
- return ERR_PTR(-EINVAL);
-
psy = kzalloc(sizeof(*psy), GFP_KERNEL);
if (!psy)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index baacefbdf768..6fbbfb1c685e 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -318,7 +318,8 @@ static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
HWMON_T_INPUT |
HWMON_T_MAX |
HWMON_T_MIN |
- HWMON_T_MIN_ALARM,
+ HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM,
HWMON_T_LABEL |
HWMON_T_INPUT |
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3e63d165b2f7..16b3c5880cd8 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -209,7 +209,7 @@ static struct power_supply_attr power_supply_attrs[] = {
POWER_SUPPLY_ATTR(TIME_TO_FULL_NOW),
POWER_SUPPLY_ATTR(TIME_TO_FULL_AVG),
POWER_SUPPLY_ENUM_ATTR(TYPE),
- POWER_SUPPLY_ATTR(USB_TYPE),
+ POWER_SUPPLY_ENUM_ATTR(USB_TYPE),
POWER_SUPPLY_ENUM_ATTR(SCOPE),
POWER_SUPPLY_ATTR(PRECHARGE_CURRENT),
POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT),
@@ -237,31 +237,28 @@ static enum power_supply_property dev_attr_psp(struct device_attribute *attr)
return to_ps_attr(attr) - power_supply_attrs;
}
-static ssize_t power_supply_show_usb_type(struct device *dev,
- const struct power_supply_desc *desc,
- union power_supply_propval *value,
- char *buf)
+static ssize_t power_supply_show_enum_with_available(
+ struct device *dev, const char * const labels[], int label_count,
+ unsigned int available_values, int value, char *buf)
{
- enum power_supply_usb_type usb_type;
+ bool match = false, available, active;
ssize_t count = 0;
- bool match = false;
int i;
- for (i = 0; i < desc->num_usb_types; ++i) {
- usb_type = desc->usb_types[i];
+ for (i = 0; i < label_count; i++) {
+ available = available_values & BIT(i);
+ active = i == value;
- if (value->intval == usb_type) {
- count += sysfs_emit_at(buf, count, "[%s] ",
- POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
+ if (available && active) {
+ count += sysfs_emit_at(buf, count, "[%s] ", labels[i]);
match = true;
- } else {
- count += sysfs_emit_at(buf, count, "%s ",
- POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
+ } else if (available) {
+ count += sysfs_emit_at(buf, count, "%s ", labels[i]);
}
}
if (!match) {
- dev_warn(dev, "driver reporting unsupported connected type\n");
+ dev_warn(dev, "driver reporting unavailable enum value %d\n", value);
return -EINVAL;
}
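The consolidated helper renders any enum-with-availability property the same way: available labels separated by spaces, the active one in brackets, and the trailing space swapped for a newline. A user-space model of the loop (sysfs_emit_at() replaced by snprintf() for illustration; the label set is made up):

	/* Hypothetical model of power_supply_show_enum_with_available(). */
	#include <stdio.h>

	int main(void)
	{
		const char *labels[] = { "Unknown", "SDP", "DCP" };
		unsigned int available = (1u << 1) | (1u << 2);
		int value = 2, count = 0, i;
		char buf[64];

		for (i = 0; i < 3; i++) {
			if (!(available & (1u << i)))
				continue;
			count += snprintf(buf + count, sizeof(buf) - count,
					  i == value ? "[%s] " : "%s ", labels[i]);
		}
		if (count)
			buf[count - 1] = '\n';
		fputs(buf, stdout);	/* prints "SDP [DCP]" and a newline */
		return 0;
	}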
@@ -300,8 +297,10 @@ static ssize_t power_supply_show_property(struct device *dev,
switch (psp) {
case POWER_SUPPLY_PROP_USB_TYPE:
- ret = power_supply_show_usb_type(dev, psy->desc,
- &value, buf);
+ ret = power_supply_show_enum_with_available(
+ dev, POWER_SUPPLY_USB_TYPE_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_USB_TYPE_TEXT),
+ psy->desc->usb_types, value.intval, buf);
break;
case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
@@ -523,33 +522,10 @@ ssize_t power_supply_charge_behaviour_show(struct device *dev,
enum power_supply_charge_behaviour current_behaviour,
char *buf)
{
- bool match = false, available, active;
- ssize_t count = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT); i++) {
- available = available_behaviours & BIT(i);
- active = i == current_behaviour;
-
- if (available && active) {
- count += sysfs_emit_at(buf, count, "[%s] ",
- POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]);
- match = true;
- } else if (available) {
- count += sysfs_emit_at(buf, count, "%s ",
- POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]);
- }
- }
-
- if (!match) {
- dev_warn(dev, "driver reporting unsupported charge behaviour\n");
- return -EINVAL;
- }
-
- if (count)
- buf[count - 1] = '\n';
-
- return count;
+ return power_supply_show_enum_with_available(
+ dev, POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT),
+ available_behaviours, current_behaviour, buf);
}
EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_show);
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index 8b3df3ee59ba..f0a64c00ddaa 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -786,19 +786,6 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy,
return 0;
}
-static const enum power_supply_usb_type usb_psy_supported_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_ACA,
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_DRP,
- POWER_SUPPLY_USB_TYPE_PD_PPS,
- POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID,
-};
-
static const enum power_supply_property sc8280xp_usb_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
@@ -809,8 +796,16 @@ static const struct power_supply_desc sc8280xp_usb_psy_desc = {
.properties = sc8280xp_usb_props,
.num_properties = ARRAY_SIZE(sc8280xp_usb_props),
.get_property = qcom_battmgr_usb_get_property,
- .usb_types = usb_psy_supported_types,
- .num_usb_types = ARRAY_SIZE(usb_psy_supported_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) |
+ BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS) |
+ BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID),
};
static const enum power_supply_property sm8350_usb_props[] = {
@@ -829,8 +824,16 @@ static const struct power_supply_desc sm8350_usb_psy_desc = {
.properties = sm8350_usb_props,
.num_properties = ARRAY_SIZE(sm8350_usb_props),
.get_property = qcom_battmgr_usb_get_property,
- .usb_types = usb_psy_supported_types,
- .num_usb_types = ARRAY_SIZE(usb_psy_supported_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) |
+ BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS) |
+ BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID),
};
static const u8 sm8350_wls_prop_map[] = {
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 9bb777406013..81acbd8b2169 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -411,13 +411,6 @@ static enum power_supply_property smb2_properties[] = {
POWER_SUPPLY_PROP_USB_TYPE,
};
-static enum power_supply_usb_type smb2_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
-};
-
static int smb2_get_prop_usb_online(struct smb2_chip *chip, int *val)
{
unsigned int stat;
@@ -775,8 +768,10 @@ static irqreturn_t smb2_handle_wdog_bark(int irq, void *data)
static const struct power_supply_desc smb2_psy_desc = {
.name = "pmi8998_charger",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = smb2_usb_types,
- .num_usb_types = ARRAY_SIZE(smb2_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = smb2_properties,
.num_properties = ARRAY_SIZE(smb2_properties),
.get_property = smb2_get_property,
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 7ca91739c6cc..a3d377a32b49 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -673,11 +673,6 @@ static enum power_supply_property rk817_chg_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_AVG,
};
-static enum power_supply_usb_type rk817_usb_type[] = {
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
-};
-
static const struct power_supply_desc rk817_bat_desc = {
.name = "rk817-battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
@@ -689,8 +684,8 @@ static const struct power_supply_desc rk817_bat_desc = {
static const struct power_supply_desc rk817_chg_desc = {
.name = "rk817-charger",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = rk817_usb_type,
- .num_usb_types = ARRAY_SIZE(rk817_usb_type),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = rk817_chg_props,
.num_properties = ARRAY_SIZE(rk817_chg_props),
.get_property = rk817_chg_get_prop,
diff --git a/drivers/power/supply/rn5t618_power.c b/drivers/power/supply/rn5t618_power.c
index ebea3522a2ac..40dec55a9f73 100644
--- a/drivers/power/supply/rn5t618_power.c
+++ b/drivers/power/supply/rn5t618_power.c
@@ -70,13 +70,6 @@ struct rn5t618_power_info {
int irq;
};
-static enum power_supply_usb_type rn5t618_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN
-};
-
static enum power_supply_property rn5t618_usb_props[] = {
/* input current limit is not very accurate */
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
@@ -681,8 +674,10 @@ static const struct power_supply_desc rn5t618_adp_desc = {
static const struct power_supply_desc rn5t618_usb_desc = {
.name = "rn5t618-usb",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = rn5t618_usb_types,
- .num_usb_types = ARRAY_SIZE(rn5t618_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = rn5t618_usb_props,
.num_properties = ARRAY_SIZE(rn5t618_usb_props),
.get_property = rn5t618_usb_get_property,
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index fdfdc83ab045..235169c85c5d 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -630,13 +630,6 @@ out:
return ret;
}
-static const enum power_supply_usb_type rt9467_chg_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
-};
-
static const enum power_supply_property rt9467_chg_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
@@ -745,8 +738,6 @@ static int rt9467_psy_set_property(struct power_supply *psy,
RT9467_RANGE_IPREC, val->intval);
case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
return rt9467_psy_set_ieoc(data, val->intval);
- case POWER_SUPPLY_PROP_USB_TYPE:
- return regmap_field_write(data->rm_field[F_USBCHGEN], val->intval);
default:
return -EINVAL;
}
@@ -764,7 +755,6 @@ static int rt9467_chg_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
- case POWER_SUPPLY_PROP_USB_TYPE:
return 1;
default:
return 0;
@@ -774,8 +764,10 @@ static int rt9467_chg_prop_is_writeable(struct power_supply *psy,
static const struct power_supply_desc rt9467_chg_psy_desc = {
.name = "rt9467-charger",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = rt9467_chg_usb_types,
- .num_usb_types = ARRAY_SIZE(rt9467_chg_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.properties = rt9467_chg_properties,
.num_properties = ARRAY_SIZE(rt9467_chg_properties),
.property_is_writeable = rt9467_chg_prop_is_writeable,
diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
index 868b0703d15c..c04af1ee89c6 100644
--- a/drivers/power/supply/rt9471.c
+++ b/drivers/power/supply/rt9471.c
@@ -333,14 +333,6 @@ static enum power_supply_property rt9471_charger_properties[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-static enum power_supply_usb_type rt9471_charger_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID,
-};
-
static int rt9471_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
@@ -726,8 +718,11 @@ static int rt9471_register_psy(struct rt9471_chip *chip)
desc->name = psy_name;
desc->type = POWER_SUPPLY_TYPE_USB;
- desc->usb_types = rt9471_charger_usb_types;
- desc->num_usb_types = ARRAY_SIZE(rt9471_charger_usb_types);
+ desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN);
desc->properties = rt9471_charger_properties;
desc->num_properties = ARRAY_SIZE(rt9471_charger_properties);
desc->get_property = rt9471_charger_get_property;
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 7b9b0b3e164e..f3f1a0862e93 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -363,7 +363,7 @@ static int twl4030_charger_update_current(struct twl4030_bci *bci)
if (status < 0)
return status;
cur_reg |= oldreg << 8;
- if (reg != oldreg) {
+ if (reg != cur_reg) {
/* disable write protection for one write access for
* BCIIREF */
status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 7970843a4f48..7382bec6a43c 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -296,22 +296,17 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
return 0;
}
-static enum power_supply_usb_type ucs1002_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_SDP,
- POWER_SUPPLY_USB_TYPE_DCP,
- POWER_SUPPLY_USB_TYPE_CDP,
- POWER_SUPPLY_USB_TYPE_UNKNOWN,
-};
-
static int ucs1002_set_usb_type(struct ucs1002_info *info, int val)
{
unsigned int mode;
- if (val < 0 || val >= ARRAY_SIZE(ucs1002_usb_types))
- return -EINVAL;
-
- switch (ucs1002_usb_types[val]) {
+ switch (val) {
+ /*
+ * POWER_SUPPLY_USB_TYPE_UNKNOWN == 0; map it to dedicated mode for
+ * userspace API compatibility with older versions of this driver,
+ * which mapped 0 to dedicated.
+ */
+ case POWER_SUPPLY_USB_TYPE_UNKNOWN:
case POWER_SUPPLY_USB_TYPE_PD:
mode = V_SET_ACTIVE_MODE_DEDICATED;
break;
@@ -428,8 +423,11 @@ static int ucs1002_property_is_writeable(struct power_supply *psy,
static const struct power_supply_desc ucs1002_charger_desc = {
.name = "ucs1002",
.type = POWER_SUPPLY_TYPE_USB,
- .usb_types = ucs1002_usb_types,
- .num_usb_types = ARRAY_SIZE(ucs1002_usb_types),
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
.get_property = ucs1002_get_property,
.set_property = ucs1002_set_property,
.property_is_writeable = ucs1002_property_is_writeable,
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 3cffa6c79538..5e793b80fd6b 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -740,7 +740,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim)
{
struct rapl_primitive_info *rpi = rp->priv->rpi;
- if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi)
+ if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi)
return NULL;
return &rpi[prim];
@@ -1267,6 +1267,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_LAKEFIELD, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
@@ -1285,6 +1286,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
+ X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd),
{}
};
@@ -2128,6 +2130,21 @@ void rapl_remove_package(struct rapl_package *rp)
}
EXPORT_SYMBOL_GPL(rapl_remove_package);
+/*
+ * RAPL Package energy counter scope:
+ * 1. AMD/HYGON platforms use per-PKG package energy counter
+ * 2. For Intel platforms
+ * 2.1 CLX-AP platform has per-DIE package energy counter
+ * 2.2 Other platforms that use MSR RAPL are single-die systems, so
+ * the package energy counter can be considered per-PKG/per-DIE;
+ * here it is treated as per-DIE.
+ * 2.3 New platforms that use TPMI RAPL don't care about the
+ * scope because they are not MSR/CPU based.
+ */
+#define rapl_msrs_are_pkg_scope() \
+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+
/* caller to ensure CPU hotplug lock is held */
struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
bool id_is_cpu)
@@ -2135,8 +2152,14 @@ struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_
struct rapl_package *rp;
int uid;
- if (id_is_cpu)
- uid = topology_logical_die_id(id);
+ if (id_is_cpu) {
+ uid = rapl_msrs_are_pkg_scope() ?
+ topology_physical_package_id(id) : topology_logical_die_id(id);
+ if (uid < 0) {
+ pr_err("topology_logical_(package/die)_id() returned a negative value");
+ return NULL;
+ }
+ }
else
uid = id;
@@ -2168,9 +2191,14 @@ struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *pr
return ERR_PTR(-ENOMEM);
if (id_is_cpu) {
- rp->id = topology_logical_die_id(id);
+ rp->id = rapl_msrs_are_pkg_scope() ?
+ topology_physical_package_id(id) : topology_logical_die_id(id);
+ if ((int)(rp->id) < 0) {
+ pr_err("topology_logical_(package/die)_id() returned a negative value");
+ return ERR_PTR(-EINVAL);
+ }
rp->lead_cpu = id;
- if (topology_max_dies_per_package() > 1)
+ if (!rapl_msrs_are_pkg_scope() && topology_max_dies_per_package() > 1)
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
topology_physical_package_id(id), topology_die_id(id));
else
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 2067b0120d08..ea96a14d72d1 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -359,11 +359,15 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
extoff = NULL;
break;
}
- if (extoff->n_samples > PTP_MAX_SAMPLES
- || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
+ if (extoff->n_samples > PTP_MAX_SAMPLES ||
+ extoff->rsv[0] || extoff->rsv[1] ||
+ (extoff->clockid != CLOCK_REALTIME &&
+ extoff->clockid != CLOCK_MONOTONIC &&
+ extoff->clockid != CLOCK_MONOTONIC_RAW)) {
err = -EINVAL;
break;
}
+ sts.clockid = extoff->clockid;
for (i = 0; i < extoff->n_samples; i++) {
err = ptp->info->gettimex64(ptp->info, &ts, &sts);
if (err)
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index 92bb42c43fb2..d5732490ed9d 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -1171,10 +1171,10 @@ static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps,
caps->owner = THIS_MODULE;
caps->max_adj = DCO_MAX_PPB;
caps->n_per_out = MAX_PER_OUT;
- caps->n_ext_ts = MAX_PHC_PLL,
- caps->n_pins = max_pins,
- caps->adjphase = idt82p33_adjwritephase,
- caps->getmaxphase = idt82p33_getmaxphase,
+ caps->n_ext_ts = MAX_PHC_PLL;
+ caps->n_pins = max_pins;
+ caps->adjphase = idt82p33_adjwritephase;
+ caps->getmaxphase = idt82p33_getmaxphase;
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index e6f7d2bf8dde..14a23d3a27f2 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -562,12 +562,8 @@ static int ines_ts_info(struct mii_timestamper *mii_ts,
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = -1;
-
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index e7479b9b90cb..5feecaadde8e 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1558,22 +1558,24 @@ ptp_ocp_watchdog(struct timer_list *t)
static void
ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
{
- ktime_t start, end;
- ktime_t delay;
+ ktime_t start, end, delay = U64_MAX;
u32 ctrl;
+ int i;
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
+ for (i = 0; i < 3; i++) {
+ ctrl = ioread32(&bp->reg->ctrl);
+ ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
- iowrite32(ctrl, &bp->reg->ctrl);
+ iowrite32(ctrl, &bp->reg->ctrl);
- start = ktime_get_ns();
+ start = ktime_get_raw_ns();
- ctrl = ioread32(&bp->reg->ctrl);
+ ctrl = ioread32(&bp->reg->ctrl);
- end = ktime_get_ns();
+ end = ktime_get_raw_ns();
- delay = end - start;
+ delay = min(delay, end - start);
+ }
bp->ts_window_adjust = (delay >> 5) * 3;
}
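Taking the minimum of three ktime_get_raw_ns() round-trips filters out samples inflated by preemption or interrupts, and the window adjustment then keeps 3/32 (about 9.4%) of the best-case register-read latency:

	/* Illustrative: the fastest of the three sampled delays wins. */
	u64 delay = 640;			/* ns, best observed sample */
	u32 adjust = (delay >> 5) * 3;		/* 60ns, i.e. 3/32 of delay */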
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 3e53838990f5..0915c1e7df16 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -47,6 +47,13 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_ADP5585
+ tristate "ADP5585 PWM support"
+ depends on MFD_ADP5585
+ help
+ This option enables support for the PWM function found in the Analog
+ Devices ADP5585.
+
config PWM_APPLE
tristate "Apple SoC PWM support"
depends on ARCH_APPLE || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 0be4f3e6dd43..9081e0c0e9e0 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ADP5585) += pwm-adp5585.o
obj-$(CONFIG_PWM_APPLE) += pwm-apple.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 8acbcf5b6673..6e752e148b98 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -325,20 +325,19 @@ EXPORT_SYMBOL_GPL(pwm_adjust_config);
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout)
+static int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout)
{
- if (!pwm || !pwm->chip->ops)
- return -EINVAL;
+ struct pwm_chip *chip = pwm->chip;
+ const struct pwm_ops *ops = chip->ops;
- if (!pwm->chip->ops->capture)
+ if (!ops->capture)
return -ENOSYS;
guard(mutex)(&pwm_lock);
- return pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
+ return ops->capture(chip, pwm, result, timeout);
}
-EXPORT_SYMBOL_GPL(pwm_capture);
static struct pwm_chip *pwmchip_find_by_name(const char *name)
{
diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c
new file mode 100644
index 000000000000..40472ac5db64
--- /dev/null
+++ b/drivers/pwm/pwm-adp5585.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices ADP5585 PWM driver
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ *
+ * Limitations:
+ * - The .apply() operation executes atomically, but may not wait for the
+ * period to complete (this is not documented and would need to be tested).
+ * - Disabling the PWM drives the output pin to a low level immediately.
+ * - The hardware can only generate normal polarity output.
+ */
+
+#include <asm/byteorder.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/mfd/adp5585.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#define ADP5585_PWM_CHAN_NUM 1
+
+#define ADP5585_PWM_OSC_FREQ_HZ 1000000U
+#define ADP5585_PWM_MIN_PERIOD_NS (2ULL * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ)
+#define ADP5585_PWM_MAX_PERIOD_NS (2ULL * 0xffff * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ)
+
+static int pwm_adp5585_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+
+ /* Configure the R3 pin as PWM output. */
+ return regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C,
+ ADP5585_R3_EXTEND_CFG_MASK,
+ ADP5585_R3_EXTEND_CFG_PWM_OUT);
+}
+
+static void pwm_adp5585_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+
+ regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C,
+ ADP5585_R3_EXTEND_CFG_MASK,
+ ADP5585_R3_EXTEND_CFG_GPIO4);
+}
+
+static int pwm_adp5585_apply(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+ u64 period, duty_cycle;
+ u32 on, off;
+ __le16 val;
+ int ret;
+
+ if (!state->enabled) {
+ regmap_clear_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN);
+ regmap_clear_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN);
+ return 0;
+ }
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (state->period < ADP5585_PWM_MIN_PERIOD_NS)
+ return -EINVAL;
+
+ period = min(state->period, ADP5585_PWM_MAX_PERIOD_NS);
+ duty_cycle = min(state->duty_cycle, period);
+
+ /*
+ * Compute the on and off time. As the internal oscillator frequency is
+ * 1MHz, the calculation can be simplified without loss of precision.
+ */
+ on = div_u64(duty_cycle, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ);
+ off = div_u64(period, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) - on;
+
+ val = cpu_to_le16(off);
+ ret = regmap_bulk_write(regmap, ADP5585_PWM_OFFT_LOW, &val, 2);
+ if (ret)
+ return ret;
+
+ val = cpu_to_le16(on);
+ ret = regmap_bulk_write(regmap, ADP5585_PWM_ONT_LOW, &val, 2);
+ if (ret)
+ return ret;
+
+ /* Enable PWM in continuous mode with no external AND'ing. */
+ ret = regmap_update_bits(regmap, ADP5585_PWM_CFG,
+ ADP5585_PWM_IN_AND | ADP5585_PWM_MODE |
+ ADP5585_PWM_EN, ADP5585_PWM_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN);
+}
+
+static int pwm_adp5585_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+ unsigned int on, off;
+ unsigned int val;
+ __le16 on_off;
+ int ret;
+
+ ret = regmap_bulk_read(regmap, ADP5585_PWM_OFFT_LOW, &on_off, 2);
+ if (ret)
+ return ret;
+ off = le16_to_cpu(on_off);
+
+ ret = regmap_bulk_read(regmap, ADP5585_PWM_ONT_LOW, &on_off, 2);
+ if (ret)
+ return ret;
+ on = le16_to_cpu(on_off);
+
+ state->duty_cycle = on * (NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ);
+ state->period = (on + off) * (NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ);
+
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ regmap_read(regmap, ADP5585_PWM_CFG, &val);
+ state->enabled = !!(val & ADP5585_PWM_EN);
+
+ return 0;
+}
+
+static const struct pwm_ops adp5585_pwm_ops = {
+ .request = pwm_adp5585_request,
+ .free = pwm_adp5585_free,
+ .apply = pwm_adp5585_apply,
+ .get_state = pwm_adp5585_get_state,
+};
+
+static int adp5585_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+ struct pwm_chip *chip;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ device_set_of_node_from_dev(dev, dev->parent);
+
+ pwmchip_set_drvdata(chip, adp5585->regmap);
+ chip->ops = &adp5585_pwm_ops;
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add PWM chip\n");
+
+ return 0;
+}
+
+static const struct platform_device_id adp5585_pwm_id_table[] = {
+ { "adp5585-pwm" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, adp5585_pwm_id_table);
+
+static struct platform_driver adp5585_pwm_driver = {
+ .driver = {
+ .name = "adp5585-pwm",
+ },
+ .probe = adp5585_pwm_probe,
+ .id_table = adp5585_pwm_id_table,
+};
+module_platform_driver(adp5585_pwm_driver);
+
+MODULE_AUTHOR("Xiaoning Wang <xiaoning.wang@nxp.com>");
+MODULE_DESCRIPTION("ADP5585 PWM Driver");
+MODULE_LICENSE("GPL");
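
With the 1 MHz internal oscillator, one counter tick is NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ = 1000 ns, so for example a 100 us period at 25 % duty cycle yields on = 25 and off = 75 ticks. A standalone sketch of that conversion (not the driver code itself; the DEMO_* names are hypothetical):

#include <linux/math64.h>
#include <linux/time.h>
#include <linux/types.h>

#define DEMO_OSC_HZ	1000000U
#define DEMO_TICK_NS	(NSEC_PER_SEC / DEMO_OSC_HZ)	/* 1000 ns per tick */

static void demo_ns_to_on_off(u64 period_ns, u64 duty_ns, u32 *on, u32 *off)
{
	*on = div_u64(duty_ns, DEMO_TICK_NS);
	*off = div_u64(period_ns, DEMO_TICK_NS) - *on;
}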
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 2afb302be02c..387a0d1fa4f2 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -234,7 +234,7 @@ static const struct of_device_id atmel_hlcdc_dt_ids[] = {
.data = &atmel_hlcdc_pwm_sama5d3_errata,
},
{ .compatible = "microchip,sam9x60-hlcdc", },
- { /* sentinel */ },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_dt_ids);
@@ -288,8 +288,9 @@ static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
{ .compatible = "atmel,hlcdc-pwm" },
- { /* sentinel */ },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, atmel_hlcdc_pwm_dt_ids);
static struct platform_driver atmel_hlcdc_pwm_driver = {
.driver = {
@@ -298,7 +299,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
.pm = pm_ptr(&atmel_hlcdc_pwm_pm_ops),
},
.probe = atmel_hlcdc_pwm_probe,
- .remove_new = atmel_hlcdc_pwm_remove,
+ .remove = atmel_hlcdc_pwm_remove,
};
module_platform_driver(atmel_hlcdc_pwm_driver);
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index f9a9c12cbcdd..5ee4254d1e48 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -527,7 +527,7 @@ static struct platform_driver atmel_tcb_pwm_driver = {
.pm = pm_ptr(&atmel_tcb_pwm_pm_ops),
},
.probe = atmel_tcb_pwm_probe,
- .remove_new = atmel_tcb_pwm_remove,
+ .remove = atmel_tcb_pwm_remove,
};
module_platform_driver(atmel_tcb_pwm_driver);
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 3ad60edf20a5..b5477659ba18 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -29,7 +29,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#define AXI_PWMGEN_REG_CORE_VERSION 0x00
#define AXI_PWMGEN_REG_ID 0x04
#define AXI_PWMGEN_REG_SCRATCHPAD 0x08
#define AXI_PWMGEN_REG_CORE_MAGIC 0x0C
@@ -145,7 +144,7 @@ static int axi_pwmgen_setup(struct regmap *regmap, struct device *dev)
"failed to read expected value from register: got %08x, expected %08x\n",
val, AXI_PWMGEN_REG_CORE_MAGIC_VAL);
- ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_VERSION, &val);
+ ret = regmap_read(regmap, ADI_AXI_REG_VERSION, &val);
if (ret)
return ret;
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
index c19a482d7e28..f8f5af57acba 100644
--- a/drivers/pwm/pwm-clk.c
+++ b/drivers/pwm/pwm-clk.c
@@ -130,7 +130,7 @@ static struct platform_driver pwm_clk_driver = {
.of_match_table = pwm_clk_dt_ids,
},
.probe = pwm_clk_probe,
- .remove_new = pwm_clk_remove,
+ .remove = pwm_clk_remove,
};
module_platform_driver(pwm_clk_driver);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 2eb0b13d4e10..e02ee6383dbc 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -276,7 +276,7 @@ static struct platform_driver hibvt_pwm_driver = {
.of_match_table = hibvt_pwm_of_match,
},
.probe = hibvt_pwm_probe,
- .remove_new = hibvt_pwm_remove,
+ .remove = hibvt_pwm_remove,
};
module_platform_driver(hibvt_pwm_driver);
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index d6596583ed4e..71542956feca 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -416,7 +416,7 @@ static struct platform_driver img_pwm_driver = {
.of_match_table = img_pwm_of_match,
},
.probe = img_pwm_probe,
- .remove_new = img_pwm_remove,
+ .remove = img_pwm_remove,
};
module_platform_driver(img_pwm_driver);
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 61189cea1046..90b0733c00c1 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -218,8 +218,7 @@ static int lp3943_pwm_parse_dt(struct device *dev,
struct lp3943_platform_data *pdata;
struct lp3943_pwm_map *pwm_map;
enum lp3943_pwm_output *output;
- int i, err, proplen, count = 0;
- u32 num_outputs;
+ int i, err, num_outputs, count = 0;
if (!node)
return -EINVAL;
@@ -234,11 +233,8 @@ static int lp3943_pwm_parse_dt(struct device *dev,
*/
for (i = 0; i < LP3943_NUM_PWMS; i++) {
- if (!of_get_property(node, name[i], &proplen))
- continue;
-
- num_outputs = proplen / sizeof(u32);
- if (num_outputs == 0)
+ num_outputs = of_property_count_u32_elems(node, name[i]);
+ if (num_outputs <= 0)
continue;
output = devm_kcalloc(dev, num_outputs, sizeof(*output),
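
of_property_count_u32_elems() returns the number of u32 cells in a property, or a negative error number when the property is absent or malformed, replacing the old of_get_property()/proplen arithmetic with a single call. A minimal sketch, where "demo-outputs" is a hypothetical property name:

#include <linux/of.h>

static int demo_count_outputs(const struct device_node *np)
{
	int n = of_property_count_u32_elems(np, "demo-outputs");

	if (n <= 0)	/* absent, empty, or not a u32 array */
		return -EINVAL;

	return n;	/* number of u32 entries */
}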
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 04b76d257fd8..f351baa63453 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -446,7 +446,7 @@ static struct platform_driver lpc18xx_pwm_driver = {
.of_match_table = lpc18xx_pwm_of_match,
},
.probe = lpc18xx_pwm_probe,
- .remove_new = lpc18xx_pwm_remove,
+ .remove = lpc18xx_pwm_remove,
};
module_platform_driver(lpc18xx_pwm_driver);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index cd51c4a938f5..1858a77401f8 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -355,7 +355,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
goto err_platdata;
}
- if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
+ if (!of_property_read_bool(timer, "ti,timer-pwm")) {
dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
ret = -ENODEV;
goto err_timer_property;
@@ -455,7 +455,7 @@ static struct platform_driver pwm_omap_dmtimer_driver = {
.of_match_table = pwm_omap_dmtimer_of_match,
},
.probe = pwm_omap_dmtimer_probe,
- .remove_new = pwm_omap_dmtimer_remove,
+ .remove = pwm_omap_dmtimer_remove,
};
module_platform_driver(pwm_omap_dmtimer_driver);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 4cfecd88ede0..2261789cc27d 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -253,7 +253,7 @@ MODULE_DEVICE_TABLE(of, rcar_pwm_of_table);
static struct platform_driver rcar_pwm_driver = {
.probe = rcar_pwm_probe,
- .remove_new = rcar_pwm_remove,
+ .remove = rcar_pwm_remove,
.driver = {
.name = "pwm-rcar",
.of_match_table = rcar_pwm_of_table,
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 0fa7575dbb54..c5f50e5eaf41 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -386,7 +386,7 @@ static struct platform_driver rockchip_pwm_driver = {
.of_match_table = rockchip_pwm_dt_ids,
},
.probe = rockchip_pwm_probe,
- .remove_new = rockchip_pwm_remove,
+ .remove = rockchip_pwm_remove,
};
module_platform_driver(rockchip_pwm_driver);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index ed7957cc51fd..d5b647e6be78 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -336,7 +336,7 @@ MODULE_DEVICE_TABLE(of, pwm_sifive_of_match);
static struct platform_driver pwm_sifive_driver = {
.probe = pwm_sifive_probe,
- .remove_new = pwm_sifive_remove,
+ .remove = pwm_sifive_remove,
.driver = {
.name = "pwm-sifive",
.of_match_table = pwm_sifive_of_match,
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index f85eb41cb084..eb24054f9729 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -222,7 +222,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
scale = max_arr / min(max_arr, raw_prd);
} else {
- scale = priv->max_arr; /* bellow resolution, use max scale */
+ scale = priv->max_arr; /* below resolution, use max scale */
}
if (psc && scale > 1) {
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 5c29590d1821..e60dc7d6b591 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -493,7 +493,7 @@ static struct platform_driver sun4i_pwm_driver = {
.of_match_table = sun4i_pwm_dt_ids,
},
.probe = sun4i_pwm_probe,
- .remove_new = sun4i_pwm_remove,
+ .remove = sun4i_pwm_remove,
};
module_platform_driver(sun4i_pwm_driver);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index a3d69976148f..172063b51d44 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -432,7 +432,7 @@ static struct platform_driver tegra_pwm_driver = {
.pm = &tegra_pwm_pm_ops,
},
.probe = tegra_pwm_probe,
- .remove_new = tegra_pwm_remove,
+ .remove = tegra_pwm_remove,
};
module_platform_driver(tegra_pwm_driver);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index d6c2b1b1387e..d91b2bdc88fc 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -324,7 +324,7 @@ static struct platform_driver ecap_pwm_driver = {
.pm = pm_ptr(&ecap_pwm_pm_ops),
},
.probe = ecap_pwm_probe,
- .remove_new = ecap_pwm_remove,
+ .remove = ecap_pwm_remove,
};
module_platform_driver(ecap_pwm_driver);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index e5104725d9b7..0125e73b98df 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -603,7 +603,7 @@ static struct platform_driver ehrpwm_pwm_driver = {
.pm = pm_ptr(&ehrpwm_pwm_pm_ops),
},
.probe = ehrpwm_pwm_probe,
- .remove_new = ehrpwm_pwm_remove,
+ .remove = ehrpwm_pwm_remove,
};
module_platform_driver(ehrpwm_pwm_driver);
diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig
index df49c23e7f62..551680073e43 100644
--- a/drivers/ras/amd/atl/Kconfig
+++ b/drivers/ras/amd/atl/Kconfig
@@ -19,3 +19,7 @@ config AMD_ATL
Enable this option if using DRAM ECC on Zen-based systems
and OS-based error handling.
+
+config AMD_ATL_PRM
+ depends on AMD_ATL && ACPI_PRMT
+ def_bool y
diff --git a/drivers/ras/amd/atl/Makefile b/drivers/ras/amd/atl/Makefile
index 4acd5f05bd9c..b56892c0c0d9 100644
--- a/drivers/ras/amd/atl/Makefile
+++ b/drivers/ras/amd/atl/Makefile
@@ -15,4 +15,6 @@ amd_atl-y += map.o
amd_atl-y += system.o
amd_atl-y += umc.o
+amd_atl-$(CONFIG_AMD_ATL_PRM) += prm.o
+
obj-$(CONFIG_AMD_ATL) += amd_atl.o
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 9de5d53d0568..143d04c779a8 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -282,6 +282,16 @@ unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr);
u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr);
+#ifdef CONFIG_AMD_ATL_PRM
+unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 umc_bank_inst_id, unsigned long addr);
+#else
+static inline unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 umc_bank_inst_id,
+ unsigned long addr)
+{
+ return -ENODEV;
+}
+#endif
+
/*
* Make a gap in @data that is @num_bits long starting at @bit_num.
* e.g. data = 11111111'b
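
The stub above follows the standard kernel pattern for optional features: a real prototype when the config option is enabled and a static inline returning -ENODEV otherwise, so callers need no #ifdef of their own. A generic sketch of the pattern, with CONFIG_DEMO_FEATURE and demo_translate() as hypothetical names:

#ifdef CONFIG_DEMO_FEATURE
unsigned long demo_translate(unsigned long addr);
#else
static inline unsigned long demo_translate(unsigned long addr)
{
	return -ENODEV;	/* callers simply fall back, no #ifdef needed */
}
#endif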
diff --git a/drivers/ras/amd/atl/prm.c b/drivers/ras/amd/atl/prm.c
new file mode 100644
index 000000000000..0931a20d213b
--- /dev/null
+++ b/drivers/ras/amd/atl/prm.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * prm.c : Plumbing code for ACPI Platform Runtime Mechanism (PRM)
+ *
+ * Information on AMD PRM modules and handlers including the GUIDs and buffer
+ * structures used here are defined in the AMD ACPI Porting Guide in the
+ * chapter "Platform Runtime Mechanism Table (PRMT)"
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: John Allen <john.allen@amd.com>
+ */
+
+#include "internal.h"
+
+#include <linux/prmt.h>
+
+/*
+ * PRM parameter buffer - normalized to system physical address, as described
+ * in the "PRM Parameter Buffer" section of the AMD ACPI Porting Guide.
+ */
+struct norm_to_sys_param_buf {
+ u64 norm_addr;
+ u8 socket;
+ u64 bank_id;
+ void *out_buf;
+} __packed;
+
+static const guid_t norm_to_sys_guid = GUID_INIT(0xE7180659, 0xA65D, 0x451D,
+ 0x92, 0xCD, 0x2B, 0x56, 0xF1,
+ 0x2B, 0xEB, 0xA6);
+
+unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 bank_id, unsigned long addr)
+{
+ struct norm_to_sys_param_buf p_buf;
+ unsigned long ret_addr;
+ int ret;
+
+ p_buf.norm_addr = addr;
+ p_buf.socket = socket_id;
+ p_buf.bank_id = bank_id;
+ p_buf.out_buf = &ret_addr;
+
+ ret = acpi_call_prm_handler(norm_to_sys_guid, &p_buf);
+ if (!ret)
+ return ret_addr;
+
+ if (ret == -ENODEV)
+ pr_debug("PRM module/handler not available\n");
+ else
+ pr_notice_once("PRM address translation failed\n");
+
+ return ret;
+}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index a1b4accf7b96..dc8aa12f63c8 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -401,9 +401,14 @@ unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err)
u8 coh_st_inst_id = get_coh_st_inst_id(err);
unsigned long addr = get_addr(err->addr);
u8 die_id = get_die_id(err);
+ unsigned long ret_addr;
pr_debug("socket_id=0x%x die_id=0x%x coh_st_inst_id=0x%x addr=0x%016lx",
socket_id, die_id, coh_st_inst_id, addr);
+ ret_addr = prm_umc_norm_to_sys_addr(socket_id, err->ipid, addr);
+ if (!IS_ERR_VALUE(ret_addr))
+ return ret_addr;
+
return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, addr);
}
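
The converted function tries the PRM translation first and falls back to the native decoder when the result is an error value; IS_ERR_VALUE() works on the unsigned long return because kernel error numbers occupy the top of the address range. A generic sketch of the fallback idiom, with both helpers hypothetical:

#include <linux/err.h>

static unsigned long demo_translate_addr(unsigned long addr)
{
	unsigned long ret = demo_fast_translate(addr);	/* hypothetical */

	if (!IS_ERR_VALUE(ret))
		return ret;

	return demo_slow_translate(addr);		/* hypothetical */
}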
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 4b411a09c1a6..39297f7d8177 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1373,13 +1373,6 @@ config REGULATOR_SLG51000
The SLG51000 provides seven compact and customizable low dropout
regulators.
-config REGULATOR_SM5703
- tristate "Silicon Mitus SM5703 regulators"
- depends on MFD_SM5703
- help
- This driver provides support for voltage regulators of SM5703
- multi-function device.
-
config REGULATOR_STM32_BOOSTER
tristate "STMicroelectronics STM32 BOOSTER"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index a61fa42b13c4..3d5a803dce8a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -160,7 +160,6 @@ obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_SLG51000) += slg51000-regulator.o
-obj-$(CONFIG_REGULATOR_SM5703) += sm5703-regulator.o
obj-$(CONFIG_REGULATOR_STM32_BOOSTER) += stm32-booster.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index a504b01dd99c..0457af23c55a 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -673,9 +673,7 @@ static int act8865_pmic_probe(struct i2c_client *client)
type = (unsigned long) id->data;
- voltage_select = !!of_get_property(dev->of_node,
- "active-semi,vsel-high",
- NULL);
+ voltage_select = of_property_read_bool(dev->of_node, "active-semi,vsel-high");
} else {
type = i2c_id->driver_data;
pdata = dev_get_platdata(dev);
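
of_property_read_bool() tests for the presence of a boolean devicetree property directly, which reads better than checking the pointer returned by of_get_property(). A one-line sketch, with "demo-flag" as a hypothetical property:

#include <linux/of.h>

static bool demo_flag_set(const struct device_node *np)
{
	return of_property_read_bool(np, "demo-flag");
}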
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f3c447ecdc3b..a8e91d9d028b 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -143,6 +143,7 @@
#define AXP717_DCDC3_NUM_VOLTAGES 103
#define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0)
#define AXP717_LDO_V_OUT_MASK GENMASK(4, 0)
+#define AXP717_BOOST_V_OUT_MASK GENMASK(7, 4)
#define AXP803_PWR_OUT_DCDC1_MASK BIT_MASK(0)
#define AXP803_PWR_OUT_DCDC2_MASK BIT_MASK(1)
@@ -834,6 +835,9 @@ static const struct regulator_desc axp717_regulators[] = {
AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50,
AXP717_CPUSLDO_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(4)),
+ AXP_DESC(AXP717, BOOST, "boost", "vin1", 4550, 5510, 64,
+ AXP717_BOOST_CONTROL, AXP717_BOOST_V_OUT_MASK,
+ AXP717_MODULE_EN_CONTROL_2, BIT(4)),
};
/* DCDC ranges shared with AXP813 */
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index c3fb05dce40c..1bb048de3ecd 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -2,6 +2,7 @@
// Copyright (C) 2018 ROHM Semiconductors
// bd71837-regulator.c ROHM BD71837MWV/BD71847MWV regulator driver
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -1635,18 +1636,17 @@ static int get_special_regulators(struct device *dev,
unsigned int num_reg_data, int *info)
{
int ret;
- struct device_node *np;
- struct device_node *nproot = dev->of_node;
int uv;
*info = 0;
- nproot = of_get_child_by_name(nproot, "regulators");
+ struct device_node *nproot __free(device_node) = of_get_child_by_name(dev->of_node,
+ "regulators");
if (!nproot) {
dev_err(dev, "failed to find regulators node\n");
return -ENODEV;
}
- for_each_child_of_node(nproot, np) {
+ for_each_child_of_node_scoped(nproot, np) {
if (of_property_read_bool(np, "rohm,no-regulator-enable-control"))
mark_hw_controlled(dev, np, reg_data, num_reg_data,
info);
@@ -1656,22 +1656,15 @@ static int get_special_regulators(struct device *dev,
if (ret == -EINVAL)
continue;
else
- goto err_out;
+ return ret;
}
ret = setup_feedback_loop(dev, np, reg_data, num_reg_data, uv);
if (ret)
- goto err_out;
+ return ret;
}
- of_node_put(nproot);
return 0;
-
-err_out:
- of_node_put(np);
- of_node_put(nproot);
-
- return ret;
}
static int bd718xx_probe(struct platform_device *pdev)
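
Both conversions above replace manual of_node_put() bookkeeping with scope-based cleanup: __free(device_node) drops the reference when the pointer goes out of scope, and for_each_child_of_node_scoped() does the same for the iterator on every exit path, early returns included. A minimal sketch of the combined pattern, assuming a hypothetical "regulators" child node:

#include <linux/cleanup.h>
#include <linux/of.h>

static int demo_count_children(struct device *dev)
{
	struct device_node *nproot __free(device_node) =
		of_get_child_by_name(dev->of_node, "regulators");
	int n = 0;

	if (!nproot)
		return -ENODEV;

	for_each_child_of_node_scoped(nproot, np)	/* np auto-put */
		n++;

	return n;	/* nproot reference dropped here */
}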
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index d4ca7b3f4036..bf5f9c3f2c97 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -68,25 +68,25 @@ static const struct linear_range voutL1_xvd_ranges[] = {
REGULATOR_LINEAR_RANGE(220000, 0x6e, 0x7f, 0),
};
-static struct linear_range voutS1_ocw_ranges_internal[] = {
+static const struct linear_range voutS1_ocw_ranges_internal[] = {
REGULATOR_LINEAR_RANGE(200000, 0x01, 0x04, 0),
REGULATOR_LINEAR_RANGE(250000, 0x05, 0x18, 50000),
REGULATOR_LINEAR_RANGE(1200000, 0x19, 0x3f, 0),
};
-static struct linear_range voutS1_ocw_ranges[] = {
+static const struct linear_range voutS1_ocw_ranges[] = {
REGULATOR_LINEAR_RANGE(50000, 0x01, 0x04, 0),
REGULATOR_LINEAR_RANGE(60000, 0x05, 0x18, 10000),
REGULATOR_LINEAR_RANGE(250000, 0x19, 0x3f, 0),
};
-static struct linear_range voutS1_ocp_ranges_internal[] = {
+static const struct linear_range voutS1_ocp_ranges_internal[] = {
REGULATOR_LINEAR_RANGE(300000, 0x01, 0x06, 0),
REGULATOR_LINEAR_RANGE(350000, 0x7, 0x1b, 50000),
REGULATOR_LINEAR_RANGE(1350000, 0x1c, 0x3f, 0),
};
-static struct linear_range voutS1_ocp_ranges[] = {
+static const struct linear_range voutS1_ocp_ranges[] = {
REGULATOR_LINEAR_RANGE(70000, 0x01, 0x06, 0),
REGULATOR_LINEAR_RANGE(80000, 0x7, 0x1b, 10000),
REGULATOR_LINEAR_RANGE(280000, 0x1c, 0x3f, 0),
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 46ca81f18703..9876cc05867e 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -34,6 +34,7 @@
* conflict in your downstream driver ;)
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -453,15 +454,14 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
int num)
{
int i, ret;
- struct device_node *np;
- struct device_node *nproot = dev->parent->of_node;
- nproot = of_get_child_by_name(nproot, "regulators");
+ struct device_node *nproot __free(device_node) =
+ of_get_child_by_name(dev->parent->of_node, "regulators");
if (!nproot) {
dev_err(dev, "failed to find regulators node\n");
return -ENODEV;
}
- for_each_child_of_node(nproot, np)
+ for_each_child_of_node_scoped(nproot, np) {
for (i = 0; i < num; i++) {
if (!of_node_name_eq(np, data[i].desc.of_match))
continue;
@@ -476,11 +476,9 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
dev_err(dev,
"Initializing voltages for %s failed\n",
data[i].desc.name);
- of_node_put(np);
- of_node_put(nproot);
-
return ret;
}
+
if (of_property_read_bool(np, "rohm,keep-on-stby")) {
ret = regmap_set_bits(regmap,
BD96801_ALWAYS_ON_REG,
@@ -489,14 +487,11 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
dev_err(dev,
"failed to set %s on-at-stby\n",
data[i].desc.name);
- of_node_put(np);
- of_node_put(nproot);
-
return ret;
}
}
}
- of_node_put(nproot);
+ }
return 0;
}
@@ -853,8 +848,6 @@ static int bd96801_probe(struct platform_device *pdev)
ldo_errs_arr[temp_notif_ldos] = rdesc[i].ldo_errs;
temp_notif_ldos++;
}
- if (!idesc)
- continue;
/* Register INTB handlers for configured protections */
for (j = 0; j < idesc->num_irqs; j++) {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7674b7f2df14..1179766811f5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -139,6 +139,8 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
* once. If a task other than the one that initially locked the
* mutex calls this function, it will wait on the mutex.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static inline int regulator_lock_nested(struct regulator_dev *rdev,
struct ww_acquire_ctx *ww_ctx)
@@ -419,72 +421,6 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
mutex_unlock(&regulator_list_mutex);
}
-/**
- * of_get_child_regulator - get a child regulator device node
- * based on supply name
- * @parent: Parent device node
- * @prop_name: Combination regulator supply name and "-supply"
- *
- * Traverse all child nodes.
- * Extract the child regulator device node corresponding to the supply name.
- * returns the device node corresponding to the regulator if found, else
- * returns NULL.
- */
-static struct device_node *of_get_child_regulator(struct device_node *parent,
- const char *prop_name)
-{
- struct device_node *regnode = NULL;
- struct device_node *child = NULL;
-
- for_each_child_of_node(parent, child) {
- regnode = of_parse_phandle(child, prop_name, 0);
-
- if (!regnode) {
- regnode = of_get_child_regulator(child, prop_name);
- if (regnode)
- goto err_node_put;
- } else {
- goto err_node_put;
- }
- }
- return NULL;
-
-err_node_put:
- of_node_put(child);
- return regnode;
-}
-
-/**
- * of_get_regulator - get a regulator device node based on supply name
- * @dev: Device pointer for the consumer (of regulator) device
- * @supply: regulator supply name
- *
- * Extract the regulator device node corresponding to the supply name.
- * returns the device node corresponding to the regulator if found, else
- * returns NULL.
- */
-static struct device_node *of_get_regulator(struct device *dev, const char *supply)
-{
- struct device_node *regnode = NULL;
- char prop_name[64]; /* 64 is max size of property name */
-
- dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
-
- snprintf(prop_name, 64, "%s-supply", supply);
- regnode = of_parse_phandle(dev->of_node, prop_name, 0);
-
- if (!regnode) {
- regnode = of_get_child_regulator(dev->of_node, prop_name);
- if (regnode)
- return regnode;
-
- dev_dbg(dev, "Looking up %s property in node %pOF failed\n",
- prop_name, dev->of_node);
- return NULL;
- }
- return regnode;
-}
-
/* Platform voltage constraint check */
int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
@@ -1462,6 +1398,8 @@ static int handle_notify_limits(struct regulator_dev *rdev,
* Constraints *must* be set by platform code in order for some
* regulator operations to proceed i.e. set_voltage, set_current_limit,
* set_mode.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static int set_machine_constraints(struct regulator_dev *rdev)
{
@@ -1700,6 +1638,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
* Called by platform initialisation code to set the supply regulator for this
* regulator. This ensures that a regulator's supply will also be enabled by
* the core if its child is enabled.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static int set_supply(struct regulator_dev *rdev,
struct regulator_dev *supply_rdev)
@@ -1732,6 +1672,8 @@ static int set_supply(struct regulator_dev *rdev,
* sources to symbolic names for supplies for use by devices. Devices
* should use these symbolic names to request regulators, avoiding the
* need to provide board-specific regulator names as platform data.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static int set_consumer_device_supply(struct regulator_dev *rdev,
const char *consumer_dev_name,
@@ -1998,18 +1940,19 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
* @dev: device for regulator "consumer".
* @supply: Supply name or regulator ID.
*
+ * Return: pointer to &struct regulator_dev or ERR_PTR() encoded negative error number.
+ *
* If successful, returns a struct regulator_dev that corresponds to the name
* @supply and with the embedded struct device refcount incremented by one.
* The refcount must be dropped by calling put_device().
- * On failure one of the following ERR-PTR-encoded values is returned:
- * -ENODEV if lookup fails permanently, -EPROBE_DEFER if lookup could succeed
+ * On failure one of the following ERR_PTR() encoded values is returned:
+ * -%ENODEV if lookup fails permanently, -%EPROBE_DEFER if lookup could succeed
* in the future.
*/
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
const char *supply)
{
struct regulator_dev *r = NULL;
- struct device_node *node;
struct regulator_map *map;
const char *devname = NULL;
@@ -2017,19 +1960,14 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
/* first do a dt based lookup */
if (dev && dev->of_node) {
- node = of_get_regulator(dev, supply);
- if (node) {
- r = of_find_regulator_by_node(node);
- of_node_put(node);
- if (r)
- return r;
+ r = of_regulator_dev_lookup(dev, supply);
+ if (!IS_ERR(r))
+ return r;
+ if (PTR_ERR(r) == -EPROBE_DEFER)
+ return r;
- /*
- * We have a node, but there is no device.
- * assume it has not registered yet.
- */
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (PTR_ERR(r) == -ENODEV)
+ r = NULL;
}
/* if not found, try doing it non-dt way */
@@ -2168,26 +2106,43 @@ out:
return ret;
}
-/* Internal regulator request function */
-struct regulator *_regulator_get(struct device *dev, const char *id,
- enum regulator_get_type get_type)
+/* common pre-checks for regulator requests */
+int _regulator_get_common_check(struct device *dev, const char *id,
+ enum regulator_get_type get_type)
{
- struct regulator_dev *rdev;
- struct regulator *regulator;
- struct device_link *link;
- int ret;
-
if (get_type >= MAX_GET_TYPE) {
dev_err(dev, "invalid type %d in %s\n", get_type, __func__);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (id == NULL) {
- pr_err("get() with no identifier\n");
- return ERR_PTR(-EINVAL);
+ dev_err(dev, "regulator request with no identifier\n");
+ return -EINVAL;
}
- rdev = regulator_dev_lookup(dev, id);
+ return 0;
+}
+
+/**
+ * _regulator_get_common - Common code for regulator requests
+ * @rdev: regulator device pointer as returned by *regulator_dev_lookup()
+ * Its reference count is expected to have been incremented.
+ * @dev: device used for dev_printk messages
+ * @id: Supply name or regulator ID
+ * @get_type: enum regulator_get_type value corresponding to type of request
+ *
+ * Return: pointer to struct regulator corresponding to @rdev, or ERR_PTR()
+ * encoded error.
+ *
+ * This function should be chained with *regulator_dev_lookup() functions.
+ */
+struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct device *dev,
+ const char *id, enum regulator_get_type get_type)
+{
+ struct regulator *regulator;
+ struct device_link *link;
+ int ret;
+
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
@@ -2303,18 +2258,33 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
+/* Internal regulator request function */
+struct regulator *_regulator_get(struct device *dev, const char *id,
+ enum regulator_get_type get_type)
+{
+ struct regulator_dev *rdev;
+ int ret;
+
+ ret = _regulator_get_common_check(dev, id, get_type);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rdev = regulator_dev_lookup(dev, id);
+ return _regulator_get_common(rdev, dev, id, get_type);
+}
+
/**
* regulator_get - lookup and obtain a reference to a regulator.
* @dev: device for regulator "consumer"
* @id: Supply name or regulator ID.
*
- * Returns a struct regulator corresponding to the regulator producer,
- * or IS_ERR() condition containing errno.
- *
* Use of supply names configured via set_consumer_device_supply() is
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
+ *
+ * Return: Pointer to a &struct regulator corresponding to the regulator
+ * producer, or an ERR_PTR() encoded negative error number.
*/
struct regulator *regulator_get(struct device *dev, const char *id)
{
@@ -2327,11 +2297,9 @@ EXPORT_SYMBOL_GPL(regulator_get);
* @dev: device for regulator "consumer"
* @id: Supply name or regulator ID.
*
- * Returns a struct regulator corresponding to the regulator producer,
- * or IS_ERR() condition containing errno. Other consumers will be
- * unable to obtain this regulator while this reference is held and the
- * use count for the regulator will be initialised to reflect the current
- * state of the regulator.
+ * Other consumers will be unable to obtain this regulator while this
+ * reference is held and the use count for the regulator will be
+ * initialised to reflect the current state of the regulator.
*
* This is intended for use by consumers which cannot tolerate shared
* use of the regulator such as those which need to force the
@@ -2342,6 +2310,9 @@ EXPORT_SYMBOL_GPL(regulator_get);
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
+ *
+ * Return: Pointer to a &struct regulator corresponding to the regulator
+ * producer, or an ERR_PTR() encoded negative error number.
*/
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
@@ -2354,9 +2325,6 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
* @dev: device for regulator "consumer"
* @id: Supply name or regulator ID.
*
- * Returns a struct regulator corresponding to the regulator producer,
- * or IS_ERR() condition containing errno.
- *
* This is intended for use by consumers for devices which can have
* some supplies unconnected in normal use, such as some MMC devices.
* It can allow the regulator core to provide stub supplies for other
@@ -2368,6 +2336,9 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
* strongly encouraged. It is recommended that the supply name used
* should match the name used for the supply and/or the relevant
* device pins in the datasheet.
+ *
+ * Return: Pointer to a &struct regulator corresponding to the regulator
+ * producer, or an ERR_PTR() encoded negative error number.
*/
struct regulator *regulator_get_optional(struct device *dev, const char *id)
{
@@ -2448,6 +2419,8 @@ EXPORT_SYMBOL_GPL(regulator_put);
*
* All lookups for id on dev will instead be conducted for alias_id on
* alias_dev.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
@@ -2507,12 +2480,12 @@ EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias);
* lookup the supply
* @num_id: Number of aliases to register
*
- * @return 0 on success, an errno on failure.
- *
* This helper function allows drivers to register several supply
* aliases in one operation. If any of the aliases cannot be
* registered any aliases that were registered will be removed
* before returning to the caller.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
@@ -2637,6 +2610,8 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
*
* GPIO is enabled in case of initial use. (enable_count is 0)
* GPIO is disabled when it is not shared any more. (enable_count <= 1)
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
{
@@ -2707,10 +2682,8 @@ static void _regulator_delay_helper(unsigned int delay)
}
/**
- * _regulator_check_status_enabled
- *
- * A helper function to check if the regulator status can be interpreted
- * as 'regulator is enabled'.
+ * _regulator_check_status_enabled - check if regulator status can be
+ * interpreted as "regulator is enabled"
* @rdev: the regulator device to check
*
* Return:
@@ -2839,7 +2812,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* responsible for keeping track of the refcount for a given regulator consumer
* and applying / unapplying these things.
*
- * Returns 0 upon no error; -error upon error.
+ * Return: 0 on success or negative error number on failure.
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
@@ -2865,7 +2838,7 @@ static int _regulator_handle_consumer_enable(struct regulator *regulator)
*
* The opposite of _regulator_handle_consumer_enable().
*
- * Returns 0 upon no error; -error upon error.
+ * Return: 0 on success or a negative error number on failure.
*/
static int _regulator_handle_consumer_disable(struct regulator *regulator)
{
@@ -2961,6 +2934,8 @@ err_disable_supply:
*
* NOTE: the output value can be set by other drivers, boot loader or may be
* hardwired in the regulator.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_enable(struct regulator *regulator)
{
@@ -3071,6 +3046,8 @@ static int _regulator_disable(struct regulator *regulator)
* NOTE: this will only disable the regulator output if no other consumer
* devices have it enabled, the regulator device supports disabling and
* machine constraints permit this operation.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_disable(struct regulator *regulator)
{
@@ -3120,6 +3097,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
* NOTE: this *will* disable the regulator output even if other consumer
* devices have it enabled. This should be used for situations when device
* damage will likely occur if the regulator is not disabled (e.g. over temp).
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_force_disable(struct regulator *regulator)
{
@@ -3202,6 +3181,8 @@ static void regulator_disable_work(struct work_struct *work)
* NOTE: this will only disable the regulator output if no other consumer
* devices have it enabled, the regulator device supports disabling and
* machine constraints permit this operation.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_disable_deferred(struct regulator *regulator, int ms)
{
@@ -3273,13 +3254,13 @@ static int _regulator_list_voltage(struct regulator_dev *rdev,
* regulator_is_enabled - is the regulator output enabled
* @regulator: regulator source
*
- * Returns positive if the regulator driver backing the source/client
- * has requested that the device be enabled, zero if it hasn't, else a
- * negative errno code.
- *
* Note that the device backing this regulator handle can have multiple
* users, so it might be enabled even if regulator_enable() was never
* called for this particular source.
+ *
+ * Return: Positive if the regulator driver backing the source/client
+ * has requested that the device be enabled, zero if it hasn't,
+ * else a negative error number.
*/
int regulator_is_enabled(struct regulator *regulator)
{
@@ -3300,9 +3281,10 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled);
* regulator_count_voltages - count regulator_list_voltage() selectors
* @regulator: regulator source
*
- * Returns number of selectors, or negative errno. Selectors are
- * numbered starting at zero, and typically correspond to bitfields
- * in hardware registers.
+ * Return: Number of selectors for @regulator, or negative error number.
+ *
+ * Selectors are numbered starting at zero, and typically correspond to
+ * bitfields in hardware registers.
*/
int regulator_count_voltages(struct regulator *regulator)
{
@@ -3324,9 +3306,9 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
* @selector: identify voltage to list
* Context: can sleep
*
- * Returns a voltage that can be passed to @regulator_set_voltage(),
- * zero if this selector code can't be used on this system, or a
- * negative errno.
+ * Return: Voltage for @selector that can be passed to regulator_set_voltage(),
+ * 0 if @selector can't be used on this system, or a negative error
+ * number on failure.
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
@@ -3338,8 +3320,8 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage);
* regulator_get_regmap - get the regulator's register map
* @regulator: regulator source
*
- * Returns the register map for the given regulator, or an ERR_PTR value
- * if the regulator doesn't use regmap.
+ * Return: Pointer to the &struct regmap for @regulator, or ERR_PTR()
+ * encoded -%EOPNOTSUPP if @regulator doesn't use regmap.
*/
struct regmap *regulator_get_regmap(struct regulator *regulator)
{
@@ -3360,8 +3342,11 @@ EXPORT_SYMBOL_GPL(regulator_get_regmap);
* hardware or firmware that can make I2C requests behind the kernel's back,
* for example.
*
+ * Return: 0 on success, or -%EOPNOTSUPP if the regulator does not support
+ * voltage selectors.
+ *
* On success, the output parameters @vsel_reg and @vsel_mask are filled in
- * and 0 is returned, otherwise a negative errno is returned.
+ * and 0 is returned, otherwise a negative error number is returned.
*/
int regulator_get_hardware_vsel_register(struct regulator *regulator,
unsigned *vsel_reg,
@@ -3389,7 +3374,9 @@ EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register);
* directly written to the regulator registers. The address of the voltage
* register can be determined by calling @regulator_get_hardware_vsel_register.
*
- * On error a negative errno is returned.
+ * Return: 0 on success, -%EINVAL if the selector is outside the supported
+ * range, or -%EOPNOTSUPP if the regulator does not support voltage
+ * selectors.
*/
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector)
@@ -3416,7 +3403,7 @@ EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
* Request that the regulator be enabled/disabled with the regulator output at
* the predefined voltage or current value.
*
- * On success 0 is returned, otherwise a negative errno is returned.
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_hardware_enable(struct regulator *regulator, bool enable)
{
@@ -3440,8 +3427,8 @@ EXPORT_SYMBOL_GPL(regulator_hardware_enable);
* regulator_get_linear_step - return the voltage step size between VSEL values
* @regulator: regulator source
*
- * Returns the voltage step size between VSEL values for linear
- * regulators, or return 0 if the regulator isn't a linear regulator.
+ * Return: The voltage step size between VSEL values for linear regulators,
+ * or 0 if the regulator isn't a linear regulator.
*/
unsigned int regulator_get_linear_step(struct regulator *regulator)
{
@@ -3458,7 +3445,9 @@ EXPORT_SYMBOL_GPL(regulator_get_linear_step);
* @min_uV: Minimum required voltage in uV.
* @max_uV: Maximum required voltage in uV.
*
- * Returns a boolean.
+ * Return: 1 if the voltage range is supported, 0 if not, or a negative error
+ * number if @regulator's voltage can't be changed and voltage readback
+ * failed.
*/
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
@@ -4210,6 +4199,8 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
* request voltage that meets the system constraints will be used.
* Regulator system constraints must be set for this regulator before
* calling this function otherwise this call will fail.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
@@ -4320,6 +4311,8 @@ EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
* Provided with the starting and ending voltage, this function attempts to
* calculate the time in microseconds required to rise or fall to this new
* voltage.
+ *
+ * Return: ramp time in microseconds, or a negative error number if calculation failed.
*/
int regulator_set_voltage_time(struct regulator *regulator,
int old_uV, int new_uV)
@@ -4377,6 +4370,8 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
*
* Drivers providing ramp_delay in regulation_constraints can use this as their
* set_voltage_time_sel() operation.
+ *
+ * Return: ramp time in microseconds, or a negative error number if calculation failed.
*/
int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int old_selector,
@@ -4429,6 +4424,8 @@ out:
* Re-apply the last configured voltage. This is intended to be used
* where some external control source the consumer is cooperating with
* has caused the configured voltage to change.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_sync_voltage(struct regulator *regulator)
{
@@ -4527,7 +4524,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
* regulator_get_voltage - get regulator output voltage
* @regulator: regulator source
*
- * This returns the current regulator voltage in uV.
+ * Return: Current regulator voltage in uV, or a negative error number on failure.
*
* NOTE: If the regulator is disabled it will return the voltage value. This
* function should not be used to determine regulator state.
@@ -4560,6 +4557,8 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage);
*
* NOTE: Regulator system constraints must be set for this regulator before
* calling this function otherwise this call will fail.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA)
@@ -4611,7 +4610,8 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev)
* regulator_get_current_limit - get regulator output current
* @regulator: regulator source
*
- * This returns the current supplied by the specified current sink in uA.
+ * Return: Current supplied by the specified current sink in uA,
+ * or a negative error number on failure.
*
* NOTE: If the regulator is disabled it will return the current value. This
* function should not be used to determine regulator state.
@@ -4632,6 +4632,8 @@ EXPORT_SYMBOL_GPL(regulator_get_current_limit);
*
* NOTE: Regulator system constraints must be set for this regulator before
* calling this function otherwise this call will fail.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_set_mode(struct regulator *regulator, unsigned int mode)
{
@@ -4693,6 +4695,9 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
* @regulator: regulator source
*
* Get the current regulator operating mode.
+ *
+ * Return: Current operating mode as %REGULATOR_MODE_* values,
+ * or a negative error number on failure.
*/
unsigned int regulator_get_mode(struct regulator *regulator)
{
@@ -4739,6 +4744,8 @@ static int _regulator_get_error_flags(struct regulator_dev *rdev,
* @flags: pointer to store error flags
*
* Get the current regulator error information.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_get_error_flags(struct regulator *regulator,
unsigned int *flags)
@@ -4779,7 +4786,7 @@ EXPORT_SYMBOL_GPL(regulator_get_error_flags);
* If a regulator is an always-on regulator then an individual consumer's
* load will still be removed if that consumer is fully disabled.
*
- * On error a negative errno is returned.
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_set_load(struct regulator *regulator, int uA_load)
{
@@ -4811,6 +4818,9 @@ EXPORT_SYMBOL_GPL(regulator_set_load);
* for the regulator also enable bypass mode and the machine
* constraints allow this. Bypass mode means that the regulator is
* simply passing the input directly to the output with no regulation.
+ *
+ * Return: 0 on success or if changing bypass is not possible, or
+ * a negative error number on failure.
*/
int regulator_allow_bypass(struct regulator *regulator, bool enable)
{
@@ -4868,6 +4878,8 @@ EXPORT_SYMBOL_GPL(regulator_allow_bypass);
* @nb: notifier block
*
* Register notifier block to receive regulator events.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb)
@@ -4883,6 +4895,8 @@ EXPORT_SYMBOL_GPL(regulator_register_notifier);
* @nb: notifier block
*
* Unregister regulator event notifier block.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb)
@@ -4964,12 +4978,12 @@ err:
* @num_consumers: Number of consumers to register
* @consumers: Configuration of consumers; clients are stored here.
*
- * @return 0 on success, an errno on failure.
- *
* This helper function allows drivers to get several regulator
* consumers in one operation. If any of the regulators cannot be
* acquired then any regulators that were allocated will be freed
* before returning to the caller.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers)
@@ -4990,12 +5004,13 @@ static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
*
* @num_consumers: Number of consumers
* @consumers: Consumer data; clients are stored here.
- * @return 0 on success, an errno on failure
*
* This convenience API allows consumers to enable multiple regulator
* clients in a single API call. If any consumers cannot be enabled
* then any others that were enabled will be disabled again prior to
* return.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
@@ -5039,12 +5054,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_enable);
*
* @num_consumers: Number of consumers
* @consumers: Consumer data; clients are stored here.
- * @return 0 on success, an errno on failure
*
* This convenience API allows consumers to disable multiple regulator
* clients in a single API call. If any consumers cannot be disabled
* then any others that were disabled will be enabled again prior to
* return.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
@@ -5078,7 +5094,6 @@ EXPORT_SYMBOL_GPL(regulator_bulk_disable);
*
* @num_consumers: Number of consumers
* @consumers: Consumer data; clients are stored here.
- * @return 0 on success, an errno on failure
*
* This convenience API allows consumers to forcibly disable multiple regulator
* clients in a single API call.
@@ -5086,6 +5101,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_disable);
* likely occur if the regulators are not disabled (e.g. over temp).
* Although regulator_force_disable function call for some consumers can
* return error numbers, the function is called for all consumers.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
int regulator_bulk_force_disable(int num_consumers,
struct regulator_bulk_data *consumers)
@@ -5170,6 +5187,8 @@ static void regulator_handle_critical(struct regulator_dev *rdev,
*
* Called by regulator drivers to notify clients a regulator event has
* occurred.
+ *
+ * Return: %NOTIFY_DONE.
*/
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
@@ -5188,6 +5207,8 @@ EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);
* @mode: Mode to convert
*
* Convert a regulator mode into a status.
+ *
+ * Return: %REGULATOR_STATUS_* value corresponding to given mode.
*/
int regulator_mode_to_status(unsigned int mode)
{
@@ -5582,8 +5603,9 @@ static struct regulator_coupler generic_regulator_coupler = {
* @cfg: runtime configuration for regulator
*
* Called by regulator drivers to register a regulator.
- * Returns a valid pointer to struct regulator_dev on success
- * or an ERR_PTR() on error.
+ *
+ * Return: Pointer to a valid &struct regulator_dev on success or
+ * an ERR_PTR() encoded negative error number on failure.
*/
struct regulator_dev *
regulator_register(struct device *dev,
@@ -5877,6 +5899,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister);
* @dev: ``&struct device`` pointer that is passed to _regulator_suspend()
*
* Configure each regulator with its suspend operating parameters for state.
+ *
+ * Return: 0 on success or a negative error number on failure.
*/
static int regulator_suspend(struct device *dev)
{
@@ -5966,6 +5990,8 @@ EXPORT_SYMBOL_GPL(regulator_has_full_constraints);
*
* Get rdev regulator driver private data. This call can be used in the
* regulator driver context.
+ *
+ * Return: Pointer to regulator driver private data.
*/
void *rdev_get_drvdata(struct regulator_dev *rdev)
{
@@ -5979,6 +6005,8 @@ EXPORT_SYMBOL_GPL(rdev_get_drvdata);
*
* Get regulator driver private data. This call can be used in the consumer
* driver context when non API regulator specific functions need to be called.
+ *
+ * Return: Pointer to regulator driver private data.
*/
void *regulator_get_drvdata(struct regulator *regulator)
{
@@ -6000,6 +6028,8 @@ EXPORT_SYMBOL_GPL(regulator_set_drvdata);
/**
* rdev_get_id - get regulator ID
* @rdev: regulator
+ *
+ * Return: Regulator ID for @rdev.
*/
int rdev_get_id(struct regulator_dev *rdev)
{
diff --git a/drivers/regulator/da903x-regulator.c b/drivers/regulator/da903x-regulator.c
index f79337079a45..2f85897183b3 100644
--- a/drivers/regulator/da903x-regulator.c
+++ b/drivers/regulator/da903x-regulator.c
@@ -61,7 +61,7 @@
#define DA9034_MDTV2 (0x33)
#define DA9034_MVRC (0x34)
-/* DA9035 Registers. DA9034 Registers are comptabile to DA9035. */
+/* DA9035 Registers. DA9034 Registers are compatible with DA9035. */
#define DA9035_OVER3 (0x12)
#define DA9035_VCC2 (0x1f)
#define DA9035_3DTV1 (0x2c)
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index ab6f5d61b173..fbebe538a648 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -67,11 +67,11 @@ struct da9052_regulator_info {
struct da9052_regulator {
struct da9052 *da9052;
- struct da9052_regulator_info *info;
+ const struct da9052_regulator_info *info;
struct regulator_dev *rdev;
};
-static int verify_range(struct da9052_regulator_info *info,
+static int verify_range(const struct da9052_regulator_info *info,
int min_uV, int max_uV)
{
if (min_uV > info->max_uV || max_uV < info->min_uV)
@@ -151,7 +151,7 @@ static int da9052_list_voltage(struct regulator_dev *rdev,
unsigned int selector)
{
struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9052_regulator_info *info = regulator->info;
+ const struct da9052_regulator_info *info = regulator->info;
int id = rdev_get_id(rdev);
int volt_uV;
@@ -175,7 +175,7 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9052_regulator_info *info = regulator->info;
+ const struct da9052_regulator_info *info = regulator->info;
int id = rdev_get_id(rdev);
int ret, sel;
@@ -206,7 +206,7 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9052_regulator_info *info = regulator->info;
+ const struct da9052_regulator_info *info = regulator->info;
int id = rdev_get_id(rdev);
int ret;
@@ -237,7 +237,7 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9052_regulator_info *info = regulator->info;
+ const struct da9052_regulator_info *info = regulator->info;
int id = rdev_get_id(rdev);
int ret = 0;
@@ -327,7 +327,7 @@ static const struct regulator_ops da9052_ldo_ops = {
.activate_bit = (abits),\
}
-static struct da9052_regulator_info da9052_regulator_info[] = {
+static const struct da9052_regulator_info da9052_regulator_info[] = {
DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
@@ -344,7 +344,7 @@ static struct da9052_regulator_info da9052_regulator_info[] = {
DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
-static struct da9052_regulator_info da9053_regulator_info[] = {
+static const struct da9052_regulator_info da9053_regulator_info[] = {
DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
@@ -361,10 +361,10 @@ static struct da9052_regulator_info da9053_regulator_info[] = {
DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
-static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
- int id)
+static inline const struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+ int id)
{
- struct da9052_regulator_info *info;
+ const struct da9052_regulator_info *info;
int i;
switch (chip_id) {
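
Constifying these tables is more than style: the data is immutable after build, so const lets the compiler place it in .rodata and forces every pointer referring to it to be const-qualified too, which is why verify_range() and find_regulator_info() change signature above. The same shape in miniature (hypothetical foo_* names):

  struct foo_info {
  	int id;
  	int min_uV;
  };

  static const struct foo_info foo_table[] = {
  	{ .id = 0, .min_uV = 500000 },
  	{ .id = 1, .min_uV = 950000 },
  };

  static const struct foo_info *foo_find(int id)
  {
  	size_t i;

  	for (i = 0; i < ARRAY_SIZE(foo_table); i++)
  		if (foo_table[i].id == id)
  			return &foo_table[i];	/* const propagates to callers */
  	return NULL;
  }
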
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 352547c375bd..a0d3414aa79e 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -73,7 +73,7 @@ struct da9055_regulator_info {
struct da9055_regulator {
struct da9055 *da9055;
- struct da9055_regulator_info *info;
+ const struct da9055_regulator_info *info;
struct regulator_dev *rdev;
enum gpio_select reg_rselect;
};
@@ -81,7 +81,7 @@ struct da9055_regulator {
static unsigned int da9055_buck_get_mode(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
int ret, mode = 0;
ret = da9055_reg_read(regulator->da9055, info->mode.reg);
@@ -107,7 +107,7 @@ static int da9055_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
int val = 0;
switch (mode) {
@@ -129,7 +129,7 @@ static int da9055_buck_set_mode(struct regulator_dev *rdev,
static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
int ret;
ret = da9055_reg_read(regulator->da9055, info->volt.reg_b);
@@ -145,7 +145,7 @@ static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev)
static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
struct da9055_volt_reg volt = info->volt;
int val = 0;
@@ -167,7 +167,7 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
struct da9055_volt_reg volt = info->volt;
int ret, sel;
@@ -199,7 +199,7 @@ static int da9055_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
int ret;
/*
@@ -242,7 +242,7 @@ static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
int ret;
/* Select register set B for suspend voltage ramping. */
@@ -264,7 +264,7 @@ static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev,
static int da9055_suspend_enable(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
/* Select register set B for voltage ramping. */
if (regulator->reg_rselect == NO_GPIO)
@@ -277,7 +277,7 @@ static int da9055_suspend_enable(struct regulator_dev *rdev)
static int da9055_suspend_disable(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
/* Diselect register set B. */
if (regulator->reg_rselect == NO_GPIO)
@@ -396,7 +396,7 @@ static const struct regulator_ops da9055_ldo_ops = {
},\
}
-static struct da9055_regulator_info da9055_regulator_info[] = {
+static const struct da9055_regulator_info da9055_regulator_info[] = {
DA9055_BUCK(BUCK1, 25, 725, 2075, 6, 9, 0xc, 2),
DA9055_BUCK(BUCK2, 25, 925, 2500, 6, 0, 3, 0),
DA9055_LDO(LDO1, 50, 900, 3300, 6, 2),
@@ -417,7 +417,7 @@ static int da9055_gpio_init(struct device *dev,
struct regulator_config *config,
struct da9055_pdata *pdata, int id)
{
- struct da9055_regulator_info *info = regulator->info;
+ const struct da9055_regulator_info *info = regulator->info;
struct gpio_desc *ren;
struct gpio_desc *ena;
struct gpio_desc *rsel;
@@ -491,9 +491,9 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static inline struct da9055_regulator_info *find_regulator_info(int id)
+static inline const struct da9055_regulator_info *find_regulator_info(int id)
{
- struct da9055_regulator_info *info;
+ const struct da9055_regulator_info *info;
int i;
for (i = 0; i < ARRAY_SIZE(da9055_regulator_info); i++) {
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 82bf321ae06f..9d369cc45d41 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -133,7 +133,7 @@ struct da9063_regulator_info {
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \
.mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK)
-/* Defines asignment of regulators info table to chip model */
+/* Defines assignment of regulators info table to chip model */
struct da9063_dev_model {
const struct da9063_regulator_info *regulator_info;
unsigned int n_regulators;
@@ -715,7 +715,7 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
};
/* Link chip model with regulators info table */
-static struct da9063_dev_model regulators_models[] = {
+static const struct da9063_dev_model regulators_models[] = {
{
.regulator_info = da9063_regulator_info,
.n_regulators = ARRAY_SIZE(da9063_regulator_info),
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index d97162f73793..17527a3f53b4 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -53,7 +53,7 @@ struct da9121_range {
int reg_max;
};
-static struct da9121_range da9121_10A_2phase_current = {
+static const struct da9121_range da9121_10A_2phase_current = {
.val_min = 7000000,
.val_max = 20000000,
.val_stp = 1000000,
@@ -61,7 +61,7 @@ static struct da9121_range da9121_10A_2phase_current = {
.reg_max = 14,
};
-static struct da9121_range da9121_6A_2phase_current = {
+static const struct da9121_range da9121_6A_2phase_current = {
.val_min = 7000000,
.val_max = 12000000,
.val_stp = 1000000,
@@ -69,7 +69,7 @@ static struct da9121_range da9121_6A_2phase_current = {
.reg_max = 6,
};
-static struct da9121_range da9121_5A_1phase_current = {
+static const struct da9121_range da9121_5A_1phase_current = {
.val_min = 3500000,
.val_max = 10000000,
.val_stp = 500000,
@@ -77,7 +77,7 @@ static struct da9121_range da9121_5A_1phase_current = {
.reg_max = 14,
};
-static struct da9121_range da9121_3A_1phase_current = {
+static const struct da9121_range da9121_3A_1phase_current = {
.val_min = 3500000,
.val_max = 6000000,
.val_stp = 500000,
@@ -85,7 +85,7 @@ static struct da9121_range da9121_3A_1phase_current = {
.reg_max = 6,
};
-static struct da9121_range da914x_40A_4phase_current = {
+static const struct da9121_range da914x_40A_4phase_current = {
.val_min = 26000000,
.val_max = 78000000,
.val_stp = 4000000,
@@ -93,7 +93,7 @@ static struct da9121_range da914x_40A_4phase_current = {
.reg_max = 14,
};
-static struct da9121_range da914x_20A_2phase_current = {
+static const struct da9121_range da914x_20A_2phase_current = {
.val_min = 13000000,
.val_max = 39000000,
.val_stp = 2000000,
@@ -104,7 +104,7 @@ static struct da9121_range da914x_20A_2phase_current = {
struct da9121_variant_info {
int num_bucks;
int num_phases;
- struct da9121_range *current_range;
+ const struct da9121_range *current_range;
};
static const struct da9121_variant_info variant_parameters[] = {
@@ -188,7 +188,7 @@ static int da9121_get_current_limit(struct regulator_dev *rdev)
{
struct da9121 *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- struct da9121_range *range =
+ const struct da9121_range *range =
variant_parameters[chip->variant_id].current_range;
unsigned int val = 0;
int ret = 0;
@@ -219,7 +219,7 @@ static int da9121_ceiling_selector(struct regulator_dev *rdev,
unsigned int *selector)
{
struct da9121 *chip = rdev_get_drvdata(rdev);
- struct da9121_range *range =
+ const struct da9121_range *range =
variant_parameters[chip->variant_id].current_range;
unsigned int level;
unsigned int i = 0;
@@ -259,7 +259,7 @@ static int da9121_set_current_limit(struct regulator_dev *rdev,
{
struct da9121 *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- struct da9121_range *range =
+ const struct da9121_range *range =
variant_parameters[chip->variant_id].current_range;
unsigned int sel = 0;
int ret = 0;
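
Only the qualifier changes for these range tables; the arithmetic that consumes them is untouched. Assuming the encoding is linear in val_stp between reg_min and reg_max (reg_min is not visible in these hunks, so treat this purely as an illustration), a limit-to-selector conversion would look roughly like:

  static int foo_current_to_sel(const struct da9121_range *range, int limit_uA)
  {
  	if (limit_uA < range->val_min || limit_uA > range->val_max)
  		return -EINVAL;

  	/* assumed linear encoding: val = val_min + (sel - reg_min) * val_stp */
  	return range->reg_min + (limit_uA - range->val_min) / range->val_stp;
  }
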
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index d8b39ea3de0e..d4f14d7ea8cf 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -264,7 +264,7 @@ static const struct regulator_ops da9211_buck_ops = {
.of_map_mode = da9211_map_buck_mode,\
}
-static struct regulator_desc da9211_regulators[] = {
+static const struct regulator_desc da9211_regulators[] = {
DA9211_BUCK(BUCKA),
DA9211_BUCK(BUCKB),
};
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 7111c46e9de1..1b893cdd1aad 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
* In cases where the supply is not strictly required, callers can check for
* -ENODEV error and handle it accordingly.
*
- * Returns: voltage in microvolts on success, or an error code on failure.
+ * Returns: voltage in microvolts on success, or a negative error number on failure.
*/
int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id)
{
@@ -174,8 +174,8 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id)
* Since we need a real voltage, we use devm_regulator_get_optional()
* rather than getting a dummy regulator with devm_regulator_get() and
* then letting regulator_get_voltage() fail with -EINVAL. This way, the
- * caller can handle the -ENODEV error code if needed instead of the
- * ambiguous -EINVAL.
+	 * caller can handle the -ENODEV error if needed instead of the
+	 * ambiguous -EINVAL.
*/
r = devm_regulator_get_optional(dev, id);
if (IS_ERR(r))
@@ -276,7 +276,7 @@ static int _devm_regulator_bulk_get(struct device *dev, int num_consumers,
* @num_consumers: number of consumers to register
* @consumers: configuration of consumers; clients are stored here.
*
- * @return 0 on success, an errno on failure.
+ * @return 0 on success, a negative error number on failure.
*
* This helper function allows drivers to get several regulator
* consumers in one operation with management, the regulators will
@@ -299,7 +299,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
* @num_consumers: number of consumers to register
* @consumers: configuration of consumers; clients are stored here.
*
- * @return 0 on success, an errno on failure.
+ * @return 0 on success, a negative error number on failure.
*
* This helper function allows drivers to exclusively get several
* regulator consumers in one operation with management, the regulators
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_exclusive);
* This is a convenience function to allow bulk regulator configuration
* to be stored "static const" in files.
*
- * Return: 0 on success, an errno on failure.
+ * Return: 0 on success, a negative error number on failure.
*/
int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
const struct regulator_bulk_data *in_consumers,
@@ -393,7 +393,7 @@ static void devm_regulator_bulk_disable(void *res)
* @num_consumers: number of consumers to register
* @id: list of supply names or regulator IDs
*
- * @return 0 on success, an errno on failure.
+ * @return 0 on success, a negative error number on failure.
*
* This helper function allows drivers to get several regulator
* consumers in one operation with management, the regulators will
@@ -574,7 +574,7 @@ static void devm_regulator_unregister_supply_alias(struct device *dev,
* lookup the supply
* @num_id: number of aliases to register
*
- * @return 0 on success, an errno on failure.
+ * @return 0 on success, a negative error number on failure.
*
* This helper function allows drivers to register several supply
* aliases in one operation, the aliases will be automatically
@@ -726,7 +726,7 @@ static void regulator_irq_helper_drop(void *res)
* IRQ.
* @rdev_amount: Amount of regulators associated with this IRQ.
*
- * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ * Return: handle to irq_helper or an ERR_PTR() encoded negative error number.
*/
void *devm_regulator_irq_helper(struct device *dev,
const struct regulator_irq_desc *d, int irq,
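
Among the devres helpers touched above, devm_regulator_get_enable_read_voltage() has the most consumer-visible contract: -ENODEV means the supply is genuinely absent and may be treated as optional, while any other negative error number is fatal. A minimal consumer sketch (the "vref" supply name and the fallback value are illustrative assumptions):

  static int foo_probe(struct device *dev)
  {
  	int uv;

  	uv = devm_regulator_get_enable_read_voltage(dev, "vref");
  	if (uv == -ENODEV)
  		uv = 1800000;	/* optional supply: assume a board default */
  	else if (uv < 0)
  		return uv;	/* any other negative error number is fatal */

  	/* uv now holds the supply voltage in microvolts */
  	return 0;
  }
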
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 17c9bf204385..bd9447dac596 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -46,7 +46,7 @@
/* VSEL bit definitions */
#define VSEL_BUCK_EN BIT(7)
#define VSEL_MODE BIT(6)
-/* Chip ID and Verison */
+/* Chip ID and Version */
#define DIE_ID 0x0F /* ID1 */
#define DIE_REV 0x0F /* ID2 */
/* Control bit definitions */
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 2d5a42b2b3d8..b6cb0aaac3b1 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -26,6 +26,8 @@ static void regulator_fixed_release(struct device *dev)
* @supplies: consumers for this regulator
* @num_supplies: number of consumers
* @uv: voltage in microvolts
+ *
+ * Return: Pointer to registered platform device, or %NULL if memory
+ * allocation fails.
*/
struct platform_device *regulator_register_always_on(int id, const char *name,
struct regulator_consumer_supply *supplies, int num_supplies, int uv)
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index cb93e5cdcfa9..1cb647ed70c6 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -129,7 +129,7 @@ static irqreturn_t reg_fixed_under_voltage_irq_handler(int irq, void *data)
* If it's an optional IRQ and not found, it returns 0.
* Otherwise, it attempts to request the threaded IRQ.
*
- * Return: 0 on success, or error code on failure.
+ * Return: 0 on success, or a negative error number on failure.
*/
static int reg_fixed_get_irqs(struct device *dev,
struct fixed_voltage_data *priv)
@@ -158,8 +158,10 @@ static int reg_fixed_get_irqs(struct device *dev,
* @desc: regulator description
*
* Populates fixed_voltage_config structure by extracting data from device
- * tree node, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
+ * tree node.
+ *
+ * Return: Pointer to a populated &struct fixed_voltage_config or %NULL if
+ * memory allocation fails.
*/
static struct fixed_voltage_config *
of_get_fixed_voltage_config(struct device *dev,
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 6e1ace660b8c..0def82eb8b46 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -125,7 +125,7 @@ static int regulator_range_selector_to_index(struct regulator_dev *rdev,
*
* Regulators that use regmap for their register I/O and use pickable
* ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask
- * fields in their descriptor and then use this as their get_voltage_vsel
+ * fields in their descriptor and then use this as their get_voltage_sel
* operation, saving some code.
*/
int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
@@ -195,7 +195,7 @@ static int write_separate_vsel_and_range(struct regulator_dev *rdev,
*
* Regulators that use regmap for their register I/O and use pickable
* ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask
- * fields in their descriptor and then use this as their set_voltage_vsel
+ * fields in their descriptor and then use this as their set_voltage_sel
* operation, saving some code.
*/
int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_pickable_regmap);
*
* Regulators that use regmap for their register I/O can set the
* vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their get_voltage_vsel operation, saving some code.
+ * as their get_voltage_sel operation, saving some code.
*/
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
@@ -276,7 +276,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
*
* Regulators that use regmap for their register I/O can set the
* vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their set_voltage_vsel operation, saving some code.
+ * as their set_voltage_sel operation, saving some code.
*/
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
{
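
The corrected hook names matter because these helpers exist precisely to be plugged into those regulator_ops slots. A descriptor wiring them up looks like this (a sketch; the register address, mask and voltage range are invented for illustration):

  static const struct regulator_ops foo_ops = {
  	.get_voltage_sel = regulator_get_voltage_sel_regmap,
  	.set_voltage_sel = regulator_set_voltage_sel_regmap,
  	.list_voltage	 = regulator_list_voltage_linear,
  };

  static const struct regulator_desc foo_desc = {
  	.name		= "foo",
  	.ops		= &foo_ops,
  	.vsel_reg	= 0x10,		/* hypothetical VSEL register */
  	.vsel_mask	= 0x3f,
  	.min_uV		= 600000,
  	.uV_step	= 12500,
  	.n_voltages	= 64,
  	.owner		= THIS_MODULE,
  };
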
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 82e9e364d4d4..69d24728d6a4 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -303,7 +303,7 @@ static const struct regulator_ops hi6421_buck345_ops;
}
/* HI6421 regulator information */
-static struct hi6421_regulator_info
+static const struct hi6421_regulator_info
hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
HI6421_LDO(LDO0, hi6421_vout0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
10000, 0x20, 8000),
@@ -384,7 +384,7 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
unsigned int reg_val;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
@@ -397,7 +397,7 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
unsigned int reg_val;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
@@ -411,7 +411,7 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
unsigned int new_mode;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
@@ -436,7 +436,7 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
unsigned int new_mode;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
@@ -462,7 +462,7 @@ static unsigned int
hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV, int load_uA)
{
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
@@ -539,7 +539,7 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
{
struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct hi6421_regulator_pdata *pdata;
- struct hi6421_regulator_info *info;
+ const struct hi6421_regulator_info *info;
struct regulator_config config = { };
struct regulator_dev *rdev;
int i;
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
index 23924ff0c7b2..b3ebd1624814 100644
--- a/drivers/regulator/hi6421v530-regulator.c
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -21,12 +21,10 @@
* struct hi6421v530_regulator_info - hi6421v530 regulator information
 * @rdesc: regulator description
* @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep
- * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
*/
struct hi6421v530_regulator_info {
struct regulator_desc rdesc;
u8 mode_mask;
- u32 eco_microamp;
};
/* HI6421v530 regulators */
@@ -68,10 +66,9 @@ static const struct regulator_ops hi6421v530_ldo_ops;
* emask - enable mask
* odelay - off/on delay time in uS
* ecomask - eco mode mask
- * ecoamp - eco mode load uppler limit in uA
*/
#define HI6421V530_LDO(_ID, v_table, vreg, vmask, ereg, emask, \
- odelay, ecomask, ecoamp) { \
+ odelay, ecomask) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
@@ -90,31 +87,30 @@ static const struct regulator_ops hi6421v530_ldo_ops;
.off_on_delay = odelay, \
}, \
.mode_mask = ecomask, \
- .eco_microamp = ecoamp, \
}
/* HI6421V530 regulator information */
-static struct hi6421v530_regulator_info hi6421v530_regulator_info[] = {
+static const struct hi6421v530_regulator_info hi6421v530_regulator_info[] = {
HI6421V530_LDO(LDO3, ldo_3_voltages, 0x061, 0xf, 0x060, 0x2,
- 20000, 0x6, 8000),
+ 20000, 0x6),
HI6421V530_LDO(LDO9, ldo_9_11_voltages, 0x06b, 0x7, 0x06a, 0x2,
- 40000, 0x6, 8000),
+ 40000, 0x6),
HI6421V530_LDO(LDO11, ldo_9_11_voltages, 0x06f, 0x7, 0x06e, 0x2,
- 40000, 0x6, 8000),
+ 40000, 0x6),
HI6421V530_LDO(LDO15, ldo_15_16_voltages, 0x077, 0x7, 0x076, 0x2,
- 40000, 0x6, 8000),
+ 40000, 0x6),
HI6421V530_LDO(LDO16, ldo_15_16_voltages, 0x079, 0x7, 0x078, 0x2,
- 40000, 0x6, 8000),
+ 40000, 0x6),
};
static unsigned int hi6421v530_regulator_ldo_get_mode(
struct regulator_dev *rdev)
{
- struct hi6421v530_regulator_info *info;
+ const struct hi6421v530_regulator_info *info;
unsigned int reg_val;
- info = rdev_get_drvdata(rdev);
+ info = container_of(rdev->desc, struct hi6421v530_regulator_info, rdesc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & (info->mode_mask))
@@ -126,10 +122,10 @@ static unsigned int hi6421v530_regulator_ldo_get_mode(
static int hi6421v530_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421v530_regulator_info *info;
+ const struct hi6421v530_regulator_info *info;
unsigned int new_mode;
- info = rdev_get_drvdata(rdev);
+ info = container_of(rdev->desc, struct hi6421v530_regulator_info, rdesc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
new_mode = 0;
@@ -176,7 +172,6 @@ static int hi6421v530_regulator_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(hi6421v530_regulator_info); i++) {
config.dev = pdev->dev.parent;
config.regmap = pmic->regmap;
- config.driver_data = &hi6421v530_regulator_info[i];
rdev = devm_regulator_register(&pdev->dev,
&hi6421v530_regulator_info[i].rdesc,
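
The driver_data removals in this file all rely on the same property: devm_regulator_register() stores the given &struct regulator_desc pointer in rdev->desc, and each desc here is embedded in its info entry, so container_of() recovers the entry with no driver data at all:

  /* rdev->desc points at info->rdesc, so container_of() walks back to *info */
  const struct hi6421v530_regulator_info *info =
  	container_of(rdev->desc, struct hi6421v530_regulator_info, rdesc);
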
diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
index 4e10daa1e689..e5f6fbfc9016 100644
--- a/drivers/regulator/hi6421v600-regulator.c
+++ b/drivers/regulator/hi6421v600-regulator.c
@@ -118,7 +118,7 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_spmi_reg_info *sreg;
+ const struct hi6421_spmi_reg_info *sreg;
unsigned int reg_val;
sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
@@ -133,7 +133,7 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_spmi_reg_info *sreg;
+ const struct hi6421_spmi_reg_info *sreg;
unsigned int val;
sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
@@ -160,7 +160,7 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV,
int load_uA)
{
- struct hi6421_spmi_reg_info *sreg;
+ const struct hi6421_spmi_reg_info *sreg;
sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
@@ -195,7 +195,7 @@ enum hi6421_spmi_regulator_id {
hi6421v600_ldo34,
};
-static struct hi6421_spmi_reg_info regulator_info[] = {
+static const struct hi6421_spmi_reg_info regulator_info[] = {
HI6421V600_LDO(ldo3, range_1v5_to_2v0,
0x16, 0x01, 0x51,
20000, 120,
@@ -235,7 +235,7 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
struct device *pmic_dev = pdev->dev.parent;
struct regulator_config config = { };
struct hi6421_spmi_reg_priv *priv;
- struct hi6421_spmi_reg_info *info;
+ const struct hi6421_spmi_reg_info *info;
struct device *dev = &pdev->dev;
struct regmap *regmap;
struct regulator_dev *rdev;
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 77a502141089..5b43f802468d 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -66,7 +66,8 @@ static inline struct regulator_dev *dev_to_rdev(struct device *dev)
}
#ifdef CONFIG_OF
-struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+struct regulator_dev *of_regulator_dev_lookup(struct device *dev,
+ const char *supply);
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
struct regulator_config *config,
@@ -80,10 +81,10 @@ int of_get_n_coupled(struct regulator_dev *rdev);
bool of_check_coupling_data(struct regulator_dev *rdev);
#else
-static inline struct regulator_dev *
-of_find_regulator_by_node(struct device_node *np)
+static inline struct regulator_dev *of_regulator_dev_lookup(struct device *dev,
+ const char *supply)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct regulator_init_data *
@@ -120,6 +121,10 @@ enum regulator_get_type {
MAX_GET_TYPE
};
+int _regulator_get_common_check(struct device *dev, const char *id,
+ enum regulator_get_type get_type);
+struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct device *dev,
+ const char *id, enum regulator_get_type get_type);
struct regulator *_regulator_get(struct device *dev, const char *id,
enum regulator_get_type get_type);
int _regulator_bulk_get(struct device *dev, int num_consumers,
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index 5ab1a0befe12..0aa188b2bbb2 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -333,7 +333,7 @@ static void init_rdev_errors(struct regulator_irq *h)
* IRQ.
* @rdev_amount: Amount of regulators associated with this IRQ.
*
- * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ * Return: handle to irq_helper or an ERR_PTR() encoded negative error number.
*/
void *regulator_irq_helper(struct device *dev,
const struct regulator_irq_desc *d, int irq,
@@ -404,16 +404,21 @@ EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
/**
* regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs
*
- * @irq: Number of IRQ that occurred
- * @rid: Information about the event IRQ indicates
- * @dev_mask: mask indicating the regulator originating the IRQ
+ * @irq: Number of IRQ that occurred.
+ * @rid: Information about the event the IRQ indicates.
+ * The function fills in the &regulator_err_state->notifs
+ * and &regulator_err_state->errors fields of
+ * &regulator_irq_data->states as output.
+ * @dev_mask: mask indicating the regulator originating the IRQ.
*
 * Regulators whose IRQ has a single, well-defined purpose (always indicate
* exactly one event, and are relevant to exactly one regulator device) can
- * use this function as their map_event callbac for their regulator IRQ
- * notification helperk. Exactly one rdev and exactly one error (in
+ * use this function as their map_event callback for their regulator IRQ
+ * notification helper. Exactly one rdev and exactly one error (in
* "common_errs"-field) can be given at IRQ helper registration for
* regulator_irq_map_event_simple() to be viable.
+ *
+ * Return: 0.
*/
int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
unsigned long *dev_mask)
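
Registration for the trivial case described above passes this function as the map_event callback together with exactly one rdev and one common error bit. Roughly (a sketch assuming the regulator_irq_helper() signature in this tree; the foo names and the chosen error flag are illustrative):

  static const struct regulator_irq_desc foo_ovp_desc = {
  	.name	   = "foo-ovp",
  	.map_event = regulator_irq_map_event_simple,
  };

  /* exactly one rdev, exactly one error in common_errs */
  helper = regulator_irq_helper(dev, &foo_ovp_desc, irq, 0,
  			      REGULATOR_ERROR_OVER_VOLTAGE_WARN,
  			      NULL, &rdev, 1);
  if (IS_ERR(helper))
  	return PTR_ERR(helper);
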
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index 8bbcd983a74a..4a568b1b0107 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -70,7 +70,7 @@ static int max5970_read(struct device *dev, enum hwmon_sensor_types type,
* millivolts) and then divide it by the maximum value of the 10-bit ADC.
*/
*val = (*val * ddata->irng) >> 10;
- /* Convert the voltage meansurement across shunt resistor to current */
+ /* Convert the voltage measurement across shunt resistor to current */
*val = (*val * 1000) / ddata->shunt_micro_ohms;
return 0;
default:
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index 94abfbb2bc1e..7368f54f046d 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -43,8 +43,6 @@ struct max77650_regulator_desc {
unsigned int regB;
};
-static struct max77650_regulator_desc max77651_SBB1_desc;
-
static const unsigned int max77651_sbb1_volt_range_sel[] = {
0x0, 0x1, 0x2, 0x3
};
@@ -66,11 +64,11 @@ static const unsigned int max77650_current_limit_table[] = {
static int max77650_regulator_is_enabled(struct regulator_dev *rdev)
{
- struct max77650_regulator_desc *rdesc;
+ const struct max77650_regulator_desc *rdesc;
struct regmap *map;
int val, rv, en;
- rdesc = rdev_get_drvdata(rdev);
+ rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc);
map = rdev_get_regmap(rdev);
rv = regmap_read(map, rdesc->regB, &val);
@@ -84,10 +82,10 @@ static int max77650_regulator_is_enabled(struct regulator_dev *rdev)
static int max77650_regulator_enable(struct regulator_dev *rdev)
{
- struct max77650_regulator_desc *rdesc;
+ const struct max77650_regulator_desc *rdesc;
struct regmap *map;
- rdesc = rdev_get_drvdata(rdev);
+ rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc);
map = rdev_get_regmap(rdev);
return regmap_update_bits(map, rdesc->regB,
@@ -97,10 +95,10 @@ static int max77650_regulator_enable(struct regulator_dev *rdev)
static int max77650_regulator_disable(struct regulator_dev *rdev)
{
- struct max77650_regulator_desc *rdesc;
+ const struct max77650_regulator_desc *rdesc;
struct regmap *map;
- rdesc = rdev_get_drvdata(rdev);
+ rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc);
map = rdev_get_regmap(rdev);
return regmap_update_bits(map, rdesc->regB,
@@ -145,7 +143,7 @@ static const struct regulator_ops max77651_SBB1_regulator_ops = {
.set_active_discharge = regulator_set_active_discharge_regmap,
};
-static struct max77650_regulator_desc max77650_LDO_desc = {
+static const struct max77650_regulator_desc max77650_LDO_desc = {
.desc = {
.name = "ldo",
.of_match = of_match_ptr("ldo"),
@@ -171,7 +169,7 @@ static struct max77650_regulator_desc max77650_LDO_desc = {
.regB = MAX77650_REG_CNFG_LDO_B,
};
-static struct max77650_regulator_desc max77650_SBB0_desc = {
+static const struct max77650_regulator_desc max77650_SBB0_desc = {
.desc = {
.name = "sbb0",
.of_match = of_match_ptr("sbb0"),
@@ -201,7 +199,7 @@ static struct max77650_regulator_desc max77650_SBB0_desc = {
.regB = MAX77650_REG_CNFG_SBB0_B,
};
-static struct max77650_regulator_desc max77650_SBB1_desc = {
+static const struct max77650_regulator_desc max77650_SBB1_desc = {
.desc = {
.name = "sbb1",
.of_match = of_match_ptr("sbb1"),
@@ -231,7 +229,7 @@ static struct max77650_regulator_desc max77650_SBB1_desc = {
.regB = MAX77650_REG_CNFG_SBB1_B,
};
-static struct max77650_regulator_desc max77651_SBB1_desc = {
+static const struct max77650_regulator_desc max77651_SBB1_desc = {
.desc = {
.name = "sbb1",
.of_match = of_match_ptr("sbb1"),
@@ -264,7 +262,7 @@ static struct max77650_regulator_desc max77651_SBB1_desc = {
.regB = MAX77650_REG_CNFG_SBB1_B,
};
-static struct max77650_regulator_desc max77650_SBB2_desc = {
+static const struct max77650_regulator_desc max77650_SBB2_desc = {
.desc = {
.name = "sbb2",
.of_match = of_match_ptr("sbb2"),
@@ -294,7 +292,7 @@ static struct max77650_regulator_desc max77650_SBB2_desc = {
.regB = MAX77650_REG_CNFG_SBB2_B,
};
-static struct max77650_regulator_desc max77651_SBB2_desc = {
+static const struct max77650_regulator_desc max77651_SBB2_desc = {
.desc = {
.name = "sbb2",
.of_match = of_match_ptr("sbb2"),
@@ -326,8 +324,8 @@ static struct max77650_regulator_desc max77651_SBB2_desc = {
static int max77650_regulator_probe(struct platform_device *pdev)
{
- struct max77650_regulator_desc **rdescs;
- struct max77650_regulator_desc *rdesc;
+ const struct max77650_regulator_desc **rdescs;
+ const struct max77650_regulator_desc *rdesc;
struct regulator_config config = { };
struct device *dev, *parent;
struct regulator_dev *rdev;
@@ -376,7 +374,6 @@ static int max77650_regulator_probe(struct platform_device *pdev)
for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) {
rdesc = rdescs[i];
- config.driver_data = rdesc;
rdev = devm_regulator_register(dev, &rdesc->desc, &config);
if (IS_ERR(rdev))
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 69eb6abd2551..b2e87642bec4 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -160,8 +160,8 @@ static unsigned max77802_get_mode(struct regulator_dev *rdev)
* Enable Control Logic3 by PWRREQ (LDO 3)
*
* If setting the regulator mode fails, the function only warns but does
- * not return an error code to avoid the regulator core to stop setting
- * the operating mode for the remaining regulators.
+ * not return a negative error number, so that the regulator core does not
+ * stop setting the operating mode for the remaining regulators.
*/
static int max77802_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c
index 5590cdf615b7..310bc8ee7af8 100644
--- a/drivers/regulator/max77826-regulator.c
+++ b/drivers/regulator/max77826-regulator.c
@@ -153,7 +153,6 @@ enum max77826_regulators {
struct max77826_regulator_info {
struct regmap *regmap;
- struct regulator_desc *rdesc;
};
static const struct regmap_config max77826_regmap_config = {
@@ -187,7 +186,7 @@ static const struct regulator_ops max77826_buck_ops = {
.set_voltage_time_sel = max77826_set_voltage_time_sel,
};
-static struct regulator_desc max77826_regulators_desc[] = {
+static const struct regulator_desc max77826_regulators_desc[] = {
MAX77826_LDO(1, NMOS),
MAX77826_LDO(2, NMOS),
MAX77826_LDO(3, NMOS),
@@ -246,7 +245,6 @@ static int max77826_i2c_probe(struct i2c_client *client)
if (!info)
return -ENOMEM;
- info->rdesc = max77826_regulators_desc;
regmap = devm_regmap_init_i2c(client, &max77826_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to allocate regmap!\n");
diff --git a/drivers/regulator/max77857-regulator.c b/drivers/regulator/max77857-regulator.c
index bc28dc8503a8..1216cc3a6f72 100644
--- a/drivers/regulator/max77857-regulator.c
+++ b/drivers/regulator/max77857-regulator.c
@@ -427,7 +427,7 @@ static int max77857_probe(struct i2c_client *client)
return 0;
}
-const struct i2c_device_id max77857_id[] = {
+static const struct i2c_device_id max77857_id[] = {
{ "max77831", ID_MAX77831 },
{ "max77857", ID_MAX77857 },
{ "max77859", ID_MAX77859 },
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 96ca146281d6..f68caa07f546 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -470,8 +470,7 @@ static const struct thermal_zone_device_ops max77621_tz_ops = {
static int max8973_thermal_init(struct max8973_chip *mchip)
{
struct thermal_zone_device *tzd;
- struct irq_data *irq_data;
- unsigned long irq_flags = 0;
+ unsigned long irq_flags;
int ret;
if (mchip->id != MAX77621)
@@ -489,9 +488,7 @@ static int max8973_thermal_init(struct max8973_chip *mchip)
if (mchip->irq <= 0)
return 0;
- irq_data = irq_get_irq_data(mchip->irq);
- if (irq_data)
- irq_flags = irqd_get_trigger_type(irq_data);
+ irq_flags = irq_get_trigger_type(mchip->irq);
ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
max8973_thermal_irq,
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index 5f201ee9a5b8..e77621b6466c 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -8,6 +8,7 @@
// This driver is based on max8998.c
#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
@@ -876,7 +877,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct device_node *pmic_np, *regulators_np, *reg_np;
+ struct device_node *pmic_np, *reg_np;
struct max8997_regulator_data *rdata;
unsigned int i, dvs_voltage_nr = 1;
@@ -886,7 +887,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_get_child_by_name(pmic_np, "regulators");
+ struct device_node *regulators_np __free(device_node) = of_get_child_by_name(pmic_np,
+ "regulators");
if (!regulators_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -898,10 +900,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rdata),
GFP_KERNEL);
- if (!rdata) {
- of_node_put(regulators_np);
+ if (!rdata)
return -ENOMEM;
- }
pdata->regulators = rdata;
for_each_child_of_node(regulators_np, reg_np) {
@@ -922,7 +922,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata->reg_node = reg_np;
rdata++;
}
- of_node_put(regulators_np);
pdata->buck1_gpiodvs = of_property_read_bool(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs");
pdata->buck2_gpiodvs = of_property_read_bool(pmic_np, "max8997,pmic-buck2-uses-gpio-dvs");
@@ -941,9 +940,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
}
}
- if (of_get_property(pmic_np,
- "max8997,pmic-ignore-gpiodvs-side-effect", NULL))
- pdata->ignore_gpiodvs_side_effect = true;
+ pdata->ignore_gpiodvs_side_effect = of_property_read_bool(pmic_np,
+ "max8997,pmic-ignore-gpiodvs-side-effect");
dvs_voltage_nr = 8;
}
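
The switch to __free() is what makes deleting the explicit of_node_put() calls above safe: <linux/cleanup.h> arranges for the put to run automatically when the variable leaves scope, on every return path. The pattern in isolation (a sketch; parent is assumed to be a valid node):

  struct device_node *np __free(device_node) =
  		of_get_child_by_name(parent, "regulators");

  if (!np)
  	return -EINVAL;		/* no of_node_put() needed here... */

  /* ...nor on any later exit: cleanup runs when np goes out of scope */
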
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 5de9d4fa5113..b34ae0bbba6f 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -107,9 +107,10 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
return REGULATOR_MODE_INVALID;
}
-#define MCP16502_REGULATOR(_name, _id, _ranges, _ops, _ramp_table) \
+#define MCP16502_REGULATOR(_name, _id, _sn, _ranges, _ops, _ramp_table) \
[_id] = { \
.name = _name, \
+ .supply_name = #_sn, \
.regulators_node = "regulators", \
.id = _id, \
.ops = &(_ops), \
@@ -467,18 +468,18 @@ static const struct linear_range b234_ranges[] = {
};
static const struct regulator_desc mcp16502_desc[] = {
- /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops, ramp_table) */
- MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops,
+ /* MCP16502_REGULATOR(_name, _id, _sn, _ranges, _ops, _ramp_table) */
+ MCP16502_REGULATOR("VDD_IO", BUCK1, pvin1, b1l12_ranges, mcp16502_buck_ops,
mcp16502_ramp_b1l12),
- MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops,
+ MCP16502_REGULATOR("VDD_DDR", BUCK2, pvin2, b234_ranges, mcp16502_buck_ops,
mcp16502_ramp_b234),
- MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops,
+ MCP16502_REGULATOR("VDD_CORE", BUCK3, pvin3, b234_ranges, mcp16502_buck_ops,
mcp16502_ramp_b234),
- MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops,
+ MCP16502_REGULATOR("VDD_OTHER", BUCK4, pvin4, b234_ranges, mcp16502_buck_ops,
mcp16502_ramp_b234),
- MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops,
+ MCP16502_REGULATOR("LDO1", LDO1, lvin, b1l12_ranges, mcp16502_ldo_ops,
mcp16502_ramp_b1l12),
- MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops,
+ MCP16502_REGULATOR("LDO2", LDO2, lvin, b1l12_ranges, mcp16502_ldo_ops,
mcp16502_ramp_b1l12)
};
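
The new _sn macro argument populates .supply_name, the name the regulator core uses to resolve each regulator's input supply (for device tree consumers, via a "<supply_name>-supply" property). For BUCK1 the expansion now effectively contains:

  .name		= "VDD_IO",
  .supply_name	= "pvin1",	/* resolved against a pvin1-supply property */
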
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 3457e650a994..e6794190cb68 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -163,7 +163,7 @@ static const struct regulator_ops mp5416_buck_ops = {
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
-static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
+static const struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2),
MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
@@ -174,7 +174,7 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416LDO("ldo4", 4, BIT(1)),
};
-static struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = {
+static const struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 1),
MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
diff --git a/drivers/regulator/mt6357-regulator.c b/drivers/regulator/mt6357-regulator.c
index c0439a4e0b50..1eb69c7a6acb 100644
--- a/drivers/regulator/mt6357-regulator.c
+++ b/drivers/regulator/mt6357-regulator.c
@@ -123,7 +123,7 @@ struct mt6357_regulator_info {
*
* Regulators that use regmap for their register I/O can set the
* da_vsel_reg and da_vsel_mask fields in the info structure and
- * then use this as their get_voltage_vsel operation.
+ * then use this as their get_voltage_sel operation.
*/
static int mt6357_get_buck_voltage_sel(struct regulator_dev *rdev)
{
diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c
index 9bf4163221f1..f5662c569464 100644
--- a/drivers/regulator/mtk-dvfsrc-regulator.c
+++ b/drivers/regulator/mtk-dvfsrc-regulator.c
@@ -19,7 +19,7 @@ enum dvfsrc_regulator_id {
};
struct dvfsrc_regulator_pdata {
- struct regulator_desc *descs;
+ const struct regulator_desc *descs;
u32 size;
};
@@ -107,7 +107,7 @@ static const unsigned int mt6873_voltages[] = {
725000,
};
-static struct regulator_desc mt6873_regulators[] = {
+static const struct regulator_desc mt6873_regulators[] = {
MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt6873_voltages),
MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt6873_voltages),
};
@@ -122,7 +122,7 @@ static const unsigned int mt8183_voltages[] = {
800000,
};
-static struct regulator_desc mt8183_regulators[] = {
+static const struct regulator_desc mt8183_regulators[] = {
MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8183_voltages),
};
@@ -138,7 +138,7 @@ static const unsigned int mt8195_voltages[] = {
750000,
};
-static struct regulator_desc mt8195_regulators[] = {
+static const struct regulator_desc mt8195_regulators[] = {
MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8195_voltages),
MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt8195_voltages),
};
@@ -159,7 +159,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
return -EINVAL;
for (i = 0; i < pdata->size; i++) {
- struct regulator_desc *vrdesc = &pdata->descs[i];
+ const struct regulator_desc *vrdesc = &pdata->descs[i];
struct regulator_dev *rdev;
rdev = devm_regulator_register(&pdev->dev, vrdesc, &config);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 03afc160fc72..3f490d81abc2 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -338,8 +338,10 @@ static int of_get_regulation_constraints(struct device *dev,
* @desc: regulator description
*
* Populates regulator_init_data structure by extracting data from device
- * tree node, returns a pointer to the populated structure or NULL if memory
- * alloc fails.
+ * tree node.
+ *
+ * Return: Pointer to a populated &struct regulator_init_data or %NULL if
+ * memory allocation fails.
*/
struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
struct device_node *node,
@@ -391,7 +393,7 @@ static void devm_of_regulator_put_matches(struct device *dev, void *res)
* in place and an additional of_node reference is taken for each matched
* regulator.
*
- * Returns the number of matches found or a negative error code on failure.
+ * Return: The number of matches found or a negative error number on failure.
*/
int of_regulator_match(struct device *dev, struct device_node *node,
struct of_regulator_match *matches,
@@ -550,7 +552,71 @@ error:
return NULL;
}
-struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+/**
+ * of_get_child_regulator - get a child regulator device node
+ * based on supply name
+ * @parent: Parent device node
+ * @prop_name: Combination of regulator supply name and "-supply"
+ *
+ * Traverse all child nodes and extract the child regulator device node
+ * corresponding to the supply name.
+ *
+ * Return: Pointer to the &struct device_node corresponding to the regulator
+ * if found, or %NULL if not found.
+ */
+static struct device_node *of_get_child_regulator(struct device_node *parent,
+ const char *prop_name)
+{
+ struct device_node *regnode = NULL;
+ struct device_node *child = NULL;
+
+ for_each_child_of_node(parent, child) {
+ regnode = of_parse_phandle(child, prop_name, 0);
+ if (regnode)
+ goto err_node_put;
+
+ regnode = of_get_child_regulator(child, prop_name);
+ if (regnode)
+ goto err_node_put;
+ }
+ return NULL;
+
+err_node_put:
+ of_node_put(child);
+ return regnode;
+}
+
+/**
+ * of_get_regulator - get a regulator device node based on supply name
+ * @dev: Device pointer for the consumer (of regulator) device
+ * @supply: regulator supply name
+ *
+ * Extract the regulator device node corresponding to the supply name.
+ *
+ * Return: Pointer to the &struct device_node corresponding to the regulator
+ * if found, or %NULL if not found.
+ */
+static struct device_node *of_get_regulator(struct device *dev, const char *supply)
+{
+ struct device_node *regnode = NULL;
+ char prop_name[64]; /* 64 is max size of property name */
+
+ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
+
+ snprintf(prop_name, 64, "%s-supply", supply);
+ regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+ if (regnode)
+ return regnode;
+
+ regnode = of_get_child_regulator(dev->of_node, prop_name);
+ if (regnode)
+ return regnode;
+
+ dev_dbg(dev, "Looking up %s property in node %pOF failed\n", prop_name, dev->of_node);
+ return NULL;
+}
+
+static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
{
struct device *dev;
@@ -559,6 +625,46 @@ struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
return dev ? dev_to_rdev(dev) : NULL;
}
+/**
+ * of_regulator_dev_lookup - lookup a regulator device with device tree only
+ * @dev: Device pointer for regulator supply lookup.
+ * @supply: Supply name or regulator ID.
+ *
+ * Return: Pointer to the &struct regulator_dev on success, or ERR_PTR()
+ * encoded value on error.
+ *
+ * If successful, returns a pointer to the &struct regulator_dev that
+ * corresponds to the name @supply and with the embedded &struct device
+ * refcount incremented by one. The refcount must be dropped by calling
+ * put_device().
+ *
+ * On failure one of the following ERR_PTR() encoded values is returned:
+ * * -%ENODEV if lookup fails permanently.
+ * * -%EPROBE_DEFER if lookup could succeed in the future.
+ */
+struct regulator_dev *of_regulator_dev_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r;
+ struct device_node *node;
+
+ node = of_get_regulator(dev, supply);
+ if (node) {
+ r = of_find_regulator_by_node(node);
+ of_node_put(node);
+ if (r)
+ return r;
+
+ /*
+		 * We have a node, but there is no device.
+		 * Assume it has not been registered yet.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
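
A caller of the new lookup helper is expected to follow the refcount contract spelled out above. A sketch ("vdd" is an illustrative supply name):

  struct regulator_dev *r;

  r = of_regulator_dev_lookup(dev, "vdd");
  if (IS_ERR(r)) {
  	if (PTR_ERR(r) == -EPROBE_DEFER)
  		return PTR_ERR(r);	/* node exists, device not bound yet */
  	/* -ENODEV: no DT mapping, fall back to other lookup methods */
  } else {
  	/* ... use r ... */
  	put_device(&r->dev);		/* drop the reference taken above */
  }
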
/*
* Returns number of regulators coupled with rdev.
*/
@@ -619,7 +725,7 @@ static bool of_coupling_find_node(struct device_node *src,
* - all coupled regulators have the same number of regulator_dev phandles
* - all regulators are linked to each other
*
- * Returns true if all conditions are met.
+ * Return: True if all conditions are met; false otherwise.
*/
bool of_check_coupling_data(struct regulator_dev *rdev)
{
@@ -690,8 +796,8 @@ clean:
* "regulator-coupled-with" property
* @index: Index in phandles array
*
- * Returns the regulator_dev pointer parsed from DTS. If it has not been yet
- * registered, returns NULL
+ * Return: Pointer to the &struct regulator_dev parsed from DTS, or %NULL if
+ * it has not yet been registered.
*/
struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev,
int index)
@@ -735,31 +841,32 @@ static int is_supply_name(const char *name)
return 0;
}
-/*
+/**
* of_regulator_bulk_get_all - get multiple regulator consumers
*
* @dev: Device to supply
* @np: device node to search for consumers
* @consumers: Configuration of consumers; clients are stored here.
*
- * @return number of regulators on success, an errno on failure.
- *
* This helper function allows drivers to get several regulator
* consumers in one operation. If any of the regulators cannot be
* acquired then any regulators that were allocated will be freed
- * before returning to the caller.
+ * before returning to the caller, and @consumers will not be
+ * changed.
+ *
+ * Return: Number of regulators on success, or a negative error number
+ * on failure.
*/
int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
struct regulator_bulk_data **consumers)
{
int num_consumers = 0;
struct regulator *tmp;
+ struct regulator_bulk_data *_consumers = NULL;
struct property *prop;
int i, n = 0, ret;
char name[64];
- *consumers = NULL;
-
/*
* first pass: get numbers of xxx-supply
* second pass: fill consumers
@@ -769,7 +876,7 @@ restart:
i = is_supply_name(prop->name);
if (i == 0)
continue;
- if (!*consumers) {
+ if (!_consumers) {
num_consumers++;
continue;
} else {
@@ -777,28 +884,31 @@ restart:
name[i] = '\0';
tmp = regulator_get(dev, name);
if (IS_ERR(tmp)) {
- ret = -EINVAL;
+ ret = PTR_ERR(tmp);
goto error;
}
- (*consumers)[n].consumer = tmp;
+ _consumers[n].consumer = tmp;
n++;
continue;
}
}
- if (*consumers)
+ if (_consumers) {
+ *consumers = _consumers;
return num_consumers;
+ }
if (num_consumers == 0)
return 0;
- *consumers = kmalloc_array(num_consumers,
+ _consumers = kmalloc_array(num_consumers,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
- if (!*consumers)
+ if (!_consumers)
return -ENOMEM;
goto restart;
error:
while (--n >= 0)
- regulator_put(consumers[n]->consumer);
+ regulator_put(_consumers[n].consumer);
+ kfree(_consumers);
return ret;
}
EXPORT_SYMBOL_GPL(of_regulator_bulk_get_all);
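
On the consumer side the fixed error path is invisible; the contract is simply that on failure nothing remains allocated and @consumers is untouched. A usage sketch (the teardown pairing is an assumption based on the array being kmalloc()ed here):

  struct regulator_bulk_data *consumers;
  int num;

  num = of_regulator_bulk_get_all(dev, dev->of_node, &consumers);
  if (num <= 0)
  	return num;	/* error or no supplies; consumers was not touched */

  /* ... e.g. regulator_bulk_enable(num, consumers) ... */

  regulator_bulk_free(num, consumers);
  kfree(consumers);	/* the helper allocated the array, the caller owns it */
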
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 319a88412154..441c9344aef7 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -105,7 +105,7 @@ struct pcap_regulator {
.lowpwr = _lowpwr, \
}
-static struct pcap_regulator vreg_table[] = {
+static const struct pcap_regulator vreg_table[] = {
VREG_INFO(V1, PCAP_REG_VREG1, 1, 2, 18, 0),
VREG_INFO(V2, PCAP_REG_VREG1, 5, 6, 19, 22),
VREG_INFO(V3, PCAP_REG_VREG1, 7, 8, 20, 23),
@@ -141,7 +141,7 @@ static struct pcap_regulator vreg_table[] = {
static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
/* the regulator doesn't support voltage switching */
@@ -155,7 +155,7 @@ static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
u32 tmp;
@@ -169,7 +169,7 @@ static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
static int pcap_regulator_enable(struct regulator_dev *rdev)
{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
if (vreg->en == NA)
@@ -180,7 +180,7 @@ static int pcap_regulator_enable(struct regulator_dev *rdev)
static int pcap_regulator_disable(struct regulator_dev *rdev)
{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
if (vreg->en == NA)
@@ -191,7 +191,7 @@ static int pcap_regulator_disable(struct regulator_dev *rdev)
static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
u32 tmp;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 7c04870442d3..7d56c22b5e40 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -76,7 +76,7 @@ struct pfuze_chip {
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR];
- struct pfuze_regulator *pfuze_regulators;
+ const struct pfuze_regulator *pfuze_regulators;
};
static const int pfuze100_swbst[] = {
@@ -367,7 +367,7 @@ static const struct regulator_ops pfuze3000_sw_regulator_ops = {
}
/* PFUZE100 */
-static struct pfuze_regulator pfuze100_regulators[] = {
+static const struct pfuze_regulator pfuze100_regulators[] = {
PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
@@ -386,7 +386,7 @@ static struct pfuze_regulator pfuze100_regulators[] = {
PFUZE100_COIN_REG(PFUZE100, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin),
};
-static struct pfuze_regulator pfuze200_regulators[] = {
+static const struct pfuze_regulator pfuze200_regulators[] = {
PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
@@ -403,7 +403,7 @@ static struct pfuze_regulator pfuze200_regulators[] = {
PFUZE100_COIN_REG(PFUZE200, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin),
};
-static struct pfuze_regulator pfuze3000_regulators[] = {
+static const struct pfuze_regulator pfuze3000_regulators[] = {
PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
@@ -419,7 +419,7 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
-static struct pfuze_regulator pfuze3001_regulators[] = {
+static const struct pfuze_regulator pfuze3001_regulators[] = {
PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index 063e12c08e75..cfa72ce85bc8 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -62,7 +62,7 @@ static int qcom_sdm845_refgen_is_enabled(struct regulator_dev *rdev)
return 1;
}
-static struct regulator_desc sdm845_refgen_desc = {
+static const struct regulator_desc sdm845_refgen_desc = {
.enable_time = 5,
.name = "refgen",
.owner = THIS_MODULE,
@@ -74,7 +74,7 @@ static struct regulator_desc sdm845_refgen_desc = {
},
};
-static struct regulator_desc sm8250_refgen_desc = {
+static const struct regulator_desc sm8250_refgen_desc = {
.enable_reg = REFGEN_REG_PWRDWN_CTRL5,
.enable_mask = REFGEN_PWRDWN_CTRL5_MASK,
.enable_val = REFGEN_PWRDWN_CTRL5_ENABLE,
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 80e304711345..6c343b4b9d15 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -158,7 +158,7 @@ struct rpmh_vreg_init_data {
* @wait_for_ack: Boolean indicating if execution must wait until the
* request has been acknowledged as complete
*
- * Return: 0 on success, errno on failure
+ * Return: 0 on success, or a negative error number on failure
*/
static int rpmh_regulator_send_request(struct rpmh_vreg *vreg,
struct tcs_cmd *cmd, bool wait_for_ack)
@@ -317,7 +317,7 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
* This function is used in the regulator_ops for VRM type RPMh regulator
* devices.
*
- * Return: 0 on success, errno on failure
+ * Return: 0 on success, or a negative error number on failure
*/
static unsigned int rpmh_regulator_vrm_get_optimum_mode(
struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA)
@@ -409,7 +409,7 @@ static const struct regulator_ops rpmh_regulator_xob_ops = {
* @pmic_rpmh_data: Pointer to a null-terminated array of rpmh-regulator
* resources defined for the top level PMIC device
*
- * Return: 0 on success, errno on failure
+ * Return: 0 on success, or a negative error number on failure
*/
static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
struct device_node *node, const char *pmic_id,
@@ -1537,7 +1537,6 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rpmh_vreg_init_data *vreg_data;
- struct device_node *node;
struct rpmh_vreg *vreg;
const char *pmic_id;
int ret;
@@ -1552,19 +1551,15 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
return ret;
}
- for_each_available_child_of_node(dev->of_node, node) {
+ for_each_available_child_of_node_scoped(dev->of_node, node) {
vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg) {
- of_node_put(node);
+ if (!vreg)
return -ENOMEM;
- }
ret = rpmh_regulator_init_vreg(vreg, dev, node, pmic_id,
vreg_data);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
return ret;
- }
}
return 0;
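For reference, the scoped iterator adopted above comes from the kernel's cleanup.h machinery: for_each_available_child_of_node_scoped() declares the child node itself and drops its reference automatically on scope exit, which is why the explicit of_node_put() calls on the error paths can be deleted. A minimal sketch of the pattern, using a hypothetical parse_children() helper:

static int parse_children(struct device *dev)
{
	/* child is declared by the macro and put automatically on scope exit */
	for_each_available_child_of_node_scoped(dev->of_node, child) {
		/* an early return here no longer leaks a node reference */
		if (!of_property_present(child, "reg"))
			return -EINVAL;
	}
	return 0;
}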
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 3b7e06b9f5ce..28e7ce60cb61 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -1386,7 +1386,7 @@ MODULE_DEVICE_TABLE(of, rpm_of_match);
* @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator
* resources defined for the top level PMIC device
*
- * Return: 0 on success, errno on failure
+ * Return: 0 on success, or a negative error number on failure
*/
static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev,
struct device_node *node,
@@ -1435,7 +1435,6 @@ static int rpm_reg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rpm_regulator_data *vreg_data;
- struct device_node *node;
struct qcom_rpm_reg *vreg;
struct qcom_smd_rpm *rpm;
int ret;
@@ -1455,18 +1454,14 @@ static int rpm_reg_probe(struct platform_device *pdev)
if (!vreg_data)
return -ENODEV;
- for_each_available_child_of_node(dev->of_node, node) {
+ for_each_available_child_of_node_scoped(dev->of_node, node) {
vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg) {
- of_node_put(node);
+ if (!vreg)
return -ENOMEM;
- }
ret = rpm_regulator_init_vreg(vreg, dev, node, vreg_data);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
return ret;
- }
}
return 0;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 9a9fa20dcd95..d66a0f61637e 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -245,7 +245,7 @@ enum spmi_saw3_registers {
SAW3_VERSION = 0xFD0,
};
-/* Used for indexing into ctrl_reg. These are offets from 0x40 */
+/* Used for indexing into ctrl_reg. These are offsets from 0x40 */
enum spmi_common_control_register_index {
SPMI_COMMON_IDX_VOLTAGE_RANGE = 0,
SPMI_COMMON_IDX_VOLTAGE_SET = 1,
@@ -2528,8 +2528,8 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (!reg)
return -ENODEV;
- if (of_find_property(node, "qcom,saw-reg", &lenp)) {
- syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
+ syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
+ if (syscon) {
saw_regmap = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(saw_regmap))
@@ -2577,15 +2577,13 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (saw_regmap) {
reg_node = of_get_child_by_name(node, reg->name);
- reg_prop = of_find_property(reg_node, "qcom,saw-leader",
- &lenp);
- of_node_put(reg_node);
- if (reg_prop) {
+ if (of_property_read_bool(reg_node, "qcom,saw-leader")) {
spmi_saw_ops = *(vreg->desc.ops);
spmi_saw_ops.set_voltage_sel =
spmi_regulator_saw_set_voltage;
vreg->desc.ops = &spmi_saw_ops;
}
+ of_node_put(reg_node);
}
if (vreg->set_points && vreg->set_points->count == 1) {
diff --git a/drivers/regulator/rt5120-regulator.c b/drivers/regulator/rt5120-regulator.c
index a388ac70865f..f0d3efd160d4 100644
--- a/drivers/regulator/rt5120-regulator.c
+++ b/drivers/regulator/rt5120-regulator.c
@@ -245,8 +245,8 @@ static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid)
desc->n_voltages = RT5120_BUCK1_NUM_VOLT;
desc->min_uV = RT5120_BUCK1_MINUV;
desc->uV_step = RT5120_BUCK1_STEPUV;
- desc->vsel_reg = RT5120_REG_CH1VID,
- desc->vsel_mask = RT5120_CH1VID_MASK,
+ desc->vsel_reg = RT5120_REG_CH1VID;
+ desc->vsel_mask = RT5120_CH1VID_MASK;
desc->ops = &rt5120_buck1_ops;
break;
case RT5120_REGULATOR_BUCK2 ... RT5120_REGULATOR_BUCK4:
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 570b61420f3a..7dcf92af8f15 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -4,6 +4,7 @@
// http://www.samsung.com
#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
@@ -1120,7 +1121,6 @@ static const struct regulator_desc s2mpu02_regulators[] = {
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct of_regulator_match *rdata = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
unsigned int rdev_num = 0;
@@ -1170,7 +1170,8 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
if (!s2mps11->ext_control_gpiod)
return -ENOMEM;
- rdata = kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL);
+ struct of_regulator_match *rdata __free(kfree) =
+ kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL);
if (!rdata)
return -ENOMEM;
@@ -1179,7 +1180,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, rdev_num);
if (ret)
- goto out;
+ return ret;
platform_set_drvdata(pdev, s2mps11);
@@ -1201,10 +1202,9 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
regulator = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
if (IS_ERR(regulator)) {
- ret = PTR_ERR(regulator);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
- goto out;
+ return PTR_ERR(regulator);
}
if (config.ena_gpiod) {
@@ -1214,15 +1214,12 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to enable GPIO control over %s: %d\n",
regulator->desc->name, ret);
- goto out;
+ return ret;
}
}
}
-out:
- kfree(rdata);
-
- return ret;
+ return 0;
}
static const struct platform_device_id s2mps11_pmic_id[] = {
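The __free(kfree) annotation used in the probe above is the same cleanup.h idiom applied to heap memory: the buffer is freed automatically whenever the variable goes out of scope, so the goto-based unwind collapses into plain returns. A minimal sketch, assuming a hypothetical struct foo:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo(size_t n)
{
	/* freed automatically on every return path, success included */
	struct foo *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf as scratch space ... */
	return 0;	/* no explicit kfree(buf) anywhere */
}

This fits s2mps11 because rdata is scratch data; a buffer that must outlive the function would need to be handed out with no_free_ptr()/return_ptr() instead.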
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index bfc0e143bf40..d25cd81e3f36 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -3,6 +3,7 @@
// Copyright (c) 2011 Samsung Electronics Co., Ltd
// http://www.samsung.com
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
@@ -521,7 +522,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct device_node *pmic_np, *regulators_np, *reg_np;
+ struct device_node *pmic_np, *reg_np;
struct sec_regulator_data *rdata;
struct sec_opmode_data *rmode;
unsigned int i, dvs_voltage_nr = 8, ret;
@@ -532,7 +533,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_get_child_by_name(pmic_np, "regulators");
+ struct device_node *regulators_np __free(device_node) = of_get_child_by_name(pmic_np,
+ "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -544,18 +546,14 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rdata),
GFP_KERNEL);
- if (!rdata) {
- of_node_put(regulators_np);
+ if (!rdata)
return -ENOMEM;
- }
rmode = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rmode),
GFP_KERNEL);
- if (!rmode) {
- of_node_put(regulators_np);
+ if (!rmode)
return -ENOMEM;
- }
pdata->regulators = rdata;
pdata->opmode = rmode;
@@ -581,7 +579,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata->ext_control_gpiod = NULL;
} else if (IS_ERR(rdata->ext_control_gpiod)) {
of_node_put(reg_np);
- of_node_put(regulators_np);
return PTR_ERR(rdata->ext_control_gpiod);
}
@@ -603,8 +600,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode++;
}
- of_node_put(regulators_np);
-
if (of_property_read_bool(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs")) {
pdata->buck2_gpiodvs = true;
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 29ab217297d6..9df726f10ad1 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -297,7 +297,7 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev,
static int scmi_regulator_probe(struct scmi_device *sdev)
{
int d, ret, num_doms;
- struct device_node *np, *child;
+ struct device_node *np;
const struct scmi_handle *handle = sdev->handle;
struct scmi_regulator_info *rinfo;
struct scmi_protocol_handle *ph;
@@ -341,13 +341,11 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
*/
of_node_get(handle->dev->of_node);
np = of_find_node_by_name(handle->dev->of_node, "regulators");
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo);
/* abort on any mem issue */
- if (ret == -ENOMEM) {
- of_node_put(child);
+ if (ret == -ENOMEM)
return ret;
- }
}
of_node_put(np);
/*
diff --git a/drivers/regulator/sm5703-regulator.c b/drivers/regulator/sm5703-regulator.c
deleted file mode 100644
index 702461cf075e..000000000000
--- a/drivers/regulator/sm5703-regulator.c
+++ /dev/null
@@ -1,170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/mfd/sm5703.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/of_regulator.h>
-
-enum sm5703_regulators {
- SM5703_BUCK,
- SM5703_LDO1,
- SM5703_LDO2,
- SM5703_LDO3,
- SM5703_USBLDO1,
- SM5703_USBLDO2,
- SM5703_VBUS,
- SM5703_MAX_REGULATORS,
-};
-
-static const int sm5703_ldo_voltagemap[] = {
- 1500000, 1800000, 2600000, 2800000, 3000000, 3300000,
-};
-
-static const int sm5703_buck_voltagemap[] = {
- 1000000, 1000000, 1000000, 1000000,
- 1000000, 1000000, 1000000, 1000000,
- 1000000, 1000000, 1000000, 1100000,
- 1200000, 1300000, 1400000, 1500000,
- 1600000, 1700000, 1800000, 1900000,
- 2000000, 2100000, 2200000, 2300000,
- 2400000, 2500000, 2600000, 2700000,
- 2800000, 2900000, 3000000, 3000000,
-};
-
-#define SM5703USBLDO(_name, _id) \
- [SM5703_USBLDO ## _id] = { \
- .name = _name, \
- .of_match = _name, \
- .regulators_node = "regulators", \
- .type = REGULATOR_VOLTAGE, \
- .id = SM5703_USBLDO ## _id, \
- .ops = &sm5703_regulator_ops_fixed, \
- .n_voltages = 1, \
- .fixed_uV = SM5703_USBLDO_MICROVOLT, \
- .enable_reg = SM5703_REG_USBLDO12, \
- .enable_mask = SM5703_REG_EN_USBLDO ##_id, \
- .owner = THIS_MODULE, \
- }
-
-#define SM5703VBUS(_name) \
- [SM5703_VBUS] = { \
- .name = _name, \
- .of_match = _name, \
- .regulators_node = "regulators", \
- .type = REGULATOR_VOLTAGE, \
- .id = SM5703_VBUS, \
- .ops = &sm5703_regulator_ops_fixed, \
- .n_voltages = 1, \
- .fixed_uV = SM5703_VBUS_MICROVOLT, \
- .enable_reg = SM5703_REG_CNTL, \
- .enable_mask = SM5703_OPERATION_MODE_MASK, \
- .enable_val = SM5703_OPERATION_MODE_USB_OTG_MODE, \
- .disable_val = SM5703_OPERATION_MODE_CHARGING_ON, \
- .owner = THIS_MODULE, \
- }
-
-#define SM5703BUCK(_name) \
- [SM5703_BUCK] = { \
- .name = _name, \
- .of_match = _name, \
- .regulators_node = "regulators", \
- .type = REGULATOR_VOLTAGE, \
- .id = SM5703_BUCK, \
- .ops = &sm5703_regulator_ops, \
- .n_voltages = ARRAY_SIZE(sm5703_buck_voltagemap), \
- .volt_table = sm5703_buck_voltagemap, \
- .vsel_reg = SM5703_REG_BUCK, \
- .vsel_mask = SM5703_BUCK_VOLT_MASK, \
- .enable_reg = SM5703_REG_BUCK, \
- .enable_mask = SM5703_REG_EN_BUCK, \
- .owner = THIS_MODULE, \
- }
-
-#define SM5703LDO(_name, _id) \
- [SM5703_LDO ## _id] = { \
- .name = _name, \
- .of_match = _name, \
- .regulators_node = "regulators", \
- .type = REGULATOR_VOLTAGE, \
- .id = SM5703_LDO ## _id, \
- .ops = &sm5703_regulator_ops, \
- .n_voltages = ARRAY_SIZE(sm5703_ldo_voltagemap), \
- .volt_table = sm5703_ldo_voltagemap, \
- .vsel_reg = SM5703_REG_LDO ##_id, \
- .vsel_mask = SM5703_LDO_VOLT_MASK, \
- .enable_reg = SM5703_REG_LDO ##_id, \
- .enable_mask = SM5703_LDO_EN, \
- .owner = THIS_MODULE, \
- }
-
-static const struct regulator_ops sm5703_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
-};
-
-static const struct regulator_ops sm5703_regulator_ops_fixed = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
-};
-
-static struct regulator_desc sm5703_regulators_desc[SM5703_MAX_REGULATORS] = {
- SM5703BUCK("buck"),
- SM5703LDO("ldo1", 1),
- SM5703LDO("ldo2", 2),
- SM5703LDO("ldo3", 3),
- SM5703USBLDO("usbldo1", 1),
- SM5703USBLDO("usbldo2", 2),
- SM5703VBUS("vbus"),
-};
-
-static int sm5703_regulator_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regulator_config config = { NULL, };
- struct regulator_dev *rdev;
- struct sm5703_dev *sm5703 = dev_get_drvdata(pdev->dev.parent);
- int i;
-
- config.dev = dev->parent;
- config.regmap = sm5703->regmap;
-
- for (i = 0; i < SM5703_MAX_REGULATORS; i++) {
- rdev = devm_regulator_register(dev,
- &sm5703_regulators_desc[i],
- &config);
- if (IS_ERR(rdev))
- return dev_err_probe(dev, PTR_ERR(rdev),
- "Failed to register a regulator\n");
- }
-
- return 0;
-}
-
-static const struct platform_device_id sm5703_regulator_id[] = {
- { "sm5703-regulator", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(platform, sm5703_regulator_id);
-
-static struct platform_driver sm5703_regulator_driver = {
- .driver = {
- .name = "sm5703-regulator",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
- .probe = sm5703_regulator_probe,
- .id_table = sm5703_regulator_id,
-};
-
-module_platform_driver(sm5703_regulator_driver);
-
-MODULE_DESCRIPTION("Silicon Mitus SM5703 LDO/Buck/USB regulator driver");
-MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index 7a0551f0c8c0..97f5ce138548 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -103,7 +103,7 @@ static const struct regulator_ops tps6287x_regulator_ops = {
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
-static struct regulator_desc tps6287x_reg = {
+static const struct regulator_desc tps6287x_reg = {
.name = "tps6287x",
.owner = THIS_MODULE,
.ops = &tps6287x_regulator_ops,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d5757fd9a65b..3334b5b7d907 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -242,17 +242,17 @@ static const struct regulator_desc tps65023_regulators[] = {
TPS65023_REGULATOR_LDO(2, TPS65023_LDO2_VSEL_table, 0x70),
};
-static struct tps_driver_data tps65020_drv_data = {
+static const struct tps_driver_data tps65020_drv_data = {
.desc = tps65020_regulators,
.core_regulator = TPS65023_DCDC_3,
};
-static struct tps_driver_data tps65021_drv_data = {
+static const struct tps_driver_data tps65021_drv_data = {
.desc = tps65021_regulators,
.core_regulator = TPS65023_DCDC_3,
};
-static struct tps_driver_data tps65023_drv_data = {
+static const struct tps_driver_data tps65023_drv_data = {
.desc = tps65023_regulators,
.core_regulator = TPS65023_DCDC_1,
};
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index ed5e191e8896..43f220cea21c 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -146,10 +146,10 @@ static int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.ops = &wm831x_isink_ops;
isink->desc.type = REGULATOR_CURRENT;
isink->desc.owner = THIS_MODULE;
- isink->desc.curr_table = wm831x_isinkv_values,
- isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values),
- isink->desc.csel_reg = isink->reg,
- isink->desc.csel_mask = WM831X_CS1_ISEL_MASK,
+ isink->desc.curr_table = wm831x_isinkv_values;
+ isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values);
+ isink->desc.csel_reg = isink->reg;
+ isink->desc.csel_mask = WM831X_CS1_ISEL_MASK;
config.dev = pdev->dev.parent;
config.init_data = pdata->isink[id];
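The rt5120 and wm831x hunks above fix the same latent hazard: assignment statements terminated with a comma instead of a semicolon. The code compiled and behaved identically because C's comma operator chains the assignments into one expression statement, but the punctuation is misleading and breaks as soon as a line is reordered or removed. A small illustration with a hypothetical struct:

struct desc { int a; int b; };

static void fill(struct desc *d)
{
	/*
	 * One expression statement: both assignments still execute and the
	 * comma operator's result is discarded - legal C, easy to misread.
	 */
	d->a = 1,
	d->b = 2;
}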
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index c4a229f66dec..fb3ca7956d00 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -112,7 +112,7 @@ static const struct regulator_ops wm8400_dcdc_ops = {
.get_optimum_mode = wm8400_dcdc_get_optimum_mode,
};
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
{
.name = "LDO1",
.id = WM8400_LDO1,
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index dda2ada215b7..7a80c92b785e 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -340,6 +340,19 @@ config TI_K3_DSP_REMOTEPROC
It's safe to say N here if you're not interested in utilizing
the DSP slave processors.
+config TI_K3_M4_REMOTEPROC
+ tristate "TI K3 M4 remoteproc support"
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's M4 remote processor subsystems
+ on various SoCs of the TI K3 family through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a remote processor.
+
config TI_K3_R5_REMOTEPROC
tristate "TI K3 R5 remoteproc support"
depends on ARCH_K3
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 91314a9b43ce..5ff4e2fee4ab 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -37,5 +37,6 @@ obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 9041a0e07fb2..8770d0cf1255 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -239,8 +239,6 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
struct da8xx_rproc *drproc;
struct rproc *rproc;
struct irq_data *irq_data;
- struct resource *bootreg_res;
- struct resource *chipsig_res;
struct clk *dsp_clk;
struct reset_control *dsp_reset;
void __iomem *chipsig;
@@ -258,15 +256,11 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
return -EINVAL;
}
- bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "host1cfg");
- bootreg = devm_ioremap_resource(dev, bootreg_res);
+ bootreg = devm_platform_ioremap_resource_byname(pdev, "host1cfg");
if (IS_ERR(bootreg))
return PTR_ERR(bootreg);
- chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "chipsig");
- chipsig = devm_ioremap_resource(dev, chipsig_res);
+ chipsig = devm_platform_ioremap_resource_byname(pdev, "chipsig");
if (IS_ERR(chipsig))
return PTR_ERR(chipsig);
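devm_platform_ioremap_resource_byname(), used here and in several later hunks, folds the lookup-and-map pair it replaces into one call. Its upstream definition is roughly:

void __iomem *devm_platform_ioremap_resource_byname(struct platform_device *pdev,
						    const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}

so the error handling is unchanged: devm_ioremap_resource() converts a missing resource into an ERR_PTR, which callers still check with IS_ERR().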
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 087506e21508..376187ad5754 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -509,7 +509,7 @@ static int imx_dsp_rproc_mbox_alloc(struct imx_dsp_rproc *priv)
struct mbox_client *cl;
int ret;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
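of_property_present() is the idiomatic existence test that replaces the open-coded of_get_property(np, "mbox-names", NULL) pattern here and in imx_rproc.c below. It returns a bool and never exposes the property value; upstream it is roughly:

static inline bool of_property_present(const struct device_node *np,
				       const char *propname)
{
	return of_property_read_bool(np, propname);
}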
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 144c8e9a642e..800015ff7ff9 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -18,6 +18,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/workqueue.h>
@@ -90,7 +91,7 @@ struct imx_rproc_mem {
#define ATT_CORE_MASK 0xffff
#define ATT_CORE(I) BIT((I))
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
static void imx_rproc_free_mbox(struct rproc *rproc);
struct imx_rproc {
@@ -119,20 +120,16 @@ struct imx_rproc {
static const struct imx_rproc_att imx_rproc_att_imx93[] = {
/* dev addr , sys addr , size , flags */
/* TCM CODE NON-SECURE */
- { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM CODE SECURE */
- { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS NON-SECURE*/
- { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS SECURE*/
- { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* DDR */
{ 0x80000000, 0x80000000, 0x10000000, 0 },
@@ -210,11 +207,9 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
/* QSPI Code - alias */
{ 0x08000000, 0x08000000, 0x08000000, 0 },
/* DDR (Code) - alias */
- { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
- /* TCML */
- { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
- /* TCMU */
- { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* TCML/U */
+ { 0x1FFE0000, 0x007E0000, 0x00040000, ATT_OWN | ATT_IOMEM},
/* OCRAM_S */
{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM */
@@ -339,6 +334,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
.method = IMX_RPROC_NONE,
+ .flags = IMX_RPROC_NEED_SYSTEM_OFF,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
@@ -375,7 +371,7 @@ static int imx_rproc_start(struct rproc *rproc)
struct arm_smccc_res res;
int ret;
- ret = imx_rproc_xtr_mbox_init(rproc);
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
return ret;
@@ -635,7 +631,7 @@ static void imx_rproc_kick(struct rproc *rproc, int vqid)
static int imx_rproc_attach(struct rproc *rproc)
{
- return imx_rproc_xtr_mbox_init(rproc);
+ return imx_rproc_xtr_mbox_init(rproc, true);
}
static int imx_rproc_detach(struct rproc *rproc)
@@ -666,6 +662,17 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc
return (struct resource_table *)priv->rsc_table;
}
+static struct resource_table *
+imx_rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ if (priv->rsc_table)
+ return (struct resource_table *)priv->rsc_table;
+
+ return rproc_elf_find_loaded_rsc_table(rproc, fw);
+}
+
static const struct rproc_ops imx_rproc_ops = {
.prepare = imx_rproc_prepare,
.attach = imx_rproc_attach,
@@ -676,7 +683,7 @@ static const struct rproc_ops imx_rproc_ops = {
.da_to_va = imx_rproc_da_to_va,
.load = rproc_elf_load_segments,
.parse_fw = imx_rproc_parse_fw,
- .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .find_loaded_rsc_table = imx_rproc_elf_find_loaded_rsc_table,
.get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -789,7 +796,7 @@ static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
queue_work(priv->workqueue, &priv->rproc_work);
}
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
{
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
@@ -807,12 +814,12 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
if (priv->tx_ch && priv->rx_ch)
return 0;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
cl->dev = dev;
- cl->tx_block = true;
+ cl->tx_block = tx_block;
cl->tx_tout = 100;
cl->knows_txdone = false;
cl->rx_callback = imx_rproc_rx_callback;
@@ -1045,6 +1052,22 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv)
return 0;
}
+static int imx_rproc_sys_off_handler(struct sys_off_data *data)
+{
+ struct rproc *rproc = data->cb_data;
+ int ret;
+
+ imx_rproc_free_mbox(rproc);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, false);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed to request non-blocking mbox\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1076,7 +1099,9 @@ static int imx_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = imx_rproc_xtr_mbox_init(rproc);
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
goto err_put_wkq;
@@ -1094,11 +1119,33 @@ static int imx_rproc_probe(struct platform_device *pdev)
if (ret)
goto err_put_scu;
- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
-
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+ if (dcfg->flags & IMX_RPROC_NEED_SYSTEM_OFF) {
+ /*
+ * Set up the mailbox in non-blocking mode during the
+ * [SYS_OFF_MODE_POWER_OFF_PREPARE, SYS_OFF_MODE_RESTART_PREPARE]
+ * phase, before the [SYS_OFF_MODE_POWER_OFF, SYS_OFF_MODE_RESTART]
+ * atomic chain is invoked; see kernel/reboot.c.
+ */
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register power off handler failure\n");
+ goto err_put_clk;
+ }
+
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register restart handler failure\n");
+ goto err_put_clk;
+ }
+ }
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 79a1b8956d14..17a7d051c531 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -26,6 +26,9 @@ enum imx_rproc_method {
IMX_RPROC_SCU_API,
};
+/* dcfg flags */
+#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+
struct imx_rproc_dcfg {
u32 src_reg;
u32 src_mask;
@@ -36,6 +39,7 @@ struct imx_rproc_dcfg {
const struct imx_rproc_att *att;
size_t att_size;
enum imx_rproc_method method;
+ u32 flags;
};
#endif /* _IMX_RPROC_H */
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 9902cce28692..1b78d8ddeacf 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -183,8 +183,7 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
vpu->dev = &pdev->dev;
platform_set_drvdata(pdev, vpu);
- mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
- vpu->aux_base = devm_ioremap_resource(dev, mem);
+ vpu->aux_base = devm_platform_ioremap_resource_byname(pdev, "aux");
if (IS_ERR(vpu->aux_base)) {
dev_err(dev, "Failed to ioremap\n");
return PTR_ERR(vpu->aux_base);
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 7e57b90bcaf8..8f0f7a4cfef2 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -366,8 +366,6 @@ static int keystone_rproc_probe(struct platform_device *pdev)
struct rproc *rproc;
int dsp_id;
char *fw_name = NULL;
- char *template = "keystone-dsp%d-fw";
- int name_len = 0;
int ret = 0;
if (!np) {
@@ -382,14 +380,12 @@ static int keystone_rproc_probe(struct platform_device *pdev)
}
/* construct a custom default fw name - subject to change in future */
- name_len = strlen(template); /* assuming a single digit alias */
- fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id);
if (!fw_name)
return -ENOMEM;
- snprintf(fw_name, name_len, template, dsp_id);
- rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
- sizeof(*ksproc));
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops,
+ fw_name, sizeof(*ksproc));
if (!rproc)
return -ENOMEM;
@@ -400,13 +396,11 @@ static int keystone_rproc_probe(struct platform_device *pdev)
ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
if (ret)
- goto free_rproc;
+ return ret;
ksproc->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(ksproc->reset)) {
- ret = PTR_ERR(ksproc->reset);
- goto free_rproc;
- }
+ if (IS_ERR(ksproc->reset))
+ return PTR_ERR(ksproc->reset);
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
@@ -471,8 +465,6 @@ disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
pm_runtime_disable(dev);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -484,7 +476,6 @@ static void keystone_rproc_remove(struct platform_device *pdev)
gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- rproc_free(ksproc->rproc);
of_reserved_mem_device_release(&pdev->dev);
}
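The devm_kasprintf() conversion in keystone_rproc_probe() above also removes a subtle sizing bug: the old strlen(template) buffer only fit a single-digit dsp_id (as the removed comment admitted), so a two-digit alias would have been silently truncated by snprintf(). devm_kasprintf() sizes the allocation from the formatted output itself:

fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id);
if (!fw_name)
	return -ENOMEM;
/* buffer length always matches the rendered string; freed with the device */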
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 88e7b84f223c..ef82835e98a4 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -829,6 +829,23 @@ static const struct adsp_data adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sa8775p_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mbn",
+ .pas_id = 1,
+ .minidump_id = 5,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data sdm845_adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -942,6 +959,42 @@ static const struct adsp_data cdsp_resource_init = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_cdsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp0.mbn",
+ .pas_id = 18,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sa8775p_cdsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp1.mbn",
+ .pas_id = 30,
+ .minidump_id = 20,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "nsp",
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
static const struct adsp_data sdm845_cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -1083,6 +1136,40 @@ static const struct adsp_data sm8350_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_gpdsp0_resource = {
+ .crash_reason_smem = 640,
+ .firmware_name = "gpdsp0.mbn",
+ .pas_id = 39,
+ .minidump_id = 21,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp0",
+ .ssr_name = "gpdsp0",
+ .sysmon_name = "gpdsp0",
+ .ssctl_id = 0x21,
+};
+
+static const struct adsp_data sa8775p_gpdsp1_resource = {
+ .crash_reason_smem = 641,
+ .firmware_name = "gpdsp1.mbn",
+ .pas_id = 40,
+ .minidump_id = 22,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp1",
+ .ssr_name = "gpdsp1",
+ .sysmon_name = "gpdsp1",
+ .ssctl_id = 0x22,
+};
+
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -1329,6 +1416,11 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sa8775p-adsp-pas", .data = &sa8775p_adsp_resource},
+ { .compatible = "qcom,sa8775p-cdsp0-pas", .data = &sa8775p_cdsp0_resource},
+ { .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
+ { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
+ { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
@@ -1346,6 +1438,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sdx75-mpss-pas", .data = &sm8650_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm6115-mpss-pas", .data = &sc8180x_mpss_resource},
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index d17719384c16..5412beb0a692 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -259,16 +259,14 @@ struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
slim_rproc->mem[i].size = resource_size(res);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
- slim_rproc->slimcore = devm_ioremap_resource(dev, res);
+ slim_rproc->slimcore = devm_platform_ioremap_resource_byname(pdev, "slimcore");
if (IS_ERR(slim_rproc->slimcore)) {
dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
err = PTR_ERR(slim_rproc->slimcore);
goto err;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
- slim_rproc->peri = devm_ioremap_resource(dev, res);
+ slim_rproc->peri = devm_platform_ioremap_resource_byname(pdev, "peripherals");
if (IS_ERR(slim_rproc->peri)) {
dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
err = PTR_ERR(slim_rproc->peri);
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index a22d41689a7d..8be3f631c192 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -115,6 +115,10 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward messages from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -155,6 +159,10 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward messages to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -230,12 +238,9 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor. This is only for sanity's sake for now;
@@ -315,32 +320,23 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
u32 boot_addr;
int ret;
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
boot_addr, kproc->data->boot_align_addr);
- ret = -EINVAL;
- goto put_mbox;
+ return -EINVAL;
}
dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
ret = k3_dsp_rproc_release(kproc);
if (ret)
- goto put_mbox;
+ return ret;
return 0;
-
-put_mbox:
- mbox_free_channel(kproc->mbox);
- return ret;
}
/*
@@ -353,8 +349,6 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
- mbox_free_channel(kproc->mbox);
-
k3_dsp_rproc_reset(kproc);
return 0;
@@ -363,42 +357,22 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
/*
* Attach to a running DSP remote processor (IPC-only mode)
*
- * This rproc attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the DSP core. This callback is invoked only in IPC-only
- * mode.
+ * This rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during probe routine, so there
+ * is no need to issue any TI-SCI commands to boot the DSP core. This callback
+ * is invoked only in IPC-only mode and exists because rproc_validate() checks
+ * for its existence.
*/
-static int k3_dsp_rproc_attach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "DSP initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running DSP remote processor (IPC-only mode)
*
- * This rproc detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the DSP core is not stopped and will
- * be left to continue to run its booted firmware. This callback is invoked only
- * in IPC-only mode.
+ * This rproc detach callback is a NOP. The DSP core is not stopped and will be
+ * left to continue to run its booted firmware. This callback is invoked only in
+ * IPC-only mode and exists for sanity's sake.
*/
-static int k3_dsp_rproc_detach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "DSP deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -636,32 +610,6 @@ static void k3_dsp_release_tsp(void *data)
ti_sci_proc_release(tsp);
}
-static
-struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
-{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
-
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -697,6 +645,10 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->dev = dev;
kproc->data = data;
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
if (IS_ERR(kproc->ti_sci))
return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
@@ -711,7 +663,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(kproc->reset),
"failed to get reset\n");
- kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
if (IS_ERR(kproc->tsp))
return dev_err_probe(dev, PTR_ERR(kproc->tsp),
"failed to construct ti-sci proc control\n");
@@ -789,6 +741,8 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
}
+
+ mbox_free_channel(kproc->mbox);
}
static const struct k3_dsp_mem_data c66_mems[] = {
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
new file mode 100644
index 000000000000..09f0484a90e1
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Cortex-M4 Remote Processor(s) driver
+ *
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define K3_M4_IRAM_DEV_ADDR 0x00000
+#define K3_M4_DRAM_DEV_ADDR 0x30000
+
+/**
+ * struct k3_m4_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_m4_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_m4_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_m4_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_m4_rproc {
+ struct device *dev;
+ struct k3_m4_rproc_mem *mem;
+ int num_mems;
+ struct k3_m4_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct device *dev = client->dev;
+ struct rproc *rproc = dev_get_drvdata(dev);
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported, so just log it for now.
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid is not used and is inconsequential: the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * and the remote processor is expected to process both its Tx and Rx
+ * virtqueues.
+ */
+static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /*
+ * Ping the remote processor. This is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores run.
+ */
+static int k3_m4_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is already running, no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "could not assert local reset\n");
+ return ret;
+ }
+
+ ret = reset_control_status(kproc->reset);
+ if (ret <= 0) {
+ dev_err(dev, "local reset still not asserted\n");
+ return ret;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable cores. This completes the second portion of
+ * powering down the remote core. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_m4_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_ATTACHED)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both these addresses need to be
+ * looked through for translation. The translated addresses can be used either
+ * by the remoteproc core for loading (when using kernel remoteproc loader), or
+ * by any rpmsg bus drivers.
+ */
+static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle M4-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
+static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_m4_rproc *kproc)
+{
+ static const char * const mem_names[] = { "iram", "dram" };
+ static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ mem_names[i]);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = mem_addrs[i];
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static void k3_m4_rproc_dev_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
+static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ return -EINVAL;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
+
+static void k3_m4_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+
+/*
+ * Power up the M4 remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_start(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Stop the M4 remote processor.
+ *
+ * This function puts the M4 processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_stop(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Attach to a running M4 remote processor (IPC-only mode)
+ *
+ * The remote processor is already booted, so there is no need to issue any
+ * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
+ * mode.
+ */
+static int k3_m4_rproc_attach(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Detach from a running M4 remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to the attach
+ * callback; the M4 core is not stopped and is left to continue running its
+ * booted firmware. This callback is invoked only in IPC-only mode.
+ */
+static int k3_m4_rproc_detach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static const struct rproc_ops k3_m4_rproc_ops = {
+ .prepare = k3_m4_rproc_prepare,
+ .unprepare = k3_m4_rproc_unprepare,
+ .start = k3_m4_rproc_start,
+ .stop = k3_m4_rproc_stop,
+ .attach = k3_m4_rproc_attach,
+ .detach = k3_m4_rproc_detach,
+ .kick = k3_m4_rproc_kick,
+ .da_to_va = k3_m4_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+};
+
+static int k3_m4_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_m4_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool r_state = false;
+ bool p_state = false;
+ int ret;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
+
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ kproc = rproc->priv;
+ kproc->dev = dev;
+ platform_set_drvdata(pdev, rproc);
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci))
+ return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset))
+ return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp))
+ return dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
+ ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_reserved_mem_init(kproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "reserved memory init failed\n");
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &p_state);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get initial state, mode cannot be determined\n");
+
+ /* configure devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ rproc->state = RPROC_DETACHED;
+ dev_info(dev, "configured M4F for IPC-only mode\n");
+ } else {
+ dev_info(dev, "configured M4F for remoteproc mode\n");
+ }
+
+ kproc->client.dev = dev;
+ kproc->client.tx_done = NULL;
+ kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
+ kproc->client.tx_block = false;
+ kproc->client.knows_txdone = false;
+ kproc->mbox = mbox_request_channel(&kproc->client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register device with remoteproc core\n");
+
+ return 0;
+}
+
+static const struct of_device_id k3_m4_of_match[] = {
+ { .compatible = "ti,am64-m4fss", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_m4_of_match);
+
+static struct platform_driver k3_m4_rproc_driver = {
+ .probe = k3_m4_rproc_probe,
+ .driver = {
+ .name = "k3-m4-rproc",
+ .of_match_table = k3_m4_of_match,
+ },
+};
+module_platform_driver(k3_m4_rproc_driver);
+
+MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
+MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 39a47540c590..747ee467da88 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -194,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward message from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -229,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward message to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -399,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor, this is only for sanity-sake for now;
@@ -464,8 +469,6 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
ret);
return ret;
}
- core->released_from_reset = true;
- wake_up_interruptible(&cluster->core_transition);
/*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
@@ -552,10 +555,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
u32 boot_addr;
int ret;
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
@@ -564,7 +563,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
core = kproc->core;
ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
@@ -580,13 +579,15 @@ static int k3_r5_rproc_start(struct rproc *rproc)
if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
dev_err(dev, "%s: can not start core 1 before core 0\n",
__func__);
- ret = -EPERM;
- goto put_mbox;
+ return -EPERM;
}
ret = k3_r5_core_run(core);
if (ret)
- goto put_mbox;
+ return ret;
+
+ core->released_from_reset = true;
+ wake_up_interruptible(&cluster->core_transition);
}
return 0;
@@ -596,8 +597,6 @@ unroll_core_run:
if (k3_r5_core_halt(core))
dev_warn(core->dev, "core halt back failed\n");
}
-put_mbox:
- mbox_free_channel(kproc->mbox);
return ret;
}
@@ -658,8 +657,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
goto out;
}
- mbox_free_channel(kproc->mbox);
-
return 0;
unroll_core_halt:
@@ -674,42 +671,22 @@ out:
/*
* Attach to a running R5F remote processor (IPC-only mode)
*
- * The R5F attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F attach callback is a NOP. The remote processor is already booted, and
+ * all required resources have been acquired during the probe routine, so there is
+ * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
+ * This callback is invoked only in IPC-only mode and exists because
+ * rproc_validate() checks for its existence.
*/
-static int k3_r5_rproc_attach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "R5F core initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running R5F remote processor (IPC-only mode)
*
- * The R5F detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the R5F cores are not stopped and
- * will be left in booted state in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
+ * left in the booted state in IPC-only mode. This callback is invoked only in
+ * IPC-only mode and exists for sanity's sake.
*/
-static int k3_r5_rproc_detach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -1259,8 +1236,8 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto out;
}
- rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
- fw_name, sizeof(*kproc));
+ rproc = devm_rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
if (!rproc) {
ret = -ENOMEM;
goto out;
@@ -1278,9 +1255,13 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
kproc->rproc = rproc;
core->rproc = rproc;
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
ret = k3_r5_rproc_configure_mode(kproc);
if (ret < 0)
- goto err_config;
+ goto out;
if (ret)
goto init_rmem;
@@ -1288,7 +1269,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
init_rmem:
@@ -1298,7 +1279,7 @@ init_rmem:
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
ret = rproc_add(rproc);
@@ -1332,7 +1313,7 @@ init_rmem:
dev_err(dev,
"Timed out waiting for %s core to power up!\n",
rproc->name);
- return ret;
+ goto err_powerup;
}
}
@@ -1348,12 +1329,10 @@ err_split:
}
}
+err_powerup:
rproc_del(rproc);
err_add:
k3_r5_reserved_mem_exit(kproc);
-err_config:
- rproc_free(rproc);
- core->rproc = NULL;
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
@@ -1395,12 +1374,11 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
+ mbox_free_channel(kproc->mbox);
+
rproc_del(rproc);
k3_r5_reserved_mem_exit(kproc);
-
- rproc_free(rproc);
- core->rproc = NULL;
}
}
@@ -1533,32 +1511,6 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return 0;
}
-static
-struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
-{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
-
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
-}
-
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1633,7 +1585,7 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
- core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
if (IS_ERR(core->tsp)) {
ret = PTR_ERR(core->tsp);
dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
index 778558abcdcc..f3911ce75252 100644
--- a/drivers/remoteproc/ti_sci_proc.h
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -28,6 +28,32 @@ struct ti_sci_proc {
u8 host_id;
};
+static inline
+struct ti_sci_proc *ti_sci_proc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
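+
+/*
+ * Illustrative devicetree shape consumed above (values are invented):
+ *     ti,sci-proc-ids = <0x18 0x35>;
+ * would yield tsp->proc_id = 0x18 and tsp->host_id = 0x35.
+ */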
+
static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
{
int ret;
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 596f3ffb8935..5aeedeaf3c41 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -57,6 +57,17 @@ struct mem_bank_data {
};
/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_res: sram address region information
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+ struct resource sram_res;
+ u32 da;
+};
+
+/**
* struct mbox_info
*
* @rx_mc_buf: to copy data from mailbox rx channel
@@ -120,6 +131,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
* struct zynqmp_r5_core
*
* @rsc_tbl_va: resource table virtual address
+ * @sram: Array of sram memories assigned to this core
+ * @num_sram: number of sram banks for this core
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
@@ -131,6 +144,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
*/
struct zynqmp_r5_core {
void __iomem *rsc_tbl_va;
+ struct zynqmp_sram_bank *sram;
+ int num_sram;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
@@ -494,6 +509,45 @@ static int add_mem_regions_carveout(struct rproc *rproc)
return 0;
}
+static int add_sram_carveouts(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ struct rproc_mem_entry *rproc_mem;
+ struct zynqmp_sram_bank *sram;
+ dma_addr_t dma_addr;
+ size_t len;
+ int da, i;
+
+ for (i = 0; i < r5_core->num_sram; i++) {
+ sram = &r5_core->sram[i];
+
+ dma_addr = (dma_addr_t)sram->sram_res.start;
+
+ len = resource_size(&sram->sram_res);
+ da = sram->da;
+
+ rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+ dma_addr,
+ len, da,
+ zynqmp_r5_mem_region_map,
+ zynqmp_r5_mem_region_unmap,
+ sram->sram_res.name);
+ if (!rproc_mem) {
+ dev_err(&rproc->dev, "failed to add sram %s da=0x%x, size=0x%lx",
+ sram->sram_res.name, da, len);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, rproc_mem);
+ rproc_coredump_add_segment(rproc, da, len);
+
+ dev_dbg(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
+ sram->sram_res.name, dma_addr, da, len);
+ }
+
+ return 0;
+}
+
/*
* tcm_mem_unmap()
* @rproc: single R5 core's corresponding rproc instance
@@ -669,6 +723,12 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
return ret;
}
+ ret = add_sram_carveouts(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -881,6 +941,77 @@ free_rproc:
return ERR_PTR(ret);
}
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+ struct device_node *np = r5_core->np;
+ struct device *dev = r5_core->dev;
+ struct zynqmp_sram_bank *sram;
+ struct device_node *sram_np;
+ int num_sram, i, ret;
+ u64 abs_addr, size;
+
+ /* "sram" is optional property. Do not fail, if unavailable. */
+ if (!of_property_present(r5_core->np, "sram"))
+ return 0;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_err(dev, "Invalid sram property, ret = %d\n",
+ num_sram);
+ return -EINVAL;
+ }
+
+ sram = devm_kcalloc(dev, num_sram,
+ sizeof(struct zynqmp_sram_bank), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np) {
+ dev_err(dev, "failed to get sram %d phandle\n", i);
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(sram_np)) {
+ dev_err(dev, "sram device not available\n");
+ ret = -EINVAL;
+ goto fail_sram_get;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &sram[i].sram_res);
+ if (ret) {
+ dev_err(dev, "addr to res failed\n");
+ goto fail_sram_get;
+ }
+
+ /* Get SRAM device address */
+ ret = of_property_read_reg(sram_np, i, &abs_addr, &size);
+ if (ret) {
+ dev_err(dev, "failed to get reg property\n");
+ goto fail_sram_get;
+ }
+
+ sram[i].da = (u32)abs_addr;
+
+ of_node_put(sram_np);
+
+ dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n",
+ i, sram[i].sram_res.name, sram[i].sram_res.start,
+ sram[i].da, resource_size(&sram[i].sram_res));
+ }
+
+ r5_core->sram = sram;
+ r5_core->num_sram = num_sram;
+
+ return 0;
+
+fail_sram_get:
+ of_node_put(sram_np);
+
+ return ret;
+}
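+
+/*
+ * Hypothetical consumer snippet (labels are invented, not from a shipped
+ * devicetree):
+ *     sram = <&r5_ocm0 &r5_ocm1>;
+ * Each phandle must reference an available node whose reg entry supplies
+ * the bank's device address and size.
+ */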
+
static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
{
int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
@@ -1059,7 +1190,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
r5_core = cluster->r5_cores[0];
/* Maintain backward compatibility for zynqmp by using hardcode TCM address. */
- if (of_find_property(r5_core->np, "reg", NULL))
+ if (of_property_present(r5_core->np, "reg"))
ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss"))
ret = zynqmp_r5_get_tcm_node(cluster);
@@ -1086,7 +1217,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
- if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) ||
+ if (of_property_present(dev_of_node(dev), "xlnx,tcm-mode") ||
device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id,
tcm_mode);
@@ -1095,6 +1226,10 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
}
+
+ ret = zynqmp_r5_get_sram_banks(r5_core);
+ if (ret)
+ return ret;
}
return 0;
@@ -1147,7 +1282,7 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
return -EINVAL;
}
- if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) {
+ if (of_property_present(dev_node, "xlnx,tcm-mode")) {
ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode);
if (ret)
return ret;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 67bce340a87e..5484a65f66b9 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -66,6 +66,19 @@ config RESET_BRCMSTB_RESCAL
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
BCM7216.
+config RESET_EYEQ
+ bool "Mobileye EyeQ reset controller"
+ depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST
+ select AUXILIARY_BUS
+ default MACH_EYEQ5 || MACH_EYEQ6H
+ help
+ This enables the Mobileye EyeQ reset controller, used in EyeQ5, EyeQ6L
+ and EyeQ6H SoCs.
+
+ It has one or more domains, with a varying number of resets in each.
+ Registers are located in a shared register region called OLB. EyeQ6H
+ has multiple reset instances.
+
config RESET_GPIO
tristate "GPIO reset controller"
depends on GPIOLIB
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 27b0bbdfcc04..4411a2a124d7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
+obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index dba74e857be6..4d509d41456a 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -812,6 +812,7 @@ __reset_control_get_internal(struct reset_controller_dev *rcdev,
kref_init(&rstc->refcnt);
rstc->acquired = acquired;
rstc->shared = shared;
+ get_device(rcdev->dev);
return rstc;
}
@@ -826,6 +827,7 @@ static void __reset_control_release(struct kref *kref)
module_put(rstc->rcdev->owner);
list_del(&rstc->list);
+ put_device(rstc->rcdev->dev);
kfree(rstc);
}
@@ -916,20 +918,18 @@ static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
*/
lockdep_assert_not_held(&reset_list_mutex);
- mutex_lock(&reset_gpio_lookup_mutex);
+ guard(mutex)(&reset_gpio_lookup_mutex);
list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) {
if (args->np == rgpio_dev->of_args.np) {
if (of_phandle_args_equal(args, &rgpio_dev->of_args))
- goto out; /* Already on the list, done */
+ return 0; /* Already on the list, done */
}
}
id = ida_alloc(&reset_gpio_ida, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto err_unlock;
- }
+ if (id < 0)
+ return id;
/* Not freed on success, because it is persistent subsystem data. */
rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL);
@@ -959,9 +959,6 @@ static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
list_add(&rgpio_dev->list, &reset_gpio_lookup_list);
-out:
- mutex_unlock(&reset_gpio_lookup_mutex);
-
return 0;
err_put:
@@ -970,8 +967,6 @@ err_kfree:
kfree(rgpio_dev);
err_ida_free:
ida_free(&reset_gpio_ida, id);
-err_unlock:
- mutex_unlock(&reset_gpio_lookup_mutex);
return ret;
}
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 2537ec05ecee..578fe867080c 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
static int berlin2_reset_probe(struct platform_device *pdev)
{
- struct device_node *parent_np = of_get_parent(pdev->dev.of_node);
+ struct device_node *parent_np;
struct berlin_reset_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ parent_np = of_get_parent(pdev->dev.of_node);
priv->regmap = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(priv->regmap))
diff --git a/drivers/reset/reset-eyeq.c b/drivers/reset/reset-eyeq.c
new file mode 100644
index 000000000000..02d50041048b
--- /dev/null
+++ b/drivers/reset/reset-eyeq.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reset driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
+ *
+ * Controllers live in a shared register region called OLB. EyeQ5 and EyeQ6L
+ * have a single OLB instance for a single reset controller. EyeQ6H has seven
+ * OLB instances, three of which host reset controllers.
+ *
+ * Each reset controller has one or more domain. Domains are of a given type
+ * (see enum eqr_domain_type), with a valid offset mask (up to 32 resets per
+ * domain).
+ *
+ * Domain types define expected behavior: one-register-per-reset,
+ * one-bit-per-reset, status detection method, busywait duration, etc.
+ *
+ * We use eqr_ as prefix, as in "EyeQ Reset", but way shorter.
+ *
+ * Known resets in EyeQ5 domain 0 (type EQR_EYEQ5_SARCR):
+ * 3. CAN0 4. CAN1 5. CAN2 6. SPI0
+ * 7. SPI1 8. SPI2 9. SPI3 10. UART0
+ * 11. UART1 12. UART2 13. I2C0 14. I2C1
+ * 15. I2C2 16. I2C3 17. I2C4 18. TIMER0
+ * 19. TIMER1 20. TIMER2 21. TIMER3 22. TIMER4
+ * 23. WD0 24. EXT0 25. EXT1 26. GPIO
+ * 27. WD1
+ *
+ * Known resets in EyeQ5 domain 1 (type EQR_EYEQ5_ACRP):
+ * 0. VMP0 1. VMP1 2. VMP2 3. VMP3
+ * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1
+ * 8. MPC0 9. MPC1 10. MPC2 11. MPC3
+ * 12. MPC4
+ *
+ * Known resets in EyeQ5 domain 2 (type EQR_EYEQ5_PCIE):
+ * 0. PCIE0_CORE 1. PCIE0_APB 2. PCIE0_LINK_AXI 3. PCIE0_LINK_MGMT
+ * 4. PCIE0_LINK_HOT 5. PCIE0_LINK_PIPE 6. PCIE1_CORE 7. PCIE1_APB
+ * 8. PCIE1_LINK_AXI 9. PCIE1_LINK_MGMT 10. PCIE1_LINK_HOT 11. PCIE1_LINK_PIPE
+ * 12. MULTIPHY 13. MULTIPHY_APB 15. PCIE0_LINK_MGMT 16. PCIE1_LINK_MGMT
+ * 17. PCIE0_LINK_PM 18. PCIE1_LINK_PM
+ *
+ * Known resets in EyeQ6L domain 0 (type EQR_EYEQ5_SARCR):
+ * 0. SPI0 1. SPI1 2. UART0 3. I2C0
+ * 4. I2C1 5. TIMER0 6. TIMER1 7. TIMER2
+ * 8. TIMER3 9. WD0 10. WD1 11. EXT0
+ * 12. EXT1 13. GPIO
+ *
+ * Known resets in EyeQ6L domain 1 (type EQR_EYEQ5_ACRP):
+ * 0. VMP0 1. VMP1 2. VMP2 3. VMP3
+ * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1
+ * 8. MPC0 9. MPC1 10. MPC2 11. MPC3
+ * 12. MPC4
+ *
+ * Known resets in EyeQ6H west/east (type EQR_EYEQ6H_SARCR):
+ * 0. CAN 1. SPI0 2. SPI1 3. UART0
+ * 4. UART1 5. I2C0 6. I2C1 7. -hole-
+ * 8. TIMER0 9. TIMER1 10. WD 11. EXT TIMER
+ * 12. GPIO
+ *
+ * Known resets in EyeQ6H acc (type EQR_EYEQ5_ACRP):
+ * 1. XNN0 2. XNN1 3. XNN2 4. XNN3
+ * 5. VMP0 6. VMP1 7. VMP2 8. VMP3
+ * 9. PMA0 10. PMA1 11. MPC0 12. MPC1
+ * 13. MPC2 14. MPC3 15. PERIPH
+ *
+ * Abbreviations:
+ * - PMA: Programmable Macro Array
+ * - MPC: Multi-threaded Processing Clusters
+ * - VMP: Vector Microcode Processors
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/lockdep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/*
+ * A reset ID, as returned by eqr_of_xlate_*(), is a (domain, offset) pair.
+ * Low byte is domain, rest is offset.
+ */
+#define ID_DOMAIN_MASK GENMASK(7, 0)
+#define ID_OFFSET_MASK GENMASK(31, 8)
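+
+/*
+ * Illustrative encoding (not part of the register layout): domain 1,
+ * offset 9 packs as
+ *     FIELD_PREP(ID_DOMAIN_MASK, 1) | FIELD_PREP(ID_OFFSET_MASK, 9)
+ * i.e. id == 0x00000901, and FIELD_GET() recovers both halves.
+ */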
+
+enum eqr_domain_type {
+ EQR_EYEQ5_SARCR,
+ EQR_EYEQ5_ACRP,
+ EQR_EYEQ5_PCIE,
+ EQR_EYEQ6H_SARCR,
+};
+
+/*
+ * Domain type EQR_EYEQ5_SARCR register offsets.
+ */
+#define EQR_EYEQ5_SARCR_REQUEST (0x000)
+#define EQR_EYEQ5_SARCR_STATUS (0x004)
+
+/*
+ * Domain type EQR_EYEQ5_ACRP register masks.
+ * Registers are: base + 4 * offset.
+ */
+#define EQR_EYEQ5_ACRP_PD_REQ BIT(0)
+#define EQR_EYEQ5_ACRP_ST_POWER_DOWN BIT(27)
+#define EQR_EYEQ5_ACRP_ST_ACTIVE BIT(29)
+
+/*
+ * Domain type EQR_EYEQ6H_SARCR register offsets.
+ */
+#define EQR_EYEQ6H_SARCR_RST_REQUEST (0x000)
+#define EQR_EYEQ6H_SARCR_CLK_STATUS (0x004)
+#define EQR_EYEQ6H_SARCR_RST_STATUS (0x008)
+#define EQR_EYEQ6H_SARCR_CLK_REQUEST (0x00C)
+
+struct eqr_busy_wait_timings {
+ unsigned long sleep_us;
+ unsigned long timeout_us;
+};
+
+static const struct eqr_busy_wait_timings eqr_timings[] = {
+ [EQR_EYEQ5_SARCR] = {1, 10},
+ [EQR_EYEQ5_ACRP] = {1, 40 * USEC_PER_MSEC}, /* LBIST implies long timeout. */
+ /* EQR_EYEQ5_PCIE does no busy waiting. */
+ [EQR_EYEQ6H_SARCR] = {1, 400},
+};
+
+#define EQR_MAX_DOMAIN_COUNT 3
+
+struct eqr_domain_descriptor {
+ enum eqr_domain_type type;
+ u32 valid_mask;
+ unsigned int offset;
+};
+
+struct eqr_match_data {
+ unsigned int domain_count;
+ const struct eqr_domain_descriptor *domains;
+};
+
+struct eqr_private {
+ /*
+ * One mutex per domain for read-modify-write operations on registers.
+ * Some domains can be involved in LBIST which implies long critical
+ * sections; we wouldn't want other domains to be impacted by that.
+ */
+ struct mutex mutexes[EQR_MAX_DOMAIN_COUNT];
+ void __iomem *base;
+ const struct eqr_match_data *data;
+ struct reset_controller_dev rcdev;
+};
+
+static inline struct eqr_private *eqr_rcdev_to_priv(struct reset_controller_dev *x)
+{
+ return container_of(x, struct eqr_private, rcdev);
+}
+
+static u32 eqr_double_readl(void __iomem *addr_a, void __iomem *addr_b,
+ u32 *dest_a, u32 *dest_b)
+{
+ *dest_a = readl(addr_a);
+ *dest_b = readl(addr_b);
+ return 0; /* read_poll_timeout() op argument must return something. */
+}
+
+static int eqr_busy_wait_locked(struct eqr_private *priv, struct device *dev,
+ u32 domain, u32 offset, bool assert)
+{
+ void __iomem *base = priv->base + priv->data->domains[domain].offset;
+ enum eqr_domain_type domain_type = priv->data->domains[domain].type;
+ unsigned long timeout_us = eqr_timings[domain_type].timeout_us;
+ unsigned long sleep_us = eqr_timings[domain_type].sleep_us;
+ u32 val, mask, rst_status, clk_status;
+ void __iomem *reg;
+ int ret;
+
+ lockdep_assert_held(&priv->mutexes[domain]);
+
+ switch (domain_type) {
+ case EQR_EYEQ5_SARCR:
+ reg = base + EQR_EYEQ5_SARCR_STATUS;
+ mask = BIT(offset);
+
+ ret = readl_poll_timeout(reg, val, !(val & mask) == assert,
+ sleep_us, timeout_us);
+ break;
+
+ case EQR_EYEQ5_ACRP:
+ reg = base + 4 * offset;
+ if (assert)
+ mask = EQR_EYEQ5_ACRP_ST_POWER_DOWN;
+ else
+ mask = EQR_EYEQ5_ACRP_ST_ACTIVE;
+
+ ret = readl_poll_timeout(reg, val, !!(val & mask),
+ sleep_us, timeout_us);
+ break;
+
+ case EQR_EYEQ5_PCIE:
+ ret = 0; /* No busy waiting. */
+ break;
+
+ case EQR_EYEQ6H_SARCR:
+ /*
+ * Wait until both bits change:
+ * readl(base + EQR_EYEQ6H_SARCR_RST_STATUS) & BIT(offset)
+ * readl(base + EQR_EYEQ6H_SARCR_CLK_STATUS) & BIT(offset)
+ */
+ mask = BIT(offset);
+ ret = read_poll_timeout(eqr_double_readl, val,
+ (!(rst_status & mask) == assert) &&
+ (!(clk_status & mask) == assert),
+ sleep_us, timeout_us, false,
+ base + EQR_EYEQ6H_SARCR_RST_STATUS,
+ base + EQR_EYEQ6H_SARCR_CLK_STATUS,
+ &rst_status, &clk_status);
+ break;
+
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_dbg(dev, "%u-%u: timeout\n", domain, offset);
+ return ret;
+}
+
+static void eqr_assert_locked(struct eqr_private *priv, u32 domain, u32 offset)
+{
+ enum eqr_domain_type domain_type = priv->data->domains[domain].type;
+ void __iomem *base, *reg;
+ u32 val;
+
+ lockdep_assert_held(&priv->mutexes[domain]);
+
+ base = priv->base + priv->data->domains[domain].offset;
+
+ switch (domain_type) {
+ case EQR_EYEQ5_SARCR:
+ reg = base + EQR_EYEQ5_SARCR_REQUEST;
+ writel(readl(reg) & ~BIT(offset), reg);
+ break;
+
+ case EQR_EYEQ5_ACRP:
+ reg = base + 4 * offset;
+ writel(readl(reg) | EQR_EYEQ5_ACRP_PD_REQ, reg);
+ break;
+
+ case EQR_EYEQ5_PCIE:
+ writel(readl(base) & ~BIT(offset), base);
+ break;
+
+ case EQR_EYEQ6H_SARCR:
+ /* RST_REQUEST and CLK_REQUEST must be kept in sync. */
+ val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
+ val &= ~BIT(offset);
+ writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
+ writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int eqr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
+ u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
+ u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
+
+ dev_dbg(rcdev->dev, "%u-%u: assert request\n", domain, offset);
+
+ guard(mutex)(&priv->mutexes[domain]);
+
+ eqr_assert_locked(priv, domain, offset);
+ return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, true);
+}
+
+static void eqr_deassert_locked(struct eqr_private *priv, u32 domain,
+ u32 offset)
+{
+ enum eqr_domain_type domain_type = priv->data->domains[domain].type;
+ void __iomem *base, *reg;
+ u32 val;
+
+ lockdep_assert_held(&priv->mutexes[domain]);
+
+ base = priv->base + priv->data->domains[domain].offset;
+
+ switch (domain_type) {
+ case EQR_EYEQ5_SARCR:
+ reg = base + EQR_EYEQ5_SARCR_REQUEST;
+ writel(readl(reg) | BIT(offset), reg);
+ break;
+
+ case EQR_EYEQ5_ACRP:
+ reg = base + 4 * offset;
+ writel(readl(reg) & ~EQR_EYEQ5_ACRP_PD_REQ, reg);
+ break;
+
+ case EQR_EYEQ5_PCIE:
+ writel(readl(base) | BIT(offset), base);
+ break;
+
+ case EQR_EYEQ6H_SARCR:
+ /* RST_REQUEST and CLK_REQUEST must be kept in sync. */
+ val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
+ val |= BIT(offset);
+ writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
+ writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int eqr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
+ u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
+ u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
+
+ dev_dbg(rcdev->dev, "%u-%u: deassert request\n", domain, offset);
+
+ guard(mutex)(&priv->mutexes[domain]);
+
+ eqr_deassert_locked(priv, domain, offset);
+ return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, false);
+}
+
+static int eqr_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
+ u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
+ struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
+ enum eqr_domain_type domain_type = priv->data->domains[domain].type;
+ void __iomem *base, *reg;
+
+ dev_dbg(rcdev->dev, "%u-%u: status request\n", domain, offset);
+
+ guard(mutex)(&priv->mutexes[domain]);
+
+ base = priv->base + priv->data->domains[domain].offset;
+
+ switch (domain_type) {
+ case EQR_EYEQ5_SARCR:
+ reg = base + EQR_EYEQ5_SARCR_STATUS;
+ return !(readl(reg) & BIT(offset));
+ case EQR_EYEQ5_ACRP:
+ reg = base + 4 * offset;
+ return !(readl(reg) & EQR_EYEQ5_ACRP_ST_ACTIVE);
+ case EQR_EYEQ5_PCIE:
+ return !(readl(base) & BIT(offset));
+ case EQR_EYEQ6H_SARCR:
+ reg = base + EQR_EYEQ6H_SARCR_RST_STATUS;
+ return !(readl(reg) & BIT(offset));
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct reset_control_ops eqr_ops = {
+ .assert = eqr_assert,
+ .deassert = eqr_deassert,
+ .status = eqr_status,
+};
+
+static int eqr_of_xlate_internal(struct reset_controller_dev *rcdev,
+ u32 domain, u32 offset)
+{
+ struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
+
+ if (domain >= priv->data->domain_count || offset > 31 ||
+ !(priv->data->domains[domain].valid_mask & BIT(offset))) {
+ dev_err(rcdev->dev, "%u-%u: invalid reset\n", domain, offset);
+ return -EINVAL;
+ }
+
+ return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset);
+}
+
+static int eqr_of_xlate_onecell(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return eqr_of_xlate_internal(rcdev, 0, reset_spec->args[0]);
+}
+
+static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
+}
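+
+/*
+ * Hypothetical two-cell consumer (node label invented): on a controller
+ * with domain_count > 1,
+ *     resets = <&olb 1 9>;
+ * reaches eqr_of_xlate_twocells(), which checks domain 1's valid_mask for
+ * bit 9 before packing the pair into a single ID.
+ */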
+
+static int eqr_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ const struct of_device_id *match;
+ struct device *dev = &adev->dev;
+ struct eqr_private *priv;
+ unsigned int i;
+ int ret;
+
+ /*
+ * We are an auxiliary device of clk-eyeq. We do not have an OF node by
+ * default; let's reuse our parent's OF node.
+ */
+ WARN_ON(dev->of_node);
+ device_set_of_node_from_dev(dev, dev->parent);
+ if (!dev->of_node)
+ return -ENODEV;
+
+ /*
+ * Using our newfound OF node, we can get match data. We cannot use
+ * device_get_match_data() because it does not match reused OF nodes.
+ */
+ match = of_match_node(dev->driver->of_match_table, dev->of_node);
+ if (!match || !match->data)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = match->data;
+ priv->base = (void __iomem *)dev_get_platdata(dev);
+ priv->rcdev.ops = &eqr_ops;
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.dev = dev;
+ priv->rcdev.of_node = dev->of_node;
+
+ if (priv->data->domain_count == 1) {
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->rcdev.of_xlate = eqr_of_xlate_onecell;
+ } else {
+ priv->rcdev.of_reset_n_cells = 2;
+ priv->rcdev.of_xlate = eqr_of_xlate_twocells;
+ }
+
+ for (i = 0; i < priv->data->domain_count; i++)
+ mutex_init(&priv->mutexes[i]);
+
+ priv->rcdev.nr_resets = 0;
+ for (i = 0; i < priv->data->domain_count; i++)
+ priv->rcdev.nr_resets += hweight32(priv->data->domains[i].valid_mask);
+
+ ret = devm_reset_controller_register(dev, &priv->rcdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed registering reset controller\n");
+
+ return 0;
+}
+
+static const struct eqr_domain_descriptor eqr_eyeq5_domains[] = {
+ {
+ .type = EQR_EYEQ5_SARCR,
+ .valid_mask = 0xFFFFFF8,
+ .offset = 0x004,
+ },
+ {
+ .type = EQR_EYEQ5_ACRP,
+ .valid_mask = 0x0001FFF,
+ .offset = 0x200,
+ },
+ {
+ .type = EQR_EYEQ5_PCIE,
+ .valid_mask = 0x007BFFF,
+ .offset = 0x120,
+ },
+};
+
+static const struct eqr_match_data eqr_eyeq5_data = {
+ .domain_count = ARRAY_SIZE(eqr_eyeq5_domains),
+ .domains = eqr_eyeq5_domains,
+};
+
+static const struct eqr_domain_descriptor eqr_eyeq6l_domains[] = {
+ {
+ .type = EQR_EYEQ5_SARCR,
+ .valid_mask = 0x3FFF,
+ .offset = 0x004,
+ },
+ {
+ .type = EQR_EYEQ5_ACRP,
+ .valid_mask = 0x00FF,
+ .offset = 0x200,
+ },
+};
+
+static const struct eqr_match_data eqr_eyeq6l_data = {
+ .domain_count = ARRAY_SIZE(eqr_eyeq6l_domains),
+ .domains = eqr_eyeq6l_domains,
+};
+
+/* West and east OLBs each have an instance. */
+static const struct eqr_domain_descriptor eqr_eyeq6h_we_domains[] = {
+ {
+ .type = EQR_EYEQ6H_SARCR,
+ .valid_mask = 0x1F7F,
+ .offset = 0x004,
+ },
+};
+
+static const struct eqr_match_data eqr_eyeq6h_we_data = {
+ .domain_count = ARRAY_SIZE(eqr_eyeq6h_we_domains),
+ .domains = eqr_eyeq6h_we_domains,
+};
+
+static const struct eqr_domain_descriptor eqr_eyeq6h_acc_domains[] = {
+ {
+ .type = EQR_EYEQ5_ACRP,
+ .valid_mask = 0x7FFF,
+ .offset = 0x000,
+ },
+};
+
+static const struct eqr_match_data eqr_eyeq6h_acc_data = {
+ .domain_count = ARRAY_SIZE(eqr_eyeq6h_acc_domains),
+ .domains = eqr_eyeq6h_acc_domains,
+};
+
+/*
+ * This table describes OLB system-controller compatibles. It is not used
+ * to match against devicetree nodes.
+ */
+static const struct of_device_id eqr_match_table[] = {
+ { .compatible = "mobileye,eyeq5-olb", .data = &eqr_eyeq5_data },
+ { .compatible = "mobileye,eyeq6l-olb", .data = &eqr_eyeq6l_data },
+ { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqr_eyeq6h_we_data },
+ { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqr_eyeq6h_we_data },
+ { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqr_eyeq6h_acc_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, eqr_match_table);
+
+static const struct auxiliary_device_id eqr_id_table[] = {
+ { .name = "clk_eyeq.reset" },
+ { .name = "clk_eyeq.reset_west" },
+ { .name = "clk_eyeq.reset_east" },
+ { .name = "clk_eyeq.reset_acc" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, eqr_id_table);
+
+static struct auxiliary_driver eqr_driver = {
+ .probe = eqr_probe,
+ .id_table = eqr_id_table,
+ .driver = {
+ .of_match_table = eqr_match_table,
+ }
+};
+module_auxiliary_driver(eqr_driver);
diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c
index b62a2fd44e4e..e77e4cca377d 100644
--- a/drivers/reset/reset-k210.c
+++ b/drivers/reset/reset-k210.c
@@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = {
static int k210_rst_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *parent_np = of_get_parent(dev->of_node);
+ struct device_node *parent_np;
struct k210_rst *ksr;
dev_info(dev, "K210 reset controller\n");
@@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev)
if (!ksr)
return -ENOMEM;
+ parent_np = of_get_parent(dev->of_node);
ksr->map = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(ksr->map))
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 28fb85772b3e..e42b2f24a93d 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -150,29 +150,15 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
if (IS_ERR(rc->base))
return PTR_ERR(rc->base);
- rc->clk_reg = devm_clk_get(&pdev->dev, "reg");
- if (IS_ERR(rc->clk_reg)) {
- dev_err(&pdev->dev, "reg clock not found\n");
- return PTR_ERR(rc->clk_reg);
- }
-
- rc->clk_delay = devm_clk_get(&pdev->dev, "delay");
- if (IS_ERR(rc->clk_delay)) {
- dev_err(&pdev->dev, "delay clock not found\n");
- return PTR_ERR(rc->clk_delay);
- }
-
- ret = clk_prepare_enable(rc->clk_reg);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable reg clock\n");
- return ret;
- }
+ rc->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
+ if (IS_ERR(rc->clk_reg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rc->clk_reg),
+ "reg clock not found\n");
- ret = clk_prepare_enable(rc->clk_delay);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable delay clock\n");
- goto dis_clk_reg;
- }
+ rc->clk_delay = devm_clk_get_enabled(&pdev->dev, "delay");
+ if (IS_ERR(rc->clk_delay))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rc->clk_delay),
+ "delay clock not found\n");
fcclk = clk_get_rate(rc->clk_reg) / USEC_PER_SEC;
firc = clk_get_rate(rc->clk_delay) / USEC_PER_SEC;
@@ -189,10 +175,8 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
rc->rcdev.of_node = pdev->dev.of_node;
ret = reset_controller_register(&rc->rcdev);
- if (ret) {
- dev_err(&pdev->dev, "unable to register device\n");
- goto dis_clks;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "unable to register device\n");
rc->restart_nb.priority = 192,
rc->restart_nb.notifier_call = lpc18xx_rgu_restart,
@@ -201,13 +185,6 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to register restart handler\n");
return 0;
-
-dis_clks:
- clk_disable_unprepare(rc->clk_delay);
-dis_clk_reg:
- clk_disable_unprepare(rc->clk_reg);
-
- return ret;
}
static const struct of_device_id lpc18xx_rgu_match[] = {
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index f78be97898bc..1e9fca3e30e8 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -102,6 +102,11 @@ static const struct meson_reset_param meson_s4_param = {
.level_offset = 0x40,
};
+static const struct meson_reset_param t7_param = {
+ .reg_count = 7,
+ .level_offset = 0x40,
+};
+
static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson8b-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param},
@@ -109,6 +114,7 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param},
{ .compatible = "amlogic,c3-reset", .data = &meson_s4_param},
+ { .compatible = "amlogic,t7-reset", .data = &t7_param},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 58e3b382e316..1e02b58ff61f 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
obj-$(CONFIG_RPMSG_CTRL) += rpmsg_ctrl.o
obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
+CFLAGS_qcom_glink_native.o := -I$(src)
qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 82d460ff4777..0b2f29006908 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -23,6 +23,9 @@
#include "rpmsg_internal.h"
#include "qcom_glink_native.h"
+#define CREATE_TRACE_POINTS
+#include "qcom_glink_trace.h"
+
#define GLINK_NAME_SIZE 32
#define GLINK_VERSION_1 1
@@ -30,11 +33,16 @@
#define RPM_GLINK_CID_MAX 65536
struct glink_msg {
- __le16 cmd;
- __le16 param1;
- __le32 param2;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(glink_msg_hdr, hdr, __packed,
+ __le16 cmd;
+ __le16 param1;
+ __le32 param2;
+ );
u8 data[];
} __packed;
+static_assert(offsetof(struct glink_msg, data) == sizeof(struct glink_msg_hdr),
+ "struct member likely outside of __struct_group()");
/**
* struct glink_defer_cmd - deferred incoming control message
@@ -48,7 +56,7 @@ struct glink_msg {
struct glink_defer_cmd {
struct list_head node;
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
u8 data[];
};
@@ -78,6 +86,7 @@ struct glink_core_rx_intent {
/**
* struct qcom_glink - driver context, relates to one remote subsystem
* @dev: reference to the associated struct device
+ * @label: identifier of the glink edge
* @rx_pipe: pipe object for receive FIFO
* @tx_pipe: pipe object for transmit FIFO
* @rx_work: worker for handling received control messages
@@ -96,6 +105,8 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
+ const char *label;
+
struct qcom_glink_pipe *rx_pipe;
struct qcom_glink_pipe *tx_pipe;
@@ -392,6 +403,8 @@ static int qcom_glink_send_version(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
+ trace_qcom_glink_cmd_version_tx(glink->label, GLINK_VERSION_1, glink->features);
+
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -403,6 +416,8 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
+ trace_qcom_glink_cmd_version_ack_tx(glink->label, msg.param1, msg.param2);
+
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -415,6 +430,9 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink,
msg.param1 = cpu_to_le16(channel->rcid);
msg.param2 = cpu_to_le32(0);
+ trace_qcom_glink_cmd_open_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -424,9 +442,16 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_rx_intent_req_ack_rx(glink->label,
+ channel ? channel->name : NULL,
+ channel ? channel->lcid : 0,
+ cid, granted);
if (!channel) {
dev_err(glink->dev, "unable to find channel\n");
return;
@@ -455,12 +480,9 @@ static void qcom_glink_intent_req_abort(struct glink_channel *channel)
static int qcom_glink_send_open_req(struct qcom_glink *glink,
struct glink_channel *channel)
{
- struct {
- struct glink_msg msg;
- u8 name[GLINK_NAME_SIZE];
- } __packed req;
+ DEFINE_RAW_FLEX(struct glink_msg, req, data, GLINK_NAME_SIZE);
int name_len = strlen(channel->name) + 1;
- int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
+ int req_len = ALIGN(sizeof(*req) + name_len, 8);
int ret;
unsigned long flags;
@@ -476,12 +498,15 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
channel->lcid = ret;
- req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
- req.msg.param1 = cpu_to_le16(channel->lcid);
- req.msg.param2 = cpu_to_le32(name_len);
- strcpy(req.name, channel->name);
+ req->cmd = cpu_to_le16(GLINK_CMD_OPEN);
+ req->param1 = cpu_to_le16(channel->lcid);
+ req->param2 = cpu_to_le32(name_len);
+ strcpy(req->data, channel->name);
+
+ trace_qcom_glink_cmd_open_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
- ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
+ ret = qcom_glink_tx(glink, req, req_len, NULL, 0, true);
if (ret)
goto remove_idr;
@@ -505,18 +530,24 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink,
req.param1 = cpu_to_le16(channel->lcid);
req.param2 = 0;
+ trace_qcom_glink_cmd_close_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
static void qcom_glink_send_close_ack(struct qcom_glink *glink,
- unsigned int rcid)
+ struct glink_channel *channel)
{
struct glink_msg req;
req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
- req.param1 = cpu_to_le16(rcid);
+ req.param1 = cpu_to_le16(channel->rcid);
req.param2 = 0;
+ trace_qcom_glink_cmd_close_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
@@ -548,6 +579,9 @@ static void qcom_glink_rx_done_work(struct work_struct *work)
cmd.lcid = cid;
cmd.liid = iid;
+ trace_qcom_glink_cmd_rx_done_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid, cmd.liid, reuse);
+
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (!reuse) {
kfree(intent->data);
@@ -598,6 +632,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink,
u32 version,
u32 features)
{
+ trace_qcom_glink_cmd_version_rx(glink->label, version, features);
+
switch (version) {
case 0:
break;
@@ -625,6 +661,8 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
u32 version,
u32 features)
{
+ trace_qcom_glink_cmd_version_ack_rx(glink->label, version, features);
+
switch (version) {
case 0:
/* Version negotiation failed */
@@ -656,6 +694,10 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
{
struct glink_msg msg;
+ trace_qcom_glink_cmd_rx_intent_req_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ granted);
+
msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(granted);
@@ -693,6 +735,10 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
cmd.size = cpu_to_le32(intent->size);
cmd.liid = cpu_to_le32(intent->id);
+ trace_qcom_glink_cmd_intent_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ cmd.count, cmd.size, cmd.liid);
+
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
return 0;
@@ -745,9 +791,14 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_rx_done_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, cid, iid, reuse);
if (!channel) {
dev_err(glink->dev, "invalid channel id received\n");
return;
@@ -797,6 +848,10 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+ trace_qcom_glink_cmd_rx_intent_req_rx(glink->label,
+ channel ? channel->name : NULL,
+ channel ? channel->lcid : 0,
+ cid, size);
if (!channel) {
pr_err("%s channel not found for cid %d\n", __func__, cid);
return;
@@ -826,7 +881,9 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
INIT_LIST_HEAD(&dcmd->node);
- qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
+ qcom_glink_rx_peek(glink,
+ container_of(&dcmd->msg, struct glink_msg, hdr), 0,
+ sizeof(dcmd->msg) + extra);
spin_lock(&glink->rx_lock);
list_add_tail(&dcmd->node, &glink->rx_queue);
@@ -843,7 +900,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
struct glink_core_rx_intent *intent;
struct glink_channel *channel;
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
__le32 chunk_size;
__le32 left_size;
} __packed hdr;
@@ -869,9 +926,15 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
}
rcid = le16_to_cpu(hdr.msg.param1);
+ liid = le32_to_cpu(hdr.msg.param2);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_tx_data_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid,
+ liid, chunk_size, left_size,
+ hdr.msg.cmd == GLINK_CMD_TX_DATA_CONT);
if (!channel) {
dev_dbg(glink->dev, "Data on non-existing channel\n");
@@ -902,8 +965,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
intent = channel->buf;
}
} else {
- liid = le32_to_cpu(hdr.msg.param2);
-
spin_lock_irqsave(&channel->intent_lock, flags);
intent = idr_find(&channel->liids, liid);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -952,6 +1013,14 @@ advance_rx:
return ret;
}
+static void qcom_glink_rx_read_notif(struct qcom_glink *glink)
+{
+ trace_qcom_glink_cmd_read_notif_rx(glink->label);
+
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+ qcom_glink_tx_kick(glink);
+}
+
static void qcom_glink_handle_intent(struct qcom_glink *glink,
unsigned int cid,
unsigned int count,
@@ -965,7 +1034,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
};
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
struct intent_pair intents[];
} __packed * msg;
@@ -983,6 +1052,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (!channel) {
+ trace_qcom_glink_cmd_intent_rx(glink->label, NULL, 0, cid, count, 0, 0);
dev_err(glink->dev, "intents for non-existing channel\n");
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
return;
@@ -994,6 +1064,11 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
qcom_glink_rx_peek(glink, msg, 0, msglen);
+ trace_qcom_glink_cmd_intent_rx(glink->label, channel->name,
+ channel->lcid, cid, count,
+ count > 0 ? msg->intents[0].size : 0,
+ count > 0 ? msg->intents[0].iid : 0);
+
for (i = 0; i < count; ++i) {
intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
if (!intent)
@@ -1022,9 +1097,14 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
struct glink_channel *channel;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock(&glink->idr_lock);
channel = idr_find(&glink->lcids, lcid);
spin_unlock(&glink->idr_lock);
+
+ trace_qcom_glink_cmd_open_ack_rx(glink->label, channel ? channel->name : NULL,
+ lcid, channel ? channel->rcid : 0);
if (!channel) {
dev_err(glink->dev, "Invalid open ack packet\n");
return -EINVAL;
@@ -1057,6 +1137,9 @@ static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(sigs);
+ trace_qcom_glink_cmd_signal_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid, sigs);
+
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -1067,9 +1150,14 @@ static void qcom_glink_handle_signals(struct qcom_glink *glink,
unsigned long flags;
bool enable;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_signal_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid, sigs);
if (!channel) {
dev_err(glink->dev, "signal for non-existing channel\n");
return;
@@ -1114,7 +1202,6 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
break;
case GLINK_CMD_OPEN_ACK:
ret = qcom_glink_rx_open_ack(glink, param1);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_OPEN:
ret = qcom_glink_rx_defer(glink, param2);
@@ -1124,27 +1211,22 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
ret = qcom_glink_rx_data(glink, avail);
break;
case GLINK_CMD_READ_NOTIF:
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
- qcom_glink_tx_kick(glink);
+ qcom_glink_rx_read_notif(glink);
break;
case GLINK_CMD_INTENT:
qcom_glink_handle_intent(glink, param1, param2, avail);
break;
case GLINK_CMD_RX_DONE:
qcom_glink_handle_rx_done(glink, param1, param2, false);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_RX_DONE_W_REUSE:
qcom_glink_handle_rx_done(glink, param1, param2, true);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_RX_INTENT_REQ_ACK:
qcom_glink_handle_intent_req_ack(glink, param1, param2);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_SIGNALS:
qcom_glink_handle_signals(glink, param1, param2);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
default:
dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
@@ -1349,6 +1431,10 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
cmd.cid = channel->lcid;
cmd.size = size;
+ trace_qcom_glink_cmd_rx_intent_req_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ cmd.size);
+
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
goto unlock;
@@ -1377,7 +1463,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
struct glink_core_rx_intent *tmp;
int iid = 0;
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
__le32 chunk_size;
__le32 left_size;
} __packed req;
@@ -1429,6 +1515,12 @@ static int __qcom_glink_send(struct glink_channel *channel,
req.chunk_size = cpu_to_le32(chunk_size);
req.left_size = cpu_to_le32(len - offset - chunk_size);
+ trace_qcom_glink_cmd_tx_data_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ iid, chunk_size,
+ len - offset - chunk_size,
+ offset > 0);
+
ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
if (ret) {
/* Mark intent available if we failed */
@@ -1544,6 +1636,8 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
create_device = true;
}
+ trace_qcom_glink_cmd_open_rx(glink->label, name, channel->lcid, rcid);
+
spin_lock_irqsave(&glink->idr_lock, flags);
ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
if (ret < 0) {
@@ -1605,6 +1699,9 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_close_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid);
if (WARN(!channel, "close request on unknown channel\n"))
return;
@@ -1620,7 +1717,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
}
channel->rpdev = NULL;
- qcom_glink_send_close_ack(glink, channel->rcid);
+ qcom_glink_send_close_ack(glink, channel);
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
@@ -1641,6 +1738,9 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->lcids, lcid);
+
+ trace_qcom_glink_cmd_close_ack_rx(glink->label, channel ? channel->name : NULL,
+ lcid, channel ? channel->rcid : 0);
if (WARN(!channel, "close ack on unknown channel\n")) {
spin_unlock_irqrestore(&glink->idr_lock, flags);
return;
@@ -1685,7 +1785,7 @@ static void qcom_glink_work(struct work_struct *work)
list_del(&dcmd->node);
spin_unlock_irqrestore(&glink->rx_lock, flags);
- msg = &dcmd->msg;
+ msg = container_of(&dcmd->msg, struct glink_msg, hdr);
cmd = le16_to_cpu(msg->cmd);
param1 = le16_to_cpu(msg->param1);
param2 = le32_to_cpu(msg->param2);
@@ -1815,6 +1915,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->lcids);
idr_init(&glink->rcids);
+ ret = of_property_read_string(dev->of_node, "label", &glink->label);
+ if (ret < 0)
+ glink->label = dev->of_node->name;
+
glink->dev->groups = qcom_glink_groups;
ret = device_add_groups(dev, qcom_glink_groups);
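A note on the probe change above: it reads an optional "label" devicetree property and falls back to the node name when the property is absent. A minimal sketch of that optional-property pattern follows; the OF calls are the real API, but the helper name and surrounding context are illustrative only.

	#include <linux/of.h>

	static const char *example_get_label(struct device_node *np)
	{
		const char *label;

		/* optional property: fall back to the node name if absent */
		if (of_property_read_string(np, "label", &label) < 0)
			label = np->name;

		return label;
	}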
diff --git a/drivers/rpmsg/qcom_glink_trace.h b/drivers/rpmsg/qcom_glink_trace.h
new file mode 100644
index 000000000000..668bdea1d78f
--- /dev/null
+++ b/drivers/rpmsg/qcom_glink_trace.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_glink
+
+#if !defined(__QCOM_GLINK_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __QCOM_GLINK_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include "qcom_glink_native.h"
+
+
+TRACE_EVENT(qcom_glink_cmd_version,
+ TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx),
+ TP_ARGS(remote, version, features, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(u32, version)
+ __field(u32, features)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->version = version;
+ __entry->features = features;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s version: %u features: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __entry->version,
+ __entry->features
+ )
+);
+#define trace_qcom_glink_cmd_version_tx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_version_rx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_version_ack,
+ TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx),
+ TP_ARGS(remote, version, features, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(u32, version)
+ __field(u32, features)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->version = version;
+ __entry->features = features;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s version: %u features: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __entry->version,
+ __entry->features
+ )
+);
+#define trace_qcom_glink_cmd_version_ack_tx(...) trace_qcom_glink_cmd_version_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_version_ack_rx(...) trace_qcom_glink_cmd_version_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_open,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_open_tx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_open_rx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_close,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_close_tx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_close_rx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_open_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_open_ack_tx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_open_ack_rx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_intent,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t count, size_t size, u32 liid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, count, size, liid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, count)
+ __field(u32, size)
+ __field(u32, liid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->count = count;
+ __entry->size = size;
+ __entry->liid = liid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] count: %d [size: %d liid: %d]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->count,
+ __entry->size,
+ __entry->liid
+ )
+);
+#define trace_qcom_glink_cmd_intent_tx(...) trace_qcom_glink_cmd_intent(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_intent_rx(...) trace_qcom_glink_cmd_intent(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_done,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, bool reuse, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, iid, reuse, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, iid)
+ __field(bool, reuse)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->iid = iid;
+ __entry->reuse = reuse;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d reuse: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->iid,
+ __entry->reuse
+ )
+);
+#define trace_qcom_glink_cmd_rx_done_tx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_done_rx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_intent_req,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t size, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, size, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, size)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->size = size;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] size: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->size
+ )
+);
+#define trace_qcom_glink_cmd_rx_intent_req_tx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_intent_req_rx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_intent_req_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool granted, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, granted, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, granted)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->granted = granted;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] granted: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->granted
+ )
+);
+#define trace_qcom_glink_cmd_rx_intent_req_ack_tx(...) trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_intent_req_ack_rx(...) trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_tx_data,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, u32 chunk_size, u32 left_size, bool cont, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, iid, chunk_size, left_size, cont, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, iid)
+ __field(u32, chunk_size)
+ __field(u32, left_size)
+ __field(bool, cont)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->iid = iid;
+ __entry->chunk_size = chunk_size;
+ __entry->left_size = left_size;
+ __entry->cont = cont;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d chunk_size: %d left_size: %d cont: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->iid,
+ __entry->chunk_size,
+ __entry->left_size,
+ __entry->cont
+ )
+);
+#define trace_qcom_glink_cmd_tx_data_tx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_tx_data_rx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_close_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_close_ack_tx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_close_ack_rx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_read_notif,
+ TP_PROTO(const char *remote, bool tx),
+ TP_ARGS(remote, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote)
+ )
+);
+#define trace_qcom_glink_cmd_read_notif_tx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_read_notif_rx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_signal,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, unsigned int signals, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, signals, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, signals)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->signals = signals;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] signals: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->signals
+ )
+);
+#define trace_qcom_glink_cmd_signal_tx(...) trace_qcom_glink_cmd_signal(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_signal_rx(...) trace_qcom_glink_cmd_signal(__VA_ARGS__, false)
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE qcom_glink_trace
+
+#include <trace/define_trace.h>
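The trace header avoids duplicating every command event per direction: each TRACE_EVENT carries a bool tx field, and thin variadic wrapper macros append the direction as the trailing argument. A self-contained userspace sketch of that wrapper technique, with illustrative names (only the macro pattern mirrors the header above, none of the tracing machinery):

	#include <stdbool.h>
	#include <stdio.h>

	static void trace_cmd_version(const char *remote, unsigned int version, bool tx)
	{
		printf("%s remote: %s version: %u\n", tx ? "tx" : "rx", remote, version);
	}

	/* one event body, two directional entry points */
	#define trace_cmd_version_tx(...) trace_cmd_version(__VA_ARGS__, true)
	#define trace_cmd_version_rx(...) trace_cmd_version(__VA_ARGS__, false)

	int main(void)
	{
		trace_cmd_version_tx("adsp", 1);	/* prints: tx remote: adsp version: 1 */
		trace_cmd_version_rx("adsp", 1);	/* prints: rx remote: adsp version: 1 */
		return 0;
	}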
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 2a95b05982ad..66eb1122248b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -743,6 +743,16 @@ config RTC_DRV_S5M
This driver can also be built as a module. If so, the module
will be called rtc-s5m.
+config RTC_DRV_SD2405AL
+ tristate "DFRobot SD2405AL"
+ select REGMAP_I2C
+ help
+ If you say yes here you will get support for the
+ DFRobot SD2405AL I2C RTC Module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sd2405al.
+
config RTC_DRV_SD3078
tristate "ZXW Shenzhen whwave SD3078"
select REGMAP_I2C
@@ -1827,6 +1837,17 @@ config RTC_DRV_BBNSM
This driver can also be built as a module, if so, the module
will be called "rtc-bbnsm".
+config RTC_DRV_IMX_BBM_SCMI
+ depends on IMX_SCMI_BBM_EXT || COMPILE_TEST
+ default y if ARCH_MXC
+ tristate "NXP i.MX BBM SCMI RTC support"
+ help
+	  If you say yes here you get support for the NXP i.MX BBM SCMI
+ RTC module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc-imx-sm-bbm.
+
config RTC_DRV_IMX_SC
depends on IMX_SCU
depends on HAVE_ARM_SMCCC
@@ -1923,6 +1944,12 @@ config RTC_DRV_STM32
tristate "STM32 RTC"
select REGMAP_MMIO
depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ depends on COMMON_CLK
help
If you say yes here you get support for the STM32 On-Chip
Real Time Clock.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3004e372f25f..f62340ecc534 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_IMX_SC) += rtc-imx-sc.o
+obj-$(CONFIG_RTC_DRV_IMX_BBM_SCMI) += rtc-imx-sm-bbm.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_ISL12026) += rtc-isl12026.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
@@ -162,6 +163,7 @@ obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
+obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o
obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f93bee96e362..993c0878fb66 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -368,6 +368,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
return ret;
rtc->gpbr = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
rtc->gpbr_offset = args.args[0];
if (IS_ERR(rtc->gpbr)) {
dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
diff --git a/drivers/rtc/rtc-imx-sm-bbm.c b/drivers/rtc/rtc-imx-sm-bbm.c
new file mode 100644
index 000000000000..daa472be7c80
--- /dev/null
+++ b/drivers/rtc/rtc-imx-sm-bbm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+struct scmi_imx_bbm {
+ const struct scmi_imx_bbm_proto_ops *ops;
+ struct rtc_device *rtc_dev;
+ struct scmi_protocol_handle *ph;
+ struct notifier_block nb;
+};
+
+static int scmi_imx_bbm_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ struct scmi_protocol_handle *ph = bbnsm->ph;
+ u64 val;
+ int ret;
+
+ ret = bbnsm->ops->rtc_time_get(ph, 0, &val);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(val, tm);
+
+ return 0;
+}
+
+static int scmi_imx_bbm_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ struct scmi_protocol_handle *ph = bbnsm->ph;
+ u64 val;
+
+ val = rtc_tm_to_time64(tm);
+
+ return bbnsm->ops->rtc_time_set(ph, 0, val);
+}
+
+static int scmi_imx_bbm_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ struct scmi_protocol_handle *ph = bbnsm->ph;
+
+ /* scmi_imx_bbm_set_alarm enables the irq, just handle disable here */
+ if (!enable)
+ return bbnsm->ops->rtc_alarm_set(ph, 0, false, 0);
+
+ return 0;
+}
+
+static int scmi_imx_bbm_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ struct scmi_protocol_handle *ph = bbnsm->ph;
+ struct rtc_time *alrm_tm = &alrm->time;
+ u64 val;
+
+ val = rtc_tm_to_time64(alrm_tm);
+
+ return bbnsm->ops->rtc_alarm_set(ph, 0, true, val);
+}
+
+static const struct rtc_class_ops scmi_imx_bbm_rtc_ops = {
+ .read_time = scmi_imx_bbm_read_time,
+ .set_time = scmi_imx_bbm_set_time,
+ .set_alarm = scmi_imx_bbm_set_alarm,
+ .alarm_irq_enable = scmi_imx_bbm_alarm_irq_enable,
+};
+
+static int scmi_imx_bbm_rtc_notifier(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_imx_bbm *bbnsm = container_of(nb, struct scmi_imx_bbm, nb);
+ struct scmi_imx_bbm_notif_report *r = data;
+
+ if (r->is_rtc)
+ rtc_update_irq(bbnsm->rtc_dev, 1, RTC_AF | RTC_IRQF);
+ else
+ pr_err("Unexpected bbm event: %s\n", __func__);
+
+ return 0;
+}
+
+static int scmi_imx_bbm_rtc_init(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+ struct device *dev = &sdev->dev;
+ struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev);
+ int ret;
+
+ bbnsm->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(bbnsm->rtc_dev))
+ return PTR_ERR(bbnsm->rtc_dev);
+
+	bbnsm->rtc_dev->ops = &scmi_imx_bbm_rtc_ops;
+ bbnsm->rtc_dev->range_max = U32_MAX;
+
+ bbnsm->nb.notifier_call = &scmi_imx_bbm_rtc_notifier;
+ ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_BBM,
+ SCMI_EVENT_IMX_BBM_RTC,
+ NULL, &bbnsm->nb);
+ if (ret)
+ return ret;
+
+ return devm_rtc_register_device(bbnsm->rtc_dev);
+}
+
+static int scmi_imx_bbm_rtc_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+ struct device *dev = &sdev->dev;
+ struct scmi_protocol_handle *ph;
+ struct scmi_imx_bbm *bbnsm;
+ int ret;
+
+ if (!handle)
+ return -ENODEV;
+
+ bbnsm = devm_kzalloc(dev, sizeof(*bbnsm), GFP_KERNEL);
+ if (!bbnsm)
+ return -ENOMEM;
+
+ bbnsm->ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph);
+ if (IS_ERR(bbnsm->ops))
+ return PTR_ERR(bbnsm->ops);
+
+ bbnsm->ph = ph;
+
+ device_init_wakeup(dev, true);
+
+ dev_set_drvdata(dev, bbnsm);
+
+ ret = scmi_imx_bbm_rtc_init(sdev);
+ if (ret)
+ device_init_wakeup(dev, false);
+
+ return ret;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_BBM, "imx-bbm-rtc" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_bbm_rtc_driver = {
+ .name = "scmi-imx-bbm-rtc",
+ .probe = scmi_imx_bbm_rtc_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_bbm_rtc_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM BBM RTC driver");
+MODULE_LICENSE("GPL");
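The read/set callbacks in this driver round-trip between a 64-bit seconds counter and struct rtc_time via rtc_time64_to_tm()/rtc_tm_to_time64(). A runnable userspace analogue of that round trip, assuming glibc's timegm() extension in place of the kernel helpers:

	#define _GNU_SOURCE	/* for timegm() */
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct tm tm = {
			.tm_year = 2024 - 1900,	/* years since 1900 */
			.tm_mon  = 9 - 1,	/* months are 0-based */
			.tm_mday = 30,
		};
		time_t secs = timegm(&tm);	/* broken-down time -> seconds */
		struct tm back;

		gmtime_r(&secs, &back);		/* seconds -> broken-down time */
		printf("%lld -> %04d-%02d-%02d\n", (long long)secs,
		       back.tm_year + 1900, back.tm_mon + 1, back.tm_mday);
		return 0;
	}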
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index f0f6b9b6daec..5d30ce8e13ca 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -132,7 +132,7 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
M48T59_WRITE(bin2bcd(year % 100), M48T59_YEAR);
- if (pdata->type == M48T59RTC_TYPE_M48T59 && (year / 100))
+ if (pdata->type == M48T59RTC_TYPE_M48T59 && (year >= 100))
val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
val |= (bin2bcd(tm->tm_wday) & 0x07);
M48T59_WRITE(val, M48T59_WDAY);
@@ -458,6 +458,8 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, m48t59);
m48t59->rtc->ops = &m48t59_rtc_ops;
+ m48t59->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ m48t59->rtc->range_max = RTC_TIMESTAMP_END_2099;
nvmem_cfg.size = pdata->offset;
ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index e73102a39f1b..711f62eecd79 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -429,14 +429,23 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc->rtc);
}
+static const struct platform_device_id rc5t619_rtc_id[] = {
+ {
+ .name = "rc5t619-rtc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rc5t619_rtc_id);
+
static struct platform_driver rc5t619_rtc_driver = {
.driver = {
.name = "rc5t619-rtc",
},
.probe = rc5t619_rtc_probe,
+ .id_table = rc5t619_rtc_id,
};
-
module_platform_driver(rc5t619_rtc_driver);
-MODULE_ALIAS("platform:rc5t619-rtc");
+
MODULE_DESCRIPTION("RICOH RC5T619 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 2d6b655a4b25..e3dc18882f41 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -56,7 +56,6 @@ static const struct i2c_device_id s35390a_id[] = {
MODULE_DEVICE_TABLE(i2c, s35390a_id);
static const __maybe_unused struct of_device_id s35390a_of_match[] = {
- { .compatible = "s35390a" },
{ .compatible = "sii,s35390a" },
{ }
};
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
new file mode 100644
index 000000000000..d2568c3e3876
--- /dev/null
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RTC driver for the SD2405AL Real-Time Clock
+ *
+ * Datasheet:
+ * https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf
+ *
+ * Copyright (C) 2024 Tóth János <gomba007@gmail.com>
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Real time clock registers */
+#define SD2405AL_REG_T_SEC 0x00
+#define SD2405AL_REG_T_MIN 0x01
+#define SD2405AL_REG_T_HOUR 0x02
+# define SD2405AL_BIT_12H_PM BIT(5)
+# define SD2405AL_BIT_24H BIT(7)
+#define SD2405AL_REG_T_WEEK 0x03
+#define SD2405AL_REG_T_DAY 0x04
+#define SD2405AL_REG_T_MON 0x05
+#define SD2405AL_REG_T_YEAR 0x06
+
+#define SD2405AL_NUM_T_REGS (SD2405AL_REG_T_YEAR - SD2405AL_REG_T_SEC + 1)
+
+/* Control registers */
+#define SD2405AL_REG_CTR1 0x0F
+# define SD2405AL_BIT_WRTC2 BIT(2)
+# define SD2405AL_BIT_WRTC3 BIT(7)
+#define SD2405AL_REG_CTR2 0x10
+# define SD2405AL_BIT_WRTC1 BIT(7)
+#define SD2405AL_REG_CTR3 0x11
+#define SD2405AL_REG_TTF 0x12
+#define SD2405AL_REG_CNTDWN 0x13
+
+/* General RAM */
+#define SD2405AL_REG_M_START 0x14
+#define SD2405AL_REG_M_END 0x1F
+
+struct sd2405al {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
+
+static int sd2405al_enable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, SD2405AL_BIT_WRTC1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_disable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_read_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS] = { 0 };
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_bulk_read(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ time->tm_sec = bcd2bin(data[SD2405AL_REG_T_SEC] & 0x7F);
+ time->tm_min = bcd2bin(data[SD2405AL_REG_T_MIN] & 0x7F);
+
+ if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_24H)
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x3F);
+ else if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_12H_PM) /* 12 hour mode, PM */
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x1F) + 12;
+ else /* 12 hour mode, AM */
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x1F);
+
+ time->tm_wday = bcd2bin(data[SD2405AL_REG_T_WEEK] & 0x07);
+ time->tm_mday = bcd2bin(data[SD2405AL_REG_T_DAY] & 0x3F);
+ time->tm_mon = bcd2bin(data[SD2405AL_REG_T_MON] & 0x1F) - 1;
+ time->tm_year = bcd2bin(data[SD2405AL_REG_T_YEAR]) + 100;
+
+ dev_dbg(sd2405al->dev, "read time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static int sd2405al_set_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS];
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ data[SD2405AL_REG_T_SEC] = bin2bcd(time->tm_sec);
+ data[SD2405AL_REG_T_MIN] = bin2bcd(time->tm_min);
+ data[SD2405AL_REG_T_HOUR] = bin2bcd(time->tm_hour) | SD2405AL_BIT_24H;
+ data[SD2405AL_REG_T_DAY] = bin2bcd(time->tm_mday);
+ data[SD2405AL_REG_T_WEEK] = bin2bcd(time->tm_wday);
+ data[SD2405AL_REG_T_MON] = bin2bcd(time->tm_mon + 1);
+ data[SD2405AL_REG_T_YEAR] = bin2bcd(time->tm_year - 100);
+
+ ret = sd2405al_enable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_write(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(sd2405al->regmap, SD2405AL_REG_TTF, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = sd2405al_disable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(sd2405al->dev, "set time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sd2405al_rtc_ops = {
+ .read_time = sd2405al_read_time,
+ .set_time = sd2405al_set_time,
+};
+
+static const struct regmap_config sd2405al_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SD2405AL_REG_M_END,
+};
+
+static int sd2405al_probe(struct i2c_client *client)
+{
+ struct sd2405al *sd2405al;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ sd2405al = devm_kzalloc(&client->dev, sizeof(*sd2405al), GFP_KERNEL);
+ if (!sd2405al)
+ return -ENOMEM;
+
+ sd2405al->dev = &client->dev;
+
+ sd2405al->regmap = devm_regmap_init_i2c(client, &sd2405al_regmap_conf);
+ if (IS_ERR(sd2405al->regmap))
+ return PTR_ERR(sd2405al->regmap);
+
+ sd2405al->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(sd2405al->rtc))
+ return PTR_ERR(sd2405al->rtc);
+
+ sd2405al->rtc->ops = &sd2405al_rtc_ops;
+ sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ dev_set_drvdata(&client->dev, sd2405al);
+
+ ret = devm_rtc_register_device(sd2405al->rtc);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct i2c_device_id sd2405al_id[] = {
+ { "sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sd2405al_id);
+
+static const __maybe_unused struct of_device_id sd2405al_of_match[] = {
+ { .compatible = "dfrobot,sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sd2405al_of_match);
+
+static struct i2c_driver sd2405al_driver = {
+ .driver = {
+ .name = "sd2405al",
+ .of_match_table = of_match_ptr(sd2405al_of_match),
+ },
+ .probe = sd2405al_probe,
+ .id_table = sd2405al_id,
+};
+
+module_i2c_driver(sd2405al_driver);
+
+MODULE_AUTHOR("Tóth János <gomba007@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SD2405AL RTC driver");
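On the month encoding in the set_time path above: bin2bcd(tm_mon + 1) and bin2bcd(tm_mon) + 1 agree only while the low BCD digit stays below ten; from October onward the latter yields a value that is not valid BCD, which the RTC's month register cannot represent. A short self-contained demo, with bin2bcd()/bcd2bin() reimplemented here to match the semantics of the kernel's <linux/bcd.h> helpers:

	#include <stdio.h>

	static unsigned char bin2bcd(unsigned char v) { return ((v / 10) << 4) | (v % 10); }
	static unsigned char bcd2bin(unsigned char v) { return (v >> 4) * 10 + (v & 0x0f); }

	int main(void)
	{
		int tm_mon = 9;	/* October: struct tm months are 0-based */

		printf("bin2bcd(tm_mon + 1) = 0x%02x\n", bin2bcd(tm_mon + 1));	/* 0x10: BCD for 10 */
		printf("bin2bcd(tm_mon) + 1 = 0x%02x\n", bin2bcd(tm_mon) + 1);	/* 0x0a: not valid BCD */
		printf("round trip: %d\n", bcd2bin(bin2bcd(tm_mon + 1)));	/* 10 */
		return 0;
	}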
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 98b07969609d..3e4f2ee22b0b 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -7,12 +7,16 @@
#include <linux/bcd.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
@@ -42,6 +46,12 @@
#define STM32_RTC_CR_FMT BIT(6)
#define STM32_RTC_CR_ALRAE BIT(8)
#define STM32_RTC_CR_ALRAIE BIT(12)
+#define STM32_RTC_CR_OSEL GENMASK(22, 21)
+#define STM32_RTC_CR_OSEL_ALARM_A FIELD_PREP(STM32_RTC_CR_OSEL, 0x01)
+#define STM32_RTC_CR_COE BIT(23)
+#define STM32_RTC_CR_TAMPOE BIT(26)
+#define STM32_RTC_CR_TAMPALRM_TYPE BIT(30)
+#define STM32_RTC_CR_OUT2EN BIT(31)
/* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */
#define STM32_RTC_ISR_ALRAWF BIT(0)
@@ -78,6 +88,12 @@
/* STM32_RTC_SR/_SCR bit fields */
#define STM32_RTC_SR_ALRA BIT(0)
+/* STM32_RTC_CFGR bit fields */
+#define STM32_RTC_CFGR_OUT2_RMP BIT(0)
+#define STM32_RTC_CFGR_LSCOEN GENMASK(2, 1)
+#define STM32_RTC_CFGR_LSCOEN_OUT1 1
+#define STM32_RTC_CFGR_LSCOEN_OUT2_RMP 2
+
/* STM32_RTC_VERR bit fields */
#define STM32_RTC_VERR_MINREV_SHIFT 0
#define STM32_RTC_VERR_MINREV GENMASK(3, 0)
@@ -107,6 +123,14 @@
/* STM32 RTC driver time helpers */
#define SEC_PER_DAY (24 * 60 * 60)
+/* STM32 RTC pinctrl helpers */
+#define STM32_RTC_PINMUX(_name, _action, ...) { \
+ .name = (_name), \
+ .action = (_action), \
+ .groups = ((const char *[]){ __VA_ARGS__ }), \
+ .num_groups = ARRAY_SIZE(((const char *[]){ __VA_ARGS__ })), \
+}
+
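The STM32_RTC_PINMUX macro above counts its variadic group names at compile time by building an anonymous compound-literal array and taking its size. A minimal standalone demo of that trick, with ARRAY_SIZE spelled out rather than taken from the kernel headers:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
	/* build a throwaway string array from the arguments, then count it */
	#define COUNT_STRINGS(...) ARRAY_SIZE(((const char *[]){ __VA_ARGS__ }))

	int main(void)
	{
		printf("%zu\n", COUNT_STRINGS("out1", "out2_rmp"));		/* 2 */
		printf("%zu\n", COUNT_STRINGS("out1", "out2", "out2_rmp"));	/* 3 */
		return 0;
	}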
struct stm32_rtc;
struct stm32_rtc_registers {
@@ -119,6 +143,7 @@ struct stm32_rtc_registers {
u16 wpr;
u16 sr;
u16 scr;
+ u16 cfgr;
u16 verr;
};
@@ -134,6 +159,8 @@ struct stm32_rtc_data {
bool need_dbp;
bool need_accuracy;
bool rif_protected;
+ bool has_lsco;
+ bool has_alarm_out;
};
struct stm32_rtc {
@@ -146,6 +173,7 @@ struct stm32_rtc {
struct clk *rtc_ck;
const struct stm32_rtc_data *data;
int irq_alarm;
+ struct clk *clk_lsco;
};
struct stm32_rtc_rif_resource {
@@ -171,6 +199,209 @@ static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc)
writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + regs->wpr);
}
+enum stm32_rtc_pin_name {
+ NONE,
+ OUT1,
+ OUT2,
+ OUT2_RMP
+};
+
+static const struct pinctrl_pin_desc stm32_rtc_pinctrl_pins[] = {
+ PINCTRL_PIN(OUT1, "out1"),
+ PINCTRL_PIN(OUT2, "out2"),
+ PINCTRL_PIN(OUT2_RMP, "out2_rmp"),
+};
+
+static int stm32_rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinctrl_pins);
+}
+
+static const char *stm32_rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return stm32_rtc_pinctrl_pins[selector].name;
+}
+
+static int stm32_rtc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &stm32_rtc_pinctrl_pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops stm32_rtc_pinctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = stm32_rtc_pinctrl_get_groups_count,
+ .get_group_name = stm32_rtc_pinctrl_get_group_name,
+ .get_group_pins = stm32_rtc_pinctrl_get_group_pins,
+};
+
+struct stm32_rtc_pinmux_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+ int (*action)(struct pinctrl_dev *pctl_dev, unsigned int pin);
+};
+
+static int stm32_rtc_pinmux_action_alarm(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ if (!rtc->data->has_alarm_out)
+ return -EPERM;
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr |= STM32_RTC_CR_OSEL_ALARM_A;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+
+ switch (pin) {
+ case OUT1:
+ cr &= ~STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2_RMP:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr |= STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ stm32_rtc_wpr_lock(rtc);
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_lsco_available(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+ unsigned int calib = STM32_RTC_CR_COE;
+ unsigned int tampalrm = STM32_RTC_CR_TAMPOE | STM32_RTC_CR_OSEL;
+
+ switch (pin) {
+ case OUT1:
+ if ((!(cr & STM32_RTC_CR_OUT2EN) &&
+ ((cr & calib) || (cr & tampalrm))) ||
+ ((cr & calib) && (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ case OUT2_RMP:
+ if ((cr & STM32_RTC_CR_OUT2EN) &&
+ (cfgr & STM32_RTC_CFGR_OUT2_RMP) &&
+ ((cr & calib) || (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (clk_get_rate(rtc->rtc_ck) != 32768)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_action_lsco(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ struct device *dev = rtc->rtc_dev->dev.parent;
+ u8 lscoen;
+ int ret;
+
+ if (!rtc->data->has_lsco)
+ return -EPERM;
+
+ ret = stm32_rtc_pinmux_lsco_available(pctldev, pin);
+ if (ret)
+ return ret;
+
+ lscoen = (pin == OUT1) ? STM32_RTC_CFGR_LSCOEN_OUT1 : STM32_RTC_CFGR_LSCOEN_OUT2_RMP;
+
+ rtc->clk_lsco = clk_register_gate(dev, "rtc_lsco", __clk_get_name(rtc->rtc_ck),
+ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL,
+ rtc->base + regs.cfgr, lscoen, 0, NULL);
+ if (IS_ERR(rtc->clk_lsco))
+ return PTR_ERR(rtc->clk_lsco);
+
+ of_clk_add_provider(dev->of_node, of_clk_src_simple_get, rtc->clk_lsco);
+
+ return 0;
+}
+
+static const struct stm32_rtc_pinmux_func stm32_rtc_pinmux_functions[] = {
+ STM32_RTC_PINMUX("lsco", &stm32_rtc_pinmux_action_lsco, "out1", "out2_rmp"),
+ STM32_RTC_PINMUX("alarm-a", &stm32_rtc_pinmux_action_alarm, "out1", "out2", "out2_rmp"),
+};
+
+static int stm32_rtc_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinmux_functions);
+}
+
+static const char *stm32_rtc_pinmux_get_fname(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return stm32_rtc_pinmux_functions[selector].name;
+}
+
+static int stm32_rtc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups, unsigned int * const num_groups)
+{
+ *groups = stm32_rtc_pinmux_functions[selector].groups;
+ *num_groups = stm32_rtc_pinmux_functions[selector].num_groups;
+ return 0;
+}
+
+static int stm32_rtc_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct stm32_rtc_pinmux_func selected_func = stm32_rtc_pinmux_functions[selector];
+ struct pinctrl_pin_desc pin = stm32_rtc_pinctrl_pins[group];
+
+ /* Call action */
+ if (selected_func.action)
+ return selected_func.action(pctldev, pin.number);
+
+ return -EINVAL;
+}
+
+static const struct pinmux_ops stm32_rtc_pinmux_ops = {
+ .get_functions_count = stm32_rtc_pinmux_get_functions_count,
+ .get_function_name = stm32_rtc_pinmux_get_fname,
+ .get_function_groups = stm32_rtc_pinmux_get_groups,
+ .set_mux = stm32_rtc_pinmux_set_mux,
+ .strict = true,
+};
+
+static struct pinctrl_desc stm32_rtc_pdesc = {
+ .name = DRIVER_NAME,
+ .pins = stm32_rtc_pinctrl_pins,
+ .npins = ARRAY_SIZE(stm32_rtc_pinctrl_pins),
+ .owner = THIS_MODULE,
+ .pctlops = &stm32_rtc_pinctrl_ops,
+ .pmxops = &stm32_rtc_pinmux_ops,
+};
+
static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
{
const struct stm32_rtc_registers *regs = &rtc->data->regs;
@@ -576,6 +807,8 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -586,6 +819,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -599,6 +833,8 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -609,6 +845,7 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -631,6 +868,8 @@ static const struct stm32_rtc_data stm32mp1_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = false,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -641,6 +880,7 @@ static const struct stm32_rtc_data stm32mp1_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -654,6 +894,8 @@ static const struct stm32_rtc_data stm32mp25_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = true,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -664,6 +906,7 @@ static const struct stm32_rtc_data stm32mp25_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -681,6 +924,30 @@ static const struct of_device_id stm32_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
+static void stm32_rtc_clean_outs(struct stm32_rtc *rtc)
+{
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+ cr &= ~STM32_RTC_CR_OUT2EN;
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ stm32_rtc_wpr_lock(rtc);
+
+ if (regs.cfgr != UNDEF_REG) {
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ cfgr &= ~STM32_RTC_CFGR_LSCOEN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ }
+}
+
static int stm32_rtc_check_rif(struct stm32_rtc *stm32_rtc,
struct stm32_rtc_rif_resource res)
{
@@ -791,6 +1058,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
+ struct pinctrl_dev *pctl;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -912,6 +1180,16 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
+ stm32_rtc_clean_outs(rtc);
+
+ ret = devm_pinctrl_register_and_init(&pdev->dev, &stm32_rtc_pdesc, rtc, &pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl register failed");
+
+ ret = pinctrl_enable(pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl enable failed");
+
/*
* If INITS flag is reset (calendar year field set to 0x00), calendar
* must be initialized
@@ -950,6 +1228,9 @@ static void stm32_rtc_remove(struct platform_device *pdev)
const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int cr;
+ if (!IS_ERR_OR_NULL(rtc->clk_lsco))
+ clk_unregister_gate(rtc->clk_lsco);
+
/* Disable interrupts */
stm32_rtc_wpr_unlock(rtc);
cr = readl_relaxed(rtc->base + regs->cr);
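For context on how the pinmux functions registered in this patch get exercised: a hedged consumer-side sketch using the generic pinctrl API. The helper name and the "default" state name are illustrative; in practice the RTC's own DT node carries pinctrl-0/pinctrl-names properties and the pinctrl core applies the default state automatically during probe, which is what ends up calling set_mux and the action() hooks above.

	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int example_apply_rtc_pins(struct device *dev)
	{
		struct pinctrl *p = devm_pinctrl_get(dev);
		struct pinctrl_state *s;

		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, "default");
		if (IS_ERR(s))
			return PTR_ERR(s);

		/* applies the muxing, i.e. triggers set_mux -> action() */
		return pinctrl_select_state(p, s);
	}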
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8e0c66906103..e681c1745866 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -402,6 +402,7 @@ CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
+ .has_auto_swt = 1,
};
static void __init sun8i_v3_rtc_clk_init(struct device_node *node)
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2cfacdd37e09..4e24c12004f1 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -591,8 +591,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
memset(&nvmem_cfg, 0, sizeof(nvmem_cfg));
nvmem_cfg.name = "twl-secured-";
nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED;
- nvmem_cfg.reg_read = twl_nvram_read,
- nvmem_cfg.reg_write = twl_nvram_write,
+ nvmem_cfg.reg_read = twl_nvram_read;
+ nvmem_cfg.reg_write = twl_nvram_write;
nvmem_cfg.word_size = 1;
nvmem_cfg.stride = 1;
if (twl_class_is_4030()) {
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index 8d50c894711f..e069dd685899 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -186,9 +186,6 @@ static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
if (pos < 0)
return -EINVAL;
- if (fp->f_pos != pos)
- ++fp->f_version;
-
fp->f_pos = pos;
return pos;
}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 07df04af82f2..29156455970e 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -44,6 +44,7 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ sclp.has_wti = !!(sccb->fac119 & 0x40);
sclp.has_kss = !!(sccb->fac98 & 0x01);
sclp.has_aisii = !!(sccb->fac118 & 0x40);
sclp.has_aeni = !!(sccb->fac118 & 0x20);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index bd94811fd9f1..c88b6e071847 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -13,10 +13,22 @@ obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o
-# pkey kernel module
-pkey-objs := pkey_api.o
+# pkey base and api module
+pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o
obj-$(CONFIG_PKEY) += pkey.o
+# pkey cca handler module
+pkey-cca-objs := pkey_cca.o
+obj-$(CONFIG_PKEY_CCA) += pkey-cca.o
+
+# pkey ep11 handler module
+pkey-ep11-objs := pkey_ep11.o
+obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
+
+# pkey pckmo handler module
+pkey-pckmo-objs := pkey_pckmo.o
+obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
+
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f9f682f19415..60cea6c24349 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -107,6 +107,7 @@ debug_info_t *ap_dbf_info;
static bool ap_scan_bus(void);
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
static int ap_scan_bus_time = AP_CONFIG_TIME;
static struct timer_list ap_scan_bus_timer;
@@ -733,7 +734,7 @@ static void ap_check_bindings_complete(void)
if (!completion_done(&ap_apqn_bindings_complete)) {
complete_all(&ap_apqn_bindings_complete);
ap_send_bindings_complete_uevent();
- pr_debug("%s all apqn bindings complete\n", __func__);
+ pr_debug("all apqn bindings complete\n");
}
}
}
@@ -768,7 +769,7 @@ int ap_wait_apqn_bindings_complete(unsigned long timeout)
else if (l == 0 && timeout)
rc = -ETIME;
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
@@ -795,8 +796,7 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- pr_debug("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@@ -1000,17 +1000,31 @@ bool ap_bus_force_rescan(void)
unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
bool rc = false;
- pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+ pr_debug("> scan counter=%lu\n", scan_counter);
/* Only trigger AP bus scans after the initial scan is done */
if (scan_counter <= 0)
goto out;
+ /*
+ * There is one unlikely but nevertheless valid scenario where the
+ * thread holding the mutex tries to send some crypto load while
+ * all cards are offline, which triggers a rescan and thus a
+ * recursive call of ap_bus_force_rescan(). Simply returning when
+ * the mutex is already held by this thread solves this.
+ */
+ if (mutex_is_locked(&ap_scan_bus_mutex)) {
+ if (ap_scan_bus_task == current)
+ goto out;
+ }
+
/* Try to acquire the AP scan bus mutex */
if (mutex_trylock(&ap_scan_bus_mutex)) {
/* mutex acquired, run the AP bus scan */
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
rc = ap_scan_bus_result;
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
goto out;
}
@@ -1029,7 +1043,7 @@ bool ap_bus_force_rescan(void)
mutex_unlock(&ap_scan_bus_mutex);
out:
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@@ -1043,7 +1057,7 @@ static int ap_bus_cfg_chg(struct notifier_block *nb,
if (action != CHSC_NOTIFY_AP_CFG)
return NOTIFY_DONE;
- pr_debug("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("config change, forcing bus rescan\n");
ap_bus_force_rescan();
@@ -1900,8 +1914,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop on\n",
+ ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1911,8 +1925,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop off\n",
+ ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1924,8 +1938,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config off\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1936,8 +1950,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config on\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -2009,8 +2023,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("(%d) no type info (no APQN found), ignored\n",
+ ap);
}
return;
}
@@ -2022,8 +2036,7 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
return;
}
@@ -2202,7 +2215,7 @@ static bool ap_scan_bus(void)
bool config_changed;
int ap;
- pr_debug(">%s\n", __func__);
+ pr_debug(">\n");
/* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
@@ -2243,7 +2256,7 @@ static bool ap_scan_bus(void)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- pr_debug("%s init scan complete\n", __func__);
+ pr_debug("init scan complete\n");
ap_send_init_scan_done_uevent();
}
@@ -2251,7 +2264,7 @@ static bool ap_scan_bus(void)
mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
- pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+ pr_debug("< config_changed=%d\n", config_changed);
return config_changed;
}
@@ -2284,7 +2297,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused)
* system_long_wq which invokes this function here again.
*/
if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
}
}
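The ap_scan_bus_task bookkeeping added above guards against a rescan triggered from within the scan itself. A standalone userspace illustration of the same owner-aware trylock pattern, using pthreads in place of the kernel mutex and current task pointer (all names are illustrative, not kernel API):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_t scan_owner;
	static bool scan_owned;

	static bool force_rescan(int depth)
	{
		/* re-entrant call from the scan itself: bail out, no deadlock */
		if (scan_owned && pthread_equal(scan_owner, pthread_self()))
			return false;

		if (pthread_mutex_trylock(&scan_mutex) != 0)
			return false;	/* someone else is already scanning */

		scan_owner = pthread_self();
		scan_owned = true;
		if (depth == 0)
			force_rescan(1);	/* simulate a rescan triggered mid-scan */
		scan_owned = false;
		pthread_mutex_unlock(&scan_mutex);
		return true;
	}

	int main(void)
	{
		printf("scan ran: %d\n", force_rescan(0));	/* prints: scan ran: 1 */
		return 0;
	}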
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 1f647ffd6f4d..8c878c5aa31f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -171,8 +171,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
- pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
- __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
@@ -453,8 +453,8 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
- pr_debug("%s queue 0x%02x.%04x associated with %u\n",
- __func__, AP_QID_CARD(aq->qid),
+ pr_debug("queue 0x%02x.%04x associated with %u\n",
+ AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
@@ -697,8 +697,8 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -853,8 +853,8 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -981,8 +981,8 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index ffc0b5db55c2..c20251e00cf9 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -10,1338 +10,698 @@
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/cpufeature.h>
-#include <asm/zcrypt.h>
-#include <asm/cpacf.h>
-#include <asm/pkey.h>
-#include <crypto/aes.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
-#include "zcrypt_ep11misc.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("s390 protected key interface");
-
-#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
-#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
-#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
-#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */
-#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
+#include "pkey_base.h"
/*
- * debug feature data and functions
+ * Helper functions
*/
-
-static debug_info_t *pkey_dbf_info;
-
-#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
-#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
-#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
-
-static void __init pkey_debug_init(void)
+static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- /* 5 arguments per dbf entry (including the format string ptr) */
- pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
- debug_register_view(pkey_dbf_info, &debug_sprintf_view);
- debug_set_level(pkey_dbf_info, 3);
-}
-
-static void __exit pkey_debug_exit(void)
-{
- debug_unregister(pkey_dbf_info);
-}
+ int rc;
-/* inside view of a protected key token (only type 0x00 version 0x01) */
-struct protaeskeytoken {
- u8 type; /* 0x00 for PAES specific key tokens */
- u8 res0[3];
- u8 version; /* should be 0x01 for protected AES key token */
- u8 res1[3];
- u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
- u32 len; /* bytes actually stored in protkey[] */
- u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
-} __packed;
-
-/* inside view of a clear key token (type 0x00 version 0x02) */
-struct clearkeytoken {
- u8 type; /* 0x00 for PAES specific key tokens */
- u8 res0[3];
- u8 version; /* 0x02 for clear key token */
- u8 res1[3];
- u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
- u32 len; /* bytes actually stored in clearkey[] */
- u8 clearkey[]; /* clear key value */
-} __packed;
-
-/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */
-static inline u32 pkey_keytype_aes_to_size(u32 keytype)
-{
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- return 16;
- case PKEY_KEYTYPE_AES_192:
- return 24;
- case PKEY_KEYTYPE_AES_256:
- return 32;
- default:
- return 0;
+ /* try the direct way */
+ rc = pkey_handler_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+
+ /* if this did not work, try the slowpath way */
+ if (rc == -ENODEV) {
+ rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ if (rc)
+ rc = -ENODEV;
}
+
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
/*
- * Create a protected key from a clear key value via PCKMO instruction.
+ * In-Kernel function: Transform a key blob (of any type) into a protected key
*/
-static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- /* mask of available pckmo subfunctions */
- static cpacf_mask_t pckmo_functions;
-
- u8 paramblock[112];
- u32 pkeytype;
- int keysize;
- long fc;
-
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- /* 16 byte key, 32 byte aes wkvp, total 48 bytes */
- keysize = 16;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_128_KEY;
- break;
- case PKEY_KEYTYPE_AES_192:
- /* 24 byte key, 32 byte aes wkvp, total 56 bytes */
- keysize = 24;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_192_KEY;
- break;
- case PKEY_KEYTYPE_AES_256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_256_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P384:
- /* 48 byte key, 32 byte aes wkvp, total 80 bytes */
- keysize = 48;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P521:
- /* 80 byte key, 32 byte aes wkvp, total 112 bytes */
- keysize = 80;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
- break;
- case PKEY_KEYTYPE_ECC_ED25519:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
- break;
- case PKEY_KEYTYPE_ECC_ED448:
- /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
- keysize = 64;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, keytype);
- return -EINVAL;
- }
-
- if (*protkeylen < keysize + AES_WK_VP_SIZE) {
- PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
- __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
- return -EINVAL;
- }
+ int rc;
- /* Did we already check for PCKMO ? */
- if (!pckmo_functions.bytes[0]) {
- /* no, so check now */
- if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -ENODEV;
- }
- /* check for the pckmo subfunction we need now */
- if (!cpacf_test_func(&pckmo_functions, fc)) {
- PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
- return -ENODEV;
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype);
}
- /* prepare param block */
- memset(paramblock, 0, sizeof(paramblock));
- memcpy(paramblock, clrkey, keysize);
-
- /* call the pckmo instruction */
- cpacf_pckmo(fc, paramblock);
-
- /* copy created protected key to key buffer including the wkvp block */
- *protkeylen = keysize + AES_WK_VP_SIZE;
- memcpy(protkey, paramblock, *protkeylen);
- *protkeytype = pkeytype;
-
- return 0;
+ return rc;
}
+EXPORT_SYMBOL(pkey_key2protkey);
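Seen together with the static key2protkey() helper above, the exported function gives in-kernel users a staged fallback: the registered handlers first, then the slowpath variant (whose errors are folded back into -ENODEV), and finally one retry after the handler modules have been requested. A minimal sketch of that shape, where convert() is a hypothetical stand-in for the key2protkey() helper:

	/* Sketch only: convert() is a hypothetical stand-in for the
	 * static key2protkey() helper (fast path plus slowpath). */
	rc = convert(key, keylen, protkey, &protkeylen, &protkeytype);
	if (rc == -ENODEV) {
		/* ask userspace to load the pkey handler modules, retry once */
		pkey_handler_request_modules();
		rc = convert(key, keylen, protkey, &protkeylen, &protkeytype);
	}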
/*
- * Find card and transform secure key into protected key.
+ * Ioctl functions
*/
-static int pkey_skey2pkey(const u8 *key, u8 *protkey,
- u32 *protkeylen, u32 *protkeytype)
-{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u16 cardnr, domain;
- int rc, verify;
-
- zcrypt_wait_api_operational();
-
-	/*
-	 * The cca_xxx2protkey call may fail when a card has been
-	 * addressed whose master key was changed after the last fetch
-	 * of the mkvp into the cache. Try three times: first without
-	 * verify, then with verify, and in the last round with verify
-	 * and an old master key verification pattern match not ignored.
-	 */
- for (verify = 0; verify < 3; verify++) {
- rc = cca_findcard(key, &cardnr, &domain, verify);
- if (rc < 0)
- continue;
- if (rc > 0 && verify < 2)
- continue;
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- rc = cca_sec2protkey(cardnr, domain, key,
- protkey, protkeylen, protkeytype);
- break;
- case TOKVER_CCA_VLSC:
- rc = cca_cipher2protkey(cardnr, domain, key,
- protkey, protkeylen,
- protkeytype);
- break;
- default:
- return -EINVAL;
- }
- if (rc == 0)
- break;
- }
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
+{
+ if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
- return rc;
+ return memdup_user(ukey, keylen);
}
-/*
- * Construct EP11 key with given clear key value.
- */
-static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
- u8 *keybuf, size_t *keybuflen)
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
{
- u32 nr_apqns, *apqns = NULL;
- u16 card, dom;
- int i, rc;
-
- zcrypt_wait_api_operational();
-
- /* build a list of apqns suitable for ep11 keys with cpacf support */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- NULL);
- if (rc)
- goto out;
-
-	/* go through the list of apqns and try to build an ep11 key */
- for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
- 0, clrkey, keybuf, keybuflen,
- PKEY_TYPE_EP11);
- if (rc == 0)
- break;
- }
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
-out:
- kfree(apqns);
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
- return rc;
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
}
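Both copy helpers are built on memdup_user(), so failure comes back as an ERR_PTR() value rather than NULL; _copy_apqns_from_user() additionally returns plain NULL when no list was passed, which IS_ERR() deliberately does not flag. A sketch of the calling convention the ioctl helpers below all follow:

	kkey = _copy_key_from_user(ktp.key, ktp.keylen);
	if (IS_ERR(kkey))
		return PTR_ERR(kkey);	/* -EINVAL (bad length) or -EFAULT */
	/* ... hand kkey to a pkey handler ... */
	kfree_sensitive(kkey);		/* key material: zeroize before free */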
-/*
- * Find card and transform EP11 secure key into protected key.
- */
-static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
{
- u32 nr_apqns, *apqns = NULL;
- int i, j, rc = -ENODEV;
- u16 card, dom;
+ struct pkey_genseck kgs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
- zcrypt_wait_api_operational();
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
- /* try two times in case of failure */
- for (i = 0; i < 2 && rc; i++) {
+ apqn.card = kgs.cardnr;
+ apqn.domain = kgs.domain;
+ keybuflen = sizeof(kgs.seckey.seckey);
+ rc = pkey_handler_gen_key(&apqn, 1,
+ kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kgs.seckey.seckey, &keybuflen, NULL);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ memzero_explicit(&kgs, sizeof(kgs));
- /* build a list of apqns suitable for this key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- continue; /* retry findcard on failure */
-
-		/* go through the list of apqns and try to derive a pkey */
- for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) {
- card = apqns[j] >> 16;
- dom = apqns[j] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen, protkeytype);
- }
+ return rc;
+}
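This first helper establishes the skeleton the remaining pkey_ioctl_*() functions repeat: copy the request in, delegate to a pkey handler, copy the result back only on success, and unconditionally scrub the on-stack copy because it carried key material. As a sketch, with handler() standing in for one of the pkey_handler_*() calls:

	if (copy_from_user(&k, uptr, sizeof(k)))
		return -EFAULT;
	rc = handler(&k);			/* hypothetical pkey_handler_*() call */
	if (!rc && copy_to_user(uptr, &k, sizeof(k)))
		rc = -EFAULT;
	memzero_explicit(&k, sizeof(k));	/* struct held key material */
	return rc;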
- kfree(apqns);
- }
+static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs)
+{
+ struct pkey_clr2seck kcs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+
+ apqn.card = kcs.cardnr;
+ apqn.domain = kcs.domain;
+ keybuflen = sizeof(kcs.seckey.seckey);
+ rc = pkey_handler_clr_to_key(&apqn, 1,
+ kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kcs.clrkey.clrkey,
+ pkey_keytype_aes_to_size(kcs.keytype),
+ kcs.seckey.seckey, &keybuflen, NULL);
+ pr_debug("clr_to_key()=%d\n", rc);
+ if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
return rc;
}
-/*
- * Verify key and give back some info about the key.
- */
-static int pkey_verifykey(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain,
- u16 *pkeysize, u32 *pattributes)
+static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *)seckey;
- u16 cardnr, domain;
+ struct pkey_sec2protk ksp;
+ struct pkey_apqn apqn;
int rc;
- /* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0);
- if (rc)
- goto out;
- if (pattributes)
- *pattributes = PKEY_VERIFY_ATTR_AES;
- if (pkeysize)
- *pkeysize = t->bitsize;
-
- /* try to find a card which can handle this key */
- rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
- if (rc < 0)
- goto out;
-
- if (rc > 0) {
- /* key mkvp matches to old master key mkvp */
- pr_debug("%s secure key has old mkvp\n", __func__);
- if (pattributes)
- *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
- rc = 0;
- }
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ apqn.card = ksp.cardnr;
+ apqn.domain = ksp.domain;
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(&apqn, 1,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len, &ksp.protkey.type);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
- if (pcardnr)
- *pcardnr = cardnr;
- if (pdomain)
- *pdomain = domain;
-
-out:
- pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
-/*
- * Generate a random protected key
- */
-static int pkey_genprotkey(u32 keytype, u8 *protkey,
- u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp)
{
- u8 clrkey[32];
- int keysize;
+ struct pkey_clr2protk kcp;
+ struct clearkeytoken *t;
+ u32 keylen;
+ u8 *tmpbuf;
int rc;
- keysize = pkey_keytype_aes_to_size(keytype);
- if (!keysize) {
- PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
+ if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+ return -EFAULT;
+
+ /* build a 'clear key token' from the clear key value */
+ keylen = pkey_keytype_aes_to_size(kcp.keytype);
+ if (!keylen) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, kcp.keytype);
+ memzero_explicit(&kcp, sizeof(kcp));
return -EINVAL;
}
+ tmpbuf = kzalloc(sizeof(*t) + keylen, GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kcp, sizeof(kcp));
+ return -ENOMEM;
+ }
+ t = (struct clearkeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_CLEAR_KEY;
+ t->keytype = (keylen - 8) >> 3;
+ t->len = keylen;
+ memcpy(t->clearkey, kcp.clrkey.clrkey, keylen);
+ kcp.protkey.len = sizeof(kcp.protkey.protkey);
- /* generate a dummy random clear key */
- get_random_bytes(clrkey, keysize);
+ rc = key2protkey(NULL, 0,
+ tmpbuf, sizeof(*t) + keylen,
+ kcp.protkey.protkey,
+ &kcp.protkey.len, &kcp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
- /* convert it to a dummy protected key */
- rc = pkey_clr2protkey(keytype, clrkey,
- protkey, protkeylen, protkeytype);
- if (rc)
- return rc;
+ kfree_sensitive(tmpbuf);
- /* replace the key part of the protected key with random bytes */
- get_random_bytes(protkey, keysize);
+ if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
+ rc = -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
- return 0;
+ return rc;
}
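The (keylen - 8) >> 3 expression derives the clear key token's keytype field from the AES key length in bytes; assuming the usual uapi values of 1, 2 and 3 for PKEY_KEYTYPE_AES_128/192/256, the mapping works out as:

	(16 - 8) >> 3 = 1 = PKEY_KEYTYPE_AES_128
	(24 - 8) >> 3 = 2 = PKEY_KEYTYPE_AES_192
	(32 - 8) >> 3 = 3 = PKEY_KEYTYPE_AES_256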
-/*
- * Verify if a protected key is still valid
- */
-static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
- u32 protkeytype)
+static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc)
{
- struct {
- u8 iv[AES_BLOCK_SIZE];
- u8 key[MAXPROTKEYSIZE];
- } param;
- u8 null_msg[AES_BLOCK_SIZE];
- u8 dest_buf[AES_BLOCK_SIZE];
- unsigned int k, pkeylen;
- unsigned long fc;
-
- switch (protkeytype) {
- case PKEY_KEYTYPE_AES_128:
- pkeylen = 16 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_128;
- break;
- case PKEY_KEYTYPE_AES_192:
- pkeylen = 24 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_192;
- break;
- case PKEY_KEYTYPE_AES_256:
- pkeylen = 32 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_256;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
- protkeytype);
- return -EINVAL;
- }
- if (protkeylen != pkeylen) {
- PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
- return -EINVAL;
- }
+ struct pkey_findcard kfc;
+ struct pkey_apqn *apqns;
+ size_t nr_apqns;
+ int rc;
- memset(null_msg, 0, sizeof(null_msg));
+ if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+ return -EFAULT;
- memset(param.iv, 0, sizeof(param.iv));
- memcpy(param.key, protkey, protkeylen);
+ nr_apqns = MAXAPQNSINLIST;
+ apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
- k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
- sizeof(null_msg));
- if (k != sizeof(null_msg)) {
- PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
- return -EKEYREJECTED;
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_CUR_MKVP,
+ apqns, &nr_apqns);
+ if (rc == -ENODEV)
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_ALT_MKVP,
+ apqns, &nr_apqns);
+ pr_debug("apqns_for_key()=%d\n", rc);
+ if (rc) {
+ kfree(apqns);
+ return rc;
}
+ kfc.cardnr = apqns[0].card;
+ kfc.domain = apqns[0].domain;
+ kfree(apqns);
+ if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+ return -EFAULT;
return 0;
}
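PKEY_FINDCARD keeps its historical preference order: first look for an APQN whose current master key matches the key's MKVP, and only if none exists accept one where merely the old (alternate) master key still matches; the first entry of the resulting list wins. Sketched below, with find() and pick() as hypothetical stand-ins for pkey_handler_apqns_for_key() and the card/domain assignment:

	rc = find(key, PKEY_FLAGS_MATCH_CUR_MKVP, apqns, &n);
	if (rc == -ENODEV)	/* no current-MK match: accept old MK */
		rc = find(key, PKEY_FLAGS_MATCH_ALT_MKVP, apqns, &n);
	if (!rc)
		pick(apqns[0]);	/* first hit provides card and domain */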
-/* Helper for pkey_nonccatok2pkey, handles aes clear key token */
-static int nonccatokaes2pkey(const struct clearkeytoken *t,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp)
{
- size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE);
- u8 *tmpbuf = NULL;
- u32 keysize;
+ struct pkey_skey2pkey ksp;
int rc;
- keysize = pkey_keytype_aes_to_size(t->keytype);
- if (!keysize) {
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
- if (t->len != keysize) {
- PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n",
- __func__, t->len);
- return -EINVAL;
- }
-
- /* try direct way with the PCKMO instruction */
- rc = pkey_clr2protkey(t->keytype, t->clearkey,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(NULL, 0,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len,
+ &ksp.protkey.type);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
- /* PCKMO failed, so try the CCA secure key way */
- tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
- zcrypt_wait_api_operational();
- rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf);
- if (rc)
- goto try_via_ep11;
- rc = pkey_skey2pkey(tmpbuf,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
-
-try_via_ep11:
- /* if the CCA way also failed, let's try via EP11 */
- rc = pkey_clr2ep11key(t->clearkey, t->len,
- tmpbuf, &tmpbuflen);
- if (rc)
- goto failure;
- rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
-
-failure:
- PKEY_DBF_ERR("%s unable to build protected key from clear", __func__);
-
-out:
- kfree(tmpbuf);
return rc;
}
-/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */
-static int nonccatokecc2pkey(const struct clearkeytoken *t,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk)
{
- u32 keylen;
+ u32 keytype, keybitsize, flags;
+ struct pkey_verifykey kvk;
int rc;
- switch (t->keytype) {
- case PKEY_KEYTYPE_ECC_P256:
- keylen = 32;
- break;
- case PKEY_KEYTYPE_ECC_P384:
- keylen = 48;
- break;
- case PKEY_KEYTYPE_ECC_P521:
- keylen = 80;
- break;
- case PKEY_KEYTYPE_ECC_ED25519:
- keylen = 32;
- break;
- case PKEY_KEYTYPE_ECC_ED448:
- keylen = 64;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
-
- if (t->len != keylen) {
- PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n",
- __func__, t->len);
- return -EINVAL;
- }
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
- /* only one path possible: via PCKMO instruction */
- rc = pkey_clr2protkey(t->keytype, t->clearkey,
- protkey, protkeylen, protkeytype);
- if (rc) {
- PKEY_DBF_ERR("%s unable to build protected key from clear",
- __func__);
- }
+ kvk.cardnr = 0xFFFF;
+ kvk.domain = 0xFFFF;
+ rc = pkey_handler_verify_key(kvk.seckey.seckey,
+ sizeof(kvk.seckey.seckey),
+ &kvk.cardnr, &kvk.domain,
+ &keytype, &keybitsize, &flags);
+ pr_debug("verify_key()=%d\n", rc);
+ if (!rc && keytype != PKEY_TYPE_CCA_DATA)
+ rc = -EINVAL;
+ kvk.attributes = PKEY_VERIFY_ATTR_AES;
+ kvk.keysize = (u16)keybitsize;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ rc = -EFAULT;
+ memzero_explicit(&kvk, sizeof(kvk));
return rc;
}
-/*
- * Transform a non-CCA key token into a protected key
- */
-static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int rc = -EINVAL;
+ struct pkey_genprotk kgp;
+ int rc;
- switch (hdr->version) {
- case TOKVER_PROTECTED_KEY: {
- struct protaeskeytoken *t;
+ if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+ return -EFAULT;
- if (keylen != sizeof(struct protaeskeytoken))
- goto out;
- t = (struct protaeskeytoken *)key;
- rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype);
- if (rc)
- goto out;
- memcpy(protkey, t->protkey, t->len);
- *protkeylen = t->len;
- *protkeytype = t->keytype;
- break;
- }
- case TOKVER_CLEAR_KEY: {
- struct clearkeytoken *t = (struct clearkeytoken *)key;
-
- if (keylen < sizeof(struct clearkeytoken) ||
- keylen != sizeof(*t) + t->len)
- goto out;
- switch (t->keytype) {
- case PKEY_KEYTYPE_AES_128:
- case PKEY_KEYTYPE_AES_192:
- case PKEY_KEYTYPE_AES_256:
- rc = nonccatokaes2pkey(t, protkey,
- protkeylen, protkeytype);
- break;
- case PKEY_KEYTYPE_ECC_P256:
- case PKEY_KEYTYPE_ECC_P384:
- case PKEY_KEYTYPE_ECC_P521:
- case PKEY_KEYTYPE_ECC_ED25519:
- case PKEY_KEYTYPE_ECC_ED448:
- rc = nonccatokecc2pkey(t, protkey,
- protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
- break;
- }
- case TOKVER_EP11_AES: {
- /* check ep11 key for exportable as protected key */
- rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
- if (rc)
- goto out;
- rc = pkey_ep11key2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- }
- case TOKVER_EP11_AES_WITH_HEADER:
- /* check ep11 key with header for exportable as protected key */
- rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1);
- if (rc)
- goto out;
- rc = pkey_ep11key2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
- }
+ kgp.protkey.len = sizeof(kgp.protkey.protkey);
+ rc = pkey_handler_gen_key(NULL, 0, kgp.keytype,
+ PKEY_TYPE_PROTKEY, 0, 0,
+ kgp.protkey.protkey, &kgp.protkey.len,
+ &kgp.protkey.type);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
+ rc = -EFAULT;
+ memzero_explicit(&kgp, sizeof(kgp));
-out:
return rc;
}
-/*
- * Transform a CCA internal key token into a protected key
- */
-static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_verifyprotk kvp;
+ struct protaeskeytoken *t;
+ u32 keytype;
+ u8 *tmpbuf;
+ int rc;
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- break;
- case TOKVER_CCA_VLSC:
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
+ if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+ return -EFAULT;
+
+ keytype = pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len);
+ if (!keytype) {
+ PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n",
+ __func__, kvp.protkey.len);
+ memzero_explicit(&kvp, sizeof(kvp));
return -EINVAL;
}
- return pkey_skey2pkey(key, protkey, protkeylen, protkeytype);
+ /* build a 'protected key token' from the raw protected key */
+ tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kvp, sizeof(kvp));
+ return -ENOMEM;
+ }
+ t = (struct protaeskeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+ t->len = kvp.protkey.len;
+ memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len);
+
+ rc = pkey_handler_verify_key(tmpbuf, sizeof(*t),
+ NULL, NULL, NULL, NULL, NULL);
+ pr_debug("verify_key()=%d\n", rc);
+
+ kfree_sensitive(tmpbuf);
+ memzero_explicit(&kvp, sizeof(kvp));
+
+ return rc;
}
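Rather than running a probe encryption itself, the helper wraps the raw protected key into a type 0x00 / version 0x01 protected-key token (the protaeskeytoken layout removed further up) and lets the generic verify handler judge it. A hypothetical userspace sketch of the ioctl this backs, assuming the usual /dev/pkey misc device node, with error handling omitted:

	struct pkey_verifyprotk vp;	/* .protkey filled in beforehand */
	int fd = open("/dev/pkey", O_RDWR);

	if (ioctl(fd, PKEY_VERIFYPROTK, &vp) < 0)
		;	/* e.g. wrapping key changed: key no longer usable */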
-/*
- * Transform a key blob (of any type) into a protected key
- */
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_kblob2pkey ktp;
+ u8 *kkey;
int rc;
- if (keylen < sizeof(struct keytoken_header)) {
- PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen);
- return -EINVAL;
- }
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(NULL, 0, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
- switch (hdr->type) {
- case TOKTYPE_NON_CCA:
- rc = pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- case TOKTYPE_CCA_INTERNAL:
- rc = pkey_ccainttok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
- }
-
- pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_keyblob2pkey);
-static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- enum pkey_key_type ktype, enum pkey_key_size ksize,
- u32 kflags, u8 *keybuf, size_t *keybufsize)
+static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs)
{
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
- /* check key type and size */
- switch (ktype) {
- case PKEY_TYPE_CCA_DATA:
- case PKEY_TYPE_CCA_CIPHER:
- if (*keybufsize < SECKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11:
- if (*keybufsize < MINEP11AESKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11_AES:
- if (*keybufsize < (sizeof(struct ep11kblob_header) +
- MINEP11AESKEYBLOBSIZE))
- return -EINVAL;
- break;
- default:
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kgs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kgs.size);
return -EINVAL;
}
- switch (ksize) {
- case PKEY_SIZE_AES_128:
- case PKEY_SIZE_AES_192:
- case PKEY_SIZE_AES_256:
- break;
- default:
- return -EINVAL;
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
}
-
-	/* simply try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES) {
- rc = ep11_genaeskey(card, dom, ksize, kflags,
- keybuf, keybufsize, ktype);
- } else if (ktype == PKEY_TYPE_CCA_DATA) {
- rc = cca_genseckey(card, dom, ksize, keybuf);
- *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else {
- /* TOKVER_CCA_VLSC */
- rc = cca_gencipherkey(card, dom, ksize, kflags,
- keybuf, keybufsize);
+ rc = pkey_handler_gen_key(apqns, kgs.apqn_entries,
+ u, kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen, NULL);
+ pr_debug("gen_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ return rc;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree_sensitive(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ return -EFAULT;
}
- if (rc == 0)
- break;
}
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree_sensitive(kkey);
return rc;
}
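Since the generated blob length is only known after the fact, the key is produced in a KEYBLOBBUFSIZE bounce buffer and copied out only when the caller supplied enough room; on success the produced length is reported back in kgs.keylen either way. A sketch of that copy-out shape, with generate() as a hypothetical stand-in for pkey_handler_gen_key():

	kkey = kzalloc(KEYBLOBBUFSIZE, GFP_KERNEL);
	rc = generate(kkey, &klen);		/* hypothetical handler call */
	if (!rc && kgs.key) {
		if (kgs.keylen < klen)
			rc = -EINVAL;		/* user buffer too small */
		else if (copy_to_user(kgs.key, kkey, klen))
			rc = -EFAULT;
	}
	kfree_sensitive(kkey);			/* blob held key material */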
-static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- enum pkey_key_type ktype, enum pkey_key_size ksize,
- u32 kflags, const u8 *clrkey,
- u8 *keybuf, size_t *keybufsize)
+static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs)
{
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- /* check key type and size */
- switch (ktype) {
- case PKEY_TYPE_CCA_DATA:
- case PKEY_TYPE_CCA_CIPHER:
- if (*keybufsize < SECKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11:
- if (*keybufsize < MINEP11AESKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11_AES:
- if (*keybufsize < (sizeof(struct ep11kblob_header) +
- MINEP11AESKEYBLOBSIZE))
- return -EINVAL;
- break;
- default:
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kcs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kcs.size);
+ memzero_explicit(&kcs, sizeof(kcs));
return -EINVAL;
}
- switch (ksize) {
- case PKEY_SIZE_AES_128:
- case PKEY_SIZE_AES_192:
- case PKEY_SIZE_AES_256:
- break;
- default:
- return -EINVAL;
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns)) {
+ memzero_explicit(&kcs, sizeof(kcs));
+ return PTR_ERR(apqns);
}
-
- zcrypt_wait_api_operational();
-
-	/* simply try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES) {
- rc = ep11_clr2keyblob(card, dom, ksize, kflags,
- clrkey, keybuf, keybufsize,
- ktype);
- } else if (ktype == PKEY_TYPE_CCA_DATA) {
- rc = cca_clr2seckey(card, dom, ksize,
- clrkey, keybuf);
- *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else {
- /* TOKVER_CCA_VLSC */
- rc = cca_clr2cipherkey(card, dom, ksize, kflags,
- clrkey, keybuf, keybufsize);
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -ENOMEM;
+ }
+ rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries,
+ u, kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kcs.size / 8,
+ kkey, &klen, NULL);
+ pr_debug("clr_to_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return rc;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EINVAL;
+ }
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EFAULT;
}
- if (rc == 0)
- break;
}
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree_sensitive(kkey);
return rc;
}
-static int pkey_verifykey2(const u8 *key, size_t keylen,
- u16 *cardnr, u16 *domain,
- enum pkey_key_type *ktype,
- enum pkey_key_size *ksize, u32 *flags)
+static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
int rc;
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
-
- if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES) {
- struct secaeskeytoken *t = (struct secaeskeytoken *)key;
-
- rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_CCA_DATA;
- if (ksize)
- *ksize = (enum pkey_key_size)t->bitsize;
-
- rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
- if (rc == -ENODEV) {
- rc = cca_findcard2(&_apqns, &_nr_apqns,
- *cardnr, *domain,
- ZCRYPT_CEX3C, AES_MK_SET,
- 0, t->mkvp, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
- }
- if (rc)
- goto out;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
-
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC) {
- struct cipherkeytoken *t = (struct cipherkeytoken *)key;
-
- rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_CCA_CIPHER;
- if (ksize) {
- *ksize = PKEY_SIZE_UNKNOWN;
- if (!t->plfver && t->wpllen == 512)
- *ksize = PKEY_SIZE_AES_128;
- else if (!t->plfver && t->wpllen == 576)
- *ksize = PKEY_SIZE_AES_192;
- else if (!t->plfver && t->wpllen == 640)
- *ksize = PKEY_SIZE_AES_256;
- }
-
- rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
- if (rc == -ENODEV) {
- rc = cca_findcard2(&_apqns, &_nr_apqns,
- *cardnr, *domain,
- ZCRYPT_CEX6, AES_MK_SET,
- 0, t->mkvp0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
- }
- if (rc)
- goto out;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES) {
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
- int api;
-
- rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_EP11;
- if (ksize)
- *ksize = kb->head.bitlen;
-
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
-
- if (flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
+ rc = pkey_handler_verify_key(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags);
+ pr_debug("verify_key()=%d\n", rc);
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
- int api;
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
- rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_EP11_AES;
- if (ksize)
- *ksize = kh->bitlen;
-
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
+ return rc;
+}
- if (flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp)
+{
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey2 ktp;
+ u8 *kkey;
+ int rc;
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
- } else {
- rc = -EINVAL;
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
}
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
-out:
- kfree(_apqns);
return rc;
}
-static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4key kak;
+ size_t nr_apqns, len;
+ u8 *kkey;
+ int rc;
- if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- if (hdr->version == TOKVER_CCA_AES) {
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
- return -EINVAL;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- if (cca_check_secaescipherkey(pkey_dbf_info,
- 3, key, 0, 1))
- return -EINVAL;
- } else {
- PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns);
+ pr_debug("apqns_for_key()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ return rc;
+ }
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
return -EINVAL;
}
- } else if (hdr->type == TOKTYPE_NON_CCA) {
- if (hdr->version == TOKVER_EP11_AES) {
- if (ep11_check_aes_key(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else {
- return pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen,
- protkeytype);
- }
- } else {
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
- }
-
- zcrypt_wait_api_operational();
-
-	/* simply try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES) {
- rc = cca_sec2protkey(card, dom, key,
- protkey, protkeylen, protkeytype);
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC) {
- rc = cca_cipher2protkey(card, dom, key,
- protkey, protkeylen,
- protkeytype);
- } else {
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen,
- protkeytype);
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
}
- if (rc == 0)
- break;
}
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
return rc;
}
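-ENOSPC is deliberately tolerated here: the handler reports in nr_apqns how many entries the key actually matches, and only real failures abort. That enables the classic two-call sizing pattern from userspace, sketched hypothetically below (fd is the open /dev/pkey node):

	kak.apqns = NULL;
	kak.apqn_entries = 0;
	ioctl(fd, PKEY_APQNS4K, &kak);	/* fails with ENOSPC, count filled */
	kak.apqns = calloc(kak.apqn_entries, sizeof(struct pkey_apqn));
	ioctl(fd, PKEY_APQNS4K, &kak);	/* returns the actual list */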
-static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4keytype kat;
+ size_t nr_apqns, len;
int rc;
- if (keylen < sizeof(struct keytoken_header) || flags == 0)
- return -EINVAL;
-
- zcrypt_wait_api_operational();
-
- if (hdr->type == TOKTYPE_NON_CCA &&
- (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- struct ep11keyblob *kb = (struct ep11keyblob *)
- (key + sizeof(struct ep11kblob_header));
- int minhwtype = 0, api = 0;
-
- if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
- return -EINVAL;
- if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
- minhwtype = ZCRYPT_CEX7;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- }
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key)) {
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
- int minhwtype = 0, api = 0;
-
- if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
- return -EINVAL;
- if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
- minhwtype = ZCRYPT_CEX7;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- }
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- u64 cur_mkvp = 0, old_mkvp = 0;
- int minhwtype = ZCRYPT_CEX3C;
-
- if (hdr->version == TOKVER_CCA_AES) {
- struct secaeskeytoken *t = (struct secaeskeytoken *)key;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- struct cipherkeytoken *t = (struct cipherkeytoken *)key;
-
- minhwtype = ZCRYPT_CEX6;
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp0;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp0;
- } else {
- /* unknown cca internal token type */
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_handler_apqns_for_keytype(kat.type,
+ kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns);
+ pr_debug("apqns_for_keytype()=%d\n", rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ return rc;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
return -EINVAL;
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
- struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
- u64 cur_mkvp = 0, old_mkvp = 0;
-
- if (t->secid == 0x20) {
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp;
- } else {
- /* unknown cca internal 2 token type */
- return -EINVAL;
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else {
- return -EINVAL;
}
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
-
-out:
- kfree(_apqns);
return rc;
}
-static int pkey_apqns4keytype(enum pkey_key_type ktype,
- u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp)
{
- u32 _nr_apqns, *_apqns = NULL;
+ u32 protkeylen = PROTKEYBLOBBUFSIZE;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey3 ktp;
+ u8 *kkey, *protkey;
int rc;
- zcrypt_wait_api_operational();
-
- if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
- u64 cur_mkvp = 0, old_mkvp = 0;
- int minhwtype = ZCRYPT_CEX3C;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *)cur_mkvp);
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *)alt_mkvp);
- if (ktype == PKEY_TYPE_CCA_CIPHER)
- minhwtype = ZCRYPT_CEX6;
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else if (ktype == PKEY_TYPE_CCA_ECC) {
- u64 cur_mkvp = 0, old_mkvp = 0;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *)cur_mkvp);
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *)alt_mkvp);
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
-
- } else if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES ||
- ktype == PKEY_TYPE_EP11_ECC) {
- u8 *wkvp = NULL;
- int api;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- wkvp = cur_mkvp;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, api, wkvp);
- if (rc)
- goto out;
-
- } else {
- return -EINVAL;
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
}
-
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ protkey = kmalloc(protkeylen, GFP_KERNEL);
+ if (!protkey) {
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ return -ENOMEM;
}
- *nr_apqns = _nr_apqns;
-
-out:
- kfree(_apqns);
- return rc;
-}
-
-static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
- const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
-{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
-
- if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- /* EP11 AES key blob with header */
- if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- /* EP11 ECC key blob with header */
- if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key)) {
- /* EP11 AES key blob with header in session field */
- if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- if (hdr->version == TOKVER_CCA_AES) {
- /* CCA AES data key */
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
- return -EINVAL;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- /* CCA AES cipher key */
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- if (cca_check_secaescipherkey(pkey_dbf_info,
- 3, key, 0, 1))
- return -EINVAL;
- } else {
- PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
- return -EINVAL;
- }
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
- /* CCA ECC (private) key */
- if (keylen < sizeof(struct eccprivkeytoken))
- return -EINVAL;
- if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA) {
- return pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- } else {
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ protkey, &protkeylen, &ktp.pkeytype);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (rc) {
+ kfree_sensitive(protkey);
+ return rc;
}
-
-	/* simply try all apqns from the list */
- for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_NON_CCA &&
- (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
- rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen,
- protkeytype);
- else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key))
- rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen,
- protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES)
- rc = cca_sec2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC)
- rc = cca_cipher2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
- rc = cca_ecc2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else
+ if (ktp.pkey && ktp.pkeylen) {
+ if (protkeylen > ktp.pkeylen) {
+ kfree_sensitive(protkey);
return -EINVAL;
+ }
+ if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+ kfree_sensitive(protkey);
+ return -EFAULT;
+ }
}
+ kfree_sensitive(protkey);
+ ktp.pkeylen = protkeylen;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
- return rc;
-}
-
-/*
- * File io functions
- */
-
-static void *_copy_key_from_user(void __user *ukey, size_t keylen)
-{
- if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user(ukey, keylen);
-}
-
-static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
-{
- if (!uapqns || nr_apqns == 0)
- return NULL;
-
- return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
+ return 0;
}
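Unlike the v1/v2 calls, the v3 interface returns the protected key through a caller-supplied buffer (ktp.pkey / ktp.pkeylen) instead of a fixed-size struct member, hence the PROTKEYBLOBBUFSIZE kernel bounce buffer above that is freed with kfree_sensitive(). A hypothetical userspace sketch:

	__u8 pbuf[256];			/* generously sized output buffer */
	ktp.pkey = pbuf;
	ktp.pkeylen = sizeof(pbuf);
	if (!ioctl(fd, PKEY_KBLOB2PROTK3, &ktp))
		;	/* ktp.pkeylen now holds the real key length */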
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1350,438 +710,57 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
int rc;
switch (cmd) {
- case PKEY_GENSECK: {
- struct pkey_genseck __user *ugs = (void __user *)arg;
- struct pkey_genseck kgs;
-
- if (copy_from_user(&kgs, ugs, sizeof(kgs)))
- return -EFAULT;
- rc = cca_genseckey(kgs.cardnr, kgs.domain,
- kgs.keytype, kgs.seckey.seckey);
- pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
- rc = -EFAULT;
- memzero_explicit(&kgs, sizeof(kgs));
+ case PKEY_GENSECK:
+ rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg);
break;
- }
- case PKEY_CLR2SECK: {
- struct pkey_clr2seck __user *ucs = (void __user *)arg;
- struct pkey_clr2seck kcs;
-
- if (copy_from_user(&kcs, ucs, sizeof(kcs)))
- return -EFAULT;
- rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
- kcs.clrkey.clrkey, kcs.seckey.seckey);
- pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
- rc = -EFAULT;
- memzero_explicit(&kcs, sizeof(kcs));
+ case PKEY_CLR2SECK:
+ rc = pkey_ioctl_clr2seck((struct pkey_clr2seck __user *)arg);
break;
- }
- case PKEY_SEC2PROTK: {
- struct pkey_sec2protk __user *usp = (void __user *)arg;
- struct pkey_sec2protk ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- ksp.protkey.len = sizeof(ksp.protkey.protkey);
- rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
- ksp.seckey.seckey, ksp.protkey.protkey,
- &ksp.protkey.len, &ksp.protkey.type);
- pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
- rc = -EFAULT;
- memzero_explicit(&ksp, sizeof(ksp));
+ case PKEY_SEC2PROTK:
+ rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg);
break;
- }
- case PKEY_CLR2PROTK: {
- struct pkey_clr2protk __user *ucp = (void __user *)arg;
- struct pkey_clr2protk kcp;
-
- if (copy_from_user(&kcp, ucp, sizeof(kcp)))
- return -EFAULT;
- kcp.protkey.len = sizeof(kcp.protkey.protkey);
- rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
- kcp.protkey.protkey,
- &kcp.protkey.len, &kcp.protkey.type);
- pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
- rc = -EFAULT;
- memzero_explicit(&kcp, sizeof(kcp));
+ case PKEY_CLR2PROTK:
+ rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg);
break;
- }
- case PKEY_FINDCARD: {
- struct pkey_findcard __user *ufc = (void __user *)arg;
- struct pkey_findcard kfc;
-
- if (copy_from_user(&kfc, ufc, sizeof(kfc)))
- return -EFAULT;
- rc = cca_findcard(kfc.seckey.seckey,
- &kfc.cardnr, &kfc.domain, 1);
- pr_debug("%s cca_findcard()=%d\n", __func__, rc);
- if (rc < 0)
- break;
- if (copy_to_user(ufc, &kfc, sizeof(kfc)))
- return -EFAULT;
+ case PKEY_FINDCARD:
+ rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg);
break;
- }
- case PKEY_SKEY2PKEY: {
- struct pkey_skey2pkey __user *usp = (void __user *)arg;
- struct pkey_skey2pkey ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- ksp.protkey.len = sizeof(ksp.protkey.protkey);
- rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
- &ksp.protkey.len, &ksp.protkey.type);
- pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
- rc = -EFAULT;
- memzero_explicit(&ksp, sizeof(ksp));
+ case PKEY_SKEY2PKEY:
+ rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg);
break;
- }
- case PKEY_VERIFYKEY: {
- struct pkey_verifykey __user *uvk = (void __user *)arg;
- struct pkey_verifykey kvk;
-
- if (copy_from_user(&kvk, uvk, sizeof(kvk)))
- return -EFAULT;
- rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
- &kvk.keysize, &kvk.attributes);
- pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
- rc = -EFAULT;
- memzero_explicit(&kvk, sizeof(kvk));
+ case PKEY_VERIFYKEY:
+ rc = pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg);
break;
- }
- case PKEY_GENPROTK: {
- struct pkey_genprotk __user *ugp = (void __user *)arg;
- struct pkey_genprotk kgp;
-
- if (copy_from_user(&kgp, ugp, sizeof(kgp)))
- return -EFAULT;
- kgp.protkey.len = sizeof(kgp.protkey.protkey);
- rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
- &kgp.protkey.len, &kgp.protkey.type);
- pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
- rc = -EFAULT;
- memzero_explicit(&kgp, sizeof(kgp));
+ case PKEY_GENPROTK:
+ rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg);
break;
- }
- case PKEY_VERIFYPROTK: {
- struct pkey_verifyprotk __user *uvp = (void __user *)arg;
- struct pkey_verifyprotk kvp;
-
- if (copy_from_user(&kvp, uvp, sizeof(kvp)))
- return -EFAULT;
- rc = pkey_verifyprotkey(kvp.protkey.protkey,
- kvp.protkey.len, kvp.protkey.type);
- pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
- memzero_explicit(&kvp, sizeof(kvp));
+ case PKEY_VERIFYPROTK:
+ rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK: {
- struct pkey_kblob2pkey __user *utp = (void __user *)arg;
- struct pkey_kblob2pkey ktp;
- u8 *kkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey))
- return PTR_ERR(kkey);
- ktp.protkey.len = sizeof(ktp.protkey.protkey);
- rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
- &ktp.protkey.len, &ktp.protkey.type);
- pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
- rc = -EFAULT;
- memzero_explicit(&ktp, sizeof(ktp));
+ case PKEY_KBLOB2PROTK:
+ rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg);
break;
- }
- case PKEY_GENSECK2: {
- struct pkey_genseck2 __user *ugs = (void __user *)arg;
- size_t klen = KEYBLOBBUFSIZE;
- struct pkey_genseck2 kgs;
- struct pkey_apqn *apqns;
- u8 *kkey;
-
- if (copy_from_user(&kgs, ugs, sizeof(kgs)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = kzalloc(klen, GFP_KERNEL);
- if (!kkey) {
- kfree(apqns);
- return -ENOMEM;
- }
- rc = pkey_genseckey2(apqns, kgs.apqn_entries,
- kgs.type, kgs.size, kgs.keygenflags,
- kkey, &klen);
- pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
- kfree(apqns);
- if (rc) {
- kfree_sensitive(kkey);
- break;
- }
- if (kgs.key) {
- if (kgs.keylen < klen) {
- kfree_sensitive(kkey);
- return -EINVAL;
- }
- if (copy_to_user(kgs.key, kkey, klen)) {
- kfree_sensitive(kkey);
- return -EFAULT;
- }
- }
- kgs.keylen = klen;
- if (copy_to_user(ugs, &kgs, sizeof(kgs)))
- rc = -EFAULT;
- kfree_sensitive(kkey);
+ case PKEY_GENSECK2:
+ rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg);
break;
- }
- case PKEY_CLR2SECK2: {
- struct pkey_clr2seck2 __user *ucs = (void __user *)arg;
- size_t klen = KEYBLOBBUFSIZE;
- struct pkey_clr2seck2 kcs;
- struct pkey_apqn *apqns;
- u8 *kkey;
-
- if (copy_from_user(&kcs, ucs, sizeof(kcs)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
- if (IS_ERR(apqns)) {
- memzero_explicit(&kcs, sizeof(kcs));
- return PTR_ERR(apqns);
- }
- kkey = kzalloc(klen, GFP_KERNEL);
- if (!kkey) {
- kfree(apqns);
- memzero_explicit(&kcs, sizeof(kcs));
- return -ENOMEM;
- }
- rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
- kcs.type, kcs.size, kcs.keygenflags,
- kcs.clrkey.clrkey, kkey, &klen);
- pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
- kfree(apqns);
- if (rc) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- break;
- }
- if (kcs.key) {
- if (kcs.keylen < klen) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- return -EINVAL;
- }
- if (copy_to_user(kcs.key, kkey, klen)) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- return -EFAULT;
- }
- }
- kcs.keylen = klen;
- if (copy_to_user(ucs, &kcs, sizeof(kcs)))
- rc = -EFAULT;
- memzero_explicit(&kcs, sizeof(kcs));
- kfree_sensitive(kkey);
+ case PKEY_CLR2SECK2:
+ rc = pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg);
break;
- }
- case PKEY_VERIFYKEY2: {
- struct pkey_verifykey2 __user *uvk = (void __user *)arg;
- struct pkey_verifykey2 kvk;
- u8 *kkey;
-
- if (copy_from_user(&kvk, uvk, sizeof(kvk)))
- return -EFAULT;
- kkey = _copy_key_from_user(kvk.key, kvk.keylen);
- if (IS_ERR(kkey))
- return PTR_ERR(kkey);
- rc = pkey_verifykey2(kkey, kvk.keylen,
- &kvk.cardnr, &kvk.domain,
- &kvk.type, &kvk.size, &kvk.flags);
- pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (rc)
- break;
- if (copy_to_user(uvk, &kvk, sizeof(kvk)))
- return -EFAULT;
+ case PKEY_VERIFYKEY2:
+ rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK2: {
- struct pkey_kblob2pkey2 __user *utp = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_kblob2pkey2 ktp;
- u8 *kkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- ktp.protkey.len = sizeof(ktp.protkey.protkey);
- rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
- kkey, ktp.keylen,
- ktp.protkey.protkey, &ktp.protkey.len,
- &ktp.protkey.type);
- pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
- kfree(apqns);
- kfree_sensitive(kkey);
- if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
- rc = -EFAULT;
- memzero_explicit(&ktp, sizeof(ktp));
+ case PKEY_KBLOB2PROTK2:
+ rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg);
break;
- }
- case PKEY_APQNS4K: {
- struct pkey_apqns4key __user *uak = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_apqns4key kak;
- size_t nr_apqns, len;
- u8 *kkey;
-
- if (copy_from_user(&kak, uak, sizeof(kak)))
- return -EFAULT;
- nr_apqns = kak.apqn_entries;
- if (nr_apqns) {
- apqns = kmalloc_array(nr_apqns,
- sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!apqns)
- return -ENOMEM;
- }
- kkey = _copy_key_from_user(kak.key, kak.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
- apqns, &nr_apqns);
- pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (rc && rc != -ENOSPC) {
- kfree(apqns);
- break;
- }
- if (!rc && kak.apqns) {
- if (nr_apqns > kak.apqn_entries) {
- kfree(apqns);
- return -EINVAL;
- }
- len = nr_apqns * sizeof(struct pkey_apqn);
- if (len) {
- if (copy_to_user(kak.apqns, apqns, len)) {
- kfree(apqns);
- return -EFAULT;
- }
- }
- }
- kak.apqn_entries = nr_apqns;
- if (copy_to_user(uak, &kak, sizeof(kak)))
- rc = -EFAULT;
- kfree(apqns);
+ case PKEY_APQNS4K:
+ rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg);
break;
- }
- case PKEY_APQNS4KT: {
- struct pkey_apqns4keytype __user *uat = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_apqns4keytype kat;
- size_t nr_apqns, len;
-
- if (copy_from_user(&kat, uat, sizeof(kat)))
- return -EFAULT;
- nr_apqns = kat.apqn_entries;
- if (nr_apqns) {
- apqns = kmalloc_array(nr_apqns,
- sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!apqns)
- return -ENOMEM;
- }
- rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
- kat.flags, apqns, &nr_apqns);
- pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc);
- if (rc && rc != -ENOSPC) {
- kfree(apqns);
- break;
- }
- if (!rc && kat.apqns) {
- if (nr_apqns > kat.apqn_entries) {
- kfree(apqns);
- return -EINVAL;
- }
- len = nr_apqns * sizeof(struct pkey_apqn);
- if (len) {
- if (copy_to_user(kat.apqns, apqns, len)) {
- kfree(apqns);
- return -EFAULT;
- }
- }
- }
- kat.apqn_entries = nr_apqns;
- if (copy_to_user(uat, &kat, sizeof(kat)))
- rc = -EFAULT;
- kfree(apqns);
+ case PKEY_APQNS4KT:
+ rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK3: {
- struct pkey_kblob2pkey3 __user *utp = (void __user *)arg;
- u32 protkeylen = PROTKEYBLOBBUFSIZE;
- struct pkey_apqn *apqns = NULL;
- struct pkey_kblob2pkey3 ktp;
- u8 *kkey, *protkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- protkey = kmalloc(protkeylen, GFP_KERNEL);
- if (!protkey) {
- kfree(apqns);
- kfree_sensitive(kkey);
- return -ENOMEM;
- }
- rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
- kkey, ktp.keylen,
- protkey, &protkeylen, &ktp.pkeytype);
- pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
- kfree(apqns);
- kfree_sensitive(kkey);
- if (rc) {
- kfree_sensitive(protkey);
- break;
- }
- if (ktp.pkey && ktp.pkeylen) {
- if (protkeylen > ktp.pkeylen) {
- kfree_sensitive(protkey);
- return -EINVAL;
- }
- if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
- kfree_sensitive(protkey);
- return -EFAULT;
- }
- }
- kfree_sensitive(protkey);
- ktp.pkeylen = protkeylen;
- if (copy_to_user(utp, &ktp, sizeof(ktp)))
- return -EFAULT;
+ case PKEY_KBLOB2PROTK3:
+ rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg);
break;
- }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1791,494 +770,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
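
The case bodies removed above reappear as small per-ioctl helper functions earlier in this patch. As a sketch only, reconstructed from the removed PKEY_VERIFYKEY2 case (the helper as actually committed may route through the new handler registry instead of calling pkey_verifykey2() directly), such a helper plausibly looks like:

static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk)
{
	struct pkey_verifykey2 kvk;
	u8 *kkey;
	int rc;

	if (copy_from_user(&kvk, uvk, sizeof(kvk)))
		return -EFAULT;
	kkey = _copy_key_from_user(kvk.key, kvk.keylen);
	if (IS_ERR(kkey))
		return PTR_ERR(kkey);
	rc = pkey_verifykey2(kkey, kvk.keylen, &kvk.cardnr, &kvk.domain,
			     &kvk.type, &kvk.size, &kvk.flags);
	kfree_sensitive(kkey);
	if (rc)
		return rc;
	if (copy_to_user(uvk, &kvk, sizeof(kvk)))
		return -EFAULT;
	return 0;
}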
/*
- * Sysfs and file io operations
- */
-
-/*
- * Sysfs attribute read function for all protected key binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- struct protaeskeytoken protkeytoken;
- struct pkey_protkey protkey;
- int rc;
-
- if (off != 0 || count < sizeof(protkeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(protkeytoken))
- return -EINVAL;
-
- memset(&protkeytoken, 0, sizeof(protkeytoken));
- protkeytoken.type = TOKTYPE_NON_CCA;
- protkeytoken.version = TOKVER_PROTECTED_KEY;
- protkeytoken.keytype = keytype;
-
- protkey.len = sizeof(protkey.protkey);
- rc = pkey_genprotkey(protkeytoken.keytype,
- protkey.protkey, &protkey.len, &protkey.type);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf, &protkeytoken, sizeof(protkeytoken));
-
- if (is_xts) {
- /* xts needs a second protected key, reuse protkey struct */
- protkey.len = sizeof(protkey.protkey);
- rc = pkey_genprotkey(protkeytoken.keytype,
- protkey.protkey, &protkey.len, &protkey.type);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf + sizeof(protkeytoken), &protkeytoken,
- sizeof(protkeytoken));
-
- return 2 * sizeof(protkeytoken);
- }
-
- return sizeof(protkeytoken);
-}
-
-static ssize_t protkey_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-
-static struct bin_attribute *protkey_attrs[] = {
- &bin_attr_protkey_aes_128,
- &bin_attr_protkey_aes_192,
- &bin_attr_protkey_aes_256,
- &bin_attr_protkey_aes_128_xts,
- &bin_attr_protkey_aes_256_xts,
- NULL
-};
-
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
-};
-
-/*
- * Sysfs attribute read function for all secure key ccadata binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
- int rc;
-
- if (off != 0 || count < sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(struct secaeskeytoken))
- return -EINVAL;
-
- rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
- if (rc)
- return rc;
-
- if (is_xts) {
- seckey++;
- rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
- if (rc)
- return rc;
-
- return 2 * sizeof(struct secaeskeytoken);
- }
-
- return sizeof(struct secaeskeytoken);
-}
-
-static ssize_t ccadata_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-
-static struct bin_attribute *ccadata_attrs[] = {
- &bin_attr_ccadata_aes_128,
- &bin_attr_ccadata_aes_192,
- &bin_attr_ccadata_aes_256,
- &bin_attr_ccadata_aes_128_xts,
- &bin_attr_ccadata_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
-};
-
-#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
-
-/*
- * Sysfs attribute read function for all secure key ccacipher binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * secure key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
- bool is_xts, char *buf, loff_t off,
- size_t count)
-{
- size_t keysize = CCACIPHERTOKENSIZE;
- u32 nr_apqns, *apqns = NULL;
- int i, rc, card, dom;
-
- if (off != 0 || count < CCACIPHERTOKENSIZE)
- return -EINVAL;
- if (is_xts)
- if (count < 2 * CCACIPHERTOKENSIZE)
- return -EINVAL;
-
- /* build a list of apqns able to generate an cipher key */
- rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX6, 0, 0, 0, 0);
- if (rc)
- return rc;
-
- memset(buf, 0, is_xts ? 2 * keysize : keysize);
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
- if (rc == 0)
- break;
- }
- if (rc)
- return rc;
-
- if (is_xts) {
- keysize = CCACIPHERTOKENSIZE;
- buf += CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
- if (rc == 0)
- return 2 * CCACIPHERTOKENSIZE;
- }
-
- return CCACIPHERTOKENSIZE;
-}
-
-static ssize_t ccacipher_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
-
-static struct bin_attribute *ccacipher_attrs[] = {
- &bin_attr_ccacipher_aes_128,
- &bin_attr_ccacipher_aes_192,
- &bin_attr_ccacipher_aes_256,
- &bin_attr_ccacipher_aes_128_xts,
- &bin_attr_ccacipher_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ccacipher_attr_group = {
- .name = "ccacipher",
- .bin_attrs = ccacipher_attrs,
-};
-
-/*
- * Sysfs attribute read function for all ep11 aes key binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * secure key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- * This function and the sysfs attributes using it provide EP11 key blobs
- * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
- * 336 bytes.
+ * File I/O operations
*/
-static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
- bool is_xts, char *buf, loff_t off,
- size_t count)
-{
- size_t keysize = MAXEP11AESKEYBLOBSIZE;
- u32 nr_apqns, *apqns = NULL;
- int i, rc, card, dom;
-
- if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
- return -EINVAL;
- if (is_xts)
- if (count < 2 * MAXEP11AESKEYBLOBSIZE)
- return -EINVAL;
-
- /* build a list of apqns able to generate an cipher key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- NULL);
- if (rc)
- return rc;
-
- memset(buf, 0, is_xts ? 2 * keysize : keysize);
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
- PKEY_TYPE_EP11_AES);
- if (rc == 0)
- break;
- }
- if (rc)
- return rc;
-
- if (is_xts) {
- keysize = MAXEP11AESKEYBLOBSIZE;
- buf += MAXEP11AESKEYBLOBSIZE;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
- PKEY_TYPE_EP11_AES);
- if (rc == 0)
- return 2 * MAXEP11AESKEYBLOBSIZE;
- }
-
- return MAXEP11AESKEYBLOBSIZE;
-}
-
-static ssize_t ep11_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-
-static struct bin_attribute *ep11_attrs[] = {
- &bin_attr_ep11_aes_128,
- &bin_attr_ep11_aes_192,
- &bin_attr_ep11_aes_256,
- &bin_attr_ep11_aes_128_xts,
- &bin_attr_ep11_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ep11_attr_group = {
- .name = "ep11",
- .bin_attrs = ep11_attrs,
-};
-
-static const struct attribute_group *pkey_attr_groups[] = {
- &protkey_attr_group,
- &ccadata_attr_group,
- &ccacipher_attr_group,
- &ep11_attr_group,
- NULL,
-};
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
@@ -2295,43 +788,13 @@ static struct miscdevice pkey_dev = {
.groups = pkey_attr_groups,
};
-/*
- * Module init
- */
-static int __init pkey_init(void)
+int __init pkey_api_init(void)
{
- cpacf_mask_t func_mask;
-
- /*
- * The pckmo instruction should be available - even if we don't
- * actually invoke it. This instruction comes with MSA 3 which
- * is also the minimum level for the kmc instructions which
- * are able to work with protected keys.
- */
- if (!cpacf_query(CPACF_PCKMO, &func_mask))
- return -ENODEV;
-
- /* check for kmc instructions available */
- if (!cpacf_query(CPACF_KMC, &func_mask))
- return -ENODEV;
- if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
- !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
- !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
- return -ENODEV;
-
- pkey_debug_init();
-
+ /* register as a misc device */
return misc_register(&pkey_dev);
}
-/*
- * Module exit
- */
-static void __exit pkey_exit(void)
+void __exit pkey_api_exit(void)
{
misc_deregister(&pkey_dev);
- pkey_debug_exit();
}
-
-module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
-module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
new file mode 100644
index 000000000000..fea243322838
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey base: debug feature, pkey handler registry
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key base and api");
+
+/*
+ * pkey debug feature
+ */
+debug_info_t *pkey_dbf_info;
+EXPORT_SYMBOL(pkey_dbf_info);
+
+/*
+ * pkey handler registry
+ */
+
+static DEFINE_SPINLOCK(handler_list_write_lock);
+static LIST_HEAD(handler_list);
+
+int pkey_handler_register(struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler ||
+ !handler->is_supported_key ||
+ !handler->is_supported_keytype)
+ return -EINVAL;
+
+ if (!try_module_get(handler->module))
+ return -ENXIO;
+
+ spin_lock(&handler_list_write_lock);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ rcu_read_unlock();
+ spin_unlock(&handler_list_write_lock);
+ module_put(handler->module);
+ return -EEXIST;
+ }
+ }
+ rcu_read_unlock();
+
+ list_add_rcu(&handler->list, &handler_list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ module_put(handler->module);
+
+ PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_register);
+
+int pkey_handler_unregister(struct pkey_handler *handler)
+{
+ spin_lock(&handler_list_write_lock);
+ list_del_rcu(&handler->list);
+ INIT_LIST_HEAD_RCU(&handler->list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_unregister);
+
+/*
+ * Handler invocation functions.
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_key(key, keylen)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keybased);
+
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_keytype(kt)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keytypebased);
+
+void pkey_handler_put(const struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ module_put(h->module);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(pkey_handler_put);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->key_to_protkey) {
+ rc = h->key_to_protkey(apqns, nr_apqns, key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_key_to_protkey);
+
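A hypothetical in-kernel consumer of this entry point could look as follows; key/keylen are assumed to hold a key blob obtained elsewhere, passing NULL/0 as the apqn list lets the handler autoselect suitable queues, and the -ENODEV retry shows how this combines with pkey_handler_request_modules() further down:

	u8 protkey[PROTKEYBLOBBUFSIZE];	/* buffer size define from pkey_base.h */
	u32 protkeylen = sizeof(protkey), protkeytype;
	int rc;

	rc = pkey_handler_key_to_protkey(NULL, 0, key, keylen,
					 protkey, &protkeylen, &protkeytype);
	if (rc == -ENODEV) {
		/* no matching handler loaded yet: trigger autoload, retry once */
		pkey_handler_request_modules();
		rc = pkey_handler_key_to_protkey(NULL, 0, key, keylen,
						 protkey, &protkeylen,
						 &protkeytype);
	}
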
+/*
+ * This handler invocation is special as there may be more than
+ * one handler providing support for the very same key (type).
+ * Also, a handler may not report true via is_supported_key()
+ * for a key it can nevertheless convert, so simply try each
+ * handler and check the return value here.
+ */
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct pkey_handler *h, *htmp[10];
+ int i, n = 0, rc = -ENODEV;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp))
+ htmp[n++] = h;
+ else
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n; i++) {
+ h = htmp[i];
+ if (rc)
+ rc = h->slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ module_put(h->module);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey);
+
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->gen_key) {
+ rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_gen_key);
+
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->clr_to_key) {
+ rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags, clrkey, clrkeylen,
+ keybuf, keybuflen, keyinfo);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_clr_to_key);
+
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->verify_key) {
+ rc = h->verify_key(key, keylen, card, dom,
+ keytype, keybitsize, flags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_verify_key);
+
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->apqns_for_key)
+ rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns);
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_key);
+
+int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->apqns_for_keytype) {
+ rc = h->apqns_for_keytype(keysubtype,
+ cur_mkvp, alt_mkvp, flags,
+ apqns, nr_apqns);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_keytype);
+
+void pkey_handler_request_modules(void)
+{
+#ifdef CONFIG_MODULES
+ static const char * const pkey_handler_modules[] = {
+ "pkey_cca", "pkey_ep11", "pkey_pckmo" };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
+ const struct pkey_handler *h;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h->module &&
+ !strcmp(h->module->name, pkey_handler_modules[i])) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found) {
+ pr_debug("request_module(%s)\n", pkey_handler_modules[i]);
+ request_module(pkey_handler_modules[i]);
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL(pkey_handler_request_modules);
+
+/*
+ * Module init
+ */
+static int __init pkey_init(void)
+{
+ int rc;
+
+ /* init debug feature */
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 4);
+
+ /* the handler registry does not need any init */
+
+ rc = pkey_api_init();
+ if (rc)
+ debug_unregister(pkey_dbf_info);
+
+ return rc;
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+ pkey_api_exit();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
+module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
new file mode 100644
index 000000000000..7a1a5ce192d8
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * Pkey base: debug feature, defines and structs
+ * common to all pkey code.
+ */
+
+#ifndef _PKEY_BASE_H_
+#define _PKEY_BASE_H_
+
+#include <linux/types.h>
+#include <asm/debug.h>
+#include <asm/pkey.h>
+
+/*
+ * pkey debug feature
+ */
+
+extern debug_info_t *pkey_dbf_info;
+
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
+
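Usage of these macros mirrors printk-style formatting, for example (a made-up message):

	PKEY_DBF_WARN("%s invalid keylen %u\n", __func__, keylen);
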
+/*
+ * common defines and common structs
+ */
+
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
+#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
+#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
+
+/* inside view of a generic protected key token */
+struct protkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[]; /* the protected key blob */
+} __packed;
+
+/* inside view of a protected AES key token */
+struct protaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[]; /* clear key value */
+} __packed;
+
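As a sketch only, raw AES-256 key material (raw_aes_key is an assumed 32-byte buffer, protkey/protkeylen/protkeytype assumed declared as in the earlier example) can be wrapped into such a clear key token and handed to the slowpath converter declared further down in this header:

	struct clearkeytoken *t;
	u32 toklen = sizeof(*t) + 32;
	int rc;

	t = kzalloc(toklen, GFP_KERNEL);
	if (!t)
		return -ENOMEM;
	t->type = TOKTYPE_NON_CCA;
	t->version = TOKVER_CLEAR_KEY;
	t->keytype = PKEY_KEYTYPE_AES_256;
	t->len = 32;
	memcpy(t->clearkey, raw_aes_key, 32);	/* raw_aes_key: hypothetical */

	rc = pkey_handler_slowpath_key_to_protkey(NULL, 0, (u8 *)t, toklen,
						  protkey, &protkeylen,
						  &protkeytype);
	kfree_sensitive(t);
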
+/* helper translating PKEY_KEYTYPE_AES_* values to their key size in bytes */
+static inline u32 pkey_keytype_aes_to_size(u32 keytype)
+{
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ return 16;
+ case PKEY_KEYTYPE_AES_192:
+ return 24;
+ case PKEY_KEYTYPE_AES_256:
+ return 32;
+ default:
+ return 0;
+ }
+}
+
+/* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */
+static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
+{
+ switch (keybitsize) {
+ case 128:
+ return PKEY_KEYTYPE_AES_128;
+ case 192:
+ return PKEY_KEYTYPE_AES_192;
+ case 256:
+ return PKEY_KEYTYPE_AES_256;
+ default:
+ return 0;
+ }
+}
+
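For the supported AES key types the two helpers are inverses of each other, e.g.:

	pkey_aes_bitsize_to_keytype(256);		/* PKEY_KEYTYPE_AES_256 */
	pkey_keytype_aes_to_size(PKEY_KEYTYPE_AES_256);	/* 32 (bytes) */
	pkey_keytype_aes_to_size(0x4711);		/* 0, unknown keytype */
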
+/*
+ * pkey_api.c:
+ */
+int __init pkey_api_init(void);
+void __exit pkey_api_exit(void);
+
+/*
+ * pkey_sysfs.c:
+ */
+
+extern const struct attribute_group *pkey_attr_groups[];
+
+/*
+ * pkey handler registry
+ */
+
+struct pkey_handler {
+ struct module *module;
+ const char *name;
+ /*
+ * is_supported_key() and is_supported_keytype() are called
+ * within an rcu_read_lock() scope and thus must not sleep!
+ */
+ bool (*is_supported_key)(const u8 *key, u32 keylen);
+ bool (*is_supported_keytype)(enum pkey_key_type);
+ int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype);
+ int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ int (*verify_key)(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags);
+ int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+ int (*apqns_for_keytype)(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
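+	/*
+	 * Note: all ops above except the two is_supported_* predicates
+	 * are optional; pkey_handler_register() insists only on those
+	 * two being present.
+	 */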
+	/* used internally by the pkey base */
+ struct list_head list;
+};
+
+int pkey_handler_register(struct pkey_handler *handler);
+int pkey_handler_unregister(struct pkey_handler *handler);
+
+/*
+ * invocation function for the registered pkey handlers
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen);
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt);
+void pkey_handler_put(const struct pkey_handler *handler);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype);
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags);
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+
+/*
+ * Unconditionally try to load all handler modules
+ */
+void pkey_handler_request_modules(void);
+
+#endif /* _PKEY_BASE_H_ */
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
new file mode 100644
index 000000000000..937051381720
--- /dev/null
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey cca specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key CCA handler");
+
+#if IS_MODULE(CONFIG_PKEY_CCA)
+static struct ap_device_id pkey_cca_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids);
+#endif
+
+/*
+ * Check key blob for known and supported CCA key.
+ */
+static bool is_cca_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_CCA_INTERNAL:
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ case TOKVER_CCA_VLSC:
+ return true;
+ default:
+ return false;
+ }
+ case TOKTYPE_CCA_INTERNAL_PKA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_cca_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ case PKEY_TYPE_CCA_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ } else {
+ /* unknown CCA internal token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (t->secid == 0x20) {
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+			/* unknown CCA internal PKA token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
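The apqns4key/apqns4type helpers here and in the EP11 handler below share a query-then-fetch calling convention: a NULL apqns pointer only reports the number of matching queues, and a too-small buffer yields -ENOSPC. A usage sketch (key/keylen assumed to hold a valid CCA key blob):

	struct pkey_apqn *list;
	size_t n = 0;
	int rc;

	rc = cca_apqns4key(key, keylen, 0, NULL, &n);	/* query count only */
	if (rc)
		return rc;
	list = kmalloc_array(n, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	rc = cca_apqns4key(key, keylen, 0, list, &n);	/* fetch the entries */
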
+static int cca_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = *((u64 *)alt_mkvp);
+ if (ktype == PKEY_TYPE_CCA_CIPHER)
+ minhwtype = ZCRYPT_CEX6;
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else if (ktype == PKEY_TYPE_CCA_ECC) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = *((u64 *)alt_mkvp);
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn *local_apqns = NULL;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ /* CCA AES data key */
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ /* CCA AES cipher key */
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ /* CCA ECC (private) key */
+ if (keylen < sizeof(struct eccprivkeytoken))
+ return -EINVAL;
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ rc = cca_sec2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_gencipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_genseckey(apqns[i].card, apqns[i].domain,
+ keybitsize, keybuf);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key with given clear key value.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain,
+ keybitsize, clrkey, keybuf);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 nr_apqns, *apqns = NULL;
+ int rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_DATA;
+ *keybitsize = t->bitsize;
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ t->mkvp, 0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ 0, t->mkvp, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_CIPHER;
+ *keybitsize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *keybitsize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *keybitsize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *keybitsize = PKEY_SIZE_AES_256;
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ t->mkvp0, 0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ 0, t->mkvp0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternative but usually slower way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. This is done via an intermediate step
+ * which creates a CCA AES DATA secure key first and then
+ * derives the protected key from this secure key.
+ */
+static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u32 tmplen, keysize = 0;
+ u8 *tmpbuf;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize)
+ return -EINVAL;
+
+ /* alloc tmp key buffer */
+ tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = SECKEYBLOBSIZE;
+ rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL);
+ pr_debug("cca_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype);
+ pr_debug("cca_key2protkey()=%d\n", rc);
+ }
+
+ kfree(tmpbuf);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler cca_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY CCA handler",
+ .is_supported_key = is_cca_key,
+ .is_supported_keytype = is_cca_keytype,
+ .key_to_protkey = cca_key2protkey,
+ .slowpath_key_to_protkey = cca_slowpath_key2protkey,
+ .gen_key = cca_gen_key,
+ .clr_to_key = cca_clr2key,
+ .verify_key = cca_verifykey,
+ .apqns_for_key = cca_apqns4key,
+ .apqns_for_keytype = cca_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_cca_init(void)
+{
+	/* register this module as the pkey handler for all CCA key types */
+ return pkey_handler_register(&cca_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_cca_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&cca_handler);
+}
+
+module_init(pkey_cca_init);
+module_exit(pkey_cca_exit);
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
new file mode 100644
index 000000000000..f42d397a9cb6
--- /dev/null
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey ep11 specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key EP11 handler");
+
+#if IS_MODULE(CONFIG_PKEY_EP11)
+static struct ap_device_id pkey_ep11_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids);
+#endif
+
+/*
+ * Check key blob for known and supported EP11 key.
+ */
+static bool is_ep11_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_EP11_AES:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ case TOKVER_EP11_ECC_WITH_HEADER:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_ep11_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ case PKEY_TYPE_EP11_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)
+ (key + sizeof(struct ep11kblob_header));
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES ||
+ ktype == PKEY_TYPE_EP11_ECC) {
+ u8 *wkvp = NULL;
+ int api;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, api, wkvp);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn *local_apqns = NULL;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES key blob with header */
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 ECC key blob with header */
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ /* EP11 AES key blob with header in session field */
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
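A hedged sketch of this conversion path (buffer size assumed; MAXPROTKEYSIZE taken from the pkey headers), turning an EP11 AES secure key blob in key/keylen into a protected key:

	u8 protkey[MAXPROTKEYSIZE];
	u32 protkeylen = sizeof(protkey), protkeytype;
	int rc;

	/* apqns == NULL: let the handler find suitable APQNs itself */
	rc = ep11_key2protkey(NULL, 0, key, keylen,
			      protkey, &protkeylen, &protkeytype);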
+
+/*
+ * Generate EP11 secure key.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (results in subtype PKEY_TYPE_EP11_AES),
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_genaeskey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen, subtype);
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
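A usage sketch of the generate path (not part of the patch), creating a random EP11 AES-256 secure key on any suitable APQN:

	u8 keybuf[MAXEP11AESKEYBLOBSIZE];
	u32 keybuflen = sizeof(keybuf);
	int rc;

	/* apqns == NULL triggers the automatic APQN selection above */
	rc = ep11_gen_key(NULL, 0, PKEY_KEYTYPE_AES_256, PKEY_TYPE_EP11_AES,
			  256, 0, keybuf, &keybuflen, NULL);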
+
+/*
+ * Generate EP11 secure key with given clear key value.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (results in subtype PKEY_TYPE_EP11_AES),
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen, subtype);
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 nr_apqns, *apqns = NULL;
+ int rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int api;
+
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11;
+ *keybitsize = kb->head.bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
+ struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
+ int api;
+
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11_AES;
+ *keybitsize = kh->bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternative, but usually slower, way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. That is done via an intermediate step
+ * which creates an EP11 AES secure key first and then derives
+ * the protected key from this secure key.
+ */
+static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u32 tmplen, keysize = 0;
+ u8 *tmpbuf;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize)
+ return -EINVAL;
+
+ /* alloc tmp key buffer */
+ tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL);
+ pr_debug("ep11_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype);
+ pr_debug("ep11_key2protkey()=%d\n", rc);
+ }
+
+ kfree(tmpbuf);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler ep11_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY EP11 handler",
+ .is_supported_key = is_ep11_key,
+ .is_supported_keytype = is_ep11_keytype,
+ .key_to_protkey = ep11_key2protkey,
+ .slowpath_key_to_protkey = ep11_slowpath_key2protkey,
+ .gen_key = ep11_gen_key,
+ .clr_to_key = ep11_clr2key,
+ .verify_key = ep11_verifykey,
+ .apqns_for_key = ep11_apqns4key,
+ .apqns_for_keytype = ep11_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_ep11_init(void)
+{
+ /* register this module as pkey handler for all the ep11 stuff */
+ return pkey_handler_register(&ep11_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_ep11_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&ep11_handler);
+}
+
+module_init(pkey_ep11_init);
+module_exit(pkey_ep11_exit);
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
new file mode 100644
index 000000000000..98079b1ed6db
--- /dev/null
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey pckmo specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <asm/cpacf.h>
+#include <crypto/aes.h>
+#include <linux/random.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key PCKMO handler");
+
+/*
+ * Check whether the given key blob is of a type known and supported here.
+ */
+static bool is_pckmo_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_CLEAR_KEY:
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ case PKEY_KEYTYPE_ECC_P256:
+ case PKEY_KEYTYPE_ECC_P384:
+ case PKEY_KEYTYPE_ECC_P521:
+ case PKEY_KEYTYPE_ECC_ED25519:
+ case PKEY_KEYTYPE_ECC_ED448:
+ case PKEY_KEYTYPE_AES_XTS_128:
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ case PKEY_KEYTYPE_HMAC_1024:
+ return true;
+ default:
+ return false;
+ }
+ case TOKVER_PROTECTED_KEY:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_pckmo_keytype(enum pkey_key_type keytype)
+{
+ switch (keytype) {
+ case PKEY_TYPE_PROTKEY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Create a protected key from a clear key value via PCKMO instruction.
+ */
+static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
+ int keysize, rc = -EINVAL;
+ u8 paramblock[160];
+ u32 pkeytype;
+ long fc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ /* 16 byte key, 32 byte aes wkvp, total 48 bytes */
+ keysize = 16;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_128_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ /* 24 byte key, 32 byte aes wkvp, total 56 bytes */
+ keysize = 24;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_192_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P256:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P384:
+ /* 48 byte key, 32 byte aes wkvp, total 80 bytes */
+ keysize = 48;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P521:
+ /* 80 byte key, 32 byte aes wkvp, total 112 bytes */
+ keysize = 80;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED448:
+ /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ /* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_AES_XTS_128;
+ fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ /* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_AES_XTS_256;
+ fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_512:
+ /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_HMAC_512;
+ fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ /* 128 byte key, 32 byte aes wkvp, total 160 bytes */
+ keysize = 128;
+ pkeytype = PKEY_KEYTYPE_HMAC_1024;
+ fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
+ goto out;
+ }
+
+ if (clrkeylen && clrkeylen < keysize) {
+ PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
+ __func__, clrkeylen, keysize);
+ goto out;
+ }
+ if (*protkeylen < keysize + AES_WK_VP_SIZE) {
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ goto out;
+ }
+
+	/* Did we already check for PCKMO? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) {
+ PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+ }
+ /* check for the pckmo subfunction we need now */
+ if (!cpacf_test_func(&pckmo_functions, fc)) {
+ PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* prepare param block */
+ memset(paramblock, 0, sizeof(paramblock));
+ memcpy(paramblock, clrkey, keysize);
+
+ /* call the pckmo instruction */
+ cpacf_pckmo(fc, paramblock);
+
+ /* copy created protected key to key buffer including the wkvp block */
+ *protkeylen = keysize + AES_WK_VP_SIZE;
+ memcpy(protkey, paramblock, *protkeylen);
+ *protkeytype = pkeytype;
+
+ rc = 0;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
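The resulting token is always the wrapped key material directly followed by the 32 byte wrapping key verification pattern. A sketch (hypothetical caller) converting a clear AES-128 key:

	u8 clrkey[16];		/* caller-provided clear key material */
	u8 protkey[16 + AES_WK_VP_SIZE];
	u32 protkeylen = sizeof(protkey), protkeytype;
	int rc;

	rc = pckmo_clr2protkey(PKEY_KEYTYPE_AES_128, clrkey, sizeof(clrkey),
			       protkey, &protkeylen, &protkeytype);
	/* on success: protkeylen == 48, protkeytype == PKEY_KEYTYPE_AES_128 */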
+
+/*
+ * Verify a raw protected key blob.
+ * Currently only AES protected keys are supported.
+ */
+static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
+ u32 protkeytype)
+{
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[MAXPROTKEYSIZE];
+ } param;
+ u8 null_msg[AES_BLOCK_SIZE];
+ u8 dest_buf[AES_BLOCK_SIZE];
+ unsigned int k, pkeylen;
+ unsigned long fc;
+ int rc = -EINVAL;
+
+ switch (protkeytype) {
+ case PKEY_KEYTYPE_AES_128:
+ pkeylen = 16 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ pkeylen = 24 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ pkeylen = 32 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_256;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
+ goto out;
+ }
+ if (protkeylen != pkeylen) {
+ PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
+ __func__, protkeylen, protkeytype);
+ goto out;
+ }
+
+ memset(null_msg, 0, sizeof(null_msg));
+
+ memset(param.iv, 0, sizeof(param.iv));
+ memcpy(param.key, protkey, protkeylen);
+
+ k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
+ sizeof(null_msg));
+ if (k != sizeof(null_msg)) {
+ PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
+ rc = -EKEYREJECTED;
+ goto out;
+ }
+
+ rc = 0;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
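The check above encrypts a single null block with the matching paes KMC function: when the wkvp embedded in the key does not match the current wrapping key, CPACF processes fewer bytes than requested and the key is rejected. A one-line usage sketch (values assumed):

	/* wkvp mismatch, e.g. after a wrapping key change, gives -EKEYREJECTED */
	rc = pckmo_verify_protkey(protkey, 16 + AES_WK_VP_SIZE,
				  PKEY_KEYTYPE_AES_128);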
+
+static int pckmo_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protkeytoken *t = (struct protkeytoken *)key;
+
+ if (keylen < sizeof(*t))
+ goto out;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
+ rc = pckmo_verify_protkey(t->protkey, t->len,
+ t->keytype);
+ if (rc)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ if (t->len != 64 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ if (t->len != 96 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ if (t->len != 160 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ default:
+ PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ memcpy(protkey, t->protkey, t->len);
+ *protkeylen = t->len;
+ *protkeytype = t->keytype;
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+ u32 keysize = 0;
+
+ if (keylen < sizeof(struct clearkeytoken) ||
+ keylen != sizeof(*t) + t->len)
+ goto out;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ break;
+ case PKEY_KEYTYPE_ECC_P256:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_ECC_P384:
+ keysize = 48;
+ break;
+ case PKEY_KEYTYPE_ECC_P521:
+ keysize = 80;
+ break;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_ECC_ED448:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_512:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ keysize = 128;
+ break;
+ default:
+ break;
+ }
+ if (!keysize) {
+ PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ if (t->len != keysize) {
+ PKEY_DBF_ERR("%s clear key token: invalid key len %u\n",
+ __func__, t->len);
+ goto out;
+ }
+ rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len,
+ protkey, protkeylen, protkeytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
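A sketch of the clear key path (hypothetical helper; clrkey assumed to hold 32 bytes of key material), building a TOKVER_CLEAR_KEY token and converting it in one go:

	struct clearkeytoken *t;
	u8 protkey[MAXPROTKEYSIZE];
	u32 protkeylen = sizeof(protkey), protkeytype;
	int rc;

	t = kzalloc(sizeof(*t) + 32, GFP_KERNEL);
	if (!t)
		return -ENOMEM;
	t->type = TOKTYPE_NON_CCA;
	t->version = TOKVER_CLEAR_KEY;
	t->keytype = PKEY_KEYTYPE_AES_256;
	t->len = 32;
	memcpy(t->clearkey, clrkey, 32);
	rc = pckmo_key2protkey((u8 *)t, sizeof(*t) + t->len,
			       protkey, &protkeylen, &protkeytype);
	kfree_sensitive(t);	/* wipe the clear key material on free */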
+
+/*
+ * Generate a random protected key.
+ * Currently only the generation of AES, AES XTS and HMAC
+ * protected keys is supported.
+ */
+static int pckmo_gen_protkey(u32 keytype, u32 subtype,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ u8 clrkey[128];
+ int keysize;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ keysize = pkey_keytype_aes_to_size(keytype);
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ keysize = 128;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+ if (subtype != PKEY_TYPE_PROTKEY) {
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+
+ /* generate a dummy random clear key */
+ get_random_bytes(clrkey, keysize);
+
+ /* convert it to a dummy protected key */
+ rc = pckmo_clr2protkey(keytype, clrkey, keysize,
+ protkey, protkeylen, protkeytype);
+ if (rc)
+ goto out;
+
+	/*
+	 * Replace the key part of the protected key with fresh random
+	 * bytes. The trailing wkvp depends only on the wrapping key, so
+	 * the result is still a valid protected key - but one whose
+	 * effective clear key value never existed in memory.
+	 */
+ get_random_bytes(protkey, keysize);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Verify a protected key token blob.
+ * Currently only AES protected keys are supported.
+ */
+static int pckmo_verify_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
+ t = (struct protaeskeytoken *)key;
+ rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Wrapper functions used for the pkey handler struct
+ */
+
+static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
+ size_t _nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+{
+ return pckmo_key2protkey(key, keylen,
+ protkey, protkeylen, keyinfo);
+}
+
+static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 _keybitsize, u32 _flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ return pckmo_gen_protkey(keytype, keysubtype,
+ keybuf, keybuflen, keyinfo);
+}
+
+static int pkey_pckmo_verifykey(const u8 *key, u32 keylen,
+ u16 *_card, u16 *_dom,
+ u32 *_keytype, u32 *_keybitsize, u32 *_flags)
+{
+ return pckmo_verify_key(key, keylen);
+}
+
+static struct pkey_handler pckmo_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY PCKMO handler",
+ .is_supported_key = is_pckmo_key,
+ .is_supported_keytype = is_pckmo_keytype,
+ .key_to_protkey = pkey_pckmo_key2protkey,
+ .gen_key = pkey_pckmo_gen_key,
+ .verify_key = pkey_pckmo_verifykey,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_pckmo_init(void)
+{
+ cpacf_mask_t func_mask;
+
+ /*
+ * The pckmo instruction should be available - even if we don't
+	 * actually invoke it. This instruction comes with MSA 3, which
+	 * is also the minimum level for the kmc instructions that are
+	 * able to work with protected keys.
+ */
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
+ return -ENODEV;
+
+ /* register this module as pkey handler for all the pckmo stuff */
+ return pkey_handler_register(&pckmo_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_pckmo_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&pckmo_handler);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init);
+module_exit(pkey_pckmo_exit);
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
new file mode 100644
index 000000000000..cc0fc1e264bd
--- /dev/null
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey module sysfs related functions
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/sysfs.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+#include "pkey_base.h"
+
+/*
+ * Wrapper around pkey_handler_gen_key() which handles an ENODEV
+ * return code by requesting a load of the pkey handler modules
+ * and then retrying the operation.
+ */
+static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ int rc;
+
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ }
+
+ return rc;
+}
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct protaeskeytoken protkeytoken;
+ struct pkey_protkey protkey;
+ int rc;
+
+ if (off != 0 || count < sizeof(protkeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(protkeytoken))
+ return -EINVAL;
+
+ memset(&protkeytoken, 0, sizeof(protkeytoken));
+ protkeytoken.type = TOKTYPE_NON_CCA;
+ protkeytoken.version = TOKVER_PROTECTED_KEY;
+ protkeytoken.keytype = keytype;
+
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+ if (is_xts) {
+ /* xts needs a second protected key, reuse protkey struct */
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+ sizeof(protkeytoken));
+
+ return 2 * sizeof(protkeytoken);
+ }
+
+ return sizeof(protkeytoken);
+}
+
+/*
+ * Sysfs attribute read function for the AES XTS prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_XTS_128:
+ protlen = 64;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ protlen = 96;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+/*
+ * Sysfs attribute read function for the HMAC prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_HMAC_512:
+ protlen = 96;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ protlen = 160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_xts_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128,
+ buf, off, count);
+}
+
+static ssize_t protkey_aes_xts_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_512_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_1024_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024,
+ buf, off, count);
+}
+
+static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static struct bin_attribute *protkey_attrs[] = {
+ &bin_attr_protkey_aes_128,
+ &bin_attr_protkey_aes_192,
+ &bin_attr_protkey_aes_256,
+ &bin_attr_protkey_aes_128_xts,
+ &bin_attr_protkey_aes_256_xts,
+ &bin_attr_protkey_aes_xts_128,
+ &bin_attr_protkey_aes_xts_256,
+ &bin_attr_protkey_hmac_512,
+ &bin_attr_protkey_hmac_1024,
+ NULL
+};
+
+static struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs = protkey_attrs,
+};
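A userspace sketch (sysfs path assumed from the pkey misc device; not part of the patch) fetching a fresh random AES-256 protected key token; each read yields a new token and partial reads fail with EINVAL:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[512];	/* large enough for one token */
		ssize_t n;
		int fd;

		fd = open("/sys/devices/virtual/misc/pkey/protkey/protkey_aes_256",
			  O_RDONLY);
		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf));
		close(fd);
		if (n < 0)
			return 1;
		printf("got %zd byte protected key token\n", n);
		return 0;
	}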
+
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
+ u32 buflen;
+ int rc;
+
+ if (off != 0 || count < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ seckey++;
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ return 2 * sizeof(struct secaeskeytoken);
+ }
+
+ return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static struct bin_attribute *ccadata_attrs[] = {
+ &bin_attr_ccadata_aes_128,
+ &bin_attr_ccadata_aes_192,
+ &bin_attr_ccadata_aes_256,
+ &bin_attr_ccadata_aes_128_xts,
+ &bin_attr_ccadata_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs = ccadata_attrs,
+};
+
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = CCACIPHERTOKENSIZE;
+ int rc;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * CCACIPHERTOKENSIZE;
+ }
+
+ return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE, which is currently
+ * 336 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = MAXEP11AESKEYBLOBSIZE;
+ int rc;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
+const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
+ &ccacipher_attr_group,
+ &ep11_attr_group,
+ NULL,
+};
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 74036886ca87..f9a47b54c51a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -715,7 +715,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -819,7 +819,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -940,8 +940,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcrb->user_defined, *domain);
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -991,7 +991,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb)
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -1138,15 +1138,13 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
- pr_debug("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int)targets->ap_id,
- (int)targets->dom_id);
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ (int)targets->ap_id, (int)targets->dom_id);
} else if (targets) {
- pr_debug("%s no match for %d target addrs => ENODEV\n",
- __func__, (int)target_num);
+ pr_debug("no match for %d target addrs => ENODEV\n",
+ (int)target_num);
} else {
- pr_debug("%s no match for address ff.ffff => ENODEV\n",
- __func__);
+ pr_debug("no match for address ff.ffff => ENODEV\n");
}
rc = -ENODEV;
goto out_free;
@@ -1195,7 +1193,7 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -1247,7 +1245,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -2037,8 +2035,7 @@ int zcrypt_wait_api_operational(void)
break;
default:
/* other failure */
- pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
- __func__, rc);
+ pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
break;
}
break;
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 7bef2cc4e461..43a27cb3db84 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -172,7 +172,7 @@ EXPORT_SYMBOL(cca_check_secaescipherkey);
* key token. Returns 0 on success or errno value on failure.
*/
int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
- const u8 *token, size_t keysize,
+ const u8 *token, u32 keysize,
int checkcpacfexport)
{
struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
@@ -187,7 +187,7 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
}
if (t->len > keysize) {
if (dbg)
- DBF("%s token check failed, len %d > keysize %zu\n",
+ DBF("%s token check failed, len %d > keysize %u\n",
__func__, (int)t->len, keysize);
return -EINVAL;
}
@@ -737,7 +737,7 @@ static const u8 aes_cipher_key_skeleton[] = {
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+ u8 *keybuf, u32 *keybufsize)
{
int rc;
u8 *mem, *ptr;
@@ -1085,7 +1085,7 @@ out:
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize)
{
int rc;
u8 *token;
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 5ddf02f965f9..aed7e8384542 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -153,7 +153,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
* key token. Returns 0 on success or errno value on failure.
*/
int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
- const u8 *token, size_t keysize,
+ const u8 *token, u32 keysize,
int checkcpacfexport);
/*
@@ -178,7 +178,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize);
+ u8 *keybuf, u32 *keybufsize);
/*
 * Derive protected key from CCA AES cipher secure key.
@@ -190,7 +190,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize);
/*
 * Derive protected key from CCA ECC secure private key.
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index b43db17a4e0e..cb7e6da43602 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -203,7 +203,7 @@ out:
* For valid ep11 keyblobs, returns a reference to the wrappingkey verification
* pattern. Otherwise NULL.
*/
-const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
+const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen)
{
struct ep11keyblob *kb;
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ep11_kb_wkvp);
* Simple check if the key blob is a valid EP11 AES key blob with header.
*/
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -225,7 +225,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
@@ -250,7 +250,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
}
if (hdr->len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
* Simple check if the key blob is a valid EP11 ECC key blob with header.
*/
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -292,7 +292,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
@@ -317,7 +317,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
}
if (hdr->len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
@@ -352,14 +352,14 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
* the header in the session field (old style EP11 AES key).
*/
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11keyblob *kb = (struct ep11keyblob *)key;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*kb));
return -EINVAL;
}
@@ -378,7 +378,7 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
}
if (kb->head.len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)kb->head.len, keylen);
return -EINVAL;
}
@@ -932,7 +932,7 @@ out:
}
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize, u32 keybufver)
+ u8 *keybuf, u32 *keybufsize, u32 keybufver)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
@@ -1256,7 +1256,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
const u8 *enckey, size_t enckeysize,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize,
+ u8 *keybuf, u32 *keybufsize,
u8 keybufver)
{
struct ep11kblob_header *hdr;
@@ -1412,7 +1412,7 @@ out:
}
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
u32 keytype)
{
int rc;
@@ -1471,7 +1471,7 @@ out:
EXPORT_SYMBOL(ep11_clr2keyblob);
int ep11_kblob2protkey(u16 card, u16 dom,
- const u8 *keyblob, size_t keybloblen,
+ const u8 *keyblob, u32 keybloblen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct ep11kblob_header *hdr;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index 9d17fd5228a7..9f1bdffdec68 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -54,7 +54,7 @@ static inline bool is_ep11_keyblob(const u8 *key)
* For valid ep11 keyblobs, returns a reference to the wrappingkey verification
* pattern. Otherwise NULL.
*/
-const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
+const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen);
/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
@@ -63,7 +63,7 @@ const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
* Returns 0 on success or errno value on failure.
*/
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/*
* Simple check if the key blob is a valid EP11 ECC key blob with header.
@@ -72,7 +72,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
* Returns 0 on success or errno value on failure.
*/
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/*
* Simple check if the key blob is a valid EP11 AES key blob with
@@ -82,7 +82,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
* Returns 0 on success or errno value on failure.
*/
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/* EP11 card info struct */
struct ep11_card_info {
@@ -115,13 +115,13 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
* Generate (random) EP11 AES secure key.
*/
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize, u32 keybufver);
+ u8 *keybuf, u32 *keybufsize, u32 keybufver);
/*
* Generate EP11 AES secure key with given clear key value.
*/
int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
u32 keytype);
/*
@@ -149,7 +149,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
/*
 * Derive protected key from EP11 key blob (AES and ECC keys).
*/
-int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
void zcrypt_ep11misc_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 3b39cb8f926d..adc65eddaa1e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -487,8 +487,8 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send me cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -537,8 +537,8 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 215f257d2360..b64c9d9fc613 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -437,9 +437,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
- pr_debug("%s unknown CPRB minor version '%c%c'\n",
- __func__, msg->cprbx.func_id[0],
- msg->cprbx.func_id[1]);
+ pr_debug("unknown CPRB minor version '%c%c'\n",
+ msg->cprbx.func_id[0], msg->cprbx.func_id[1]);
}
/* copy data block */
@@ -629,9 +628,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
- pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_control_blk_length,
- msg->fmt2.count1);
+ pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_control_blk_length, msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +640,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
- pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_data_length,
- msg->fmt2.count2);
+ pr_debug("reply_data_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_data_length, msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +670,8 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
- pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcrb->resp_len,
- msg->fmt2.count1);
+ pr_debug("resp_len %u < required %u => EMSGSIZE\n",
+ (unsigned int)xcrb->resp_len, msg->fmt2.count1);
return -EMSGSIZE;
}
@@ -875,8 +871,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -890,8 +885,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -941,8 +935,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -1154,8 +1147,8 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
out:
if (rc)
- pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -1277,8 +1270,8 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
out:
if (rc)
- pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index cea3a79d538e..0e10502660de 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -157,7 +157,6 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
}
ncmd->status = 0;
- ncmd->message = 0;
}
static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
@@ -199,7 +198,6 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
* Polls the chip in a reasonably efficient manner waiting for an
* event to occur. After a short quick poll we begin to yield the CPU
* (if possible). In irq contexts the time-out is arbitrarily limited.
- * Callers may hold locks as long as they are held in irq mode.
*
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
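
The surviving comment spells out the helper's contract: 0 when the awaited
event occurred, -ETIMEDOUT otherwise. A minimal caller sketch, using only
names already present in this driver:

        /* Wait up to one second for the target to assert REQ; the helper
         * yields the CPU where it can, per the comment above. */
        if (NCR5380_poll_politely(hostdata, STATUS_REG,
                                  SR_REQ, SR_REQ, HZ) < 0)
                return -ETIMEDOUT;      /* REQ never arrived */
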
@@ -1228,24 +1226,15 @@ out:
return ret;
}
-/*
- * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using polled I/O
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer,
- * can_sleep - 1 or 0 when sleeping is permitted or not, respectively.
- *
- * Returns : -1 when different phase is entered without transferring
- * maximum number of bytes, 0 if all bytes are transferred or exit
- * is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
+/**
+ * NCR5380_transfer_pio() - transfers data in given phase using polled I/O
+ * @instance: instance of driver
+ * @phase: pointer to what phase is expected
+ * @count: pointer to number of bytes to transfer
+ * @data: pointer to data pointer
+ * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively
*
- * XXX Note : handling for bus free may be useful.
+ * Returns: void. *phase, *count, *data are modified in place.
*/
/*
@@ -1254,9 +1243,9 @@ out:
* counts, we will always do a pseudo DMA or DMA transfer.
*/
-static int NCR5380_transfer_pio(struct Scsi_Host *instance,
- unsigned char *phase, int *count,
- unsigned char **data, unsigned int can_sleep)
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
@@ -1277,8 +1266,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- HZ * can_sleep) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ | SR_BSY,
+ SR_REQ | SR_BSY, HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1329,17 +1318,19 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
-/*
- * We have several special cases to consider during REQ/ACK handshaking :
- * 1. We were in MSGOUT phase, and we are on the last byte of the
- * message. ATN must be dropped as ACK is dropped.
- *
- * 2. We are in a MSGIN phase, and we are on the last byte of the
- * message. We must exit with ACK asserted, so that the calling
- * code may raise ATN before dropping ACK to reject the message.
- *
- * 3. ACK and ATN are clear and the target may proceed as normal.
- */
+ /*
+ * We have several special cases to consider during REQ/ACK
+ * handshaking:
+ *
+ * 1. We were in MSGOUT phase, and we are on the last byte of
+ * the message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
if (!(p == PHASE_MSGIN && c == 1)) {
if (p == PHASE_MSGOUT && c > 1)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1361,11 +1352,6 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
*phase = tmp & PHASE_MASK;
else
*phase = PHASE_UNKNOWN;
-
- if (!c || (*phase == p))
- return 0;
- else
- return -1;
}
/**
@@ -1485,6 +1471,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
unsigned char **data)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
int c = *count;
unsigned char p = *phase;
unsigned char *d = *data;
@@ -1496,7 +1483,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
return -1;
}
- NCR5380_to_ncmd(hostdata->connected)->phase = p;
+ ncmd->phase = p;
if (p & SR_IO) {
if (hostdata->read_overruns)
@@ -1574,79 +1561,80 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
/* The result is zero iff pseudo DMA send/receive was completed. */
hostdata->dma_len = c;
-/*
- * A note regarding the DMA errata workarounds for early NMOS silicon.
- *
- * For DMA sends, we want to wait until the last byte has been
- * transferred out over the bus before we turn off DMA mode. Alas, there
- * seems to be no terribly good way of doing this on a 5380 under all
- * conditions. For non-scatter-gather operations, we can wait until REQ
- * and ACK both go false, or until a phase mismatch occurs. Gather-sends
- * are nastier, since the device will be expecting more data than we
- * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
- * could test Last Byte Sent to assure transfer (I imagine this is precisely
- * why this signal was added to the newer chips) but on the older 538[01]
- * this signal does not exist. The workaround for this lack is a watchdog;
- * we bail out of the wait-loop after a modest amount of wait-time if
- * the usual exit conditions are not met. Not a terribly clean or
- * correct solution :-%
- *
- * DMA receive is equally tricky due to a nasty characteristic of the NCR5380.
- * If the chip is in DMA receive mode, it will respond to a target's
- * REQ by latching the SCSI data into the INPUT DATA register and asserting
- * ACK, even if it has _already_ been notified by the DMA controller that
- * the current DMA transfer has completed! If the NCR5380 is then taken
- * out of DMA mode, this already-acknowledged byte is lost. This is
- * not a problem for "one DMA transfer per READ command", because
- * the situation will never arise... either all of the data is DMA'ed
- * properly, or the target switches to MESSAGE IN phase to signal a
- * disconnection (either operation bringing the DMA to a clean halt).
- * However, in order to handle scatter-receive, we must work around the
- * problem. The chosen fix is to DMA fewer bytes, then check for the
- * condition before taking the NCR5380 out of DMA mode. One or two extra
- * bytes are transferred via PIO as necessary to fill out the original
- * request.
- */
-
- if (hostdata->flags & FLAG_DMA_FIXUP) {
- if (p & SR_IO) {
- /*
- * The workaround was to transfer fewer bytes than we
- * intended to with the pseudo-DMA read function, wait for
- * the chip to latch the last byte, read it, and then disable
- * pseudo-DMA mode.
- *
- * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
- * REQ is deasserted when ACK is asserted, and not reasserted
- * until ACK goes false. Since the NCR5380 won't lower ACK
- * until DACK is asserted, which won't happen unless we twiddle
- * the DMA port or we take the NCR5380 out of DMA mode, we
- * can guarantee that we won't handshake another extra
- * byte.
- */
+ /*
+ * A note regarding the DMA errata workarounds for early NMOS silicon.
+ *
+ * For DMA sends, we want to wait until the last byte has been
+ * transferred out over the bus before we turn off DMA mode. Alas, there
+ * seems to be no terribly good way of doing this on a 5380 under all
+ * conditions. For non-scatter-gather operations, we can wait until REQ
+ * and ACK both go false, or until a phase mismatch occurs. Gather-sends
+ * are nastier, since the device will be expecting more data than we
+ * are prepared to send it, and REQ will remain asserted. On a 53C8[01]
+ * we could test Last Byte Sent to assure transfer (I imagine this is
+ * precisely why this signal was added to the newer chips) but on the
+ * older 538[01] this signal does not exist. The workaround for this
+ * lack is a watchdog; we bail out of the wait-loop after a modest
+ * amount of wait-time if the usual exit conditions are not met.
+ * Not a terribly clean or correct solution :-%
+ *
+ * DMA receive is equally tricky due to a nasty characteristic of the
+ * NCR5380. If the chip is in DMA receive mode, it will respond to a
+ * target's REQ by latching the SCSI data into the INPUT DATA register
+ * and asserting ACK, even if it has _already_ been notified by the
+ * DMA controller that the current DMA transfer has completed! If the
+ * NCR5380 is then taken out of DMA mode, this already-acknowledged
+ * byte is lost.
+ *
+ * This is not a problem for "one DMA transfer per READ
+ * command", because the situation will never arise... either all of
+ * the data is DMA'ed properly, or the target switches to MESSAGE IN
+ * phase to signal a disconnection (either operation bringing the DMA
+ * to a clean halt). However, in order to handle scatter-receive, we
+ * must work around the problem. The chosen fix is to DMA fewer bytes,
+ * then check for the condition before taking the NCR5380 out of DMA
+ * mode. One or two extra bytes are transferred via PIO as necessary
+ * to fill out the original request.
+ */
- if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ, BASR_DRQ, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
- }
- if (NCR5380_poll_politely(hostdata, STATUS_REG,
- SR_REQ, 0, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
- }
- d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
- } else {
- /*
- * Wait for the last byte to be sent. If REQ is being asserted for
- * the byte we're interested, we'll ACK it and it will go false.
- */
- if (NCR5380_poll_politely2(hostdata,
- BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
+ if ((hostdata->flags & FLAG_DMA_FIXUP) &&
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+ /*
+ * The workaround was to transfer fewer bytes than we
+ * intended to with the pseudo-DMA receive function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ *
+ * If sending, wait for the last byte to be sent. If REQ is
+ * being asserted for the byte we're interested in, we'll ACK it
+ * and it will go false.
+ */
+ if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ, BASR_DRQ, 0)) {
+ if ((p & SR_IO) &&
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+ if (!NCR5380_poll_politely(hostdata, STATUS_REG,
+ SR_REQ, 0, 0)) {
+ d[c] = NCR5380_read(INPUT_DATA_REG);
+ --ncmd->this_residual;
+ } else {
+ result = -1;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "PDMA fixup: !REQ timeout\n");
+ }
}
+ } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) {
+ result = -1;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "PDMA fixup: DRQ timeout\n");
}
}
@@ -1666,9 +1654,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Side effects : SCSI things happen, the disconnected queue will be
* modified if a command disconnects, *instance->connected will
* change.
- *
- * XXX Note : we need to watch for bus free or a reset condition here
- * to recover from an unexpected bus free condition.
*/
static void NCR5380_information_transfer(struct Scsi_Host *instance)
@@ -1807,9 +1792,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
return;
case PHASE_MSGIN:
len = 1;
+ tmp = 0xff;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- ncmd->message = tmp;
+ if (tmp == 0xff)
+ break;
switch (tmp) {
case ABORT:
@@ -1996,6 +1983,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
break;
case PHASE_STATIN:
len = 1;
+ tmp = ncmd->status;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
ncmd->status = tmp;
@@ -2005,9 +1993,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} else {
+ int err;
+
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG,
+ SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
+
+ if (err < 0 && hostdata->connected &&
+ !(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "BSY signal lost\n");
+ do_reset(instance);
+ bus_reset_cleanup(instance);
+ }
}
}
}
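
The last hunk above adds recovery for a lost BSY signal: when the
one-second REQ poll times out while a command is still connected and the
bus shows BSY deasserted, the target has silently dropped off the bus.
Condensed shape of that recovery (names from this driver):

        err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
        if (err < 0 && hostdata->connected &&
            !(NCR5380_read(STATUS_REG) & SR_BSY)) {
                /* target vanished mid-command: reset the bus, fail I/O */
                do_reset(instance);
                bus_reset_cleanup(instance);
        }
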
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 8dc2be4212dc..d402d4bffcb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -3,10 +3,10 @@
* NCR 5380 defines
*
* Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
*
* For more information, please consult
*
@@ -78,7 +78,7 @@
#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
-#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
@@ -135,7 +135,7 @@
#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
-#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ATN 0x02 /* ro BUS status */
#define BASR_ACK 0x01 /* ro BUS status */
/* Write any value to this register to start a DMA send */
@@ -170,7 +170,7 @@
#define CSR_BASE CSR_53C80_INTR
/* Note : PHASE_* macros are based on the values of the STATUS register */
-#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
#define PHASE_DATAOUT 0
#define PHASE_DATAIN SR_IO
@@ -231,7 +231,6 @@ struct NCR5380_cmd {
int this_residual;
struct scatterlist *buffer;
int status;
- int message;
int phase;
struct list_head list;
};
@@ -286,8 +285,9 @@ static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data,
- unsigned int can_sleep);
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data, unsigned int can_sleep);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
unsigned int, u8, u8,
unsigned int, u8, u8, unsigned long);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index b22857c6f3f4..ec3834bda111 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1267,7 +1267,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
- ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+ (le32_to_cpu(readcmd->sg.count) * sizeof(struct sgentryraw));
}
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1302,7 +1302,7 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_read64) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
+ (le32_to_cpu(readcmd->sg.count) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1337,7 +1337,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_read) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
+ (le32_to_cpu(readcmd->sg.count) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1401,7 +1401,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
- ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+ (le32_to_cpu(writecmd->sg.count) * sizeof(struct sgentryraw));
}
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1436,7 +1436,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_write64) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
+ (le32_to_cpu(writecmd->sg.count) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1473,7 +1473,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_write) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
+ (le32_to_cpu(writecmd->sg.count) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1592,9 +1592,9 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
/*
* Build Scatter/Gather list
*/
- fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+ fibsize = sizeof(struct aac_srb) +
((le32_to_cpu(srbcmd->sg.count) & 0xff) *
- sizeof (struct sgentry64));
+ sizeof(struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1624,7 +1624,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) +
- (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+ ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1693,8 +1693,7 @@ static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
fibptr->hw_fib_va->header.XferState &=
~cpu_to_le32(FastResponseCapable);
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
- sizeof(struct sgentry64);
+ fibsize = sizeof(struct aac_srb) + sizeof(struct sgentry64);
/* allocate DMA buffer for response */
addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
@@ -1833,7 +1832,7 @@ static int aac_get_safw_ciss_luns(struct aac_dev *dev)
struct aac_ciss_phys_luns_resp *phys_luns;
datasize = sizeof(struct aac_ciss_phys_luns_resp) +
- (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ AAC_MAX_TARGETS * sizeof(struct _ciss_lun);
phys_luns = kmalloc(datasize, GFP_KERNEL);
if (phys_luns == NULL)
goto out;
@@ -2267,7 +2266,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->a_ops.adapter_bounds = aac_bounds_32;
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct aac_write)) /
sizeof(struct sgentry);
if (dev->dac_support) {
dev->a_ops.adapter_read = aac_read_block64;
@@ -2278,8 +2277,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->scsi_host_ptr->sg_tablesize =
(dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write64) +
- sizeof(struct sgentry64)) /
+ sizeof(struct aac_write64)) /
sizeof(struct sgentry64);
} else {
dev->a_ops.adapter_read = aac_read_block;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 7d5a155073c6..1d09d3ac6aa4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -322,7 +322,7 @@ struct aac_ciss_phys_luns_resp {
u8 level3[2];
u8 level2[2];
u8 node_ident[16]; /* phys. node identifier */
- } lun[1]; /* List of phys. devices */
+ } lun[]; /* List of phys. devices */
};
/*
@@ -507,32 +507,27 @@ struct sge_ieee1212 {
struct sgmap {
__le32 count;
- struct sgentry sg[1];
+ struct sgentry sg[];
};
struct user_sgmap {
u32 count;
- struct user_sgentry sg[1];
+ struct user_sgentry sg[];
};
struct sgmap64 {
__le32 count;
- struct sgentry64 sg[1];
+ struct sgentry64 sg[];
};
struct user_sgmap64 {
u32 count;
- struct user_sgentry64 sg[1];
+ struct user_sgentry64 sg[];
};
struct sgmapraw {
__le32 count;
- struct sgentryraw sg[1];
-};
-
-struct user_sgmapraw {
- u32 count;
- struct user_sgentryraw sg[1];
+ struct sgentryraw sg[];
};
struct creation_info
@@ -873,7 +868,7 @@ union aac_init
__le16 element_count;
__le16 comp_thresh;
__le16 unused;
- } rrq[1]; /* up to 64 RRQ addresses */
+ } rrq[] __counted_by_le(rr_queue_count); /* up to 64 RRQ addresses */
} r8;
};
@@ -2029,8 +2024,8 @@ struct aac_srb_reply
};
struct aac_srb_unit {
- struct aac_srb srb;
struct aac_srb_reply srb_reply;
+ struct aac_srb srb;
};
/*
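
The aacraid hunks above and below convert one-element trailing arrays to
C99 flexible array members, which is why the assorted "- 1" corrections
vanish from the size arithmetic: sizeof() no longer counts a phantom first
element. An illustrative helper (not in the patch) showing the equivalent,
overflow-checked sizing with struct_size() from <linux/overflow.h>:

        /* sizeof(struct sgmap) now covers only the header, so the total
         * is the header plus nents full entries; struct_size() computes
         * that with overflow checking. */
        static size_t sgmap_bytes(const struct sgmap *map, u32 nents)
        {
                return struct_size(map, sg, nents);
        }
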
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e7cc927ed952..68240d6f27ab 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -523,7 +523,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+ if ((fibsize < sizeof(struct user_aac_srb)) ||
(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
rcode = -EINVAL;
goto cleanup;
@@ -561,7 +561,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ actual_fibsize = sizeof(struct aac_srb) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0f64b0244303..28cf18955a08 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -522,8 +522,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
spin_lock_init(&dev->iq_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgentry))
+ - sizeof(struct aac_fibhdr) - sizeof(struct aac_write))
/ sizeof(struct sgentry);
dev->comm_interface = AAC_COMM_PRODUCER;
dev->raw_io_interface = dev->raw_io_64 = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 25cee03d7f97..47287559c768 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2327,8 +2327,9 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
sg64->sg[0].count = cpu_to_le32(datasize);
- ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
- FsaNormal, 1, 1, NULL, NULL);
+ ret = aac_fib_send(ScsiPortCommand64, fibptr,
+ sizeof(struct aac_srb) + sizeof(struct sgentry),
+ FsaNormal, 1, 1, NULL, NULL);
dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 11ef58204e96..28115ed637e8 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -410,7 +410,7 @@ static void aac_src_start_adapter(struct aac_dev *dev)
lower_32_bits(dev->init_pa),
upper_32_bits(dev->init_pa),
sizeof(struct _r8) +
- (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+ AAC_MAX_HRRQ * sizeof(struct _rrq),
0, 0, 0, NULL, NULL, NULL, NULL, NULL);
} else {
init->r7.host_elapsed_seconds =
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 06acb5ff609e..76a1e373386e 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5528,7 +5528,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
struct beiscsi_hba *phba = NULL;
struct be_eq_obj *pbe_eq;
unsigned int s_handle;
- char wq_name[20];
int ret, i;
ret = beiscsi_enable_pci(pcidev);
@@ -5634,9 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
- phba->shost->host_no);
- phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
+ phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+ phba->shost->host_no);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 5023c0ab4277..e52ce9b01f49 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1431,7 +1431,7 @@ bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
* param[in] vf_id - VF_ID
*
* return
- * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
+ * If lookup succeeds, returns fcs vf object, otherwise returns NULL
*/
bfa_fcs_vf_t *
bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index a9d3d8562d3c..66fb701401de 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -766,9 +766,8 @@ bfad_thread_workq(struct bfad_s *bfad)
struct bfad_im_s *im = bfad->im;
bfa_trc(bfad, 0);
- snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
- bfad->inst_no);
- im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
+ im->drv_workq = alloc_ordered_workqueue("bfad_wq_%d", WQ_MEM_RECLAIM,
+ bfad->inst_no);
if (!im->drv_workq)
return BFA_STATUS_FAILED;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4353feedf76a..0884af04bd1f 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -134,7 +134,6 @@ struct bfad_fcp_binding {
struct bfad_im_s {
struct bfad_s *bfad;
struct workqueue_struct *drv_workq;
- char drv_workq_name[KOBJ_NAME_LEN];
struct work_struct aen_im_notify_work;
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 7e74f77da14f..6d47a4d8eed6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -358,18 +358,12 @@ struct bnx2fc_rport {
dma_addr_t lcq_dma;
u32 lcq_mem_size;
- void *ofld_req[4];
- dma_addr_t ofld_req_dma[4];
- void *enbl_req;
- dma_addr_t enbl_req_dma;
-
spinlock_t tgt_lock;
spinlock_t cq_lock;
atomic_t num_active_ios;
u32 flush_in_prog;
unsigned long timestamp;
unsigned long retry_delay_timestamp;
- struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
struct list_head active_cmd_queue;
struct list_head els_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1078c20c5ef6..f49783b89d04 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2363,8 +2363,8 @@ static int _bnx2fc_create(struct net_device *netdev,
interface->vlan_id = vlan_id;
interface->tm_timeout = BNX2FC_TM_TIMEOUT;
- interface->timer_work_queue =
- create_singlethread_workqueue("bnx2fc_timer_wq");
+ interface->timer_work_queue = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, "bnx2fc_timer_wq");
if (!interface->timer_work_queue) {
printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
rc = -EINVAL;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index df7d04afce05..7030efee5c46 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -815,11 +815,6 @@ extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
u16 iscsi_cid);
-int bnx2i_alloc_ep_pool(void);
-void bnx2i_release_ep_pool(void);
-struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
-struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
-
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
@@ -869,12 +864,6 @@ extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
-/* Debug related function prototypes */
-extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
-
extern int bnx2i_percpu_io_thread(void *arg);
extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct bnx2i_conn *bnx2i_conn,
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index f8a09e3eba58..6e1b252cea0e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -822,7 +822,8 @@ static int __init rdac_init(void)
/*
* Create workqueue to handle mode selects for rdac
*/
- kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
+ kmpath_rdacd =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd");
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index 6a6ec32c46bd..9ac69356b13e 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -1114,7 +1114,8 @@ int efct_scsi_tgt_new_device(struct efct *efct)
atomic_set(&efct->tgt_efct.watermark_hit, 0);
atomic_set(&efct->tgt_efct.initiator_count, 0);
- lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ "efct_lio_worker");
if (!lio_wq) {
efc_log_err(efct, "workqueue create failed\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
index 2e83a667901f..1a7437f4328e 100644
--- a/drivers/scsi/elx/libefc/efc_nport.c
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
spin_lock_irqsave(&efc->lock, flags);
list_for_each_entry(nport, &domain->nport_list, list_entry) {
if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
- kref_put(&nport->ref, nport->release);
/* Shutdown this NPORT */
efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ kref_put(&nport->ref, nport->release);
break;
}
}
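
The reordering above reads as a use-after-free fix: had kref_put()
released the final reference first, EFC_EVT_SHUTDOWN would be posted to a
freed nport. The safe ordering, in general form:

        /* use the object while a reference is still held ... */
        efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
        /* ... and only then drop the caller's reference */
        kref_put(&nport->ref, nport->release);
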
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index ed63f7a9ea54..8a133254c4f6 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -929,7 +929,6 @@ struct esas2r_adapter {
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
- char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
/*
* intr_mode stores the interrupt mode currently being used by this
* adapter. it is based on the interrupt_mode module parameter, but
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index c1a5ab662dc8..0cea5f3d1a08 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -311,9 +311,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
sema_init(&a->nvram_semaphore, 1);
esas2r_fw_event_off(a);
- snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
- a->index);
- a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+ a->fw_event_q =
+ alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index);
init_waitqueue_head(&a->buffered_ioctl_waiter);
init_waitqueue_head(&a->nvram_waiter);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f1429f270170..39aec710660c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -722,7 +722,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
* will return 0, so do this first.
*/
mfs = netdev->mtu;
- if (netdev->features & NETIF_F_FCOE_MTU) {
+ if (netdev->fcoe_mtu) {
mfs = FCOE_MTU;
FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
}
@@ -1863,7 +1863,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case NETDEV_CHANGE:
break;
case NETDEV_CHANGEMTU:
- if (netdev->features & NETIF_F_FCOE_MTU)
+ if (netdev->fcoe_mtu)
break;
mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
sizeof(struct fcoe_crc_eof));
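
Both fcoe.c hunks test a dedicated netdev member instead of the
NETIF_F_FCOE_MTU feature bit; this presumes the companion net core change
that moved FCoE jumbo-frame capability into a boolean fcoe_mtu field of
struct net_device. Provider-side sketch under that assumption:

        /* hypothetical LLD setup: declare support for full-size
         * (2158-byte) FCoE frames */
        netdev->fcoe_mtu = true;
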
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 7d3b904af9e8..0609ca6b9353 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -45,12 +45,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
*/
#define fcoe_ctlr_id(x) \
((x)->id)
-#define fcoe_ctlr_work_q_name(x) \
- ((x)->work_q_name)
#define fcoe_ctlr_work_q(x) \
((x)->work_q)
-#define fcoe_ctlr_devloss_work_q_name(x) \
- ((x)->devloss_work_q_name)
#define fcoe_ctlr_devloss_work_q(x) \
((x)->devloss_work_q)
#define fcoe_ctlr_mode(x) \
@@ -797,18 +793,14 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
- snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
- "ctlr_wq_%d", ctlr->id);
- ctlr->work_q = create_singlethread_workqueue(
- ctlr->work_q_name);
+ ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM,
+ ctlr->id);
if (!ctlr->work_q)
goto out_del;
- snprintf(ctlr->devloss_work_q_name,
- sizeof(ctlr->devloss_work_q_name),
- "ctlr_dl_wq_%d", ctlr->id);
- ctlr->devloss_work_q = create_singlethread_workqueue(
- ctlr->devloss_work_q_name);
+ ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d",
+ WQ_MEM_RECLAIM,
+ ctlr->id);
if (!ctlr->devloss_work_q)
goto out_del_q;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 29eead383eb9..0044717d4486 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1161,14 +1161,16 @@ static int __init fnic_init_module(void)
goto err_create_fnic_ioreq_slab;
}
- fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+ fnic_event_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
if (!fnic_event_queue) {
printk(KERN_ERR PFX "fnic work queue create failed\n");
err = -ENOMEM;
goto err_create_fnic_workq;
}
- fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ fnic_fip_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q");
if (!fnic_fip_queue) {
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
err = -ENOMEM;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index ec1a3e7ee94d..6219807ce3b9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2302,7 +2302,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
hisi_hba->last_slot_index = 0;
- hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
+ hisi_hba->wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev));
if (!hisi_hba->wq) {
dev_err(dev, "sas_alloc: failed to create workqueue\n");
goto err_out;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 7f987335b44c..e021f1106bea 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -292,11 +292,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
}
if (shost->transportt->create_work_queue) {
- snprintf(shost->work_q_name, sizeof(shost->work_q_name),
- "scsi_wq_%d", shost->host_no);
- shost->work_q = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, shost->work_q_name);
+ shost->work_q = alloc_workqueue(
+ "scsi_wq_%d",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ shost->host_no);
if (!shost->work_q) {
error = -EINVAL;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2fca17cf8b51..16d085d56e9d 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3425,7 +3425,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
struct scsi_info *vscsi;
int rc = 0;
long hrc = 0;
- char wq_name[24];
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
@@ -3536,8 +3535,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
- snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
- vscsi->work_q = create_workqueue(wq_name);
+ vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+ dev_name(&vdev->dev));
if (!vscsi->work_q) {
rc = -ENOMEM;
dev_err(&vscsi->dev, "create_workqueue failed\n");
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c77d6ca1a210..b2b643c6dbbe 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1030,7 +1030,7 @@ struct ipr_hostrcb_fabric_desc {
#define IPR_PATH_FAILED 0x03
__be16 num_entries;
- struct ipr_hostrcb_config_element elem[1];
+ struct ipr_hostrcb_config_element elem[];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_fabric_desc {
@@ -1044,7 +1044,7 @@ struct ipr_hostrcb64_fabric_desc {
u8 res_path[8];
u8 reserved3[6];
__be16 num_entries;
- struct ipr_hostrcb64_config_element elem[1];
+ struct ipr_hostrcb64_config_element elem[];
}__attribute__((packed, aligned (8)));
#define for_each_hrrq(hrrq, ioa_cfg) \
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1d91c457527f..f84a7e6ae379 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2693,7 +2693,8 @@ int fc_setup_exch_mgr(void)
fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
fc_cpu_mask = (1 << fc_cpu_order) - 1;
- fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+ fc_exch_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ "fc_exch_workqueue");
if (!fc_exch_workqueue)
goto err;
return 0;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 33da3c1085f0..308cb4872f96 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2263,7 +2263,8 @@ struct fc4_prov fc_rport_t0_prov = {
*/
int fc_setup_rport(void)
{
- rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+ rport_event_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fc_rport_eq");
if (!rport_event_queue)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 88714b7b0dba..7b4e7a61965a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -564,7 +564,6 @@ static struct ata_port_operations sas_sata_ops = {
.error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.set_dmamode = sas_ata_set_dmamode,
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 9c8cc723170d..8566bb1208a0 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -122,12 +122,12 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
- sas_ha->event_q = create_singlethread_workqueue(name);
+ sas_ha->event_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!sas_ha->event_q)
goto Undo_ports;
snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
- sas_ha->disco_q = create_singlethread_workqueue(name);
+ sas_ha->disco_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!sas_ha->disco_q)
goto Undo_event_q;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7c147d6ea8a8..e5a9c5a323f8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -306,6 +306,14 @@ struct lpfc_stats {
struct lpfc_hba;
+/* Data structure to keep withheld FLOGI_ACC information */
+struct lpfc_defer_flogi_acc {
+ bool flag;
+ u16 rx_id;
+ u16 ox_id;
+ struct lpfc_nodelist *ndlp;
+
+};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@@ -1430,9 +1438,7 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
- bool defer_flogi_acc_flag;
- uint16_t defer_flogi_acc_rx_id;
- uint16_t defer_flogi_acc_ox_id;
+ struct lpfc_defer_flogi_acc defer_flogi_acc;
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
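
Folding the deferred-ACC bookkeeping into struct lpfc_defer_flogi_acc puts
an ndlp pointer next to the flag and exchange IDs, so the node reference
taken when the ACC is deferred has an owner that every teardown path can
see. The invariant the later hunks enforce, sketched:

        /* every path that clears .flag must also release the ndlp
         * reference taken when the FLOGI ACC was deferred */
        if (phba->defer_flogi_acc.ndlp) {
                lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
                phba->defer_flogi_acc.ndlp = NULL;
        }
        phba->defer_flogi_acc.flag = false;
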
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 929cbfc95163..de0ec945d2f1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1099,8 +1099,10 @@ stop_rr_fcf_flogi:
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
/* reinitialize the VMID datastructure before returning */
- if (lpfc_is_vmid_enabled(phba))
+ if (lpfc_is_vmid_enabled(phba)) {
lpfc_reinit_vmid(vport);
+ vport->vmid_flag = 0;
+ }
if (sp->cmn.priority_tagging)
vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
LPFC_VMID_TYPE_PRIO);
@@ -1390,7 +1392,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
/* Check for a deferred FLOGI ACC condition */
- if (phba->defer_flogi_acc_flag) {
+ if (phba->defer_flogi_acc.flag) {
/* lookup ndlp for received FLOGI */
ndlp = lpfc_findnode_did(vport, 0);
if (!ndlp)
@@ -1404,34 +1406,38 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
bf_set(wqe_ctxt_tag,
&defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
- phba->defer_flogi_acc_rx_id);
+ phba->defer_flogi_acc.rx_id);
bf_set(wqe_rcvoxid,
&defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
- phba->defer_flogi_acc_ox_id);
+ phba->defer_flogi_acc.ox_id);
} else {
icmd = &defer_flogi_acc.iocb;
- icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->ulpContext = phba->defer_flogi_acc.rx_id;
icmd->unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ phba->defer_flogi_acc.ox_id;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc_rx_id,
- phba->defer_flogi_acc_ox_id, phba->hba_flag);
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id, phba->hba_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
ndlp, NULL);
- phba->defer_flogi_acc_flag = false;
- vport->fc_myDID = did;
+ phba->defer_flogi_acc.flag = false;
- /* Decrement ndlp reference count to indicate the node can be
- * released when other references are removed.
+ /* Drop the ndlp reference that was taken when the deferred
+ * FLOGI ACC flag was set.
*/
- lpfc_nlp_put(ndlp);
+ if (phba->defer_flogi_acc.ndlp) {
+ lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+ phba->defer_flogi_acc.ndlp = NULL;
+ }
+
+ vport->fc_myDID = did;
}
return 0;
@@ -5240,9 +5246,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
- "Data: x%x x%x x%x\n",
- ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ "last els x%x Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
/* This clause allows the LOGO ACC to complete and free resources
* for the Fabric Domain Controller. It does deliberately skip
@@ -5254,18 +5261,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
- /* If PLOGI is being retried, PLOGI completion will cleanup the
- * node. The NLP_NPR_2B_DISC flag needs to be retained to make
- * progress on nodes discovered from last RSCN.
- */
- if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
- (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
- goto out;
-
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
+ /* If we came from a PRLO, the PRLO_ACC is done.
+ * Start rediscovery now.
+ */
+ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
}
+
out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
@@ -8454,9 +8465,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) {
- phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag,
&wqe->xmit_els_rsp.wqe_com);
- phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid,
&wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
@@ -8464,11 +8475,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3344 Deferring FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc_rx_id,
- phba->defer_flogi_acc_ox_id, phba->hba_flag);
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id, phba->hba_flag);
- phba->defer_flogi_acc_flag = true;
+ phba->defer_flogi_acc.flag = true;
+ /* This nlp_get is paired with nlp_puts that reset the
+ * defer_flogi_acc.flag back to false. We need to retain
+ * a kref on the ndlp until the deferred FLOGI ACC is
+ * processed or cancelled.
+ */
+ phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
return 0;
}
@@ -10504,7 +10521,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
/* retain node if our response is deferred */
- if (phba->defer_flogi_acc_flag)
+ if (phba->defer_flogi_acc.flag)
break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -10742,7 +10759,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6943f6c6395c..35c9181c6608 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -175,7 +175,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down. */
- if (test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+ !test_bit(HBA_SETUP, &phba->hba_flag)) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
@@ -1254,7 +1255,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
offline = pci_channel_offline(phba->pcidev);
- phba->defer_flogi_acc_flag = false;
+ /* Drop the held ndlp reference if there is a deferred FLOGI ACC */
+ if (phba->defer_flogi_acc.flag) {
+ if (phba->defer_flogi_acc.ndlp) {
+ lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+ phba->defer_flogi_acc.ndlp = NULL;
+ }
+ }
+ phba->defer_flogi_acc.flag = false;
/* Clear external loopback plug detected flag */
phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
@@ -1376,7 +1384,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- if (phba->defer_flogi_acc_flag) {
+ if (phba->defer_flogi_acc.flag) {
clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
clear_bit(FC_RSCN_MODE, &vport->fc_flag);
clear_bit(FC_NLP_MORE, &vport->fc_flag);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e1dfa96c2a55..50620918becd 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -13861,12 +13861,7 @@ fcponly:
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
- rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "6400 Can't set dma maximum segment size\n");
- return rc;
- }
+ dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
/*
* Check whether the adapter supports an embedded copy of the
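
Dropping the error path here assumes the dma-mapping API change that
turned dma_set_max_seg_size() into a simple setter that cannot fail; under
that assumption the call reduces to:

        /* records the segment-size cap; nothing to check */
        dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
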
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f6a53446e57f..4574716c8764 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2652,8 +2652,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* flush the target */
lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- /* Treat like rcv logo */
- lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+ /* Send PRLO_ACC */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+
+ /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
+ * lpfc_cmpl_els_logo_acc is expected to restart discovery.
+ */
+ ndlp->nlp_last_elscmd = ELS_CMD_PRLO;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_last_elscmd,
+ kref_read(&ndlp->kref));
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
return ndlp->nlp_state;
}
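
Instead of treating a PRLO in MAPPED state like a LOGO, the node now ACKs
the PRLO, records ELS_CMD_PRLO as its last ELS command, and parks in NPR
state; the lpfc_cmpl_els_logo_acc() hunk earlier in this series then
restarts discovery with a fresh PLOGI. That completion-side restart,
condensed (the ndlp->lock taken around the flag update is elided here):

        if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;      /* rediscover */
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
                lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
        }
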
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98ce9d97a225..60cd60ebff38 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5555,11 +5555,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
- if (!pring_s4) {
+ /* if the io_wq & pring are gone, the port was reset. */
+ if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq ||
+ !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "2877 SCSI Layer I/O Abort Request "
+ "IO CMPL Status x%x ID %d LUN %llu "
+ "HBA_SETUP %d\n", FAILED,
+ cmnd->device->id,
+ (u64)cmnd->device->lun,
+ test_bit(HBA_SETUP, &phba->hba_flag));
ret = FAILED;
goto out_unlock_hba;
}
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 88debef2fb6d..332b8d2348e9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4687,6 +4687,17 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ if (!phba->sli4_hba.hdwq ||
+ !phba->sli4_hba.hdwq[i].io_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "7777 hdwq's deleted %lx "
+ "%lx %x %x\n",
+ phba->pport->load_flag,
+ phba->hba_flag,
+ phba->link_state,
+ phba->sli.sli_flag);
+ return;
+ }
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
spin_lock_irq(&pring->ring_lock);
@@ -12473,8 +12484,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
cmdiocb->iocb.ulpClass,
LPFC_WQE_CQ_ID_DEFAULT, ia, false);
- abtsiocbp->vport = vport;
-
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
if (cmdiocb->cmd_flag & LPFC_IO_FCP)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 7ac9ef281881..2fe0386a1fee 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.3"
+#define LPFC_DRIVER_VERSION "14.4.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index 773e02ae20c3..cc3e4736f2fe 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -321,6 +321,5 @@ lpfc_reinit_vmid(struct lpfc_vport *vport)
if (!hash_empty(vport->hash_table))
hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
hash_del(&cur->hnode);
- vport->vmid_flag = 0;
write_unlock(&vport->vmid_lock);
}
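
lpfc_reinit_vmid() now only flushes the VMID hash table; clearing
vmid_flag became the caller's job, as in the FLOGI completion hunk earlier
in this series. That keeps the flag write outside vmid_lock and lets
callers that only want the table rebuilt preserve their flags. Caller-side
ordering as used above:

        if (lpfc_is_vmid_enabled(phba)) {
                lpfc_reinit_vmid(vport);        /* flush table under vmid_lock */
                vport->vmid_flag = 0;           /* reset flags afterwards */
        }
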
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 53ee8f84d094..f225bb20aa22 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup);
* Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
* so bus errors are unavoidable.
*
- * If a MOVE.B instruction faults, we assume that zero bytes were transferred
- * and simply retry. That assumption probably depends on target behaviour but
- * seems to hold up okay. The NOP provides synchronization: without it the
- * fault can sometimes occur after the program counter has moved past the
- * offending instruction. Post-increment addressing can't be used.
+ * If a MOVE.B instruction faults during a receive operation, we assume the
+ * target sent nothing and try again. That assumption probably depends on
+ * target firmware but it seems to hold up okay. If a fault happens during a
+ * send operation, the target may or may not have seen /ACK and got the byte.
+ * It's uncertain, so the whole SCSI command gets retried.
+ *
+ * The NOP is needed for synchronization because the fault address in the
+ * exception stack frame may or may not be that of the instruction that
+ * actually caused the bus error. Post-increment addressing can't be used.
*/
#define MOVE_BYTE(operands) \
@@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup);
".previous \n" \
: "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
-#define MAC_PDMA_DELAY 32
-
static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
{
unsigned char *addr = start;
@@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
if (n >= 1) {
MOVE_BYTE("%0@,%3@");
if (result)
- goto out;
+ return -1;
}
if (n >= 1 && ((unsigned long)addr & 1)) {
MOVE_BYTE("%0@,%3@");
if (result)
- goto out;
+ return -2;
}
while (n >= 32)
MOVE_16_WORDS("%0@+,%3@");
while (n >= 2)
MOVE_WORD("%0@+,%3@");
if (result)
- return start - addr; /* Negated to indicate uncertain length */
+ return start - addr - 1; /* Negated to indicate uncertain length */
if (n == 1)
MOVE_BYTE("%0@,%3@");
-out:
return addr - start;
}
@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
out_be32(hostdata->io + (CTRL_REG << 4), value);
}
+static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata)
+{
+ unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */
+ unsigned char basr;
+
+again:
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+
+ if (!(basr & BASR_PHASE_MATCH))
+ return 1;
+
+ if (basr & BASR_IRQ)
+ return -1;
+
+ if (basr & BASR_DRQ)
+ return 0;
+
+ if (n-- == 0) {
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: DRQ timeout\n", __func__);
+ return -1;
+ }
+
+ NCR5380_poll_politely2(hostdata,
+ BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
+ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0);
+ goto again;
+}
+
static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
- int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
- int bytes;
+ while (macscsi_wait_for_drq(hostdata) == 0) {
+ int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
+ chunk_bytes = min(hostdata->pdma_residual, 512);
+ bytes = mac_pdma_recv(s, d, chunk_bytes);
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) {
d += bytes;
@@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
}
if (hostdata->pdma_residual == 0)
- goto out;
+ break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, 0) < 0)
- scmd_printk(KERN_DEBUG, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- goto out;
+ if (bytes > 0)
+ continue;
- if (bytes == 0)
- udelay(MAC_PDMA_DELAY);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: bus error [%d/%d] (%d/%d)\n",
+ __func__, d - dst, len, bytes, chunk_bytes);
- if (bytes >= 0)
+ if (bytes == 0)
continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, d - dst, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
- goto out;
+ if (macscsi_wait_for_drq(hostdata) <= 0)
+ set_host_byte(hostdata->connected, DID_ERROR);
+ break;
}
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
-out:
- if (macintosh_config->ident == MAC_MODEL_IIFX)
- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
- return result;
+ return 0;
}
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
@@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
{
unsigned char *s = src;
u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
- int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
- int bytes;
+ while (macscsi_wait_for_drq(hostdata) == 0) {
+ int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+ chunk_bytes = min(hostdata->pdma_residual, 512);
+ bytes = mac_pdma_send(s, d, chunk_bytes);
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) {
s += bytes;
hostdata->pdma_residual -= bytes;
}
- if (hostdata->pdma_residual == 0) {
- if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
- TCR_LAST_BYTE_SENT,
- TCR_LAST_BYTE_SENT,
- 0) < 0) {
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: Last Byte Sent timeout\n", __func__);
- result = -1;
- }
- goto out;
- }
+ if (hostdata->pdma_residual == 0)
+ break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, 0) < 0)
- scmd_printk(KERN_DEBUG, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- goto out;
+ if (bytes > 0)
+ continue;
- if (bytes == 0)
- udelay(MAC_PDMA_DELAY);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: bus error [%d/%d] (%d/%d)\n",
+ __func__, s - src, len, bytes, chunk_bytes);
- if (bytes >= 0)
+ if (bytes == 0)
continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, s - src, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
- goto out;
+ if (macscsi_wait_for_drq(hostdata) <= 0)
+ set_host_byte(hostdata->connected, DID_ERROR);
+ break;
}
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
-out:
- if (macintosh_config->ident == MAC_MODEL_IIFX)
- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
- return result;
+ return 0;
}
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
@@ -432,7 +432,7 @@ static struct scsi_host_template mac_scsi_template = {
.eh_host_reset_handler = macscsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = 1,
+ .sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = sizeof(struct NCR5380_cmd),
@@ -470,6 +470,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ mac_scsi_template.sg_tablesize = 1;
+
instance = scsi_host_alloc(&mac_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance)
@@ -491,6 +494,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
+ if (instance->sg_tablesize > 1)
+ host_flags |= FLAG_DMA_FIXUP;
+
error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP);
if (error)
goto fail_init;
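
mac_pdma_send() above now encodes an uncertain transfer as start - addr - 1, so the return value is strictly negative even when the fault hits the very first byte. A hedged sketch of how a caller can decode that convention (illustrative only; the real handling lives in macscsi_pwrite()):

#include <linux/types.h>

/* Decode a mac_pdma_send()-style return value: >= 0 is an exact byte
 * count where every byte was latched by the target; < 0 means a bus
 * error where -(ret) - 1 bytes were pushed out but the fate of the
 * final byte is unknown, so the whole command must be retried. */
static int demo_decode_pdma_result(int ret, bool *retry_cmd)
{
	if (ret >= 0) {
		*retry_cmd = false;
		return ret;		/* exact count; all bytes latched */
	}
	*retry_cmd = true;		/* the last byte's fate is unknown */
	return -ret - 1;		/* bytes pushed out before the fault */
}
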
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 5680c6cdb221..088cc40ae866 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -814,12 +814,12 @@ struct MR_HOST_DEVICE_LIST {
__le32 size;
__le32 count;
__le32 reserved[2];
- struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1];
+ struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[] __counted_by_le(count);
} __packed;
#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \
(sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \
- (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
+ (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)))
/*
@@ -2473,7 +2473,7 @@ struct MR_LD_VF_MAP {
union MR_LD_REF ref;
u8 ldVfCount;
u8 reserved[6];
- u8 policy[1];
+ u8 policy[];
};
struct MR_LD_VF_AFFILIATION {
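
The [1]-sized trailing arrays above become true flexible arrays annotated with __counted_by_le(), letting fortified kernels bounds-check indexing against the little-endian count member. A sketch of allocating such a structure — the names here are hypothetical, not megaraid's, and __counted_by_le degrades to a no-op on toolchains without the attribute:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_dev {
	__le32 id;
};

struct demo_list {
	__le32 count;
	struct demo_dev entries[] __counted_by_le(count);
};

static struct demo_list *demo_alloc_list(u32 n)
{
	struct demo_list *l;

	/* struct_size() does overflow-checked header + n * entry sizing */
	l = kzalloc(struct_size(l, entries, n), GFP_KERNEL);
	if (!l)
		return NULL;
	/* set the counter before touching entries[] so the __counted_by
	 * instrumentation sees a valid bound */
	l->count = cpu_to_le32(n);
	return l;
}
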
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 6c1fb8149553..1eec23da28e2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1988,8 +1988,8 @@ megasas_fusion_start_watchdog(struct megasas_instance *instance)
sizeof(instance->fault_handler_work_q_name),
"poll_megasas%d_status", instance->host->host_no);
- instance->fw_fault_work_q =
- create_singlethread_workqueue(instance->fault_handler_work_q_name);
+ instance->fw_fault_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, instance->fault_handler_work_q_name);
if (!instance->fw_fault_work_q) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 6a19e17eb1a7..4b7a8f6314a3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1565,16 +1565,13 @@ struct mpi3_sas_io_unit0_phy_data {
__le32 reserved10;
};
-#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
-#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
-#endif
struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
u8 init_status;
__le16 reserved0e;
- struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+ struct mpi3_sas_io_unit0_phy_data phy_data[];
};
#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
@@ -1606,9 +1603,6 @@ struct mpi3_sas_io_unit1_phy_data {
__le32 reserved08;
};
-#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
-#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
-#endif
struct mpi3_sas_io_unit_page1 {
struct mpi3_config_page_header header;
__le16 control_flags;
@@ -1618,7 +1612,7 @@ struct mpi3_sas_io_unit_page1 {
u8 num_phys;
u8 sata_max_q_depth;
__le16 reserved12;
- struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+ struct mpi3_sas_io_unit1_phy_data phy_data[];
};
#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 028784949873..c9fa0d69b75f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -453,9 +453,6 @@ struct mpi3_event_data_sas_notify_primitive {
#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
-#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
-#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
-#endif
struct mpi3_event_sas_topo_phy_entry {
__le16 attached_dev_handle;
u8 link_rate;
@@ -496,7 +493,7 @@ struct mpi3_event_data_sas_topology_change_list {
u8 start_phy_num;
u8 exp_status;
u8 io_unit_port;
- struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+ struct mpi3_event_sas_topo_phy_entry phy_entry[] __counted_by(num_entries);
};
#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
@@ -545,9 +542,6 @@ struct mpi3_event_data_pcie_enumeration {
#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
-#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
-#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
-#endif
struct mpi3_event_pcie_topo_port_entry {
__le16 attached_dev_handle;
u8 port_status;
@@ -588,7 +582,7 @@ struct mpi3_event_data_pcie_topology_change_list {
u8 switch_status;
u8 io_unit_port;
__le32 reserved0c;
- struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+ struct mpi3_event_pcie_topo_port_entry port_entry[] __counted_by(num_entries);
};
#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index dc2cdd5f0311..1dc640de3efc 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -57,8 +57,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.9.1.0.51"
-#define MPI3MR_DRIVER_RELDATE "29-May-2024"
+#define MPI3MR_DRIVER_VERSION "8.10.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "08-Aug-2024"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -213,6 +213,7 @@ extern atomic64_t event_counter;
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX 0
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA 1
+#define MPI3MR_THRESHOLD_REPLY_COUNT 100
/* SGE Flag definition */
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
@@ -1059,7 +1060,6 @@ struct scmd_priv {
* @sbq_lock: Sense buffer queue lock
* @sbq_host_index: Sense buffer queue host index
* @event_masks: Event mask bitmap
- * @fwevt_worker_name: Firmware event worker thread name
* @fwevt_worker_thread: Firmware event worker thread
* @fwevt_lock: Firmware event lock
* @fwevt_list: Firmware event list
@@ -1240,7 +1240,6 @@ struct mpi3mr_ioc {
u32 sbq_host_index;
u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
- char fwevt_worker_name[MPI3MR_NAME_LENGTH];
struct workqueue_struct *fwevt_worker_thread;
spinlock_t fwevt_lock;
struct list_head fwevt_list;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index c196dc14ad20..2e1a92d306b2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -345,6 +345,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
{
u16 reply_desc_type, host_tag = 0;
u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
u32 ioc_loginfo = 0, sense_count = 0;
struct mpi3_status_reply_descriptor *status_desc;
struct mpi3_address_reply_descriptor *addr_desc;
@@ -366,8 +367,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (ioc_status &
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
- mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+ masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
@@ -380,7 +381,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (ioc_status &
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
@@ -393,7 +394,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
sshdr.asc, sshdr.ascq);
}
}
- mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+ mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
@@ -408,7 +409,10 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (cmdptr->state & MPI3MR_CMD_PENDING) {
cmdptr->state |= MPI3MR_CMD_COMPLETE;
cmdptr->ioc_loginfo = ioc_loginfo;
- cmdptr->ioc_status = ioc_status;
+ if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
+ cmdptr->ioc_status = ioc_status;
+ else
+ cmdptr->ioc_status = masked_ioc_status;
cmdptr->state &= ~MPI3MR_CMD_PENDING;
if (def_reply) {
cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
@@ -439,6 +443,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u32 admin_reply_ci = mrioc->admin_reply_ci;
u32 num_admin_replies = 0;
u64 reply_dma = 0;
+ u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
@@ -462,6 +467,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_admin_replies++;
+ threshold_comps++;
if (++admin_reply_ci == mrioc->num_admin_replies) {
admin_reply_ci = 0;
exp_phase ^= 1;
@@ -472,6 +478,11 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+ if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ writel(admin_reply_ci,
+ &mrioc->sysif_regs->admin_reply_queue_ci);
+ threshold_comps = 0;
+ }
} while (1);
writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
@@ -525,7 +536,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
u32 num_op_reply = 0;
u64 reply_dma = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- u16 req_q_idx = 0, reply_qidx;
+ u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
reply_qidx = op_reply_q->qid - 1;
@@ -556,6 +567,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
+ threshold_comps++;
if (++reply_ci == op_reply_q->num_replies) {
reply_ci = 0;
@@ -577,13 +589,19 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
break;
}
#endif
+ if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ atomic_sub(threshold_comps, &op_reply_q->pend_ios);
+ threshold_comps = 0;
+ }
} while (1);
writel(reply_ci,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
op_reply_q->ci = reply_ci;
op_reply_q->ephase = exp_phase;
-
+ atomic_sub(threshold_comps, &op_reply_q->pend_ios);
atomic_dec(&op_reply_q->in_use);
return num_op_reply;
}
@@ -2742,8 +2760,8 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
snprintf(mrioc->watchdog_work_q_name,
sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
mrioc->id);
- mrioc->watchdog_work_q =
- create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ mrioc->watchdog_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
if (!mrioc->watchdog_work_q) {
ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
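
The MPI3MR_THRESHOLD_REPLY_COUNT hunks above make the reply-drain loops publish the consumer index to the controller every 100 completions instead of only once at the end, so a long drain can never leave the hardware believing the reply queue is full. A condensed sketch of that batching; the queue type and helpers are hypothetical stand-ins:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_CI_THRESHOLD 100

struct demo_reply_q {
	u32 ci;			/* software consumer index */
	u32 depth;		/* number of reply descriptors */
	void __iomem *ci_reg;	/* hardware consumer-index register */
};

/* hypothetical helpers standing in for the descriptor-phase checks */
bool demo_reply_ready(struct demo_reply_q *q, u32 ci);
void demo_handle_reply(struct demo_reply_q *q, u32 ci);

static void demo_drain_replies(struct demo_reply_q *q)
{
	u32 ci = q->ci;
	u16 batched = 0;

	while (demo_reply_ready(q, ci)) {
		demo_handle_reply(q, ci);
		if (++ci == q->depth)
			ci = 0;
		if (++batched == DEMO_CI_THRESHOLD) {
			writel(ci, q->ci_reg);	/* hand slots back early */
			batched = 0;
		}
	}
	writel(ci, q->ci_reg);			/* final publish */
	q->ci = ci;
}
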
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 616894571c6a..5f2f67acf8bf 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -5317,10 +5317,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
- snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
- "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
- mrioc->fwevt_worker_name, 0);
+ "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
if (!mrioc->fwevt_worker_thread) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b785a7e88b49..9a24f7776d64 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -846,8 +846,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
snprintf(ioc->fault_reset_work_q_name,
sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
ioc->driver_name, ioc->id);
- ioc->fault_reset_work_q =
- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ ioc->fault_reset_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name);
if (!ioc->fault_reset_work_q) {
ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index fe1e96fda284..eceb5eeb4651 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1162,8 +1162,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
- * @firmware_event_name: fw event work queue
- * @firmware_event_thread: ""
+ * @firmware_event_thread: fw event work queue
* @fw_event_lock:
* @fw_event_list: list of fw events
* @current_event: firmware event currently being processed
@@ -1351,7 +1350,6 @@ struct MPT3SAS_ADAPTER {
struct delayed_work fault_reset_work;
/* fw event handler */
- char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 97c2472cd434..728cced42b0e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12301,10 +12301,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* event thread */
- snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
- "fw_event_%s%d", ioc->driver_name, ioc->id);
ioc->firmware_event_thread = alloc_ordered_workqueue(
- ioc->firmware_event_name, 0);
+ "fw_event_%s%d", 0, ioc->driver_name, ioc->id);
if (!ioc->firmware_event_thread) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
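
The same conversion repeats across megaraid_sas, mpt3sas, myrb/myrs, qedf, qedi and qla2xxx/qla4xxx in this section: alloc_ordered_workqueue() accepts a printf-style format, which retires the snprintf'd name buffer (and usually the struct field that held it), and WQ_MEM_RECLAIM is added where the queue can sit in the memory-reclaim I/O path. A before/after sketch with hypothetical names:

#include <linux/workqueue.h>

/*
 * Before this series:
 *	snprintf(name, sizeof(name), "fw_event_%s%d", drv_name, id);
 *	wq = create_singlethread_workqueue(name);
 */
static struct workqueue_struct *demo_make_fw_event_wq(const char *drv_name,
						      int id)
{
	/* ordered: at most one work item runs at a time, preserving the
	 * single-thread semantics; WQ_MEM_RECLAIM guarantees forward
	 * progress when the queue backs the I/O or reclaim path */
	return alloc_ordered_workqueue("fw_event_%s%d", WQ_MEM_RECLAIM,
				       drv_name, id);
}
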
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index f684eb5e0489..bfc2b835e612 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -112,9 +112,8 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
return false;
}
- snprintf(cb->work_q_name, sizeof(cb->work_q_name),
- "myrb_wq_%d", cb->host->host_no);
- cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+ cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
+ cb->host->host_no);
if (!cb->work_q) {
dma_pool_destroy(cb->dcdb_pool);
cb->dcdb_pool = NULL;
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
index fb8eacfceee8..78dc4136fb10 100644
--- a/drivers/scsi/myrb.h
+++ b/drivers/scsi/myrb.h
@@ -712,7 +712,6 @@ struct myrb_hba {
struct Scsi_Host *host;
struct workqueue_struct *work_q;
- char work_q_name[20];
struct delayed_work monitor_work;
unsigned long primary_monitor_time;
unsigned long secondary_monitor_time;
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index e824be9d9bbb..3392feb15cb4 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2206,9 +2206,8 @@ static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
return false;
}
- snprintf(cs->work_q_name, sizeof(cs->work_q_name),
- "myrs_wq_%d", shost->host_no);
- cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+ cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM,
+ shost->host_no);
if (!cs->work_q) {
dma_pool_destroy(cs->dcdb_pool);
cs->dcdb_pool = NULL;
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
index 9f6696d0ddd5..e1d6b123de7b 100644
--- a/drivers/scsi/myrs.h
+++ b/drivers/scsi/myrs.h
@@ -904,7 +904,6 @@ struct myrs_hba {
bool disable_enc_msg;
struct workqueue_struct *work_q;
- char work_q_name[20];
struct delayed_work monitor_work;
unsigned long primary_monitor_time;
unsigned long secondary_monitor_time;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index a2a084c8075e..72a4c6e3d0c8 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4009,7 +4009,7 @@ static void pmcraid_tasklet_function(unsigned long instance)
* This routine un-registers registered interrupt handler and
* also frees irqs/vectors.
*
- * Retun Value
+ * Return Value
* None
*/
static
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 4813087e58a1..cf13148ba281 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3372,9 +3372,8 @@ retry_probe:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- sprintf(host_buf, "qedf_%u_link",
- qedf->lport->host->host_no);
- qedf->link_update_wq = create_workqueue(host_buf);
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+ 1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
@@ -3584,9 +3583,8 @@ retry_probe:
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
- qedf->ll2_recv_wq =
- create_workqueue(host_buf);
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+ host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
rc = -ENOMEM;
@@ -3627,9 +3625,8 @@ retry_probe:
}
}
- sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
- qedf->timer_work_queue =
- create_workqueue(host_buf);
+ qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
+ WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3641,7 +3638,8 @@ retry_probe:
if (mode != QEDF_MODE_RECOVERY) {
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
- qedf->dpc_wq = create_workqueue(host_buf);
+ qedf->dpc_wq =
+ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4182,7 +4180,7 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = create_workqueue("qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cd0180b1f5b9..c5aec26019d6 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2767,7 +2767,8 @@ retry_probe:
}
sprintf(host_buf, "host_%d", qedi->shost->host_no);
- qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ qedi->tmf_thread =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, host_buf);
if (!qedi->tmf_thread) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start tmf thread!\n");
@@ -2775,8 +2776,9 @@ retry_probe:
goto free_cid_que;
}
- sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
- qedi->offload_thread = create_workqueue(host_buf);
+ qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
+ WQ_MEM_RECLAIM,
+ 1, qedi->shost->host_no);
if (!qedi->offload_thread) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start offload thread!\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 7cf998e3cc68..15066c112817 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2621,7 +2621,6 @@ typedef struct fc_port {
struct kref sess_kref;
struct qla_tgt *tgt;
unsigned long expires;
- struct list_head del_list_entry;
struct work_struct free_work;
struct work_struct reg_work;
uint64_t jiffies_at_registration;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bc3b2aea3f8b..7f980e6141c2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3501,11 +3501,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
- ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+ ha->dpc_lp_wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
- ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+ ha->dpc_hp_wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
INIT_WORK(&ha->idc_state_handler,
qla83xx_idc_state_handler_work);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 17cccd14765f..d91f54a6e752 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8806,7 +8806,7 @@ skip_retry_init:
DEBUG2(printk("scsi: %s: Starting kernel thread for "
"qla4xxx_dpc\n", __func__));
sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
- ha->dpc_thread = create_singlethread_workqueue(buf);
+ ha->dpc_thread = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, buf);
if (!ha->dpc_thread) {
ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
ret = -ENODEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3958a6d14bf4..0561b318dade 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
- int ivecs;
if (WARN_ON_ONCE(!prot_sdb)) {
/*
@@ -1175,20 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
goto out_free_sgtables;
}
- ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
-
- if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+ if (sg_alloc_table_chained(&prot_sdb->table,
+ rq->nr_integrity_segments,
prot_sdb->table.sgl,
SCSI_INLINE_PROT_SG_CNT)) {
ret = BLK_STS_RESOURCE;
goto out_free_sgtables;
}
- count = blk_rq_map_integrity_sg(rq->q, rq->bio,
- prot_sdb->table.sgl);
- BUG_ON(count > ivecs);
- BUG_ON(count > queue_max_integrity_segments(rq->q));
-
+ count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
}
@@ -1988,8 +1982,15 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
if (shost->no_highmem)
lim->features |= BLK_FEAT_BOUNCE_HIGH;
- dma_set_seg_boundary(dev, shost->dma_boundary);
- dma_set_max_seg_size(dev, shost->max_segment_size);
+ /*
+ * Propagate the DMA formation properties to the dma-mapping layer as
+ * a courtesy service to the LLDDs. This needs to check that the buses
+ * actually support the DMA API first, though.
+ */
+ if (dev->dma_parms) {
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+ dma_set_max_seg_size(dev, shost->max_segment_size);
+ }
}
EXPORT_SYMBOL_GPL(scsi_init_limits);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7d088b8da075..62ea7e44460e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -441,18 +441,13 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
- "fc_wq_%d", shost->host_no);
- fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
+ fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
if (!fc_host->work_q)
return -ENOMEM;
fc_host->dev_loss_tmo = fc_dev_loss_tmo;
- snprintf(fc_host->devloss_work_q_name,
- sizeof(fc_host->devloss_work_q_name),
- "fc_dl_%d", shost->host_no);
- fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
- fc_host->devloss_work_q_name);
+ fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0,
+ shost->host_no);
if (!fc_host->devloss_work_q) {
destroy_workqueue(fc_host->work_q);
fc_host->work_q = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9db86943d04c..76f488ef6a7e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1382,7 +1382,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
- } else if (rq->cmd_flags & REQ_ATOMIC && write) {
+ } else if (rq->cmd_flags & REQ_ATOMIC) {
ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
sdkp->use_atomic_write_boundary,
protect | fua);
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index cdedc271857a..fae6db20a6e9 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -505,7 +505,7 @@ struct pqi_vendor_general_request {
__le64 buffer_address;
__le32 buffer_length;
u8 reserved[40];
- } ofa_memory_allocation;
+ } host_memory_allocation;
} data;
};
@@ -517,21 +517,30 @@ struct pqi_vendor_general_response {
u8 reserved[2];
};
-#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
-#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE 1
+#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE 2
#define PQI_OFA_VERSION 1
#define PQI_OFA_SIGNATURE "OFA_QRM"
-#define PQI_OFA_MAX_SG_DESCRIPTORS 64
+#define PQI_CTRL_LOG_VERSION 1
+#define PQI_CTRL_LOG_SIGNATURE "FW_DATA"
+#define PQI_HOST_MAX_SG_DESCRIPTORS 64
-struct pqi_ofa_memory {
- __le64 signature; /* "OFA_QRM" */
+struct pqi_host_memory {
+ __le64 signature; /* "OFA_QRM", "FW_DATA", etc. */
__le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
u8 reserved1[2];
- struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS];
+};
+
+struct pqi_host_memory_descriptor {
+ struct pqi_host_memory *host_memory;
+ dma_addr_t host_memory_dma_handle;
+ void **host_chunk_virt_address;
};
struct pqi_aio_error_info {
@@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
+#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING 22
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 22
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -1096,6 +1106,11 @@ struct pqi_tmf_work {
u8 scsi_opcode;
};
+struct pqi_raid_io_stats {
+ u64 raid_bypass_cnt;
+ u64 write_stream_cnt;
+};
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1158,7 +1173,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
- unsigned int raid_bypass_cnt;
+ struct pqi_raid_io_stats __percpu *raid_io_stats;
struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};
@@ -1357,6 +1372,7 @@ struct pqi_ctrl_info {
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
u8 multi_lun_device_supported : 1;
+ u8 ctrl_logging_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
@@ -1398,13 +1414,12 @@ struct pqi_ctrl_info {
wait_queue_head_t block_requests_wait;
struct mutex ofa_mutex;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
struct work_struct ofa_memory_alloc_work;
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ struct pqi_host_memory_descriptor ofa_memory;
+ struct pqi_host_memory_descriptor ctrl_log_memory;
enum pqi_ctrl_removal_state ctrl_removal_state;
};
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 24c7cb285dca..7fd5a8c813dc 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.26-030"
+#define DRIVER_VERSION "2.1.30-031"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 26
-#define DRIVER_REVISION 30
+#define DRIVER_RELEASE 30
+#define DRIVER_REVISION 31
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto error;
+ device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+ if (!device->raid_io_stats) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
device->raid_map = raid_map;
return 0;
@@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
}
+ if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+ existing_device->raid_io_stats = new_device->raid_io_stats;
+ new_device->raid_io_stats = NULL;
+ }
existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
}
@@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
if (device) {
+ free_percpu(device->raid_io_stats);
kfree(device->raid_map);
kfree(device);
}
@@ -2292,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
* queue depth, device size.
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ /*
+ * Check for queue depth change.
+ */
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_volume_rescan_needed(device)) {
- device->rescan = false;
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- scsi_rescan_device(device->sdev);
- } else {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
+ }
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ /*
+ * Check for changes in the device, such as size.
+ */
+ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
@@ -2354,14 +2371,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
-{
- if (pqi_is_logical_device(device))
- return false;
-
- return (device->path_map & (device->path_map - 1)) != 0;
-}
-
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
@@ -3244,6 +3253,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sense_data_length);
}
+ if (pqi_cmd_priv(scmd)->this_residual &&
+ !pqi_is_logical_device(scmd->device->hostdata) &&
+ scsi_status == SAM_STAT_CHECK_CONDITION &&
+ host_byte == DID_OK &&
+ sense_data_length &&
+ scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ sshdr.asc == 0x26 &&
+ sshdr.ascq == 0x0) {
+ host_byte = DID_NO_CONNECT;
+ pqi_take_device_offline(scmd->device, "AIO");
+ scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
+ }
+
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
}
@@ -3258,14 +3281,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
int residual_count;
int xfer_count;
bool device_offline;
- struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
- device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
@@ -3290,14 +3311,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- if (pqi_is_multipath_device(device)) {
- pqi_device_remove_start(device);
- host_byte = DID_NO_CONNECT;
- scsi_status = SAM_STAT_CHECK_CONDITION;
- } else {
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
- }
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -3625,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
"Online Firmware Activation: %s\n",
@@ -3636,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
"Online Firmware Activation ABORTED\n");
if (ctrl_info->soft_reset_handshake_supported)
pqi_clear_soft_reset_status(ctrl_info);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
@@ -3646,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
dev_err(&ctrl_info->pci_dev->dev,
"unexpected Online Firmware Activation reset status: 0x%x\n",
reset_status);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
@@ -3661,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
pqi_ctrl_ofa_start(ctrl_info);
- pqi_ofa_setup_host_buffer(ctrl_info);
- pqi_ofa_host_memory_update(ctrl_info);
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
}
static void pqi_ofa_quiesce_worker(struct work_struct *work)
@@ -3702,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev,
"received Online Firmware Activation cancel request: reason: %u\n",
ctrl_info->ofa_cancel_reason);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
break;
default:
@@ -5933,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_scsi_dev *device;
struct pqi_stream_data *pqi_stream_data;
- struct pqi_scsi_dev_raid_map_data rmd;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
if (!ctrl_info->enable_stream_detection)
return false;
@@ -5975,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6025,7 +6041,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
@@ -6053,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- device->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -6190,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
continue;
scsi_device = scmd->device->hostdata;
- if (scsi_device != device)
- continue;
-
- if ((u8)scmd->device->lun != lun)
- continue;
list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
+ if (scsi_device == device && (u8)scmd->device->lun == lun)
+ set_host_byte(scmd, DID_RESET);
+ else
+ set_host_byte(scmd, DID_REQUEUE);
pqi_free_io_request(io_request);
scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
@@ -7350,7 +7364,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned int raid_bypass_cnt;
+ u64 raid_bypass_cnt;
+ int cpu;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7366,11 +7381,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = device->raid_bypass_cnt;
+ raid_bypass_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
+ }
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
}
static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
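
pqi_raid_bypass_cnt_show() above now sums a per-CPU counter instead of reading one shared field: the submission path bumps only its own CPU's slot with no lock or atomic, and only the rare sysfs read pays for the fold. A generic sketch of the pattern with hypothetical names; smartpqi increments via per_cpu_ptr(..., smp_processor_id()), for which this_cpu_inc() is the common shorthand:

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct demo_stats {
	u64 bypass_cnt;
};

/* assigned from alloc_percpu(struct demo_stats) at init (not shown) */
static struct demo_stats __percpu *demo_stats;

static void demo_hot_path(void)
{
	/* each CPU bumps only its own slot: no lock, no cache bouncing */
	this_cpu_inc(demo_stats->bypass_cnt);
}

static u64 demo_read_total(void)
{
	u64 total = 0;
	int cpu;

	/* slow path: fold all per-CPU slots together on demand */
	for_each_online_cpu(cpu)
		total += per_cpu_ptr(demo_stats, cpu)->bypass_cnt;
	return total;
}
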
@@ -7452,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
}
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 write_stream_cnt;
+ int cpu;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ write_stream_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7462,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7473,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_write_stream_cnt.attr,
NULL
};
@@ -7863,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+ ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7968,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Controller Data Logging",
+ .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -8070,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
ctrl_info->multi_lun_device_supported = false;
+ ctrl_info->ctrl_logging_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8210,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
}
}
+#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -8221,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
}
+ if (sis_is_ctrl_logging_supported(ctrl_info)) {
+ sis_notify_kdump(ctrl_info);
+ rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
sis_soft_reset(ctrl_info);
ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
@@ -8402,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
+ if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ }
+
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -8586,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (pqi_ofa_in_progress(ctrl_info))
+ if (pqi_ofa_in_progress(ctrl_info)) {
pqi_ctrl_unblock_scan(ctrl_info);
+ if (ctrl_info->ctrl_logging_supported) {
+ if (!ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_setup_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory,
+ PQI_CTRL_LOG_TOTAL_SIZE,
+ PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info,
+ &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ } else {
+ if (ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_free_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory);
+ }
+ }
pqi_scan_scsi_devices(ctrl_info);
@@ -8777,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
pqi_fail_all_outstanding_requests(ctrl_info);
ctrl_info->pqi_mode_enabled = false;
}
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8802,177 +8900,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
pqi_ctrl_unblock_scan(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
+{
+ ssleep(delay_secs);
+
+ return pqi_ctrl_init_resume(ctrl_info);
+}
+
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 chunk_size)
{
int i;
u32 sg_count;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
dma_addr_t dma_handle;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
sg_count = DIV_ROUND_UP(total_size, chunk_size);
- if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
+ if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+ host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
dev = &ctrl_info->pci_dev->dev;
+ host_memory = host_memory_descriptor->host_memory;
for (i = 0; i < sg_count; i++) {
- ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+ host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address[i])
goto out_free_chunks;
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
- put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
+ put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+ put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
return 0;
out_free_chunks:
while (--i >= 0) {
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor->address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
return -ENOMEM;
}
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_required_size, u32 min_required_size)
{
- u32 total_size;
u32 chunk_size;
u32 min_chunk_size;
- if (ctrl_info->ofa_bytes_requested == 0)
+ if (total_required_size == 0 || min_required_size == 0)
return 0;
- total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
- min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+ total_required_size = PAGE_ALIGN(total_required_size);
+ min_required_size = PAGE_ALIGN(min_required_size);
+ min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
min_chunk_size = PAGE_ALIGN(min_chunk_size);
- for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
- if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
- return 0;
- chunk_size /= 2;
- chunk_size = PAGE_ALIGN(chunk_size);
+ while (total_required_size >= min_required_size) {
+ for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+ if (pqi_host_alloc_mem(ctrl_info,
+ host_memory_descriptor, total_required_size,
+ chunk_size) == 0)
+ return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
+ total_required_size /= 2;
+ total_required_size = PAGE_ALIGN(total_required_size);
}
return -ENOMEM;
}
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 min_size)
{
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
dev = &ctrl_info->pci_dev->dev;
- ofap = dma_alloc_coherent(dev, sizeof(*ofap),
- &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
- if (!ofap)
+ host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+ &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+ if (!host_memory)
return;
- ctrl_info->pqi_ofa_mem_virt_addr = ofap;
+ host_memory_descriptor->host_memory = host_memory;
- if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev,
- "failed to allocate host buffer for Online Firmware Activation\n");
- dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+ total_size, min_size) < 0) {
+ dev_err(dev, "failed to allocate firmware usable host buffer\n");
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
return;
}
-
- put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
- memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor)
{
unsigned int i;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
unsigned int num_memory_descriptors;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
- if (!ofap)
+ host_memory = host_memory_descriptor->host_memory;
+ if (!host_memory)
return;
dev = &ctrl_info->pci_dev->dev;
- if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
+ if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
goto out;
- mem_descriptor = ofap->sg_descriptor;
- num_memory_descriptors =
- get_unaligned_le16(&ofap->num_memory_descriptors);
+ mem_descriptor = host_memory->sg_descriptor;
+ num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
for (i = 0; i < num_memory_descriptors; i++) {
dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor[i].address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
- dma_free_coherent(dev, sizeof(*ofap), ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
}
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u16 function_code)
{
u32 buffer_length;
struct pqi_vendor_general_request request;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
memset(&request, 0, sizeof(request));
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
- put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
- &request.header.iu_length);
- put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
- &request.function_code);
-
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
- if (ofap) {
- buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
- get_unaligned_le16(&ofap->num_memory_descriptors) *
- sizeof(struct pqi_sg_descriptor);
-
- put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
- &request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(buffer_length,
- &request.data.ofa_memory_allocation.buffer_length);
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le16(function_code, &request.function_code);
+
+ host_memory = host_memory_descriptor->host_memory;
+
+ if (host_memory) {
+ buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+ put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+ put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+ if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+ } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+ }
}
return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
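
With the function code now a parameter, the one helper serves both the OFA buffer and the new controller-log buffer. The call sites are outside this hunk; their likely shape is sketched below, where the pqi_ctrl_info descriptor field names (ofa_memory, ctrl_log_memory) are assumptions, not taken from this patch:

    /* Hedged sketch of the two call sites; the ctrl_info field names
     * are assumed for illustration. */
    static int pqi_ofa_memory_update(struct pqi_ctrl_info *ctrl_info)
    {
            return pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory,
                    PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
    }

    static int pqi_ctrl_log_memory_update(struct pqi_ctrl_info *ctrl_info)
    {
            return pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory,
                    PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
    }
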
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
-{
- ssleep(delay_secs);
-
- return pqi_ctrl_init_resume(ctrl_info);
-}
-
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
.status = SAM_STAT_CHECK_CONDITION,
@@ -9446,6 +9554,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x0462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
},
{
@@ -9474,6 +9586,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1110)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9482,6 +9598,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0xc460)
},
{
@@ -9590,6 +9710,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3a, 0x0104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -10182,6 +10310,110 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fe)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02ff)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x0300)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0046)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0047)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0048)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10266,6 +10498,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 673437c7152b..ca1df36b83f7 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -29,6 +29,7 @@
#define SIS_ENABLE_INTX 0x80
#define SIS_SOFT_RESET 0x100
#define SIS_CMD_READY 0x200
+#define SIS_NOTIFY_KDUMP 0x400
#define SIS_TRIGGER_SHUTDOWN 0x800000
#define SIS_PQI_RESET_QUIESCE 0x1000000
@@ -52,6 +53,8 @@
#define SIS_BASE_STRUCT_ALIGNMENT 16
#define SIS_CTRL_KERNEL_FW_TRIAGE 0x3
+#define SIS_CTRL_KERNEL_CTRL_LOGGING 0x4
+#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS 0x18
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 180
@@ -65,6 +68,13 @@ enum sis_fw_triage_status {
FW_TRIAGE_COMPLETED
};
+enum sis_ctrl_logging_status {
+ CTRL_LOGGING_NOT_STARTED = 0,
+ CTRL_LOGGING_STARTED,
+ CTRL_LOGGING_COND_INVALID,
+ CTRL_LOGGING_COMPLETED
+};
+
#pragma pack(1)
/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
@@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status
SIS_CTRL_KERNEL_FW_TRIAGE));
}
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING;
+}
+
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info)
+{
+ sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP);
+}
+
+static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info)
+{
+ return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3));
+}
+
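
The logging status lives in bits 4:3 of the SIS firmware status register (mask 0x18), hence the shift by 3 above. A standalone check of the decode, with values mirroring enum sis_ctrl_logging_status:

    /* Worked decode of SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS (bits 4:3). */
    #include <assert.h>

    int main(void)
    {
            unsigned int mask = 0x18;

            assert(((0x18u & mask) >> 3) == 3);     /* CTRL_LOGGING_COMPLETED */
            assert(((0x08u & mask) >> 3) == 1);     /* CTRL_LOGGING_STARTED */
            return 0;
    }
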
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
@@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS 180
+#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS 1
+
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum sis_ctrl_logging_status status;
+ unsigned long timeout;
+
+ timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies;
+ while (1) {
+ status = sis_read_ctrl_logging_status(ctrl_info);
+ if (status == CTRL_LOGGING_COND_INVALID) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller data logging condition invalid\n");
+ rc = -EINVAL;
+ break;
+ } else if (status == CTRL_LOGGING_COMPLETED) {
+ rc = 0;
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for controller data logging status\n");
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS);
+ }
+
+ return rc;
+}
+
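
sis_wait_for_ctrl_logging_completion() is the usual jiffies-deadline polling idiom. Its skeleton, with read_status(), DONE and FAILED as placeholders rather than driver API:

    /* Generic poll-until-done-or-deadline loop (placeholders, not
     * driver symbols). */
    unsigned long deadline = jiffies + TIMEOUT_SECS * HZ;

    for (;;) {
            int status = read_status();

            if (status == FAILED)
                    return -EINVAL;
            if (status == DONE)
                    return 0;
            if (time_after(jiffies, deadline))
                    return -ETIMEDOUT;
            ssleep(POLL_INTERVAL_SECS);
    }
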
void sis_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 0c97626d87d4..7e0eac3d07de 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info);
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info);
extern unsigned int sis_ctrl_ready_timeout_secs;
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index cc824dcfe7da..9be3f0193145 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -300,9 +300,8 @@ snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
}
SNIC_BUG_ON(shost->work_q != NULL);
- snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
- shost->host_no);
- shost->work_q = create_singlethread_workqueue(shost->work_q_name);
+ shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
+ shost->host_no);
if (!shost->work_q) {
SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
@@ -873,7 +872,7 @@ snic_global_data_init(void)
snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
len = sizeof(struct snic_host_req);
- cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
+ cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN, NULL);
if (!cachep) {
SNIC_ERR("Failed to create snic tm req slab\n");
@@ -884,7 +883,8 @@ snic_global_data_init(void)
snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
/* snic_event queue */
- snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
+ snic_glob->event_q =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
if (!snic_glob->event_q) {
SNIC_ERR("snic event queue create failed\n");
ret = -ENOMEM;
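
snic, stex and vmw_pvscsi all receive the same mechanical conversion in this series: create_singlethread_workqueue() on a caller-formatted name becomes alloc_ordered_workqueue() with WQ_MEM_RECLAIM, which keeps the single-threaded ordering guarantee, formats the name in place, and lets the drivers drop their name buffers. In isolation (shost stands for any Scsi_Host):

    /* Before: the caller formats and must keep the name buffer alive. */
    snprintf(name, sizeof(name), "scsi_wq_%d", shost->host_no);
    wq = create_singlethread_workqueue(name);

    /* After: ordered workqueue with printf-style naming; WQ_MEM_RECLAIM
     * because these queues sit in the I/O completion/error-handling path. */
    wq = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM, shost->host_no);
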
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 8ffb75be99bc..0e81125df8c7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -334,7 +334,6 @@ struct st_hba {
struct st_ccb *wait_ccb;
__le32 *scratch;
- char work_q_name[20];
struct workqueue_struct *work_q;
struct work_struct reset_work;
wait_queue_head_t reset_waitq;
@@ -1795,9 +1794,8 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->pdev = pdev;
init_waitqueue_head(&hba->reset_waitq);
- snprintf(hba->work_q_name, sizeof(hba->work_q_name),
- "stex_wq_%d", host->host_no);
- hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+ hba->work_q = alloc_ordered_workqueue("stex_wq_%d", WQ_MEM_RECLAIM,
+ host->host_no);
if (!hba->work_q) {
printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
pci_name(pdev));
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index f51702893306..fffc0fa52594 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -304,7 +304,7 @@ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
#endif
- return count;
+ return count;
}
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c4fea077265e..32242d86cf5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1137,7 +1137,8 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
snprintf(name, sizeof(name),
"vmw_pvscsi_wq_%u", adapter->host->host_no);
- adapter->workqueue = create_singlethread_workqueue(name);
+ adapter->workqueue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!adapter->workqueue) {
printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
return 0;
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index fb2bd31387d0..56f476a12847 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -32,5 +32,5 @@ obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_PLAT_VERSATILE) += versatile/
+obj-y += versatile/
obj-y += xilinx/
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 8809a948201a..7549f1644e5e 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -41,6 +41,11 @@ static const struct meson_gx_soc_id {
{ "G12B", 0x29 },
{ "SM1", 0x2b },
{ "A1", 0x2c },
+ { "T7", 0x36 },
+ { "S4", 0x37 },
+ { "A5", 0x3c },
+ { "C3", 0x3d },
+ { "A4", 0x40 },
};
static const struct meson_gx_package_id {
@@ -76,6 +81,11 @@ static const struct meson_gx_package_id {
{ "S905X3", 0x2b, 0x10, 0x3f },
{ "S905D3", 0x2b, 0x30, 0x3f },
{ "A113L", 0x2c, 0x0, 0xf8 },
+ { "S805X2", 0x37, 0x2, 0xf },
+ { "C308L", 0x3d, 0x1, 0xf },
+ { "A311D2", 0x36, 0x1, 0xf },
+ { "A113X2", 0x3c, 0x1, 0xf },
+ { "A113L2", 0x40, 0x1, 0xf },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index cc9a3e107479..2a42b28931c9 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -101,6 +101,29 @@ static const struct at91_soc socs[] __initconst = {
AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH,
"sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
+#ifdef CONFIG_SOC_SAM9X7
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X70_EXID_MATCH,
+ "sam9x70", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X72_EXID_MATCH,
+ "sam9x72", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1M_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 16MB DDR2 SiP", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D5M_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 64MB DDR2 SiP", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1G_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 125MB DDR3L SiP ", "sam9x7"),
+ AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D2G_EXID_MATCH,
+ AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH,
+ "sam9x75 250MB DDR3L SiP", "sam9x7"),
+#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 7a9f47ce85fb..2c78e54255f7 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -44,6 +44,7 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
+#define SAM9X7_CIDR_MATCH 0x09750020
#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
@@ -66,6 +67,14 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X60_D1G_EXID_MATCH 0x00000010
#define SAM9X60_D6K_EXID_MATCH 0x00000011
+#define SAM9X70_EXID_MATCH 0x00000005
+#define SAM9X72_EXID_MATCH 0x00000004
+#define SAM9X75_D1G_EXID_MATCH 0x00000018
+#define SAM9X75_D2G_EXID_MATCH 0x00000020
+#define SAM9X75_D1M_EXID_MATCH 0x00000003
+#define SAM9X75_D5M_EXID_MATCH 0x00000010
+#define SAM9X75_EXID_MATCH 0x00000000
+
#define SAMA7G51_EXID_MATCH 0x3
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 392e54f14dbe..aa5348f4902f 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -791,8 +791,6 @@ static int fsl_qman_probe(struct platform_device *pdev)
* FQD memory MUST be zero'd by software
*/
zero_priv_mem(fqd_a, fqd_sz);
-#else
- WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index e23b60618c1a..456ef5d5c199 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
struct device *dev = pcfg->dev;
int ret;
- pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
- if (!pcfg->iommu_domain) {
+ pcfg->iommu_domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(pcfg->iommu_domain)) {
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ pcfg->iommu_domain = NULL;
goto no_iommu;
}
ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index fa9ffbed0e92..eb03f42ab978 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -17,7 +17,7 @@ config QUICC_ENGINE
config UCC_SLOW
bool
- default y if SERIAL_QE
+ default y if SERIAL_QE || (CPM_QMC && QUICC_ENGINE)
help
This option provides qe_lib support to UCC slow
protocols: UART, BISYNC, QMC
@@ -31,26 +31,27 @@ config UCC_FAST
config UCC
bool
- default y if UCC_FAST || UCC_SLOW
+ default y if UCC_FAST || UCC_SLOW || (CPM_TSA && QUICC_ENGINE)
config CPM_TSA
- tristate "CPM TSA support"
+ tristate "CPM/QE TSA support"
depends on OF && HAS_IOMEM
- depends on CPM1 || (CPM && COMPILE_TEST)
+ depends on CPM1 || QUICC_ENGINE || \
+ ((CPM || QUICC_ENGINE) && COMPILE_TEST)
help
- Freescale CPM Time Slot Assigner (TSA)
+ Freescale CPM/QE Time Slot Assigner (TSA)
controller.
This option enables support for this
controller
config CPM_QMC
- tristate "CPM QMC support"
+ tristate "CPM/QE QMC support"
depends on OF && HAS_IOMEM
- depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
+ depends on FSL_SOC
depends on CPM_TSA
help
- Freescale CPM QUICC Multichannel Controller
+ Freescale CPM/QE QUICC Multichannel Controller
(QMC)
This option enables support for this
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index a877347d37d3..02c29f5f86d3 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -13,6 +13,7 @@
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
+#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -187,6 +188,49 @@ void cpm_muram_free(s32 offset)
}
EXPORT_SYMBOL(cpm_muram_free);
+static void devm_cpm_muram_release(struct device *dev, void *res)
+{
+ s32 *info = res;
+
+ cpm_muram_free(*info);
+}
+
+/**
+ * devm_cpm_muram_alloc - Resource-managed cpm_muram_alloc
+ * @dev: Device to allocate memory for
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure, as cpm_muram_alloc() does.
+ * Use cpm_muram_addr() to get the virtual address of the area.
+ *
+ * Unlike cpm_muram_alloc(), the memory allocated by this
+ * resource-managed version is automatically freed on driver detach, so
+ * cpm_muram_free() must not be called to release the allocated memory.
+ */
+s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align)
+{
+ s32 info;
+ s32 *dr;
+
+ dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ info = cpm_muram_alloc(size, align);
+ if (info >= 0) {
+ *dr = info;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return info;
+}
+EXPORT_SYMBOL(devm_cpm_muram_alloc);
+
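
A consumer would call the devm_ variant from probe and let devres release the region on detach; a minimal sketch, assuming an invented foo_probe() and illustrative size/alignment values:

    /* Hypothetical probe-time use; note there is no cpm_muram_free()
     * on any path -- devres releases the region automatically. */
    static int foo_probe(struct platform_device *pdev)
    {
            void __iomem *va;
            s32 off;

            off = devm_cpm_muram_alloc(&pdev->dev, 64, 8);
            if (off < 0)
                    return off;             /* negative errno, as documented */

            va = cpm_muram_addr(off);       /* virtual address of the region */
            memset_io(va, 0, 64);
            return 0;
    }
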
/*
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
@@ -212,6 +256,42 @@ s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
/**
+ * devm_cpm_muram_alloc_fixed - Resource-managed cpm_muram_alloc_fixed
+ * @dev: Device to allocate memory for
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure, as cpm_muram_alloc_fixed() does.
+ * Use cpm_muram_addr() to get the virtual address of the area.
+ *
+ * Unlike cpm_muram_alloc_fixed(), the memory allocated by this
+ * resource-managed version is automatically freed on driver detach, so
+ * cpm_muram_free() must not be called to release the allocated memory.
+ */
+s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ s32 info;
+ s32 *dr;
+
+ dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ info = cpm_muram_alloc_fixed(offset, size);
+ if (info >= 0) {
+ *dr = info;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return info;
+}
+EXPORT_SYMBOL(devm_cpm_muram_alloc_fixed);
+
+/**
* cpm_muram_addr - turn a muram offset into a virtual address
* @offset: muram offset to convert
*/
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 76bb496305a0..3dffebb48b0d 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -8,7 +8,9 @@
*/
#include <soc/fsl/qe/qmc.h>
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -18,31 +20,41 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
+#include <soc/fsl/qe/ucc_slow.h>
+#include <soc/fsl/qe/qe.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"
-/* SCC general mode register high (32 bits) */
+/* SCC general mode register low (32 bits) (GUMR_L in QE) */
#define SCC_GSMRL 0x00
-#define SCC_GSMRL_ENR (1 << 5)
-#define SCC_GSMRL_ENT (1 << 4)
-#define SCC_GSMRL_MODE_QMC (0x0A << 0)
+#define SCC_GSMRL_ENR BIT(5)
+#define SCC_GSMRL_ENT BIT(4)
+#define SCC_GSMRL_MODE_MASK GENMASK(3, 0)
+#define SCC_CPM1_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
+#define SCC_QE_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)
-/* SCC general mode register low (32 bits) */
+/* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
#define SCC_GSMRH 0x04
-#define SCC_GSMRH_CTSS (1 << 7)
-#define SCC_GSMRH_CDS (1 << 8)
-#define SCC_GSMRH_CTSP (1 << 9)
-#define SCC_GSMRH_CDP (1 << 10)
-
-/* SCC event register (16 bits) */
+#define SCC_GSMRH_CTSS BIT(7)
+#define SCC_GSMRH_CDS BIT(8)
+#define SCC_GSMRH_CTSP BIT(9)
+#define SCC_GSMRH_CDP BIT(10)
+#define SCC_GSMRH_TTX BIT(11)
+#define SCC_GSMRH_TRX BIT(12)
+
+/* SCC event register (16 bits) (identical to UCCE in QE) */
#define SCC_SCCE 0x10
-#define SCC_SCCE_IQOV (1 << 3)
-#define SCC_SCCE_GINT (1 << 2)
-#define SCC_SCCE_GUN (1 << 1)
-#define SCC_SCCE_GOV (1 << 0)
+#define SCC_SCCE_IQOV BIT(3)
+#define SCC_SCCE_GINT BIT(2)
+#define SCC_SCCE_GUN BIT(1)
+#define SCC_SCCE_GOV BIT(0)
/* SCC mask register (16 bits) */
#define SCC_SCCM 0x14
+
+/* UCC Extended Mode Register (8 bits, QE only) */
+#define SCC_QE_UCC_GUEMR 0x90
+
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE 0x00
/* Multichannel controller state (16 bits) */
@@ -73,27 +85,42 @@
#define QMC_GBL_TSATTX 0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16 0xA0
+/* Rx framer base pointer (16 bits, QE only) */
+#define QMC_QE_GBL_RX_FRM_BASE 0xAC
+/* Tx framer base pointer (16 bits, QE only) */
+#define QMC_QE_GBL_TX_FRM_BASE 0xAE
+/* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
+#define QMC_QE_GBL_RSV_B0_START 0xB0
+#define QMC_QE_GBL_RSV_B0_SIZE 0x14
+/* QMC Global Channel specific base (32 bits, QE only) */
+#define QMC_QE_GBL_GCSBASE 0xC4
/* TSA entry (16bit entry in TSATRX and TSATTX) */
-#define QMC_TSA_VALID (1 << 15)
-#define QMC_TSA_WRAP (1 << 14)
-#define QMC_TSA_MASK (0x303F)
-#define QMC_TSA_CHANNEL(x) ((x) << 6)
+#define QMC_TSA_VALID BIT(15)
+#define QMC_TSA_WRAP BIT(14)
+#define QMC_TSA_MASK_MASKH GENMASK(13, 12)
+#define QMC_TSA_MASK_MASKL GENMASK(5, 0)
+#define QMC_TSA_MASK_8BIT (FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
+ FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
+#define QMC_TSA_CHANNEL_MASK GENMASK(11, 6)
+#define QMC_TSA_CHANNEL(x) FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE 0x00
/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR 0x02
-#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15)
-#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13))
-#define QMC_SPE_CHAMR_ENT (1 << 12)
-#define QMC_SPE_CHAMR_POL (1 << 8)
-#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13)
-#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7)
-#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0)
-#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14)
-#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10)
+#define QMC_SPE_CHAMR_MODE_MASK GENMASK(15, 15)
+#define QMC_SPE_CHAMR_MODE_HDLC FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
+#define QMC_SPE_CHAMR_MODE_TRANSP (FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
+#define QMC_SPE_CHAMR_ENT BIT(12)
+#define QMC_SPE_CHAMR_POL BIT(8)
+#define QMC_SPE_CHAMR_HDLC_IDLM BIT(13)
+#define QMC_SPE_CHAMR_HDLC_CRC BIT(7)
+#define QMC_SPE_CHAMR_HDLC_NOF_MASK GENMASK(3, 0)
+#define QMC_SPE_CHAMR_HDLC_NOF(x) FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
+#define QMC_SPE_CHAMR_TRANSP_RD BIT(14)
+#define QMC_SPE_CHAMR_TRANSP_SYNC BIT(10)
/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE 0x04
@@ -120,43 +147,47 @@
/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC 0x3C
-#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
-#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0)
+#define QMC_SPE_TRNSYNC_RX_MASK GENMASK(15, 8)
+#define QMC_SPE_TRNSYNC_RX(x) FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
+#define QMC_SPE_TRNSYNC_TX_MASK GENMASK(7, 0)
+#define QMC_SPE_TRNSYNC_TX(x) FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)
/* Interrupt related registers bits */
-#define QMC_INT_V (1 << 15)
-#define QMC_INT_W (1 << 14)
-#define QMC_INT_NID (1 << 13)
-#define QMC_INT_IDL (1 << 12)
-#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6)
-#define QMC_INT_MRF (1 << 5)
-#define QMC_INT_UN (1 << 4)
-#define QMC_INT_RXF (1 << 3)
-#define QMC_INT_BSY (1 << 2)
-#define QMC_INT_TXB (1 << 1)
-#define QMC_INT_RXB (1 << 0)
+#define QMC_INT_V BIT(15)
+#define QMC_INT_W BIT(14)
+#define QMC_INT_NID BIT(13)
+#define QMC_INT_IDL BIT(12)
+#define QMC_INT_CHANNEL_MASK GENMASK(11, 6)
+#define QMC_INT_GET_CHANNEL(x) FIELD_GET(QMC_INT_CHANNEL_MASK, x)
+#define QMC_INT_MRF BIT(5)
+#define QMC_INT_UN BIT(4)
+#define QMC_INT_RXF BIT(3)
+#define QMC_INT_BSY BIT(2)
+#define QMC_INT_TXB BIT(1)
+#define QMC_INT_RXB BIT(0)
/* BD related registers bits */
-#define QMC_BD_RX_E (1 << 15)
-#define QMC_BD_RX_W (1 << 13)
-#define QMC_BD_RX_I (1 << 12)
-#define QMC_BD_RX_L (1 << 11)
-#define QMC_BD_RX_F (1 << 10)
-#define QMC_BD_RX_CM (1 << 9)
-#define QMC_BD_RX_UB (1 << 7)
-#define QMC_BD_RX_LG (1 << 5)
-#define QMC_BD_RX_NO (1 << 4)
-#define QMC_BD_RX_AB (1 << 3)
-#define QMC_BD_RX_CR (1 << 2)
-
-#define QMC_BD_TX_R (1 << 15)
-#define QMC_BD_TX_W (1 << 13)
-#define QMC_BD_TX_I (1 << 12)
-#define QMC_BD_TX_L (1 << 11)
-#define QMC_BD_TX_TC (1 << 10)
-#define QMC_BD_TX_CM (1 << 9)
-#define QMC_BD_TX_UB (1 << 7)
-#define QMC_BD_TX_PAD (0x0f << 0)
+#define QMC_BD_RX_E BIT(15)
+#define QMC_BD_RX_W BIT(13)
+#define QMC_BD_RX_I BIT(12)
+#define QMC_BD_RX_L BIT(11)
+#define QMC_BD_RX_F BIT(10)
+#define QMC_BD_RX_CM BIT(9)
+#define QMC_BD_RX_UB BIT(7)
+#define QMC_BD_RX_LG BIT(5)
+#define QMC_BD_RX_NO BIT(4)
+#define QMC_BD_RX_AB BIT(3)
+#define QMC_BD_RX_CR BIT(2)
+
+#define QMC_BD_TX_R BIT(15)
+#define QMC_BD_TX_W BIT(13)
+#define QMC_BD_TX_I BIT(12)
+#define QMC_BD_TX_L BIT(11)
+#define QMC_BD_TX_TC BIT(10)
+#define QMC_BD_TX_CM BIT(9)
+#define QMC_BD_TX_UB BIT(7)
+#define QMC_BD_TX_PAD_MASK GENMASK(3, 0)
+#define QMC_BD_TX_PAD(x) FIELD_PREP(QMC_BD_TX_PAD_MASK, x)
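
All of these register-definition conversions follow one pattern: an open-coded "(val & mask) << shift" becomes a GENMASK() mask plus FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, so the shift lives in the mask and constant overflows fail the build. The equivalence on a 4-bit field, as an illustration (the NOF_* names are invented for the example):

    /* Illustrative equivalence for a 4-bit field at bits 3..0. */
    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define NOF_OLD(x)      (((x) & 0x0f) << 0)
    #define NOF_MASK        GENMASK(3, 0)
    #define NOF_NEW(x)      FIELD_PREP(NOF_MASK, x)

    /* NOF_OLD(5) == NOF_NEW(5) == 0x5; FIELD_PREP(NOF_MASK, 0x1f) with
     * a constant argument fails the build instead of silently masking. */
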
/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS 8
@@ -184,7 +215,7 @@ struct qmc_chan {
u64 rx_ts_mask;
bool is_reverse_data;
- spinlock_t tx_lock;
+ spinlock_t tx_lock; /* Protect Tx related data */
cbd_t __iomem *txbds;
cbd_t __iomem *txbd_free;
cbd_t __iomem *txbd_done;
@@ -192,7 +223,7 @@ struct qmc_chan {
u64 nb_tx_underrun;
bool is_tx_stopped;
- spinlock_t rx_lock;
+ spinlock_t rx_lock; /* Protect Rx related data */
cbd_t __iomem *rxbds;
cbd_t __iomem *rxbd_free;
cbd_t __iomem *rxbd_done;
@@ -203,13 +234,31 @@ struct qmc_chan {
bool is_rx_stopped;
};
+enum qmc_version {
+ QMC_CPM1,
+ QMC_QE,
+};
+
+struct qmc_data {
+ enum qmc_version version;
+ u32 tstate; /* Initial TSTATE value */
+ u32 rstate; /* Initial RSTATE value */
+ u32 zistate; /* Initial ZISTATE value */
+ u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */
+ u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */
+ u32 rpack; /* Initial RPACK value */
+};
+
struct qmc {
struct device *dev;
+ const struct qmc_data *data;
struct tsa_serial *tsa_serial;
void __iomem *scc_regs;
void __iomem *scc_pram;
void __iomem *dpram;
u16 scc_pram_offset;
+ u32 dpram_offset;
+ u32 qe_subblock;
cbd_t __iomem *bd_table;
dma_addr_t bd_dma_addr;
size_t bd_size;
@@ -222,6 +271,11 @@ struct qmc {
struct qmc_chan *chans[64];
};
+static void qmc_write8(void __iomem *addr, u8 val)
+{
+ iowrite8(val, addr);
+}
+
static void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
@@ -262,6 +316,13 @@ static void qmc_setbits32(void __iomem *addr, u32 set)
qmc_write32(addr, qmc_read32(addr) | set);
}
+static bool qmc_is_qe(const struct qmc *qmc)
+{
+ if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
+ return qmc->data->version == QMC_QE;
+
+ return IS_ENABLED(CONFIG_QUICC_ENGINE);
+}
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
@@ -348,8 +409,8 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param
switch (param->mode) {
case QMC_HDLC:
- if ((param->hdlc.max_rx_buf_size % 4) ||
- (param->hdlc.max_rx_buf_size < 8))
+ if (param->hdlc.max_rx_buf_size % 4 ||
+ param->hdlc.max_rx_buf_size < 8)
return -EINVAL;
qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
@@ -532,11 +593,12 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Restart receiver if needed */
if (chan->is_rx_halted && !chan->is_rx_stopped) {
/* Restart receiver */
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
}
chan->rx_pending++;
@@ -641,7 +703,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser
return -EINVAL;
}
- val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Rx stuff */
for (i = 0; i < info->nb_rx_ts; i++) {
@@ -662,7 +724,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
- ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@@ -677,7 +739,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria
/* Use a Rx 32 entries table */
- val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Rx stuff */
for (i = 0; i < info->nb_rx_ts; i++) {
@@ -698,7 +760,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
- ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@@ -713,7 +775,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
/* Use a Tx 32 entries table */
- val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+ val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Tx stuff */
for (i = 0; i < info->nb_tx_ts; i++) {
@@ -734,7 +796,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
- ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@@ -774,11 +836,18 @@ static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
return qmc_chan_setup_tsa_32rx(chan, &info, enable);
}
-static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
+static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}
+static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
+{
+ if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0))
+ return -EIO;
+ return 0;
+}
+
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
unsigned long flags;
@@ -793,7 +862,9 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
}
/* Send STOP RECEIVE command */
- ret = qmc_chan_command(chan, 0x0);
+ ret = qmc_is_qe(chan->qmc) ?
+ qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
+ qmc_chan_cpm1_command(chan, 0x0);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
chan->id, ret);
@@ -830,7 +901,9 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
}
/* Send STOP TRANSMIT command */
- ret = qmc_chan_command(chan, 0x1);
+ ret = qmc_is_qe(chan->qmc) ?
+ qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
+ qmc_chan_cpm1_command(chan, 0x1);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
chan->id, ret);
@@ -889,6 +962,7 @@ EXPORT_SYMBOL(qmc_chan_stop);
static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
struct tsa_serial_info info;
+ unsigned int w_rx, w_tx;
u16 first_rx, last_tx;
u16 trnsync;
int ret;
@@ -898,6 +972,14 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
if (ret)
return ret;
+ w_rx = hweight64(chan->rx_ts_mask);
+ w_tx = hweight64(chan->tx_ts_mask);
+ if (w_rx <= 1 && w_tx <= 1) {
+ dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
+ return 0;
+ }
+
/* Find the first Rx TS allocated to the channel */
first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
@@ -911,6 +993,7 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
chan->id, trnsync,
@@ -940,19 +1023,22 @@ static int qmc_chan_start_rx(struct qmc_chan *chan)
goto end;
}
- ret = qmc_setup_chan_trnsync(chan->qmc, chan);
- if (ret) {
- dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
- chan->id, ret);
- goto end;
+ if (chan->mode == QMC_TRANSPARENT) {
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
}
/* Restart the receiver */
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
chan->is_rx_stopped = false;
@@ -982,11 +1068,13 @@ static int qmc_chan_start_tx(struct qmc_chan *chan)
goto end;
}
- ret = qmc_setup_chan_trnsync(chan->qmc, chan);
- if (ret) {
- dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
- chan->id, ret);
- goto end;
+ if (chan->mode == QMC_TRANSPARENT) {
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
}
/*
@@ -1096,8 +1184,8 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
qmc_read16(chan->s_param + QMC_SPE_TBASE));
/* Reset TSTATE and ZISTATE to their initial value */
- qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
- qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
spin_unlock_irqrestore(&chan->tx_lock, flags);
}
@@ -1127,7 +1215,7 @@ static int qmc_check_chans(struct qmc *qmc)
if (ret)
return ret;
- if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
+ if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
return -EINVAL;
}
@@ -1136,7 +1224,7 @@ static int qmc_check_chans(struct qmc *qmc)
* If more than 32 TS are assigned to this serial, one common table is
* used for Tx and Rx and so masks must be equal for all channels.
*/
- if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
+ if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
if (info.nb_tx_ts != info.nb_rx_ts) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
@@ -1368,13 +1456,14 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
- qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
- qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
if (chan->mode == QMC_TRANSPARENT) {
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
- val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
+ val = QMC_SPE_CHAMR_MODE_TRANSP;
if (chan->is_reverse_data)
val |= QMC_SPE_CHAMR_TRANSP_RD;
qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
@@ -1382,10 +1471,10 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
if (ret)
return ret;
} else {
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
qmc_write16(chan->s_param + QMC_SPE_CHAMR,
- QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
+ QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
}
/* Do not enable interrupts now. They will be enabled later */
@@ -1510,11 +1599,14 @@ static void qmc_irq_gint(struct qmc *qmc)
/* Restart the receiver if needed */
spin_lock_irqsave(&chan->rx_lock, flags);
if (chan->rx_pending && !chan->is_rx_stopped) {
- if (chan->mode == QMC_TRANSPARENT)
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
- else
- qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
- qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_RPACK,
+ chan->qmc->data->rpack);
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
+ chan->mode == QMC_TRANSPARENT ?
+ chan->qmc->data->zdstate_transp :
+ chan->qmc->data->zdstate_hdlc);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE,
+ chan->qmc->data->rstate);
chan->is_rx_halted = false;
} else {
chan->is_rx_halted = true;
@@ -1558,27 +1650,74 @@ static irqreturn_t qmc_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
-static int qmc_probe(struct platform_device *pdev)
+static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
{
- struct device_node *np = pdev->dev.of_node;
- unsigned int nb_chans;
- struct resource *res;
- struct qmc *qmc;
- int irq;
+ struct qe_firmware_info *qe_fw_info;
+ const struct qe_firmware *qe_fw;
+ const struct firmware *fw;
+ const char *filename;
int ret;
- qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
- if (!qmc)
- return -ENOMEM;
+ ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ /* fsl,soft-qmc property not set -> Simply do nothing */
+ return 0;
+ default:
+ dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
+ np);
+ return ret;
+ }
- qmc->dev = &pdev->dev;
- INIT_LIST_HEAD(&qmc->chan_head);
+ qe_fw_info = qe_get_firmware_info();
+ if (qe_fw_info) {
+ if (!strstr(qe_fw_info->id, "Soft-QMC")) {
+ dev_err(qmc->dev, "Another Firmware is already loaded\n");
+ return -EALREADY;
+ }
+ dev_info(qmc->dev, "Firmware already loaded\n");
+ return 0;
+ }
+
+ dev_info(qmc->dev, "Using firmware %s\n", filename);
+
+ ret = request_firmware(&fw, filename, qmc->dev);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
+ return ret;
+ }
+
+ qe_fw = (const struct qe_firmware *)fw->data;
+
+ if (fw->size < sizeof(qe_fw->header) ||
+ be32_to_cpu(qe_fw->header.length) != fw->size) {
+ dev_err(qmc->dev, "Invalid firmware %s\n", filename);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = qe_upload_firmware(qe_fw);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
+ goto end;
+ }
+
+ ret = 0;
+end:
+ release_firmware(fw);
+ return ret;
+}
+
+static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ struct resource *res;
qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
if (IS_ERR(qmc->scc_regs))
return PTR_ERR(qmc->scc_regs);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
if (!res)
return -EINVAL;
@@ -1591,44 +1730,215 @@ static int qmc_probe(struct platform_device *pdev)
if (IS_ERR(qmc->dpram))
return PTR_ERR(qmc->dpram);
+ return 0;
+}
+
+static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ struct resource *res;
+ int ucc_num;
+ s32 info;
+
+ qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
+ if (IS_ERR(qmc->scc_regs))
+ return PTR_ERR(qmc->scc_regs);
+
+ ucc_num = tsa_serial_get_num(qmc->tsa_serial);
+ if (ucc_num < 0)
+ return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");
+
+ qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
+ if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
+ dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
+ return -EINVAL;
+ }
+ /* Allocate the 'Global Multichannel Parameters' and the
+ * 'Framer parameters' areas. The 'Framer parameters' area
+ * is located right after the 'Global Multichannel Parameters'.
+ * The 'Framer parameters' need 1 byte per receive and transmit
+ * channel. The maximum number of receive or transmit channel
+ * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
+ */
+ info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
+ ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (IS_ERR_VALUE(info)) {
+ dev_err(qmc->dev, "cannot allocate MURAM for PRAM");
+ return -ENOMEM;
+ }
+ if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, info)) {
+ dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed");
+ return -EIO;
+ }
+ qmc->scc_pram = qe_muram_addr(info);
+ qmc->scc_pram_offset = info;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
+ if (!res)
+ return -EINVAL;
+ qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
+ qmc->dpram = devm_ioremap_resource(qmc->dev, res);
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
+
+ return 0;
+}
+
+static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
+{
+ return qmc_is_qe(qmc) ?
+ qmc_qe_init_resources(qmc, pdev) :
+ qmc_cpm1_init_resources(qmc, pdev);
+}
+
+static int qmc_cpm1_init_scc(struct qmc *qmc)
+{
+ u32 val;
+ int ret;
+
+ /* Connect the serial (SCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret)
+ return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
+
+ /* Init GMSR_H and GMSR_L registers */
+ val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
+ qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
+
+ /* Disable and clear interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+
+ return 0;
+}
+
+static int qmc_qe_init_ucc(struct qmc *qmc)
+{
+ u32 val;
+ int ret;
+
+ /* Set the UCC in slow mode */
+ qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
+ UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
+
+ /* Connect the serial (UCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret)
+ return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
+
+ /* Initialize the QMC tx startup addresses */
+ if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
+ dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed");
+ ret = -EIO;
+ goto err_tsa_serial_disconnect;
+ }
+
+ /* Initialize the QMC rx startup addresses */
+ if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
+ dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed");
+ ret = -EIO;
+ goto err_tsa_serial_disconnect;
+ }
+
+ /* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
+ * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during
+ * qmc_setup_tsa() call)
+ */
+ val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+ val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ /* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
+ val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
+ SCC_GSMRH_TRX | SCC_GSMRH_TTX;
+ qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);
+
+ /* Disable and clear interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+
+ return 0;
+
+err_tsa_serial_disconnect:
+ tsa_serial_disconnect(qmc->tsa_serial);
+ return ret;
+}
+
+static int qmc_init_xcc(struct qmc *qmc)
+{
+ return qmc_is_qe(qmc) ?
+ qmc_qe_init_ucc(qmc) :
+ qmc_cpm1_init_scc(qmc);
+}
+
+static void qmc_exit_xcc(struct qmc *qmc)
+{
+ /* Disconnect the serial from TSA */
+ tsa_serial_disconnect(qmc->tsa_serial);
+}
+
+static int qmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int nb_chans;
+ struct qmc *qmc;
+ int irq;
+ int ret;
+
+ qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
+ if (!qmc)
+ return -ENOMEM;
+
+ qmc->dev = &pdev->dev;
+ qmc->data = of_device_get_match_data(&pdev->dev);
+ if (!qmc->data) {
+ dev_err(qmc->dev, "Missing match data\n");
+ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&qmc->chan_head);
+
qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
if (IS_ERR(qmc->tsa_serial)) {
return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
"Failed to get TSA serial\n");
}
- /* Connect the serial (SCC) to TSA */
- ret = tsa_serial_connect(qmc->tsa_serial);
- if (ret) {
- dev_err(qmc->dev, "Failed to connect TSA serial\n");
+ ret = qmc_init_resources(qmc, pdev);
+ if (ret)
return ret;
+
+ if (qmc_is_qe(qmc)) {
+ ret = qmc_qe_soft_qmc_init(qmc, np);
+ if (ret)
+ return ret;
}
/* Parse channels information */
ret = qmc_of_parse_chans(qmc, np);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
nb_chans = qmc_nb_chans(qmc);
- /* Init GMSR_H and GMSR_L registers */
- qmc_write32(qmc->scc_regs + SCC_GSMRH,
- SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);
-
- /* enable QMC mode */
- qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);
-
/*
* Allocate the buffer descriptor table
* 8 rx and 8 tx descriptors per channel
*/
qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
- &qmc->bd_dma_addr, GFP_KERNEL);
+ &qmc->bd_dma_addr, GFP_KERNEL);
if (!qmc->bd_table) {
dev_err(qmc->dev, "Failed to allocate bd table\n");
- ret = -ENOMEM;
- goto err_tsa_serial_disconnect;
+ return -ENOMEM;
}
memset(qmc->bd_table, 0, qmc->bd_size);
@@ -1637,11 +1947,10 @@ static int qmc_probe(struct platform_device *pdev)
/* Allocate the interrupt table */
qmc->int_size = QMC_NB_INTS * sizeof(u16);
qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
- &qmc->int_dma_addr, GFP_KERNEL);
+ &qmc->int_dma_addr, GFP_KERNEL);
if (!qmc->int_table) {
dev_err(qmc->dev, "Failed to allocate interrupt table\n");
- ret = -ENOMEM;
- goto err_tsa_serial_disconnect;
+ return -ENOMEM;
}
memset(qmc->int_table, 0, qmc->int_size);
@@ -1658,40 +1967,59 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
+ if (qmc_is_qe(qmc)) {
+ /* Zero the reserved area */
+ memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
+ QMC_QE_GBL_RSV_B0_SIZE);
+
+ qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
+
+ /* Init 'framer parameters' area and set the base addresses */
+ memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
+ memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
+ qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
+ qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
+ qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
+ qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
+ }
+
ret = qmc_init_tsa(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
ret = qmc_setup_chans(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
/* Init interrupts table */
ret = qmc_setup_ints(qmc);
if (ret)
- goto err_tsa_serial_disconnect;
+ return ret;
- /* Disable and clear interrupts, set the irq handler */
- qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
- qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+ /* Init SCC (CPM1) or UCC (QE) */
+ ret = qmc_init_xcc(qmc);
+ if (ret)
+ return ret;
+
+ /* Set the irq handler */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- goto err_tsa_serial_disconnect;
+ goto err_exit_xcc;
ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
if (ret < 0)
- goto err_tsa_serial_disconnect;
+ goto err_exit_xcc;
/* Enable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM,
- SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
+ SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
ret = qmc_finalize_chans(qmc);
if (ret < 0)
goto err_disable_intr;
- /* Enable transmiter and receiver */
+ /* Enable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
platform_set_drvdata(pdev, qmc);
@@ -1709,8 +2037,8 @@ err_disable_txrx:
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
-err_tsa_serial_disconnect:
- tsa_serial_disconnect(qmc->tsa_serial);
+err_exit_xcc:
+ qmc_exit_xcc(qmc);
return ret;
}
@@ -1718,18 +2046,43 @@ static void qmc_remove(struct platform_device *pdev)
{
struct qmc *qmc = platform_get_drvdata(pdev);
- /* Disable transmiter and receiver */
+ /* Disable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
/* Disable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
- /* Disconnect the serial from TSA */
- tsa_serial_disconnect(qmc->tsa_serial);
+ /* Exit SCC (CPM1) or UCC (QE) */
+ qmc_exit_xcc(qmc);
}
+static const struct qmc_data qmc_data_cpm1 = {
+ .version = QMC_CPM1,
+ .tstate = 0x30000000,
+ .rstate = 0x31000000,
+ .zistate = 0x00000100,
+ .zdstate_hdlc = 0x00000080,
+ .zdstate_transp = 0x18000080,
+ .rpack = 0x00000000,
+};
+
+static const struct qmc_data qmc_data_qe = {
+ .version = QMC_QE,
+ .tstate = 0x30000000,
+ .rstate = 0x30000000,
+ .zistate = 0x00000200,
+ .zdstate_hdlc = 0x80FFFFE0,
+ .zdstate_transp = 0x003FFFE2,
+ .rpack = 0x80000000,
+};
+
static const struct of_device_id qmc_id_table[] = {
- { .compatible = "fsl,cpm1-scc-qmc" },
+#if IS_ENABLED(CONFIG_CPM1)
+ { .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
+#endif
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ { .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
+#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);
@@ -1889,5 +2242,5 @@ struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
-MODULE_DESCRIPTION("CPM QMC driver");
+MODULE_DESCRIPTION("CPM/QE QMC driver");
MODULE_LICENSE("GPL");
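
For reference, a consumer of the exported channel API might look roughly like this; the probe function, the buffer size, and the use of the device's own node are illustrative, not taken from an in-tree user:

    /* Hypothetical consumer; values are illustrative. max_rx_buf_size
     * must be a multiple of 4 and at least 8 (see qmc_chan_set_param()
     * above). */
    #include <soc/fsl/qe/qmc.h>

    static int foo_hdlc_probe(struct platform_device *pdev)
    {
            struct qmc_chan_param param = {
                    .mode = QMC_HDLC,
                    .hdlc.max_rx_buf_size = 1024,
            };
            struct qmc_chan *chan;

            chan = devm_qmc_chan_get_bychild(&pdev->dev, pdev->dev.of_node);
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            return qmc_chan_set_param(chan, &param);
    }
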
diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
index 6c5741cf5e9d..f0889b3fcaf2 100644
--- a/drivers/soc/fsl/qe/tsa.c
+++ b/drivers/soc/fsl/qe/tsa.c
@@ -9,6 +9,8 @@
#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
+#include <dt-bindings/soc/qe-fsl,tsa.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -16,86 +18,116 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <soc/fsl/qe/ucc.h>
+
+/* TSA SI RAM routing tables entry (CPM1) */
+#define TSA_CPM1_SIRAM_ENTRY_LAST BIT(16)
+#define TSA_CPM1_SIRAM_ENTRY_BYTE BIT(17)
+#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK GENMASK(21, 18)
+#define TSA_CPM1_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK GENMASK(24, 22)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
+#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)
+
+/* TSA SI RAM routing tables entry (QE) */
+#define TSA_QE_SIRAM_ENTRY_LAST BIT(0)
+#define TSA_QE_SIRAM_ENTRY_BYTE BIT(1)
+#define TSA_QE_SIRAM_ENTRY_CNT_MASK GENMASK(4, 2)
+#define TSA_QE_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x)
+#define TSA_QE_SIRAM_ENTRY_CSEL_MASK GENMASK(8, 5)
+#define TSA_QE_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC5 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC1 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC2 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC3 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb)
+#define TSA_QE_SIRAM_ENTRY_CSEL_UCC4 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc)
-
-/* TSA SI RAM routing tables entry */
-#define TSA_SIRAM_ENTRY_LAST (1 << 16)
-#define TSA_SIRAM_ENTRY_BYTE (1 << 17)
-#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18)
-#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22)
-#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22)
-
-/* SI mode register (32 bits) */
-#define TSA_SIMODE 0x00
-#define TSA_SIMODE_SMC2 0x80000000
-#define TSA_SIMODE_SMC1 0x00008000
-#define TSA_SIMODE_TDMA(x) ((x) << 0)
-#define TSA_SIMODE_TDMB(x) ((x) << 16)
-#define TSA_SIMODE_TDM_MASK 0x0fff
-#define TSA_SIMODE_TDM_SDM_MASK 0x0c00
-#define TSA_SIMODE_TDM_SDM_NORM 0x0000
-#define TSA_SIMODE_TDM_SDM_ECHO 0x0400
-#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800
-#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00
-#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8)
-#define TSA_SIMODE_TDM_DSC 0x0080
-#define TSA_SIMODE_TDM_CRT 0x0040
-#define TSA_SIMODE_TDM_STZ 0x0020
-#define TSA_SIMODE_TDM_CE 0x0010
-#define TSA_SIMODE_TDM_FE 0x0008
-#define TSA_SIMODE_TDM_GM 0x0004
-#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0)
-
-/* SI global mode register (8 bits) */
-#define TSA_SIGMR 0x04
-#define TSA_SIGMR_ENB (1<<3)
-#define TSA_SIGMR_ENA (1<<2)
-#define TSA_SIGMR_RDM_MASK 0x03
-#define TSA_SIGMR_RDM_STATIC_TDMA 0x00
-#define TSA_SIGMR_RDM_DYN_TDMA 0x01
-#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02
-#define TSA_SIGMR_RDM_DYN_TDMAB 0x03
-
-/* SI status register (8 bits) */
-#define TSA_SISTR 0x06
-
-/* SI command register (8 bits) */
-#define TSA_SICMR 0x07
+/*
+ * SI mode register:
+ * - CPM1: one 32-bit register split into two 16-bit halves (one per TDM)
+ * - QE: four 16-bit registers, one per TDM
+ */
+#define TSA_CPM1_SIMODE 0x00
+#define TSA_QE_SIAMR 0x00
+#define TSA_QE_SIBMR 0x02
+#define TSA_QE_SICMR 0x04
+#define TSA_QE_SIDMR 0x06
+#define TSA_CPM1_SIMODE_SMC2 BIT(31)
+#define TSA_CPM1_SIMODE_SMC1 BIT(15)
+#define TSA_CPM1_SIMODE_TDMA_MASK GENMASK(11, 0)
+#define TSA_CPM1_SIMODE_TDMA(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
+#define TSA_CPM1_SIMODE_TDMB_MASK GENMASK(27, 16)
+#define TSA_CPM1_SIMODE_TDMB(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
+#define TSA_QE_SIMODE_TDM_SAD_MASK GENMASK(15, 12)
+#define TSA_QE_SIMODE_TDM_SAD(x) FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x)
+#define TSA_CPM1_SIMODE_TDM_MASK GENMASK(11, 0)
+#define TSA_SIMODE_TDM_SDM_MASK GENMASK(11, 10)
+#define TSA_SIMODE_TDM_SDM_NORM FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
+#define TSA_SIMODE_TDM_SDM_ECHO FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
+#define TSA_SIMODE_TDM_SDM_INTL_LOOP FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
+#define TSA_SIMODE_TDM_SDM_LOOP_CTRL FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
+#define TSA_SIMODE_TDM_RFSD_MASK GENMASK(9, 8)
+#define TSA_SIMODE_TDM_RFSD(x) FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
+#define TSA_SIMODE_TDM_DSC BIT(7)
+#define TSA_SIMODE_TDM_CRT BIT(6)
+#define TSA_CPM1_SIMODE_TDM_STZ BIT(5) /* bit 5: STZ in CPM1 */
+#define TSA_QE_SIMODE_TDM_SL BIT(5) /* bit 5: SL in QE */
+#define TSA_SIMODE_TDM_CE BIT(4)
+#define TSA_SIMODE_TDM_FE BIT(3)
+#define TSA_SIMODE_TDM_GM BIT(2)
+#define TSA_SIMODE_TDM_TFSD_MASK GENMASK(1, 0)
+#define TSA_SIMODE_TDM_TFSD(x) FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
+
+/* CPM1 SI global mode register (8 bits) */
+#define TSA_CPM1_SIGMR 0x04
+#define TSA_CPM1_SIGMR_ENB BIT(3)
+#define TSA_CPM1_SIGMR_ENA BIT(2)
+#define TSA_CPM1_SIGMR_RDM_MASK GENMASK(1, 0)
+#define TSA_CPM1_SIGMR_RDM_STATIC_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
+#define TSA_CPM1_SIGMR_RDM_DYN_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
+#define TSA_CPM1_SIGMR_RDM_STATIC_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
+#define TSA_CPM1_SIGMR_RDM_DYN_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)
+
+/* QE SI global mode register high (8 bits) */
+#define TSA_QE_SIGLMRH 0x08
+#define TSA_QE_SIGLMRH_END BIT(3)
+#define TSA_QE_SIGLMRH_ENC BIT(2)
+#define TSA_QE_SIGLMRH_ENB BIT(1)
+#define TSA_QE_SIGLMRH_ENA BIT(0)
/* SI clock route register (32 bits) */
-#define TSA_SICR 0x0C
-#define TSA_SICR_SCC2(x) ((x) << 8)
-#define TSA_SICR_SCC3(x) ((x) << 16)
-#define TSA_SICR_SCC4(x) ((x) << 24)
-#define TSA_SICR_SCC_MASK 0x0ff
-#define TSA_SICR_SCC_GRX (1 << 7)
-#define TSA_SICR_SCC_SCX_TSA (1 << 6)
-#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3)
-#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3)
-#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3)
-#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3)
-#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3)
-#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3)
-#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3)
-#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3)
-#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3)
-#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0)
-#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0)
-#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0)
-#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0)
-#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0)
-#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0)
-#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0)
-#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0)
-#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0)
-
-/* Serial interface RAM pointer register (32 bits) */
-#define TSA_SIRP 0x10
+#define TSA_CPM1_SICR 0x0C
+#define TSA_CPM1_SICR_SCC2_MASK GENMASK(15, 8)
+#define TSA_CPM1_SICR_SCC2(x) FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
+#define TSA_CPM1_SICR_SCC3_MASK GENMASK(23, 16)
+#define TSA_CPM1_SICR_SCC3(x) FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
+#define TSA_CPM1_SICR_SCC4_MASK GENMASK(31, 24)
+#define TSA_CPM1_SICR_SCC4(x) FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
+#define TSA_CPM1_SICR_SCC_MASK GENMASK(7, 0)
+#define TSA_CPM1_SICR_SCC_GRX BIT(7)
+#define TSA_CPM1_SICR_SCC_SCX_TSA BIT(6)
+#define TSA_CPM1_SICR_SCC_RXCS_MASK GENMASK(5, 3)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
+#define TSA_CPM1_SICR_SCC_RXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
+#define TSA_CPM1_SICR_SCC_RXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
+#define TSA_CPM1_SICR_SCC_TXCS_MASK GENMASK(2, 0)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
+#define TSA_CPM1_SICR_SCC_TXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
+#define TSA_CPM1_SICR_SCC_TXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)
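
The register-field conversion running through this file is mechanical: each open-coded shift becomes a GENMASK()-defined mask plus FIELD_PREP(), which derives the shift from the mask and masks the value, so behavior is unchanged. A minimal sketch of the equivalence for the SCC2 clock-route field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static u32 sicr_scc2_old(u32 x)
{
	return (x & 0xff) << 8;			/* open-coded form */
}

static u32 sicr_scc2_new(u32 x)
{
	return FIELD_PREP(GENMASK(15, 8), x);	/* mask-derived form */
}
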
struct tsa_entries_area {
void __iomem *entries_start;
@@ -114,15 +146,31 @@ struct tsa_tdm {
#define TSA_TDMA 0
#define TSA_TDMB 1
+#define TSA_TDMC 2 /* QE implementation only */
+#define TSA_TDMD 3 /* QE implementation only */
+
+enum tsa_version {
+ TSA_CPM1 = 1, /* Avoid 0 value */
+ TSA_QE,
+};
struct tsa {
struct device *dev;
void __iomem *si_regs;
void __iomem *si_ram;
resource_size_t si_ram_sz;
- spinlock_t lock;
+ spinlock_t lock; /* Lock for read/modify/write sequence */
+ enum tsa_version version;
int tdms; /* TSA_TDMx ORed */
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */
+#else
struct tsa_tdm tdm[2]; /* TDMa and TDMb */
+#endif
+	/*
+	 * Same number of serials for CPM1 and QE:
+	 * CPM1: NU, 3 SCCs and 2 SMCs
+	 * QE: NU and 5 UCCs
+	 */
struct tsa_serial {
unsigned int id;
struct tsa_serial_info info;
@@ -140,7 +188,12 @@ static inline void tsa_write32(void __iomem *addr, u32 val)
iowrite32be(val, addr);
}
-static inline void tsa_write8(void __iomem *addr, u32 val)
+static inline void tsa_write16(void __iomem *addr, u16 val)
+{
+ iowrite16be(val, addr);
+}
+
+static inline void tsa_write8(void __iomem *addr, u8 val)
{
iowrite8(val, addr);
}
@@ -150,17 +203,68 @@ static inline u32 tsa_read32(void __iomem *addr)
return ioread32be(addr);
}
+static inline u16 tsa_read16(void __iomem *addr)
+{
+ return ioread16be(addr);
+}
+
static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
tsa_write32(addr, tsa_read32(addr) & ~clr);
}
+static inline void tsa_clrbits16(void __iomem *addr, u16 clr)
+{
+ tsa_write16(addr, tsa_read16(addr) & ~clr);
+}
+
static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
-int tsa_serial_connect(struct tsa_serial *tsa_serial)
+static bool tsa_is_qe(const struct tsa *tsa)
+{
+ if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
+ return tsa->version == TSA_QE;
+
+ return IS_ENABLED(CONFIG_QUICC_ENGINE);
+}
+
+static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ switch (tsa_serial->id) {
+ case FSL_QE_TSA_UCC1: return 0;
+ case FSL_QE_TSA_UCC2: return 1;
+ case FSL_QE_TSA_UCC3: return 2;
+ case FSL_QE_TSA_UCC4: return 3;
+ case FSL_QE_TSA_UCC5: return 4;
+ default:
+ break;
+ }
+
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+}
+
+int tsa_serial_get_num(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ /*
+	 * There is no need to get the serial number out of the TSA driver in
+	 * the CPM case.
+	 * Furthermore, CPM has two types of serials (SCCs and FCCs), and no
+	 * single numbering scheme could be global to both.
+ */
+ return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(tsa_serial_get_num);
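
tsa_serial_get_num() exists so that QE-side consumers can translate a TSA serial into the UCC number expected by the QE multiplexer helpers (which is also why ucc_mux_set_grant_tsa_bkpt() gains an EXPORT_SYMBOL later in this diff). A hedged sketch of such a consumer; the function name and grant_mask parameter are illustrative, not taken from this patch:

static int qmc_qe_grant_sketch(struct tsa_serial *tsa_serial, u32 grant_mask)
{
	int ucc_num = tsa_serial_get_num(tsa_serial);

	if (ucc_num < 0)
		return ucc_num;	/* -EOPNOTSUPP on CPM1, -EINVAL on a bad id */

	/* grant_mask is an illustrative value, not from this patch */
	return ucc_mux_set_grant_tsa_bkpt(ucc_num, 1, grant_mask);
}
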
+
+static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
@@ -169,16 +273,16 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
switch (tsa_serial->id) {
case FSL_CPM_TSA_SCC2:
- clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC3:
- clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC4:
- clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
- set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
+ clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
+ set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
default:
dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
@@ -186,40 +290,53 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
}
spin_lock_irqsave(&tsa->lock, flags);
- tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
+ tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
+ connect ? set : 0);
spin_unlock_irqrestore(&tsa->lock, flags);
return 0;
}
-EXPORT_SYMBOL(tsa_serial_connect);
-int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
- u32 clear;
+ int ucc_num;
+ int ret;
- switch (tsa_serial->id) {
- case FSL_CPM_TSA_SCC2:
- clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
- break;
- case FSL_CPM_TSA_SCC3:
- clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
- break;
- case FSL_CPM_TSA_SCC4:
- clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
- break;
- default:
- dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
- return -EINVAL;
- }
+ ucc_num = tsa_qe_serial_get_num(tsa_serial);
+ if (ucc_num < 0)
+ return ucc_num;
spin_lock_irqsave(&tsa->lock, flags);
- tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
+ ret = ucc_set_qe_mux_tsa(ucc_num, connect);
spin_unlock_irqrestore(&tsa->lock, flags);
-
+ if (ret) {
+		dev_err(tsa->dev, "%s serial id %u failed (%d)\n",
+			connect ? "Connect" : "Disconnect", tsa_serial->id, ret);
+ return ret;
+ }
return 0;
}
+
+int tsa_serial_connect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_connect(tsa_serial, true) :
+ tsa_cpm1_serial_connect(tsa_serial, true);
+}
+EXPORT_SYMBOL(tsa_serial_connect);
+
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_connect(tsa_serial, false) :
+ tsa_cpm1_serial_connect(tsa_serial, false);
+}
EXPORT_SYMBOL(tsa_serial_disconnect);
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
@@ -229,14 +346,14 @@ int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *i
}
EXPORT_SYMBOL(tsa_serial_get_info);
-static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
- u32 tdms, u32 tdm_id, bool is_rx)
+static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
{
resource_size_t quarter;
resource_size_t half;
- quarter = tsa->si_ram_sz/4;
- half = tsa->si_ram_sz/2;
+ quarter = tsa->si_ram_sz / 4;
+ half = tsa->si_ram_sz / 2;
if (tdms == BIT(TSA_TDMA)) {
/* Only TDMA */
@@ -281,7 +398,42 @@ static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area
}
}
-static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
+static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ resource_size_t eighth;
+ resource_size_t half;
+
+ eighth = tsa->si_ram_sz / 8;
+ half = tsa->si_ram_sz / 2;
+
+ /*
+	 * One half of the SI RAM is used for Tx, the other one for Rx.
+	 * In each half, 1/4 of the area is assigned to each TDM.
+ */
+ if (is_rx) {
+ /* Rx: Second half of si_ram */
+ area->entries_start = tsa->si_ram + half + (eighth * tdm_id);
+ area->entries_next = area->entries_start + eighth;
+ area->last_entry = NULL;
+ } else {
+ /* Tx: First half of si_ram */
+ area->entries_start = tsa->si_ram + (eighth * tdm_id);
+ area->entries_next = area->entries_start + eighth;
+ area->last_entry = NULL;
+ }
+}
+
+static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ if (tsa_is_qe(tsa))
+ tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
+ else
+ tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
+}
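
To make the tsa_qe_init_entries_area() split concrete, assume a hypothetical 0x800-byte SI RAM, i.e. 1024 16-bit QE entries: Tx occupies the first half, Rx the second, and each TDM owns one eighth of the RAM (128 entries) in each half. A sketch of the Rx computation under that assumption:

static void __iomem *qe_rx_area_sketch(void __iomem *si_ram, u32 tdm_id)
{
	resource_size_t eighth = 0x800 / 8;	/* 0x100 bytes, 128 entries */
	resource_size_t half = 0x800 / 2;	/* 0x400 bytes */

	/* e.g. TDMc (tdm_id = 2): Rx starts at 0x400 + 0x200 = 0x600 */
	return si_ram + half + eighth * tdm_id;
}
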
+
+static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_CPM_TSA_NU: return "Not used";
@@ -296,22 +448,44 @@ static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
return NULL;
}
-static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
- case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2;
- case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3;
- case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4;
- case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1;
- case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2;
+ case FSL_QE_TSA_NU: return "Not used";
+ case FSL_QE_TSA_UCC1: return "UCC1";
+ case FSL_QE_TSA_UCC2: return "UCC2";
+ case FSL_QE_TSA_UCC3: return "UCC3";
+ case FSL_QE_TSA_UCC4: return "UCC4";
+ case FSL_QE_TSA_UCC5: return "UCC5";
default:
break;
}
- return TSA_SIRAM_ENTRY_CSEL_NU;
+ return NULL;
}
-static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
- u32 count, u32 serial_id)
+static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
+{
+ return tsa_is_qe(tsa) ?
+ tsa_qe_serial_id2name(tsa, serial_id) :
+ tsa_cpm1_serial_id2name(tsa, serial_id);
+}
+
+static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_SCC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
+ case FSL_CPM_TSA_SCC3: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
+ case FSL_CPM_TSA_SCC4: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
+ case FSL_CPM_TSA_SMC1: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
+ case FSL_CPM_TSA_SMC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
+ default:
+ break;
+ }
+ return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
+}
+
+static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
{
void __iomem *addr;
u32 left;
@@ -329,21 +503,21 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
if (area->last_entry) {
/* Clear last flag */
- tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
+ tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
}
left = count;
while (left) {
- val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
+ val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);
if (left > 16) {
cnt = 16;
} else {
cnt = left;
- val |= TSA_SIRAM_ENTRY_LAST;
+ val |= TSA_CPM1_SIRAM_ENTRY_LAST;
area->last_entry = addr;
}
- val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
+ val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);
tsa_write32(addr, val);
addr += 4;
@@ -353,6 +527,71 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
return 0;
}
+static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_QE_TSA_UCC1: return TSA_QE_SIRAM_ENTRY_CSEL_UCC1;
+ case FSL_QE_TSA_UCC2: return TSA_QE_SIRAM_ENTRY_CSEL_UCC2;
+ case FSL_QE_TSA_UCC3: return TSA_QE_SIRAM_ENTRY_CSEL_UCC3;
+ case FSL_QE_TSA_UCC4: return TSA_QE_SIRAM_ENTRY_CSEL_UCC4;
+ case FSL_QE_TSA_UCC5: return TSA_QE_SIRAM_ENTRY_CSEL_UCC5;
+ default:
+ break;
+ }
+ return TSA_QE_SIRAM_ENTRY_CSEL_NU;
+}
+
+static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+ u32 nb;
+
+ addr = area->last_entry ? area->last_entry + 2 : area->entries_start;
+
+ nb = DIV_ROUND_UP(count, 8);
+ if ((addr + (nb * 2)) > area->entries_next) {
+ dev_err(tsa->dev, "si ram area full\n");
+ return -ENOSPC;
+ }
+
+ if (area->last_entry) {
+ /* Clear last flag */
+ tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST);
+ }
+
+ left = count;
+ while (left) {
+ val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id);
+
+ if (left > 8) {
+ cnt = 8;
+ } else {
+ cnt = left;
+ val |= TSA_QE_SIRAM_ENTRY_LAST;
+ area->last_entry = addr;
+ }
+ val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1);
+
+ tsa_write16(addr, val);
+ addr += 2;
+ left -= cnt;
+ }
+
+ return 0;
+}
+
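
The grouping logic above is easiest to see with numbers: routing 20 time slots to one serial emits entries of 8, 8 and 4 slots, the CNT field holding one less than the group size and LAST set only on the final entry (CPM1 is identical but with 32-bit entries grouping up to 16 slots). A host-side sketch of just the grouping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t left = 20, cnt;	/* route 20 time slots */

	while (left) {
		cnt = left > 8 ? 8 : left;
		printf("entry: CNT=%u%s\n", cnt - 1, left == cnt ? " LAST" : "");
		left -= cnt;
	}
	return 0;	/* prints CNT=7, CNT=7, CNT=3 LAST */
}
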
+static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ return tsa_is_qe(tsa) ?
+ tsa_qe_add_entry(tsa, area, count, serial_id) :
+ tsa_cpm1_add_entry(tsa, area, count, serial_id);
+}
+
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
u32 tdms, u32 tdm_id, bool is_rx)
{
@@ -399,7 +638,7 @@ static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
}
dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
- tdm_id, route_name, ts, ts+count-1, serial_name);
+ tdm_id, route_name, ts, ts + count - 1, serial_name);
ts += count;
ret = tsa_add_entry(tsa, &area, count, serial_id);
@@ -449,8 +688,8 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
int i;
tsa->tdms = 0;
- tsa->tdm[0].is_enable = false;
- tsa->tdm[1].is_enable = false;
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
+ tsa->tdm[i].is_enable = false;
for_each_available_child_of_node(np, tdm_np) {
ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
@@ -466,7 +705,18 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
case 1:
tsa->tdms |= BIT(TSA_TDMB);
break;
+ case 2:
+ if (!tsa_is_qe(tsa))
+ goto invalid_tdm; /* Not available on CPM1 */
+ tsa->tdms |= BIT(TSA_TDMC);
+ break;
+ case 3:
+ if (!tsa_is_qe(tsa))
+ goto invalid_tdm; /* Not available on CPM1 */
+ tsa->tdms |= BIT(TSA_TDMD);
+ break;
default:
+invalid_tdm:
dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
tdm_id);
of_node_put(tdm_np);
@@ -532,10 +782,14 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
+ if (tsa_is_qe(tsa) &&
+ of_property_read_bool(tdm_np, "fsl,fsync-active-low"))
+ tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL;
+
if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
- clk = of_clk_get_by_name(tdm_np, "l1rsync");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@@ -549,7 +803,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
}
tdm->l1rsync_clk = clk;
- clk = of_clk_get_by_name(tdm_np, "l1rclk");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@@ -564,7 +818,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
tdm->l1rclk_clk = clk;
if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
- clk = of_clk_get_by_name(tdm_np, "l1tsync");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@@ -578,7 +832,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
}
tdm->l1tsync_clk = clk;
- clk = of_clk_get_by_name(tdm_np, "l1tclk");
+ clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@@ -593,6 +847,17 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
tdm->l1tclk_clk = clk;
}
+ if (tsa_is_qe(tsa)) {
+ /*
+		 * The starting address for the TSA table must be set.
+		 * 512 entries for Tx and 512 entries for Rx are
+		 * available for the 4 TDMs.
+		 * We assign entries equally -> 128 Rx/Tx entries per
+		 * TDM. As the SAD field counts blocks of 32 entries,
+		 * that is 4 blocks per TDM, hence SAD = 4 * tdm_id.
+ */
+ tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id);
+ }
+
ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
if (ret) {
of_node_put(tdm_np);
@@ -610,7 +875,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
return 0;
err:
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@@ -636,8 +901,87 @@ static void tsa_init_si_ram(struct tsa *tsa)
resource_size_t i;
/* Fill all entries as the last one */
- for (i = 0; i < tsa->si_ram_sz; i += 4)
- tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
+ if (tsa_is_qe(tsa)) {
+ for (i = 0; i < tsa->si_ram_sz; i += 2)
+ tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST);
+ } else {
+ for (i = 0; i < tsa->si_ram_sz; i += 4)
+ tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
+ }
+}
+
+static int tsa_cpm1_setup(struct tsa *tsa)
+{
+ u32 val;
+
+ /* Set SIMODE */
+ val = 0;
+ if (tsa->tdm[0].is_enable)
+ val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
+ if (tsa->tdm[1].is_enable)
+ val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
+
+ tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
+ TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
+ TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
+ val);
+
+ /* Set SIGMR */
+ val = (tsa->tdms == BIT(TSA_TDMA)) ?
+ TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
+ if (tsa->tdms & BIT(TSA_TDMA))
+ val |= TSA_CPM1_SIGMR_ENA;
+ if (tsa->tdms & BIT(TSA_TDMB))
+ val |= TSA_CPM1_SIGMR_ENB;
+ tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);
+
+ return 0;
+}
+
+static int tsa_qe_setup(struct tsa *tsa)
+{
+ unsigned int sixmr;
+ u8 siglmrh = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
+ if (!tsa->tdm[i].is_enable)
+ continue;
+
+ switch (i) {
+ case 0:
+ sixmr = TSA_QE_SIAMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENA;
+ break;
+ case 1:
+ sixmr = TSA_QE_SIBMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENB;
+ break;
+ case 2:
+ sixmr = TSA_QE_SICMR;
+ siglmrh |= TSA_QE_SIGLMRH_ENC;
+ break;
+ case 3:
+ sixmr = TSA_QE_SIDMR;
+ siglmrh |= TSA_QE_SIGLMRH_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set SI mode register */
+ tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm);
+ }
+
+ /* Enable TDMs */
+ tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh);
+
+ return 0;
+}
+
+static int tsa_setup(struct tsa *tsa)
+{
+ return tsa_is_qe(tsa) ? tsa_qe_setup(tsa) : tsa_cpm1_setup(tsa);
}
static int tsa_probe(struct platform_device *pdev)
@@ -646,7 +990,6 @@ static int tsa_probe(struct platform_device *pdev)
struct resource *res;
struct tsa *tsa;
unsigned int i;
- u32 val;
int ret;
tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
@@ -654,6 +997,18 @@ static int tsa_probe(struct platform_device *pdev)
return -ENOMEM;
tsa->dev = &pdev->dev;
+ tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
+ switch (tsa->version) {
+ case TSA_CPM1:
+ dev_info(tsa->dev, "CPM1 version\n");
+ break;
+ case TSA_QE:
+ dev_info(tsa->dev, "QE version\n");
+ break;
+ default:
+ dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
tsa->serials[i].id = i;
@@ -680,26 +1035,9 @@ static int tsa_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Set SIMODE */
- val = 0;
- if (tsa->tdm[0].is_enable)
- val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
- if (tsa->tdm[1].is_enable)
- val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
-
- tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
- TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
- TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
- val);
-
- /* Set SIGMR */
- val = (tsa->tdms == BIT(TSA_TDMA)) ?
- TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
- if (tsa->tdms & BIT(TSA_TDMA))
- val |= TSA_SIGMR_ENA;
- if (tsa->tdms & BIT(TSA_TDMB))
- val |= TSA_SIGMR_ENB;
- tsa_write8(tsa->si_regs + TSA_SIGMR, val);
+ ret = tsa_setup(tsa);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, tsa);
@@ -711,7 +1049,7 @@ static void tsa_remove(struct platform_device *pdev)
struct tsa *tsa = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@@ -732,7 +1070,12 @@ static void tsa_remove(struct platform_device *pdev)
}
static const struct of_device_id tsa_id_table[] = {
- { .compatible = "fsl,cpm1-tsa" },
+#if IS_ENABLED(CONFIG_CPM1)
+ { .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
+#endif
+#if IS_ENABLED(CONFIG_QUICC_ENGINE)
+ { .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE },
+#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
@@ -841,5 +1184,5 @@ struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
-MODULE_DESCRIPTION("CPM TSA driver");
+MODULE_DESCRIPTION("CPM/QE TSA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.h b/drivers/soc/fsl/qe/tsa.h
index d9df89b6da3e..da137bc0f49b 100644
--- a/drivers/soc/fsl/qe/tsa.h
+++ b/drivers/soc/fsl/qe/tsa.h
@@ -39,4 +39,7 @@ struct tsa_serial_info {
/* Get information */
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info);
+/* Get serial number */
+int tsa_serial_get_num(struct tsa_serial *tsa_serial);
+
#endif /* __SOC_FSL_TSA_H__ */
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 21dbcd787cd5..892aa5931d5b 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -114,6 +114,7 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
return 0;
}
+EXPORT_SYMBOL(ucc_mux_set_grant_tsa_bkpt);
int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
enum comm_dir mode)
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 01b129caf1eb..5250c1d702eb 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -327,11 +327,11 @@ enum mtk_mutex_sof_id {
};
struct mtk_mutex_data {
- const unsigned int *mutex_mod;
- const unsigned int *mutex_sof;
- const unsigned int mutex_mod_reg;
- const unsigned int mutex_sof_reg;
- const unsigned int *mutex_table_mod;
+ const u8 *mutex_mod;
+ const u8 *mutex_table_mod;
+ const u16 *mutex_sof;
+ const u16 mutex_mod_reg;
+ const u16 mutex_sof_reg;
const bool no_clk;
};
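
The type narrowing in this file is a pure data-footprint change: every MUTEX_MOD value fits in 8 bits and every SOF value in 16, and grouping the two u16 registers after the pointers keeps the struct densely packed. A rough sketch of the saving per table, with a hypothetical entry count:

#include <linux/types.h>

#define N 80	/* hypothetical; the real bound is DDP_COMPONENT_ID_MAX */
static const unsigned int wide_tab[N];	/* 4 * 80 = 320 bytes */
static const u8 narrow_tab[N];		/* 1 * 80 =  80 bytes */
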
@@ -345,7 +345,7 @@ struct mtk_mutex_ctx {
struct cmdq_client_reg cmdq_reg;
};
-static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
[DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
[DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
@@ -354,7 +354,7 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
-static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
[DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
@@ -374,7 +374,7 @@ static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
};
-static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR,
[DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR,
@@ -389,7 +389,7 @@ static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
[DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
@@ -407,7 +407,7 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
-static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
@@ -421,7 +421,7 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0,
[MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1,
@@ -432,7 +432,7 @@ static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0,
};
-static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0,
@@ -445,7 +445,7 @@ static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
};
-static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0,
[MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1,
@@ -456,7 +456,7 @@ static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
};
-static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0,
[DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0,
@@ -496,7 +496,7 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4,
};
-static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
[MUTEX_MOD_IDX_MDP_RDMA3] = MT8195_MUTEX_MOD_MDP_RDMA3,
@@ -530,7 +530,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
};
-static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
@@ -544,7 +544,7 @@ static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
};
-static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OVL0] = MT8195_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_WDMA0] = MT8195_MUTEX_MOD_DISP_WDMA0,
[DDP_COMPONENT_RDMA0] = MT8195_MUTEX_MOD_DISP_RDMA0,
@@ -575,7 +575,7 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0,
};
-static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+static const u8 mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
[MUTEX_MOD_IDX_MDP_RDMA1] = MT8195_MUTEX_MOD_MDP_RDMA1,
[MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
@@ -621,7 +621,7 @@ static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
[MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
};
-static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+static const u8 mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR,
[DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0,
@@ -637,7 +637,7 @@ static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0,
};
-static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
@@ -647,14 +647,14 @@ static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
-static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
[MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
};
-static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
@@ -662,13 +662,13 @@ static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
};
/* Add EOF setting so overlay hardware can receive frame done irq */
-static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
};
-static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+static const u16 mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0,
@@ -682,7 +682,7 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
* but also detect the error at end of frame(EAEOF) when EOF signal
* arrives.
*/
-static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
@@ -692,7 +692,7 @@ static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
MT8188_MUTEX_SOF_DP_INTF1 | MT8188_MUTEX_EOF_DP_INTF1,
};
-static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+static const u16 mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0,
[MUTEX_SOF_DSI1] = MT8195_MUTEX_SOF_DSI1 | MT8195_MUTEX_EOF_DSI1,
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index efd9cae212dc..9fdc0ef79202 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -483,7 +483,7 @@ enum pwrap_regs {
PWRAP_MSB_FIRST,
};
-static int mt2701_regs[] = {
+static const int mt2701_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -569,7 +569,7 @@ static int mt2701_regs[] = {
[PWRAP_ADC_RDATA_ADDR2] = 0x154,
};
-static int mt6765_regs[] = {
+static const int mt6765_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -601,7 +601,7 @@ static int mt6765_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1E0,
};
-static int mt6779_regs[] = {
+static const int mt6779_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -640,7 +640,7 @@ static int mt6779_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
-static int mt6795_regs[] = {
+static const int mt6795_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -725,7 +725,7 @@ static int mt6795_regs[] = {
[PWRAP_EXT_CK] = 0x14c,
};
-static int mt6797_regs[] = {
+static const int mt6797_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -758,7 +758,7 @@ static int mt6797_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1D4,
};
-static int mt6873_regs[] = {
+static const int mt6873_regs[] = {
[PWRAP_INIT_DONE2] = 0x0,
[PWRAP_TIMER_EN] = 0x3E0,
[PWRAP_INT_EN] = 0x448,
@@ -769,7 +769,7 @@ static int mt6873_regs[] = {
[PWRAP_WACS2_RDATA] = 0xCA8,
};
-static int mt7622_regs[] = {
+static const int mt7622_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -881,7 +881,7 @@ static int mt7622_regs[] = {
[PWRAP_SPI2_CTRL] = 0x244,
};
-static int mt8135_regs[] = {
+static const int mt8135_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -954,7 +954,7 @@ static int mt8135_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x160,
};
-static int mt8173_regs[] = {
+static const int mt8173_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1036,7 +1036,7 @@ static int mt8173_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x148,
};
-static int mt8183_regs[] = {
+static const int mt8183_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1087,7 +1087,7 @@ static int mt8183_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
-static int mt8195_regs[] = {
+static const int mt8195_regs[] = {
[PWRAP_INIT_DONE2] = 0x0,
[PWRAP_STAUPD_CTRL] = 0x4C,
[PWRAP_TIMER_EN] = 0x3E4,
@@ -1104,7 +1104,7 @@ static int mt8195_regs[] = {
[PWRAP_WACS2_RDATA] = 0x8A8,
};
-static int mt8365_regs[] = {
+static const int mt8365_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1166,7 +1166,7 @@ static int mt8365_regs[] = {
[PWRAP_WDT_SRC_EN_1] = 0xf8,
};
-static int mt8516_regs[] = {
+static const int mt8516_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1251,7 +1251,7 @@ static int mt8516_regs[] = {
[PWRAP_MSB_FIRST] = 0x170,
};
-static int mt8186_regs[] = {
+static const int mt8186_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
[PWRAP_DIO_EN] = 0x8,
@@ -1366,10 +1366,6 @@ struct pmic_wrapper {
struct regmap *regmap;
const struct pmic_wrapper_type *master;
const struct pwrap_slv_type *slave;
- struct clk *clk_spi;
- struct clk *clk_wrap;
- struct clk *clk_sys;
- struct clk *clk_tmr;
struct reset_control *rstc;
struct reset_control *rstc_bridge;
@@ -1377,7 +1373,7 @@ struct pmic_wrapper {
};
struct pmic_wrapper_type {
- int *regs;
+ const int *regs;
enum pwrap_type type;
u32 arb_en_all;
u32 int_en_all;
@@ -2397,7 +2393,7 @@ static const struct pmic_wrapper_type pwrap_mt8183 = {
.init_soc_specific = pwrap_mt8183_init_soc_specific,
};
-static struct pmic_wrapper_type pwrap_mt8195 = {
+static const struct pmic_wrapper_type pwrap_mt8195 = {
.regs = mt8195_regs,
.type = PWRAP_MT8195,
.arb_en_all = 0x777f, /* NEED CONFIRM */
@@ -2423,7 +2419,7 @@ static const struct pmic_wrapper_type pwrap_mt8365 = {
.init_soc_specific = NULL,
};
-static struct pmic_wrapper_type pwrap_mt8516 = {
+static const struct pmic_wrapper_type pwrap_mt8516 = {
.regs = mt8516_regs,
.type = PWRAP_MT8516,
.arb_en_all = 0xff,
@@ -2435,7 +2431,7 @@ static struct pmic_wrapper_type pwrap_mt8516 = {
.init_soc_specific = NULL,
};
-static struct pmic_wrapper_type pwrap_mt8186 = {
+static const struct pmic_wrapper_type pwrap_mt8186 = {
.regs = mt8186_regs,
.type = PWRAP_MT8186,
.arb_en_all = 0xfb27f,
@@ -2472,6 +2468,7 @@ static int pwrap_probe(struct platform_device *pdev)
int ret, irq;
u32 mask_done;
struct pmic_wrapper *wrp;
+ struct clk_bulk_data *clk;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
@@ -2521,49 +2518,10 @@ static int pwrap_probe(struct platform_device *pdev)
}
}
- wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
- if (IS_ERR(wrp->clk_spi)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n",
- PTR_ERR(wrp->clk_spi));
- return PTR_ERR(wrp->clk_spi);
- }
-
- wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
- if (IS_ERR(wrp->clk_wrap)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n",
- PTR_ERR(wrp->clk_wrap));
- return PTR_ERR(wrp->clk_wrap);
- }
-
- wrp->clk_sys = devm_clk_get_optional(wrp->dev, "sys");
- if (IS_ERR(wrp->clk_sys)) {
- return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_sys),
- "failed to get clock: %pe\n",
- wrp->clk_sys);
- }
-
- wrp->clk_tmr = devm_clk_get_optional(wrp->dev, "tmr");
- if (IS_ERR(wrp->clk_tmr)) {
- return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_tmr),
- "failed to get clock: %pe\n",
- wrp->clk_tmr);
- }
-
- ret = clk_prepare_enable(wrp->clk_spi);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(wrp->clk_wrap);
+ ret = devm_clk_bulk_get_all_enable(wrp->dev, &clk);
if (ret)
- goto err_out1;
-
- ret = clk_prepare_enable(wrp->clk_sys);
- if (ret)
- goto err_out2;
-
- ret = clk_prepare_enable(wrp->clk_tmr);
- if (ret)
- goto err_out3;
+ return dev_err_probe(wrp->dev, ret,
+ "failed to get clocks\n");
/* Enable internal dynamic clock */
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_DCM)) {
@@ -2579,7 +2537,7 @@ static int pwrap_probe(struct platform_device *pdev)
ret = pwrap_init(wrp);
if (ret) {
dev_dbg(wrp->dev, "init failed with %d\n", ret);
- goto err_out4;
+ return ret;
}
}
@@ -2592,8 +2550,7 @@ static int pwrap_probe(struct platform_device *pdev)
if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & mask_done)) {
dev_dbg(wrp->dev, "initialization isn't finished\n");
- ret = -ENODEV;
- goto err_out4;
+ return -ENODEV;
}
/* Initialize watchdog, may not be done by the bootloader */
@@ -2622,42 +2579,27 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_out2;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
IRQF_TRIGGER_HIGH,
"mt-pmic-pwrap", wrp);
if (ret)
- goto err_out4;
+ return ret;
wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap);
- if (IS_ERR(wrp->regmap)) {
- ret = PTR_ERR(wrp->regmap);
- goto err_out2;
- }
+ if (IS_ERR(wrp->regmap))
+ return PTR_ERR(wrp->regmap);
ret = of_platform_populate(np, NULL, NULL, wrp->dev);
if (ret) {
dev_dbg(wrp->dev, "failed to create child devices at %pOF\n",
np);
- goto err_out4;
+ return ret;
}
return 0;
-
-err_out4:
- clk_disable_unprepare(wrp->clk_tmr);
-err_out3:
- clk_disable_unprepare(wrp->clk_sys);
-err_out2:
- clk_disable_unprepare(wrp->clk_wrap);
-err_out1:
- clk_disable_unprepare(wrp->clk_spi);
-
- return ret;
}
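
The probe conversion above replaces four named devm_clk_get()/clk_prepare_enable() pairs and the err_out1..err_out4 unwind ladder with a single managed call that grabs every clock listed in the node's clocks property, enables them, and undoes both on driver detach. The shape of the resulting probe code, as a sketch:

#include <linux/clk.h>
#include <linux/device.h>

static int probe_clocks_sketch(struct device *dev)
{
	struct clk_bulk_data *clks;
	int ret;

	ret = devm_clk_bulk_get_all_enable(dev, &clks);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get clocks\n");

	return 0;	/* disable/unprepare/put are all automatic */
}
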
static struct platform_driver pwrap_drv = {
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index d3560f861085..acbca2ab5cc2 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -25,6 +25,7 @@ qcom_rpmh-y += rpmh.o
obj-$(CONFIG_QCOM_SMD_RPM) += rpm-proc.o smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+CFLAGS_smp2p.o := -I$(src)
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 4fbff3a890e2..a956c407ce03 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -485,11 +485,10 @@ static int of_apr_add_pd_lookups(struct device *dev)
{
const char *service_name, *service_path;
struct packet_router *apr = dev_get_drvdata(dev);
- struct device_node *node;
struct pdr_service *pds;
int ret;
- for_each_child_of_node(dev->of_node, node) {
+ for_each_child_of_node_scoped(dev->of_node, node) {
ret = of_property_read_string_index(node, "qcom,protection-domain",
0, &service_name);
if (ret < 0)
@@ -499,14 +498,12 @@ static int of_apr_add_pd_lookups(struct device *dev)
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
- of_node_put(node);
return ret;
}
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
- of_node_put(node);
return PTR_ERR(pds);
}
}
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index e7851974084b..f9235bc3aa3b 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -17,6 +17,8 @@
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
+#define CREATE_TRACE_POINTS
+#include "trace_icc-bwmon.h"
/*
* The BWMON samples data throughput within 'sample_ms' time. With three
@@ -645,9 +647,10 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
struct icc_bwmon *bwmon = dev_id;
unsigned int irq_enable = 0;
struct dev_pm_opp *opp, *target_opp;
- unsigned int bw_kbps, up_kbps, down_kbps;
+ unsigned int bw_kbps, up_kbps, down_kbps, meas_kbps;
bw_kbps = bwmon->target_kbps;
+ meas_kbps = bwmon->target_kbps;
target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
@@ -679,6 +682,7 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, irq_enable);
+ trace_qcom_bwmon_update(dev_name(bwmon->dev), meas_kbps, up_kbps, down_kbps);
if (bwmon->target_kbps == bwmon->current_kbps)
goto out;
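
CREATE_TRACE_POINTS must be defined in exactly one compilation unit before including the trace header so the tracepoint bodies are emitted there; trace_icc-bwmon.h itself is not part of this hunk. A sketch of what such a single-event header conventionally contains (field layout assumed, not quoted from the real header):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM icc_bwmon

#if !defined(_TRACE_ICC_BWMON_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ICC_BWMON_H

#include <linux/tracepoint.h>

TRACE_EVENT(qcom_bwmon_update,
	TP_PROTO(const char *name, unsigned int meas_kbps,
		 unsigned int up_kbps, unsigned int down_kbps),
	TP_ARGS(name, meas_kbps, up_kbps, down_kbps),
	TP_STRUCT__entry(
		__string(name, name)
		__field(unsigned int, meas_kbps)
		__field(unsigned int, up_kbps)
		__field(unsigned int, down_kbps)
	),
	TP_fast_assign(
		__assign_str(name);
		__entry->meas_kbps = meas_kbps;
		__entry->up_kbps = up_kbps;
		__entry->down_kbps = down_kbps;
	),
	TP_printk("name=%s meas_kbps=%u up_kbps=%u down_kbps=%u",
		  __get_str(name), __entry->meas_kbps,
		  __entry->up_kbps, __entry->down_kbps)
);

#endif /* _TRACE_ICC_BWMON_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_icc-bwmon
#include <trace/define_trace.h>
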
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index fbab7fe5c652..50be7a9274a1 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
@@ -265,7 +266,6 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
- struct device_node *node;
struct resource *res;
void __iomem *base;
@@ -292,15 +292,15 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
* (legacy DT binding), then it must at least provide a phandle
* to the ICE devicetree node, otherwise ICE is not supported.
*/
- node = of_parse_phandle(dev->of_node, "qcom,ice", 0);
+ struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node,
+ "qcom,ice", 0);
if (!node)
return NULL;
pdev = of_find_device_by_node(node);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", node->name);
- ice = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
ice = platform_get_drvdata(pdev);
@@ -308,8 +308,7 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
dev_err(dev, "Cannot get ice instance from %s\n",
dev_name(&pdev->dev));
platform_device_put(pdev);
- ice = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
@@ -321,9 +320,6 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
ice = ERR_PTR(-EINVAL);
}
-out:
- of_node_put(node);
-
return ice;
}
EXPORT_SYMBOL_GPL(of_qcom_ice_get);
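
of_qcom_ice_get() is converted to the scope-based cleanup idiom from linux/cleanup.h: annotating the node pointer with __free(device_node) makes of_node_put() run automatically whenever the pointer leaves scope, which is what lets the out: label and the explicit puts disappear. A minimal sketch, with a hypothetical phandle name:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/of.h>

static int lookup_sketch(struct device *dev)
{
	struct device_node *np __free(device_node) =
		of_parse_phandle(dev->of_node, "some-phandle", 0);

	if (!np)
		return -ENODEV;

	/* use np; of_node_put(np) runs implicitly on every return path */
	return 0;
}
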
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 37e11e501728..8fa4ffd3a9b5 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -453,26 +453,24 @@ static const struct llcc_slice_config qdu1000_data_8ch[] = {
};
static const struct llcc_slice_config x1e80100_data[] = {
- {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_AUDIO, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{LLCC_CMPT, 10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPUHTW, 11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_GPU, 9, 4096, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPU, 9, 4608, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CVP, 8, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP1, 7, 3072, 2, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CVP, 8, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_WRCACHE, 31, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP0, 4, 256, 4, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP1, 7, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
{LLCC_AENPU, 3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND1, 12, 512, 7, 1, 0x1, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND2, 13, 512, 7, 1, 0x2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND3, 14, 512, 7, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_ISLAND4, 15, 512, 7, 1, 0x4, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP3, 20, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {LLCC_CAMEXP4, 21, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND1, 12, 2048, 7, 1, 0x0, 0xF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP3, 20, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP4, 21, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};
static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index 6b6dd80cbc0f..ff8df7d75d6b 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -186,23 +186,20 @@ static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
struct ocmem *of_get_ocmem(struct device *dev)
{
struct platform_device *pdev;
- struct device_node *devnode;
struct ocmem *ocmem;
- devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ struct device_node *devnode __free(device_node) = of_parse_phandle(dev->of_node,
+ "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
- of_node_put(devnode);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
- of_node_put(devnode);
return ERR_PTR(-EPROBE_DEFER);
}
- of_node_put(devnode);
ocmem = platform_get_drvdata(pdev);
if (!ocmem) {
diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c
index 6af49b5060e5..77a70d3d0d0b 100644
--- a/drivers/soc/qcom/qcom-pbs.c
+++ b/drivers/soc/qcom/qcom-pbs.c
@@ -3,6 +3,7 @@
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -148,11 +149,11 @@ EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event);
*/
struct pbs_dev *get_pbs_client_device(struct device *dev)
{
- struct device_node *pbs_dev_node;
struct platform_device *pdev;
struct pbs_dev *pbs;
- pbs_dev_node = of_parse_phandle(dev->of_node, "qcom,pbs", 0);
+ struct device_node *pbs_dev_node __free(device_node) = of_parse_phandle(dev->of_node,
+ "qcom,pbs", 0);
if (!pbs_dev_node) {
dev_err(dev, "Missing qcom,pbs property\n");
return ERR_PTR(-ENODEV);
@@ -161,28 +162,23 @@ struct pbs_dev *get_pbs_client_device(struct device *dev)
pdev = of_find_device_by_node(pbs_dev_node);
if (!pdev) {
dev_err(dev, "Unable to find PBS dev_node\n");
- pbs = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
pbs = platform_get_drvdata(pdev);
if (!pbs) {
dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev));
platform_device_put(pdev);
- pbs = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ return ERR_PTR(-EPROBE_DEFER);
}
pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
if (!pbs->link) {
dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev));
platform_device_put(pdev);
- pbs = ERR_PTR(-EINVAL);
- goto out;
+ return ERR_PTR(-EINVAL);
}
-out:
- of_node_put(pbs_dev_node);
return pbs;
}
EXPORT_SYMBOL_GPL(get_pbs_client_device);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index ca2f6b7629ce..60af26667bce 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -394,7 +394,7 @@ static int qmp_cooling_device_add(struct qmp *qmp,
static int qmp_cooling_devices_register(struct qmp *qmp)
{
- struct device_node *np, *child;
+ struct device_node *np;
int count = 0;
int ret;
@@ -407,15 +407,13 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
if (!qmp->cooling_devs)
return -ENOMEM;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
if (!of_property_present(child, "#cooling-cells"))
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret) {
- of_node_put(child);
+ if (ret)
goto unroll;
- }
}
if (!count)
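
The same automatic-reference-drop idea powers the _scoped child iterators adopted above: the loop declares the child with cleanup semantics, so continue, break, or an early return cannot leak a reference, and the explicit of_node_put() calls in the error paths go away. A minimal sketch:

#include <linux/of.h>

static int count_cooling_sketch(struct device_node *np)
{
	int count = 0;

	for_each_available_child_of_node_scoped(np, child) {
		if (!of_property_present(child, "#cooling-cells"))
			continue;	/* reference dropped automatically */
		count++;
	}

	return count;
}
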
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 2228595a3dc5..c940f4da28ed 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -517,12 +517,25 @@ static const struct qcom_pdm_domain_data *sm8550_domains[] = {
NULL,
};
+static const struct qcom_pdm_domain_data *x1e80100_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_charger_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ NULL,
+};
+
static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = NULL, },
{ .compatible = "qcom,apq8064", .data = NULL, },
{ .compatible = "qcom,apq8074", .data = NULL, },
{ .compatible = "qcom,apq8084", .data = NULL, },
{ .compatible = "qcom,apq8096", .data = msm8996_domains, },
{ .compatible = "qcom,msm8226", .data = NULL, },
+ { .compatible = "qcom,msm8909", .data = NULL, },
+ { .compatible = "qcom,msm8916", .data = NULL, },
+ { .compatible = "qcom,msm8939", .data = NULL, },
{ .compatible = "qcom,msm8974", .data = NULL, },
{ .compatible = "qcom,msm8996", .data = msm8996_domains, },
{ .compatible = "qcom,msm8998", .data = msm8998_domains, },
@@ -539,12 +552,14 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
{ .compatible = "qcom,sm8250", .data = sm8250_domains, },
{ .compatible = "qcom,sm8350", .data = sm8350_domains, },
{ .compatible = "qcom,sm8450", .data = sm8350_domains, },
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
+ { .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
{},
};
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b7056aed4c7d..f2b3e02abdf1 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
{
struct qcom_smd_rpm *rpm;
- if (!rpdev->dev.of_node)
- return -EINVAL;
-
rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
return -ENOMEM;
@@ -218,18 +215,44 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
of_platform_depopulate(&rpdev->dev);
}
-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = {
- { .name = "rpm_requests", },
- { /* sentinel */ }
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,glink-smd-rpm" },
+ { .compatible = "qcom,smd-rpm" },
+ /*
+	 * Don't add any more compatibles to the list, the two previous entries
+ * should match all defined devices.
+ */
+ { .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
+ { .compatible = "qcom,rpm-ipq9574" },
+ { .compatible = "qcom,rpm-msm8226" },
+ { .compatible = "qcom,rpm-msm8909" },
+ { .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
+ { .compatible = "qcom,rpm-msm8953" },
+ { .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
+ { .compatible = "qcom,rpm-msm8996" },
+ { .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
+ { .compatible = "qcom,rpm-sm6115" },
+ { .compatible = "qcom,rpm-sm6125" },
+ { .compatible = "qcom,rpm-sm6375" },
+ { .compatible = "qcom,rpm-qcm2290" },
+ { .compatible = "qcom,rpm-qcs404" },
+ {}
};
-MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table);
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
static struct rpmsg_driver qcom_smd_rpm_driver = {
.probe = qcom_smd_rpm_probe,
.remove = qcom_smd_rpm_remove,
.callback = qcom_smd_rpm_callback,
- .id_table = qcom_smd_rpm_id_table,
- .drv.name = "qcom_smd_rpm",
+ .drv = {
+ .name = "qcom_smd_rpm",
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
};
static int __init qcom_smd_rpm_init(void)
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 696c2a8387d0..cefcbd61c628 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -161,6 +161,9 @@ struct qcom_smp2p {
struct list_head outbound;
};
+#define CREATE_TRACE_POINTS
+#include "trace-smp2p.h"
+
static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
/* Make sure any updated data is written before the kick */
@@ -192,6 +195,7 @@ static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
struct smp2p_smem_item *out = smp2p->out;
u32 val;
+ trace_smp2p_ssr_ack(smp2p->dev);
smp2p->ssr_ack = !smp2p->ssr_ack;
val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
@@ -214,6 +218,7 @@ static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
smp2p->ssr_ack_enabled = true;
smp2p->negotiation_done = true;
+ trace_smp2p_negotiate(smp2p->dev, out->features);
}
}
@@ -252,6 +257,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
status = val ^ entry->last_value;
entry->last_value = val;
+ trace_smp2p_notify_in(entry, status, val);
+
/* No changes of this entry? */
if (!status)
continue;
@@ -415,6 +422,8 @@ static int smp2p_update_bits(void *data, u32 mask, u32 value)
writel(val, entry->value);
spin_unlock_irqrestore(&entry->lock, flags);
+ trace_smp2p_update_bits(entry, orig, val);
+
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
@@ -530,7 +539,6 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
static int qcom_smp2p_probe(struct platform_device *pdev)
{
struct smp2p_entry *entry;
- struct device_node *node;
struct qcom_smp2p *smp2p;
const char *key;
int irq;
@@ -584,11 +592,10 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
if (ret < 0)
goto release_mbox;
- for_each_available_child_of_node(pdev->dev.of_node, node) {
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
if (!entry) {
ret = -ENOMEM;
- of_node_put(node);
goto unwind_interfaces;
}
@@ -596,25 +603,19 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
spin_lock_init(&entry->lock);
ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
if (of_property_read_bool(node, "interrupt-controller")) {
ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
list_add(&entry->node, &smp2p->inbound);
} else {
ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
goto unwind_interfaces;
- }
list_add(&entry->node, &smp2p->outbound);
}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index d7359a235e3c..24c3971f2ef1 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -401,11 +401,13 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SA8540P) },
{ qcom_board_id(QCM4290) },
{ qcom_board_id(QCS4290) },
+ { qcom_board_id(SM7325) },
{ qcom_board_id_named(SM8450_2, "SM8450") },
{ qcom_board_id_named(SM8450_3, "SM8450") },
{ qcom_board_id(SC7280) },
{ qcom_board_id(SC7180P) },
{ qcom_board_id(QCM6490) },
+ { qcom_board_id(SM7325P) },
{ qcom_board_id(IPQ5000) },
{ qcom_board_id(IPQ0509) },
{ qcom_board_id(IPQ0518) },
@@ -441,6 +443,8 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCM8550) },
{ qcom_board_id(IPQ5300) },
{ qcom_board_id(IPQ5321) },
+ { qcom_board_id(QCS8300) },
+ { qcom_board_id(QCS8275) },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/qcom/trace-smp2p.h b/drivers/soc/qcom/trace-smp2p.h
new file mode 100644
index 000000000000..9a6392043f10
--- /dev/null
+++ b/drivers/soc/qcom/trace-smp2p.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_smp2p
+
+#if !defined(__QCOM_SMP2P_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __QCOM_SMP2P_TRACE_H__
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(smp2p_ssr_ack,
+ TP_PROTO(const struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ ),
+ TP_printk("%s: SSR detected", __get_str(dev_name))
+);
+
+TRACE_EVENT(smp2p_negotiate,
+ TP_PROTO(const struct device *dev, unsigned int features),
+ TP_ARGS(dev, features),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __field(u32, out_features)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __entry->out_features = features;
+ ),
+ TP_printk("%s: state=open out_features=%s", __get_str(dev_name),
+ __print_flags(__entry->out_features, "|",
+ {SMP2P_FEATURE_SSR_ACK, "SMP2P_FEATURE_SSR_ACK"})
+ )
+);
+
+TRACE_EVENT(smp2p_notify_in,
+ TP_PROTO(struct smp2p_entry *smp2p_entry, unsigned long status, u32 val),
+ TP_ARGS(smp2p_entry, status, val),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(smp2p_entry->smp2p->dev))
+ __string(client_name, smp2p_entry->name)
+ __field(unsigned long, status)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(client_name);
+ __entry->status = status;
+ __entry->val = val;
+ ),
+ TP_printk("%s: %s: status:0x%0lx val:0x%0x",
+ __get_str(dev_name),
+ __get_str(client_name),
+ __entry->status,
+ __entry->val
+ )
+);
+
+TRACE_EVENT(smp2p_update_bits,
+ TP_PROTO(struct smp2p_entry *smp2p_entry, u32 orig, u32 val),
+ TP_ARGS(smp2p_entry, orig, val),
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(smp2p_entry->smp2p->dev))
+ __string(client_name, smp2p_entry->name)
+ __field(u32, orig)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(client_name);
+ __entry->orig = orig;
+ __entry->val = val;
+ ),
+ TP_printk("%s: %s: orig:0x%0x new:0x%0x",
+ __get_str(dev_name),
+ __get_str(client_name),
+ __entry->orig,
+ __entry->val
+ )
+);
+
+#endif /* __QCOM_SMP2P_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-smp2p
+
+#include <trace/define_trace.h>
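
The new header follows the standard define_trace.h convention: exactly one translation unit defines CREATE_TRACE_POINTS before including it (smp2p.c does so in the hunk further up), which emits the tracepoint definitions; every other user includes the header plainly and calls the generated trace_*() helpers. A sketch of the consumer side, assuming the driver's struct qcom_smp2p:

/* In exactly one .c file, as smp2p.c does above: */
#define CREATE_TRACE_POINTS
#include "trace-smp2p.h"

static void example_hit(struct qcom_smp2p *smp2p)
{
	trace_smp2p_ssr_ack(smp2p->dev);	/* generated helper */
}
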
diff --git a/drivers/soc/qcom/trace_icc-bwmon.h b/drivers/soc/qcom/trace_icc-bwmon.h
new file mode 100644
index 000000000000..beb8e6b485a9
--- /dev/null
+++ b/drivers/soc/qcom/trace_icc-bwmon.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM icc_bwmon
+
+#if !defined(_TRACE_ICC_BWMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICC_BWMON_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(qcom_bwmon_update,
+ TP_PROTO(const char *name,
+ unsigned int meas_kbps, unsigned int up_kbps, unsigned int down_kbps),
+
+ TP_ARGS(name, meas_kbps, up_kbps, down_kbps),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned int, meas_kbps)
+ __field(unsigned int, up_kbps)
+ __field(unsigned int, down_kbps)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->meas_kbps = meas_kbps;
+ __entry->up_kbps = up_kbps;
+ __entry->down_kbps = down_kbps;
+ ),
+
+ TP_printk("name=%s meas_kbps=%u up_kbps=%u down_kbps=%u",
+ __get_str(name),
+ __entry->meas_kbps,
+ __entry->up_kbps,
+ __entry->down_kbps)
+);
+
+#endif /* _TRACE_ICC_BWMON_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_icc-bwmon
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 5fd62046b28a..1eab4bb0eacf 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -41,9 +41,11 @@ static const struct rockchip_grf_info rk3036_grf __initconst = {
};
#define RK3128_GRF_SOC_CON0 0x140
+#define RK3128_GRF_SOC_CON1 0x144
static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
{ "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
+ { "vpu main clock", RK3128_GRF_SOC_CON1, HIWORD_UPDATE(0, 1, 10) },
};
static const struct rockchip_grf_info rk3128_grf __initconst = {
@@ -121,6 +123,29 @@ static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
.num_values = ARRAY_SIZE(rk3566_defaults),
};
+#define RK3576_SYSGRF_SOC_CON1 0x0004
+
+static const struct rockchip_grf_value rk3576_defaults_sys_grf[] __initconst = {
+ { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 6) },
+ { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 8) },
+};
+
+static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
+ .values = rk3576_defaults_sys_grf,
+ .num_values = ARRAY_SIZE(rk3576_defaults_sys_grf),
+};
+
+#define RK3576_IOCGRF_MISC_CON 0x04F0
+
+static const struct rockchip_grf_value rk3576_defaults_ioc_grf[] __initconst = {
+ { "jtag switching", RK3576_IOCGRF_MISC_CON, HIWORD_UPDATE(0, 1, 1) },
+};
+
+static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
+ .values = rk3576_defaults_ioc_grf,
+ .num_values = ARRAY_SIZE(rk3576_defaults_ioc_grf),
+};
+
#define RK3588_GRF_SOC_CON6 0x0318
static const struct rockchip_grf_value rk3588_defaults[] __initconst = {
@@ -132,7 +157,6 @@ static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
.num_values = ARRAY_SIZE(rk3588_defaults),
};
-
static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
{
.compatible = "rockchip,rk3036-grf",
@@ -159,6 +183,12 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
.compatible = "rockchip,rk3566-pipe-grf",
.data = (void *)&rk3566_pipegrf,
}, {
+ .compatible = "rockchip,rk3576-sys-grf",
+ .data = (void *)&rk3576_sysgrf,
+ }, {
+ .compatible = "rockchip,rk3576-ioc-grf",
+ .data = (void *)&rk3576_iocgrf,
+ }, {
.compatible = "rockchip,rk3588-sys-grf",
.data = (void *)&rk3588_sysgrf,
},
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index 18f809c160a7..fd9fd31f71c2 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -39,6 +39,10 @@
#define RK3288_SOC_CON2_FLASH0 BIT(7)
#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+#define RK3308_SOC_CON0 0x300
+#define RK3308_SOC_CON0_VCCIO3 BIT(8)
+#define RK3308_SOC_VCCIO3_SUPPLY_NUM 3
+
#define RK3328_SOC_CON4 0x410
#define RK3328_SOC_CON4_VCCIO2 BIT(7)
#define RK3328_SOC_VCCIO2_SUPPLY_NUM 1
@@ -229,6 +233,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
+static void rk3308_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+	/* If there is no vccio3 supply, leave things alone */
+ if (!iod->supplies[RK3308_SOC_VCCIO3_SUPPLY_NUM].reg)
+ return;
+
+ /*
+	 * Set the vccio3 iodomain to also use this framework
+	 * instead of a special GPIO.
+ */
+ val = RK3308_SOC_CON0_VCCIO3 | (RK3308_SOC_CON0_VCCIO3 << 16);
+ ret = regmap_write(iod->grf, RK3308_SOC_CON0, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio3 vsel ctrl\n");
+}
+
static void rk3328_iodomain_init(struct rockchip_iodomain *iod)
{
int ret;
@@ -376,6 +399,19 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
.init = rk3288_iodomain_init,
};
+static const struct rockchip_iodomain_soc_data soc_data_rk3308 = {
+ .grf_offset = 0x300,
+ .supply_names = {
+ "vccio0",
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ },
+ .init = rk3308_iodomain_init,
+};
+
static const struct rockchip_iodomain_soc_data soc_data_rk3328 = {
.grf_offset = 0x410,
.supply_names = {
@@ -529,6 +565,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.data = &soc_data_rk3288
},
{
+ .compatible = "rockchip,rk3308-io-voltage-domain",
+ .data = &soc_data_rk3308
+ },
+ {
.compatible = "rockchip,rk3328-io-voltage-domain",
.data = &soc_data_rk3328
},
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 6c37d6eb8b49..a08c377933c5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1438,7 +1438,7 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
struct device_node *parent)
{
struct of_phandle_args child_args, parent_args;
- struct device_node *np, *child;
+ struct device_node *np;
int err = 0;
/*
@@ -1457,12 +1457,10 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
if (!np)
return 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
err = tegra_powergate_add(pmc, child);
- if (err < 0) {
- of_node_put(child);
+ if (err < 0)
break;
- }
if (of_parse_phandle_with_args(child, "power-domains",
"#power-domain-cells",
@@ -1474,10 +1472,8 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
err = of_genpd_add_subdomain(&parent_args, &child_args);
of_node_put(parent_args.np);
- if (err) {
- of_node_put(child);
+ if (err)
break;
- }
}
of_node_put(np);
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index fd4251d75935..8c0102968351 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -161,7 +161,7 @@ struct k3_ring {
struct k3_ringacc_proxy_target_regs __iomem *proxy;
dma_addr_t ring_mem_dma;
void *ring_mem_virt;
- struct k3_ring_ops *ops;
+ const struct k3_ring_ops *ops;
u32 size;
enum k3_ring_size elm_size;
enum k3_ring_mode mode;
@@ -268,17 +268,17 @@ static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_ring_ops = {
+static const struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
-static struct k3_ring_ops k3_dmaring_fwd_ops = {
+static const struct k3_ring_ops k3_dmaring_fwd_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_dmaring_fwd_pop,
};
-static struct k3_ring_ops k3_dmaring_reverse_ops = {
+static const struct k3_ring_ops k3_dmaring_reverse_ops = {
/* Reverse side of the DMA ring can only be popped by SW */
.pop_head = k3_dmaring_reverse_pop,
};
@@ -288,7 +288,7 @@ static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_msg_ops = {
+static const struct k3_ring_ops k3_ring_mode_msg_ops = {
.push_tail = k3_ringacc_ring_push_io,
.push_head = k3_ringacc_ring_push_head_io,
.pop_tail = k3_ringacc_ring_pop_tail_io,
@@ -300,7 +300,7 @@ static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
-static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+static const struct k3_ring_ops k3_ring_mode_proxy_ops = {
.push_tail = k3_ringacc_ring_push_tail_proxy,
.push_head = k3_ringacc_ring_push_head_proxy,
.pop_tail = k3_ringacc_ring_pop_tail_proxy,
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 6023006685fc..fb0746d8caad 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -602,7 +602,7 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
struct device_node *node = dma_node;
struct knav_dma_device *dma;
- int ret, len, num_chan = 0;
+ int ret, num_chan = 0;
resource_size_t size;
u32 timeout;
u32 i;
@@ -615,25 +615,13 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
INIT_LIST_HEAD(&dma->list);
INIT_LIST_HEAD(&dma->chan_list);
- if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
- dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
- return -ENODEV;
- }
-
- dma->logical_queue_managers = len / sizeof(u32);
- if (dma->logical_queue_managers > DMA_MAX_QMS) {
- dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
- dma->logical_queue_managers);
- dma->logical_queue_managers = DMA_MAX_QMS;
- }
-
- ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
- dma->qm_base_address,
- dma->logical_queue_managers);
- if (ret) {
+ ret = of_property_read_variable_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address, 1, DMA_MAX_QMS);
+ if (ret < 0) {
dev_err(kdev->dev, "invalid navigator cloud addresses\n");
return -ENODEV;
}
+ dma->logical_queue_managers = ret;
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
if (IS_ERR(dma->reg_global))
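
of_property_read_variable_u32_array() folds the three steps the old code did by hand (presence check, length clamp, array read) into one call: it accepts between sz_min and sz_max elements and returns the count actually read, or a negative errno. A sketch of the return-value handling the hunk depends on, reusing the driver's types:

static int example_read_cloud(struct device_node *np,
			      struct knav_dma_device *dma)
{
	int n = of_property_read_variable_u32_array(np,
			"ti,navigator-cloud-address",
			dma->qm_base_address, 1, DMA_MAX_QMS);

	if (n < 0)
		return -ENODEV;	/* property missing or malformed */

	dma->logical_queue_managers = n;	/* elements actually read */
	return 0;
}
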
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index f2055a76f84c..6c98738e548a 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1076,14 +1076,20 @@ static const char *knav_queue_find_name(struct device_node *node)
}
static int knav_queue_setup_regions(struct knav_device *kdev,
- struct device_node *regions)
+ struct device_node *node)
{
struct device *dev = kdev->dev;
+ struct device_node *regions __free(device_node) =
+ of_get_child_by_name(node, "descriptor-regions");
struct knav_region *region;
struct device_node *child;
u32 temp[2];
int ret;
+ if (!regions)
+ return dev_err_probe(dev, -ENODEV,
+ "descriptor-regions not specified\n");
+
for_each_child_of_node(regions, child) {
region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region) {
@@ -1104,11 +1110,6 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
continue;
}
- if (!of_get_property(child, "link-index", NULL)) {
- dev_err(dev, "No link info for %s\n", region->name);
- devm_kfree(dev, region);
- continue;
- }
ret = of_property_read_u32(child, "link-index",
&region->link_index);
if (ret) {
@@ -1121,10 +1122,9 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
INIT_LIST_HEAD(&region->pools);
list_add_tail(&region->list, &kdev->regions);
}
- if (list_empty(&kdev->regions)) {
- dev_err(dev, "no valid region information found\n");
- return -ENODEV;
- }
+ if (list_empty(&kdev->regions))
+ return dev_err_probe(dev, -ENODEV,
+ "no valid region information found\n");
/* Next, we run through the regions and set things up */
for_each_region(kdev, region)
@@ -1306,10 +1306,16 @@ static int knav_setup_queue_range(struct knav_device *kdev,
}
static int knav_setup_queue_pools(struct knav_device *kdev,
- struct device_node *queue_pools)
+ struct device_node *node)
{
+ struct device_node *queue_pools __free(device_node) =
+ of_get_child_by_name(node, "queue-pools");
struct device_node *type, *range;
+ if (!queue_pools)
+ return dev_err_probe(kdev->dev, -ENODEV,
+ "queue-pools not specified\n");
+
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
/* return value ignored, we init the rest... */
@@ -1318,10 +1324,9 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
}
/* ... and barf if they all failed! */
- if (list_empty(&kdev->queue_ranges)) {
- dev_err(kdev->dev, "no valid queue range found\n");
- return -ENODEV;
- }
+ if (list_empty(&kdev->queue_ranges))
+ return dev_err_probe(kdev->dev, -ENODEV,
+ "no valid queue range found\n");
return 0;
}
@@ -1389,14 +1394,20 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
}
static int knav_queue_init_qmgrs(struct knav_device *kdev,
- struct device_node *qmgrs)
+ struct device_node *node)
{
struct device *dev = kdev->dev;
+ struct device_node *qmgrs __free(device_node) =
+ of_get_child_by_name(node, "qmgrs");
struct knav_qmgr_info *qmgr;
struct device_node *child;
u32 temp[2];
int ret;
+ if (!qmgrs)
+ return dev_err_probe(dev, -ENODEV,
+ "queue manager info not specified\n");
+
for_each_child_of_node(qmgrs, child) {
qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
if (!qmgr) {
@@ -1668,6 +1679,26 @@ static int knav_queue_start_pdsps(struct knav_device *kdev)
return 0;
}
+static int knav_queue_setup_pdsps(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device_node *pdsps __free(device_node) =
+ of_get_child_by_name(node, "pdsps");
+
+ if (pdsps) {
+ int ret;
+
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ return ret;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
struct knav_qmgr_info *qmgr;
@@ -1755,7 +1786,6 @@ MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
static int knav_queue_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
struct device *dev = &pdev->dev;
u32 temp[2];
int ret;
@@ -1799,39 +1829,17 @@ static int knav_queue_probe(struct platform_device *pdev)
kdev->num_queues = temp[1];
/* Initialize queue managers using device tree configuration */
- qmgrs = of_get_child_by_name(node, "qmgrs");
- if (!qmgrs) {
- dev_err(dev, "queue manager info not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_queue_init_qmgrs(kdev, qmgrs);
- of_node_put(qmgrs);
+ ret = knav_queue_init_qmgrs(kdev, node);
if (ret)
goto err;
/* get pdsp configuration values from device tree */
- pdsps = of_get_child_by_name(node, "pdsps");
- if (pdsps) {
- ret = knav_queue_init_pdsps(kdev, pdsps);
- if (ret)
- goto err;
-
- ret = knav_queue_start_pdsps(kdev);
- if (ret)
- goto err;
- }
- of_node_put(pdsps);
+ ret = knav_queue_setup_pdsps(kdev, node);
+ if (ret)
+ goto err;
/* get usable queue range values from device tree */
- queue_pools = of_get_child_by_name(node, "queue-pools");
- if (!queue_pools) {
- dev_err(dev, "queue-pools not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_setup_queue_pools(kdev, queue_pools);
- of_node_put(queue_pools);
+ ret = knav_setup_queue_pools(kdev, node);
if (ret)
goto err;
@@ -1853,14 +1861,7 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
- if (!regions) {
- dev_err(dev, "descriptor-regions not specified\n");
- ret = -ENODEV;
- goto err;
- }
- ret = knav_queue_setup_regions(kdev, regions);
- of_node_put(regions);
+ ret = knav_queue_setup_regions(kdev, node);
if (ret)
goto err;
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 3a56bbf3268a..8169885ab1e0 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -383,54 +383,44 @@ static void am33xx_pm_free_sram(void)
*/
static int am33xx_pm_alloc_sram(void)
{
- struct device_node *np;
- int ret = 0;
+ struct device_node *np __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
- np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
- if (!np) {
- dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
- __func__);
- return -ENODEV;
- }
+ if (!np)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to find device node for mpu\n",
+ __func__);
}
sram_pool = of_gen_pool_get(np, "pm-sram", 0);
- if (!sram_pool) {
- dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
- __func__);
- ret = -ENODEV;
- goto mpu_put_node;
- }
+ if (!sram_pool)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
- if (!sram_pool_data) {
- dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
- __func__);
- ret = -ENODEV;
- goto mpu_put_node;
- }
+ if (!sram_pool_data)
+ return dev_err_probe(pm33xx_dev, -ENODEV,
+ "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
- if (!ocmcram_location) {
- dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
- __func__);
- ret = -ENOMEM;
- goto mpu_put_node;
- }
+ if (!ocmcram_location)
+ return dev_err_probe(pm33xx_dev, -ENOMEM,
+ "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
ocmcram_location_data = gen_pool_alloc(sram_pool_data,
sizeof(struct emif_regs_amx3));
if (!ocmcram_location_data) {
- dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
- ret = -ENOMEM;
+ return dev_err_probe(pm33xx_dev, -ENOMEM,
+ "PM: Unable to allocate memory from ocmcram\n");
}
-mpu_put_node:
- of_node_put(np);
- return ret;
+ return 0;
}
static int am33xx_pm_rtc_setup(void)
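
dev_err_probe() collapses the log-then-return idiom used throughout these conversions: it returns the error it was given, logs real errors at err level, and for -EPROBE_DEFER records the reason via devres instead of flooding the log. A minimal sketch:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "missing peripheral clock\n");
	return 0;
}
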
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 24a42e0b645c..3ec758f50e24 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -380,39 +380,81 @@ put_clk_mux_np:
static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
{
- const struct pruss_private_data *data;
- struct device_node *clks_np;
struct device *dev = pruss->dev;
- int ret = 0;
-
- data = of_device_get_match_data(dev);
+ struct device_node *clks_np __free(device_node) =
+ of_get_child_by_name(cfg_node, "clocks");
+ const struct pruss_private_data *data = of_device_get_match_data(dev);
+ int ret;
- clks_np = of_get_child_by_name(cfg_node, "clocks");
- if (!clks_np) {
- dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
- return -ENODEV;
- }
+ if (!clks_np)
+ return dev_err_probe(dev, -ENODEV,
+ "%pOF is missing its 'clocks' node\n",
+ cfg_node);
if (data && data->has_core_mux_clock) {
ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
"coreclk-mux", clks_np);
- if (ret) {
- dev_err(dev, "failed to setup coreclk-mux\n");
- goto put_clks_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to setup coreclk-mux\n");
}
ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
clks_np);
- if (ret) {
- dev_err(dev, "failed to setup iepclk-mux\n");
- goto put_clks_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to setup iepclk-mux\n");
-put_clks_node:
- of_node_put(clks_np);
+ return 0;
+}
- return ret;
+static int pruss_of_setup_memories(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child __free(device_node) =
+ of_get_child_by_name(np, "memories");
+ const struct pruss_private_data *data = of_device_get_match_data(dev);
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+ int i;
+
+ if (!child)
+ return dev_err_probe(dev, -ENODEV,
+ "%pOF is missing its 'memories' node\n",
+				     np);
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ struct resource res;
+ int index;
+
+ /*
+		 * On AM437x, one of the two PRUSS units doesn't contain
+		 * Shared RAM, so skip it.
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0)
+ return index;
+
+ if (of_address_to_resource(child, index, &res))
+ return -EINVAL;
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+
+ return 0;
}
static struct regmap_config regmap_conf = {
@@ -424,26 +466,21 @@ static struct regmap_config regmap_conf = {
static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
{
struct device_node *np = dev_of_node(dev);
- struct device_node *child;
+ struct device_node *child __free(device_node) =
+ of_get_child_by_name(np, "cfg");
struct resource res;
int ret;
- child = of_get_child_by_name(np, "cfg");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
- return -ENODEV;
- }
+ if (!child)
+ return dev_err_probe(dev, -ENODEV,
+				     "%pOF is missing its 'cfg' node\n", np);
- if (of_address_to_resource(child, 0, &res)) {
- ret = -ENOMEM;
- goto node_put;
- }
+ if (of_address_to_resource(child, 0, &res))
+ return -ENOMEM;
pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!pruss->cfg_base) {
- ret = -ENOMEM;
- goto node_put;
- }
+ if (!pruss->cfg_base)
+ return -ENOMEM;
regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
(u64)res.start);
@@ -452,34 +489,22 @@ static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
&regmap_conf);
kfree(regmap_conf.name);
- if (IS_ERR(pruss->cfg_regmap)) {
- dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
- PTR_ERR(pruss->cfg_regmap));
- ret = PTR_ERR(pruss->cfg_regmap);
- goto node_put;
- }
+ if (IS_ERR(pruss->cfg_regmap))
+ return dev_err_probe(dev, PTR_ERR(pruss->cfg_regmap),
+ "regmap_init_mmio failed for cfg\n");
ret = pruss_clk_init(pruss, child);
if (ret)
- dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+ return dev_err_probe(dev, ret, "pruss_clk_init failed\n");
-node_put:
- of_node_put(child);
- return ret;
+ return 0;
}
static int pruss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev_of_node(dev);
- struct device_node *child;
struct pruss *pruss;
- struct resource res;
- int ret, i, index;
- const struct pruss_private_data *data;
- const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
-
- data = of_device_get_match_data(&pdev->dev);
+ int ret;
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -494,48 +519,9 @@ static int pruss_probe(struct platform_device *pdev)
pruss->dev = dev;
mutex_init(&pruss->lock);
- child = of_get_child_by_name(np, "memories");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'memories' node\n", child);
- return -ENODEV;
- }
-
- for (i = 0; i < PRUSS_MEM_MAX; i++) {
- /*
- * On AM437x one of two PRUSS units don't contain Shared RAM,
- * skip it
- */
- if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
- continue;
-
- index = of_property_match_string(child, "reg-names",
- mem_names[i]);
- if (index < 0) {
- of_node_put(child);
- return index;
- }
-
- if (of_address_to_resource(child, index, &res)) {
- of_node_put(child);
- return -EINVAL;
- }
-
- pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
- resource_size(&res));
- if (!pruss->mem_regions[i].va) {
- dev_err(dev, "failed to parse and map memory resource %d %s\n",
- i, mem_names[i]);
- of_node_put(child);
- return -ENOMEM;
- }
- pruss->mem_regions[i].pa = res.start;
- pruss->mem_regions[i].size = resource_size(&res);
-
- dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
- mem_names[i], &pruss->mem_regions[i].pa,
- pruss->mem_regions[i].size, pruss->mem_regions[i].va);
- }
- of_node_put(child);
+ ret = pruss_of_setup_memories(dev, pruss);
+ if (ret < 0)
+ return ret;
platform_set_drvdata(pdev, pruss);
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
index c3792c0a84ac..7bbf54a8d879 100644
--- a/drivers/soc/versatile/Kconfig
+++ b/drivers/soc/versatile/Kconfig
@@ -4,7 +4,7 @@
#
config SOC_INTEGRATOR_CM
bool "SoC bus device for the ARM Integrator platform core modules"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
select SOC_BUS
help
Include support for the SoC bus on the ARM Integrator platform
@@ -13,7 +13,7 @@ config SOC_INTEGRATOR_CM
config SOC_REALVIEW
bool "SoC bus device for the ARM RealView platforms"
- depends on ARCH_REALVIEW
+ depends on ARCH_REALVIEW || COMPILE_TEST
select SOC_BUS
help
Include support for the SoC bus on the ARM RealView platforms
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index bab4ad87aa75..d5099a3386b4 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -113,6 +113,7 @@ static int __init integrator_soc_init(void)
return -ENODEV;
syscon_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index c6876d232d8f..cf91abe07d38 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -4,6 +4,7 @@
*
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = {
ATTRIBUTE_GROUPS(realview);
+static void realview_soc_socdev_release(void *data)
+{
+ struct soc_device *soc_dev = data;
+
+ soc_device_unregister(soc_dev);
+}
+
static int realview_soc_probe(struct platform_device *pdev)
{
struct regmap *syscon_regmap;
@@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev)
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev)
soc_dev_attr->family = "Versatile";
soc_dev_attr->custom_attr_group = realview_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr);
+ if (IS_ERR(soc_dev))
return -ENODEV;
- }
+
+ ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release,
+ soc_dev);
+ if (ret)
+ return ret;
+
ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
&realview_coreid);
if (ret)
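
devm_add_action_or_reset() ties the unregister step to driver unbind: the callback runs when the device's managed resources are released, and if adding the action itself fails the callback is invoked immediately, so no error path leaks the soc_device. A sketch of the pattern, mirroring the hunk (assumes linux/sys_soc.h for soc_device_unregister()):

static void example_socdev_release(void *data)
{
	soc_device_unregister(data);
}

/* in probe(), right after soc_device_register() succeeds: */
ret = devm_add_action_or_reset(&pdev->dev, example_socdev_release, soc_dev);
if (ret)
	return ret;	/* the release callback has already run */
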
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index d928258c6761..77dc094075e1 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -83,7 +83,6 @@ static int sdw_drv_probe(struct device *dev)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
const struct sdw_device_id *id;
- const char *name;
int ret;
/*
@@ -108,11 +107,6 @@ static int sdw_drv_probe(struct device *dev)
ret = drv->probe(slave, id);
if (ret) {
- name = drv->name;
- if (!name)
- name = drv->driver.name;
-
- dev_err(dev, "Probe of %s failed: %d\n", name, ret);
dev_pm_domain_detach(dev, false);
return ret;
}
@@ -129,7 +123,7 @@ static int sdw_drv_probe(struct device *dev)
/* init the dynamic sysfs attributes we need */
ret = sdw_slave_sysfs_dpn_init(slave);
if (ret < 0)
- dev_warn(dev, "Slave sysfs init failed:%d\n", ret);
+ dev_warn(dev, "failed to initialise sysfs: %d\n", ret);
/*
* Check for valid clk_stop_timeout, use DisCo worst case value of
@@ -153,7 +147,7 @@ static int sdw_drv_probe(struct device *dev)
if (drv->ops && drv->ops->update_status) {
ret = drv->ops->update_status(slave, slave->status);
if (ret < 0)
- dev_warn(dev, "%s: update_status failed with status %d\n", __func__, ret);
+ dev_warn(dev, "failed to update status at probe: %d\n", ret);
}
mutex_unlock(&slave->sdw_dev_lock);
@@ -204,16 +198,11 @@ static void sdw_drv_shutdown(struct device *dev)
*/
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
{
- const char *name;
-
drv->driver.bus = &sdw_bus_type;
if (!drv->probe) {
- name = drv->name;
- if (!name)
- name = drv->driver.name;
-
- pr_err("driver %s didn't provide SDW probe routine\n", name);
+ pr_err("driver %s didn't provide SDW probe routine\n",
+ drv->driver.name);
return -EINVAL;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index e0683a5975d1..05652e983539 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -890,8 +890,14 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
}
}
- if (is_slave)
- return sdw_handle_slave_status(&cdns->bus, status);
+ if (is_slave) {
+ int ret;
+
+ mutex_lock(&cdns->status_update_lock);
+ ret = sdw_handle_slave_status(&cdns->bus, status);
+ mutex_unlock(&cdns->status_update_lock);
+ return ret;
+ }
return 0;
}
@@ -988,6 +994,31 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL(sdw_cdns_irq);
+static void cdns_check_attached_status_dwork(struct work_struct *work)
+{
+ struct sdw_cdns *cdns =
+ container_of(work, struct sdw_cdns, attach_dwork.work);
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+ u32 val;
+ int ret;
+ int i;
+
+ val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
+ status[i] = val & 0x3;
+ if (status[i])
+ dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]);
+ val >>= 2;
+ }
+
+ mutex_lock(&cdns->status_update_lock);
+ ret = sdw_handle_slave_status(&cdns->bus, status);
+ mutex_unlock(&cdns->status_update_lock);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret);
+}
+
/**
* cdns_update_slave_status_work - update slave status in a work since we will need to handle
* other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
@@ -1740,7 +1771,11 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
init_completion(&cdns->tx_complete);
cdns->bus.port_ops = &cdns_port_ops;
+ mutex_init(&cdns->status_update_lock);
+
INIT_WORK(&cdns->work, cdns_update_slave_status_work);
+ INIT_DELAYED_WORK(&cdns->attach_dwork, cdns_check_attached_status_dwork);
+
return 0;
}
EXPORT_SYMBOL(sdw_cdns_probe);
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index bc84435e420f..e1d7969ba48a 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -117,6 +117,8 @@ struct sdw_cdns_dai_runtime {
* @link_up: Link status
* @msg_count: Messages sent on bus
* @dai_runtime_array: runtime context for each allocated DAI.
+ * @status_update_lock: protect concurrency between interrupt-based and delayed work
+ * status update
*/
struct sdw_cdns {
struct device *dev;
@@ -148,10 +150,13 @@ struct sdw_cdns {
bool interrupt_enabled;
struct work_struct work;
+ struct delayed_work attach_dwork;
struct list_head list;
struct sdw_cdns_dai_runtime **dai_runtime_array;
+
+ struct mutex status_update_lock; /* add mutual exclusion to sdw_handle_slave_status() */
};
#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 68838e843b54..dddd29381441 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -103,6 +103,8 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
#define INTEL_MASTER_RESET_ITERATIONS 10
+#define SDW_INTEL_DELAYED_ENUMERATION_MS 100
+
#define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \
(sdw)->link_res->hw_ops->cb)
#define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb)
@@ -222,6 +224,13 @@ static inline bool sdw_intel_sync_check_cmdsync_unlocked(struct sdw_intel *sdw)
return false;
}
+static inline int sdw_intel_get_link_count(struct sdw_intel *sdw)
+{
+ if (SDW_INTEL_CHECK_OPS(sdw, get_link_count))
+ return SDW_INTEL_OPS(sdw, get_link_count)(sdw);
+ return 4; /* default on older generations */
+}
+
/* common bus management */
int intel_start_bus(struct sdw_intel *sdw);
int intel_start_bus_after_reset(struct sdw_intel *sdw);
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 781fe0aefa68..fff312c6968d 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -706,10 +706,30 @@ static void intel_program_sdi(struct sdw_intel *sdw, int dev_num)
__func__, sdw->instance, dev_num);
}
+static int intel_get_link_count(struct sdw_intel *sdw)
+{
+ int ret;
+
+ ret = hdac_bus_eml_get_count(sdw->link_res->hbus, true, AZX_REG_ML_LEPTR_ID_SDW);
+ if (!ret) {
+ dev_err(sdw->cdns.dev, "%s: could not retrieve link count\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ret > SDW_INTEL_MAX_LINKS) {
+		dev_err(sdw->cdns.dev, "%s: link count %d exceeds max %d\n", __func__, ret, SDW_INTEL_MAX_LINKS);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
.debugfs_init = intel_ace2x_debugfs_init,
.debugfs_exit = intel_ace2x_debugfs_exit,
+ .get_link_count = intel_get_link_count,
+
.register_dai = intel_register_dai,
.check_clock_stop = intel_check_clock_stop,
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 8807e01cbf7c..ae689d5d1ab9 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -317,6 +317,20 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
bus->link_id = auxdev->id;
bus->clk_stop_timeout = 1;
+ /*
+ * paranoia check: make sure ACPI-reported number of links is aligned with
+ * hardware capabilities.
+ */
+ ret = sdw_intel_get_link_count(sdw);
+ if (ret < 0) {
+ dev_err(dev, "%s: sdw_intel_get_link_count failed: %d\n", __func__, ret);
+ return ret;
+ }
+ if (ret <= sdw->instance) {
+ dev_err(dev, "%s: invalid link id %d, link count %d\n", __func__, auxdev->id, ret);
+ return -EINVAL;
+ }
+
sdw_cdns_probe(cdns);
/* Set ops */
@@ -475,6 +489,7 @@ static void intel_link_remove(struct auxiliary_device *auxdev)
*/
if (!bus->prop.hw_disabled) {
sdw_intel_debugfs_exit(sdw);
+ cancel_delayed_work_sync(&cdns->attach_dwork);
sdw_cdns_enable_interrupt(cdns, false);
}
sdw_bus_master_delete(bus);
diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c
index df944e11b9ca..d3ff6c65b64c 100644
--- a/drivers/soundwire/intel_bus_common.c
+++ b/drivers/soundwire/intel_bus_common.c
@@ -45,21 +45,24 @@ int intel_start_bus(struct sdw_intel *sdw)
return ret;
}
- ret = sdw_cdns_exit_reset(cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
return ret;
}
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -136,21 +139,24 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw)
return ret;
}
- ret = sdw_cdns_exit_reset(cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
return ret;
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -184,6 +190,9 @@ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -194,6 +203,8 @@ int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
bool wake_enable = false;
int ret;
+ cancel_delayed_work_sync(&cdns->attach_dwork);
+
if (clock_stop) {
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0)
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f275143d7b18..7aa4900dcf31 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1291,18 +1291,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
unsigned int port_num)
{
struct sdw_dpn_prop *dpn_prop;
- unsigned long mask;
+ u8 num_ports;
int i;
if (direction == SDW_DATA_DIR_TX) {
- mask = slave->prop.source_ports;
+ num_ports = hweight32(slave->prop.source_ports);
dpn_prop = slave->prop.src_dpn_prop;
} else {
- mask = slave->prop.sink_ports;
+ num_ports = hweight32(slave->prop.sink_ports);
dpn_prop = slave->prop.sink_dpn_prop;
}
- for_each_set_bit(i, &mask, 32) {
+ for (i = 0; i < num_ports; i++) {
if (dpn_prop[i].num == port_num)
return &dpn_prop[i];
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec1550c698d5..823797217404 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -267,7 +267,7 @@ config SPI_CADENCE_QUADSPI
config SPI_CADENCE_XSPI
tristate "Cadence XSPI controller"
- depends on OF && HAS_IOMEM
+ depends on OF && HAS_IOMEM && 64BIT
depends on SPI_MEM
help
Enable support for the Cadence XSPI Flash controller.
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 5aaff3bee1b7..4f288f07e38f 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
- if (aq->mr != QSPI_MR_SMM) {
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->scr, aq, QSPI_MR);
}
/* Clear pending interrupts */
@@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (ret < 0)
return ret;
- aq->scr = QSPI_SCR_SCBR(scbr);
+ aq->scr &= ~QSPI_SCR_SCBR_MASK;
+ aq->scr |= QSPI_SCR_SCBR(scbr);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
@@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
if (ret < 0)
return ret;
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK;
aq->scr |= QSPI_SCR_DLYBS(cs_setup);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq)
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
@@ -601,20 +603,17 @@ static int atmel_qspi_probe(struct platform_device *pdev)
aq->pdev = pdev;
/* Map the registers */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
- aq->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(aq->regs)) {
- dev_err(&pdev->dev, "missing registers\n");
- return PTR_ERR(aq->regs);
- }
+ aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
+ if (IS_ERR(aq->regs))
+ return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
+ "missing registers\n");
/* Map the AHB memory */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
aq->mem = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(aq->mem)) {
- dev_err(&pdev->dev, "missing AHB memory\n");
- return PTR_ERR(aq->mem);
- }
+ if (IS_ERR(aq->mem))
+ return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
+ "missing AHB memory\n");
aq->mmap_size = resource_size(res);
@@ -623,17 +622,15 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (IS_ERR(aq->pclk))
aq->pclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(aq->pclk)) {
- dev_err(&pdev->dev, "missing peripheral clock\n");
- return PTR_ERR(aq->pclk);
- }
+ if (IS_ERR(aq->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
+ "missing peripheral clock\n");
/* Enable the peripheral clock */
err = clk_prepare_enable(aq->pclk);
- if (err) {
- dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable the peripheral clock\n");
aq->caps = of_device_get_match_data(&pdev->dev);
if (!aq->caps) {
@@ -726,6 +723,7 @@ static void atmel_qspi_remove(struct platform_device *pdev)
clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
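
devm_platform_ioremap_resource_byname() is the one-call form of platform_get_resource_byname() plus devm_ioremap_resource(), returning the mapped address or an ERR_PTR. Note the qspi_mmap region above keeps the two-call form only because it still needs the struct resource to compute mmap_size. Sketch:

void __iomem *regs;

regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
if (IS_ERR(regs))
	return dev_err_probe(&pdev->dev, PTR_ERR(regs),
			     "missing registers\n");
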
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index 9d97ec98881c..94458df53eae 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -211,9 +211,6 @@ struct airoha_snand_dev {
u8 *txrx_buf;
dma_addr_t dma_addr;
-
- u64 cur_page_num;
- bool data_need_update;
};
struct airoha_snand_ctrl {
@@ -405,7 +402,7 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -427,7 +424,7 @@ static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
if (err)
return err;
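
The min() change in the two hunks above matters once len exceeds SPI_MAX_TRANSFER_SIZE: the old min(len, SPI_MAX_TRANSFER_SIZE) kept every chunk at full size, so the final iteration read or wrote past the end of the caller's buffer. Bounding each chunk by the bytes remaining restores the usual chunking shape (sketch; types simplified to size_t):

size_t i, chunk;

for (i = 0; i < len; i += chunk) {
	chunk = min_t(size_t, len - i, SPI_MAX_TRANSFER_SIZE);
	/* transfer exactly `chunk` bytes starting at data + i */
}
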
@@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
u32 val, rd_mode;
int err;
- if (!as_dev->data_need_update)
- return len;
-
- as_dev->data_need_update = false;
-
switch (op->cmd.opcode) {
case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
rd_mode = 1;
@@ -739,8 +731,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_READ_FROM_CACHE_DONE);
+ /*
+	 * The SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
+	 * of a dirmap_read operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_READ_FROM_CACHE_DONE,
+ SPI_NFI_READ_FROM_CACHE_DONE);
if (err)
return err;
@@ -870,8 +867,13 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_LOAD_TO_CACHE_DONE);
+ /*
+	 * The SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
+	 * of a dirmap_write operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_LOAD_TO_CACHE_DONE,
+ SPI_NFI_LOAD_TO_CACHE_DONE);
if (err)
return err;
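
regmap_set_bits() sits on top of regmap_update_bits(), which skips the bus access when the register already holds the requested value; for an ack/status bit that the hardware expects to see written on every pass, regmap_write_bits() forces the write even when nothing changes. Sketch (REG_ACK and ACK_DONE are placeholder names):

/* always performs the write, even if ACK_DONE is already set */
err = regmap_write_bits(map, REG_ACK, ACK_DONE, ACK_DONE);
if (err)
	return err;
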
@@ -885,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
- if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
- op->addr.val == as_dev->cur_page_num) {
- as_dev->data_need_update = true;
- } else if (opcode == SPI_NAND_OP_PAGE_READ) {
- if (!as_dev->data_need_update &&
- op->addr.val == as_dev->cur_page_num)
- return 0;
-
- as_dev->data_need_update = true;
- as_dev->cur_page_num = op->addr.val;
- }
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
@@ -986,7 +976,6 @@ static int airoha_snand_setup(struct spi_device *spi)
if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
return -ENOMEM;
- as_dev->data_need_update = true;
spi_set_ctldata(spi, as_dev);
return 0;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 447e5a962dee..2dff95d2b3f5 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -41,6 +41,7 @@
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
+#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH BIT(3)
#define SPI_ENGINE_INST_TRANSFER 0x0
#define SPI_ENGINE_INST_ASSERT 0x1
@@ -137,6 +138,10 @@ static unsigned int spi_engine_get_config(struct spi_device *spi)
config |= SPI_ENGINE_CONFIG_CPHA;
if (spi->mode & SPI_3WIRE)
config |= SPI_ENGINE_CONFIG_3WIRE;
+ if (spi->mode & SPI_MOSI_IDLE_HIGH)
+ config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
+ if (spi->mode & SPI_MOSI_IDLE_LOW)
+ config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
return config;
}
@@ -258,7 +263,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
clk_div - 1));
}
- if (bits_per_word != xfer->bits_per_word) {
+ if (bits_per_word != xfer->bits_per_word && xfer->len) {
bits_per_word = xfer->bits_per_word;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
@@ -692,9 +697,13 @@ static int spi_engine_probe(struct platform_device *pdev)
host->num_chipselect = 8;
/* Some features depend of the IP core version. */
- if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
- host->mode_bits |= SPI_CS_HIGH;
- host->setup = spi_engine_setup;
+ if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
+ host->mode_bits |= SPI_CS_HIGH;
+ host->setup = spi_engine_setup;
+ }
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
+ host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
}
if (host->max_speed_hz == 0)
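
The SDO idle handling above maps two mutually exclusive mode flags onto a single config bit, and the probe hunk only advertises those flags for IP core versions >= 1.3. A minimal sketch of the mode-to-config translation; the MODE_* flag values are illustrative, only the BIT(3) position mirrors the define above:

#include <stdint.h>
#include <stdio.h>

#define MODE_MOSI_IDLE_HIGH	(1u << 0)	/* hypothetical flag values */
#define MODE_MOSI_IDLE_LOW	(1u << 1)

#define CFG_SDO_IDLE_HIGH	(1u << 3)	/* mirrors BIT(3) above */

static uint32_t get_config(uint32_t mode)
{
	uint32_t config = 0;

	if (mode & MODE_MOSI_IDLE_HIGH)
		config |= CFG_SDO_IDLE_HIGH;
	if (mode & MODE_MOSI_IDLE_LOW)
		config &= ~CFG_SDO_IDLE_HIGH;	/* an explicit low wins */

	return config;
}

int main(void)
{
	printf("idle-high: 0x%x\n", get_config(MODE_MOSI_IDLE_HIGH));	/* 0x8 */
	printf("idle-low:  0x%x\n", get_config(MODE_MOSI_IDLE_LOW));	/* 0x0 */
	return 0;
}
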
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 2fb8d4e55c77..ef3a7226db12 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -466,6 +466,7 @@ static const struct platform_device_id bcm63xx_spi_dev_match[] = {
{
},
};
+MODULE_DEVICE_TABLE(platform, bcm63xx_spi_dev_match);
static const struct of_device_id bcm63xx_spi_of_match[] = {
{ .compatible = "brcm,bcm6348-spi", .data = &bcm6348_spi_reg_offsets },
@@ -583,13 +584,15 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ goto out_clk_disable;
/* register and we are done */
ret = devm_spi_register_controller(dev, host);
if (ret) {
dev_err(dev, "spi register failed\n");
- goto out_pm_disable;
+ goto out_clk_disable;
}
dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
@@ -597,8 +600,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
return 0;
-out_pm_disable:
- pm_runtime_disable(&pdev->dev);
out_clk_disable:
clk_disable_unprepare(clk);
out_err:
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
index 9f64afd8164e..d936104a41ec 100644
--- a/drivers/spi/spi-bcmbca-hsspi.c
+++ b/drivers/spi/spi-bcmbca-hsspi.c
@@ -433,7 +433,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
struct bcmbca_hsspi *bs;
- struct resource *res_mem;
void __iomem *spim_ctrl;
void __iomem *regs;
struct device *dev = &pdev->dev;
@@ -445,17 +444,11 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsspi");
- if (!res_mem)
- return -EINVAL;
- regs = devm_ioremap_resource(dev, res_mem);
+ regs = devm_platform_ioremap_resource_byname(pdev, "hsspi");
if (IS_ERR(regs))
return PTR_ERR(regs);
- res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spim-ctrl");
- if (!res_mem)
- return -EINVAL;
- spim_ctrl = devm_ioremap_resource(dev, res_mem);
+ spim_ctrl = devm_platform_ioremap_resource_byname(pdev, "spim-ctrl");
if (IS_ERR(spim_ctrl))
return PTR_ERR(spim_ctrl);
@@ -487,7 +480,7 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
}
}
- host = spi_alloc_host(&pdev->dev, sizeof(*bs));
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!host) {
ret = -ENOMEM;
goto out_disable_pll_clk;
@@ -543,15 +536,17 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED,
pdev->name, bs);
if (ret)
- goto out_put_host;
+ goto out_disable_pll_clk;
}
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ goto out_disable_pll_clk;
ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
if (ret) {
dev_err(&pdev->dev, "couldn't register sysfs group\n");
- goto out_pm_disable;
+ goto out_disable_pll_clk;
}
/* register and we are done */
@@ -565,10 +560,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
out_sysgroup_disable:
sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
-out_pm_disable:
- pm_runtime_disable(&pdev->dev);
-out_put_host:
- spi_controller_put(host);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index afb1b1105ec2..ebe18f0b5d23 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -54,21 +54,28 @@ static unsigned int bitbang_txrx_8(struct spi_device *spi,
struct spi_transfer *t,
unsigned int flags)
{
+ struct spi_bitbang *bitbang;
unsigned int bits = t->bits_per_word;
unsigned int count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
+ bitbang = spi_controller_get_devdata(spi->controller);
while (likely(count > 0)) {
u8 word = 0;
if (tx)
word = *tx++;
+ else
+ word = spi->mode & SPI_MOSI_IDLE_HIGH ? 0xFF : 0;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 1;
}
+ if (bitbang->set_mosi_idle)
+ bitbang->set_mosi_idle(spi);
+
return t->len - count;
}
@@ -78,21 +85,28 @@ static unsigned int bitbang_txrx_16(struct spi_device *spi,
struct spi_transfer *t,
unsigned int flags)
{
+ struct spi_bitbang *bitbang;
unsigned int bits = t->bits_per_word;
unsigned int count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
+ bitbang = spi_controller_get_devdata(spi->controller);
while (likely(count > 1)) {
u16 word = 0;
if (tx)
word = *tx++;
+ else
+ word = spi->mode & SPI_MOSI_IDLE_HIGH ? 0xFFFF : 0;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 2;
}
+ if (bitbang->set_mosi_idle)
+ bitbang->set_mosi_idle(spi);
+
return t->len - count;
}
@@ -102,21 +116,28 @@ static unsigned int bitbang_txrx_32(struct spi_device *spi,
struct spi_transfer *t,
unsigned int flags)
{
+ struct spi_bitbang *bitbang;
unsigned int bits = t->bits_per_word;
unsigned int count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
+ bitbang = spi_controller_get_devdata(spi->controller);
while (likely(count > 3)) {
u32 word = 0;
if (tx)
word = *tx++;
+ else
+ word = spi->mode & SPI_MOSI_IDLE_HIGH ? 0xFFFFFFFF : 0;
word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 4;
}
+ if (bitbang->set_mosi_idle)
+ bitbang->set_mosi_idle(spi);
+
return t->len - count;
}
@@ -192,6 +213,9 @@ int spi_bitbang_setup(struct spi_device *spi)
goto err_free;
}
+ if (bitbang->set_mosi_idle)
+ bitbang->set_mosi_idle(spi);
+
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
return 0;
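
With no TX buffer, the word shifted out used to be hardwired to zero; the bitbang hunks above make it follow the requested MOSI idle level, so the data line rests at the right state during RX-only transfers. A hedged standalone sketch of the fill-word rule for the 8/16/32-bit loop variants:

#include <stdint.h>
#include <stdio.h>

/* all-ones when the device asked for MOSI-idle-high, else all-zeroes;
 * width follows the loop variant (8/16/32 bits) */
static uint32_t fill_word(int mosi_idle_high, unsigned int bits)
{
	if (!mosi_idle_high)
		return 0;
	return bits >= 32 ? 0xFFFFFFFFu : (1u << bits) - 1;
}

int main(void)
{
	printf("8-bit  idle-high fill: 0x%x\n", fill_word(1, 8));	/* 0xff */
	printf("16-bit idle-high fill: 0x%x\n", fill_word(1, 16));	/* 0xffff */
	printf("32-bit idle-low  fill: 0x%x\n", fill_word(0, 32));	/* 0x0 */
	return 0;
}
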
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index d4607cb89c48..1755ca026f08 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1662,23 +1662,20 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
unsigned int max_cs = cqspi->num_chipselect - 1;
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct cqspi_flash_pdata *f_pdata;
unsigned int cs;
int ret;
/* Get flash device data */
- for_each_available_child_of_node(dev->of_node, np) {
+ for_each_available_child_of_node_scoped(dev->of_node, np) {
ret = of_property_read_u32(np, "reg", &cs);
if (ret) {
dev_err(dev, "Couldn't determine chip select.\n");
- of_node_put(np);
return ret;
}
if (cs >= cqspi->num_chipselect) {
dev_err(dev, "Chip select %d out of range.\n", cs);
- of_node_put(np);
return -EINVAL;
} else if (cs < max_cs) {
max_cs = cs;
@@ -1689,10 +1686,8 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
f_pdata->cs = cs;
ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
- if (ret) {
- of_node_put(np);
+ if (ret)
return ret;
- }
}
cqspi->num_chipselect = max_cs + 1;
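
The _scoped iterator above removes the manual of_node_put() calls on every early-return path, which is exactly where reference leaks tend to creep in. Underneath, the kernel's scoped helpers build on compiler cleanup attributes; the following userspace model (invented node type, GCC/Clang cleanup extension) shows the idea:

#include <stdio.h>

struct node { int refs; };

static void node_put(struct node **n)
{
	if (*n)
		(*n)->refs--;	/* reference dropped automatically */
}

static int parse(struct node *n, int fail)
{
	/* released on ANY exit from this scope, early returns included */
	struct node *scoped __attribute__((cleanup(node_put))) = n;

	scoped->refs++;
	if (fail)
		return -1;	/* no explicit put needed */
	return 0;
}

int main(void)
{
	struct node n = { .refs = 0 };

	parse(&n, 1);
	parse(&n, 0);
	printf("leaked refs: %d\n", n.refs);	/* 0 */
	return 0;
}
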
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 2e3eacd46b72..aed98ab14334 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -2,6 +2,7 @@
// Cadence XSPI flash controller driver
// Copyright (C) 2020-21 Cadence
+#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -19,6 +20,7 @@
#include <linux/bitfield.h>
#include <linux/limits.h>
#include <linux/log2.h>
+#include <linux/bitrev.h>
#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
#define CDNS_XSPI_MAX_BANKS 8
@@ -193,6 +195,98 @@
((op)->data.dir == SPI_MEM_DATA_IN) ? \
CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))
+/* Helper macros for GENERIC and GENERIC-DSEQ instruction type */
+#define CMD_REG_LEN (6*4)
+#define INSTRUCTION_TYPE_GENERIC 96
+#define CDNS_XSPI_CMD_FLD_P1_GENERIC_CMD (\
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, INSTRUCTION_TYPE_GENERIC))
+
+#define GENERIC_NUM_OF_BYTES GENMASK(27, 24)
+#define CDNS_XSPI_CMD_FLD_P3_GENERIC_CMD(len) (\
+ FIELD_PREP(GENERIC_NUM_OF_BYTES, len))
+
+#define GENERIC_BANK_NUM GENMASK(14, 12)
+#define GENERIC_GLUE_CMD BIT(28)
+#define CDNS_XSPI_CMD_FLD_P4_GENERIC_CMD(cs, glue) (\
+ FIELD_PREP(GENERIC_BANK_NUM, cs) | FIELD_PREP(GENERIC_GLUE_CMD, glue))
+
+#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_1 (\
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ))
+
+#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_2(nbytes) (\
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, nbytes & 0xffff))
+
+#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_3(nbytes) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, (nbytes >> 16) & 0xffff))
+
+#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_4(dir, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, dir))
+
+/* Marvell PHY default values */
+#define MARVELL_REGS_DLL_PHY_CTRL 0x00000707
+#define MARVELL_CTB_RFILE_PHY_CTRL 0x00004000
+#define MARVELL_RFILE_PHY_TSEL 0x00000000
+#define MARVELL_RFILE_PHY_DQ_TIMING 0x00000101
+#define MARVELL_RFILE_PHY_DQS_TIMING 0x00700404
+#define MARVELL_RFILE_PHY_GATE_LPBK_CTRL 0x00200030
+#define MARVELL_RFILE_PHY_DLL_MASTER_CTRL 0x00800000
+#define MARVELL_RFILE_PHY_DLL_SLAVE_CTRL 0x0000ff01
+
+/* PHY config registers */
+#define CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL 0x1034
+#define CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL 0x0080
+#define CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL 0x0084
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING 0x0000
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING 0x0004
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL 0x0008
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL 0x000c
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL 0x0010
+#define CDNS_XSPI_DATASLICE_RFILE_PHY_DLL_OBS_REG_0 0x001c
+
+#define CDNS_XSPI_DLL_RST_N BIT(24)
+#define CDNS_XSPI_DLL_LOCK BIT(0)
+
+/* Marvell overlay registers - clock */
+#define MRVL_XSPI_CLK_CTRL_AUX_REG 0x2020
+#define MRVL_XSPI_CLK_ENABLE BIT(0)
+#define MRVL_XSPI_CLK_DIV GENMASK(4, 1)
+#define MRVL_XSPI_IRQ_ENABLE BIT(6)
+#define MRVL_XSPI_CLOCK_IO_HZ 800000000
+#define MRVL_XSPI_CLOCK_DIVIDED(div) ((MRVL_XSPI_CLOCK_IO_HZ) / (div))
+#define MRVL_DEFAULT_CLK 25000000
+
+/* Marvell overlay registers - xfer */
+#define MRVL_XFER_FUNC_CTRL 0x210
+#define MRVL_XFER_FUNC_CTRL_READ_DATA(i) (0x000 + 8 * (i))
+#define MRVL_XFER_SOFT_RESET BIT(11)
+#define MRVL_XFER_CS_N_HOLD GENMASK(9, 6)
+#define MRVL_XFER_RECEIVE_ENABLE BIT(4)
+#define MRVL_XFER_FUNC_ENABLE BIT(3)
+#define MRVL_XFER_CLK_CAPTURE_POL BIT(2)
+#define MRVL_XFER_CLK_DRIVE_POL BIT(1)
+#define MRVL_XFER_FUNC_START BIT(0)
+#define MRVL_XFER_QWORD_COUNT 32
+#define MRVL_XFER_QWORD_BYTECOUNT 8
+
+#define MRVL_XSPI_POLL_TIMEOUT_US 1000
+#define MRVL_XSPI_POLL_DELAY_US 10
+
+/*
+ * Macros for calculating data bits in a generic command.
+ * Up to 10 bytes can fit into the cmd_registers; the least
+ * significant bytes are placed in cmd_reg[1] and the other
+ * bytes are inserted after them in the cmd_reg[1,2,3] registers.
+ */
+#define GENERIC_CMD_DATA_REG_3_COUNT(len) ((len) >= 10 ? 2 : (len) - 8)
+#define GENERIC_CMD_DATA_REG_2_COUNT(len) ((len) >= 7 ? 3 : (len) - 4)
+#define GENERIC_CMD_DATA_REG_1_COUNT(len) ((len) >= 3 ? 2 : (len) - 1)
+#define GENERIC_CMD_DATA_3_OFFSET(position) (8 * (position))
+#define GENERIC_CMD_DATA_2_OFFSET(position) (8 * (position))
+#define GENERIC_CMD_DATA_1_OFFSET(position) (8 + 8 * (position))
+#define GENERIC_CMD_DATA_INSERT(data, pos) ((data) << (pos))
+#define GENERIC_CMD_REG_3_NEEDED(len) ((len) > 7)
+#define GENERIC_CMD_REG_2_NEEDED(len) ((len) > 3)
+
enum cdns_xspi_stig_instr_type {
CDNS_XSPI_STIG_INSTR_TYPE_0,
CDNS_XSPI_STIG_INSTR_TYPE_1,
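
To make the packing macros above concrete: in the maximum 10-byte case the first data bytes land in cmd_reg[3], the middle four fill cmd_reg[2], and the last three occupy cmd_reg[1] bits 31..8, leaving cmd_reg[1] bits 7..0 free for the instruction-type field. A standalone model of the same loops:

#include <stdint.h>
#include <stdio.h>

static void pack(const uint8_t *data, int len, uint32_t regs[4])
{
	int ctr = 0, i;

	regs[1] = regs[2] = regs[3] = 0;

	if (len > 7)	/* GENERIC_CMD_REG_3_NEEDED */
		for (i = (len >= 10 ? 2 : len - 8); i >= 0; i--)
			regs[3] |= (uint32_t)data[ctr++] << (8 * i);
	if (len > 3)	/* GENERIC_CMD_REG_2_NEEDED */
		for (i = (len >= 7 ? 3 : len - 4); i >= 0; i--)
			regs[2] |= (uint32_t)data[ctr++] << (8 * i);
	for (i = (len >= 3 ? 2 : len - 1); i >= 0; i--)
		regs[1] |= (uint32_t)data[ctr++] << (8 + 8 * i);
}

int main(void)
{
	const uint8_t d[10] = { 0x01, 0x02, 0x03, 0x04, 0x05,
				0x06, 0x07, 0x08, 0x09, 0x0a };
	uint32_t regs[4];

	pack(d, 10, regs);
	/* expect reg3=00010203 reg2=04050607 reg1=08090a00 */
	printf("reg3=%08x reg2=%08x reg1=%08x\n", regs[3], regs[2], regs[1]);
	return 0;
}
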
@@ -209,6 +303,51 @@ enum cdns_xspi_stig_cmd_dir {
CDNS_XSPI_STIG_CMD_DIR_WRITE,
};
+struct cdns_xspi_driver_data {
+ bool mrvl_hw_overlay;
+ u32 dll_phy_ctrl;
+ u32 ctb_rfile_phy_ctrl;
+ u32 rfile_phy_tsel;
+ u32 rfile_phy_dq_timing;
+ u32 rfile_phy_dqs_timing;
+ u32 rfile_phy_gate_lpbk_ctrl;
+ u32 rfile_phy_dll_master_ctrl;
+ u32 rfile_phy_dll_slave_ctrl;
+};
+
+static struct cdns_xspi_driver_data marvell_driver_data = {
+ .mrvl_hw_overlay = true,
+ .dll_phy_ctrl = MARVELL_REGS_DLL_PHY_CTRL,
+ .ctb_rfile_phy_ctrl = MARVELL_CTB_RFILE_PHY_CTRL,
+ .rfile_phy_tsel = MARVELL_RFILE_PHY_TSEL,
+ .rfile_phy_dq_timing = MARVELL_RFILE_PHY_DQ_TIMING,
+ .rfile_phy_dqs_timing = MARVELL_RFILE_PHY_DQS_TIMING,
+ .rfile_phy_gate_lpbk_ctrl = MARVELL_RFILE_PHY_GATE_LPBK_CTRL,
+ .rfile_phy_dll_master_ctrl = MARVELL_RFILE_PHY_DLL_MASTER_CTRL,
+ .rfile_phy_dll_slave_ctrl = MARVELL_RFILE_PHY_DLL_SLAVE_CTRL,
+};
+
+static struct cdns_xspi_driver_data cdns_driver_data = {
+ .mrvl_hw_overlay = false,
+};
+
+static const int cdns_mrvl_xspi_clk_div_list[] = {
+ 4, //0x0 = Divide by 4. SPI clock is 200 MHz.
+ 6, //0x1 = Divide by 6. SPI clock is 133.33 MHz.
+ 8, //0x2 = Divide by 8. SPI clock is 100 MHz.
+ 10, //0x3 = Divide by 10. SPI clock is 80 MHz.
+ 12, //0x4 = Divide by 12. SPI clock is 66.666 MHz.
+ 16, //0x5 = Divide by 16. SPI clock is 50 MHz.
+ 18, //0x6 = Divide by 18. SPI clock is 44.44 MHz.
+ 20, //0x7 = Divide by 20. SPI clock is 40 MHz.
+ 24, //0x8 = Divide by 24. SPI clock is 33.33 MHz.
+ 32, //0x9 = Divide by 32. SPI clock is 25 MHz.
+ 40, //0xA = Divide by 40. SPI clock is 20 MHz.
+ 50, //0xB = Divide by 50. SPI clock is 16 MHz.
+ 64, //0xC = Divide by 64. SPI clock is 12.5 MHz.
+ 128 //0xD = Divide by 128. SPI clock is 6.25 MHz.
+};
+
struct cdns_xspi_dev {
struct platform_device *pdev;
struct device *dev;
@@ -216,6 +355,7 @@ struct cdns_xspi_dev {
void __iomem *iobase;
void __iomem *auxbase;
void __iomem *sdmabase;
+ void __iomem *xferbase;
int irq;
int cur_cs;
@@ -230,8 +370,102 @@ struct cdns_xspi_dev {
const void *out_buffer;
u8 hw_num_banks;
+
+ const struct cdns_xspi_driver_data *driver_data;
+ void (*sdma_handler)(struct cdns_xspi_dev *cdns_xspi);
+ void (*set_interrupts_handler)(struct cdns_xspi_dev *cdns_xspi, bool enabled);
+
+ bool xfer_in_progress;
+ int current_xfer_qword;
};
+static void cdns_xspi_reset_dll(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 dll_cntrl = readl(cdns_xspi->iobase +
+ CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+
+ /* Reset DLL */
+ dll_cntrl |= CDNS_XSPI_DLL_RST_N;
+ writel(dll_cntrl, cdns_xspi->iobase +
+ CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+}
+
+static bool cdns_xspi_is_dll_locked(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 dll_lock;
+
+ return !readl_relaxed_poll_timeout(cdns_xspi->iobase +
+ CDNS_XSPI_INTR_STATUS_REG,
+ dll_lock, ((dll_lock & CDNS_XSPI_DLL_LOCK) == 1), 10, 10000);
+}
+
+/* Static configuration of PHY */
+static bool cdns_xspi_configure_phy(struct cdns_xspi_dev *cdns_xspi)
+{
+ writel(cdns_xspi->driver_data->dll_phy_ctrl,
+ cdns_xspi->iobase + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+ writel(cdns_xspi->driver_data->ctb_rfile_phy_ctrl,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL);
+ writel(cdns_xspi->driver_data->rfile_phy_tsel,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL);
+ writel(cdns_xspi->driver_data->rfile_phy_dq_timing,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING);
+ writel(cdns_xspi->driver_data->rfile_phy_dqs_timing,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING);
+ writel(cdns_xspi->driver_data->rfile_phy_gate_lpbk_ctrl,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL);
+ writel(cdns_xspi->driver_data->rfile_phy_dll_master_ctrl,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL);
+ writel(cdns_xspi->driver_data->rfile_phy_dll_slave_ctrl,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL);
+
+ cdns_xspi_reset_dll(cdns_xspi);
+
+ return cdns_xspi_is_dll_locked(cdns_xspi);
+}
+
+static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi,
+ int requested_clk)
+{
+ int i = 0;
+ int clk_val;
+ u32 clk_reg;
+ bool update_clk = false;
+
+ while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) {
+ clk_val = MRVL_XSPI_CLOCK_DIVIDED(
+ cdns_mrvl_xspi_clk_div_list[i]);
+ if (clk_val <= requested_clk)
+ break;
+ i++;
+ }
+
+ dev_dbg(cdns_xspi->dev, "Found clk div: %d, clk val: %d\n",
+ cdns_mrvl_xspi_clk_div_list[i],
+ MRVL_XSPI_CLOCK_DIVIDED(
+ cdns_mrvl_xspi_clk_div_list[i]));
+
+ clk_reg = readl(cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+
+ if (FIELD_GET(MRVL_XSPI_CLK_DIV, clk_reg) != i) {
+ clk_reg &= ~MRVL_XSPI_CLK_ENABLE;
+ writel(clk_reg,
+ cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+ clk_reg &= ~MRVL_XSPI_CLK_DIV;
+ clk_reg |= FIELD_PREP(MRVL_XSPI_CLK_DIV, i);
+ clk_reg |= MRVL_XSPI_CLK_ENABLE;
+ clk_reg |= MRVL_XSPI_IRQ_ENABLE;
+ update_clk = true;
+ }
+
+ if (update_clk)
+ writel(clk_reg,
+ cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG);
+
+ return update_clk;
+}
+
static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
{
u32 ctrl_stat;
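
The divisor search in cdns_mrvl_xspi_setup_clock() above walks the table until the divided 800 MHz I/O clock no longer exceeds the requested rate. Note that the loop as written can index one past the table in the debug print when the request is below 6.25 MHz; the standalone model below clamps to the last (slowest) entry instead:

#include <stdio.h>

#define IO_HZ 800000000		/* MRVL_XSPI_CLOCK_IO_HZ */

static const int div_list[] = { 4, 6, 8, 10, 12, 16, 18, 20,
				24, 32, 40, 50, 64, 128 };

static int pick_div_index(int requested_hz)
{
	unsigned int i;

	/* stop at the first divisor that does not overshoot the request,
	 * clamping to the last entry if nothing fits */
	for (i = 0; i < sizeof(div_list) / sizeof(div_list[0]) - 1; i++)
		if (IO_HZ / div_list[i] <= requested_hz)
			break;
	return i;
}

int main(void)
{
	int i = pick_div_index(25000000);	/* MRVL_DEFAULT_CLK */

	printf("div index %d -> /%d = %d Hz\n",
	       i, div_list[i], IO_HZ / div_list[i]);	/* index 9, /32, 25 MHz */
	return 0;
}
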
@@ -304,6 +538,23 @@ static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
}
+static void marvell_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
+ bool enabled)
+{
+ u32 intr_enable;
+ u32 irq_status;
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+ writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+
+ intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+ if (enabled)
+ intr_enable |= CDNS_XSPI_INTR_MASK;
+ else
+ intr_enable &= ~CDNS_XSPI_INTR_MASK;
+ writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+}
+
static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
{
u32 ctrl_ver;
@@ -321,7 +572,7 @@ static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
- cdns_xspi_set_interrupts(cdns_xspi, false);
+ cdns_xspi->set_interrupts_handler(cdns_xspi, false);
return 0;
}
@@ -348,6 +599,78 @@ static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
}
}
+static void m_ioreadq(void __iomem *addr, void *buf, int len)
+{
+ if (IS_ALIGNED((long)buf, 8) && len >= 8) {
+ u64 full_ops = len / 8;
+ u64 *buffer = buf;
+
+ len -= full_ops * 8;
+ buf += full_ops * 8;
+
+ do {
+ u64 b = readq(addr);
+ *buffer++ = b;
+ } while (--full_ops);
+ }
+
+ while (len) {
+ u64 tmp_buf;
+
+ tmp_buf = readq(addr);
+ memcpy(buf, &tmp_buf, min(len, 8));
+ len = len > 8 ? len - 8 : 0;
+ buf += 8;
+ }
+}
+
+static void m_iowriteq(void __iomem *addr, const void *buf, int len)
+{
+ if (IS_ALIGNED((long)buf, 8) && len >= 8) {
+ u64 full_ops = len / 8;
+ const u64 *buffer = buf;
+
+ len -= full_ops * 8;
+ buf += full_ops * 8;
+
+ do {
+ writeq(*buffer++, addr);
+ } while (--full_ops);
+ }
+
+ while (len) {
+ u64 tmp_buf;
+
+ memcpy(&tmp_buf, buf, min(len, 8));
+ writeq(tmp_buf, addr);
+ len = len > 8 ? len - 8 : 0;
+ buf += 8;
+ }
+}
+
+static void marvell_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 sdma_size, sdma_trd_info;
+ u8 sdma_dir;
+
+ sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
+ sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
+ sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);
+
+ switch (sdma_dir) {
+ case CDNS_XSPI_SDMA_DIR_READ:
+ m_ioreadq(cdns_xspi->sdmabase,
+ cdns_xspi->in_buffer, sdma_size);
+ break;
+
+ case CDNS_XSPI_SDMA_DIR_WRITE:
+ m_iowriteq(cdns_xspi->sdmabase,
+ cdns_xspi->out_buffer, sdma_size);
+ break;
+ }
+}
+
static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
const struct spi_mem_op *op,
bool data_phase)
@@ -364,7 +687,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
- cdns_xspi_set_interrupts(cdns_xspi, true);
+ cdns_xspi->set_interrupts_handler(cdns_xspi, true);
cdns_xspi->sdma_error = false;
memset(cmd_regs, 0, sizeof(cmd_regs));
@@ -396,14 +719,14 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
wait_for_completion(&cdns_xspi->sdma_complete);
if (cdns_xspi->sdma_error) {
- cdns_xspi_set_interrupts(cdns_xspi, false);
+ cdns_xspi->set_interrupts_handler(cdns_xspi, false);
return -EIO;
}
- cdns_xspi_sdma_handle(cdns_xspi);
+ cdns_xspi->sdma_handler(cdns_xspi);
}
wait_for_completion(&cdns_xspi->cmd_complete);
- cdns_xspi_set_interrupts(cdns_xspi, false);
+ cdns_xspi->set_interrupts_handler(cdns_xspi, false);
cmd_status = cdns_xspi_check_command_status(cdns_xspi);
if (cmd_status)
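
m_ioreadq()/m_iowriteq() above move data through the 64-bit SDMA window: aligned full qwords are copied directly, and the trailing partial qword is staged through a bounce variable so the caller's buffer is never overrun. A userspace model of the tail handling, with a plain function standing in for the MMIO readq():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fake_readq(void)
{
	return 0x1122334455667788ull;	/* stand-in for the SDMA register */
}

static void readq_into(uint8_t *buf, int len)
{
	while (len > 0) {
		uint64_t tmp = fake_readq();

		memcpy(buf, &tmp, len < 8 ? len : 8);	/* clamp the tail */
		len -= 8;
		buf += 8;
	}
}

int main(void)
{
	uint8_t out[11];

	readq_into(out, sizeof(out));	/* one full qword + a 3-byte tail */
	/* the 3 tail bytes, in host byte order */
	printf("tail: %02x %02x %02x\n", out[8], out[9], out[10]);
	return 0;
}
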
@@ -437,6 +760,81 @@ static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
return ret;
}
+static int marvell_xspi_mem_op_execute(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_controller_get_devdata(mem->spi->controller);
+ int ret = 0;
+
+ cdns_mrvl_xspi_setup_clock(cdns_xspi, mem->spi->max_speed_hz);
+
+ ret = cdns_xspi_mem_op(cdns_xspi, mem, op);
+
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+static bool cdns_xspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ const union acpi_object *obj;
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&spi->dev);
+
+ if (!acpi_dev_get_property(adev, "spi-tx-bus-width", ACPI_TYPE_INTEGER,
+ &obj)) {
+ switch (obj->integer.value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ case 8:
+ spi->mode |= SPI_TX_OCTAL;
+ break;
+ default:
+ dev_warn(&spi->dev,
+ "spi-tx-bus-width %lld not supported\n",
+ obj->integer.value);
+ break;
+ }
+ }
+
+ if (!acpi_dev_get_property(adev, "spi-rx-bus-width", ACPI_TYPE_INTEGER,
+ &obj)) {
+ switch (obj->integer.value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ case 8:
+ spi->mode |= SPI_RX_OCTAL;
+ break;
+ default:
+ dev_warn(&spi->dev,
+ "spi-rx-bus-width %lld not supported\n",
+ obj->integer.value);
+ break;
+ }
+ }
+
+ return spi_mem_default_supports_op(mem, op);
+}
+#endif
+
static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
@@ -448,10 +846,21 @@ static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *
}
static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
+#ifdef CONFIG_ACPI
+ .supports_op = cdns_xspi_supports_op,
+#endif
.exec_op = cdns_xspi_mem_op_execute,
.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
+static const struct spi_controller_mem_ops marvell_xspi_mem_ops = {
+#ifdef CONFIG_ACPI
+ .supports_op = cdns_xspi_supports_op,
+#endif
+ .exec_op = marvell_xspi_mem_op_execute,
+ .adjust_op_size = cdns_xspi_adjust_mem_op_size,
+};
+
static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
{
struct cdns_xspi_dev *cdns_xspi = dev;
@@ -495,15 +904,20 @@ static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
{
- struct device_node *node_prop = pdev->dev.of_node;
+ struct fwnode_handle *fwnode_child;
unsigned int cs;
- for_each_available_child_of_node_scoped(node_prop, node_child) {
- if (of_property_read_u32(node_child, "reg", &cs)) {
+ device_for_each_child_node(&pdev->dev, fwnode_child) {
+ if (!fwnode_device_is_available(fwnode_child))
+ continue;
+
+ if (fwnode_property_read_u32(fwnode_child, "reg", &cs)) {
dev_err(&pdev->dev, "Couldn't get memory chip select\n");
+ fwnode_handle_put(fwnode_child);
return -ENXIO;
} else if (cs >= CDNS_XSPI_MAX_BANKS) {
dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
+ fwnode_handle_put(fwnode_child);
return -ENXIO;
}
}
@@ -528,6 +942,204 @@ static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
}
+static int cdns_xspi_prepare_generic(int cs, const void *dout, int len, int glue, u32 *cmd_regs)
+{
+ u8 *data = (u8 *)dout;
+ int i;
+ int data_counter = 0;
+
+ memset(cmd_regs, 0x00, CMD_REG_LEN);
+
+ if (GENERIC_CMD_REG_3_NEEDED(len)) {
+ for (i = GENERIC_CMD_DATA_REG_3_COUNT(len); i >= 0 ; i--)
+ cmd_regs[3] |= GENERIC_CMD_DATA_INSERT(data[data_counter++],
+ GENERIC_CMD_DATA_3_OFFSET(i));
+ }
+ if (GENERIC_CMD_REG_2_NEEDED(len)) {
+ for (i = GENERIC_CMD_DATA_REG_2_COUNT(len); i >= 0; i--)
+ cmd_regs[2] |= GENERIC_CMD_DATA_INSERT(data[data_counter++],
+ GENERIC_CMD_DATA_2_OFFSET(i));
+ }
+ for (i = GENERIC_CMD_DATA_REG_1_COUNT(len); i >= 0 ; i--)
+ cmd_regs[1] |= GENERIC_CMD_DATA_INSERT(data[data_counter++],
+ GENERIC_CMD_DATA_1_OFFSET(i));
+
+ cmd_regs[1] |= CDNS_XSPI_CMD_FLD_P1_GENERIC_CMD;
+ cmd_regs[3] |= CDNS_XSPI_CMD_FLD_P3_GENERIC_CMD(len);
+ cmd_regs[4] |= CDNS_XSPI_CMD_FLD_P4_GENERIC_CMD(cs, glue);
+
+ return 0;
+}
+
+static void marvell_xspi_read_single_qword(struct cdns_xspi_dev *cdns_xspi, u8 **buffer)
+{
+ u64 d = readq(cdns_xspi->xferbase +
+ MRVL_XFER_FUNC_CTRL_READ_DATA(cdns_xspi->current_xfer_qword));
+ u8 *ptr = (u8 *)&d;
+ int k;
+
+ for (k = 0; k < 8; k++) {
+ u8 val = bitrev8((ptr[k]));
+ **buffer = val;
+ *buffer = *buffer + 1;
+ }
+
+ cdns_xspi->current_xfer_qword++;
+ cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT;
+}
+
+static void cdns_xspi_finish_read(struct cdns_xspi_dev *cdns_xspi, u8 **buffer, u32 data_count)
+{
+ u64 d = readq(cdns_xspi->xferbase +
+ MRVL_XFER_FUNC_CTRL_READ_DATA(cdns_xspi->current_xfer_qword));
+ u8 *ptr = (u8 *)&d;
+ int k;
+
+ for (k = 0; k < data_count % MRVL_XFER_QWORD_BYTECOUNT; k++) {
+ u8 val = bitrev8((ptr[k]));
+ **buffer = val;
+ *buffer = *buffer + 1;
+ }
+
+ cdns_xspi->current_xfer_qword++;
+ cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT;
+}
+
+static int cdns_xspi_prepare_transfer(int cs, int dir, int len, u32 *cmd_regs)
+{
+ memset(cmd_regs, 0x00, CMD_REG_LEN);
+
+ cmd_regs[1] |= CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_1;
+ cmd_regs[2] |= CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_2(len);
+ cmd_regs[4] |= CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_4(dir, cs);
+
+ return 0;
+}
+
+static bool cdns_xspi_is_stig_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep)
+{
+ u32 ctrl_stat;
+
+ return !readl_relaxed_poll_timeout
+ (cdns_xspi->iobase + CDNS_XSPI_CTRL_STATUS_REG,
+ ctrl_stat,
+ ((ctrl_stat & BIT(3)) == 0),
+ sleep ? MRVL_XSPI_POLL_DELAY_US : 0,
+ sleep ? MRVL_XSPI_POLL_TIMEOUT_US : 0);
+}
+
+static bool cdns_xspi_is_sdma_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep)
+{
+ u32 ctrl_stat;
+
+ return !readl_relaxed_poll_timeout
+ (cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG,
+ ctrl_stat,
+ (ctrl_stat & CDNS_XSPI_SDMA_TRIGGER),
+ sleep ? MRVL_XSPI_POLL_DELAY_US : 0,
+ sleep ? MRVL_XSPI_POLL_TIMEOUT_US : 0);
+}
+
+static int cdns_xspi_transfer_one_message_b0(struct spi_controller *controller,
+ struct spi_message *m)
+{
+ struct cdns_xspi_dev *cdns_xspi = spi_controller_get_devdata(controller);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t = NULL;
+
+ const unsigned int max_len = MRVL_XFER_QWORD_BYTECOUNT * MRVL_XFER_QWORD_COUNT;
+ int current_transfer_len;
+ int cs = spi_get_chipselect(spi, 0);
+ int cs_change = 0;
+
+ /* Enable xfer state machine */
+ if (!cdns_xspi->xfer_in_progress) {
+ u32 xfer_control = readl(cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL);
+
+ cdns_xspi->current_xfer_qword = 0;
+ cdns_xspi->xfer_in_progress = true;
+ xfer_control |= (MRVL_XFER_RECEIVE_ENABLE |
+ MRVL_XFER_CLK_CAPTURE_POL |
+ MRVL_XFER_FUNC_START |
+ MRVL_XFER_SOFT_RESET |
+ FIELD_PREP(MRVL_XFER_CS_N_HOLD, (1 << cs)));
+ xfer_control &= ~(MRVL_XFER_FUNC_ENABLE | MRVL_XFER_CLK_DRIVE_POL);
+ writel(xfer_control, cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL);
+ }
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ u8 *txd = (u8 *) t->tx_buf;
+ u8 *rxd = (u8 *) t->rx_buf;
+ u8 data[10];
+ u32 cmd_regs[6];
+
+ if (!txd)
+ txd = data;
+
+ cdns_xspi->in_buffer = txd + 1;
+ cdns_xspi->out_buffer = txd + 1;
+
+ while (t->len) {
+
+ current_transfer_len = min(max_len, t->len);
+
+ if (current_transfer_len < 10) {
+ cdns_xspi_prepare_generic(cs, txd, current_transfer_len,
+ false, cmd_regs);
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+ if (!cdns_xspi_is_stig_ready(cdns_xspi, true))
+ return -EIO;
+ } else {
+ cdns_xspi_prepare_generic(cs, txd, 1, true, cmd_regs);
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+ cdns_xspi_prepare_transfer(cs, 1, current_transfer_len - 1,
+ cmd_regs);
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+ if (!cdns_xspi_is_sdma_ready(cdns_xspi, true))
+ return -EIO;
+ cdns_xspi->sdma_handler(cdns_xspi);
+ if (!cdns_xspi_is_stig_ready(cdns_xspi, true))
+ return -EIO;
+
+ cdns_xspi->in_buffer += current_transfer_len;
+ cdns_xspi->out_buffer += current_transfer_len;
+ }
+
+ if (rxd) {
+ int j;
+
+ for (j = 0; j < current_transfer_len / 8; j++)
+ marvell_xspi_read_single_qword(cdns_xspi, &rxd);
+ cdns_xspi_finish_read(cdns_xspi, &rxd, current_transfer_len);
+ } else {
+ cdns_xspi->current_xfer_qword += current_transfer_len /
+ MRVL_XFER_QWORD_BYTECOUNT;
+ if (current_transfer_len % MRVL_XFER_QWORD_BYTECOUNT)
+ cdns_xspi->current_xfer_qword++;
+
+ cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT;
+ }
+ cs_change = t->cs_change;
+ t->len -= current_transfer_len;
+ }
+ spi_transfer_delay_exec(t);
+ }
+
+ if (!cs_change) {
+ u32 xfer_control = readl(cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL);
+
+ xfer_control &= ~(MRVL_XFER_RECEIVE_ENABLE |
+ MRVL_XFER_SOFT_RESET);
+ writel(xfer_control, cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL);
+ cdns_xspi->xfer_in_progress = false;
+ }
+
+ m->status = 0;
+ spi_finalize_current_message(controller);
+
+ return 0;
+}
+
static int cdns_xspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
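
The Marvell read path above drains the FIFO one qword at a time and mirrors each byte with bitrev8(), hence the new <linux/bitrev.h> include. A standalone sketch of the per-byte reversal, with bitrev8() open-coded and the byte extraction modeling a little-endian host:

#include <stdint.h>
#include <stdio.h>

static uint8_t bitrev8(uint8_t b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

static void unpack_qword(uint64_t fifo_word, uint8_t *out)
{
	int k;

	for (k = 0; k < 8; k++)		/* mirror every byte of the word */
		out[k] = bitrev8((uint8_t)(fifo_word >> (8 * k)));
}

int main(void)
{
	uint8_t buf[8];

	unpack_qword(0x0000000000000001ull, buf);
	printf("0x01 reversed -> 0x%02x\n", buf[0]);	/* 0x80 */
	return 0;
}
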
@@ -544,13 +1156,29 @@ static int cdns_xspi_probe(struct platform_device *pdev)
SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
SPI_MODE_0 | SPI_MODE_3;
- host->mem_ops = &cadence_xspi_mem_ops;
+ cdns_xspi = spi_controller_get_devdata(host);
+ cdns_xspi->driver_data = of_device_get_match_data(dev);
+ if (!cdns_xspi->driver_data) {
+ cdns_xspi->driver_data = acpi_device_get_match_data(dev);
+ if (!cdns_xspi->driver_data)
+ return -ENODEV;
+ }
+
+ if (cdns_xspi->driver_data->mrvl_hw_overlay) {
+ host->mem_ops = &marvell_xspi_mem_ops;
+ host->transfer_one_message = cdns_xspi_transfer_one_message_b0;
+ cdns_xspi->sdma_handler = &marvell_xspi_sdma_handle;
+ cdns_xspi->set_interrupts_handler = &marvell_xspi_set_interrupts;
+ } else {
+ host->mem_ops = &cadence_xspi_mem_ops;
+ cdns_xspi->sdma_handler = &cdns_xspi_sdma_handle;
+ cdns_xspi->set_interrupts_handler = &cdns_xspi_set_interrupts;
+ }
host->dev.of_node = pdev->dev.of_node;
host->bus_num = -1;
platform_set_drvdata(pdev, host);
- cdns_xspi = spi_controller_get_devdata(host);
cdns_xspi->pdev = pdev;
cdns_xspi->dev = &pdev->dev;
cdns_xspi->cur_cs = 0;
@@ -565,20 +1193,42 @@ static int cdns_xspi_probe(struct platform_device *pdev)
cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io");
if (IS_ERR(cdns_xspi->iobase)) {
- dev_err(dev, "Failed to remap controller base address\n");
- return PTR_ERR(cdns_xspi->iobase);
+ cdns_xspi->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cdns_xspi->iobase)) {
+ dev_err(dev, "Failed to remap controller base address\n");
+ return PTR_ERR(cdns_xspi->iobase);
+ }
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
- if (IS_ERR(cdns_xspi->sdmabase))
- return PTR_ERR(cdns_xspi->sdmabase);
+ if (IS_ERR(cdns_xspi->sdmabase)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->sdmabase))
+ return PTR_ERR(cdns_xspi->sdmabase);
+ }
cdns_xspi->sdmasize = resource_size(res);
cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
if (IS_ERR(cdns_xspi->auxbase)) {
- dev_err(dev, "Failed to remap AUX address\n");
- return PTR_ERR(cdns_xspi->auxbase);
+ cdns_xspi->auxbase = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(cdns_xspi->auxbase)) {
+ dev_err(dev, "Failed to remap AUX address\n");
+ return PTR_ERR(cdns_xspi->auxbase);
+ }
+ }
+
+ if (cdns_xspi->driver_data->mrvl_hw_overlay) {
+ cdns_xspi->xferbase = devm_platform_ioremap_resource_byname(pdev, "xfer");
+ if (IS_ERR(cdns_xspi->xferbase)) {
+ cdns_xspi->xferbase = devm_platform_ioremap_resource(pdev, 3);
+ if (IS_ERR(cdns_xspi->xferbase)) {
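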
+ dev_info(dev, "XFER register base not found, set it\n");
+ // For compatibility with older firmware
+ cdns_xspi->xferbase = cdns_xspi->iobase + 0x8000;
+ }
+ }
}
cdns_xspi->irq = platform_get_irq(pdev, 0);
@@ -592,6 +1242,11 @@ static int cdns_xspi_probe(struct platform_device *pdev)
return ret;
}
+ if (cdns_xspi->driver_data->mrvl_hw_overlay) {
+ cdns_mrvl_xspi_setup_clock(cdns_xspi, MRVL_DEFAULT_CLK);
+ cdns_xspi_configure_phy(cdns_xspi);
+ }
+
cdns_xspi_print_phy_config(cdns_xspi);
ret = cdns_xspi_controller_init(cdns_xspi);
@@ -616,6 +1271,11 @@ static int cdns_xspi_probe(struct platform_device *pdev)
static const struct of_device_id cdns_xspi_of_match[] = {
{
.compatible = "cdns,xspi-nor",
+ .data = &cdns_driver_data,
+ },
+ {
+ .compatible = "marvell,cn10-xspi-nor",
+ .data = &marvell_driver_data,
},
{ /* end of table */}
};
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index f7e8b5efa50e..ad26c8409733 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -570,6 +570,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
u32 errors = 0;
struct davinci_spi_config *spicfg;
struct davinci_spi_platform_data *pdata;
+ unsigned long timeout;
dspi = spi_controller_get_devdata(spi->controller);
pdata = &dspi->pdata;
@@ -661,7 +662,12 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Wait for the transfer to complete */
if (spicfg->io_type != SPI_IO_TYPE_POLL) {
- if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
+ timeout = DIV_ROUND_UP(t->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(t->len * 8, timeout);
+ /* Assume we are at most 2x slower than the nominal bus speed */
+ timeout = 2 * msecs_to_jiffies(timeout);
+
+ if (wait_for_completion_timeout(&dspi->done, timeout) == 0)
errors = SPIFLG_TIMEOUT_MASK;
} else {
while (dspi->rcount > 0 || dspi->wcount > 0) {
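
The fixed one-second wait above is replaced by a timeout scaled to the transfer: roughly len * 8 bits divided by the speed in bits-per-millisecond, then doubled for slack. Checking the arithmetic standalone, with DIV_ROUND_UP open-coded:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
	unsigned long khz = DIV_ROUND_UP(speed_hz, 1000);	/* bits per ms */
	unsigned long ms = DIV_ROUND_UP((unsigned long)len * 8, khz);

	return 2 * ms;	/* assume at most 2x slower than nominal */
}

int main(void)
{
	/* 4 KiB at 1 MHz: 32768 bits / 1000 bits-per-ms -> 33 ms, doubled */
	printf("timeout = %lu ms\n", xfer_timeout_ms(4096, 1000000));	/* 66 */
	return 0;
}
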
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 8ecb426be45c..977e8b55c82b 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -986,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
fsl_lpspi_dma_exit(controller);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 37ef8c40b276..f6e40f90418f 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -604,6 +604,21 @@ static int spi_geni_prepare_message(struct spi_controller *spi,
return -EINVAL;
}
+static void spi_geni_release_dma_chan(void *data)
+{
+ struct spi_geni_master *mas = data;
+
+ if (mas->rx) {
+ dma_release_channel(mas->rx);
+ mas->rx = NULL;
+ }
+
+ if (mas->tx) {
+ dma_release_channel(mas->tx);
+ mas->tx = NULL;
+ }
+}
+
static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
int ret;
@@ -622,6 +637,12 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
goto err_rx;
}
+ ret = devm_add_action_or_reset(mas->dev, spi_geni_release_dma_chan, mas);
+ if (ret) {
+ dev_err(mas->dev, "Unable to add action.\n");
+ return ret;
+ }
+
return 0;
err_rx:
@@ -632,19 +653,6 @@ err_tx:
return ret;
}
-static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
-{
- if (mas->rx) {
- dma_release_channel(mas->rx);
- mas->rx = NULL;
- }
-
- if (mas->tx) {
- dma_release_channel(mas->tx);
- mas->tx = NULL;
- }
-}
-
static int spi_geni_init(struct spi_geni_master *mas)
{
struct spi_controller *spi = dev_get_drvdata(mas->dev);
@@ -1110,25 +1118,27 @@ static int spi_geni_probe(struct platform_device *pdev)
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
if (device_property_read_bool(&pdev->dev, "spi-slave"))
spi->target = true;
ret = geni_icc_get(&mas->se, NULL);
if (ret)
- goto spi_geni_probe_runtime_disable;
+ return ret;
/* Set the bus quota to a reasonable value for register access */
mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
ret = geni_icc_set_bw(&mas->se);
if (ret)
- goto spi_geni_probe_runtime_disable;
+ return ret;
ret = spi_geni_init(mas);
if (ret)
- goto spi_geni_probe_runtime_disable;
+ return ret;
/*
* check the mode supported and set_cs for fifo mode only
@@ -1144,36 +1154,11 @@ static int spi_geni_probe(struct platform_device *pdev)
if (mas->cur_xfer_mode == GENI_GPI_DMA)
spi->flags = SPI_CONTROLLER_MUST_TX;
- ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
+ ret = devm_request_irq(dev, mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
- goto spi_geni_release_dma;
-
- ret = spi_register_controller(spi);
- if (ret)
- goto spi_geni_probe_free_irq;
-
- return 0;
-spi_geni_probe_free_irq:
- free_irq(mas->irq, spi);
-spi_geni_release_dma:
- spi_geni_release_dma_chan(mas);
-spi_geni_probe_runtime_disable:
- pm_runtime_disable(dev);
- return ret;
-}
-
-static void spi_geni_remove(struct platform_device *pdev)
-{
- struct spi_controller *spi = platform_get_drvdata(pdev);
- struct spi_geni_master *mas = spi_controller_get_devdata(spi);
-
- /* Unregister _before_ disabling pm_runtime() so we stop transfers */
- spi_unregister_controller(spi);
-
- spi_geni_release_dma_chan(mas);
+ return ret;
- free_irq(mas->irq, spi);
- pm_runtime_disable(&pdev->dev);
+ return devm_spi_register_controller(dev, spi);
}
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
@@ -1255,7 +1240,6 @@ MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
static struct platform_driver spi_geni_driver = {
.probe = spi_geni_probe,
- .remove_new = spi_geni_remove,
.driver = {
.name = "geni_spi",
.pm = &spi_geni_pm_ops,
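
Registering the DMA-channel release through devm_add_action_or_reset() is what lets probe drop its unwind ladder and remove() disappear entirely: devres callbacks run in reverse registration order on probe failure or unbind, so the controller is unregistered before the IRQ is freed and the channels released. A minimal userspace model of that LIFO unwind:

#include <stdio.h>

typedef void (*action_t)(const char *);

static action_t actions[8];
static const char *args[8];
static int n_actions;

static void add_action(action_t fn, const char *arg)	/* devm_add_action */
{
	actions[n_actions] = fn;
	args[n_actions++] = arg;
}

static void release_all(void)	/* models driver detach / probe failure */
{
	while (n_actions--)
		actions[n_actions](args[n_actions]);
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

int main(void)
{
	add_action(release, "gpi dma channels");	/* registered first */
	add_action(release, "irq");
	add_action(release, "spi controller");

	release_all();	/* controller, then irq, then dma channels */
	return 0;
}
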
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 36c587be9e28..4f192e013cd6 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -236,6 +236,14 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
}
}
+static void spi_gpio_set_mosi_idle(struct spi_device *spi)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ gpiod_set_value_cansleep(spi_gpio->mosi,
+ !!(spi->mode & SPI_MOSI_IDLE_HIGH));
+}
+
static int spi_gpio_setup(struct spi_device *spi)
{
struct gpio_desc *cs;
@@ -389,7 +397,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
- SPI_CS_HIGH | SPI_LSB_FIRST;
+ SPI_CS_HIGH | SPI_LSB_FIRST | SPI_MOSI_IDLE_LOW |
+ SPI_MOSI_IDLE_HIGH;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
@@ -414,6 +423,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
host->flags |= SPI_CONTROLLER_GPIO_SS;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
+ bb->set_mosi_idle = spi_gpio_set_mosi_idle;
if (host->flags & SPI_CONTROLLER_NO_TX) {
bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 8838a98b04c2..1d05590a7434 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -655,8 +655,8 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
}
init.num_parents = 1;
- pow2_fixed_div->mult = 1,
- pow2_fixed_div->div = 4,
+ pow2_fixed_div->mult = 1;
+ pow2_fixed_div->div = 4;
pow2_fixed_div->hw.init = &init;
clk = devm_clk_register(dev, &pow2_fixed_div->hw);
@@ -674,9 +674,9 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- spicc->pow2_div.shift = 16,
- spicc->pow2_div.width = 3,
- spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.shift = 16;
+ spicc->pow2_div.width = 3;
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO;
spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
spicc->pow2_div.hw.init = &init;
@@ -721,8 +721,8 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
}
init.num_parents = 1;
- enh_fixed_div->mult = 1,
- enh_fixed_div->div = 2,
+ enh_fixed_div->mult = 1;
+ enh_fixed_div->div = 2;
enh_fixed_div->hw.init = &init;
clk = devm_clk_register(dev, &enh_fixed_div->hw);
@@ -740,8 +740,8 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
parent_data[0].hw = &enh_fixed_div->hw;
init.num_parents = 1;
- enh_div->shift = 16,
- enh_div->width = 8,
+ enh_div->shift = 16;
+ enh_div->width = 8;
enh_div->reg = spicc->base + SPICC_ENH_CTL0;
enh_div->hw.init = &init;
@@ -761,8 +761,8 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
- mux->mask = 0x1,
- mux->shift = 24,
+ mux->mask = 0x1;
+ mux->shift = 24;
mux->reg = spicc->base + SPICC_ENH_CTL0;
mux->hw.init = &init;
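
The meson hunks simply swap trailing commas for semicolons. The commas compiled cleanly because `a = 1, b = 2;` is a single comma-operator expression, but the pattern is fragile and hides typos the moment a statement is inserted between the clauses. A two-line demonstration that both forms assign, which is why the bug went unnoticed:

#include <stdio.h>

int main(void)
{
	int mult, div;

	mult = 1,	/* legal: the comma operator chains the assignments */
	div = 4;

	printf("mult=%d div=%d\n", mult, div);	/* both are assigned */
	return 0;
}
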
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 36c2f52cd6b8..dfee244fc317 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -743,25 +743,13 @@ static int mtk_spi_setup(struct spi_device *spi)
return 0;
}
-static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
u32 cmd, reg_val, cnt, remainder, len;
struct spi_controller *host = dev_id;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
struct spi_transfer *xfer = mdata->cur_transfer;
- reg_val = readl(mdata->base + SPI_STATUS0_REG);
- if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
- mdata->state = MTK_SPI_PAUSED;
- else
- mdata->state = MTK_SPI_IDLE;
-
- /* SPI-MEM ops */
- if (mdata->use_spimem) {
- complete(&mdata->spimem_done);
- return IRQ_HANDLED;
- }
-
if (!host->can_dma(host, NULL, xfer)) {
if (xfer->rx_buf) {
cnt = mdata->xfer_len / 4;
@@ -845,6 +833,27 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *host = dev_id;
+ struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPI_STATUS0_REG);
+ if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
+ mdata->state = MTK_SPI_PAUSED;
+ else
+ mdata->state = MTK_SPI_IDLE;
+
+ /* SPI-MEM ops */
+ if (mdata->use_spimem) {
+ complete(&mdata->spimem_done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
struct spi_mem_op *op)
{
@@ -1255,8 +1264,9 @@ static int mtk_spi_probe(struct platform_device *pdev)
dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
addr_bits, ret);
- ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
- IRQF_TRIGGER_NONE, dev_name(dev), host);
+ ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
+ mtk_spi_interrupt_thread,
+ IRQF_TRIGGER_NONE, dev_name(dev), host);
if (ret)
return dev_err_probe(dev, ret, "failed to register irq\n");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 88cbe4f00cc3..3e341d1ff3b6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -477,7 +477,7 @@ static int mxs_spi_runtime_resume(struct device *dev)
return ret;
}
-static int __maybe_unused mxs_spi_suspend(struct device *dev)
+static int mxs_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
@@ -492,7 +492,7 @@ static int __maybe_unused mxs_spi_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mxs_spi_resume(struct device *dev)
+static int mxs_spi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
@@ -512,9 +512,8 @@ static int __maybe_unused mxs_spi_resume(struct device *dev)
}
static const struct dev_pm_ops mxs_spi_pm = {
- SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
- mxs_spi_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+ RUNTIME_PM_OPS(mxs_spi_runtime_suspend, mxs_spi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
};
static const struct of_device_id mxs_spi_dt_ids[] = {
@@ -662,7 +661,7 @@ static struct platform_driver mxs_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
- .pm = &mxs_spi_pm,
+ .pm = pm_ptr(&mxs_spi_pm),
},
};
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 88397f712a3b..5a1e55a01c52 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -57,13 +57,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-/*
- * The driver only uses one single LUT entry, that is updated on
- * each call of exec_op(). Index 0 is preset at boot with a basic
- * read operation, so let's use the last entry (31).
- */
-#define SEQID_LUT 31
-
/* Registers used by the driver */
#define FSPI_MCR0 0x00
#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
@@ -263,9 +256,6 @@
#define FSPI_TFDR 0x180
#define FSPI_LUT_BASE 0x200
-#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
-#define FSPI_LUT_REG(idx) \
- (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
/* register map end */
@@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data {
unsigned int txfifo;
unsigned int ahb_buf_size;
unsigned int quirks;
+ unsigned int lut_num;
bool little_endian;
};
@@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = {
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
+ .lut_num = 32,
.little_endian = true, /* little-endian */
};
@@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = {
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
+ .lut_num = 32,
.little_endian = true, /* little-endian */
};
@@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = {
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = 0,
+ .lut_num = 32,
.little_endian = true, /* little-endian */
};
@@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = {
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
.quirks = FSPI_QUIRK_USE_IP_ONLY,
+ .lut_num = 32,
+ .little_endian = true, /* little-endian */
+};
+
+static struct nxp_fspi_devtype_data imx8ulp_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .lut_num = 16,
.little_endian = true, /* little-endian */
};
@@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
void __iomem *base = f->iobase;
u32 lutval[4] = {};
int lutidx = 1, i;
+ u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4;
+ u32 target_lut_reg;
/* cmd */
lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
@@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
/* fill LUT */
- for (i = 0; i < ARRAY_SIZE(lutval); i++)
- fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
+ for (i = 0; i < ARRAY_SIZE(lutval); i++) {
+ target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4;
+ fspi_writel(f, lutval[i], base + target_lut_reg);
+ }
dev_dbg(f->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x], size: 0x%08x\n",
op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes);
@@ -756,8 +764,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
iounmap(f->ahb_addr);
f->memmap_start = start;
- f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
- len : NXP_FSPI_MIN_IOMAP;
+ f->memmap_len = max_t(u32, len, NXP_FSPI_MIN_IOMAP);
f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
f->memmap_len);
@@ -805,14 +812,15 @@ static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
if (i < op->data.nbytes) {
u32 data = 0;
int j;
+ int remaining = op->data.nbytes - i;
/* Wait for TXFIFO empty */
ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
FSPI_INTR_IPTXWE, 0,
POLL_TOUT, true);
WARN_ON(ret);
- for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
- memcpy(&data, buf + i + j, 4);
+ for (j = 0; j < ALIGN(remaining, 4); j += 4) {
+ memcpy(&data, buf + i + j, min_t(int, 4, remaining - j));
fspi_writel(f, data, base + FSPI_TFDR + j);
}
fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
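
The memcpy bound fix above matters only for the final FIFO word: the loop still pushes ALIGN(remaining, 4) bytes of zero-padded register data, but it now reads at most the remaining payload bytes from the source buffer instead of running past its end. A standalone model of the padded tail copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fill_words(const uint8_t *buf, int nbytes)
{
	int j;

	for (j = 0; j < nbytes; j += 4) {
		uint32_t data = 0;
		int chunk = nbytes - j < 4 ? nbytes - j : 4;

		memcpy(&data, buf + j, chunk);	/* never reads past buf */
		printf("word %d: 0x%08x\n", j / 4, data);	/* tail zero-padded */
	}
}

int main(void)
{
	const uint8_t payload[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	fill_words(payload, sizeof(payload));	/* one full word + 2-byte tail */
	return 0;
}
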
@@ -875,7 +883,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
void __iomem *base = f->iobase;
int seqnum = 0;
int err = 0;
- u32 reg;
+ u32 reg, seqid_lut;
reg = fspi_readl(f, base + FSPI_IPRXFCR);
/* invalidate the RXFIFO first */
@@ -891,8 +899,9 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
* the LUT at each exec_op() call. And also specify the DATA
* length, since it has not been specified in the LUT.
*/
+ seqid_lut = f->devtype_data->lut_num - 1;
fspi_writel(f, op->data.nbytes |
- (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) |
(seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
base + FSPI_IPCR1);
@@ -1016,7 +1025,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
{
void __iomem *base = f->iobase;
int ret, i;
- u32 reg;
+ u32 reg, seqid_lut;
/* disable and unprepare clock to avoid glitch pass to controller */
nxp_fspi_clk_disable_unprep(f);
@@ -1091,11 +1100,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+ /*
+ * The driver only uses a single LUT entry, which is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry.
+ */
+ seqid_lut = f->devtype_data->lut_num - 1;
/* AHB Read - Set lut sequence ID for all CS. */
- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
+ fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2);
+ fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2);
+ fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2);
+ fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2);
f->selected = -1;
@@ -1290,6 +1305,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
{ .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
{ .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
+ { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
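
With SEQID now derived from the per-SoC LUT count, the chosen slot and its register window follow directly: each LUT slot is four 32-bit registers, and the driver claims the last slot. Checking the register arithmetic for both table sizes:

#include <stdio.h>

#define FSPI_LUT_BASE 0x200

static void show_lut(unsigned int lut_num)
{
	unsigned int seqid = lut_num - 1;	/* last slot */
	unsigned int offset = seqid * 4 * 4;	/* 4 regs x 4 bytes per slot */

	printf("lut_num=%2u seqid=%2u regs 0x%03x..0x%03x\n",
	       lut_num, seqid,
	       FSPI_LUT_BASE + offset, FSPI_LUT_BASE + offset + 3 * 4);
}

int main(void)
{
	show_lut(32);	/* lx2160a/imx8m/imx8qxp/imx8dxl: 0x3f0..0x3fc */
	show_lut(16);	/* imx8ulp: 0x2f0..0x2fc */
	return 0;
}
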
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 942c3117ab3a..4a64ea0f596f 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -20,23 +20,21 @@
* during SPI transfers by setting max_speed_hz via the device tree.
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/io.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
@@ -412,7 +410,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
}
/* Request IRQ */
- hw->irqnum = irq_of_parse_and_map(np, 0);
+ ret = platform_get_irq(op, 0);
+ if (ret < 0)
+ goto free_host;
+ hw->irqnum = ret;
+
ret = request_irq(hw->irqnum, spi_ppc4xx_int,
0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index d3f07fd719bd..b468a95972bf 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -198,9 +198,16 @@ static int __maybe_unused rpcif_spi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
+static const struct platform_device_id rpc_if_spi_id_table[] = {
+ { .name = "rpc-if-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, rpc_if_spi_id_table);
+
static struct platform_driver rpcif_spi_driver = {
.probe = rpcif_spi_probe,
.remove_new = rpcif_spi_remove,
+ .id_table = rpc_if_spi_id_table,
.driver = {
.name = "rpc-if-spi",
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 833c58c88e40..51a002b3f518 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1637,6 +1637,7 @@ static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(platform, s3c64xx_spi_driver_ids);
static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "google,gs101-spi",
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index f1ddf4c099a3..4a91b7bae3c6 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -69,7 +69,7 @@ struct mtk_spi_slave {
struct clk *spi_clk;
struct completion xfer_done;
struct spi_transfer *cur_transfer;
- bool slave_aborted;
+ bool target_aborted;
const struct mtk_spi_compatible *dev_comp;
};
@@ -118,7 +118,7 @@ static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
if (wait_for_completion_interruptible(&mdata->xfer_done) ||
- mdata->slave_aborted) {
+ mdata->target_aborted) {
dev_err(mdata->dev, "interrupted\n");
return -EINTR;
}
@@ -286,7 +286,7 @@ static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
reinit_completion(&mdata->xfer_done);
- mdata->slave_aborted = false;
+ mdata->target_aborted = false;
mdata->cur_transfer = xfer;
if (xfer->len > mdata->dev_comp->max_fifo_size)
@@ -314,11 +314,11 @@ static int mtk_spi_slave_setup(struct spi_device *spi)
return 0;
}
-static int mtk_slave_abort(struct spi_controller *ctlr)
+static int mtk_target_abort(struct spi_controller *ctlr)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
- mdata->slave_aborted = true;
+ mdata->target_aborted = true;
complete(&mdata->xfer_done);
return 0;
@@ -402,7 +402,7 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
ctlr->prepare_message = mtk_spi_slave_prepare_message;
ctlr->transfer_one = mtk_spi_slave_transfer_one;
ctlr->setup = mtk_spi_slave_setup;
- ctlr->slave_abort = mtk_slave_abort;
+ ctlr->target_abort = mtk_target_abort;
of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
if (!of_id) {
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
index d37cfe995a63..8f5c32b61a5b 100644
--- a/drivers/spi/spi-slave-system-control.c
+++ b/drivers/spi/spi-slave-system-control.c
@@ -136,7 +136,7 @@ static void spi_slave_system_control_remove(struct spi_device *spi)
{
struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
- spi_slave_abort(spi);
+ spi_target_abort(spi);
wait_for_completion(&priv->finished);
}
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
index f56c1afb8534..8bb3070e4b80 100644
--- a/drivers/spi/spi-slave-time.c
+++ b/drivers/spi/spi-slave-time.c
@@ -110,7 +110,7 @@ static void spi_slave_time_remove(struct spi_device *spi)
{
struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
- spi_slave_abort(spi);
+ spi_target_abort(spi);
wait_for_completion(&priv->finished);
}
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
index 886d6d7771d4..a9aee2a6c7dc 100644
--- a/drivers/spi/spi-wpcm-fiu.c
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -448,12 +448,10 @@ static int wpcm_fiu_probe(struct platform_device *pdev)
fiu = spi_controller_get_devdata(ctrl);
fiu->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
- fiu->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(fiu->regs)) {
- dev_err(dev, "Failed to map registers\n");
- return PTR_ERR(fiu->regs);
- }
+ fiu->regs = devm_platform_ioremap_resource_byname(pdev, "control");
+ if (IS_ERR(fiu->regs))
+ return dev_err_probe(dev, PTR_ERR(fiu->regs),
+ "Failed to map registers\n");
fiu->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(fiu->clk))
@@ -462,10 +460,9 @@ static int wpcm_fiu_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
fiu->memory = devm_ioremap_resource(dev, res);
fiu->memory_size = min_t(size_t, resource_size(res), MAX_MEMORY_SIZE_TOTAL);
- if (IS_ERR(fiu->memory)) {
- dev_err(dev, "Failed to map flash memory window\n");
- return PTR_ERR(fiu->memory);
- }
+ if (IS_ERR(fiu->memory))
+ return dev_err_probe(dev, PTR_ERR(fiu->memory),
+ "Failed to map flash memory window\n");
fiu->shm_regmap = syscon_regmap_lookup_by_phandle_optional(dev->of_node, "nuvoton,shm");
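
[Editor's note: both register windows now use the condensed devm idiom. A hedged sketch of the pattern, with a hypothetical "ctrl" resource name and foo_probe() standing in for the real driver:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		void __iomem *regs;

		/* Named-resource lookup plus ioremap in one call; the
		 * mapping is released automatically on driver detach. */
		regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
		if (IS_ERR(regs))
			/* dev_err_probe() logs the message (silently for
			 * -EPROBE_DEFER) and returns the error code. */
			return dev_err_probe(dev, PTR_ERR(regs),
					     "Failed to map registers\n");

		return 0;
	}
]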
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index d6325c6be3d4..b67455bda972 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -569,7 +569,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
}
if (op->dummy.nbytes) {
- tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ tmpbuf = kmalloc(op->dummy.nbytes, GFP_KERNEL);
if (!tmpbuf)
return -ENOMEM;
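
[Editor's note: kzalloc() is downgraded to kmalloc() here, presumably because the dummy buffer is fully initialized before the hardware sees it, making the zero-fill redundant. A sketch of the general rule; foo_alloc_dummy() and the 0xff fill value are illustrative:

	static void *foo_alloc_dummy(size_t len)
	{
		void *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;
		/* Every byte is written before it is read, so a
		 * kzalloc() zero-fill would be wasted work. */
		memset(buf, 0xff, len);
		return buf;
	}
]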
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 558c466135a5..fcd0ca996684 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1242,7 +1242,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
u32 num_cs;
const struct qspi_platform_data *p_data;
- ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
@@ -1256,30 +1256,22 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->has_tapdelay = true;
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xqspi->regs)) {
- ret = PTR_ERR(xqspi->regs);
- goto remove_ctlr;
- }
+ if (IS_ERR(xqspi->regs))
+ return PTR_ERR(xqspi->regs);
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(xqspi->pclk)) {
- dev_err(dev, "pclk clock not found.\n");
- ret = PTR_ERR(xqspi->pclk);
- goto remove_ctlr;
- }
+ if (IS_ERR(xqspi->pclk))
+ return dev_err_probe(dev, PTR_ERR(xqspi->pclk),
+ "pclk clock not found.\n");
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
- if (IS_ERR(xqspi->refclk)) {
- dev_err(dev, "ref_clk clock not found.\n");
- ret = PTR_ERR(xqspi->refclk);
- goto remove_ctlr;
- }
+ if (IS_ERR(xqspi->refclk))
+ return dev_err_probe(dev, PTR_ERR(xqspi->refclk),
+ "ref_clk clock not found.\n");
ret = clk_prepare_enable(xqspi->pclk);
- if (ret) {
- dev_err(dev, "Unable to enable APB clock.\n");
- goto remove_ctlr;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to enable APB clock.\n");
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
@@ -1364,8 +1356,6 @@ clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
-remove_ctlr:
- spi_controller_put(ctlr);
return ret;
}
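
[Editor's note: with devm_spi_alloc_host() the controller allocation is owned by devres, which is why the remove_ctlr label and its spi_controller_put() disappear below. A sketch of the shape this enables; foo_* names are illustrative:

	struct foo_priv { int dummy; };

	static int foo_probe(struct platform_device *pdev)
	{
		struct spi_controller *ctlr;

		ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
		if (!ctlr)
			return -ENOMEM;

		/* Any later failure can simply return an error code;
		 * devres drops the controller reference for us. */
		return 0;
	}
]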
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6ebe5dd9bbb1..c1dad30a4528 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1440,7 +1440,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
- if (spi_controller_is_slave(ctlr)) {
+ if (spi_controller_is_target(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
return -EINTR;
@@ -2425,7 +2425,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
}
- if (spi_controller_is_slave(ctlr)) {
+ if (spi_controller_is_target(ctlr)) {
if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
@@ -2934,21 +2934,10 @@ static struct class spi_master_class = {
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ * spi_target_abort - abort the ongoing transfer request on an SPI target
* controller
* @spi: device used for the current transfer
*/
-int spi_slave_abort(struct spi_device *spi)
-{
- struct spi_controller *ctlr = spi->controller;
-
- if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
- return ctlr->slave_abort(ctlr);
-
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(spi_slave_abort);
-
int spi_target_abort(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
@@ -3321,7 +3310,7 @@ int spi_register_controller(struct spi_controller *ctlr)
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
- if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
+ if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
goto free_bus_id;
@@ -3349,7 +3338,7 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status < 0)
goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
- spi_controller_is_slave(ctlr) ? "slave" : "master",
+ spi_controller_is_target(ctlr) ? "target" : "host",
dev_name(&ctlr->dev));
/*
@@ -3921,6 +3910,12 @@ int spi_setup(struct spi_device *spi)
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
+ /* Check against conflicting MOSI idle configuration */
+ if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
+ dev_err(&spi->dev,
+ "setup: MOSI configured to idle low and high at the same time.\n");
+ return -EINVAL;
+ }
/*
* Help drivers fail *cleanly* when they need options
* that aren't supported with their current controller.
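
[Editor's note: the added check rejects mutually exclusive mode bits as early as possible, in spi_setup(). The same validation in isolation; foo_validate_mode() is illustrative, the two flags are the real mode bits:

	static int foo_validate_mode(struct spi_device *spi)
	{
		/* MOSI cannot be told to idle low and high at once. */
		if ((spi->mode & SPI_MOSI_IDLE_LOW) &&
		    (spi->mode & SPI_MOSI_IDLE_HIGH))
			return -EINVAL;
		return 0;
	}
]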
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index face93a9cf20..5539c5d139d4 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -666,7 +666,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
}
#ifdef CONFIG_SPI_SLAVE
if (!dofree)
- spi_slave_abort(spidev->spi);
+ spi_target_abort(spidev->spi);
#endif
mutex_unlock(&device_list_lock);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index fefbe3cd08f3..4cfe9a0e0d56 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -200,7 +200,7 @@ struct atomisp_dis_vector {
};
/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
- * arrays that contain the coeffients for each type.
+ * arrays that contain the coefficients for each type.
*/
struct atomisp_dvs2_coef_types {
short __user *odd_real; /** real part of the odd coefficients*/
@@ -698,7 +698,7 @@ enum atomisp_burst_capture_options {
/* Digital Image Stabilization:
* 1. get dis statistics: reads DIS statistics from ISP (every frame)
* 2. set dis coefficients: set DIS filter coefficients (one time)
- * 3. set dis motion vecotr: set motion vector (result of DIS, every frame)
+ * 3. set dis motion vector: set motion vector (result of DIS, every frame)
*/
#define ATOMISP_IOC_G_DIS_STAT \
_IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index fdeb247036b0..064449fd51af 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -116,7 +116,7 @@ struct intel_v4l2_subdev_table {
};
/*
- * Sensor of external ISP can send multiple steams with different mipi data
+ * Sensor of external ISP can send multiple streams with different mipi data
* type in the same virtual channel. This information needs to come from the
* sensor or external ISP
*/
@@ -138,7 +138,7 @@ struct atomisp_input_stream_info {
/*
* if more isys_configs is more than 0, sensor needs to configure the
* input format differently. width and height can be 0. If width and
- * height is not zero, then the corresponsing data needs to be set
+ * height is not zero, then the corresponding data needs to be set
*/
struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
};
@@ -175,8 +175,6 @@ int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
enum atomisp_bayer_order bayer_order);
void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
-int v4l2_get_acpi_sensor_info(struct device *dev, char **module_id_str);
-
/* API from old platform_camera.h, new CPUID implementation */
#define __IS_SOC(x) (boot_cpu_data.x86_vfm == x)
#define __IS_SOCS(x, y) (boot_cpu_data.x86_vfm == x || boot_cpu_data.x86_vfm == y)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
index d789d38ef689..6abda358a72f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -109,6 +109,8 @@ static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
static struct gmin_cfg_var xiaomi_mipad2_vars[] = {
/* _DSM contains the wrong CsiPort for the front facing OV5693 sensor */
{ "INT33BE:00", "CsiPort", "0" },
+ /* _DSM contains the wrong CsiLanes for the back facing T4KA3 sensor */
+ { "XMCC0003:00", "CsiLanes", "4" },
{}
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index 50c4123ba006..b180fcbea9b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -441,6 +441,8 @@ const struct vb2_ops atomisp_vb2_ops = {
.buf_queue = atomisp_buf_queue,
.start_streaming = atomisp_start_streaming,
.stop_streaming = atomisp_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static void atomisp_dev_init_struct(struct atomisp_device *isp)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 3a3e84a035e2..202497695e46 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -797,12 +797,12 @@ static int atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
pipe->vb_queue.ops = &atomisp_vb2_ops;
pipe->vb_queue.mem_ops = &vb2_vmalloc_memops;
pipe->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ pipe->vb_queue.lock = &pipe->vb_queue_mutex;
ret = vb2_queue_init(&pipe->vb_queue);
if (ret)
return ret;
pipe->vdev.queue = &pipe->vb_queue;
- pipe->vdev.queue->lock = &pipe->vb_queue_mutex;
INIT_LIST_HEAD(&pipe->buffers_in_css);
INIT_LIST_HEAD(&pipe->activeq);
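
[Editor's note: these two atomisp hunks complete the standard vb2 locking wiring: the queue lock must be assigned before vb2_queue_init(), and the stock wait_prepare/wait_finish helpers added in the previous hunk drop and retake that lock around blocking buffer waits. A generic sketch with foo_* names standing in:

	static const struct vb2_ops foo_vb2_ops = {
		/* Release q->lock while VIDIOC_DQBUF sleeps, so other
		 * ioctls on the queue are not blocked behind it. */
		.wait_prepare = vb2_ops_wait_prepare,
		.wait_finish = vb2_ops_wait_finish,
	};

	static int foo_init_queue(struct vb2_queue *q, struct mutex *lock)
	{
		q->ops = &foo_vb2_ops;
		q->lock = lock;		/* must be set before init */
		return vb2_queue_init(q);
	}
]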
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
index d0ba59cedc92..6f0a8fe868bd 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
@@ -20,8 +20,10 @@
#include "vmem_global.h"
typedef u16 t_vmem_elem;
+typedef s16 t_svmem_elem;
-#define VMEM_ARRAY(x, s) t_vmem_elem x[s / ISP_NWAY][ISP_NWAY]
+#define VMEM_ARRAY(x, s) t_vmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
+#define SVMEM_ARRAY(x, s) t_svmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
void isp_vmem_load(
const isp_ID_t ID,
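
[Editor's note: the extra parentheses around (s) matter whenever the size argument is an expression. A worked expansion with a hypothetical size of 8 + 8:

	without parentheses:
		t_vmem_elem x[8 + 8 / ISP_NWAY][ISP_NWAY]   /* divides only the second 8 */
	with (s):
		t_vmem_elem x[(8 + 8) / ISP_NWAY][ISP_NWAY] /* divides the whole sum */
]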
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
index d294ac402de8..c5ab13511db8 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
@@ -27,7 +27,8 @@
* #define assert(cnd) BUG_ON(cnd)
* but that causes many compiler warnings (==errors) under Android
* because it seems that the BUG_ON() macro is not seen as a check by
- * gcc like the BUG() macro is. */
+ * gcc like the BUG() macro is.
+ */
#define assert(cnd) \
do { \
if (!(cnd)) \
@@ -37,7 +38,8 @@
#ifndef PIPE_GENERATION
/* Deprecated OP___assert, this is still used in ~1000 places
* in the code. This will be removed over time.
- * The implementation for the pipe generation tool is in see support.isp.h */
+ * The implementation for the pipe generation tool is in see support.isp.h
+ */
#define OP___assert(cnd) assert(cnd)
static inline void compile_time_assert(unsigned int cond)
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
index 693154e8ec2f..7e37f0809034 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -94,7 +94,7 @@ hrt_data csi_rx_fe_ctrl_reg_load(
const hrt_address reg);
/**
* @brief Store a value to the register.
- * Store a value to the registe of the csi rx fe.
+ * Store a value to the register of the csi rx fe.
*
* @param[in] ID The global unique ID for the ibuf-controller instance.
* @param[in] reg The offset address of the register.
@@ -119,7 +119,7 @@ hrt_data csi_rx_be_ctrl_reg_load(
const hrt_address reg);
/**
* @brief Store a value to the register.
- * Store a value to the registe of the csi rx be.
+ * Store a value to the register of the csi rx be.
*
* @param[in] ID The global unique ID for the ibuf-controller instance.
* @param[in] reg The offset address of the register.
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
index 160c496784b7..907f9ebcc60d 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
@@ -28,12 +28,6 @@
#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))
#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
-#if !defined(PIPE_GENERATION)
-
-#define ceil_div(a, b) (CEIL_DIV(a, b))
-
-#endif /* !defined(PIPE_GENERATION) */
-
/*
* For SP and ISP, SDK provides the definition of OP_std_modadd.
* We need it only for host
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 3e2899ad8517..e8c5d728fd55 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -204,9 +204,6 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
goto bind_err;
}
- dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
- bo->start, bytes, type, vmalloc_noprof);
-
return bo->start;
bind_err:
@@ -231,8 +228,6 @@ void hmm_free(ia_css_ptr virt)
{
struct hmm_buffer_object *bo;
- dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
-
if (WARN_ON(virt == mmgr_EXCEPTION))
return;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
index 457a004e194d..b75cfd3096d8 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
@@ -45,7 +45,8 @@ ia_css_bnr_dump(
const struct sh_css_isp_bnr_params *bnr,
unsigned int level)
{
- if (!bnr) return;
+ if (!bnr)
+ return;
ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"bnr_gain_all", bnr->gain_all);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
index 25e3f0822fb8..e66faeda3613 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
@@ -47,7 +47,8 @@ ia_css_de_dump(
const struct sh_css_isp_de_params *de,
unsigned int level)
{
- if (!de) return;
+ if (!de)
+ return;
ia_css_debug_dtrace(level, "Demosaic:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"de_pixelnoise", de->pixelnoise);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
index e4fc90f88e24..b79d78e5b77f 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
@@ -172,25 +172,21 @@ ia_css_eed1_8_vmem_encode(
base = shuffle_block * i;
for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
- to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_x[j], 0),
- 8191);
- to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_y[j], -8192),
- 8191);
+ to->e_dew_enh_x[0][base + j] = clamp(from->dew_enhance_seg_x[j],
+ 0, 8191);
+ to->e_dew_enh_y[0][base + j] = clamp(from->dew_enhance_seg_y[j],
+ -8192, 8191);
}
for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
- to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_slope[j],
- -8192), 8191);
+ to->e_dew_enh_a[0][base + j] = clamp(from->dew_enhance_seg_slope[j],
+ -8192, 8191);
/* Convert dew_enhance_seg_exp to flag:
* 0 -> 0
* 1...13 -> 1
*/
- to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int,
- from->dew_enhance_seg_exp[j],
- 0), 13) > 0);
+ to->e_dew_enh_f[0][base + j] = clamp(from->dew_enhance_seg_exp[j],
+ 0, 13) > 0;
}
/* Hard-coded to 0, in order to be able to handle out of
@@ -276,7 +272,7 @@ ia_css_eed1_8_encode(
for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) {
min_exp = max(min_exp, from->dew_enhance_seg_exp[i]);
}
- to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13);
+ to->e_dew_enh_asr = 13 - clamp(min_exp, 0, 13);
to->dedgew_max = from->dedgew_max;
}
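
[Editor's note: the clamp() conversions are behavior-preserving; each nested min_t(..., max_t(...)) pair computed exactly the same bound. A small equivalence sketch, foo_clamp_demo() being illustrative:

	static bool foo_clamp_demo(int v)
	{
		int old_style = min_t(int, max_t(int, v, -8192), 8191);
		int new_style = clamp(v, -8192, 8191);

		/* identical for every v; clamp() also states the intent
		 * directly and, for constant bounds, can flag inverted
		 * limits at compile time */
		return old_style == new_style;
	}
]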
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
index 6fb3b38f49e7..b9eeeb592ec8 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
@@ -94,8 +94,8 @@
struct eed1_8_vmem_params {
VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS);
- VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
- VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS);
VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS);
VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS);
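
[Editor's note: switching e_dew_enh_y and e_dew_enh_a to SVMEM_ARRAY matches the vmem_encode hunk above, where both tables are clamped to [-8192, 8191]: a signed 14-bit range needs the signed element type.

	typedef u16 t_vmem_elem;	/* unsigned: 0 .. 65535 */
	typedef s16 t_svmem_elem;	/* signed: covers [-8192, 8191] */
]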
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
index 57b5e11e1cfe..8ccfa99c61ef 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -43,7 +43,8 @@ ia_css_fpn_dump(
const struct sh_css_isp_fpn_params *fpn,
unsigned int level)
{
- if (!fpn) return;
+ if (!fpn)
+ return;
ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"fpn_shift", fpn->shift);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
index 0091e2a3da52..c32659894c29 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -13,9 +13,11 @@
* more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
+
#include "ia_css_bayer_io.host.h"
#include "dma.h"
-#include "math_support.h"
#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
@@ -29,9 +31,8 @@ int ia_css_bayer_io_config(const struct ia_css_binary *binary,
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
&args->out_frame;
const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
- const unsigned int ddr_bits_per_element = sizeof(short) * 8;
- const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
- ddr_bits_per_element);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
int ret;
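
[Editor's note: the open-coded bits-per-element math is replaced by the generic kernel helpers. A worked expansion, assuming (for the arithmetic only) a 256-bit DDR word:

	BITS_PER_TYPE(short)   ==  sizeof(short) * 8     ==  16
	DIV_ROUND_UP(256, 16)  ==  (256 + 16 - 1) / 16   ==  16

i.e. sixteen short-sized elements per DDR word, identical to what the removed ceil_div() computed.]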
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
index 32c504a950ce..5b2d5023b5ee 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -13,9 +13,11 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
+
#include "ia_css_yuv444_io.host.h"
#include "dma.h"
-#include "math_support.h"
#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
@@ -29,9 +31,8 @@ int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
&args->out_frame;
const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
- const unsigned int ddr_bits_per_element = sizeof(short) * 8;
- const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
- ddr_bits_per_element);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
int ret;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
index 70132d955e9b..def2c8fb4b38 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
@@ -108,7 +108,7 @@ compute_coring(int coring)
* factor. Clip to [0, isp_scale-1).
*/
isp_coring = ((coring * isp_scale) + offset) / host_scale;
- return min(max(isp_coring, 0), isp_scale - 1);
+ return clamp(isp_coring, 0, isp_scale - 1);
}
/*
@@ -168,15 +168,15 @@ ia_css_xnr3_encode(
to->alpha.y0 = alpha_y0;
to->alpha.u0 = alpha_u0;
to->alpha.v0 = alpha_v0;
- to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff);
- to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff);
- to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff);
+ to->alpha.ydiff = clamp(alpha_ydiff, min_diff, max_diff);
+ to->alpha.udiff = clamp(alpha_udiff, min_diff, max_diff);
+ to->alpha.vdiff = clamp(alpha_vdiff, min_diff, max_diff);
/* coring parameters are expressed in q1.NN format */
to->coring.u0 = coring_u0;
to->coring.v0 = coring_v0;
- to->coring.udiff = min(max(coring_udiff, min_diff), max_diff);
- to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff);
+ to->coring.udiff = clamp(coring_udiff, min_diff, max_diff);
+ to->coring.vdiff = clamp(coring_vdiff, min_diff, max_diff);
/* blending strength is expressed in q1.NN format */
to->blending.strength = blending;
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index b0f904a5e442..7ce2b2d6da11 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -328,7 +328,8 @@ ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
dvs_info = &info->dvs_grid.dvs_grid_info;
- /* for DIS, we use a division instead of a ceil_div. If this is smaller
+ /*
+ * For DIS, we use a division instead of a DIV_ROUND_UP(). If this is smaller
* than the 3a grid size, it indicates that the outer values are not
* valid for DIS.
*/
@@ -923,8 +924,8 @@ ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
return 0;
}
-static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
- struct ia_css_binary *binary) {
+int ia_css_binary_find(struct ia_css_binary_descr *descr, struct ia_css_binary *binary)
+{
int mode;
bool online;
bool two_ppc;
@@ -953,10 +954,8 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* MW: used after an error check, may accept NULL, but doubtfull */
assert(binary);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
- descr, descr->mode,
- binary);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
+ descr, descr->mode, binary);
mode = descr->mode;
online = descr->online;
@@ -1001,15 +1000,15 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
}
/* print a map of the binary file */
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
+ dev_dbg(atomisp_dev, "BINARY INFO:\n");
for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
xcandidate = binary_infos[i];
if (xcandidate) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
+ dev_dbg(atomisp_dev, "%d:\n", i);
while (xcandidate) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n",
- xcandidate->blob->name, xcandidate->type,
- xcandidate->sp.enable.continuous);
+ dev_dbg(atomisp_dev, " Name:%s Type:%d Cont:%d\n",
+ xcandidate->blob->name, xcandidate->type,
+ xcandidate->sp.enable.continuous);
xcandidate = xcandidate->next;
}
}
@@ -1021,9 +1020,9 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
struct ia_css_binary_info *candidate = &xcandidate->sp;
/* printf("sh_css_binary_find: evaluating candidate:
* %d\n",candidate->id); */
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
- candidate, candidate->pipeline.mode, candidate->id);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
+ candidate, candidate->pipeline.mode, candidate->id);
/*
* MW: Only a limited set of jointly configured binaries can
@@ -1032,17 +1031,16 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
*/
if (!candidate->enable.continuous &&
continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
- __LINE__, candidate->enable.continuous,
- continuous, mode,
- IA_CSS_BINARY_MODE_COPY);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
+ __LINE__, candidate->enable.continuous,
+ continuous, mode, IA_CSS_BINARY_MODE_COPY);
continue;
}
if (striped && candidate->iterator.num_stripes == 1) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: binary is not striped\n",
- __LINE__);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: binary is not striped\n",
+ __LINE__);
continue;
}
@@ -1050,58 +1048,38 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
(mode != IA_CSS_BINARY_MODE_COPY) &&
(mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
(mode != IA_CSS_BINARY_MODE_VF_PP)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d != %d)\n",
- __LINE__,
- candidate->pipeline.isp_pipe_version, isp_pipe_version);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d != %d)\n",
+ __LINE__, candidate->pipeline.isp_pipe_version, isp_pipe_version);
continue;
}
if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.reduced_pipe,
- enable_reduced_pipe);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.reduced_pipe, enable_reduced_pipe);
continue;
}
if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.dvs_6axis,
- enable_dvs_6axis);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_6axis, enable_dvs_6axis);
continue;
}
if (candidate->enable.high_speed && !enable_high_speed) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- candidate->enable.high_speed,
- enable_high_speed);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.high_speed, enable_high_speed);
continue;
}
if (!candidate->enable.xnr && need_xnr) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- candidate->enable.xnr,
- need_xnr);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.xnr, need_xnr);
continue;
}
if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- ((candidate->enable.ds & 2) != 0),
- enable_yuv_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
continue;
}
if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- ((candidate->enable.ds & 2) != 0),
- enable_yuv_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
continue;
}
@@ -1115,100 +1093,85 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
candidate->vf_dec.is_variable ||
/* or more than one output pin. */
xcandidate->num_output_pins > 1)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
- __LINE__, req_vf_info,
- candidate->enable.vf_veceven,
- candidate->vf_dec.is_variable,
- xcandidate->num_output_pins, 1);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
+ __LINE__, req_vf_info, candidate->enable.vf_veceven,
+ candidate->vf_dec.is_variable, xcandidate->num_output_pins, 1);
continue;
}
if (!candidate->enable.dvs_envelope && need_dvs) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.dvs_envelope, (int)need_dvs);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_envelope, (int)need_dvs);
continue;
}
/* internal_res check considers input, output, and dvs envelope sizes */
ia_css_binary_internal_res(req_in_info, req_bds_out_info,
req_bin_out_info, &dvs_env, candidate, &internal_res);
if (internal_res.width > candidate->internal.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, internal_res.width,
- candidate->internal.max_width);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.width, candidate->internal.max_width);
continue;
}
if (internal_res.height > candidate->internal.max_height) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, internal_res.height,
- candidate->internal.max_height);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.height, candidate->internal.max_height);
continue;
}
if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__, candidate->enable.ds, (int)need_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.ds, (int)need_ds);
continue;
}
if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
- __LINE__, candidate->enable.uds,
- candidate->enable.dvs_6axis, (int)need_dz);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
+ __LINE__, candidate->enable.uds, candidate->enable.dvs_6axis,
+ (int)need_dz);
continue;
}
if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
- __LINE__, online, candidate->input.source,
- IA_CSS_BINARY_INPUT_MEMORY);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_MEMORY);
continue;
}
if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
- __LINE__, online, candidate->input.source,
- IA_CSS_BINARY_INPUT_SENSOR);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_SENSOR);
continue;
}
if (req_bin_out_info->res.width < candidate->output.min_width ||
req_bin_out_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
- __LINE__,
- req_bin_out_info->padded_width,
- candidate->output.min_width,
- req_bin_out_info->padded_width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
+ __LINE__, req_bin_out_info->padded_width,
+ candidate->output.min_width, req_bin_out_info->padded_width,
+ candidate->output.max_width);
continue;
}
if (xcandidate->num_output_pins > 1 &&
/* in case we have a second output pin, */
req_vf_info) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d < %d)\n",
- __LINE__,
- req_vf_info->res.width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
continue;
}
}
if (req_in_info->padded_width > candidate->input.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, req_in_info->padded_width,
- candidate->input.max_width);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, req_in_info->padded_width, candidate->input.max_width);
continue;
}
if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d\n",
- __LINE__,
- binary_supports_output_format(xcandidate, req_bin_out_info->format));
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__, binary_supports_output_format(xcandidate,
+ req_bin_out_info->format));
continue;
}
if (xcandidate->num_output_pins > 1 &&
@@ -1217,11 +1180,10 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* check if the required vf format
is supported. */
!binary_supports_output_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
- __LINE__, xcandidate->num_output_pins, 1,
- req_vf_info,
- binary_supports_output_format(xcandidate, req_vf_info->format));
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1, req_vf_info,
+ binary_supports_output_format(xcandidate, req_vf_info->format));
continue;
}
@@ -1229,11 +1191,11 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
if (xcandidate->num_output_pins == 1 &&
req_vf_info && candidate->enable.vf_veceven &&
!binary_supports_vf_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
- __LINE__, xcandidate->num_output_pins, 1,
- req_vf_info, candidate->enable.vf_veceven,
- binary_supports_vf_format(xcandidate, req_vf_info->format));
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info, candidate->enable.vf_veceven,
+ binary_supports_vf_format(xcandidate, req_vf_info->format));
continue;
}
@@ -1241,37 +1203,31 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
if (xcandidate->num_output_pins == 1 &&
req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d < %d)\n",
- __LINE__,
- req_vf_info->res.width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
continue;
}
}
if (!supports_bds_factor(candidate->bds.supported_bds_factors,
descr->required_bds_factor)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->bds.supported_bds_factors,
- descr->required_bds_factor);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->bds.supported_bds_factors,
+ descr->required_bds_factor);
continue;
}
if (!candidate->enable.dpc && need_dpc) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->enable.dpc,
- descr->enable_dpc);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->enable.dpc, descr->enable_dpc);
continue;
}
if (candidate->uds.use_bci && enable_capture_pp_bli) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->uds.use_bci,
- descr->enable_capture_pp_bli);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->uds.use_bci, descr->enable_capture_pp_bli);
continue;
}
@@ -1290,39 +1246,18 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
break;
}
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() selected = %p, mode = %d ID = %d\n",
- xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0);
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() leave: return_err=%d\n", err);
-
if (!err && xcandidate)
- dev_dbg(atomisp_dev,
- "Using binary %s (id %d), type %d, mode %d, continuous %s\n",
- xcandidate->blob->name,
- xcandidate->sp.id,
- xcandidate->type,
+ dev_dbg(atomisp_dev, "Using binary %s (id %d), type %d, mode %d, continuous %s\n",
+ xcandidate->blob->name, xcandidate->sp.id, xcandidate->type,
xcandidate->sp.pipeline.mode,
xcandidate->sp.enable.continuous ? "true" : "false");
+ if (err)
+ dev_err(atomisp_dev, "Failed to find a firmware binary matching the pipeline parameters\n");
return err;
}
-int ia_css_binary_find(struct ia_css_binary_descr *descr,
- struct ia_css_binary *binary)
-{
- int ret = __ia_css_binary_find(descr, binary);
-
- if (unlikely(ret)) {
- dev_dbg(atomisp_dev, "Seeking for binary failed at:");
- dump_stack();
- }
-
- return ret;
-}
-
unsigned
ia_css_binary_max_vf_width(void)
{
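
[Editor's note: the dtrace-to-dev_dbg conversion throughout binary.c hooks these messages into dynamic debug, so they can be toggled at runtime instead of via the CSS trace level. An illustrative line, with the standard control-file incantation in the comment:

	/* Gated by dynamic debug; at runtime,
	 *   echo 'file binary.c +p' > /sys/kernel/debug/dynamic_debug/control
	 * enables every dev_dbg() in this file without rebuilding. */
	dev_dbg(atomisp_dev, "candidate = %p, mode = %d\n", candidate, mode);
]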
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
index 52483498239d..2e0193671f4b 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -13,6 +13,8 @@
* more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
#include <linux/string.h> /* for memcpy() */
#include "system_global.h"
@@ -20,7 +22,6 @@
#include "ia_css_isys.h"
#include "ia_css_debug.h"
-#include "math_support.h"
#include "virtual_isys.h"
#include "isp.h"
#include "sh_css_defs.h"
@@ -558,7 +559,7 @@ static int32_t calculate_stride(
bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
- words_per_line = ceil_div(pixels_per_line_padded, pixels_per_word);
+ words_per_line = DIV_ROUND_UP(pixels_per_line_padded, pixels_per_word);
bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
return bytes_per_line;
@@ -690,7 +691,6 @@ static bool calculate_ibuf_ctrl_cfg(
const isp2401_input_system_cfg_t *isys_cfg,
ibuf_ctrl_cfg_t *cfg)
{
- const s32 bits_per_byte = 8;
s32 bits_per_pixel;
s32 bytes_per_pixel;
s32 left_padding;
@@ -698,7 +698,7 @@ static bool calculate_ibuf_ctrl_cfg(
(void)input_port;
bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
- bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte);
+ bytes_per_pixel = BITS_TO_BYTES(bits_per_pixel);
left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
* bytes_per_pixel;
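
[Editor's note: BITS_TO_BYTES() is the kernel's round-up division by the byte size, so this conversion preserves the old ceil_div(bits, 8) result:

	/* e.g. a hypothetical 10-bit pixel: */
	BITS_TO_BYTES(10)  ==  DIV_ROUND_UP(10, 8)  ==  2	/* bytes */
]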
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 01f0b8a33c99..ca97ea082cf4 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -5826,20 +5826,19 @@ need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
* Later, merge this with ia_css_pipe_create_cas_scaler_desc
*/
static int ia_css_pipe_create_cas_scaler_desc_single_output(
- struct ia_css_frame_info *cas_scaler_in_info,
- struct ia_css_frame_info *cas_scaler_out_info,
- struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
struct ia_css_cas_binary_descr *descr)
{
unsigned int i;
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
int err = 0;
struct ia_css_frame_info tmp_in_info;
-
unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
- assert(cas_scaler_in_info);
- assert(cas_scaler_out_info);
+ assert(in_info);
+ assert(out_info);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_pipe_create_cas_scaler_desc() enter:\n");
@@ -5847,10 +5846,8 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
/* We assume that this function is used only for single output port case. */
descr->num_output_stage = 1;
- hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width,
- cas_scaler_out_info->res.width);
- ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height,
- cas_scaler_out_info->res.height);
+ hor_ds_factor = CEIL_DIV(in_info->res.width, out_info->res.width);
+ ver_ds_factor = CEIL_DIV(in_info->res.height, out_info->res.height);
/* use the same horizontal and vertical downscaling factor for simplicity */
assert(hor_ds_factor == ver_ds_factor);
@@ -5895,30 +5892,29 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
goto ERR;
}
- tmp_in_info = *cas_scaler_in_info;
+ tmp_in_info = *in_info;
for (i = 0; i < descr->num_stage; i++) {
descr->in_info[i] = tmp_in_info;
- if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
- cas_scaler_out_info->res.width) {
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info->res.width) {
descr->is_output_stage[i] = true;
if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
- descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width;
- descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height;
- descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->internal_out_info[i].res.width = out_info->res.width;
+ descr->internal_out_info[i].res.height = out_info->res.height;
+ descr->internal_out_info[i].padded_width = out_info->padded_width;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
} else {
assert(i == (descr->num_stage - 1));
descr->internal_out_info[i].res.width = 0;
descr->internal_out_info[i].res.height = 0;
}
- descr->out_info[i].res.width = cas_scaler_out_info->res.width;
- descr->out_info[i].res.height = cas_scaler_out_info->res.height;
- descr->out_info[i].padded_width = cas_scaler_out_info->padded_width;
- descr->out_info[i].format = cas_scaler_out_info->format;
- if (cas_scaler_vf_info) {
- descr->vf_info[i].res.width = cas_scaler_vf_info->res.width;
- descr->vf_info[i].res.height = cas_scaler_vf_info->res.height;
- descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width;
+ descr->out_info[i].res.width = out_info->res.width;
+ descr->out_info[i].res.height = out_info->res.height;
+ descr->out_info[i].padded_width = out_info->padded_width;
+ descr->out_info[i].format = out_info->format;
+ if (vf_info) {
+ descr->vf_info[i].res.width = vf_info->res.width;
+ descr->vf_info[i].res.height = vf_info->res.height;
+ descr->vf_info[i].padded_width = vf_info->padded_width;
ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
} else {
descr->vf_info[i].res.width = 0;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
deleted file mode 100644
index 6f058f132300..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
-Support for Intel Camera Imaging ISP subsystem.
-Copyright (c) 2010 - 2015, Intel Corporation.
-
-This program is free software; you can redistribute it and/or modify it
-under the terms and conditions of the GNU General Public License,
-version 2, as published by the Free Software Foundation.
-
-This program is distributed in the hope it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-*/
-
-#ifndef __SH_CSS_DVS_INFO_H__
-#define __SH_CSS_DVS_INFO_H__
-
-#include <math_support.h>
-
-/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
-#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
-
-/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
-#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
-
-/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently.
- * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */
-#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
-
-#define DVS_INPUT_BYTES_PER_PIXEL (1)
-
-#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
-
-#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
-
-#endif /* __SH_CSS_DVS_INFO_H__ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
index b90b5b330dfa..8ba65161f7a9 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
@@ -32,12 +32,24 @@
#define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
/* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
-#define sDIGIT_FITTING(v, a, b) \
- min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
- sISP_VAL_MIN), sISP_VAL_MAX)
-#define uDIGIT_FITTING(v, a, b) \
- min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
- >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
- uISP_VAL_MIN), uISP_VAL_MAX)
+static inline int sDIGIT_FITTING(int v, int a, int b)
+{
+ int fit_shift = sFRACTION_BITS_FITTING(a) - b;
+
+ v >>= sSHIFT;
+ v >>= fit_shift > 0 ? fit_shift : 0;
+
+ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
+}
+
+static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
+{
+ int fit_shift = uFRACTION_BITS_FITTING(a) - b;
+
+ v >>= uSHIFT;
+ v >>= fit_shift > 0 ? fit_shift : 0;
+
+ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
+}
#endif /* __SH_CSS_FRAC_H */
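
[Editor's note: rewriting the two FITTING macros as static inline functions gains real type checking and single evaluation of v while keeping the clamping semantics via clamp_t(). A usage sketch; raw_value and the 16/10 fraction-bit arguments are illustrative:

	unsigned int isp_val = uDIGIT_FITTING(raw_value, 16, 10);
	/* equivalent to: shift down by uSHIFT and any positive
	 * fitting shift, then clamp into [uISP_VAL_MIN, uISP_VAL_MAX] */
]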
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
index 7782f76b9f97..25e5b4570f7d 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
@@ -18,7 +18,6 @@
#include <math_support.h>
#include <ia_css_types.h>
-#include <sh_css_dvs_info.h>
#include "gdc_global.h" /* gdc_warp_param_mem_t */
#define DVS_ENV_MIN_X (12)
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
index 31b2b48085c5..712f916f0935 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
@@ -333,20 +333,16 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
+ int ret = -EINVAL;
INIT_LIST_HEAD(&isc->subdev_entities);
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
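
[Editor's note: both ISC parsers move from a hand-rolled of_graph_get_next_endpoint() loop to for_each_endpoint_of_node(), which manages the endpoint references itself; initializing ret to -EINVAL also suggests a device tree with no endpoints now fails instead of silently succeeding. A sketch of the idiom, foo_* names being illustrative:

	static int foo_parse(struct device_node *np)
	{
		struct device_node *epn;
		int ret = -EINVAL;	/* no endpoints -> error */

		for_each_endpoint_of_node(np, epn) {
			ret = foo_handle_endpoint(epn);
			if (ret) {
				/* the iterator only drops the reference
				 * when it advances; drop it ourselves on
				 * an early exit */
				of_node_put(epn);
				break;
			}
		}
		return ret;
	}
]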
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
index 020034f631f5..9485167d5b7d 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
@@ -316,23 +316,19 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
+ int ret = -EINVAL;
bool mipi_mode;
INIT_LIST_HEAD(&isc->subdev_entities);
mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 3df58eb3e882..e7aee7e3db5b 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -535,31 +535,53 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
container_of(vq, struct imgu_video_device, vbq);
int r;
unsigned int pipe;
+ bool stop_streaming = false;
+ /* Verify that the node had been setup with imgu_v4l2_node_setup() */
WARN_ON(!node->enabled);
pipe = node->pipe;
dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
- imgu_pipe = &imgu->imgu_pipe[pipe];
- r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
- if (r)
- dev_err(&imgu->pci_dev->dev,
- "failed to stop subdev streaming\n");
+ /*
+ * When the first node of a streaming setup is stopped, the entire
+ * pipeline needs to stop before individual nodes are disabled.
+ * Perform the inverse of the initial setup.
+ *
+ * Part 1 - s_stream on the entire pipeline
+ */
mutex_lock(&imgu->streaming_lock);
- /* Was this the first node with streaming disabled? */
- if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
+ if (imgu->streaming) {
/* Yes, really stop streaming now */
dev_dbg(dev, "IMGU streaming is ready to stop");
r = imgu_s_stream(imgu, false);
if (!r)
imgu->streaming = false;
+ stop_streaming = true;
}
-
- imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
mutex_unlock(&imgu->streaming_lock);
+ /* Part 2 - s_stream on subdevs
+ *
+ * If we call s_stream multiple times, Linux v6.7's call_s_stream()
+ * WARNs and aborts. Thus, disable all pipes at once, and only once.
+ */
+ if (stop_streaming) {
+ for_each_set_bit(pipe, imgu->css.enabled_pipes,
+ IMGU_MAX_PIPE_NUM) {
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev,
+ video, s_stream, 0);
+ if (r)
+ dev_err(&imgu->pci_dev->dev,
+ "failed to stop subdev streaming\n");
+ }
+ }
+
+ /* Part 3 - individual node teardown */
video_device_pipeline_stop(&node->vdev);
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
}
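
[Editor's note: the stop path is now the exact inverse of start: stop the IMGU pipeline once, then issue a single s_stream(0) per enabled pipe (avoiding the repeated-disable WARN in Linux v6.7's call_s_stream()), and only then tear down the node. The per-pipe loop pattern, sketched generically with illustrative names:

	unsigned long pipe;

	for_each_set_bit(pipe, enabled_pipes, FOO_MAX_PIPES) {
		/* one s_stream(0) per pipe, mirroring the single
		 * s_stream(1) issued when streaming started */
		if (v4l2_subdev_call(foo_subdev(pipe), video, s_stream, 0))
			dev_err(dev, "failed to stop subdev streaming\n");
	}
]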
/******************** v4l2_ioctl_ops ********************/
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index de3e0345ab7c..5e5b296f93ba 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -982,6 +982,8 @@ static const struct of_device_id vdec_dt_match[] = {
.data = &vdec_platform_gxm },
{ .compatible = "amlogic,gxl-vdec",
.data = &vdec_platform_gxl },
+ { .compatible = "amlogic,gxlx-vdec",
+ .data = &vdec_platform_gxlx },
{ .compatible = "amlogic,g12a-vdec",
.data = &vdec_platform_g12a },
{ .compatible = "amlogic,sm1-vdec",
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
index 3fe2de0c9331..a65cb4959446 100644
--- a/drivers/staging/media/meson/vdec/vdec_1.c
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -129,7 +129,7 @@ static u32 vdec_1_vififo_level(struct amvdec_session *sess)
return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
}
-static int vdec_1_stop(struct amvdec_session *sess)
+static void __vdec_1_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
@@ -158,10 +158,17 @@ static int vdec_1_stop(struct amvdec_session *sess)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
- clk_disable_unprepare(core->vdec_1_clk);
-
if (sess->priv)
codec_ops->stop(sess);
+}
+
+static int vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_1_stop(sess);
+
+ clk_disable_unprepare(core->vdec_1_clk);
return 0;
}
@@ -235,7 +242,8 @@ static int vdec_1_start(struct amvdec_session *sess)
return 0;
stop:
- vdec_1_stop(sess);
+ __vdec_1_stop(sess);
+ clk_disable_unprepare(core->vdec_1_clk);
return ret;
}
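
[Editor's note: same refactor shape as the HEVC variant below: the shared hardware quiesce moves into a __helper, and each caller pairs its own clk_disable_unprepare() with the clk_prepare_enable() it issued, so the error path of start() cannot unbalance the clock. Sketched generically with foo_* names:

	static void __foo_stop(struct foo_session *sess)
	{
		/* hardware quiesce shared by stop() and the start()
		 * error path; no clock handling here */
	}

	static int foo_start(struct foo_session *sess)
	{
		int ret = clk_prepare_enable(sess->clk);

		if (ret)
			return ret;
		ret = foo_hw_start(sess);
		if (ret) {
			__foo_stop(sess);
			clk_disable_unprepare(sess->clk);	/* pairs with enable above */
		}
		return ret;
	}
]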
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
index afced435c907..1939c47def58 100644
--- a/drivers/staging/media/meson/vdec/vdec_hevc.c
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -110,7 +110,7 @@ static u32 vdec_hevc_vififo_level(struct amvdec_session *sess)
return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL);
}
-static int vdec_hevc_stop(struct amvdec_session *sess)
+static void __vdec_hevc_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
@@ -142,6 +142,13 @@ static int vdec_hevc_stop(struct amvdec_session *sess)
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC);
+}
+
+static int vdec_hevc_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_hevc_stop(sess);
clk_disable_unprepare(core->vdec_hevc_clk);
if (core->platform->revision == VDEC_REVISION_G12A ||
@@ -151,20 +158,12 @@ static int vdec_hevc_stop(struct amvdec_session *sess)
return 0;
}
-static int vdec_hevc_start(struct amvdec_session *sess)
+static int __vdec_hevc_start(struct amvdec_session *sess)
{
int ret;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
- if (core->platform->revision == VDEC_REVISION_G12A ||
- core->platform->revision == VDEC_REVISION_SM1) {
- clk_set_rate(core->vdec_hevcf_clk, 666666666);
- ret = clk_prepare_enable(core->vdec_hevcf_clk);
- if (ret)
- return ret;
- }
-
clk_set_rate(core->vdec_hevc_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevc_clk);
if (ret) {
@@ -223,10 +222,32 @@ static int vdec_hevc_start(struct amvdec_session *sess)
return 0;
stop:
- vdec_hevc_stop(sess);
+ __vdec_hevc_stop(sess);
+ clk_disable_unprepare(core->vdec_hevc_clk);
return ret;
}
+static int vdec_hevc_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ int ret;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ clk_set_rate(core->vdec_hevcf_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevcf_clk);
+ if (ret)
+ return ret;
+
+ ret = __vdec_hevc_start(sess);
+ if (ret)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+ return ret;
+ }
+
+ return __vdec_hevc_start(sess);
+}
+
struct amvdec_ops vdec_hevc_ops = {
.start = vdec_hevc_start,
.stop = vdec_hevc_stop,
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index 70c9fd7c8bc5..66bb307db85a 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -101,6 +101,44 @@ static const struct amvdec_format vdec_formats_gxl[] = {
},
};
+static const struct amvdec_format vdec_formats_gxlx[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxl_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
static const struct amvdec_format vdec_formats_gxm[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
@@ -263,6 +301,12 @@ const struct vdec_platform vdec_platform_gxl = {
.revision = VDEC_REVISION_GXL,
};
+const struct vdec_platform vdec_platform_gxlx = {
+ .formats = vdec_formats_gxlx,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxlx),
+ .revision = VDEC_REVISION_GXLX,
+};
+
const struct vdec_platform vdec_platform_gxm = {
.formats = vdec_formats_gxm,
.num_formats = ARRAY_SIZE(vdec_formats_gxm),
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
index 731877a771f4..88ca4a9db8a8 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.h
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -14,6 +14,7 @@ struct amvdec_format;
enum vdec_revision {
VDEC_REVISION_GXBB,
VDEC_REVISION_GXL,
+ VDEC_REVISION_GXLX,
VDEC_REVISION_GXM,
VDEC_REVISION_G12A,
VDEC_REVISION_SM1,
@@ -28,6 +29,7 @@ struct vdec_platform {
extern const struct vdec_platform vdec_platform_gxbb;
extern const struct vdec_platform vdec_platform_gxm;
extern const struct vdec_platform vdec_platform_gxl;
+extern const struct vdec_platform vdec_platform_gxlx;
extern const struct vdec_platform vdec_platform_g12a;
extern const struct vdec_platform vdec_platform_sm1;
diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
index fecd3e67c7a1..b6d34145bc19 100644
--- a/drivers/staging/media/starfive/camss/stf-camss.c
+++ b/drivers/staging/media/starfive/camss/stf-camss.c
@@ -358,8 +358,6 @@ err_cleanup_notifier:
/*
* stfcamss_remove - Remove STFCAMSS platform device
* @pdev: Pointer to STFCAMSS platform device
- *
- * Always returns 0.
*/
static void stfcamss_remove(struct platform_device *pdev)
{
diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c
index ec5169e7b391..e15d2e97eb0b 100644
--- a/drivers/staging/media/starfive/camss/stf-capture.c
+++ b/drivers/staging/media/starfive/camss/stf-capture.c
@@ -180,6 +180,8 @@ static void stf_channel_set(struct stfcamss_video *video)
u32 val;
if (cap->type == STF_CAPTURE_RAW) {
+ const struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
+
val = stf_syscon_reg_read(stfcamss, VIN_CHANNEL_SEL_EN);
val &= ~U0_VIN_CHANNEL_SEL_MASK;
val |= CHANNEL(0);
@@ -193,7 +195,7 @@ static void stf_channel_set(struct stfcamss_video *video)
val |= PIXEL_HEIGH_BIT_SEL(0);
val &= ~U0_VIN_PIX_CNT_END_MASK;
- val |= PIX_CNT_END(IMAGE_MAX_WIDTH / 4 - 1);
+ val |= PIX_CNT_END(pix->width / 4 - 1);
stf_syscon_reg_write(stfcamss, VIN_INRT_PIX_CFG, val);
} else if (cap->type == STF_CAPTURE_YUV) {
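
The fix above programs the end-of-line pixel count from the width of the currently active format instead of the compile-time IMAGE_MAX_WIDTH. A small sketch of the computation, assuming (based on the /4 in the code) that the VIN counts in 4-pixel units and the negotiated width is a multiple of 4:

#include <linux/types.h>

static inline u32 raw_pix_cnt_end(u32 width)
{
	/* last 4-pixel unit index for a line of `width` pixels */
	return width / 4 - 1;
}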
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9eee28f2940c..a5e99cc78a45 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -425,7 +425,7 @@ int cvm_oct_common_init(struct net_device *dev)
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
/* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->ethtool_ops = &cvm_oct_ethtool_ops;
cvm_oct_set_mac_filter(dev);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 639877069fad..138733cb00e2 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -378,7 +378,7 @@ static void rtllib_ccmp_print_stats(struct seq_file *m, void *priv)
ccmp->dot11rsna_stats_ccmp_decrypt_errors);
}
-static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
+static const struct lib80211_crypto_ops rtllib_crypt_ccmp = {
.name = "R-CCMP",
.init = rtllib_ccmp_init,
.deinit = rtllib_ccmp_deinit,
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index dc0917b03511..74dc8326c886 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -678,7 +678,7 @@ static void rtllib_tkip_print_stats(struct seq_file *m, void *priv)
tkip->dot11RSNAStatsTKIPLocalMICFailures);
}
-static struct lib80211_crypto_ops rtllib_crypt_tkip = {
+static const struct lib80211_crypto_ops rtllib_crypt_tkip = {
.name = "R-TKIP",
.init = rtllib_tkip_init,
.deinit = rtllib_tkip_deinit,
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index 10092f6884ff..aa18c060d727 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -209,7 +209,7 @@ static void prism2_wep_print_stats(struct seq_file *m, void *priv)
seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
}
-static struct lib80211_crypto_ops rtllib_crypt_wep = {
+static const struct lib80211_crypto_ops rtllib_crypt_wep = {
.name = "R-WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index fbd4ec824084..c730d921463d 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -474,7 +474,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
int i, idx;
int group_key = 0;
const char *alg, *module;
- struct lib80211_crypto_ops *ops;
+ const struct lib80211_crypto_ops *ops;
struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
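
Constifying the lib80211_crypto_ops tables lets them be placed in read-only memory, so a stray write to a function pointer faults instead of silently redirecting control flow; the rtllib_wx.c hunk adjusts the lookup side to a pointer-to-const accordingly. The shape of the change, on a hypothetical ops table:

struct demo_ops {
	int (*init)(void);
};

static int demo_init(void)
{
	return 0;
}

/* const: the table lands in .rodata and cannot be patched at run time */
static const struct demo_ops demo_ops_table = {
	.init = demo_init,
};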
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 0c997a08adec..873411e95ed2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -15,7 +15,6 @@ struct kref;
struct sockaddr_storage;
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
-extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
@@ -35,7 +34,6 @@ extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *);
-extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8);
extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery);
extern void iscsit_thread_get_cpumask(struct iscsit_conn *);
extern int iscsi_target_tx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 3ca2f232b387..e8760735486b 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -24,6 +24,5 @@ extern int iscsit_start_kthreads(struct iscsit_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
-extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 41c3db3ddeaa..e60a46d34835 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -15,8 +15,6 @@ extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
extern int iscsi_target_check_login_request(struct iscsit_conn *,
struct iscsi_login *);
-extern int iscsi_target_get_initial_payload(struct iscsit_conn *,
- struct iscsi_login *);
extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
struct iscsi_login *);
extern int iscsi_target_start_negotiation(
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 71d067f62177..d44d09f2dde9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -24,12 +24,7 @@ extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_
int);
extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
-extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
- struct iscsi_portal_group *, const char *, u32);
-extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
- struct se_node_acl *);
extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *);
-extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
struct sockaddr_storage *, struct iscsi_tpg_np *,
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 24b8e577575a..336da4fb0a77 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -17,7 +17,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *);
-extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t);
extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
@@ -34,7 +33,6 @@ extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *);
extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *);
-extern void iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *);
extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *);
extern void iscsit_release_cmd(struct iscsit_cmd *);
@@ -64,9 +62,6 @@ extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
extern void iscsit_print_session_params(struct iscsit_session *);
-extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 976928641aa6..7bb7990d0b07 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -4,6 +4,7 @@ config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
depends on MMU
+ depends on RPMB || !RPMB
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
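
"depends on RPMB || !RPMB" is the usual Kconfig idiom for an optional dependency: it forbids only the broken combination of OPTEE built in while RPMB is a module. With that guarantee, the IS_REACHABLE(CONFIG_RPMB) tests added below are true whenever RPMB support is enabled at all. A sketch of the C side, assuming the standard <linux/kconfig.h> semantics:

#include <linux/kconfig.h>
#include <linux/types.h>

/* True if RPMB is built in, or if RPMB and this code are both modules. */
static inline bool optee_rpmb_reachable(void)
{
	return IS_REACHABLE(CONFIG_RPMB);
}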
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 39e688d4e974..c75fddc83576 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -10,17 +10,85 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/rpmb.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
+struct blocking_notifier_head optee_rpmb_intf_added =
+ BLOCKING_NOTIFIER_INIT(optee_rpmb_intf_added);
+
+static int rpmb_add_dev(struct device *dev)
+{
+ blocking_notifier_call_chain(&optee_rpmb_intf_added, 0,
+ to_rpmb_dev(dev));
+
+ return 0;
+}
+
+static struct class_interface rpmb_class_intf = {
+ .add_dev = rpmb_add_dev,
+};
+
+void optee_bus_scan_rpmb(struct work_struct *work)
+{
+ struct optee *optee = container_of(work, struct optee,
+ rpmb_scan_bus_work);
+ int ret;
+
+ if (!optee->rpmb_scan_bus_done) {
+ ret = optee_enumerate_devices(PTA_CMD_GET_DEVICES_RPMB);
+ optee->rpmb_scan_bus_done = !ret;
+ if (ret && ret != -ENODEV)
+ pr_info("Scanning for RPMB device: ret %d\n", ret);
+ }
+}
+
+int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
+ void *data)
+{
+ struct optee *optee = container_of(intf, struct optee, rpmb_intf);
+
+ schedule_work(&optee->rpmb_scan_bus_work);
+
+ return 0;
+}
+
static void optee_bus_scan(struct work_struct *work)
{
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}
+static ssize_t rpmb_routing_model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct optee *optee = dev_get_drvdata(dev);
+ const char *s;
+
+ if (optee->in_kernel_rpmb_routing)
+ s = "kernel";
+ else
+ s = "user";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", s);
+}
+static DEVICE_ATTR_RO(rpmb_routing_model);
+
+static struct attribute *optee_dev_attrs[] = {
+ &dev_attr_rpmb_routing_model.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(optee_dev);
+
+void optee_set_dev_group(struct optee *optee)
+{
+ tee_device_set_dev_groups(optee->teedev, optee_dev_groups);
+ tee_device_set_dev_groups(optee->supp_teedev, optee_dev_groups);
+}
+
int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
struct optee_context_data *ctxdata;
@@ -97,6 +165,9 @@ void optee_release_supp(struct tee_context *ctx)
void optee_remove_common(struct optee *optee)
{
+ blocking_notifier_chain_unregister(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
+ cancel_work_sync(&optee->rpmb_scan_bus_work);
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
@@ -113,13 +184,18 @@ void optee_remove_common(struct optee *optee)
tee_shm_pool_free(optee->pool);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
}
static int smc_abi_rc;
static int ffa_abi_rc;
+static bool intf_is_regged;
static int __init optee_core_init(void)
{
+ int rc;
+
/*
* The kernel may have crashed at the same time that all available
* secure world threads were suspended and we cannot reschedule the
@@ -130,18 +206,36 @@ static int __init optee_core_init(void)
if (is_kdump_kernel())
return -ENODEV;
+ if (IS_REACHABLE(CONFIG_RPMB)) {
+ rc = rpmb_interface_register(&rpmb_class_intf);
+ if (rc)
+ return rc;
+ intf_is_regged = true;
+ }
+
smc_abi_rc = optee_smc_abi_register();
ffa_abi_rc = optee_ffa_abi_register();
/* If both failed there's no point with this module */
- if (smc_abi_rc && ffa_abi_rc)
+ if (smc_abi_rc && ffa_abi_rc) {
+ if (IS_REACHABLE(CONFIG_RPMB)) {
+ rpmb_interface_unregister(&rpmb_class_intf);
+ intf_is_regged = false;
+ }
return smc_abi_rc;
+ }
+
return 0;
}
module_init(optee_core_init);
static void __exit optee_core_exit(void)
{
+ if (IS_REACHABLE(CONFIG_RPMB) && intf_is_regged) {
+ rpmb_interface_unregister(&rpmb_class_intf);
+ intf_is_regged = false;
+ }
+
if (!smc_abi_rc)
optee_smc_abi_unregister();
if (!ffa_abi_rc)
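
Taken together, the core.c additions wire RPMB hotplug into OP-TEE device enumeration. A condensed view of the intended call flow (a sketch, not literal code):

/*
 * RPMB core registers a new device
 *   -> rpmb_add_dev()                      (class_interface .add_dev)
 *     -> blocking_notifier_call_chain(&optee_rpmb_intf_added, ...)
 *       -> optee_rpmb_intf_rdev()          (per-instance notifier)
 *         -> schedule_work(&optee->rpmb_scan_bus_work)
 *           -> optee_bus_scan_rpmb()
 *             -> optee_enumerate_devices(PTA_CMD_GET_DEVICES_RPMB)
 */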
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index d296c70ddfdc..950b4661d5df 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -43,6 +43,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
ret = tee_client_invoke_func(ctx, &inv_arg, param);
if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
(inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ /*
+ * TEE_ERROR_STORAGE_NOT_AVAILABLE is returned when getting
+ * the list of device TAs that depend on RPMB but a usable
+ * RPMB device isn't found.
+ */
+ if (inv_arg.ret == TEE_ERROR_STORAGE_NOT_AVAILABLE)
+ return -ENODEV;
pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
inv_arg.ret);
return -EINVAL;
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 3e73efa51bba..f3af5666bb11 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -7,6 +7,7 @@
#include <linux/arm_ffa.h>
#include <linux/errno.h>
+#include <linux/rpmb.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -909,6 +910,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ffa.bottom_half_value = U32_MAX;
optee->rpc_param_count = rpc_param_count;
+ if (IS_REACHABLE(CONFIG_RPMB) &&
+ (sec_caps & OPTEE_FFA_SEC_CAP_RPMB_PROBE))
+ optee->in_kernel_rpmb_routing = true;
+
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
optee);
if (IS_ERR(teedev)) {
@@ -925,6 +930,8 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
}
optee->supp_teedev = teedev;
+ optee_set_dev_group(optee);
+
rc = tee_device_register(optee->teedev);
if (rc)
goto err_unreg_supp_teedev;
@@ -940,6 +947,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee_cq_init(&optee->call_queue, 0);
optee_supp_init(&optee->supp);
optee_shm_arg_cache_init(optee, arg_cache_flags);
+ mutex_init(&optee->rpmb_dev_mutex);
ffa_dev_set_drvdata(ffa_dev, optee);
ctx = teedev_open(optee->teedev);
if (IS_ERR(ctx)) {
@@ -961,6 +969,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (rc)
goto err_unregister_devices;
+ INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
+ optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
+ blocking_notifier_chain_register(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
pr_info("initialized driver\n");
return 0;
@@ -974,6 +986,8 @@ err_close_ctx:
teedev_close_context(ctx);
err_rhashtable_free:
rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
mutex_destroy(&optee->ffa.mutex);
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 5db779dc00de..257735ae5b56 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -92,6 +92,8 @@
#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
/* OP-TEE supports asynchronous notification via FF-A */
#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
+/* OP-TEE supports probing for RPMB device if needed */
+#define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 424898cdc4e9..dc0f355ef72a 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -7,7 +7,9 @@
#define OPTEE_PRIVATE_H
#include <linux/arm-smccc.h>
+#include <linux/notifier.h>
#include <linux/rhashtable.h>
+#include <linux/rpmb.h>
#include <linux/semaphore.h>
#include <linux/tee_core.h>
#include <linux/types.h>
@@ -20,6 +22,7 @@
/* Some Global Platform error codes used in this driver */
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_ITEM_NOT_FOUND 0xFFFF0008
#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
@@ -28,6 +31,7 @@
/* API Return Codes are from the GP TEE Internal Core API Specification */
#define TEE_ERROR_TIMEOUT 0xFFFF3001
+#define TEE_ERROR_STORAGE_NOT_AVAILABLE 0xF0100003
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -200,6 +204,12 @@ struct optee_ops {
* @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
+ * @rpmb_dev_mutex: mutex protecting @rpmb_dev
+ * @rpmb_dev: current RPMB device or NULL
+ * @rpmb_scan_bus_done: flag indicating whether registration of RPMB-dependent
+ * devices was already done
+ * @rpmb_scan_bus_work: workq to wait for an RPMB device and then scan the
+ * optee bus and register RPMB-dependent optee drivers
* @rpc_param_count: If > 0 number of RPC parameters to make room for
 * @scan_bus_done flag if device registration was already done.
* @scan_bus_work workq to scan optee bus and register optee drivers
@@ -218,9 +228,16 @@ struct optee {
struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
+ /* Protects rpmb_dev pointer */
+ struct mutex rpmb_dev_mutex;
+ struct rpmb_dev *rpmb_dev;
+ struct notifier_block rpmb_intf;
unsigned int rpc_param_count;
- bool scan_bus_done;
+ bool scan_bus_done;
+ bool rpmb_scan_bus_done;
+ bool in_kernel_rpmb_routing;
struct work_struct scan_bus_work;
+ struct work_struct rpmb_scan_bus_work;
};
struct optee_session {
@@ -253,6 +270,8 @@ struct optee_call_ctx {
size_t num_entries;
};
+extern struct blocking_notifier_head optee_rpmb_intf_added;
+
int optee_notif_init(struct optee *optee, u_int max_key);
void optee_notif_uninit(struct optee *optee);
int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
@@ -283,9 +302,14 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
#define PTA_CMD_GET_DEVICES 0x0
#define PTA_CMD_GET_DEVICES_SUPP 0x1
+#define PTA_CMD_GET_DEVICES_RPMB 0x2
int optee_enumerate_devices(u32 func);
void optee_unregister_devices(void);
+void optee_bus_scan_rpmb(struct work_struct *work);
+int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
+ void *data);
+void optee_set_dev_group(struct optee *optee);
void optee_remove_common(struct optee *optee);
int optee_open(struct tee_context *ctx, bool cap_memref_null);
void optee_release(struct tee_context *ctx);
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index 4576751b490c..87a59cc03480 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -104,4 +104,39 @@
/* I2C master control flags */
#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
+/*
+ * Reset RPMB probing
+ *
+ * Releases any RPMB device that may already be in use and starts over
+ * searching for RPMB devices. Returns the kind of shared memory to use in
+ * subsequent OPTEE_RPC_CMD_RPMB_PROBE_NEXT and OPTEE_RPC_CMD_RPMB calls.
+ *
+ * [out] value[0].a OPTEE_RPC_SHM_TYPE_*, the parameter for
+ * OPTEE_RPC_CMD_SHM_ALLOC
+ */
+#define OPTEE_RPC_CMD_RPMB_PROBE_RESET 22
+
+/*
+ * Probe next RPMB device
+ *
+ * [out] value[0].a Type of RPMB device, OPTEE_RPC_RPMB_*
+ * [out] value[0].b EXT CSD-slice 168 "RPMB Size"
+ * [out] value[0].c EXT CSD-slice 222 "Reliable Write Sector Count"
+ * [out] memref[1] Buffer with the raw CID
+ */
+#define OPTEE_RPC_CMD_RPMB_PROBE_NEXT 23
+
+/* Type of RPMB device */
+#define OPTEE_RPC_RPMB_EMMC 0
+#define OPTEE_RPC_RPMB_UFS 1
+#define OPTEE_RPC_RPMB_NVME 2
+
+/*
+ * Replay Protected Memory Block access
+ *
+ * [in] memref[0] Frames to device
+ * [out] memref[1] Frames from device
+ */
+#define OPTEE_RPC_CMD_RPMB_FRAMES 24
+
#endif /*__OPTEE_RPC_CMD_H*/
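
Read together, the three commands give the secure world a probe loop over kernel-side RPMB devices. A hedged sketch of the expected sequence, inferred from the handlers added to rpc.c below:

/*
 * 1. OPTEE_RPC_CMD_RPMB_PROBE_RESET
 *      drop any previously selected device and learn which shared-memory
 *      type to allocate for the calls that follow
 *
 * 2. OPTEE_RPC_CMD_RPMB_PROBE_NEXT        (repeated)
 *      walk candidate devices one at a time; TEEC_ERROR_ITEM_NOT_FOUND
 *      ends the walk
 *
 * 3. OPTEE_RPC_CMD_RPMB_FRAMES
 *      route request/response frames to the device selected in step 2
 */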
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 7d9fa426505b..879426300821 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -278,6 +278,8 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
/* Secure world supports pre-allocating RPC arg struct */
#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
+/* Secure world supports probing for RPMB device if needed */
+#define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 5de4504665be..ebbbd42b0e3e 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/rpmb.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include "optee_private.h"
@@ -261,6 +262,154 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
}
+static void handle_rpc_func_rpmb_probe_reset(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param params[1];
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ params[0].u.value.a = OPTEE_RPC_SHM_TYPE_KERNEL;
+ params[0].u.value.b = 0;
+ params[0].u.value.c = 0;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rpmb_dev_put(optee->rpmb_dev);
+ optee->rpmb_dev = NULL;
+ mutex_unlock(&optee->rpmb_dev_mutex);
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static int rpmb_type_to_rpc_type(enum rpmb_type rtype)
+{
+ switch (rtype) {
+ case RPMB_TYPE_EMMC:
+ return OPTEE_RPC_RPMB_EMMC;
+ case RPMB_TYPE_UFS:
+ return OPTEE_RPC_RPMB_UFS;
+ case RPMB_TYPE_NVME:
+ return OPTEE_RPC_RPMB_NVME;
+ default:
+ return -1;
+ }
+}
+
+static int rpc_rpmb_match(struct device *dev, const void *data)
+{
+ struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+ return rpmb_type_to_rpc_type(rdev->descr.type) >= 0;
+}
+
+static void handle_rpc_func_rpmb_probe_next(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct rpmb_dev *rdev;
+ struct tee_param params[2];
+ void *buf;
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT ||
+ params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ buf = tee_shm_get_va(params[1].u.memref.shm,
+ params[1].u.memref.shm_offs);
+ if (IS_ERR(buf)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rdev = rpmb_dev_find_device(NULL, optee->rpmb_dev, rpc_rpmb_match);
+ rpmb_dev_put(optee->rpmb_dev);
+ optee->rpmb_dev = rdev;
+ mutex_unlock(&optee->rpmb_dev_mutex);
+
+ if (!rdev) {
+ arg->ret = TEEC_ERROR_ITEM_NOT_FOUND;
+ return;
+ }
+
+ if (params[1].u.memref.size < rdev->descr.dev_id_len) {
+ arg->ret = TEEC_ERROR_SHORT_BUFFER;
+ return;
+ }
+ memcpy(buf, rdev->descr.dev_id, rdev->descr.dev_id_len);
+ params[1].u.memref.size = rdev->descr.dev_id_len;
+ params[0].u.value.a = rpmb_type_to_rpc_type(rdev->descr.type);
+ params[0].u.value.b = rdev->descr.capacity;
+ params[0].u.value.c = rdev->descr.reliable_wr_count;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_rpmb_frames(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param params[2];
+ struct rpmb_dev *rdev;
+ void *p0, *p1;
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rdev = rpmb_dev_get(optee->rpmb_dev);
+ mutex_unlock(&optee->rpmb_dev_mutex);
+ if (!rdev) {
+ arg->ret = TEEC_ERROR_ITEM_NOT_FOUND;
+ return;
+ }
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT ||
+ params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ p0 = tee_shm_get_va(params[0].u.memref.shm,
+ params[0].u.memref.shm_offs);
+ p1 = tee_shm_get_va(params[1].u.memref.shm,
+ params[1].u.memref.shm_offs);
+ if (rpmb_route_frames(rdev, p0, params[0].u.memref.size, p1,
+ params[1].u.memref.size)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+ arg->ret = TEEC_SUCCESS;
+out:
+ rpmb_dev_put(rdev);
+}
+
void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg)
{
@@ -277,6 +426,34 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_I2C_TRANSFER:
handle_rpc_func_cmd_i2c_transfer(ctx, arg);
break;
+ /*
+ * optee->in_kernel_rpmb_routing true means that OP-TEE supports
+ * in-kernel RPMB routing _and_ that the RPMB subsystem is
+ * reachable. This is reported to user space with
+ * rpmb_routing_model=kernel in sysfs.
+ *
+ * rpmb_routing_model=kernel is also a promise to user space that
+ * RPMB access will not require supplicant support, hence the
+ * checks below.
+ */
+ case OPTEE_RPC_CMD_RPMB_PROBE_RESET:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_probe_reset(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_RPMB_PROBE_NEXT:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_probe_next(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_RPMB_FRAMES:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_frames(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
default:
handle_rpc_supp_cmd(ctx, optee, arg);
}
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 844285d4f03c..e9456e3e74cc 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/rpmb.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -1685,6 +1686,10 @@ static int optee_probe(struct platform_device *pdev)
optee->smc.sec_caps = sec_caps;
optee->rpc_param_count = rpc_param_count;
+ if (IS_REACHABLE(CONFIG_RPMB) &&
+ (sec_caps & OPTEE_SMC_SEC_CAP_RPMB_PROBE))
+ optee->in_kernel_rpmb_routing = true;
+
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
@@ -1699,6 +1704,8 @@ static int optee_probe(struct platform_device *pdev)
}
optee->supp_teedev = teedev;
+ optee_set_dev_group(optee);
+
rc = tee_device_register(optee->teedev);
if (rc)
goto err_unreg_supp_teedev;
@@ -1712,6 +1719,7 @@ static int optee_probe(struct platform_device *pdev)
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
optee_shm_arg_cache_init(optee, arg_cache_flags);
+ mutex_init(&optee->rpmb_dev_mutex);
platform_set_drvdata(pdev, optee);
ctx = teedev_open(optee->teedev);
@@ -1766,6 +1774,10 @@ static int optee_probe(struct platform_device *pdev)
if (rc)
goto err_disable_shm_cache;
+ INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
+ optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
+ blocking_notifier_chain_register(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
pr_info("initialized driver\n");
return 0;
@@ -1779,6 +1791,8 @@ err_notif_uninit:
err_close_ctx:
teedev_close_context(ctx);
err_supp_uninit:
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
optee_shm_arg_cache_uninit(optee);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index d52e879b204e..d113679b1e2d 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -40,10 +40,7 @@ static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);
-static const struct class tee_class = {
- .name = "tee",
-};
-
+static const struct class tee_class;
static dev_t tee_devt;
struct tee_context *teedev_open(struct tee_device *teedev)
@@ -965,6 +962,13 @@ err:
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
+void tee_device_set_dev_groups(struct tee_device *teedev,
+ const struct attribute_group **dev_groups)
+{
+ teedev->dev.groups = dev_groups;
+}
+EXPORT_SYMBOL_GPL(tee_device_set_dev_groups);
+
static ssize_t implementation_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -983,6 +987,11 @@ static struct attribute *tee_dev_attrs[] = {
ATTRIBUTE_GROUPS(tee_dev);
+static const struct class tee_class = {
+ .name = "tee",
+ .dev_groups = tee_dev_groups,
+};
+
/**
* tee_device_register() - Registers a TEE device
* @teedev: Device to register
@@ -1001,8 +1010,6 @@ int tee_device_register(struct tee_device *teedev)
return -EINVAL;
}
- teedev->dev.groups = tee_dev_groups;
-
rc = cdev_device_add(&teedev->cdev, &teedev->dev);
if (rc) {
dev_err(&teedev->dev,
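
The tee_core.c rework moves the default attributes from a per-device groups assignment in tee_device_register() to tee_class itself, freeing dev.groups for driver-specific attributes installed through the new tee_device_set_dev_groups() helper. Note the ordering: the helper must run after tee_device_alloc() but before tee_device_register(), since cdev_device_add() is what publishes the attributes. A sketch of the driver-side usage, mirroring the OP-TEE hunks above (my_groups is an assumed ATTRIBUTE_GROUPS() result):

#include <linux/tee_core.h>

static int register_with_attrs(const struct tee_desc *desc,
			       struct tee_shm_pool *pool, void *drvdata)
{
	struct tee_device *teedev;

	teedev = tee_device_alloc(desc, NULL, pool, drvdata);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	/* must precede registration: cdev_device_add() exposes the attrs */
	tee_device_set_dev_groups(teedev, my_groups);

	return tee_device_register(teedev);
}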
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index ed16897584b4..61e7ae524b1f 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -40,6 +40,15 @@ config THERMAL_DEBUGFS
Say Y to allow the thermal subsystem to collect diagnostic
information that can be accessed via debugfs.
+config THERMAL_CORE_TESTING
+ tristate "Thermal core testing facility"
+ depends on DEBUG_FS
+ help
+ Say Y to add a debugfs-based thermal core testing facility.
+ It allows test thermal zones to be created and populated
+ with trip points in order to exercise the thermal core
+ functionality in a controlled way.
+
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
default 0
@@ -429,7 +438,7 @@ source "drivers/thermal/samsung/Kconfig"
endmenu
menu "STMicroelectronics thermal drivers"
-depends on (ARCH_STI || ARCH_STM32) && OF
+depends on (ARCH_STI || ARCH_STM32) && THERMAL_OF
source "drivers/thermal/st/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index ce7a4752ef52..41c4d56beb40 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -63,3 +63,4 @@ obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o
obj-$(CONFIG_LOONGSON2_THERMAL) += loongson2_thermal.o
+obj-$(CONFIG_THERMAL_CORE_TESTING) += testing/
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 5ad87eb3f578..7d61493082b5 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -208,8 +208,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
val = readl(data->regs + BCM2835_TS_TSENSCTL);
if (!(val & BCM2835_TS_TSENSCTL_RSTB)) {
- struct thermal_trip trip;
- int offset, slope;
+ int offset, slope, crit_temp;
slope = thermal_zone_get_slope(tz);
offset = thermal_zone_get_offset(tz);
@@ -217,7 +216,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
* For now we deal only with critical, otherwise
* would need to iterate
*/
- err = thermal_zone_get_trip(tz, 0, &trip);
+ err = thermal_zone_get_crit_temp(tz, &crit_temp);
if (err < 0) {
dev_err(dev, "Not able to read trip_temp: %d\n", err);
return err;
@@ -232,7 +231,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
val |= (0xFE << BCM2835_TS_TSENSCTL_RSTDELAY_SHIFT);
/* trip_adc value from info */
- val |= bcm2835_thermal_temp2adc(trip.temperature,
+ val |= bcm2835_thermal_temp2adc(crit_temp,
offset,
slope)
<< BCM2835_TS_TSENSCTL_THOLD_SHIFT;
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 9674e5ffcfa2..270982740fde 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -338,11 +338,9 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
of_ops);
- if (IS_ERR(thermal)) {
- ret = PTR_ERR(thermal);
- dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(thermal))
+ return dev_err_probe(&pdev->dev, PTR_ERR(thermal),
+ "could not register sensor\n");
priv->thermal = thermal;
@@ -352,10 +350,9 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
brcmstb_tmon_irq_thread,
IRQF_ONESHOT,
DRV_NAME, priv);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not request IRQ\n");
}
dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index daed67d19efb..863e7a4272e6 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -92,23 +92,21 @@ static void bang_bang_manage(struct thermal_zone_device *tz)
for_each_trip_desc(tz, td) {
const struct thermal_trip *trip = &td->trip;
+ bool turn_on;
- if (tz->temperature >= td->threshold ||
- trip->temperature == THERMAL_TEMP_INVALID ||
+ if (trip->temperature == THERMAL_TEMP_INVALID ||
trip->type == THERMAL_TRIP_CRITICAL ||
trip->type == THERMAL_TRIP_HOT)
continue;
/*
- * If the initial cooling device state is "on", but the zone
- * temperature is not above the trip point, the core will not
- * call bang_bang_control() until the zone temperature reaches
- * the trip point temperature which may be never. In those
- * cases, set the initial state of the cooling device to 0.
+ * Adjust the target states for uninitialized thermal instances
+ * to the thermal zone temperature and the trip point threshold.
*/
+ turn_on = tz->temperature >= td->threshold;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (!instance->initialized && instance->trip == trip)
- bang_bang_set_instance_target(instance, 0);
+ bang_bang_set_instance_target(instance, turn_on);
}
}
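
The governor change replaces the unconditional "start from off" initialization with one that mirrors the current zone state: a not-yet-initialized instance now starts on if and only if the zone temperature already sits at or above the trip threshold. The rule in isolation:

/* Sketch of the new initial target for uninitialized instances. */
static bool initial_target(int zone_temp, int threshold)
{
	return zone_temp >= threshold;
}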
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 0eb657db62e4..f1fe0f8ab04f 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -465,11 +465,22 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
+static int hisi_trip_walk_cb(struct thermal_trip *trip, void *arg)
+{
+ struct hisi_thermal_sensor *sensor = arg;
+
+ if (trip->type != THERMAL_TRIP_PASSIVE)
+ return 0;
+
+ sensor->thres_temp = trip->temperature;
+ /* Return nonzero to terminate the search. */
+ return 1;
+}
+
static int hisi_thermal_register_sensor(struct platform_device *pdev,
struct hisi_thermal_sensor *sensor)
{
- int ret, i;
- struct thermal_trip trip;
+ int ret;
sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
sensor->id, sensor,
@@ -482,15 +493,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
return ret;
}
- for (i = 0; i < thermal_zone_get_num_trips(sensor->tzd); i++) {
-
- thermal_zone_get_trip(sensor->tzd, i, &trip);
-
- if (trip.type == THERMAL_TRIP_PASSIVE) {
- sensor->thres_temp = trip.temperature;
- break;
- }
- }
+ thermal_zone_for_each_trip(sensor->tzd, hisi_trip_walk_cb, sensor);
return 0;
}
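
This conversion, like the tegra ones further down, leans on the same iterator contract: thermal_zone_for_each_trip() invokes the callback once per trip and stops the walk as soon as the callback returns nonzero. A minimal callback under that assumption:

#include <linux/thermal.h>

/* Record the first passive trip temperature, then stop iterating. */
static int first_passive_temp_cb(struct thermal_trip *trip, void *arg)
{
	int *temp = arg;

	if (trip->type != THERMAL_TRIP_PASSIVE)
		return 0;	/* keep walking */

	*temp = trip->temperature;
	return 1;		/* nonzero terminates the walk */
}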
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 7224f8d21db9..88558ce58880 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -111,8 +111,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
if (ret == -ENODEV)
continue;
- dev_err(&pdev->dev, "failed to register thermal zone\n");
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "failed to register thermal zone\n");
}
devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 091fb30dedf3..b8e85a405351 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -353,24 +353,16 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz,
return 0;
}
-static int imx_bind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
+static bool imx_should_bind(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
- return thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
-}
-
-static int imx_unbind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
-{
- return thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev);
+ return trip->type == THERMAL_TRIP_PASSIVE;
}
static struct thermal_zone_device_ops imx_tz_ops = {
- .bind = imx_bind,
- .unbind = imx_unbind,
+ .should_bind = imx_should_bind,
.get_temp = imx_get_temp,
.change_mode = imx_change_mode,
.set_trip_temp = imx_set_trip_temp,
@@ -773,7 +765,7 @@ static void imx_thermal_remove(struct platform_device *pdev)
imx_thermal_unregister_legacy_cooling(data);
}
-static int __maybe_unused imx_thermal_suspend(struct device *dev)
+static int imx_thermal_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
int ret;
@@ -792,7 +784,7 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev)
return pm_runtime_force_suspend(data->dev);
}
-static int __maybe_unused imx_thermal_resume(struct device *dev)
+static int imx_thermal_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
int ret;
@@ -804,7 +796,7 @@ static int __maybe_unused imx_thermal_resume(struct device *dev)
return thermal_zone_device_enable(data->tz);
}
-static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev)
+static int imx_thermal_runtime_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
const struct thermal_soc_data *socdata = data->socdata;
@@ -826,7 +818,7 @@ static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx_thermal_runtime_resume(struct device *dev)
+static int imx_thermal_runtime_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
const struct thermal_soc_data *socdata = data->socdata;
@@ -857,15 +849,15 @@ static int __maybe_unused imx_thermal_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx_thermal_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume)
- SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend,
- imx_thermal_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume)
+ RUNTIME_PM_OPS(imx_thermal_runtime_suspend,
+ imx_thermal_runtime_resume, NULL)
};
static struct platform_driver imx_thermal = {
.driver = {
.name = "imx_thermal",
- .pm = &imx_thermal_pm_ops,
+ .pm = pm_ptr(&imx_thermal_pm_ops),
.of_match_table = of_imx_thermal_match,
},
.probe = imx_thermal_probe,
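
The .bind/.unbind pair gives way to a single .should_bind callback: the thermal core now asks the driver, per trip and cooling device, whether to create the binding, instead of the driver calling thermal_zone_bind_cooling_device() itself. A hedged sketch that also sets the binding parameters explicitly; the cooling_spec field names are inferred from the arguments the removed thermal_zone_bind_cooling_device() call used to pass:

static bool demo_should_bind(struct thermal_zone_device *tz,
			     const struct thermal_trip *trip,
			     struct thermal_cooling_device *cdev,
			     struct cooling_spec *c)
{
	if (trip->type != THERMAL_TRIP_PASSIVE)
		return false;

	/* defaults made explicit; imx relies on them implicitly */
	c->upper = THERMAL_NO_LIMIT;
	c->lower = THERMAL_NO_LIMIT;
	c->weight = THERMAL_WEIGHT_DEFAULT;
	return true;
}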
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 96daad28b0c0..c2d59cbfaea9 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -291,24 +291,6 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
-{
- struct thermal_trip trip;
- int i, ret;
-
- for (i = 0; i < thermal_zone_get_num_trips(chip->tz_dev); i++) {
-
- ret = thermal_zone_get_trip(chip->tz_dev, i, &trip);
- if (ret)
- continue;
-
- if (trip.type == THERMAL_TRIP_CRITICAL)
- return trip.temperature;
- }
-
- return THERMAL_TEMP_INVALID;
-}
-
/*
* This function initializes the internal temp value based on only the
* current thermal stage and threshold. Setup threshold control and
@@ -343,7 +325,9 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
mutex_unlock(&chip->lock);
- crit_temp = qpnp_tm_get_critical_trip_temp(chip);
+ ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp);
+ if (ret)
+ crit_temp = THERMAL_TEMP_INVALID;
mutex_lock(&chip->lock);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 404f01cca4da..52e26be8c53d 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -347,7 +347,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
+static int qoriq_tmu_suspend(struct device *dev)
{
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
int ret;
@@ -361,7 +361,7 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused qoriq_tmu_resume(struct device *dev)
+static int qoriq_tmu_resume(struct device *dev)
{
int ret;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
@@ -374,8 +374,8 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev)
return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
-static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
- qoriq_tmu_suspend, qoriq_tmu_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
+ qoriq_tmu_suspend, qoriq_tmu_resume);
static const struct of_device_id qoriq_tmu_match[] = {
{ .compatible = "fsl,qoriq-tmu", },
@@ -387,7 +387,7 @@ MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
static struct platform_driver qoriq_tmu = {
.driver = {
.name = "qoriq_thermal",
- .pm = &qoriq_tmu_pm_ops,
+ .pm = pm_sleep_ptr(&qoriq_tmu_pm_ops),
.of_match_table = qoriq_tmu_match,
},
.probe = qoriq_tmu_probe,
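
Dropping __maybe_unused works because DEFINE_SIMPLE_DEV_PM_OPS() keeps the callbacks referenced at compile time while pm_sleep_ptr() lets the compiler discard both the ops and the callbacks when CONFIG_PM_SLEEP is off. The generic shape of the conversion:

#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int demo_resume(struct device *dev)
{
	return 0;	/* reprogram the hardware here */
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

/* in the platform_driver: .pm = pm_sleep_ptr(&demo_pm_ops) */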
diff --git a/drivers/thermal/renesas/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c
index 5c769871753a..810f86677461 100644
--- a/drivers/thermal/renesas/rcar_gen3_thermal.c
+++ b/drivers/thermal/renesas/rcar_gen3_thermal.c
@@ -563,11 +563,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret)
goto error_unregister;
- ret = thermal_zone_get_num_trips(tsc->zone);
- if (ret < 0)
- goto error_unregister;
-
- dev_info(dev, "Sensor %u: Loaded %d trip points\n", i, ret);
+ dev_info(dev, "Sensor %u: Loaded\n", i);
}
if (!priv->num_tscs) {
diff --git a/drivers/thermal/renesas/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c
index 1e93f60b6d74..ddc8341e5c3f 100644
--- a/drivers/thermal/renesas/rcar_thermal.c
+++ b/drivers/thermal/renesas/rcar_thermal.c
@@ -447,7 +447,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, rcar_thermal_irq,
IRQF_SHARED, dev_name(dev), common);
if (ret) {
- dev_err(dev, "irq request failed\n ");
+ dev_err(dev, "irq request failed\n");
goto error_unregister;
}
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index 874192546548..dfd1d529c410 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -359,21 +359,17 @@ static int sprd_thm_probe(struct platform_device *pdev)
return -EINVAL;
}
- thm->clk = devm_clk_get(&pdev->dev, "enable");
+ thm->clk = devm_clk_get_enabled(&pdev->dev, "enable");
if (IS_ERR(thm->clk)) {
dev_err(&pdev->dev, "failed to get enable clock\n");
return PTR_ERR(thm->clk);
}
- ret = clk_prepare_enable(thm->clk);
- if (ret)
- return ret;
-
sprd_thm_para_config(thm);
ret = sprd_thm_cal_read(np, "thm_sign_cal", &val);
if (ret)
- goto disable_clk;
+ return ret;
if (val > 0)
thm->ratio_sign = -1;
@@ -382,7 +378,7 @@ static int sprd_thm_probe(struct platform_device *pdev)
ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off);
if (ret)
- goto disable_clk;
+ return ret;
for_each_child_of_node(np, sen_child) {
sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL);
@@ -439,8 +435,6 @@ static int sprd_thm_probe(struct platform_device *pdev)
of_put:
of_node_put(sen_child);
-disable_clk:
- clk_disable_unprepare(thm->clk);
return ret;
}
@@ -526,8 +520,6 @@ static void sprd_thm_remove(struct platform_device *pdev)
devm_thermal_of_zone_unregister(&pdev->dev,
thm->sensor[i]->tzd);
}
-
- clk_disable_unprepare(thm->clk);
}
static const struct of_device_id sprd_thermal_of_match[] = {
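
devm_clk_get_enabled() combines devm_clk_get() with clk_prepare_enable() and registers the matching disable/unprepare with devres, which is what lets the disable_clk error label and the remove-time clk_disable_unprepare() disappear. The equivalence, sketched as probe-path fragments:

/* before: manual pairing, undone on every exit path and in remove */
clk = devm_clk_get(dev, "enable");
if (IS_ERR(clk))
	return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret)
	return ret;

/* after: devres disables and unprepares automatically on detach */
clk = devm_clk_get_enabled(dev, "enable");
if (IS_ERR(clk))
	return PTR_ERR(clk);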
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 2a105409864e..a14a37d54698 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include "st_thermal.h"
+#include "../thermal_hwmon.h"
/* The Thermal Framework expects millidegrees */
#define mcelsius(temp) ((temp) * 1000)
@@ -135,8 +136,6 @@ static struct thermal_zone_device_ops st_tz_ops = {
.get_temp = st_thermal_get_temp,
};
-static struct thermal_trip trip;
-
int st_thermal_register(struct platform_device *pdev,
const struct of_device_id *st_thermal_of_match)
{
@@ -145,7 +144,6 @@ int st_thermal_register(struct platform_device *pdev,
struct device_node *np = dev->of_node;
const struct of_device_id *match;
- int polling_delay;
int ret;
if (!np) {
@@ -197,29 +195,24 @@ int st_thermal_register(struct platform_device *pdev,
if (ret)
goto sensor_off;
- polling_delay = sensor->ops->register_enable_irq ? 0 : 1000;
-
- trip.temperature = sensor->cdata->crit_temp;
- trip.type = THERMAL_TRIP_CRITICAL;
-
sensor->thermal_dev =
- thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, sensor,
- &st_tz_ops, NULL, 0, polling_delay);
+ devm_thermal_of_zone_register(dev, 0, sensor, &st_tz_ops);
if (IS_ERR(sensor->thermal_dev)) {
- dev_err(dev, "failed to register thermal zone device\n");
+ dev_err(dev, "failed to register thermal of zone\n");
ret = PTR_ERR(sensor->thermal_dev);
goto sensor_off;
}
- ret = thermal_zone_device_enable(sensor->thermal_dev);
- if (ret)
- goto tzd_unregister;
platform_set_drvdata(pdev, sensor);
+ /*
+ * devm_thermal_of_zone_register() doesn't enable hwmon by default,
+ * so enable it here.
+ */
+ devm_thermal_add_hwmon_sysfs(dev, sensor->thermal_dev);
+
return 0;
-tzd_unregister:
- thermal_zone_device_unregister(sensor->thermal_dev);
sensor_off:
st_thermal_sensor_off(sensor);
@@ -232,11 +225,11 @@ void st_thermal_unregister(struct platform_device *pdev)
struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
st_thermal_sensor_off(sensor);
- thermal_zone_device_unregister(sensor->thermal_dev);
+ thermal_remove_hwmon_sysfs(sensor->thermal_dev);
+ devm_thermal_of_zone_unregister(sensor->dev, sensor->thermal_dev);
}
EXPORT_SYMBOL_GPL(st_thermal_unregister);
-#ifdef CONFIG_PM_SLEEP
static int st_thermal_suspend(struct device *dev)
{
struct st_thermal_sensor *sensor = dev_get_drvdata(dev);
@@ -265,9 +258,8 @@ static int st_thermal_resume(struct device *dev)
return 0;
}
-#endif
-SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
+DEFINE_SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>");
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index e427117381a4..97493d2b2f49 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -170,7 +170,7 @@ static void st_mmap_remove(struct platform_device *pdev)
static struct platform_driver st_mmap_thermal_driver = {
.driver = {
.name = "st_thermal_mmap",
- .pm = &st_thermal_pm_ops,
+ .pm = pm_sleep_ptr(&st_thermal_pm_ops),
.of_match_table = st_mmap_thermal_of_match,
},
.probe = st_mmap_probe,
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 34785b9276fc..ffd988600ed6 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -440,7 +440,6 @@ thermal_unprepare:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int stm_thermal_suspend(struct device *dev)
{
struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
@@ -466,10 +465,9 @@ static int stm_thermal_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
- stm_thermal_suspend, stm_thermal_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
+ stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
@@ -580,7 +578,7 @@ static void stm_thermal_remove(struct platform_device *pdev)
static struct platform_driver stm_thermal_driver = {
.driver = {
.name = "stm_thermal",
- .pm = &stm_thermal_pm_ops,
+ .pm = pm_sleep_ptr(&stm_thermal_pm_ops),
.of_match_table = stm_thermal_of_match,
},
.probe = stm_thermal_probe,
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index d3dfc34c62c6..a023c948afbd 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -682,24 +682,25 @@ static const struct thermal_zone_device_ops tegra_of_thermal_ops = {
.set_trips = tegra_thermctl_set_trips,
};
-static int get_hot_temp(struct thermal_zone_device *tz, int *trip_id, int *temp)
+static int get_hot_trip_cb(struct thermal_trip *trip, void *arg)
{
- int i, ret;
- struct thermal_trip trip;
+ const struct thermal_trip **trip_ret = arg;
- for (i = 0; i < thermal_zone_get_num_trips(tz); i++) {
+ if (trip->type != THERMAL_TRIP_HOT)
+ return 0;
- ret = thermal_zone_get_trip(tz, i, &trip);
- if (ret)
- return -EINVAL;
+ *trip_ret = trip;
+ /* Return nonzero to terminate the search. */
+ return 1;
+}
- if (trip.type == THERMAL_TRIP_HOT) {
- *trip_id = i;
- return 0;
- }
- }
+static const struct thermal_trip *get_hot_trip(struct thermal_zone_device *tz)
+{
+ const struct thermal_trip *trip = NULL;
- return -EINVAL;
+ thermal_zone_for_each_trip(tz, get_hot_trip_cb, &trip);
+
+ return trip;
}
/**
@@ -731,8 +732,9 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
struct thermal_zone_device *tz)
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ const struct thermal_trip *hot_trip;
struct soctherm_throt_cfg *stc;
- int i, trip, temperature, ret;
+ int i, temperature, ret;
/* Get thermtrips. If missing, try to get critical trips. */
temperature = tsensor_group_thermtrip_get(ts, sg->id);
@@ -749,8 +751,8 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
- ret = get_hot_temp(tz, &trip, &temperature);
- if (ret) {
+ hot_trip = get_hot_trip(tz);
+ if (!hot_trip) {
dev_info(dev, "throttrip: %s: missing hot temperature\n",
sg->name);
return 0;
@@ -763,7 +765,7 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
continue;
cdev = ts->throt_cfgs[i].cdev;
- if (get_thermal_instance(tz, cdev, trip))
+ if (thermal_trip_is_bound_to_cdev(tz, hot_trip, cdev))
stc = find_throttle_cfg_by_name(ts, cdev->type);
else
continue;
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index d911fa60f100..6245f6b97f43 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -303,33 +303,37 @@ stop_channel:
return 0;
}
-static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
- int *hot_trip, int *crit_trip)
+struct trip_temps {
+ int hot_trip;
+ int crit_trip;
+};
+
+static int tegra_tsensor_get_trips_cb(struct thermal_trip *trip, void *arg)
{
- unsigned int i;
+ struct trip_temps *temps = arg;
+
+ if (trip->type == THERMAL_TRIP_HOT)
+ temps->hot_trip = trip->temperature;
+ else if (trip->type == THERMAL_TRIP_CRITICAL)
+ temps->crit_trip = trip->temperature;
+
+ return 0;
+}
+static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
+ struct trip_temps *temps)
+{
/*
 * 90C is the maximal critical temperature of all Tegra30 SoC variants;
 * use it for the default trip if unspecified in the device tree.
*/
- *hot_trip = 85000;
- *crit_trip = 90000;
-
- for (i = 0; i < thermal_zone_get_num_trips(tzd); i++) {
-
- struct thermal_trip trip;
+ temps->hot_trip = 85000;
+ temps->crit_trip = 90000;
- thermal_zone_get_trip(tzd, i, &trip);
-
- if (trip.type == THERMAL_TRIP_HOT)
- *hot_trip = trip.temperature;
-
- if (trip.type == THERMAL_TRIP_CRITICAL)
- *crit_trip = trip.temperature;
- }
+ thermal_zone_for_each_trip(tzd, tegra_tsensor_get_trips_cb, temps);
/* clamp hardware trips to the calibration limits */
- *hot_trip = clamp(*hot_trip, 25000, 90000);
+ temps->hot_trip = clamp(temps->hot_trip, 25000, 90000);
/*
* Kernel will perform a normal system shut down if it will
@@ -338,7 +342,7 @@ static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
* shut down gracefully before sending signal to the Power
* Management controller.
*/
- *crit_trip = clamp(*crit_trip + 5000, 25000, 90000);
+ temps->crit_trip = clamp(temps->crit_trip + 5000, 25000, 90000);
}
static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
@@ -346,7 +350,8 @@ static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
{
const struct tegra_tsensor_channel *tsc = &ts->ch[id];
struct thermal_zone_device *tzd = tsc->tzd;
- int err, hot_trip = 0, crit_trip = 0;
+ struct trip_temps temps = { 0 };
+ int err;
u32 val;
if (!tzd) {
@@ -357,24 +362,24 @@ static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
return 0;
}
- tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip);
+ tegra_tsensor_get_hw_channel_trips(tzd, &temps);
dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n",
- id, DIV_ROUND_CLOSEST(crit_trip, 1000));
+ id, DIV_ROUND_CLOSEST(temps.crit_trip, 1000));
- hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip);
- crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip);
+ temps.hot_trip = tegra_tsensor_temp_to_counter(ts, temps.hot_trip);
+ temps.crit_trip = tegra_tsensor_temp_to_counter(ts, temps.crit_trip);
/* program LEVEL2 counter threshold */
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
val &= ~TSENSOR_SENSOR0_CONFIG1_TH2;
- val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, temps.hot_trip);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
/* program LEVEL3 counter threshold */
val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2);
val &= ~TSENSOR_SENSOR0_CONFIG2_TH3;
- val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, temps.crit_trip);
writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2);
/*
diff --git a/drivers/thermal/testing/Makefile b/drivers/thermal/testing/Makefile
new file mode 100644
index 000000000000..ede9678efbce
--- /dev/null
+++ b/drivers/thermal/testing/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Thermal core testing facility.
+
+obj-$(CONFIG_THERMAL_CORE_TESTING) += thermal-testing.o
+
+thermal-testing-y := command.o zone.o
diff --git a/drivers/thermal/testing/command.c b/drivers/thermal/testing/command.c
new file mode 100644
index 000000000000..ba11d70e8021
--- /dev/null
+++ b/drivers/thermal/testing/command.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024, Intel Corporation
+ *
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * Thermal subsystem testing facility.
+ *
+ * This facility allows the thermal core functionality to be exercised in a
+ * controlled way in order to verify its behavior.
+ *
+ * It resides in the "thermal-testing" directory under the debugfs root and
+ * starts with a single file called "command" to which a string representing
+ * a thermal testing facility command can be written.
+ *
+ * The currently supported commands are listed in the tt_commands enum below.
+ *
+ * The "addtz" command causes a new test thermal zone template to be created,
+ * for example:
+ *
+ * # echo addtz > /sys/kernel/debug/thermal-testing/command
+ *
+ * That template will be represented as a subdirectory in the "thermal-testing"
+ * directory, for example
+ *
+ * # ls /sys/kernel/debug/thermal-testing/
+ * command tz0
+ *
+ * The thermal zone template can be populated with trip points with the help of
+ * the "tzaddtrip" command, for example:
+ *
+ * # echo tzaddtrip:0 > /sys/kernel/debug/thermal-testing/command
+ *
+ * which causes a trip point template to be added to the test thermal zone
+ * template 0 (represented by the tz0 subdirectory in "thermal-testing").
+ *
+ * # ls /sys/kernel/debug/thermal-testing/tz0
+ * init_temp temp trip_0_temp trip_0_hyst
+ *
+ * The temperature of a trip point template is initially THERMAL_TEMP_INVALID
+ * and its hysteresis is initially 0. They can be adjusted by writing to the
+ * "trip_x_temp" and "trip_x_hyst" files correspoinding to that trip point
+ * template, respectively.
+ *
+ * The initial temperature of a thermal zone based on a template can be set by
+ * writing to the "init_temp" file in its directory under "thermal-testing", for
+ * example:
+ *
+ * echo 50000 > /sys/kernel/debug/thermal-testing/tz0/init_temp
+ *
+ * When ready, the "tzreg" command can be used to register and enable a
+ * thermal zone based on a given template with the thermal core, for example
+ *
+ * # echo tzreg:0 > /sys/kernel/debug/thermal-testing/command
+ *
+ * In this case, test thermal zone template 0 is used for registering a new
+ * thermal zone and the set of trip point templates associated with it is used
+ * for populating the new thermal zone's trip points table. The type of the new
+ * thermal zone is "test_tz".
+ *
+ * The temperature and hysteresis of all of the trip points in that new thermal
+ * zone are adjustable via sysfs, so they can be updated at any time.
+ *
+ * The current temperature of the new thermal zone can be set by writing to the
+ * "temp" file in the corresponding thermal zone template's directory under
+ * "thermal-testing", for example
+ *
+ * echo 10000 > /sys/kernel/debug/thermal-testing/tz0/temp
+ *
+ * which will also trigger a temperature update for this zone in the thermal
+ * core, including checking its trip points, sending notifications to user space
+ * if any of them have been crossed and so on.
+ *
+ * When it is not needed any more, a test thermal zone template can be deleted
+ * with the help of the "deltz" command, for example
+ *
+ * # echo deltz:0 > /sys/kernel/debug/thermal-testing/command
+ *
+ * which will also unregister the thermal zone based on it, if present.
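+ *
+ * Putting the commands above together, one possible test session (a sketch;
+ * the temperatures are arbitrary example values) could be:
+ *
+ * # echo addtz > /sys/kernel/debug/thermal-testing/command
+ * # echo tzaddtrip:0 > /sys/kernel/debug/thermal-testing/command
+ * # echo 50000 > /sys/kernel/debug/thermal-testing/tz0/trip_0_temp
+ * # echo tzreg:0 > /sys/kernel/debug/thermal-testing/command
+ * # echo 55000 > /sys/kernel/debug/thermal-testing/tz0/temp
+ * # echo tzunreg:0 > /sys/kernel/debug/thermal-testing/command
+ * # echo deltz:0 > /sys/kernel/debug/thermal-testing/command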
+ */
+
+#define pr_fmt(fmt) "thermal-testing: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "thermal_testing.h"
+
+struct dentry *d_testing;
+
+#define TT_COMMAND_SIZE 16
+
+enum tt_commands {
+ TT_CMD_ADDTZ,
+ TT_CMD_DELTZ,
+ TT_CMD_TZADDTRIP,
+ TT_CMD_TZREG,
+ TT_CMD_TZUNREG,
+};
+
+static const char *tt_command_strings[] = {
+ [TT_CMD_ADDTZ] = "addtz",
+ [TT_CMD_DELTZ] = "deltz",
+ [TT_CMD_TZADDTRIP] = "tzaddtrip",
+ [TT_CMD_TZREG] = "tzreg",
+ [TT_CMD_TZUNREG] = "tzunreg",
+};
+
+static int tt_command_exec(int index, const char *arg)
+{
+ int ret;
+
+ switch (index) {
+ case TT_CMD_ADDTZ:
+ ret = tt_add_tz();
+ break;
+
+ case TT_CMD_DELTZ:
+ ret = tt_del_tz(arg);
+ break;
+
+ case TT_CMD_TZADDTRIP:
+ ret = tt_zone_add_trip(arg);
+ break;
+
+ case TT_CMD_TZREG:
+ ret = tt_zone_reg(arg);
+ break;
+
+ case TT_CMD_TZUNREG:
+ ret = tt_zone_unreg(arg);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t tt_command_process(struct dentry *dentry, const char __user *user_buf,
+ size_t count)
+{
+ char *buf __free(kfree);
+ char *arg;
+ int i;
+
+ buf = kmalloc(count + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ strim(buf);
+
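+ /* Split "command:argument" input into the command and its optional argument. */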
+ arg = strstr(buf, ":");
+ if (arg) {
+ *arg = '\0';
+ arg++;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tt_command_strings); i++) {
+ if (!strcmp(buf, tt_command_strings[i]))
+ return tt_command_exec(i, arg);
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t tt_command_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ ssize_t ret;
+
+ if (*ppos)
+ return -EINVAL;
+
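+ /* Reject input that will not fit in a command buffer with its NUL terminator. */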
+ if (count + 1 > TT_COMMAND_SIZE)
+ return -E2BIG;
+
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ret = tt_command_process(dentry, user_buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations tt_command_fops = {
+ .write = tt_command_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int __init thermal_testing_init(void)
+{
+ d_testing = debugfs_create_dir("thermal-testing", NULL);
+ if (!IS_ERR(d_testing))
+ debugfs_create_file("command", 0200, d_testing, NULL,
+ &tt_command_fops);
+
+ return 0;
+}
+module_init(thermal_testing_init);
+
+static void __exit thermal_testing_exit(void)
+{
+ debugfs_remove(d_testing);
+ tt_zone_cleanup();
+}
+module_exit(thermal_testing_exit);
+
+MODULE_DESCRIPTION("Thermal core testing facility");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/testing/thermal_testing.h b/drivers/thermal/testing/thermal_testing.h
new file mode 100644
index 000000000000..c790a32aae4e
--- /dev/null
+++ b/drivers/thermal/testing/thermal_testing.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+extern struct dentry *d_testing;
+
+int tt_add_tz(void);
+int tt_del_tz(const char *arg);
+int tt_zone_add_trip(const char *arg);
+int tt_zone_reg(const char *arg);
+int tt_zone_unreg(const char *arg);
+
+void tt_zone_cleanup(void);
diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c
new file mode 100644
index 000000000000..c6d8c66f40f9
--- /dev/null
+++ b/drivers/thermal/testing/zone.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024, Intel Corporation
+ *
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * Thermal zone template handling for thermal core testing.
+ */
+
+#define pr_fmt(fmt) "thermal-testing: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/thermal.h>
+#include <linux/workqueue.h>
+
+#include "thermal_testing.h"
+
+#define TT_MAX_FILE_NAME_LENGTH 16
+
+/**
+ * struct tt_thermal_zone - Testing thermal zone template
+ *
+ * Represents a template of a thermal zone that can be used for registering
+ * a test thermal zone with the thermal core.
+ *
+ * @list_node: Node in the list of all testing thermal zone templates.
+ * @trips: List of trip point templates for this thermal zone template.
+ * @d_tt_zone: Directory in debugfs representing this template.
+ * @tz: Test thermal zone based on this template, if present.
+ * @lock: Mutex for synchronizing changes of this template.
+ * @ida: IDA for trip point IDs.
+ * @id: The ID of this template for the debugfs interface.
+ * @temp: Initial temperature of a thermal zone based on this template.
+ * @tz_temp: Current thermal zone temperature (after registration).
+ * @num_trips: Number of trip points in the @trips list.
+ * @refcount: Reference counter for usage and removal synchronization.
+ */
+struct tt_thermal_zone {
+ struct list_head list_node;
+ struct list_head trips;
+ struct dentry *d_tt_zone;
+ struct thermal_zone_device *tz;
+ struct mutex lock;
+ struct ida ida;
+ int id;
+ int temp;
+ int tz_temp;
+ unsigned int num_trips;
+ unsigned int refcount;
+};
+
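+/* Scoped-lock guard for a tt_thermal_zone's mutex, used via guard(tt_zone)(). */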
+DEFINE_GUARD(tt_zone, struct tt_thermal_zone *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
+
+/**
+ * struct tt_trip - Testing trip point template
+ *
+ * Represents a template of a trip point to be used for populating a trip point
+ * during the registration of a thermal zone based on a given zone template.
+ *
+ * @list_node: Node in the list of all trip templates in the zone template.
+ * @trip: Trip point data to use for thermal zone registration.
+ * @id: The ID of this trip template for the debugfs interface.
+ */
+struct tt_trip {
+ struct list_head list_node;
+ struct thermal_trip trip;
+ int id;
+};
+
+/*
+ * It is both questionable and potentially problematic from the synchronization
+ * perspective to attempt to manipulate debugfs from within a debugfs file
+ * "write" operation, so auxiliary work items are used for that. The majority
+ * of zone-related command functions have a part that runs from a workqueue and
+ * makes changes in debugfs, among other things.
+ */
+struct tt_work {
+ struct work_struct work;
+ struct tt_thermal_zone *tt_zone;
+ struct tt_trip *tt_trip;
+};
+
+static inline struct tt_work *tt_work_of_work(struct work_struct *work)
+{
+ return container_of(work, struct tt_work, work);
+}
+
+static LIST_HEAD(tt_thermal_zones);
+static DEFINE_IDA(tt_thermal_zones_ida);
+static DEFINE_MUTEX(tt_thermal_zones_lock);
+
+static int tt_int_get(void *data, u64 *val)
+{
+ *val = *(int *)data;
+ return 0;
+}
+static int tt_int_set(void *data, u64 val)
+{
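+ /* Reject values below the THERMAL_TEMP_INVALID sentinel. */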
+ if ((int)val < THERMAL_TEMP_INVALID)
+ return -EINVAL;
+
+ *(int *)data = val;
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_int_attr, tt_int_get, tt_int_set, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(tt_unsigned_int_attr, tt_int_get, tt_int_set, "%llu\n");
+
+static int tt_zone_tz_temp_get(void *data, u64 *val)
+{
+ struct tt_thermal_zone *tt_zone = data;
+
+ guard(tt_zone)(tt_zone);
+
+ if (!tt_zone->tz)
+ return -EBUSY;
+
+ *val = tt_zone->tz_temp;
+
+ return 0;
+}
+static int tt_zone_tz_temp_set(void *data, u64 val)
+{
+ struct tt_thermal_zone *tt_zone = data;
+
+ guard(tt_zone)(tt_zone);
+
+ if (!tt_zone->tz)
+ return -EBUSY;
+
+ WRITE_ONCE(tt_zone->tz_temp, val);
+ thermal_zone_device_update(tt_zone->tz, THERMAL_EVENT_TEMP_SAMPLE);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_zone_tz_temp_attr, tt_zone_tz_temp_get,
+ tt_zone_tz_temp_set, "%lld\n");
+
+static void tt_zone_free_trips(struct tt_thermal_zone *tt_zone)
+{
+ struct tt_trip *tt_trip, *aux;
+
+ list_for_each_entry_safe(tt_trip, aux, &tt_zone->trips, list_node) {
+ list_del(&tt_trip->list_node);
+ ida_free(&tt_zone->ida, tt_trip->id);
+ kfree(tt_trip);
+ }
+}
+
+static void tt_zone_free(struct tt_thermal_zone *tt_zone)
+{
+ tt_zone_free_trips(tt_zone);
+ ida_free(&tt_thermal_zones_ida, tt_zone->id);
+ ida_destroy(&tt_zone->ida);
+ kfree(tt_zone);
+}
+
+static void tt_add_tz_work_fn(struct work_struct *work)
+{
+ struct tt_work *tt_work = tt_work_of_work(work);
+ struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
+ char f_name[TT_MAX_FILE_NAME_LENGTH];
+
+ kfree(tt_work);
+
+ snprintf(f_name, TT_MAX_FILE_NAME_LENGTH, "tz%d", tt_zone->id);
+ tt_zone->d_tt_zone = debugfs_create_dir(f_name, d_testing);
+ if (IS_ERR(tt_zone->d_tt_zone)) {
+ tt_zone_free(tt_zone);
+ return;
+ }
+
+ debugfs_create_file_unsafe("temp", 0600, tt_zone->d_tt_zone, tt_zone,
+ &tt_zone_tz_temp_attr);
+
+ debugfs_create_file_unsafe("init_temp", 0600, tt_zone->d_tt_zone,
+ &tt_zone->temp, &tt_int_attr);
+
+ guard(mutex)(&tt_thermal_zones_lock);
+
+ list_add_tail(&tt_zone->list_node, &tt_thermal_zones);
+}
+
+int tt_add_tz(void)
+{
+ struct tt_thermal_zone *tt_zone __free(kfree);
+ struct tt_work *tt_work __free(kfree);
+ int ret;
+
+ tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL);
+ if (!tt_zone)
+ return -ENOMEM;
+
+ tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ if (!tt_work)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&tt_zone->trips);
+ mutex_init(&tt_zone->lock);
+ ida_init(&tt_zone->ida);
+ tt_zone->temp = THERMAL_TEMP_INVALID;
+
+ ret = ida_alloc(&tt_thermal_zones_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ tt_zone->id = ret;
+
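+ /* no_free_ptr() disarms the __free() cleanups; the work item now owns both. */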
+ INIT_WORK(&tt_work->work, tt_add_tz_work_fn);
+ tt_work->tt_zone = no_free_ptr(tt_zone);
+ schedule_work(&(no_free_ptr(tt_work)->work));
+
+ return 0;
+}
+
+static void tt_del_tz_work_fn(struct work_struct *work)
+{
+ struct tt_work *tt_work = tt_work_of_work(work);
+ struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
+
+ kfree(tt_work);
+
+ debugfs_remove(tt_zone->d_tt_zone);
+ tt_zone_free(tt_zone);
+}
+
+static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone)
+{
+ guard(tt_zone)(tt_zone);
+
+ if (tt_zone->tz) {
+ thermal_zone_device_unregister(tt_zone->tz);
+ tt_zone->tz = NULL;
+ }
+}
+
+int tt_del_tz(const char *arg)
+{
+ struct tt_work *tt_work __free(kfree);
+ struct tt_thermal_zone *tt_zone, *aux;
+ int ret;
+ int id;
+
+ ret = sscanf(arg, "%d", &id);
+ if (ret != 1)
+ return -EINVAL;
+
+ tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ if (!tt_work)
+ return -ENOMEM;
+
+ guard(mutex)(&tt_thermal_zones_lock);
+
+ ret = -EINVAL;
+ list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) {
+ if (tt_zone->id == id) {
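+ /* Refuse to delete a zone template that is still in use. */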
+ if (tt_zone->refcount) {
+ ret = -EBUSY;
+ } else {
+ list_del(&tt_zone->list_node);
+ ret = 0;
+ }
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ tt_zone_unregister_tz(tt_zone);
+
+ INIT_WORK(&tt_work->work, tt_del_tz_work_fn);
+ tt_work->tt_zone = tt_zone;
+ schedule_work(&(no_free_ptr(tt_work)->work));
+
+ return 0;
+}
+
+static struct tt_thermal_zone *tt_get_tt_zone(const char *arg)
+{
+ struct tt_thermal_zone *tt_zone;
+ int ret, id;
+
+ ret = sscanf(arg, "%d", &id);
+ if (ret != 1)
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&tt_thermal_zones_lock);
+
+ ret = -EINVAL;
+ list_for_each_entry(tt_zone, &tt_thermal_zones, list_node) {
+ if (tt_zone->id == id) {
+ tt_zone->refcount++;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return tt_zone;
+}
+
+static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone)
+{
+ guard(mutex)(&tt_thermal_zones_lock);
+
+ tt_zone->refcount--;
+}
+
+static void tt_zone_add_trip_work_fn(struct work_struct *work)
+{
+ struct tt_work *tt_work = tt_work_of_work(work);
+ struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
+ struct tt_trip *tt_trip = tt_work->tt_trip;
+ char d_name[TT_MAX_FILE_NAME_LENGTH];
+
+ kfree(tt_work);
+
+ snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_temp", tt_trip->id);
+ debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone,
+ &tt_trip->trip.temperature, &tt_int_attr);
+
+ snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_hyst", tt_trip->id);
+ debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone,
+ &tt_trip->trip.hysteresis, &tt_unsigned_int_attr);
+
+ tt_put_tt_zone(tt_zone);
+}
+
+int tt_zone_add_trip(const char *arg)
+{
+ struct tt_work *tt_work __free(kfree);
+ struct tt_trip *tt_trip __free(kfree);
+ struct tt_thermal_zone *tt_zone;
+ int id;
+
+ tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ if (!tt_work)
+ return -ENOMEM;
+
+ tt_trip = kzalloc(sizeof(*tt_trip), GFP_KERNEL);
+ if (!tt_trip)
+ return -ENOMEM;
+
+ tt_zone = tt_get_tt_zone(arg);
+ if (IS_ERR(tt_zone))
+ return PTR_ERR(tt_zone);
+
+ id = ida_alloc(&tt_zone->ida, GFP_KERNEL);
+ if (id < 0) {
+ tt_put_tt_zone(tt_zone);
+ return id;
+ }
+
+ tt_trip->trip.type = THERMAL_TRIP_ACTIVE;
+ tt_trip->trip.temperature = THERMAL_TEMP_INVALID;
+ tt_trip->trip.flags = THERMAL_TRIP_FLAG_RW;
+ tt_trip->id = id;
+
+ guard(tt_zone)(tt_zone);
+
+ list_add_tail(&tt_trip->list_node, &tt_zone->trips);
+ tt_zone->num_trips++;
+
+ INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn);
+ tt_work->tt_zone = tt_zone;
+ tt_work->tt_trip = no_free_ptr(tt_trip);
+ schedule_work(&(no_free_ptr(tt_work)->work));
+
+ return 0;
+}
+
+static int tt_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ struct tt_thermal_zone *tt_zone = thermal_zone_device_priv(tz);
+
+ *temp = READ_ONCE(tt_zone->tz_temp);
+
+ if (*temp < THERMAL_TEMP_INVALID)
+ return -ENODATA;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tt_zone_ops = {
+ .get_temp = tt_zone_get_temp,
+};
+
+static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
+{
+ struct thermal_trip *trips __free(kfree);
+ struct thermal_zone_device *tz;
+ struct tt_trip *tt_trip;
+ int i;
+
+ guard(tt_zone)(tt_zone);
+
+ if (tt_zone->tz)
+ return -EINVAL;
+
+ trips = kcalloc(tt_zone->num_trips, sizeof(*trips), GFP_KERNEL);
+ if (!trips)
+ return -ENOMEM;
+
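+ /* Flatten the trip templates into the contiguous array used for registration. */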
+ i = 0;
+ list_for_each_entry(tt_trip, &tt_zone->trips, list_node)
+ trips[i++] = tt_trip->trip;
+
+ tt_zone->tz_temp = tt_zone->temp;
+
+ tz = thermal_zone_device_register_with_trips("test_tz", trips, i, tt_zone,
+ &tt_zone_ops, NULL, 0, 0);
+ if (IS_ERR(tz))
+ return PTR_ERR(tz);
+
+ tt_zone->tz = tz;
+
+ thermal_zone_device_enable(tz);
+
+ return 0;
+}
+
+int tt_zone_reg(const char *arg)
+{
+ struct tt_thermal_zone *tt_zone;
+ int ret;
+
+ tt_zone = tt_get_tt_zone(arg);
+ if (IS_ERR(tt_zone))
+ return PTR_ERR(tt_zone);
+
+ ret = tt_zone_register_tz(tt_zone);
+
+ tt_put_tt_zone(tt_zone);
+
+ return ret;
+}
+
+int tt_zone_unreg(const char *arg)
+{
+ struct tt_thermal_zone *tt_zone;
+
+ tt_zone = tt_get_tt_zone(arg);
+ if (IS_ERR(tt_zone))
+ return PTR_ERR(tt_zone);
+
+ tt_zone_unregister_tz(tt_zone);
+
+ tt_put_tt_zone(tt_zone);
+
+ return 0;
+}
+
+void tt_zone_cleanup(void)
+{
+ struct tt_thermal_zone *tt_zone, *aux;
+
+ list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) {
+ tt_zone_unregister_tz(tt_zone);
+
+ list_del(&tt_zone->list_node);
+
+ tt_zone_free(tt_zone);
+ }
+}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index e6669aeda1ff..073d02e21352 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -323,11 +323,10 @@ static void thermal_zone_broken_disable(struct thermal_zone_device *tz)
static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
unsigned long delay)
{
- if (delay)
- mod_delayed_work(system_freezable_power_efficient_wq,
- &tz->poll_queue, delay);
- else
- cancel_delayed_work(&tz->poll_queue);
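+ /* Round delays longer than one second so timer wakeups can be batched. */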
+ if (delay > HZ)
+ delay = round_jiffies_relative(delay);
+
+ mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay);
}
static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
@@ -360,9 +359,7 @@ static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
- if (tz->mode != THERMAL_DEVICE_ENABLED)
- thermal_zone_device_set_polling(tz, 0);
- else if (tz->passive > 0)
+ if (tz->passive > 0 && tz->passive_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
@@ -547,12 +544,10 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
struct thermal_trip_desc *td;
LIST_HEAD(way_down_list);
LIST_HEAD(way_up_list);
+ int low = -INT_MAX, high = INT_MAX;
int temp, ret;
- if (tz->suspended)
- return;
-
- if (!thermal_zone_device_is_enabled(tz))
+ if (tz->suspended || tz->mode != THERMAL_DEVICE_ENABLED)
return;
ret = __thermal_zone_get_temp(tz, &temp);
@@ -580,10 +575,17 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
tz->notify_event = event;
- for_each_trip_desc(tz, td)
+ for_each_trip_desc(tz, td) {
handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
- thermal_zone_set_trips(tz);
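+ /* Pick the trip thresholds closest to the zone temperature on either side. */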
+ if (td->threshold <= tz->temperature && td->threshold > low)
+ low = td->threshold;
+
+ if (td->threshold >= tz->temperature && td->threshold < high)
+ high = td->threshold;
+ }
+
+ thermal_zone_set_trips(tz, low, high);
list_sort(NULL, &way_up_list, thermal_trip_notify_cmp);
list_for_each_entry(td, &way_up_list, notify_list_node)
@@ -647,13 +649,6 @@ int thermal_zone_device_disable(struct thermal_zone_device *tz)
}
EXPORT_SYMBOL_GPL(thermal_zone_device_disable);
-int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
-{
- lockdep_assert_held(&tz->lock);
-
- return tz->mode == THERMAL_DEVICE_ENABLED;
-}
-
static bool thermal_zone_is_present(struct thermal_zone_device *tz)
{
return !list_empty(&tz->node);
@@ -757,15 +752,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
* @tz: pointer to struct thermal_zone_device
* @trip: trip point the cooling device is associated with in this zone.
* @cdev: pointer to struct thermal_cooling_device
- * @upper: the Maximum cooling state for this trip point.
- * THERMAL_NO_LIMIT means no upper limit,
- * and the cooling device can be in max_state.
- * @lower: the Minimum cooling state can be used for this trip point.
- * THERMAL_NO_LIMIT means no lower limit,
- * and the cooling device can be in cooling state 0.
- * @weight: The weight of the cooling device to be bound to the
- * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the
- * default value
+ * @cool_spec: cooling specification for @trip and @cdev
*
* This interface function binds a thermal cooling device to a given trip
* point of a thermal zone device.
@@ -773,55 +760,41 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
*
* Return: 0 on success, the proper error value otherwise.
*/
-int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight)
+ struct cooling_spec *cool_spec)
{
struct thermal_instance *dev;
struct thermal_instance *pos;
- struct thermal_zone_device *pos1;
- struct thermal_cooling_device *pos2;
bool upper_no_limit;
int result;
- list_for_each_entry(pos1, &thermal_tz_list, node) {
- if (pos1 == tz)
- break;
- }
- list_for_each_entry(pos2, &thermal_cdev_list, node) {
- if (pos2 == cdev)
- break;
- }
-
- if (tz != pos1 || cdev != pos2)
- return -EINVAL;
-
/* lower default 0, upper default max_state */
- lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
+ if (cool_spec->lower == THERMAL_NO_LIMIT)
+ cool_spec->lower = 0;
- if (upper == THERMAL_NO_LIMIT) {
- upper = cdev->max_state;
+ if (cool_spec->upper == THERMAL_NO_LIMIT) {
+ cool_spec->upper = cdev->max_state;
upper_no_limit = true;
} else {
upper_no_limit = false;
}
- if (lower > upper || upper > cdev->max_state)
+ if (cool_spec->lower > cool_spec->upper || cool_spec->upper > cdev->max_state)
return -EINVAL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- dev->tz = tz;
+
dev->cdev = cdev;
dev->trip = trip;
- dev->upper = upper;
+ dev->upper = cool_spec->upper;
dev->upper_no_limit = upper_no_limit;
- dev->lower = lower;
+ dev->lower = cool_spec->lower;
dev->target = THERMAL_NO_TARGET;
- dev->weight = weight;
+ dev->weight = cool_spec->weight;
result = ida_alloc(&tz->ida, GFP_KERNEL);
if (result < 0)
@@ -855,10 +828,9 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
if (result)
goto remove_trip_file;
- mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
- if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ if (pos->trip == trip && pos->cdev == cdev) {
result = -EEXIST;
break;
}
@@ -870,7 +842,6 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
}
mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
if (!result)
return 0;
@@ -886,21 +857,6 @@ free_mem:
kfree(dev);
return result;
}
-EXPORT_SYMBOL_GPL(thermal_bind_cdev_to_trip);
-
-int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
- int trip_index,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight)
-{
- if (trip_index < 0 || trip_index >= tz->num_trips)
- return -EINVAL;
-
- return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index].trip, cdev,
- upper, lower, weight);
-}
-EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
/**
* thermal_unbind_cdev_from_trip - unbind a cooling device from a thermal zone.
@@ -911,33 +867,28 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
* This interface function unbinds a thermal cooling device from a given
* trip point of a thermal zone device.
* This function is usually called in the thermal zone device .unbind callback.
- *
- * Return: 0 on success, the proper error value otherwise.
*/
-int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev)
+static void thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev)
{
struct thermal_instance *pos, *next;
- mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
- if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ if (pos->trip == trip && pos->cdev == cdev) {
list_del(&pos->tz_node);
list_del(&pos->cdev_node);
thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
goto unbind;
}
}
mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
- return -ENODEV;
+ return;
unbind:
device_remove_file(&tz->device, &pos->weight_attr);
@@ -945,20 +896,7 @@ unbind:
sysfs_remove_link(&tz->device.kobj, pos->name);
ida_free(&tz->ida, pos->id);
kfree(pos);
- return 0;
}
-EXPORT_SYMBOL_GPL(thermal_unbind_cdev_from_trip);
-
-int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
- int trip_index,
- struct thermal_cooling_device *cdev)
-{
- if (trip_index < 0 || trip_index >= tz->num_trips)
- return -EINVAL;
-
- return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index].trip, cdev);
-}
-EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
static void thermal_release(struct device *dev)
{
@@ -985,24 +923,41 @@ static struct class *thermal_class;
static inline
void print_bind_err_msg(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
struct thermal_cooling_device *cdev, int ret)
{
- dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
- tz->type, cdev->type, ret);
+ dev_err(&tz->device, "binding cdev %s to trip %d failed: %d\n",
+ cdev->type, thermal_zone_trip_id(tz, trip), ret);
}
-static void bind_cdev(struct thermal_cooling_device *cdev)
+static void thermal_zone_cdev_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
{
- int ret;
- struct thermal_zone_device *pos = NULL;
+ struct thermal_trip_desc *td;
- list_for_each_entry(pos, &thermal_tz_list, node) {
- if (pos->ops.bind) {
- ret = pos->ops.bind(pos, cdev);
- if (ret)
- print_bind_err_msg(pos, cdev, ret);
- }
+ if (!tz->ops.should_bind)
+ return;
+
+ mutex_lock(&tz->lock);
+
+ for_each_trip_desc(tz, td) {
+ struct thermal_trip *trip = &td->trip;
+ struct cooling_spec c = {
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ .weight = THERMAL_WEIGHT_DEFAULT
+ };
+ int ret;
+
+ if (!tz->ops.should_bind(tz, trip, cdev, &c))
+ continue;
+
+ ret = thermal_bind_cdev_to_trip(tz, trip, cdev, &c);
+ if (ret)
+ print_bind_err_msg(tz, trip, cdev, ret);
}
+
+ mutex_unlock(&tz->lock);
}
/**
@@ -1100,7 +1055,8 @@ __thermal_cooling_device_register(struct device_node *np,
list_add(&cdev->node, &thermal_cdev_list);
/* Update binding information for 'this' new cdev */
- bind_cdev(cdev);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ thermal_zone_cdev_bind(pos, cdev);
list_for_each_entry(pos, &thermal_tz_list, node)
if (atomic_cmpxchg(&pos->need_update, 1, 0))
@@ -1301,6 +1257,19 @@ unlock_list:
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_update);
+static void thermal_zone_cdev_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ struct thermal_trip_desc *td;
+
+ mutex_lock(&tz->lock);
+
+ for_each_trip_desc(tz, td)
+ thermal_unbind_cdev_from_trip(tz, &td->trip, cdev);
+
+ mutex_unlock(&tz->lock);
+}
+
/**
* thermal_cooling_device_unregister - removes a thermal cooling device
* @cdev: the thermal cooling device to remove.
@@ -1327,10 +1296,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
list_del(&cdev->node);
/* Unbind all thermal zones associated with 'this' cdev */
- list_for_each_entry(tz, &thermal_tz_list, node) {
- if (tz->ops.unbind)
- tz->ops.unbind(tz, cdev);
- }
+ list_for_each_entry(tz, &thermal_tz_list, node)
+ thermal_zone_cdev_unbind(tz, cdev);
mutex_unlock(&thermal_list_lock);
@@ -1338,32 +1305,6 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
-static void bind_tz(struct thermal_zone_device *tz)
-{
- int ret;
- struct thermal_cooling_device *pos = NULL;
-
- if (!tz->ops.bind)
- return;
-
- mutex_lock(&thermal_list_lock);
-
- list_for_each_entry(pos, &thermal_cdev_list, node) {
- ret = tz->ops.bind(tz, pos);
- if (ret)
- print_bind_err_msg(tz, pos, ret);
- }
-
- mutex_unlock(&thermal_list_lock);
-}
-
-static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms)
-{
- *delay_jiffies = msecs_to_jiffies(delay_ms);
- if (delay_ms > 1000)
- *delay_jiffies = round_jiffies(*delay_jiffies);
-}
-
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
const struct thermal_trip_desc *td;
@@ -1424,6 +1365,7 @@ thermal_zone_device_register_with_trips(const char *type,
unsigned int polling_delay)
{
const struct thermal_trip *trip = trips;
+ struct thermal_cooling_device *cdev;
struct thermal_zone_device *tz;
struct thermal_trip_desc *td;
int id;
@@ -1447,20 +1389,15 @@ thermal_zone_device_register_with_trips(const char *type,
}
if (!ops || !ops->get_temp) {
- pr_err("Thermal zone device ops not defined\n");
+ pr_err("Thermal zone device ops not defined or invalid\n");
return ERR_PTR(-EINVAL);
}
if (num_trips > 0 && !trips)
return ERR_PTR(-EINVAL);
- if (polling_delay) {
- if (passive_delay > polling_delay)
- return ERR_PTR(-EINVAL);
-
- if (!passive_delay)
- passive_delay = polling_delay;
- }
+ if (polling_delay && passive_delay > polling_delay)
+ return ERR_PTR(-EINVAL);
if (!thermal_class)
return ERR_PTR(-ENODEV);
@@ -1509,8 +1446,8 @@ thermal_zone_device_register_with_trips(const char *type,
td->threshold = INT_MAX;
}
- thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
- thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
+ tz->polling_delay_jiffies = msecs_to_jiffies(polling_delay);
+ tz->passive_delay_jiffies = msecs_to_jiffies(passive_delay);
tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
/* sys I/F */
@@ -1554,13 +1491,16 @@ thermal_zone_device_register_with_trips(const char *type,
}
mutex_lock(&thermal_list_lock);
+
mutex_lock(&tz->lock);
list_add_tail(&tz->node, &thermal_tz_list);
mutex_unlock(&tz->lock);
- mutex_unlock(&thermal_list_lock);
/* Bind cooling devices for this zone */
- bind_tz(tz);
+ list_for_each_entry(cdev, &thermal_cdev_list, node)
+ thermal_zone_cdev_bind(tz, cdev);
+
+ mutex_unlock(&thermal_list_lock);
thermal_zone_device_init(tz);
/* Update the new thermal zone and mark it as already updated. */
@@ -1652,8 +1592,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
/* Unbind all cdevs associated with 'this' thermal zone */
list_for_each_entry(cdev, &thermal_cdev_list, node)
- if (tz->ops.unbind)
- tz->ops.unbind(tz, cdev);
+ thermal_zone_cdev_unbind(tz, cdev);
mutex_unlock(&thermal_list_lock);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 4cf2b7230d04..50b858aa173a 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -15,8 +15,20 @@
#include "thermal_netlink.h"
#include "thermal_debugfs.h"
+struct thermal_attr {
+ struct device_attribute attr;
+ char name[THERMAL_NAME_LENGTH];
+};
+
+struct thermal_trip_attrs {
+ struct thermal_attr type;
+ struct thermal_attr temp;
+ struct thermal_attr hyst;
+};
+
struct thermal_trip_desc {
struct thermal_trip trip;
+ struct thermal_trip_attrs trip_attrs;
struct list_head notify_list_node;
int notify_temp;
int threshold;
@@ -56,9 +68,6 @@ struct thermal_governor {
* @device: &struct device for this thermal zone
* @removal: removal completion
* @resume: resume completion
- * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
- * @trip_type_attrs: attributes for trip points for sysfs: trip type
- * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @mode: current mode of this thermal zone
* @devdata: private pointer for device private data
* @num_trips: number of trip points the thermal zone supports
@@ -102,9 +111,6 @@ struct thermal_zone_device {
struct completion removal;
struct completion resume;
struct attribute_group trips_attribute_group;
- struct thermal_attr *trip_temp_attrs;
- struct thermal_attr *trip_type_attrs;
- struct thermal_attr *trip_hyst_attrs;
enum thermal_device_mode mode;
void *devdata;
int num_trips;
@@ -188,11 +194,6 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
struct thermal_zone_device *thermal_zone_get_by_id(int id);
-struct thermal_attr {
- struct device_attribute attr;
- char name[THERMAL_NAME_LENGTH];
-};
-
static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
{
return cdev->ops->get_requested_power && cdev->ops->state2power &&
@@ -204,11 +205,6 @@ void __thermal_cdev_update(struct thermal_cooling_device *cdev);
int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip);
-struct thermal_instance *
-get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev,
- int trip);
-
/*
* This structure is used to describe the behavior of
* a certain cooling device on a certain trip point
@@ -217,7 +213,6 @@ get_thermal_instance(struct thermal_zone_device *tz,
struct thermal_instance {
int id;
char name[THERMAL_NAME_LENGTH];
- struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
const struct thermal_trip *trip;
bool initialized;
@@ -259,14 +254,14 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz,
const char *thermal_trip_type_name(enum thermal_trip_type trip_type);
-void thermal_zone_set_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trips(struct thermal_zone_device *tz, int low, int high);
int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip);
-void thermal_zone_trip_updated(struct thermal_zone_device *tz,
- const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
void thermal_zone_trip_down(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int hyst);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *tz);
@@ -289,7 +284,4 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
unsigned long new_state) {}
#endif /* CONFIG_THERMAL_STATISTICS */
-/* device tree support */
-int thermal_zone_device_is_enabled(struct thermal_zone_device *tz);
-
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index aedb8369e2aa..dc374a7a1a65 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -39,18 +39,18 @@ int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip
return trend;
}
-static struct thermal_instance *get_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev,
- const struct thermal_trip *trip)
+static bool thermal_instance_present(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev,
+ const struct thermal_trip *trip)
{
struct thermal_instance *ti;
list_for_each_entry(ti, &tz->thermal_instances, tz_node) {
if (ti->trip == trip && ti->cdev == cdev)
- return ti;
+ return true;
}
- return NULL;
+ return false;
}
bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
@@ -62,7 +62,7 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
- ret = !!get_instance(tz, cdev, trip);
+ ret = thermal_instance_present(tz, cdev, trip);
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
@@ -71,24 +71,6 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_trip_is_bound_to_cdev);
-struct thermal_instance *
-get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int trip_index)
-{
- struct thermal_instance *ti;
-
- mutex_lock(&tz->lock);
- mutex_lock(&cdev->lock);
-
- ti = get_instance(tz, cdev, &tz->trips[trip_index].trip);
-
- mutex_unlock(&cdev->lock);
- mutex_unlock(&tz->lock);
-
- return ti;
-}
-EXPORT_SYMBOL(get_thermal_instance);
-
/**
* __thermal_zone_get_temp() - returns the temperature of a thermal zone
* @tz: a valid pointer to a struct thermal_zone_device
@@ -199,8 +181,6 @@ void __thermal_cdev_update(struct thermal_cooling_device *cdev)
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
- dev_dbg(&cdev->device, "zone%d->target=%lu\n",
- instance->tz->id, instance->target);
if (instance->target == THERMAL_NO_TARGET)
continue;
if (instance->target > target)
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 1f252692815a..a4caf7899f8e 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -20,37 +20,6 @@
/*** functions parsing device tree nodes ***/
-static int of_find_trip_id(struct device_node *np, struct device_node *trip)
-{
- struct device_node *trips;
- struct device_node *t;
- int i = 0;
-
- trips = of_get_child_by_name(np, "trips");
- if (!trips) {
- pr_err("Failed to find 'trips' node\n");
- return -EINVAL;
- }
-
- /*
- * Find the trip id point associated with the cooling device map
- */
- for_each_child_of_node(trips, t) {
-
- if (t == trip) {
- of_node_put(t);
- goto out;
- }
- i++;
- }
-
- i = -ENXIO;
-out:
- of_node_put(trips);
-
- return i;
-}
-
/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
* into the device tree binding of the 'trip' property type.
@@ -119,6 +88,8 @@ static int thermal_of_populate_trip(struct device_node *np,
trip->flags = THERMAL_TRIP_FLAG_RW_TEMP;
+ trip->priv = np;
+
return 0;
}
@@ -291,39 +262,9 @@ static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_devic
return tz_np;
}
-static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id,
- struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
-{
- struct of_phandle_args cooling_spec;
- int ret;
-
- ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
- index, &cooling_spec);
-
- if (ret < 0) {
- pr_err("Invalid cooling-device entry\n");
- return ret;
- }
-
- of_node_put(cooling_spec.np);
-
- if (cooling_spec.args_count < 2) {
- pr_err("wrong reference to cooling device, missing limits\n");
- return -EINVAL;
- }
-
- if (cooling_spec.np != cdev->np)
- return 0;
-
- ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev);
- if (ret)
- pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
-
- return ret;
-}
-
-static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
- struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
+static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct of_phandle_args cooling_spec;
int ret, weight = THERMAL_WEIGHT_DEFAULT;
@@ -335,104 +276,73 @@ static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
if (ret < 0) {
pr_err("Invalid cooling-device entry\n");
- return ret;
+ return false;
}
of_node_put(cooling_spec.np);
if (cooling_spec.args_count < 2) {
pr_err("wrong reference to cooling device, missing limits\n");
- return -EINVAL;
+ return false;
}
if (cooling_spec.np != cdev->np)
- return 0;
+ return false;
- ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1],
- cooling_spec.args[0],
- weight);
- if (ret)
- pr_err("Failed to bind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
+ c->lower = cooling_spec.args[0];
+ c->upper = cooling_spec.args[1];
+ c->weight = weight;
- return ret;
+ return true;
}
-static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np,
- struct thermal_zone_device *tz, struct thermal_cooling_device *cdev,
- int (*action)(struct device_node *, int, int,
- struct thermal_zone_device *, struct thermal_cooling_device *))
-{
- struct device_node *tr_np;
- int count, i, trip_id;
-
- tr_np = of_parse_phandle(map_np, "trip", 0);
- if (!tr_np)
- return -ENODEV;
-
- trip_id = of_find_trip_id(tz_np, tr_np);
- if (trip_id < 0)
- return trip_id;
-
- count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells");
- if (count <= 0) {
- pr_err("Add a cooling_device property with at least one device\n");
- return -ENOENT;
- }
-
- /*
- * At this point, we don't want to bail out when there is an
- * error, we will try to bind/unbind as many as possible
- * cooling devices
- */
- for (i = 0; i < count; i++)
- action(map_np, i, trip_id, tz, cdev);
-
- return 0;
-}
-
-static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev,
- int (*action)(struct device_node *, int, int,
- struct thermal_zone_device *, struct thermal_cooling_device *))
+static bool thermal_of_should_bind(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
{
struct device_node *tz_np, *cm_np, *child;
- int ret = 0;
+ bool result = false;
tz_np = thermal_of_zone_get_by_name(tz);
if (IS_ERR(tz_np)) {
pr_err("Failed to get node tz by name\n");
- return PTR_ERR(tz_np);
+ return false;
}
cm_np = of_get_child_by_name(tz_np, "cooling-maps");
if (!cm_np)
goto out;
+ /* Look up the trip and the cdev in the cooling maps. */
for_each_child_of_node(cm_np, child) {
- ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
- if (ret) {
- of_node_put(child);
- break;
+ struct device_node *tr_np;
+ int count, i;
+
+ tr_np = of_parse_phandle(child, "trip", 0);
+ if (tr_np != trip->priv)
+ continue;
+
+ /* The trip has been found, look up the cdev. */
+ count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
+ if (count <= 0)
+ pr_err("Add a cooling_device property with at least one device\n");
+
+ for (i = 0; i < count; i++) {
+ result = thermal_of_get_cooling_spec(child, i, cdev, c);
+ if (result)
+ break;
}
+
+ of_node_put(child);
+ break;
}
of_node_put(cm_np);
out:
of_node_put(tz_np);
- return ret;
-}
-
-static int thermal_of_bind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
-{
- return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind);
-}
-
-static int thermal_of_unbind(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev)
-{
- return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind);
+ return result;
}
/**
@@ -504,8 +414,7 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
thermal_of_parameters_init(np, &tzp);
- of_ops.bind = thermal_of_bind;
- of_ops.unbind = thermal_of_unbind;
+ of_ops.should_bind = thermal_of_should_bind;
ret = of_property_read_string(np, "critical-action", &action);
if (!ret)
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 72b302bf914e..1838aa729bb5 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/container_of.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -52,7 +53,7 @@ mode_show(struct device *dev, struct device_attribute *attr, char *buf)
int enabled;
mutex_lock(&tz->lock);
- enabled = thermal_zone_device_is_enabled(tz);
+ enabled = tz->mode == THERMAL_DEVICE_ENABLED;
mutex_unlock(&tz->lock);
return sprintf(buf, "%s\n", enabled ? "enabled" : "disabled");
@@ -78,51 +79,58 @@ mode_store(struct device *dev, struct device_attribute *attr,
return count;
}
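+/*
+ * Map a trip point sysfs attribute back to the thermal_trip that owns it,
+ * via the enclosing thermal_trip_desc.
+ */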
+#define thermal_trip_of_attr(_ptr_, _attr_) \
+ ({ \
+ struct thermal_trip_desc *td; \
+ \
+ td = container_of(_ptr_, struct thermal_trip_desc, \
+ trip_attrs._attr_.attr); \
+ &td->trip; \
+ })
+
static ssize_t
trip_point_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip_id;
+ struct thermal_trip *trip = thermal_trip_of_attr(attr, type);
- if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
- return -EINVAL;
-
- return sprintf(buf, "%s\n", thermal_trip_type_name(tz->trips[trip_id].trip.type));
+ return sprintf(buf, "%s\n", thermal_trip_type_name(trip->type));
}
static ssize_t
trip_point_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct thermal_trip *trip = thermal_trip_of_attr(attr, temp);
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip *trip;
- int trip_id, ret;
- int temp;
+ int ret, temp;
ret = kstrtoint(buf, 10, &temp);
if (ret)
return -EINVAL;
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
- return -EINVAL;
-
mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id].trip;
-
- if (temp != trip->temperature) {
- if (tz->ops.set_trip_temp) {
- ret = tz->ops.set_trip_temp(tz, trip, temp);
- if (ret)
- goto unlock;
- }
+ if (temp == trip->temperature)
+ goto unlock;
- thermal_zone_set_trip_temp(tz, trip, temp);
+ /* Arrange the condition to avoid integer overflows. */
+ if (temp != THERMAL_TEMP_INVALID &&
+ temp <= trip->hysteresis + THERMAL_TEMP_INVALID) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+ if (tz->ops.set_trip_temp) {
+ ret = tz->ops.set_trip_temp(tz, trip, temp);
+ if (ret)
+ goto unlock;
}
+ thermal_zone_set_trip_temp(tz, trip, temp);
+
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+
unlock:
mutex_unlock(&tz->lock);
@@ -133,57 +141,61 @@ static ssize_t
trip_point_temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip_id;
-
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
- return -EINVAL;
+ struct thermal_trip *trip = thermal_trip_of_attr(attr, temp);
- return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.temperature));
+ return sprintf(buf, "%d\n", READ_ONCE(trip->temperature));
}
static ssize_t
trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst);
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip *trip;
- int trip_id, ret;
- int hyst;
+ int ret, hyst;
ret = kstrtoint(buf, 10, &hyst);
if (ret || hyst < 0)
return -EINVAL;
- if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
- return -EINVAL;
-
mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id].trip;
+ if (hyst == trip->hysteresis)
+ goto unlock;
- if (hyst != trip->hysteresis) {
+ /*
+ * Allow the hysteresis to be updated while the temperature is invalid,
+ * so that user space need not readjust it after a valid temperature has
+ * been set, but in that case just change the value and do nothing else.
+ */
+ if (trip->temperature == THERMAL_TEMP_INVALID) {
WRITE_ONCE(trip->hysteresis, hyst);
+ goto unlock;
+ }
- thermal_zone_trip_updated(tz, trip);
+ if (trip->temperature - hyst <= THERMAL_TEMP_INVALID) {
+ ret = -EINVAL;
+ goto unlock;
}
+ thermal_zone_set_trip_hyst(tz, trip, hyst);
+
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+
+unlock:
mutex_unlock(&tz->lock);
- return count;
+ return ret ? ret : count;
}
static ssize_t
trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip_id;
+ struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst);
- if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
- return -EINVAL;
-
- return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.hysteresis));
+ return sprintf(buf, "%d\n", READ_ONCE(trip->hysteresis));
}
static ssize_t
@@ -382,87 +394,55 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = {
*/
static int create_trip_attrs(struct thermal_zone_device *tz)
{
- const struct thermal_trip_desc *td;
+ struct thermal_trip_desc *td;
struct attribute **attrs;
-
- /* This function works only for zones with at least one trip */
- if (tz->num_trips <= 0)
- return -EINVAL;
-
- tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs),
- GFP_KERNEL);
- if (!tz->trip_type_attrs)
- return -ENOMEM;
-
- tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs),
- GFP_KERNEL);
- if (!tz->trip_temp_attrs) {
- kfree(tz->trip_type_attrs);
- return -ENOMEM;
- }
-
- tz->trip_hyst_attrs = kcalloc(tz->num_trips,
- sizeof(*tz->trip_hyst_attrs),
- GFP_KERNEL);
- if (!tz->trip_hyst_attrs) {
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- return -ENOMEM;
- }
+ int i;
attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
- if (!attrs) {
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
+ if (!attrs)
return -ENOMEM;
- }
+ i = 0;
for_each_trip_desc(tz, td) {
- int indx = thermal_zone_trip_id(tz, &td->trip);
+ struct thermal_trip_attrs *trip_attrs = &td->trip_attrs;
/* create trip type attribute */
- snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_type", indx);
+ snprintf(trip_attrs->type.name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_type", i);
- sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
- tz->trip_type_attrs[indx].attr.attr.name =
- tz->trip_type_attrs[indx].name;
- tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
- attrs[indx] = &tz->trip_type_attrs[indx].attr.attr;
+ sysfs_attr_init(&trip_attrs->type.attr.attr);
+ trip_attrs->type.attr.attr.name = trip_attrs->type.name;
+ trip_attrs->type.attr.attr.mode = S_IRUGO;
+ trip_attrs->type.attr.show = trip_point_type_show;
+ attrs[i] = &trip_attrs->type.attr.attr;
/* create trip temp attribute */
- snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_temp", indx);
-
- sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
- tz->trip_temp_attrs[indx].attr.attr.name =
- tz->trip_temp_attrs[indx].name;
- tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
+ snprintf(trip_attrs->temp.name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_temp", i);
+
+ sysfs_attr_init(&trip_attrs->temp.attr.attr);
+ trip_attrs->temp.attr.attr.name = trip_attrs->temp.name;
+ trip_attrs->temp.attr.attr.mode = S_IRUGO;
+ trip_attrs->temp.attr.show = trip_point_temp_show;
if (td->trip.flags & THERMAL_TRIP_FLAG_RW_TEMP) {
- tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
- tz->trip_temp_attrs[indx].attr.store =
- trip_point_temp_store;
+ trip_attrs->temp.attr.attr.mode |= S_IWUSR;
+ trip_attrs->temp.attr.store = trip_point_temp_store;
}
- attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr;
+ attrs[i + tz->num_trips] = &trip_attrs->temp.attr.attr;
- snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
- "trip_point_%d_hyst", indx);
+ snprintf(trip_attrs->hyst.name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_hyst", i);
- sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
- tz->trip_hyst_attrs[indx].attr.attr.name =
- tz->trip_hyst_attrs[indx].name;
- tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
- tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
+ sysfs_attr_init(&trip_attrs->hyst.attr.attr);
+ trip_attrs->hyst.attr.attr.name = trip_attrs->hyst.name;
+ trip_attrs->hyst.attr.attr.mode = S_IRUGO;
+ trip_attrs->hyst.attr.show = trip_point_hyst_show;
if (td->trip.flags & THERMAL_TRIP_FLAG_RW_HYST) {
- tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
- tz->trip_hyst_attrs[indx].attr.store =
- trip_point_hyst_store;
+ trip_attrs->hyst.attr.attr.mode |= S_IWUSR;
+ trip_attrs->hyst.attr.store = trip_point_hyst_store;
}
- attrs[indx + tz->num_trips * 2] =
- &tz->trip_hyst_attrs[indx].attr.attr;
+ attrs[i + 2 * tz->num_trips] = &trip_attrs->hyst.attr.attr;
+ i++;
}
attrs[tz->num_trips * 3] = NULL;
@@ -479,13 +459,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz)
*/
static void destroy_trip_attrs(struct thermal_zone_device *tz)
{
- if (!tz)
- return;
-
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
- kfree(tz->trips_attribute_group.attrs);
+ if (tz)
+ kfree(tz->trips_attribute_group.attrs);
}
int thermal_zone_create_device_groups(struct thermal_zone_device *tz)
@@ -887,13 +862,12 @@ void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
ssize_t
trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
struct thermal_instance *instance;
- instance =
- container_of(attr, struct thermal_instance, attr);
+ instance = container_of(attr, struct thermal_instance, attr);
- return sprintf(buf, "%d\n",
- thermal_zone_trip_id(instance->tz, instance->trip));
+ return sprintf(buf, "%d\n", thermal_zone_trip_id(tz, instance->trip));
}
ssize_t
@@ -909,6 +883,7 @@ weight_show(struct device *dev, struct device_attribute *attr, char *buf)
ssize_t weight_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
struct thermal_instance *instance;
int ret, weight;
@@ -919,14 +894,13 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr,
instance = container_of(attr, struct thermal_instance, weight_attr);
/* Don't race with governors using the 'weight' value */
- mutex_lock(&instance->tz->lock);
+ mutex_lock(&tz->lock);
instance->weight = weight;
- thermal_governor_update_tz(instance->tz,
- THERMAL_INSTANCE_WEIGHT_CHANGED);
+ thermal_governor_update_tz(tz, THERMAL_INSTANCE_WEIGHT_CHANGED);
- mutex_unlock(&instance->tz->lock);
+ mutex_unlock(&tz->lock);
return count;
}
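The refactored create_trip_attrs() above replaces the three per-trip allocations with a single flat, NULL-terminated attribute array, indexed with a stride of num_trips: type entries at [i], temp at [i + num_trips], hyst at [i + 2 * num_trips]. A minimal sketch of that layout, with illustrative names (alloc_trip_attr_array is not from the patch):

	#include <linux/slab.h>
	#include <linux/sysfs.h>

	static struct attribute **alloc_trip_attr_array(struct attribute **type,
							struct attribute **temp,
							struct attribute **hyst,
							int num_trips)
	{
		/* Three attributes per trip plus the NULL terminator. */
		struct attribute **attrs = kcalloc(num_trips * 3 + 1,
						   sizeof(*attrs), GFP_KERNEL);
		int i;

		if (!attrs)
			return NULL;

		for (i = 0; i < num_trips; i++) {
			attrs[i] = type[i];
			attrs[i + num_trips] = temp[i];
			attrs[i + 2 * num_trips] = hyst[i];
		}
		/* kcalloc() already zeroed the terminator slot. */

		return attrs;
	}

A single kfree(tz->trips_attribute_group.attrs) then suffices in destroy_trip_attrs(), which is exactly what the hunk above switches to.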
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 06a0554ddc38..b53fac333ec5 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -55,31 +55,8 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_for_each_trip);
-int thermal_zone_get_num_trips(struct thermal_zone_device *tz)
+void thermal_zone_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- return tz->num_trips;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
-
-/**
- * thermal_zone_set_trips - Computes the next trip points for the driver
- * @tz: a pointer to a thermal zone device structure
- *
- * The function computes the next temperature boundaries by browsing
- * the trip points. The result is the closer low and high trip points
- * to the current temperature. These values are passed to the backend
- * driver to let it set its own notification mechanism (usually an
- * interrupt).
- *
- * This function must be called with tz->lock held. Both tz and tz->ops
- * must be valid pointers.
- *
- * It does not return a value
- */
-void thermal_zone_set_trips(struct thermal_zone_device *tz)
-{
- const struct thermal_trip_desc *td;
- int low = -INT_MAX, high = INT_MAX;
int ret;
lockdep_assert_held(&tz->lock);
@@ -87,14 +64,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
if (!tz->ops.set_trips)
return;
- for_each_trip_desc(tz, td) {
- if (td->threshold <= tz->temperature && td->threshold > low)
- low = td->threshold;
-
- if (td->threshold >= tz->temperature && td->threshold < high)
- high = td->threshold;
- }
-
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
return;
@@ -114,20 +83,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
}
-int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip)
-{
- if (!tz || !trip || trip_id < 0 || trip_id >= tz->num_trips)
- return -EINVAL;
-
- mutex_lock(&tz->lock);
- *trip = tz->trips[trip_id].trip;
- mutex_unlock(&tz->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
-
int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
@@ -138,11 +93,11 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
return trip_to_trip_desc(trip) - tz->trips;
}
-void thermal_zone_trip_updated(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int hyst)
{
+ WRITE_ONCE(trip->hysteresis, hyst);
thermal_notify_tz_trip_change(tz, trip);
- __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
}
void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
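The new thermal_zone_set_trip_hyst() publishes the hysteresis with WRITE_ONCE() so that readers sampling the field without the zone lock see a single, untorn store. A generic sketch of the pairing, assuming a READ_ONCE() on the reader side:

	#include <linux/compiler.h>

	struct trip_sketch { int hysteresis; };

	/* Writer: publish the new value as one tear-free store. */
	static void sketch_set_hyst(struct trip_sketch *t, int hyst)
	{
		WRITE_ONCE(t->hysteresis, hyst);
	}

	/* Lockless reader: pair with READ_ONCE(). */
	static int sketch_get_hyst(const struct trip_sketch *t)
	{
		return READ_ONCE(t->hysteresis);
	}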
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index 1f4bbaf31675..46263c1da8b6 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -336,10 +336,6 @@ struct ti_bandgap_data {
struct ti_temp_sensor sensors[];
};
-int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot);
-int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val);
-int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold);
-int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val);
int ti_bandgap_read_update_interval(struct ti_bandgap *bgp, int id,
int *interval);
int ti_bandgap_write_update_interval(struct ti_bandgap *bgp, int id,
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 29e4b83e0376..5f9f06911795 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -423,11 +423,11 @@ static int univ8250_console_setup(struct console *co, char *options)
port = &serial8250_ports[co->index].port;
/* link port to console */
- port->cons = co;
+ uart_port_set_cons(port, co);
retval = serial8250_console_setup(port, options, false);
if (retval != 0)
- port->cons = NULL;
+ uart_port_set_cons(port, NULL);
return retval;
}
@@ -485,7 +485,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
continue;
co->index = i;
- port->cons = co;
+ uart_port_set_cons(port, co);
return serial8250_console_setup(port, options, true);
}
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8b1644f5411e..7d0134ecd82f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2480,7 +2480,7 @@ static int pl011_console_match(struct console *co, char *name, int idx,
continue;
co->index = i;
- port->cons = co;
+ uart_port_set_cons(port, co);
return pl011_console_setup(co, options);
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 5bea3af46abc..1e3e28e364df 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -3176,8 +3176,15 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
state->uart_port = uport;
uport->state = state;
+ /*
+ * If this port is in use as a console then the spinlock is already
+ * initialised.
+ */
+ if (!uart_console_registered(uport))
+ uart_port_spin_lock_init(uport);
+
state->pm_state = UART_PM_STATE_UNDEFINED;
- uport->cons = drv->cons;
+ uart_port_set_cons(uport, drv->cons);
uport->minor = drv->tty_driver->minor_start + uport->line;
uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
drv->tty_driver->name_base + uport->line);
@@ -3186,13 +3193,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
goto out;
}
- /*
- * If this port is in use as a console then the spinlock is already
- * initialised.
- */
- if (!uart_console_registered(uport))
- uart_port_spin_lock_init(uport);
-
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
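All three call sites above stop assigning port->cons directly and go through uart_port_set_cons(). A plausible shape for such a helper, as a sketch under the assumption that the pointer must be updated under the port lock (the real definition in <linux/serial_core.h> may differ in detail):

	#include <linux/serial_core.h>

	static inline void uart_port_set_cons_sketch(struct uart_port *up,
						     struct console *con)
	{
		unsigned long flags;

		uart_port_lock_irqsave(up, &flags);
		up->cons = con;
		uart_port_unlock_irqrestore(up, flags);
	}

Taking the lock around the assignment lets code that already holds the port lock rely on a stable cons pointer, which matters once nbcon consoles synchronize on it. The serial_core.c hunk moves the spinlock initialisation earlier for the same reason: the lock must exist before uart_port_set_cons() can take it.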
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 14f8f00fdcf9..930b04e3d148 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -531,6 +531,7 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
NULL, /* P */
NULL, /* Q */
&sysrq_replay_logs_op, /* R */
+ /* S: May be registered by sched_ext for resetting */
NULL, /* S */
NULL, /* T */
NULL, /* U */
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 407b0d87b7c1..abc2708d4ac5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2225,6 +2225,12 @@ static int __tty_fasync(int fd, struct file *filp, int on)
if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
goto out;
+ if (on) {
+ retval = file_f_owner_allocate(filp);
+ if (retval)
+ goto out;
+ }
+
retval = fasync_helper(fd, filp, on, &tty->fasync);
if (retval <= 0)
goto out;
@@ -3567,7 +3573,7 @@ static ssize_t show_cons_active(struct device *dev,
for_each_console(c) {
if (!c->device)
continue;
- if (!c->write)
+ if (!(c->flags & CON_NBCON) && !c->write)
continue;
if ((c->flags & CON_ENABLED) == 0)
continue;
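__tty_fasync() above allocates the file's f_owner before calling fasync_helper(), so a registered async entry can never become visible without an owner to signal. The same ordering applies to any driver fasync method; a hedged sketch (the mydrv_* names are hypothetical, file_f_owner_allocate() is the helper introduced alongside this change):

	#include <linux/fs.h>

	struct mydrv { struct fasync_struct *fasync; };

	static int mydrv_fasync(int fd, struct file *filp, int on)
	{
		struct mydrv *d = filp->private_data;
		int ret;

		if (on) {
			/* f_owner is allocated lazily now; set it up before
			 * fasync_helper() makes the entry reachable. */
			ret = file_f_owner_allocate(filp);
			if (ret)
				return ret;
		}

		return fasync_helper(fd, filp, on, &d->fasync);
	}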
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 169540417079..55db38e75cc4 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -3,6 +3,7 @@
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <ufs/ufshcd.h>
#include "ufs-fault-injection.h"
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index e80a32421a8c..fe313800aed0 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
+static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
+{
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_hold(hba);
+ *val = ufshcd_readl(hba, reg);
+ ufshcd_release(hba);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return 0;
+}
+
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev,
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba);
- ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-
- ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
+ ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ if (ret)
+ return ret;
-out:
- up(&hba->host_sem);
- return ret;
+ return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -519,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = {
.attrs = ufs_sysfs_capabilities_attrs,
};
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
+}
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static ssize_t man_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(product_id);
+static DEVICE_ATTR_RO(man_id);
+
+static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_man_id.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_ufshci_group = {
+ .name = "ufshci_capabilities",
+ .attrs = ufs_sysfs_ufshci_cap_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1502,6 +1560,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
+ &ufs_sysfs_ufshci_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_power_info_group,
&ufs_sysfs_device_descriptor_group,
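The new ufshci_capabilities group exposes three read-only controller registers. A userspace sketch for reading one of them; the exact sysfs path is system-specific, and the one below is an assumption rather than something the patch defines:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path; locate the real UFS host device first. */
		FILE *f = fopen("/sys/bus/platform/devices/ufshc/"
				"ufshci_capabilities/version", "r");
		char buf[32];

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("UFSHCI version: %s", buf);
		fclose(f);
		return 0;
	}

Each read goes through ufshcd_read_hci_reg(), which takes host_sem and a runtime-PM reference, so polling these files wakes the controller briefly.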
diff --git a/include/trace/events/ufs.h b/drivers/ufs/core/ufs_trace.h
index c4e209fbdfbb..84deca2b841d 100644
--- a/include/trace/events/ufs.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -9,6 +9,7 @@
#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_UFS_H
+#include <ufs/ufs.h>
#include <linux/tracepoint.h>
#define str_opcode(opcode) \
@@ -395,5 +396,10 @@ TRACE_EVENT(ufshcd_exception_event,
#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/core
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ufs_trace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a6f818cdef0e..24a32e2fd75e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -39,7 +39,7 @@
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
+#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
@@ -51,8 +51,10 @@
/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
+enum {
+ UIC_CMD_TIMEOUT_DEFAULT = 500,
+ UIC_CMD_TIMEOUT_MAX = 2000,
+};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -116,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+ UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+ .set = uic_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+	"UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds, inclusive");
+
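The uic_cmd_timeout parameter above shows the standard pattern for a module parameter that must stay within a range: route writes through param_set_uint_minmax() via kernel_param_ops. A standalone sketch with illustrative names:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static unsigned int my_timeout = 500;

	static int my_timeout_set(const char *val, const struct kernel_param *kp)
	{
		/* Reject values outside [500, 2000] at write time. */
		return param_set_uint_minmax(val, kp, 500, 2000);
	}

	static const struct kernel_param_ops my_timeout_ops = {
		.set = my_timeout_set,
		.get = param_get_uint,
	};

	module_param_cb(my_timeout, &my_timeout_ops, &my_timeout, 0644);
	MODULE_PARM_DESC(my_timeout, "Illustrative clamped timeout (ms)");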
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -1785,8 +1804,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
if (!ufshcd_is_clkscaling_supported(hba))
return;
@@ -1798,9 +1815,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_scaling.workq = alloc_ordered_workqueue(
+ "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
hba->clk_scaling.is_initialized = true;
}
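alloc_ordered_workqueue() accepts a printf-style format string, which is what lets the hunks above (and the clk-gating and eh_wq hunks below) drop the stack-local name buffer and snprintf(). A reduced sketch:

	#include <linux/workqueue.h>

	static struct workqueue_struct *make_example_wq(int host_no)
	{
		/* The name is formatted by the allocator itself; WQ_MEM_RECLAIM
		 * guarantees a rescuer thread for forward progress. */
		return alloc_ordered_workqueue("ufs_example_%d",
					       WQ_MEM_RECLAIM, host_no);
	}

Switching from create_singlethread_workqueue() also makes the ordering guarantee explicit rather than incidental.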
@@ -2124,8 +2140,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -2135,10 +2149,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
ufshcd_init_clk_gating_sysfs(hba);
@@ -2452,7 +2465,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
- 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ 500, uic_cmd_timeout * 1000, false, hba,
REG_CONTROLLER_STATUS);
return ret == 0;
}
@@ -2512,7 +2525,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
@@ -4285,7 +4298,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
@@ -5876,12 +5889,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
- * @status: bkops_status value
*
 * Read the bkops_status from the UFS device and enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to hba->urgent_bkops_lvl;
+ * disable it otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
@@ -5889,11 +5901,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
- int err;
+ enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
+ int err;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
@@ -5915,23 +5927,6 @@ out:
return err;
}
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -9692,7 +9687,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* allow background operations if bkops status shows
* that performance might be impacted.
*/
- ret = ufshcd_urgent_bkops(hba);
+ ret = ufshcd_bkops_ctrl(hba);
if (ret) {
/*
* If return err in suspend flow, IO will hang.
@@ -9881,7 +9876,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
- ufshcd_urgent_bkops(hba);
+ ufshcd_bkops_ctrl(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
@@ -10395,7 +10390,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
@@ -10462,9 +10456,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+ hba->host->host_no);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index a3e69ecafd27..1f4f30d6cb42 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -31,8 +31,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
- int len = 0;
- size_t sz = 0;
+ ssize_t sz = 0;
if (!np)
goto out;
@@ -50,15 +49,12 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
if (cnt <= 0)
goto out;
- if (!of_get_property(np, "freq-table-hz", &len)) {
+ sz = of_property_count_u32_elems(np, "freq-table-hz");
+ if (sz <= 0) {
dev_info(dev, "freq-table-hz property not specified\n");
goto out;
}
- if (len <= 0)
- goto out;
-
- sz = len / sizeof(*clkfreq);
if (sz != 2 * cnt) {
dev_err(dev, "%s len mismatch\n", "freq-table-hz");
ret = -EINVAL;
@@ -272,10 +268,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
const char **clk_names;
int cnt, i, ret;
- if (!of_find_property(np, "operating-points-v2", NULL))
+ if (!of_property_present(np, "operating-points-v2"))
return 0;
- if (of_find_property(np, "freq-table-hz", NULL)) {
+ if (of_property_present(np, "freq-table-hz")) {
dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n",
__func__);
return -EINVAL;
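of_property_present() and of_property_count_u32_elems() replace the open-coded of_get_property()/of_find_property() length arithmetic. A sketch of the validation the hunks above perform, generalized (check_freq_table is an illustrative name):

	#include <linux/of.h>

	static int check_freq_table(struct device_node *np, int nclocks)
	{
		int sz;

		if (!of_property_present(np, "freq-table-hz"))
			return -ENOENT;

		/* Expect a min/max pair of u32 cells per clock. */
		sz = of_property_count_u32_elems(np, "freq-table-hz");
		if (sz != 2 * nclocks)
			return -EINVAL;

		return 0;
	}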
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 06d88e9caeaf..d1e7c487ddfb 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1340,12 +1340,6 @@ static void anx7411_get_gpio_irq(struct anx7411_data *ctx)
dev_err(dev, "failed to get GPIO IRQ\n");
}
-static enum power_supply_usb_type anx7411_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_PPS,
-};
-
static enum power_supply_property anx7411_psy_props[] = {
POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_ONLINE,
@@ -1423,8 +1417,9 @@ static int anx7411_psy_register(struct anx7411_data *ctx)
psy_desc->name = psy_name;
psy_desc->type = POWER_SUPPLY_TYPE_USB;
- psy_desc->usb_types = anx7411_psy_usb_types;
- psy_desc->num_usb_types = ARRAY_SIZE(anx7411_psy_usb_types);
+ psy_desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
psy_desc->properties = anx7411_psy_props;
psy_desc->num_properties = ARRAY_SIZE(anx7411_psy_props);
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 56989a0d0f43..46b4c5c3a6be 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/usb/pd.h>
@@ -18,8 +19,6 @@
#include <linux/debugfs.h>
#include <linux/usb.h>
-#include <asm/intel_scu_ipc.h>
-
#define PMC_USBC_CMD 0xa7
/* Response status bits */
diff --git a/drivers/usb/typec/rt1719.c b/drivers/usb/typec/rt1719.c
index be02d420920e..0b0c23a0b014 100644
--- a/drivers/usb/typec/rt1719.c
+++ b/drivers/usb/typec/rt1719.c
@@ -109,12 +109,6 @@ struct rt1719_data {
u16 conn_stat;
};
-static const enum power_supply_usb_type rt1719_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_PPS
-};
-
static const enum power_supply_property rt1719_psy_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
@@ -572,8 +566,9 @@ static int devm_rt1719_psy_register(struct rt1719_data *data)
data->psy_desc.name = psy_name;
data->psy_desc.type = POWER_SUPPLY_TYPE_USB;
- data->psy_desc.usb_types = rt1719_psy_usb_types;
- data->psy_desc.num_usb_types = ARRAY_SIZE(rt1719_psy_usb_types);
+ data->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
data->psy_desc.properties = rt1719_psy_properties;
data->psy_desc.num_properties = ARRAY_SIZE(rt1719_psy_properties);
data->psy_desc.get_property = rt1719_psy_get_property;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4b02d6474259..fc619478200f 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7483,12 +7483,6 @@ static int tcpm_psy_prop_writeable(struct power_supply *psy,
}
}
-static enum power_supply_usb_type tcpm_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_PPS,
-};
-
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
static int devm_tcpm_psy_register(struct tcpm_port *port)
@@ -7509,8 +7503,9 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
port_dev_name);
port->psy_desc.name = psy_name;
port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
- port->psy_desc.usb_types = tcpm_psy_usb_types;
- port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
+ port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
port->psy_desc.properties = tcpm_psy_props;
port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
port->psy_desc.get_property = tcpm_psy_get_prop;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 256b0c054e9a..7ee721a877c1 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -150,11 +150,6 @@ static enum power_supply_property tps6598x_psy_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-static enum power_supply_usb_type tps6598x_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
-};
-
static const char *tps6598x_psy_name_prefix = "tps6598x-source-psy-";
/*
@@ -827,8 +822,8 @@ static int devm_tps6598_psy_register(struct tps6598x *tps)
tps->psy_desc.name = psy_name;
tps->psy_desc.type = POWER_SUPPLY_TYPE_USB;
- tps->psy_desc.usb_types = tps6598x_psy_usb_types;
- tps->psy_desc.num_usb_types = ARRAY_SIZE(tps6598x_psy_usb_types);
+ tps->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD);
tps->psy_desc.properties = tps6598x_psy_props;
tps->psy_desc.num_properties = ARRAY_SIZE(tps6598x_psy_props);
tps->psy_desc.get_property = tps6598x_psy_get_prop;
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
index e623d80e177c..1c631c7855a9 100644
--- a/drivers/usb/typec/ucsi/psy.c
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -254,12 +254,6 @@ static int ucsi_psy_get_prop(struct power_supply *psy,
}
}
-static enum power_supply_usb_type ucsi_psy_usb_types[] = {
- POWER_SUPPLY_USB_TYPE_C,
- POWER_SUPPLY_USB_TYPE_PD,
- POWER_SUPPLY_USB_TYPE_PD_PPS,
-};
-
int ucsi_register_port_psy(struct ucsi_connector *con)
{
struct power_supply_config psy_cfg = {};
@@ -276,8 +270,9 @@ int ucsi_register_port_psy(struct ucsi_connector *con)
con->psy_desc.name = psy_name;
con->psy_desc.type = POWER_SUPPLY_TYPE_USB;
- con->psy_desc.usb_types = ucsi_psy_usb_types;
- con->psy_desc.num_usb_types = ARRAY_SIZE(ucsi_psy_usb_types);
+ con->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
con->psy_desc.properties = ucsi_psy_props;
con->psy_desc.num_properties = ARRAY_SIZE(ucsi_psy_props);
con->psy_desc.get_property = ucsi_psy_get_prop;
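All five Type-C drivers above follow the same conversion: power_supply_desc.usb_types changed from an array plus num_usb_types to a single bitmask, so the static arrays disappear and membership becomes a bit test. A sketch of consuming the new representation:

	#include <linux/bits.h>
	#include <linux/power_supply.h>
	#include <linux/printk.h>

	static void report_pd_support(const struct power_supply_desc *desc)
	{
		/* Bit test instead of scanning an array of enum values. */
		if (desc->usb_types & BIT(POWER_SUPPLY_USB_TYPE_PD))
			pr_info("%s advertises USB PD\n", desc->name);
	}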
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 5265d09fc1c4..559fb9d3271f 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -11,8 +11,7 @@ if VDPA
config VDPA_SIM
tristate "vDPA device simulator core"
- depends on RUNTIME_TESTING_MENU && HAS_DMA
- select DMA_OPS
+ depends on RUNTIME_TESTING_MENU
select VHOST_RING
select IOMMU_IOVA
help
@@ -36,7 +35,12 @@ config VDPA_SIM_BLOCK
config VDPA_USER
tristate "VDUSE (vDPA Device in Userspace) support"
depends on EVENTFD && MMU && HAS_DMA
- select DMA_OPS
+ #
+ # This driver incorrectly tries to override the dma_ops. It should
+ # never have done that, but for now keep it working on architectures
+ # that use dma ops
+ #
+ depends on ARCH_HAS_DMA_OPS
select VHOST_IOTLB
select IOMMU_IOVA
help
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 0f347717021a..aa36de361c10 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -112,15 +112,12 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
-void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw);
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
-int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 50aac8fe57ef..2cedf7e2dbc4 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -83,10 +83,28 @@ enum {
MLX5_VDPA_NUM_AS = 2
};
+struct mlx5_vdpa_mr_resources {
+ struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+
+ /* Pre-deletion mr list */
+ struct list_head mr_list_head;
+
+ /* Deferred mr list */
+ struct list_head mr_gc_list_head;
+ struct workqueue_struct *wq_gc;
+ struct delayed_work gc_dwork_ent;
+
+ struct mutex lock;
+
+ atomic_t shutdown;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
struct mlx5_vdpa_resources res;
+ struct mlx5_vdpa_mr_resources mres;
u64 mlx_features;
u64 actual_features;
@@ -95,14 +113,23 @@ struct mlx5_vdpa_dev {
u16 max_idx;
u32 generation;
- struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
- struct list_head mr_list_head;
- /* serialize mr access */
- struct mutex mr_mtx;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
- unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
bool suspended;
+
+ struct mlx5_async_ctx async_ctx;
+};
+
+struct mlx5_vdpa_async_cmd {
+ int err;
+ struct mlx5_async_work cb_work;
+ struct completion cmd_done;
+
+ void *in;
+ size_t inlen;
+
+ void *out;
+ size_t outlen;
};
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
@@ -121,7 +148,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb);
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
@@ -134,6 +163,14 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds);
+
+#define mlx5_vdpa_err(__dev, format, ...) \
+ dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 4758914ccf86..2dd21e0b399e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -49,17 +49,23 @@ static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
}
}
-static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
-{
- int inlen;
+struct mlx5_create_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(create_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ __be64 mtt[];
+};
+
+struct mlx5_destroy_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(destroy_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(destroy_mkey_in)];
+};
+
+static void fill_create_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ void *in = &mem->in;
void *mkc;
- void *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -76,18 +82,36 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(mr->end - mr->start, mr->log_size));
populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
- err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
- kvfree(in);
- if (err) {
- mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
- return err;
- }
- return 0;
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+}
+
+static void create_direct_mr_end(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ u32 mkey_index = MLX5_GET(create_mkey_out, mem->out, mkey_index);
+
+ mr->mr = mlx5_idx_to_mkey(mkey_index);
+}
+
+static void fill_destroy_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_destroy_mkey_mem *mem)
+{
+ void *in = &mem->in;
+
+ MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr));
}
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
+ if (!mr->mr)
+ return;
+
mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
@@ -179,6 +203,123 @@ static int klm_byte_size(int nklms)
return 16 * ALIGN(nklms, 4);
}
+#define MLX5_VDPA_MTT_ALIGN 16
+
+static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_vdpa_async_cmd *cmds;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ if (!cmds)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+ int mttlen, mttcount;
+
+ mttlen = roundup(MLX5_ST_SZ_BYTES(mtt) * dmr->nsg, MLX5_VDPA_MTT_ALIGN);
+ mttcount = mttlen / sizeof(cmd_mem->mtt[0]);
+ cmd_mem = kvcalloc(1, struct_size(cmd_mem, mtt, mttcount), GFP_KERNEL);
+ if (!cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ cmds[i].out = cmd_mem->out;
+ cmds[i].outlen = sizeof(cmd_mem->out);
+ cmds[i].in = cmd_mem->in;
+ cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount);
+
+ fill_create_direct_mr(mvdev, dmr, cmd_mem);
+
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey creation for direct mrs: %d\n", err);
+ goto done;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmd->out, struct mlx5_create_mkey_mem, out);
+
+ if (!cmd->err) {
+ create_direct_mr_end(mvdev, dmr, cmd_mem);
+ } else {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error creating MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+done:
+ for (i = i-1; i >= 0; i--) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out);
+ kvfree(cmd_mem);
+ }
+
+ kvfree(cmds);
+ return err;
+}
+
+DEFINE_FREE(free_cmds, struct mlx5_vdpa_async_cmd *, kvfree(_T))
+DEFINE_FREE(free_cmd_mem, struct mlx5_destroy_mkey_mem *, kvfree(_T))
+
+static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_destroy_mkey_mem *cmd_mem __free(free_cmd_mem) = NULL;
+ struct mlx5_vdpa_async_cmd *cmds __free(free_cmds) = NULL;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ cmds[i].out = cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ cmds[i].in = cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ fill_destroy_direct_mr(mvdev, dmr, &cmd_mem[i]);
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey deletion for direct mrs: %d\n", err);
+ return err;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+
+ dmr->mr = 0;
+ if (cmd->err) {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error deleting MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+ return err;
+}
+
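destroy_direct_keys() above leans on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() names a cleanup routine and __free() attaches it to a variable, so the free runs on every return path. A minimal standalone sketch (kvfree_ptr is a local illustrative name):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	DEFINE_FREE(kvfree_ptr, void *, kvfree(_T))

	static int use_scoped_buffer(size_t n)
	{
		void *buf __free(kvfree_ptr) = kvzalloc(n, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* ... use buf; it is kvfree()d automatically on return,
		 * unless ownership is handed off with no_free_ptr(). */
		return 0;
	}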
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen;
@@ -279,14 +420,8 @@ done:
goto err_map;
}
- err = create_direct_mr(mvdev, mr);
- if (err)
- goto err_direct;
-
return 0;
-err_direct:
- dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(&mr->sg_head);
return err;
@@ -401,6 +536,10 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_chain;
+ err = create_direct_keys(mvdev, mr);
+ if (err)
+ goto err_chain;
+
+ /* Create the memory key that defines the guest's address space. This
* memory key refers to the direct keys that contain the MTT
* translations
@@ -489,6 +628,7 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
struct mlx5_vdpa_direct_mr *n;
destroy_indirect_key(mvdev, mr);
+ destroy_direct_keys(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
@@ -513,22 +653,58 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
kfree(mr);
}
+/* There can be multiple .set_map() operations in quick succession.
+ * This large delay is a simple way to prevent the MR cleanup from blocking
+ * .set_map() MR creation in this scenario.
+ */
+#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
+
+static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
+{
+ struct mlx5_vdpa_mr_resources *mres;
+ struct mlx5_vdpa_mr *mr, *tmp;
+ struct mlx5_vdpa_dev *mvdev;
+
+ mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
+
+ if (atomic_read(&mres->shutdown)) {
+ mutex_lock(&mres->lock);
+ } else if (!mutex_trylock(&mres->lock)) {
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ return;
+ }
+
+ mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
+
+ list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+ }
+
+ mutex_unlock(&mres->lock);
+}
+
static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
if (!mr)
return;
- if (refcount_dec_and_test(&mr->refcount))
- _mlx5_vdpa_destroy_mr(mvdev, mr);
+ if (refcount_dec_and_test(&mr->refcount)) {
+ list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ }
}
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -543,44 +719,47 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_get_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *new_mr,
unsigned int asid)
{
- struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
+ struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, old_mr);
- mvdev->mr[asid] = new_mr;
+ mvdev->mres.mr[asid] = new_mr;
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_mr *mr;
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
- list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+ list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
"mr: %p, mkey: 0x%x, refcount: %u\n",
mr, mr->mkey, refcount_read(&mr->refcount));
}
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
-void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev)
{
+ if (!mvdev->res.valid)
+ return;
+
for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
mlx5_vdpa_update_mr(mvdev, NULL, i);
@@ -613,7 +792,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_iotlb;
- list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+ list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
return 0;
@@ -639,9 +818,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (!mr)
return ERR_PTR(-ENOMEM);
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
if (err)
goto out_err;
@@ -661,7 +840,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
{
int err;
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
return 0;
spin_lock(&mvdev->cvq.iommu_lock);
@@ -703,3 +882,33 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
return 0;
}
+
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
+ if (!mres->wq_gc)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
+
+ mutex_init(&mres->lock);
+
+ INIT_LIST_HEAD(&mres->mr_list_head);
+ INIT_LIST_HEAD(&mres->mr_gc_list_head);
+
+ return 0;
+}
+
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ atomic_set(&mres->shutdown, 1);
+
+ flush_delayed_work(&mres->gc_dwork_ent);
+ destroy_workqueue(mres->wq_gc);
+ mres->wq_gc = NULL;
+ mutex_destroy(&mres->lock);
+}
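The GC worker's locking is the interesting part: during normal operation it uses mutex_trylock() and requeues itself rather than contending with .set_map(), but at shutdown it must block and drain. The trylock-or-requeue skeleton, reduced to its essentials:

	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	#define GC_DELAY_MS 2000

	struct gc_ctx {
		struct mutex lock;
		struct delayed_work dwork;
		atomic_t shutdown;
	};

	static void gc_handler(struct work_struct *work)
	{
		struct gc_ctx *gc = container_of(work, struct gc_ctx, dwork.work);

		if (atomic_read(&gc->shutdown)) {
			mutex_lock(&gc->lock);	/* must drain now */
		} else if (!mutex_trylock(&gc->lock)) {
			/* Busy: back off instead of blocking the fast path. */
			schedule_delayed_work(&gc->dwork,
					      msecs_to_jiffies(GC_DELAY_MS));
			return;
		}

		/* ... destroy deferred MRs queued on the gc list ... */

		mutex_unlock(&gc->lock);
	}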
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 5c5a41b64bfc..aeae31d0cefa 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,6 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
- mutex_init(&mvdev->mr_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
@@ -301,7 +300,6 @@ err_pd:
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
- mutex_destroy(&mvdev->mr_mtx);
return err;
}
@@ -318,6 +316,78 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
- mutex_destroy(&mvdev->mr_mtx);
res->valid = false;
}
+
+static void virtqueue_cmd_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5_vdpa_async_cmd *cmd =
+ container_of(context, struct mlx5_vdpa_async_cmd, cb_work);
+
+ cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out);
+ complete(&cmd->cmd_done);
+}
+
+static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int issued,
+ int *completed)
+
+{
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];
+ int err;
+
+retry:
+ err = mlx5_cmd_exec_cb(&mvdev->async_ctx,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen,
+ virtqueue_cmd_callback,
+ &cmd->cb_work);
+ if (err == -EBUSY) {
+ if (*completed < issued) {
+ /* Throttled by own commands: wait for oldest completion. */
+ wait_for_completion(&cmds[*completed].cmd_done);
+ (*completed)++;
+
+ goto retry;
+ } else {
+ /* Throttled by external commands: switch to the sync API. */
+ err = mlx5_cmd_exec(mvdev->mdev,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen);
+ if (!err)
+ (*completed)++;
+ }
+ }
+
+ return err;
+}
+
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds)
+{
+ int completed = 0;
+ int issued = 0;
+ int err = 0;
+
+ for (int i = 0; i < num_cmds; i++)
+ init_completion(&cmds[i].cmd_done);
+
+ while (issued < num_cmds) {
+
+ err = issue_async_cmd(mvdev, cmds, issued, &completed);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n",
+ issued, num_cmds, err);
+ break;
+ }
+
+ issued++;
+ }
+
+ while (completed < issued)
+ wait_for_completion(&cmds[completed++].cmd_done);
+
+ return err;
+}
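mlx5_vdpa_exec_async_cmds() has two throttling responses to -EBUSY: reap the oldest of its own in-flight completions and retry, or, when the queue is saturated by external users, fall back to the synchronous call. Either way, the final loop guarantees every issued command is waited on exactly once. A reduced sketch of that drain invariant:

	#include <linux/completion.h>

	struct batch_cmd {
		struct completion done;
		int err;
	};

	/* 'completed' counts commands already reaped for throttling, so the
	 * drain picks up exactly where issuing left off. */
	static int drain_batch(struct batch_cmd *cmds, int issued, int completed)
	{
		int err = 0;

		while (completed < issued) {
			wait_for_completion(&cmds[completed].done);
			if (!err)
				err = cmds[completed].err;
			completed++;
		}

		return err;
	}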
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index fa78e8288ebb..dee019977716 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr &&
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
/* If there is no mr update, make sure that the existing ones are set
* modify to ready.
*/
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
}
@@ -1184,40 +1184,92 @@ struct mlx5_virtq_attr {
u16 used_index;
};
-static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
- struct mlx5_virtq_attr *attr)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
- u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
- void *out;
- void *obj_context;
- void *cmd_hdr;
- int err;
+struct mlx5_virtqueue_query_mem {
+ u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
+};
- out = kzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+struct mlx5_virtqueue_modify_mem {
+ u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)];
+};
- cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtqueue_query_mem *cmd)
+{
+ void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
- if (err)
- goto err_cmd;
+}
+
+static void query_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_virtqueue_query_mem *cmd,
+ struct mlx5_virtq_attr *attr)
+{
+ void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context);
- obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
- kfree(out);
- return 0;
+}
-err_cmd:
- kfree(out);
+static int query_virtqueues(struct mlx5_vdpa_net *ndev,
+ int start_vq,
+ int num_vqs,
+ struct mlx5_virtq_attr *attrs)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_query_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
+
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
+
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ cmds[i].in = &cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ cmds[i].out = &cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]);
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n",
+ start_vq, start_vq + num_vqs, err);
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ int vq_idx = start_vq + i;
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, cmd->err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
return err;
}
@@ -1251,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
return true;
}
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- int state)
+static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state,
+ struct mlx5_virtqueue_modify_mem *cmd)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
- u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
struct mlx5_vdpa_mr *desc_mr = NULL;
struct mlx5_vdpa_mr *vq_mr = NULL;
- bool state_change = false;
void *obj_context;
void *cmd_hdr;
void *vq_ctx;
- void *in;
- int err;
- if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
- return 0;
-
- if (!modifiable_virtqueue_fields(mvq))
- return -EINVAL;
-
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context);
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
- if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
- if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
- err = -EINVAL;
- goto done;
- }
-
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
MLX5_SET(virtio_net_q_object, obj_context, state, state);
- state_change = true;
- }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
@@ -1323,7 +1354,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1332,7 +1363,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
- desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1341,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
- if (err)
- goto done;
+}
- if (state_change)
- mvq->fw_state = state;
+static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+ struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
mlx5_vdpa_get_mr(mvdev, vq_mr);
mvq->vq_mr = vq_mr;
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+ struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
mlx5_vdpa_get_mr(mvdev, desc_mr);
mvq->desc_mr = desc_mr;
}
- mvq->modified_fields = 0;
-
-done:
- kfree(in);
- return err;
-}
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
+ mvq->fw_state = state;
-static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- unsigned int state)
-{
- mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
- return modify_virtqueue(ndev, mvq, state);
+ mvq->modified_fields = 0;
}
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -1525,53 +1554,136 @@ err_fwqp:
return err;
}
-static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state)
{
- struct mlx5_virtq_attr attr;
- int err;
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_modify_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
- if (!mvq->initialized)
- return 0;
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
- if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
- return 0;
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
- return err;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
+
+ mvq = &ndev->vqs[vq_idx];
+
+ if (!modifiable_virtqueue_fields(mvq)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (mvq->fw_state != state) {
+ if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+ }
+
+ cmd->in = &cmd_mem[i].in;
+ cmd->inlen = sizeof(cmd_mem[i].in);
+ cmd->out = &cmd_mem[i].out;
+ cmd->outlen = sizeof(cmd_mem[i].out);
+ fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
- return err;
+ mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n",
+ start_vq, start_vq + num_vqs);
+ goto done;
}
- mvq->avail_idx = attr.available_index;
- mvq->used_idx = attr.used_index;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
- return 0;
+ mvq = &ndev->vqs[vq_idx];
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n",
+ vq_idx, mvq->fw_state, state, cmd->err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ modify_virtqueue_end(ndev, mvq, state);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
+ return err;
}
-static int suspend_vqs(struct mlx5_vdpa_net *ndev)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
- int err = 0;
- int i;
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_virtq_attr *attrs;
+ int vq_idx, i;
+ int err;
+
+ if (start_vq >= ndev->cur_num_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
+ if (!mvq->initialized)
+ return 0;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return 0;
+
+ err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+ if (err)
+ return err;
+
+ attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
- for (i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+ err = query_virtqueues(ndev, start_vq, num_vqs, attrs);
+ if (err)
+ goto done;
- err = local_err ? local_err : err;
+ for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) {
+ mvq = &ndev->vqs[vq_idx];
+ mvq->avail_idx = attrs[i].available_index;
+ mvq->used_idx = attrs[i].used_index;
}
+done:
+ kfree(attrs);
return err;
}
-static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ return suspend_vqs(ndev, mvq->index, 1);
+}
+
+static int resume_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
+ struct mlx5_vdpa_virtqueue *mvq;
int err;
+ if (start_vq >= ndev->mvdev.max_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
if (!mvq->initialized)
return 0;
@@ -1583,13 +1695,9 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
/* Due to a FW quirk we need to modify the VQ fields first then change state.
* This should be fixed soon. After that, a single command can be used.
*/
- err = modify_virtqueue(ndev, mvq, 0);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev,
- "modify vq properties failed for vq %u, err: %d\n",
- mvq->index, err);
+ err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state);
+ if (err)
return err;
- }
break;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
if (!is_resumable(ndev)) {
@@ -1600,30 +1708,17 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return 0;
default:
- mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n",
+ mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n",
mvq->index, mvq->fw_state);
return -EINVAL;
}
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
- if (err)
- mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
- mvq->index, err);
-
- return err;
+ return modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
}
-static int resume_vqs(struct mlx5_vdpa_net *ndev)
+static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
- int err = 0;
-
- for (int i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = resume_vq(ndev, &ndev->vqs[i]);
-
- err = local_err ? local_err : err;
- }
-
- return err;
+ return resume_vqs(ndev, mvq->index, 1);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -2002,13 +2097,13 @@ static int setup_steering(struct mlx5_vdpa_net *ndev)
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n");
return -EOPNOTSUPP;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ndev->rxft)) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n");
return PTR_ERR(ndev->rxft);
}
mlx5_vdpa_add_rx_flow_table(ndev);
@@ -2124,45 +2219,48 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- int cur_qps = ndev->cur_num_vqs / 2;
+ int cur_vqs = ndev->cur_num_vqs;
+ int new_vqs = newqps * 2;
int err;
int i;
- if (cur_qps > newqps) {
- err = modify_rqt(ndev, 2 * newqps);
+ if (cur_vqs > new_vqs) {
+ err = modify_rqt(ndev, new_vqs);
if (err)
return err;
- for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
-
- if (is_resumable(ndev))
- suspend_vq(ndev, mvq);
- else
- teardown_vq(ndev, mvq);
+ if (is_resumable(ndev)) {
+ suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs);
+ } else {
+ for (i = new_vqs; i < cur_vqs; i++)
+ teardown_vq(ndev, &ndev->vqs[i]);
}
- ndev->cur_num_vqs = 2 * newqps;
+ ndev->cur_num_vqs = new_vqs;
} else {
- ndev->cur_num_vqs = 2 * newqps;
- for (i = cur_qps * 2; i < 2 * newqps; i++) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
+ ndev->cur_num_vqs = new_vqs;
- err = mvq->initialized ? resume_vq(ndev, mvq) : setup_vq(ndev, mvq, true);
+ for (i = cur_vqs; i < new_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i], false);
if (err)
goto clean_added;
}
- err = modify_rqt(ndev, 2 * newqps);
+
+ err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs);
+ if (err)
+ goto clean_added;
+
+ err = modify_rqt(ndev, new_vqs);
if (err)
goto clean_added;
}
return 0;
clean_added:
- for (--i; i >= 2 * cur_qps; --i)
+ for (--i; i >= cur_vqs; --i)
teardown_vq(ndev, &ndev->vqs[i]);
- ndev->cur_num_vqs = 2 * cur_qps;
+ ndev->cur_num_vqs = cur_vqs;
return err;
}
@@ -2528,9 +2626,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
return 0;
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err) {
- mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ mlx5_vdpa_err(mvdev, "failed to query virtqueue\n");
return err;
}
state->split.avail_index = attr.used_index;
@@ -2755,6 +2853,9 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
struct mlx5_eqe *eqe = param;
int ret = NOTIFY_DONE;
+ if (ndev->mvdev.suspended)
+ return NOTIFY_DONE;
+
if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
@@ -2879,7 +2980,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
int err;
if (mvq->initialized) {
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err)
return err;
}
@@ -2948,7 +3049,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
bool teardown = !is_resumable(ndev);
int err;
- suspend_vqs(ndev);
+ suspend_vqs(ndev, 0, ndev->cur_num_vqs);
if (teardown) {
err = save_channels_info(ndev);
if (err)
@@ -2973,7 +3074,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
return err;
}
- resume_vqs(ndev);
+ resume_vqs(ndev, 0, ndev->cur_num_vqs);
return 0;
}
@@ -3097,7 +3198,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
teardown_vq_resources(ndev);
if (ndev->setup) {
- err = resume_vqs(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to resume VQs\n");
goto err_driver;
@@ -3122,7 +3223,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
err_driver:
unregister_link_notifier(ndev);
err_setup:
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
@@ -3134,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
/* default mapping all groups are mapped to asid 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
- mvdev->group2asid[i] = 0;
+ mvdev->mres.group2asid[i] = 0;
}
static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3174,7 +3275,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
}
if (flags & VDPA_RESET_F_CLEAN_MAP)
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT;
@@ -3189,7 +3290,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_dma_mr(mvdev))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ mlx5_vdpa_err(mvdev, "create MR failed\n");
}
if (vq_reset)
setup_vq_resources(ndev, false);
@@ -3244,7 +3345,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
if (IS_ERR(new_mr)) {
err = PTR_ERR(new_mr);
- mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err);
return err;
}
} else {
@@ -3252,12 +3353,12 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = NULL;
}
- if (!mvdev->mr[asid]) {
+ if (!mvdev->mres.mr[asid]) {
mlx5_vdpa_update_mr(mvdev, new_mr, asid);
} else {
err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
if (err) {
- mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err);
goto out_err;
}
}
@@ -3332,7 +3433,10 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_fixed_resources(ndev);
- mlx5_vdpa_destroy_mr_resources(mvdev);
+ mlx5_vdpa_clean_mrs(mvdev);
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3500,8 +3604,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
mlx5_vdpa_info(mvdev, "suspending device\n");
down_write(&ndev->reslock);
- unregister_link_notifier(ndev);
- err = suspend_vqs(ndev);
+ err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);
mlx5_vdpa_cvq_suspend(mvdev);
mvdev->suspended = true;
up_write(&ndev->reslock);
@@ -3521,8 +3624,8 @@ static int mlx5_vdpa_resume(struct vdpa_device *vdev)
down_write(&ndev->reslock);
mvdev->suspended = false;
- err = resume_vqs(ndev);
- register_link_notifier(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
+ queue_link_work(ndev);
up_write(&ndev->reslock);
return err;
@@ -3537,12 +3640,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
- mvdev->group2asid[group] = asid;
+ mvdev->mres.group2asid[group] = asid;
- mutex_lock(&mvdev->mr_mtx);
- if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
- err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
+ if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+ err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+ mutex_unlock(&mvdev->mres.lock);
return err;
}
@@ -3854,18 +3957,22 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->rqt_size = 1;
}
+ mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
- INIT_LIST_HEAD(&mvdev->mr_list_head);
+ err = mlx5_vdpa_init_mr_resources(mvdev);
+ if (err)
+ goto err_res;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
- goto err_res;
+ goto err_mr_res;
}
err = alloc_fixed_resources(ndev);
@@ -3906,6 +4013,8 @@ err_reg:
err_res2:
free_fixed_resources(ndev);
err_mr:
+ mlx5_vdpa_clean_mrs(mvdev);
+err_mr_res:
mlx5_vdpa_destroy_mr_resources(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
@@ -3937,9 +4046,37 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
mgtdev->ndev = NULL;
}
+static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *add_config)
+{
+ struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
+ int err = -EOPNOTSUPP;
+
+ mvdev = to_mvdev(dev);
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ mdev = mvdev->mdev;
+ config = &ndev->config;
+
+ down_write(&ndev->reslock);
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (!err)
+ ether_addr_copy(config->mac, add_config->net.mac);
+ }
+
+ up_write(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_mgmtdev_ops mdev_ops = {
.dev_add = mlx5_vdpa_dev_add,
.dev_del = mlx5_vdpa_dev_del,
+ .dev_set_attr = mlx5_vdpa_set_attr,
};
static struct virtio_device_id id_table[] = {
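
The mlx5 rework above funnels every virtqueue state change through one batch-issue/collect cycle instead of a synchronous FW command per VQ. A condensed sketch of that pattern, assuming the struct mlx5_vdpa_async_cmd layout used in the hunks above; fill_cmd() and apply_result() are hypothetical stand-ins for fill_modify_virtqueue_cmd() and modify_virtqueue_end():

static int batch_modify(struct mlx5_vdpa_net *ndev, int start, int n, int state)
{
	struct mlx5_vdpa_async_cmd *cmds;
	int err, i;

	cmds = kvcalloc(n, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;

	for (i = 0; i < n; i++)
		fill_cmd(&cmds[i], &ndev->vqs[start + i], state); /* hypothetical */

	/* Issues all commands and waits for every completion in one call. */
	err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, n);

	for (i = 0; i < n; i++) {
		if (cmds[i].err) {
			if (!err)
				err = cmds[i].err; /* keep the first per-VQ error */
			continue;
		}
		apply_result(&ndev->vqs[start + i], state); /* hypothetical */
	}

	kvfree(cmds);
	return err;
}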
diff --git a/drivers/vdpa/pds/cmds.h b/drivers/vdpa/pds/cmds.h
index e24d85cb8f1c..6b1bc33356b0 100644
--- a/drivers/vdpa/pds/cmds.h
+++ b/drivers/vdpa/pds/cmds.h
@@ -14,5 +14,4 @@ int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
-int pds_vdpa_cmd_set_features(struct pds_vdpa_device *pdsv, u64 features);
#endif /* _VDPA_CMDS_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 4dbd2e55a288..8a372b51c21a 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -1361,6 +1361,80 @@ dev_err:
return err;
}
+static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev,
+ struct genl_info *info)
+{
+ struct vdpa_dev_set_config set_config = {};
+ struct vdpa_mgmt_dev *mdev = vdev->mdev;
+ struct nlattr **nl_attrs = info->attrs;
+ const u8 *macaddr;
+ int err = -EOPNOTSUPP;
+
+ down_write(&vdev->cf_lock);
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
+ set_config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
+
+ if (is_valid_ether_addr(macaddr)) {
+ ether_addr_copy(set_config.net.mac, macaddr);
+ if (mdev->ops->dev_set_attr) {
+ err = mdev->ops->dev_set_attr(mdev, vdev,
+ &set_config);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Operation not supported by the device.");
+ }
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Invalid MAC address");
+ }
+ }
+ up_write(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_attr_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct device *dev;
+ const char *name;
+ u64 classes;
+ int err = 0;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+
+ down_write(&vdpa_dev_lock);
+ dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL);
+ if (classes & BIT_ULL(VIRTIO_ID_NET)) {
+ err = vdpa_dev_net_device_attr_set(vdev, info);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack, "%s device not supported",
+ name);
+ }
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ up_write(&vdpa_dev_lock);
+ return err;
+}
+
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
@@ -1497,6 +1571,11 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_stats_get_doit,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = VDPA_CMD_DEV_ATTR_SET,
+ .doit = vdpa_nl_cmd_dev_attr_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
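
The new .dev_set_attr management op gives every vdpa parent driver a uniform hook for post-creation attribute changes; the generic code above validates the MAC and takes cf_lock before calling it. A minimal sketch of such a callback, with the my_vdpa names hypothetical:

static int my_vdpa_set_attr(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
			    const struct vdpa_dev_set_config *config)
{
	struct my_vdpa *priv = container_of(dev, struct my_vdpa, vdpa);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(priv->config.mac, config->net.mac);
		return 0;
	}

	/* Anything the parent cannot change after device creation. */
	return -EOPNOTSUPP;
}

static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
	.dev_add = my_vdpa_dev_add,		/* hypothetical */
	.dev_del = my_vdpa_dev_del,		/* hypothetical */
	.dev_set_attr = my_vdpa_set_attr,
};

Userspace reaches this path through the new VDPA_CMD_DEV_ATTR_SET netlink command; with a matching iproute2, that is plausibly "vdpa dev set name <dev> mac <addr>".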
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index cfe962911804..6caf09a1907b 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -414,6 +414,24 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
}
+static int vdpasim_net_set_attr(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config)
+{
+ struct vdpasim *vdpasim = container_of(dev, struct vdpasim, vdpa);
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ mutex_lock(&vdpasim->mutex);
+
+ if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ ether_addr_copy(vio_config->mac, config->net.mac);
+ mutex_unlock(&vdpasim->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&vdpasim->mutex);
+ return -EOPNOTSUPP;
+}
+
static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
const struct vdpa_dev_set_config *config)
{
@@ -510,7 +528,8 @@ static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
.dev_add = vdpasim_net_dev_add,
- .dev_del = vdpasim_net_dev_del
+ .dev_del = vdpasim_net_dev_del,
+ .dev_set_attr = vdpasim_net_set_attr
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..58116f89d8da 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -162,6 +162,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
enum dma_data_direction dir)
{
struct vduse_bounce_map *map;
+ struct page *page;
unsigned int offset;
void *addr;
size_t sz;
@@ -178,7 +179,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = kmap_local_page(map->bounce_page);
+ page = domain->user_bounce_pages ?
+ map->user_bounce_page : map->bounce_page;
+
+ addr = kmap_local_page(page);
do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
kunmap_local(addr);
size -= sz;
@@ -270,9 +274,8 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
memcpy_to_page(pages[i], 0,
page_address(map->bounce_page),
PAGE_SIZE);
- __free_page(map->bounce_page);
}
- map->bounce_page = pages[i];
+ map->user_bounce_page = pages[i];
get_page(pages[i]);
}
domain->user_bounce_pages = true;
@@ -297,17 +300,17 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
struct page *page = NULL;
map = &domain->bounce_maps[i];
- if (WARN_ON(!map->bounce_page))
+ if (WARN_ON(!map->user_bounce_page))
continue;
/* Copy user page to kernel page if it's in use */
if (map->orig_phys != INVALID_PHYS_ADDR) {
- page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ page = map->bounce_page;
memcpy_from_page(page_address(page),
- map->bounce_page, 0, PAGE_SIZE);
+ map->user_bounce_page, 0, PAGE_SIZE);
}
- put_page(map->bounce_page);
- map->bounce_page = page;
+ put_page(map->user_bounce_page);
+ map->user_bounce_page = NULL;
}
domain->user_bounce_pages = false;
out:
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..7f3f0928ec78 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -21,6 +21,7 @@
struct vduse_bounce_map {
struct page *bounce_page;
+ struct page *user_bounce_page;
u64 orig_phys;
};
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 82b2afa9b7e3..7e7988c4258f 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -108,10 +108,10 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
void *data)
{
struct fsl_mc_device *mc_dev = vdev->mc_dev;
- int ret, hwirq;
struct vfio_fsl_mc_irq *irq;
struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
+ int ret;
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
return vfio_set_trigger(vdev, index, -1);
@@ -136,8 +136,6 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return vfio_set_trigger(vdev, index, fd);
}
- hwirq = vdev->mc_dev->irqs[index]->virq;
-
irq = &vdev->mc_irqs[index];
if (flags & VFIO_IRQ_SET_DATA_NONE) {
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index ded364588d29..95b336de8a17 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -112,7 +112,7 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
return -EFAULT;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
mutex_lock(&group->group_lock);
@@ -125,13 +125,13 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
goto out_unlock;
}
- container = vfio_container_from_file(f.file);
+ container = vfio_container_from_file(fd_file(f));
if (container) {
ret = vfio_container_attach_group(container, group);
goto out_unlock;
}
- iommufd = iommufd_ctx_from_file(f.file);
+ iommufd = iommufd_ctx_from_file(fd_file(f));
if (!IS_ERR(iommufd)) {
if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
group->type == VFIO_NO_IOMMU)
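
This hunk and several below (virqfd.c, acrn/irqfd.c) convert direct f.file accesses to the fd_file() accessor, keeping struct fd opaque to its users. A minimal sketch of the pattern, needing <linux/file.h> and <linux/eventfd.h>; use_eventfd() is a hypothetical caller:

static int use_eventfd(int fd)
{
	struct fd f = fdget(fd);
	struct eventfd_ctx *ctx;

	if (!fd_file(f))		/* accessor instead of f.file */
		return -EBADF;

	ctx = eventfd_ctx_fileget(fd_file(f));
	fdput(f);			/* ctx holds its own reference */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eventfd_ctx_put(ctx);
	return 0;
}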
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 63a1316b08b7..5f61acd0fe42 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -10,9 +10,6 @@
#ifndef MDEV_PRIVATE_H
#define MDEV_PRIVATE_H
-int mdev_bus_register(void);
-void mdev_bus_unregister(void);
-
extern const struct bus_type mdev_bus_type;
extern const struct attribute_group *mdev_device_groups[];
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 9d2738e10c0b..e44bb44c581e 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -160,7 +160,7 @@ static void mdev_type_release(struct kobject *kobj)
put_device(type->parent->dev);
}
-static struct kobj_type mdev_type_ktype = {
+static const struct kobj_type mdev_type_ktype = {
.sysfs_ops = &mdev_type_sysfs_ops,
.release = mdev_type_release,
.default_groups = mdev_type_groups,
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index ba0ce0075b2f..1ab58da9f38a 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
+#include <linux/pfn_t.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -57,11 +58,6 @@ struct vfio_pci_vf_token {
int users;
};
-struct vfio_pci_mmap_vma {
- struct vm_area_struct *vma;
- struct list_head vma_next;
-};
-
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -1328,7 +1324,7 @@ out:
static int
vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
- int array_count, bool slot,
+ u32 array_count, bool slot,
struct vfio_pci_hot_reset __user *arg)
{
int32_t *group_fds;
@@ -1657,14 +1653,20 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
}
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+ unsigned int order)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
- unsigned long addr = vma->vm_start;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+ vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+ ret = VM_FAULT_FALLBACK;
+ goto out;
+ }
+
pfn = vma_to_pfn(vma);
down_read(&vdev->memory_lock);
@@ -1672,30 +1674,49 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
goto out_unlock;
- ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
- if (ret & VM_FAULT_ERROR)
- goto out_unlock;
-
- /*
- * Pre-fault the remainder of the vma, abort further insertions and
- * suppress error if fault is encountered during pre-fault.
- */
- for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
- if (addr == vmf->address)
- continue;
-
- if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
- break;
+ switch (order) {
+ case 0:
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ case PMD_ORDER:
+ ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+ PFN_DEV), false);
+ break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+ case PUD_ORDER:
+ ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+ PFN_DEV), false);
+ break;
+#endif
+ default:
+ ret = VM_FAULT_FALLBACK;
}
out_unlock:
up_read(&vdev->memory_lock);
+out:
+ dev_dbg_ratelimited(&vdev->pdev->dev,
+ "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+ __func__, order,
+ vma->vm_pgoff >>
+ (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+ pgoff, (unsigned int)ret);
return ret;
}
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+ return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .fault = vfio_pci_mmap_fault,
+ .fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+ .huge_fault = vfio_pci_mmap_huge_fault,
+#endif
};
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
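
The reworked fault handler serves whatever mapping order the core asks for, provided the faulting address is aligned to that order and the mapping fits inside the VMA; anything else returns VM_FAULT_FALLBACK so the core retries with a smaller size. A condensed sketch of that decision, assuming an architecture with CONFIG_ARCH_SUPPORTS_PMD_PFNMAP:

static vm_fault_t insert_for_order(struct vm_fault *vmf, unsigned long pfn,
				   unsigned int order)
{
	/* Misaligned or overrunning the VMA: let the core fall back. */
	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
		      vmf->address + (PAGE_SIZE << order) > vmf->vma->vm_end))
		return VM_FAULT_FALLBACK;

	switch (order) {
	case 0:
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	case PMD_ORDER:	/* one PMD-sized leaf instead of many PTEs */
		return vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, PFN_DEV), false);
#endif
	default:
		return VM_FAULT_FALLBACK;
	}
}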
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0960699e7554..bf391b40e576 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -513,12 +513,10 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
bool write_fault)
{
- pte_t *ptep;
- pte_t pte;
- spinlock_t *ptl;
+ struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
int ret;
- ret = follow_pte(vma, vaddr, &ptep, &ptl);
+ ret = follow_pfnmap_start(&args);
if (ret) {
bool unlocked = false;
@@ -532,19 +530,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
if (ret)
return ret;
- ret = follow_pte(vma, vaddr, &ptep, &ptl);
+ ret = follow_pfnmap_start(&args);
if (ret)
return ret;
}
- pte = ptep_get(ptep);
-
- if (write_fault && !pte_write(pte))
+ if (write_fault && !args.writable)
ret = -EFAULT;
else
- *pfn = pte_pfn(pte);
+ *pfn = args.pfn;
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
return ret;
}
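
follow_pte() users here and in acrn/mm.c below move to the follow_pfnmap_start()/follow_pfnmap_end() pair, which reports the PFN and writability without handing out raw PTE pointers. A minimal sketch; the caller must hold the mmap read lock, and the fields are only valid between start and end:

static int lookup_pfn(struct vm_area_struct *vma, unsigned long vaddr,
		      unsigned long *pfn, bool *writable)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
	int ret;

	ret = follow_pfnmap_start(&args);
	if (ret)
		return ret;	/* not mapped, or not a PFN map */

	*pfn = args.pfn;
	*writable = args.writable;
	follow_pfnmap_end(&args);	/* drops the page-table lock */
	return 0;
}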
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 532269133801..d22881245e89 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -134,12 +134,12 @@ int vfio_virqfd_enable(void *opaque,
INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
irqfd = fdget(fd);
- if (!irqfd.file) {
+ if (!fd_file(irqfd)) {
ret = -EBADF;
goto err_fd;
}
- ctx = eventfd_ctx_fileget(irqfd.file);
+ ctx = eventfd_ctx_fileget(fd_file(irqfd));
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto err_ctx;
@@ -171,7 +171,7 @@ int vfio_virqfd_enable(void *opaque,
init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
- events = vfs_poll(irqfd.file, &virqfd->pt);
+ events = vfs_poll(fd_file(irqfd), &virqfd->pt);
/*
* Check if there was an event already pending on the eventfd
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 478cd46a49ed..5a49b5a6d496 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (irq < 0)
return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
if (unlikely(ret))
@@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
vq->last_avail_idx = vq_state.split.avail_index;
}
break;
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_unsetup_vq_irq(v, idx);
+ vq->call_ctx.producer.token = NULL;
+ }
+ break;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_setup_vq_irq(v, idx);
} else {
cb.callback = NULL;
cb.private = NULL;
cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
- vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
+ vqs[i]->call_ctx.ctx = NULL;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bf664ec9341b..802153e23073 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -244,7 +244,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
restart_tx = true;
}
- consume_skb(skb);
+ virtio_transport_consume_skb_sent(skb, true);
}
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
@@ -451,6 +451,8 @@ static struct virtio_transport vhost_transport = {
.notify_buffer_size = virtio_transport_notify_buffer_size,
.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+ .unsent_bytes = virtio_transport_unsent_bytes,
+
.read_skb = virtio_transport_read_skb,
},
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index dd0874f8c7ff..4175a4603071 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -166,6 +166,7 @@ static const struct lcd_ops l4f_ops = {
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
+ int ret;
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
@@ -174,7 +175,9 @@ static int l4f00242t03_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
- spi_setup(spi);
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "Unable to setup spi.\n");
priv->spi = spi;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 3f7333dca508..2e093535884b 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -64,6 +64,8 @@
#include <linux/console.h>
#include <linux/string.h>
#include <linux/kd.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
@@ -270,12 +272,24 @@ static int fbcon_get_rotate(struct fb_info *info)
return (ops) ? ops->rotate : 0;
}
+static bool fbcon_skip_panic(struct fb_info *info)
+{
+/* panic_cpu is not exported, and can't be used if built as module. Use
+ * oops_in_progress instead, but non-fatal oops won't be printed.
+ */
+#if defined(MODULE)
+ return (info->skip_panic && unlikely(oops_in_progress));
+#else
+ return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID));
+#endif
+}
+
static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics);
+ vc->vc_mode != KD_TEXT || ops->graphics || fbcon_skip_panic(info));
}
static int get_color(struct vc_data *vc, struct fb_info *info,
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 4c4ad0a86a50..3c568cff2913 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -544,6 +544,36 @@ unregister_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(unregister_framebuffer);
+static void devm_unregister_framebuffer(void *data)
+{
+ struct fb_info *info = data;
+
+ unregister_framebuffer(info);
+}
+
+/**
+ * devm_register_framebuffer - resource-managed frame buffer device registration
+ * @dev: device the framebuffer belongs to
+ * @fb_info: frame buffer info structure
+ *
+ * Registers a frame buffer device @fb_info to device @dev.
+ *
+ * Returns negative errno on error, or zero for success.
+ *
+ */
+int
+devm_register_framebuffer(struct device *dev, struct fb_info *fb_info)
+{
+ int ret;
+
+ ret = register_framebuffer(fb_info);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_unregister_framebuffer, fb_info);
+}
+EXPORT_SYMBOL(devm_register_framebuffer);
+
/**
* fb_set_suspend - low level driver signals suspend
* @info: framebuffer affected
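
With devm_register_framebuffer() a driver can drop its explicit unregister path entirely. A hedged probe sketch; my_fb_probe and the setup details are hypothetical:

static int my_fb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	int ret;

	info = framebuffer_alloc(0, &pdev->dev);
	if (!info)
		return -ENOMEM;

	/* ... fill in fbops, var/fix and the screen buffer ... */

	ret = devm_register_framebuffer(&pdev->dev, info);
	if (ret)
		framebuffer_release(info);

	/* No .remove callback: the devm action unregisters on unbind. */
	return ret;
}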
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8dd82afb3452..20517448487e 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -322,7 +322,7 @@ static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct screen_info *si = dev_get_platdata(dev); \
+ struct screen_info *si = dev_get_drvdata(dev); \
if (!si) \
return -ENODEV; \
return sprintf(buf, fmt "\n", (si->lfb_##name)); \
@@ -369,6 +369,8 @@ static int efifb_probe(struct platform_device *dev)
if (!si)
return -ENOMEM;
+ dev_set_drvdata(&dev->dev, si);
+
if (si->orig_video_isVGA != VIDEO_TYPE_EFI)
return -ENODEV;
@@ -449,7 +451,6 @@ static int efifb_probe(struct platform_device *dev)
err = -ENOMEM;
goto err_release_mem;
}
- platform_set_drvdata(dev, info);
par = info->par;
info->pseudo_palette = par->pseudo_palette;
@@ -561,15 +562,10 @@ static int efifb_probe(struct platform_device *dev)
break;
}
- err = sysfs_create_groups(&dev->dev.kobj, efifb_groups);
- if (err) {
- pr_err("efifb: cannot add sysfs attrs\n");
- goto err_unmap;
- }
err = fb_alloc_cmap(&info->cmap, 256, 0);
if (err < 0) {
pr_err("efifb: cannot allocate colormap\n");
- goto err_groups;
+ goto err_unmap;
}
err = devm_aperture_acquire_for_platform_device(dev, par->base, par->size);
@@ -577,7 +573,7 @@ static int efifb_probe(struct platform_device *dev)
pr_err("efifb: cannot acquire aperture\n");
goto err_fb_dealloc_cmap;
}
- err = register_framebuffer(info);
+ err = devm_register_framebuffer(&dev->dev, info);
if (err < 0) {
pr_err("efifb: cannot register framebuffer\n");
goto err_fb_dealloc_cmap;
@@ -587,8 +583,6 @@ static int efifb_probe(struct platform_device *dev)
err_fb_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
-err_groups:
- sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
iounmap(info->screen_base);
@@ -602,21 +596,12 @@ err_release_mem:
return err;
}
-static void efifb_remove(struct platform_device *pdev)
-{
- struct fb_info *info = platform_get_drvdata(pdev);
-
- /* efifb_destroy takes care of info cleanup */
- unregister_framebuffer(info);
- sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
-}
-
static struct platform_driver efifb_driver = {
.driver = {
.name = "efi-framebuffer",
+ .dev_groups = efifb_groups,
},
.probe = efifb_probe,
- .remove_new = efifb_remove,
};
builtin_platform_driver(efifb_driver);
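
Besides the devm conversion, efifb hands its sysfs attributes to the driver core: groups listed in .driver.dev_groups are created once probe() succeeds and removed at unbind, so the manual sysfs_create_groups()/sysfs_remove_groups() calls and their error unwinding disappear. A hedged sketch, with my_groups and my_probe hypothetical:

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-fb",
		.dev_groups = my_groups,	/* NULL-terminated array */
	},
	.probe = my_probe,
};
builtin_platform_driver(my_driver);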
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 66fac8e5393e..a1144b150982 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -345,6 +345,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)
if (hpfb_init_one(paddr, vaddr)) {
if (d->scode >= DIOII_SCBASE)
iounmap((void *)vaddr);
+ release_mem_region(d->resource.start, resource_size(&d->resource));
return -ENOMEM;
}
return 0;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 8fdccf033b2d..7fdb5edd7e2e 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1189,7 +1189,7 @@ static int hvfb_probe(struct hv_device *hdev,
* which is almost at the end of list, with priority = INT_MIN + 1.
*/
par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
- par->hvfb_panic_nb.priority = INT_MIN + 10,
+ par->hvfb_panic_nb.priority = INT_MIN + 10;
atomic_notifier_chain_register(&panic_notifier_list,
&par->hvfb_panic_nb);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 660499260f46..dc4e659e06af 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -995,7 +995,7 @@ imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
bgc |= (bgc << 8);
bgc |= (bgc << 16);
- Bpp = info->var.bits_per_pixel >> 3,
+ Bpp = info->var.bits_per_pixel >> 3;
line_pitch = info->fix.line_length;
dy = rect->dy * line_pitch;
@@ -1036,7 +1036,7 @@ imsttfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
__u32 Bpp, line_pitch, fb_offset_old, fb_offset_new, sp, dp_octl;
__u32 cnt, bltctl, sx, sy, dx, dy, height, width;
- Bpp = info->var.bits_per_pixel >> 3,
+ Bpp = info->var.bits_per_pixel >> 3;
sx = area->sx * Bpp;
sy = area->sy;
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index a20a2c408127..03e23173198c 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -512,16 +512,13 @@ static int mmphw_probe(struct platform_device *pdev)
}
/* get clock */
- ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
+ ctrl->clk = devm_clk_get_enabled(ctrl->dev, mi->clk_name);
if (IS_ERR(ctrl->clk)) {
ret = PTR_ERR(ctrl->clk);
dev_err_probe(ctrl->dev, ret,
"unable to get clk %s\n", mi->clk_name);
goto failed;
}
- ret = clk_prepare_enable(ctrl->clk);
- if (ret)
- goto failed;
/* init global regs */
ctrl_set_default(ctrl);
@@ -556,7 +553,6 @@ failed_path_init:
path_deinit(path_plat);
}
- clk_disable_unprepare(ctrl->clk);
failed:
dev_err(&pdev->dev, "device init failed\n");
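
The mmp change is the usual devm_clk_get_enabled() conversion: one call covers devm_clk_get() plus clk_prepare_enable(), and both are undone automatically on driver detach. A minimal sketch; my_get_clock and the "lcd" clock name are placeholders:

static int my_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_enabled(dev, "lcd");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "unable to get clk\n");

	*out = clk;	/* already prepared and enabled */
	return 0;
}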
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index aa31c0d26e92..e12c6019a4d6 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1241,14 +1241,13 @@ static ssize_t omapfb_show_caps_num(struct device *dev,
{
struct omapfb_device *fbdev = dev_get_drvdata(dev);
int plane;
- size_t size;
+ size_t size = 0;
struct omapfb_caps caps;
plane = 0;
- size = 0;
- while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ while (plane < OMAPFB_PLANE_NUM) {
omapfb_get_caps(fbdev, plane, &caps);
- size += scnprintf(&buf[size], PAGE_SIZE - size,
+ size += sysfs_emit_at(buf, size,
"plane#%d %#010x %#010x %#010x\n",
plane, caps.ctrl, caps.plane_color, caps.wnd_color);
plane++;
@@ -1263,34 +1262,27 @@ static ssize_t omapfb_show_caps_text(struct device *dev,
int i;
struct omapfb_caps caps;
int plane;
- size_t size;
+ size_t size = 0;
plane = 0;
- size = 0;
- while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ while (plane < OMAPFB_PLANE_NUM) {
omapfb_get_caps(fbdev, plane, &caps);
- size += scnprintf(&buf[size], PAGE_SIZE - size,
- "plane#%d:\n", plane);
- for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
- size < PAGE_SIZE; i++) {
+ size += sysfs_emit_at(buf, size, "plane#%d:\n", plane);
+ for (i = 0; i < ARRAY_SIZE(ctrl_caps); i++) {
if (ctrl_caps[i].flag & caps.ctrl)
- size += scnprintf(&buf[size], PAGE_SIZE - size,
+ size += sysfs_emit_at(buf, size,
" %s\n", ctrl_caps[i].name);
}
- size += scnprintf(&buf[size], PAGE_SIZE - size,
- " plane colors:\n");
- for (i = 0; i < ARRAY_SIZE(color_caps) &&
- size < PAGE_SIZE; i++) {
+ size += sysfs_emit_at(buf, size, " plane colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps); i++) {
if (color_caps[i].flag & caps.plane_color)
- size += scnprintf(&buf[size], PAGE_SIZE - size,
+ size += sysfs_emit_at(buf, size,
" %s\n", color_caps[i].name);
}
- size += scnprintf(&buf[size], PAGE_SIZE - size,
- " window colors:\n");
- for (i = 0; i < ARRAY_SIZE(color_caps) &&
- size < PAGE_SIZE; i++) {
+ size += sysfs_emit_at(buf, size, " window colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps); i++) {
if (color_caps[i].flag & caps.wnd_color)
- size += scnprintf(&buf[size], PAGE_SIZE - size,
+ size += sysfs_emit_at(buf, size,
" %s\n", color_caps[i].name);
}
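
sysfs_emit_at() bounds the write to the single sysfs page internally, so the scnprintf()-with-PAGE_SIZE bookkeeping above becomes a plain accumulation. A minimal show() sketch; the modes[] table is a placeholder:

static ssize_t modes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	static const char * const modes[] = { "off", "on", "auto" };
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++)
		len += sysfs_emit_at(buf, len, "%s\n", modes[i]);

	return len;
}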
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 71d2e015960c..fc975615d5c9 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -466,19 +466,20 @@ static ssize_t show_cabc_available_modes(struct device *dev,
char *buf)
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int len;
+ int len = 0;
int i;
if (!ddata->has_cabc)
return sysfs_emit(buf, "%s\n", cabc_modes[0]);
- for (i = 0, len = 0;
- len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
- i ? " " : "", cabc_modes[i],
- i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++)
+ len += sysfs_emit_at(buf, len, "%s ", cabc_modes[i]);
+
+ /* Remove the trailing space */
+ if (len)
+ buf[len - 1] = '\n';
- return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+ return len;
}
static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 9a7253355f6d..cdb1dedca492 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -351,7 +351,7 @@ struct omap_hdmi {
bool audio_configured;
struct omap_dss_audio audio_config;
- /* This lock should be taken when booleans bellow are touched. */
+ /* This lock should be taken when booleans below are touched. */
spinlock_t audio_playing_lock;
bool audio_playing;
bool display_enabled;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 43c80316d84b..489088b4e467 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -594,8 +594,8 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
* container_of(). This isn't really necessary as we have a fixed minor
* number anyway, but this is to avoid statics. */
- priv->misc_dev.minor = PXA3XX_GCU_MINOR,
- priv->misc_dev.name = DRV_NAME,
+ priv->misc_dev.minor = PXA3XX_GCU_MINOR;
+ priv->misc_dev.name = DRV_NAME;
priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
/* handle IO resources */
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 2ef56fa28aff..5ce02495cda6 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2403,6 +2403,7 @@ static void pxafb_remove(struct platform_device *dev)
info = &fbi->fb;
pxafb_overlay_exit(fbi);
+ cancel_work_sync(&fbi->task);
unregister_framebuffer(info);
pxafb_disable_controller(fbi);
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 66d4628a96ae..c90f48ebb15e 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -407,6 +407,7 @@ static int xenfb_probe(struct xenbus_device *dev,
/* complete the abuse: */
fb_info->pseudo_palette = fb_info->par;
fb_info->par = info;
+ fb_info->device = &dev->dev;
fb_info->screen_buffer = info->fb;
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
index d4ad211dce7a..9994d818bb7e 100644
--- a/drivers/virt/acrn/irqfd.c
+++ b/drivers/virt/acrn/irqfd.c
@@ -125,12 +125,12 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
f = fdget(args->fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
@@ -157,7 +157,7 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
mutex_unlock(&vm->irqfds_lock);
/* Check the pending event in this stage */
- events = vfs_poll(f.file, &irqfd->pt);
+ events = vfs_poll(fd_file(f), &irqfd->pt);
if (events & EPOLLIN)
acrn_irqfd_inject(irqfd);
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index db8ff1d0ac23..4c2f28715b70 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -177,9 +177,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
vma = vma_lookup(current->mm, memmap->vma_base);
if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
unsigned long start_pfn, cur_pfn;
- spinlock_t *ptl;
bool writable;
- pte_t *ptep;
if ((memmap->vma_base + memmap->len) > vma->vm_end) {
mmap_read_unlock(current->mm);
@@ -187,16 +185,20 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
}
for (i = 0; i < nr_pages; i++) {
- ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,
- &ptep, &ptl);
+ struct follow_pfnmap_args args = {
+ .vma = vma,
+ .address = memmap->vma_base + i * PAGE_SIZE,
+ };
+
+ ret = follow_pfnmap_start(&args);
if (ret)
break;
- cur_pfn = pte_pfn(ptep_get(ptep));
+ cur_pfn = args.pfn;
if (i == 0)
start_pfn = cur_pfn;
- writable = !!pte_write(ptep_get(ptep));
- pte_unmap_unlock(ptep, ptl);
+ writable = args.writable;
+ follow_pfnmap_end(&args);
/* Disallow write access if the PTE is not writable. */
if (!writable &&
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index 87d142c1f932..d9ff676bf48d 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -9,6 +9,8 @@ config TSM_REPORTS
source "drivers/virt/coco/efi_secret/Kconfig"
+source "drivers/virt/coco/pkvm-guest/Kconfig"
+
source "drivers/virt/coco/sev-guest/Kconfig"
source "drivers/virt/coco/tdx-guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index 18c1aba5edb7..b69c30c1c720 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -4,5 +4,6 @@
#
obj-$(CONFIG_TSM_REPORTS) += tsm.o
obj-$(CONFIG_EFI_SECRET) += efi_secret/
+obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
diff --git a/drivers/virt/coco/pkvm-guest/Kconfig b/drivers/virt/coco/pkvm-guest/Kconfig
new file mode 100644
index 000000000000..d2f344f1f98f
--- /dev/null
+++ b/drivers/virt/coco/pkvm-guest/Kconfig
@@ -0,0 +1,10 @@
+config ARM_PKVM_GUEST
+ bool "Arm pKVM protected guest driver"
+ depends on ARM64
+ help
+ Protected guests running under the pKVM hypervisor on arm64
+ are isolated from the host and must issue hypercalls to enable
+ interaction with virtual devices. This driver implements
+ support for probing and issuing these hypercalls.
+
+ If unsure, say 'N'.
diff --git a/drivers/virt/coco/pkvm-guest/Makefile b/drivers/virt/coco/pkvm-guest/Makefile
new file mode 100644
index 000000000000..4bee24579423
--- /dev/null
+++ b/drivers/virt/coco/pkvm-guest/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ARM_PKVM_GUEST) += arm-pkvm-guest.o
diff --git a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
new file mode 100644
index 000000000000..56a3859dda8a
--- /dev/null
+++ b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for the hypercall interface exposed to protected guests by
+ * pKVM.
+ *
+ * Author: Will Deacon <will@kernel.org>
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/array_size.h>
+#include <linux/io.h>
+#include <linux/mem_encrypt.h>
+#include <linux/mm.h>
+#include <linux/pgtable.h>
+
+#include <asm/hypervisor.h>
+
+static size_t pkvm_granule;
+
+static int arm_smccc_do_one_page(u32 func_id, phys_addr_t phys)
+{
+ phys_addr_t end = phys + PAGE_SIZE;
+
+ while (phys < end) {
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EPERM;
+
+ phys += pkvm_granule;
+ }
+
+ return 0;
+}
+
+static int __set_memory_range(u32 func_id, unsigned long start, int numpages)
+{
+ void *addr = (void *)start, *end = addr + numpages * PAGE_SIZE;
+
+ while (addr < end) {
+ int err;
+
+ err = arm_smccc_do_one_page(func_id, virt_to_phys(addr));
+ if (err)
+ return err;
+
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int pkvm_set_memory_encrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID,
+ addr, numpages);
+}
+
+static int pkvm_set_memory_decrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID,
+ addr, numpages);
+}
+
+static const struct arm64_mem_crypt_ops pkvm_crypt_ops = {
+ .encrypt = pkvm_set_memory_encrypted,
+ .decrypt = pkvm_set_memory_decrypted,
+};
+
+static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size,
+ pgprot_t *prot)
+{
+ phys_addr_t end;
+ pteval_t protval = pgprot_val(*prot);
+
+ /*
+ * We only expect MMIO emulation for regions mapped with device
+ * attributes.
+ */
+ if (protval != PROT_DEVICE_nGnRE && protval != PROT_DEVICE_nGnRnE)
+ return 0;
+
+ phys = PAGE_ALIGN_DOWN(phys);
+ end = phys + PAGE_ALIGN(size);
+
+ while (phys < end) {
+ const int func_id = ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID;
+ int err;
+
+ err = arm_smccc_do_one_page(func_id, phys);
+ if (err)
+ return err;
+
+ phys += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+void pkvm_init_hyp_services(void)
+{
+ int i;
+ struct arm_smccc_res res;
+ const u32 funcs[] = {
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO,
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE,
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(funcs); ++i) {
+ if (!kvm_arm_hyp_service_available(funcs[i]))
+ return;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID,
+ 0, 0, 0, &res);
+ if (res.a0 > PAGE_SIZE) /* Includes error codes */
+ return;
+
+ pkvm_granule = res.a0;
+ arm64_mem_crypt_ops_register(&pkvm_crypt_ops);
+
+ if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD))
+ arm64_ioremap_prot_hook_register(&mmio_guard_ioremap_hook);
+}
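
Once pkvm_init_hyp_services() has registered the crypt ops, the generic arm64 set_memory_encrypted()/set_memory_decrypted() helpers resolve to the MEM_UNSHARE/MEM_SHARE hypercalls above. A hedged usage sketch for sharing a single page with the host:

static int share_one_page(void)
{
	unsigned long page = __get_free_page(GFP_KERNEL);
	int err;

	if (!page)
		return -ENOMEM;

	err = set_memory_decrypted(page, 1);	/* MEM_SHARE: host may access */
	if (err) {
		free_page(page);
		return err;
	}

	/* ... exchange data with the host ... */

	err = set_memory_encrypted(page, 1);	/* MEM_UNSHARE: private again */
	if (!err)
		free_page(page);
	return err;
}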
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 6fc7884ea0a1..89754b019be2 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -114,7 +114,7 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
*/
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
- dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
+ dev_alert(snp_dev->dev, "Disabling VMPCK%d communication key to prevent IV reuse.\n",
vmpck_id);
memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
snp_dev->vmpck = NULL;
@@ -291,44 +291,45 @@ static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_guest_msg *resp = &snp_dev->secret_response;
- struct snp_guest_msg *req = &snp_dev->secret_request;
- struct snp_guest_msg_hdr *req_hdr = &req->hdr;
- struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
+ struct snp_guest_msg *resp_msg = &snp_dev->secret_response;
+ struct snp_guest_msg *req_msg = &snp_dev->secret_request;
+ struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
+ struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
- dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
- resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
+ pr_debug("response [seqno %lld type %d version %d sz %d]\n",
+ resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
+ resp_msg_hdr->msg_sz);
/* Copy response from shared memory to encrypted memory. */
- memcpy(resp, snp_dev->response, sizeof(*resp));
+ memcpy(resp_msg, snp_dev->response, sizeof(*resp_msg));
/* Verify that the sequence counter is incremented by 1 */
- if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
+ if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
return -EBADMSG;
/* Verify response message type and version number. */
- if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
- resp_hdr->msg_version != req_hdr->msg_version)
+ if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
+ resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
return -EBADMSG;
/*
* If the message size is greater than our buffer length then return
* an error.
*/
- if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
+ if (unlikely((resp_msg_hdr->msg_sz + crypto->a_len) > sz))
return -EBADMSG;
/* Decrypt the payload */
- return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
+ return dec_payload(snp_dev, resp_msg, payload, resp_msg_hdr->msg_sz + crypto->a_len);
}
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
void *payload, size_t sz)
{
- struct snp_guest_msg *req = &snp_dev->secret_request;
- struct snp_guest_msg_hdr *hdr = &req->hdr;
+ struct snp_guest_msg *msg = &snp_dev->secret_request;
+ struct snp_guest_msg_hdr *hdr = &msg->hdr;
- memset(req, 0, sizeof(*req));
+ memset(msg, 0, sizeof(*msg));
hdr->algo = SNP_AEAD_AES_256_GCM;
hdr->hdr_version = MSG_HDR_VER;
@@ -343,10 +344,10 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
if (!hdr->msg_seqno)
return -ENOSR;
- dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
- hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
+ pr_debug("request [seqno %lld type %d version %d sz %d]\n",
+ hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
- return __enc_payload(snp_dev, req, payload, sz);
+ return __enc_payload(snp_dev, msg, payload, sz);
}
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
@@ -495,8 +496,8 @@ struct snp_req_resp {
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_report_req *req = &snp_dev->req.report;
- struct snp_report_resp *resp;
+ struct snp_report_req *report_req = &snp_dev->req.report;
+ struct snp_report_resp *report_resp;
int rc, resp_len;
lockdep_assert_held(&snp_cmd_mutex);
@@ -504,7 +505,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
- if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
return -EFAULT;
/*
@@ -512,30 +513,29 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
- resp_len = sizeof(resp->data) + crypto->a_len;
- resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!resp)
+ resp_len = sizeof(report_resp->data) + crypto->a_len;
+ report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
+ if (!report_resp)
return -ENOMEM;
- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
- SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
- resp_len);
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ,
+ report_req, sizeof(*report_req), report_resp->data, resp_len);
if (rc)
goto e_free;
- if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
+ if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
rc = -EFAULT;
e_free:
- kfree(resp);
+ kfree(report_resp);
return rc;
}
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_derived_key_resp resp = {0};
+ struct snp_derived_key_resp derived_key_resp = {0};
int rc, resp_len;
/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
u8 buf[64 + 16];
@@ -550,25 +550,27 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
- resp_len = sizeof(resp.data) + crypto->a_len;
+ resp_len = sizeof(derived_key_resp.data) + crypto->a_len;
if (sizeof(buf) < resp_len)
return -ENOMEM;
- if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
+ sizeof(*derived_key_req)))
return -EFAULT;
- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
- SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_KEY_REQ,
+ derived_key_req, sizeof(*derived_key_req), buf, resp_len);
if (rc)
return rc;
- memcpy(resp.data, buf, sizeof(resp.data));
- if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
+ memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
+ if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
+ sizeof(derived_key_resp)))
rc = -EFAULT;
/* The response buffer contains the sensitive data, explicitly clear it. */
memzero_explicit(buf, sizeof(buf));
- memzero_explicit(&resp, sizeof(resp));
+ memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
return rc;
}
@@ -576,9 +578,9 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
struct snp_req_resp *io)
{
- struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_report_resp *resp;
+ struct snp_report_resp *report_resp;
int ret, npages = 0, resp_len;
sockptr_t certs_address;
@@ -587,22 +589,22 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
- if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
+ if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
return -EFAULT;
/* caller does not want certificate data */
- if (!req->certs_len || !req->certs_address)
+ if (!report_req->certs_len || !report_req->certs_address)
goto cmd;
- if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
- !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
+ !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
return -EINVAL;
if (sockptr_is_kernel(io->resp_data)) {
- certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
+ certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
} else {
- certs_address = USER_SOCKPTR((void __user *)req->certs_address);
- if (!access_ok(certs_address.user, req->certs_len))
+ certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
+ if (!access_ok(certs_address.user, report_req->certs_len))
return -EFAULT;
}
@@ -612,45 +614,45 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
	 * the host. If the host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(snp_dev->certs_data, 0, req->certs_len);
- npages = req->certs_len >> PAGE_SHIFT;
+ memset(snp_dev->certs_data, 0, report_req->certs_len);
+ npages = report_req->certs_len >> PAGE_SHIFT;
cmd:
/*
* The intermediate response buffer is used while decrypting the
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
- resp_len = sizeof(resp->data) + crypto->a_len;
- resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!resp)
+ resp_len = sizeof(report_resp->data) + crypto->a_len;
+ report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
+ if (!report_resp)
return -ENOMEM;
snp_dev->input.data_npages = npages;
- ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
- SNP_MSG_REPORT_REQ, &req->data,
- sizeof(req->data), resp->data, resp_len);
+ ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ,
+ &report_req->data, sizeof(report_req->data),
+ report_resp->data, resp_len);
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+ report_req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
- if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
+ if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
ret = -EFAULT;
}
if (ret)
goto e_free;
- if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, report_req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
- if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
+ if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
ret = -EFAULT;
e_free:
- kfree(resp);
+ kfree(report_resp);
return ret;
}
@@ -1090,6 +1092,8 @@ static int __init sev_guest_probe(struct platform_device *pdev)
void __iomem *mapping;
int ret;
+ BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE);
+
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;
@@ -1115,13 +1119,13 @@ static int __init sev_guest_probe(struct platform_device *pdev)
ret = -EINVAL;
snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
if (!snp_dev->vmpck) {
- dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
+ dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id);
goto e_unmap;
}
/* Verify that VMPCK is not zero. */
if (is_vmpck_empty(snp_dev)) {
- dev_err(dev, "vmpck id %d is null\n", vmpck_id);
+ dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id);
goto e_unmap;
}
@@ -1172,7 +1176,7 @@ static int __init sev_guest_probe(struct platform_device *pdev)
if (ret)
goto e_free_cert_data;
- dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
+ dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id);
return 0;
e_free_cert_data:
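
The BUILD_BUG_ON() added to sev_guest_probe() above turns an oversized
guest message into a build failure instead of a runtime surprise, since
the message must fit in the single page shared with the hypervisor. A
standalone C11 model of the same check using _Static_assert; the struct
sizes below are illustrative, not the real SNP layout:

#include <stdint.h>

#define PAGE_SIZE 4096                          /* assumption: 4 KiB pages */

struct snp_guest_msg_hdr { uint8_t bytes[96]; };  /* illustrative size */
struct snp_guest_msg {
        struct snp_guest_msg_hdr hdr;
        uint8_t payload[4000];                  /* illustrative size */
};

/* Userspace analogue of BUILD_BUG_ON(cond): assert the negation. */
_Static_assert(sizeof(struct snp_guest_msg) <= PAGE_SIZE,
               "snp_guest_msg must fit in one page");

int main(void) { return 0; }
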
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index bc1f962e483b..b9095751e43b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -127,10 +127,12 @@ static void __virtio_config_changed(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- if (!dev->config_enabled)
+ if (!dev->config_core_enabled || dev->config_driver_disabled)
dev->config_change_pending = true;
- else if (drv && drv->config_changed)
+ else if (drv && drv->config_changed) {
drv->config_changed(dev);
+ dev->config_change_pending = false;
+ }
}
void virtio_config_changed(struct virtio_device *dev)
@@ -143,20 +145,51 @@ void virtio_config_changed(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_disable - disable config change reporting by drivers
+ * @dev: the virtio device
+ *
+ * This is only allowed to be called by a driver and disabling can't
+ * be nested.
+ */
+void virtio_config_driver_disable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_driver_disabled = true;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
-static void virtio_config_enable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_enable - enable config change reporting by drivers
+ * @dev: the virtio device
+ *
+ * This is only allowed to be called by a driver and enabling can't
+ * be nested.
+ */
+void virtio_config_driver_enable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = true;
+ dev->config_driver_disabled = false;
+ if (dev->config_change_pending)
+ __virtio_config_changed(dev);
+ spin_unlock_irq(&dev->config_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_config_driver_enable);
+
+static void virtio_config_core_disable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = false;
+ spin_unlock_irq(&dev->config_lock);
+}
+
+static void virtio_config_core_enable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = true;
if (dev->config_change_pending)
__virtio_config_changed(dev);
- dev->config_change_pending = false;
spin_unlock_irq(&dev->config_lock);
}
@@ -316,7 +349,7 @@ static int virtio_dev_probe(struct device *_d)
if (drv->scan)
drv->scan(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
@@ -331,7 +364,7 @@ static void virtio_dev_remove(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
drv->remove(dev);
@@ -443,7 +476,7 @@ int register_virtio_device(struct virtio_device *dev)
goto out_ida_remove;
spin_lock_init(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_core_enabled = false;
dev->config_change_pending = false;
INIT_LIST_HEAD(&dev->vqs);
@@ -500,14 +533,14 @@ int virtio_device_freeze(struct virtio_device *dev)
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
if (drv && drv->freeze) {
ret = drv->freeze(dev);
if (ret) {
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return ret;
}
}
@@ -557,7 +590,7 @@ int virtio_device_restore(struct virtio_device *dev)
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
virtio_device_ready(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
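
The virtio core change above splits one gate into two: a core-owned gate
toggled on probe/remove/freeze/restore, and a driver-owned mask a driver
may set around a device reset. A change arriving while either gate blocks
delivery is latched in config_change_pending and replayed when reporting
is re-enabled, and the pending flag is now cleared only when a callback is
actually delivered. A standalone C model of the latch (not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct vdev {
        bool config_core_enabled;
        bool config_driver_disabled;
        bool config_change_pending;
};

static void driver_callback(void) { puts("config_changed() delivered"); }

static void config_changed(struct vdev *d)
{
        if (!d->config_core_enabled || d->config_driver_disabled) {
                d->config_change_pending = true;        /* latch it */
        } else {
                driver_callback();
                d->config_change_pending = false;       /* consumed */
        }
}

static void driver_enable(struct vdev *d)
{
        d->config_driver_disabled = false;
        if (d->config_change_pending)
                config_changed(d);                      /* replay */
}

int main(void)
{
        struct vdev d = { .config_core_enabled = true };

        d.config_driver_disabled = true;        /* driver masks around reset */
        config_changed(&d);                     /* arrives masked: latched */
        driver_enable(&d);                      /* replayed here */
        return 0;
}
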
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 54469277ca30..b36d2803674e 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -355,6 +355,8 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
unsigned int idx = 0;
+ unsigned int zid;
+ unsigned long stall = 0;
all_vm_events(events);
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
@@ -363,6 +365,22 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
+
+ /* sum all the stall events */
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
+ pages_to_bytes(events[PGSCAN_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
+ pages_to_bytes(events[PGSCAN_DIRECT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_DIRECT]));
#ifdef CONFIG_HUGETLB_PAGE
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
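
The stall sum in the balloon hunk leans on the layout of the vm_event
counters: the per-zone ALLOCSTALL_* entries are contiguous and follow the
zone enum order, so ALLOCSTALL_NORMAL - ZONE_NORMAL + zid indexes the
counter for zone zid. A standalone model of that indexing; the enum
values here are illustrative, not the kernel's:

#include <stdio.h>

enum zone { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };
enum vm_event {
        ALLOCSTALL_DMA = 10, ALLOCSTALL_DMA32, ALLOCSTALL_NORMAL,
        ALLOCSTALL_MOVABLE, NR_VM_EVENT_ITEMS = 20,
};

int main(void)
{
        unsigned long events[NR_VM_EVENT_ITEMS] = {0};
        unsigned long stall = 0;

        events[ALLOCSTALL_DMA32] = 3;
        events[ALLOCSTALL_NORMAL] = 7;

        /* same walk as update_balloon_vm_stats(): offset from the
         * NORMAL counter by (zid - ZONE_NORMAL) */
        for (int zid = 0; zid < MAX_NR_ZONES; zid++)
                stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];

        printf("total allocation stalls: %lu\n", stall);  /* 10 */
        return 0;
}
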
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index bae1d97cce89..684b9fe84fff 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -953,6 +953,15 @@ config RENESAS_RZG2LWDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system.
+config RENESAS_RZV2HWDT
+ tristate "Renesas RZ/V2H(P) WDT Watchdog"
+ depends on ARCH_R9A09G057 || COMPILE_TEST
+ depends on PM || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas RZ/V2H(P) SoCs. These watchdogs can be used to reset a system.
+
config ASPEED_WATCHDOG
tristate "Aspeed BMC watchdog support"
depends on ARCH_ASPEED || COMPILE_TEST
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index b51030f035a6..ab6f2b41e38e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
+obj-$(CONFIG_RENESAS_RZV2HWDT) += rzv2h_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 264857d314da..35b358bcf94c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -563,8 +563,8 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
}
ident.firmware_version = p->iTCO_version;
- p->wddev.info = &ident,
- p->wddev.ops = &iTCO_wdt_ops,
+ p->wddev.info = &ident;
+ p->wddev.ops = &iTCO_wdt_ops;
p->wddev.bootstatus = 0;
p->wddev.timeout = WATCHDOG_TIMEOUT;
watchdog_set_nowayout(&p->wddev, nowayout);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 42e8ffae18dd..4b3a192ee3e8 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -379,7 +379,7 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
/* Disable watchdog if it is active or non-active but still running */
-static int __maybe_unused imx2_wdt_suspend(struct device *dev)
+static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -404,7 +404,7 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
}
/* Enable watchdog and configure it if necessary */
-static int __maybe_unused imx2_wdt_resume(struct device *dev)
+static int imx2_wdt_resume(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -435,8 +435,8 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
- imx2_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
+ imx2_wdt_resume);
static struct imx2_wdt_data imx_wdt = {
.wdw_supported = true,
@@ -476,7 +476,7 @@ static struct platform_driver imx2_wdt_driver = {
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
- .pm = &imx2_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&imx2_wdt_pm_ops),
.of_match_table = imx2_wdt_dt_ids,
},
};
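
The imx2_wdt conversion swaps SIMPLE_DEV_PM_OPS plus __maybe_unused for
DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(). The trick is that
pm_sleep_ptr() folds to NULL when CONFIG_PM_SLEEP is off while still
syntactically referencing its argument, so the compiler can drop the
callbacks without an unused-function warning. A userspace model of the
mechanism, simplified from the kernel's PTR_IF():

#include <stdio.h>

#define CONFIG_PM_SLEEP 0               /* pretend CONFIG_PM_SLEEP=n */
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define pm_sleep_ptr(ptr) PTR_IF(CONFIG_PM_SLEEP, ptr)

struct dev_pm_ops { int (*suspend)(void); int (*resume)(void); };

static int my_suspend(void) { puts("suspend"); return 0; }
static int my_resume(void)  { puts("resume");  return 0; }

/* Referenced only through pm_sleep_ptr(), so when the expression folds
 * to NULL the ops table and both callbacks are discardable, yet nothing
 * is "unused" as far as warnings are concerned. */
static const struct dev_pm_ops my_pm_ops = {
        .suspend = my_suspend,
        .resume  = my_resume,
};

int main(void)
{
        const struct dev_pm_ops *ops = pm_sleep_ptr(&my_pm_ops);

        if (ops && ops->suspend)
                ops->suspend();
        else
                puts("PM sleep compiled out: no callbacks registered");
        return 0;
}
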
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 94914a22daff..0f13a3053357 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -55,6 +55,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
struct imx_wdt_hw_feature {
bool prescaler_enable;
+ bool post_rcs_wait;
u32 wdog_clock_rate;
};
@@ -62,7 +63,6 @@ struct imx7ulp_wdt_device {
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
- bool post_rcs_wait;
bool ext_reset;
const struct imx_wdt_hw_feature *hw;
};
@@ -95,7 +95,7 @@ static int imx7ulp_wdt_wait_rcs(struct imx7ulp_wdt_device *wdt)
ret = -ETIMEDOUT;
/* Wait 2.5 clocks after RCS done */
- if (wdt->post_rcs_wait)
+ if (wdt->hw->post_rcs_wait)
usleep_range(wait_min, wait_min + 2000);
return ret;
@@ -334,15 +334,6 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
/* The WDOG may need to do external reset through dedicated pin */
imx7ulp_wdt->ext_reset = of_property_read_bool(dev->of_node, "fsl,ext-reset-output");
- imx7ulp_wdt->post_rcs_wait = true;
- if (of_device_is_compatible(dev->of_node,
- "fsl,imx8ulp-wdt")) {
- dev_info(dev, "imx8ulp wdt probe\n");
- imx7ulp_wdt->post_rcs_wait = false;
- } else {
- dev_info(dev, "imx7ulp wdt probe\n");
- }
-
wdog = &imx7ulp_wdt->wdd;
wdog->info = &imx7ulp_wdt_info;
wdog->ops = &imx7ulp_wdt_ops;
@@ -403,6 +394,12 @@ static const struct dev_pm_ops imx7ulp_wdt_pm_ops = {
static const struct imx_wdt_hw_feature imx7ulp_wdt_hw = {
.prescaler_enable = false,
.wdog_clock_rate = 1000,
+ .post_rcs_wait = true,
+};
+
+static const struct imx_wdt_hw_feature imx8ulp_wdt_hw = {
+ .prescaler_enable = false,
+ .wdog_clock_rate = 1000,
};
static const struct imx_wdt_hw_feature imx93_wdt_hw = {
@@ -411,8 +408,8 @@ static const struct imx_wdt_hw_feature imx93_wdt_hw = {
};
static const struct of_device_id imx7ulp_wdt_dt_ids[] = {
- { .compatible = "fsl,imx8ulp-wdt", .data = &imx7ulp_wdt_hw, },
{ .compatible = "fsl,imx7ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx8ulp-wdt", .data = &imx8ulp_wdt_hw, },
{ .compatible = "fsl,imx93-wdt", .data = &imx93_wdt_hw, },
{ /* sentinel */ }
};
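
The imx7ulp change moves the post-RCS wait out of a runtime flag set by
compatible-string checks in probe and into the per-SoC feature table
already selected through the OF match data. A standalone model of that
table-driven pattern, with plain string matching standing in for
of_device_id lookup:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct wdt_hw_feature {
        bool post_rcs_wait;
        unsigned int wdog_clock_rate;
};

static const struct wdt_hw_feature imx7ulp_hw = { true,  1000 };
static const struct wdt_hw_feature imx8ulp_hw = { false, 1000 };

struct of_match { const char *compatible; const struct wdt_hw_feature *data; };

static const struct of_match wdt_matches[] = {
        { "fsl,imx7ulp-wdt", &imx7ulp_hw },
        { "fsl,imx8ulp-wdt", &imx8ulp_hw },
        { NULL, NULL },
};

static const struct wdt_hw_feature *match_data(const char *compatible)
{
        for (const struct of_match *m = wdt_matches; m->compatible; m++)
                if (!strcmp(m->compatible, compatible))
                        return m->data;
        return NULL;
}

int main(void)
{
        const struct wdt_hw_feature *hw = match_data("fsl,imx8ulp-wdt");

        printf("post_rcs_wait=%d\n", hw->post_rcs_wait); /* 0 on i.MX8ULP */
        return 0;
}
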
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index e51fe1b78518..1280b9b1ec2a 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -56,6 +56,25 @@ static int imx_sc_wdt_ping(struct watchdog_device *wdog)
return 0;
}
+static bool imx_sc_wdt_is_running(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ /* Already enabled (SC_TIMER_ERR_BUSY)? */
+ if (res.a0 == SC_TIMER_ERR_BUSY)
+ return true;
+
+	/* Undo only if we were the one who (successfully) enabled the WDT */
+ if (!res.a0)
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_STOP_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return false;
+}
+
static int imx_sc_wdt_start(struct watchdog_device *wdog)
{
struct arm_smccc_res res;
@@ -183,6 +202,9 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (imx_sc_wdt_is_running())
+ set_bit(WDOG_HW_RUNNING, &wdog->status);
+
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
@@ -216,29 +238,6 @@ register_device:
return devm_watchdog_register_device(dev, wdog);
}
-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
-{
- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
-
- if (watchdog_active(&imx_sc_wdd->wdd))
- imx_sc_wdt_stop(&imx_sc_wdd->wdd);
-
- return 0;
-}
-
-static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
-{
- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
-
- if (watchdog_active(&imx_sc_wdd->wdd))
- imx_sc_wdt_start(&imx_sc_wdd->wdd);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
- imx_sc_wdt_suspend, imx_sc_wdt_resume);
-
static const struct of_device_id imx_sc_wdt_dt_ids[] = {
{ .compatible = "fsl,imx-sc-wdt", },
{ /* sentinel */ }
@@ -250,7 +249,6 @@ static struct platform_driver imx_sc_wdt_driver = {
.driver = {
.name = "imx-sc-wdt",
.of_match_table = imx_sc_wdt_dt_ids,
- .pm = &imx_sc_wdt_pm_ops,
},
};
module_platform_driver(imx_sc_wdt_driver);
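
imx_sc_wdt_is_running() has to probe by side effect: the firmware offers
no query call, so the driver attempts a start, reads a BUSY reply as
"already running", and stops the watchdog again only when its own start
was the one that succeeded. Setting WDOG_HW_RUNNING then tells the
watchdog core to keep the hardware pinged until userspace opens the
device. A standalone model of the probe; the error value is illustrative:

#include <stdbool.h>
#include <stdio.h>

enum fw_err { FW_OK = 0, FW_ERR_BUSY = 10 };    /* illustrative codes */

static bool hw_running;

static enum fw_err fw_start(void)
{
        if (hw_running)
                return FW_ERR_BUSY;
        hw_running = true;
        return FW_OK;
}

static void fw_stop(void) { hw_running = false; }

static bool wdt_is_running(void)
{
        enum fw_err err = fw_start();

        if (err == FW_ERR_BUSY)
                return true;    /* someone, e.g. the bootloader, armed it */
        if (err == FW_OK)
                fw_stop();      /* undo only our own successful start */
        return false;
}

int main(void)
{
        hw_running = true;      /* pretend the bootloader armed the watchdog */
        printf("running=%d\n", wdt_is_running());  /* 1 */
        printf("still armed=%d\n", hw_running);    /* 1: probe left it armed */
        return 0;
}
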
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 8d71f6a2236b..756d262dc580 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -20,9 +20,8 @@
#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/platform_data/intel-mid_wdt.h>
-
-#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/x86/intel-mid_wdt.h>
+#include <linux/platform_data/x86/intel_scu_ipc.h>
#define IPC_WATCHDOG 0xf8
diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
index 098bb141a521..298089d45ab8 100644
--- a/drivers/watchdog/marvell_gti_wdt.c
+++ b/drivers/watchdog/marvell_gti_wdt.c
@@ -285,8 +285,8 @@ static int gti_wdt_probe(struct platform_device *pdev)
}
wdog_dev = &priv->wdev;
- wdog_dev->info = &gti_wdt_ident,
- wdog_dev->ops = &gti_wdt_ops,
+ wdog_dev->info = &gti_wdt_ident;
+ wdog_dev->ops = &gti_wdt_ops;
wdog_dev->parent = dev;
/*
* Watchdog counter is 24 bit where lower 8 bits are zeros
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index f3fcbeb0852c..007ed139ab96 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -218,7 +218,7 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
return err;
}
- wdt->wdev.ops = &pm8916_wdt_ops,
+ wdt->wdev.ops = &pm8916_wdt_ops;
wdt->wdev.parent = dev;
wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
wdt->wdev.max_timeout = PM8916_WDT_MAX_TIMEOUT;
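
The iTCO, Marvell GTI, and PM8916 hunks above all fix the same slip: a
statement ended with ',' instead of ';'. The comma operator makes the
original legal C with identical behavior in these cases, which is exactly
why the typo survives review. A standalone illustration:

#include <stdio.h>

struct wdev { const void *info; const void *ops; };

static const int ident = 1, ops_tbl = 2;

int main(void)
{
        struct wdev w;

        w.info = &ident,        /* comma operator: still one statement... */
        w.ops  = &ops_tbl;      /* ...continuing onto this line */

        printf("info=%p ops=%p\n", (void *)w.info, (void *)w.ops);
        return 0;
}
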
diff --git a/drivers/watchdog/rzv2h_wdt.c b/drivers/watchdog/rzv2h_wdt.c
new file mode 100644
index 000000000000..1d1b17312747
--- /dev/null
+++ b/drivers/watchdog/rzv2h_wdt.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) WDT Watchdog Driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corporation.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/units.h>
+#include <linux/watchdog.h>
+
+#define WDTRR 0x00 /* WDT Refresh Register RW, 8 */
+#define WDTCR 0x02 /* WDT Control Register RW, 16 */
+#define WDTSR 0x04 /* WDT Status Register RW, 16 */
+#define WDTRCR 0x06 /* WDT Reset Control Register RW, 8 */
+
+#define WDTCR_TOPS_1024 0x00
+#define WDTCR_TOPS_16384 0x03
+
+#define WDTCR_CKS_CLK_1 0x00
+#define WDTCR_CKS_CLK_256 0x50
+
+#define WDTCR_RPES_0 0x300
+#define WDTCR_RPES_75 0x000
+
+#define WDTCR_RPSS_25 0x00
+#define WDTCR_RPSS_100 0x3000
+
+#define WDTRCR_RSTIRQS BIT(7)
+
+#define MAX_TIMEOUT_CYCLES 16384
+#define CLOCK_DIV_BY_256 256
+
+#define WDT_DEFAULT_TIMEOUT 60U
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rzv2h_wdt_priv {
+ void __iomem *base;
+ struct clk *pclk;
+ struct clk *oscclk;
+ struct reset_control *rstc;
+ struct watchdog_device wdev;
+};
+
+static int rzv2h_wdt_ping(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /*
+ * The down-counter is refreshed and starts counting operation on
+ * a write of the values 00h and FFh to the WDTRR register.
+ */
+ writeb(0x0, priv->base + WDTRR);
+ writeb(0xFF, priv->base + WDTRR);
+
+ return 0;
+}
+
+static void rzv2h_wdt_setup(struct watchdog_device *wdev, u16 wdtcr)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Configure the timeout, clock division ratio, and window start and end positions. */
+ writew(wdtcr, priv->base + WDTCR);
+
+ /* Enable interrupt output to the ICU. */
+ writeb(0, priv->base + WDTRCR);
+
+ /* Clear underflow flag and refresh error flag. */
+ writew(0, priv->base + WDTSR);
+}
+
+static int rzv2h_wdt_start(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(wdev->parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ pm_runtime_put(wdev->parent);
+ return ret;
+ }
+
+ /* delay to handle clock halt after de-assert operation */
+ udelay(3);
+
+ /*
+ * WDTCR
+ * - CKS[7:4] - Clock Division Ratio Select - 0101b: oscclk/256
+ * - RPSS[13:12] - Window Start Position Select - 11b: 100%
+ * - RPES[9:8] - Window End Position Select - 11b: 0%
+ * - TOPS[1:0] - Timeout Period Select - 11b: 16384 cycles (3FFFh)
+ */
+ rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_256 | WDTCR_RPSS_100 |
+ WDTCR_RPES_0 | WDTCR_TOPS_16384);
+
+ /*
+ * Down counting starts after writing the sequence 00h -> FFh to the
+ * WDTRR register. Hence, call the ping operation after loading the counter.
+ */
+ rzv2h_wdt_ping(wdev);
+
+ return 0;
+}
+
+static int rzv2h_wdt_stop(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ ret = reset_control_assert(priv->rstc);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put(wdev->parent);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct watchdog_info rzv2h_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .identity = "Renesas RZ/V2H WDT Watchdog",
+};
+
+static int rzv2h_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ if (!watchdog_active(wdev)) {
+ ret = clk_enable(priv->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(priv->oscclk);
+ if (ret) {
+ clk_disable(priv->pclk);
+ return ret;
+ }
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ clk_disable(priv->oscclk);
+ clk_disable(priv->pclk);
+ return ret;
+ }
+ } else {
+ /*
+ * Writing to the WDT Control Register (WDTCR) or WDT Reset
+	 * Control Register (WDTRCR) is possible only once between the
+ * release from the reset state and the first refresh operation.
+ * Therefore, issue a reset if the watchdog is active.
+ */
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+ }
+
+ /* delay to handle clock halt after de-assert operation */
+ udelay(3);
+
+ /*
+ * WDTCR
+ * - CKS[7:4] - Clock Division Ratio Select - 0000b: oscclk/1
+ * - RPSS[13:12] - Window Start Position Select - 00b: 25%
+ * - RPES[9:8] - Window End Position Select - 00b: 75%
+ * - TOPS[1:0] - Timeout Period Select - 00b: 1024 cycles (03FFh)
+ */
+ rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_1 | WDTCR_RPSS_25 |
+ WDTCR_RPES_75 | WDTCR_TOPS_1024);
+
+ rzv2h_wdt_ping(wdev);
+
+ /* wait for underflow to trigger... */
+ udelay(5);
+
+ return 0;
+}
+
+static const struct watchdog_ops rzv2h_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rzv2h_wdt_start,
+ .stop = rzv2h_wdt_stop,
+ .ping = rzv2h_wdt_ping,
+ .restart = rzv2h_wdt_restart,
+};
+
+static int rzv2h_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzv2h_wdt_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->pclk = devm_clk_get_prepared(&pdev->dev, "pclk");
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
+
+ priv->oscclk = devm_clk_get_prepared(&pdev->dev, "oscclk");
+ if (IS_ERR(priv->oscclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->oscclk), "no oscclk");
+
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+ priv->wdev.max_hw_heartbeat_ms = (MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256) /
+ clk_get_rate(priv->oscclk);
+ dev_dbg(dev, "max hw timeout of %dms\n", priv->wdev.max_hw_heartbeat_ms);
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ priv->wdev.min_timeout = 1;
+ priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
+ priv->wdev.info = &rzv2h_wdt_ident;
+ priv->wdev.ops = &rzv2h_wdt_ops;
+ priv->wdev.parent = dev;
+ watchdog_set_drvdata(&priv->wdev, priv);
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_stop_on_unregister(&priv->wdev);
+
+ ret = watchdog_init_timeout(&priv->wdev, 0, dev);
+ if (ret)
+ dev_warn(dev, "Specified timeout invalid, using default");
+
+ return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+}
+
+static const struct of_device_id rzv2h_wdt_ids[] = {
+ { .compatible = "renesas,r9a09g057-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2h_wdt_ids);
+
+static struct platform_driver rzv2h_wdt_driver = {
+ .driver = {
+ .name = "rzv2h_wdt",
+ .of_match_table = rzv2h_wdt_ids,
+ },
+ .probe = rzv2h_wdt_probe,
+};
+module_platform_driver(rzv2h_wdt_driver);
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2H(P) WDT Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d5989871dd5d..f7d6f47971fd 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -177,8 +177,8 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ depends on ARCH_HAS_DMA_OPS
depends on XEN_PV || ARM || ARM64
- select DMA_OPS
select SWIOTLB
config XEN_PCI_STUB
@@ -348,10 +348,10 @@ config XEN_GRANT_DMA_IOMMU
config XEN_GRANT_DMA_OPS
bool
- select DMA_OPS
config XEN_VIRTIO
bool "Xen virtio support"
+ depends on ARCH_HAS_DMA_OPS
depends on VIRTIO
select XEN_GRANT_DMA_OPS
select XEN_GRANT_DMA_IOMMU if OF
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 72d4e3f193af..a2facd8f7e51 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -44,15 +44,11 @@ static int xen_add_device(struct device *dev)
}
#endif
if (pci_seg_supported) {
- struct {
- struct physdev_pci_device_add add;
- uint32_t pxm;
- } add_ext = {
- .add.seg = pci_domain_nr(pci_dev->bus),
- .add.bus = pci_dev->bus->number,
- .add.devfn = pci_dev->devfn
- };
- struct physdev_pci_device_add *add = &add_ext.add;
+ DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
+
+ add->seg = pci_domain_nr(pci_dev->bus);
+ add->bus = pci_dev->bus->number;
+ add->devfn = pci_dev->devfn;
#ifdef CONFIG_ACPI
acpi_handle handle;
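
The xen/pci change replaces a hand-rolled wrapper struct with
DEFINE_RAW_FLEX(), which reserves on-stack storage for a flexible-array
struct plus a fixed count of trailing elements. A standalone model of the
idea, simplified from the kernel helper; the struct is a stand-in for
physdev_pci_device_add, not its real layout:

#include <stdint.h>
#include <stdio.h>

struct pci_device_add {
        uint16_t seg;
        uint8_t  bus, devfn;
        uint32_t optarr[];              /* flexible array member */
};

/* Minimal model of DEFINE_RAW_FLEX(): a union reserving storage for the
 * header plus COUNT trailing elements, exposing a typed pointer NAME. */
#define DEFINE_RAW_FLEX(TYPE, NAME, MEMBER, COUNT)                      \
        union {                                                         \
                unsigned char raw[sizeof(TYPE) +                        \
                        sizeof(((TYPE *)0)->MEMBER[0]) * (COUNT)];      \
                TYPE obj;                                               \
        } NAME##_storage = { .raw = {0} };                              \
        TYPE *NAME = &NAME##_storage.obj

int main(void)
{
        DEFINE_RAW_FLEX(struct pci_device_add, add, optarr, 1);

        add->seg = 0;
        add->bus = 3;
        add->devfn = 0x10;
        add->optarr[0] = 42;            /* the one reserved trailing slot */
        printf("devfn=0x%x opt=%u\n", add->devfn, add->optarr[0]);
        return 0;
}
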
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 9563650dfbaf..54e4f285c0f4 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -959,12 +959,12 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
f = fdget(irqfd->fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto error_kfree;
}
- kirqfd->eventfd = eventfd_ctx_fileget(f.file);
+ kirqfd->eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(kirqfd->eventfd)) {
ret = PTR_ERR(kirqfd->eventfd);
goto error_fd_put;
@@ -995,7 +995,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
* Check if there was an event already pending on the eventfd before we
* registered, and trigger it as if we didn't miss it.
*/
- events = vfs_poll(f.file, &kirqfd->pt);
+ events = vfs_poll(fd_file(f), &kirqfd->pt);
if (events & EPOLLIN)
irqfd_inject(kirqfd);
@@ -1345,12 +1345,12 @@ static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
return -ENOMEM;
f = fdget(ioeventfd->event_fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto error_kfree;
}
- kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
+ kioeventfd->eventfd = eventfd_ctx_fileget(fd_file(f));
fdput(f);
if (IS_ERR(kioeventfd->eventfd)) {
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 35155258a7e2..a337edcf8faf 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
+ /* If buffer is physically aligned, ensure DMA alignment. */
+ if (IS_ALIGNED(p, algn) &&
+ !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+ return 1;
+
for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 1;
@@ -141,7 +147,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
void *ret;
/* Align the allocation to the Xen page size */
- size = 1UL << (order + XEN_PAGE_SHIFT);
+ size = ALIGN(size, XEN_PAGE_SIZE);
ret = (void *)__get_free_pages(flags, get_order(size));
if (!ret)
@@ -173,7 +179,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
int order = get_order(size);
/* Convert the size to actually allocated. */
- size = 1UL << (order + XEN_PAGE_SHIFT);
+ size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
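
The new swiotlb-xen check says: if the guest-physical buffer is aligned
to its allocation order, the machine (bus) address it maps to must carry
the same alignment, otherwise the buffer is treated as straddling and
bounced. A worked example for a 16 KiB (order-2) buffer, with made-up
addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
        unsigned long long algn = 1ULL << (2 + PAGE_SHIFT);  /* 16 KiB */
        unsigned long long phys = 0x40000000;   /* 16 KiB-aligned */
        unsigned long long bus  = 0x7f003000;   /* only 4 KiB-aligned */

        if (IS_ALIGNED(phys, algn) && !IS_ALIGNED(bus, algn))
                puts("alignment lost in bus space: use the bounce buffer");
        return 0;
}
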
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 028a182bcc9e..d32c726f7a12 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -427,12 +427,12 @@ char **xenbus_directory(struct xenbus_transaction t,
path = join(dir, node);
if (IS_ERR(path))
- return (char **)path;
+ return ERR_CAST(path);
strings = xs_single(t, XS_DIRECTORY, path, &len);
kfree(path);
if (IS_ERR(strings))
- return (char **)strings;
+ return ERR_CAST(strings);
return split(strings, len, num);
}
@@ -465,7 +465,7 @@ void *xenbus_read(struct xenbus_transaction t,
path = join(dir, node);
if (IS_ERR(path))
- return (void *)path;
+ return ERR_CAST(path);
ret = xs_single(t, XS_READ, path, len);
kfree(path);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 24fdc74caeba..819c75233235 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -68,17 +68,22 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct p9_fid *fid = rreq->netfs_priv;
+ unsigned long long pos = subreq->start + subreq->transferred;
int total, err;
- total = p9_client_read(fid, subreq->start + subreq->transferred,
- &subreq->io_iter, &err);
+ total = p9_client_read(fid, pos, &subreq->io_iter, &err);
/* if we just extended the file size, any portion not in
* cache won't be on server and is zeroes */
if (subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (pos + total >= i_size_read(rreq->inode))
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
- netfs_subreq_terminated(subreq, err ?: total, false);
+ if (!err)
+ subreq->transferred += total;
+
+ netfs_read_subreq_terminated(subreq, err, false);
}
/**
diff --git a/fs/Kconfig b/fs/Kconfig
index a46b0cbc4d8f..949895cff872 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -288,6 +288,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+ def_bool HUGETLB_PAGE
+ depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
+
config ARCH_HAS_GIGANTIC_PAGE
bool
@@ -382,6 +386,29 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
+config NFS_COMMON_LOCALIO_SUPPORT
+ tristate
+ default n
+ default y if NFSD=y || NFS_FS=y
+ default m if NFSD=m && NFS_FS=m
+ select SUNRPC
+
+config NFS_LOCALIO
+ bool "NFS client and server support for LOCALIO auxiliary protocol"
+ depends on NFSD && NFS_FS
+ select NFS_COMMON_LOCALIO_SUPPORT
+ default n
+ help
+ Some NFS servers support an auxiliary NFS LOCALIO protocol
+ that is not an official part of the NFS protocol.
+
+ This option enables support for the LOCALIO protocol in the
+ kernel's NFS server and client. Enable this to permit local
+ NFS clients to bypass the network when issuing reads and
+ writes to the local NFS server.
+
+ If unsure, say N.
+
config NFS_V4_2_SSC_HELPER
bool
default y if NFS_V4_2
diff --git a/fs/Makefile b/fs/Makefile
index 6ecc9b0a53f2..61679fd587b7 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
obj-$(CONFIG_ZONEFS_FS) += zonefs/
+obj-$(CONFIG_BPF_LSM) += bpf_fs_kfuncs.o
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index a183e213a4a5..21527189e430 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -55,12 +55,11 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
static int adfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 2e612834329a..e8c2c4535cb3 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -14,8 +14,6 @@
/* Ugly macros make the code more pretty. */
-#define GET_END_PTR(st,p,sz) ((st *)((char *)(p)+((sz)-sizeof(st))))
-#define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
#define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
#define AFFS_HEAD(bh) ((struct affs_head *)(bh)->b_data)
diff --git a/fs/affs/amigaffs.h b/fs/affs/amigaffs.h
index 1b973a669d23..da3217ab6adb 100644
--- a/fs/affs/amigaffs.h
+++ b/fs/affs/amigaffs.h
@@ -49,12 +49,13 @@ struct affs_short_date {
struct affs_root_head {
__be32 ptype;
+ /* The following fields are not used, but kept as documentation. */
__be32 spare1;
__be32 spare2;
__be32 hash_size;
__be32 spare3;
__be32 checksum;
- __be32 hashtable[1];
+ __be32 hashtable[];
};
struct affs_root_tail {
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index b2bf7016e1b3..bd40d5f08810 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -17,13 +17,44 @@
#include <linux/iversion.h>
#include "affs.h"
+struct affs_dir_data {
+ unsigned long ino;
+ u64 cookie;
+};
+
static int affs_readdir(struct file *, struct dir_context *);
+static loff_t affs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct affs_dir_data *data = file->private_data;
+
+ return generic_llseek_cookie(file, offset, whence, &data->cookie);
+}
+
+static int affs_dir_open(struct inode *inode, struct file *file)
+{
+ struct affs_dir_data *data;
+
+ data = kzalloc(sizeof(struct affs_dir_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ file->private_data = data;
+ return 0;
+}
+
+static int affs_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
const struct file_operations affs_dir_operations = {
+ .open = affs_dir_open,
.read = generic_read_dir,
- .llseek = generic_file_llseek,
+ .llseek = affs_dir_llseek,
.iterate_shared = affs_readdir,
.fsync = affs_file_fsync,
+ .release = affs_dir_release,
};
/*
@@ -45,6 +76,7 @@ static int
affs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct affs_dir_data *data = file->private_data;
struct super_block *sb = inode->i_sb;
struct buffer_head *dir_bh = NULL;
struct buffer_head *fh_bh = NULL;
@@ -59,7 +91,7 @@ affs_readdir(struct file *file, struct dir_context *ctx)
pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos);
if (ctx->pos < 2) {
- file->private_data = (void *)0;
+ data->ino = 0;
if (!dir_emit_dots(file, ctx))
return 0;
}
@@ -80,8 +112,8 @@ affs_readdir(struct file *file, struct dir_context *ctx)
/* If the directory hasn't changed since the last call to readdir(),
* we can jump directly to where we left off.
*/
- ino = (u32)(long)file->private_data;
- if (ino && inode_eq_iversion(inode, file->f_version)) {
+ ino = data->ino;
+ if (ino && inode_eq_iversion(inode, data->cookie)) {
pr_debug("readdir() left off=%d\n", ino);
goto inside;
}
@@ -131,8 +163,8 @@ inside:
} while (ino);
}
done:
- file->f_version = inode_query_iversion(inode);
- file->private_data = (void *)(long)ino;
+ data->cookie = inode_query_iversion(inode);
+ data->ino = ino;
affs_brelse(fh_bh);
out_brelse_dir:
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 04c018e19602..a5a861dd5223 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -417,12 +417,11 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int affs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -433,12 +432,12 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
static int affs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
/* Clear Archived bit on file writes, as AmigaOS would do */
if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
@@ -648,7 +647,7 @@ static int affs_read_folio_ofs(struct file *file, struct folio *folio)
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
struct folio *folio;
@@ -671,7 +670,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
- *pagep = &folio->page;
+ *foliop = folio;
if (folio_test_uptodate(folio))
return 0;
@@ -687,9 +686,8 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb;
struct buffer_head *bh, *prev_bh;
@@ -882,14 +880,14 @@ affs_truncate(struct inode *inode)
if (inode->i_size > AFFS_I(inode)->mmu_private) {
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
loff_t isize = inode->i_size;
int res;
- res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
+ res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
if (!res)
- res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
+ res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
else
inode->i_size = AFFS_I(inode)->mmu_private;
mark_inode_dirty(inode);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ec1be0091fdb..492d857a3fa0 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "internal.h"
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -242,9 +243,10 @@ static void afs_fetch_data_notify(struct afs_operation *op)
req->error = error;
if (subreq) {
- if (subreq->rreq->origin != NETFS_DIO_READ)
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
+ subreq->rreq->i_size = req->file_size;
+ if (req->pos + req->actual_len >= req->file_size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
+ netfs_read_subreq_terminated(subreq, error, false);
req->subreq = NULL;
} else if (req->done) {
req->done(req);
@@ -262,6 +264,12 @@ static void afs_fetch_data_success(struct afs_operation *op)
afs_fetch_data_notify(op);
}
+static void afs_fetch_data_aborted(struct afs_operation *op)
+{
+ afs_check_for_remote_deletion(op);
+ afs_fetch_data_notify(op);
+}
+
static void afs_fetch_data_put(struct afs_operation *op)
{
op->fetch.req->error = afs_op_error(op);
@@ -272,7 +280,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = {
.issue_afs_rpc = afs_fs_fetch_data,
.issue_yfs_rpc = yfs_fs_fetch_data,
.success = afs_fetch_data_success,
- .aborted = afs_check_for_remote_deletion,
+ .aborted = afs_fetch_data_aborted,
.failed = afs_fetch_data_notify,
.put = afs_fetch_data_put,
};
@@ -294,7 +302,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
op = afs_alloc_operation(req->key, vnode->volume);
if (IS_ERR(op)) {
if (req->subreq)
- netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
+ netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false);
return PTR_ERR(op);
}
@@ -305,14 +313,15 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
return afs_do_sync_operation(op);
}
-static void afs_issue_read(struct netfs_io_subrequest *subreq)
+static void afs_read_worker(struct work_struct *work)
{
+ struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
struct afs_read *fsreq;
fsreq = afs_alloc_read(GFP_NOFS);
if (!fsreq)
- return netfs_subreq_terminated(subreq, -ENOMEM, false);
+ return netfs_read_subreq_terminated(subreq, -ENOMEM, false);
fsreq->subreq = subreq;
fsreq->pos = subreq->start + subreq->transferred;
@@ -321,10 +330,17 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
fsreq->vnode = vnode;
fsreq->iter = &subreq->io_iter;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
afs_fetch_data(fsreq->vnode, fsreq);
afs_put_read(fsreq);
}
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
+{
+ INIT_WORK(&subreq->work, afs_read_worker);
+ queue_work(system_long_wq, &subreq->work);
+}
+
static int afs_symlink_read_folio(struct file *file, struct folio *folio)
{
struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 79cd30775b7a..098fa034a1cc 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -304,6 +304,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
struct afs_vnode_param *vp = &op->file[0];
struct afs_read *req = op->fetch.req;
const __be32 *bp;
+ size_t count_before;
int ret;
_enter("{%u,%zu,%zu/%llu}",
@@ -345,10 +346,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
/* extract the returned data */
case 2:
- _debug("extract data %zu/%llu",
- iov_iter_count(call->iter), req->actual_len);
+ count_before = call->iov_len;
+ _debug("extract data %zu/%llu", count_before, req->actual_len);
ret = afs_extract_data(call, true);
+ if (req->subreq) {
+ req->subreq->transferred += count_before - call->iov_len;
+ netfs_read_subreq_progress(req->subreq, false);
+ }
if (ret < 0)
return ret;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index e959640694c2..34107b55f834 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -89,10 +89,12 @@ static const struct afs_operation_ops afs_store_data_operation = {
*/
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
+ struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];
+
//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
// subreq->max_len = 512 * 1024;
//else
- subreq->max_len = 256 * 1024 * 1024;
+ stream->sreq_max_len = 256 * 1024 * 1024;
}
/*
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index f521e66d3bf6..024227aba4cd 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -355,6 +355,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
struct afs_vnode_param *vp = &op->file[0];
struct afs_read *req = op->fetch.req;
const __be32 *bp;
+ size_t count_before;
int ret;
_enter("{%u,%zu, %zu/%llu}",
@@ -391,10 +392,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
/* extract the returned data */
case 2:
- _debug("extract data %zu/%llu",
- iov_iter_count(call->iter), req->actual_len);
+ count_before = call->iov_len;
+ _debug("extract data %zu/%llu", count_before, req->actual_len);
ret = afs_extract_data(call, true);
+ if (req->subreq) {
+ req->subreq->transferred += count_before - call->iov_len;
+ netfs_read_subreq_progress(req->subreq, false);
+ }
if (ret < 0)
return ret;
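
Both fetch-data delivery loops (afs and yfs) now report progress the same
way: snapshot the iterator's remaining byte count before one extraction
pass and credit the difference to subreq->transferred. A standalone model
of that accounting:

#include <stdio.h>

static size_t remaining = 4096;         /* bytes left in the "iterator" */

static int extract_data(size_t chunk)
{
        if (chunk > remaining)
                chunk = remaining;
        remaining -= chunk;
        return remaining ? -1 : 0;      /* -1: more data still to come */
}

int main(void)
{
        size_t transferred = 0;

        while (1) {
                size_t count_before = remaining;
                int ret = extract_data(1024);

                /* bytes consumed by this pass, as in the hunks above */
                transferred += count_before - remaining;
                printf("transferred=%zu\n", transferred);
                if (ret == 0)
                        break;
        }
        return 0;
}
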
diff --git a/fs/aio.c b/fs/aio.c
index 6066f64967b3..e8920178b50f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -100,7 +100,7 @@ struct kioctx {
unsigned long user_id;
- struct __percpu kioctx_cpu *cpu;
+ struct kioctx_cpu __percpu *cpu;
/*
* For percpu reqs_available, number of slots we move to/from global
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 8c1d587b3eef..77c7991d89aa 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -62,6 +62,7 @@ struct autofs_info {
struct list_head expiring;
struct autofs_sb_info *sbi;
+ unsigned long exp_timeout;
unsigned long last_used;
int count;
@@ -81,6 +82,9 @@ struct autofs_info {
*/
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
+#define AUTOFS_INF_EXPIRE_SET (1<<3) /* per-dentry expire timeout set for
+ this mount point.
+ */
struct autofs_wait_queue {
wait_queue_head_t queue;
struct autofs_wait_queue *next;
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 5bf781ea6d67..f011e026358e 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -128,7 +128,13 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
goto out;
}
+ /* Setting the per-dentry expire timeout requires a trailing
+	 * path component, i.e. no '/', so invert the logic of the
+ * check_name() return for AUTOFS_DEV_IOCTL_TIMEOUT_CMD.
+ */
err = check_name(param->path);
+ if (cmd == AUTOFS_DEV_IOCTL_TIMEOUT_CMD)
+ err = err ? 0 : -EINVAL;
if (err) {
pr_warn("invalid path supplied for cmd(0x%08x)\n",
cmd);
@@ -396,16 +402,97 @@ static int autofs_dev_ioctl_catatonic(struct file *fp,
return 0;
}
-/* Set the autofs mount timeout */
+/*
+ * Set the autofs mount expire timeout.
+ *
+ * There are two places an expire timeout can be set: in the autofs
+ * super block info (this is all that's needed for direct and offset
+ * mounts because there's a distinct mount corresponding to each of
+ * these) and per-dentry within the dentry info. If a per-dentry
+ * timeout is set it will override the expire timeout set in the parent
+ * autofs super block info.
+ *
+ * If setting the autofs super block expire timeout the autofs_dev_ioctl
+ * size field will be equal to the autofs_dev_ioctl structure size. If
+ * setting the per-dentry expire timeout the mount point name is passed
+ * in the autofs_dev_ioctl path field and the size field updated to
+ * reflect this.
+ *
+ * Setting the autofs mount expire timeout sets the timeout in the super
+ * block info struct. Setting the per-dentry timeout does a little more.
+ * If the timeout is equal to -1 the per-dentry timeout (and flag) is
+ * cleared, which reverts to using the super block timeout; if the
+ * timeout is 0 the flag is set and the zero timeout disables expiration
+ * for this mount point; otherwise both the flag and the timeout are set,
+ * enabling the dentry to use this timeout.
+ */
static int autofs_dev_ioctl_timeout(struct file *fp,
struct autofs_sb_info *sbi,
struct autofs_dev_ioctl *param)
{
- unsigned long timeout;
+ unsigned long timeout = param->timeout.timeout;
+
+ /* If setting the expire timeout for an individual indirect
+ * mount point dentry the mount trailing component path is
+ * placed in param->path and param->size adjusted to account
+	 * for it; otherwise param->size is set to the structure
+ * size.
+ */
+ if (param->size == AUTOFS_DEV_IOCTL_SIZE) {
+ param->timeout.timeout = sbi->exp_timeout / HZ;
+ sbi->exp_timeout = timeout * HZ;
+ } else {
+ struct dentry *base = fp->f_path.dentry;
+ struct inode *inode = base->d_inode;
+ int path_len = param->size - AUTOFS_DEV_IOCTL_SIZE - 1;
+ struct dentry *dentry;
+ struct autofs_info *ino;
+
+ if (!autofs_type_indirect(sbi->type))
+ return -EINVAL;
+
+	/* An expire timeout greater than the super block timeout
+	 * could be a problem at shutdown, but the super block
+	 * timeout itself can change, so all we can really do is
+ * warn the user.
+ */
+ if (timeout >= sbi->exp_timeout)
+ pr_warn("per-mount expire timeout is greater than "
+ "the parent autofs mount timeout which could "
+ "prevent shutdown\n");
+
+ inode_lock_shared(inode);
+ dentry = try_lookup_one_len(param->path, base, path_len);
+ inode_unlock_shared(inode);
+ if (IS_ERR_OR_NULL(dentry))
+ return dentry ? PTR_ERR(dentry) : -ENOENT;
+ ino = autofs_dentry_ino(dentry);
+ if (!ino) {
+ dput(dentry);
+ return -ENOENT;
+ }
+
+ if (ino->exp_timeout && ino->flags & AUTOFS_INF_EXPIRE_SET)
+ param->timeout.timeout = ino->exp_timeout / HZ;
+ else
+ param->timeout.timeout = sbi->exp_timeout / HZ;
+
+ if (timeout == -1) {
+ /* Revert to using the super block timeout */
+ ino->flags &= ~AUTOFS_INF_EXPIRE_SET;
+ ino->exp_timeout = 0;
+ } else {
+ /* Set the dentry expire flag and timeout.
+ *
+ * If timeout is 0 it will prevent the expire
+ * of this particular automount.
+ */
+ ino->flags |= AUTOFS_INF_EXPIRE_SET;
+ ino->exp_timeout = timeout * HZ;
+ }
+ dput(dentry);
+ }
- timeout = param->timeout.timeout;
- param->timeout.timeout = sbi->exp_timeout / HZ;
- sbi->exp_timeout = timeout * HZ;
return 0;
}
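
A hedged userspace sketch of driving the extended ioctl, following the
comment block above: the trailing mount-point component goes in ->path
and ->size grows past AUTOFS_DEV_IOCTL_SIZE, which is what selects the
per-dentry branch. Field and macro names are from the linux/auto_dev-ioctl.h
UAPI as I understand it; error handling is elided and the fds are assumed
already open (devfd on /dev/autofs, ioctlfd on the indirect mount root):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/auto_dev-ioctl.h>

/* Pass (unsigned long)-1 to revert the dentry to the super block
 * timeout, 0 to disable expiry for this mount point. */
static int set_dentry_timeout(int devfd, int ioctlfd,
                              const char *name, unsigned long secs)
{
        size_t sz = AUTOFS_DEV_IOCTL_SIZE + strlen(name) + 1;
        struct autofs_dev_ioctl *param = calloc(1, sz);
        int ret;

        param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
        param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
        param->size = sz;               /* > AUTOFS_DEV_IOCTL_SIZE: per-dentry */
        param->ioctlfd = ioctlfd;
        param->timeout.timeout = secs;
        strcpy(param->path, name);      /* trailing component, no '/' */

        ret = ioctl(devfd, AUTOFS_DEV_IOCTL_TIMEOUT, param);
        free(param);
        return ret;
}
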
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index 39d8c84c16f4..5c2d459e1e48 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -429,8 +429,6 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
if (!root)
return NULL;
- timeout = sbi->exp_timeout;
-
dentry = NULL;
while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
@@ -441,6 +439,11 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
}
spin_unlock(&sbi->fs_lock);
+ if (ino->flags & AUTOFS_INF_EXPIRE_SET)
+ timeout = ino->exp_timeout;
+ else
+ timeout = sbi->exp_timeout;
+
expired = should_expire(dentry, mnt, timeout, how);
if (!expired)
continue;
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index cf792d4de4f1..ee2edccaef70 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -19,6 +19,7 @@ struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi)
INIT_LIST_HEAD(&ino->expiring);
ino->last_used = jiffies;
ino->sbi = sbi;
+ ino->exp_timeout = -1;
ino->count = 1;
}
return ino;
@@ -28,6 +29,7 @@ void autofs_clean_ino(struct autofs_info *ino)
{
ino->uid = GLOBAL_ROOT_UID;
ino->gid = GLOBAL_ROOT_GID;
+ ino->exp_timeout = -1;
ino->last_used = jiffies;
}
@@ -172,8 +174,7 @@ static int autofs_parse_fd(struct fs_context *fc, struct autofs_sb_info *sbi,
ret = autofs_check_pipe(pipe);
if (ret < 0) {
errorf(fc, "Invalid/unusable pipe");
- if (param->type != fs_value_is_file)
- fput(pipe);
+ fput(pipe);
return -EBADF;
}
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index 5cdfef3b551a..5bac803ea367 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -87,6 +87,13 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN
is held by another thread, spin for a short while, as long as the
thread owning the lock is running.
+config BCACHEFS_PATH_TRACEPOINTS
+ bool "Extra btree_path tracepoints"
+ depends on BCACHEFS_FS
+ help
+ Enable extra tracepoints for debugging btree_path operations; we don't
+ normally want these enabled because they happen at very high rates.
+
config MEAN_AND_VARIANCE_UNIT_TEST
tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index 0ab533a2b03b..56d20e219f59 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -69,6 +69,7 @@ bcachefs-y := \
printbuf.o \
quota.o \
rebalance.o \
+ rcu_pending.o \
recovery.o \
recovery_passes.o \
reflink.o \
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 331a17f3f113..87f1be9d4db4 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -361,7 +361,7 @@ retry:
bch2_trans_begin(trans);
acl = _acl;
- ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
BTREE_ITER_intent);
if (ret)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ba46f1c1d78a..645b5ed4babb 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -30,6 +30,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
+#include <linux/jiffies.h>
static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
@@ -1968,8 +1969,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
break;
}
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
percpu_ref_put(&ca->io_ref);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
@@ -1979,18 +1980,18 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
if (discard_in_flight_add(ca, bucket, false))
return;
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
return;
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
- goto put_ioref;
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ goto put_ref;
if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
return;
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
-put_ioref:
percpu_ref_put(&ca->io_ref);
+put_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
static int invalidate_one_bucket(struct btree_trans *trans,
@@ -2132,26 +2133,26 @@ static void bch2_do_invalidates_work(struct work_struct *work)
bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
percpu_ref_put(&ca->io_ref);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_dev_do_invalidates(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
return;
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
- goto put_ioref;
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ goto put_ref;
if (queue_work(c->write_ref_wq, &ca->invalidate_work))
return;
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
-put_ioref:
percpu_ref_put(&ca->io_ref);
+put_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
@@ -2183,7 +2184,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
* freespace/need_discard/need_gc_gens btrees as needed:
*/
while (1) {
- if (last_updated + HZ * 10 < jiffies) {
+ if (time_after(jiffies, last_updated + HZ * 10)) {
bch_info(ca, "%s: currently at %llu/%llu",
__func__, iter.pos.offset, ca->mi.nbuckets);
last_updated = jiffies;
@@ -2297,6 +2298,36 @@ int bch2_fs_freespace_init(struct bch_fs *c)
return 0;
}
+/* device removal */
+
+int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bpos start = POS(ca->dev_idx, 0);
+ struct bpos end = POS(ca->dev_idx, U64_MAX);
+ int ret;
+
+ /*
+ * We clear the LRU and need_discard btrees first so that we don't race
+ * with bch2_do_invalidates() and bch2_do_discards()
+ */
+ ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
+ bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_dev_usage_remove(c, ca->dev_idx);
+ bch_err_msg(ca, ret, "removing dev alloc info");
+ return ret;
+}
+
/* Bucket IO clocks: */
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
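
bch2_dev_remove_alloc() above chains its steps with the GNU ?: extension: each call returns 0 on success, and evaluation stops at the first nonzero error, an idiom used throughout bcachefs. A runnable userspace sketch with made-up step functions (compile with gcc or clang):

#include <stdio.h>

static int step1(void) { return 0; }
static int step2(void) { return -5; }			/* fails */
static int step3(void) { printf("never runs\n"); return 0; }

int main(void)
{
	/* a ?: b evaluates b only when a is zero, so the chain
	 * short-circuits at step2's error: */
	int ret = step1() ?:
		  step2() ?:
		  step3();

	printf("ret = %d\n", ret);			/* prints -5 */
	return 0;
}
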
@@ -2432,13 +2463,15 @@ static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
- unsigned i;
+ lockdep_assert_held(&c->state_lock);
/* First, remove device from allocation groups: */
- for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
clear_bit(ca->dev_idx, c->rw_devs[i].d);
+ c->rw_devs_change_count++;
+
/*
 * Capacity is calculated based on devices in allocation groups:
*/
@@ -2467,11 +2500,13 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
- unsigned i;
+ lockdep_assert_held(&c->state_lock);
- for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
if (ca->mi.data_allowed & (1 << i))
set_bit(ca->dev_idx, c->rw_devs[i].d);
+
+ c->rw_devs_change_count++;
}
void bch2_dev_allocator_background_exit(struct bch_dev *ca)
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index fd790b03fbe1..f8e87c6721b1 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -16,7 +16,7 @@ enum bch_validate_flags;
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, pos.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
bool ret = ca && bucket_valid(ca, pos.offset);
rcu_read_unlock();
return ret;
@@ -338,6 +338,7 @@ static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct
int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);
+int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);
void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 8563c2d26847..d0e0b56892e3 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -600,6 +600,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
enum bch_watermark watermark,
enum bch_data_type data_type,
struct closure *cl,
+ bool nowait,
struct bch_dev_usage *usage)
{
struct bch_fs *c = trans->c;
@@ -609,7 +610,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bucket_alloc_state s = {
.btree_bitmap = data_type == BCH_DATA_btree,
};
- bool waiting = false;
+ bool waiting = nowait;
again:
bch2_dev_usage_read_fast(ca, usage);
avail = dev_buckets_free(ca, *usage, watermark);
@@ -685,7 +686,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
bch2_trans_do(c, NULL, NULL, 0,
PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
- data_type, cl, &usage)));
+ data_type, cl, false, &usage)));
return ob;
}
@@ -748,7 +749,6 @@ static int add_new_bucket(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- unsigned flags,
struct open_bucket *ob)
{
unsigned durability = ob_dev(c, ob)->mi.durability;
@@ -775,7 +775,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- unsigned flags,
+ enum bch_write_flags flags,
enum bch_data_type data_type,
enum bch_watermark watermark,
struct closure *cl)
@@ -801,7 +801,8 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
+ cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
bch2_dev_put(ca);
@@ -815,7 +816,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
if (add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob)) {
+ have_cache, ob)) {
ret = 0;
break;
}
@@ -841,7 +842,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl)
{
struct bch_fs *c = trans->c;
@@ -883,7 +884,7 @@ got_bucket:
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
out_put_head:
bch2_ec_stripe_head_put(c, h);
return ret;
@@ -922,7 +923,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- bool ec, unsigned flags)
+ bool ec)
{
struct open_buckets ptrs_skip = { .nr = 0 };
struct open_bucket *ob;
@@ -934,7 +935,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
have_cache, ec, ob))
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
else
ob_push(c, &ptrs_skip, ob);
}
@@ -950,8 +951,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache, bool ec,
- enum bch_watermark watermark,
- unsigned flags)
+ enum bch_watermark watermark)
{
int i, ret = 0;
@@ -983,7 +983,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
if (ret)
break;
}
@@ -1003,7 +1003,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *_cl)
{
struct bch_fs *c = trans->c;
@@ -1022,18 +1022,15 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
open_bucket_for_each(c, ptrs, ob, i)
__clear_bit(ob->dev, devs.d);
- if (erasure_code && ec_open_bucket(c, ptrs))
- return 0;
-
ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, flags);
+ have_cache, erasure_code);
if (ret)
return ret;
ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, watermark, flags);
+ have_cache, erasure_code, watermark);
if (ret)
return ret;
@@ -1074,12 +1071,12 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl)
{
int ret;
- if (erasure_code) {
+ if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, erasure_code,
nr_replicas, nr_effective, have_cache,
@@ -1376,7 +1373,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
unsigned nr_replicas,
unsigned nr_replicas_required,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl,
struct write_point **wp_ret)
{
@@ -1392,8 +1389,6 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
erasure_code = false;
- BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
-
BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
ptrs.nr = 0;
@@ -1498,11 +1493,12 @@ err:
try_decrease_writepoints(trans, write_points_nr))
goto retry;
- if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+ if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ ret = -BCH_ERR_bucket_alloc_blocked;
+
+ if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
bch2_err_matches(ret, BCH_ERR_freelist_empty))
- return cl
- ? -BCH_ERR_bucket_alloc_blocked
- : -BCH_ERR_ENOSPC_bucket_alloc;
+ ret = -BCH_ERR_bucket_alloc_blocked;
return ret;
}
@@ -1733,13 +1729,6 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 12);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
-
bch2_dev_usage_to_text(out, ca, &stats);
prt_newline(out);
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index 386d231ceca3..1a16fd5bd4f8 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -155,9 +155,10 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
return ret;
}
+enum bch_write_flags;
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
struct dev_stripe_state *, struct bch_devs_mask *,
- unsigned, unsigned *, bool *, unsigned,
+ unsigned, unsigned *, bool *, enum bch_write_flags,
enum bch_data_type, enum bch_watermark,
struct closure *);
@@ -167,7 +168,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
struct bch_devs_list *,
unsigned, unsigned,
enum bch_watermark,
- unsigned,
+ enum bch_write_flags,
struct closure *,
struct write_point **);
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index d4da6343efa9..e11989a57ca0 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -9,6 +9,7 @@
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "checksum.h"
+#include "disk_accounting.h"
#include "error.h"
#include <linux/mm.h>
@@ -53,7 +54,7 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, bp.k->p.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
if (!ca) {
/* these will be caught by fsck */
rcu_read_unlock();
@@ -87,7 +88,7 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, k.k->p.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
if (ca) {
struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
rcu_read_unlock();
@@ -671,7 +672,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
continue;
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
if (ca)
bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);
rcu_read_unlock();
@@ -750,10 +751,12 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
s64 mem_may_pin = mem_may_pin_bytes(c);
int ret = 0;
+ bch2_btree_cache_unpin(c);
+
btree_interior_mask |= btree_leaf_mask;
- c->btree_cache.pinned_nodes_leaf_mask = btree_leaf_mask;
- c->btree_cache.pinned_nodes_interior_mask = btree_interior_mask;
+ c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask;
+ c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask;
c->btree_cache.pinned_nodes_start = start;
c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;
@@ -775,6 +778,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
BBPOS(btree, b->key.k.p);
break;
}
+ bch2_node_pin(c, b);
0;
}));
}
@@ -782,12 +786,80 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
return ret;
}
+struct progress_indicator_state {
+ unsigned long next_print;
+ u64 nodes_seen;
+ u64 nodes_total;
+ struct btree *last_node;
+};
+
+static inline void progress_init(struct progress_indicator_state *s,
+ struct bch_fs *c,
+ u64 btree_id_mask)
+{
+ memset(s, 0, sizeof(*s));
+
+ s->next_print = jiffies + HZ * 10;
+
+ for (unsigned i = 0; i < BTREE_ID_NR; i++) {
+ if (!(btree_id_mask & BIT_ULL(i)))
+ continue;
+
+ struct disk_accounting_pos acc = {
+ .type = BCH_DISK_ACCOUNTING_btree,
+ .btree.id = i,
+ };
+
+ u64 v;
+ bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
+ s->nodes_total += div64_ul(v, btree_sectors(c));
+ }
+}
+
+static inline bool progress_update_p(struct progress_indicator_state *s)
+{
+ bool ret = time_after_eq(jiffies, s->next_print);
+
+ if (ret)
+ s->next_print = jiffies + HZ * 10;
+ return ret;
+}
+
+static void progress_update_iter(struct btree_trans *trans,
+ struct progress_indicator_state *s,
+ struct btree_iter *iter,
+ const char *msg)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b = path_l(btree_iter_path(trans, iter))->b;
+
+ s->nodes_seen += b != s->last_node;
+ s->last_node = b;
+
+ if (progress_update_p(s)) {
+ struct printbuf buf = PRINTBUF;
+ unsigned percent = s->nodes_total
+ ? div64_u64(s->nodes_seen * 100, s->nodes_total)
+ : 0;
+
+ prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
+ msg, percent, s->nodes_seen, s->nodes_total);
+ bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
+
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
struct extents_to_bp_state *s)
{
struct bch_fs *c = trans->c;
+ struct progress_indicator_state progress;
int ret = 0;
+ progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
+
for (enum btree_id btree_id = 0;
btree_id < btree_id_nr_alive(c);
btree_id++) {
@@ -805,6 +877,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
BTREE_ITER_prefetch);
ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+ progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}));
@@ -865,8 +938,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
bch2_trans_put(trans);
bch2_bkey_buf_exit(&s.last_flushed, c);
- c->btree_cache.pinned_nodes_leaf_mask = 0;
- c->btree_cache.pinned_nodes_interior_mask = 0;
+ bch2_btree_cache_unpin(c);
bch_err_fn(c, ret);
return ret;
@@ -920,19 +992,24 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bbpos start,
struct bbpos end)
{
+ struct bch_fs *c = trans->c;
struct bkey_buf last_flushed;
+ struct progress_indicator_state progress;
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
+ progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));
int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_one_backpointer(trans, start, end,
- bkey_s_c_to_backpointer(k),
- &last_flushed));
-
- bch2_bkey_buf_exit(&last_flushed, trans->c);
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
+ check_one_backpointer(trans, start, end,
+ bkey_s_c_to_backpointer(k),
+ &last_flushed);
+ }));
+
+ bch2_bkey_buf_exit(&last_flushed, c);
return ret;
}
@@ -977,8 +1054,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
}
bch2_trans_put(trans);
- c->btree_cache.pinned_nodes_leaf_mask = 0;
- c->btree_cache.pinned_nodes_interior_mask = 0;
+ bch2_btree_cache_unpin(c);
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 7daecadb764e..3b29fdf519dd 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -134,28 +134,37 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
}
}
-static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
+static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
const union bch_extent_entry *entry,
- struct bpos *bucket_pos, struct bch_backpointer *bp)
+ struct bpos *bucket_pos, struct bch_backpointer *bp,
+ u64 sectors)
{
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
- s64 sectors = level ? btree_sectors(c) : k.k->size;
u32 bucket_offset;
-
*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
*bp = (struct bch_backpointer) {
.btree_id = btree_id,
.level = level,
- .data_type = data_type,
+ .data_type = bch2_bkey_ptr_data_type(k, p, entry),
.bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
p.crc.offset,
- .bucket_len = ptr_disk_sectors(sectors, p),
+ .bucket_len = sectors,
.pos = k.k->p,
};
}
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
+ struct bpos *bucket_pos, struct bch_backpointer *bp)
+{
+ u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
+
+ __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
+}
+
int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 0c7086e00d18..c711d4c27a03 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -542,7 +542,7 @@ struct bch_dev {
* gc_gens_lock, for device resize - holding any is sufficient for
* access: Or rcu_read_lock(), but only for dev_ptr_stale():
*/
- struct bucket_array __rcu *buckets_gc;
+ GENRADIX(struct bucket) buckets_gc;
struct bucket_gens __rcu *bucket_gens;
u8 *oldest_gen;
unsigned long *buckets_nouse;
@@ -871,6 +871,7 @@ struct bch_fs {
/* ALLOCATION */
struct bch_devs_mask rw_devs[BCH_DATA_NR];
+ unsigned long rw_devs_change_count;
u64 capacity; /* sectors */
u64 reserved; /* sectors */
@@ -1023,6 +1024,7 @@ struct bch_fs {
/* fs.c */
struct list_head vfs_inodes_list;
struct mutex vfs_inodes_lock;
+ struct rhashtable vfs_inodes_table;
/* VFS IO PATH - fs-io.c */
struct bio_set writepage_bioset;
@@ -1044,8 +1046,6 @@ struct bch_fs {
* for signaling to the toplevel code which pass we want to run now.
*/
enum bch_recovery_pass curr_recovery_pass;
- /* bitmap of explicitly enabled recovery passes: */
- u64 recovery_passes_explicit;
/* bitmask of recovery passes that we actually ran */
u64 recovery_passes_complete;
/* never rewinds version of curr_recovery_pass */
@@ -1085,7 +1085,6 @@ struct bch_fs {
u64 __percpu *counters;
unsigned copy_gc_enabled:1;
- bool promote_whole_extents;
struct bch2_time_stats times[BCH_TIME_STAT_NR];
@@ -1195,12 +1194,15 @@ static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
{
struct timespec64 t;
+ s64 sec;
s32 rem;
time += c->sb.time_base_lo;
- t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
- t.tv_nsec = rem * c->sb.nsec_per_time_unit;
+ sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
+
+ set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);
+
return t;
}
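
The rewrite above stops assigning a possibly out-of-range nanosecond value straight into tv_nsec; set_normalized_timespec64() folds any overflow into tv_sec so that 0 <= tv_nsec < NSEC_PER_SEC always holds. A runnable userspace sketch of that normalization (the kernel's version, in kernel/time/time.c, is structured the same way):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 { long long tv_sec, tv_nsec; };

static void set_normalized(struct ts64 *t, long long sec, long long nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	t->tv_sec = sec;
	t->tv_nsec = nsec;
}

int main(void)
{
	struct ts64 t;

	/* e.g. a remainder worth 1.5 seconds of nanoseconds: */
	set_normalized(&t, 10, 1500000000LL);
	printf("%lld.%09lld\n", t.tv_sec, t.tv_nsec);	/* 11.500000000 */
	return 0;
}
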
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 14ce726bf5a3..8c4addddd07e 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -795,6 +795,8 @@ LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
+LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
+ struct bch_sb, flags[0], 63, 64);
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
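
LE64_BITMASK() generates a BCH_SB_PROMOTE_WHOLE_EXTENTS() getter and SET_BCH_SB_PROMOTE_WHOLE_EXTENTS() setter over bit 63 of flags[0], replacing the in-memory promote_whole_extents bool removed from struct bch_fs earlier in this diff. A runnable userspace sketch of the shift-and-mask pair such a macro expands to (little-endian conversion omitted):

#include <stdio.h>
#include <stdint.h>

#define OFFSET 63
#define END    64

static uint64_t get_flag(uint64_t flags)
{
	return (flags >> OFFSET) & (~0ULL >> (64 - (END - OFFSET)));
}

static void set_flag(uint64_t *flags, uint64_t v)
{
	uint64_t mask = (~0ULL >> (64 - (END - OFFSET))) << OFFSET;

	*flags = (*flags & ~mask) | ((v << OFFSET) & mask);
}

int main(void)
{
	uint64_t flags = 0;

	set_flag(&flags, 1);
	printf("promote_whole_extents = %llu\n",
	       (unsigned long long) get_flag(flags));	/* 1 */
	return 0;
}
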
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 575e1d0b6eeb..d1f6092624d8 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -304,11 +304,6 @@ struct bkey_float {
};
#define BKEY_MANTISSA_BITS 16
-static unsigned bkey_float_byte_offset(unsigned idx)
-{
- return idx * sizeof(struct bkey_float);
-}
-
struct ro_aux_tree {
u8 nothing[0];
struct bkey_float f[];
@@ -328,8 +323,7 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
return t->aux_data_offset;
case BSET_RO_AUX_TREE:
return t->aux_data_offset +
- DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
- t->size * sizeof(u8), 8);
+ DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
case BSET_RW_AUX_TREE:
return t->aux_data_offset +
DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
@@ -360,14 +354,6 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
return __aux_tree_base(b, t);
}
-static u8 *ro_aux_tree_prev(const struct btree *b,
- const struct bset_tree *t)
-{
- EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
- return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
-}
-
static struct bkey_float *bkey_float(const struct btree *b,
const struct bset_tree *t,
unsigned idx)
@@ -479,15 +465,6 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
bkey_float(b, t, j)->key_offset);
}
-static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
- const struct bset_tree *t,
- unsigned j)
-{
- unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
-
- return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
-}
-
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
const struct bset_tree *t)
{
@@ -585,8 +562,7 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
}
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
u64 v;
@@ -617,7 +593,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
struct bkey_packed *m = tree_to_bkey(b, t, j);
struct bkey_packed *l = is_power_of_2(j)
? min_key
- : tree_to_prev_bkey(b, t, j >> ffs(j));
+ : tree_to_bkey(b, t, j >> ffs(j));
struct bkey_packed *r = is_power_of_2(j + 1)
? max_key
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
@@ -668,7 +644,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
f->exponent = shift;
- mantissa = bkey_mantissa(m, f, j);
+ mantissa = bkey_mantissa(m, f);
/*
* If we've got garbage bits, set them to all 1s - it's legal for the
@@ -690,8 +666,7 @@ static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
- return __bset_tree_capacity(b, t) /
- (sizeof(struct bkey_float) + sizeof(u8));
+ return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
}
static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
@@ -720,7 +695,7 @@ static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
- struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+ struct bkey_packed *k = btree_bkey_first(b, t);
struct bkey_i min_key, max_key;
unsigned cacheline = 1;
@@ -733,12 +708,12 @@ retry:
return;
}
- t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+ t->extra = eytzinger1_extra(t->size - 1);
/* First we figure out where the first key in each cacheline is */
eytzinger1_for_each(j, t->size - 1) {
while (bkey_to_cacheline(b, t, k) < cacheline)
- prev = k, k = bkey_p_next(k);
+ k = bkey_p_next(k);
if (k >= btree_bkey_last(b, t)) {
/* XXX: this path sucks */
@@ -746,17 +721,12 @@ retry:
goto retry;
}
- ro_aux_tree_prev(b, t)[j] = prev->u64s;
bkey_float(b, t, j)->key_offset =
bkey_to_cacheline_offset(b, t, cacheline++, k);
- EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
EBUG_ON(tree_to_bkey(b, t, j) != k);
}
- while (k != btree_bkey_last(b, t))
- prev = k, k = bkey_p_next(k);
-
if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
bkey_init(&min_key.k);
min_key.k.p = b->data->min_key;
@@ -915,6 +885,38 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
/* Insert */
+static void rw_aux_tree_insert_entry(struct btree *b,
+ struct bset_tree *t,
+ unsigned idx)
+{
+ EBUG_ON(!idx || idx > t->size);
+ struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
+ struct bkey_packed *end = idx < t->size
+ ? rw_aux_to_bkey(b, t, idx)
+ : btree_bkey_last(b, t);
+
+ if (t->size < bset_rw_tree_capacity(b, t) &&
+ (void *) end - (void *) start > L1_CACHE_BYTES) {
+ struct bkey_packed *k = start;
+
+ while (1) {
+ k = bkey_p_next(k);
+ if (k == end)
+ break;
+
+ if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
+ memmove(&rw_aux_tree(b, t)[idx + 1],
+ &rw_aux_tree(b, t)[idx],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[idx]);
+ t->size++;
+ rw_aux_tree_set(b, t, idx, k);
+ break;
+ }
+ }
+ }
+}
+
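
rw_aux_tree_insert_entry() above keeps the read-write lookup table sparse: a new entry is added only once the run of keys between two neighbouring entries grows past one L1 cacheline. A runnable userspace sketch of that one-entry-per-cacheline invariant over variable-sized entries (the sizes are made up):

#include <stdio.h>

#define L1_CACHE_BYTES 64

int main(void)
{
	unsigned sizes[] = { 24, 40, 16, 32, 24, 48 };	/* entry sizes, bytes */
	unsigned table[8], nr = 0, off = 0, last = 0;

	for (unsigned i = 0; i < 6; off += sizes[i++])
		if (!i || off - last >= L1_CACHE_BYTES) {
			table[nr++] = off;	/* at most one per cacheline */
			last = off;
		}

	for (unsigned i = 0; i < nr; i++)
		printf("entry %u -> offset %u\n", i, table[i]);
	return 0;
}
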
static void bch2_bset_fix_lookup_table(struct btree *b,
struct bset_tree *t,
struct bkey_packed *_where,
@@ -922,84 +924,59 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
unsigned new_u64s)
{
int shift = new_u64s - clobber_u64s;
- unsigned l, j, where = __btree_node_key_to_offset(b, _where);
+ unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
EBUG_ON(bset_has_ro_aux_tree(t));
if (!bset_has_rw_aux_tree(t))
return;
+ if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
+ rw_aux_tree_insert_entry(b, t, t->size);
+ goto verify;
+ }
+
/* returns first entry >= where */
- l = rw_aux_tree_bsearch(b, t, where);
-
- if (!l) /* never delete first entry */
- l++;
- else if (l < t->size &&
- where < t->end_offset &&
- rw_aux_tree(b, t)[l].offset == where)
- rw_aux_tree_set(b, t, l++, _where);
-
- /* l now > where */
-
- for (j = l;
- j < t->size &&
- rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
- j++)
- ;
-
- if (j < t->size &&
- rw_aux_tree(b, t)[j].offset + shift ==
- rw_aux_tree(b, t)[l - 1].offset)
- j++;
-
- memmove(&rw_aux_tree(b, t)[l],
- &rw_aux_tree(b, t)[j],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[j]);
- t->size -= j - l;
-
- for (j = l; j < t->size; j++)
- rw_aux_tree(b, t)[j].offset += shift;
+ idx = rw_aux_tree_bsearch(b, t, where);
+
+ if (rw_aux_tree(b, t)[idx].offset == where) {
+ if (!idx) { /* never delete first entry */
+ idx++;
+ } else if (where < t->end_offset) {
+ rw_aux_tree_set(b, t, idx++, _where);
+ } else {
+ EBUG_ON(where != t->end_offset);
+ rw_aux_tree_insert_entry(b, t, --t->size);
+ goto verify;
+ }
+ }
- EBUG_ON(l < t->size &&
- rw_aux_tree(b, t)[l].offset ==
- rw_aux_tree(b, t)[l - 1].offset);
+ EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
+ if (idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset + shift ==
+ rw_aux_tree(b, t)[idx - 1].offset) {
+ memmove(&rw_aux_tree(b, t)[idx],
+ &rw_aux_tree(b, t)[idx + 1],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[idx + 1]);
+ t->size -= 1;
+ }
- if (t->size < bset_rw_tree_capacity(b, t) &&
- (l < t->size
- ? rw_aux_tree(b, t)[l].offset
- : t->end_offset) -
- rw_aux_tree(b, t)[l - 1].offset >
- L1_CACHE_BYTES / sizeof(u64)) {
- struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
- struct bkey_packed *end = l < t->size
- ? rw_aux_to_bkey(b, t, l)
- : btree_bkey_last(b, t);
- struct bkey_packed *k = start;
+ for (j = idx; j < t->size; j++)
+ rw_aux_tree(b, t)[j].offset += shift;
- while (1) {
- k = bkey_p_next(k);
- if (k == end)
- break;
+ EBUG_ON(idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset ==
+ rw_aux_tree(b, t)[idx - 1].offset);
- if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
- memmove(&rw_aux_tree(b, t)[l + 1],
- &rw_aux_tree(b, t)[l],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[l]);
- t->size++;
- rw_aux_tree_set(b, t, l, k);
- break;
- }
- }
- }
+ rw_aux_tree_insert_entry(b, t, idx);
+verify:
bch2_bset_verify_rw_aux_tree(b, t);
bset_aux_tree_verify(b);
}
void bch2_bset_insert(struct btree *b,
- struct btree_node_iter *iter,
struct bkey_packed *where,
struct bkey_i *insert,
unsigned clobber_u64s)
@@ -1098,8 +1075,7 @@ static inline void prefetch_four_cachelines(void *p)
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
@@ -1133,9 +1109,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
goto slowpath;
l = f->mantissa;
- r = bkey_mantissa(packed_search, f, n);
+ r = bkey_mantissa(packed_search, f);
- if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+ if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
goto slowpath;
n = n * 2 + (l < r);
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 5c6c7a14fa0f..6953d55b72cc 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -270,8 +270,8 @@ void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
-void bch2_bset_insert(struct btree *, struct btree_node_iter *,
- struct bkey_packed *, struct bkey_i *, unsigned);
+void bch2_bset_insert(struct btree *, struct bkey_packed *, struct bkey_i *,
+ unsigned);
void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
/* Bkey utility code */
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index e52a06d3418c..6e4afb2b5441 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -15,11 +15,12 @@
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
+#include <linux/swap.h>
#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do { \
if (shrinker_counter) \
- bc->not_freed_##counter++; \
+ bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \
} while (0)
const char * const bch2_btree_node_flags[] = {
@@ -31,24 +32,29 @@ const char * const bch2_btree_node_flags[] = {
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
- unsigned i, reserve = 16;
+ unsigned reserve = 16;
if (!c->btree_roots_known[0].b)
reserve += 8;
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (r->b)
reserve += min_t(unsigned, 1, r->b->c.level) * 8;
}
- c->btree_cache.reserve = reserve;
+ c->btree_cache.nr_reserve = reserve;
}
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache_list *list)
{
- return max_t(int, 0, bc->used - bc->reserve);
+ struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
+
+ size_t can_free = list->nr;
+ if (!list->idx)
+ can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve);
+ return can_free;
}
static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -63,6 +69,18 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{
struct btree_cache *bc = &c->btree_cache;
+ BUG_ON(btree_node_hashed(b));
+
+ /*
+ * This should really be done in slub/vmalloc, but we're using the
+ * kmalloc_large() path, so we're working around a slub bug by doing
+ * this here:
+ */
+ if (b->data)
+ mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE);
+ if (b->aux_data)
+ mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE);
+
EBUG_ON(btree_node_write_in_flight(b));
clear_btree_node_just_written(b);
@@ -76,7 +94,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
#endif
b->aux_data = NULL;
- bc->used--;
+ bc->nr_freeable--;
btree_node_to_freedlist(bc, b);
}
@@ -102,6 +120,8 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
BUG_ON(b->data || b->aux_data);
+ gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
+
b->data = kvmalloc(btree_buf_bytes(b), gfp);
if (!b->data)
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
@@ -154,7 +174,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
bch2_btree_lock_init(&b->c, 0);
- bc->used++;
+ bc->nr_freeable++;
list_add(&b->list, &bc->freeable);
return b;
}
@@ -169,10 +189,56 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
six_unlock_intent(&b->c.lock);
}
+static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b)
+{
+ struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
+
+ u64 mask = bc->pinned_nodes_mask[!!b->c.level];
+
+ return ((mask & BIT_ULL(b->c.btree_id)) &&
+ bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
+ bbpos_cmp(bc->pinned_nodes_end, pos) >= 0);
+}
+
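
__btree_node_pinned() above combines a per-level btree-ID bitmask (index 0 for leaves, 1 for interior nodes) with an inclusive position range. A runnable userspace sketch of the same test using pared-down stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

struct pos { uint32_t btree_id; uint64_t offset; };

static int pos_cmp(struct pos a, struct pos b)
{
	if (a.btree_id != b.btree_id)
		return a.btree_id < b.btree_id ? -1 : 1;
	return a.offset < b.offset ? -1 : a.offset > b.offset;
}

static bool node_pinned(const uint64_t mask[2], struct pos start,
			struct pos end, unsigned level, struct pos node)
{
	uint64_t m = mask[!!level];	/* [0] = leaves, [1] = interior */

	return (m & BIT_ULL(node.btree_id)) &&
	       pos_cmp(start, node) < 0 &&
	       pos_cmp(end, node) >= 0;
}

int main(void)
{
	uint64_t mask[2] = { BIT_ULL(3), BIT_ULL(3) };
	struct pos start = { 3, 0 }, end = { 3, 1000 }, node = { 3, 42 };

	printf("%d\n", node_pinned(mask, start, end, 0, node));	/* 1 */
	return 0;
}
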
+void bch2_node_pin(struct bch_fs *c, struct btree *b)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ mutex_lock(&bc->lock);
+ BUG_ON(!__btree_node_pinned(bc, b));
+ if (b != btree_node_root(c, b) && !btree_node_pinned(b)) {
+ set_btree_node_pinned(b);
+ list_move(&b->list, &bc->live[1].list);
+ bc->live[0].nr--;
+ bc->live[1].nr++;
+ }
+ mutex_unlock(&bc->lock);
+}
+
+void bch2_btree_cache_unpin(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b, *n;
+
+ mutex_lock(&bc->lock);
+ c->btree_cache.pinned_nodes_mask[0] = 0;
+ c->btree_cache.pinned_nodes_mask[1] = 0;
+
+ list_for_each_entry_safe(b, n, &bc->live[1].list, list) {
+ clear_btree_node_pinned(b);
+ list_move(&b->list, &bc->live[0].list);
+ bc->live[0].nr++;
+ bc->live[1].nr--;
+ }
+
+ mutex_unlock(&bc->lock);
+}
+
/* Btree in memory cache - hash table */
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
+ lockdep_assert_held(&bc->lock);
int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
BUG_ON(ret);
@@ -181,7 +247,11 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
b->hash_val = 0;
if (b->c.btree_id < BTREE_ID_NR)
- --bc->used_by_btree[b->c.btree_id];
+ --bc->nr_by_btree[b->c.btree_id];
+
+ bc->live[btree_node_pinned(b)].nr--;
+ bc->nr_freeable++;
+ list_move(&b->list, &bc->freeable);
}
int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -191,23 +261,30 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
bch_btree_cache_params);
- if (!ret && b->c.btree_id < BTREE_ID_NR)
- bc->used_by_btree[b->c.btree_id]++;
- return ret;
+ if (ret)
+ return ret;
+
+ if (b->c.btree_id < BTREE_ID_NR)
+ bc->nr_by_btree[b->c.btree_id]++;
+
+ bool p = __btree_node_pinned(bc, b);
+ mod_bit(BTREE_NODE_pinned, &b->flags, p);
+
+ list_move_tail(&b->list, &bc->live[p].list);
+ bc->live[p].nr++;
+
+ bc->nr_freeable--;
+ return 0;
}
int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
unsigned level, enum btree_id id)
{
- int ret;
-
b->c.level = level;
b->c.btree_id = id;
mutex_lock(&bc->lock);
- ret = __bch2_btree_node_hash_insert(bc, b);
- if (!ret)
- list_add_tail(&b->list, &bc->live);
+ int ret = __bch2_btree_node_hash_insert(bc, b);
mutex_unlock(&bc->lock);
return ret;
@@ -261,18 +338,6 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, b
int ret = 0;
lockdep_assert_held(&bc->lock);
-
- struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
-
- u64 mask = b->c.level
- ? bc->pinned_nodes_interior_mask
- : bc->pinned_nodes_leaf_mask;
-
- if ((mask & BIT_ULL(b->c.btree_id)) &&
- bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
- bbpos_cmp(bc->pinned_nodes_end, pos) >= 0)
- return -BCH_ERR_ENOMEM_btree_node_reclaim;
-
wait_on_io:
if (b->flags & ((1U << BTREE_NODE_dirty)|
(1U << BTREE_NODE_read_in_flight)|
@@ -377,8 +442,9 @@ static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = shrink->private_data;
- struct btree_cache *bc = &c->btree_cache;
+ struct btree_cache_list *list = shrink->private_data;
+ struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
unsigned long can_free = 0;
@@ -386,8 +452,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
unsigned long touched = 0;
unsigned i, flags;
unsigned long ret = SHRINK_STOP;
- bool trigger_writes = atomic_read(&bc->dirty) + nr >=
- bc->used * 3 / 4;
+ bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4;
if (bch2_btree_shrinker_disabled)
return SHRINK_STOP;
@@ -402,7 +467,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
* succeed, so that inserting keys into the btree can always succeed and
* IO can always make forward progress:
*/
- can_free = btree_cache_can_free(bc);
+ can_free = btree_cache_can_free(list);
nr = min_t(unsigned long, nr, can_free);
i = 0;
@@ -424,22 +489,24 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
freed++;
- bc->freed++;
+ bc->nr_freed++;
}
}
restart:
- list_for_each_entry_safe(b, t, &bc->live, list) {
+ list_for_each_entry_safe(b, t, &list->list, list) {
touched++;
if (btree_node_accessed(b)) {
clear_btree_node_accessed(b);
- bc->not_freed_access_bit++;
+ bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
+			--touched;
} else if (!btree_node_reclaim(c, b, true)) {
+ bch2_btree_node_hash_remove(bc, b);
+
freed++;
btree_node_data_free(c, b);
- bc->freed++;
+ bc->nr_freed++;
- bch2_btree_node_hash_remove(bc, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
@@ -450,7 +517,7 @@ restart:
!btree_node_will_make_reachable(b) &&
!btree_node_write_blocked(b) &&
six_trylock_read(&b->c.lock)) {
- list_move(&bc->live, &b->list);
+ list_move(&list->list, &b->list);
mutex_unlock(&bc->lock);
__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
six_unlock_read(&b->c.lock);
@@ -464,8 +531,8 @@ restart:
break;
}
out_rotate:
- if (&t->list != &bc->live)
- list_move_tail(&bc->live, &t->list);
+ if (&t->list != &list->list)
+ list_move_tail(&list->list, &t->list);
out:
mutex_unlock(&bc->lock);
out_nounlock:
@@ -478,44 +545,45 @@ out_nounlock:
static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = shrink->private_data;
- struct btree_cache *bc = &c->btree_cache;
+ struct btree_cache_list *list = shrink->private_data;
if (bch2_btree_shrinker_disabled)
return 0;
- return btree_cache_can_free(bc);
+ return btree_cache_can_free(list);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
- unsigned i, flags;
+ struct btree *b, *t;
+ unsigned long flags;
- shrinker_free(bc->shrink);
+ shrinker_free(bc->live[1].shrink);
+ shrinker_free(bc->live[0].shrink);
/* vfree() can allocate memory: */
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
if (c->verify_data)
- list_move(&c->verify_data->list, &bc->live);
+ list_move(&c->verify_data->list, &bc->live[0].list);
kvfree(c->verify_ondisk);
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (r->b)
- list_add(&r->b->list, &bc->live);
+ list_add(&r->b->list, &bc->live[0].list);
}
- list_splice(&bc->freeable, &bc->live);
-
- while (!list_empty(&bc->live)) {
- b = list_first_entry(&bc->live, struct btree, list);
+ list_for_each_entry_safe(b, t, &bc->live[1].list, list)
+ bch2_btree_node_hash_remove(bc, b);
+ list_for_each_entry_safe(b, t, &bc->live[0].list, list)
+ bch2_btree_node_hash_remove(bc, b);
+ list_for_each_entry_safe(b, t, &bc->freeable, list) {
BUG_ON(btree_node_read_in_flight(b) ||
btree_node_write_in_flight(b));
@@ -523,12 +591,11 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
}
BUG_ON(!bch2_journal_error(&c->journal) &&
- atomic_read(&c->btree_cache.dirty));
+ atomic_long_read(&c->btree_cache.nr_dirty));
list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
- while (!list_empty(&bc->freed_nonpcpu)) {
- b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
+ list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) {
list_del(&b->list);
six_lock_exit(&b->c.lock);
kfree(b);
@@ -537,6 +604,12 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
mutex_unlock(&bc->lock);
memalloc_nofs_restore(flags);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+ BUG_ON(bc->nr_by_btree[i]);
+ BUG_ON(bc->live[0].nr);
+ BUG_ON(bc->live[1].nr);
+ BUG_ON(bc->nr_freeable);
+
if (bc->table_init_done)
rhashtable_destroy(&bc->table);
}
@@ -556,22 +629,32 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
bch2_recalc_btree_reserve(c);
- for (i = 0; i < bc->reserve; i++)
+ for (i = 0; i < bc->nr_reserve; i++)
if (!__bch2_btree_node_mem_alloc(c))
goto err;
- list_splice_init(&bc->live, &bc->freeable);
+ list_splice_init(&bc->live[0].list, &bc->freeable);
mutex_init(&c->verify_lock);
shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
if (!shrink)
goto err;
- bc->shrink = shrink;
+ bc->live[0].shrink = shrink;
+ shrink->count_objects = bch2_btree_cache_count;
+ shrink->scan_objects = bch2_btree_cache_scan;
+ shrink->seeks = 2;
+ shrink->private_data = &bc->live[0];
+ shrinker_register(shrink);
+
+ shrink = shrinker_alloc(0, "%s-btree_cache-pinned", c->name);
+ if (!shrink)
+ goto err;
+ bc->live[1].shrink = shrink;
shrink->count_objects = bch2_btree_cache_count;
shrink->scan_objects = bch2_btree_cache_scan;
- shrink->seeks = 4;
- shrink->private_data = c;
+ shrink->seeks = 8;
+ shrink->private_data = &bc->live[1];
shrinker_register(shrink);
return 0;
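
The cache now registers two shrinkers, one per live list, and the pinned list's larger seeks value (8 vs 2) makes reclaim scan it proportionally less. A runnable userspace sketch of how do_shrink_slab() in mm/shrinker.c turns seeks into scan pressure, simplified:

#include <stdio.h>

static unsigned long scan_delta(unsigned long freeable, int priority,
				unsigned seeks)
{
	unsigned long delta = freeable >> priority;

	delta *= 4;
	return delta / seeks;	/* higher seeks => fewer objects scanned */
}

int main(void)
{
	printf("seeks=2: %lu\n", scan_delta(1UL << 20, 4, 2));	/* 131072 */
	printf("seeks=8: %lu\n", scan_delta(1UL << 20, 4, 8));	/* 32768 */
	return 0;
}
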
@@ -582,7 +665,10 @@ err:
void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
{
mutex_init(&bc->lock);
- INIT_LIST_HEAD(&bc->live);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) {
+ bc->live[i].idx = i;
+ INIT_LIST_HEAD(&bc->live[i].list);
+ }
INIT_LIST_HEAD(&bc->freeable);
INIT_LIST_HEAD(&bc->freed_pcpu);
INIT_LIST_HEAD(&bc->freed_nonpcpu);
@@ -644,14 +730,16 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_reclaim(c, b, false))
- return b;
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
+ list_for_each_entry_reverse(b, &bc->live[i].list, list)
+ if (!btree_node_reclaim(c, b, false))
+ return b;
while (1) {
- list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_write_and_reclaim(c, b))
- return b;
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
+ list_for_each_entry_reverse(b, &bc->live[i].list, list)
+ if (!btree_node_write_and_reclaim(c, b))
+ return b;
/*
* Rare case: all nodes were intent-locked.
@@ -671,9 +759,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
: &bc->freed_nonpcpu;
struct btree *b, *b2;
u64 start_time = local_clock();
- unsigned flags;
- flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
/*
@@ -725,7 +811,7 @@ got_node:
}
mutex_lock(&bc->lock);
- bc->used++;
+ bc->nr_freeable++;
got_mem:
mutex_unlock(&bc->lock);
@@ -745,8 +831,6 @@ out:
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
start_time);
- memalloc_nofs_restore(flags);
-
int ret = bch2_trans_relock(trans);
if (unlikely(ret)) {
bch2_btree_node_to_freelist(c, b);
@@ -781,7 +865,6 @@ err:
}
mutex_unlock(&bc->lock);
- memalloc_nofs_restore(flags);
return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
}
@@ -1269,8 +1352,8 @@ wait_on_io:
BUG_ON(btree_node_dirty(b));
mutex_lock(&bc->lock);
- btree_node_data_free(c, b);
bch2_btree_node_hash_remove(bc, b);
+ btree_node_data_free(c, b);
mutex_unlock(&bc->lock);
out:
six_unlock_write(&b->c.lock);
@@ -1342,13 +1425,20 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
}
static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
- const char *label, unsigned nr)
+ const char *label, size_t nr)
{
prt_printf(out, "%s\t", label);
prt_human_readable_u64(out, nr * c->opts.btree_node_size);
- prt_printf(out, " (%u)\n", nr);
+ prt_printf(out, " (%zu)\n", nr);
}
+static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
+#define x(n) #n,
+ BCH_BTREE_CACHE_NOT_FREED_REASONS()
+#undef x
+ NULL
+};
+
void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
{
struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
@@ -1356,24 +1446,21 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 32);
- prt_btree_cache_line(out, c, "total:", bc->used);
- prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+ prt_btree_cache_line(out, c, "live:", bc->live[0].nr);
+ prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr);
+ prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable);
+ prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty));
prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
prt_newline(out);
- for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
- prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+ prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
prt_newline(out);
- prt_printf(out, "freed:\t%u\n", bc->freed);
+ prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
prt_printf(out, "not freed:\n");
- prt_printf(out, " dirty\t%u\n", bc->not_freed_dirty);
- prt_printf(out, " write in flight\t%u\n", bc->not_freed_write_in_flight);
- prt_printf(out, " read in flight\t%u\n", bc->not_freed_read_in_flight);
- prt_printf(out, " lock intent failed\t%u\n", bc->not_freed_lock_intent);
- prt_printf(out, " lock write failed\t%u\n", bc->not_freed_lock_write);
- prt_printf(out, " access bit\t%u\n", bc->not_freed_access_bit);
- prt_printf(out, " no evict failed\t%u\n", bc->not_freed_noevict);
- prt_printf(out, " write blocked\t%u\n", bc->not_freed_write_blocked);
- prt_printf(out, " will make reachable\t%u\n", bc->not_freed_will_make_reachable);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
+ prt_printf(out, " %s\t%llu\n",
+ bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]);
}
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index f82064007127..367acd217c6a 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -19,6 +19,9 @@ int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
unsigned, enum btree_id);
+void bch2_node_pin(struct bch_fs *, struct btree *);
+void bch2_btree_cache_unpin(struct bch_fs *);
+
void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_i *);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index eb3002c4eae7..b5e0692f03c6 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -549,9 +549,8 @@ reconstruct_root:
six_unlock_read(&b->c.lock);
if (ret == DROP_THIS_NODE) {
- bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_lock(&c->btree_cache.lock);
- list_move(&b->list, &c->btree_cache.freeable);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
r->b = NULL;
@@ -753,10 +752,8 @@ static void bch2_gc_free(struct bch_fs *c)
genradix_free(&c->reflink_gc_table);
genradix_free(&c->gc_stripes);
- for_each_member_device(c, ca) {
- kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
- ca->buckets_gc = NULL;
- }
+ for_each_member_device(c, ca)
+ genradix_free(&ca->buckets_gc);
}
static int bch2_gc_start(struct bch_fs *c)
@@ -910,20 +907,12 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
int ret = 0;
for_each_member_device(c, ca) {
- struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
- ca->mi.nbuckets * sizeof(struct bucket),
- GFP_KERNEL|__GFP_ZERO);
- if (!buckets) {
+ ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
+ if (ret) {
bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_alloc_start;
break;
}
-
- buckets->first_bucket = ca->mi.first_bucket;
- buckets->nbuckets = ca->mi.nbuckets;
- buckets->nbuckets_minus_first =
- buckets->nbuckets - buckets->first_bucket;
- rcu_assign_pointer(ca->buckets_gc, buckets);
}
bch_err_fn(c, ret);
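
buckets_gc switches from one big kvmalloc'd bucket_array to a GENRADIX, so setup becomes genradix_prealloc() and teardown a single genradix_free(). A hedged kernel-style sketch of that API (include/linux/generic-radix-tree.h), using a stand-in element type and trimmed error handling:

#include <linux/generic-radix-tree.h>

struct bucket_stub { u64 gen; };	/* stand-in for struct bucket */

static int genradix_example(gfp_t gfp)
{
	GENRADIX(struct bucket_stub) buckets;
	genradix_init(&buckets);

	/* Reserve space for all entries up front, as bch2_gc_alloc_start()
	 * now does, instead of one large flat allocation: */
	int ret = genradix_prealloc(&buckets, 1024, gfp);
	if (ret)
		return ret;

	struct bucket_stub *b = genradix_ptr(&buckets, 42);
	if (b)
		b->gen++;

	genradix_free(&buckets);
	return 0;
}
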
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 56ea9a77cd4a..cb48a9477514 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1666,7 +1666,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
bch2_btree_pos_to_text(&buf, c, b);
bch_err_ratelimited(c, "%s", buf.buf);
- if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+ if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
bch2_fatal_error(c);
@@ -1749,10 +1749,8 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
bch2_btree_node_read(trans, b, true);
if (btree_node_read_error(b)) {
- bch2_btree_node_hash_remove(&c->btree_cache, b);
-
mutex_lock(&c->btree_cache.lock);
- list_move(&b->list, &c->btree_cache.freeable);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
ret = -BCH_ERR_btree_node_read_error;
@@ -2031,7 +2029,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
do_write:
BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
- atomic_dec(&c->btree_cache.dirty);
+ atomic_long_dec(&c->btree_cache.nr_dirty);
BUG_ON(btree_node_fake(b));
BUG_ON((b->will_make_reachable != 0) != !b->written);
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 63d76f5c6403..9b01ca3de907 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -18,13 +18,13 @@ struct btree_node_read_all;
static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
- atomic_inc(&c->btree_cache.dirty);
+ atomic_long_inc(&c->btree_cache.nr_dirty);
}
static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
- atomic_dec(&c->btree_cache.dirty);
+ atomic_long_dec(&c->btree_cache.nr_dirty);
}
static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 2e84d22e17bd..bfe9f0c1e1be 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1010,9 +1010,9 @@ retry_all:
* the same position:
*/
if (trans->paths[idx].uptodate) {
- __btree_path_get(&trans->paths[idx], false);
+ __btree_path_get(trans, &trans->paths[idx], false);
ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
- __btree_path_put(&trans->paths[idx], false);
+ __btree_path_put(trans, &trans->paths[idx], false);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, ENOMEM))
@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
if (unlikely(!trans->srcu_held))
bch2_trans_srcu_lock(trans);
+ trace_btree_path_traverse_start(trans, path);
+
/*
* Ensure we obey path->should_be_locked: if it's set, we can't unlock
* and re-traverse the path without a transaction restart:
@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
+ trace_btree_path_traverse_end(trans, path);
out:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
panic("ret %s (%i) trans->restarted %s (%i)\n",
@@ -1225,7 +1228,7 @@ static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_i
{
btree_path_idx_t new = btree_path_alloc(trans, src);
btree_path_copy(trans, trans->paths + new, trans->paths + src);
- __btree_path_get(trans->paths + new, intent);
+ __btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
trans->paths[new].ip_allocated = ip;
#endif
@@ -1236,8 +1239,10 @@ __flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
btree_path_idx_t path, bool intent, unsigned long ip)
{
- __btree_path_put(trans->paths + path, intent);
+ struct btree_path *old = trans->paths + path;
+ __btree_path_put(trans, trans->paths + path, intent);
path = btree_path_clone(trans, path, intent, ip);
+ trace_btree_path_clone(trans, old, trans->paths + path);
trans->paths[path].preserve = false;
return path;
}
@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
bch2_trans_verify_not_in_restart(trans);
EBUG_ON(!trans->paths[path_idx].ref);
+ trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
struct btree_path *path = trans->paths + path_idx;
@@ -1361,13 +1368,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
{
struct btree_path *path = trans->paths + path_idx, *dup;
- if (!__btree_path_put(path, intent))
+ if (!__btree_path_put(trans, path, intent))
return;
dup = path->preserve
? have_path_at_pos(trans, path)
: have_node_at_pos(trans, path);
+ trace_btree_path_free(trans, path_idx, dup);
+
if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
return;
@@ -1392,7 +1401,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
bool intent)
{
- if (!__btree_path_put(trans->paths + path, intent))
+ if (!__btree_path_put(trans, trans->paths + path, intent))
return;
__bch2_path_free(trans, path);
@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
- prt_printf(buf, "transaction updates for %s journal seq %llu\n",
- trans->fn, trans->journal_res.seq);
+ prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+ trans->nr_updates, trans->fn, trans->journal_res.seq);
printbuf_indent_add(buf, 2);
trans_for_each_update(trans, i) {
@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
{
struct btree_path *path = trans->paths + path_idx;
- prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+ prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
@@ -1716,14 +1725,16 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
trans->paths[path_pos].cached == cached &&
trans->paths[path_pos].btree_id == btree_id &&
trans->paths[path_pos].level == level) {
- __btree_path_get(trans->paths + path_pos, intent);
+ trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
+ __btree_path_get(trans, trans->paths + path_pos, intent);
path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
path = trans->paths + path_idx;
} else {
path_idx = btree_path_alloc(trans, path_pos);
path = trans->paths + path_idx;
- __btree_path_get(path, intent);
+ __btree_path_get(trans, path, intent);
path->pos = pos;
path->btree_id = btree_id;
path->cached = cached;
@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
path->ip_allocated = ip;
#endif
trans->paths_sorted = false;
+
+ trace_btree_path_alloc(trans, path);
}
if (!(flags & BTREE_ITER_nopreserve))
@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
struct btree_path *path = btree_iter_path(trans, iter);
if (btree_path_node(path, path->level))
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
return 0;
}
@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
@@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
if (unlikely(ret))
return bkey_s_c_err(ret);
- btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (k.k && !bkey_err(k)) {
@@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
goto out;
}
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
k = btree_path_level_peek_all(trans->c, l, &iter->k);
@@ -2326,7 +2339,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
- __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
iter->update_path = iter->path;
iter->update_path = bch2_btree_path_set_pos(trans,
@@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
if (iter->update_path) {
ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
if (unlikely(ret))
k = bkey_s_c_err(ret);
else
- btree_path_set_should_be_locked(trans->paths + iter->update_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
}
if (!(iter->flags & BTREE_ITER_all_snapshots))
@@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
_THIS_IP_);
path = btree_iter_path(trans, iter);
+ trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
saved_k = *k.k;
saved_v = k.v;
}
@@ -2527,7 +2541,7 @@ got_key:
continue;
}
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
break;
} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
@@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
}
}
out:
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -2712,6 +2726,7 @@ struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
return bch2_btree_iter_peek_slot(iter);
}
+/* Obsolete, but still used by the Rust wrapper in -tools */
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
{
struct bkey_s_c k;
@@ -2911,9 +2926,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
dst->ip_allocated = _RET_IP_;
#endif
if (src->path)
- __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
if (src->update_path)
- __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
dst->key_cache_path = 0;
}
@@ -3237,7 +3252,7 @@ void bch2_trans_put(struct btree_trans *trans)
bch2_trans_unlock(trans);
trans_for_each_update(trans, i)
- __btree_path_put(trans->paths + i->path, true);
+ __btree_path_put(trans, trans->paths + i->path, true);
trans->nr_updates = 0;
check_btree_paths_leaked(trans);
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 222b7ce8a901..78e63ad7d380 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -6,6 +6,12 @@
#include "btree_types.h"
#include "trace.h"
+void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
+void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
+void bch2_dump_trans_updates(struct btree_trans *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
+
static inline int __bkey_err(const struct bkey *k)
{
return PTR_ERR_OR_ZERO(k);
@@ -13,16 +19,28 @@ static inline int __bkey_err(const struct bkey *k)
#define bkey_err(_k) __bkey_err((_k).k)
-static inline void __btree_path_get(struct btree_path *path, bool intent)
+static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
+ unsigned idx = path - trans->paths;
+
+ EBUG_ON(!test_bit(idx, trans->paths_allocated));
+ if (unlikely(path->ref == U8_MAX)) {
+ bch2_dump_trans_paths_updates(trans);
+ panic("path %u refcount overflow\n", idx);
+ }
+
path->ref++;
path->intent_ref += intent;
+ trace_btree_path_get_ll(trans, path);
}
-static inline bool __btree_path_put(struct btree_path *path, bool intent)
+static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
+ EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
EBUG_ON(!path->ref);
EBUG_ON(!path->intent_ref && intent);
+
+ trace_btree_path_put_ll(trans, path);
path->intent_ref -= intent;
return --path->ref == 0;
}
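
The tracepoints above are why both refcount helpers now take trans. A sketch
of the expected pairing in a hypothetical caller, mirroring what
bch2_path_put_nokeep() earlier in this patch does when the final put drops
the last reference:

	/* hypothetical caller: */
	__btree_path_get(trans, trans->paths + idx, intent);
	/* ... use trans->paths[idx] ... */
	if (__btree_path_put(trans, trans->paths + idx, intent))
		__bch2_path_free(trans, idx);	/* last reference dropped */
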
@@ -511,6 +529,12 @@ void bch2_set_btree_iter_dontneed(struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
+/**
+ * bch2_trans_kmalloc - allocate memory for use by the current transaction
+ *
+ * Must be called after bch2_trans_begin(); on second and subsequent calls,
+ * bch2_trans_begin() frees all memory allocated in this transaction
+ */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
size = roundup(size, 8);
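
A minimal usage sketch of the documented contract (hypothetical caller; the
ERR_PTR return convention is an assumption based on other bcachefs call
sites):

	bch2_trans_begin(trans);

	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_bytes);
	if (IS_ERR(k))
		return PTR_ERR(k);
	/*
	 * No kfree(): the allocation lives until the next bch2_trans_begin()
	 * or bch2_trans_put()
	 */
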
@@ -814,20 +838,6 @@ transaction_restart: \
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
- struct btree_iter *iter, unsigned flags)
-{
- struct bkey_s_c k;
-
- while (btree_trans_too_many_iters(trans) ||
- (k = bch2_btree_iter_peek_type(iter, flags),
- bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(trans);
-
- return k;
-}
-
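
The removed inline helper encoded the standard restart idiom, which the
outlined variant above still implements; as a sketch:

	struct bkey_s_c k;
	do {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(iter);
	} while (bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart));
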
#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
@@ -868,7 +878,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
\
if (bch2_err_matches(_ret, ENOMEM)) { \
_gfp = GFP_KERNEL; \
- _ret = drop_locks_do(trans, _do); \
+ _ret = drop_locks_do(_trans, _do); \
} \
_ret; \
})
@@ -881,7 +891,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
_ret = 0; \
if (unlikely(!_p)) { \
_gfp = GFP_KERNEL; \
- _ret = drop_locks_do(trans, ((_p = _do), 0)); \
+ _ret = drop_locks_do(_trans, ((_p = _do), 0)); \
} \
_p; \
})
@@ -894,12 +904,6 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
_ret; \
})
-void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
-void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
-void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
-void bch2_dump_trans_updates(struct btree_trans *);
-void bch2_dump_trans_paths_updates(struct btree_trans *);
-
struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 74933490aaba..c1657182c275 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -530,6 +530,8 @@ static void __journal_keys_sort(struct journal_keys *keys)
{
sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
+ cond_resched();
+
struct journal_key *dst = keys->data;
darray_for_each(*keys, src) {
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index fda7998734cb..244610b1d0b5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -79,134 +79,47 @@ static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
return true;
}
-static void bkey_cached_evict(struct btree_key_cache *c,
+static bool bkey_cached_evict(struct btree_key_cache *c,
struct bkey_cached *ck)
{
- BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
- bch2_btree_key_cache_params));
- memset(&ck->key, ~0, sizeof(ck->key));
-
- atomic_long_dec(&c->nr_keys);
-}
-
-static void bkey_cached_free(struct btree_key_cache *bc,
- struct bkey_cached *ck)
-{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
- BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
-
- ck->btree_trans_barrier_seq =
- start_poll_synchronize_srcu(&c->btree_trans_barrier);
-
- if (ck->c.lock.readers) {
- list_move_tail(&ck->list, &bc->freed_pcpu);
- bc->nr_freed_pcpu++;
- } else {
- list_move_tail(&ck->list, &bc->freed_nonpcpu);
- bc->nr_freed_nonpcpu++;
+ bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
+ bch2_btree_key_cache_params);
+ if (ret) {
+ memset(&ck->key, ~0, sizeof(ck->key));
+ atomic_long_dec(&c->nr_keys);
}
- atomic_long_inc(&bc->nr_freed);
-
- kfree(ck->k);
- ck->k = NULL;
- ck->u64s = 0;
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
+ return ret;
}
-#ifdef __KERNEL__
-static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
- struct bkey_cached *ck)
+static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
- struct bkey_cached *pos;
-
- bc->nr_freed_nonpcpu++;
+ struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
+ struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);
- list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
- if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
- pos->btree_trans_barrier_seq)) {
- list_move(&ck->list, &pos->list);
- return;
- }
- }
-
- list_move(&ck->list, &bc->freed_nonpcpu);
+ this_cpu_dec(*c->btree_key_cache.nr_pending);
+ kmem_cache_free(bch2_key_cache, ck);
}
-#endif
-
-static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
- struct bkey_cached *ck)
-{
- BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
-
- if (!ck->c.lock.readers) {
-#ifdef __KERNEL__
- struct btree_key_cache_freelist *f;
- bool freed = false;
-
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- if (f->nr < ARRAY_SIZE(f->objs)) {
- f->objs[f->nr++] = ck;
- freed = true;
- }
- preempt_enable();
- if (!freed) {
- mutex_lock(&bc->lock);
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- while (f->nr > ARRAY_SIZE(f->objs) / 2) {
- struct bkey_cached *ck2 = f->objs[--f->nr];
-
- __bkey_cached_move_to_freelist_ordered(bc, ck2);
- }
- preempt_enable();
-
- __bkey_cached_move_to_freelist_ordered(bc, ck);
- mutex_unlock(&bc->lock);
- }
-#else
- mutex_lock(&bc->lock);
- list_move_tail(&ck->list, &bc->freed_nonpcpu);
- bc->nr_freed_nonpcpu++;
- mutex_unlock(&bc->lock);
-#endif
- } else {
- mutex_lock(&bc->lock);
- list_move_tail(&ck->list, &bc->freed_pcpu);
- bc->nr_freed_pcpu++;
- mutex_unlock(&bc->lock);
- }
-}
-
-static void bkey_cached_free_fast(struct btree_key_cache *bc,
- struct bkey_cached *ck)
+static void bkey_cached_free(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
- ck->btree_trans_barrier_seq =
- start_poll_synchronize_srcu(&c->btree_trans_barrier);
-
- list_del_init(&ck->list);
- atomic_long_inc(&bc->nr_freed);
-
kfree(ck->k);
ck->k = NULL;
ck->u64s = 0;
- bkey_cached_move_to_freelist(bc, ck);
-
six_unlock_write(&ck->c.lock);
six_unlock_intent(&ck->c.lock);
+
+ bool pcpu_readers = ck->c.lock.readers != NULL;
+ rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
+ this_cpu_inc(*bc->nr_pending);
}
static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
+ gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
+
struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
if (unlikely(!ck))
return NULL;
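
Freed keys now ride the generic rcu_pending machinery instead of the
hand-rolled freelists: bkey_cached_free() enqueues the object, and after an
SRCU grace period __bkey_cached_free() returns it to the slab, unless the
allocator dequeues it for reuse first. The lifecycle, using only calls
visible in this patch:

	bkey_cached_free(bc, ck);	/* unlock + rcu_pending_enqueue() */

	/* grace period elapses -> __bkey_cached_free() -> kmem_cache_free() */

	/* ...or the allocator dequeues it for reuse instead: */
	struct bkey_cached *ck2 = container_of_or_null(
			rcu_pending_dequeue(&bc->pending[pcpu_readers]),
			struct bkey_cached, rcu);
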
@@ -224,74 +137,14 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
{
struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache;
- struct bkey_cached *ck = NULL;
bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
int ret;
- if (!pcpu_readers) {
-#ifdef __KERNEL__
- struct btree_key_cache_freelist *f;
-
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
- if (f->nr)
- ck = f->objs[--f->nr];
- preempt_enable();
-
- if (!ck) {
- mutex_lock(&bc->lock);
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- while (!list_empty(&bc->freed_nonpcpu) &&
- f->nr < ARRAY_SIZE(f->objs) / 2) {
- ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_nonpcpu--;
- f->objs[f->nr++] = ck;
- }
-
- ck = f->nr ? f->objs[--f->nr] : NULL;
- preempt_enable();
- mutex_unlock(&bc->lock);
- }
-#else
- mutex_lock(&bc->lock);
- if (!list_empty(&bc->freed_nonpcpu)) {
- ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_nonpcpu--;
- }
- mutex_unlock(&bc->lock);
-#endif
- } else {
- mutex_lock(&bc->lock);
- if (!list_empty(&bc->freed_pcpu)) {
- ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_pcpu--;
- }
- mutex_unlock(&bc->lock);
- }
-
- if (ck) {
- ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
- if (unlikely(ret)) {
- bkey_cached_move_to_freelist(bc, ck);
- return ERR_PTR(ret);
- }
-
- btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
-
- ret = bch2_btree_node_lock_write(trans, path, &ck->c);
- if (unlikely(ret)) {
- btree_node_unlock(trans, path, 0);
- bkey_cached_move_to_freelist(bc, ck);
- return ERR_PTR(ret);
- }
-
- return ck;
- }
+ struct bkey_cached *ck = container_of_or_null(
+ rcu_pending_dequeue(&bc->pending[pcpu_readers]),
+ struct bkey_cached, rcu);
+ if (ck)
+ goto lock;
ck = allocate_dropping_locks(trans, ret,
__bkey_cached_alloc(key_u64s, _gfp));
@@ -302,15 +155,19 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
return ERR_PTR(ret);
}
- if (!ck)
- return NULL;
-
- INIT_LIST_HEAD(&ck->list);
- bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ if (ck) {
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ ck->c.cached = true;
+ goto lock;
+ }
- ck->c.cached = true;
- BUG_ON(!six_trylock_intent(&ck->c.lock));
- BUG_ON(!six_trylock_write(&ck->c.lock));
+ ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
+ struct bkey_cached, rcu);
+ if (ck)
+ goto lock;
+lock:
+ six_lock_intent(&ck->c.lock, NULL, NULL);
+ six_lock_write(&ck->c.lock, NULL, NULL);
return ck;
}
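
The fallback order in the rewritten bkey_cached_alloc(), summarized:

	/*
	 * 1) rcu_pending_dequeue()          - reuse a previously freed key
	 * 2) __bkey_cached_alloc()          - fresh slab allocation, may drop
	 *                                     locks on memory pressure
	 * 3) rcu_pending_dequeue_from_all() - steal from any CPU's pending list
	 *
	 * Whichever path produced ck, it is intent- and write-locked at lock:
	 */
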
@@ -322,21 +179,21 @@ bkey_cached_reuse(struct btree_key_cache *c)
struct bkey_cached *ck;
unsigned i;
- mutex_lock(&c->lock);
rcu_read_lock();
tbl = rht_dereference_rcu(c->table.tbl, &c->table);
for (i = 0; i < tbl->size; i++)
rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
bkey_cached_lock_for_evict(ck)) {
- bkey_cached_evict(c, ck);
- goto out;
+ if (bkey_cached_evict(c, ck))
+ goto out;
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
}
}
ck = NULL;
out:
rcu_read_unlock();
- mutex_unlock(&c->lock);
return ck;
}
@@ -415,7 +272,7 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *
path->uptodate = BTREE_ITER_UPTODATE;
return 0;
err:
- bkey_cached_free_fast(bc, ck);
+ bkey_cached_free(bc, ck);
mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
return ret;
@@ -611,8 +468,12 @@ evict:
}
mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
- bkey_cached_evict(&c->btree_key_cache, ck);
- bkey_cached_free_fast(&c->btree_key_cache, ck);
+ if (bkey_cached_evict(&c->btree_key_cache, ck)) {
+ bkey_cached_free(&c->btree_key_cache, ck);
+ } else {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+ }
}
out:
bch2_trans_iter_exit(trans, &b_iter);
@@ -722,7 +583,7 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans,
}
bkey_cached_evict(bc, ck);
- bkey_cached_free_fast(bc, ck);
+ bkey_cached_free(bc, ck);
mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@@ -735,48 +596,14 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
struct bch_fs *c = shrink->private_data;
struct btree_key_cache *bc = &c->btree_key_cache;
struct bucket_table *tbl;
- struct bkey_cached *ck, *t;
+ struct bkey_cached *ck;
size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
- unsigned start, flags;
+ unsigned iter, start;
int srcu_idx;
- mutex_lock(&bc->lock);
- bc->requested_to_free += sc->nr_to_scan;
-
srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- flags = memalloc_nofs_save();
-
- /*
- * Newest freed entries are at the end of the list - once we hit one
- * that's too new to be freed, we can bail out:
- */
- list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
- if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
- ck->btree_trans_barrier_seq))
- break;
-
- list_del(&ck->list);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- atomic_long_dec(&bc->nr_freed);
- bc->nr_freed_nonpcpu--;
- bc->freed++;
- }
-
- list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
- if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
- ck->btree_trans_barrier_seq))
- break;
-
- list_del(&ck->list);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- atomic_long_dec(&bc->nr_freed);
- bc->nr_freed_pcpu--;
- bc->freed++;
- }
-
rcu_read_lock();
+
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
/*
@@ -792,17 +619,18 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
return SHRINK_STOP;
}
- if (bc->shrink_iter >= tbl->size)
- bc->shrink_iter = 0;
- start = bc->shrink_iter;
+ iter = bc->shrink_iter;
+ if (iter >= tbl->size)
+ iter = 0;
+ start = iter;
do {
struct rhash_head *pos, *next;
- pos = rht_ptr_rcu(&tbl->buckets[bc->shrink_iter]);
+ pos = rht_ptr_rcu(&tbl->buckets[iter]);
while (!rht_is_a_nulls(pos)) {
- next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
+ next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
ck = container_of(pos, struct bkey_cached, hash);
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
@@ -812,29 +640,31 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
bc->skipped_accessed++;
} else if (!bkey_cached_lock_for_evict(ck)) {
bc->skipped_lock_fail++;
- } else {
- bkey_cached_evict(bc, ck);
+ } else if (bkey_cached_evict(bc, ck)) {
bkey_cached_free(bc, ck);
- bc->moved_to_freelist++;
+ bc->freed++;
freed++;
+ } else {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
}
scanned++;
if (scanned >= nr)
- break;
+ goto out;
pos = next;
}
- bc->shrink_iter++;
- if (bc->shrink_iter >= tbl->size)
- bc->shrink_iter = 0;
- } while (scanned < nr && bc->shrink_iter != start);
+ iter++;
+ if (iter >= tbl->size)
+ iter = 0;
+ } while (scanned < nr && iter != start);
+out:
+ bc->shrink_iter = iter;
rcu_read_unlock();
- memalloc_nofs_restore(flags);
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
- mutex_unlock(&bc->lock);
return freed;
}
@@ -862,18 +692,13 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
struct bucket_table *tbl;
- struct bkey_cached *ck, *n;
+ struct bkey_cached *ck;
struct rhash_head *pos;
LIST_HEAD(items);
unsigned i;
-#ifdef __KERNEL__
- int cpu;
-#endif
shrinker_free(bc->shrink);
- mutex_lock(&bc->lock);
-
/*
* The loop is needed to guard against racing with rehash:
*/
@@ -892,44 +717,14 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
for (i = 0; i < tbl->size; i++)
while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
ck = container_of(pos, struct bkey_cached, hash);
- bkey_cached_evict(bc, ck);
- list_add(&ck->list, &items);
+ BUG_ON(!bkey_cached_evict(bc, ck));
+ kfree(ck->k);
+ kmem_cache_free(bch2_key_cache, ck);
}
}
rcu_read_unlock();
}
-#ifdef __KERNEL__
- if (bc->pcpu_freed) {
- for_each_possible_cpu(cpu) {
- struct btree_key_cache_freelist *f =
- per_cpu_ptr(bc->pcpu_freed, cpu);
-
- for (i = 0; i < f->nr; i++) {
- ck = f->objs[i];
- list_add(&ck->list, &items);
- }
- }
- }
-#endif
-
- BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
- BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
-
- list_splice(&bc->freed_pcpu, &items);
- list_splice(&bc->freed_nonpcpu, &items);
-
- mutex_unlock(&bc->lock);
-
- list_for_each_entry_safe(ck, n, &items, list) {
- cond_resched();
-
- list_del(&ck->list);
- kfree(ck->k);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- }
-
if (atomic_long_read(&bc->nr_dirty) &&
!bch2_journal_error(&c->journal) &&
test_bit(BCH_FS_was_rw, &c->flags))
@@ -943,14 +738,14 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
if (bc->table_init_done)
rhashtable_destroy(&bc->table);
- free_percpu(bc->pcpu_freed);
+ rcu_pending_exit(&bc->pending[0]);
+ rcu_pending_exit(&bc->pending[1]);
+
+ free_percpu(bc->nr_pending);
}
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
- mutex_init(&c->lock);
- INIT_LIST_HEAD(&c->freed_pcpu);
- INIT_LIST_HEAD(&c->freed_nonpcpu);
}
int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
@@ -958,11 +753,13 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
struct shrinker *shrink;
-#ifdef __KERNEL__
- bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
- if (!bc->pcpu_freed)
+ bc->nr_pending = alloc_percpu(size_t);
+ if (!bc->nr_pending)
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+
+ if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
+ rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-#endif
if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
@@ -984,45 +781,21 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
printbuf_tabstop_push(out, 24);
printbuf_tabstop_push(out, 12);
- unsigned flags = memalloc_nofs_save();
- mutex_lock(&bc->lock);
prt_printf(out, "keys:\t%lu\r\n", atomic_long_read(&bc->nr_keys));
prt_printf(out, "dirty:\t%lu\r\n", atomic_long_read(&bc->nr_dirty));
- prt_printf(out, "freelist:\t%lu\r\n", atomic_long_read(&bc->nr_freed));
- prt_printf(out, "nonpcpu freelist:\t%zu\r\n", bc->nr_freed_nonpcpu);
- prt_printf(out, "pcpu freelist:\t%zu\r\n", bc->nr_freed_pcpu);
-
- prt_printf(out, "\nshrinker:\n");
+ prt_printf(out, "table size:\t%u\r\n", bc->table.tbl->size);
+ prt_newline(out);
+ prt_printf(out, "shrinker:\n");
prt_printf(out, "requested_to_free:\t%lu\r\n", bc->requested_to_free);
prt_printf(out, "freed:\t%lu\r\n", bc->freed);
- prt_printf(out, "moved_to_freelist:\t%lu\r\n", bc->moved_to_freelist);
prt_printf(out, "skipped_dirty:\t%lu\r\n", bc->skipped_dirty);
prt_printf(out, "skipped_accessed:\t%lu\r\n", bc->skipped_accessed);
prt_printf(out, "skipped_lock_fail:\t%lu\r\n", bc->skipped_lock_fail);
-
- prt_printf(out, "srcu seq:\t%lu\r\n", get_state_synchronize_srcu(&c->btree_trans_barrier));
-
- struct bkey_cached *ck;
- unsigned iter = 0;
- list_for_each_entry(ck, &bc->freed_nonpcpu, list) {
- prt_printf(out, "freed_nonpcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
- if (++iter > 10)
- break;
- }
-
- iter = 0;
- list_for_each_entry(ck, &bc->freed_pcpu, list) {
- prt_printf(out, "freed_pcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
- if (++iter > 10)
- break;
- }
- mutex_unlock(&bc->lock);
- memalloc_flags_restore(flags);
+ prt_newline(out);
+ prt_printf(out, "pending:\t%zu\r\n", per_cpu_sum(bc->nr_pending));
}
void bch2_btree_key_cache_exit(void)
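
nr_pending is a plain percpu counter: updates are local and lock-free
(this_cpu_inc()/this_cpu_dec() in the free paths above), and the rare reader
folds all CPUs with per_cpu_sum(), presumably the obvious fold:

	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(bc->nr_pending, cpu);
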
diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h
index 237e8bb3ac40..722f1ed10551 100644
--- a/fs/bcachefs/btree_key_cache_types.h
+++ b/fs/bcachefs/btree_key_cache_types.h
@@ -2,33 +2,25 @@
#ifndef _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
#define _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
-struct btree_key_cache_freelist {
- struct bkey_cached *objs[16];
- unsigned nr;
-};
+#include "rcu_pending.h"
struct btree_key_cache {
- struct mutex lock;
struct rhashtable table;
bool table_init_done;
- struct list_head freed_pcpu;
- size_t nr_freed_pcpu;
- struct list_head freed_nonpcpu;
- size_t nr_freed_nonpcpu;
-
struct shrinker *shrink;
unsigned shrink_iter;
- struct btree_key_cache_freelist __percpu *pcpu_freed;
- atomic_long_t nr_freed;
+	/* 0: non-pcpu reader locks, 1: pcpu reader locks */
+ struct rcu_pending pending[2];
+ size_t __percpu *nr_pending;
+
atomic_long_t nr_keys;
atomic_long_t nr_dirty;
/* shrinker stats */
unsigned long requested_to_free;
unsigned long freed;
- unsigned long moved_to_freelist;
unsigned long skipped_dirty;
unsigned long skipped_accessed;
unsigned long skipped_lock_fail;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 11a64ead8685..7c07f9fa9add 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -218,16 +218,17 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
bool lock_may_not_fail,
unsigned long ip)
{
- int ret;
-
trans->lock_may_not_fail = lock_may_not_fail;
trans->lock_must_abort = false;
trans->locking = b;
- ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
- bch2_six_check_for_deadlock, trans, ip);
+ int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
+ bch2_six_check_for_deadlock, trans, ip);
WRITE_ONCE(trans->locking, NULL);
WRITE_ONCE(trans->locking_wait.start_time, 0);
+
+ if (!ret)
+ trace_btree_path_lock(trans, _THIS_IP_, b);
return ret;
}
@@ -281,6 +282,7 @@ static inline int btree_node_lock(struct btree_trans *trans,
int ret = 0;
EBUG_ON(level >= BTREE_MAX_DEPTH);
+ bch2_trans_verify_not_unlocked(trans);
if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
@@ -400,12 +402,13 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
/* misc: */
-static inline void btree_path_set_should_be_locked(struct btree_path *path)
+static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
EBUG_ON(!btree_node_locked(path, path->level));
EBUG_ON(path->uptodate);
path->should_be_locked = true;
+ trace_btree_path_should_be_locked(trans, path);
}
static inline void __btree_path_set_level_up(struct btree_trans *trans,
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index a0101d9c5d83..91884da4e30a 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -214,7 +214,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
- bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
+ bch2_bset_insert(b, k, insert, clobber_u64s);
new_u64s = k->u64s;
fix_iter:
if (clobber_u64s != new_u64s)
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index b256b2a20a4f..4568a41fefaf 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -138,6 +138,31 @@ struct btree {
struct list_head list;
};
+#define BCH_BTREE_CACHE_NOT_FREED_REASONS() \
+ x(lock_intent) \
+ x(lock_write) \
+ x(dirty) \
+ x(read_in_flight) \
+ x(write_in_flight) \
+ x(noevict) \
+ x(write_blocked) \
+ x(will_make_reachable) \
+ x(access_bit)
+
+enum bch_btree_cache_not_freed_reasons {
+#define x(n) BCH_BTREE_CACHE_NOT_FREED_##n,
+ BCH_BTREE_CACHE_NOT_FREED_REASONS()
+#undef x
+ BCH_BTREE_CACHE_NOT_FREED_REASONS_NR,
+};
+
+struct btree_cache_list {
+ unsigned idx;
+ struct shrinker *shrink;
+ struct list_head list;
+ size_t nr;
+};
+
struct btree_cache {
struct rhashtable table;
bool table_init_done;
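
The x-macro above keeps the enum and any derived tables in lockstep; a
hypothetical companion string table (not part of this patch) would be
generated from the same list:

	static const char * const bch2_btree_cache_not_freed_strs[] = {
	#define x(n)	#n,
		BCH_BTREE_CACHE_NOT_FREED_REASONS()
	#undef x
		NULL
	};
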
@@ -155,28 +180,19 @@ struct btree_cache {
* should never grow past ~2-3 nodes in practice.
*/
struct mutex lock;
- struct list_head live;
struct list_head freeable;
struct list_head freed_pcpu;
struct list_head freed_nonpcpu;
+ struct btree_cache_list live[2];
- /* Number of elements in live + freeable lists */
- unsigned used;
- unsigned reserve;
- unsigned freed;
- unsigned not_freed_lock_intent;
- unsigned not_freed_lock_write;
- unsigned not_freed_dirty;
- unsigned not_freed_read_in_flight;
- unsigned not_freed_write_in_flight;
- unsigned not_freed_noevict;
- unsigned not_freed_write_blocked;
- unsigned not_freed_will_make_reachable;
- unsigned not_freed_access_bit;
- atomic_t dirty;
- struct shrinker *shrink;
+ size_t nr_freeable;
+ size_t nr_reserve;
+ size_t nr_by_btree[BTREE_ID_NR];
+ atomic_long_t nr_dirty;
- unsigned used_by_btree[BTREE_ID_NR];
+ /* shrinker stats */
+ size_t nr_freed;
+ u64 not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
/*
* If we need to allocate memory for a new btree node and that
@@ -189,8 +205,8 @@ struct btree_cache {
struct bbpos pinned_nodes_start;
struct bbpos pinned_nodes_end;
- u64 pinned_nodes_leaf_mask;
- u64 pinned_nodes_interior_mask;
+	/* btree id masks: [0] for leaves, [1] for interior */
+ u64 pinned_nodes_mask[2];
};
struct btree_node_iter {
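
Given the comment on pinned_nodes_mask, the intended query is presumably
indexed by whether the node is interior (hypothetical helper, named here only
for illustration):

	static inline bool btree_node_range_pinned(struct btree_cache *bc,
						   enum btree_id id, unsigned level)
	{
		return bc->pinned_nodes_mask[!!level] & BIT_ULL(id);
	}
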
@@ -386,17 +402,16 @@ struct bkey_cached {
struct btree_bkey_cached_common c;
unsigned long flags;
- unsigned long btree_trans_barrier_seq;
u16 u64s;
struct bkey_cached_key key;
struct rhash_head hash;
- struct list_head list;
struct journal_entry_pin journal;
u64 seq;
struct bkey_i *k;
+ struct rcu_head rcu;
};
static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
@@ -583,7 +598,8 @@ enum btree_write_type {
x(dying) \
x(fake) \
x(need_rewrite) \
- x(never_write)
+ x(never_write) \
+ x(pinned)
enum btree_flags {
/* First bits for btree node write type */
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index d6f6df10dcc3..514df618548e 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -374,7 +374,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
i->key_cache_already_flushed = true;
i->flags |= BTREE_TRIGGER_norun;
- btree_path_set_should_be_locked(btree_path);
+ btree_path_set_should_be_locked(trans, btree_path);
ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
bch2_path_put(trans, path_idx, true);
@@ -422,7 +422,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
break;
}
- if (!cmp && i < trans->updates + trans->nr_updates) {
+ bool overwrite = !cmp && i < trans->updates + trans->nr_updates;
+
+ if (overwrite) {
EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
bch2_path_put(trans, i->path, true);
@@ -449,7 +451,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
}
}
- __btree_path_get(trans->paths + i->path, true);
+ __btree_path_get(trans, trans->paths + i->path, true);
+
+ trace_update_by_path(trans, path, i, overwrite);
/*
* If a key is present in the key cache, it must also exist in the
@@ -498,7 +502,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
}
- btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
}
return 0;
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 8fd112026e7a..190bc1e81756 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -16,6 +16,7 @@
#include "clock.h"
#include "error.h"
#include "extents.h"
+#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
@@ -145,7 +146,7 @@ fsck_err:
printbuf_exit(&buf);
return ret;
topology_repair:
- if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
+ if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
bch2_inconsistent_error(c);
ret = -BCH_ERR_btree_need_topology_repair;
@@ -250,8 +251,13 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
unsigned i, level = b->c.level;
bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+
+ mutex_lock(&c->btree_cache.lock);
bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_unlock(&c->btree_cache.lock);
+
__btree_node_free(trans, b);
+
six_unlock_write(&b->c.lock);
mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
@@ -283,7 +289,6 @@ static void bch2_btree_node_free_never_used(struct btree_update *as,
clear_btree_node_need_write(b);
mutex_lock(&c->btree_cache.lock);
- list_del_init(&b->list);
bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
@@ -732,6 +737,18 @@ static void btree_update_nodes_written(struct btree_update *as)
"%s", bch2_err_str(ret));
err:
/*
+ * Ensure transaction is unlocked before using btree_node_lock_nopath()
+ * (the use of which is always suspect, we need to work on removing this
+ * in the future)
+ *
+ * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
+ * calls bch2_path_upgrade(), before we call path_make_mut(), so we may
+ * rarely end up with a locked path besides the one we have here:
+ */
+ bch2_trans_unlock(trans);
+ bch2_trans_begin(trans);
+
+ /*
* We have to be careful because another thread might be getting ready
* to free as->b and calling btree_update_reparent() on us - we'll
* recheck under btree_update_lock below:
@@ -750,18 +767,6 @@ err:
* we're in journal error state:
*/
- /*
- * Ensure transaction is unlocked before using
- * btree_node_lock_nopath() (the use of which is always suspect,
- * we need to work on removing this in the future)
- *
- * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
- * calls bch2_path_upgrade(), before we call path_make_mut(), so
- * we may rarely end up with a locked path besides the one we
- * have here:
- */
- bch2_trans_unlock(trans);
- bch2_trans_begin(trans);
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
@@ -1899,7 +1904,7 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
six_unlock_intent(&n->c.lock);
mutex_lock(&c->btree_cache.lock);
- list_add_tail(&b->list, &c->btree_cache.live);
+ list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list);
mutex_unlock(&c->btree_cache.lock);
bch2_trans_verify_locks(trans);
@@ -1981,7 +1986,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
if (ret)
goto err;
- btree_path_set_should_be_locked(trans->paths + sib_path);
+ btree_path_set_should_be_locked(trans, trans->paths + sib_path);
m = trans->paths[sib_path].l[level].b;
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 02c6ecada97c..10f400957f21 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -159,6 +159,8 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
unsigned level,
unsigned flags)
{
+ bch2_trans_verify_not_unlocked(trans);
+
return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_prev_sib) ?:
bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 20219c1e6ddf..546cd01a72e3 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -75,6 +75,15 @@ void bch2_dev_usage_to_text(struct printbuf *out,
struct bch_dev *ca,
struct bch_dev_usage *usage)
{
+ if (out->nr_tabstops < 5) {
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 12);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ }
+
prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
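
In bcachefs printbufs, '\t' advances to the next tabstop and '\r'
right-justifies the preceding output against it - hence the tabstops
(re)pushed above before emitting the table:

	printbuf_tabstop_push(out, 12);
	printbuf_tabstop_push(out, 16);
	prt_printf(out, "\tbuckets\rsectors\r\n");	/* right-aligned columns */
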
@@ -100,12 +109,13 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (!ca) {
- if (fsck_err(trans, ptr_to_invalid_device,
- "pointer to missing device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
+ trans, ptr_to_invalid_device,
+ "pointer to missing device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
return 0;
}
@@ -271,7 +281,7 @@ int bch2_check_fix_ptrs(struct btree_trans *trans,
goto err;
rcu_read_lock();
- bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_rcu(c, ptr->dev));
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
rcu_read_unlock();
if (level) {
@@ -476,7 +486,7 @@ out:
return ret;
err:
bch2_dump_trans_updates(trans);
- ret = -EIO;
+ ret = -BCH_ERR_bucket_ref_update;
goto out;
}
@@ -555,22 +565,24 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
s64 *sectors,
enum btree_iter_update_trigger_flags flags)
{
+ struct bch_fs *c = trans->c;
bool insert = !(flags & BTREE_TRIGGER_overwrite);
struct printbuf buf = PRINTBUF;
int ret = 0;
- struct bch_fs *c = trans->c;
+ u64 abs_sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
+ *sectors = insert ? abs_sectors : -abs_sectors;
+
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (unlikely(!ca)) {
- if (insert)
- ret = -EIO;
+ if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
+ ret = -BCH_ERR_trigger_pointer;
goto err;
}
struct bpos bucket;
struct bch_backpointer bp;
- bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
- *sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
+ __bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp, abs_sectors);
if (flags & BTREE_TRIGGER_transactional) {
struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
@@ -592,7 +604,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
p.ptr.dev,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_trigger_pointer;
goto err_unlock;
}
@@ -637,7 +649,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
- ret = -EIO;
+ ret = -BCH_ERR_trigger_stripe_pointer;
goto err;
}
@@ -676,7 +688,7 @@ err:
(u64) p.ec.idx, buf.buf);
printbuf_exit(&buf);
bch2_inconsistent_error(c);
- return -EIO;
+ return -BCH_ERR_trigger_stripe_pointer;
}
m->block_sectors[p.ec.block] += sectors;
@@ -740,7 +752,7 @@ static int __trigger_extent(struct btree_trans *trans,
return ret;
} else if (!p.has_ec) {
*replicas_sectors += disk_sectors;
- acc_replicas_key.replicas.devs[acc_replicas_key.replicas.nr_devs++] = p.ptr.dev;
+ replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
} else {
ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
if (ret)
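
replicas_entry_add_dev() replaces the open-coded append; its presumed shape
(an assumption, not taken from this patch) is the same append with the
bookkeeping kept in one place:

	static inline void replicas_entry_add_dev(struct bch_replicas_entry_v1 *e,
						  unsigned dev)
	{
		e->devs[e->nr_devs++] = dev;
	}
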
@@ -956,7 +968,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
bch2_data_type_str(a->v.data_type),
bch2_data_type_str(type),
bch2_data_type_str(type));
- ret = -EIO;
+ ret = -BCH_ERR_metadata_bucket_inconsistency;
goto err;
}
@@ -1012,7 +1024,7 @@ err:
bucket_unlock(g);
err_unlock:
percpu_up_read(&c->mark_lock);
- return -EIO;
+ return -BCH_ERR_metadata_bucket_inconsistency;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index edbdffd508fc..e2cb7b24b220 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -80,22 +80,9 @@ static inline void bucket_lock(struct bucket *b)
TASK_UNINTERRUPTIBLE);
}
-static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
-{
- return rcu_dereference_check(ca->buckets_gc,
- !ca->fs ||
- percpu_rwsem_is_held(&ca->fs->mark_lock) ||
- lockdep_is_held(&ca->fs->state_lock) ||
- lockdep_is_held(&ca->bucket_lock));
-}
-
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
- struct bucket_array *buckets = gc_bucket_array(ca);
-
- if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
- return NULL;
- return buckets->b + b;
+ return genradix_ptr(&ca->buckets_gc, b);
}
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
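
gc buckets now live in a genradix - a sparse, radix-tree backed array - so
the explicit first_bucket/nbuckets bounds check disappears: genradix_ptr()
returns NULL for any index that was never allocated. Callers keep the same
NULL-check pattern:

	struct bucket *g = gc_bucket(ca, b);
	if (!g) {
		/* bucket not tracked by gc: treat as inconsistency */
	}
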
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index c9698cdf866f..28bd09a253c8 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -19,14 +19,6 @@ struct bucket {
u32 stripe_sectors;
} __aligned(sizeof(long));
-struct bucket_array {
- struct rcu_head rcu;
- u16 first_bucket;
- size_t nbuckets;
- size_t nbuckets_minus_first;
- struct bucket b[];
-};
-
struct bucket_gens {
struct rcu_head rcu;
u16 first_bucket;
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index e7208bf1974e..ce8fc677bef9 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -100,13 +100,12 @@ static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
struct scatterlist *sg, size_t len)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- int ret;
skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
- ret = crypto_skcipher_encrypt(req);
+ int ret = crypto_skcipher_encrypt(req);
if (ret)
pr_err("got error %i from crypto_skcipher_encrypt()", ret);
@@ -118,38 +117,47 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
void *buf, size_t len)
{
if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg,
- is_vmalloc_addr(buf)
- ? vmalloc_to_page(buf)
- : virt_to_page(buf),
- len, offset_in_page(buf));
+ struct scatterlist sg = {};
+
+ sg_mark_end(&sg);
+ sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
return do_encrypt_sg(tfm, nonce, &sg, len);
} else {
- unsigned pages = buf_pages(buf, len);
- struct scatterlist *sg;
- size_t orig_len = len;
- int ret, i;
-
- sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
- if (!sg)
- return -BCH_ERR_ENOMEM_do_encrypt;
+ DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
+ size_t sgl_len = 0;
+ int ret;
- sg_init_table(sg, pages);
+ darray_init(&sgl);
- for (i = 0; i < pages; i++) {
+ while (len) {
unsigned offset = offset_in_page(buf);
- unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);
+ struct scatterlist sg = {
+ .page_link = (unsigned long) vmalloc_to_page(buf),
+ .offset = offset,
+ .length = min(len, PAGE_SIZE - offset),
+ };
- sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
- buf += pg_len;
- len -= pg_len;
+ if (darray_push(&sgl, sg)) {
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
+ if (ret)
+ goto err;
+
+ nonce = nonce_add(nonce, sgl_len);
+ sgl_len = 0;
+ sgl.nr = 0;
+ BUG_ON(darray_push(&sgl, sg));
+ }
+
+ buf += sg.length;
+ len -= sg.length;
+ sgl_len += sg.length;
}
- ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
- kfree(sg);
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
+err:
+ darray_exit(&sgl);
return ret;
}
}
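
The DARRAY_PREALLOCATED(..., 4) buffer makes the chunking above robust under
memory pressure: when darray_push() fails to grow the array, the accumulated
scatterlist is flushed with do_encrypt_sg(), the nonce is advanced by the
bytes already encrypted, nr is reset so the retried push lands in the
preallocated slots (the BUG_ON documents that this retry cannot fail), and
the walk continues - so arbitrarily long vmalloc buffers encrypt correctly
even if the darray can never grow.
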
@@ -325,39 +333,42 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
{
struct bio_vec bv;
struct bvec_iter iter;
- struct scatterlist sgl[16], *sg = sgl;
- size_t bytes = 0;
+ DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
+ size_t sgl_len = 0;
int ret = 0;
if (!bch2_csum_type_is_encryption(type))
return 0;
- sg_init_table(sgl, ARRAY_SIZE(sgl));
+ darray_init(&sgl);
bio_for_each_segment(bv, bio, iter) {
- if (sg == sgl + ARRAY_SIZE(sgl)) {
- sg_mark_end(sg - 1);
-
- ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+ struct scatterlist sg = {
+ .page_link = (unsigned long) bv.bv_page,
+ .offset = bv.bv_offset,
+ .length = bv.bv_len,
+ };
+
+ if (darray_push(&sgl, sg)) {
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
if (ret)
- return ret;
+ goto err;
- nonce = nonce_add(nonce, bytes);
- bytes = 0;
+ nonce = nonce_add(nonce, sgl_len);
+ sgl_len = 0;
+ sgl.nr = 0;
- sg_init_table(sgl, ARRAY_SIZE(sgl));
- sg = sgl;
+ BUG_ON(darray_push(&sgl, sg));
}
- sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
- bytes += bv.bv_len;
- }
-
- if (sg != sgl) {
- sg_mark_end(sg - 1);
- return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+ sgl_len += sg.length;
}
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
+err:
+ darray_exit(&sgl);
return ret;
}
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
index 85c975dfbcfe..82c79c8baf92 100644
--- a/fs/bcachefs/clock.h
+++ b/fs/bcachefs/clock.h
@@ -20,15 +20,6 @@ static inline void bch2_increment_clock(struct bch_fs *c, u64 sectors,
void bch2_io_clock_schedule_timeout(struct io_clock *, u64);
-#define bch2_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\
-({ \
- long __ret = timeout; \
- might_sleep(); \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_timeout(wq, condition, timeout); \
- __ret; \
-})
-
void bch2_io_timers_to_text(struct printbuf *, struct io_clock *);
void bch2_io_clock_exit(struct io_clock *);
diff --git a/fs/bcachefs/darray.c b/fs/bcachefs/darray.c
index b7d223f85873..4f06cd8bbbe1 100644
--- a/fs/bcachefs/darray.c
+++ b/fs/bcachefs/darray.c
@@ -4,12 +4,12 @@
#include <linux/slab.h>
#include "darray.h"
-int __bch2_darray_resize(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
+int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
{
if (new_size > d->size) {
new_size = roundup_pow_of_two(new_size);
- void *data = kvmalloc_array(new_size, element_size, gfp);
+ void *data = kvmalloc_array_noprof(new_size, element_size, gfp);
if (!data)
return -ENOMEM;
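
The _noprof suffix plus the alloc_hooks() wrapper added in darray.h below is
the kernel's memory-allocation-profiling idiom: the inner function calls
kvmalloc_array_noprof() so no allocation tag is created inside darray.c, and
the macro creates the tag at each outer call site instead, so darray growth
shows up in /proc/allocinfo charged to the darray user.
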
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
index 4b340d13caac..8f4c3f0665c4 100644
--- a/fs/bcachefs/darray.h
+++ b/fs/bcachefs/darray.h
@@ -22,29 +22,23 @@ struct { \
typedef DARRAY(char) darray_char;
typedef DARRAY(char *) darray_str;
-int __bch2_darray_resize(darray_char *, size_t, size_t, gfp_t);
-
-static inline int __darray_resize(darray_char *d, size_t element_size,
- size_t new_size, gfp_t gfp)
-{
- return unlikely(new_size > d->size)
- ? __bch2_darray_resize(d, element_size, new_size, gfp)
- : 0;
-}
+int __bch2_darray_resize_noprof(darray_char *, size_t, size_t, gfp_t);
+
+#define __bch2_darray_resize(...) alloc_hooks(__bch2_darray_resize_noprof(__VA_ARGS__))
+
+#define __darray_resize(_d, _element_size, _new_size, _gfp) \
+ (unlikely((_new_size) > (_d)->size) \
+ ? __bch2_darray_resize((_d), (_element_size), (_new_size), (_gfp))\
+ : 0)
#define darray_resize_gfp(_d, _new_size, _gfp) \
- unlikely(__darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp))
+ __darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp)
#define darray_resize(_d, _new_size) \
darray_resize_gfp(_d, _new_size, GFP_KERNEL)
-static inline int __darray_make_room(darray_char *d, size_t t_size, size_t more, gfp_t gfp)
-{
- return __darray_resize(d, t_size, d->nr + more, gfp);
-}
-
#define darray_make_room_gfp(_d, _more, _gfp) \
- __darray_make_room((darray_char *) (_d), sizeof((_d)->data[0]), (_more), _gfp)
+ darray_resize_gfp((_d), (_d)->nr + (_more), _gfp)
#define darray_make_room(_d, _more) \
darray_make_room_gfp(_d, _more, GFP_KERNEL)
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 004894ad4147..757b9884ef55 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -571,7 +571,7 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
while (data_opts.kill_ptrs) {
unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
- bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
+ bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
data_opts.kill_ptrs ^= 1U << drop;
}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 32bfdf19289a..84dd4a879d98 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -552,62 +552,30 @@ static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subv
int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- subvol_inum target;
- u32 snapshot;
struct bkey_buf sk;
- int ret;
-
bch2_bkey_buf_init(&sk);
-retry:
- bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
- SPOS(inum.inum, ctx->pos, snapshot),
- POS(inum.inum, U64_MAX), 0, k, ret) {
- if (k.k->type != KEY_TYPE_dirent)
- continue;
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_dirents,
+ POS(inum.inum, ctx->pos),
+ POS(inum.inum, U64_MAX),
+ inum.subvol, 0, k, ({
+ if (k.k->type != KEY_TYPE_dirent)
+ continue;
- /* dir_emit() can fault and block: */
- bch2_bkey_buf_reassemble(&sk, c, k);
- struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
+ /* dir_emit() can fault and block: */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
- ret = bch2_dirent_read_target(trans, inum, dirent, &target);
- if (ret < 0)
- break;
- if (ret)
- continue;
+ subvol_inum target;
+ int ret2 = bch2_dirent_read_target(trans, inum, dirent, &target);
+ if (ret2 > 0)
+ continue;
- /*
- * read_target looks up subvolumes, we can overflow paths if the
- * directory has many subvolumes in it
- *
- * XXX: btree_trans_too_many_iters() is something we'd like to
- * get rid of, and there's no good reason to be using it here
- * except that we don't yet have a for_each_btree_key() helper
- * that does subvolume_get_snapshot().
- */
- ret = drop_locks_do(trans,
- bch2_dir_emit(ctx, dirent, target)) ?:
- btree_trans_too_many_iters(trans);
- if (ret) {
- ret = ret < 0 ? ret : 0;
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
+ ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target));
+ })));
- bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);
- return ret;
+ return ret < 0 ? ret : 0;
}
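
for_each_btree_key_in_subvolume_upto() takes its body as a statement
expression whose value becomes the per-key return code, which is why the
explicit retry loop could be deleted - transaction restarts are presumably
retried inside the macro, and only other nonzero codes abort the walk.
Sketch of the shape (process_key() is hypothetical):

	ret = for_each_btree_key_in_subvolume_upto(trans, iter, btree_id,
					start, end, subvol, 0, k, ({
		int ret2 = process_key(trans, k);
		ret2;	/* nonzero (and non-restart) ends the walk */
	}));
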
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 141a4c63142f..1587c6e1866a 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -18,6 +18,7 @@
#include "ec.h"
#include "error.h"
#include "io_read.h"
+#include "io_write.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
@@ -146,12 +147,18 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
bch2_prt_csum_type(out, s.csum_type);
prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+ if (s.disk_label) {
+ prt_str(out, " label");
+ bch2_disk_path_to_text(out, c, s.disk_label - 1);
+ }
+
for (unsigned i = 0; i < s.nr_blocks; i++) {
const struct bch_extent_ptr *ptr = sp->ptrs + i;
if ((void *) ptr >= bkey_val_end(k))
break;
+ prt_char(out, ' ');
bch2_extent_ptr_to_text(out, c, ptr);
if (s.csum_type < BCH_CSUM_NR &&
@@ -192,7 +199,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->stripe, s.k->p.offset,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -203,7 +210,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
} else {
@@ -213,7 +220,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bucket.inode, bucket.offset, a->gen,
a->stripe,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -223,7 +230,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bch2_data_type_str(a->data_type),
bch2_data_type_str(data_type),
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -235,7 +242,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
}
@@ -273,8 +280,8 @@ static int mark_stripe_bucket(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
if (unlikely(!ca)) {
- if (!(flags & BTREE_TRIGGER_overwrite))
- ret = -EIO;
+ if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -293,7 +300,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
ptr->dev,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err_unlock;
}
@@ -351,6 +358,19 @@ static int mark_stripe_buckets(struct btree_trans *trans,
return 0;
}
+static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
+{
+ m->sectors = le16_to_cpu(s->sectors);
+ m->algorithm = s->algorithm;
+ m->nr_blocks = s->nr_blocks;
+ m->nr_redundant = s->nr_redundant;
+ m->disk_label = s->disk_label;
+ m->blocks_nonempty = 0;
+
+ for (unsigned i = 0; i < s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+}
+
int bch2_trigger_stripe(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s _new,
@@ -467,14 +487,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
memset(m, 0, sizeof(*m));
} else {
- m->sectors = le16_to_cpu(new_s->sectors);
- m->algorithm = new_s->algorithm;
- m->nr_blocks = new_s->nr_blocks;
- m->nr_redundant = new_s->nr_redundant;
- m->blocks_nonempty = 0;
-
- for (unsigned i = 0; i < new_s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
+ stripe_to_mem(m, new_s);
if (!old_s)
bch2_stripes_heap_insert(c, m, idx);
@@ -816,13 +829,16 @@ err:
}
/* recovery read path: */
-int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
+int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
+ struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
- struct ec_stripe_buf *buf;
+ struct ec_stripe_buf *buf = NULL;
struct closure cl;
struct bch_stripe *v;
unsigned i, offset;
+ const char *msg = NULL;
+ struct printbuf msgbuf = PRINTBUF;
int ret = 0;
closure_init_stack(&cl);
@@ -835,32 +851,28 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
if (ret) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: error %i looking up stripe", ret);
- kfree(buf);
- return -EIO;
+ msg = "stripe not found";
+ goto err;
}
v = &bkey_i_to_stripe(&buf->key)->v;
if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: pointer doesn't match stripe");
- ret = -EIO;
+ msg = "pointer doesn't match stripe";
goto err;
}
offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: read is bigger than stripe");
- ret = -EIO;
+ msg = "read is bigger than stripe";
goto err;
}
ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
- if (ret)
+ if (ret) {
+ msg = "-ENOMEM";
goto err;
+ }
for (i = 0; i < v->nr_blocks; i++)
ec_block_io(c, buf, REQ_OP_READ, i, &cl);
@@ -868,9 +880,7 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
closure_sync(&cl);
if (ec_nr_failed(buf) > v->nr_redundant) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: unable to read enough blocks");
- ret = -EIO;
+ msg = "unable to read enough blocks";
goto err;
}
@@ -882,10 +892,17 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
-err:
+out:
ec_stripe_buf_exit(buf);
kfree(buf);
return ret;
+err:
+ bch2_bkey_val_to_text(&msgbuf, c, orig_k);
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: %s\n %s", msg, msgbuf.buf);
+	printbuf_exit(&msgbuf);
+ ret = -BCH_ERR_stripe_reconstruct;
+ goto out;
}
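
Note: the reworked error tail in bch2_ec_read_extent() logs the offending key once and maps every failure to one private error code, then jumps back through the common exit for cleanup. The same out/err shape in isolation (a minimal sketch with hypothetical names, not code from the patch):

static int do_read(struct ctx *c)
{
	const char *msg = NULL;
	int ret = 0;

	if (lookup(c))   { msg = "stripe not found";  goto err; }
	if (validate(c)) { msg = "validation failed"; goto err; }
out:
	teardown(c);		/* shared cleanup runs exactly once */
	return ret;
err:
	report(c, msg);		/* error-only logging sits after out: */
	ret = -EXAMPLE_ERR;	/* hypothetical consolidated error code */
	goto out;
}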
/* stripe bucket accounting: */
@@ -1305,7 +1322,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
bkey_reassemble(n, k);
- bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
+ bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
BUG_ON(!ec_ptr);
@@ -1555,10 +1572,12 @@ void bch2_ec_do_stripe_creates(struct bch_fs *c)
bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
-static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
+static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
struct ec_stripe_new *s = h->s;
+ lockdep_assert_held(&h->lock);
+
BUG_ON(!s->allocated && !s->err);
h->s = NULL;
@@ -1571,6 +1590,12 @@ static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
ec_stripe_new_put(c, s, STRIPE_REF_io);
}
+static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
+{
+ h->s->err = err;
+ ec_stripe_new_set_pending(c, h);
+}
+
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
struct ec_stripe_new *s = ob->ec;
@@ -1641,7 +1666,8 @@ static void ec_stripe_key_init(struct bch_fs *c,
struct bkey_i *k,
unsigned nr_data,
unsigned nr_parity,
- unsigned stripe_size)
+ unsigned stripe_size,
+ unsigned disk_label)
{
struct bkey_i_stripe *s = bkey_stripe_init(k);
unsigned u64s;
@@ -1652,7 +1678,7 @@ static void ec_stripe_key_init(struct bch_fs *c,
s->v.nr_redundant = nr_parity;
s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
s->v.csum_type = BCH_CSUM_crc32c;
- s->v.pad = 0;
+ s->v.disk_label = disk_label;
while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
BUG_ON(1 << s->v.csum_granularity_bits >=
@@ -1685,40 +1711,32 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
s->nr_parity = h->redundancy;
ec_stripe_key_init(c, &s->new_stripe.key,
- s->nr_data, s->nr_parity, h->blocksize);
+ s->nr_data, s->nr_parity,
+ h->blocksize, h->disk_label);
h->s = s;
+ h->nr_created++;
return 0;
}
-static struct ec_stripe_head *
-ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
- unsigned algo, unsigned redundancy,
- enum bch_watermark watermark)
+static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
{
- struct ec_stripe_head *h;
-
- h = kzalloc(sizeof(*h), GFP_KERNEL);
- if (!h)
- return NULL;
-
- mutex_init(&h->lock);
- BUG_ON(!mutex_trylock(&h->lock));
-
- h->target = target;
- h->algo = algo;
- h->redundancy = redundancy;
- h->watermark = watermark;
+ struct bch_devs_mask devs = h->devs;
rcu_read_lock();
- h->devs = target_rw_devs(c, BCH_DATA_user, target);
+ h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
+ ? group_to_target(h->disk_label - 1)
+ : 0);
+ unsigned nr_devs = dev_mask_nr(&h->devs);
for_each_member_device_rcu(c, ca, &h->devs)
if (!ca->mi.durability)
__clear_bit(ca->dev_idx, h->devs.d);
+ unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);
h->blocksize = pick_blocksize(c, &h->devs);
+ h->nr_active_devs = 0;
for_each_member_device_rcu(c, ca, &h->devs)
if (ca->mi.bucket_size == h->blocksize)
h->nr_active_devs++;
@@ -1729,9 +1747,50 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
* If we only have redundancy + 1 devices, we're better off with just
* replication:
*/
- if (h->nr_active_devs < h->redundancy + 2)
- bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
- h->nr_active_devs, h->redundancy + 2);
+ h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;
+
+ if (h->insufficient_devs) {
+ const char *err;
+
+ if (nr_devs < h->redundancy + 2)
+ err = NULL;
+ else if (nr_devs_with_durability < h->redundancy + 2)
+ err = "cannot use durability=0 devices";
+ else
+ err = "mismatched bucket sizes";
+
+ if (err)
+ bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
+ h->nr_active_devs, h->redundancy + 2, err);
+ }
+
+ struct bch_devs_mask devs_leaving;
+ bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);
+
+ if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
+ ec_stripe_new_cancel(c, h, -EINTR);
+
+ h->rw_devs_change_count = c->rw_devs_change_count;
+}
+
+static struct ec_stripe_head *
+ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
+ unsigned algo, unsigned redundancy,
+ enum bch_watermark watermark)
+{
+ struct ec_stripe_head *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return NULL;
+
+ mutex_init(&h->lock);
+ BUG_ON(!mutex_trylock(&h->lock));
+
+ h->disk_label = disk_label;
+ h->algo = algo;
+ h->redundancy = redundancy;
+ h->watermark = watermark;
list_add(&h->list, &c->ec_stripe_head_list);
return h;
@@ -1743,14 +1802,14 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
h->s->allocated &&
bitmap_weight(h->s->blocks_allocated,
h->s->nr_data) == h->s->nr_data)
- ec_stripe_set_pending(c, h);
+ ec_stripe_new_set_pending(c, h);
mutex_unlock(&h->lock);
}
static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned target,
+ unsigned disk_label,
unsigned algo,
unsigned redundancy,
enum bch_watermark watermark)
@@ -1768,27 +1827,32 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans,
if (test_bit(BCH_FS_going_ro, &c->flags)) {
h = ERR_PTR(-BCH_ERR_erofs_no_writes);
- goto found;
+ goto err;
}
list_for_each_entry(h, &c->ec_stripe_head_list, list)
- if (h->target == target &&
+ if (h->disk_label == disk_label &&
h->algo == algo &&
h->redundancy == redundancy &&
h->watermark == watermark) {
ret = bch2_trans_mutex_lock(trans, &h->lock);
- if (ret)
+ if (ret) {
h = ERR_PTR(ret);
+ goto err;
+ }
goto found;
}
- h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
+	h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
+	if (!h) {
+		h = ERR_PTR(-ENOMEM);
+		goto err;
+	}
found:
- if (!IS_ERR_OR_NULL(h) &&
- h->nr_active_devs < h->redundancy + 2) {
+ if (h->rw_devs_change_count != c->rw_devs_change_count)
+ ec_stripe_head_devs_update(c, h);
+
+ if (h->insufficient_devs) {
mutex_unlock(&h->lock);
h = NULL;
}
+err:
mutex_unlock(&c->ec_stripe_head_lock);
return h;
}
@@ -1878,7 +1942,6 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
return 0;
}
-/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
struct ec_stripe_head *head)
{
@@ -1901,7 +1964,8 @@ static s64 get_existing_stripe(struct bch_fs *c,
m = genradix_ptr(&c->stripes, stripe_idx);
- if (m->algorithm == head->algo &&
+ if (m->disk_label == head->disk_label &&
+ m->algorithm == head->algo &&
m->nr_redundant == head->redundancy &&
m->sectors == head->blocksize &&
m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
@@ -2046,9 +2110,19 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
bool waiting = false;
+ unsigned disk_label = 0;
+ struct target t = target_decode(target);
int ret;
- h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
+ if (t.type == TARGET_GROUP) {
+ if (t.group > U8_MAX) {
+ bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
+ return NULL;
+ }
+ disk_label = t.group + 1; /* 0 == no label */
+ }
+
+ h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
if (IS_ERR_OR_NULL(h))
return h;
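
Note: disk_label is stored biased by one so that zero can mean "no label"; the decode above and the encode in ec_stripe_head_devs_update() are inverses. The convention, as two hypothetical helpers:

/* label group 0..N-1 <-> on-disk field 1..N; 0 is reserved for "none" */
static inline u8 group_to_disk_label(unsigned group)
{
	return group + 1;
}

static inline int disk_label_to_group(u8 disk_label)
{
	return (int) disk_label - 1;	/* -1 == no label */
}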
@@ -2126,6 +2200,73 @@ err:
return ERR_PTR(ret);
}
+/* device removal */
+
+static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
+{
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);
+
+ if (!a->stripe)
+ return 0;
+
+ if (a->stripe_sectors) {
+ bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
+ return -BCH_ERR_invalidate_stripe_to_dev;
+ }
+
+ struct btree_iter iter;
+ struct bkey_i_stripe *s =
+ bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
+ BTREE_ITER_slots, stripe);
+ int ret = PTR_ERR_OR_ZERO(s);
+ if (ret)
+ return ret;
+
+ struct disk_accounting_pos acc = {
+ .type = BCH_DISK_ACCOUNTING_replicas,
+ };
+
+ s64 sectors = 0;
+ for (unsigned i = 0; i < s->v.nr_blocks; i++)
+ sectors -= stripe_blockcount_get(&s->v, i);
+
+ bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
+ acc.replicas.data_type = BCH_DATA_user;
+ ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
+ if (ret)
+ goto err;
+
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == k_a.k->p.inode)
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+
+ sectors = -sectors;
+
+ bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
+ acc.replicas.data_type = BCH_DATA_user;
+ ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
+{
+ return bch2_trans_run(c,
+ for_each_btree_key_upto_commit(trans, iter,
+ BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
+ BTREE_ITER_intent, k,
+ NULL, NULL, 0, ({
+ bch2_invalidate_stripe_to_dev(trans, k);
+ })));
+}
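
Note: bch2_invalidate_stripe_to_dev() keeps replica accounting balanced across the pointer rewrite: it charges negative sectors against the stripe's old replicas entry, retargets the pointers to BCH_SB_MEMBER_INVALID, then charges the same sectors back against the new entry. Schematically (pseudocode, not the exact helpers):

	s64 sectors = -blockcount_total(s);	/* negative: retire old replicas entry */
	account(trans, replicas_of(s), sectors);

	retarget_ptrs(s, dev_idx, BCH_SB_MEMBER_INVALID);

	account(trans, replicas_of(s), -sectors); /* positive: charge the new entry */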
+
+/* startup/shutdown */
+
static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
struct ec_stripe_head *h;
@@ -2151,8 +2292,7 @@ static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
}
goto unlock;
found:
- h->s->err = -BCH_ERR_erofs_no_writes;
- ec_stripe_set_pending(c, h);
+ ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
unlock:
mutex_unlock(&h->lock);
}
@@ -2197,17 +2337,9 @@ int bch2_stripes_read(struct bch_fs *c)
if (ret)
break;
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
-
struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
- m->sectors = le16_to_cpu(s->sectors);
- m->algorithm = s->algorithm;
- m->nr_blocks = s->nr_blocks;
- m->nr_redundant = s->nr_redundant;
- m->blocks_nonempty = 0;
- for (unsigned i = 0; i < s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+ stripe_to_mem(m, bkey_s_c_to_stripe(k).v);
bch2_stripes_heap_insert(c, m, k.k->p.offset);
0;
@@ -2252,6 +2384,8 @@ static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
prt_printf(out, " %u", s->blocks[i]);
prt_newline(out);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
+ prt_newline(out);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
@@ -2261,9 +2395,10 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->ec_stripe_head_lock);
list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- prt_printf(out, "target %u algo %u redundancy %u %s:\n",
- h->target, h->algo, h->redundancy,
- bch2_watermarks[h->watermark]);
+ prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
+ h->disk_label, h->algo, h->redundancy,
+ bch2_watermarks[h->watermark],
+ h->nr_created);
if (h->s)
bch2_new_stripe_to_text(out, c, h->s);
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 90962b3c0130..43326370b410 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -97,7 +97,9 @@ static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe
const struct bch_extent_ptr *data_ptr,
unsigned sectors)
{
- return data_ptr->dev == stripe_ptr->dev &&
+ return (data_ptr->dev == stripe_ptr->dev ||
+ data_ptr->dev == BCH_SB_MEMBER_INVALID ||
+ stripe_ptr->dev == BCH_SB_MEMBER_INVALID) &&
data_ptr->gen == stripe_ptr->gen &&
data_ptr->offset >= stripe_ptr->offset &&
data_ptr->offset < stripe_ptr->offset + sectors;
@@ -186,10 +188,15 @@ struct ec_stripe_head {
struct list_head list;
struct mutex lock;
- unsigned target;
+ unsigned disk_label;
unsigned algo;
unsigned redundancy;
enum bch_watermark watermark;
+ bool insufficient_devs;
+
+ unsigned long rw_devs_change_count;
+
+ u64 nr_created;
struct bch_devs_mask devs;
unsigned nr_active_devs;
@@ -202,7 +209,7 @@ struct ec_stripe_head {
struct ec_stripe_new *s;
};
-int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);
+int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
@@ -247,6 +254,8 @@ static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
}
}
+int bch2_dev_remove_stripes(struct bch_fs *, unsigned);
+
void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);
diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h
index 44ce88ba08d7..64ef52e00078 100644
--- a/fs/bcachefs/ec_format.h
+++ b/fs/bcachefs/ec_format.h
@@ -11,7 +11,14 @@ struct bch_stripe {
__u8 csum_granularity_bits;
__u8 csum_type;
- __u8 pad;
+
+ /*
+ * XXX: targets should be 16 bits - fix this if we ever do a stripe_v2
+ *
+ * we can manage with this because this only needs to point to a
+ * disk label, not a target:
+ */
+ __u8 disk_label;
struct bch_extent_ptr ptrs[];
} __packed __aligned(8);
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
index 1df03dccfc72..8d1e70e830ac 100644
--- a/fs/bcachefs/ec_types.h
+++ b/fs/bcachefs/ec_types.h
@@ -16,6 +16,7 @@ struct stripe {
u8 nr_blocks;
u8 nr_redundant;
u8 blocks_nonempty;
+ u8 disk_label;
};
struct gc_stripe {
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 742dcdd3e5d7..60b7875adada 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -119,8 +119,8 @@
x(EEXIST, EEXIST_str_hash_set) \
x(EEXIST, EEXIST_discard_in_flight_add) \
x(EEXIST, EEXIST_subvolume_create) \
- x(0, open_buckets_empty) \
- x(0, freelist_empty) \
+ x(ENOSPC, open_buckets_empty) \
+ x(ENOSPC, freelist_empty) \
x(BCH_ERR_freelist_empty, no_buckets_found) \
x(0, transaction_restart) \
x(BCH_ERR_transaction_restart, transaction_restart_fault_inject) \
@@ -244,6 +244,16 @@
x(EIO, btree_node_read_error) \
x(EIO, btree_node_read_validate_error) \
x(EIO, btree_need_topology_repair) \
+ x(EIO, bucket_ref_update) \
+ x(EIO, trigger_pointer) \
+ x(EIO, trigger_stripe_pointer) \
+ x(EIO, metadata_bucket_inconsistency) \
+ x(EIO, mark_stripe) \
+ x(EIO, stripe_reconstruct) \
+ x(EIO, key_type_error) \
+ x(EIO, no_device_to_read_from) \
+ x(EIO, missing_indirect_extent) \
+ x(EIO, invalidate_stripe_to_dev) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
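
Note: each new x(EIO, name) row gives the EIO class a distinct private code, so callers can report exactly which path failed while bch2_err_class() still collapses them to EIO for userspace. Roughly how such an x-macro list expands (a simplified sketch, not the exact bcachefs definitions):

#define MY_ERRCODES()				\
	x(EIO,	mark_stripe)			\
	x(EIO,	stripe_reconstruct)

enum my_errcode {
	MY_ERR_START = 2048,
#define x(class, name)	MY_ERR_##name,
	MY_ERRCODES()
#undef x
	MY_ERR_MAX
};

static const char * const my_err_strs[] = {
#define x(class, name)	[MY_ERR_##name - MY_ERR_START - 1] = #name, /* dense from 0 */
	MY_ERRCODES()
#undef x
};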
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index eb31bda19544..cc0d22085aef 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -115,7 +115,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
int ret = 0;
if (k.k->type == KEY_TYPE_error)
- return -EIO;
+ return -BCH_ERR_key_type_error;
rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
@@ -133,7 +133,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
* read:
*/
if (!ret && !p.ptr.cached)
- ret = -EIO;
+ ret = -BCH_ERR_no_device_to_read_from;
struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
@@ -146,16 +146,13 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
? f->idx
: f->idx + 1;
- if (!p.idx && !ca)
+ if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
p.idx++;
if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
p.idx++;
- if (!p.idx && !bch2_dev_is_readable(ca))
- p.idx++;
-
- if (p.idx >= (unsigned) p.has_ec + 1)
+ if (p.idx > (unsigned) p.has_ec)
continue;
if (ret > 0 && !ptr_better(c, p, *pick))
@@ -781,14 +778,17 @@ static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
/*
* Returns pointer to the next entry after the one being dropped:
*/
-union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry = to_entry(ptr), *next;
- union bch_extent_entry *ret = entry;
bool drop_crc = true;
+ if (k.k->type == KEY_TYPE_stripe) {
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+ return;
+ }
+
EBUG_ON(ptr < &ptrs.start->ptr ||
ptr >= &ptrs.end->ptr);
EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
@@ -811,21 +811,28 @@ union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
break;
if ((extent_entry_is_crc(entry) && drop_crc) ||
- extent_entry_is_stripe_ptr(entry)) {
- ret = (void *) ret - extent_entry_bytes(entry);
+ extent_entry_is_stripe_ptr(entry))
extent_entry_drop(k, entry);
- }
}
-
- return ret;
}
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
{
+ if (k.k->type != KEY_TYPE_stripe) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == ptr->dev && p.has_ec) {
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+ return;
+ }
+ }
+
bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
- union bch_extent_entry *ret =
- bch2_bkey_drop_ptr_noerror(k, ptr);
+
+ bch2_bkey_drop_ptr_noerror(k, ptr);
/*
* If we deleted all the dirty pointers and there's still cached
@@ -837,14 +844,10 @@ union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
!bch2_bkey_dirty_devs(k.s_c).nr) {
k.k->type = KEY_TYPE_error;
set_bkey_val_u64s(k.k, 0);
- ret = NULL;
} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
k.k->type = KEY_TYPE_deleted;
set_bkey_val_u64s(k.k, 0);
- ret = NULL;
}
-
- return ret;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
@@ -854,10 +857,7 @@ void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
- struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
-
- if (ptr)
- bch2_bkey_drop_ptr_noerror(k, ptr);
+ bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev);
}
const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
@@ -1027,7 +1027,7 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
{
out->atomic++;
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
if (!ca) {
prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
@@ -1131,8 +1131,9 @@ static int extent_ptr_validate(struct bch_fs *c,
{
int ret = 0;
+ /* bad pointers are repaired by check_fix_ptrs(): */
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
if (!ca) {
rcu_read_unlock();
return 0;
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 709dd83183be..ed5001dd662e 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -357,7 +357,7 @@ out: \
__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end, \
_ptr, _entry)
-#define bkey_crc_next(_k, _start, _end, _crc, _iter) \
+#define bkey_crc_next(_k, _end, _crc, _iter) \
({ \
__bkey_extent_entry_for_each_from(_iter, _end, _iter) \
if (extent_entry_is_crc(_iter)) { \
@@ -372,7 +372,7 @@ out: \
#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter) \
for ((_crc) = bch2_extent_crc_unpack(_k, NULL), \
(_iter) = (_start); \
- bkey_crc_next(_k, _start, _end, _crc, _iter); \
+ bkey_crc_next(_k, _end, _crc, _iter); \
(_iter) = extent_entry_next(_iter))
#define bkey_for_each_crc(_k, _p, _crc, _iter) \
@@ -611,9 +611,6 @@ unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_d
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
-
const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);
static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
@@ -649,26 +646,38 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
void bch2_extent_ptr_decoded_append(struct bkey_i *,
struct extent_ptr_decoded *);
-union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
- struct bch_extent_ptr *);
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
- struct bch_extent_ptr *);
+void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
+void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);
-#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
+void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+
+#define bch2_bkey_drop_ptrs_noerror(_k, _ptr, _cond) \
do { \
- struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k); \
+ __label__ _again; \
+ struct bkey_ptrs _ptrs; \
+_again: \
+ _ptrs = bch2_bkey_ptrs(_k); \
\
- struct bch_extent_ptr *_ptr = &_ptrs.start->ptr; \
- \
- while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) { \
+ bkey_for_each_ptr(_ptrs, _ptr) \
if (_cond) { \
- _ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr); \
- _ptrs = bch2_bkey_ptrs(_k); \
- continue; \
+ bch2_bkey_drop_ptr_noerror(_k, _ptr); \
+ goto _again; \
} \
+} while (0)
+
+#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
+do { \
+ __label__ _again; \
+ struct bkey_ptrs _ptrs; \
+_again: \
+ _ptrs = bch2_bkey_ptrs(_k); \
\
- (_ptr)++; \
- } \
+ bkey_for_each_ptr(_ptrs, _ptr) \
+ if (_cond) { \
+ bch2_bkey_drop_ptr(_k, _ptr); \
+ goto _again; \
+ } \
} while (0)
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
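
Note: dropping a pointer rewrites the key and invalidates the iteration cursor, so both macros now restart the walk after every drop; the block-scoped __label__ keeps _again private to each expansion. The same restart-on-mutation shape in plain C (hypothetical list API):

static void drop_matching(struct list *l, bool (*cond)(struct node *))
{
again:
	for (struct node *n = list_first(l); n; n = list_next(l, n))
		if (cond(n)) {
			list_remove(l, n);	/* invalidates n and the cursor */
			goto again;		/* rebuild iteration state from scratch */
		}
}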
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index 508d029ac53d..7e10a9ddcfd9 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -42,7 +42,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir,
+ BTREE_ITER_intent|BTREE_ITER_with_updates);
if (ret)
goto err;
@@ -163,7 +164,7 @@ int bch2_create_trans(struct btree_trans *trans,
name,
dir_target,
&dir_offset,
- STR_HASH_must_create);
+ STR_HASH_must_create|BTREE_ITER_with_updates);
if (ret)
goto err;
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index ec8c427bf588..48a1ab9a649b 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -151,7 +151,6 @@ static void bchfs_read(struct btree_trans *trans,
struct bkey_buf sk;
int flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE;
- u32 snapshot;
int ret = 0;
rbio->c = c;
@@ -159,29 +158,23 @@ static void bchfs_read(struct btree_trans *trans,
rbio->subvol = inum.subvol;
bch2_bkey_buf_init(&sk);
-retry:
bch2_trans_begin(trans);
- iter = (struct btree_iter) { NULL };
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+ POS(inum.inum, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_slots);
while (1) {
struct bkey_s_c k;
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;
- /*
- * read_extent -> io_time_reset may cause a transaction restart
- * without returning an error, we need to check for that here:
- */
- ret = bch2_trans_relock(trans);
+ bch2_trans_begin(trans);
+
+ u32 snapshot;
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
if (ret)
- break;
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
@@ -189,7 +182,7 @@ retry:
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
- break;
+ goto err;
offset_into_extent = iter.pos.offset -
bkey_start_offset(k.k);
@@ -200,7 +193,7 @@ retry:
ret = bch2_read_indirect_extent(trans, &data_btree,
&offset_into_extent, &sk);
if (ret)
- break;
+ goto err;
k = bkey_i_to_s_c(sk.k);
@@ -210,7 +203,7 @@ retry:
ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
extent_partial_reads_expensive(k));
if (ret)
- break;
+ goto err;
}
bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
@@ -229,17 +222,13 @@ retry:
swap(rbio->bio.bi_iter.bi_size, bytes);
bio_advance(&rbio->bio, bytes);
-
- ret = btree_trans_too_many_iters(trans);
- if (ret)
+err:
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
-err:
bch2_trans_iter_exit(trans, &iter);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
if (ret) {
bch_err_inum_offset_ratelimited(c,
iter.pos.inode,
@@ -486,7 +475,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
op->nr_replicas = nr_replicas;
op->res.nr_replicas = nr_replicas;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
- op->subvol = inode->ei_subvol;
+ op->subvol = inode->ei_inum.subvol;
op->pos = POS(inode->v.i_ino, sector);
op->end_io = bch2_writepage_io_done;
op->devs_need_flush = &inode->ei_devs_need_flush;
@@ -659,7 +648,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
int bch2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -728,12 +717,11 @@ out:
goto err;
}
- *pagep = &folio->page;
+ *foliop = folio;
return 0;
err:
folio_unlock(folio);
folio_put(folio);
- *pagep = NULL;
err_unlock:
bch2_pagecache_add_put(inode);
kfree(res);
@@ -743,12 +731,11 @@ err_unlock:
int bch2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch2_folio_reservation *res = fsdata;
- struct folio *folio = page_folio(page);
unsigned offset = pos - folio_pos(folio);
lockdep_assert_held(&inode->v.i_rwsem);
diff --git a/fs/bcachefs/fs-io-buffered.h b/fs/bcachefs/fs-io-buffered.h
index a6126ff790e6..3207ebbb4ab4 100644
--- a/fs/bcachefs/fs-io-buffered.h
+++ b/fs/bcachefs/fs-io-buffered.h
@@ -10,10 +10,10 @@ int bch2_read_folio(struct file *, struct folio *);
int bch2_writepages(struct address_space *, struct writeback_control *);
void bch2_readahead(struct readahead_control *);
-int bch2_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, struct page **, void **);
+int bch2_write_begin(struct file *, struct address_space *, loff_t pos,
+ unsigned len, struct folio **, void **);
int bch2_write_end(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page *, void *);
+ unsigned len, unsigned copied, struct folio *, void *);
ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index e246b1e05aa2..ee1c0325f313 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -500,7 +500,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
dio->op.target = dio->op.opts.foreground_target;
dio->op.write_point = writepoint_hashed((unsigned long) current);
dio->op.nr_replicas = dio->op.opts.data_replicas;
- dio->op.subvol = inode->ei_subvol;
+ dio->op.subvol = inode->ei_inum.subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
dio->op.devs_need_flush = &inode->ei_devs_need_flush;
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index a9cc5cad9cc9..af3a24546aa3 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -182,18 +182,11 @@ static void __bch2_folio_set(struct folio *folio,
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
struct folio **fs, unsigned nr_folios)
{
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_folio *s;
u64 offset = folio_sector(fs[0]);
- unsigned folio_idx;
- u32 snapshot;
bool need_set = false;
- int ret;
- for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
- s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
+ for (unsigned folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ struct bch_folio *s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
if (!s)
return -ENOMEM;
@@ -203,53 +196,40 @@ int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
if (!need_set)
return 0;
- folio_idx = 0;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inum.inum, offset, snapshot),
- BTREE_ITER_slots, k, ret) {
- unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k);
-
- while (folio_idx < nr_folios) {
- struct folio *folio = fs[folio_idx];
- u64 folio_start = folio_sector(folio);
- u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
- folio_start;
- unsigned folio_len = min(k.k->p.offset, folio_end) -
- folio_offset - folio_start;
-
- BUG_ON(k.k->p.offset < folio_start);
- BUG_ON(bkey_start_offset(k.k) > folio_end);
-
- if (!bch2_folio(folio)->uptodate)
- __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
-
- if (k.k->p.offset < folio_end)
- break;
- folio_idx++;
- }
-
- if (folio_idx == nr_folios)
- break;
- }
-
- offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- bch2_trans_put(trans);
+ unsigned folio_idx = 0;
+
+ return bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inum.inum, offset),
+ POS(inum.inum, U64_MAX),
+ inum.subvol, BTREE_ITER_slots, k, ({
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ while (folio_idx < nr_folios) {
+ struct folio *folio = fs[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
+ folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) -
+ folio_offset - folio_start;
+
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
+
+ if (!bch2_folio(folio)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
+
+ if (k.k->p.offset < folio_end)
+ break;
+ folio_idx++;
+ }
- return ret;
+ if (folio_idx == nr_folios)
+ break;
+ 0;
+ })));
}
void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
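
Note: this and the fs-io.c conversions below replace the same open-coded boilerplate with for_each_btree_key_in_subvolume_upto(), which folds the subvolume-to-snapshot lookup and transaction-restart retry into the iterator. The deleted shape, condensed:

retry:
	bch2_trans_begin(trans);
	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;
	for_each_btree_key_upto_norestart(trans, iter, btree,
			SPOS(inum.inum, start, snapshot), end, flags, k, ret) {
		/* per-key body */
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;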
diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h
index fd7d692c087e..fad911cf5068 100644
--- a/fs/bcachefs/fs-io-pagecache.h
+++ b/fs/bcachefs/fs-io-pagecache.h
@@ -99,9 +99,7 @@ static inline void bch2_folio_release(struct folio *folio)
static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
- return folio_has_private(folio)
- ? (struct bch_folio *) folio_get_private(folio)
- : NULL;
+ return folio_get_private(folio);
}
static inline struct bch_folio *bch2_folio(struct folio *folio)
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 77b85da30fb2..71d0fa387509 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -221,30 +221,11 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol,
struct bpos start,
struct bpos end)
{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
- if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
- ret = 1;
- break;
- }
- start = iter.pos;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
- return ret;
+ return bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, start, end,
+ subvol, 0, k, ({
+ bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k);
+ })));
}
static int __bch2_truncate_folio(struct bch_inode_info *inode,
@@ -267,7 +248,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
* XXX: we're doing two index lookups when we end up reading the
* folio
*/
- ret = range_has_data(c, inode->ei_subvol,
+ ret = range_has_data(c, inode->ei_inum.subvol,
POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
if (ret <= 0)
@@ -618,7 +599,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_trans_begin(trans);
ret = bch2_subvolume_get_snapshot(trans,
- inode->ei_subvol, &snapshot);
+ inode->ei_inum.subvol, &snapshot);
if (ret)
goto bkey_err;
@@ -813,41 +794,23 @@ static int quota_reserve_range(struct bch_inode_info *inode,
u64 start, u64 end)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- u32 snapshot;
u64 sectors = end - start;
- u64 pos = start;
- int ret;
-retry:
- bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, pos, snapshot), 0);
-
- while (!(ret = btree_trans_too_many_iters(trans)) &&
- (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
- !(ret = bkey_err(k))) {
- if (bkey_extent_is_allocation(k.k)) {
- u64 s = min(end, k.k->p.offset) -
- max(start, bkey_start_offset(k.k));
- BUG_ON(s > sectors);
- sectors -= s;
- }
- bch2_btree_iter_advance(&iter);
- }
- pos = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter,
+ BTREE_ID_extents,
+ POS(inode->v.i_ino, start),
+ POS(inode->v.i_ino, end - 1),
+ inode->ei_inum.subvol, 0, k, ({
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+
+ 0;
+ })));
return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
}
@@ -942,42 +905,25 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
subvol_inum inum = inode_inum(inode);
u64 isize, next_data = MAX_LFS_FILESIZE;
- u32 snapshot;
- int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot),
- POS(inode->v.i_ino, U64_MAX),
- 0, k, ret) {
- if (bkey_extent_is_data(k.k)) {
- next_data = max(offset, bkey_start_offset(k.k) << 9);
- break;
- } else if (k.k->p.offset >> 9 > isize)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, offset >> 9),
+ POS(inode->v.i_ino, U64_MAX),
+ inum.subvol, 0, k, ({
+ if (bkey_extent_is_data(k.k)) {
+ next_data = max(offset, bkey_start_offset(k.k) << 9);
+ break;
+ } else if (k.k->p.offset >> 9 > isize)
+ break;
+ 0;
+ })));
if (ret)
return ret;
@@ -995,50 +941,34 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
subvol_inum inum = inode_inum(inode);
u64 isize, next_hole = MAX_LFS_FILESIZE;
- u32 snapshot;
- int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot),
- BTREE_ITER_slots, k, ret) {
- if (k.k->p.inode != inode->v.i_ino) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE, 0, false);
- break;
- } else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9, 0, false);
-
- if (next_hole < k.k->p.offset << 9)
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, offset >> 9),
+ POS(inode->v.i_ino, U64_MAX),
+ inum.subvol, BTREE_ITER_slots, k, ({
+ if (k.k->p.inode != inode->v.i_ino) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ offset, MAX_LFS_FILESIZE, 0, false);
break;
- } else {
- offset = max(offset, bkey_start_offset(k.k) << 9);
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ } else if (!bkey_extent_is_data(k.k)) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ max(offset, bkey_start_offset(k.k) << 9),
+ k.k->p.offset << 9, 0, false);
+
+ if (next_hole < k.k->p.offset << 9)
+ break;
+ } else {
+ offset = max(offset, bkey_start_offset(k.k) << 9);
+ }
+ 0;
+ })));
if (ret)
return ret;
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 99c7fe987c74..405cf08bda34 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -100,7 +100,7 @@ static int bch2_ioc_setflags(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
ATTR_CTIME);
mutex_unlock(&inode->ei_update_lock);
@@ -184,7 +184,7 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
bch2_set_projid(c, inode, fa.fsx_projid) ?:
bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
ATTR_CTIME);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 94c392abef65..4a1bb07a2574 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -108,7 +108,7 @@ retry:
goto retry;
bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
- "%s: inode %u:%llu not found when updating",
+ "%s: inode %llu:%llu not found when updating",
bch2_err_str(ret),
inode_inum(inode).subvol,
inode_inum(inode).inum);
@@ -152,42 +152,106 @@ int bch2_fs_quota_transfer(struct bch_fs *c,
return ret;
}
-static int bch2_iget5_test(struct inode *vinode, void *p)
+static bool subvol_inum_eq(subvol_inum a, subvol_inum b)
{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- subvol_inum *inum = p;
+ return a.subvol == b.subvol && a.inum == b.inum;
+}
+
+static int bch2_vfs_inode_cmp_fn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct bch_inode_info *inode = obj;
+ const subvol_inum *v = arg->key;
- return inode->ei_subvol == inum->subvol &&
- inode->ei_inode.bi_inum == inum->inum;
+ return !subvol_inum_eq(inode->ei_inum, *v);
}
-static int bch2_iget5_set(struct inode *vinode, void *p)
+static const struct rhashtable_params bch2_vfs_inodes_params = {
+ .head_offset = offsetof(struct bch_inode_info, hash),
+ .key_offset = offsetof(struct bch_inode_info, ei_inum),
+ .key_len = sizeof(subvol_inum),
+ .obj_cmpfn = bch2_vfs_inode_cmp_fn,
+ .automatic_shrinking = true,
+};
+
+static void __wait_on_freeing_inode(struct inode *inode)
{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- subvol_inum *inum = p;
+ wait_queue_head_t *wq;
+ DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+ wq = bit_waitqueue(&inode->i_state, __I_NEW);
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+}
- inode->v.i_ino = inum->inum;
- inode->ei_subvol = inum->subvol;
- inode->ei_inode.bi_inum = inum->inum;
- return 0;
+struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
+{
+ return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum, bch2_vfs_inodes_params);
+}
+
+static struct bch_inode_info *bch2_inode_hash_find(struct bch_fs *c, struct btree_trans *trans,
+ subvol_inum inum)
+{
+ struct bch_inode_info *inode;
+repeat:
+ inode = __bch2_inode_hash_find(c, inum);
+ if (inode) {
+ spin_lock(&inode->v.i_lock);
+ if (!test_bit(EI_INODE_HASHED, &inode->ei_flags)) {
+ spin_unlock(&inode->v.i_lock);
+ return NULL;
+ }
+ if ((inode->v.i_state & (I_FREEING|I_WILL_FREE))) {
+ if (!trans) {
+ __wait_on_freeing_inode(&inode->v);
+ } else {
+ bch2_trans_unlock(trans);
+ __wait_on_freeing_inode(&inode->v);
+ int ret = bch2_trans_relock(trans);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ goto repeat;
+ }
+ __iget(&inode->v);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ return inode;
}
-static unsigned bch2_inode_hash(subvol_inum inum)
+static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inode)
{
- return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
+ spin_lock(&inode->v.i_lock);
+ bool remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags);
+ spin_unlock(&inode->v.i_lock);
+
+ if (remove) {
+ int ret = rhashtable_remove_fast(&c->vfs_inodes_table,
+ &inode->hash, bch2_vfs_inodes_params);
+ BUG_ON(ret);
+ inode->v.i_hash.pprev = NULL;
+ }
}
-static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_inode_info *inode)
+static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c,
+ struct btree_trans *trans,
+ struct bch_inode_info *inode)
{
- subvol_inum inum = inode_inum(inode);
- struct bch_inode_info *old = to_bch_ei(inode_insert5(&inode->v,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- bch2_iget5_set,
- &inum));
- BUG_ON(!old);
+ struct bch_inode_info *old = inode;
+
+ set_bit(EI_INODE_HASHED, &inode->ei_flags);
+retry:
+ if (unlikely(rhashtable_lookup_insert_fast(&c->vfs_inodes_table,
+ &inode->hash,
+ bch2_vfs_inodes_params))) {
+ old = bch2_inode_hash_find(c, trans, inode->ei_inum);
+ if (!old)
+ goto retry;
+
+ clear_bit(EI_INODE_HASHED, &inode->ei_flags);
- if (unlikely(old != inode)) {
/*
* bcachefs doesn't use I_NEW; we have no use for it since we
* only insert fully created inodes in the inode hash table. But
@@ -201,21 +265,17 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
*/
set_nlink(&inode->v, 1);
discard_new_inode(&inode->v);
- inode = old;
+ return old;
} else {
+ inode_fake_hash(&inode->v);
+
+ inode_sb_list_add(&inode->v);
+
mutex_lock(&c->vfs_inodes_lock);
list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
mutex_unlock(&c->vfs_inodes_lock);
- /*
- * Again, I_NEW makes no sense for bcachefs. This is only needed
- * for clearing I_NEW, but since the inode was already fully
- * created and initialized we didn't actually want
- * inode_insert5() to set it for us.
- */
- unlock_new_inode(&inode->v);
+ return inode;
}
-
- return inode;
}
#define memalloc_flags_do(_flags, _do) \
@@ -233,7 +293,8 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
{
- struct bch_inode_info *inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
+ struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
+ bch2_inode_cache, GFP_NOFS);
if (!inode)
return NULL;
@@ -275,13 +336,24 @@ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
return inode;
}
+static struct bch_inode_info *bch2_inode_hash_init_insert(struct btree_trans *trans,
+ subvol_inum inum,
+ struct bch_inode_unpacked *bi,
+ struct bch_subvolume *subvol)
+{
+ struct bch_inode_info *inode = bch2_new_inode(trans);
+ if (IS_ERR(inode))
+ return inode;
+
+ bch2_vfs_inode_init(trans, inum, inode, bi, subvol);
+
+ return bch2_inode_hash_insert(trans->c, trans, inode);
+}
+
struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
- struct bch_inode_info *inode =
- to_bch_ei(ilookup5_nowait(c->vfs_sb,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- &inum));
+ struct bch_inode_info *inode = bch2_inode_hash_find(c, NULL, inum);
if (inode)
return &inode->v;
@@ -292,11 +364,7 @@ struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
int ret = lockrestart_do(trans,
bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?:
- PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
- if (!ret) {
- bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
- inode = bch2_inode_insert(c, inode);
- }
+ PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
bch2_trans_put(trans);
return ret ? ERR_PTR(ret) : &inode->v;
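
Note: the inode cache now lives in a filesystem-private rhashtable keyed by subvol_inum instead of the VFS icache; lookup and insert reduce to the stock rhashtable calls. Minimal usage of that shape (simplified, with hypothetical struct obj/obj_key types; error handling elided):

static const struct rhashtable_params params = {
	.head_offset	= offsetof(struct obj, hash),
	.key_offset	= offsetof(struct obj, key),
	.key_len	= sizeof(struct obj_key),
};

struct obj *o = rhashtable_lookup_fast(&table, &key, params);	/* NULL if absent */

int ret = rhashtable_lookup_insert_fast(&table, &new->hash, params);
/* nonzero (e.g. -EEXIST) means another thread inserted the key first */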
@@ -317,6 +385,8 @@ __bch2_create(struct mnt_idmap *idmap,
subvol_inum inum;
struct bch_subvolume subvol;
u64 journal_seq = 0;
+ kuid_t kuid;
+ kgid_t kgid;
int ret;
/*
@@ -343,13 +413,15 @@ __bch2_create(struct mnt_idmap *idmap,
retry:
bch2_trans_begin(trans);
- ret = bch2_subvol_is_ro_trans(trans, dir->ei_subvol) ?:
+ kuid = mapped_fsuid(idmap, i_user_ns(&dir->v));
+ kgid = mapped_fsgid(idmap, i_user_ns(&dir->v));
+ ret = bch2_subvol_is_ro_trans(trans, dir->ei_inum.subvol) ?:
bch2_create_trans(trans,
inode_inum(dir), &dir_u, &inode_u,
!(flags & BCH_CREATE_TMPFILE)
? &dentry->d_name : NULL,
- from_kuid(i_user_ns(&dir->v), current_fsuid()),
- from_kgid(i_user_ns(&dir->v), current_fsgid()),
+ from_kuid(i_user_ns(&dir->v), kuid),
+ from_kgid(i_user_ns(&dir->v), kgid),
mode, rdev,
default_acl, acl, snapshot_src, flags) ?:
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
@@ -357,7 +429,7 @@ retry:
if (unlikely(ret))
goto err_before_quota;
- inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
+ inum.subvol = inode_u.bi_subvol ?: dir->ei_inum.subvol;
inum.inum = inode_u.bi_inum;
ret = bch2_subvolume_get(trans, inum.subvol, true,
@@ -387,8 +459,16 @@ err_before_quota:
* we must insert the new inode into the inode cache before calling
* bch2_trans_exit() and dropping locks, else we could race with another
* thread pulling the inode in and modifying it:
+ *
+ * also, calling bch2_inode_hash_insert() without passing in the
+ * transaction object is sketchy - if we could ever end up in
+ * __wait_on_freeing_inode(), we'd risk deadlock.
+ *
+ * But that shouldn't be possible, since we still have the inode locked
+ * that we just created, and we _really_ can't take a transaction
+ * restart here.
*/
- inode = bch2_inode_insert(c, inode);
+ inode = bch2_inode_hash_insert(c, NULL, inode);
bch2_trans_put(trans);
err:
posix_acl_release(default_acl);
@@ -428,11 +508,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
if (ret)
goto err;
- struct bch_inode_info *inode =
- to_bch_ei(ilookup5_nowait(c->vfs_sb,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- &inum));
+ struct bch_inode_info *inode = bch2_inode_hash_find(c, trans, inum);
if (inode)
goto out;
@@ -440,7 +516,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
struct bch_inode_unpacked inode_u;
ret = bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
- PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
+ PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
c, "dirent to missing inode:\n %s",
@@ -460,9 +536,6 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
ret = -ENOENT;
goto err;
}
-
- bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
- inode = bch2_inode_insert(c, inode);
out:
bch2_trans_iter_exit(trans, &dirent_iter);
printbuf_exit(&buf);
@@ -549,8 +622,8 @@ static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
lockdep_assert_held(&inode->v.i_rwsem);
- ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
- bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
+ bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
__bch2_link(c, inode, dir, dentry);
if (unlikely(ret))
return bch2_err_class(ret);
@@ -606,7 +679,7 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
struct bch_inode_info *dir = to_bch_ei(vdir);
struct bch_fs *c = dir->v.i_sb->s_fs_info;
- int ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
+ int ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
__bch2_unlink(vdir, dentry, false);
return bch2_err_class(ret);
}
@@ -663,15 +736,16 @@ static int bch2_rename2(struct mnt_idmap *idmap,
struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
struct bch_inode_unpacked dst_dir_u, src_dir_u;
- struct bch_inode_unpacked src_inode_u, dst_inode_u;
+ struct bch_inode_unpacked src_inode_u, dst_inode_u, *whiteout_inode_u;
struct btree_trans *trans;
enum bch_rename_mode mode = flags & RENAME_EXCHANGE
? BCH_RENAME_EXCHANGE
: dst_dentry->d_inode
? BCH_RENAME_OVERWRITE : BCH_RENAME;
+ bool whiteout = !!(flags & RENAME_WHITEOUT);
int ret;
- if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
+ if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE|RENAME_WHITEOUT))
return -EINVAL;
if (mode == BCH_RENAME_OVERWRITE) {
@@ -689,8 +763,8 @@ static int bch2_rename2(struct mnt_idmap *idmap,
trans = bch2_trans_get(c);
- ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_subvol) ?:
- bch2_subvol_is_ro_trans(trans, dst_dir->ei_subvol);
+ ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_inum.subvol) ?:
+ bch2_subvol_is_ro_trans(trans, dst_dir->ei_inum.subvol);
if (ret)
goto err;
@@ -712,18 +786,48 @@ static int bch2_rename2(struct mnt_idmap *idmap,
if (ret)
goto err;
}
+retry:
+ bch2_trans_begin(trans);
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_rename_trans(trans,
- inode_inum(src_dir), &src_dir_u,
- inode_inum(dst_dir), &dst_dir_u,
- &src_inode_u,
- &dst_inode_u,
- &src_dentry->d_name,
- &dst_dentry->d_name,
- mode));
+ ret = bch2_rename_trans(trans,
+ inode_inum(src_dir), &src_dir_u,
+ inode_inum(dst_dir), &dst_dir_u,
+ &src_inode_u,
+ &dst_inode_u,
+ &src_dentry->d_name,
+ &dst_dentry->d_name,
+ mode);
if (unlikely(ret))
+ goto err_tx_restart;
+
+ if (whiteout) {
+ whiteout_inode_u = bch2_trans_kmalloc_nomemzero(trans, sizeof(*whiteout_inode_u));
+ ret = PTR_ERR_OR_ZERO(whiteout_inode_u);
+ if (unlikely(ret))
+ goto err_tx_restart;
+ bch2_inode_init_early(c, whiteout_inode_u);
+
+ ret = bch2_create_trans(trans,
+ inode_inum(src_dir), &src_dir_u,
+ whiteout_inode_u,
+ &src_dentry->d_name,
+ from_kuid(i_user_ns(&src_dir->v), current_fsuid()),
+ from_kgid(i_user_ns(&src_dir->v), current_fsgid()),
+ S_IFCHR|WHITEOUT_MODE, 0,
+ NULL, NULL, (subvol_inum) { 0 }, 0) ?:
+ bch2_quota_acct(c, bch_qid(whiteout_inode_u), Q_INO, 1,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (unlikely(ret))
+ goto err_tx_restart;
+ }
+
+ ret = bch2_trans_commit(trans, NULL, NULL, 0);
+ if (unlikely(ret)) {
+err_tx_restart:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
goto err;
+ }
BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
BUG_ON(dst_inode &&
@@ -771,11 +875,17 @@ static void bch2_setattr_copy(struct mnt_idmap *idmap,
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
unsigned int ia_valid = attr->ia_valid;
+ kuid_t kuid;
+ kgid_t kgid;
- if (ia_valid & ATTR_UID)
- bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
- if (ia_valid & ATTR_GID)
- bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+ if (ia_valid & ATTR_UID) {
+ kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
+ bi->bi_uid = from_kuid(i_user_ns(&inode->v), kuid);
+ }
+ if (ia_valid & ATTR_GID) {
+ kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
+ bi->bi_gid = from_kgid(i_user_ns(&inode->v), kgid);
+ }
if (ia_valid & ATTR_SIZE)
bi->bi_size = attr->ia_size;
@@ -790,11 +900,11 @@ static void bch2_setattr_copy(struct mnt_idmap *idmap,
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
kgid_t gid = ia_valid & ATTR_GID
- ? attr->ia_gid
+ ? kgid
: inode->v.i_gid;
- if (!in_group_p(gid) &&
- !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
+ if (!in_group_or_capable(idmap, &inode->v,
+ make_vfsgid(idmap, i_user_ns(&inode->v), gid)))
mode &= ~S_ISGID;
bi->bi_mode = mode;
}
@@ -810,17 +920,23 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
+ kuid_t kuid;
+ kgid_t kgid;
int ret;
mutex_lock(&inode->ei_update_lock);
qid = inode->ei_qid;
- if (attr->ia_valid & ATTR_UID)
- qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
+ if (attr->ia_valid & ATTR_UID) {
+ kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
+ qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), kuid);
+ }
- if (attr->ia_valid & ATTR_GID)
- qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+ if (attr->ia_valid & ATTR_GID) {
+ kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
+ qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), kgid);
+ }
ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
KEY_TYPE_QUOTA_PREALLOC);
@@ -876,13 +992,15 @@ static int bch2_getattr(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, &inode->v);
+ vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, &inode->v);
stat->dev = inode->v.i_sb->s_dev;
stat->ino = inode->v.i_ino;
stat->mode = inode->v.i_mode;
stat->nlink = inode->v.i_nlink;
- stat->uid = inode->v.i_uid;
- stat->gid = inode->v.i_gid;
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->v.i_rdev;
stat->size = i_size_read(&inode->v);
stat->atime = inode_get_atime(&inode->v);
@@ -891,7 +1009,7 @@ static int bch2_getattr(struct mnt_idmap *idmap,
stat->blksize = block_bytes(c);
stat->blocks = inode->v.i_blocks;
- stat->subvol = inode->ei_subvol;
+ stat->subvol = inode->ei_inum.subvol;
stat->result_mask |= STATX_SUBVOL;
if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->v.i_mode)) {
@@ -933,7 +1051,7 @@ static int bch2_setattr(struct mnt_idmap *idmap,
lockdep_assert_held(&inode->v.i_rwsem);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
setattr_prepare(idmap, dentry, iattr);
if (ret)
return ret;
@@ -1026,7 +1144,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
struct bkey_buf cur, prev;
unsigned offset_into_extent, sectors;
bool have_extent = false;
- u32 snapshot;
int ret = 0;
ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
@@ -1042,21 +1159,30 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_bkey_buf_init(&cur);
bch2_bkey_buf_init(&prev);
trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot);
- if (ret)
- goto err;
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(ei->v.i_ino, start, snapshot), 0);
+ POS(ei->v.i_ino, start), 0);
- while (!(ret = btree_trans_too_many_iters(trans)) &&
- (k = bch2_btree_iter_peek_upto(&iter, end)).k &&
- !(ret = bkey_err(k))) {
+ while (true) {
enum btree_id data_btree = BTREE_ID_extents;
+ bch2_trans_begin(trans);
+
+ u32 snapshot;
+ ret = bch2_subvolume_get_snapshot(trans, ei->ei_inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+ k = bch2_btree_iter_peek_upto(&iter, end);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k)
+ break;
+
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
bch2_btree_iter_advance(&iter);
@@ -1100,16 +1226,12 @@ retry:
bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
-
- ret = bch2_trans_relock(trans);
- if (ret)
+err:
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
- start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
if (!ret && have_extent) {
bch2_trans_unlock(trans);
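
The fiemap rework above drops the retry: label in favor of the pattern used
elsewhere in bcachefs: begin the transaction at the top of every loop
iteration, re-derive the snapshot, and swallow transaction restarts at the
bottom instead of jumping back. The control flow, reduced to a sketch (the
helper name is hypothetical):

	while (true) {
		bch2_trans_begin(trans);	/* restart point */

		ret = process_one_key(trans, &iter);	/* hypothetical body */
		if (!ret)
			continue;
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;		/* redo this iteration */
		break;				/* done, or a real error */
	}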
@@ -1165,7 +1287,7 @@ static int bch2_open(struct inode *vinode, struct file *file)
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret = bch2_subvol_is_ro(c, inode->ei_subvol);
+ int ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol);
if (ret)
return ret;
}
@@ -1297,8 +1419,8 @@ static int bcachefs_fid_valid(int fh_len, int fh_type)
static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
{
return (struct bcachefs_fid) {
- .inum = inode->ei_inode.bi_inum,
- .subvol = inode->ei_subvol,
+ .inum = inode->ei_inum.inum,
+ .subvol = inode->ei_inum.subvol,
.gen = inode->ei_inode.bi_generation,
};
}
@@ -1383,7 +1505,7 @@ static struct dentry *bch2_get_parent(struct dentry *child)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
subvol_inum parent_inum = {
.subvol = inode->ei_inode.bi_parent_subvol ?:
- inode->ei_subvol,
+ inode->ei_inum.subvol,
.inum = inode->ei_inode.bi_dir,
};
@@ -1419,7 +1541,7 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child
retry:
bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot);
+ ret = bch2_subvolume_get_snapshot(trans, dir->ei_inum.subvol, &snapshot);
if (ret)
goto err;
@@ -1450,8 +1572,7 @@ retry:
if (ret)
goto err;
- if (target.subvol == inode->ei_subvol &&
- target.inum == inode->ei_inode.bi_inum)
+ if (subvol_inum_eq(target, inode->ei_inum))
goto found;
} else {
/*
@@ -1472,8 +1593,7 @@ retry:
if (ret)
continue;
- if (target.subvol == inode->ei_subvol &&
- target.inum == inode->ei_inode.bi_inum)
+ if (subvol_inum_eq(target, inode->ei_inum))
goto found;
}
}
@@ -1505,12 +1625,15 @@ static const struct export_operations bch_export_ops = {
.get_name = bch2_get_name,
};
-static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
+static void bch2_vfs_inode_init(struct btree_trans *trans,
+ subvol_inum inum,
struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
struct bch_subvolume *subvol)
{
- bch2_iget5_set(&inode->v, &inum);
+ inode->v.i_ino = inum.inum;
+ inode->ei_inum = inum;
+ inode->ei_inode.bi_inum = inum.inum;
bch2_inode_update_after_write(trans, inode, bi, ~0);
inode->v.i_blocks = bi->bi_sectors;
@@ -1522,7 +1645,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
inode->ei_flags = 0;
inode->ei_quota_reserved = 0;
inode->ei_qid = bch_qid(bi);
- inode->ei_subvol = inum.subvol;
if (BCH_SUBVOLUME_SNAP(subvol))
set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
@@ -1589,6 +1711,17 @@ static void bch2_evict_inode(struct inode *vinode)
{
struct bch_fs *c = vinode->i_sb->s_fs_info;
struct bch_inode_info *inode = to_bch_ei(vinode);
+ bool delete = !inode->v.i_nlink && !is_bad_inode(&inode->v);
+
+ /*
+	 * evict() has waited for outstanding writeback; we'll do no more IO
+	 * through this inode, so it's safe to remove it from the VFS inode
+	 * hash table here.
+	 *
+	 * Do that now so that other threads aren't needlessly blocked from
+	 * pulling it back in:
+ */
+ if (!delete)
+ bch2_inode_hash_remove(c, inode);
truncate_inode_pages_final(&inode->v.i_data);
@@ -1596,12 +1729,18 @@ static void bch2_evict_inode(struct inode *vinode)
BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
- if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
+ if (delete) {
bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
KEY_TYPE_QUOTA_WARN);
bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
KEY_TYPE_QUOTA_WARN);
bch2_inode_rm(c, inode_inum(inode));
+
+ /*
+	 * If we're deleting, the inode had to stay in the VFS hash table
+	 * until now, so that fsck could check whether unlinked inodes are
+	 * still open:
+ */
+ bch2_inode_hash_remove(c, inode);
}
mutex_lock(&c->vfs_inodes_lock);
@@ -1631,7 +1770,7 @@ again:
mutex_lock(&c->vfs_inodes_lock);
list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
- if (!snapshot_list_has_id(s, inode->ei_subvol))
+ if (!snapshot_list_has_id(s, inode->ei_inum.subvol))
continue;
if (!(inode->v.i_state & I_DONTCACHE) &&
@@ -1644,14 +1783,16 @@ again:
break;
}
} else if (clean_pass && this_pass_clean) {
- wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW);
- DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ wq_head = inode_bit_waitqueue(&wqe, &inode->v, __I_NEW);
+ prepare_to_wait_event(wq_head, &wqe.wq_entry,
+ TASK_UNINTERRUPTIBLE);
mutex_unlock(&c->vfs_inodes_lock);
schedule();
- finish_wait(wq, &wait.wq_entry);
+ finish_wait(wq_head, &wqe.wq_entry);
goto again;
}
}
@@ -1791,30 +1932,14 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
struct bch_fs *c = root->d_sb->s_fs_info;
- enum bch_opt_id i;
struct printbuf buf = PRINTBUF;
- int ret = 0;
- for (i = 0; i < bch2_opts_nr; i++) {
- const struct bch_option *opt = &bch2_opt_table[i];
- u64 v = bch2_opt_get_by_id(&c->opts, i);
+ bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb,
+ OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE);
+ printbuf_nul_terminate(&buf);
+ seq_puts(seq, buf.buf);
- if ((opt->flags & OPT_HIDDEN) ||
- !(opt->flags & OPT_MOUNT))
- continue;
-
- if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
- continue;
-
- printbuf_reset(&buf);
- bch2_opt_to_text(&buf, c, c->disk_sb.sb, opt, v,
- OPT_SHOW_MOUNT_STYLE);
- seq_putc(seq, ',');
- seq_puts(seq, buf.buf);
- }
-
- if (buf.allocation_failure)
- ret = -ENOMEM;
+ int ret = buf.allocation_failure ? -ENOMEM : 0;
printbuf_exit(&buf);
return ret;
}
@@ -2119,12 +2244,23 @@ static int bch2_init_fs_context(struct fs_context *fc)
return 0;
}
+void bch2_fs_vfs_exit(struct bch_fs *c)
+{
+ if (c->vfs_inodes_table.tbl)
+ rhashtable_destroy(&c->vfs_inodes_table);
+}
+
+int bch2_fs_vfs_init(struct bch_fs *c)
+{
+ return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params);
+}
+
static struct file_system_type bcache_fs_type = {
.owner = THIS_MODULE,
.name = "bcachefs",
.init_fs_context = bch2_init_fs_context,
.kill_sb = bch2_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("bcachefs");
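
bch2_fs_vfs_init() above backs the new inode hash with an rhashtable.
bch2_vfs_inodes_params is defined elsewhere in this patch; given the
rhash_head and ei_inum members added to bch_inode_info below, a plausible
shape for it - illustrative, not the patch's literal definition - is:

	static const struct rhashtable_params bch2_vfs_inodes_params = {
		.head_offset		= offsetof(struct bch_inode_info, hash),
		.key_offset		= offsetof(struct bch_inode_info, ei_inum),
		.key_len		= sizeof(subvol_inum),
		.automatic_shrinking	= true,
	};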
@@ -2139,7 +2275,8 @@ int __init bch2_vfs_init(void)
{
int ret = -ENOMEM;
- bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
+ bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
+ SLAB_ACCOUNT);
if (!bch2_inode_cache)
goto err;
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index c3af7225ff69..da74ecc236e7 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -13,6 +13,9 @@
struct bch_inode_info {
struct inode v;
+ struct rhash_head hash;
+ subvol_inum ei_inum;
+
struct list_head ei_vfs_inode_list;
unsigned long ei_flags;
@@ -24,8 +27,6 @@ struct bch_inode_info {
struct mutex ei_quota_lock;
struct bch_qid ei_qid;
- u32 ei_subvol;
-
/*
* When we've been doing nocow writes we'll need to issue flushes to the
* underlying block devices
@@ -50,12 +51,11 @@ struct bch_inode_info {
static inline subvol_inum inode_inum(struct bch_inode_info *inode)
{
- return (subvol_inum) {
- .subvol = inode->ei_subvol,
- .inum = inode->ei_inode.bi_inum,
- };
+ return inode->ei_inum;
}
+struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *, subvol_inum);
+
/*
* Set if we've gotten a btree error for this inode, and thus the vfs inode and
* btree inode may be inconsistent:
@@ -67,6 +67,7 @@ static inline subvol_inum inode_inum(struct bch_inode_info *inode)
* those:
*/
#define EI_INODE_SNAPSHOT 1
+#define EI_INODE_HASHED 2
#define to_bch_ei(_inode) \
container_of_or_null(_inode, struct bch_inode_info, v)
@@ -187,6 +188,9 @@ int __bch2_unlink(struct inode *, struct dentry *, bool);
void bch2_evict_subvolume_inodes(struct bch_fs *, snapshot_id_list *);
+void bch2_fs_vfs_exit(struct bch_fs *);
+int bch2_fs_vfs_init(struct bch_fs *);
+
void bch2_vfs_exit(void);
int bch2_vfs_init(void);
@@ -194,8 +198,17 @@ int bch2_vfs_init(void);
#define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields) ({ do {} while (0); })
+static inline struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
+{
+ return NULL;
+}
+
static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
snapshot_id_list *s) {}
+
+static inline void bch2_fs_vfs_exit(struct bch_fs *c) {}
+static inline int bch2_fs_vfs_init(struct bch_fs *c) { return 0; }
+
static inline void bch2_vfs_exit(void) {}
static inline int bch2_vfs_init(void) { return 0; }
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 83bd31b44aad..9b3470a97546 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -8,6 +8,7 @@
#include "darray.h"
#include "dirent.h"
#include "error.h"
+#include "fs.h"
#include "fs-common.h"
#include "fsck.h"
#include "inode.h"
@@ -962,6 +963,22 @@ fsck_err:
return ret;
}
+static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
+{
+ subvol_inum inum = {
+ .subvol = snapshot_t(c, p.snapshot)->subvol,
+ .inum = p.offset,
+ };
+
+ /* snapshot tree corruption, can't safely delete */
+ if (!inum.subvol) {
+ bch_err_ratelimited(c, "%s(): snapshot %u has no subvol", __func__, p.snapshot);
+ return true;
+ }
+
+ return __bch2_inode_hash_find(c, inum) != NULL;
+}
+
static int check_inode(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@@ -1040,6 +1057,7 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_flags & BCH_INODE_unlinked &&
+ !bch2_inode_open(c, k.k->p) &&
(!c->sb.clean ||
fsck_err(trans, inode_unlinked_but_clean,
"filesystem marked clean, but inode %llu unlinked",
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 2be6be33afa3..6ac0ff7e074b 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -365,7 +365,7 @@ int bch2_inode_peek(struct btree_trans *trans,
subvol_inum inum, unsigned flags)
{
int ret = bch2_inode_peek_nowarn(trans, iter, inode, inum, flags);
- bch_err_msg(trans->c, ret, "looking up inum %u:%llu:", inum.subvol, inum.inum);
+ bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
return ret;
}
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 7ee3b75480df..b2f50e74bb76 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -286,7 +286,7 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
*/
bool promote_full = (failed ||
*read_full ||
- READ_ONCE(c->promote_whole_extents));
+ READ_ONCE(c->opts.promote_whole_extents));
/* data might have to be decompressed in the write path: */
unsigned sectors = promote_full
? max(pick->crc.compressed_size, pick->crc.live_size)
@@ -777,7 +777,7 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
orig_k->k->k.size,
reflink_offset);
bch2_inconsistent_error(trans->c);
- ret = -EIO;
+ ret = -BCH_ERR_missing_indirect_extent;
goto err;
}
@@ -869,9 +869,15 @@ retry_pick:
goto hole;
if (pick_ret < 0) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, k);
+
bch_err_inum_offset_ratelimited(c,
read_pos.inode, read_pos.offset << 9,
- "no device to read from");
+ "no device to read from: %s\n %s",
+ bch2_err_str(pick_ret),
+ buf.buf);
+ printbuf_exit(&buf);
goto err;
}
@@ -1086,7 +1092,7 @@ get_bio:
trans->notrace_relock_fail = true;
} else {
/* Attempting reconstruct read: */
- if (bch2_ec_read_extent(trans, rbio)) {
+ if (bch2_ec_read_extent(trans, rbio, k)) {
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
}
@@ -1214,10 +1220,6 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
-
- ret = btree_trans_too_many_iters(trans);
- if (ret)
- goto err;
err:
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 1d4761d15002..d3b5be7fd9bf 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1447,9 +1447,7 @@ again:
op->nr_replicas_required,
op->watermark,
op->flags,
- (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
- BCH_WRITE_ONLY_SPECIFIED_DEVS))
- ? NULL : &op->cl, &wp));
+ &op->cl, &wp));
if (unlikely(ret)) {
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
break;
@@ -1592,6 +1590,9 @@ CLOSURE_CALLBACK(bch2_write)
BUG_ON(!op->write_point.v);
BUG_ON(bkey_eq(op->pos, POS_MAX));
+ if (op->flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
+ op->flags |= BCH_WRITE_ALLOC_NOWAIT;
+
op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
op->start_time = local_clock();
bch2_keylist_init(&op->insert_keys, op->inline_keys);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 7664b68e6a15..30460bce04be 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1353,6 +1353,7 @@ int bch2_journal_read(struct bch_fs *c,
genradix_for_each(&c->journal_entries, radix_iter, _i) {
struct bch_replicas_padded replicas = {
.e.data_type = BCH_DATA_journal,
+ .e.nr_devs = 0,
.e.nr_required = 1,
};
@@ -1379,7 +1380,7 @@ int bch2_journal_read(struct bch_fs *c,
goto err;
darray_for_each(i->ptrs, ptr)
- replicas.e.devs[replicas.e.nr_devs++] = ptr->dev;
+ replicas_entry_add_dev(&replicas.e, ptr->dev);
bch2_replicas_entry_sort(&replicas.e);
@@ -1950,7 +1951,8 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
if (error ||
w->noflush ||
(!w->must_flush &&
- (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+ time_before(jiffies, j->last_flush_write +
+ msecs_to_jiffies(c->opts.journal_flush_delay)) &&
test_bit(JOURNAL_may_skip_flush, &j->flags))) {
w->noflush = true;
SET_JSET_NO_FLUSH(w->data, true);
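
The conversion to time_before() above moves the flush-delay check onto the
kernel's canonical wrap-safe jiffies comparison helpers. Simplified from
include/linux/jiffies.h (the real macros also typecheck their arguments):

	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before(a, b)	time_after(b, a)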
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 70b998d9f19c..ace291f175dd 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -641,6 +641,7 @@ static u64 journal_seq_to_flush(struct journal *j)
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct btree_cache *bc = &c->btree_cache;
bool kthread = (current->flags & PF_KTHREAD) != 0;
u64 seq_to_flush;
size_t min_nr, min_key_cache, nr_flushed;
@@ -681,7 +682,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
if (j->watermark != BCH_WATERMARK_stripe)
min_nr = 1;
- if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
+ if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
min_nr = 1;
min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
@@ -689,8 +691,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
trace_and_count(c, journal_reclaim_start, c,
direct, kicked,
min_nr, min_key_cache,
- atomic_read(&c->btree_cache.dirty),
- c->btree_cache.used,
+ atomic_long_read(&bc->nr_dirty), btree_cache_live,
atomic_long_read(&c->btree_key_cache.nr_dirty),
atomic_long_read(&c->btree_key_cache.nr_keys));
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index e10fc1da71b1..232be8a44051 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -230,6 +230,8 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = U64_MAX, \
.choices = _choices
+#define OPT_BITFIELD(_choices) .type = BCH_OPT_BITFIELD, \
+ .choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
@@ -376,6 +378,13 @@ int bch2_opt_parse(struct bch_fs *c,
*res = ret;
break;
+ case BCH_OPT_BITFIELD: {
+ s64 v = bch2_read_flag_list(val, opt->choices);
+ if (v < 0)
+ return v;
+ *res = v;
+ break;
+ }
case BCH_OPT_FN:
ret = opt->fn.parse(c, val, res, err);
@@ -423,6 +432,9 @@ void bch2_opt_to_text(struct printbuf *out,
else
prt_str(out, opt->choices[v]);
break;
+ case BCH_OPT_BITFIELD:
+ prt_bitflags(out, opt->choices, v);
+ break;
case BCH_OPT_FN:
opt->fn.to_text(out, c, sb, v);
break;
@@ -431,6 +443,32 @@ void bch2_opt_to_text(struct printbuf *out,
}
}
+void bch2_opts_to_text(struct printbuf *out,
+ struct bch_opts opts,
+ struct bch_fs *c, struct bch_sb *sb,
+ unsigned show_mask, unsigned hide_mask,
+ unsigned flags)
+{
+ bool first = true;
+
+ for (enum bch_opt_id i = 0; i < bch2_opts_nr; i++) {
+ const struct bch_option *opt = &bch2_opt_table[i];
+
+ if ((opt->flags & hide_mask) || !(opt->flags & show_mask))
+ continue;
+
+ u64 v = bch2_opt_get_by_id(&opts, i);
+ if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
+ continue;
+
+ if (!first)
+ prt_char(out, ',');
+ first = false;
+
+ bch2_opt_to_text(out, c, sb, opt, v, flags);
+ }
+}
+
int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
{
int ret = 0;
@@ -608,10 +646,20 @@ int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
return 0;
}
-void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
+struct bch_dev_sb_opt_set {
+ void (*set_sb)(struct bch_member *, u64);
+};
+
+static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters[] = {
+#define x(n, set) [Opt_##n] = { .set_sb = SET_##set },
+ BCH_DEV_OPT_SETTERS()
+#undef x
+};
+
+void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
+ const struct bch_option *opt, u64 v)
{
- if (opt->set_sb == SET_BCH2_NO_SB_OPT)
- return;
+ enum bch_opt_id id = opt - bch2_opt_table;
if (opt->flags & OPT_SB_FIELD_SECTORS)
v >>= 9;
@@ -619,16 +667,35 @@ void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = ilog2(v);
- opt->set_sb(sb, v);
+ if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
+ v++;
+
+ if (opt->flags & OPT_FS) {
+ if (opt->set_sb != SET_BCH2_NO_SB_OPT)
+ opt->set_sb(sb, v);
+ }
+
+ if ((opt->flags & OPT_DEVICE) && dev_idx >= 0) {
+ if (WARN(!bch2_member_exists(sb, dev_idx),
+ "tried to set device option %s on nonexistent device %i",
+ opt->attr.name, dev_idx))
+ return;
+
+ struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx);
+
+ const struct bch_dev_sb_opt_set *set = bch2_dev_sb_opt_setters + id;
+ if (set->set_sb)
+ set->set_sb(m, v);
+ else
+ pr_err("option %s cannot be set via opt_set_sb()", opt->attr.name);
+ }
}
-void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
+void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca,
+ const struct bch_option *opt, u64 v)
{
- if (opt->set_sb == SET_BCH2_NO_SB_OPT)
- return;
-
mutex_lock(&c->sb_lock);
- __bch2_opt_set_sb(c->disk_sb.sb, opt, v);
+ __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
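
The per-device setters table above is generated with the x-macro pattern:
BCH_DEV_OPT_SETTERS() (added to opts.h below) supplies (option, accessor)
pairs, and each x() expansion becomes a designated initializer. Expanded by
hand, the table comes out as roughly:

	static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters[] = {
		[Opt_discard]		= { .set_sb = SET_BCH_MEMBER_DISCARD },
		[Opt_durability]	= { .set_sb = SET_BCH_MEMBER_DURABILITY },
		[Opt_data_allowed]	= { .set_sb = SET_BCH_MEMBER_DATA_ALLOWED },
	};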
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index cda1725702ea..cb2e244a2429 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -53,23 +53,25 @@ void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
/* When can be set: */
enum opt_flags {
- OPT_FS = (1 << 0), /* Filesystem option */
- OPT_DEVICE = (1 << 1), /* Device option */
- OPT_INODE = (1 << 2), /* Inode option */
- OPT_FORMAT = (1 << 3), /* May be specified at format time */
- OPT_MOUNT = (1 << 4), /* May be specified at mount time */
- OPT_RUNTIME = (1 << 5), /* May be specified at runtime */
- OPT_HUMAN_READABLE = (1 << 6),
- OPT_MUST_BE_POW_2 = (1 << 7), /* Must be power of 2 */
- OPT_SB_FIELD_SECTORS = (1 << 8),/* Superblock field is >> 9 of actual value */
- OPT_SB_FIELD_ILOG2 = (1 << 9), /* Superblock field is ilog2 of actual value */
- OPT_HIDDEN = (1 << 10),
+ OPT_FS = BIT(0), /* Filesystem option */
+ OPT_DEVICE = BIT(1), /* Device option */
+ OPT_INODE = BIT(2), /* Inode option */
+ OPT_FORMAT = BIT(3), /* May be specified at format time */
+ OPT_MOUNT = BIT(4), /* May be specified at mount time */
+ OPT_RUNTIME = BIT(5), /* May be specified at runtime */
+ OPT_HUMAN_READABLE = BIT(6),
+ OPT_MUST_BE_POW_2 = BIT(7), /* Must be power of 2 */
+ OPT_SB_FIELD_SECTORS = BIT(8), /* Superblock field is >> 9 of actual value */
+ OPT_SB_FIELD_ILOG2 = BIT(9), /* Superblock field is ilog2 of actual value */
+ OPT_SB_FIELD_ONE_BIAS = BIT(10), /* 0 means default value */
+ OPT_HIDDEN = BIT(11),
};
enum opt_type {
BCH_OPT_BOOL,
BCH_OPT_UINT,
BCH_OPT_STR,
+ BCH_OPT_BITFIELD,
BCH_OPT_FN,
};
@@ -263,6 +265,11 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
NULL, "Enable inline data extents") \
+ x(promote_whole_extents, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_PROMOTE_WHOLE_EXTENTS, true, \
+ NULL, "Promote whole extents, instead of just part being read")\
x(acl, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT, \
OPT_BOOL(), \
@@ -366,6 +373,16 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, "Exit recovery immediately prior to journal replay")\
+ x(recovery_passes, u64, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BITFIELD(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Recovery passes to run explicitly") \
+ x(recovery_passes_exclude, u64, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BITFIELD(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Recovery passes to exclude") \
x(recovery_pass_last, u8, \
OPT_FS|OPT_MOUNT, \
OPT_STR_NOLIMIT(bch2_recovery_passes), \
@@ -472,11 +489,16 @@ enum fsck_err_opts {
BCH2_NO_SB_OPT, 0, \
"size", "Size of filesystem on device") \
x(durability, u8, \
- OPT_DEVICE, \
+ OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \
OPT_UINT(0, BCH_REPLICAS_MAX), \
BCH2_NO_SB_OPT, 1, \
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \
+ x(data_allowed, u8, \
+ OPT_DEVICE, \
+ OPT_BITFIELD(__bch2_data_types), \
+ BCH2_NO_SB_OPT, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
+ "types", "Allowed data types for this device: journal, btree, and/or user")\
x(btree_node_prefetch, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
@@ -484,6 +506,11 @@ enum fsck_err_opts {
NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\
" prefetched sequentially")
+#define BCH_DEV_OPT_SETTERS() \
+ x(discard, BCH_MEMBER_DISCARD) \
+ x(durability, BCH_MEMBER_DURABILITY) \
+ x(data_allowed, BCH_MEMBER_DATA_ALLOWED)
+
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
BCH_OPTS()
@@ -563,8 +590,10 @@ void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
-void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
-void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
+void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64);
+
+struct bch_dev;
+void bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64);
int bch2_opt_lookup(const char *);
int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *);
@@ -576,6 +605,10 @@ int bch2_opt_parse(struct bch_fs *, const struct bch_option *,
void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
const struct bch_option *, u64, unsigned);
+void bch2_opts_to_text(struct printbuf *,
+ struct bch_opts,
+ struct bch_fs *, struct bch_sb *,
+ unsigned, unsigned, unsigned);
int bch2_opt_check_may_set(struct bch_fs *, int, u64);
int bch2_opts_check_may_set(struct bch_fs *);
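
For the new BCH_OPT_BITFIELD type above: parsing goes through
bch2_read_flag_list(), which turns a comma-separated list of names into a
bitmask, and printing goes through prt_bitflags(), which does the reverse.
Assuming the standard option plumbing, usage would look like (hypothetical
invocations, not taken from the patch):

	/*
	 *   mount -t bcachefs -o recovery_passes_exclude=check_inodes <dev> <mnt>
	 *   mount -t bcachefs -o recovery_passes=check_allocations,check_lrus ...
	 *
	 * and for the per-device bitfield:
	 *
	 *   data_allowed=journal,btree
	 */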
diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c
new file mode 100644
index 000000000000..40a20192eee8
--- /dev/null
+++ b/fs/bcachefs/rcu_pending.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "%s() " fmt "\n", __func__
+
+#include <linux/generic-radix-tree.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/srcu.h>
+#include <linux/vmalloc.h>
+
+#include "rcu_pending.h"
+#include "darray.h"
+#include "util.h"
+
+#define static_array_for_each(_a, _i) \
+ for (typeof(&(_a)[0]) _i = _a; \
+ _i < (_a) + ARRAY_SIZE(_a); \
+ _i++)
+
+enum rcu_pending_special {
+ RCU_PENDING_KVFREE = 1,
+ RCU_PENDING_CALL_RCU = 2,
+};
+
+#define RCU_PENDING_KVFREE_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_KVFREE)
+#define RCU_PENDING_CALL_RCU_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_CALL_RCU)
+
+static inline unsigned long __get_state_synchronize_rcu(struct srcu_struct *ssp)
+{
+ return ssp
+ ? get_state_synchronize_srcu(ssp)
+ : get_state_synchronize_rcu();
+}
+
+static inline unsigned long __start_poll_synchronize_rcu(struct srcu_struct *ssp)
+{
+ return ssp
+ ? start_poll_synchronize_srcu(ssp)
+ : start_poll_synchronize_rcu();
+}
+
+static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, unsigned long cookie)
+{
+ return ssp
+ ? poll_state_synchronize_srcu(ssp, cookie)
+ : poll_state_synchronize_rcu(cookie);
+}
+
+static inline void __rcu_barrier(struct srcu_struct *ssp)
+{
+ return ssp
+ ? srcu_barrier(ssp)
+ : rcu_barrier();
+}
+
+static inline void __call_rcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+ rcu_callback_t func)
+{
+ if (ssp)
+ call_srcu(ssp, rhp, func);
+ else
+ call_rcu(rhp, func);
+}
+
+struct rcu_pending_seq {
+ /*
+	 * We're using a radix tree like a vector - we just push elements onto
+	 * the end; a radix tree rather than an actual vector avoids
+	 * reallocation overhead
+ */
+ GENRADIX(struct rcu_head *) objs;
+ size_t nr;
+ struct rcu_head **cursor;
+ unsigned long seq;
+};
+
+struct rcu_pending_list {
+ struct rcu_head *head;
+ struct rcu_head *tail;
+ unsigned long seq;
+};
+
+struct rcu_pending_pcpu {
+ struct rcu_pending *parent;
+ spinlock_t lock;
+ int cpu;
+
+ /*
+ * We can't bound the number of unprocessed gp sequence numbers, and we
+ * can't efficiently merge radix trees for expired grace periods, so we
+ * need darray/vector:
+ */
+ DARRAY_PREALLOCATED(struct rcu_pending_seq, 4) objs;
+
+ /* Third entry is for expired objects: */
+ struct rcu_pending_list lists[NUM_ACTIVE_RCU_POLL_OLDSTATE + 1];
+
+ struct rcu_head cb;
+ bool cb_armed;
+ struct work_struct work;
+};
+
+static bool __rcu_pending_has_pending(struct rcu_pending_pcpu *p)
+{
+ if (p->objs.nr)
+ return true;
+
+ static_array_for_each(p->lists, i)
+ if (i->head)
+ return true;
+
+ return false;
+}
+
+static void rcu_pending_list_merge(struct rcu_pending_list *l1,
+ struct rcu_pending_list *l2)
+{
+#ifdef __KERNEL__
+ if (!l1->head)
+ l1->head = l2->head;
+ else
+ l1->tail->next = l2->head;
+#else
+ if (!l1->head)
+ l1->head = l2->head;
+ else
+ l1->tail->next.next = (void *) l2->head;
+#endif
+
+ l1->tail = l2->tail;
+ l2->head = l2->tail = NULL;
+}
+
+static void rcu_pending_list_add(struct rcu_pending_list *l,
+ struct rcu_head *n)
+{
+#ifdef __KERNEL__
+ if (!l->head)
+ l->head = n;
+ else
+ l->tail->next = n;
+ l->tail = n;
+ n->next = NULL;
+#else
+ if (!l->head)
+ l->head = n;
+ else
+ l->tail->next.next = (void *) n;
+ l->tail = n;
+ n->next.next = NULL;
+#endif
+}
+
+static void merge_expired_lists(struct rcu_pending_pcpu *p)
+{
+ struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+
+ for (struct rcu_pending_list *i = p->lists; i < expired; i++)
+ if (i->head && __poll_state_synchronize_rcu(p->parent->srcu, i->seq))
+ rcu_pending_list_merge(expired, i);
+}
+
+#ifndef __KERNEL__
+static inline void kfree_bulk(size_t nr, void **p)
+{
+ while (nr--)
+ kfree(*p);
+}
+
+#define local_irq_save(flags) \
+do { \
+ flags = 0; \
+} while (0)
+#endif
+
+static noinline void __process_finished_items(struct rcu_pending *pending,
+ struct rcu_pending_pcpu *p,
+ unsigned long flags)
+{
+ struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+ struct rcu_pending_seq objs = {};
+ struct rcu_head *list = NULL;
+
+ if (p->objs.nr &&
+ __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) {
+ objs = p->objs.data[0];
+ darray_remove_item(&p->objs, p->objs.data);
+ }
+
+ merge_expired_lists(p);
+
+ list = expired->head;
+ expired->head = expired->tail = NULL;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ switch ((ulong) pending->process) {
+ case RCU_PENDING_KVFREE:
+ for (size_t i = 0; i < objs.nr; ) {
+ size_t nr_this_node = min(GENRADIX_NODE_SIZE / sizeof(void *), objs.nr - i);
+
+ kfree_bulk(nr_this_node, (void **) genradix_ptr(&objs.objs, i));
+ i += nr_this_node;
+ }
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+
+ /*
+ * low bit of pointer indicates whether rcu_head needs
+ * to be freed - kvfree_rcu_mightsleep()
+ */
+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN == 0);
+
+ void *ptr = (void *)(((unsigned long) obj->func) & ~1UL);
+ bool free_head = ((unsigned long) obj->func) & 1UL;
+
+ kvfree(ptr);
+ if (free_head)
+ kfree(obj);
+ }
+
+ break;
+
+ case RCU_PENDING_CALL_RCU:
+ for (size_t i = 0; i < objs.nr; i++) {
+ struct rcu_head *obj = *genradix_ptr(&objs.objs, i);
+ obj->func(obj);
+ }
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+ obj->func(obj);
+ }
+ break;
+
+ default:
+ for (size_t i = 0; i < objs.nr; i++)
+ pending->process(pending, *genradix_ptr(&objs.objs, i));
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+ pending->process(pending, obj);
+ }
+ break;
+ }
+}
+
+static bool process_finished_items(struct rcu_pending *pending,
+ struct rcu_pending_pcpu *p,
+ unsigned long flags)
+{
+ /*
+ * XXX: we should grab the gp seq once and avoid multiple function
+	 * calls; this is called from the __rcu_pending_enqueue() fastpath in
+ * may_sleep==true mode
+ */
+ if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) ||
+ (p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) ||
+ (p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) ||
+ p->lists[2].head) {
+ __process_finished_items(pending, p, flags);
+ return true;
+ }
+
+ return false;
+}
+
+static void rcu_pending_work(struct work_struct *work)
+{
+ struct rcu_pending_pcpu *p =
+ container_of(work, struct rcu_pending_pcpu, work);
+ struct rcu_pending *pending = p->parent;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&p->lock, flags);
+ } while (process_finished_items(pending, p, flags));
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void rcu_pending_rcu_cb(struct rcu_head *rcu)
+{
+ struct rcu_pending_pcpu *p = container_of(rcu, struct rcu_pending_pcpu, cb);
+
+ schedule_work_on(p->cpu, &p->work);
+
+ unsigned long flags;
+ spin_lock_irqsave(&p->lock, flags);
+ if (__rcu_pending_has_pending(p)) {
+ spin_unlock_irqrestore(&p->lock, flags);
+ __call_rcu(p->parent->srcu, &p->cb, rcu_pending_rcu_cb);
+ } else {
+ p->cb_armed = false;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static __always_inline struct rcu_pending_seq *
+get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq)
+{
+ darray_for_each_reverse(p->objs, objs)
+ if (objs->seq == seq)
+ return objs;
+
+ if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC))
+ return NULL;
+
+ return &darray_last(p->objs);
+}
+
+static noinline bool
+rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq,
+ struct rcu_head *head, void *ptr,
+ unsigned long *flags)
+{
+ if (ptr) {
+ if (!head) {
+ /*
+ * kvfree_rcu_mightsleep(): we weren't passed an
+ * rcu_head, but we need one: use the low bit of the
+			 * pointer being freed to flag that the head needs to be
+ * freed as well:
+ */
+ ptr = (void *)(((unsigned long) ptr)|1UL);
+ head = kmalloc(sizeof(*head), __GFP_NOWARN);
+ if (!head) {
+ spin_unlock_irqrestore(&p->lock, *flags);
+ head = kmalloc(sizeof(*head), GFP_KERNEL|__GFP_NOFAIL);
+ /*
+ * dropped lock, did GFP_KERNEL allocation,
+ * check for gp expiration
+ */
+ if (unlikely(__poll_state_synchronize_rcu(p->parent->srcu, seq))) {
+ kvfree(--ptr);
+ kfree(head);
+ spin_lock_irqsave(&p->lock, *flags);
+ return false;
+ }
+ }
+ }
+
+ head->func = ptr;
+ }
+again:
+ for (struct rcu_pending_list *i = p->lists;
+ i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
+ if (i->seq == seq) {
+ rcu_pending_list_add(i, head);
+ return false;
+ }
+ }
+
+ for (struct rcu_pending_list *i = p->lists;
+ i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
+ if (!i->head) {
+ i->seq = seq;
+ rcu_pending_list_add(i, head);
+ return true;
+ }
+ }
+
+ merge_expired_lists(p);
+ goto again;
+}
+
+/*
+ * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via
+ * pending->process) once a grace period elapses.
+ *
+ * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall
+ * back to a linked list.
+ *
+ * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
+ * process callback
+ *
+ * - If @ptr and @head are both non-NULL, this is kvfree_rcu()
+ *
+ * - If @ptr is non-NULL and @head is NULL, this is kvfree_rcu_mightsleep()
+ *
+ * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
+ * expired items.
+ */
+static __always_inline void
+__rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
+ void *ptr, bool may_sleep)
+{
+ struct rcu_pending_pcpu *p;
+ struct rcu_pending_seq *objs;
+ struct genradix_node *new_node = NULL;
+ unsigned long seq, flags;
+ bool start_gp = false;
+
+ BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN));
+
+ local_irq_save(flags);
+ p = this_cpu_ptr(pending->p);
+ spin_lock(&p->lock);
+ seq = __get_state_synchronize_rcu(pending->srcu);
+restart:
+ if (may_sleep &&
+ unlikely(process_finished_items(pending, p, flags)))
+ goto check_expired;
+
+ /*
+ * In kvfree_rcu() mode, the radix tree is only for slab pointers so
+ * that we can do kfree_bulk() - vmalloc pointers always use the linked
+ * list:
+ */
+ if (ptr && unlikely(is_vmalloc_addr(ptr)))
+ goto list_add;
+
+ objs = get_object_radix(p, seq);
+ if (unlikely(!objs))
+ goto list_add;
+
+ if (unlikely(!objs->cursor)) {
+ /*
+ * New radix tree nodes must be added under @p->lock because the
+ * tree root is in a darray that can be resized (typically,
+ * genradix supports concurrent unlocked allocation of new
+ * nodes) - hence preallocation and the retry loop:
+ */
+ objs->cursor = genradix_ptr_alloc_preallocated_inlined(&objs->objs,
+ objs->nr, &new_node, GFP_ATOMIC|__GFP_NOWARN);
+ if (unlikely(!objs->cursor)) {
+ if (may_sleep) {
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ gfp_t gfp = GFP_KERNEL;
+ if (!head)
+ gfp |= __GFP_NOFAIL;
+
+ new_node = genradix_alloc_node(gfp);
+ if (!new_node)
+ may_sleep = false;
+ goto check_expired;
+ }
+list_add:
+ start_gp = rcu_pending_enqueue_list(p, seq, head, ptr, &flags);
+ goto start_gp;
+ }
+ }
+
+ *objs->cursor++ = ptr ?: head;
+ /* zero cursor if we hit the end of a radix tree node: */
+ if (!(((ulong) objs->cursor) & (GENRADIX_NODE_SIZE - 1)))
+ objs->cursor = NULL;
+ start_gp = !objs->nr;
+ objs->nr++;
+start_gp:
+ if (unlikely(start_gp)) {
+ /*
+ * We only have one callback (ideally, we would have one for
+		 * every outstanding grace period) - so if our callback is
+ * already in flight, we may still have to start a grace period
+ * (since we used get_state() above, not start_poll())
+ */
+ if (!p->cb_armed) {
+ p->cb_armed = true;
+ __call_rcu(pending->srcu, &p->cb, rcu_pending_rcu_cb);
+ } else {
+ __start_poll_synchronize_rcu(pending->srcu);
+ }
+ }
+ spin_unlock_irqrestore(&p->lock, flags);
+free_node:
+ if (new_node)
+ genradix_free_node(new_node);
+ return;
+check_expired:
+ if (unlikely(__poll_state_synchronize_rcu(pending->srcu, seq))) {
+ switch ((ulong) pending->process) {
+ case RCU_PENDING_KVFREE:
+ kvfree(ptr);
+ break;
+ case RCU_PENDING_CALL_RCU:
+ head->func(head);
+ break;
+ default:
+ pending->process(pending, head);
+ break;
+ }
+ goto free_node;
+ }
+
+ local_irq_save(flags);
+ p = this_cpu_ptr(pending->p);
+ spin_lock(&p->lock);
+ goto restart;
+}
+
+void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj)
+{
+ __rcu_pending_enqueue(pending, obj, NULL, true);
+}
+
+static struct rcu_head *rcu_pending_pcpu_dequeue(struct rcu_pending_pcpu *p)
+{
+ struct rcu_head *ret = NULL;
+
+ spin_lock_irq(&p->lock);
+ darray_for_each(p->objs, objs)
+ if (objs->nr) {
+ ret = *genradix_ptr(&objs->objs, --objs->nr);
+ objs->cursor = NULL;
+ if (!objs->nr)
+ genradix_free(&objs->objs);
+ goto out;
+ }
+
+ static_array_for_each(p->lists, i)
+ if (i->head) {
+ ret = i->head;
+#ifdef __KERNEL__
+ i->head = ret->next;
+#else
+ i->head = (void *) ret->next.next;
+#endif
+ if (!i->head)
+ i->tail = NULL;
+ goto out;
+ }
+out:
+ spin_unlock_irq(&p->lock);
+
+ return ret;
+}
+
+struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending)
+{
+ return rcu_pending_pcpu_dequeue(raw_cpu_ptr(pending->p));
+}
+
+struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending)
+{
+ struct rcu_head *ret = rcu_pending_dequeue(pending);
+
+ if (ret)
+ return ret;
+
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ ret = rcu_pending_pcpu_dequeue(per_cpu_ptr(pending->p, cpu));
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static bool rcu_pending_has_pending_or_armed(struct rcu_pending *pending)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ spin_lock_irq(&p->lock);
+ if (__rcu_pending_has_pending(p) || p->cb_armed) {
+ spin_unlock_irq(&p->lock);
+ return true;
+ }
+ spin_unlock_irq(&p->lock);
+ }
+
+ return false;
+}
+
+void rcu_pending_exit(struct rcu_pending *pending)
+{
+ int cpu;
+
+ if (!pending->p)
+ return;
+
+ while (rcu_pending_has_pending_or_armed(pending)) {
+ __rcu_barrier(pending->srcu);
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ flush_work(&p->work);
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ flush_work(&p->work);
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+
+ static_array_for_each(p->lists, i)
+ WARN_ON(i->head);
+ WARN_ON(p->objs.nr);
+ darray_exit(&p->objs);
+ }
+ free_percpu(pending->p);
+}
+
+/**
+ * rcu_pending_init() - initialize a struct rcu_pending
+ *
+ * @pending: Object to init
+ * @srcu: May optionally be used with an srcu_struct; if NULL, uses normal
+ * RCU flavor
+ * @process: Callback function invoked on objects once their RCU barriers
+ * have completed; if NULL, kvfree() is used.
+ */
+int rcu_pending_init(struct rcu_pending *pending,
+ struct srcu_struct *srcu,
+ rcu_pending_process_fn process)
+{
+ pending->p = alloc_percpu(struct rcu_pending_pcpu);
+ if (!pending->p)
+ return -ENOMEM;
+
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ p->parent = pending;
+ p->cpu = cpu;
+ spin_lock_init(&p->lock);
+ darray_init(&p->objs);
+ INIT_WORK(&p->work, rcu_pending_work);
+ }
+
+ pending->srcu = srcu;
+ pending->process = process;
+
+ return 0;
+}
diff --git a/fs/bcachefs/rcu_pending.h b/fs/bcachefs/rcu_pending.h
new file mode 100644
index 000000000000..71a2f4ddaade
--- /dev/null
+++ b/fs/bcachefs/rcu_pending.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RCU_PENDING_H
+#define _LINUX_RCU_PENDING_H
+
+#include <linux/rcupdate.h>
+
+struct rcu_pending;
+typedef void (*rcu_pending_process_fn)(struct rcu_pending *, struct rcu_head *);
+
+struct rcu_pending_pcpu;
+
+struct rcu_pending {
+ struct rcu_pending_pcpu __percpu *p;
+ struct srcu_struct *srcu;
+ rcu_pending_process_fn process;
+};
+
+void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj);
+struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending);
+struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending);
+
+void rcu_pending_exit(struct rcu_pending *pending);
+int rcu_pending_init(struct rcu_pending *pending,
+ struct srcu_struct *srcu,
+ rcu_pending_process_fn process);
+
+#endif /* _LINUX_RCU_PENDING_H */
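
A minimal usage sketch for the API above (hypothetical caller; the
embedded-rcu_head pattern follows the header's types):

	struct my_obj {
		struct rcu_head		rcu;
		/* ... payload ... */
	};

	static void my_process(struct rcu_pending *pending, struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct my_obj, rcu));
	}

	static void demo(void)
	{
		static struct rcu_pending pending;
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		/* a NULL srcu_struct selects plain RCU */
		int ret = rcu_pending_init(&pending, NULL, my_process);
		if (ret || !obj)
			return;		/* error handling elided in this sketch */

		/* defer processing of obj until a grace period has elapsed */
		rcu_pending_enqueue(&pending, &obj->rcu);

		/* teardown waits for and flushes all pending items */
		rcu_pending_exit(&pending);
	}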
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index cf81e5128c3a..2d299a37cf07 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -13,6 +13,7 @@
#include "errcode.h"
#include "error.h"
#include "inode.h"
+#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
@@ -156,6 +157,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
data_opts->rewrite_ptrs =
bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
data_opts->target = r->target;
+ data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
if (!data_opts->rewrite_ptrs) {
/*
@@ -263,6 +265,7 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
data_opts->target = target;
+ data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
return data_opts->rewrite_ptrs != 0;
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 36de1c6fe8c3..be1e7ca4362f 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -97,7 +97,7 @@ static void bch2_reconstruct_alloc(struct bch_fs *c)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
@@ -525,17 +525,17 @@ static int read_btree_roots(struct bch_fs *c)
"error reading btree root %s l=%u: %s",
bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
if (btree_id_is_alloc(i)) {
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
r->error = 0;
- } else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
+ } else if (!(c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
bch_info(c, "will run btree node scan");
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
}
ret = 0;
@@ -706,14 +706,14 @@ int bch2_fs_recovery(struct bch_fs *c)
if (check_version_upgrade(c))
write_sb = true;
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
if (write_sb)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
if (c->opts.fsck)
set_bit(BCH_FS_fsck_running, &c->flags);
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index 73339a0a3111..735b8adc8f9d 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -40,7 +40,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
set_bit(BCH_FS_may_go_rw, &c->flags);
- if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
+ if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes)
return bch2_fs_read_write_early(c);
return 0;
}
@@ -97,14 +97,14 @@ u64 bch2_recovery_passes_from_stable(u64 v)
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
enum bch_recovery_pass pass)
{
- if (c->recovery_passes_explicit & BIT_ULL(pass))
+ if (c->opts.recovery_passes & BIT_ULL(pass))
return 0;
bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
bch2_recovery_passes[pass], pass,
bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
- c->recovery_passes_explicit |= BIT_ULL(pass);
+ c->opts.recovery_passes |= BIT_ULL(pass);
if (c->curr_recovery_pass >= pass) {
c->curr_recovery_pass = pass;
@@ -161,7 +161,9 @@ static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pa
{
struct recovery_pass_fn *p = recovery_pass_fns + pass;
- if (c->recovery_passes_explicit & BIT_ULL(pass))
+ if (c->opts.recovery_passes_exclude & BIT_ULL(pass))
+ return false;
+ if (c->opts.recovery_passes & BIT_ULL(pass))
return true;
if ((p->when & PASS_FSCK) && c->opts.fsck)
return true;
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 12d4de65ae17..998c0bd06802 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -123,7 +123,7 @@ static void extent_to_replicas(struct bkey_s_c k,
continue;
if (!p.has_ec)
- r->devs[r->nr_devs++] = p.ptr.dev;
+ replicas_entry_add_dev(r, p.ptr.dev);
else
r->nr_required = 0;
}
@@ -140,7 +140,7 @@ static void stripe_to_replicas(struct bkey_s_c k,
for (ptr = s.v->ptrs;
ptr < s.v->ptrs + s.v->nr_blocks;
ptr++)
- r->devs[r->nr_devs++] = ptr->dev;
+ replicas_entry_add_dev(r, ptr->dev);
}
void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
@@ -181,7 +181,7 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
e->nr_required = 1;
darray_for_each(devs, i)
- e->devs[e->nr_devs++] = *i;
+ replicas_entry_add_dev(e, *i);
bch2_replicas_entry_sort(e);
}
@@ -795,12 +795,12 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
for (unsigned i = 0; i < e->nr_devs; i++) {
nr_online += test_bit(e->devs[i], devs.d);
- struct bch_dev *ca = bch2_dev_rcu(c, e->devs[i]);
- nr_failed += ca && ca->mi.state == BCH_MEMBER_STATE_failed;
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
+ nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
}
rcu_read_unlock();
- if (nr_failed == e->nr_devs)
+ if (nr_online + nr_failed == e->nr_devs)
continue;
if (nr_online < e->nr_required)
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
index b97208195d06..b7eff904acdb 100644
--- a/fs/bcachefs/replicas_format.h
+++ b/fs/bcachefs/replicas_format.h
@@ -5,7 +5,7 @@
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[];
+ __u8 devs[] __counted_by(nr_devs);
} __packed;
struct bch_sb_field_replicas_v0 {
@@ -17,7 +17,7 @@ struct bch_replicas_entry_v1 {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[];
+ __u8 devs[] __counted_by(nr_devs);
} __packed;
struct bch_sb_field_replicas {
@@ -28,4 +28,9 @@ struct bch_sb_field_replicas {
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
+#define replicas_entry_add_dev(e, d) ({ \
+ (e)->nr_devs++; \
+ (e)->devs[(e)->nr_devs - 1] = (d); \
+})
+
#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
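
A subtlety in replicas_entry_add_dev() above: with __counted_by(nr_devs),
bounds-checking builds (FORTIFY_SOURCE, UBSAN) validate each array index
against the current value of nr_devs, so the counter must grow before the
new slot is written. Side by side (hypothetical direct form vs. the macro):

	e->devs[e->nr_devs++] = dev;	/* indexes one past the counted size */

	e->nr_devs++;			/* what the macro does: count first, */
	e->devs[e->nr_devs - 1] = dev;	/* then write the now-valid last slot */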
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index c57d42bb8d1b..025848a9c4c0 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -155,7 +155,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
c->sb.clean = false;
mutex_unlock(&c->sb_lock);
- return NULL;
+ return ERR_PTR(-BCH_ERR_invalid_sb_clean);
}
clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index 4b765422dd77..02bcde3c1b02 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -465,3 +465,60 @@ void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
__bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
}
}
+
+unsigned bch2_sb_nr_devices(const struct bch_sb *sb)
+{
+ unsigned nr = 0;
+
+ for (unsigned i = 0; i < sb->nr_devices; i++)
+ nr += bch2_member_exists((struct bch_sb *) sb, i);
+ return nr;
+}
+
+int bch2_sb_member_alloc(struct bch_fs *c)
+{
+ unsigned dev_idx = c->sb.nr_devices;
+ struct bch_sb_field_members_v2 *mi;
+ unsigned nr_devices;
+ unsigned u64s;
+ int best = -1;
+ u64 best_last_mount = 0;
+
+ if (dev_idx < BCH_SB_MEMBERS_MAX)
+ goto have_slot;
+
+ for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
+ /* eventually BCH_SB_MEMBERS_MAX will be raised */
+ if (dev_idx == BCH_SB_MEMBER_INVALID)
+ continue;
+
+ struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
+ if (bch2_member_alive(&m))
+ continue;
+
+ u64 last_mount = le64_to_cpu(m.last_mount);
+ if (best < 0 || last_mount < best_last_mount) {
+ best = dev_idx;
+ best_last_mount = last_mount;
+ }
+ }
+ if (best >= 0) {
+ dev_idx = best;
+ goto have_slot;
+ }
+
+ return -BCH_ERR_ENOSPC_sb_members;
+have_slot:
+ nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+
+ mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+ u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
+ le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
+
+ mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
+ if (!mi)
+ return -BCH_ERR_ENOSPC_sb_members;
+
+ c->disk_sb.sb->nr_devices = nr_devices;
+ return dev_idx;
+}
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index dd93192ec065..762083b564ee 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -198,29 +198,37 @@ static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
lockdep_is_held(&c->state_lock));
}
-static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
+static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
return c && dev < c->sb.nr_devices
? rcu_dereference(c->devs[dev])
: NULL;
}
+void bch2_dev_missing(struct bch_fs *, unsigned);
+
+static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
+{
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
+ if (unlikely(!ca))
+ bch2_dev_missing(c, dev);
+ return ca;
+}
+
static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
if (ca)
bch2_dev_get(ca);
rcu_read_unlock();
return ca;
}
-void bch2_dev_missing(struct bch_fs *, unsigned);
-
static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
- if (!ca)
+ if (unlikely(!ca))
bch2_dev_missing(c, dev);
return ca;
}
@@ -307,6 +315,8 @@ static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
return false;
}
+unsigned bch2_sb_nr_devices(const struct bch_sb *);
+
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
return (struct bch_member_cpu) {
@@ -352,4 +362,6 @@ static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64
bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
+int bch2_sb_member_alloc(struct bch_fs *);
+
#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 3a494c5d1247..9cbd3c14c94f 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -335,7 +335,7 @@ static inline bool six_owner_running(struct six_lock *lock)
*/
rcu_read_lock();
struct task_struct *owner = READ_ONCE(lock->owner);
- bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
+ bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
rcu_read_unlock();
return ret;
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index c8c266cb5797..215eed4cce6d 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -270,7 +270,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
desc.hash_bkey(info, bkey_i_to_s_c(insert)),
snapshot),
POS(insert->k.p.inode, U64_MAX),
- BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
+ BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) {
if (is_visible_key(desc, inum, k)) {
if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
goto found;
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index a8299ba2cab2..e62f876541fe 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -31,6 +31,51 @@ int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
+static inline struct bkey_s_c
+bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end,
+ u32 subvolid, unsigned flags)
+{
+ u32 snapshot;
+ int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
+ if (ret)
+ return bkey_s_c_err(ret);
+
+ bch2_btree_iter_set_snapshot(iter, snapshot);
+ return bch2_btree_iter_peek_upto_type(iter, end, flags);
+}
+
+#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
+ _end, _subvolid, _flags, _k, _do) \
+({ \
+ struct bkey_s_c _k; \
+ int _ret3 = 0; \
+ \
+ do { \
+ _ret3 = lockrestart_do(_trans, ({ \
+ (_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter), \
+ _end, _subvolid, (_flags)); \
+ if (!(_k).k) \
+ break; \
+ \
+ bkey_err(_k) ?: (_do); \
+ })); \
+ } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
+
+#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id, \
+ _start, _end, _subvolid, _flags, _k, _do) \
+({ \
+ struct btree_iter _iter; \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
+ _end, _subvolid, _flags, _k, _do); \
+})
+
int bch2_delete_dead_snapshots(struct bch_fs *);
void bch2_delete_dead_snapshots_async(struct bch_fs *);
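
A usage sketch for the new iteration macro (hypothetical function; the _do
body runs inside lockrestart_do(), so it is retried if it returns a
transaction-restart error):

	static int count_extents(struct btree_trans *trans, subvol_inum inum,
				 u64 *nr)
	{
		return for_each_btree_key_in_subvolume_upto(trans, iter,
				BTREE_ID_extents,
				POS(inum.inum, 0), POS(inum.inum, U64_MAX),
				inum.subvol, 0, k, ({
			if (bkey_extent_is_data(k.k))
				(*nr)++;
			0;
		}));
	}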
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
index 9b10c8947828..f2ec4277c2a5 100644
--- a/fs/bcachefs/subvolume_types.h
+++ b/fs/bcachefs/subvolume_types.h
@@ -30,7 +30,8 @@ struct snapshot_table {
};
typedef struct {
- u32 subvol;
+ /* we can't have padding in this struct: */
+ u64 subvol;
u64 inum;
} subvol_inum;
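
The "no padding" comment above is load-bearing: this patch starts using
subvol_inum as an rhashtable key for the VFS inode hash, and rhashtable
hashes and compares keys as raw bytes by default, so a struct hole would
make logically equal keys look different. With a u32 subvol ahead of a u64
inum there is a 4-byte hole; widening subvol to u64 removes it:

	typedef struct { u32 subvol; u64 inum; } with_hole;	/* 4 padding bytes */
	typedef struct { u64 subvol; u64 inum; } no_hole;	/* none */

	/* jhash(&key, sizeof(key), seed) over with_hole would mix in
	 * whatever happens to sit in the padding bytes */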
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index c8c2ccbdfbb5..d86d5dae54c9 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -418,6 +418,9 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2 &&
!BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb))
SET_BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb, 30);
+
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2)
+ SET_BCH_SB_PROMOTE_WHOLE_EXTENTS(sb, true);
}
for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
@@ -1292,15 +1295,9 @@ void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
bool print_layout, unsigned fields)
{
- u64 fields_have = 0;
- unsigned nr_devices = 0;
-
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 44);
- for (int i = 0; i < sb->nr_devices; i++)
- nr_devices += bch2_member_exists(sb, i);
-
prt_printf(out, "External UUID:\t");
pr_uuid(out, sb->user_uuid.b);
prt_newline(out);
@@ -1356,9 +1353,10 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_newline(out);
prt_printf(out, "Clean:\t%llu\n", BCH_SB_CLEAN(sb));
- prt_printf(out, "Devices:\t%u\n", nr_devices);
+ prt_printf(out, "Devices:\t%u\n", bch2_sb_nr_devices(sb));
prt_printf(out, "Sections:\t");
+ u64 fields_have = 0;
vstruct_for_each(sb, f)
fields_have |= 1 << le32_to_cpu(f->type);
prt_bitflags(out, bch2_sb_fields, fields_have);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e7fa2de35014..873e4be7e1dc 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -370,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
test_bit(BCH_FS_clean_shutdown, &c->flags) &&
c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
- BUG_ON(atomic_read(&c->btree_cache.dirty));
+ BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
BUG_ON(c->btree_write_buffer.inc.keys.nr);
BUG_ON(c->btree_write_buffer.flushing.keys.nr);
@@ -543,6 +543,7 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_fs_fs_io_direct_exit(c);
bch2_fs_fs_io_buffered_exit(c);
bch2_fs_fsio_exit(c);
+ bch2_fs_vfs_exit(c);
bch2_fs_ec_exit(c);
bch2_fs_encryption_exit(c);
bch2_fs_nocow_locking_exit(c);
@@ -810,7 +811,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
c->copy_gc_enabled = 1;
c->rebalance.enabled = 1;
- c->promote_whole_extents = true;
c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
@@ -926,6 +926,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_encryption_init(c) ?:
bch2_fs_compress_init(c) ?:
bch2_fs_ec_init(c) ?:
+ bch2_fs_vfs_init(c) ?:
bch2_fs_fsio_init(c) ?:
bch2_fs_fs_io_buffered_init(c) ?:
bch2_fs_fs_io_direct_init(c);
@@ -1591,33 +1592,6 @@ int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
/* Device add/removal: */
-static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bpos start = POS(ca->dev_idx, 0);
- struct bpos end = POS(ca->dev_idx, U64_MAX);
- int ret;
-
- /*
- * We clear the LRU and need_discard btrees first so that we don't race
- * with bch2_do_invalidates() and bch2_do_discards()
- */
- ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_dev_usage_remove(c, ca->dev_idx);
- bch_err_msg(c, ret, "removing dev alloc info");
- return ret;
-}
-
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
struct bch_member *m;
@@ -1729,9 +1703,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
struct bch_opts opts = bch2_opts_empty();
struct bch_sb_handle sb;
struct bch_dev *ca = NULL;
- struct bch_sb_field_members_v2 *mi;
- struct bch_member dev_mi;
- unsigned dev_idx, nr_devices, u64s;
struct printbuf errbuf = PRINTBUF;
struct printbuf label = PRINTBUF;
int ret;
@@ -1741,7 +1712,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (ret)
goto err;
- dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
+ struct bch_member dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
if (BCH_MEMBER_GROUP(&dev_mi)) {
bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
@@ -1779,55 +1750,19 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
goto err_unlock;
if (dynamic_fault("bcachefs:add:no_slot"))
- goto no_slot;
-
- if (c->sb.nr_devices < BCH_SB_MEMBERS_MAX) {
- dev_idx = c->sb.nr_devices;
- goto have_slot;
- }
-
- int best = -1;
- u64 best_last_mount = 0;
- for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
- struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
- if (bch2_member_alive(&m))
- continue;
-
- u64 last_mount = le64_to_cpu(m.last_mount);
- if (best < 0 || last_mount < best_last_mount) {
- best = dev_idx;
- best_last_mount = last_mount;
- }
- }
- if (best >= 0) {
- dev_idx = best;
- goto have_slot;
- }
-no_slot:
- ret = -BCH_ERR_ENOSPC_sb_members;
- bch_err_msg(c, ret, "setting up new superblock");
- goto err_unlock;
-
-have_slot:
- nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
-
- mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
- le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
+ goto err_unlock;
- mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
- if (!mi) {
- ret = -BCH_ERR_ENOSPC_sb_members;
+ ret = bch2_sb_member_alloc(c);
+ if (ret < 0) {
bch_err_msg(c, ret, "setting up new superblock");
goto err_unlock;
}
- struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
+ unsigned dev_idx = ret;
/* success: */
- *m = dev_mi;
- m->last_mount = cpu_to_le64(ktime_get_real_seconds());
- c->disk_sb.sb->nr_devices = nr_devices;
+ dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds());
+ *bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi;
ca->disk_sb.sb->dev_idx = dev_idx;
bch2_dev_attach(c, ca, dev_idx);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index f393023a3ae2..03e59f86f360 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -219,7 +219,6 @@ read_attribute(copy_gc_wait);
rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
-rw_attribute(promote_whole_extents);
read_attribute(new_stripes);
@@ -234,7 +233,7 @@ write_attribute(perf_test);
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
- { .name = #_name, .mode = 0444 };
+ { .name = #_name, .mode = 0644 };
BCH_TIME_STATS()
#undef x
@@ -245,14 +244,18 @@ static struct attribute sysfs_state_rw = {
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
size_t ret = 0;
struct btree *b;
- mutex_lock(&c->btree_cache.lock);
- list_for_each_entry(b, &c->btree_cache.live, list)
+ mutex_lock(&bc->lock);
+ list_for_each_entry(b, &bc->live[0].list, list)
ret += btree_buf_bytes(b);
-
- mutex_unlock(&c->btree_cache.lock);
+ list_for_each_entry(b, &bc->live[1].list, list)
+ ret += btree_buf_bytes(b);
+ list_for_each_entry(b, &bc->freeable, list)
+ ret += btree_buf_bytes(b);
+ mutex_unlock(&bc->lock);
return ret;
}
@@ -288,7 +291,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
prt_tab_rjust(out);
prt_human_readable_u64(out, nr_extents
- ? div_u64(sectors_uncompressed << 9, nr_extents)
+ ? div64_u64(sectors_uncompressed << 9, nr_extents)
: 0);
prt_tab_rjust(out);
prt_newline(out);
@@ -347,8 +350,6 @@ SHOW(bch2_fs)
if (attr == &sysfs_rebalance_status)
bch2_rebalance_status_to_text(out, c);
- sysfs_print(promote_whole_extents, c->promote_whole_extents);
-
/* Debugging: */
if (attr == &sysfs_journal_debug)
@@ -436,8 +437,6 @@ STORE(bch2_fs)
sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
- sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
-
/* Debugging: */
if (!test_bit(BCH_FS_started, &c->flags))
@@ -449,11 +448,12 @@ STORE(bch2_fs)
return -EROFS;
if (attr == &sysfs_trigger_btree_cache_shrink) {
+ struct btree_cache *bc = &c->btree_cache;
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
+ bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
}
if (attr == &sysfs_trigger_btree_key_cache_shrink) {
@@ -461,7 +461,7 @@ STORE(bch2_fs)
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_key_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
+ c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
}
if (attr == &sysfs_trigger_gc)
@@ -514,7 +514,7 @@ struct attribute *bch2_fs_files[] = {
&sysfs_btree_cache_size,
&sysfs_btree_write_stats,
- &sysfs_promote_whole_extents,
+ &sysfs_rebalance_status,
&sysfs_compression_stats,
@@ -614,7 +614,6 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_copy_gc_wait,
&sysfs_rebalance_enabled,
- &sysfs_rebalance_status,
sysfs_pd_controller_files(rebalance),
&sysfs_moving_ctxts,
@@ -674,7 +673,7 @@ STORE(bch2_fs_opts_dir)
if (ret < 0)
goto err;
- bch2_opt_set_sb(c, opt, v);
+ bch2_opt_set_sb(c, NULL, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if (v &&
@@ -728,6 +727,13 @@ SHOW(bch2_fs_time_stats)
STORE(bch2_fs_time_stats)
{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+
+#define x(name) \
+ if (attr == &sysfs_time_stat_##name) \
+ bch2_time_stats_reset(&c->times[BCH_TIME_##name]);
+ BCH_TIME_STATS()
+#undef x
return size;
}
SYSFS_OPS(bch2_fs_time_stats);
@@ -821,32 +827,17 @@ STORE(bch2_dev)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
- struct bch_member *mi;
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
- mutex_lock(&c->sb_lock);
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-
- if (v != BCH_MEMBER_DISCARD(mi)) {
- SET_BCH_MEMBER_DISCARD(mi, v);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
+ bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_discard, v);
}
if (attr == &sysfs_durability) {
u64 v = strtoul_or_return(buf);
- mutex_lock(&c->sb_lock);
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-
- if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
- SET_BCH_MEMBER_DURABILITY(mi, v + 1);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
+ bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, v);
}
if (attr == &sysfs_label) {
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index 0807ce9b171a..fb3442a7c67f 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -387,7 +387,7 @@ again:
seen = buf->buf.nr;
char *n = memchr(buf->buf.data, '\n', seen);
- if (!n && timeout != MAX_SCHEDULE_TIMEOUT && jiffies >= until) {
+ if (!n && timeout != MAX_SCHEDULE_TIMEOUT && time_after_eq(jiffies, until)) {
spin_unlock(&buf->lock);
return -ETIME;
}
diff --git a/fs/bcachefs/time_stats.c b/fs/bcachefs/time_stats.c
index 4508e9dcbee2..3fe82757f93a 100644
--- a/fs/bcachefs/time_stats.c
+++ b/fs/bcachefs/time_stats.c
@@ -151,6 +151,20 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
}
}
+void bch2_time_stats_reset(struct bch2_time_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ unsigned offset = offsetof(struct bch2_time_stats, min_duration);
+ memset((void *) stats + offset, 0, sizeof(*stats) - offset);
+
+ if (stats->buffer) {
+ int cpu;
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(stats->buffer, cpu)->nr = 0;
+ }
+ spin_unlock_irq(&stats->lock);
+}
+
void bch2_time_stats_exit(struct bch2_time_stats *stats)
{
free_percpu(stats->buffer);
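bch2_time_stats_reset() zeroes everything from min_duration to the end of the struct in a single memset(), which is presumably why the time_stats.h hunk below moves the percpu buffer pointer above min_duration — keeping it out of the zeroed region. A hedged compile-time assertion that would pin down this layout invariant:

/* Assumption: every field at or after min_duration is plain statistics;
 * the percpu buffer pointer must precede them to survive a reset. */
BUILD_BUG_ON(offsetof(struct bch2_time_stats, buffer) >=
	     offsetof(struct bch2_time_stats, min_duration));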
diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h
index 5df61403744b..dc6493f7bbab 100644
--- a/fs/bcachefs/time_stats.h
+++ b/fs/bcachefs/time_stats.h
@@ -70,6 +70,7 @@ struct time_stat_buffer {
struct bch2_time_stats {
spinlock_t lock;
bool have_quantiles;
+ struct time_stat_buffer __percpu *buffer;
/* all fields are in nanoseconds */
u64 min_duration;
u64 max_duration;
@@ -87,7 +88,6 @@ struct bch2_time_stats {
struct mean_and_variance_weighted duration_stats_weighted;
struct mean_and_variance_weighted freq_stats_weighted;
- struct time_stat_buffer __percpu *buffer;
};
struct bch2_time_stats_quantiles {
@@ -142,6 +142,7 @@ static inline bool track_event_change(struct bch2_time_stats *stats, bool v)
return false;
}
+void bch2_time_stats_reset(struct bch2_time_stats *);
void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index c62f00322d1e..5597b9d6297f 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -3,7 +3,6 @@
#define TRACE_SYSTEM bcachefs
#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_BCACHEFS_H
#include <linux/tracepoint.h>
@@ -558,6 +557,7 @@ TRACE_EVENT(btree_path_relock_fail,
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
+ __field(u8, path_idx)
TRACE_BPOS_entries(pos)
__array(char, node, 24 )
__field(u8, self_read_count )
@@ -575,7 +575,8 @@ TRACE_EVENT(btree_path_relock_fail,
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
- __entry->level = path->level;
+ __entry->level = level;
+ __entry->path_idx = path - trans->paths;
TRACE_BPOS_assign(pos, path->pos);
c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
@@ -588,7 +589,7 @@ TRACE_EVENT(btree_path_relock_fail,
c = six_lock_counts(&path->l[level].b->c.lock);
__entry->read_count = c.n[SIX_LOCK_read];
__entry->intent_count = c.n[SIX_LOCK_intent];
- scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+ scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
}
__entry->iter_lock_seq = path->l[level].lock_seq;
__entry->node_lock_seq = is_btree_node(path, level)
@@ -596,9 +597,10 @@ TRACE_EVENT(btree_path_relock_fail,
: 0;
),
- TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
+ __entry->path_idx,
bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
@@ -625,6 +627,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
+ __field(u8, path_idx)
TRACE_BPOS_entries(pos)
__field(u8, locked )
__field(u8, self_read_count )
@@ -642,6 +645,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = level;
+ __entry->path_idx = path - trans->paths;
TRACE_BPOS_assign(pos, path->pos);
__entry->locked = btree_node_locked(path, level);
@@ -657,9 +661,10 @@ TRACE_EVENT(btree_path_upgrade_fail,
: 0;
),
- TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
+ __entry->path_idx,
bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
@@ -1438,6 +1443,456 @@ TRACE_EVENT(error_downcast,
TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
+#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
+
+TRACE_EVENT(update_by_path,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path,
+ struct btree_insert_entry *i, bool overwrite),
+ TP_ARGS(trans, path, i, overwrite),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(btree_path_idx_t, path_idx )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ __field(u8, overwrite )
+ __field(btree_path_idx_t, update_idx )
+ __field(btree_path_idx_t, nr_updates )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->path_idx = path - trans->paths;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ __entry->overwrite = overwrite;
+ __entry->update_idx = i - trans->updates;
+ __entry->nr_updates = trans->nr_updates;
+ ),
+
+ TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
+ __entry->trans_fn,
+ __entry->path_idx,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->overwrite,
+ __entry->update_idx,
+ __entry->nr_updates)
+);
+
+TRACE_EVENT(btree_path_lock,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_bkey_cached_common *b),
+ TP_ARGS(trans, caller_ip, b),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, level )
+ __array(char, node, 24 )
+ __field(u32, lock_seq )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = b->btree_id;
+ __entry->level = b->level;
+
+ scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+ __entry->lock_seq = six_lock_seq(&b->lock);
+ ),
+
+ TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->level,
+ __entry->node,
+ __entry->lock_seq)
+);
+
+DECLARE_EVENT_CLASS(btree_path_ev,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __field(u16, idx )
+ __field(u8, ref )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
+ __entry->idx, __entry->ref,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_alloc,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, locks_want )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->locks_want = path->locks_want;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
+ __entry->idx,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->locks_want,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+TRACE_EVENT(btree_path_get,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
+ TP_ARGS(trans, path, new_pos),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, locks_want )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(old_pos)
+ TRACE_BPOS_entries(new_pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->locks_want = path->locks_want;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(old_pos, path->pos);
+ TRACE_BPOS_assign(new_pos, *new_pos);
+ ),
+
+ TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->locks_want,
+ __entry->old_pos_inode,
+ __entry->old_pos_offset,
+ __entry->old_pos_snapshot,
+ __entry->new_pos_inode,
+ __entry->new_pos_offset,
+ __entry->new_pos_snapshot)
+);
+
+DECLARE_EVENT_CLASS(btree_path_clone,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, new_idx )
+ __field(u8, btree_id )
+ __field(u8, ref )
+ __field(u8, preserve )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->new_idx = new - trans->paths;
+ __entry->btree_id = path->btree_id;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->new_idx)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_clone,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new)
+);
+
+DECLARE_EVENT_CLASS(btree_path_traverse,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, should_be_locked )
+ __field(u8, btree_id )
+ __field(u8, level )
+ TRACE_BPOS_entries(pos)
+ __field(u8, locks_want )
+ __field(u8, nodes_locked )
+ __array(char, node0, 24 )
+ __array(char, node1, 24 )
+ __array(char, node2, 24 )
+ __array(char, node3, 24 )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->btree_id = path->btree_id;
+ __entry->level = path->level;
+ TRACE_BPOS_assign(pos, path->pos);
+
+ __entry->locks_want = path->locks_want;
+ __entry->nodes_locked = path->nodes_locked;
+ struct btree *b = path->l[0].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+ else
+ scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+ b = path->l[1].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+ else
+ scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+ b = path->l[2].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+ else
+ scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+ b = path->l[3].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+ else
+ scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+ ),
+
+ TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
+ "locks %u %u %u %u node %s %s %s %s",
+ __entry->trans_fn,
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->level,
+ __entry->locks_want,
+ (__entry->nodes_locked >> 6) & 3,
+ (__entry->nodes_locked >> 4) & 3,
+ (__entry->nodes_locked >> 2) & 3,
+ (__entry->nodes_locked >> 0) & 3,
+ __entry->node3,
+ __entry->node2,
+ __entry->node1,
+ __entry->node0)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_set_pos,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path,
+ struct bpos *new_pos),
+ TP_ARGS(trans, path, new_pos),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(old_pos)
+ TRACE_BPOS_entries(new_pos)
+ __field(u8, locks_want )
+ __field(u8, nodes_locked )
+ __array(char, node0, 24 )
+ __array(char, node1, 24 )
+ __array(char, node2, 24 )
+ __array(char, node3, 24 )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(old_pos, path->pos);
+ TRACE_BPOS_assign(new_pos, *new_pos);
+
+ __entry->nodes_locked = path->nodes_locked;
+ struct btree *b = path->l[0].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+ else
+ scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+ b = path->l[1].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+ else
+ scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+ b = path->l[2].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+ else
+ scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+ b = path->l[3].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+ else
+ scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+ ),
+
+ TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
+ "locks %u %u %u %u node %s %s %s %s",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->old_pos_inode,
+ __entry->old_pos_offset,
+ __entry->old_pos_snapshot,
+ __entry->new_pos_inode,
+ __entry->new_pos_offset,
+ __entry->new_pos_snapshot,
+ (__entry->nodes_locked >> 6) & 3,
+ (__entry->nodes_locked >> 4) & 3,
+ (__entry->nodes_locked >> 2) & 3,
+ (__entry->nodes_locked >> 0) & 3,
+ __entry->node3,
+ __entry->node2,
+ __entry->node1,
+ __entry->node0)
+);
+
+TRACE_EVENT(btree_path_free,
+ TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
+ TP_ARGS(trans, path, dup),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, preserve )
+ __field(u8, should_be_locked)
+ __field(s8, dup )
+ __field(u8, dup_locked )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path;
+ __entry->preserve = trans->paths[path].preserve;
+ __entry->should_be_locked = trans->paths[path].should_be_locked;
+ __entry->dup = dup ? dup - trans->paths : -1;
+ __entry->dup_locked = dup ? btree_node_locked(dup, dup->level) : 0;
+ ),
+
+ TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
+ __entry->preserve ? 'P' : ' ',
+ __entry->should_be_locked ? 'S' : ' ',
+ __entry->dup,
+ __entry->dup_locked)
+);
+
+TRACE_EVENT(btree_path_free_trans_begin,
+ TP_PROTO(btree_path_idx_t path),
+ TP_ARGS(path),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path;
+ ),
+
+ TP_printk(" path %3u", __entry->idx)
+);
+
+#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+#ifndef _TRACE_BCACHEFS_H
+
+static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
+ struct btree_insert_entry *i, bool overwrite) {}
+static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
+static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
+static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
+
+#endif
+#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+
+#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */
/* This part must be outside protection */
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 1b8554460af4..42f565c76181 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -64,7 +64,7 @@ static int bch2_pow(u64 n, u64 p, u64 *res)
*res = 1;
while (p--) {
- if (*res > div_u64(U64_MAX, n))
+ if (*res > div64_u64(U64_MAX, n))
return -ERANGE;
*res *= n;
}
@@ -140,14 +140,14 @@ static int __bch2_strtou64_h(const char *cp, u64 *res)
parse_or_ret(cp, parse_unit_suffix(cp, &b));
- if (v > div_u64(U64_MAX, b))
+ if (v > div64_u64(U64_MAX, b))
return -ERANGE;
v *= b;
- if (f_n > div_u64(U64_MAX, b))
+ if (f_n > div64_u64(U64_MAX, b))
return -ERANGE;
- f_n = div_u64(f_n * b, f_d);
+ f_n = div64_u64(f_n * b, f_d);
if (v + f_n < v)
return -ERANGE;
v += f_n;
@@ -204,7 +204,7 @@ STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)
-u64 bch2_read_flag_list(char *opt, const char * const list[])
+u64 bch2_read_flag_list(const char *opt, const char * const list[])
{
u64 ret = 0;
char *p, *s, *d = kstrdup(opt, GFP_KERNEL);
@@ -214,7 +214,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
s = strim(d);
- while ((p = strsep(&s, ","))) {
+ while ((p = strsep(&s, ",;"))) {
int flag = match_string(list, -1, p);
if (flag < 0) {
@@ -360,7 +360,7 @@ void bch2_pr_time_units(struct printbuf *out, u64 ns)
{
const struct time_unit *u = bch2_pick_time_units(ns);
- prt_printf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
+ prt_printf(out, "%llu %s", div64_u64(ns, u->nsecs), u->name);
}
static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
@@ -477,7 +477,7 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
u64 q = max(quantiles->entries[i].m, last_q);
- prt_printf(out, "%llu ", div_u64(q, u->nsecs));
+ prt_printf(out, "%llu ", div64_u64(q, u->nsecs));
if (is_last)
prt_newline(out);
last_q = q;
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 902b7f5406a2..fb02c1c36004 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -195,7 +195,7 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
bool bch2_is_zero(const void *, size_t);
-u64 bch2_read_flag_list(char *, const char * const[]);
+u64 bch2_read_flag_list(const char *, const char * const[]);
void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 331f944d73dc..56c8d3fe55a4 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -250,17 +250,27 @@ static int __bch2_xattr_emit(const char *prefix,
return 0;
}
+static inline const char *bch2_xattr_prefix(unsigned type, struct dentry *dentry)
+{
+ const struct xattr_handler *handler = bch2_xattr_type_to_handler(type);
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
+}
+
static int bch2_xattr_emit(struct dentry *dentry,
const struct bch_xattr *xattr,
struct xattr_buf *buf)
{
- const struct xattr_handler *handler =
- bch2_xattr_type_to_handler(xattr->x_type);
+ const char *prefix;
+
+ prefix = bch2_xattr_prefix(xattr->x_type, dentry);
+ if (!prefix)
+ return 0;
- return handler && (!handler->list || handler->list(dentry))
- ? __bch2_xattr_emit(handler->prefix ?: handler->name,
- xattr->x_name, xattr->x_name_len, buf)
- : 0;
+ return __bch2_xattr_emit(prefix, xattr->x_name, xattr->x_name_len, buf);
}
static int bch2_xattr_list_bcachefs(struct bch_fs *c,
@@ -295,54 +305,23 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct bch_fs *c = dentry->d_sb->s_fs_info;
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
u64 offset = 0, inum = inode->ei_inode.bi_inum;
- u32 snapshot;
- int ret;
-retry:
- bch2_trans_begin(trans);
- iter = (struct btree_iter) { NULL };
-
- ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_xattrs,
- SPOS(inum, offset, snapshot),
- POS(inum, U64_MAX), 0, k, ret) {
- if (k.k->type != KEY_TYPE_xattr)
- continue;
-
- ret = bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
- if (ret)
- break;
- }
- offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_xattrs,
+ POS(inum, offset),
+ POS(inum, U64_MAX),
+ inode->ei_inum.subvol, 0, k, ({
+ if (k.k->type != KEY_TYPE_xattr)
+ continue;
- if (ret)
- goto out;
+ bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
+ }))) ?:
+ bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false) ?:
+ bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
- ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false);
- if (ret)
- goto out;
-
- ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
- if (ret)
- goto out;
-
- return buf.used;
-out:
- return bch2_err_class(ret);
+ return ret ? bch2_err_class(ret) : buf.used;
}
static int bch2_xattr_get_handler(const struct xattr_handler *handler,
@@ -632,10 +611,6 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
const struct xattr_handler *bch2_xattr_handlers[] = {
&bch_xattr_user_handler,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- &nop_posix_acl_access,
- &nop_posix_acl_default,
-#endif
&bch_xattr_trusted_handler,
&bch_xattr_security_handler,
#ifndef NO_BCACHEFS_FS
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
index e9f810539552..c7916011ef34 100644
--- a/fs/bcachefs/xattr_format.h
+++ b/fs/bcachefs/xattr_format.h
@@ -13,7 +13,7 @@ struct bch_xattr {
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
- __u8 x_name[];
+ __u8 x_name[] __counted_by(x_name_len);
} __packed __aligned(8);
#endif /* _BCACHEFS_XATTR_FORMAT_H */
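__counted_by() associates the flexible array's runtime element count with x_name_len, so FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS builds can bounds-check accesses to x_name. A generic illustration (hypothetical struct, independent of the bcachefs layout):

struct demo {
	u8 len;
	u8 name[] __counted_by(len);
};
/* On a CONFIG_UBSAN_BOUNDS or FORTIFY_SOURCE kernel, an access such as
 * d->name[d->len] can be flagged at runtime: the compiler now knows the
 * array holds exactly d->len elements. */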
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index a778411574a9..fa66a09e496a 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -172,11 +172,11 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
static int bfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block);
if (unlikely(ret))
bfs_write_failed(mapping, pos + len);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 19fa49cd9907..34d0d1e43f36 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1314,6 +1314,11 @@ out_free_interp:
emulate the SVr4 behavior. Sigh. */
error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
+
+ retval = do_mseal(0, PAGE_SIZE, 0);
+ if (retval)
+ pr_warn_ratelimited("pid=%d, couldn't seal address 0, ret=%d.\n",
+ task_pid_nr(current), retval);
}
regs = current_pt_regs();
@@ -2027,8 +2032,10 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(&elf, e_phnum, &info, cprm))
+ if (!fill_note_info(&elf, e_phnum, &info, cprm)) {
+ coredump_report_failure("Error collecting note info");
goto end_coredump;
+ }
has_dumped = 1;
@@ -2039,12 +2046,14 @@ static int elf_core_dump(struct coredump_params *cprm)
{
size_t sz = info.size;
- /* For cell spufs */
+ /* For cell spufs and x86 xstate */
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
- if (!phdr4note)
+ if (!phdr4note) {
+ coredump_report_failure("Error allocating program headers note entry");
goto end_coredump;
+ }
fill_elf_note_phdr(phdr4note, sz, offset);
offset += sz;
@@ -2058,18 +2067,24 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
- if (!shdr4extnum)
+ if (!shdr4extnum) {
+ coredump_report_failure("Error allocating extra program headers");
goto end_coredump;
+ }
fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
- if (!dump_emit(cprm, &elf, sizeof(elf)))
+ if (!dump_emit(cprm, &elf, sizeof(elf))) {
+ coredump_report_failure("Error emitting the ELF headers");
goto end_coredump;
+ }
- if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
+ if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note))) {
+ coredump_report_failure("Error emitting the program header for notes");
goto end_coredump;
+ }
/* Write program headers for segments dump */
for (i = 0; i < cprm->vma_count; i++) {
@@ -2092,20 +2107,28 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
- if (!dump_emit(cprm, &phdr, sizeof(phdr)))
+ if (!dump_emit(cprm, &phdr, sizeof(phdr))) {
+ coredump_report_failure("Error emitting program headers");
goto end_coredump;
+ }
}
- if (!elf_core_write_extra_phdrs(cprm, offset))
+ if (!elf_core_write_extra_phdrs(cprm, offset)) {
+ coredump_report_failure("Error writing out extra program headers");
goto end_coredump;
+ }
/* write out the notes section */
- if (!write_note_info(&info, cprm))
+ if (!write_note_info(&info, cprm)) {
+ coredump_report_failure("Error writing out notes");
goto end_coredump;
+ }
- /* For cell spufs */
- if (elf_coredump_extra_notes_write(cprm))
+ /* For cell spufs and x86 xstate */
+ if (elf_coredump_extra_notes_write(cprm)) {
+ coredump_report_failure("Error writing out extra notes");
goto end_coredump;
+ }
/* Align to page */
dump_skip_to(cprm, dataoff);
@@ -2113,16 +2136,22 @@ static int elf_core_dump(struct coredump_params *cprm)
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *meta = cprm->vma_meta + i;
- if (!dump_user_range(cprm, meta->start, meta->dump_size))
+ if (!dump_user_range(cprm, meta->start, meta->dump_size)) {
+ coredump_report_failure("Error writing out the process memory");
goto end_coredump;
+ }
}
- if (!elf_core_write_extra_data(cprm))
+ if (!elf_core_write_extra_data(cprm)) {
+ coredump_report_failure("Error writing out extra data");
goto end_coredump;
+ }
if (e_phnum == PN_XNUM) {
- if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
+ if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum))) {
+ coredump_report_failure("Error emitting extra program headers");
goto end_coredump;
+ }
}
end_coredump:
diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
new file mode 100644
index 000000000000..3fe9f59ef867
--- /dev/null
+++ b/fs/bpf_fs_kfuncs.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/xattr.h>
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_get_task_exe_file - get a reference on the exe_file of a task's mm_struct
+ * @task: task_struct whose mm_struct's exe_file member to get a reference on
+ *
+ * Get a reference on the exe_file member of the mm_struct nested within the
+ * supplied *task*. The referenced file pointer acquired by this BPF kfunc must
+ * be released using bpf_put_file(); failing to do so will result in the BPF
+ * program being rejected by the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Internally, this BPF kfunc leans on get_task_exe_file(), such that calling
+ * bpf_get_task_exe_file() would be analogous to calling get_task_exe_file()
+ * directly in kernel context.
+ *
+ * Return: A referenced struct file pointer to the exe_file member of the
+ * mm_struct that is nested within the supplied *task*. On error, NULL is
+ * returned.
+ */
+__bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task)
+{
+ return get_task_exe_file(task);
+}
+
+/**
+ * bpf_put_file - put a reference on the supplied file
+ * @file: file to put a reference on
+ *
+ * Put a reference on the supplied *file*. Only referenced file pointers may be
+ * passed to this BPF kfunc. Attempting to pass an unreferenced file pointer, or
+ * any other arbitrary pointer for that matter, will result in the BPF program
+ * being rejected by the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ */
+__bpf_kfunc void bpf_put_file(struct file *file)
+{
+ fput(file);
+}
+
+/**
+ * bpf_path_d_path - resolve the pathname for the supplied path
+ * @path: path to resolve the pathname for
+ * @buf: buffer to return the resolved pathname in
+ * @buf__sz: length of the supplied buffer
+ *
+ * Resolve the pathname for the supplied *path* and store it in *buf*. This BPF
+ * kfunc is the safer variant of the legacy bpf_d_path() helper and should be
+ * used in place of bpf_d_path() whenever possible. It enforces KF_TRUSTED_ARGS
+ * semantics, meaning that the supplied *path* must itself hold a valid
+ * reference, or else the BPF program will be outright rejected by the BPF
+ * verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Return: A positive integer corresponding to the length of the resolved
+ * pathname in *buf*, including the NUL termination character. On error, a
+ * negative integer is returned.
+ */
+__bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
+{
+ int len;
+ char *ret;
+
+ if (!buf__sz)
+ return -EINVAL;
+
+ ret = d_path(path, buf, buf__sz);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ len = buf + buf__sz - ret;
+ memmove(buf, ret, len);
+ return len;
+}
+
+/**
+ * bpf_get_dentry_xattr - get xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *dentry* and store the output in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ u32 value_len;
+ void *value;
+ int ret;
+
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+ if (ret)
+ return ret;
+ return __vfs_getxattr(dentry, inode, name__str, value, value_len);
+}
+
+/**
+ * bpf_get_file_xattr - get xattr of a file
+ * @file: file to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *file* and store the output in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct dentry *dentry;
+
+ dentry = file_dentry(file);
+ return bpf_get_dentry_xattr(dentry, name__str, value_p);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_get_task_exe_file,
+ KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
+
+static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
+ prog->type == BPF_PROG_TYPE_LSM)
+ return 0;
+ return -EACCES;
+}
+
+static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_fs_kfunc_set_ids,
+ .filter = bpf_fs_kfuncs_filter,
+};
+
+static int __init bpf_fs_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+}
+
+late_initcall(bpf_fs_kfuncs_init);
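A hedged sketch of how these kfuncs compose from a BPF LSM program; the section name, the policy, and all program-side names are illustrative assumptions, not part of this patch:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical BPF LSM program: log the path of the executable whose
 * task opens a file. Kfunc prototypes mirror the definitions above. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
void bpf_put_file(struct file *file) __ksym;
int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;

SEC("lsm.s/file_open")
int BPF_PROG(log_opener, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct file *exe = bpf_get_task_exe_file(task);
	char buf[256];

	if (!exe)
		return 0;

	/* Resolve the executable's path; the acquired reference must be
	 * released with bpf_put_file() on every return path. */
	if (bpf_path_d_path(&exe->f_path, buf, sizeof(buf)) > 0)
		bpf_printk("file opened by task running %s", buf);

	bpf_put_file(exe);
	return 0;
}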
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a2de5c05f97c..e2f478ecd7fd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -219,8 +219,8 @@ static void free_pref(struct prelim_ref *ref)
* A -1 return indicates ref1 is a 'lower' block than ref2, while 1
* indicates a 'higher' block.
*/
-static int prelim_ref_compare(struct prelim_ref *ref1,
- struct prelim_ref *ref2)
+static int prelim_ref_compare(const struct prelim_ref *ref1,
+ const struct prelim_ref *ref2)
{
if (ref1->level < ref2->level)
return -1;
@@ -251,7 +251,7 @@ static int prelim_ref_compare(struct prelim_ref *ref1,
}
static void update_share_count(struct share_check *sc, int oldcount,
- int newcount, struct prelim_ref *newref)
+ int newcount, const struct prelim_ref *newref)
{
if ((!sc) || (oldcount == 0 && newcount < 1))
return;
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index b4e31ae17cd9..fec5c6cde0a7 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -53,7 +53,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
/*
* Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
- * btrfs, and is used for all I/O submitted through btrfs_submit_bio.
+ * btrfs, and is used for all I/O submitted through btrfs_submit_bbio().
*
* Just like the underlying bio_alloc_bioset it will not fail as it is backed by
* a mempool.
@@ -73,20 +73,13 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
struct btrfs_bio *orig_bbio,
- u64 map_length, bool use_append)
+ u64 map_length)
{
struct btrfs_bio *bbio;
struct bio *bio;
- if (use_append) {
- unsigned int nr_segs;
-
- bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
- &btrfs_clone_bioset, map_length);
- } else {
- bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
- GFP_NOFS, &btrfs_clone_bioset);
- }
+ bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
+ &btrfs_clone_bioset);
bbio = btrfs_bio(bio);
btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
bbio->inode = orig_bbio->inode;
@@ -120,12 +113,6 @@ static void __btrfs_bio_end_io(struct btrfs_bio *bbio)
}
}
-void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
-{
- bbio->bio.bi_status = status;
- __btrfs_bio_end_io(bbio);
-}
-
static void btrfs_orig_write_end_io(struct bio *bio);
static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
@@ -147,8 +134,9 @@ static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
}
}
-static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
+void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
+ bbio->bio.bi_status = status;
if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
struct btrfs_bio *orig_bbio = bbio->private;
@@ -179,7 +167,7 @@ static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
{
if (atomic_dec_and_test(&fbio->repair_count)) {
- btrfs_orig_bbio_end_io(fbio->bbio);
+ btrfs_bio_end_io(fbio->bbio, fbio->bbio->bio.bi_status);
mempool_free(fbio, &btrfs_failed_bio_pool);
}
}
@@ -211,7 +199,7 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio,
goto done;
}
- btrfs_submit_bio(repair_bbio, mirror);
+ btrfs_submit_bbio(repair_bbio, mirror);
return;
}
@@ -280,7 +268,7 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
- btrfs_submit_bio(repair_bbio, mirror);
+ btrfs_submit_bbio(repair_bbio, mirror);
return fbio;
}
@@ -326,7 +314,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
if (fbio)
btrfs_repair_done(fbio);
else
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
@@ -360,7 +348,7 @@ static void btrfs_end_bio_work(struct work_struct *work)
if (is_data_bbio(bbio))
btrfs_check_read_bio(bbio, bbio->bio.bi_private);
else
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
static void btrfs_simple_end_io(struct bio *bio)
@@ -380,7 +368,7 @@ static void btrfs_simple_end_io(struct bio *bio)
} else {
if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
btrfs_record_physical_zoned(bbio);
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
}
@@ -394,7 +382,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio))
btrfs_check_read_bio(bbio, NULL);
else
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
btrfs_put_bioc(bioc);
}
@@ -424,7 +412,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
btrfs_put_bioc(bioc);
}
@@ -502,8 +490,8 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}
-static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
- struct btrfs_io_stripe *smap, int mirror_num)
+static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
+ struct btrfs_io_stripe *smap, int mirror_num)
{
if (!bioc) {
/* Single mirror read/write fast path. */
@@ -593,7 +581,7 @@ static void run_one_async_done(struct btrfs_work *work, bool do_free)
/* If an error occurred we just want to clean up the bio and move on. */
if (bio->bi_status) {
- btrfs_orig_bbio_end_io(async->bbio);
+ btrfs_bio_end_io(async->bbio, async->bbio->bio.bi_status);
return;
}
@@ -603,7 +591,7 @@ static void run_one_async_done(struct btrfs_work *work, bool do_free)
* context. This changes nothing when cgroups aren't in use.
*/
bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
- __btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
+ btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}
static bool should_async_write(struct btrfs_bio *bbio)
@@ -664,6 +652,19 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
return true;
}
+static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
+{
+ unsigned int nr_segs;
+ int sector_offset;
+
+ map_length = min(map_length, bbio->fs_info->max_zone_append_size);
+ sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
+ &nr_segs, map_length);
+ if (sector_offset)
+ return sector_offset << SECTOR_SHIFT;
+ return map_length;
+}
+
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{
struct btrfs_inode *inode = bbio->inode;
@@ -678,7 +679,10 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
blk_status_t ret;
int error;
- smap.is_scrub = !bbio->inode;
+ if (!bbio->inode || btrfs_is_data_reloc_root(inode->root))
+ smap.rst_search_commit_root = true;
+ else
+ smap.rst_search_commit_root = false;
btrfs_bio_counter_inc_blocked(fs_info);
error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
@@ -690,10 +694,10 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
map_length = min(map_length, length);
if (use_append)
- map_length = min(map_length, fs_info->max_zone_append_size);
+ map_length = btrfs_append_map_length(bbio, map_length);
if (map_length < length) {
- bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
+ bbio = btrfs_split_bio(fs_info, bbio, map_length);
bio = &bbio->bio;
}
@@ -749,7 +753,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
}
}
- __btrfs_submit_bio(bio, bioc, &smap, mirror_num);
+ btrfs_submit_bio(bio, bioc, &smap, mirror_num);
done:
return map_length == length;
@@ -765,16 +769,14 @@ fail:
ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
ASSERT(remaining);
- remaining->bio.bi_status = ret;
- btrfs_orig_bbio_end_io(remaining);
+ btrfs_bio_end_io(remaining, ret);
}
- bbio->bio.bi_status = ret;
- btrfs_orig_bbio_end_io(bbio);
+ btrfs_bio_end_io(bbio, ret);
/* Do not submit another chunk */
return true;
}
-void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
+void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num)
{
/* If bbio->inode is not populated, its file_offset must be 0. */
ASSERT(bbio->inode || bbio->file_offset == 0);
@@ -786,7 +788,7 @@ void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
/*
* Submit a repair write.
*
- * This bypasses btrfs_submit_bio deliberately, as that writes all copies in a
+ * This bypasses btrfs_submit_bbio() deliberately, as that writes all copies in a
* RAID setup. Here we only want to write the one bad copy, so we do the
* mapping ourselves and submit the bio directly.
*
@@ -875,7 +877,7 @@ void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_
ASSERT(smap.dev == fs_info->dev_replace.srcdev);
smap.dev = fs_info->dev_replace.tgtdev;
}
- __btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
+ btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num);
return;
fail:
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index d9dd5276093d..e48612340745 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -29,7 +29,7 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
/*
* Highlevel btrfs I/O structure. It is allocated by btrfs_bio_alloc and
- * passed to btrfs_submit_bio for mapping to the physical devices.
+ * passed to btrfs_submit_bbio() for mapping to the physical devices.
*/
struct btrfs_bio {
/*
@@ -42,7 +42,7 @@ struct btrfs_bio {
union {
/*
* For data reads: checksumming and original I/O information.
- * (for internal use in the btrfs_submit_bio machinery only)
+ * (for internal use in the btrfs_submit_bbio() machinery only)
*/
struct {
u8 *csum;
@@ -104,7 +104,7 @@ void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
/* Submit using blkcg_punt_bio_submit. */
#define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE
-void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num);
+void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num);
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace);
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct folio *folio,
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 2e49d978f504..7980b2e33a92 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -23,7 +23,7 @@
#include "extent-tree.h"
#ifdef CONFIG_BTRFS_DEBUG
-int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
+int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -40,9 +40,9 @@ int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
*
* Should be called with balance_lock held
*/
-static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags)
{
- struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+ const struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
if (!bctl)
@@ -1415,9 +1415,9 @@ out:
}
static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
- struct btrfs_block_group *bg)
+ const struct btrfs_block_group *bg)
{
- struct btrfs_fs_info *fs_info = bg->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_transaction *prev_trans = NULL;
const u64 start = bg->start;
const u64 end = start + bg->length - 1;
@@ -1756,14 +1756,14 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
return bg1->used > bg2->used;
}
-static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
+static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
{
if (btrfs_is_zoned(fs_info))
return btrfs_zoned_should_reclaim(fs_info);
return true;
}
-static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
+static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed)
{
const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
@@ -2006,8 +2006,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
spin_unlock(&fs_info->unused_bgs_lock);
}
-static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
- struct btrfs_path *path)
+static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
+ const struct btrfs_path *path)
{
struct btrfs_chunk_map *map;
struct btrfs_block_group_item bg;
@@ -2055,7 +2055,7 @@ out_free_map:
static int find_first_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
- struct btrfs_key *key)
+ const struct btrfs_key *key)
{
struct btrfs_root *root = btrfs_block_group_root(fs_info);
int ret;
@@ -2640,8 +2640,8 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
}
static int insert_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device, u64 chunk_offset,
- u64 start, u64 num_bytes)
+ const struct btrfs_device *device, u64 chunk_offset,
+ u64 start, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
@@ -2817,7 +2817,7 @@ next:
* For extent tree v2 we use the block_group_item->chunk_offset to point at our
* global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
*/
-static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
+static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset)
{
u64 div = SZ_1G;
u64 index;
@@ -3842,8 +3842,8 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
}
}
-static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, int force)
+static int should_alloc_chunk(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, int force)
{
u64 bytes_used = btrfs_space_info_used(sinfo, false);
u64 thresh;
@@ -4218,7 +4218,7 @@ out:
return ret;
}
-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type)
{
u64 num_dev;
@@ -4622,7 +4622,7 @@ int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
return 0;
}
-bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg)
+bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg)
{
if (btrfs_is_zoned(bg->fs_info))
return false;
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 915111338fc0..36937eeab9b8 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -266,7 +266,7 @@ struct btrfs_block_group {
u64 reclaim_mark;
};
-static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
+static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group)
{
return (block_group->start + block_group->length);
}
@@ -278,8 +278,7 @@ static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
}
-static inline bool btrfs_is_block_group_data_only(
- struct btrfs_block_group *block_group)
+static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group)
{
/*
* In mixed mode the fragmentation is expected to be high, lowering the
@@ -290,7 +289,7 @@ static inline bool btrfs_is_block_group_data_only(
}
#ifdef CONFIG_BTRFS_DEBUG
-int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
+int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group);
#endif
struct btrfs_block_group *btrfs_lookup_first_block_group(
@@ -370,7 +369,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
-static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
+static inline int btrfs_block_group_done(const struct btrfs_block_group *cache)
{
smp_mb();
return cache->cached == BTRFS_CACHE_FINISHED ||
@@ -387,6 +386,6 @@ enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
enum btrfs_block_group_size_class size_class,
bool force_wrong_size_class);
-bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg);
+bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg);
#endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index b299b82d676e..a07b9594dc70 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -553,7 +553,7 @@ try_reserve:
return ERR_PTR(ret);
}
-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
+int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
u64 needed_bytes;
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index 1f53b967d069..d12b1fac5c74 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -89,7 +89,7 @@ void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u32 blocksize);
-int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
+int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3056c8aed8ef..e152fde888fc 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -152,6 +152,7 @@ struct btrfs_inode {
* logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
* defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
* update the VFS' inode number of bytes used.
+ * Also protects setting struct file::private_data.
*/
spinlock_t lock;
@@ -350,10 +351,12 @@ static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode,
WRITE_ONCE(inode->first_dir_index_to_log, index);
}
-static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
-{
- return container_of(inode, struct btrfs_inode, vfs_inode);
-}
+/* Type checked and const-preserving VFS inode -> btrfs inode. */
+#define BTRFS_I(_inode) \
+ _Generic(_inode, \
+ struct inode *: container_of(_inode, struct btrfs_inode, vfs_inode), \
+ const struct inode *: (const struct btrfs_inode *)container_of( \
+ _inode, const struct btrfs_inode, vfs_inode))
static inline unsigned long btrfs_inode_hash(u64 objectid,
const struct btrfs_root *root)
@@ -505,6 +508,14 @@ static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
return true;
}
+static inline void btrfs_assert_inode_locked(struct btrfs_inode *inode)
+{
+ /* Immediately trigger a crash if the inode is not locked. */
+ ASSERT(inode_is_locked(&inode->vfs_inode));
+ /* Trigger a splat in dmesg if this task is not holding the lock. */
+ lockdep_assert_held(&inode->vfs_inode.i_rwsem);
+}
+
/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT "0x%*phN"
#define CSUM_FMT_VALUE(size, bytes) size, bytes
@@ -578,7 +589,7 @@ struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
struct btrfs_path *path);
struct inode *btrfs_iget(u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, u64 start, u64 len);
+ struct folio *folio, u64 start, u64 len);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
@@ -596,9 +607,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc);
-int btrfs_writepage_cow_fixup(struct page *page);
+int btrfs_writepage_cow_fixup(struct folio *folio);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
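
The BTRFS_I() rework uses C11 _Generic so that a const struct inode * now yields a const struct btrfs_inode *; the old inline accepted const but returned a mutable pointer, silently laundering the qualifier. A standalone, compilable sketch of the same trick with made-up demo types:

#include <stddef.h>
#include <stdio.h>

struct inode { long i_ino; };
struct demo_inode { int extra; struct inode vfs_inode; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define container_of_const(ptr, type, member) \
	((const type *)((const char *)(ptr) - offsetof(type, member)))

/* Type-checked and const-preserving, mirroring the BTRFS_I() change. */
#define DEMO_I(_inode) \
	_Generic(_inode, \
	struct inode *: container_of(_inode, struct demo_inode, vfs_inode), \
	const struct inode *: \
		container_of_const(_inode, struct demo_inode, vfs_inode))

int main(void)
{
	struct demo_inode di = { .extra = 42 };
	struct inode *mi = &di.vfs_inode;
	const struct inode *ci = &di.vfs_inode;

	/* Mutable in, mutable out; const in, const out. */
	printf("%d %d\n", DEMO_I(mi)->extra, DEMO_I(ci)->extra);
	return 0;
}

Writing through DEMO_I(ci) is now a compile error, which is exactly the enforcement the kernel macro gains.
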
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index a8e2c461aff7..90aef2627ca2 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -138,15 +138,15 @@ static int compression_decompress_bio(struct list_head *ws,
}
static int compression_decompress(int type, struct list_head *ws,
- const u8 *data_in, struct page *dest_page,
+ const u8 *data_in, struct folio *dest_folio,
unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
switch (type) {
- case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
dest_pgoff, srclen, destlen);
- case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
+ case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_folio,
dest_pgoff, srclen, destlen);
- case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_NONE:
default:
@@ -395,7 +395,7 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
cb->bbio.ordered = ordered;
btrfs_add_compressed_bio_folios(cb);
- btrfs_submit_bio(&cb->bbio, 0);
+ btrfs_submit_bbio(&cb->bbio, 0);
}
/*
@@ -420,7 +420,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
u64 isize = i_size_read(inode);
int ret;
- struct page *page;
+ struct folio *folio;
struct extent_map *em;
struct address_space *mapping = inode->i_mapping;
struct extent_map_tree *em_tree;
@@ -453,9 +453,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (pg_index > end_index)
break;
- page = xa_load(&mapping->i_pages, pg_index);
- if (page && !xa_is_value(page)) {
- sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
+ folio = __filemap_get_folio(mapping, pg_index, 0, 0);
+ if (!IS_ERR(folio)) {
+ u64 folio_sz = folio_size(folio);
+ u64 offset = offset_in_folio(folio, cur);
+
+ folio_put(folio);
+ sectors_missed += (folio_sz - offset) >>
fs_info->sectorsize_bits;
/* Beyond threshold, no need to continue */
@@ -466,35 +470,35 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* Jump to next page start as we already have page for
* current offset.
*/
- cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+ cur += (folio_sz - offset);
continue;
}
- page = __page_cache_alloc(mapping_gfp_constraint(mapping,
- ~__GFP_FS));
- if (!page)
+ folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
+ ~__GFP_FS), 0);
+ if (!folio)
break;
- if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
- put_page(page);
+ if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
/* There is already a page, skip to page end */
- cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+ cur += folio_size(folio);
+ folio_put(folio);
continue;
}
- if (!*memstall && PageWorkingset(page)) {
+ if (!*memstall && folio_test_workingset(folio)) {
psi_memstall_enter(pflags);
*memstall = 1;
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
- page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
+ page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
lock_extent(tree, cur, page_end, NULL);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
@@ -511,28 +515,28 @@ static noinline int add_ra_bio_pages(struct inode *inode,
orig_bio->bi_iter.bi_sector) {
free_extent_map(em);
unlock_extent(tree, cur, page_end, NULL);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
add_size = min(em->start + em->len, page_end + 1) - cur;
free_extent_map(em);
+ unlock_extent(tree, cur, page_end, NULL);
- if (page->index == end_index) {
- size_t zero_offset = offset_in_page(isize);
+ if (folio->index == end_index) {
+ size_t zero_offset = offset_in_folio(folio, isize);
if (zero_offset) {
int zeros;
- zeros = PAGE_SIZE - zero_offset;
- memzero_page(page, zero_offset, zeros);
+ zeros = folio_size(folio) - zero_offset;
+ folio_zero_range(folio, zero_offset, zeros);
}
}
- ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
- if (ret != add_size) {
- unlock_extent(tree, cur, page_end, NULL);
- unlock_page(page);
- put_page(page);
+ if (!bio_add_folio(orig_bio, folio, add_size,
+ offset_in_folio(folio, cur))) {
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
/*
@@ -541,9 +545,9 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* subpage::readers and to unlock the page.
*/
if (fs_info->sectorsize < PAGE_SIZE)
- btrfs_subpage_start_reader(fs_info, page_folio(page),
- cur, add_size);
- put_page(page);
+ btrfs_subpage_start_reader(fs_info, folio, cur,
+ add_size);
+ folio_put(folio);
cur += add_size;
}
return 0;
@@ -626,7 +630,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
if (memstall)
psi_memstall_leave(&pflags);
- btrfs_submit_bio(&cb->bbio, 0);
+ btrfs_submit_bbio(&cb->bbio, 0);
return;
out_free_compressed_pages:
@@ -1057,10 +1061,10 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
* single page, and we want to read a single page out of it.
* start_byte tells us the offset into the compressed data we're interested in
*/
-int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
+int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
struct list_head *workspace;
const u32 sectorsize = fs_info->sectorsize;
int ret;
@@ -1073,7 +1077,7 @@ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
workspace = get_workspace(type, 0);
- ret = compression_decompress(type, workspace, data_in, dest_page,
+ ret = compression_decompress(type, workspace, data_in, dest_folio,
dest_pgoff, srclen, destlen);
put_workspace(type, workspace);
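
The add_ra_bio_pages() conversion replaces the raw i_pages peek with the folio lookup API, which takes a reference and reports absence as an ERR_PTR instead of NULL or shadow entries. A condensed sketch of the lookup-then-insert pattern used above, with error handling trimmed:

/* Sketch: readahead population, folio style. */
struct folio *folio;

folio = __filemap_get_folio(mapping, index, 0, 0); /* no FGP flags: plain lookup */
if (!IS_ERR(folio)) {
	/* Already cached: account for its size, then drop our reference. */
	u64 skip = folio_size(folio) - offset_in_folio(folio, cur);

	folio_put(folio);
	cur += skip;
} else {
	folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS), 0);
	if (folio && filemap_add_folio(mapping, folio, index, GFP_NOFS)) {
		/* Lost the race: someone inserted a folio first, drop ours. */
		cur += folio_size(folio);
		folio_put(folio);
	}
}
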
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index cfdc64319186..b6563b6a333e 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -82,13 +82,21 @@ static inline unsigned int btrfs_compress_level(unsigned int type_level)
return ((type_level & 0xF0) >> 4);
}
+/* @range_end must be exclusive. */
+static inline u32 btrfs_calc_input_length(u64 range_end, u64 cur)
+{
+ u64 page_end = round_down(cur, PAGE_SIZE) + PAGE_SIZE;
+
+ return min(range_end, page_end) - cur;
+}
+
int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
-int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
+int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
@@ -154,7 +162,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
@@ -165,7 +173,7 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);
@@ -175,7 +183,7 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
void zstd_init_workspace_manager(void);
void zstd_cleanup_workspace_manager(void);
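
btrfs_calc_input_length() clamps a compression input run so it never crosses a page boundary, and the exclusive range_end keeps the arithmetic branch-free. The math, checked standalone in userspace with 4 KiB pages assumed:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

static uint32_t calc_input_length(uint64_t range_end, uint64_t cur)
{
	uint64_t page_end = round_down(cur, PAGE_SIZE) + PAGE_SIZE;

	return (uint32_t)((range_end < page_end ? range_end : page_end) - cur);
}

int main(void)
{
	/* cur sits 0x100 bytes before a page boundary; range extends past it. */
	printf("%u\n", calc_input_length(0x2400, 0x1f00));	/* 256 (0x100) */
	/* The range ends inside the current page. */
	printf("%u\n", calc_input_length(0x1f80, 0x1f00));	/* 128 (0x80) */
	return 0;
}
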
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 451203055bbf..0cc919d15b14 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2564,8 +2564,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
*
*/
static void fixup_low_keys(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_disk_key *key, int level)
+ const struct btrfs_path *path,
+ const struct btrfs_disk_key *key, int level)
{
int i;
struct extent_buffer *t;
@@ -2594,7 +2594,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans,
* that the new key won't break the order
*/
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
const struct btrfs_key *new_key)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -2660,8 +2660,8 @@ void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
* is correct, we only need to bother the last key of @left and the first
* key of @right.
*/
-static bool check_sibling_keys(struct extent_buffer *left,
- struct extent_buffer *right)
+static bool check_sibling_keys(const struct extent_buffer *left,
+ const struct extent_buffer *right)
{
struct btrfs_key left_last;
struct btrfs_key right_first;
@@ -2928,8 +2928,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
* blocknr is the block the key points to.
*/
static int insert_ptr(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_disk_key *key, u64 bytenr,
+ const struct btrfs_path *path,
+ const struct btrfs_disk_key *key, u64 bytenr,
int slot, int level)
{
struct extent_buffer *lower;
@@ -4019,7 +4019,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
* the front.
*/
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u32 new_size, int from_end)
+ const struct btrfs_path *path, u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
@@ -4111,7 +4111,7 @@ void btrfs_truncate_item(struct btrfs_trans_handle *trans,
* make the item pointed to by the path bigger, data_size is the added size.
*/
void btrfs_extend_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u32 data_size)
+ const struct btrfs_path *path, u32 data_size)
{
int slot;
struct extent_buffer *leaf;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c8568b1a61c4..317a3712270f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -6,6 +6,7 @@
#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H
+#include "linux/cleanup.h"
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
@@ -84,6 +85,9 @@ struct btrfs_path {
unsigned int nowait:1;
};
+#define BTRFS_PATH_AUTO_FREE(path_name) \
+ struct btrfs_path *path_name __free(btrfs_free_path) = NULL
+
/*
* The state of btrfs root
*/
@@ -459,6 +463,8 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
+ /* Task that allocated this structure. */
+ struct task_struct *owner_task;
};
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
@@ -538,7 +544,7 @@ int btrfs_previous_item(struct btrfs_root *root,
int btrfs_previous_extent_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
+ const struct btrfs_path *path,
const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -572,9 +578,9 @@ bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u32 data_size);
+ const struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u32 new_size, int from_end);
+ const struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -598,6 +604,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
+DEFINE_FREE(btrfs_free_path, struct btrfs_path *, btrfs_free_path(_T))
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr);
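
BTRFS_PATH_AUTO_FREE() builds on DEFINE_FREE()/__free() from linux/cleanup.h, which wrap the compiler's cleanup attribute so the path is released on every exit from the enclosing scope. A compilable userspace approximation; the demo type and helper names are made up:

#include <stdlib.h>

struct demo_path { int slots; };

static void free_path(struct demo_path **p)
{
	free(*p);	/* runs automatically when the variable leaves scope */
}

/* Roughly what DEFINE_FREE(btrfs_free_path, ...) plus __free() provide. */
#define PATH_AUTO_FREE(name) \
	struct demo_path *name __attribute__((cleanup(free_path))) = NULL

int main(void)
{
	PATH_AUTO_FREE(path);

	path = calloc(1, sizeof(*path));
	if (!path)
		return 1;	/* early returns still trigger the cleanup */
	/* ... use path ... */
	return 0;
}

The = NULL initializer in the macro is load-bearing: the cleanup handler always runs, so it must tolerate the allocation never having happened (free(NULL), like btrfs_free_path(NULL), is a no-op).
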
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index f6dbda37a361..b95ef44c326b 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -45,8 +45,8 @@ struct inode_defrag {
u32 extent_thresh;
};
-static int __compare_inode_defrag(struct inode_defrag *defrag1,
- struct inode_defrag *defrag2)
+static int compare_inode_defrag(const struct inode_defrag *defrag1,
+ const struct inode_defrag *defrag2)
{
if (defrag1->root > defrag2->root)
return 1;
@@ -61,16 +61,14 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
}
/*
- * Pop a record for an inode into the defrag tree. The lock must be held
+ * Insert a record for an inode into the defrag tree. The lock must be held
* already.
*
* If you're inserting a record for an older transid than an existing record,
* the transid already in the tree is lowered.
- *
- * If an existing record is found the defrag item you pass in is freed.
*/
-static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
- struct inode_defrag *defrag)
+static int btrfs_insert_inode_defrag(struct btrfs_inode *inode,
+ struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct inode_defrag *entry;
@@ -83,7 +81,7 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
- ret = __compare_inode_defrag(defrag, entry);
+ ret = compare_inode_defrag(defrag, entry);
if (ret < 0)
p = &parent->rb_left;
else if (ret > 0)
@@ -107,7 +105,7 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
return 0;
}
-static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
+static inline int need_auto_defrag(struct btrfs_fs_info *fs_info)
{
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
@@ -119,34 +117,28 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
}
/*
- * Insert a defrag record for this inode if auto defrag is enabled.
+ * Insert a defrag record for this inode if auto defrag is enabled. No errors
+ * are returned as they are not considered fatal.
*/
-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, u32 extent_thresh)
+void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode_defrag *defrag;
- u64 transid;
int ret;
- if (!__need_auto_defrag(fs_info))
- return 0;
+ if (!need_auto_defrag(fs_info))
+ return;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
- return 0;
-
- if (trans)
- transid = trans->transid;
- else
- transid = btrfs_get_root_last_trans(root);
+ return;
defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
if (!defrag)
- return -ENOMEM;
+ return;
defrag->ino = btrfs_ino(inode);
- defrag->transid = transid;
+ defrag->transid = btrfs_get_root_last_trans(root);
defrag->root = btrfs_root_id(root);
defrag->extent_thresh = extent_thresh;
@@ -157,14 +149,13 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
* and then re-read this inode, this new inode doesn't have
 * IN_DEFRAG flag. In that case, we may find an existing defrag record.
*/
- ret = __btrfs_add_inode_defrag(inode, defrag);
+ ret = btrfs_insert_inode_defrag(inode, defrag);
if (ret)
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
spin_unlock(&fs_info->defrag_inodes_lock);
- return 0;
}
/*
@@ -189,7 +180,7 @@ static struct inode_defrag *btrfs_pick_defrag_inode(
parent = p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
- ret = __compare_inode_defrag(&tmp, entry);
+ ret = compare_inode_defrag(&tmp, entry);
if (ret < 0)
p = parent->rb_left;
else if (ret > 0)
@@ -198,7 +189,7 @@ static struct inode_defrag *btrfs_pick_defrag_inode(
goto out;
}
- if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
+ if (parent && compare_inode_defrag(&tmp, entry) > 0) {
parent = rb_next(parent);
if (parent)
entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -214,27 +205,24 @@ out:
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
- struct inode_defrag *defrag;
- struct rb_node *node;
+ struct inode_defrag *defrag, *next;
spin_lock(&fs_info->defrag_inodes_lock);
- node = rb_first(&fs_info->defrag_inodes);
- while (node) {
- rb_erase(node, &fs_info->defrag_inodes);
- defrag = rb_entry(node, struct inode_defrag, rb_node);
+
+ rbtree_postorder_for_each_entry_safe(defrag, next,
+ &fs_info->defrag_inodes, rb_node)
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- cond_resched_lock(&fs_info->defrag_inodes_lock);
+ fs_info->defrag_inodes = RB_ROOT;
- node = rb_first(&fs_info->defrag_inodes);
- }
spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH 1024
-static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
- struct inode_defrag *defrag)
+static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+ struct inode_defrag *defrag,
+ struct file_ra_state *ra)
{
struct btrfs_root *inode_root;
struct inode *inode;
@@ -245,7 +233,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
again:
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
goto cleanup;
- if (!__need_auto_defrag(fs_info))
+ if (!need_auto_defrag(fs_info))
goto cleanup;
/* Get the inode */
@@ -273,9 +261,10 @@ again:
range.len = (u64)-1;
range.start = cur;
range.extent_thresh = defrag->extent_thresh;
+ file_ra_state_init(ra, inode->i_mapping);
sb_start_write(fs_info->sb);
- ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+ ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
iput(inode);
@@ -302,11 +291,13 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
atomic_inc(&fs_info->defrag_running);
while (1) {
+ struct file_ra_state ra = { 0 };
+
/* Pause the auto defragger. */
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
break;
- if (!__need_auto_defrag(fs_info))
+ if (!need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
@@ -324,7 +315,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
first_ino = defrag->ino + 1;
root_objectid = defrag->root;
- __btrfs_run_defrag_inode(fs_info, defrag);
+ btrfs_run_defrag_inode(fs_info, defrag, &ra);
}
atomic_dec(&fs_info->defrag_running);
@@ -1317,8 +1308,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
if (entry->start + range_len <= *last_scanned_ret)
continue;
- if (ra)
- page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+ page_cache_sync_readahead(inode->vfs_inode.i_mapping,
ra, NULL, entry->start >> PAGE_SHIFT,
((entry->start + range_len - 1) >> PAGE_SHIFT) -
(entry->start >> PAGE_SHIFT) + 1);
@@ -1350,7 +1340,7 @@ out:
* Entry point to file defragmentation.
*
* @inode: inode to be defragged
- * @ra: readahead state (can be NULL)
+ * @ra: readahead state
* @range: defrag options including range and flags
* @newer_than: minimum transid to defrag
* @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
@@ -1372,12 +1362,13 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
u64 cur;
u64 last_byte;
bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
- bool ra_allocated = false;
int compress_type = BTRFS_COMPRESS_ZLIB;
int ret = 0;
u32 extent_thresh = range->extent_thresh;
pgoff_t start_index;
+ ASSERT(ra);
+
if (isize == 0)
return 0;
@@ -1407,18 +1398,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
/*
- * If we were not given a ra, allocate a readahead context. As
- * readahead is just an optimization, defrag will work without it so
- * we don't error out.
- */
- if (!ra) {
- ra_allocated = true;
- ra = kzalloc(sizeof(*ra), GFP_KERNEL);
- if (ra)
- file_ra_state_init(ra, inode->i_mapping);
- }
-
- /*
* Make writeback start from the beginning of the range, so that the
* defrag range can be written sequentially.
*/
@@ -1472,8 +1451,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
cond_resched();
}
- if (ra_allocated)
- kfree(ra);
/*
* Update range.start for autodefrag, this will indicate where to start
* in next run.
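
With the kzalloc fallback removed, btrfs_defrag_file() now asserts that a readahead state is supplied, and the autodefrag loop keeps one on the stack per iteration instead of allocating. A sketch of the resulting caller pattern, mirroring btrfs_run_defrag_inode() above:

struct file_ra_state ra = { 0 };

file_ra_state_init(&ra, inode->i_mapping);
ret = btrfs_defrag_file(inode, &ra, &range, defrag->transid,
			BTRFS_DEFRAG_BATCH);
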
diff --git a/fs/btrfs/defrag.h b/fs/btrfs/defrag.h
index 878528e086fb..6b7596c4f0dc 100644
--- a/fs/btrfs/defrag.h
+++ b/fs/btrfs/defrag.h
@@ -18,8 +18,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
u64 newer_than, unsigned long max_to_defrag);
int __init btrfs_auto_defrag_init(void);
void __cold btrfs_auto_defrag_exit(void);
-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, u32 extent_thresh);
+void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_defrag_root(struct btrfs_root *root);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 06a9e0542d70..ad9ef8312e41 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -855,11 +855,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
/* Record qgroup extent info if provided */
if (qrecord) {
- if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
- delayed_refs, qrecord))
+ int ret;
+
+ ret = btrfs_qgroup_trace_extent_nolock(trans->fs_info,
+ delayed_refs, qrecord);
+ if (ret) {
+ /* Clean up if insertion fails or item exists. */
+ xa_release(&delayed_refs->dirty_extents, qrecord->bytenr);
kfree(qrecord);
- else
+ } else {
qrecord_inserted = true;
+ }
}
trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
@@ -1005,18 +1011,16 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
return -ENOMEM;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref) {
- kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
- return -ENOMEM;
- }
+ if (!head_ref)
+ goto free_node;
if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
- if (!record) {
- kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
- kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
- return -ENOMEM;
- }
+ if (!record)
+ goto free_head_ref;
+ if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents,
+ generic_ref->bytenr, GFP_NOFS))
+ goto free_record;
}
init_delayed_ref_common(fs_info, node, generic_ref);
@@ -1052,6 +1056,14 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
if (qrecord_inserted)
return btrfs_qgroup_trace_extent_post(trans, record);
return 0;
+
+free_record:
+ kfree(record);
+free_head_ref:
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+free_node:
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
+ return -ENOMEM;
}
/*
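
The reworked error path pairs xa_reserve() with a reverse-order goto ladder: the xarray slot is preallocated while GFP_NOFS allocation is still permitted, so the later insert under the delayed-refs spinlock cannot fail on memory, and xa_release() drops the reservation whenever the record is never stored. An illustrative sketch of the idiom; the function and names here are hypothetical, not btrfs API:

/* Sketch: preallocate an xarray slot, store later, release if unused. */
static int track_record(struct xarray *xa, unsigned long index, void *rec)
{
	int ret;

	if (xa_reserve(xa, index, GFP_NOFS))	/* may sleep and allocate */
		return -ENOMEM;

	/* ... later, e.g. under a spinlock where allocation is forbidden ... */
	ret = xa_err(xa_store(xa, index, rec, GFP_ATOMIC));
	if (ret)
		xa_release(xa, index);		/* drop the unused reservation */
	return ret;
}
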
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 05f634eb472d..085f30968aba 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -202,8 +202,8 @@ struct btrfs_delayed_ref_root {
/* head ref rbtree */
struct rb_root_cached href_root;
- /* dirty extent records */
- struct rb_root dirty_extent_root;
+ /* Track dirty extent records. */
+ struct xarray dirty_extents;
/* this spin lock protects the rbtree and the entries inside */
spinlock_t lock;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index f638c458d285..83d5cdd77f29 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -824,22 +824,45 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
struct btrfs_device *srcdev,
struct btrfs_device *tgtdev)
{
- u64 start = 0;
- int i;
+ struct rb_node *node;
+
+ /*
+ * The chunk mutex must be held so that no new chunks can be created
+ * while we are updating existing chunks. This guarantees we don't miss
+ * any new chunk that gets created for a range that falls before the
+ * range of the last chunk we processed.
+ */
+ lockdep_assert_held(&fs_info->chunk_mutex);
write_lock(&fs_info->mapping_tree_lock);
- do {
+ node = rb_first_cached(&fs_info->mapping_tree);
+ while (node) {
+ struct rb_node *next = rb_next(node);
struct btrfs_chunk_map *map;
+ u64 next_start;
- map = btrfs_find_chunk_map_nolock(fs_info, start, U64_MAX);
- if (!map)
- break;
- for (i = 0; i < map->num_stripes; i++)
+ map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+ next_start = map->start + map->chunk_len;
+
+ for (int i = 0; i < map->num_stripes; i++)
if (srcdev == map->stripes[i].dev)
map->stripes[i].dev = tgtdev;
- start = map->start + map->chunk_len;
- btrfs_free_chunk_map(map);
- } while (start);
+
+ if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
+ map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
+ if (!map)
+ break;
+ node = &map->rb_node;
+ /*
+ * Drop the lookup reference since we are holding the
+ * lock in write mode and no one can remove the chunk
+ * map from the tree and drop its tree reference.
+ */
+ btrfs_free_chunk_map(map);
+ } else {
+ node = next;
+ }
+ }
write_unlock(&fs_info->mapping_tree_lock);
}
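
The new loop walks the chunk-map rbtree directly, and cond_resched_rwlock_write() returns nonzero only when it actually dropped and re-acquired the lock; in that case the saved next pointer may be stale and the position has to be re-found by key. A generic sketch of the pattern, with placeholder helper names:

/* Sketch: walk an rbtree under a write lock without hogging the CPU. */
struct rb_node *node = rb_first_cached(&tree);

while (node) {
	struct rb_node *next = rb_next(node);
	u64 next_key = item_end(node);		/* placeholder helper */

	process(node);

	if (cond_resched_rwlock_write(&tree_lock)) {
		/* Lock was dropped: 'next' may be stale, re-find by key. */
		node = find_first_at_or_after(&tree, next_key);
	} else {
		node = next;	/* lock never released, pointer still valid */
	}
}
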
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index 364bce34f034..bd38df5647e3 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -40,11 +40,21 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
struct btrfs_ordered_extent *ordered;
int ret = 0;
+ /* Direct lock must be taken before the extent lock. */
+ if (nowait) {
+ if (!try_lock_dio_extent(io_tree, lockstart, lockend, cached_state))
+ return -EAGAIN;
+ } else {
+ lock_dio_extent(io_tree, lockstart, lockend, cached_state);
+ }
+
while (1) {
if (nowait) {
if (!try_lock_extent(io_tree, lockstart, lockend,
- cached_state))
- return -EAGAIN;
+ cached_state)) {
+ ret = -EAGAIN;
+ break;
+ }
} else {
lock_extent(io_tree, lockstart, lockend, cached_state);
}
@@ -120,6 +130,8 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
cond_resched();
}
+ if (ret)
+ unlock_dio_extent(io_tree, lockstart, lockend, cached_state);
return ret;
}
@@ -353,7 +365,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
int ret = 0;
u64 len = length;
const u64 data_alloc_len = length;
- bool unlock_extents = false;
+ u32 unlock_bits = EXTENT_LOCKED;
/*
* We could potentially fault if we have a buffer > PAGE_SIZE, and if
@@ -514,7 +526,6 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
start, &len, flags);
if (ret < 0)
goto unlock_err;
- unlock_extents = true;
/* Recalc len in case the new em is smaller than requested */
len = min(len, em->len - (start - em->start));
if (dio_data->data_space_reserved) {
@@ -535,22 +546,8 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
release_offset,
release_len);
}
- } else {
- /*
- * We need to unlock only the end area that we aren't using.
- * The rest is going to be unlocked by the endio routine.
- */
- lockstart = start + len;
- if (lockstart < lockend)
- unlock_extents = true;
}
- if (unlock_extents)
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- else
- free_extent_state(cached_state);
-
/*
* Translate extent map information to iomap.
* We trim the extents (and move the addr) even though iomap code does
@@ -569,11 +566,33 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
iomap->length = len;
free_extent_map(em);
+ /*
+ * Reads will hold the EXTENT_DIO_LOCKED bit until the io is completed,
+ * writes only hold it for this part. We hold the extent lock until
+ * we're completely done with the extent map to make sure it remains
+ * valid.
+ */
+ if (write)
+ unlock_bits |= EXTENT_DIO_LOCKED;
+
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ unlock_bits, &cached_state);
+
+ /* We didn't use everything, unlock the dio extent for the remainder. */
+ if (!write && (start + len) < lockend)
+ unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len,
+ lockend, NULL);
+
return 0;
unlock_err:
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ /*
+ * Don't use EXTENT_LOCK_BITS here in case we extend it later and forget
+ * to update this, be explicit that we expect EXTENT_LOCKED and
+ * EXTENT_DIO_LOCKED to be set here, and so that's what we're clearing.
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state);
err:
if (dio_data->data_space_reserved) {
btrfs_free_reserved_data_space(BTRFS_I(inode),
@@ -596,8 +615,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (!write && (iomap->type == IOMAP_HOLE)) {
/* If reading from a hole, unlock and return */
- unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
- NULL);
+ unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
return 0;
}
@@ -608,8 +627,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
btrfs_finish_ordered_extent(dio_data->ordered, NULL,
pos, length, false);
else
- unlock_extent(&BTRFS_I(inode)->io_tree, pos,
- pos + length - 1, NULL);
+ unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1, NULL);
ret = -ENOTBLK;
}
if (write) {
@@ -641,8 +660,8 @@ static void btrfs_dio_end_io(struct btrfs_bio *bbio)
dip->file_offset, dip->bytes,
!bio->bi_status);
} else {
- unlock_extent(&inode->io_tree, dip->file_offset,
- dip->file_offset + dip->bytes - 1, NULL);
+ unlock_dio_extent(&inode->io_tree, dip->file_offset,
+ dip->file_offset + dip->bytes - 1, NULL);
}
bbio->bio.bi_private = bbio->private;
@@ -726,7 +745,7 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
}
}
- btrfs_submit_bio(bbio, 0);
+ btrfs_submit_bbio(bbio, 0);
}
static const struct iomap_ops btrfs_dio_iomap_ops = {
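
The direct-io path now takes two bits on the same io_tree: EXTENT_DIO_LOCKED spans the whole I/O and serializes against buffered operations, while the ordinary EXTENT_LOCKED bit only needs to be held while the extent map is translated into an iomap. A condensed sketch of the ordering; reads and writes differ only in who clears the outer bit:

lock_dio_extent(io_tree, start, end, &cached);	/* outer: whole-I/O scope */
lock_extent(io_tree, start, end, &cached);	/* inner: extent-map scope */
/* ... look up the extent map, fill in the iomap ... */
clear_extent_bit(io_tree, start, end,
		 write ? (EXTENT_LOCKED | EXTENT_DIO_LOCKED) : EXTENT_LOCKED,
		 &cached);
/* reads: unlock_dio_extent() runs later, from iomap_end or bio end_io */
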
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 944a7340f6a4..e815d165cccc 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -68,7 +68,7 @@ static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
};
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
- struct btrfs_block_group *block_group)
+ const struct btrfs_block_group *block_group)
{
return &discard_ctl->discard_list[block_group->discard_index];
}
@@ -80,7 +80,7 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
*
* Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
*/
-static bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
+static bool btrfs_run_discard_work(const struct btrfs_discard_ctl *discard_ctl)
{
struct btrfs_fs_info *fs_info = container_of(discard_ctl,
struct btrfs_fs_info,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a6f5441e62d1..25d768e67e37 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -525,7 +525,7 @@ static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
if (folio_test_writeback(folio) || folio_test_dirty(folio))
return false;
- return try_release_extent_buffer(&folio->page);
+ return try_release_extent_buffer(folio);
}
static void btree_invalidate_folio(struct folio *folio, size_t offset,
@@ -1285,7 +1285,6 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit);
- kfree(fs_info->subpage_info);
kvfree(fs_info);
}
@@ -3322,6 +3321,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
fs_info->sectorsize_bits = ilog2(sectorsize);
+ fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
@@ -3346,20 +3346,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/
fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
- if (sectorsize < PAGE_SIZE) {
- struct btrfs_subpage_info *subpage_info;
-
+ if (sectorsize < PAGE_SIZE)
btrfs_warn(fs_info,
"read-write for sector size %u with page size %lu is experimental",
sectorsize, PAGE_SIZE);
- subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
- if (!subpage_info) {
- ret = -ENOMEM;
- goto fail_alloc;
- }
- btrfs_init_subpage_info(subpage_info, sectorsize);
- fs_info->subpage_info = subpage_info;
- }
ret = btrfs_init_workqueues(fs_info);
if (ret)
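
Caching sectors_per_page replaces the dynamically allocated subpage_info, since every derived value is a pure function of sectorsize and PAGE_SIZE. The arithmetic, checked standalone; the 64 KiB page size is just an example configuration:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 65536;	/* e.g. 64K-page arm64/ppc64 */
	unsigned int sectorsize = 4096;
	unsigned int sectorsize_bits = 12;	/* ilog2(4096) */

	/* 16 sectors per page: the subpage case the warning above covers */
	printf("sectorsize=%u, sectors_per_page=%lu\n",
	       sectorsize, page_size >> sectorsize_bits);
	return 0;
}
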
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index c54c5d7a5cd5..6d08c100b01d 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -126,7 +126,7 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
* Empty an io tree, removing and freeing every extent state record from the
* tree. This should be called once we are sure no other task can access the
* tree anymore, so no tree updates happen after we empty the tree and there
- * aren't any waiters on any extent state record (EXTENT_LOCKED bit is never
+ * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
* set on any extent state when calling this function).
*/
void extent_io_tree_release(struct extent_io_tree *tree)
@@ -141,7 +141,7 @@ void extent_io_tree_release(struct extent_io_tree *tree)
rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
/* Clear node to keep free_extent_state() happy. */
RB_CLEAR_NODE(&state->rb_node);
- ASSERT(!(state->state & EXTENT_LOCKED));
+ ASSERT(!(state->state & EXTENT_LOCK_BITS));
/*
* No need for a memory barrier here, as we are holding the tree
* lock and we only change the waitqueue while holding that lock
@@ -399,7 +399,7 @@ static void merge_next_state(struct extent_io_tree *tree, struct extent_state *s
*/
static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
{
- if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
+ if (state->state & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY))
return;
merge_prev_state(tree, state);
@@ -445,7 +445,7 @@ static struct extent_state *insert_state(struct extent_io_tree *tree,
struct rb_node *parent = NULL;
const u64 start = state->start - 1;
const u64 end = state->end + 1;
- const bool try_merge = !(bits & (EXTENT_LOCKED | EXTENT_BOUNDARY));
+ const bool try_merge = !(bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY));
set_state_bits(tree, state, bits, changeset);
@@ -616,9 +616,6 @@ static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask)
* inserting elements in the tree, so the gfp mask is used to indicate which
* allocations or sleeping are allowed.
*
- * Pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove the given
- * range from the tree regardless of state (ie for truncate).
- *
* The range [start, end] is inclusive.
*
* This takes the tree lock, and returns 0 on success and < 0 on error.
@@ -647,8 +644,8 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (bits & EXTENT_DELALLOC)
bits |= EXTENT_NORESERVE;
- wake = (bits & EXTENT_LOCKED) ? 1 : 0;
- if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
+ wake = ((bits & EXTENT_LOCK_BITS) ? 1 : 0);
+ if (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY))
clear = 1;
again:
if (!prealloc) {
@@ -861,8 +858,7 @@ static void cache_state_if_flags(struct extent_state *state,
static void cache_state(struct extent_state *state,
struct extent_state **cached_ptr)
{
- return cache_state_if_flags(state, cached_ptr,
- EXTENT_LOCKED | EXTENT_BOUNDARY);
+ return cache_state_if_flags(state, cached_ptr, EXTENT_LOCK_BITS | EXTENT_BOUNDARY);
}
/*
@@ -1063,7 +1059,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int ret = 0;
u64 last_start;
u64 last_end;
- u32 exclusive_bits = (bits & EXTENT_LOCKED);
+ u32 exclusive_bits = (bits & EXTENT_LOCK_BITS);
gfp_t mask;
set_gfp_mask_from_bits(&bits, &mask);
@@ -1812,12 +1808,11 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset)
{
/*
- * We don't support EXTENT_LOCKED yet, as current changeset will
- * record any bits changed, so for EXTENT_LOCKED case, it will
- * either fail with -EEXIST or changeset will record the whole
- * range.
+ * We don't support EXTENT_LOCK_BITS yet, as current changeset will
+ * record any bits changed, so for EXTENT_LOCK_BITS case, it will either
+ * fail with -EEXIST or changeset will record the whole range.
*/
- ASSERT(!(bits & EXTENT_LOCKED));
+ ASSERT(!(bits & EXTENT_LOCK_BITS));
return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
}
@@ -1826,26 +1821,25 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset)
{
/*
- * Don't support EXTENT_LOCKED case, same reason as
+ * Don't support EXTENT_LOCK_BITS case, same reason as
* set_record_extent_bits().
*/
- ASSERT(!(bits & EXTENT_LOCKED));
+ ASSERT(!(bits & EXTENT_LOCK_BITS));
return __clear_extent_bit(tree, start, end, bits, NULL, changeset);
}
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached)
+bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached)
{
int err;
u64 failed_start;
- err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
+ err = __set_extent_bit(tree, start, end, bits, &failed_start,
NULL, cached, NULL);
if (err == -EEXIST) {
if (failed_start > start)
- clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, cached);
+ clear_extent_bit(tree, start, failed_start - 1, bits, cached);
return 0;
}
return 1;
@@ -1855,23 +1849,22 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
* Either insert or lock state struct between start and end use mask to tell
* us if waiting is desired.
*/
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached_state)
+int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached_state)
{
struct extent_state *failed_state = NULL;
int err;
u64 failed_start;
- err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
+ err = __set_extent_bit(tree, start, end, bits, &failed_start,
&failed_state, cached_state, NULL);
while (err == -EEXIST) {
if (failed_start != start)
clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, cached_state);
+ bits, cached_state);
- wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED,
- &failed_state);
- err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
+ wait_extent_bit(tree, failed_start, end, bits, &failed_state);
+ err = __set_extent_bit(tree, start, end, bits,
&failed_start, &failed_state,
cached_state, NULL);
}
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 9d3a52d8f59a..6ffef1cd37c1 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -19,6 +19,7 @@ enum {
ENUM_BIT(EXTENT_DIRTY),
ENUM_BIT(EXTENT_UPTODATE),
ENUM_BIT(EXTENT_LOCKED),
+ ENUM_BIT(EXTENT_DIO_LOCKED),
ENUM_BIT(EXTENT_NEW),
ENUM_BIT(EXTENT_DELALLOC),
ENUM_BIT(EXTENT_DEFRAG),
@@ -67,6 +68,8 @@ enum {
EXTENT_ADD_INODE_BYTES | \
EXTENT_CLEAR_ALL_BITS)
+#define EXTENT_LOCK_BITS (EXTENT_LOCKED | EXTENT_DIO_LOCKED)
+
/*
* Redefined bits above which are used only in the device allocation tree,
* shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
@@ -134,12 +137,22 @@ const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tre
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
+int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached);
+bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ struct extent_state **cached);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached);
+static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
+{
+ return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
+}
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached);
+static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
+}
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);
@@ -212,5 +225,22 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);
+static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
+}
+
+static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
+}
+
+static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
+}
#endif /* BTRFS_EXTENT_IO_TREE_H */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index feec49e6f9c8..a5966324607d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6551,13 +6551,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
continue;
ret = btrfs_trim_free_extents(device, &group_trimmed);
+
+ trimmed += group_trimmed;
if (ret) {
dev_failed++;
dev_ret = ret;
break;
}
-
- trimmed += group_trimmed;
}
mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c73cd4f89015..39c9677c47d5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -101,6 +101,13 @@ struct btrfs_bio_ctrl {
blk_opf_t opf;
btrfs_bio_end_io_t end_io_func;
struct writeback_control *wbc;
+
+ /*
+ * The sectors of the page which are going to be submitted by
+ * extent_writepage_io().
+ * This is to avoid touching ranges covered by compression/inline.
+ */
+ unsigned long submit_bitmap;
};
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
@@ -117,7 +124,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
btrfs_submit_compressed_read(bbio);
else
- btrfs_submit_bio(bbio, 0);
+ btrfs_submit_bbio(bbio, 0);
/* The bbio is owned by the end_io handler now */
bio_ctrl->bbio = NULL;
@@ -164,11 +171,10 @@ void __cold extent_buffer_free_cachep(void)
kmem_cache_destroy(extent_buffer_cache);
}
-static void process_one_page(struct btrfs_fs_info *fs_info,
- struct page *page, const struct page *locked_page,
- unsigned long page_ops, u64 start, u64 end)
+static void process_one_folio(struct btrfs_fs_info *fs_info,
+ struct folio *folio, const struct folio *locked_folio,
+ unsigned long page_ops, u64 start, u64 end)
{
- struct folio *folio = page_folio(page);
u32 len;
ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
@@ -183,13 +189,13 @@ static void process_one_page(struct btrfs_fs_info *fs_info,
if (page_ops & PAGE_END_WRITEBACK)
btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
- if (page != locked_page && (page_ops & PAGE_UNLOCK))
+ if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}
-static void __process_pages_contig(struct address_space *mapping,
- const struct page *locked_page, u64 start, u64 end,
- unsigned long page_ops)
+static void __process_folios_contig(struct address_space *mapping,
+ const struct folio *locked_folio, u64 start,
+ u64 end, unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
pgoff_t start_index = start >> PAGE_SHIFT;
@@ -207,8 +213,8 @@ static void __process_pages_contig(struct address_space *mapping,
for (i = 0; i < found_folios; i++) {
struct folio *folio = fbatch.folios[i];
- process_one_page(fs_info, &folio->page, locked_page,
- page_ops, start, end);
+ process_one_folio(fs_info, folio, locked_folio,
+ page_ops, start, end);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -216,24 +222,23 @@ static void __process_pages_contig(struct address_space *mapping,
}
static noinline void __unlock_for_delalloc(const struct inode *inode,
- const struct page *locked_page,
+ const struct folio *locked_folio,
u64 start, u64 end)
{
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
- ASSERT(locked_page);
- if (index == locked_page->index && end_index == index)
+ ASSERT(locked_folio);
+ if (index == locked_folio->index && end_index == index)
return;
- __process_pages_contig(inode->i_mapping, locked_page, start, end,
- PAGE_UNLOCK);
+ __process_folios_contig(inode->i_mapping, locked_folio, start, end,
+ PAGE_UNLOCK);
}
-static noinline int lock_delalloc_pages(struct inode *inode,
- const struct page *locked_page,
- u64 start,
- u64 end)
+static noinline int lock_delalloc_folios(struct inode *inode,
+ const struct folio *locked_folio,
+ u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct address_space *mapping = inode->i_mapping;
@@ -243,7 +248,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
u64 processed_end = start;
struct folio_batch fbatch;
- if (index == locked_page->index && index == end_index)
+ if (index == locked_folio->index && index == end_index)
return 0;
folio_batch_init(&fbatch);
@@ -257,23 +262,22 @@ static noinline int lock_delalloc_pages(struct inode *inode,
for (i = 0; i < found_folios; i++) {
struct folio *folio = fbatch.folios[i];
- struct page *page = folio_page(folio, 0);
u32 len = end + 1 - start;
- if (page == locked_page)
+ if (folio == locked_folio)
continue;
if (btrfs_folio_start_writer_lock(fs_info, folio, start,
len))
goto out;
- if (!PageDirty(page) || page->mapping != mapping) {
+ if (!folio_test_dirty(folio) || folio->mapping != mapping) {
btrfs_folio_end_writer_lock(fs_info, folio, start,
len);
goto out;
}
- processed_end = page_offset(page) + PAGE_SIZE - 1;
+ processed_end = folio_pos(folio) + folio_size(folio) - 1;
}
folio_batch_release(&fbatch);
cond_resched();
@@ -283,7 +287,8 @@ static noinline int lock_delalloc_pages(struct inode *inode,
out:
folio_batch_release(&fbatch);
if (processed_end > start)
- __unlock_for_delalloc(inode, locked_page, start, processed_end);
+ __unlock_for_delalloc(inode, locked_folio, start,
+ processed_end);
return -EAGAIN;
}
@@ -304,8 +309,8 @@ out:
*/
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
- struct page *locked_page, u64 *start,
- u64 *end)
+ struct folio *locked_folio,
+ u64 *start, u64 *end)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -323,9 +328,9 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
/* Caller should pass a valid @end to indicate the search range end */
ASSERT(orig_end > orig_start);
- /* The range should at least cover part of the page */
- ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
- orig_end <= page_offset(locked_page)));
+ /* The range should at least cover part of the folio */
+ ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
+ orig_end <= folio_pos(locked_folio)));
again:
/* step one, find a bunch of delalloc bytes starting at start */
delalloc_start = *start;
@@ -342,25 +347,25 @@ again:
}
/*
- * start comes from the offset of locked_page. We have to lock
- * pages in order, so we can't process delalloc bytes before
- * locked_page
+ * start comes from the offset of locked_folio. We have to lock
+ * folios in order, so we can't process delalloc bytes before
+ * locked_folio
*/
if (delalloc_start < *start)
delalloc_start = *start;
/*
- * make sure to limit the number of pages we try to lock down
+ * make sure to limit the number of folios we try to lock down
*/
if (delalloc_end + 1 - delalloc_start > max_bytes)
delalloc_end = delalloc_start + max_bytes - 1;
- /* step two, lock all the pages after the page that has start */
- ret = lock_delalloc_pages(inode, locked_page,
- delalloc_start, delalloc_end);
+ /* step two, lock all the folios after the folio that has start */
+ ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
+ delalloc_end);
ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
- /* some of the pages are gone, lets avoid looping by
+ /* some of the folios are gone, let's avoid looping by
* shortening the size of the delalloc range we're searching
*/
free_extent_state(cached_state);
@@ -384,8 +389,8 @@ again:
unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
- __unlock_for_delalloc(inode, locked_page,
- delalloc_start, delalloc_end);
+ __unlock_for_delalloc(inode, locked_folio, delalloc_start,
+ delalloc_end);
cond_resched();
goto again;
}
@@ -396,40 +401,41 @@ out_failed:
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- const struct page *locked_page,
+ const struct folio *locked_folio,
struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
- __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
- start, end, page_ops);
+ __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
+ end, page_ops);
}
-static bool btrfs_verify_page(struct page *page, u64 start)
+static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
{
- if (!fsverity_active(page->mapping->host) ||
- PageUptodate(page) ||
- start >= i_size_read(page->mapping->host))
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+
+ if (!fsverity_active(folio->mapping->host) ||
+ btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
+ start >= i_size_read(folio->mapping->host))
return true;
- return fsverity_verify_page(page);
+ return fsverity_verify_folio(folio);
}
-static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
+static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(page);
- struct folio *folio = page_folio(page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- ASSERT(page_offset(page) <= start &&
- start + len <= page_offset(page) + PAGE_SIZE);
+ ASSERT(folio_pos(folio) <= start &&
+ start + len <= folio_pos(folio) + PAGE_SIZE);
- if (uptodate && btrfs_verify_page(page, start))
+ if (uptodate && btrfs_verify_folio(folio, start, len))
btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
- if (!btrfs_is_subpage(fs_info, page->mapping))
- unlock_page(page);
+ if (!btrfs_is_subpage(fs_info, folio->mapping))
+ folio_unlock(folio);
else
btrfs_subpage_end_reader(fs_info, folio, start, len);
}
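
The subpage test above decides whether state is tracked per-sector or with
the single folio flag. A reduced sketch of that predicate; the real
btrfs_is_subpage() also consults the mapping, so treat this as an
approximation:

        /*
         * "Subpage" means one page holds several fs sectors, so
         * uptodate/dirty/writeback must be tracked with per-sector
         * bitmaps instead of the plain folio flags.
         */
        static bool is_subpage_case(const struct btrfs_fs_info *fs_info)
        {
                return fs_info->sectorsize < PAGE_SIZE;
        }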
@@ -471,8 +477,8 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
"incomplete page write with offset %zu and length %zu",
fi.offset, fi.length);
- btrfs_finish_ordered_extent(bbio->ordered,
- folio_page(folio, 0), start, len, !error);
+ btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
+ !error);
if (error)
mapping_set_error(folio->mapping, error);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
@@ -481,85 +487,14 @@ static void end_bbio_data_write(struct btrfs_bio *bbio)
bio_put(bio);
}
-/*
- * Record previously processed extent range
- *
- * For endio_readpage_release_extent() to handle a full extent range, reducing
- * the extent io operations.
- */
-struct processed_extent {
- struct btrfs_inode *inode;
- /* Start of the range in @inode */
- u64 start;
- /* End of the range in @inode */
- u64 end;
- bool uptodate;
-};
-
-/*
- * Try to release processed extent range
- *
- * May not release the extent range right now if the current range is
- * contiguous to processed extent.
- *
- * Will release processed extent when any of @inode, @uptodate, the range is
- * no longer contiguous to the processed range.
- *
- * Passing @inode == NULL will force processed extent to be released.
- */
-static void endio_readpage_release_extent(struct processed_extent *processed,
- struct btrfs_inode *inode, u64 start, u64 end,
- bool uptodate)
-{
- struct extent_state *cached = NULL;
- struct extent_io_tree *tree;
-
- /* The first extent, initialize @processed */
- if (!processed->inode)
- goto update;
-
- /*
- * Contiguous to processed extent, just uptodate the end.
- *
- * Several things to notice:
- *
- * - bio can be merged as long as on-disk bytenr is contiguous
- * This means we can have page belonging to other inodes, thus need to
- * check if the inode still matches.
- * - bvec can contain range beyond current page for multi-page bvec
- * Thus we need to do processed->end + 1 >= start check
- */
- if (processed->inode == inode && processed->uptodate == uptodate &&
- processed->end + 1 >= start && end >= processed->end) {
- processed->end = end;
- return;
- }
-
- tree = &processed->inode->io_tree;
- /*
- * Now we don't have range contiguous to the processed range, release
- * the processed range now.
- */
- unlock_extent(tree, processed->start, processed->end, &cached);
-
-update:
- /* Update processed to current range */
- processed->inode = inode;
- processed->start = start;
- processed->end = end;
- processed->uptodate = uptodate;
-}
-
-static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
+static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
ASSERT(folio_test_locked(folio));
if (!btrfs_is_subpage(fs_info, folio->mapping))
return;
ASSERT(folio_test_private(folio));
- btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
+ btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
}
/*
@@ -578,7 +513,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
{
struct btrfs_fs_info *fs_info = bbio->fs_info;
struct bio *bio = &bbio->bio;
- struct processed_extent processed = { 0 };
struct folio_iter fi;
const u32 sectorsize = fs_info->sectorsize;
@@ -642,12 +576,8 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
}
/* Update page status and unlock. */
- end_page_read(folio_page(folio, 0), uptodate, start, len);
- endio_readpage_release_extent(&processed, BTRFS_I(inode),
- start, end, uptodate);
+ end_folio_read(folio, uptodate, start, len);
}
- /* Release the last extent */
- endio_readpage_release_extent(&processed, NULL, 0, 0, false);
bio_put(bio);
}
@@ -737,12 +667,13 @@ static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
}
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
- struct page *page, u64 disk_bytenr,
+ struct folio *folio, u64 disk_bytenr,
unsigned int pg_offset)
{
struct bio *bio = &bio_ctrl->bbio->bio;
struct bio_vec *bvec = bio_last_bvec_all(bio);
const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
+ struct folio *bv_folio = page_folio(bvec->bv_page);
if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
/*
@@ -755,7 +686,7 @@ static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
/*
* The contig check requires the following conditions to be met:
*
- * 1) The pages are belonging to the same inode
+ * 1) The folios belong to the same inode
* This is implied by the call chain.
*
* 2) The range has adjacent logical bytenr
@@ -764,8 +695,8 @@ static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
* This is required for the usage of btrfs_bio->file_offset.
*/
return bio_end_sector(bio) == sector &&
- page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
- page_offset(page) + pg_offset;
+ folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
+ folio_pos(folio) + pg_offset;
}
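
The contiguity test reduces to two comparisons, one in disk space and one in
file space. A standalone restatement with hypothetical names:

        /*
         * A bio may be extended only when both hold:
         *   disk side: the new sector starts exactly where the bio ends;
         *   file side: the new file offset starts right after the range
         *   covered by the last bvec.
         */
        static bool can_extend_bio(sector_t bio_end_sec, sector_t new_sec,
                                   u64 last_range_end, u64 new_range_start)
        {
                return bio_end_sec == new_sec &&
                       last_range_end == new_range_start;
        }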
static void alloc_new_bio(struct btrfs_inode *inode,
@@ -818,17 +749,17 @@ static void alloc_new_bio(struct btrfs_inode *inode,
* The mirror number for this IO should already be initialized in
* @bio_ctrl->mirror_num.
*/
-static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
- u64 disk_bytenr, struct page *page,
+static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
+ u64 disk_bytenr, struct folio *folio,
size_t size, unsigned long pg_offset)
{
- struct btrfs_inode *inode = page_to_inode(page);
+ struct btrfs_inode *inode = folio_to_inode(folio);
ASSERT(pg_offset + size <= PAGE_SIZE);
ASSERT(bio_ctrl->end_io_func);
if (bio_ctrl->bbio &&
- !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
+ !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
submit_one_bio(bio_ctrl);
do {
@@ -837,7 +768,7 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
/* Allocate new bio if needed */
if (!bio_ctrl->bbio) {
alloc_new_bio(inode, bio_ctrl, disk_bytenr,
- page_offset(page) + pg_offset);
+ folio_pos(folio) + pg_offset);
}
/* Cap to the current ordered extent boundary if there is one. */
@@ -847,21 +778,22 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
len = bio_ctrl->len_to_oe_boundary;
}
- if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
+ if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
/* bio full: move on to a new one */
submit_one_bio(bio_ctrl);
continue;
}
if (bio_ctrl->wbc)
- wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
+ wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
+ len);
size -= len;
pg_offset += len;
disk_bytenr += len;
/*
- * len_to_oe_boundary defaults to U32_MAX, which isn't page or
+ * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
* sector aligned. alloc_new_bio() then sets it to the end of
* our ordered extent for writes into zoned devices.
*
@@ -871,15 +803,15 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
* boundary is correct.
*
* When len_to_oe_boundary is U32_MAX, the cap above would
- * result in a 4095 byte IO for the last page right before
- * we hit the bio limit of UINT_MAX. bio_add_page() has all
+ * result in a 4095 byte IO for the last folio right before
+ * we hit the bio limit of UINT_MAX. bio_add_folio() has all
* the checks required to make sure we don't overflow the bio,
* and we should just ignore len_to_oe_boundary completely
* unless we're using it to track an ordered extent.
*
* It's pretty hard to make a bio sized U32_MAX, but it can
* happen when the page cache is able to feed us contiguous
- * pages for large extents.
+ * folios for large extents.
*/
if (bio_ctrl->len_to_oe_boundary != U32_MAX)
bio_ctrl->len_to_oe_boundary -= len;
@@ -952,27 +884,28 @@ int set_folio_extent_mapped(struct folio *folio)
return 0;
}
-void clear_page_extent_mapped(struct page *page)
+void clear_folio_extent_mapped(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct btrfs_fs_info *fs_info;
- ASSERT(page->mapping);
+ ASSERT(folio->mapping);
if (!folio_test_private(folio))
return;
- fs_info = page_to_fs_info(page);
- if (btrfs_is_subpage(fs_info, page->mapping))
+ fs_info = folio_to_fs_info(folio);
+ if (btrfs_is_subpage(fs_info, folio->mapping))
return btrfs_detach_subpage(fs_info, folio);
folio_detach_private(folio);
}
-static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
- u64 start, u64 len, struct extent_map **em_cached)
+static struct extent_map *__get_extent_map(struct inode *inode,
+ struct folio *folio, u64 start,
+ u64 len, struct extent_map **em_cached)
{
struct extent_map *em;
+ struct extent_state *cached_state = NULL;
ASSERT(em_cached);
@@ -988,12 +921,15 @@ static struct extent_map *__get_extent_map(struct inode *inode, struct page *pag
*em_cached = NULL;
}
- em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
+ btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
+ em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
if (!IS_ERR(em)) {
BUG_ON(*em_cached);
refcount_inc(&em->refs);
*em_cached = em;
}
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
+
return em;
}
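
The single-entry extent map cache reused across readahead follows a simple
reuse-or-drop contract. A hedged sketch (hypothetical helper; the real
lookup path also takes the ordered-range lock as shown above):

        /* Reuse the cached map while it still covers @start, otherwise
         * drop the cache reference so the caller does a fresh lookup. */
        static struct extent_map *reuse_cached_em(struct extent_map **cachep,
                                                  u64 start)
        {
                struct extent_map *em = *cachep;

                if (em && start >= em->start && start < extent_map_end(em)) {
                        refcount_inc(&em->refs);        /* ref for the caller */
                        return em;
                }
                if (em) {
                        free_extent_map(em);            /* drop stale cache ref */
                        *cachep = NULL;
                }
                return NULL;
        }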
/*
@@ -1003,12 +939,12 @@ static struct extent_map *__get_extent_map(struct inode *inode, struct page *pag
* XXX JDM: This needs looking at to ensure proper page locking
* return 0 on success, otherwise return error
*/
-static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- u64 start = page_offset(page);
+ u64 start = folio_pos(folio);
const u64 end = start + PAGE_SIZE - 1;
u64 cur = start;
u64 extent_offset;
@@ -1019,25 +955,23 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
size_t pg_offset = 0;
size_t iosize;
size_t blocksize = fs_info->sectorsize;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0) {
- unlock_extent(tree, start, end, NULL);
- unlock_page(page);
+ folio_unlock(folio);
return ret;
}
- if (page->index == last_byte >> PAGE_SHIFT) {
- size_t zero_offset = offset_in_page(last_byte);
+ if (folio->index == last_byte >> folio_shift(folio)) {
+ size_t zero_offset = offset_in_folio(folio, last_byte);
if (zero_offset) {
- iosize = PAGE_SIZE - zero_offset;
- memzero_page(page, zero_offset, iosize);
+ iosize = folio_size(folio) - zero_offset;
+ folio_zero_range(folio, zero_offset, iosize);
}
}
bio_ctrl->end_io_func = end_bbio_data_read;
- begin_page_read(fs_info, page);
+ begin_folio_read(fs_info, folio);
while (cur <= end) {
enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
bool force_bio_submit = false;
@@ -1045,16 +979,15 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
- iosize = PAGE_SIZE - pg_offset;
- memzero_page(page, pg_offset, iosize);
- unlock_extent(tree, cur, cur + iosize - 1, NULL);
- end_page_read(page, true, cur, iosize);
+ iosize = folio_size(folio) - pg_offset;
+ folio_zero_range(folio, pg_offset, iosize);
+ end_folio_read(folio, true, cur, iosize);
break;
}
- em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
+ em = __get_extent_map(inode, folio, cur, end - cur + 1,
+ em_cached);
if (IS_ERR(em)) {
- unlock_extent(tree, cur, end, NULL);
- end_page_read(page, false, cur, end + 1 - cur);
+ end_folio_read(folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
}
extent_offset = cur - em->start;
@@ -1079,8 +1012,8 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* to the same compressed extent (possibly with a different
* offset and/or length, so it either points to the whole extent
* or only part of it), we must make sure we do not submit a
- * single bio to populate the pages for the 2 ranges because
- * this makes the compressed extent read zero out the pages
+ * single bio to populate the folios for the 2 ranges because
+ * this makes the compressed extent read zero out the folios
* belonging to the 2nd range. Imagine the following scenario:
*
* File layout
@@ -1093,13 +1026,13 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* [extent X, compressed length = 4K uncompressed length = 16K]
*
* If the bio to read the compressed extent covers both ranges,
- * it will decompress extent X into the pages belonging to the
+ * it will decompress extent X into the folios belonging to the
* first range and then it will stop, zeroing out the remaining
- * pages that belong to the other range that points to extent X.
+ * folios that belong to the other range that points to extent X.
* So here we make sure we submit 2 bios, one for the first
* range and another one for the third range. Both will target
* the same physical extent from disk, but we can't currently
- * make the compressed bio endio callback populate the pages
+ * make the compressed bio endio callback populate the folios
* for both ranges because each compressed bio is tightly
* coupled with a single extent map, and each range can have
* an extent map with a different offset value relative to the
@@ -1120,18 +1053,16 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
- memzero_page(page, pg_offset, iosize);
+ folio_zero_range(folio, pg_offset, iosize);
- unlock_extent(tree, cur, cur + iosize - 1, NULL);
- end_page_read(page, true, cur, iosize);
+ end_folio_read(folio, true, cur, iosize);
cur = cur + iosize;
pg_offset += iosize;
continue;
}
- /* the get_extent function already copied into the page */
+ /* the get_extent function already copied into the folio */
if (block_start == EXTENT_MAP_INLINE) {
- unlock_extent(tree, cur, cur + iosize - 1, NULL);
- end_page_read(page, true, cur, iosize);
+ end_folio_read(folio, true, cur, iosize);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -1144,8 +1075,8 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
if (force_bio_submit)
submit_one_bio(bio_ctrl);
- submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
- pg_offset);
+ submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
+ pg_offset);
cur = cur + iosize;
pg_offset += iosize;
}
@@ -1155,17 +1086,11 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
int btrfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct btrfs_inode *inode = page_to_inode(page);
- u64 start = page_offset(page);
- u64 end = start + PAGE_SIZE - 1;
struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
struct extent_map *em_cached = NULL;
int ret;
- btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
-
- ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
+ ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
free_extent_map(em_cached);
/*
@@ -1176,28 +1101,8 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
return ret;
}
-static inline void contiguous_readpages(struct page *pages[], int nr_pages,
- u64 start, u64 end,
- struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl,
- u64 *prev_em_start)
-{
- struct btrfs_inode *inode = page_to_inode(pages[0]);
- int index;
-
- ASSERT(em_cached);
-
- btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
-
- for (index = 0; index < nr_pages; index++) {
- btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
- prev_em_start);
- put_page(pages[index]);
- }
-}
-
/*
- * helper for __extent_writepage, doing all of the delayed allocation setup.
+ * Helper for extent_writepage(), doing all of the delayed allocation setup.
*
* This returns 1 if btrfs_run_delalloc_range function did all the work required
* to write the page (copy into inline extent). In this case the IO has
@@ -1207,13 +1112,14 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
* This returns < 0 if there were errors (page still locked)
*/
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
- struct page *page, struct writeback_control *wbc)
+ struct folio *folio,
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
- struct folio *folio = page_folio(page);
- const bool is_subpage = btrfs_is_subpage(fs_info, page->mapping);
- const u64 page_start = page_offset(page);
- const u64 page_end = page_start + PAGE_SIZE - 1;
+ struct writeback_control *wbc = bio_ctrl->wbc;
+ const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
+ const u64 page_start = folio_pos(folio);
+ const u64 page_end = page_start + folio_size(folio) - 1;
/*
* Save the last found delalloc end. As the delalloc end can go beyond
* the page boundary, we cannot rely on the subpage bitmap to locate the
@@ -1225,10 +1131,18 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
u64 delalloc_to_write = 0;
int ret = 0;
- /* Lock all (subpage) delalloc ranges inside the page first. */
+ /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
+ if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
+ ASSERT(fs_info->sectors_per_page > 1);
+ btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
+ } else {
+ bio_ctrl->submit_bitmap = 1;
+ }
+
+ /* Lock all (subpage) delalloc ranges inside the folio first. */
while (delalloc_start < page_end) {
delalloc_end = page_end;
- if (!find_lock_delalloc_range(&inode->vfs_inode, page,
+ if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
&delalloc_start, &delalloc_end)) {
delalloc_start = delalloc_end + 1;
continue;
@@ -1253,7 +1167,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
if (!is_subpage) {
/*
* For non-subpage case, the found delalloc range must
- * cover this page and there must be only one locked
+ * cover this folio and there must be only one locked
* delalloc range.
*/
found_start = page_start;
@@ -1267,7 +1181,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
break;
/*
* The subpage range covers the last sector, the delalloc range may
- * end beyond the page boundary, use the saved delalloc_end
+ * end beyond the folio boundary, use the saved delalloc_end
* instead.
*/
if (found_start + found_len >= page_end)
@@ -1275,7 +1189,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
if (ret >= 0) {
/* No errors hit so far, run the current delalloc range. */
- ret = btrfs_run_delalloc_range(inode, page, found_start,
+ ret = btrfs_run_delalloc_range(inode, folio,
+ found_start,
found_start + found_len - 1,
wbc);
} else {
@@ -1285,30 +1200,27 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
*/
unlock_extent(&inode->io_tree, found_start,
found_start + found_len - 1, NULL);
- __unlock_for_delalloc(&inode->vfs_inode, page, found_start,
+ __unlock_for_delalloc(&inode->vfs_inode, folio,
+ found_start,
found_start + found_len - 1);
}
/*
- * We can hit btrfs_run_delalloc_range() with >0 return value.
- *
- * This happens when either the IO is already done and page
- * unlocked (inline) or the IO submission and page unlock would
- * be handled as async (compression).
- *
- * Inline is only possible for regular sectorsize for now.
- *
- * Compression is possible for both subpage and regular cases,
- * but even for subpage compression only happens for page aligned
- * range, thus the found delalloc range must go beyond current
- * page.
+ * Some ranges are going to be submitted asynchronously
+ * (compression or inline). These ranges have their own control
+ * on when to unlock the folios. We should not touch them
+ * anymore, so clear the range from the submission bitmap.
*/
- if (ret > 0)
- ASSERT(!is_subpage || found_start + found_len >= page_end);
-
+ if (ret > 0) {
+ unsigned int start_bit = (found_start - page_start) >>
+ fs_info->sectorsize_bits;
+ unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
+ page_start) >> fs_info->sectorsize_bits;
+ bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
+ }
/*
- * Above btrfs_run_delalloc_range() may have unlocked the page,
- * thus for the last range, we cannot touch the page anymore.
+ * Above btrfs_run_delalloc_range() may have unlocked the folio,
+ * thus for the last range, we cannot touch the folio anymore.
*/
if (found_start + found_len >= last_delalloc_end + 1)
break;
@@ -1330,10 +1242,10 @@ out:
DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
/*
- * If btrfs_run_dealloc_range() already started I/O and unlocked
- * the pages, we just need to account for them here.
+ * If all ranges are submitted asynchronously, we just need to account
+ * for them here.
*/
- if (ret == 1) {
+ if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
wbc->nr_to_write -= delalloc_to_write;
return 1;
}
@@ -1351,182 +1263,148 @@ out:
}
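
The async-range clearing above maps a byte range to a bit range before
calling bitmap_clear(). A standalone sketch of that mapping (hypothetical
helper; the real code also clamps the end to the folio boundary):

        static void drop_async_range(unsigned long *bitmap, u64 folio_start,
                                     u64 found_start, u32 found_len,
                                     u32 sectorsize_bits)
        {
                unsigned int first = (found_start - folio_start) >>
                                     sectorsize_bits;
                unsigned int last = (found_start + found_len - folio_start) >>
                                    sectorsize_bits;

                /* Async-submitted sectors unlock themselves; never resubmit. */
                bitmap_clear(bitmap, first, last - first);
        }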
/*
- * Find the first byte we need to write.
+ * Return 0 if we have submitted or queued the sector for submission.
+ * Return <0 for critical errors.
*
- * For subpage, one page can contain several sectors, and
- * __extent_writepage_io() will just grab all extent maps in the page
- * range and try to submit all non-inline/non-compressed extents.
- *
- * This is a big problem for subpage, we shouldn't re-submit already written
- * data at all.
- * This function will lookup subpage dirty bit to find which range we really
- * need to submit.
- *
- * Return the next dirty range in [@start, @end).
- * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
+ * The caller must ensure filepos < i_size and handle the filepos >= i_size case itself.
*/
-static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
- struct page *page, u64 *start, u64 *end)
+static int submit_one_sector(struct btrfs_inode *inode,
+ struct folio *folio,
+ u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
+ loff_t i_size)
{
- struct folio *folio = page_folio(page);
- struct btrfs_subpage *subpage = folio_get_private(folio);
- struct btrfs_subpage_info *spi = fs_info->subpage_info;
- u64 orig_start = *start;
- /* Declare as unsigned long so we can use bitmap ops */
- unsigned long flags;
- int range_start_bit;
- int range_end_bit;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map *em;
+ u64 block_start;
+ u64 disk_bytenr;
+ u64 extent_offset;
+ u64 em_end;
+ const u32 sectorsize = fs_info->sectorsize;
- /*
- * For regular sector size == page size case, since one page only
- * contains one sector, we return the page offset directly.
- */
- if (!btrfs_is_subpage(fs_info, page->mapping)) {
- *start = page_offset(page);
- *end = page_offset(page) + PAGE_SIZE;
- return;
- }
+ ASSERT(IS_ALIGNED(filepos, sectorsize));
+
+ /* @filepos >= i_size case should be handled by the caller. */
+ ASSERT(filepos < i_size);
- range_start_bit = spi->dirty_offset +
- (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
+ em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
+ if (IS_ERR(em))
+ return PTR_ERR_OR_ZERO(em);
- /* We should have the page locked, but just in case */
- spin_lock_irqsave(&subpage->lock, flags);
- bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
- spi->dirty_offset + spi->bitmap_nr_bits);
- spin_unlock_irqrestore(&subpage->lock, flags);
+ extent_offset = filepos - em->start;
+ em_end = extent_map_end(em);
+ ASSERT(filepos <= em_end);
+ ASSERT(IS_ALIGNED(em->start, sectorsize));
+ ASSERT(IS_ALIGNED(em->len, sectorsize));
- range_start_bit -= spi->dirty_offset;
- range_end_bit -= spi->dirty_offset;
+ block_start = extent_map_block_start(em);
+ disk_bytenr = extent_map_block_start(em) + extent_offset;
- *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
- *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
+ ASSERT(!extent_map_is_compressed(em));
+ ASSERT(block_start != EXTENT_MAP_HOLE);
+ ASSERT(block_start != EXTENT_MAP_INLINE);
+
+ free_extent_map(em);
+ em = NULL;
+
+ btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
+ /*
+ * The call above should set the writeback flag for the whole folio,
+ * even when only a single subpage sector is involved.
+ * As long as the folio is properly locked and the range is correct,
+ * the folio should always end up with the writeback flag set.
+ */
+ ASSERT(folio_test_writeback(folio));
+
+ /*
+ * Although the PageDirty bit is cleared before entering this
+ * function, the subpage dirty bit is not.
+ * Clear the subpage dirty bit here so that next time we won't
+ * submit the folio for a range already written to disk.
+ */
+ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+ submit_extent_folio(bio_ctrl, disk_bytenr, folio,
+ sectorsize, filepos - folio_pos(folio));
+ return 0;
}
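
The bit-to-file-position mapping used by submit_one_sector() and the submit
bitmap is uniform: bit N covers the N-th sectorsize block of the folio. A
sketch with a hypothetical helper name:

        static unsigned int filepos_to_bit(const struct folio *folio,
                                           u64 filepos, u32 sectorsize_bits)
        {
                /* Offset inside the folio, in units of fs sectors. */
                return (filepos - folio_pos(folio)) >> sectorsize_bits;
        }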
/*
- * helper for __extent_writepage. This calls the writepage start hooks,
+ * Helper for extent_writepage(). This calls the writepage start hooks,
* and does the loop to map the page into extents and bios.
*
* We return 1 if the IO is started and the page is unlocked,
* 0 if all went well (page still locked)
* < 0 if there were errors (page still locked)
*/
-static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
- struct page *page, u64 start, u32 len,
- struct btrfs_bio_ctrl *bio_ctrl,
- loff_t i_size,
- int *nr_ret)
+static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ struct folio *folio,
+ u64 start, u32 len,
+ struct btrfs_bio_ctrl *bio_ctrl,
+ loff_t i_size)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u64 cur = start;
- u64 end = start + len - 1;
- u64 extent_offset;
- u64 block_start;
- struct extent_map *em;
+ unsigned long range_bitmap = 0;
+ bool submitted_io = false;
+ const u64 folio_start = folio_pos(folio);
+ u64 cur;
+ int bit;
int ret = 0;
- int nr = 0;
- ASSERT(start >= page_offset(page) &&
- start + len <= page_offset(page) + PAGE_SIZE);
+ ASSERT(start >= folio_start &&
+ start + len <= folio_start + folio_size(folio));
- ret = btrfs_writepage_cow_fixup(page);
+ ret = btrfs_writepage_cow_fixup(folio);
if (ret) {
/* Fixup worker will requeue */
- redirty_page_for_writepage(bio_ctrl->wbc, page);
- unlock_page(page);
+ folio_redirty_for_writepage(bio_ctrl->wbc, folio);
+ folio_unlock(folio);
return 1;
}
+ for (cur = start; cur < start + len; cur += fs_info->sectorsize)
+ set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
+ bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
+ fs_info->sectors_per_page);
+
bio_ctrl->end_io_func = end_bbio_data_write;
- while (cur <= end) {
- u32 len = end - cur + 1;
- u64 disk_bytenr;
- u64 em_end;
- u64 dirty_range_start = cur;
- u64 dirty_range_end;
- u32 iosize;
+
+ for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
+ cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
if (cur >= i_size) {
- btrfs_mark_ordered_io_finished(inode, page, cur, len,
- true);
+ btrfs_mark_ordered_io_finished(inode, folio, cur,
+ start + len - cur, true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
* But we still need to clear the dirty subpage bit, or
- * the next time the page gets dirtied, we will try to
+ * the next time the folio gets dirtied, we will try to
* write back the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
+ btrfs_folio_clear_dirty(fs_info, folio, cur,
+ start + len - cur);
break;
}
-
- find_next_dirty_byte(fs_info, page, &dirty_range_start,
- &dirty_range_end);
- if (cur < dirty_range_start) {
- cur = dirty_range_start;
- continue;
- }
-
- em = btrfs_get_extent(inode, NULL, cur, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR_OR_ZERO(em);
- goto out_error;
- }
-
- extent_offset = cur - em->start;
- em_end = extent_map_end(em);
- ASSERT(cur <= em_end);
- ASSERT(cur < end);
- ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
- ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
-
- block_start = extent_map_block_start(em);
- disk_bytenr = extent_map_block_start(em) + extent_offset;
-
- ASSERT(!extent_map_is_compressed(em));
- ASSERT(block_start != EXTENT_MAP_HOLE);
- ASSERT(block_start != EXTENT_MAP_INLINE);
-
- /*
- * Note that em_end from extent_map_end() and dirty_range_end from
- * find_next_dirty_byte() are all exclusive
- */
- iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
- free_extent_map(em);
- em = NULL;
-
- /*
- * Although the PageDirty bit might be cleared before entering
- * this function, subpage dirty bit is not cleared.
- * So clear subpage dirty bit here so next time we won't submit
- * page for range already written to disk.
- */
- btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
- btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
- if (!PageWriteback(page)) {
- btrfs_err(inode->root->fs_info,
- "page %lu not writeback, cur %llu end %llu",
- page->index, cur, end);
- }
-
-
- submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
- cur - page_offset(page));
- cur += iosize;
- nr++;
+ ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
+ if (ret < 0)
+ goto out;
+ submitted_io = true;
}
- btrfs_folio_assert_not_dirty(fs_info, page_folio(page), start, len);
- *nr_ret = nr;
- return 0;
-
-out_error:
+ btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
+out:
/*
- * If we finish without problem, we should not only clear page dirty,
- * but also empty subpage dirty bits
+ * If we didn't submit any sector (>= i_size), the folio dirty flag
+ * gets cleared but PAGECACHE_TAG_DIRTY is not (it is only cleared
+ * by folio_start_writeback() if the folio is not dirty).
+ *
+ * Here we set and immediately clear writeback for the range. If the
+ * full folio is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
*/
- *nr_ret = nr;
+ if (!submitted_io) {
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+ btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ }
return ret;
}
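
The submission walk above is restrict-then-iterate: intersect the dirty
bitmap with the requested range, then visit every surviving bit. A minimal
sketch of that shape, assuming the real per-sector work is done elsewhere:

        static void walk_submit_bits(unsigned long *submit,
                                     const unsigned long *range,
                                     unsigned int nbits)
        {
                unsigned int bit;

                /* Keep only sectors that are both dirty and in range. */
                bitmap_and(submit, submit, range, nbits);
                for_each_set_bit(bit, submit, nbits) {
                        /* submit_one_sector() runs here in the real code */
                }
        }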
@@ -1539,62 +1417,65 @@ out_error:
* Return 0 if everything goes well.
* Return <0 for error.
*/
-static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
+static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
- const u64 page_start = page_offset(page);
+ struct inode *inode = folio->mapping->host;
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ const u64 page_start = folio_pos(folio);
int ret;
- int nr = 0;
size_t pg_offset;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
- trace___extent_writepage(page, inode, bio_ctrl->wbc);
+ trace_extent_writepage(folio, inode, bio_ctrl->wbc);
- WARN_ON(!PageLocked(page));
+ WARN_ON(!folio_test_locked(folio));
- pg_offset = offset_in_page(i_size);
- if (page->index > end_index ||
- (page->index == end_index && !pg_offset)) {
+ pg_offset = offset_in_folio(folio, i_size);
+ if (folio->index > end_index ||
+ (folio->index == end_index && !pg_offset)) {
folio_invalidate(folio, 0, folio_size(folio));
folio_unlock(folio);
return 0;
}
- if (page->index == end_index)
- memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
+ if (folio->index == end_index)
+ folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
- ret = set_page_extent_mapped(page);
+ /*
+ * Default to unlocking the whole folio.
+ * The proper bitmap cannot be initialized until writepage_delalloc() runs.
+ */
+ bio_ctrl->submit_bitmap = (unsigned long)-1;
+ ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto done;
- ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
+ ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
if (ret == 1)
return 0;
if (ret)
goto done;
- ret = __extent_writepage_io(BTRFS_I(inode), page, page_offset(page),
- PAGE_SIZE, bio_ctrl, i_size, &nr);
+ ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
+ PAGE_SIZE, bio_ctrl, i_size);
if (ret == 1)
return 0;
bio_ctrl->wbc->nr_to_write--;
done:
- if (nr == 0) {
- /* make sure the mapping tag for page dirty gets cleared */
- set_page_writeback(page);
- end_page_writeback(page);
- }
if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
- PAGE_SIZE, !ret);
- mapping_set_error(page->mapping, ret);
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
+ page_start, PAGE_SIZE, !ret);
+ mapping_set_error(folio->mapping, ret);
}
- btrfs_folio_end_all_writers(inode_to_fs_info(inode), folio);
+ /*
+ * Only unlock the ranges that were submitted, as there can be
+ * async-submitted ranges inside the folio that unlock themselves.
+ */
+ btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
ASSERT(ret <= 0);
return ret;
}
@@ -1846,7 +1727,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
folio_unlock(folio);
}
}
- btrfs_submit_bio(bbio, 0);
+ btrfs_submit_bbio(bbio, 0);
}
/*
@@ -1863,17 +1744,16 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
* Return >=0 for the number of submitted extent buffers.
* Return <0 for fatal error.
*/
-static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
+static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(page);
- struct folio *folio = page_folio(page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
int submitted = 0;
- u64 page_start = page_offset(page);
+ u64 folio_start = folio_pos(folio);
int bit_start = 0;
int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
/* Lock and write each dirty extent buffers in the range */
- while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
+ while (bit_start < fs_info->sectors_per_page) {
struct btrfs_subpage *subpage = folio_get_private(folio);
struct extent_buffer *eb;
unsigned long flags;
@@ -1883,21 +1763,21 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* Take private lock to ensure the subpage won't be detached
* in the meantime.
*/
- spin_lock(&page->mapping->i_private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
break;
}
spin_lock_irqsave(&subpage->lock, flags);
- if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
+ if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
bit_start++;
continue;
}
- start = page_start + bit_start * fs_info->sectorsize;
+ start = folio_start + bit_start * fs_info->sectorsize;
bit_start += sectors_per_node;
/*
@@ -1906,7 +1786,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
*/
eb = find_extent_buffer_nolock(fs_info, start);
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
/*
* The eb has already reached 0 refs thus find_extent_buffer()
@@ -1945,19 +1825,18 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* previous call.
* Return <0 for fatal error.
*/
-static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
+static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
{
struct writeback_control *wbc = ctx->wbc;
- struct address_space *mapping = page->mapping;
- struct folio *folio = page_folio(page);
+ struct address_space *mapping = folio->mapping;
struct extent_buffer *eb;
int ret;
if (!folio_test_private(folio))
return 0;
- if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
- return submit_eb_subpage(page, wbc);
+ if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
+ return submit_eb_subpage(folio, wbc);
spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
@@ -2055,7 +1934,7 @@ retry:
for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
- ret = submit_eb_page(&folio->page, &ctx);
+ ret = submit_eb_page(folio, &ctx);
if (ret == 0)
continue;
if (ret < 0) {
@@ -2109,7 +1988,7 @@ retry:
* extent io tree. Thus we don't want to submit such wild eb
* if the fs already has error.
*
- * We can get ret > 0 from submit_extent_page() indicating how many ebs
+ * We can get ret > 0 from submit_extent_folio() indicating how many ebs
* were submitted. Reset it to 0 to avoid false alerts for the caller.
*/
if (ret > 0)
@@ -2248,7 +2127,7 @@ retry:
continue;
}
- ret = __extent_writepage(&folio->page, bio_ctrl);
+ ret = extent_writepage(folio, bio_ctrl);
if (ret < 0) {
done = 1;
break;
@@ -2295,7 +2174,7 @@ retry:
* already been run (aka, ordered extent inserted) and all pages are still
* locked.
*/
-void extent_write_locked_range(struct inode *inode, const struct page *locked_page,
+void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty)
{
@@ -2319,37 +2198,46 @@ void extent_write_locked_range(struct inode *inode, const struct page *locked_pa
while (cur <= end) {
u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
u32 cur_len = cur_end + 1 - cur;
- struct page *page;
- int nr = 0;
+ struct folio *folio;
- page = find_get_page(mapping, cur >> PAGE_SHIFT);
- ASSERT(PageLocked(page));
- if (pages_dirty && page != locked_page)
- ASSERT(PageDirty(page));
+ folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0);
- ret = __extent_writepage_io(BTRFS_I(inode), page, cur, cur_len,
- &bio_ctrl, i_size, &nr);
+ /*
+ * This shouldn't happen: the folios are pinned and locked, so this
+ * code is just in case and shouldn't actually be run.
+ */
+ if (IS_ERR(folio)) {
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
+ cur, cur_len, false);
+ mapping_set_error(mapping, PTR_ERR(folio));
+ cur = cur_end + 1;
+ continue;
+ }
+
+ ASSERT(folio_test_locked(folio));
+ if (pages_dirty && folio != locked_folio)
+ ASSERT(folio_test_dirty(folio));
+
+ /*
+ * Set the submission bitmap to submit all sectors.
+ * extent_writepage_io() will do the truncation correctly.
+ */
+ bio_ctrl.submit_bitmap = (unsigned long)-1;
+ ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
+ &bio_ctrl, i_size);
if (ret == 1)
goto next_page;
- /* Make sure the mapping tag for page dirty gets cleared. */
- if (nr == 0) {
- struct folio *folio;
-
- folio = page_folio(page);
- btrfs_folio_set_writeback(fs_info, folio, cur, cur_len);
- btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
- }
if (ret) {
- btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
cur, cur_len, !ret);
- mapping_set_error(page->mapping, ret);
+ mapping_set_error(mapping, ret);
}
- btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
+ btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
if (ret < 0)
found_error = true;
next_page:
- put_page(page);
+ folio_put(folio);
cur = cur_end + 1;
}
@@ -2379,18 +2267,12 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb
void btrfs_readahead(struct readahead_control *rac)
{
struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
- struct page *pagepool[16];
+ struct folio *folio;
struct extent_map *em_cached = NULL;
u64 prev_em_start = (u64)-1;
- int nr;
-
- while ((nr = readahead_page_batch(rac, pagepool))) {
- u64 contig_start = readahead_pos(rac);
- u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
- contiguous_readpages(pagepool, nr, contig_start, contig_end,
- &em_cached, &bio_ctrl, &prev_em_start);
- }
+ while ((folio = readahead_folio(rac)) != NULL)
+ btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
@@ -2435,9 +2317,9 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
* to drop the page.
*/
static bool try_release_extent_state(struct extent_io_tree *tree,
- struct page *page, gfp_t mask)
+ struct folio *folio, gfp_t mask)
{
- u64 start = page_offset(page);
+ u64 start = folio_pos(folio);
u64 end = start + PAGE_SIZE - 1;
bool ret;
@@ -2473,11 +2355,11 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
* in the range corresponding to the page, both state records and extent
* map records are removed
*/
-bool try_release_extent_mapping(struct page *page, gfp_t mask)
+bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
{
- u64 start = page_offset(page);
+ u64 start = folio_pos(folio);
u64 end = start + PAGE_SIZE - 1;
- struct btrfs_inode *inode = page_to_inode(page);
+ struct btrfs_inode *inode = folio_to_inode(folio);
struct extent_io_tree *io_tree = &inode->io_tree;
while (start <= end) {
@@ -2546,7 +2428,7 @@ next:
cond_resched();
}
}
- return try_release_extent_state(io_tree, page, mask);
+ return try_release_extent_state(io_tree, folio, mask);
}
static void __free_extent_buffer(struct extent_buffer *eb)
@@ -2572,7 +2454,7 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli
return true;
/*
* Even if there are no eb refs here, we may still have
- * end_page_read() call relying on page::private.
+ * end_folio_read() call relying on page::private.
*/
if (atomic_read(&subpage->readers))
return true;
@@ -3615,7 +3497,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
ASSERT(ret);
}
}
- btrfs_submit_bio(bbio, mirror_num);
+ btrfs_submit_bbio(bbio, mirror_num);
done:
if (wait == WAIT_COMPLETE) {
@@ -4171,17 +4053,17 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
#define GANG_LOOKUP_SIZE 16
static struct extent_buffer *get_next_extent_buffer(
- const struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
+ const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
{
struct extent_buffer *gang[GANG_LOOKUP_SIZE];
struct extent_buffer *found = NULL;
- u64 page_start = page_offset(page);
- u64 cur = page_start;
+ u64 folio_start = folio_pos(folio);
+ u64 cur = folio_start;
- ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
+ ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
lockdep_assert_held(&fs_info->buffer_lock);
- while (cur < page_start + PAGE_SIZE) {
+ while (cur < folio_start + PAGE_SIZE) {
int ret;
int i;
@@ -4193,7 +4075,7 @@ static struct extent_buffer *get_next_extent_buffer(
goto out;
for (i = 0; i < ret; i++) {
/* Already beyond page end */
- if (gang[i]->start >= page_start + PAGE_SIZE)
+ if (gang[i]->start >= folio_start + PAGE_SIZE)
goto out;
/* Found one */
if (gang[i]->start >= bytenr) {
@@ -4207,11 +4089,11 @@ out:
return found;
}
-static int try_release_subpage_extent_buffer(struct page *page)
+static int try_release_subpage_extent_buffer(struct folio *folio)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(page);
- u64 cur = page_offset(page);
- const u64 end = page_offset(page) + PAGE_SIZE;
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+ u64 cur = folio_pos(folio);
+ const u64 end = cur + PAGE_SIZE;
int ret;
while (cur < end) {
@@ -4226,7 +4108,7 @@ static int try_release_subpage_extent_buffer(struct page *page)
* with spinlock rather than RCU.
*/
spin_lock(&fs_info->buffer_lock);
- eb = get_next_extent_buffer(fs_info, page, cur);
+ eb = get_next_extent_buffer(fs_info, folio, cur);
if (!eb) {
/* No more eb in the page range after or at cur */
spin_unlock(&fs_info->buffer_lock);
@@ -4267,31 +4149,30 @@ static int try_release_subpage_extent_buffer(struct page *page)
* Finally, check whether we have cleared folio private: if we have
* released all ebs in the page, the folio private should be cleared now.
*/
- spin_lock(&page->mapping->i_private_lock);
- if (!folio_test_private(page_folio(page)))
+ spin_lock(&folio->mapping->i_private_lock);
+ if (!folio_test_private(folio))
ret = 1;
else
ret = 0;
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return ret;
}
-int try_release_extent_buffer(struct page *page)
+int try_release_extent_buffer(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct extent_buffer *eb;
- if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
- return try_release_subpage_extent_buffer(page);
+ if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
+ return try_release_subpage_extent_buffer(folio);
/*
* We need to make sure nobody is changing folio private, as we rely on
* folio private as the pointer to extent buffer.
*/
- spin_lock(&page->mapping->i_private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return 1;
}
@@ -4306,10 +4187,10 @@ int try_release_extent_buffer(struct page *page)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return 0;
}
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index dceebd76c7d1..8a36117ed453 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -236,11 +236,11 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
kfree(changeset);
}
-bool try_release_extent_mapping(struct page *page, gfp_t mask);
-int try_release_extent_buffer(struct page *page);
+bool try_release_extent_mapping(struct folio *folio, gfp_t mask);
+int try_release_extent_buffer(struct folio *folio);
int btrfs_read_folio(struct file *file, struct folio *folio);
-void extent_write_locked_range(struct inode *inode, const struct page *locked_page,
+void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty);
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
@@ -249,7 +249,7 @@ int btree_write_cache_pages(struct address_space *mapping,
void btrfs_readahead(struct readahead_control *rac);
int set_folio_extent_mapped(struct folio *folio);
int set_page_extent_mapped(struct page *page);
-void clear_page_extent_mapped(struct page *page);
+void clear_folio_extent_mapped(struct folio *folio);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level);
@@ -354,7 +354,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
- const struct page *locked_page,
+ const struct folio *locked_folio,
struct extent_state **cached,
u32 bits_to_clear, unsigned long page_ops);
int extent_invalidate_folio(struct extent_io_tree *tree,
@@ -368,7 +368,7 @@ int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
- struct page *locked_page, u64 *start,
+ struct folio *locked_folio, u64 *start,
u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 10ac5f657e38..25d191f1ac10 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -192,10 +192,13 @@ static inline u64 extent_map_block_len(const struct extent_map *em)
static inline u64 extent_map_block_end(const struct extent_map *em)
{
- if (extent_map_block_start(em) + extent_map_block_len(em) <
- extent_map_block_start(em))
+ const u64 block_start = extent_map_block_start(em);
+ const u64 block_end = block_start + extent_map_block_len(em);
+
+ if (block_end < block_start)
return (u64)-1;
- return extent_map_block_start(em) + extent_map_block_len(em);
+
+ return block_end;
}
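
The refactor makes the u64 overflow check explicit: if start + len wraps,
the unsigned sum comes out smaller than the start. The same guard in
isolation, with hypothetical names:

        static inline u64 saturating_end(u64 start, u64 len)
        {
                u64 end = start + len;

                /* Unsigned wraparound: saturate instead of returning junk. */
                return (end < start) ? (u64)-1 : end;
        }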
static bool can_merge_extent_map(const struct extent_map *em)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 5c342fe1af61..886749b39672 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -151,7 +151,7 @@ static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
* Calculate the total size needed to allocate for an ordered sum structure
* spanning @bytes in the file.
*/
-static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes)
+static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes)
{
return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
}
@@ -1272,7 +1272,7 @@ out:
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const struct btrfs_path *path,
- struct btrfs_file_extent_item *fi,
+ const struct btrfs_file_extent_item *fi,
struct extent_map *em)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
index 557dc43d7142..0e13661a71f3 100644
--- a/fs/btrfs/file-item.h
+++ b/fs/btrfs/file-item.h
@@ -74,7 +74,7 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
unsigned long *csum_bitmap);
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const struct btrfs_path *path,
- struct btrfs_file_extent_item *fi,
+ const struct btrfs_file_extent_item *fi,
struct extent_map *em);
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2aeb8116549c..4fb521d91b06 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1617,7 +1617,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
skip_ilock = true;
current->journal_info = NULL;
- lockdep_assert_held(&inode->vfs_inode.i_rwsem);
+ btrfs_assert_inode_locked(inode);
}
trace_btrfs_sync_file(file, datasync);
@@ -1920,8 +1920,8 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
reserved_space = PAGE_SIZE;
sb_start_pagefault(inode->i_sb);
- page_start = page_offset(page);
- page_end = page_start + PAGE_SIZE - 1;
+ page_start = folio_pos(folio);
+ page_end = page_start + folio_size(folio) - 1;
end = page_end;
/*
@@ -1949,18 +1949,18 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
again:
down_read(&BTRFS_I(inode)->i_mmap_lock);
- lock_page(page);
+ folio_lock(folio);
size = i_size_read(inode);
- if ((page->mapping != inode->i_mapping) ||
+ if ((folio->mapping != inode->i_mapping) ||
(page_start >= size)) {
/* Page got truncated out from underneath us. */
goto out_unlock;
}
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
lock_extent(io_tree, page_start, page_end, &cached_state);
- ret2 = set_page_extent_mapped(page);
+ ret2 = set_folio_extent_mapped(folio);
if (ret2 < 0) {
ret = vmf_error(ret2);
unlock_extent(io_tree, page_start, page_end, &cached_state);
@@ -1974,14 +1974,14 @@ again:
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
if (ordered) {
unlock_extent(io_tree, page_start, page_end, &cached_state);
- unlock_page(page);
+ folio_unlock(folio);
up_read(&BTRFS_I(inode)->i_mmap_lock);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
}
- if (page->index == ((size - 1) >> PAGE_SHIFT)) {
+ if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
reserved_space = round_up(size - page_start, fs_info->sectorsize);
if (reserved_space < PAGE_SIZE) {
end = page_start + reserved_space - 1;
@@ -2011,13 +2011,13 @@ again:
}
/* Page is wholly or partially inside EOF. */
- if (page_start + PAGE_SIZE > size)
- zero_start = offset_in_page(size);
+ if (page_start + folio_size(folio) > size)
+ zero_start = offset_in_folio(folio, size);
else
zero_start = PAGE_SIZE;
if (zero_start != PAGE_SIZE)
- memzero_page(page, zero_start, PAGE_SIZE - zero_start);
+ folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
@@ -2034,7 +2034,7 @@ again:
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
@@ -3485,7 +3485,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
{
struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
- struct btrfs_file_private *private = file->private_data;
+ struct btrfs_file_private *private;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_state *cached_state = NULL;
struct extent_state **delalloc_cached_state;
@@ -3513,7 +3513,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
inode_get_bytes(&inode->vfs_inode) == i_size)
return i_size;
- if (!private) {
+ spin_lock(&inode->lock);
+ private = file->private_data;
+ spin_unlock(&inode->lock);
+
+ if (private && private->owner_task != current) {
+ /*
+	 * Not allocated by us; don't use it, as its cached state is used
+	 * by the task that allocated it, and we want neither to mess with
+	 * it nor to get incorrect results from state that is invalid for
+	 * the current task.
+ */
+ private = NULL;
+ } else if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL);
/*
* No worries if memory allocation failed.
@@ -3521,7 +3533,23 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
* lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
* so everything will still be correct.
*/
- file->private_data = private;
+ if (private) {
+ bool free = false;
+
+ private->owner_task = current;
+
+ spin_lock(&inode->lock);
+ if (file->private_data)
+ free = true;
+ else
+ file->private_data = private;
+ spin_unlock(&inode->lock);
+
+ if (free) {
+ kfree(private);
+ private = NULL;
+ }
+ }
}
if (private)
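
The file.c hunk above closes a race on file->private_data: the structure is
allocated outside inode->lock, installed under it, and freed by whichever task
loses the race. Below is a minimal userspace sketch of that publish-or-discard
pattern, using pthreads in place of the btrfs spinlock; every name in it is
illustrative, not kernel or btrfs API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct private {
	pthread_t owner;	/* stand-in for owner_task */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct private *installed;	/* stand-in for file->private_data */

static struct private *get_private(void)
{
	struct private *priv;

	pthread_mutex_lock(&lock);
	priv = installed;
	pthread_mutex_unlock(&lock);

	if (priv) {
		/* Only the installing thread may reuse the cached state. */
		if (!pthread_equal(priv->owner, pthread_self()))
			return NULL;
		return priv;
	}

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return NULL;	/* allocation failure is non-fatal, as above */
	priv->owner = pthread_self();

	pthread_mutex_lock(&lock);
	if (installed) {
		/* Lost the race: another thread installed first. */
		pthread_mutex_unlock(&lock);
		free(priv);
		return NULL;
	}
	installed = priv;
	pthread_mutex_unlock(&lock);
	return priv;
}

int main(void)
{
	printf("got %p\n", (void *)get_private());
	free(installed);
	return 0;
}

Allocating before taking the lock keeps the (possibly sleeping) allocation out
of the critical section; the cost is an occasionally wasted allocation, which
the losing thread simply frees.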
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 3d6d4b503220..79f64e383edd 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -703,8 +703,8 @@ struct btrfs_fs_info {
* running.
*/
refcount_t scrub_workers_refcnt;
+ u32 sectors_per_page;
struct workqueue_struct *scrub_workers;
- struct btrfs_subpage_info *subpage_info;
struct btrfs_discard_ctl discard_ctl;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 316756ff08ac..29572dfaf878 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -14,7 +14,7 @@
#include "extent-tree.h"
#include "file-item.h"
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
int slot,
const struct fscrypt_str *name)
{
@@ -42,7 +42,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
}
struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
- struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const struct extent_buffer *leaf, int slot, u64 ref_objectid,
const struct fscrypt_str *name)
{
struct btrfs_inode_extref *extref;
@@ -423,9 +423,9 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
return ret;
}
-static inline void btrfs_trace_truncate(struct btrfs_inode *inode,
- struct extent_buffer *leaf,
- struct btrfs_file_extent_item *fi,
+static inline void btrfs_trace_truncate(const struct btrfs_inode *inode,
+ const struct extent_buffer *leaf,
+ const struct btrfs_file_extent_item *fi,
u64 offset, int extent_type, int slot)
{
if (!inode)
diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
index c4aded82709b..c11b97fdccc4 100644
--- a/fs/btrfs/inode-item.h
+++ b/fs/btrfs/inode-item.h
@@ -109,11 +109,11 @@ struct btrfs_inode_extref *btrfs_lookup_inode_extref(
u64 inode_objectid, u64 ref_objectid, int ins_len,
int cow);
-struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
int slot,
const struct fscrypt_str *name);
struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
- struct extent_buffer *leaf, int slot, u64 ref_objectid,
+ const struct extent_buffer *leaf, int slot, u64 ref_objectid,
const struct fscrypt_str *name);
#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b1b6564ab68f..edac499fd83d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -116,7 +116,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
- struct page *locked_page, u64 start,
+ struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc,
bool pages_dirty);
@@ -393,17 +393,17 @@ void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
* extent (btrfs_finish_ordered_io()).
*/
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
- struct page *locked_page,
+ struct folio *locked_folio,
u64 offset, u64 bytes)
{
unsigned long index = offset >> PAGE_SHIFT;
unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
u64 page_start = 0, page_end = 0;
- struct page *page;
+ struct folio *folio;
- if (locked_page) {
- page_start = page_offset(locked_page);
- page_end = page_start + PAGE_SIZE - 1;
+ if (locked_folio) {
+ page_start = folio_pos(locked_folio);
+ page_end = page_start + folio_size(locked_folio) - 1;
}
while (index <= end_index) {
@@ -417,13 +417,13 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
* btrfs_mark_ordered_io_finished() would skip the accounting
* for the page range, and the ordered extent will never finish.
*/
- if (locked_page && index == (page_start >> PAGE_SHIFT)) {
+ if (locked_folio && index == (page_start >> PAGE_SHIFT)) {
index++;
continue;
}
- page = find_get_page(inode->vfs_inode.i_mapping, index);
+ folio = __filemap_get_folio(inode->vfs_inode.i_mapping, index, 0, 0);
index++;
- if (!page)
+ if (IS_ERR(folio))
continue;
/*
@@ -431,14 +431,14 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
* range, then btrfs_mark_ordered_io_finished() will handle
* the ordered extent accounting for the range.
*/
- btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
- page_folio(page), offset, bytes);
- put_page(page);
+ btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
+ offset, bytes);
+ folio_put(folio);
}
- if (locked_page) {
+ if (locked_folio) {
/* The locked page covers the full range, nothing needs to be done */
- if (bytes + offset <= page_start + PAGE_SIZE)
+ if (bytes + offset <= page_start + folio_size(locked_folio))
return;
/*
* In case this page belongs to the delalloc range being
@@ -447,8 +447,9 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
* run_delalloc_range
*/
if (page_start >= offset && page_end <= (offset + bytes - 1)) {
- bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
- offset = page_offset(locked_page) + PAGE_SIZE;
+ bytes = offset + bytes - folio_pos(locked_folio) -
+ folio_size(locked_folio);
+ offset = folio_pos(locked_folio) + folio_size(locked_folio);
}
}
@@ -494,7 +495,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
- struct page *page = NULL;
const u32 sectorsize = trans->fs_info->sectorsize;
char *kaddr;
unsigned long ptr;
@@ -554,12 +554,16 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
- page = find_get_page(inode->vfs_inode.i_mapping, 0);
+ struct folio *folio;
+
+ folio = __filemap_get_folio(inode->vfs_inode.i_mapping,
+ 0, 0, 0);
+ ASSERT(!IS_ERR(folio));
btrfs_set_file_extent_compression(leaf, ei, 0);
- kaddr = kmap_local_page(page);
+ kaddr = kmap_local_folio(folio, 0);
write_extent_buffer(leaf, kaddr, ptr, size);
kunmap_local(kaddr);
- put_page(page);
+ folio_put(folio);
}
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
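
The insert_inline_extent() hunk above swaps kmap_local_page() for
kmap_local_folio(), which takes a byte offset into the folio rather than a
page pointer. A short kernel-context sketch of the map/copy/unmap idiom
follows; the helper name and the assumption that the caller bounds @len by
folio_size() are mine, not part of the patch.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Copy @len bytes from the start of @folio. kmap_local_folio() maps the
 * page containing the given byte offset; the mapping is strictly local
 * and must be torn down with kunmap_local() in the same context.
 */
static void copy_from_folio_start(struct folio *folio, void *dst, size_t len)
{
	char *kaddr = kmap_local_folio(folio, 0);

	memcpy(dst, kaddr, len);	/* assumes len <= folio_size(folio) */
	kunmap_local(kaddr);
}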
@@ -715,7 +719,7 @@ out:
}
static noinline int cow_file_range_inline(struct btrfs_inode *inode,
- struct page *locked_page,
+ struct folio *locked_folio,
u64 offset, u64 end,
size_t compressed_size,
int compress_type,
@@ -740,13 +744,26 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
return ret;
}
+ /*
+ * In the successful case (ret == 0 here), cow_file_range will return 1.
+ *
+ * Quite a bit further up the callstack in extent_writepage(), ret == 1
+	 * is treated as a short-circuited success and does not unlock the folio,
+ * so we must do it here.
+ *
+ * In the failure case, the locked_folio does get unlocked by
+ * btrfs_folio_end_all_writers, which asserts that it is still locked
+ * at that point, so we must *not* unlock it here.
+ *
+ * The other two callsites in compress_file_range do not have a
+ * locked_folio, so they are not relevant to this logic.
+ */
if (ret == 0)
- locked_page = NULL;
+ locked_folio = NULL;
- extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
- clear_flags,
- PAGE_UNLOCK | PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK);
+ extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
+ clear_flags, PAGE_UNLOCK |
+ PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
return ret;
}
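
The comment in the hunk above encodes an unlock-ownership rule: on success the
helper must unlock locked_folio itself (so it clears the pointer and the range
helper unlocks everything), while on failure a later callee unlocks it (so the
pointer is kept and the range helper skips it). Here is a toy, self-contained
model of that rule; nothing in it is btrfs API, and the real semantics of
extent_clear_unlock_delalloc() are only approximated by unlock_range().

#include <stdbool.h>
#include <stdio.h>

struct toy_folio { bool locked; };

/* Unlock every folio in @range except @skip; NULL @skip means unlock all. */
static void unlock_range(struct toy_folio *range, int n, struct toy_folio *skip)
{
	for (int i = 0; i < n; i++)
		if (&range[i] != skip)
			range[i].locked = false;
}

static int try_inline(struct toy_folio *range, int n,
		      struct toy_folio *locked_folio, bool success)
{
	if (success)
		locked_folio = NULL;	/* we own the unlock: don't skip it */
	unlock_range(range, n, locked_folio);
	return success ? 0 : -1;
}

int main(void)
{
	struct toy_folio range[2] = { { true }, { true } };

	try_inline(range, 2, &range[0], true);
	printf("success: folio0 %s\n", range[0].locked ? "locked" : "unlocked");

	range[0].locked = range[1].locked = true;
	try_inline(range, 2, &range[0], false);
	printf("failure: folio0 %s\n", range[0].locked ? "locked" : "unlocked");
	return 0;
}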
@@ -762,7 +779,7 @@ struct async_extent {
struct async_chunk {
struct btrfs_inode *inode;
- struct page *locked_page;
+ struct folio *locked_folio;
u64 start;
u64 end;
blk_opf_t write_flags;
@@ -868,25 +885,25 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
/* If this is a small write inside eof, kick off a defrag */
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
- btrfs_add_inode_defrag(NULL, inode, small_write);
+ btrfs_add_inode_defrag(inode, small_write);
}
static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
int ret = 0;
for (unsigned long index = start >> PAGE_SHIFT;
index <= end_index; index++) {
- page = find_get_page(inode->i_mapping, index);
- if (unlikely(!page)) {
+ folio = __filemap_get_folio(inode->i_mapping, index, 0, 0);
+ if (IS_ERR(folio)) {
if (!ret)
- ret = -ENOENT;
+ ret = PTR_ERR(folio);
continue;
}
- clear_page_dirty_for_io(page);
- put_page(page);
+ folio_clear_dirty_for_io(folio);
+ folio_put(folio);
}
return ret;
}
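
This hunk is the clearest instance of an error-model change repeated
throughout the series: find_get_page() reports a missing page as NULL, while
__filemap_get_folio() reports errors as an ERR_PTR (typically -ENOENT), so
callers switch from NULL checks to IS_ERR()/PTR_ERR(). A before/after sketch
in kernel context, with illustrative function names:

#include <linux/pagemap.h>
#include <linux/err.h>

/* Old style: NULL means "not in the page cache". */
static int drop_dirty_old(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return -ENOENT;
	clear_page_dirty_for_io(page);
	put_page(page);
	return 0;
}

/* New style: errors come back as an ERR_PTR and can be propagated as-is. */
static int drop_dirty_new(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = __filemap_get_folio(mapping, index, 0, 0);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	folio_clear_dirty_for_io(folio);
	folio_put(folio);
	return 0;
}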
@@ -1122,7 +1139,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
static void submit_uncompressed_range(struct btrfs_inode *inode,
struct async_extent *async_extent,
- struct page *locked_page)
+ struct folio *locked_folio)
{
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
@@ -1135,20 +1152,22 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
};
wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
- ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
+ ret = run_delalloc_cow(inode, locked_folio, start, end,
+ &wbc, false);
wbc_detach_inode(&wbc);
if (ret < 0) {
- btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
- if (locked_page) {
- const u64 page_start = page_offset(locked_page);
-
- set_page_writeback(locked_page);
- end_page_writeback(locked_page);
- btrfs_mark_ordered_io_finished(inode, locked_page,
+ btrfs_cleanup_ordered_extents(inode, locked_folio,
+ start, end - start + 1);
+ if (locked_folio) {
+ const u64 page_start = folio_pos(locked_folio);
+
+ folio_start_writeback(locked_folio);
+ folio_end_writeback(locked_folio);
+ btrfs_mark_ordered_io_finished(inode, locked_folio,
page_start, PAGE_SIZE,
!ret);
- mapping_set_error(locked_page->mapping, ret);
- unlock_page(locked_page);
+ mapping_set_error(locked_folio->mapping, ret);
+ folio_unlock(locked_folio);
}
}
}
@@ -1164,7 +1183,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
struct btrfs_key ins;
- struct page *locked_page = NULL;
+ struct folio *locked_folio = NULL;
struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
@@ -1175,19 +1194,20 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
kthread_associate_blkcg(async_chunk->blkcg_css);
/*
- * If async_chunk->locked_page is in the async_extent range, we need to
+ * If async_chunk->locked_folio is in the async_extent range, we need to
* handle it.
*/
- if (async_chunk->locked_page) {
- u64 locked_page_start = page_offset(async_chunk->locked_page);
- u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
+ if (async_chunk->locked_folio) {
+ u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
+ u64 locked_folio_end = locked_folio_start +
+ folio_size(async_chunk->locked_folio) - 1;
- if (!(start >= locked_page_end || end <= locked_page_start))
- locked_page = async_chunk->locked_page;
+ if (!(start >= locked_folio_end || end <= locked_folio_start))
+ locked_folio = async_chunk->locked_folio;
}
if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
- submit_uncompressed_range(inode, async_extent, locked_page);
+ submit_uncompressed_range(inode, async_extent, locked_folio);
goto done;
}
@@ -1202,7 +1222,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
* non-contiguous space for the uncompressed size instead. So
* fall back to uncompressed.
*/
- submit_uncompressed_range(inode, async_extent, locked_page);
+ submit_uncompressed_range(inode, async_extent, locked_folio);
goto done;
}
@@ -1306,21 +1326,21 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* allocate extents on disk for the range, and create ordered data structs
* in ram to track those extents.
*
- * locked_page is the page that writepage had locked already. We use
+ * locked_folio is the folio that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
- * When this function fails, it unlocks all pages except @locked_page.
+ * When this function fails, it unlocks all pages except @locked_folio.
*
* When this function successfully creates an inline extent, it returns 1 and
- * unlocks all pages including locked_page and starts I/O on them.
- * (In reality inline extents are limited to a single page, so locked_page is
+ * unlocks all pages including locked_folio and starts I/O on them.
+ * (In reality inline extents are limited to a single page, so locked_folio is
* the only page handled anyway).
*
* When this function succeed and creates a normal extent, the page locking
* status depends on the passed in flags:
*
* - If @keep_locked is set, all pages are kept locked.
- * - Else all pages except for @locked_page are unlocked.
+ * - Else all pages except for @locked_folio are unlocked.
*
* When a failure happens in the second or later iteration of the
* while-loop, the ordered extents created in previous iterations are kept
@@ -1329,8 +1349,8 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* example.
*/
static noinline int cow_file_range(struct btrfs_inode *inode,
- struct page *locked_page, u64 start, u64 end,
- u64 *done_offset,
+ struct folio *locked_folio, u64 start,
+ u64 end, u64 *done_offset,
bool keep_locked, bool no_inline)
{
struct btrfs_root *root = inode->root;
@@ -1363,7 +1383,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (!no_inline) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, locked_page, start, end, 0,
+ ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
if (ret <= 0) {
/*
@@ -1500,7 +1520,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
page_ops |= PAGE_SET_ORDERED;
extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
- locked_page, &cached,
+ locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC,
page_ops);
if (num_bytes < cur_alloc_size)
@@ -1553,13 +1573,13 @@ out_unlock:
* function.
*
* However, in case of @keep_locked, we still need to unlock the pages
- * (except @locked_page) to ensure all the pages are unlocked.
+ * (except @locked_folio) to ensure all the pages are unlocked.
*/
if (keep_locked && orig_start < start) {
- if (!locked_page)
+ if (!locked_folio)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
- locked_page, NULL, 0, page_ops);
+ locked_folio, NULL, 0, page_ops);
}
/*
@@ -1582,8 +1602,7 @@ out_unlock:
if (extent_reserved) {
extent_clear_unlock_delalloc(inode, start,
start + cur_alloc_size - 1,
- locked_page, &cached,
- clear_bits,
+ locked_folio, &cached, clear_bits,
page_ops);
btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
start += cur_alloc_size;
@@ -1597,7 +1616,7 @@ out_unlock:
*/
if (start < end) {
clear_bits |= EXTENT_CLEAR_DATA_RESV;
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, locked_folio,
&cached, clear_bits, page_ops);
btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
}
@@ -1651,7 +1670,7 @@ static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
- struct page *locked_page, u64 start,
+ struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1691,15 +1710,16 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
INIT_LIST_HEAD(&async_chunk[i].extents);
/*
- * The locked_page comes all the way from writepage and its
- * the original page we were actually given. As we spread
+	 * The locked_folio comes all the way from writepage and it's
+	 * the original folio we were actually given. As we spread
* this large delalloc region across multiple async_chunk
- * structs, only the first struct needs a pointer to locked_page
+ * structs, only the first struct needs a pointer to
+ * locked_folio.
*
* This way we don't need racey decisions about who is supposed
* to unlock it.
*/
- if (locked_page) {
+ if (locked_folio) {
/*
* Depending on the compressibility, the pages might or
* might not go through async. We want all of them to
@@ -1709,12 +1729,12 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
* need full accuracy. Just account the whole thing
* against the first page.
*/
- wbc_account_cgroup_owner(wbc, locked_page,
+ wbc_account_cgroup_owner(wbc, &locked_folio->page,
cur_end - start);
- async_chunk[i].locked_page = locked_page;
- locked_page = NULL;
+ async_chunk[i].locked_folio = locked_folio;
+ locked_folio = NULL;
} else {
- async_chunk[i].locked_page = NULL;
+ async_chunk[i].locked_folio = NULL;
}
if (blkcg_css != blkcg_root_css) {
@@ -1743,7 +1763,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
* covered by the range.
*/
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
- struct page *locked_page, u64 start,
+ struct folio *locked_folio, u64 start,
u64 end, struct writeback_control *wbc,
bool pages_dirty)
{
@@ -1751,20 +1771,21 @@ static noinline int run_delalloc_cow(struct btrfs_inode *inode,
int ret;
while (start <= end) {
- ret = cow_file_range(inode, locked_page, start, end, &done_offset,
- true, false);
+ ret = cow_file_range(inode, locked_folio, start, end,
+ &done_offset, true, false);
if (ret)
return ret;
- extent_write_locked_range(&inode->vfs_inode, locked_page, start,
- done_offset, wbc, pages_dirty);
+ extent_write_locked_range(&inode->vfs_inode, locked_folio,
+ start, done_offset, wbc, pages_dirty);
start = done_offset + 1;
}
return 1;
}
-static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
- const u64 start, const u64 end)
+static int fallback_to_cow(struct btrfs_inode *inode,
+ struct folio *locked_folio, const u64 start,
+ const u64 end)
{
const bool is_space_ino = btrfs_is_free_space_inode(inode);
const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
@@ -1833,7 +1854,8 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
* is written out and unlocked directly and a normal NOCOW extent
* doesn't work.
*/
- ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
+ ret = cow_file_range(inode, locked_folio, start, end, NULL, false,
+ true);
ASSERT(ret != 1);
return ret;
}
@@ -1987,7 +2009,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
* blocks on disk
*/
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
- struct page *locked_page,
+ struct folio *locked_folio,
const u64 start, const u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -2150,8 +2172,8 @@ must_cow:
* NOCOW, following one which needs to be COW'ed
*/
if (cow_start != (u64)-1) {
- ret = fallback_to_cow(inode, locked_page,
- cow_start, found_key.offset - 1);
+ ret = fallback_to_cow(inode, locked_folio, cow_start,
+ found_key.offset - 1);
cow_start = (u64)-1;
if (ret) {
btrfs_dec_nocow_writers(nocow_bg);
@@ -2206,7 +2228,7 @@ must_cow:
btrfs_put_ordered_extent(ordered);
extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
- locked_page, &cached_state,
+ locked_folio, &cached_state,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
PAGE_UNLOCK | PAGE_SET_ORDERED);
@@ -2228,7 +2250,7 @@ must_cow:
if (cow_start != (u64)-1) {
cur_offset = end;
- ret = fallback_to_cow(inode, locked_page, cow_start, end);
+ ret = fallback_to_cow(inode, locked_folio, cow_start, end);
cow_start = (u64)-1;
if (ret)
goto error;
@@ -2255,7 +2277,7 @@ error:
lock_extent(&inode->io_tree, cur_offset, end, &cached);
extent_clear_unlock_delalloc(inode, cur_offset, end,
- locked_page, &cached,
+ locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -2282,39 +2304,39 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
* Function to process delayed allocation (create CoW) for ranges which are
* being touched for the first time.
*/
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
u64 start, u64 end, struct writeback_control *wbc)
{
const bool zoned = btrfs_is_zoned(inode->root->fs_info);
int ret;
/*
- * The range must cover part of the @locked_page, or a return of 1
+ * The range must cover part of the @locked_folio, or a return of 1
* can confuse the caller.
*/
- ASSERT(!(end <= page_offset(locked_page) ||
- start >= page_offset(locked_page) + PAGE_SIZE));
+ ASSERT(!(end <= folio_pos(locked_folio) ||
+ start >= folio_pos(locked_folio) + folio_size(locked_folio)));
if (should_nocow(inode, start, end)) {
- ret = run_delalloc_nocow(inode, locked_page, start, end);
+ ret = run_delalloc_nocow(inode, locked_folio, start, end);
goto out;
}
if (btrfs_inode_can_compress(inode) &&
inode_need_compress(inode, start, end) &&
- run_delalloc_compressed(inode, locked_page, start, end, wbc))
+ run_delalloc_compressed(inode, locked_folio, start, end, wbc))
return 1;
if (zoned)
- ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
+ ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
true);
else
- ret = cow_file_range(inode, locked_page, start, end, NULL,
+ ret = cow_file_range(inode, locked_folio, start, end, NULL,
false, false);
out:
if (ret < 0)
- btrfs_cleanup_ordered_extents(inode, locked_page, start,
+ btrfs_cleanup_ordered_extents(inode, locked_folio, start,
end - start + 1);
return ret;
}
@@ -2690,7 +2712,7 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
- struct page *page;
+ struct folio *folio;
struct btrfs_inode *inode;
struct btrfs_work work;
};
@@ -2702,50 +2724,51 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
- struct page *page = fixup->page;
+ struct folio *folio = fixup->folio;
struct btrfs_inode *inode = fixup->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u64 page_start = page_offset(page);
- u64 page_end = page_offset(page) + PAGE_SIZE - 1;
+ u64 page_start = folio_pos(folio);
+ u64 page_end = folio_pos(folio) + folio_size(folio) - 1;
int ret = 0;
bool free_delalloc_space = true;
/*
* This is similar to page_mkwrite, we need to reserve the space before
- * we take the page lock.
+ * we take the folio lock.
*/
ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
- PAGE_SIZE);
+ folio_size(folio));
again:
- lock_page(page);
+ folio_lock(folio);
/*
- * Before we queued this fixup, we took a reference on the page.
- * page->mapping may go NULL, but it shouldn't be moved to a different
+ * Before we queued this fixup, we took a reference on the folio.
+ * folio->mapping may go NULL, but it shouldn't be moved to a different
* address space.
*/
- if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
+ if (!folio->mapping || !folio_test_dirty(folio) ||
+ !folio_test_checked(folio)) {
/*
* Unfortunately this is a little tricky, either
*
- * 1) We got here and our page had already been dealt with and
+ * 1) We got here and our folio had already been dealt with and
* we reserved our space, thus ret == 0, so we need to just
* drop our space reservation and bail. This can happen the
* first time we come into the fixup worker, or could happen
* while waiting for the ordered extent.
- * 2) Our page was already dealt with, but we happened to get an
+ * 2) Our folio was already dealt with, but we happened to get an
* ENOSPC above from the btrfs_delalloc_reserve_space. In
* this case we obviously don't have anything to release, but
- * because the page was already dealt with we don't want to
- * mark the page with an error, so make sure we're resetting
+ * because the folio was already dealt with we don't want to
+ * mark the folio with an error, so make sure we're resetting
* ret to 0. This is why we have this check _before_ the ret
* check, because we do not want to have a surprise ENOSPC
- * when the page was already properly dealt with.
+ * when the folio was already properly dealt with.
*/
if (!ret) {
- btrfs_delalloc_release_extents(inode, PAGE_SIZE);
+ btrfs_delalloc_release_extents(inode, folio_size(folio));
btrfs_delalloc_release_space(inode, data_reserved,
- page_start, PAGE_SIZE,
+ page_start, folio_size(folio),
true);
}
ret = 0;
@@ -2753,7 +2776,7 @@ again:
}
/*
- * We can't mess with the page state unless it is locked, so now that
+ * We can't mess with the folio state unless it is locked, so now that
* it is locked bail if we failed to make our space reservation.
*/
if (ret)
@@ -2762,14 +2785,14 @@ again:
lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
/* already ordered? We're done */
- if (PageOrdered(page))
+ if (folio_test_ordered(folio))
goto out_reserved;
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
if (ordered) {
unlock_extent(&inode->io_tree, page_start, page_end,
&cached_state);
- unlock_page(page);
+ folio_unlock(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
goto again;
@@ -2787,7 +2810,7 @@ again:
*
* The page was dirty when we started, nothing should have cleaned it.
*/
- BUG_ON(!PageDirty(page));
+ BUG_ON(!folio_test_dirty(folio));
free_delalloc_space = false;
out_reserved:
btrfs_delalloc_release_extents(inode, PAGE_SIZE);
@@ -2801,14 +2824,14 @@ out_page:
* We hit ENOSPC or other errors. Update the mapping and page
* to reflect the errors and clean the page.
*/
- mapping_set_error(page->mapping, ret);
- btrfs_mark_ordered_io_finished(inode, page, page_start,
- PAGE_SIZE, !ret);
- clear_page_dirty_for_io(page);
- }
- btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
- unlock_page(page);
- put_page(page);
+ mapping_set_error(folio->mapping, ret);
+ btrfs_mark_ordered_io_finished(inode, folio, page_start,
+ folio_size(folio), !ret);
+ folio_clear_dirty_for_io(folio);
+ }
+ btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+ folio_unlock(folio);
+ folio_put(folio);
kfree(fixup);
extent_changeset_free(data_reserved);
/*
@@ -2821,33 +2844,34 @@ out_page:
/*
* There are a few paths in the higher layers of the kernel that directly
- * set the page dirty bit without asking the filesystem if it is a
+ * set the folio dirty bit without asking the filesystem if it is a
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
* In our case any range that doesn't have the ORDERED bit set
* hasn't been properly setup for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
- * the delalloc bit and make it safe to write the page.
+ * the delalloc bit and make it safe to write the folio.
*/
-int btrfs_writepage_cow_fixup(struct page *page)
+int btrfs_writepage_cow_fixup(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
struct btrfs_writepage_fixup *fixup;
- /* This page has ordered extent covering it already */
- if (PageOrdered(page))
+ /* This folio has ordered extent covering it already */
+ if (folio_test_ordered(folio))
return 0;
/*
- * PageChecked is set below when we create a fixup worker for this page,
- * don't try to create another one if we're already PageChecked()
+	 * The folio checked flag is set below when we create a fixup worker
+	 * for this folio; don't try to create another one if it's already
+	 * set (folio_test_checked()).
*
- * The extent_io writepage code will redirty the page if we send back
+	 * The extent_io writepage code will redirty the folio if we send back
* EAGAIN.
*/
- if (PageChecked(page))
+ if (folio_test_checked(folio))
return -EAGAIN;
fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
@@ -2857,14 +2881,14 @@ int btrfs_writepage_cow_fixup(struct page *page)
/*
* We are already holding a reference to this inode from
* write_cache_pages. We need to hold it because the space reservation
- * takes place outside of the page lock, and we can't trust
- * page->mapping outside of the page lock.
+ * takes place outside of the folio lock, and we can't trust
+	 * folio->mapping outside of the folio lock.
*/
ihold(inode);
- btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
- get_page(page);
+ btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
+ folio_get(folio);
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
- fixup->page = page;
+ fixup->folio = folio;
fixup->inode = BTRFS_I(inode);
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
@@ -6700,7 +6724,7 @@ static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
}
static noinline int uncompress_inline(struct btrfs_path *path,
- struct page *page,
+ struct folio *folio,
struct btrfs_file_extent_item *item)
{
int ret;
@@ -6722,7 +6746,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
read_extent_buffer(leaf, tmp, ptr, inline_size);
max_size = min_t(unsigned long, PAGE_SIZE, max_size);
- ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
+ ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
+ max_size);
/*
* decompression code contains a memset to fill in any space between the end
@@ -6733,36 +6758,36 @@ static noinline int uncompress_inline(struct btrfs_path *path,
*/
if (max_size < PAGE_SIZE)
- memzero_page(page, max_size, PAGE_SIZE - max_size);
+ folio_zero_range(folio, max_size, PAGE_SIZE - max_size);
kfree(tmp);
return ret;
}
static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
- struct page *page)
+ struct folio *folio)
{
struct btrfs_file_extent_item *fi;
void *kaddr;
size_t copy_size;
- if (!page || PageUptodate(page))
+ if (!folio || folio_test_uptodate(folio))
return 0;
- ASSERT(page_offset(page) == 0);
+ ASSERT(folio_pos(folio) == 0);
fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
- return uncompress_inline(path, page, fi);
+ return uncompress_inline(path, folio, fi);
copy_size = min_t(u64, PAGE_SIZE,
btrfs_file_extent_ram_bytes(path->nodes[0], fi));
- kaddr = kmap_local_page(page);
+ kaddr = kmap_local_folio(folio, 0);
read_extent_buffer(path->nodes[0], kaddr,
btrfs_file_extent_inline_start(fi), copy_size);
kunmap_local(kaddr);
if (copy_size < PAGE_SIZE)
- memzero_page(page, copy_size, PAGE_SIZE - copy_size);
+ folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size);
return 0;
}
@@ -6784,7 +6809,7 @@ static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path
* Return: ERR_PTR on error, non-NULL extent_map on success.
*/
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, u64 start, u64 len)
+ struct folio *folio, u64 start, u64 len)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret = 0;
@@ -6807,7 +6832,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
if (em) {
if (em->start > start || em->start + em->len <= start)
free_extent_map(em);
- else if (em->disk_bytenr == EXTENT_MAP_INLINE && page)
+ else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
free_extent_map(em);
else
goto out;
@@ -6937,7 +6962,7 @@ next:
ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
ASSERT(em->len == fs_info->sectorsize);
- ret = read_inline_extent(inode, path, page);
+ ret = read_inline_extent(inode, path, folio);
if (ret < 0)
goto out;
goto insert;
@@ -7179,13 +7204,12 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
* for subpage spinlock. So this function is to spin and wait for subpage
* spinlock.
*/
-static void wait_subpage_spinlock(struct page *page)
+static void wait_subpage_spinlock(struct folio *folio)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(page);
- struct folio *folio = page_folio(page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
struct btrfs_subpage *subpage;
- if (!btrfs_is_subpage(fs_info, page->mapping))
+ if (!btrfs_is_subpage(fs_info, folio->mapping))
return;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -7214,9 +7238,9 @@ static int btrfs_launder_folio(struct folio *folio)
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- if (try_release_extent_mapping(&folio->page, gfp_flags)) {
- wait_subpage_spinlock(&folio->page);
- clear_page_extent_mapped(&folio->page);
+ if (try_release_extent_mapping(folio, gfp_flags)) {
+ wait_subpage_spinlock(folio);
+ clear_folio_extent_mapped(folio);
return true;
}
return false;
@@ -7276,7 +7300,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* do double ordered extent accounting on the same folio.
*/
folio_wait_writeback(folio);
- wait_subpage_spinlock(&folio->page);
+ wait_subpage_spinlock(folio);
/*
* For subpage case, we have call sites like
@@ -7414,7 +7438,7 @@ next:
btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
__btrfs_release_folio(folio, GFP_NOFS);
- clear_page_extent_mapped(&folio->page);
+ clear_folio_extent_mapped(folio);
}
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
@@ -8951,19 +8975,19 @@ void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
u32 len;
ASSERT(end + 1 - start <= U32_MAX);
len = end + 1 - start;
while (index <= end_index) {
- page = find_get_page(inode->vfs_inode.i_mapping, index);
- ASSERT(page); /* Pages should be in the extent_io_tree */
+ folio = __filemap_get_folio(inode->vfs_inode.i_mapping, index, 0, 0);
+ ASSERT(!IS_ERR(folio)); /* folios should be in the extent_io_tree */
/* This is for data, which doesn't yet support larger folio. */
- ASSERT(folio_order(page_folio(page)) == 0);
- btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
- put_page(page);
+ ASSERT(folio_order(folio) == 0);
+ btrfs_folio_set_writeback(fs_info, folio, start, len);
+ folio_put(folio);
index++;
}
}
@@ -9128,7 +9152,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
atomic_inc(&priv.pending);
- btrfs_submit_bio(bbio, 0);
+ btrfs_submit_bbio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
btrfs_encoded_read_endio, &priv);
@@ -9143,7 +9167,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
} while (disk_io_size);
atomic_inc(&priv.pending);
- btrfs_submit_bio(bbio, 0);
+ btrfs_submit_bbio(bbio, 0);
if (atomic_dec_return(&priv.pending))
io_wait_event(priv.wait, !atomic_read(&priv.pending));
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e0a664b8a46a..226c91fe31a7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -543,13 +543,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info, &range);
- if (ret < 0)
- return ret;
if (copy_to_user(arg, &range, sizeof(range)))
return -EFAULT;
- return 0;
+ return ret;
}
int __pure btrfs_is_empty_uuid(const u8 *uuid)
@@ -1312,12 +1310,12 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
} else {
struct fd src = fdget(fd);
struct inode *src_inode;
- if (!src.file) {
+ if (!fd_file(src)) {
ret = -EINVAL;
goto out_drop_write;
}
- src_inode = file_inode(src.file);
+ src_inode = file_inode(fd_file(src));
if (src_inode->i_sb != file_inode(file)->i_sb) {
btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
"Snapshot src from another FS");
@@ -4765,11 +4763,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return ret;
ret = btrfs_sync_fs(inode->i_sb, 1);
/*
- * The transaction thread may want to do more work,
- * namely it pokes the cleaner kthread that will start
- * processing uncleaned subvols.
+ * There may be work for the cleaner kthread to do (subvolume
+ * deletion, delayed iputs, defrag inodes, etc), so wake it up.
*/
- wake_up_process(fs_info->transaction_kthread);
+ wake_up_process(fs_info->cleaner_kthread);
return ret;
}
case BTRFS_IOC_START_SYNC:
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1e2a68b8f62d..72856f6775f7 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -438,11 +438,11 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
}
int lzo_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
const u32 sectorsize = fs_info->sectorsize;
size_t in_len;
size_t out_len;
@@ -467,22 +467,22 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
out_len = sectorsize;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (unlikely(ret != LZO_E_OK)) {
- struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
btrfs_err(fs_info,
"lzo decompression failed, error %d root %llu inode %llu offset %llu",
ret, btrfs_root_id(inode->root), btrfs_ino(inode),
- page_offset(dest_page));
+ folio_pos(dest_folio));
ret = -EIO;
goto out;
}
ASSERT(out_len <= sectorsize);
- memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, out_len);
/* Early end, considered as an error. */
if (unlikely(out_len < destlen)) {
ret = -EIO;
- memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
+ folio_zero_range(dest_folio, dest_pgoff + out_len, destlen - out_len);
}
out:
return ret;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 82a68394a89c..2104d60c2161 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -332,7 +332,7 @@ static void finish_ordered_fn(struct btrfs_work *work)
}
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
- struct page *page, u64 file_offset,
+ struct folio *folio, u64 file_offset,
u64 len, bool uptodate)
{
struct btrfs_inode *inode = ordered->inode;
@@ -340,10 +340,10 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
lockdep_assert_held(&inode->ordered_tree_lock);
- if (page) {
- ASSERT(page->mapping);
- ASSERT(page_offset(page) <= file_offset);
- ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);
+ if (folio) {
+ ASSERT(folio->mapping);
+ ASSERT(folio_pos(folio) <= file_offset);
+ ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));
/*
* Ordered (Private2) bit indicates whether we still have
@@ -351,10 +351,9 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
*
* If there's no such bit, we need to skip to next range.
*/
- if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
- file_offset, len))
+ if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
return false;
- btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
+ btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
}
/* Now we're fine to update the accounting. */
@@ -398,7 +397,7 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
}
void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
- struct page *page, u64 file_offset, u64 len,
+ struct folio *folio, u64 file_offset, u64 len,
bool uptodate)
{
struct btrfs_inode *inode = ordered->inode;
@@ -408,7 +407,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
- ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
+ ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
+ uptodate);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
/*
@@ -449,8 +449,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
/*
* Mark all ordered extents io inside the specified range finished.
*
- * @page: The involved page for the operation.
- * For uncompressed buffered IO, the page status also needs to be
+ * @folio: The involved folio for the operation.
+ * For uncompressed buffered IO, the folio status also needs to be
* updated to indicate whether the pending ordered io is finished.
* Can be NULL for direct IO and compressed write.
* For these cases, callers are ensured they won't execute the
@@ -460,7 +460,7 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
* extent(s) covering it.
*/
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
+ struct folio *folio, u64 file_offset,
u64 num_bytes, bool uptodate)
{
struct rb_node *node;
@@ -524,7 +524,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
ASSERT(end + 1 - cur < U32_MAX);
len = end + 1 - cur;
- if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
+ if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
btrfs_queue_ordered_fn(entry);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
@@ -1015,7 +1015,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
{
struct rb_node *n;
- ASSERT(inode_is_locked(&inode->vfs_inode));
+ btrfs_assert_inode_locked(inode);
spin_lock_irq(&inode->ordered_tree_lock);
for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 51b9e81726e2..4e152736d06c 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -163,11 +163,11 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
- struct page *page, u64 file_offset, u64 len,
+ struct folio *folio, u64 file_offset, u64 len,
bool uptodate);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
- struct page *page, u64 file_offset,
- u64 num_bytes, bool uptodate);
+ struct folio *folio, u64 file_offset,
+ u64 num_bytes, bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index 6195a2215b8f..9f3ad124104f 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -9,9 +9,8 @@
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
- int ret = 0;
key.objectid = BTRFS_ORPHAN_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
@@ -21,16 +20,13 @@ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
-
- btrfs_free_path(path);
- return ret;
+ return btrfs_insert_empty_item(trans, root, path, &key, 0);
}
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
int ret = 0;
@@ -44,15 +40,9 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
- goto out;
- if (ret) { /* JDM: Really? */
- ret = -ENOENT;
- goto out;
- }
-
- ret = btrfs_del_item(trans, root, path);
+ return ret;
+ if (ret)
+ return -ENOENT;
-out:
- btrfs_free_path(path);
- return ret;
+ return btrfs_del_item(trans, root, path);
}
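
The orphan.c conversion leans on scope-based cleanup: BTRFS_PATH_AUTO_FREE
evidently declares the path so that it is freed automatically on every return,
in the style of the kernel's cleanup helpers. A small userspace sketch of the
underlying compiler mechanism (GCC/Clang cleanup attribute; the macro and
names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* free(NULL) is a no-op, so paths that return before allocating are safe. */
static void free_buf(char **p)
{
	free(*p);
}

#define AUTO_FREE_BUF(name) \
	char *name __attribute__((cleanup(free_buf))) = NULL

static int work(int fail_early)
{
	AUTO_FREE_BUF(buf);	/* freed automatically on every return */

	buf = malloc(64);
	if (!buf)
		return -1;
	if (fail_early)
		return -2;	/* no goto/out label needed */
	snprintf(buf, 64, "did the work");
	puts(buf);
	return 0;
}

int main(void)
{
	work(0);
	work(1);
	return 0;
}

The payoff is visible in btrfs_del_orphan_item() above, where the early
returns no longer need a shared out: label whose only job was to free the
path.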
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index feb8f9f2f358..c297909f1506 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1998,16 +1998,14 @@ out:
*
* Return 0 for success insert
* Return >0 for existing record, caller can free @record safely.
- * Error is not possible
+ * Return <0 for insertion failure, caller can free @record safely.
*/
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record)
{
- struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
- struct rb_node *parent_node = NULL;
- struct btrfs_qgroup_extent_record *entry;
- u64 bytenr = record->bytenr;
+ struct btrfs_qgroup_extent_record *existing, *ret;
+ unsigned long bytenr = record->bytenr;
if (!btrfs_qgroup_full_accounting(fs_info))
return 1;
@@ -2015,26 +2013,24 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
lockdep_assert_held(&delayed_refs->lock);
trace_btrfs_qgroup_trace_extent(fs_info, record);
- while (*p) {
- parent_node = *p;
- entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
- node);
- if (bytenr < entry->bytenr) {
- p = &(*p)->rb_left;
- } else if (bytenr > entry->bytenr) {
- p = &(*p)->rb_right;
- } else {
- if (record->data_rsv && !entry->data_rsv) {
- entry->data_rsv = record->data_rsv;
- entry->data_rsv_refroot =
- record->data_rsv_refroot;
- }
- return 1;
+ xa_lock(&delayed_refs->dirty_extents);
+ existing = xa_load(&delayed_refs->dirty_extents, bytenr);
+ if (existing) {
+ if (record->data_rsv && !existing->data_rsv) {
+ existing->data_rsv = record->data_rsv;
+ existing->data_rsv_refroot = record->data_rsv_refroot;
}
+ xa_unlock(&delayed_refs->dirty_extents);
+ return 1;
+ }
+
+ ret = __xa_store(&delayed_refs->dirty_extents, record->bytenr, record, GFP_ATOMIC);
+ xa_unlock(&delayed_refs->dirty_extents);
+ if (xa_is_err(ret)) {
+ qgroup_mark_inconsistent(fs_info);
+ return xa_err(ret);
}
- rb_link_node(&record->node, parent_node, p);
- rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
return 0;
}
@@ -2141,6 +2137,11 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
if (!record)
return -ENOMEM;
+ if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, bytenr, GFP_NOFS)) {
+ kfree(record);
+ return -ENOMEM;
+ }
+
delayed_refs = &trans->transaction->delayed_refs;
record->bytenr = bytenr;
record->num_bytes = num_bytes;
@@ -2149,7 +2150,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
spin_lock(&delayed_refs->lock);
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
- if (ret > 0) {
+ if (ret) {
+ /* Clean up if insertion fails or item exists. */
+ xa_release(&delayed_refs->dirty_extents, record->bytenr);
kfree(record);
return 0;
}
@@ -3018,7 +3021,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
struct btrfs_qgroup_extent_record *record;
struct btrfs_delayed_ref_root *delayed_refs;
struct ulist *new_roots = NULL;
- struct rb_node *node;
+ unsigned long index;
u64 num_dirty_extents = 0;
u64 qgroup_to_skip;
int ret = 0;
@@ -3028,10 +3031,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
- while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
- record = rb_entry(node, struct btrfs_qgroup_extent_record,
- node);
-
+ xa_for_each(&delayed_refs->dirty_extents, index, record) {
num_dirty_extents++;
trace_btrfs_qgroup_account_extents(fs_info, record);
@@ -3097,7 +3097,7 @@ cleanup:
ulist_free(record->old_roots);
ulist_free(new_roots);
new_roots = NULL;
- rb_erase(node, &delayed_refs->dirty_extent_root);
+ xa_erase(&delayed_refs->dirty_extents, index);
kfree(record);
}
@@ -4874,15 +4874,13 @@ out:
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
struct btrfs_qgroup_extent_record *entry;
- struct btrfs_qgroup_extent_record *next;
- struct rb_root *root;
+ unsigned long index;
- root = &trans->delayed_refs.dirty_extent_root;
- rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+ xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
ulist_free(entry->old_roots);
kfree(entry);
}
- *root = RB_ROOT;
+ xa_destroy(&trans->delayed_refs.dirty_extents);
}
void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
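
The qgroup hunks replace an rb-tree keyed by bytenr with an xarray, split into
two phases: a sleeping xa_reserve() before any spinlock is taken (in
btrfs_qgroup_trace_extent()), then an atomic check-and-store under xa_lock (in
the _nolock helper). A condensed kernel-context sketch of that idiom, with
illustrative names and the same return contract:

#include <linux/xarray.h>

static DEFINE_XARRAY(dirty_extents);

/*
 * Returns 0 on insert, 1 if a record already exists, <0 on error; in the
 * last two cases the caller frees @rec, matching the contract documented
 * in the hunk above.
 */
static int record_dirty_extent(unsigned long bytenr, void *rec)
{
	void *old;

	/* Sleeping reservation outside the lock, so the locked store
	 * below cannot fail with -ENOMEM. */
	if (xa_reserve(&dirty_extents, bytenr, GFP_NOFS))
		return -ENOMEM;

	xa_lock(&dirty_extents);
	if (xa_load(&dirty_extents, bytenr)) {
		xa_unlock(&dirty_extents);
		xa_release(&dirty_extents, bytenr);	/* drop unused slot */
		return 1;
	}
	old = __xa_store(&dirty_extents, bytenr, rec, GFP_ATOMIC);
	xa_unlock(&dirty_extents);
	return xa_is_err(old) ? xa_err(old) : 0;
}

xa_release() only drops the slot if it is still merely reserved, so calling it
after losing to an existing record is safe.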
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index deb479d176a9..98adf4ec7b01 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -125,7 +125,6 @@ struct btrfs_inode;
* Record a dirty extent, and info qgroup to update quota on it
*/
struct btrfs_qgroup_extent_record {
- struct rb_node node;
u64 bytenr;
u64 num_bytes;
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
index e6f7a234b8f6..4c859b550f6c 100644
--- a/fs/btrfs/raid-stripe-tree.c
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -66,6 +66,11 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
if (ret)
break;
+ start += key.offset;
+ length -= key.offset;
+ if (length == 0)
+ break;
+
btrfs_release_path(path);
}
@@ -73,6 +78,36 @@ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 le
return ret;
}
+static int update_raid_extent_item(struct btrfs_trans_handle *trans,
+ struct btrfs_key *key,
+ struct btrfs_stripe_extent *stripe_extent,
+ const size_t item_size)
+{
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ int ret;
+ int slot;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+	ret = btrfs_search_slot(trans, trans->fs_info->stripe_root, key, path,
+				0, 1);
+	if (ret) {
+		btrfs_free_path(path);
+		return (ret == 1 ? ret : -EINVAL);
+	}
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ write_extent_buffer(leaf, stripe_extent, btrfs_item_ptr_offset(leaf, slot),
+ item_size);
+ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ return ret;
+}
+
static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_io_context *bioc)
{
@@ -112,6 +147,9 @@ static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
item_size);
+ if (ret == -EEXIST)
+ ret = update_raid_extent_item(trans, &stripe_key, stripe_extent,
+ item_size);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -172,7 +210,7 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
if (!path)
return -ENOMEM;
- if (stripe->is_scrub) {
+ if (stripe->rst_search_commit_root) {
path->skip_locking = 1;
path->search_commit_root = 1;
}
@@ -245,10 +283,8 @@ int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
out:
if (ret > 0)
ret = -ENOENT;
- if (ret && ret != -EIO && !stripe->is_scrub) {
- if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
- btrfs_print_tree(leaf, 1);
- btrfs_err(fs_info,
+ if (ret && ret != -EIO && !stripe->rst_search_commit_root) {
+ btrfs_debug(fs_info,
"cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s",
logical, logical + *length, stripe->dev->devid,
btrfs_bg_type_to_raid_name(map_type));
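
update_raid_extent_item() gives the insert path above an insert-or-update
shape: attempt the insert, and only on -EEXIST rewrite the existing item in
place. A self-contained userspace sketch of that control flow over a toy
fixed-size table (all names invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS 8

struct item { unsigned long key; char val[16]; int used; };
static struct item table[NSLOTS];

static struct item *lookup(unsigned long key)
{
	for (int i = 0; i < NSLOTS; i++)
		if (table[i].used && table[i].key == key)
			return &table[i];
	return NULL;
}

static int insert_item(unsigned long key, const char *val)
{
	if (lookup(key))
		return -EEXIST;
	for (int i = 0; i < NSLOTS; i++) {
		if (!table[i].used) {
			table[i].used = 1;
			table[i].key = key;
			snprintf(table[i].val, sizeof(table[i].val), "%s", val);
			return 0;
		}
	}
	return -ENOSPC;
}

static int update_item(unsigned long key, const char *val)
{
	struct item *it = lookup(key);

	if (!it)
		return -ENOENT;
	snprintf(it->val, sizeof(it->val), "%s", val);
	return 0;
}

static int insert_or_update(unsigned long key, const char *val)
{
	int ret = insert_item(key, val);

	/* Mirror the -EEXIST fallback added above. */
	if (ret == -EEXIST)
		ret = update_item(key, val);
	return ret;
}

int main(void)
{
	insert_or_update(42, "first");
	insert_or_update(42, "second");
	printf("%s\n", lookup(42)->val);	/* prints "second" */
	return 0;
}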
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index df6b93b927cd..f0824c948cb7 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -66,7 +66,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
struct extent_changeset *data_reserved = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct address_space *mapping = inode->vfs_inode.i_mapping;
int ret;
@@ -83,14 +83,15 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
if (ret)
goto out;
- page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
- btrfs_alloc_write_mask(mapping));
- if (!page) {
+ folio = __filemap_get_folio(mapping, file_offset >> PAGE_SHIFT,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ btrfs_alloc_write_mask(mapping));
+ if (IS_ERR(folio)) {
ret = -ENOMEM;
goto out_unlock;
}
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0)
goto out_unlock;
@@ -115,15 +116,15 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
if (comp_type == BTRFS_COMPRESS_NONE) {
- memcpy_to_page(page, offset_in_page(file_offset), data_start,
- datal);
+ memcpy_to_folio(folio, offset_in_folio(folio, file_offset), data_start,
+ datal);
} else {
- ret = btrfs_decompress(comp_type, data_start, page,
- offset_in_page(file_offset),
+ ret = btrfs_decompress(comp_type, data_start, folio,
+ offset_in_folio(folio, file_offset),
inline_size, datal);
if (ret)
goto out_unlock;
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
}
/*
@@ -139,15 +140,15 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
* So what's in the range [500, 4095] corresponds to zeroes.
*/
if (datal < block_size)
- memzero_page(page, datal, block_size - datal);
+ folio_zero_range(folio, datal, block_size - datal);
- btrfs_folio_set_uptodate(fs_info, page_folio(page), file_offset, block_size);
- btrfs_folio_clear_checked(fs_info, page_folio(page), file_offset, block_size);
- btrfs_folio_set_dirty(fs_info, page_folio(page), file_offset, block_size);
+ btrfs_folio_set_uptodate(fs_info, folio, file_offset, block_size);
+ btrfs_folio_clear_checked(fs_info, folio, file_offset, block_size);
+ btrfs_folio_set_dirty(fs_info, folio, file_offset, block_size);
out_unlock:
- if (page) {
- unlock_page(page);
- put_page(page);
+ if (!IS_ERR(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
}
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, file_offset,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0533d0f82dc9..ea4ed85919ec 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -36,6 +36,7 @@
#include "relocation.h"
#include "super.h"
#include "tree-checker.h"
+#include "raid-stripe-tree.h"
/*
* Relocation overview
@@ -2965,21 +2966,34 @@ static int relocate_one_folio(struct reloc_control *rc,
u64 folio_end;
u64 cur;
int ret;
+ const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
ASSERT(index <= last_index);
folio = filemap_lock_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
- page_cache_sync_readahead(inode->i_mapping, ra, NULL,
- index, last_index + 1 - index);
+
+ /*
+ * On relocation we're doing readahead on the relocation inode,
+ * but if the filesystem is backed by a RAID stripe tree we can
+ * get ENOENT (e.g. due to preallocated extents not being
+ * mapped in the RST) from the lookup.
+ *
+ * But readahead doesn't handle the error and submits invalid
+ * reads to the device, causing assertion failures.
+ */
+ if (!use_rst)
+ page_cache_sync_readahead(inode->i_mapping, ra, NULL,
+ index, last_index + 1 - index);
folio = __filemap_get_folio(inode->i_mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mask);
if (IS_ERR(folio))
return PTR_ERR(folio);
}
WARN_ON(folio_order(folio));
- if (folio_test_readahead(folio))
+ if (folio_test_readahead(folio) && !use_rst)
page_cache_async_readahead(inode->i_mapping, ra, NULL,
folio, last_index + 1 - index);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0de9162ff481..3a3427428074 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -838,7 +838,7 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
bbio->bio.bi_iter.bi_size >= blocksize)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
- btrfs_submit_bio(bbio, mirror);
+ btrfs_submit_bbio(bbio, mirror);
if (wait)
wait_scrub_stripe_io(stripe);
bbio = NULL;
@@ -857,7 +857,7 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
if (bbio) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
- btrfs_submit_bio(bbio, mirror);
+ btrfs_submit_bbio(bbio, mirror);
if (wait)
wait_scrub_stripe_io(stripe);
}
@@ -1683,7 +1683,7 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
bbio->bio.bi_iter.bi_size >= stripe_len)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
- btrfs_submit_bio(bbio, mirror);
+ btrfs_submit_bbio(bbio, mirror);
bbio = NULL;
}
@@ -1694,7 +1694,7 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
(i << fs_info->sectorsize_bits);
int err;
- io_stripe.is_scrub = true;
+ io_stripe.rst_search_commit_root = true;
stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
/*
* For RST cases, we need to manually split the bbio to
@@ -1720,7 +1720,7 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
if (bbio) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
- btrfs_submit_bio(bbio, mirror);
+ btrfs_submit_bbio(bbio, mirror);
}
if (atomic_dec_and_test(&stripe->pending_io)) {
@@ -1776,7 +1776,7 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
mirror = calc_next_mirror(mirror, num_copies);
}
- btrfs_submit_bio(bbio, mirror);
+ btrfs_submit_bbio(bbio, mirror);
}
static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 619fa0b8b3f6..7f48ba6c1c77 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -62,7 +62,7 @@ struct fs_path {
/*
* Average path length does not exceed 200 bytes, we'll have
* better packing in the slab and higher chance to satisfy
- * a allocation later during send.
+ * an allocation later during send.
*/
char pad[256];
};
@@ -1136,7 +1136,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* Start with a small buffer (1 page). If later we end up needing more
* space, which can happen for xattrs on a fs with a leaf size greater
- * then the page size, attempt to increase the buffer. Typically xattr
+ * than the page size, attempt to increase the buffer. Typically xattr
* values are small.
*/
buf_len = PATH_MAX;
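
The comment above this hunk describes a start-small, grow-on-demand buffer for oversized xattrs. A kernel-context sketch of that pattern, assuming the usual kvmalloc()/kvfree() pairing (helper name and sizing are illustrative, not taken from send.c):

/* Grow 'buf' to at least 'needed' bytes, preserving its contents. */
static int grow_buf(char **buf, int *buf_len, int needed)
{
	char *tmp;

	if (needed <= *buf_len)
		return 0;

	tmp = kvmalloc(needed, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	memcpy(tmp, *buf, *buf_len);
	kvfree(*buf);
	*buf = tmp;
	*buf_len = needed;
	return 0;
}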
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index c691784b4660..d5a9cd8a4fd8 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -163,7 +163,7 @@
* thing with or without extra unallocated space.
*/
-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
bool may_use_included)
{
ASSERT(s_info);
@@ -368,7 +368,7 @@ static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
}
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
+ const struct btrfs_space_info *space_info,
enum btrfs_reserve_flush_enum flush)
{
u64 profile;
@@ -437,7 +437,7 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
}
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 bytes,
+ const struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
u64 avail;
@@ -542,8 +542,8 @@ static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}
-static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *info)
+static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *info)
{
const char *flag_str = space_info_flag_to_str(info);
lockdep_assert_held(&info->lock);
@@ -844,9 +844,8 @@ static void flush_space(struct btrfs_fs_info *fs_info,
return;
}
-static inline u64
-btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *space_info)
{
u64 used;
u64 avail;
@@ -871,7 +870,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
}
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
+ const struct btrfs_space_info *space_info)
{
const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
u64 ordered, delalloc;
@@ -1943,7 +1942,7 @@ static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
* Typically with 10 block groups as the target, the discrete values this comes
* out to are 0, 10, 20, ... , 80, 90, and 99.
*/
-static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
+static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info)
{
struct btrfs_fs_info *fs_info = space_info->fs_info;
u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
@@ -1962,7 +1961,7 @@ static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info)
return calc_pct_ratio(want, target);
}
-int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info)
+int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info)
{
lockdep_assert_held(&space_info->lock);
@@ -1985,7 +1984,7 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
return unalloc < data_chunk_size;
}
-static void do_reclaim_sweep(struct btrfs_fs_info *fs_info,
+static void do_reclaim_sweep(const struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, int raid)
{
struct btrfs_block_group *bg;
@@ -2073,7 +2072,7 @@ bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
return ret;
}
-void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
+void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
{
int raid;
struct btrfs_space_info *space_info;
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 5602026c5e14..efbecc0c5258 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -217,7 +217,7 @@ struct reserve_ticket {
wait_queue_head_t wait;
};
-static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
+static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
@@ -258,7 +258,7 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
-u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
@@ -271,7 +271,7 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info);
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, u64 bytes,
+ const struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush);
static inline void btrfs_space_info_free_bytes_may_use(
@@ -293,7 +293,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
-int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
-void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
+int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
+void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
#endif /* BTRFS_SPACE_INFO_H */
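
The space-info changes are a sweep to const-qualify parameters of read-only helpers; combined with __pure (the kernel's wrapper around __attribute__((pure))), this documents that a helper only reads its argument and lets the compiler fold repeated calls. A standalone model of the pattern (types invented for illustration):

struct counters {
	unsigned long used;
	unsigned long reserved;
};

/* Reads its argument, never writes it: const pointer plus pure. */
static __attribute__((pure))
unsigned long counters_total(const struct counters *c)
{
	return c->used + c->reserved;
}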
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 8ddd5fcbeb93..fe4d719d506b 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -64,6 +64,7 @@
* This means a slightly higher tree locking latency.
*/
+#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
if (fs_info->sectorsize >= PAGE_SIZE)
@@ -85,37 +86,7 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space
return true;
return false;
}
-
-void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
-{
- unsigned int cur = 0;
- unsigned int nr_bits;
-
- ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
-
- nr_bits = PAGE_SIZE / sectorsize;
- subpage_info->bitmap_nr_bits = nr_bits;
-
- subpage_info->uptodate_offset = cur;
- cur += nr_bits;
-
- subpage_info->dirty_offset = cur;
- cur += nr_bits;
-
- subpage_info->writeback_offset = cur;
- cur += nr_bits;
-
- subpage_info->ordered_offset = cur;
- cur += nr_bits;
-
- subpage_info->checked_offset = cur;
- cur += nr_bits;
-
- subpage_info->locked_offset = cur;
- cur += nr_bits;
-
- subpage_info->total_nr_bits = cur;
-}
+#endif
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct folio *folio, enum btrfs_subpage_type type)
@@ -163,7 +134,7 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
ASSERT(fs_info->sectorsize < PAGE_SIZE);
real_size = struct_size(ret, bitmaps,
- BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
+ BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
ret = kzalloc(real_size, GFP_NOFS);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -246,7 +217,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
\
btrfs_subpage_assert(fs_info, folio, start, len); \
__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
- __start_bit += fs_info->subpage_info->name##_offset; \
+ __start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
__start_bit; \
})
@@ -351,6 +322,8 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
const int nbits = (len >> fs_info->sectorsize_bits);
unsigned long flags;
+ unsigned int cleared = 0;
+ int bit = start_bit;
bool last;
btrfs_subpage_assert(fs_info, folio, start, len);
@@ -368,11 +341,12 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf
return true;
}
- ASSERT(atomic_read(&subpage->writers) >= nbits);
- /* The target range should have been locked. */
- ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
- bitmap_clear(subpage->bitmaps, start_bit, nbits);
- last = atomic_sub_and_test(nbits, &subpage->writers);
+ for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
+ clear_bit(bit, subpage->bitmaps);
+ cleared++;
+ }
+ ASSERT(atomic_read(&subpage->writers) >= cleared);
+ last = atomic_sub_and_test(cleared, &subpage->writers);
spin_unlock_irqrestore(&subpage->lock, flags);
return last;
}
@@ -404,27 +378,94 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
return 0;
}
+/*
+ * Handle different locked folios:
+ *
+ * - Non-subpage folio
+ * Just unlock it.
+ *
+ * - folio locked but without any subpage locked
+ * This happens either before writepage_delalloc() or the delalloc range is
+ *   already handled by the previous folio.
+ *   We can simply unlock it.
+ *
+ * - folio locked with subpage range locked.
+ *   We go through the locked sectors inside the range, clear their locked
+ *   bitmap, decrease the writer lock count, and unlock the folio if that
+ *   was the last locked range.
+ */
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
+ struct btrfs_subpage *subpage = folio_get_private(folio);
+
+ ASSERT(folio_test_locked(folio));
+
if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
folio_unlock(folio);
return;
}
+
+ /*
+ * For the subpage case, there are two types of locked folios: with or
+ * without a writers count.
+ *
+ * Since we own the page lock, no one else could touch subpage::writers
+ * and we are safe to do several atomic operations without spinlock.
+ */
+ if (atomic_read(&subpage->writers) == 0) {
+ /* No writers, locked by plain lock_page(). */
+ folio_unlock(folio);
+ return;
+ }
+
btrfs_subpage_clamp_range(folio, &start, &len);
if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
folio_unlock(folio);
}
+void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap)
+{
+ struct btrfs_subpage *subpage = folio_get_private(folio);
+ const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
+ unsigned long flags;
+ bool last = false;
+ int cleared = 0;
+ int bit;
+
+ if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
+ folio_unlock(folio);
+ return;
+ }
+
+ if (atomic_read(&subpage->writers) == 0) {
+ /* No writers, locked by plain lock_page(). */
+ folio_unlock(folio);
+ return;
+ }
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
+ if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
+ cleared++;
+ }
+ ASSERT(atomic_read(&subpage->writers) >= cleared);
+ last = atomic_sub_and_test(cleared, &subpage->writers);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+ if (last)
+ folio_unlock(folio);
+}
+
#define subpage_test_bitmap_all_set(fs_info, subpage, name) \
bitmap_test_range_all_set(subpage->bitmaps, \
- fs_info->subpage_info->name##_offset, \
- fs_info->subpage_info->bitmap_nr_bits)
+ fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
+ fs_info->sectors_per_page)
#define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
bitmap_test_range_all_zero(subpage->bitmaps, \
- fs_info->subpage_info->name##_offset, \
- fs_info->subpage_info->bitmap_nr_bits)
+ fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
+ fs_info->sectors_per_page)
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
@@ -729,53 +770,6 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
}
/*
- * Handle different locked pages with different page sizes:
- *
- * - Page locked by plain lock_page()
- * It should not have any subpage::writers count.
- * Can be unlocked by unlock_page().
- * This is the most common locked page for __extent_writepage() called
- * inside extent_write_cache_pages().
- * Rarer cases include the @locked_page from extent_write_locked_range().
- *
- * - Page locked by lock_delalloc_pages()
- * There is only one caller, all pages except @locked_page for
- * extent_write_locked_range().
- * In this case, we have to call subpage helper to handle the case.
- */
-void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len)
-{
- struct btrfs_subpage *subpage;
-
- ASSERT(folio_test_locked(folio));
- /* For non-subpage case, we just unlock the page */
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
- folio_unlock(folio);
- return;
- }
-
- ASSERT(folio_test_private(folio) && folio_get_private(folio));
- subpage = folio_get_private(folio);
-
- /*
- * For subpage case, there are two types of locked page. With or
- * without writers number.
- *
- * Since we own the page lock, no one else could touch subpage::writers
- * and we are safe to do several atomic operations without spinlock.
- */
- if (atomic_read(&subpage->writers) == 0) {
- /* No writers, locked by plain lock_page() */
- folio_unlock(folio);
- return;
- }
-
- /* Have writers, use proper subpage helper to end it */
- btrfs_folio_end_writer_lock(fs_info, folio, start, len);
-}
-
-/*
* This is for folio already locked by plain lock_page()/folio_lock(), which
* doesn't have any subpage awareness.
*
@@ -803,7 +797,7 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
bitmap_set(subpage->bitmaps, start_bit, nbits);
ret = atomic_add_return(nbits, &subpage->writers);
- ASSERT(ret <= fs_info->subpage_info->bitmap_nr_bits);
+ ASSERT(ret <= fs_info->sectors_per_page);
spin_unlock_irqrestore(&subpage->lock, flags);
}
@@ -819,14 +813,13 @@ bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 search_start,
u64 *found_start_ret, u32 *found_len_ret)
{
- struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage = folio_get_private(folio);
+ const u32 sectors_per_page = fs_info->sectors_per_page;
const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
locked, search_start, len);
- const unsigned int locked_bitmap_start = subpage_info->locked_offset;
- const unsigned int locked_bitmap_end = locked_bitmap_start +
- subpage_info->bitmap_nr_bits;
+ const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked;
+ const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page;
unsigned long flags;
int first_zero;
int first_set;
@@ -855,59 +848,21 @@ out:
return found;
}
-/*
- * Unlike btrfs_folio_end_writer_lock() which unlocks a specified subpage range,
- * this ends all writer locked ranges of a page.
- *
- * This is for the locked page of __extent_writepage(), as the locked page
- * can contain several locked subpage ranges.
- */
-void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio)
-{
- struct btrfs_subpage *subpage = folio_get_private(folio);
- u64 folio_start = folio_pos(folio);
- u64 cur = folio_start;
-
- ASSERT(folio_test_locked(folio));
- if (!btrfs_is_subpage(fs_info, folio->mapping)) {
- folio_unlock(folio);
- return;
- }
-
- /* The page has no new delalloc range locked on it. Just plain unlock. */
- if (atomic_read(&subpage->writers) == 0) {
- folio_unlock(folio);
- return;
- }
- while (cur < folio_start + PAGE_SIZE) {
- u64 found_start;
- u32 found_len;
- bool found;
- bool last;
-
- found = btrfs_subpage_find_writer_locked(fs_info, folio, cur,
- &found_start, &found_len);
- if (!found)
- break;
- last = btrfs_subpage_end_and_test_writer(fs_info, folio,
- found_start, found_len);
- if (last) {
- folio_unlock(folio);
- break;
- }
- cur = found_start + found_len;
- }
+#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
+{ \
+ const int sectors_per_page = fs_info->sectors_per_page; \
+ \
+ ASSERT(sectors_per_page < BITS_PER_LONG); \
+ *dst = bitmap_read(subpage->bitmaps, \
+ sectors_per_page * btrfs_bitmap_nr_##name, \
+ sectors_per_page); \
}
-#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \
- bitmap_cut(dst, subpage->bitmaps, 0, \
- subpage_info->name##_offset, subpage_info->bitmap_nr_bits)
-
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
- struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
struct btrfs_subpage *subpage;
+ const u32 sectors_per_page = fs_info->sectors_per_page;
unsigned long uptodate_bitmap;
unsigned long dirty_bitmap;
unsigned long writeback_bitmap;
@@ -916,25 +871,41 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
unsigned long flags;
ASSERT(folio_test_private(folio) && folio_get_private(folio));
- ASSERT(subpage_info);
+ ASSERT(sectors_per_page > 1);
subpage = folio_get_private(folio);
spin_lock_irqsave(&subpage->lock, flags);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
- GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
spin_unlock_irqrestore(&subpage->lock, flags);
dump_page(folio_page(folio, 0), "btrfs subpage dump");
btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
start, len, folio_pos(folio),
- subpage_info->bitmap_nr_bits, &uptodate_bitmap,
- subpage_info->bitmap_nr_bits, &dirty_bitmap,
- subpage_info->bitmap_nr_bits, &writeback_bitmap,
- subpage_info->bitmap_nr_bits, &ordered_bitmap,
- subpage_info->bitmap_nr_bits, &checked_bitmap);
+ sectors_per_page, &uptodate_bitmap,
+ sectors_per_page, &dirty_bitmap,
+ sectors_per_page, &writeback_bitmap,
+ sectors_per_page, &ordered_bitmap,
+ sectors_per_page, &checked_bitmap);
+}
+
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+ struct folio *folio,
+ unsigned long *ret_bitmap)
+{
+ struct btrfs_subpage *subpage;
+ unsigned long flags;
+
+ ASSERT(folio_test_private(folio) && folio_get_private(folio));
+ ASSERT(fs_info->sectors_per_page > 1);
+ subpage = folio_get_private(folio);
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
+ spin_unlock_irqrestore(&subpage->lock, flags);
}
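
The subpage rework drops the per-filesystem btrfs_subpage_info offsets in favor of fixed enum indices: each state owns a run of sectors_per_page bits starting at sectors_per_page * btrfs_bitmap_nr_<name>. A runnable userspace model of the arithmetic (constants chosen for a 64K page / 4K sector setup; enum names mirror the kernel's):

#include <stdio.h>

enum {	/* mirrors the kernel's btrfs_bitmap_nr_* ordering */
	nr_uptodate = 0, nr_dirty, nr_writeback,
	nr_ordered, nr_checked, nr_locked, nr_max
};

int main(void)
{
	unsigned int page_size = 65536, sectorsize = 4096;
	unsigned int sectors_per_page = page_size / sectorsize;	/* 16 */
	unsigned int off_in_page = 12288;	/* fourth 4K sector */

	/* Bit index of this sector inside the "dirty" sub-bitmap. */
	unsigned int bit = sectors_per_page * nr_dirty +
			   off_in_page / sectorsize;

	printf("total bits %u, dirty bit for offset %u: %u\n",
	       sectors_per_page * nr_max, off_in_page, bit);	/* 96, 19 */
	return 0;
}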
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 249396e118d0..4b85d91d0e18 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/sizes.h>
struct address_space;
struct folio;
@@ -18,39 +19,23 @@ struct btrfs_fs_info;
*
* This structure records how they are organized in the bitmap:
*
- * /- uptodate_offset /- dirty_offset /- ordered_offset
+ * /- uptodate /- dirty /- ordered
* | | |
* v v v
* |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
- * |<- bitmap_nr_bits ->|
- * |<----------------- total_nr_bits ------------------>|
+ * |< sectors_per_page >|
+ *
+ * Unlike regular macro-like enums, here we do not use upper-case names, as
+ * these names are used in various macros to define function names.
*/
-struct btrfs_subpage_info {
- /* Number of bits for each bitmap */
- unsigned int bitmap_nr_bits;
-
- /* Total number of bits for the whole bitmap */
- unsigned int total_nr_bits;
-
- /*
- * *_offset indicates where the bitmap starts, the length is always
- * @bitmap_size, which is calculated from PAGE_SIZE / sectorsize.
- */
- unsigned int uptodate_offset;
- unsigned int dirty_offset;
- unsigned int writeback_offset;
- unsigned int ordered_offset;
- unsigned int checked_offset;
-
- /*
- * For locked bitmaps, normally it's subpage representation for folio
- * Locked flag, but metadata is different:
- *
- * - Metadata doesn't really lock the folio
- * It's just to prevent page::private get cleared before the last
- * end_page_read().
- */
- unsigned int locked_offset;
+enum {
+ btrfs_bitmap_nr_uptodate = 0,
+ btrfs_bitmap_nr_dirty,
+ btrfs_bitmap_nr_writeback,
+ btrfs_bitmap_nr_ordered,
+ btrfs_bitmap_nr_checked,
+ btrfs_bitmap_nr_locked,
+ btrfs_bitmap_nr_max
};
/*
@@ -88,9 +73,16 @@ enum btrfs_subpage_type {
BTRFS_SUBPAGE_DATA,
};
+#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping);
+#else
+static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
+ struct address_space *mapping)
+{
+ return false;
+}
+#endif
-void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
struct folio *folio, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
@@ -114,10 +106,11 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
+ struct folio *folio, unsigned long bitmap);
bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 search_start,
u64 *found_start_ret, u32 *found_len_ret);
-void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio);
/*
* Template for subpage related operations.
@@ -164,8 +157,9 @@ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
-void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
- struct folio *folio, u64 start, u32 len);
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+ struct folio *folio,
+ unsigned long *ret_bitmap);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len);
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 865d4af4b303..0a2dbfaaf49e 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -180,7 +180,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
start = 0;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("should have found at least one delalloc");
@@ -211,7 +211,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("couldn't find delalloc in our range");
@@ -245,7 +245,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
}
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (found) {
test_err("found range when we shouldn't have");
@@ -266,7 +266,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
start = test_start;
end = start + PAGE_SIZE - 1;
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("didn't find our range");
@@ -307,7 +307,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
* this changes at any point in the future we will need to fix this
* test's expected behavior.
*/
- found = find_lock_delalloc_range(inode, locked_page, &start,
+ found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
&end);
if (!found) {
test_err("didn't find our range");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5e6fff8e1003..0fc873af891f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -143,8 +143,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(
&transaction->delayed_refs.href_root.rb_root));
- WARN_ON(!RB_EMPTY_ROOT(
- &transaction->delayed_refs.dirty_extent_root));
+ WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
@@ -351,7 +350,7 @@ loop:
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
- cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
+ xa_init(&cur_trans->delayed_refs.dirty_extents);
atomic_set(&cur_trans->delayed_refs.num_entries, 0);
/*
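
The transaction change swaps the rb-root of dirty extents for an xarray, so the teardown check becomes xa_empty() instead of RB_EMPTY_ROOT(). A minimal kernel-context sketch of the lifecycle; indexing by bytenr is an assumption of this sketch, not something the hunk shows:

#include <linux/xarray.h>

static void dirty_extents_lifecycle(void)
{
	struct xarray dirty_extents;

	xa_init(&dirty_extents);
	/* Population would look roughly like:
	 * xa_store(&dirty_extents, bytenr, entry, GFP_NOFS);
	 */
	WARN_ON(!xa_empty(&dirty_extents));	/* teardown invariant */
	xa_destroy(&dirty_extents);
}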
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 634d69964fe4..7b50263723bc 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1517,7 +1517,7 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
extent_err(leaf, slot,
"invalid data ref objectid value %llu",
- dref_root);
+ dref_objectid);
return -EUCLEAN;
}
if (unlikely(!IS_ALIGNED(dref_offset,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f0cf8ce26f01..e2ed2a791f8f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2877,7 +2877,7 @@ void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_extent *tmp;
- ASSERT(inode_is_locked(&ctx->inode->vfs_inode));
+ btrfs_assert_inode_locked(ctx->inode);
list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
list_del_init(&ordered->log_list);
diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c
index fa45b5fb9683..b382a4c443d4 100644
--- a/fs/btrfs/tree-mod-log.c
+++ b/fs/btrfs/tree-mod-log.c
@@ -170,7 +170,7 @@ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
* this until all tree mod log insertions are recorded in the rb tree and then
* write unlock fs_info::tree_mod_log_lock.
*/
-static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent_buffer *eb)
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return true;
@@ -188,7 +188,7 @@ static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, struct extent_buffe
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb)
+ const struct extent_buffer *eb)
{
if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
return false;
@@ -198,7 +198,7 @@ static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
return true;
}
-static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
+static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb,
int slot,
enum btrfs_mod_log_op op)
{
@@ -221,7 +221,7 @@ static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
return tm;
}
-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
+int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
enum btrfs_mod_log_op op)
{
struct tree_mod_elem *tm;
@@ -258,7 +258,7 @@ out_unlock:
return ret;
}
-static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb,
+static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb,
int dst_slot, int src_slot,
int nr_items)
{
@@ -278,7 +278,7 @@ static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb,
return tm;
}
-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
+int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb,
int dst_slot, int src_slot,
int nr_items)
{
@@ -535,7 +535,7 @@ static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info,
}
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
- struct extent_buffer *src,
+ const struct extent_buffer *src,
unsigned long dst_offset,
unsigned long src_offset,
int nr_items)
diff --git a/fs/btrfs/tree-mod-log.h b/fs/btrfs/tree-mod-log.h
index ff00c8e8a393..6308c577a4a4 100644
--- a/fs/btrfs/tree-mod-log.h
+++ b/fs/btrfs/tree-mod-log.h
@@ -37,7 +37,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
struct extent_buffer *new_root,
bool log_removal);
-int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
+int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
enum btrfs_mod_log_op op);
int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb);
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
@@ -47,11 +47,11 @@ struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
- struct extent_buffer *src,
+ const struct extent_buffer *src,
unsigned long dst_offset,
unsigned long src_offset,
int nr_items);
-int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
+int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb,
int dst_slot, int src_slot,
int nr_items);
u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index eae75bb572b9..c6399513c66f 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -3,6 +3,7 @@
* Copyright (C) STRATO AG 2013. All rights reserved.
*/
+#include <linux/kthread.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "messages.h"
@@ -12,6 +13,7 @@
#include "fs.h"
#include "accessors.h"
#include "uuid-tree.h"
+#include "ioctl.h"
static void btrfs_uuid_to_key(const u8 *uuid, u8 type, struct btrfs_key *key)
{
@@ -390,3 +392,180 @@ out:
btrfs_free_path(path);
return ret;
}
+
+int btrfs_uuid_scan_kthread(void *data)
+{
+ struct btrfs_fs_info *fs_info = data;
+ struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_key key;
+ struct btrfs_path *path = NULL;
+ int ret = 0;
+ struct extent_buffer *eb;
+ int slot;
+ struct btrfs_root_item root_item;
+ u32 item_size;
+ struct btrfs_trans_handle *trans = NULL;
+ bool closing = false;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
+
+ while (1) {
+ if (btrfs_fs_closing(fs_info)) {
+ closing = true;
+ break;
+ }
+ ret = btrfs_search_forward(root, &key, path,
+ BTRFS_OLDEST_GENERATION);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ break;
+ }
+
+ if (key.type != BTRFS_ROOT_ITEM_KEY ||
+ (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
+ key.objectid != BTRFS_FS_TREE_OBJECTID) ||
+ key.objectid > BTRFS_LAST_FREE_OBJECTID)
+ goto skip;
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item_size = btrfs_item_size(eb, slot);
+ if (item_size < sizeof(root_item))
+ goto skip;
+
+ read_extent_buffer(eb, &root_item,
+ btrfs_item_ptr_offset(eb, slot),
+ (int)sizeof(root_item));
+ if (btrfs_root_refs(&root_item) == 0)
+ goto skip;
+
+ if (!btrfs_is_empty_uuid(root_item.uuid) ||
+ !btrfs_is_empty_uuid(root_item.received_uuid)) {
+ if (trans)
+ goto update_tree;
+
+ btrfs_release_path(path);
+ /*
+ * 1 - subvol uuid item
+ * 1 - received_subvol uuid item
+ */
+ trans = btrfs_start_transaction(fs_info->uuid_root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ continue;
+ } else {
+ goto skip;
+ }
+update_tree:
+ btrfs_release_path(path);
+ if (!btrfs_is_empty_uuid(root_item.uuid)) {
+ ret = btrfs_uuid_tree_add(trans, root_item.uuid,
+ BTRFS_UUID_KEY_SUBVOL,
+ key.objectid);
+ if (ret < 0) {
+ btrfs_warn(fs_info, "uuid_tree_add failed %d",
+ ret);
+ break;
+ }
+ }
+
+ if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
+ ret = btrfs_uuid_tree_add(trans,
+ root_item.received_uuid,
+ BTRFS_UUID_KEY_RECEIVED_SUBVOL,
+ key.objectid);
+ if (ret < 0) {
+ btrfs_warn(fs_info, "uuid_tree_add failed %d",
+ ret);
+ break;
+ }
+ }
+
+skip:
+ btrfs_release_path(path);
+ if (trans) {
+ ret = btrfs_end_transaction(trans);
+ trans = NULL;
+ if (ret)
+ break;
+ }
+
+ if (key.offset < (u64)-1) {
+ key.offset++;
+ } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
+ key.offset = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ } else if (key.objectid < (u64)-1) {
+ key.offset = 0;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.objectid++;
+ } else {
+ break;
+ }
+ cond_resched();
+ }
+
+out:
+ btrfs_free_path(path);
+ if (trans && !IS_ERR(trans))
+ btrfs_end_transaction(trans);
+ if (ret)
+ btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
+ else if (!closing)
+ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
+ up(&fs_info->uuid_tree_rescan_sem);
+ return 0;
+}
+
+int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *uuid_root;
+ struct task_struct *task;
+ int ret;
+
+ /*
+ * 1 - root node
+ * 1 - root item
+ */
+ trans = btrfs_start_transaction(tree_root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
+ if (IS_ERR(uuid_root)) {
+ ret = PTR_ERR(uuid_root);
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+
+ fs_info->uuid_root = uuid_root;
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ return ret;
+
+ down(&fs_info->uuid_tree_rescan_sem);
+ task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
+ if (IS_ERR(task)) {
+		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
+ btrfs_warn(fs_info, "failed to start uuid_scan task");
+ up(&fs_info->uuid_tree_rescan_sem);
+ return PTR_ERR(task);
+ }
+
+ return 0;
+}
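
The scan loop's tail implements a lexicographic advance over (objectid, type, offset) keys: bump offset first, roll over into type, then into objectid. A runnable userspace model of just that stepping logic:

#include <stdbool.h>
#include <stdint.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* Advance to the next candidate key; false when exhausted. */
static bool key_advance(struct key *k, uint8_t root_item_key)
{
	if (k->offset < UINT64_MAX) {
		k->offset++;
	} else if (k->type < root_item_key) {
		k->offset = 0;
		k->type = root_item_key;
	} else if (k->objectid < UINT64_MAX) {
		k->offset = 0;
		k->type = root_item_key;
		k->objectid++;
	} else {
		return false;	/* key space exhausted */
	}
	return true;
}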
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
index a3f5757cc7cf..c60ad20325cc 100644
--- a/fs/btrfs/uuid-tree.h
+++ b/fs/btrfs/uuid-tree.h
@@ -13,5 +13,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
+int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
+int btrfs_uuid_scan_kthread(void *data);
#endif
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 4042dd6437ae..e97ad824ae16 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -284,7 +284,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
* page and ignore dest, but it must still be non-NULL to avoid the
* counting-only behavior.
* @len: length in bytes to read
- * @dest_page: copy into this page instead of the dest buffer
+ * @dest_folio: copy into this folio instead of the dest buffer
*
* Helper function to read items from the btree. This returns the number of
* bytes read or < 0 for errors. We can return short reads if the items don't
@@ -294,7 +294,7 @@ static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
* Returns number of bytes read or a negative error code on failure.
*/
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
- char *dest, u64 len, struct page *dest_page)
+ char *dest, u64 len, struct folio *dest_folio)
{
struct btrfs_path *path;
struct btrfs_root *root = inode->root;
@@ -314,7 +314,7 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
if (!path)
return -ENOMEM;
- if (dest_page)
+ if (dest_folio)
path->reada = READA_FORWARD;
key.objectid = btrfs_ino(inode);
@@ -371,15 +371,15 @@ static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
copy_offset = offset - key.offset;
if (dest) {
- if (dest_page)
- kaddr = kmap_local_page(dest_page);
+ if (dest_folio)
+ kaddr = kmap_local_folio(dest_folio, 0);
data = btrfs_item_ptr(leaf, path->slots[0], void);
read_extent_buffer(leaf, kaddr + dest_offset,
(unsigned long)data + copy_offset,
copy_bytes);
- if (dest_page)
+ if (dest_folio)
kunmap_local(kaddr);
}
@@ -460,7 +460,7 @@ static int rollback_verity(struct btrfs_inode *inode)
struct btrfs_root *root = inode->root;
int ret;
- ASSERT(inode_is_locked(&inode->vfs_inode));
+ btrfs_assert_inode_locked(inode);
truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
ret = btrfs_drop_verity_items(inode);
@@ -585,7 +585,7 @@ static int btrfs_begin_enable_verity(struct file *filp)
struct btrfs_trans_handle *trans;
int ret;
- ASSERT(inode_is_locked(file_inode(filp)));
+ btrfs_assert_inode_locked(inode);
if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
return -EBUSY;
@@ -633,7 +633,7 @@ static int btrfs_end_enable_verity(struct file *filp, const void *desc,
int ret = 0;
int rollback_ret;
- ASSERT(inode_is_locked(file_inode(filp)));
+ btrfs_assert_inode_locked(inode);
if (desc == NULL)
goto rollback;
@@ -762,7 +762,7 @@ again:
* [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
*/
ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
- folio_address(folio), PAGE_SIZE, &folio->page);
+ folio_address(folio), PAGE_SIZE, folio);
if (ret < 0) {
folio_put(folio);
return ERR_PTR(ret);
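
read_key_bytes() also swaps kmap_local_page() for kmap_local_folio(), whose second argument is a byte offset into the folio rather than a separate page pointer. The pairing, sketched for the order-0 folios used here (memcpy stands in for the read_extent_buffer() call):

char *kaddr;

kaddr = kmap_local_folio(dest_folio, 0);	/* offset 0 == first page */
memcpy(kaddr + dest_offset, data, copy_bytes);
kunmap_local(kaddr);	/* unmap in reverse order of mapping */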
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index fcedc43ef291..8f340ad1d938 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -476,6 +476,8 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
if (IS_ERR(*bdev_file)) {
ret = PTR_ERR(*bdev_file);
+ btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
+ device_path, flags, ret);
goto error;
}
bdev = file_bdev(*bdev_file);
@@ -4784,183 +4786,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_uuid_scan_kthread(void *data)
-{
- struct btrfs_fs_info *fs_info = data;
- struct btrfs_root *root = fs_info->tree_root;
- struct btrfs_key key;
- struct btrfs_path *path = NULL;
- int ret = 0;
- struct extent_buffer *eb;
- int slot;
- struct btrfs_root_item root_item;
- u32 item_size;
- struct btrfs_trans_handle *trans = NULL;
- bool closing = false;
-
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- key.objectid = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = 0;
-
- while (1) {
- if (btrfs_fs_closing(fs_info)) {
- closing = true;
- break;
- }
- ret = btrfs_search_forward(root, &key, path,
- BTRFS_OLDEST_GENERATION);
- if (ret) {
- if (ret > 0)
- ret = 0;
- break;
- }
-
- if (key.type != BTRFS_ROOT_ITEM_KEY ||
- (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
- key.objectid != BTRFS_FS_TREE_OBJECTID) ||
- key.objectid > BTRFS_LAST_FREE_OBJECTID)
- goto skip;
-
- eb = path->nodes[0];
- slot = path->slots[0];
- item_size = btrfs_item_size(eb, slot);
- if (item_size < sizeof(root_item))
- goto skip;
-
- read_extent_buffer(eb, &root_item,
- btrfs_item_ptr_offset(eb, slot),
- (int)sizeof(root_item));
- if (btrfs_root_refs(&root_item) == 0)
- goto skip;
-
- if (!btrfs_is_empty_uuid(root_item.uuid) ||
- !btrfs_is_empty_uuid(root_item.received_uuid)) {
- if (trans)
- goto update_tree;
-
- btrfs_release_path(path);
- /*
- * 1 - subvol uuid item
- * 1 - received_subvol uuid item
- */
- trans = btrfs_start_transaction(fs_info->uuid_root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
- continue;
- } else {
- goto skip;
- }
-update_tree:
- btrfs_release_path(path);
- if (!btrfs_is_empty_uuid(root_item.uuid)) {
- ret = btrfs_uuid_tree_add(trans, root_item.uuid,
- BTRFS_UUID_KEY_SUBVOL,
- key.objectid);
- if (ret < 0) {
- btrfs_warn(fs_info, "uuid_tree_add failed %d",
- ret);
- break;
- }
- }
-
- if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
- ret = btrfs_uuid_tree_add(trans,
- root_item.received_uuid,
- BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- key.objectid);
- if (ret < 0) {
- btrfs_warn(fs_info, "uuid_tree_add failed %d",
- ret);
- break;
- }
- }
-
-skip:
- btrfs_release_path(path);
- if (trans) {
- ret = btrfs_end_transaction(trans);
- trans = NULL;
- if (ret)
- break;
- }
-
- if (key.offset < (u64)-1) {
- key.offset++;
- } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
- key.offset = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- } else if (key.objectid < (u64)-1) {
- key.offset = 0;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.objectid++;
- } else {
- break;
- }
- cond_resched();
- }
-
-out:
- btrfs_free_path(path);
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (ret)
- btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
- else if (!closing)
- set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
- up(&fs_info->uuid_tree_rescan_sem);
- return 0;
-}
-
-int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_root *uuid_root;
- struct task_struct *task;
- int ret;
-
- /*
- * 1 - root node
- * 1 - root item
- */
- trans = btrfs_start_transaction(tree_root, 2);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
- if (IS_ERR(uuid_root)) {
- ret = PTR_ERR(uuid_root);
- btrfs_abort_transaction(trans, ret);
- btrfs_end_transaction(trans);
- return ret;
- }
-
- fs_info->uuid_root = uuid_root;
-
- ret = btrfs_commit_transaction(trans);
- if (ret)
- return ret;
-
- down(&fs_info->uuid_tree_rescan_sem);
- task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
- if (IS_ERR(task)) {
- /* fs_info->update_uuid_tree_gen remains 0 in all error case */
- btrfs_warn(fs_info, "failed to start uuid_scan task");
- up(&fs_info->uuid_tree_rescan_sem);
- return PTR_ERR(task);
- }
-
- return 0;
-}
-
/*
* shrinking a device means finding all of the device extents past
* the new size, and then following the back refs to the chunks.
@@ -5956,11 +5781,31 @@ void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info)
write_unlock(&fs_info->mapping_tree_lock);
}
+static int btrfs_chunk_map_num_copies(const struct btrfs_chunk_map *map)
+{
+ enum btrfs_raid_types index = btrfs_bg_flags_to_raid_index(map->type);
+
+ if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ return 2;
+
+ /*
+	 * There could be two corrupted data stripes, we need to retry in a
+	 * loop in order to rebuild the correct data.
+ *
+ * Fail a stripe at a time on every retry except the stripe under
+ * reconstruction.
+ */
+ if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ return map->num_stripes;
+
+ /* Non-RAID56, use their ncopies from btrfs_raid_array. */
+ return btrfs_raid_array[index].ncopies;
+}
+
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
struct btrfs_chunk_map *map;
- enum btrfs_raid_types index;
- int ret = 1;
+ int ret;
map = btrfs_get_chunk_map(fs_info, logical, len);
if (IS_ERR(map))
@@ -5972,22 +5817,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
*/
return 1;
- index = btrfs_bg_flags_to_raid_index(map->type);
-
- /* Non-RAID56, use their ncopies from btrfs_raid_array. */
- if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
- ret = btrfs_raid_array[index].ncopies;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
- ret = 2;
- else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- /*
- * There could be two corrupted data stripes, we need
- * to loop retry in order to rebuild the correct data.
- *
- * Fail a stripe at a time on every retry except the
- * stripe under reconstruction.
- */
- ret = map->num_stripes;
+ ret = btrfs_chunk_map_num_copies(map);
btrfs_free_chunk_map(map);
return ret;
}
@@ -6637,14 +6467,14 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom.stripe_index = 0;
io_geom.op = op;
- num_copies = btrfs_num_copies(fs_info, logical, fs_info->sectorsize);
- if (io_geom.mirror_num > num_copies)
- return -EINVAL;
-
map = btrfs_get_chunk_map(fs_info, logical, *length);
if (IS_ERR(map))
return PTR_ERR(map);
+ num_copies = btrfs_chunk_map_num_copies(map);
+ if (io_geom.mirror_num > num_copies)
+ return -EINVAL;
+
map_offset = logical - map->start;
io_geom.raid56_full_stripe_start = (u64)-1;
max_len = btrfs_max_io_len(map, map_offset, &io_geom);
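
Factoring out btrfs_chunk_map_num_copies() lets btrfs_map_block() validate mirror_num against the chunk map it already fetched instead of doing a second lookup via btrfs_num_copies(). A userspace model of the copy-count rules (the flag bits are illustrative, not the kernel's values):

#define BG_RAID5	(1u << 0)	/* illustrative flag bits */
#define BG_RAID6	(1u << 1)

static int chunk_num_copies(unsigned int type, int num_stripes, int ncopies)
{
	if (type & BG_RAID5)
		return 2;		/* data, or rebuild from parity */
	if (type & BG_RAID6)
		return num_stripes;	/* fail one stripe per retry pass */
	return ncopies;			/* profile's ncopies, e.g. RAID1 -> 2 */
}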
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 37a09ebb34dd..03d2d60afe0c 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -444,7 +444,7 @@ struct btrfs_io_stripe {
/* Block mapping. */
u64 physical;
u64 length;
- bool is_scrub;
+ bool rst_search_commit_root;
/* For the endio handler. */
struct btrfs_io_context *bioc;
};
@@ -725,8 +725,6 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
-int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
-int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 738c7bb8ea7c..ce464cd8e0ac 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -120,7 +120,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
* locks the inode's i_mutex before calling setxattr or removexattr.
*/
if (flags & XATTR_REPLACE) {
- ASSERT(inode_is_locked(inode));
+ btrfs_assert_inode_locked(BTRFS_I(inode));
di = btrfs_lookup_xattr(NULL, root, path,
btrfs_ino(BTRFS_I(inode)), name, name_len, 0);
if (!di)
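
Several files in this series replace ASSERT(inode_is_locked(...)) with btrfs_assert_inode_locked(). The helper itself is not shown in this diff; a plausible shape, assuming it wraps a lockdep assertion on the VFS inode rwsem:

/* Assumed sketch only; the real helper lives in btrfs_inode.h. */
static inline void btrfs_assert_inode_locked(struct btrfs_inode *inode)
{
	lockdep_assert_held_write(&inode->vfs_inode.i_rwsem);
}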
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 30971dd741e2..100abc00b794 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -20,6 +20,8 @@
#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "compression.h"
+#include "fs.h"
+#include "subpage.h"
/* workspace buffer size for s390 zlib hardware support */
#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
@@ -108,6 +110,7 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long len = *total_out;
unsigned long nr_dest_folios = *out_folios;
const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
+ const u64 orig_end = start + len;
*out_folios = 0;
*total_out = 0;
@@ -153,6 +156,10 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
if (in_buf_folios > 1) {
int i;
+ /* S390 hardware acceleration path, not subpage. */
+ ASSERT(!btrfs_is_subpage(
+ inode_to_fs_info(mapping->host),
+ mapping));
for (i = 0; i < in_buf_folios; i++) {
if (data_in) {
kunmap_local(data_in);
@@ -167,9 +174,14 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
start += PAGE_SIZE;
+ workspace->strm.avail_in =
+ (in_buf_folios << PAGE_SHIFT);
}
workspace->strm.next_in = workspace->buf;
} else {
+ unsigned int pg_off;
+ unsigned int cur_len;
+
if (data_in) {
kunmap_local(data_in);
folio_put(in_folio);
@@ -179,12 +191,13 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
start, &in_folio);
if (ret < 0)
goto out;
- data_in = kmap_local_folio(in_folio, 0);
+ pg_off = offset_in_page(start);
+ cur_len = btrfs_calc_input_length(orig_end, start);
+ data_in = kmap_local_folio(in_folio, pg_off);
start += PAGE_SIZE;
workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = cur_len;
}
- workspace->strm.avail_in = min(bytes_left,
- (unsigned long) workspace->buf_size);
}
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -380,7 +393,7 @@ done:
}
int zlib_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -408,12 +421,12 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
ret = zlib_inflateInit2(&workspace->strm, wbits);
if (unlikely(ret != Z_OK)) {
- struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
btrfs_err(inode->root->fs_info,
"zlib decompression init failed, error %d root %llu inode %llu offset %llu",
ret, btrfs_root_id(inode->root), btrfs_ino(inode),
- page_offset(dest_page));
+ folio_pos(dest_folio));
return -EIO;
}
@@ -426,16 +439,16 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
if (ret != Z_STREAM_END)
goto out;
- memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, to_copy);
out:
if (unlikely(to_copy != destlen)) {
- struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
btrfs_err(inode->root->fs_info,
"zlib decompression failed, error %d root %llu inode %llu offset %llu decompressed %lu expected %zu",
ret, btrfs_root_id(inode->root), btrfs_ino(inode),
- page_offset(dest_page), to_copy, destlen);
+ folio_pos(dest_folio), to_copy, destlen);
ret = -EIO;
} else {
ret = 0;
@@ -444,7 +457,7 @@ out:
zlib_inflateEnd(&workspace->strm);
if (unlikely(to_copy < destlen))
- memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
+ folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
return ret;
}
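
The zlib hunks feed the compressor page-bounded, range-bounded input chunks via btrfs_calc_input_length(), whose semantics this diff only implies: never read past the page holding the current position and never past the end of the original range. A runnable userspace model under that assumption:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Assumed model of the per-iteration input length used above. */
static uint32_t calc_input_length(uint64_t orig_end, uint64_t cur)
{
	uint64_t page_end = (cur / PAGE_SIZE + 1) * PAGE_SIZE;
	uint64_t end = orig_end < page_end ? orig_end : page_end;

	return (uint32_t)(end - cur);
}

int main(void)
{
	/* Range [6144, 9000) with 4K pages: two reads, clipped first by
	 * the page boundary at 8192, then by the range end. */
	printf("%u\n", calc_input_length(9000, 6144));	/* 2048 */
	printf("%u\n", calc_input_length(9000, 8192));	/* 808 */
	return 0;
}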
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 047e3337852e..7fa2920632ba 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -287,7 +287,7 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
/* The emulated zone size is determined from the size of device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -304,28 +304,21 @@ static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
+ return ret;
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto out;
+ return ret;
/* No dev extents at all? Not good */
- if (ret > 0) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (ret > 0)
+ return -EUCLEAN;
}
leaf = path->nodes[0];
dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
- ret = 0;
-
-out:
- btrfs_free_path(path);
-
- return ret;
+ return 0;
}
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
@@ -1211,7 +1204,7 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_root *root;
- struct btrfs_path *path;
+ BTRFS_PATH_AUTO_FREE(path);
struct btrfs_key key;
struct btrfs_key found_key;
int ret;
@@ -1246,7 +1239,7 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
if (!ret)
ret = -EUCLEAN;
if (ret < 0)
- goto out;
+ return ret;
ret = btrfs_previous_extent_item(root, path, cache->start);
if (ret) {
@@ -1254,7 +1247,7 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
ret = 0;
*offset_ret = 0;
}
- goto out;
+ return ret;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
@@ -1266,15 +1259,10 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
if (!(found_key.objectid >= cache->start &&
found_key.objectid + length <= cache->start + cache->length)) {
- ret = -EUCLEAN;
- goto out;
+ return -EUCLEAN;
}
*offset_ret = found_key.objectid + length - cache->start;
- ret = 0;
-
-out:
- btrfs_free_path(path);
- return ret;
+ return 0;
}
struct zone_info {
@@ -2459,7 +2447,7 @@ void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
mutex_unlock(&fs_devices->device_list_mutex);
}
-bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
+bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
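
The zoned.c cleanups rely on BTRFS_PATH_AUTO_FREE(), which is presumably built on the compiler's scope-based cleanup attribute: the path is freed automatically when it goes out of scope, so every early `return ret;` above can drop its goto-based free. A runnable userspace demonstration of the underlying mechanism (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void free_bufp(char **p) { free(*p); }	/* free(NULL) is a no-op */

static int use_auto_freed_buffer(void)
{
	char *buf __attribute__((cleanup(free_bufp))) = malloc(64);

	if (!buf)
		return -1;	/* cleanup still runs here */
	snprintf(buf, 64, "freed automatically");
	puts(buf);
	return 0;		/* no explicit free() needed */
}

int main(void) { return use_auto_freed_buffer(); }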
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 30b2e48a1cec..7612e6572605 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -89,7 +89,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
struct extent_buffer *eb);
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
-bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info);
+bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info);
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
u64 length);
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
@@ -242,7 +242,7 @@ static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
-static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
+static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info)
{
return false;
}
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 2a079561b2b1..866607fd3e58 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -389,7 +389,10 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
unsigned long tot_out = 0;
unsigned long len = *total_out;
const unsigned long nr_dest_folios = *out_folios;
+ const u64 orig_end = start + len;
unsigned long max_out = nr_dest_folios * PAGE_SIZE;
+ unsigned int pg_off;
+ unsigned int cur_len;
zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
len);
@@ -415,9 +418,11 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
if (ret < 0)
goto out;
- workspace->in_buf.src = kmap_local_folio(in_folio, 0);
+ pg_off = offset_in_page(start);
+ cur_len = btrfs_calc_input_length(orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio, pg_off);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.size = cur_len;
/* Allocate and map in the output buffer */
out_folio = btrfs_alloc_compr_folio();
@@ -494,14 +499,16 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
kunmap_local(workspace->in_buf.src);
workspace->in_buf.src = NULL;
folio_put(in_folio);
- start += PAGE_SIZE;
- len -= PAGE_SIZE;
+ start += cur_len;
+ len -= cur_len;
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
if (ret < 0)
goto out;
- workspace->in_buf.src = kmap_local_folio(in_folio, 0);
+ pg_off = offset_in_page(start);
+ cur_len = btrfs_calc_input_length(orig_end, start);
+ workspace->in_buf.src = kmap_local_folio(in_folio, pg_off);
workspace->in_buf.pos = 0;
- workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.size = cur_len;
}
}
while (1) {
@@ -649,11 +656,11 @@ done:
}
int zstd_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+ struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
const u32 sectorsize = fs_info->sectorsize;
zstd_dstream *stream;
int ret = 0;
@@ -662,12 +669,12 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (unlikely(!stream)) {
- struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
btrfs_err(inode->root->fs_info,
"zstd decompression init failed, root %llu inode %llu offset %llu",
btrfs_root_id(inode->root), btrfs_ino(inode),
- page_offset(dest_page));
+ folio_pos(dest_folio));
ret = -EIO;
goto finish;
}
@@ -686,21 +693,21 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
*/
ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
if (unlikely(zstd_is_error(ret))) {
- struct btrfs_inode *inode = BTRFS_I(dest_page->mapping->host);
+ struct btrfs_inode *inode = folio_to_inode(dest_folio);
btrfs_err(inode->root->fs_info,
"zstd decompression failed, error %d root %llu inode %llu offset %llu",
zstd_get_error_code(ret), btrfs_root_id(inode->root),
- btrfs_ino(inode), page_offset(dest_page));
+ btrfs_ino(inode), folio_pos(dest_folio));
goto finish;
}
to_copy = workspace->out_buf.pos;
- memcpy_to_page(dest_page, dest_pgoff, workspace->out_buf.dst, to_copy);
+ memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy);
finish:
/* Error or early end. */
if (unlikely(to_copy < destlen)) {
ret = -EIO;
- memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
+ folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
}
return ret;
}
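The zstd compression hunks stop assuming one full page per iteration: each pass maps the input at offset_in_page(start) and advances by a cur_len clamped to both the end of the range and the page boundary. A runnable model of that arithmetic, where calc_input_length() is a hypothetical stand-in for btrfs_calc_input_length() and large-folio details are ignored:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Hypothetical stand-in: clamp the chunk starting at 'start' to both the
 * end of the compression range and the end of the current page. */
static unsigned int calc_input_length(uint64_t orig_end, uint64_t start)
{
        uint64_t page_end = (start / PAGE_SIZE + 1) * PAGE_SIZE;
        uint64_t end = orig_end < page_end ? orig_end : page_end;

        return (unsigned int)(end - start);
}

int main(void)
{
        uint64_t start = 4000, orig_end = 9000;

        while (start < orig_end) {
                unsigned int cur_len = calc_input_length(orig_end, start);

                printf("map at pg_off %llu, len %u\n",
                       (unsigned long long)(start % PAGE_SIZE), cur_len);
                start += cur_len;       /* mirrors start += cur_len above */
        }
        return 0;
}

The first chunk here is only 96 bytes (it starts mid-page), matching how the patch no longer steps by a fixed PAGE_SIZE.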
diff --git a/fs/buffer.c b/fs/buffer.c
index e55ad471c530..1fc9a50def0b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -774,12 +774,11 @@ EXPORT_SYMBOL(block_dirty_folio);
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
- struct list_head tmp;
struct address_space *mapping;
int err = 0, err2;
struct blk_plug plug;
+ LIST_HEAD(tmp);
- INIT_LIST_HEAD(&tmp);
blk_start_plug(&plug);
spin_lock(lock);
@@ -958,12 +957,9 @@ no_grow:
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry)
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
- if (retry)
- gfp |= __GFP_NOFAIL;
return folio_alloc_buffers(page_folio(page), size, gfp);
}
@@ -2168,11 +2164,10 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
return err;
}
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page_folio(page), pos, len, get_block,
- NULL);
+ return __block_write_begin_int(folio, pos, len, get_block, NULL);
}
EXPORT_SYMBOL(__block_write_begin);
@@ -2222,33 +2217,33 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
* The filesystem needs to handle block truncation upon failure.
*/
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, get_block_t *get_block)
+ struct folio **foliop, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
int status;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- status = __block_write_begin(page, pos, len, get_block);
+ status = __block_write_begin_int(folio, pos, len, get_block, NULL);
if (unlikely(status)) {
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
}
- *pagep = page;
+ *foliop = folio;
return status;
}
EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
size_t start = pos - folio_pos(folio);
if (unlikely(copied < len)) {
@@ -2280,19 +2275,19 @@ EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
bool i_size_changed = false;
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
/*
* No need to use i_size_read() here, the i_size cannot change under us
* because we hold i_rwsem.
*
- * But it's important to update i_size while still holding page lock:
+ * But it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
if (pos + copied > inode->i_size) {
@@ -2300,8 +2295,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
i_size_changed = true;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
@@ -2467,7 +2462,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int err;
@@ -2475,11 +2470,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
- err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
+ err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
if (err)
goto out;
- err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
+ err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
BUG_ON(err > 0);
out:
@@ -2493,7 +2488,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
pgoff_t index, curidx;
loff_t curpos;
@@ -2512,12 +2507,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = PAGE_SIZE - zerofrom;
err = aops->write_begin(file, mapping, curpos, len,
- &page, &fsdata);
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2545,12 +2540,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = offset - zerofrom;
err = aops->write_begin(file, mapping, curpos, len,
- &page, &fsdata);
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2566,7 +2561,7 @@ out:
*/
int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata,
+ struct folio **foliop, void **fsdata,
get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
@@ -2584,7 +2579,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- return block_write_begin(mapping, pos, len, pagep, get_block);
+ return block_write_begin(mapping, pos, len, foliop, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
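A behavioral detail in the block_write_begin() conversion: grab_cache_page_write_begin() signalled failure with NULL, while __filemap_get_folio() returns an encoded error pointer, so callers switch from a !page test to IS_ERR()/PTR_ERR(). A self-contained model of the encoding convention (simplified; the kernel's real macros live in include/linux/err.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified user-space model of the kernel's ERR_PTR convention: the top
 * page of the address space encodes -errno values as pointers. */
#define MAX_ERRNO       4095

static void *ERR_PTR(long error)        { return (void *)error; }
static long PTR_ERR(const void *ptr)    { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_folio(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake folio */
}

int main(void)
{
        void *folio = get_folio(1);

        if (IS_ERR(folio))              /* replaces the old !page check */
                printf("error %ld\n", PTR_ERR(folio));
        return 0;
}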
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index a91acd03ee12..6a821a959b59 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -627,11 +627,12 @@ static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *wreq = subreq->rreq;
struct netfs_cache_resources *cres = &wreq->cache_resources;
+ struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
_enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
- subreq->max_len = MAX_RW_COUNT;
- subreq->max_nr_segs = BIO_MAX_VECS;
+ stream->sreq_max_len = MAX_RW_COUNT;
+ stream->sreq_max_segs = BIO_MAX_VECS;
if (!cachefiles_cres_file(cres)) {
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
@@ -647,6 +648,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
struct netfs_cache_resources *cres = &wreq->cache_resources;
struct cachefiles_object *object = cachefiles_cres_object(cres);
struct cachefiles_cache *cache = object->volume->cache;
+ struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
const struct cred *saved_cred;
size_t off, pre, post, len = subreq->len;
loff_t start = subreq->start;
@@ -660,6 +662,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
if (off) {
pre = CACHEFILES_DIO_BLOCK_SIZE - off;
if (pre >= len) {
+ fscache_count_dio_misfit();
netfs_write_subrequest_terminated(subreq, len, false);
return;
}
@@ -670,10 +673,22 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
}
/* We also need to end on the cache granularity boundary */
+ if (start + len == wreq->i_size) {
+ size_t part = len % CACHEFILES_DIO_BLOCK_SIZE;
+ size_t need = CACHEFILES_DIO_BLOCK_SIZE - part;
+
+ if (part && stream->submit_extendable_to >= need) {
+ len += need;
+ subreq->len += need;
+ subreq->io_iter.count += need;
+ }
+ }
+
post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
if (post) {
len -= post;
if (len == 0) {
+ fscache_count_dio_misfit();
netfs_write_subrequest_terminated(subreq, post, false);
return;
}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 4dd8a993c60a..7c6f260a3be5 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
memcpy(buf->data, fscache_get_aux(object->cookie), len);
ret = cachefiles_inject_write_error();
- if (ret == 0)
- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
- buf, sizeof(struct cachefiles_xattr) + len, 0);
+ if (ret == 0) {
+ ret = mnt_want_write_file(file);
+ if (ret == 0) {
+ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache, buf,
+ sizeof(struct cachefiles_xattr) + len, 0);
+ mnt_drop_write_file(file);
+ }
+ }
if (ret < 0) {
trace_cachefiles_vfs_error(object, file_inode(file), ret,
cachefiles_trace_setxattr_error);
@@ -151,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
int ret;
ret = cachefiles_inject_remove_error();
- if (ret == 0)
- ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache);
+ if (ret == 0) {
+ ret = mnt_want_write(cache->mnt);
+ if (ret == 0) {
+ ret = vfs_removexattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache);
+ mnt_drop_write(cache->mnt);
+ }
+ }
if (ret < 0) {
trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
cachefiles_trace_remxattr_error);
@@ -208,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
memcpy(buf->data, p, volume->vcookie->coherency_len);
ret = cachefiles_inject_write_error();
- if (ret == 0)
- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
- buf, len, 0);
+ if (ret == 0) {
+ ret = mnt_want_write(volume->cache->mnt);
+ if (ret == 0) {
+ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
+ cachefiles_xattr_cache,
+ buf, len, 0);
+ mnt_drop_write(volume->cache->mnt);
+ }
+ }
if (ret < 0) {
trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
cachefiles_trace_setxattr_error);
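All three cachefiles/xattr.c hunks apply the same fix: the metadata-changing VFS calls are now bracketed by mnt_want_write()/mnt_drop_write() (or their _file variants) so write access to the cache mount is properly taken and released. A minimal kernel-style sketch of the bracketing, assuming a valid vfsmount; this is not cachefiles's actual helper:

static int set_xattr_with_write_access(struct vfsmount *mnt,
                                       struct dentry *dentry,
                                       const char *name,
                                       const void *value, size_t size)
{
        int ret;

        /* Take write access on the mount before changing metadata;
         * every successful mnt_want_write() must be paired with
         * mnt_drop_write(). */
        ret = mnt_want_write(mnt);
        if (ret)
                return ret;
        ret = vfs_setxattr(&nop_mnt_idmap, dentry, name, value, size, 0);
        mnt_drop_write(mnt);
        return ret;
}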
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c4744a02db75..5d9ccda098cc 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -13,6 +13,7 @@
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "super.h"
#include "mds_client.h"
@@ -205,21 +206,6 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
}
}
-static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
-{
- struct inode *inode = subreq->rreq->inode;
- struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
- struct ceph_inode_info *ci = ceph_inode(inode);
- u64 objno, objoff;
- u32 xlen;
-
- /* Truncate the extent at the end of the current block */
- ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
- &objno, &objoff, &xlen);
- subreq->len = min(xlen, fsc->mount_options->rsize);
- return true;
-}
-
static void finish_netfs_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
@@ -264,7 +250,12 @@ static void finish_netfs_read(struct ceph_osd_request *req)
calc_pages_for(osd_data->alignment,
osd_data->length), false);
}
- netfs_subreq_terminated(subreq, err, false);
+ if (err > 0) {
+ subreq->transferred = err;
+ err = 0;
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(subreq, err, false);
iput(req->r_inode);
ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
@@ -278,7 +269,6 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct iov_iter iter;
ssize_t err = 0;
size_t len;
int mode;
@@ -301,6 +291,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
req->r_num_caps = 2;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err < 0)
goto out;
@@ -314,17 +305,36 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
}
len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
- iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
- err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
- if (err == 0)
+ err = copy_to_iter(iinfo->inline_data + subreq->start, len, &subreq->io_iter);
+ if (err == 0) {
err = -EFAULT;
+ } else {
+ subreq->transferred += err;
+ err = 0;
+ }
ceph_mdsc_put_request(req);
out:
- netfs_subreq_terminated(subreq, err, false);
+ netfs_read_subreq_terminated(subreq, err, false);
return true;
}
+static int ceph_netfs_prepare_read(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ u64 objno, objoff;
+ u32 xlen;
+
+ /* Truncate the extent at the end of the current block */
+ ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
+ &objno, &objoff, &xlen);
+ rreq->io_streams[0].sreq_max_len = umin(xlen, fsc->mount_options->rsize);
+ return 0;
+}
+
static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
@@ -334,9 +344,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct ceph_client *cl = fsc->client;
struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
- struct iov_iter iter;
- int err = 0;
- u64 len = subreq->len;
+ int err;
+ u64 len;
bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
u64 off = subreq->start;
int extent_cnt;
@@ -349,6 +358,12 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
return;
+ // TODO: This rounding here is slightly dodgy. It *should* work, for
+ // now, as the cache only deals in blocks that are a multiple of
+ // PAGE_SIZE and fscrypt blocks are at most PAGE_SIZE. What needs to
+ // happen is for the fscrypt handling to be moved into netfslib and the
+ // data in the cache also to be stored encrypted.
+ len = subreq->len;
ceph_fscrypt_adjust_off_and_len(inode, &off, &len);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
@@ -371,8 +386,6 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
ceph_vinop(inode), subreq->start, subreq->len, len);
- iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
-
/*
* FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
* encrypted inodes. We'd need infrastructure that handles an iov_iter
@@ -384,7 +397,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct page **pages;
size_t page_off;
- err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
+ err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
if (err < 0) {
doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
ceph_vinop(inode), err);
@@ -399,7 +412,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
false);
} else {
- osd_req_op_extent_osd_iter(req, 0, &iter);
+ osd_req_op_extent_osd_iter(req, 0, &subreq->io_iter);
}
if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
err = -EIO;
@@ -410,17 +423,19 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
req->r_inode = inode;
ihold(inode);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
ceph_osdc_start_request(req->r_osdc, req);
out:
ceph_osdc_put_request(req);
if (err)
- netfs_subreq_terminated(subreq, err, false);
+ netfs_read_subreq_terminated(subreq, err, false);
doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct inode *inode = rreq->inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
int got = 0, want = CEPH_CAP_FILE_CACHE;
struct ceph_netfs_request_data *priv;
@@ -472,6 +487,7 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
priv->caps = got;
rreq->netfs_priv = priv;
+ rreq->io_streams[0].sreq_max_len = fsc->mount_options->rsize;
out:
if (ret < 0)
@@ -496,9 +512,9 @@ static void ceph_netfs_free_request(struct netfs_io_request *rreq)
const struct netfs_request_ops ceph_netfs_ops = {
.init_request = ceph_init_request,
.free_request = ceph_netfs_free_request,
+ .prepare_read = ceph_netfs_prepare_read,
.issue_read = ceph_netfs_issue_read,
.expand_readahead = ceph_netfs_expand_readahead,
- .clamp_length = ceph_netfs_clamp_length,
.check_write_begin = ceph_netfs_check_write_begin,
};
@@ -1508,20 +1524,18 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
*/
static int ceph_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct folio *folio = NULL;
int r;
- r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
+ r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
if (r < 0)
return r;
- folio_wait_private_2(folio); /* [DEPRECATED] */
- WARN_ON_ONCE(!folio_test_locked(folio));
- *pagep = &folio->page;
+ folio_wait_private_2(*foliop); /* [DEPRECATED] */
+ WARN_ON_ONCE(!folio_test_locked(*foliop));
return 0;
}
@@ -1531,9 +1545,8 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
*/
static int ceph_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *subpage, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false;
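ceph_netfs_prepare_read() preserves the old clamp_length rule: a subrequest must not cross a RADOS object boundary, so the maximum length follows from the offset within the current object and is then capped by the rsize mount option via umin(). A runnable model of the boundary clamp that deliberately ignores striping (ceph_calc_file_object_mapping() handles the full layout):

#include <stdint.h>
#include <stdio.h>

/* Clamp an extent starting at 'start' so it stays inside one object of
 * size 'objsize'; a simplified stand-in for the xlen computed by
 * ceph_calc_file_object_mapping(). */
static uint64_t max_len_in_object(uint64_t start, uint64_t objsize)
{
        return objsize - (start % objsize);
}

int main(void)
{
        uint64_t objsize = 4 * 1024 * 1024;     /* common 4 MiB objects */
        uint64_t start = objsize - 512;

        /* Only 512 bytes remain before the object boundary. */
        printf("max %llu\n",
               (unsigned long long)max_len_in_object(start, objsize));
        return 0;
}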
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 18c72b305858..ddec8c9244ee 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -707,7 +707,6 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
if (offset != file->f_pos) {
file->f_pos = offset;
- file->f_version = 0;
dfi->file_info.flags &= ~CEPH_F_ATEND;
}
retval = offset;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 6898dc621011..6896fce122e1 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -119,31 +119,43 @@ static const struct fs_parameter_spec coda_param_specs[] = {
{}
};
-static int coda_parse_fd(struct fs_context *fc, int fd)
+static int coda_set_idx(struct fs_context *fc, struct file *file)
{
struct coda_fs_context *ctx = fc->fs_private;
- struct fd f;
struct inode *inode;
int idx;
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
- inode = file_inode(f.file);
+ inode = file_inode(file);
if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
- fdput(f);
- return invalf(fc, "code: Not coda psdev");
+ return invalf(fc, "coda: Not coda psdev");
}
-
idx = iminor(inode);
- fdput(f);
-
if (idx < 0 || idx >= MAX_CODADEVS)
return invalf(fc, "coda: Bad minor number");
ctx->idx = idx;
return 0;
}
+static int coda_parse_fd(struct fs_context *fc, struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ struct file *file;
+ int err;
+
+ if (param->type == fs_value_is_file) {
+ file = param->file;
+ param->file = NULL;
+ } else {
+ file = fget(result->uint_32);
+ }
+ if (!file)
+ return -EBADF;
+
+ err = coda_set_idx(fc, file);
+ fput(file);
+ return err;
+}
+
static int coda_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct fs_parse_result result;
@@ -155,7 +167,7 @@ static int coda_parse_param(struct fs_context *fc, struct fs_parameter *param)
switch (opt) {
case Opt_fd:
- return coda_parse_fd(fc, result.uint_32);
+ return coda_parse_fd(fc, param, &result);
}
return 0;
@@ -167,6 +179,7 @@ static int coda_parse_param(struct fs_context *fc, struct fs_parameter *param)
*/
static int coda_parse_monolithic(struct fs_context *fc, void *_data)
{
+ struct file *file;
struct coda_mount_data *data = _data;
if (!data)
@@ -175,7 +188,11 @@ static int coda_parse_monolithic(struct fs_context *fc, void *_data)
if (data->version != CODA_MOUNT_VERSION)
return invalf(fc, "coda: Bad mount version");
- coda_parse_fd(fc, data->fd);
+ file = fget(data->fd);
+ if (file) {
+ coda_set_idx(fc, file);
+ fput(file);
+ }
return 0;
}
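The coda change separates descriptor lookup from device validation so a file passed in by the new mount API (fs_value_is_file) is consumed directly, while a numeric fd still goes through fget()/fput(). A kernel-style sketch of the reference discipline; validate_psdev() is a hypothetical stand-in for coda_set_idx():

/* Resolve a descriptor to a struct file, use it, drop the reference. */
static int use_fd(struct fs_context *fc, int fd)
{
        struct file *file = fget(fd);
        int err;

        if (!file)
                return -EBADF;
        err = validate_psdev(fc, file);
        fput(file);             /* always drop the fget() reference */
        return err;
}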
diff --git a/fs/coredump.c b/fs/coredump.c
index 7f12ff6ad1d3..53a78b6bbb5b 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
+#include <linux/sort.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
@@ -464,7 +465,17 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- return fatal_signal_pending(current) || freezing(current);
+ if (fatal_signal_pending(current)) {
+ coredump_report_failure("interrupted: fatal signal pending");
+ return true;
+ }
+
+ if (freezing(current)) {
+ coredump_report_failure("interrupted: freezing");
+ return true;
+ }
+
+ return false;
}
static void wait_for_dump_helpers(struct file *file)
@@ -519,7 +530,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return err;
}
-void do_coredump(const kernel_siginfo_t *siginfo)
+int do_coredump(const kernel_siginfo_t *siginfo)
{
struct core_state core_state;
struct core_name cn;
@@ -527,7 +538,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
struct linux_binfmt * binfmt;
const struct cred *old_cred;
struct cred *cred;
- int retval = 0;
+ int retval;
int ispipe;
size_t *argv = NULL;
int argc = 0;
@@ -551,14 +562,20 @@ void do_coredump(const kernel_siginfo_t *siginfo)
audit_core_dumps(siginfo->si_signo);
binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump)
+ if (!binfmt || !binfmt->core_dump) {
+ retval = -ENOEXEC;
goto fail;
- if (!__get_dumpable(cprm.mm_flags))
+ }
+ if (!__get_dumpable(cprm.mm_flags)) {
+ retval = -EACCES;
goto fail;
+ }
cred = prepare_creds();
- if (!cred)
+ if (!cred) {
+ retval = -EPERM;
goto fail;
+ }
/*
* We cannot trust fsuid as being the "true" uid of the process
* nor do we know its entire history. We only know it was tainted
@@ -586,8 +603,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
struct subprocess_info *sub_info;
if (ispipe < 0) {
- printk(KERN_WARNING "format_corename failed\n");
- printk(KERN_WARNING "Aborting core\n");
+ coredump_report_failure("format_corename failed, aborting core");
+ retval = ispipe;
goto fail_unlock;
}
@@ -607,27 +624,24 @@ void do_coredump(const kernel_siginfo_t *siginfo)
* right pid if a thread in a multi-threaded
* core_pattern process dies.
*/
- printk(KERN_WARNING
- "Process %d(%s) has RLIMIT_CORE set to 1\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Aborting core\n");
+ coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
+ retval = -EPERM;
goto fail_unlock;
}
cprm.limit = RLIM_INFINITY;
dump_count = atomic_inc_return(&core_dump_count);
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
- printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Skipping core dump\n");
+ coredump_report_failure("over core_pipe_limit, skipping core dump");
+ retval = -E2BIG;
goto fail_dropcount;
}
helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
GFP_KERNEL);
if (!helper_argv) {
- printk(KERN_WARNING "%s failed to allocate memory\n",
- __func__);
+ coredump_report_failure("%s failed to allocate memory", __func__);
+ retval = -ENOMEM;
goto fail_dropcount;
}
for (argi = 0; argi < argc; argi++)
@@ -644,8 +658,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
kfree(helper_argv);
if (retval) {
- printk(KERN_INFO "Core dump to |%s pipe failed\n",
- cn.corename);
+ coredump_report_failure("|%s pipe failed", cn.corename);
goto close_fail;
}
} else {
@@ -654,14 +667,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW |
O_LARGEFILE | O_EXCL;
- if (cprm.limit < binfmt->min_coredump)
+ if (cprm.limit < binfmt->min_coredump) {
+ coredump_report_failure("over coredump resource limit, skipping core dump");
+ retval = -E2BIG;
goto fail_unlock;
+ }
if (need_suid_safe && cn.corename[0] != '/') {
- printk(KERN_WARNING "Pid %d(%s) can only dump core "\
- "to fully qualified path!\n",
- task_tgid_vnr(current), current->comm);
- printk(KERN_WARNING "Skipping core dump\n");
+ coredump_report_failure(
+ "this process can only dump core to a fully qualified path, skipping core dump");
+ retval = -EPERM;
goto fail_unlock;
}
@@ -707,20 +722,28 @@ void do_coredump(const kernel_siginfo_t *siginfo)
} else {
cprm.file = filp_open(cn.corename, open_flags, 0600);
}
- if (IS_ERR(cprm.file))
+ if (IS_ERR(cprm.file)) {
+ retval = PTR_ERR(cprm.file);
goto fail_unlock;
+ }
inode = file_inode(cprm.file);
- if (inode->i_nlink > 1)
+ if (inode->i_nlink > 1) {
+ retval = -EMLINK;
goto close_fail;
- if (d_unhashed(cprm.file->f_path.dentry))
+ }
+ if (d_unhashed(cprm.file->f_path.dentry)) {
+ retval = -EEXIST;
goto close_fail;
+ }
/*
 * AK: actually I see no reason not to allow this for named
* pipes etc, but keep the previous behaviour for now.
*/
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode)) {
+ retval = -EISDIR;
goto close_fail;
+ }
/*
* Don't dump core if the filesystem changed owner or mode
* of the file during file creation. This is an issue when
@@ -730,19 +753,24 @@ void do_coredump(const kernel_siginfo_t *siginfo)
idmap = file_mnt_idmap(cprm.file);
if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
current_fsuid())) {
- pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
- cn.corename);
+ coredump_report_failure("Core dump to %s aborted: "
+ "cannot preserve file owner", cn.corename);
+ retval = -EPERM;
goto close_fail;
}
if ((inode->i_mode & 0677) != 0600) {
- pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
- cn.corename);
+ coredump_report_failure("Core dump to %s aborted: "
+ "cannot preserve file permissions", cn.corename);
+ retval = -EPERM;
goto close_fail;
}
- if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) {
+ retval = -EACCES;
goto close_fail;
- if (do_truncate(idmap, cprm.file->f_path.dentry,
- 0, 0, cprm.file))
+ }
+ retval = do_truncate(idmap, cprm.file->f_path.dentry,
+ 0, 0, cprm.file);
+ if (retval)
goto close_fail;
}
@@ -757,11 +785,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
* have this set to NULL.
*/
if (!cprm.file) {
- pr_info("Core dump to |%s disabled\n", cn.corename);
+ coredump_report_failure("Core dump to |%s disabled", cn.corename);
+ retval = -EPERM;
goto close_fail;
}
- if (!dump_vma_snapshot(&cprm))
+ if (!dump_vma_snapshot(&cprm)) {
+ coredump_report_failure("Can't get VMA snapshot for core dump |%s",
+ cn.corename);
+ retval = -EACCES;
goto close_fail;
+ }
file_start_write(cprm.file);
core_dumped = binfmt->core_dump(&cprm);
@@ -777,9 +810,21 @@ void do_coredump(const kernel_siginfo_t *siginfo)
}
file_end_write(cprm.file);
free_vma_snapshot(&cprm);
+ } else {
+ coredump_report_failure("Core dump to %s%s has been interrupted",
+ ispipe ? "|" : "", cn.corename);
+ retval = -EAGAIN;
+ goto fail;
}
+ coredump_report(
+ "written to %s%s: VMAs: %d, size %zu; core: %lld bytes, pos %lld",
+ ispipe ? "|" : "", cn.corename,
+ cprm.vma_count, cprm.vma_data_size, cprm.written, cprm.pos);
if (ispipe && core_pipe_limit)
wait_for_dump_helpers(cprm.file);
+
+ retval = 0;
+
close_fail:
if (cprm.file)
filp_close(cprm.file, NULL);
@@ -794,7 +839,7 @@ fail_unlock:
fail_creds:
put_cred(cred);
fail:
- return;
+ return retval;
}
/*
@@ -814,8 +859,16 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
if (dump_interrupted())
return 0;
n = __kernel_write(file, addr, nr, &pos);
- if (n != nr)
+ if (n != nr) {
+ if (n < 0)
+ coredump_report_failure("failed when writing out, error %zd", n);
+ else
+ coredump_report_failure(
+ "partially written out, only %zd(of %d) bytes written",
+ n, nr);
+
return 0;
+ }
file->f_pos = pos;
cprm->written += n;
cprm->pos += n;
@@ -828,9 +881,16 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
static char zeroes[PAGE_SIZE];
struct file *file = cprm->file;
if (file->f_mode & FMODE_LSEEK) {
- if (dump_interrupted() ||
- vfs_llseek(file, nr, SEEK_CUR) < 0)
+ int ret;
+
+ if (dump_interrupted())
+ return 0;
+
+ ret = vfs_llseek(file, nr, SEEK_CUR);
+ if (ret < 0) {
+ coredump_report_failure("failed when seeking, error %d", ret);
return 0;
+ }
cprm->pos += nr;
return 1;
} else {
@@ -983,11 +1043,10 @@ void validate_coredump_safety(void)
{
if (suid_dumpable == SUID_DUMP_ROOT &&
core_pattern[0] != '/' && core_pattern[0] != '|') {
- pr_warn(
-"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
-"Pipe handler or fully qualified core dump path required.\n"
-"Set kernel.core_pattern before fs.suid_dumpable.\n"
- );
+
+ coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
+ "pipe handler or fully qualified core dump path required. "
+ "Set kernel.core_pattern before fs.suid_dumpable.");
}
}
@@ -1191,6 +1250,18 @@ static void free_vma_snapshot(struct coredump_params *cprm)
}
}
+static int cmp_vma_size(const void *vma_meta_lhs_ptr, const void *vma_meta_rhs_ptr)
+{
+ const struct core_vma_metadata *vma_meta_lhs = vma_meta_lhs_ptr;
+ const struct core_vma_metadata *vma_meta_rhs = vma_meta_rhs_ptr;
+
+ if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size)
+ return -1;
+ if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size)
+ return 1;
+ return 0;
+}
+
/*
* Under the mmap_lock, take a snapshot of relevant information about the task's
* VMAs.
@@ -1253,5 +1324,8 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
cprm->vma_data_size += m->dump_size;
}
+ sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
+ cmp_vma_size, NULL);
+
return true;
}
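dump_vma_snapshot() now sorts the VMA metadata by dump_size ascending; the likely intent is that when a core size limit truncates the dump, the many small mappings still land in the file before one huge VMA exhausts the budget. The comparator uses the standard three-way convention; a runnable user-space equivalent with qsort():

#include <stdio.h>
#include <stdlib.h>

struct vma_meta { unsigned long dump_size; };

/* Same shape as cmp_vma_size() in the patch: negative/zero/positive for
 * less/equal/greater, giving an ascending sort. */
static int cmp_vma_size(const void *l, const void *r)
{
        const struct vma_meta *a = l, *b = r;

        if (a->dump_size < b->dump_size)
                return -1;
        if (a->dump_size > b->dump_size)
                return 1;
        return 0;
}

int main(void)
{
        struct vma_meta vmas[] = { { 4096 }, { 0 }, { 65536 }, { 8192 } };
        size_t n = sizeof(vmas) / sizeof(vmas[0]);

        qsort(vmas, n, sizeof(vmas[0]), cmp_vma_size);
        for (size_t i = 0; i < n; i++)
                printf("%lu\n", vmas[i].dump_size);
        return 0;
}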
diff --git a/fs/dcache.c b/fs/dcache.c
index 6386b9b625dd..0f6b16ba30d0 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1913,8 +1913,13 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
+ /*
+ * Pairs with the barrier in prepare_to_wait_event() to make sure
+ * ___wait_var_event() either sees the bit cleared or
+ * waitqueue_active() check in wake_up_var() sees the waiter.
+ */
smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
@@ -2168,9 +2173,6 @@ seqretry:
* without taking d_lock and checking d_seq sequence count against @seq
* returned here.
*
- * A refcount may be taken on the found dentry with the d_rcu_to_refcount
- * function.
- *
* Alternatively, __d_lookup_rcu may be called again to look up the child of
* the returned dentry, so long as its parent's seqlock is checked after the
* child is looked up. Thus, an interlocking stepping of sequence lock checks
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 91521576f500..66d9b3b4c588 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -89,12 +89,14 @@ enum {
Opt_uid,
Opt_gid,
Opt_mode,
+ Opt_source,
};
static const struct fs_parameter_spec debugfs_param_specs[] = {
fsparam_gid ("gid", Opt_gid),
fsparam_u32oct ("mode", Opt_mode),
fsparam_uid ("uid", Opt_uid),
+ fsparam_string ("source", Opt_source),
{}
};
@@ -126,6 +128,12 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param
case Opt_mode:
opts->mode = result.uint_32 & S_IALLUGO;
break;
+ case Opt_source:
+ if (fc->source)
+ return invalfc(fc, "Multiple sources specified");
+ fc->source = param->string;
+ param->string = NULL;
+ break;
/*
* We might like to report bad mount options here;
* but traditionally debugfs has ignored all mount options
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b0aafe640fa4..bbd05f1a2145 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,7 +37,6 @@
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
-#include <linux/prefetch.h>
#include "internal.h"
@@ -1121,11 +1120,6 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
- /*
- * Avoid references to bdev if not absolutely needed to give
- * the early prefetch in the caller enough time.
- */
-
/* watch out for a 0 len io from a tricksy fs */
if (iov_iter_rw(iter) == READ && !count)
return 0;
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 99952234799e..eac96f1c1d74 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -928,7 +928,7 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
int dlm_our_nodeid(void)
{
- return local_comm ? local_comm->nodeid : 0;
+ return local_comm->nodeid;
}
/* num 0 is first addr, num 1 is second addr */
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 32d98e63d25e..d534a4bc162b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -295,6 +295,7 @@ struct dlm_lkb {
void *lkb_astparam; /* caller's ast arg */
struct dlm_user_args *lkb_ua;
};
+ struct rcu_head rcu;
};
/*
@@ -660,6 +661,8 @@ struct dlm_ls {
const struct dlm_lockspace_ops *ls_ops;
void *ls_ops_arg;
+ struct work_struct ls_free_work;
+
int ls_namelen;
char ls_name[DLM_LOCKSPACE_LEN + 1];
};
@@ -803,6 +806,8 @@ static inline void dlm_set_sbflags_val(struct dlm_lkb *lkb, uint32_t val)
__DLM_SBF_MAX_BIT);
}
+extern struct workqueue_struct *dlm_wq;
+
int dlm_plock_init(void);
void dlm_plock_exit(void);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 8bee4f444afd..865dc70a9dfc 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -600,7 +600,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
{
struct dlm_rsb *r;
- r = dlm_allocate_rsb(ls);
+ r = dlm_allocate_rsb();
if (!r)
return -ENOMEM;
@@ -733,11 +733,13 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
}
retry:
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error)
+ goto do_new;
/* check if the rsb is active under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (error) {
+ if (!rsb_flag(r, RSB_HASHED)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_new;
}
@@ -918,11 +920,13 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
int error;
retry:
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error)
+ goto do_new;
/* check if the rsb is in active state under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (error) {
+ if (!rsb_flag(r, RSB_HASHED)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_new;
}
@@ -1151,7 +1155,7 @@ static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_no
r->res_dir_nodeid = our_nodeid;
}
- if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
+ if (fix_master && r->res_master_nodeid && dlm_is_removed(ls, r->res_master_nodeid)) {
/* Recovery uses this function to set a new master when
* the previous master failed. Setting NEW_MASTER will
* force dlm_recover_masters to call recover_master on this
@@ -1276,43 +1280,45 @@ static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *na
}
retry:
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error)
+ goto not_found;
/* check if the rsb is active under read lock - likely path */
read_lock_bh(&ls->ls_rsbtbl_lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (!error) {
- if (rsb_flag(r, RSB_INACTIVE)) {
- read_unlock_bh(&ls->ls_rsbtbl_lock);
- goto do_inactive;
- }
-
- /* because the rsb is active, we need to lock_rsb before
- * checking/changing re_master_nodeid
- */
+ if (!rsb_flag(r, RSB_HASHED)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto not_found;
+ }
- hold_rsb(r);
+ if (rsb_flag(r, RSB_INACTIVE)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
- lock_rsb(r);
+ goto do_inactive;
+ }
- __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
- flags, r_nodeid, result);
+ /* because the rsb is active, we need to lock_rsb before
+ * checking/changing re_master_nodeid
+ */
- /* the rsb was active */
- unlock_rsb(r);
- put_rsb(r);
+ hold_rsb(r);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ lock_rsb(r);
- return 0;
- } else {
- read_unlock_bh(&ls->ls_rsbtbl_lock);
- goto not_found;
- }
+ __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
+ flags, r_nodeid, result);
+
+ /* the rsb was active */
+ unlock_rsb(r);
+ put_rsb(r);
+
+ return 0;
do_inactive:
- /* unlikely path - relookup under write */
+ /* unlikely path - check if still part of ls_rsbtbl */
write_lock_bh(&ls->ls_rsbtbl_lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (!error) {
+ /* see comment in find_rsb_dir */
+ if (rsb_flag(r, RSB_HASHED)) {
if (!rsb_flag(r, RSB_INACTIVE)) {
write_unlock_bh(&ls->ls_rsbtbl_lock);
 /* something has changed, very unlikely but
@@ -1403,14 +1409,14 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
struct dlm_rsb *r = NULL;
int error;
- read_lock_bh(&ls->ls_rsbtbl_lock);
+ rcu_read_lock();
error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error)
goto out;
dlm_dump_rsb(r);
out:
- read_unlock_bh(&ls->ls_rsbtbl_lock);
+ rcu_read_unlock();
}
static void deactivate_rsb(struct kref *kref)
@@ -1442,18 +1448,6 @@ static void deactivate_rsb(struct kref *kref)
}
}
-/* See comment for unhold_lkb */
-
-static void unhold_rsb(struct dlm_rsb *r)
-{
- int rv;
-
- /* inactive rsbs are not ref counted */
- WARN_ON(rsb_flag(r, RSB_INACTIVE));
- rv = kref_put(&r->res_ref, deactivate_rsb);
- DLM_ASSERT(!rv, dlm_dump_rsb(r););
-}
-
void free_inactive_rsb(struct dlm_rsb *r)
{
WARN_ON_ONCE(!rsb_flag(r, RSB_INACTIVE));
@@ -1497,7 +1491,7 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
limit.max = end;
limit.min = start;
- lkb = dlm_allocate_lkb(ls);
+ lkb = dlm_allocate_lkb();
if (!lkb)
return -ENOMEM;
@@ -1533,11 +1527,21 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- read_lock_bh(&ls->ls_lkbxa_lock);
+ rcu_read_lock();
lkb = xa_load(&ls->ls_lkbxa, lkid);
- if (lkb)
- kref_get(&lkb->lkb_ref);
- read_unlock_bh(&ls->ls_lkbxa_lock);
+ if (lkb) {
+ /* check if lkb is still part of lkbxa under lkbxa_lock as
+ * the lkb_ref is tied to the lkbxa data structure, see
+ * __put_lkb().
+ */
+ read_lock_bh(&ls->ls_lkbxa_lock);
+ if (kref_read(&lkb->lkb_ref))
+ kref_get(&lkb->lkb_ref);
+ else
+ lkb = NULL;
+ read_unlock_bh(&ls->ls_lkbxa_lock);
+ }
+ rcu_read_unlock();
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
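find_lkb() now performs the lookup under rcu_read_lock() and only takes ls_lkbxa_lock to confirm the lkb is still live before taking a reference; kref_read() followed by kref_get() is safe there only because the lock excludes the final put. The more common lock-free form of this pattern uses kref_get_unless_zero(); a kernel-style sketch with illustrative names:

struct obj {
        struct kref ref;
        /* ... payload ... */
};

/* RCU lookup with revalidation: the object is freed only after a grace
 * period, so the load is safe, but the reference must be confirmed
 * before the object is used. */
static struct obj *obj_lookup(struct xarray *xa, unsigned long id)
{
        struct obj *o;

        rcu_read_lock();
        o = xa_load(xa, id);
        if (o && !kref_get_unless_zero(&o->ref))
                o = NULL;       /* found a dying object: treat as absent */
        rcu_read_unlock();
        return o;
}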
@@ -1675,10 +1679,8 @@ static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
- hold_lkb(lkb);
del_lkb(r, lkb);
add_lkb(r, lkb, sts);
- unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
@@ -4323,16 +4325,27 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
memset(name, 0, sizeof(name));
memcpy(name, ms->m_extra, len);
- write_lock_bh(&ls->ls_rsbtbl_lock);
-
+ rcu_read_lock();
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (rv) {
+ rcu_read_unlock();
/* should not happen */
log_error(ls, "%s from %d not found %s", __func__,
from_nodeid, name);
+ return;
+ }
+
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ if (!rsb_flag(r, RSB_HASHED)) {
+ rcu_read_unlock();
write_unlock_bh(&ls->ls_rsbtbl_lock);
+ /* should not happen */
+ log_error(ls, "%s from %d got removed during removal %s",
+ __func__, from_nodeid, name);
return;
}
+ /* at this stage the rsb can only be freed here */
+ rcu_read_unlock();
if (!rsb_flag(r, RSB_INACTIVE)) {
if (r->res_master_nodeid != from_nodeid) {
@@ -5297,7 +5310,7 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
_request_lock(r, lkb);
- if (is_master(r))
+ if (r->res_nodeid != -1 && is_master(r))
confirm_master(r, 0);
break;
case DLM_MSG_CONVERT:
@@ -5409,9 +5422,8 @@ void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list)
return;
list_for_each_entry(r, root_list, res_root_list) {
- hold_rsb(r);
lock_rsb(r);
- if (is_master(r)) {
+ if (r->res_nodeid != -1 && is_master(r)) {
purge_dead_list(ls, r, &r->res_grantqueue,
nodeid_gone, &lkb_count);
purge_dead_list(ls, r, &r->res_convertqueue,
@@ -5420,7 +5432,7 @@ void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list)
nodeid_gone, &lkb_count);
}
unlock_rsb(r);
- unhold_rsb(r);
+
cond_resched();
}
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 4ed8d36f9c6d..b23d7b854ed4 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -66,6 +66,8 @@ int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
static inline int is_master(struct dlm_rsb *r)
{
+ WARN_ON_ONCE(r->res_nodeid == -1);
+
return !r->res_nodeid;
}
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 1848cbbc96a9..8afac6e2dff0 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -174,12 +174,6 @@ static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
return a->store ? a->store(ls, buf, len) : len;
}
-static void lockspace_kobj_release(struct kobject *k)
-{
- struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
- kfree(ls);
-}
-
static const struct sysfs_ops dlm_attr_ops = {
.show = dlm_attr_show,
.store = dlm_attr_store,
@@ -188,7 +182,6 @@ static const struct sysfs_ops dlm_attr_ops = {
static struct kobj_type dlm_ktype = {
.default_groups = dlm_groups,
.sysfs_ops = &dlm_attr_ops,
- .release = lockspace_kobj_release,
};
static struct kset *dlm_kset;
@@ -322,13 +315,50 @@ static int threads_start(void)
return error;
}
+static int lkb_idr_free(struct dlm_lkb *lkb)
+{
+ if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
+ dlm_free_lvb(lkb->lkb_lvbptr);
+
+ dlm_free_lkb(lkb);
+ return 0;
+}
+
+static void rhash_free_rsb(void *ptr, void *arg)
+{
+ struct dlm_rsb *rsb = ptr;
+
+ dlm_free_rsb(rsb);
+}
+
+static void free_lockspace(struct work_struct *work)
+{
+ struct dlm_ls *ls = container_of(work, struct dlm_ls, ls_free_work);
+ struct dlm_lkb *lkb;
+ unsigned long id;
+
+ /*
+ * Free all lkb's in xa
+ */
+ xa_for_each(&ls->ls_lkbxa, id, lkb) {
+ lkb_idr_free(lkb);
+ }
+ xa_destroy(&ls->ls_lkbxa);
+
+ /*
+ * Free all rsb's on rsbtbl
+ */
+ rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
+
+ kfree(ls);
+}
+
static int new_lockspace(const char *name, const char *cluster,
uint32_t flags, int lvblen,
const struct dlm_lockspace_ops *ops, void *ops_arg,
int *ops_result, dlm_lockspace_t **lockspace)
{
struct dlm_ls *ls;
- int do_unreg = 0;
int namelen = strlen(name);
int error;
@@ -453,6 +483,8 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_init(&ls->ls_cb_lock);
INIT_LIST_HEAD(&ls->ls_cb_delay);
+ INIT_WORK(&ls->ls_free_work, free_lockspace);
+
ls->ls_recoverd_task = NULL;
mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
@@ -530,9 +562,6 @@ static int new_lockspace(const char *name, const char *cluster,
wait_event(ls->ls_recover_lock_wait,
test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
- /* let kobject handle freeing of ls if there's an error */
- do_unreg = 1;
-
ls->ls_kobj.kset = dlm_kset;
error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
"%s", ls->ls_name);
@@ -580,10 +609,8 @@ static int new_lockspace(const char *name, const char *cluster,
xa_destroy(&ls->ls_lkbxa);
rhashtable_destroy(&ls->ls_rsbtbl);
out_lsfree:
- if (do_unreg)
- kobject_put(&ls->ls_kobj);
- else
- kfree(ls);
+ kobject_put(&ls->ls_kobj);
+ kfree(ls);
out:
module_put(THIS_MODULE);
return error;
@@ -640,15 +667,6 @@ int dlm_new_user_lockspace(const char *name, const char *cluster,
ops_arg, ops_result, lockspace);
}
-static int lkb_idr_free(struct dlm_lkb *lkb)
-{
- if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
- dlm_free_lvb(lkb->lkb_lvbptr);
-
- dlm_free_lkb(lkb);
- return 0;
-}
-
/* NOTE: We check the lkbxa here rather than the resource table.
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
@@ -680,17 +698,8 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
return rv;
}
-static void rhash_free_rsb(void *ptr, void *arg)
-{
- struct dlm_rsb *rsb = ptr;
-
- dlm_free_rsb(rsb);
-}
-
static int release_lockspace(struct dlm_ls *ls, int force)
{
- struct dlm_lkb *lkb;
- unsigned long id;
int busy, rv;
busy = lockspace_busy(ls, force);
@@ -743,23 +752,12 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
+ kobject_put(&ls->ls_kobj);
+
xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
/*
- * Free all lkb's in xa
- */
- xa_for_each(&ls->ls_lkbxa, id, lkb) {
- lkb_idr_free(lkb);
- }
- xa_destroy(&ls->ls_lkbxa);
-
- /*
- * Free all rsb's on rsbtbl
- */
- rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
-
- /*
* Free structures on any other lists
*/
@@ -768,10 +766,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_clear_members(ls);
dlm_clear_members_gone(ls);
kfree(ls->ls_node_array);
- log_rinfo(ls, "release_lockspace final free");
- kobject_put(&ls->ls_kobj);
- /* The ls structure will be freed when the kobject is done with */
+ log_rinfo(ls, "%s final free", __func__);
+
+ /* delayed free of data structures, see free_lockspace() */
+ queue_work(dlm_wq, &ls->ls_free_work);
module_put(THIS_MODULE);
return 0;
}
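release_lockspace() now hands the bulk freeing of lkbs and rsbs to dlm_wq via ls_free_work, keeping the heavy teardown out of the release path; exit_dlm() destroys the workqueue first, which flushes any frees still pending. A kernel-style sketch of the deferred-free pattern with illustrative names:

struct widget {
        struct work_struct free_work;
        /* ... payload ... */
};

static void widget_free_fn(struct work_struct *work)
{
        struct widget *w = container_of(work, struct widget, free_work);

        /* the heavy teardown runs in workqueue context */
        kfree(w);
}

static void widget_release(struct widget *w, struct workqueue_struct *wq)
{
        /* INIT_WORK(&w->free_work, widget_free_fn) was done at creation
         * time; releasing just queues the free and returns. */
        queue_work(wq, &w->free_work);
}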
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 2e3e269d820e..cb3a10b041c2 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -161,8 +161,6 @@ struct dlm_proto_ops {
const char *name;
int proto;
- int (*connect)(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len);
void (*sockopts)(struct socket *sock);
int (*bind)(struct socket *sock);
int (*listen_validate)(void);
@@ -1599,8 +1597,7 @@ static int dlm_connect(struct connection *con)
log_print_ratelimited("connecting to %d", con->nodeid);
make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
- result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
- addr_len);
+ result = kernel_connect(sock, (struct sockaddr *)&addr, addr_len, 0);
switch (result) {
case -EINPROGRESS:
/* not an error */
@@ -1634,13 +1631,6 @@ static void process_send_sockets(struct work_struct *work)
switch (ret) {
case 0:
break;
- case -EINPROGRESS:
- /* avoid spamming resched on connection
- * we might can switch to a state_change
- * event based mechanism if established
- */
- msleep(100);
- break;
default:
/* CF_SEND_PENDING not cleared */
up_write(&con->sock_lock);
@@ -1831,12 +1821,6 @@ static int dlm_tcp_bind(struct socket *sock)
return 0;
}
-static int dlm_tcp_connect(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len)
-{
- return kernel_connect(sock, addr, addr_len, O_NONBLOCK);
-}
-
static int dlm_tcp_listen_validate(void)
{
/* We don't support multi-homed hosts */
@@ -1873,7 +1857,6 @@ static int dlm_tcp_listen_bind(struct socket *sock)
static const struct dlm_proto_ops dlm_tcp_ops = {
.name = "TCP",
.proto = IPPROTO_TCP,
- .connect = dlm_tcp_connect,
.sockopts = dlm_tcp_sockopts,
.bind = dlm_tcp_bind,
.listen_validate = dlm_tcp_listen_validate,
@@ -1886,22 +1869,6 @@ static int dlm_sctp_bind(struct socket *sock)
return sctp_bind_addrs(sock, 0);
}
-static int dlm_sctp_connect(struct connection *con, struct socket *sock,
- struct sockaddr *addr, int addr_len)
-{
- int ret;
-
- /*
- * Make kernel_connect() function return in specified time,
- * since O_NONBLOCK argument in connect() function does not work here,
- * then, we should restore the default value of this attribute.
- */
- sock_set_sndtimeo(sock->sk, 5);
- ret = kernel_connect(sock, addr, addr_len, 0);
- sock_set_sndtimeo(sock->sk, 0);
- return ret;
-}
-
static int dlm_sctp_listen_validate(void)
{
if (!IS_ENABLED(CONFIG_IP_SCTP)) {
@@ -1929,7 +1896,6 @@ static const struct dlm_proto_ops dlm_sctp_ops = {
.name = "SCTP",
.proto = IPPROTO_SCTP,
.try_new_addr = true,
- .connect = dlm_sctp_connect,
.sockopts = dlm_sctp_sockopts,
.bind = dlm_sctp_bind,
.listen_validate = dlm_sctp_listen_validate,
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 6ca28299c9db..4887c8a05318 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -22,6 +22,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dlm.h>
+struct workqueue_struct *dlm_wq;
+
static int __init init_dlm(void)
{
int error;
@@ -50,10 +52,18 @@ static int __init init_dlm(void)
if (error)
goto out_user;
+ dlm_wq = alloc_workqueue("dlm_wq", 0, 0);
+ if (!dlm_wq) {
+ error = -ENOMEM;
+ goto out_plock;
+ }
+
printk("DLM installed\n");
return 0;
+ out_plock:
+ dlm_plock_exit();
out_user:
dlm_user_exit();
out_debug:
@@ -70,6 +80,8 @@ static int __init init_dlm(void)
static void __exit exit_dlm(void)
{
+ /* make sure every pending work item, e.g. freeing, is done */
+ destroy_workqueue(dlm_wq);
dlm_plock_exit();
dlm_user_exit();
dlm_config_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index a7ee7fd2b9d3..c9661906568a 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -366,6 +366,8 @@ int dlm_is_member(struct dlm_ls *ls, int nodeid)
int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
+ WARN_ON_ONCE(!nodeid || nodeid == -1);
+
if (find_memb(&ls->ls_nodes_gone, nodeid))
return 1;
return 0;
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 8c44b954c166..5c35cc67aca4 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -84,10 +84,7 @@ void dlm_memory_exit(void)
char *dlm_allocate_lvb(struct dlm_ls *ls)
{
- char *p;
-
- p = kzalloc(ls->ls_lvblen, GFP_ATOMIC);
- return p;
+ return kzalloc(ls->ls_lvblen, GFP_ATOMIC);
}
void dlm_free_lvb(char *p)
@@ -95,12 +92,9 @@ void dlm_free_lvb(char *p)
kfree(p);
}
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
+struct dlm_rsb *dlm_allocate_rsb(void)
{
- struct dlm_rsb *r;
-
- r = kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
- return r;
+ return kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
}
static void __free_rsb_rcu(struct rcu_head *rcu)
@@ -116,16 +110,15 @@ void dlm_free_rsb(struct dlm_rsb *r)
call_rcu(&r->rcu, __free_rsb_rcu);
}
-struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
+struct dlm_lkb *dlm_allocate_lkb(void)
{
- struct dlm_lkb *lkb;
-
- lkb = kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
- return lkb;
+ return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
}
-void dlm_free_lkb(struct dlm_lkb *lkb)
+static void __free_lkb_rcu(struct rcu_head *rcu)
{
+ struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu);
+
if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
struct dlm_user_args *ua;
ua = lkb->lkb_ua;
@@ -138,6 +131,11 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
kmem_cache_free(lkb_cache, lkb);
}
+void dlm_free_lkb(struct dlm_lkb *lkb)
+{
+ call_rcu(&lkb->rcu, __free_lkb_rcu);
+}
+
struct dlm_mhandle *dlm_allocate_mhandle(void)
{
return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);
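dlm_free_lkb() becomes RCU-deferred to back the lockless lookups above: a struct rcu_head is embedded in struct dlm_lkb and the real destructor runs from the RCU callback, so readers inside rcu_read_lock() never see freed memory. The canonical shape of the pattern, with illustrative names:

struct item {
        /* ... payload ... */
        struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *rcu)
{
        struct item *it = container_of(rcu, struct item, rcu);

        kfree(it);      /* actual free, after all readers are done */
}

static void item_free(struct item *it)
{
        /* Returns immediately; freeing happens after a grace period. */
        call_rcu(&it->rcu, item_free_rcu);
}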
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
index 15198d46b42a..551b6b788489 100644
--- a/fs/dlm/memory.h
+++ b/fs/dlm/memory.h
@@ -14,9 +14,9 @@
int dlm_memory_init(void);
void dlm_memory_exit(void);
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls);
+struct dlm_rsb *dlm_allocate_rsb(void);
void dlm_free_rsb(struct dlm_rsb *r);
-struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
+struct dlm_lkb *dlm_allocate_lkb(void);
void dlm_free_lkb(struct dlm_lkb *l);
char *dlm_allocate_lvb(struct dlm_ls *ls);
void dlm_free_lvb(char *l);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index c7afb428a2b4..2e1169c81c6e 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -452,10 +452,11 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
int is_removed = 0;
int error;
- if (is_master(r))
+ if (r->res_nodeid != -1 && is_master(r))
return 0;
- is_removed = dlm_is_removed(ls, r->res_nodeid);
+ if (r->res_nodeid != -1)
+ is_removed = dlm_is_removed(ls, r->res_nodeid);
if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
return 0;
@@ -664,7 +665,7 @@ int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
int error, count = 0;
list_for_each_entry(r, root_list, res_root_list) {
- if (is_master(r)) {
+ if (r->res_nodeid != -1 && is_master(r)) {
rsb_clear_flag(r, RSB_NEW_MASTER);
continue;
}
@@ -858,7 +859,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
list_for_each_entry(r, root_list, res_root_list) {
lock_rsb(r);
- if (is_master(r)) {
+ if (r->res_nodeid != -1 && is_master(r)) {
if (rsb_flag(r, RSB_RECOVER_CONVERT))
recover_conversion(r);
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index e2483acc4366..287e5d407f08 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -234,17 +234,17 @@ out:
/*
* Called with lower inode mutex held.
*/
-static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
+static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int end_byte_in_page;
- if ((i_size_read(inode) / PAGE_SIZE) != page->index)
+ if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
goto out;
end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
- zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
+ folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
out:
return 0;
}
@@ -255,7 +255,7 @@ out:
* @mapping: The eCryptfs object
* @pos: The file offset at which to start writing
* @len: Length of the write
- * @pagep: Pointer to return the page
+ * @foliop: Pointer to return the folio
* @fsdata: Pointer to return fs data (unused)
*
* This function must zero any hole we create
@@ -265,38 +265,39 @@ out:
static int ecryptfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
loff_t prev_page_end_size;
int rc = 0;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ *foliop = folio;
prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_SIZE, mapping->host);
+ &folio->page, index, 0, PAGE_SIZE, mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"lower page segment; rc = [%d]\n",
__func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
} else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
rc = ecryptfs_copy_up_encrypted_with_header(
- page, crypt_stat);
+ &folio->page, crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting "
"to copy the encrypted content "
@@ -304,46 +305,46 @@ static int ecryptfs_write_begin(struct file *file,
"inserting the metadata from "
"the xattr into the header; rc "
"= [%d]\n", __func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_SIZE,
+ &folio->page, index, 0, PAGE_SIZE,
mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error reading "
"page; rc = [%d]\n",
__func__, rc);
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
} else {
if (prev_page_end_size
- >= i_size_read(page->mapping->host)) {
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ >= i_size_read(mapping->host)) {
+ folio_zero_range(folio, 0, PAGE_SIZE);
+ folio_mark_uptodate(folio);
} else if (len < PAGE_SIZE) {
- rc = ecryptfs_decrypt_page(page);
+ rc = ecryptfs_decrypt_page(&folio->page);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
"page at index [%ld]; "
"rc = [%d]\n",
- __func__, page->index, rc);
- ClearPageUptodate(page);
+ __func__, folio->index, rc);
+ folio_clear_uptodate(folio);
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
}
}
/* If creating a page or more of holes, zero them out via truncate.
* Note, this will increase i_size. */
if (index != 0) {
- if (prev_page_end_size > i_size_read(page->mapping->host)) {
+ if (prev_page_end_size > i_size_read(mapping->host)) {
rc = ecryptfs_truncate(file->f_path.dentry,
prev_page_end_size);
if (rc) {
@@ -359,12 +360,11 @@ static int ecryptfs_write_begin(struct file *file,
* of page? Zero it out. */
if ((i_size_read(mapping->host) == prev_page_end_size)
&& (pos != 0))
- zero_user(page, 0, PAGE_SIZE);
+ folio_zero_range(folio, 0, PAGE_SIZE);
out:
if (unlikely(rc)) {
- unlock_page(page);
- put_page(page);
- *pagep = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
}
return rc;
}
@@ -457,13 +457,13 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
* @pos: The file position
* @len: The length of the data (unused)
* @copied: The amount of data copied
- * @page: The eCryptfs page
+ * @folio: The eCryptfs folio
* @fsdata: The fsdata (unused)
*/
static int ecryptfs_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
unsigned from = pos & (PAGE_SIZE - 1);
@@ -476,8 +476,8 @@ static int ecryptfs_write_end(struct file *file,
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
- to);
+ rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
+ &folio->page, 0, to);
if (!rc) {
rc = copied;
fsstack_copy_inode_size(ecryptfs_inode,
@@ -485,21 +485,21 @@ static int ecryptfs_write_end(struct file *file,
}
goto out;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (copied < PAGE_SIZE) {
rc = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/* Fills in zeros if 'to' goes beyond inode size */
- rc = fill_zeros_to_end_of_page(page, to);
+ rc = fill_zeros_to_end_of_page(folio, to);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- rc = ecryptfs_encrypt_page(page);
+ rc = ecryptfs_encrypt_page(&folio->page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
"index [0x%.16lx])\n", index);
@@ -518,8 +518,8 @@ static int ecryptfs_write_end(struct file *file,
else
rc = copied;
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return rc;
}
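
The conversion above swaps grab_cache_page_write_begin() for __filemap_get_folio(), which returns a locked folio or an ERR_PTR() rather than NULL. A hedged sketch of the minimal ->write_begin() shape that results; only the lookup and failure cleanup are shown, and the filesystem-specific filling is elided:

#include <linux/pagemap.h>

static int example_write_begin(struct file *file,
			       struct address_space *mapping,
			       loff_t pos, unsigned len,
			       struct folio **foliop, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct folio *folio;
	int rc = 0;

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

	/* ... read in, decrypt or zero the folio as needed, setting rc ... */

	if (unlikely(rc)) {
		folio_unlock(folio);	/* on error, release what we grabbed */
		folio_put(folio);
	}
	return rc;
}
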
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 7dcdce660cac..6ea60661fa55 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -74,6 +74,23 @@ config EROFS_FS_SECURITY
If you are not using a security module, say N.
+config EROFS_FS_BACKED_BY_FILE
+ bool "File-backed EROFS filesystem support"
+ depends on EROFS_FS
+ default y
+ help
+ This allows EROFS to use filesystem image files directly, without
+ first attaching them to loopback block devices or the like. It is
+ particularly useful for container images with numerous blobs and
+ other sandboxes, where loop devices are cumbersome. It also avoids
+ the error-prone lifetime management of otherwise unnecessary
+ virtual block devices.
+
+ Note that this feature, along with ongoing fanotify pre-content
+ hooks, will eventually replace "EROFS over fscache."
+
+ If you don't want to enable this feature, say N.
+
config EROFS_FS_ZIP
bool "EROFS Data Compression Support"
depends on EROFS_FS
@@ -128,7 +145,7 @@ config EROFS_FS_ZIP_ZSTD
If unsure, say N.
config EROFS_FS_ONDEMAND
- bool "EROFS fscache-based on-demand read support"
+ bool "EROFS fscache-based on-demand read support (deprecated)"
depends on EROFS_FS
select NETFS_SUPPORT
select FSCACHE
@@ -138,6 +155,9 @@ config EROFS_FS_ONDEMAND
This permits EROFS to use fscache-backed data blobs with on-demand
read support.
+ It is now deprecated and scheduled to be removed from the kernel
+ after fanotify pre-content hooks land.
+
If unsure, say N.
config EROFS_FS_PCPU_KTHREAD
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
index 097d672e6b14..4331d53c7109 100644
--- a/fs/erofs/Makefile
+++ b/fs/erofs/Makefile
@@ -7,4 +7,5 @@ erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o
+erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o
erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 1b7eba38ba1e..61debd799cf9 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -59,8 +59,12 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
- if (erofs_is_fscache_mode(sb))
- buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ if (erofs_is_fileio_mode(sbi))
+ buf->mapping = file_inode(sbi->fdev)->i_mapping;
+ else if (erofs_is_fscache_mode(sb))
+ buf->mapping = sbi->s_fscache->inode->i_mapping;
else
buf->mapping = sb->s_bdev->bd_mapping;
}
@@ -75,38 +79,28 @@ void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
static int erofs_map_blocks_flatmode(struct inode *inode,
struct erofs_map_blocks *map)
{
- erofs_blk_t nblocks, lastblk;
- u64 offset = map->m_la;
struct erofs_inode *vi = EROFS_I(inode);
struct super_block *sb = inode->i_sb;
bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+ erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking;
- nblocks = erofs_iblks(inode);
- lastblk = nblocks - tailendpacking;
-
- /* there is no hole in flatmode */
- map->m_flags = EROFS_MAP_MAPPED;
- if (offset < erofs_pos(sb, lastblk)) {
+ map->m_flags = EROFS_MAP_MAPPED; /* no hole in flat inodes */
+ if (map->m_la < erofs_pos(sb, lastblk)) {
map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
- map->m_plen = erofs_pos(sb, lastblk) - offset;
- } else if (tailendpacking) {
+ map->m_plen = erofs_pos(sb, lastblk) - map->m_la;
+ } else {
+ DBG_BUGON(!tailendpacking);
map->m_pa = erofs_iloc(inode) + vi->inode_isize +
- vi->xattr_isize + erofs_blkoff(sb, offset);
- map->m_plen = inode->i_size - offset;
+ vi->xattr_isize + erofs_blkoff(sb, map->m_la);
+ map->m_plen = inode->i_size - map->m_la;
/* inline data should be located in the same meta block */
if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
- erofs_err(sb, "inline data cross block boundary @ nid %llu",
- vi->nid);
+ erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
map->m_flags |= EROFS_MAP_META;
- } else {
- erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
- vi->nid, inode->i_size, map->m_la);
- DBG_BUGON(1);
- return -EIO;
}
return 0;
}
@@ -128,7 +122,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
if (map->m_la >= inode->i_size) {
/* leave out-of-bound access unmapped */
map->m_flags = 0;
- map->m_plen = 0;
+ map->m_plen = map->m_llen;
goto out;
}
@@ -189,16 +183,34 @@ out:
return err;
}
+static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
+ struct erofs_device_info *dif)
+{
+ map->m_bdev = NULL;
+ map->m_fp = NULL;
+ if (dif->file) {
+ if (S_ISBLK(file_inode(dif->file)->i_mode))
+ map->m_bdev = file_bdev(dif->file);
+ else
+ map->m_fp = dif->file;
+ }
+ map->m_daxdev = dif->dax_dev;
+ map->m_dax_part_off = dif->dax_part_off;
+ map->m_fscache = dif->fscache;
+}
+
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
struct erofs_device_info *dif;
+ erofs_off_t startoff, length;
int id;
map->m_bdev = sb->s_bdev;
map->m_daxdev = EROFS_SB(sb)->dax_dev;
map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
map->m_fscache = EROFS_SB(sb)->s_fscache;
+ map->m_fp = EROFS_SB(sb)->fdev;
if (map->m_deviceid) {
down_read(&devs->rwsem);
@@ -212,29 +224,20 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
- map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
- map->m_daxdev = dif->dax_dev;
- map->m_dax_part_off = dif->dax_part_off;
- map->m_fscache = dif->fscache;
+ erofs_fill_from_devinfo(map, dif);
up_read(&devs->rwsem);
} else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
idr_for_each_entry(&devs->tree, dif, id) {
- erofs_off_t startoff, length;
-
if (!dif->mapped_blkaddr)
continue;
+
startoff = erofs_pos(sb, dif->mapped_blkaddr);
length = erofs_pos(sb, dif->blocks);
-
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
- map->m_bdev = dif->bdev_file ?
- file_bdev(dif->bdev_file) : NULL;
- map->m_daxdev = dif->dax_dev;
- map->m_dax_part_off = dif->dax_part_off;
- map->m_fscache = dif->fscache;
+ erofs_fill_from_devinfo(map, dif);
break;
}
}
@@ -243,6 +246,42 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
return 0;
}
+/*
+ * bit 30: I/O error occurred on this folio
+ * bit 0 - 29: remaining parts to complete this folio
+ */
+#define EROFS_ONLINEFOLIO_EIO (1 << 30)
+
+void erofs_onlinefolio_init(struct folio *folio)
+{
+ union {
+ atomic_t o;
+ void *v;
+ } u = { .o = ATOMIC_INIT(1) };
+
+ folio->private = u.v; /* valid only if file-backed folio is locked */
+}
+
+void erofs_onlinefolio_split(struct folio *folio)
+{
+ atomic_inc((atomic_t *)&folio->private);
+}
+
+void erofs_onlinefolio_end(struct folio *folio, int err)
+{
+ int orig, v;
+
+ do {
+ orig = atomic_read((atomic_t *)&folio->private);
+ v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
+ } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
+
+ if (v & ~EROFS_ONLINEFOLIO_EIO)
+ return;
+ folio->private = 0;
+ folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
+}
+
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
@@ -392,7 +431,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
/* for uncompressed (aligned) files and raw access for other files */
-const struct address_space_operations erofs_raw_access_aops = {
+const struct address_space_operations erofs_aops = {
.read_folio = erofs_read_folio,
.readahead = erofs_readahead,
.bmap = erofs_bmap,
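
The erofs_onlinefolio_*() helpers hoisted into data.c above pack two facts into folio->private while the folio is locked: bit 30 latches whether any sub-I/O failed, and bits 0 - 29 count the parts still outstanding (initialised to 1 for the submitter itself). A hedged standalone sketch of the same counter logic against a plain atomic_t:

#include <linux/atomic.h>

#define EXAMPLE_EIO	(1 << 30)	/* mirrors the error bit above */

static atomic_t example_parts = ATOMIC_INIT(1);	/* submitter's own ref */

static void example_split(void)
{
	atomic_inc(&example_parts);	/* one more sub-I/O in flight */
}

/* returns true once the last part has completed; err is sticky */
static bool example_end(int err)
{
	int orig, v;

	do {
		orig = atomic_read(&example_parts);
		v = (orig - 1) | (err ? EXAMPLE_EIO : 0);
	} while (atomic_cmpxchg(&example_parts, orig, v) != orig);

	return !(v & ~EXAMPLE_EIO);	/* only the error bit may remain */
}
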
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index c2253b6a5416..eb318c7ddd80 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -539,7 +539,7 @@ int __init z_erofs_init_decompressor(void)
for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
if (err) {
- while (--i)
+ while (i--)
if (z_erofs_decomp[i])
z_erofs_decomp[i]->exit();
return err;
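
The one-character fix above is a classic unwind off-by-one: "while (--i)" pre-decrements before testing, so it never calls ->exit() on index 0, and if initialisation fails at i == 0 it decrements straight past zero. "while (i--)" tests first and therefore visits exactly the indices that were initialised. A runnable userspace illustration:

#include <stdio.h>

int main(void)
{
	int i;

	i = 2;			/* suppose init failed at index 2 */
	printf("while (--i):");
	while (--i)
		printf(" %d", i);	/* prints 1 only: index 0 leaks */

	i = 2;
	printf("\nwhile (i--):");
	while (i--)
		printf(" %d", i);	/* prints 1 0: full cleanup */
	printf("\n");
	return 0;
}
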
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 6c0c270c42e1..c8f2ae845bd2 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -288,9 +288,12 @@ struct erofs_dirent {
#define EROFS_NAME_LEN 255
-/* maximum supported size of a physical compression cluster */
+/* maximum supported encoded size of a physical compressed cluster */
#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024)
+/* maximum supported decoded size of a physical compressed cluster */
+#define Z_EROFS_PCLUSTER_MAX_DSIZE (12 * 1024 * 1024)
+
/* available compression algorithm types (for h_algorithmtype) */
enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
new file mode 100644
index 000000000000..3af96b1e2c2a
--- /dev/null
+++ b/fs/erofs/fileio.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024, Alibaba Cloud
+ */
+#include "internal.h"
+#include <trace/events/erofs.h>
+
+struct erofs_fileio_rq {
+ struct bio_vec bvecs[BIO_MAX_VECS];
+ struct bio bio;
+ struct kiocb iocb;
+};
+
+struct erofs_fileio {
+ struct erofs_map_blocks map;
+ struct erofs_map_dev dev;
+ struct erofs_fileio_rq *rq;
+};
+
+static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
+{
+ struct erofs_fileio_rq *rq =
+ container_of(iocb, struct erofs_fileio_rq, iocb);
+ struct folio_iter fi;
+
+ if (ret > 0) {
+ if (ret != rq->bio.bi_iter.bi_size) {
+ bio_advance(&rq->bio, ret);
+ zero_fill_bio(&rq->bio);
+ }
+ ret = 0;
+ }
+ if (rq->bio.bi_end_io) {
+ rq->bio.bi_end_io(&rq->bio);
+ } else {
+ bio_for_each_folio_all(fi, &rq->bio) {
+ DBG_BUGON(folio_test_uptodate(fi.folio));
+ erofs_onlinefolio_end(fi.folio, ret);
+ }
+ }
+ bio_uninit(&rq->bio);
+ kfree(rq);
+}
+
+static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
+{
+ struct iov_iter iter;
+ int ret;
+
+ if (!rq)
+ return;
+ rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
+ rq->iocb.ki_ioprio = get_current_ioprio();
+ rq->iocb.ki_complete = erofs_fileio_ki_complete;
+ rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
+ IOCB_DIRECT : 0;
+ iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
+ rq->bio.bi_iter.bi_size);
+ ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
+ if (ret != -EIOCBQUEUED)
+ erofs_fileio_ki_complete(&rq->iocb, ret);
+}
+
+static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
+{
+ struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
+ GFP_KERNEL | __GFP_NOFAIL);
+
+ bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+ rq->iocb.ki_filp = mdev->m_fp;
+ return rq;
+}
+
+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
+{
+ return &erofs_fileio_rq_alloc(mdev)->bio;
+}
+
+void erofs_fileio_submit_bio(struct bio *bio)
+{
+ return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
+ bio));
+}
+
+static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
+{
+ struct inode *inode = folio_inode(folio);
+ struct erofs_map_blocks *map = &io->map;
+ unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
+ loff_t pos = folio_pos(folio), ofs;
+ struct iov_iter iter;
+ struct bio_vec bv;
+ int err = 0;
+
+ erofs_onlinefolio_init(folio);
+ while (cur < end) {
+ if (!in_range(pos + cur, map->m_la, map->m_llen)) {
+ map->m_la = pos + cur;
+ map->m_llen = end - cur;
+ err = erofs_map_blocks(inode, map);
+ if (err)
+ break;
+ }
+
+ ofs = folio_pos(folio) + cur - map->m_la;
+ len = min_t(loff_t, map->m_llen - ofs, end - cur);
+ if (map->m_flags & EROFS_MAP_META) {
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ void *src;
+
+ src = erofs_read_metabuf(&buf, inode->i_sb,
+ map->m_pa + ofs, EROFS_KMAP);
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ break;
+ }
+ bvec_set_folio(&bv, folio, len, cur);
+ iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
+ if (copy_to_iter(src, len, &iter) != len) {
+ erofs_put_metabuf(&buf);
+ err = -EIO;
+ break;
+ }
+ erofs_put_metabuf(&buf);
+ } else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, cur + len);
+ attached = 0;
+ } else {
+ if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
+ map->m_deviceid != io->dev.m_deviceid)) {
+io_retry:
+ erofs_fileio_rq_submit(io->rq);
+ io->rq = NULL;
+ }
+
+ if (!io->rq) {
+ io->dev = (struct erofs_map_dev) {
+ .m_pa = io->map.m_pa + ofs,
+ .m_deviceid = io->map.m_deviceid,
+ };
+ err = erofs_map_dev(inode->i_sb, &io->dev);
+ if (err)
+ break;
+ io->rq = erofs_fileio_rq_alloc(&io->dev);
+ io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
+ attached = 0;
+ }
+ if (!attached++)
+ erofs_onlinefolio_split(folio);
+ if (!bio_add_folio(&io->rq->bio, folio, len, cur))
+ goto io_retry;
+ io->dev.m_pa += len;
+ }
+ cur += len;
+ }
+ erofs_onlinefolio_end(folio, err);
+ return err;
+}
+
+static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
+{
+ struct erofs_fileio io = {};
+ int err;
+
+ trace_erofs_read_folio(folio, true);
+ err = erofs_fileio_scan_folio(&io, folio);
+ erofs_fileio_rq_submit(io.rq);
+ return err;
+}
+
+static void erofs_fileio_readahead(struct readahead_control *rac)
+{
+ struct inode *inode = rac->mapping->host;
+ struct erofs_fileio io = {};
+ struct folio *folio;
+ int err;
+
+ trace_erofs_readpages(inode, readahead_index(rac),
+ readahead_count(rac), true);
+ while ((folio = readahead_folio(rac))) {
+ err = erofs_fileio_scan_folio(&io, folio);
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(inode)->nid);
+ }
+ erofs_fileio_rq_submit(io.rq);
+}
+
+const struct address_space_operations erofs_fileio_aops = {
+ .read_folio = erofs_fileio_read_folio,
+ .readahead = erofs_fileio_readahead,
+};
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 419432be3223..db29190656eb 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -5,11 +5,26 @@
* Copyright (C) 2021, Alibaba Cloud
*/
#include "xattr.h"
-
#include <trace/events/erofs.h>
-static void *erofs_read_inode(struct erofs_buf *buf,
- struct inode *inode, unsigned int *ofs)
+static int erofs_fill_symlink(struct inode *inode, void *kaddr,
+ unsigned int m_pofs)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ loff_t off;
+
+ m_pofs += vi->xattr_isize;
+ /* check if it cannot be handled with the fast symlink scheme */
+ if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
+ check_add_overflow(m_pofs, inode->i_size, &off) ||
+ off > i_blocksize(inode))
+ return 0;
+
+ inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL);
+ return inode->i_link ? 0 : -ENOMEM;
+}
+
+static int erofs_read_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
@@ -20,20 +35,21 @@ static void *erofs_read_inode(struct erofs_buf *buf,
struct erofs_inode_compact *dic;
struct erofs_inode_extended *die, *copied = NULL;
union erofs_inode_i_u iu;
- unsigned int ifmt;
- int err;
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ unsigned int ifmt, ofs;
+ int err = 0;
blkaddr = erofs_blknr(sb, inode_loc);
- *ofs = erofs_blkoff(sb, inode_loc);
+ ofs = erofs_blkoff(sb, inode_loc);
- kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
+ kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
if (IS_ERR(kaddr)) {
erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
vi->nid, PTR_ERR(kaddr));
- return kaddr;
+ return PTR_ERR(kaddr);
}
- dic = kaddr + *ofs;
+ dic = kaddr + ofs;
ifmt = le16_to_cpu(dic->i_format);
if (ifmt & ~EROFS_I_ALL) {
erofs_err(sb, "unsupported i_format %u of nid %llu",
@@ -54,11 +70,11 @@ static void *erofs_read_inode(struct erofs_buf *buf,
case EROFS_INODE_LAYOUT_EXTENDED:
vi->inode_isize = sizeof(struct erofs_inode_extended);
/* check if the extended inode crosses a block boundary */
- if (*ofs + vi->inode_isize <= sb->s_blocksize) {
- *ofs += vi->inode_isize;
+ if (ofs + vi->inode_isize <= sb->s_blocksize) {
+ ofs += vi->inode_isize;
die = (struct erofs_inode_extended *)dic;
} else {
- const unsigned int gotten = sb->s_blocksize - *ofs;
+ const unsigned int gotten = sb->s_blocksize - ofs;
copied = kmalloc(vi->inode_isize, GFP_KERNEL);
if (!copied) {
@@ -66,16 +82,16 @@ static void *erofs_read_inode(struct erofs_buf *buf,
goto err_out;
}
memcpy(copied, dic, gotten);
- kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr + 1),
+ kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1),
EROFS_KMAP);
if (IS_ERR(kaddr)) {
erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
vi->nid, PTR_ERR(kaddr));
kfree(copied);
- return kaddr;
+ return PTR_ERR(kaddr);
}
- *ofs = vi->inode_isize - gotten;
- memcpy((u8 *)copied + gotten, kaddr, *ofs);
+ ofs = vi->inode_isize - gotten;
+ memcpy((u8 *)copied + gotten, kaddr, ofs);
die = copied;
}
vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
@@ -91,11 +107,10 @@ static void *erofs_read_inode(struct erofs_buf *buf,
inode->i_size = le64_to_cpu(die->i_size);
kfree(copied);
- copied = NULL;
break;
case EROFS_INODE_LAYOUT_COMPACT:
vi->inode_isize = sizeof(struct erofs_inode_compact);
- *ofs += vi->inode_isize;
+ ofs += vi->inode_isize;
vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
inode->i_mode = le16_to_cpu(dic->i_mode);
@@ -115,11 +130,21 @@ static void *erofs_read_inode(struct erofs_buf *buf,
goto err_out;
}
+ if (unlikely(inode->i_size < 0)) {
+ erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
+ err = -EFSCORRUPTED;
+ goto err_out;
+ }
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
case S_IFLNK:
vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
+ if (S_ISLNK(inode->i_mode)) {
+ err = erofs_fill_symlink(inode, kaddr, ofs);
+ if (err)
+ goto err_out;
+ }
break;
case S_IFCHR:
case S_IFBLK:
@@ -165,65 +190,23 @@ static void *erofs_read_inode(struct erofs_buf *buf,
inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
else
inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
- return kaddr;
-
err_out:
- DBG_BUGON(1);
- kfree(copied);
- erofs_put_metabuf(buf);
- return ERR_PTR(err);
-}
-
-static int erofs_fill_symlink(struct inode *inode, void *kaddr,
- unsigned int m_pofs)
-{
- struct erofs_inode *vi = EROFS_I(inode);
- unsigned int bsz = i_blocksize(inode);
- char *lnk;
-
- /* if it cannot be handled with fast symlink scheme */
- if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
- inode->i_size >= bsz || inode->i_size < 0) {
- inode->i_op = &erofs_symlink_iops;
- return 0;
- }
-
- lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
- if (!lnk)
- return -ENOMEM;
-
- m_pofs += vi->xattr_isize;
- /* inline symlink data shouldn't cross block boundary */
- if (m_pofs + inode->i_size > bsz) {
- kfree(lnk);
- erofs_err(inode->i_sb,
- "inline data cross block boundary @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- memcpy(lnk, kaddr + m_pofs, inode->i_size);
- lnk[inode->i_size] = '\0';
-
- inode->i_link = lnk;
- inode->i_op = &erofs_fast_symlink_iops;
- return 0;
+ DBG_BUGON(err);
+ erofs_put_metabuf(&buf);
+ return err;
}
static int erofs_fill_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- void *kaddr;
- unsigned int ofs;
- int err = 0;
+ int err;
trace_erofs_fill_inode(inode);
/* read inode base data from disk */
- kaddr = erofs_read_inode(&buf, inode, &ofs);
- if (IS_ERR(kaddr))
- return PTR_ERR(kaddr);
+ err = erofs_read_inode(inode);
+ if (err)
+ return err;
/* setup the new inode */
switch (inode->i_mode & S_IFMT) {
@@ -240,9 +223,10 @@ static int erofs_fill_inode(struct inode *inode)
inode_nohighmem(inode);
break;
case S_IFLNK:
- err = erofs_fill_symlink(inode, kaddr, ofs);
- if (err)
- goto out_unlock;
+ if (inode->i_link)
+ inode->i_op = &erofs_fast_symlink_iops;
+ else
+ inode->i_op = &erofs_symlink_iops;
inode_nohighmem(inode);
break;
case S_IFCHR:
@@ -251,10 +235,9 @@ static int erofs_fill_inode(struct inode *inode)
case S_IFSOCK:
inode->i_op = &erofs_generic_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
- goto out_unlock;
+ return 0;
default:
- err = -EFSCORRUPTED;
- goto out_unlock;
+ return -EFSCORRUPTED;
}
mapping_set_large_folios(inode->i_mapping);
@@ -268,14 +251,17 @@ static int erofs_fill_inode(struct inode *inode)
err = -EOPNOTSUPP;
#endif
} else {
- inode->i_mapping->a_ops = &erofs_raw_access_aops;
+ inode->i_mapping->a_ops = &erofs_aops;
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (erofs_is_fscache_mode(inode->i_sb))
inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb)))
+ inode->i_mapping->a_ops = &erofs_fileio_aops;
+#endif
}
-out_unlock:
- erofs_put_metabuf(&buf);
+
return err;
}
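
The rewritten fast-symlink path above folds the old size and boundary tests into one overflow-safe comparison. A hedged sketch of the check_add_overflow() idiom from <linux/overflow.h>: it stores a + b into the third argument and returns true if the addition wrapped, so the range check stays sound even for hostile on-disk values:

#include <linux/overflow.h>
#include <linux/types.h>

/* illustrative: does [ofs, ofs + size) fit below limit without wrapping? */
static bool example_inline_fits(unsigned int ofs, loff_t size, loff_t limit)
{
	loff_t end;

	if (check_add_overflow((loff_t)ofs, size, &end))
		return false;		/* ofs + size wrapped around */
	return end <= limit;
}
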
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 45dc15ebd870..4efd578d7c62 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -49,7 +49,7 @@ typedef u32 erofs_blk_t;
struct erofs_device_info {
char *path;
struct erofs_fscache *fscache;
- struct file *bdev_file;
+ struct file *file;
struct dax_device *dax_dev;
u64 dax_part_off;
@@ -130,6 +130,7 @@ struct erofs_sb_info {
struct erofs_sb_lz4_info lz4;
#endif /* CONFIG_EROFS_FS_ZIP */
+ struct file *fdev;
struct inode *packed_inode;
struct erofs_dev_context *devs;
struct dax_device *dax_dev;
@@ -190,9 +191,15 @@ struct erofs_sb_info {
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option)
+static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
+{
+ return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev;
+}
+
static inline bool erofs_is_fscache_mode(struct super_block *sb)
{
- return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !sb->s_bdev;
+ return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) &&
+ !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev;
}
enum {
@@ -365,6 +372,7 @@ struct erofs_map_dev {
struct erofs_fscache *m_fscache;
struct block_device *m_bdev;
struct dax_device *m_daxdev;
+ struct file *m_fp;
u64 m_dax_part_off;
erofs_off_t m_pa;
@@ -373,7 +381,8 @@ struct erofs_map_dev {
extern const struct super_operations erofs_sops;
-extern const struct address_space_operations erofs_raw_access_aops;
+extern const struct address_space_operations erofs_aops;
+extern const struct address_space_operations erofs_fileio_aops;
extern const struct address_space_operations z_erofs_aops;
extern const struct address_space_operations erofs_fscache_access_aops;
@@ -404,6 +413,9 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
+void erofs_onlinefolio_init(struct folio *folio);
+void erofs_onlinefolio_split(struct folio *folio);
+void erofs_onlinefolio_end(struct folio *folio, int err);
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask,
@@ -477,6 +489,14 @@ static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
+void erofs_fileio_submit_bio(struct bio *bio);
+#else
+static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
+static inline void erofs_fileio_submit_bio(struct bio *bio) {}
+#endif
+
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 6cb5c8916174..666873f745da 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -10,6 +10,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/exportfs.h>
+#include <linux/backing-dev.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
@@ -161,7 +162,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
struct erofs_deviceslot *dis;
- struct file *bdev_file;
+ struct file *file;
dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
if (IS_ERR(dis))
@@ -183,13 +184,17 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
return PTR_ERR(fscache);
dif->fscache = fscache;
} else if (!sbi->devs->flatdev) {
- bdev_file = bdev_file_open_by_path(dif->path, BLK_OPEN_READ,
- sb->s_type, NULL);
- if (IS_ERR(bdev_file))
- return PTR_ERR(bdev_file);
- dif->bdev_file = bdev_file;
- dif->dax_dev = fs_dax_get_by_bdev(file_bdev(bdev_file),
- &dif->dax_part_off, NULL, NULL);
+ file = erofs_is_fileio_mode(sbi) ?
+ filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) :
+ bdev_file_open_by_path(dif->path,
+ BLK_OPEN_READ, sb->s_type, NULL);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ dif->file = file;
+ if (!erofs_is_fileio_mode(sbi))
+ dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
+ &dif->dax_part_off, NULL, NULL);
}
dif->blocks = le32_to_cpu(dis->blocks);
@@ -348,7 +353,7 @@ static int erofs_read_superblock(struct super_block *sb)
ret = erofs_scan_devices(sb, dsb);
if (erofs_is_fscache_mode(sb))
- erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
+ erofs_info(sb, "[deprecated] fscache-based on-demand read feature in use. Use at your own risk!");
out:
erofs_put_metabuf(&buf);
return ret;
@@ -566,15 +571,16 @@ static void erofs_set_sysfs_name(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- if (erofs_is_fscache_mode(sb)) {
- if (sbi->domain_id)
- super_set_sysfs_name_generic(sb, "%s,%s",sbi->domain_id,
- sbi->fsid);
- else
- super_set_sysfs_name_generic(sb, "%s", sbi->fsid);
- return;
- }
- super_set_sysfs_name_id(sb);
+ if (sbi->domain_id)
+ super_set_sysfs_name_generic(sb, "%s,%s", sbi->domain_id,
+ sbi->fsid);
+ else if (sbi->fsid)
+ super_set_sysfs_name_generic(sb, "%s", sbi->fsid);
+ else if (erofs_is_fileio_mode(sbi))
+ super_set_sysfs_name_generic(sb, "%s",
+ bdi_dev_name(sb->s_bdi));
+ else
+ super_set_sysfs_name_id(sb);
}
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
@@ -589,14 +595,15 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &erofs_sops;
sbi->blkszbits = PAGE_SHIFT;
- if (erofs_is_fscache_mode(sb)) {
+ if (!sb->s_bdev) {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
- err = erofs_fscache_register_fs(sb);
- if (err)
- return err;
-
+ if (erofs_is_fscache_mode(sb)) {
+ err = erofs_fscache_register_fs(sb);
+ if (err)
+ return err;
+ }
err = super_setup_bdi(sb);
if (err)
return err;
@@ -644,7 +651,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_POSIXACL;
else
sb->s_flags &= ~SB_POSIXACL;
- erofs_set_sysfs_name(sb);
#ifdef CONFIG_EROFS_FS_ZIP
xa_init(&sbi->managed_pslots);
@@ -682,6 +688,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
+ erofs_set_sysfs_name(sb);
err = erofs_register_sysfs(sb);
if (err)
return err;
@@ -693,11 +700,24 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
static int erofs_fc_get_tree(struct fs_context *fc)
{
struct erofs_sb_info *sbi = fc->s_fs_info;
+ int ret;
if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
- return get_tree_bdev(fc, erofs_fc_fill_super);
+ ret = get_tree_bdev(fc, erofs_fc_fill_super);
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (ret == -ENOTBLK) {
+ if (!fc->source)
+ return invalf(fc, "No source specified");
+ sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(sbi->fdev))
+ return PTR_ERR(sbi->fdev);
+
+ return get_tree_nodev(fc, erofs_fc_fill_super);
+ }
+#endif
+ return ret;
}
static int erofs_fc_reconfigure(struct fs_context *fc)
@@ -727,8 +747,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
struct erofs_device_info *dif = ptr;
fs_put_dax(dif->dax_dev, NULL);
- if (dif->bdev_file)
- fput(dif->bdev_file);
+ if (dif->file)
+ fput(dif->file);
erofs_fscache_unregister_cookie(dif->fscache);
dif->fscache = NULL;
kfree(dif->path);
@@ -791,7 +811,7 @@ static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
+ if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev)
kill_anon_super(sb);
else
kill_block_super(sb);
@@ -801,6 +821,8 @@ static void erofs_kill_sb(struct super_block *sb)
erofs_fscache_unregister_fs(sb);
kfree(sbi->fsid);
kfree(sbi->domain_id);
+ if (sbi->fdev)
+ fput(sbi->fdev);
kfree(sbi);
sb->s_fs_info = NULL;
}
@@ -903,7 +925,7 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = EROFS_NAME_LEN;
if (uuid_is_null(&sb->s_uuid))
- buf->f_fsid = u64_to_fsid(erofs_is_fscache_mode(sb) ? 0 :
+ buf->f_fsid = u64_to_fsid(!sb->s_bdev ? 0 :
huge_encode_dev(sb->s_bdev->bd_dev));
else
buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c
index 435e515c0792..63cffd0fd261 100644
--- a/fs/erofs/sysfs.c
+++ b/fs/erofs/sysfs.c
@@ -205,34 +205,16 @@ static struct kobject erofs_feat = {
int erofs_register_sysfs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- char *name;
- char *str = NULL;
int err;
- if (erofs_is_fscache_mode(sb)) {
- if (sbi->domain_id) {
- str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id,
- sbi->fsid);
- if (!str)
- return -ENOMEM;
- name = str;
- } else {
- name = sbi->fsid;
- }
- } else {
- name = sb->s_id;
- }
sbi->s_kobj.kset = &erofs_root;
init_completion(&sbi->s_kobj_unregister);
- err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", name);
- kfree(str);
- if (err)
- goto put_sb_kobj;
- return 0;
-
-put_sb_kobj:
- kobject_put(&sbi->s_kobj);
- wait_for_completion(&sbi->s_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s",
+ sb->s_sysfs_name);
+ if (err) {
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+ }
return err;
}
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 424f656cd765..8936790618c6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -122,42 +122,6 @@ static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
return fo->mapping == MNGD_MAPPING(sbi);
}
-/*
- * bit 30: I/O error occurred on this folio
- * bit 0 - 29: remaining parts to complete this folio
- */
-#define Z_EROFS_FOLIO_EIO (1 << 30)
-
-static void z_erofs_onlinefolio_init(struct folio *folio)
-{
- union {
- atomic_t o;
- void *v;
- } u = { .o = ATOMIC_INIT(1) };
-
- folio->private = u.v; /* valid only if file-backed folio is locked */
-}
-
-static void z_erofs_onlinefolio_split(struct folio *folio)
-{
- atomic_inc((atomic_t *)&folio->private);
-}
-
-static void z_erofs_onlinefolio_end(struct folio *folio, int err)
-{
- int orig, v;
-
- do {
- orig = atomic_read((atomic_t *)&folio->private);
- v = (orig - 1) | (err ? Z_EROFS_FOLIO_EIO : 0);
- } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
-
- if (v & ~Z_EROFS_FOLIO_EIO)
- return;
- folio->private = 0;
- folio_end_read(folio, !(v & Z_EROFS_FOLIO_EIO));
-}
-
#define Z_EROFS_ONSTACK_PAGES 32
/*
@@ -232,7 +196,8 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct page *nextpage = *candidate_bvpage;
if (!nextpage) {
- nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
+ nextpage = __erofs_allocpage(pagepool, GFP_KERNEL,
+ true);
if (!nextpage)
return -ENOMEM;
set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -965,7 +930,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
int err = 0;
tight = (bs == PAGE_SIZE);
- z_erofs_onlinefolio_init(folio);
+ erofs_onlinefolio_init(folio);
do {
if (offset + end - 1 < map->m_la ||
offset + end - 1 >= map->m_la + map->m_llen) {
@@ -1024,7 +989,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
if (err)
break;
- z_erofs_onlinefolio_split(folio);
+ erofs_onlinefolio_split(folio);
if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
f->pcl->multibases = true;
if (f->pcl->length < offset + end - map->m_la) {
@@ -1044,7 +1009,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
tight = (bs == PAGE_SIZE);
}
} while ((end = cur) > 0);
- z_erofs_onlinefolio_end(folio, err);
+ erofs_onlinefolio_end(folio, err);
return err;
}
@@ -1147,7 +1112,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
cur += len;
}
kunmap_local(dst);
- z_erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
+ erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
list_del(p);
kfree(bvi);
}
@@ -1190,9 +1155,10 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
struct page *page = bvec->page;
- /* compressed data ought to be valid before decompressing */
- if (!page) {
- err = -EIO;
+ /* compressed data ought to be valid when decompressing */
+ if (IS_ERR(page) || !page) {
+ bvec->page = NULL; /* clear the failure reason */
+ err = page ? PTR_ERR(page) : -EIO;
continue;
}
be->compressed_pages[i] = page;
@@ -1268,8 +1234,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
.fillgaps = pcl->multibases,
- .gfp = pcl->besteffort ?
- GFP_KERNEL | __GFP_NOFAIL :
+ .gfp = pcl->besteffort ? GFP_KERNEL :
GFP_NOWAIT | __GFP_NORETRY
}, be->pagepool);
@@ -1302,7 +1267,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
DBG_BUGON(z_erofs_page_is_invalidated(page));
if (!z_erofs_is_shortlived_page(page)) {
- z_erofs_onlinefolio_end(page_folio(page), err);
+ erofs_onlinefolio_end(page_folio(page), err);
continue;
}
if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
@@ -1333,8 +1298,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
return err;
}
-static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
- struct page **pagepool)
+static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ struct page **pagepool)
{
struct z_erofs_decompress_backend be = {
.sb = io->sb,
@@ -1343,6 +1308,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
};
z_erofs_next_pcluster_t owned = io->head;
+ int err = io->eio ? -EIO : 0;
while (owned != Z_EROFS_PCLUSTER_TAIL) {
DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
@@ -1350,12 +1316,13 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
be.pcl = container_of(owned, struct z_erofs_pcluster, next);
owned = READ_ONCE(be.pcl->next);
- z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
+ err = z_erofs_decompress_pcluster(&be, err) ?: err;
if (z_erofs_is_inline_pcluster(be.pcl))
z_erofs_free_pcluster(be.pcl);
else
erofs_workgroup_put(&be.pcl->obj);
}
+ return err;
}
static void z_erofs_decompressqueue_work(struct work_struct *work)
@@ -1428,6 +1395,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
struct z_erofs_bvec zbv;
struct address_space *mapping;
struct folio *folio;
+ struct page *page;
int bs = i_blocksize(f->inode);
/* Except for inplace folios, the entire folio can be used for I/Os */
@@ -1450,7 +1418,6 @@ repeat:
* file-backed folios will be used instead.
*/
if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
- folio->private = 0;
tocache = true;
goto out_tocache;
}
@@ -1468,7 +1435,7 @@ repeat:
}
folio_lock(folio);
- if (folio->mapping == mc) {
+ if (likely(folio->mapping == mc)) {
/*
* The cached folio is still in managed cache but without
* a valid `->private` pcluster hint. Let's reconnect them.
@@ -1478,41 +1445,48 @@ repeat:
/* compressed_bvecs[] already takes a ref before */
folio_put(folio);
}
-
- /* no need to submit if it is already up-to-date */
- if (folio_test_uptodate(folio)) {
- folio_unlock(folio);
- bvec->bv_page = NULL;
+ if (likely(folio->private == pcl)) {
+ /* don't submit cache I/Os again if already uptodate */
+ if (folio_test_uptodate(folio)) {
+ folio_unlock(folio);
+ bvec->bv_page = NULL;
+ }
+ return;
}
- return;
+ /*
+ * Already linked with another pcluster, which only appears in
+ * crafted images by fuzzers for now. But handle this anyway.
+ */
+ tocache = false; /* use temporary short-lived pages */
+ } else {
+ DBG_BUGON(1); /* referenced managed folios can't be truncated */
+ tocache = true;
}
-
- /*
- * It has been truncated, so it's unsafe to reuse this one. Let's
- * allocate a new page for compressed data.
- */
- DBG_BUGON(folio->mapping);
- tocache = true;
folio_unlock(folio);
folio_put(folio);
out_allocfolio:
- zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
+ page = __erofs_allocpage(&f->pagepool, gfp, true);
spin_lock(&pcl->obj.lockref.lock);
- if (pcl->compressed_bvecs[nr].page) {
- erofs_pagepool_add(&f->pagepool, zbv.page);
+ if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
+ if (page)
+ erofs_pagepool_add(&f->pagepool, page);
spin_unlock(&pcl->obj.lockref.lock);
cond_resched();
goto repeat;
}
- bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
- folio = page_folio(zbv.page);
- /* first mark it as a temporary shortlived folio (now 1 ref) */
- folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
+ pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
spin_unlock(&pcl->obj.lockref.lock);
+ bvec->bv_page = page;
+ if (!page)
+ return;
+ folio = page_folio(page);
out_tocache:
if (!tocache || bs != PAGE_SIZE ||
- filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
+ filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
+ /* turn into a temporary shortlived folio (1 ref) */
+ folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
return;
+ }
folio_attach_private(folio, pcl);
/* drop a refcount added by allocpage (then 2 refs in total here) */
folio_put(folio);
@@ -1647,17 +1621,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
cur = mdev.m_pa;
end = cur + pcl->pclustersize;
do {
- z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
- if (!bvec.bv_page)
- continue;
-
+ bvec.bv_page = NULL;
if (bio && (cur != last_pa ||
bio->bi_bdev != mdev.m_bdev)) {
-io_retry:
- if (!erofs_is_fscache_mode(sb))
- submit_bio(bio);
- else
+drain_io:
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);
if (memstall) {
psi_memstall_leave(&pflags);
@@ -1666,6 +1639,15 @@ io_retry:
bio = NULL;
}
+ if (!bvec.bv_page) {
+ z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
+ if (!bvec.bv_page)
+ continue;
+ if (cur + bvec.bv_len > end)
+ bvec.bv_len = end - cur;
+ DBG_BUGON(bvec.bv_len < sb->s_blocksize);
+ }
+
if (unlikely(PageWorkingset(bvec.bv_page)) &&
!memstall) {
psi_memstall_enter(&pflags);
@@ -1673,10 +1655,13 @@ io_retry:
}
if (!bio) {
- bio = erofs_is_fscache_mode(sb) ?
- erofs_fscache_bio_alloc(&mdev) :
- bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
- REQ_OP_READ, GFP_NOIO);
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ bio = erofs_fileio_bio_alloc(&mdev);
+ else if (erofs_is_fscache_mode(sb))
+ bio = erofs_fscache_bio_alloc(&mdev);
+ else
+ bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+ REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_endio;
bio->bi_iter.bi_sector = cur >> 9;
bio->bi_private = q[JQ_SUBMIT];
@@ -1685,13 +1670,9 @@ io_retry:
++nr_bios;
}
- if (cur + bvec.bv_len > end)
- bvec.bv_len = end - cur;
- DBG_BUGON(bvec.bv_len < sb->s_blocksize);
if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
bvec.bv_offset))
- goto io_retry;
-
+ goto drain_io;
last_pa = cur + bvec.bv_len;
bypass = false;
} while ((cur += bvec.bv_len) < end);
@@ -1703,10 +1684,12 @@ io_retry:
} while (owned_head != Z_EROFS_PCLUSTER_TAIL);
if (bio) {
- if (!erofs_is_fscache_mode(sb))
- submit_bio(bio);
- else
+ if (erofs_is_fileio_mode(EROFS_SB(sb)))
+ erofs_fileio_submit_bio(bio);
+ else if (erofs_is_fscache_mode(sb))
erofs_fscache_submit_bio(bio);
+ else
+ submit_bio(bio);
if (memstall)
psi_memstall_leave(&pflags);
}
@@ -1722,26 +1705,28 @@ io_retry:
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
-static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
- bool force_fg, bool ra)
+static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
+ unsigned int ra_folios)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
+ struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
+ bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
+ int err;
if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
- return;
- z_erofs_submit_queue(f, io, &force_fg, ra);
+ return 0;
+ z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
/* handle bypass queue (no i/o pclusters) immediately */
- z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
-
+ err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
if (!force_fg)
- return;
+ return err;
/* wait until all bios are completed */
wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
- z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
+ return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err;
}
/*
@@ -1803,7 +1788,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
struct inode *const inode = folio->mapping->host;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
int err;
@@ -1815,9 +1799,8 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
z_erofs_pcluster_readmore(&f, NULL, false);
z_erofs_pcluster_end(&f);
- /* if some compressed cluster ready, need submit them anyway */
- z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
-
+ /* if some pclusters are ready, submit them anyway */
+ err = z_erofs_runqueue(&f, 0) ?: err;
if (err && err != -EINTR)
erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
err, folio->index, EROFS_I(inode)->nid);
@@ -1830,7 +1813,6 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
static void z_erofs_readahead(struct readahead_control *rac)
{
struct inode *const inode = rac->mapping->host;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct folio *head = NULL, *folio;
unsigned int nr_folios;
@@ -1860,7 +1842,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
- z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
+ (void)z_erofs_runqueue(&f, nr_folios);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
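
The runqueue rework above threads a sticky error through several stages using the GNU "?:" shorthand (a ?: b yields a when a is nonzero, else b), so a later success never clobbers an earlier failure, while a later failure takes precedence. A hedged sketch with stand-in steps:

static int example_step_a(void) { return 0; }	/* stand-in */
static int example_step_b(void) { return 0; }	/* stand-in */

static int example_run_all(void)
{
	int err = 0;

	err = example_step_a() ?: err;	/* keep old err if step_a succeeds */
	err = example_step_b() ?: err;	/* a new failure overrides it */
	return err;			/* 0 only if every step succeeded */
}
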
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 403af6e31d5b..e980e29873a5 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -687,32 +687,30 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int err = 0;
trace_erofs_map_blocks_enter(inode, map, flags);
-
- /* when trying to read beyond EOF, leave it unmapped */
- if (map->m_la >= inode->i_size) {
+ if (map->m_la >= inode->i_size) { /* post-EOF unmapped extent */
map->m_llen = map->m_la + 1 - inode->i_size;
map->m_la = inode->i_size;
map->m_flags = 0;
- goto out;
- }
-
- err = z_erofs_fill_inode_lazy(inode);
- if (err)
- goto out;
-
- if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
- !vi->z_tailextent_headlcn) {
- map->m_la = 0;
- map->m_llen = inode->i_size;
- map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
- EROFS_MAP_FRAGMENT;
- goto out;
+ } else {
+ err = z_erofs_fill_inode_lazy(inode);
+ if (!err) {
+ if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
+ !vi->z_tailextent_headlcn) {
+ map->m_la = 0;
+ map->m_llen = inode->i_size;
+ map->m_flags = EROFS_MAP_MAPPED |
+ EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
+ } else {
+ err = z_erofs_do_map_blocks(inode, map, flags);
+ }
+ }
+ if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
+ unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
+ map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
+ err = -EOPNOTSUPP;
+ if (err)
+ map->m_llen = 0;
}
-
- err = z_erofs_do_map_blocks(inode, map, flags);
-out:
- if (err)
- map->m_llen = 0;
trace_erofs_map_blocks_exit(inode, map, flags, err);
return err;
}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 9afdb722fa92..22c934f3a080 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -349,9 +349,9 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
struct eventfd_ctx *ctx;
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- ctx = eventfd_ctx_fileget(f.file);
+ ctx = eventfd_ctx_fileget(fd_file(f));
fdput(f);
return ctx;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f53ca4f7fced..1ae4542f0bd8 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -420,7 +420,7 @@ static bool busy_loop_ep_timeout(unsigned long start_time,
static bool ep_busy_loop_on(struct eventpoll *ep)
{
- return !!ep->busy_poll_usecs || net_busy_loop_on();
+ return !!READ_ONCE(ep->busy_poll_usecs) || net_busy_loop_on();
}
static bool ep_busy_loop_end(void *p, unsigned long start_time)
@@ -2200,11 +2200,6 @@ static int do_epoll_create(int flags)
error = PTR_ERR(file);
goto out_free_fd;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
- ep->busy_poll_usecs = 0;
- ep->busy_poll_budget = 0;
- ep->prefer_busy_poll = false;
-#endif
ep->file = file;
fd_install(fd, file);
return fd;
@@ -2266,17 +2261,17 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
error = -EBADF;
f = fdget(epfd);
- if (!f.file)
+ if (!fd_file(f))
goto error_return;
/* Get the "struct file *" for the target file */
tf = fdget(fd);
- if (!tf.file)
+ if (!fd_file(tf))
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
- if (!file_can_poll(tf.file))
+ if (!file_can_poll(fd_file(tf)))
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
@@ -2289,7 +2284,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
- if (f.file == tf.file || !is_file_epoll(f.file))
+ if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f)))
goto error_tgt_fput;
/*
@@ -2300,7 +2295,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
- if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+ if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) ||
(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
@@ -2309,7 +2304,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
@@ -2330,16 +2325,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (error)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
- if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
- is_file_epoll(tf.file)) {
+ if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen ||
+ is_file_epoll(fd_file(tf))) {
mutex_unlock(&ep->mtx);
error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
if (error)
goto error_tgt_fput;
loop_check_gen++;
full_check = 1;
- if (is_file_epoll(tf.file)) {
- tep = tf.file->private_data;
+ if (is_file_epoll(fd_file(tf))) {
+ tep = fd_file(tf)->private_data;
error = -ELOOP;
if (ep_loop_check(ep, tep) != 0)
goto error_tgt_fput;
@@ -2355,14 +2350,14 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
- epi = ep_find(ep, tf.file, fd);
+ epi = ep_find(ep, fd_file(tf), fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds->events |= EPOLLERR | EPOLLHUP;
- error = ep_insert(ep, epds, tf.file, fd, full_check);
+ error = ep_insert(ep, epds, fd_file(tf), fd, full_check);
} else
error = -EEXIST;
break;
@@ -2443,7 +2438,7 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
/* Get the "struct file *" for the eventpoll file */
f = fdget(epfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
/*
@@ -2451,14 +2446,14 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
- if (!is_file_epoll(f.file))
+ if (!is_file_epoll(fd_file(f)))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/* Time to fish for events ... */
error = ep_poll(ep, events, maxevents, to);
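
The eventpoll changes bundle three independent cleanups: ep_busy_loop_on() reads busy_poll_usecs through READ_ONCE() because the field can now be written while other tasks read it without a shared lock; the explicit zeroing in do_epoll_create() goes away (the structure is already zero-allocated, so the stores were redundant); and every f.file/tf.file dereference is routed through the fd_file() accessor in preparation for struct fd becoming opaque. A minimal userspace sketch of the first point, with a C11 relaxed atomic standing in for READ_ONCE() (the type and function names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Userspace analogy: a relaxed atomic load plays the role of
     * READ_ONCE(), guaranteeing one untorn read that the compiler
     * cannot hoist out of a loop or split into byte accesses. */
    struct ep_like {
        _Atomic unsigned int busy_poll_usecs;  /* settable at runtime */
    };

    static bool busy_loop_on(struct ep_like *ep)
    {
        return atomic_load_explicit(&ep->busy_poll_usecs,
                                    memory_order_relaxed) != 0;
    }

A relaxed load suffices here: the check needs a single coherent read of the value, not ordering against other memory.
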
diff --git a/fs/exec.c b/fs/exec.c
index 50e76cc633c4..6c53920795c2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -145,13 +145,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
goto out;
/*
- * may_open() has already checked for this, so it should be
- * impossible to trip now. But we need to be extra cautious
- * and check again at the very end too.
+ * Check do_open_execat() for an explanation.
*/
error = -EACCES;
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
- path_noexec(&file->f_path)))
+ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+ path_noexec(&file->f_path))
goto exit;
error = -ENOEXEC;
@@ -712,80 +710,6 @@ static int copy_strings_kernel(int argc, const char *const *argv,
#ifdef CONFIG_MMU
/*
- * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
- * the binfmt code determines where the new stack should reside, we shift it to
- * its final location. The process proceeds as follows:
- *
- * 1) Use shift to calculate the new vma endpoints.
- * 2) Extend vma to cover both the old and new ranges. This ensures the
- * arguments passed to subsequent functions are consistent.
- * 3) Move vma's page tables to the new range.
- * 4) Free up any cleared pgd range.
- * 5) Shrink the vma to cover only the new range.
- */
-static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long old_start = vma->vm_start;
- unsigned long old_end = vma->vm_end;
- unsigned long length = old_end - old_start;
- unsigned long new_start = old_start - shift;
- unsigned long new_end = old_end - shift;
- VMA_ITERATOR(vmi, mm, new_start);
- struct vm_area_struct *next;
- struct mmu_gather tlb;
-
- BUG_ON(new_start > new_end);
-
- /*
- * ensure there are no vmas between where we want to go
- * and where we are
- */
- if (vma != vma_next(&vmi))
- return -EFAULT;
-
- vma_iter_prev_range(&vmi);
- /*
- * cover the whole range: [new_start, old_end)
- */
- if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
- return -ENOMEM;
-
- /*
- * move the page tables downwards, on failure we rely on
- * process cleanup to remove whatever mess we made.
- */
- if (length != move_page_tables(vma, old_start,
- vma, new_start, length, false, true))
- return -ENOMEM;
-
- lru_add_drain();
- tlb_gather_mmu(&tlb, mm);
- next = vma_next(&vmi);
- if (new_end > old_start) {
- /*
- * when the old and new regions overlap clear from new_end.
- */
- free_pgd_range(&tlb, new_end, old_end, new_end,
- next ? next->vm_start : USER_PGTABLES_CEILING);
- } else {
- /*
- * otherwise, clean from old_start; this is done to not touch
- * the address space in [new_end, old_start) some architectures
- * have constraints on va-space that make this illegal (IA64) -
- * for the others its just a little faster.
- */
- free_pgd_range(&tlb, old_start, old_end, new_end,
- next ? next->vm_start : USER_PGTABLES_CEILING);
- }
- tlb_finish_mmu(&tlb);
-
- vma_prev(&vmi);
- /* Shrink the vma to just the new range */
- return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
-}
-
-/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
*/
@@ -813,7 +737,8 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_base = calc_max_stack_size(stack_base);
/* Add space for stack randomization. */
- stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+ if (current->flags & PF_RANDOMIZE)
+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
@@ -877,7 +802,12 @@ int setup_arg_pages(struct linux_binprm *bprm,
/* Move stack pages down in memory. */
if (stack_shift) {
- ret = shift_arg_pages(vma, stack_shift);
+ /*
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location.
+ */
+ ret = relocate_vma_down(vma, stack_shift);
if (ret)
goto out_unlock;
}
@@ -954,7 +884,6 @@ EXPORT_SYMBOL(transfer_args_to_stack);
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
struct file *file;
- int err;
struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC,
@@ -971,24 +900,20 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
- goto out;
+ return file;
/*
- * may_open() has already checked for this, so it should be
- * impossible to trip now. But we need to be extra cautious
- * and check again at the very end too.
+ * In the past the regular type check was here. It moved to may_open() in
+ * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
+ * an invariant that all non-regular files error out before we get here.
*/
- err = -EACCES;
- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
- path_noexec(&file->f_path)))
- goto exit;
+ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+ path_noexec(&file->f_path)) {
+ fput(file);
+ return ERR_PTR(-EACCES);
+ }
-out:
return file;
-
-exit:
- fput(file);
- return ERR_PTR(err);
}
/**
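
In do_open_execat(), the shared out/exit labels are replaced by early returns, and the WARN_ON_ONCE() shrinks to cover only the S_ISREG() check: a file on a noexec mount is a legitimate runtime condition that should simply fail with -EACCES, whereas a non-regular file getting past may_open() would indicate a kernel bug worth warning about. The error travels in the returned pointer itself. A self-contained sketch of that ERR_PTR convention (the open_like() helper is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Re-creation of the kernel's ERR_PTR encoding: small negative
     * errno values live in the top page of the pointer range, so one
     * pointer can carry either a valid object or an error code. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *open_like(int fail)
    {
        static int obj;

        if (fail)
            return ERR_PTR(-EACCES);  /* early return, no goto labels */
        return &obj;
    }

    int main(void)
    {
        void *f = open_like(1);

        if (IS_ERR(f))
            printf("error: %ld\n", PTR_ERR(f));
        return 0;
    }
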
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 0356c88252bd..ce9be95c9172 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -91,11 +91,8 @@ int exfat_load_bitmap(struct super_block *sb)
return -EIO;
type = exfat_get_entry_type(ep);
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_BITMAP)
- continue;
- if (ep->dentry.bitmap.flags == 0x0) {
+ if (type == TYPE_BITMAP &&
+ ep->dentry.bitmap.flags == 0x0) {
int err;
err = exfat_allocate_bitmap(sb, ep);
@@ -103,6 +100,9 @@ int exfat_load_bitmap(struct super_block *sb)
return err;
}
brelse(bh);
+
+ if (type == TYPE_UNUSED)
+ return -EINVAL;
}
if (exfat_get_next_cluster(sb, &clu.dir))
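
The balloc.c rewrite changes what a TYPE_UNUSED directory entry means to the scan: it is the end-of-directory marker, and reaching it without having seen the first allocation-bitmap entry (flags == 0x0) now fails with -EINVAL instead of silently falling out of the loop. A condensed restatement of the new control flow, with entry types reduced to an enum and the helpers hypothetical:

    #include <errno.h>
    #include <stddef.h>

    enum entry_type { TYPE_UNUSED, TYPE_BITMAP, TYPE_OTHER };

    static int load_bitmap_like(const enum entry_type *dir, size_t nr)
    {
        for (size_t i = 0; i < nr; i++) {
            if (dir[i] == TYPE_BITMAP)
                return 0;        /* found: allocate it and succeed */
            if (dir[i] == TYPE_UNUSED)
                return -EINVAL;  /* end marker before any bitmap entry */
        }
        return -EIO;             /* walked off the directory chain */
    }
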
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index ecc5db952deb..3cdc1de362a9 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -10,6 +10,7 @@
#include <linux/ratelimit.h>
#include <linux/nls.h>
#include <linux/blkdev.h>
+#include <uapi/linux/exfat.h>
#define EXFAT_ROOT_INO 1
@@ -148,6 +149,9 @@ enum {
#define DIR_CACHE_SIZE \
(DIV_ROUND_UP(EXFAT_DEN_TO_B(ES_MAX_ENTRY_NUM), SECTOR_SIZE) + 1)
+/* Superblock flags */
+#define EXFAT_FLAGS_SHUTDOWN 1
+
struct exfat_dentry_namebuf {
char *lfn;
int lfnbuf_len; /* usually MAX_UNINAME_BUF_SIZE */
@@ -267,6 +271,8 @@ struct exfat_sb_info {
unsigned int clu_srch_ptr; /* cluster search pointer */
unsigned int used_clusters; /* number of used clusters */
+ unsigned long s_exfat_flags; /* Exfat superblock flags */
+
struct mutex s_lock; /* superblock lock */
struct mutex bitmap_lock; /* bitmap lock */
struct exfat_mount_options options;
@@ -309,13 +315,6 @@ struct exfat_inode_info {
/* for avoiding the race between alloc and free */
unsigned int cache_valid_id;
- /*
- * NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access.
- * physically allocated size.
- */
- loff_t i_size_ondisk;
- /* block-aligned i_size (used in cont_write_begin) */
- loff_t i_size_aligned;
/* on-disk position of directory entry or 0 */
loff_t i_pos;
loff_t valid_size;
@@ -338,6 +337,11 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
return container_of(inode, struct exfat_inode_info, vfs_inode);
}
+static inline int exfat_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(EXFAT_FLAGS_SHUTDOWN, &EXFAT_SB(sb)->s_exfat_flags);
+}
+
/*
* If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to
* save ATTR_RO instead of ->i_mode.
@@ -417,6 +421,11 @@ static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
}
+static inline loff_t exfat_ondisk_size(const struct inode *inode)
+{
+ return ((loff_t)inode->i_blocks) << 9;
+}
+
/* super.c */
int exfat_set_volume_dirty(struct super_block *sb);
int exfat_clear_volume_dirty(struct super_block *sb);
@@ -461,6 +470,7 @@ int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long exfat_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int exfat_force_shutdown(struct super_block *sb, u32 flags);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
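
Two small helpers in exfat_fs.h carry most of this series: exfat_forced_shutdown() tests a new superblock flag bit, and exfat_ondisk_size() recomputes the physically allocated size from inode->i_blocks, which counts 512-byte sectors. Being able to derive that value on demand is what allows the cached i_size_ondisk and i_size_aligned fields to be deleted outright:

    #include <stdint.h>

    /* i_blocks counts 512-byte units, so the on-disk (allocated) size
     * is always recoverable and no longer needs caching per inode. */
    static inline int64_t ondisk_size(uint64_t i_blocks)
    {
        return (int64_t)(i_blocks << 9);
    }

Because exFAT allocates whole clusters, the derived size is always cluster-aligned, which is why the call sites switch from EXFAT_B_TO_CLU_ROUND_UP() to the plain EXFAT_B_TO_CLU() conversion.
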
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 64c31867bc76..a25d7eb789f4 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -29,7 +29,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (ret)
return ret;
- num_clusters = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
new_num_clusters = EXFAT_B_TO_CLU_ROUND_UP(size, sbi);
if (new_num_clusters == num_clusters)
@@ -74,8 +74,6 @@ out:
/* Expanded range not zeroed, do not update valid_size */
i_size_write(inode, size);
- ei->i_size_aligned = round_up(size, sb->s_blocksize);
- ei->i_size_ondisk = ei->i_size_aligned;
inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
mark_inode_dirty(inode);
@@ -159,7 +157,7 @@ int __exfat_truncate(struct inode *inode)
exfat_set_volume_dirty(sb);
num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
- num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters_phys = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
@@ -245,8 +243,6 @@ void exfat_truncate(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- unsigned int blocksize = i_blocksize(inode);
- loff_t aligned_size;
int err;
mutex_lock(&sbi->s_lock);
@@ -264,17 +260,6 @@ void exfat_truncate(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
write_size:
- aligned_size = i_size_read(inode);
- if (aligned_size & (blocksize - 1)) {
- aligned_size |= (blocksize - 1);
- aligned_size++;
- }
-
- if (ei->i_size_ondisk > i_size_read(inode))
- ei->i_size_ondisk = aligned_size;
-
- if (ei->i_size_aligned > i_size_read(inode))
- ei->i_size_aligned = aligned_size;
mutex_unlock(&sbi->s_lock);
}
@@ -302,6 +287,9 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
unsigned int ia_valid;
int error;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
@@ -485,6 +473,19 @@ static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
return 0;
}
+static int exfat_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return exfat_force_shutdown(sb, flags);
+}
+
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -495,6 +496,8 @@ long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return exfat_ioctl_get_attributes(inode, user_attr);
case FAT_IOCTL_SET_ATTRIBUTES:
return exfat_ioctl_set_attributes(filp, user_attr);
+ case EXFAT_IOC_SHUTDOWN:
+ return exfat_ioctl_shutdown(inode->i_sb, arg);
case FITRIM:
return exfat_ioctl_fitrim(inode, arg);
default:
@@ -515,6 +518,9 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *inode = filp->f_mapping->host;
int err;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
err = __generic_file_fsync(filp, start, end, datasync);
if (err)
return err;
@@ -526,32 +532,32 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
-static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)
+static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
{
int err;
+ loff_t pos;
struct inode *inode = file_inode(file);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *ops = mapping->a_ops;
- while (start < end) {
- u32 zerofrom, len;
- struct page *page = NULL;
+ pos = ei->valid_size;
+ while (pos < new_valid_size) {
+ u32 len;
+ struct folio *folio;
- zerofrom = start & (PAGE_SIZE - 1);
- len = PAGE_SIZE - zerofrom;
- if (start + len > end)
- len = end - start;
+ len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
+ if (pos + len > new_valid_size)
+ len = new_valid_size - pos;
- err = ops->write_begin(file, mapping, start, len, &page, NULL);
+ err = ops->write_begin(file, mapping, pos, len, &folio, NULL);
if (err)
goto out;
- zero_user_segment(page, zerofrom, zerofrom + len);
-
- err = ops->write_end(file, mapping, start, len, len, page, NULL);
+ err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
- start += len;
+ pos += len;
balance_dirty_pages_ratelimited(mapping);
cond_resched();
@@ -579,7 +585,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto unlock;
if (pos > valid_size) {
- ret = exfat_file_zeroed_range(file, valid_size, pos);
+ ret = exfat_extend_valid_size(file, pos);
if (ret < 0 && ret != -ENOSPC) {
exfat_err(inode->i_sb,
"write: fail to zero from %llu to %llu(%zd)",
@@ -613,26 +619,46 @@ unlock:
return ret;
}
-static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
{
- int ret;
+ int err;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct exfat_inode_info *ei = EXFAT_I(inode);
- loff_t start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- loff_t end = min_t(loff_t, i_size_read(inode),
+ loff_t start, end;
+
+ if (!inode_trylock(inode))
+ return VM_FAULT_RETRY;
+
+ start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ end = min_t(loff_t, i_size_read(inode),
start + vma->vm_end - vma->vm_start);
- if ((vma->vm_flags & VM_WRITE) && ei->valid_size < end) {
- ret = exfat_file_zeroed_range(file, ei->valid_size, end);
- if (ret < 0) {
- exfat_err(inode->i_sb,
- "mmap: fail to zero from %llu to %llu(%d)",
- start, end, ret);
- return ret;
+ if (ei->valid_size < end) {
+ err = exfat_extend_valid_size(file, end);
+ if (err < 0) {
+ inode_unlock(inode);
+ return vmf_fs_error(err);
}
}
- return generic_file_mmap(file, vma);
+ inode_unlock(inode);
+
+ return filemap_page_mkwrite(vmf);
+}
+
+static const struct vm_operations_struct exfat_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = exfat_page_mkwrite,
+};
+
+static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &exfat_file_vm_ops;
+ return 0;
}
const struct file_operations exfat_file_operations = {
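
file.c replaces the eager zeroing that exfat_file_mmap() used to do over [valid_size, end) with a page_mkwrite handler: the valid-size extension now happens only when a writable shared mapping actually dirties a page, and a failed inode_trylock() returns VM_FAULT_RETRY rather than risking a lock-ordering problem in fault context. The extension routine itself is a write_begin()/write_end() loop that advances at most one page at a time. A userspace restatement of that loop, where zero_range() stands in for the write_begin/write_end pair (names hypothetical):

    #include <stdint.h>

    #define PAGE_SIZE_ 4096u

    static long extend_valid_size(uint64_t valid_size,
                                  uint64_t new_valid_size,
                                  long (*zero_range)(uint64_t pos,
                                                     uint32_t len))
    {
        uint64_t pos = valid_size;

        while (pos < new_valid_size) {
            /* never cross a page boundary in one step */
            uint32_t len = PAGE_SIZE_ -
                           (uint32_t)(pos & (PAGE_SIZE_ - 1));

            if (pos + len > new_valid_size)
                len = (uint32_t)(new_valid_size - pos);
            long err = zero_range(pos, len);
            if (err < 0)
                return err;
            pos += len;
        }
        return 0;
    }
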
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index dd894e558c91..d724de8f57bf 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -102,6 +102,9 @@ int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
@@ -130,11 +133,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
unsigned int local_clu_offset = clu_offset;
- unsigned int num_to_be_allocated = 0, num_clusters = 0;
+ unsigned int num_to_be_allocated = 0, num_clusters;
- if (ei->i_size_ondisk > 0)
- num_clusters =
- EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
if (clu_offset >= num_clusters)
num_to_be_allocated = clu_offset - num_clusters + 1;
@@ -260,21 +261,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return 0;
}
-static int exfat_map_new_buffer(struct exfat_inode_info *ei,
- struct buffer_head *bh, loff_t pos)
-{
- if (buffer_delay(bh) && pos > ei->i_size_aligned)
- return -EIO;
- set_buffer_new(bh);
-
- /*
- * Adjust i_size_aligned if i_size_ondisk is bigger than it.
- */
- if (ei->i_size_ondisk > ei->i_size_aligned)
- ei->i_size_aligned = ei->i_size_ondisk;
- return 0;
-}
-
static int exfat_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -288,7 +274,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
sector_t last_block;
sector_t phys = 0;
sector_t valid_blks;
- loff_t pos;
mutex_lock(&sbi->s_lock);
last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
@@ -316,12 +301,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
mapped_blocks = sbi->sect_per_clus - sec_offset;
max_blocks = min(mapped_blocks, max_blocks);
- pos = EXFAT_BLK_TO_B((iblock + 1), sb);
- if ((create && iblock >= last_block) || buffer_delay(bh_result)) {
- if (ei->i_size_ondisk < pos)
- ei->i_size_ondisk = pos;
- }
-
map_bh(bh_result, sb, phys);
if (buffer_delay(bh_result))
clear_buffer_delay(bh_result);
@@ -342,13 +321,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
}
/* The area has not been written, map and mark as new. */
- err = exfat_map_new_buffer(ei, bh_result, pos);
- if (err) {
- exfat_fs_error(sb,
- "requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
- pos, ei->i_size_aligned);
- goto unlock_ret;
- }
+ set_buffer_new(bh_result);
ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
mark_inode_dirty(inode);
@@ -371,7 +344,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
* The block has been partially written,
* zero the unwritten part and map the block.
*/
- loff_t size, off;
+ loff_t size, off, pos;
max_blocks = 1;
@@ -382,7 +355,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
if (!bh_result->b_folio)
goto done;
- pos -= sb->s_blocksize;
+ pos = EXFAT_BLK_TO_B(iblock, sb);
size = ei->valid_size - pos;
off = pos & (PAGE_SIZE - 1);
@@ -432,6 +405,9 @@ static void exfat_readahead(struct readahead_control *rac)
static int exfat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
return mpage_writepages(mapping, wbc, exfat_get_block);
}
@@ -448,12 +424,14 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
static int exfat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = block_write_begin(mapping, pos, len, pagep, exfat_get_block);
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
+ ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
if (ret < 0)
exfat_write_failed(mapping, pos+len);
@@ -463,21 +441,13 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
static int exfat_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
- struct page *pagep, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct exfat_inode_info *ei = EXFAT_I(inode);
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
-
- if (ei->i_size_aligned < i_size_read(inode)) {
- exfat_fs_error(inode->i_sb,
- "invalid size(size(%llu) > aligned(%llu)\n",
- i_size_read(inode), ei->i_size_aligned);
- return -EIO;
- }
-
+ err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len)
exfat_write_failed(mapping, pos+len);
@@ -505,20 +475,6 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
int rw = iov_iter_rw(iter);
ssize_t ret;
- if (rw == WRITE) {
- /*
- * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
- * so we need to update the ->i_size_aligned to block boundary.
- *
- * But we must fill the remaining area or hole by nul for
- * updating ->i_size_aligned
- *
- * Return 0, and fallback to normal buffered write.
- */
- if (EXFAT_I(inode)->i_size_aligned < size)
- return 0;
- }
-
/*
* Need to use the DIO_LOCKING for avoiding the race
* condition of exfat_get_block() and ->truncate().
@@ -532,8 +488,18 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else
size = pos + ret;
- /* zero the unwritten part in the partially written block */
- if (rw == READ && pos < ei->valid_size && ei->valid_size < size) {
+ if (rw == WRITE) {
+ /*
+ * If the block had been partially written before this write,
+ * ->valid_size will not be updated in exfat_get_block(),
+ * update it here.
+ */
+ if (ei->valid_size < size) {
+ ei->valid_size = size;
+ mark_inode_dirty(inode);
+ }
+ } else if (pos < ei->valid_size && ei->valid_size < size) {
+ /* zero the unwritten part in the partially written block */
iov_iter_revert(iter, size - ei->valid_size);
iov_iter_zero(size - ei->valid_size, iter);
}
@@ -668,15 +634,6 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
i_size_write(inode, size);
- /* ondisk and aligned size should be aligned with block size */
- if (size & (inode->i_sb->s_blocksize - 1)) {
- size |= (inode->i_sb->s_blocksize - 1);
- size++;
- }
-
- ei->i_size_aligned = size;
- ei->i_size_ondisk = size;
-
exfat_save_attr(inode, info->attr);
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
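
With the cached sizes gone, exfat_get_block() simply marks freshly mapped blocks new, and the direct-I/O completion takes over maintaining valid_size: a DIO write ending beyond valid_size advances it (get_block cannot, because a block that was partially written earlier is not "new"), while a DIO read crossing valid_size zeroes the never-written tail via iov_iter_revert()/iov_iter_zero(). In outline (names hypothetical):

    #include <stdint.h>

    /* Post-DIO fixup: 'size' is the end of the transferred range,
     * 'valid' the valid size before the I/O started. */
    static void dio_fixup(int is_write, uint64_t pos, uint64_t size,
                          uint64_t *valid,
                          void (*zero_tail)(uint64_t from, uint64_t to))
    {
        if (is_write) {
            if (*valid < size)
                *valid = size;       /* extend the valid data range */
        } else if (pos < *valid && *valid < size) {
            zero_tail(*valid, size); /* reader must not see stale bytes */
        }
    }
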
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 631ad9e8e32a..2c4c44229352 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -372,8 +372,6 @@ static int exfat_find_empty_entry(struct inode *inode,
/* directory inode should be updated in here */
i_size_write(inode, size);
- ei->i_size_ondisk += sbi->cluster_size;
- ei->i_size_aligned += sbi->cluster_size;
ei->valid_size += sbi->cluster_size;
ei->flags = p_dir->flags;
inode->i_blocks += sbi->cluster_size >> 9;
@@ -549,6 +547,9 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
@@ -772,6 +773,9 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
entry = ei->entry;
@@ -825,6 +829,9 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
@@ -915,6 +922,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
@@ -982,6 +992,9 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
struct exfat_entry_set_cache old_es, new_es;
int sync = IS_DIRSYNC(inode);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
num_new_entries = exfat_calc_num_entries(p_uniname);
if (num_new_entries < 0)
return num_new_entries;
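
Every namespace-mutating operation in namei.c (create, unlink, mkdir, rmdir, rename) now opens with the same gate: if the filesystem was shut down, return -EIO before taking s_lock or dirtying the volume. A userspace analogy of that gate, with one atomic flag word standing in for set_bit()/test_bit() on s_exfat_flags:

    #include <errno.h>
    #include <stdatomic.h>

    static _Atomic unsigned long sb_flags;
    #define FLAG_SHUTDOWN (1UL << 0)

    static void mark_shutdown(void)
    {
        atomic_fetch_or(&sb_flags, FLAG_SHUTDOWN);   /* like set_bit() */
    }

    static int mutating_op(void)
    {
        if (atomic_load(&sb_flags) & FLAG_SHUTDOWN)  /* like test_bit() */
            return -EIO;
        /* ... proceed with the directory update ... */
        return 0;
    }
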
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index afdf13c34ff5..1ac011088ce7 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -779,8 +779,11 @@ int exfat_create_upcase_table(struct super_block *sb)
le32_to_cpu(ep->dentry.upcase.checksum));
brelse(bh);
- if (ret && ret != -EIO)
+ if (ret && ret != -EIO) {
+ /* free memory from exfat_load_upcase_table call */
+ exfat_free_upcase_table(sbi);
goto load_default;
+ }
/* load successfully */
return ret;
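
The nls.c change is a plain resource-leak fix: when the on-disk upcase table loads but fails checksum verification with anything other than -EIO, the code falls back to the built-in table, and previously left the just-loaded one allocated. The shape of the fix is release-before-fallback (helpers hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    static int load_table(int **table,
                          int (*load_custom)(int **),
                          int (*load_builtin)(int **))
    {
        int ret = load_custom(table);

        if (ret && ret != -EIO) {
            free(*table);        /* previously leaked on this path */
            *table = NULL;
            return load_builtin(table);
        }
        return ret;
    }
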
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 323ecebe6f0e..bd57844414aa 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -46,6 +46,9 @@ static int exfat_sync_fs(struct super_block *sb, int wait)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
int err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return 0;
+
if (!wait)
return 0;
@@ -167,6 +170,41 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+int exfat_force_shutdown(struct super_block *sb, u32 flags)
+{
+ int ret;
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (exfat_forced_shutdown(sb))
+ return 0;
+
+ switch (flags) {
+ case EXFAT_GOING_DOWN_DEFAULT:
+ case EXFAT_GOING_DOWN_FULLSYNC:
+ ret = bdev_freeze(sb->s_bdev);
+ if (ret)
+ return ret;
+ bdev_thaw(sb->s_bdev);
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ case EXFAT_GOING_DOWN_NOSYNC:
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (opts->discard)
+ opts->discard = 0;
+ return 0;
+}
+
+static void exfat_shutdown(struct super_block *sb)
+{
+ exfat_force_shutdown(sb, EXFAT_GOING_DOWN_NOSYNC);
+}
+
static struct inode *exfat_alloc_inode(struct super_block *sb)
{
struct exfat_inode_info *ei;
@@ -193,6 +231,7 @@ static const struct super_operations exfat_sops = {
.sync_fs = exfat_sync_fs,
.statfs = exfat_statfs,
.show_options = exfat_show_options,
+ .shutdown = exfat_shutdown,
};
enum {
@@ -370,8 +409,6 @@ static int exfat_read_root(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- ei->i_size_aligned = i_size_read(inode);
- ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, EXFAT_ATTR_SUBDIR);
ei->i_crtime = simple_inode_init_ts(inode);
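
exfat_force_shutdown() implements two flavours: EXFAT_GOING_DOWN_DEFAULT and _FULLSYNC flush pending data by freezing and immediately thawing the block device before setting the shutdown bit, while _NOSYNC sets the bit without syncing; discard is disabled in every case so no further trims reach the device. The same helper backs both the new EXFAT_IOC_SHUTDOWN ioctl and the ->shutdown super operation. In sketch form, with flush_all() standing in for the bdev_freeze()/bdev_thaw() pair:

    #include <errno.h>

    enum going_down { GD_DEFAULT, GD_FULLSYNC, GD_NOSYNC };

    static int force_shutdown(enum going_down how,
                              int (*flush_all)(void),
                              void (*mark_dead)(void))
    {
        switch (how) {
        case GD_DEFAULT:
        case GD_FULLSYNC: {
            int ret = flush_all();   /* sync everything first */

            if (ret)
                return ret;
            mark_dead();
            break;
        }
        case GD_NOSYNC:
            mark_dead();             /* stop cold, no sync */
            break;
        default:
            return -EINVAL;
        }
        return 0;
    }
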
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 087457061c6e..402fecf90a44 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
inode_inc_iversion(dir);
- block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+ block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
@@ -263,7 +263,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
- bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
+ bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
bool has_filetype;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
@@ -290,7 +290,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode_query_iversion(inode);
+ *(u64 *)file->private_data = inode_query_iversion(inode);
need_revalidate = false;
}
de = (ext2_dirent *)(kaddr+offset);
@@ -434,7 +434,7 @@ int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(&folio->page, pos, len, ext2_get_block);
+ return __block_write_begin(folio, pos, len, ext2_get_block);
}
static int ext2_handle_dirsync(struct inode *dir)
@@ -703,8 +703,30 @@ not_empty:
return 0;
}
+static int ext2_dir_open(struct inode *inode, struct file *file)
+{
+ file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int ext2_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static loff_t ext2_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return generic_llseek_cookie(file, offset, whence,
+ (u64 *)file->private_data);
+}
+
const struct file_operations ext2_dir_operations = {
- .llseek = generic_file_llseek,
+ .open = ext2_dir_open,
+ .release = ext2_dir_release,
+ .llseek = ext2_dir_llseek,
.read = generic_read_dir,
.iterate_shared = ext2_readdir,
.unlocked_ioctl = ext2_ioctl,
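
ext2 stops using file->f_version as its readdir change cookie. Each open directory now carries a private u64, allocated in ->open and freed in ->release, that is compared against the inode's iversion to decide whether a remembered position can still be trusted; llseek goes through generic_llseek_cookie() so seeking resets the cookie coherently. The life cycle in miniature:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct dir_cookie { uint64_t v; };

    static struct dir_cookie *dir_open(void)
    {
        return calloc(1, sizeof(struct dir_cookie)); /* one per open */
    }

    static bool need_revalidate(const struct dir_cookie *c,
                                uint64_t iversion)
    {
        return c->v != iversion;  /* directory changed since last pass */
    }

    static void mark_current(struct dir_cookie *c, uint64_t iversion)
    {
        c->v = iversion;
    }

The ext4 change further down follows the same pattern, folding the cookie into its existing dir_private_info.
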
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 0caa1650cee8..30f8201c155f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -916,11 +916,11 @@ static void ext2_readahead(struct readahead_control *rac)
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
@@ -928,11 +928,11 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
static int ext2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
ext2_write_failed(mapping, pos + len);
return ret;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index cd725bebe69e..2a135075468d 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -18,15 +18,17 @@ unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 hi;
__u32 provided, calculated;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
if (!ext4_has_metadata_csum(sb))
return 1;
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
@@ -40,14 +42,16 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb,
void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
if (!ext4_has_metadata_csum(sb))
return;
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
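
The ext4 inode-bitmap checksum helpers drop their sz parameter and derive the length internally: one bit per inode, so the byte count is EXT4_INODES_PER_GROUP(sb) >> 3. Computing it inside the helper removes any chance of a caller passing a size that disagrees with the group geometry:

    #include <stdint.h>

    /* One inode per bit: checksum length in bytes for a group's
     * inode bitmap. */
    static inline uint32_t inode_bitmap_bytes(uint32_t inodes_per_group)
    {
        return inodes_per_group >> 3;
    }
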
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ff4514e4626b..ef6a3c8f3a9a 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -133,6 +133,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
+ struct dir_private_info *info = file->private_data;
err = fscrypt_prepare_readdir(inode);
if (err)
@@ -229,7 +230,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (!inode_eq_iversion(inode, file->f_version)) {
+ if (!inode_eq_iversion(inode, info->cookie)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext4_dir_entry_2 *)
(bh->b_data + i);
@@ -249,7 +250,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
}
while (ctx->pos < inode->i_size
@@ -279,12 +280,20 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct fscrypt_str de_name =
FSTR_INIT(de->name,
de->name_len);
+ u32 hash;
+ u32 minor_hash;
+
+ if (IS_CASEFOLDED(inode)) {
+ hash = EXT4_DIRENT_HASH(de);
+ minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+ } else {
+ hash = 0;
+ minor_hash = 0;
+ }
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
- EXT4_DIRENT_HASH(de),
- EXT4_DIRENT_MINOR_HASH(de),
- &de_name, &fstr);
+ hash, minor_hash, &de_name, &fstr);
de_name = fstr;
fstr.len = save_len;
if (err)
@@ -384,6 +393,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
+ struct dir_private_info *info = file->private_data;
int dx_dir = is_dx_dir(inode);
loff_t ret, htree_max = ext4_get_htree_eof(file);
@@ -392,7 +402,7 @@ static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
htree_max, htree_max);
else
ret = ext4_llseek(file, offset, whence);
- file->f_version = inode_peek_iversion(inode) - 1;
+ info->cookie = inode_peek_iversion(inode) - 1;
return ret;
}
@@ -429,18 +439,15 @@ static void free_rb_tree_fname(struct rb_root *root)
*root = RB_ROOT;
}
-
-static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
- loff_t pos)
+static void ext4_htree_init_dir_info(struct file *filp, loff_t pos)
{
- struct dir_private_info *p;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
- p->curr_hash = pos2maj_hash(filp, pos);
- p->curr_minor_hash = pos2min_hash(filp, pos);
- return p;
+ struct dir_private_info *p = filp->private_data;
+
+ if (is_dx_dir(file_inode(filp)) && !p->initialized) {
+ p->curr_hash = pos2maj_hash(filp, pos);
+ p->curr_minor_hash = pos2min_hash(filp, pos);
+ p->initialized = true;
+ }
}
void ext4_htree_free_dir_info(struct dir_private_info *p)
@@ -552,12 +559,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
struct fname *fname;
int ret = 0;
- if (!info) {
- info = ext4_htree_create_dir_info(file, ctx->pos);
- if (!info)
- return -ENOMEM;
- file->private_data = info;
- }
+ ext4_htree_init_dir_info(file, ctx->pos);
if (ctx->pos == ext4_get_htree_eof(file))
return 0; /* EOF */
@@ -590,10 +592,10 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- !inode_eq_iversion(inode, file->f_version)) {
+ !inode_eq_iversion(inode, info->cookie)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
ret = ext4_htree_fill_tree(file, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
@@ -664,7 +666,19 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
return 0;
}
+static int ext4_dir_open(struct inode *inode, struct file *file)
+{
+ struct dir_private_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ file->private_data = info;
+ return 0;
+}
+
const struct file_operations ext4_dir_operations = {
+ .open = ext4_dir_open,
.llseek = ext4_dir_llseek,
.read = generic_read_dir,
.iterate_shared = ext4_readdir,
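
Two fixes travel together in ext4's dir.c. The dirent hash fields read by EXT4_DIRENT_HASH()/EXT4_DIRENT_MINOR_HASH() only exist for casefolded directories, so the encrypted-name path now passes zeroes otherwise. And dir_private_info is allocated unconditionally at open time, giving both the htree cursor and the new iversion cookie a home; the htree fields are then initialized lazily, exactly once, on the first readdir of a hashed directory. A sketch of that once-only initialization:

    #include <stdbool.h>
    #include <stdint.h>

    struct dir_info {
        uint64_t cookie;       /* replaces file->f_version */
        uint32_t curr_hash;
        uint32_t curr_minor_hash;
        bool     initialized;
    };

    static void htree_init_once(struct dir_info *p,
                                uint32_t maj, uint32_t min)
    {
        if (!p->initialized) {   /* first readdir on this open only */
            p->curr_hash = maj;
            p->curr_minor_hash = min;
            p->initialized = true;
        }
    }
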
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 08acd152261e..44b0d418143c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1058,6 +1058,7 @@ struct ext4_inode_info {
/* Number of ongoing updates on this inode */
atomic_t i_fc_updates;
+ atomic_t i_unwritten; /* Nr. of inflight conversions pending */
/* Fast commit wait queue for this inode */
wait_queue_head_t i_fc_wait;
@@ -1106,6 +1107,10 @@ struct ext4_inode_info {
/* mballoc */
atomic_t i_prealloc_active;
+
+ /* allocation reservation info for delalloc */
+ /* In case of bigalloc, this refers to clusters rather than blocks */
+ unsigned int i_reserved_data_blocks;
struct rb_root i_prealloc_node;
rwlock_t i_prealloc_lock;
@@ -1122,10 +1127,6 @@ struct ext4_inode_info {
/* ialloc */
ext4_group_t i_last_alloc_group;
- /* allocation reservation info for delalloc */
- /* In case of bigalloc, this refer to clusters rather than blocks */
- unsigned int i_reserved_data_blocks;
-
/* pending cluster reservations for bigalloc file systems */
struct ext4_pending_tree i_pending_tree;
@@ -1149,7 +1150,6 @@ struct ext4_inode_info {
*/
struct list_head i_rsv_conversion_list;
struct work_struct i_rsv_conversion_work;
- atomic_t i_unwritten; /* Nr. of inflight conversions pending */
spinlock_t i_block_reservation_lock;
@@ -2338,9 +2338,9 @@ struct ext4_dir_entry_2 {
((struct ext4_dir_entry_hash *) \
(((void *)(entry)) + \
((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
-#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)
+#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(entry)->hash)
#define EXT4_DIRENT_MINOR_HASH(entry) \
- le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash)
+ le32_to_cpu(EXT4_DIRENT_HASHES(entry)->minor_hash)
static inline bool ext4_hash_in_dirent(const struct inode *inode)
{
@@ -2462,6 +2462,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
#define DX_HASH_HALF_MD4_UNSIGNED 4
#define DX_HASH_TEA_UNSIGNED 5
#define DX_HASH_SIPHASH 6
+#define DX_HASH_LAST DX_HASH_SIPHASH
static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
@@ -2553,6 +2554,8 @@ struct dir_private_info {
__u32 curr_hash;
__u32 curr_minor_hash;
__u32 next_hash;
+ u64 cookie;
+ bool initialized;
};
/* calculate the first block number of the group */
@@ -2693,10 +2696,10 @@ struct mmpd_data {
extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
void ext4_block_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh);
@@ -3563,13 +3566,13 @@ int ext4_readpage_inline(struct inode *inode, struct folio *folio);
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- struct page **pagep);
+ struct folio **foliop);
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct folio *folio);
extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- struct page **pagep,
+ struct folio **foliop,
void **fsdata);
extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname,
@@ -3710,11 +3713,12 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
struct ext4_ext_path *path);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *,
- struct ext4_ext_path **,
- struct ext4_extent *, int);
+extern struct ext4_ext_path *ext4_ext_insert_extent(
+ handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags);
extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
- struct ext4_ext_path **,
+ struct ext4_ext_path *,
int flags);
extern void ext4_free_ext_path(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
@@ -3851,6 +3855,9 @@ static inline int ext4_buffer_uptodate(struct buffer_head *bh)
return buffer_uptodate(bh);
}
+extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block);
#endif /* __KERNEL__ */
#define EFSBADCRC EBADMSG /* Bad CRC detected */
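
ext4.h also repairs a long-standing unhygienic macro: EXT4_DIRENT_HASH(entry) expanded to an expression mentioning de rather than its own parameter, so it compiled (and happened to work) only where a variable named de was in scope. A self-contained demonstration of the failure mode:

    #include <stdio.h>

    struct dirent_like { unsigned hash; };

    /* Buggy: expands to de->hash no matter what argument is passed. */
    #define DIRENT_HASH_BUGGY(entry)  ((de)->hash)
    /* Fixed: actually uses the macro parameter. */
    #define DIRENT_HASH_FIXED(entry)  ((entry)->hash)

    int main(void)
    {
        struct dirent_like a = { .hash = 1 }, *de = &a;
        struct dirent_like b = { .hash = 2 };

        /* The buggy macro silently reads de->hash (== 1) here. */
        printf("buggy=%u fixed=%u\n",
               DIRENT_HASH_BUGGY(&b), DIRENT_HASH_FIXED(&b));
        return 0;
    }

Compiled and run, this prints buggy=1 fixed=2: the buggy macro ignores its argument entirely.
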
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e067f2dd0335..34e25eee6521 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -84,12 +84,11 @@ static void ext4_extent_block_csum_set(struct inode *inode,
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags);
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags);
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
@@ -106,21 +105,27 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
return 0;
}
+static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
+{
+ brelse(path->p_bh);
+ path->p_bh = NULL;
+}
+
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
int depth, i;
- if (!path)
+ if (IS_ERR_OR_NULL(path))
return;
depth = path->p_depth;
- for (i = 0; i <= depth; i++, path++) {
- brelse(path->p_bh);
- path->p_bh = NULL;
- }
+ for (i = 0; i <= depth; i++, path++)
+ ext4_ext_path_brelse(path);
}
void ext4_free_ext_path(struct ext4_ext_path *path)
{
+ if (IS_ERR_OR_NULL(path))
+ return;
ext4_ext_drop_refs(path);
kfree(path);
}
@@ -323,19 +328,18 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
return size;
}
-static inline int
+static inline struct ext4_ext_path *
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath, ext4_lblk_t lblk,
+ struct ext4_ext_path *path, ext4_lblk_t lblk,
int nofail)
{
- struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
if (nofail)
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
- return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
+ return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
flags);
}
@@ -635,8 +639,7 @@ int ext4_ext_precache(struct inode *inode)
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -689,7 +692,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
struct ext4_extent *ex;
int i;
- if (!path)
+ if (IS_ERR_OR_NULL(path))
return;
eh = path[depth].p_hdr;
@@ -881,11 +884,10 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
- struct ext4_ext_path **orig_path, int flags)
+ struct ext4_ext_path *path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
- struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
gfp_t gfp_flags = GFP_NOFS;
@@ -906,7 +908,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_ext_drop_refs(path);
if (depth > path[0].p_maxdepth) {
kfree(path);
- *orig_path = path = NULL;
+ path = NULL;
}
}
if (!path) {
@@ -961,8 +963,6 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
err:
ext4_free_ext_path(path);
- if (orig_path)
- *orig_path = NULL;
return ERR_PTR(ret);
}
@@ -1395,15 +1395,15 @@ out:
* finds empty index and adds new leaf.
* if no free index is found, then it requests in-depth growing.
*/
-static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
- unsigned int mb_flags,
- unsigned int gb_flags,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext)
+static struct ext4_ext_path *
+ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ unsigned int mb_flags, unsigned int gb_flags,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_ext_path *curp;
int depth, i, err = 0;
+ ext4_lblk_t ee_block = le32_to_cpu(newext->ee_block);
repeat:
i = depth = ext_depth(inode);
@@ -1422,42 +1422,38 @@ repeat:
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
- goto out;
+ goto errout;
/* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path))
- err = PTR_ERR(path);
- } else {
- /* tree is full, time to grow in depth */
- err = ext4_ext_grow_indepth(handle, inode, mb_flags);
- if (err)
- goto out;
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ return path;
+ }
- /* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
- }
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+ goto errout;
- /*
- * only first (depth 0 -> 1) produces free space;
- * in all other cases we have to split the grown tree
- */
- depth = ext_depth(inode);
- if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
- /* now we need to split */
- goto repeat;
- }
+ /* refill path */
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ if (IS_ERR(path))
+ return path;
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+ * in all other cases we have to split the grown tree
+ */
+ depth = ext_depth(inode);
+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
+ /* now we need to split */
+ goto repeat;
}
-out:
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -1749,12 +1745,23 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
+ goto clean;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+ * The path[k].p_bh is either unmodified or with no verified bit
+ * set (see ext4_ext_get_access()). So just clear the verified bit
+ * of the successfully modified extents buffers, which will force
+ * these extents to be checked to avoid using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
return err;
}
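
ext4_ext_correct_indexes() used to bail out on the first error, leaving index buffers it had already modified still flagged verified. The new clean: path instead walks back over the already-modified levels and clears their verified bits, so those extent blocks are re-checked on the next read rather than trusted with possibly inconsistent content; ext4_ext_rm_idx() below gets the identical treatment. The undo-by-invalidation shape:

    #include <stdbool.h>

    struct buf { bool verified; };

    /* On failure at level k, distrust every buffer already dirtied so
     * the checker re-validates it on the next read. */
    static void invalidate_modified(struct buf *path, int k, int depth)
    {
        while (++k < depth)
            path[k].verified = false;
    }
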
@@ -1876,7 +1883,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
- brelse(path[1].p_bh);
+ ext4_ext_path_brelse(path + 1);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
@@ -1964,16 +1971,15 @@ out:
* inserts requested extent as new one into the tree,
* creating new leaf in the no-space case.
*/
-int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext, int gb_flags)
+struct ext4_ext_path *
+ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
- struct ext4_ext_path *npath = NULL;
- int depth, len, err;
+ int depth, len, err = 0;
ext4_lblk_t next;
int mb_flags = 0, unwritten;
@@ -1981,14 +1987,16 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
mb_flags |= EXT4_MB_DELALLOC_RESERVED;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
/* try to insert block into found extent and return */
@@ -2026,7 +2034,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
@@ -2051,7 +2059,7 @@ prepend:
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_block = newext->ee_block;
@@ -2076,21 +2084,26 @@ prepend:
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
+ struct ext4_ext_path *npath;
+
ext_debug(inode, "next leaf block - %u\n", next);
- BUG_ON(npath != NULL);
npath = ext4_find_extent(inode, next, NULL, gb_flags);
- if (IS_ERR(npath))
- return PTR_ERR(npath);
+ if (IS_ERR(npath)) {
+ err = PTR_ERR(npath);
+ goto errout;
+ }
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug(inode, "next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
+ ext4_free_ext_path(path);
path = npath;
goto has_space;
}
ext_debug(inode, "next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+ ext4_free_ext_path(npath);
}
/*
@@ -2099,10 +2112,10 @@ prepend:
*/
if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
mb_flags |= EXT4_MB_USE_RESERVED;
- err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
- ppath, newext);
- if (err)
- goto cleanup;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
@@ -2111,7 +2124,7 @@ has_space:
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto cleanup;
+ goto errout;
if (!nearex) {
/* there is no extent in this leaf, create first one */
@@ -2169,17 +2182,20 @@ merge:
if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
-
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
if (err)
- goto cleanup;
+ goto errout;
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (err)
+ goto errout;
-cleanup:
- ext4_free_ext_path(npath);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
static int ext4_fill_es_cache_info(struct inode *inode,
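
The broader extents.c refactor changes the calling convention: functions that took struct ext4_ext_path ** and returned int (ext4_find_extent(), ext4_ext_insert_extent(), the split helpers) now take the path by value and return the possibly-reallocated path, or ERR_PTR() on failure, with the contract that an error return has already freed the path. That eliminates the double-pointer bookkeeping and the subtle cases where *ppath was left dangling on error. A sketch of the ownership-transfer convention, with userspace stand-ins:

    #include <errno.h>
    #include <stdlib.h>

    struct path { int depth; };

    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long e) { return (void *)e; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Consume the path; return the new one or an encoded error.
     * On error the input path has already been freed. */
    static struct path *grow_path(struct path *path, int want_depth)
    {
        struct path *np = realloc(path,
                                  sizeof(*np) * (want_depth + 1));

        if (!np) {
            free(path);          /* caller must not touch it again */
            return ERR_PTR(-ENOMEM);
        }
        np->depth = want_depth;
        return np;
    }
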
@@ -2279,27 +2295,26 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
{
int err;
ext4_fsblk_t leaf;
+ int k = depth - 1;
/* free index block */
- depth--;
- path = path + depth;
- leaf = ext4_idx_pblock(path->p_idx);
- if (unlikely(path->p_hdr->eh_entries == 0)) {
- EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+ leaf = ext4_idx_pblock(path[k].p_idx);
+ if (unlikely(path[k].p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k);
return -EFSCORRUPTED;
}
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
- if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
- int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+ if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) {
+ int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx;
len *= sizeof(struct ext4_extent_idx);
- memmove(path->p_idx, path->p_idx + 1, len);
+ memmove(path[k].p_idx, path[k].p_idx + 1, len);
}
- le16_add_cpu(&path->p_hdr->eh_entries, -1);
- err = ext4_ext_dirty(handle, inode, path);
+ le16_add_cpu(&path[k].p_hdr->eh_entries, -1);
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
@@ -2308,18 +2323,29 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
- while (--depth >= 0) {
- if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+ while (--k >= 0) {
+ if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr))
break;
- path--;
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
- path->p_idx->ei_block = (path+1)->p_idx->ei_block;
- err = ext4_ext_dirty(handle, inode, path);
+ goto clean;
+ path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block;
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+ * The path[k].p_bh is either unmodified or with no verified bit
+ * set (see ext4_ext_get_access()). So just clear the verified bit
+ * of the successfully modified extents buffers, which will force
+ * these extents to be checked to avoid using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
+
return err;
}
@@ -2872,11 +2898,12 @@ again:
* fail removing space due to ENOSPC so try to use
* reserved block if that happens.
*/
- err = ext4_force_split_extent_at(handle, inode, &path,
- end + 1, 1);
- if (err < 0)
+ path = ext4_force_split_extent_at(handle, inode, path,
+ end + 1, 1);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
goto out;
-
+ }
} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
partial.state == initial) {
/*
@@ -2934,8 +2961,7 @@ again:
err = ext4_ext_rm_leaf(handle, inode, path,
&partial, start, end);
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -2997,8 +3023,7 @@ again:
err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
ext_debug(inode, "return to level %d\n", i);
}
@@ -3113,7 +3138,7 @@ static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
return;
ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
- EXTENT_STATUS_WRITTEN);
+ EXTENT_STATUS_WRITTEN, 0);
}
/* FIXME!! we need to try to merge to left or right after zero-out */
@@ -3147,16 +3172,14 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
* a> the extent are splitted into two extent.
* b> split is not needed, and just mark the extent.
*
- * return 0 on success.
+ * Return an extent path pointer on success, or an error pointer on failure.
*/
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags)
{
- struct ext4_ext_path *path = *ppath;
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex, zero_ex;
@@ -3226,10 +3249,31 @@ static int ext4_split_extent_at(handle_t *handle,
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex2);
- err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
- if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (!IS_ERR(path))
goto out;
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+ * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
+ * will not return -ENOMEM, otherwise -ENOMEM will cause a
+ * retry in do_writepages(), and a WARN_ON may be triggered
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+ path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+
if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3280,14 +3324,17 @@ fix_extent_len:
* and err is a non-zero error code.
*/
ext4_ext_dirty(handle, inode, path + path->p_depth);
- return err;
out:
+ if (err) {
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
}
/*
- * ext4_split_extents() splits an extent and mark extent which is covered
+ * ext4_split_extent() splits an extent and marks the extent covered
* by @map as split_flags indicates
*
* It may result in splitting the extent into multiple extents (up to three)
@@ -3297,21 +3344,18 @@ out:
* c> Splits in three extents: Someone is splitting in the middle of the extent
*
*/
-static int ext4_split_extent(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_map_blocks *map,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_map_blocks *map,
+ int split_flag, int flags,
+ unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len, depth;
- int err = 0;
int unwritten;
int split_flag1, flags1;
- int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
@@ -3327,28 +3371,27 @@ static int ext4_split_extent(handle_t *handle,
EXT4_EXT_MARK_UNWRIT2;
if (split_flag & EXT4_EXT_DATA_VALID2)
split_flag1 |= EXT4_EXT_DATA_VALID1;
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
- if (err)
- goto out;
- } else {
- allocated = ee_len - (map->m_lblk - ee_block);
- }
- /*
- * Update path is required because previous ext4_split_extent_at() may
- * result in split of original leaf or extent zeroout.
- */
- path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
- if (IS_ERR(path))
- return PTR_ERR(path);
- depth = ext_depth(inode);
- ex = path[depth].p_ext;
- if (!ex) {
- EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
- (unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ if (IS_ERR(path))
+ return path;
+ /*
+ * Updating the path is required because the previous
+ * ext4_split_extent_at() may result in a split of the original
+ * leaf or an extent zeroout.
+ */
+ path = ext4_find_extent(inode, map->m_lblk, path, flags);
+ if (IS_ERR(path))
+ return path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+ EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+ (unsigned long) map->m_lblk);
+ ext4_free_ext_path(path);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ unwritten = ext4_ext_is_unwritten(ex);
}
- unwritten = ext4_ext_is_unwritten(ex);
if (map->m_lblk >= ee_block) {
split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
@@ -3357,15 +3400,20 @@ static int ext4_split_extent(handle_t *handle,
split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT2);
}
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk, split_flag1, flags);
- if (err)
- goto out;
+ if (IS_ERR(path))
+ return path;
}
+ if (allocated) {
+ if (map->m_lblk + map->m_len > ee_block + ee_len)
+ *allocated = ee_len - (map->m_lblk - ee_block);
+ else
+ *allocated = map->m_len;
+ }
ext4_ext_show_leaf(inode, path);
-out:
- return err ? err : allocated;
+ return path;
}
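/*
 * [Editor's sketch — not part of the patch] ext4_split_extent() now
 * reports the block count through the optional @allocated out-parameter
 * instead of its return value; a hypothetical call site looks like:
 *
 *	unsigned int allocated;
 *
 *	path = ext4_split_extent(handle, inode, path, map, split_flag,
 *				 flags, &allocated);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 * where allocated holds the initialized blocks past map->m_lblk, and
 * NULL may be passed when the count is not needed.
 */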
/*
@@ -3388,13 +3436,11 @@ out:
* that are allocated and initialized.
* It is guaranteed to be >= map->m_len.
*/
-static int ext4_ext_convert_to_initialized(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+static struct ext4_ext_path *
+ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
@@ -3404,7 +3450,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
unsigned int ee_len, depth, map_len = map->m_len;
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
- int allocated = 0;
unsigned int max_zeroout = 0;
ext_debug(inode, "logical block %llu, max_blocks %u\n",
@@ -3445,6 +3490,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
* - L2: we only attempt to merge with an extent stored in the
* same extent tree node.
*/
+ *allocated = 0;
if ((map->m_lblk == ee_block) &&
/* See if we can merge left */
(map_len < ee_len) && /*L1*/
@@ -3474,7 +3520,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3489,7 +3535,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
(map_len < ee_len) && /*L1*/
@@ -3520,7 +3566,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3535,18 +3581,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(next_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
}
- if (allocated) {
+ if (*allocated) {
/* Mark the block containing both extents as dirty */
err = ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
+ if (err)
+ goto errout;
goto out;
} else
- allocated = ee_len - (map->m_lblk - ee_block);
+ *allocated = ee_len - (map->m_lblk - ee_block);
WARN_ON(map->m_lblk < ee_block);
/*
@@ -3573,21 +3621,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
- if (max_zeroout && (allocated > split_map.m_len)) {
- if (allocated <= max_zeroout) {
+ if (max_zeroout && (*allocated > split_map.m_len)) {
+ if (*allocated <= max_zeroout) {
/* case 3 or 5 */
zero_ex1.ee_block =
cpu_to_le32(split_map.m_lblk +
split_map.m_len);
zero_ex1.ee_len =
- cpu_to_le16(allocated - split_map.m_len);
+ cpu_to_le16(*allocated - split_map.m_len);
ext4_ext_store_pblock(&zero_ex1,
ext4_ext_pblock(ex) + split_map.m_lblk +
split_map.m_len - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex1);
if (err)
goto fallback;
- split_map.m_len = allocated;
+ split_map.m_len = *allocated;
}
if (split_map.m_lblk - ee_block + split_map.m_len <
max_zeroout) {
@@ -3605,22 +3653,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_len += split_map.m_lblk - ee_block;
split_map.m_lblk = ee_block;
- allocated = map->m_len;
+ *allocated = map->m_len;
}
}
fallback:
- err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
- flags);
- if (err > 0)
- err = 0;
+ path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
+ flags, NULL);
+ if (IS_ERR(path))
+ return path;
out:
/* If we have gotten a failure, don't zero out status tree */
- if (!err) {
- ext4_zeroout_es(inode, &zero_ex1);
- ext4_zeroout_es(inode, &zero_ex2);
- }
- return err ? err : allocated;
+ ext4_zeroout_es(inode, &zero_ex1);
+ ext4_zeroout_es(inode, &zero_ex2);
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
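/*
 * [Editor's sketch — not part of the patch] The errout/out pattern this
 * function now follows, reduced to its skeleton with a hypothetical
 * do_work() helper: the path is freed exactly once on every failure
 * branch, so callers never see a stale pointer.
 */
static struct ext4_ext_path *example_callee(struct ext4_ext_path *path)
{
	int err;

	err = do_work(path);		/* hypothetical helper */
	if (err)
		goto errout;
	return path;

errout:
	ext4_free_ext_path(path);
	return ERR_PTR(err);
}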
/*
@@ -3645,15 +3695,16 @@ out:
* being filled will be convert to initialized by the end_io callback function
* via ext4_convert_unwritten_extents().
*
- * Returns the size of unwritten extent to be written on success.
+ * The size of the unwritten extent to be written is passed to the caller
+ * via the @allocated pointer. Returns an extent path pointer on success,
+ * or an error pointer on failure.
*/
-static int ext4_split_convert_extents(handle_t *handle,
+static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+ struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
@@ -3686,15 +3737,15 @@ static int ext4_split_convert_extents(handle_t *handle,
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
flags |= EXT4_GET_BLOCKS_PRE_IO;
- return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+ allocated);
}
-static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath)
+static struct ext4_ext_path *
+ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3722,20 +3773,21 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
#endif
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
@@ -3746,18 +3798,23 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-out:
+ if (err)
+ goto errout;
+
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
+ struct ext4_ext_path *path,
unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3780,25 +3837,27 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- return err;
+ goto errout;
/* first mark the extent as unwritten */
ext4_ext_mark_unwritten(ex);
@@ -3810,7 +3869,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
- return err;
+ goto errout;
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3819,22 +3878,24 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
if (*allocated > map->m_len)
*allocated = map->m_len;
map->m_len = *allocated;
- return 0;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath, int flags,
- unsigned int allocated, ext4_fsblk_t newblock)
+ struct ext4_ext_path *path, int flags,
+ unsigned int *allocated, ext4_fsblk_t newblock)
{
- struct ext4_ext_path __maybe_unused *path = *ppath;
- int ret = 0;
int err = 0;
ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
(unsigned long long)map->m_lblk, map->m_len, flags,
- allocated);
+ *allocated);
ext4_ext_show_leaf(inode, path);
/*
@@ -3844,36 +3905,34 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
- allocated, newblock);
+ *allocated, newblock);
/* get_block() before submitting IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
- ret = ext4_split_convert_extents(handle, inode, map, ppath,
- flags | EXT4_GET_BLOCKS_CONVERT);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ flags | EXT4_GET_BLOCKS_CONVERT, allocated);
+ if (IS_ERR(path))
+ return path;
/*
- * shouldn't get a 0 return when splitting an extent unless
+ * shouldn't see allocated == 0 when splitting an extent unless
* m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
+ if (unlikely(*allocated == 0)) {
EXT4_ERROR_INODE(inode,
- "unexpected ret == 0, m_len = %u",
+ "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
- err = ext4_convert_unwritten_extents_endio(handle, inode, map,
- ppath);
- if (err < 0)
- goto out2;
+ path = ext4_convert_unwritten_extents_endio(handle, inode,
+ map, path);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
goto map_out;
}
@@ -3905,36 +3964,37 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
* For buffered writes, at writepage time, etc. Convert a
* discovered unwritten extent to written.
*/
- ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ path = ext4_ext_convert_to_initialized(handle, inode, map, path,
+ flags, allocated);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
/*
- * shouldn't get a 0 return when converting an unwritten extent
+ * shouldn't see allocated == 0 when converting an unwritten extent
* unless m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
- EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
+ if (unlikely(*allocated == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
out:
- allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
out1:
map->m_pblk = newblock;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
+ if (*allocated > map->m_len)
+ *allocated = map->m_len;
+ map->m_len = *allocated;
ext4_ext_show_leaf(inode, path);
-out2:
- return err ? err : allocated;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -4097,7 +4157,8 @@ again:
insert_hole:
/* Put just found gap into cache to speed up subsequent requests */
ext_debug(inode, " -> %u:%u\n", hole_start, len);
- ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
+ ext4_es_insert_extent(inode, hole_start, len, ~0,
+ EXTENT_STATUS_HOLE, 0);
/* Update hole_len to reflect hole size after lblk */
if (hole_start != lblk)
@@ -4131,7 +4192,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0, pblk;
- int err = 0, depth, ret;
+ int err = 0, depth;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
@@ -4144,7 +4205,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
- path = NULL;
goto out;
}
@@ -4193,8 +4253,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
- err = convert_initialized_extent(handle,
- inode, map, &path, &allocated);
+ path = convert_initialized_extent(handle,
+ inode, map, path, &allocated);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
} else if (!ext4_ext_is_unwritten(ex)) {
map->m_flags |= EXT4_MAP_MAPPED;
@@ -4206,13 +4268,11 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out;
}
- ret = ext4_ext_handle_unwritten_extents(
- handle, inode, map, &path, flags,
- allocated, newblock);
- if (ret < 0)
- err = ret;
- else
- allocated = ret;
+ path = ext4_ext_handle_unwritten_extents(
+ handle, inode, map, path, flags,
+ &allocated, newblock);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
}
}
@@ -4264,6 +4324,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
+ err = 0;
goto got_allocated_blocks;
}
@@ -4336,8 +4397,9 @@ got_allocated_blocks:
map->m_flags |= EXT4_MAP_UNWRITTEN;
}
- err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
- if (err) {
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
if (allocated_clusters) {
int fb_flags = 0;
@@ -4357,43 +4419,6 @@ got_allocated_blocks:
}
/*
- * Reduce the reserved cluster count to reflect successful deferred
- * allocation of delayed allocated clusters or direct allocation of
- * clusters discovered to be delayed allocated. Once allocated, a
- * cluster is not included in the reserved count.
- */
- if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- /*
- * When allocating delayed allocated clusters, simply
- * reduce the reserved cluster count and claim quota
- */
- ext4_da_update_reserve_space(inode, allocated_clusters,
- 1);
- } else {
- ext4_lblk_t lblk, len;
- unsigned int n;
-
- /*
- * When allocating non-delayed allocated clusters
- * (from fallocate, filemap, DIO, or clusters
- * allocated when delalloc has been disabled by
- * ext4_nonda_switch), reduce the reserved cluster
- * count by the number of allocated clusters that
- * have previously been delayed allocated. Quota
- * has been claimed by ext4_mb_new_blocks() above,
- * so release the quota reservations made for any
- * previously delayed allocated clusters.
- */
- lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
- len = allocated_clusters << sbi->s_cluster_bits;
- n = ext4_es_delayed_clu(inode, lblk, len);
- if (n > 0)
- ext4_da_update_reserve_space(inode, (int) n, 0);
- }
- }
-
- /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
@@ -5184,7 +5209,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
* won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
- path = ext4_find_extent(inode, start - 1, &path,
+ path = ext4_find_extent(inode, start - 1, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5233,7 +5258,7 @@ again:
* becomes NULL to indicate the end of the loop.
*/
while (iterator && start <= stop) {
- path = ext4_find_extent(inode, *iterator, &path,
+ path = ext4_find_extent(inode, *iterator, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5535,6 +5560,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
goto out_stop;
}
@@ -5553,22 +5579,21 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
if (ext4_ext_is_unwritten(extent))
split_flag = EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
- ret = ext4_split_extent_at(handle, inode, &path,
+ path = ext4_split_extent_at(handle, inode, path,
offset_lblk, split_flag,
EXT4_EX_NOCACHE |
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
}
- ext4_free_ext_path(path);
- if (ret < 0) {
+ if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
goto out_stop;
}
- } else {
- ext4_free_ext_path(path);
}
+ ext4_free_ext_path(path);
ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
/*
@@ -5636,25 +5661,21 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
int e1_len, e2_len, len;
int split = 0;
- path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
+ path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
if (IS_ERR(path1)) {
*erp = PTR_ERR(path1);
- path1 = NULL;
- finish:
- count = 0;
- goto repeat;
+ goto errout;
}
- path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
+ path2 = ext4_find_extent(inode2, lblk2, path2, EXT4_EX_NOCACHE);
if (IS_ERR(path2)) {
*erp = PTR_ERR(path2);
- path2 = NULL;
- goto finish;
+ goto errout;
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
/* Do we have something to swap ? */
if (unlikely(!ex2 || !ex1))
- goto finish;
+ goto errout;
e1_blk = le32_to_cpu(ex1->ee_block);
e2_blk = le32_to_cpu(ex2->ee_block);
@@ -5676,7 +5697,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
next2 = e2_blk;
/* Do we have something to swap */
if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
- goto finish;
+ goto errout;
/* Move to the rightmost boundary */
len = next1 - lblk1;
if (len < next2 - lblk2)
@@ -5686,28 +5707,32 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
lblk1 += len;
lblk2 += len;
count -= len;
- goto repeat;
+ continue;
}
/* Prepare left boundary */
if (e1_blk < lblk1) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (e2_blk < lblk2) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2, 0);
- if (unlikely(*erp))
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in a leaf extent split,
 * so the path must be revalidated. */
if (split)
- goto repeat;
+ continue;
/* Prepare right boundary */
len = count;
@@ -5718,30 +5743,34 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
if (len != e1_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1 + len, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1 + len, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (len != e2_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2 + len, 0);
- if (*erp)
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2 + len, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in a leaf extent split,
 * so the path must be revalidated. */
if (split)
- goto repeat;
+ continue;
BUG_ON(e2_len != e1_len);
*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
/* Both extents are fully inside boundaries. Swap it now */
tmp_ex = *ex1;
@@ -5759,7 +5788,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
*erp = ext4_ext_dirty(handle, inode2, path2 +
path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_dirty(handle, inode1, path1 +
path1->p_depth);
/*
@@ -5769,17 +5798,17 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
* aborted anyway.
*/
if (unlikely(*erp))
- goto finish;
+ goto errout;
+
lblk1 += len;
lblk2 += len;
replaced_count += len;
count -= len;
-
- repeat:
- ext4_free_ext_path(path1);
- ext4_free_ext_path(path2);
- path1 = path2 = NULL;
}
+
+errout:
+ ext4_free_ext_path(path1);
+ ext4_free_ext_path(path2);
return replaced_count;
}
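/*
 * [Editor's sketch — not part of the patch] With ext4_find_extent()
 * accepting the previous path and reusing or reallocating it, the swap
 * loop above revalidates after a split by simply continuing, and both
 * paths are freed exactly once at errout:
 *
 *	while (count) {
 *		path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
 *		path2 = ext4_find_extent(inode2, lblk2, path2, EXT4_EX_NOCACHE);
 *		...
 *		if (split)
 *			continue;	(revalidate on the next iteration)
 *		...
 *	}
 */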
@@ -5814,11 +5843,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
/* search for the extent closest to the first block in the cluster */
path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- path = NULL;
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
depth = ext_depth(inode);
@@ -5880,7 +5906,7 @@ out:
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
int len, int unwritten, ext4_fsblk_t pblk)
{
- struct ext4_ext_path *path = NULL, *ppath;
+ struct ext4_ext_path *path;
struct ext4_extent *ex;
int ret;
@@ -5896,30 +5922,34 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
if (le32_to_cpu(ex->ee_block) != start ||
ext4_ext_get_actual_len(ex) != len) {
/* We need to split this extent to match our extent first */
- ppath = path;
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path, start, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -1;
- ppath = path;
+ return PTR_ERR(path);
+
ex = path[path->p_depth].p_ext;
WARN_ON(le32_to_cpu(ex->ee_block) != start);
+
if (ext4_ext_get_actual_len(ex) != len) {
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath,
- start + len, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path,
+ start + len, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -EINVAL;
+ return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
}
}
@@ -6001,12 +6031,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
+ if (!ex)
goto out;
- }
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_free_ext_path(path);
/* Count the number of data blocks */
cur = 0;
@@ -6032,32 +6059,28 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
ret = skip_hole(inode, &cur);
if (ret < 0)
goto out;
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
goto out;
numblks += path->p_depth;
- ext4_free_ext_path(path);
while (cur < end) {
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
break;
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
- return 0;
- }
+ if (!ex)
+ goto cleanup;
+
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex));
ret = skip_hole(inode, &cur);
- if (ret < 0) {
- ext4_free_ext_path(path);
+ if (ret < 0)
break;
- }
- path2 = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path2)) {
- ext4_free_ext_path(path);
+
+ path2 = ext4_find_extent(inode, cur, path2, 0);
+ if (IS_ERR(path2))
break;
- }
+
for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
cmp1 = cmp2 = 0;
if (i <= path->p_depth)
@@ -6069,13 +6092,14 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (cmp1 != cmp2 && cmp2 != 0)
numblks++;
}
- ext4_free_ext_path(path);
- ext4_free_ext_path(path2);
}
out:
inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
ext4_mark_inode_dirty(NULL, inode);
+cleanup:
+ ext4_free_ext_path(path);
+ ext4_free_ext_path(path2);
return 0;
}
@@ -6096,12 +6120,9 @@ int ext4_ext_clear_bb(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
- return 0;
- }
+ if (!ex)
+ goto out;
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_free_ext_path(path);
cur = 0;
while (cur < end) {
@@ -6111,16 +6132,16 @@ int ext4_ext_clear_bb(struct inode *inode)
if (ret < 0)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
- if (!IS_ERR_OR_NULL(path)) {
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
+ if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++) {
-
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, false);
ext4_fc_record_regions(inode->i_sb, inode->i_ino,
0, path[j].p_block, 1, 1);
}
- ext4_free_ext_path(path);
+ } else {
+ path = NULL;
}
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
ext4_fc_record_regions(inode->i_sb, inode->i_ino,
@@ -6129,5 +6150,7 @@ int ext4_ext_clear_bb(struct inode *inode)
cur = cur + map.m_len;
}
+out:
+ ext4_free_ext_path(path);
return 0;
}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 17dcf13adde2..c786691dabd3 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -558,8 +558,8 @@ static int ext4_es_can_be_merged(struct extent_status *es1,
if (ext4_es_is_hole(es1))
return 1;
- /* we need to check delayed extent is without unwritten status */
- if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
+ /* we need to check delayed extent */
+ if (ext4_es_is_delayed(es1))
return 1;
return 0;
@@ -848,11 +848,12 @@ out:
*/
void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status)
+ unsigned int status, int flags)
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err1 = 0, err2 = 0, err3 = 0;
+ int resv_used = 0, pending = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct extent_status *es1 = NULL;
struct extent_status *es2 = NULL;
@@ -862,21 +863,14 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return;
- es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
- lblk, len, pblk, status, inode->i_ino);
+ es_debug("add [%u/%u) %llu %x %x to extent status tree of inode %lu\n",
+ lblk, len, pblk, status, flags, inode->i_ino);
if (!len)
return;
BUG_ON(end < lblk);
-
- if ((status & EXTENT_STATUS_DELAYED) &&
- (status & EXTENT_STATUS_WRITTEN)) {
- ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
- " delayed and written which can potentially "
- " cause data loss.", lblk, len);
- WARN_ON(1);
- }
+ WARN_ON_ONCE(status & EXTENT_STATUS_DELAYED);
newes.es_lblk = lblk;
newes.es_len = len;
@@ -894,11 +888,11 @@ retry:
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
- if ((err1 || err2 || err3) && revise_pending && !pr)
+ if ((err1 || err2 || err3 < 0) && revise_pending && !pr)
pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
- err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ err1 = __es_remove_extent(inode, lblk, end, &resv_used, es1);
if (err1 != 0)
goto error;
/* Free preallocated extent if it didn't get used. */
@@ -922,16 +916,38 @@ retry:
if (revise_pending) {
err3 = __revise_pending(inode, lblk, len, &pr);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr) {
__free_pending(pr);
pr = NULL;
}
+ pending = err3;
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
- if (err1 || err2 || err3)
+ /*
+ * Reduce the reserved cluster count to reflect successful deferred
+ * allocation of delayed allocated clusters or direct allocation of
+ * clusters discovered to be delayed allocated. Once allocated, a
+ * cluster is not included in the reserved count.
+ *
+ * When allocating directly (from fallocate, filemap, DIO, or clusters
+ * allocated when delalloc has been disabled by ext4_nonda_switch()),
+ * an extent either 1) contains delayed blocks but starts with
+ * non-delayed allocated blocks (e.g. a hole), or 2) contains
+ * non-delayed allocated blocks that belong to delayed allocated
+ * clusters when the bigalloc feature is enabled. Quota has already
+ * been claimed by ext4_mb_new_blocks(), so release the quota
+ * reservations made for any previously delayed allocated clusters
+ * instead of claiming them again.
+ */
+ resv_used += pending;
+ if (resv_used)
+ ext4_da_update_reserve_space(inode, resv_used,
+ flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+
+ if (err1 || err2 || err3 < 0)
goto retry;
ext4_es_print_tree(inode);
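/*
 * [Editor's sketch — not part of the patch] A rough worked example of
 * the accounting above, assuming bigalloc with s_cluster_ratio = 16 and
 * a cluster-aligned, fully delayed 32-block range being written back:
 * __es_remove_extent() reports resv_used = 2 freed cluster reservations,
 * __revise_pending() returns the number of pending reservations it newly
 * inserted (0 here), and the sum of 2 is released through
 * ext4_da_update_reserve_space(), which claims quota only when
 * EXT4_GET_BLOCKS_DELALLOC_RESERVE is set in @flags.
 */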
@@ -1051,7 +1067,7 @@ out:
}
struct rsvd_count {
- int ndelonly;
+ int ndelayed;
bool first_do_lblk_found;
ext4_lblk_t first_do_lblk;
ext4_lblk_t last_do_lblk;
@@ -1077,10 +1093,10 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct rb_node *node;
- rc->ndelonly = 0;
+ rc->ndelayed = 0;
/*
- * for bigalloc, note the first delonly block in the range has not
+ * for bigalloc, note the first delayed block in the range has not
* been found, record the extent containing the block to the left of
* the region to be removed, if any, and note that there's no partial
* cluster to track
@@ -1100,9 +1116,8 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
}
/*
- * count_rsvd - count the clusters containing delayed and not unwritten
- * (delonly) blocks in a range within an extent and add to
- * the running tally in rsvd_count
+ * count_rsvd - count the clusters containing delayed blocks in a range
+ * within an extent and add to the running tally in rsvd_count
*
* @inode - file containing extent
* @lblk - first block in range
@@ -1119,13 +1134,13 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t i, end, nclu;
- if (!ext4_es_is_delonly(es))
+ if (!ext4_es_is_delayed(es))
return;
WARN_ON(len <= 0);
if (sbi->s_cluster_ratio == 1) {
- rc->ndelonly += (int) len;
+ rc->ndelayed += (int) len;
return;
}
@@ -1135,7 +1150,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
end = lblk + (ext4_lblk_t) len - 1;
end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
- /* record the first block of the first delonly extent seen */
+ /* record the first block of the first delayed extent seen */
if (!rc->first_do_lblk_found) {
rc->first_do_lblk = i;
rc->first_do_lblk_found = true;
@@ -1149,7 +1164,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
* doesn't start with it, count it and stop tracking
*/
if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
}
@@ -1159,7 +1174,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
*/
if (EXT4_LBLK_COFF(sbi, i) != 0) {
if (end >= EXT4_LBLK_CFILL(sbi, i)) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
i = EXT4_LBLK_CFILL(sbi, i) + 1;
}
@@ -1167,11 +1182,11 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
/*
* if the current cluster starts on a cluster boundary, count the
- * number of whole delonly clusters in the extent
+ * number of whole delayed clusters in the extent
*/
if ((i + sbi->s_cluster_ratio - 1) <= end) {
nclu = (end - i + 1) >> sbi->s_cluster_bits;
- rc->ndelonly += nclu;
+ rc->ndelayed += nclu;
i += nclu << sbi->s_cluster_bits;
}
@@ -1231,10 +1246,9 @@ static struct pending_reservation *__pr_tree_search(struct rb_root *root,
* @rc - pointer to reserved count data
*
* The number of reservations to be released is equal to the number of
- * clusters containing delayed and not unwritten (delonly) blocks within
- * the range, minus the number of clusters still containing delonly blocks
- * at the ends of the range, and minus the number of pending reservations
- * within the range.
+ * clusters containing delayed blocks within the range, minus the number of
+ * clusters still containing delayed blocks at the ends of the range, and
+ * minus the number of pending reservations within the range.
*/
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct extent_status *right_es,
@@ -1245,33 +1259,33 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
struct rb_node *node;
ext4_lblk_t first_lclu, last_lclu;
- bool left_delonly, right_delonly, count_pending;
+ bool left_delayed, right_delayed, count_pending;
struct extent_status *es;
if (sbi->s_cluster_ratio > 1) {
/* count any remaining partial cluster */
if (rc->partial)
- rc->ndelonly++;
+ rc->ndelayed++;
- if (rc->ndelonly == 0)
+ if (rc->ndelayed == 0)
return 0;
first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
/*
- * decrease the delonly count by the number of clusters at the
- * ends of the range that still contain delonly blocks -
+ * decrease the delayed count by the number of clusters at the
+ * ends of the range that still contain delayed blocks -
* these clusters still need to be reserved
*/
- left_delonly = right_delonly = false;
+ left_delayed = right_delayed = false;
es = rc->left_es;
while (es && ext4_es_end(es) >=
EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- left_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ left_delayed = true;
break;
}
node = rb_prev(&es->rb_node);
@@ -1279,7 +1293,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
break;
es = rb_entry(node, struct extent_status, rb_node);
}
- if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+ if (right_es && (!left_delayed || first_lclu != last_lclu)) {
if (end < ext4_es_end(right_es)) {
es = right_es;
} else {
@@ -1289,9 +1303,9 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
while (es && es->es_lblk <=
EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- right_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ right_delayed = true;
break;
}
node = rb_next(&es->rb_node);
@@ -1305,21 +1319,21 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* Determine the block range that should be searched for
* pending reservations, if any. Clusters on the ends of the
- * original removed range containing delonly blocks are
+ * original removed range containing delayed blocks are
* excluded. They've already been accounted for and it's not
* possible to determine if an associated pending reservation
* should be released with the information available in the
* extents status tree.
*/
if (first_lclu == last_lclu) {
- if (left_delonly | right_delonly)
+ if (left_delayed | right_delayed)
count_pending = false;
else
count_pending = true;
} else {
- if (left_delonly)
+ if (left_delayed)
first_lclu++;
- if (right_delonly)
+ if (right_delayed)
last_lclu--;
if (first_lclu <= last_lclu)
count_pending = true;
@@ -1330,13 +1344,13 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* a pending reservation found between first_lclu and last_lclu
* represents an allocated cluster that contained at least one
- * delonly block, so the delonly total must be reduced by one
+ * delayed block, so the delayed total must be reduced by one
* for each pending reservation found and released
*/
if (count_pending) {
pr = __pr_tree_search(&tree->root, first_lclu);
while (pr && pr->lclu <= last_lclu) {
- rc->ndelonly--;
+ rc->ndelayed--;
node = rb_next(&pr->rb_node);
rb_erase(&pr->rb_node, &tree->root);
__free_pending(pr);
@@ -1347,7 +1361,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
}
}
- return rc->ndelonly;
+ return rc->ndelayed;
}
@@ -1940,7 +1954,7 @@ static struct pending_reservation *__get_pending(struct inode *inode,
* @lblk - logical block in the cluster to be added
* @prealloc - preallocated pending entry
*
- * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * Returns 1 on successful insertion and -ENOMEM on failure. If the
 * pending reservation is already in the set, returns 0 (success).
*/
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
@@ -1984,6 +1998,7 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
rb_link_node(&pr->rb_node, parent, p);
rb_insert_color(&pr->rb_node, &tree->root);
+ ret = 1;
out:
return ret;
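/*
 * [Editor's sketch — not part of the patch] Under the new return
 * convention a caller counts only fresh insertions, e.g.:
 *
 *	ret = __insert_pending(inode, lblk, &prealloc);
 *	if (ret < 0)
 *		goto error;		(only -ENOMEM is a failure)
 *	pendings += ret;		(1 if inserted, 0 if already present)
 */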
@@ -2105,7 +2120,7 @@ retry:
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
- if (err1 || err2 || err3) {
+ if (err1 || err2 || err3 < 0) {
if (lclu_allocated && !pr1)
pr1 = __alloc_pending(true);
if (end_allocated && !pr2)
@@ -2135,7 +2150,7 @@ retry:
if (lclu_allocated) {
err3 = __insert_pending(inode, lblk, &pr1);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr1) {
__free_pending(pr1);
@@ -2144,7 +2159,7 @@ retry:
}
if (end_allocated) {
err3 = __insert_pending(inode, end, &pr2);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr2) {
__free_pending(pr2);
@@ -2153,7 +2168,7 @@ retry:
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
- if (err1 || err2 || err3)
+ if (err1 || err2 || err3 < 0)
goto retry;
ext4_es_print_tree(inode);
@@ -2162,94 +2177,6 @@ error:
}
/*
- * __es_delayed_clu - count number of clusters containing blocks that
- * are delayed only
- *
- * @inode - file containing block range
- * @start - logical block defining start of range
- * @end - logical block defining end of range
- *
- * Returns the number of clusters containing only delayed (not delayed
- * and unwritten) blocks in the range specified by @start and @end. Any
- * cluster or part of a cluster within the range and containing a delayed
- * and not unwritten block within the range is counted as a whole cluster.
- */
-static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end)
-{
- struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
- struct extent_status *es;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct rb_node *node;
- ext4_lblk_t first_lclu, last_lclu;
- unsigned long long last_counted_lclu;
- unsigned int n = 0;
-
- /* guaranteed to be unequal to any ext4_lblk_t value */
- last_counted_lclu = ~0ULL;
-
- es = __es_tree_search(&tree->root, start);
-
- while (es && (es->es_lblk <= end)) {
- if (ext4_es_is_delonly(es)) {
- if (es->es_lblk <= start)
- first_lclu = EXT4_B2C(sbi, start);
- else
- first_lclu = EXT4_B2C(sbi, es->es_lblk);
-
- if (ext4_es_end(es) >= end)
- last_lclu = EXT4_B2C(sbi, end);
- else
- last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
-
- if (first_lclu == last_counted_lclu)
- n += last_lclu - first_lclu;
- else
- n += last_lclu - first_lclu + 1;
- last_counted_lclu = last_lclu;
- }
- node = rb_next(&es->rb_node);
- if (!node)
- break;
- es = rb_entry(node, struct extent_status, rb_node);
- }
-
- return n;
-}
-
-/*
- * ext4_es_delayed_clu - count number of clusters containing blocks that
- * are both delayed and unwritten
- *
- * @inode - file containing block range
- * @lblk - logical block defining start of range
- * @len - number of blocks in range
- *
- * Locking for external use of __es_delayed_clu().
- */
-unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- ext4_lblk_t end;
- unsigned int n;
-
- if (len == 0)
- return 0;
-
- end = lblk + len - 1;
- WARN_ON(end < lblk);
-
- read_lock(&ei->i_es_lock);
-
- n = __es_delayed_clu(inode, lblk, end);
-
- read_unlock(&ei->i_es_lock);
-
- return n;
-}
-
-/*
* __revise_pending - makes, cancels, or leaves unchanged pending cluster
* reservations for a specified block range depending
* upon the presence or absence of delayed blocks
@@ -2263,7 +2190,9 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
*
* Used after a newly allocated extent is added to the extents status tree.
* Requires that the extents in the range have either written or unwritten
- * status. Must be called while holding i_es_lock.
+ * status. Must be called while holding i_es_lock. Returns the number of
+ * pending cluster reservations newly inserted, 0 when reservations were
+ * only removed or left unchanged, and -ENOMEM on failure.
*/
static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len,
@@ -2273,6 +2202,7 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end = lblk + len - 1;
ext4_lblk_t first, last;
bool f_del = false, l_del = false;
+ int pendings = 0;
int ret = 0;
if (len == 0)
@@ -2294,49 +2224,53 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
if (f_del) {
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else {
last = EXT4_LBLK_CMASK(sbi, end) +
sbi->s_cluster_ratio - 1;
if (last != end)
l_del = __es_scan_range(inode,
- &ext4_es_is_delonly,
+ &ext4_es_is_delayed,
end + 1, last);
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, last);
}
} else {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
if (f_del) {
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, first);
last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
if (last != end)
- l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ l_del = __es_scan_range(inode, &ext4_es_is_delayed,
end + 1, last);
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, last);
}
out:
- return ret;
+ return (ret < 0) ? ret : pendings;
}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 3c8e2edee5d5..4424232de298 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -42,6 +42,10 @@ enum {
#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
+/*
+ * Besides EXTENT_STATUS_REFERENCED, all of these extent type masks
+ * are exclusive: only one type can be set at a time.
+ */
#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
@@ -51,7 +55,9 @@ enum {
#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
- EXTENT_STATUS_HOLE) << ES_SHIFT)
+ EXTENT_STATUS_HOLE))
+
+#define ES_TYPE_VALID(type) ((type) && !((type) & ((type) - 1)))
struct ext4_sb_info;
struct ext4_extent;
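/*
 * [Editor's sketch — not part of the patch] ES_TYPE_VALID() is the
 * usual "exactly one bit set" test, (x && !(x & (x - 1))). For example:
 *
 *	ES_TYPE_VALID(EXTENT_STATUS_WRITTEN)	evaluates true
 *	ES_TYPE_VALID(0)			evaluates false
 *	ES_TYPE_VALID(EXTENT_STATUS_WRITTEN |
 *		      EXTENT_STATUS_DELAYED)	evaluates false
 *
 * which matches the rule that the type masks are mutually exclusive.
 */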
@@ -129,7 +135,7 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status);
+ unsigned int status, int flags);
extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);
@@ -156,7 +162,7 @@ static inline unsigned int ext4_es_status(struct extent_status *es)
static inline unsigned int ext4_es_type(struct extent_status *es)
{
- return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
+ return (es->es_pblk >> ES_SHIFT) & ES_TYPE_MASK;
}
static inline int ext4_es_is_written(struct extent_status *es)
@@ -184,11 +190,6 @@ static inline int ext4_es_is_mapped(struct extent_status *es)
return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
}
-static inline int ext4_es_is_delonly(struct extent_status *es)
-{
- return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
-}
-
static inline void ext4_es_set_referenced(struct extent_status *es)
{
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
@@ -224,17 +225,12 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
es->es_pblk = block;
}
-static inline void ext4_es_store_status(struct extent_status *es,
- unsigned int status)
-{
- es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
- (es->es_pblk & ~ES_MASK);
-}
-
static inline void ext4_es_store_pblock_status(struct extent_status *es,
ext4_fsblk_t pb,
unsigned int status)
{
+ WARN_ON_ONCE(!ES_TYPE_VALID(status & ES_TYPE_MASK));
+
es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
(pb & ~ES_MASK);
}
@@ -252,8 +248,6 @@ extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
extern void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, bool lclu_allocated,
bool end_allocated);
-extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
extern void ext4_clear_inode_es(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 3926a05eceee..eaa5f5b51f50 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -339,22 +339,29 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
tid_t tid;
+ bool has_transaction = true;
+ bool is_ineligible;
if (ext4_fc_disabled(sb))
return;
- ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
if (handle && !IS_ERR(handle))
tid = handle->h_transaction->t_tid;
else {
read_lock(&sbi->s_journal->j_state_lock);
- tid = sbi->s_journal->j_running_transaction ?
- sbi->s_journal->j_running_transaction->t_tid : 0;
+ if (sbi->s_journal->j_running_transaction)
+ tid = sbi->s_journal->j_running_transaction->t_tid;
+ else
+ has_transaction = false;
read_unlock(&sbi->s_journal->j_state_lock);
}
spin_lock(&sbi->s_fc_lock);
- if (tid_gt(tid, sbi->s_fc_ineligible_tid))
+ is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (has_transaction &&
+ (!is_ineligible ||
+ (is_ineligible && tid_gt(tid, sbi->s_fc_ineligible_tid))))
sbi->s_fc_ineligible_tid = tid;
+ ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
@@ -1288,8 +1295,21 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- if (tid_geq(tid, iter->i_sync_tid))
+ if (tid_geq(tid, iter->i_sync_tid)) {
ext4_fc_reset_inode(&iter->vfs_inode);
+ } else if (full) {
+ /*
+ * We are called after a full commit; the inode has been
+ * modified while the commit was running. Re-enqueue
+ * the inode into STAGING, which will then be spliced
+ * back into MAIN. This cannot happen during
+ * fastcommit because the journal is locked all the
+ * time in that case (and tid doesn't increase so
+ * tid check above isn't reliable).
+ */
+ list_add_tail(&EXT4_I(&iter->vfs_inode)->i_fc_list,
+ &sbi->s_fc_q[FC_Q_STAGING]);
+ }
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
#if (BITS_PER_LONG < 64)
@@ -1772,7 +1792,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
if (ret == 0) {
/* Range is not mapped */
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
goto out;
memset(&newex, 0, sizeof(newex));
@@ -1783,11 +1803,10 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
if (ext4_ext_is_unwritten(ex))
ext4_ext_mark_unwritten(&newex);
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_insert_extent(
- NULL, inode, &path, &newex, 0);
+ path = ext4_ext_insert_extent(NULL, inode,
+ path, &newex, 0);
up_write((&EXT4_I(inode)->i_data_sem));
- ext4_free_ext_path(path);
- if (ret)
+ if (IS_ERR(path))
goto out;
goto next;
}
@@ -1836,6 +1855,7 @@ next:
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
out:
+ ext4_free_ext_path(path);
iput(inode);
return 0;
}
@@ -1936,12 +1956,13 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++)
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, true);
- ext4_free_ext_path(path);
+ } else {
+ path = NULL;
}
cur += ret;
ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
@@ -1952,6 +1973,8 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
}
iput(inode);
}
+
+ ext4_free_ext_path(path);
}
/*
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index c89e434db6b7..f14aed14b9cf 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -306,7 +306,7 @@ out:
}
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
- ssize_t count)
+ ssize_t written, ssize_t count)
{
handle_t *handle;
@@ -315,7 +315,7 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (ext4_update_inode_size(inode, offset + count)) {
+ if (ext4_update_inode_size(inode, offset + written)) {
int ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret)) {
ext4_journal_stop(handle);
@@ -323,21 +323,21 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
}
}
- if (inode->i_nlink)
+ if ((written == count) && inode->i_nlink)
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
- return count;
+ return written;
}
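/*
 * [Editor's sketch — not part of the patch] Splitting @written from
 * @count lets a short extending write keep its orphan-list entry. With
 * hypothetical numbers, a DAX write asked to extend the file by 4096
 * bytes that completed only 1024 calls
 *
 *	ext4_handle_inode_extension(inode, offset, 1024, 4096);
 *
 * which grows i_size by the 1024 bytes actually written but skips
 * ext4_orphan_del() because written != count, so a crash still
 * truncates the unfinished tail.
 */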
/*
* Clean up the inode after DIO or DAX extending write has completed and the
* inode size has been updated using ext4_handle_inode_extension().
*/
-static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
+static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
{
lockdep_assert_held_write(&inode->i_rwsem);
- if (count < 0) {
+ if (need_trunc) {
ext4_truncate_failed_write(inode);
/*
* If the truncate operation failed early, then the inode may
@@ -393,7 +393,7 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
pos + size <= i_size_read(inode))
return size;
- return ext4_handle_inode_extension(inode, pos, size);
+ return ext4_handle_inode_extension(inode, pos, size, size);
}
static const struct iomap_dio_ops ext4_dio_write_ops = {
@@ -586,7 +586,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
* writeback of delalloc blocks.
*/
WARN_ON_ONCE(ret == -EIOCBQUEUED);
- ext4_inode_extension_cleanup(inode, ret);
+ ext4_inode_extension_cleanup(inode, ret < 0);
}
out:
@@ -669,8 +669,8 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
if (extend) {
- ret = ext4_handle_inode_extension(inode, offset, ret);
- ext4_inode_extension_cleanup(inode, ret);
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
+ ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
}
out:
inode_unlock(inode);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9dfd768ed9f8..7f1a5f90dbbd 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -87,10 +87,10 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
- grp = ext4_get_group_info(sb, block_group);
-
if (buffer_verified(bh))
return 0;
+
+ grp = ext4_get_group_info(sb, block_group);
if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
@@ -98,8 +98,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (buffer_verified(bh))
goto verified;
blk = ext4_inode_bitmap(sb, desc);
- if (!ext4_inode_bitmap_csum_verify(sb, desc, bh,
- EXT4_INODES_PER_GROUP(sb) / 8) ||
+ if (!ext4_inode_bitmap_csum_verify(sb, desc, bh) ||
ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
@@ -327,8 +326,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (percpu_counter_initialized(&sbi->s_dirs_counter))
percpu_counter_dec(&sbi->s_dirs_counter);
}
- ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
@@ -514,6 +512,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
if (min_inodes < 1)
min_inodes = 1;
min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
+ if (min_clusters < 0)
+ min_clusters = 0;
/*
* Start looking in the flex group where we last allocated an
@@ -755,10 +755,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
struct ext4_group_desc *gdp;
ext4_group_t group;
int bit;
- int err = -EFSCORRUPTED;
+ int err;
if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
- goto out;
+ return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
@@ -772,7 +772,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
}
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
- if (!gdp || !group_desc_bh) {
+ if (!gdp) {
err = -EINVAL;
goto out;
}
@@ -851,8 +851,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
@@ -860,6 +859,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
sync_dirty_buffer(group_desc_bh);
out:
+ brelse(inode_bitmap_bh);
return err;
}
@@ -1053,14 +1053,14 @@ got_group:
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
/* Skip groups with suspicious inode tables */
- if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
- && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
- IS_ERR(inode_bitmap_bh)) {
+ if (IS_ERR(inode_bitmap_bh)) {
inode_bitmap_bh = NULL;
goto next_group;
}
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
+ EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ goto next_group;
-repeat_in_this_group:
ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
if (!ret2)
goto next_group;
@@ -1110,8 +1110,6 @@ repeat_in_this_group:
if (!ret2)
goto got; /* we grabbed the inode! */
- if (ino < EXT4_INODES_PER_GROUP(sb))
- goto repeat_in_this_group;
next_group:
if (++group == ngroups)
group = 0;
@@ -1224,8 +1222,7 @@ got:
}
}
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
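
The find_group_orlov() hunk guards a subtraction that can go negative when the filesystem has few free clusters on average. A toy reproduction of the underflow and the clamp (types and constants are simplified, chosen only for illustration):

#include <stdio.h>

int main(void)
{
	/* Few free clusters on average; constants are made up. */
	long long avefreec = 10;
	long long clusters_per_group = 32768, flex_size = 16;
	long long min_clusters = avefreec - clusters_per_group * flex_size / 4;

	if (min_clusters < 0)
		min_clusters = 0;	/* never demand "negative" free space */
	printf("min_clusters=%lld\n", min_clusters);
	return 0;
}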
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index d8ca7f64f952..7404f0935c90 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -652,13 +652,6 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
ext4_update_inode_fsync_trans(handle, inode, 1);
count = ar.len;
- /*
- * Update reserved blocks/metadata blocks after successful block
- * allocation which had been deferred till now.
- */
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
- ext4_da_update_reserve_space(inode, count, 1);
-
got_it:
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index e7a09a99837b..3536ca7e4fcc 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -601,10 +601,11 @@ retry:
goto out;
if (ext4_should_dioread_nolock(inode)) {
- ret = __block_write_begin(&folio->page, from, to,
- ext4_get_block_unwritten);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block_unwritten);
} else
- ret = __block_write_begin(&folio->page, from, to, ext4_get_block);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
@@ -660,7 +661,7 @@ out_nofolio:
int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- struct page **pagep)
+ struct folio **foliop)
{
int ret;
handle_t *handle;
@@ -708,7 +709,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
goto out;
}
- *pagep = &folio->page;
+ *foliop = folio;
down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
ret = 0;
@@ -856,8 +857,8 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
goto out;
}
- ret = __block_write_begin(&folio->page, 0, inline_size,
- ext4_da_get_block_prep);
+ ret = ext4_block_write_begin(NULL, folio, 0, inline_size,
+ ext4_da_get_block_prep);
if (ret) {
up_read(&EXT4_I(inode)->xattr_sem);
folio_unlock(folio);
@@ -891,7 +892,7 @@ out:
int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
- struct page **pagep,
+ struct folio **foliop,
void **fsdata)
{
int ret;
@@ -954,7 +955,7 @@ retry_journal:
goto out_release_page;
up_read(&EXT4_I(inode)->xattr_sem);
- *pagep = &folio->page;
+ *foliop = folio;
brelse(iloc.bh);
return 1;
out_release_page:
@@ -1460,6 +1461,7 @@ int ext4_read_inline_dir(struct file *file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
int dotdot_offset, dotdot_size, extra_offset, extra_size;
+ struct dir_private_info *info = file->private_data;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1503,12 +1505,12 @@ int ext4_read_inline_dir(struct file *file,
extra_size = extra_offset + inline_size;
/*
- * If the version has changed since the last call to
+ * If the cookie has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the inline
* dir to make sure.
*/
- if (!inode_eq_iversion(inode, file->f_version)) {
+ if (!inode_eq_iversion(inode, info->cookie)) {
for (i = 0; i < extra_size && i < offset;) {
/*
* "." is with offset 0 and
@@ -1540,7 +1542,7 @@ int ext4_read_inline_dir(struct file *file,
}
offset = i;
ctx->pos = offset;
- file->f_version = inode_query_iversion(inode);
+ info->cookie = inode_query_iversion(inode);
}
while (ctx->pos < extra_size) {
@@ -1664,24 +1666,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
int ret;
- struct ext4_iloc iloc;
void *inline_start;
int inline_size;
- if (ext4_get_inode_loc(dir, &iloc))
- return NULL;
+ ret = ext4_get_inode_loc(dir, &is.iloc);
+ if (ret)
+ return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem);
+
+ ret = ext4_xattr_ibody_find(dir, &i, &is);
+ if (ret)
+ goto out;
+
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
}
- inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block +
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
@@ -1691,20 +1705,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
goto out;
- inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+ inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc);
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
out:
- brelse(iloc.bh);
- iloc.bh = NULL;
+ brelse(is.iloc.bh);
+ if (ret < 0)
+ is.iloc.bh = ERR_PTR(ret);
+ else
+ is.iloc.bh = NULL;
out_find:
up_read(&EXT4_I(dir)->xattr_sem);
- return iloc.bh;
+ return is.iloc.bh;
}
int ext4_delete_inline_entry(handle_t *handle,
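
ext4_find_inline_entry() now reports lookup failures with an error pointer instead of a bare NULL, so callers can distinguish corruption from a simple miss. A self-contained userspace re-creation of the ERR_PTR convention (the kernel's own helpers live in <linux/err.h>; this sketch only mimics them):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

/* Stand-in for the directory lookup: NULL means "not found", an error
 * pointer means the lookup itself failed (the patch propagates the real
 * errno, e.g. -EFSCORRUPTED from ext4_search_dir()). */
static void *find_entry(int corrupt)
{
	return corrupt ? ERR_PTR(-EIO) : NULL;
}

int main(void)
{
	void *bh = find_entry(1);

	if (IS_ERR(bh))
		printf("lookup failed: %ld\n", PTR_ERR(bh));
	else if (!bh)
		printf("entry not found\n");
	return 0;
}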
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 941c1c0d5c6e..54bdd4884fe6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -49,6 +49,11 @@
#include <trace/events/ext4.h>
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+ struct inode *inode,
+ struct folio *folio,
+ unsigned from, unsigned to);
+
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
@@ -478,7 +483,89 @@ static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
+ map->m_pblk, status, 0);
+ return retval;
+}
+
+static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags)
+{
+ struct extent_status es;
+ unsigned int status;
+ int err, retval = 0;
+
+ /*
+	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
+	 * indicate that the blocks and quotas have already been
+ * checked when the data was copied into the page cache.
+ */
+ if (map->m_flags & EXT4_MAP_DELAYED)
+ flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+ /*
+	 * Here we clear m_flags because after allocating a new extent,
+ * it will be set again.
+ */
+ map->m_flags &= ~EXT4_MAP_FLAGS;
+
+ /*
+ * We need to check for EXT4 here because migrate could have
+ * changed the inode type in between.
+ */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ } else {
+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+
+ /*
+ * We allocated new blocks which will result in i_data's
+ * format changing. Force the migrate to fail by clearing
+ * migrate flags.
+ */
+ if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+ }
+ if (retval <= 0)
+ return retval;
+
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode %lu: "
+ "retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
+ }
+
+ /*
+ * We have to zeroout blocks before inserting them into extent
+ * status tree. Otherwise someone could look them up there and
+ * use them before they are really zeroed. We also have to
+ * unmap metadata before zeroing as otherwise writeback can
+ * overwrite zeros with stale data from block device.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO &&
+ map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) {
+ err = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
+ map->m_len);
+ if (err)
+ return err;
+ }
+
+ /*
+ * If the extent has been zeroed out, we don't need to update
+ * extent status tree.
+ */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO &&
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_is_written(&es))
+ return retval;
+ }
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, flags);
+
return retval;
}
@@ -576,32 +663,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, 0);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, 0);
- }
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- }
+ retval = ext4_map_query_blocks(handle, inode, map);
up_read((&EXT4_I(inode)->i_data_sem));
found:
@@ -631,88 +693,13 @@ found:
return retval;
/*
- * Here we clear m_flags because after allocating an new extent,
- * it will be set again.
- */
- map->m_flags &= ~EXT4_MAP_FLAGS;
-
- /*
	 * Allocating new blocks and/or writing to an unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
-
- /*
- * We need to check for EXT4 here because migrate
- * could have changed the inode type in between
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags);
-
- if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
- /*
- * We allocated new blocks which will result in
- * i_data's format changing. Force the migrate
- * to fail by clearing migrate flags
- */
- ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
- }
- }
-
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- /*
- * We have to zeroout blocks before inserting them into extent
- * status tree. Otherwise someone could look them up there and
- * use them before they are really zeroed. We also have to
- * unmap metadata before zeroing as otherwise writeback can
- * overwrite zeros with stale data from block device.
- */
- if (flags & EXT4_GET_BLOCKS_ZERO &&
- map->m_flags & EXT4_MAP_MAPPED &&
- map->m_flags & EXT4_MAP_NEW) {
- ret = ext4_issue_zeroout(inode, map->m_lblk,
- map->m_pblk, map->m_len);
- if (ret) {
- retval = ret;
- goto out_sem;
- }
- }
-
- /*
- * If the extent has been zeroed out, we don't need to update
- * extent status tree.
- */
- if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
- if (ext4_es_is_written(&es))
- goto out_sem;
- }
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- }
-
-out_sem:
+ retval = ext4_map_create_blocks(handle, inode, map, flags);
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
@@ -1018,32 +1005,16 @@ static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
- int dirty = buffer_dirty(bh);
- int ret;
-
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
- /*
- * __block_write_begin() could have dirtied some buffers. Clean
- * the dirty bit as jbd2_journal_get_write_access() could complain
- * otherwise about fs integrity issues. Setting of the dirty bit
- * by __block_write_begin() isn't a real problem here as we clear
- * the bit before releasing a page lock and thus writeback cannot
- * ever write the buffer.
- */
- if (dirty)
- clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
- ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
+ return ext4_journal_get_write_access(handle, inode->i_sb, bh,
EXT4_JTR_NONE);
- if (!ret && dirty)
- ret = ext4_dirty_journalled_data(handle, bh);
- return ret;
}
-#ifdef CONFIG_FS_ENCRYPTION
-static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
- get_block_t *get_block)
+int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
@@ -1056,6 +1027,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
struct buffer_head *bh, *head, *wait[2];
int nr_wait = 0;
int i;
+ bool should_journal_data = ext4_should_journal_data(inode);
BUG_ON(!folio_test_locked(folio));
BUG_ON(from > PAGE_SIZE);
@@ -1085,10 +1057,22 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
if (err)
break;
if (buffer_new(bh)) {
+ /*
+ * We may be zeroing partial buffers or all new
+ * buffers in case of failure. Prepare JBD2 for
+ * that.
+ */
+ if (should_journal_data)
+ do_journal_get_write_access(handle,
+ inode, bh);
if (folio_test_uptodate(folio)) {
- clear_buffer_new(bh);
+ /*
+ * Unlike __block_write_begin() we leave
+ * dirtying of new uptodate buffers to
+ * ->write_end() time or
+ * folio_zero_new_buffers().
+ */
set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
@@ -1118,7 +1102,11 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err)) {
- folio_zero_new_buffers(folio, from, to);
+ if (should_journal_data)
+ ext4_journalled_zero_new_buffers(handle, inode, folio,
+ from, to);
+ else
+ folio_zero_new_buffers(folio, from, to);
} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
for (i = 0; i < nr_wait; i++) {
int err2;
@@ -1134,7 +1122,6 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
return err;
}
-#endif
/*
* To preserve ordering, it is essential that the hole instantiation and
@@ -1145,7 +1132,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
*/
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks;
@@ -1170,7 +1157,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
- pagep);
+ foliop);
if (ret < 0)
return ret;
if (ret == 1)
@@ -1216,19 +1203,12 @@ retry_journal:
/* In case writeback began while the folio was unlocked */
folio_wait_stable(folio);
-#ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
- ret = ext4_block_write_begin(folio, pos, len,
+ ret = ext4_block_write_begin(handle, folio, pos, len,
ext4_get_block_unwritten);
else
- ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
-#else
- if (ext4_should_dioread_nolock(inode))
- ret = __block_write_begin(&folio->page, pos, len,
- ext4_get_block_unwritten);
- else
- ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
-#endif
+ ret = ext4_block_write_begin(handle, folio, pos, len,
+ ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
folio_buffers(folio), from, to,
@@ -1241,7 +1221,7 @@ retry_journal:
folio_unlock(folio);
/*
- * __block_write_begin may have instantiated a few blocks
+ * ext4_block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_rwsem.
*
@@ -1270,7 +1250,7 @@ retry_journal:
folio_put(folio);
return ret;
}
- *pagep = &folio->page;
+ *foliop = folio;
return ret;
}
@@ -1298,9 +1278,8 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
static int ext4_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
@@ -1315,7 +1294,7 @@ static int ext4_write_end(struct file *file,
return ext4_write_inline_data_end(inode, pos, len, copied,
folio);
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
/*
* it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
@@ -1389,9 +1368,9 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
size = min(to, block_end) - start;
folio_zero_range(folio, start, size);
- write_end_fn(handle, inode, bh);
}
clear_buffer_new(bh);
+ write_end_fn(handle, inode, bh);
}
}
block_start = block_end;
@@ -1402,9 +1381,8 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
@@ -1663,7 +1641,7 @@ static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
int ret;
/* Has delalloc reservation? */
- if (ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk))
+ if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
return 1;
/* Already been allocated? */
@@ -1784,7 +1762,7 @@ found:
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
- if (ext4_es_is_delonly(&es)) {
+ if (ext4_es_is_delayed(&es)) {
map->m_flags |= EXT4_MAP_DELAYED;
return 0;
}
@@ -2219,11 +2197,6 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
* possible.
- *
- * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
- * the blocks in question are delalloc blocks. This indicates
- * that the blocks and quotas has already been checked when
- * the data was copied into the page cache.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL |
@@ -2231,8 +2204,6 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & BIT(BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
if (err < 0)
@@ -2926,7 +2897,7 @@ static int ext4_nonda_switch(struct super_block *sb)
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret, retries = 0;
struct folio *folio;
@@ -2941,14 +2912,14 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
- len, pagep, fsdata);
+ len, foliop, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
- pagep, fsdata);
+ foliop, fsdata);
if (ret < 0)
return ret;
if (ret == 1)
@@ -2961,11 +2932,8 @@ retry:
if (IS_ERR(folio))
return PTR_ERR(folio);
-#ifdef CONFIG_FS_ENCRYPTION
- ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
-#else
- ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
-#endif
+ ret = ext4_block_write_begin(NULL, folio, pos, len,
+ ext4_da_get_block_prep);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
@@ -2983,7 +2951,7 @@ retry:
return ret;
}
- *pagep = &folio->page;
+ *foliop = folio;
return ret;
}
@@ -3029,7 +2997,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
	 * flag, which is all that's needed to trigger page writeback.
*/
copied = block_write_end(NULL, mapping, pos, len, copied,
- &folio->page, NULL);
+ folio, NULL);
new_i_size = pos + copied;
/*
@@ -3080,15 +3048,14 @@ static int ext4_da_do_write_end(struct address_space *mapping,
static int ext4_da_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int write_mode = (int)(unsigned long)fsdata;
- struct folio *folio = page_folio(page);
if (write_mode == FALL_BACK_TO_NONDELALLOC)
return ext4_write_end(file, mapping, pos,
- len, copied, &folio->page, fsdata);
+ len, copied, folio, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
@@ -4070,7 +4037,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
stop_block);
ext4_es_insert_extent(inode, first_block, hole_len, ~0,
- EXTENT_STATUS_HOLE);
+ EXTENT_STATUS_HOLE, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
ext4_fc_track_range(handle, inode, first_block, stop_block);
@@ -5279,8 +5246,9 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- tid_t commit_tid = 0;
+ tid_t commit_tid;
int ret;
+ bool has_transaction;
offset = inode->i_size & (PAGE_SIZE - 1);
/*
@@ -5305,12 +5273,14 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
folio_put(folio);
if (ret != -EBUSY)
return;
- commit_tid = 0;
+ has_transaction = false;
read_lock(&journal->j_state_lock);
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
commit_tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
read_unlock(&journal->j_state_lock);
- if (commit_tid)
+ if (has_transaction)
jbd2_log_wait_commit(journal, commit_tid);
}
}
@@ -6219,7 +6189,8 @@ retry_alloc:
if (folio_pos(folio) + len > size)
len = size - folio_pos(folio);
- err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
+ err = ext4_block_write_begin(handle, folio, 0, len,
+ ext4_get_block);
if (!err) {
ret = VM_FAULT_SIGBUS;
if (ext4_journal_folio_buffers(handle, folio, len))
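
The inode.c refactor splits ext4_map_blocks() into a lookup helper that runs under the shared i_data_sem and a creation helper that runs under the exclusive one. A compilable sketch of that locking shape using a pthread rwlock (the block-mapping functions are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_data_sem = PTHREAD_RWLOCK_INITIALIZER;

static int query_blocks(int lblk)  { return lblk < 100 ? lblk + 1000 : -1; }
static int create_blocks(int lblk) { return lblk + 2000; }

static int map_blocks(int lblk, int create)
{
	int pblk;

	pthread_rwlock_rdlock(&i_data_sem);	/* cheap shared lookup */
	pblk = query_blocks(lblk);
	pthread_rwlock_unlock(&i_data_sem);
	if (pblk >= 0 || !create)
		return pblk;

	pthread_rwlock_wrlock(&i_data_sem);	/* allocation mutates i_data */
	pblk = create_blocks(lblk);
	pthread_rwlock_unlock(&i_data_sem);
	return pblk;
}

int main(void)
{
	printf("%d %d\n", map_blocks(5, 0), map_blocks(500, 1));
	return 0;
}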
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e8bf5972dd47..1c77400bd88e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1343,10 +1343,10 @@ group_extend_out:
me.moved_len = 0;
donor = fdget(me.donor_fd);
- if (!donor.file)
+ if (!fd_file(donor))
return -EBADF;
- if (!(donor.file->f_mode & FMODE_WRITE)) {
+ if (!(fd_file(donor)->f_mode & FMODE_WRITE)) {
err = -EBADF;
goto mext_out;
}
@@ -1367,7 +1367,7 @@ group_extend_out:
if (err)
goto mext_out;
- err = ext4_move_extents(filp, donor.file, me.orig_start,
+ err = ext4_move_extents(filp, fd_file(donor), me.orig_start,
me.donor_start, me.len, &me.moved_len);
mnt_drop_write_file(filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9dda9cd68ab2..d73e38323879 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2356,7 +2356,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ex.fe_logical = 0xDEADFA11; /* debug value */
if (max >= ac->ac_g_ex.fe_len &&
- ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
+ ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) {
ext4_fsblk_t start;
start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
@@ -2553,7 +2553,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
do_div(a, sbi->s_stripe);
i = (a * sbi->s_stripe) - first_group_block;
- stripe = EXT4_B2C(sbi, sbi->s_stripe);
+ stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
i = EXT4_B2C(sbi, i);
while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
if (!mb_test_bit(i, bitmap)) {
@@ -2928,9 +2928,11 @@ repeat:
if (cr == CR_POWER2_ALIGNED)
ext4_mb_simple_scan_group(ac, &e4b);
else {
- bool is_stripe_aligned = sbi->s_stripe &&
+ bool is_stripe_aligned =
+ (sbi->s_stripe >=
+ sbi->s_cluster_ratio) &&
!(ac->ac_g_ex.fe_len %
- EXT4_B2C(sbi, sbi->s_stripe));
+ EXT4_NUM_B2C(sbi, sbi->s_stripe));
if ((cr == CR_GOAL_LEN_FAST ||
cr == CR_BEST_AVAIL_LEN) &&
@@ -3075,8 +3077,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
seq_puts(seq, " ]");
if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
seq_puts(seq, " Block bitmap corrupted!");
- seq_puts(seq, "\n");
-
+ seq_putc(seq, '\n');
return 0;
}
@@ -3707,7 +3708,7 @@ int ext4_mb_init(struct super_block *sb)
*/
if (sbi->s_stripe > 1) {
sbi->s_mb_group_prealloc = roundup(
- sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
+ sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
}
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -3887,11 +3888,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/*
* Clear the trimmed flag for the group so that the next
* ext4_trim_fs can trim it.
- * If the volume is mounted with -o discard, online discard
- * is supported and the free blocks will be trimmed online.
*/
- if (!test_opt(sb, DISCARD))
- EXT4_MB_GRP_CLEAR_TRIMMED(db);
+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
@@ -6515,8 +6513,9 @@ do_more:
" group:%u block:%d count:%lu failed"
" with %d", block_group, bit, count,
err);
- } else
- EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+ }
+
+ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group);
mb_free_blocks(inode, &e4b, bit, count_clusters);
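
The mballoc.c changes swap EXT4_B2C() for EXT4_NUM_B2C() where a block count, not a block number, is converted: counts must round up or a partial cluster is lost. Simplified versions of the two conversions (the real macros use the cluster-ratio shift; plain division is assumed here):

#include <stdio.h>

#define B2C(blk, ratio)      ((blk) / (ratio))			/* round down */
#define NUM_B2C(blks, ratio) (((blks) + (ratio) - 1) / (ratio))	/* round up */

int main(void)
{
	unsigned stripe = 24, cluster_ratio = 16;

	printf("B2C=%u NUM_B2C=%u\n",
	       B2C(stripe, cluster_ratio),	/* 1: loses a partial cluster */
	       NUM_B2C(stripe, cluster_ratio));	/* 2: covers the whole stripe */
	return 0;
}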
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index d98ac2af8199..1b0dfd963d3f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -37,7 +37,6 @@ static int finish_range(handle_t *handle, struct inode *inode,
path = ext4_find_extent(inode, lb->first_block, NULL, 0);
if (IS_ERR(path)) {
retval = PTR_ERR(path);
- path = NULL;
goto err_out;
}
@@ -53,7 +52,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
if (retval < 0)
goto err_out;
- retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
+ path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
+ if (IS_ERR(path))
+ retval = PTR_ERR(path);
err_out:
up_write((&EXT4_I(inode)->i_data_sem));
ext4_free_ext_path(path);
@@ -663,8 +664,8 @@ int ext4_ind_migrate(struct inode *inode)
if (unlikely(ret2 && !ret))
ret = ret2;
errout:
- ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_journal_stop(handle);
out_unlock:
ext4_writepages_up_write(inode->i_sb, alloc_ctx);
return ret;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 204f53b23622..b64661ea6e0e 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -17,27 +17,23 @@
* get_ext_path() - Find an extent path for designated logical block number.
* @inode: inode to be searched
* @lblock: logical block number to find an extent path
- * @ppath: pointer to an extent path pointer (for output)
+ * @path: pointer to an extent path
*
- * ext4_find_extent wrapper. Return 0 on success, or a negative error value
- * on failure.
+ * ext4_find_extent wrapper. Return an extent path pointer on success,
+ * or an error pointer on failure.
*/
-static inline int
+static inline struct ext4_ext_path *
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
- struct ext4_ext_path **ppath)
+ struct ext4_ext_path *path)
{
- struct ext4_ext_path *path;
-
- path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
+ path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
if (path[ext_depth(inode)].p_ext == NULL) {
ext4_free_ext_path(path);
- *ppath = NULL;
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
- *ppath = path;
- return 0;
+ return path;
}
/**
@@ -95,9 +91,11 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
int ret = 0;
ext4_lblk_t last = from + count;
while (from < last) {
- *err = get_ext_path(inode, from, &path);
- if (*err)
- goto out;
+ path = get_ext_path(inode, from, path);
+ if (IS_ERR(path)) {
+ *err = PTR_ERR(path);
+ return ret;
+ }
ext = path[ext_depth(inode)].p_ext;
if (unwritten != ext4_ext_is_unwritten(ext))
goto out;
@@ -166,15 +164,16 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
return 0;
}
-/* Force page buffers uptodate w/o dropping page's lock */
-static int
-mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
+/* Force folio buffers uptodate w/o dropping folio's lock */
+static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
{
struct inode *inode = folio->mapping->host;
sector_t block;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ struct buffer_head *bh, *head;
unsigned int blocksize, block_start, block_end;
- int i, err, nr = 0, partial = 0;
+ int nr = 0;
+ bool partial = false;
+
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
@@ -186,19 +185,21 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
if (!head)
head = create_empty_buffers(folio, blocksize, 0);
- block = (sector_t)folio->index << (PAGE_SHIFT - inode->i_blkbits);
- for (bh = head, block_start = 0; bh != head || !block_start;
- block++, block_start = block_end, bh = bh->b_this_page) {
+ block = folio_pos(folio) >> inode->i_blkbits;
+ block_end = 0;
+ bh = head;
+ do {
+ block_start = block_end;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
- partial = 1;
+ partial = true;
continue;
}
if (buffer_uptodate(bh))
continue;
if (!buffer_mapped(bh)) {
- err = ext4_get_block(inode, block, bh, 0);
+ int err = ext4_get_block(inode, block, bh, 0);
if (err)
return err;
if (!buffer_mapped(bh)) {
@@ -207,21 +208,30 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
continue;
}
}
- BUG_ON(nr >= MAX_BUF_PER_PAGE);
- arr[nr++] = bh;
- }
+ lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+ ext4_read_bh_nowait(bh, 0, NULL);
+ nr++;
+ } while (block++, (bh = bh->b_this_page) != head);
+
/* No io required */
if (!nr)
goto out;
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (!bh_uptodate_or_lock(bh)) {
- err = ext4_read_bh(bh, 0, NULL);
- if (err)
- return err;
- }
- }
+ bh = head;
+ do {
+ if (bh_offset(bh) + blocksize <= from)
+ continue;
+ if (bh_offset(bh) > to)
+ break;
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ continue;
+ return -EIO;
+ } while ((bh = bh->b_this_page) != head);
out:
if (!partial)
folio_mark_uptodate(folio);
@@ -624,9 +634,11 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
int offset_in_page;
int unwritten, cur_len;
- ret = get_ext_path(orig_inode, o_start, &path);
- if (ret)
+ path = get_ext_path(orig_inode, o_start, path);
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
+ }
ex = path[path->p_depth].p_ext;
cur_blk = le32_to_cpu(ex->ee_block);
cur_len = ext4_ext_get_actual_len(ex);
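
mext_page_mkuptodate() now issues every needed buffer read without blocking and waits in a second pass, rather than collecting buffers into a fixed MAX_BUF_PER_PAGE array. A toy model of the issue-then-wait pattern (synchronous stubs stand in for real block I/O):

#include <stdbool.h>
#include <stdio.h>

#define NBUFS 8

struct buf { bool uptodate; bool io_issued; };

static void read_nowait(struct buf *b) { b->io_issued = true; }
static void wait_on(struct buf *b)     { if (b->io_issued) b->uptodate = true; }

/* Two passes over the buffer ring: issue all reads first so they can
 * proceed in parallel, then wait and fail with -EIO on any miss. */
static int make_uptodate(struct buf bufs[NBUFS])
{
	int nr = 0;

	for (int i = 0; i < NBUFS; i++)
		if (!bufs[i].uptodate) { read_nowait(&bufs[i]); nr++; }
	if (!nr)
		return 0;			/* no I/O required */
	for (int i = 0; i < NBUFS; i++) {
		wait_on(&bufs[i]);
		if (!bufs[i].uptodate)
			return -5;		/* -EIO */
	}
	return 0;
}

int main(void)
{
	struct buf bufs[NBUFS] = { { .uptodate = true } };

	printf("ret=%d\n", make_uptodate(bufs));
	return 0;
}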
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6a95713f9193..790db7eac6c2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1482,7 +1482,7 @@ static bool ext4_match(struct inode *parent,
}
/*
- * Returns 0 if not found, -1 on failure, and 1 on success
+ * Returns 0 if not found, -EFSCORRUPTED on failure, and 1 on success
*/
int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct inode *dir, struct ext4_filename *fname,
@@ -1503,7 +1503,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
* a full check */
if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
buf_size, offset))
- return -1;
+ return -EFSCORRUPTED;
*res_dir = de;
return 1;
}
@@ -1511,7 +1511,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
if (de_len <= 0)
- return -1;
+ return -EFSCORRUPTED;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
@@ -1574,7 +1574,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
&has_inline_data);
if (inlined)
*inlined = has_inline_data;
- if (has_inline_data)
+ if (has_inline_data || IS_ERR(ret))
goto cleanup_and_exit;
}
@@ -1663,8 +1663,10 @@ restart:
goto cleanup_and_exit;
} else {
brelse(bh);
- if (i < 0)
+ if (i < 0) {
+ ret = ERR_PTR(i);
goto cleanup_and_exit;
+ }
}
next:
if (++block >= nblocks)
@@ -1758,7 +1760,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
if (retval == 1)
goto success;
brelse(bh);
- if (retval == -1) {
+ if (retval < 0) {
bh = ERR_PTR(ERR_BAD_DX_DIR);
goto errout;
}
@@ -1999,7 +2001,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
split = count/2;
hash2 = map[split].hash;
- continued = hash2 == map[split - 1].hash;
+ continued = split > 0 ? hash2 == map[split - 1].hash : 0;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
(unsigned long)dx_get_block(frame->at),
hash2, split, count-split));
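
The do_split() fix only dereferences map[split - 1] when the split point is positive. A tiny demonstration of the guarded comparison (values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned map[] = { 10, 10, 20, 30 };
	int split = 0;	/* degenerate split point */
	int continued = split > 0 ? map[split] == map[split - 1] : 0;

	printf("continued=%d\n", continued);	/* no read before map[0] */
	return 0;
}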
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 8494492582ab..5d3a9dc9a32d 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -221,7 +221,7 @@ int ext4_mpage_readpages(struct inode *inode,
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
struct block_device *bdev = inode->i_sb->s_bdev;
int length;
@@ -263,6 +263,7 @@ int ext4_mpage_readpages(struct inode *inode,
unsigned map_offset = block_in_file - map.m_lblk;
unsigned last = map.m_len - map_offset;
+ first_block = map.m_pblk + map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
/* needed? */
@@ -271,8 +272,6 @@ int ext4_mpage_readpages(struct inode *inode,
}
if (page_block == blocks_per_page)
break;
- blocks[page_block] = map.m_pblk + map_offset +
- relative_block;
page_block++;
block_in_file++;
}
@@ -307,7 +306,9 @@ int ext4_mpage_readpages(struct inode *inode,
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map.m_pblk-1)
+ if (!page_block)
+ first_block = map.m_pblk;
+ else if (first_block + page_block != map.m_pblk)
goto confused;
for (relative_block = 0; ; relative_block++) {
if (relative_block == map.m_len) {
@@ -316,7 +317,6 @@ int ext4_mpage_readpages(struct inode *inode,
break;
} else if (page_block == blocks_per_page)
break;
- blocks[page_block] = map.m_pblk+relative_block;
page_block++;
block_in_file++;
}
@@ -339,7 +339,7 @@ int ext4_mpage_readpages(struct inode *inode,
* This folio will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != blocks[0] - 1 ||
+ if (bio && (last_block_in_bio != first_block - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
submit_and_realloc:
submit_bio(bio);
@@ -355,7 +355,7 @@ int ext4_mpage_readpages(struct inode *inode,
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, folio->index);
- bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_iter.bi_sector = first_block << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
if (rac)
bio->bi_opf |= REQ_RAHEAD;
@@ -371,7 +371,7 @@ int ext4_mpage_readpages(struct inode *inode,
submit_bio(bio);
bio = NULL;
} else
- last_block_in_bio = blocks[blocks_per_page - 1];
+ last_block_in_bio = first_block + blocks_per_page - 1;
continue;
confused:
if (bio) {
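
readpage.c drops the per-page blocks[] array: it is enough to remember the first physical block and check arithmetically that each new mapping continues the run. A small sketch of that contiguity test (extents here are made-up (pblk, len) pairs):

#include <stdio.h>

int main(void)
{
	unsigned long first_block = 0, page_block = 0;
	unsigned long extents[][2] = { { 100, 3 }, { 103, 2 }, { 200, 1 } };

	for (int i = 0; i < 3; i++) {
		unsigned long pblk = extents[i][0], len = extents[i][1];

		if (!page_block)
			first_block = pblk;
		else if (first_block + page_block != pblk) {
			printf("discontiguous at extent %d\n", i); /* "confused" */
			break;
		}
		page_block += len;
	}
	printf("first=%lu blocks=%lu\n", first_block, page_block);
	return 0;
}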
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 0ba9837d65ca..e04eb08b9060 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1319,8 +1319,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
if (!bh)
return -EIO;
- ext4_inode_bitmap_csum_set(sb, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bh);
brelse(bh);
bh = ext4_get_bitmap(sb, group_data->block_bitmap);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e72145c4ae5a..16a4ce704460 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -735,11 +735,12 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+	 * EXT4_FLAGS_SHUTDOWN was set, which stops all filesystem
+	 * modifications. We don't set SB_RDONLY because that requires
+	 * the sb->s_umount semaphore, and setting it without the proper
+	 * remount procedure confuses code such as freeze_super(),
+	 * leading to deadlocks and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
}
static void update_super_work(struct work_struct *work)
@@ -3045,7 +3046,7 @@ int ext4_seq_options_show(struct seq_file *seq, void *offset)
seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
rc = _ext4_show_options(seq, sb, 1);
- seq_puts(seq, "\n");
+ seq_putc(seq, '\n');
return rc;
}
@@ -5087,16 +5088,27 @@ out:
return ret;
}
-static void ext4_hash_info_init(struct super_block *sb)
+static int ext4_hash_info_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
unsigned int i;
+ sbi->s_def_hash_version = es->s_def_hash_version;
+
+ if (sbi->s_def_hash_version > DX_HASH_LAST) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid default hash set in the superblock");
+ return -EINVAL;
+ } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
+ ext4_msg(sb, KERN_ERR,
+ "SIPHASH is not a valid default hash value");
+ return -EINVAL;
+ }
+
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
- sbi->s_def_hash_version = es->s_def_hash_version;
if (ext4_has_feature_dir_index(sb)) {
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
@@ -5114,6 +5126,7 @@ static void ext4_hash_info_init(struct super_block *sb)
#endif
}
}
+ return 0;
}
static int ext4_block_group_meta_init(struct super_block *sb, int silent)
@@ -5165,6 +5178,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent)
return 0;
}
+/*
+ * It's hard to get stripe-aligned blocks if the stripe is not aligned
+ * with the cluster size, so disable striping and alert the user; this
+ * simplifies the code and avoids stripe-aligned allocation, which would
+ * rarely succeed.
+ */
+static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
+ stripe % sbi->s_cluster_ratio != 0);
+}
+
static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
{
struct ext4_super_block *es = NULL;
@@ -5249,7 +5274,9 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err)
goto failed_mount;
- ext4_hash_info_init(sb);
+ err = ext4_hash_info_init(sb);
+ if (err)
+ goto failed_mount;
err = ext4_handle_clustersize(sb);
if (err)
@@ -5272,13 +5299,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
goto failed_mount3;
sbi->s_stripe = ext4_get_stripe_size(sbi);
- /*
- * It's hard to get stripe aligned blocks if stripe is not aligned with
- * cluster, just disable stripe and alert user to simpfy code and avoid
- * stripe aligned allocation which will rarely successes.
- */
- if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 &&
- sbi->s_stripe % sbi->s_cluster_ratio != 0) {
+ if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
ext4_msg(sb, KERN_WARNING,
"stripe (%lu) is not aligned with cluster size (%u), "
"stripe is disabled",
@@ -5313,6 +5334,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
+ spin_lock_init(&sbi->s_bdev_wb_lock);
+
ext4_fast_commit_init(sb);
sb->s_root = NULL;
@@ -5534,7 +5557,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
* Save the original bdev mapping's wb_err value which could be
* used to detect the metadata async write error.
*/
- spin_lock_init(&sbi->s_bdev_wb_lock);
errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
&sbi->s_bdev_wb_err);
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
@@ -5614,8 +5636,8 @@ failed_mount3a:
failed_mount3:
/* flush s_sb_upd_work before sbi destroy */
flush_work(&sbi->s_sb_upd_work);
- del_timer_sync(&sbi->s_err_report);
ext4_stop_mmpd(sbi);
+ del_timer_sync(&sbi->s_err_report);
ext4_group_desc_free(sbi);
failed_mount:
if (sbi->s_chksum_driver)
@@ -6441,6 +6463,15 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
}
+ if ((ctx->spec & EXT4_SPEC_s_stripe) &&
+ ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
+ ext4_msg(sb, KERN_WARNING,
+ "stripe (%lu) is not aligned with cluster size (%u), "
+ "stripe is disabled",
+ ctx->s_stripe, sbi->s_cluster_ratio);
+ ctx->s_stripe = 0;
+ }
+
/*
* Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
* two calls to ext4_should_dioread_nolock() to return inconsistent
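
The stripe check factored into ext4_is_stripe_incompatible() is now applied at mount and remount alike. Its core predicate, reproduced as a standalone function with sample values (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* A stripe that is not a whole number of clusters gets disabled. */
static bool stripe_incompatible(unsigned long stripe, unsigned cluster_ratio)
{
	return stripe > 0 && cluster_ratio > 1 && stripe % cluster_ratio != 0;
}

int main(void)
{
	printf("%d %d\n",
	       stripe_incompatible(24, 16),	/* 1: disabled */
	       stripe_incompatible(32, 16));	/* 0: kept */
	return 0;
}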
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index 2f37e1ea3955..d9203228ce97 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -76,17 +76,17 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
while (count) {
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int res;
- res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res)
return res;
- memcpy_to_page(page, offset_in_page(pos), buf, n);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
- res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0)
return res;
if (res != n)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 46ce2f21fef9..e0e1956dcdd3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -458,7 +458,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
ext4_xattr_inode_set_ref(inode, 1);
} else {
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_XATTR);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
}
@@ -1039,7 +1039,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
s64 ref_count;
int ret;
- inode_lock(ea_inode);
+ inode_lock_nested(ea_inode, I_MUTEX_XATTR);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
if (ret)
@@ -2879,33 +2879,31 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
if (*ea_inode_array == NULL) {
/*
* Start with 15 inodes, so it fits into a power-of-two size.
- * If *ea_inode_array is NULL, this is essentially offsetof()
*/
- (*ea_inode_array) =
- kmalloc(offsetof(struct ext4_xattr_inode_array,
- inodes[EIA_MASK]),
- GFP_NOFS);
+ (*ea_inode_array) = kmalloc(
+ struct_size(*ea_inode_array, inodes, EIA_MASK),
+ GFP_NOFS);
if (*ea_inode_array == NULL)
return -ENOMEM;
(*ea_inode_array)->count = 0;
} else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
/* expand the array once all 15 + n * 16 slots are full */
struct ext4_xattr_inode_array *new_array = NULL;
- int count = (*ea_inode_array)->count;
- /* if new_array is NULL, this is essentially offsetof() */
new_array = kmalloc(
- offsetof(struct ext4_xattr_inode_array,
- inodes[count + EIA_INCR]),
- GFP_NOFS);
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count + EIA_INCR),
+ GFP_NOFS);
if (new_array == NULL)
return -ENOMEM;
memcpy(new_array, *ea_inode_array,
- offsetof(struct ext4_xattr_inode_array, inodes[count]));
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count));
kfree(*ea_inode_array);
*ea_inode_array = new_array;
}
- (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
+ (*ea_inode_array)->count++;
+ (*ea_inode_array)->inodes[(*ea_inode_array)->count - 1] = inode;
return 0;
}
@@ -3036,8 +3034,6 @@ void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
*
* Create a new entry in the extended attribute block cache, and insert
* it unless such an entry is already in the cache.
- *
- * Returns 0, or a negative error number on failure.
*/
static void
ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
@@ -3065,8 +3061,7 @@ ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
*
* Compare two extended attribute blocks for equality.
*
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
+ * Returns 0 if the blocks are equal, 1 if they differ.
*/
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
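
xattr.c replaces hand-rolled offsetof() sizing of the flexible inode array with struct_size(). A userspace approximation (the kernel macro additionally saturates on overflow, which this sketch omits):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct inode_array {
	unsigned int count;
	void *inodes[];
};

#define STRUCT_SIZE(type, member, n) \
	(offsetof(type, member) + sizeof(((type *)0)->member[0]) * (n))

int main(void)
{
	struct inode_array *arr =
		malloc(STRUCT_SIZE(struct inode_array, inodes, 15));

	if (!arr)
		return 1;
	arr->count = 0;
	printf("allocated %zu bytes\n",
	       STRUCT_SIZE(struct inode_array, inodes, 15));
	free(arr);
	return 0;
}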
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index bd97c4aa8177..b25c2d7b5f99 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -32,8 +32,7 @@ struct ext4_xattr_header {
__le32 h_refcount; /* reference count */
__le32 h_blocks; /* number of disk blocks used */
__le32 h_hash; /* hash value of all attributes */
- __le32 h_checksum; /* crc32c(uuid+id+xattrblock) */
- /* id = inum if refcount=1, blknum otherwise */
+ __le32 h_checksum; /* crc32c(uuid+blknum+xattrblock) */
__u32 h_reserved[3]; /* zero right now */
};
@@ -130,8 +129,8 @@ struct ext4_xattr_ibody_find {
};
struct ext4_xattr_inode_array {
- unsigned int count; /* # of used items in the array */
- struct inode *inodes[];
+ unsigned int count;
+ struct inode *inodes[] __counted_by(count);
};
extern const struct xattr_handler ext4_xattr_user_handler;
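
The __counted_by(count) annotation added in xattr.h explains the reordered store in xattr.c: bounds instrumentation treats inodes[i] with i >= count as out of range, so the counter must grow before the element is written. A compilable illustration (the attribute needs a recent GCC or Clang and is stubbed out here):

#include <stdio.h>
#include <stdlib.h>

#define __counted_by(m)	/* no-op stub without compiler support */

struct inode_array {
	unsigned int count;
	int inodes[] __counted_by(count);
};

int main(void)
{
	struct inode_array *arr = malloc(sizeof(*arr) + 16 * sizeof(int));

	if (!arr)
		return 1;
	arr->count = 0;
	arr->count++;				/* grow the bound first... */
	arr->inodes[arr->count - 1] = 42;	/* ...then the store is in range */
	printf("count=%u ino=%d\n", arr->count, arr->inodes[0]);
	free(arr);
	return 0;
}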
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index bdd96329dddd..7f76460b721f 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -99,7 +99,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
- f2fs_handle_page_eio(sbi, page->index, META);
+ f2fs_handle_page_eio(sbi, page_folio(page), META);
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -345,30 +345,31 @@ static int __f2fs_write_meta_page(struct page *page,
enum iostat_type io_type)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
- trace_f2fs_writepage(page_folio(page), META);
+ trace_f2fs_writepage(folio, META);
if (unlikely(f2fs_cp_error(sbi))) {
if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
goto redirty_out;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+ if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
- f2fs_do_write_meta_page(sbi, page, io_type);
+ f2fs_do_write_meta_page(sbi, folio, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim)
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_write(sbi, META);
@@ -1551,7 +1552,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits +
- (i << F2FS_BLKSIZE_BITS), blk + i);
+ F2FS_BLK_TO_BYTES(i), blk + i);
}
/* write out checkpoint buffer at block 0 */
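
The checkpoint.c hunk swaps an open-coded shift by F2FS_BLKSIZE_BITS for the named F2FS_BLK_TO_BYTES() conversion. An illustrative definition, assuming the default 4 KiB block size:

#include <stdio.h>

#define F2FS_BLKSIZE_BITS 12	/* 4 KiB blocks assumed */
#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)

int main(void)
{
	printf("%llu\n", F2FS_BLK_TO_BYTES(3));	/* 3 blocks -> 12288 bytes */
	return 0;
}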
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 990b93689b46..7f26440e8595 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -90,11 +90,13 @@ bool f2fs_is_compressed_page(struct page *page)
static void f2fs_set_compressed_page(struct page *page,
struct inode *inode, pgoff_t index, void *data)
{
- attach_page_private(page, (void *)data);
+ struct folio *folio = page_folio(page);
+
+ folio_attach_private(folio, (void *)data);
/* i_crypto_info and iv index */
- page->index = index;
- page->mapping = inode->i_mapping;
+ folio->index = index;
+ folio->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
@@ -160,17 +162,17 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
cc->cluster_idx = NULL_CLUSTER;
}
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
unsigned int cluster_ofs;
- if (!f2fs_cluster_can_merge_page(cc, page->index))
+ if (!f2fs_cluster_can_merge_page(cc, folio->index))
f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
- cluster_ofs = offset_in_cluster(cc, page->index);
- cc->rpages[cluster_ofs] = page;
+ cluster_ofs = offset_in_cluster(cc, folio->index);
+ cc->rpages[cluster_ofs] = folio_page(folio, 0);
cc->nr_rpages++;
- cc->cluster_idx = cluster_idx(cc, page->index);
+ cc->cluster_idx = cluster_idx(cc, folio->index);
}
#ifdef CONFIG_F2FS_FS_LZO
@@ -879,7 +881,7 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
- if (page->index >= nr_pages)
+ if (page_folio(page)->index >= nr_pages)
return true;
}
return false;
@@ -945,7 +947,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, i;
- for (i = 1, count = 1; i < cluster_size; i++) {
+ for (i = 0, count = 0; i < cluster_size; i++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
@@ -956,8 +958,8 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
return count;
}
-static int __f2fs_cluster_blocks(struct inode *inode,
- unsigned int cluster_idx, bool compr_blks)
+static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
+ enum cluster_check_type type)
{
struct dnode_of_data dn;
unsigned int start_idx = cluster_idx <<
@@ -978,10 +980,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
- if (compr_blks)
- ret = __f2fs_get_cluster_blocks(inode, &dn);
- else
+ if (type == CLUSTER_COMPR_BLKS)
+ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
+ else if (type == CLUSTER_IS_COMPR)
ret = 1;
+ } else if (type == CLUSTER_RAW_BLKS) {
+ ret = __f2fs_get_cluster_blocks(inode, &dn);
}
fail:
f2fs_put_dnode(&dn);
@@ -991,7 +995,16 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
+ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
+ CLUSTER_COMPR_BLKS);
+}
+
+/* return # of raw blocks in non-compressed cluster */
+static int f2fs_decompressed_blocks(struct inode *inode,
+ unsigned int cluster_idx)
+{
+ return __f2fs_cluster_blocks(inode, cluster_idx,
+ CLUSTER_RAW_BLKS);
}
/* return whether cluster is compressed one or not */
@@ -999,7 +1012,16 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
index >> F2FS_I(inode)->i_log_cluster_size,
- false);
+ CLUSTER_IS_COMPR);
+}
+
+/* return whether the cluster contains non-raw blocks or not */
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
+{
+ unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
+
+ return f2fs_decompressed_blocks(inode, cluster_idx) !=
+ F2FS_I(inode)->i_cluster_size;
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -1093,7 +1115,7 @@ retry:
if (PageUptodate(page))
f2fs_put_page(page, 1);
else
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
}
if (!f2fs_cluster_is_empty(cc)) {
@@ -1123,7 +1145,7 @@ retry:
}
f2fs_wait_on_page_writeback(page, DATA, true, true);
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
if (!PageUptodate(page)) {
release_and_retry:
@@ -1523,7 +1545,8 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
- ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
+ ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
+ &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
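
f2fs_is_sparse_cluster(), added in these compress.c hunks, treats a cluster as sparse when fewer raw blocks are mapped than it has slots. A simplified model using a flat address array, where 0 marks a hole (illustrative, not the on-disk format):

#include <stdbool.h>
#include <stdio.h>

static int raw_blocks(const unsigned *addrs, int cluster_size)
{
	int count = 0;

	for (int i = 0; i < cluster_size; i++)
		if (addrs[i] != 0)
			count++;
	return count;
}

static bool is_sparse_cluster(const unsigned *addrs, int cluster_size)
{
	return raw_blocks(addrs, cluster_size) != cluster_size;
}

int main(void)
{
	unsigned cluster[4] = { 100, 101, 0, 103 };	/* one hole */

	printf("sparse=%d\n", is_sparse_cluster(cluster, 4));
	return 0;
}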
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6457e5bca9c9..94f7b084f601 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
@@ -355,7 +354,7 @@ static void f2fs_write_end_io(struct bio *bio)
}
f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page->index != nid_of_node(page));
+ page_folio(page)->index != nid_of_node(page));
dec_page_count(sbi, type);
if (f2fs_in_warm_node_list(sbi, page))
@@ -704,7 +703,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio = __bio_alloc(fio, 1);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -803,7 +802,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
fio->page->mapping->host,
- fio->page->index, fio) &&
+ page_folio(fio->page)->index, fio) &&
bio_add_page(*bio, page, PAGE_SIZE, 0) ==
PAGE_SIZE) {
ret = 0;
@@ -903,7 +902,7 @@ alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
@@ -996,13 +995,13 @@ next:
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
!f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- bio_page->index, fio)))
+ page_folio(bio_page)->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- bio_page->index, fio, GFP_NOIO);
+ page_folio(bio_page)->index, fio, GFP_NOIO);
io->fio = *fio;
}
@@ -1087,7 +1086,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
}
/* This can handle encryption stuffs */
-static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
block_t blkaddr, blk_opf_t op_flags,
bool for_write)
{
@@ -1095,14 +1094,14 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
iostat_update_and_unbind_ctx(bio);
if (bio->bi_private)
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
@@ -1270,7 +1269,7 @@ got_it:
return page;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+ err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
op_flags, for_write);
if (err)
goto put_err;
@@ -1713,6 +1712,14 @@ skip:
dn.ofs_in_node = end_offset;
}
+ if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create) {
+ /* the next block to be allocated may not be contiguous. */
+ if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
+ CAP_BLKS_PER_SEC(sbi) - 1)
+ goto sync_out;
+ }
+
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
@@ -1939,7 +1946,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
inode_lock_shared(inode);
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
if (start > maxbytes) {
ret = -EFBIG;
goto out;
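
Several hunks in this file trade open-coded shifts by F2FS_BLKSIZE_BITS
for the F2FS_BLK_TO_BYTES()/F2FS_BYTES_TO_BLK() helpers. A small sketch
of why the named helper is the safer spelling, assuming the standard
4KiB f2fs block; the block count is hypothetical, and the widening cast
is the point:

#include <stdio.h>
#include <stdint.h>

#define F2FS_BLKSIZE_BITS	12	/* 4KiB blocks */
#define F2FS_BLK_TO_BYTES(blk)	((uint64_t)(blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BYTES_TO_BLK(b)	((uint64_t)(b) >> F2FS_BLKSIZE_BITS)

int main(void)
{
	uint32_t max_blocks = 0x00400000;	/* hypothetical per-file limit */

	/* 2^22 blocks << 12 needs 35 bits: exact in the 64-bit helper,
	 * silently truncated if the shift were done in 32 bits */
	printf("bytes: %llu\n",
	       (unsigned long long)F2FS_BLK_TO_BYTES(max_blocks));
	printf("round trip: %llu blocks\n",
	       (unsigned long long)F2FS_BYTES_TO_BLK(F2FS_BLK_TO_BYTES(max_blocks)));
	return 0;
}
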
@@ -2064,7 +2071,7 @@ out:
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
- return inode->i_sb->s_maxbytes;
+ return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
return i_size_read(inode);
}
@@ -2208,19 +2215,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
+ struct folio *folio;
if (!page)
continue;
- if ((sector_t)page->index >= last_block_in_file) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- } else if (!PageUptodate(page)) {
+
+ folio = page_folio(page);
+ if ((sector_t)folio->index >= last_block_in_file) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ } else if (!folio_test_uptodate(folio)) {
continue;
}
- unlock_page(page);
+ folio_unlock(folio);
if (for_write)
- put_page(page);
+ folio_put(folio);
cc->rpages[i] = NULL;
cc->nr_rpages--;
}
@@ -2280,7 +2290,7 @@ skip_reading_dnode:
}
for (i = 0; i < cc->nr_cpages; i++) {
- struct page *page = dic->cpages[i];
+ struct folio *folio = page_folio(dic->cpages[i]);
block_t blkaddr;
struct bio_post_read_ctx *ctx;
@@ -2290,7 +2300,8 @@ skip_reading_dnode:
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+ if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
+ blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
break;
@@ -2300,7 +2311,7 @@ skip_reading_dnode:
if (bio && (!page_is_mergeable(sbi, bio,
*last_block_in_bio, blkaddr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
submit_and_realloc:
f2fs_submit_read_bio(sbi, bio, DATA);
bio = NULL;
@@ -2309,7 +2320,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
f2fs_ra_op_flags(rac),
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
f2fs_decompress_end_io(dic, ret, true);
@@ -2319,7 +2330,7 @@ submit_and_realloc:
}
}
- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
ctx = get_post_read_ctx(bio);
@@ -2430,7 +2441,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
if (ret)
goto set_error_page;
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
goto next_page;
read_single_page:
@@ -2645,21 +2656,24 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct page *page = fio->page;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(fio->page);
+ struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
bool ipu_force = false;
+ bool atomic_commit;
int err = 0;
/* Use COW inode to make dnode_of_data for atomic write */
- if (f2fs_is_atomic_file(inode))
+ atomic_commit = f2fs_is_atomic_file(inode) &&
+ page_private_atomic(folio_page(folio, 0));
+ if (atomic_commit)
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
else
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_inplace_update(fio) &&
- f2fs_lookup_read_extent_cache_block(inode, page->index,
+ f2fs_lookup_read_extent_cache_block(inode, folio->index,
&fio->old_blkaddr)) {
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
DATA_GENERIC_ENHANCE))
@@ -2674,7 +2688,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
return -EAGAIN;
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
if (err)
goto out;
@@ -2682,8 +2696,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
+ folio_clear_uptodate(folio);
+ clear_page_private_gcing(folio_page(folio, 0));
goto out_writepage;
}
got_it:
@@ -2709,7 +2723,7 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -2717,11 +2731,11 @@ got_it:
if (err) {
if (fscrypt_inode_uses_fs_layer_crypto(inode))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
- end_page_writeback(page);
+ folio_end_writeback(folio);
} else {
set_inode_flag(inode, FI_UPDATE_WRITE);
}
- trace_f2fs_do_write_data_page(page_folio(page), IPU);
+ trace_f2fs_do_write_data_page(folio, IPU);
return err;
}
@@ -2743,15 +2757,17 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
- trace_f2fs_do_write_data_page(page_folio(page), OPU);
+ trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
+ if (atomic_commit)
+ clear_page_private_atomic(folio_page(folio, 0));
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2760,7 +2776,7 @@ out:
return err;
}
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
@@ -2768,12 +2784,13 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks,
bool allow_balance)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
+ struct page *page = folio_page(folio, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
- loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
bool quota_inode = IS_NOQUOTA(inode);
@@ -2797,11 +2814,11 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.last_block = last_block,
};
- trace_f2fs_writepage(page_folio(page), DATA);
+ trace_f2fs_writepage(folio, DATA);
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
+ mapping_set_error(folio->mapping, -EIO);
/*
* don't drop any dirty dentry pages, to keep the latest
* directory structure.
@@ -2819,7 +2836,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index ||
+ if (folio->index < end_index ||
f2fs_verity_in_progress(inode) ||
compr_blocks)
goto write;
@@ -2829,10 +2846,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
- if ((page->index >= end_index + 1) || !offset)
+ if ((folio->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
write:
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || quota_inode) {
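
The EOF arithmetic kept above (end_index, the offset test, and the
folio_zero_segment() call) decides whether a page is written whole,
zero-padded, or skipped. A user-space model of just that math;
classify_page() is an illustrative name, not a kernel helper:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static const char *classify_page(uint64_t index, uint64_t i_size)
{
	uint64_t end_index = i_size >> PAGE_SHIFT;	/* page holding EOF */
	unsigned int offset = i_size & (PAGE_SIZE - 1);	/* valid bytes in it */

	if (index < end_index)
		return "fully inside EOF: write the whole page";
	if (index >= end_index + 1 || offset == 0)
		return "wholly beyond EOF: nothing to write";
	return "straddles EOF: zero the tail past offset, then write";
}

int main(void)
{
	uint64_t i_size = 3 * PAGE_SIZE + 100;	/* EOF lands inside page 3 */

	printf("page 2: %s\n", classify_page(2, i_size));
	printf("page 3: %s\n", classify_page(3, i_size));
	printf("page 4: %s\n", classify_page(4, i_size));
	return 0;
}
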
@@ -2862,7 +2879,7 @@ write:
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
- err = f2fs_write_inline_data(inode, page);
+ err = f2fs_write_inline_data(inode, folio);
if (!err)
goto out;
}
@@ -2892,7 +2909,7 @@ done:
out:
inode_dec_dirty_pages(inode);
if (err) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
clear_page_private_gcing(page);
}
@@ -2902,7 +2919,7 @@ out:
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
!F2FS_I(inode)->wb_task && allow_balance)
f2fs_balance_fs(sbi, need_balance_fs);
@@ -2920,7 +2937,7 @@ out:
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
/*
* pageout() in MM translates EAGAIN, so calls handle_write_error()
* -> mapping_set_error() -> set_bit(AS_EIO, ...).
@@ -2929,29 +2946,30 @@ redirty_out:
*/
if (!err || wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE;
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
goto out;
if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, page->index)) {
- redirty_page_for_writepage(wbc, page);
+ if (f2fs_is_compressed_cluster(inode, folio->index)) {
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
}
out:
#endif
- return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+ return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
wbc, FS_DATA_IO, 0, true);
}
@@ -3157,11 +3175,11 @@ continue_unlock:
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
folio_get(folio);
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
continue;
}
#endif
- ret = f2fs_write_single_data_page(&folio->page,
+ ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
if (ret == AOP_WRITEPAGE_ACTIVATE)
@@ -3369,11 +3387,11 @@ void f2fs_write_failed(struct inode *inode, loff_t to)
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed)
{
- struct inode *inode = page->mapping->host;
- pgoff_t index = page->index;
+ struct inode *inode = folio->mapping->host;
+ pgoff_t index = folio->index;
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
@@ -3410,13 +3428,13 @@ restart:
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(page_folio(page), ipage);
+ f2fs_do_read_inline_data(folio, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_page_private_inline(ipage);
goto out;
}
- err = f2fs_convert_inline_page(&dn, page);
+ err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
if (err || dn.data_blkaddr != NULL_ADDR)
goto out;
}
@@ -3509,12 +3527,12 @@ unlock_out:
}
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned int len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed, bool *use_cow)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct inode *cow_inode = F2FS_I(inode)->cow_inode;
- pgoff_t index = page->index;
+ pgoff_t index = folio->index;
int err = 0;
block_t ori_blk_addr = NULL_ADDR;
@@ -3552,12 +3570,12 @@ reserve_block:
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page = NULL;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+ struct folio *folio;
+ pgoff_t index = pos >> PAGE_SHIFT;
bool need_balance = false;
bool use_cow = false;
block_t blkaddr = NULL_ADDR;
@@ -3573,7 +3591,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
- * lock_page(page #0) -> lock_page(inode_page)
+ * folio_lock(folio #0) -> folio_lock(inode_page)
*/
if (index != 0) {
err = f2fs_convert_inline_inode(inode);
@@ -3584,18 +3602,20 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
int ret;
+ struct page *page;
*fsdata = NULL;
if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
goto repeat;
- ret = f2fs_prepare_compress_overwrite(inode, pagep,
+ ret = f2fs_prepare_compress_overwrite(inode, &page,
index, fsdata);
if (ret < 0) {
err = ret;
goto fail;
} else if (ret) {
+ *foliop = page_folio(page);
return 0;
}
}
@@ -3603,81 +3623,85 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
repeat:
/*
- * Do not use grab_cache_page_write_begin() to avoid deadlock due to
- * wait_for_stable_page. Will wait that below with our IO control.
+	 * Do not use FGP_STABLE, to avoid a deadlock: we wait for stable
+	 * writeback below, under our own IO control.
*/
- page = f2fs_pagecache_get_page(mapping, index,
+ folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
- if (!page) {
- err = -ENOMEM;
+ if (IS_ERR(folio)) {
+ err = PTR_ERR(folio);
goto fail;
}
/* TODO: cluster can be compressed due to race with .writepage */
- *pagep = page;
+ *foliop = folio;
if (f2fs_is_atomic_file(inode))
- err = prepare_atomic_write_begin(sbi, page, pos, len,
+ err = prepare_atomic_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance, &use_cow);
else
- err = prepare_write_begin(sbi, page, pos, len,
+ err = prepare_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance);
if (err)
- goto fail;
+ goto put_folio;
if (need_balance && !IS_NOQUOTA(inode) &&
has_not_enough_free_secs(sbi, 0, 0)) {
- unlock_page(page);
+ folio_unlock(folio);
f2fs_balance_fs(sbi, true);
- lock_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (folio->mapping != mapping) {
+ /* The folio got truncated from under us */
+ folio_unlock(folio);
+ folio_put(folio);
goto repeat;
}
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
- if (len == PAGE_SIZE || PageUptodate(page))
+ if (len == folio_size(folio) || folio_test_uptodate(folio))
return 0;
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
- zero_user_segment(page, len, PAGE_SIZE);
+ folio_zero_segment(folio, len, folio_size(folio));
return 0;
}
if (blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
} else {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
DATA_GENERIC_ENHANCE_READ)) {
err = -EFSCORRUPTED;
- goto fail;
+ goto put_folio;
}
err = f2fs_submit_page_read(use_cow ?
- F2FS_I(inode)->cow_inode : inode, page,
- blkaddr, 0, true);
+ F2FS_I(inode)->cow_inode : inode,
+ folio, blkaddr, 0, true);
if (err)
- goto fail;
+ goto put_folio;
- lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != mapping)) {
+ folio_unlock(folio);
+ folio_put(folio);
goto repeat;
}
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(!folio_test_uptodate(folio))) {
err = -EIO;
- goto fail;
+ goto put_folio;
}
}
return 0;
+put_folio:
+ folio_unlock(folio);
+ folio_put(folio);
fail:
- f2fs_put_page(page, 1);
f2fs_write_failed(inode, pos + len);
return err;
}
@@ -3685,9 +3709,9 @@ fail:
static int f2fs_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
trace_f2fs_write_end(inode, pos, len, copied);
@@ -3696,17 +3720,17 @@ static int f2fs_write_end(struct file *file,
* should be PAGE_SIZE. Otherwise, we treat it with zero copied and
* let generic_perform_write() try to copy data again through copied=0.
*/
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (unlikely(copied != len))
copied = 0;
else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
/* overwrite compressed file */
if (f2fs_compressed_file(inode) && fsdata) {
- f2fs_compress_write_end(inode, fsdata, page->index, copied);
+ f2fs_compress_write_end(inode, fsdata, folio->index, copied);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
if (pos + copied > i_size_read(inode) &&
@@ -3719,7 +3743,10 @@ static int f2fs_write_end(struct file *file,
if (!copied)
goto unlock_out;
- set_page_dirty(page);
+ folio_mark_dirty(folio);
+
+ if (f2fs_is_atomic_file(inode))
+ set_page_private_atomic(folio_page(folio, 0));
if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
@@ -3729,7 +3756,8 @@ static int f2fs_write_end(struct file *file,
pos + copied);
}
unlock_out:
- f2fs_put_page(page, 1);
+ folio_unlock(folio);
+ folio_put(folio);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
@@ -4110,9 +4138,8 @@ const struct address_space_operations f2fs_dblock_aops = {
.swap_deactivate = f2fs_swap_deactivate,
};
-void f2fs_clear_page_cache_dirty_tag(struct page *page)
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct address_space *mapping = folio->mapping;
unsigned long flags;
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 8b0e1e71b667..546b8ba91261 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -275,7 +275,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
- si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+ si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks);
si->base_mem += NM_I(sbi)->nat_blocks *
f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index cbd7a5e96a37..1136539a57a8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -166,7 +166,8 @@ static unsigned long dir_block_index(unsigned int level,
unsigned long bidx = 0;
for (i = 0; i < level; i++)
- bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
+ bidx += mul_u32_u32(dir_buckets(i, dir_level),
+ bucket_blocks(i));
bidx += idx * bucket_blocks(level);
return bidx;
}
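
dir_block_index() above multiplies a bucket count by blocks-per-bucket,
and both factors are 32-bit, so a deep hash level can wrap the product;
that is what routing it through mul_u32_u32() fixes. A self-contained
model of the helper (the kernel's lives in <linux/math64.h>; the operand
values here are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* multiply two 32-bit values in 64 bits so the product cannot wrap */
static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t buckets = 1U << 17, blocks_per_bucket = 1U << 16;

	/* 2^33 wraps to 0 in 32-bit arithmetic ... */
	printf("32-bit product: %u\n", (unsigned)(buckets * blocks_per_bucket));
	/* ... but survives when widened first */
	printf("64-bit product: %llu\n",
	       (unsigned long long)mul_u32_u32(buckets, blocks_per_bucket));
	return 0;
}
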
@@ -841,6 +842,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ pgoff_t index = page_folio(page)->index;
int i;
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -866,8 +868,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
- !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ !f2fs_truncate_hole(dir, index, index + 1)) {
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
ClearPageUptodate(page);
clear_page_private_all(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index fd1fc06359ee..62ac440d9416 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -366,7 +366,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- if (fofs < et->largest.fofs + et->largest.len &&
+ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
fofs + len > et->largest.fofs) {
et->largest.len = 0;
et->largest_updated = true;
@@ -456,7 +456,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (type == EX_READ &&
et->largest.fofs <= pgofs &&
- et->largest.fofs + et->largest.len > pgofs) {
+ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
*ei = et->largest;
ret = true;
stat_inc_largest_node_hit(sbi);
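
Both extent_cache.c hunks widen et->largest.fofs to pgoff_t before the
addition because extent_info keeps fofs and len as 32-bit fields, so
fofs + len can wrap and make the containment test lie. A short
demonstration with hypothetical extent values:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pgoff_t;	/* as on 64-bit kernels */

int main(void)
{
	uint32_t largest_fofs = 0xffffff00u;	/* hypothetical extent start */
	uint32_t largest_len = 0x200u;		/* reaches past UINT32_MAX */
	pgoff_t fofs = 0xffffff80u;		/* offset inside the extent */

	/* the 32-bit sum wraps to 0x100, so containment is denied */
	printf("u32 sum 0x%x -> inside? %d\n",
	       (unsigned)(largest_fofs + largest_len),
	       fofs < largest_fofs + largest_len);

	/* widening one operand, as the patch does, keeps the sum exact */
	printf("pgoff_t sum 0x%llx -> inside? %d\n",
	       (unsigned long long)((pgoff_t)largest_fofs + largest_len),
	       fofs < (pgoff_t)largest_fofs + largest_len);
	return 0;
}
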
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ac19c61f0c3e..33f5449dc22d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -11,7 +11,6 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
@@ -134,6 +133,12 @@ typedef u32 nid_t;
#define COMPRESS_EXT_NUM 16
+enum blkzone_allocation_policy {
+ BLKZONE_ALLOC_PRIOR_SEQ, /* Prioritize writing to sequential zones */
+ BLKZONE_ALLOC_ONLY_SEQ, /* Only allow writing to sequential zones */
+ BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
+};
+
/*
* An implementation of an rwsem that is explicitly unfair to readers. This
* prevents priority inversion when a low-priority reader acquires the read lock
@@ -285,6 +290,7 @@ enum {
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
TRANS_DIR_INO, /* for transactions dir ino list */
+ XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -784,7 +790,6 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
@@ -802,6 +807,7 @@ enum {
FI_ALIGNED_WRITE, /* enable aligned write */
FI_COW_FILE, /* indicate COW file */
FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
+ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
FI_OPENED_FILE, /* indicate file has been opened */
FI_MAX, /* max flag, never be used */
@@ -1155,6 +1161,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
+ CP_XATTR_DIR,
};
enum iostat_type {
@@ -1293,6 +1300,7 @@ struct f2fs_gc_control {
bool no_bg_gc; /* check the space and stop bg_gc */
bool should_migrate_blocks; /* should migrate blocks */
bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
unsigned int nr_free_secs; /* # of free sections to do GC */
};
@@ -1418,7 +1426,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
* bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
* bit 2 PAGE_PRIVATE_INLINE_INODE
* bit 3 PAGE_PRIVATE_REF_RESOURCE
- * bit 4- f2fs private data
+ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
*
* Layout B: lowest bit should be 0
* page.private is a wrapped pointer.
@@ -1428,6 +1437,7 @@ enum {
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
PAGE_PRIVATE_MAX
};
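
The enum above appends PAGE_PRIVATE_ATOMIC_WRITE to the "Layout A" flag
word described in the comment, and the PAGE_PRIVATE_*_FUNC macros later
in this header stamp out its get/set/clear accessors. A user-space model
of that tagged word, where bit 0 marks the value as flags rather than a
pointer (a sketch of the encoding, not the kernel macros):

#include <stdio.h>

enum {
	PAGE_PRIVATE_NOT_POINTER,	/* bit 0: flags, not a pointer */
	PAGE_PRIVATE_ONGOING_MIGRATION,
	PAGE_PRIVATE_INLINE_INODE,
	PAGE_PRIVATE_REF_RESOURCE,
	PAGE_PRIVATE_ATOMIC_WRITE,	/* the newly added bit 4 */
	PAGE_PRIVATE_MAX		/* data occupies bit 5 upward */
};

static unsigned long set_flag(unsigned long priv, int flag)
{
	return priv | (1UL << PAGE_PRIVATE_NOT_POINTER) | (1UL << flag);
}

static int test_flag(unsigned long priv, int flag)
{
	/* a pointer value (bit 0 clear) carries no flags at all */
	if (!(priv & (1UL << PAGE_PRIVATE_NOT_POINTER)))
		return 0;
	return !!(priv & (1UL << flag));
}

int main(void)
{
	unsigned long priv = set_flag(0, PAGE_PRIVATE_ATOMIC_WRITE);

	printf("atomic? %d migrating? %d\n",
	       test_flag(priv, PAGE_PRIVATE_ATOMIC_WRITE),
	       test_flag(priv, PAGE_PRIVATE_ONGOING_MIGRATION));
	return 0;
}
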
@@ -1559,6 +1569,8 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
unsigned int max_open_zones; /* max open zone resources of the zoned device */
+	/* for adjusting the preferred write position of data on zoned UFS */
+ unsigned int blkzone_alloc_policy;
#endif
/* for node-related operations */
@@ -1685,6 +1697,8 @@ struct f2fs_sb_info {
unsigned int max_victim_search;
/* migration granularity of garbage collection, unit: segment */
unsigned int migration_granularity;
+ /* migration window granularity of garbage collection, unit: segment */
+ unsigned int migration_window_granularity;
/*
* for stat information.
@@ -1994,6 +2008,16 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
return (struct f2fs_super_block *)(sbi->raw_super);
}
+static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
+ pgoff_t index)
+{
+ pgoff_t idx_in_folio = index % (1 << folio_order(folio));
+
+ return (struct f2fs_super_block *)
+ (page_address(folio_page(folio, idx_in_folio)) +
+ F2FS_SUPER_OFFSET);
+}
+
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
return (struct f2fs_checkpoint *)(sbi->ckpt);
@@ -2396,14 +2420,17 @@ static inline void clear_page_private_##name(struct page *page) \
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
static inline unsigned long get_page_private_data(struct page *page)
{
@@ -2435,6 +2462,7 @@ static inline void clear_page_private_all(struct page *page)
clear_page_private_reference(page);
clear_page_private_gcing(page);
clear_page_private_inline(page);
+ clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page));
}
@@ -2854,13 +2882,26 @@ static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
return false;
}
+static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
+}
+
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
+ bool zoned_gc = (type == GC_TIME &&
+ F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
+
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (is_inflight_io(sbi, type))
- return false;
+ if (zoned_gc) {
+ if (is_inflight_read_io(sbi))
+ return false;
+ } else {
+ if (is_inflight_io(sbi, type))
+ return false;
+ }
if (sbi->gc_mode == GC_URGENT_MID)
return true;
@@ -2869,6 +2910,9 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
(type == DISCARD_TIME || type == GC_TIME))
return true;
+ if (zoned_gc)
+ return true;
+
return f2fs_time_over(sbi, type);
}
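
After the is_idle() rework above, a zoned device answering a GC_TIME
query counts only inflight reads as activity and is otherwise treated
as idle. A reduced pure-function model of that decision order (the
GC_URGENT_LOW branch and the time bookkeeping are simplified away):

#include <stdbool.h>
#include <stdio.h>

struct state {
	bool urgent_high, urgent_mid;
	bool inflight_io, inflight_read_io;
	bool zoned, time_over;
};

static bool is_idle(const struct state *s, bool gc_time)
{
	bool zoned_gc = gc_time && s->zoned;

	if (s->urgent_high)
		return true;
	/* zoned GC ignores writes in flight; everyone else does not */
	if (zoned_gc ? s->inflight_read_io : s->inflight_io)
		return false;
	if (s->urgent_mid)
		return true;
	if (zoned_gc)
		return true;
	return s->time_over;
}

int main(void)
{
	struct state s = { .zoned = true, .inflight_io = true };

	/* pending writes no longer veto GC-time idleness on zoned devices */
	printf("idle for GC? %d\n", is_idle(&s, true));
	return 0;
}
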
@@ -2900,26 +2944,27 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+static inline unsigned int get_dnode_base(struct inode *inode,
+ struct page *node_page)
{
- struct f2fs_node *raw_node;
- __le32 *addr_array;
- int base = 0;
- bool is_inode = IS_INODE(node_page);
+ if (!IS_INODE(node_page))
+ return 0;
- raw_node = F2FS_NODE(node_page);
+ return inode ? get_extra_isize(inode) :
+ offset_in_addr(&F2FS_NODE(node_page)->i);
+}
- if (is_inode) {
- if (!inode)
- /* from GC path only */
- base = offset_in_addr(&raw_node->i);
- else if (f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
- }
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct page *node_page)
+{
+ return blkaddr_in_node(F2FS_NODE(node_page)) +
+ get_dnode_base(inode, node_page);
+}
- addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[base + offset]);
+static inline block_t data_blkaddr(struct inode *inode,
+ struct page *node_page, unsigned int offset)
+{
+ return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
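
The refactor above splits the old open-coded branches of data_blkaddr()
into get_dnode_base() (where the address array starts) and
get_dnode_addr() (a pointer to it), so the lookup becomes one
expression. A toy model with a made-up node_page struct in place of the
on-disk f2fs_node:

#include <stdio.h>
#include <stdint.h>

struct node_page {
	int is_inode;			/* inode page vs. direct-node page */
	unsigned int extra_slots;	/* extra attributes, inode pages only */
	uint32_t addrs[8];		/* block address array */
};

static unsigned int get_dnode_base(const struct node_page *np)
{
	return np->is_inode ? np->extra_slots : 0;
}

static const uint32_t *get_dnode_addr(const struct node_page *np)
{
	return np->addrs + get_dnode_base(np);
}

static uint32_t data_blkaddr(const struct node_page *np, unsigned int ofs)
{
	return get_dnode_addr(np)[ofs];	/* one expression, no branches */
}

int main(void)
{
	struct node_page np = { .is_inode = 1, .extra_slots = 2,
				.addrs = { 0, 0, 100, 101, 102 } };

	printf("block at offset 1: %u\n", (unsigned)data_blkaddr(&np, 1));
	return 0;
}
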
@@ -3038,10 +3083,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
fallthrough;
case FI_DATA_EXIST:
- case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
- case FI_ATOMIC_COMMITTED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -3163,8 +3206,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
set_bit(FI_DATA_EXIST, fi->flags);
- if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
@@ -3185,8 +3226,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
- ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -3267,11 +3306,6 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(inode, FI_DATA_EXIST);
}
-static inline int f2fs_has_inline_dots(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_INLINE_DOTS);
-}
-
static inline int f2fs_is_mmap_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_MMAP_FILE);
@@ -3292,8 +3326,6 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
__le32 *addr = get_dnode_addr(inode, page);
@@ -3432,17 +3464,6 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
return F2FS_I(inode)->i_inline_xattr_size;
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page)
-{
- int base = 0;
-
- if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
-
- return blkaddr_in_node(F2FS_NODE(node_page)) + base;
-}
-
#define f2fs_get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3495,7 +3516,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly);
+ bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
@@ -3719,7 +3740,7 @@ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3759,8 +3780,7 @@ void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp);
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno);
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
@@ -3868,7 +3888,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
@@ -3877,7 +3897,7 @@ void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_page_cache_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
@@ -3901,7 +3921,7 @@ void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age);
+ unsigned long long age, bool one_time);
/*
* recovery.c
@@ -3987,7 +4007,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_cp_call_count(sbi, foreground) \
atomic_inc(&sbi->cp_call_count[(foreground)])
-#define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++)
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -4172,7 +4192,7 @@ int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
-int f2fs_write_inline_data(struct inode *inode, struct page *page);
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
@@ -4289,6 +4309,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
+enum cluster_check_type {
+ CLUSTER_IS_COMPR, /* check only if compressed cluster */
+ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
+ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
+};
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
@@ -4309,12 +4334,13 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr,
unsigned int llen, unsigned int c_len);
@@ -4401,6 +4427,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
+static inline int f2fs_is_compressed_cluster(
+ struct inode *inode,
+ pgoff_t index) { return 0; }
+static inline bool f2fs_is_sparse_cluster(
+ struct inode *inode,
+ pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
struct inode *inode,
pgoff_t fofs, block_t blkaddr,
@@ -4653,9 +4685,11 @@ static inline void f2fs_io_schedule_timeout(long timeout)
io_schedule_timeout(timeout);
}
-static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
- enum page_type type)
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
+ struct folio *folio, enum page_type type)
{
+ pgoff_t ofs = folio->index;
+
if (unlikely(f2fs_cp_error(sbi)))
return;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 168f08507004..9ae54c4c72fe 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
@@ -54,7 +53,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -86,7 +85,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- int ret = f2fs_is_compressed_cluster(inode, page->index);
+ int ret = f2fs_is_compressed_cluster(inode, folio->index);
if (ret < 0) {
err = ret;
@@ -106,11 +105,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode) ||
- !PageUptodate(page))) {
- unlock_page(page);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != inode->i_mapping ||
+ folio_pos(folio) > i_size_read(inode) ||
+ !folio_test_uptodate(folio))) {
+ folio_unlock(folio);
err = -EFAULT;
goto out_sem;
}
@@ -118,9 +117,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_alloc) {
/* block allocation */
- err = f2fs_get_block_locked(&dn, page->index);
+ err = f2fs_get_block_locked(&dn, folio->index);
} else {
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
f2fs_put_dnode(&dn);
if (f2fs_is_pinned_file(inode) &&
!__is_valid_data_blkaddr(dn.data_blkaddr))
@@ -128,11 +127,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
}
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto out_sem;
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -140,18 +139,18 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (folio_test_mappedtodisk(folio))
goto out_sem;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
loff_t offset;
offset = i_size_read(inode) & ~PAGE_MASK;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
}
- set_page_dirty(page);
+ folio_mark_dirty(folio);
f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
f2fs_update_time(sbi, REQ_TIME);
@@ -163,7 +162,7 @@ out_sem:
out:
ret = vmf_fs_error(err);
- trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
+ trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
return ret;
}
@@ -218,6 +217,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
+ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+ XATTR_DIR_INO))
+ cp_reason = CP_XATTR_DIR;
return cp_reason;
}
@@ -373,8 +375,7 @@ sync_nodes:
f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
- (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
+ if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
ret = f2fs_issue_flush(sbi, inode->i_ino);
if (!ret) {
f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -431,7 +432,7 @@ static bool __found_offset(struct address_space *mapping,
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
struct dnode_of_data dn;
pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
@@ -513,10 +514,7 @@ fail:
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
-
- if (f2fs_compressed_file(inode))
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
switch (whence) {
case SEEK_SET:
@@ -1052,6 +1050,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
}
+ /*
+ * wait for inflight dio, blocks should be removed after
+ * IO completion.
+ */
+ if (attr->ia_size < old_size)
+ inode_dio_wait(inode);
+
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -1888,6 +1893,12 @@ static long f2fs_fallocate(struct file *file, int mode,
if (ret)
goto out;
+ /*
+ * wait for inflight dio, blocks should be removed after IO
+ * completion.
+ */
+ inode_dio_wait(inode);
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
goto out;
@@ -2116,10 +2127,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode *pinode;
loff_t isize;
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2149,6 +2162,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
goto out;
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[READ]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2158,27 +2172,18 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
/* Check if the inode already has a COW inode */
if (fi->cow_inode == NULL) {
/* Create a COW inode for atomic write */
- pinode = f2fs_iget(inode->i_sb, fi->i_pino);
- if (IS_ERR(pinode)) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- ret = PTR_ERR(pinode);
- goto out;
- }
+ struct dentry *dentry = file_dentry(filp);
+ struct inode *dir = d_inode(dentry->d_parent);
- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
- iput(pinode);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
+ if (ret)
+ goto out_unlock;
set_inode_flag(fi->cow_inode, FI_COW_FILE);
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
@@ -2187,11 +2192,13 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
F2FS_I(fi->cow_inode)->atomic_inode = inode;
} else {
/* Reuse the already created COW inode */
+ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
+
+ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
+
ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
}
f2fs_write_inode(inode, NULL);
@@ -2210,7 +2217,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
}
f2fs_i_size_write(fi->cow_inode, isize);
+out_unlock:
+ f2fs_up_write(&fi->i_gc_rwsem[READ]);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out;
f2fs_update_time(sbi, REQ_TIME);
fi->atomic_write_task = current;
@@ -2228,6 +2239,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2260,6 +2274,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2279,7 +2296,7 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
}
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly)
+ bool readonly, bool need_lock)
{
struct super_block *sb = sbi->sb;
int ret = 0;
@@ -2326,12 +2343,19 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
if (readonly)
goto out;
+ /* grab sb->s_umount to avoid racing w/ remount() */
+ if (need_lock)
+ down_read(&sbi->sb->s_umount);
+
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi);
clear_opt(sbi, DISCARD);
+ if (need_lock)
+ up_read(&sbi->sb->s_umount);
+
f2fs_update_time(sbi, REQ_TIME);
out:
@@ -2368,7 +2392,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
}
}
- ret = f2fs_do_shutdown(sbi, in, readonly);
+ ret = f2fs_do_shutdown(sbi, in, readonly, true);
if (need_drop)
mnt_drop_write_file(filp);
@@ -2686,7 +2710,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
(range->start + range->len) >> PAGE_SHIFT,
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
+ f2fs_is_atomic_file(inode)) {
err = -EINVAL;
goto unlock_out;
}
@@ -2710,7 +2735,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* block addresses are continuous.
*/
if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
- if (ei.fofs + ei.len >= pg_end)
+ if ((pgoff_t)ei.fofs + ei.len >= pg_end)
goto out;
}
@@ -2793,6 +2818,8 @@ do_map:
goto clear_out;
}
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -2917,6 +2944,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out_unlock;
}
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
goto out_unlock;
@@ -2968,9 +3000,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
f2fs_lock_op(sbi);
- ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
- pos_out >> F2FS_BLKSIZE_BITS,
- len >> F2FS_BLKSIZE_BITS, false);
+ ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
+ F2FS_BYTES_TO_BLK(pos_out),
+ F2FS_BYTES_TO_BLK(len), false);
if (!ret) {
if (dst_max_i_size)
@@ -3014,10 +3046,10 @@ static int __f2fs_ioc_move_range(struct file *filp,
return -EBADF;
dst = fdget(range->dst_fd);
- if (!dst.file)
+ if (!fd_file(dst))
return -EBADF;
- if (!(dst.file->f_mode & FMODE_WRITE)) {
+ if (!(fd_file(dst)->f_mode & FMODE_WRITE)) {
err = -EBADF;
goto err_out;
}
@@ -3026,7 +3058,7 @@ static int __f2fs_ioc_move_range(struct file *filp,
if (err)
goto err_out;
- err = f2fs_move_file_range(filp, range->pos_in, dst.file,
+ err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst),
range->pos_out, range->len);
mnt_drop_write_file(filp);
@@ -3300,6 +3332,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
@@ -4193,6 +4230,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
/* It will never fail, when page has pinned above */
f2fs_bug_on(F2FS_I_SB(inode), !page);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -4207,9 +4246,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = fi->i_cluster_size;
- int count, ret;
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4244,10 +4282,15 @@ static int f2fs_ioc_decompress_file(struct file *filp)
goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (!f2fs_is_compressed_cluster(inode, page_idx))
+ continue;
+
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4257,9 +4300,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
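
The loop rework above stops counting pages down and instead walks
cluster indexes, asking f2fs_is_compressed_cluster() before redirtying
anything, so holes and already-plain clusters cost one check instead of
a full redirty pass. A toy walk with the same shape (cluster geometry
and the compressed-cluster stub are made up for illustration):

#include <stdio.h>

#define LOG_CLUSTER_SIZE	2	/* hypothetical: 4 pages per cluster */
#define CLUSTER_SIZE		(1UL << LOG_CLUSTER_SIZE)

/* stub predicate: pretend odd-numbered clusters are compressed */
static int is_compressed_cluster(unsigned long page_idx)
{
	return (page_idx >> LOG_CLUSTER_SIZE) & 1;
}

int main(void)
{
	unsigned long last_idx = 16 >> LOG_CLUSTER_SIZE;	/* 16 pages */
	unsigned long cluster_idx, page_idx;

	for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
		page_idx = cluster_idx << LOG_CLUSTER_SIZE;

		if (!is_compressed_cluster(page_idx))
			continue;	/* nothing to decompress here */

		printf("redirty pages %lu..%lu\n",
		       page_idx, page_idx + CLUSTER_SIZE - 1);
	}
	return 0;
}
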
@@ -4286,9 +4326,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int count, ret;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4322,10 +4362,15 @@ static int f2fs_ioc_compress_file(struct file *filp)
set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
+
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (f2fs_is_sparse_cluster(inode, page_idx))
+ continue;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4335,9 +4380,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -4538,6 +4580,13 @@ static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_down_read(&fi->i_gc_rwsem[READ]);
}
+ /* dio is not compatible w/ atomic file */
+ if (f2fs_is_atomic_file(inode)) {
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/*
* We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
* the higher-level function iomap_dio_rw() in order to ensure that the
@@ -4597,6 +4646,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
iov_iter_count(to), READ);
+ /* In LFS mode, if there is inflight dio, wait for its completion */
+ if (f2fs_lfs_mode(F2FS_I_SB(inode)))
+ inode_dio_wait(inode);
+
if (f2fs_should_use_dio(inode, iocb, to)) {
ret = f2fs_dio_read_iter(iocb, to);
} else {
@@ -4949,6 +5002,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* Determine whether we will do a direct write or a buffered write. */
dio = f2fs_should_use_dio(inode, iocb, from);
+ /* dio is not compatible w/ atomic write */
+ if (dio && f2fs_is_atomic_file(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
/* Possibly preallocate the blocks for the write. */
target_size = iocb->ki_pos + iov_iter_count(from);
preallocated = f2fs_preallocate_blocks(iocb, from, dio);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 724bbcb447d3..9322a7200e31 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
continue;
}
+ gc_control.one_time = false;
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -116,15 +118,30 @@ static int gc_thread_func(void *data)
goto next;
}
- if (has_enough_invalid_blocks(sbi))
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ if (has_enough_free_blocks(sbi,
+ gc_th->no_zoned_gc_percent)) {
+ wait_ms = gc_th->no_gc_sleep_time;
+ f2fs_up_write(&sbi->gc_lock);
+ goto next;
+ }
+ if (wait_ms == gc_th->no_gc_sleep_time)
+ wait_ms = gc_th->max_sleep_time;
+ }
+
+ if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
- else
+ if (f2fs_sb_has_blkzoned(sbi))
+ gc_control.one_time = true;
+ } else {
increase_sleep_time(gc_th, &wait_ms);
+ }
do_gc:
stat_inc_gc_call_count(sbi, foreground ?
FOREGROUND : BACKGROUND);
- sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+ gc_control.one_time;
/* foreground GC was triggered via f2fs_balance_fs() */
if (foreground)
@@ -179,9 +196,21 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
return -ENOMEM;
gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
- gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
- gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
- gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
+
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
+ gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
+ gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
+ } else {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->no_zoned_gc_percent = 0;
+ gc_th->boost_zoned_gc_percent = 0;
+ }
gc_th->gc_wake = false;
@@ -339,7 +368,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned char age = 0;
unsigned char u;
unsigned int i;
- unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
for (i = 0; i < usable_segs_per_sec; i++)
mtime += get_seg_entry(sbi, start + i)->mtime;
@@ -368,6 +397,11 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
+ CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
+ 100))
+ return UINT_MAX;
+
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
return get_valid_blocks(sbi, segno, true);
@@ -742,7 +776,7 @@ static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
*/
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age)
+ unsigned long long age, bool one_time)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct sit_info *sm = SIT_I(sbi);
@@ -759,6 +793,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
p.alloc_mode = alloc_mode;
p.age = age;
p.age_threshold = sbi->am.age_threshold;
+ p.one_time_gc = one_time;
retry:
select_policy(sbi, gc_type, type, &p);
@@ -1670,13 +1705,14 @@ next_step:
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
- int gc_type)
+ int gc_type, bool one_time)
{
struct sit_info *sit_i = SIT_I(sbi);
int ret;
down_write(&sit_i->sentry_lock);
- ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
+ ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
+ LFS, 0, one_time);
up_write(&sit_i->sentry_lock);
return ret;
}
@@ -1684,30 +1720,49 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type,
- bool force_migrate)
+ bool force_migrate, bool one_time)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
+ unsigned int sec_end_segno;
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
int submitted = 0;
- if (__is_large_section(sbi))
- end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
+ if (__is_large_section(sbi)) {
+ sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
- /*
- * zone-capacity can be less than zone-size in zoned devices,
- * resulting in less than expected usable segments in the zone,
- * calculate the end segno in the zone which can be garbage collected
- */
- if (f2fs_sb_has_blkzoned(sbi))
- end_segno -= SEGS_PER_SEC(sbi) -
- f2fs_usable_segs_in_sec(sbi, segno);
+ /*
+ * zone-capacity can be less than zone-size on zoned devices,
+ * leaving fewer usable segments in the zone than expected, so
+ * calculate the last segno in the zone that can be garbage
+ * collected
+ */
+ if (f2fs_sb_has_blkzoned(sbi))
+ sec_end_segno -= SEGS_PER_SEC(sbi) -
+ f2fs_usable_segs_in_sec(sbi);
+
+ if (gc_type == BG_GC || one_time) {
+ unsigned int window_granularity =
+ sbi->migration_window_granularity;
+
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent))
+ window_granularity *=
+ BOOST_GC_MULTIPLE;
+
+ end_segno = start_segno + window_granularity;
+ }
+
+ if (end_segno > sec_end_segno)
+ end_segno = sec_end_segno;
+ }
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
@@ -1786,7 +1841,8 @@ freed:
if (__is_large_section(sbi))
sbi->next_victim_seg[gc_type] =
- (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ (segno + 1 < sec_end_segno) ?
+ segno + 1 : NULL_SEGNO;
skip:
f2fs_put_page(sum_page, 0);
}
@@ -1863,7 +1919,7 @@ gc_more:
goto stop;
}
retry:
- ret = __get_victim(sbi, &segno, gc_type);
+ ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
if (ret) {
/* allow to search victim from sections has pinned data */
if (ret == -ENODATA && gc_type == FG_GC &&
@@ -1875,17 +1931,21 @@ retry:
}
seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
- gc_control->should_migrate_blocks);
+ gc_control->should_migrate_blocks,
+ gc_control->one_time);
if (seg_freed < 0)
goto stop;
total_freed += seg_freed;
- if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
sec_freed++;
total_sec_freed++;
}
+ if (gc_control->one_time)
+ goto stop;
+
if (gc_type == FG_GC) {
sbi->cur_victim_sec = NULL_SEGNO;
@@ -2010,8 +2070,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- do_garbage_collect(sbi, segno, &gc_list, FG_GC,
- dry_run_sections == 0);
+ do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
put_gc_inode(&gc_list);
if (!dry_run && get_valid_blocks(sbi, segno, true))
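Editor's note: the gc_thread_func() changes give zoned devices their own
pacing: park while free space is plentiful, then switch to short-interval,
synchronous "one-time" GC once it runs low. A condensed standalone model of
that decision (free_pct stands in for the free-section accounting; the kernel
steps wait_ms via increase/decrease_sleep_time() rather than doubling):

    struct pacing { unsigned int min_ms, max_ms, nogc_ms; };

    static unsigned int next_sleep(unsigned int free_pct, unsigned int cur_ms,
                                   const struct pacing *p, int *one_time)
    {
            *one_time = 0;
            if (free_pct > 60)              /* LIMIT_NO_ZONED_GC: park */
                    return p->nogc_ms;
            if (free_pct <= 25) {           /* LIMIT_BOOST_ZONED_GC: boost */
                    *one_time = 1;          /* short, synchronous GC pass */
                    return p->min_ms;
            }
            /* in between: back off toward the maximum */
            return cur_ms * 2 > p->max_ms ? p->max_ms : cur_ms * 2;
    }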
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index a8ea3301b815..2914b678bf8f 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -15,16 +15,27 @@
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
+/* GC sleep parameters for zoned devices */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED 10
+#define DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED 20
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED 60000
+
/* choose candidates from sections whose age is more than 7 days */
#define DEF_GC_THREAD_AGE_THRESHOLD (60 * 60 * 24 * 7)
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
+#define DEF_GC_THREAD_VALID_THRESH_RATIO 95 /* skip sections over 95% valid blocks in one-time GC */
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define LIMIT_NO_ZONED_GC 60 /* percentage of free sections above which gc is skipped on zoned devices */
+#define LIMIT_BOOST_ZONED_GC 25 /* percentage of free sections below which gc is boosted on zoned devices */
+#define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED 3
+#define BOOST_GC_MULTIPLE 5
+
#define DEF_GC_FAILED_PINNED_FILES 2048
#define MAX_GC_FAILED_PINNED_FILES USHRT_MAX
@@ -51,6 +62,11 @@ struct f2fs_gc_kthread {
* caller of f2fs_balance_fs()
* will wait on this wait queue.
*/
+
+ /* gc control for zoned devices */
+ unsigned int no_zoned_gc_percent;
+ unsigned int boost_zoned_gc_percent;
+ unsigned int valid_thresh_ratio;
};
struct gc_inode_list {
@@ -152,6 +168,12 @@ static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
*wait -= min_time;
}
+static inline bool has_enough_free_blocks(struct f2fs_sb_info *sbi,
+ unsigned int limit_perc)
+{
+ return free_sections(sbi) > ((sbi->total_sections * limit_perc) / 100);
+}
+
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
block_t user_block_count = sbi->user_block_count;
@@ -167,3 +189,10 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
free_user_blocks(sbi) <
limit_free_user_blocks(invalid_user_blocks));
}
+
+static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi)
+{
+ if (f2fs_sb_has_blkzoned(sbi))
+ return !has_enough_free_blocks(sbi, LIMIT_BOOST_ZONED_GC);
+ return has_enough_invalid_blocks(sbi);
+}
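Editor's note: DEF_GC_THREAD_VALID_THRESH_RATIO feeds the get_gc_cost()
filter shown earlier in gc.c. Reduced to its arithmetic, with cap and valid
as plain block counts standing in for CAP_BLKS_PER_SEC() and
get_valid_blocks():

    #include <limits.h>

    /* UINT_MAX means "never pick this section for one-time GC" */
    static unsigned int one_time_cost(unsigned int valid, unsigned int cap)
    {
            if (valid >= cap * 95 / 100)    /* nearly full: not worth moving */
                    return UINT_MAX;
            return valid;                   /* greedy: fewer valid blocks wins */
    }

For example, with cap = 512 blocks per section, any section holding 486 or
more valid blocks is excluded from one-time GC victim selection.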
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index cca7d448e55c..005babf1bed1 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -260,35 +260,34 @@ out:
return err;
}
-int f2fs_write_inline_data(struct inode *inode, struct page *page)
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
- struct dnode_of_data dn;
- int err;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *ipage;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
- if (err)
- return err;
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_dnode(&dn);
+ f2fs_put_page(ipage, 1);
return -EAGAIN;
}
- f2fs_bug_on(F2FS_I_SB(inode), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
- memcpy_from_page(inline_data_addr(inode, dn.inode_page),
- page, 0, MAX_INLINE_DATA(inode));
- set_page_dirty(dn.inode_page);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ memcpy_from_folio(inline_data_addr(inode, ipage),
+ folio, 0, MAX_INLINE_DATA(inode));
+ set_page_dirty(ipage);
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(folio);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_page_private_inline(dn.inode_page);
- f2fs_put_dnode(&dn);
+ clear_page_private_inline(ipage);
+ f2fs_put_page(ipage, 1);
return 0;
}
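Editor's note: this conversion, and the node.c and segment.c ones below,
follow the usual mechanical page-to-folio mapping; every pair listed here
appears in the hunks themselves:

    PageUptodate(page)             ->  folio_test_uptodate(folio)
    ClearPageUptodate(page)        ->  folio_clear_uptodate(folio)
    unlock_page(page)              ->  folio_unlock(folio)
    set_page_writeback(page)       ->  folio_start_writeback(folio)
    redirty_page_for_writepage()   ->  folio_redirty_for_writepage()
    memcpy_from_page(...)          ->  memcpy_from_folio(...)
    page->index                    ->  folio->index (folio = page_folio(page))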
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index aef57172014f..1ed86df343a5 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
@@ -35,6 +34,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (f2fs_inode_dirtied(inode, sync))
return;
+ if (f2fs_is_atomic_file(inode)) {
+ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ return;
+ }
+
mark_inode_dirty_sync(inode);
}
@@ -175,7 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
if (provided != calculated)
f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
- page->index, ino_of_node(page), provided, calculated);
+ page_folio(page)->index, ino_of_node(page),
+ provided, calculated);
return provided == calculated;
}
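Editor's note: FI_ATOMIC_DIRTIED pairs with the segment.c hunks further
down: dirtying an inode inside an atomic-write session is latched rather
than handed to the VFS, then replayed exactly once at commit or abort. A
minimal model (the struct and vfs_dirty() are stand-ins for the inode flag
helpers and mark_inode_dirty_sync()):

    #include <stdbool.h>

    struct my_inode { bool atomic_session, atomic_dirtied; };
    void vfs_dirty(struct my_inode *inode);

    void mark_dirty(struct my_inode *inode)
    {
            if (inode->atomic_session) {
                    inode->atomic_dirtied = true;   /* latch: FI_ATOMIC_DIRTIED */
                    return;
            }
            vfs_dirty(inode);
    }

    void end_session(struct my_inode *inode)        /* commit or abort */
    {
            if (inode->atomic_dirtied) {
                    inode->atomic_dirtied = false;
                    vfs_dirty(inode);               /* replay once */
            }
    }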
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 38b4750475db..57d46e1439de 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -457,62 +457,6 @@ struct dentry *f2fs_get_parent(struct dentry *child)
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
-static int __recover_dot_dentries(struct inode *dir, nid_t pino)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct qstr dot = QSTR_INIT(".", 1);
- struct f2fs_dir_entry *de;
- struct page *page;
- int err = 0;
-
- if (f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
- dir->i_ino, pino);
- return 0;
- }
-
- if (!S_ISDIR(dir->i_mode)) {
- f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
- dir->i_ino, dir->i_mode, pino);
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- return -ENOTDIR;
- }
-
- err = f2fs_dquot_initialize(dir);
- if (err)
- return err;
-
- f2fs_balance_fs(sbi, true);
-
- f2fs_lock_op(sbi);
-
- de = f2fs_find_entry(dir, &dot, &page);
- if (de) {
- f2fs_put_page(page, 0);
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out;
- } else {
- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
- if (err)
- goto out;
- }
-
- de = f2fs_find_entry(dir, &dotdot_name, &page);
- if (de)
- f2fs_put_page(page, 0);
- else if (IS_ERR(page))
- err = PTR_ERR(page);
- else
- err = f2fs_do_add_link(dir, &dotdot_name, NULL, pino, S_IFDIR);
-out:
- if (!err)
- clear_inode_flag(dir, FI_INLINE_DOTS);
-
- f2fs_unlock_op(sbi);
- return err;
-}
-
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
@@ -522,7 +466,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *new;
nid_t ino = -1;
int err = 0;
- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -558,17 +501,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
- err = __recover_dot_dentries(dir, root_ino);
- if (err)
- goto out_iput;
- }
-
- if (f2fs_has_inline_dots(inode)) {
- err = __recover_dot_dentries(inode, dir->i_ino);
- if (err)
- goto out_iput;
- }
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b72ef96f7e33..59b13ff243fa 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -20,7 +20,7 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
@@ -123,7 +123,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
static void clear_node_page_dirty(struct page *page)
{
if (PageDirty(page)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
}
@@ -919,7 +919,7 @@ static int truncate_node(struct dnode_of_data *dn)
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = dn->node_page->index;
+ index = page_folio(dn->node_page)->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
@@ -1369,6 +1369,7 @@ fail:
*/
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
+ struct folio *folio = page_folio(page);
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1381,21 +1382,21 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
};
int err;
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!f2fs_inode_chksum_verify(sbi, page)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -EFSBADCRC;
}
return LOCKED_PAGE;
}
- err = f2fs_get_node_info(sbi, page->index, &ni, false);
+ err = f2fs_get_node_info(sbi, folio->index, &ni, false);
if (err)
return err;
/* NEW_ADDR can be seen after cp_error drops some dirty node pages */
if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -ENOENT;
}
@@ -1492,7 +1493,7 @@ out_err:
out_put_err:
/* ENOENT from read_node_page is not treated as an error. */
if (err != -ENOENT)
- f2fs_handle_page_eio(sbi, page->index, NODE);
+ f2fs_handle_page_eio(sbi, page_folio(page), NODE);
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
@@ -1535,7 +1536,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!clear_page_dirty_for_io(page))
goto page_out;
- ret = f2fs_write_inline_data(inode, page);
+ ret = f2fs_write_inline_data(inode, page_folio(page));
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
if (ret)
@@ -1608,6 +1609,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1624,15 +1626,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
};
unsigned int seq;
- trace_f2fs_writepage(page_folio(page), NODE);
+ trace_f2fs_writepage(folio, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
/* keep node pages in remount-ro mode */
if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
goto redirty_out;
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1646,7 +1648,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* get old block addr of this node page */
nid = nid_of_node(page);
- f2fs_bug_on(sbi, page->index != nid);
+ f2fs_bug_on(sbi, folio->index != nid);
if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
@@ -1660,10 +1662,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
f2fs_up_read(&sbi->node_write);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1674,7 +1676,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out;
}
- if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
+ if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
@@ -1684,7 +1686,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
*seq_id = seq;
}
- set_page_writeback(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
@@ -1697,7 +1699,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, NODE);
@@ -1711,7 +1713,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
@@ -1867,7 +1869,7 @@ continue_unlock:
}
if (!ret && atomic && !marked) {
f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
- ino, last_page->index);
+ ino, page_folio(last_page)->index);
lock_page(last_page);
f2fs_wait_on_page_writeback(last_page, NODE, true, true);
set_page_dirty(last_page);
@@ -3166,7 +3168,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kvzalloc(sbi,
- nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
+ F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -3185,7 +3187,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
if (IS_ERR(page))
return PTR_ERR(page);
- memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
+ memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
page_address(page), F2FS_BLKSIZE);
f2fs_put_page(page, 1);
}
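Editor's note: the nat_bits hunks replace open-coded shifts with
F2FS_BLK_TO_BYTES(), which is identical for the 4KiB block size
(F2FS_BLKSIZE_BITS == 12) but centralizes the conversion. A compile-time
check of the equivalence (macro body paraphrased):

    #define F2FS_BLKSIZE_BITS 12
    #define F2FS_BLK_TO_BYTES(blk) ((long long)(blk) << F2FS_BLKSIZE_BITS)

    _Static_assert(F2FS_BLK_TO_BYTES(3) == 3 * 4096,
                   "3 blocks are 12288 bytes at the default block size");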
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 78c3198a6308..1766254279d2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
clear_inode_flag(inode, FI_ATOMIC_REPLACE);
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
stat_dec_atomic_inode(inode);
F2FS_I(inode)->atomic_write_task = NULL;
@@ -366,6 +370,10 @@ out:
} else {
sbi->committed_atomic_block += fi->atomic_write_cnt;
set_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
__complete_revoke_list(inode, &revoke_list, ret ? true : false);
@@ -1282,6 +1290,13 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
wait_list, issued);
return 0;
}
+
+ /*
+ * Issue discard for conventional zones only if the device
+ * supports discard.
+ */
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
}
#endif
@@ -2686,22 +2701,47 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
goto got_it;
}
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* If we format f2fs on zoned storage, let's try to get pinned sections
* from the beginning of the storage, which should be a conventional zone.
*/
if (f2fs_sb_has_blkzoned(sbi)) {
- segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
+ /* Prioritize writing to conventional zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+ segno = 0;
+ else
+ segno = max(first_zoned_segno(sbi), *newseg);
hint = GET_SEC_FROM_SEG(sbi, segno);
}
+#endif
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+ /* Write only to sequential zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+ hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
+ secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ } else
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+ if (secno >= MAIN_SECS(sbi)) {
+ ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
+ goto out_unlock;
+ }
+ }
+#endif
+
if (secno >= MAIN_SECS(sbi)) {
secno = find_first_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi));
if (secno >= MAIN_SECS(sbi)) {
ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
goto out_unlock;
}
}
@@ -2743,10 +2783,8 @@ got_it:
out_unlock:
spin_unlock(&free_i->segmap_lock);
- if (ret == -ENOSPC) {
+ if (ret == -ENOSPC)
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
- f2fs_bug_on(sbi, 1);
- }
return ret;
}
@@ -3052,7 +3090,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
sanity_check_seg_type(sbi, seg_type);
/* f2fs_need_SSR() already forces to do this */
- if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3079,7 +3118,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == seg_type)
continue;
- if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3522,7 +3562,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (file_is_cold(inode) || f2fs_need_compress_data(inode))
return CURSEG_COLD_DATA;
- type = __get_age_segment_type(inode, fio->page->index);
+ type = __get_age_segment_type(inode,
+ page_folio(fio->page)->index);
if (type != NO_CHECK_TYPE)
return type;
@@ -3781,7 +3822,7 @@ out:
f2fs_up_read(&fio->sbi->io_order_lock);
}
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type)
{
struct f2fs_io_info fio = {
@@ -3790,20 +3831,20 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
.temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
- .old_blkaddr = page->index,
- .new_blkaddr = page->index,
- .page = page,
+ .old_blkaddr = folio->index,
+ .new_blkaddr = folio->index,
+ .page = folio_page(folio, 0),
.encrypted_page = NULL,
.in_list = 0,
};
- if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+ if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
fio.op_flags &= ~REQ_META;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_submit_page_write(&fio);
- stat_inc_meta_count(sbi, page->index);
+ stat_inc_meta_count(sbi, folio->index);
f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
}
@@ -5381,8 +5422,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
return BLKS_PER_SEG(sbi);
}
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
if (f2fs_sb_has_blkzoned(sbi))
return CAP_SEGS_PER_SEC(sbi);
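Editor's note: get_new_segment() now honors a per-superblock zone-allocation
policy (default BLKZONE_ALLOC_PRIOR_SEQ, per the super.c hunk below). A
condensed model of the hint selection; the enum ordering is an assumption
consistent with the sysfs range check, and first_seq/hint stand in for
first_zoned_segno() and *newseg:

    enum blkzone_alloc_policy {
            BLKZONE_ALLOC_PRIOR_SEQ,        /* default: prefer sequential zones */
            BLKZONE_ALLOC_ONLY_SEQ,         /* never fall back to conventional */
            BLKZONE_ALLOC_PRIOR_CONV,       /* scan conventional zones first */
    };

    static unsigned int pick_start(enum blkzone_alloc_policy policy,
                                   int pinning, unsigned int first_seq,
                                   unsigned int hint)
    {
            if (policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
                    return 0;               /* start of the device */
            return first_seq > hint ? first_seq : hint;
    }

BLKZONE_ALLOC_ONLY_SEQ only matters on the retry path: when the first scan
finds nothing, the search restarts at the first sequential zone rather than
at segment 0.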
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bfc01a521cb9..71adb4a43bec 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -188,6 +188,7 @@ struct victim_sel_policy {
unsigned int min_segno; /* segment # having min. cost */
unsigned long long age; /* mtime of GCed section */
unsigned long long age_threshold;/* age threshold */
+ bool one_time_gc; /* one time GC */
};
struct seg_entry {
@@ -430,7 +431,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
@@ -464,7 +465,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3959fd137cc9..87ab5696bd48 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -11,7 +11,6 @@
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
-#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -707,6 +706,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!strcmp(name, "on")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
} else if (!strcmp(name, "off")) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_warn(sbi, "zoned devices need bggc");
+ kfree(name);
+ return -EINVAL;
+ }
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
} else if (!strcmp(name, "sync")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
@@ -2561,7 +2565,7 @@ restore_opts:
static void f2fs_shutdown(struct super_block *sb)
{
- f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false);
+ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
}
#ifdef CONFIG_QUOTA
@@ -2677,7 +2681,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
const struct address_space_operations *a_ops = mapping->a_ops;
int offset = off & (sb->s_blocksize - 1);
size_t towrite = len;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int err = 0;
int tocopy;
@@ -2687,7 +2691,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
towrite);
retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy,
- &page, &fsdata);
+ &folio, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
@@ -2697,10 +2701,10 @@ retry:
break;
}
- memcpy_to_page(page, offset, data, tocopy);
+ memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
- page, fsdata);
+ folio, fsdata);
offset = 0;
towrite -= tocopy;
off += tocopy;
@@ -3318,29 +3322,47 @@ loff_t max_file_blocks(struct inode *inode)
* fit within U32_MAX + 1 data units.
*/
- result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
+ result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
return result;
}
-static int __f2fs_commit_super(struct buffer_head *bh,
- struct f2fs_super_block *super)
+static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
+ pgoff_t index, bool update)
{
- lock_buffer(bh);
- if (super)
- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_dirty(bh);
- unlock_buffer(bh);
-
+ struct bio *bio;
/* it's a rare case, so we can do FUA all the time */
- return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
+ int ret;
+
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ if (update)
+ memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
+ sizeof(struct f2fs_super_block));
+ folio_mark_dirty(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
+
+ /* no need to set a crypto context for a superblock update */
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));
+
+ if (!bio_add_folio(bio, folio, folio_size(folio), 0))
+ f2fs_bug_on(sbi, 1);
+
+ ret = submit_bio_wait(bio);
+ folio_end_writeback(folio);
+
+ return ret;
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
@@ -3356,9 +3378,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
u64 main_end_blkaddr = main_blkaddr +
- (segment_count_main << log_blocks_per_seg);
+ ((u64)segment_count_main << log_blocks_per_seg);
u64 seg_end_blkaddr = segment0_blkaddr +
- (segment_count << log_blocks_per_seg);
+ ((u64)segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
@@ -3415,7 +3437,7 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally";
} else {
- err = __f2fs_commit_super(bh, NULL);
+ err = __f2fs_commit_super(sbi, folio, index, false);
res = err ? "failed" : "done";
}
f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
@@ -3428,12 +3450,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
block_t total_sections, blocks_per_seg;
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
size_t crc_offset = 0;
__u32 crc = 0;
@@ -3591,7 +3612,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sbi, bh))
+ if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
return 0;
@@ -3786,6 +3807,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = SEGS_PER_SEC(sbi);
+ sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
+ DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
@@ -3938,7 +3961,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
{
struct super_block *sb = sbi->sb;
int block;
- struct buffer_head *bh;
+ struct folio *folio;
struct f2fs_super_block *super;
int err = 0;
@@ -3947,32 +3970,32 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
return -ENOMEM;
for (block = 0; block < 2; block++) {
- bh = sb_bread(sb, block);
- if (!bh) {
+ folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
+ if (IS_ERR(folio)) {
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
- err = -EIO;
+ err = PTR_ERR(folio);
*recovery = 1;
continue;
}
/* sanity checking of raw super */
- err = sanity_check_raw_super(sbi, bh);
+ err = sanity_check_raw_super(sbi, folio, block);
if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- brelse(bh);
+ folio_put(folio);
*recovery = 1;
continue;
}
if (!*raw_super) {
- memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ memcpy(super, F2FS_SUPER_BLOCK(folio, block),
sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
- brelse(bh);
+ folio_put(folio);
}
/* No valid superblock */
@@ -3986,7 +4009,8 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *bh;
+ struct folio *folio;
+ pgoff_t index;
__u32 crc = 0;
int err;
@@ -4004,22 +4028,24 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
}
/* write back-up superblock first */
- bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block ? 0 : 1;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
- bh = sb_bread(sbi->sb, sbi->valid_super_block);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
return err;
}
@@ -4173,12 +4199,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
}
f2fs_warn(sbi, "Remounting filesystem read-only");
+
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+ * We have already set CP_ERROR_FLAG to stop all updates to the
+ * filesystem, so there is no need to set SB_RDONLY here: that flag
+ * should only be set under the sb->s_umount semaphore, via the
+ * remount procedure; otherwise it confuses code such as
+ * freeze_super() and can lead to deadlocks and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
}
static void f2fs_record_error_work(struct work_struct *work)
@@ -4219,6 +4247,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = true;
#ifdef CONFIG_BLK_DEV_ZONED
sbi->max_open_zones = UINT_MAX;
+ sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
#endif
for (i = 0; i < max_devices; i++) {
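Editor's note: __f2fs_commit_super() drops buffer heads entirely and drives
the write itself. Its core, with the dirty/writeback bookkeeping and error
paths trimmed (every call is taken from the hunk above):

    struct bio *bio = bio_alloc(sbi->sb->s_bdev, 1,
                    REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
                    GFP_NOFS);

    bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));
    if (!bio_add_folio(bio, folio, folio_size(folio), 0))
            f2fs_bug_on(sbi, 1);            /* a one-page folio always fits */
    int ret = submit_bio_wait(bio);         /* synchronous; returns I/O status */
    folio_end_writeback(folio);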
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index fee7ee45ceaa..c56e8c873935 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -170,6 +170,12 @@ static ssize_t undiscard_blks_show(struct f2fs_attr *a,
SM_I(sbi)->dcc_info->undiscard_blks);
}
+static ssize_t atgc_enabled_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", sbi->am.atgc_enabled ? 1 : 0);
+}
+
static ssize_t gc_mode_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -182,50 +188,50 @@ static ssize_t features_show(struct f2fs_attr *a,
int len = 0;
if (f2fs_sb_has_encrypt(sbi))
- len += scnprintf(buf, PAGE_SIZE - len, "%s",
+ len += sysfs_emit_at(buf, len, "%s",
"encryption");
if (f2fs_sb_has_blkzoned(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "extra_attr");
if (f2fs_sb_has_project_quota(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "projquota");
if (f2fs_sb_has_inode_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_checksum");
if (f2fs_sb_has_flexible_inline_xattr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
if (f2fs_sb_has_quota_ino(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "quota_ino");
if (f2fs_sb_has_inode_crtime(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_crtime");
if (f2fs_sb_has_lost_found(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "lost_found");
if (f2fs_sb_has_verity(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "sb_checksum");
if (f2fs_sb_has_casefold(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "casefold");
if (f2fs_sb_has_readonly(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "readonly");
if (f2fs_sb_has_compression(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
@@ -323,17 +329,14 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
int hot_count = sbi->raw_super->hot_ext_count;
int len = 0, i;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "cold file extension:\n");
+ len += sysfs_emit_at(buf, len, "cold file extension:\n");
for (i = 0; i < cold_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "hot file extension:\n");
+ len += sysfs_emit_at(buf, len, "hot file extension:\n");
for (i = cold_count; i < cold_count + hot_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
+
return len;
}
@@ -561,6 +564,11 @@ out:
return -EINVAL;
}
+ if (!strcmp(a->attr.name, "migration_window_granularity")) {
+ if (t == 0 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ }
+
if (!strcmp(a->attr.name, "gc_urgent")) {
if (t == 0) {
sbi->gc_mode = GC_NORMAL;
@@ -627,6 +635,15 @@ out:
}
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
+ if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
+ return -EINVAL;
+ sbi->blkzone_alloc_policy = t;
+ return count;
+ }
+#endif
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (!strcmp(a->attr.name, "compr_written_block") ||
!strcmp(a->attr.name, "compr_saved_block")) {
@@ -775,7 +792,8 @@ out:
if (!strcmp(a->attr.name, "ipu_policy")) {
if (t >= BIT(F2FS_IPU_MAX))
return -EINVAL;
- if (t && f2fs_lfs_mode(sbi))
+ /* in LFS mode, allow only F2FS_IPU_NOCACHE, for IPU on pinned files */
+ if (f2fs_lfs_mode(sbi) && (t & ~BIT(F2FS_IPU_NOCACHE)))
return -EINVAL;
SM_I(sbi)->ipu_policy = (unsigned int)t;
return count;
@@ -960,6 +978,9 @@ GC_THREAD_RW_ATTR(gc_urgent_sleep_time, urgent_sleep_time);
GC_THREAD_RW_ATTR(gc_min_sleep_time, min_sleep_time);
GC_THREAD_RW_ATTR(gc_max_sleep_time, max_sleep_time);
GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio);
/* SM_INFO ATTR */
SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
@@ -969,6 +990,7 @@ SM_INFO_GENERAL_RW_ATTR(min_fsync_blocks);
SM_INFO_GENERAL_RW_ATTR(min_seq_blocks);
SM_INFO_GENERAL_RW_ATTR(min_hot_blocks);
SM_INFO_GENERAL_RW_ATTR(min_ssr_sections);
+SM_INFO_GENERAL_RW_ATTR(reserved_segments);
/* DCC_INFO ATTR */
DCC_INFO_RW_ATTR(max_small_discards, max_discards);
@@ -1001,6 +1023,7 @@ F2FS_SBI_RW_ATTR(gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_SBI_RW_ATTR(gc_reclaimed_segments, gc_reclaimed_segs);
F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
F2FS_SBI_GENERAL_RW_ATTR(dir_level);
#ifdef CONFIG_F2FS_IOSTAT
F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
@@ -1033,6 +1056,7 @@ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif
/* STAT_INFO ATTR */
@@ -1072,6 +1096,7 @@ F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
+F2FS_GENERAL_RO_ATTR(atgc_enabled);
F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
@@ -1116,6 +1141,9 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_min_sleep_time),
ATTR_LIST(gc_max_sleep_time),
ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_no_zoned_gc_percent),
+ ATTR_LIST(gc_boost_zoned_gc_percent),
+ ATTR_LIST(gc_valid_thresh_ratio),
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
@@ -1138,8 +1166,10 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
+ ATTR_LIST(reserved_segments),
ATTR_LIST(max_victim_search),
ATTR_LIST(migration_granularity),
+ ATTR_LIST(migration_window_granularity),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
@@ -1187,6 +1217,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
#ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST(unusable_blocks_per_sec),
+ ATTR_LIST(blkzone_alloc_policy),
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compr_written_block),
@@ -1200,6 +1231,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(atgc_candidate_count),
ATTR_LIST(atgc_age_weight),
ATTR_LIST(atgc_age_threshold),
+ ATTR_LIST(atgc_enabled),
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
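Editor's note: the show() conversions above swap scnprintf(buf + len,
PAGE_SIZE - len, ...) for sysfs_emit_at(buf, len, ...), which performs the
same PAGE_SIZE clamping internally and warns if buf is not a sysfs page.
The before/after shape:

    /* before */
    len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", extlist[i]);
    /* after */
    len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);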
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index f7bb0c54502c..2287f238ae09 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -74,23 +74,23 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- if (pos + count > inode->i_sb->s_maxbytes)
+ if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
return -EFBIG;
while (count) {
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int res;
- res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+ res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res)
return res;
- memcpy_to_page(page, offset_in_page(pos), buf, n);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
- res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
+ res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0)
return res;
if (res != n)
@@ -237,7 +237,8 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
pos = le64_to_cpu(dloc.pos);
/* Get the descriptor */
- if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
+ if (pos + size < pos ||
+ pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
f2fs_handle_error(F2FS_I_SB(inode),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index f290fe9327c4..3f3874943679 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr, *last_base_addr;
int found, newsize;
@@ -772,9 +773,18 @@ retry:
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- if (S_ISDIR(inode->i_mode))
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode))
+ goto same;
+ /*
+ * In strict mode, fsync() always triggers a checkpoint for full
+ * metadata consistency; in other modes, it triggers a checkpoint
+ * only when the parent's xattr metadata has been updated.
+ */
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ set_sbi_flag(sbi, SBI_NEED_CP);
+ else
+ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
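Editor's note: untangled from the goto, the new directory-xattr policy in
__f2fs_setxattr() is equivalent to:

    if (S_ISDIR(inode->i_mode)) {
            if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
                    set_sbi_flag(sbi, SBI_NEED_CP);   /* checkpoint on fsync */
            else
                    f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
    }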
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 19115fd2d2a4..75722bbd6b5f 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -221,13 +221,12 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
static int fat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int err;
- *pagep = NULL;
err = cont_write_begin(file, mapping, pos, len,
- pagep, fsdata, fat_get_block,
+ foliop, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
fat_write_failed(mapping, pos + len);
@@ -236,11 +235,11 @@ static int fat_write_begin(struct file *file, struct address_space *mapping,
static int fat_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *pagep, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+ err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 300e5d9ad913..22dd9dcce7ec 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -33,6 +33,8 @@
#include <asm/siginfo.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file * filp, unsigned int arg)
@@ -87,29 +89,65 @@ static int setfl(int fd, struct file * filp, unsigned int arg)
return error;
}
-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
- int force)
+/*
+ * Allocate a file->f_owner struct if it doesn't exist, handling racing
+ * allocations correctly.
+ */
+int file_f_owner_allocate(struct file *file)
{
- write_lock_irq(&filp->f_owner.lock);
- if (force || !filp->f_owner.pid) {
- put_pid(filp->f_owner.pid);
- filp->f_owner.pid = get_pid(pid);
- filp->f_owner.pid_type = type;
+ struct fown_struct *f_owner;
- if (pid) {
- const struct cred *cred = current_cred();
- filp->f_owner.uid = cred->uid;
- filp->f_owner.euid = cred->euid;
- }
+ f_owner = file_f_owner(file);
+ if (f_owner)
+ return 0;
+
+ f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
+ if (!f_owner)
+ return -ENOMEM;
+
+ rwlock_init(&f_owner->lock);
+ f_owner->file = file;
+ /* If someone else raced us, drop our allocation. */
+ if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner)))
+ kfree(f_owner);
+ return 0;
+}
+EXPORT_SYMBOL(file_f_owner_allocate);
+
+void file_f_owner_release(struct file *file)
+{
+ struct fown_struct *f_owner;
+
+ f_owner = file_f_owner(file);
+ if (f_owner) {
+ put_pid(f_owner->pid);
+ kfree(f_owner);
}
- write_unlock_irq(&filp->f_owner.lock);
}
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
- security_file_set_fowner(filp);
- f_modown(filp, pid, type, force);
+ struct fown_struct *f_owner;
+
+ f_owner = file_f_owner(filp);
+ if (WARN_ON_ONCE(!f_owner))
+ return;
+
+ write_lock_irq(&f_owner->lock);
+ if (force || !f_owner->pid) {
+ put_pid(f_owner->pid);
+ f_owner->pid = get_pid(pid);
+ f_owner->pid_type = type;
+
+ if (pid) {
+ const struct cred *cred = current_cred();
+ security_file_set_fowner(filp);
+ f_owner->uid = cred->uid;
+ f_owner->euid = cred->euid;
+ }
+ }
+ write_unlock_irq(&f_owner->lock);
}
EXPORT_SYMBOL(__f_setown);
@@ -119,6 +157,8 @@ int f_setown(struct file *filp, int who, int force)
struct pid *pid = NULL;
int ret = 0;
+ might_sleep();
+
type = PIDTYPE_TGID;
if (who < 0) {
/* avoid overflow below */
@@ -129,6 +169,10 @@ int f_setown(struct file *filp, int who, int force)
who = -who;
}
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
+
rcu_read_lock();
if (who) {
pid = find_vpid(who);
@@ -146,22 +190,27 @@ EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
- f_modown(filp, NULL, PIDTYPE_TGID, 1);
+ __f_setown(filp, NULL, PIDTYPE_TGID, 1);
}
pid_t f_getown(struct file *filp)
{
pid_t pid = 0;
+ struct fown_struct *f_owner;
- read_lock_irq(&filp->f_owner.lock);
+ f_owner = file_f_owner(filp);
+ if (!f_owner)
+ return pid;
+
+ read_lock_irq(&f_owner->lock);
rcu_read_lock();
- if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
- pid = pid_vnr(filp->f_owner.pid);
- if (filp->f_owner.pid_type == PIDTYPE_PGID)
+ if (pid_task(f_owner->pid, f_owner->pid_type)) {
+ pid = pid_vnr(f_owner->pid);
+ if (f_owner->pid_type == PIDTYPE_PGID)
pid = -pid;
}
rcu_read_unlock();
- read_unlock_irq(&filp->f_owner.lock);
+ read_unlock_irq(&f_owner->lock);
return pid;
}
@@ -194,6 +243,10 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
return -EINVAL;
}
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
+
rcu_read_lock();
pid = find_vpid(owner.pid);
if (owner.pid && !pid)
@@ -210,13 +263,20 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
struct f_owner_ex __user *owner_p = (void __user *)arg;
struct f_owner_ex owner = {};
int ret = 0;
+ struct fown_struct *f_owner;
+ enum pid_type pid_type = PIDTYPE_PID;
- read_lock_irq(&filp->f_owner.lock);
- rcu_read_lock();
- if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
- owner.pid = pid_vnr(filp->f_owner.pid);
- rcu_read_unlock();
- switch (filp->f_owner.pid_type) {
+ f_owner = file_f_owner(filp);
+ if (f_owner) {
+ read_lock_irq(&f_owner->lock);
+ rcu_read_lock();
+ if (pid_task(f_owner->pid, f_owner->pid_type))
+ owner.pid = pid_vnr(f_owner->pid);
+ rcu_read_unlock();
+ pid_type = f_owner->pid_type;
+ }
+
+ switch (pid_type) {
case PIDTYPE_PID:
owner.type = F_OWNER_TID;
break;
@@ -234,7 +294,8 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
ret = -EINVAL;
break;
}
- read_unlock_irq(&filp->f_owner.lock);
+ if (f_owner)
+ read_unlock_irq(&f_owner->lock);
if (!ret) {
ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -248,14 +309,18 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
struct user_namespace *user_ns = current_user_ns();
+ struct fown_struct *f_owner;
uid_t __user *dst = (void __user *)arg;
- uid_t src[2];
+ uid_t src[2] = {0, 0};
int err;
- read_lock_irq(&filp->f_owner.lock);
- src[0] = from_kuid(user_ns, filp->f_owner.uid);
- src[1] = from_kuid(user_ns, filp->f_owner.euid);
- read_unlock_irq(&filp->f_owner.lock);
+ f_owner = file_f_owner(filp);
+ if (f_owner) {
+ read_lock_irq(&f_owner->lock);
+ src[0] = from_kuid(user_ns, f_owner->uid);
+ src[1] = from_kuid(user_ns, f_owner->euid);
+ read_unlock_irq(&f_owner->lock);
+ }
err = put_user(src[0], &dst[0]);
err |= put_user(src[1], &dst[1]);
@@ -340,7 +405,37 @@ static long f_dupfd_query(int fd, struct file *filp)
* overkill, but given our lockless file pointer lookup, the
* alternatives are complicated.
*/
- return f.file == filp;
+ return fd_file(f) == filp;
+}
+
+/* Let the caller figure out whether a given file was just created. */
+static long f_created_query(const struct file *filp)
+{
+ return !!(filp->f_mode & FMODE_CREATED);
+}
+
+static int f_owner_sig(struct file *filp, int signum, bool setsig)
+{
+ int ret = 0;
+ struct fown_struct *f_owner;
+
+ might_sleep();
+
+ if (setsig) {
+ if (!valid_signal(signum))
+ return -EINVAL;
+
+ ret = file_f_owner_allocate(filp);
+ if (ret)
+ return ret;
+ }
+
+ f_owner = file_f_owner(filp);
+ if (setsig)
+ f_owner->signum = signum;
+ else if (f_owner)
+ ret = f_owner->signum;
+ return ret;
}
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
@@ -352,6 +447,9 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
long err = -EINVAL;
switch (cmd) {
+ case F_CREATED_QUERY:
+ err = f_created_query(filp);
+ break;
case F_DUPFD:
err = f_dupfd(argi, filp, 0);
break;
@@ -421,15 +519,10 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
err = f_getowner_uids(filp, arg);
break;
case F_GETSIG:
- err = filp->f_owner.signum;
+ err = f_owner_sig(filp, 0, false);
break;
case F_SETSIG:
- /* arg == 0 restores default behaviour. */
- if (!valid_signal(argi)) {
- break;
- }
- err = 0;
- filp->f_owner.signum = argi;
+ err = f_owner_sig(filp, argi, true);
break;
case F_GETLEASE:
err = fcntl_getlease(filp);
@@ -463,6 +556,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
static int check_fcntl_cmd(unsigned cmd)
{
switch (cmd) {
+ case F_CREATED_QUERY:
case F_DUPFD:
case F_DUPFD_CLOEXEC:
case F_DUPFD_QUERY:
@@ -479,17 +573,17 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
struct fd f = fdget_raw(fd);
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out1;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (!err)
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
out1:
fdput(f);
@@ -506,15 +600,15 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
struct flock64 flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out1;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
goto out1;
@@ -524,7 +618,7 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_getlk64(f.file, cmd, &flock);
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
if (!err && copy_to_user(argp, &flock, sizeof(flock)))
err = -EFAULT;
break;
@@ -535,10 +629,10 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
out1:
@@ -643,15 +737,15 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
struct flock flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return err;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out_put;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
goto out_put;
@@ -660,7 +754,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (err)
break;
err = fixup_compat_flock(&flock);
@@ -672,7 +766,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (!err)
err = put_compat_flock64(&flock, compat_ptr(arg));
break;
@@ -681,7 +775,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
case F_SETLK64:
case F_SETLKW64:
@@ -690,10 +784,10 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
out_put:
@@ -844,14 +938,19 @@ static void send_sigurg_to_task(struct task_struct *p,
do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}
-int send_sigurg(struct fown_struct *fown)
+int send_sigurg(struct file *file)
{
+ struct fown_struct *fown;
struct task_struct *p;
enum pid_type type;
struct pid *pid;
unsigned long flags;
int ret = 0;
+ fown = file_f_owner(file);
+ if (!fown)
+ return 0;
+
read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
@@ -1027,13 +1126,16 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
}
read_lock_irqsave(&fa->fa_lock, flags);
if (fa->fa_file) {
- fown = &fa->fa_file->f_owner;
+ fown = file_f_owner(fa->fa_file);
+ if (!fown)
+ goto next;
/* Don't send SIGURG to processes which have not set a
queued signum: SIGURG has its own default signalling
mechanism. */
if (!(sig == SIGURG && fown->signum == 0))
send_sigio(fown, fa->fa_fd, band);
}
+next:
read_unlock_irqrestore(&fa->fa_lock, flags);
fa = rcu_dereference(fa->fa_next);
}
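
The new F_CREATED_QUERY case above whitelists the command for O_PATH descriptors alongside the F_DUPFD family. A minimal userspace sketch of the intended use; the fallback value is an assumption (F_LINUX_SPECIFIC_BASE + 4) for illustration only:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

#ifndef F_CREATED_QUERY
#define F_CREATED_QUERY 1028	/* assumed fallback: F_LINUX_SPECIFIC_BASE + 4 */
#endif

int main(void)
{
	int fd = open("data.txt", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Returns 1 if this open() created the file, 0 if it already existed. */
	printf("created: %d\n", fcntl(fd, F_CREATED_QUERY, 0));
	return 0;
}
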
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 6e8cea16790e..82df28d45cd7 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -16,7 +16,8 @@
static long do_sys_name_to_handle(const struct path *path,
struct file_handle __user *ufh,
- int __user *mnt_id, int fh_flags)
+ void __user *mnt_id, bool unique_mntid,
+ int fh_flags)
{
long retval;
struct file_handle f_handle;
@@ -69,9 +70,19 @@ static long do_sys_name_to_handle(const struct path *path,
} else
retval = 0;
/* copy the mount id */
- if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
- copy_to_user(ufh, handle,
- struct_size(handle, f_handle, handle_bytes)))
+ if (unique_mntid) {
+ if (put_user(real_mount(path->mnt)->mnt_id_unique,
+ (u64 __user *) mnt_id))
+ retval = -EFAULT;
+ } else {
+ if (put_user(real_mount(path->mnt)->mnt_id,
+ (int __user *) mnt_id))
+ retval = -EFAULT;
+ }
+ /* copy the handle */
+ if (retval != -EFAULT &&
+ copy_to_user(ufh, handle,
+ struct_size(handle, f_handle, handle_bytes)))
retval = -EFAULT;
kfree(handle);
return retval;
@@ -83,6 +94,7 @@ static long do_sys_name_to_handle(const struct path *path,
* @name: name that should be converted to handle.
* @handle: resulting file handle
* @mnt_id: mount id of the file system containing the file
+ * (u64 if AT_HANDLE_MNT_ID_UNIQUE, otherwise int)
* @flag: flag value to indicate whether to follow symlink or not
* and whether a decodable file handle is required.
*
@@ -92,7 +104,7 @@ static long do_sys_name_to_handle(const struct path *path,
* value required.
*/
SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
- struct file_handle __user *, handle, int __user *, mnt_id,
+ struct file_handle __user *, handle, void __user *, mnt_id,
int, flag)
{
struct path path;
@@ -100,7 +112,8 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
int fh_flags;
int err;
- if (flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH | AT_HANDLE_FID))
+ if (flag & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH | AT_HANDLE_FID |
+ AT_HANDLE_MNT_ID_UNIQUE))
return -EINVAL;
lookup_flags = (flag & AT_SYMLINK_FOLLOW) ? LOOKUP_FOLLOW : 0;
@@ -109,7 +122,9 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
lookup_flags |= LOOKUP_EMPTY;
err = user_path_at(dfd, name, lookup_flags, &path);
if (!err) {
- err = do_sys_name_to_handle(&path, handle, mnt_id, fh_flags);
+ err = do_sys_name_to_handle(&path, handle, mnt_id,
+ flag & AT_HANDLE_MNT_ID_UNIQUE,
+ fh_flags);
path_put(&path);
}
return err;
@@ -125,9 +140,9 @@ static int get_path_from_fd(int fd, struct path *root)
spin_unlock(&fs->lock);
} else {
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- *root = f.file->f_path;
+ *root = fd_file(f)->f_path;
path_get(root);
fdput(f);
}
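
Since mnt_id can now be either an int or a u64 depending on AT_HANDLE_MNT_ID_UNIQUE, the syscall takes a void __user * and picks the width at runtime, as the hunks above show. A hedged userspace sketch; the flag's fallback value is an assumption based on the uapi header at the time of writing:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef AT_HANDLE_MNT_ID_UNIQUE
#define AT_HANDLE_MNT_ID_UNIQUE 0x001	/* assumed from newer <linux/fcntl.h> */
#endif

int main(int argc, char **argv)
{
	struct file_handle *fh;
	uint64_t mnt_id = 0;

	if (argc != 2)
		return 1;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* With the flag set, the kernel writes a u64 through mnt_id. */
	if (name_to_handle_at(AT_FDCWD, argv[1], fh, (int *)&mnt_id,
			      AT_HANDLE_MNT_ID_UNIQUE) < 0) {
		perror("name_to_handle_at");
		return 1;
	}
	printf("unique mount id: %llu\n", (unsigned long long)mnt_id);
	free(fh);
	return 0;
}
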
diff --git a/fs/file.c b/fs/file.c
index 655338effe9c..5125607d040a 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -672,7 +672,7 @@ int close_fd(unsigned fd)
return filp_close(file, files);
}
-EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+EXPORT_SYMBOL(close_fd);
/**
* last_fd - return last valid index into fd table
@@ -1124,7 +1124,7 @@ EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-static unsigned long __fget_light(unsigned int fd, fmode_t mask)
+static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
@@ -1141,22 +1141,22 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
if (likely(atomic_read_acquire(&files->count) == 1)) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
- return 0;
- return (unsigned long)file;
+ return EMPTY_FD;
+ return BORROWED_FD(file);
} else {
file = __fget_files(files, fd, mask);
if (!file)
- return 0;
- return FDPUT_FPUT | (unsigned long)file;
+ return EMPTY_FD;
+ return CLONED_FD(file);
}
}
-unsigned long __fdget(unsigned int fd)
+struct fd fdget(unsigned int fd)
{
return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(__fdget);
+EXPORT_SYMBOL(fdget);
-unsigned long __fdget_raw(unsigned int fd)
+struct fd fdget_raw(unsigned int fd)
{
return __fget_light(fd, 0);
}
@@ -1177,16 +1177,16 @@ static inline bool file_needs_f_pos_lock(struct file *file)
(file_count(file) > 1 || file->f_op->iterate_shared);
}
-unsigned long __fdget_pos(unsigned int fd)
+struct fd fdget_pos(unsigned int fd)
{
- unsigned long v = __fdget(fd);
- struct file *file = (struct file *)(v & ~3);
+ struct fd f = fdget(fd);
+ struct file *file = fd_file(f);
if (file && file_needs_f_pos_lock(file)) {
- v |= FDPUT_POS_UNLOCK;
+ f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
- return v;
+ return f;
}
void __f_unlock_pos(struct file *f)
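
For reference, the struct fd representation behind fd_file(), EMPTY_FD, BORROWED_FD() and CLONED_FD() is roughly the following simplified sketch, not the verbatim header. struct file allocations are word-aligned, so the low two bits of the pointer are free to carry the fdput() flags:

struct fd {
	unsigned long word;	/* file pointer with flag bits in bits 0-1 */
};

#define FDPUT_FPUT		1	/* fdput() must drop a reference */
#define FDPUT_POS_UNLOCK	2	/* fdput_pos() must unlock f_pos_lock */

#define fd_file(f) \
	((struct file *)((f).word & ~(FDPUT_FPUT | FDPUT_POS_UNLOCK)))

#define EMPTY_FD		(struct fd){ .word = 0 }
/* reference borrowed from the fd table, nothing to drop */
#define BORROWED_FD(file)	(struct fd){ .word = (unsigned long)(file) }
/* extra reference taken, fdput() must fput() */
#define CLONED_FD(file)		(struct fd){ .word = (unsigned long)(file) | FDPUT_FPUT }
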
diff --git a/fs/file_table.c b/fs/file_table.c
index ca7843dde56d..eed5ffad9997 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -136,6 +136,7 @@ static int __init init_fs_stat_sysctls(void)
register_sysctl_init("fs", fs_stat_sysctls);
if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
struct ctl_table_header *hdr;
+
hdr = register_sysctl_mount_point("fs/binfmt_misc");
kmemleak_not_leak(hdr);
}
@@ -155,8 +156,14 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
return error;
}
- rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
+	/*
+	 * Note that f_pos_lock is only used for files that raise
+	 * FMODE_ATOMIC_POS and for directories. Other files such as
+	 * pipes don't need it, and since f_pos_lock sits in a union
+	 * they may reuse the space for other purposes. Such files are
+	 * expected to initialize the respective union member when
+	 * opening the file.
+	 */
mutex_init(&f->f_pos_lock);
f->f_flags = flags;
f->f_mode = OPEN_FMODE(flags);
@@ -383,7 +390,9 @@ EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
struct file *alloc_file_clone(struct file *base, int flags,
const struct file_operations *fops)
{
- struct file *f = alloc_file(&base->f_path, flags, fops);
+ struct file *f;
+
+ f = alloc_file(&base->f_path, flags, fops);
if (!IS_ERR(f)) {
path_get(&f->f_path);
f->f_mapping = base->f_mapping;
@@ -425,7 +434,7 @@ static void __fput(struct file *file)
cdev_put(inode->i_cdev);
}
fops_put(file->f_op);
- put_pid(file->f_owner.pid);
+ file_f_owner_release(file);
put_file_access(file);
dput(dentry);
if (unlikely(mode & FMODE_NEED_UNMOUNT))
@@ -512,9 +521,14 @@ EXPORT_SYMBOL(__fput_sync);
void __init files_init(void)
{
- filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
- SLAB_PANIC | SLAB_ACCOUNT, NULL);
+ struct kmem_cache_args args = {
+ .use_freeptr_offset = true,
+ .freeptr_offset = offsetof(struct file, f_freeptr),
+ };
+
+ filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
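
files_init() above moves to the newer kmem_cache_create() calling convention, where ancillary parameters travel in struct kmem_cache_args. A minimal sketch of the same pattern for a hypothetical cache (struct foo and all names are illustrative); the dedicated free-pointer offset keeps SLUB from overwriting object fields that SLAB_TYPESAFE_BY_RCU readers may still inspect after the object is freed:

#include <linux/slab.h>

struct foo {
	int data;
	freeptr_t _free;	/* reserved slot for SLUB's free pointer */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct foo, _free),
	};

	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), &args,
				       SLAB_TYPESAFE_BY_RCU | SLAB_ACCOUNT);
	return foo_cachep ? 0 : -ENOMEM;
}
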
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b865a3fa52f3..d8bec3c1bb1f 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1132,6 +1132,7 @@ out_bdi_put:
/**
* cgroup_writeback_umount - flush inode wb switches for umount
+ * @sb: target super_block
*
* This function is called when a super_block is about to be destroyed and
* flushes in-flight inode wb switches. An inode wb switch goes through
@@ -1140,8 +1141,12 @@ out_bdi_put:
* rare occurrences and synchronize_rcu() can take a while, perform
* flushing iff wb switches are in flight.
*/
-void cgroup_writeback_umount(void)
+void cgroup_writeback_umount(struct super_block *sb)
{
+
+ if (!(sb->s_bdi->capabilities & BDI_CAP_WRITEBACK))
+ return;
+
/*
* SB_ACTIVE should be reliably cleared before checking
* isw_nr_in_flight, see generic_shutdown_super().
@@ -1381,12 +1386,13 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
static void inode_sync_complete(struct inode *inode)
{
+ assert_spin_locked(&inode->i_lock);
+
inode->i_state &= ~I_SYNC;
/* If inode is clean and unused, put it into LRU now... */
inode_add_lru(inode);
- /* Waiters must see I_SYNC cleared before being woken up */
- smp_mb();
- wake_up_bit(&inode->i_state, __I_SYNC);
+ /* Called with inode->i_lock which ensures memory ordering. */
+ inode_wake_up_bit(inode, __I_SYNC);
}
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
@@ -1505,30 +1511,27 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
* Wait for writeback on an inode to complete. Called with i_lock held.
* Caller must make sure inode cannot go away when we drop i_lock.
*/
-static void __inode_wait_for_writeback(struct inode *inode)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
+void inode_wait_for_writeback(struct inode *inode)
{
- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
- wait_queue_head_t *wqh;
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+
+ assert_spin_locked(&inode->i_lock);
- wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- while (inode->i_state & I_SYNC) {
+ if (!(inode->i_state & I_SYNC))
+ return;
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+ if (!(inode->i_state & I_SYNC))
+ break;
spin_unlock(&inode->i_lock);
- __wait_on_bit(wqh, &wq, bit_wait,
- TASK_UNINTERRUPTIBLE);
+ schedule();
spin_lock(&inode->i_lock);
}
-}
-
-/*
- * Wait for writeback on an inode to complete. Caller must have inode pinned.
- */
-void inode_wait_for_writeback(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- __inode_wait_for_writeback(inode);
- spin_unlock(&inode->i_lock);
+ finish_wait(wq_head, &wqe.wq_entry);
}
/*
@@ -1539,16 +1542,20 @@ void inode_wait_for_writeback(struct inode *inode)
static void inode_sleep_on_writeback(struct inode *inode)
__releases(inode->i_lock)
{
- DEFINE_WAIT(wait);
- wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- int sleep;
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
+ bool sleep;
+
+ assert_spin_locked(&inode->i_lock);
- prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
- sleep = inode->i_state & I_SYNC;
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+ sleep = !!(inode->i_state & I_SYNC);
spin_unlock(&inode->i_lock);
if (sleep)
schedule();
- finish_wait(wqh, &wait);
+ finish_wait(wq_head, &wqe.wq_entry);
}
/*
@@ -1752,7 +1759,7 @@ static int writeback_single_inode(struct inode *inode,
*/
if (wbc->sync_mode != WB_SYNC_ALL)
goto out;
- __inode_wait_for_writeback(inode);
+ inode_wait_for_writeback(inode);
}
WARN_ON(inode->i_state & I_SYNC);
/*
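
The rewritten wait loops above lean on i_lock for memory ordering instead of an explicit smp_mb(). A waker has to follow the same discipline: clear the bit and issue the wakeup while holding the lock the waiters recheck under. A generic sketch with illustrative names:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>

#define MY_BIT_BUSY	0

struct my_obj {
	spinlock_t lock;
	unsigned long state;
};

static void my_complete(struct my_obj *obj)
{
	assert_spin_locked(&obj->lock);

	clear_bit(MY_BIT_BUSY, &obj->state);
	/* The shared lock orders the clear against the waiter's recheck. */
	wake_up_bit(&obj->state, MY_BIT_BUSY);
}
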
diff --git a/fs/fsopen.c b/fs/fsopen.c
index ed2dd000622e..ee92ca58429e 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -394,13 +394,13 @@ SYSCALL_DEFINE5(fsconfig,
}
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
+ if (fd_file(f)->f_op != &fscontext_fops)
goto out_f;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
if (fc->ops == &legacy_fs_context_ops) {
switch (cmd) {
case FSCONFIG_SET_BINARY:
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 6e0228c6d0cb..ce0ff7a9007b 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -3,6 +3,9 @@
# Makefile for the FUSE filesystem.
#
+# Needed for trace events
+ccflags-y = -I$(src)
+
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index 04cfd8fee992..8f484b105f13 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -12,7 +12,6 @@
#include <linux/posix_acl_xattr.h>
static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
- struct mnt_idmap *idmap,
struct inode *inode, int type, bool rcu)
{
int size;
@@ -74,7 +73,7 @@ struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
if (fuse_no_acl(fc, inode))
return ERR_PTR(-EOPNOTSUPP);
- return __fuse_get_acl(fc, idmap, inode, type, false);
+ return __fuse_get_acl(fc, inode, type, false);
}
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
@@ -90,8 +89,7 @@ struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
*/
if (!fc->posix_acl)
return NULL;
-
- return __fuse_get_acl(fc, &nop_mnt_idmap, inode, type, rcu);
+ return __fuse_get_acl(fc, inode, type, rcu);
}
int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
@@ -146,8 +144,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
* be stripped.
*/
if (fc->posix_acl &&
- !in_group_or_capable(&nop_mnt_idmap, inode,
- i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
+ !in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f0c9cd1a0b39..46ed30a4e0fc 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -22,6 +22,9 @@
#include <linux/splice.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include "fuse_trace.h"
+
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -105,11 +108,17 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
static void fuse_put_request(struct fuse_req *req);
-static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
+static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ bool for_background)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
+ bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP);
+ kuid_t fsuid;
+ kgid_t fsgid;
int err;
+
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
@@ -137,19 +146,32 @@ static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
goto out;
}
- req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
- if (unlikely(req->in.h.uid == ((uid_t)-1) ||
- req->in.h.gid == ((gid_t)-1))) {
+	/*
+	 * Keep the old behavior when idmapped mount support was not
+	 * declared by the FUSE server.
+	 *
+	 * For FUSE servers that do support idmapped mounts, we send
+	 * UID/GID only along with "inode creation" fuse requests;
+	 * otherwise idmap == &invalid_mnt_idmap and
+	 * req->in.h.{u,g}id will be equal to FUSE_INVALID_UIDGID.
+	 */
+ fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
+ fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
+ req->in.h.uid = from_kuid(fc->user_ns, fsuid);
+ req->in.h.gid = from_kgid(fc->user_ns, fsgid);
+
+ if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
+ req->in.h.gid == ((gid_t)-1))) {
fuse_put_request(req);
return ERR_PTR(-EOVERFLOW);
}
+
return req;
out:
@@ -194,11 +216,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
}
EXPORT_SYMBOL_GPL(fuse_len_args);
-u64 fuse_get_unique(struct fuse_iqueue *fiq)
+static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
+{
+ u64 ret;
+
+ spin_lock(&fiq->lock);
+ ret = fuse_get_unique_locked(fiq);
+ spin_unlock(&fiq->lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +250,70 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
+static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ kfree(forget);
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (list_empty(&req->intr_entry)) {
+ list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from fuse_request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->lock);
+ } else {
+ fuse_dev_wake_and_unlock(fiq);
+ }
+ } else {
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique_locked(fiq);
+ list_add_tail(&req->list, &fiq->pending);
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+ }
+}
+
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
- .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
- .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
- .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+ .send_forget = fuse_dev_queue_forget,
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_dev_queue_req,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
-static void queue_request_and_unlock(struct fuse_iqueue *fiq,
- struct fuse_req *req)
-__releases(fiq->lock)
+static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
- list_add_tail(&req->list, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ trace_fuse_request_send(req);
+ fiq->ops->send_req(fiq, req);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +324,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- fiq->forget_list_tail->next = forget;
- fiq->forget_list_tail = forget;
- fiq->ops->wake_forget_and_unlock(fiq);
- } else {
- kfree(forget);
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_forget(fiq, forget);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +338,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->lock);
- req->in.h.unique = fuse_get_unique(fiq);
- queue_request_and_unlock(fiq, req);
+ fuse_send_one(fiq, req);
}
}
@@ -288,6 +359,7 @@ void fuse_request_end(struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+ trace_fuse_request_end(req);
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below FR_INTERRUPTED check. Pairs with
@@ -337,29 +409,12 @@ static int queue_interrupt(struct fuse_req *req)
{
struct fuse_iqueue *fiq = &req->fm->fc->iq;
- spin_lock(&fiq->lock);
/* Check that an interrupt was actually requested for this req */
- if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->lock);
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
- }
- if (list_empty(&req->intr_entry)) {
- list_add_tail(&req->intr_entry, &fiq->interrupts);
- /*
- * Pairs with smp_mb() implied by test_and_set_bit()
- * from fuse_request_end().
- */
- smp_mb();
- if (test_bit(FR_FINISHED, &req->flags)) {
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->lock);
- return 0;
- }
- fiq->ops->wake_interrupt_and_unlock(fiq);
- } else {
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_interrupt(fiq, req);
+
return 0;
}
@@ -414,21 +469,15 @@ static void __fuse_request_send(struct fuse_req *req)
struct fuse_iqueue *fiq = &req->fm->fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->lock);
- if (!fiq->connected) {
- spin_unlock(&fiq->lock);
- req->out.h.error = -ENOTCONN;
- } else {
- req->in.h.unique = fuse_get_unique(fiq);
- /* acquire extra reference, since request is still needed
- after fuse_request_end() */
- __fuse_get_request(req);
- queue_request_and_unlock(fiq, req);
- request_wait_answer(req);
- /* Pairs with smp_wmb() in fuse_request_end() */
- smp_rmb();
- }
+ /* acquire extra reference, since request is still needed after
+ fuse_request_end() */
+ __fuse_get_request(req);
+ fuse_send_one(fiq, req);
+
+ request_wait_answer(req);
+ /* Pairs with smp_wmb() in fuse_request_end() */
+ smp_rmb();
}
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -468,8 +517,14 @@ static void fuse_force_creds(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ } else {
+ req->in.h.uid = FUSE_INVALID_UIDGID;
+ req->in.h.gid = FUSE_INVALID_UIDGID;
+ }
+
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}
@@ -484,7 +539,9 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
__set_bit(FR_ASYNC, &req->flags);
}
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
@@ -501,7 +558,7 @@ ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
__set_bit(FR_FORCE, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -562,7 +619,7 @@ int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
__set_bit(FR_BACKGROUND, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, true);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, true);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -583,9 +640,8 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
{
struct fuse_req *req;
struct fuse_iqueue *fiq = &fm->fc->iq;
- int err = 0;
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -594,16 +650,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- queue_request_and_unlock(fiq, req);
- } else {
- err = -ENODEV;
- spin_unlock(&fiq->lock);
- fuse_put_request(req);
- }
+ fuse_send_one(fiq, req);
- return err;
+ return 0;
}
/*
@@ -1075,9 +1124,9 @@ __releases(fiq->lock)
return err ? err : reqsize;
}
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp)
+static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1145,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
-EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
@@ -1111,7 +1159,7 @@ __releases(fiq->lock)
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1142,7 +1190,7 @@ __releases(fiq->lock)
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1830,7 +1878,7 @@ static void fuse_resend(struct fuse_conn *fc)
}
/* iq and pq requests are both oldest to newest */
list_splice(&to_queue, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ fuse_dev_wake_and_unlock(fiq);
}
static int fuse_notify_resend(struct fuse_conn *fc)
@@ -2329,15 +2377,15 @@ static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
return -EFAULT;
f = fdget(oldfd);
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
/*
* Check against file->f_op because CUSE
* uses the same ioctl handler.
*/
- if (f.file->f_op == file->f_op)
- fud = fuse_get_dev(f.file);
+ if (fd_file(f)->f_op == file->f_op)
+ fud = fuse_get_dev(fd_file(f));
res = -EINVAL;
if (fud) {
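
A note on the ID scheme the locking rework relies on: fuse_get_unique_locked() advances reqctr by FUSE_REQ_ID_STEP, so bit 0 of every request ID stays clear and the read side can tag interrupts with it. The defines below mirror the existing ones in fs/fuse/dev.c; the helpers are illustrative only:

#define FUSE_INT_REQ_BIT	(1ULL << 0)
#define FUSE_REQ_ID_STEP	(1ULL << 1)

/* IDs handed out: 2, 4, 6, ...; the interrupt for request N goes out as N | 1. */
static u64 interrupt_unique(u64 request_unique)
{
	return request_unique | FUSE_INT_REQ_BIT;
}

/* A server recovers the original request ID by masking the bit back off. */
static u64 base_unique(u64 unique)
{
	return unique & ~FUSE_INT_REQ_BIT;
}
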
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8e96df9fd76c..54104dd48af7 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -545,17 +545,21 @@ static u32 fuse_ext_size(size_t size)
/*
* This adds just a single supplementary group that matches the parent's group.
*/
-static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
+static int get_create_supp_group(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct fuse_in_arg *ext)
{
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_ext_header *xh;
struct fuse_supp_groups *sg;
kgid_t kgid = dir->i_gid;
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
gid_t parent_gid = from_kgid(fc->user_ns, kgid);
+
u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
- if (parent_gid == (gid_t) -1 || gid_eq(kgid, current_fsgid()) ||
- !in_group_p(kgid))
+ if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
+ !vfsgid_in_group_p(vfsgid))
return 0;
xh = extend_arg(ext, sg_len);
@@ -572,7 +576,8 @@ static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
return 0;
}
-static int get_create_ext(struct fuse_args *args,
+static int get_create_ext(struct mnt_idmap *idmap,
+ struct fuse_args *args,
struct inode *dir, struct dentry *dentry,
umode_t mode)
{
@@ -583,7 +588,7 @@ static int get_create_ext(struct fuse_args *args,
if (fc->init_security)
err = get_security_context(dentry, mode, &ext);
if (!err && fc->create_supp_group)
- err = get_create_supp_group(dir, &ext);
+ err = get_create_supp_group(idmap, dir, &ext);
if (!err && ext.size) {
WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
@@ -609,9 +614,9 @@ static void free_ext_value(struct fuse_args *args)
* If the filesystem doesn't support this, then fall back to separate
* 'mknod' + 'open' requests.
*/
-static int fuse_create_open(struct inode *dir, struct dentry *entry,
- struct file *file, unsigned int flags,
- umode_t mode, u32 opcode)
+static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, struct file *file,
+ unsigned int flags, umode_t mode, u32 opcode)
{
int err;
struct inode *inode;
@@ -668,11 +673,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.out_args[1].size = sizeof(*outopenp);
args.out_args[1].value = outopenp;
- err = get_create_ext(&args, dir, entry, mode);
+ err = get_create_ext(idmap, &args, dir, entry, mode);
if (err)
goto out_free_ff;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
free_ext_value(&args);
if (err)
goto out_free_ff;
@@ -729,6 +734,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
umode_t mode)
{
int err;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
@@ -753,7 +759,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, FUSE_CREATE);
+ err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -764,7 +770,7 @@ out_dput:
return err;
mknod:
- err = fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ err = fuse_mknod(idmap, dir, entry, mode, 0);
if (err)
goto out_dput;
no_open:
@@ -774,9 +780,9 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
- struct inode *dir, struct dentry *entry,
- umode_t mode)
+static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
@@ -798,12 +804,12 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
args->out_args[0].value = &outarg;
if (args->opcode != FUSE_LINK) {
- err = get_create_ext(args, dir, entry, mode);
+ err = get_create_ext(idmap, args, dir, entry, mode);
if (err)
goto out_put_forget_req;
}
- err = fuse_simple_request(fm, args);
+ err = fuse_simple_idmap_request(idmap, fm, args);
free_ext_value(args);
if (err)
goto out_put_forget_req;
@@ -864,13 +870,13 @@ static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, mode);
+ return create_new_entry(idmap, fm, &args, dir, entry, mode);
}
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, umode_t mode, bool excl)
{
- return fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ return fuse_mknod(idmap, dir, entry, mode, 0);
}
static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
@@ -882,7 +888,8 @@ static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (fc->no_tmpfile)
return -EOPNOTSUPP;
- err = fuse_create_open(dir, file->f_path.dentry, file, file->f_flags, mode, FUSE_TMPFILE);
+ err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
+ file->f_flags, mode, FUSE_TMPFILE);
if (err == -ENOSYS) {
fc->no_tmpfile = 1;
err = -EOPNOTSUPP;
@@ -909,7 +916,7 @@ static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, S_IFDIR);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
}
static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
@@ -925,7 +932,7 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = entry->d_name.name;
args.in_args[1].size = len;
args.in_args[1].value = link;
- return create_new_entry(fm, &args, dir, entry, S_IFLNK);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -1019,7 +1026,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
return err;
}
-static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags, int opcode, size_t argsize)
{
@@ -1040,7 +1047,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
args.in_args[1].value = oldent->d_name.name;
args.in_args[2].size = newent->d_name.len + 1;
args.in_args[2].value = newent->d_name.name;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
if (!err) {
/* ctime changes */
fuse_update_ctime(d_inode(oldent));
@@ -1086,7 +1093,8 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
if (fc->no_rename2 || fc->minor < 23)
return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
+ olddir, oldent, newdir, newent, flags,
FUSE_RENAME2,
sizeof(struct fuse_rename2_in));
if (err == -ENOSYS) {
@@ -1094,7 +1102,7 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
err = -EINVAL;
}
} else {
- err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
FUSE_RENAME,
sizeof(struct fuse_rename_in));
}
@@ -1119,7 +1127,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
@@ -1128,18 +1136,22 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
return err;
}
-static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
- struct kstat *stat)
+static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct fuse_attr *attr, struct kstat *stat)
{
unsigned int blkbits;
struct fuse_conn *fc = get_fuse_conn(inode);
+ vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
+ make_kuid(fc->user_ns, attr->uid));
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
+ make_kgid(fc->user_ns, attr->gid));
stat->dev = inode->i_sb->s_dev;
stat->ino = attr->ino;
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
stat->nlink = attr->nlink;
- stat->uid = make_kuid(fc->user_ns, attr->uid);
- stat->gid = make_kgid(fc->user_ns, attr->gid);
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->atime.tv_sec = attr->atime;
stat->atime.tv_nsec = attr->atimensec;
@@ -1178,8 +1190,8 @@ static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
attr->blksize = sx->blksize;
}
-static int fuse_do_statx(struct inode *inode, struct file *file,
- struct kstat *stat)
+static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat)
{
int err;
struct fuse_attr attr;
@@ -1232,15 +1244,15 @@ static int fuse_do_statx(struct inode *inode, struct file *file,
stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
stat->btime.tv_sec = sx->btime.tv_sec;
stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
- fuse_fillattr(inode, &attr, stat);
+ fuse_fillattr(idmap, inode, &attr, stat);
stat->result_mask |= STATX_TYPE;
}
return 0;
}
-static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
- struct file *file)
+static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct kstat *stat, struct file *file)
{
int err;
struct fuse_getattr_in inarg;
@@ -1279,15 +1291,15 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
ATTR_TIMEOUT(&outarg),
attr_version);
if (stat)
- fuse_fillattr(inode, &outarg.attr, stat);
+ fuse_fillattr(idmap, inode, &outarg.attr, stat);
}
}
return err;
}
-static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, u32 request_mask,
- unsigned int flags)
+static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1318,17 +1330,17 @@ retry:
forget_all_cached_acls(inode);
/* Try statx if BTIME is requested */
if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
- err = fuse_do_statx(inode, file, stat);
+ err = fuse_do_statx(idmap, inode, file, stat);
if (err == -ENOSYS) {
fc->no_statx = 1;
err = 0;
goto retry;
}
} else {
- err = fuse_do_getattr(inode, stat, file);
+ err = fuse_do_getattr(idmap, inode, stat, file);
}
} else if (stat) {
- generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
if (test_bit(FUSE_I_BTIME, &fi->state)) {
@@ -1342,7 +1354,7 @@ retry:
int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
{
- return fuse_update_get_attr(inode, file, NULL, mask, 0);
+ return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
}
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
@@ -1462,6 +1474,14 @@ static int fuse_access(struct inode *inode, int mask)
BUG_ON(mask & MAY_NOT_BLOCK);
+	/*
+	 * We should not send FUSE_ACCESS to userspace when idmapped
+	 * mounts are enabled: in that case fc->default_permissions = 1
+	 * and access permission checks are done on the kernel side.
+	 */
+ WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
+
if (fm->fc->no_access)
return 0;
@@ -1486,7 +1506,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
return -ECHILD;
forget_all_cached_acls(inode);
- return fuse_do_getattr(inode, NULL, NULL);
+ return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
}
/*
@@ -1534,7 +1554,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
}
if (fc->default_permissions) {
- err = generic_permission(&nop_mnt_idmap, inode, mask);
+ err = generic_permission(idmap, inode, mask);
/* If permission is denied, try to refresh file
attributes. This is also needed, because the root
@@ -1542,7 +1562,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
if (err == -EACCES && !refreshed) {
err = fuse_perm_getattr(inode, mask);
if (!err)
- err = generic_permission(&nop_mnt_idmap,
+ err = generic_permission(idmap,
inode, mask);
}
@@ -1738,17 +1758,29 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
return true;
}
-static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
- struct fuse_setattr_in *arg, bool trust_local_cmtime)
+static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
+ struct iattr *iattr, struct fuse_setattr_in *arg,
+ bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
- if (ivalid & ATTR_UID)
- arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
- if (ivalid & ATTR_GID)
- arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
+
+ if (ivalid & ATTR_UID) {
+ kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);
+
+ arg->valid |= FATTR_UID;
+ arg->uid = from_kuid(fc->user_ns, fsuid);
+ }
+
+ if (ivalid & ATTR_GID) {
+ kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);
+
+ arg->valid |= FATTR_GID;
+ arg->gid = from_kgid(fc->user_ns, fsgid);
+ }
+
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
@@ -1868,8 +1900,8 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
* vmtruncate() doesn't allow for this case, so do the rlimit checking
* and the actual truncation by hand.
*/
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file)
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file)
{
struct inode *inode = d_inode(dentry);
struct fuse_mount *fm = get_fuse_mount(inode);
@@ -1889,7 +1921,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
- err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err)
return err;
@@ -1948,7 +1980,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
+ iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
if (file) {
struct fuse_file *ff = file->private_data;
inarg.valid |= FATTR_FH;
@@ -2065,7 +2097,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
* ia_mode calculation may have used stale i_mode.
* Refresh and recalculate.
*/
- ret = fuse_do_getattr(inode, NULL, file);
+ ret = fuse_do_getattr(idmap, inode, NULL, file);
if (ret)
return ret;
@@ -2083,7 +2115,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
if (!attr->ia_valid)
return 0;
- ret = fuse_do_setattr(entry, attr, file);
+ ret = fuse_do_setattr(idmap, entry, attr, file);
if (!ret) {
/*
* If filesystem supports acls it may have updated acl xattrs in
@@ -2122,7 +2154,7 @@ static int fuse_getattr(struct mnt_idmap *idmap,
return -EACCES;
}
- return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
+ return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ed76121f73f2..f33fbce86ae0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -448,9 +448,6 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
/*
* Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
*/
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t idx_to)
@@ -458,6 +455,9 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
struct fuse_inode *fi = get_fuse_inode(inode);
bool found;
+ if (RB_EMPTY_ROOT(&fi->writepages))
+ return false;
+
spin_lock(&fi->lock);
found = fuse_find_writeback(fi, idx_from, idx_to);
spin_unlock(&fi->lock);
@@ -1345,7 +1345,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
/* shared locks are not allowed with parallel page cache IO */
if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
- return false;
+ return true;
/* Parallel dio beyond EOF is not supported, at least for now. */
if (fuse_io_past_eof(iocb, from))
@@ -1398,6 +1398,7 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
struct inode *inode = mapping->host;
@@ -1412,7 +1413,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
return err;
if (fc->handle_killpriv_v2 &&
- setattr_should_drop_suidgid(&nop_mnt_idmap,
+ setattr_should_drop_suidgid(idmap,
file_inode(file))) {
goto writethrough;
}
@@ -1762,27 +1763,31 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
for (i = 0; i < ap->num_pages; i++)
__free_page(ap->pages[i]);
- if (wpa->ia.ff)
- fuse_file_put(wpa->ia.ff, false);
+ fuse_file_put(wpa->ia.ff, false);
kfree(ap->pages);
kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_mount *fm,
- struct fuse_writepage_args *wpa)
+static void fuse_writepage_finish_stat(struct inode *inode, struct page *page)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+ dec_node_page_state(page, NR_WRITEBACK_TEMP);
+ wb_writeout_inc(&bdi->wb);
+}
+
+static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- for (i = 0; i < ap->num_pages; i++) {
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
- }
+ for (i = 0; i < ap->num_pages; i++)
+ fuse_writepage_finish_stat(inode, ap->pages[i]);
+
wake_up(&fi->page_waitq);
}
@@ -1829,19 +1834,14 @@ __acquires(fi->lock)
out_free:
fi->writectr--;
rb_erase(&wpa->writepages_entry, &fi->writepages);
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
/* After rb_erase() aux request list is private */
for (aux = wpa->next; aux; aux = next) {
- struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
-
next = aux->next;
aux->next = NULL;
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(aux->inode, aux->ia.ap.pages[0]);
fuse_writepage_free(aux);
}
@@ -1936,7 +1936,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
wpa->next = next->next;
next->next = NULL;
- next->ia.ff = fuse_file_get(wpa->ia.ff);
tree_insert(&fi->writepages, next);
/*
@@ -1965,7 +1964,7 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
fuse_send_writepage(fm, next, inarg->offset + inarg->size);
}
fi->writectr--;
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
fuse_writepage_free(wpa);
}
@@ -2049,49 +2048,77 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
rcu_read_unlock();
}
+static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
+ struct folio *tmp_folio, uint32_t page_index)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+
+ folio_copy(tmp_folio, folio);
+
+ ap->pages[page_index] = &tmp_folio->page;
+ ap->descs[page_index].offset = 0;
+ ap->descs[page_index].length = PAGE_SIZE;
+
+ inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
+ inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP);
+}
+
+static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+ struct fuse_file *ff)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
+
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa)
+ return NULL;
+
+ fuse_writepage_add_to_bucket(fc, wpa);
+ fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->inode = inode;
+ wpa->ia.ff = ff;
+
+ ap = &wpa->ia.ap;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
+
+ return wpa;
+}
+
static int fuse_writepage_locked(struct folio *folio)
{
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_writepage_args *wpa;
struct fuse_args_pages *ap;
struct folio *tmp_folio;
+ struct fuse_file *ff;
int error = -ENOMEM;
- folio_start_writeback(folio);
-
- wpa = fuse_writepage_args_alloc();
- if (!wpa)
- goto err;
- ap = &wpa->ia.ap;
-
tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
if (!tmp_folio)
- goto err_free;
+ goto err;
error = -EIO;
- wpa->ia.ff = fuse_write_file_get(fi);
- if (!wpa->ia.ff)
+ ff = fuse_write_file_get(fi);
+ if (!ff)
goto err_nofile;
- fuse_writepage_add_to_bucket(fc, wpa);
- fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);
+ wpa = fuse_writepage_args_setup(folio, ff);
+ error = -ENOMEM;
+ if (!wpa)
+ goto err_writepage_args;
- folio_copy(tmp_folio, folio);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
+ ap = &wpa->ia.ap;
ap->num_pages = 1;
- ap->pages[0] = &tmp_folio->page;
- ap->descs[0].offset = 0;
- ap->descs[0].length = PAGE_SIZE;
- ap->args.end = fuse_writepage_end;
- wpa->inode = inode;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
+ folio_start_writeback(folio);
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0);
spin_lock(&fi->lock);
tree_insert(&fi->writepages, wpa);
@@ -2103,13 +2130,12 @@ static int fuse_writepage_locked(struct folio *folio)
return 0;
+err_writepage_args:
+ fuse_file_put(ff, false);
err_nofile:
folio_put(tmp_folio);
-err_free:
- kfree(wpa);
err:
mapping_set_error(folio->mapping, error);
- folio_end_writeback(folio);
return error;
}
@@ -2155,7 +2181,6 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
int num_pages = wpa->ia.ap.num_pages;
int i;
- wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
@@ -2210,11 +2235,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
spin_unlock(&fi->lock);
if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(new_wpa->inode, new_ap->pages[0]);
fuse_writepage_free(new_wpa);
}
@@ -2264,24 +2285,17 @@ static int fuse_writepages_fill(struct folio *folio,
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct page *tmp_page;
+ struct folio *tmp_folio;
int err;
- if (!data->ff) {
- err = -EIO;
- data->ff = fuse_write_file_get(fi);
- if (!data->ff)
- goto out_unlock;
- }
-
if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
fuse_writepages_send(data);
data->wpa = NULL;
}
err = -ENOMEM;
- tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
- if (!tmp_page)
+ tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
+ if (!tmp_folio)
goto out_unlock;
/*
@@ -2299,35 +2313,20 @@ static int fuse_writepages_fill(struct folio *folio,
*/
if (data->wpa == NULL) {
err = -ENOMEM;
- wpa = fuse_writepage_args_alloc();
+ wpa = fuse_writepage_args_setup(folio, data->ff);
if (!wpa) {
- __free_page(tmp_page);
+ folio_put(tmp_folio);
goto out_unlock;
}
- fuse_writepage_add_to_bucket(fc, wpa);
-
+ fuse_file_get(wpa->ia.ff);
data->max_pages = 1;
-
ap = &wpa->ia.ap;
- fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
- ap->args.end = fuse_writepage_end;
- ap->num_pages = 0;
- wpa->inode = inode;
}
folio_start_writeback(folio);
- copy_highpage(tmp_page, &folio->page);
- ap->pages[ap->num_pages] = tmp_page;
- ap->descs[ap->num_pages].offset = 0;
- ap->descs[ap->num_pages].length = PAGE_SIZE;
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_pages);
data->orig_pages[ap->num_pages] = &folio->page;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
-
err = 0;
if (data->wpa) {
/*
@@ -2352,13 +2351,13 @@ static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_wb_data data;
int err;
- err = -EIO;
if (fuse_is_bad(inode))
- goto out;
+ return -EIO;
if (wbc->sync_mode == WB_SYNC_NONE &&
fc->num_background >= fc->congestion_threshold)
@@ -2366,7 +2365,9 @@ static int fuse_writepages(struct address_space *mapping,
data.inode = inode;
data.wpa = NULL;
- data.ff = NULL;
+ data.ff = fuse_write_file_get(fi);
+ if (!data.ff)
+ return -EIO;
err = -ENOMEM;
data.orig_pages = kcalloc(fc->max_pages,
@@ -2380,11 +2381,10 @@ static int fuse_writepages(struct address_space *mapping,
WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
}
- if (data.ff)
- fuse_file_put(data.ff, false);
kfree(data.orig_pages);
out:
+ fuse_file_put(data.ff, false);
return err;
}
@@ -2393,76 +2393,77 @@ out:
* but how to implement it without killing performance needs more thinking.
*/
static int fuse_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
- struct page *page;
+ struct folio *folio;
loff_t fsize;
int err = -ENOMEM;
WARN_ON(!fc->writeback_cache);
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
goto error;
- fuse_wait_on_page_writeback(mapping->host, page->index);
+ fuse_wait_on_page_writeback(mapping->host, folio->index);
- if (PageUptodate(page) || len == PAGE_SIZE)
+ if (folio_test_uptodate(folio) || len >= folio_size(folio))
goto success;
/*
- * Check if the start this page comes after the end of file, in which
- * case the readpage can be optimized away.
+ * Check if the start of this folio comes after the end of file,
+ * in which case the readpage can be optimized away.
*/
fsize = i_size_read(mapping->host);
- if (fsize <= (pos & PAGE_MASK)) {
- size_t off = pos & ~PAGE_MASK;
+ if (fsize <= folio_pos(folio)) {
+ size_t off = offset_in_folio(folio, pos);
if (off)
- zero_user_segment(page, 0, off);
+ folio_zero_segment(folio, 0, off);
goto success;
}
- err = fuse_do_readpage(file, page);
+ err = fuse_do_readpage(file, &folio->page);
if (err)
goto cleanup;
success:
- *pagep = page;
+ *foliop = folio;
return 0;
cleanup:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
error:
return err;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
/* Haven't copied anything? Skip zeroing, size extending, dirtying. */
if (!copied)
goto unlock;
pos += copied;
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/* Zero any unwritten bytes at the end of the page */
size_t endoff = pos & ~PAGE_MASK;
if (endoff)
- zero_user_segment(page, endoff, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segment(folio, endoff, PAGE_SIZE);
+ folio_mark_uptodate(folio);
}
if (pos > inode->i_size)
i_size_write(inode, pos);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
@@ -2972,7 +2973,7 @@ static void fuse_do_truncate(struct file *file)
attr.ia_file = file;
attr.ia_valid |= ATTR_FILE;
- fuse_do_setattr(file_dentry(file), &attr, file);
+ fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f23919610313..e6cc3d552b13 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -449,22 +449,19 @@ struct fuse_iqueue;
*/
struct fuse_iqueue_ops {
/**
- * Signal that a forget has been queued
+ * Send one forget
*/
- void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
/**
- * Signal that an INTERRUPT request has been queued
+ * Send interrupt for request
*/
- void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
- * Signal that a request has been queued
+ * Send one request
*/
- void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Clean up when fuse_iqueue is destroyed
@@ -869,7 +866,7 @@ struct fuse_conn {
/** Negotiated minor version */
unsigned minor;
- /** Entry on the fuse_mount_list */
+ /** Entry on the fuse_conn_list */
struct list_head entry;
/** Device ID from the root super block */
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp);
-
/*
* Initialize READ or READDIR request
*/
@@ -1154,7 +1147,22 @@ void __exit fuse_ctl_cleanup(void);
/**
* Simple request sending that does request allocation and freeing
*/
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args);
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args);
+
+static inline ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+{
+ return __fuse_simple_request(&invalid_mnt_idmap, fm, args);
+}
+
+static inline ssize_t fuse_simple_idmap_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
+{
+ return __fuse_simple_request(idmap, fm, args);
+}
+
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
gfp_t gfp_flags);
@@ -1330,8 +1338,8 @@ bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written);
int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file);
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file);
void fuse_set_initialized(struct fuse_conn *fc);
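The split into __fuse_simple_request() plus two trivial inline wrappers keeps every existing caller source-compatible while letting idmapped-mount aware paths pass the mount's idmap explicitly. A hedged usage sketch (call sites illustrative):

        /* idmap-agnostic caller, behaves exactly as before */
        err = fuse_simple_request(fm, &args);

        /* caller that must honour an idmapped mount */
        err = fuse_simple_idmap_request(file_mnt_idmap(file), fm, &args);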
diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h
new file mode 100644
index 000000000000..bbe9ddd8c716
--- /dev/null
+++ b/fs/fuse/fuse_trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fuse
+
+#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUSE_H
+
+#include <linux/tracepoint.h>
+
+#define OPCODES \
+ EM( FUSE_LOOKUP, "FUSE_LOOKUP") \
+ EM( FUSE_FORGET, "FUSE_FORGET") \
+ EM( FUSE_GETATTR, "FUSE_GETATTR") \
+ EM( FUSE_SETATTR, "FUSE_SETATTR") \
+ EM( FUSE_READLINK, "FUSE_READLINK") \
+ EM( FUSE_SYMLINK, "FUSE_SYMLINK") \
+ EM( FUSE_MKNOD, "FUSE_MKNOD") \
+ EM( FUSE_MKDIR, "FUSE_MKDIR") \
+ EM( FUSE_UNLINK, "FUSE_UNLINK") \
+ EM( FUSE_RMDIR, "FUSE_RMDIR") \
+ EM( FUSE_RENAME, "FUSE_RENAME") \
+ EM( FUSE_LINK, "FUSE_LINK") \
+ EM( FUSE_OPEN, "FUSE_OPEN") \
+ EM( FUSE_READ, "FUSE_READ") \
+ EM( FUSE_WRITE, "FUSE_WRITE") \
+ EM( FUSE_STATFS, "FUSE_STATFS") \
+ EM( FUSE_RELEASE, "FUSE_RELEASE") \
+ EM( FUSE_FSYNC, "FUSE_FSYNC") \
+ EM( FUSE_SETXATTR, "FUSE_SETXATTR") \
+ EM( FUSE_GETXATTR, "FUSE_GETXATTR") \
+ EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \
+ EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \
+ EM( FUSE_FLUSH, "FUSE_FLUSH") \
+ EM( FUSE_INIT, "FUSE_INIT") \
+ EM( FUSE_OPENDIR, "FUSE_OPENDIR") \
+ EM( FUSE_READDIR, "FUSE_READDIR") \
+ EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \
+ EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \
+ EM( FUSE_GETLK, "FUSE_GETLK") \
+ EM( FUSE_SETLK, "FUSE_SETLK") \
+ EM( FUSE_SETLKW, "FUSE_SETLKW") \
+ EM( FUSE_ACCESS, "FUSE_ACCESS") \
+ EM( FUSE_CREATE, "FUSE_CREATE") \
+ EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \
+ EM( FUSE_BMAP, "FUSE_BMAP") \
+ EM( FUSE_DESTROY, "FUSE_DESTROY") \
+ EM( FUSE_IOCTL, "FUSE_IOCTL") \
+ EM( FUSE_POLL, "FUSE_POLL") \
+ EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \
+ EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \
+ EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \
+ EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \
+ EM( FUSE_RENAME2, "FUSE_RENAME2") \
+ EM( FUSE_LSEEK, "FUSE_LSEEK") \
+ EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \
+ EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \
+ EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \
+ EM( FUSE_SYNCFS, "FUSE_SYNCFS") \
+ EM( FUSE_TMPFILE, "FUSE_TMPFILE") \
+ EM( FUSE_STATX, "FUSE_STATX") \
+ EMe(CUSE_INIT, "CUSE_INIT")
+
+/*
+ * This will turn the above table into TRACE_DEFINE_ENUM() for each of the
+ * entries.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+OPCODES
+
+/* Now we redefine it with the table that __print_symbolic needs. */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+TRACE_EVENT(fuse_request_send,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(enum fuse_opcode, opcode)
+ __field(uint32_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->opcode = req->in.h.opcode;
+ __entry->len = req->in.h.len;
+ ),
+
+ TP_printk("connection %u req %llu opcode %u (%s) len %u ",
+ __entry->connection, __entry->unique, __entry->opcode,
+ __print_symbolic(__entry->opcode, OPCODES), __entry->len)
+);
+
+TRACE_EVENT(fuse_request_end,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(uint32_t, len)
+ __field(int32_t, error)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->len = req->out.h.len;
+ __entry->error = req->out.h.error;
+ ),
+
+ TP_printk("connection %u req %llu len %u error %d", __entry->connection,
+ __entry->unique, __entry->len, __entry->error)
+);
+
+#endif /* _TRACE_FUSE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE fuse_trace
+#include <trace/define_trace.h>
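The EM()/EMe() dance in the new header is the usual tracepoint idiom: the same OPCODES table is expanded twice, once into TRACE_DEFINE_ENUM() so user-space tooling can resolve the enum values, and once into the { value, "name" } pairs that __print_symbolic() consumes. Reduced to its essentials (illustrative only, two entries shown):

        #define OPCODES \
                EM( FUSE_LOOKUP, "FUSE_LOOKUP") \
                EMe(FUSE_FORGET, "FUSE_FORGET")

        /* pass 1: export the enum values to user space */
        #define EM(a, b)  TRACE_DEFINE_ENUM(a);
        #define EMe(a, b) TRACE_DEFINE_ENUM(a);
        OPCODES

        /* pass 2: build the lookup table __print_symbolic() consumes */
        #undef EM
        #undef EMe
        #define EM(a, b)  {a, b},
        #define EMe(a, b) {a, b}
        /* ... __print_symbolic(__entry->opcode, OPCODES) ... */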
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index bebd89002328..fd3321e29a3e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1348,6 +1348,12 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_NO_EXPORT_SUPPORT)
fm->sb->s_export_op = &fuse_export_fid_operations;
+ if (flags & FUSE_ALLOW_IDMAP) {
+ if (fc->default_permissions)
+ fm->sb->s_iflags &= ~SB_I_NOIDMAP;
+ else
+ ok = false;
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -1395,7 +1401,7 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
@@ -1572,6 +1578,7 @@ static void fuse_sb_defaults(struct super_block *sb)
sb->s_time_gran = 1;
sb->s_export_op = &fuse_export_operations;
sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ sb->s_iflags |= SB_I_NOIDMAP;
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
@@ -1984,7 +1991,7 @@ static void fuse_kill_sb_anon(struct super_block *sb)
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
- .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
+ .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
@@ -2005,7 +2012,7 @@ static struct file_system_type fuseblk_fs_type = {
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
- .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
+ .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 9666d13884ce..62aee8289d11 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -228,16 +228,13 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
if (map->flags || map->padding)
goto out;
- file = fget(map->fd);
+ file = fget_raw(map->fd);
res = -EBADF;
if (!file)
goto out;
- res = -EOPNOTSUPP;
- if (!file->f_op->read_iter || !file->f_op->write_iter)
- goto out_fput;
-
backing_sb = file_inode(file)->i_sb;
res = -ELOOP;
if (backing_sb->s_stack_depth >= fc->max_stack_depth)
goto out_fput;
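Switching from fget() to fget_raw() means O_PATH descriptors are now accepted as passthrough backing files; fget() would have returned NULL for them. Hedged sketch of the difference:

        struct file *f;

        f = fget(fd);          /* returns NULL for an O_PATH descriptor */
        if (f)
                fput(f);

        f = fget_raw(fd);      /* also resolves O_PATH descriptors */
        if (f)
                fput(f);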
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index dd5260141615..6404a189e989 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -56,12 +56,14 @@ struct virtio_fs_vq {
bool connected;
long in_flight;
struct completion in_flight_zero; /* No inflight requests */
+ struct kobject *kobj;
char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
/* A virtio-fs device instance */
struct virtio_fs {
struct kobject kobj;
+ struct kobject *mqs_kobj;
struct list_head list; /* on virtio_fs_instances */
char *tag;
struct virtio_fs_vq *vqs;
@@ -200,19 +202,94 @@ static const struct kobj_type virtio_fs_ktype = {
.default_groups = virtio_fs_groups,
};
+static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
+ struct kobject *kobj)
+{
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ if (kobj == fs->vqs[i].kobj)
+ return &fs->vqs[i];
+ }
+ return NULL;
+}
+
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+
+ if (!fsvq)
+ return -EINVAL;
+ return sysfs_emit(buf, "%s\n", fsvq->name);
+}
+
+static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);
+
+static ssize_t cpu_list_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+ unsigned int cpu, qid;
+ const size_t size = PAGE_SIZE - 1;
+ bool first = true;
+ int ret = 0, pos = 0;
+
+ if (!fsvq)
+ return -EINVAL;
+
+ qid = fsvq->vq->index;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) {
+ if (first)
+ ret = snprintf(buf + pos, size - pos, "%u", cpu);
+ else
+ ret = snprintf(buf + pos, size - pos, ", %u", cpu);
+
+ if (ret >= size - pos)
+ break;
+ first = false;
+ pos += ret;
+ }
+ }
+ ret = snprintf(buf + pos, size + 1 - pos, "\n");
+ return pos + ret;
+}
+
+static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);
+
+static struct attribute *virtio_fs_vq_attrs[] = {
+ &virtio_fs_vq_name_attr.attr,
+ &virtio_fs_vq_cpu_list_attr.attr,
+ NULL
+};
+
+static struct attribute_group virtio_fs_vq_attr_group = {
+ .attrs = virtio_fs_vq_attrs,
+};
+
/* Make sure virtiofs_mutex is held */
-static void virtio_fs_put(struct virtio_fs *fs)
+static void virtio_fs_put_locked(struct virtio_fs *fs)
{
+ lockdep_assert_held(&virtio_fs_mutex);
+
kobject_put(&fs->kobj);
}
+static void virtio_fs_put(struct virtio_fs *fs)
+{
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put_locked(fs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
struct virtio_fs *vfs = fiq->priv;
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(vfs);
- mutex_unlock(&virtio_fs_mutex);
}
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
@@ -273,6 +350,50 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs)
}
}
+static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ kobject_put(fsvq->kobj);
+ }
+}
+
+static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ char buff[12];
+ int i, j, ret;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+
+ sprintf(buff, "%d", i);
+ fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
+ if (!fsvq->kobj) {
+ ret = -ENOMEM;
+ goto out_del;
+ }
+
+ ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
+ if (ret) {
+ kobject_put(fsvq->kobj);
+ goto out_del;
+ }
+ }
+
+ return 0;
+
+out_del:
+ for (j = 0; j < i; j++) {
+ fsvq = &fs->vqs[j];
+ kobject_put(fsvq->kobj);
+ }
+ return ret;
+}
+
/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
struct virtio_fs *fs)
@@ -296,17 +417,22 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
*/
fs->kobj.kset = virtio_fs_kset;
ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
- if (ret < 0) {
- mutex_unlock(&virtio_fs_mutex);
- return ret;
+ if (ret < 0)
+ goto out_unlock;
+
+ fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
+ if (!fs->mqs_kobj) {
+ ret = -ENOMEM;
+ goto out_del;
}
ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
- if (ret < 0) {
- kobject_del(&fs->kobj);
- mutex_unlock(&virtio_fs_mutex);
- return ret;
- }
+ if (ret < 0)
+ goto out_put;
+
+ ret = virtio_fs_add_queues_sysfs(fs);
+ if (ret)
+ goto out_remove;
list_add_tail(&fs->list, &virtio_fs_instances);
@@ -315,6 +441,16 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
kobject_uevent(&fs->kobj, KOBJ_ADD);
return 0;
+
+out_remove:
+ sysfs_remove_link(&fs->kobj, "device");
+out_put:
+ kobject_put(fs->mqs_kobj);
+out_del:
+ kobject_del(&fs->kobj);
+out_unlock:
+ mutex_unlock(&virtio_fs_mutex);
+ return ret;
}
/* Return the virtio_fs with a given tag, or NULL */
@@ -1043,7 +1179,9 @@ static void virtio_fs_remove(struct virtio_device *vdev)
mutex_lock(&virtio_fs_mutex);
/* This device is going away. No one should get new reference */
list_del_init(&fs->list);
+ virtio_fs_delete_queues_sysfs(fs);
sysfs_remove_link(&fs->kobj, "device");
+ kobject_put(fs->mqs_kobj);
kobject_del(&fs->kobj);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
@@ -1052,7 +1190,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
vdev->priv = NULL;
/* Put device reference on virtio_fs object */
- virtio_fs_put(fs);
+ virtio_fs_put_locked(fs);
mutex_unlock(&virtio_fs_mutex);
}
@@ -1091,22 +1229,13 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
-static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
- struct fuse_forget_link *link;
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
- struct virtio_fs *fs;
- struct virtio_fs_vq *fsvq;
- u64 unique;
-
- link = fuse_dequeue_forget(fiq, 1, NULL);
- unique = fuse_get_unique(fiq);
-
- fs = fiq->priv;
- fsvq = &fs->vqs[VQ_HIPRIO];
- spin_unlock(&fiq->lock);
+ struct virtio_fs *fs = fiq->priv;
+ struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
+ u64 unique = fuse_get_unique(fiq);
/* Allocate a buffer for the request */
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1255,7 @@ __releases(fiq->lock)
kfree(link);
}
-static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
/*
* TODO interrupts.
@@ -1136,7 +1264,6 @@ __releases(fiq->lock)
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
* with shared lock between host and guest.
*/
- spin_unlock(&fiq->lock);
}
/* Count number of scatter-gather elements required */
@@ -1341,21 +1468,17 @@ out:
return ret;
}
-static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
unsigned int queue_id;
struct virtio_fs *fs;
- struct fuse_req *req;
struct virtio_fs_vq *fsvq;
int ret;
- WARN_ON(list_empty(&fiq->pending));
- req = list_last_entry(&fiq->pending, struct fuse_req, list);
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
clear_bit(FR_PENDING, &req->flags);
- list_del_init(&req->list);
- WARN_ON(!list_empty(&fiq->pending));
- spin_unlock(&fiq->lock);
fs = fiq->priv;
queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1516,10 @@ __releases(fiq->lock)
}
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
- .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
- .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
- .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
- .release = virtio_fs_fiq_release,
+ .send_forget = virtio_fs_send_forget,
+ .send_interrupt = virtio_fs_send_interrupt,
+ .send_req = virtio_fs_send_req,
+ .release = virtio_fs_fiq_release,
};
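The old wake_*_and_unlock callbacks were invoked with fiq->lock held and had to drop it themselves; the new send_* callbacks receive the request directly with no locking contract. A hedged sketch of what a trivial send_req for some in-kernel transport might look like (transport hand-off hypothetical):

        static void example_send_req(struct fuse_iqueue *fiq,
                                     struct fuse_req *req)
        {
                /* NOTIFY_REPLY requests carry a preassigned unique id */
                if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
                        req->in.h.unique = fuse_get_unique(fiq);

                clear_bit(FR_PENDING, &req->flags);
                /* hand req to the transport; fiq->lock is NOT held here */
        }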
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
@@ -1596,9 +1719,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
out_err:
kfree(fc);
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(fs);
- mutex_unlock(&virtio_fs_mutex);
return err;
}
@@ -1628,6 +1749,7 @@ static struct file_system_type virtio_fs_type = {
.name = "virtiofs",
.init_fs_context = virtio_fs_init_fs_context,
.kill_sb = virtio_kill_sb,
+ .fs_flags = FS_ALLOW_IDMAP,
};
static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
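virtio_fs_put_locked()/virtio_fs_put() above is the standard locked/unlocked helper split: the _locked variant documents and asserts its locking contract, while the plain variant takes the mutex itself. The same pattern in the abstract (names hypothetical):

        static void obj_put_locked(struct obj *o)
        {
                lockdep_assert_held(&obj_mutex);   /* contract is enforced */
                kobject_put(&o->kobj);
        }

        static void obj_put(struct obj *o)
        {
                mutex_lock(&obj_mutex);
                obj_put_locked(o);
                mutex_unlock(&obj_mutex);
        }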
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 10d5acd3f742..68fc8af14700 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -139,35 +139,6 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
}
/**
- * gfs2_jdata_writepage - Write complete page
- * @page: Page to write
- * @wbc: The writeback control
- *
- * Returns: errno
- *
- */
-
-static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
-
- if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
- goto out;
- if (folio_test_checked(folio) || current->journal_info)
- goto out_ignore;
- return __gfs2_jdata_write_folio(folio, wbc);
-
-out_ignore:
- folio_redirty_for_writepage(wbc, folio);
-out:
- folio_unlock(folio);
- return 0;
-}
-
-/**
* gfs2_writepages - Write a bunch of dirty pages back to disk
* @mapping: The mapping to write
* @wbc: Write-back control
@@ -748,7 +719,6 @@ static const struct address_space_operations gfs2_aops = {
};
static const struct address_space_operations gfs2_jdata_aops = {
- .writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08982937b5df..f7dd64856c9b 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1057,7 +1057,7 @@ retry:
}
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
pagefault_enable();
if (ret > 0)
written += ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 12a769077ea0..269c3bc7fced 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1885,14 +1885,16 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
unsigned long delay = 0;
- unsigned long holdtime;
- unsigned long now = jiffies;
gfs2_glock_hold(gl);
spin_lock(&gl->gl_lockref.lock);
- holdtime = gl->gl_tchange + gl->gl_hold_time;
if (!list_empty(&gl->gl_holders) &&
gl->gl_name.ln_type == LM_TYPE_INODE) {
+ unsigned long now = jiffies;
+ unsigned long holdtime;
+
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+
if (time_before(now, holdtime))
delay = holdtime - now;
if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
@@ -2249,6 +2251,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
destroy_workqueue(sdp->sd_glock_wq);
+ sdp->sd_glock_wq = NULL;
}
static const char *state2str(unsigned state)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6ee6013fb825..f9c5089783d2 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -80,15 +80,6 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
brelse(bd->bd_bh);
}
-static int __gfs2_writepage(struct folio *folio, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(&folio->page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
-}
-
/**
* gfs2_ail1_start_one - Start I/O on a transaction
* @sdp: The superblock
@@ -140,7 +131,7 @@ __acquires(&sdp->sd_ail_lock)
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
+ ret = mapping->a_ops->writepages(mapping, wbc);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
@@ -149,6 +140,7 @@ __acquires(&sdp->sd_ail_lock)
spin_lock(&sdp->sd_ail_lock);
if (ret == -ENODATA) /* if a jdata write into a new hole */
ret = 0; /* ignore it */
+ mapping_set_error(mapping, ret);
if (ret || wbc->nr_to_write <= 0)
break;
return -EBUSY;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 2b26e8d529aa..fea3efcc2f93 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -30,9 +30,9 @@
#include "util.h"
#include "trace_gfs2.h"
-static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
+static void gfs2_aspace_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(page);
struct buffer_head *bh, *head;
int nr_underway = 0;
blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
@@ -66,8 +66,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
} while ((bh = bh->b_this_page) != head);
/*
- * The page and its buffers are protected by PageWriteback(), so we can
- * drop the bh refcounts early.
+ * The folio and its buffers are protected from truncation by
+ * the writeback flag, so we can drop the bh refcounts early.
*/
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
@@ -84,21 +84,31 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
if (nr_underway == 0)
folio_end_writeback(folio);
+}
- return 0;
+static int gfs2_aspace_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ gfs2_aspace_write_folio(folio, wbc);
+
+ return error;
}
const struct address_space_operations gfs2_meta_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .writepage = gfs2_aspace_writepage,
+ .writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
};
const struct address_space_operations gfs2_rgrp_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .writepage = gfs2_aspace_writepage,
+ .writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
};
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff1f3e3dc65c..e83d293c3614 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1307,7 +1307,8 @@ fail_debug:
fail_delete_wq:
destroy_workqueue(sdp->sd_delete_wq);
fail_glock_wq:
- destroy_workqueue(sdp->sd_glock_wq);
+ if (sdp->sd_glock_wq)
+ destroy_workqueue(sdp->sd_glock_wq);
fail_free:
free_sbd(sdp);
sb->s_fs_info = NULL;
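Together with the sdp->sd_glock_wq = NULL added in gfs2_gl_hash_clear() above, the NULL check makes the workqueue teardown idempotent: whichever error or unmount path runs second sees the cleared pointer and skips destroy_workqueue(). In general form (illustrative):

        if (sdp->sd_glock_wq) {
                destroy_workqueue(sdp->sd_glock_wq);
                sdp->sd_glock_wq = NULL;   /* guard against a second teardown */
        }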
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 6d1878b99b30..4a0ce131e233 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -487,15 +487,15 @@ void hfs_file_truncate(struct inode *inode)
if (inode->i_size > HFS_I(inode)->phys_size) {
struct address_space *mapping = inode->i_mapping;
void *fsdata = NULL;
- struct page *page;
+ struct folio *folio;
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
- res = hfs_write_begin(NULL, mapping, size + 1, 0, &page,
+ res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio,
&fsdata);
if (!res) {
res = generic_write_end(NULL, mapping, size + 1, 0, 0,
- page, fsdata);
+ folio, fsdata);
}
if (res)
inode->i_size = HFS_I(inode)->phys_size;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index b5a6ad5df357..a0c7cb0f79fc 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -202,7 +202,7 @@ extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata);
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 744e10b46904..a81ce7a740b9 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -45,12 +45,11 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}
int hfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 9c51867dddc5..a6d61685ae79 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -554,16 +554,16 @@ void hfsplus_file_truncate(struct inode *inode)
if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
loff_t size = inode->i_size;
res = hfsplus_write_begin(NULL, mapping, size, 0,
- &page, &fsdata);
+ &folio, &fsdata);
if (res)
return;
res = generic_write_end(NULL, mapping, size, 0, 0,
- page, fsdata);
+ folio, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 9e78f181c24f..59ce81dca73f 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -472,7 +472,7 @@ extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata);
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 3d326926c195..f331e9574217 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -39,12 +39,11 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 22df574ca99e..6d1cf2436ead 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -465,31 +465,32 @@ static int hostfs_read_folio(struct file *file, struct folio *folio)
static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
- *pagep = grab_cache_page_write_begin(mapping, index);
- if (!*pagep)
+ *foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(*foliop))
+ return PTR_ERR(*foliop);
return 0;
}
static int hostfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
void *buffer;
- unsigned from = pos & (PAGE_SIZE - 1);
+ size_t from = offset_in_folio(folio, pos);
int err;
- buffer = kmap_local_page(page);
- err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
+ buffer = kmap_local_folio(folio, from);
+ err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied);
kunmap_local(buffer);
- if (!PageUptodate(page) && err == PAGE_SIZE)
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio) && err == folio_size(folio))
+ folio_mark_uptodate(folio);
/*
* If err > 0, write_file has added err to pos, so we are comparing
@@ -497,8 +498,8 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
*/
if (err > 0 && (pos > inode->i_size))
inode->i_size = pos;
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
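Note the subtle win in the hostfs conversion: kmap_local_folio() takes the byte offset directly, so the returned pointer already addresses the data and the old buffer + from arithmetic disappears. Minimal pattern (illustrative; the access must not cross the folio boundary):

        size_t off = offset_in_folio(folio, pos);
        char *p = kmap_local_folio(folio, off);   /* already offset into folio */

        memcpy(p, src, len);                      /* len stays within the folio */
        kunmap_local(p);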
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 1bb8d97cd9ae..449a3fc1b8d9 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -190,12 +190,11 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -206,11 +205,11 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
static int hpfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *pagep, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+ err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len)
hpfs_write_failed(mapping, pos + len);
if (!(err < 0)) {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9f6cff356796..5cf327337e22 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -388,14 +388,14 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
return -EINVAL;
}
static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
BUG();
return -EINVAL;
diff --git a/fs/inode.c b/fs/inode.c
index 10c4619faeef..471ae4a31549 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -439,14 +439,6 @@ static void init_once(void *foo)
}
/*
- * inode->i_lock must be held
- */
-void __iget(struct inode *inode)
-{
- atomic_inc(&inode->i_count);
-}
-
-/*
* get additional reference to inode; caller must already hold one.
*/
void ihold(struct inode *inode)
@@ -472,6 +464,17 @@ static void __inode_add_lru(struct inode *inode, bool rotate)
inode->i_state |= I_REFERENCED;
}
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit)
+{
+ void *bit_address;
+
+ bit_address = inode_state_wait_address(inode, bit);
+ init_wait_var_entry(wqe, bit_address, 0);
+ return __var_waitqueue(bit_address);
+}
+EXPORT_SYMBOL(inode_bit_waitqueue);
+
/*
* Add inode to LRU if needed (inode is unused and clean).
*
@@ -500,25 +503,35 @@ static void inode_unpin_lru_isolating(struct inode *inode)
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
inode->i_state &= ~I_LRU_ISOLATING;
- smp_mb();
- wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
+ /* Called with inode->i_lock which ensures memory ordering. */
+ inode_wake_up_bit(inode, __I_LRU_ISOLATING);
spin_unlock(&inode->i_lock);
}
static void inode_wait_for_lru_isolating(struct inode *inode)
{
- spin_lock(&inode->i_lock);
- if (inode->i_state & I_LRU_ISOLATING) {
- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
- wait_queue_head_t *wqh;
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
- wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
+ lockdep_assert_held(&inode->i_lock);
+ if (!(inode->i_state & I_LRU_ISOLATING))
+ return;
+
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
+ for (;;) {
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+ /*
+ * Checking I_LRU_ISOLATING with inode->i_lock guarantees
+ * memory ordering.
+ */
+ if (!(inode->i_state & I_LRU_ISOLATING))
+ break;
spin_unlock(&inode->i_lock);
- __wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
+ schedule();
spin_lock(&inode->i_lock);
- WARN_ON(inode->i_state & I_LRU_ISOLATING);
}
- spin_unlock(&inode->i_lock);
+ finish_wait(wq_head, &wqe.wq_entry);
+ WARN_ON(inode->i_state & I_LRU_ISOLATING);
}
/**
@@ -595,6 +608,7 @@ void dump_mapping(const struct address_space *mapping)
struct hlist_node *dentry_first;
struct dentry *dentry_ptr;
struct dentry dentry;
+ char fname[64] = {};
unsigned long ino;
/*
@@ -631,11 +645,14 @@ void dump_mapping(const struct address_space *mapping)
return;
}
+ if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
+ strscpy(fname, "<invalid>");
/*
- * if dentry is corrupted, the %pd handler may still crash,
- * but it's unlikely that we reach here with a corrupt mapping
+ * Even if strncpy_from_kernel_nofault() succeeded,
+ * the fname could be unreliable
*/
- pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
+ pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
+ a_ops, ino, fname);
}
void clear_inode(struct inode *inode)
@@ -690,6 +707,7 @@ static void evict(struct inode *inode)
inode_sb_list_del(inode);
+ spin_lock(&inode->i_lock);
inode_wait_for_lru_isolating(inode);
/*
@@ -699,6 +717,7 @@ static void evict(struct inode *inode)
* the inode. We just have to wait for running writeback to finish.
*/
inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
if (op->evict_inode) {
op->evict_inode(inode);
@@ -722,7 +741,13 @@ static void evict(struct inode *inode)
* used as an indicator whether blocking on it is safe.
*/
spin_lock(&inode->i_lock);
- wake_up_bit(&inode->i_state, __I_NEW);
+ /*
+ * Pairs with the barrier in prepare_to_wait_event() to make sure
+ * ___wait_var_event() either sees the bit cleared or
+ * waitqueue_active() check in wake_up_var() sees the waiter.
+ */
+ smp_mb();
+ inode_wake_up_bit(inode, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
spin_unlock(&inode->i_lock);
@@ -770,6 +795,10 @@ again:
continue;
spin_lock(&inode->i_lock);
+ if (atomic_read(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
spin_unlock(&inode->i_lock);
continue;
@@ -1130,8 +1159,13 @@ void unlock_new_inode(struct inode *inode)
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW & ~I_CREATING;
+ /*
+ * Pairs with the barrier in prepare_to_wait_event() to make sure
+ * ___wait_var_event() either sees the bit cleared or
+ * waitqueue_active() check in wake_up_var() sees the waiter.
+ */
smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
@@ -1142,8 +1176,13 @@ void discard_new_inode(struct inode *inode)
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
+ /*
+ * Pairs with the barrier in prepare_to_wait_event() to make sure
+ * ___wait_var_event() either sees the bit cleared or
+ * waitqueue_active() check in wake_up_var() sees the waiter.
+ */
smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
+ inode_wake_up_bit(inode, __I_NEW);
spin_unlock(&inode->i_lock);
iput(inode);
}
@@ -1570,9 +1609,7 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
again:
- spin_lock(&inode_hash_lock);
- inode = find_inode_fast(sb, head, ino, true);
- spin_unlock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino, false);
if (inode) {
if (IS_ERR(inode))
@@ -2334,8 +2371,8 @@ EXPORT_SYMBOL(inode_needs_sync);
*/
static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
{
- wait_queue_head_t *wq;
- DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+ struct wait_bit_queue_entry wqe;
+ struct wait_queue_head *wq_head;
/*
* Handle racing against evict(), see that routine for more details.
@@ -2346,14 +2383,14 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
return;
}
- wq = bit_waitqueue(&inode->i_state, __I_NEW);
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
+ prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
if (is_inode_hash_locked)
spin_unlock(&inode_hash_lock);
schedule();
- finish_wait(wq, &wait.wq_entry);
+ finish_wait(wq_head, &wqe.wq_entry);
if (is_inode_hash_locked)
spin_lock(&inode_hash_lock);
rcu_read_lock();
@@ -2502,18 +2539,11 @@ EXPORT_SYMBOL(inode_owner_or_capable);
/*
* Direct i/o helper functions
*/
-static void __inode_dio_wait(struct inode *inode)
+bool inode_dio_finished(const struct inode *inode)
{
- wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
- DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
- do {
- prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&inode->i_dio_count))
- schedule();
- } while (atomic_read(&inode->i_dio_count));
- finish_wait(wq, &q.wq_entry);
+ return atomic_read(&inode->i_dio_count) == 0;
}
+EXPORT_SYMBOL(inode_dio_finished);
/**
* inode_dio_wait - wait for outstanding DIO requests to finish
@@ -2527,11 +2557,17 @@ static void __inode_dio_wait(struct inode *inode)
*/
void inode_dio_wait(struct inode *inode)
{
- if (atomic_read(&inode->i_dio_count))
- __inode_dio_wait(inode);
+ wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
}
EXPORT_SYMBOL(inode_dio_wait);
+void inode_dio_wait_interruptible(struct inode *inode)
+{
+ wait_var_event_interruptible(&inode->i_dio_count,
+ inode_dio_finished(inode));
+}
+EXPORT_SYMBOL(inode_dio_wait_interruptible);
+
/*
* inode_set_flags - atomically set some inode flags
*
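The DIO wait now relies on the generic var-event machinery; the waker side is expected to pair with it via wake_up_var() on the same address, roughly as follows (hedged sketch; the exact waker lives in inode_dio_end()):

        /* waiter */
        wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));

        /* waker, on DIO completion */
        if (atomic_dec_and_test(&inode->i_dio_count))
                wake_up_var(&inode->i_dio_count);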
diff --git a/fs/internal.h b/fs/internal.h
index cdd73209eecb..8c1b7acbbe8f 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -337,3 +337,4 @@ static inline bool path_mounted(const struct path *path)
{
return path->mnt->mnt_root == path->dentry;
}
+void file_f_owner_release(struct file *file);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 64776891120c..6e0c954388d4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -235,9 +235,9 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
loff_t cloned;
int ret;
- if (!src_file.file)
+ if (!fd_file(src_file))
return -EBADF;
- cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
+ cloned = vfs_clone_file_range(fd_file(src_file), off, dst_file, destoff,
olen, 0);
if (cloned < 0)
ret = cloned;
@@ -895,16 +895,16 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
struct fd f = fdget(fd);
int error;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl(fd_file(f), cmd, arg);
if (error)
goto out;
- error = do_vfs_ioctl(f.file, fd, cmd, arg);
+ error = do_vfs_ioctl(fd_file(f), fd, cmd, arg);
if (error == -ENOIOCTLCMD)
- error = vfs_ioctl(f.file, cmd, arg);
+ error = vfs_ioctl(fd_file(f), cmd, arg);
out:
fdput(f);
@@ -953,32 +953,32 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
struct fd f = fdget(fd);
int error;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = security_file_ioctl_compat(f.file, cmd, arg);
+ error = security_file_ioctl_compat(fd_file(f), cmd, arg);
if (error)
goto out;
switch (cmd) {
/* FICLONE takes an int argument, so don't use compat_ptr() */
case FICLONE:
- error = ioctl_file_clone(f.file, arg, 0, 0, 0);
+ error = ioctl_file_clone(fd_file(f), arg, 0, 0, 0);
break;
#if defined(CONFIG_X86_64)
/* these get messy on amd64 due to alignment differences */
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ error = compat_ioctl_preallocate(fd_file(f), 0, compat_ptr(arg));
break;
case FS_IOC_UNRESVSP_32:
case FS_IOC_UNRESVSP64_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_PUNCH_HOLE,
compat_ptr(arg));
break;
case FS_IOC_ZERO_RANGE_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_ZERO_RANGE,
compat_ptr(arg));
break;
#endif
@@ -998,13 +998,13 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
* argument.
*/
default:
- error = do_vfs_ioctl(f.file, fd, cmd,
+ error = do_vfs_ioctl(fd_file(f), fd, cmd,
(unsigned long)compat_ptr(arg));
if (error != -ENOIOCTLCMD)
break;
- if (f.file->f_op->compat_ioctl)
- error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
+ if (fd_file(f)->f_op->compat_ioctl)
+ error = fd_file(f)->f_op->compat_ioctl(fd_file(f), cmd, arg);
if (error == -ENOIOCTLCMD)
error = -ENOTTY;
break;
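The f.file to fd_file(f) churn is mechanical: struct fd is being made opaque, and the accessor becomes the only sanctioned way to reach the file pointer. Before/after in one sketch (illustrative):

        struct fd f = fdget(fd);

        if (!fd_file(f))                   /* was: if (!f.file) */
                return -EBADF;
        error = vfs_ioctl(fd_file(f), cmd, arg);
        fdput(f);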
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f420c53d86ac..11ea747228ae 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -23,7 +23,6 @@
#define IOEND_BATCH_SIZE 4096
-typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
* Structure allocated for each folio to track per-block uptodate, dirty state
* and I/O completions.
@@ -900,7 +899,7 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t bh_written;
bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
- len, copied, &folio->page, NULL);
+ len, copied, folio, NULL);
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
return bh_written == copied;
}
@@ -1022,13 +1021,14 @@ retry:
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops, void *private)
{
struct iomap_iter iter = {
.inode = iocb->ki_filp->f_mapping->host,
.pos = iocb->ki_pos,
.len = iov_iter_count(i),
.flags = IOMAP_WRITE,
+ .private = private,
};
ssize_t ret;
@@ -1046,15 +1046,14 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
-static int iomap_write_delalloc_ifs_punch(struct inode *inode,
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
struct folio *folio, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
unsigned int first_blk, last_blk, i;
loff_t last_byte;
u8 blkbits = inode->i_blkbits;
struct iomap_folio_state *ifs;
- int ret = 0;
/*
* When we have per-block dirty tracking, there can be
@@ -1064,47 +1063,35 @@ static int iomap_write_delalloc_ifs_punch(struct inode *inode,
*/
ifs = folio->private;
if (!ifs)
- return ret;
+ return;
last_byte = min_t(loff_t, end_byte - 1,
folio_pos(folio) + folio_size(folio) - 1);
first_blk = offset_in_folio(folio, start_byte) >> blkbits;
last_blk = offset_in_folio(folio, last_byte) >> blkbits;
for (i = first_blk; i <= last_blk; i++) {
- if (!ifs_block_is_dirty(folio, ifs, i)) {
- ret = punch(inode, folio_pos(folio) + (i << blkbits),
- 1 << blkbits);
- if (ret)
- return ret;
- }
+ if (!ifs_block_is_dirty(folio, ifs, i))
+ punch(inode, folio_pos(folio) + (i << blkbits),
+ 1 << blkbits, iomap);
}
-
- return ret;
}
-
-static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
- int ret = 0;
-
if (!folio_test_dirty(folio))
- return ret;
+ return;
/* if dirty, punch up to offset */
if (start_byte > *punch_start_byte) {
- ret = punch(inode, *punch_start_byte,
- start_byte - *punch_start_byte);
- if (ret)
- return ret;
+ punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+ iomap);
}
/* Punch non-dirty blocks within folio */
- ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
- end_byte, punch);
- if (ret)
- return ret;
+ iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+ iomap, punch);
/*
* Make sure the next punch start is correctly bound to
@@ -1112,8 +1099,6 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
*/
*punch_start_byte = min_t(loff_t, end_byte,
folio_pos(folio) + folio_size(folio));
-
- return ret;
}
/*
@@ -1133,13 +1118,12 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
* This function uses [start_byte, end_byte) intervals (i.e. open ended) to
* simplify range iterations.
*/
-static int iomap_write_delalloc_scan(struct inode *inode,
+static void iomap_write_delalloc_scan(struct inode *inode,
loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
while (start_byte < end_byte) {
struct folio *folio;
- int ret;
/* grab locked page */
folio = filemap_lock_folio(inode->i_mapping,
@@ -1150,20 +1134,14 @@ static int iomap_write_delalloc_scan(struct inode *inode,
continue;
}
- ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
- start_byte, end_byte, punch);
- if (ret) {
- folio_unlock(folio);
- folio_put(folio);
- return ret;
- }
+ iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+ start_byte, end_byte, iomap, punch);
/* move offset to start of next folio in range */
start_byte = folio_next_index(folio) << PAGE_SHIFT;
folio_unlock(folio);
folio_put(folio);
}
- return 0;
}
/*
@@ -1199,12 +1177,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
* require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
* the code to subtle off-by-one bugs....
*/
-static int iomap_write_delalloc_release(struct inode *inode,
- loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
+static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch)
{
loff_t punch_start_byte = start_byte;
loff_t scan_end_byte = min(i_size_read(inode), end_byte);
- int error = 0;
/*
* Lock the mapping to avoid races with page faults re-instantiating
@@ -1221,13 +1199,15 @@ static int iomap_write_delalloc_release(struct inode *inode,
/*
* If there is no more data to scan, all that is left is to
* punch out the remaining range.
+ *
+ * Note that mapping_seek_hole_data is only supposed to return
+ * either an offset or -ENXIO, so WARN on any other error as
+ * that would be an API change without updating the callers.
*/
if (start_byte == -ENXIO || start_byte == scan_end_byte)
break;
- if (start_byte < 0) {
- error = start_byte;
+ if (WARN_ON_ONCE(start_byte < 0))
goto out_unlock;
- }
WARN_ON_ONCE(start_byte < punch_start_byte);
WARN_ON_ONCE(start_byte > scan_end_byte);
@@ -1237,28 +1217,31 @@ static int iomap_write_delalloc_release(struct inode *inode,
*/
data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
scan_end_byte, SEEK_HOLE);
- if (data_end < 0) {
- error = data_end;
+ if (WARN_ON_ONCE(data_end < 0))
goto out_unlock;
- }
- WARN_ON_ONCE(data_end <= start_byte);
+
+ /*
+ * If we race with post-direct I/O invalidation of the page cache,
+ * there might be no data left at start_byte.
+ */
+ if (data_end == start_byte)
+ continue;
+
+ WARN_ON_ONCE(data_end < start_byte);
WARN_ON_ONCE(data_end > scan_end_byte);
- error = iomap_write_delalloc_scan(inode, &punch_start_byte,
- start_byte, data_end, punch);
- if (error)
- goto out_unlock;
+ iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+ data_end, iomap, punch);
/* The next data search starts at the end of this one. */
start_byte = data_end;
}
if (punch_start_byte < end_byte)
- error = punch(inode, punch_start_byte,
- end_byte - punch_start_byte);
+ punch(inode, punch_start_byte, end_byte - punch_start_byte,
+ iomap);
out_unlock:
filemap_invalidate_unlock(inode->i_mapping);
- return error;
}
/*
@@ -1291,20 +1274,20 @@ out_unlock:
* ->punch
* internal filesystem allocation lock
*/
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length,
- ssize_t written, iomap_punch_t punch)
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+ loff_t pos, loff_t length, ssize_t written, unsigned flags,
+ struct iomap *iomap, iomap_punch_t punch)
{
loff_t start_byte;
loff_t end_byte;
unsigned int blocksize = i_blocksize(inode);
if (iomap->type != IOMAP_DELALLOC)
- return 0;
+ return;
/* If we didn't reserve the blocks, we're not allowed to punch them. */
if (!(iomap->flags & IOMAP_F_NEW))
- return 0;
+ return;
/*
* start_byte refers to the first unused block after a short write. If
@@ -1319,26 +1302,35 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
/* Nothing to do if we've written the entire delalloc extent */
if (start_byte >= end_byte)
- return 0;
+ return;
- return iomap_write_delalloc_release(inode, start_byte, end_byte,
- punch);
+ iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
+ punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
struct iomap *iomap = &iter->iomap;
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
loff_t written = 0;
- /* don't bother with blocks that are not shared to start with */
+ /* Don't bother with blocks that are not shared to start with. */
if (!(iomap->flags & IOMAP_F_SHARED))
return length;
- /* don't bother with holes or unwritten extents */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+
+ /*
+ * Don't bother with holes or unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as
+ * unsharing requires providing a separate source map, and the presence
+ * of one is a good indicator that unsharing is needed, unlike
+ * IOMAP_F_SHARED which can be set for any data that goes into the COW
+ * fork for XFS.
+ */
+ if (iter->srcmap.type == IOMAP_HOLE ||
+ iter->srcmap.type == IOMAP_UNWRITTEN)
return length;
do {
@@ -1393,16 +1385,53 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+/*
+ * Flush the remaining range of the iter and mark the current mapping stale.
+ * This is used when zero range sees an unwritten mapping that may have had
+ * dirty pagecache over it.
+ */
+static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
+{
+ struct address_space *mapping = i->inode->i_mapping;
+ loff_t end = i->pos + i->len - 1;
+
+ i->iomap.flags |= IOMAP_F_STALE;
+ return filemap_write_and_wait_range(mapping, i->pos, end);
+}
+
+static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ bool *range_dirty)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
loff_t written = 0;
- /* already zeroed? we're done. */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ /*
+ * We must zero subranges of unwritten mappings that might be dirty in
+ * pagecache from previous writes. We only know whether the entire range
+ * was clean or not, however, and dirty folios may have been written
+ * back or reclaimed at any point after mapping lookup.
+ *
+ * The easiest way to deal with this is to flush pagecache to trigger
+ * any pending unwritten conversions and then grab the updated extents
+ * from the fs. The flush may change the current mapping, so mark it
+ * stale for the iterator to remap it for the next pass to handle
+ * properly.
+ *
+ * Note that holes are treated the same as unwritten because zero range
+ * is (ab)used for partial folio zeroing in some cases. Hole backed
+ * post-eof ranges can be dirtied via mapped write and the flush
+ * triggers writeback time post-eof zeroing.
+ */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) {
+ if (*range_dirty) {
+ *range_dirty = false;
+ return iomap_zero_iter_flush_and_stale(iter);
+ }
+ /* range is clean and already zeroed, nothing to do */
return length;
+ }
do {
struct folio *folio;
@@ -1450,9 +1479,27 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
.flags = IOMAP_ZERO,
};
int ret;
+ bool range_dirty;
+
+ /*
+ * Zero range wants to skip pre-zeroed (i.e. unwritten) mappings, but
+ * pagecache must be flushed to ensure stale data from previous
+ * buffered writes is not exposed. A flush is only required for certain
+ * types of mappings, but checking pagecache after mapping lookup is
+ * racy with writeback and reclaim.
+ *
+ * Therefore, check the entire range first and pass along whether any
+ * part of it is dirty. If so and an underlying mapping warrants it,
+ * flush the cache at that point. This trades off the occasional false
+ * positive (and spurious flush, if the dirty data and mapping don't
+ * happen to overlap) for simplicity in handling a relatively uncommon
+ * situation.
+ */
+ range_dirty = filemap_range_needs_writeback(inode->i_mapping,
+ pos, pos + len - 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ iter.processed = iomap_zero_iter(&iter, did_zero, &range_dirty);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
@@ -2007,10 +2054,10 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
}
EXPORT_SYMBOL_GPL(iomap_writepages);
-static int __init iomap_init(void)
+static int __init iomap_buffered_init(void)
{
return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
offsetof(struct iomap_ioend, io_bio),
BIOSET_NEED_BVECS);
}
-fs_initcall(iomap_init);
+fs_initcall(iomap_buffered_init);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f3b43d223a46..f637aa0706a3 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -27,6 +27,13 @@
#define IOMAP_DIO_WRITE (1U << 30)
#define IOMAP_DIO_DIRTY (1U << 31)
+/*
+ * Used for sub block zeroing in iomap_dio_zero()
+ */
+#define IOMAP_ZERO_PAGE_SIZE (SZ_64K)
+#define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE))
+static struct page *zero_page;
+
struct iomap_dio {
struct kiocb *iocb;
const struct iomap_dio_ops *dops;
@@ -232,13 +239,20 @@ release_bio:
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
-static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
+static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
loff_t pos, unsigned len)
{
struct inode *inode = file_inode(dio->iocb->ki_filp);
- struct page *page = ZERO_PAGE(0);
struct bio *bio;
+ if (!len)
+ return 0;
+ /*
+ * Max block size supported is 64k
+ */
+ if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE))
+ return -EINVAL;
+
bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
@@ -246,8 +260,9 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- __bio_add_page(bio, page, len, 0);
+ __bio_add_page(bio, zero_page, len, 0);
iomap_dio_submit_bio(iter, dio, bio, pos);
+ return 0;
}
/*
@@ -356,8 +371,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
pad = pos & (fs_block_size - 1);
- if (pad)
- iomap_dio_zero(iter, dio, pos - pad, pad);
+
+ ret = iomap_dio_zero(iter, dio, pos - pad, pad);
+ if (ret)
+ goto out;
}
/*
@@ -431,7 +448,8 @@ zero_tail:
/* zero out from the end of the write to the end of the block */
pad = pos & (fs_block_size - 1);
if (pad)
- iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
+ ret = iomap_dio_zero(iter, dio, pos,
+ fs_block_size - pad);
}
out:
/* Undo iter limitation to current extent */
@@ -753,3 +771,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
+
+static int __init iomap_dio_init(void)
+{
+ zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ IOMAP_ZERO_PAGE_ORDER);
+
+ if (!zero_page)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(iomap_dio_init);
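Replacing ZERO_PAGE(0) with a dedicated buffer lets a single bio segment zero up to a 64k block, which a 4k zero page cannot cover once block sizes larger than the page size are supported. Distilled (illustrative):

        static struct page *zero_page;   /* pre-zeroed, physically contiguous */

        /* init: one 64k allocation instead of reusing ZERO_PAGE(0) */
        zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(SZ_64K));

        /* per request: one segment now covers the whole sub-block range */
        __bio_add_page(bio, zero_page, len, 0);   /* len <= SZ_64K */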
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ee9660e9671c..7755e587f778 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -44,7 +44,7 @@ struct RR_PN_s {
struct SL_component {
__u8 flags;
__u8 len;
- __u8 text[];
+ __u8 text[] __counted_by(len);
} __attribute__ ((packed));
struct RR_SL_s {
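__counted_by(len) ties the flexible text[] array to its length byte, letting CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS check accesses at run time. The annotation requires the counter to be assigned before the array is touched (hedged sketch, allocation site hypothetical):

        struct SL_component *sl;

        sl = kmalloc(struct_size(sl, text, n), GFP_KERNEL);
        if (!sl)
                return -ENOMEM;
        sl->len = n;               /* set the counter first */
        memcpy(sl->text, src, n);  /* access is checked against sl->len */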
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 951f78634adf..b3971e91e8eb 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -79,17 +79,23 @@ __releases(&journal->j_state_lock)
if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
tid_t tid = 0;
+ bool has_transaction = false;
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
- } else if (jbd2_cleanup_journal_tail(journal) == 0) {
- /* We were able to recover space; yay! */
+ } else if (jbd2_cleanup_journal_tail(journal) <= 0) {
+ /*
+ * We were able to recover space or the
+ * journal was aborted due to an error.
+ */
;
- } else if (tid) {
+ } else if (has_transaction) {
/*
* jbd2_journal_commit_transaction() may want
* to take the checkpoint_mutex if JBD2_FLUSHED
@@ -407,6 +413,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
tid_t tid = 0;
unsigned long nr_freed = 0;
unsigned long freed;
+ bool first_set = false;
again:
spin_lock(&journal->j_list_lock);
@@ -426,8 +433,10 @@ again:
else
transaction = journal->j_checkpoint_transactions;
- if (!first_tid)
+ if (!first_set) {
first_tid = transaction->t_tid;
+ first_set = true;
+ }
last_transaction = journal->j_checkpoint_transactions->t_cpprev;
next_transaction = transaction;
last_tid = last_transaction->t_tid;
@@ -457,7 +466,7 @@ again:
spin_unlock(&journal->j_list_lock);
cond_resched();
- if (*nr_to_scan && next_tid)
+ if (*nr_to_scan && journal->j_shrink_transaction)
goto again;
out:
trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
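Both checkpoint.c hunks above fix the same class of bug: tid 0 is a legal transaction ID, so "tid != 0" (or "first_tid != 0") cannot double as "we saw a transaction". The fix records that fact in its own boolean. A small sketch of the corrected shape (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t tid_t;

struct txn { tid_t t_tid; };

/* Returns whether a committing transaction existed; *tid is only
 * meaningful when this returns true, and may legitimately be 0. */
static bool snapshot_committing(const struct txn *committing, tid_t *tid)
{
	if (!committing)
		return false;
	*tid = committing->t_tid;
	return true;
}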
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1ebf2393bfb7..97f487c3d8fc 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -281,6 +281,16 @@ static void journal_kill_thread(journal_t *journal)
write_unlock(&journal->j_state_lock);
}
+static inline bool jbd2_data_needs_escaping(char *data)
+{
+ return *((__be32 *)data) == cpu_to_be32(JBD2_MAGIC_NUMBER);
+}
+
+static inline void jbd2_data_do_escape(char *data)
+{
+ *((unsigned int *)data) = 0;
+}
+
/*
* jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
*
@@ -318,9 +328,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr)
{
- int done_copy_out = 0;
int do_escape = 0;
- char *mapped_data;
struct buffer_head *new_bh;
struct folio *new_folio;
unsigned int new_offset;
@@ -349,37 +357,33 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
* we use that version of the data for the commit.
*/
if (jh_in->b_frozen_data) {
- done_copy_out = 1;
new_folio = virt_to_folio(jh_in->b_frozen_data);
new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
+ do_escape = jbd2_data_needs_escaping(jh_in->b_frozen_data);
+ if (do_escape)
+ jbd2_data_do_escape(jh_in->b_frozen_data);
} else {
+ char *tmp;
+ char *mapped_data;
+
new_folio = bh_in->b_folio;
new_offset = offset_in_folio(new_folio, bh_in->b_data);
- }
-
- mapped_data = kmap_local_folio(new_folio, new_offset);
- /*
- * Fire data frozen trigger if data already wasn't frozen. Do this
- * before checking for escaping, as the trigger may modify the magic
- * offset. If a copy-out happens afterwards, it will have the correct
- * data in the buffer.
- */
- if (!done_copy_out)
+ mapped_data = kmap_local_folio(new_folio, new_offset);
+ /*
+ * Fire the data frozen trigger if the data wasn't already frozen.
+ * Do this before checking for escaping, as the trigger may modify
+ * the magic offset. If a copy-out happens afterwards, it will
+ * have the correct data in the buffer.
+ */
jbd2_buffer_frozen_trigger(jh_in, mapped_data,
jh_in->b_triggers);
-
- /*
- * Check for escaping
- */
- if (*((__be32 *)mapped_data) == cpu_to_be32(JBD2_MAGIC_NUMBER))
- do_escape = 1;
- kunmap_local(mapped_data);
-
- /*
- * Do we need to do a data copy?
- */
- if (do_escape && !done_copy_out) {
- char *tmp;
+ do_escape = jbd2_data_needs_escaping(mapped_data);
+ kunmap_local(mapped_data);
+ /*
+ * Do we need to do a data copy?
+ */
+ if (!do_escape)
+ goto escape_done;
spin_unlock(&jh_in->b_state_lock);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
@@ -406,18 +410,10 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
copy_done:
new_folio = virt_to_folio(jh_in->b_frozen_data);
new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
- done_copy_out = 1;
+ jbd2_data_do_escape(jh_in->b_frozen_data);
}
- /*
- * Did we need to do an escaping? Now we've done all the
- * copying, we can finally do so.
- * b_frozen_data is from jbd2_alloc() which always provides an
- * address from the direct kernels mapping.
- */
- if (do_escape)
- *((unsigned int *)jh_in->b_frozen_data) = 0;
-
+escape_done:
folio_set_bh(new_bh, new_folio, new_offset);
new_bh->b_size = bh_in->b_size;
new_bh->b_bdev = journal->j_dev;
@@ -710,7 +706,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
return -EINVAL;
write_lock(&journal->j_state_lock);
- if (tid <= journal->j_commit_sequence) {
+ if (tid_geq(journal->j_commit_sequence, tid)) {
write_unlock(&journal->j_state_lock);
return -EALREADY;
}
@@ -740,9 +736,9 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
*/
static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
- jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
journal->j_fc_cleanup_callback(journal, 0, tid);
+ jbd2_journal_unlock_updates(journal);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -841,17 +837,12 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
*bh_out = NULL;
- if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) {
- fc_off = journal->j_fc_off;
- blocknr = journal->j_fc_first + fc_off;
- journal->j_fc_off++;
- } else {
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
+ if (journal->j_fc_off + journal->j_fc_first >= journal->j_fc_last)
+ return -EINVAL;
+ fc_off = journal->j_fc_off;
+ blocknr = journal->j_fc_first + fc_off;
+ journal->j_fc_off++;
ret = jbd2_journal_bmap(journal, blocknr, &pblock);
if (ret)
return ret;
@@ -860,7 +851,6 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
if (!bh)
return -ENOMEM;
-
journal->j_fc_wbuf[fc_off] = bh;
*bh_out = bh;
@@ -903,7 +893,7 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
}
EXPORT_SYMBOL(jbd2_fc_wait_bufs);
-int jbd2_fc_release_bufs(journal_t *journal)
+void jbd2_fc_release_bufs(journal_t *journal)
{
struct buffer_head *bh;
int i, j_fc_off;
@@ -917,8 +907,6 @@ int jbd2_fc_release_bufs(journal_t *journal)
put_bh(bh);
journal->j_fc_wbuf[i] = NULL;
}
-
- return 0;
}
EXPORT_SYMBOL(jbd2_fc_release_bufs);
@@ -1944,7 +1932,7 @@ static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags)
if (had_fast_commit)
jbd2_set_feature_fast_commit(journal);
- /* Log is no longer empty */
+ /* Log is empty */
write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_FLUSHED;
write_unlock(&journal->j_state_lock);
@@ -2866,8 +2854,7 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
}
- if (ret)
- spin_lock_init(&ret->b_state_lock);
+ spin_lock_init(&ret->b_state_lock);
return ret;
}
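The tid_geq() change in jbd2_fc_begin_commit() above matters because tids are 32-bit counters that wrap: a plain "<=" misorders values on either side of the wrap point. jbd2's comparison helpers reduce to a signed difference, roughly:

#include <stdint.h>

typedef uint32_t tid_t;

/* Wraparound-safe "x >= y": valid while the two tids are within
 * 2^31 of each other, which the journal guarantees. */
static int tid_geq(tid_t x, tid_t y)
{
	int32_t difference = (int32_t)(x - y);

	return difference >= 0;
}
/* e.g. tid_geq(5, 0xfffffff0u) is true: 5 comes "after" the wrap. */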
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index e12cb145147e..13c18ccc13b0 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -23,10 +23,10 @@
static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *pg, void *fsdata);
+ struct folio *folio, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
@@ -77,29 +77,27 @@ const struct address_space_operations jffs2_file_address_operations =
.write_end = jffs2_write_end,
};
-static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- unsigned char *pg_buf;
+ unsigned char *kaddr;
int ret;
jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
- __func__, inode->i_ino, pg->index << PAGE_SHIFT);
+ __func__, inode->i_ino, folio->index << PAGE_SHIFT);
- BUG_ON(!PageLocked(pg));
+ BUG_ON(!folio_test_locked(folio));
- pg_buf = kmap(pg);
- /* FIXME: Can kmap fail? */
-
- ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+ kaddr = kmap_local_folio(folio, 0);
+ ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT,
PAGE_SIZE);
+ kunmap_local(kaddr);
if (!ret)
- SetPageUptodate(pg);
+ folio_mark_uptodate(folio);
- flush_dcache_page(pg);
- kunmap(pg);
+ flush_dcache_folio(folio);
jffs2_dbg(2, "readpage finished\n");
return ret;
@@ -107,7 +105,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
int __jffs2_read_folio(struct file *file, struct folio *folio)
{
- int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page);
+ int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
folio_unlock(folio);
return ret;
}
@@ -125,9 +123,9 @@ static int jffs2_read_folio(struct file *file, struct folio *folio)
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
- struct page *pg;
+ struct folio *folio;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
@@ -206,29 +204,30 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* page in read_cache_page(), which causes a deadlock.
*/
mutex_lock(&c->alloc_sem);
- pg = grab_cache_page_write_begin(mapping, index);
- if (!pg) {
- ret = -ENOMEM;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto release_sem;
}
- *pagep = pg;
+ *foliop = folio;
/*
- * Read in the page if it wasn't already present. Cannot optimize away
- * the whole page write case until jffs2_write_end can handle the
+ * Read in the folio if it wasn't already present. Cannot optimize away
+ * the whole folio write case until jffs2_write_end can handle the
* case of a short-copy.
*/
- if (!PageUptodate(pg)) {
+ if (!folio_test_uptodate(folio)) {
mutex_lock(&f->sem);
- ret = jffs2_do_readpage_nolock(inode, pg);
+ ret = jffs2_do_readpage_nolock(inode, folio);
mutex_unlock(&f->sem);
if (ret) {
- unlock_page(pg);
- put_page(pg);
+ folio_unlock(folio);
+ folio_put(folio);
goto release_sem;
}
}
- jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
+ jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);
release_sem:
mutex_unlock(&c->alloc_sem);
@@ -238,7 +237,7 @@ out_err:
static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *pg, void *fsdata)
+ struct folio *folio, void *fsdata)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
@@ -252,16 +251,17 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
unsigned aligned_start = start & ~3;
int ret = 0;
uint32_t writtenlen = 0;
+ void *buf;
- jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
- __func__, inode->i_ino, pg->index << PAGE_SHIFT,
- start, end, pg->flags);
+ jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
+ __func__, inode->i_ino, folio_pos(folio),
+ start, end, folio->flags);
/* We need to avoid deadlock with page_cache_read() in
- jffs2_garbage_collect_pass(). So the page must be
+ jffs2_garbage_collect_pass(). So the folio must be
up to date to prevent page_cache_read() from trying
to re-lock it. */
- BUG_ON(!PageUptodate(pg));
+ BUG_ON(!folio_test_uptodate(folio));
if (end == PAGE_SIZE) {
/* When writing out the end of a page, write out the
@@ -276,8 +276,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
if (!ri) {
jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
__func__);
- unlock_page(pg);
- put_page(pg);
+ folio_unlock(folio);
+ folio_put(folio);
return -ENOMEM;
}
@@ -289,15 +289,11 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
- /* In 2.4, it was already kmapped by generic_file_write(). Doesn't
- hurt to do it again. The alternative is ifdefs, which are ugly. */
- kmap(pg);
-
- ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
- (pg->index << PAGE_SHIFT) + aligned_start,
+ buf = kmap_local_folio(folio, aligned_start);
+ ret = jffs2_write_inode_range(c, f, ri, buf,
+ folio_pos(folio) + aligned_start,
end - aligned_start, &writtenlen);
-
- kunmap(pg);
+ kunmap_local(buf);
if (ret)
mapping_set_error(mapping, ret);
@@ -323,12 +319,12 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
it gets reread */
jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
__func__);
- ClearPageUptodate(pg);
+ folio_clear_uptodate(folio);
}
jffs2_dbg(1, "%s() returning %d\n",
__func__, writtenlen > 0 ? writtenlen : ret);
- unlock_page(pg);
- put_page(pg);
+ folio_unlock(folio);
+ folio_put(folio);
return writtenlen > 0 ? writtenlen : ret;
}
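A detail worth noting in the conversion above: grab_cache_page_write_begin() reported failure as NULL (forcing callers to guess -ENOMEM), while __filemap_get_folio() encodes the real errno in the pointer. A userspace sketch of that ERR_PTR convention (simplified from the kernel's err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *get_folio(int fail)
{
	static int object;			/* stand-in for a real folio */

	if (fail)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return &object;
}

int main(void)
{
	void *folio = get_folio(1);

	if (IS_ERR(folio))
		printf("error: %ld\n", PTR_ERR(folio));	/* prints error: -12 */
	return 0;
}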
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5c6602f3c189..822949d0eb00 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- struct page *page;
+ struct folio *folio;
unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri));
@@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}
- /* The rules state that we must obtain the page lock *before* f->sem, so
+ /* The rules state that we must obtain the folio lock *before* f->sem, so
* drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
* actually going to *change* so we're safe; we only allow reading.
*
* It is important to note that jffs2_write_begin() will ensure that its
- * page is marked Uptodate before allocating space. That means that if we
- * end up here trying to GC the *same* page that jffs2_write_begin() is
- * trying to write out, read_cache_page() will not deadlock. */
+ * folio is marked uptodate before allocating space. That means that if we
+ * end up here trying to GC the *same* folio that jffs2_write_begin() is
+ * trying to write out, read_cache_folio() will not deadlock. */
mutex_unlock(&f->sem);
- page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
+ folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
__jffs2_read_folio, NULL);
- if (IS_ERR(page)) {
- pr_warn("read_cache_page() returned error: %ld\n",
- PTR_ERR(page));
+ if (IS_ERR(folio)) {
+ pr_warn("read_cache_folio() returned error: %ld\n",
+ PTR_ERR(folio));
mutex_lock(&f->sem);
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
- pg_ptr = kmap(page);
+ pg_ptr = kmap_local_folio(folio, 0);
mutex_lock(&f->sem);
offset = start;
@@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
}
}
- kunmap(page);
- put_page(page);
+ folio_release_kmap(folio, pg_ptr);
return ret;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 1a6b5921d17a..07cfdc440596 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -292,11 +292,11 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);
@@ -304,12 +304,12 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
}
static int jfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, struct page *page,
+ loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
jfs_write_failed(mapping, pos + len);
return ret;
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index 575cb2ba74fc..5f4b305030ad 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -65,7 +65,7 @@ void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)
int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
- struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+ struct bmap *bmp;
struct super_block *sb = ipbmap->i_sb;
int agno, agno_end;
u64 start, end, minlen;
@@ -83,10 +83,15 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
if (minlen == 0)
minlen = 1;
+ down_read(&sb->s_umount);
+ bmp = JFS_SBI(ip->i_sb)->bmap;
+
if (minlen > bmp->db_agsize ||
start >= bmp->db_mapsize ||
- range->len < sb->s_blocksize)
+ range->len < sb->s_blocksize) {
+ up_read(&sb->s_umount);
return -EINVAL;
+ }
if (end >= bmp->db_mapsize)
end = bmp->db_mapsize - 1;
@@ -100,6 +105,8 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
trimmed += dbDiscardAG(ip, agno, minlen);
agno++;
}
+
+ up_read(&sb->s_umount);
range->len = trimmed << sb->s_blocksize_bits;
return 0;
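The jfs_ioc_trim() fix above is about lifetime, not just lock ordering: bmap can be freed and reallocated by a concurrent remount, so the pointer must be sampled after taking sb->s_umount for read and used only while it is held. A pthread analogue of that shape (structure names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct bmap { unsigned long db_agsize; };

static pthread_rwlock_t umount_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct bmap *bmap;	/* replaced/freed only under the write lock */

static int trim(unsigned long minlen)
{
	int ret = 0;

	pthread_rwlock_rdlock(&umount_lock);
	struct bmap *bmp = bmap;	/* sample only after taking the lock */

	if (!bmp || minlen > bmp->db_agsize)
		ret = -1;
	/* ... discard work would happen here, still under the lock ... */
	pthread_rwlock_unlock(&umount_lock);
	return ret;
}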
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 5713994328cb..974ecf5e0d95 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
}
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
- if (!bmp->db_numag) {
+ if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
err = -EINVAL;
goto err_release_metapage;
}
@@ -652,7 +652,7 @@ int dbNextAG(struct inode *ipbmap)
* average free space.
*/
for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
- if (agpref == bmp->db_numag)
+ if (agpref >= bmp->db_numag)
agpref = 0;
if (atomic_read(&bmp->db_active[agpref]))
@@ -2944,9 +2944,10 @@ static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
{
int ti, n = 0, k, x = 0;
- int max_size;
+ int max_size, max_idx;
max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+ max_idx = is_ctl ? LPERCTL : LPERDMAP;
/* first check the root of the tree to see if there is
* sufficient free space.
@@ -2978,6 +2979,8 @@ static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
*/
assert(n < 4);
}
+ if (le32_to_cpu(tp->dmt_leafidx) >= max_idx)
+ return -ENOSPC;
/* set the return to the leftmost leaf describing sufficient
* free space.
@@ -3022,7 +3025,7 @@ static int dbFindBits(u32 word, int l2nb)
/* scan the word for nb free bits at nb alignments.
*/
- for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
+ for (bitno = 0; mask != 0; bitno += nb, mask = (mask >> nb)) {
if ((mask & word) == mask)
break;
}
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 1407feccbc2d..a360b24ed320 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1360,7 +1360,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
- if (agno < 0 || agno > dn_numag)
+ if (agno < 0 || agno > dn_numag || agno >= MAXAG)
return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 2999ed5d83f5..0fb05e314edf 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -434,6 +434,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
int rc;
int quota_allocation = 0;
+ memset(&ea_buf->new_ea, 0, sizeof(ea_buf->new_ea));
+
/* When fsck.jfs clears a bad ea, it doesn't clear the size */
if (ji->ea.flag == 0)
ea_size = 0;
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index c429c42a6867..9ff37ae650ea 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -178,10 +178,10 @@ ssize_t kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
struct fd f = fdget(fd);
ssize_t ret = -EBADF;
- if (!f.file || !(f.file->f_mode & FMODE_READ))
+ if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ))
goto out;
- ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
+ ret = kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id);
out:
fdput(f);
return ret;
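The f.file → fd_file(f) churn seen here and in later hunks is mechanical: an accessor hides struct fd's layout so the representation can change (it later became a single tagged word) without touching callers. Roughly, and purely as a hypothetical userspace illustration, not the kernel's actual definitions:

struct file;				/* opaque to callers */

struct fd {
	unsigned long word;		/* file pointer | low flag bits */
};

#define FDPUT_FPUT	1UL		/* example flag stored in bit 0 */

static inline struct file *fd_file(struct fd f)
{
	return (struct file *)(f.word & ~3UL);	/* strip the flag bits */
}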
diff --git a/fs/libfs.c b/fs/libfs.c
index b64b4c44cfea..46966fd8bcf9 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -914,7 +914,7 @@ static int simple_read_folio(struct file *file, struct folio *folio)
int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct folio *folio;
@@ -923,7 +923,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
if (IS_ERR(folio))
return PTR_ERR(folio);
- *pagep = &folio->page;
+ *foliop = folio;
if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
size_t from = offset_in_folio(folio, pos);
@@ -942,11 +942,11 @@ EXPORT_SYMBOL(simple_write_begin);
* @pos: "
* @len: "
* @copied: "
- * @page: "
+ * @folio: "
* @fsdata: "
*
- * simple_write_end does the minimum needed for updating a page after writing is
- * done. It has the same API signature as the .write_end of
+ * simple_write_end does the minimum needed for updating a folio after
+ * writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for
* FSes that don't need any other processing. i_mutex is assumed to be held.
* Block based filesystems should use generic_write_end().
@@ -959,9 +959,8 @@ EXPORT_SYMBOL(simple_write_begin);
*/
static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
@@ -2003,13 +2002,19 @@ bool inode_maybe_inc_iversion(struct inode *inode, bool force)
* information, but the legacy inode_inc_iversion code used a spinlock
* to serialize increments.
*
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
+ * We add a full memory barrier to ensure that any de facto ordering
+ * with other state is preserved (either implicitly coming from cmpxchg
+ * or explicitly from smp_mb if we don't know upfront if we will execute
+ * the former).
*
- * This barrier pairs with the barrier in inode_query_iversion()
+ * These barriers pair with inode_query_iversion().
*/
- smp_mb();
cur = inode_peek_iversion_raw(inode);
+ if (!force && !(cur & I_VERSION_QUERIED)) {
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ }
+
do {
/* If flag is clear then we needn't do anything */
if (!force && !(cur & I_VERSION_QUERIED))
@@ -2038,20 +2043,22 @@ EXPORT_SYMBOL(inode_maybe_inc_iversion);
u64 inode_query_iversion(struct inode *inode)
{
u64 cur, new;
+ bool fenced = false;
+ /*
+ * Memory barriers (implicit in cmpxchg, explicit in smp_mb) pair with
+ * inode_maybe_inc_iversion(), see that routine for more details.
+ */
cur = inode_peek_iversion_raw(inode);
do {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
- /*
- * This barrier (and the implicit barrier in the
- * cmpxchg below) pairs with the barrier in
- * inode_maybe_inc_iversion().
- */
- smp_mb();
+ if (!fenced)
+ smp_mb();
break;
}
+ fenced = true;
new = cur | I_VERSION_QUERIED;
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return cur >> I_VERSION_QUERIED_SHIFT;
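The two i_version hunks above implement one protocol: bit 0 of the counter is a "queried" flag, writers bump the counter (and clear the flag) only when a reader observed the old value, and readers set the flag with a cmpxchg. A compact userspace model of the scheme (C11 atomics standing in for atomic64_t; a sketch of the semantics, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define QUERIED 1ULL			/* bit 0; the counter lives above it */

static atomic_ullong iversion;

static void maybe_inc(bool force)
{
	unsigned long long cur = atomic_load(&iversion);

	do {
		if (!force && !(cur & QUERIED))
			return;		/* nobody looked; skip the bump */
	} while (!atomic_compare_exchange_weak(&iversion, &cur,
					       (cur + 2) & ~QUERIED));
}

static unsigned long long query(void)
{
	unsigned long long cur = atomic_load(&iversion);

	/* Mark the value as observed so the next change must bump it. */
	while (!(cur & QUERIED) &&
	       !atomic_compare_exchange_weak(&iversion, &cur, cur | QUERIED))
		;
	return cur >> 1;
}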
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index c11516801784..5e6877c37f73 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -440,7 +440,7 @@ nlm_bind_host(struct nlm_host *host)
if ((clnt = host->h_rpcclnt) != NULL) {
nlm_rebind_host(host);
} else {
- unsigned long increment = nlmsvc_timeout;
+ unsigned long increment = nlm_timeout * HZ;
struct rpc_timeout timeparms = {
.to_initval = increment,
.to_increment = increment,
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index ab8042a5b895..4ec22c2f2ea3 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -53,7 +53,6 @@ EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static struct svc_serv *nlmsvc_serv;
-unsigned long nlmsvc_timeout;
static void nlmsvc_request_retry(struct timer_list *tl)
{
@@ -68,7 +67,7 @@ unsigned int lockd_net_id;
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
-static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
+unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
@@ -125,6 +124,8 @@ lockd(void *vrqstp)
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
+ svc_thread_init_status(rqstp, 0);
+
/* try_to_freeze() is called from svc_recv() */
set_freezable();
@@ -333,10 +334,6 @@ static int lockd_get(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
- if (!nlm_timeout)
- nlm_timeout = LOCKD_DFLT_TIMEO;
- nlmsvc_timeout = nlm_timeout * HZ;
-
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/locks.c b/fs/locks.c
index e45cad40f8b6..204847628f3e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1451,7 +1451,7 @@ int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose)
struct file *filp = fl->c.flc_file;
f_delown(filp);
- filp->f_owner.signum = 0;
+ file_f_owner(filp)->signum = 0;
fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
if (fl->fl_fasync != NULL) {
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
@@ -1783,6 +1783,10 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
lease = *flp;
trace_generic_add_lease(inode, lease);
+ error = file_f_owner_allocate(filp);
+ if (error)
+ return error;
+
/* Note that arg is never F_UNLCK here */
ctx = locks_get_lock_context(inode, arg);
if (!ctx)
@@ -2153,15 +2157,15 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
error = -EBADF;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return error;
- if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
+ if (type != F_UNLCK && !(fd_file(f)->f_mode & (FMODE_READ | FMODE_WRITE)))
goto out_putf;
- flock_make_lock(f.file, &fl, type);
+ flock_make_lock(fd_file(f), &fl, type);
- error = security_file_lock(f.file, fl.c.flc_type);
+ error = security_file_lock(fd_file(f), fl.c.flc_type);
if (error)
goto out_putf;
@@ -2169,12 +2173,12 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
if (can_sleep)
fl.c.flc_flags |= FL_SLEEP;
- if (f.file->f_op->flock)
- error = f.file->f_op->flock(f.file,
+ if (fd_file(f)->f_op->flock)
+ error = fd_file(f)->f_op->flock(fd_file(f),
(can_sleep) ? F_SETLKW : F_SETLK,
&fl);
else
- error = locks_lock_file_wait(f.file, &fl);
+ error = locks_lock_file_wait(fd_file(f), &fl);
locks_release_private(&fl);
out_putf:
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index a224cf222570..dd2a425b41f0 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -40,18 +40,18 @@ minix_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}
-static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
- block_write_end(NULL, mapping, pos, len, len, page, NULL);
+ block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
- unlock_page(page);
+ folio_unlock(folio);
}
static int minix_handle_dirsync(struct inode *dir)
@@ -64,14 +64,15 @@ static int minix_handle_dirsync(struct inode *dir)
return err;
}
-static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p)
+static void *dir_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
- struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
- *p = page;
- return kmap_local_page(page);
+ struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ *foliop = folio;
+ return kmap_local_folio(folio, 0);
}
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
@@ -99,9 +100,9 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit;
- struct page *page;
+ struct folio *folio;
- kaddr = dir_get_page(inode, n, &page);
+ kaddr = dir_get_folio(inode, n, &folio);
if (IS_ERR(kaddr))
continue;
p = kaddr+offset;
@@ -122,13 +123,13 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
unsigned l = strnlen(name, sbi->s_namelen);
if (!dir_emit(ctx, name, l,
inumber, DT_UNKNOWN)) {
- unmap_and_put_page(page, p);
+ folio_release_kmap(folio, p);
return 0;
}
}
ctx->pos += chunk_size;
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
@@ -144,12 +145,13 @@ static inline int namecompare(int len, int maxlen,
/*
* minix_find_entry()
*
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * finds an entry in the specified directory with the wanted name.
+ * It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
+ *
+ * On success, folio_release_kmap() should be called on *foliop.
*/
-minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
+minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
@@ -158,17 +160,15 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
struct minix_sb_info * sbi = minix_sb(sb);
unsigned long n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
char *p;
char *namx;
__u32 inumber;
- *res_page = NULL;
for (n = 0; n < npages; n++) {
char *kaddr, *limit;
- kaddr = dir_get_page(dir, n, &page);
+ kaddr = dir_get_folio(dir, n, foliop);
if (IS_ERR(kaddr))
continue;
@@ -188,12 +188,11 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto found;
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(*foliop, kaddr);
}
return NULL;
found:
- *res_page = page;
return (minix_dirent *)p;
}
@@ -204,7 +203,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
int namelen = dentry->d_name.len;
struct super_block * sb = dir->i_sb;
struct minix_sb_info * sbi = minix_sb(sb);
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long npages = dir_pages(dir);
unsigned long n;
char *kaddr, *p;
@@ -223,10 +222,10 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
for (n = 0; n <= npages; n++) {
char *limit, *dir_end;
- kaddr = dir_get_page(dir, n, &page);
+ kaddr = dir_get_folio(dir, n, &folio);
if (IS_ERR(kaddr))
return PTR_ERR(kaddr);
- lock_page(page);
+ folio_lock(folio);
dir_end = kaddr + minix_last_byte(dir, n);
limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
@@ -253,15 +252,15 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
if (namecompare(namelen, sbi->s_namelen, name, namx))
goto out_unlock;
}
- unlock_page(page);
- unmap_and_put_page(page, kaddr);
+ folio_unlock(folio);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- pos = page_offset(page) + offset_in_page(p);
- err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
+ pos = folio_pos(folio) + offset_in_folio(folio, p);
+ err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err)
goto out_unlock;
memcpy (namx, name, namelen);
@@ -272,37 +271,37 @@ got_it:
memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
de->inode = inode->i_ino;
}
- dir_commit_chunk(page, pos, sbi->s_dirsize);
+ dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = minix_handle_dirsync(dir);
out_put:
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
return err;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
goto out_put;
}
-int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
+int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- loff_t pos = page_offset(page) + offset_in_page(de);
+ struct inode *inode = folio->mapping->host;
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
unsigned len = sbi->s_dirsize;
int err;
- lock_page(page);
- err = minix_prepare_chunk(page, pos, len);
+ folio_lock(folio);
+ err = minix_prepare_chunk(folio, pos, len);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = 0;
else
de->inode = 0;
- dir_commit_chunk(page, pos, len);
+ dir_commit_chunk(folio, pos, len);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return minix_handle_dirsync(inode);
@@ -310,21 +309,21 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
int minix_make_empty(struct inode *inode, struct inode *dir)
{
- struct page *page = grab_cache_page(inode->i_mapping, 0);
+ struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *kaddr;
int err;
- if (!page)
- return -ENOMEM;
- err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- kaddr = kmap_local_page(page);
- memset(kaddr, 0, PAGE_SIZE);
+ kaddr = kmap_local_folio(folio, 0);
+ memset(kaddr, 0, folio_size(folio));
if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -345,10 +344,10 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
}
kunmap_local(kaddr);
- dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
+ dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
err = minix_handle_dirsync(inode);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -357,7 +356,7 @@ fail:
*/
int minix_empty_dir(struct inode * inode)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *name, *kaddr;
@@ -366,7 +365,7 @@ int minix_empty_dir(struct inode * inode)
for (i = 0; i < npages; i++) {
char *p, *limit;
- kaddr = dir_get_page(inode, i, &page);
+ kaddr = dir_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
continue;
@@ -395,44 +394,44 @@ int minix_empty_dir(struct inode * inode)
goto not_empty;
}
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
return 0;
}
/* Releases the page */
-int minix_set_link(struct minix_dir_entry *de, struct page *page,
+int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode)
{
- struct inode *dir = page->mapping->host;
+ struct inode *dir = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
- loff_t pos = page_offset(page) + offset_in_page(de);
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err;
- lock_page(page);
- err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
+ folio_lock(folio);
+ err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = inode->i_ino;
else
de->inode = inode->i_ino;
- dir_commit_chunk(page, pos, sbi->s_dirsize);
+ dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return minix_handle_dirsync(dir);
}
-struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
+struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
{
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
- struct minix_dir_entry *de = dir_get_page(dir, 0, p);
+ struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
if (!IS_ERR(de))
return minix_next_entry(de, sbi);
@@ -441,20 +440,19 @@ struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
ino_t minix_inode_by_name(struct dentry *dentry)
{
- struct page *page;
- struct minix_dir_entry *de = minix_find_entry(dentry, &page);
+ struct folio *folio;
+ struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
ino_t res = 0;
if (de) {
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
if (sbi->s_version == MINIX_V3)
res = ((minix3_dirent *) de)->inode;
else
res = de->inode;
- unmap_and_put_page(page, de);
+ folio_release_kmap(folio, de);
}
return res;
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1c3df63162ef..f007e389d5d2 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -427,9 +427,9 @@ static int minix_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, minix_get_block);
}
-int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(page, pos, len, minix_get_block);
+ return __block_write_begin(folio, pos, len, minix_get_block);
}
static void minix_write_failed(struct address_space *mapping, loff_t to)
@@ -444,11 +444,11 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
if (unlikely(ret))
minix_write_failed(mapping, pos + len);
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index d493507c064f..d54273c3c9ff 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -42,18 +42,18 @@ struct minix_sb_info {
unsigned short s_version;
};
-extern struct inode *minix_iget(struct super_block *, unsigned long);
-extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct inode * minix_new_inode(const struct inode *, umode_t);
-extern void minix_free_inode(struct inode * inode);
-extern unsigned long minix_count_free_inodes(struct super_block *sb);
-extern int minix_new_block(struct inode * inode);
-extern void minix_free_block(struct inode *inode, unsigned long block);
-extern unsigned long minix_count_free_blocks(struct super_block *sb);
-extern int minix_getattr(struct mnt_idmap *, const struct path *,
- struct kstat *, u32, unsigned int);
-extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
+struct inode *minix_iget(struct super_block *, unsigned long);
+struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
+struct inode *minix_new_inode(const struct inode *, umode_t);
+void minix_free_inode(struct inode *inode);
+unsigned long minix_count_free_inodes(struct super_block *sb);
+int minix_new_block(struct inode *inode);
+void minix_free_block(struct inode *inode, unsigned long block);
+unsigned long minix_count_free_blocks(struct super_block *sb);
+int minix_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
+int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *);
@@ -64,15 +64,15 @@ extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
extern unsigned V1_minix_blocks(loff_t, struct super_block *);
extern unsigned V2_minix_blocks(loff_t, struct super_block *);
-extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**);
-extern int minix_add_link(struct dentry*, struct inode*);
-extern int minix_delete_entry(struct minix_dir_entry*, struct page*);
-extern int minix_make_empty(struct inode*, struct inode*);
-extern int minix_empty_dir(struct inode*);
-int minix_set_link(struct minix_dir_entry *de, struct page *page,
+struct minix_dir_entry *minix_find_entry(struct dentry *, struct folio **);
+int minix_add_link(struct dentry*, struct inode*);
+int minix_delete_entry(struct minix_dir_entry *, struct folio *);
+int minix_make_empty(struct inode*, struct inode*);
+int minix_empty_dir(struct inode*);
+int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode);
-extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
-extern ino_t minix_inode_by_name(struct dentry*);
+struct minix_dir_entry *minix_dotdot(struct inode*, struct folio **);
+ino_t minix_inode_by_name(struct dentry*);
extern const struct inode_operations minix_file_inode_operations;
extern const struct inode_operations minix_dir_inode_operations;
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a944a0f17b53..5d9c1406fe27 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -141,15 +141,15 @@ out_fail:
static int minix_unlink(struct inode * dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
- struct page * page;
+ struct folio *folio;
struct minix_dir_entry * de;
int err;
- de = minix_find_entry(dentry, &page);
+ de = minix_find_entry(dentry, &folio);
if (!de)
return -ENOENT;
- err = minix_delete_entry(de, page);
- unmap_and_put_page(page, de);
+ err = minix_delete_entry(de, folio);
+ folio_release_kmap(folio, de);
if (err)
return err;
@@ -180,28 +180,28 @@ static int minix_rename(struct mnt_idmap *idmap,
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
- struct page * dir_page = NULL;
+ struct folio * dir_folio = NULL;
struct minix_dir_entry * dir_de = NULL;
- struct page * old_page;
+ struct folio *old_folio;
struct minix_dir_entry * old_de;
int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
- old_de = minix_find_entry(old_dentry, &old_page);
+ old_de = minix_find_entry(old_dentry, &old_folio);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
- dir_de = minix_dotdot(old_inode, &dir_page);
+ dir_de = minix_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- struct page * new_page;
+ struct folio *new_folio;
struct minix_dir_entry * new_de;
err = -ENOTEMPTY;
@@ -209,11 +209,11 @@ static int minix_rename(struct mnt_idmap *idmap,
goto out_dir;
err = -ENOENT;
- new_de = minix_find_entry(new_dentry, &new_page);
+ new_de = minix_find_entry(new_dentry, &new_folio);
if (!new_de)
goto out_dir;
- err = minix_set_link(new_de, new_page, old_inode);
- unmap_and_put_page(new_page, new_de);
+ err = minix_set_link(new_de, new_folio, old_inode);
+ folio_release_kmap(new_folio, new_de);
if (err)
goto out_dir;
inode_set_ctime_current(new_inode);
@@ -228,22 +228,22 @@ static int minix_rename(struct mnt_idmap *idmap,
inode_inc_link_count(new_dir);
}
- err = minix_delete_entry(old_de, old_page);
+ err = minix_delete_entry(old_de, old_folio);
if (err)
goto out_dir;
mark_inode_dirty(old_inode);
if (dir_de) {
- err = minix_set_link(dir_de, dir_page, new_dir);
+ err = minix_set_link(dir_de, dir_folio, new_dir);
if (!err)
inode_dec_link_count(old_dir);
}
out_dir:
if (dir_de)
- unmap_and_put_page(dir_page, dir_de);
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- unmap_and_put_page(old_page, old_de);
+ folio_release_kmap(old_folio, old_de);
out:
return err;
}
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 3c60f1eaca61..7b1df8cc2821 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -32,6 +32,15 @@ struct mnt_idmap nop_mnt_idmap = {
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
+/*
+ * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
+ * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
+ */
+struct mnt_idmap invalid_mnt_idmap = {
+ .count = REFCOUNT_INIT(1),
+};
+EXPORT_SYMBOL_GPL(invalid_mnt_idmap);
+
/**
* initial_idmapping - check whether this is the initial mapping
* @ns: idmapping to check
@@ -75,6 +84,8 @@ vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSUIDT_INIT(kuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSUID;
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
else
@@ -112,6 +123,8 @@ vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSGIDT_INIT(kgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSGID;
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
else
@@ -140,6 +153,8 @@ kuid_t from_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KUIDT(vfsuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_UID;
uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
if (uid == (uid_t)-1)
return INVALID_UID;
@@ -167,6 +182,8 @@ kgid_t from_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KGIDT(vfsgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_GID;
gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
if (gid == (gid_t)-1)
return INVALID_GID;
@@ -228,15 +245,15 @@ static int copy_mnt_idmap(struct uid_gid_map *map_from,
return 0;
}
- forward = kmemdup(map_from->forward,
- nr_extents * sizeof(struct uid_gid_extent),
- GFP_KERNEL_ACCOUNT);
+ forward = kmemdup_array(map_from->forward, nr_extents,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
if (!forward)
return -ENOMEM;
- reverse = kmemdup(map_from->reverse,
- nr_extents * sizeof(struct uid_gid_extent),
- GFP_KERNEL_ACCOUNT);
+ reverse = kmemdup_array(map_from->reverse, nr_extents,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL_ACCOUNT);
if (!reverse) {
kfree(forward);
return -ENOMEM;
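kmemdup_array() exists so the nr * size multiplication goes through an overflow check instead of wrapping silently inside a kmemdup() call. The userspace shape of it, assuming a hypothetical memdup_array helper:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t nr, size_t size)
{
	if (size && nr > SIZE_MAX / size)
		return NULL;		/* nr * size would overflow */

	void *p = malloc(nr * size);

	if (p)
		memcpy(p, src, nr * size);
	return p;
}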
@@ -296,7 +313,7 @@ struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
*/
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap)
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
refcount_inc(&idmap->count);
return idmap;
@@ -312,7 +329,8 @@ EXPORT_SYMBOL_GPL(mnt_idmap_get);
*/
void mnt_idmap_put(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count))
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
+ refcount_dec_and_test(&idmap->count))
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
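invalid_mnt_idmap follows the same sentinel pattern as nop_mnt_idmap: a statically allocated singleton recognised by address and never really refcounted, so get/put must filter it out before touching the count. A sketch of the pattern (names and the destructor are illustrative):

#include <stdatomic.h>

struct idmap { atomic_int count; };

static struct idmap nop_idmap = { 1 };		/* static sentinels, */
static struct idmap invalid_idmap = { 1 };	/* compared by address */

static void free_idmap(struct idmap *m) { (void)m; /* kfree() analogue */ }

static struct idmap *idmap_get(struct idmap *m)
{
	if (m != &nop_idmap && m != &invalid_idmap)
		atomic_fetch_add(&m->count, 1);
	return m;
}

static void idmap_put(struct idmap *m)
{
	if (m != &nop_idmap && m != &invalid_idmap &&
	    atomic_fetch_sub(&m->count, 1) == 1)
		free_idmap(m);
}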
diff --git a/fs/mount.h b/fs/mount.h
index ad4b1ddebb54..185fc56afc13 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -153,5 +153,17 @@ static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
list_add_tail(&mnt->mnt_list, dt_list);
}
-extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
bool has_locked_children(struct mount *mnt, struct dentry *dentry);
+struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous);
+static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns)
+{
+ return __lookup_next_mnt_ns(mntns, false);
+}
+static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns)
+{
+ return __lookup_next_mnt_ns(mntns, true);
+}
+static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct mnt_namespace, ns);
+}
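to_mnt_ns() moving into the header is just container_of(): recover the enclosing structure from a pointer to one of its members. A self-contained demo:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ns_common { int inum; };
struct mnt_namespace { long id; struct ns_common ns; };

int main(void)
{
	struct mnt_namespace m = { .id = 42 };
	struct ns_common *ns = &m.ns;	/* callers hand around this */

	printf("%ld\n", container_of(ns, struct mnt_namespace, ns)->id);
	return 0;
}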
diff --git a/fs/namei.c b/fs/namei.c
index 5512cb10fa89..4a4a22a08ac2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1639,6 +1639,20 @@ struct dentry *lookup_one_qstr_excl(const struct qstr *name,
}
EXPORT_SYMBOL(lookup_one_qstr_excl);
+/**
+ * lookup_fast - do fast lockless (but racy) lookup of a dentry
+ * @nd: current nameidata
+ *
+ * Do a fast, but racy lookup in the dcache for the given dentry, and
+ * revalidate it. Returns a valid dentry pointer or NULL if one wasn't
+ * found. On error, an ERR_PTR will be returned.
+ *
+ * If this function returns a valid dentry and the walk is no longer
+ * lazy, the dentry will carry a reference that must later be put. If
+ * RCU mode is still in force, then this is not the case and the dentry
+ * must be legitimized before use. If this returns NULL, then the walk
+ * will no longer be in RCU mode.
+ */
static struct dentry *lookup_fast(struct nameidata *nd)
{
struct dentry *dentry, *parent = nd->path.dentry;
@@ -2492,25 +2506,25 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
struct fd f = fdget_raw(nd->dfd);
struct dentry *dentry;
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
if (flags & LOOKUP_LINKAT_EMPTY) {
- if (f.file->f_cred != current_cred() &&
- !ns_capable(f.file->f_cred->user_ns, CAP_DAC_READ_SEARCH)) {
+ if (fd_file(f)->f_cred != current_cred() &&
+ !ns_capable(fd_file(f)->f_cred->user_ns, CAP_DAC_READ_SEARCH)) {
fdput(f);
return ERR_PTR(-ENOENT);
}
}
- dentry = f.file->f_path.dentry;
+ dentry = fd_file(f)->f_path.dentry;
if (*s && unlikely(!d_can_lookup(dentry))) {
fdput(f);
return ERR_PTR(-ENOTDIR);
}
- nd->path = f.file->f_path;
+ nd->path = fd_file(f)->f_path;
if (flags & LOOKUP_RCU) {
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
@@ -3521,6 +3535,9 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
return dentry;
}
+ if (open_flag & O_CREAT)
+ audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
+
/*
* Checking write permission is tricky, because we don't know if we are
* going to actually need it: O_CREAT opens should work as long as the
@@ -3591,6 +3608,42 @@ out_dput:
return ERR_PTR(error);
}
+static inline bool trailing_slashes(struct nameidata *nd)
+{
+ return (bool)nd->last.name[nd->last.len];
+}
+
+static struct dentry *lookup_fast_for_open(struct nameidata *nd, int open_flag)
+{
+ struct dentry *dentry;
+
+ if (open_flag & O_CREAT) {
+ if (trailing_slashes(nd))
+ return ERR_PTR(-EISDIR);
+
+ /* Don't bother on an O_EXCL create */
+ if (open_flag & O_EXCL)
+ return NULL;
+ }
+
+ if (trailing_slashes(nd))
+ nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+ dentry = lookup_fast(nd);
+ if (IS_ERR_OR_NULL(dentry))
+ return dentry;
+
+ if (open_flag & O_CREAT) {
+ /* Discard negative dentries. Need inode_lock to do the create */
+ if (!dentry->d_inode) {
+ if (!(nd->flags & LOOKUP_RCU))
+ dput(dentry);
+ dentry = NULL;
+ }
+ }
+ return dentry;
+}
+
static const char *open_last_lookups(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
@@ -3608,28 +3661,22 @@ static const char *open_last_lookups(struct nameidata *nd,
return handle_dots(nd, nd->last_type);
}
- if (!(open_flag & O_CREAT)) {
- if (nd->last.name[nd->last.len])
- nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
- /* we _can_ be in RCU mode here */
- dentry = lookup_fast(nd);
- if (IS_ERR(dentry))
- return ERR_CAST(dentry);
- if (likely(dentry))
- goto finish_lookup;
+ /* We _can_ be in RCU mode here */
+ dentry = lookup_fast_for_open(nd, open_flag);
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
+ if (likely(dentry))
+ goto finish_lookup;
+
+ if (!(open_flag & O_CREAT)) {
if (WARN_ON_ONCE(nd->flags & LOOKUP_RCU))
return ERR_PTR(-ECHILD);
} else {
- /* create side of things */
if (nd->flags & LOOKUP_RCU) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
}
- audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
- /* trailing slashes? */
- if (unlikely(nd->last.name[nd->last.len]))
- return ERR_PTR(-EISDIR);
}
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
@@ -5304,7 +5351,7 @@ int page_symlink(struct inode *inode, const char *symname, int len)
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int err;
unsigned int flags;
@@ -5312,16 +5359,16 @@ int page_symlink(struct inode *inode, const char *symname, int len)
retry:
if (nofs)
flags = memalloc_nofs_save();
- err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata);
+ err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
if (nofs)
memalloc_nofs_restore(flags);
if (err)
goto fail;
- memcpy(page_address(page), symname, len-1);
+ memcpy(folio_address(folio), symname, len - 1);
- err = aops->write_end(NULL, mapping, 0, len-1, len-1,
- page, fsdata);
+ err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
+ folio, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
diff --git a/fs/namespace.c b/fs/namespace.c
index 328087a4df8a..93c377816d75 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1774,7 +1774,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_del_init(&p->mnt_child);
}
- /* Add propogated mounts to the tmp_list */
+ /* Add propagated mounts to the tmp_list */
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
@@ -2060,14 +2060,41 @@ static bool is_mnt_ns_file(struct dentry *dentry)
dentry->d_fsdata == &mntns_operations;
}
-static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
- return container_of(ns, struct mnt_namespace, ns);
+ return &mnt->ns;
}
-struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
+struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
- return &mnt->ns;
+ guard(read_lock)(&mnt_ns_tree_lock);
+ for (;;) {
+ struct rb_node *node;
+
+ if (previous)
+ node = rb_prev(&mntns->mnt_ns_tree_node);
+ else
+ node = rb_next(&mntns->mnt_ns_tree_node);
+ if (!node)
+ return ERR_PTR(-ENOENT);
+
+ mntns = node_to_mnt_ns(node);
+ node = &mntns->mnt_ns_tree_node;
+
+ if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
+ continue;
+
+ /*
+ * Holding mnt_ns_tree_lock prevents the mount namespace from
+ * being freed but it may well be on its deathbed. We want an
+ * active reference, not just a passive one here as we're
+ * persisting the mount namespace.
+ */
+ if (!refcount_inc_not_zero(&mntns->ns.count))
+ continue;
+
+ return mntns;
+ }
}
static bool mnt_ns_loop(struct dentry *dentry)
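The refcount_inc_not_zero() call above is the load-bearing part: under mnt_ns_tree_lock the namespace cannot be freed, but its count may already have hit zero, and a plain increment would resurrect a dying object. The helper's semantics, modeled with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is not already being torn down. */
static bool refcount_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	do {
		if (old == 0)
			return false;	/* dying; do not resurrect */
	} while (!atomic_compare_exchange_weak(r, &old, old + 1));
	return true;
}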
@@ -2921,8 +2948,15 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
if (!__mnt_is_readonly(mnt) &&
(!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
(ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
- char *buf = (char *)__get_free_page(GFP_KERNEL);
- char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
+ char *buf, *mntpath;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf)
+ mntpath = d_path(mountpoint, buf, PAGE_SIZE);
+ else
+ mntpath = ERR_PTR(-ENOMEM);
+ if (IS_ERR(mntpath))
+ mntpath = "(unknown)";
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
sb->s_type->name,
@@ -2930,8 +2964,9 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
mntpath, &sb->s_time_max,
(unsigned long long)sb->s_time_max);
- free_page((unsigned long)buf);
sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
+ if (buf)
+ free_page((unsigned long)buf);
}
}
@@ -4099,14 +4134,14 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
}
f = fdget(fs_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
+ if (fd_file(f)->f_op != &fscontext_fops)
goto err_fsfd;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
ret = mutex_lock_interruptible(&fc->uapi_mutex);
if (ret < 0)
@@ -4436,6 +4471,10 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
+ /* The filesystem has turned off idmapped mounts. */
+ if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
+ return -EINVAL;
+
/* We're not controlling the superblock. */
if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
return -EPERM;
@@ -4649,15 +4688,15 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
return -EINVAL;
f = fdget(attr->userns_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!proc_ns_file(f.file)) {
+ if (!proc_ns_file(fd_file(f))) {
err = -EINVAL;
goto out_fput;
}
- ns = get_proc_ns(file_inode(f.file));
+ ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops->type != CLONE_NEWUSER) {
err = -EINVAL;
goto out_fput;
@@ -5243,12 +5282,37 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
* that, or if not simply grab a passive reference on our mount namespace and
* return that.
*/
-static struct mnt_namespace *grab_requested_mnt_ns(u64 mnt_ns_id)
+static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
{
- if (mnt_ns_id)
- return lookup_mnt_ns(mnt_ns_id);
- refcount_inc(&current->nsproxy->mnt_ns->passive);
- return current->nsproxy->mnt_ns;
+ struct mnt_namespace *mnt_ns;
+
+ if (kreq->mnt_ns_id && kreq->spare)
+ return ERR_PTR(-EINVAL);
+
+ if (kreq->mnt_ns_id)
+ return lookup_mnt_ns(kreq->mnt_ns_id);
+
+ if (kreq->spare) {
+ struct ns_common *ns;
+
+ CLASS(fd, f)(kreq->spare);
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+
+ if (!proc_ns_file(fd_file(f)))
+ return ERR_PTR(-EINVAL);
+
+ ns = get_proc_ns(file_inode(fd_file(f)));
+ if (ns->ops->type != CLONE_NEWNS)
+ return ERR_PTR(-EINVAL);
+
+ mnt_ns = to_mnt_ns(ns);
+ } else {
+ mnt_ns = current->nsproxy->mnt_ns;
+ }
+
+ refcount_inc(&mnt_ns->passive);
+ return mnt_ns;
}
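
grab_requested_mnt_ns() above uses the scope-based file-descriptor class: CLASS(fd, f)(n) performs the fdget() and arranges the matching fdput() automatically when f goes out of scope, which is why the early returns need no cleanup. A minimal hedged sketch (example_ns_type_of_fd() is hypothetical):

	/* Hypothetical helper: report the namespace type behind an fd.
	 * No explicit fdput() -- the CLASS(fd, ...) destructor runs on
	 * every return path.
	 */
	static int example_ns_type_of_fd(int ufd)
	{
		CLASS(fd, f)(ufd);

		if (fd_empty(f))
			return -EBADF;
		if (!proc_ns_file(fd_file(f)))
			return -EINVAL;
		return get_proc_ns(file_inode(fd_file(f)))->ops->type;
	}
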
SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
@@ -5269,7 +5333,7 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
if (ret)
return ret;
- ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
+ ns = grab_requested_mnt_ns(&kreq);
if (!ns)
return -ENOENT;
@@ -5396,7 +5460,7 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
if (!kmnt_ids)
return -ENOMEM;
- ns = grab_requested_mnt_ns(kreq.mnt_ns_id);
+ ns = grab_requested_mnt_ns(&kreq);
if (!ns)
return -ENOENT;
@@ -5605,7 +5669,7 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
/* Only worry about locked mounts */
if (!(child->mnt.mnt_flags & MNT_LOCKED))
continue;
- /* Is the directory permanetly empty? */
+ /* Is the directory permanently empty? */
if (!is_empty_dir_inode(inode))
goto next;
}
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index 8e6781e0b10b..d08b0bfb6756 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -5,12 +5,14 @@ netfs-y := \
buffered_write.o \
direct_read.o \
direct_write.o \
- io.o \
iterator.o \
locking.o \
main.o \
misc.o \
objects.o \
+ read_collect.o \
+ read_pgpriv2.o \
+ read_retry.o \
write_collect.o \
write_issue.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 27c750d39476..c40e226053cc 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -9,266 +9,388 @@
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
-/*
- * [DEPRECATED] Unlock the folios in a read operation for when the filesystem
- * is using PG_private_2 and direct writing to the cache from here rather than
- * marking the page for writeback.
- *
- * Note that we don't touch folio->private in this code.
- */
-static void netfs_rreq_unlock_folios_pgpriv2(struct netfs_io_request *rreq,
- size_t *account)
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size)
{
- struct netfs_io_subrequest *subreq;
- struct folio *folio;
- pgoff_t start_page = rreq->start / PAGE_SIZE;
- pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
- bool subreq_failed = false;
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
- XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+ if (cres->ops && cres->ops->expand_readahead)
+ cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
- /* Walk through the pagecache and the I/O request lists simultaneously.
- * We may have a mixture of cached and uncached sections and we only
- * really want to write out the uncached sections. This is slightly
- * complicated by the possibility that we might have huge pages with a
- * mixture inside.
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+ struct readahead_control *ractl)
+{
+ /* Give the cache a chance to change the request parameters. The
+ * resultant request must contain the original region.
*/
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_io_subrequest, rreq_link);
- subreq_failed = (subreq->error < 0);
+ netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
- trace_netfs_rreq(rreq, netfs_rreq_trace_unlock_pgpriv2);
+ /* Give the netfs a chance to change the request parameters. The
+ * resultant request must contain the original region.
+ */
+ if (rreq->netfs_ops->expand_readahead)
+ rreq->netfs_ops->expand_readahead(rreq);
- rcu_read_lock();
- xas_for_each(&xas, folio, last_page) {
- loff_t pg_end;
- bool pg_failed = false;
- bool folio_started = false;
+ /* Expand the request if the cache wants it to start earlier. Note
+ * that the expansion may get further extended if the VM wishes to
+ * insert THPs and the preferred start and/or end wind up in the middle
+ * of THPs.
+ *
+ * If this is the case, however, the THP size should be an integer
+ * multiple of the cache granule size, so we get a whole number of
+ * granules to deal with.
+ */
+ if (rreq->start != readahead_pos(ractl) ||
+ rreq->len != readahead_length(ractl)) {
+ readahead_expand(ractl, rreq->start, rreq->len);
+ rreq->start = readahead_pos(ractl);
+ rreq->len = readahead_length(ractl);
- if (xas_retry(&xas, folio))
- continue;
+ trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+ netfs_read_trace_expanded);
+ }
+}
- pg_end = folio_pos(folio) + folio_size(folio) - 1;
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
- for (;;) {
- loff_t sreq_end;
+/*
+ * Decant the list of folios to read into a rolling buffer.
+ */
+static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
+ struct folio_queue *folioq)
+{
+ unsigned int order, nr;
+ size_t size = 0;
+
+ nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios,
+ ARRAY_SIZE(folioq->vec.folios));
+ folioq->vec.nr = nr;
+ for (int i = 0; i < nr; i++) {
+ struct folio *folio = folioq_folio(folioq, i);
+
+ trace_netfs_folio(folio, netfs_folio_trace_read);
+ order = folio_order(folio);
+ folioq->orders[i] = order;
+ size += PAGE_SIZE << order;
+ }
- if (!subreq) {
- pg_failed = true;
- break;
- }
+ for (int i = nr; i < folioq_nr_slots(folioq); i++)
+ folioq_clear(folioq, i);
- if (!folio_started &&
- test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags) &&
- fscache_operation_valid(&rreq->cache_resources)) {
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_private_2(folio);
- folio_started = true;
- }
+ return size;
+}
- pg_failed |= subreq_failed;
- sreq_end = subreq->start + subreq->len - 1;
- if (pg_end < sreq_end)
- break;
+/*
+ * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
+ * @subreq: The subrequest to be set up
+ *
+ * Prepare the I/O iterator representing the read buffer on a subrequest for
+ * the filesystem to use for I/O (it can be passed directly to a socket). This
+ * is intended to be called from the ->issue_read() method once the filesystem
+ * has trimmed the request to the size it wants.
+ *
+ * Returns the limited size if successful, or -ENOMEM if insufficient memory
+ * is available.
+ *
+ * [!] NOTE: This must be run in the same thread as ->issue_read() was called
+ * in, as we access the readahead_control struct.
+ */
+static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ size_t rsize = subreq->len;
+
+ if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
+ rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);
+
+ if (rreq->ractl) {
+ /* If we don't have sufficient folios in the rolling buffer,
+ * extract a folioq's worth from the readahead region at a time
+ * into the buffer. Note that this acquires a ref on each page
+ * that we will need to release later - but we don't want to do
+ * that until after we've started the I/O.
+ */
+ while (rreq->submitted < subreq->start + rsize) {
+ struct folio_queue *tail = rreq->buffer_tail, *new;
+ size_t added;
+
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new)
+ return -ENOMEM;
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(new);
+ new->prev = tail;
+ tail->next = new;
+ rreq->buffer_tail = new;
+ added = netfs_load_buffer_from_ra(rreq, new);
+ rreq->iter.count += added;
+ rreq->submitted += added;
+ }
+ }
- *account += subreq->transferred;
- if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- subreq = list_next_entry(subreq, rreq_link);
- subreq_failed = (subreq->error < 0);
- } else {
- subreq = NULL;
- subreq_failed = false;
- }
+ subreq->len = rsize;
+ if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
+ size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ rreq->io_streams[0].sreq_max_segs);
- if (pg_end == sreq_end)
- break;
+ if (limit < rsize) {
+ subreq->len = limit;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
}
+ }
- if (!pg_failed) {
- flush_dcache_folio(folio);
- folio_mark_uptodate(folio);
- }
+ subreq->io_iter = rreq->iter;
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
- if (folio->index == rreq->no_unlock_folio &&
- test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- _debug("no unlock");
- else
- folio_unlock(folio);
+ if (iov_iter_is_folioq(&subreq->io_iter)) {
+ if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) {
+ subreq->io_iter.folioq = subreq->io_iter.folioq->next;
+ subreq->io_iter.folioq_slot = 0;
}
+ subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
+ subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
+ subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
}
- rcu_read_unlock();
+
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ iov_iter_advance(&rreq->iter, subreq->len);
+ return subreq->len;
}
-/*
- * Unlock the folios in a read operation. We need to set PG_writeback on any
- * folios we're going to write back before we unlock them.
- *
- * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use
- * PG_private_2 and do a direct write to the cache from here instead.
- */
-void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ loff_t i_size)
{
- struct netfs_io_subrequest *subreq;
- struct netfs_folio *finfo;
- struct folio *folio;
- pgoff_t start_page = rreq->start / PAGE_SIZE;
- pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
- size_t account = 0;
- bool subreq_failed = false;
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
- XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+ if (!cres->ops)
+ return NETFS_DOWNLOAD_FROM_SERVER;
+ return cres->ops->prepare_read(subreq, i_size);
+}
- if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
- __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
- }
- }
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq = priv;
- /* Handle deprecated PG_private_2 case. */
- if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
- netfs_rreq_unlock_folios_pgpriv2(rreq, &account);
- goto out;
+ if (transferred_or_error < 0) {
+ netfs_read_subreq_terminated(subreq, transferred_or_error, was_async);
+ return;
}
- /* Walk through the pagecache and the I/O request lists simultaneously.
- * We may have a mixture of cached and uncached sections and we only
- * really want to write out the uncached sections. This is slightly
- * complicated by the possibility that we might have huge pages with a
- * mixture inside.
- */
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_io_subrequest, rreq_link);
- subreq_failed = (subreq->error < 0);
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
+ if (transferred_or_error > 0)
+ subreq->transferred += transferred_or_error;
+ netfs_read_subreq_terminated(subreq, 0, was_async);
+}
- rcu_read_lock();
- xas_for_each(&xas, folio, last_page) {
- loff_t pg_end;
- bool pg_failed = false;
- bool wback_to_cache = false;
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
- if (xas_retry(&xas, folio))
- continue;
+ netfs_stat(&netfs_n_rh_read);
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_IGNORE,
+ netfs_cache_read_terminated, subreq);
+}
- pg_end = folio_pos(folio) + folio_size(folio) - 1;
+/*
+ * Perform a read to the pagecache from a series of sources of different types,
+ * slicing up the region to be read according to available cache blocks and
+ * network rsize.
+ */
+static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
+{
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
+ unsigned long long start = rreq->start;
+ ssize_t size = rreq->len;
+ int ret = 0;
+
+ atomic_inc(&rreq->nr_outstanding);
+
+ do {
+ struct netfs_io_subrequest *subreq;
+ enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
+ ssize_t slice;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ ret = -ENOMEM;
+ break;
+ }
- for (;;) {
- loff_t sreq_end;
+ subreq->start = start;
+ subreq->len = size;
+
+ atomic_inc(&rreq->nr_outstanding);
+ spin_lock_bh(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+ subreq->prev_donated = rreq->prev_donated;
+ rreq->prev_donated = 0;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ spin_unlock_bh(&rreq->lock);
+
+ source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
+ subreq->source = source;
+ if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+ unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
+ size_t len = subreq->len;
+
+ if (subreq->start >= zp) {
+ subreq->source = source = NETFS_FILL_WITH_ZEROES;
+ goto fill_with_zeroes;
+ }
- if (!subreq) {
- pg_failed = true;
+ if (len > zp - subreq->start)
+ len = zp - subreq->start;
+ if (len == 0) {
+ pr_err("ZERO-LEN READ: R=%08x[%x] l=%zx/%zx s=%llx z=%llx i=%llx",
+ rreq->debug_id, subreq->debug_index,
+ subreq->len, size,
+ subreq->start, ictx->zero_point, rreq->i_size);
break;
}
+ subreq->len = len;
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0) {
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false,
+ netfs_sreq_trace_put_cancel);
+ break;
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ }
- wback_to_cache |= test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
- pg_failed |= subreq_failed;
- sreq_end = subreq->start + subreq->len - 1;
- if (pg_end < sreq_end)
+ slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0) {
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ ret = slice;
break;
-
- account += subreq->transferred;
- if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- subreq = list_next_entry(subreq, rreq_link);
- subreq_failed = (subreq->error < 0);
- } else {
- subreq = NULL;
- subreq_failed = false;
}
- if (pg_end == sreq_end)
- break;
+ rreq->netfs_ops->issue_read(subreq);
+ goto done;
}
- if (!pg_failed) {
- flush_dcache_folio(folio);
- finfo = netfs_folio_info(folio);
- if (finfo) {
- trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
- if (finfo->netfs_group)
- folio_change_private(folio, finfo->netfs_group);
- else
- folio_detach_private(folio);
- kfree(finfo);
- }
- folio_mark_uptodate(folio);
- if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
- filemap_dirty_folio(folio->mapping, folio);
- }
+ fill_with_zeroes:
+ if (source == NETFS_FILL_WITH_ZEROES) {
+ subreq->source = NETFS_FILL_WITH_ZEROES;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ netfs_stat(&netfs_n_rh_zero);
+ slice = netfs_prepare_read_iterator(subreq);
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ netfs_read_subreq_terminated(subreq, 0, false);
+ goto done;
}
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
- if (folio->index == rreq->no_unlock_folio &&
- test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- _debug("no unlock");
- else
- folio_unlock(folio);
+ if (source == NETFS_READ_FROM_CACHE) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ slice = netfs_prepare_read_iterator(subreq);
+ netfs_read_cache_to_pagecache(rreq, subreq);
+ goto done;
}
- }
- rcu_read_unlock();
-out:
- task_io_account_read(account);
- if (rreq->netfs_ops->done)
- rreq->netfs_ops->done(rreq);
-}
+ pr_err("Unexpected read source %u\n", source);
+ WARN_ON_ONCE(1);
+ break;
-static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
- unsigned long long *_start,
- unsigned long long *_len,
- unsigned long long i_size)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
+ done:
+ size -= slice;
+ start += slice;
+ cond_resched();
+ } while (size > 0);
- if (cres->ops && cres->ops->expand_readahead)
- cres->ops->expand_readahead(cres, _start, _len, i_size);
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_terminated(rreq, false);
+
+ /* Defer error return as we may need to wait for outstanding I/O. */
+ cmpxchg(&rreq->error, 0, ret);
}
-static void netfs_rreq_expand(struct netfs_io_request *rreq,
- struct readahead_control *ractl)
+/*
+ * Wait for the read operation to complete, successfully or otherwise.
+ */
+static int netfs_wait_for_read(struct netfs_io_request *rreq)
{
- /* Give the cache a chance to change the request parameters. The
- * resultant request must contain the original region.
- */
- netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+ int ret;
- /* Give the netfs a chance to change the request parameters. The
- * resultant request must contain the original region.
- */
- if (rreq->netfs_ops->expand_readahead)
- rreq->netfs_ops->expand_readahead(rreq);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+ wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
+ ret = rreq->error;
+ if (ret == 0 && rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
- /* Expand the request if the cache wants it to start earlier. Note
- * that the expansion may get further extended if the VM wishes to
- * insert THPs and the preferred start and/or end wind up in the middle
- * of THPs.
- *
- * If this is the case, however, the THP size should be an integer
- * multiple of the cache granule size, so we get a whole number of
- * granules to deal with.
- */
- if (rreq->start != readahead_pos(ractl) ||
- rreq->len != readahead_length(ractl)) {
- readahead_expand(ractl, rreq->start, rreq->len);
- rreq->start = readahead_pos(ractl);
- rreq->len = readahead_length(ractl);
+ return ret;
+}
- trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
- netfs_read_trace_expanded);
- }
+/*
+ * Set up the initial folioq of buffer folios in the rolling buffer and set the
+ * iterator to refer to it.
+ */
+static int netfs_prime_buffer(struct netfs_io_request *rreq)
+{
+ struct folio_queue *folioq;
+ size_t added;
+
+ folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ if (!folioq)
+ return -ENOMEM;
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(folioq);
+ rreq->buffer = folioq;
+ rreq->buffer_tail = folioq;
+ rreq->submitted = rreq->start;
+ iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
+
+ added = netfs_load_buffer_from_ra(rreq, folioq);
+ rreq->iter.count += added;
+ rreq->submitted += added;
+ return 0;
}
/*
- * Begin an operation, and fetch the stored zero point value from the cookie if
- * available.
+ * Drop the ref on each folio that we inherited from the VM readahead code. We
+ * still have the folio locks to pin the page until we complete the I/O.
+ *
+ * Note that we can't just release the batch in each queue struct as we use the
+ * occupancy count in other places.
*/
-static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+static void netfs_put_ra_refs(struct folio_queue *folioq)
{
- return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+ struct folio_batch fbatch;
+
+ folio_batch_init(&fbatch);
+ while (folioq) {
+ for (unsigned int slot = 0; slot < folioq_count(folioq); slot++) {
+ struct folio *folio = folioq_folio(folioq, slot);
+ if (!folio)
+ continue;
+ trace_netfs_folio(folio, netfs_folio_trace_read_put);
+ if (!folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
+ }
+ folioq = folioq->next;
+ }
+
+ folio_batch_release(&fbatch);
}
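
netfs_put_ra_refs() above relies on the folio_batch idiom: collect folios and drop their references in bulk rather than one folio_put() at a time. A generic sketch (example_put_folios() is hypothetical); folio_batch_add() returns the number of slots still free, so a zero return means the batch is full and must be released before more can be added:

	/* Illustrative bulk-put of an array of folios. */
	static void example_put_folios(struct folio **folios, unsigned int n)
	{
		struct folio_batch fbatch;

		folio_batch_init(&fbatch);
		for (unsigned int i = 0; i < n; i++)
			if (!folio_batch_add(&fbatch, folios[i]))
				folio_batch_release(&fbatch);	/* batch full */
		folio_batch_release(&fbatch);	/* release the remainder */
	}
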
/**
@@ -289,22 +411,17 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
void netfs_readahead(struct readahead_control *ractl)
{
struct netfs_io_request *rreq;
- struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
+ struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
+ unsigned long long start = readahead_pos(ractl);
+ size_t size = readahead_length(ractl);
int ret;
- _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
-
- if (readahead_count(ractl) == 0)
- return;
-
- rreq = netfs_alloc_request(ractl->mapping, ractl->file,
- readahead_pos(ractl),
- readahead_length(ractl),
+ rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size,
NETFS_READAHEAD);
if (IS_ERR(rreq))
return;
- ret = netfs_begin_cache_read(rreq, ctx);
+ ret = netfs_begin_cache_read(rreq, ictx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
@@ -314,18 +431,15 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl);
- /* Set up the output buffer */
- iov_iter_xarray(&rreq->iter, ITER_DEST, &ractl->mapping->i_pages,
- rreq->start, rreq->len);
+ rreq->ractl = ractl;
+ if (netfs_prime_buffer(rreq) < 0)
+ goto cleanup_free;
+ netfs_read_to_pagecache(rreq);
- /* Drop the refs on the folios here rather than in the cache or
- * filesystem. The locks will be dropped in netfs_rreq_unlock().
- */
- while (readahead_folio(ractl))
- ;
+ /* Release the folio refs whilst we're waiting for the I/O. */
+ netfs_put_ra_refs(rreq->buffer);
- netfs_begin_read(rreq, false);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
return;
cleanup_free:
@@ -334,6 +448,117 @@ cleanup_free:
}
EXPORT_SYMBOL(netfs_readahead);
+/*
+ * Create a rolling buffer with a single occupying folio.
+ */
+static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio)
+{
+ struct folio_queue *folioq;
+
+ folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+ if (!folioq)
+ return -ENOMEM;
+
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(folioq);
+ folioq_append(folioq, folio);
+ BUG_ON(folioq_folio(folioq, 0) != folio);
+ BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
+ rreq->buffer = folioq;
+ rreq->buffer_tail = folioq;
+ rreq->submitted = rreq->start + rreq->len;
+ iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len);
+ rreq->ractl = (struct readahead_control *)1UL;
+ return 0;
+}
+
+/*
+ * Read into gaps in a folio partially filled by a streaming write.
+ */
+static int netfs_read_gaps(struct file *file, struct folio *folio)
+{
+ struct netfs_io_request *rreq;
+ struct address_space *mapping = folio->mapping;
+ struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ struct folio *sink = NULL;
+ struct bio_vec *bvec;
+ unsigned int from = finfo->dirty_offset;
+ unsigned int to = from + finfo->dirty_len;
+ unsigned int off = 0, i = 0;
+ size_t flen = folio_size(folio);
+ size_t nr_bvec = flen / PAGE_SIZE + 2;
+ size_t part;
+ int ret;
+
+ _enter("%lx", folio->index);
+
+ rreq = netfs_alloc_request(mapping, file, folio_pos(folio), flen, NETFS_READ_GAPS);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto alloc_error;
+ }
+
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
+
+ netfs_stat(&netfs_n_rh_read_folio);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps);
+
+ /* Fiddle the buffer so that a gap at the beginning and/or a gap at the
+ * end get copied to, but the middle is discarded.
+ */
+ ret = -ENOMEM;
+ bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
+ if (!bvec)
+ goto discard;
+
+ sink = folio_alloc(GFP_KERNEL, 0);
+ if (!sink) {
+ kfree(bvec);
+ goto discard;
+ }
+
+ trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+
+ rreq->direct_bv = bvec;
+ rreq->direct_bv_count = nr_bvec;
+ if (from > 0) {
+ bvec_set_folio(&bvec[i++], folio, from, 0);
+ off = from;
+ }
+ while (off < to) {
+ part = min_t(size_t, to - off, PAGE_SIZE);
+ bvec_set_folio(&bvec[i++], sink, part, 0);
+ off += part;
+ }
+ if (to < flen)
+ bvec_set_folio(&bvec[i++], folio, flen - to, to);
+ iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+ rreq->submitted = rreq->start + flen;
+
+ netfs_read_to_pagecache(rreq);
+
+ if (sink)
+ folio_put(sink);
+
+ ret = netfs_wait_for_read(rreq);
+ if (ret == 0) {
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ }
+ folio_unlock(folio);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
+
+discard:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+alloc_error:
+ folio_unlock(folio);
+ return ret;
+}
+
/**
* netfs_read_folio - Helper to manage a read_folio request
* @file: The file to read from
@@ -353,9 +578,13 @@ int netfs_read_folio(struct file *file, struct folio *folio)
struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(mapping->host);
- struct folio *sink = NULL;
int ret;
+ if (folio_test_dirty(folio)) {
+ trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+ return netfs_read_gaps(file, folio);
+ }
+
_enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
@@ -374,54 +603,12 @@ int netfs_read_folio(struct file *file, struct folio *folio)
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
/* Set up the output buffer */
- if (folio_test_dirty(folio)) {
- /* Handle someone trying to read from an unflushed streaming
- * write. We fiddle the buffer so that a gap at the beginning
- * and/or a gap at the end get copied to, but the middle is
- * discarded.
- */
- struct netfs_folio *finfo = netfs_folio_info(folio);
- struct bio_vec *bvec;
- unsigned int from = finfo->dirty_offset;
- unsigned int to = from + finfo->dirty_len;
- unsigned int off = 0, i = 0;
- size_t flen = folio_size(folio);
- size_t nr_bvec = flen / PAGE_SIZE + 2;
- size_t part;
-
- ret = -ENOMEM;
- bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
- if (!bvec)
- goto discard;
-
- sink = folio_alloc(GFP_KERNEL, 0);
- if (!sink)
- goto discard;
-
- trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
-
- rreq->direct_bv = bvec;
- rreq->direct_bv_count = nr_bvec;
- if (from > 0) {
- bvec_set_folio(&bvec[i++], folio, from, 0);
- off = from;
- }
- while (off < to) {
- part = min_t(size_t, to - off, PAGE_SIZE);
- bvec_set_folio(&bvec[i++], sink, part, 0);
- off += part;
- }
- if (to < flen)
- bvec_set_folio(&bvec[i++], folio, flen - to, to);
- iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
- } else {
- iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
- rreq->start, rreq->len);
- }
+ ret = netfs_create_singular_buffer(rreq, folio);
+ if (ret < 0)
+ goto discard;
- ret = netfs_begin_read(rreq, true);
- if (sink)
- folio_put(sink);
+ netfs_read_to_pagecache(rreq);
+ ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
return ret < 0 ? ret : 0;
@@ -494,13 +681,10 @@ zero_out:
*
* Pre-read data for a write-begin request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
- * Multiple I/O requests from different sources will get munged together. If
- * necessary, the readahead window can be expanded in either direction to a
- * more convenient alighment for RPC efficiency or to make storage in the cache
- * feasible.
+ * Multiple I/O requests from different sources will get munged together.
*
* The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory.
+ * issue_read, is mandatory.
*
* The check_write_begin() operation can be provided to check for and flush
* conflicting writes once the folio is grabbed and locked. It is passed a
@@ -528,8 +712,6 @@ int netfs_write_begin(struct netfs_inode *ctx,
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
- DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
-
retry:
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
@@ -577,22 +759,13 @@ retry:
netfs_stat(&netfs_n_rh_write_begin);
trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
- /* Expand the request to meet caching requirements and download
- * preferences.
- */
- ractl._nr_pages = folio_nr_pages(folio);
- netfs_rreq_expand(rreq, &ractl);
-
/* Set up the output buffer */
- iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
- rreq->start, rreq->len);
-
- /* We hold the folio locks, so we can drop the references */
- folio_get(folio);
- while (readahead_folio(&ractl))
- ;
+ ret = netfs_create_singular_buffer(rreq, folio);
+ if (ret < 0)
+ goto error_put;
- ret = netfs_begin_read(rreq, true);
+ netfs_read_to_pagecache(rreq);
+ ret = netfs_wait_for_read(rreq);
if (ret < 0)
goto error;
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
@@ -652,10 +825,13 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
/* Set up the output buffer */
- iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
- rreq->start, rreq->len);
+ ret = netfs_create_singular_buffer(rreq, folio);
+ if (ret < 0)
+ goto error_put;
- ret = netfs_begin_read(rreq, true);
+ folioq_mark2(rreq->buffer, 0);
+ netfs_read_to_pagecache(rreq);
+ ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
return ret;
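
The new read path keeps its pagecache folios in a chain of folio_queue segments and points an iov_iter at that chain. A hedged sketch of building such a buffer for a single folio, mirroring netfs_create_singular_buffer() above (unwinding trimmed for brevity):

	/* Illustrative: one folio_queue segment holding one folio, with an
	 * iov_iter set up to read into it.
	 */
	static int example_single_folio_iter(struct folio *folio,
					     struct iov_iter *iter)
	{
		struct folio_queue *fq = kmalloc(sizeof(*fq), GFP_KERNEL);

		if (!fq)
			return -ENOMEM;
		folioq_init(fq);
		folioq_append(fq, folio);
		iov_iter_folio_queue(iter, ITER_DEST, fq, 0, 0,
				     folio_size(folio));
		return 0;
	}
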
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index ca53c5d1622e..b3910dfcb56d 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -13,91 +13,22 @@
#include <linux/pagevec.h>
#include "internal.h"
-/*
- * Determined write method. Adjust netfs_folio_traces if this is changed.
- */
-enum netfs_how_to_modify {
- NETFS_FOLIO_IS_UPTODATE, /* Folio is uptodate already */
- NETFS_JUST_PREFETCH, /* We have to read the folio anyway */
- NETFS_WHOLE_FOLIO_MODIFY, /* We're going to overwrite the whole folio */
- NETFS_MODIFY_AND_CLEAR, /* We can assume there is no data to be downloaded. */
- NETFS_STREAMING_WRITE, /* Store incomplete data in non-uptodate page. */
- NETFS_STREAMING_WRITE_CONT, /* Continue streaming write. */
- NETFS_FLUSH_CONTENT, /* Flush incompatible content. */
-};
-
-static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
+static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
- void *priv = folio_get_private(folio);
-
- if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+ if (netfs_group)
folio_attach_private(folio, netfs_get_group(netfs_group));
- else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
- folio_detach_private(folio);
}
-/*
- * Decide how we should modify a folio. We might be attempting to do
- * write-streaming, in which case we don't want to a local RMW cycle if we can
- * avoid it. If we're doing local caching or content crypto, we award that
- * priority over avoiding RMW. If the file is open readably, then we also
- * assume that we may want to read what we wrote.
- */
-static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
- struct file *file,
- struct folio *folio,
- void *netfs_group,
- size_t flen,
- size_t offset,
- size_t len,
- bool maybe_trouble)
+static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
- struct netfs_folio *finfo = netfs_folio_info(folio);
- struct netfs_group *group = netfs_folio_group(folio);
- loff_t pos = folio_pos(folio);
-
- _enter("");
-
- if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
- return NETFS_FLUSH_CONTENT;
-
- if (folio_test_uptodate(folio))
- return NETFS_FOLIO_IS_UPTODATE;
-
- if (pos >= ctx->zero_point)
- return NETFS_MODIFY_AND_CLEAR;
-
- if (!maybe_trouble && offset == 0 && len >= flen)
- return NETFS_WHOLE_FOLIO_MODIFY;
-
- if (file->f_mode & FMODE_READ)
- goto no_write_streaming;
-
- if (netfs_is_cache_enabled(ctx)) {
- /* We don't want to get a streaming write on a file that loses
- * caching service temporarily because the backing store got
- * culled.
- */
- goto no_write_streaming;
- }
+ void *priv = folio_get_private(folio);
- if (!finfo)
- return NETFS_STREAMING_WRITE;
-
- /* We can continue a streaming write only if it continues on from the
- * previous. If it overlaps, we must flush lest we suffer a partial
- * copy and disjoint dirty regions.
- */
- if (offset == finfo->dirty_offset + finfo->dirty_len)
- return NETFS_STREAMING_WRITE_CONT;
- return NETFS_FLUSH_CONTENT;
-
-no_write_streaming:
- if (finfo) {
- netfs_stat(&netfs_n_wh_wstream_conflict);
- return NETFS_FLUSH_CONTENT;
+ if (unlikely(priv != netfs_group)) {
+ if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+ else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
+ folio_detach_private(folio);
}
- return NETFS_JUST_PREFETCH;
}
/*
@@ -177,13 +108,10 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
.range_end = iocb->ki_pos + iter->count,
};
struct netfs_io_request *wreq = NULL;
- struct netfs_folio *finfo;
- struct folio *folio, *writethrough = NULL;
- enum netfs_how_to_modify howto;
- enum netfs_folio_trace trace;
+ struct folio *folio = NULL, *writethrough = NULL;
unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
- loff_t i_size, pos = iocb->ki_pos, from, to;
+ loff_t i_size, pos = iocb->ki_pos;
size_t max_chunk = mapping_max_folio_size(mapping);
bool maybe_trouble = false;
@@ -213,15 +141,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
}
do {
+ struct netfs_folio *finfo;
+ struct netfs_group *group;
+ unsigned long long fpos;
size_t flen;
size_t offset; /* Offset into pagecache folio */
size_t part; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
- ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
- if (unlikely(ret < 0))
- break;
-
offset = pos & (max_chunk - 1);
part = min(max_chunk - offset, iov_iter_count(iter));
@@ -247,7 +174,8 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
}
flen = folio_size(folio);
- offset = pos & (flen - 1);
+ fpos = folio_pos(folio);
+ offset = pos - fpos;
part = min_t(size_t, flen - offset, part);
/* Wait for writeback to complete. The writeback engine owns
@@ -265,71 +193,52 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
goto error_folio_unlock;
}
- /* See if we need to prefetch the area we're going to modify.
- * We need to do this before we get a lock on the folio in case
- * there's more than one writer competing for the same cache
- * block.
+ /* Decide how we should modify a folio. We might be attempting
+ * to do write-streaming, in which case we don't want to do a
+ * local RMW cycle if we can avoid it. If we're doing local
+ * caching or content crypto, we award that priority over
+ * avoiding RMW. If the file is open readably, then we also
+ * assume that we may want to read what we wrote.
*/
- howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
- flen, offset, part, maybe_trouble);
- _debug("howto %u", howto);
- switch (howto) {
- case NETFS_JUST_PREFETCH:
- ret = netfs_prefetch_for_write(file, folio, offset, part);
- if (ret < 0) {
- _debug("prefetch = %zd", ret);
- goto error_folio_unlock;
- }
- break;
- case NETFS_FOLIO_IS_UPTODATE:
- case NETFS_WHOLE_FOLIO_MODIFY:
- case NETFS_STREAMING_WRITE_CONT:
- break;
- case NETFS_MODIFY_AND_CLEAR:
- zero_user_segment(&folio->page, 0, offset);
- break;
- case NETFS_STREAMING_WRITE:
- ret = -EIO;
- if (WARN_ON(folio_get_private(folio)))
- goto error_folio_unlock;
- break;
- case NETFS_FLUSH_CONTENT:
- trace_netfs_folio(folio, netfs_flush_content);
- from = folio_pos(folio);
- to = from + folio_size(folio) - 1;
- folio_unlock(folio);
- folio_put(folio);
- ret = filemap_write_and_wait_range(mapping, from, to);
- if (ret < 0)
- goto error_folio_unlock;
- continue;
- }
-
- if (mapping_writably_mapped(mapping))
- flush_dcache_folio(folio);
-
- copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
-
- flush_dcache_folio(folio);
-
- /* Deal with a (partially) failed copy */
- if (copied == 0) {
- ret = -EFAULT;
- goto error_folio_unlock;
+ finfo = netfs_folio_info(folio);
+ group = netfs_folio_group(folio);
+
+ if (unlikely(group != netfs_group) &&
+ group != NETFS_FOLIO_COPY_TO_CACHE)
+ goto flush_content;
+
+ if (folio_test_uptodate(folio)) {
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ netfs_set_group(folio, netfs_group);
+ trace_netfs_folio(folio, netfs_folio_is_uptodate);
+ goto copied;
}
- trace = (enum netfs_folio_trace)howto;
- switch (howto) {
- case NETFS_FOLIO_IS_UPTODATE:
- case NETFS_JUST_PREFETCH:
- netfs_set_group(folio, netfs_group);
- break;
- case NETFS_MODIFY_AND_CLEAR:
+ /* If the page is above the zero-point then we assume that the
+ * server would just return a block of zeros or a short read if
+ * we try to read it.
+ */
+ if (fpos >= ctx->zero_point) {
+ zero_user_segment(&folio->page, 0, offset);
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
zero_user_segment(&folio->page, offset + copied, flen);
- netfs_set_group(folio, netfs_group);
+ __netfs_set_group(folio, netfs_group);
folio_mark_uptodate(folio);
- break;
- case NETFS_WHOLE_FOLIO_MODIFY:
+ trace_netfs_folio(folio, netfs_modify_and_clear);
+ goto copied;
+ }
+
+ /* See if we can write a whole folio in one go. */
+ if (!maybe_trouble && offset == 0 && part >= flen) {
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
if (unlikely(copied < part)) {
maybe_trouble = true;
iov_iter_revert(iter, copied);
@@ -337,16 +246,53 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_unlock(folio);
goto retry;
}
- netfs_set_group(folio, netfs_group);
+ __netfs_set_group(folio, netfs_group);
folio_mark_uptodate(folio);
- break;
- case NETFS_STREAMING_WRITE:
+ trace_netfs_folio(folio, netfs_whole_folio_modify);
+ goto copied;
+ }
+
+ /* We don't want to do a streaming write on a file that loses
+ * caching service temporarily because the backing store got
+ * culled, and we don't really want a streaming write on a file
+ * that's open for reading either, as ->read_folio() then has to
+ * be able to flush it.
+ */
+ if ((file->f_mode & FMODE_READ) ||
+ netfs_is_cache_enabled(ctx)) {
+ if (finfo) {
+ netfs_stat(&netfs_n_wh_wstream_conflict);
+ goto flush_content;
+ }
+ ret = netfs_prefetch_for_write(file, folio, offset, part);
+ if (ret < 0) {
+ _debug("prefetch = %zd", ret);
+ goto error_folio_unlock;
+ }
+ /* Note that copy-to-cache may have been set. */
+
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
+ netfs_set_group(folio, netfs_group);
+ trace_netfs_folio(folio, netfs_just_prefetch);
+ goto copied;
+ }
+
+ if (!finfo) {
+ ret = -EIO;
+ if (WARN_ON(folio_get_private(folio)))
+ goto error_folio_unlock;
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
if (offset == 0 && copied == flen) {
- netfs_set_group(folio, netfs_group);
+ __netfs_set_group(folio, netfs_group);
folio_mark_uptodate(folio);
- trace = netfs_streaming_filled_page;
- break;
+ trace_netfs_folio(folio, netfs_streaming_filled_page);
+ goto copied;
}
+
finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
if (!finfo) {
iov_iter_revert(iter, copied);
@@ -358,9 +304,18 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
finfo->dirty_len = copied;
folio_attach_private(folio, (void *)((unsigned long)finfo |
NETFS_FOLIO_INFO));
- break;
- case NETFS_STREAMING_WRITE_CONT:
- finfo = netfs_folio_info(folio);
+ trace_netfs_folio(folio, netfs_streaming_write);
+ goto copied;
+ }
+
+ /* We can continue a streaming write only if it continues on
+ * from the previous. If it overlaps, we must flush lest we
+ * suffer a partial copy and disjoint dirty regions.
+ */
+ if (offset == finfo->dirty_offset + finfo->dirty_len) {
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+ if (unlikely(copied == 0))
+ goto copy_failed;
finfo->dirty_len += copied;
if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
if (finfo->netfs_group)
@@ -369,17 +324,25 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_detach_private(folio);
folio_mark_uptodate(folio);
kfree(finfo);
- trace = netfs_streaming_cont_filled_page;
+ trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
+ } else {
+ trace_netfs_folio(folio, netfs_streaming_write_cont);
}
- break;
- default:
- WARN(true, "Unexpected modify type %u ix=%lx\n",
- howto, folio->index);
- ret = -EIO;
- goto error_folio_unlock;
+ goto copied;
}
- trace_netfs_folio(folio, trace);
+ /* Incompatible write; flush the folio and try again. */
+ flush_content:
+ trace_netfs_folio(folio, netfs_flush_content);
+ folio_unlock(folio);
+ folio_put(folio);
+ ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
+ if (ret < 0)
+ goto error_folio_unlock;
+ continue;
+
+ copied:
+ flush_dcache_folio(folio);
/* Update the inode size if we moved the EOF marker */
pos += copied;
@@ -401,12 +364,22 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_put(folio);
folio = NULL;
+ ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
+ if (unlikely(ret < 0))
+ break;
+
cond_resched();
} while (iov_iter_count(iter));
out:
- if (likely(written) && ctx->ops->post_modify)
- ctx->ops->post_modify(inode);
+ if (likely(written)) {
+ /* Set indication that ctime and mtime got updated in case
+ * close is deferred.
+ */
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags);
+ if (unlikely(ctx->ops->post_modify))
+ ctx->ops->post_modify(inode);
+ }
if (unlikely(wreq)) {
ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
@@ -421,6 +394,8 @@ out:
_leave(" = %zd [%zd]", written, ret);
return written ? written : ret;
+copy_failed:
+ ret = -EFAULT;
error_folio_unlock:
folio_unlock(folio);
folio_put(folio);
@@ -577,6 +552,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
netfs_set_group(folio, netfs_group);
file_update_time(file);
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
if (ictx->ops->post_modify)
ictx->ops->post_modify(inode);
ret = VM_FAULT_LOCKED;
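
The rewritten write loop above replaces the netfs_how_to_modify() enum with inline decisions. The streaming-write continuation rule is the subtle one: a new write may only extend an existing non-uptodate region if it starts exactly where the previous dirty span ends, because a single (dirty_offset, dirty_len) pair cannot describe disjoint dirty spans. A hedged sketch of that predicate (illustrative helper, not part of the patch):

	/* Can this write continue the streaming region already attached
	 * to the folio? A gap or overlap forces a flush instead.
	 */
	static bool example_can_continue_stream(const struct netfs_folio *finfo,
						size_t offset)
	{
		return finfo && offset == finfo->dirty_offset + finfo->dirty_len;
	}
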
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 10a1e4da6bda..b1a66a6e6bc2 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -16,6 +16,143 @@
#include <linux/netfs.h>
#include "internal.h"
+static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ size_t rsize;
+
+ rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
+ subreq->len = rsize;
+
+ if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
+ size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ rreq->io_streams[0].sreq_max_segs);
+
+ if (limit < rsize) {
+ subreq->len = limit;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+ }
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+ subreq->io_iter = rreq->iter;
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ iov_iter_advance(&rreq->iter, subreq->len);
+}
+
+/*
+ * Perform a read to a buffer from the server, slicing up the region to be read
+ * according to the network rsize.
+ */
+static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
+{
+ unsigned long long start = rreq->start;
+ ssize_t size = rreq->len;
+ int ret = 0;
+
+ atomic_set(&rreq->nr_outstanding, 1);
+
+ do {
+ struct netfs_io_subrequest *subreq;
+ ssize_t slice;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+ subreq->len = size;
+
+ atomic_inc(&rreq->nr_outstanding);
+ spin_lock_bh(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+ subreq->prev_donated = rreq->prev_donated;
+ rreq->prev_donated = 0;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ spin_unlock_bh(&rreq->lock);
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0) {
+ atomic_dec(&rreq->nr_outstanding);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ break;
+ }
+ }
+
+ netfs_prepare_dio_read_iterator(subreq);
+ slice = subreq->len;
+ rreq->netfs_ops->issue_read(subreq);
+
+ size -= slice;
+ start += slice;
+ rreq->submitted += slice;
+
+ if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
+ test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
+ break;
+ cond_resched();
+ } while (size > 0);
+
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_terminated(rreq, false);
+ return ret;
+}
+
+/*
+ * Perform a read to an application buffer, bypassing the pagecache and the
+ * local disk cache.
+ */
+static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+{
+ int ret;
+
+ _enter("R=%x %llx-%llx",
+ rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+ if (rreq->len == 0) {
+ pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+ return -EIO;
+ }
+
+ // TODO: Use bounce buffer if requested
+
+ inode_dio_begin(rreq->inode);
+
+ ret = netfs_dispatch_unbuffered_reads(rreq);
+
+ if (!rreq->submitted) {
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+ inode_dio_end(rreq->inode);
+ ret = 0;
+ goto out;
+ }
+
+ if (sync) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+ wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+
+ ret = rreq->error;
+ if (ret == 0 && rreq->submitted < rreq->len &&
+ rreq->origin != NETFS_DIO_READ) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ } else {
+ ret = -EIOCBQUEUED;
+ }
+
+out:
+ _leave(" = %d", ret);
+ return ret;
+}
+
/**
* netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
* @iocb: The I/O control descriptor describing the read
@@ -31,7 +168,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
struct netfs_io_request *rreq;
ssize_t ret;
size_t orig_count = iov_iter_count(iter);
- bool async = !is_sync_kiocb(iocb);
+ bool sync = is_sync_kiocb(iocb);
_enter("");
@@ -78,13 +215,13 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
// TODO: Set up bounce buffer if needed
- if (async)
+ if (!sync)
rreq->iocb = iocb;
- ret = netfs_begin_read(rreq, is_sync_kiocb(iocb));
+ ret = netfs_unbuffered_read(rreq, sync);
if (ret < 0)
goto out; /* May be -EIOCBQUEUED */
- if (!async) {
+ if (sync) {
// TODO: Copy from bounce buffer
iocb->ki_pos += rreq->transferred;
ret = rreq->transferred;
@@ -94,8 +231,6 @@ out:
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
if (ret > 0)
orig_count -= ret;
- if (ret != -EIOCBQUEUED)
- iov_iter_revert(iter, orig_count - iov_iter_count(iter));
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
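
netfs_unbuffered_read() above branches on is_sync_kiocb(): synchronous callers sleep on NETFS_RREQ_IN_PROGRESS, while asynchronous ones get -EIOCBQUEUED back and are completed later via the iocb. A condensed, hedged sketch of the shape of that contract (submit_io() and wait_for_io() are hypothetical stand-ins for the dispatch and wait logic above):

	/* Illustrative sync-or-queued I/O entry point. */
	static ssize_t example_read(struct kiocb *iocb)
	{
		bool sync = is_sync_kiocb(iocb);
		ssize_t ret;

		ret = submit_io(iocb);			/* hypothetical */
		if (ret < 0)
			return ret;
		if (!sync)
			return -EIOCBQUEUED;	/* ->ki_complete() fires later */
		return wait_for_io(iocb);		/* hypothetical */
	}
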
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 7773f3d855a9..c9f0ed24cb7b 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
@@ -22,16 +23,10 @@
/*
* buffered_read.c
*/
-void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len);
/*
- * io.c
- */
-int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
-
-/*
* main.c
*/
extern unsigned int netfs_debug;
@@ -63,6 +58,11 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+ bool needs_put);
+struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
+void netfs_clear_buffer(struct netfs_io_request *rreq);
+void netfs_reset_iter(struct netfs_io_subrequest *subreq);
/*
* objects.c
@@ -84,6 +84,28 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
}
/*
+ * read_collect.c
+ */
+void netfs_read_termination_worker(struct work_struct *work);
+void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);
+
+/*
+ * read_pgpriv2.c
+ */
+void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
+ struct netfs_io_request *rreq,
+ struct folio_queue *folioq,
+ int slot);
+void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
+
+/*
+ * read_retry.c
+ */
+void netfs_retry_reads(struct netfs_io_request *rreq);
+void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
+
+/*
* stats.c
*/
#ifdef CONFIG_NETFS_STATS
@@ -110,6 +132,7 @@ extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
+extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
@@ -117,6 +140,9 @@ extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
+extern atomic_t netfs_n_wb_lock_skip;
+extern atomic_t netfs_n_wb_lock_wait;
+extern atomic_t netfs_n_folioq;
int netfs_stats_show(struct seq_file *m, void *v);
@@ -150,7 +176,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
loff_t start,
enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
- struct netfs_io_subrequest *subreq);
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source);
+void netfs_issue_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream);
int netfs_advance_write(struct netfs_io_request *wreq,
struct netfs_io_stream *stream,
loff_t start, size_t len, bool to_eof);
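
The stats counters declared above follow the usual pattern behind netfs_stat(): plain atomic counters that compile away entirely when CONFIG_NETFS_STATS is off, so the hot paths carry no cost. A simplified, hedged sketch of that pattern (the real definitions live in internal.h and stats.c; example_stat() is illustrative):

	#ifdef CONFIG_NETFS_STATS
	static inline void example_stat(atomic_t *stat)
	{
		atomic_inc(stat);	/* cheap, lockless bump */
	}
	#else
	#define example_stat(x) do {} while (0)	/* compiled out */
	#endif
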
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
deleted file mode 100644
index d6ada4eba744..000000000000
--- a/fs/netfs/io.c
+++ /dev/null
@@ -1,804 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Network filesystem high-level read support.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/sched/mm.h>
-#include <linux/task_io_accounting_ops.h>
-#include "internal.h"
-
-/*
- * Clear the unread part of an I/O request.
- */
-static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
-{
- iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq = priv;
-
- netfs_subreq_terminated(subreq, transferred_or_error, was_async);
-}
-
-/*
- * Issue a read against the cache.
- * - Eats the caller's ref on subreq.
- */
-static void netfs_read_from_cache(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq,
- enum netfs_read_from_hole read_hole)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
-
- netfs_stat(&netfs_n_rh_read);
- cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
- netfs_cache_read_terminated, subreq);
-}
-
-/*
- * Fill a subrequest region with zeroes.
- */
-static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq)
-{
- netfs_stat(&netfs_n_rh_zero);
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_subreq_terminated(subreq, 0, false);
-}
-
-/*
- * Ask the netfs to issue a read request to the server for us.
- *
- * The netfs is expected to read from subreq->pos + subreq->transferred to
- * subreq->pos + subreq->len - 1. It may not backtrack and write data into the
- * buffer prior to the transferred point as it might clobber dirty data
- * obtained from the cache.
- *
- * Alternatively, the netfs is allowed to indicate one of two things:
- *
- * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
- * make progress.
- *
- * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
- * cleared.
- */
-static void netfs_read_from_server(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq)
-{
- netfs_stat(&netfs_n_rh_download);
-
- if (rreq->origin != NETFS_DIO_READ &&
- iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
- pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
- rreq->debug_id, subreq->debug_index,
- iov_iter_count(&subreq->io_iter), subreq->len,
- subreq->transferred, subreq->flags);
- rreq->netfs_ops->issue_read(subreq);
-}
-
-/*
- * Release those waiting.
- */
-static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
-{
- trace_netfs_rreq(rreq, netfs_rreq_trace_done);
- netfs_clear_subrequests(rreq, was_async);
- netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
-}
-
-/*
- * [DEPRECATED] Deal with the completion of writing the data to the cache. We
- * have to clear the PG_fscache bits on the folios involved and release the
- * caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq;
- struct folio *folio;
- pgoff_t unlocked = 0;
- bool have_unlocked = false;
-
- rcu_read_lock();
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
- xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
- if (xas_retry(&xas, folio))
- continue;
-
- /* We might have multiple writes from the same huge
- * folio, but we mustn't unlock a folio more than once.
- */
- if (have_unlocked && folio->index <= unlocked)
- continue;
- unlocked = folio_next_index(folio) - 1;
- trace_netfs_folio(folio, netfs_folio_trace_end_copy);
- folio_end_private_2(folio);
- have_unlocked = true;
- }
- }
-
- rcu_read_unlock();
- netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async) /* [DEPRECATED] */
-{
- struct netfs_io_subrequest *subreq = priv;
- struct netfs_io_request *rreq = subreq->rreq;
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- netfs_stat(&netfs_n_rh_write_failed);
- trace_netfs_failure(rreq, subreq, transferred_or_error,
- netfs_fail_copy_to_cache);
- } else {
- netfs_stat(&netfs_n_rh_write_done);
- }
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
- /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_copy_ops))
- netfs_rreq_unmark_after_write(rreq, was_async);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
-}
-
-/*
- * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
- * from the caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
- struct netfs_io_subrequest *subreq, *next, *p;
- struct iov_iter iter;
- int ret;
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
-
- /* We don't want terminating writes trying to wake us up whilst we're
- * still going through the list.
- */
- atomic_inc(&rreq->nr_copy_ops);
-
- list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
- if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
- list_del_init(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false,
- netfs_sreq_trace_put_no_copy);
- }
- }
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- /* Amalgamate adjacent writes */
- while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- next = list_next_entry(subreq, rreq_link);
- if (next->start != subreq->start + subreq->len)
- break;
- subreq->len += next->len;
- list_del_init(&next->rreq_link);
- netfs_put_subrequest(next, false,
- netfs_sreq_trace_put_merged);
- }
-
- ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
- subreq->len, rreq->i_size, true);
- if (ret < 0) {
- trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
- continue;
- }
-
- iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
- subreq->start, subreq->len);
-
- atomic_inc(&rreq->nr_copy_ops);
- netfs_stat(&netfs_n_rh_write);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write);
- cres->ops->write(cres, subreq->start, &iter,
- netfs_rreq_copy_terminated, subreq);
- }
-
- /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_copy_ops))
- netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
-{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
-
- netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
-{
- rreq->work.func = netfs_rreq_write_to_cache_work;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
-}
-
-/*
- * Handle a short read.
- */
-static void netfs_rreq_short_read(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq)
-{
- __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
- __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
-
- netfs_stat(&netfs_n_rh_short_read);
- trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
-
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
- atomic_inc(&rreq->nr_outstanding);
- if (subreq->source == NETFS_READ_FROM_CACHE)
- netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
- else
- netfs_read_from_server(rreq, subreq);
-}
-
-/*
- * Reset the subrequest iterator prior to resubmission.
- */
-static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq)
-{
- size_t remaining = subreq->len - subreq->transferred;
- size_t count = iov_iter_count(&subreq->io_iter);
-
- if (count == remaining)
- return;
-
- _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x",
- rreq->debug_id, subreq->debug_index,
- iov_iter_count(&subreq->io_iter), subreq->transferred,
- subreq->len, rreq->i_size,
- subreq->io_iter.iter_type);
-
- if (count < remaining)
- iov_iter_revert(&subreq->io_iter, remaining - count);
- else
- iov_iter_advance(&subreq->io_iter, count - remaining);
-}
-
-/*
- * Resubmit any short or failed operations. Returns true if we got the rreq
- * ref back.
- */
-static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
-{
- struct netfs_io_subrequest *subreq;
-
- WARN_ON(in_interrupt());
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
-
- /* We don't want terminating submissions trying to wake us up whilst
- * we're still going through the list.
- */
- atomic_inc(&rreq->nr_outstanding);
-
- __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- if (subreq->error) {
- if (subreq->source != NETFS_READ_FROM_CACHE)
- break;
- subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
- subreq->error = 0;
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- netfs_stat(&netfs_n_rh_download_instead);
- trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- atomic_inc(&rreq->nr_outstanding);
- netfs_reset_subreq_iter(rreq, subreq);
- netfs_read_from_server(rreq, subreq);
- } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
- __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
- netfs_reset_subreq_iter(rreq, subreq);
- netfs_rreq_short_read(rreq, subreq);
- }
- }
-
- /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- return true;
-
- wake_up_var(&rreq->nr_outstanding);
- return false;
-}
-
-/*
- * Check to see if the data read is still valid.
- */
-static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
-{
- struct netfs_io_subrequest *subreq;
-
- if (!rreq->netfs_ops->is_still_valid ||
- rreq->netfs_ops->is_still_valid(rreq))
- return;
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- if (subreq->source == NETFS_READ_FROM_CACHE) {
- subreq->error = -ESTALE;
- __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- }
- }
-}
-
-/*
- * Determine how much we can admit to having read from a DIO read.
- */
-static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
-{
- struct netfs_io_subrequest *subreq;
- unsigned int i;
- size_t transferred = 0;
-
- for (i = 0; i < rreq->direct_bv_count; i++) {
- flush_dcache_page(rreq->direct_bv[i].bv_page);
- // TODO: cifs marks pages in the destination buffer
- // dirty under some circumstances after a read. Do we
- // need to do that too?
- set_page_dirty(rreq->direct_bv[i].bv_page);
- }
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- if (subreq->error || subreq->transferred == 0)
- break;
- transferred += subreq->transferred;
- if (subreq->transferred < subreq->len ||
- test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
- break;
- }
-
- for (i = 0; i < rreq->direct_bv_count; i++)
- flush_dcache_page(rreq->direct_bv[i].bv_page);
-
- rreq->transferred = transferred;
- task_io_account_read(transferred);
-
- if (rreq->iocb) {
- rreq->iocb->ki_pos += transferred;
- if (rreq->iocb->ki_complete)
- rreq->iocb->ki_complete(
- rreq->iocb, rreq->error ? rreq->error : transferred);
- }
- if (rreq->netfs_ops->done)
- rreq->netfs_ops->done(rreq);
- inode_dio_end(rreq->inode);
-}
-
-/*
- * Assess the state of a read request and decide what to do next.
- *
- * Note that we could be in an ordinary kernel thread, on a workqueue or in
- * softirq context at this point. We inherit a ref from the caller.
- */
-static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
-{
- trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
-
-again:
- netfs_rreq_is_still_valid(rreq);
-
- if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
- test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
- if (netfs_rreq_perform_resubmissions(rreq))
- goto again;
- return;
- }
-
- if (rreq->origin != NETFS_DIO_READ)
- netfs_rreq_unlock_folios(rreq);
- else
- netfs_rreq_assess_dio(rreq);
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
-
- if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
- test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
- return netfs_rreq_write_to_cache(rreq);
-
- netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_work(struct work_struct *work)
-{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
- netfs_rreq_assess(rreq, false);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-static void netfs_rreq_terminated(struct netfs_io_request *rreq,
- bool was_async)
-{
- if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
- was_async) {
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_rreq_assess(rreq, was_async);
- }
-}
-
-/**
- * netfs_subreq_terminated - Note the termination of an I/O operation.
- * @subreq: The I/O request that has terminated.
- * @transferred_or_error: The amount of data transferred or an error code.
- * @was_async: The termination was asynchronous
- *
- * This tells the read helper that a contributory I/O operation has terminated,
- * one way or another, and that it should integrate the results.
- *
- * The caller indicates in @transferred_or_error the outcome of the operation,
- * supplying a positive value to indicate the number of bytes transferred, 0 to
- * indicate a failure to transfer anything that should be retried or a negative
- * error code. The helper will look after reissuing I/O operations as
- * appropriate and writing downloaded data to the cache.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
- */
-void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
- ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_request *rreq = subreq->rreq;
- int u;
-
- _enter("R=%x[%x]{%llx,%lx},%zd",
- rreq->debug_id, subreq->debug_index,
- subreq->start, subreq->flags, transferred_or_error);
-
- switch (subreq->source) {
- case NETFS_READ_FROM_CACHE:
- netfs_stat(&netfs_n_rh_read_done);
- break;
- case NETFS_DOWNLOAD_FROM_SERVER:
- netfs_stat(&netfs_n_rh_download_done);
- break;
- default:
- break;
- }
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- subreq->error = transferred_or_error;
- trace_netfs_failure(rreq, subreq, transferred_or_error,
- netfs_fail_read);
- goto failed;
- }
-
- if (WARN(transferred_or_error > subreq->len - subreq->transferred,
- "Subreq overread: R%x[%x] %zd > %zu - %zu",
- rreq->debug_id, subreq->debug_index,
- transferred_or_error, subreq->len, subreq->transferred))
- transferred_or_error = subreq->len - subreq->transferred;
-
- subreq->error = 0;
- subreq->transferred += transferred_or_error;
- if (subreq->transferred < subreq->len &&
- !test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
- goto incomplete;
-
-complete:
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
- set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
-
-out:
- trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- /* If we decrement nr_outstanding to 0, the ref belongs to us. */
- u = atomic_dec_return(&rreq->nr_outstanding);
- if (u == 0)
- netfs_rreq_terminated(rreq, was_async);
- else if (u == 1)
- wake_up_var(&rreq->nr_outstanding);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
- return;
-
-incomplete:
- if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
- netfs_clear_unread(subreq);
- subreq->transferred = subreq->len;
- goto complete;
- }
-
- if (transferred_or_error == 0) {
- if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
- if (rreq->origin != NETFS_DIO_READ)
- subreq->error = -ENODATA;
- goto failed;
- }
- } else {
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- }
-
- __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- goto out;
-
-failed:
- if (subreq->source == NETFS_READ_FROM_CACHE) {
- netfs_stat(&netfs_n_rh_read_failed);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
- } else {
- netfs_stat(&netfs_n_rh_download_failed);
- set_bit(NETFS_RREQ_FAILED, &rreq->flags);
- rreq->error = subreq->error;
- }
- goto out;
-}
-EXPORT_SYMBOL(netfs_subreq_terminated);
-
-static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
- loff_t i_size)
-{
- struct netfs_io_request *rreq = subreq->rreq;
- struct netfs_cache_resources *cres = &rreq->cache_resources;
-
- if (cres->ops)
- return cres->ops->prepare_read(subreq, i_size);
- if (subreq->start >= rreq->i_size)
- return NETFS_FILL_WITH_ZEROES;
- return NETFS_DOWNLOAD_FROM_SERVER;
-}
-
-/*
- * Work out what sort of subrequest the next one will be.
- */
-static enum netfs_io_source
-netfs_rreq_prepare_read(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq,
- struct iov_iter *io_iter)
-{
- enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
- struct netfs_inode *ictx = netfs_inode(rreq->inode);
- size_t lsize;
-
- _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
-
- if (rreq->origin != NETFS_DIO_READ) {
- source = netfs_cache_prepare_read(subreq, rreq->i_size);
- if (source == NETFS_INVALID_READ)
- goto out;
- }
-
- if (source == NETFS_DOWNLOAD_FROM_SERVER) {
- /* Call out to the netfs to let it shrink the request to fit
- * its own I/O sizes and boundaries. If it shrinks it here, it
- * will be called again to make simultaneous calls; if it wants
- * to make serial calls, it can indicate a short read and then
- * we will call it again.
- */
- if (rreq->origin != NETFS_DIO_READ) {
- if (subreq->start >= ictx->zero_point) {
- source = NETFS_FILL_WITH_ZEROES;
- goto set;
- }
- if (subreq->len > ictx->zero_point - subreq->start)
- subreq->len = ictx->zero_point - subreq->start;
-
- /* We limit buffered reads to the EOF, but let the
- * server deal with larger-than-EOF DIO/unbuffered
- * reads.
- */
- if (subreq->len > rreq->i_size - subreq->start)
- subreq->len = rreq->i_size - subreq->start;
- }
- if (rreq->rsize && subreq->len > rreq->rsize)
- subreq->len = rreq->rsize;
-
- if (rreq->netfs_ops->clamp_length &&
- !rreq->netfs_ops->clamp_length(subreq)) {
- source = NETFS_INVALID_READ;
- goto out;
- }
-
- if (subreq->max_nr_segs) {
- lsize = netfs_limit_iter(io_iter, 0, subreq->len,
- subreq->max_nr_segs);
- if (subreq->len > lsize) {
- subreq->len = lsize;
- trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
- }
- }
- }
-
-set:
- if (subreq->len > rreq->len)
- pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
- rreq->debug_id, subreq->debug_index,
- subreq->len, rreq->len);
-
- if (WARN_ON(subreq->len == 0)) {
- source = NETFS_INVALID_READ;
- goto out;
- }
-
- subreq->source = source;
- trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
-
- subreq->io_iter = *io_iter;
- iov_iter_truncate(&subreq->io_iter, subreq->len);
- iov_iter_advance(io_iter, subreq->len);
-out:
- subreq->source = source;
- trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
- return source;
-}
-
-/*
- * Slice off a piece of a read request and submit an I/O request for it.
- */
-static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
- struct iov_iter *io_iter)
-{
- struct netfs_io_subrequest *subreq;
- enum netfs_io_source source;
-
- subreq = netfs_alloc_subrequest(rreq);
- if (!subreq)
- return false;
-
- subreq->start = rreq->start + rreq->submitted;
- subreq->len = io_iter->count;
-
- _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
- /* Call out to the cache to find out what it can do with the remaining
- * subset. It tells us in subreq->flags what it decided should be done
- * and adjusts subreq->len down if the subset crosses a cache boundary.
- *
- * Then, when we hand it the subset, it can choose to take a subset of that
- * (the starts must coincide), in which case, we go around the loop
- * again and ask it to download the next piece.
- */
- source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
- if (source == NETFS_INVALID_READ)
- goto subreq_failed;
-
- atomic_inc(&rreq->nr_outstanding);
-
- rreq->submitted += subreq->len;
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- switch (source) {
- case NETFS_FILL_WITH_ZEROES:
- netfs_fill_with_zeroes(rreq, subreq);
- break;
- case NETFS_DOWNLOAD_FROM_SERVER:
- netfs_read_from_server(rreq, subreq);
- break;
- case NETFS_READ_FROM_CACHE:
- netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
- break;
- default:
- BUG();
- }
-
- return true;
-
-subreq_failed:
- rreq->error = subreq->error;
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
- return false;
-}
-
-/*
- * Begin the process of reading in a chunk of data, where that data may be
- * stitched together from multiple sources, including multiple servers and the
- * local cache.
- */
-int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
-{
- struct iov_iter io_iter;
- int ret;
-
- _enter("R=%x %llx-%llx",
- rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
-
- if (rreq->len == 0) {
- pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
- return -EIO;
- }
-
- if (rreq->origin == NETFS_DIO_READ)
- inode_dio_begin(rreq->inode);
-
- // TODO: Use bounce buffer if requested
- rreq->io_iter = rreq->iter;
-
- INIT_WORK(&rreq->work, netfs_rreq_work);
-
- /* Chop the read into slices according to what the cache and the netfs
- * want and submit each one.
- */
- netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
- atomic_set(&rreq->nr_outstanding, 1);
- io_iter = rreq->io_iter;
- do {
- _debug("submit %llx + %llx >= %llx",
- rreq->start, rreq->submitted, rreq->i_size);
- if (!netfs_rreq_submit_slice(rreq, &io_iter))
- break;
- if (test_bit(NETFS_SREQ_NO_PROGRESS, &rreq->flags))
- break;
- if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
- test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
- break;
-
- } while (rreq->submitted < rreq->len);
-
- if (!rreq->submitted) {
- netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
- if (rreq->origin == NETFS_DIO_READ)
- inode_dio_end(rreq->inode);
- ret = 0;
- goto out;
- }
-
- if (sync) {
- /* Keep nr_outstanding incremented so that the ref always
- * belongs to us, and the service code isn't punted off to a
- * random thread pool to process. Note that this might start
- * further work, such as writing to the cache.
- */
- wait_var_event(&rreq->nr_outstanding,
- atomic_read(&rreq->nr_outstanding) == 1);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_assess(rreq, false);
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
-
- ret = rreq->error;
- if (ret == 0) {
- if (rreq->origin == NETFS_DIO_READ) {
- ret = rreq->transferred;
- } else if (rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
- ret = -EIO;
- }
- }
- } else {
- /* If we decrement nr_outstanding to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_assess(rreq, false);
- ret = -EIOCBQUEUED;
- }
-
-out:
- return ret;
-}
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
index b781bbbf1d8d..72a435e5fc6d 100644
--- a/fs/netfs/iterator.c
+++ b/fs/netfs/iterator.c
@@ -188,9 +188,59 @@ static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offse
return min(span, max_size);
}
+/*
+ * Select the span of a folio queue iterator we're going to use. Limit it by
+ * both maximum size and maximum number of segments. Returns the size of the
+ * span in bytes.
+ */
+static size_t netfs_limit_folioq(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int nsegs = 0;
+ unsigned int slot = iter->folioq_slot;
+ size_t span = 0, n = iter->count;
+
+ if (WARN_ON(!iov_iter_is_folioq(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+ max_size = umin(max_size, n - start_offset);
+
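+ /* The cursor may already have run off the end of the current queue
+ * segment; if so, step round to the next one.
+ */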
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ start_offset += iter->iov_offset;
+ do {
+ size_t flen = folioq_folio_size(folioq, slot);
+
+ if (start_offset < flen) {
+ span += flen - start_offset;
+ nsegs++;
+ start_offset = 0;
+ } else {
+ start_offset -= flen;
+ }
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ } while (folioq);
+
+ return umin(span, max_size);
+}
+
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
size_t max_size, size_t max_segs)
{
+ if (iov_iter_is_folioq(iter))
+ return netfs_limit_folioq(iter, start_offset, max_size, max_segs);
if (iov_iter_is_bvec(iter))
return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
if (iov_iter_is_xarray(iter))
diff --git a/fs/netfs/locking.c b/fs/netfs/locking.c
index 75dc52a49b3a..21eab56ee2f9 100644
--- a/fs/netfs/locking.c
+++ b/fs/netfs/locking.c
@@ -19,25 +19,13 @@
* Must be called under a lock that serializes taking new references
* to i_dio_count, usually by inode->i_mutex.
*/
-static int inode_dio_wait_interruptible(struct inode *inode)
+static int netfs_inode_dio_wait_interruptible(struct inode *inode)
{
- if (!atomic_read(&inode->i_dio_count))
+ if (inode_dio_finished(inode))
return 0;
- wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
- DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
- for (;;) {
- prepare_to_wait(wq, &q.wq_entry, TASK_INTERRUPTIBLE);
- if (!atomic_read(&inode->i_dio_count))
- break;
- if (signal_pending(current))
- break;
- schedule();
- }
- finish_wait(wq, &q.wq_entry);
-
- return atomic_read(&inode->i_dio_count) ? -ERESTARTSYS : 0;
+ inode_dio_wait_interruptible(inode);
+ return !inode_dio_finished(inode) ? -ERESTARTSYS : 0;
}
/* Call with exclusively locked inode->i_rwsem */
@@ -46,7 +34,7 @@ static int netfs_block_o_direct(struct netfs_inode *ictx)
if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
return 0;
clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
- return inode_dio_wait_interruptible(&ictx->inode);
+ return netfs_inode_dio_wait_interruptible(&ictx->inode);
}
/**
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 5f0f438e5d21..6c7be1377ee0 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -36,13 +36,14 @@ DEFINE_SPINLOCK(netfs_proc_lock);
static const char *netfs_origins[nr__netfs_io_origin] = {
[NETFS_READAHEAD] = "RA",
[NETFS_READPAGE] = "RP",
+ [NETFS_READ_GAPS] = "RG",
[NETFS_READ_FOR_WRITE] = "RW",
- [NETFS_COPY_TO_CACHE] = "CC",
+ [NETFS_DIO_READ] = "DR",
[NETFS_WRITEBACK] = "WB",
[NETFS_WRITETHROUGH] = "WT",
[NETFS_UNBUFFERED_WRITE] = "UW",
- [NETFS_DIO_READ] = "DR",
[NETFS_DIO_WRITE] = "DW",
+ [NETFS_PGPRIV2_COPY_TO_CACHE] = "2C",
};
/*
@@ -62,7 +63,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
rreq = list_entry(v, struct netfs_io_request, proc_link);
seq_printf(m,
- "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx",
+ "%08x %s %3d %2lx %4ld %3d @%04llx %llx/%llx",
rreq->debug_id,
netfs_origins[rreq->origin],
refcount_read(&rreq->ref),
@@ -142,7 +143,7 @@ static int __init netfs_init(void)
error_fscache:
error_procfile:
- remove_proc_entry("fs/netfs", NULL);
+ remove_proc_subtree("fs/netfs", NULL);
error_proc:
mempool_exit(&netfs_subrequest_pool);
error_subreqpool:
@@ -159,7 +160,7 @@ fs_initcall(netfs_init);
static void __exit netfs_exit(void)
{
fscache_exit();
- remove_proc_entry("fs/netfs", NULL);
+ remove_proc_subtree("fs/netfs", NULL);
mempool_exit(&netfs_subrequest_pool);
kmem_cache_destroy(netfs_subrequest_slab);
mempool_exit(&netfs_request_pool);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index c1f321cf5999..0ad0982ce0e2 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,6 +8,100 @@
#include <linux/swap.h>
#include "internal.h"
+/*
+ * Append a folio to the rolling queue.
+ */
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+ bool needs_put)
+{
+ struct folio_queue *tail = rreq->buffer_tail;
+ unsigned int slot, order = folio_order(folio);
+
+ if (WARN_ON_ONCE(!rreq->buffer && tail) ||
+ WARN_ON_ONCE(rreq->buffer && !tail))
+ return -EIO;
+
+ if (!tail || folioq_full(tail)) {
+ tail = kmalloc(sizeof(*tail), GFP_NOFS);
+ if (!tail)
+ return -ENOMEM;
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(tail);
+ tail->prev = rreq->buffer_tail;
+ if (tail->prev)
+ tail->prev->next = tail;
+ rreq->buffer_tail = tail;
+ if (!rreq->buffer) {
+ rreq->buffer = tail;
+ iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+ }
+ rreq->buffer_tail_slot = 0;
+ }
+
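+ /* Extend the I/O iterator to cover the folio that's being appended. */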
+ rreq->io_iter.count += PAGE_SIZE << order;
+
+ slot = folioq_append(tail, folio);
+ /* Store the counter after setting the slot. */
+ smp_store_release(&rreq->buffer_tail_slot, slot);
+ return 0;
+}
+
+/*
+ * Delete the head of a rolling queue.
+ */
+struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
+{
+ struct folio_queue *head = wreq->buffer, *next = head->next;
+
+ if (next)
+ next->prev = NULL;
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(head);
+ wreq->buffer = next;
+ return next;
+}
+
+/*
+ * Clear out a rolling queue.
+ */
+void netfs_clear_buffer(struct netfs_io_request *rreq)
+{
+ struct folio_queue *p;
+
+ while ((p = rreq->buffer)) {
+ rreq->buffer = p->next;
+ for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+ if (!folio)
+ continue;
+ if (folioq_is_marked(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ folio_put(folio);
+ }
+ }
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(p);
+ }
+}
+
+/*
+ * Reset the subrequest iterator to refer just to the region remaining to be
+ * read. The iterator may or may not have been advanced by socket ops or
+ * extraction ops to an extent that may or may not match the amount actually
+ * read.
+ */
+void netfs_reset_iter(struct netfs_io_subrequest *subreq)
+{
+ struct iov_iter *io_iter = &subreq->io_iter;
+ size_t remain = subreq->len - subreq->transferred;
+
+ if (io_iter->count > remain)
+ iov_iter_advance(io_iter, io_iter->count - remain);
+ else if (io_iter->count < remain)
+ iov_iter_revert(io_iter, remain - io_iter->count);
+ iov_iter_truncate(&subreq->io_iter, remain);
+}
+
/**
* netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
* @mapping: The mapping the folio belongs to.
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 0294df70c3ff..31e388ec6e48 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -36,7 +36,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
memset(rreq, 0, kmem_cache_size(cache));
rreq->start = start;
rreq->len = len;
- rreq->upper_len = len;
rreq->origin = origin;
rreq->netfs_ops = ctx->ops;
rreq->mapping = mapping;
@@ -44,13 +43,23 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
rreq->wsize = INT_MAX;
+ rreq->io_streams[0].sreq_max_len = ULONG_MAX;
+ rreq->io_streams[0].sreq_max_segs = 0;
spin_lock_init(&rreq->lock);
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
INIT_LIST_HEAD(&rreq->subrequests);
- INIT_WORK(&rreq->work, NULL);
refcount_set(&rreq->ref, 1);
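+ /* Read-type requests are collected on completion by the read
+ * termination worker; all other origins use the write collection
+ * worker.
+ */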
+ if (origin == NETFS_READAHEAD ||
+ origin == NETFS_READPAGE ||
+ origin == NETFS_READ_GAPS ||
+ origin == NETFS_READ_FOR_WRITE ||
+ origin == NETFS_DIO_READ)
+ INIT_WORK(&rreq->work, netfs_read_termination_worker);
+ else
+ INIT_WORK(&rreq->work, netfs_write_collection_worker);
+
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
@@ -134,6 +143,7 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
+ netfs_clear_buffer(rreq);
if (atomic_dec_and_test(&ictx->io_count))
wake_up_var(&ictx->io_count);
@@ -155,7 +165,7 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
if (was_async) {
rreq->work.func = netfs_free_request;
if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
+ WARN_ON(1);
} else {
netfs_free_request(&rreq->work);
}
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
new file mode 100644
index 000000000000..b18c65ba5580
--- /dev/null
+++ b/fs/netfs/read_collect.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem read subrequest result collection, assessment and
+ * retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Clear the unread part of an I/O request.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+ netfs_reset_iter(subreq);
+ WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
+ iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
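+ /* If the data transferred already reaches i_size, the zeroed tail
+ * counts as having hit the EOF.
+ */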
+ if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
+}
+
+/*
+ * Flush, mark and unlock a folio that's now completely read. If we want to
+ * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
+ * dirty and let writeback handle it.
+ */
+static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
+ struct netfs_io_request *rreq,
+ struct folio_queue *folioq,
+ int slot)
+{
+ struct netfs_folio *finfo;
+ struct folio *folio = folioq_folio(folioq, slot);
+
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+
+ if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
+ if (finfo->netfs_group)
+ folio_change_private(folio, finfo->netfs_group);
+ else
+ folio_detach_private(folio);
+ kfree(finfo);
+ }
+
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+ folio_mark_dirty(folio);
+ }
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_read_done);
+ }
+ } else {
+ // TODO: Use of PG_private_2 is deprecated.
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+ netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
+ }
+
+ if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+ if (folio->index == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
+ _debug("no unlock");
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
+ folio_unlock(folio);
+ }
+ }
+}
+
+/*
+ * Unlock any folios that are now completely read. Returns true if the
+ * subrequest is removed from the list.
+ */
+static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
+{
+ struct netfs_io_subrequest *prev, *next;
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct folio_queue *folioq = subreq->curr_folioq;
+ size_t avail, prev_donated, next_donated, fsize, part, excess;
+ loff_t fpos, start;
+ loff_t fend;
+ int slot = subreq->curr_folioq_slot;
+
+ if (WARN(subreq->transferred > subreq->len,
+ "Subreq overread: R%x[%x] %zu > %zu",
+ rreq->debug_id, subreq->debug_index,
+ subreq->transferred, subreq->len))
+ subreq->transferred = subreq->len;
+
+next_folio:
+ fsize = PAGE_SIZE << subreq->curr_folio_order;
+ fpos = round_down(subreq->start + subreq->consumed, fsize);
+ fend = fpos + fsize;
+
+ if (WARN_ON_ONCE(!folioq) ||
+ WARN_ON_ONCE(!folioq_folio(folioq, slot)) ||
+ WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) {
+ pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n",
+ rreq->debug_id, subreq->debug_index,
+ subreq->start, subreq->start + subreq->transferred - 1,
+ subreq->consumed, subreq->transferred, subreq->len,
+ slot);
+ if (folioq) {
+ struct folio *folio = folioq_folio(folioq, slot);
+
+ pr_err("folioq: orders=%02x%02x%02x%02x\n",
+ folioq->orders[0], folioq->orders[1],
+ folioq->orders[2], folioq->orders[3]);
+ if (folio)
+ pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
+ fpos, fend - 1, folio_pos(folio), folio_order(folio),
+ folioq_folio_order(folioq, slot));
+ }
+ }
+
+donation_changed:
+ /* Try to consume the current folio if we've hit or passed the end of
+ * it. There's a possibility that this subreq doesn't start at the
+ * beginning of the folio, in which case we need to donate to/from the
+ * preceding subreq.
+ *
+ * We also need to include any potential donation back from the
+ * following subreq.
+ */
+ prev_donated = READ_ONCE(subreq->prev_donated);
+ next_donated = READ_ONCE(subreq->next_donated);
+ if (prev_donated || next_donated) {
+ spin_lock_bh(&rreq->lock);
+ prev_donated = subreq->prev_donated;
+ next_donated = subreq->next_donated;
+ subreq->start -= prev_donated;
+ subreq->len += prev_donated;
+ subreq->transferred += prev_donated;
+ prev_donated = subreq->prev_donated = 0;
+ if (subreq->transferred == subreq->len) {
+ subreq->len += next_donated;
+ subreq->transferred += next_donated;
+ next_donated = subreq->next_donated = 0;
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
+ spin_unlock_bh(&rreq->lock);
+ }
+
+ avail = subreq->transferred;
+ if (avail == subreq->len)
+ avail += next_donated;
+ start = subreq->start;
+ if (subreq->consumed == 0) {
+ start -= prev_donated;
+ avail += prev_donated;
+ } else {
+ start += subreq->consumed;
+ avail -= subreq->consumed;
+ }
+ part = umin(avail, fsize);
+
+ trace_netfs_progress(subreq, start, avail, part);
+
+ if (start + avail >= fend) {
+ if (fpos == start) {
+ /* Flush, unlock and mark for caching any folio we've just read. */
+ subreq->consumed = fend - subreq->start;
+ netfs_unlock_read_folio(subreq, rreq, folioq, slot);
+ folioq_mark2(folioq, slot);
+ if (subreq->consumed >= subreq->len)
+ goto remove_subreq;
+ } else if (fpos < start) {
+ excess = fend - subreq->start;
+
+ spin_lock_bh(&rreq->lock);
+ /* If we complete first on a folio split with the
+ * preceding subreq, donate to that subreq - otherwise
+ * we get the responsibility.
+ */
+ if (subreq->prev_donated != prev_donated) {
+ spin_unlock_bh(&rreq->lock);
+ goto donation_changed;
+ }
+
+ if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ spin_unlock_bh(&rreq->lock);
+ pr_err("Can't donate prior to front\n");
+ goto bad;
+ }
+
+ prev = list_prev_entry(subreq, rreq_link);
+ WRITE_ONCE(prev->next_donated, prev->next_donated + excess);
+ subreq->start += excess;
+ subreq->len -= excess;
+ subreq->transferred -= excess;
+ trace_netfs_donate(rreq, subreq, prev, excess,
+ netfs_trace_donate_tail_to_prev);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
+
+ if (subreq->consumed >= subreq->len)
+ goto remove_subreq_locked;
+ spin_unlock_bh(&rreq->lock);
+ } else {
+ pr_err("fpos > start\n");
+ goto bad;
+ }
+
+ /* Advance the rolling buffer to the next folio. */
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ slot = 0;
+ folioq = folioq->next;
+ subreq->curr_folioq = folioq;
+ }
+ subreq->curr_folioq_slot = slot;
+ if (folioq && folioq_folio(folioq, slot))
+ subreq->curr_folio_order = folioq->orders[slot];
+ if (!was_async)
+ cond_resched();
+ goto next_folio;
+ }
+
+ /* Deal with partial progress. */
+ if (subreq->transferred < subreq->len)
+ return false;
+
+ /* Donate the remaining downloaded data to one of the neighbouring
+ * subrequests. Note that we may race with them doing the same thing.
+ */
+ spin_lock_bh(&rreq->lock);
+
+ if (subreq->prev_donated != prev_donated ||
+ subreq->next_donated != next_donated) {
+ spin_unlock_bh(&rreq->lock);
+ cond_resched();
+ goto donation_changed;
+ }
+
+ /* Deal with the trickiest case: that this subreq is in the middle of a
+ * folio, not touching either edge, but finishes first. In such a
+ * case, we donate to the previous subreq, if there is one, so that the
+ * donation is only handled when that completes - and remove this
+ * subreq from the list.
+ *
+ * If the previous subreq finished first, we will have acquired their
+ * donation and should be able to unlock folios and/or donate nextwards.
+ */
+ if (!subreq->consumed &&
+ !prev_donated &&
+ !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ prev = list_prev_entry(subreq, rreq_link);
+ WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
+ trace_netfs_donate(rreq, subreq, prev, subreq->len,
+ netfs_trace_donate_to_prev);
+ subreq->start += subreq->len;
+ subreq->len = 0;
+ subreq->transferred = 0;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
+ goto remove_subreq_locked;
+ }
+
+ /* If we can't donate down the chain, donate up the chain instead. */
+ excess = subreq->len - subreq->consumed + next_donated;
+
+ if (!subreq->consumed)
+ excess += prev_donated;
+
+ if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+ rreq->prev_donated = excess;
+ trace_netfs_donate(rreq, subreq, NULL, excess,
+ netfs_trace_donate_to_deferred_next);
+ } else {
+ next = list_next_entry(subreq, rreq_link);
+ WRITE_ONCE(next->prev_donated, excess);
+ trace_netfs_donate(rreq, subreq, next, excess,
+ netfs_trace_donate_to_next);
+ }
+ trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next);
+ subreq->len = subreq->consumed;
+ subreq->transferred = subreq->consumed;
+ goto remove_subreq_locked;
+
+remove_subreq:
+ spin_lock_bh(&rreq->lock);
+remove_subreq_locked:
+ subreq->consumed = subreq->len;
+ list_del(&subreq->rreq_link);
+ spin_unlock_bh(&rreq->lock);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
+ return true;
+
+bad:
+ /* Errr... prev and next both donated to us, but insufficient to finish
+ * the folio.
+ */
+ printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n",
+ rreq->debug_id, subreq->debug_index,
+ subreq->start, subreq->start + subreq->transferred - 1,
+ subreq->consumed, subreq->transferred, subreq->len);
+ printk("folio: %llx-%llx\n", fpos, fend - 1);
+ printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated);
+ printk("s=%llx av=%zx part=%zx\n", start, avail, part);
+ BUG();
+}
+
+/*
+ * Do page flushing and suchlike after DIO.
+ */
+static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ unsigned int i;
+
+ /* Collect unbuffered reads and direct reads, adding up the transfer
+ * sizes until we find the first short or failed subrequest.
+ */
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ rreq->transferred += subreq->transferred;
+
+ if (subreq->transferred < subreq->len ||
+ test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
+ rreq->error = subreq->error;
+ break;
+ }
+ }
+
+ if (rreq->origin == NETFS_DIO_READ) {
+ for (i = 0; i < rreq->direct_bv_count; i++) {
+ flush_dcache_page(rreq->direct_bv[i].bv_page);
+ // TODO: cifs marks pages in the destination buffer
+ // dirty under some circumstances after a read. Do we
+ // need to do that too?
+ set_page_dirty(rreq->direct_bv[i].bv_page);
+ }
+ }
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += rreq->transferred;
+ if (rreq->iocb->ki_complete)
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+ if (rreq->origin == NETFS_DIO_READ)
+ inode_dio_end(rreq->inode);
+}
+
+/*
+ * Assess the state of a read request and decide what to do next.
+ *
+ * Note that we're in normal kernel thread context at this point, possibly
+ * running on a workqueue.
+ */
+static void netfs_rreq_assess(struct netfs_io_request *rreq)
+{
+ trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+
+ //netfs_rreq_is_still_valid(rreq);
+
+ if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) {
+ netfs_retry_reads(rreq);
+ return;
+ }
+
+ if (rreq->origin == NETFS_DIO_READ ||
+ rreq->origin == NETFS_READ_GAPS)
+ netfs_rreq_assess_dio(rreq);
+ task_io_account_read(rreq->transferred);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
+ clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+ netfs_clear_subrequests(rreq, false);
+ netfs_unlock_abandoned_read_pages(rreq);
+ if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)))
+ netfs_pgpriv2_write_to_the_cache(rreq);
+}
+
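+/*
+ * Workqueue function to collect the results of a read request whose final
+ * subrequest terminated in a context that couldn't do the work directly.
+ */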
+void netfs_read_termination_worker(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, work);
+ netfs_see_request(rreq, netfs_rreq_trace_see_work);
+ netfs_rreq_assess(rreq);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
+}
+
+/*
+ * Handle the completion of all outstanding I/O operations on a read request.
+ * We inherit a ref from the caller.
+ */
+void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
+{
+ if (!was_async)
+ return netfs_rreq_assess(rreq);
+ if (!work_pending(&rreq->work)) {
+ netfs_get_request(rreq, netfs_rreq_trace_get_work);
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
+ }
+}
+
+/**
+ * netfs_read_subreq_progress - Note progress of a read operation.
+ * @subreq: The read subrequest that has made progress.
+ * @was_async: True if we're in an asynchronous context.
+ *
+ * This tells the read side of netfs lib that a contributory I/O operation has
+ * made some progress and that it may be possible to unlock some folios.
+ *
+ * Before calling, the filesystem should update subreq->transferred to track
+ * the amount of data copied into the output buffer.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
+ bool was_async)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
+
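+ /* Only the buffered read origins maintain a rolling folio buffer, so
+ * only they can consume data early and progressively unlock folios.
+ */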
+ if (subreq->transferred > subreq->consumed &&
+ (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE)) {
+ netfs_consume_read_data(subreq, was_async);
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ }
+}
+EXPORT_SYMBOL(netfs_read_subreq_progress);
+
+/**
+ * netfs_read_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O request that has terminated.
+ * @error: Error code indicating type of completion.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates the outcome of the operation through @error, supplying
+ * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
+ * is set) or a negative error code. The helper will look after reissuing I/O
+ * operations as appropriate and writing downloaded data to the cache.
+ *
+ * Before calling, the filesystem should update subreq->transferred to track
+ * the amount of data copied into the output buffer.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
+ int error, bool was_async)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ switch (subreq->source) {
+ case NETFS_READ_FROM_CACHE:
+ netfs_stat(&netfs_n_rh_read_done);
+ break;
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download_done);
+ break;
+ default:
+ break;
+ }
+
+ if (rreq->origin != NETFS_DIO_READ) {
+ /* Collect buffered reads.
+ *
+ * If the read completed validly short, then we can clear the
+ * tail before going on to unlock the folios.
+ */
+ if (error == 0 && subreq->transferred < subreq->len &&
+ (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
+ test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
+ netfs_clear_unread(subreq);
+ subreq->transferred = subreq->len;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_clear);
+ }
+ if (subreq->transferred > subreq->consumed &&
+ (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE)) {
+ netfs_consume_read_data(subreq, was_async);
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ }
+ rreq->transferred += subreq->transferred;
+ }
+
+ /* Deal with retry requests, short reads and errors. If we retry
+ * but don't make progress, we abandon the attempt.
+ */
+ if (!error && subreq->transferred < subreq->len) {
+ if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
+ } else {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_short);
+ if (subreq->transferred > subreq->consumed) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+ } else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+ } else {
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ error = -ENODATA;
+ }
+ }
+ }
+
+ subreq->error = error;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+ if (unlikely(error < 0)) {
+ trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
+ if (subreq->source == NETFS_READ_FROM_CACHE) {
+ netfs_stat(&netfs_n_rh_read_failed);
+ } else {
+ netfs_stat(&netfs_n_rh_download_failed);
+ set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+ rreq->error = subreq->error;
+ }
+ }
+
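+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */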
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_terminated(rreq, was_async);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+EXPORT_SYMBOL(netfs_read_subreq_terminated);
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
new file mode 100644
index 000000000000..ba5af89d37fa
--- /dev/null
+++ b/fs/netfs/read_pgpriv2.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Read with PG_private_2 [DEPRECATED].
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The
+ * third mark in the folio queue is used to indicate that this folio needs
+ * writing.
+ */
+void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
+ struct netfs_io_request *rreq,
+ struct folio_queue *folioq,
+ int slot)
+{
+ struct folio *folio = folioq_folio(folioq, slot);
+
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ folioq_mark3(folioq, slot);
+}
+
+/*
+ * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
+ * unrecoverable error.
+ */
+static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
+{
+ struct folio *folio;
+ int slot;
+
+ while (folioq) {
+ if (!folioq->marks3) {
+ folioq = folioq->next;
+ continue;
+ }
+
+ slot = __ffs(folioq->marks3);
+ folio = folioq_folio(folioq, slot);
+
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
+ folio_end_private_2(folio);
+ folioq_unmark3(folioq, slot);
+ }
+}
+
+/*
+ * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
+ */
+static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
+{
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ size_t fsize = folio_size(folio), flen = fsize;
+ loff_t fpos = folio_pos(folio), i_size;
+ bool to_eof = false;
+
+ _enter("");
+
+ /* netfs_perform_write() may shift i_size around the page or from out
+ * of the page to beyond it, but cannot move i_size into or through the
+ * page since we have it locked.
+ */
+ i_size = i_size_read(wreq->inode);
+
+ if (fpos >= i_size) {
+ /* mmap beyond eof. */
+ _debug("beyond eof");
+ folio_end_private_2(folio);
+ return 0;
+ }
+
+ if (fpos + fsize > wreq->i_size)
+ wreq->i_size = i_size;
+
+ if (flen > i_size - fpos) {
+ flen = i_size - fpos;
+ to_eof = true;
+ } else if (flen == i_size - fpos) {
+ to_eof = true;
+ }
+
+ _debug("folio %zx %zx", flen, fsize);
+
+ trace_netfs_folio(folio, netfs_folio_trace_store_copy);
+
+ /* Attach the folio to the rolling buffer. */
+ if (netfs_buffer_append_folio(wreq, folio, false) < 0)
+ return -ENOMEM;
+
+ cache->submit_extendable_to = fsize;
+ cache->submit_off = 0;
+ cache->submit_len = flen;
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ do {
+ ssize_t part;
+
+ wreq->io_iter.iov_offset = cache->submit_off;
+
+ atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
+ cache->submit_extendable_to = fsize - cache->submit_off;
+ part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
+ cache->submit_len, to_eof);
+ cache->submit_off += part;
+ if (part > cache->submit_len)
+ cache->submit_len = 0;
+ else
+ cache->submit_len -= part;
+ } while (cache->submit_len > 0);
+
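+ /* The folio is now fully claimed by subrequests; reset the offset and
+ * step the main iterator past it.
+ */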
+ wreq->io_iter.iov_offset = 0;
+ iov_iter_advance(&wreq->io_iter, fsize);
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (flen < fsize)
+ netfs_issue_write(wreq, cache);
+
+ _leave(" = 0");
+ return 0;
+}
+
+/*
+ * [DEPRECATED] Go through the buffer and write any folios that are marked with
+ * the third mark to the cache.
+ */
+void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_io_request *wreq;
+ struct folio_queue *folioq;
+ struct folio *folio;
+ int error = 0;
+ int slot = 0;
+
+ _enter("");
+
+ if (!fscache_resources_valid(&rreq->cache_resources))
+ goto couldnt_start;
+
+ /* Need the first folio to be able to set up the op. */
+ for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
+ if (folioq->marks3) {
+ slot = __ffs(folioq->marks3);
+ break;
+ }
+ }
+ if (!folioq)
+ return;
+ folio = folioq_folio(folioq, slot);
+
+ wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
+ NETFS_PGPRIV2_COPY_TO_CACHE);
+ if (IS_ERR(wreq)) {
+ _leave(" [create %ld]", PTR_ERR(wreq));
+ goto couldnt_start;
+ }
+
+ trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
+ netfs_stat(&netfs_n_wh_copy_to_cache);
+
+ for (;;) {
+ error = netfs_pgpriv2_copy_folio(wreq, folio);
+ if (error < 0)
+ break;
+
+ folioq_unmark3(folioq, slot);
+ if (!folioq->marks3) {
+ folioq = folioq->next;
+ if (!folioq)
+ break;
+ }
+
+ slot = __ffs(folioq->marks3);
+ folio = folioq_folio(folioq, slot);
+ }
+
+ netfs_issue_write(wreq, &wreq->io_streams[1]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ _leave(" = %d", error);
+couldnt_start:
+ netfs_pgpriv2_cancel(rreq->buffer);
+}
+
+/*
+ * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
+ * copying.
+ */
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
+{
+ struct folio_queue *folioq = wreq->buffer;
+ unsigned long long collected_to = wreq->collected_to;
+ unsigned int slot = wreq->buffer_head_slot;
+ bool made_progress = false;
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = netfs_delete_buffer_head(wreq);
+ slot = 0;
+ }
+
+ for (;;) {
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ size_t fsize, flen;
+
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_private_2(folio),
+ "R=%08x: folio %lx is not marked private_2\n",
+ wreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+
+ fpos = folio_pos(folio);
+ fsize = folio_size(folio);
+ flen = fsize;
+
+ fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
+
+ trace_netfs_collect_folio(wreq, folio, fend, collected_to);
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+ folio_end_private_2(folio);
+ wreq->cleaned_to = fpos + fsize;
+ made_progress = true;
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (READ_ONCE(wreq->buffer_tail) == folioq)
+ break;
+ folioq = netfs_delete_buffer_head(wreq);
+ slot = 0;
+ }
+
+ if (fpos + fsize >= collected_to)
+ break;
+ }
+
+ wreq->buffer = folioq;
+ wreq->buffer_head_slot = slot;
+ return made_progress;
+}
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
new file mode 100644
index 000000000000..0350592ea804
--- /dev/null
+++ b/fs/netfs/read_retry.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem read subrequest retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "internal.h"
+
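+/*
+ * Reissue a read subrequest, pointing the folio queue cursor back at the
+ * iterator's position and getting a ref to cover the new I/O.
+ */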
+static void netfs_reissue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct iov_iter *io_iter = &subreq->io_iter;
+
+ if (iov_iter_is_folioq(io_iter)) {
+ subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
+ subreq->curr_folioq_slot = io_iter->folioq_slot;
+ subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
+ }
+
+ atomic_inc(&rreq->nr_outstanding);
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ subreq->rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Go through the list of failed/short reads, retrying all retryable ones. We
+ * need to switch failed cache reads to network downloads.
+ */
+static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream0 = &rreq->io_streams[0];
+ LIST_HEAD(sublist);
+ LIST_HEAD(queue);
+
+ _enter("R=%x", rreq->debug_id);
+
+ if (list_empty(&rreq->subrequests))
+ return;
+
+ if (rreq->netfs_ops->retry_request)
+ rreq->netfs_ops->retry_request(rreq, NULL);
+
+ /* If there's no renegotiation to do, just resend each retryable subreq
+ * up to the first permanently failed one.
+ */
+ if (!rreq->netfs_ops->prepare_read &&
+ !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
+ struct netfs_io_subrequest *subreq;
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ netfs_reset_iter(subreq);
+ netfs_reissue_read(rreq, subreq);
+ }
+ }
+ return;
+ }
+
+ /* Okay, we need to renegotiate all the download requests and flip any
+ * failed cache reads over to being download requests and negotiate
+ * those also. All fully successful subreqs have been removed from the
+ * list and any spare data from those has been donated.
+ *
+ * What we do is decant the list and rebuild it one subreq at a time so
+ * that we don't end up with donations jumping over a gap we're busy
+ * populating with smaller subrequests. In the event that the subreq
+ * we just launched finishes before we insert the next subreq, it'll
+ * fill in rreq->prev_donated instead.
+ *
+ * Note: Alternatively, we could split the tail subrequest right before
+ * we reissue it and fix up the donations under lock.
+ */
+ list_splice_init(&rreq->subrequests, &queue);
+
+ do {
+ struct netfs_io_subrequest *from;
+ struct iov_iter source;
+ unsigned long long start, len;
+ size_t part, deferred_next_donated = 0;
+ bool boundary = false;
+
+ /* Go through the subreqs and find the next span of contiguous
+ * buffer that we then rejig (cifs, for example, needs the
+ * rsize renegotiating) and reissue.
+ */
+ from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
+ list_move_tail(&from->rreq_link, &sublist);
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
+ rreq->debug_id, from->debug_index,
+ from->start, from->consumed, from->transferred, from->len);
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ goto abandon;
+
+ deferred_next_donated = from->next_donated;
+ while ((subreq = list_first_entry_or_null(
+ &queue, struct netfs_io_subrequest, rreq_link))) {
+ if (subreq->start != start + len ||
+ subreq->transferred > 0 ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ list_move_tail(&subreq->rreq_link, &sublist);
+ len += subreq->len;
+ deferred_next_donated = subreq->next_donated;
+ if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
+ break;
+ }
+
+ _debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
+
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
+ /* Work through the sublist. */
+ while ((subreq = list_first_entry_or_null(
+ &sublist, struct netfs_io_subrequest, rreq_link))) {
+ list_del(&subreq->rreq_link);
+
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start - subreq->transferred;
+ subreq->len = len + subreq->transferred;
+ stream0->sreq_max_len = subreq->len;
+
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+
+ spin_lock_bh(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+ subreq->prev_donated += rreq->prev_donated;
+ rreq->prev_donated = 0;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+ spin_unlock_bh(&rreq->lock);
+
+ BUG_ON(!len);
+
+ /* Renegotiate max_len (rsize) */
+ if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ }
+
+ part = umin(len, stream0->sreq_max_len);
+ if (unlikely(rreq->io_streams[0].sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
+ subreq->len = subreq->transferred + part;
+ subreq->io_iter = source;
+ iov_iter_truncate(&subreq->io_iter, part);
+ iov_iter_advance(&source, part);
+ len -= part;
+ start += part;
+ if (!len) {
+ if (boundary)
+ __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+ subreq->next_donated = deferred_next_donated;
+ } else {
+ __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+ subreq->next_donated = 0;
+ }
+
+ netfs_reissue_read(rreq, subreq);
+ if (!len)
+ break;
+
+ /* If we ran out of subrequests, allocate another. */
+ if (list_empty(&sublist)) {
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq)
+ goto abandon;
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+
+ /* We get two refs, but need just one. */
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_split);
+ list_add_tail(&subreq->rreq_link, &sublist);
+ }
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess.
+ */
+ while ((subreq = list_first_entry_or_null(
+ &sublist, struct netfs_io_subrequest, rreq_link))) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ }
+
+ } while (!list_empty(&queue));
+
+ return;
+
+ /* If we hit ENOMEM, fail all remaining subrequests */
+abandon:
+ list_splice_init(&sublist, &queue);
+ list_for_each_entry(subreq, &queue, rreq_link) {
+ if (!subreq->error)
+ subreq->error = -ENOMEM;
+ __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ }
+ spin_lock_bh(&rreq->lock);
+ list_splice_tail_init(&queue, &rreq->subrequests);
+ spin_unlock_bh(&rreq->lock);
+}
+
+/*
+ * Retry reads.
+ */
+void netfs_retry_reads(struct netfs_io_request *rreq)
+{
+ trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+
+ atomic_inc(&rreq->nr_outstanding);
+
+ netfs_retry_read_subrequests(rreq);
+
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_terminated(rreq, false);
+}
+
+/*
+ * Unlock any pages that haven't been unlocked yet due to abandoned
+ * subrequests.
+ */
+void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
+{
+ struct folio_queue *p;
+
+ for (p = rreq->buffer; p; p = p->next) {
+ for (int slot = 0; slot < folioq_count(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+
+ if (folio && !folioq_is_marked2(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_abandon);
+ folio_unlock(folio);
+ }
+ }
+ }
+}
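
The nr_outstanding handling in netfs_retry_reads() is a completion-barrier idiom that recurs throughout netfs: the retry pass holds one extra count so that reissued subrequests completing mid-pass cannot drive the count to zero before every candidate has been examined. A sketch with the intent spelled out in comments (this merely annotates the function above; the sketch_ name is invented):

void sketch_retry_reads(struct netfs_io_request *rreq)
{
	/* Barrier count: pin the request "in progress" for the whole pass. */
	atomic_inc(&rreq->nr_outstanding);

	/* Each reissue takes its own count via netfs_reissue_read(). */
	netfs_retry_read_subrequests(rreq);

	/* Drop the barrier; if all reissued I/O already finished, complete. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
}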
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 0892768eea32..8e63516b40f6 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -32,6 +32,7 @@ atomic_t netfs_n_wh_buffered_write;
atomic_t netfs_n_wh_writethrough;
atomic_t netfs_n_wh_dio_write;
atomic_t netfs_n_wh_writepages;
+atomic_t netfs_n_wh_copy_to_cache;
atomic_t netfs_n_wh_wstream_conflict;
atomic_t netfs_n_wh_upload;
atomic_t netfs_n_wh_upload_done;
@@ -39,45 +40,53 @@ atomic_t netfs_n_wh_upload_failed;
atomic_t netfs_n_wh_write;
atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
+atomic_t netfs_n_wb_lock_skip;
+atomic_t netfs_n_wb_lock_wait;
+atomic_t netfs_n_folioq;
int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "Netfs : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
+ seq_printf(m, "Reads : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
atomic_read(&netfs_n_rh_dio_read),
atomic_read(&netfs_n_rh_readahead),
atomic_read(&netfs_n_rh_read_folio),
atomic_read(&netfs_n_rh_write_begin),
atomic_read(&netfs_n_rh_write_zskip));
- seq_printf(m, "Netfs : BW=%u WT=%u DW=%u WP=%u\n",
+ seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
atomic_read(&netfs_n_wh_buffered_write),
atomic_read(&netfs_n_wh_writethrough),
atomic_read(&netfs_n_wh_dio_write),
- atomic_read(&netfs_n_wh_writepages));
- seq_printf(m, "Netfs : ZR=%u sh=%u sk=%u\n",
+ atomic_read(&netfs_n_wh_writepages),
+ atomic_read(&netfs_n_wh_copy_to_cache));
+ seq_printf(m, "ZeroOps: ZR=%u sh=%u sk=%u\n",
atomic_read(&netfs_n_rh_zero),
atomic_read(&netfs_n_rh_short_read),
atomic_read(&netfs_n_rh_write_zskip));
- seq_printf(m, "Netfs : DL=%u ds=%u df=%u di=%u\n",
+ seq_printf(m, "DownOps: DL=%u ds=%u df=%u di=%u\n",
atomic_read(&netfs_n_rh_download),
atomic_read(&netfs_n_rh_download_done),
atomic_read(&netfs_n_rh_download_failed),
atomic_read(&netfs_n_rh_download_instead));
- seq_printf(m, "Netfs : RD=%u rs=%u rf=%u\n",
+ seq_printf(m, "CaRdOps: RD=%u rs=%u rf=%u\n",
atomic_read(&netfs_n_rh_read),
atomic_read(&netfs_n_rh_read_done),
atomic_read(&netfs_n_rh_read_failed));
- seq_printf(m, "Netfs : UL=%u us=%u uf=%u\n",
+ seq_printf(m, "UpldOps: UL=%u us=%u uf=%u\n",
atomic_read(&netfs_n_wh_upload),
atomic_read(&netfs_n_wh_upload_done),
atomic_read(&netfs_n_wh_upload_failed));
- seq_printf(m, "Netfs : WR=%u ws=%u wf=%u\n",
+ seq_printf(m, "CaWrOps: WR=%u ws=%u wf=%u\n",
atomic_read(&netfs_n_wh_write),
atomic_read(&netfs_n_wh_write_done),
atomic_read(&netfs_n_wh_write_failed));
- seq_printf(m, "Netfs : rr=%u sr=%u wsc=%u\n",
+ seq_printf(m, "Objs : rr=%u sr=%u foq=%u wsc=%u\n",
atomic_read(&netfs_n_rh_rreq),
atomic_read(&netfs_n_rh_sreq),
+ atomic_read(&netfs_n_folioq),
atomic_read(&netfs_n_wh_wstream_conflict));
+ seq_printf(m, "WbLock : skip=%u wait=%u\n",
+ atomic_read(&netfs_n_wb_lock_skip),
+ atomic_read(&netfs_n_wb_lock_wait));
return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);
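
With this change each /proc/fs/netfs/stats line is labelled by operation class instead of the uniform "Netfs :" prefix. A hypothetical excerpt, with all counter values invented for illustration (the fscache lines from fscache_stats_show() follow it):

Reads : DR=0 RA=142 RF=9 WB=31 WBZ=2
Writes : BW=210 WT=0 DW=4 WP=57 2C=18
ZeroOps: ZR=3 sh=1 sk=2
DownOps: DL=166 ds=166 df=0 di=0
CaRdOps: RD=0 rs=0 rf=0
UpldOps: UL=61 us=61 uf=0
CaWrOps: WR=18 ws=18 wf=0
Objs : rr=1 sr=2 foq=5 wsc=0
WbLock : skip=12 wait=3

Note that WBZ and sk print the same counter (netfs_n_rh_write_zskip), so those two values always match.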
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index ae7a2043f670..1d438be2e1b4 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -15,15 +15,11 @@
/* Notes made in the collector */
#define HIT_PENDING 0x01 /* A front op was still pending */
-#define SOME_EMPTY 0x02 /* One of more streams are empty */
-#define ALL_EMPTY 0x04 /* All streams are empty */
-#define MAYBE_DISCONTIG 0x08 /* A front op may be discontiguous (rounded to PAGE_SIZE) */
-#define NEED_REASSESS 0x10 /* Need to loop round and reassess */
-#define REASSESS_DISCONTIG 0x20 /* Reassess discontiguity if contiguity advances */
-#define MADE_PROGRESS 0x40 /* Made progress cleaning up a stream or the folio set */
-#define BUFFERED 0x80 /* The pagecache needs cleaning up */
-#define NEED_RETRY 0x100 /* A front op requests retrying */
-#define SAW_FAILURE 0x200 /* One stream or hit a permanent failure */
+#define NEED_REASSESS 0x02 /* Need to loop round and reassess */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define SAW_FAILURE 0x20 /* At least one stream hit a permanent failure */
/*
* Successful completion of write of a folio to the server and/or cache. Note
@@ -82,55 +78,37 @@ end_wb:
}
/*
- * Get hold of a folio we have under writeback. We don't want to get the
- * refcount on it.
+ * Unlock any folios we've finished with.
*/
-static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos)
+static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
+ unsigned int *notes)
{
- XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
- struct folio *folio;
-
- rcu_read_lock();
-
- for (;;) {
- xas_reset(&xas);
- folio = xas_load(&xas);
- if (xas_retry(&xas, folio))
- continue;
+ struct folio_queue *folioq = wreq->buffer;
+ unsigned long long collected_to = wreq->collected_to;
+ unsigned int slot = wreq->buffer_head_slot;
- if (!folio || xa_is_value(folio))
- kdebug("R=%08x: folio %lx (%llx) not present",
- wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
- BUG_ON(!folio || xa_is_value(folio));
-
- if (folio == xas_reload(&xas))
- break;
+ if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
+ if (netfs_pgpriv2_unlock_copied_folios(wreq))
+ *notes |= MADE_PROGRESS;
+ return;
}
- rcu_read_unlock();
-
- if (WARN_ONCE(!folio_test_writeback(folio),
- "R=%08x: folio %lx is not under writeback\n",
- wreq->debug_id, folio->index)) {
- trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = netfs_delete_buffer_head(wreq);
+ slot = 0;
}
- return folio;
-}
-/*
- * Unlock any folios we've finished with.
- */
-static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
- unsigned long long collected_to,
- unsigned int *notes)
-{
for (;;) {
struct folio *folio;
struct netfs_folio *finfo;
unsigned long long fpos, fend;
size_t fsize, flen;
- folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to);
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_writeback(folio),
+ "R=%08x: folio %lx is not under writeback\n",
+ wreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
fpos = folio_pos(folio);
fsize = folio_size(folio);
@@ -141,12 +119,6 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
trace_netfs_collect_folio(wreq, folio, fend, collected_to);
- if (fpos + fsize > wreq->contiguity) {
- trace_netfs_collect_contig(wreq, fpos + fsize,
- netfs_contig_trace_unlock);
- wreq->contiguity = fpos + fsize;
- }
-
/* Unlock any folio we've transferred all of. */
if (collected_to < fend)
break;
@@ -155,9 +127,25 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
wreq->cleaned_to = fpos + fsize;
*notes |= MADE_PROGRESS;
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (READ_ONCE(wreq->buffer_tail) == folioq)
+ break;
+ folioq = netfs_delete_buffer_head(wreq);
+ slot = 0;
+ }
+
if (fpos + fsize >= collected_to)
break;
}
+
+ wreq->buffer = folioq;
+ wreq->buffer_head_slot = slot;
}
/*
@@ -188,9 +176,12 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ struct iov_iter source = subreq->io_iter;
+
+ iov_iter_revert(&source, subreq->len - source.count);
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq);
+ netfs_reissue_write(stream, subreq, &source);
}
}
return;
@@ -200,6 +191,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
do {
struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
+ struct iov_iter source;
unsigned long long start, len;
size_t part;
bool boundary = false;
@@ -227,6 +219,13 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
len += to->len;
}
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
/* Work through the sublist. */
subreq = from;
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
@@ -238,7 +237,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
stream->prepare_write(subreq);
- part = min(len, subreq->max_len);
+ part = min(len, stream->sreq_max_len);
subreq->len = part;
subreq->start = start;
subreq->transferred = 0;
@@ -249,7 +248,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
boundary = true;
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq);
+ netfs_reissue_write(stream, subreq, &source);
if (subreq == to)
break;
}
@@ -278,8 +277,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
subreq = netfs_alloc_subrequest(wreq);
subreq->source = to->source;
subreq->start = start;
- subreq->max_len = len;
- subreq->max_nr_segs = INT_MAX;
subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
subreq->stream_nr = to->stream_nr;
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
@@ -293,10 +290,12 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
to = list_next_entry(to, rreq_link);
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+ stream->sreq_max_len = len;
+ stream->sreq_max_segs = INT_MAX;
switch (stream->source) {
case NETFS_UPLOAD_TO_SERVER:
netfs_stat(&netfs_n_wh_upload);
- subreq->max_len = min(len, wreq->wsize);
+ stream->sreq_max_len = umin(len, wreq->wsize);
break;
case NETFS_WRITE_TO_CACHE:
netfs_stat(&netfs_n_wh_write);
@@ -307,7 +306,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
stream->prepare_write(subreq);
- part = min(len, subreq->max_len);
+ part = umin(len, stream->sreq_max_len);
subreq->len = subreq->transferred + part;
len -= part;
start += part;
@@ -316,7 +315,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
boundary = false;
}
- netfs_reissue_write(stream, subreq);
+ netfs_reissue_write(stream, subreq, &source);
if (!len)
break;
@@ -377,7 +376,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
{
struct netfs_io_subrequest *front, *remove;
struct netfs_io_stream *stream;
- unsigned long long collected_to;
+ unsigned long long collected_to, issued_to;
unsigned int notes;
int s;
@@ -386,28 +385,22 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
reassess_streams:
+ issued_to = atomic64_read(&wreq->issued_to);
smp_rmb();
collected_to = ULLONG_MAX;
- if (wreq->origin == NETFS_WRITEBACK)
- notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG;
- else if (wreq->origin == NETFS_WRITETHROUGH)
- notes = ALL_EMPTY | BUFFERED;
+ if (wreq->origin == NETFS_WRITEBACK ||
+ wreq->origin == NETFS_WRITETHROUGH ||
+ wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
+ notes = BUFFERED;
else
- notes = ALL_EMPTY;
+ notes = 0;
/* Remove completed subrequests from the front of the streams and
* advance the completion point on each stream. We stop when we hit
* something that's in progress. The issuer thread may be adding stuff
* to the tail whilst we're doing this.
- *
- * We must not, however, merge in discontiguities that span whole
- * folios that aren't under writeback. This is made more complicated
- * by the folios in the gap being of unpredictable sizes - if they even
- * exist - but we don't want to look them up.
*/
for (s = 0; s < NR_IO_STREAMS; s++) {
- loff_t rstart, rend;
-
stream = &wreq->io_streams[s];
/* Read active flag before list pointers */
if (!smp_load_acquire(&stream->active))
@@ -419,26 +412,10 @@ reassess_streams:
//_debug("sreq [%x] %llx %zx/%zx",
// front->debug_index, front->start, front->transferred, front->len);
- /* Stall if there may be a discontinuity. */
- rstart = round_down(front->start, PAGE_SIZE);
- if (rstart > wreq->contiguity) {
- if (wreq->contiguity > stream->collected_to) {
- trace_netfs_collect_gap(wreq, stream,
- wreq->contiguity, 'D');
- stream->collected_to = wreq->contiguity;
- }
- notes |= REASSESS_DISCONTIG;
- break;
- }
- rend = round_up(front->start + front->len, PAGE_SIZE);
- if (rend > wreq->contiguity) {
- trace_netfs_collect_contig(wreq, rend,
- netfs_contig_trace_collect);
- wreq->contiguity = rend;
- if (notes & REASSESS_DISCONTIG)
- notes |= NEED_REASSESS;
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
+ stream->collected_to = front->start;
}
- notes &= ~MAYBE_DISCONTIG;
/* Stall if the front is still undergoing I/O. */
if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
@@ -473,33 +450,27 @@ reassess_streams:
cancel:
/* Remove if completely consumed. */
- spin_lock(&wreq->lock);
+ spin_lock_bh(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
front = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
stream->front = front;
- if (!front) {
- unsigned long long jump_to = atomic64_read(&wreq->issued_to);
-
- if (stream->collected_to < jump_to) {
- trace_netfs_collect_gap(wreq, stream, jump_to, 'A');
- stream->collected_to = jump_to;
- }
- }
-
- spin_unlock(&wreq->lock);
+ spin_unlock_bh(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
netfs_sreq_trace_put_done);
}
- if (front)
- notes &= ~ALL_EMPTY;
- else
- notes |= SOME_EMPTY;
+ /* If we have an empty stream, we need to jump it forward
+ * otherwise the collection point will never advance.
+ */
+ if (!front && issued_to > stream->collected_to) {
+ trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
+ stream->collected_to = issued_to;
+ }
if (stream->collected_to < collected_to)
collected_to = stream->collected_to;
@@ -508,36 +479,6 @@ reassess_streams:
if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
wreq->collected_to = collected_to;
- /* If we have an empty stream, we need to jump it forward over any gap
- * otherwise the collection point will never advance.
- *
- * Note that the issuer always adds to the stream with the lowest
- * so-far submitted start, so if we see two consecutive subreqs in one
- * stream with nothing between then in another stream, then the second
- * stream has a gap that can be jumped.
- */
- if (notes & SOME_EMPTY) {
- unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active &&
- stream->front &&
- stream->front->start < jump_to)
- jump_to = stream->front->start;
- }
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active &&
- !stream->front &&
- stream->collected_to < jump_to) {
- trace_netfs_collect_gap(wreq, stream, jump_to, 'B');
- stream->collected_to = jump_to;
- }
- }
- }
-
for (s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s];
if (stream->active)
@@ -548,43 +489,14 @@ reassess_streams:
/* Unlock any folios that we have now finished with. */
if (notes & BUFFERED) {
- unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity);
-
- if (wreq->cleaned_to < clean_to)
- netfs_writeback_unlock_folios(wreq, clean_to, &notes);
+ if (wreq->cleaned_to < wreq->collected_to)
+ netfs_writeback_unlock_folios(wreq, &notes);
} else {
wreq->cleaned_to = wreq->collected_to;
}
// TODO: Discard encryption buffers
- /* If all streams are discontiguous with the last folio we cleared, we
- * may need to skip a set of folios.
- */
- if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) {
- unsigned long long jump_to = ULLONG_MAX;
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->active && stream->front &&
- stream->front->start < jump_to)
- jump_to = stream->front->start;
- }
-
- trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump);
- wreq->contiguity = jump_to;
- wreq->cleaned_to = jump_to;
- wreq->collected_to = jump_to;
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->collected_to < jump_to)
- stream->collected_to = jump_to;
- }
- //cond_resched();
- notes |= MADE_PROGRESS;
- goto reassess_streams;
- }
-
if (notes & NEED_RETRY)
goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
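
With contiguity tracking removed, collection now advances on two inputs only: each stream's front subrequest and the issuer's high-water mark wreq->issued_to. A condensed sketch of how the collection point is derived (the sketch_ name is invented; failure and retry handling are omitted):

static unsigned long long sketch_collection_point(struct netfs_io_request *wreq)
{
	unsigned long long issued_to = atomic64_read(&wreq->issued_to);
	unsigned long long collected_to = ULLONG_MAX;
	int s;

	for (s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];

		if (!stream->active)
			continue;
		/* An idle stream must be jumped forward or it would pin
		 * the collection point forever. */
		if (!stream->front && issued_to > stream->collected_to)
			stream->collected_to = issued_to;
		collected_to = min(collected_to, stream->collected_to);
	}
	return collected_to;	/* ULLONG_MAX if no stream is active */
}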
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 3f7e37e50c7d..04e66d587f77 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -95,7 +95,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
struct netfs_io_request *wreq;
struct netfs_inode *ictx;
bool is_buffered = (origin == NETFS_WRITEBACK ||
- origin == NETFS_WRITETHROUGH);
+ origin == NETFS_WRITETHROUGH ||
+ origin == NETFS_PGPRIV2_COPY_TO_CACHE);
wreq = netfs_alloc_request(mapping, file, start, 0, origin);
if (IS_ERR(wreq))
@@ -107,9 +108,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
if (is_buffered && netfs_is_cache_enabled(ictx))
fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
- wreq->contiguity = wreq->start;
wreq->cleaned_to = wreq->start;
- INIT_WORK(&wreq->work, netfs_write_collection_worker);
wreq->io_streams[0].stream_nr = 0;
wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
@@ -158,22 +157,19 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
subreq = netfs_alloc_subrequest(wreq);
subreq->source = stream->source;
subreq->start = start;
- subreq->max_len = ULONG_MAX;
- subreq->max_nr_segs = INT_MAX;
subreq->stream_nr = stream->stream_nr;
+ subreq->io_iter = wreq->io_iter;
_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
- trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
- refcount_read(&subreq->ref),
- netfs_sreq_trace_new);
-
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ stream->sreq_max_len = UINT_MAX;
+ stream->sreq_max_segs = INT_MAX;
switch (stream->source) {
case NETFS_UPLOAD_TO_SERVER:
netfs_stat(&netfs_n_wh_upload);
- subreq->max_len = wreq->wsize;
+ stream->sreq_max_len = wreq->wsize;
break;
case NETFS_WRITE_TO_CACHE:
netfs_stat(&netfs_n_wh_write);
@@ -192,7 +188,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 * the list. The collector only moves forwards and uses the lock to
* remove entries off of the front.
*/
- spin_lock(&wreq->lock);
+ spin_lock_bh(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -203,7 +199,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
- spin_unlock(&wreq->lock);
+ spin_unlock_bh(&wreq->lock);
stream->construct = subreq;
}
@@ -223,41 +219,34 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
- // TODO: Use encrypted buffer
- if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
- subreq->io_iter = wreq->io_iter;
- iov_iter_advance(&subreq->io_iter,
- subreq->start + subreq->transferred - wreq->start);
- iov_iter_truncate(&subreq->io_iter,
- subreq->len - subreq->transferred);
- } else {
- iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
- subreq->start + subreq->transferred,
- subreq->len - subreq->transferred);
- }
-
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
stream->issue_write(subreq);
}
void netfs_reissue_write(struct netfs_io_stream *stream,
- struct netfs_io_subrequest *subreq)
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source)
{
+ size_t size = subreq->len - subreq->transferred;
+
+ // TODO: Use encrypted buffer
+ subreq->io_iter = *source;
+ iov_iter_advance(source, size);
+ iov_iter_truncate(&subreq->io_iter, size);
+
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_do_issue_write(stream, subreq);
}
-static void netfs_issue_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream)
+void netfs_issue_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
{
struct netfs_io_subrequest *subreq = stream->construct;
if (!subreq)
return;
stream->construct = NULL;
-
- if (subreq->start + subreq->len > wreq->start + wreq->submitted)
- WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
+ subreq->io_iter.count = subreq->len;
netfs_do_issue_write(stream, subreq);
}
@@ -290,13 +279,14 @@ int netfs_advance_write(struct netfs_io_request *wreq,
netfs_prepare_write(wreq, stream, start);
subreq = stream->construct;
- part = min(subreq->max_len - subreq->len, len);
- _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+ part = umin(stream->sreq_max_len - subreq->len, len);
+ _debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len);
subreq->len += part;
subreq->nr_segs++;
+ stream->submit_extendable_to -= part;
- if (subreq->len >= subreq->max_len ||
- subreq->nr_segs >= subreq->max_nr_segs ||
+ if (subreq->len >= stream->sreq_max_len ||
+ subreq->nr_segs >= stream->sreq_max_segs ||
to_eof) {
netfs_issue_write(wreq, stream);
subreq = NULL;
@@ -410,19 +400,26 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
folio_unlock(folio);
if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
- if (!fscache_resources_valid(&wreq->cache_resources)) {
+ if (!cache->avail) {
trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
netfs_issue_write(wreq, upload);
netfs_folio_written_back(folio);
return 0;
}
trace_netfs_folio(folio, netfs_folio_trace_store_copy);
+ } else if (!upload->avail && !cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+ netfs_folio_written_back(folio);
+ return 0;
} else if (!upload->construct) {
trace_netfs_folio(folio, netfs_folio_trace_store);
} else {
trace_netfs_folio(folio, netfs_folio_trace_store_plus);
}
+ /* Attach the folio to the rolling buffer. */
+ netfs_buffer_append_folio(wreq, folio, false);
+
/* Move the submission point forward to allow for write-streaming data
* not starting at the front of the page. We don't do write-streaming
* with the cache as the cache requires DIO alignment.
@@ -432,7 +429,6 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
*/
for (int s = 0; s < NR_IO_STREAMS; s++) {
stream = &wreq->io_streams[s];
- stream->submit_max_len = fsize;
stream->submit_off = foff;
stream->submit_len = flen;
if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
@@ -440,7 +436,6 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
stream->submit_off = UINT_MAX;
stream->submit_len = 0;
- stream->submit_max_len = 0;
}
}
@@ -467,12 +462,13 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (choose_s < 0)
break;
stream = &wreq->io_streams[choose_s];
+ wreq->io_iter.iov_offset = stream->submit_off;
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_extendable_to = fsize - stream->submit_off;
part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
stream->submit_len, to_eof);
- atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
stream->submit_off += part;
- stream->submit_max_len -= part;
if (part > stream->submit_len)
stream->submit_len = 0;
else
@@ -481,6 +477,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
debug = true;
}
+ wreq->io_iter.iov_offset = 0;
+ iov_iter_advance(&wreq->io_iter, fsize);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
@@ -505,10 +503,14 @@ int netfs_writepages(struct address_space *mapping,
struct folio *folio;
int error = 0;
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (!mutex_trylock(&ictx->wb_lock)) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ netfs_stat(&netfs_n_wb_lock_skip);
+ return 0;
+ }
+ netfs_stat(&netfs_n_wb_lock_wait);
mutex_lock(&ictx->wb_lock);
- else if (!mutex_trylock(&ictx->wb_lock))
- return 0;
+ }
/* Need the first folio to be able to set up the op. */
folio = writeback_iter(mapping, wbc, NULL, &error);
@@ -525,10 +527,10 @@ int netfs_writepages(struct address_space *mapping,
netfs_stat(&netfs_n_wh_writepages);
do {
- _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
/* It appears we don't have to handle cyclic writeback wrapping. */
- WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
+ WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to));
if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
@@ -672,6 +674,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
+ iov_iter_advance(&wreq->io_iter, part);
if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
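
The recurring pattern in netfs_reissue_write() above is carving one subrequest's window out of a shared source iterator: copy the iterator, truncate the copy to this subrequest's size, then advance the source past it for the next subrequest. A minimal sketch of that carve (the sketch_ name is invented):

static void sketch_carve_iter(struct iov_iter *source,
			      struct iov_iter *dest, size_t part)
{
	*dest = *source;		/* snapshot the current position */
	iov_iter_truncate(dest, part);	/* window = this subrequest only */
	iov_iter_advance(source, part);	/* move past it for the next one */
}

Because the copy is by value, each subrequest owns an independent cursor and can be reissued without disturbing its neighbours.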
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 57249f040dfc..0eb20012792f 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -4,6 +4,7 @@ config NFS_FS
depends on INET && FILE_LOCKING && MULTIUSER
select LOCKD
select SUNRPC
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFS_V3_ACL
help
Choose Y here if you want to access files residing on other
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 5f6db37f461e..9fb2f2cac87e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -13,6 +13,7 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
+nfs-$(CONFIG_NFS_LOCALIO) += localio.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 8adfcd4c8c1a..6cf92498a5ac 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -76,6 +76,8 @@ nfs4_callback_svc(void *vrqstp)
{
struct svc_rqst *rqstp = vrqstp;
+ svc_thread_init_status(rqstp, 0);
+
set_freezable();
while (!svc_thread_should_stop(rqstp))
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8286edd6062d..a1d21c4be0ac 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -178,6 +178,14 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
clp->cl_net = get_net(cl_init->net);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ seqlock_init(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ clp->cl_uuid.net = NULL;
+ clp->cl_uuid.dom = NULL;
+ spin_lock_init(&clp->cl_localio_lock);
+#endif /* CONFIG_NFS_LOCALIO */
+
clp->cl_principal = "*";
clp->cl_xprtsec = cl_init->xprtsec;
return clp;
@@ -233,6 +241,8 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
+ nfs_local_disable(clp);
+
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -424,7 +434,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- return rpc_ops->init_client(new, cl_init);
+ new = rpc_ops->init_client(new, cl_init);
+ if (!IS_ERR(new))
+ nfs_local_probe(new);
+ return new;
}
spin_unlock(&nn->nfs_client_lock);
@@ -997,8 +1010,8 @@ struct nfs_server *nfs_alloc_server(void)
init_waitqueue_head(&server->write_congestion_wait);
atomic_long_set(&server->writeback, 0);
- ida_init(&server->openowner_id);
- ida_init(&server->lockowner_id);
+ atomic64_set(&server->owner_ctr, 0);
+
pnfs_init_server(server);
rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
@@ -1037,8 +1050,6 @@ void nfs_free_server(struct nfs_server *server)
}
ida_free(&s_sysfs_ids, server->s_sysfs_id);
- ida_destroy(&server->lockowner_id);
- ida_destroy(&server->openowner_id);
put_cred(server->cred);
nfs_release_automount_timer();
call_rcu(&server->rcu, delayed_free);
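
Replacing the two IDAs with a single atomic64 counter works because open-owner and lock-owner ids only need to be unique per server, not dense: a 64-bit counter will not wrap in practice, so nothing ever has to be returned to a pool and the ida_destroy() calls go away with it. A sketch of how ids would be minted from the new field (the helper name is invented for illustration):

static inline u64 sketch_alloc_owner_id(struct nfs_server *server)
{
	/* Monotonic, never reused, no free path required. */
	return atomic64_inc_return(&server->owner_ctr);
}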
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4cb97ef41350..492cffd9d3d8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -151,7 +151,7 @@ struct nfs_cache_array {
unsigned char folio_full : 1,
folio_is_eof : 1,
cookies_are_ordered : 1;
- struct nfs_cache_array_entry array[];
+ struct nfs_cache_array_entry array[] __counted_by(size);
};
struct nfs_readdir_descriptor {
@@ -328,7 +328,8 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
goto out;
}
- cache_entry = &array->array[array->size];
+ array->size++;
+ cache_entry = &array->array[array->size - 1];
cache_entry->cookie = array->last_cookie;
cache_entry->ino = entry->ino;
cache_entry->d_type = entry->d_type;
@@ -337,7 +338,6 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
array->last_cookie = entry->cookie;
if (array->last_cookie <= cache_entry->cookie)
array->cookies_are_ordered = 0;
- array->size++;
if (entry->eof != 0)
nfs_readdir_array_set_eof(array);
out:
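
The reordering in nfs_readdir_folio_array_append() is required by the new __counted_by(size) annotation: fortified kernels take array->size as the array's live bound, so index size is out of bounds until the count has been bumped. A minimal sketch of the rule (struct and names invented for illustration):

struct sketch_cache {
	unsigned int size;
	struct nfs_cache_array_entry array[] __counted_by(size);
};

static void sketch_append(struct sketch_cache *c,
			  const struct nfs_cache_array_entry *e)
{
	c->size++;			/* grow the checked bound first... */
	c->array[c->size - 1] = *e;	/* ...so this access is in bounds */
}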
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 61a8cdb9f1e1..6800ee92d742 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -336,7 +336,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
* increment the page use counts until he is done with the page.
*/
static int nfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep,
+ loff_t pos, unsigned len, struct folio **foliop,
void **fsdata)
{
fgf_t fgp = FGP_WRITEBEGIN;
@@ -353,7 +353,7 @@ start:
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
- *pagep = &folio->page;
+ *foliop = folio;
ret = nfs_flush_incompatible(file, folio);
if (ret) {
@@ -372,10 +372,9 @@ start:
static int nfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct folio *folio = page_folio(page);
unsigned offset = offset_in_folio(folio, pos);
int status;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b6e9aeaf4ce2..d39a1f58e18d 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -488,7 +488,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -530,7 +530,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -1011,7 +1011,7 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
data->args.fh = fh;
return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
- RPC_TASK_SOFTCONN);
+ RPC_TASK_SOFTCONN, NULL);
out_err:
pnfs_generic_prepare_to_resend_writes(data);
pnfs_generic_commit_release(data);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 39ba9f4208aa..f78115c6c2c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -11,6 +11,7 @@
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
+#include <linux/file.h>
#include <linux/sched/mm.h>
#include <linux/sunrpc/metrics.h>
@@ -162,6 +163,21 @@ decode_name(struct xdr_stream *xdr, u32 *id)
return 0;
}
+static struct nfsd_file *
+ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, fmode_t mode)
+{
+ if (mode & FMODE_WRITE) {
+ /*
+ * Always request read and write access since this corresponds
+ * to a rw layout.
+ */
+ mode |= FMODE_READ;
+ }
+
+ return nfs_local_open_fh(clp, cred, fh, mode);
+}
+
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
const struct nfs4_ff_layout_mirror *m2)
{
@@ -237,7 +253,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
- const struct cred *cred;
+ const struct cred *cred;
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
@@ -1756,6 +1772,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1802,11 +1819,18 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->args.offset = offset;
hdr->mds_offset = offset;
+ /* Start IO accounting for local read */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1826,6 +1850,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1870,11 +1895,19 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
*/
hdr->args.offset = offset;
+ /* Start IO accounting for local write */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1908,6 +1941,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
u32 idx;
@@ -1946,10 +1980,18 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
if (fh)
data->args.fh = fh;
+ /* Start IO accounting for local commit */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ data->task.tk_start = ktime_get();
+ ff_layout_commit_record_layoutstats_start(&data->task, data);
+ }
+
ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
- how, RPC_TASK_SOFTCONN);
+ how, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return ret;
out_err:
@@ -2087,12 +2129,6 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
}
static void
-encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
-static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
const nfs4_stateid *stateid,
const struct nfs42_layoutstat_devinfo *devinfo)
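
The three hunks above share one shape: try a local open first, and if it succeeds, start the timing and layoutstats by hand, because no RPC task will run to do it. Condensed from the read path (call_ops here stands in for the version-specific ops; a NULL localio simply falls back to ordinary RPC):

localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
if (localio) {
	/* No rpc_task start will fire, so stamp the clock ourselves. */
	hdr->task.tk_start = ktime_get();
	ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
}
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
		  call_ops, 0, RPC_TASK_SOFTCONN, localio);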
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e028f5a0ef5f..e58bedfb1dcc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -395,6 +395,12 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
/* connect success, check rsize/wsize limit */
if (!status) {
+ /*
+ * ds_clp is put in destroy_ds().
+ * keep ds_clp even if DS is local, so that if local IO cannot
+ * proceed somehow, we can fall back to NFS whenever we want.
+ */
+ nfs_local_probe(ds->ds_clp);
max_payload =
nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
NULL);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 6c9f3f6645dd..7e000d782e28 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -49,6 +49,7 @@ enum nfs_param {
Opt_bsize,
Opt_clientaddr,
Opt_cto,
+ Opt_alignwrite,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -149,6 +150,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("bsize", Opt_bsize),
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
+ fsparam_flag_no("alignwrite", Opt_alignwrite),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -592,6 +594,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
else
ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
break;
+ case Opt_alignwrite:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE;
+ else
+ ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE;
+ break;
case Opt_ac:
if (result.negated)
ctx->flags |= NFS_MOUNT_NOAC;
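
Since the new option is declared with fsparam_flag_no, both spellings are accepted: alignwrite (the default) clears NFS_MOUNT_NO_ALIGNWRITE and noalignwrite sets it, opting out of the write-alignment behaviour the flag name refers to. A hypothetical invocation, with server and paths invented for illustration:

mount -t nfs -o noalignwrite server:/export /mnt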
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 7a558dea75c4..810269ee0a50 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -267,6 +267,7 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+ rreq->io_streams[0].sreq_max_len = NFS_SB(rreq->inode->i_sb)->rsize;
return 0;
}
@@ -288,14 +289,6 @@ static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sre
return netfs;
}
-static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
-{
- size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
-
- sreq->len = min(sreq->len, rsize);
- return true;
-}
-
static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
struct nfs_netfs_io_data *netfs;
@@ -304,17 +297,18 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
struct page *page;
unsigned long idx;
+ pgoff_t start, last;
int err;
- pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
- pgoff_t last = ((sreq->start + sreq->len -
- sreq->transferred - 1) >> PAGE_SHIFT);
+
+ start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+ last = ((sreq->start + sreq->len - sreq->transferred - 1) >> PAGE_SHIFT);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
netfs = nfs_netfs_alloc(sreq);
if (!netfs)
- return netfs_subreq_terminated(sreq, -ENOMEM, false);
+ return netfs_read_subreq_terminated(sreq, -ENOMEM, false);
pgio.pg_netfs = netfs; /* used in completion */
@@ -380,5 +374,4 @@ const struct netfs_request_ops nfs_netfs_ops = {
.init_request = nfs_netfs_init_request,
.free_request = nfs_netfs_free_request,
.issue_read = nfs_netfs_issue_read,
- .clamp_length = nfs_netfs_clamp_length
};
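
The removal of ->clamp_length() shows the new contract: instead of clamping every subrequest in a callback, the filesystem declares its transfer limit once and the netfs core sizes each subrequest from it. A sketch of the before/after, assuming the nfs_netfs_init_request() context above (the sketch_ name is invented):

/* Old: a per-subrequest callback clamped each length:
 *	sreq->len = min(sreq->len, rsize);
 * New: declare the limit once when the request is set up. */
static int sketch_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->io_streams[0].sreq_max_len = NFS_SB(rreq->inode->i_sb)->rsize;
	return 0;
}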
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index e8adae1bc260..772d485e96d3 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -60,8 +60,6 @@ static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
{
- ssize_t final_len;
-
/* Only the last RPC completion should call netfs_subreq_terminated() */
if (!refcount_dec_and_test(&netfs->refcount))
return;
@@ -74,8 +72,9 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
* Correct the final length here to be no larger than the netfs subrequest
* length, and thus avoid netfs's "Subreq overread" warning message.
*/
- final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
- netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
+ netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
+ atomic64_read(&netfs->transferred));
+ netfs_read_subreq_terminated(netfs->sreq, netfs->error, false);
kfree(netfs);
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 11ff2b2e060f..f13d25d95b85 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
}
/*
- * get an NFS2/NFS3 root dentry from the root filehandle
+ * get a root dentry from the root filehandle
*/
int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b4914a11c3c2..542c7d97b235 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2461,35 +2461,54 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
+struct workqueue_struct *nfslocaliod_workqueue;
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
- * start up the nfsiod workqueue
+ * Destroy the nfsiod workqueues
*/
-static int nfsiod_start(void)
+static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
- dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
- if (wq == NULL)
- return -ENOMEM;
- nfsiod_workqueue = wq;
- return 0;
+
+ wq = nfsiod_workqueue;
+ if (wq != NULL) {
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ wq = nfslocaliod_workqueue;
+ if (wq != NULL) {
+ nfslocaliod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#endif /* CONFIG_NFS_LOCALIO */
}
/*
- * Destroy the nfsiod workqueue
+ * Start the nfsiod workqueues
*/
-static void nfsiod_stop(void)
+static int nfsiod_start(void)
{
- struct workqueue_struct *wq;
-
- wq = nfsiod_workqueue;
- if (wq == NULL)
- return;
- nfsiod_workqueue = NULL;
- destroy_workqueue(wq);
+ dprintk("RPC: creating workqueue nfsiod\n");
+ nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (nfsiod_workqueue == NULL)
+ return -ENOMEM;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /*
+ * localio writes need to use a normal (non-memreclaim) workqueue.
+ * When we start getting low on space, XFS goes and calls flush_work() on
+ * a non-memreclaim work queue, which causes a priority inversion problem.
+ */
+ dprintk("RPC: creating workqueue nfslocaliod\n");
+ nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
+ if (unlikely(nfslocaliod_workqueue == NULL)) {
+ nfsiod_stop();
+ return -ENOMEM;
+ }
+#endif /* CONFIG_NFS_LOCALIO */
+ return 0;
}
unsigned int nfs_net_id;
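
The flag difference between the two queues is the point of the hunk: nfsiod keeps WQ_MEM_RECLAIM because it services writeback under memory pressure, while nfslocaliod deliberately omits it so that, per the comment above, flush_work() dependencies from the underlying filesystem under low-space conditions do not turn into a priority inversion. Side by side:

nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);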
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5902a9beca1f..430733e3eff2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -9,6 +9,7 @@
#include <linux/crc32.h>
#include <linux/sunrpc/addr.h>
#include <linux/nfs_page.h>
+#include <linux/nfslocalio.h>
#include <linux/wait_bit.h>
#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
@@ -308,7 +309,8 @@ void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags);
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
@@ -438,6 +440,7 @@ int nfs_check_flags(int);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
+extern struct workqueue_struct *nfslocaliod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_free_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
@@ -449,6 +452,51 @@ extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/* localio.c */
+extern void nfs_local_disable(struct nfs_client *);
+extern void nfs_local_probe(struct nfs_client *);
+extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
+ const struct cred *,
+ struct nfs_fh *,
+ const fmode_t);
+extern int nfs_local_doio(struct nfs_client *,
+ struct nfsd_file *,
+ struct nfs_pgio_header *,
+ const struct rpc_call_ops *);
+extern int nfs_local_commit(struct nfsd_file *,
+ struct nfs_commit_data *,
+ const struct rpc_call_ops *, int);
+extern bool nfs_server_is_local(const struct nfs_client *clp);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfs_local_disable(struct nfs_client *clp) {}
+static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ return NULL;
+}
+static inline int nfs_local_doio(struct nfs_client *clp,
+ struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ return -EINVAL;
+}
+static inline int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ return -EINVAL;
+}
+static inline bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return false;
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
/* super.c */
extern const struct super_operations nfs_sops;
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
@@ -505,7 +553,6 @@ extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
struct nfs_open_context *ctx,
struct folio *folio);
extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
-extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
@@ -528,7 +575,8 @@ extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags);
+ int how, int flags,
+ struct nfsd_file *localio);
extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
struct pnfs_layout_segment *lseg,
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
new file mode 100644
index 000000000000..c29cdf51c458
--- /dev/null
+++ b/fs/nfs/localio.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS client support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/vfs.h>
+#include <linux/file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/module.h>
+#include <linux/bvec.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "internal.h"
+#include "pnfs.h"
+#include "nfstrace.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+struct nfs_local_kiocb {
+ struct kiocb kiocb;
+ struct bio_vec *bvec;
+ struct nfs_pgio_header *hdr;
+ struct work_struct work;
+ struct nfsd_file *localio;
+};
+
+struct nfs_local_fsync_ctx {
+ struct nfsd_file *localio;
+ struct nfs_commit_data *data;
+ struct work_struct work;
+ struct kref kref;
+ struct completion *done;
+};
+static void nfs_local_fsync_work(struct work_struct *work);
+
+static bool localio_enabled __read_mostly = true;
+module_param(localio_enabled, bool, 0644);
+
+static inline bool nfs_client_is_local(const struct nfs_client *clp)
+{
+ return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+}
+
+bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return nfs_client_is_local(clp) && localio_enabled;
+}
+EXPORT_SYMBOL_GPL(nfs_server_is_local);
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static void localio_xdr_enc_uuidargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const u8 *uuid = data;
+
+ encode_opaque_fixed(xdr, uuid, UUID_SIZE);
+}
+
+static int localio_xdr_dec_uuidres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ void *result)
+{
+ /* void return */
+ return 0;
+}
+
+static const struct rpc_procinfo nfs_localio_procedures[] = {
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .p_proc = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_encode = localio_xdr_enc_uuidargs,
+ .p_decode = localio_xdr_dec_uuidres,
+ .p_arglen = XDR_QUADLEN(UUID_SIZE),
+ .p_replen = 0,
+ .p_statidx = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_name = "UUID_IS_LOCAL",
+ },
+};
+
+static unsigned int nfs_localio_counts[ARRAY_SIZE(nfs_localio_procedures)];
+static const struct rpc_version nfslocalio_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nfs_localio_procedures),
+ .procs = nfs_localio_procedures,
+ .counts = nfs_localio_counts,
+};
+
+static const struct rpc_version *nfslocalio_version[] = {
+ [1] = &nfslocalio_version1,
+};
+
+extern const struct rpc_program nfslocalio_program;
+static struct rpc_stat nfslocalio_rpcstat = { &nfslocalio_program };
+
+const struct rpc_program nfslocalio_program = {
+ .name = "nfslocalio",
+ .number = NFS_LOCALIO_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfslocalio_version),
+ .version = nfslocalio_version,
+ .stats = &nfslocalio_rpcstat,
+};
+
+/*
+ * nfs_local_enable - enable local i/o for an nfs_client
+ */
+static void nfs_local_enable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+ trace_nfs_local_enable(clp);
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_local_disable - disable local i/o for an nfs_client
+ */
+void nfs_local_disable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) {
+ trace_nfs_local_disable(clp);
+ nfs_uuid_invalidate_one_client(&clp->cl_uuid);
+ }
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_init_localioclient - Initialise an NFS localio client connection
+ */
+static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
+{
+ struct rpc_clnt *rpcclient_localio;
+
+ rpcclient_localio = rpc_bind_new_program(clp->cl_rpcclient,
+ &nfslocalio_program, 1);
+
+ dprintk_rcu("%s: server (%s) %s NFS LOCALIO.\n",
+ __func__, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ (IS_ERR(rpcclient_localio) ? "does not support" : "supports"));
+
+ return rpcclient_localio;
+}
+
+static bool nfs_server_uuid_is_local(struct nfs_client *clp)
+{
+ u8 uuid[UUID_SIZE];
+ struct rpc_message msg = {
+ .rpc_argp = &uuid,
+ };
+ struct rpc_clnt *rpcclient_localio;
+ int status;
+
+ rpcclient_localio = nfs_init_localioclient(clp);
+ if (IS_ERR(rpcclient_localio))
+ return false;
+
+ export_uuid(uuid, &clp->cl_uuid.uuid);
+
+ msg.rpc_proc = &nfs_localio_procedures[LOCALIOPROC_UUID_IS_LOCAL];
+ status = rpc_call_sync(rpcclient_localio, &msg, 0);
+ dprintk("%s: NFS reply UUID_IS_LOCAL: status=%d\n",
+ __func__, status);
+ rpc_shutdown_client(rpcclient_localio);
+
+ /* Server is only local if it initialized required struct members */
+ if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom)
+ return false;
+
+ return true;
+}
+
+/*
+ * nfs_local_probe - probe local i/o support for an nfs_server and nfs_client
+ * - called after alloc_client and init_client (so cl_rpcclient exists)
+ * - this function is idempotent; it can be called for old or new clients
+ */
+void nfs_local_probe(struct nfs_client *clp)
+{
+ /* Disallow localio if it is disabled via sysfs or if AUTH_SYS isn't in use */
+ if (!localio_enabled ||
+ clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
+ nfs_local_disable(clp);
+ return;
+ }
+
+ if (nfs_client_is_local(clp)) {
+ /* If already enabled, disable and re-enable */
+ nfs_local_disable(clp);
+ }
+
+ nfs_uuid_begin(&clp->cl_uuid);
+ if (nfs_server_uuid_is_local(clp))
+ nfs_local_enable(clp);
+ nfs_uuid_end(&clp->cl_uuid);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe);
+
+/*
+ * nfs_local_open_fh - open a local filehandle, returning an nfsd_file
+ *
+ * Returns a pointer to a struct nfsd_file or NULL
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ struct nfsd_file *localio;
+ int status;
+
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
+ cred, fh, mode);
+ if (IS_ERR(localio)) {
+ status = PTR_ERR(localio);
+ trace_nfs_local_open_fh(fh, mode, status);
+ switch (status) {
+ case -ENOMEM:
+ case -ENXIO:
+ case -ENOENT:
+ /* Revalidate localio; it will be disabled if unsupported */
+ nfs_local_probe(clp);
+ }
+ return NULL;
+ }
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_local_open_fh);
+
+static struct bio_vec *
+nfs_bvec_alloc_and_import_pagevec(struct page **pagevec,
+ unsigned int npages, gfp_t flags)
+{
+ struct bio_vec *bvec, *p;
+
+ bvec = kmalloc_array(npages, sizeof(*bvec), flags);
+ if (bvec != NULL) {
+ for (p = bvec; npages > 0; p++, pagevec++, npages--) {
+ p->bv_page = *pagevec;
+ p->bv_len = PAGE_SIZE;
+ p->bv_offset = 0;
+ }
+ }
+ return bvec;
+}
+
+static void
+nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
+{
+ kfree(iocb->bvec);
+ kfree(iocb);
+}
+
+static struct nfs_local_kiocb *
+nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_kiocb *iocb;
+
+ iocb = kmalloc(sizeof(*iocb), flags);
+ if (iocb == NULL)
+ return NULL;
+ iocb->bvec = nfs_bvec_alloc_and_import_pagevec(hdr->page_array.pagevec,
+ hdr->page_array.npages, flags);
+ if (iocb->bvec == NULL) {
+ kfree(iocb);
+ return NULL;
+ }
+ init_sync_kiocb(&iocb->kiocb, nfs_to->nfsd_file_file(localio));
+ iocb->kiocb.ki_pos = hdr->args.offset;
+ iocb->localio = localio;
+ iocb->hdr = hdr;
+ iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ return iocb;
+}
+
+static void
+nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ iov_iter_bvec(i, dir, iocb->bvec, hdr->page_array.npages,
+ hdr->args.count + hdr->args.pgbase);
+ if (hdr->args.pgbase != 0)
+ iov_iter_advance(i, hdr->args.pgbase);
+}
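Because the bvec import marks every page as a full PAGE_SIZE segment, the iterator is sized for count + pgbase bytes and then advanced past pgbase so the I/O starts at the correct intra-page offset. A worked example with illustrative numbers:

/*
 * Illustrative numbers: npages = 3, args.pgbase = 512, args.count = 8192.
 * iov_iter_bvec() covers 8704 bytes across the three pages; advancing by
 * 512 leaves exactly args.count bytes, starting at byte 512 of page 0.
 */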
+
+static void
+nfs_local_hdr_release(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ call_ops->rpc_call_done(&hdr->task, hdr);
+ call_ops->rpc_release(hdr);
+}
+
+static void
+nfs_local_pgio_init(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ hdr->task.tk_ops = call_ops;
+ if (!hdr->task.tk_start)
+ hdr->task.tk_start = ktime_get();
+}
+
+static void
+nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+{
+ if (status >= 0) {
+ hdr->res.count = status;
+ hdr->res.op_status = NFS4_OK;
+ hdr->task.tk_status = 0;
+ } else {
+ hdr->res.op_status = nfs4_stat_to_errno(status);
+ hdr->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ nfs_to->nfsd_file_put_local(iocb->localio);
+ nfs_local_iocb_free(iocb);
+ nfs_local_hdr_release(hdr, hdr->task.tk_ops);
+}
+
+static void
+nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct file *filp = iocb->kiocb.ki_filp;
+
+ nfs_local_pgio_done(hdr, status);
+
+ if (hdr->res.count != hdr->args.count ||
+ hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ hdr->res.eof = true;
+
+ dprintk("%s: read %ld bytes eof %d.\n", __func__,
+ status > 0 ? status : 0, hdr->res.eof);
+}
+
+static void nfs_local_call_read(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, READ);
+
+ status = filp->f_op->read_iter(&iocb->kiocb, &iter);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_read_done(iocb, status);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+}
+
+static int
+nfs_do_local_read(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_read count=%u pos=%llu\n",
+ __func__, hdr->args.count, hdr->args.offset);
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_KERNEL);
+ if (iocb == NULL)
+ return -ENOMEM;
+
+ nfs_local_pgio_init(hdr, call_ops);
+ hdr->res.eof = false;
+
+ INIT_WORK(&iocb->work, nfs_local_call_read);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+static void
+nfs_copy_boot_verifier(struct nfs_write_verifier *verifier, struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ u32 *verf = (u32 *)verifier->data;
+ int seq = 0;
+
+ do {
+ read_seqbegin_or_lock(&clp->cl_boot_lock, &seq);
+ verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec;
+ verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec;
+ } while (need_seqretry(&clp->cl_boot_lock, seq));
+ done_seqretry(&clp->cl_boot_lock, seq);
+}
+
+static void
+nfs_reset_boot_verifier(struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ write_seqlock(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ write_sequnlock(&clp->cl_boot_lock);
+}
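The boot verifier plays the role of the server's boot time in the WRITE/COMMIT protocol: a client resends cached data when the verifier returned by COMMIT differs from the one recorded at WRITE time, which is why the write and commit error paths below reset it. A minimal sketch of that comparison (illustrative only; the real check lives in the generic NFS write path):

#include <linux/string.h>
#include <linux/nfs_xdr.h>

/* Illustrative only: true means the server "rebooted" (here: the
 * verifier was reset on error) and unstable data must be resent. */
static bool example_verifier_changed(const struct nfs_write_verifier *a,
				     const struct nfs_write_verifier *b)
{
	return memcmp(a->data, b->data, sizeof(a->data)) != 0;
}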
+
+static void
+nfs_set_local_verifier(struct inode *inode,
+ struct nfs_writeverf *verf,
+ enum nfs3_stable_how how)
+{
+ nfs_copy_boot_verifier(&verf->verifier, inode);
+ verf->committed = how;
+}
+
+/* Factored out from fs/nfsd/vfs.h:fh_getattr() */
+static int __vfs_getattr(struct path *p, struct kstat *stat, int version)
+{
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (version == 4)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+ return vfs_getattr(p, stat, request_mask, AT_STATX_SYNC_AS_STAT);
+}
+
+/* Copied from fs/nfsd/nfsfh.c:nfsd4_change_attribute() */
+static u64 __nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode)
+{
+ u64 chattr;
+
+ if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ chattr = stat->change_cookie;
+ if (S_ISREG(inode->i_mode) &&
+ !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ chattr += (u64)stat->ctime.tv_sec << 30;
+ chattr += stat->ctime.tv_nsec;
+ }
+ } else {
+ chattr = time_to_chattr(&stat->ctime);
+ }
+ return chattr;
+}
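When the filesystem does not guarantee a monotonic change cookie, the helper folds ctime into the value so the attribute still grows across updates. Worked arithmetic with illustrative numbers:

/*
 * Illustrative: change_cookie = 7, ctime = 1700000000s + 500ns.
 * chattr = 7 + (1700000000ULL << 30) + 500; any later ctime dominates
 * the sum, so clients observe the change attribute increase even if the
 * raw cookie value repeats.
 */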
+
+static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
+{
+ struct kstat stat;
+ struct file *filp = iocb->kiocb.ki_filp;
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct nfs_fattr *fattr = hdr->res.fattr;
+ int version = NFS_PROTO(hdr->inode)->version;
+
+ if (unlikely(!fattr) || __vfs_getattr(&filp->f_path, &stat, version))
+ return;
+
+ fattr->valid = (NFS_ATTR_FATTR_FILEID |
+ NFS_ATTR_FATTR_CHANGE |
+ NFS_ATTR_FATTR_SIZE |
+ NFS_ATTR_FATTR_ATIME |
+ NFS_ATTR_FATTR_MTIME |
+ NFS_ATTR_FATTR_CTIME |
+ NFS_ATTR_FATTR_SPACE_USED);
+
+ fattr->fileid = stat.ino;
+ fattr->size = stat.size;
+ fattr->atime = stat.atime;
+ fattr->mtime = stat.mtime;
+ fattr->ctime = stat.ctime;
+ if (version == 4) {
+ fattr->change_attr =
+ __nfsd4_change_attribute(&stat, file_inode(filp));
+ } else
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+ fattr->du.nfs3.used = stat.blocks << 9;
+}
+
+static void
+nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct inode *inode = hdr->inode;
+
+ dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+
+ /* Handle short writes as if they are ENOSPC */
+ if (status > 0 && status < hdr->args.count) {
+ hdr->mds_offset += status;
+ hdr->args.offset += status;
+ hdr->args.pgbase += status;
+ hdr->args.count -= status;
+ nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
+ status = -ENOSPC;
+ }
+ if (status < 0)
+ nfs_reset_boot_verifier(inode);
+ else if (nfs_should_remove_suid(inode)) {
+ /* Deal with the suid/sgid bit corner case */
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ spin_unlock(&inode->i_lock);
+ }
+ nfs_local_pgio_done(hdr, status);
+}
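The short-write path advances the request window past the bytes that did land before reporting -ENOSPC for the remainder. Illustrative numbers:

/*
 * Illustrative: args.count = 8192, ->write_iter() returns 4096.
 * mds_offset, args.offset and args.pgbase each advance by 4096,
 * args.count drops to 4096, and the header reports -ENOSPC for the
 * unwritten tail at the new offset.
 */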
+
+static void nfs_local_call_write(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ unsigned long old_flags = current->flags;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, WRITE);
+
+ file_start_write(filp);
+ status = filp->f_op->write_iter(&iocb->kiocb, &iter);
+ file_end_write(filp);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_write_done(iocb, status);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+ current->flags = old_flags;
+}
+
+static int
+nfs_do_local_write(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_write count=%u pos=%llu %s\n",
+ __func__, hdr->args.count, hdr->args.offset,
+ (hdr->args.stable == NFS_UNSTABLE) ? "unstable" : "stable");
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_NOIO);
+ if (iocb == NULL)
+ return -ENOMEM;
+
+ switch (hdr->args.stable) {
+ default:
+ break;
+ case NFS_DATA_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC;
+ break;
+ case NFS_FILE_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ }
+ nfs_local_pgio_init(hdr, call_ops);
+
+ nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+
+ INIT_WORK(&iocb->work, nfs_local_call_write);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
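The stable_how switch above maps NFS commitment levels onto kiocb flags; restated as a table (no behaviour beyond the switch is implied):

/*
 * NFS_UNSTABLE  -> no extra flags: data may sit in the server-side page
 *                  cache until a later COMMIT.
 * NFS_DATA_SYNC -> IOCB_DSYNC: file data is durable before completion.
 * NFS_FILE_SYNC -> IOCB_DSYNC | IOCB_SYNC: data and metadata durable.
 */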
+
+int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ int status = 0;
+ struct file *filp = nfs_to->nfsd_file_file(localio);
+
+ if (!hdr->args.count)
+ return 0;
+ /* Don't support filesystems without read_iter/write_iter */
+ if (!filp->f_op->read_iter || !filp->f_op->write_iter) {
+ nfs_local_disable(clp);
+ status = -EAGAIN;
+ goto out;
+ }
+
+ switch (hdr->rw_mode) {
+ case FMODE_READ:
+ status = nfs_do_local_read(hdr, localio, call_ops);
+ break;
+ case FMODE_WRITE:
+ status = nfs_do_local_write(hdr, localio, call_ops);
+ break;
+ default:
+ dprintk("%s: invalid mode: %d\n", __func__,
+ hdr->rw_mode);
+ status = -EINVAL;
+ }
+out:
+ if (status != 0) {
+ nfs_to->nfsd_file_put_local(localio);
+ hdr->task.tk_status = status;
+ nfs_local_hdr_release(hdr, call_ops);
+ }
+ return status;
+}
+
+static void
+nfs_local_init_commit(struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ data->task.tk_ops = call_ops;
+}
+
+static int
+nfs_local_run_commit(struct file *filp, struct nfs_commit_data *data)
+{
+ loff_t start = data->args.offset;
+ loff_t end = LLONG_MAX;
+
+ if (data->args.count > 0) {
+ end = start + data->args.count - 1;
+ if (end < start)
+ end = LLONG_MAX;
+ }
+
+ dprintk("%s: commit %llu - %llu\n", __func__, start, end);
+ return vfs_fsync_range(filp, start, end, 0);
+}
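A zero count means "commit to end of file", and the end calculation saturates rather than wrapping. Worked examples:

/*
 * offset = 4096, count = 8192 -> vfs_fsync_range(filp, 4096, 12287, 0)
 * offset = 0,    count = 0    -> vfs_fsync_range(filp, 0, LLONG_MAX, 0)
 * offset near LLONG_MAX with a large count would wrap (end < start),
 * so end is clamped back to LLONG_MAX.
 */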
+
+static void
+nfs_local_commit_done(struct nfs_commit_data *data, int status)
+{
+ if (status >= 0) {
+ nfs_set_local_verifier(data->inode,
+ data->res.verf,
+ NFS_FILE_SYNC);
+ data->res.op_status = NFS4_OK;
+ data->task.tk_status = 0;
+ } else {
+ nfs_reset_boot_verifier(data->inode);
+ data->res.op_status = nfs4_stat_to_errno(status);
+ data->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_release_commit_data(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ nfs_to->nfsd_file_put_local(localio);
+ call_ops->rpc_call_done(&data->task, data);
+ call_ops->rpc_release(data);
+}
+
+static struct nfs_local_fsync_ctx *
+nfs_local_fsync_ctx_alloc(struct nfs_commit_data *data,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_fsync_ctx *ctx = kmalloc(sizeof(*ctx), flags);
+
+ if (ctx != NULL) {
+ ctx->localio = localio;
+ ctx->data = data;
+ INIT_WORK(&ctx->work, nfs_local_fsync_work);
+ kref_init(&ctx->kref);
+ ctx->done = NULL;
+ }
+ return ctx;
+}
+
+static void
+nfs_local_fsync_ctx_kref_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct nfs_local_fsync_ctx, kref));
+}
+
+static void
+nfs_local_fsync_ctx_put(struct nfs_local_fsync_ctx *ctx)
+{
+ kref_put(&ctx->kref, nfs_local_fsync_ctx_kref_free);
+}
+
+static void
+nfs_local_fsync_ctx_free(struct nfs_local_fsync_ctx *ctx)
+{
+ nfs_local_release_commit_data(ctx->localio, ctx->data,
+ ctx->data->task.tk_ops);
+ nfs_local_fsync_ctx_put(ctx);
+}
+
+static void
+nfs_local_fsync_work(struct work_struct *work)
+{
+ struct nfs_local_fsync_ctx *ctx;
+ int status;
+
+ ctx = container_of(work, struct nfs_local_fsync_ctx, work);
+
+ status = nfs_local_run_commit(nfs_to->nfsd_file_file(ctx->localio),
+ ctx->data);
+ nfs_local_commit_done(ctx->data, status);
+ if (ctx->done != NULL)
+ complete(ctx->done);
+ nfs_local_fsync_ctx_free(ctx);
+}
+
+int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ struct nfs_local_fsync_ctx *ctx;
+
+ ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_KERNEL);
+ if (!ctx) {
+ nfs_local_commit_done(data, -ENOMEM);
+ nfs_local_release_commit_data(localio, data, call_ops);
+ return -ENOMEM;
+ }
+
+ nfs_local_init_commit(data, call_ops);
+ kref_get(&ctx->kref);
+ if (how & FLUSH_SYNC) {
+ DECLARE_COMPLETION_ONSTACK(done);
+ ctx->done = &done;
+ queue_work(nfsiod_workqueue, &ctx->work);
+ wait_for_completion(&done);
+ } else
+ queue_work(nfsiod_workqueue, &ctx->work);
+ nfs_local_fsync_ctx_put(ctx);
+ return 0;
+}
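The kref handling in nfs_local_commit() deserves a trace: the context is born with one reference, the caller takes a second before queueing, the worker drops one when it frees the commit data, and the caller drops the last after optionally waiting. Reading the code above, the counts run:

/*
 * nfs_local_fsync_ctx_alloc()                    kref = 1
 * kref_get()                                     kref = 2  (pre-queue)
 * worker: nfs_local_fsync_ctx_free() -> _put()   kref = 1
 * caller: nfs_local_fsync_ctx_put()              kref = 0 -> kfree(ctx)
 *
 * The extra reference keeps ctx valid while a FLUSH_SYNC caller sleeps
 * in wait_for_completion(&done).
 */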
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index c19093814296..6e75c6c2d234 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -22,14 +22,12 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -64,8 +62,6 @@
#define NFS_readdirres_sz (1+NFS_pagepad_sz)
#define NFS_statfsres_sz (1+NFS_info_sz)
-static int nfs_stat_to_errno(enum nfs_stat);
-
/*
* Encode/decode NFSv2 basic data types
*
@@ -1054,70 +1050,6 @@ out_default:
return nfs_stat_to_errno(status);
}
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 60f032be805a..4ae01c10b7e2 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -21,14 +21,13 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
+#include <linux/nfs_common.h>
+
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -91,8 +90,6 @@
NFS3_pagepad_sz)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
-static int nfs3_stat_to_errno(enum nfs_stat);
-
/*
* Map file type to S_IFMT bits
*/
@@ -1406,7 +1403,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1445,7 +1442,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1495,7 +1492,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1537,7 +1534,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1578,7 +1575,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1658,7 +1655,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1728,7 +1725,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1795,7 +1792,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1835,7 +1832,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1881,7 +1878,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1926,7 +1923,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/**
@@ -2101,7 +2098,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2167,7 +2164,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2243,7 +2240,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2304,7 +2301,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2350,7 +2347,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2416,7 +2413,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2435,76 +2432,11 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs3_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs3_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
-
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c2045a2a9d0f..7d383d29a995 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -83,7 +83,7 @@ struct nfs4_minor_version_ops {
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
- int owner_id;
+ u64 owner_id;
int flags;
u32 counter;
spinlock_t lock; /* Protects the list */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b8ffbe52ba15..cd2fbde2e6d7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3904,6 +3904,18 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
+#define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
+ (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
+static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
+{
+ u32 share_access_want = res->open_caps.oa_share_access_want[0];
+ u32 attr_bitmask = res->attr_bitmask[2];
+
+ return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
+ ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
+ FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
+}
+
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
u32 minorversion = server->nfs_client->cl_minorversion;
@@ -3982,8 +3994,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
#endif
if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
server->caps |= NFS_CAP_FS_LOCATIONS;
- if (res.attr_bitmask[2] & FATTR4_WORD2_TIME_DELEG_MODIFY)
- server->caps |= NFS_CAP_DELEGTIME;
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -4011,6 +4021,8 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
if (res.open_caps.oa_share_access_want[0] &
NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
server->caps |= NFS_CAP_OPEN_XOR;
+ if (nfs4_server_delegtime_capable(&res))
+ server->caps |= NFS_CAP_DELEGTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 877f682b45f2..581864a15888 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -501,11 +501,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
sp = kzalloc(sizeof(*sp), gfp_flags);
if (!sp)
return NULL;
- sp->so_seqid.owner_id = ida_alloc(&server->openowner_id, gfp_flags);
- if (sp->so_seqid.owner_id < 0) {
- kfree(sp);
- return NULL;
- }
+ sp->so_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
sp->so_server = server;
sp->so_cred = get_cred(cred);
spin_lock_init(&sp->so_lock);
@@ -536,7 +532,6 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_cred(sp->so_cred);
- ida_free(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -879,19 +874,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
lsp->ls_owner = owner;
- lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT);
- if (lsp->ls_seqid.owner_id < 0)
- goto out_free;
+ lsp->ls_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
-out_free:
- kfree(lsp);
- return NULL;
}
void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- ida_free(&server->lockowner_id, lsp->ls_seqid.owner_id);
nfs4_destroy_seqid_counter(&lsp->ls_seqid);
kfree(lsp);
}
@@ -1957,6 +1946,7 @@ restart:
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
status = nfs4_recovery_handle_error(clp, status);
+ nfs4_free_state_owners(&freeme);
return (status != 0) ? status : -EAGAIN;
}
@@ -2023,6 +2013,12 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
nfs_mark_client_ready(clp, -EPERM);
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
return -EPERM;
+ case -ETIMEDOUT:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
+ }
+ fallthrough;
case -EACCES:
case -NFS4ERR_DELAY:
case -EAGAIN:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 7704a4509676..e8ac3f615f93 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfs4_fs.h"
#include "nfs4trace.h"
@@ -63,11 +64,7 @@
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
struct compound_hdr;
-static int nfs4_stat_to_errno(int);
static void encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr);
@@ -975,11 +972,6 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
return p;
}
-static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0);
@@ -1424,12 +1416,12 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
- p = reserve_space(xdr, 36);
+ p = reserve_space(xdr, 40);
p = xdr_encode_hyper(p, arg->clientid);
- *p++ = cpu_to_be32(24);
+ *p++ = cpu_to_be32(28);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
*p++ = cpu_to_be32(arg->server->s_dev);
- *p++ = cpu_to_be32(arg->id.uniquifier);
+ p = xdr_encode_hyper(p, arg->id.uniquifier);
xdr_encode_hyper(p, arg->id.create_time);
}
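This hunk widens the open-owner uniquifier from 32 to 64 bits, which accounts for both sizes growing by four bytes. The arithmetic, derived from the encoder above:

/*
 * opaque "open id" payload: 8 ("open id:") + 4 (s_dev)
 *                           + 8 (uniquifier) + 8 (create_time) = 28
 * total reserved: 8 (clientid) + 4 (opaque length) + 28 = 40 bytes
 * (previously a 24-byte payload and 36 bytes total with the 32-bit
 * uniquifier)
 */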
@@ -3447,7 +3439,7 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
- dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: link support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3465,7 +3457,7 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
- dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: symlink support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3607,7 +3599,7 @@ static int decode_attr_case_insensitive(struct xdr_stream *xdr, uint32_t *bitmap
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_CASE_INSENSITIVE;
}
- dprintk("%s: case_insensitive=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: case_insensitive=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3625,7 +3617,7 @@ static int decode_attr_case_preserving(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_CASE_PRESERVING;
}
- dprintk("%s: case_preserving=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: case_preserving=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -4333,8 +4325,7 @@ static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT;
}
- dprintk("%s: XATTR support=%s\n", __func__,
- *res == 0 ? "false" : "true");
+ dprintk("%s: XATTR support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -4409,14 +4400,6 @@ static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
return 0;
}
-static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
-{
- ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
- if (unlikely(ret < 0))
- return -EIO;
- return 0;
-}
-
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
@@ -7621,72 +7604,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return 0;
}
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS4_OK, 0 },
- { NFS4ERR_PERM, -EPERM },
- { NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
- { NFS4ERR_NXIO, -ENXIO },
- { NFS4ERR_ACCESS, -EACCES },
- { NFS4ERR_EXIST, -EEXIST },
- { NFS4ERR_XDEV, -EXDEV },
- { NFS4ERR_NOTDIR, -ENOTDIR },
- { NFS4ERR_ISDIR, -EISDIR },
- { NFS4ERR_INVAL, -EINVAL },
- { NFS4ERR_FBIG, -EFBIG },
- { NFS4ERR_NOSPC, -ENOSPC },
- { NFS4ERR_ROFS, -EROFS },
- { NFS4ERR_MLINK, -EMLINK },
- { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
- { NFS4ERR_DQUOT, -EDQUOT },
- { NFS4ERR_STALE, -ESTALE },
- { NFS4ERR_BADHANDLE, -EBADHANDLE },
- { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
- { NFS4ERR_NOTSUPP, -ENOTSUPP },
- { NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
- { NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
- { NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_NOXATTR, -ENODATA },
- { NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
-};
-
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
- */
-static int
-nfs4_stat_to_errno(int stat)
-{
- int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == stat)
- return nfs_errtbl[i].errno;
- }
- if (stat <= 10000 || stat > 10100) {
- /* The server is looney tunes. */
- return -EREMOTEIO;
- }
- /* If we cannot translate the error, the recovery routines should
- * handle it.
- * Note: remaining NFSv4 error codes have values > 10000, so should
- * not conflict with native Linux error codes.
- */
- return -stat;
-}
-
#ifdef CONFIG_NFS_V4_2
#include "nfs42xdr.c"
#endif /* CONFIG_NFS_V4_2 */
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 352fdaed4075..1eab98c277fa 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1685,6 +1685,67 @@ TRACE_EVENT(nfs_mount_path,
TP_printk("path='%s'", __get_str(path))
);
+TRACE_EVENT(nfs_local_open_fh,
+ TP_PROTO(
+ const struct nfs_fh *fh,
+ fmode_t fmode,
+ int error
+ ),
+
+ TP_ARGS(fh, fmode, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->fmode = (__force unsigned int)fmode;
+ ),
+
+ TP_printk(
+ "error=%d fhandle=0x%08x mode=%s",
+ __entry->error,
+ __entry->fhandle,
+ show_fs_fmode_flags(__entry->fmode)
+ )
+);
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable);
+
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 04124f226665..e27c07bd8929 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -731,7 +731,8 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags)
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -761,6 +762,10 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
hdr->args.count,
(unsigned long long)hdr->args.offset);
+ if (localio)
+ return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
+ localio, hdr, call_ops);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -953,6 +958,12 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0) {
+ struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
+
+ struct nfsd_file *localio =
+ nfs_local_open_fh(clp, hdr->cred,
+ hdr->args.fh, hdr->args.context->mode);
+
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
@@ -961,7 +972,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
NFS_PROTO(hdr->inode),
desc->pg_rpc_callops,
desc->pg_ioflags,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags,
+ localio);
}
return ret;
}
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index a74ee69a2fa6..dbef837e871a 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -490,7 +490,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF);
+ RPC_TASK_CRED_NOREF, NULL);
} else {
nfs_init_commit(data, NULL, data->lseg, cinfo);
initiate_commit(data, how);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a6103333b666..81bd1b9aba17 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -48,8 +48,7 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
- if (rhdr->res.scratch != NULL)
- kfree(rhdr->res.scratch);
+ kfree(rhdr->res.scratch);
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 97b386032b71..9723b6c53397 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -551,6 +551,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
else
seq_puts(m, ",local_lock=posix");
+ if (nfss->flags & NFS_MOUNT_NO_ALIGNWRITE)
+ seq_puts(m, ",noalignwrite");
+
if (nfss->flags & NFS_MOUNT_WRITE_EAGER) {
if (nfss->flags & NFS_MOUNT_WRITE_WAIT)
seq_puts(m, ",write=wait");
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d074d0ceb4f0..ead2dc55952d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -772,8 +772,7 @@ static void nfs_inode_add_request(struct nfs_page *req)
nfs_lock_request(req);
spin_lock(&mapping->i_private_lock);
set_bit(PG_MAPPED, &req->wb_flags);
- folio_set_private(folio);
- folio->private = req;
+ folio_attach_private(folio, req);
spin_unlock(&mapping->i_private_lock);
atomic_long_inc(&nfsi->nrequests);
/* this a head request for a page group - mark it as having an
@@ -797,8 +796,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
spin_lock(&mapping->i_private_lock);
if (likely(folio)) {
- folio->private = NULL;
- folio_clear_private(folio);
+ folio_detach_private(folio);
clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
spin_unlock(&mapping->i_private_lock);
@@ -1297,7 +1295,10 @@ static int nfs_can_extend_write(struct file *file, struct folio *folio,
struct file_lock_context *flctx = locks_inode_context(inode);
struct file_lock *fl;
int ret;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
+ return 0;
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_folio_write_uptodate(folio, pagelen))
@@ -1663,7 +1664,8 @@ EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags)
+ int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1692,6 +1694,9 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
dprintk("NFS: initiated commit call\n");
+ if (localio)
+ return nfs_local_commit(localio, data, call_ops, how);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1791,6 +1796,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
struct nfs_commit_info *cinfo)
{
struct nfs_commit_data *data;
+ struct nfsd_file *localio;
unsigned short task_flags = 0;
/* another commit raced with us */
@@ -1807,9 +1813,12 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
nfs_init_commit(data, head, NULL, cinfo);
if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
+
+ localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
+ data->args.fh, data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags, localio);
}
/*
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index 119c75ab9fd0..a5e54809701e 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,5 +6,10 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
+nfs_localio-objs := nfslocalio.o
+
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
+
+obj-$(CONFIG_NFS_COMMON) += common.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
new file mode 100644
index 000000000000..34a115176f97
--- /dev/null
+++ b/fs/nfs_common/common.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/nfs_common.h>
+#include <linux/nfs4.h>
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ return nfs_errtbl[i].errno;
+}
+EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
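With the table hoisted into nfs_common, the v2 and v3 XDR decoders share one translator; typical use matches the nfs2xdr.c and nfs3xdr.c hunks elsewhere in this patch. A minimal sketch:

#include <linux/nfs.h>
#include <linux/nfs_common.h>

/* Illustrative decode tail, as the v2/v3 XDR decoders now do. */
static int example_decode_status(enum nfs_stat status)
{
	if (status == NFS_OK)
		return 0;
	return nfs_stat_to_errno(status); /* e.g. NFSERR_NOENT -> -ENOENT */
}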
+
+/*
+ * We need to translate between nfs v4 status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { NFS4ERR_NOXATTR, -ENODATA },
+ { NFS4ERR_XATTR2BIG, -E2BIG },
+ { -1, -EIO }
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used by NFSv4.
+ */
+int nfs4_stat_to_errno(int stat)
+{
+ int i;
+ for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
+ if (nfs4_errtbl[i].stat == stat)
+ return nfs4_errtbl[i].errno;
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return -stat;
+}
+EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
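The NFSv4 fallback differs from the v2/v3 one: unknown codes inside the 10000..10100 protocol range are returned negated so the recovery routines can inspect them, while anything outside that range marks the server as broken. Illustrative values:

/*
 * nfs4_stat_to_errno(NFS4ERR_NOENT) -> -ENOENT    (table hit)
 * nfs4_stat_to_errno(NFS4ERR_DELAY) -> -10008     (in range, negated
 *                                                  for recovery)
 * nfs4_stat_to_errno(42)            -> -EREMOTEIO (out of range)
 */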
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
new file mode 100644
index 000000000000..42b479b9191f
--- /dev/null
+++ b/fs/nfs_common/nfslocalio.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/nfslocalio.h>
+#include <net/netns/generic.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFS localio protocol bypass support");
+
+static DEFINE_SPINLOCK(nfs_uuid_lock);
+
+/*
+ * Global list of nfs_uuid_t instances
+ * that is protected by nfs_uuid_lock.
+ */
+static LIST_HEAD(nfs_uuids);
+
+void nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
+{
+ nfs_uuid->net = NULL;
+ nfs_uuid->dom = NULL;
+ uuid_gen(&nfs_uuid->uuid);
+
+ spin_lock(&nfs_uuid_lock);
+ list_add_tail_rcu(&nfs_uuid->list, &nfs_uuids);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_begin);
+
+void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net == NULL) {
+ spin_lock(&nfs_uuid_lock);
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_end);
+
+static nfs_uuid_t *nfs_uuid_lookup_locked(const uuid_t *uuid)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ list_for_each_entry(nfs_uuid, &nfs_uuids, list)
+ if (uuid_equal(&nfs_uuid->uuid, uuid))
+ return nfs_uuid;
+
+ return NULL;
+}
+
+static struct module *nfsd_mod;
+
+void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
+ struct net *net, struct auth_domain *dom,
+ struct module *mod)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid = nfs_uuid_lookup_locked(uuid);
+ if (nfs_uuid) {
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on a list so the net pointer can be
+ * invalidated.
+ */
+ list_move(&nfs_uuid->list, list);
+ rcu_assign_pointer(nfs_uuid->net, net);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+ }
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
+
+static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ module_put(nfsd_mod);
+ nfs_uuid->net = NULL;
+ }
+ if (nfs_uuid->dom) {
+ auth_domain_put(nfs_uuid->dom);
+ nfs_uuid->dom = NULL;
+ }
+ list_del_init(&nfs_uuid->list);
+}
+
+void nfs_uuid_invalidate_clients(struct list_head *list)
+{
+ nfs_uuid_t *nfs_uuid, *tmp;
+
+ spin_lock(&nfs_uuid_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, list, list)
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients);
+
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ struct net *net;
+ struct nfsd_file *localio;
+
+ /*
+ * Not running in nfsd context, so we must safely get a reference on
+ * nfsd_serv. But the server may already be shutting down; if so,
+ * disallow new localio. uuid->net is NOT a counted reference, but
+ * rcu_read_lock() ensures that if uuid->net is not NULL, calling
+ * nfsd_serv_try_get() is safe, and if it succeeds we will have an
+ * implied reference to the net.
+ *
+ * Otherwise NFS may not have a reference on NFSD and therefore cannot
+ * safely make 'nfs_to' calls.
+ */
+ rcu_read_lock();
+ net = rcu_dereference(uuid->net);
+ if (!net || !nfs_to->nfsd_serv_try_get(net)) {
+ rcu_read_unlock();
+ return ERR_PTR(-ENXIO);
+ }
+ rcu_read_unlock();
+ /* We have an implied reference to net thanks to nfsd_serv_try_get */
+ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
+ cred, nfs_fh, fmode);
+ if (IS_ERR(localio))
+ nfs_to->nfsd_serv_put(net);
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+
+/*
+ * The NFS LOCALIO code needs to call into NFSD using various symbols,
+ * but cannot be statically linked, because that will make the NFS
+ * module always depend on the NFSD module.
+ *
+ * 'nfs_to' provides NFS access to NFSD functions needed for LOCALIO;
+ * its lifetime is tightly coupled to the NFSD module and will always
+ * be available to NFS LOCALIO because any successful client<->server
+ * LOCALIO handshake results in a reference on the NFSD module (above),
+ * so NFS implicitly holds a reference to the NFSD module and its
+ * functions in the 'nfs_to' nfsd_localio_operations cannot disappear.
+ *
+ * If the last NFS client using LOCALIO disconnects (and its reference
+ * on NFSD is dropped) then NFSD could be unloaded, resulting in 'nfs_to'
+ * functions being invalid pointers. But if NFSD isn't loaded then NFS
+ * will not be able to handshake with NFSD and will have no cause to
+ * try to call 'nfs_to' function pointers. If/when NFSD is reloaded it
+ * will reinitialize the 'nfs_to' function pointers and make LOCALIO
+ * possible.
+ */
+const struct nfsd_localio_operations *nfs_to;
+EXPORT_SYMBOL_GPL(nfs_to);
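The NFSD half of this series is expected to populate 'nfs_to' at module init and leave it stale on unload, exactly as the comment above describes. A hypothetical sketch of that side (the operation names match the calls made in this file; the init function and the extern declarations are assumptions, not taken from this patch):

#include <linux/nfslocalio.h>

/* Hypothetical NFSD-side wiring; only the member names are taken from
 * the nfs_to-> call sites in this file. */
extern bool nfsd_serv_try_get(struct net *net);
extern void nfsd_serv_put(struct net *net);
extern struct nfsd_file *nfsd_open_local_fh(struct net *net,
		struct auth_domain *dom, struct rpc_clnt *rpc_clnt,
		const struct cred *cred, const struct nfs_fh *nfs_fh,
		const fmode_t fmode);
extern struct file *nfsd_file_file(struct nfsd_file *nf);
extern void nfsd_file_put_local(struct nfsd_file *nf);

static const struct nfsd_localio_operations nfsd_localio_ops = {
	.nfsd_serv_try_get	= nfsd_serv_try_get,
	.nfsd_serv_put		= nfsd_serv_put,
	.nfsd_open_local_fh	= nfsd_open_local_fh,
	.nfsd_file_file		= nfsd_file_file,
	.nfsd_file_put_local	= nfsd_file_put_local,
};

void nfsd_localio_ops_init(void)
{
	nfs_to = &nfsd_localio_ops;	/* published once NFSD is loaded */
}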
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index ec2ab6429e00..c0bd1509ccd4 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -7,6 +7,7 @@ config NFSD
select LOCKD
select SUNRPC
select EXPORTFS
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFSD_V2_ACL
select NFS_ACL_SUPPORT if NFSD_V3_ACL
depends on MULTIUSER
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index b8736a82e57c..18cbd3fa7691 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -23,3 +23,4 @@ nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
+nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index e6beaaf4f170..93e33d1ee891 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -5,26 +5,26 @@
#include "nfsd.h"
#include "auth.h"
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
for (f = exp->ex_flavors; f < end; f++) {
- if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
+ if (f->pseudoflavor == cred->cr_flavor)
return f->flags;
}
return exp->ex_flags;
}
-int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
{
struct group_info *rqgi;
struct group_info *gi;
struct cred *new;
int i;
- int flags = nfsexp_flags(rqstp, exp);
+ int flags = nfsexp_flags(cred, exp);
/* discard any old override before preparing the new set */
revert_creds(get_cred(current_real_cred()));
@@ -32,10 +32,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
if (!new)
return -ENOMEM;
- new->fsuid = rqstp->rq_cred.cr_uid;
- new->fsgid = rqstp->rq_cred.cr_gid;
+ new->fsuid = cred->cr_uid;
+ new->fsgid = cred->cr_gid;
- rqgi = rqstp->rq_cred.cr_group_info;
+ rqgi = cred->cr_group_info;
if (flags & NFSEXP_ALLSQUASH) {
new->fsuid = exp->ex_anon_uid;
diff --git a/fs/nfsd/auth.h b/fs/nfsd/auth.h
index dbd66424f600..8c5031bbbcee 100644
--- a/fs/nfsd/auth.h
+++ b/fs/nfsd/auth.h
@@ -12,6 +12,6 @@
* Set the current process's fsuid/fsgid etc to those of the NFS
* client user
*/
-int nfsd_setuser(struct svc_rqst *, struct svc_export *);
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp);
#endif /* LINUX_NFSD_AUTH_H */
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 3c040c81c77d..08a20e5bcf7f 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -147,8 +147,7 @@ nfsd4_block_get_device_info_simple(struct super_block *sb,
struct pnfs_block_deviceaddr *dev;
struct pnfs_block_volume *b;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
@@ -255,8 +254,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
const struct pr_ops *ops;
int ret;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index b0361e8aa9a7..4e28ac8f1127 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -47,7 +47,7 @@ struct pnfs_block_volume {
struct pnfs_block_deviceaddr {
u32 nr_volumes;
- struct pnfs_block_volume volumes[];
+ struct pnfs_block_volume volumes[] __counted_by(nr_volumes);
};
__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 66a05fefae98..bb7addef4a31 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -10,7 +10,7 @@
#define NFSCACHE_H
#include <linux/sunrpc/svc.h>
-#include "netns.h"
+#include "nfsd.h"
/*
* Representation of a reply cache entry.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 50b3135d07ac..c82d8e3e0d4f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1074,10 +1074,30 @@ static struct svc_export *exp_find(struct cache_detail *cd,
return exp;
}
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svc_xprt *xprt;
+
+ /*
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. So there is no need for further checks here.
+ */
+ if (!rqstp)
+ return nfs_ok;
+
+ xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
@@ -1098,17 +1118,17 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
- return 0;
+ return nfs_ok;
/* ip-address based client; check sec= export option: */
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
- return 0;
+ return nfs_ok;
}
/* defaults in absence of sec= options: */
if (exp->ex_nflavors == 0) {
if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
- return 0;
+ return nfs_ok;
}
/* If the compound op contains a spo_must_allowed op,
@@ -1118,10 +1138,10 @@ ok:
*/
if (nfsd4_spo_must_allow(rqstp))
- return 0;
+ return nfs_ok;
denied:
- return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
+ return nfserr_wrongsec;
}
/*
@@ -1164,19 +1184,35 @@ gss:
return gssexp;
}
+/**
+ * rqst_exp_find - Find an svc_export in the context of a rqst or similar
+ * @reqp: The handle to be used to suspend the request if a cache-upcall
+ *        is needed. If NULL, missing in-cache information will result
+ *        in failure.
+ * @net: The network namespace in which the request exists
+ * @cl: default auth_domain to use for looking up the export
+ * @gsscl: an alternate auth_domain defined using deprecated gss/krb5 format.
+ * @fsid_type: The type of fsid to look for
+ * @fsidv: The actual fsid to look up in the context of either client.
+ *
+ * Perform a lookup for @cl/@fsidv in the given @net for an export. If
+ * none is found and @gsscl is specified, repeat the lookup using @gsscl.
+ *
+ * Returns an export, or an error pointer.
+ */
struct svc_export *
-rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
+rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
- if (rqstp->rq_client == NULL)
+ if (!cl)
goto gss;
/* First try the auth_unix client: */
- exp = exp_find(cd, rqstp->rq_client, fsid_type,
- fsidv, &rqstp->rq_chandle);
+ exp = exp_find(cd, cl, fsid_type, fsidv, reqp);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
@@ -1186,10 +1222,9 @@ rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
return exp;
gss:
/* Otherwise, try falling back on gss client */
- if (rqstp->rq_gssclient == NULL)
+ if (!gsscl)
return exp;
- gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
- &rqstp->rq_chandle);
+ gssexp = exp_find(cd, gsscl, fsid_type, fsidv, reqp);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
@@ -1220,7 +1255,9 @@ struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- return rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ return rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ FSID_NUM, fsidv);
}
/*
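Two LOCALIO accommodations land in export.c: check_nfsd_access() now short-circuits when @rqstp is NULL, and rqst_exp_find() takes its lookup context (cache handle, net, auth domains) explicitly instead of digging it out of a struct svc_rqst. A hypothetical caller sketch for a context with no rqstp; the function name is illustrative only:

/* With a NULL cache_req handle, a missing cache entry fails
 * immediately instead of suspending a (nonexistent) request. */
static struct svc_export *localio_exp_find(struct net *net,
					   struct auth_domain *dom,
					   int fsid_type, u32 *fsidv)
{
	return rqst_exp_find(NULL, net, dom, NULL, fsid_type, fsidv);
}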
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index ca9dc230ae3d..3794ae253a70 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -99,7 +99,8 @@ struct svc_expkey {
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
+struct svc_cred;
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
/*
@@ -127,6 +128,8 @@ static inline struct svc_export *exp_get(struct svc_export *exp)
cache_get(&exp->h);
return exp;
}
-struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
+struct svc_export *rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv);
#endif /* NFSD_EXPORT_H */
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index f4704f5d4086..19bb88c7eebd 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -52,10 +52,11 @@
#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
-#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
+#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE|NFSD_MAY_LOCALIO)
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_allocations);
static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
@@ -111,7 +112,7 @@ static void
nfsd_file_schedule_laundrette(void)
{
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
- queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
+ queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
NFSD_LAUNDRETTE_DELAY);
}
@@ -151,7 +152,7 @@ nfsd_file_mark_put(struct nfsd_file_mark *nfm)
}
static struct nfsd_file_mark *
-nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
+nfsd_file_mark_find_or_create(struct inode *inode)
{
int err;
struct fsnotify_mark *mark;
@@ -215,7 +216,9 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
if (unlikely(!nf))
return NULL;
+ this_cpu_inc(nfsd_file_allocations);
INIT_LIST_HEAD(&nf->nf_lru);
+ INIT_LIST_HEAD(&nf->nf_gc);
nf->nf_birthtime = ktime_get();
nf->nf_file = NULL;
nf->nf_cred = get_current_cred();
@@ -387,14 +390,42 @@ nfsd_file_put(struct nfsd_file *nf)
nfsd_file_free(nf);
}
+/**
+ * nfsd_file_put_local - put the reference to nfsd_file and local nfsd_serv
+ * @nf: nfsd_file of which to put the references
+ *
+ * First put the reference of the nfsd_file and then put the
+ * reference to the associated nn->nfsd_serv.
+ */
+void
+nfsd_file_put_local(struct nfsd_file *nf)
+{
+ struct net *net = nf->nf_net;
+
+ nfsd_file_put(nf);
+ nfsd_serv_put(net);
+}
+
+/**
+ * nfsd_file_file - get the backing file of an nfsd_file
+ * @nf: nfsd_file of which to access the backing file.
+ *
+ * Return backing file for @nf.
+ */
+struct file *
+nfsd_file_file(struct nfsd_file *nf)
+{
+ return nf->nf_file;
+}
+
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
struct nfsd_file *nf;
while (!list_empty(dispose)) {
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del_init(&nf->nf_lru);
+ nf = list_first_entry(dispose, struct nfsd_file, nf_gc);
+ list_del_init(&nf->nf_gc);
nfsd_file_free(nf);
}
}
@@ -411,12 +442,12 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
while(!list_empty(dispose)) {
struct nfsd_file *nf = list_first_entry(dispose,
- struct nfsd_file, nf_lru);
+ struct nfsd_file, nf_gc);
struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
spin_lock(&l->lock);
- list_move_tail(&nf->nf_lru, &l->freeme);
+ list_move_tail(&nf->nf_gc, &l->freeme);
spin_unlock(&l->lock);
svc_wake_up(nn->nfsd_serv);
}
@@ -503,7 +534,8 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
/* Refcount went to zero. Unhash it and queue it to the dispose list */
nfsd_file_unhash(nf);
- list_lru_isolate_move(lru, &nf->nf_lru, head);
+ list_lru_isolate(lru, &nf->nf_lru);
+ list_add(&nf->nf_gc, head);
this_cpu_inc(nfsd_file_evictions);
trace_nfsd_file_gc_disposed(nf);
return LRU_REMOVED;
@@ -578,7 +610,7 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
/* If refcount goes to 0, then put on the dispose list */
if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
- list_add(&nf->nf_lru, dispose);
+ list_add(&nf->nf_gc, dispose);
trace_nfsd_file_closing(nf);
}
}
@@ -654,8 +686,8 @@ nfsd_file_close_inode_sync(struct inode *inode)
nfsd_file_queue_for_close(inode, &dispose);
while (!list_empty(&dispose)) {
- nf = list_first_entry(&dispose, struct nfsd_file, nf_lru);
- list_del_init(&nf->nf_lru);
+ nf = list_first_entry(&dispose, struct nfsd_file, nf_gc);
+ list_del_init(&nf->nf_gc);
nfsd_file_free(nf);
}
}
@@ -909,6 +941,7 @@ nfsd_file_cache_shutdown(void)
for_each_possible_cpu(i) {
per_cpu(nfsd_file_cache_hits, i) = 0;
per_cpu(nfsd_file_acquisitions, i) = 0;
+ per_cpu(nfsd_file_allocations, i) = 0;
per_cpu(nfsd_file_releases, i) = 0;
per_cpu(nfsd_file_total_age, i) = 0;
per_cpu(nfsd_file_evictions, i) = 0;
@@ -977,12 +1010,14 @@ nfsd_file_is_cached(struct inode *inode)
}
static __be32
-nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf, bool want_gc)
{
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
- struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
bool stale_retry = true;
bool open_retry = true;
@@ -991,8 +1026,13 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
int ret;
retry:
- status = fh_verify(rqstp, fhp, S_IFREG,
- may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (rqstp) {
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ } else {
+ status = fh_verify_local(net, cred, client, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ }
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
@@ -1024,7 +1064,7 @@ retry:
if (unlikely(nf)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
- nfsd_file_slab_free(&new->nf_rcu);
+ nfsd_file_free(new);
goto wait_for_construction;
}
nf = new;
@@ -1035,8 +1075,6 @@ retry:
if (likely(ret == 0))
goto open_file;
- if (ret == -EEXIST)
- goto retry;
trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
status = nfserr_jukebox;
goto construction_err;
@@ -1051,6 +1089,7 @@ wait_for_construction:
status = nfserr_jukebox;
goto construction_err;
}
+ nfsd_file_put(nf);
open_retry = false;
fh_put(fhp);
goto retry;
@@ -1074,7 +1113,7 @@ out:
open_file:
trace_nfsd_file_alloc(nf);
- nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
+ nf->nf_mark = nfsd_file_mark_find_or_create(inode);
if (nf->nf_mark) {
if (file) {
get_file(file);
@@ -1139,7 +1178,8 @@ __be32
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, true);
}
/**
@@ -1163,7 +1203,55 @@ __be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, false);
+}
+
+/**
+ * nfsd_file_acquire_local - Get a struct nfsd_file with an open file for localio
+ * @net: The network namespace in which to perform a lookup
+ * @cred: the user credential with which to validate access
+ * @client: the auth_domain for LOCALIO lookup
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * This file lookup interface provides access to a file given the
+ * filehandle and credential. No connection-based authorisation
+ * is performed and in that way it is quite different from other
+ * file access mediated by nfsd. It allows a kernel module such as the NFS
+ * client to reach across network and filesystem namespaces to access
+ * a file. The security implications of this should be carefully
+ * considered before use.
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * and garbage-collected. The object is retained for a few
+ * seconds after the final nfsd_file_put() in case the caller
+ * wants to re-use it.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ /*
+ * Save creds before calling nfsd_file_do_acquire() (which calls
+ * nfsd_setuser). Important because caller (LOCALIO) is from
+ * client context.
+ */
+ const struct cred *save_cred = get_current_cred();
+ __be32 beres;
+
+ beres = nfsd_file_do_acquire(NULL, net, cred, client,
+ fhp, may_flags, NULL, pnf, true);
+ revert_creds(save_cred);
+ return beres;
}
/**
@@ -1189,7 +1277,8 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, file, pnf, false);
}
/*
@@ -1199,7 +1288,7 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
- unsigned long releases = 0, evictions = 0;
+ unsigned long allocations = 0, releases = 0, evictions = 0;
unsigned long hits = 0, acquisitions = 0;
unsigned int i, count = 0, buckets = 0;
unsigned long lru = 0, total_age = 0;
@@ -1224,6 +1313,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
for_each_possible_cpu(i) {
hits += per_cpu(nfsd_file_cache_hits, i);
acquisitions += per_cpu(nfsd_file_acquisitions, i);
+ allocations += per_cpu(nfsd_file_allocations, i);
releases += per_cpu(nfsd_file_releases, i);
total_age += per_cpu(nfsd_file_total_age, i);
evictions += per_cpu(nfsd_file_evictions, i);
@@ -1234,6 +1324,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
seq_printf(m, "lru entries: %lu\n", lru);
seq_printf(m, "cache hits: %lu\n", hits);
seq_printf(m, "acquisitions: %lu\n", acquisitions);
+ seq_printf(m, "allocations: %lu\n", allocations);
seq_printf(m, "releases: %lu\n", releases);
seq_printf(m, "evictions: %lu\n", evictions);
if (releases)
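Taken together, nfsd_file_acquire_local(), nfsd_file_file(), and nfsd_file_put_local() form the LOCALIO open/use/release cycle described in the kernel-doc above. A hedged caller sketch, with error handling reduced to the essentials and the function name hypothetical:

static ssize_t localio_read_sketch(struct net *net, struct svc_cred *cred,
				   struct auth_domain *dom, struct svc_fh *fhp,
				   void *buf, size_t len, loff_t *pos)
{
	struct nfsd_file *nf;
	ssize_t ret;

	if (nfsd_file_acquire_local(net, cred, dom, fhp,
				    NFSD_MAY_READ, &nf) != nfs_ok)
		return -EACCES;

	ret = kernel_read(nfsd_file_file(nf), buf, len, pos);

	/* Drops the nfsd_file reference and the paired nfsd_serv reference. */
	nfsd_file_put_local(nf);
	return ret;
}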
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index c61884def906..cadf3c2689c4 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -44,6 +44,7 @@ struct nfsd_file {
struct nfsd_file_mark *nf_mark;
struct list_head nf_lru;
+ struct list_head nf_gc;
struct rcu_head nf_rcu;
ktime_t nf_birthtime;
};
@@ -54,7 +55,9 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
+void nfsd_file_put_local(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
void nfsd_file_net_dispose(struct nfsd_net *nn);
bool nfsd_file_is_cached(struct inode *inode);
@@ -65,5 +68,8 @@ __be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf);
int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
#endif /* _FS_NFSD_FILECACHE_H */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
new file mode 100644
index 000000000000..291e9c69cae4
--- /dev/null
+++ b/fs/nfsd/localio.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS server support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/exportfs.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+#include <linux/string.h>
+
+#include "nfsd.h"
+#include "vfs.h"
+#include "netns.h"
+#include "filecache.h"
+#include "cache.h"
+
+static const struct nfsd_localio_operations nfsd_localio_ops = {
+ .nfsd_serv_try_get = nfsd_serv_try_get,
+ .nfsd_serv_put = nfsd_serv_put,
+ .nfsd_open_local_fh = nfsd_open_local_fh,
+ .nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_file = nfsd_file_file,
+};
+
+void nfsd_localio_ops_init(void)
+{
+ nfs_to = &nfsd_localio_ops;
+}
+
+/**
+ * nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file
+ *
+ * @net: 'struct net' to get the proper nfsd_net required for LOCALIO access
+ * @dom: 'struct auth_domain' required for LOCALIO access
+ * @rpc_clnt: rpc_clnt that the client established
+ * @cred: cred that the client established
+ * @nfs_fh: filehandle to look up
+ * @fmode: fmode_t to use for open
+ *
+ * This function maps a local filehandle to an open nfsd_file on a
+ * local filesystem. This is useful when the NFS client has the local
+ * server mounted: it can then avoid all the NFS protocol overhead
+ * for reads, writes and commits.
+ *
+ * On successful return, returned nfsd_file will have its nf_net member
+ * set. Caller (NFS client) is responsible for calling nfsd_serv_put and
+ * nfsd_file_put (via nfs_to->nfsd_file_put_local).
+ */
+struct nfsd_file *
+nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ int mayflags = NFSD_MAY_LOCALIO;
+ struct svc_cred rq_cred;
+ struct svc_fh fh;
+ struct nfsd_file *localio;
+ __be32 beres;
+
+ if (nfs_fh->size > NFS4_FHSIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* nfs_fh -> svc_fh */
+ fh_init(&fh, NFS4_FHSIZE);
+ fh.fh_handle.fh_size = nfs_fh->size;
+ memcpy(fh.fh_handle.fh_raw, nfs_fh->data, nfs_fh->size);
+
+ if (fmode & FMODE_READ)
+ mayflags |= NFSD_MAY_READ;
+ if (fmode & FMODE_WRITE)
+ mayflags |= NFSD_MAY_WRITE;
+
+ svcauth_map_clnt_to_svc_cred_local(rpc_clnt, cred, &rq_cred);
+
+ beres = nfsd_file_acquire_local(net, &rq_cred, dom,
+ &fh, mayflags, &localio);
+ if (beres)
+ localio = ERR_PTR(nfs_stat_to_errno(be32_to_cpu(beres)));
+
+ fh_put(&fh);
+ if (rq_cred.cr_group_info)
+ put_group_info(rq_cred.cr_group_info);
+
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfsd_open_local_fh);
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static __be32 localio_proc_null(struct svc_rqst *rqstp)
+{
+ return rpc_success;
+}
+
+struct localio_uuidarg {
+ uuid_t uuid;
+};
+
+static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ net, rqstp->rq_client, THIS_MODULE);
+
+ return rpc_success;
+}
+
+static bool localio_decode_uuidarg(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ u8 uuid[UUID_SIZE];
+
+ if (decode_opaque_fixed(xdr, uuid, UUID_SIZE))
+ return false;
+ import_uuid(&argp->uuid, uuid);
+
+ return true;
+}
+
+static const struct svc_procedure localio_procedures1[] = {
+ [LOCALIOPROC_NULL] = {
+ .pc_func = localio_proc_null,
+ .pc_decode = nfssvc_decode_voidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 0,
+ .pc_name = "NULL",
+ },
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .pc_func = localio_proc_uuid_is_local,
+ .pc_decode = localio_decode_uuidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct localio_uuidarg),
+ .pc_argzero = sizeof(struct localio_uuidarg),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_name = "UUID_IS_LOCAL",
+ },
+};
+
+#define LOCALIO_NR_PROCEDURES ARRAY_SIZE(localio_procedures1)
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ localio_count[LOCALIO_NR_PROCEDURES]);
+const struct svc_version localio_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = LOCALIO_NR_PROCEDURES,
+ .vs_proc = localio_procedures1,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_count = localio_count,
+ .vs_xdrsize = XDR_QUADLEN(UUID_SIZE),
+ .vs_hidden = true,
+};
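On the client side, the operations table registered by nfsd_localio_ops_init() is reached through the nfs_to pointer. A simplified sketch of the expected acquire/release pairing (the real client negotiates UUID_IS_LOCAL first; the helper name is hypothetical):

static struct nfsd_file *localio_try_open(struct net *net,
					  struct auth_domain *dom,
					  struct rpc_clnt *clnt,
					  const struct cred *cred,
					  const struct nfs_fh *fh)
{
	struct nfsd_file *nf;

	if (!nfs_to->nfsd_serv_try_get(net))
		return ERR_PTR(-ENXIO);

	nf = nfs_to->nfsd_open_local_fh(net, dom, clnt, cred, fh,
					FMODE_READ | FMODE_WRITE);
	if (IS_ERR(nf))
		nfs_to->nfsd_serv_put(net);	/* open failed: drop the serv ref */
	return nf;
}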
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 14ec15656320..26f7b34d1a03 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -13,6 +13,7 @@
#include <linux/filelock.h>
#include <linux/nfs4.h>
#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
@@ -139,7 +140,9 @@ struct nfsd_net {
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
-
+ struct percpu_ref nfsd_serv_ref;
+ struct completion nfsd_serv_confirm_done;
+ struct completion nfsd_serv_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -148,12 +151,13 @@ struct nfsd_net {
u32 s2s_cp_cl_id;
struct idr s2s_cp_stateids;
spinlock_t s2s_cp_lock;
+ atomic_t pending_async_copies;
/*
* Version information
*/
- bool *nfsd_versions;
- bool *nfsd4_minorversions;
+ bool nfsd_versions[NFSD_MAXVERS + 1];
+ bool nfsd4_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1];
/*
* Duplicate reply cache
@@ -213,16 +217,21 @@ struct nfsd_net {
/* last time an admin-revoke happened for NFSv4.0 */
time64_t nfs40_last_revoke;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /* Local clients to be invalidated when net is shut down */
+ struct list_head local_clients;
+#endif
};
/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
extern bool nfsd_support_version(int vers);
-extern void nfsd_netns_free_versions(struct nfsd_net *nn);
-
extern unsigned int nfsd_net_id;
+bool nfsd_serv_try_get(struct net *net);
+void nfsd_serv_put(struct net *net);
+
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
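The new nfsd_serv_ref and its two completions give nn->nfsd_serv a try-get/put lifetime so LOCALIO users can pin the server across calls. A simplified model of the intended semantics; the actual implementation lives in nfssvc.c and this is not that code:

bool nfsd_serv_try_get(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* Fails once the ref has been killed at server shutdown. */
	return percpu_ref_tryget_live(&nn->nfsd_serv_ref);
}

void nfsd_serv_put(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	percpu_ref_put(&nn->nfsd_serv_ref);
}

static void nfsd_serv_free(struct percpu_ref *ref)
{
	struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);

	/* Shutdown waits on this before tearing down nfsd_serv. */
	complete(&nn->nfsd_serv_free_done);
}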
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index dfcc957e460d..372bdcf5e07a 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -28,6 +28,29 @@ static int nfs3_ftypes[] = {
S_IFIFO, /* NF3FIFO */
};
+static __be32 nfsd3_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ status = nfserr_badhandle;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
/*
* NULL call.
*/
@@ -57,6 +80,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -80,6 +104,7 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp)
if (argp->check_guard)
guardtime = &argp->guardtime;
resp->status = nfsd_setattr(rqstp, &resp->fh, &attrs, guardtime);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -103,6 +128,7 @@ nfsd3_proc_lookup(struct svc_rqst *rqstp)
resp->status = nfsd_lookup(rqstp, &resp->dirfh,
argp->name, argp->len,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -122,6 +148,7 @@ nfsd3_proc_access(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
resp->status = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -142,6 +169,7 @@ nfsd3_proc_readlink(struct svc_rqst *rqstp)
resp->pages = rqstp->rq_next_page++;
resp->status = nfsd_readlink(rqstp, &resp->fh,
page_address(*resp->pages), &resp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -179,6 +207,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
&resp->count, &resp->eof);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -212,6 +241,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
rqstp->rq_vec, nvecs, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -359,6 +389,7 @@ nfsd3_proc_create(struct svc_rqst *rqstp)
newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd3_create_file(rqstp, dirfhp, newfhp, argp);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -384,6 +415,7 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, S_IFDIR, 0, &resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -424,6 +456,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
argp->flen, argp->tname, &attrs, &resp->fh);
kfree(argp->tname);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -465,6 +498,7 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, type, rdev, &resp->fh);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -486,6 +520,7 @@ nfsd3_proc_remove(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
argp->name, argp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -506,6 +541,7 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -528,6 +564,7 @@ nfsd3_proc_rename(struct svc_rqst *rqstp)
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
&resp->tfh, argp->tname, argp->tlen);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -548,6 +585,7 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -600,6 +638,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
/* Recycle only pages that were part of the reply */
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -644,6 +683,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -661,6 +701,7 @@ nfsd3_proc_fsstat(struct svc_rqst *rqstp)
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0);
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -704,6 +745,7 @@ nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -746,6 +788,7 @@ nfsd3_proc_pathconf(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -773,6 +816,7 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
argp->count, resp->verf);
nfsd_file_put(nf);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index d756f443fc44..b5b3ab9d719a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1223,6 +1223,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
* cb_seq_status is only set in decode_cb_sequence4res,
* and so will remain 1 if an rpc level failure occurs.
*/
+ trace_nfsd_cb_rpc_prepare(clp);
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion && !nfsd41_cb_get_slot(cb, task))
@@ -1329,11 +1330,14 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
+ trace_nfsd_cb_rpc_done(clp);
+
if (!nfsd4_cb_sequence_done(task, cb))
return;
if (cb->cb_status) {
- WARN_ON_ONCE(task->tk_status);
+ WARN_ONCE(task->tk_status, "cb_status=%d tk_status=%d",
+ cb->cb_status, task->tk_status);
task->tk_status = cb->cb_status;
}
@@ -1359,6 +1363,8 @@ static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
+ trace_nfsd_cb_rpc_release(cb->cb_clp);
+
if (cb->cb_need_restart)
nfsd4_queue_cb(cb);
else
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 7a806ac13e31..8cca1329f348 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
.id = id,
.type = type,
};
+ __be32 status = nfs_ok;
__be32 *p;
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
return nfserrno(ret);
ret = strlen(item->name);
WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+
p = xdr_reserve_space(xdr, ret + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, item->name, ret);
+ if (unlikely(!p)) {
+ status = nfserr_resource;
+ goto out_put;
+ }
+ xdr_encode_opaque(p, item->name, ret);
+out_put:
cache_put(&item->h, nn->idtoname_cache);
- return 0;
+ return status;
}
static bool
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 4f3072b5979a..fbfddd3c4c94 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -740,6 +740,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
.prepare = nfsd4_cb_layout_prepare,
.done = nfsd4_cb_layout_done,
.release = nfsd4_cb_layout_release,
+ .opcode = OP_CB_LAYOUTRECALL,
};
static bool
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2e39cf2e502a..b5a6bf4f459f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -158,7 +158,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
return fh_verify(rqstp, current_fh, S_IFREG, accmode);
}
-static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
+static __be32 nfsd_check_obj_isreg(struct svc_fh *fh, u32 minor_version)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
@@ -166,14 +166,15 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
- /*
- * Using err_symlink as our catch-all case may look odd; but
- * there's no other obvious error for this case in 4.0, and we
- * happen to know that it will cause the linux v4 client to do
- * the right thing on attempts to open something other than a
- * regular file.
- */
- return nfserr_symlink;
+ if (S_ISLNK(mode))
+ return nfserr_symlink;
+
+ /* RFC 7530 - 16.16.6 */
+ if (minor_version == 0)
+ return nfserr_symlink;
+ else
+ return nfserr_wrong_type;
+
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
@@ -466,7 +467,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
}
if (status)
goto out;
- status = nfsd_check_obj_isreg(*resfh);
+ status = nfsd_check_obj_isreg(*resfh, cstate->minorversion);
if (status)
goto out;
@@ -751,15 +752,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&access->ac_supported);
}
-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
-{
- __be32 *verf = (__be32 *)verifier->data;
-
- BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
-
- nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
-}
-
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -1288,6 +1280,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
+ atomic_dec(&copy->cp_nn->pending_async_copies);
kfree(copy->cp_src);
kfree(copy);
}
@@ -1621,7 +1614,8 @@ static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
.release = nfsd4_cb_offload_release,
- .done = nfsd4_cb_offload_done
+ .done = nfsd4_cb_offload_done,
+ .opcode = OP_CB_OFFLOAD,
};
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
@@ -1630,7 +1624,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
NFS_FILE_SYNC : NFS_UNSTABLE;
nfsd4_copy_set_sync(copy, sync);
- gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
}
static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
@@ -1767,7 +1760,7 @@ static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
- trace_nfsd_copy_do_async(copy);
+ trace_nfsd_copy_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
@@ -1794,6 +1787,7 @@ static int nfsd4_do_async_copy(void *data)
do_callback:
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
cleanup_async_copy(copy);
return 0;
@@ -1803,9 +1797,11 @@ static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd4_copy *async_copy = NULL;
struct nfsd4_copy *copy = &u->copy;
+ struct nfsd42_write_res *result;
__be32 status;
- struct nfsd4_copy *async_copy = NULL;
/*
* Currently, async COPY is not reliable. Force all COPY
@@ -1814,6 +1810,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
nfsd4_copy_set_sync(copy, true);
+ result = &copy->cp_res;
+ nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
+
copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
trace_nfsd_copy_inter(copy);
@@ -1838,12 +1837,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
if (nfsd4_copy_is_async(copy)) {
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-
- status = nfserrno(-ENOMEM);
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
+ async_copy->cp_nn = nn;
+ /* Arbitrary cap on number of pending async copy operations */
+ if (atomic_inc_return(&nn->pending_async_copies) >
+ (int)rqstp->rq_pool->sp_nrthreads) {
+ atomic_dec(&nn->pending_async_copies);
+ goto out_err;
+ }
INIT_LIST_HEAD(&async_copy->copies);
refcount_set(&async_copy->refcount, 1);
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
@@ -1851,8 +1854,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_err;
if (!nfs4_init_copy_state(nn, copy))
goto out_err;
- memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
- sizeof(copy->cp_res.cb_stateid));
+ memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
+ sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
@@ -1883,7 +1886,7 @@ out_err:
}
if (async_copy)
cleanup_async_copy(async_copy);
- status = nfserrno(-ENOMEM);
+ status = nfserr_jukebox;
goto out;
}
@@ -1942,7 +1945,7 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct nfs4_stid *stid;
+ struct nfs4_stid *stid = NULL;
struct nfs4_cpntf_state *cps;
struct nfs4_client *clp = cstate->clp;
@@ -1951,6 +1954,8 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&stid);
if (status)
return status;
+ if (!stid)
+ return nfserr_bad_stateid;
cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
cn->cpn_lease_time.tv_nsec = 0;
@@ -2231,7 +2236,9 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
return nfserr_noent;
}
- exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+ exp = rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
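The pending_async_copies accounting in nfsd4_copy() uses the common bounded-counter idiom: increment, compare against the cap, and back off on overflow. The same idiom isolated as a hedged helper (name hypothetical; the matching decrement lives in nfs4_put_copy()):

static bool try_reserve_async_copy(struct nfsd_net *nn, unsigned int limit)
{
	if (atomic_inc_return(&nn->pending_async_copies) > (int)limit) {
		atomic_dec(&nn->pending_async_copies);	/* over the cap */
		return false;
	}
	return true;
}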
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 67d8673a9391..b7d61eb8afe9 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -809,6 +809,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
ci = &cmsg->cm_u.cm_clntinfo;
if (get_user(namelen, &ci->cc_name.cn_len))
return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
name.data = memdup_user(&ci->cc_name.cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
@@ -831,6 +835,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
cnm = &cmsg->cm_u.cm_name;
if (get_user(namelen, &cnm->cn_len))
return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
name.data = memdup_user(&cnm->cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
@@ -1895,10 +1903,7 @@ nfsd4_cltrack_upcall_lock(struct nfs4_client *clp)
static void
nfsd4_cltrack_upcall_unlock(struct nfs4_client *clp)
{
- smp_mb__before_atomic();
- clear_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
- smp_mb__after_atomic();
- wake_up_bit(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK);
+ clear_and_wake_up_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
}
static void
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a366fb1c1b9b..ac1859c7cc9d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -400,6 +400,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.prepare = nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
+ .opcode = OP_CB_NOTIFY_LOCK,
};
/*
@@ -1077,7 +1078,8 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
* When a delegation is recalled, the filehandle is stored in the "new"
* filter.
* Every 30 seconds we swap the filters and clear the "new" one,
- * unless both are empty of course.
+ * unless both are empty of course. This results in delegations for a
+ * given filehandle being blocked for between 30 and 60 seconds.
*
* Each filter is 256 bits. We hash the filehandle to 32bit and use the
* low 3 bytes as hash-table indices.
@@ -1106,9 +1108,9 @@ static int delegation_blocked(struct knfsd_fh *fh)
if (ktime_get_seconds() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
+ bd->new = 1-bd->new;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
- bd->new = 1-bd->new;
bd->swap_time = ktime_get_seconds();
}
spin_unlock(&blocked_delegations_lock);
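Note what the reordering above fixes: bd->new is now flipped before the memset, so the set being cleared is the one about to start collecting, not the one holding the most recent 30 seconds of filehandles. A simplified model of the two-generation swap:

/* Entries survive one full swap, so a filehandle stays blocked
 * for between 30 and 60 seconds, as the comment above explains. */
static unsigned long fh_set[2][256 / BITS_PER_LONG];
static int fh_new;	/* index of the filter currently collecting */

static void swap_filters(void)
{
	fh_new = 1 - fh_new;	/* the previous "new" set becomes "old" */
	memset(fh_set[fh_new], 0, sizeof(fh_set[fh_new]));
}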
@@ -1663,9 +1665,7 @@ static void release_openowner(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = oo->oo_owner.so_client;
- struct list_head reaplist;
-
- INIT_LIST_HEAD(&reaplist);
+ LIST_HEAD(reaplist);
spin_lock(&clp->cl_lock);
unhash_openowner_locked(oo);
@@ -2369,9 +2369,8 @@ __destroy_client(struct nfs4_client *clp)
int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
- struct list_head reaplist;
+ LIST_HEAD(reaplist);
- INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
@@ -2692,7 +2691,7 @@ static int client_info_show(struct seq_file *m, void *v)
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
- seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
+ seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
seq_printf(m, "admin-revoked states: %d\n",
atomic_read(&clp->cl_admin_revoked));
drop_client(clp);
@@ -3059,7 +3058,10 @@ nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
struct nfs4_cb_fattr *ncf =
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+ trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
ncf->ncf_cb_status = task->tk_status;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
@@ -3078,19 +3080,20 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
- wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+ clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
nfs4_put_stid(&dp->dl_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
+ .opcode = OP_CB_RECALL_ANY,
};
static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
.done = nfsd4_cb_getattr_done,
.release = nfsd4_cb_getattr_release,
+ .opcode = OP_CB_GETATTR,
};
static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
@@ -4704,6 +4707,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
+ smp_mb__after_atomic();
wake_up_var(&so->so_replay.rp_locked);
nfs4_put_stateowner(so);
}
@@ -5004,6 +5008,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* so tell them to stop waiting.
*/
atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
+ smp_mb__after_atomic();
wake_up_var(&oo->oo_owner.so_replay.rp_locked);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
@@ -5218,6 +5223,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
.prepare = nfsd4_cb_recall_prepare,
.done = nfsd4_cb_recall_done,
.release = nfsd4_cb_recall_release,
+ .opcode = OP_CB_RECALL,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
@@ -5277,11 +5283,8 @@ static bool nfsd_breaker_owns_lease(struct file_lease *fl)
struct svc_rqst *rqst;
struct nfs4_client *clp;
- if (!i_am_nfsd())
- return false;
- rqst = kthread_data(current);
- /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
- if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
+ rqst = nfsd_current_rqst();
+ if (!nfsd_v4client(rqst))
return false;
clp = *(rqst->rq_lease_breaker);
return dl->dl_stid.sc_client == clp;
@@ -5859,7 +5862,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
/*
* Now that the deleg is set, check again to ensure that nothing
- * raced in and changed the mode while we weren't lookng.
+ * raced in and changed the mode while we weren't looking.
*/
status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
if (status)
@@ -5912,6 +5915,28 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
}
}
+static bool
+nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
+ struct kstat *stat)
+{
+ struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file);
+ struct path path;
+ int rc;
+
+ if (!nf)
+ return false;
+
+ path.mnt = currentfh->fh_export->ex_path.mnt;
+ path.dentry = file_dentry(nf->nf_file);
+
+ rc = vfs_getattr(&path, stat,
+ (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ AT_STATX_SYNC_AS_STAT);
+
+ nfsd_file_put(nf);
+ return rc == 0;
+}
+
/*
* The Linux NFS server does not offer write delegations to NFSv4.0
* clients in order to avoid conflicts between write delegations and
@@ -5947,7 +5972,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
int cb_up;
int status = 0;
struct kstat stat;
- struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = false;
@@ -5983,20 +6007,16 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
- trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
- path.mnt = currentfh->fh_export->ex_path.mnt;
- path.dentry = currentfh->fh_dentry;
- if (vfs_getattr(&path, &stat,
- (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
- AT_STATX_SYNC_AS_STAT)) {
+ if (!nfs4_delegation_stat(dp, currentfh, &stat)) {
nfs4_put_stid(&dp->dl_stid);
destroy_delegation(dp);
goto out_no_deleg;
}
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
dp->dl_cb_fattr.ncf_initial_cinfo =
nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
+ trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
@@ -6271,7 +6291,6 @@ void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
-EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
/*
* This is called when nfsd is being shutdown, after all inter_ssc
@@ -6619,9 +6638,8 @@ deleg_reaper(struct nfsd_net *nn)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
- struct list_head cblist;
+ LIST_HEAD(cblist);
- INIT_LIST_HEAD(&cblist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
@@ -6647,7 +6665,6 @@ deleg_reaper(struct nfsd_net *nn)
cl_ra_cblist);
list_del_init(&clp->cl_ra_cblist);
clp->cl_ra->ra_keep = 0;
- clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
BIT(RCA4_TYPE_MASK_WDATA_DLG);
trace_nfsd_cb_recall_any(clp->cl_ra);
@@ -6892,7 +6909,8 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
nf = nfs4_find_file(s, flags);
if (nf) {
- status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
+ status = nfsd_permission(&rqstp->rq_cred,
+ fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
nfsd_file_put(nf);
@@ -7023,11 +7041,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
*nfp = NULL;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
- if (cstid)
- status = nfserr_bad_stateid;
- else
- status = check_special_stateids(net, fhp, stateid,
- flags);
+ status = check_special_stateids(net, fhp, stateid, flags);
goto done;
}
@@ -7481,8 +7495,9 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto put_stateid;
trace_nfsd_deleg_return(stateid);
- wake_up_var(d_inode(cstate->current_fh.fh_dentry));
destroy_delegation(dp);
+ smp_mb__after_atomic();
+ wake_up_var(d_inode(cstate->current_fh.fh_dentry));
put_stateid:
nfs4_put_stid(&dp->dl_stid);
out:
@@ -8338,7 +8353,7 @@ out:
* @cstate: NFSv4 COMPOUND state
* @u: RELEASE_LOCKOWNER arguments
*
- * Check if theree are any locks still held and if not - free the lockowner
+ * Check if there are any locks still held and if not, free the lockowner
* and any lock state that is owned.
*
* Return values:
@@ -8557,6 +8572,7 @@ static int nfs4_state_create_net(struct net *net)
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->s2s_cp_lock);
idr_init(&nn->s2s_cp_stateids);
+ atomic_set(&nn->pending_async_copies, 0);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
@@ -8836,6 +8852,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct file_lock_context *ctx;
+ struct nfs4_delegation *dp = NULL;
struct file_lease *fl;
struct iattr attrs;
struct nfs4_cb_fattr *ncf;
@@ -8845,84 +8862,76 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
+
+#define NON_NFSD_LEASE ((void *)1)
+
spin_lock(&ctx->flc_lock);
for_each_file_lock(fl, &ctx->flc_lease) {
- unsigned char type = fl->c.flc_type;
-
if (fl->c.flc_flags == FL_LAYOUT)
continue;
- if (fl->fl_lmops != &nfsd_lease_mng_ops) {
- /*
- * non-nfs lease, if it's a lease with F_RDLCK then
- * we are done; there isn't any write delegation
- * on this inode
- */
- if (type == F_RDLCK)
- break;
-
- nfsd_stats_wdeleg_getattr_inc(nn);
- spin_unlock(&ctx->flc_lock);
-
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (fl->c.flc_type == F_WRLCK) {
+ if (fl->fl_lmops == &nfsd_lease_mng_ops)
+ dp = fl->c.flc_owner;
+ else
+ dp = NON_NFSD_LEASE;
+ }
+ break;
+ }
+ if (dp == NULL || dp == NON_NFSD_LEASE ||
+ dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ spin_unlock(&ctx->flc_lock);
+ if (dp == NON_NFSD_LEASE) {
+ status = nfserrno(nfsd_open_break_lease(inode,
+ NFSD_MAY_READ));
if (status != nfserr_jukebox ||
!nfsd_wait_for_delegreturn(rqstp, inode))
return status;
- return 0;
}
- if (type == F_WRLCK) {
- struct nfs4_delegation *dp = fl->c.flc_owner;
+ return 0;
+ }
- if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
- spin_unlock(&ctx->flc_lock);
- return 0;
- }
- nfsd_stats_wdeleg_getattr_inc(nn);
- dp = fl->c.flc_owner;
- refcount_inc(&dp->dl_stid.sc_count);
- ncf = &dp->dl_cb_fattr;
- nfs4_cb_getattr(&dp->dl_cb_fattr);
- spin_unlock(&ctx->flc_lock);
- wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
- TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
- if (ncf->ncf_cb_status) {
- /* Recall delegation only if client didn't respond */
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
- if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode)) {
- nfs4_put_stid(&dp->dl_stid);
- return status;
- }
- }
- if (!ncf->ncf_file_modified &&
- (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
- ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
- ncf->ncf_file_modified = true;
- if (ncf->ncf_file_modified) {
- int err;
-
- /*
- * Per section 10.4.3 of RFC 8881, the server would
- * not update the file's metadata with the client's
- * modified size
- */
- attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
- inode_lock(inode);
- err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
- inode_unlock(inode);
- if (err) {
- nfs4_put_stid(&dp->dl_stid);
- return nfserrno(err);
- }
- ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
- *size = ncf->ncf_cur_fsize;
- *modified = true;
- }
- nfs4_put_stid(&dp->dl_stid);
- return 0;
+ nfsd_stats_wdeleg_getattr_inc(nn);
+ refcount_inc(&dp->dl_stid.sc_count);
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ spin_unlock(&ctx->flc_lock);
+
+ wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
+ TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
+ if (ncf->ncf_cb_status) {
+ /* Recall delegation only if client didn't respond */
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ goto out_status;
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
+ int err;
+
+ /*
+ * Per section 10.4.3 of RFC 8881, the server would
+ * not update the file's metadata with the client's
+ * modified size
+ */
+ attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
+ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
+ inode_lock(inode);
+ err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ if (err) {
+ status = nfserrno(err);
+ goto out_status;
}
- break;
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
}
- spin_unlock(&ctx->flc_lock);
- return 0;
+ status = 0;
+out_status:
+ nfs4_put_stid(&dp->dl_stid);
+ return status;
}
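Both conversions above replace an open-coded clear_bit()/wake_up_bit() sequence with clear_and_wake_up_bit(), which supplies the barrier ordering the clear before the wakeup. The waiter/waker pairing for CB_GETATTR_BUSY, sketched with hypothetical helper names:

/* Waker: clear the flag bit with full ordering, then wake waiters. */
static void cb_done(unsigned long *flags)
{
	clear_and_wake_up_bit(CB_GETATTR_BUSY, flags);
}

/* Waiter: returns 0 if the bit cleared, nonzero on signal or timeout. */
static int cb_wait(unsigned long *flags)
{
	return wait_on_bit_timeout(flags, CB_GETATTR_BUSY,
				   TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
}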
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 97f583777972..f118921250c3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1246,14 +1246,6 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
}
static __be32
-nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
-{
- if (argp->minorversion == 0)
- return nfs_ok;
- return nfserr_notsupp;
-}
-
-static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
@@ -2374,7 +2366,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_OPEN_CONFIRM] = nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = nfsd4_decode_open_downgrade,
[OP_PUTFH] = nfsd4_decode_putfh,
- [OP_PUTPUBFH] = nfsd4_decode_putpubfh,
+ [OP_PUTPUBFH] = nfsd4_decode_noop,
[OP_PUTROOTFH] = nfsd4_decode_noop,
[OP_READ] = nfsd4_decode_read,
[OP_READDIR] = nfsd4_decode_readdir,
@@ -5731,6 +5723,23 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
return nfserr_rep_too_big;
}
+static __be32 nfsd4_map_status(__be32 status, u32 minor)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_wrong_type:
+ /* RFC 8881 - 15.1.2.9 */
+ if (minor == 0)
+ status = nfserr_inval;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_symlink;
+ break;
+ }
+ return status;
+}
+
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
@@ -5798,6 +5807,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
so->so_replay.rp_buf, len);
}
status:
+ op->status = nfsd4_map_status(op->status,
+ resp->cstate.minorversion);
*p = op->status;
release:
if (opdesc && opdesc->op_release)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 34eb2c2cbcde..3adbc05ebaac 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
+#include <linux/nfslocalio.h>
#include "idmap.h"
#include "nfsd.h"
@@ -174,6 +175,13 @@ static int export_features_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(export_features);
+static int nfsd_pool_stats_open(struct inode *inode, struct file *file)
+{
+ struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
+
+ return svc_pool_stats_open(&nn->nfsd_info, file);
+}
+
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
@@ -1762,7 +1770,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
- atomic_read(&sp->sp_nrthreads));
+ sp->sp_nrthreads);
if (err)
goto err_unlock;
}
@@ -2224,8 +2232,9 @@ err_free_msg:
*/
static __net_init int nfsd_net_init(struct net *net)
{
- int retval;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int retval;
+ int i;
retval = nfsd_export_init(net);
if (retval)
@@ -2238,16 +2247,20 @@ static __net_init int nfsd_net_init(struct net *net)
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
- nn->nfsd_svcstats.program = &nfsd_program;
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
+ nn->nfsd_svcstats.program = &nfsd_programs[0];
+ for (i = 0; i < sizeof(nn->nfsd_versions); i++)
+ nn->nfsd_versions[i] = nfsd_support_version(i);
+ for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
+ nn->nfsd4_minorversions[i] = nfsd_support_version(4);
nn->nfsd_info.mutex = &nfsd_mutex;
nn->nfsd_serv = NULL;
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
nfsd_proc_stat_init(net);
-
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ INIT_LIST_HEAD(&nn->local_clients);
+#endif
return 0;
out_repcache_error:
@@ -2258,6 +2271,22 @@ out_export_error:
return retval;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/**
+ * nfsd_net_pre_exit - Disconnect localio clients from net namespace
+ * @net: a network namespace that is about to be destroyed
+ *
+ * This invalidates the ->net pointers held by localio clients
+ * while they can still safely access nn->counter.
+ */
+static __net_exit void nfsd_net_pre_exit(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_invalidate_clients(&nn->local_clients);
+}
+#endif
+
/**
* nfsd_net_exit - Release the nfsd_net portion of a net namespace
* @net: a network namespace that is about to be destroyed
@@ -2271,11 +2300,13 @@ static __net_exit void nfsd_net_exit(struct net *net)
percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
- nfsd_netns_free_versions(nn);
}
static struct pernet_operations nfsd_net_ops = {
.init = nfsd_net_init,
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ .pre_exit = nfsd_net_pre_exit,
+#endif
.exit = nfsd_net_exit,
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
@@ -2313,6 +2344,7 @@ static int __init init_nfsd(void)
retval = genl_register_family(&nfsd_nl_family);
if (retval)
goto out_free_all;
+ nfsd_localio_ops_init();
return 0;
out_free_all:
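nfsd_net_pre_exit() leans on the pernet ordering guarantee: every registered .pre_exit callback (followed by a synchronize_rcu()) runs before any .exit callback, so localio clients are disconnected while nn->counter is still live. The registration shape, with hypothetical names:

static struct pernet_operations example_net_ops = {
	.init     = example_net_init,		/* set up per-net state */
	.pre_exit = example_net_pre_exit,	/* invalidate cross-net pointers */
	.exit     = example_net_exit,		/* then tear the state down */
	.id       = &example_net_id,
	.size     = sizeof(struct example_net),
};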
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index cec8697b1cd6..4b56ba1e8e48 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -23,9 +23,7 @@
#include <uapi/linux/nfsd/debug.h>
-#include "netns.h"
#include "export.h"
-#include "stats.h"
#undef ifdebug
#ifdef CONFIG_SUNRPC_DEBUG
@@ -37,7 +35,14 @@
/*
* nfsd version
*/
+#define NFSD_MINVERS 2
+#define NFSD_MAXVERS 4
#define NFSD_SUPPORTED_MINOR_VERSION 2
+bool nfsd_support_version(int vers);
+
+#include "netns.h"
+#include "stats.h"
+
/*
* Maximum blocksizes supported by daemon under various circumstances.
*/
@@ -80,7 +85,7 @@ struct nfsd_genl_rqstp {
u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
};
-extern struct svc_program nfsd_program;
+extern struct svc_program nfsd_programs[];
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
extern spinlock_t nfsd_drc_lock;
@@ -111,11 +116,9 @@ int nfsd_nrthreads(struct net *);
int nfsd_nrpools(struct net *);
int nfsd_get_nrthreads(int n, int *, struct net *);
int nfsd_set_nrthreads(int n, int *, struct net *);
-int nfsd_pool_stats_open(struct inode *, struct file *);
-int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-bool i_am_nfsd(void);
+struct svc_rqst *nfsd_current_rqst(void);
struct nfsdfs_client {
struct kref cl_ref;
@@ -143,6 +146,10 @@ extern const struct svc_version nfsd_acl_version3;
#endif
#endif
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+extern const struct svc_version localio_version1;
+#endif
+
struct nfsd_net;
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
@@ -156,7 +163,7 @@ extern int nfsd_max_blksize;
static inline int nfsd_v4client(struct svc_rqst *rq)
{
- return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+ return rq && rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
}
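Note that nfsd_v4client() now tolerates a NULL request, which matters once
shared helpers are reached from localio paths that have no svc_rqst. A
hedged illustration (use_v4_semantics() is a hypothetical caller):

	struct svc_rqst *rq = nfsd_current_rqst();	/* may be NULL */

	if (nfsd_v4client(rq))		/* NULL-safe after this change */
		use_v4_semantics();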
static inline struct user_namespace *
nfsd_user_namespace(const struct svc_rqst *rqstp)
@@ -327,17 +334,36 @@ void nfsd_lockd_shutdown(void);
#define nfserr_xattr2big cpu_to_be32(NFS4ERR_XATTR2BIG)
#define nfserr_noxattr cpu_to_be32(NFS4ERR_NOXATTR)
-/* error codes for internal use */
+/*
+ * Error codes for internal use. We use an enum to choose numbers that
+ * are not already assigned, then convert to be32, resulting in a number
+ * that cannot conflict with any existing be32 nfserr value.
+ */
+enum {
+ NFSERR_DROPIT = NFS4ERR_FIRST_FREE,
/* if a request fails due to kmalloc failure, it gets dropped.
* Client should resend eventually
*/
-#define nfserr_dropit cpu_to_be32(30000)
+#define nfserr_dropit cpu_to_be32(NFSERR_DROPIT)
+
/* end-of-file indicator in readdir */
-#define nfserr_eof cpu_to_be32(30001)
+ NFSERR_EOF,
+#define nfserr_eof cpu_to_be32(NFSERR_EOF)
+
/* replay detected */
-#define nfserr_replay_me cpu_to_be32(11001)
+ NFSERR_REPLAY_ME,
+#define nfserr_replay_me cpu_to_be32(NFSERR_REPLAY_ME)
+
/* nfs41 replay detected */
-#define nfserr_replay_cache cpu_to_be32(11002)
+ NFSERR_REPLAY_CACHE,
+#define nfserr_replay_cache cpu_to_be32(NFSERR_REPLAY_CACHE)
+
+/* symlink found where dir expected - handled differently from
+ * other symlink-found errors by NFSv3.
+ */
+ NFSERR_SYMLINK_NOT_DIR,
+#define nfserr_symlink_not_dir cpu_to_be32(NFSERR_SYMLINK_NOT_DIR)
+};
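The enum trick above is worth spelling out: members are numbered from
NFS4ERR_FIRST_FREE, the first value not assigned by the protocol, so each
internal code can never collide with a real NFS4ERR_* value, and the
cpu_to_be32() wrapper keeps the constants type-compatible with wire-order
status codes. A hypothetical further member would follow the same pattern
(illustrative only, not in this patch):

	/* appended inside the enum, picking up the next free number */
	NFSERR_EXAMPLE,
	#define nfserr_example cpu_to_be32(NFSERR_EXAMPLE)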
/* Check for dir entries '.' and '..' */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index dd4e11a703aa..40ad58a6a036 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -62,8 +62,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
- umode_t requested)
+nfsd_mode_check(struct dentry *dentry, umode_t requested)
{
umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
@@ -76,36 +75,36 @@ nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
}
return nfs_ok;
}
- /*
- * v4 has an error more specific than err_notdir which we should
- * return in preference to err_notdir:
- */
- if (rqstp->rq_vers == 4 && mode == S_IFLNK)
+ if (mode == S_IFLNK) {
+ if (requested == S_IFDIR)
+ return nfserr_symlink_not_dir;
return nfserr_symlink;
+ }
if (requested == S_IFDIR)
return nfserr_notdir;
if (mode == S_IFDIR)
return nfserr_isdir;
- return nfserr_inval;
+ return nfserr_wrong_type;
}
-static bool nfsd_originating_port_ok(struct svc_rqst *rqstp, int flags)
+static bool nfsd_originating_port_ok(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
+ struct svc_export *exp)
{
- if (flags & NFSEXP_INSECURE_PORT)
+ if (nfsexp_flags(cred, exp) & NFSEXP_INSECURE_PORT)
return true;
/* We don't require gss requests to use low ports: */
- if (rqstp->rq_cred.cr_flavor >= RPC_AUTH_GSS)
+ if (cred->cr_flavor >= RPC_AUTH_GSS)
return true;
return test_bit(RQ_SECURE, &rqstp->rq_flags);
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
struct svc_export *exp)
{
- int flags = nfsexp_flags(rqstp, exp);
-
/* Check if the request originated from a secure port. */
- if (!nfsd_originating_port_ok(rqstp, flags)) {
+ if (rqstp && !nfsd_originating_port_ok(rqstp, cred, exp)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -113,23 +112,15 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
}
/* Set user creds for this exportpoint */
- return nfserrno(nfsd_setuser(rqstp, exp));
+ return nfserrno(nfsd_setuser(cred, exp));
}
-static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
- struct dentry *dentry, struct svc_export *exp)
+static inline __be32 check_pseudo_root(struct dentry *dentry,
+ struct svc_export *exp)
{
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return nfs_ok;
/*
- * v2/v3 clients have no need for the V4ROOT export--they use
- * the mount protocl instead; also, further V4ROOT checks may be
- * in v4-specific code, in which case v2/v3 clients could bypass
- * them.
- */
- if (!nfsd_v4client(rqstp))
- return nfserr_stale;
- /*
* We're exposing only the directories and symlinks that have to be
* traversed on the way to real exports:
*/
@@ -151,7 +142,11 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
* dentry. On success, the results are used to set fh_export and
* fh_dentry.
*/
-static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
struct fid *fid = NULL;
@@ -162,10 +157,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
int len;
__be32 error;
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
- if (rqstp->rq_vers == 4 && fh->fh_size == 0)
+ error = nfserr_badhandle;
+ if (fh->fh_size == 0)
return nfserr_nofilehandle;
if (fh->fh_version != 1)
@@ -195,7 +188,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
data_left -= len;
if (data_left < 0)
return error;
- exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_fsid);
+ exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL,
+ net, client, gssclient,
+ fh->fh_fsid_type, fh->fh_fsid);
fid = (struct fid *)(fh->fh_fsid + len);
error = nfserr_stale;
@@ -229,7 +224,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
put_cred(override_creds(new));
put_cred(new);
} else {
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
}
@@ -237,9 +232,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
/*
* Look up the dentry using the NFS file handle.
*/
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
+ error = nfserr_badhandle;
fileid_type = fh->fh_fileid_type;
@@ -278,17 +271,25 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- switch (rqstp->rq_vers) {
- case 4:
+ switch (fhp->fh_maxsize) {
+ case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
fhp->fh_no_atomic_attr = true;
+ fhp->fh_64bit_cookies = true;
break;
- case 3:
+ case NFS3_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOWCC)
fhp->fh_no_wcc = true;
+ fhp->fh_64bit_cookies = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
break;
- case 2:
+ case NFS_FHSIZE:
fhp->fh_no_wcc = true;
+ if (EX_WGATHER(exp))
+ fhp->fh_use_wgather = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
}
return 0;
@@ -298,42 +299,33 @@ out:
}
/**
- * fh_verify - filehandle lookup and access checking
- * @rqstp: pointer to current rpc request
+ * __fh_verify - filehandle lookup and access checking
+ * @rqstp: RPC transaction context, or NULL
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @gssclient: RPC GSS auth domain, or NULL
* @fhp: filehandle to be verified
* @type: expected type of object pointed to by filehandle
* @access: type of access needed to object
*
- * Look up a dentry from the on-the-wire filehandle, check the client's
- * access to the export, and set the current task's credentials.
- *
- * Regardless of success or failure of fh_verify(), fh_put() should be
- * called on @fhp when the caller is finished with the filehandle.
- *
- * fh_verify() may be called multiple times on a given filehandle, for
- * example, when processing an NFSv4 compound. The first call will look
- * up a dentry using the on-the-wire filehandle. Subsequent calls will
- * skip the lookup and just perform the other checks and possibly change
- * the current task's credentials.
- *
- * @type specifies the type of object expected using one of the S_IF*
- * constants defined in include/linux/stat.h. The caller may use zero
- * to indicate that it doesn't care, or a negative integer to indicate
- * that it expects something not of the given type.
- *
- * @access is formed from the NFSD_MAY_* constants defined in
- * fs/nfsd/vfs.h.
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
*/
-__be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+static __be32
+__fh_verify(struct svc_rqst *rqstp,
+ struct net *net, struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp, umode_t type, int access)
{
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
if (!fhp->fh_dentry) {
- error = nfsd_set_fh_dentry(rqstp, fhp);
+ error = nfsd_set_fh_dentry(rqstp, net, cred, client,
+ gssclient, fhp);
if (error)
goto out;
}
@@ -358,15 +350,15 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
* (for example, if different id-squashing options are in
* effect on the new filesystem).
*/
- error = check_pseudo_root(rqstp, dentry, exp);
+ error = check_pseudo_root(dentry, exp);
if (error)
goto out;
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
- error = nfsd_mode_check(rqstp, dentry, type);
+ error = nfsd_mode_check(dentry, type);
if (error)
goto out;
@@ -392,7 +384,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
skip_pseudoflavor_check:
/* Finally, check access permissions. */
- error = nfsd_permission(rqstp, exp, dentry, access);
+ error = nfsd_permission(cred, exp, dentry, access);
out:
trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
if (error == nfserr_stale)
@@ -400,6 +392,63 @@ out:
return error;
}
+/**
+ * fh_verify_local - filehandle lookup and access checking
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * This API can be used by callers who do not have an RPC
+ * transaction context (i.e., are not running in an nfsd thread).
+ *
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
+ */
+__be32
+fh_verify_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ umode_t type, int access)
+{
+ return __fh_verify(NULL, net, cred, client, NULL,
+ fhp, type, access);
+}
+
+/**
+ * fh_verify - filehandle lookup and access checking
+ * @rqstp: pointer to current rpc request
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * Look up a dentry from the on-the-wire filehandle, check the client's
+ * access to the export, and set the current task's credentials.
+ *
+ * Regardless of success or failure of fh_verify(), fh_put() should be
+ * called on @fhp when the caller is finished with the filehandle.
+ *
+ * fh_verify() may be called multiple times on a given filehandle, for
+ * example, when processing an NFSv4 compound. The first call will look
+ * up a dentry using the on-the-wire filehandle. Subsequent calls will
+ * skip the lookup and just perform the other checks and possibly change
+ * the current task's credentials.
+ *
+ * @type specifies the type of object expected using one of the S_IF*
+ * constants defined in include/linux/stat.h. The caller may use zero
+ * to indicate that it doesn't care, or a negative integer to indicate
+ * that it expects something not of the given type.
+ *
+ * @access is formed from the NFSD_MAY_* constants defined in
+ * fs/nfsd/vfs.h.
+ */
+__be32
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+{
+ return __fh_verify(rqstp, SVC_NET(rqstp), &rqstp->rq_cred,
+ rqstp->rq_client, rqstp->rq_gssclient,
+ fhp, type, access);
+}
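A sketch of the intended non-RPC caller of fh_verify_local() (the function
name and surrounding context here are illustrative, not from this patch):

	/* Verify a handle from a localio context with no svc_rqst. */
	static __be32 example_local_verify(struct net *net,
					   struct svc_cred *cred,
					   struct auth_domain *dom,
					   struct svc_fh *fhp)
	{
		__be32 status;

		status = fh_verify_local(net, cred, dom, fhp,
					 S_IFREG, NFSD_MAY_READ);
		/* As with fh_verify(), the caller must fh_put(fhp) later. */
		return status;
	}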
/*
* Compose a file handle for an NFS reply.
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 6ebdf7ea27bf..5b7394801dc4 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -88,6 +88,8 @@ typedef struct svc_fh {
* wcc data is not atomic with
* operation
*/
+ bool fh_use_wgather; /* NFSv2 wgather option */
+ bool fh_64bit_cookies;/* readdir cookie size */
int fh_flags; /* FH flags */
bool fh_post_saved; /* post-op attrs saved */
bool fh_pre_saved; /* pre-op attrs saved */
@@ -215,6 +217,8 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
* Function prototypes
*/
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
+__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
+ struct svc_fh *, umode_t, int);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 36370b957b63..6dda081eb24c 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -13,6 +13,31 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static __be32 nfsd_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ case nfserr_badhandle:
+ status = nfserr_stale;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_xdev:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
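Because the switch has no default arm, any status not listed passes through
unchanged; only codes that postdate NFSv2 are collapsed onto ones a v2
client understands. Each NFSv2 procedure below applies the mapping exactly
once, just before replying:

	out:
		resp->status = nfsd_map_status(resp->status);
		return rpc_success;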
static __be32
nfsd_proc_null(struct svc_rqst *rqstp)
{
@@ -38,6 +63,7 @@ nfsd_proc_getattr(struct svc_rqst *rqstp)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -109,6 +135,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -143,6 +170,7 @@ nfsd_proc_lookup(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -164,6 +192,7 @@ nfsd_proc_readlink(struct svc_rqst *rqstp)
page_address(resp->page), &resp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -200,6 +229,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -235,6 +265,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -331,10 +362,11 @@ nfsd_proc_create(struct svc_rqst *rqstp)
* echo thing > device-special-file-or-pipe
* by doing a CREATE with type==0
*/
- resp->status = nfsd_permission(rqstp,
- newfhp->fh_export,
- newfhp->fh_dentry,
- NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
+ resp->status = nfsd_permission(
+ &rqstp->rq_cred,
+ newfhp->fh_export,
+ newfhp->fh_dentry,
+ NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
if (resp->status && resp->status != nfserr_rofs)
goto out_unlock;
}
@@ -403,6 +435,7 @@ done:
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -419,6 +452,7 @@ nfsd_proc_remove(struct svc_rqst *rqstp)
resp->status = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -437,6 +471,7 @@ nfsd_proc_rename(struct svc_rqst *rqstp)
&argp->tfh, argp->tname, argp->tlen);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -457,6 +492,7 @@ nfsd_proc_link(struct svc_rqst *rqstp)
&argp->ffh);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -495,6 +531,7 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
fh_put(&argp->ffh);
fh_put(&newfh);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -528,6 +565,7 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -545,6 +583,7 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
resp->status = nfsd_unlink(rqstp, &argp->fh, S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -590,6 +629,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
nfssvc_encode_nfscookie(resp, offset);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -607,6 +647,7 @@ nfsd_proc_statfs(struct svc_rqst *rqstp)
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats,
NFSD_MAY_BYPASS_GSS_ON_ROOT);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 0bc8eaa5e009..e236135ddc63 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
+#include <linux/nfslocalio.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
@@ -35,7 +36,6 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
-extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int nfsd_acl_rpcbind_set(struct net *,
@@ -80,6 +80,15 @@ DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long nfsd_drc_max_mem;
unsigned long nfsd_drc_mem_used;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+static const struct svc_version *localio_versions[] = {
+ [1] = &localio_version1,
+};
+
+#define NFSD_LOCALIO_NRVERS ARRAY_SIZE(localio_versions)
+
+#endif /* CONFIG_NFS_LOCALIO */
+
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static const struct svc_version *nfsd_acl_version[] = {
# if defined(CONFIG_NFSD_V2_ACL)
@@ -90,23 +99,12 @@ static const struct svc_version *nfsd_acl_version[] = {
# endif
};
-#define NFSD_ACL_MINVERS 2
+#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
-static struct svc_program nfsd_acl_program = {
- .pg_prog = NFS_ACL_PROGRAM,
- .pg_nvers = NFSD_ACL_NRVERS,
- .pg_vers = nfsd_acl_version,
- .pg_name = "nfsacl",
- .pg_class = "nfsd",
- .pg_authenticate = &svc_set_client,
- .pg_init_request = nfsd_acl_init_request,
- .pg_rpcbind_set = nfsd_acl_rpcbind_set,
-};
-
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
-static const struct svc_version *nfsd_version[] = {
+static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
#if defined(CONFIG_NFSD_V2)
[2] = &nfsd_version2,
#endif
@@ -116,97 +114,63 @@ static const struct svc_version *nfsd_version[] = {
#endif
};
-#define NFSD_MINVERS 2
-#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
-
-struct svc_program nfsd_program = {
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
- .pg_next = &nfsd_acl_program,
-#endif
+struct svc_program nfsd_programs[] = {
+ {
.pg_prog = NFS_PROGRAM, /* program number */
- .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
+ .pg_nvers = NFSD_MAXVERS+1, /* nr of entries in nfsd_version */
.pg_vers = nfsd_version, /* version table */
.pg_name = "nfsd", /* program name */
.pg_class = "nfsd", /* authentication class */
- .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_authenticate = svc_set_client, /* export authentication */
.pg_init_request = nfsd_init_request,
.pg_rpcbind_set = nfsd_rpcbind_set,
+ },
+#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ {
+ .pg_prog = NFS_ACL_PROGRAM,
+ .pg_nvers = NFSD_ACL_NRVERS,
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ },
+#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ {
+ .pg_prog = NFS_LOCALIO_PROGRAM,
+ .pg_nvers = NFSD_LOCALIO_NRVERS,
+ .pg_vers = localio_versions,
+ .pg_name = "nfslocalio",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+ }
+#endif /* CONFIG_NFS_LOCALIO */
};
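With all three RPC programs carried in one array, registration hands the
whole set to the svc core in a single call; later in this patch the server
is created as:

	serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
				 &nn->nfsd_svcstats,
				 nfsd_max_blksize, nfsd);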
bool nfsd_support_version(int vers)
{
- if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
+ if (vers >= NFSD_MINVERS && vers <= NFSD_MAXVERS)
return nfsd_version[vers] != NULL;
return false;
}
-static bool *
-nfsd_alloc_versions(void)
-{
- bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All compiled versions are enabled by default */
- for (i = 0; i < NFSD_NRVERS; i++)
- vers[i] = nfsd_support_version(i);
- }
- return vers;
-}
-
-static bool *
-nfsd_alloc_minorversions(void)
-{
- bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
- sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All minor versions are enabled by default */
- for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
- vers[i] = nfsd_support_version(4);
- }
- return vers;
-}
-
-void
-nfsd_netns_free_versions(struct nfsd_net *nn)
-{
- kfree(nn->nfsd_versions);
- kfree(nn->nfsd4_minorversions);
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
-}
-
-static void
-nfsd_netns_init_versions(struct nfsd_net *nn)
-{
- if (!nn->nfsd_versions) {
- nn->nfsd_versions = nfsd_alloc_versions();
- nn->nfsd4_minorversions = nfsd_alloc_minorversions();
- if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
- nfsd_netns_free_versions(nn);
- }
-}
-
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
- if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
+ if (vers < NFSD_MINVERS || vers > NFSD_MAXVERS)
return 0;
switch(change) {
case NFSD_SET:
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = nfsd_support_version(vers);
+ nn->nfsd_versions[vers] = nfsd_support_version(vers);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = false;
+ nn->nfsd_versions[vers] = false;
break;
case NFSD_TEST:
- if (nn->nfsd_versions)
- return nn->nfsd_versions[vers];
- fallthrough;
+ return nn->nfsd_versions[vers];
case NFSD_AVAIL:
return nfsd_support_version(vers);
}
@@ -233,23 +197,16 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
switch(change) {
case NFSD_SET:
- if (nn->nfsd4_minorversions) {
- nfsd_vers(nn, 4, NFSD_SET);
- nn->nfsd4_minorversions[minorversion] =
- nfsd_vers(nn, 4, NFSD_TEST);
- }
+ nfsd_vers(nn, 4, NFSD_SET);
+ nn->nfsd4_minorversions[minorversion] =
+ nfsd_vers(nn, 4, NFSD_TEST);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd4_minorversions) {
- nn->nfsd4_minorversions[minorversion] = false;
- nfsd_adjust_nfsd_versions4(nn);
- }
+ nn->nfsd4_minorversions[minorversion] = false;
+ nfsd_adjust_nfsd_versions4(nn);
break;
case NFSD_TEST:
- if (nn->nfsd4_minorversions)
- return nn->nfsd4_minorversions[minorversion];
- return nfsd_vers(nn, 4, NFSD_TEST);
+ return nn->nfsd4_minorversions[minorversion];
case NFSD_AVAIL:
return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
nfsd_vers(nn, 4, NFSD_AVAIL);
@@ -257,6 +214,34 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
+bool nfsd_serv_try_get(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
+}
+
+void nfsd_serv_put(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ percpu_ref_put(&nn->nfsd_serv_ref);
+}
+
+static void nfsd_serv_done(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_confirm_done);
+}
+
+static void nfsd_serv_free(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_free_done);
+}
+
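These helpers implement a percpu_ref lifecycle for the per-net server. The
ordering below is taken from this patch (nfsd_create_serv() and
nfsd_destroy_serv()); the comments are editorial:

	/* creation */
	percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free, 0, GFP_KERNEL);
	init_completion(&nn->nfsd_serv_free_done);
	init_completion(&nn->nfsd_serv_confirm_done);

	/* teardown */
	percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
	wait_for_completion(&nn->nfsd_serv_confirm_done); /* tryget_live now fails */
	wait_for_completion(&nn->nfsd_serv_free_done);    /* last ref dropped */
	/* percpu_ref_exit() follows later, in nfsd_shutdown_net() */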
/*
* Maximum number of nfsd processes
*/
@@ -456,6 +441,7 @@ static void nfsd_shutdown_net(struct net *net)
lockd_down(net);
nn->lockd_up = false;
}
+ percpu_ref_exit(&nn->nfsd_serv_ref);
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
@@ -535,6 +521,13 @@ void nfsd_destroy_serv(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv = nn->nfsd_serv;
+ lockdep_assert_held(&nfsd_mutex);
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
+ wait_for_completion(&nn->nfsd_serv_confirm_done);
+ wait_for_completion(&nn->nfsd_serv_free_done);
+ /* percpu_ref_exit is called in nfsd_shutdown_net */
+
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
@@ -568,11 +561,11 @@ void nfsd_reset_versions(struct nfsd_net *nn)
{
int i;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (nfsd_vers(nn, i, NFSD_TEST))
return;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (i != 4)
nfsd_vers(nn, i, NFSD_SET);
else {
@@ -642,9 +635,11 @@ void nfsd_shutdown_threads(struct net *net)
mutex_unlock(&nfsd_mutex);
}
-bool i_am_nfsd(void)
+struct svc_rqst *nfsd_current_rqst(void)
{
- return kthread_func(current) == nfsd;
+ if (kthread_func(current) == nfsd)
+ return kthread_data(current);
+ return NULL;
}
int nfsd_create_serv(struct net *net)
@@ -657,10 +652,18 @@ int nfsd_create_serv(struct net *net)
if (nn->nfsd_serv)
return 0;
+ error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
+ 0, GFP_KERNEL);
+ if (error)
+ return error;
+ init_completion(&nn->nfsd_serv_free_done);
+ init_completion(&nn->nfsd_serv_confirm_done);
+
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
- serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
+ serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
+ &nn->nfsd_svcstats,
nfsd_max_blksize, nfsd);
if (serv == NULL)
return -ENOMEM;
@@ -705,7 +708,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
if (serv)
for (i = 0; i < serv->sv_nrpools && i < n; i++)
- nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
+ nthreads[i] = serv->sv_pools[i].sp_nrthreads;
return 0;
}
@@ -905,17 +908,17 @@ nfsd_init_request(struct svc_rqst *rqstp,
if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
return svc_generic_init_request(rqstp, progp, ret);
- ret->mismatch.lovers = NFSD_NRVERS;
- for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+ ret->mismatch.lovers = NFSD_MAXVERS + 1;
+ for (i = NFSD_MINVERS; i <= NFSD_MAXVERS; i++) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i;
break;
}
}
- if (ret->mismatch.lovers == NFSD_NRVERS)
+ if (ret->mismatch.lovers > NFSD_MAXVERS)
return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_MINVERS;
- for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
+ for (i = NFSD_MAXVERS; i >= NFSD_MINVERS; i--) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i;
break;
@@ -937,11 +940,9 @@ nfsd(void *vrqstp)
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
- * umask as defined by the client instead of init's umask. */
- if (unshare_fs_struct() < 0) {
- printk("Unable to start nfsd thread: out of memory\n");
- goto out;
- }
+ * umask as defined by the client instead of init's umask.
+ */
+ svc_thread_init_status(rqstp, unshare_fs_struct());
current->fs->umask = 0;
@@ -963,14 +964,13 @@ nfsd(void *vrqstp)
atomic_dec(&nfsd_th_cnt);
-out:
/* Release the thread */
svc_exit_thread(rqstp);
return 0;
}
/**
- * nfsd_dispatch - Process an NFS or NFSACL Request
+ * nfsd_dispatch - Process an NFS, NFSACL, or LOCALIO Request
* @rqstp: incoming request
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
@@ -1084,10 +1084,3 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
return true;
}
-
-int nfsd_pool_stats_open(struct inode *inode, struct file *file)
-{
- struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
-
- return svc_pool_stats_open(&nn->nfsd_info, file);
-}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index ec4559ecd193..79c743c01a47 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -79,6 +79,7 @@ struct nfsd4_callback_ops {
void (*prepare)(struct nfsd4_callback *);
int (*done)(struct nfsd4_callback *, struct rpc_task *);
void (*release)(struct nfsd4_callback *);
+ uint32_t opcode;
};
/*
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 77bbd23aa150..c625966cfcf3 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -86,7 +86,8 @@ DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
{ NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" }, \
{ NFSD_MAY_BYPASS_GSS, "BYPASS_GSS" }, \
{ NFSD_MAY_READ_IF_EXEC, "READ_IF_EXEC" }, \
- { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" })
+ { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" }, \
+ { NFSD_MAY_LOCALIO, "LOCALIO" })
TRACE_EVENT(nfsd_compound,
TP_PROTO(
@@ -193,7 +194,7 @@ TRACE_EVENT(nfsd_compound_encode_err,
{ S_IFIFO, "FIFO" }, \
{ S_IFSOCK, "SOCK" })
-TRACE_EVENT(nfsd_fh_verify,
+TRACE_EVENT_CONDITION(nfsd_fh_verify,
TP_PROTO(
const struct svc_rqst *rqstp,
const struct svc_fh *fhp,
@@ -201,6 +202,7 @@ TRACE_EVENT(nfsd_fh_verify,
int access
),
TP_ARGS(rqstp, fhp, type, access),
+ TP_CONDITION(rqstp != NULL),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -239,7 +241,7 @@ TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
__be32 error
),
TP_ARGS(rqstp, fhp, type, access, error),
- TP_CONDITION(error),
+ TP_CONDITION(rqstp != NULL && error),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -295,12 +297,13 @@ DECLARE_EVENT_CLASS(nfsd_fh_err_class,
__entry->status)
)
-#define DEFINE_NFSD_FH_ERR_EVENT(name) \
-DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
- TP_PROTO(struct svc_rqst *rqstp, \
- struct svc_fh *fhp, \
- int status), \
- TP_ARGS(rqstp, fhp, status))
+#define DEFINE_NFSD_FH_ERR_EVENT(name) \
+DEFINE_EVENT_CONDITION(nfsd_fh_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *fhp, \
+ int status), \
+ TP_ARGS(rqstp, fhp, status), \
+ TP_CONDITION(rqstp != NULL))
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
@@ -1486,6 +1489,9 @@ DEFINE_NFSD_CB_EVENT(new_state);
DEFINE_NFSD_CB_EVENT(probe);
DEFINE_NFSD_CB_EVENT(lost);
DEFINE_NFSD_CB_EVENT(shutdown);
+DEFINE_NFSD_CB_EVENT(rpc_prepare);
+DEFINE_NFSD_CB_EVENT(rpc_done);
+DEFINE_NFSD_CB_EVENT(rpc_release);
TRACE_DEFINE_ENUM(RPC_AUTH_NULL);
TRACE_DEFINE_ENUM(RPC_AUTH_UNIX);
@@ -1553,6 +1559,19 @@ TRACE_EVENT(nfsd_cb_setup_err,
__entry->error)
);
+/* Not a real opcode, but there is no 0 operation. */
+#define _CB_NULL 0
+
+#define show_nfsd_cb_opcode(val) \
+ __print_symbolic(val, \
+ { _CB_NULL, "CB_NULL" }, \
+ { OP_CB_GETATTR, "CB_GETATTR" }, \
+ { OP_CB_RECALL, "CB_RECALL" }, \
+ { OP_CB_LAYOUTRECALL, "CB_LAYOUTRECALL" }, \
+ { OP_CB_RECALL_ANY, "CB_RECALL_ANY" }, \
+ { OP_CB_NOTIFY_LOCK, "CB_NOTIFY_LOCK" }, \
+ { OP_CB_OFFLOAD, "CB_OFFLOAD" })
+
DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
TP_PROTO(
const struct nfs4_client *clp,
@@ -1563,6 +1582,7 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__field(u32, cl_boot)
__field(u32, cl_id)
__field(const void *, cb)
+ __field(unsigned long, opcode)
__field(bool, need_restart)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
@@ -1570,14 +1590,15 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->cb = cb;
+ __entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
__entry->need_restart = cb->cb_need_restart;
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen)
),
- TP_printk("addr=%pISpc client %08x:%08x cb=%p%s",
- __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
- __entry->cb, __entry->need_restart ?
- " (need restart)" : " (first try)"
+ TP_printk("addr=%pISpc client %08x:%08x cb=%p%s opcode=%s",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id, __entry->cb,
+ __entry->need_restart ? " (need restart)" : " (first try)",
+ show_nfsd_cb_opcode(__entry->opcode)
)
);
@@ -1830,6 +1851,7 @@ DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_recall_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_notify_lock_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_layout_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_offload_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_getattr_done);
TRACE_EVENT(nfsd_cb_recall_any_done,
TP_PROTO(
@@ -2127,6 +2149,10 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
__field(u32, dst_cl_id)
__field(u32, dst_so_id)
__field(u32, dst_si_generation)
+ __field(u32, cb_cl_boot)
+ __field(u32, cb_cl_id)
+ __field(u32, cb_so_id)
+ __field(u32, cb_si_generation)
__field(u64, src_cp_pos)
__field(u64, dst_cp_pos)
__field(u64, cp_count)
@@ -2135,6 +2161,7 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
TP_fast_assign(
const stateid_t *src_stp = &copy->cp_src_stateid;
const stateid_t *dst_stp = &copy->cp_dst_stateid;
+ const stateid_t *cb_stp = &copy->cp_res.cb_stateid;
__entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
__entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
@@ -2146,6 +2173,10 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
__entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
__entry->dst_so_id = dst_stp->si_opaque.so_id;
__entry->dst_si_generation = dst_stp->si_generation;
+ __entry->cb_cl_boot = cb_stp->si_opaque.so_clid.cl_boot;
+ __entry->cb_cl_id = cb_stp->si_opaque.so_clid.cl_id;
+ __entry->cb_so_id = cb_stp->si_opaque.so_id;
+ __entry->cb_si_generation = cb_stp->si_generation;
__entry->src_cp_pos = copy->cp_src_pos;
__entry->dst_cp_pos = copy->cp_dst_pos;
__entry->cp_count = copy->cp_count;
@@ -2153,14 +2184,17 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
sizeof(struct sockaddr_in6));
),
TP_printk("client=%pISpc intra=%d async=%d "
- "src_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
- "dst_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "src_client %08x:%08x src_stateid %08x:%08x "
+ "dst_client %08x:%08x dst_stateid %08x:%08x "
+ "cb_client %08x:%08x cb_stateid %08x:%08x "
"cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
__get_sockaddr(addr), __entry->intra, __entry->async,
- __entry->src_si_generation, __entry->src_cl_boot,
- __entry->src_cl_id, __entry->src_so_id,
- __entry->dst_si_generation, __entry->dst_cl_boot,
- __entry->dst_cl_id, __entry->dst_so_id,
+ __entry->src_cl_boot, __entry->src_cl_id,
+ __entry->src_so_id, __entry->src_si_generation,
+ __entry->dst_cl_boot, __entry->dst_cl_id,
+ __entry->dst_so_id, __entry->dst_si_generation,
+ __entry->cb_cl_boot, __entry->cb_cl_id,
+ __entry->cb_so_id, __entry->cb_si_generation,
__entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
)
);
@@ -2172,7 +2206,7 @@ DEFINE_EVENT(nfsd_copy_class, nfsd_copy_##name, \
DEFINE_COPY_EVENT(inter);
DEFINE_COPY_EVENT(intra);
-DEFINE_COPY_EVENT(do_async);
+DEFINE_COPY_EVENT(async);
TRACE_EVENT(nfsd_copy_done,
TP_PROTO(
@@ -2193,11 +2227,80 @@ TRACE_EVENT(nfsd_copy_done,
__assign_sockaddr(addr, &copy->cp_clp->cl_addr,
sizeof(struct sockaddr_in6));
),
- TP_printk("addr=%pISpc status=%d intra=%d async=%d ",
+ TP_printk("addr=%pISpc status=%d intra=%d async=%d",
__get_sockaddr(addr), __entry->status, __entry->intra, __entry->async
)
);
+TRACE_EVENT(nfsd_copy_async_done,
+ TP_PROTO(
+ const struct nfsd4_copy *copy
+ ),
+ TP_ARGS(copy),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(bool, intra)
+ __field(bool, async)
+ __field(u32, src_cl_boot)
+ __field(u32, src_cl_id)
+ __field(u32, src_so_id)
+ __field(u32, src_si_generation)
+ __field(u32, dst_cl_boot)
+ __field(u32, dst_cl_id)
+ __field(u32, dst_so_id)
+ __field(u32, dst_si_generation)
+ __field(u32, cb_cl_boot)
+ __field(u32, cb_cl_id)
+ __field(u32, cb_so_id)
+ __field(u32, cb_si_generation)
+ __field(u64, src_cp_pos)
+ __field(u64, dst_cp_pos)
+ __field(u64, cp_count)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ const stateid_t *src_stp = &copy->cp_src_stateid;
+ const stateid_t *dst_stp = &copy->cp_dst_stateid;
+ const stateid_t *cb_stp = &copy->cp_res.cb_stateid;
+
+ __entry->status = be32_to_cpu(copy->nfserr);
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __entry->src_cl_boot = src_stp->si_opaque.so_clid.cl_boot;
+ __entry->src_cl_id = src_stp->si_opaque.so_clid.cl_id;
+ __entry->src_so_id = src_stp->si_opaque.so_id;
+ __entry->src_si_generation = src_stp->si_generation;
+ __entry->dst_cl_boot = dst_stp->si_opaque.so_clid.cl_boot;
+ __entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
+ __entry->dst_so_id = dst_stp->si_opaque.so_id;
+ __entry->dst_si_generation = dst_stp->si_generation;
+ __entry->cb_cl_boot = cb_stp->si_opaque.so_clid.cl_boot;
+ __entry->cb_cl_id = cb_stp->si_opaque.so_clid.cl_id;
+ __entry->cb_so_id = cb_stp->si_opaque.so_id;
+ __entry->cb_si_generation = cb_stp->si_generation;
+ __entry->src_cp_pos = copy->cp_src_pos;
+ __entry->dst_cp_pos = copy->cp_dst_pos;
+ __entry->cp_count = copy->cp_count;
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client=%pISpc status=%d intra=%d async=%d "
+ "src_client %08x:%08x src_stateid %08x:%08x "
+ "dst_client %08x:%08x dst_stateid %08x:%08x "
+ "cb_client %08x:%08x cb_stateid %08x:%08x "
+ "cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
+ __get_sockaddr(addr),
+ __entry->status, __entry->intra, __entry->async,
+ __entry->src_cl_boot, __entry->src_cl_id,
+ __entry->src_so_id, __entry->src_si_generation,
+ __entry->dst_cl_boot, __entry->dst_cl_id,
+ __entry->dst_so_id, __entry->dst_si_generation,
+ __entry->cb_cl_boot, __entry->cb_cl_id,
+ __entry->cb_so_id, __entry->cb_si_generation,
+ __entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
+ )
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 29b1f3613800..22325b590e17 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -100,6 +100,7 @@ nfserrno (int errno)
{ nfserr_io, -EUCLEAN },
{ nfserr_perm, -ENOKEY },
{ nfserr_no_grace, -ENOGRACE},
+ { nfserr_io, -EBADMSG },
};
int i;
@@ -421,8 +422,9 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (iap->ia_size < inode->i_size) {
__be32 err;
- err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
- NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
+ err = nfsd_permission(&rqstp->rq_cred,
+ fhp->fh_export, fhp->fh_dentry,
+ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
if (err)
return err;
}
@@ -814,7 +816,8 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
sresult |= map->access;
- err2 = nfsd_permission(rqstp, export, dentry, map->how);
+ err2 = nfsd_permission(&rqstp->rq_cred, export,
+ dentry, map->how);
switch (err2) {
case nfs_ok:
result |= map->access;
@@ -1160,7 +1163,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
errseq_t since;
__be32 nfserr;
int host_err;
- int use_wgather;
loff_t pos = offset;
unsigned long exp_op_flags = 0;
unsigned int pflags = current->flags;
@@ -1186,12 +1188,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
}
exp = fhp->fh_export;
- use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!EX_ISSYNC(exp))
stable = NFS_UNSTABLE;
- if (stable && !use_wgather)
+ if (stable && !fhp->fh_use_wgather)
flags |= RWF_SYNC;
iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
@@ -1210,7 +1211,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
if (host_err < 0)
goto out_nfserr;
- if (stable && use_wgather) {
+ if (stable && fhp->fh_use_wgather) {
host_err = wait_for_concurrent_writes(file);
if (host_err < 0)
commit_reset_write_verifier(nn, rqstp, host_err);
@@ -1475,7 +1476,8 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
dirp = d_inode(dentry);
dchild = dget(resfhp->fh_dentry);
- err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
+ err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
+ NFSD_MAY_CREATE);
if (err)
goto out;
@@ -1767,10 +1769,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (!err)
err = nfserrno(commit_metadata(tfhp));
} else {
- if (host_err == -EXDEV && rqstp->rq_vers == 2)
- err = nfserr_acces;
- else
- err = nfserrno(host_err);
+ err = nfserrno(host_err);
}
dput(dnew);
out_drop_write:
@@ -1836,7 +1835,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
- err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ err = nfserr_xdev;
if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
goto out;
if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
@@ -1851,7 +1850,7 @@ retry:
trap = lock_rename(tdentry, fdentry);
if (IS_ERR(trap)) {
- err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ err = nfserr_xdev;
goto out_want_write;
}
err = fh_fill_pre_attrs(ffhp);
@@ -2020,10 +2019,7 @@ out_nfserr:
/* name is mounted-on. There is no perfect
* error status.
*/
- if (nfsd_v4client(rqstp))
- err = nfserr_file_open;
- else
- err = nfserr_acces;
+ err = nfserr_file_open;
} else {
err = nfserrno(host_err);
}
@@ -2178,8 +2174,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
loff_t offset = *offsetp;
int may_flags = NFSD_MAY_READ;
- /* NFSv2 only supports 32 bit cookies */
- if (rqstp->rq_vers > 2)
+ if (fhp->fh_64bit_cookies)
may_flags |= NFSD_MAY_64BIT_COOKIE;
err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
@@ -2255,9 +2250,9 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, in
return err;
}
-static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
+static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
{
- return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
+ return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
}
#ifdef CONFIG_NFSD_V4
@@ -2501,8 +2496,8 @@ out_unlock:
* Check for a user's access permissions to this inode.
*/
__be32
-nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- struct dentry *dentry, int acc)
+nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ struct dentry *dentry, int acc)
{
struct inode *inode = d_inode(dentry);
int err;
@@ -2533,7 +2528,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
if (!(acc & NFSD_MAY_LOCAL_ACCESS))
if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
- if (exp_rdonly(rqstp, exp) ||
+ if (exp_rdonly(cred, exp) ||
__mnt_is_readonly(exp->ex_path.mnt))
return nfserr_rofs;
if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 57cd70062048..3ff146522556 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -33,6 +33,8 @@
#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
+#define NFSD_MAY_LOCALIO 0x2000 /* for tracing, reflects when localio used */
+
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
@@ -153,8 +155,8 @@ __be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
struct kstatfs *, int access);
-__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
- struct dentry *, int);
+__be32 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ struct dentry *dentry, int acc);
void nfsd_filp_close(struct file *fp);
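The nfsd_permission() conversion is mechanical at every call site: pass the
credential instead of the whole request. For an out-of-tree caller the
change would look like (pattern taken from the call sites in this patch):

	/* before */
	err = nfsd_permission(rqstp, exp, dentry, NFSD_MAY_READ);
	/* after */
	err = nfsd_permission(&rqstp->rq_cred, exp, dentry, NFSD_MAY_READ);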
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index fbdd42cde1fa..2a21a7662e03 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -713,6 +713,7 @@ struct nfsd4_copy {
struct nfsd4_ssc_umount_item *ss_nsui;
struct nfs_fh c_fh;
nfs4_stateid stateid;
+ struct nfsd_net *cp_nn;
};
static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index d825a9faca6d..e19d7eb10084 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -37,7 +37,7 @@ void *nilfs_palloc_block_get_entry(const struct inode *, __u64,
int nilfs_palloc_count_max_entries(struct inode *, u64, u64 *);
/**
- * nilfs_palloc_req - persistent allocator request and reply
+ * struct nilfs_palloc_req - persistent allocator request and reply
* @pr_entry_nr: entry number (vblocknr or inode number)
* @pr_desc_bh: buffer head of the buffer containing block group descriptors
* @pr_bitmap_bh: buffer head of the buffer containing a block group bitmap
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index cd14ea25968c..c9e8d9a7d820 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -349,7 +349,7 @@ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
}
/**
- * nilfs_bmap_lookup_dirty_buffers -
+ * nilfs_bmap_lookup_dirty_buffers - collect dirty block buffers
* @bmap: bmap
* @listp: pointer to buffer head list
*/
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 608168a5cb88..4656df392722 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -44,6 +44,19 @@ struct nilfs_bmap_stats {
/**
* struct nilfs_bmap_operations - bmap operation table
+ * @bop_lookup: single block search operation
+ * @bop_lookup_contig: consecutive block search operation
+ * @bop_insert: block insertion operation
+ * @bop_delete: block delete operation
+ * @bop_clear: block mapping resource release operation
+ * @bop_propagate: operation to propagate dirty state towards the
+ * mapping root
+ * @bop_lookup_dirty_buffers: operation to collect dirty block buffers
+ * @bop_assign: disk block address assignment operation
+ * @bop_mark: operation to mark in-use blocks as dirty for
+ * relocation by GC
+ * @bop_seek_key: find valid block key operation
+ * @bop_last_key: find last valid block key operation
*/
struct nilfs_bmap_operations {
int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *);
@@ -66,7 +79,7 @@ struct nilfs_bmap_operations {
int (*bop_seek_key)(const struct nilfs_bmap *, __u64, __u64 *);
int (*bop_last_key)(const struct nilfs_bmap *, __u64 *);
- /* The following functions are internal use only. */
+ /* private: internal use only */
int (*bop_check_insert)(const struct nilfs_bmap *, __u64);
int (*bop_check_delete)(struct nilfs_bmap *, __u64);
int (*bop_gather_data)(struct nilfs_bmap *, __u64 *, __u64 *, int);
@@ -74,9 +87,8 @@ struct nilfs_bmap_operations {
#define NILFS_BMAP_SIZE (NILFS_INODE_BMAP_SIZE * sizeof(__le64))
-#define NILFS_BMAP_KEY_BIT (sizeof(unsigned long) * 8 /* CHAR_BIT */)
-#define NILFS_BMAP_NEW_PTR_INIT \
- (1UL << (sizeof(unsigned long) * 8 /* CHAR_BIT */ - 1))
+#define NILFS_BMAP_KEY_BIT BITS_PER_LONG
+#define NILFS_BMAP_NEW_PTR_INIT (1UL << (BITS_PER_LONG - 1))
static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
{
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index c034080c334b..57b4af5ad646 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -179,11 +179,32 @@ void nilfs_btnode_delete(struct buffer_head *bh)
}
/**
- * nilfs_btnode_prepare_change_key
- * prepare to move contents of the block for old key to one of new key.
- * the old buffer will not be removed, but might be reused for new buffer.
- * it might return -ENOMEM because of memory allocation errors,
- * and might return -EIO because of disk read errors.
+ * nilfs_btnode_prepare_change_key - prepare to change the search key of a
+ * b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_prepare_change_key() prepares to move the contents of the
+ * b-tree node block of the old key given in the "oldkey" member of @ctxt to
+ * the position of the new key given in the "newkey" member of @ctxt in the
+ * page cache @btnc. Here, the key of the block is an index in units of
+ * blocks, and if the page and block sizes match, it matches the page index
+ * in the page cache.
+ *
+ * If the page size and block size match, this function attempts to move the
+ * entire folio, and in preparation for this, inserts the original folio into
+ * the new index of the cache. If this insertion fails or if the page size
+ * and block size are different, it falls back to a copy preparation using
+ * nilfs_btnode_create_block(), inserts a new block at the position
+ * corresponding to "newkey", and stores the buffer head pointer in the
+ * "newbh" member of @ctxt.
+ *
+ * Note that the current implementation does not support folio sizes larger
+ * than the page size.
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EIO - I/O error (metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
@@ -245,8 +266,21 @@ retry:
}
/**
- * nilfs_btnode_commit_change_key
- * commit the change_key operation prepared by prepare_change_key().
+ * nilfs_btnode_commit_change_key - commit the change of the search key of
+ * a b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_commit_change_key() executes the key change based on the
+ * context @ctxt prepared by nilfs_btnode_prepare_change_key(). If no valid
+ * block buffer is prepared in "newbh" of @ctxt (i.e., a full folio move),
+ * this function removes the folio from the old index and completes the move.
+ * Otherwise, it copies the block data and inherited flag states of "oldbh"
+ * to "newbh" and clears the "oldbh" from the cache. In either case, the
+ * relocated buffer is marked as dirty.
+ *
+ * As with nilfs_btnode_prepare_change_key(), the current implementation does
+ * not support folio sizes larger than the page size.
*/
void nilfs_btnode_commit_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
@@ -285,8 +319,19 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
}
/**
- * nilfs_btnode_abort_change_key
- * abort the change_key operation prepared by prepare_change_key().
+ * nilfs_btnode_abort_change_key - abort the change of the search key of a
+ * b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_abort_change_key() cancels the key change associated with the
+ * context @ctxt prepared via nilfs_btnode_prepare_change_key() and performs
+ * any necessary cleanup. If no valid block buffer is prepared in "newbh" of
+ * @ctxt, this function removes the folio from the destination index and aborts
+ * the move. Otherwise, it clears "newbh" from the cache.
+ *
+ * As with nilfs_btnode_prepare_change_key(), the current implementation does
+ * not support folio sizes larger than the page size.
*/
void nilfs_btnode_abort_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
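Taken together, the three btnode helpers form a prepare/commit/abort
protocol around a key change. A condensed sketch of the calling sequence
(error handling abbreviated; do_the_update() is a placeholder for the
caller's b-tree manipulation):

	struct nilfs_btnode_chkey_ctxt ctxt = {
		.oldkey = oldkey,
		.newkey = newkey,
		.bh     = bh,
	};
	int err;

	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
	if (err)
		return err;			/* nothing to undo */
	if (do_the_update() < 0)
		nilfs_btnode_abort_change_key(btnc, &ctxt);
	else
		nilfs_btnode_commit_change_key(btnc, &ctxt);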
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 862bdf23120e..ef5061bb56da 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
(flags & NILFS_BTREE_NODE_ROOT) ||
- nchildren < 0 ||
+ nchildren <= 0 ||
nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
nilfs_crit(inode->i_sb,
"bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
@@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
- nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX ||
+ (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) {
nilfs_crit(inode->i_sb,
"bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
inode->i_ino, level, flags, nchildren);
@@ -1658,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
int nchildren, ret;
root = nilfs_btree_get_root(btree);
+ nchildren = nilfs_btree_node_get_nchildren(root);
+ if (unlikely(nchildren == 0))
+ return 0;
+
switch (nilfs_btree_height(btree)) {
case 2:
bh = NULL;
node = root;
break;
case 3:
- nchildren = nilfs_btree_node_get_nchildren(root);
if (nchildren > 1)
return 0;
ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
@@ -1673,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
if (ret < 0)
return ret;
node = (struct nilfs_btree_node *)bh->b_data;
+ nchildren = nilfs_btree_node_get_nchildren(node);
break;
default:
return 0;
}
- nchildren = nilfs_btree_node_get_nchildren(node);
maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
nextmaxkey = (nchildren > 1) ?
nilfs_btree_node_get_key(node, nchildren - 2) : 0;
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 92868e1a48ca..2a220f716c91 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -24,6 +24,7 @@
* @bp_index: index of child node
* @bp_oldreq: ptr end request for old ptr
* @bp_newreq: ptr alloc request for new ptr
+ * @bp_ctxt: context information for changing the key of a b-tree node block
* @bp_op: rebalance operation
*/
struct nilfs_btree_path {
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 69a5cced1e84..f0ce37552446 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -125,10 +125,17 @@ static void nilfs_cpfile_block_init(struct inode *cpfile,
}
}
-static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
- struct buffer_head **bhp)
+static int nilfs_cpfile_get_header_block(struct inode *cpfile,
+ struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
+ int err = nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
+
+ if (unlikely(err == -ENOENT)) {
+ nilfs_error(cpfile->i_sb,
+ "missing header block in checkpoint metadata");
+ err = -EIO;
+ }
+ return err;
}
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
@@ -283,14 +290,9 @@ int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
down_write(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
- if (unlikely(ret < 0)) {
- if (ret == -ENOENT) {
- nilfs_error(cpfile->i_sb,
- "checkpoint creation failed due to metadata corruption.");
- ret = -EIO;
- }
+ if (unlikely(ret < 0))
goto out_sem;
- }
+
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 1, &cp_bh);
if (unlikely(ret < 0))
goto out_header;
@@ -704,9 +706,15 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
}
/**
- * nilfs_cpfile_delete_checkpoint -
- * @cpfile:
- * @cno:
+ * nilfs_cpfile_delete_checkpoint - delete a checkpoint
+ * @cpfile: checkpoint file inode
+ * @cno: checkpoint number to delete
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EBUSY - Checkpoint in use (snapshot specified).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No valid checkpoint found.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
@@ -968,21 +976,15 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
}
/**
- * nilfs_cpfile_is_snapshot -
+ * nilfs_cpfile_is_snapshot - determine if checkpoint is a snapshot
* @cpfile: inode of checkpoint file
- * @cno: checkpoint number
- *
- * Description:
- *
- * Return Value: On success, 1 is returned if the checkpoint specified by
- * @cno is a snapshot, or 0 if not. On error, one of the following negative
- * error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * @cno: checkpoint number
*
- * %-ENOENT - No such checkpoint.
+ * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
+ * the following negative error code on failure.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No such checkpoint.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fc1caf63a42a..0bef662176a4 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -271,18 +271,15 @@ void nilfs_dat_abort_update(struct inode *dat,
}
/**
- * nilfs_dat_mark_dirty -
- * @dat: DAT file inode
+ * nilfs_dat_mark_dirty - mark the DAT block buffer containing the specified
+ * virtual block address entry as dirty
+ * @dat: DAT file inode
* @vblocknr: virtual block number
*
- * Description:
- *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EINVAL - Invalid DAT entry (internal code).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 4a29b0138d75..fe5b1a30c509 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -83,7 +83,7 @@ static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
{
loff_t pos = folio_pos(folio) + from;
- return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block);
+ return __block_write_begin(folio, pos, to - from, nilfs_get_block);
}
static void nilfs_commit_chunk(struct folio *folio,
@@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
int err;
nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
- copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
+ copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
if (IS_DIRSYNC(dir))
@@ -231,37 +231,6 @@ static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
nilfs_rec_len_from_disk(p->rec_len));
}
-static unsigned char
-nilfs_filetype_table[NILFS_FT_MAX] = {
- [NILFS_FT_UNKNOWN] = DT_UNKNOWN,
- [NILFS_FT_REG_FILE] = DT_REG,
- [NILFS_FT_DIR] = DT_DIR,
- [NILFS_FT_CHRDEV] = DT_CHR,
- [NILFS_FT_BLKDEV] = DT_BLK,
- [NILFS_FT_FIFO] = DT_FIFO,
- [NILFS_FT_SOCK] = DT_SOCK,
- [NILFS_FT_SYMLINK] = DT_LNK,
-};
-
-#define S_SHIFT 12
-static unsigned char
-nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
- [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
- [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = NILFS_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = NILFS_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = NILFS_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = NILFS_FT_SYMLINK,
-};
-
-static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
-{
- umode_t mode = inode->i_mode;
-
- de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
-
static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
loff_t pos = ctx->pos;
@@ -297,10 +266,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (de->inode) {
unsigned char t;
- if (de->file_type < NILFS_FT_MAX)
- t = nilfs_filetype_table[de->file_type];
- else
- t = DT_UNKNOWN;
+ t = fs_ftype_to_dtype(de->file_type);
if (!dir_emit(ctx, de->name, de->name_len,
le64_to_cpu(de->inode), t)) {
@@ -444,7 +410,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
err = nilfs_prepare_chunk(folio, from, to);
BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
@@ -531,7 +497,7 @@ got_it:
de->name_len = namelen;
memcpy(de->name, name, namelen);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, folio->mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
nilfs_mark_inode_dirty(dir);
@@ -612,14 +578,14 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
memcpy(de->name, ".\0\0", 4);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
de->name_len = 2;
de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
kunmap_local(kaddr);
nilfs_commit_chunk(folio, mapping, 0, chunk_size);
fail:
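The two dir.c hunks above replace nilfs2's private file-type tables with the generic helpers declared in include/linux/fs_types.h. A minimal kernel-context sketch of the round trip those helpers provide (illustrative only; fs_umode_to_dtype() even does both steps at once):

/* Sketch: mode -> on-disk file type -> readdir d_type, using the
 * generic helpers that replace the tables removed above. */
#include <linux/fs.h>		/* pulls in linux/fs_types.h */
#include <linux/stat.h>

static unsigned char demo_dtype_for_mode(umode_t mode)
{
	unsigned char ftype = fs_umode_to_ftype(mode);	/* S_IFDIR -> FT_DIR */

	return fs_ftype_to_dtype(ftype);		/* FT_DIR -> DT_DIR */
}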
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 7340a01d80e1..be6acf6e2bfc 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -15,6 +15,7 @@
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
+#include <linux/random.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
@@ -28,17 +29,13 @@
* @ino: inode number
* @cno: checkpoint number
* @root: pointer on NILFS root object (mounted checkpoint)
- * @for_gc: inode for GC flag
- * @for_btnc: inode for B-tree node cache flag
- * @for_shadow: inode for shadowed page cache flag
+ * @type: inode type
*/
struct nilfs_iget_args {
u64 ino;
__u64 cno;
struct nilfs_root *root;
- bool for_gc;
- bool for_btnc;
- bool for_shadow;
+ unsigned int type;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -162,7 +159,7 @@ static int nilfs_writepages(struct address_space *mapping,
int err = 0;
if (sb_rdonly(inode->i_sb)) {
- nilfs_clear_dirty_pages(mapping, false);
+ nilfs_clear_dirty_pages(mapping);
return -EROFS;
}
@@ -186,7 +183,7 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
* have dirty pages that try to be flushed in background.
* So, here we simply discard this dirty page.
*/
- nilfs_clear_folio_dirty(folio, false);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
return -EROFS;
}
@@ -250,7 +247,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
@@ -259,7 +256,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err))
return err;
- err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
+ err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
if (unlikely(err)) {
nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb);
@@ -269,16 +266,16 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
static int nilfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
unsigned int start = pos & (PAGE_SIZE - 1);
unsigned int nr_dirty;
int err;
- nr_dirty = nilfs_page_count_clean_buffers(page, start,
+ nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
start + copied);
- copied = generic_write_end(file, mapping, pos, len, copied, page,
+ copied = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
nilfs_set_file_dirty(inode, nr_dirty);
err = nilfs_transaction_commit(inode->i_sb);
@@ -315,8 +312,7 @@ static int nilfs_insert_inode_locked(struct inode *inode,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -325,7 +321,6 @@ static int nilfs_insert_inode_locked(struct inode *inode,
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
- struct the_nilfs *nilfs = sb->s_fs_info;
struct inode *inode;
struct nilfs_inode_info *ii;
struct nilfs_root *root;
@@ -343,25 +338,13 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
root = NILFS_I(dir)->i_root;
ii = NILFS_I(inode);
ii->i_state = BIT(NILFS_I_NEW);
+ ii->i_type = NILFS_I_TYPE_NORMAL;
ii->i_root = root;
err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
if (unlikely(err))
goto failed_ifile_create_inode;
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
-
- if (unlikely(ino < NILFS_USER_INO)) {
- nilfs_warn(sb,
- "inode bitmap is inconsistent for reserved inodes");
- do {
- brelse(bh);
- err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
- if (unlikely(err))
- goto failed_ifile_create_inode;
- } while (ino < NILFS_USER_INO);
-
- nilfs_info(sb, "repaired inode bitmap for reserved inodes");
- }
ii->i_bh = bh;
atomic64_inc(&root->inodes_count);
@@ -385,9 +368,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
/* ii->i_dir_acl = 0; */
ii->i_dir_start_lookup = 0;
nilfs_set_inode_flags(inode);
- spin_lock(&nilfs->ns_next_gen_lock);
- inode->i_generation = nilfs->ns_next_generation++;
- spin_unlock(&nilfs->ns_next_gen_lock);
+ inode->i_generation = get_random_u32();
if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
err = -EIO;
goto failed_after_creation;
@@ -546,23 +527,10 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
return 0;
ii = NILFS_I(inode);
- if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
- if (!args->for_btnc)
- return 0;
- } else if (args->for_btnc) {
+ if (ii->i_type != args->type)
return 0;
- }
- if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
- if (!args->for_shadow)
- return 0;
- } else if (args->for_shadow) {
- return 0;
- }
- if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
- return !args->for_gc;
-
- return args->for_gc && args->cno == ii->i_cno;
+ return !(args->type & NILFS_I_TYPE_GC) || args->cno == ii->i_cno;
}
static int nilfs_iget_set(struct inode *inode, void *opaque)
@@ -572,15 +540,9 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
inode->i_ino = args->ino;
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = args->root;
+ NILFS_I(inode)->i_type = args->type;
if (args->root && args->ino == NILFS_ROOT_INO)
nilfs_get_root(args->root);
-
- if (args->for_gc)
- NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
- if (args->for_btnc)
- NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
- if (args->for_shadow)
- NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
return 0;
}
@@ -588,8 +550,7 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -599,8 +560,7 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -631,8 +591,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
__u64 cno)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = NULL, .cno = cno, .type = NILFS_I_TYPE_GC
};
struct inode *inode;
int err;
@@ -677,9 +636,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode)
args.ino = inode->i_ino;
args.root = ii->i_root;
args.cno = ii->i_cno;
- args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
- args.for_btnc = true;
- args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+ args.type = ii->i_type | NILFS_I_TYPE_BTNC;
btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
@@ -733,8 +690,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
struct nilfs_iget_args args = {
- .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = true
+ .ino = inode->i_ino, .root = NULL, .cno = 0,
+ .type = NILFS_I_TYPE_SHADOW
};
struct inode *s_inode;
int err;
@@ -900,7 +857,7 @@ static void nilfs_clear_inode(struct inode *inode)
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
- if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+ if (!(ii->i_type & NILFS_I_TYPE_BTNC))
nilfs_detach_btree_node_cache(inode);
if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 8be471ce4f19..fa77f78df681 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -17,6 +17,7 @@
#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
#include <linux/fileattr.h>
+#include <linux/string.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
@@ -114,7 +115,11 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
}
/**
- * nilfs_fileattr_get - ioctl to support lsattr
+ * nilfs_fileattr_get - retrieve miscellaneous file attributes
+ * @dentry: the object to retrieve from
+ * @fa: fileattr pointer
+ *
+ * Return: Always 0 (success).
*/
int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
@@ -126,7 +131,12 @@ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
/**
- * nilfs_fileattr_set - ioctl to support chattr
+ * nilfs_fileattr_set - change miscellaneous file attributes
+ * @idmap: idmap of the mount
+ * @dentry: the object to change
+ * @fa: fileattr pointer
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
@@ -159,6 +169,10 @@ int nilfs_fileattr_set(struct mnt_idmap *idmap,
/**
* nilfs_ioctl_getversion - get info about a file's version (generation number)
+ * @inode: inode object
+ * @argp: userspace memory where the generation number of @inode is stored
+ *
+ * Return: 0 on success, or %-EFAULT on error.
*/
static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
{
@@ -1266,6 +1280,91 @@ out:
return ret;
}
+/**
+ * nilfs_ioctl_get_fslabel - get the volume name of the file system
+ * @sb: super block instance
+ * @argp: pointer to userspace memory where the volume name should be stored
+ *
+ * Return: 0 on success, %-EFAULT if copying to userspace memory fails.
+ */
+static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp)
+{
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ char label[NILFS_MAX_VOLUME_NAME + 1];
+
+ BUILD_BUG_ON(NILFS_MAX_VOLUME_NAME >= FSLABEL_MAX);
+
+ down_read(&nilfs->ns_sem);
+ memtostr_pad(label, nilfs->ns_sbp[0]->s_volume_name);
+ up_read(&nilfs->ns_sem);
+
+ if (copy_to_user(argp, label, sizeof(label)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * nilfs_ioctl_set_fslabel - set the volume name of the file system
+ * @sb: super block instance
+ * @filp: file object
+ * @argp: pointer to userspace memory that contains the volume name
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EFAULT - Error copying input data.
+ * * %-EINVAL - Label length exceeds record size in superblock.
+ * * %-EIO - I/O error.
+ * * %-EPERM - Operation not permitted (insufficient permissions).
+ * * %-EROFS - Read only file system.
+ */
+static int nilfs_ioctl_set_fslabel(struct super_block *sb, struct file *filp,
+ void __user *argp)
+{
+ char label[NILFS_MAX_VOLUME_NAME + 1];
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_super_block **sbp;
+ size_t len;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(label, argp, NILFS_MAX_VOLUME_NAME + 1)) {
+ ret = -EFAULT;
+ goto out_drop_write;
+ }
+
+ len = strnlen(label, NILFS_MAX_VOLUME_NAME + 1);
+ if (len > NILFS_MAX_VOLUME_NAME) {
+ nilfs_err(sb, "unable to set label with more than %zu bytes",
+ NILFS_MAX_VOLUME_NAME);
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
+
+ down_write(&nilfs->ns_sem);
+ sbp = nilfs_prepare_super(sb, false);
+ if (unlikely(!sbp)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ strtomem_pad(sbp[0]->s_volume_name, label, 0);
+ if (sbp[1])
+ strtomem_pad(sbp[1]->s_volume_name, label, 0);
+
+ ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
+
+out_unlock:
+ up_write(&nilfs->ns_sem);
+out_drop_write:
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1308,6 +1407,10 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return nilfs_ioctl_set_alloc_range(inode, argp);
case FITRIM:
return nilfs_ioctl_trim_fs(inode, argp);
+ case FS_IOC_GETFSLABEL:
+ return nilfs_ioctl_get_fslabel(inode->i_sb, argp);
+ case FS_IOC_SETFSLABEL:
+ return nilfs_ioctl_set_fslabel(inode->i_sb, filp, argp);
default:
return -ENOTTY;
}
@@ -1334,6 +1437,8 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
case FITRIM:
+ case FS_IOC_GETFSLABEL:
+ case FS_IOC_SETFSLABEL:
break;
default:
return -ENOIOCTLCMD;
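A hypothetical userspace sketch exercising the two new cases above through the generic label interface in <linux/fs.h> (program and arguments are illustrative; setting the label requires CAP_SYS_ADMIN and a writable mount, as enforced in nilfs_ioctl_set_fslabel()):

/* Hypothetical userspace sketch (not part of the patch). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_{GET,SET}FSLABEL, FSLABEL_MAX */

int main(int argc, char **argv)
{
	char label[FSLABEL_MAX] = "";
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any file on the nilfs2 mount */
	if (fd < 0 || ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
		perror("FS_IOC_GETFSLABEL");
		return 1;
	}
	printf("current label: %s\n", label);

	if (argc > 2) {
		strncpy(label, argv[2], sizeof(label) - 1);
		/* fails with EINVAL if longer than NILFS_MAX_VOLUME_NAME */
		if (ioctl(fd, FS_IOC_SETFSLABEL, label) < 0)
			perror("FS_IOC_SETFSLABEL");
	}
	return 0;
}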
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 4f792a0ad0f0..ceb7dc0b5bad 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -411,7 +411,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
* have dirty folios that try to be flushed in background.
* So, here we simply discard this dirty folio.
*/
- nilfs_clear_folio_dirty(folio, false);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
return -EROFS;
}
@@ -638,10 +638,10 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
if (mi->mi_palloc_cache)
nilfs_palloc_clear_cache(inode);
- nilfs_clear_dirty_pages(inode->i_mapping, true);
+ nilfs_clear_dirty_pages(inode->i_mapping);
nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
- nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping);
nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 4017f7856440..fb1c4c5bae7c 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -22,6 +22,7 @@
/**
* struct nilfs_inode_info - nilfs inode data in memory
* @i_flags: inode flags
+ * @i_type: inode type (combination of flags that indicate usage)
* @i_state: dynamic state flags
* @i_bmap: pointer on i_bmap_data
* @i_bmap_data: raw block mapping
@@ -37,6 +38,7 @@
*/
struct nilfs_inode_info {
__u32 i_flags;
+ unsigned int i_type;
unsigned long i_state; /* Dynamic state flags */
struct nilfs_bmap *i_bmap;
struct nilfs_bmap i_bmap_data;
@@ -90,9 +92,16 @@ enum {
NILFS_I_UPDATED, /* The file has been written back */
NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
NILFS_I_BMAP, /* has bmap and btnode_cache */
- NILFS_I_GCINODE, /* inode for GC, on memory only */
- NILFS_I_BTNC, /* inode for btree node cache */
- NILFS_I_SHADOW, /* inode for shadowed page cache */
+};
+
+/*
+ * Flags to identify the usage of on-memory inodes (i_type)
+ */
+enum {
+ NILFS_I_TYPE_NORMAL = 0,
+ NILFS_I_TYPE_GC = 0x0001, /* For data caching during GC */
+ NILFS_I_TYPE_BTNC = 0x0002, /* For btree node cache */
+ NILFS_I_TYPE_SHADOW = 0x0004, /* For shadowed page cache */
};
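The gain over the three removed booleans is that inode identity becomes a single integer: composed identities are expressed with OR, and the matching in nilfs_iget_test() collapses to one comparison. A standalone mock of that matching logic, with the flag values copied from the enum above:

#include <stdio.h>

enum {
	DEMO_TYPE_NORMAL = 0,		/* mirrors NILFS_I_TYPE_NORMAL */
	DEMO_TYPE_GC     = 0x0001,
	DEMO_TYPE_BTNC   = 0x0002,
	DEMO_TYPE_SHADOW = 0x0004,
};

static int type_matches(unsigned int i_type, unsigned int wanted)
{
	/* one equality test replaces the per-flag comparisons that
	 * for_gc/for_btnc/for_shadow needed */
	return i_type == wanted;
}

int main(void)
{
	unsigned int shadow_btnc = DEMO_TYPE_SHADOW | DEMO_TYPE_BTNC;

	printf("%d\n", type_matches(shadow_btnc, DEMO_TYPE_BTNC));	/* 0 */
	printf("%d\n", type_matches(shadow_btnc, shadow_btnc));	/* 1 */
	return 0;
}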
/*
@@ -103,6 +112,18 @@ enum {
NILFS_SB_COMMIT_ALL /* Commit both super blocks */
};
+/**
+ * define NILFS_MAX_VOLUME_NAME - maximum number of characters (bytes) in a
+ * file system volume name
+ *
+ * Defined by the size of the volume name field in the on-disk superblocks.
+ * The on-disk field omits the terminating NUL byte when the name exactly
+ * fills it, so use (NILFS_MAX_VOLUME_NAME + 1) as the size of any in-memory
+ * buffer that must be NUL-terminated.
+ */
+#define NILFS_MAX_VOLUME_NAME \
+ sizeof_field(struct nilfs_super_block, s_volume_name)
+
/*
* Macros to check inode numbers
*/
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 14e470fb8870..9c0b7cddeaae 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -262,7 +262,7 @@ repeat:
NILFS_FOLIO_BUG(folio, "inconsistent dirty state");
dfolio = filemap_grab_folio(dmap, folio->index);
- if (unlikely(IS_ERR(dfolio))) {
+ if (IS_ERR(dfolio)) {
/* No empty page is added to the page cache */
folio_unlock(folio);
err = PTR_ERR(dfolio);
@@ -357,9 +357,8 @@ repeat:
/**
* nilfs_clear_dirty_pages - discard dirty pages in address space
* @mapping: address space with dirty pages for discarding
- * @silent: suppress [true] or print [false] warning messages
*/
-void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
+void nilfs_clear_dirty_pages(struct address_space *mapping)
{
struct folio_batch fbatch;
unsigned int i;
@@ -380,7 +379,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
* was acquired. Skip processing in that case.
*/
if (likely(folio->mapping == mapping))
- nilfs_clear_folio_dirty(folio, silent);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
}
@@ -392,20 +391,13 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
/**
* nilfs_clear_folio_dirty - discard dirty folio
* @folio: dirty folio that will be discarded
- * @silent: suppress [true] or print [false] warning messages
*/
-void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
+void nilfs_clear_folio_dirty(struct folio *folio)
{
- struct inode *inode = folio->mapping->host;
- struct super_block *sb = inode->i_sb;
struct buffer_head *bh, *head;
BUG_ON(!folio_test_locked(folio));
- if (!silent)
- nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
- folio_pos(folio), inode->i_ino);
-
folio_clear_uptodate(folio);
folio_clear_mappedtodisk(folio);
@@ -419,11 +411,6 @@ void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
bh = head;
do {
lock_buffer(bh);
- if (!silent)
- nilfs_warn(sb,
- "discard dirty block: blocknr=%llu, size=%zu",
- (u64)bh->b_blocknr, bh->b_size);
-
set_mask_bits(&bh->b_state, clear_bits, 0);
unlock_buffer(bh);
} while (bh = bh->b_this_page, bh != head);
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 7e1a2c455a10..64521a03a19e 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -41,8 +41,8 @@ void nilfs_folio_bug(struct folio *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
-void nilfs_clear_folio_dirty(struct folio *, bool);
-void nilfs_clear_dirty_pages(struct address_space *, bool);
+void nilfs_clear_folio_dirty(struct folio *folio);
+void nilfs_clear_dirty_pages(struct address_space *mapping);
unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
unsigned int);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 61e25a980f73..21d81097a89f 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -433,8 +433,17 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
* The next segment is invalidated by this recovery.
*/
err = nilfs_sufile_free(sufile, segnum[1]);
- if (unlikely(err))
+ if (unlikely(err)) {
+ if (err == -ENOENT) {
+ nilfs_err(sb,
+ "checkpoint log inconsistency at block %llu (segment %llu): next segment %llu is unallocated",
+ (unsigned long long)nilfs->ns_last_pseg,
+ (unsigned long long)nilfs->ns_segnum,
+ (unsigned long long)segnum[1]);
+ err = -EINVAL;
+ }
goto failed;
+ }
for (i = 1; i < 4; i++) {
err = nilfs_segment_list_add(head, segnum[i]);
@@ -498,7 +507,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
struct inode *inode;
struct nilfs_recovery_block *rb, *n;
unsigned int blocksize = nilfs->ns_blocksize;
- struct page *page;
+ struct folio *folio;
loff_t pos;
int err = 0, err2 = 0;
@@ -512,7 +521,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
pos = rb->blkoff << inode->i_blkbits;
err = block_write_begin(inode->i_mapping, pos, blocksize,
- &page, nilfs_get_block);
+ &folio, nilfs_get_block);
if (unlikely(err)) {
loff_t isize = inode->i_size;
@@ -522,7 +531,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode;
}
- err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
+ err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page);
if (unlikely(err))
goto failed_page;
@@ -531,17 +540,17 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_page;
block_write_end(NULL, inode->i_mapping, pos, blocksize,
- blocksize, page, NULL);
+ blocksize, folio, NULL);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
(*nr_salvaged_blocks)++;
goto next;
failed_page:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
failed_inode:
nilfs_warn(sb,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 871ec35ea8e8..587251830897 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -519,7 +519,7 @@ static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
ii = NILFS_I(inode);
- if (test_bit(NILFS_I_GCINODE, &ii->i_state))
+ if (ii->i_type & NILFS_I_TYPE_GC)
cno = ii->i_cno;
else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
cno = 0;
@@ -1102,12 +1102,64 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
return err;
}
+/**
+ * nilfs_free_segments - free the segments given by an array of segment numbers
+ * @nilfs: nilfs object
+ * @segnumv: array of segment numbers to be freed
+ * @nsegs: number of segments to be freed in @segnumv
+ *
+ * nilfs_free_segments() wraps nilfs_sufile_freev() and
+ * nilfs_sufile_cancel_freev(), and edits the segment usage metadata file
+ * (sufile) to free all segments given by @segnumv and @nsegs at once. If
+ * it fails midway, it cancels the changes so that none of the segments are
+ * freed. If @nsegs is 0, this function does nothing.
+ *
+ * The freeing of segments is not finalized until the writing of a log with
+ * a super root block containing this sufile change is complete, and it can
+ * be canceled with nilfs_sufile_cancel_freev() until then.
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EINVAL - Invalid segment number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ */
+static int nilfs_free_segments(struct the_nilfs *nilfs, __u64 *segnumv,
+ size_t nsegs)
+{
+ size_t ndone;
+ int ret;
+
+ if (!nsegs)
+ return 0;
+
+ ret = nilfs_sufile_freev(nilfs->ns_sufile, segnumv, nsegs, &ndone);
+ if (unlikely(ret)) {
+ nilfs_sufile_cancel_freev(nilfs->ns_sufile, segnumv, ndone,
+ NULL);
+ /*
+ * If the segment usage entry of any segment to be freed lies
+ * in a hole block, nilfs_sufile_freev() will return -ENOENT.
+ * In this case, -EINVAL should be returned to the caller
+ * since there is something wrong with the given segment
+ * number array. This error can only occur during GC, so
+ * there is no need to worry about it propagating to other
+ * callers (such as fsync).
+ */
+ if (ret == -ENOENT) {
+ nilfs_err(nilfs->ns_sb,
+ "The segment usage entry %llu to be freed is invalid (in a hole)",
+ (unsigned long long)segnumv[ndone]);
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct list_head *head;
struct nilfs_inode_info *ii;
- size_t ndone;
int err = 0;
switch (nilfs_sc_cstage_get(sci)) {
@@ -1201,14 +1253,10 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
nilfs_sc_cstage_inc(sci);
fallthrough;
case NILFS_ST_SUFILE:
- err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
- sci->sc_nfreesegs, &ndone);
- if (unlikely(err)) {
- nilfs_sufile_cancel_freev(nilfs->ns_sufile,
- sci->sc_freesegs, ndone,
- NULL);
+ err = nilfs_free_segments(nilfs, sci->sc_freesegs,
+ sci->sc_nfreesegs);
+ if (unlikely(err))
break;
- }
sci->sc_stage.flags |= NILFS_CF_SUFREED;
err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
@@ -2456,7 +2504,7 @@ static void nilfs_construction_timeout(struct timer_list *t)
{
struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
- wake_up_process(sci->sc_timer_task);
+ wake_up_process(sci->sc_task);
}
static void
@@ -2582,123 +2630,85 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
}
/**
- * nilfs_segctor_thread - main loop of the segment constructor thread.
+ * nilfs_log_write_required - determine whether log writing is required
+ * @sci: nilfs_sc_info struct
+ * @modep: location for storing log writing mode
+ *
+ * Return: true if log writing is required, false otherwise. If log writing
+ * is required, the mode is stored in the location pointed to by @modep.
+ */
+static bool nilfs_log_write_required(struct nilfs_sc_info *sci, int *modep)
+{
+ bool timedout, ret = true;
+
+ spin_lock(&sci->sc_state_lock);
+ timedout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ time_after_eq(jiffies, sci->sc_timer.expires));
+ if (timedout || sci->sc_seq_request != sci->sc_seq_done)
+ *modep = SC_LSEG_SR;
+ else if (sci->sc_flush_request)
+ *modep = nilfs_segctor_flush_mode(sci);
+ else
+ ret = false;
+
+ spin_unlock(&sci->sc_state_lock);
+ return ret;
+}
+
+/**
+ * nilfs_segctor_thread - main loop of the log writer thread
* @arg: pointer to a struct nilfs_sc_info.
*
- * nilfs_segctor_thread() initializes a timer and serves as a daemon
- * to execute segment constructions.
+ * nilfs_segctor_thread() is the main loop function of the log writer kernel
+ * thread. It determines whether log writing is necessary, performs it in
+ * the background if so, and waits otherwise. It also decides when the
+ * superblock needs to be written back in the background.
+ *
+ * Return: Always 0.
*/
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
- int timeout = 0;
-
- sci->sc_timer_task = current;
- timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
- /* start sync. */
- sci->sc_task = current;
- wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
nilfs_info(sci->sc_super,
"segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
set_freezable();
- spin_lock(&sci->sc_state_lock);
- loop:
- for (;;) {
- int mode;
-
- if (sci->sc_state & NILFS_SEGCTOR_QUIT)
- goto end_thread;
-
- if (timeout || sci->sc_seq_request != sci->sc_seq_done)
- mode = SC_LSEG_SR;
- else if (sci->sc_flush_request)
- mode = nilfs_segctor_flush_mode(sci);
- else
- break;
-
- spin_unlock(&sci->sc_state_lock);
- nilfs_segctor_thread_construct(sci, mode);
- spin_lock(&sci->sc_state_lock);
- timeout = 0;
- }
-
- if (freezing(current)) {
- spin_unlock(&sci->sc_state_lock);
- try_to_freeze();
- spin_lock(&sci->sc_state_lock);
- } else {
+ while (!kthread_should_stop()) {
DEFINE_WAIT(wait);
- int should_sleep = 1;
+ bool should_write;
+ int mode;
+
+ if (freezing(current)) {
+ try_to_freeze();
+ continue;
+ }
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
-
- if (sci->sc_seq_request != sci->sc_seq_done)
- should_sleep = 0;
- else if (sci->sc_flush_request)
- should_sleep = 0;
- else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
- should_sleep = time_before(jiffies,
- sci->sc_timer.expires);
-
- if (should_sleep) {
- spin_unlock(&sci->sc_state_lock);
+ should_write = nilfs_log_write_required(sci, &mode);
+ if (!should_write)
schedule();
- spin_lock(&sci->sc_state_lock);
- }
finish_wait(&sci->sc_wait_daemon, &wait);
- timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_after_eq(jiffies, sci->sc_timer.expires));
if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
+
+ if (should_write)
+ nilfs_segctor_thread_construct(sci, mode);
}
- goto loop;
- end_thread:
/* end sync. */
+ spin_lock(&sci->sc_state_lock);
sci->sc_task = NULL;
timer_shutdown_sync(&sci->sc_timer);
- wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
}
-static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
-{
- struct task_struct *t;
-
- t = kthread_run(nilfs_segctor_thread, sci, "segctord");
- if (IS_ERR(t)) {
- int err = PTR_ERR(t);
-
- nilfs_err(sci->sc_super, "error %d creating segctord thread",
- err);
- return err;
- }
- wait_event(sci->sc_wait_task, sci->sc_task != NULL);
- return 0;
-}
-
-static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
- __acquires(&sci->sc_state_lock)
- __releases(&sci->sc_state_lock)
-{
- sci->sc_state |= NILFS_SEGCTOR_QUIT;
-
- while (sci->sc_task) {
- wake_up(&sci->sc_wait_daemon);
- spin_unlock(&sci->sc_state_lock);
- wait_event(sci->sc_wait_task, sci->sc_task == NULL);
- spin_lock(&sci->sc_state_lock);
- }
-}
-
/*
* Setup & clean-up functions
*/
@@ -2719,7 +2729,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
init_waitqueue_head(&sci->sc_wait_request);
init_waitqueue_head(&sci->sc_wait_daemon);
- init_waitqueue_head(&sci->sc_wait_task);
spin_lock_init(&sci->sc_state_lock);
INIT_LIST_HEAD(&sci->sc_dirty_files);
INIT_LIST_HEAD(&sci->sc_segbufs);
@@ -2774,8 +2783,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
up_write(&nilfs->ns_segctor_sem);
+ if (sci->sc_task) {
+ wake_up(&sci->sc_wait_daemon);
+ kthread_stop(sci->sc_task);
+ }
+
spin_lock(&sci->sc_state_lock);
- nilfs_segctor_kill_thread(sci);
flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
@@ -2823,14 +2836,15 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
* This allocates a log writer object, initializes it, and starts the
* log writer.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EINTR - Log writer thread creation failed due to interruption.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci;
+ struct task_struct *t;
int err;
if (nilfs->ns_writer) {
@@ -2843,15 +2857,23 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
return 0;
}
- nilfs->ns_writer = nilfs_segctor_new(sb, root);
- if (!nilfs->ns_writer)
+ sci = nilfs_segctor_new(sb, root);
+ if (unlikely(!sci))
return -ENOMEM;
- err = nilfs_segctor_start_thread(nilfs->ns_writer);
- if (unlikely(err))
+ nilfs->ns_writer = sci;
+ t = kthread_create(nilfs_segctor_thread, sci, "segctord");
+ if (IS_ERR(t)) {
+ err = PTR_ERR(t);
+ nilfs_err(sb, "error %d creating segctord thread", err);
nilfs_detach_log_writer(sb);
+ return err;
+ }
+ sci->sc_task = t;
+ timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
- return err;
+ wake_up_process(sci->sc_task);
+ return 0;
}
/**
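A hedged, generic sketch of the kthread lifecycle this rework adopts (plain <linux/kthread.h> API; every name below is illustrative rather than a nilfs2 symbol). kthread_create() returns the task still stopped, which is why the patch can publish sci->sc_task and set up the timer before waking the thread:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *arg)
{
	/* kthread_should_stop() turns true once kthread_stop() is called */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* or wait for work */
	return 0;	/* value handed back to kthread_stop() */
}

static int start_worker(void)
{
	worker = kthread_create(worker_fn, NULL, "demo-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	/* publish state here, then let the thread run */
	wake_up_process(worker);
	return 0;
}

static void stop_worker(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for its exit */
}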
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 1060f72ebf5a..f723f47ddc4e 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -22,10 +22,10 @@ struct nilfs_root;
* struct nilfs_recovery_info - Recovery information
* @ri_need_recovery: Recovery status
* @ri_super_root: Block number of the last super root
- * @ri_ri_cno: Number of the last checkpoint
+ * @ri_cno: Number of the last checkpoint
* @ri_lsegs_start: Region for roll-forwarding (start block number)
* @ri_lsegs_end: Region for roll-forwarding (end block number)
- * @ri_lseg_start_seq: Sequence value of the segment at ri_lsegs_start
+ * @ri_lsegs_start_seq: Sequence value of the segment at ri_lsegs_start
* @ri_used_segments: List of segments to be marked active
* @ri_pseg_start: Block number of the last partial segment
* @ri_seq: Sequence number on the last partial segment
@@ -105,9 +105,8 @@ struct nilfs_segsum_pointer {
* @sc_flush_request: inode bitmap of metadata files to be flushed
* @sc_wait_request: Client request queue
* @sc_wait_daemon: Daemon wait queue
- * @sc_wait_task: Start/end wait queue to control segctord task
* @sc_seq_request: Request counter
- * @sc_seq_accept: Accepted request count
+ * @sc_seq_accepted: Accepted request count
* @sc_seq_done: Completion counter
* @sc_sync: Request of explicit sync operation
* @sc_interval: Timeout value of background construction
@@ -158,7 +157,6 @@ struct nilfs_sc_info {
wait_queue_head_t sc_wait_request;
wait_queue_head_t sc_wait_daemon;
- wait_queue_head_t sc_wait_task;
__u32 sc_seq_request;
__u32 sc_seq_accepted;
@@ -171,7 +169,6 @@ struct nilfs_sc_info {
unsigned long sc_watermark;
struct timer_list sc_timer;
- struct task_struct *sc_timer_task;
struct task_struct *sc_task;
};
@@ -192,7 +189,6 @@ enum {
};
/* sc_state */
-#define NILFS_SEGCTOR_QUIT 0x0001 /* segctord is being destroyed */
#define NILFS_SEGCTOR_COMMIT 0x0004 /* committed transaction exists */
/*
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 6748218be7c5..eea5a6a12f7b 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -79,10 +79,17 @@ nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
NILFS_MDT(sufile)->mi_entry_size;
}
-static inline int nilfs_sufile_get_header_block(struct inode *sufile,
- struct buffer_head **bhp)
+static int nilfs_sufile_get_header_block(struct inode *sufile,
+ struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
+ int err = nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
+
+ if (unlikely(err == -ENOENT)) {
+ nilfs_error(sufile->i_sb,
+ "missing header block in segment usage metadata");
+ err = -EIO;
+ }
+ return err;
}
static inline int
@@ -506,8 +513,15 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
down_write(&NILFS_MDT(sufile)->mi_sem);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
- if (ret)
+ if (unlikely(ret)) {
+ if (ret == -ENOENT) {
+ nilfs_error(sufile->i_sb,
+ "segment usage for segment %llu is unreadable due to a hole block",
+ (unsigned long long)segnum);
+ ret = -EIO;
+ }
goto out_sem;
+ }
kaddr = kmap_local_page(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
@@ -840,21 +854,17 @@ out:
}
/**
- * nilfs_sufile_get_suinfo -
+ * nilfs_sufile_get_suinfo - get segment usage information
* @sufile: inode of segment usage file
* @segnum: segment number to start looking
- * @buf: array of suinfo
- * @sisz: byte size of suinfo
- * @nsi: size of suinfo array
- *
- * Description:
+ * @buf: array of suinfo
+ * @sisz: byte size of suinfo
+ * @nsi: size of suinfo array
*
- * Return Value: On success, 0 is returned and .... On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: Count of segment usage info items stored in the output buffer on
+ * success, or the following negative error code on failure.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
unsigned int sisz, size_t nsi)
@@ -1241,9 +1251,15 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
if (err)
goto failed;
- err = nilfs_sufile_get_header_block(sufile, &header_bh);
- if (err)
+ err = nilfs_mdt_get_block(sufile, 0, 0, NULL, &header_bh);
+ if (unlikely(err)) {
+ if (err == -ENOENT) {
+ nilfs_err(sb,
+ "missing header block in segment usage metadata");
+ err = -EINVAL;
+ }
goto failed;
+ }
sui = NILFS_SUI(sufile);
kaddr = kmap_local_page(header_bh->b_page);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index e835e1f5a712..eca79cca3803 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -105,6 +105,10 @@ static void nilfs_set_error(struct super_block *sb)
/**
* __nilfs_error() - report failure condition on a filesystem
+ * @sb: super block instance
+ * @function: name of calling function
+ * @fmt: format string for message to be output
+ * @...: optional arguments to @fmt
*
* __nilfs_error() sets an ERROR_FS flag on the superblock as well as
* reporting an error message. This function should be called when
@@ -156,6 +160,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
return NULL;
ii->i_bh = NULL;
ii->i_state = 0;
+ ii->i_type = 0;
ii->i_cno = 0;
ii->i_assoc_inode = NULL;
ii->i_bmap = &ii->i_bmap_data;
@@ -1063,6 +1068,10 @@ nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
goto failed_nilfs;
+ super_set_uuid(sb, nilfs->ns_sbp[0]->s_uuid,
+ sizeof(nilfs->ns_sbp[0]->s_uuid));
+ super_set_sysfs_name_bdev(sb);
+
cno = nilfs_last_cno(nilfs);
err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
if (err) {
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index e44dde57ab65..ac03fd3c330c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/random.h>
#include <linux/log2.h>
#include <linux/crc32.h>
#include "nilfs.h"
@@ -69,7 +68,6 @@ struct the_nilfs *alloc_nilfs(struct super_block *sb)
INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_inode_lock);
- spin_lock_init(&nilfs->ns_next_gen_lock);
spin_lock_init(&nilfs->ns_last_segment_lock);
nilfs->ns_cptree = RB_ROOT;
spin_lock_init(&nilfs->ns_cptree_lock);
@@ -754,9 +752,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
nilfs->ns_blocksize = blocksize;
- get_random_bytes(&nilfs->ns_next_generation,
- sizeof(nilfs->ns_next_generation));
-
err = nilfs_store_disk_layout(nilfs, sbp);
if (err)
goto failed_sbh;
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 1e829ed7b0ef..4776a70f01ae 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -71,8 +71,6 @@ enum {
* @ns_dirty_files: list of dirty files
* @ns_inode_lock: lock protecting @ns_dirty_files
* @ns_gc_inodes: dummy inodes to keep live blocks
- * @ns_next_generation: next generation number for inodes
- * @ns_next_gen_lock: lock protecting @ns_next_generation
* @ns_mount_opt: mount options
* @ns_resuid: uid for reserved blocks
* @ns_resgid: gid for reserved blocks
@@ -161,10 +159,6 @@ struct the_nilfs {
/* GC inode list */
struct list_head ns_gc_inodes;
- /* Inode allocator */
- u32 ns_next_generation;
- spinlock_t ns_next_gen_lock;
-
/* Mount options */
unsigned long ns_mount_opt;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index f3669403fabf..46440fbb8662 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -110,7 +110,7 @@ static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
prev = &dn->dn_next;
continue;
}
- fown = &dn->dn_filp->f_owner;
+ fown = file_f_owner(dn->dn_filp);
send_sigio(fown, dn->dn_fd, POLL_MSG);
if (dn->dn_mask & FS_DN_MULTISHOT)
prev = &dn->dn_next;
@@ -316,6 +316,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
goto out_err;
}
+ error = file_f_owner_allocate(filp);
+ if (error)
+ goto out_err;
+
/* set up the new_fsn_mark and new_dn_mark */
new_fsn_mark = &new_dn_mark->fsn_mark;
fsnotify_init_mark(new_fsn_mark, dnotify_group);
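The dnotify hunks assume f_owner is no longer embedded in struct file but allocated on demand, hence the new file_f_owner_allocate() call before a mark is set up and the file_f_owner() accessor in the event path. A minimal sketch of that pairing (kernel context assumed):

#include <linux/fs.h>

static int demo_signal_owner(struct file *filp, int fd)
{
	struct fown_struct *fown;
	int err;

	err = file_f_owner_allocate(filp);	/* may fail with -ENOMEM */
	if (err)
		return err;

	fown = file_f_owner(filp);		/* replaces &filp->f_owner */
	send_sigio(fown, fd, POLL_MSG);
	return 0;
}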
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9ec313e9f6e1..13454e5fd3fb 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1006,17 +1006,17 @@ static int fanotify_find_path(int dfd, const char __user *filename,
struct fd f = fdget(dfd);
ret = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
ret = -ENOTDIR;
if ((flags & FAN_MARK_ONLYDIR) &&
- !(S_ISDIR(file_inode(f.file)->i_mode))) {
+ !(S_ISDIR(file_inode(fd_file(f))->i_mode))) {
fdput(f);
goto out;
}
- *path = f.file->f_path;
+ *path = fd_file(f)->f_path;
path_get(path);
fdput(f);
} else {
@@ -1753,14 +1753,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
}
f = fdget(fanotify_fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* verify that this is indeed an fanotify instance */
ret = -EINVAL;
- if (unlikely(f.file->f_op != &fanotify_fops))
+ if (unlikely(fd_file(f)->f_op != &fanotify_fops))
goto fput_and_out;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
/*
* An unprivileged user is not allowed to setup mount nor filesystem
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 4ffc30606e0b..c7e451d5bd51 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -753,7 +753,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
return -EINVAL;
f = fdget(fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
@@ -763,7 +763,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
}
/* verify that this is indeed an inotify instance */
- if (unlikely(f.file->f_op != &inotify_fops)) {
+ if (unlikely(fd_file(f)->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
@@ -780,7 +780,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
@@ -798,14 +798,14 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
int ret = -EINVAL;
f = fdget(fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* verify that this is indeed an inotify instance */
- if (unlikely(f.file->f_op != &inotify_fops))
+ if (unlikely(fd_file(f)->f_op != &inotify_fops))
goto out;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
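All of the fdget() call sites converted above follow the same pattern: struct fd is treated as opaque and the file pointer is read through the fd_file() accessor from <linux/file.h>. A minimal kernel-context sketch, assuming only that accessor:

#include <linux/file.h>
#include <linux/fs.h>

static int demo_with_fd(unsigned int ufd)
{
	struct fd f = fdget(ufd);
	int ret;

	if (!fd_file(f))		/* empty fd: bad descriptor */
		return -EBADF;

	ret = vfs_fsync(fd_file(f), 0);	/* any operation on the file */
	fdput(f);			/* pairs with fdget() */
	return ret;
}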
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 97c37a9631e5..67ee176b8824 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -12,6 +12,7 @@
#include <linux/user_namespace.h>
#include <linux/nsfs.h>
#include <linux/uaccess.h>
+#include <linux/mnt_namespace.h>
#include "mount.h"
#include "internal.h"
@@ -128,6 +129,30 @@ int open_related_ns(struct ns_common *ns,
}
EXPORT_SYMBOL_GPL(open_related_ns);
+static int copy_ns_info_to_user(const struct mnt_namespace *mnt_ns,
+ struct mnt_ns_info __user *uinfo, size_t usize,
+ struct mnt_ns_info *kinfo)
+{
+ /*
+ * If userspace and the kernel have the same struct size it can just
+ * be copied. If userspace provides an older struct, only the bits that
+ * userspace knows about will be copied. If userspace provides a new
+ * struct, only the bits that the kernel knows about will be copied and
+ * the size value will be set to the size the kernel knows about.
+ */
+ kinfo->size = min(usize, sizeof(*kinfo));
+ kinfo->mnt_ns_id = mnt_ns->seq;
+ kinfo->nr_mounts = READ_ONCE(mnt_ns->nr_mounts);
+ /* Subtract the root mount of the mount namespace. */
+ if (kinfo->nr_mounts)
+ kinfo->nr_mounts--;
+
+ if (copy_to_user(uinfo, kinfo, kinfo->size))
+ return -EFAULT;
+
+ return 0;
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -135,6 +160,8 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
struct pid_namespace *pid_ns;
struct task_struct *tsk;
struct ns_common *ns = get_proc_ns(file_inode(filp));
+ struct mnt_namespace *mnt_ns;
+ bool previous = false;
uid_t __user *argp;
uid_t uid;
int ret;
@@ -156,7 +183,6 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
uid = from_kuid_munged(current_user_ns(), user_ns->owner);
return put_user(uid, argp);
case NS_GET_MNTNS_ID: {
- struct mnt_namespace *mnt_ns;
__u64 __user *idp;
__u64 id;
@@ -211,7 +237,79 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
if (!ret)
ret = -ESRCH;
- break;
+ return ret;
+ }
+ }
+
+ /* extensible ioctls */
+ switch (_IOC_NR(ioctl)) {
+ case _IOC_NR(NS_MNT_GET_INFO): {
+ struct mnt_ns_info kinfo = {};
+ struct mnt_ns_info __user *uinfo = (struct mnt_ns_info __user *)arg;
+ size_t usize = _IOC_SIZE(ioctl);
+
+ if (ns->ops->type != CLONE_NEWNS)
+ return -EINVAL;
+
+ if (!uinfo)
+ return -EINVAL;
+
+ if (usize < MNT_NS_INFO_SIZE_VER0)
+ return -EINVAL;
+
+ return copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo);
+ }
+ case _IOC_NR(NS_MNT_GET_PREV):
+ previous = true;
+ fallthrough;
+ case _IOC_NR(NS_MNT_GET_NEXT): {
+ struct mnt_ns_info kinfo = {};
+ struct mnt_ns_info __user *uinfo = (struct mnt_ns_info __user *)arg;
+ struct path path __free(path_put) = {};
+ struct file *f __free(fput) = NULL;
+ size_t usize = _IOC_SIZE(ioctl);
+
+ if (ns->ops->type != CLONE_NEWNS)
+ return -EINVAL;
+
+ if (usize < MNT_NS_INFO_SIZE_VER0)
+ return -EINVAL;
+
+ if (previous)
+ mnt_ns = lookup_prev_mnt_ns(to_mnt_ns(ns));
+ else
+ mnt_ns = lookup_next_mnt_ns(to_mnt_ns(ns));
+ if (IS_ERR(mnt_ns))
+ return PTR_ERR(mnt_ns);
+
+ ns = to_ns_common(mnt_ns);
+ /* Transfer ownership of @mnt_ns reference to @path. */
+ ret = path_from_stashed(&ns->stashed, nsfs_mnt, ns, &path);
+ if (ret)
+ return ret;
+
+ CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ f = dentry_open(&path, O_RDONLY, current_cred());
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (uinfo) {
+ /*
+ * If @uinfo is passed return all information about the
+ * mount namespace as well.
+ */
+ ret = copy_ns_info_to_user(to_mnt_ns(ns), uinfo, usize, &kinfo);
+ if (ret)
+ return ret;
+ }
+
+ /* Transfer reference of @f to caller's fdtable. */
+ fd_install(fd, no_free_ptr(f));
+ /* File descriptor is live so hand it off to the caller. */
+ return take_fd(fd);
}
default:
ret = -ENOTTY;
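A hypothetical userspace sketch of the extensible ioctl above. The caller's struct size travels in the ioctl number itself (_IOC_SIZE), so copy_ns_info_to_user() copies min(user, kernel) bytes and old and new binaries keep working; NS_MNT_GET_INFO and struct mnt_ns_info are assumed to come from the <linux/nsfs.h> uapi header merged with this change:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nsfs.h>		/* NS_MNT_GET_INFO, struct mnt_ns_info */

int main(void)
{
	struct mnt_ns_info info = {};	/* size is filled in by the kernel */
	int fd = open("/proc/self/ns/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, NS_MNT_GET_INFO, &info) < 0) {
		perror("NS_MNT_GET_INFO");
		return 1;
	}
	printf("mnt_ns_id=%llu nr_mounts=%u\n",
	       (unsigned long long)info.mnt_ns_id, info.nr_mounts);
	return 0;
}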
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index ca1ddc46bd86..6202895a4542 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -182,7 +182,7 @@ static int ntfs_extend_initialized_size(struct file *file,
for (;;) {
u32 zerofrom, len;
- struct page *page;
+ struct folio *folio;
u8 bits;
CLST vcn, lcn, clen;
@@ -208,14 +208,13 @@ static int ntfs_extend_initialized_size(struct file *file,
if (pos + len > new_valid)
len = new_valid - pos;
- err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
+ err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
if (err)
goto out;
- zero_user_segment(page, zerofrom, PAGE_SIZE);
+ folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);
- /* This function in any case puts page. */
- err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
+ err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
pos += len;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 6b0bdc474e76..f672072e6bd4 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -901,7 +901,7 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
}
int ntfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, struct page **pagep, void **fsdata)
+ loff_t pos, u32 len, struct folio **foliop, void **fsdata)
{
int err;
struct inode *inode = mapping->host;
@@ -910,7 +910,6 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO;
- *pagep = NULL;
if (is_resident(ni)) {
struct folio *folio = __filemap_get_folio(
mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
@@ -926,7 +925,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
ni_unlock(ni);
if (!err) {
- *pagep = &folio->page;
+ *foliop = folio;
goto out;
}
folio_unlock(folio);
@@ -936,7 +935,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
goto out;
}
- err = block_write_begin(mapping, pos, len, pagep,
+ err = block_write_begin(mapping, pos, len, foliop,
ntfs_get_block_write_begin);
out:
@@ -947,9 +946,8 @@ out:
* ntfs_write_end - Address_space_operations::write_end.
*/
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
- u32 len, u32 copied, struct page *page, void *fsdata)
+ u32 len, u32 copied, struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
u64 valid = ni->i_valid;
@@ -979,7 +977,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
folio_unlock(folio);
folio_put(folio);
} else {
- err = generic_write_end(file, mapping, pos, len, copied, page,
+ err = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
}
@@ -1008,45 +1006,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
return err;
}
-int reset_log_file(struct inode *inode)
-{
- int err;
- loff_t pos = 0;
- u32 log_size = inode->i_size;
- struct address_space *mapping = inode->i_mapping;
-
- for (;;) {
- u32 len;
- void *kaddr;
- struct page *page;
-
- len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
-
- err = block_write_begin(mapping, pos, len, &page,
- ntfs_get_block_write_begin);
- if (err)
- goto out;
-
- kaddr = kmap_atomic(page);
- memset(kaddr, -1, len);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
-
- err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
- if (err < 0)
- goto out;
- pos += len;
-
- if (pos >= log_size)
- break;
- balance_dirty_pages_ratelimited(mapping);
- }
-out:
- mark_inode_dirty_sync(inode);
-
- return err;
-}
-
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index e5255a251929..584f814715f4 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -708,13 +708,12 @@ int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
const struct cpu_str *name);
int ntfs_set_size(struct inode *inode, u64 new_size);
-int reset_log_file(struct inode *inode);
int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create);
int ntfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, struct page **pagep, void **fsdata);
+ loff_t pos, u32 len, struct folio **foliop, void **fsdata);
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
- u32 len, u32 copied, struct page *page, void *fsdata);
+ u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 6be175a1ab3c..1fea43c33b6b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1187,7 +1187,7 @@ static int ocfs2_write_cluster(struct address_space *mapping,
/* This is the direct io target page. */
if (wc->w_pages[i] == NULL) {
- p_blkno++;
+ p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
continue;
}
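The corrected increment advances the physical block cursor by a full page's worth of filesystem blocks when the direct-io target page is skipped; bumping it by one only worked when block size equalled page size. A quick standalone check of the arithmetic with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;	/* 4 KiB page */
	unsigned int blkbits = 9;	/* 512-byte filesystem block */

	/* blocks spanned by one page: what p_blkno must advance by */
	printf("%u\n", 1u << (page_shift - blkbits));	/* 8, not 1 */
	return 0;
}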
@@ -1643,7 +1643,7 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct page **pagep, void **fsdata,
+ struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
@@ -1826,8 +1826,8 @@ try_again:
ocfs2_free_alloc_context(meta_ac);
success:
- if (pagep)
- *pagep = wc->w_target_page;
+ if (foliop)
+ *foliop = page_folio(wc->w_target_page);
*fsdata = wc;
return 0;
out_quota:
@@ -1879,7 +1879,7 @@ out:
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
struct buffer_head *di_bh = NULL;
@@ -1901,7 +1901,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
- pagep, fsdata, di_bh, NULL);
+ foliop, fsdata, di_bh, NULL);
if (ret) {
mlog_errno(ret);
goto out_fail;
@@ -2076,7 +2076,7 @@ out:
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
int ret;
struct inode *inode = mapping->host;
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 3a520117fa59..45db1781ea73 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -38,7 +38,7 @@ typedef enum {
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct page **pagep, void **fsdata,
+ struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page);
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index cdb9b9bdea1f..8f714406528d 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -235,7 +235,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(sb, block++);
if (bhs[i] == NULL) {
- ocfs2_metadata_cache_io_unlock(ci);
status = -ENOMEM;
mlog_errno(status);
/* Don't forget to put previous bh! */
@@ -389,7 +388,8 @@ read_failure:
/* Always set the buffer in the cache, even if it was
* a forced read, or read-ahead which hasn't yet
* completed. */
- ocfs2_set_buffer_uptodate(ci, bh);
+ if (bh)
+ ocfs2_set_buffer_uptodate(ci, bh);
}
ocfs2_metadata_cache_io_unlock(ci);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 1bde1281d514..4b9f45d7049e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1785,17 +1785,17 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
goto out;
f = fdget(fd);
- if (f.file == NULL)
+ if (fd_file(f) == NULL)
goto out;
if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
reg->hr_block_bytes == 0)
goto out2;
- if (!S_ISBLK(f.file->f_mapping->host->i_mode))
+ if (!S_ISBLK(fd_file(f)->f_mapping->host->i_mode))
goto out2;
- reg->hr_bdev_file = bdev_file_open_by_dev(f.file->f_mapping->host->i_rdev,
+ reg->hr_bdev_file = bdev_file_open_by_dev(fd_file(f)->f_mapping->host->i_rdev,
BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(reg->hr_bdev_file)) {
ret = PTR_ERR(reg->hr_bdev_file);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f0beb173dbba..213206ebdd58 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1932,6 +1932,7 @@ int ocfs2_readdir(struct file *file, struct dir_context *ctx)
{
int error = 0;
struct inode *inode = file_inode(file);
+ struct ocfs2_file_private *fp = file->private_data;
int lock_level = 0;
trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1952,7 +1953,7 @@ int ocfs2_readdir(struct file *file, struct dir_context *ctx)
goto bail_nolock;
}
- error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
+ error = ocfs2_dir_foreach_blk(inode, &fp->cookie, ctx, false);
ocfs2_inode_unlock(inode, lock_level);
if (error)
@@ -3511,16 +3512,6 @@ static int dx_leaf_sort_cmp(const void *a, const void *b)
return 0;
}
-static void dx_leaf_sort_swap(void *a, void *b, int size)
-{
- struct ocfs2_dx_entry *entry1 = a;
- struct ocfs2_dx_entry *entry2 = b;
-
- BUG_ON(size != sizeof(*entry1));
-
- swap(*entry1, *entry2);
-}
-
static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
{
struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
@@ -3781,7 +3772,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
*/
sort(dx_leaf->dl_list.de_entries, num_used,
sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
- dx_leaf_sort_swap);
+ NULL);
ocfs2_journal_dirty(handle, dx_leaf_bh);
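Passing NULL as the swap callback is safe here; a minimal sketch of the sort() contract in lib/sort.c that these deletions rely on:
/*
 * sort(base, num, size, cmp_func, swap_func): when swap_func is
 * NULL, sort() falls back to its built-in generic swap, which is
 * already optimized for 4- and 8-byte-aligned element sizes, so a
 * trivial wrapper like dx_leaf_sort_swap() adds nothing.
 */
sort(dx_leaf->dl_list.de_entries, num_used,
     sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp, NULL);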
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index da78a04d6f0b..60df52e4c1f8 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3151,11 +3151,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
#ifdef CONFIG_OCFS2_FS_STATS
if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
now = ktime_to_us(ktime_get_real());
- if (lockres->l_lock_prmode.ls_last >
- lockres->l_lock_exmode.ls_last)
- last = lockres->l_lock_prmode.ls_last;
- else
- last = lockres->l_lock_exmode.ls_last;
+ last = max(lockres->l_lock_prmode.ls_last,
+ lockres->l_lock_exmode.ls_last);
/*
* Use d_filter_secs field to filter lock resources dump,
* the default d_filter_secs(0) value filters nothing,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index ccc57038a977..ad131a2fc58e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -755,7 +755,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
u64 abs_to, struct buffer_head *di_bh)
{
struct address_space *mapping = inode->i_mapping;
- struct page *page;
+ struct folio *folio;
unsigned long index = abs_from >> PAGE_SHIFT;
handle_t *handle;
int ret = 0;
@@ -774,9 +774,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
goto out;
}
- page = find_or_create_page(mapping, index, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
mlog_errno(ret);
goto out_commit_trans;
}
@@ -803,7 +804,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
* __block_write_begin and block_commit_write to zero the
* whole block.
*/
- ret = __block_write_begin(page, block_start + 1, 0,
+ ret = __block_write_begin(folio, block_start + 1, 0,
ocfs2_get_block);
if (ret < 0) {
mlog_errno(ret);
@@ -812,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/* must not update i_size! */
- block_commit_write(page, block_start + 1, block_start + 1);
+ block_commit_write(&folio->page, block_start + 1, block_start + 1);
}
/*
@@ -833,8 +834,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
}
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out_commit_trans:
if (handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -2750,6 +2751,13 @@ out_unlock:
return remapped > 0 ? remapped : ret;
}
+static loff_t ocfs2_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct ocfs2_file_private *fp = file->private_data;
+
+ return generic_llseek_cookie(file, offset, whence, &fp->cookie);
+}
+
const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
@@ -2797,7 +2805,7 @@ const struct file_operations ocfs2_fops = {
WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
const struct file_operations ocfs2_dops = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_dir_llseek,
.read = generic_read_dir,
.iterate_shared = shared_ocfs2_readdir,
.fsync = ocfs2_sync_file,
@@ -2843,7 +2851,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
};
const struct file_operations ocfs2_dops_no_plocks = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_dir_llseek,
.read = generic_read_dir,
.iterate_shared = shared_ocfs2_readdir,
.fsync = ocfs2_sync_file,
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 8e53e4ac1120..41e65e45a9f3 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -20,6 +20,7 @@ struct ocfs2_alloc_context;
enum ocfs2_alloc_restarted;
struct ocfs2_file_private {
+ u64 cookie;
struct file *fp_file;
struct mutex fp_mutex;
struct ocfs2_lock_res fp_flock;
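A rough sketch of the seek helper these hunks depend on (assumed behavior of generic_llseek_cookie() in fs/read_write.c, not shown in this diff):
/*
 * generic_llseek_cookie(file, offset, whence, &cookie) seeks like
 * generic_file_llseek() but, whenever the position changes, resets
 * the caller-supplied cookie instead of touching file->f_version.
 * Storing the readdir resume state in ocfs2_file_private::cookie
 * therefore keeps the old "seek invalidates readdir state" rule
 * while letting f_version go away.
 */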
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 530fba34f6d3..1bf188b6866a 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1055,7 +1055,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
if (!igrab(inode))
BUG();
- num_running_trans = atomic_read(&(osb->journal->j_num_trans));
+ num_running_trans = atomic_read(&(journal->j_num_trans));
trace_ocfs2_journal_shutdown(num_running_trans);
/* Do a commit_cache here. It will flush our journal, *and*
@@ -1074,9 +1074,10 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
osb->commit_task = NULL;
}
- BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
+ BUG_ON(atomic_read(&(journal->j_num_trans)) != 0);
- if (ocfs2_mount_local(osb)) {
+ if (ocfs2_mount_local(osb) &&
+ (journal->j_journal->j_flags & JBD2_LOADED)) {
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 5df34561c551..8ac42ea81a17 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -1002,6 +1002,25 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = bit_off + 1;
}
+ /* clear the contiguous bits until the end boundary */
+ if (count) {
+ blkno = la_start_blk +
+ ocfs2_clusters_to_blocks(osb->sb,
+ start - count);
+
+ trace_ocfs2_sync_local_to_main_free(
+ count, start - count,
+ (unsigned long long)la_start_blk,
+ (unsigned long long)blkno);
+
+ status = ocfs2_release_clusters(handle,
+ main_bm_inode,
+ main_bm_bh, blkno,
+ count);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
bail:
if (status)
mlog_errno(status);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 1834f26522ed..6ef4cb045ccd 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -53,7 +53,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
loff_t pos = page_offset(page);
unsigned int len = PAGE_SIZE;
pgoff_t last_index;
- struct page *locked_page = NULL;
+ struct folio *locked_folio = NULL;
void *fsdata;
loff_t size = i_size_read(inode);
@@ -91,7 +91,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
len = ((size - 1) & ~PAGE_MASK) + 1;
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
- &locked_page, &fsdata, di_bh, page);
+ &locked_folio, &fsdata, di_bh, page);
if (err) {
if (err != -ENOSPC)
mlog_errno(err);
@@ -99,7 +99,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
goto out;
}
- if (!locked_page) {
+ if (!locked_folio) {
ret = VM_FAULT_NOPAGE;
goto out;
}
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 0575c2d060eb..2b0daced98eb 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -371,12 +371,16 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
&pcount, NULL);
- if (status < 0)
+ if (status < 0) {
+ mlog_errno(status);
goto out_unlock;
+ }
status = ocfs2_qinfo_lock(oinfo, 0);
- if (status < 0)
+ if (status < 0) {
+ mlog_errno(status);
goto out_unlock;
+ }
status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct ocfs2_global_disk_dqinfo),
OCFS2_GLOBAL_INFO_OFF);
@@ -404,12 +408,11 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
schedule_delayed_work(&oinfo->dqi_sync_work,
msecs_to_jiffies(oinfo->dqi_syncms));
-out_err:
- return status;
+ return 0;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
- mlog_errno(status);
- goto out_err;
+out_err:
+ return status;
}
/* Write information to global quota file. Expects exclusive lock on quota
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 8ce462c64c51..73d3367c533b 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -692,7 +692,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
int status;
struct buffer_head *bh = NULL;
struct ocfs2_quota_recovery *rec;
- int locked = 0;
+ int locked = 0, global_read = 0;
info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
@@ -700,6 +700,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
" info.");
+ status = -ENOMEM;
goto out_err;
}
info->dqi_priv = oinfo;
@@ -712,6 +713,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
status = ocfs2_global_read_info(sb, type);
if (status < 0)
goto out_err;
+ global_read = 1;
status = ocfs2_inode_lock(lqinode, &oinfo->dqi_lqi_bh, 1);
if (status < 0) {
@@ -782,10 +784,12 @@ out_err:
if (locked)
ocfs2_inode_unlock(lqinode, 1);
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
+ if (global_read)
+ cancel_delayed_work_sync(&oinfo->dqi_sync_work);
kfree(oinfo);
}
brelse(bh);
- return -1;
+ return status;
}
/* Write local info to quota file */
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 1f303b1adf1a..4f85508538fc 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -1392,13 +1392,6 @@ static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
return 0;
}
-static void swap_refcount_rec(void *a, void *b, int size)
-{
- struct ocfs2_refcount_rec *l = a, *r = b;
-
- swap(*l, *r);
-}
-
/*
* The refcount cpos are ordered by their 64bit cpos,
* But we will use the low 32 bit to be the e_cpos in the b-tree.
@@ -1474,7 +1467,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
*/
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_low_cpos, NULL);
ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
if (ret) {
@@ -1499,11 +1492,11 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_cpos, NULL);
sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_cpos, NULL);
*split_cpos = cpos;
return 0;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index afee70125ae3..3d404624bb96 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1571,15 +1571,13 @@ static int __init ocfs2_init(void)
ocfs2_set_locking_protocol();
- status = register_quota_format(&ocfs2_quota_format);
- if (status < 0)
- goto out3;
+ register_quota_format(&ocfs2_quota_format);
+
status = register_filesystem(&ocfs2_fs_type);
if (!status)
return 0;
unregister_quota_format(&ocfs2_quota_format);
-out3:
debugfs_remove(ocfs2_debugfs_root);
ocfs2_free_mem_caches();
out2:
@@ -2357,8 +2355,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
(unsigned long long)bh->b_blocknr);
} else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 ||
le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) {
- mlog(ML_ERROR, "bad cluster size found: %u\n",
- 1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits));
+ mlog(ML_ERROR, "bad cluster size bit found: %u\n",
+ le32_to_cpu(di->id2.i_super.s_clustersize_bits));
} else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) {
mlog(ML_ERROR, "bad root_blkno: 0\n");
} else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) {
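Illustrative numbers for the message change (assuming the on-disk field stores a shift count, as the range check implies):
/*
 * Valid s_clustersize_bits values are 12..20, i.e. cluster sizes of
 * 4 KiB .. 1 MiB.  For a corrupted value such as 40, the old
 * "1 << bits" computation is an undefined int shift, so logging the
 * raw bit count reports the corruption without tripping on it.
 */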
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 35c0cc2a51af..0e58a5ce539e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -4167,15 +4167,6 @@ static int cmp_xe(const void *a, const void *b)
return 0;
}
-static void swap_xe(void *a, void *b, int size)
-{
- struct ocfs2_xattr_entry *l = a, *r = b, tmp;
-
- tmp = *l;
- memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
- memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
-}
-
/*
* When the ocfs2_xattr_block is filled up, new bucket will be created
* and all the xattr entries will be moved to the new bucket.
@@ -4241,7 +4232,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
- cmp_xe, swap_xe);
+ cmp_xe, NULL);
}
/*
@@ -4436,7 +4427,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
*/
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
- cmp_xe_offset, swap_xe);
+ cmp_xe_offset, NULL);
/* Move all name/values to the end of the bucket. */
xe = xh->xh_entries;
@@ -4478,7 +4469,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
/* sort the entries by their name_hash. */
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
- cmp_xe, swap_xe);
+ cmp_xe, NULL);
buf = bucket_buf;
for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 6b580b9da8e3..98358d405b6a 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -312,11 +312,11 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to)
static int omfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, omfs_get_block);
if (unlikely(ret))
omfs_write_failed(mapping, pos + len);
diff --git a/fs/open.c b/fs/open.c
index 22adbef7ecc2..acaeb3e25c88 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -193,10 +193,10 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
if (length < 0)
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = do_ftruncate(f.file, length, small);
+ error = do_ftruncate(fd_file(f), length, small);
fdput(f);
return error;
@@ -252,40 +252,39 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (offset < 0 || len <= 0)
return -EINVAL;
- /* Return error if mode is not supported */
- if (mode & ~FALLOC_FL_SUPPORTED_MASK)
+ if (mode & ~(FALLOC_FL_MODE_MASK | FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
- /* Punch hole and zero range are mutually exclusive */
- if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) ==
- (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
- return -EOPNOTSUPP;
-
- /* Punch hole must have keep size set */
- if ((mode & FALLOC_FL_PUNCH_HOLE) &&
- !(mode & FALLOC_FL_KEEP_SIZE))
+ /*
+ * Modes are exclusive, even if that is not obvious from the encoding
+ * as bit masks and the mix with the flag in the same namespace.
+ *
+ * To make things even more complicated, FALLOC_FL_ALLOCATE_RANGE is
+ * encoded as no bit set.
+ */
+ switch (mode & FALLOC_FL_MODE_MASK) {
+ case FALLOC_FL_ALLOCATE_RANGE:
+ case FALLOC_FL_UNSHARE_RANGE:
+ case FALLOC_FL_ZERO_RANGE:
+ break;
+ case FALLOC_FL_PUNCH_HOLE:
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ return -EOPNOTSUPP;
+ break;
+ case FALLOC_FL_COLLAPSE_RANGE:
+ case FALLOC_FL_INSERT_RANGE:
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ return -EOPNOTSUPP;
+ break;
+ default:
return -EOPNOTSUPP;
-
- /* Collapse range should only be used exclusively. */
- if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
- (mode & ~FALLOC_FL_COLLAPSE_RANGE))
- return -EINVAL;
-
- /* Insert range should only be used exclusively. */
- if ((mode & FALLOC_FL_INSERT_RANGE) &&
- (mode & ~FALLOC_FL_INSERT_RANGE))
- return -EINVAL;
-
- /* Unshare range should only be used with allocate mode. */
- if ((mode & FALLOC_FL_UNSHARE_RANGE) &&
- (mode & ~(FALLOC_FL_UNSHARE_RANGE | FALLOC_FL_KEEP_SIZE)))
- return -EINVAL;
+ }
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
/*
- * We can only allow pure fallocate on append only files
+ * On append-only files only space preallocation is supported.
*/
if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
return -EPERM;
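Worked examples of the new mode decoding (assuming the uapi encoding the comment describes, with FALLOC_FL_ALLOCATE_RANGE defined as 0):
/*
 *   mode = 0 (plain preallocation)                        -> allowed
 *   mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE     -> allowed
 *   mode = FALLOC_FL_PUNCH_HOLE                           -> -EOPNOTSUPP
 *   mode = FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_KEEP_SIZE -> -EOPNOTSUPP
 *   mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE    -> -EOPNOTSUPP
 *     (two mode bits set can never match a single case label)
 */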
@@ -353,8 +352,8 @@ int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len)
struct fd f = fdget(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_fallocate(f.file, mode, offset, len);
+ if (fd_file(f)) {
+ error = vfs_fallocate(fd_file(f), mode, offset, len);
fdput(f);
}
return error;
@@ -585,16 +584,16 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
int error;
error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
error = -ENOTDIR;
- if (!d_can_lookup(f.file->f_path.dentry))
+ if (!d_can_lookup(fd_file(f)->f_path.dentry))
goto out_putf;
- error = file_permission(f.file, MAY_EXEC | MAY_CHDIR);
+ error = file_permission(fd_file(f), MAY_EXEC | MAY_CHDIR);
if (!error)
- set_fs_pwd(current->fs, &f.file->f_path);
+ set_fs_pwd(current->fs, &fd_file(f)->f_path);
out_putf:
fdput(f);
out:
@@ -675,8 +674,8 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
struct fd f = fdget(fd);
int err = -EBADF;
- if (f.file) {
- err = vfs_fchmod(f.file, mode);
+ if (fd_file(f)) {
+ err = vfs_fchmod(fd_file(f), mode);
fdput(f);
}
return err;
@@ -869,8 +868,8 @@ int ksys_fchown(unsigned int fd, uid_t user, gid_t group)
struct fd f = fdget(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_fchown(f.file, user, group);
+ if (fd_file(f)) {
+ error = vfs_fchown(fd_file(f), user, group);
fdput(f);
}
return error;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index fdb9b65db1de..aae6d2b8767d 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -309,22 +309,18 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
static int orangefs_write_begin(struct file *file,
struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct orangefs_write_range *wr;
struct folio *folio;
- struct page *page;
- pgoff_t index;
int ret;
- index = pos >> PAGE_SHIFT;
+ folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
-
- *pagep = page;
- folio = page_folio(page);
+ *foliop = folio;
if (folio_test_dirty(folio) && !folio_test_private(folio)) {
/*
@@ -365,9 +361,10 @@ okay:
}
static int orangefs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
+ loff_t pos, unsigned len, unsigned copied, struct folio *folio,
+ void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
/*
@@ -377,23 +374,23 @@ static int orangefs_write_end(struct file *file, struct address_space *mapping,
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
- /* zero the stale part of the page if we did a short copy */
- if (!PageUptodate(page)) {
+ /* zero the stale part of the folio if we did a short copy */
+ if (!folio_test_uptodate(folio)) {
unsigned from = pos & (PAGE_SIZE - 1);
if (copied < len) {
- zero_user(page, from + copied, len - copied);
+ folio_zero_range(folio, from + copied, len - copied);
}
/* Set fully written pages uptodate. */
- if (pos == page_offset(page) &&
+ if (pos == folio_pos(folio) &&
(len == PAGE_SIZE || pos + len == inode->i_size)) {
- zero_user_segment(page, from + copied, PAGE_SIZE);
- SetPageUptodate(page);
+ folio_zero_segment(folio, from + copied, PAGE_SIZE);
+ folio_mark_uptodate(folio);
}
}
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
mark_inode_dirty_sync(file_inode(file));
return copied;
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index be4ba03a01a0..04e15dfa504a 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -904,7 +904,7 @@ static void orangefs_obj_release(struct kobject *kobj)
orangefs_obj = NULL;
}
-static struct kobj_type orangefs_ktype = {
+static const struct kobj_type orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = orangefs_default_groups,
.release = orangefs_obj_release,
@@ -951,7 +951,7 @@ static void acache_orangefs_obj_release(struct kobject *kobj)
acache_orangefs_obj = NULL;
}
-static struct kobj_type acache_orangefs_ktype = {
+static const struct kobj_type acache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = acache_orangefs_default_groups,
.release = acache_orangefs_obj_release,
@@ -998,7 +998,7 @@ static void capcache_orangefs_obj_release(struct kobject *kobj)
capcache_orangefs_obj = NULL;
}
-static struct kobj_type capcache_orangefs_ktype = {
+static const struct kobj_type capcache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = capcache_orangefs_default_groups,
.release = capcache_orangefs_obj_release,
@@ -1045,7 +1045,7 @@ static void ccache_orangefs_obj_release(struct kobject *kobj)
ccache_orangefs_obj = NULL;
}
-static struct kobj_type ccache_orangefs_ktype = {
+static const struct kobj_type ccache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = ccache_orangefs_default_groups,
.release = ccache_orangefs_obj_release,
@@ -1092,7 +1092,7 @@ static void ncache_orangefs_obj_release(struct kobject *kobj)
ncache_orangefs_obj = NULL;
}
-static struct kobj_type ncache_orangefs_ktype = {
+static const struct kobj_type ncache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = ncache_orangefs_default_groups,
.release = ncache_orangefs_obj_release,
@@ -1132,7 +1132,7 @@ static void pc_orangefs_obj_release(struct kobject *kobj)
pc_orangefs_obj = NULL;
}
-static struct kobj_type pc_orangefs_ktype = {
+static const struct kobj_type pc_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = pc_orangefs_default_groups,
.release = pc_orangefs_obj_release,
@@ -1165,7 +1165,7 @@ static void stats_orangefs_obj_release(struct kobject *kobj)
stats_orangefs_obj = NULL;
}
-static struct kobj_type stats_orangefs_ktype = {
+static const struct kobj_type stats_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = stats_orangefs_default_groups,
.release = stats_orangefs_obj_release,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index a5ef2005a2cc..2ed6ad641a20 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -115,12 +115,12 @@ int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct de
continue;
error = security_inode_copy_up_xattr(old, name);
- if (error < 0 && error != -EOPNOTSUPP)
- break;
- if (error == 1) {
+ if (error == -ECANCELED) {
error = 0;
continue; /* Discard */
}
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
if (is_posix_acl_xattr(name)) {
error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name);
@@ -243,8 +243,24 @@ static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen)
return 0;
}
+static int ovl_sync_file(struct path *path)
+{
+ struct file *new_file;
+ int err;
+
+ new_file = ovl_path_open(path, O_LARGEFILE | O_RDONLY);
+ if (IS_ERR(new_file))
+ return PTR_ERR(new_file);
+
+ err = vfs_fsync(new_file, 0);
+ fput(new_file);
+
+ return err;
+}
+
static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
- struct file *new_file, loff_t len)
+ struct file *new_file, loff_t len,
+ bool datasync)
{
struct path datapath;
struct file *old_file;
@@ -342,7 +358,8 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
len -= bytes;
}
- if (!error && ovl_should_sync(ofs))
+ /* call fsync once, either now or later along with metadata */
+ if (!error && ovl_should_sync(ofs) && datasync)
error = vfs_fsync(new_file, 0);
out_fput:
fput(old_file);
@@ -574,6 +591,7 @@ struct ovl_copy_up_ctx {
bool indexed;
bool metacopy;
bool metacopy_digest;
+ bool metadata_fsync;
};
static int ovl_link_up(struct ovl_copy_up_ctx *c)
@@ -634,7 +652,8 @@ static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp)
if (IS_ERR(new_file))
return PTR_ERR(new_file);
- err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size);
+ err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size,
+ !c->metadata_fsync);
fput(new_file);
return err;
@@ -701,6 +720,10 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
err = ovl_set_attr(ofs, temp, &c->stat);
inode_unlock(temp->d_inode);
+ /* fsync metadata before moving it into upper dir */
+ if (!err && ovl_should_sync(ofs) && c->metadata_fsync)
+ err = ovl_sync_file(&upperpath);
+
return err;
}
@@ -860,7 +883,8 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
temp = tmpfile->f_path.dentry;
if (!c->metacopy && c->stat.size) {
- err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
+ err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size,
+ !c->metadata_fsync);
if (err)
goto out_fput;
}
@@ -1135,6 +1159,17 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
!kgid_has_mapping(current_user_ns(), ctx.stat.gid))
return -EOVERFLOW;
+ /*
+ * With metacopy disabled, we fsync after final metadata copyup, for
+ * both regular files and directories to get atomic copyup semantics
+ * on filesystems that do not use strict metadata ordering (e.g. ubifs).
+ *
+ * With metacopy enabled we want to avoid fsync on all meta copyup
+ * that will hurt performance of workloads such as chown -R, so we
+ * only fsync on data copyup as legacy behavior.
+ */
+ ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy &&
+ (S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode));
ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
if (parent) {
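Net effect of the datasync plumbing above (a summary inferred from these hunks, not separate documentation):
/*
 * metacopy enabled:    fsync the data copy only (legacy behavior).
 * metacopy disabled, regular file or directory:
 *   skip fsync while copying data, then fsync exactly once after
 *   the final metadata copy-up via ovl_sync_file(), so the copied
 *   file becomes durable atomically even on filesystems without
 *   strict metadata ordering (e.g. ubifs).
 */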
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 1a411cae57ed..2b7a5a3a7a2f 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -93,11 +93,11 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
bool allow_meta)
{
struct dentry *dentry = file_dentry(file);
+ struct file *realfile = file->private_data;
struct path realpath;
int err;
- real->flags = 0;
- real->file = file->private_data;
+ real->word = (unsigned long)realfile;
if (allow_meta) {
ovl_path_real(dentry, &realpath);
@@ -113,16 +113,17 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
return -EIO;
/* Has it been copied up since we'd opened it? */
- if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
- real->flags = FDPUT_FPUT;
- real->file = ovl_open_realfile(file, &realpath);
-
- return PTR_ERR_OR_ZERO(real->file);
+ if (unlikely(file_inode(realfile) != d_inode(realpath.dentry))) {
+ struct file *f = ovl_open_realfile(file, &realpath);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ real->word = (unsigned long)f | FDPUT_FPUT;
+ return 0;
}
/* Did the flags change since open? */
- if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS))
- return ovl_change_flags(real->file, file->f_flags);
+ if (unlikely((file->f_flags ^ realfile->f_flags) & ~OVL_OPEN_FLAGS))
+ return ovl_change_flags(realfile, file->f_flags);
return 0;
}
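For context, a sketch of the struct fd representation these hunks convert to (assumed shape of include/linux/file.h after the change):
struct fd {
	unsigned long word;	/* struct file * with low bits used for flags */
};
/*
 * fd_file(f) masks the flag bits back out, roughly
 *   (struct file *)(f.word & ~(FDPUT_FPUT | FDPUT_POS_UNLOCK));
 * which is why the copied-up case stores "(unsigned long)f | FDPUT_FPUT"
 * rather than filling in separate ->file and ->flags members.
 */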
@@ -130,10 +131,11 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
static int ovl_real_fdget(const struct file *file, struct fd *real)
{
if (d_is_dir(file_dentry(file))) {
- real->flags = 0;
- real->file = ovl_dir_real_file(file, false);
-
- return PTR_ERR_OR_ZERO(real->file);
+ struct file *f = ovl_dir_real_file(file, false);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ real->word = (unsigned long)f;
+ return 0;
}
return ovl_real_fdget_meta(file, real, false);
@@ -209,13 +211,13 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
* files, so we use the real file to perform seeks.
*/
ovl_inode_lock(inode);
- real.file->f_pos = file->f_pos;
+ fd_file(real)->f_pos = file->f_pos;
old_cred = ovl_override_creds(inode->i_sb);
- ret = vfs_llseek(real.file, offset, whence);
+ ret = vfs_llseek(fd_file(real), offset, whence);
revert_creds(old_cred);
- file->f_pos = real.file->f_pos;
+ file->f_pos = fd_file(real)->f_pos;
ovl_inode_unlock(inode);
fdput(real);
@@ -275,7 +277,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
return ret;
- ret = backing_file_read_iter(real.file, iter, iocb, iocb->ki_flags,
+ ret = backing_file_read_iter(fd_file(real), iter, iocb, iocb->ki_flags,
&ctx);
fdput(real);
@@ -314,7 +316,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* this property in case it is set by the issuer.
*/
ifl &= ~IOCB_DIO_CALLER_COMP;
- ret = backing_file_write_iter(real.file, iter, iocb, ifl, &ctx);
+ ret = backing_file_write_iter(fd_file(real), iter, iocb, ifl, &ctx);
fdput(real);
out_unlock:
@@ -339,7 +341,7 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
if (ret)
return ret;
- ret = backing_file_splice_read(real.file, ppos, pipe, len, flags, &ctx);
+ ret = backing_file_splice_read(fd_file(real), ppos, pipe, len, flags, &ctx);
fdput(real);
return ret;
@@ -348,7 +350,7 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
/*
* Calling iter_file_splice_write() directly from overlay's f_op may deadlock
* due to lock order inversion between pipe->mutex in iter_file_splice_write()
- * and file_start_write(real.file) in ovl_write_iter().
+ * and file_start_write(fd_file(real)) in ovl_write_iter().
*
* So do everything ovl_write_iter() does and call iter_file_splice_write() on
* the real file.
@@ -373,7 +375,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
if (ret)
goto out_unlock;
- ret = backing_file_splice_write(pipe, real.file, ppos, len, flags, &ctx);
+ ret = backing_file_splice_write(pipe, fd_file(real), ppos, len, flags, &ctx);
fdput(real);
out_unlock:
@@ -397,9 +399,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
/* Don't sync lower file for fear of receiving EROFS error */
- if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
+ if (file_inode(fd_file(real)) == ovl_inode_upper(file_inode(file))) {
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fsync_range(real.file, start, end, datasync);
+ ret = vfs_fsync_range(fd_file(real), start, end, datasync);
revert_creds(old_cred);
}
@@ -439,7 +441,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
goto out_unlock;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fallocate(real.file, mode, offset, len);
+ ret = vfs_fallocate(fd_file(real), mode, offset, len);
revert_creds(old_cred);
/* Update size */
@@ -464,7 +466,7 @@ static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
return ret;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fadvise(real.file, offset, len, advice);
+ ret = vfs_fadvise(fd_file(real), offset, len, advice);
revert_creds(old_cred);
fdput(real);
@@ -509,18 +511,18 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
switch (op) {
case OVL_COPY:
- ret = vfs_copy_file_range(real_in.file, pos_in,
- real_out.file, pos_out, len, flags);
+ ret = vfs_copy_file_range(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len, flags);
break;
case OVL_CLONE:
- ret = vfs_clone_file_range(real_in.file, pos_in,
- real_out.file, pos_out, len, flags);
+ ret = vfs_clone_file_range(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len, flags);
break;
case OVL_DEDUPE:
- ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
- real_out.file, pos_out, len,
+ ret = vfs_dedupe_file_range_one(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len,
flags);
break;
}
@@ -583,9 +585,9 @@ static int ovl_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- if (real.file->f_op->flush) {
+ if (fd_file(real)->f_op->flush) {
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- err = real.file->f_op->flush(real.file, id);
+ err = fd_file(real)->f_op->flush(fd_file(real), id);
revert_creds(old_cred);
}
fdput(real);
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index d0568c091341..e42546c6c5df 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -755,11 +755,6 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
{
struct ovl_opt_set set = ctx->set;
- if (ctx->nr_data > 0 && !config->metacopy) {
- pr_err("lower data-only dirs require metacopy support.\n");
- return -EINVAL;
- }
-
/* Workdir/index are useless in non-upper mount */
if (!config->upperdir) {
if (config->workdir) {
@@ -911,6 +906,39 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
config->metacopy = false;
}
+ /*
+ * Fail if we don't have trusted xattr capability and a feature was
+ * explicitly requested that requires them.
+ */
+ if (!config->userxattr && !capable(CAP_SYS_ADMIN)) {
+ if (set.redirect &&
+ config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
+ pr_err("redirect_dir requires permission to access trusted xattrs\n");
+ return -EPERM;
+ }
+ if (config->metacopy && set.metacopy) {
+ pr_err("metacopy requires permission to access trusted xattrs\n");
+ return -EPERM;
+ }
+ if (config->verity_mode) {
+ pr_err("verity requires permission to access trusted xattrs\n");
+ return -EPERM;
+ }
+ if (ctx->nr_data > 0) {
+ pr_err("lower data-only dirs require permission to access trusted xattrs\n");
+ return -EPERM;
+ }
+ /*
+ * Other xattr-dependent features should be disabled without
+ * great disturbance to the user in ovl_make_workdir().
+ */
+ }
+
+ if (ctx->nr_data > 0 && !config->metacopy) {
+ pr_err("lower data-only dirs require metacopy support.\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 06a231970cb5..fe511192f83c 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -202,15 +202,9 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
int ret;
ret = ovl_sync_status(ofs);
- /*
- * We have to always set the err, because the return value isn't
- * checked in syncfs, and instead indirectly return an error via
- * the sb's writeback errseq, which VFS inspects after this call.
- */
- if (ret < 0) {
- errseq_set(&sb->s_wb_err, -EIO);
+
+ if (ret < 0)
return -EIO;
- }
if (!ret)
return ret;
diff --git a/fs/pipe.c b/fs/pipe.c
index 7dff2aa50a6d..4083ba492cb6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -686,7 +686,7 @@ pipe_poll(struct file *filp, poll_table *wait)
if (filp->f_mode & FMODE_READ) {
if (!pipe_empty(head, tail))
mask |= EPOLLIN | EPOLLRDNORM;
- if (!pipe->writers && filp->f_version != pipe->w_counter)
+ if (!pipe->writers && filp->f_pipe != pipe->w_counter)
mask |= EPOLLHUP;
}
@@ -945,6 +945,7 @@ int create_pipe_files(struct file **res, int flags)
}
f->private_data = inode->i_pipe;
+ f->f_pipe = 0;
res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
&pipefifo_fops);
@@ -954,6 +955,7 @@ int create_pipe_files(struct file **res, int flags)
return PTR_ERR(res[0]);
}
res[0]->private_data = inode->i_pipe;
+ res[0]->f_pipe = 0;
res[1] = f;
stream_open(inode, res[0]);
stream_open(inode, res[1]);
@@ -1108,7 +1110,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
int ret;
- filp->f_version = 0;
+ filp->f_pipe = 0;
spin_lock(&inode->i_lock);
if (inode->i_pipe) {
@@ -1155,7 +1157,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress EPOLLHUP until we have
* seen a writer */
- filp->f_version = pipe->w_counter;
+ filp->f_pipe = pipe->w_counter;
} else {
if (wait_for_partner(pipe, &pipe->w_counter))
goto err_rd;
@@ -1427,7 +1429,7 @@ static const struct super_operations pipefs_ops = {
/*
* pipefs should _never_ be mounted by userland - too much of security hassle,
- * no real gain from having the whole whorehouse mounted. So we don't need
+ * no real gain from having the whole file system mounted. So we don't need
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 3f87297dbfdb..6c66a37522d0 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -715,8 +715,8 @@ int posix_acl_update_mode(struct mnt_idmap *idmap,
return error;
if (error == 0)
*acl = NULL;
- if (!vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)) &&
- !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
+ if (!in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
mode &= ~S_ISGID;
*mode_p = mode;
return 0;
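The replacement helper is shared with the chmod/chown paths; a sketch of its assumed definition (fs/attr.c):
/*
 * in_group_or_capable(idmap, inode, vfsgid) returns true when
 * vfsgid_in_group_p(vfsgid) holds or when the caller is
 * capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID) -- exactly the
 * pair of checks open-coded here before, so the setgid-stripping
 * rule now lives in one place.
 */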
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 72a1acd03675..b31283d81c52 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -85,6 +85,7 @@
#include <linux/elf.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
+#include <linux/fs_parser.h>
#include <linux/fs_struct.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
@@ -117,6 +118,40 @@
static u8 nlink_tid __ro_after_init;
static u8 nlink_tgid __ro_after_init;
+enum proc_mem_force {
+ PROC_MEM_FORCE_ALWAYS,
+ PROC_MEM_FORCE_PTRACE,
+ PROC_MEM_FORCE_NEVER
+};
+
+static enum proc_mem_force proc_mem_force_override __ro_after_init =
+ IS_ENABLED(CONFIG_PROC_MEM_NO_FORCE) ? PROC_MEM_FORCE_NEVER :
+ IS_ENABLED(CONFIG_PROC_MEM_FORCE_PTRACE) ? PROC_MEM_FORCE_PTRACE :
+ PROC_MEM_FORCE_ALWAYS;
+
+static const struct constant_table proc_mem_force_table[] __initconst = {
+ { "always", PROC_MEM_FORCE_ALWAYS },
+ { "ptrace", PROC_MEM_FORCE_PTRACE },
+ { "never", PROC_MEM_FORCE_NEVER },
+ { }
+};
+
+static int __init early_proc_mem_force_override(char *buf)
+{
+ if (!buf)
+ return -EINVAL;
+
+ /*
+ * lookup_constant() defaults to proc_mem_force_override to preserve
+ * the initial Kconfig choice in case an invalid param gets passed.
+ */
+ proc_mem_force_override = lookup_constant(proc_mem_force_table,
+ buf, proc_mem_force_override);
+
+ return 0;
+}
+early_param("proc_mem.force_override", early_proc_mem_force_override);
+
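A usage sketch for the new boot parameter (values taken from the table above; the default still comes from Kconfig):

    proc_mem.force_override=ptrace

With "ptrace", writes to /proc/<pid>/mem use FOLL_FORCE only while the target is actively ptraced by the writing task; "never" drops FOLL_FORCE entirely, and "always" preserves the historic behavior.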
struct pid_entry {
const char *name;
unsigned int len;
@@ -827,12 +862,31 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
static int mem_open(struct inode *inode, struct file *file)
{
- int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
-
- /* OK to pass negative loff_t, we can catch out-of-range */
- file->f_mode |= FMODE_UNSIGNED_OFFSET;
+ if (WARN_ON_ONCE(!(file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)))
+ return -EINVAL;
+ return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+}
- return ret;
+static bool proc_mem_foll_force(struct file *file, struct mm_struct *mm)
+{
+ struct task_struct *task;
+ bool ptrace_active = false;
+
+ switch (proc_mem_force_override) {
+ case PROC_MEM_FORCE_NEVER:
+ return false;
+ case PROC_MEM_FORCE_PTRACE:
+ task = get_proc_task(file_inode(file));
+ if (task) {
+ ptrace_active = READ_ONCE(task->ptrace) &&
+ READ_ONCE(task->mm) == mm &&
+ READ_ONCE(task->parent) == current;
+ put_task_struct(task);
+ }
+ return ptrace_active;
+ default:
+ return true;
+ }
}
static ssize_t mem_rw(struct file *file, char __user *buf,
@@ -855,7 +909,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!mmget_not_zero(mm))
goto free;
- flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
+ flags = write ? FOLL_WRITE : 0;
+ if (proc_mem_foll_force(file, mm))
+ flags |= FOLL_FORCE;
while (count > 0) {
size_t this_len = min_t(size_t, count, PAGE_SIZE);
@@ -932,6 +988,7 @@ static const struct file_operations proc_mem_operations = {
.write = mem_write,
.open = mem_open,
.release = mem_release,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static int environ_open(struct inode *inode, struct file *file)
@@ -2276,8 +2333,8 @@ proc_map_files_instantiate(struct dentry *dentry,
inode->i_op = &proc_map_files_link_inode_operations;
inode->i_size = 64;
- d_set_d_op(dentry, &tid_map_files_dentry_operations);
- return d_splice_alias(inode, dentry);
+ return proc_splice_unmountable(inode, dentry,
+ &tid_map_files_dentry_operations);
}
static struct dentry *proc_map_files_lookup(struct inode *dir,
@@ -2456,13 +2513,13 @@ static void *timers_start(struct seq_file *m, loff_t *pos)
if (!tp->sighand)
return ERR_PTR(-ESRCH);
- return seq_list_start(&tp->task->signal->posix_timers, *pos);
+ return seq_hlist_start(&tp->task->signal->posix_timers, *pos);
}
static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
{
struct timers_private *tp = m->private;
- return seq_list_next(v, &tp->task->signal->posix_timers, pos);
+ return seq_hlist_next(v, &tp->task->signal->posix_timers, pos);
}
static void timers_stop(struct seq_file *m, void *v)
@@ -2491,7 +2548,7 @@ static int show_timer(struct seq_file *m, void *v)
[SIGEV_THREAD] = "thread",
};
- timer = list_entry((struct list_head *)v, struct k_itimer, list);
+ timer = hlist_entry((struct hlist_node *)v, struct k_itimer, list);
notify = timer->it_sigev_notify;
seq_printf(m, "ID: %d\n", timer->it_id);
@@ -2569,10 +2626,11 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
}
task_lock(p);
- if (slack_ns == 0)
- p->timer_slack_ns = p->default_timer_slack_ns;
- else
- p->timer_slack_ns = slack_ns;
+ if (rt_or_dl_task_policy(p))
+ slack_ns = 0;
+ else if (slack_ns == 0)
+ slack_ns = p->default_timer_slack_ns;
+ p->timer_slack_ns = slack_ns;
task_unlock(p);
out:
@@ -3870,12 +3928,12 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit_dots(file, ctx))
return 0;
- /* f_version caches the tgid value that the last readdir call couldn't
- * return. lseek aka telldir automagically resets f_version to 0.
+ /* We cache the tgid value that the last readdir call couldn't
+ * return and lseek resets it to 0.
*/
ns = proc_pid_ns(inode->i_sb);
- tid = (int)file->f_version;
- file->f_version = 0;
+ tid = (int)(intptr_t)file->private_data;
+ file->private_data = NULL;
for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
task;
task = next_tid(task), ctx->pos++) {
@@ -3890,7 +3948,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
proc_task_instantiate, task, NULL)) {
/* returning this tgid failed, save it as the first
* pid for the next readir call */
- file->f_version = (u64)tid;
+ file->private_data = (void *)(intptr_t)tid;
put_task_struct(task);
break;
}
@@ -3915,6 +3973,24 @@ static int proc_task_getattr(struct mnt_idmap *idmap,
return 0;
}
+/*
+ * proc_task_readdir() sets @file->private_data to a positive integer
+ * value, so casting that to u64 is safe. generic_llseek_cookie() will
+ * set @cookie to 0, so casting to an int is safe. The WARN_ON_ONCE() is
+ * here to catch any unexpected change in behavior either in
+ * proc_task_readdir() or generic_llseek_cookie().
+ */
+static loff_t proc_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ u64 cookie = (u64)(intptr_t)file->private_data;
+ loff_t off;
+
+ off = generic_llseek_cookie(file, offset, whence, &cookie);
+ WARN_ON_ONCE(cookie > INT_MAX);
+ file->private_data = (void *)(intptr_t)cookie; /* serialized by f_pos_lock */
+ return off;
+}
+
static const struct inode_operations proc_task_inode_operations = {
.lookup = proc_task_lookup,
.getattr = proc_task_getattr,
@@ -3925,7 +4001,7 @@ static const struct inode_operations proc_task_inode_operations = {
static const struct file_operations proc_task_operations = {
.read = generic_read_dir,
.iterate_shared = proc_task_readdir,
- .llseek = generic_file_llseek,
+ .llseek = proc_dir_llseek,
};
void __init set_proc_pid_nlink(void)
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
index e0758fe7936d..b7cab1ad990d 100644
--- a/fs/proc/consoles.c
+++ b/fs/proc/consoles.c
@@ -21,6 +21,7 @@ static int show_console_dev(struct seq_file *m, void *v)
{ CON_ENABLED, 'E' },
{ CON_CONSDEV, 'C' },
{ CON_BOOT, 'B' },
+ { CON_NBCON, 'N' },
{ CON_PRINTBUFFER, 'p' },
{ CON_BRL, 'b' },
{ CON_ANYTIME, 'a' },
@@ -58,8 +59,8 @@ static int show_console_dev(struct seq_file *m, void *v)
seq_printf(m, "%s%d", con->name, con->index);
seq_pad(m, ' ');
seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
- con->write ? 'W' : '-', con->unblank ? 'U' : '-',
- flags);
+ ((con->flags & CON_NBCON) || con->write) ? 'W' : '-',
+ con->unblank ? 'U' : '-', flags);
if (dev)
seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
@@ -68,6 +69,7 @@ static int show_console_dev(struct seq_file *m, void *v)
}
static void *c_start(struct seq_file *m, loff_t *pos)
+ __acquires(&console_mutex)
{
struct console *con;
loff_t off = 0;
@@ -94,6 +96,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
}
static void c_stop(struct seq_file *m, void *v)
+ __releases(&console_mutex)
{
console_list_unlock();
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 586bbc84ca04..1f54a54bfb91 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -59,7 +59,7 @@ static int seq_show(struct seq_file *m, void *v)
real_mount(file->f_path.mnt)->mnt_id,
file_inode(file)->i_ino);
- /* show_fd_locks() never deferences files so a stale value is safe */
+ /* show_fd_locks() never dereferences files, so a stale value is safe */
show_fd_locks(m, file, files);
if (seq_has_overflowed(m))
goto out;
@@ -220,8 +220,8 @@ static struct dentry *proc_fd_instantiate(struct dentry *dentry,
ei->op.proc_get_link = proc_fd_link;
tid_fd_update_inode(task, inode, data->mode);
- d_set_d_op(dentry, &tid_fd_dentry_operations);
- return d_splice_alias(inode, dentry);
+ return proc_splice_unmountable(inode, dentry,
+ &tid_fd_dentry_operations);
}
static struct dentry *proc_lookupfd_common(struct inode *dir,
@@ -312,14 +312,14 @@ static int proc_readfd_count(struct inode *inode, loff_t *count)
return 0;
}
-static int proc_readfd(struct file *file, struct dir_context *ctx)
+static int proc_fd_iterate(struct file *file, struct dir_context *ctx)
{
return proc_readfd_common(file, ctx, proc_fd_instantiate);
}
const struct file_operations proc_fd_operations = {
.read = generic_read_dir,
- .iterate_shared = proc_readfd,
+ .iterate_shared = proc_fd_iterate,
.llseek = generic_file_llseek,
};
@@ -397,8 +397,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
inode->i_fop = &proc_fdinfo_file_operations;
tid_fd_update_inode(task, inode, 0);
- d_set_d_op(dentry, &tid_fd_dentry_operations);
- return d_splice_alias(inode, dentry);
+ return proc_splice_unmountable(inode, dentry,
+ &tid_fd_dentry_operations);
}
static struct dentry *
@@ -407,7 +407,7 @@ proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags)
return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}
-static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
+static int proc_fdinfo_iterate(struct file *file, struct dir_context *ctx)
{
return proc_readfd_common(file, ctx,
proc_fdinfo_instantiate);
@@ -421,6 +421,6 @@ const struct inode_operations proc_fdinfo_inode_operations = {
const struct file_operations proc_fdinfo_operations = {
.read = generic_read_dir,
- .iterate_shared = proc_readfdinfo,
+ .iterate_shared = proc_fdinfo_iterate,
.llseek = generic_file_llseek,
};
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index c02f1e63f82d..dbe82cf23ee4 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -464,9 +464,9 @@ struct proc_dir_entry *proc_symlink(const char *name,
(S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
if (ent) {
- ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
+ ent->size = strlen(dest);
+ ent->data = kmemdup(dest, ent->size + 1, GFP_KERNEL);
if (ent->data) {
- strcpy((char*)ent->data,dest);
ent->proc_iops = &proc_link_inode_operations;
ent = proc_register(parent, ent);
} else {
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d19434e2a58e..626ad7bd94f2 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -303,9 +303,7 @@ static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- typeof_member(struct proc_ops, proc_read) read;
-
- read = pde->proc_ops->proc_read;
+ __auto_type read = pde->proc_ops->proc_read;
if (read)
return read(file, buf, count, ppos);
return -EIO;
@@ -327,9 +325,7 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- typeof_member(struct proc_ops, proc_write) write;
-
- write = pde->proc_ops->proc_write;
+ __auto_type write = pde->proc_ops->proc_write;
if (write)
return write(file, buf, count, ppos);
return -EIO;
@@ -351,9 +347,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
{
- typeof_member(struct proc_ops, proc_poll) poll;
-
- poll = pde->proc_ops->proc_poll;
+ __auto_type poll = pde->proc_ops->proc_poll;
if (poll)
return poll(file, pts);
return DEFAULT_POLLMASK;
@@ -375,9 +369,7 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
- typeof_member(struct proc_ops, proc_ioctl) ioctl;
-
- ioctl = pde->proc_ops->proc_ioctl;
+ __auto_type ioctl = pde->proc_ops->proc_ioctl;
if (ioctl)
return ioctl(file, cmd, arg);
return -ENOTTY;
@@ -400,9 +392,7 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
#ifdef CONFIG_COMPAT
static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
- typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
-
- compat_ioctl = pde->proc_ops->proc_compat_ioctl;
+ __auto_type compat_ioctl = pde->proc_ops->proc_compat_ioctl;
if (compat_ioctl)
return compat_ioctl(file, cmd, arg);
return -ENOTTY;
@@ -424,9 +414,7 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
{
- typeof_member(struct proc_ops, proc_mmap) mmap;
-
- mmap = pde->proc_ops->proc_mmap;
+ __auto_type mmap = pde->proc_ops->proc_mmap;
if (mmap)
return mmap(file, vma);
return -EIO;
@@ -483,7 +471,6 @@ static int proc_reg_open(struct inode *inode, struct file *file)
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
typeof_member(struct proc_ops, proc_open) open;
- typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
if (!pde->proc_ops->proc_lseek)
@@ -510,7 +497,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
if (!use_pde(pde))
return -ENOENT;
- release = pde->proc_ops->proc_release;
+ __auto_type release = pde->proc_ops->proc_release;
if (release) {
pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
if (!pdeo) {
@@ -547,9 +534,7 @@ static int proc_reg_release(struct inode *inode, struct file *file)
struct pde_opener *pdeo;
if (pde_is_permanent(pde)) {
- typeof_member(struct proc_ops, proc_release) release;
-
- release = pde->proc_ops->proc_release;
+ __auto_type release = pde->proc_ops->proc_release;
if (release) {
return release(inode, file);
}
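These conversions lean on the GNU C __auto_type extension; a minimal illustration (not part of the patch):
/*
 *   __auto_type read = pde->proc_ops->proc_read;
 *
 * declares "read" with the type of its initializer, replacing the
 * two-line typeof_member(struct proc_ops, proc_read) dance without
 * repeating the struct and member names.
 */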
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index a8a8576d8592..87e4d6282025 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -166,8 +166,7 @@ static inline int folio_precise_page_mapcount(struct folio *folio,
{
int mapcount = atomic_read(&page->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
if (folio_test_large(folio))
mapcount += folio_entire_mapcount(folio);
@@ -349,3 +348,16 @@ static inline void pde_force_lookup(struct proc_dir_entry *pde)
/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
pde->proc_dops = &proc_net_dentry_ops;
}
+
+/*
+ * Add a new procfs dentry that can't serve as a mountpoint. That should
+ * encompass anything that is ephemeral and can just disappear while the
+ * process is still around.
+ */
+static inline struct dentry *proc_splice_unmountable(struct inode *inode,
+ struct dentry *dentry, const struct dentry_operations *d_ops)
+{
+ d_set_d_op(dentry, d_ops);
+ dont_mount(dentry);
+ return d_splice_alias(inode, dentry);
+}
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 8e08a9a1b7ed..7d0acdad74e2 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -235,7 +235,7 @@ static int kcore_ram_list(struct list_head *list)
int nid, ret;
unsigned long end_pfn;
- /* Not inialized....update now */
+ /* Not initialized....update now */
/* find out "max pfn" */
end_pfn = 0;
for_each_node_state(nid, N_MEMORY) {
diff --git a/fs/proc/page.c b/fs/proc/page.c
index b7a5c84b5819..a55f5acefa97 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -182,7 +182,6 @@ u64 stable_page_flags(const struct page *page)
#endif
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
- u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);
@@ -207,18 +206,16 @@ u64 stable_page_flags(const struct page *page)
u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
-#endif
-
u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
- u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
+ u |= kpf_copy_bit(k, KPF_OWNER_2, PG_owner_2);
u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
u |= kpf_copy_bit(k, KPF_ARCH_3, PG_arch_3);
#endif
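
For orientation, each added or dropped line above corresponds to one bit exported through /proc/kpageflags; kpf_copy_bit() just transplants a single bit from the kernel flags word to its user-visible position. Its shape, as a sketch matching the helper defined earlier in fs/proc/page.c:

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}
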
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 9553e77c9d31..d11ebc055ce0 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -29,8 +29,13 @@ static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
-/* Support for permanently empty directories */
-static struct ctl_table sysctl_mount_point[] = { };
+/*
+ * Support for permanently empty directories.
+ * Must be non-empty to avoid sharing an address with other tables.
+ */
+static struct ctl_table sysctl_mount_point[] = {
+ { }
+};
/**
* register_sysctl_mount_point() - registers a sysctl mount point
@@ -42,7 +47,7 @@ static struct ctl_table sysctl_mount_point[] = { };
*/
struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
- return register_sysctl(path, sysctl_mount_point);
+ return register_sysctl_sz(path, sysctl_mount_point, 0);
}
EXPORT_SYMBOL(register_sysctl_mount_point);
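
The sentinel entry matters because a zero-size array occupies no storage, so the toolchain is free to place it at the same address as a neighbouring object, and sysctl code that identifies the mount-point table by pointer comparison could then match an unrelated registration. Passing size 0 to register_sysctl_sz() keeps the table logically empty. A userspace illustration of the addressing hazard; whether the empty arrays actually coincide is toolchain-dependent, the point is only that uniqueness cannot be relied on:

#include <stdio.h>

struct entry { int v; };

static struct entry a[] = {};		/* zero size: address not guaranteed unique */
static struct entry b[] = {};
static struct entry c[] = { {0} };	/* one sentinel entry: unique address */

int main(void)
{
	printf("a=%p b=%p c=%p\n", (void *)a, (void *)b, (void *)c);
	return 0;
}
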
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5f171ad7b436..72f14fd59c2d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -543,21 +543,6 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
}
}
- if (karg.build_id_size) {
- __u32 build_id_sz;
-
- err = build_id_parse(vma, build_id_buf, &build_id_sz);
- if (err) {
- karg.build_id_size = 0;
- } else {
- if (karg.build_id_size < build_id_sz) {
- err = -ENAMETOOLONG;
- goto out;
- }
- karg.build_id_size = build_id_sz;
- }
- }
-
if (karg.vma_name_size) {
size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
const struct path *path;
@@ -976,7 +961,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_PKEY_BIT0)] = "",
[ilog2(VM_PKEY_BIT1)] = "",
[ilog2(VM_PKEY_BIT2)] = "",
+#if VM_PKEY_BIT3
[ilog2(VM_PKEY_BIT3)] = "",
+#endif
#if VM_PKEY_BIT4
[ilog2(VM_PKEY_BIT4)] = "",
#endif
@@ -987,8 +974,10 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
#ifdef CONFIG_X86_USER_SHADOW_STACK
[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
[ilog2(VM_DROPPABLE)] = "dp",
+#endif
+#ifdef CONFIG_64BIT
[ilog2(VM_SEALED)] = "sl",
#endif
};
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 3497ede88aa0..f56b066ab80c 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -275,7 +275,7 @@ void pstore_record_init(struct pstore_record *record,
* end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
unsigned long total = 0;
@@ -285,16 +285,16 @@ static void pstore_dump(struct kmsg_dumper *dumper,
int saved_ret = 0;
int ret;
- why = kmsg_dump_reason_str(reason);
+ why = kmsg_dump_reason_str(detail->reason);
- if (pstore_cannot_block_path(reason)) {
- if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
+ if (pstore_cannot_block_path(detail->reason)) {
+ if (!raw_spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
pr_err("dump skipped in %s path because of concurrent dump\n",
in_nmi() ? "NMI" : why);
return;
}
} else {
- spin_lock_irqsave(&psinfo->buf_lock, flags);
+ raw_spin_lock_irqsave(&psinfo->buf_lock, flags);
}
kmsg_dump_rewind(&iter);
@@ -311,7 +311,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
pstore_record_init(&record, psinfo);
record.type = PSTORE_TYPE_DMESG;
record.count = oopscount;
- record.reason = reason;
+ record.reason = detail->reason;
record.part = part;
record.buf = psinfo->buf;
@@ -352,7 +352,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
ret = psinfo->write(&record);
- if (ret == 0 && reason == KMSG_DUMP_OOPS) {
+ if (ret == 0 && detail->reason == KMSG_DUMP_OOPS) {
pstore_new_entry = 1;
pstore_timer_kick();
} else {
@@ -364,7 +364,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
total += record.size;
part++;
}
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ raw_spin_unlock_irqrestore(&psinfo->buf_lock, flags);
if (saved_ret) {
pr_err_once("backend (%s) writing error (%d)\n", psinfo->name,
@@ -503,7 +503,7 @@ int pstore_register(struct pstore_info *psi)
psi->write_user = pstore_write_user_compat;
psinfo = psi;
mutex_init(&psinfo->read_mutex);
- spin_lock_init(&psinfo->buf_lock);
+ raw_spin_lock_init(&psinfo->buf_lock);
if (psi->flags & PSTORE_FLAGS_DMESG)
allocate_buf_for_compression();
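
pstore_dump() runs in oops, panic and NMI context, where a spinlock_t must not be taken on PREEMPT_RT kernels (there it becomes a sleeping lock); converting buf_lock to raw_spinlock_t keeps it a true spinning lock everywhere. A sketch of the resulting locking pattern, kernel context assumed:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(buf_lock);	/* never turns into a sleeping lock */

static void dump_record(bool atomic_path)
{
	unsigned long flags;

	if (atomic_path) {
		/* In NMI/panic context, back off rather than block. */
		if (!raw_spin_trylock_irqsave(&buf_lock, flags))
			return;
	} else {
		raw_spin_lock_irqsave(&buf_lock, flags);
	}
	/* ... emit the record into the shared buffer ... */
	raw_spin_unlock_irqrestore(&buf_lock, flags);
}
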
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index c1cfb8a19e9d..b4d10e45f2e4 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -24,13 +24,15 @@ static unsigned qnx6_lfile_checksum(char *name, unsigned size)
return crc;
}
-static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
+static void *qnx6_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
- struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (!IS_ERR(page))
- kmap(page);
- return page;
+ struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
+
+ if (IS_ERR(folio))
+ return folio;
+ *foliop = folio;
+ return kmap_local_folio(folio, 0);
}
static unsigned last_entry(struct inode *inode, unsigned long page_nr)
@@ -44,19 +46,20 @@ static unsigned last_entry(struct inode *inode, unsigned long page_nr)
static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
struct qnx6_long_dir_entry *de,
- struct page **p)
+ struct folio **foliop)
{
struct qnx6_sb_info *sbi = QNX6_SB(sb);
u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
- /* within page */
- u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
+ u32 offs;
struct address_space *mapping = sbi->longfile->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
- kmap(*p = page);
- return (struct qnx6_long_filename *)(page_address(page) + offs);
+ struct folio *folio = read_mapping_folio(mapping, n, NULL);
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ offs = offset_in_folio(folio, s << sb->s_blocksize_bits);
+ *foliop = folio;
+ return kmap_local_folio(folio, offs);
}
static int qnx6_dir_longfilename(struct inode *inode,
@@ -67,7 +70,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
struct qnx6_long_filename *lf;
struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
- struct page *page;
+ struct folio *folio;
int lf_size;
if (de->de_size != 0xff) {
@@ -76,7 +79,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
pr_err("invalid direntry size (%i).\n", de->de_size);
return 0;
}
- lf = qnx6_longname(s, de, &page);
+ lf = qnx6_longname(s, de, &folio);
if (IS_ERR(lf)) {
pr_err("Error reading longname\n");
return 0;
@@ -87,7 +90,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
if (lf_size > QNX6_LONG_NAME_MAX) {
pr_debug("file %s\n", lf->lf_fname);
pr_err("Filename too long (%i)\n", lf_size);
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
return 0;
}
@@ -100,11 +103,11 @@ static int qnx6_dir_longfilename(struct inode *inode,
pr_debug("qnx6_readdir:%.*s inode:%u\n",
lf_size, lf->lf_fname, de_inode);
if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
return 0;
}
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
/* success */
return 1;
}
@@ -117,26 +120,27 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode);
unsigned long n = pos >> PAGE_SHIFT;
- unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
+ unsigned offset = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
bool done = false;
ctx->pos = pos;
if (ctx->pos >= inode->i_size)
return 0;
- for ( ; !done && n < npages; n++, start = 0) {
- struct page *page = qnx6_get_page(inode, n);
- int limit = last_entry(inode, n);
+ for ( ; !done && n < npages; n++, offset = 0) {
struct qnx6_dir_entry *de;
- int i = start;
+ struct folio *folio;
+ char *kaddr = qnx6_get_folio(inode, n, &folio);
+ char *limit;
- if (IS_ERR(page)) {
+ if (IS_ERR(kaddr)) {
pr_err("%s(): read failed\n", __func__);
ctx->pos = (n + 1) << PAGE_SHIFT;
- return PTR_ERR(page);
+ return PTR_ERR(kaddr);
}
- de = ((struct qnx6_dir_entry *)page_address(page)) + start;
- for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
+ de = (struct qnx6_dir_entry *)(kaddr + offset);
+ limit = kaddr + last_entry(inode, n);
+ for (; (char *)de < limit; de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
int size = de->de_size;
u32 no_inode = fs32_to_cpu(sbi, de->de_inode);
@@ -164,7 +168,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
}
}
}
- qnx6_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
@@ -177,23 +181,23 @@ static unsigned qnx6_long_match(int len, const char *name,
{
struct super_block *s = dir->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s);
- struct page *page;
+ struct folio *folio;
int thislen;
- struct qnx6_long_filename *lf = qnx6_longname(s, de, &page);
+ struct qnx6_long_filename *lf = qnx6_longname(s, de, &folio);
if (IS_ERR(lf))
return 0;
thislen = fs16_to_cpu(sbi, lf->lf_size);
if (len != thislen) {
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
return 0;
}
if (memcmp(name, lf->lf_fname, len) == 0) {
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
return fs32_to_cpu(sbi, de->de_inode);
}
- qnx6_put_page(page);
+ folio_release_kmap(folio, lf);
return 0;
}
@@ -210,20 +214,17 @@ static unsigned qnx6_match(struct super_block *s, int len, const char *name,
}
-unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
- struct page **res_page)
+unsigned qnx6_find_ino(int len, struct inode *dir, const char *name)
{
struct super_block *s = dir->i_sb;
struct qnx6_inode_info *ei = QNX6_I(dir);
- struct page *page = NULL;
+ struct folio *folio;
unsigned long start, n;
unsigned long npages = dir_pages(dir);
unsigned ino;
struct qnx6_dir_entry *de;
struct qnx6_long_dir_entry *lde;
- *res_page = NULL;
-
if (npages == 0)
return 0;
start = ei->i_dir_start_lookup;
@@ -232,12 +233,11 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
n = start;
do {
- page = qnx6_get_page(dir, n);
- if (!IS_ERR(page)) {
+ de = qnx6_get_folio(dir, n, &folio);
+ if (!IS_ERR(de)) {
int limit = last_entry(dir, n);
int i;
- de = (struct qnx6_dir_entry *)page_address(page);
for (i = 0; i < limit; i++, de++) {
if (len <= QNX6_SHORT_NAME_MAX) {
/* short filename */
@@ -256,7 +256,7 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
} else
pr_err("undefined filename size in inode.\n");
}
- qnx6_put_page(page);
+ folio_release_kmap(folio, de - i);
}
if (++n >= npages)
@@ -265,8 +265,8 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
return 0;
found:
- *res_page = page;
ei->i_dir_start_lookup = n;
+ folio_release_kmap(folio, de);
return ino;
}
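
The qnx6 changes follow the standard folio conversion idiom: read_mapping_folio() to fetch the folio, kmap_local_folio() at a folio-relative offset, and a single folio_release_kmap() to drop both the mapping and the reference. A condensed sketch of the pattern, kernel context assumed and names illustrative:

static void *map_block(struct address_space *mapping, pgoff_t n,
		       struct folio **foliop)
{
	struct folio *folio = read_mapping_folio(mapping, n, NULL);

	if (IS_ERR(folio))
		return folio;			/* caller tests with IS_ERR() */
	*foliop = folio;
	return kmap_local_folio(folio, 0);
}

/*
 * Usage:
 *	struct folio *folio;
 *	char *kaddr = map_block(mapping, n, &folio);
 *	if (!IS_ERR(kaddr)) {
 *		... walk the entries ...
 *		folio_release_kmap(folio, kaddr);  // kunmap_local() + folio_put()
 *	}
 */
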
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 4f1735b882b1..85925ec0051a 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -184,17 +184,17 @@ static const char *qnx6_checkroot(struct super_block *s)
struct qnx6_dir_entry *dir_entry;
struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping;
- struct page *page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
+ struct folio *folio = read_mapping_folio(mapping, 0, NULL);
+
+ if (IS_ERR(folio))
return "error reading root directory";
- kmap(page);
- dir_entry = page_address(page);
+ dir_entry = kmap_local_folio(folio, 0);
for (i = 0; i < 2; i++) {
/* maximum 3 bytes - due to match_root limitation */
if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
error = 1;
}
- qnx6_put_page(page);
+ folio_release_kmap(folio, dir_entry);
if (error)
return "error reading root directory.";
return NULL;
@@ -518,7 +518,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
struct inode *inode;
struct qnx6_inode_info *ei;
struct address_space *mapping;
- struct page *page;
+ struct folio *folio;
u32 n, offs;
inode = iget_locked(sb, ino);
@@ -538,17 +538,16 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
return ERR_PTR(-EIO);
}
n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
- offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
mapping = sbi->inodes->i_mapping;
- page = read_mapping_page(mapping, n, NULL);
- if (IS_ERR(page)) {
+ folio = read_mapping_folio(mapping, n, NULL);
+ if (IS_ERR(folio)) {
pr_err("major problem: unable to read inode from dev %s\n",
sb->s_id);
iget_failed(inode);
- return ERR_CAST(page);
+ return ERR_CAST(folio);
}
- kmap(page);
- raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs;
+ offs = offset_in_folio(folio, (ino - 1) << QNX6_INODE_SIZE_BITS);
+ raw_inode = kmap_local_folio(folio, offs);
inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode);
i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
@@ -578,7 +577,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
inode->i_mapping->a_ops = &qnx6_aops;
} else
init_special_inode(inode, inode->i_mode, 0);
- qnx6_put_page(page);
+ folio_release_kmap(folio, raw_inode);
unlock_new_inode(inode);
return inode;
}
diff --git a/fs/qnx6/namei.c b/fs/qnx6/namei.c
index e2e98e653b8d..0f0755a9ecb5 100644
--- a/fs/qnx6/namei.c
+++ b/fs/qnx6/namei.c
@@ -17,7 +17,6 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
unsigned ino;
- struct page *page;
struct inode *foundinode = NULL;
const char *name = dentry->d_name.name;
int len = dentry->d_name.len;
@@ -25,10 +24,9 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
if (len > QNX6_LONG_NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
- ino = qnx6_find_entry(len, dir, name, &page);
+ ino = qnx6_find_ino(len, dir, name);
if (ino) {
foundinode = qnx6_iget(dir->i_sb, ino);
- qnx6_put_page(page);
if (IS_ERR(foundinode))
pr_debug("lookup->iget -> error %ld\n",
PTR_ERR(foundinode));
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index 34a6b126a3a9..56ed1367499e 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -126,11 +126,4 @@ static inline __fs16 cpu_to_fs16(struct qnx6_sb_info *sbi, __u16 n)
extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
int silent);
-static inline void qnx6_put_page(struct page *page)
-{
- kunmap(page);
- put_page(page);
-}
-
-extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
- struct page **res_page);
+unsigned qnx6_find_ino(int len, struct inode *dir, const char *name);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 7ae885e6d5d7..b40410cd39af 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -163,13 +163,12 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
-int register_quota_format(struct quota_format_type *fmt)
+void register_quota_format(struct quota_format_type *fmt)
{
spin_lock(&dq_list_lock);
fmt->qf_next = quota_formats;
quota_formats = fmt;
spin_unlock(&dq_list_lock);
- return 0;
}
EXPORT_SYMBOL(register_quota_format);
@@ -1831,7 +1830,6 @@ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
- return;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1873,7 +1871,6 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
- return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -2406,7 +2403,7 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
- struct quota_format_type *fmt = find_quota_format(format_id);
+ struct quota_format_type *fmt;
struct quota_info *dqopt = sb_dqopt(sb);
int error;
@@ -2416,6 +2413,7 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
return -EINVAL;
+ fmt = find_quota_format(format_id);
if (!fmt)
return -ESRCH;
if (!sb->dq_op || !sb->s_qcop ||
@@ -2596,7 +2594,8 @@ static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
goto out_err;
}
if (sb_has_quota_limits_enabled(sb, type)) {
- ret = -EBUSY;
+ /* compatible with XFS */
+ ret = -EEXIST;
goto out_err;
}
spin_lock(&dq_state_lock);
@@ -2610,9 +2609,6 @@ out_err:
if (flags & qtype_enforce_flag(type))
dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
}
- /* Error code translation for better compatibility with XFS */
- if (ret == -EBUSY)
- ret = -EEXIST;
return ret;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 0e41fb84060f..290157bc7bec 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -980,7 +980,7 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
int ret;
f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
@@ -988,12 +988,12 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
goto out;
if (quotactl_cmd_write(cmds)) {
- ret = mnt_want_write(f.file->f_path.mnt);
+ ret = mnt_want_write(fd_file(f)->f_path.mnt);
if (ret)
goto out;
}
- sb = f.file->f_path.mnt->mnt_sb;
+ sb = fd_file(f)->f_path.mnt->mnt_sb;
if (quotactl_cmd_onoff(cmds))
down_write(&sb->s_umount);
else
@@ -1007,7 +1007,7 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
up_read(&sb->s_umount);
if (quotactl_cmd_write(cmds))
- mnt_drop_write(f.file->f_path.mnt);
+ mnt_drop_write(fd_file(f)->f_path.mnt);
out:
fdput(f);
return ret;
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 3f3e8acc05db..6f7f0b4afba9 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -235,7 +235,8 @@ static struct quota_format_type v1_quota_format = {
static int __init init_v1_quota_format(void)
{
- return register_quota_format(&v1_quota_format);
+ register_quota_format(&v1_quota_format);
+ return 0;
}
static void __exit exit_v1_quota_format(void)
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index c48c233f3bef..1fda93dcbc1b 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -440,12 +440,9 @@ static struct quota_format_type v2r1_quota_format = {
static int __init init_v2_quota_format(void)
{
- int ret;
-
- ret = register_quota_format(&v2r0_quota_format);
- if (ret)
- return ret;
- return register_quota_format(&v2r1_quota_format);
+ register_quota_format(&v2r0_quota_format);
+ register_quota_format(&v2r1_quota_format);
+ return 0;
}
static void __exit exit_v2_quota_format(void)
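
With registration reduced to a locked list insertion that cannot fail, format modules no longer have to unwind a partial registration, which is why both init functions collapse to unconditional success. The resulting shape, sketched with placeholder format structs:

static int __init init_example_quota_formats(void)
{
	/* register_quota_format() is void now; no error to propagate. */
	register_quota_format(&example_r0_format);	/* placeholder structs */
	register_quota_format(&example_r1_format);
	return 0;
}
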
diff --git a/fs/read_write.c b/fs/read_write.c
index 90e283b31ca1..64dc24afdb3a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -36,22 +36,24 @@ EXPORT_SYMBOL(generic_ro_fops);
static inline bool unsigned_offsets(struct file *file)
{
- return file->f_mode & FMODE_UNSIGNED_OFFSET;
+ return file->f_op->fop_flags & FOP_UNSIGNED_OFFSET;
}
/**
- * vfs_setpos - update the file offset for lseek
+ * vfs_setpos_cookie - update the file offset for lseek and reset cookie
* @file: file structure in question
* @offset: file offset to seek to
* @maxsize: maximum file size
+ * @cookie: cookie to reset
*
- * This is a low-level filesystem helper for updating the file offset to
- * the value specified by @offset if the given offset is valid and it is
- * not equal to the current file offset.
+ * Update the file offset to the value specified by @offset if the given
+ * offset is valid and it is not equal to the current file offset and
+ * reset the specified cookie to indicate that a seek happened.
*
* Return the specified offset on success and -EINVAL on invalid offset.
*/
-loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
+static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
+ loff_t maxsize, u64 *cookie)
{
if (offset < 0 && !unsigned_offsets(file))
return -EINVAL;
@@ -60,35 +62,48 @@ loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
if (offset != file->f_pos) {
file->f_pos = offset;
- file->f_version = 0;
+ if (cookie)
+ *cookie = 0;
}
return offset;
}
-EXPORT_SYMBOL(vfs_setpos);
/**
- * generic_file_llseek_size - generic llseek implementation for regular files
- * @file: file structure to seek on
+ * vfs_setpos - update the file offset for lseek
+ * @file: file structure in question
* @offset: file offset to seek to
- * @whence: type of seek
- * @maxsize: max size of this file in file system
- * @eof: offset used for SEEK_END position
+ * @maxsize: maximum file size
*
- * This is a variant of generic_file_llseek that allows passing in a custom
- * maximum file size and a custom EOF position, for e.g. hashed directories
+ * This is a low-level filesystem helper for updating the file offset to
+ * the value specified by @offset if the given offset is valid and it is
+ * not equal to the current file offset.
*
- * Synchronization:
- * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
- * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
- * read/writes behave like SEEK_SET against seeks.
+ * Return the specified offset on success and -EINVAL on invalid offset.
*/
-loff_t
-generic_file_llseek_size(struct file *file, loff_t offset, int whence,
- loff_t maxsize, loff_t eof)
+loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
+{
+ return vfs_setpos_cookie(file, offset, maxsize, NULL);
+}
+EXPORT_SYMBOL(vfs_setpos);
+
+/**
+ * must_set_pos - check whether f_pos has to be updated
+ * @file: file to seek on
+ * @offset: offset to use
+ * @whence: type of seek operation
+ * @eof: end of file
+ *
+ * Check whether f_pos needs to be updated and update @offset according
+ * to @whence.
+ *
+ * Return: 0 if f_pos doesn't need to be updated, 1 if f_pos has to be
+ * updated, and negative error code on failure.
+ */
+static int must_set_pos(struct file *file, loff_t *offset, int whence, loff_t eof)
{
switch (whence) {
case SEEK_END:
- offset += eof;
+ *offset += eof;
break;
case SEEK_CUR:
/*
@@ -97,23 +112,17 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
* f_pos value back to the file because a concurrent read(),
* write() or lseek() might have altered it
*/
- if (offset == 0)
- return file->f_pos;
- /*
- * f_lock protects against read/modify/write race with other
- * SEEK_CURs. Note that parallel writes and reads behave
- * like SEEK_SET.
- */
- spin_lock(&file->f_lock);
- offset = vfs_setpos(file, file->f_pos + offset, maxsize);
- spin_unlock(&file->f_lock);
- return offset;
+ if (*offset == 0) {
+ *offset = file->f_pos;
+ return 0;
+ }
+ break;
case SEEK_DATA:
/*
* In the generic case the entire file is data, so as long as
* offset isn't at the end of the file then the offset is data.
*/
- if ((unsigned long long)offset >= eof)
+ if ((unsigned long long)*offset >= eof)
return -ENXIO;
break;
case SEEK_HOLE:
@@ -121,17 +130,103 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
* There is a virtual hole at the end of the file, so as long as
* offset isn't i_size or larger, return i_size.
*/
- if ((unsigned long long)offset >= eof)
+ if ((unsigned long long)*offset >= eof)
return -ENXIO;
- offset = eof;
+ *offset = eof;
break;
}
+ return 1;
+}
+
+/**
+ * generic_file_llseek_size - generic llseek implementation for regular files
+ * @file: file structure to seek on
+ * @offset: file offset to seek to
+ * @whence: type of seek
+ * @maxsize: max size of this file in file system
+ * @eof: offset used for SEEK_END position
+ *
+ * This is a variant of generic_file_llseek that allows passing in a custom
+ * maximum file size and a custom EOF position, for e.g. hashed directories
+ *
+ * Synchronization:
+ * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
+ * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
+ * read/writes behave like SEEK_SET against seeks.
+ */
+loff_t
+generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+ loff_t maxsize, loff_t eof)
+{
+ int ret;
+
+ ret = must_set_pos(file, &offset, whence, eof);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return offset;
+
+ if (whence == SEEK_CUR) {
+ /*
+ * f_lock protects against read/modify/write race with
+ * other SEEK_CURs. Note that parallel writes and reads
+ * behave like SEEK_SET.
+ */
+ guard(spinlock)(&file->f_lock);
+ return vfs_setpos(file, file->f_pos + offset, maxsize);
+ }
+
return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
/**
+ * generic_llseek_cookie - versioned llseek implementation
+ * @file: file structure to seek on
+ * @offset: file offset to seek to
+ * @whence: type of seek
+ * @cookie: cookie to update
+ *
+ * See generic_file_llseek for a general description and locking assumptions.
+ *
+ * In contrast to generic_file_llseek, this function also resets a
+ * specified cookie to indicate a seek took place.
+ */
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie)
+{
+ struct inode *inode = file->f_mapping->host;
+ loff_t maxsize = inode->i_sb->s_maxbytes;
+ loff_t eof = i_size_read(inode);
+ int ret;
+
+ if (WARN_ON_ONCE(!cookie))
+ return -EINVAL;
+
+ /*
+ * Require that this is only used for directories that guarantee
+ * synchronization between readdir and seek so that an update to
+ * @cookie is correctly synchronized with concurrent readdir.
+ */
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS)))
+ return -EINVAL;
+
+ ret = must_set_pos(file, &offset, whence, eof);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return offset;
+
+ /* No need to hold f_lock because we know that f_pos_lock is held. */
+ if (whence == SEEK_CUR)
+ return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie);
+
+ return vfs_setpos_cookie(file, offset, maxsize, cookie);
+}
+EXPORT_SYMBOL(generic_llseek_cookie);
+
+/**
* generic_file_llseek - generic llseek implementation for regular files
* @file: file structure to seek on
* @offset: file offset to seek to
@@ -270,10 +365,8 @@ loff_t default_llseek(struct file *file, loff_t offset, int whence)
}
retval = -EINVAL;
if (offset >= 0 || unsigned_offsets(file)) {
- if (offset != file->f_pos) {
+ if (offset != file->f_pos)
file->f_pos = offset;
- file->f_version = 0;
- }
retval = offset;
}
out:
@@ -294,12 +387,12 @@ static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
{
off_t retval;
struct fd f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
retval = -EINVAL;
if (whence <= SEEK_MAX) {
- loff_t res = vfs_llseek(f.file, offset, whence);
+ loff_t res = vfs_llseek(fd_file(f), offset, whence);
retval = res;
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
@@ -330,14 +423,14 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
struct fd f = fdget_pos(fd);
loff_t offset;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
retval = -EINVAL;
if (whence > SEEK_MAX)
goto out_putf;
- offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
+ offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low,
whence);
retval = (int)offset;
@@ -610,15 +703,15 @@ ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_read(f.file, buf, count, ppos);
+ ret = vfs_read(fd_file(f), buf, count, ppos);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
return ret;
@@ -634,15 +727,15 @@ ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_write(f.file, buf, count, ppos);
+ ret = vfs_write(fd_file(f), buf, count, ppos);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -665,10 +758,10 @@ ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PREAD)
- ret = vfs_read(f.file, buf, count, &pos);
+ if (fd_file(f)->f_mode & FMODE_PREAD)
+ ret = vfs_read(fd_file(f), buf, count, &pos);
fdput(f);
}
@@ -699,10 +792,10 @@ ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PWRITE)
- ret = vfs_write(f.file, buf, count, &pos);
+ if (fd_file(f)->f_mode & FMODE_PWRITE)
+ ret = vfs_write(fd_file(f), buf, count, &pos);
fdput(f);
}
@@ -985,15 +1078,15 @@ static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_readv(f.file, vec, vlen, ppos, flags);
+ ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -1009,15 +1102,15 @@ static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_writev(f.file, vec, vlen, ppos, flags);
+ ret = vfs_writev(fd_file(f), vec, vlen, ppos, flags);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -1043,10 +1136,10 @@ static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PREAD)
- ret = vfs_readv(f.file, vec, vlen, &pos, flags);
+ if (fd_file(f)->f_mode & FMODE_PREAD)
+ ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags);
fdput(f);
}
@@ -1066,10 +1159,10 @@ static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PWRITE)
- ret = vfs_writev(f.file, vec, vlen, &pos, flags);
+ if (fd_file(f)->f_mode & FMODE_PWRITE)
+ ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags);
fdput(f);
}
@@ -1235,19 +1328,19 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
*/
retval = -EBADF;
in = fdget(in_fd);
- if (!in.file)
+ if (!fd_file(in))
goto out;
- if (!(in.file->f_mode & FMODE_READ))
+ if (!(fd_file(in)->f_mode & FMODE_READ))
goto fput_in;
retval = -ESPIPE;
if (!ppos) {
- pos = in.file->f_pos;
+ pos = fd_file(in)->f_pos;
} else {
pos = *ppos;
- if (!(in.file->f_mode & FMODE_PREAD))
+ if (!(fd_file(in)->f_mode & FMODE_PREAD))
goto fput_in;
}
- retval = rw_verify_area(READ, in.file, &pos, count);
+ retval = rw_verify_area(READ, fd_file(in), &pos, count);
if (retval < 0)
goto fput_in;
if (count > MAX_RW_COUNT)
@@ -1258,13 +1351,13 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
*/
retval = -EBADF;
out = fdget(out_fd);
- if (!out.file)
+ if (!fd_file(out))
goto fput_in;
- if (!(out.file->f_mode & FMODE_WRITE))
+ if (!(fd_file(out)->f_mode & FMODE_WRITE))
goto fput_out;
- in_inode = file_inode(in.file);
- out_inode = file_inode(out.file);
- out_pos = out.file->f_pos;
+ in_inode = file_inode(fd_file(in));
+ out_inode = file_inode(fd_file(out));
+ out_pos = fd_file(out)->f_pos;
if (!max)
max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
@@ -1284,33 +1377,33 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
* and the application is arguably buggy if it doesn't expect
* EAGAIN on a non-blocking file descriptor.
*/
- if (in.file->f_flags & O_NONBLOCK)
+ if (fd_file(in)->f_flags & O_NONBLOCK)
fl = SPLICE_F_NONBLOCK;
#endif
- opipe = get_pipe_info(out.file, true);
+ opipe = get_pipe_info(fd_file(out), true);
if (!opipe) {
- retval = rw_verify_area(WRITE, out.file, &out_pos, count);
+ retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count);
if (retval < 0)
goto fput_out;
- retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
+ retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos,
count, fl);
} else {
- if (out.file->f_flags & O_NONBLOCK)
+ if (fd_file(out)->f_flags & O_NONBLOCK)
fl |= SPLICE_F_NONBLOCK;
- retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
+ retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl);
}
if (retval > 0) {
add_rchar(current, retval);
add_wchar(current, retval);
- fsnotify_access(in.file);
- fsnotify_modify(out.file);
- out.file->f_pos = out_pos;
+ fsnotify_access(fd_file(in));
+ fsnotify_modify(fd_file(out));
+ fd_file(out)->f_pos = out_pos;
if (ppos)
*ppos = pos;
else
- in.file->f_pos = pos;
+ fd_file(in)->f_pos = pos;
}
inc_syscr(current);
@@ -1583,11 +1676,11 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
ssize_t ret = -EBADF;
f_in = fdget(fd_in);
- if (!f_in.file)
+ if (!fd_file(f_in))
goto out2;
f_out = fdget(fd_out);
- if (!f_out.file)
+ if (!fd_file(f_out))
goto out1;
ret = -EFAULT;
@@ -1595,21 +1688,21 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
goto out;
} else {
- pos_in = f_in.file->f_pos;
+ pos_in = fd_file(f_in)->f_pos;
}
if (off_out) {
if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
goto out;
} else {
- pos_out = f_out.file->f_pos;
+ pos_out = fd_file(f_out)->f_pos;
}
ret = -EINVAL;
if (flags != 0)
goto out;
- ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
+ ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len,
flags);
if (ret > 0) {
pos_in += ret;
@@ -1619,14 +1712,14 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
ret = -EFAULT;
} else {
- f_in.file->f_pos = pos_in;
+ fd_file(f_in)->f_pos = pos_in;
}
if (off_out) {
if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
ret = -EFAULT;
} else {
- f_out.file->f_pos = pos_out;
+ fd_file(f_out)->f_pos = pos_out;
}
}
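
generic_llseek_cookie() exists because f_version is on its way out: a directory that caches readdir state keeps a private cookie instead, and that cookie must be zeroed exactly when f_pos moves. The helper insists on FMODE_ATOMIC_POS so the update is serialized against readdir by f_pos_lock. A hedged sketch of a filesystem ->llseek built on it; the private-data layout is hypothetical:

static loff_t example_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct example_dir_state *state = file->private_data;	/* hypothetical */

	/* Zeroes state->cookie if, and only if, the position changes. */
	return generic_llseek_cookie(file, offset, whence, &state->cookie);
}
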
diff --git a/fs/readdir.c b/fs/readdir.c
index d6c82421902a..6d29cab8576e 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -225,10 +225,10 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
.dirent = dirent
};
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (buf.result)
error = buf.result;
@@ -318,10 +318,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
@@ -401,10 +401,10 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
@@ -483,10 +483,10 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
.dirent = dirent
};
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (buf.result)
error = buf.result;
@@ -569,10 +569,10 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
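
Throughout read_write.c and readdir.c the direct f.file accesses become fd_file(f) calls, preparing struct fd to go opaque; the surrounding fdget()/fdput() discipline is unchanged. The pattern, sketched on an arbitrary file operation:

static ssize_t example_fd_op(unsigned int fd)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {			/* accessor instead of f.file */
		ret = vfs_fsync(fd_file(f), 0);
		fdput(f);
	}
	return ret;
}
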
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9b43a81a6488..72c53129c952 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2178,7 +2178,7 @@ static int grab_tail_page(struct inode *inode,
unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
struct buffer_head *bh;
struct buffer_head *head;
- struct page *page;
+ struct folio *folio;
int error;
/*
@@ -2190,20 +2190,20 @@ static int grab_tail_page(struct inode *inode,
if ((offset & (blocksize - 1)) == 0) {
return -ENOENT;
}
- page = grab_cache_page(inode->i_mapping, index);
- error = -ENOMEM;
- if (!page) {
- goto out;
- }
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(inode->i_mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
/* start within the page of the last block in the file */
start = (offset / blocksize) * blocksize;
- error = __block_write_begin(page, start, offset - start,
+ error = __block_write_begin(folio, start, offset - start,
reiserfs_get_block_create_0);
if (error)
goto unlock;
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
if (pos >= start) {
@@ -2226,14 +2226,13 @@ static int grab_tail_page(struct inode *inode,
goto unlock;
}
*bh_result = bh;
- *page_result = page;
+ *page_result = &folio->page;
-out:
return error;
unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return error;
}
@@ -2736,23 +2735,24 @@ static void reiserfs_truncate_failed_write(struct inode *inode)
static int reiserfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode;
- struct page *page;
+ struct folio *folio;
pgoff_t index;
int ret;
int old_ref = 0;
inode = mapping->host;
index = pos >> PAGE_SHIFT;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ *foliop = folio;
reiserfs_wait_on_write_block(inode->i_sb);
- fix_tail_page_for_writing(page);
+ fix_tail_page_for_writing(&folio->page);
if (reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th;
th = (struct reiserfs_transaction_handle *)current->
@@ -2762,7 +2762,7 @@ static int reiserfs_write_begin(struct file *file,
old_ref = th->t_refcount;
th->t_refcount++;
}
- ret = __block_write_begin(page, pos, len, reiserfs_get_block);
+ ret = __block_write_begin(folio, pos, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
/*
@@ -2792,8 +2792,8 @@ static int reiserfs_write_begin(struct file *file,
}
}
if (ret) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
/* Truncate allocated blocks */
reiserfs_truncate_failed_write(inode);
}
@@ -2822,7 +2822,7 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
th->t_refcount++;
}
- ret = __block_write_begin(page, from, len, reiserfs_get_block);
+ ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
/*
@@ -2862,10 +2862,9 @@ static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
static int reiserfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
int ret = 0;
int update_sd = 0;
struct reiserfs_transaction_handle *th;
@@ -2887,7 +2886,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
}
flush_dcache_folio(folio);
- reiserfs_commit_page(inode, page, start, start + copied);
+ reiserfs_commit_page(inode, &folio->page, start, start + copied);
/*
* generic_commit_write does this for us, but does not update the
@@ -2942,8 +2941,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
out:
if (locked)
reiserfs_write_unlock(inode->i_sb);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (pos + len > inode->i_size)
reiserfs_truncate_failed_write(inode);
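
grab_cache_page_write_begin() reported failure with NULL, while __filemap_get_folio() with FGP_WRITEBEGIN reports it with an ERR_PTR, so each converted call site flips its error check as well. A sketch of the new ->write_begin shape under this kernel's folio-based signature; illustrative, not reiserfs itself:

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len,
			       struct folio **foliop, void **fsdata)
{
	struct folio *folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
						  FGP_WRITEBEGIN,
						  mapping_gfp_mask(mapping));

	if (IS_ERR(folio))
		return PTR_ERR(folio);		/* ERR_PTR, not NULL, on failure */
	*foliop = folio;
	return 0;
}
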
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 28246dfc8485..4403d5c68fcb 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -537,7 +537,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
for (i = 0, info = same->info; i < count; i++, info++) {
struct fd dst_fd = fdget(info->dest_fd);
- struct file *dst_file = dst_fd.file;
+ struct file *dst_file = fd_file(dst_fd);
if (!dst_file) {
info->status = -EBADF;
diff --git a/fs/select.c b/fs/select.c
index 9515c3fa1a03..a77907faf2b4 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -77,19 +77,16 @@ u64 select_estimate_accuracy(struct timespec64 *tv)
{
u64 ret;
struct timespec64 now;
+ u64 slack = current->timer_slack_ns;
- /*
- * Realtime tasks get a slack of 0 for obvious reasons.
- */
-
- if (rt_task(current))
+ if (slack == 0)
return 0;
ktime_get_ts64(&now);
now = timespec64_sub(*tv, now);
ret = __estimate_accuracy(&now);
- if (ret < current->timer_slack_ns)
- return current->timer_slack_ns;
+ if (ret < slack)
+ return slack;
return ret;
}
@@ -532,10 +529,10 @@ static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec
continue;
mask = EPOLLNVAL;
f = fdget(i);
- if (f.file) {
+ if (fd_file(f)) {
wait_key_set(wait, in, out, bit,
busy_flag);
- mask = vfs_poll(f.file, wait);
+ mask = vfs_poll(fd_file(f), wait);
fdput(f);
}
@@ -780,7 +777,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (!user_read_access_begin(from, sizeof(*from)))
+ if (can_do_masked_user_access())
+ from = masked_user_access_begin(from);
+ else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(to->p, &from->p, Efault);
unsafe_get_user(to->size, &from->size, Efault);
@@ -840,7 +839,7 @@ SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
struct poll_list {
struct poll_list *next;
unsigned int len;
- struct pollfd entries[];
+ struct pollfd entries[] __counted_by(len);
};
#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
@@ -864,13 +863,13 @@ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
goto out;
mask = EPOLLNVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
goto out;
/* userland u16 ->events contains POLL... bitmap */
filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
pwait->_key = filter | busy_flag;
- mask = vfs_poll(f.file, pwait);
+ mask = vfs_poll(fd_file(f), pwait);
if (mask & busy_flag)
*can_busy_poll = true;
mask &= filter; /* Mask out unneeded events. */
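
Two hardening idioms show up in the select.c hunks: masked_user_access_begin() lets the hot sigset copy skip a separate access_ok() on architectures that can mask user pointers, and __counted_by(len) tells the compiler and runtime bounds checkers which field bounds the flexible entries[] array. A sketch of the annotation together with the allocation it constrains; the struct is illustrative:

struct poll_batch {
	unsigned int len;			/* number of valid entries */
	struct pollfd entries[] __counted_by(len);
};

static struct poll_batch *alloc_batch(unsigned int n)
{
	struct poll_batch *b = kmalloc(struct_size(b, entries, n), GFP_KERNEL);

	/* ->len must match the allocation for bounds checks to pass. */
	if (b)
		b->len = n;
	return b;
}
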
diff --git a/fs/signalfd.c b/fs/signalfd.c
index ec7b2da2477a..736bebf93591 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -159,7 +159,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
DECLARE_WAITQUEUE(wait, current);
spin_lock_irq(&current->sighand->siglock);
- ret = dequeue_signal(current, &ctx->sigmask, info, &type);
+ ret = dequeue_signal(&ctx->sigmask, info, &type);
switch (ret) {
case 0:
if (!nonblock)
@@ -174,7 +174,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
add_wait_queue(&current->sighand->signalfd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- ret = dequeue_signal(current, &ctx->sigmask, info, &type);
+ ret = dequeue_signal(&ctx->sigmask, info, &type);
if (ret != 0)
break;
if (signal_pending(current)) {
@@ -289,10 +289,10 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
fd_install(ufd, file);
} else {
struct fd f = fdget(ufd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ctx = f.file->private_data;
- if (f.file->f_op != &signalfd_fops) {
+ ctx = fd_file(f)->private_data;
+ if (fd_file(f)->f_op != &signalfd_fops) {
fdput(f);
return -EINVAL;
}
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index 2517dc242386..2aff6d1395ce 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -204,4 +204,18 @@ config CIFS_ROOT
Most people say N here.
+config CIFS_COMPRESSION
+ bool "SMB message compression (Experimental)"
+ depends on CIFS
+ default n
+ help
+ Enables over-the-wire message compression for SMB 3.1.1
+ mounts when negotiated with the server.
+
+ Only write requests with data size >= PAGE_SIZE will be
+ compressed to avoid wasting resources.
+
+ Say Y here if you want SMB traffic to be compressed.
+ If unsure, say N.
+
endif
diff --git a/fs/smb/client/Makefile b/fs/smb/client/Makefile
index e11985f2460b..22023e30915b 100644
--- a/fs/smb/client/Makefile
+++ b/fs/smb/client/Makefile
@@ -33,3 +33,5 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+
+cifs-$(CONFIG_CIFS_COMPRESSION) += compress.o compress/lz77.o
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 4a20e92474b2..e03c890de0a0 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -350,6 +350,9 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_CIFS_SWN_UPCALL
seq_puts(m, ",WITNESS");
#endif
+#ifdef CONFIG_CIFS_COMPRESSION
+ seq_puts(m, ",COMPRESSION");
+#endif
seq_putc(m, '\n');
seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
@@ -475,7 +478,9 @@ skip_rdma:
}
seq_puts(m, "\nCompression: ");
- if (!server->compression.requested)
+ if (!IS_ENABLED(CONFIG_CIFS_COMPRESSION))
+ seq_puts(m, "no built-in support");
+ else if (!server->compression.requested)
seq_puts(m, "disabled on mount");
else if (server->compression.enabled)
seq_printf(m, "enabled (%s)", compression_alg_str(server->compression.alg));
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index f5b6df82e857..1d294d53f662 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -27,18 +27,18 @@
#include "cifs_unicode.h"
/* security id for everyone/world system group */
-static const struct cifs_sid sid_everyone = {
+static const struct smb_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
-static const struct cifs_sid sid_authusers = {
+static const struct smb_sid sid_authusers = {
1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
/* S-1-22-1 Unmapped Unix users */
-static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
/* S-1-22-2 Unmapped Unix groups */
-static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
/*
@@ -48,17 +48,17 @@ static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
/* S-1-5-88-1 Unix uid */
-static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
{cpu_to_le32(88),
cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
/* S-1-5-88-2 Unix gid */
-static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
{cpu_to_le32(88),
cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
/* S-1-5-88-3 Unix mode */
-static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
{cpu_to_le32(88),
cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
@@ -106,7 +106,7 @@ static struct key_type cifs_idmap_key_type = {
};
static char *
-sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
+sid_to_key_str(struct smb_sid *sidptr, unsigned int type)
{
int i, len;
unsigned int saval;
@@ -158,7 +158,7 @@ sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
* the same returns zero, if they do not match returns non-zero.
*/
static int
-compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
{
int i;
int num_subauth, num_sat, num_saw;
@@ -187,7 +187,7 @@ compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
/* compare all of the subauth values if any */
num_sat = ctsid->num_subauth;
num_saw = cwsid->num_subauth;
- num_subauth = num_sat < num_saw ? num_sat : num_saw;
+ num_subauth = min(num_sat, num_saw);
if (num_subauth) {
for (i = 0; i < num_subauth; ++i) {
if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
@@ -204,11 +204,11 @@ compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
}
static bool
-is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
+is_well_known_sid(const struct smb_sid *psid, uint32_t *puid, bool is_group)
{
int i;
int num_subauth;
- const struct cifs_sid *pwell_known_sid;
+ const struct smb_sid *pwell_known_sid;
if (!psid || (puid == NULL))
return false;
@@ -260,7 +260,7 @@ is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
}
static __u16
-cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
+cifs_copy_sid(struct smb_sid *dst, const struct smb_sid *src)
{
int i;
__u16 size = 1 + 1 + 6;
@@ -277,11 +277,11 @@ cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
}
static int
-id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
+id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid)
{
int rc;
struct key *sidkey;
- struct cifs_sid *ksid;
+ struct smb_sid *ksid;
unsigned int ksid_size;
char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */
const struct cred *saved_cred;
@@ -312,8 +312,8 @@ id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
* it could be.
*/
ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
- (struct cifs_sid *)&sidkey->payload :
- (struct cifs_sid *)sidkey->payload.data[0];
+ (struct smb_sid *)&sidkey->payload :
+ (struct smb_sid *)sidkey->payload.data[0];
ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
if (ksid_size > sidkey->datalen) {
@@ -336,7 +336,7 @@ invalidate_key:
}
int
-sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
struct cifs_fattr *fattr, uint sidtype)
{
int rc = 0;
@@ -515,43 +515,43 @@ exit_cifs_idmap(void)
}
/* copy ntsd, owner sid, and group sid from a security descriptor to another */
-static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd,
- struct cifs_ntsd *pnntsd,
+static __u32 copy_sec_desc(const struct smb_ntsd *pntsd,
+ struct smb_ntsd *pnntsd,
__u32 sidsoffset,
- struct cifs_sid *pownersid,
- struct cifs_sid *pgrpsid)
+ struct smb_sid *pownersid,
+ struct smb_sid *pgrpsid)
{
- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
- struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+ struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr;
/* copy security descriptor control portion */
pnntsd->revision = pntsd->revision;
pnntsd->type = pntsd->type;
- pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
+ pnntsd->dacloffset = cpu_to_le32(sizeof(struct smb_ntsd));
pnntsd->sacloffset = 0;
pnntsd->osidoffset = cpu_to_le32(sidsoffset);
- pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
+ pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct smb_sid));
/* copy owner sid */
if (pownersid)
owner_sid_ptr = pownersid;
else
- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
- nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
+ nowner_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset);
cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
/* copy group sid */
if (pgrpsid)
group_sid_ptr = pgrpsid;
else
- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
- ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
- sizeof(struct cifs_sid));
+ ngroup_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset +
+ sizeof(struct smb_sid));
cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
- return sidsoffset + (2 * sizeof(struct cifs_sid));
+ return sidsoffset + (2 * sizeof(struct smb_sid));
}
@@ -666,7 +666,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
return;
}
-static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid)
+static __u16 cifs_copy_ace(struct smb_ace *dst, struct smb_ace *src, struct smb_sid *psid)
{
__u16 size = 1 + 1 + 2 + 4;
@@ -685,8 +685,8 @@ static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct ci
return size;
}
-static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
- const struct cifs_sid *psid, __u64 nmode,
+static __u16 fill_ace_for_sid(struct smb_ace *pntace,
+ const struct smb_sid *psid, __u64 nmode,
umode_t bits, __u8 access_type,
bool allow_delete_child)
{
@@ -723,7 +723,7 @@ static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
#ifdef CONFIG_CIFS_DEBUG2
-static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
+static void dump_ace(struct smb_ace *pace, char *end_of_acl)
{
int num_subauth;
@@ -758,15 +758,15 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
}
#endif
-static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
struct cifs_fattr *fattr, bool mode_from_special_sid)
{
int i;
int num_aces = 0;
int acl_size;
char *acl_base;
- struct cifs_ace **ppace;
+ struct smb_ace **ppace;
/* BB need to add parm so we can store the SID BB */
@@ -793,21 +793,21 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
fattr->cf_mode &= ~(0777);
acl_base = (char *)pdacl;
- acl_size = sizeof(struct cifs_acl);
+ acl_size = sizeof(struct smb_acl);
num_aces = le32_to_cpu(pdacl->num_aces);
if (num_aces > 0) {
umode_t denied_mode = 0;
- if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
+ if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
return;
- ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *),
+ ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
GFP_KERNEL);
if (!ppace)
return;
for (i = 0; i < num_aces; ++i) {
- ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
+ ppace[i] = (struct smb_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
dump_ace(ppace[i], end_of_acl);
#endif
@@ -849,7 +849,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
/* memcpy((void *)(&(cifscred->aces[i])),
(void *)ppace[i],
- sizeof(struct cifs_ace)); */
+ sizeof(struct smb_ace)); */
acl_base = (char *)ppace[i];
acl_size = le16_to_cpu(ppace[i]->size);
@@ -861,7 +861,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
return;
}
-unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
+unsigned int setup_authusers_ACE(struct smb_ace *pntace)
{
int i;
unsigned int ace_size = 20;
@@ -885,7 +885,7 @@ unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
* Fill in the special SID based on the mode. See
* https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
*/
-unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
+unsigned int setup_special_mode_ACE(struct smb_ace *pntace, __u64 nmode)
{
int i;
unsigned int ace_size = 28;
@@ -907,7 +907,7 @@ unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
return ace_size;
}
-unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
+unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace)
{
int i;
unsigned int ace_size = 28;
@@ -930,8 +930,8 @@ unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
}
static void populate_new_aces(char *nacl_base,
- struct cifs_sid *pownersid,
- struct cifs_sid *pgrpsid,
+ struct smb_sid *pownersid,
+ struct smb_sid *pgrpsid,
__u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
bool modefromsid)
{
@@ -944,17 +944,17 @@ static void populate_new_aces(char *nacl_base,
__u64 deny_user_mode = 0;
__u64 deny_group_mode = 0;
bool sticky_set = false;
- struct cifs_ace *pnntace = NULL;
+ struct smb_ace *pnntace = NULL;
nmode = *pnmode;
num_aces = *pnum_aces;
nsize = *pnsize;
if (modefromsid) {
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += setup_special_mode_ACE(pnntace, nmode);
num_aces++;
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += setup_authusers_ACE(pnntace);
num_aces++;
goto set_size;
@@ -967,7 +967,7 @@ static void populate_new_aces(char *nacl_base,
* updated in the inode.
*/
- if (!memcmp(pownersid, pgrpsid, sizeof(struct cifs_sid))) {
+ if (!memcmp(pownersid, pgrpsid, sizeof(struct smb_sid))) {
/*
* Case when owner and group SIDs are the same.
* Set the more restrictive of the two modes.
@@ -992,7 +992,7 @@ static void populate_new_aces(char *nacl_base,
sticky_set = true;
if (deny_user_mode) {
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode,
0700, ACCESS_DENIED, false);
num_aces++;
@@ -1000,31 +1000,31 @@ static void populate_new_aces(char *nacl_base,
	/* Group DENY ACE does not conflict with owner ALLOW ACE. Keep in preferred order */
if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) {
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
0070, ACCESS_DENIED, false);
num_aces++;
}
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, pownersid, user_mode,
0700, ACCESS_ALLOWED, true);
num_aces++;
/* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */
if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) {
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
0070, ACCESS_DENIED, false);
num_aces++;
}
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode,
0070, ACCESS_ALLOWED, !sticky_set);
num_aces++;
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode,
0007, ACCESS_ALLOWED, !sticky_set);
num_aces++;
@@ -1034,31 +1034,31 @@ set_size:
*pnsize = nsize;
}
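The two placements of the group DENY ACE above hinge on a single test: whether the denied group bits overlap the owner's ALLOW bits once shifted into group position. A minimal standalone sketch of that check, with assumed mode bits:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    unsigned int user_mode = 0700;        /* assumed owner ALLOW bits */
    unsigned int deny_group_mode = 0070;  /* assumed group DENY bits */

    /* Same test as populate_new_aces(): does the group DENY shadow
     * bits the owner ALLOW is about to grant? If so, it must come
     * after the owner ALLOW ACE; otherwise it can stay before it. */
    bool conflicts = deny_group_mode & (user_mode >> 3);

    printf("group DENY placed %s owner ALLOW\n",
           conflicts ? "after" : "before");
    return 0;
}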
-static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
- struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid)
+static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+ struct smb_sid *pnownersid, struct smb_sid *pngrpsid)
{
int i;
u16 size = 0;
- struct cifs_ace *pntace = NULL;
+ struct smb_ace *pntace = NULL;
char *acl_base = NULL;
u32 src_num_aces = 0;
u16 nsize = 0;
- struct cifs_ace *pnntace = NULL;
+ struct smb_ace *pnntace = NULL;
char *nacl_base = NULL;
u16 ace_size = 0;
acl_base = (char *)pdacl;
- size = sizeof(struct cifs_acl);
+ size = sizeof(struct smb_acl);
src_num_aces = le32_to_cpu(pdacl->num_aces);
nacl_base = (char *)pndacl;
- nsize = sizeof(struct cifs_acl);
+ nsize = sizeof(struct smb_acl);
/* Go through all the ACEs */
for (i = 0; i < src_num_aces; ++i) {
- pntace = (struct cifs_ace *) (acl_base + size);
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pntace = (struct smb_ace *) (acl_base + size);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0)
ace_size = cifs_copy_ace(pnntace, pntace, pnownersid);
@@ -1074,24 +1074,24 @@ static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl
return nsize;
}
-static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
__u64 *pnmode, bool mode_from_sid)
{
int i;
u16 size = 0;
- struct cifs_ace *pntace = NULL;
+ struct smb_ace *pntace = NULL;
char *acl_base = NULL;
u32 src_num_aces = 0;
u16 nsize = 0;
- struct cifs_ace *pnntace = NULL;
+ struct smb_ace *pnntace = NULL;
char *nacl_base = NULL;
u32 num_aces = 0;
bool new_aces_set = false;
/* Assuming that pndacl and pnmode are never NULL */
nacl_base = (char *)pndacl;
- nsize = sizeof(struct cifs_acl);
+ nsize = sizeof(struct smb_acl);
/* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
if (!pdacl) {
@@ -1103,12 +1103,12 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
}
acl_base = (char *)pdacl;
- size = sizeof(struct cifs_acl);
+ size = sizeof(struct smb_acl);
src_num_aces = le32_to_cpu(pdacl->num_aces);
	/* Retain whichever old ACEs we can */
for (i = 0; i < src_num_aces; ++i) {
- pntace = (struct cifs_ace *) (acl_base + size);
+ pntace = (struct smb_ace *) (acl_base + size);
if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
/* Place the new ACEs in between existing explicit and inherited */
@@ -1130,7 +1130,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
}
	/* update the pointer to the next ACE to populate */
- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
nsize += cifs_copy_ace(pnntace, pntace, NULL);
num_aces++;
@@ -1156,7 +1156,7 @@ finalize_dacl:
return 0;
}
-static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
+static int parse_sid(struct smb_sid *psid, char *end_of_acl)
{
/* BB need to add parm so we can store the SID BB */
@@ -1191,24 +1191,24 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
- struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
+ struct smb_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
bool get_mode_from_special_sid)
{
int rc = 0;
- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
- struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
+ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+ struct smb_acl *dacl_ptr; /* no need for SACL ptr */
char *end_of_acl = ((char *)pntsd) + acl_len;
__u32 dacloffset;
if (pntsd == NULL)
return -EIO;
- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
dacloffset = le32_to_cpu(pntsd->dacloffset);
- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
le32_to_cpu(pntsd->gsidoffset),
@@ -1249,7 +1249,7 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
}
/* Convert permission bits from mode to equivalent CIFS ACL */
-static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
__u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
bool mode_from_sid, bool id_from_sid, int *aclflag)
{
@@ -1257,30 +1257,30 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
__u32 dacloffset;
__u32 ndacloffset;
__u32 sidsoffset;
- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
- struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
- struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
- struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
+ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+ struct smb_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
+ struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */
+ struct smb_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
char *end_of_acl = ((char *)pntsd) + secdesclen;
u16 size = 0;
dacloffset = le32_to_cpu(pntsd->dacloffset);
if (dacloffset) {
- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
cifs_dbg(VFS, "Server returned illegal ACL size\n");
return -EINVAL;
}
}
- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
- ndacloffset = sizeof(struct cifs_ntsd);
- ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+ ndacloffset = sizeof(struct smb_ntsd);
+ ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset);
ndacl_ptr->revision =
dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
@@ -1297,15 +1297,15 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
*aclflag |= CIFS_ACL_DACL;
} else {
- ndacloffset = sizeof(struct cifs_ntsd);
- ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+ ndacloffset = sizeof(struct smb_ntsd);
+ ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset);
ndacl_ptr->revision =
dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
if (uid_valid(uid)) { /* chown */
uid_t id;
- nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
+ nowner_sid_ptr = kzalloc(sizeof(struct smb_sid),
GFP_KERNEL);
if (!nowner_sid_ptr) {
rc = -ENOMEM;
@@ -1334,7 +1334,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
}
if (gid_valid(gid)) { /* chgrp */
gid_t id;
- ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
+ ngroup_sid_ptr = kzalloc(sizeof(struct smb_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr) {
rc = -ENOMEM;
@@ -1385,11 +1385,11 @@ chown_chgrp_exit:
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
-struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
const struct cifs_fid *cifsfid, u32 *pacllen,
u32 __maybe_unused unused)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
unsigned int xid;
int rc;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1410,10 +1410,10 @@ struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
return pntsd;
}
-static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
const char *path, u32 *pacllen)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
int oplock = 0;
unsigned int xid;
int rc;
@@ -1454,11 +1454,11 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
}
/* Retrieve an ACL from the server */
-struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
struct inode *inode, const char *path,
u32 *pacllen, u32 info)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
struct cifsFileInfo *open_file = NULL;
if (inode)
@@ -1472,7 +1472,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
}
/* Set an ACL on the server */
-int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
struct inode *inode, const char *path, int aclflag)
{
int oplock = 0;
@@ -1528,7 +1528,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
struct inode *inode, bool mode_from_special_sid,
const char *path, const struct cifs_fid *pfid)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
u32 acllen = 0;
int rc = 0;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1580,9 +1580,9 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
__u32 secdesclen = 0;
__u32 nsecdesclen = 0;
__u32 dacloffset = 0;
- struct cifs_acl *dacl_ptr = NULL;
- struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
- struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+ struct smb_acl *dacl_ptr = NULL;
+ struct smb_ntsd *pntsd = NULL; /* acl obtained from server */
+ struct smb_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
struct smb_version_operations *ops;
@@ -1625,18 +1625,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
nsecdesclen = secdesclen;
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
if (mode_from_sid)
- nsecdesclen += 2 * sizeof(struct cifs_ace);
+ nsecdesclen += 2 * sizeof(struct smb_ace);
else /* cifsacl */
- nsecdesclen += 5 * sizeof(struct cifs_ace);
+ nsecdesclen += 5 * sizeof(struct smb_ace);
} else { /* chown */
	/* When ownership changes, the new owner SID length could be different */
- nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2);
+ nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2);
dacloffset = le32_to_cpu(pntsd->dacloffset);
if (dacloffset) {
- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
if (mode_from_sid)
nsecdesclen +=
- le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace);
+ le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
else /* cifsacl */
nsecdesclen += le16_to_cpu(dacl_ptr->size);
}
diff --git a/fs/smb/client/cifsacl.h b/fs/smb/client/cifsacl.h
index ccbfc754bd3c..6529478b7f48 100644
--- a/fs/smb/client/cifsacl.h
+++ b/fs/smb/client/cifsacl.h
@@ -9,8 +9,7 @@
#ifndef _CIFSACL_H
#define _CIFSACL_H
-#define NUM_AUTHS (6) /* number of authority fields */
-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+#include "../common/smbacl.h"
#define READ_BIT 0x4
#define WRITE_BIT 0x2
@@ -23,101 +22,13 @@
#define UBITSHIFT 6
#define GBITSHIFT 3
-#define ACCESS_ALLOWED 0
-#define ACCESS_DENIED 1
-
-#define SIDOWNER 1
-#define SIDGROUP 2
-
/*
* Security Descriptor length containing DACL with 3 ACEs (one each for
* owner, group and world).
*/
-#define DEFAULT_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + \
- sizeof(struct cifs_acl) + \
- (sizeof(struct cifs_ace) * 4))
-
-/*
- * Maximum size of a string representation of a SID:
- *
- * The fields are unsigned values in decimal. So:
- *
- * u8: max 3 bytes in decimal
- * u32: max 10 bytes in decimal
- *
- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
- *
- * For authority field, max is when all 6 values are non-zero and it must be
- * represented in hex. So "-0x" + 12 hex digits.
- *
- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
- */
-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
-
-struct cifs_ntsd {
- __le16 revision; /* revision level */
- __le16 type;
- __le32 osidoffset;
- __le32 gsidoffset;
- __le32 sacloffset;
- __le32 dacloffset;
-} __attribute__((packed));
-
-struct cifs_sid {
- __u8 revision; /* revision level */
- __u8 num_subauth;
- __u8 authority[NUM_AUTHS];
- __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
-} __attribute__((packed));
-
-/* size of a struct cifs_sid, sans sub_auth array */
-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
-
-struct cifs_acl {
- __le16 revision; /* revision level */
- __le16 size;
- __le32 num_aces;
-} __attribute__((packed));
-
-/* ACE types - see MS-DTYP 2.4.4.1 */
-#define ACCESS_ALLOWED_ACE_TYPE 0x00
-#define ACCESS_DENIED_ACE_TYPE 0x01
-#define SYSTEM_AUDIT_ACE_TYPE 0x02
-#define SYSTEM_ALARM_ACE_TYPE 0x03
-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
-#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
-#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
-
-/* ACE flags */
-#define OBJECT_INHERIT_ACE 0x01
-#define CONTAINER_INHERIT_ACE 0x02
-#define NO_PROPAGATE_INHERIT_ACE 0x04
-#define INHERIT_ONLY_ACE 0x08
-#define INHERITED_ACE 0x10
-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
-#define FAILED_ACCESS_ACE_FLAG 0x80
-
-struct cifs_ace {
- __u8 type; /* see above and MS-DTYP 2.4.4.1 */
- __u8 flags;
- __le16 size;
- __le32 access_req;
- struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
-} __attribute__((packed));
+#define DEFAULT_SEC_DESC_LEN (sizeof(struct smb_ntsd) + \
+ sizeof(struct smb_acl) + \
+ (sizeof(struct smb_ace) * 4))
/*
* The current SMB3 form of security descriptor is similar to what was used for
@@ -194,6 +105,6 @@ struct owner_group_sids {
* Minimum security descriptor can be one without any SACL and DACL and can
* consist of revision, type, and two sids of minimum size for owner and group
*/
-#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
+#define MIN_SEC_DESC_LEN (sizeof(struct smb_ntsd) + (2 * MIN_SID_LEN))
#endif /* _CIFSACL_H */
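The header now pulls the wire structures from common/smbacl.h instead of keeping private cifs_* copies, and the parsing code above locates everything through the byte offsets stored in the descriptor header. A minimal userspace sketch of that walk, with the packed layouts mirrored from the removed definitions and little-endian conversion elided:

#include <stddef.h>
#include <stdint.h>

struct smb_ntsd {
    uint16_t revision;
    uint16_t type;
    uint32_t osidoffset;   /* byte offset of owner SID from start of SD */
    uint32_t gsidoffset;   /* byte offset of group SID */
    uint32_t sacloffset;
    uint32_t dacloffset;   /* 0 means the descriptor carries no DACL */
} __attribute__((packed));

struct smb_acl {
    uint16_t revision;
    uint16_t size;         /* header plus all ACEs */
    uint32_t num_aces;
} __attribute__((packed));

/* Locate the DACL the way parse_sec_desc() does, with a simplified
 * bounds check in the spirit of build_sec_desc(). */
static const struct smb_acl *find_dacl(const void *sd, size_t sd_len)
{
    const struct smb_ntsd *ntsd = sd;

    if (!ntsd->dacloffset ||
        ntsd->dacloffset + sizeof(struct smb_acl) > sd_len)
        return NULL;
    return (const struct smb_acl *)((const char *)sd + ntsd->dacloffset);
}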
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 6322f0f68a17..7481b21a0489 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -21,127 +21,21 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/fips.h>
+#include <linux/iov_iter.h>
#include "../common/arc4.h"
#include <crypto/aead.h>
-/*
- * Hash data from a BVEC-type iterator.
- */
-static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
+static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- const struct bio_vec *bv = iter->bvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- void *p;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t off, len;
-
- len = bv[i].bv_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- off = bv[i].bv_offset + start;
+ struct shash_desc *shash = priv;
+ int ret, *pret = priv2;
- p = kmap_local_page(bv[i].bv_page);
- ret = crypto_shash_update(shash, p + off, len);
- kunmap_local(p);
- if (ret < 0)
- return ret;
-
- maxsize -= len;
- if (maxsize <= 0)
- break;
- start = 0;
+ ret = crypto_shash_update(shash, iter_base, len);
+ if (ret < 0) {
+ *pret = ret;
+ return len;
}
-
- return 0;
-}
-
-/*
- * Hash data from a KVEC-type iterator.
- */
-static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- const struct kvec *kv = iter->kvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t len;
-
- len = kv[i].iov_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- ret = crypto_shash_update(shash, kv[i].iov_base + start, len);
- if (ret < 0)
- return ret;
- maxsize -= len;
-
- if (maxsize <= 0)
- break;
- start = 0;
- }
-
- return 0;
-}
-
-/*
- * Hash data from an XARRAY-type iterator.
- */
-static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- struct folio *folios[16], *folio;
- unsigned int nr, i, j, npages;
- loff_t start = iter->xarray_start + iter->iov_offset;
- pgoff_t last, index = start / PAGE_SIZE;
- ssize_t ret = 0;
- size_t len, offset, foffset;
- void *p;
-
- if (maxsize == 0)
- return 0;
-
- last = (start + maxsize - 1) / PAGE_SIZE;
- do {
- nr = xa_extract(iter->xarray, (void **)folios, index, last,
- ARRAY_SIZE(folios), XA_PRESENT);
- if (nr == 0)
- return -EIO;
-
- for (i = 0; i < nr; i++) {
- folio = folios[i];
- npages = folio_nr_pages(folio);
- foffset = start - folio_pos(folio);
- offset = foffset % PAGE_SIZE;
- for (j = foffset / PAGE_SIZE; j < npages; j++) {
- len = min_t(size_t, maxsize, PAGE_SIZE - offset);
- p = kmap_local_page(folio_page(folio, j));
- ret = crypto_shash_update(shash, p, len);
- kunmap_local(p);
- if (ret < 0)
- return ret;
- maxsize -= len;
- if (maxsize <= 0)
- return 0;
- start += len;
- offset = 0;
- index++;
- }
- }
- } while (nr == ARRAY_SIZE(folios));
return 0;
}
@@ -151,21 +45,13 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
struct shash_desc *shash)
{
- if (maxsize == 0)
- return 0;
+ struct iov_iter tmp_iter = *iter;
+ int err = -EIO;
- switch (iov_iter_type(iter)) {
- case ITER_BVEC:
- return cifs_shash_bvec(iter, maxsize, shash);
- case ITER_KVEC:
- return cifs_shash_kvec(iter, maxsize, shash);
- case ITER_XARRAY:
- return cifs_shash_xarray(iter, maxsize, shash);
- default:
- pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter));
- WARN_ON_ONCE(1);
- return -EIO;
- }
+ if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
+ cifs_shash_step) != maxsize)
+ return err;
+ return 0;
}
int __cifs_calc_signature(struct smb_rqst *rqst,
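The rewrite collapses the three per-iterator-type hashing loops into one step callback driven by iterate_and_advance_kernel(). The contract visible in cifs_shash_step() above: a step returns how many of its len bytes it left unprocessed, so 0 means success and keep going, while returning the full len (with the real error stashed through the priv2 pointer) cuts the walk short. A standalone sketch of another callback written to the same convention:

#include <stddef.h>
#include <stdio.h>

/* Same shape as cifs_shash_step(): priv is the running state, priv2
 * would receive an error code, and the return value is the number of
 * bytes NOT consumed (0 = all consumed, keep iterating). */
static size_t sum_step(void *base, size_t progress, size_t len,
                       void *priv, void *priv2)
{
    unsigned long *sum = priv;
    unsigned char *p = base;

    (void)progress;
    (void)priv2;
    while (len--)
        *sum += *p++;
    return 0;
}

int main(void)
{
    unsigned char buf[] = { 'a', 'b', 'c' };
    unsigned long sum = 0;

    /* The iterator core would invoke this once per contiguous segment. */
    sum_step(buf, 0, sizeof(buf), &sum, NULL);
    printf("%lu\n", sum);   /* 294 */
    return 0;
}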
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index ca2bd204bcc5..71b720dbb2ce 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -106,7 +106,6 @@ extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct file_operations cifs_dir_ops;
-extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, struct dir_context *ctx);
/* Functions related to dir entries */
@@ -147,6 +146,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 50
-#define CIFS_VERSION "2.50"
+#define SMB3_PRODUCT_BUILD 51
+#define CIFS_VERSION "2.51"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 9eae8649f90c..15571cf0ba63 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -202,10 +202,10 @@ struct cifs_cred {
int gid;
int mode;
int cecount;
- struct cifs_sid osid;
- struct cifs_sid gsid;
+ struct smb_sid osid;
+ struct smb_sid gsid;
struct cifs_ntace *ntaces;
- struct cifs_ace *aces;
+ struct smb_ace *aces;
};
struct cifs_open_info_data {
@@ -231,8 +231,8 @@ struct cifs_open_info_data {
unsigned int eas_len;
} wsl;
char *symlink_target;
- struct cifs_sid posix_owner;
- struct cifs_sid posix_group;
+ struct smb_sid posix_owner;
+ struct smb_sid posix_group;
union {
struct smb2_file_all_info fi;
struct smb311_posix_qinfo posix_fi;
@@ -255,7 +255,7 @@ struct smb_rqst {
struct kvec *rq_iov; /* array of kvecs */
unsigned int rq_nvec; /* number of kvecs in array */
struct iov_iter rq_iter; /* Data iterator */
- struct xarray rq_buffer; /* Page buffer for encryption */
+ struct folio_queue *rq_buffer; /* Buffer for encryption */
};
struct mid_q_entry;
@@ -536,12 +536,12 @@ struct smb_version_operations {
int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
const char *, const void *, const __u16,
const struct nls_table *, struct cifs_sb_info *);
- struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
- const char *, u32 *, u32);
- struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
- const struct cifs_fid *, u32 *, u32);
- int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
- int);
+ struct smb_ntsd * (*get_acl)(struct cifs_sb_info *cifssb, struct inode *ino,
+ const char *patch, u32 *plen, u32 info);
+ struct smb_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *cifssmb,
+ const struct cifs_fid *pfid, u32 *plen, u32 info);
+ int (*set_acl)(struct smb_ntsd *pntsd, __u32 len, struct inode *ino, const char *path,
+ int flag);
/* writepages retry size */
unsigned int (*wp_retry_size)(struct inode *);
/* get mtu credits */
@@ -555,7 +555,7 @@ struct smb_version_operations {
bool (*dir_needs_close)(struct cifsFileInfo *);
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
loff_t);
- /* init transform request - used for encryption for now */
+ /* init transform (compress/encrypt) request */
int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
struct smb_rqst *, struct smb_rqst *);
int (*is_transform_hdr)(void *buf);
@@ -821,6 +821,7 @@ struct TCP_Server_Info {
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
+ bool dfs_conn:1;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1059,6 +1060,7 @@ struct cifs_ses {
struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
+ struct list_head dlist; /* dfs list */
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
@@ -1287,6 +1289,7 @@ struct cifs_tcon {
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
+ struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
@@ -1485,7 +1488,6 @@ struct cifs_io_subrequest {
struct cifs_io_request *req;
};
ssize_t got_bytes;
- size_t actual_len;
unsigned int xid;
int result;
bool have_xid;
@@ -1550,7 +1552,6 @@ struct cifsInodeInfo {
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
-#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
@@ -1876,6 +1877,7 @@ static inline bool is_replayable_error(int error)
#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
+#define CIFS_COMPRESS_REQ 0x4000 /* compress request before sending */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index a2072ab9e586..c3b6263060b0 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2573,12 +2573,6 @@ typedef struct {
} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
-struct win_dev {
- unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
- __le64 major;
- __le64 minor;
-} __attribute__((packed));
-
struct fea {
unsigned char EA_flags;
__u8 name_len;
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 497bf3c447bc..68c716e6261b 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -225,7 +225,7 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
extern int cifs_rename_pending_delete(const char *full_path,
struct dentry *dentry,
const unsigned int xid);
-extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
struct cifs_fattr *fattr, uint sidtype);
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
@@ -233,19 +233,19 @@ extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
const char *path, const struct cifs_fid *pfid);
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
kuid_t uid, kgid_t gid);
-extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
- const char *, u32 *, u32);
-extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
- const struct cifs_fid *, u32 *, u32);
+extern struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifssmb, struct inode *ino,
+ const char *path, u32 *plen, u32 info);
+extern struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifssb,
+ const struct cifs_fid *pfid, u32 *plen, u32 info);
extern struct posix_acl *cifs_get_acl(struct mnt_idmap *idmap,
struct dentry *dentry, int type);
extern int cifs_set_acl(struct mnt_idmap *idmap,
struct dentry *dentry, struct posix_acl *acl, int type);
-extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
- const char *, int);
-extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
-extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
-extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
+extern int set_cifs_acl(struct smb_ntsd *pntsd, __u32 len, struct inode *ino,
+ const char *path, int flag);
+extern unsigned int setup_authusers_ACE(struct smb_ace *pace);
+extern unsigned int setup_special_mode_ACE(struct smb_ace *pace, __u64 nmode);
+extern unsigned int setup_special_user_owner_ACE(struct smb_ace *pace);
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
@@ -570,9 +570,9 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage,
struct cifs_sb_info *cifs_sb);
extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
- __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
+ __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen);
extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
- struct cifs_ntsd *, __u32, int);
+ struct smb_ntsd *pntsd, __u32 len, int aclflag);
extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
struct posix_acl **acl, const int acl_type,
@@ -676,6 +676,10 @@ char *extract_sharename(const char *unc);
int parse_reparse_point(struct reparse_data_buffer *buf,
u32 plen, struct cifs_sb_info *cifs_sb,
bool unicode, struct cifs_open_info_data *data);
+int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname);
int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
@@ -720,15 +724,9 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
-/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
- struct cifs_ses *next;
-
- do {
- next = ses->dfs_root_ses;
- __cifs_put_smb_ses(ses);
- } while ((ses = next));
+ __cifs_put_smb_ses(ses);
}
/* Get an active reference of @ses and its children.
@@ -742,9 +740,7 @@ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
-
- for (; ses; ses = ses->dfs_root_ses)
- ses->ses_count++;
+ ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index cfae2e918209..131f20b91c3e 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1076,8 +1076,8 @@ OldOpenRetry:
pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
- /* set file as system file if special file such
- as fifo and server expecting SFU style and
+ /* set file as system file if special file such as fifo,
+ * socket, char or block and server expecting SFU style and
no Unix extensions */
if (create_options & CREATE_OPTION_SPECIAL)
@@ -1193,8 +1193,8 @@ openRetry:
req->AllocationSize = 0;
/*
- * Set file as system file if special file such as fifo and server
- * expecting SFU style and no Unix extensions.
+ * Set file as system file if special file such as fifo, socket, char
+ * or block and server expecting SFU style and no Unix extensions.
*/
if (create_options & CREATE_OPTION_SPECIAL)
req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
@@ -1266,9 +1266,7 @@ static void cifs_readv_worker(struct work_struct *work)
struct cifs_io_subrequest *rdata =
container_of(work, struct cifs_io_subrequest, subreq.work);
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result, true);
+ netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
}
static void
@@ -1327,15 +1325,16 @@ cifs_readv_callback(struct mid_q_entry *mid)
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
} else {
- if (rdata->got_bytes < rdata->actual_len &&
- rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
- ictx->remote_i_size) {
+ size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+ if (trans < rdata->subreq.len &&
+ rdata->subreq.start + trans == ictx->remote_i_size) {
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
}
}
rdata->credits.value = 0;
+ rdata->subreq.transferred += rdata->got_bytes;
INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
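The reworked callback no longer compares got_bytes against a separate actual_len; it folds the new bytes into subreq.transferred and flags EOF when a short read lands exactly on the remote file size. A worked example with assumed numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    /* Assumed values for one subrequest. */
    unsigned long long start = 0x10000, remote_i_size = 0x10800;
    unsigned long transferred = 0, got_bytes = 0x800, len = 0x1000;

    unsigned long trans = transferred + got_bytes;
    bool hit_eof = trans < len && start + trans == remote_i_size;

    /* 0x800 of 0x1000 read, ending exactly at i_size -> EOF, not error */
    printf("hit_eof = %d\n", hit_eof);
    return 0;
}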
@@ -3428,7 +3427,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
- struct cifs_ntsd **acl_inf, __u32 *pbuflen)
+ struct smb_ntsd **acl_inf, __u32 *pbuflen)
{
int rc = 0;
int buf_type = 0;
@@ -3498,7 +3497,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
/* check if buffer is big enough for the acl
header followed by the smallest SID */
- if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
+ if ((*pbuflen < sizeof(struct smb_ntsd) + 8) ||
(*pbuflen >= 64 * 1024)) {
cifs_dbg(VFS, "bad acl length %d\n", *pbuflen);
rc = -EINVAL;
@@ -3518,7 +3517,7 @@ qsec_out:
int
CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
- struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
+ struct smb_ntsd *pntsd, __u32 acllen, int aclflag)
{
__u16 byte_count, param_count, data_count, param_offset, data_offset;
int rc = 0;
diff --git a/fs/smb/client/compress.c b/fs/smb/client/compress.c
new file mode 100644
index 000000000000..63b5a55b7a57
--- /dev/null
+++ b/fs/smb/client/compress.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * This file implements I/O compression support for SMB2 messages (SMB 3.1.1 only).
+ * See compress/ for implementation details of each algorithm.
+ *
+ * References:
+ * MS-SMB2 "3.1.4.4 Compressing the Message"
+ * MS-SMB2 "3.1.5.3 Decompressing the Chained Message"
+ * MS-XCA - for details of the supported algorithms
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/uio.h>
+#include <linux/sort.h>
+
+#include "cifsglob.h"
+#include "../common/smb2pdu.h"
+#include "cifsproto.h"
+#include "smb2proto.h"
+
+#include "compress/lz77.h"
+#include "compress.h"
+
+/*
+ * The heuristic_*() functions below try to determine data compressibility.
+ *
+ * Derived from fs/btrfs/compression.c, changing coding style, some parameters, and removing
+ * unused parts.
+ *
+ * Read that file for better and more detailed explanation of the calculations.
+ *
+ * The algorithms are run on a collected sample of the input (uncompressed) data.
+ * The sample is formed of 2K reads in PAGE_SIZE intervals, with a maximum size of 4M.
+ *
+ * Parsing the sample goes from "low-hanging fruit" (fastest algorithms, likely compressible)
+ * to "need more analysis" (likely uncompressible).
+ */
+
+struct bucket {
+ unsigned int count;
+};
+
+/**
+ * has_low_entropy() - Compute Shannon entropy of the sampled data.
+ * @bkt: Bytes counts of the sample.
+ * @slen: Size of the sample.
+ *
+ * Return: true if the level (the percentage of the number of bits that would be required to
+ * compress the data) is below the minimum threshold.
+ *
+ * Note:
+ * There _is_ an entropy level here that's > 65 (the minimum threshold) that would indicate a
+ * possibility of compression, but compressing it (or even analysing it further) would waste so
+ * many resources that it's simply not worth it.
+ *
+ * Also, Shannon entropy is the last heuristic computed; if we got this far and still ended up
+ * uncertain, just stay on the safe side and call the data uncompressible.
+ */
+static bool has_low_entropy(struct bucket *bkt, size_t slen)
+{
+ const size_t threshold = 65, max_entropy = 8 * ilog2(16);
+ size_t i, p, p2, len, sum = 0;
+
+#define pow4(n) (n * n * n * n)
+ len = ilog2(pow4(slen));
+
+ for (i = 0; i < 256 && bkt[i].count > 0; i++) {
+ p = bkt[i].count;
+ p2 = ilog2(pow4(p));
+ sum += p * (len - p2);
+ }
+
+ sum /= slen;
+
+ return ((sum * 100 / max_entropy) <= threshold);
+}
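has_low_entropy() computes Shannon entropy H = -sum(p_i * log2(p_i)) in fixed point: pow4(n) is n^4, so ilog2(pow4(n)) approximates 4*log2(n), which means sum/slen lands on a 4x scale where max_entropy = 8 * ilog2(16) = 32 represents the full 8 bits per byte. The 65% threshold therefore accepts anything up to roughly 5.2 bits/byte. A small standalone illustration with assumed per-byte entropies:

#include <stdio.h>

int main(void)
{
    /* Assumed entropies: loose text vs. already-compressed data. */
    double entropy_bits[] = { 4.5, 7.9 };
    int i;

    for (i = 0; i < 2; i++) {
        double level = entropy_bits[i] * 100.0 / 8.0; /* % of 8 bits */

        printf("H = %.1f bits/byte -> level %.0f%% -> %s\n",
               entropy_bits[i], level,
               level <= 65.0 ? "low entropy, worth compressing"
                             : "leave uncompressed");
    }
    return 0;
}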
+
+#define BYTE_DIST_BAD 0
+#define BYTE_DIST_GOOD 1
+#define BYTE_DIST_MAYBE 2
+/**
+ * calc_byte_distribution() - Compute byte distribution on the sampled data.
+ * @bkt: Byte counts of the sample.
+ * @slen: Size of the sample.
+ *
+ * Return:
+ * BYTE_DIST_BAD: A "hard no" for compression -- a computed uniform distribution of
+ * the bytes (e.g. random or encrypted data).
+ * BYTE_DIST_GOOD: High probability (normal (Gaussian) distribution) of the data being
+ * compressible.
+ * BYTE_DIST_MAYBE: When the computed byte distribution landed between the low and high
+ * thresholds. has_low_entropy() should be used for a final decision.
+ */
+static int calc_byte_distribution(struct bucket *bkt, size_t slen)
+{
+ const size_t low = 64, high = 200, threshold = slen * 90 / 100;
+ size_t sum = 0;
+ int i;
+
+ for (i = 0; i < low; i++)
+ sum += bkt[i].count;
+
+ if (sum > threshold)
+ return BYTE_DIST_BAD;
+
+ for (; i < high && bkt[i].count > 0; i++) {
+ sum += bkt[i].count;
+ if (sum > threshold)
+ break;
+ }
+
+ if (i <= low)
+ return BYTE_DIST_GOOD;
+
+ if (i >= high)
+ return BYTE_DIST_BAD;
+
+ return BYTE_DIST_MAYBE;
+}
+
+static bool is_mostly_ascii(const struct bucket *bkt)
+{
+ size_t count = 0;
+ int i;
+
+ for (i = 0; i < 256; i++)
+ if (bkt[i].count > 0)
+ /* Too many non-ASCII (0-63) bytes. */
+ if (++count > 64)
+ return false;
+
+ return true;
+}
+
+static bool has_repeated_data(const u8 *sample, size_t len)
+{
+ size_t s = len / 2;
+
+ return (!memcmp(&sample[0], &sample[s], s));
+}
+
+static int cmp_bkt(const void *_a, const void *_b)
+{
+ const struct bucket *a = _a, *b = _b;
+
+ /* Reverse sort. */
+ if (a->count > b->count)
+ return -1;
+
+ return 1;
+}
+
+/*
+ * TODO:
+ * Support other iter types, if required.
+ * Only ITER_XARRAY is supported for now.
+ */
+static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample)
+{
+ struct folio *folios[16], *folio;
+ unsigned int nr, i, j, npages;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t last, index = start / PAGE_SIZE;
+ size_t len, off, foff;
+ ssize_t ret = 0;
+ void *p;
+ int s = 0;
+
+ last = (start + max - 1) / PAGE_SIZE;
+ do {
+ nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),
+ XA_PRESENT);
+ if (nr == 0)
+ return -EIO;
+
+ for (i = 0; i < nr; i++) {
+ folio = folios[i];
+ npages = folio_nr_pages(folio);
+ foff = start - folio_pos(folio);
+ off = foff % PAGE_SIZE;
+
+ for (j = foff / PAGE_SIZE; j < npages; j++) {
+ size_t len2;
+
+ len = min_t(size_t, max, PAGE_SIZE - off);
+ len2 = min_t(size_t, len, SZ_2K);
+
+ p = kmap_local_page(folio_page(folio, j));
+ memcpy(&sample[s], p, len2);
+ kunmap_local(p);
+
+ if (ret < 0)
+ return ret;
+
+ s += len2;
+
+ if (len2 < SZ_2K || s >= max - SZ_2K)
+ return s;
+
+ max -= len;
+ if (max <= 0)
+ return s;
+
+ start += len;
+ off = 0;
+ index++;
+ }
+ }
+ } while (nr == ARRAY_SIZE(folios));
+
+ return s;
+}
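collect_sample() reads at most SZ_2K from each page-sized interval of the input, so the sample grows at roughly 2 KiB per page up to the 4 MiB input cap enforced by is_compressible(). With an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SZ 4096UL   /* assumed page size */
#define SZ_2K   2048UL

int main(void)
{
    unsigned long input = 1UL << 20;            /* 1 MiB of write data */
    unsigned long sample = (input / PAGE_SZ) * SZ_2K;

    /* 256 pages x 2 KiB = 512 KiB actually inspected */
    printf("sample size: %lu bytes\n", sample);
    return 0;
}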
+
+/**
+ * is_compressible() - Determines if a chunk of data is compressible.
+ * @data: Iterator containing uncompressed data.
+ *
+ * Return: true if @data is compressible, false otherwise.
+ *
+ * Tests show that this function is quite reliable in predicting data compressibility,
+ * matching the successes and failures of LZ77 compression almost 1:1.
+ */
+static bool is_compressible(const struct iov_iter *data)
+{
+ const size_t read_size = SZ_2K, bkt_size = 256, max = SZ_4M;
+ struct bucket *bkt = NULL;
+ size_t len;
+ u8 *sample;
+ bool ret = false;
+ int i;
+
+ /* Preventive double check -- already checked in should_compress(). */
+ len = iov_iter_count(data);
+ if (unlikely(len < read_size))
+ return ret;
+
+ if (len - read_size > max)
+ len = max;
+
+ sample = kvzalloc(len, GFP_KERNEL);
+ if (!sample) {
+ WARN_ON_ONCE(1);
+
+ return ret;
+ }
+
+ /* Sample 2K bytes per page of the uncompressed data. */
+ i = collect_sample(data, len, sample);
+ if (i <= 0) {
+ WARN_ON_ONCE(1);
+
+ goto out;
+ }
+
+ len = i;
+ ret = true;
+
+ if (has_repeated_data(sample, len))
+ goto out;
+
+ bkt = kcalloc(bkt_size, sizeof(*bkt), GFP_KERNEL);
+ if (!bkt) {
+ WARN_ON_ONCE(1);
+ ret = false;
+
+ goto out;
+ }
+
+ for (i = 0; i < len; i++)
+ bkt[sample[i]].count++;
+
+ if (is_mostly_ascii(bkt))
+ goto out;
+
+ /* Sort in descending order */
+ sort(bkt, bkt_size, sizeof(*bkt), cmp_bkt, NULL);
+
+ i = calc_byte_distribution(bkt, len);
+ if (i != BYTE_DIST_MAYBE) {
+ ret = !!i;
+
+ goto out;
+ }
+
+ ret = has_low_entropy(bkt, len);
+out:
+ kvfree(sample);
+ kfree(bkt);
+
+ return ret;
+}
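Condensing is_compressible() above: the checks run cheapest-first and each can short-circuit the verdict, with only an ambiguous byte distribution falling through to the entropy pass. A sketch of the decision order (it reuses the helpers defined above and assumes bkt was already counted and sorted descending, so it is not standalone):

/* Decision order condensed from is_compressible(); sketch only. */
static bool verdict(struct bucket *bkt, const u8 *sample, size_t len)
{
    int d;

    if (has_repeated_data(sample, len))
        return true;                    /* halves identical: trivial win */
    if (is_mostly_ascii(bkt))
        return true;                    /* <= 64 distinct byte values */

    d = calc_byte_distribution(bkt, len);
    if (d != BYTE_DIST_MAYBE)
        return d;                       /* GOOD -> true, BAD -> false */

    return has_low_entropy(bkt, len);   /* final arbiter */
}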
+
+bool should_compress(const struct cifs_tcon *tcon, const struct smb_rqst *rq)
+{
+ const struct smb2_hdr *shdr = rq->rq_iov->iov_base;
+
+ if (unlikely(!tcon || !tcon->ses || !tcon->ses->server))
+ return false;
+
+ if (!tcon->ses->server->compression.enabled)
+ return false;
+
+ if (!(tcon->share_flags & SMB2_SHAREFLAG_COMPRESS_DATA))
+ return false;
+
+ if (shdr->Command == SMB2_WRITE) {
+ const struct smb2_write_req *wreq = rq->rq_iov->iov_base;
+
+ if (le32_to_cpu(wreq->Length) < SMB_COMPRESS_MIN_LEN)
+ return false;
+
+ return is_compressible(&rq->rq_iter);
+ }
+
+ return (shdr->Command == SMB2_READ);
+}
+
+int smb_compress(struct TCP_Server_Info *server, struct smb_rqst *rq, compress_send_fn send_fn)
+{
+ struct iov_iter iter;
+ u32 slen, dlen;
+ void *src, *dst = NULL;
+ int ret;
+
+ if (!server || !rq || !rq->rq_iov || !rq->rq_iov->iov_base)
+ return -EINVAL;
+
+ if (rq->rq_iov->iov_len != sizeof(struct smb2_write_req))
+ return -EINVAL;
+
+ slen = iov_iter_count(&rq->rq_iter);
+ src = kvzalloc(slen, GFP_KERNEL);
+ if (!src) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ /* Keep the original iter intact. */
+ iter = rq->rq_iter;
+
+ if (!copy_from_iter_full(src, slen, &iter)) {
+ ret = -EIO;
+ goto err_free;
+ }
+
+ /*
+ * This is just overprovisioning, as the algorithm will error out if @dst reaches 7/8
+ * of @slen.
+ */
+ dlen = slen;
+ dst = kvzalloc(dlen, GFP_KERNEL);
+ if (!dst) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ret = lz77_compress(src, slen, dst, &dlen);
+ if (!ret) {
+ struct smb2_compression_hdr hdr = { 0 };
+ struct smb_rqst comp_rq = { .rq_nvec = 3, };
+ struct kvec iov[3];
+
+ hdr.ProtocolId = SMB2_COMPRESSION_TRANSFORM_ID;
+ hdr.OriginalCompressedSegmentSize = cpu_to_le32(slen);
+ hdr.CompressionAlgorithm = SMB3_COMPRESS_LZ77;
+ hdr.Flags = SMB2_COMPRESSION_FLAG_NONE;
+ hdr.Offset = cpu_to_le32(rq->rq_iov[0].iov_len);
+
+ iov[0].iov_base = &hdr;
+ iov[0].iov_len = sizeof(hdr);
+ iov[1] = rq->rq_iov[0];
+ iov[2].iov_base = dst;
+ iov[2].iov_len = dlen;
+
+ comp_rq.rq_iov = iov;
+
+ ret = send_fn(server, 1, &comp_rq);
+ } else if (ret == -EMSGSIZE || dlen >= slen) {
+ ret = send_fn(server, 1, rq);
+ }
+err_free:
+ kvfree(dst);
+ kvfree(src);
+
+ return ret;
+}
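On success, the three kvecs above serialize as: the SMB2 compression transform header, then the untouched (uncompressed) WRITE request header from rq_iov[0], then the LZ77 payload. hdr.Offset tells the receiver how far past the transform header the compressed region begins. A worked layout with assumed sizes (16 bytes for the unchained transform header, per SMB_COMPRESS_HDR_LEN in compress.h; the other two values are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned int transform_hdr = 16;   /* unchained header, assumed */
    unsigned int write_req = 48;       /* rq_iov[0].iov_len, assumed */
    unsigned int payload = 3000;       /* dlen from lz77_compress() */

    /* Offset = length of the uncompressed region after the header */
    printf("hdr.Offset  = %u\n", write_req);
    printf("total frame = %u bytes\n", transform_hdr + write_req + payload);
    return 0;
}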
diff --git a/fs/smb/client/compress.h b/fs/smb/client/compress.h
new file mode 100644
index 000000000000..f3ed1d3e52fb
--- /dev/null
+++ b/fs/smb/client/compress.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * This file implements I/O compression support for SMB2 messages (SMB 3.1.1 only).
+ * See compress/ for implementation details of each algorithm.
+ *
+ * References:
+ * MS-SMB2 "3.1.4.4 Compressing the Message" - for compression details
+ * MS-SMB2 "3.1.5.3 Decompressing the Chained Message" - for decompression details
+ * MS-XCA - for details of the supported algorithms
+ */
+#ifndef _SMB_COMPRESS_H
+#define _SMB_COMPRESS_H
+
+#include <linux/uio.h>
+#include <linux/kernel.h>
+#include "../common/smb2pdu.h"
+#include "cifsglob.h"
+
+/* sizeof(smb2_compression_hdr) - sizeof(OriginalPayloadSize) */
+#define SMB_COMPRESS_HDR_LEN 16
+/* sizeof(smb2_compression_payload_hdr) - sizeof(OriginalPayloadSize) */
+#define SMB_COMPRESS_PAYLOAD_HDR_LEN 8
+#define SMB_COMPRESS_MIN_LEN PAGE_SIZE
+
+#ifdef CONFIG_CIFS_COMPRESSION
+typedef int (*compress_send_fn)(struct TCP_Server_Info *, int, struct smb_rqst *);
+
+int smb_compress(struct TCP_Server_Info *server, struct smb_rqst *rq, compress_send_fn send_fn);
+
+/**
+ * should_compress() - Determines if a request (write) or the response to a
+ * request (read) should be compressed.
+ * @tcon: tcon the request is being sent to
+ * @rq: request to evaluate
+ *
+ * Return: true iff:
+ * - compression was successfully negotiated with server
+ * - server has enabled compression for the share
+ * - it's a read or write request
+ * - (write only) request length is >= SMB_COMPRESS_MIN_LEN
+ * - (write only) is_compressible() returns 1
+ *
+ * Return false otherwise.
+ */
+bool should_compress(const struct cifs_tcon *tcon, const struct smb_rqst *rq);
+
+/**
+ * smb_compress_alg_valid() - Validate a compression algorithm.
+ * @alg: Compression algorithm to check.
+ * @valid_none: Whether the NONE algorithm should be considered
+ * valid.
+ *
+ * If @alg is SMB3_COMPRESS_NONE, this function returns @valid_none.
+ *
+ * Note that the 'NONE' (0) compressor type is considered invalid in protocol
+ * negotiation, as it's never sent to, or returned by, the server.
+ *
+ * Return: true if @alg is valid/supported, false otherwise.
+ */
+static __always_inline int smb_compress_alg_valid(__le16 alg, bool valid_none)
+{
+ if (alg == SMB3_COMPRESS_NONE)
+ return valid_none;
+
+ if (alg == SMB3_COMPRESS_LZ77 || alg == SMB3_COMPRESS_PATTERN)
+ return true;
+
+ return false;
+}
+#else /* !CONFIG_CIFS_COMPRESSION */
+static inline int smb_compress(void *unused1, void *unused2, void *unused3)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool should_compress(void *unused1, void *unused2)
+{
+ return false;
+}
+
+static inline int smb_compress_alg_valid(__le16 unused1, bool unused2)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_CIFS_COMPRESSION */
+#endif /* _SMB_COMPRESS_H */
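A hypothetical caller in the transport layer, assuming a send routine my_send_fn with the compress_send_fn signature: requests tagged CIFS_COMPRESS_REQ (added in cifsglob.h above) are detoured through smb_compress(), which either sends the compressed frame or falls back to sending the original request.

/* Hypothetical wiring; my_send_fn is an assumed stand-in for the
 * transport's real send routine. */
static int my_send_fn(struct TCP_Server_Info *server, int num_rqst,
                      struct smb_rqst *rqst);

static int send_maybe_compressed(struct TCP_Server_Info *server,
                                 struct smb_rqst *rq, int flags)
{
    if (flags & CIFS_COMPRESS_REQ)
        return smb_compress(server, rq, my_send_fn);
    return my_send_fn(server, 1, rq);
}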
diff --git a/fs/smb/client/compress/lz77.c b/fs/smb/client/compress/lz77.c
new file mode 100644
index 000000000000..553e253ada29
--- /dev/null
+++ b/fs/smb/client/compress/lz77.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * Implementation of the LZ77 "plain" compression algorithm, as per MS-XCA spec.
+ */
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/count_zeros.h>
+#include <asm/unaligned.h>
+
+#include "lz77.h"
+
+/*
+ * Compression parameters.
+ */
+#define LZ77_MATCH_MIN_LEN 4
+#define LZ77_MATCH_MIN_DIST 1
+#define LZ77_MATCH_MAX_DIST SZ_1K
+#define LZ77_HASH_LOG 15
+#define LZ77_HASH_SIZE (1 << LZ77_HASH_LOG)
+#define LZ77_STEP_SIZE sizeof(u64)
+
+static __always_inline u8 lz77_read8(const u8 *ptr)
+{
+ return get_unaligned(ptr);
+}
+
+static __always_inline u64 lz77_read64(const u64 *ptr)
+{
+ return get_unaligned(ptr);
+}
+
+static __always_inline void lz77_write8(u8 *ptr, u8 v)
+{
+ put_unaligned(v, ptr);
+}
+
+static __always_inline void lz77_write16(u16 *ptr, u16 v)
+{
+ put_unaligned_le16(v, ptr);
+}
+
+static __always_inline void lz77_write32(u32 *ptr, u32 v)
+{
+ put_unaligned_le32(v, ptr);
+}
+
+static __always_inline u32 lz77_match_len(const void *wnd, const void *cur, const void *end)
+{
+ const void *start = cur;
+ u64 diff;
+
+ /* Safe for a do/while because otherwise we wouldn't reach here from the main loop. */
+ do {
+ diff = lz77_read64(cur) ^ lz77_read64(wnd);
+ if (!diff) {
+ cur += LZ77_STEP_SIZE;
+ wnd += LZ77_STEP_SIZE;
+
+ continue;
+ }
+
+ /* This computes the number of common bytes in @diff. */
+ cur += count_trailing_zeros(diff) >> 3;
+
+ return (cur - start);
+ } while (likely(cur + LZ77_STEP_SIZE < end));
+
+ while (cur < end && lz77_read8(cur++) == lz77_read8(wnd++))
+ ;
+
+ return (cur - start);
+}
+
+static __always_inline void *lz77_write_match(void *dst, void **nib, u32 dist, u32 len)
+{
+ len -= 3;
+ dist--;
+ dist <<= 3;
+
+ if (len < 7) {
+ lz77_write16(dst, dist + len);
+
+ return dst + 2;
+ }
+
+ dist |= 7;
+ lz77_write16(dst, dist);
+ dst += 2;
+ len -= 7;
+
+ if (!*nib) {
+ lz77_write8(dst, umin(len, 15));
+ *nib = dst;
+ dst++;
+ } else {
+ u8 *b = *nib;
+
+ lz77_write8(b, *b | umin(len, 15) << 4);
+ *nib = NULL;
+ }
+
+ if (len < 15)
+ return dst;
+
+ len -= 15;
+ if (len < 255) {
+ lz77_write8(dst, len);
+
+ return dst + 1;
+ }
+
+ lz77_write8(dst, 0xff);
+ dst++;
+ len += 7 + 15;
+ if (len <= 0xffff) {
+ lz77_write16(dst, len);
+
+ return dst + 2;
+ }
+
+ lz77_write16(dst, 0);
+ dst += 2;
+ lz77_write32(dst, len);
+
+ return dst + 4;
+}
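lz77_write_match() emits the MS-XCA match token: the 13 high bits hold distance - 1, the 3 low bits hold length - 3, and the value 7 in the length field signals the nibble/byte/word extensions that follow. A short match therefore encodes in one 16-bit little-endian token:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t dist = 100, len = 5;   /* assumed match, len - 3 < 7 */
    uint16_t token = (uint16_t)(((dist - 1) << 3) | (len - 3));

    printf("token = 0x%04x\n", token);   /* 0x031a, sent little-endian */
    return 0;
}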
+
+noinline int lz77_compress(const void *src, u32 slen, void *dst, u32 *dlen)
+{
+ const void *srcp, *end;
+ void *dstp, *nib, *flag_pos;
+ u32 flag_count = 0;
+ long flag = 0;
+ u64 *htable;
+
+ srcp = src;
+ end = src + slen;
+ dstp = dst;
+ nib = NULL;
+ flag_pos = dstp;
+ dstp += 4;
+
+ htable = kvcalloc(LZ77_HASH_SIZE, sizeof(*htable), GFP_KERNEL);
+ if (!htable)
+ return -ENOMEM;
+
+ /* Main loop. */
+ do {
+ u32 dist, len = 0;
+ const void *wnd;
+ u64 hash;
+
+ hash = ((lz77_read64(srcp) << 24) * 889523592379ULL) >> (64 - LZ77_HASH_LOG);
+ wnd = src + htable[hash];
+ htable[hash] = srcp - src;
+ dist = srcp - wnd;
+
+ if (dist && dist < LZ77_MATCH_MAX_DIST)
+ len = lz77_match_len(wnd, srcp, end);
+
+ if (len < LZ77_MATCH_MIN_LEN) {
+ lz77_write8(dstp, lz77_read8(srcp));
+
+ dstp++;
+ srcp++;
+
+ flag <<= 1;
+ flag_count++;
+ if (flag_count == 32) {
+ lz77_write32(flag_pos, flag);
+ flag_count = 0;
+ flag_pos = dstp;
+ dstp += 4;
+ }
+
+ continue;
+ }
+
+ /*
+ * Bail out if @dstp reached >= 7/8 of @slen -- already compressed badly, not worth
+ * going further.
+ */
+ if (unlikely(dstp - dst >= slen - (slen >> 3))) {
+ *dlen = slen;
+ goto out;
+ }
+
+ dstp = lz77_write_match(dstp, &nib, dist, len);
+ srcp += len;
+
+ flag = (flag << 1) | 1;
+ flag_count++;
+ if (flag_count == 32) {
+ lz77_write32(flag_pos, flag);
+ flag_count = 0;
+ flag_pos = dstp;
+ dstp += 4;
+ }
+ } while (likely(srcp + LZ77_STEP_SIZE < end));
+
+ while (srcp < end) {
+ u32 c = umin(end - srcp, 32 - flag_count);
+
+ memcpy(dstp, srcp, c);
+
+ dstp += c;
+ srcp += c;
+
+ flag <<= c;
+ flag_count += c;
+ if (flag_count == 32) {
+ lz77_write32(flag_pos, flag);
+ flag_count = 0;
+ flag_pos = dstp;
+ dstp += 4;
+ }
+ }
+
+ flag <<= (32 - flag_count);
+ flag |= (1 << (32 - flag_count)) - 1;
+ lz77_write32(flag_pos, flag);
+
+ *dlen = dstp - dst;
+out:
+ kvfree(htable);
+
+ if (*dlen < slen)
+ return 0;
+
+ return -EMSGSIZE;
+}
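The flag words interleaved with the output describe each group of up to 32 tokens, oldest token in the most significant bit: 0 marks a literal byte, 1 a match token. The final partial word is shifted into the high bits and its unused low bits padded with 1s, exactly as the epilogue above does. For a three-token stream literal/match/literal:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t flag = 0;
    unsigned int count = 0;

    flag <<= 1;              count++;   /* literal -> 0 */
    flag = (flag << 1) | 1;  count++;   /* match   -> 1 */
    flag <<= 1;              count++;   /* literal -> 0 */

    /* End-of-stream: shift into the high bits, pad the rest with 1s. */
    flag <<= (32 - count);
    flag |= (1u << (32 - count)) - 1;

    printf("flag word = 0x%08x\n", flag);   /* 0x5fffffff */
    return 0;
}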
diff --git a/fs/smb/client/compress/lz77.h b/fs/smb/client/compress/lz77.h
new file mode 100644
index 000000000000..cdcb191b48a2
--- /dev/null
+++ b/fs/smb/client/compress/lz77.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * Implementation of the LZ77 "plain" compression algorithm, as per MS-XCA spec.
+ */
+#ifndef _SMB_COMPRESS_LZ77_H
+#define _SMB_COMPRESS_LZ77_H
+
+#include <linux/kernel.h>
+
+int lz77_compress(const void *src, u32 slen, void *dst, u32 *dlen);
+#endif /* _SMB_COMPRESS_LZ77_H */
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 5375b0c1dfb9..adf8758847f6 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -811,13 +811,9 @@ cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter
unsigned int to_read)
{
struct msghdr smb_msg = { .msg_iter = *iter };
- int ret;
iov_iter_truncate(&smb_msg.msg_iter, to_read);
- ret = cifs_readv_from_socket(server, &smb_msg);
- if (ret > 0)
- iov_iter_advance(iter, ret);
- return ret;
+ return cifs_readv_from_socket(server, &smb_msg);
}
static bool
@@ -1009,11 +1005,10 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
}
if (!list_empty(&server->pending_mid_q)) {
- struct list_head dispose_list;
struct mid_q_entry *mid_entry;
struct list_head *tmp, *tmp2;
+ LIST_HEAD(dispose_list);
- INIT_LIST_HEAD(&dispose_list);
spin_lock(&server->mid_lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
@@ -1531,6 +1526,9 @@ static int match_server(struct TCP_Server_Info *server,
if (server->nosharesock)
return 0;
+ if (!match_super && (ctx->dfs_conn || server->dfs_conn))
+ return 0;
+
/* If multidialect negotiation see if existing sessions match one */
if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
if (server->vals->protocol_id < SMB30_PROT_ID)
@@ -1724,6 +1722,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (ctx->nosharesock)
tcp_ses->nosharesock = true;
+ tcp_ses->dfs_conn = ctx->dfs_conn;
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
@@ -1874,13 +1873,15 @@ out_err:
}
/* this function must be called with ses_lock and chan_lock held */
-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+static int match_session(struct cifs_ses *ses,
+ struct smb3_fs_context *ctx,
+ bool match_super)
{
if (ctx->sectype != Unspecified &&
ctx->sectype != ses->sectype)
return 0;
- if (ctx->dfs_root_ses != ses->dfs_root_ses)
+ if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
return 0;
/*
@@ -1999,7 +2000,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
- if (match_session(ses, ctx)) {
+ if (match_session(ses, ctx, false)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
ret = ses;
@@ -2059,8 +2060,7 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
if (do_logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
- if (rc)
- cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ cifs_server_dbg(FYI, "%s: Session Logoff: rc=%d\n",
__func__, rc);
_free_xid(xid);
}
@@ -2383,8 +2383,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
- if (ctx->dfs_root_ses)
- cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2459,6 +2457,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
+ LIST_HEAD(ses_list);
/*
* IPC tcon share the lifetime of their session and are
@@ -2483,6 +2482,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
list_del_init(&tcon->tcon_list);
tcon->status = TID_EXITING;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ list_replace_init(&tcon->dfs_ses_list, &ses_list);
+#endif
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2510,6 +2512,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_put_root_smb_sessions(&ses_list);
+#endif
}
/**
@@ -2893,7 +2898,7 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, true) ||
- !match_session(ses, ctx) ||
+ !match_session(ses, ctx, true) ||
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcon, mnt_data)) {
rc = 0;
@@ -3624,13 +3629,12 @@ out:
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
- bool isdfs;
int rc;
- rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ rc = dfs_mount_share(&mnt_ctx);
if (rc)
goto error;
- if (!isdfs)
+ if (!ctx->dfs_conn)
goto out;
/*
@@ -4035,7 +4039,7 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
-__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
@@ -4081,7 +4085,7 @@ __cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
if (IS_ERR(ses)) {
- tcon = (struct cifs_tcon *)ses;
+ tcon = ERR_CAST(ses);
cifs_put_tcp_session(master_tcon->ses->server, 0);
goto out;
}
@@ -4133,17 +4137,6 @@ out:
return tcon;
}
-static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
-{
- struct cifs_tcon *ret;
-
- cifs_mount_lock();
- ret = __cifs_construct_tcon(cifs_sb, fsuid);
- cifs_mount_unlock();
- return ret;
-}
-
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
@@ -4213,9 +4206,9 @@ tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
- int ret;
- kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
+ kuid_t fsuid = current_fsuid();
+ int err;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
@@ -4250,9 +4243,9 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
- ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
+ err = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
TASK_INTERRUPTIBLE);
- if (ret) {
+ if (err) {
cifs_put_tlink(tlink);
return ERR_PTR(-ERESTARTSYS);
}
@@ -4263,8 +4256,9 @@ wait_for_construction:
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
+ err = PTR_ERR(tlink->tl_tcon);
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
@@ -4276,8 +4270,11 @@ wait_for_construction:
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
+ err = PTR_ERR(tlink->tl_tcon);
+ if (err == -ENOKEY)
+ err = -EACCES;
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
return tlink;
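The last hunks above stop cifs_sb_tlink() from collapsing every tcon construction failure into -EACCES: the real error is now propagated, with -ENOKEY (no usable credential key) still mapped to -EACCES for backward compatibility. A condensed sketch of the resulting flow:

        if (IS_ERR(tlink->tl_tcon)) {
                int err = PTR_ERR(tlink->tl_tcon);

                if (err == -ENOKEY)             /* keep historical behaviour */
                        err = -EACCES;
                cifs_put_tlink(tlink);
                return ERR_PTR(err);
        }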
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 3ec965547e3d..3f6077c68d68 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -69,7 +69,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
 * Get an active reference to @ses so that the next call to cifs_put_tcon()
 * won't release it, as any new DFS referrals must go through its IPC tcon.
*/
-static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void set_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses = mnt_ctx->ses;
@@ -95,7 +95,7 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
return rc;
}
-static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
+static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
struct dfs_info3_param *tgt,
struct dfs_ref_walk *rw)
{
@@ -120,6 +120,7 @@ static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
}
ref_walk_path(rw) = ref_path;
ref_walk_fpath(rw) = full_path;
+ ref_walk_ses(rw) = ctx->dfs_root_ses;
return 0;
}
@@ -128,11 +129,11 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param tgt = {};
- bool is_refsrv;
int rc = -ENOENT;
again:
do {
+ ctx->dfs_root_ses = ref_walk_ses(rw);
if (ref_walk_empty(rw)) {
rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1,
NULL, ref_walk_tl(rw));
@@ -158,10 +159,7 @@ again:
if (rc)
continue;
- is_refsrv = tgt.server_type == DFS_TYPE_ROOT ||
- DFS_INTERLINK(tgt.flags);
ref_walk_set_tgt_hint(rw);
-
if (tgt.flags & DFSREF_STORAGE_SERVER) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (!rc)
@@ -172,12 +170,10 @@ again:
continue;
}
- if (is_refsrv)
- add_root_smb_session(mnt_ctx);
-
+ set_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
- rc = set_ref_paths(mnt_ctx, &tgt, rw);
+ rc = setup_dfs_ref(mnt_ctx, &tgt, rw);
if (!rc) {
rc = -EREMOTE;
goto again;
@@ -193,20 +189,22 @@ out:
return rc;
}
-static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx)
+static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+ struct dfs_ref_walk **rw)
{
- struct dfs_ref_walk *rw;
int rc;
- rw = ref_walk_alloc();
- if (IS_ERR(rw))
- return PTR_ERR(rw);
+ *rw = ref_walk_alloc();
+ if (IS_ERR(*rw)) {
+ rc = PTR_ERR(*rw);
+ *rw = NULL;
+ return rc;
+ }
- ref_walk_init(rw);
- rc = set_ref_paths(mnt_ctx, NULL, rw);
+ ref_walk_init(*rw);
+ rc = setup_dfs_ref(mnt_ctx, NULL, *rw);
if (!rc)
- rc = __dfs_referral_walk(mnt_ctx, rw);
- ref_walk_free(rw);
+ rc = __dfs_referral_walk(mnt_ctx, *rw);
return rc;
}
@@ -214,16 +212,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_ref_walk *rw = NULL;
struct cifs_tcon *tcon;
char *origin_fullpath;
- bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
if (IS_ERR(origin_fullpath))
return PTR_ERR(origin_fullpath);
- rc = dfs_referral_walk(mnt_ctx);
+ rc = dfs_referral_walk(mnt_ctx, &rw);
if (!rc) {
/*
* Prevent superblock from being created with any missing
@@ -241,21 +239,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
spin_lock(&tcon->tc_lock);
- if (!tcon->origin_fullpath) {
- tcon->origin_fullpath = origin_fullpath;
- origin_fullpath = NULL;
- } else {
- new_tcon = false;
- }
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
+ ref_walk_set_tcon(rw, tcon);
spin_unlock(&tcon->tc_lock);
-
- if (new_tcon) {
- queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
- dfs_cache_get_ttl() * HZ);
- }
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
out:
kfree(origin_fullpath);
+ ref_walk_free(rw);
return rc;
}
@@ -279,7 +272,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
return rc;
}
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
bool nodfs = ctx->nodfs;
@@ -289,7 +282,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
- *isdfs = false;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
@@ -317,10 +309,15 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
return rc;
}
- *isdfs = true;
- add_root_smb_session(mnt_ctx);
- rc = __dfs_mount_share(mnt_ctx);
- dfs_put_root_smb_sessions(mnt_ctx);
+ if (!ctx->dfs_conn) {
+ ctx->dfs_conn = true;
+ cifs_mount_put_conns(mnt_ctx);
+ rc = get_session(mnt_ctx, NULL);
+ }
+ if (!rc) {
+ set_root_smb_session(mnt_ctx);
+ rc = __dfs_mount_share(mnt_ctx);
+ }
return rc;
}
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index e5c4dcf83750..1aa2bc65b3bc 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -19,6 +19,7 @@
struct dfs_ref {
char *path;
char *full_path;
+ struct cifs_ses *ses;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *tit;
};
@@ -38,6 +39,7 @@ struct dfs_ref_walk {
#define ref_walk_path(w) (ref_walk_cur(w)->path)
#define ref_walk_fpath(w) (ref_walk_cur(w)->full_path)
#define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
+#define ref_walk_ses(w) (ref_walk_cur(w)->ses)
static inline struct dfs_ref_walk *ref_walk_alloc(void)
{
@@ -60,14 +62,19 @@ static inline void __ref_walk_free(struct dfs_ref *ref)
kfree(ref->path);
kfree(ref->full_path);
dfs_cache_free_tgts(&ref->tl);
+ if (ref->ses)
+ cifs_put_smb_ses(ref->ses);
memset(ref, 0, sizeof(*ref));
}
static inline void ref_walk_free(struct dfs_ref_walk *rw)
{
- struct dfs_ref *ref = ref_walk_start(rw);
+ struct dfs_ref *ref;
- for (; ref <= ref_walk_end(rw); ref++)
+ if (!rw)
+ return;
+
+ for (ref = ref_walk_start(rw); ref <= ref_walk_end(rw); ref++)
__ref_walk_free(ref);
kfree(rw);
}
@@ -116,9 +123,22 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
+static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw,
+ struct cifs_tcon *tcon)
+{
+ struct dfs_ref *ref = ref_walk_start(rw);
+
+ for (; ref <= ref_walk_cur(rw); ref++) {
+ if (WARN_ON_ONCE(!ref->ses))
+ continue;
+ list_add(&ref->ses->dlist, &tcon->dfs_ses_list);
+ ref->ses = NULL;
+ }
+}
+
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx);
static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
{
@@ -142,20 +162,14 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
* references of all DFS root sessions that were used across the mount process
* in dfs_mount_share().
*/
-static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
{
- const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct cifs_ses *ses = ctx->dfs_root_ses;
- struct cifs_ses *cur;
-
- if (!ses)
- return;
+ struct cifs_ses *ses, *n;
- for (cur = ses; cur; cur = cur->dfs_root_ses) {
- if (cur->dfs_root_ses)
- cifs_put_smb_ses(cur->dfs_root_ses);
+ list_for_each_entry_safe(ses, n, head, dlist) {
+ list_del_init(&ses->dlist);
+ cifs_put_smb_ses(ses);
}
- cifs_put_smb_ses(ses);
}
#endif /* _CIFS_DFS_H */
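Taken together, the dfs.c/dfs.h/connect.c hunks move DFS root-session lifetime from the fs_context to the tcon: the referral walk pins each root session in its walk frame, ref_walk_set_tcon() hands the pinned sessions to the tcon's dfs_ses_list, and cifs_put_tcon() releases them. A hedged sketch of the teardown half, stitched from those hunks:

        LIST_HEAD(ses_list);

        /* detach the tcon's root sessions under the lock ... */
        spin_lock(&tcon->tc_lock);
        list_replace_init(&tcon->dfs_ses_list, &ses_list);
        spin_unlock(&tcon->tc_lock);

        /* ... and drop the references outside of it */
        dfs_put_root_smb_sessions(&ses_list);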
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 11c8efecf7aa..110f03df012a 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce)
static inline void flush_cache_ent(struct cache_entry *ce)
{
+ cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
hlist_del_init(&ce->hlist);
kfree(ce->path);
free_tgts(ce);
@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n
return ce;
}
-static void remove_oldest_entry_locked(void)
+/* Remove all referrals that have a single target, or the oldest entry */
+static void purge_cache(void)
{
int i;
struct cache_entry *ce;
- struct cache_entry *to_del = NULL;
-
- WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+ struct cache_entry *oldest = NULL;
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
+ struct hlist_node *n;
- hlist_for_each_entry(ce, l, hlist) {
+ hlist_for_each_entry_safe(ce, n, l, hlist) {
if (hlist_unhashed(&ce->hlist))
continue;
- if (!to_del || timespec64_compare(&ce->etime,
- &to_del->etime) < 0)
- to_del = ce;
+ if (ce->numtgts == 1)
+ flush_cache_ent(ce);
+ else if (!oldest ||
+ timespec64_compare(&ce->etime,
+ &oldest->etime) < 0)
+ oldest = ce;
}
}
- if (!to_del) {
- cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
- return;
- }
-
- cifs_dbg(FYI, "%s: removing entry\n", __func__);
- dump_ce(to_del);
- flush_cache_ent(to_del);
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
+ flush_cache_ent(oldest);
}
/* Add a new DFS cache entry */
@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
- remove_oldest_entry_locked();
+ purge_cache();
}
rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
@@ -1095,16 +1093,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
- char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct sockaddr_storage ss;
const char *host;
+ const char *s2 = &tcon->tree_name[1];
size_t hostlen;
- struct sockaddr_storage ss;
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
bool match;
int rc;
- if (strcasecmp(s1, s2))
+ if (strcasecmp(s2, s1))
return false;
/*
@@ -1128,34 +1128,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
return match;
}
-/*
- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
- * target shares in @refs.
- */
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
- const char *path,
- struct dfs_cache_tgt_list *old_tl,
- struct dfs_cache_tgt_list *new_tl)
-{
- struct dfs_cache_tgt_iterator *oit, *nit;
-
- for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
- oit = dfs_cache_get_next_tgt(old_tl, oit)) {
- for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
- nit = dfs_cache_get_next_tgt(new_tl, nit)) {
- if (target_share_equal(server,
- dfs_cache_get_tgt_name(oit),
- dfs_cache_get_tgt_name(nit))) {
- dfs_cache_noreq_update_tgthint(path, nit);
- return;
- }
- }
- }
-
- cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_signal_cifsd_for_reconnect(server, true);
-}
-
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
@@ -1172,41 +1144,35 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+static char *get_ses_refpath(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
- DFS_CACHE_TGT_LIST(old_tl);
- DFS_CACHE_TGT_LIST(new_tl);
- bool needs_refresh = false;
- struct cache_entry *ce;
- unsigned int xid;
- char *path = NULL;
- int rc = 0;
-
- xid = get_xid();
+ char *path = ERR_PTR(-ENOENT);
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath) {
path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
if (!path)
- rc = -ENOMEM;
+ path = ERR_PTR(-ENOMEM);
}
mutex_unlock(&server->refpath_lock);
- if (!path)
- goto out;
+ return path;
+}
- down_read(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &old_tl);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- }
- up_read(&htable_rw_lock);
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
+{
+ struct cache_entry *ce;
+ unsigned int xid;
+ char *path;
+ int rc = 0;
- if (!needs_refresh) {
- rc = 0;
+ xid = get_xid();
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
goto out;
}
@@ -1217,29 +1183,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
goto out;
}
- ce = cache_refresh_path(xid, ses, path, true);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &new_tl);
+ ce = cache_refresh_path(xid, ses, path, false);
+ if (!IS_ERR(ce))
up_read(&htable_rw_lock);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
- }
+ else
+ rc = PTR_ERR(ce);
out:
free_xid(xid);
- dfs_cache_free_tgts(&old_tl);
- dfs_cache_free_tgts(&new_tl);
kfree(path);
}
-static inline void refresh_ses_referral(struct cifs_ses *ses)
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+ const char *path,
+ struct dfs_info3_param *refs,
+ int numrefs, bool force_refresh)
{
- __refresh_ses_referral(ses, false);
+ struct cache_entry *ce;
+ bool reconnect = force_refresh;
+ int rc = 0;
+ int i;
+
+ if (unlikely(!numrefs))
+ return 0;
+
+ if (force_refresh) {
+ for (i = 0; i < numrefs; i++) {
+ /* TODO: include prefix paths in the matching */
+ if (target_share_equal(tcon, refs[i].node_name)) {
+ reconnect = false;
+ break;
+ }
+ }
+ }
+
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (force_refresh || cache_entry_expired(ce))
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ } else if (PTR_ERR(ce) == -ENOENT) {
+ ce = add_cache_entry_locked(refs, numrefs);
+ }
+ up_write(&htable_rw_lock);
+
+ if (IS_ERR(ce))
+ rc = PTR_ERR(ce);
+ if (reconnect) {
+ cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+ cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+ }
+ return rc;
}
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
{
- __refresh_ses_referral(ses, true);
+ struct dfs_info3_param *refs = NULL;
+ struct cache_entry *ce;
+ struct cifs_ses *ses;
+ unsigned int xid;
+ bool needs_refresh;
+ char *path;
+ int numrefs = 0;
+ int rc = 0;
+
+ xid = get_xid();
+ ses = tcon->ses;
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!needs_refresh) {
+ up_read(&htable_rw_lock);
+ goto out;
+ }
+ up_read(&htable_rw_lock);
+
+ ses = CIFS_DFS_ROOT_SES(ses);
+ if (!is_ses_good(ses)) {
+ cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+ __func__);
+ goto out;
+ }
+
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ if (!rc) {
+ rc = __refresh_tcon_referral(tcon, path, refs,
+ numrefs, force_refresh);
+ }
+
+out:
+ free_xid(xid);
+ kfree(path);
+ free_dfs_info_array(refs, numrefs);
}
/**
@@ -1280,7 +1323,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- force_refresh_ses_referral(tcon->ses);
+ refresh_tcon_referral(tcon, true);
return 0;
}
@@ -1292,8 +1335,9 @@ void dfs_cache_refresh(struct work_struct *work)
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+ list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
refresh_ses_referral(ses);
+ refresh_tcon_referral(tcon, false);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 2d387485f05b..78b59c4ef3ce 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -49,6 +49,7 @@ static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
struct cifs_io_subrequest *wdata =
container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = wdata->req;
+ struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
struct TCP_Server_Info *server;
struct cifsFileInfo *open_file = req->cfile;
size_t wsize = req->rreq.wsize;
@@ -73,7 +74,7 @@ retry:
}
}
- rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
+ rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
&wdata->credits);
if (rc < 0) {
subreq->error = rc;
@@ -92,7 +93,7 @@ retry:
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->smbd_conn)
- subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+ stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}
@@ -111,7 +112,6 @@ static void cifs_issue_write(struct netfs_io_subrequest *subreq)
goto fail;
}
- wdata->actual_len = wdata->subreq.len;
rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
if (rc)
goto fail;
@@ -140,25 +140,22 @@ static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
}
/*
- * Split the read up according to how many credits we can get for each piece.
- * It's okay to sleep here if we need to wait for more credit to become
- * available.
- *
- * We also choose the server and allocate an operation ID to be cleaned up
- * later.
+ * Negotiate the size of a read operation on behalf of the netfs library.
*/
-static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
struct TCP_Server_Info *server = req->server;
struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
- size_t rsize;
- int rc;
+ size_t size;
+ int rc = 0;
- rdata->xid = get_xid();
- rdata->have_xid = true;
+ if (!rdata->have_xid) {
+ rdata->xid = get_xid();
+ rdata->have_xid = true;
+ }
rdata->server = server;
if (cifs_sb->ctx->rsize == 0)
@@ -166,13 +163,12 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
cifs_sb->ctx);
-
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
- &rsize, &rdata->credits);
- if (rc) {
- subreq->error = rc;
- return false;
- }
+ &size, &rdata->credits);
+ if (rc)
+ return rc;
+
+ rreq->io_streams[0].sreq_max_len = size;
rdata->credits.in_flight_check = 1;
rdata->credits.rreq_debug_id = rreq->debug_id;
@@ -184,14 +180,11 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
server->credits, server->in_flight, 0,
cifs_trace_rw_credits_read_submit);
- subreq->len = umin(subreq->len, rsize);
- rdata->actual_len = subreq->len;
-
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->smbd_conn)
- subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+ rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
- return true;
+ return 0;
}
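This hunk tracks a netfs library interface change assumed by the series: the old ->clamp_length() (returning bool and clamping subreq->len itself) becomes ->prepare_read() (returning 0 or -errno and advertising limits on the request's I/O stream, with netfs doing the slicing). A sketch of the new shape; negotiate_credits() is a hypothetical stand-in for the credit negotiation above:

        static int example_prepare_read(struct netfs_io_subrequest *subreq)
        {
                struct netfs_io_request *rreq = subreq->rreq;
                size_t size;
                int rc;

                rc = negotiate_credits(&size);  /* hypothetical helper */
                if (rc)
                        return rc;              /* errors propagate directly */
                rreq->io_streams[0].sreq_max_len = size;
                return 0;
        }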
/*
@@ -200,59 +193,41 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
* to only read a portion of that, but as long as we read something, the netfs
* helper will call us again so that we can issue another read.
*/
-static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
+static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
struct TCP_Server_Info *server = req->server;
- struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
int rc = 0;
cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
__func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
subreq->transferred, subreq->len);
- if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
- /*
- * As we're issuing a retry, we need to negotiate some new
- * credits otherwise the server may reject the op with
- * INVALID_PARAMETER. Note, however, we may get back less
- * credit than we need to complete the op, in which case, we
- * shorten the op and rely on additional rounds of retry.
- */
- size_t rsize = umin(subreq->len - subreq->transferred,
- cifs_sb->ctx->rsize);
-
- rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len,
- &rdata->credits);
- if (rc)
- goto out;
-
- rdata->credits.in_flight_check = 1;
-
- trace_smb3_rw_credits(rdata->rreq->debug_id,
- rdata->subreq.debug_index,
- rdata->credits.value,
- server->credits, server->in_flight, 0,
- cifs_trace_rw_credits_read_resubmit);
- }
+ rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
+ if (rc)
+ goto failed;
if (req->cfile->invalidHandle) {
do {
rc = cifs_reopen_file(req->cfile, true);
} while (rc == -EAGAIN);
if (rc)
- goto out;
+ goto failed;
}
if (subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
rc = rdata->server->ops->async_readv(rdata);
-out:
if (rc)
- netfs_subreq_terminated(subreq, rc, false);
+ goto failed;
+ return;
+
+failed:
+ netfs_read_subreq_terminated(subreq, rc, false);
}
/*
@@ -316,12 +291,6 @@ static void cifs_rreq_done(struct netfs_io_request *rreq)
inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}
-static void cifs_post_modify(struct inode *inode)
-{
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
-}
-
static void cifs_free_request(struct netfs_io_request *rreq)
{
struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
@@ -369,10 +338,9 @@ const struct netfs_request_ops cifs_req_ops = {
.init_request = cifs_init_request,
.free_request = cifs_free_request,
.free_subrequest = cifs_free_subrequest,
- .clamp_length = cifs_clamp_length,
- .issue_read = cifs_req_issue_read,
+ .prepare_read = cifs_prepare_read,
+ .issue_read = cifs_issue_read,
.done = cifs_rreq_done,
- .post_modify = cifs_post_modify,
.begin_writeback = cifs_begin_writeback,
.prepare_write = cifs_prepare_write,
.issue_write = cifs_issue_write,
@@ -1396,7 +1364,7 @@ int cifs_close(struct inode *inode, struct file *file)
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cfile->status_file_deleted == false) &&
(smb2_can_defer_close(inode, dclose))) {
- if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+ if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
inode_set_mtime_to_ts(inode,
inode_set_ctime_current(inode));
}
@@ -1435,7 +1403,7 @@ void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
struct cifsFileInfo *open_file, *tmp;
- struct list_head tmp_list;
+ LIST_HEAD(tmp_list);
if (!tcon->use_persistent || !tcon->need_reopen_files)
return;
@@ -1443,7 +1411,6 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
tcon->need_reopen_files = false;
cifs_dbg(FYI, "Reopen persistent handles\n");
- INIT_LIST_HEAD(&tmp_list);
/* list all files open on tree connection, reopen resilient handles */
spin_lock(&tcon->open_file_lock);
@@ -2126,9 +2093,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
__u64 length = cifs_flock_len(flock);
- struct list_head tmp_llist;
-
- INIT_LIST_HEAD(&tmp_llist);
+ LIST_HEAD(tmp_llist);
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index bc926ab2555b..28c4e576d460 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -978,9 +978,12 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
switch (opt) {
case Opt_compress:
+ if (!IS_ENABLED(CONFIG_CIFS_COMPRESSION)) {
+ cifs_errorf(fc, "CONFIG_CIFS_COMPRESSION kernel config option is unset\n");
+ goto cifs_parse_mount_err;
+ }
ctx->compress = true;
- cifs_dbg(VFS,
- "SMB3 compression support is experimental\n");
+ cifs_dbg(VFS, "SMB3 compression support is experimental\n");
break;
case Opt_nodfs:
ctx->nodfs = 1;
@@ -1896,14 +1899,17 @@ void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb)
if (ctx->mfsymlinks) {
if (ctx->sfu_emul) {
/*
- * Our SFU ("Services for Unix" emulation does not allow
- * creating symlinks but does allow reading existing SFU
- * symlinks (it does allow both creating and reading SFU
- * style mknod and FIFOs though). When "mfsymlinks" and
+ * Our SFU ("Services for Unix") emulation now allows
+ * both creating new and reading existing SFU symlinks.
+ * Older Linux kernel versions could neither read
+ * existing nor create new SFU symlinks, but creating
+ * and reading SFU style mknod and FIFOs has been
+ * supported for a long time. When "mfsymlinks" and
* "sfu" are both enabled at the same time, it allows
* reading both types of symlinks, but will only create
* them with mfsymlinks format. This allows better
- * Apple compatibility (probably better for Samba too)
+ * Apple compatibility and compatibility with older Linux
+ * kernel clients (probably better for Samba too)
* while still recognizing old Windows style symlinks.
*/
cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index cf577ec0dd0a..69f9d938b336 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -284,6 +284,7 @@ struct smb3_fs_context {
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
enum cifs_reparse_type reparse_type;
+ bool dfs_conn:1; /* set for dfs mounts */
};
extern const struct fs_parameter_spec smb3_fs_parameters[];
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 73e2e6c230b7..647f9bedd9fc 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -529,6 +529,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms = {0};
+ char *symlink_buf_utf16;
+ unsigned int symlink_len_utf16;
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -539,10 +541,11 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
fattr->cf_mode &= ~S_IFMT;
if (fattr->cf_eof == 0) {
+ cifs_dbg(FYI, "Fifo\n");
fattr->cf_mode |= S_IFIFO;
fattr->cf_dtype = DT_FIFO;
return 0;
- } else if (fattr->cf_eof < 8) {
+ } else if (fattr->cf_eof > 1 && fattr->cf_eof < 8) {
fattr->cf_mode |= S_IFREG;
fattr->cf_dtype = DT_REG;
return -EINVAL; /* EOPNOTSUPP? */
@@ -584,7 +587,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
rc = tcon->ses->server->ops->sync_read(xid, &fid, &io_parms,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
- if (memcmp("IntxBLK", pbuf, 8) == 0) {
+ if (memcmp("IntxBLK\0", pbuf, 8) == 0) {
cifs_dbg(FYI, "Block device\n");
fattr->cf_mode |= S_IFBLK;
fattr->cf_dtype = DT_BLK;
@@ -596,7 +599,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
fattr->cf_rdev = MKDEV(mjr, mnr);
}
- } else if (memcmp("IntxCHR", pbuf, 8) == 0) {
+ } else if (memcmp("IntxCHR\0", pbuf, 8) == 0) {
cifs_dbg(FYI, "Char device\n");
fattr->cf_mode |= S_IFCHR;
fattr->cf_dtype = DT_CHR;
@@ -612,10 +615,37 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
cifs_dbg(FYI, "Socket\n");
fattr->cf_mode |= S_IFSOCK;
fattr->cf_dtype = DT_SOCK;
- } else if (memcmp("IntxLNK", pbuf, 7) == 0) {
+ } else if (memcmp("IntxLNK\1", pbuf, 8) == 0) {
cifs_dbg(FYI, "Symlink\n");
fattr->cf_mode |= S_IFLNK;
fattr->cf_dtype = DT_LNK;
+ if ((fattr->cf_eof > 8) && (fattr->cf_eof % 2 == 0)) {
+ symlink_buf_utf16 = kmalloc(fattr->cf_eof-8 + 1, GFP_KERNEL);
+ if (symlink_buf_utf16) {
+ io_parms.offset = 8;
+ io_parms.length = fattr->cf_eof-8 + 1;
+ buf_type = CIFS_NO_BUFFER;
+ rc = tcon->ses->server->ops->sync_read(xid, &fid, &io_parms,
+ &symlink_len_utf16,
+ &symlink_buf_utf16,
+ &buf_type);
+ if ((rc == 0) &&
+ (symlink_len_utf16 > 0) &&
+ (symlink_len_utf16 < fattr->cf_eof-8 + 1) &&
+ (symlink_len_utf16 % 2 == 0)) {
+ fattr->cf_symlink_target =
+ cifs_strndup_from_utf16(symlink_buf_utf16,
+ symlink_len_utf16,
+ true,
+ cifs_sb->local_nls);
+ if (!fattr->cf_symlink_target)
+ rc = -ENOMEM;
+ }
+ kfree(symlink_buf_utf16);
+ } else {
+ rc = -ENOMEM;
+ }
+ }
} else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
cifs_dbg(FYI, "FIFO\n");
fattr->cf_mode |= S_IFIFO;
@@ -625,6 +655,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
fattr->cf_dtype = DT_REG;
rc = -EOPNOTSUPP;
}
+ } else if ((rc == 0) && (bytes_read == 1) && (pbuf[0] == '\0')) {
+ cifs_dbg(FYI, "Socket\n");
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
} else {
fattr->cf_mode |= S_IFREG; /* then it is a file */
fattr->cf_dtype = DT_REG;
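For reference, the SFU special-file layout parsed above is an 8-byte tag followed by an optional payload. The table below is a hedged reconstruction from this diff (read side here, write side in __cifs_sfu_make_node further down), not an authoritative format spec:

        /*
         * "IntxBLK\0"  block device  payload: __le64 major, __le64 minor
         * "IntxCHR\0"  char device   payload: __le64 major, __le64 minor
         * "IntxLNK\1"  symlink       payload: UTF-16LE target, no trailing nul
         * "LnxSOCK"    socket        payload: zeroed major/minor pair
         * "LnxFIFO"    fifo          payload: zeroed major/minor pair
         *
         * Per the eof checks above, a 0-byte file is treated as a fifo and
         * a 1-byte file containing '\0' as a socket.
         */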
@@ -800,10 +834,6 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
fattr->cf_dtype = DT_REG;
- /* clear write bits if ATTR_READONLY is set */
- if (fattr->cf_cifsattrs & ATTR_READONLY)
- fattr->cf_mode &= ~(S_IWUGO);
-
/*
* Don't accept zero nlink from non-unix servers unless
* delete is pending. Instead mark it as unknown.
@@ -816,6 +846,10 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
}
}
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
out_reparse:
if (S_ISLNK(fattr->cf_mode)) {
if (likely(data->symlink_target))
@@ -1233,11 +1267,14 @@ handle_mnt_opt:
__func__, rc);
goto out;
}
- }
-
- /* fill in remaining high mode bits e.g. SUID, VTX */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ /* fill in remaining high mode bits e.g. SUID, VTX */
cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
+ else if (!(tcon->posix_extensions))
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
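The net effect of the reordered hunks above is a precedence chain for deriving the mode: an ACL-derived mode wins; otherwise SFU emulation fills in the remaining high bits; otherwise, for non-POSIX mounts only, ATTR_READONLY strips the write bits. Sketched below, where have_acl_mode is a hypothetical stand-in for the ACL branch condition:

        if (have_acl_mode) {
                /* mode came from cifsacl/modefromsid */
        } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
                cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
        } else if (!tcon->posix_extensions &&
                   (fattr->cf_cifsattrs & ATTR_READONLY)) {
                fattr->cf_mode &= ~(S_IWUGO);
        }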
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 9bb5c869f4db..2ce193609d8b 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -90,23 +90,23 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
}
src_file = fdget(srcfd);
- if (!src_file.file) {
+ if (!fd_file(src_file)) {
rc = -EBADF;
goto out_drop_write;
}
- if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+ if (fd_file(src_file)->f_op->unlocked_ioctl != cifs_ioctl) {
rc = -EBADF;
cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
goto out_fput;
}
- src_inode = file_inode(src_file.file);
+ src_inode = file_inode(fd_file(src_file));
rc = -EINVAL;
if (S_ISDIR(src_inode->i_mode))
goto out_fput;
- rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+ rc = cifs_file_copychunk_range(xid, fd_file(src_file), 0, dst_file, 0,
src_inode->i_size, 0);
if (rc > 0)
rc = 0;
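These ioctl.c hunks follow the VFS conversion to opaque struct fd accessors: fd_file() replaces direct access to the .file member. A sketch of the idiom:

        struct fd src = fdget(srcfd);

        if (!fd_file(src))              /* was: !src.file */
                return -EBADF;
        /* ... operate on fd_file(src) instead of src.file ... */
        fdput(src);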
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index 80099bbb333b..47ddeb7fa111 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -606,6 +606,9 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
/* BB what if DFS and this volume is on different share? BB */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon,
+ full_path, S_IFLNK, 0, symname);
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
} else if (pTcon->unix_ext) {
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index c6f11e6f9eb9..054f10ebf65a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -145,6 +145,9 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
mutex_init(&ret_buf->fscache_lock);
#endif
trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
return ret_buf;
}
@@ -751,12 +754,11 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *cfile = NULL;
struct file_list *tmp_list, *tmp_next_list;
- struct list_head file_head;
+ LIST_HEAD(file_head);
if (cifs_inode == NULL)
return;
- INIT_LIST_HEAD(&file_head);
spin_lock(&cifs_inode->open_file_lock);
list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
if (delayed_work_pending(&cfile->deferred)) {
@@ -787,9 +789,8 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
struct file_list *tmp_list, *tmp_next_list;
- struct list_head file_head;
+ LIST_HEAD(file_head);
- INIT_LIST_HEAD(&file_head);
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
if (delayed_work_pending(&cfile->deferred)) {
@@ -819,11 +820,10 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
struct cifsFileInfo *cfile;
struct file_list *tmp_list, *tmp_next_list;
- struct list_head file_head;
void *page;
const char *full_path;
+ LIST_HEAD(file_head);
- INIT_LIST_HEAD(&file_head);
page = alloc_dentry_path();
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
@@ -1111,7 +1111,8 @@ static void tcon_super_cb(struct super_block *sb, void *arg)
t2 = cifs_sb_master_tcon(cifs_sb);
spin_lock(&t2->tc_lock);
- if (t1->ses == t2->ses &&
+ if ((t1->ses == t2->ses ||
+ t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
t1->ses->server == t2->ses->server &&
t2->origin_fullpath &&
dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 4a517b280f2b..0f788031b740 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -240,7 +240,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
ctx->source = NULL;
goto out;
}
- ctx->dfs_automount = is_dfs_mount(mntpt);
+ ctx->dfs_automount = ctx->dfs_conn = is_dfs_mount(mntpt);
cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
__func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 48c27581ec51..3b48a093cfb1 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -108,8 +108,8 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
buf->InodeType = cpu_to_le64(type);
buf->ReparseDataLength = cpu_to_le16(len + dlen -
sizeof(struct reparse_data_buffer));
- *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MAJOR(dev) << 32) |
- MINOR(dev));
+ *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MINOR(dev) << 32) |
+ MAJOR(dev));
iov->iov_base = buf;
iov->iov_len = len + dlen;
return 0;
@@ -468,7 +468,7 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
- fattr->cf_rdev = wsl_mkdev(v);
+ fattr->cf_rdev = reparse_mkdev(v);
} while (next);
out:
fattr->cf_dtype = S_DT(fattr->cf_mode);
@@ -485,11 +485,11 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
switch (le64_to_cpu(buf->InodeType)) {
case NFS_SPECFILE_CHR:
fattr->cf_mode |= S_IFCHR;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_BLK:
fattr->cf_mode |= S_IFBLK;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_FIFO:
fattr->cf_mode |= S_IFIFO;
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 2c0644bc4e65..158e7b7aae64 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -18,14 +18,7 @@
*/
#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
-static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
-{
- u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
-
- return MKDEV(v >> 32, v & 0xffffffff);
-}
-
-static inline dev_t wsl_mkdev(void *ptr)
+static inline dev_t reparse_mkdev(void *ptr)
{
u64 v = le64_to_cpu(*(__le64 *)ptr);
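The rename pairs with the reparse.c fix above: device numbers are now encoded with the minor in the high 32 bits and the major in the low 32, so a single helper can decode both NFS and WSL reparse buffers. The decode body is cut off in this diff; assuming it mirrors the encode, it would read:

        static inline dev_t example_reparse_mkdev(void *ptr)
        {
                u64 v = le64_to_cpu(*(__le64 *)ptr);

                return MKDEV(v & 0xffffffff, v >> 32);  /* (major, minor) */
        }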
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index e1f2feb56f45..e03c91a49650 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -1078,7 +1078,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
/*
 * Check if mounted with the 'sfu' mount parm.
 * SFU emulation should work with all servers, but only
- * supports block and char device (no socket & fifo),
+ * supports block and char devices, sockets and fifos,
* and was used by default in earlier versions of Windows
*/
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
index c23478ab1cf8..e301349b0078 100644
--- a/fs/smb/client/smb2file.c
+++ b/fs/smb/client/smb2file.c
@@ -21,7 +21,7 @@
#include "cifs_unicode.h"
#include "fscache.h"
#include "smb2proto.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
{
@@ -196,9 +196,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
__u64 length = 1 + flock->fl_end - flock->fl_start;
- struct list_head tmp_llist;
-
- INIT_LIST_HEAD(&tmp_llist);
+ LIST_HEAD(tmp_llist);
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 11a1c53c64e0..b992117377e9 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -24,7 +24,7 @@
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cached_dir.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
static struct reparse_data_buffer *reparse_buf_ptr(struct kvec *iov)
{
@@ -315,7 +315,7 @@ replay_again:
SMB2_O_INFO_FILE, 0,
sizeof(struct smb311_posix_qinfo *) +
(PATH_MAX * 2) +
- (sizeof(struct cifs_sid) * 2), 0, NULL);
+ (sizeof(struct smb_sid) * 2), 0, NULL);
} else {
rc = SMB2_query_info_init(tcon, server,
&rqst[num_rqst],
@@ -325,7 +325,7 @@ replay_again:
SMB2_O_INFO_FILE, 0,
sizeof(struct smb311_posix_qinfo *) +
(PATH_MAX * 2) +
- (sizeof(struct cifs_sid) * 2), 0, NULL);
+ (sizeof(struct smb_sid) * 2), 0, NULL);
}
if (!rc && (!cfile || num_rqst > 1)) {
smb2_set_next_command(tcon, &rqst[num_rqst]);
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index ac1895358908..b05313acf9b2 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -12,7 +12,7 @@
#include "cifs_debug.h"
#include "smb2pdu.h"
#include "smb2proto.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
#include "smb2glob.h"
#include "trace.h"
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 677ef6f99a5b..f3c4b70b77b9 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -13,7 +13,7 @@
#include "smb2proto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
#include "smb2glob.h"
#include "nterr.h"
#include "cached_dir.h"
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index e6540072ffb0..1ee2dd4a1cae 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -13,6 +13,7 @@
#include <linux/sort.h>
#include <crypto/aead.h>
#include <linux/fiemap.h>
+#include <linux/folio_queue.h>
#include <uapi/linux/magic.h>
#include "cifsfs.h"
#include "cifsglob.h"
@@ -21,7 +22,7 @@
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
#include "smbdirect.h"
@@ -301,7 +302,8 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
unsigned int /*enum smb3_rw_credits_trace*/ trace)
{
struct cifs_credits *credits = &subreq->credits;
- int new_val = DIV_ROUND_UP(subreq->actual_len, SMB2_MAX_BUFFER_SIZE);
+ int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
+ SMB2_MAX_BUFFER_SIZE);
int scredits, in_flight;
if (!credits->value || credits->value == new_val)
@@ -3048,11 +3050,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
-static struct cifs_ntsd *
+static struct smb_ntsd *
get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
unsigned int xid;
int rc = -EOPNOTSUPP;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -3077,11 +3079,11 @@ get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
}
-static struct cifs_ntsd *
+static struct smb_ntsd *
get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
const char *path, u32 *pacllen, u32 info)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
unsigned int xid;
int rc;
@@ -3144,7 +3146,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
}
static int
-set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
struct inode *inode, const char *path, int aclflag)
{
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -3202,12 +3204,12 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
}
/* Retrieve an ACL from the server */
-static struct cifs_ntsd *
+static struct smb_ntsd *
get_smb2_acl(struct cifs_sb_info *cifs_sb,
struct inode *inode, const char *path,
u32 *pacllen, u32 info)
{
- struct cifs_ntsd *pntsd = NULL;
+ struct smb_ntsd *pntsd = NULL;
struct cifsFileInfo *open_file = NULL;
if (inode && !(info & SACL_SECINFO))
@@ -4392,30 +4394,86 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
}
/*
- * Clear a read buffer, discarding the folios which have XA_MARK_0 set.
+ * Clear a read buffer, discarding the folios which have the 1st mark set.
*/
-static void cifs_clear_xarray_buffer(struct xarray *buffer)
+static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
{
+ struct folio_queue *folioq;
+
+ while ((folioq = buffer)) {
+ for (int s = 0; s < folioq_count(folioq); s++)
+ if (folioq_is_marked(folioq, s))
+ folio_put(folioq_folio(folioq, s));
+ buffer = folioq->next;
+ kfree(folioq);
+ }
+}
+
+/*
+ * Allocate buffer space into a folio queue.
+ */
+static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
+{
+ struct folio_queue *buffer = NULL, *tail = NULL, *p;
struct folio *folio;
+ unsigned int slot;
+
+ do {
+ if (!tail || folioq_full(tail)) {
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (!p)
+ goto nomem;
+ folioq_init(p);
+ if (tail) {
+ tail->next = p;
+ p->prev = tail;
+ } else {
+ buffer = p;
+ }
+ tail = p;
+ }
+
+ folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
+ if (!folio)
+ goto nomem;
+
+ slot = folioq_append_mark(tail, folio);
+ size -= folioq_folio_size(tail, slot);
+ } while (size > 0);
- XA_STATE(xas, buffer, 0);
+ return buffer;
- rcu_read_lock();
- xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
- folio_put(folio);
+nomem:
+ cifs_clear_folioq_buffer(buffer);
+ return NULL;
+}
+
+/*
+ * Copy data from an iterator to the folios in a folio queue buffer.
+ */
+static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
+ struct folio_queue *buffer)
+{
+ for (; buffer; buffer = buffer->next) {
+ for (int s = 0; s < folioq_count(buffer); s++) {
+ struct folio *folio = folioq_folio(buffer, s);
+ size_t part = folioq_folio_size(buffer, s);
+
+ part = umin(part, size);
+
+ if (copy_folio_from_iter(folio, 0, part, iter) != part)
+ return false;
+ size -= part;
+ }
}
- rcu_read_unlock();
- xa_destroy(buffer);
+ return true;
}
void
smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
{
- int i;
-
- for (i = 0; i < num_rqst; i++)
- if (!xa_empty(&rqst[i].rq_buffer))
- cifs_clear_xarray_buffer(&rqst[i].rq_buffer);
+ for (int i = 0; i < num_rqst; i++)
+ cifs_clear_folioq_buffer(rqst[i].rq_buffer);
}
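The xarray read/crypt buffers are replaced throughout by folio queues. A hedged usage sketch of the three helpers introduced above, following the transform path: build a queue sized for the payload, copy the data in, expose it as an iterator, and free it (dropping the marked folios) when done:

        struct folio_queue *buffer;

        buffer = cifs_alloc_folioq_buffer(size);
        if (!buffer)
                return -ENOMEM;
        if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
                cifs_clear_folioq_buffer(buffer);
                return -EIO;
        }
        iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE, buffer, 0, 0, size);
        /* ... after the request is sent ... */
        cifs_clear_folioq_buffer(buffer);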
/*
@@ -4436,52 +4494,32 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
- struct page *page;
unsigned int orig_len = 0;
- int i, j;
int rc = -ENOMEM;
- for (i = 1; i < num_rqst; i++) {
+ for (int i = 1; i < num_rqst; i++) {
struct smb_rqst *old = &old_rq[i - 1];
struct smb_rqst *new = &new_rq[i];
- struct xarray *buffer = &new->rq_buffer;
- size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0;
+ struct folio_queue *buffer;
+ size_t size = iov_iter_count(&old->rq_iter);
orig_len += smb_rqst_len(server, old);
new->rq_iov = old->rq_iov;
new->rq_nvec = old->rq_nvec;
- xa_init(buffer);
-
if (size > 0) {
- unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-
- for (j = 0; j < npages; j++) {
- void *o;
-
- rc = -ENOMEM;
- page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!page)
- goto err_free;
- page->index = j;
- o = xa_store(buffer, j, page, GFP_KERNEL);
- if (xa_is_err(o)) {
- rc = xa_err(o);
- put_page(page);
- goto err_free;
- }
+ buffer = cifs_alloc_folioq_buffer(size);
+ if (!buffer)
+ goto err_free;
- xa_set_mark(buffer, j, XA_MARK_0);
+ new->rq_buffer = buffer;
+ iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
+ buffer, 0, 0, size);
- seg = min_t(size_t, size - copied, PAGE_SIZE);
- if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) {
- rc = -EFAULT;
- goto err_free;
- }
- copied += seg;
+ if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
+ rc = -EIO;
+ goto err_free;
}
- iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
- buffer, 0, size);
}
}
@@ -4545,22 +4583,23 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
}
static int
-cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
- unsigned int skip, struct iov_iter *iter)
+cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
+ size_t skip, struct iov_iter *iter)
{
- struct page *page;
- unsigned long index;
-
- xa_for_each(pages, index, page) {
- size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size);
-
- n = copy_page_to_iter(page, skip, len, iter);
- if (n != len) {
- cifs_dbg(VFS, "%s: something went wrong\n", __func__);
- return -EIO;
+ for (; folioq; folioq = folioq->next) {
+ for (int s = 0; s < folioq_count(folioq); s++) {
+ struct folio *folio = folioq_folio(folioq, s);
+ size_t fsize = folio_size(folio);
+ size_t n, len = umin(fsize - skip, data_size);
+
+ n = copy_folio_to_iter(folio, skip, len, iter);
+ if (n != len) {
+ cifs_dbg(VFS, "%s: something went wrong\n", __func__);
+ return -EIO;
+ }
+ data_size -= n;
+ skip = 0;
}
- data_size -= n;
- skip = 0;
}
return 0;
@@ -4568,8 +4607,8 @@ cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
- char *buf, unsigned int buf_len, struct xarray *pages,
- unsigned int pages_len, bool is_offloaded)
+ char *buf, unsigned int buf_len, struct folio_queue *buffer,
+ unsigned int buffer_len, bool is_offloaded)
{
unsigned int data_offset;
unsigned int data_len;
@@ -4666,7 +4705,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
return 0;
}
- if (data_len > pages_len - pad_len) {
+ if (data_len > buffer_len - pad_len) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
if (is_offloaded)
@@ -4677,8 +4716,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
/* Copy the data to the output I/O iterator. */
- rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
- cur_off, &rdata->subreq.io_iter);
+ rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
+ cur_off, &rdata->subreq.io_iter);
if (rdata->result != 0) {
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
@@ -4686,12 +4725,11 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
dequeue_mid(mid, rdata->result);
return 0;
}
- rdata->got_bytes = pages_len;
+ rdata->got_bytes = buffer_len;
} else if (buf_len >= data_offset + data_len) {
/* read response payload is in buf */
- WARN_ONCE(pages && !xa_empty(pages),
- "read data can be either in buf or in pages");
+ WARN_ONCE(buffer, "read data can be either in buf or in buffer");
length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
if (length < 0)
return length;
@@ -4717,7 +4755,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
struct smb2_decrypt_work {
struct work_struct decrypt;
struct TCP_Server_Info *server;
- struct xarray buffer;
+ struct folio_queue *buffer;
char *buf;
unsigned int len;
};
@@ -4731,7 +4769,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
struct mid_q_entry *mid;
struct iov_iter iter;
- iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len);
+ iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
&iter, true);
if (rc) {
@@ -4747,7 +4785,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->decrypted = true;
rc = handle_read_data(dw->server, mid, dw->buf,
dw->server->vals->read_rsp_size,
- &dw->buffer, dw->len,
+ dw->buffer, dw->len,
true);
if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
@@ -4780,7 +4818,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
free_pages:
- cifs_clear_xarray_buffer(&dw->buffer);
+ cifs_clear_folioq_buffer(dw->buffer);
cifs_small_buf_release(dw->buf);
kfree(dw);
}
@@ -4790,20 +4828,17 @@ static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
int *num_mids)
{
- struct page *page;
char *buf = server->smallbuf;
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
struct iov_iter iter;
- unsigned int len, npages;
+ unsigned int len;
unsigned int buflen = server->pdu_size;
int rc;
- int i = 0;
struct smb2_decrypt_work *dw;
dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
if (!dw)
return -ENOMEM;
- xa_init(&dw->buffer);
INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
dw->server = server;
@@ -4819,26 +4854,14 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
server->vals->read_rsp_size;
dw->len = len;
- npages = DIV_ROUND_UP(len, PAGE_SIZE);
+ len = round_up(dw->len, PAGE_SIZE);
rc = -ENOMEM;
- for (; i < npages; i++) {
- void *old;
-
- page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!page)
- goto discard_data;
- page->index = i;
- old = xa_store(&dw->buffer, i, page, GFP_KERNEL);
- if (xa_is_err(old)) {
- rc = xa_err(old);
- put_page(page);
- goto discard_data;
- }
- xa_set_mark(&dw->buffer, i, XA_MARK_0);
- }
+ dw->buffer = cifs_alloc_folioq_buffer(len);
+ if (!dw->buffer)
+ goto discard_data;
- iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE);
+ iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
/* Read the data into the buffer and clear excess bufferage. */
rc = cifs_read_iter_from_socket(server, &iter, dw->len);
@@ -4846,9 +4869,12 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
goto discard_data;
server->total_read += rc;
- if (rc < npages * PAGE_SIZE)
- iov_iter_zero(npages * PAGE_SIZE - rc, &iter);
- iov_iter_revert(&iter, npages * PAGE_SIZE);
+ if (rc < len) {
+ struct iov_iter tmp = iter;
+
+ iov_iter_advance(&tmp, rc);
+ iov_iter_zero(len - rc, &tmp);
+ }
iov_iter_truncate(&iter, dw->len);
rc = cifs_discard_remaining_data(server);
@@ -4883,7 +4909,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
(*mid)->decrypted = true;
rc = handle_read_data(server, *mid, buf,
server->vals->read_rsp_size,
- &dw->buffer, dw->len, false);
+ dw->buffer, dw->len, false);
if (rc >= 0) {
if (server->ops->is_network_name_deleted) {
server->ops->is_network_name_deleted(buf,
@@ -4893,7 +4919,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
}
free_pages:
- cifs_clear_xarray_buffer(&dw->buffer);
+ cifs_clear_folioq_buffer(dw->buffer);
free_dw:
kfree(dw);
return rc;
@@ -5055,9 +5081,10 @@ static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
return 0;
}
-static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, umode_t mode, dev_t dev)
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname)
{
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
@@ -5065,30 +5092,64 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_fid fid;
unsigned int bytes_written;
- struct win_dev pdev = {};
- struct kvec iov[2];
+ u8 type[8];
+ int type_len = 0;
+ struct {
+ __le64 major;
+ __le64 minor;
+ } __packed pdev = {};
+ __le16 *symname_utf16 = NULL;
+ u8 *data = NULL;
+ int data_len = 0;
+ struct kvec iov[3];
__u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
int rc;
switch (mode & S_IFMT) {
case S_IFCHR:
- strscpy(pdev.type, "IntxCHR");
+ type_len = 8;
+ memcpy(type, "IntxCHR\0", type_len);
pdev.major = cpu_to_le64(MAJOR(dev));
pdev.minor = cpu_to_le64(MINOR(dev));
+ data = (u8 *)&pdev;
+ data_len = sizeof(pdev);
break;
case S_IFBLK:
- strscpy(pdev.type, "IntxBLK");
+ type_len = 8;
+ memcpy(type, "IntxBLK\0", type_len);
pdev.major = cpu_to_le64(MAJOR(dev));
pdev.minor = cpu_to_le64(MINOR(dev));
+ data = (u8 *)&pdev;
+ data_len = sizeof(pdev);
+ break;
+ case S_IFLNK:
+ type_len = 8;
+ memcpy(type, "IntxLNK\1", type_len);
+ symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname),
+ &data_len, cifs_sb->local_nls,
+ NO_MAP_UNI_RSVD);
+ if (!symname_utf16) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ data_len -= 2; /* the symlink target is stored without its trailing wide NUL */
+ data = (u8 *)symname_utf16;
break;
case S_IFSOCK:
- strscpy(pdev.type, "LnxSOCK");
+ type_len = 8;
+ strscpy(type, "LnxSOCK");
+ data = (u8 *)&pdev;
+ data_len = sizeof(pdev);
break;
case S_IFIFO:
- strscpy(pdev.type, "LnxFIFO");
+ type_len = 8;
+ strscpy(type, "LnxFIFO");
+ data = (u8 *)&pdev;
+ data_len = sizeof(pdev);
break;
default:
- return -EPERM;
+ rc = -EPERM;
+ goto out;
}
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
@@ -5098,17 +5159,26 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
rc = server->ops->open(xid, &oparms, &oplock, NULL);
if (rc)
- return rc;
+ goto out;
- io_parms.pid = current->tgid;
- io_parms.tcon = tcon;
- io_parms.length = sizeof(pdev);
- iov[1].iov_base = &pdev;
- iov[1].iov_len = sizeof(pdev);
+ if (type_len + data_len > 0) {
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.length = type_len + data_len;
+ iov[1].iov_base = type;
+ iov[1].iov_len = type_len;
+ iov[2].iov_base = data;
+ iov[2].iov_len = data_len;
+
+ rc = server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written,
+ iov, ARRAY_SIZE(iov)-1);
+ }
- rc = server->ops->sync_write(xid, &fid, &io_parms,
- &bytes_written, iov, 1);
server->ops->close(xid, tcon, &fid);
+
+out:
+ kfree(symname_utf16);
return rc;
}
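
For reference, the SFU encoding that the rewritten __cifs_sfu_make_node() emits is an 8-byte type tag ("IntxCHR\0", "IntxBLK\0", "IntxLNK\1", "LnxSOCK", "LnxFIFO") followed by a type-specific payload: two __le64 words (major, minor) for device nodes, the zeroed pdev for sockets and FIFOs, or the UTF-16LE target without its trailing wide NUL for symlinks. A userspace sketch of the char-device blob; cpu_to_le64() is replaced here by simply assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Payload that follows the 8-byte tag for device nodes: two 64-bit LE words. */
struct sfu_dev {
	uint64_t major;
	uint64_t minor;
} __attribute__((packed));

int main(void)
{
	unsigned char blob[8 + sizeof(struct sfu_dev)];
	struct sfu_dev dev = { .major = 8, .minor = 1 };

	memcpy(blob, "IntxCHR\0", 8);        /* type tag, trailing NUL included */
	memcpy(blob + 8, &dev, sizeof(dev)); /* kernel uses cpu_to_le64(); LE host assumed */

	for (size_t i = 0; i < sizeof(blob); i++)
		printf("%02x ", blob[i]);
	printf("\n");
	return 0;
}
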
@@ -5120,7 +5190,7 @@ int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
int rc;
rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
- full_path, mode, dev);
+ full_path, mode, dev, NULL);
if (rc)
return rc;
@@ -5149,7 +5219,7 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
/*
* Check if mounted with the 'sfu' mount parm.
* SFU emulation should work with all servers, but only
- * supports block and char device (no socket & fifo),
+ * supports block and char devices, sockets and fifos,
* and was used by default in earlier versions of Windows
*/
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 88dc49d67037..bb225758448a 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -32,7 +32,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
@@ -42,6 +42,7 @@
#include "dfs_cache.h"
#endif
#include "cached_dir.h"
+#include "compress.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -2623,7 +2624,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
unsigned int group_offset = 0;
struct smb3_acl acl = {};
- *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+ *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
if (set_owner) {
/* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
@@ -2672,21 +2673,21 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
ptr += sizeof(struct smb3_acl);
/* create one ACE to hold the mode embedded in reserved special SID */
- acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
+ acelen = setup_special_mode_ACE((struct smb_ace *)ptr, (__u64)mode);
ptr += acelen;
acl_size = acelen + sizeof(struct smb3_acl);
ace_count = 1;
if (set_owner) {
/* no need to reallocate the buffer to add the two more ACEs; plenty of space */
- acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
+ acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
ptr += acelen;
acl_size += acelen;
ace_count += 1;
}
/* and one more ACE to allow access for authenticated users */
- acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
+ acelen = setup_authusers_ACE((struct smb_ace *)ptr);
ptr += acelen;
acl_size += acelen;
ace_count += 1;
@@ -3906,7 +3907,7 @@ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
{
size_t output_len = sizeof(struct smb311_posix_qinfo *) +
- (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
+ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
*plen = 0;
return query_info(xid, tcon, persistent_fid, volatile_fid,
@@ -4498,9 +4499,7 @@ static void smb2_readv_worker(struct work_struct *work)
struct cifs_io_subrequest *rdata =
container_of(work, struct cifs_io_subrequest, subreq.work);
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result, true);
+ netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
}
static void
@@ -4532,7 +4531,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
- rdata->actual_len, rdata->subreq.len - rdata->subreq.transferred);
+ rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -4554,6 +4553,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
+ __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
/* reset bytes number since we can not check a sign */
@@ -4588,7 +4588,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
rdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
rdata->subreq.start + rdata->subreq.transferred,
- rdata->actual_len,
+ rdata->subreq.len - rdata->subreq.transferred,
rdata->result);
} else
trace_smb3_read_done(rdata->rreq->debug_id,
@@ -4603,9 +4603,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
} else {
- if (rdata->got_bytes < rdata->actual_len &&
- rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
- ictx->remote_i_size) {
+ size_t trans = rdata->subreq.transferred + rdata->got_bytes;
+ if (trans < rdata->subreq.len &&
+ rdata->subreq.start + trans == ictx->remote_i_size) {
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
}
@@ -4614,6 +4614,8 @@ smb2_readv_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_read_response_clear);
rdata->credits.value = 0;
+ rdata->subreq.transferred += rdata->got_bytes;
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
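
The EOF check above no longer uses actual_len: a subrequest is deemed to have hit EOF when it returned fewer bytes than requested and the data received stops exactly at the server-reported file size. The same predicate as a stand-alone function, in illustrative userspace C:

#include <stdbool.h>
#include <stdio.h>

/* A short read hits EOF when it stops exactly at the remote file size. */
static bool hit_eof(size_t start, size_t transferred, size_t got_bytes,
		    size_t req_len, size_t remote_i_size)
{
	size_t trans = transferred + got_bytes;

	return trans < req_len && start + trans == remote_i_size;
}

int main(void)
{
	/* 16 KiB requested at offset 0 of a 10000-byte file; 10000 bytes arrive. */
	printf("eof: %d\n", hit_eof(0, 0, 10000, 16384, 10000)); /* prints 1 */
	return 0;
}
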
@@ -4648,7 +4650,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
io_parms.server = server = rdata->server;
io_parms.offset = subreq->start + subreq->transferred;
- io_parms.length = rdata->actual_len;
+ io_parms.length = subreq->len - subreq->transferred;
io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
io_parms.pid = rdata->req->pid;
@@ -4669,7 +4671,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
shdr = (struct smb2_hdr *)buf;
if (rdata->credits.value > 0) {
- shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->actual_len,
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
SMB2_MAX_BUFFER_SIZE));
credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
if (server->credits >= server->max_credits)
@@ -4697,7 +4699,8 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
rdata->xid, io_parms.persistent_fid,
io_parms.tcon->tid,
io_parms.tcon->ses->Suid,
- io_parms.offset, rdata->actual_len, rc);
+ io_parms.offset,
+ subreq->len - subreq->transferred, rc);
}
async_readv_out:
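
The credit charge computed above is simply the I/O length divided by SMB2_MAX_BUFFER_SIZE (65536 in the SMB2 headers), rounded up; basing it on io_parms.length rather than rdata->actual_len keeps the charge in step with what is actually being requested after partial progress. A worked example:

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int lens[] = { 4096, 65536, 65537, 196608 };

	for (int i = 0; i < 4; i++)
		printf("%6u-byte read -> credit charge %u\n",
		       lens[i], DIV_ROUND_UP(lens[i], SMB2_MAX_BUFFER_SIZE));
	return 0; /* charges: 1, 1, 2, 3 */
}
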
@@ -4863,7 +4866,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
#endif
if (result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
- trace_smb3_write_err(wdata->xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->subreq.start,
wdata->subreq.len, wdata->result);
@@ -4871,7 +4876,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
pr_warn_once("Out of space writing to %s\n",
tcon->tree_name);
} else
- trace_smb3_write_done(0 /* no xid */,
+ trace_smb3_write_done(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
wdata->subreq.start, wdata->subreq.len);
@@ -4880,6 +4887,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
cifs_write_subrequest_terminated(wdata, result ?: written, true);
release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
@@ -4948,7 +4956,9 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(wdata->xid,
+ trace_smb3_write_enter(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5020,11 +5030,17 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
flags |= CIFS_HAS_CREDITS;
}
+ /* XXX: compression + encryption is unsupported for now */
+ if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst))
+ flags |= CIFS_COMPRESS_REQ;
+
rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
wdata, flags, &wdata->credits);
/* Can't touch wdata if rc == 0 */
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5104,7 +5120,7 @@ replay_again:
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(xid, io_parms->persistent_fid,
+ trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
io_parms->tcon->tid, io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length);
@@ -5125,7 +5141,7 @@ replay_again:
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5134,7 +5150,7 @@ replay_again:
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
- trace_smb3_write_done(xid,
+ trace_smb3_write_done(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5683,7 +5699,7 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
- struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
+ struct smb_ntsd *pnntsd, int pacllen, int aclflag)
{
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 5c458ab3b05a..076d9e83e1a0 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -364,8 +364,8 @@ struct create_posix_rsp {
u32 nlink;
u32 reparse_tag;
u32 mode;
- struct cifs_sid owner; /* var-sized on the wire */
- struct cifs_sid group; /* var-sized on the wire */
+ struct smb_sid owner; /* var-sized on the wire */
+ struct smb_sid group; /* var-sized on the wire */
} __packed;
#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
@@ -408,8 +408,8 @@ struct smb2_posix_info {
struct smb2_posix_info_parsed {
const struct smb2_posix_info *base;
size_t size;
- struct cifs_sid owner;
- struct cifs_sid group;
+ struct smb_sid owner;
+ struct smb_sid group;
int name_len;
const u8 *name;
};
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index b208232b12a2..c7e1b149877a 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -238,7 +238,7 @@ extern int SMB2_set_info_init(struct cifs_tcon *tcon,
extern void SMB2_set_info_free(struct smb_rqst *rqst);
extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
- struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
+ struct smb_ntsd *pnntsd, int pacllen, int aclflag);
extern int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct smb2_file_full_ea_info *buf, int len);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 1476c445cadc..e4636fca821d 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -23,7 +23,7 @@
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_debug.h"
-#include "smb2status.h"
+#include "../common/smb2status.h"
#include "smb2glob.h"
static int
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 7bcc379014ca..0c64b37e2660 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/folio_queue.h>
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
@@ -1585,10 +1586,8 @@ static struct smbd_connection *_smbd_get_connection(
conn_param.initiator_depth = 0;
conn_param.responder_resources =
- info->id->device->attrs.max_qp_rd_atom
- < SMBD_CM_RESPONDER_RESOURCES ?
- info->id->device->attrs.max_qp_rd_atom :
- SMBD_CM_RESPONDER_RESOURCES;
+ min(info->id->device->attrs.max_qp_rd_atom,
+ SMBD_CM_RESPONDER_RESOURCES);
info->responder_resources = conn_param.responder_resources;
log_rdma_mr(INFO, "responder_resources=%d\n",
info->responder_resources);
@@ -2463,6 +2462,8 @@ static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
start = 0;
}
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
return ret;
}
@@ -2519,50 +2520,65 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
start = 0;
}
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
return ret;
}
/*
- * Extract folio fragments from an XARRAY-class iterator and add them to an
- * RDMA list. The folios are not pinned.
+ * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
+ * list. The folios are not pinned.
*/
-static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter,
+static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
struct smb_extract_to_rdma *rdma,
ssize_t maxsize)
{
- struct xarray *xa = iter->xarray;
- struct folio *folio;
- loff_t start = iter->xarray_start + iter->iov_offset;
- pgoff_t index = start / PAGE_SIZE;
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
ssize_t ret = 0;
- size_t off, len;
- XA_STATE(xas, xa, index);
+ size_t offset = iter->iov_offset;
- rcu_read_lock();
+ BUG_ON(!folioq);
- xas_for_each(&xas, folio, ULONG_MAX) {
- if (xas_retry(&xas, folio))
- continue;
- if (WARN_ON(xa_is_value(folio)))
- break;
- if (WARN_ON(folio_test_hugetlb(folio)))
- break;
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ if (WARN_ON_ONCE(!folioq))
+ return -EIO;
+ slot = 0;
+ }
- off = offset_in_folio(folio, start);
- len = min_t(size_t, maxsize, folio_size(folio) - off);
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t fsize = folioq_folio_size(folioq, slot);
- if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) {
- rcu_read_unlock();
- return -EIO;
+ if (offset < fsize) {
+ size_t part = umin(maxsize - ret, fsize - offset);
+
+ if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
+ return -EIO;
+
+ offset += part;
+ ret += part;
}
- maxsize -= len;
- ret += len;
- if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
- break;
- }
+ if (offset >= fsize) {
+ offset = 0;
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (!folioq->next) {
+ WARN_ON_ONCE(ret < iter->count);
+ break;
+ }
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+	} while (rdma->nr_sge < rdma->max_sge && ret < maxsize);
- rcu_read_unlock();
+ iter->folioq = folioq;
+ iter->folioq_slot = slot;
+ iter->iov_offset = offset;
+ iter->count -= ret;
return ret;
}
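
The extraction loop above keeps a (segment, slot, offset) cursor into the folio_queue: each slot's folio is consumed in turn, the cursor moves to slot 0 of the next segment when the current one is exhausted, and the final position is written back into the iterator. A userspace sketch of the same traversal over a chain of fixed-slot segments; the types and names below are stand-ins, not the kernel folio_queue API:

#include <stdio.h>

#define NR_SLOTS 3

/* Userspace stand-in for struct folio_queue: a chain of fixed-slot segments. */
struct segq {
	size_t sizes[NR_SLOTS];		/* folioq_folio_size() analogue */
	struct segq *next;
};

/* Advance a (segment, slot, offset) cursor by 'want' bytes, mirroring the loop. */
static size_t walk(const struct segq *seg, unsigned int slot, size_t offset,
		   size_t want)
{
	size_t done = 0;

	do {
		size_t fsize = seg->sizes[slot];

		if (offset < fsize) {
			size_t part = want - done < fsize - offset ?
				      want - done : fsize - offset;

			printf("slot %u: take %zu bytes at offset %zu\n",
			       slot, part, offset);
			offset += part;
			done += part;
		}
		if (offset >= fsize) {		/* move to the next slot/segment */
			offset = 0;
			if (++slot >= NR_SLOTS) {
				if (!seg->next)
					break;	/* end of the chain */
				seg = seg->next;
				slot = 0;
			}
		}
	} while (done < want);

	return done;
}

int main(void)
{
	struct segq b = { { 4096, 4096, 4096 }, NULL };
	struct segq a = { { 4096, 4096, 4096 }, &b };

	printf("extracted %zu bytes\n", walk(&a, 0, 1000, 16000));
	return 0;
}
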
@@ -2590,17 +2606,15 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
case ITER_KVEC:
ret = smb_extract_kvec_to_rdma(iter, rdma, len);
break;
- case ITER_XARRAY:
- ret = smb_extract_xarray_to_rdma(iter, rdma, len);
+ case ITER_FOLIOQ:
+ ret = smb_extract_folioq_to_rdma(iter, rdma, len);
break;
default:
WARN_ON_ONCE(1);
return -EIO;
}
- if (ret > 0) {
- iov_iter_advance(iter, ret);
- } else if (ret < 0) {
+ if (ret < 0) {
while (rdma->nr_sge > before) {
struct ib_sge *sge = &rdma->sge[rdma->nr_sge--];
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 8e9964001e2a..0b52d22a91a0 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -157,6 +157,7 @@ DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len, rc))
DEFINE_SMB3_RW_ERR_EVENT(read_err);
+DEFINE_SMB3_RW_ERR_EVENT(write_err);
/* For logging errors in other file I/O ops */
DECLARE_EVENT_CLASS(smb3_other_err_class,
@@ -202,7 +203,6 @@ DEFINE_EVENT(smb3_other_err_class, smb3_##name, \
int rc), \
TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
-DEFINE_SMB3_OTHER_ERR_EVENT(write_err);
DEFINE_SMB3_OTHER_ERR_EVENT(query_dir_err);
DEFINE_SMB3_OTHER_ERR_EVENT(zero_err);
DEFINE_SMB3_OTHER_ERR_EVENT(falloc_err);
@@ -370,6 +370,8 @@ DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
DEFINE_SMB3_RW_DONE_EVENT(read_enter);
DEFINE_SMB3_RW_DONE_EVENT(read_done);
+DEFINE_SMB3_RW_DONE_EVENT(write_enter);
+DEFINE_SMB3_RW_DONE_EVENT(write_done);
/* For logging successful other op */
DECLARE_EVENT_CLASS(smb3_other_done_class,
@@ -411,11 +413,9 @@ DEFINE_EVENT(smb3_other_done_class, smb3_##name, \
__u32 len), \
TP_ARGS(xid, fid, tid, sesid, offset, len))
-DEFINE_SMB3_OTHER_DONE_EVENT(write_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_enter);
-DEFINE_SMB3_OTHER_DONE_EVENT(write_done);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_done);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_done);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_done);
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 6e68aaf5bd20..91812150186c 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -28,6 +28,7 @@
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"
+#include "compress.h"
/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8
@@ -432,6 +433,9 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct kvec *iov;
int rc;
+ if (flags & CIFS_COMPRESS_REQ)
+ return smb_compress(server, &rqst[0], __smb_send_rqst);
+
if (!(flags & CIFS_TRANSFORM_REQ))
return __smb_send_rqst(server, num_rqst, rqst);
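
Note the dispatch order introduced above: a request flagged CIFS_COMPRESS_REQ is handed to smb_compress() together with __smb_send_rqst as the transmit callback, so compression wraps the ordinary (untransformed) send path rather than replacing it. A minimal sketch of that callback shape, with illustrative types:

#include <stdio.h>

struct rqst {
	const char *payload;
};

typedef int (*send_fn)(const struct rqst *r);

static int plain_send(const struct rqst *r)
{
	printf("send: %s\n", r->payload);
	return 0;
}

/* The compressor receives the real transmit routine, like
 * smb_compress(server, &rqst[0], __smb_send_rqst). */
static int compress_then_send(const struct rqst *r, send_fn xmit)
{
	printf("compress, then ");	/* ...compression would happen here... */
	return xmit(r);
}

int main(void)
{
	struct rqst r = { "SMB2 WRITE data" };
	int compressible = 1;	/* stands in for CIFS_COMPRESS_REQ being set */

	return compressible ? compress_then_send(&r, plain_send) : plain_send(&r);
}
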
@@ -1813,11 +1817,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = data_len; /* An RDMA read is already done. */
else
#endif
- {
length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
data_len);
- iov_iter_revert(&rdata->subreq.io_iter, data_len);
- }
if (length > 0)
rdata->got_bytes += length;
server->total_read += length;
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index 6780aa3e98a1..58a584f0b27e 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -162,7 +162,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
case XATTR_CIFS_ACL:
case XATTR_CIFS_NTSD:
case XATTR_CIFS_NTSD_FULL: {
- struct cifs_ntsd *pacl;
+ struct smb_ntsd *pacl;
if (!value)
goto out;
@@ -315,7 +315,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
* fetch owner and DACL otherwise
*/
u32 acllen, extra_info;
- struct cifs_ntsd *pacl;
+ struct smb_ntsd *pacl;
if (pTcon->ses->server->ops->get_acl == NULL)
goto out; /* rc already EOPNOTSUPP */
diff --git a/fs/smb/client/smb2status.h b/fs/smb/common/smb2status.h
index 9c6d79b0bd49..14b4a5f04564 100644
--- a/fs/smb/client/smb2status.h
+++ b/fs/smb/common/smb2status.h
@@ -901,6 +901,10 @@ struct ntstatus {
#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
+/*
+ * 'OCCURED' is a typo in MS-ERREF; it should be 'OCCURRED',
+ * but we keep it as-is for consistency with MS-ERREF.
+ */
#define STATUS_MCA_OCCURED cpu_to_le32(0xC000036A)
#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
@@ -1769,3 +1773,5 @@ struct ntstatus {
#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
+#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
+#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001A1)
diff --git a/fs/smb/common/smbacl.h b/fs/smb/common/smbacl.h
new file mode 100644
index 000000000000..6a60698fc6f0
--- /dev/null
+++ b/fs/smb/common/smbacl.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ * Copyright (c) International Business Machines Corp., 2007
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Modified by Namjae Jeon (linkinjeon@kernel.org)
+ */
+
+#ifndef _COMMON_SMBACL_H
+#define _COMMON_SMBACL_H
+
+#define NUM_AUTHS (6) /* number of authority fields */
+#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+
+/* ACE types - see MS-DTYP 2.4.4.1 */
+#define ACCESS_ALLOWED_ACE_TYPE 0x00
+#define ACCESS_DENIED_ACE_TYPE 0x01
+#define SYSTEM_AUDIT_ACE_TYPE 0x02
+#define SYSTEM_ALARM_ACE_TYPE 0x03
+#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
+#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
+#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
+#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
+#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
+#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
+#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
+#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+
+/* ACE flags */
+#define OBJECT_INHERIT_ACE 0x01
+#define CONTAINER_INHERIT_ACE 0x02
+#define NO_PROPAGATE_INHERIT_ACE 0x04
+#define INHERIT_ONLY_ACE 0x08
+#define INHERITED_ACE 0x10
+#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+#define FAILED_ACCESS_ACE_FLAG 0x80
+
+/*
+ * Maximum size of a string representation of a SID:
+ *
+ * The fields are unsigned values in decimal. So:
+ *
+ * u8: max 3 bytes in decimal
+ * u32: max 10 bytes in decimal
+ *
+ * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+ *
+ * For authority field, max is when all 6 values are non-zero and it must be
+ * represented in hex. So "-0x" + 12 hex digits.
+ *
+ * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+ */
+#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+
+#define DOMAIN_USER_RID_LE cpu_to_le32(513)
+
+/*
+ * ACE types - see MS-DTYP 2.4.4.1
+ */
+enum {
+ ACCESS_ALLOWED,
+ ACCESS_DENIED,
+};
+
+/*
+ * Security ID types
+ */
+enum {
+ SIDOWNER = 1,
+ SIDGROUP,
+ SIDCREATOR_OWNER,
+ SIDCREATOR_GROUP,
+ SIDUNIX_USER,
+ SIDUNIX_GROUP,
+ SIDNFS_USER,
+ SIDNFS_GROUP,
+ SIDNFS_MODE,
+};
+
+struct smb_ntsd {
+ __le16 revision; /* revision level */
+ __le16 type;
+ __le32 osidoffset;
+ __le32 gsidoffset;
+ __le32 sacloffset;
+ __le32 dacloffset;
+} __attribute__((packed));
+
+struct smb_sid {
+ __u8 revision; /* revision level */
+ __u8 num_subauth;
+ __u8 authority[NUM_AUTHS];
+ __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+} __attribute__((packed));
+
+/* size of a struct smb_sid, sans sub_auth array */
+#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+
+struct smb_acl {
+ __le16 revision; /* revision level */
+ __le16 size;
+ __le32 num_aces;
+} __attribute__((packed));
+
+struct smb_ace {
+ __u8 type; /* see above and MS-DTYP 2.4.4.1 */
+ __u8 flags;
+ __le16 size;
+ __le32 access_req;
+ struct smb_sid sid; /* i.e. the SID of the user or group these perms apply to */
+} __attribute__((packed));
+
+#endif /* _COMMON_SMBACL_H */
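
As a sanity check on the SID_STRING_* constants moved into this header: the worst-case textual SID needs SID_STRING_BASE_SIZE plus SID_STRING_SUBAUTH_SIZE per possible subauthority, i.e. 21 + 15 * 11 = 186 bytes including the NUL terminator:

#include <stdio.h>

#define SID_MAX_SUB_AUTHORITIES	15
#define SID_STRING_BASE_SIZE	(2 + 3 + 15 + 1)	/* "S-", version, authority, NUL */
#define SID_STRING_SUBAUTH_SIZE	11			/* "-" + up to 10 decimal digits */

int main(void)
{
	int max = SID_STRING_BASE_SIZE +
		  SID_MAX_SUB_AUTHORITIES * SID_STRING_SUBAUTH_SIZE;

	printf("max SID string buffer: %d bytes\n", max);	/* 186 */
	return 0;
}
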
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 7889df8112b4..cac80e7bfefc 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -39,7 +39,8 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
xa_destroy(&conn->sessions);
kvfree(conn->request_buf);
kfree(conn->preauth_info);
- kfree(conn);
+ if (atomic_dec_and_test(&conn->refcnt))
+ kfree(conn);
}
/**
@@ -68,6 +69,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
conn->um = NULL;
atomic_set(&conn->req_running, 0);
atomic_set(&conn->r_count, 0);
+ atomic_set(&conn->refcnt, 1);
conn->total_credits = 1;
conn->outstanding_credits = 0;
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 5b947175c048..b379ae4fdcdf 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -106,6 +106,7 @@ struct ksmbd_conn {
bool signing_negotiated;
__le16 signing_algorithm;
bool binding;
+ atomic_t refcnt;
};
struct ksmbd_conn_ops {
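
With the refcnt field added above, the connection is freed only by whoever drops the last reference: ksmbd_conn_alloc() starts the count at 1, alloc_opinfo() takes an extra reference, and both ksmbd_conn_free() and free_opinfo() do a dec-and-test before kfree(). The same ownership pattern in portable C11 atomics, as a userspace sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int refcnt;
	/* ... connection state ... */
};

static struct conn *conn_alloc(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (c)
		atomic_init(&c->refcnt, 1);	/* the allocator's reference */
	return c;
}

static void conn_get(struct conn *c)
{
	atomic_fetch_add(&c->refcnt, 1);	/* e.g. taken by alloc_opinfo() */
}

/* Mirrors "if (atomic_dec_and_test(&conn->refcnt)) kfree(conn);" */
static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		free(c);
		printf("freed on last put\n");
	}
}

int main(void)
{
	struct conn *c = conn_alloc();

	if (!c)
		return 1;
	conn_get(c);	/* an opinfo now pins the connection */
	conn_put(c);	/* ksmbd_conn_free() drops its reference */
	conn_put(c);	/* free_opinfo() drops the last one */
	return 0;
}
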
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index e546ffa57b55..246cde380dfb 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -10,7 +10,7 @@
#include "oplock.h"
#include "smb_common.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "connection.h"
#include "mgmt/user_session.h"
#include "mgmt/share_config.h"
@@ -51,6 +51,7 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
init_waitqueue_head(&opinfo->oplock_brk);
atomic_set(&opinfo->refcount, 1);
atomic_set(&opinfo->breaking_cnt, 0);
+ atomic_inc(&opinfo->conn->refcnt);
return opinfo;
}
@@ -124,6 +125,8 @@ static void free_opinfo(struct oplock_info *opinfo)
{
if (opinfo->is_lease)
free_lease(opinfo);
+ if (opinfo->conn && atomic_dec_and_test(&opinfo->conn->refcnt))
+ kfree(opinfo->conn);
kfree(opinfo);
}
@@ -163,9 +166,7 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
!atomic_inc_not_zero(&opinfo->refcount))
opinfo = NULL;
else {
- atomic_inc(&opinfo->conn->r_count);
if (ksmbd_conn_releasing(opinfo->conn)) {
- atomic_dec(&opinfo->conn->r_count);
atomic_dec(&opinfo->refcount);
opinfo = NULL;
}
@@ -177,26 +178,11 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
return opinfo;
}
-static void opinfo_conn_put(struct oplock_info *opinfo)
+void opinfo_put(struct oplock_info *opinfo)
{
- struct ksmbd_conn *conn;
-
if (!opinfo)
return;
- conn = opinfo->conn;
- /*
- * Checking waitqueue to dropping pending requests on
- * disconnection. waitqueue_active is safe because it
- * uses atomic operation for condition.
- */
- if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
- wake_up(&conn->r_count_q);
- opinfo_put(opinfo);
-}
-
-void opinfo_put(struct oplock_info *opinfo)
-{
if (!atomic_dec_and_test(&opinfo->refcount))
return;
@@ -1127,14 +1113,11 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
if (!atomic_inc_not_zero(&opinfo->refcount))
continue;
- atomic_inc(&opinfo->conn->r_count);
- if (ksmbd_conn_releasing(opinfo->conn)) {
- atomic_dec(&opinfo->conn->r_count);
+ if (ksmbd_conn_releasing(opinfo->conn))
continue;
- }
oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
- opinfo_conn_put(opinfo);
+ opinfo_put(opinfo);
}
}
up_read(&p_ci->m_lock);
@@ -1167,13 +1150,10 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
if (!atomic_inc_not_zero(&opinfo->refcount))
continue;
- atomic_inc(&opinfo->conn->r_count);
- if (ksmbd_conn_releasing(opinfo->conn)) {
- atomic_dec(&opinfo->conn->r_count);
+ if (ksmbd_conn_releasing(opinfo->conn))
continue;
- }
oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
- opinfo_conn_put(opinfo);
+ opinfo_put(opinfo);
}
}
up_read(&p_ci->m_lock);
@@ -1252,7 +1232,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
prev_opinfo = opinfo_get_list(ci);
if (!prev_opinfo ||
(prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
- opinfo_conn_put(prev_opinfo);
+ opinfo_put(prev_opinfo);
goto set_lev;
}
prev_op_has_lease = prev_opinfo->is_lease;
@@ -1262,19 +1242,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
if (share_ret < 0 &&
prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
err = share_ret;
- opinfo_conn_put(prev_opinfo);
+ opinfo_put(prev_opinfo);
goto err_out;
}
if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
- opinfo_conn_put(prev_opinfo);
+ opinfo_put(prev_opinfo);
goto op_break_not_needed;
}
list_add(&work->interim_entry, &prev_opinfo->interim_list);
err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
- opinfo_conn_put(prev_opinfo);
+ opinfo_put(prev_opinfo);
if (err == -ENOENT)
goto set_lev;
/* Check all oplock was freed by close */
@@ -1337,14 +1317,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
return;
if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
- opinfo_conn_put(brk_opinfo);
+ opinfo_put(brk_opinfo);
return;
}
brk_opinfo->open_trunc = is_trunc;
list_add(&work->interim_entry, &brk_opinfo->interim_list);
oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
- opinfo_conn_put(brk_opinfo);
+ opinfo_put(brk_opinfo);
}
/**
@@ -1376,11 +1356,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
if (!atomic_inc_not_zero(&brk_op->refcount))
continue;
- atomic_inc(&brk_op->conn->r_count);
- if (ksmbd_conn_releasing(brk_op->conn)) {
- atomic_dec(&brk_op->conn->r_count);
+ if (ksmbd_conn_releasing(brk_op->conn))
continue;
- }
rcu_read_unlock();
if (brk_op->is_lease && (brk_op->o_lease->state &
@@ -1411,7 +1388,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
brk_op->open_trunc = is_trunc;
oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
next:
- opinfo_conn_put(brk_op);
+ opinfo_put(brk_op);
rcu_read_lock();
}
rcu_read_unlock();
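
The list walkers above depend on atomic_inc_not_zero() semantics: a reference is taken only if the oplock_info is still live; otherwise the entry is skipped rather than resurrected. A userspace analogue of that conditional get, built on a compare-exchange loop:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the refcount is still non-zero, i.e. the object
 * has not begun teardown; otherwise report failure so the caller skips it. */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %s\n", ref_get_unless_zero(&live) ? "got ref" : "skipped");
	printf("dying: %s\n", ref_get_unless_zero(&dying) ? "got ref" : "skipped");
	return 0;
}
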
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 4d24cc105ef6..c402d4abe826 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -15,7 +15,7 @@
#include "server.h"
#include "smb_common.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "connection.h"
#include "transport_ipc.h"
#include "mgmt/user_session.h"
diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
index 727cb49926ee..ae501024665e 100644
--- a/fs/smb/server/smb2misc.c
+++ b/fs/smb/server/smb2misc.c
@@ -7,7 +7,7 @@
#include "glob.h"
#include "nterr.h"
#include "smb_common.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "mgmt/user_session.h"
#include "connection.h"
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 8bdc59251418..e6bdc1b20727 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -30,7 +30,7 @@
#include "server.h"
#include "smb_common.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "ksmbd_work.h"
#include "mgmt/user_config.h"
#include "mgmt/share_config.h"
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 13818ecb6e1b..cc4bb2377cbd 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -9,7 +9,7 @@
#include "smb_common.h"
#include "server.h"
#include "misc.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "connection.h"
#include "ksmbd_work.h"
#include "mgmt/user_session.h"
diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
index 2b52861707d8..24ce576fc292 100644
--- a/fs/smb/server/smbacl.h
+++ b/fs/smb/server/smbacl.h
@@ -8,6 +8,7 @@
#ifndef _SMBACL_H
#define _SMBACL_H
+#include "../common/smbacl.h"
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/posix_acl.h>
@@ -15,32 +16,6 @@
#include "mgmt/tree_connect.h"
-#define NUM_AUTHS (6) /* number of authority fields */
-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
-
-/*
- * ACE types - see MS-DTYP 2.4.4.1
- */
-enum {
- ACCESS_ALLOWED,
- ACCESS_DENIED,
-};
-
-/*
- * Security ID types
- */
-enum {
- SIDOWNER = 1,
- SIDGROUP,
- SIDCREATOR_OWNER,
- SIDCREATOR_GROUP,
- SIDUNIX_USER,
- SIDUNIX_GROUP,
- SIDNFS_USER,
- SIDNFS_GROUP,
- SIDNFS_MODE,
-};
-
/* Revision for ACLs */
#define SD_REVISION 1
@@ -62,92 +37,8 @@ enum {
#define RM_CONTROL_VALID 0x4000
#define SELF_RELATIVE 0x8000
-/* ACE types - see MS-DTYP 2.4.4.1 */
-#define ACCESS_ALLOWED_ACE_TYPE 0x00
-#define ACCESS_DENIED_ACE_TYPE 0x01
-#define SYSTEM_AUDIT_ACE_TYPE 0x02
-#define SYSTEM_ALARM_ACE_TYPE 0x03
-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
-#define ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
-#define SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE 0x0C
-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE 0x0D
-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE 0x0E /* Reserved */
-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
-
-/* ACE flags */
-#define OBJECT_INHERIT_ACE 0x01
-#define CONTAINER_INHERIT_ACE 0x02
-#define NO_PROPAGATE_INHERIT_ACE 0x04
-#define INHERIT_ONLY_ACE 0x08
-#define INHERITED_ACE 0x10
-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
-#define FAILED_ACCESS_ACE_FLAG 0x80
-
-/*
- * Maximum size of a string representation of a SID:
- *
- * The fields are unsigned values in decimal. So:
- *
- * u8: max 3 bytes in decimal
- * u32: max 10 bytes in decimal
- *
- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
- *
- * For authority field, max is when all 6 values are non-zero and it must be
- * represented in hex. So "-0x" + 12 hex digits.
- *
- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
- */
-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
-
-#define DOMAIN_USER_RID_LE cpu_to_le32(513)
-
struct ksmbd_conn;
-struct smb_ntsd {
- __le16 revision; /* revision level */
- __le16 type;
- __le32 osidoffset;
- __le32 gsidoffset;
- __le32 sacloffset;
- __le32 dacloffset;
-} __packed;
-
-struct smb_sid {
- __u8 revision; /* revision level */
- __u8 num_subauth;
- __u8 authority[NUM_AUTHS];
- __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
-} __packed;
-
-/* size of a struct cifs_sid, sans sub_auth array */
-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
-
-struct smb_acl {
- __le16 revision; /* revision level */
- __le16 size;
- __le32 num_aces;
-} __packed;
-
-struct smb_ace {
- __u8 type;
- __u8 flags;
- __le16 size;
- __le32 access_req;
- struct smb_sid sid; /* ie UUID of user or group who gets these perms */
-} __packed;
-
struct smb_fattr {
kuid_t cf_uid;
kgid_t cf_gid;
diff --git a/fs/smb/server/smbstatus.h b/fs/smb/server/smbstatus.h
deleted file mode 100644
index 8963deb42404..000000000000
--- a/fs/smb/server/smbstatus.h
+++ /dev/null
@@ -1,1822 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1+ */
-/*
- * fs/server/smb2status.h
- *
- * SMB2 Status code (network error) definitions
- * Definitions are from MS-ERREF
- *
- * Copyright (c) International Business Machines Corp., 2009,2011
- * Author(s): Steve French (sfrench@us.ibm.com)
- */
-
-/*
- * 0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
- * SEV C N <-------Facility--------> <------Error Status Code------>
- *
- * C is set if "customer defined" error, N bit is reserved and MBZ
- */
-
-#define STATUS_SEVERITY_SUCCESS cpu_to_le32(0x0000)
-#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
-#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
-#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
-
-struct ntstatus {
- /* Facility is the high 12 bits of the following field */
- __le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
- __le32 Code;
-};
-
-#define STATUS_SUCCESS 0x00000000
-#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
-#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
-#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
-#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
-#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
-#define STATUS_ABANDONED cpu_to_le32(0x00000080)
-#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
-#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
-#define STATUS_USER_APC cpu_to_le32(0x000000C0)
-#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
-#define STATUS_ALERTED cpu_to_le32(0x00000101)
-#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
-#define STATUS_PENDING cpu_to_le32(0x00000103)
-#define STATUS_REPARSE cpu_to_le32(0x00000104)
-#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
-#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
-#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
-#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
-#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
-#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
-#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
-#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
-#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
-#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
-#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
-#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
-#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
-#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
-#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
-#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
-#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
-#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
-#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
-#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
-#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
-#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
-#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
-#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
-#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
-#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
-#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
-#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
-#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
-#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
-#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
-#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
-#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
-#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
-#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
-#define DBG_CONTINUE cpu_to_le32(0x00010002)
-#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
-#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
-#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
-#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
-#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
-#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
-#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
-#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
-#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
-#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
-#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
-#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
-#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
-#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
-#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
-#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
-#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
-#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
-#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
-#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
-#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
-#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
-#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
-#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
-#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
-#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
-#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
-#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
-#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
-#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
-#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
-#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
-#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
-#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
-#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
-#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
-#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
-#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
-#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
-#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
-#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
-#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
-#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
-#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
-#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
-#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
-#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
-#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
-#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
-#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
-#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
-#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
-#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
-#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
-#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
-#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
-#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
-#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
-#define DBG_CONTROL_C cpu_to_le32(0x40010005)
-#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
-#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
-#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
-#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
-#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
-#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
-#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
-#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
-#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
-#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
-#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
-#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
-#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
-#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
-#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
-#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
-#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
-#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
-#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED \
- cpu_to_le32(0x401E0351)
-#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
-#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
-#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
-#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
-#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
-#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
-#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
-#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
-#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
-#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
-#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
-#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
-#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
-#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
-#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
-#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
-#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
-#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
-#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
-#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
-#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
-#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
-#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
-#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
-#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
-#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
-#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
-#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
-#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
-#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
-#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
-#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
-#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
-#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
-#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
-#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
-#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
-#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
-#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
-#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
-#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
-#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
-#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
-#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
-#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
-#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
-#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
-#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
-#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
-#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
-#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
-#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
-#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
-#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
-#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
-#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
-#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
-#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
-#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
-#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
-#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED \
- cpu_to_le32(0x801B00EB)
-#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
-#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
-#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
-#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
-#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
-#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
-#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
-#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
-#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
-#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
-#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
-#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
-#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
-#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
-#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
-#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
-#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
-#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
-#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
-#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
-#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
-#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
-#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
-#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
-#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
-#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
-#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
-#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
-#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
-#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
-#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
-#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
-#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
-#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
-#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
-#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
-#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
-#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
-#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
-#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
-#define STATUS_UNWIND cpu_to_le32(0xC0000027)
-#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
-#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
-#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
-#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
-#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
-#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
-#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
-#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
-#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
-#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
-#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
-#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
-#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
-#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
-#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
-#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
-#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
-#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
-#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
-#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
-#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
-#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
-#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
-#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
-#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
-#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
-#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
-#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
-#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
-#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
-#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
-#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
-#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
-#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
-#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
-#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
-#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
-#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
-#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
-#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
-#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
-#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
-#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
-#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
-#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
-#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
-#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
-#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
-#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
-#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
-#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
-#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
-#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
-#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
-#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
-#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
-#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
-#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
-#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
-#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
-#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
-#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
-#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
-#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
-#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
-#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
-#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
-#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
-#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
-#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
-#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
-#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
-#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
-#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
-#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
-#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
-#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
-#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
-#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
-#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
-#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
-#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
-#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
-#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
-#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
-#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
-#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
-#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
-#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
-#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
-#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
-#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
-#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
-#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
-#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
-#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
-#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
-#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
-#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
-#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
-#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
-#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
-#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
-#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
-#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
-#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
-#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
-#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
-#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
-#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
-#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
-#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
-#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
-#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
-#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
-#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
-#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
-#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
-#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
-#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
-#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
-#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
-#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
-#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
-#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
-#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
-#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
-#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
-#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
-#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
-#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
-#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
-#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
-#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
-#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
-#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
-#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
-#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
-#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
-#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
-#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
-#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
-#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
-#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
-#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
-#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
-#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
-#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
-#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
-#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
-#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
-#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
-#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
-#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
-#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
-#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
-#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
-#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
-#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
-#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
-#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
-#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
-#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
-#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
-#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
-#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
-#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
-#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
-#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
-#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
-#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
-#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
-#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
-#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
-#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
-#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
-#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
-#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
-#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
-#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
-#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
-#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
-#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
-#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
-#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
-#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
-#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
-#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
-#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
-#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
-#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
-#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
-#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
-#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
-#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
-#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
-#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
-#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
-#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
-#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
-#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
-#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
-#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
-#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
-#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
-#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
-#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
-#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
-#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
-#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
-#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
-#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
-#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
-#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
-#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
-#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
-#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
-#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
-#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
-#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
-#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
-#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
-#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
-#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
-#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
-#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
-#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
-#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
-#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
-#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
-#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
-#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
-#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
-#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
-#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
-#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
-#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
-#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
-#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
-#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
-#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
-#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
-#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
-#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
-#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
-#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
-#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
-#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
-#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
-#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
-#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
-#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
-#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
-#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
-#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
-#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
-#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
-#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
-#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
-#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
-#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
-#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
-#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
-#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
-#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
-#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
-#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
-#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
-#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
-#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
-#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
-#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
-#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
-#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
-#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
-#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
-#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
-#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
-#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
-#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
-#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
-#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
-#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
-#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
-#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
-#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
-#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
-#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
-#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
-#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
-#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
-#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
-#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
-#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
-#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
-#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
-#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
-#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
-#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
-#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
-#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
-#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
-#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
-#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
-#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
-#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
-#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
-#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
-#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
-#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
-#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
-#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
-#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
-#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
-#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
-#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
-#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
-#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
-#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
-#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
-#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
-#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
-#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
-#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
-#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
-#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
-#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
-#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
-#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
-#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
-#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
-#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
-#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
-#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
-#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
-#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
-#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
-#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
-#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
-#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
-#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
-#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
-#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
-#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
-#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
-#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
-#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
-#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
-#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
-#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
-#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
-#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
-#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
-#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
-#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
-#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
-#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
-#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
-#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
-#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
-#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
-#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
-#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
-#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
-#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
-#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
-#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
-#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
-#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
-#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
-#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
-#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
-#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
-#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
-#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
-#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
-#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
-#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
-#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
-#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
-#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
-#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
-#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
-#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
-#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
-#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
-#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
-#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
-#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
-#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
-#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
-#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
-#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
-#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
-#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
-#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
-#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
-#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
-#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
-#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
-#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
-#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
-#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
-#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
-#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
-#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
-#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
-#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
-#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
-#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
-#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
-#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
-#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
-#define STATUS_RETRY cpu_to_le32(0xC000022D)
-#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
-#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
-#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
-#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
-#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
-#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
-#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
-#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
-#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
-#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
-#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
-#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
-#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
-#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
-#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
-#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
-#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
-#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
-#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
-#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
-#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
-#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
-#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
-#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
-#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
-#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
-#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
-#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
-#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
-#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
-#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
-#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
-#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
-#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
-#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
-#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
-#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
-#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
-#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
-#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
-#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
-#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
-#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
-#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
-#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
-#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
-#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
-#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
-#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
-#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
-#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
-#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
-#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
-#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
-#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
-#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
-#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
-#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
-#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
-#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
-#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
-#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
-#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
-#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
-#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
-#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
-#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
-#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
-#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
-#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
-#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
-#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
-#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
-#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
-#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
-#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
-#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
-#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
-#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
-#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
-#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
-#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
-#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
-#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
-#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
-#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
-#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
-#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
-#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
-#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
-#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
-#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
-#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
-#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
-#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
-#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
-#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
-#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
-#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
-#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
-#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
-#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
-#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
-#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
-#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
-#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
-#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
-#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
-#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
-#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
-#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
-#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
-#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
-#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
-#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
-#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
-#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
-#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
-#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
-#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
-#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
-#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
-#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
-#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
-#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
-#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
-#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
-#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
-#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
-#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
-#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
-#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
-#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
-#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
-#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
-#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
-#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
-#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
-#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
-#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
-#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
-#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
-#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
-#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
-#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
-#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
-#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
-#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
-#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
-#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER \
- cpu_to_le32(0xC00002DB)
-#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
-#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
-#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
-#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
-#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
-#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
-#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
-#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
-#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
-#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
-#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
-#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
-#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
-#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
-#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
-#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
-#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
-#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
-#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
-#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
-#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
-#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
-#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
-#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
-#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
-#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
-#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
-#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
-#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
-#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
-#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
-#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
-#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
-#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
-#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
-#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
-#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
-#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
-#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
-#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
-#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
-#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
-#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
-#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
-#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
-#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
-#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
-#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
-#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
-#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
-#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
-#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
-#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
-#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
-#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
-#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
-#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
-#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
-#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
-#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
-#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
-#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
-#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
-#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
-#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
-#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
-#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
-#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
-#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
-#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
-#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
-#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
-#define STATUS_MCA_OCCURRED cpu_to_le32(0xC000036A)
-#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
-#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
-#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
-#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
-#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
-#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
-#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
-#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
-#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
-#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
-#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
-#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
-#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
-#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
-#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
-#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
-#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
-#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
-#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
-#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
-#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
-#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
-#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
-#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
-#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
-#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
-#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
-#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
-#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
-#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
-#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
-#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
-#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
-#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
-#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
-#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
-#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
-#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
-#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
-#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
-#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
-#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
-#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
-#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
-#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
-#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
-#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE \
- cpu_to_le32(0xC0000416)
-#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
-#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
-#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
-#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
-#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
-#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
-#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
-#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
-#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
-#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
-#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
-#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
-#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
-#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
-#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
-#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
-#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
-#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
-#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
-#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
-#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
-#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
-#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
-#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
-#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
-#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
-#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
-#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
-#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
-#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
-#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
-#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
-#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
-#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
-#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
-#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
-#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
-#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
-#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
-#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
-#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
-#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
-#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
-#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
-#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
-#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070C)
-#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070D)
-#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED \
- cpu_to_le32(0xC000070E)
-#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
-#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
-#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
-#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
-#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
-#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
-#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
-#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
-#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
-#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
-#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
-#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
-#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
-#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
-#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
-#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
-#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
-#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
-#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
-#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
-#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
-#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
-#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
-#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
-#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
-#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
-#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
-#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
-#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
-#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
-#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
-#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
-#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
-#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
-#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
-#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
-#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
-#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
-#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
-#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED \
- cpu_to_le32(0xC000A080)
-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR \
- cpu_to_le32(0xC000A081)
-#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
-#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
-#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
-#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
-#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
-#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
-#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
-#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
-#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
-#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
-#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
-#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
-#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
-#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
-#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
-#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
-#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
-#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
-#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
-#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
-#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
-#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
-#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
-#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
-#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
-#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
-#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
-#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
-#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
-#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
-#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
-#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
-#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
-#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
-#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
-#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
-#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
-#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
-#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
-#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
-#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
-#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
-#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
-#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
-#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
-#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
-#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
-#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
-#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
-#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
-#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
-#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
-#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
-#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
-#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
-#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
-#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
-#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
-#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
-#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
-#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
-#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
-#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
-#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
-#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
-#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
-#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
-#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
-#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
-#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
-#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
-#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
-#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
-#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
-#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
-#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
-#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
-#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
-#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
-#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
-#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
-#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
-#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
-#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
-#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
-#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
-#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
-#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
-#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
-#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
-#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
-#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
-#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
-#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
-#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
-#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
-#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
-#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
-#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
-#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
-#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
-#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
-#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
-#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
-#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
-#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
-#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
-#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
-#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
-#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
-#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
-#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
-#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
-#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
-#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
-#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
-#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
-#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
-#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
-#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
-#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
-#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
-#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
-#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
-#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
-#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
-#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
-#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
-#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
-#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
-#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
-#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
-#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
-#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
-#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
-#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
-#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
-#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
-#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
-#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
-#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
-#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
-#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
-#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
-#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
-#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
-#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
-#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
-#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
-#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
-#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
-#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
-#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
-#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
-#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
-#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
-#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
-#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
-#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
-#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
-#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
-#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
-#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
-#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
-#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
-#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
-#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
-#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
-#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
-#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
-#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
-#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
-#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
-#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
-#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
-#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
-#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
-#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
-#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
-#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
-#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
-#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
-#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
-#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
-#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
-#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
-#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
-#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
-#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
-#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
-#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
-#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
-#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
-#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
-#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
-#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
-#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
-#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
-#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
-#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
-#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
-#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
-#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
-#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
-#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
-#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
-#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
-#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
-#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
-#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
-#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
-#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
-#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
-#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
-#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
-#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
-#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
-#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
-#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
-#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
-#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
-#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
-#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
-#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
-#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
-#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
-#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
-#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
-#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
-#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
-#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
-#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
-#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY \
- cpu_to_le32(0xC0150012)
-#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
-#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
-#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
-#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
-#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
-#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
-#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
-#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT \
- cpu_to_le32(0xC015001C)
-#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
-#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
-#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
-#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
-#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
-#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
-#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
-#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
-#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
-#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
-#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
-#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
-#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
-#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
-#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
-#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
-#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
-#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
-#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
-#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
-#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
-#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
-#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
-#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
-#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
-#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
-#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
-#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
-#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
-#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
-#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
-#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
-#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
-#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
-#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
-#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
-#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION \
- cpu_to_le32(0xC0190024)
-#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
-#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
-#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
-#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
-#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
-#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
-#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
-#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
-#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
-#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
-#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
-#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
-#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
-#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
-#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
-#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
-#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
-#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
-#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
-#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
-#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
-#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
-#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
-#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
-#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
-#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
-#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
-#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
-#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
-#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
-#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
-#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
-#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
-#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION \
- cpu_to_le32(0xC0190053)
-#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
-#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
-#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
-#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
-#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
-#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
-#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
-#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
-#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
-#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
-#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
-#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
-#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
-#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
-#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
-#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
-#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
-#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
-#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
-#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
-#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
-#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
-#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
-#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
-#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
-#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
-#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
-#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
-#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
-#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
-#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
-#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
-#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
-#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
-#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
-#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
-#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
-#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
-#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
-#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
-#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
-#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
-#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
-#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
-#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
-#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
-#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
-#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
-#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
-#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
-#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
-#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
-#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
-#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
-#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
-#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
-#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
-#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
-#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
-#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
-#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
-#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
-#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
-#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
-#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
-#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
-#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
-#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
-#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
-#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
-#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
-#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
-#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
-#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
-#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
-#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
-#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
-#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
-#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
-#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
-#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
-#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
-#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
-#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
-#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
-#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
-#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
-#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
-#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
-#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK \
- cpu_to_le32(0xC01D0006)
-#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK \
- cpu_to_le32(0xC01D0007)
-#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
-#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
-#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
-#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
-#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
-#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
-#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
-#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
-#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
-#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
-#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
-#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
-#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
-#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
-#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
-#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
-#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
-#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
-#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
-#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
-#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
-#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
-#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
-#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
-#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0302)
-#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
-#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
-#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
-#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
-#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
-#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE \
- cpu_to_le32(0xC01E0310)
-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE \
- cpu_to_le32(0xC01E0311)
-#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
-#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
-#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
-#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
-#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
-#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
-#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET \
- cpu_to_le32(0xC01E031B)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
-#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
-#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
-#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
-#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
-#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
-#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION \
- cpu_to_le32(0xC01E0325)
-#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES \
- cpu_to_le32(0xC01E0326)
-#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE \
- cpu_to_le32(0xC01E0328)
-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET \
- cpu_to_le32(0xC01E0329)
-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE \
- cpu_to_le32(0xC01E032E)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
-#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
-#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
-#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
-#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
-#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER \
- cpu_to_le32(0xC01E0334)
-#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
-#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
-#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
-#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
-#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
-#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
-#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
-#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
-#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
-#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
-#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
-#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
-#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT \
- cpu_to_le32(0xC01E0341)
-#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
-#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
-#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION \
- cpu_to_le32(0xC01E0345)
-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0346)
-#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
-#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
-#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
-#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON \
- cpu_to_le32(0xC01E034D)
-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
-#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
-#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS \
- cpu_to_le32(0xC01E0350)
-#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
-#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
-#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
-#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
-#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
-#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN \
- cpu_to_le32(0xC01E0357)
-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT \
- cpu_to_le32(0xC01E0358)
-#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
-#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION \
- cpu_to_le32(0xC01E035A)
-#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
-#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
-#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED \
- cpu_to_le32(0xC01E0400)
-#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
-#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
-#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
-#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
-#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
-#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
-#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
-#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS \
- cpu_to_le32(0xC01E051C)
-#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
-#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS \
- cpu_to_le32(0xC01E051F)
-#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
-#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST \
- cpu_to_le32(0xC01E0521)
-#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
-#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
-#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
-#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
-#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
-#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
-#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME \
- cpu_to_le32(0xC01E0506)
-#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP \
- cpu_to_le32(0xC01E0507)
-#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED \
- cpu_to_le32(0xC01E0508)
-#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
-#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
-#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
-#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE \
- cpu_to_le32(0xC01E050D)
-#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
-#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
-#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
-#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
-#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA \
- cpu_to_le32(0xC01E0515)
-#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
-#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
-#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE \
- cpu_to_le32(0xC01E0518)
-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS \
- cpu_to_le32(0xC01E051A)
-#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS \
- cpu_to_le32(0xC01E051B)
-#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
-#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
-#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
-#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
-#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
-#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
-#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE \
- cpu_to_le32(0xC01E0586)
-#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING \
- cpu_to_le32(0xC01E0587)
-#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
-#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
-#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
-#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
-#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME \
- cpu_to_le32(0xC01E05E1)
-#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP \
- cpu_to_le32(0xC01E05E2)
-#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
-#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
-#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE \
- cpu_to_le32(0xC01E05E5)
-#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
-#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
-#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
-#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
-#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
-#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
-#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
-#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
-#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
-#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
-#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
-#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
-#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
-#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
-#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
-#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
-#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
-#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
-#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
-#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
-#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
-#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
-#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
-#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
-#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
-#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
-#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
-#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
-#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
-#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
-#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
-#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
-#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
-#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
-#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
-#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
-#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
-#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
-#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
-#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
-#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
-#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
-#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
-#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
-#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
-#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
-#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
-#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
-#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
-#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
-#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
-#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
-#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
-#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
-#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
-#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
-#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
-#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
-#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
-#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
-#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
-#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
-#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
-#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
-#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
-#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
-#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
-#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
-#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
-#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
-#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
-#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
-#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
-#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
-#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
-#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
-#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
-#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
-#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
-#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
-#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
-#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
-#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
-#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
-#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
-#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
-#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
-#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
-#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
-#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
-#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
-#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
-#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
-#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
-#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
-#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
-#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
-#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
-#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
-#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
-#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
-#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
-#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
-#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
-#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
-#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
-#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
-#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
-#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
-#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
-#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
-#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
-#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
-#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
-#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
-#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
-#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
-#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
-#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
-#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
-#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
-#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
-#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
-#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
-#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
-#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
-#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
-#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
-#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
-#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
-#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
-#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
-#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
-#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
-#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
-#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
-#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
-#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
-#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
-#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
-#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
-
-#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
-#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001a1)
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index cf4418f72772..44c87e300c16 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -21,7 +21,7 @@
#include "glob.h"
#include "connection.h"
#include "smb_common.h"
-#include "smbstatus.h"
+#include "../common/smb2status.h"
#include "transport_rdma.h"
#define SMB_DIRECT_PORT_IWARP 5445
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 9e859ba010cf..7cbd580120d1 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -496,7 +496,7 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
int err = 0;
if (work->conn->connection_type) {
- if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
+ if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
pr_err("no right to write(%pD)\n", fp->filp);
err = -EACCES;
goto out;
@@ -1115,9 +1115,10 @@ static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
struct ksmbd_readdir_data *buf;
buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
- buf->dirent_count++;
+ if (!is_dot_dotdot(name, namlen))
+ buf->dirent_count++;
- return buf->dirent_count <= 2;
+ return !buf->dirent_count;
}
/**
@@ -1137,7 +1138,7 @@ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp)
readdir_data.dirent_count = 0;
err = iterate_dir(fp->filp, &readdir_data.ctx);
- if (readdir_data.dirent_count > 2)
+ if (readdir_data.dirent_count)
err = -ENOTEMPTY;
else
err = 0;
@@ -1166,7 +1167,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name,
if (cmp < 0)
cmp = strncasecmp((char *)buf->private, name, namlen);
if (!cmp) {
- memcpy((char *)buf->private, name, namlen);
+ memcpy((char *)buf->private, name, buf->used);
buf->dirent_count = 1;
return false;
}
@@ -1234,10 +1235,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
char *filepath;
size_t path_len, remain_len;
- filepath = kstrdup(name, GFP_KERNEL);
- if (!filepath)
- return -ENOMEM;
-
+ filepath = name;
path_len = strlen(filepath);
remain_len = path_len;
@@ -1280,10 +1278,9 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
err = -EINVAL;
out2:
path_put(parent_path);
-out1:
- kfree(filepath);
}
+out1:
if (!err) {
err = mnt_want_write(parent_path->mnt);
if (err) {
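
Aside on the __dir_empty() hunk above: the check stops counting "." and "..", so a directory is reported empty unless it holds at least one real entry, and iteration can abort at the first such entry. A minimal sketch of the same logic, where is_dot_dotdot() is a stand-in modelled on the kernel helper of that name:

	#include <stdbool.h>

	/* Stand-in for the kernel's is_dot_dotdot() helper. */
	static bool is_dot_dotdot(const char *name, int namlen)
	{
		return (namlen == 1 && name[0] == '.') ||
		       (namlen == 2 && name[0] == '.' && name[1] == '.');
	}

	/*
	 * Per-entry callback: count only real entries, and keep iterating
	 * (return true) only while none has been seen, mirroring the
	 * "return !buf->dirent_count" line in the hunk above.
	 */
	static bool count_real_entry(const char *name, int namlen,
				     unsigned int *count)
	{
		if (!is_dot_dotdot(name, namlen))
			(*count)++;
		return *count == 0;
	}
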
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 4d4ee696e37c..a19f4e563c7e 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -863,6 +863,8 @@ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
if (op->conn != conn)
continue;
+ if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
+ kfree(op->conn);
op->conn = NULL;
}
up_write(&ci->m_lock);
@@ -965,6 +967,7 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
if (op->conn)
continue;
op->conn = fp->conn;
+ atomic_inc(&op->conn->refcnt);
}
up_write(&ci->m_lock);
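
The two vfs_cache.c hunks above pair a reference acquisition with its release: rebinding a durable handle to a connection now takes a reference, and detaching drops it, freeing the connection on the final put. A sketch of the pattern, assuming an object whose refcnt was initialised to 1 at allocation:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct conn {
		atomic_t refcnt;
		/* ... */
	};

	static inline void conn_get(struct conn *c)
	{
		atomic_inc(&c->refcnt);	/* as in ksmbd_reopen_durable_fd() above */
	}

	static inline void conn_put(struct conn *c)
	{
		/* Last reference gone: safe to free, as in session_fd_check(). */
		if (c && atomic_dec_and_test(&c->refcnt))
			kfree(c);
	}
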
diff --git a/fs/splice.c b/fs/splice.c
index 60aed8de21f8..06232d7e505f 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1566,11 +1566,11 @@ static ssize_t vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
static int vmsplice_type(struct fd f, int *type)
{
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_mode & FMODE_WRITE) {
+ if (fd_file(f)->f_mode & FMODE_WRITE) {
*type = ITER_SOURCE;
- } else if (f.file->f_mode & FMODE_READ) {
+ } else if (fd_file(f)->f_mode & FMODE_READ) {
*type = ITER_DEST;
} else {
fdput(f);
@@ -1621,9 +1621,9 @@ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
if (!iov_iter_count(&iter))
error = 0;
else if (type == ITER_SOURCE)
- error = vmsplice_to_pipe(f.file, &iter, flags);
+ error = vmsplice_to_pipe(fd_file(f), &iter, flags);
else
- error = vmsplice_to_user(f.file, &iter, flags);
+ error = vmsplice_to_user(fd_file(f), &iter, flags);
kfree(iov);
out_fdput:
@@ -1646,10 +1646,10 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
error = -EBADF;
in = fdget(fd_in);
- if (in.file) {
+ if (fd_file(in)) {
out = fdget(fd_out);
- if (out.file) {
- error = __do_splice(in.file, off_in, out.file, off_out,
+ if (fd_file(out)) {
+ error = __do_splice(fd_file(in), off_in, fd_file(out), off_out,
len, flags);
fdput(out);
}
@@ -2016,10 +2016,10 @@ SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
error = -EBADF;
in = fdget(fdin);
- if (in.file) {
+ if (fd_file(in)) {
out = fdget(fdout);
- if (out.file) {
- error = do_tee(in.file, out.file, len, flags);
+ if (fd_file(out)) {
+ error = do_tee(fd_file(in), fd_file(out), len, flags);
fdput(out);
}
fdput(in);
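
From here on, several diffs (splice.c, stat.c, statfs.c, sync.c) replace direct f.file accesses with the fd_file() accessor. The point is to make struct fd opaque so its representation can change without touching every call site. A simplified sketch of the accessor shape — the packed-word layout and flag value are assumptions for illustration, not the kernel's exact definition:

	struct file;			/* opaque to this sketch */

	/* File pointer and per-lookup flag bits packed into one word. */
	struct fd {
		unsigned long word;
	};

	#define FDPUT_FPUT	0x1UL	/* example flag bit (assumed) */

	static inline struct file *fd_file(struct fd f)
	{
		return (struct file *)(f.word & ~FDPUT_FPUT);
	}
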
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
index 8a218e7c2390..e4d7e507b268 100644
--- a/fs/squashfs/decompressor_multi_percpu.c
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -46,7 +46,7 @@ static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
}
kfree(comp_opts);
- return (__force void *) percpu;
+ return (void *)(__force unsigned long) percpu;
out:
for_each_possible_cpu(cpu) {
@@ -61,7 +61,7 @@ out:
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
struct squashfs_stream *stream;
int cpu;
@@ -79,7 +79,7 @@ static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
{
struct squashfs_stream *stream;
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
int res;
local_lock(&percpu->lock);
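
The decompressor_multi_percpu.c casts above are for sparse: converting a __percpu pointer directly to a plain pointer (or back) mixes address spaces, so the value is laundered through unsigned long, with __force only on the side that strips the annotation. The idiom, reduced to a pair of illustrative helpers (the annotations assume the kernel's sparse macros are in scope):

	struct squashfs_stream;

	/* Stash a per-CPU pointer in a plain void * field without
	 * defeating sparse's address-space checking elsewhere. */
	static inline void *percpu_to_plain(struct squashfs_stream __percpu *p)
	{
		return (void *)(__force unsigned long)p;
	}

	static inline struct squashfs_stream __percpu *plain_to_percpu(void *v)
	{
		return (void __percpu *)(unsigned long)v;
	}
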
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index a8c1e7f9a609..21aaa96856c1 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -494,39 +494,73 @@ out:
}
static int squashfs_readahead_fragment(struct page **page,
- unsigned int pages, unsigned int expected)
+ unsigned int pages, unsigned int expected, loff_t start)
{
struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int error = buffer->error;
+ int i, bytes, copied;
+ struct squashfs_page_actor *actor;
+ unsigned int offset;
+ void *addr;
+ struct page *last_page;
+
+ if (buffer->error)
+ goto out;
- if (error)
+ actor = squashfs_page_actor_init_special(msblk, page, pages,
+ expected, start);
+ if (!actor)
goto out;
- expected += squashfs_i(inode)->fragment_offset;
+ squashfs_actor_nobuff(actor);
+ addr = squashfs_first_page(actor);
+
+ for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
+ int avail = min_t(int, expected - offset, PAGE_SIZE);
+
+ if (!IS_ERR(addr)) {
+ bytes = squashfs_copy_data(addr, buffer, offset +
+ squashfs_i(inode)->fragment_offset, avail);
+
+ if (bytes != avail)
+ goto failed;
+ }
+
+ copied += avail;
+ addr = squashfs_next_page(actor);
+ }
- for (n = 0; n < pages; n++) {
- unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
- unsigned int offset = base + squashfs_i(inode)->fragment_offset;
+ last_page = squashfs_page_actor_free(actor);
- if (expected > offset) {
- unsigned int avail = min_t(unsigned int, expected -
- offset, PAGE_SIZE);
+ if (copied == expected && !IS_ERR(last_page)) {
+ /* Last page (if present) may have trailing bytes not filled */
+ bytes = copied % PAGE_SIZE;
+ if (bytes && last_page)
+ memzero_page(last_page, bytes, PAGE_SIZE - bytes);
- squashfs_fill_page(page[n], buffer, offset, avail);
+ for (i = 0; i < pages; i++) {
+ flush_dcache_page(page[i]);
+ SetPageUptodate(page[i]);
}
+ }
- unlock_page(page[n]);
- put_page(page[n]);
+ for (i = 0; i < pages; i++) {
+ unlock_page(page[i]);
+ put_page(page[i]);
}
+ squashfs_cache_put(buffer);
+ return 0;
+
+failed:
+ squashfs_page_actor_free(actor);
+
out:
squashfs_cache_put(buffer);
- return error;
+ return 1;
}
static void squashfs_readahead(struct readahead_control *ractl)
@@ -551,7 +585,6 @@ static void squashfs_readahead(struct readahead_control *ractl)
return;
for (;;) {
- pgoff_t index;
int res, bsize;
u64 block = 0;
unsigned int expected;
@@ -570,26 +603,21 @@ static void squashfs_readahead(struct readahead_control *ractl)
if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages;
- index = pages[0]->index >> shift;
-
- if ((pages[nr_pages - 1]->index >> shift) != index)
- goto skip_pages;
-
- if (index == file_end && squashfs_i(inode)->fragment_block !=
- SQUASHFS_INVALID_BLK) {
+ if (start >> msblk->block_log == file_end &&
+ squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages,
- expected);
+ expected, start);
if (res)
goto skip_pages;
continue;
}
- bsize = read_blocklist(inode, index, &block);
+ bsize = read_blocklist(inode, start >> msblk->block_log, &block);
if (bsize == 0)
goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
- expected);
+ expected, start);
if (!actor)
goto skip_pages;
@@ -597,12 +625,12 @@ static void squashfs_readahead(struct readahead_control *ractl)
last_page = squashfs_page_actor_free(actor);
- if (res == expected) {
+ if (res == expected && !IS_ERR(last_page)) {
int bytes;
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
- if (index == file_end && bytes && last_page)
+ if (start >> msblk->block_log == file_end && bytes && last_page)
memzero_page(last_page, bytes,
PAGE_SIZE - bytes);
@@ -616,6 +644,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
unlock_page(pages[i]);
put_page(pages[i]);
}
+
+ start += readahead_batch_length(ractl);
}
kfree(pages);
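
In the readahead rework above, the loop carries a byte offset start, advanced by readahead_batch_length() per batch, instead of deriving the block from pages[0]->index >> shift; once folios can span multiple pages, batch entries no longer map 1:1 to fixed-size indexes, so only the byte position is reliable. The block and tail-fragment tests then reduce to plain shifts (sketch, assuming block_log is log2 of the block size):

	#include <linux/types.h>

	/* Block number covering byte offset 'start' of the file. */
	static inline u64 block_for_offset(loff_t start, unsigned int block_log)
	{
		return (u64)start >> block_log;
	}

	/* Tail-fragment test from the loop above, in the same terms. */
	static inline bool offset_in_tail_fragment(loff_t start,
						   unsigned int block_log,
						   loff_t file_end_block)
	{
		return (start >> block_log) == file_end_block;
	}
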
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 2a689ce71de9..22251743fadf 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -23,15 +23,15 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected)
{
+ struct folio *folio = page_folio(target_page);
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-
loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- loff_t start_index = target_page->index & ~mask;
+ loff_t start_index = folio->index & ~mask;
loff_t end_index = start_index | mask;
int i, n, pages, bytes, res = -ENOMEM;
- struct page **page;
+ struct page **page, *last_page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -46,7 +46,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
/* Try to grab all the pages covered by the Squashfs block */
for (i = 0, n = start_index; n <= end_index; n++) {
- page[i] = (n == target_page->index) ? target_page :
+ page[i] = (n == folio->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL)
@@ -67,27 +67,28 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
- actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
+ actor = squashfs_page_actor_init_special(msblk, page, pages, expected,
+ start_index << PAGE_SHIFT);
if (actor == NULL)
goto out;
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
- squashfs_page_actor_free(actor);
+ last_page = squashfs_page_actor_free(actor);
if (res < 0)
goto mark_errored;
- if (res != expected) {
+ if (res != expected || IS_ERR(last_page)) {
res = -EIO;
goto mark_errored;
}
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
- if (page[pages - 1]->index == end_index && bytes) {
- pageaddr = kmap_local_page(page[pages - 1]);
+ if (end_index == file_end && last_page && bytes) {
+ pageaddr = kmap_local_page(last_page);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_local(pageaddr);
}
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 81af6c4ca115..2b3e807d4dea 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -60,6 +60,11 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}
/* Implementation of page_actor for decompressing directly into page cache. */
+static loff_t page_next_index(struct squashfs_page_actor *actor)
+{
+ return page_folio(actor->page[actor->next_page])->index;
+}
+
static void *handle_next_page(struct squashfs_page_actor *actor)
{
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -68,7 +73,7 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
return NULL;
if ((actor->next_page == actor->pages) ||
- (actor->next_index != actor->page[actor->next_page]->index)) {
+ (actor->next_index != page_next_index(actor))) {
actor->next_index++;
actor->returned_pages++;
actor->last_page = NULL;
@@ -103,7 +108,7 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
}
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
- struct page **page, int pages, int length)
+ struct page **page, int pages, int length, loff_t start_index)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
@@ -125,7 +130,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
- actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
+ actor->next_index = start_index >> PAGE_SHIFT;
actor->pageaddr = NULL;
actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 97d4983559b1..ffe25eb77c32 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -29,13 +29,15 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk,
- struct page **page, int pages, int length);
+ struct page **page, int pages, int length,
+ loff_t start_index);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
- struct page *last_page = actor->last_page;
+ struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO);
kfree(actor->tmp_buffer);
kfree(actor);
+
return last_page;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
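
With the page_actor.h change above, squashfs_page_actor_free() doubles as a completion check: it returns the last touched page only if the decompressor walked every page, and ERR_PTR(-EIO) otherwise, so a short decompression can no longer be mistaken for success. An illustrative caller following the pattern already visible in file.c and file_direct.c ('res' is the decompressed byte count, 'expected' the requested one; assumes the header above):

	#include <linux/err.h>
	#include <linux/highmem.h>

	static int finish_block(struct squashfs_page_actor *actor,
				int res, int expected)
	{
		struct page *last_page = squashfs_page_actor_free(actor);

		/* Both the byte count and the page walk must have completed. */
		if (res != expected || IS_ERR(last_page))
			return -EIO;

		/* Zero the tail of a partially filled last page, if any. */
		if (res % PAGE_SIZE && last_page)
			memzero_page(last_page, res % PAGE_SIZE,
				     PAGE_SIZE - res % PAGE_SIZE);
		return 0;
	}
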
diff --git a/fs/stat.c b/fs/stat.c
index 89ce1be56310..41e598376d7e 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -224,9 +224,9 @@ int vfs_fstat(int fd, struct kstat *stat)
int error;
f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
+ error = vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
fdput(f);
return error;
}
@@ -277,9 +277,9 @@ static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
u32 request_mask)
{
CLASS(fd_raw, f)(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- return vfs_statx_path(&f.file->f_path, flags, stat, request_mask);
+ return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}
/**
diff --git a/fs/statfs.c b/fs/statfs.c
index 96d1c3edf289..9c7bb27e7932 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -116,8 +116,8 @@ int fd_statfs(int fd, struct kstatfs *st)
{
struct fd f = fdget_raw(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_statfs(&f.file->f_path, st);
+ if (fd_file(f)) {
+ error = vfs_statfs(&fd_file(f)->f_path, st);
fdput(f);
}
return error;
diff --git a/fs/super.c b/fs/super.c
index b7913b55debc..1db230432960 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -621,7 +621,7 @@ void generic_shutdown_super(struct super_block *sb)
sync_filesystem(sb);
sb->s_flags &= ~SB_ACTIVE;
- cgroup_writeback_umount();
+ cgroup_writeback_umount(sb);
/* Evict all inodes with zero refcount. */
evict_inodes(sb);
@@ -1905,7 +1905,7 @@ static void lockdep_sb_freeze_release(struct super_block *sb)
int level;
for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
- percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+ percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
}
/*
diff --git a/fs/sync.c b/fs/sync.c
index dc725914e1ed..67df255eb189 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -152,15 +152,15 @@ SYSCALL_DEFINE1(syncfs, int, fd)
struct super_block *sb;
int ret, ret2;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- sb = f.file->f_path.dentry->d_sb;
+ sb = fd_file(f)->f_path.dentry->d_sb;
down_read(&sb->s_umount);
ret = sync_filesystem(sb);
up_read(&sb->s_umount);
- ret2 = errseq_check_and_advance(&sb->s_wb_err, &f.file->f_sb_err);
+ ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err);
fdput(f);
return ret ? ret : ret2;
@@ -208,8 +208,8 @@ static int do_fsync(unsigned int fd, int datasync)
struct fd f = fdget(fd);
int ret = -EBADF;
- if (f.file) {
- ret = vfs_fsync(f.file, datasync);
+ if (fd_file(f)) {
+ ret = vfs_fsync(fd_file(f), datasync);
fdput(f);
}
return ret;
@@ -360,8 +360,8 @@ int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
ret = -EBADF;
f = fdget(fd);
- if (f.file)
- ret = sync_file_range(f.file, offset, nbytes, flags);
+ if (fd_file(f))
+ ret = sync_file_range(fd_file(f), offset, nbytes, flags);
fdput(f);
return ret;
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 2e126d72d619..639307e2ff8c 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -28,17 +28,17 @@ const struct file_operations sysv_dir_operations = {
.fsync = generic_file_fsync,
};
-static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
- block_write_end(NULL, mapping, pos, len, len, page, NULL);
+ block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
- unlock_page(page);
+ folio_unlock(folio);
}
static int sysv_handle_dirsync(struct inode *dir)
@@ -52,20 +52,21 @@ static int sysv_handle_dirsync(struct inode *dir)
}
/*
- * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the
+ * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
* rules documented in mm/highmem.rst.
*
- * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_page()
+ * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_folio()
* and must be treated accordingly for nesting purposes.
*/
-static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p)
+static void *dir_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
- struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (IS_ERR(page))
- return ERR_CAST(page);
- *p = page;
- return kmap_local_page(page);
+ struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ *foliop = folio;
+ return kmap_local_folio(folio, 0);
}
static int sysv_readdir(struct file *file, struct dir_context *ctx)
@@ -87,9 +88,9 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
struct sysv_dir_entry *de;
- struct page *page;
+ struct folio *folio;
- kaddr = dir_get_page(inode, n, &page);
+ kaddr = dir_get_folio(inode, n, &folio);
if (IS_ERR(kaddr))
continue;
de = (struct sysv_dir_entry *)(kaddr+offset);
@@ -103,11 +104,11 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
fs16_to_cpu(SYSV_SB(sb), de->inode),
DT_UNKNOWN)) {
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
return 0;
}
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
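
The sysv readdir conversion above trades the page-based dir_get_page()/unmap_and_put_page() pair for dir_get_folio()/folio_release_kmap(); the nesting rules from mm/highmem.rst are unchanged. The pairing, reduced to its skeleton (error handling elided; dir_get_folio() is the helper introduced in this patch):

	struct folio *folio;
	char *kaddr = dir_get_folio(dir, n, &folio);

	if (!IS_ERR(kaddr)) {
		/* ... walk the sysv_dir_entry records in kaddr ... */
		folio_release_kmap(folio, kaddr);	/* kunmap + folio_put */
	}
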
@@ -126,39 +127,35 @@ static inline int namecompare(int len, int maxlen,
/*
* sysv_find_entry()
*
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * finds an entry in the specified directory with the wanted name.
+ * It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
*
- * On Success unmap_and_put_page() should be called on *res_page.
+ * On Success folio_release_kmap() should be called on *foliop.
*
- * sysv_find_entry() acts as a call to dir_get_page() and must be treated
+ * sysv_find_entry() acts as a call to dir_get_folio() and must be treated
* accordingly for nesting purposes.
*/
-struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page)
+struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct folio **foliop)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct inode * dir = d_inode(dentry->d_parent);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
struct sysv_dir_entry *de;
- *res_page = NULL;
-
start = SYSV_I(dir)->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
- char *kaddr = dir_get_page(dir, n, &page);
+ char *kaddr = dir_get_folio(dir, n, foliop);
if (!IS_ERR(kaddr)) {
de = (struct sysv_dir_entry *)kaddr;
- kaddr += PAGE_SIZE - SYSV_DIRSIZE;
+ kaddr += folio_size(*foliop) - SYSV_DIRSIZE;
for ( ; (char *) de <= kaddr ; de++) {
if (!de->inode)
continue;
@@ -166,7 +163,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
name, de->name))
goto found;
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(*foliop, kaddr);
}
if (++n >= npages)
@@ -177,7 +174,6 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
found:
SYSV_I(dir)->i_dir_start_lookup = n;
- *res_page = page;
return de;
}
@@ -186,7 +182,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
struct inode *dir = d_inode(dentry->d_parent);
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct sysv_dir_entry * de;
unsigned long npages = dir_pages(dir);
unsigned long n;
@@ -196,7 +192,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
/* We take care of directory expansion in the same loop */
for (n = 0; n <= npages; n++) {
- kaddr = dir_get_page(dir, n, &page);
+ kaddr = dir_get_folio(dir, n, &folio);
if (IS_ERR(kaddr))
return PTR_ERR(kaddr);
de = (struct sysv_dir_entry *)kaddr;
@@ -206,49 +202,49 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
err = -EEXIST;
if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
- goto out_page;
+ goto out_folio;
de++;
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- pos = page_offset(page) + offset_in_page(de);
- lock_page(page);
- err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
+ pos = folio_pos(folio) + offset_in_folio(folio, de);
+ folio_lock(folio);
+ err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err)
goto out_unlock;
memcpy (de->name, name, namelen);
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
- dir_commit_chunk(page, pos, SYSV_DIRSIZE);
+ dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = sysv_handle_dirsync(dir);
-out_page:
- unmap_and_put_page(page, kaddr);
+out_folio:
+ folio_release_kmap(folio, kaddr);
return err;
out_unlock:
- unlock_page(page);
- goto out_page;
+ folio_unlock(folio);
+ goto out_folio;
}
-int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
+int sysv_delete_entry(struct sysv_dir_entry *de, struct folio *folio)
{
- struct inode *inode = page->mapping->host;
- loff_t pos = page_offset(page) + offset_in_page(de);
+ struct inode *inode = folio->mapping->host;
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err;
- lock_page(page);
- err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
+ folio_lock(folio);
+ err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
de->inode = 0;
- dir_commit_chunk(page, pos, SYSV_DIRSIZE);
+ dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return sysv_handle_dirsync(inode);
@@ -256,33 +252,33 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
int sysv_make_empty(struct inode *inode, struct inode *dir)
{
- struct page *page = grab_cache_page(inode->i_mapping, 0);
+ struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct sysv_dir_entry * de;
- char *base;
+ char *kaddr;
int err;
- if (!page)
- return -ENOMEM;
- err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = sysv_prepare_chunk(folio, 0, 2 * SYSV_DIRSIZE);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- base = kmap_local_page(page);
- memset(base, 0, PAGE_SIZE);
+ kaddr = kmap_local_folio(folio, 0);
+ memset(kaddr, 0, folio_size(folio));
- de = (struct sysv_dir_entry *) base;
+ de = (struct sysv_dir_entry *)kaddr;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
strcpy(de->name,".");
de++;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
strcpy(de->name,"..");
- kunmap_local(base);
- dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
+ kunmap_local(kaddr);
+ dir_commit_chunk(folio, 0, 2 * SYSV_DIRSIZE);
err = sysv_handle_dirsync(inode);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -292,19 +288,19 @@ fail:
int sysv_empty_dir(struct inode * inode)
{
struct super_block *sb = inode->i_sb;
- struct page *page = NULL;
+ struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode);
char *kaddr;
for (i = 0; i < npages; i++) {
struct sysv_dir_entry *de;
- kaddr = dir_get_page(inode, i, &page);
+ kaddr = dir_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
continue;
de = (struct sysv_dir_entry *)kaddr;
- kaddr += PAGE_SIZE-SYSV_DIRSIZE;
+ kaddr += folio_size(folio) - SYSV_DIRSIZE;
for ( ;(char *)de <= kaddr; de++) {
if (!de->inode)
@@ -321,46 +317,46 @@ int sysv_empty_dir(struct inode * inode)
if (de->name[1] != '.' || de->name[2])
goto not_empty;
}
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- unmap_and_put_page(page, kaddr);
+ folio_release_kmap(folio, kaddr);
return 0;
}
/* Releases the page */
-int sysv_set_link(struct sysv_dir_entry *de, struct page *page,
- struct inode *inode)
+int sysv_set_link(struct sysv_dir_entry *de, struct folio *folio,
+ struct inode *inode)
{
- struct inode *dir = page->mapping->host;
- loff_t pos = page_offset(page) + offset_in_page(de);
+ struct inode *dir = folio->mapping->host;
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err;
- lock_page(page);
- err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
+ folio_lock(folio);
+ err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
- dir_commit_chunk(page, pos, SYSV_DIRSIZE);
+ dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return sysv_handle_dirsync(inode);
}
/*
- * Calls to dir_get_page()/unmap_and_put_page() must be nested according to the
+ * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
* rules documented in mm/highmem.rst.
*
- * sysv_dotdot() acts as a call to dir_get_page() and must be treated
+ * sysv_dotdot() acts as a call to dir_get_folio() and must be treated
* accordingly for nesting purposes.
*/
-struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p)
+struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct folio **foliop)
{
- struct sysv_dir_entry *de = dir_get_page(dir, 0, p);
+ struct sysv_dir_entry *de = dir_get_folio(dir, 0, foliop);
if (IS_ERR(de))
return NULL;
@@ -370,13 +366,13 @@ struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p)
ino_t sysv_inode_by_name(struct dentry *dentry)
{
- struct page *page;
- struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
+ struct folio *folio;
+ struct sysv_dir_entry *de = sysv_find_entry (dentry, &folio);
ino_t res = 0;
if (de) {
res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
- unmap_and_put_page(page, de);
+ folio_release_kmap(folio, de);
}
return res;
}
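
A note on the pattern the fs/sysv/dir.c hunks establish (the sketch below is illustrative, not from the patch): dir_get_folio() returns the kmapped address and hands the folio back through *foliop, and a single folio_release_kmap() call replaces the old unmap_and_put_page() pair, dropping both the kmap and the folio reference. The caller-side shape, assuming the fs/sysv names above:

	struct folio *folio;
	char *kaddr = dir_get_folio(dir, n, &folio);

	if (!IS_ERR(kaddr)) {
		/* ... scan sysv_dir_entry records in the mapped folio ... */
		folio_release_kmap(folio, kaddr);	/* kunmap_local() + folio_put() */
	}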
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 19bcb51a2203..451e95f474fa 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -466,9 +466,9 @@ static int sysv_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, get_block);
}
-int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(page, pos, len, get_block);
+ return __block_write_begin(folio, pos, len, get_block);
}
static void sysv_write_failed(struct address_space *mapping, loff_t to)
@@ -483,11 +483,11 @@ static void sysv_write_failed(struct address_space *mapping, loff_t to)
static int sysv_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, get_block);
+ ret = block_write_begin(mapping, pos, len, foliop, get_block);
if (unlikely(ret))
sysv_write_failed(mapping, pos + len);
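
The itree.c hunk follows the tree-wide signature change: __block_write_begin() and the ->write_begin address_space op now pass folios instead of pages. A minimal sketch of the new contract, assuming a filesystem-provided get_block (not this patch's code):

	static int example_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, struct folio **foliop, void **fsdata)
	{
		/* on success, *foliop holds a locked, referenced folio */
		return block_write_begin(mapping, pos, len, foliop, get_block);
	}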
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index d6b73798071b..fb8bd8437872 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -151,20 +151,20 @@ out_dir:
static int sysv_unlink(struct inode * dir, struct dentry * dentry)
{
struct inode * inode = d_inode(dentry);
- struct page * page;
+ struct folio *folio;
struct sysv_dir_entry * de;
int err;
- de = sysv_find_entry(dentry, &page);
+ de = sysv_find_entry(dentry, &folio);
if (!de)
return -ENOENT;
- err = sysv_delete_entry(de, page);
+ err = sysv_delete_entry(de, folio);
if (!err) {
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
}
- unmap_and_put_page(page, de);
+ folio_release_kmap(folio, de);
return err;
}
@@ -194,28 +194,28 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
- struct page * dir_page = NULL;
+ struct folio *dir_folio;
struct sysv_dir_entry * dir_de = NULL;
- struct page * old_page;
+ struct folio *old_folio;
struct sysv_dir_entry * old_de;
int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
- old_de = sysv_find_entry(old_dentry, &old_page);
+ old_de = sysv_find_entry(old_dentry, &old_folio);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
- dir_de = sysv_dotdot(old_inode, &dir_page);
+ dir_de = sysv_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- struct page * new_page;
+ struct folio *new_folio;
struct sysv_dir_entry * new_de;
err = -ENOTEMPTY;
@@ -223,11 +223,11 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto out_dir;
err = -ENOENT;
- new_de = sysv_find_entry(new_dentry, &new_page);
+ new_de = sysv_find_entry(new_dentry, &new_folio);
if (!new_de)
goto out_dir;
- err = sysv_set_link(new_de, new_page, old_inode);
- unmap_and_put_page(new_page, new_de);
+ err = sysv_set_link(new_de, new_folio, old_inode);
+ folio_release_kmap(new_folio, new_de);
if (err)
goto out_dir;
inode_set_ctime_current(new_inode);
@@ -242,23 +242,23 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inode_inc_link_count(new_dir);
}
- err = sysv_delete_entry(old_de, old_page);
+ err = sysv_delete_entry(old_de, old_folio);
if (err)
goto out_dir;
mark_inode_dirty(old_inode);
if (dir_de) {
- err = sysv_set_link(dir_de, dir_page, new_dir);
+ err = sysv_set_link(dir_de, dir_folio, new_dir);
if (!err)
inode_dec_link_count(old_dir);
}
out_dir:
if (dir_de)
- unmap_and_put_page(dir_page, dir_de);
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- unmap_and_put_page(old_page, old_de);
+ folio_release_kmap(old_folio, old_de);
out:
return err;
}
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index e3f988b469ee..0a48b2e7edb1 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -133,8 +133,8 @@ extern void sysv_free_block(struct super_block *, sysv_zone_t);
extern unsigned long sysv_count_free_blocks(struct super_block *);
/* itree.c */
-extern void sysv_truncate(struct inode *);
-extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len);
+void sysv_truncate(struct inode *);
+int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
/* inode.c */
extern struct inode *sysv_iget(struct super_block *, unsigned int);
@@ -148,15 +148,15 @@ extern void sysv_destroy_icache(void);
/* dir.c */
-extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **);
-extern int sysv_add_link(struct dentry *, struct inode *);
-extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *);
-extern int sysv_make_empty(struct inode *, struct inode *);
-extern int sysv_empty_dir(struct inode *);
-extern int sysv_set_link(struct sysv_dir_entry *, struct page *,
+struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct folio **);
+int sysv_add_link(struct dentry *, struct inode *);
+int sysv_delete_entry(struct sysv_dir_entry *, struct folio *);
+int sysv_make_empty(struct inode *, struct inode *);
+int sysv_empty_dir(struct inode *);
+int sysv_set_link(struct sysv_dir_entry *, struct folio *,
struct inode *);
-extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **);
-extern ino_t sysv_inode_by_name(struct dentry *);
+struct sysv_dir_entry *sysv_dotdot(struct inode *, struct folio **);
+ino_t sysv_inode_by_name(struct dentry *);
extern const struct inode_operations sysv_file_inode_operations;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 4bf2f8bfec11..137523e0bb21 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -397,9 +397,9 @@ static const struct file_operations timerfd_fops = {
static int timerfd_fget(int fd, struct fd *p)
{
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_op != &timerfd_fops) {
+ if (fd_file(f)->f_op != &timerfd_fops) {
fdput(f);
return -EINVAL;
}
@@ -482,7 +482,7 @@ static int do_timerfd_settime(int ufd, int flags,
ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
- ctx = f.file->private_data;
+ ctx = fd_file(f)->private_data;
if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
fdput(f);
@@ -546,7 +546,7 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
int ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
- ctx = f.file->private_data;
+ ctx = fd_file(f)->private_data;
spin_lock_irq(&ctx->wqh.lock);
if (ctx->expired && ctx->tintv) {
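
The timerfd hunks belong to the fd_file() sweep: fd_file() hides the internal representation of struct fd, so callers stop touching a .file member directly. The usual shape, sketched:

	struct fd f = fdget(fd);

	if (!fd_file(f))
		return -EBADF;
	/* ... use fd_file(f)->f_op, fd_file(f)->private_data ... */
	fdput(f);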
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c77ea57fe696..fda82f3e16e8 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -555,6 +555,11 @@ static unsigned int vfs_dent_type(uint8_t type)
return 0;
}
+struct ubifs_dir_data {
+ struct ubifs_dent_node *dent;
+ u64 cookie;
+};
+
/*
* The classical Unix view for directory is that it is a linear array of
* (name, inode number) entries. Linux/VFS assumes this model as well.
@@ -582,6 +587,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
struct inode *dir = file_inode(file);
struct ubifs_info *c = dir->i_sb->s_fs_info;
bool encrypted = IS_ENCRYPTED(dir);
+ struct ubifs_dir_data *data = file->private_data;
dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos);
@@ -604,27 +610,27 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
fstr_real_len = fstr.len;
}
- if (file->f_version == 0) {
+ if (data->cookie == 0) {
/*
- * The file was seek'ed, which means that @file->private_data
+ * The file was seek'ed, which means that @data->dent
* is now invalid. This may also be just the first
* 'ubifs_readdir()' invocation, in which case
- * @file->private_data is NULL, and the below code is
+ * @data->dent is NULL, and the below code is
* basically a no-op.
*/
- kfree(file->private_data);
- file->private_data = NULL;
+ kfree(data->dent);
+ data->dent = NULL;
}
/*
- * 'generic_file_llseek()' unconditionally sets @file->f_version to
- * zero, and we use this for detecting whether the file was seek'ed.
+ * 'ubifs_dir_llseek()' sets @data->cookie to zero, and we use this
+ * for detecting whether the file was seek'ed.
*/
- file->f_version = 1;
+ data->cookie = 1;
/* File positions 0 and 1 correspond to "." and ".." */
if (ctx->pos < 2) {
- ubifs_assert(c, !file->private_data);
+ ubifs_assert(c, !data->dent);
if (!dir_emit_dots(file, ctx)) {
if (encrypted)
fscrypt_fname_free_buffer(&fstr);
@@ -641,10 +647,10 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
}
ctx->pos = key_hash_flash(c, &dent->key);
- file->private_data = dent;
+ data->dent = dent;
}
- dent = file->private_data;
+ dent = data->dent;
if (!dent) {
/*
* The directory was seek'ed to and is now readdir'ed.
@@ -658,7 +664,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
goto out;
}
ctx->pos = key_hash_flash(c, &dent->key);
- file->private_data = dent;
+ data->dent = dent;
}
while (1) {
@@ -701,15 +707,15 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
goto out;
}
- kfree(file->private_data);
+ kfree(data->dent);
ctx->pos = key_hash_flash(c, &dent->key);
- file->private_data = dent;
+ data->dent = dent;
cond_resched();
}
out:
- kfree(file->private_data);
- file->private_data = NULL;
+ kfree(data->dent);
+ data->dent = NULL;
if (encrypted)
fscrypt_fname_free_buffer(&fstr);
@@ -733,7 +739,10 @@ out:
/* Free saved readdir() state when the directory is closed */
static int ubifs_dir_release(struct inode *dir, struct file *file)
{
- kfree(file->private_data);
+ struct ubifs_dir_data *data = file->private_data;
+
+ kfree(data->dent);
+ kfree(data);
file->private_data = NULL;
return 0;
}
@@ -1712,6 +1721,24 @@ int ubifs_getattr(struct mnt_idmap *idmap, const struct path *path,
return 0;
}
+static int ubifs_dir_open(struct inode *inode, struct file *file)
+{
+ struct ubifs_dir_data *data;
+
+ data = kzalloc(sizeof(struct ubifs_dir_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ file->private_data = data;
+ return 0;
+}
+
+static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct ubifs_dir_data *data = file->private_data;
+
+ return generic_llseek_cookie(file, offset, whence, &data->cookie);
+}
+
const struct inode_operations ubifs_dir_inode_operations = {
.lookup = ubifs_lookup,
.create = ubifs_create,
@@ -1732,7 +1759,8 @@ const struct inode_operations ubifs_dir_inode_operations = {
};
const struct file_operations ubifs_dir_operations = {
- .llseek = generic_file_llseek,
+ .open = ubifs_dir_open,
+ .llseek = ubifs_dir_llseek,
.release = ubifs_dir_release,
.read = generic_read_dir,
.iterate_shared = ubifs_readdir,
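
The ubifs hunks replace file->f_version with per-open state because f_version is being retired as a seek-detection channel. The protocol, restated as a sketch of the two cooperating sides (names as in the hunks above): generic_llseek_cookie() zeroes the cookie whenever the position actually moves, and readdir treats a zero cookie as "seeked since my last call", invalidating its cached dent before re-arming the cookie.

	/* llseek side */
	ret = generic_llseek_cookie(file, offset, whence, &data->cookie);

	/* readdir side */
	if (data->cookie == 0) {	/* seeked (or first call): cache is stale */
		kfree(data->dent);
		data->dent = NULL;
	}
	data->cookie = 1;		/* arm detection for the next seek */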
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 68e104423a48..5130123005e4 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -211,7 +211,7 @@ static void release_existing_page_budget(struct ubifs_info *c)
}
static int write_begin_slow(struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep)
+ loff_t pos, unsigned len, struct folio **foliop)
{
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -298,7 +298,7 @@ static int write_begin_slow(struct address_space *mapping,
ubifs_release_dirty_inode_budget(c, ui);
}
- *pagep = &folio->page;
+ *foliop = folio;
return 0;
}
@@ -414,7 +414,7 @@ static int allocate_budget(struct ubifs_info *c, struct folio *folio,
*/
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -483,7 +483,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
folio_unlock(folio);
folio_put(folio);
- return write_begin_slow(mapping, pos, len, pagep);
+ return write_begin_slow(mapping, pos, len, foliop);
}
/*
@@ -492,7 +492,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* with @ui->ui_mutex locked if we are appending pages, and unlocked
* otherwise. This is an optimization (slightly hacky though).
*/
- *pagep = &folio->page;
+ *foliop = folio;
return 0;
}
@@ -524,9 +524,8 @@ static void cancel_budget(struct ubifs_info *c, struct folio *folio,
static int ubifs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info;
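
Symmetrically, ->write_end now receives the folio that ->write_begin produced, so the page_folio() translation in ubifs_write_end disappears. The trimmed signature, sketched for a generic filesystem:

	static int example_write_end(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned copied,
			struct folio *folio, void *fsdata)
	{
		return generic_write_end(file, mapping, pos, len, copied,
					 folio, fsdata);
	}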
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index f94f45fe2c91..5023dfe191e8 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -60,7 +60,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
* identifying beginning of dir entry (names are under user control),
* we need to scan the directory from the beginning.
*/
- if (!inode_eq_iversion(dir, file->f_version)) {
+ if (!inode_eq_iversion(dir, *(u64 *)file->private_data)) {
emit_pos = nf_pos;
nf_pos = 0;
} else {
@@ -122,15 +122,37 @@ out_iter:
udf_fiiter_release(&iter);
out:
if (pos_valid)
- file->f_version = inode_query_iversion(dir);
+ *(u64 *)file->private_data = inode_query_iversion(dir);
kfree(fname);
return ret;
}
+static int udf_dir_open(struct inode *inode, struct file *file)
+{
+ file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int udf_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static loff_t udf_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return generic_llseek_cookie(file, offset, whence,
+ (u64 *)file->private_data);
+}
+
/* readdir and lookup functions */
const struct file_operations udf_dir_operations = {
- .llseek = generic_file_llseek,
+ .open = udf_dir_open,
+ .release = udf_dir_release,
+ .llseek = udf_dir_llseek,
.read = generic_read_dir,
.iterate_shared = udf_readdir,
.unlocked_ioctl = udf_ioctl,
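
udf adopts the same cookie scheme, but its only per-open state is a bare u64 that does double duty: generic_llseek_cookie() zeroes it on seek, while udf_readdir stores the directory's iversion in it, so either a seek or a directory modification makes the comparison fail and forces a rescan. Sketched:

	u64 *cookie = file->private_data;

	if (!inode_eq_iversion(dir, *cookie)) {
		/* seeked or directory changed: rescan from position 0 */
	}
	*cookie = inode_query_iversion(dir);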
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 3a4179de316b..412fe7c4d348 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -62,7 +62,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
end = size & ~PAGE_MASK;
else
end = PAGE_SIZE;
- err = __block_write_begin(&folio->page, 0, end, udf_get_block);
+ err = __block_write_begin(folio, 0, end, udf_get_block);
if (err) {
folio_unlock(folio);
ret = vmf_fs_error(err);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 4726a4d014b6..eaee57b91c6c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -246,14 +246,14 @@ static void udf_readahead(struct readahead_control *rac)
static int udf_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct udf_inode_info *iinfo = UDF_I(file_inode(file));
struct folio *folio;
int ret;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- ret = block_write_begin(mapping, pos, len, pagep,
+ ret = block_write_begin(mapping, pos, len, foliop,
udf_get_block);
if (unlikely(ret))
udf_write_failed(mapping, pos + len);
@@ -265,7 +265,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
- *pagep = &folio->page;
+ *foliop = folio;
if (!folio_test_uptodate(folio))
udf_adinicb_read_folio(folio);
return 0;
@@ -273,16 +273,14 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
static int udf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = file_inode(file);
- struct folio *folio;
loff_t last_pos;
if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
- return generic_write_end(file, mapping, pos, len, copied, page,
+ return generic_write_end(file, mapping, pos, len, copied, folio,
fsdata);
- folio = page_folio(page);
last_pos = pos + copied;
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 61f25d3cf3f7..d6e6a2198971 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -42,18 +42,18 @@ static inline int ufs_match(struct super_block *sb, int len,
return !memcmp(name, de->d_name, len);
}
-static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host;
inode_inc_iversion(dir);
- block_write_end(NULL, mapping, pos, len, len, page, NULL);
+ block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
- unlock_page(page);
+ folio_unlock(folio);
}
static int ufs_handle_dirsync(struct inode *dir)
@@ -66,22 +66,16 @@ static int ufs_handle_dirsync(struct inode *dir)
return err;
}
-static inline void ufs_put_page(struct page *page)
-{
- kunmap(page);
- put_page(page);
-}
-
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
ino_t res = 0;
struct ufs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
- de = ufs_find_entry(dir, qstr, &page);
+ de = ufs_find_entry(dir, qstr, &folio);
if (de) {
res = fs32_to_cpu(dir->i_sb, de->d_ino);
- ufs_put_page(page);
+ folio_release_kmap(folio, de);
}
return res;
}
@@ -89,43 +83,40 @@ ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
- struct page *page, struct inode *inode,
+ struct folio *folio, struct inode *inode,
bool update_times)
{
- loff_t pos = page_offset(page) +
- (char *) de - (char *) page_address(page);
+ loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
int err;
- lock_page(page);
- err = ufs_prepare_chunk(page, pos, len);
+ folio_lock(folio);
+ err = ufs_prepare_chunk(folio, pos, len);
BUG_ON(err);
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
ufs_set_de_type(dir->i_sb, de, inode->i_mode);
- ufs_commit_chunk(page, pos, len);
- ufs_put_page(page);
+ ufs_commit_chunk(folio, pos, len);
+ folio_release_kmap(folio, de);
if (update_times)
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ufs_handle_dirsync(dir);
}
-
-static bool ufs_check_page(struct page *page)
+static bool ufs_check_folio(struct folio *folio, char *kaddr)
{
- struct inode *dir = page->mapping->host;
+ struct inode *dir = folio->mapping->host;
struct super_block *sb = dir->i_sb;
- char *kaddr = page_address(page);
unsigned offs, rec_len;
- unsigned limit = PAGE_SIZE;
+ unsigned limit = folio_size(folio);
const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
struct ufs_dir_entry *p;
char *error;
- if ((dir->i_size >> PAGE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_MASK;
+ if (dir->i_size < folio_pos(folio) + limit) {
+ limit = offset_in_folio(folio, dir->i_size);
if (limit & chunk_mask)
goto Ebadsize;
if (!limit)
@@ -150,13 +141,13 @@ static bool ufs_check_page(struct page *page)
if (offs != limit)
goto Eend;
out:
- SetPageChecked(page);
+ folio_set_checked(folio);
return true;
/* Too bad, we had an error */
Ebadsize:
- ufs_error(sb, "ufs_check_page",
+ ufs_error(sb, __func__,
"size of directory #%lu is not a multiple of chunk size",
dir->i_ino
);
@@ -176,36 +167,40 @@ Espan:
Einumber:
error = "inode out of bounds";
bad_entry:
- ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
- "offset=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
+ ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
+ "offset=%llu, rec_len=%d, name_len=%d",
+ dir->i_ino, error, folio_pos(folio) + offs,
rec_len, ufs_get_de_namlen(sb, p));
goto fail;
Eend:
p = (struct ufs_dir_entry *)(kaddr + offs);
ufs_error(sb, __func__,
"entry in directory #%lu spans the page boundary"
- "offset=%lu",
- dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
+ "offset=%llu",
+ dir->i_ino, folio_pos(folio) + offs);
fail:
return false;
}
-static struct page *ufs_get_page(struct inode *dir, unsigned long n)
+static void *ufs_get_folio(struct inode *dir, unsigned long n,
+ struct folio **foliop)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
- if (!IS_ERR(page)) {
- kmap(page);
- if (unlikely(!PageChecked(page))) {
- if (!ufs_check_page(page))
- goto fail;
- }
+ struct folio *folio = read_mapping_folio(mapping, n, NULL);
+ void *kaddr;
+
+ if (IS_ERR(folio))
+ return ERR_CAST(folio);
+ kaddr = kmap_local_folio(folio, 0);
+ if (unlikely(!folio_test_checked(folio))) {
+ if (!ufs_check_folio(folio, kaddr))
+ goto fail;
}
- return page;
+ *foliop = folio;
+ return kaddr;
fail:
- ufs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return ERR_PTR(-EIO);
}
@@ -231,17 +226,14 @@ ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
fs16_to_cpu(sb, p->d_reclen));
}
-struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
+struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop)
{
- struct page *page = ufs_get_page(dir, 0);
- struct ufs_dir_entry *de = NULL;
+ struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop);
- if (!IS_ERR(page)) {
- de = ufs_next_entry(dir->i_sb,
- (struct ufs_dir_entry *)page_address(page));
- *p = page;
- }
- return de;
+ if (!IS_ERR(de))
+ return ufs_next_entry(dir->i_sb, de);
+
+ return NULL;
}
/*
@@ -253,7 +245,7 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
* Entry is guaranteed to be valid.
*/
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
- struct page **res_page)
+ struct folio **foliop)
{
struct super_block *sb = dir->i_sb;
const unsigned char *name = qstr->name;
@@ -261,7 +253,6 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
unsigned reclen = UFS_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
struct ufs_inode_info *ui = UFS_I(dir);
struct ufs_dir_entry *de;
@@ -270,27 +261,23 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
if (npages == 0 || namelen > UFS_MAXNAMLEN)
goto out;
- /* OFFSET_CACHE */
- *res_page = NULL;
-
start = ui->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
- char *kaddr;
- page = ufs_get_page(dir, n);
- if (!IS_ERR(page)) {
- kaddr = page_address(page);
- de = (struct ufs_dir_entry *) kaddr;
+ char *kaddr = ufs_get_folio(dir, n, foliop);
+
+ if (!IS_ERR(kaddr)) {
+ de = (struct ufs_dir_entry *)kaddr;
kaddr += ufs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (ufs_match(sb, namelen, name, de))
goto found;
de = ufs_next_entry(sb, de);
}
- ufs_put_page(page);
+ folio_release_kmap(*foliop, kaddr);
}
if (++n >= npages)
n = 0;
@@ -299,7 +286,6 @@ out:
return NULL;
found:
- *res_page = page;
ui->i_dir_start_lookup = n;
return de;
}
@@ -316,11 +302,10 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
unsigned reclen = UFS_DIR_REC_LEN(namelen);
const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
unsigned short rec_len, name_len;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct ufs_dir_entry *de;
unsigned long npages = dir_pages(dir);
unsigned long n;
- char *kaddr;
loff_t pos;
int err;
@@ -328,21 +313,19 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
/*
* We take care of directory expansion in the same loop.
- * This code plays outside i_size, so it locks the page
+ * This code plays outside i_size, so it locks the folio
* to protect that region.
*/
for (n = 0; n <= npages; n++) {
+ char *kaddr = ufs_get_folio(dir, n, &folio);
char *dir_end;
- page = ufs_get_page(dir, n);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
- lock_page(page);
- kaddr = page_address(page);
+ if (IS_ERR(kaddr))
+ return PTR_ERR(kaddr);
+ folio_lock(folio);
dir_end = kaddr + ufs_last_byte(dir, n);
de = (struct ufs_dir_entry *)kaddr;
- kaddr += PAGE_SIZE - reclen;
+ kaddr += folio_size(folio) - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -369,16 +352,15 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
de = (struct ufs_dir_entry *) ((char *) de + rec_len);
}
- unlock_page(page);
- ufs_put_page(page);
+ folio_unlock(folio);
+ folio_release_kmap(folio, kaddr);
}
BUG();
return -EINVAL;
got_it:
- pos = page_offset(page) +
- (char*)de - (char*)page_address(page);
- err = ufs_prepare_chunk(page, pos, rec_len);
+ pos = folio_pos(folio) + offset_in_folio(folio, de);
+ err = ufs_prepare_chunk(folio, pos, rec_len);
if (err)
goto out_unlock;
if (de->d_ino) {
@@ -395,18 +377,17 @@ got_it:
de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode);
- ufs_commit_chunk(page, pos, rec_len);
+ ufs_commit_chunk(folio, pos, rec_len);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir);
/* OFFSET_CACHE */
out_put:
- ufs_put_page(page);
-out:
+ folio_release_kmap(folio, de);
return err;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
goto out_put;
}
@@ -435,7 +416,7 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
- bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
+ bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
unsigned flags = UFS_SB(sb)->s_flags;
UFSD("BEGIN\n");
@@ -444,25 +425,24 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
return 0;
for ( ; n < npages; n++, offset = 0) {
- char *kaddr, *limit;
struct ufs_dir_entry *de;
+ struct folio *folio;
+ char *kaddr = ufs_get_folio(inode, n, &folio);
+ char *limit;
- struct page *page = ufs_get_page(inode, n);
-
- if (IS_ERR(page)) {
+ if (IS_ERR(kaddr)) {
ufs_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
ctx->pos += PAGE_SIZE - offset;
- return -EIO;
+ return PTR_ERR(kaddr);
}
- kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode_query_iversion(inode);
+ *(u64 *)file->private_data = inode_query_iversion(inode);
need_revalidate = false;
}
de = (struct ufs_dir_entry *)(kaddr+offset);
@@ -482,13 +462,13 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
ufs_get_de_namlen(sb, de),
fs32_to_cpu(sb, de->d_ino),
d_type)) {
- ufs_put_page(page);
+ folio_release_kmap(folio, de);
return 0;
}
}
ctx->pos += fs16_to_cpu(sb, de->d_reclen);
}
- ufs_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 0;
}
@@ -499,19 +479,23 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
* previous entry.
*/
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
- struct page * page)
+ struct folio *folio)
{
struct super_block *sb = inode->i_sb;
- char *kaddr = page_address(page);
- unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
- unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
+ size_t from, to;
+ char *kaddr;
loff_t pos;
- struct ufs_dir_entry *pde = NULL;
- struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
+ struct ufs_dir_entry *de, *pde = NULL;
int err;
UFSD("ENTER\n");
+ from = offset_in_folio(folio, dir);
+ to = from + fs16_to_cpu(sb, dir->d_reclen);
+ kaddr = (char *)dir - from;
+ from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
+ de = (struct ufs_dir_entry *) (kaddr + from);
+
UFSD("ino %u, reclen %u, namlen %u, name %s\n",
fs32_to_cpu(sb, de->d_ino),
fs16_to_cpu(sb, de->d_reclen),
@@ -528,21 +512,20 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
de = ufs_next_entry(sb, de);
}
if (pde)
- from = (char*)pde - (char*)page_address(page);
-
- pos = page_offset(page) + from;
- lock_page(page);
- err = ufs_prepare_chunk(page, pos, to - from);
+ from = offset_in_folio(folio, pde);
+ pos = folio_pos(folio) + from;
+ folio_lock(folio);
+ err = ufs_prepare_chunk(folio, pos, to - from);
BUG_ON(err);
if (pde)
pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0;
- ufs_commit_chunk(page, pos, to - from);
+ ufs_commit_chunk(folio, pos, to - from);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode);
out:
- ufs_put_page(page);
+ folio_release_kmap(folio, kaddr);
UFSD("EXIT\n");
return err;
}
@@ -551,26 +534,25 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
{
struct super_block * sb = dir->i_sb;
struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
+ struct folio *folio = filemap_grab_folio(mapping, 0);
const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
struct ufs_dir_entry * de;
- char *base;
int err;
+ char *kaddr;
- if (!page)
- return -ENOMEM;
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- err = ufs_prepare_chunk(page, 0, chunk_size);
+ err = ufs_prepare_chunk(folio, 0, chunk_size);
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto fail;
}
- kmap(page);
- base = (char*)page_address(page);
- memset(base, 0, PAGE_SIZE);
+ kaddr = kmap_local_folio(folio, 0);
+ memset(kaddr, 0, folio_size(folio));
- de = (struct ufs_dir_entry *) base;
+ de = (struct ufs_dir_entry *)kaddr;
de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode);
@@ -584,12 +566,12 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
ufs_set_de_namlen(sb, de, 2);
strcpy (de->d_name, "..");
- kunmap(page);
+ kunmap_local(kaddr);
- ufs_commit_chunk(page, 0, chunk_size);
+ ufs_commit_chunk(folio, 0, chunk_size);
err = ufs_handle_dirsync(inode);
fail:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -599,18 +581,17 @@ fail:
int ufs_empty_dir(struct inode * inode)
{
struct super_block *sb = inode->i_sb;
- struct page *page = NULL;
+ struct folio *folio;
+ char *kaddr;
unsigned long i, npages = dir_pages(inode);
for (i = 0; i < npages; i++) {
- char *kaddr;
struct ufs_dir_entry *de;
- page = ufs_get_page(inode, i);
- if (IS_ERR(page))
+ kaddr = ufs_get_folio(inode, i, &folio);
+ if (IS_ERR(kaddr))
continue;
- kaddr = page_address(page);
de = (struct ufs_dir_entry *)kaddr;
kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);
@@ -637,18 +618,40 @@ int ufs_empty_dir(struct inode * inode)
}
de = ufs_next_entry(sb, de);
}
- ufs_put_page(page);
+ folio_release_kmap(folio, kaddr);
}
return 1;
not_empty:
- ufs_put_page(page);
+ folio_release_kmap(folio, kaddr);
return 0;
}
+static int ufs_dir_open(struct inode *inode, struct file *file)
+{
+ file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int ufs_dir_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static loff_t ufs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return generic_llseek_cookie(file, offset, whence,
+ (u64 *)file->private_data);
+}
+
const struct file_operations ufs_dir_operations = {
+ .open = ufs_dir_open,
+ .release = ufs_dir_release,
.read = generic_read_dir,
.iterate_shared = ufs_readdir,
.fsync = generic_file_fsync,
- .llseek = generic_file_llseek,
+ .llseek = ufs_dir_llseek,
};
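
One detail in the ufs/dir.c hunks: folio_release_kmap() is called with whichever mapped address is handy — sometimes the base kaddr, sometimes the entry pointer de — which works because it only needs an address inside the mapped folio. Sketched:

	struct folio *folio;
	char *kaddr = ufs_get_folio(dir, n, &folio);
	struct ufs_dir_entry *de = (struct ufs_dir_entry *)kaddr;

	/* ... walk entries; de advances within the mapping ... */
	folio_release_kmap(folio, de);	/* same effect as passing kaddr */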
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index a7bb2e63cdde..5331ae7ebf3e 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -479,9 +479,9 @@ static int ufs_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, ufs_getfrag_block);
}
-int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
- return __block_write_begin(page, pos, len, ufs_getfrag_block);
+ return __block_write_begin(folio, pos, len, ufs_getfrag_block);
}
static void ufs_truncate_blocks(struct inode *);
@@ -498,11 +498,11 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to)
static int ufs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
int ret;
- ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
+ ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
if (unlikely(ret))
ufs_write_failed(mapping, pos + len);
@@ -511,11 +511,11 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping,
static int ufs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
int ret;
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len)
ufs_write_failed(mapping, pos + len);
return ret;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 9cad29463791..24bd12186647 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -209,14 +209,14 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
struct ufs_dir_entry *de;
- struct page *page;
+ struct folio *folio;
int err = -ENOENT;
- de = ufs_find_entry(dir, &dentry->d_name, &page);
+ de = ufs_find_entry(dir, &dentry->d_name, &folio);
if (!de)
goto out;
- err = ufs_delete_entry(dir, de, page);
+ err = ufs_delete_entry(dir, de, folio);
if (err)
goto out;
@@ -249,28 +249,28 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct page *dir_page = NULL;
+ struct folio *dir_folio = NULL;
struct ufs_dir_entry * dir_de = NULL;
- struct page *old_page;
+ struct folio *old_folio;
struct ufs_dir_entry *old_de;
int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
- old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
- dir_de = ufs_dotdot(old_inode, &dir_page);
+ dir_de = ufs_dotdot(old_inode, &dir_folio);
if (!dir_de)
goto out_old;
}
if (new_inode) {
- struct page *new_page;
+ struct folio *new_folio;
struct ufs_dir_entry *new_de;
err = -ENOTEMPTY;
@@ -278,10 +278,10 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto out_dir;
err = -ENOENT;
- new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+ new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
if (!new_de)
goto out_dir;
- ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
+ ufs_set_link(new_dir, new_de, new_folio, old_inode, 1);
inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
@@ -300,29 +300,24 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
*/
inode_set_ctime_current(old_inode);
- ufs_delete_entry(old_dir, old_de, old_page);
+ ufs_delete_entry(old_dir, old_de, old_folio);
mark_inode_dirty(old_inode);
if (dir_de) {
if (old_dir != new_dir)
- ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
- else {
- kunmap(dir_page);
- put_page(dir_page);
- }
+ ufs_set_link(old_inode, dir_de, dir_folio, new_dir, 0);
+ else
+ folio_release_kmap(dir_folio, new_dir);
inode_dec_link_count(old_dir);
}
return 0;
out_dir:
- if (dir_de) {
- kunmap(dir_page);
- put_page(dir_page);
- }
+ if (dir_de)
+ folio_release_kmap(dir_folio, dir_de);
out_old:
- kunmap(old_page);
- put_page(old_page);
+ folio_release_kmap(old_folio, old_de);
out:
return err;
}
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 6b499180643b..a2c762cb65a0 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -99,15 +99,17 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
/* dir.c */
extern const struct inode_operations ufs_dir_inode_operations;
-extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
-extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
-extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
-extern int ufs_empty_dir (struct inode *);
-extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
-extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
- struct page *page, struct inode *inode, bool update_times);
+
+int ufs_add_link(struct dentry *, struct inode *);
+ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
+int ufs_make_empty(struct inode *, struct inode *);
+struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *,
+ struct folio **);
+int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct folio *);
+int ufs_empty_dir(struct inode *);
+struct ufs_dir_entry *ufs_dotdot(struct inode *, struct folio **);
+void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
+ struct folio *folio, struct inode *inode, bool update_times);
/* file.c */
extern const struct inode_operations ufs_file_inode_operations;
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 0ecd2ed792f5..bf708b68f150 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -250,9 +250,9 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
}
}
-extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
-extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
-extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);
+dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
+void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
+int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
/*
* These functions manipulate ufs buffers
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 27a3e9285fbf..68cdd89c97a3 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -104,21 +104,6 @@ bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}
-static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
- vm_flags_t flags)
-{
- const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
-
- vm_flags_reset(vma, flags);
- /*
- * For shared mappings, we want to enable writenotify while
- * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
- * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
- */
- if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
- vma_set_page_prot(vma);
-}
-
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
int wake_flags, void *key)
{
@@ -386,15 +371,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
unsigned int blocking_state;
/*
- * We don't do userfault handling for the final child pid update.
- *
- * We also don't do userfault handling during
- * coredumping. hugetlbfs has the special
- * hugetlb_follow_page_mask() to skip missing pages in the
- * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
- * the no_page_table() helper in follow_page_mask(), but the
- * shmem_vm_ops->fault method is invoked even during
- * coredumping and it ends up here.
+ * We don't do userfault handling for the final child pid update
+ * and when coredumping (faults triggered by get_dump_page()).
*/
if (current->flags & (PF_EXITING|PF_DUMPCORE))
goto out;
@@ -615,22 +593,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
spin_unlock_irq(&ctx->event_wqh.lock);
if (release_new_ctx) {
- struct vm_area_struct *vma;
- struct mm_struct *mm = release_new_ctx->mm;
- VMA_ITERATOR(vmi, mm, 0);
-
- /* the various vma->vm_userfaultfd_ctx still points to it */
- mmap_write_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma,
- vma->vm_flags & ~__VM_UFFD_FLAGS);
- }
- }
- mmap_write_unlock(mm);
-
+ userfaultfd_release_new(release_new_ctx);
userfaultfd_ctx_put(release_new_ctx);
}
@@ -662,9 +625,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
return 0;
if (!(octx->features & UFFD_FEATURE_EVENT_FORK)) {
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ userfaultfd_reset_ctx(vma);
return 0;
}
@@ -749,9 +710,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
up_write(&ctx->map_changing_lock);
} else {
/* Drop uffd context if remap feature not enabled */
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ userfaultfd_reset_ctx(vma);
}
}
@@ -870,54 +829,14 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
struct mm_struct *mm = ctx->mm;
- struct vm_area_struct *vma, *prev;
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
- unsigned long new_flags;
- VMA_ITERATOR(vmi, mm, 0);
WRITE_ONCE(ctx->released, true);
- if (!mmget_not_zero(mm))
- goto wakeup;
+ userfaultfd_release_all(mm, ctx);
/*
- * Flush page faults out of all CPUs. NOTE: all page faults
- * must be retried without returning VM_FAULT_SIGBUS if
- * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
- * changes while handle_userfault released the mmap_lock. So
- * it's critical that released is set to true (above), before
- * taking the mmap_lock for writing.
- */
- mmap_write_lock(mm);
- prev = NULL;
- for_each_vma(vmi, vma) {
- cond_resched();
- BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
- !!(vma->vm_flags & __VM_UFFD_FLAGS));
- if (vma->vm_userfaultfd_ctx.ctx != ctx) {
- prev = vma;
- continue;
- }
- /* Reset ptes for the whole vma range if wr-protected */
- if (userfaultfd_wp(vma))
- uffd_wp_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, false);
- new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
- vma->vm_end, new_flags,
- NULL_VM_UFFD_CTX);
-
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-
- prev = vma;
- }
- mmap_write_unlock(mm);
- mmput(mm);
-wakeup:
- /*
* After no new page faults can wait on this fault_*wqh, flush
* the last page faults that may have been already waiting on
* the fault_*wqh.
@@ -1293,14 +1212,14 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct mm_struct *mm = ctx->mm;
- struct vm_area_struct *vma, *prev, *cur;
+ struct vm_area_struct *vma, *cur;
int ret;
struct uffdio_register uffdio_register;
struct uffdio_register __user *user_uffdio_register;
- unsigned long vm_flags, new_flags;
+ unsigned long vm_flags;
bool found;
bool basic_ioctls;
- unsigned long start, end, vma_end;
+ unsigned long start, end;
struct vma_iterator vmi;
bool wp_async = userfaultfd_wp_async_ctx(ctx);
@@ -1428,57 +1347,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
} for_each_vma_range(vmi, cur, end);
BUG_ON(!found);
- vma_iter_set(&vmi, start);
- prev = vma_prev(&vmi);
- if (vma->vm_start < start)
- prev = vma;
-
- ret = 0;
- for_each_vma_range(vmi, vma, end) {
- cond_resched();
-
- BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
- BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
- vma->vm_userfaultfd_ctx.ctx != ctx);
- WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
-
- /*
- * Nothing to do: this vma is already registered into this
- * userfaultfd and with the right tracking mode too.
- */
- if (vma->vm_userfaultfd_ctx.ctx == ctx &&
- (vma->vm_flags & vm_flags) == vm_flags)
- goto skip;
-
- if (vma->vm_start > start)
- start = vma->vm_start;
- vma_end = min(end, vma->vm_end);
-
- new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
- new_flags,
- (struct vm_userfaultfd_ctx){ctx});
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- break;
- }
-
- /*
- * In the vma_merge() successful mprotect-like case 8:
- * the next vma was merged into the current one and
- * the current one has not been updated yet.
- */
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx.ctx = ctx;
-
- if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
- hugetlb_unshare_all_pmds(vma);
-
- skip:
- prev = vma;
- start = vma->vm_end;
- }
+ ret = userfaultfd_register_range(ctx, vma, vm_flags, start, end,
+ wp_async);
out_unlock:
mmap_write_unlock(mm);
@@ -1519,7 +1389,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma, *prev, *cur;
int ret;
struct uffdio_range uffdio_unregister;
- unsigned long new_flags;
bool found;
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
@@ -1622,27 +1491,13 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
}
- /* Reset ptes for the whole vma range if wr-protected */
- if (userfaultfd_wp(vma))
- uffd_wp_range(vma, start, vma_end - start, false);
-
- new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
- new_flags, NULL_VM_UFFD_CTX);
+ vma = userfaultfd_clear_vma(&vmi, prev, vma,
+ start, vma_end);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
}
- /*
- * In the vma_merge() successful mprotect-like case 8:
- * the next vma was merged into the current one and
- * the current one has not been updated yet.
- */
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-
skip:
prev = vma;
start = vma->vm_end;
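
The userfaultfd hunks push the open-coded VMA manipulation behind helpers — userfaultfd_reset_ctx(), userfaultfd_release_new(), userfaultfd_release_all(), userfaultfd_register_range() and userfaultfd_clear_vma() — whose bodies live outside this file and are not shown here. Judging from the removed lines, the smallest of them presumably reduces to the following (a reconstruction, not the patch; the write-notify fixup from the deleted userfaultfd_set_vm_flags() would travel with it to wherever the helper now lives):

	void userfaultfd_reset_ctx(struct vm_area_struct *vma)
	{
		vma_start_write(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma,
					 vma->vm_flags & ~__VM_UFFD_FLAGS);
	}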
diff --git a/fs/utimes.c b/fs/utimes.c
index 3701b3946f88..99b26f792b89 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -115,9 +115,9 @@ static int do_utimes_fd(int fd, struct timespec64 *times, int flags)
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = vfs_utimes(&f.file->f_path, times);
+ error = vfs_utimes(&fd_file(f)->f_path, times);
fdput(f);
return error;
}
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index fdb4da24d662..b780deb81b02 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -300,23 +300,23 @@ static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
struct vboxsf_handle *sf_handle = file->private_data;
- unsigned int from = pos & ~PAGE_MASK;
+ size_t from = offset_in_folio(folio, pos);
u32 nwritten = len;
u8 *buf;
int err;
- /* zero the stale part of the page if we did a short copy */
- if (!PageUptodate(page) && copied < len)
- zero_user(page, from + copied, len - copied);
+ /* zero the stale part of the folio if we did a short copy */
+ if (!folio_test_uptodate(folio) && copied < len)
+ folio_zero_range(folio, from + copied, len - copied);
- buf = kmap(page);
+ buf = kmap(&folio->page);
err = vboxsf_write(sf_handle->root, sf_handle->handle,
pos, &nwritten, buf + from);
- kunmap(page);
+ kunmap(&folio->page);
if (err) {
nwritten = 0;
@@ -326,16 +326,16 @@ static int vboxsf_write_end(struct file *file, struct address_space *mapping,
/* mtime changed */
VBOXSF_I(inode)->force_restat = 1;
- if (!PageUptodate(page) && nwritten == PAGE_SIZE)
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio) && nwritten == folio_size(folio))
+ folio_mark_uptodate(folio);
pos += nwritten;
if (pos > inode->i_size)
i_size_write(inode, pos);
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return nwritten;
}
@@ -343,7 +343,7 @@ out:
/*
 * Note simple_write_begin does not read the page from disk on partial writes;
 * this is ok since vboxsf_write_end only writes the written parts of the
- * page and it does not call SetPageUptodate for partial writes.
+ * page and it does not call folio_mark_uptodate for partial writes.
*/
const struct address_space_operations vboxsf_reg_aops = {
.read_folio = vboxsf_read_folio,
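
vboxsf_write_end keeps a non-local kmap() by reaching for &folio->page. A kmap_local_folio() form would match the rest of the series, assuming the written range stays within the first page of the folio (a sketch, not what this patch does):

	buf = kmap_local_folio(folio, 0);
	err = vboxsf_write(sf_handle->root, sf_handle->handle,
			   pos, &nwritten, buf + from);
	kunmap_local(buf);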
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index 90c07573dd77..0302a4e506ec 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -17,6 +17,7 @@
#include <linux/cred.h>
#include <linux/key.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/verification.h>
@@ -41,7 +42,11 @@ static struct key *fsverity_keyring;
* @sig_size: size of signature in bytes, or 0 if no signature
*
* If the file includes a signature of its fs-verity file digest, verify it
- * against the certificates in the fs-verity keyring.
+ * against the certificates in the fs-verity keyring. Note that signatures
+ * are verified regardless of the state of the 'fsverity_require_signatures'
+ * variable and the LSM subsystem relies on this behavior to help enforce
+ * file integrity policies. Please discuss changes with the LSM list
+ * (thank you!).
*
* Return: 0 on success (signature valid or not required); -errno on failure
*/
@@ -106,6 +111,17 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
return err;
}
+ err = security_inode_setintegrity(inode,
+ LSM_INT_FSVERITY_BUILTINSIG_VALID,
+ signature,
+ sig_size);
+
+ if (err) {
+ fsverity_err(inode, "Error %d exposing file signature to LSMs",
+ err);
+ return err;
+ }
+
return 0;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index 7672ce5486c5..05ec7e7d9e87 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -697,19 +697,19 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
int error;
CLASS(fd, f)(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- audit_file(f.file);
+ audit_file(fd_file(f));
error = setxattr_copy(name, &ctx);
if (error)
return error;
- error = mnt_want_write_file(f.file);
+ error = mnt_want_write_file(fd_file(f));
if (!error) {
- error = do_setxattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, &ctx);
- mnt_drop_write_file(f.file);
+ error = do_setxattr(file_mnt_idmap(fd_file(f)),
+ fd_file(f)->f_path.dentry, &ctx);
+ mnt_drop_write_file(fd_file(f));
}
kvfree(ctx.kvalue);
return error;
@@ -812,10 +812,10 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
struct fd f = fdget(fd);
ssize_t error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
- error = getxattr(file_mnt_idmap(f.file), f.file->f_path.dentry,
+ audit_file(fd_file(f));
+ error = getxattr(file_mnt_idmap(fd_file(f)), fd_file(f)->f_path.dentry,
name, value, size);
fdput(f);
return error;
@@ -888,10 +888,10 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
struct fd f = fdget(fd);
ssize_t error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
- error = listxattr(f.file->f_path.dentry, list, size);
+ audit_file(fd_file(f));
+ error = listxattr(fd_file(f)->f_path.dentry, list, size);
fdput(f);
return error;
}
@@ -954,9 +954,9 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
char kname[XATTR_NAME_MAX + 1];
int error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
+ audit_file(fd_file(f));
error = strncpy_from_user(kname, name, sizeof(kname));
if (error == 0 || error == sizeof(kname))
@@ -964,11 +964,11 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
if (error < 0)
return error;
- error = mnt_want_write_file(f.file);
+ error = mnt_want_write_file(fd_file(f));
if (!error) {
- error = removexattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, kname);
- mnt_drop_write_file(f.file);
+ error = removexattr(file_mnt_idmap(fd_file(f)),
+ fd_file(f)->f_path.dentry, kname);
+ mnt_drop_write_file(fd_file(f));
}
fdput(f);
return error;
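
The mechanical f.file -> fd_file(f) conversion matters because struct fd is about to stop being an open-coded { file, flags } pair. A standalone toy model of the accessor pattern being adopted (all names here are invented for illustration, not the kernel's implementation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct file { int placeholder; };

/* Toy 'struct fd': pointer and flag bits packed into one word, so
 * callers must use the accessor rather than naming a struct member. */
struct toy_fd { uintptr_t word; };

#define TOY_FD_FLAGS	((uintptr_t)3)

static inline struct file *toy_fd_file(struct toy_fd f)
{
	return (struct file *)(f.word & ~TOY_FD_FLAGS);
}

int main(void)
{
	struct file *filp = malloc(sizeof(*filp));
	struct toy_fd f = { (uintptr_t)filp | 1 };	/* low bit: needs fdput */

	assert(toy_fd_file(f) == filp);
	printf("callers never see the packed representation\n");
	free(filp);
	return 0;
}
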
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 7e80732cb547..5f0494702e0b 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -46,7 +46,7 @@ xfs_perag_get(
struct xfs_perag *pag;
rcu_read_lock();
- pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ pag = xa_load(&mp->m_perags, agno);
if (pag) {
trace_xfs_perag_get(pag, _RET_IP_);
ASSERT(atomic_read(&pag->pag_ref) >= 0);
@@ -56,31 +56,6 @@ xfs_perag_get(
return pag;
}
-/*
- * search from @first to find the next perag with the given tag set.
- */
-struct xfs_perag *
-xfs_perag_get_tag(
- struct xfs_mount *mp,
- xfs_agnumber_t first,
- unsigned int tag)
-{
- struct xfs_perag *pag;
- int found;
-
- rcu_read_lock();
- found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
- (void **)&pag, first, 1, tag);
- if (found <= 0) {
- rcu_read_unlock();
- return NULL;
- }
- trace_xfs_perag_get_tag(pag, _RET_IP_);
- atomic_inc(&pag->pag_ref);
- rcu_read_unlock();
- return pag;
-}
-
/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
@@ -117,7 +92,7 @@ xfs_perag_grab(
struct xfs_perag *pag;
rcu_read_lock();
- pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ pag = xa_load(&mp->m_perags, agno);
if (pag) {
trace_xfs_perag_grab(pag, _RET_IP_);
if (!atomic_inc_not_zero(&pag->pag_active_ref))
@@ -127,32 +102,6 @@ xfs_perag_grab(
return pag;
}
-/*
- * search from @first to find the next perag with the given tag set.
- */
-struct xfs_perag *
-xfs_perag_grab_tag(
- struct xfs_mount *mp,
- xfs_agnumber_t first,
- int tag)
-{
- struct xfs_perag *pag;
- int found;
-
- rcu_read_lock();
- found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
- (void **)&pag, first, 1, tag);
- if (found <= 0) {
- rcu_read_unlock();
- return NULL;
- }
- trace_xfs_perag_grab_tag(pag, _RET_IP_);
- if (!atomic_inc_not_zero(&pag->pag_active_ref))
- pag = NULL;
- rcu_read_unlock();
- return pag;
-}
-
void
xfs_perag_rele(
struct xfs_perag *pag)
@@ -235,16 +184,6 @@ out:
return error;
}
-STATIC void
-__xfs_free_perag(
- struct rcu_head *head)
-{
- struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
-
- ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
- kfree(pag);
-}
-
/*
* Free up the per-ag resources associated with the mount structure.
*/
@@ -256,9 +195,7 @@ xfs_free_perag(
xfs_agnumber_t agno;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- spin_lock(&mp->m_perag_lock);
- pag = radix_tree_delete(&mp->m_perag_tree, agno);
- spin_unlock(&mp->m_perag_lock);
+ pag = xa_erase(&mp->m_perags, agno);
ASSERT(pag);
XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
xfs_defer_drain_free(&pag->pag_intents_drain);
@@ -270,7 +207,7 @@ xfs_free_perag(
xfs_perag_rele(pag);
XFS_IS_CORRUPT(pag->pag_mount,
atomic_read(&pag->pag_active_ref) != 0);
- call_rcu(&pag->rcu_head, __xfs_free_perag);
+ kfree_rcu_mightsleep(pag);
}
}
@@ -347,9 +284,7 @@ xfs_free_unused_perag_range(
xfs_agnumber_t index;
for (index = agstart; index < agend; index++) {
- spin_lock(&mp->m_perag_lock);
- pag = radix_tree_delete(&mp->m_perag_tree, index);
- spin_unlock(&mp->m_perag_lock);
+ pag = xa_erase(&mp->m_perags, index);
if (!pag)
break;
xfs_buf_cache_destroy(&pag->pag_bcache);
@@ -390,20 +325,11 @@ xfs_initialize_perag(
pag->pag_agno = index;
pag->pag_mount = mp;
- error = radix_tree_preload(GFP_KERNEL | __GFP_RETRY_MAYFAIL);
- if (error)
- goto out_free_pag;
-
- spin_lock(&mp->m_perag_lock);
- if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
- WARN_ON_ONCE(1);
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- error = -EEXIST;
+ error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL);
+ if (error) {
+ WARN_ON_ONCE(error == -EBUSY);
goto out_free_pag;
}
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
@@ -451,9 +377,7 @@ xfs_initialize_perag(
out_remove_pag:
xfs_defer_drain_free(&pag->pag_intents_drain);
- spin_lock(&mp->m_perag_lock);
- radix_tree_delete(&mp->m_perag_tree, index);
- spin_unlock(&mp->m_perag_lock);
+ pag = xa_erase(&mp->m_perags, index);
out_free_pag:
kfree(pag);
out_unwind_new_pags:
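
A minimal sketch of the xarray API this conversion leans on; the example_* names are stand-ins, while the xa_*() calls are the real interface:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_perags);	/* plays the role of mp->m_perags */

static int example_insert(unsigned long agno, void *pag)
{
	/*
	 * xa_insert() allocates internally and returns -EBUSY if the index
	 * is already occupied, replacing the radix_tree_preload() +
	 * m_perag_lock + radix_tree_insert() sequence with one call.
	 */
	return xa_insert(&example_perags, agno, pag, GFP_KERNEL);
}

static void *example_lookup(unsigned long agno)
{
	/* Lookups stay RCU-safe, as they were with the radix tree. */
	return xa_load(&example_perags, agno);
}
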
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 35de09a2516c..d9cccd093b60 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -63,9 +63,6 @@ struct xfs_perag {
/* Blocks reserved for the reverse mapping btree. */
struct xfs_ag_resv pag_rmapbt_resv;
- /* for rcu-safe freeing */
- struct rcu_head rcu_head;
-
/* Precalculated geometry info */
xfs_agblock_t block_count;
xfs_agblock_t min_block;
@@ -156,15 +153,11 @@ void xfs_free_perag(struct xfs_mount *mp);
/* Passive AG references */
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
-struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
- unsigned int tag);
struct xfs_perag *xfs_perag_hold(struct xfs_perag *pag);
void xfs_perag_put(struct xfs_perag *pag);
/* Active AG references */
struct xfs_perag *xfs_perag_grab(struct xfs_mount *, xfs_agnumber_t);
-struct xfs_perag *xfs_perag_grab_tag(struct xfs_mount *, xfs_agnumber_t,
- int tag);
void xfs_perag_rele(struct xfs_perag *pag);
/*
@@ -266,13 +259,6 @@ xfs_perag_next(
(agno) = 0; \
for_each_perag_from((mp), (agno), (pag))
-#define for_each_perag_tag(mp, agno, pag, tag) \
- for ((agno) = 0, (pag) = xfs_perag_grab_tag((mp), 0, (tag)); \
- (pag) != NULL; \
- (agno) = (pag)->pag_agno + 1, \
- xfs_perag_rele(pag), \
- (pag) = xfs_perag_grab_tag((mp), (agno), (tag)))
-
static inline struct xfs_perag *
xfs_perag_next_wrap(
struct xfs_perag *pag,
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 585e98e87ef9..aada676eee51 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -569,11 +569,11 @@ xfs_allocbt_block_maxrecs(
/*
* Calculate number of records in an alloc btree block.
*/
-int
+unsigned int
xfs_allocbt_maxrecs(
struct xfs_mount *mp,
- int blocklen,
- int leaf)
+ unsigned int blocklen,
+ bool leaf)
{
blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
return xfs_allocbt_block_maxrecs(blocklen, leaf);
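
The new signature is type cleanup only; the arithmetic is unchanged. Restated standalone (the header/record/key/pointer sizes below are illustrative, not the on-disk values):

#include <stdbool.h>
#include <stdio.h>

/* Leaf blocks hold whole records; node blocks hold key/pointer pairs. */
static unsigned int maxrecs(unsigned int blocklen, unsigned int hdrlen,
			    unsigned int reclen, unsigned int keylen,
			    unsigned int ptrlen, bool leaf)
{
	blocklen -= hdrlen;
	if (leaf)
		return blocklen / reclen;
	return blocklen / (keylen + ptrlen);
}

int main(void)
{
	/* 4096-byte block, 16-byte header, 8-byte recs/keys, 4-byte ptrs */
	printf("leaf %u, node %u\n",
	       maxrecs(4096, 16, 8, 8, 4, true),	/* 510 */
	       maxrecs(4096, 16, 8, 8, 4, false));	/* 340 */
	return 0;
}
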
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index 155b47f231ab..12647f9aaa6d 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
@@ -53,7 +53,8 @@ struct xfs_btree_cur *xfs_bnobt_init_cursor(struct xfs_mount *mp,
struct xfs_btree_cur *xfs_cntbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_perag *pag);
-extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
+unsigned int xfs_allocbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index b9e98950eb3d..e50d913ad32f 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -686,7 +686,7 @@ xfs_attr_shortform_bytesfit(
*/
if (!dp->i_forkoff && dp->i_df.if_bytes >
xfs_default_attroffset(dp))
- dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+ dsize = xfs_bmdr_space_calc(MINDBTPTRS);
break;
case XFS_DINODE_FMT_BTREE:
/*
@@ -700,7 +700,7 @@ xfs_attr_shortform_bytesfit(
return 0;
return dp->i_forkoff;
}
- dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
+ dsize = xfs_bmap_bmdr_space(dp->i_df.if_broot);
break;
}
@@ -708,11 +708,11 @@ xfs_attr_shortform_bytesfit(
* A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
- minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
+ minforkoff = max_t(int64_t, dsize, xfs_bmdr_space_calc(MINDBTPTRS));
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
- maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ maxforkoff = XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
if (offset >= maxforkoff)
@@ -1138,10 +1138,7 @@ xfs_attr3_leaf_to_shortform(
trace_xfs_attr_leaf_to_sf(args);
- tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
- if (!tmpbuffer)
- return -ENOMEM;
-
+ tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1205,7 +1202,7 @@ xfs_attr3_leaf_to_shortform(
error = 0;
out:
- kfree(tmpbuffer);
+ kvfree(tmpbuffer);
return error;
}
@@ -1613,7 +1610,7 @@ xfs_attr3_leaf_compact(
trace_xfs_attr_leaf_compact(args);
- tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
+ tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
memset(bp->b_addr, 0, args->geo->blksize);
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1651,7 +1648,7 @@ xfs_attr3_leaf_compact(
*/
xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
- kfree(tmpbuffer);
+ kvfree(tmpbuffer);
}
/*
@@ -2330,7 +2327,7 @@ xfs_attr3_leaf_unbalance(
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
- tmp_leaf = kzalloc(state->args->geo->blksize,
+ tmp_leaf = kvzalloc(state->args->geo->blksize,
GFP_KERNEL | __GFP_NOFAIL);
/*
@@ -2371,7 +2368,7 @@ xfs_attr3_leaf_unbalance(
}
memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
savehdr = tmphdr; /* struct copy */
- kfree(tmp_leaf);
+ kvfree(tmp_leaf);
}
xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
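
The kmalloc -> kvmalloc switches above matter once args->geo->blksize can exceed the largest physically contiguous allocation the slab layer will reliably hand out; kvmalloc may fall back to vmalloc, so the buffer must be released with kvfree(). The pattern in isolation, as a sketch:

#include <linux/slab.h>
#include <linux/string.h>

static void example_scratch_copy(void *dst, const void *src, size_t blksize)
{
	/* Possibly vmalloc-backed; only kvfree() may free it. */
	void *tmp = kvmalloc(blksize, GFP_KERNEL | __GFP_NOFAIL);

	memcpy(tmp, src, blksize);
	/* ... rearrange the copy in tmp ... */
	memcpy(dst, tmp, blksize);
	kvfree(tmp);
}
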
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7df74c35d9f9..8090e8249116 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -79,9 +79,9 @@ xfs_bmap_compute_maxlevels(
maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
whichfork);
if (whichfork == XFS_DATA_FORK)
- sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+ sz = xfs_bmdr_space_calc(MINDBTPTRS);
else
- sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ sz = xfs_bmdr_space_calc(MINABTPTRS);
maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
minleafrecs = mp->m_bmap_dmnr[0];
@@ -102,8 +102,8 @@ xfs_bmap_compute_attr_offset(
struct xfs_mount *mp)
{
if (mp->m_sb.sb_inodesize == 256)
- return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
- return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
+ return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
+ return xfs_bmdr_space_calc(6 * MINABTPTRS);
}
STATIC int /* error */
@@ -298,7 +298,7 @@ xfs_check_block(
prevp = NULL;
for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
dmxr = mp->m_bmap_dmxr[0];
- keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
+ keyp = xfs_bmbt_key_addr(mp, block, i);
if (prevp) {
ASSERT(be64_to_cpu(prevp->br_startoff) <
@@ -310,15 +310,15 @@ xfs_check_block(
* Compare the block numbers to see if there are dups.
*/
if (root)
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
+ pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
else
- pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
+ pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);
for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
if (root)
- thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
+ thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
else
- thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
+ thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
if (*thispa == *pp) {
xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
__func__, j, i,
@@ -373,7 +373,7 @@ xfs_bmap_check_leaf_extents(
level = be16_to_cpu(block->bb_level);
ASSERT(level > 0);
xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+ pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
bno = be64_to_cpu(*pp);
ASSERT(bno != NULLFSBLOCK);
@@ -406,7 +406,7 @@ xfs_bmap_check_leaf_extents(
*/
xfs_check_block(block, mp, 0, 0);
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+ pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
xfs_btree_mark_sick(cur);
@@ -446,14 +446,14 @@ xfs_bmap_check_leaf_extents(
* conform with the first entry in this one.
*/
- ep = XFS_BMBT_REC_ADDR(mp, block, 1);
+ ep = xfs_bmbt_rec_addr(mp, block, 1);
if (i) {
ASSERT(xfs_bmbt_disk_get_startoff(&last) +
xfs_bmbt_disk_get_blockcount(&last) <=
xfs_bmbt_disk_get_startoff(ep));
}
for (j = 1; j < num_recs; j++) {
- nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
+ nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
ASSERT(xfs_bmbt_disk_get_startoff(ep) +
xfs_bmbt_disk_get_blockcount(ep) <=
xfs_bmbt_disk_get_startoff(nextp));
@@ -584,9 +584,9 @@ xfs_bmap_btree_to_extents(
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
- ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
+ ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
+ pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
cbno = be64_to_cpu(*pp);
#ifdef DEBUG
if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
@@ -714,7 +714,7 @@ xfs_bmap_extents_to_btree(
for_each_xfs_iext(ifp, &icur, &rec) {
if (isnullstartblock(rec.br_startblock))
continue;
- arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
+ arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
xfs_bmbt_disk_set_all(arp, &rec);
cnt++;
}
@@ -724,10 +724,10 @@ xfs_bmap_extents_to_btree(
/*
* Fill in the root key and pointer.
*/
- kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
- arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
+ kp = xfs_bmbt_key_addr(mp, block, 1);
+ arp = xfs_bmbt_rec_addr(mp, ablock, 1);
kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
+ pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
be16_to_cpu(block->bb_level)));
*pp = cpu_to_be64(args.fsbno);
@@ -896,7 +896,7 @@ xfs_bmap_add_attrfork_btree(
mp = ip->i_mount;
- if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
+ if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
@@ -1160,7 +1160,7 @@ xfs_iread_bmbt_block(
}
/* Copy records into the incore cache. */
- frp = XFS_BMBT_REC_ADDR(mp, block, 1);
+ frp = xfs_bmbt_rec_addr(mp, block, 1);
for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
struct xfs_bmbt_irec new;
xfs_failaddr_t fa;
@@ -3112,6 +3112,23 @@ xfs_bmap_extsize_align(
return 0;
}
+static inline bool
+xfs_bmap_adjacent_valid(
+ struct xfs_bmalloca *ap,
+ xfs_fsblock_t x,
+ xfs_fsblock_t y)
+{
+ struct xfs_mount *mp = ap->ip->i_mount;
+
+ if (XFS_IS_REALTIME_INODE(ap->ip) &&
+ (ap->datatype & XFS_ALLOC_USERDATA))
+ return x < mp->m_sb.sb_rblocks;
+
+ return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
+ XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
+ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
+}
+
#define XFS_ALLOC_GAP_UNITS 4
/* returns true if ap->blkno was modified */
@@ -3119,36 +3136,25 @@ bool
xfs_bmap_adjacent(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
- xfs_fsblock_t adjust; /* adjustment to block numbers */
- xfs_mount_t *mp; /* mount point structure */
- int rt; /* true if inode is realtime */
-
-#define ISVALID(x,y) \
- (rt ? \
- (x) < mp->m_sb.sb_rblocks : \
- XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
- XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
- XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
-
- mp = ap->ip->i_mount;
- rt = XFS_IS_REALTIME_INODE(ap->ip) &&
- (ap->datatype & XFS_ALLOC_USERDATA);
+ xfs_fsblock_t adjust; /* adjustment to block numbers */
+
/*
* If allocating at eof, and there's a previous real block,
* try to use its last block as our starting point.
*/
if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
!isnullstartblock(ap->prev.br_startblock) &&
- ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
- ap->prev.br_startblock)) {
+ xfs_bmap_adjacent_valid(ap,
+ ap->prev.br_startblock + ap->prev.br_blockcount,
+ ap->prev.br_startblock)) {
ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
/*
* Adjust for the gap between prevp and us.
*/
adjust = ap->offset -
(ap->prev.br_startoff + ap->prev.br_blockcount);
- if (adjust &&
- ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
+ if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
+ ap->prev.br_startblock))
ap->blkno += adjust;
return true;
}
@@ -3171,7 +3177,8 @@ xfs_bmap_adjacent(
!isnullstartblock(ap->prev.br_startblock) &&
(prevbno = ap->prev.br_startblock +
ap->prev.br_blockcount) &&
- ISVALID(prevbno, ap->prev.br_startblock)) {
+ xfs_bmap_adjacent_valid(ap, prevbno,
+ ap->prev.br_startblock)) {
/*
* Calculate gap to end of previous block.
*/
@@ -3187,8 +3194,8 @@ xfs_bmap_adjacent(
* number, then just use the end of the previous block.
*/
if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
- ISVALID(prevbno + prevdiff,
- ap->prev.br_startblock))
+ xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
+ ap->prev.br_startblock))
prevbno += adjust;
else
prevdiff += adjust;
@@ -3220,9 +3227,11 @@ xfs_bmap_adjacent(
* offset by our length.
*/
if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
- ISVALID(gotbno - gotdiff, gotbno))
+ xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
+ gotbno))
gotbno -= adjust;
- else if (ISVALID(gotbno - ap->length, gotbno)) {
+ else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length,
+ gotbno)) {
gotbno -= ap->length;
gotdiff += adjust - ap->length;
} else
@@ -3250,7 +3259,7 @@ xfs_bmap_adjacent(
return true;
}
}
-#undef ISVALID
+
return false;
}
@@ -4847,6 +4856,7 @@ xfs_bmapi_remap(
}
ip->i_nblocks += len;
+ ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
@@ -5376,7 +5386,8 @@ xfs_bmap_del_extent_real(
*/
if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
- xfs_rtbitmap_lock(tp, mp);
+ xfs_rtbitmap_lock(mp);
+ xfs_rtbitmap_trans_join(tp);
}
error = xfs_rtfree_blocks(tp, del->br_startblock,
del->br_blockcount);
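
Replacing the function-local ISVALID() macro with xfs_bmap_adjacent_valid() makes the realtime/AG split explicit and evaluates its arguments exactly once. A standalone restatement of the predicate with toy geometry (all numbers are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t rblocks = 1 << 20;	/* toy rt device size */
static const uint32_t agcount = 4, agblocks = 65536;

static uint32_t fsb_to_agno(uint64_t fsb)  { return fsb / agblocks; }
static uint32_t fsb_to_agbno(uint64_t fsb) { return fsb % agblocks; }

/* rt allocations only need to stay inside the rt device; AG allocations
 * must land in the same (valid) AG as the block they are adjacent to. */
static bool adjacent_valid(bool realtime, uint64_t x, uint64_t y)
{
	if (realtime)
		return x < rblocks;
	return fsb_to_agno(x) == fsb_to_agno(y) &&
	       fsb_to_agno(x) < agcount &&
	       fsb_to_agbno(x) < agblocks;
}

int main(void)
{
	printf("%d\n", adjacent_valid(false, 65535, 65534)); /* 1: same AG */
	printf("%d\n", adjacent_valid(false, 65536, 65535)); /* 0: crosses AG */
	return 0;
}
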
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index d1b06ccde19e..3464be771f95 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -65,10 +65,10 @@ xfs_bmdr_to_bmbt(
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
- fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
- tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
- fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
- tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
+ fkp = xfs_bmdr_key_addr(dblock, 1);
+ tkp = xfs_bmbt_key_addr(mp, rblock, 1);
+ fpp = xfs_bmdr_ptr_addr(dblock, 1, dmxr);
+ tpp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
@@ -168,10 +168,10 @@ xfs_bmbt_to_bmdr(
dblock->bb_level = rblock->bb_level;
dblock->bb_numrecs = rblock->bb_numrecs;
dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
- fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
- tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
- fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
- tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
+ fkp = xfs_bmbt_key_addr(mp, rblock, 1);
+ tkp = xfs_bmdr_key_addr(dblock, 1);
+ fpp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
+ tpp = xfs_bmdr_ptr_addr(dblock, 1, dmxr);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
@@ -645,13 +645,13 @@ xfs_bmbt_commit_staged_btree(
/*
* Calculate number of records in a bmap btree block.
*/
-int
+unsigned int
xfs_bmbt_maxrecs(
struct xfs_mount *mp,
- int blocklen,
- int leaf)
+ unsigned int blocklen,
+ bool leaf)
{
- blocklen -= XFS_BMBT_BLOCK_LEN(mp);
+ blocklen -= xfs_bmbt_block_len(mp);
return xfs_bmbt_block_maxrecs(blocklen, leaf);
}
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h
index de1b73f1225c..49a3bae3f6ec 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.h
+++ b/fs/xfs/libxfs/xfs_bmap_btree.h
@@ -14,70 +14,6 @@ struct xfs_trans;
struct xbtree_ifakeroot;
/*
- * Btree block header size depends on a superblock flag.
- */
-#define XFS_BMBT_BLOCK_LEN(mp) \
- (xfs_has_crc(((mp))) ? \
- XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN)
-
-#define XFS_BMBT_REC_ADDR(mp, block, index) \
- ((xfs_bmbt_rec_t *) \
- ((char *)(block) + \
- XFS_BMBT_BLOCK_LEN(mp) + \
- ((index) - 1) * sizeof(xfs_bmbt_rec_t)))
-
-#define XFS_BMBT_KEY_ADDR(mp, block, index) \
- ((xfs_bmbt_key_t *) \
- ((char *)(block) + \
- XFS_BMBT_BLOCK_LEN(mp) + \
- ((index) - 1) * sizeof(xfs_bmbt_key_t)))
-
-#define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \
- ((xfs_bmbt_ptr_t *) \
- ((char *)(block) + \
- XFS_BMBT_BLOCK_LEN(mp) + \
- (maxrecs) * sizeof(xfs_bmbt_key_t) + \
- ((index) - 1) * sizeof(xfs_bmbt_ptr_t)))
-
-#define XFS_BMDR_REC_ADDR(block, index) \
- ((xfs_bmdr_rec_t *) \
- ((char *)(block) + \
- sizeof(struct xfs_bmdr_block) + \
- ((index) - 1) * sizeof(xfs_bmdr_rec_t)))
-
-#define XFS_BMDR_KEY_ADDR(block, index) \
- ((xfs_bmdr_key_t *) \
- ((char *)(block) + \
- sizeof(struct xfs_bmdr_block) + \
- ((index) - 1) * sizeof(xfs_bmdr_key_t)))
-
-#define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \
- ((xfs_bmdr_ptr_t *) \
- ((char *)(block) + \
- sizeof(struct xfs_bmdr_block) + \
- (maxrecs) * sizeof(xfs_bmdr_key_t) + \
- ((index) - 1) * sizeof(xfs_bmdr_ptr_t)))
-
-/*
- * These are to be used when we know the size of the block and
- * we don't have a cursor.
- */
-#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb, i, sz) \
- XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0))
-
-#define XFS_BMAP_BROOT_SPACE_CALC(mp, nrecs) \
- (int)(XFS_BMBT_BLOCK_LEN(mp) + \
- ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
-
-#define XFS_BMAP_BROOT_SPACE(mp, bb) \
- (XFS_BMAP_BROOT_SPACE_CALC(mp, be16_to_cpu((bb)->bb_numrecs)))
-#define XFS_BMDR_SPACE_CALC(nrecs) \
- (int)(sizeof(xfs_bmdr_block_t) + \
- ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
-#define XFS_BMAP_BMDR_SPACE(bb) \
- (XFS_BMDR_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs)))
-
-/*
* Maximum number of bmap btree levels.
*/
#define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)])
@@ -99,7 +35,8 @@ extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
extern int xfs_bmdr_maxrecs(int blocklen, int leaf);
-extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
+unsigned int xfs_bmbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, xfs_ino_t new_owner,
@@ -121,4 +58,144 @@ void xfs_bmbt_destroy_cur_cache(void);
void xfs_bmbt_init_block(struct xfs_inode *ip, struct xfs_btree_block *buf,
struct xfs_buf *bp, __u16 level, __u16 numrecs);
+/*
+ * Btree block header size depends on a superblock flag.
+ */
+static inline size_t
+xfs_bmbt_block_len(struct xfs_mount *mp)
+{
+ return xfs_has_crc(mp) ?
+ XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN;
+}
+
+/* Addresses of keys, pointers, and records within an incore bmbt block. */
+
+static inline struct xfs_bmbt_rec *
+xfs_bmbt_rec_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_bmbt_rec *)
+ ((char *)block + xfs_bmbt_block_len(mp) +
+ (index - 1) * sizeof(struct xfs_bmbt_rec));
+}
+
+static inline struct xfs_bmbt_key *
+xfs_bmbt_key_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_bmbt_key *)
+ ((char *)block + xfs_bmbt_block_len(mp) +
+ (index - 1) * sizeof(struct xfs_bmbt_key));
+}
+
+static inline xfs_bmbt_ptr_t *
+xfs_bmbt_ptr_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_bmbt_ptr_t *)
+ ((char *)block + xfs_bmbt_block_len(mp) +
+ maxrecs * sizeof(struct xfs_bmbt_key) +
+ (index - 1) * sizeof(xfs_bmbt_ptr_t));
+}
+
+/* Addresses of keys, pointers, and records within an ondisk bmbt block. */
+
+static inline struct xfs_bmbt_rec *
+xfs_bmdr_rec_addr(
+ struct xfs_bmdr_block *block,
+ unsigned int index)
+{
+ return (struct xfs_bmbt_rec *)
+ ((char *)(block + 1) +
+ (index - 1) * sizeof(struct xfs_bmbt_rec));
+}
+
+static inline struct xfs_bmbt_key *
+xfs_bmdr_key_addr(
+ struct xfs_bmdr_block *block,
+ unsigned int index)
+{
+ return (struct xfs_bmbt_key *)
+ ((char *)(block + 1) +
+ (index - 1) * sizeof(struct xfs_bmbt_key));
+}
+
+static inline xfs_bmbt_ptr_t *
+xfs_bmdr_ptr_addr(
+ struct xfs_bmdr_block *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_bmbt_ptr_t *)
+ ((char *)(block + 1) +
+ maxrecs * sizeof(struct xfs_bmbt_key) +
+ (index - 1) * sizeof(xfs_bmbt_ptr_t));
+}
+
+/*
+ * Address of pointers within the incore btree root.
+ *
+ * These are to be used when we know the size of the block and
+ * we don't have a cursor.
+ */
+static inline xfs_bmbt_ptr_t *
+xfs_bmap_broot_ptr_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *bb,
+ unsigned int i,
+ unsigned int sz)
+{
+ return xfs_bmbt_ptr_addr(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, false));
+}
+
+/*
+ * Compute the space required for the incore btree root containing the given
+ * number of records.
+ */
+static inline size_t
+xfs_bmap_broot_space_calc(
+ struct xfs_mount *mp,
+ unsigned int nrecs)
+{
+ return xfs_bmbt_block_len(mp) +
+ (nrecs * (sizeof(struct xfs_bmbt_key) + sizeof(xfs_bmbt_ptr_t)));
+}
+
+/*
+ * Compute the space required for the incore btree root given the ondisk
+ * btree root block.
+ */
+static inline size_t
+xfs_bmap_broot_space(
+ struct xfs_mount *mp,
+ struct xfs_bmdr_block *bb)
+{
+ return xfs_bmap_broot_space_calc(mp, be16_to_cpu(bb->bb_numrecs));
+}
+
+/* Compute the space required for the ondisk root block. */
+static inline size_t
+xfs_bmdr_space_calc(unsigned int nrecs)
+{
+ return sizeof(struct xfs_bmdr_block) +
+ (nrecs * (sizeof(struct xfs_bmbt_key) + sizeof(xfs_bmbt_ptr_t)));
+}
+
+/*
+ * Compute the space required for the ondisk root block given an incore root
+ * block.
+ */
+static inline size_t
+xfs_bmap_bmdr_space(struct xfs_btree_block *bb)
+{
+ return xfs_bmdr_space_calc(be16_to_cpu(bb->bb_numrecs));
+}
+
#endif /* __XFS_BMAP_BTREE_H__ */
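
The inline helpers above are direct transliterations of the removed macros. As a standalone check of the arithmetic (the 4-byte bmdr header and 8-byte key/pointer sizes are assumptions stated for illustration):

#include <stddef.h>
#include <stdio.h>

#define BMDR_HDR_LEN	4	/* assumed sizeof(struct xfs_bmdr_block) */
#define KEY_LEN		8	/* assumed sizeof(struct xfs_bmbt_key) */
#define PTR_LEN		8	/* assumed sizeof(xfs_bmbt_ptr_t) */

static size_t bmdr_space_calc(unsigned int nrecs)
{
	return BMDR_HDR_LEN + nrecs * (KEY_LEN + PTR_LEN);
}

int main(void)
{
	/* Space for a small on-disk root with 3 key/pointer pairs. */
	printf("%zu bytes\n", bmdr_space_calc(3));	/* 52 */
	return 0;
}
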
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 40021849b42f..2cd212ad2c1d 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -28,7 +28,6 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
-#include "xfs_trans_priv.h"
#include "xfs_exchmaps.h"
static struct kmem_cache *xfs_defer_pending_cache;
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 454b63ef7201..860284064c5a 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -8,6 +8,7 @@
/*
* SGI's XFS filesystem's major stuff (constants, structures)
+ * NOTE: This file must be compilable with C++ compilers.
*/
/*
@@ -826,6 +827,30 @@ struct xfs_exchange_range {
};
/*
+ * Using the same definition of file2 as struct xfs_exchange_range, commit the
+ * contents of file1 into file2 if file2 has the same inode number, mtime, and
+ * ctime as the arguments provided to the call. The old contents of file2 will
+ * be moved to file1.
+ *
+ * Returns -EBUSY if there isn't an exact match for the file2 fields.
+ *
+ * Filesystems must be able to restart and complete the operation even after
+ * the system goes down.
+ */
+struct xfs_commit_range {
+ __s32 file1_fd;
+ __u32 pad; /* must be zeroes */
+ __u64 file1_offset; /* file1 offset, bytes */
+ __u64 file2_offset; /* file2 offset, bytes */
+ __u64 length; /* bytes to exchange */
+
+ __u64 flags; /* see XFS_EXCHANGE_RANGE_* below */
+
+ /* opaque file2 metadata for freshness checks */
+ __u64 file2_freshness[6];
+};
+
+/*
* Exchange file data all the way to the ends of both files, and then exchange
* the file sizes. This flag can be used to replace a file's contents with a
* different amount of data. length will be ignored.
@@ -906,13 +931,13 @@ static inline struct xfs_getparents_rec *
xfs_getparents_next_rec(struct xfs_getparents *gp,
struct xfs_getparents_rec *gpr)
{
- void *next = ((void *)gpr + gpr->gpr_reclen);
+ void *next = ((char *)gpr + gpr->gpr_reclen);
void *end = (void *)(uintptr_t)(gp->gp_buffer + gp->gp_bufsize);
if (next >= end)
return NULL;
- return next;
+ return (struct xfs_getparents_rec *)next;
}
/* Iterate through this file handle's directory parent pointers. */
@@ -997,6 +1022,8 @@ struct xfs_getparents_by_handle {
#define XFS_IOC_BULKSTAT _IOR ('X', 127, struct xfs_bulkstat_req)
#define XFS_IOC_INUMBERS _IOR ('X', 128, struct xfs_inumbers_req)
#define XFS_IOC_EXCHANGE_RANGE _IOW ('X', 129, struct xfs_exchange_range)
+#define XFS_IOC_START_COMMIT _IOR ('X', 130, struct xfs_commit_range)
+#define XFS_IOC_COMMIT_RANGE _IOW ('X', 131, struct xfs_commit_range)
/* XFS_IOC_GETFSUUID ---------- deprecated 140 */
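
A hedged userspace sketch of how the new ioctl pair is presumably driven; the calling convention is inferred from the structure layout and the _IOR/_IOW directions above, not from documentation in this hunk, and it assumes the updated xfs_fs.h is on the include path:

#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

static void commit_swap(int fd1, int fd2, __u64 len)
{
	struct xfs_commit_range args;

	memset(&args, 0, sizeof(args));
	args.file1_fd = fd1;
	args.length = len;

	/* Presumably samples file2's ino/mtime/ctime into file2_freshness. */
	if (ioctl(fd2, XFS_IOC_START_COMMIT, &args) < 0)
		err(1, "start commit");

	/* ... stage the replacement contents in file1 ... */

	/* Should fail with EBUSY if file2 changed since START_COMMIT. */
	if (ioctl(fd2, XFS_IOC_COMMIT_RANGE, &args) < 0)
		err(1, "commit range");
}
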
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 0af5b7a33d05..271855227514 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1855,11 +1855,12 @@ out_release:
int
xfs_dialloc(
struct xfs_trans **tpp,
- xfs_ino_t parent,
- umode_t mode,
+ const struct xfs_icreate_args *args,
xfs_ino_t *new_ino)
{
struct xfs_mount *mp = (*tpp)->t_mountp;
+ xfs_ino_t parent = args->pip ? args->pip->i_ino : 0;
+ umode_t mode = args->mode & S_IFMT;
xfs_agnumber_t agno;
int error = 0;
xfs_agnumber_t start_agno;
@@ -2947,8 +2948,8 @@ xfs_ialloc_setup_geometry(
/* Compute inode btree geometry. */
igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
- igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
- igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+ igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, true);
+ igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, false);
igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
@@ -3033,6 +3034,11 @@ xfs_ialloc_setup_geometry(
igeo->ialloc_align = mp->m_dalign;
else
igeo->ialloc_align = 0;
+
+ if (mp->m_sb.sb_blocksize > PAGE_SIZE)
+ igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
+ else
+ igeo->min_folio_order = 0;
}
/* Compute the location of the root directory inode that is laid out by mkfs. */
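
The new min_folio_order field encodes the large-block-size rule that every page cache folio must cover at least one filesystem block. The computation in isolation (4096-byte pages assumed for the example):

#include <stdio.h>

#define EX_PAGE_SHIFT	12	/* assumed 4096-byte pages */

static unsigned int min_folio_order(unsigned int blocklog)
{
	return blocklog > EX_PAGE_SHIFT ? blocklog - EX_PAGE_SHIFT : 0;
}

int main(void)
{
	printf("%u\n", min_folio_order(12));	/* 4k blocks: order 0 */
	printf("%u\n", min_folio_order(16));	/* 64k blocks: order 4 */
	return 0;
}
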
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index b549627e3a61..3a1323155a45 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -33,11 +33,13 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
return xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog);
}
+struct xfs_icreate_args;
+
/*
* Allocate an inode on disk. Mode is used to tell whether the new inode will
* need space, and whether it is a directory.
*/
-int xfs_dialloc(struct xfs_trans **tpp, xfs_ino_t parent, umode_t mode,
+int xfs_dialloc(struct xfs_trans **tpp, const struct xfs_icreate_args *args,
xfs_ino_t *new_ino);
int xfs_difree(struct xfs_trans *tp, struct xfs_perag *pag,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 797d5b5f7b72..401b42d52af6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -572,11 +572,11 @@ xfs_inobt_block_maxrecs(
/*
* Calculate number of records in an inobt btree block.
*/
-int
+unsigned int
xfs_inobt_maxrecs(
struct xfs_mount *mp,
- int blocklen,
- int leaf)
+ unsigned int blocklen,
+ bool leaf)
{
blocklen -= XFS_INOBT_BLOCK_LEN(mp);
return xfs_inobt_block_maxrecs(blocklen, leaf);
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index 6472ec1ecbb4..300edf5bc009 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -50,7 +50,8 @@ struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp);
struct xfs_btree_cur *xfs_finobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp);
-extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
+unsigned int xfs_inobt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
/* ir_holemask to inode allocation bitmap conversion */
uint64_t xfs_inobt_irec_to_allocmask(const struct xfs_inobt_rec_incore *irec);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 9d11ae015909..1158ca48626b 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -185,7 +185,7 @@ xfs_iformat_btree(
ifp = xfs_ifork_ptr(ip, whichfork);
dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
- size = XFS_BMAP_BROOT_SPACE(mp, dfp);
+ size = xfs_bmap_broot_space(mp, dfp);
nrecs = be16_to_cpu(dfp->bb_numrecs);
level = be16_to_cpu(dfp->bb_level);
@@ -198,7 +198,7 @@ xfs_iformat_btree(
*/
if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) ||
nrecs == 0 ||
- XFS_BMDR_SPACE_CALC(nrecs) >
+ xfs_bmdr_space_calc(nrecs) >
XFS_DFORK_SIZE(dip, mp, whichfork) ||
ifp->if_nextents > ip->i_nblocks) ||
level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) {
@@ -409,7 +409,7 @@ xfs_iroot_realloc(
* allocate it now and get out.
*/
if (ifp->if_broot_bytes == 0) {
- new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
+ new_size = xfs_bmap_broot_space_calc(mp, rec_diff);
ifp->if_broot = kmalloc(new_size,
GFP_KERNEL | __GFP_NOFAIL);
ifp->if_broot_bytes = (int)new_size;
@@ -422,17 +422,17 @@ xfs_iroot_realloc(
* location. The records don't change location because
* they are kept butted up against the btree block header.
*/
- cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
+ cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false);
new_max = cur_max + rec_diff;
- new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
+ new_size = xfs_bmap_broot_space_calc(mp, new_max);
ifp->if_broot = krealloc(ifp->if_broot, new_size,
GFP_KERNEL | __GFP_NOFAIL);
- op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+ op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
- np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+ np = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
(int)new_size);
ifp->if_broot_bytes = (int)new_size;
- ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+ ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <=
xfs_inode_fork_size(ip, whichfork));
memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
return;
@@ -444,11 +444,11 @@ xfs_iroot_realloc(
* records, just get rid of the root and clear the status bit.
*/
ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
- cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
+ cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false);
new_max = cur_max + rec_diff;
ASSERT(new_max >= 0);
if (new_max > 0)
- new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
+ new_size = xfs_bmap_broot_space_calc(mp, new_max);
else
new_size = 0;
if (new_size > 0) {
@@ -457,28 +457,28 @@ xfs_iroot_realloc(
* First copy over the btree block header.
*/
memcpy(new_broot, ifp->if_broot,
- XFS_BMBT_BLOCK_LEN(ip->i_mount));
+ xfs_bmbt_block_len(ip->i_mount));
} else {
new_broot = NULL;
}
/*
- * Only copy the records and pointers if there are any.
+ * Only copy the keys and pointers if there are any.
*/
if (new_max > 0) {
/*
- * First copy the records.
+ * First copy the keys.
*/
- op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
- np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
- memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
+ op = (char *)xfs_bmbt_key_addr(mp, ifp->if_broot, 1);
+ np = (char *)xfs_bmbt_key_addr(mp, new_broot, 1);
+ memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_key_t));
/*
* Then copy the pointers.
*/
- op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+ op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
- np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
+ np = (char *)xfs_bmap_broot_ptr_addr(mp, new_broot, 1,
(int)new_size);
memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
}
@@ -486,7 +486,7 @@ xfs_iroot_realloc(
ifp->if_broot = new_broot;
ifp->if_broot_bytes = (int)new_size;
if (ifp->if_broot)
- ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+ ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <=
xfs_inode_fork_size(ip, whichfork));
return;
}
@@ -655,7 +655,7 @@ xfs_iflush_fork(
if ((iip->ili_fields & brootflag[whichfork]) &&
(ifp->if_broot_bytes > 0)) {
ASSERT(ifp->if_broot != NULL);
- ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+ ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <=
xfs_inode_fork_size(ip, whichfork));
xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
(xfs_bmdr_block_t *)cp,
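
The REC_ADDR -> key_addr and sizeof(rec) -> sizeof(key) changes in xfs_iroot_realloc() above are a correctness fix, not renaming: an incore broot stores keys, then pointers, after the header. A toy illustration of why the record stride is wrong there (8-byte keys and 16-byte records assumed, per the usual bmbt on-disk sizes):

#include <stdio.h>

struct toy_key { unsigned long long startoff; };	/*  8 bytes */
struct toy_rec { unsigned long long l0, l1; };		/* 16 bytes */

int main(void)
{
	unsigned int hdr = 4, n = 3;

	/* Keys sit right after the header; pointers follow the key array. */
	printf("key array: bytes %u..%zu\n",
	       hdr, hdr + n * sizeof(struct toy_key));
	/* Copying with the record stride would run past the key array. */
	printf("record-stride copy would span: bytes %u..%zu\n",
	       hdr, hdr + n * sizeof(struct toy_rec));
	return 0;
}
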
diff --git a/fs/xfs/libxfs/xfs_inode_util.c b/fs/xfs/libxfs/xfs_inode_util.c
index 032333289113..cc38e1c3c3e1 100644
--- a/fs/xfs/libxfs/xfs_inode_util.c
+++ b/fs/xfs/libxfs/xfs_inode_util.c
@@ -308,7 +308,7 @@ xfs_inode_init(
!vfsgid_in_group_p(i_gid_into_vfsgid(args->idmap, inode)))
inode->i_mode &= ~S_ISGID;
- ip->i_projid = pip ? xfs_get_initial_prid(pip) : 0;
+ ip->i_projid = xfs_get_initial_prid(pip);
}
ip->i_disk_size = 0;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index cb3b1d42ae9a..795928d1a66d 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -417,9 +417,10 @@ xfs_refcountbt_block_maxrecs(
/*
* Calculate the number of records in a refcount btree block.
*/
-int
+unsigned int
xfs_refcountbt_maxrecs(
- int blocklen,
+ struct xfs_mount *mp,
+ unsigned int blocklen,
bool leaf)
{
blocklen -= XFS_REFCOUNT_BLOCK_LEN;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 1e0ab25f6c68..beb93bef6a81 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -48,7 +48,8 @@ struct xbtree_afakeroot;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp,
struct xfs_perag *pag);
-extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
+unsigned int xfs_refcountbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 56fd6c4bd8b4..ac2f1f499b76 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -731,10 +731,11 @@ xfs_rmapbt_block_maxrecs(
/*
* Calculate number of records in an rmap btree block.
*/
-int
+unsigned int
xfs_rmapbt_maxrecs(
- int blocklen,
- int leaf)
+ struct xfs_mount *mp,
+ unsigned int blocklen,
+ bool leaf)
{
blocklen -= XFS_RMAP_BLOCK_LEN;
return xfs_rmapbt_block_maxrecs(blocklen, leaf);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index eb90d89e8086..119b1567cd0e 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -47,7 +47,8 @@ struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_perag *pag);
void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp);
-int xfs_rmapbt_maxrecs(int blocklen, int leaf);
+unsigned int xfs_rmapbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 386b672c5058..27a4472402ba 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -13,6 +13,8 @@
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
@@ -69,7 +71,7 @@ xfs_rtbuf_cache_relse(
* Get a buffer for the bitmap or summary file block specified.
* The buffer is returned read and locked.
*/
-int
+static int
xfs_rtbuf_get(
struct xfs_rtalloc_args *args,
xfs_fileoff_t block, /* block number in bitmap or summary */
@@ -138,15 +140,43 @@ xfs_rtbuf_get(
return 0;
}
+int
+xfs_rtbitmap_read_buf(
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t block)
+{
+ struct xfs_mount *mp = args->mp;
+
+ if (XFS_IS_CORRUPT(mp, block >= mp->m_sb.sb_rbmblocks)) {
+ xfs_rt_mark_sick(mp, XFS_SICK_RT_BITMAP);
+ return -EFSCORRUPTED;
+ }
+
+ return xfs_rtbuf_get(args, block, 0);
+}
+
+int
+xfs_rtsummary_read_buf(
+ struct xfs_rtalloc_args *args,
+ xfs_fileoff_t block)
+{
+ struct xfs_mount *mp = args->mp;
+
+ if (XFS_IS_CORRUPT(mp, block >= mp->m_rsumblocks)) {
+ xfs_rt_mark_sick(args->mp, XFS_SICK_RT_SUMMARY);
+ return -EFSCORRUPTED;
+ }
+ return xfs_rtbuf_get(args, block, 1);
+}
+
/*
- * Searching backward from start to limit, find the first block whose
- * allocated/free state is different from start's.
+ * Searching backward from start, find the first block whose allocated/free state
+ * is different from start's.
*/
int
xfs_rtfind_back(
struct xfs_rtalloc_args *args,
xfs_rtxnum_t start, /* starting rtext to look at */
- xfs_rtxnum_t limit, /* last rtext to look at */
xfs_rtxnum_t *rtx) /* out: start rtext found */
{
struct xfs_mount *mp = args->mp;
@@ -175,7 +205,7 @@ xfs_rtfind_back(
*/
word = xfs_rtx_to_rbmword(mp, start);
bit = (int)(start & (XFS_NBWORD - 1));
- len = start - limit + 1;
+ len = start + 1;
/*
* Compute match value, based on the bit at start: if 1 (free)
* then all-ones, else all-zeroes.
@@ -316,6 +346,8 @@ xfs_rtfind_forw(
xfs_rtword_t incore;
unsigned int word; /* word number in the buffer */
+ ASSERT(start <= limit);
+
/*
* Compute and read in starting bitmap block for starting block.
*/
@@ -698,7 +730,7 @@ xfs_rtfree_range(
* We need to find the beginning and end of the extent so we can
* properly update the summary.
*/
- error = xfs_rtfind_back(args, start, 0, &preblock);
+ error = xfs_rtfind_back(args, start, &preblock);
if (error) {
return error;
}
@@ -990,25 +1022,24 @@ xfs_rtfree_blocks(
xfs_filblks_t rtlen)
{
struct xfs_mount *mp = tp->t_mountp;
- xfs_rtxnum_t start;
- xfs_filblks_t len;
xfs_extlen_t mod;
ASSERT(rtlen <= XFS_MAX_BMBT_EXTLEN);
- len = xfs_rtb_to_rtxrem(mp, rtlen, &mod);
+ mod = xfs_rtb_to_rtxoff(mp, rtlen);
if (mod) {
ASSERT(mod == 0);
return -EIO;
}
- start = xfs_rtb_to_rtxrem(mp, rtbno, &mod);
+ mod = xfs_rtb_to_rtxoff(mp, rtbno);
if (mod) {
ASSERT(mod == 0);
return -EIO;
}
- return xfs_rtfree_extent(tp, start, len);
+ return xfs_rtfree_extent(tp, xfs_rtb_to_rtx(mp, rtbno),
+ xfs_rtb_to_rtx(mp, rtlen));
}
/* Find all the free records within a given range. */
@@ -1016,8 +1047,8 @@ int
xfs_rtalloc_query_range(
struct xfs_mount *mp,
struct xfs_trans *tp,
- const struct xfs_rtalloc_rec *low_rec,
- const struct xfs_rtalloc_rec *high_rec,
+ xfs_rtxnum_t start,
+ xfs_rtxnum_t end,
xfs_rtalloc_query_range_fn fn,
void *priv)
{
@@ -1025,45 +1056,42 @@ xfs_rtalloc_query_range(
.mp = mp,
.tp = tp,
};
- struct xfs_rtalloc_rec rec;
- xfs_rtxnum_t rtstart;
- xfs_rtxnum_t rtend;
- xfs_rtxnum_t high_key;
- int is_free;
int error = 0;
- if (low_rec->ar_startext > high_rec->ar_startext)
+ if (start > end)
return -EINVAL;
- if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
- low_rec->ar_startext == high_rec->ar_startext)
+ if (start == end || start >= mp->m_sb.sb_rextents)
return 0;
- high_key = min(high_rec->ar_startext, mp->m_sb.sb_rextents - 1);
+ end = min(end, mp->m_sb.sb_rextents - 1);
/* Iterate the bitmap, looking for discrepancies. */
- rtstart = low_rec->ar_startext;
- while (rtstart <= high_key) {
+ while (start <= end) {
+ struct xfs_rtalloc_rec rec;
+ int is_free;
+ xfs_rtxnum_t rtend;
+
/* Is the first block free? */
- error = xfs_rtcheck_range(&args, rtstart, 1, 1, &rtend,
+ error = xfs_rtcheck_range(&args, start, 1, 1, &rtend,
&is_free);
if (error)
break;
/* How long does the extent go for? */
- error = xfs_rtfind_forw(&args, rtstart, high_key, &rtend);
+ error = xfs_rtfind_forw(&args, start, end, &rtend);
if (error)
break;
if (is_free) {
- rec.ar_startext = rtstart;
- rec.ar_extcount = rtend - rtstart + 1;
+ rec.ar_startext = start;
+ rec.ar_extcount = rtend - start + 1;
error = fn(mp, tp, &rec, priv);
if (error)
break;
}
- rtstart = rtend + 1;
+ start = rtend + 1;
}
xfs_rtbuf_cache_relse(&args);
@@ -1078,13 +1106,8 @@ xfs_rtalloc_query_all(
xfs_rtalloc_query_range_fn fn,
void *priv)
{
- struct xfs_rtalloc_rec keys[2];
-
- keys[0].ar_startext = 0;
- keys[1].ar_startext = mp->m_sb.sb_rextents - 1;
- keys[0].ar_extcount = keys[1].ar_extcount = 0;
-
- return xfs_rtalloc_query_range(mp, tp, &keys[0], &keys[1], fn, priv);
+ return xfs_rtalloc_query_range(mp, tp, 0, mp->m_sb.sb_rextents - 1, fn,
+ priv);
}
/* Is the given extent all free? */
@@ -1125,21 +1148,6 @@ xfs_rtbitmap_blockcount(
return howmany_64(rtextents, NBBY * mp->m_sb.sb_blocksize);
}
-/*
- * Compute the number of rtbitmap words needed to populate every block of a
- * bitmap that is large enough to track the given number of rt extents.
- */
-unsigned long long
-xfs_rtbitmap_wordcount(
- struct xfs_mount *mp,
- xfs_rtbxlen_t rtextents)
-{
- xfs_filblks_t blocks;
-
- blocks = xfs_rtbitmap_blockcount(mp, rtextents);
- return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
-}
-
/* Compute the number of rtsummary blocks needed to track the given rt space. */
xfs_filblks_t
xfs_rtsummary_blockcount(
@@ -1153,39 +1161,25 @@ xfs_rtsummary_blockcount(
return XFS_B_TO_FSB(mp, rsumwords << XFS_WORDLOG);
}
-/*
- * Compute the number of rtsummary info words needed to populate every block of
- * a summary file that is large enough to track the given rt space.
- */
-unsigned long long
-xfs_rtsummary_wordcount(
- struct xfs_mount *mp,
- unsigned int rsumlevels,
- xfs_extlen_t rbmblocks)
+/* Lock both realtime free space metadata inodes for a freespace update. */
+void
+xfs_rtbitmap_lock(
+ struct xfs_mount *mp)
{
- xfs_filblks_t blocks;
-
- blocks = xfs_rtsummary_blockcount(mp, rsumlevels, rbmblocks);
- return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
}
/*
- * Lock both realtime free space metadata inodes for a freespace update. If a
- * transaction is given, the inodes will be joined to the transaction and the
+ * Join both realtime free space metadata inodes to the transaction. The
* ILOCKs will be released on transaction commit.
*/
void
-xfs_rtbitmap_lock(
- struct xfs_trans *tp,
- struct xfs_mount *mp)
+xfs_rtbitmap_trans_join(
+ struct xfs_trans *tp)
{
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
- if (tp)
- xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
-
- xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
- if (tp)
- xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, tp->t_mountp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, tp->t_mountp->m_rsumip, XFS_ILOCK_EXCL);
}
/* Unlock both realtime free space metadata inodes after a freespace update. */
@@ -1225,3 +1219,127 @@ xfs_rtbitmap_unlock_shared(
if (rbmlock_flags & XFS_RBMLOCK_BITMAP)
xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
}
+
+static int
+xfs_rtfile_alloc_blocks(
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_filblks_t count_fsb,
+ struct xfs_bmbt_irec *map)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int nmap = 1;
+ int error;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtalloc,
+ XFS_GROWFSRT_SPACE_RES(mp, count_fsb), 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ goto out_trans_cancel;
+
+ error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
+ XFS_BMAPI_METADATA, 0, map, &nmap);
+ if (error)
+ goto out_trans_cancel;
+
+ return xfs_trans_commit(tp);
+
+out_trans_cancel:
+ xfs_trans_cancel(tp);
+ return error;
+}
+
+/* Get a buffer for the block. */
+static int
+xfs_rtfile_initialize_block(
+ struct xfs_inode *ip,
+ xfs_fsblock_t fsbno,
+ void *data)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ struct xfs_buf *bp;
+ const size_t copylen = mp->m_blockwsize << XFS_WORDLOG;
+ enum xfs_blft buf_type;
+ int error;
+
+ if (ip == mp->m_rsumip)
+ buf_type = XFS_BLFT_RTSUMMARY_BUF;
+ else
+ buf_type = XFS_BLFT_RTBITMAP_BUF;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero, 0, 0, 0, &tp);
+ if (error)
+ return error;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp);
+ if (error) {
+ xfs_trans_cancel(tp);
+ return error;
+ }
+
+ xfs_trans_buf_set_type(tp, bp, buf_type);
+ bp->b_ops = &xfs_rtbuf_ops;
+ if (data)
+ memcpy(bp->b_addr, data, copylen);
+ else
+ memset(bp->b_addr, 0, copylen);
+ xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+ return xfs_trans_commit(tp);
+}
+
+/*
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ * @data must be a contiguous buffer large enough to fill all blocks in the
+ * file, or NULL to initialize the contents to zeroes.
+ */
+int
+xfs_rtfile_initialize_blocks(
+ struct xfs_inode *ip, /* inode (bitmap/summary) */
+ xfs_fileoff_t offset_fsb, /* offset to start from */
+ xfs_fileoff_t end_fsb, /* offset to allocate to */
+ void *data) /* data to fill the blocks */
+{
+ struct xfs_mount *mp = ip->i_mount;
+ const size_t copylen = mp->m_blockwsize << XFS_WORDLOG;
+
+ while (offset_fsb < end_fsb) {
+ struct xfs_bmbt_irec map;
+ xfs_filblks_t i;
+ int error;
+
+ error = xfs_rtfile_alloc_blocks(ip, offset_fsb,
+ end_fsb - offset_fsb, &map);
+ if (error)
+ return error;
+
+ /*
+ * Now we need to clear the allocated blocks.
+ *
+ * Do this one block per transaction, to keep it simple.
+ */
+ for (i = 0; i < map.br_blockcount; i++) {
+ error = xfs_rtfile_initialize_block(ip,
+ map.br_startblock + i, data);
+ if (error)
+ return error;
+ if (data)
+ data += copylen;
+ }
+
+ offset_fsb = map.br_startoff + map.br_blockcount;
+ }
+
+ return 0;
+}
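
For the sizing used throughout this file, one bitmap bit tracks one rt extent. A standalone restatement of xfs_rtbitmap_blockcount() (4096-byte blocks assumed for the example):

#include <inttypes.h>
#include <stdio.h>

#define NBBY	8U	/* bits per byte */

static uint64_t howmany64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t rtextents = 1000000, blocksize = 4096;

	/* Each block carries blocksize * NBBY allocation bits. */
	printf("%" PRIu64 " bitmap blocks\n",
	       howmany64(rtextents, NBBY * blocksize));	/* 31 */
	return 0;
}
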
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
index 6186585f2c37..140513d1d6bc 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.h
+++ b/fs/xfs/libxfs/xfs_rtbitmap.h
@@ -87,24 +87,6 @@ xfs_rtb_to_rtxoff(
}
/*
- * Crack an rt block number into an rt extent number and an offset within that
- * rt extent. Returns the rt extent number directly and the offset in @off.
- */
-static inline xfs_rtxnum_t
-xfs_rtb_to_rtxrem(
- struct xfs_mount *mp,
- xfs_rtblock_t rtbno,
- xfs_extlen_t *off)
-{
- if (likely(mp->m_rtxblklog >= 0)) {
- *off = rtbno & mp->m_rtxblkmask;
- return rtbno >> mp->m_rtxblklog;
- }
-
- return div_u64_rem(rtbno, mp->m_sb.sb_rextsize, off);
-}
-
-/*
* Convert an rt block number into an rt extent number, rounding up to the next
* rt extent if the rt block is not aligned to an rt extent boundary.
*/
@@ -293,30 +275,12 @@ typedef int (*xfs_rtalloc_query_range_fn)(
#ifdef CONFIG_XFS_RT
void xfs_rtbuf_cache_relse(struct xfs_rtalloc_args *args);
-
-int xfs_rtbuf_get(struct xfs_rtalloc_args *args, xfs_fileoff_t block,
- int issum);
-
-static inline int
-xfs_rtbitmap_read_buf(
- struct xfs_rtalloc_args *args,
- xfs_fileoff_t block)
-{
- return xfs_rtbuf_get(args, block, 0);
-}
-
-static inline int
-xfs_rtsummary_read_buf(
- struct xfs_rtalloc_args *args,
- xfs_fileoff_t block)
-{
- return xfs_rtbuf_get(args, block, 1);
-}
-
+int xfs_rtbitmap_read_buf(struct xfs_rtalloc_args *args, xfs_fileoff_t block);
+int xfs_rtsummary_read_buf(struct xfs_rtalloc_args *args, xfs_fileoff_t block);
int xfs_rtcheck_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
xfs_rtxlen_t len, int val, xfs_rtxnum_t *new, int *stat);
int xfs_rtfind_back(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
- xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
+ xfs_rtxnum_t *rtblock);
int xfs_rtfind_forw(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
int xfs_rtmodify_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
@@ -328,8 +292,7 @@ int xfs_rtmodify_summary(struct xfs_rtalloc_args *args, int log,
int xfs_rtfree_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
xfs_rtxlen_t len);
int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
- const struct xfs_rtalloc_rec *low_rec,
- const struct xfs_rtalloc_rec *high_rec,
+ xfs_rtxnum_t start, xfs_rtxnum_t end,
xfs_rtalloc_query_range_fn fn, void *priv);
int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn,
@@ -353,16 +316,15 @@ int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
rtextents);
-unsigned long long xfs_rtbitmap_wordcount(struct xfs_mount *mp,
- xfs_rtbxlen_t rtextents);
-
xfs_filblks_t xfs_rtsummary_blockcount(struct xfs_mount *mp,
unsigned int rsumlevels, xfs_extlen_t rbmblocks);
-unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
- unsigned int rsumlevels, xfs_extlen_t rbmblocks);
-void xfs_rtbitmap_lock(struct xfs_trans *tp, struct xfs_mount *mp);
+int xfs_rtfile_initialize_blocks(struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb, xfs_fileoff_t end_fsb, void *data);
+
+void xfs_rtbitmap_lock(struct xfs_mount *mp);
void xfs_rtbitmap_unlock(struct xfs_mount *mp);
+void xfs_rtbitmap_trans_join(struct xfs_trans *tp);
/* Lock the rt bitmap inode in shared mode */
#define XFS_RBMLOCK_BITMAP (1U << 0)
@@ -388,10 +350,9 @@ xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
/* shut up gcc */
return 0;
}
-# define xfs_rtbitmap_wordcount(mp, r) (0)
# define xfs_rtsummary_blockcount(mp, l, b) (0)
-# define xfs_rtsummary_wordcount(mp, l, b) (0)
-# define xfs_rtbitmap_lock(tp, mp) do { } while (0)
+# define xfs_rtbitmap_lock(mp) do { } while (0)
+# define xfs_rtbitmap_trans_join(tp) do { } while (0)
# define xfs_rtbitmap_unlock(mp) do { } while (0)
# define xfs_rtbitmap_lock_shared(mp, lf) do { } while (0)
# define xfs_rtbitmap_unlock_shared(mp, lf) do { } while (0)
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 6b56f0f6d4c1..d95409f3cba6 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -232,6 +232,38 @@ xfs_validate_sb_read(
return 0;
}
+static uint64_t
+xfs_sb_calc_rbmblocks(
+ struct xfs_sb *sbp)
+{
+ return howmany_64(sbp->sb_rextents, NBBY * sbp->sb_blocksize);
+}
+
+/* Validate the realtime geometry */
+bool
+xfs_validate_rt_geometry(
+ struct xfs_sb *sbp)
+{
+ if (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE ||
+ sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)
+ return false;
+
+ if (sbp->sb_rblocks == 0) {
+ if (sbp->sb_rextents != 0 || sbp->sb_rbmblocks != 0 ||
+ sbp->sb_rextslog != 0 || sbp->sb_frextents != 0)
+ return false;
+ return true;
+ }
+
+ if (sbp->sb_rextents == 0 ||
+ sbp->sb_rextents != div_u64(sbp->sb_rblocks, sbp->sb_rextsize) ||
+ sbp->sb_rextslog != xfs_compute_rextslog(sbp->sb_rextents) ||
+ sbp->sb_rbmblocks != xfs_sb_calc_rbmblocks(sbp))
+ return false;
+
+ return true;
+}
+
/* Check all the superblock fields we care about when writing one out. */
STATIC int
xfs_validate_sb_write(
@@ -491,39 +523,13 @@ xfs_validate_sb_common(
}
}
- /* Validate the realtime geometry; stolen from xfs_repair */
- if (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE ||
- sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) {
+ if (!xfs_validate_rt_geometry(sbp)) {
xfs_notice(mp,
- "realtime extent sanity check failed");
+ "realtime %sgeometry check failed",
+ sbp->sb_rblocks ? "" : "zeroed ");
return -EFSCORRUPTED;
}
- if (sbp->sb_rblocks == 0) {
- if (sbp->sb_rextents != 0 || sbp->sb_rbmblocks != 0 ||
- sbp->sb_rextslog != 0 || sbp->sb_frextents != 0) {
- xfs_notice(mp,
- "realtime zeroed geometry check failed");
- return -EFSCORRUPTED;
- }
- } else {
- uint64_t rexts;
- uint64_t rbmblocks;
-
- rexts = div_u64(sbp->sb_rblocks, sbp->sb_rextsize);
- rbmblocks = howmany_64(sbp->sb_rextents,
- NBBY * sbp->sb_blocksize);
-
- if (!xfs_validate_rtextents(rexts) ||
- sbp->sb_rextents != rexts ||
- sbp->sb_rextslog != xfs_compute_rextslog(rexts) ||
- sbp->sb_rbmblocks != rbmblocks) {
- xfs_notice(mp,
- "realtime geometry sanity check failed");
- return -EFSCORRUPTED;
- }
- }
-
/*
* Either (sb_unit and !hasdalign) or (!sb_unit and hasdalign)
* would imply the image is corrupted.
@@ -959,6 +965,15 @@ const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
.verify_write = xfs_sb_write_verify,
};
+void
+xfs_mount_sb_set_rextsize(
+ struct xfs_mount *mp,
+ struct xfs_sb *sbp)
+{
+ mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
+ mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
+}
+
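The two fields cached by xfs_mount_sb_set_rextsize() let rt block/extent conversions use a shift and a mask instead of 64-bit division whenever sb_rextsize is a power of two (the helpers fall back to the division path otherwise). A small sketch of the fast-path equivalence, with invented values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t rextsize   = 256;		/* power of two */
    	int	 rtxblklog  = 8;		/* log2(256) */
    	uint64_t rtxblkmask = rextsize - 1;	/* 0xff */
    	uint64_t rtbno	    = 123456789;

    	/* division/modulo ... */
    	uint64_t rtx_div = rtbno / rextsize;
    	uint64_t mod_div = rtbno % rextsize;

    	/* ... and the shift/mask fast path are interchangeable */
    	uint64_t rtx_shift = rtbno >> rtxblklog;
    	uint64_t mod_mask  = rtbno & rtxblkmask;

    	assert(rtx_div == rtx_shift && mod_div == mod_mask);
    	return 0;
    }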
/*
* xfs_mount_common
*
@@ -983,26 +998,25 @@ xfs_sb_mount_common(
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
- mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
- mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
+ xfs_mount_sb_set_rextsize(mp, sbp);
- mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
- mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, true);
+ mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, false);
mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
- mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
- mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, true);
+ mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, false);
mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
- mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 1);
- mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 0);
+ mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, true);
+ mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, false);
mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
- mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, true);
- mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, false);
+ mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, true);
+ mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, false);
mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 37b1ed1bc209..885c83755991 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -17,6 +17,8 @@ extern void xfs_log_sb(struct xfs_trans *tp);
extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
extern int xfs_sync_sb_buf(struct xfs_mount *mp);
extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
+void xfs_mount_sb_set_rextsize(struct xfs_mount *mp,
+ struct xfs_sb *sbp);
extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
@@ -38,6 +40,7 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp,
bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
__s64 sunit, __s64 swidth, int sectorsize, bool may_repair,
bool silent);
+bool xfs_validate_rt_geometry(struct xfs_sb *sbp);
uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 2f7413afbf46..33b84a3a83ff 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -224,6 +224,9 @@ struct xfs_ino_geometry {
/* precomputed value for di_flags2 */
uint64_t new_diflags2;
+ /* minimum folio order of a page cache allocation */
+ unsigned int min_folio_order;
+
};
#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 45aaf169806a..1a7f95bcf069 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -130,7 +130,7 @@ xfs_calc_inode_res(
(4 * sizeof(struct xlog_op_header) +
sizeof(struct xfs_inode_log_format) +
mp->m_sb.sb_inodesize +
- 2 * XFS_BMBT_BLOCK_LEN(mp));
+ 2 * xfs_bmbt_block_len(mp));
}
/*
@@ -918,7 +918,7 @@ xfs_calc_growrtfree_reservation(
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
xfs_calc_inode_res(mp, 2) +
xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
- xfs_calc_buf_res(1, mp->m_rsumsize);
+ xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, mp->m_rsumblocks));
}
/*
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 76eb9e328835..a8cd44d03ef6 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -235,16 +235,4 @@ bool xfs_verify_fileoff(struct xfs_mount *mp, xfs_fileoff_t off);
bool xfs_verify_fileext(struct xfs_mount *mp, xfs_fileoff_t off,
xfs_fileoff_t len);
-/* Do we support an rt volume having this number of rtextents? */
-static inline bool
-xfs_validate_rtextents(
- xfs_rtbxlen_t rtextents)
-{
- /* No runt rt volumes */
- if (rtextents == 0)
- return false;
-
- return true;
-}
-
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c
index 1e656fab5e41..49dc38acc66b 100644
--- a/fs/xfs/scrub/bmap_repair.c
+++ b/fs/xfs/scrub/bmap_repair.c
@@ -480,7 +480,7 @@ xrep_bmap_iroot_size(
{
ASSERT(level > 0);
- return XFS_BMAP_BROOT_SPACE_CALC(cur->bc_mp, nr_this_level);
+ return xfs_bmap_broot_space_calc(cur->bc_mp, nr_this_level);
}
/* Update the inode counters. */
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 3d5f1f6b4b7b..47148cc4a833 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -53,6 +53,11 @@ int xchk_checkpoint_log(struct xfs_mount *mp);
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
struct xfs_btree_cur **curpp);
+static inline int xchk_setup_nothing(struct xfs_scrub *sc)
+{
+ return -ENOENT;
+}
+
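The scrub header cleanups in this patch repeatedly use the same C idiom: one shared static inline stub returning -ENOENT, with per-name macro aliases replacing the old copy-pasted inline stubs. A standalone sketch of the pattern, with invented names:

    #include <errno.h>
    #include <stdio.h>

    struct ctx { int unused; };

    static inline int do_nothing(struct ctx *c)
    {
    	return -ENOENT;		/* subsystem compiled out */
    }

    /* every disabled entry point collapses onto the single stub */
    #define check_foo	do_nothing
    #define check_bar	do_nothing

    int main(void)
    {
    	struct ctx c = { 0 };

    	printf("%d %d\n", check_foo(&c), check_bar(&c));
    	return 0;
    }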
/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
@@ -72,16 +77,8 @@ int xchk_setup_dirtree(struct xfs_scrub *sc);
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
int xchk_setup_rtsummary(struct xfs_scrub *sc);
#else
-static inline int
-xchk_setup_rtbitmap(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
-static inline int
-xchk_setup_rtsummary(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
+# define xchk_setup_rtbitmap xchk_setup_nothing
+# define xchk_setup_rtsummary xchk_setup_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_ino_dqattach(struct xfs_scrub *sc);
@@ -93,16 +90,8 @@ xchk_ino_dqattach(struct xfs_scrub *sc)
{
return 0;
}
-static inline int
-xchk_setup_quota(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
-static inline int
-xchk_setup_quotacheck(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
+# define xchk_setup_quota xchk_setup_nothing
+# define xchk_setup_quotacheck xchk_setup_nothing
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);
int xchk_setup_nlinks(struct xfs_scrub *sc);
diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c
index daf9f1ee7c2c..3e45b9b72312 100644
--- a/fs/xfs/scrub/inode_repair.c
+++ b/fs/xfs/scrub/inode_repair.c
@@ -846,7 +846,7 @@ xrep_dinode_bad_bmbt_fork(
nrecs = be16_to_cpu(dfp->bb_numrecs);
level = be16_to_cpu(dfp->bb_level);
- if (nrecs == 0 || XFS_BMDR_SPACE_CALC(nrecs) > dfork_size)
+ if (nrecs == 0 || xfs_bmdr_space_calc(nrecs) > dfork_size)
return true;
if (level == 0 || level >= XFS_BM_MAXLEVELS(sc->mp, whichfork))
return true;
@@ -858,12 +858,12 @@ xrep_dinode_bad_bmbt_fork(
xfs_fileoff_t fileoff;
xfs_fsblock_t fsbno;
- fkp = XFS_BMDR_KEY_ADDR(dfp, i);
+ fkp = xfs_bmdr_key_addr(dfp, i);
fileoff = be64_to_cpu(fkp->br_startoff);
if (!xfs_verify_fileoff(sc->mp, fileoff))
return true;
- fpp = XFS_BMDR_PTR_ADDR(dfp, i, dmxr);
+ fpp = xfs_bmdr_ptr_addr(dfp, i, dmxr);
fsbno = be64_to_cpu(*fpp);
if (!xfs_verify_fsbno(sc->mp, fsbno))
return true;
@@ -1121,7 +1121,7 @@ xrep_dinode_ensure_forkoff(
struct xfs_bmdr_block *bmdr;
struct xfs_scrub *sc = ri->sc;
xfs_extnum_t attr_extents, data_extents;
- size_t bmdr_minsz = XFS_BMDR_SPACE_CALC(1);
+ size_t bmdr_minsz = xfs_bmdr_space_calc(1);
unsigned int lit_sz = XFS_LITINO(sc->mp);
unsigned int afork_min, dfork_min;
@@ -1173,7 +1173,7 @@ xrep_dinode_ensure_forkoff(
case XFS_DINODE_FMT_BTREE:
/* Must have space for btree header and key/pointers. */
bmdr = XFS_DFORK_PTR(dip, XFS_ATTR_FORK);
- afork_min = XFS_BMAP_BROOT_SPACE(sc->mp, bmdr);
+ afork_min = xfs_bmap_broot_space(sc->mp, bmdr);
break;
default:
/* We should never see any other formats. */
@@ -1223,7 +1223,7 @@ xrep_dinode_ensure_forkoff(
case XFS_DINODE_FMT_BTREE:
/* Must have space for btree header and key/pointers. */
bmdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
- dfork_min = XFS_BMAP_BROOT_SPACE(sc->mp, bmdr);
+ dfork_min = xfs_bmap_broot_space(sc->mp, bmdr);
break;
default:
dfork_min = 0;
diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
index 3fee603f5244..7c7366c98338 100644
--- a/fs/xfs/scrub/rtsummary.c
+++ b/fs/xfs/scrub/rtsummary.c
@@ -63,7 +63,8 @@ xchk_setup_rtsummary(
* us to avoid pinning kernel memory for this purpose.
*/
descr = xchk_xfile_descr(sc, "realtime summary file");
- error = xfile_create(descr, mp->m_rsumsize, &sc->xfile);
+ error = xfile_create(descr, XFS_FSB_TO_B(mp, mp->m_rsumblocks),
+ &sc->xfile);
kfree(descr);
if (error)
return error;
@@ -95,16 +96,14 @@ xchk_setup_rtsummary(
* volume. Hence it is safe to compute and check the geometry values.
*/
if (mp->m_sb.sb_rblocks) {
- xfs_filblks_t rsumblocks;
int rextslog;
rts->rextents = xfs_rtb_to_rtx(mp, mp->m_sb.sb_rblocks);
rextslog = xfs_compute_rextslog(rts->rextents);
rts->rsumlevels = rextslog + 1;
rts->rbmblocks = xfs_rtbitmap_blockcount(mp, rts->rextents);
- rsumblocks = xfs_rtsummary_blockcount(mp, rts->rsumlevels,
+ rts->rsumblocks = xfs_rtsummary_blockcount(mp, rts->rsumlevels,
rts->rbmblocks);
- rts->rsumsize = XFS_FSB_TO_B(mp, rsumblocks);
}
return 0;
}
@@ -316,7 +315,7 @@ xchk_rtsummary(
}
/* Is m_rsumsize correct? */
- if (mp->m_rsumsize != rts->rsumsize) {
+ if (mp->m_rsumblocks != rts->rsumblocks) {
xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
goto out_rbm;
}
@@ -332,7 +331,7 @@ xchk_rtsummary(
* growfsrt expands the summary file before updating sb_rextents, so
* the file can be larger than rsumsize.
*/
- if (mp->m_rsumip->i_disk_size < rts->rsumsize) {
+ if (mp->m_rsumip->i_disk_size < XFS_FSB_TO_B(mp, rts->rsumblocks)) {
xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
goto out_rbm;
}
diff --git a/fs/xfs/scrub/rtsummary.h b/fs/xfs/scrub/rtsummary.h
index e1d50304d8d4..e44b04cb6e2d 100644
--- a/fs/xfs/scrub/rtsummary.h
+++ b/fs/xfs/scrub/rtsummary.h
@@ -14,7 +14,7 @@ struct xchk_rtsummary {
uint64_t rextents;
uint64_t rbmblocks;
- uint64_t rsumsize;
+ xfs_filblks_t rsumblocks;
unsigned int rsumlevels;
unsigned int resblks;
diff --git a/fs/xfs/scrub/rtsummary_repair.c b/fs/xfs/scrub/rtsummary_repair.c
index d9e971c4c79f..7deeb948cb70 100644
--- a/fs/xfs/scrub/rtsummary_repair.c
+++ b/fs/xfs/scrub/rtsummary_repair.c
@@ -56,7 +56,7 @@ xrep_setup_rtsummary(
* transaction (which we cannot drop because we cannot drop the
* rtsummary ILOCK) and cannot ask for more reservation.
*/
- blocks = XFS_B_TO_FSB(mp, mp->m_rsumsize);
+ blocks = mp->m_rsumblocks;
blocks += xfs_bmbt_calc_size(mp, blocks) * 2;
if (blocks > UINT_MAX)
return -EOPNOTSUPP;
@@ -100,7 +100,6 @@ xrep_rtsummary(
{
struct xchk_rtsummary *rts = sc->buf;
struct xfs_mount *mp = sc->mp;
- xfs_filblks_t rsumblocks;
int error;
/* We require the rmapbt to rebuild anything. */
@@ -131,10 +130,9 @@ xrep_rtsummary(
}
/* Make sure we have space allocated for the entire summary file. */
- rsumblocks = XFS_B_TO_FSB(mp, rts->rsumsize);
xfs_trans_ijoin(sc->tp, sc->ip, 0);
xfs_trans_ijoin(sc->tp, sc->tempip, 0);
- error = xrep_tempfile_prealloc(sc, 0, rsumblocks);
+ error = xrep_tempfile_prealloc(sc, 0, rts->rsumblocks);
if (error)
return error;
@@ -143,11 +141,11 @@ xrep_rtsummary(
return error;
/* Copy the rtsummary file that we generated. */
- error = xrep_tempfile_copyin(sc, 0, rsumblocks,
+ error = xrep_tempfile_copyin(sc, 0, rts->rsumblocks,
xrep_rtsummary_prep_buf, rts);
if (error)
return error;
- error = xrep_tempfile_set_isize(sc, rts->rsumsize);
+ error = xrep_tempfile_set_isize(sc, XFS_FSB_TO_B(mp, rts->rsumblocks));
if (error)
return error;
@@ -168,7 +166,7 @@ xrep_rtsummary(
memset(mp->m_rsum_cache, 0xFF, mp->m_sb.sb_rbmblocks);
mp->m_rsumlevels = rts->rsumlevels;
- mp->m_rsumsize = rts->rsumsize;
+ mp->m_rsumblocks = rts->rsumblocks;
/* Free the old rtsummary blocks if they're not in use. */
return xrep_reap_ifork(sc, sc->tempip, XFS_DATA_FORK);
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index 1bc33f010d0e..5993fcaffb2c 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -231,6 +231,11 @@ xchk_should_terminate(
return false;
}
+static inline int xchk_nothing(struct xfs_scrub *sc)
+{
+ return -ENOENT;
+}
+
/* Metadata scrubbers */
int xchk_tester(struct xfs_scrub *sc);
int xchk_superblock(struct xfs_scrub *sc);
@@ -254,31 +259,15 @@ int xchk_dirtree(struct xfs_scrub *sc);
int xchk_rtbitmap(struct xfs_scrub *sc);
int xchk_rtsummary(struct xfs_scrub *sc);
#else
-static inline int
-xchk_rtbitmap(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
-static inline int
-xchk_rtsummary(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
+# define xchk_rtbitmap xchk_nothing
+# define xchk_rtsummary xchk_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_quota(struct xfs_scrub *sc);
int xchk_quotacheck(struct xfs_scrub *sc);
#else
-static inline int
-xchk_quota(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
-static inline int
-xchk_quotacheck(struct xfs_scrub *sc)
-{
- return -ENOENT;
-}
+# define xchk_quota xchk_nothing
+# define xchk_quotacheck xchk_nothing
#endif
int xchk_fscounters(struct xfs_scrub *sc);
int xchk_nlinks(struct xfs_scrub *sc);
diff --git a/fs/xfs/scrub/tempfile.c b/fs/xfs/scrub/tempfile.c
index d390d56cd875..177f922acfaf 100644
--- a/fs/xfs/scrub/tempfile.c
+++ b/fs/xfs/scrub/tempfile.c
@@ -88,7 +88,7 @@ xrep_tempfile_create(
goto out_release_dquots;
/* Allocate inode, set up directory. */
- error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ error = xfs_dialloc(&tp, &args, &ino);
if (error)
goto out_trans_cancel;
error = xfs_icreate(tp, ino, &args, &sc->tempip);
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index 9b5d98fe1f8a..c753c79df203 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -126,7 +126,7 @@ xfile_load(
unsigned int len;
unsigned int offset;
- if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
SGP_READ) < 0)
break;
if (!folio) {
@@ -196,7 +196,7 @@ xfile_store(
unsigned int len;
unsigned int offset;
- if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
SGP_CACHE) < 0)
break;
if (filemap_check_wb_err(inode->i_mapping, 0)) {
@@ -267,7 +267,7 @@ xfile_get_folio(
i_size_write(inode, pos + len);
pflags = memalloc_nofs_save();
- error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ error = shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
(flags & XFILE_ALLOC) ? SGP_CACHE : SGP_READ);
memalloc_nofs_restore(pflags);
if (error)
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index e224b49b7cff..35a8c1b8b3cb 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -346,6 +346,17 @@ xfs_bmap_defer_add(
trace_xfs_bmap_defer(bi);
xfs_bmap_update_get_group(tp->t_mountp, bi);
+
+ /*
+ * Ensure the deferred mapping is pre-recorded in i_delayed_blks.
+ *
+ * Otherwise stat can report zero blocks for an inode that actually has
+ * data when the entire mapping is in the process of being overwritten
+ * using the out-of-place write path. This is undone in xfs_bmapi_remap
+ * after it has incremented di_nblocks for a successful operation.
+ */
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
}
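Some context for the comment above: the block count reported by stat() is derived, roughly, from the on-disk count plus the delalloc reservation count, so a file whose only mapping sits in a deferred bmap intent would otherwise appear empty mid-operation. A hedged userspace model of that accounting (the formula is a simplification; the field names mirror the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* 512-byte basic blocks per 4k fs block */
    #define FSB_TO_BB(fsb)	((fsb) * 8)

    struct inode_counts {
    	uint64_t di_nblocks;		/* blocks owned on disk */
    	uint64_t i_delayed_blks;	/* delalloc + deferred mappings */
    };

    static uint64_t stat_blocks(const struct inode_counts *ip)
    {
    	return FSB_TO_BB(ip->di_nblocks + ip->i_delayed_blks);
    }

    int main(void)
    {
    	struct inode_counts ip = { 0, 0 };

    	/* whole-file out-of-place overwrite: old map gone, new map deferred */
    	ip.i_delayed_blks += 100;	/* xfs_bmap_defer_add */
    	printf("st_blocks mid-operation: %llu\n",
    	       (unsigned long long)stat_blocks(&ip));	/* nonzero */

    	ip.di_nblocks += 100;		/* xfs_bmapi_remap succeeded ... */
    	ip.i_delayed_blks -= 100;	/* ... and undoes the pre-record */
    	printf("st_blocks afterwards: %llu\n",
    	       (unsigned long long)stat_blocks(&ip));
    	return 0;
    }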
@@ -367,6 +378,9 @@ xfs_bmap_update_cancel_item(
{
struct xfs_bmap_intent *bi = bi_entry(item);
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks -= bi->bi_bmap.br_blockcount;
+
xfs_bmap_update_put_group(bi);
kmem_cache_free(xfs_bmap_intent_cache, bi);
}
@@ -464,6 +478,9 @@ xfs_bui_recover_work(
bi->bi_owner = *ipp;
xfs_bmap_update_get_group(mp, bi);
+ /* see xfs_bmap_defer_add for details */
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
xfs_defer_add_item(dfp, &bi->bi_list);
return bi;
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index fe2e2c930975..053d567c9108 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -331,8 +331,7 @@ xfs_getbmap(
}
if (xfs_get_extsz_hint(ip) ||
- (ip->i_diflags &
- (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
+ (ip->i_diflags & XFS_DIFLAG_PREALLOC))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
@@ -492,12 +491,12 @@ bool
xfs_can_free_eofblocks(
struct xfs_inode *ip)
{
- struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
+ bool found_blocks = false;
xfs_fileoff_t end_fsb;
xfs_fileoff_t last_fsb;
- int nimaps = 1;
- int error;
+ struct xfs_bmbt_irec imap;
+ struct xfs_iext_cursor icur;
/*
* Caller must either hold the exclusive io lock; or be inactivating
@@ -524,12 +523,11 @@ xfs_can_free_eofblocks(
return false;
/*
- * Only free real extents for inodes with persistent preallocations or
- * the append-only flag.
+ * Do not free real extents in preallocated files unless the file has
+ * delalloc blocks and we are forced to remove them.
*/
- if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
- if (ip->i_delayed_blks == 0)
- return false;
+ if ((ip->i_diflags & XFS_DIFLAG_PREALLOC) && !ip->i_delayed_blks)
+ return false;
/*
* Do not try to free post-EOF blocks if EOF is beyond the end of the
@@ -544,21 +542,13 @@ xfs_can_free_eofblocks(
return false;
/*
- * Look up the mapping for the first block past EOF. If we can't find
- * it, there's nothing to free.
+ * Check if there is a post-EOF extent to free.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
- 0);
+ if (xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
+ found_blocks = true;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- if (error || nimaps == 0)
- return false;
-
- /*
- * If there's a real mapping there or there are delayed allocation
- * reservations, then we have post-EOF blocks to try to free.
- */
- return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
+ return found_blocks;
}
/*
@@ -653,6 +643,9 @@ xfs_alloc_file_space(
xfs_bmbt_irec_t imaps[1], *imapp;
int error;
+ if (xfs_is_always_cow_inode(ip))
+ return 0;
+
trace_xfs_alloc_file_space(ip);
if (xfs_is_shutdown(mp))
@@ -848,6 +841,14 @@ xfs_free_file_space(
if (len <= 0) /* if nothing being freed */
return 0;
+ /*
+ * Now that AIO and DIO have drained, we flush and (if necessary)
+ * invalidate the cached range over the first operation we are about to run.
+ */
+ error = xfs_flush_unmap_range(ip, offset, len);
+ if (error)
+ return error;
+
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
@@ -1184,7 +1185,7 @@ xfs_swap_extents_check_format(
*/
if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
if (xfs_inode_has_attr_fork(ip) &&
- XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
+ xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
return -EINVAL;
if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
@@ -1193,7 +1194,7 @@ xfs_swap_extents_check_format(
/* Reciprocal target->temp btree format checks */
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (xfs_inode_has_attr_fork(tip) &&
- XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
+ xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
return -EINVAL;
if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index b1580644501f..209a389f2abc 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -210,7 +210,7 @@ struct xfs_buf {
* success the write is considered to be failed permanently and the
* iodone handler will take appropriate action.
*
- * For retry timeouts, we record the jiffie of the first failure. This
+ * For retry timeouts, we record the jiffy of the first failure. This
* means that we can change the retry timeout for buffers already under
* I/O and thus avoid getting stuck in a retry loop with a long timeout.
*
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index 9bb2d24de709..07bebbfb16ee 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -149,7 +149,7 @@ xmbuf_map_page(
return -ENOMEM;
}
- error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio, SGP_CACHE);
+ error = shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio, SGP_CACHE);
if (error)
return error;
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 25f5dffeab2a..d8c4a5dcca7a 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -554,11 +554,10 @@ xfs_trim_rtdev_extents(
xfs_daddr_t end,
xfs_daddr_t minlen)
{
- struct xfs_rtalloc_rec low = { };
- struct xfs_rtalloc_rec high = { };
struct xfs_trim_rtdev tr = {
.minlen_fsb = XFS_BB_TO_FSB(mp, minlen),
};
+ xfs_rtxnum_t low, high;
struct xfs_trans *tp;
xfs_daddr_t rtdev_daddr;
int error;
@@ -584,17 +583,17 @@ xfs_trim_rtdev_extents(
XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);
/* Convert the rt blocks to rt extents */
- low.ar_startext = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
- high.ar_startext = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));
+ low = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
+ high = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));
/*
* Walk the free ranges between low and high. The query_range function
* trims the extents returned.
*/
do {
- tr.stop_rtx = low.ar_startext + (mp->m_sb.sb_blocksize * NBBY);
+ tr.stop_rtx = low + (mp->m_sb.sb_blocksize * NBBY);
xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
- error = xfs_rtalloc_query_range(mp, tp, &low, &high,
+ error = xfs_rtalloc_query_range(mp, tp, low, high,
xfs_trim_gather_rtextent, &tr);
if (error == -ECANCELED)
@@ -615,8 +614,8 @@ xfs_trim_rtdev_extents(
if (error)
break;
- low.ar_startext = tr.restart_rtx;
- } while (!xfs_trim_should_stop() && low.ar_startext <= high.ar_startext);
+ low = tr.restart_rtx;
+ } while (!xfs_trim_should_stop() && low <= high);
xfs_trans_cancel(tp);
return error;
@@ -708,7 +707,7 @@ xfs_ioc_trim(
return last_error;
range.len = min_t(unsigned long long, range.len,
- XFS_FSB_TO_B(mp, max_blocks));
+ XFS_FSB_TO_B(mp, max_blocks) - range.start);
if (copy_to_user(urange, &range, sizeof(range)))
return -EFAULT;
return 0;
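The FITRIM change above fixes the length reported back to userspace: it must be measured relative to range.start, otherwise the reply can claim more bytes than remain past the starting offset. A worked sketch with invented values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_u64(uint64_t a, uint64_t b)
    {
    	return a < b ? a : b;
    }

    int main(void)
    {
    	uint64_t fs_bytes = 2ULL << 30;	/* XFS_FSB_TO_B(mp, max_blocks) */
    	uint64_t start	  = 1ULL << 30;	/* caller trims the second half */
    	uint64_t len	  = ~0ULL;	/* "everything", as fstrim passes */

    	/* old: could report a length extending past the device */
    	uint64_t old_len = min_u64(len, fs_bytes);
    	/* new: length is clamped relative to the starting offset */
    	uint64_t new_len = min_u64(len, fs_bytes - start);

    	printf("old=%llu new=%llu\n",
    	       (unsigned long long)old_len, (unsigned long long)new_len);
    	return 0;
    }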
diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c
index c8a655c92c92..75cb53f090d1 100644
--- a/fs/xfs/xfs_exchrange.c
+++ b/fs/xfs/xfs_exchrange.c
@@ -72,6 +72,34 @@ xfs_exchrange_estimate(
return error;
}
+/*
+ * Check that file2's metadata agree with the snapshot that we took for the
+ * range commit request.
+ *
+ * This should be called after the filesystem has locked /all/ inode metadata
+ * against modification.
+ */
+STATIC int
+xfs_exchrange_check_freshness(
+ const struct xfs_exchrange *fxr,
+ struct xfs_inode *ip2)
+{
+ struct inode *inode2 = VFS_I(ip2);
+ struct timespec64 ctime = inode_get_ctime(inode2);
+ struct timespec64 mtime = inode_get_mtime(inode2);
+
+ trace_xfs_exchrange_freshness(fxr, ip2);
+
+ /* Check that file2 hasn't otherwise been modified. */
+ if (fxr->file2_ino != ip2->i_ino ||
+ fxr->file2_gen != inode2->i_generation ||
+ !timespec64_equal(&fxr->file2_ctime, &ctime) ||
+ !timespec64_equal(&fxr->file2_mtime, &mtime))
+ return -EBUSY;
+
+ return 0;
+}
+
#define QRETRY_IP1 (0x1)
#define QRETRY_IP2 (0x2)
@@ -607,6 +635,12 @@ xfs_exchrange_prep(
if (error || fxr->length == 0)
return error;
+ if (fxr->flags & __XFS_EXCHANGE_RANGE_CHECK_FRESH2) {
+ error = xfs_exchrange_check_freshness(fxr, ip2);
+ if (error)
+ return error;
+ }
+
/* Attach dquots to both inodes before changing block maps. */
error = xfs_qm_dqattach(ip2);
if (error)
@@ -719,7 +753,8 @@ xfs_exchange_range(
if (fxr->file1->f_path.mnt != fxr->file2->f_path.mnt)
return -EXDEV;
- if (fxr->flags & ~XFS_EXCHANGE_RANGE_ALL_FLAGS)
+ if (fxr->flags & ~(XFS_EXCHANGE_RANGE_ALL_FLAGS |
+ __XFS_EXCHANGE_RANGE_CHECK_FRESH2))
return -EINVAL;
/* Userspace requests only honored for regular files. */
@@ -794,9 +829,115 @@ xfs_ioc_exchange_range(
fxr.flags = args.flags;
file1 = fdget(args.file1_fd);
- if (!file1.file)
+ if (!fd_file(file1))
+ return -EBADF;
+ fxr.file1 = fd_file(file1);
+
+ error = xfs_exchange_range(&fxr);
+ fdput(file1);
+ return error;
+}
+
+/* Opaque freshness blob for XFS_IOC_COMMIT_RANGE */
+struct xfs_commit_range_fresh {
+ xfs_fsid_t fsid; /* m_fixedfsid */
+ __u64 file2_ino; /* inode number */
+ __s64 file2_mtime; /* modification time */
+ __s64 file2_ctime; /* change time */
+ __s32 file2_mtime_nsec; /* mod time, nsec */
+ __s32 file2_ctime_nsec; /* change time, nsec */
+ __u32 file2_gen; /* inode generation */
+ __u32 magic; /* zero */
+};
+#define XCR_FRESH_MAGIC 0x444F524B /* DORK */
+
+/* Set up a commitrange operation by sampling file2's write-related attrs */
+long
+xfs_ioc_start_commit(
+ struct file *file,
+ struct xfs_commit_range __user *argp)
+{
+ struct xfs_commit_range args = { };
+ struct timespec64 ts;
+ struct xfs_commit_range_fresh *kern_f;
+ struct xfs_commit_range_fresh __user *user_f;
+ struct inode *inode2 = file_inode(file);
+ struct xfs_inode *ip2 = XFS_I(inode2);
+ const unsigned int lockflags = XFS_IOLOCK_SHARED |
+ XFS_MMAPLOCK_SHARED |
+ XFS_ILOCK_SHARED;
+
+ BUILD_BUG_ON(sizeof(struct xfs_commit_range_fresh) !=
+ sizeof(args.file2_freshness));
+
+ kern_f = (struct xfs_commit_range_fresh *)&args.file2_freshness;
+
+ memcpy(&kern_f->fsid, ip2->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
+
+ xfs_ilock(ip2, lockflags);
+ ts = inode_get_ctime(inode2);
+ kern_f->file2_ctime = ts.tv_sec;
+ kern_f->file2_ctime_nsec = ts.tv_nsec;
+ ts = inode_get_mtime(inode2);
+ kern_f->file2_mtime = ts.tv_sec;
+ kern_f->file2_mtime_nsec = ts.tv_nsec;
+ kern_f->file2_ino = ip2->i_ino;
+ kern_f->file2_gen = inode2->i_generation;
+ kern_f->magic = XCR_FRESH_MAGIC;
+ xfs_iunlock(ip2, lockflags);
+
+ user_f = (struct xfs_commit_range_fresh __user *)&argp->file2_freshness;
+ if (copy_to_user(user_f, kern_f, sizeof(*kern_f)))
+ return -EFAULT;
+
+ return 0;
+}
+
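This ioctl pairs with xfs_ioc_commit_range() below to implement an optimistic-concurrency update: sample file2's identity and timestamps, stage the new contents in file1, then exchange only if nothing changed in the meantime. A hedged sketch of the intended userspace flow, assuming the XFS_IOC_START_COMMIT/XFS_IOC_COMMIT_RANGE ioctl names and the xfs_commit_range layout from the matching uapi header; error handling trimmed to the essentials:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <xfs/xfs_fs.h>	/* assumed location of the uapi definitions */

    int commit_update(int file1_fd, int file2_fd, off_t off, size_t len)
    {
    	struct xfs_commit_range cr = { 0 };

    	/* sample file2's change attributes into cr.file2_freshness */
    	if (ioctl(file2_fd, XFS_IOC_START_COMMIT, &cr) < 0)
    		return -1;

    	/* ... stage the new contents in file1 here ... */

    	cr.file1_fd = file1_fd;
    	cr.file1_offset = off;
    	cr.file2_offset = off;
    	cr.length = len;

    	/* fails with EBUSY if file2 changed since START_COMMIT */
    	if (ioctl(file2_fd, XFS_IOC_COMMIT_RANGE, &cr) < 0)
    		return -1;
    	return 0;
    }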
+/*
+ * Exchange file1 and file2 contents if file2 has not been written since the
+ * start commit operation.
+ */
+long
+xfs_ioc_commit_range(
+ struct file *file,
+ struct xfs_commit_range __user *argp)
+{
+ struct xfs_exchrange fxr = {
+ .file2 = file,
+ };
+ struct xfs_commit_range args;
+ struct xfs_commit_range_fresh *kern_f;
+ struct xfs_inode *ip2 = XFS_I(file_inode(file));
+ struct xfs_mount *mp = ip2->i_mount;
+ struct fd file1;
+ int error;
+
+ kern_f = (struct xfs_commit_range_fresh *)&args.file2_freshness;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+ if (args.flags & ~XFS_EXCHANGE_RANGE_ALL_FLAGS)
+ return -EINVAL;
+ if (kern_f->magic != XCR_FRESH_MAGIC)
+ return -EBUSY;
+ if (memcmp(&kern_f->fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t)))
+ return -EBUSY;
+
+ fxr.file1_offset = args.file1_offset;
+ fxr.file2_offset = args.file2_offset;
+ fxr.length = args.length;
+ fxr.flags = args.flags | __XFS_EXCHANGE_RANGE_CHECK_FRESH2;
+ fxr.file2_ino = kern_f->file2_ino;
+ fxr.file2_gen = kern_f->file2_gen;
+ fxr.file2_mtime.tv_sec = kern_f->file2_mtime;
+ fxr.file2_mtime.tv_nsec = kern_f->file2_mtime_nsec;
+ fxr.file2_ctime.tv_sec = kern_f->file2_ctime;
+ fxr.file2_ctime.tv_nsec = kern_f->file2_ctime_nsec;
+
+ file1 = fdget(args.file1_fd);
+ if (fd_empty(file1))
return -EBADF;
- fxr.file1 = file1.file;
+ fxr.file1 = fd_file(file1);
error = xfs_exchange_range(&fxr);
fdput(file1);
diff --git a/fs/xfs/xfs_exchrange.h b/fs/xfs/xfs_exchrange.h
index 039abcca546e..bc1298aba806 100644
--- a/fs/xfs/xfs_exchrange.h
+++ b/fs/xfs/xfs_exchrange.h
@@ -10,8 +10,12 @@
#define __XFS_EXCHANGE_RANGE_UPD_CMTIME1 (1ULL << 63)
#define __XFS_EXCHANGE_RANGE_UPD_CMTIME2 (1ULL << 62)
+/* Freshness check required */
+#define __XFS_EXCHANGE_RANGE_CHECK_FRESH2 (1ULL << 61)
+
#define XFS_EXCHANGE_RANGE_PRIV_FLAGS (__XFS_EXCHANGE_RANGE_UPD_CMTIME1 | \
- __XFS_EXCHANGE_RANGE_UPD_CMTIME2)
+ __XFS_EXCHANGE_RANGE_UPD_CMTIME2 | \
+ __XFS_EXCHANGE_RANGE_CHECK_FRESH2)
struct xfs_exchrange {
struct file *file1;
@@ -22,10 +26,20 @@ struct xfs_exchrange {
u64 length;
u64 flags; /* XFS_EXCHANGE_RANGE flags */
+
+ /* file2 metadata for freshness checks */
+ u64 file2_ino;
+ struct timespec64 file2_mtime;
+ struct timespec64 file2_ctime;
+ u32 file2_gen;
};
long xfs_ioc_exchange_range(struct file *file,
struct xfs_exchange_range __user *argp);
+long xfs_ioc_start_commit(struct file *file,
+ struct xfs_commit_range __user *argp);
+long xfs_ioc_commit_range(struct file *file,
+ struct xfs_commit_range __user *argp);
struct xfs_exchmaps_req;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 4cdc54dc9686..412b1d71b52b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -760,7 +760,7 @@ write_retry:
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
- &xfs_buffered_write_iomap_ops);
+ &xfs_buffered_write_iomap_ops, NULL);
/*
* If we hit a space limit, try to free up some lingering preallocated
@@ -852,6 +852,192 @@ static inline bool xfs_file_sync_writes(struct file *filp)
return false;
}
+static int
+xfs_falloc_newsize(
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len,
+ loff_t *new_size)
+{
+ struct inode *inode = file_inode(file);
+
+ if ((mode & FALLOC_FL_KEEP_SIZE) || offset + len <= i_size_read(inode))
+ return 0;
+ *new_size = offset + len;
+ return inode_newsize_ok(inode, *new_size);
+}
+
+static int
+xfs_falloc_setsize(
+ struct file *file,
+ loff_t new_size)
+{
+ struct iattr iattr = {
+ .ia_valid = ATTR_SIZE,
+ .ia_size = new_size,
+ };
+
+ if (!new_size)
+ return 0;
+ return xfs_vn_setattr_size(file_mnt_idmap(file), file_dentry(file),
+ &iattr);
+}
+
+static int
+xfs_falloc_collapse_range(
+ struct file *file,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ loff_t new_size = i_size_read(inode) - len;
+ int error;
+
+ if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
+ return -EINVAL;
+
+ /*
+ * There is no need for a collapse range to overlap EOF; in that case it
+ * is effectively a truncate operation.
+ */
+ if (offset + len >= i_size_read(inode))
+ return -EINVAL;
+
+ error = xfs_collapse_file_space(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+ return xfs_falloc_setsize(file, new_size);
+}
+
+static int
+xfs_falloc_insert_range(
+ struct file *file,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ loff_t isize = i_size_read(inode);
+ int error;
+
+ if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
+ return -EINVAL;
+
+ /*
+ * New inode size must not exceed ->s_maxbytes, accounting for
+ * possible signed overflow.
+ */
+ if (inode->i_sb->s_maxbytes - isize < len)
+ return -EFBIG;
+
+ /* Offset should be less than i_size */
+ if (offset >= isize)
+ return -EINVAL;
+
+ error = xfs_falloc_setsize(file, isize + len);
+ if (error)
+ return error;
+
+ /*
+ * Perform hole insertion now that the file size has been updated so
+ * that if we crash during the operation we don't leave shifted extents
+ * past EOF and hence lose access to the data that is contained within
+ * them.
+ */
+ return xfs_insert_file_space(XFS_I(inode), offset, len);
+}
+
+/*
+ * Punch a hole and prealloc the range. We use a hole punch rather than
+ * unwritten extent conversion for two reasons:
+ *
+ * 1.) Hole punch handles partial block zeroing for us.
+ * 2.) If prealloc returns ENOSPC, the file range is still zero-valued by
+ * virtue of the hole punch.
+ */
+static int
+xfs_falloc_zero_range(
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ unsigned int blksize = i_blocksize(inode);
+ loff_t new_size = 0;
+ int error;
+
+ trace_xfs_zero_file_space(XFS_I(inode));
+
+ error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
+ if (error)
+ return error;
+
+ error = xfs_free_file_space(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+
+ len = round_up(offset + len, blksize) - round_down(offset, blksize);
+ offset = round_down(offset, blksize);
+ error = xfs_alloc_file_space(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+ return xfs_falloc_setsize(file, new_size);
+}
+
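The round_up()/round_down() pair at the end of xfs_falloc_zero_range() widens the preallocation to whole blocks around the zeroed byte range; the preceding hole punch already zeroed the partial blocks. A worked sketch of the arithmetic with invented values (4k blocks):

    #include <stdint.h>
    #include <stdio.h>

    #define round_down(x, y)	((x) / (y) * (y))
    #define round_up(x, y)	round_down((x) + (y) - 1, (y))

    int main(void)
    {
    	uint64_t blksize = 4096;
    	uint64_t offset	 = 5000;	/* mid-block start */
    	uint64_t len	 = 3000;	/* ends mid-block at 8000 */

    	uint64_t alloc_len = round_up(offset + len, blksize) -
    			     round_down(offset, blksize); /* 8192 - 4096 */
    	uint64_t alloc_off = round_down(offset, blksize); /* 4096 */

    	printf("prealloc [%llu, %llu)\n",
    	       (unsigned long long)alloc_off,
    	       (unsigned long long)(alloc_off + alloc_len));
    	return 0;
    }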
+static int
+xfs_falloc_unshare_range(
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ loff_t new_size = 0;
+ int error;
+
+ error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
+ if (error)
+ return error;
+
+ error = xfs_reflink_unshare(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+
+ error = xfs_alloc_file_space(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+ return xfs_falloc_setsize(file, new_size);
+}
+
+static int
+xfs_falloc_allocate_range(
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ loff_t new_size = 0;
+ int error;
+
+ /*
+ * If always_cow mode we can't use preallocations and thus should not
+ * create them.
+ */
+ if (xfs_is_always_cow_inode(XFS_I(inode)))
+ return -EOPNOTSUPP;
+
+ error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
+ if (error)
+ return error;
+
+ error = xfs_alloc_file_space(XFS_I(inode), offset, len);
+ if (error)
+ return error;
+ return xfs_falloc_setsize(file, new_size);
+}
+
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
@@ -868,8 +1054,6 @@ xfs_file_fallocate(
struct xfs_inode *ip = XFS_I(inode);
long error;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
- loff_t new_size = 0;
- bool do_file_insert = false;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
@@ -890,156 +1074,35 @@ xfs_file_fallocate(
*/
inode_dio_wait(inode);
- /*
- * Now AIO and DIO has drained we flush and (if necessary) invalidate
- * the cached range over the first operation we are about to run.
- *
- * We care about zero and collapse here because they both run a hole
- * punch over the range first. Because that can zero data, and the range
- * of invalidation for the shift operations is much larger, we still do
- * the required flush for collapse in xfs_prepare_shift().
- *
- * Insert has the same range requirements as collapse, and we extend the
- * file first which can zero data. Hence insert has the same
- * flush/invalidate requirements as collapse and so they are both
- * handled at the right time by xfs_prepare_shift().
- */
- if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
- FALLOC_FL_COLLAPSE_RANGE)) {
- error = xfs_flush_unmap_range(ip, offset, len);
- if (error)
- goto out_unlock;
- }
-
error = file_modified(file);
if (error)
goto out_unlock;
- if (mode & FALLOC_FL_PUNCH_HOLE) {
+ switch (mode & FALLOC_FL_MODE_MASK) {
+ case FALLOC_FL_PUNCH_HOLE:
error = xfs_free_file_space(ip, offset, len);
- if (error)
- goto out_unlock;
- } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- if (!xfs_is_falloc_aligned(ip, offset, len)) {
- error = -EINVAL;
- goto out_unlock;
- }
-
- /*
- * There is no need to overlap collapse range with EOF,
- * in which case it is effectively a truncate operation
- */
- if (offset + len >= i_size_read(inode)) {
- error = -EINVAL;
- goto out_unlock;
- }
-
- new_size = i_size_read(inode) - len;
-
- error = xfs_collapse_file_space(ip, offset, len);
- if (error)
- goto out_unlock;
- } else if (mode & FALLOC_FL_INSERT_RANGE) {
- loff_t isize = i_size_read(inode);
-
- if (!xfs_is_falloc_aligned(ip, offset, len)) {
- error = -EINVAL;
- goto out_unlock;
- }
-
- /*
- * New inode size must not exceed ->s_maxbytes, accounting for
- * possible signed overflow.
- */
- if (inode->i_sb->s_maxbytes - isize < len) {
- error = -EFBIG;
- goto out_unlock;
- }
- new_size = isize + len;
-
- /* Offset should be less than i_size */
- if (offset >= isize) {
- error = -EINVAL;
- goto out_unlock;
- }
- do_file_insert = true;
- } else {
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
- new_size = offset + len;
- error = inode_newsize_ok(inode, new_size);
- if (error)
- goto out_unlock;
- }
-
- if (mode & FALLOC_FL_ZERO_RANGE) {
- /*
- * Punch a hole and prealloc the range. We use a hole
- * punch rather than unwritten extent conversion for two
- * reasons:
- *
- * 1.) Hole punch handles partial block zeroing for us.
- * 2.) If prealloc returns ENOSPC, the file range is
- * still zero-valued by virtue of the hole punch.
- */
- unsigned int blksize = i_blocksize(inode);
-
- trace_xfs_zero_file_space(ip);
-
- error = xfs_free_file_space(ip, offset, len);
- if (error)
- goto out_unlock;
-
- len = round_up(offset + len, blksize) -
- round_down(offset, blksize);
- offset = round_down(offset, blksize);
- } else if (mode & FALLOC_FL_UNSHARE_RANGE) {
- error = xfs_reflink_unshare(ip, offset, len);
- if (error)
- goto out_unlock;
- } else {
- /*
- * If always_cow mode we can't use preallocations and
- * thus should not create them.
- */
- if (xfs_is_always_cow_inode(ip)) {
- error = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
-
- if (!xfs_is_always_cow_inode(ip)) {
- error = xfs_alloc_file_space(ip, offset, len);
- if (error)
- goto out_unlock;
- }
- }
-
- /* Change file size if needed */
- if (new_size) {
- struct iattr iattr;
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = new_size;
- error = xfs_vn_setattr_size(file_mnt_idmap(file),
- file_dentry(file), &iattr);
- if (error)
- goto out_unlock;
- }
-
- /*
- * Perform hole insertion now that the file size has been
- * updated so that if we crash during the operation we don't
- * leave shifted extents past EOF and hence losing access to
- * the data that is contained within them.
- */
- if (do_file_insert) {
- error = xfs_insert_file_space(ip, offset, len);
- if (error)
- goto out_unlock;
+ break;
+ case FALLOC_FL_COLLAPSE_RANGE:
+ error = xfs_falloc_collapse_range(file, offset, len);
+ break;
+ case FALLOC_FL_INSERT_RANGE:
+ error = xfs_falloc_insert_range(file, offset, len);
+ break;
+ case FALLOC_FL_ZERO_RANGE:
+ error = xfs_falloc_zero_range(file, mode, offset, len);
+ break;
+ case FALLOC_FL_UNSHARE_RANGE:
+ error = xfs_falloc_unshare_range(file, mode, offset, len);
+ break;
+ case FALLOC_FL_ALLOCATE_RANGE:
+ error = xfs_falloc_allocate_range(file, mode, offset, len);
+ break;
+ default:
+ error = -EOPNOTSUPP;
+ break;
}
- if (xfs_file_sync_writes(file))
+ if (!error && xfs_file_sync_writes(file))
error = xfs_log_force_inode(ip);
out_unlock:
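Each case in the new switch maps to one fallocate(2) mode from <linux/falloc.h>. A minimal userspace caller exercising a few of them (path invented):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT, 0644);

    	if (fd < 0)
    		return 1;
    	/* FALLOC_FL_ALLOCATE_RANGE: mode 0, plain preallocation */
    	if (fallocate(fd, 0, 0, 1 << 20) < 0)
    		perror("allocate");
    	/* zero 4 KiB without changing i_size */
    	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
    		      4096, 4096) < 0)
    		perror("zero");
    	/* punch a hole; KEEP_SIZE is mandatory for PUNCH_HOLE */
    	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
    		      8192, 4096) < 0)
    		perror("punch");
    	close(fd);
    	return 0;
    }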
@@ -1175,12 +1238,78 @@ xfs_dir_open(
return error;
}
+/*
+ * Don't bother propagating errors. We're just doing cleanup, and the caller
+ * ignores the return value anyway.
+ */
STATIC int
xfs_file_release(
- struct inode *inode,
- struct file *filp)
+ struct inode *inode,
+ struct file *file)
{
- return xfs_release(XFS_I(inode));
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+
+ /*
+ * If this is a read-only mount or the file system has been shut down,
+ * don't generate I/O.
+ */
+ if (xfs_is_readonly(mp) || xfs_is_shutdown(mp))
+ return 0;
+
+ /*
+ * If we previously truncated this file and removed old data in the
+ * process, we want to initiate "early" writeout on the last close.
+ * This is an attempt to combat the notorious NULL files problem which
+ * is particularly noticeable from a truncate down, buffered (re-)write
+ * (delalloc), followed by a crash. What we are effectively doing here
+ * is significantly reducing the time window where we'd otherwise be
+ * exposed to that problem.
+ */
+ if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
+ xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED);
+ if (ip->i_delayed_blks > 0)
+ filemap_flush(inode->i_mapping);
+ }
+
+ /*
+ * XFS aggressively preallocates post-EOF space to generate contiguous
+ * allocations for writers that append to the end of the file.
+ *
+ * To support workloads that close and reopen the file frequently, these
+ * preallocations usually persist after a close unless it is the first
+ * close for the inode. This is a tradeoff to generate tightly packed
+ * data layouts for unpacking tarballs or similar archives that write
+ * one file after another without going back to it while keeping the
+ * preallocation for files that have recurring open/write/close cycles.
+ *
+ * This heuristic is skipped for inodes with the append-only flag as
+ * that flag is rather pointless for inodes written only once.
+ *
+ * There is no point in freeing blocks here for open but unlinked files
+ * as they will be taken care of by the inactivation path soon.
+ *
+ * When releasing a read-only context, don't flush data or trim post-EOF
+ * blocks. This avoids open/read/close workloads from removing EOF
+ * blocks that other writers depend upon to reduce fragmentation.
+ *
+ * If we can't get the iolock just skip truncating the blocks past EOF
+ * because we could deadlock with the mmap_lock otherwise. We'll get
+ * another chance to drop them once the last reference to the inode is
+ * dropped, so we'll never leak blocks permanently.
+ */
+ if (inode->i_nlink &&
+ (file->f_mode & FMODE_WRITE) &&
+ !(ip->i_diflags & XFS_DIFLAG_APPEND) &&
+ !xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
+ xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ if (xfs_can_free_eofblocks(ip) &&
+ !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
+ xfs_free_eofblocks(ip);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ }
+
+ return 0;
}
STATIC int
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 71f32354944e..ae18ab86e608 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -44,7 +44,7 @@ xfs_fsmap_from_internal(
}
/* Convert an fsmap to an xfs_fsmap. */
-void
+static void
xfs_fsmap_to_internal(
struct xfs_fsmap *dest,
struct fsmap *src)
@@ -441,141 +441,6 @@ xfs_getfsmap_set_irec_flags(
irec->rm_flags |= XFS_RMAP_UNWRITTEN;
}
-/* Execute a getfsmap query against the log device. */
-STATIC int
-xfs_getfsmap_logdev(
- struct xfs_trans *tp,
- const struct xfs_fsmap *keys,
- struct xfs_getfsmap_info *info)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_rmap_irec rmap;
- xfs_daddr_t rec_daddr, len_daddr;
- xfs_fsblock_t start_fsb, end_fsb;
- uint64_t eofs;
-
- eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
- if (keys[0].fmr_physical >= eofs)
- return 0;
- start_fsb = XFS_BB_TO_FSBT(mp,
- keys[0].fmr_physical + keys[0].fmr_length);
- end_fsb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
-
- /* Adjust the low key if we are continuing from where we left off. */
- if (keys[0].fmr_length > 0)
- info->low_daddr = XFS_FSB_TO_BB(mp, start_fsb);
-
- trace_xfs_fsmap_low_key_linear(mp, info->dev, start_fsb);
- trace_xfs_fsmap_high_key_linear(mp, info->dev, end_fsb);
-
- if (start_fsb > 0)
- return 0;
-
- /* Fabricate an rmap entry for the external log device. */
- rmap.rm_startblock = 0;
- rmap.rm_blockcount = mp->m_sb.sb_logblocks;
- rmap.rm_owner = XFS_RMAP_OWN_LOG;
- rmap.rm_offset = 0;
- rmap.rm_flags = 0;
-
- rec_daddr = XFS_FSB_TO_BB(mp, rmap.rm_startblock);
- len_daddr = XFS_FSB_TO_BB(mp, rmap.rm_blockcount);
- return xfs_getfsmap_helper(tp, info, &rmap, rec_daddr, len_daddr);
-}
-
-#ifdef CONFIG_XFS_RT
-/* Transform a rtbitmap "record" into a fsmap */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_helper(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- const struct xfs_rtalloc_rec *rec,
- void *priv)
-{
- struct xfs_getfsmap_info *info = priv;
- struct xfs_rmap_irec irec;
- xfs_rtblock_t rtbno;
- xfs_daddr_t rec_daddr, len_daddr;
-
- rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
- rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
- irec.rm_startblock = rtbno;
-
- rtbno = xfs_rtx_to_rtb(mp, rec->ar_extcount);
- len_daddr = XFS_FSB_TO_BB(mp, rtbno);
- irec.rm_blockcount = rtbno;
-
- irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
- irec.rm_offset = 0;
- irec.rm_flags = 0;
-
- return xfs_getfsmap_helper(tp, info, &irec, rec_daddr, len_daddr);
-}
-
-/* Execute a getfsmap query against the realtime device rtbitmap. */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap(
- struct xfs_trans *tp,
- const struct xfs_fsmap *keys,
- struct xfs_getfsmap_info *info)
-{
-
- struct xfs_rtalloc_rec alow = { 0 };
- struct xfs_rtalloc_rec ahigh = { 0 };
- struct xfs_mount *mp = tp->t_mountp;
- xfs_rtblock_t start_rtb;
- xfs_rtblock_t end_rtb;
- uint64_t eofs;
- int error;
-
- eofs = XFS_FSB_TO_BB(mp, xfs_rtx_to_rtb(mp, mp->m_sb.sb_rextents));
- if (keys[0].fmr_physical >= eofs)
- return 0;
- start_rtb = XFS_BB_TO_FSBT(mp,
- keys[0].fmr_physical + keys[0].fmr_length);
- end_rtb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
-
- info->missing_owner = XFS_FMR_OWN_UNKNOWN;
-
- /* Adjust the low key if we are continuing from where we left off. */
- if (keys[0].fmr_length > 0) {
- info->low_daddr = XFS_FSB_TO_BB(mp, start_rtb);
- if (info->low_daddr >= eofs)
- return 0;
- }
-
- trace_xfs_fsmap_low_key_linear(mp, info->dev, start_rtb);
- trace_xfs_fsmap_high_key_linear(mp, info->dev, end_rtb);
-
- xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
-
- /*
- * Set up query parameters to return free rtextents covering the range
- * we want.
- */
- alow.ar_startext = xfs_rtb_to_rtx(mp, start_rtb);
- ahigh.ar_startext = xfs_rtb_to_rtxup(mp, end_rtb);
- error = xfs_rtalloc_query_range(mp, tp, &alow, &ahigh,
- xfs_getfsmap_rtdev_rtbitmap_helper, info);
- if (error)
- goto err;
-
- /*
- * Report any gaps at the end of the rtbitmap by simulating a null
- * rmap starting at the block after the end of the query range.
- */
- info->last = true;
- ahigh.ar_startext = min(mp->m_sb.sb_rextents, ahigh.ar_startext);
-
- error = xfs_getfsmap_rtdev_rtbitmap_helper(mp, tp, &ahigh, info);
- if (error)
- goto err;
-err:
- xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
- return error;
-}
-#endif /* CONFIG_XFS_RT */
-
static inline bool
rmap_not_shareable(struct xfs_mount *mp, const struct xfs_rmap_irec *r)
{
@@ -800,6 +665,140 @@ xfs_getfsmap_datadev_bnobt(
xfs_getfsmap_datadev_bnobt_query, &akeys[0]);
}
+/* Execute a getfsmap query against the log device. */
+STATIC int
+xfs_getfsmap_logdev(
+ struct xfs_trans *tp,
+ const struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rmap_irec rmap;
+ xfs_daddr_t rec_daddr, len_daddr;
+ xfs_fsblock_t start_fsb, end_fsb;
+ uint64_t eofs;
+
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ start_fsb = XFS_BB_TO_FSBT(mp,
+ keys[0].fmr_physical + keys[0].fmr_length);
+ end_fsb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
+
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (keys[0].fmr_length > 0)
+ info->low_daddr = XFS_FSB_TO_BB(mp, start_fsb);
+
+ trace_xfs_fsmap_low_key_linear(mp, info->dev, start_fsb);
+ trace_xfs_fsmap_high_key_linear(mp, info->dev, end_fsb);
+
+ if (start_fsb > 0)
+ return 0;
+
+ /* Fabricate an rmap entry for the external log device. */
+ rmap.rm_startblock = 0;
+ rmap.rm_blockcount = mp->m_sb.sb_logblocks;
+ rmap.rm_owner = XFS_RMAP_OWN_LOG;
+ rmap.rm_offset = 0;
+ rmap.rm_flags = 0;
+
+ rec_daddr = XFS_FSB_TO_BB(mp, rmap.rm_startblock);
+ len_daddr = XFS_FSB_TO_BB(mp, rmap.rm_blockcount);
+ return xfs_getfsmap_helper(tp, info, &rmap, rec_daddr, len_daddr);
+}
+
+#ifdef CONFIG_XFS_RT
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ const struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_getfsmap_info *info = priv;
+ struct xfs_rmap_irec irec;
+ xfs_rtblock_t rtbno;
+ xfs_daddr_t rec_daddr, len_daddr;
+
+ rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
+ rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
+ irec.rm_startblock = rtbno;
+
+ rtbno = xfs_rtx_to_rtb(mp, rec->ar_extcount);
+ len_daddr = XFS_FSB_TO_BB(mp, rtbno);
+ irec.rm_blockcount = rtbno;
+
+ irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
+ irec.rm_offset = 0;
+ irec.rm_flags = 0;
+
+ return xfs_getfsmap_helper(tp, info, &irec, rec_daddr, len_daddr);
+}
+
+/* Execute a getfsmap query against the realtime device rtbitmap. */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap(
+ struct xfs_trans *tp,
+ const struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+
+ struct xfs_rtalloc_rec ahigh = { 0 };
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_rtblock_t start_rtb;
+ xfs_rtblock_t end_rtb;
+ xfs_rtxnum_t high;
+ uint64_t eofs;
+ int error;
+
+ eofs = XFS_FSB_TO_BB(mp, xfs_rtx_to_rtb(mp, mp->m_sb.sb_rextents));
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ start_rtb = XFS_BB_TO_FSBT(mp,
+ keys[0].fmr_physical + keys[0].fmr_length);
+ end_rtb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
+
+ info->missing_owner = XFS_FMR_OWN_UNKNOWN;
+
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (keys[0].fmr_length > 0) {
+ info->low_daddr = XFS_FSB_TO_BB(mp, start_rtb);
+ if (info->low_daddr >= eofs)
+ return 0;
+ }
+
+ trace_xfs_fsmap_low_key_linear(mp, info->dev, start_rtb);
+ trace_xfs_fsmap_high_key_linear(mp, info->dev, end_rtb);
+
+ xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
+
+ /*
+ * Set up query parameters to return free rtextents covering the range
+ * we want.
+ */
+ high = xfs_rtb_to_rtxup(mp, end_rtb);
+ error = xfs_rtalloc_query_range(mp, tp, xfs_rtb_to_rtx(mp, start_rtb),
+ high, xfs_getfsmap_rtdev_rtbitmap_helper, info);
+ if (error)
+ goto err;
+
+ /*
+ * Report any gaps at the end of the rtbitmap by simulating a null
+ * rmap starting at the block after the end of the query range.
+ */
+ info->last = true;
+ ahigh.ar_startext = min(mp->m_sb.sb_rextents, high);
+
+ error = xfs_getfsmap_rtdev_rtbitmap_helper(mp, tp, &ahigh, info);
+ if (error)
+ goto err;
+err:
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ return error;
+}
+#endif /* CONFIG_XFS_RT */
+
/* Do we recognize the device? */
STATIC bool
xfs_getfsmap_is_valid_device(
@@ -890,7 +889,7 @@ xfs_getfsmap_check_keys(
* xfs_getfsmap_info.low/high -- per-AG low/high keys computed from
* dkeys; used to query the metadata.
*/
-int
+STATIC int
xfs_getfsmap(
struct xfs_mount *mp,
struct xfs_fsmap_head *head,
@@ -1020,3 +1019,133 @@ xfs_getfsmap(
head->fmh_oflags = FMH_OF_DEV_T;
return error;
}
+
+int
+xfs_ioc_getfsmap(
+ struct xfs_inode *ip,
+ struct fsmap_head __user *arg)
+{
+ struct xfs_fsmap_head xhead = {0};
+ struct fsmap_head head;
+ struct fsmap *recs;
+ unsigned int count;
+ __u32 last_flags = 0;
+ bool done = false;
+ int error;
+
+ if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
+ return -EFAULT;
+ if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
+ memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
+ sizeof(head.fmh_keys[0].fmr_reserved)) ||
+ memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
+ sizeof(head.fmh_keys[1].fmr_reserved)))
+ return -EINVAL;
+
+ /*
+ * Use an internal memory buffer so that we don't have to copy fsmap
+ * data to userspace while holding locks. Start by trying to allocate
+ * up to 128k for the buffer, but fall back to a single page if needed.
+ */
+ count = min_t(unsigned int, head.fmh_count,
+ 131072 / sizeof(struct fsmap));
+ recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
+ if (!recs) {
+ count = min_t(unsigned int, head.fmh_count,
+ PAGE_SIZE / sizeof(struct fsmap));
+ recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
+ if (!recs)
+ return -ENOMEM;
+ }
+
+ xhead.fmh_iflags = head.fmh_iflags;
+ xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
+ xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
+
+ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
+ trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
+
+ head.fmh_entries = 0;
+ do {
+ struct fsmap __user *user_recs;
+ struct fsmap *last_rec;
+
+ user_recs = &arg->fmh_recs[head.fmh_entries];
+ xhead.fmh_entries = 0;
+ xhead.fmh_count = min_t(unsigned int, count,
+ head.fmh_count - head.fmh_entries);
+
+ /* Run query, record how many entries we got. */
+ error = xfs_getfsmap(ip->i_mount, &xhead, recs);
+ switch (error) {
+ case 0:
+ /*
+ * There are no more records in the result set. Copy
+ * whatever we got to userspace and break out.
+ */
+ done = true;
+ break;
+ case -ECANCELED:
+ /*
+ * The internal memory buffer is full. Copy whatever
+ * records we got to userspace and go again if we have
+ * not yet filled the userspace buffer.
+ */
+ error = 0;
+ break;
+ default:
+ goto out_free;
+ }
+ head.fmh_entries += xhead.fmh_entries;
+ head.fmh_oflags = xhead.fmh_oflags;
+
+ /*
+ * If the caller wanted a record count or there aren't any
+ * new records to return, we're done.
+ */
+ if (head.fmh_count == 0 || xhead.fmh_entries == 0)
+ break;
+
+ /* Copy all the records we got out to userspace. */
+ if (copy_to_user(user_recs, recs,
+ xhead.fmh_entries * sizeof(struct fsmap))) {
+ error = -EFAULT;
+ goto out_free;
+ }
+
+ /* Remember the last record flags we copied to userspace. */
+ last_rec = &recs[xhead.fmh_entries - 1];
+ last_flags = last_rec->fmr_flags;
+
+ /* Set up the low key for the next iteration. */
+ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
+ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
+ } while (!done && head.fmh_entries < head.fmh_count);
+
+ /*
+ * If there are no more records in the query result set and we're not
+ * in counting mode, mark the last record returned with the LAST flag.
+ */
+ if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
+ struct fsmap __user *user_rec;
+
+ last_flags |= FMR_OF_LAST;
+ user_rec = &arg->fmh_recs[head.fmh_entries - 1];
+
+ if (copy_to_user(&user_rec->fmr_flags, &last_flags,
+ sizeof(last_flags))) {
+ error = -EFAULT;
+ goto out_free;
+ }
+ }
+
+ /* copy back header */
+ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
+ error = -EFAULT;
+ goto out_free;
+ }
+
+out_free:
+ kvfree(recs);
+ return error;
+}
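The loop above is the kernel half of a chunked protocol; the userspace half re-queries with the low key advanced to the last record returned until it sees FMR_OF_LAST. A hedged sketch using the generic FS_IOC_GETFSMAP ioctl from <linux/fsmap.h>, with invented mount point and buffer size:

    #include <fcntl.h>
    #include <linux/fsmap.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
    	size_t nrecs = 128;
    	struct fsmap_head *head;
    	int fd = open("/mnt/xfs", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	head = calloc(1, sizeof(*head) + nrecs * sizeof(struct fsmap));
    	if (!head)
    		return 1;
    	head->fmh_count = nrecs;
    	/* high key: all ones means "to the end of everything" */
    	head->fmh_keys[1].fmr_device = UINT32_MAX;
    	head->fmh_keys[1].fmr_flags = UINT32_MAX;
    	head->fmh_keys[1].fmr_physical = UINT64_MAX;
    	head->fmh_keys[1].fmr_owner = UINT64_MAX;
    	head->fmh_keys[1].fmr_offset = UINT64_MAX;

    	for (;;) {
    		if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0)
    			break;
    		if (!head->fmh_entries)
    			break;
    		for (unsigned int i = 0; i < head->fmh_entries; i++)
    			printf("%llu +%llu\n",
    			       (unsigned long long)head->fmh_recs[i].fmr_physical,
    			       (unsigned long long)head->fmh_recs[i].fmr_length);
    		if (head->fmh_recs[head->fmh_entries - 1].fmr_flags &
    		    FMR_OF_LAST)
    			break;
    		/* continue after the last record we saw */
    		head->fmh_keys[0] = head->fmh_recs[head->fmh_entries - 1];
    	}
    	free(head);
    	close(fd);
    	return 0;
    }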
diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
index a0775788e7b1..a0bcc38486a5 100644
--- a/fs/xfs/xfs_fsmap.h
+++ b/fs/xfs/xfs_fsmap.h
@@ -7,6 +7,7 @@
#define __XFS_FSMAP_H__
struct fsmap;
+struct fsmap_head;
/* internal fsmap representation */
struct xfs_fsmap {
@@ -27,9 +28,6 @@ struct xfs_fsmap_head {
struct xfs_fsmap fmh_keys[2]; /* low and high keys */
};
-void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
-
-int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
- struct fsmap *out_recs);
+int xfs_ioc_getfsmap(struct xfs_inode *ip, struct fsmap_head __user *arg);
#endif /* __XFS_FSMAP_H__ */
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c211ea2b63c4..3643cc843f62 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -485,7 +485,7 @@ xfs_do_force_shutdown(
const char *why;
- if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
+ if (xfs_set_shutdown(mp)) {
xlog_shutdown_wait(mp->m_log);
return;
}
diff --git a/fs/xfs/xfs_handle.c b/fs/xfs/xfs_handle.c
index cf5acbd3c7ca..49e5e5f04e60 100644
--- a/fs/xfs/xfs_handle.c
+++ b/fs/xfs/xfs_handle.c
@@ -85,16 +85,16 @@ xfs_find_handle(
int hsize;
xfs_handle_t handle;
struct inode *inode;
- struct fd f = {NULL};
+ struct fd f = EMPTY_FD;
struct path path;
int error;
struct xfs_inode *ip;
if (cmd == XFS_IOC_FD_TO_HANDLE) {
f = fdget(hreq->fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
} else {
error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
if (error)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index cf629302d48e..a680e5b82672 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -65,6 +65,18 @@ static int xfs_icwalk_ag(struct xfs_perag *pag,
XFS_ICWALK_FLAG_RECLAIM_SICK | \
XFS_ICWALK_FLAG_UNION)
+/* Marks for the perag xarray */
+#define XFS_PERAG_RECLAIM_MARK XA_MARK_0
+#define XFS_PERAG_BLOCKGC_MARK XA_MARK_1
+
+static inline xa_mark_t ici_tag_to_mark(unsigned int tag)
+{
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ return XFS_PERAG_RECLAIM_MARK;
+ ASSERT(tag == XFS_ICI_BLOCKGC_TAG);
+ return XFS_PERAG_BLOCKGC_MARK;
+}
+
/*
* Allocate and initialise an xfs_inode.
*/
@@ -88,7 +100,8 @@ xfs_inode_alloc(
/* VFS doesn't initialise i_mode! */
VFS_I(ip)->i_mode = 0;
- mapping_set_large_folios(VFS_I(ip)->i_mapping);
+ mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
+ M_IGEO(mp)->min_folio_order);
XFS_STATS_INC(mp, vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -191,7 +204,7 @@ xfs_reclaim_work_queue(
{
rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ if (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
@@ -241,9 +254,7 @@ xfs_perag_set_inode_tag(
return;
/* propagate the tag up into the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
- spin_unlock(&mp->m_perag_lock);
+ xa_set_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
/* start background work */
switch (tag) {
@@ -285,14 +296,39 @@ xfs_perag_clear_inode_tag(
return;
/* clear the tag from the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
- spin_unlock(&mp->m_perag_lock);
+ xa_clear_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}
/*
+ * Find the next AG after @pag, or the first AG if @pag is NULL.
+ */
+static struct xfs_perag *
+xfs_perag_grab_next_tag(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag,
+ int tag)
+{
+ unsigned long index = 0;
+
+ if (pag) {
+ index = pag->pag_agno + 1;
+ xfs_perag_rele(pag);
+ }
+
+ rcu_read_lock();
+ pag = xa_find(&mp->m_perags, &index, ULONG_MAX, ici_tag_to_mark(tag));
+ if (pag) {
+ trace_xfs_perag_grab_next_tag(pag, _RET_IP_);
+ if (!atomic_inc_not_zero(&pag->pag_active_ref))
+ pag = NULL;
+ }
+ rcu_read_unlock();
+ return pag;
+}
+
+/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
* information about the on-disk values in the VFS inode and so we can't just
@@ -325,7 +361,8 @@ xfs_reinit_inode(
inode->i_uid = uid;
inode->i_gid = gid;
inode->i_state = state;
- mapping_set_large_folios(inode->i_mapping);
+ mapping_set_folio_min_order(inode->i_mapping,
+ M_IGEO(mp)->min_folio_order);
return error;
}
@@ -755,7 +792,7 @@ xfs_iget(
ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
/* reject inode numbers outside existing AGs */
- if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+ if (!xfs_verify_ino(mp, ino))
return -EINVAL;
XFS_STATS_INC(mp, xs_ig_attempts);
@@ -977,7 +1014,7 @@ xfs_reclaim_inodes(
if (xfs_want_reclaim_sick(mp))
icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
- while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ while (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
xfs_ail_push_all_sync(mp->m_ail);
xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}
@@ -1019,15 +1056,17 @@ long
xfs_reclaim_inodes_count(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t ag = 0;
+	XA_STATE(xas, &mp->m_perags, 0);
long reclaimable = 0;
+ struct xfs_perag *pag;
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
- ag = pag->pag_agno + 1;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) {
+ trace_xfs_reclaim_inodes_count(pag, _THIS_IP_);
reclaimable += pag->pag_ici_reclaimable;
- xfs_perag_put(pag);
}
+ rcu_read_unlock();
+
return reclaimable;
}
@@ -1159,7 +1198,7 @@ xfs_inode_free_eofblocks(
if (xfs_can_free_eofblocks(ip))
return xfs_free_eofblocks(ip);
- /* inode could be preallocated or append-only */
+ /* inode could be preallocated */
trace_xfs_inode_free_eofblocks_invalid(ip);
xfs_inode_clear_eofblocks_tag(ip);
return 0;
@@ -1369,14 +1408,13 @@ void
xfs_blockgc_start(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t agno;
+ struct xfs_perag *pag = NULL;
if (xfs_set_blockgc_enabled(mp))
return;
trace_xfs_blockgc_start(mp, __return_address);
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
xfs_blockgc_queue(pag);
}
@@ -1492,21 +1530,19 @@ int
xfs_blockgc_flush_all(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t agno;
+ struct xfs_perag *pag = NULL;
trace_xfs_blockgc_flush_all(mp, __return_address);
/*
- * For each blockgc worker, move its queue time up to now. If it
- * wasn't queued, it will not be requeued. Then flush whatever's
- * left.
+ * For each blockgc worker, move its queue time up to now. If it wasn't
+ * queued, it will not be requeued. Then flush whatever is left.
*/
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
mod_delayed_work(pag->pag_mount->m_blockgc_wq,
&pag->pag_blockgc_work, 0);
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
flush_delayed_work(&pag->pag_blockgc_work);
return xfs_inodegc_flush(mp);
@@ -1752,12 +1788,11 @@ xfs_icwalk(
enum xfs_icwalk_goal goal,
struct xfs_icwalk *icw)
{
- struct xfs_perag *pag;
+ struct xfs_perag *pag = NULL;
int error = 0;
int last_error = 0;
- xfs_agnumber_t agno;
- for_each_perag_tag(mp, agno, pag, goal) {
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) {
error = xfs_icwalk_ag(pag, goal, icw);
if (error) {
last_error = error;
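
The conversion above replaces the radix-tree tag walk with
xfs_perag_grab_next_tag(), which pairs an xa_find() marked lookup with an
atomic_inc_not_zero() reference grab so that a perag already headed for
teardown is skipped rather than resurrected. The standalone sketch below
models that walk without RCU; a plain array stands in for the xarray and
reclaim_marked for XA_MARK_0 (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_AGS	8

struct perag {
	unsigned long	agno;
	atomic_int	active_ref;	/* 0 means being torn down */
	bool		reclaim_marked;	/* models XA_MARK_0 */
};

static struct perag ags[NR_AGS];

/* Models xa_find(): first marked entry at or after *index. */
static struct perag *find_marked(unsigned long *index)
{
	for (unsigned long i = *index; i < NR_AGS; i++) {
		if (ags[i].reclaim_marked) {
			*index = i;
			return &ags[i];
		}
	}
	return NULL;
}

/* Models atomic_inc_not_zero(): take a reference unless it hit zero. */
static bool grab(struct perag *pag)
{
	int old = atomic_load(&pag->active_ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&pag->active_ref, &old,
						 old + 1))
			return true;
	return false;
}

/* Drop the previous reference, then grab the next marked AG. */
static struct perag *grab_next_marked(struct perag *pag)
{
	unsigned long index = 0;

	if (pag) {
		index = pag->agno + 1;
		atomic_fetch_sub(&pag->active_ref, 1);
	}
	pag = find_marked(&index);
	if (pag && !grab(pag))
		pag = NULL;
	return pag;
}

int main(void)
{
	struct perag *pag = NULL;

	for (unsigned long i = 0; i < NR_AGS; i++) {
		ags[i].agno = i;
		atomic_init(&ags[i].active_ref, 1);
		ags[i].reclaim_marked = (i % 3 == 0);
	}
	while ((pag = grab_next_marked(pag)))
		printf("visiting AG %lu\n", pag->agno);
	return 0;
}
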
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7dc6f326936c..bcc277fc0a83 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -704,7 +704,7 @@ xfs_create(
	 * entry pointing to it, but a directory also has the "." entry
* pointing to itself.
*/
- error = xfs_dialloc(&tp, dp->i_ino, args->mode, &ino);
+ error = xfs_dialloc(&tp, args, &ino);
if (!error)
error = xfs_icreate(tp, ino, args, &du.ip);
if (error)
@@ -812,7 +812,7 @@ xfs_create_tmpfile(
if (error)
goto out_release_dquots;
- error = xfs_dialloc(&tp, dp->i_ino, args->mode, &ino);
+ error = xfs_dialloc(&tp, args, &ino);
if (!error)
error = xfs_icreate(tp, ino, args, &ip);
if (error)
@@ -1079,88 +1079,6 @@ out:
return error;
}
-int
-xfs_release(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp = ip->i_mount;
- int error = 0;
-
- if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
- return 0;
-
- /* If this is a read-only mount, don't do this (would generate I/O) */
- if (xfs_is_readonly(mp))
- return 0;
-
- if (!xfs_is_shutdown(mp)) {
- int truncated;
-
- /*
- * If we previously truncated this file and removed old data
- * in the process, we want to initiate "early" writeout on
- * the last close. This is an attempt to combat the notorious
- * NULL files problem which is particularly noticeable from a
- * truncate down, buffered (re-)write (delalloc), followed by
- * a crash. What we are effectively doing here is
- * significantly reducing the time window where we'd otherwise
- * be exposed to that problem.
- */
- truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
- if (truncated) {
- xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
- if (ip->i_delayed_blks > 0) {
- error = filemap_flush(VFS_I(ip)->i_mapping);
- if (error)
- return error;
- }
- }
- }
-
- if (VFS_I(ip)->i_nlink == 0)
- return 0;
-
- /*
- * If we can't get the iolock just skip truncating the blocks past EOF
- * because we could deadlock with the mmap_lock otherwise. We'll get
- * another chance to drop them once the last reference to the inode is
- * dropped, so we'll never leak blocks permanently.
- */
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
- return 0;
-
- if (xfs_can_free_eofblocks(ip)) {
- /*
- * Check if the inode is being opened, written and closed
- * frequently and we have delayed allocation blocks outstanding
- * (e.g. streaming writes from the NFS server), truncating the
- * blocks past EOF will cause fragmentation to occur.
- *
- * In this case don't do the truncation, but we have to be
- * careful how we detect this case. Blocks beyond EOF show up as
- * i_delayed_blks even when the inode is clean, so we need to
- * truncate them away first before checking for a dirty release.
- * Hence on the first dirty close we will still remove the
- * speculative allocation, but after that we will leave it in
- * place.
- */
- if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
- goto out_unlock;
-
- error = xfs_free_eofblocks(ip);
- if (error)
- goto out_unlock;
-
- /* delalloc blocks after truncation means it really is dirty */
- if (ip->i_delayed_blks)
- xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
- }
-
-out_unlock:
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return error;
-}
-
/*
* Mark all the buffers attached to this directory stale. In theory we should
* never be freeing a directory with any blocks at all, but this covers the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 51defdebef30..97ed912306fd 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -276,12 +276,13 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
return ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
}
-static inline bool xfs_is_metadata_inode(struct xfs_inode *ip)
+static inline bool xfs_is_metadata_inode(const struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
- return ip == mp->m_rbmip || ip == mp->m_rsumip ||
- xfs_is_quota_inode(&mp->m_sb, ip->i_ino);
+ return ip->i_ino == mp->m_sb.sb_rbmino ||
+ ip->i_ino == mp->m_sb.sb_rsumino ||
+ xfs_is_quota_inode(&mp->m_sb, ip->i_ino);
}
bool xfs_is_always_cow_inode(struct xfs_inode *ip);
@@ -335,7 +336,7 @@ static inline bool xfs_inode_has_bigrtalloc(struct xfs_inode *ip)
#define XFS_INEW (1 << 3) /* inode has just been allocated */
#define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
-#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
+#define XFS_EOFBLOCKS_RELEASED (1 << 6) /* eofblocks were freed in ->release */
#define XFS_IFLUSHING (1 << 7) /* inode is being flushed */
#define __XFS_IPINNED_BIT 8 /* wakeup key for zero pin count */
#define XFS_IPINNED (1 << __XFS_IPINNED_BIT)
@@ -382,7 +383,7 @@ static inline bool xfs_inode_has_bigrtalloc(struct xfs_inode *ip)
*/
#define XFS_IRECLAIM_RESET_FLAGS \
(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
- XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | XFS_NEED_INACTIVE | \
+ XFS_EOFBLOCKS_RELEASED | XFS_ITRUNCATED | XFS_NEED_INACTIVE | \
XFS_INACTIVATING | XFS_IQUOTAUNCHECKED)
/*
@@ -512,7 +513,6 @@ enum layout_break_reason {
#define XFS_INHERIT_GID(pip) \
(xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID))
-int xfs_release(struct xfs_inode *ip);
int xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6b13666d4e96..a20d426ef021 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -876,136 +876,6 @@ out_free_buf:
return error;
}
-STATIC int
-xfs_ioc_getfsmap(
- struct xfs_inode *ip,
- struct fsmap_head __user *arg)
-{
- struct xfs_fsmap_head xhead = {0};
- struct fsmap_head head;
- struct fsmap *recs;
- unsigned int count;
- __u32 last_flags = 0;
- bool done = false;
- int error;
-
- if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
- return -EFAULT;
- if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
- memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
- sizeof(head.fmh_keys[0].fmr_reserved)) ||
- memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
- sizeof(head.fmh_keys[1].fmr_reserved)))
- return -EINVAL;
-
- /*
- * Use an internal memory buffer so that we don't have to copy fsmap
- * data to userspace while holding locks. Start by trying to allocate
- * up to 128k for the buffer, but fall back to a single page if needed.
- */
- count = min_t(unsigned int, head.fmh_count,
- 131072 / sizeof(struct fsmap));
- recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
- if (!recs) {
- count = min_t(unsigned int, head.fmh_count,
- PAGE_SIZE / sizeof(struct fsmap));
- recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
- if (!recs)
- return -ENOMEM;
- }
-
- xhead.fmh_iflags = head.fmh_iflags;
- xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
- xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
-
- trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
- trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
-
- head.fmh_entries = 0;
- do {
- struct fsmap __user *user_recs;
- struct fsmap *last_rec;
-
- user_recs = &arg->fmh_recs[head.fmh_entries];
- xhead.fmh_entries = 0;
- xhead.fmh_count = min_t(unsigned int, count,
- head.fmh_count - head.fmh_entries);
-
- /* Run query, record how many entries we got. */
- error = xfs_getfsmap(ip->i_mount, &xhead, recs);
- switch (error) {
- case 0:
- /*
- * There are no more records in the result set. Copy
- * whatever we got to userspace and break out.
- */
- done = true;
- break;
- case -ECANCELED:
- /*
- * The internal memory buffer is full. Copy whatever
- * records we got to userspace and go again if we have
- * not yet filled the userspace buffer.
- */
- error = 0;
- break;
- default:
- goto out_free;
- }
- head.fmh_entries += xhead.fmh_entries;
- head.fmh_oflags = xhead.fmh_oflags;
-
- /*
- * If the caller wanted a record count or there aren't any
- * new records to return, we're done.
- */
- if (head.fmh_count == 0 || xhead.fmh_entries == 0)
- break;
-
- /* Copy all the records we got out to userspace. */
- if (copy_to_user(user_recs, recs,
- xhead.fmh_entries * sizeof(struct fsmap))) {
- error = -EFAULT;
- goto out_free;
- }
-
- /* Remember the last record flags we copied to userspace. */
- last_rec = &recs[xhead.fmh_entries - 1];
- last_flags = last_rec->fmr_flags;
-
- /* Set up the low key for the next iteration. */
- xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
- trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
- } while (!done && head.fmh_entries < head.fmh_count);
-
- /*
- * If there are no more records in the query result set and we're not
- * in counting mode, mark the last record returned with the LAST flag.
- */
- if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
- struct fsmap __user *user_rec;
-
- last_flags |= FMR_OF_LAST;
- user_rec = &arg->fmh_recs[head.fmh_entries - 1];
-
- if (copy_to_user(&user_rec->fmr_flags, &last_flags,
- sizeof(last_flags))) {
- error = -EFAULT;
- goto out_free;
- }
- }
-
- /* copy back header */
- if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
- error = -EFAULT;
- goto out_free;
- }
-
-out_free:
- kvfree(recs);
- return error;
-}
-
int
xfs_ioc_swapext(
xfs_swapext_t *sxp)
@@ -1016,33 +886,33 @@ xfs_ioc_swapext(
/* Pull information for the target fd */
f = fdget((int)sxp->sx_fdtarget);
- if (!f.file) {
+ if (!fd_file(f)) {
error = -EINVAL;
goto out;
}
- if (!(f.file->f_mode & FMODE_WRITE) ||
- !(f.file->f_mode & FMODE_READ) ||
- (f.file->f_flags & O_APPEND)) {
+ if (!(fd_file(f)->f_mode & FMODE_WRITE) ||
+ !(fd_file(f)->f_mode & FMODE_READ) ||
+ (fd_file(f)->f_flags & O_APPEND)) {
error = -EBADF;
goto out_put_file;
}
tmp = fdget((int)sxp->sx_fdtmp);
- if (!tmp.file) {
+ if (!fd_file(tmp)) {
error = -EINVAL;
goto out_put_file;
}
- if (!(tmp.file->f_mode & FMODE_WRITE) ||
- !(tmp.file->f_mode & FMODE_READ) ||
- (tmp.file->f_flags & O_APPEND)) {
+ if (!(fd_file(tmp)->f_mode & FMODE_WRITE) ||
+ !(fd_file(tmp)->f_mode & FMODE_READ) ||
+ (fd_file(tmp)->f_flags & O_APPEND)) {
error = -EBADF;
goto out_put_tmp_file;
}
- if (IS_SWAPFILE(file_inode(f.file)) ||
- IS_SWAPFILE(file_inode(tmp.file))) {
+ if (IS_SWAPFILE(file_inode(fd_file(f))) ||
+ IS_SWAPFILE(file_inode(fd_file(tmp)))) {
error = -EINVAL;
goto out_put_tmp_file;
}
@@ -1052,14 +922,14 @@ xfs_ioc_swapext(
* before we cast and access them as XFS structures as we have no
* control over what the user passes us here.
*/
- if (f.file->f_op != &xfs_file_operations ||
- tmp.file->f_op != &xfs_file_operations) {
+ if (fd_file(f)->f_op != &xfs_file_operations ||
+ fd_file(tmp)->f_op != &xfs_file_operations) {
error = -EINVAL;
goto out_put_tmp_file;
}
- ip = XFS_I(file_inode(f.file));
- tip = XFS_I(file_inode(tmp.file));
+ ip = XFS_I(file_inode(fd_file(f)));
+ tip = XFS_I(file_inode(fd_file(tmp)));
if (ip->i_mount != tip->i_mount) {
error = -EINVAL;
@@ -1518,6 +1388,10 @@ xfs_file_ioctl(
case XFS_IOC_EXCHANGE_RANGE:
return xfs_ioc_exchange_range(filp, arg);
+ case XFS_IOC_START_COMMIT:
+ return xfs_ioc_start_commit(filp, arg);
+ case XFS_IOC_COMMIT_RANGE:
+ return xfs_ioc_commit_range(filp, arg);
default:
return -ENOTTY;
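
These hunks also adopt the VFS's opaque struct fd representation: callers
must go through fd_file() rather than dereferencing a .file member, and an
empty descriptor is spelled EMPTY_FD. A rough userspace model of that
encapsulation follows; the real kernel type packs a struct file pointer
plus low flag bits into a single word, so the masking below is
illustrative only:

#include <stdio.h>

struct file {
	const char	*name;
};

/* Opaque descriptor reference; members are private to the VFS. */
struct fd {
	unsigned long	word;	/* pointer | flag bits */
};
#define EMPTY_FD ((struct fd){ .word = 0 })

static inline struct file *fd_file(struct fd f)
{
	return (struct file *)(f.word & ~3UL);	/* strip flag bits */
}

static int use_fd(struct fd f)
{
	if (!fd_file(f))
		return -1;	/* what the -EBADF paths above check for */
	printf("file: %s\n", fd_file(f)->name);
	return 0;
}

int main(void)
{
	struct file file = { .name = "example" };
	struct fd f = { .word = (unsigned long)&file };

	use_fd(EMPTY_FD);	/* rejected: no file attached */
	return use_fd(f) ? 1 : 0;
}
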
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 72c981e3dc92..1e11f48814c0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1208,14 +1208,14 @@ out_unlock:
return error;
}
-static int
+static void
xfs_buffered_write_delalloc_punch(
struct inode *inode,
loff_t offset,
- loff_t length)
+ loff_t length,
+ struct iomap *iomap)
{
xfs_bmap_punch_delalloc_range(XFS_I(inode), offset, offset + length);
- return 0;
}
static int
@@ -1227,17 +1227,8 @@ xfs_buffered_write_iomap_end(
unsigned flags,
struct iomap *iomap)
{
-
- struct xfs_mount *mp = XFS_M(inode->i_sb);
- int error;
-
- error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
- length, written, &xfs_buffered_write_delalloc_punch);
- if (error && !xfs_is_shutdown(mp)) {
- xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
- __func__, XFS_I(inode)->i_ino);
- return error;
- }
+ iomap_file_buffered_write_punch_delalloc(inode, offset, length, written,
+ flags, iomap, &xfs_buffered_write_delalloc_punch);
return 0;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 1cdc8034f54d..ee79cf161312 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -567,7 +567,7 @@ xfs_stat_blksize(
return 1U << mp->m_allocsize_log;
}
- return PAGE_SIZE;
+ return max_t(uint32_t, PAGE_SIZE, mp->m_sb.sb_blocksize);
}
STATIC int
@@ -870,16 +870,6 @@ xfs_setattr_size(
error = xfs_zero_range(ip, oldsize, newsize - oldsize,
&did_zeroing);
} else {
- /*
- * iomap won't detect a dirty page over an unwritten block (or a
- * cow block over a hole) and subsequently skips zeroing the
- * newly post-EOF portion of the page. Flush the new EOF to
- * convert the block before the pagecache truncate.
- */
- error = filemap_write_and_wait_range(inode->i_mapping, newsize,
- newsize);
- if (error)
- return error;
error = xfs_truncate_page(ip, newsize, &did_zeroing);
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 817ea7e0a8ab..26b2f5887b88 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3495,7 +3495,7 @@ xlog_force_shutdown(
* If this log shutdown also sets the mount shutdown state, issue a
* shutdown warning message.
*/
- if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
+ if (!xfs_set_shutdown(log->l_mp)) {
xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Filesystem has been shut down due to log error (0x%x).",
shutdown_flags);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4423dd344239..ec766b4bc853 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1336,7 +1336,7 @@ xlog_find_tail(
* headers if we have a filesystem using non-persistent counters.
*/
if (clean)
- set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);
+ xfs_set_clean(log->l_mp);
/*
* Make sure that there are no blocks in front of the head
@@ -2128,7 +2128,7 @@ xlog_recover_add_to_cont_trans(
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
- ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
+ ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
memcpy(&ptr[old_len], dp, len);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 09eef1721ef4..1fdd79c5bfa0 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -132,11 +132,15 @@ xfs_sb_validate_fsb_count(
xfs_sb_t *sbp,
uint64_t nblocks)
{
- ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
+ uint64_t max_bytes;
+
ASSERT(sbp->sb_blocklog >= BBSHIFT);
+ if (check_shl_overflow(nblocks, sbp->sb_blocklog, &max_bytes))
+ return -EFBIG;
+
/* Limited by ULONG_MAX of page cache index */
- if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+ if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
return -EFBIG;
return 0;
}
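
The rewritten check computes the filesystem's byte count with explicit
shift-overflow detection before testing the page-cache index limit. The
standalone sketch below mirrors that arithmetic; check_shl_overflow() is
modeled with a manual bound and PAGE_SHIFT is assumed to be 12:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4k pages */

/*
 * Nonzero (modeling -EFBIG) if nblocks << blocklog overflows 64 bits
 * or the resulting page-cache index does not fit in an unsigned long.
 */
static int validate_fsb_count(uint64_t nblocks, unsigned int blocklog)
{
	uint64_t max_bytes;

	if (nblocks > (UINT64_MAX >> blocklog))	/* check_shl_overflow() */
		return -1;
	max_bytes = nblocks << blocklog;

	if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_fsb_count(1ULL << 40, 12));	/* fits */
	printf("%d\n", validate_fsb_count(1ULL << 60, 12));	/* -EFBIG */
	return 0;
}
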
@@ -595,7 +599,7 @@ xfs_unmount_flush_inodes(
xfs_extent_busy_wait_all(mp);
flush_workqueue(xfs_discard_wq);
- set_bit(XFS_OPSTATE_UNMOUNTING, &mp->m_opstate);
+ xfs_set_unmounting(mp);
xfs_ail_push_all_sync(mp->m_ail);
xfs_inodegc_stop(mp);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index d0567dfbc036..96496f39f551 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -147,7 +147,7 @@ typedef struct xfs_mount {
int m_logbufs; /* number of log buffers */
int m_logbsize; /* size of each log buffer */
uint m_rsumlevels; /* rt summary levels */
- uint m_rsumsize; /* size of rt summary, bytes */
+ xfs_filblks_t m_rsumblocks; /* size of rt summary, FSBs */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_qflags; /* quota status flags */
uint64_t m_features; /* active filesystem features */
@@ -208,8 +208,7 @@ typedef struct xfs_mount {
*/
atomic64_t m_allocbt_blks;
- struct radix_tree_root m_perag_tree; /* per-ag accounting info */
- spinlock_t m_perag_lock; /* lock for m_perag_tree */
+ struct xarray m_perags; /* per-ag accounting info */
uint64_t m_resblks; /* total reserved blocks */
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7443debaffd6..d0f5b403bdbe 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -230,9 +230,8 @@ _xfs_mru_cache_clear_reap_list(
__releases(mru->lock) __acquires(mru->lock)
{
struct xfs_mru_cache_elem *elem, *next;
- struct list_head tmp;
+ LIST_HEAD(tmp);
- INIT_LIST_HEAD(&tmp);
list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {
/* Remove the element from the data store. */
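
LIST_HEAD(tmp) declares the on-stack reap list and initializes it to point
at itself in one step, replacing the declare-then-INIT_LIST_HEAD() pair. A
minimal model of that circular list-head convention (simplified stand-ins,
not the kernel's <linux/list.h>):

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Declare a list head that starts out empty (pointing at itself). */
#define LIST_HEAD(name) \
	struct list_head name = { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	LIST_HEAD(tmp);		/* no separate INIT_LIST_HEAD() needed */
	struct list_head a, b;

	list_add_tail(&a, &tmp);
	list_add_tail(&b, &tmp);
	printf("empty? %d\n", list_empty(&tmp));
	return 0;
}
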
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 9490b913a4ab..7e2307921deb 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -799,7 +799,7 @@ xfs_qm_qino_alloc(
};
xfs_ino_t ino;
- error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
+ error = xfs_dialloc(&tp, &args, &ino);
if (!error)
error = xfs_icreate(tp, ino, &args, ipp);
if (error) {
@@ -1539,6 +1539,43 @@ xfs_qm_mount_quotas(
}
/*
+ * Load the inode for a given type of quota, assuming that the sb fields have
+ * been sorted out. This is not true when switching quota types on a V4
+ * filesystem, so do not use this function for that.
+ *
+ * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
+ * success; or a negative errno.
+ */
+int
+xfs_qm_qino_load(
+ struct xfs_mount *mp,
+ xfs_dqtype_t type,
+ struct xfs_inode **ipp)
+{
+ xfs_ino_t ino = NULLFSINO;
+
+ switch (type) {
+ case XFS_DQTYPE_USER:
+ ino = mp->m_sb.sb_uquotino;
+ break;
+ case XFS_DQTYPE_GROUP:
+ ino = mp->m_sb.sb_gquotino;
+ break;
+ case XFS_DQTYPE_PROJ:
+ ino = mp->m_sb.sb_pquotino;
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ if (ino == NULLFSINO)
+ return -ENOENT;
+
+ return xfs_iget(mp, NULL, ino, 0, 0, ipp);
+}
+
+/*
* This is called after the superblock has been read in and we're ready to
* iget the quota inodes.
*/
@@ -1561,24 +1598,21 @@ xfs_qm_init_quotainos(
if (XFS_IS_UQUOTA_ON(mp) &&
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
if (error)
return error;
}
if (XFS_IS_GQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
if (error)
goto error_rele;
}
if (XFS_IS_PQUOTA_ON(mp) &&
mp->m_sb.sb_pquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_pquotino > 0);
- error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
- 0, 0, &pip);
+ error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
if (error)
goto error_rele;
}
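
xfs_qm_qino_load() centralizes the "quota inode may be absent" convention:
-ENOENT means the superblock field was NULLFSINO, while any other negative
errno is a real failure. The sketch below shows how a caller folds the
absent case back into success, as xfs_qm_scall_trunc_qfile() now does
(stand-in types; xfs_iget() is replaced by a plain assignment):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NULLFSINO ((uint64_t)-1)

/* -ENOENT: no such quota inode; 0: *ipp set; other -errno: failure. */
static int qino_load(uint64_t sb_ino, uint64_t *ipp)
{
	if (sb_ino == NULLFSINO)
		return -ENOENT;
	*ipp = sb_ino;		/* stands in for xfs_iget() */
	return 0;
}

static int trunc_qfile(uint64_t sb_ino)
{
	uint64_t ip;
	int error;

	error = qino_load(sb_ino, &ip);
	if (error == -ENOENT)
		return 0;	/* nothing allocated, nothing to truncate */
	if (error)
		return error;
	printf("truncating quota inode %llu\n", (unsigned long long)ip);
	return 0;
}

int main(void)
{
	return trunc_qfile(NULLFSINO) || trunc_qfile(131);
}
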
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 6e09dfcd13e2..e919c7f62f57 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -184,4 +184,7 @@ xfs_get_defquota(struct xfs_quotainfo *qi, xfs_dqtype_t type)
}
}
+int xfs_qm_qino_load(struct xfs_mount *mp, xfs_dqtype_t type,
+ struct xfs_inode **ipp);
+
#endif /* __XFS_QM_H__ */
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 392cb39cc10c..4eda50ae2d1c 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -53,16 +53,15 @@ xfs_qm_scall_quotaoff(
STATIC int
xfs_qm_scall_trunc_qfile(
struct xfs_mount *mp,
- xfs_ino_t ino)
+ xfs_dqtype_t type)
{
struct xfs_inode *ip;
struct xfs_trans *tp;
int error;
- if (ino == NULLFSINO)
+ error = xfs_qm_qino_load(mp, type, &ip);
+ if (error == -ENOENT)
return 0;
-
- error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
if (error)
return error;
@@ -113,17 +112,17 @@ xfs_qm_scall_trunc_qfiles(
}
if (flags & XFS_QMOPT_UQUOTA) {
- error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
+ error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_USER);
if (error)
return error;
}
if (flags & XFS_QMOPT_GQUOTA) {
- error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
+ error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_GROUP);
if (error)
return error;
}
if (flags & XFS_QMOPT_PQUOTA)
- error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
+ error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_PROJ);
return error;
}
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 9c162e69976b..4c7f7ce4fd2f 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -16,24 +16,25 @@
#include "xfs_qm.h"
-static void
+static int
xfs_qm_fill_state(
struct qc_type_state *tstate,
struct xfs_mount *mp,
- struct xfs_inode *ip,
- xfs_ino_t ino,
- struct xfs_def_quota *defq)
+ xfs_dqtype_t type)
{
- bool tempqip = false;
-
- tstate->ino = ino;
- if (!ip && ino == NULLFSINO)
- return;
- if (!ip) {
- if (xfs_iget(mp, NULL, ino, 0, 0, &ip))
- return;
- tempqip = true;
+ struct xfs_inode *ip;
+ struct xfs_def_quota *defq;
+ int error;
+
+ error = xfs_qm_qino_load(mp, type, &ip);
+ if (error) {
+ tstate->ino = NULLFSINO;
+ return error != -ENOENT ? error : 0;
}
+
+ defq = xfs_get_defquota(mp->m_quotainfo, type);
+
+ tstate->ino = ip->i_ino;
tstate->flags |= QCI_SYSFILE;
tstate->blocks = ip->i_nblocks;
tstate->nextents = ip->i_df.if_nextents;
@@ -43,8 +44,9 @@ xfs_qm_fill_state(
tstate->spc_warnlimit = 0;
tstate->ino_warnlimit = 0;
tstate->rt_spc_warnlimit = 0;
- if (tempqip)
- xfs_irele(ip);
+ xfs_irele(ip);
+
+ return 0;
}
/*
@@ -56,8 +58,9 @@ xfs_fs_get_quota_state(
struct super_block *sb,
struct qc_state *state)
{
- struct xfs_mount *mp = XFS_M(sb);
- struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_mount *mp = XFS_M(sb);
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int error;
memset(state, 0, sizeof(*state));
if (!XFS_IS_QUOTA_ON(mp))
@@ -76,12 +79,18 @@ xfs_fs_get_quota_state(
if (XFS_IS_PQUOTA_ENFORCED(mp))
state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;
- xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip,
- mp->m_sb.sb_uquotino, &q->qi_usr_default);
- xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip,
- mp->m_sb.sb_gquotino, &q->qi_grp_default);
- xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip,
- mp->m_sb.sb_pquotino, &q->qi_prj_default);
+ error = xfs_qm_fill_state(&state->s_state[USRQUOTA], mp,
+ XFS_DQTYPE_USER);
+ if (error)
+ return error;
+ error = xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp,
+ XFS_DQTYPE_GROUP);
+ if (error)
+ return error;
+ error = xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp,
+ XFS_DQTYPE_PROJ);
+ if (error)
+ return error;
return 0;
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index ebeab8e4dab1..3a2005a1e673 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -142,7 +142,7 @@ xfs_rtallocate_range(
* We need to find the beginning and end of the extent so we can
* properly update the summary.
*/
- error = xfs_rtfind_back(args, start, 0, &preblock);
+ error = xfs_rtfind_back(args, start, &preblock);
if (error)
return error;
@@ -194,6 +194,17 @@ xfs_rtallocate_range(
return xfs_rtmodify_range(args, start, len, 0);
}
+/* Reduce @rtxlen until it is a multiple of @prod. */
+static inline xfs_rtxlen_t
+xfs_rtalloc_align_len(
+ xfs_rtxlen_t rtxlen,
+ xfs_rtxlen_t prod)
+{
+ if (unlikely(prod > 1))
+ return rounddown(rtxlen, prod);
+ return rtxlen;
+}
+
/*
* Make sure we don't run off the end of the rt volume. Be careful that
* adjusting maxlen downwards doesn't cause us to fail the alignment checks.
@@ -208,7 +219,7 @@ xfs_rtallocate_clamp_len(
xfs_rtxlen_t ret;
ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx;
- return rounddown(ret, prod);
+ return xfs_rtalloc_align_len(ret, prod);
}
/*
@@ -229,39 +240,40 @@ xfs_rtallocate_extent_block(
xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
struct xfs_mount *mp = args->mp;
- xfs_rtxnum_t besti; /* best rtext found so far */
- xfs_rtxnum_t bestlen;/* best length found so far */
+ xfs_rtxnum_t besti = -1; /* best rtext found so far */
xfs_rtxnum_t end; /* last rtext in chunk */
- int error;
xfs_rtxnum_t i; /* current rtext trying */
xfs_rtxnum_t next; /* next rtext to try */
+ xfs_rtxlen_t scanlen; /* number of free rtx to look for */
+ xfs_rtxlen_t bestlen = 0; /* best length found so far */
int stat; /* status from internal calls */
+ int error;
/*
- * Loop over all the extents starting in this bitmap block,
- * looking for one that's long enough.
+ * Loop over all the extents starting in this bitmap block up to the
+ * end of the rt volume, looking for one that's long enough.
*/
- for (i = xfs_rbmblock_to_rtx(mp, bbno), besti = -1, bestlen = 0,
- end = xfs_rbmblock_to_rtx(mp, bbno + 1) - 1;
- i <= end;
- i++) {
+ end = min(mp->m_sb.sb_rextents, xfs_rbmblock_to_rtx(mp, bbno + 1)) - 1;
+ for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
/* Make sure we don't scan off the end of the rt volume. */
- maxlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
+ scanlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
+ if (scanlen < minlen)
+ break;
/*
- * See if there's a free extent of maxlen starting at i.
+ * See if there's a free extent of scanlen starting at i.
* If it's not so then next will contain the first non-free.
*/
- error = xfs_rtcheck_range(args, i, maxlen, 1, &next, &stat);
+ error = xfs_rtcheck_range(args, i, scanlen, 1, &next, &stat);
if (error)
return error;
if (stat) {
/*
- * i for maxlen is all free, allocate and return that.
+			 * i to i + scanlen is all free; allocate and return it.
*/
- bestlen = maxlen;
- besti = i;
- goto allocate;
+ *len = scanlen;
+ *rtx = i;
+ return 0;
}
/*
@@ -289,38 +301,28 @@ xfs_rtallocate_extent_block(
return error;
}
- /*
- * Searched the whole thing & didn't find a maxlen free extent.
- */
- if (minlen > maxlen || besti == -1) {
- /*
- * Allocation failed. Set *nextp to the next block to try.
- */
- *nextp = next;
- return -ENOSPC;
- }
+ /* Searched the whole thing & didn't find a maxlen free extent. */
+ if (besti == -1)
+ goto nospace;
/*
- * If size should be a multiple of prod, make that so.
+ * Ensure bestlen is a multiple of prod, but don't return a too-short
+ * extent.
*/
- if (prod > 1) {
- xfs_rtxlen_t p; /* amount to trim length by */
-
- div_u64_rem(bestlen, prod, &p);
- if (p)
- bestlen -= p;
- }
+ bestlen = xfs_rtalloc_align_len(bestlen, prod);
+ if (bestlen < minlen)
+ goto nospace;
/*
- * Allocate besti for bestlen & return that.
+ * Pick besti for bestlen & return that.
*/
-allocate:
- error = xfs_rtallocate_range(args, besti, bestlen);
- if (error)
- return error;
*len = bestlen;
*rtx = besti;
return 0;
+nospace:
+ /* Allocation failed. Set *nextp to the next block to try. */
+ *nextp = next;
+ return -ENOSPC;
}
/*
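
xfs_rtalloc_align_len() is a plain round-down to the allocation product
factor, and both callers above re-check minlen afterwards because the
rounding can shrink a candidate below the minimum. A worked standalone
example with hypothetical values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t xfs_rtxlen_t;

/* Round rtxlen down to a multiple of prod (no-op for prod <= 1). */
static xfs_rtxlen_t rtalloc_align_len(xfs_rtxlen_t rtxlen, xfs_rtxlen_t prod)
{
	if (prod > 1)
		rtxlen -= rtxlen % prod;
	return rtxlen;
}

int main(void)
{
	xfs_rtxlen_t minlen = 8, prod = 4;
	xfs_rtxlen_t ok = rtalloc_align_len(13, prod);	/* 12 >= minlen */
	xfs_rtxlen_t bad = rtalloc_align_len(7, prod);	/* 4 < minlen */

	printf("13 -> %u (usable: %d)\n", ok, ok >= minlen);
	printf(" 7 -> %u (usable: %d)\n", bad, bad >= minlen);
	return 0;
}
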
@@ -339,45 +341,46 @@ xfs_rtallocate_extent_exact(
xfs_rtxlen_t prod, /* extent product factor */
xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
- int error;
- xfs_rtxlen_t i; /* extent length trimmed due to prod */
- int isfree; /* extent is free */
+ struct xfs_mount *mp = args->mp;
xfs_rtxnum_t next; /* next rtext to try (dummy) */
+ xfs_rtxlen_t alloclen; /* candidate length */
+ xfs_rtxlen_t scanlen; /* number of free rtx to look for */
+ int isfree; /* extent is free */
+ int error;
ASSERT(minlen % prod == 0);
ASSERT(maxlen % prod == 0);
- /*
- * Check if the range in question (for maxlen) is free.
- */
- error = xfs_rtcheck_range(args, start, maxlen, 1, &next, &isfree);
+
+ /* Make sure we don't run off the end of the rt volume. */
+ scanlen = xfs_rtallocate_clamp_len(mp, start, maxlen, prod);
+ if (scanlen < minlen)
+ return -ENOSPC;
+
+ /* Check if the range in question (for scanlen) is free. */
+ error = xfs_rtcheck_range(args, start, scanlen, 1, &next, &isfree);
if (error)
return error;
- if (!isfree) {
- /*
- * If not, allocate what there is, if it's at least minlen.
- */
- maxlen = next - start;
- if (maxlen < minlen)
- return -ENOSPC;
-
- /*
- * Trim off tail of extent, if prod is specified.
- */
- if (prod > 1 && (i = maxlen % prod)) {
- maxlen -= i;
- if (maxlen < minlen)
- return -ENOSPC;
- }
+ if (isfree) {
+		/* start to start + scanlen is all free; allocate it. */
+ *len = scanlen;
+ *rtx = start;
+ return 0;
}
/*
- * Allocate what we can and return it.
+ * If not, allocate what there is, if it's at least minlen.
*/
- error = xfs_rtallocate_range(args, start, maxlen);
- if (error)
- return error;
- *len = maxlen;
+ alloclen = next - start;
+ if (alloclen < minlen)
+ return -ENOSPC;
+
+ /* Ensure alloclen is a multiple of prod. */
+ alloclen = xfs_rtalloc_align_len(alloclen, prod);
+ if (alloclen < minlen)
+ return -ENOSPC;
+
+ *len = alloclen;
*rtx = start;
return 0;
}
@@ -416,11 +419,6 @@ xfs_rtallocate_extent_near(
if (start >= mp->m_sb.sb_rextents)
start = mp->m_sb.sb_rextents - 1;
- /* Make sure we don't run off the end of the rt volume. */
- maxlen = xfs_rtallocate_clamp_len(mp, start, maxlen, prod);
- if (maxlen < minlen)
- return -ENOSPC;
-
/*
* Try the exact allocation first.
*/
@@ -429,7 +427,6 @@ xfs_rtallocate_extent_near(
if (error != -ENOSPC)
return error;
-
bbno = xfs_rtx_to_rbmblock(mp, start);
i = 0;
j = -1;
@@ -552,11 +549,11 @@ xfs_rtalloc_sumlevel(
xfs_rtxnum_t *rtx) /* out: start rtext allocated */
{
xfs_fileoff_t i; /* bitmap block number */
+ int error;
for (i = 0; i < args->mp->m_sb.sb_rbmblocks; i++) {
xfs_suminfo_t sum; /* summary information for extents */
xfs_rtxnum_t n; /* next rtext to be tried */
- int error;
error = xfs_rtget_summary(args, l, i, &sum);
if (error)
@@ -652,136 +649,20 @@ xfs_rtallocate_extent_size(
return -ENOSPC;
}
-/*
- * Allocate space to the bitmap or summary file, and zero it, for growfs.
- */
-STATIC int
-xfs_growfs_rt_alloc(
- struct xfs_mount *mp, /* file system mount point */
- xfs_extlen_t oblocks, /* old count of blocks */
- xfs_extlen_t nblocks, /* new count of blocks */
- struct xfs_inode *ip) /* inode (bitmap/summary) */
-{
- xfs_fileoff_t bno; /* block number in file */
- struct xfs_buf *bp; /* temporary buffer for zeroing */
- xfs_daddr_t d; /* disk block address */
- int error; /* error return value */
- xfs_fsblock_t fsbno; /* filesystem block for bno */
- struct xfs_bmbt_irec map; /* block map output */
- int nmap; /* number of block maps */
- int resblks; /* space reservation */
- enum xfs_blft buf_type;
- struct xfs_trans *tp;
-
- if (ip == mp->m_rsumip)
- buf_type = XFS_BLFT_RTSUMMARY_BUF;
- else
- buf_type = XFS_BLFT_RTBITMAP_BUF;
-
- /*
- * Allocate space to the file, as necessary.
- */
- while (oblocks < nblocks) {
- resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
- /*
- * Reserve space & log for one extent added to the file.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtalloc, resblks,
- 0, 0, &tp);
- if (error)
- return error;
- /*
- * Lock the inode.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
- XFS_IEXT_ADD_NOSPLIT_CNT);
- if (error)
- goto out_trans_cancel;
-
- /*
- * Allocate blocks to the bitmap file.
- */
- nmap = 1;
- error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, 0, &map, &nmap);
- if (error)
- goto out_trans_cancel;
- /*
- * Free any blocks freed up in the transaction, then commit.
- */
- error = xfs_trans_commit(tp);
- if (error)
- return error;
- /*
- * Now we need to clear the allocated blocks.
- * Do this one block per transaction, to keep it simple.
- */
- for (bno = map.br_startoff, fsbno = map.br_startblock;
- bno < map.br_startoff + map.br_blockcount;
- bno++, fsbno++) {
- /*
- * Reserve log for one block zeroing.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero,
- 0, 0, 0, &tp);
- if (error)
- return error;
- /*
- * Lock the bitmap inode.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- /*
- * Get a buffer for the block.
- */
- d = XFS_FSB_TO_DADDR(mp, fsbno);
- error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize, 0, &bp);
- if (error)
- goto out_trans_cancel;
-
- xfs_trans_buf_set_type(tp, bp, buf_type);
- bp->b_ops = &xfs_rtbuf_ops;
- memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
- xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
- /*
- * Commit the transaction.
- */
- error = xfs_trans_commit(tp);
- if (error)
- return error;
- }
- /*
- * Go on to the next extent, if any.
- */
- oblocks = map.br_startoff + map.br_blockcount;
- }
-
- return 0;
-
-out_trans_cancel:
- xfs_trans_cancel(tp);
- return error;
-}
-
-static void
+static int
xfs_alloc_rsum_cache(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_extlen_t rbmblocks) /* number of rt bitmap blocks */
+ struct xfs_mount *mp,
+ xfs_extlen_t rbmblocks)
{
/*
* The rsum cache is initialized to the maximum value, which is
* trivially an upper bound on the maximum level with any free extents.
- * We can continue without the cache if it couldn't be allocated.
*/
mp->m_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
- if (mp->m_rsum_cache)
- memset(mp->m_rsum_cache, -1, rbmblocks);
- else
- xfs_warn(mp, "could not allocate realtime summary cache");
+ if (!mp->m_rsum_cache)
+ return -ENOMEM;
+ memset(mp->m_rsum_cache, -1, rbmblocks);
+ return 0;
}
/*
@@ -817,9 +698,168 @@ out_iolock:
return error;
}
+static int
+xfs_growfs_rt_bmblock(
+ struct xfs_mount *mp,
+ xfs_rfsblock_t nrblocks,
+ xfs_agblock_t rextsize,
+ xfs_fileoff_t bmbno)
+{
+ struct xfs_inode *rbmip = mp->m_rbmip;
+ struct xfs_inode *rsumip = mp->m_rsumip;
+ struct xfs_rtalloc_args args = {
+ .mp = mp,
+ };
+ struct xfs_rtalloc_args nargs = {
+ };
+ struct xfs_mount *nmp;
+ xfs_rfsblock_t nrblocks_step;
+ xfs_rtbxlen_t freed_rtx;
+ int error;
+
+ nrblocks_step = (bmbno + 1) * NBBY * mp->m_sb.sb_blocksize * rextsize;
+
+ nmp = nargs.mp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
+ if (!nmp)
+ return -ENOMEM;
+
+ /*
+ * Calculate new sb and mount fields for this round.
+ */
+ nmp->m_sb.sb_rextsize = rextsize;
+ xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb);
+ nmp->m_sb.sb_rbmblocks = bmbno + 1;
+ nmp->m_sb.sb_rblocks = min(nrblocks, nrblocks_step);
+ nmp->m_sb.sb_rextents = xfs_rtb_to_rtx(nmp, nmp->m_sb.sb_rblocks);
+ nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
+ nmp->m_rsumlevels = nmp->m_sb.sb_rextslog + 1;
+ nmp->m_rsumblocks = xfs_rtsummary_blockcount(mp, nmp->m_rsumlevels,
+ nmp->m_sb.sb_rbmblocks);
+
+ /*
+	 * Recompute the growfsrt reservation from the new rsumsize, so that
+	 * the transaction below uses the new, potentially larger value.
+	 */
+ xfs_trans_resv_calc(nmp, &nmp->m_resv);
+ error = xfs_trans_alloc(mp, &M_RES(nmp)->tr_growrtfree, 0, 0, 0,
+ &args.tp);
+ if (error)
+ goto out_free;
+ nargs.tp = args.tp;
+
+ xfs_rtbitmap_lock(mp);
+ xfs_rtbitmap_trans_join(args.tp);
+
+ /*
+ * Update the bitmap inode's size ondisk and incore. We need to update
+ * the incore size so that inode inactivation won't punch what it thinks
+ * are "posteof" blocks.
+ */
+ rbmip->i_disk_size = nmp->m_sb.sb_rbmblocks * nmp->m_sb.sb_blocksize;
+ i_size_write(VFS_I(rbmip), rbmip->i_disk_size);
+ xfs_trans_log_inode(args.tp, rbmip, XFS_ILOG_CORE);
+
+ /*
+ * Update the summary inode's size. We need to update the incore size
+ * so that inode inactivation won't punch what it thinks are "posteof"
+ * blocks.
+ */
+ rsumip->i_disk_size = nmp->m_rsumblocks * nmp->m_sb.sb_blocksize;
+ i_size_write(VFS_I(rsumip), rsumip->i_disk_size);
+ xfs_trans_log_inode(args.tp, rsumip, XFS_ILOG_CORE);
+
+ /*
+ * Copy summary data from old to new sizes when the real size (not
+ * block-aligned) changes.
+ */
+ if (mp->m_sb.sb_rbmblocks != nmp->m_sb.sb_rbmblocks ||
+ mp->m_rsumlevels != nmp->m_rsumlevels) {
+ error = xfs_rtcopy_summary(&args, &nargs);
+ if (error)
+ goto out_cancel;
+ }
+
+ /*
+ * Update superblock fields.
+ */
+ if (nmp->m_sb.sb_rextsize != mp->m_sb.sb_rextsize)
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSIZE,
+ nmp->m_sb.sb_rextsize - mp->m_sb.sb_rextsize);
+ if (nmp->m_sb.sb_rbmblocks != mp->m_sb.sb_rbmblocks)
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBMBLOCKS,
+ nmp->m_sb.sb_rbmblocks - mp->m_sb.sb_rbmblocks);
+ if (nmp->m_sb.sb_rblocks != mp->m_sb.sb_rblocks)
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_RBLOCKS,
+ nmp->m_sb.sb_rblocks - mp->m_sb.sb_rblocks);
+ if (nmp->m_sb.sb_rextents != mp->m_sb.sb_rextents)
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTENTS,
+ nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents);
+ if (nmp->m_sb.sb_rextslog != mp->m_sb.sb_rextslog)
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_REXTSLOG,
+ nmp->m_sb.sb_rextslog - mp->m_sb.sb_rextslog);
+
+ /*
+ * Free the new extent.
+ */
+ freed_rtx = nmp->m_sb.sb_rextents - mp->m_sb.sb_rextents;
+ error = xfs_rtfree_range(&nargs, mp->m_sb.sb_rextents, freed_rtx);
+ xfs_rtbuf_cache_relse(&nargs);
+ if (error)
+ goto out_cancel;
+
+ /*
+ * Mark more blocks free in the superblock.
+ */
+ xfs_trans_mod_sb(args.tp, XFS_TRANS_SB_FREXTENTS, freed_rtx);
+
+ /*
+ * Update the calculated values in the real mount structure.
+ */
+ mp->m_rsumlevels = nmp->m_rsumlevels;
+ mp->m_rsumblocks = nmp->m_rsumblocks;
+ xfs_mount_sb_set_rextsize(mp, &mp->m_sb);
+
+ /*
+ * Recompute the growfsrt reservation from the new rsumsize.
+ */
+ xfs_trans_resv_calc(mp, &mp->m_resv);
+
+ error = xfs_trans_commit(args.tp);
+ if (error)
+ goto out_free;
+
+ /*
+ * Ensure the mount RT feature flag is now set.
+ */
+ mp->m_features |= XFS_FEAT_REALTIME;
+
+ kfree(nmp);
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(args.tp);
+out_free:
+ kfree(nmp);
+ return error;
+}
+
/*
- * Visible (exported) functions.
+ * Calculate the last rbmblock currently used.
+ *
+ * This also deals with the case where there were no rtextents before.
*/
+static xfs_fileoff_t
+xfs_last_rt_bmblock(
+ struct xfs_mount *mp)
+{
+ xfs_fileoff_t bmbno = mp->m_sb.sb_rbmblocks;
+
+ /* Skip the current block if it is exactly full. */
+ if (xfs_rtx_to_rbmword(mp, mp->m_sb.sb_rextents) != 0)
+ bmbno--;
+ return bmbno;
+}
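
xfs_last_rt_bmblock() picks the first bitmap block the growfs loop must
rewrite: the last existing block if it is only partially used, otherwise
the next brand-new one, and block 0 when there were no rtextents at all.
A standalone sketch of that arithmetic at bit granularity, matching the
old inline expression (NBBY * blocksize rtextents per bitmap block):

#include <stdint.h>
#include <stdio.h>

#define NBBY	8

/* Bits, hence rtextents, tracked by one bitmap block. */
static uint64_t rtx_per_rbmblock(uint32_t blocksize)
{
	return (uint64_t)NBBY * blocksize;
}

static uint64_t last_rt_bmblock(uint64_t rextents, uint32_t blocksize,
				uint64_t rbmblocks)
{
	uint64_t bmbno = rbmblocks;

	if (rextents % rtx_per_rbmblock(blocksize) != 0)
		bmbno--;	/* last block is partially used */
	return bmbno;
}

int main(void)
{
	/* 4k blocks track 32768 rtextents per bitmap block. */
	printf("%llu\n", (unsigned long long)last_rt_bmblock(32768, 4096, 1));
	printf("%llu\n", (unsigned long long)last_rt_bmblock(40000, 4096, 2));
	printf("%llu\n", (unsigned long long)last_rt_bmblock(0, 4096, 0));
	return 0;
}
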
/*
* Grow the realtime area of the filesystem.
@@ -832,23 +872,14 @@ xfs_growfs_rt(
xfs_fileoff_t bmbno; /* bitmap block number */
struct xfs_buf *bp; /* temporary buffer */
int error; /* error return value */
- xfs_mount_t *nmp; /* new (fake) mount structure */
- xfs_rfsblock_t nrblocks; /* new number of realtime blocks */
xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */
xfs_rtxnum_t nrextents; /* new number of realtime extents */
- uint8_t nrextslog; /* new log2 of sb_rextents */
xfs_extlen_t nrsumblocks; /* new number of summary blocks */
- uint nrsumlevels; /* new rt summary levels */
- uint nrsumsize; /* new size of rt summary, bytes */
- xfs_sb_t *nsbp; /* new superblock */
xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */
xfs_extlen_t rsumblocks; /* current number of rt summary blks */
- xfs_sb_t *sbp; /* old superblock */
uint8_t *rsum_cache; /* old summary cache */
xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
- sbp = &mp->m_sb;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -867,11 +898,10 @@ xfs_growfs_rt(
goto out_unlock;
/* Shrink not supported. */
- if (in->newblocks <= sbp->sb_rblocks)
+ if (in->newblocks <= mp->m_sb.sb_rblocks)
goto out_unlock;
-
/* Can only change rt extent size when adding rt volume. */
- if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
+ if (mp->m_sb.sb_rblocks > 0 && in->extsize != mp->m_sb.sb_rextsize)
goto out_unlock;
/* Range check the extent size. */
@@ -884,15 +914,14 @@ xfs_growfs_rt(
if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp))
goto out_unlock;
- nrblocks = in->newblocks;
- error = xfs_sb_validate_fsb_count(sbp, nrblocks);
+ error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
if (error)
goto out_unlock;
/*
* Read in the last block of the device, make sure it exists.
*/
error = xfs_buf_read_uncached(mp->m_rtdev_targp,
- XFS_FSB_TO_BB(mp, nrblocks - 1),
+ XFS_FSB_TO_BB(mp, in->newblocks - 1),
XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
if (error)
goto out_unlock;
@@ -901,17 +930,15 @@ xfs_growfs_rt(
/*
* Calculate new parameters. These are the final values to be reached.
*/
- nrextents = nrblocks;
- do_div(nrextents, in->extsize);
- if (!xfs_validate_rtextents(nrextents)) {
+ nrextents = div_u64(in->newblocks, in->extsize);
+ if (nrextents == 0) {
error = -EINVAL;
goto out_unlock;
}
nrbmblocks = xfs_rtbitmap_blockcount(mp, nrextents);
- nrextslog = xfs_compute_rextslog(nrextents);
- nrsumlevels = nrextslog + 1;
- nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels, nrbmblocks);
- nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
+ nrsumblocks = xfs_rtsummary_blockcount(mp,
+ xfs_compute_rextslog(nrextents) + 1, nrbmblocks);
+
/*
* New summary size can't be more than half the size of
* the log. This prevents us from getting a log overflow,
@@ -931,154 +958,29 @@ xfs_growfs_rt(
/*
* Allocate space to the bitmap and summary files, as necessary.
*/
- error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip);
+ error = xfs_rtfile_initialize_blocks(mp->m_rbmip, rbmblocks,
+ nrbmblocks, NULL);
if (error)
goto out_unlock;
- error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip);
+ error = xfs_rtfile_initialize_blocks(mp->m_rsumip, rsumblocks,
+ nrsumblocks, NULL);
if (error)
goto out_unlock;
rsum_cache = mp->m_rsum_cache;
- if (nrbmblocks != sbp->sb_rbmblocks)
- xfs_alloc_rsum_cache(mp, nrbmblocks);
-
- /*
- * Allocate a new (fake) mount/sb.
- */
- nmp = kmalloc(sizeof(*nmp), GFP_KERNEL | __GFP_NOFAIL);
- /*
- * Loop over the bitmap blocks.
- * We will do everything one bitmap block at a time.
- * Skip the current block if it is exactly full.
- * This also deals with the case where there were no rtextents before.
- */
- for (bmbno = sbp->sb_rbmblocks -
- ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
- bmbno < nrbmblocks;
- bmbno++) {
- struct xfs_rtalloc_args args = {
- .mp = mp,
- };
- struct xfs_rtalloc_args nargs = {
- .mp = nmp,
- };
- struct xfs_trans *tp;
- xfs_rfsblock_t nrblocks_step;
-
- *nmp = *mp;
- nsbp = &nmp->m_sb;
- /*
- * Calculate new sb and mount fields for this round.
- */
- nsbp->sb_rextsize = in->extsize;
- nmp->m_rtxblklog = -1; /* don't use shift or masking */
- nsbp->sb_rbmblocks = bmbno + 1;
- nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
- nsbp->sb_rextsize;
- nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
- nsbp->sb_rextents = xfs_rtb_to_rtx(nmp, nsbp->sb_rblocks);
- ASSERT(nsbp->sb_rextents != 0);
- nsbp->sb_rextslog = xfs_compute_rextslog(nsbp->sb_rextents);
- nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
- nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels,
- nsbp->sb_rbmblocks);
- nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
- /* recompute growfsrt reservation from new rsumsize */
- xfs_trans_resv_calc(nmp, &nmp->m_resv);
-
- /*
- * Start a transaction, get the log reservation.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtfree, 0, 0, 0,
- &tp);
+ if (nrbmblocks != mp->m_sb.sb_rbmblocks) {
+ error = xfs_alloc_rsum_cache(mp, nrbmblocks);
if (error)
- break;
- args.tp = tp;
- nargs.tp = tp;
-
- /*
- * Lock out other callers by grabbing the bitmap and summary
- * inode locks and joining them to the transaction.
- */
- xfs_rtbitmap_lock(tp, mp);
- /*
- * Update the bitmap inode's size ondisk and incore. We need
- * to update the incore size so that inode inactivation won't
- * punch what it thinks are "posteof" blocks.
- */
- mp->m_rbmip->i_disk_size =
- nsbp->sb_rbmblocks * nsbp->sb_blocksize;
- i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_disk_size);
- xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
- /*
- * Update the summary inode's size. We need to update the
- * incore size so that inode inactivation won't punch what it
- * thinks are "posteof" blocks.
- */
- mp->m_rsumip->i_disk_size = nmp->m_rsumsize;
- i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_disk_size);
- xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
- /*
- * Copy summary data from old to new sizes.
- * Do this when the real size (not block-aligned) changes.
- */
- if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks ||
- mp->m_rsumlevels != nmp->m_rsumlevels) {
- error = xfs_rtcopy_summary(&args, &nargs);
- if (error)
- goto error_cancel;
- }
- /*
- * Update superblock fields.
- */
- if (nsbp->sb_rextsize != sbp->sb_rextsize)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSIZE,
- nsbp->sb_rextsize - sbp->sb_rextsize);
- if (nsbp->sb_rbmblocks != sbp->sb_rbmblocks)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBMBLOCKS,
- nsbp->sb_rbmblocks - sbp->sb_rbmblocks);
- if (nsbp->sb_rblocks != sbp->sb_rblocks)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_RBLOCKS,
- nsbp->sb_rblocks - sbp->sb_rblocks);
- if (nsbp->sb_rextents != sbp->sb_rextents)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTENTS,
- nsbp->sb_rextents - sbp->sb_rextents);
- if (nsbp->sb_rextslog != sbp->sb_rextslog)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_REXTSLOG,
- nsbp->sb_rextslog - sbp->sb_rextslog);
- /*
- * Free new extent.
- */
- error = xfs_rtfree_range(&nargs, sbp->sb_rextents,
- nsbp->sb_rextents - sbp->sb_rextents);
- xfs_rtbuf_cache_relse(&nargs);
- if (error) {
-error_cancel:
- xfs_trans_cancel(tp);
- break;
- }
- /*
- * Mark more blocks free in the superblock.
- */
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS,
- nsbp->sb_rextents - sbp->sb_rextents);
- /*
- * Update mp values into the real mp structure.
- */
- mp->m_rsumlevels = nrsumlevels;
- mp->m_rsumsize = nrsumsize;
- /* recompute growfsrt reservation from new rsumsize */
- xfs_trans_resv_calc(mp, &mp->m_resv);
+ goto out_unlock;
+ }
- error = xfs_trans_commit(tp);
+ /* Initialize the free space bitmap one bitmap block at a time. */
+ for (bmbno = xfs_last_rt_bmblock(mp); bmbno < nrbmblocks; bmbno++) {
+ error = xfs_growfs_rt_bmblock(mp, in->newblocks, in->extsize,
+ bmbno);
if (error)
- break;
-
- /* Ensure the mount RT feature flag is now set. */
- mp->m_features |= XFS_FEAT_REALTIME;
+ goto out_free;
}
- if (error)
- goto out_free;
if (old_rextsize != in->extsize) {
error = xfs_growfs_rt_fixup_extsize(mp);
@@ -1091,11 +993,6 @@ error_cancel:
out_free:
/*
- * Free the fake mp structure.
- */
- kfree(nmp);
-
- /*
* If we had to allocate a new rsum_cache, we either need to free the
* old one (if we succeeded) or free the new one and restore the old one
* (if there was an error).
@@ -1124,7 +1021,6 @@ xfs_rtmount_init(
struct xfs_buf *bp; /* buffer for last block of subvolume */
struct xfs_sb *sbp; /* filesystem superblock copy in mount */
xfs_daddr_t d; /* address of last block of subvolume */
- unsigned int rsumblocks;
int error;
sbp = &mp->m_sb;
@@ -1136,9 +1032,8 @@ xfs_rtmount_init(
return -ENODEV;
}
mp->m_rsumlevels = sbp->sb_rextslog + 1;
- rsumblocks = xfs_rtsummary_blockcount(mp, mp->m_rsumlevels,
+ mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, mp->m_rsumlevels,
mp->m_sb.sb_rbmblocks);
- mp->m_rsumsize = XFS_FSB_TO_B(mp, rsumblocks);
mp->m_rbmip = mp->m_rsumip = NULL;
/*
* Check that the realtime section is an ok size.
@@ -1268,7 +1163,9 @@ xfs_rtmount_inodes(
if (error)
goto out_rele_summary;
- xfs_alloc_rsum_cache(mp, sbp->sb_rbmblocks);
+ error = xfs_alloc_rsum_cache(mp, sbp->sb_rbmblocks);
+ if (error)
+ goto out_rele_summary;
return 0;
out_rele_summary:
@@ -1296,12 +1193,11 @@ xfs_rtunmount_inodes(
* of rtextents and the fraction.
* The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
*/
-static int
+static xfs_rtxnum_t
xfs_rtpick_extent(
xfs_mount_t *mp, /* file system mount point */
xfs_trans_t *tp, /* transaction pointer */
- xfs_rtxlen_t len, /* allocation length (rtextents) */
- xfs_rtxnum_t *pick) /* result rt extent */
+ xfs_rtxlen_t len) /* allocation length (rtextents) */
{
xfs_rtxnum_t b; /* result rtext */
int log2; /* log of sequence number */
@@ -1332,8 +1228,7 @@ xfs_rtpick_extent(
ts.tv_sec = seq + 1;
inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
- *pick = b;
- return 0;
+ return b;
}
static void
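
The fraction sequence described in the comment above falls out of treating
the allocation sequence number as a binary fraction: the highest set bit
gives the denominator's log2 and the residue gives the odd numerator. A
standalone sketch reproducing 0, 1/2, 1/4, 3/4, 1/8, 3/8, 5/8, 7/8, ...
over a hypothetical 1024-rtextent volume, with xfs_highbit64() modeled via
__builtin_clzll():

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, or -1 for zero. */
static int highbit64(uint64_t v)
{
	return v ? 63 - __builtin_clzll(v) : -1;
}

static uint64_t pick_extent(uint64_t rextents, uint64_t seq)
{
	int log2 = highbit64(seq);
	uint64_t resid;

	if (log2 == -1)
		return 0;
	resid = seq - (1ULL << log2);
	return (rextents * ((resid << 1) + 1)) >> (log2 + 1);
}

int main(void)
{
	for (uint64_t seq = 0; seq < 8; seq++)
		printf("seq %llu -> start %llu of 1024\n",
		       (unsigned long long)seq,
		       (unsigned long long)pick_extent(1024, seq));
	return 0;
}
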
@@ -1365,36 +1260,109 @@ xfs_rtalloc_align_minmax(
*raminlen = newminlen;
}
-int
-xfs_bmap_rtalloc(
- struct xfs_bmalloca *ap)
+static int
+xfs_rtallocate(
+ struct xfs_trans *tp,
+ xfs_rtblock_t bno_hint,
+ xfs_rtxlen_t minlen,
+ xfs_rtxlen_t maxlen,
+ xfs_rtxlen_t prod,
+ bool wasdel,
+ bool initial_user_data,
+ bool *rtlocked,
+ xfs_rtblock_t *bno,
+ xfs_extlen_t *blen)
+{
+ struct xfs_rtalloc_args args = {
+ .mp = tp->t_mountp,
+ .tp = tp,
+ };
+ xfs_rtxnum_t start = 0;
+ xfs_rtxnum_t rtx;
+ xfs_rtxlen_t len = 0;
+ int error = 0;
+
+ /*
+ * Lock out modifications to both the RT bitmap and summary inodes.
+ */
+ if (!*rtlocked) {
+ xfs_rtbitmap_lock(args.mp);
+ xfs_rtbitmap_trans_join(tp);
+ *rtlocked = true;
+ }
+
+ /*
+ * For an allocation to an empty file at offset 0, pick an extent that
+ * will space things out in the rt area.
+ */
+ if (bno_hint)
+ start = xfs_rtb_to_rtx(args.mp, bno_hint);
+ else if (initial_user_data)
+ start = xfs_rtpick_extent(args.mp, tp, maxlen);
+
+ if (start) {
+ error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
+ &len, prod, &rtx);
+ /*
+ * If we can't allocate near a specific rt extent, try again
+ * without locality criteria.
+ */
+ if (error == -ENOSPC) {
+ xfs_rtbuf_cache_relse(&args);
+ error = 0;
+ }
+ }
+
+ if (!error) {
+ error = xfs_rtallocate_extent_size(&args, minlen, maxlen, &len,
+ prod, &rtx);
+ }
+
+ if (error)
+ goto out_release;
+
+ error = xfs_rtallocate_range(&args, rtx, len);
+ if (error)
+ goto out_release;
+
+ xfs_trans_mod_sb(tp, wasdel ?
+ XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
+ -(long)len);
+ *bno = xfs_rtx_to_rtb(args.mp, rtx);
+ *blen = xfs_rtxlen_to_extlen(args.mp, len);
+
+out_release:
+ xfs_rtbuf_cache_relse(&args);
+ return error;
+}
+
+static int
+xfs_rtallocate_align(
+ struct xfs_bmalloca *ap,
+ xfs_rtxlen_t *ralen,
+ xfs_rtxlen_t *raminlen,
+ xfs_rtxlen_t *prod,
+ bool *noalign)
{
struct xfs_mount *mp = ap->ip->i_mount;
xfs_fileoff_t orig_offset = ap->offset;
- xfs_rtxnum_t start; /* allocation hint rtextent no */
- xfs_rtxnum_t rtx; /* actually allocated rtextent no */
- xfs_rtxlen_t prod = 0; /* product factor for allocators */
- xfs_extlen_t mod = 0; /* product factor for allocators */
- xfs_rtxlen_t ralen = 0; /* realtime allocation length */
- xfs_extlen_t align; /* minimum allocation alignment */
- xfs_extlen_t orig_length = ap->length;
xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
- xfs_rtxlen_t raminlen;
- bool rtlocked = false;
- bool ignore_locality = false;
- struct xfs_rtalloc_args args = {
- .mp = mp,
- .tp = ap->tp,
- };
+ xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_extlen_t mod; /* product factor for allocators */
int error;
- align = xfs_get_extsz_hint(ap->ip);
- if (!align)
- align = 1;
-retry:
- error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
- align, 1, ap->eof, 0,
- ap->conv, &ap->offset, &ap->length);
+ if (*noalign) {
+ align = mp->m_sb.sb_rextsize;
+ } else {
+ align = xfs_get_extsz_hint(ap->ip);
+ if (!align)
+ align = 1;
+ if (align == mp->m_sb.sb_rextsize)
+ *noalign = true;
+ }
+
+ error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 1,
+ ap->eof, 0, ap->conv, &ap->offset, &ap->length);
if (error)
return error;
ASSERT(ap->length);
@@ -1418,59 +1386,55 @@ retry:
* XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
* adjust the starting point to match it.
*/
- ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
- raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
- ASSERT(raminlen > 0);
- ASSERT(raminlen <= ralen);
-
- /*
- * Lock out modifications to both the RT bitmap and summary inodes
- */
- if (!rtlocked) {
- xfs_rtbitmap_lock(ap->tp, mp);
- rtlocked = true;
- }
-
- if (ignore_locality) {
- start = 0;
- } else if (xfs_bmap_adjacent(ap)) {
- start = xfs_rtb_to_rtx(mp, ap->blkno);
- } else if (ap->datatype & XFS_ALLOC_INITIAL_USER_DATA) {
- /*
- * If it's an allocation to an empty file at offset 0, pick an
- * extent that will space things out in the rt area.
- */
- error = xfs_rtpick_extent(mp, ap->tp, ralen, &start);
- if (error)
- return error;
- } else {
- start = 0;
- }
+ *ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
+ *raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
+ ASSERT(*raminlen > 0);
+ ASSERT(*raminlen <= *ralen);
/*
* Only bother calculating a real prod factor if offset & length are
* perfectly aligned, otherwise it will just get us in trouble.
*/
div_u64_rem(ap->offset, align, &mod);
- if (mod || ap->length % align) {
- prod = 1;
- } else {
- prod = xfs_extlen_to_rtxlen(mp, align);
- if (prod > 1)
- xfs_rtalloc_align_minmax(&raminlen, &ralen, &prod);
- }
+ if (mod || ap->length % align)
+ *prod = 1;
+ else
+ *prod = xfs_extlen_to_rtxlen(mp, align);
- if (start) {
- error = xfs_rtallocate_extent_near(&args, start, raminlen,
- ralen, &ralen, prod, &rtx);
- } else {
- error = xfs_rtallocate_extent_size(&args, raminlen,
- ralen, &ralen, prod, &rtx);
- }
- xfs_rtbuf_cache_relse(&args);
+ if (*prod > 1)
+ xfs_rtalloc_align_minmax(raminlen, ralen, prod);
+ return 0;
+}
+int
+xfs_bmap_rtalloc(
+ struct xfs_bmalloca *ap)
+{
+ xfs_fileoff_t orig_offset = ap->offset;
+ xfs_rtxlen_t prod = 0; /* product factor for allocators */
+ xfs_rtxlen_t ralen = 0; /* realtime allocation length */
+ xfs_rtblock_t bno_hint = NULLRTBLOCK;
+ xfs_extlen_t orig_length = ap->length;
+ xfs_rtxlen_t raminlen;
+ bool rtlocked = false;
+ bool noalign = false;
+ bool initial_user_data =
+ ap->datatype & XFS_ALLOC_INITIAL_USER_DATA;
+ int error;
+
+retry:
+ error = xfs_rtallocate_align(ap, &ralen, &raminlen, &prod, &noalign);
+ if (error)
+ return error;
+
+ if (xfs_bmap_adjacent(ap))
+ bno_hint = ap->blkno;
+
+ error = xfs_rtallocate(ap->tp, bno_hint, raminlen, ralen, prod,
+ ap->wasdel, initial_user_data, &rtlocked,
+ &ap->blkno, &ap->length);
if (error == -ENOSPC) {
- if (align > mp->m_sb.sb_rextsize) {
+ if (!noalign) {
/*
* We previously enlarged the request length to try to
* satisfy an extent size hint. The allocator didn't
@@ -1480,16 +1444,7 @@ retry:
*/
ap->offset = orig_offset;
ap->length = orig_length;
- minlen = align = mp->m_sb.sb_rextsize;
- goto retry;
- }
-
- if (!ignore_locality && start != 0) {
- /*
- * If we can't allocate near a specific rt extent, try
- * again without locality criteria.
- */
- ignore_locality = true;
+ noalign = true;
goto retry;
}
@@ -1500,11 +1455,6 @@ retry:
if (error)
return error;
- xfs_trans_mod_sb(ap->tp, ap->wasdel ?
- XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
- -(long)ralen);
- ap->blkno = xfs_rtx_to_rtb(mp, rtx);
- ap->length = xfs_rtxlen_to_extlen(mp, ralen);
xfs_bmap_alloc_account(ap);
return 0;
}
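
The refactored xfs_bmap_rtalloc() above folds the old ignore_locality retry into
xfs_rtallocate() and keeps a single noalign retry that drops the extent size
hint when the aligned attempt returns ENOSPC. A minimal userspace sketch of
that fallback pattern follows; all names (try_alloc, rtalloc) and the fake
allocator are illustrative stand-ins, not the kernel functions:

	/*
	 * Sketch of the two-pass fallback: first try an allocation aligned
	 * to the extent size hint, and if the allocator reports ENOSPC,
	 * retry once aligned only to the rt extent size.
	 */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Pretend allocator: fails large aligned requests to force a retry. */
	static int try_alloc(unsigned len, unsigned align, unsigned *out)
	{
		if (align > 4 && len > 64)
			return -ENOSPC;	/* no contiguous aligned space left */
		*out = 4096;		/* fake start block */
		return 0;
	}

	static int rtalloc(unsigned len, unsigned hint_align, unsigned rextsize)
	{
		unsigned align = hint_align ? hint_align : 1;
		bool noalign = (align == rextsize);
		unsigned bno;
		int error;

	retry:
		error = try_alloc(len, noalign ? rextsize : align, &bno);
		if (error == -ENOSPC && !noalign) {
			/* Drop the extent size hint and retry exactly once. */
			noalign = true;
			goto retry;
		}
		if (error)
			return error;
		printf("allocated at %u (noalign=%d)\n", bno, noalign);
		return 0;
	}

	int main(void)
	{
		return rtalloc(128, 16, 4) ? 1 : 0;
	}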
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 27e9f749c4c7..fbb3a1594c0d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -311,9 +311,9 @@ xfs_set_inode_alloc(
* the allocator to accommodate the request.
*/
if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
- set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
+ xfs_set_inode32(mp);
else
- clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
+ xfs_clear_inode32(mp);
for (index = 0; index < agcount; index++) {
struct xfs_perag *pag;
@@ -1511,7 +1511,7 @@ xfs_fs_fill_super(
* the newer fsopen/fsconfig API.
*/
if (fc->sb_flags & SB_RDONLY)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ xfs_set_readonly(mp);
if (fc->sb_flags & SB_DIRSYNC)
mp->m_features |= XFS_FEAT_DIRSYNC;
if (fc->sb_flags & SB_SYNCHRONOUS)
@@ -1638,16 +1638,28 @@ xfs_fs_fill_super(
goto out_free_sb;
}
- /*
- * Until this is fixed only page-sized or smaller data blocks work.
- */
if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
- xfs_warn(mp,
- "File system with blocksize %d bytes. "
- "Only pagesize (%ld) or less will currently work.",
+ size_t max_folio_size = mapping_max_folio_size_supported();
+
+ if (!xfs_has_crc(mp)) {
+ xfs_warn(mp,
+"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
mp->m_sb.sb_blocksize, PAGE_SIZE);
- error = -ENOSYS;
- goto out_free_sb;
+ error = -ENOSYS;
+ goto out_free_sb;
+ }
+
+ if (mp->m_sb.sb_blocksize > max_folio_size) {
+ xfs_warn(mp,
+"block size (%u bytes) not supported; Only block size (%zu) or less is supported",
+ mp->m_sb.sb_blocksize, max_folio_size);
+ error = -ENOSYS;
+ goto out_free_sb;
+ }
+
+ xfs_warn(mp,
+"EXPERIMENTAL: V5 Filesystem with Large Block Size (%d bytes) enabled.",
+ mp->m_sb.sb_blocksize);
}
/* Ensure this filesystem fits in the page cache limits */
@@ -1820,7 +1832,7 @@ xfs_remount_rw(
return -EINVAL;
}
- clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ xfs_clear_readonly(mp);
/*
* If this is the first remount to writeable state we might have some
@@ -1908,7 +1920,7 @@ xfs_remount_ro(
xfs_save_resvblks(mp);
xfs_log_clean(mp);
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ xfs_set_readonly(mp);
return 0;
}
@@ -2009,8 +2021,7 @@ static int xfs_init_fs_context(
return -ENOMEM;
spin_lock_init(&mp->m_sb_lock);
- INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
- spin_lock_init(&mp->m_perag_lock);
+ xa_init(&mp->m_perags);
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
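
The new mount-time checks above gate block sizes larger than the page size on
a V5 (CRC-enabled) superblock and on the largest folio the page cache can
support. A standalone sketch of that validation ladder; the constants are
stand-ins (the real upper bound comes from mapping_max_folio_size_supported()):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define MAX_FOLIO_SIZE	(1UL << 21)	/* stand-in value */

	static int check_blocksize(unsigned long blocksize, bool has_crc)
	{
		if (blocksize <= PAGE_SIZE)
			return 0;		/* always supported */
		if (!has_crc)
			return -ENOSYS;		/* V4: page-sized blocks only */
		if (blocksize > MAX_FOLIO_SIZE)
			return -ENOSYS;		/* too big even for large folios */
		fprintf(stderr, "EXPERIMENTAL: large block size (%lu bytes)\n",
			blocksize);
		return 0;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       check_blocksize(4096, false),	/* 0 */
		       check_blocksize(65536, false),	/* -ENOSYS */
		       check_blocksize(65536, true));	/* 0, with warning */
		return 0;
	}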
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 77f19e2f66e0..4252b07cd251 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -165,7 +165,7 @@ xfs_symlink(
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dialloc(&tp, dp->i_ino, S_IFLNK, &ino);
+ error = xfs_dialloc(&tp, &args, &ino);
if (!error)
error = xfs_icreate(tp, ino, &args, &du.ip);
if (error)
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 180ce697305a..ee9f0b1f548d 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -210,14 +210,14 @@ DEFINE_EVENT(xfs_perag_class, name, \
TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip), \
TP_ARGS(pag, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
-DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_hold);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_grab);
-DEFINE_PERAG_REF_EVENT(xfs_perag_grab_tag);
+DEFINE_PERAG_REF_EVENT(xfs_perag_grab_next_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_rele);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
+DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count);
TRACE_EVENT(xfs_inodegc_worker,
TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
@@ -4926,7 +4926,8 @@ DEFINE_INODE_ERROR_EVENT(xfs_exchrange_error);
{ XFS_EXCHANGE_RANGE_DRY_RUN, "DRY_RUN" }, \
{ XFS_EXCHANGE_RANGE_FILE1_WRITTEN, "F1_WRITTEN" }, \
{ __XFS_EXCHANGE_RANGE_UPD_CMTIME1, "CMTIME1" }, \
- { __XFS_EXCHANGE_RANGE_UPD_CMTIME2, "CMTIME2" }
+ { __XFS_EXCHANGE_RANGE_UPD_CMTIME2, "CMTIME2" }, \
+ { __XFS_EXCHANGE_RANGE_CHECK_FRESH2, "FRESH2" }
/* file exchange-range tracepoint class */
DECLARE_EVENT_CLASS(xfs_exchrange_class,
@@ -4986,6 +4987,60 @@ DEFINE_EXCHRANGE_EVENT(xfs_exchrange_prep);
DEFINE_EXCHRANGE_EVENT(xfs_exchrange_flush);
DEFINE_EXCHRANGE_EVENT(xfs_exchrange_mappings);
+TRACE_EVENT(xfs_exchrange_freshness,
+ TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip2),
+ TP_ARGS(fxr, ip2),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ip2_ino)
+ __field(long long, ip2_mtime)
+ __field(long long, ip2_ctime)
+ __field(int, ip2_mtime_nsec)
+ __field(int, ip2_ctime_nsec)
+
+ __field(xfs_ino_t, file2_ino)
+ __field(long long, file2_mtime)
+ __field(long long, file2_ctime)
+ __field(int, file2_mtime_nsec)
+ __field(int, file2_ctime_nsec)
+ ),
+ TP_fast_assign(
+ struct timespec64 ts64;
+ struct inode *inode2 = VFS_I(ip2);
+
+ __entry->dev = inode2->i_sb->s_dev;
+ __entry->ip2_ino = ip2->i_ino;
+
+ ts64 = inode_get_ctime(inode2);
+ __entry->ip2_ctime = ts64.tv_sec;
+ __entry->ip2_ctime_nsec = ts64.tv_nsec;
+
+ ts64 = inode_get_mtime(inode2);
+ __entry->ip2_mtime = ts64.tv_sec;
+ __entry->ip2_mtime_nsec = ts64.tv_nsec;
+
+ __entry->file2_ino = fxr->file2_ino;
+ __entry->file2_mtime = fxr->file2_mtime.tv_sec;
+ __entry->file2_ctime = fxr->file2_ctime.tv_sec;
+ __entry->file2_mtime_nsec = fxr->file2_mtime.tv_nsec;
+ __entry->file2_ctime_nsec = fxr->file2_ctime.tv_nsec;
+ ),
+ TP_printk("dev %d:%d "
+ "ino 0x%llx mtime %lld:%d ctime %lld:%d -> "
+ "file 0x%llx mtime %lld:%d ctime %lld:%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ip2_ino,
+ __entry->ip2_mtime,
+ __entry->ip2_mtime_nsec,
+ __entry->ip2_ctime,
+ __entry->ip2_ctime_nsec,
+ __entry->file2_ino,
+ __entry->file2_mtime,
+ __entry->file2_mtime_nsec,
+ __entry->file2_ctime,
+ __entry->file2_ctime_nsec)
+);
+
TRACE_EVENT(xfs_exchmaps_overhead,
TP_PROTO(struct xfs_mount *mp, unsigned long long bmbt_blocks,
unsigned long long rmapbt_blocks),
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 3b103715acc9..35166c92420c 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -563,7 +563,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
if (ret <= 0)
goto inode_unlock;
- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
if (ret == -EIO)
zonefs_io_error(inode, true);
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d768d9c568cf..2da5f4a6e814 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -67,7 +67,6 @@
* General Purpose Events (GPEs)
* Global Lock
* ACPI PM timer
- * FACS table (Waking vectors and Global Lock)
*/
#ifndef ACPI_REDUCED_HARDWARE
#define ACPI_REDUCED_HARDWARE FALSE
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index b1571dd96310..5e0346142f98 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -193,6 +193,7 @@
*/
#ifndef ACPI_NO_ERROR_MESSAGES
#define AE_INFO _acpi_module_name, __LINE__
+#define ACPI_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
@@ -201,8 +202,10 @@
*/
#define ACPI_INFO(plist) acpi_info plist
#define ACPI_WARNING(plist) acpi_warning plist
+#define ACPI_WARNING_ONCE(plist) ACPI_ONCE(acpi_warning, plist)
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_ERROR_ONCE(plist) ACPI_ONCE(acpi_error, plist)
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
@@ -214,8 +217,10 @@
#define ACPI_INFO(plist)
#define ACPI_WARNING(plist)
+#define ACPI_WARNING_ONCE(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_ERROR_ONCE(plist)
#define ACPI_BIOS_WARNING(plist)
#define ACPI_BIOS_EXCEPTION(plist)
#define ACPI_BIOS_ERROR(plist)
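
The _ONCE wrappers work because each macro expansion site gets its own
function-local static latch. The pattern can be re-created outside ACPICA in a
few lines; the do/while wrapper below is a slightly hardened variant of the
brace block used by ACPI_ONCE, and printf stands in for acpi_warning():

	#include <stdio.h>

	#define WARN_ONCE(...)					\
		do {						\
			static char _done;			\
			if (!_done) {				\
				_done = 1;			\
				printf(__VA_ARGS__);		\
			}					\
		} while (0)

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			WARN_ONCE("fires once, i=%d\n", i);	/* prints only i=0 */
		return 0;
	}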
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 8db5bd382915..b2e377b7f337 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -228,10 +228,12 @@ struct acpi_device_dir {
/* Plug and Play */
+#define MAX_ACPI_DEVICE_NAME_LEN 40
+#define MAX_ACPI_CLASS_NAME_LEN 20
typedef char acpi_bus_id[8];
typedef u64 acpi_bus_address;
-typedef char acpi_device_name[40];
-typedef char acpi_device_class[20];
+typedef char acpi_device_name[MAX_ACPI_DEVICE_NAME_LEN];
+typedef char acpi_device_class[MAX_ACPI_CLASS_NAME_LEN];
struct acpi_hardware_id {
struct list_head list;
@@ -255,7 +257,6 @@ struct acpi_device_pnp {
struct list_head ids; /* _HID and _CIDs */
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
- union acpi_object *str_obj; /* unicode string for _STR method */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -993,6 +994,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev)
int acpi_wait_for_acpi_ipmi(void);
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices);
+u32 arch_acpi_add_auto_dep(acpi_handle handle);
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9f1c1d225e32..d076ebd19a61 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20240322
+#define ACPI_CA_VERSION 0x20240827
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -878,10 +878,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_leave_sleep_state_prep(u8 sleep_state))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_set_firmware_waking_vector
- (acpi_physical_address physical_address,
- acpi_physical_address physical_address64))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector
+ (acpi_physical_address physical_address,
+ acpi_physical_address physical_address64))
/*
* ACPI Timer interfaces
*/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 841ef9f22795..199afc2cd122 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -567,6 +567,10 @@ struct acpi_cedt_cxims {
u64 xormap_list[];
};
+struct acpi_cedt_cxims_target_element {
+ u64 xormap;
+};
+
/* 3: CXL RCEC Downstream Port Association Structure */
struct acpi_cedt_rdpas {
@@ -751,6 +755,7 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_WITH_GAS 0x0012
#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
#define ACPI_DBG2_INTEL_LPSS 0x0014
+#define ACPI_DBG2_RISCV_SBI_CON 0x0015
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -1791,7 +1796,7 @@ struct acpi_hmat_cache {
u32 reserved1;
u64 cache_size;
u32 cache_attributes;
- u16 reserved2;
+ u16 address_mode;
u16 number_of_SMBIOShandles;
};
@@ -1803,6 +1808,9 @@ struct acpi_hmat_cache {
#define ACPI_HMAT_WRITE_POLICY (0x0000F000)
#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000)
+#define ACPI_HMAT_CACHE_MODE_UNKNOWN (0)
+#define ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR (1)
+
/* Values for cache associativity flag */
#define ACPI_HMAT_CA_NONE (0)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index e27958ef8264..d3858eebc255 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1607,7 +1607,7 @@ struct acpi_mpam_msc_node {
u32 max_nrdy_usec;
u64 hardware_id_linked_device;
u32 instance_id_linked_device;
- u32 num_resouce_nodes;
+ u32 num_resource_nodes;
};
struct acpi_table_mpam {
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 8f775e3a08fd..5cd755143b7d 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -92,10 +92,10 @@ struct acpi_table_slit {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 2
+ * Version 4
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.03, August 10, 2015
+ * Version 1.10, Jan 5, 2023
*
******************************************************************************/
@@ -112,7 +112,7 @@ struct acpi_table_spcr {
u8 stop_bits;
u8 flow_control;
u8 terminal_type;
- u8 reserved1;
+ u8 language;
u16 pci_device_id;
u16 pci_vendor_id;
u8 pci_bus;
@@ -120,7 +120,11 @@ struct acpi_table_spcr {
u8 pci_function;
u32 pci_flags;
u8 pci_segment;
- u32 reserved2;
+ u32 uart_clk_freq;
+ u32 precise_baudrate;
+ u16 name_space_string_length;
+ u16 name_space_string_offset;
+ char name_space_string[];
};
/* Masks for pci_flags field above */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 85c2dcf2b704..80767e8bf3ad 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1311,6 +1311,7 @@ typedef enum {
#define ACPI_OSI_WIN_10_19H1 0x14
#define ACPI_OSI_WIN_10_20H1 0x15
#define ACPI_OSI_WIN_11 0x16
+#define ACPI_OSI_WIN_11_22H2 0x17
/* Definitions of getopt */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 930b6afba6f4..76e44e102780 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -64,6 +64,8 @@ struct cpc_desc {
int cpu_id;
int write_cmd_status;
int write_cmd_id;
+ /* Lock used for RMW operations in cpc_write() */
+ spinlock_t rmw_lock;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
struct kobject kobj;
@@ -159,34 +161,37 @@ extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
+extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+extern int amd_detect_prefcore(bool *detected);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_enable(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
@@ -210,27 +215,39 @@ static inline bool cpc_ffh_supported(void)
}
static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_auto_sel(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ return -ENODEV;
+}
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ return -EOPNOTSUPP;
+}
+static inline int amd_detect_prefcore(bool *detected)
+{
+ return -ENODEV;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 337ffa931ee8..3f31df09a9d6 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -252,6 +252,12 @@
#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
#endif
+/* NULL/invalid value to use for destroyed or not-yet-created semaphores. */
+
+#ifndef ACPI_SEMAPHORE_NULL
+#define ACPI_SEMAPHORE_NULL NULL
+#endif
+
/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
#ifndef ACPI_FLUSH_CPU_CACHE
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6eea3b3c1e65 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mmzone.h b/include/asm-generic/mmzone.h
new file mode 100644
index 000000000000..2ab5193e8394
--- /dev/null
+++ b/include/asm-generic/mmzone.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_MMZONE_H
+#define _ASM_GENERIC_MMZONE_H
+
+#endif
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index c32e0cf23c90..e063d6487f66 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,10 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-int __init early_cpu_to_node(int cpu);
+int early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -51,4 +49,8 @@ static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index a84c64e5f11e..95acdd70b3b2 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -8,16 +8,7 @@
*/
#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
-})
-
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
-} while (0)
+#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
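
The packed-struct trick behind these macros (now shared with the vDSO via
vdso/unaligned.h) wraps the scalar in a struct with alignment 1, so the
compiler emits accesses that are legal at any address. A userspace
demonstration of the same idea, assuming a GNU C compiler for typeof and
__attribute__((packed)):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define __get_unaligned_t(type, ptr) ({				\
		const struct { type x; } __attribute__((packed)) *__pptr =	\
			(typeof(__pptr))(ptr);				\
		__pptr->x;						\
	})

	int main(void)
	{
		unsigned char buf[8] = { 0 };
		uint32_t v = 0xdeadbeef;

		memcpy(buf + 1, &v, sizeof(v));	/* deliberately misaligned */
		printf("0x%x\n", __get_unaligned_t(uint32_t, buf + 1));
		return 0;
	}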
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1ae44793132a..19ec49a9179b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -133,6 +133,7 @@
*(__dl_sched_class) \
*(__rt_sched_class) \
*(__fair_sched_class) \
+ *(__ext_sched_class) \
*(__idle_sched_class) \
__sched_class_lowest = .;
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index d2316242a988..be97b97a75dd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -14,11 +14,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@@ -32,13 +31,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename);
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename);
-void simd_aead_free(struct simd_aead_alg *alg);
-
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 173548c6473a..a6f8b098c56f 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1543,6 +1543,10 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
#define DP_FEC_BASE(dp_phy) \
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index ea03e1dd26ba..279624833ea9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -112,6 +112,7 @@ struct drm_dp_vsc_sdp {
* @target_rr: Target Refresh
* @duration_incr_ms: Successive frame duration increase
* @duration_decr_ms: Successive frame duration decrease
+ * @target_rr_divider: Target refresh rate divider
* @mode: Adaptive Sync Operation Mode
*/
struct drm_dp_as_sdp {
@@ -657,6 +658,8 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index cfe096389d94..f6a1cbb0f600 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -244,18 +244,18 @@ struct drm_dp_mst_branch {
bool link_address_sent;
/* global unique identifier to identify branch devices */
- u8 guid[16];
+ guid_t guid;
};
struct drm_dp_nak_reply {
- u8 guid[16];
+ guid_t guid;
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
- u8 guid[16];
+ guid_t guid;
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
@@ -265,7 +265,7 @@ struct drm_dp_link_address_ack_reply {
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
- u8 peer_guid[16];
+ guid_t peer_guid;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
@@ -348,7 +348,7 @@ struct drm_dp_allocate_payload_ack_reply {
};
struct drm_dp_connection_status_notify {
- u8 guid[16];
+ guid_t guid;
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
@@ -425,7 +425,7 @@ struct drm_dp_query_payload {
struct drm_dp_resource_status_notify {
u8 port_number;
- u8 guid[16];
+ guid_t guid;
u16 available_pbn;
};
@@ -885,6 +885,8 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
+
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index f4d3784b1dce..038ccb02f9a3 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -28,7 +28,8 @@
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek, \
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerators drivers
@@ -51,11 +52,10 @@
#if IS_ENABLED(CONFIG_DRM_ACCEL)
+extern struct xarray accel_minors_xa;
+
void accel_core_exit(void);
int accel_core_init(void);
-void accel_minor_remove(int index);
-int accel_minor_alloc(void);
-void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
void accel_debugfs_init(struct drm_device *dev);
@@ -73,19 +73,6 @@ static inline int __init accel_core_init(void)
return 0;
}
-static inline void accel_minor_remove(int index)
-{
-}
-
-static inline int accel_minor_alloc(void)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void accel_minor_replace(struct drm_minor *minor, int index)
-{
-}
-
static inline void accel_set_device_instance_params(struct device *kdev, int index)
{
}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4d7f4c5f2001..31ca88deb10d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -460,7 +460,7 @@ struct drm_atomic_state {
*
* Used for signaling unbound planes/connectors.
* When a connector or plane is not bound to any CRTC, it's still important
- * to preserve linearity to prevent the atomic states from being freed to early.
+ * to preserve linearity to prevent the atomic states from being freed too early.
*
* This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index c754651044d4..e3fa43291f44 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -471,14 +471,6 @@ enum drm_privacy_screen_status {
*
* DP definitions come from the DP v2.0 spec
* HDMI definitions come from the CTA-861-H spec
- *
- * A note on YCC and RGB variants:
- *
- * Since userspace is not aware of the encoding on the wire
- * (RGB or YCbCr), drivers are free to pick the appropriate
- * variant, regardless of what userspace selects. E.g., if
- * BT2020_RGB is selected by userspace a driver will pick
- * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
*
* @DRM_MODE_COLORIMETRY_DEFAULT:
* Driver specific behavior.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 63767cf24371..c91f87b5242d 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -213,8 +213,9 @@ struct drm_device {
* This can be set to true it the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cd37936c3926..02ea4e3248fd 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -229,34 +229,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
/**
- * @lastclose:
- *
- * Called when the last &struct drm_file has been closed and there's
- * currently no userspace client for the &struct drm_device.
- *
- * Modern drivers should only use this to force-restore the fbdev
- * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
- * Anything else would indicate there's something seriously wrong.
- * Modern drivers can also use this to execute delayed power switching
- * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
- * infrastructure.
- *
- * This is called after @postclose hook has been called.
- *
- * NOTE:
- *
- * All legacy drivers use this callback to de-initialize the hardware.
- * This is purely because of the shadow-attach model, where the DRM
- * kernel driver does not really own the hardware. Instead ownershipe is
- * handled with the help of userspace through an inheritedly racy dance
- * to set/unset the VT into raw mode.
- *
- * Legacy drivers initialize the hardware in the @firstopen callback,
- * which isn't even called for modern drivers.
- */
- void (*lastclose) (struct drm_device *);
-
- /**
* @unload:
*
* Reverse the effects of the driver load callback. Ideally,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 6bdfa254a1c1..eaac5e665892 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -440,8 +440,6 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
int drm_edid_header_is_valid(const void *edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 375737fd6c36..699f2790b9ac 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,9 +271,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-
void drm_fb_helper_lastclose(struct drm_device *dev);
-void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -401,10 +399,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
static inline void drm_fb_helper_lastclose(struct drm_device *dev)
{
}
-
-static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
-}
#endif
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index ab230d3af138..8c0030c77308 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -45,6 +45,8 @@ struct drm_printer;
struct device;
struct file;
+extern struct xarray drm_minors_xa;
+
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
* header include loops we need it here for now.
@@ -434,6 +436,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
void drm_file_update_pid(struct drm_file *);
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index ef8bc8d72039..1922188f00e8 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,8 +25,9 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
-#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
typedef union dfixed {
u32 full;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bae4865b2101..d8b86df2ec0d 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -447,7 +447,8 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index a827bde494f6..f2678e7ecb98 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -267,6 +267,7 @@ unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
+ .fop_flags = FOP_UNSIGNED_OFFSET, \
DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
}
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 0f520eeeaa8e..f725f8654611 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -365,6 +365,18 @@ void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
enum mipi_dsi_dcs_tear_mode mode);
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness);
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format);
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline);
/**
* mipi_dsi_generic_write_seq - transmit data using a generic write packet
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index ab0f167474b1..271765e2e9f2 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -98,22 +98,6 @@ struct drm_mode_config_funcs {
const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
/**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
- * to call this hook to inform the fbdev helper of output changes.
- *
- * This hook is deprecated, drivers should instead implement fbdev
- * support with struct drm_client, which takes care of any necessary
- * hotplug event forwarding already without further involvement by
- * the driver.
- */
- void (*output_poll_changed)(struct drm_device *dev);
-
- /**
* @mode_valid:
*
* Device specific validation of display modes. Can be used to reject
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 73bb3f3d9ed9..54085d5d05c3 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -1,4 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+/*
+ * Copyright (c) 2024 Intel
+ * Copyright (c) 2024 Red Hat
+ */
+
#ifndef __DRM_PANIC_H__
#define __DRM_PANIC_H__
@@ -8,9 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
-/*
- * Copyright (c) 2024 Intel
- */
/**
* struct drm_scanout_buffer - DRM scanout buffer
@@ -146,16 +149,4 @@ struct drm_scanout_buffer {
#define drm_panic_unlock(dev, flags) \
raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
-#ifdef CONFIG_DRM_PANIC
-
-void drm_panic_register(struct drm_device *dev);
-void drm_panic_unregister(struct drm_device *dev);
-
-#else
-
-static inline void drm_panic_register(struct drm_device *dev) {}
-static inline void drm_panic_unregister(struct drm_device *dev) {}
-
-#endif
-
#endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..fa085c44d4ca 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 5d9dff5149c9..d2676831d765 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -221,7 +221,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
- * @data: Pointer to the devcoredump output buffer
+ * @data: Pointer to the devcoredump output buffer; may be NULL when using
+ * drm_printer_coredump to determine the size of the devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -266,6 +267,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
+ * The above example has a time complexity of O(N^2), where N is the size of the
+ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
+ * larger ones.
+ *
+ * Another use case for drm_coredump_printer is to capture the devcoredump into
+ * a saved buffer before the dev_coredump() callback. This involves two passes:
+ * one to determine the size of the devcoredump and another to print it to a
+ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
+ * devcoredump read buffer.
+ *
+ * For example::
+ *
+ * char *devcoredump_saved_buffer;
+ *
+ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = 0;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ * drm_printf(p, "foo=%d\n", foo);
+ * ...
+ * return count - iter.remain;
+ * }
+ *
+ * void coredump_print(...)
+ * {
+ * ssize_t count;
+ *
+ * count = __coredump_print(NULL, INT_MAX, ...);
+ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
+ * __coredump_print(devcoredump_saved_buffer, count, ...);
+ * }
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * ...
+ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
+ * ...
+ * }
+ *
+ * The above example has a time complexity of O(N*2), where N is the size of the
+ * devcoredump. This scales better than the previous example for larger
+ * devcoredumps.
+ *
* RETURNS:
* The &drm_printer object
*/
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 73fcb899a01d..46f09cf68458 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -238,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst,
drm_rect_height(src) >> 16);
}
+/**
+ * drm_rect_overlap - Check if two rectangles overlap
+ * @a: first rectangle
+ * @b: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles overlap, %false otherwise.
+ */
+static inline bool drm_rect_overlap(const struct drm_rect *a,
+ const struct drm_rect *b)
+{
+ return (a->x2 > b->x1 && b->x2 > a->x1 &&
+ a->y2 > b->y1 && b->y2 > a->y1);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
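
drm_rect_overlap() is the usual half-open interval test: two rectangles
overlap iff each one's end lies strictly past the other's start on both axes,
so rectangles that merely share an edge do not count. A self-contained check
of the same predicate (struct rect below is a stand-in for struct drm_rect):

	#include <assert.h>
	#include <stdbool.h>

	struct rect { int x1, y1, x2, y2; };

	static bool rect_overlap(const struct rect *a, const struct rect *b)
	{
		return a->x2 > b->x1 && b->x2 > a->x1 &&
		       a->y2 > b->y1 && b->y2 > a->y1;
	}

	int main(void)
	{
		struct rect a = { 0, 0, 10, 10 };
		struct rect b = { 5, 5, 15, 15 };	/* overlaps a */
		struct rect c = { 10, 0, 20, 10 };	/* only touches a's edge */

		assert(rect_overlap(&a, &b));
		assert(!rect_overlap(&a, &c));	/* shared edge is not overlap */
		return 0;
	}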
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c8f829b4307c..151ab1e85b1b 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -79,6 +79,31 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+ * Additionally, this tracks the disable immediate value per crtc, just
+ * in case it needs to differ from the default value for a given device.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +124,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +224,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -247,6 +278,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
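
drm_crtc_vblank_on_config() lets a driver supply per-CRTC values for
@offdelay_ms and @disable_immediate, while plain drm_crtc_vblank_on() keeps
the device-wide defaults. A compact model of that defaults-versus-override
split; the names and values below are illustrative only:

	#include <stdbool.h>
	#include <stdio.h>

	struct vblank_config {
		int offdelay_ms;
		bool disable_immediate;
	};

	static const struct vblank_config device_defaults = {
		.offdelay_ms = 5000,	/* stand-in for drm_vblank_offdelay */
		.disable_immediate = true,
	};

	static void vblank_on_config(const char *crtc,
				     const struct vblank_config *cfg)
	{
		printf("%s: offdelay=%dms immediate=%d\n",
		       crtc, cfg->offdelay_ms, cfg->disable_immediate);
	}

	static void vblank_on(const char *crtc)
	{
		/* what drm_crtc_vblank_on() does: fall back to the defaults */
		vblank_on_config(crtc, &device_defaults);
	}

	int main(void)
	{
		struct vblank_config psr = { .offdelay_ms = 0,
					     .disable_immediate = false };

		vblank_on("crtc-0");			/* defaults */
		vblank_on_config("crtc-1", &psr);	/* per-CRTC override */
		return 0;
	}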
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5acc64954a88..fe8edb917360 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -579,7 +579,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 6ccf96c91f3a..7b56d1ca36d7 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -190,6 +190,41 @@ struct ttm_operation_ctx {
uint64_t bytes_moved;
};
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+ * Return: Negative error code on error, User-defined positive value
+ * (typically, but not always, size of the processed bo) on success.
+ * On success, the returned values are summed by the walk and the
+ * walk exits when its target is met.
+ * 0 also indicates success, -EBUSY means this bo was skipped.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
+
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
@@ -378,15 +413,14 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
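
The process_bo() contract above (a negative error aborts, -EBUSY skips the
object, positive return values are summed until the walk's target is met) is
the core of ttm_lru_walk_for_evict() and the reworked ttm_bo_swapout(). A
standalone model of just that accounting loop; nothing here is TTM API:

	#include <errno.h>
	#include <stdio.h>

	/* Fake objects: positive size succeeds, 0 models a busy/skipped bo. */
	static long process_one(long sz)
	{
		if (sz == 0)
			return -EBUSY;
		return sz;
	}

	static long walk_for_target(const long *objs, int n, long target)
	{
		long progress = 0;

		for (int i = 0; i < n && progress < target; i++) {
			long ret = process_one(objs[i]);

			if (ret == -EBUSY)
				continue;	/* skipped, keep walking */
			if (ret < 0)
				return ret;	/* hard error aborts the walk */
			progress += ret;	/* success values are summed */
		}
		return progress;
	}

	int main(void)
	{
		long objs[] = { 4096, 0, 8192, 16384 };

		/* Stops once 12288 bytes of progress have been made. */
		printf("%ld\n", walk_for_target(objs, 4, 12288));
		return 0;
	}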
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69769355139f..be034be56ba1 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -49,6 +49,43 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -217,19 +254,20 @@ struct ttm_resource {
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
/**
- * struct ttm_resource_cursor
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
*
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
+ * Return: Pointer to the embedding struct ttm_resource
*/
-struct ttm_resource_cursor {
- unsigned int priority;
-};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
/**
* struct ttm_lru_bulk_move_pos
@@ -246,8 +284,9 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
- *
* @pos: first/last lru entry for resources in the each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -257,9 +296,38 @@ struct ttm_lru_bulk_move_pos {
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
};
/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
@@ -347,6 +415,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
@@ -389,9 +459,10 @@ struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor);
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res);
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
+struct ttm_resource *
+ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
@@ -403,7 +474,7 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
*/
#define ttm_resource_manager_for_each_res(man, cursor, res) \
for (res = ttm_resource_manager_first(man, cursor); res; \
- res = ttm_resource_manager_next(man, cursor, res))
+ res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
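
The ttm_lru_item base class is a classic tagged embedded node: cursors insert
their own hitch items into the same list as resources, and iteration skips
anything whose type is not TTM_LRU_RESOURCE before downcasting with
container_of(). The same idiom in standalone C, with illustrative types and a
simplified singly linked list in place of list_head:

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	enum lru_item_type { LRU_RESOURCE, LRU_HITCH };

	struct lru_item {
		struct lru_item *next;	/* simplified list link */
		enum lru_item_type type;
	};

	struct resource {
		int size;
		struct lru_item lru;	/* embedded base class */
	};

	static struct resource *lru_item_to_res(struct lru_item *item)
	{
		assert(item->type == LRU_RESOURCE); /* only downcast resources */
		return container_of(item, struct resource, lru);
	}

	int main(void)
	{
		struct resource res = {
			.size = 42,
			.lru = { .next = NULL, .type = LRU_RESOURCE },
		};
		struct lru_item hitch = { .next = &res.lru, .type = LRU_HITCH };

		/* Walk: skip non-resource items (hitches), downcast the rest. */
		for (struct lru_item *it = &hitch; it; it = it->next) {
			if (it->type != LRU_RESOURCE)
				continue;
			assert(lru_item_to_res(it)->size == 42);
		}
		return 0;
	}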
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index d6c9e9472121..8332f8d82f96 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -234,11 +234,13 @@
#define QCOM_ID_SA8540P 461
#define QCOM_ID_QCM4290 469
#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM7325 475
#define QCOM_ID_SM8450_2 480
#define QCOM_ID_SM8450_3 482
#define QCOM_ID_SC7280 487
#define QCOM_ID_SC7180P 495
#define QCOM_ID_QCM6490 497
+#define QCOM_ID_SM7325P 499
#define QCOM_ID_IPQ5000 503
#define QCOM_ID_IPQ0509 504
#define QCOM_ID_IPQ0518 505
@@ -274,6 +276,8 @@
#define QCOM_ID_QCM8550 604
#define QCOM_ID_IPQ5300 624
#define QCOM_ID_IPQ5321 650
+#define QCOM_ID_QCS8300 674
+#define QCOM_ID_QCS8275 675
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 3e3972a814c1..6ede88c3992d 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -38,6 +38,10 @@
#define PMC_CPU (PMC_MAIN + 9)
#define PMC_MCK1 (PMC_MAIN + 10)
+/* SAM9X7 */
+#define PMC_PLLADIV2 (PMC_MAIN + 11)
+#define PMC_LVDSPLL (PMC_MAIN + 12)
+
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 08c82c22fa5f..607f23b83fa7 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -155,5 +155,12 @@
#define AUD_CLKID_SYSCLK_B_DIV 175
#define AUD_CLKID_SYSCLK_A_EN 176
#define AUD_CLKID_SYSCLK_B_EN 177
+#define AUD_CLKID_EARCRX 178
+#define AUD_CLKID_EARCRX_CMDC_SEL 179
+#define AUD_CLKID_EARCRX_CMDC_DIV 180
+#define AUD_CLKID_EARCRX_CMDC 181
+#define AUD_CLKID_EARCRX_DMAC_SEL 182
+#define AUD_CLKID_EARCRX_DMAC_DIV 183
+#define AUD_CLKID_EARCRX_DMAC 184
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
index 255e3aa94323..cfede84b46b9 100644
--- a/include/dt-bindings/clock/exynos7885.h
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -69,6 +69,8 @@
#define CLK_GOUT_FSYS_MMC_EMBD 58
#define CLK_GOUT_FSYS_MMC_SDIO 59
#define CLK_GOUT_FSYS_USB30DRD 60
+#define CLK_MOUT_SHARED0_PLL 61
+#define CLK_MOUT_SHARED1_PLL 62
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -132,16 +134,24 @@
#define CLK_GOUT_WDT1_PCLK 43
/* CMU_FSYS */
-#define CLK_MOUT_FSYS_BUS_USER 1
-#define CLK_MOUT_FSYS_MMC_CARD_USER 2
-#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
-#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
-#define CLK_MOUT_FSYS_USB30DRD_USER 4
-#define CLK_GOUT_MMC_CARD_ACLK 5
-#define CLK_GOUT_MMC_CARD_SDCLKIN 6
-#define CLK_GOUT_MMC_EMBD_ACLK 7
-#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
-#define CLK_GOUT_MMC_SDIO_ACLK 9
-#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_GOUT_MMC_CARD_ACLK 5
+#define CLK_GOUT_MMC_CARD_SDCLKIN 6
+#define CLK_GOUT_MMC_EMBD_ACLK 7
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+#define CLK_GOUT_MMC_SDIO_ACLK 9
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_USB30DRD_USER 11
+#define CLK_MOUT_USB_PLL 12
+#define CLK_FOUT_USB_PLL 13
+#define CLK_FSYS_USB20PHY_CLKCORE 14
+#define CLK_FSYS_USB30DRD_ACLK_20PHYCTRL 15
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0 16
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1 17
+#define CLK_FSYS_USB30DRD_BUS_CLK_EARLY 18
+#define CLK_FSYS_USB30DRD_REF_CLK 19
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index 7666241520f8..80dacda57229 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -358,6 +358,7 @@
#define CLK_GOUT_UART_PCLK 32
#define CLK_GOUT_WDT0_PCLK 33
#define CLK_GOUT_WDT1_PCLK 34
+#define CLK_GOUT_BUSIF_TMU_PCLK 35
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h
index 782662c3e740..b7a713a9ac8c 100644
--- a/include/dt-bindings/clock/nxp,imx95-clock.h
+++ b/include/dt-bindings/clock/nxp,imx95-clock.h
@@ -25,4 +25,7 @@
#define IMX95_CLK_DISPMIX_ENG0_SEL 0
#define IMX95_CLK_DISPMIX_ENG1_SEL 1
+#define IMX95_CLK_NETCMIX_ENETC0_RMII 0
+#define IMX95_CLK_NETCMIX_ENETC1_RMII 1
+
#endif /* __DT_BINDINGS_CLOCK_IMX95_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 5b1416fcde6f..a2abf1995c34 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -175,8 +175,6 @@
#define PCLK_CIF 352
#define PCLK_OTP_PHY 353
-#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
-
/* pmu-clocks indices */
#define PLL_GPLL 1
@@ -195,8 +193,6 @@
#define PCLK_GPIO0_PMU 20
#define PCLK_UART0_PMU 21
-#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index b5456a64d421..5b0dde080900 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -193,10 +193,15 @@
#define GCC_MMSS_GPLL0_DIV_CLK 184
#define GCC_GPU_GPLL0_DIV_CLK 185
#define GCC_GPU_GPLL0_CLK 186
+#define HLOS1_VOTE_LPASS_CORE_SMMU_CLK 187
+#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 188
+#define GCC_MSS_Q6_BIMC_AXI_CLK 189
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define LPASS_ADSP_GDSC 3
+#define LPASS_CORE_GDSC 4
#define GCC_BLSP1_QUP1_BCR 0
#define GCC_BLSP1_QUP2_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
index 90c6e021a035..e364006aa6ea 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -248,6 +248,7 @@
#define GCC_USB3_SEC_CLKREF_CLK 238
#define GCC_UFS_MEM_CLKREF_EN 239
#define GCC_UFS_CARD_CLKREF_EN 240
+#define GPLL9 241
#define GCC_EMAC_BCR 0
#define GCC_GPU_BCR 1
@@ -294,6 +295,10 @@
#define GCC_VIDEO_AXI0_CLK_BCR 42
#define GCC_VIDEO_AXI1_CLK_BCR 43
#define GCC_USB3_DP_PHY_SEC_BCR 44
+#define GCC_USB3_UNIPHY_MP0_BCR 45
+#define GCC_USB3_UNIPHY_MP1_BCR 46
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 47
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 48
/* GCC GDSCRs */
#define EMAC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,sm4450-camcc.h b/include/dt-bindings/clock/qcom,sm4450-camcc.h
new file mode 100644
index 000000000000..bf077951bf1c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-camcc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_CLK 2
+#define CAM_CC_BPS_CLK_SRC 3
+#define CAM_CC_CAMNOC_ATB_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_AXI_HF_CLK 7
+#define CAM_CC_CAMNOC_AXI_SF_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPHY_RX_CLK_SRC 15
+#define CAM_CC_CRE_AHB_CLK 16
+#define CAM_CC_CRE_CLK 17
+#define CAM_CC_CRE_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSIPHY0_CLK 25
+#define CAM_CC_CSIPHY1_CLK 26
+#define CAM_CC_CSIPHY2_CLK 27
+#define CAM_CC_FAST_AHB_CLK_SRC 28
+#define CAM_CC_ICP_ATB_CLK 29
+#define CAM_CC_ICP_CLK 30
+#define CAM_CC_ICP_CLK_SRC 31
+#define CAM_CC_ICP_CTI_CLK 32
+#define CAM_CC_ICP_TS_CLK 33
+#define CAM_CC_MCLK0_CLK 34
+#define CAM_CC_MCLK0_CLK_SRC 35
+#define CAM_CC_MCLK1_CLK 36
+#define CAM_CC_MCLK1_CLK_SRC 37
+#define CAM_CC_MCLK2_CLK 38
+#define CAM_CC_MCLK2_CLK_SRC 39
+#define CAM_CC_MCLK3_CLK 40
+#define CAM_CC_MCLK3_CLK_SRC 41
+#define CAM_CC_OPE_0_AHB_CLK 42
+#define CAM_CC_OPE_0_AREG_CLK 43
+#define CAM_CC_OPE_0_CLK 44
+#define CAM_CC_OPE_0_CLK_SRC 45
+#define CAM_CC_PLL0 46
+#define CAM_CC_PLL0_OUT_EVEN 47
+#define CAM_CC_PLL0_OUT_ODD 48
+#define CAM_CC_PLL1 49
+#define CAM_CC_PLL1_OUT_EVEN 50
+#define CAM_CC_PLL2 51
+#define CAM_CC_PLL2_OUT_EVEN 52
+#define CAM_CC_PLL3 53
+#define CAM_CC_PLL3_OUT_EVEN 54
+#define CAM_CC_PLL4 55
+#define CAM_CC_PLL4_OUT_EVEN 56
+#define CAM_CC_SLOW_AHB_CLK_SRC 57
+#define CAM_CC_SOC_AHB_CLK 58
+#define CAM_CC_SYS_TMR_CLK 59
+#define CAM_CC_TFE_0_AHB_CLK 60
+#define CAM_CC_TFE_0_CLK 61
+#define CAM_CC_TFE_0_CLK_SRC 62
+#define CAM_CC_TFE_0_CPHY_RX_CLK 63
+#define CAM_CC_TFE_0_CSID_CLK 64
+#define CAM_CC_TFE_0_CSID_CLK_SRC 65
+#define CAM_CC_TFE_1_AHB_CLK 66
+#define CAM_CC_TFE_1_CLK 67
+#define CAM_CC_TFE_1_CLK_SRC 68
+#define CAM_CC_TFE_1_CPHY_RX_CLK 69
+#define CAM_CC_TFE_1_CSID_CLK 70
+#define CAM_CC_TFE_1_CSID_CLK_SRC 71
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_ICP_BCR 10
+#define CAM_CC_MCLK0_BCR 11
+#define CAM_CC_MCLK1_BCR 12
+#define CAM_CC_MCLK2_BCR 13
+#define CAM_CC_MCLK3_BCR 14
+#define CAM_CC_OPE_0_BCR 15
+#define CAM_CC_TFE_0_BCR 16
+#define CAM_CC_TFE_1_BCR 17
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-dispcc.h b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
new file mode 100644
index 000000000000..ca6f2ef90157
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP1_CLK 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT1_CLK 12
+#define DISP_CC_MDSS_MDP_LUT_CLK 13
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_ROT1_CLK 17
+#define DISP_CC_MDSS_ROT_CLK 18
+#define DISP_CC_MDSS_ROT_CLK_SRC 19
+#define DISP_CC_MDSS_RSCC_AHB_CLK 20
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 21
+#define DISP_CC_MDSS_VSYNC1_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_PLL1 26
+#define DISP_CC_SLEEP_CLK 27
+#define DISP_CC_SLEEP_CLK_SRC 28
+#define DISP_CC_XO_CLK 29
+#define DISP_CC_XO_CLK_SRC 30
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gpucc.h b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
new file mode 100644
index 000000000000..304f83e5f645
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GFX3D_SLV_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_CXO_CLK 15
+#define GPU_CC_GX_FF_CLK 16
+#define GPU_CC_GX_GFX3D_CLK 17
+#define GPU_CC_GX_GFX3D_CLK_SRC 18
+#define GPU_CC_GX_GFX3D_RDVM_CLK 19
+#define GPU_CC_GX_GMU_CLK 20
+#define GPU_CC_GX_VSENSE_CLK 21
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 22
+#define GPU_CC_HUB_AON_CLK 23
+#define GPU_CC_HUB_CLK_SRC 24
+#define GPU_CC_HUB_CX_INT_CLK 25
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 26
+#define GPU_CC_MEMNOC_GFX_CLK 27
+#define GPU_CC_MND1X_0_GFX3D_CLK 28
+#define GPU_CC_PLL0 29
+#define GPU_CC_PLL1 30
+#define GPU_CC_SLEEP_CLK 31
+#define GPU_CC_XO_CLK_SRC 32
+#define GPU_CC_XO_DIV_CLK_SRC 33
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+#define GPU_CC_GX_ACD_IROOT_BCR 9
+#define GPU_CC_RBCPR_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8150-camcc.h b/include/dt-bindings/clock/qcom,sm8150-camcc.h
new file mode 100644
index 000000000000..5444035efa93
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8150-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_MAIN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_BPS_AHB_CLK 11
+#define CAM_CC_BPS_AREG_CLK 12
+#define CAM_CC_BPS_AXI_CLK 13
+#define CAM_CC_BPS_CLK 14
+#define CAM_CC_BPS_CLK_SRC 15
+#define CAM_CC_CAMNOC_AXI_CLK 16
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 17
+#define CAM_CC_CAMNOC_DCD_XO_CLK 18
+#define CAM_CC_CCI_0_CLK 19
+#define CAM_CC_CCI_0_CLK_SRC 20
+#define CAM_CC_CCI_1_CLK 21
+#define CAM_CC_CCI_1_CLK_SRC 22
+#define CAM_CC_CORE_AHB_CLK 23
+#define CAM_CC_CPAS_AHB_CLK 24
+#define CAM_CC_CPHY_RX_CLK_SRC 25
+#define CAM_CC_CSI0PHYTIMER_CLK 26
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI1PHYTIMER_CLK 28
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI2PHYTIMER_CLK 30
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI3PHYTIMER_CLK 32
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSIPHY0_CLK 34
+#define CAM_CC_CSIPHY1_CLK 35
+#define CAM_CC_CSIPHY2_CLK 36
+#define CAM_CC_CSIPHY3_CLK 37
+#define CAM_CC_FAST_AHB_CLK_SRC 38
+#define CAM_CC_FD_CORE_CLK 39
+#define CAM_CC_FD_CORE_CLK_SRC 40
+#define CAM_CC_FD_CORE_UAR_CLK 41
+#define CAM_CC_GDSC_CLK 42
+#define CAM_CC_ICP_AHB_CLK 43
+#define CAM_CC_ICP_CLK 44
+#define CAM_CC_ICP_CLK_SRC 45
+#define CAM_CC_IFE_0_AXI_CLK 46
+#define CAM_CC_IFE_0_CLK 47
+#define CAM_CC_IFE_0_CLK_SRC 48
+#define CAM_CC_IFE_0_CPHY_RX_CLK 49
+#define CAM_CC_IFE_0_CSID_CLK 50
+#define CAM_CC_IFE_0_CSID_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_1_AXI_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_CPHY_RX_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK 57
+#define CAM_CC_IFE_1_CSID_CLK_SRC 58
+#define CAM_CC_IFE_1_DSP_CLK 59
+#define CAM_CC_IFE_LITE_0_CLK 60
+#define CAM_CC_IFE_LITE_0_CLK_SRC 61
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 62
+#define CAM_CC_IFE_LITE_0_CSID_CLK 63
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 64
+#define CAM_CC_IFE_LITE_1_CLK 65
+#define CAM_CC_IFE_LITE_1_CLK_SRC 66
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 67
+#define CAM_CC_IFE_LITE_1_CSID_CLK 68
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 69
+#define CAM_CC_IPE_0_AHB_CLK 70
+#define CAM_CC_IPE_0_AREG_CLK 71
+#define CAM_CC_IPE_0_AXI_CLK 72
+#define CAM_CC_IPE_0_CLK 73
+#define CAM_CC_IPE_0_CLK_SRC 74
+#define CAM_CC_IPE_1_AHB_CLK 75
+#define CAM_CC_IPE_1_AREG_CLK 76
+#define CAM_CC_IPE_1_AXI_CLK 77
+#define CAM_CC_IPE_1_CLK 78
+#define CAM_CC_JPEG_CLK 79
+#define CAM_CC_JPEG_CLK_SRC 80
+#define CAM_CC_LRME_CLK 81
+#define CAM_CC_LRME_CLK_SRC 82
+#define CAM_CC_MCLK0_CLK 83
+#define CAM_CC_MCLK0_CLK_SRC 84
+#define CAM_CC_MCLK1_CLK 85
+#define CAM_CC_MCLK1_CLK_SRC 86
+#define CAM_CC_MCLK2_CLK 87
+#define CAM_CC_MCLK2_CLK_SRC 88
+#define CAM_CC_MCLK3_CLK 89
+#define CAM_CC_MCLK3_CLK_SRC 90
+#define CAM_CC_SLOW_AHB_CLK_SRC 91
+
+/* CAM_CC power domains */
+#define TITAN_TOP_GDSC 0
+#define BPS_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IPE_0_GDSC 4
+#define IPE_1_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_0_BCR 12
+#define CAM_CC_IFE_LITE_1_BCR 13
+#define CAM_CC_IPE_0_BCR 14
+#define CAM_CC_IPE_1_BCR 15
+#define CAM_CC_JPEG_BCR 16
+#define CAM_CC_LRME_BCR 17
+#define CAM_CC_MCLK0_BCR 18
+#define CAM_CC_MCLK1_BCR 19
+#define CAM_CC_MCLK2_BCR 20
+#define CAM_CC_MCLK3_BCR 21
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
index b0a668b395a5..c0a291188f28 100644..120000
--- a/include/dt-bindings/clock/qcom,sm8650-dispcc.h
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -1,102 +1 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
- * Copyright (c) 2023, Linaro Ltd.
- */
-
-#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-
-/* DISP_CC clocks */
-#define DISP_CC_MDSS_ACCU_CLK 0
-#define DISP_CC_MDSS_AHB1_CLK 1
-#define DISP_CC_MDSS_AHB_CLK 2
-#define DISP_CC_MDSS_AHB_CLK_SRC 3
-#define DISP_CC_MDSS_BYTE0_CLK 4
-#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
-#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
-#define DISP_CC_MDSS_BYTE1_CLK 8
-#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
-#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
-#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
-#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
-#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
-#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
-#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
-#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
-#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
-#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
-#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
-#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
-#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
-#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
-#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
-#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
-#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
-#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
-#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
-#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
-#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
-#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
-#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
-#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
-#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
-#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
-#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
-#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
-#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
-#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
-#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
-#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
-#define DISP_CC_MDSS_ESC0_CLK 56
-#define DISP_CC_MDSS_ESC0_CLK_SRC 57
-#define DISP_CC_MDSS_ESC1_CLK 58
-#define DISP_CC_MDSS_ESC1_CLK_SRC 59
-#define DISP_CC_MDSS_MDP1_CLK 60
-#define DISP_CC_MDSS_MDP_CLK 61
-#define DISP_CC_MDSS_MDP_CLK_SRC 62
-#define DISP_CC_MDSS_MDP_LUT1_CLK 63
-#define DISP_CC_MDSS_MDP_LUT_CLK 64
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
-#define DISP_CC_MDSS_PCLK0_CLK 66
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
-#define DISP_CC_MDSS_PCLK1_CLK 68
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
-#define DISP_CC_MDSS_RSCC_AHB_CLK 70
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
-#define DISP_CC_MDSS_VSYNC1_CLK 72
-#define DISP_CC_MDSS_VSYNC_CLK 73
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
-#define DISP_CC_PLL0 75
-#define DISP_CC_PLL1 76
-#define DISP_CC_SLEEP_CLK 77
-#define DISP_CC_SLEEP_CLK_SRC 78
-#define DISP_CC_XO_CLK 79
-#define DISP_CC_XO_CLK_SRC 80
-
-/* DISP_CC resets */
-#define DISP_CC_MDSS_CORE_BCR 0
-#define DISP_CC_MDSS_CORE_INT2_BCR 1
-#define DISP_CC_MDSS_RSCC_BCR 2
-
-/* DISP_CC GDSCR */
-#define MDSS_GDSC 0
-#define MDSS_INT2_GDSC 1
-
-#endif
+qcom,sm8550-dispcc.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/renesas,r9a09g057-cpg.h b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
new file mode 100644
index 000000000000..541e6d719bd6
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G057_SYS_0_PCLK 0
+#define R9A09G057_CA55_0_CORE_CLK0 1
+#define R9A09G057_CA55_0_CORE_CLK1 2
+#define R9A09G057_CA55_0_CORE_CLK2 3
+#define R9A09G057_CA55_0_CORE_CLK3 4
+#define R9A09G057_CA55_0_PERIPHCLK 5
+#define R9A09G057_CM33_CLK0 6
+#define R9A09G057_CST_0_SWCLKTCK 7
+#define R9A09G057_IOTOP_0_SHCLK 8
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index a96a9870ad59..99cc617e1e54 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -94,8 +94,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index de550ea56eeb..138b6ce514dd 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -146,8 +146,6 @@
#define HCLK_S_CRYPTO 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 33819acbfc56..c6034b01b050 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -195,8 +195,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
index d97840f9ee2e..ce4cd72b9d3d 100644
--- a/include/dt-bindings/clock/rk3308-cru.h
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -212,8 +212,6 @@
#define PCLK_CAN 233
#define PCLK_OWIRE 234
-#define CLK_NR_CLKS (PCLK_OWIRE + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index 555b4ff660ae..8885a2e98c65 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -201,8 +201,6 @@
#define HCLK_RGA 340
#define HCLK_HDCP 341
-#define CLK_NR_CLKS (HCLK_HDCP + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 83c72a163fd3..ebae3cbf8192 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -182,8 +182,6 @@
#define HCLK_BUS 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE_B0 0
#define SRST_CORE_B1 1
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 39169d94a44e..4c90c7703a83 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -335,8 +335,6 @@
#define HCLK_SDIO_NOC 495
#define HCLK_SDIOAUDIO_NOC 496
-#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
-
/* pmu-clocks indices */
#define PLL_PPLL 1
@@ -378,8 +376,6 @@
#define PCLK_INTR_ARB_PMU 49
#define HCLK_NOC_PMU 50
-#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rockchip,rk3576-cru.h b/include/dt-bindings/clock/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..25aed298ac2c
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3576-cru.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_BPLL 0
+#define PLL_LPLL 1
+#define PLL_VPLL 2
+#define PLL_AUPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_PPLL 6
+#define ARMCLK_L 7
+#define ARMCLK_B 8
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 9
+#define CLK_CPLL_DIV10 10
+#define CLK_GPLL_DIV8 11
+#define CLK_GPLL_DIV6 12
+#define CLK_CPLL_DIV4 13
+#define CLK_GPLL_DIV4 14
+#define CLK_SPLL_DIV2 15
+#define CLK_GPLL_DIV3 16
+#define CLK_CPLL_DIV2 17
+#define CLK_GPLL_DIV2 18
+#define CLK_SPLL_DIV1 19
+#define PCLK_TOP_ROOT 20
+#define ACLK_TOP 21
+#define HCLK_TOP 22
+#define CLK_AUDIO_FRAC_0 23
+#define CLK_AUDIO_FRAC_1 24
+#define CLK_AUDIO_FRAC_2 25
+#define CLK_AUDIO_FRAC_3 26
+#define CLK_UART_FRAC_0 27
+#define CLK_UART_FRAC_1 28
+#define CLK_UART_FRAC_2 29
+#define CLK_UART1_SRC_TOP 30
+#define CLK_AUDIO_INT_0 31
+#define CLK_AUDIO_INT_1 32
+#define CLK_AUDIO_INT_2 33
+#define CLK_PDM0_SRC_TOP 34
+#define CLK_PDM1_OUT 35
+#define CLK_GMAC0_125M_SRC 36
+#define CLK_GMAC1_125M_SRC 37
+#define LCLK_ASRC_SRC_0 38
+#define LCLK_ASRC_SRC_1 39
+#define REF_CLK0_OUT_PLL 40
+#define REF_CLK1_OUT_PLL 41
+#define REF_CLK2_OUT_PLL 42
+#define REFCLKO25M_GMAC0_OUT 43
+#define REFCLKO25M_GMAC1_OUT 44
+#define CLK_CIFOUT_OUT 45
+#define CLK_GMAC0_RMII_CRU 46
+#define CLK_GMAC1_RMII_CRU 47
+#define CLK_OTPC_AUTO_RD_G 48
+#define CLK_OTP_PHY_G 49
+#define CLK_MIPI_CAMERAOUT_M0 50
+#define CLK_MIPI_CAMERAOUT_M1 51
+#define CLK_MIPI_CAMERAOUT_M2 52
+#define MCLK_PDM0_SRC_TOP 53
+#define HCLK_AUDIO_ROOT 54
+#define HCLK_ASRC_2CH_0 55
+#define HCLK_ASRC_2CH_1 56
+#define HCLK_ASRC_4CH_0 57
+#define HCLK_ASRC_4CH_1 58
+#define CLK_ASRC_2CH_0 59
+#define CLK_ASRC_2CH_1 60
+#define CLK_ASRC_4CH_0 61
+#define CLK_ASRC_4CH_1 62
+#define MCLK_SAI0_8CH_SRC 63
+#define MCLK_SAI0_8CH 64
+#define HCLK_SAI0_8CH 65
+#define HCLK_SPDIF_RX0 66
+#define MCLK_SPDIF_RX0 67
+#define HCLK_SPDIF_RX1 68
+#define MCLK_SPDIF_RX1 69
+#define MCLK_SAI1_8CH_SRC 70
+#define MCLK_SAI1_8CH 71
+#define HCLK_SAI1_8CH 72
+#define MCLK_SAI2_2CH_SRC 73
+#define MCLK_SAI2_2CH 74
+#define HCLK_SAI2_2CH 75
+#define MCLK_SAI3_2CH_SRC 76
+#define MCLK_SAI3_2CH 77
+#define HCLK_SAI3_2CH 78
+#define MCLK_SAI4_2CH_SRC 79
+#define MCLK_SAI4_2CH 80
+#define HCLK_SAI4_2CH 81
+#define HCLK_ACDCDIG_DSM 82
+#define MCLK_ACDCDIG_DSM 83
+#define CLK_PDM1 84
+#define HCLK_PDM1 85
+#define MCLK_PDM1 86
+#define HCLK_SPDIF_TX0 87
+#define MCLK_SPDIF_TX0 88
+#define HCLK_SPDIF_TX1 89
+#define MCLK_SPDIF_TX1 90
+#define CLK_SAI1_MCLKOUT 91
+#define CLK_SAI2_MCLKOUT 92
+#define CLK_SAI3_MCLKOUT 93
+#define CLK_SAI4_MCLKOUT 94
+#define CLK_SAI0_MCLKOUT 95
+#define HCLK_BUS_ROOT 96
+#define PCLK_BUS_ROOT 97
+#define ACLK_BUS_ROOT 98
+#define HCLK_CAN0 99
+#define CLK_CAN0 100
+#define HCLK_CAN1 101
+#define CLK_CAN1 102
+#define CLK_KEY_SHIFT 103
+#define PCLK_I2C1 104
+#define PCLK_I2C2 105
+#define PCLK_I2C3 106
+#define PCLK_I2C4 107
+#define PCLK_I2C5 108
+#define PCLK_I2C6 109
+#define PCLK_I2C7 110
+#define PCLK_I2C8 111
+#define PCLK_I2C9 112
+#define PCLK_WDT_BUSMCU 113
+#define TCLK_WDT_BUSMCU 114
+#define ACLK_GIC 115
+#define CLK_I2C1 116
+#define CLK_I2C2 117
+#define CLK_I2C3 118
+#define CLK_I2C4 119
+#define CLK_I2C5 120
+#define CLK_I2C6 121
+#define CLK_I2C7 122
+#define CLK_I2C8 123
+#define CLK_I2C9 124
+#define PCLK_SARADC 125
+#define CLK_SARADC 126
+#define PCLK_TSADC 127
+#define CLK_TSADC 128
+#define PCLK_UART0 129
+#define PCLK_UART2 130
+#define PCLK_UART3 131
+#define PCLK_UART4 132
+#define PCLK_UART5 133
+#define PCLK_UART6 134
+#define PCLK_UART7 135
+#define PCLK_UART8 136
+#define PCLK_UART9 137
+#define PCLK_UART10 138
+#define PCLK_UART11 139
+#define SCLK_UART0 140
+#define SCLK_UART2 141
+#define SCLK_UART3 142
+#define SCLK_UART4 143
+#define SCLK_UART5 144
+#define SCLK_UART6 145
+#define SCLK_UART7 146
+#define SCLK_UART8 147
+#define SCLK_UART9 148
+#define SCLK_UART10 149
+#define SCLK_UART11 150
+#define PCLK_SPI0 151
+#define PCLK_SPI1 152
+#define PCLK_SPI2 153
+#define PCLK_SPI3 154
+#define PCLK_SPI4 155
+#define CLK_SPI0 156
+#define CLK_SPI1 157
+#define CLK_SPI2 158
+#define CLK_SPI3 159
+#define CLK_SPI4 160
+#define PCLK_WDT0 161
+#define TCLK_WDT0 162
+#define PCLK_PWM1 163
+#define CLK_PWM1 164
+#define CLK_OSC_PWM1 165
+#define CLK_RC_PWM1 166
+#define PCLK_BUSTIMER0 167
+#define PCLK_BUSTIMER1 168
+#define CLK_TIMER0_ROOT 169
+#define CLK_TIMER0 170
+#define CLK_TIMER1 171
+#define CLK_TIMER2 172
+#define CLK_TIMER3 173
+#define CLK_TIMER4 174
+#define CLK_TIMER5 175
+#define PCLK_MAILBOX0 176
+#define PCLK_GPIO1 177
+#define DBCLK_GPIO1 178
+#define PCLK_GPIO2 179
+#define DBCLK_GPIO2 180
+#define PCLK_GPIO3 181
+#define DBCLK_GPIO3 182
+#define PCLK_GPIO4 183
+#define DBCLK_GPIO4 184
+#define ACLK_DECOM 185
+#define PCLK_DECOM 186
+#define DCLK_DECOM 187
+#define CLK_TIMER1_ROOT 188
+#define CLK_TIMER6 189
+#define CLK_TIMER7 190
+#define CLK_TIMER8 191
+#define CLK_TIMER9 192
+#define CLK_TIMER10 193
+#define CLK_TIMER11 194
+#define ACLK_DMAC0 195
+#define ACLK_DMAC1 196
+#define ACLK_DMAC2 197
+#define ACLK_SPINLOCK 198
+#define HCLK_I3C0 199
+#define HCLK_I3C1 200
+#define HCLK_BUS_CM0_ROOT 201
+#define FCLK_BUS_CM0_CORE 202
+#define CLK_BUS_CM0_RTC 203
+#define PCLK_PMU2 204
+#define PCLK_PWM2 205
+#define CLK_PWM2 206
+#define CLK_RC_PWM2 207
+#define CLK_OSC_PWM2 208
+#define CLK_FREQ_PWM1 209
+#define CLK_COUNTER_PWM1 210
+#define SAI_SCLKIN_FREQ 211
+#define SAI_SCLKIN_COUNTER 212
+#define CLK_I3C0 213
+#define CLK_I3C1 214
+#define PCLK_CSIDPHY1 215
+#define PCLK_DDR_ROOT 216
+#define PCLK_DDR_MON_CH0 217
+#define TMCLK_DDR_MON_CH0 218
+#define ACLK_DDR_ROOT 219
+#define HCLK_DDR_ROOT 220
+#define FCLK_DDR_CM0_CORE 221
+#define CLK_DDR_TIMER_ROOT 222
+#define CLK_DDR_TIMER0 223
+#define CLK_DDR_TIMER1 224
+#define TCLK_WDT_DDR 225
+#define PCLK_WDT 226
+#define PCLK_TIMER 227
+#define CLK_DDR_CM0_RTC 228
+#define ACLK_RKNN0 229
+#define ACLK_RKNN1 230
+#define HCLK_RKNN_ROOT 231
+#define CLK_RKNN_DSU0 232
+#define PCLK_NPUTOP_ROOT 233
+#define PCLK_NPU_TIMER 234
+#define CLK_NPUTIMER_ROOT 235
+#define CLK_NPUTIMER0 236
+#define CLK_NPUTIMER1 237
+#define PCLK_NPU_WDT 238
+#define TCLK_NPU_WDT 239
+#define ACLK_RKNN_CBUF 240
+#define HCLK_NPU_CM0_ROOT 241
+#define FCLK_NPU_CM0_CORE 242
+#define CLK_NPU_CM0_RTC 243
+#define HCLK_RKNN_CBUF 244
+#define HCLK_NVM_ROOT 245
+#define ACLK_NVM_ROOT 246
+#define SCLK_FSPI_X2 247
+#define HCLK_FSPI 248
+#define CCLK_SRC_EMMC 249
+#define HCLK_EMMC 250
+#define ACLK_EMMC 251
+#define BCLK_EMMC 252
+#define TCLK_EMMC 253
+#define PCLK_PHP_ROOT 254
+#define ACLK_PHP_ROOT 255
+#define PCLK_PCIE0 256
+#define CLK_PCIE0_AUX 257
+#define ACLK_PCIE0_MST 258
+#define ACLK_PCIE0_SLV 259
+#define ACLK_PCIE0_DBI 260
+#define ACLK_USB3OTG1 261
+#define CLK_REF_USB3OTG1 262
+#define CLK_SUSPEND_USB3OTG1 263
+#define ACLK_MMU0 264
+#define ACLK_SLV_MMU0 265
+#define ACLK_MMU1 266
+#define ACLK_SLV_MMU1 267
+#define PCLK_PCIE1 268
+#define CLK_PCIE1_AUX 269
+#define ACLK_PCIE1_MST 270
+#define ACLK_PCIE1_SLV 271
+#define ACLK_PCIE1_DBI 272
+#define CLK_RXOOB0 273
+#define CLK_RXOOB1 274
+#define CLK_PMALIVE0 275
+#define CLK_PMALIVE1 276
+#define ACLK_SATA0 277
+#define ACLK_SATA1 278
+#define CLK_USB3OTG1_PIPE_PCLK 279
+#define CLK_USB3OTG1_UTMI 280
+#define CLK_USB3OTG0_PIPE_PCLK 281
+#define CLK_USB3OTG0_UTMI 282
+#define HCLK_SDGMAC_ROOT 283
+#define ACLK_SDGMAC_ROOT 284
+#define PCLK_SDGMAC_ROOT 285
+#define ACLK_GMAC0 286
+#define ACLK_GMAC1 287
+#define PCLK_GMAC0 288
+#define PCLK_GMAC1 289
+#define CCLK_SRC_SDIO 290
+#define HCLK_SDIO 291
+#define CLK_GMAC1_PTP_REF 292
+#define CLK_GMAC0_PTP_REF 293
+#define CLK_GMAC1_PTP_REF_SRC 294
+#define CLK_GMAC0_PTP_REF_SRC 295
+#define CCLK_SRC_SDMMC0 296
+#define HCLK_SDMMC0 297
+#define SCLK_FSPI1_X2 298
+#define HCLK_FSPI1 299
+#define ACLK_DSMC_ROOT 300
+#define ACLK_DSMC 301
+#define PCLK_DSMC 302
+#define CLK_DSMC_SYS 303
+#define HCLK_HSGPIO 304
+#define CLK_HSGPIO_TX 305
+#define CLK_HSGPIO_RX 306
+#define ACLK_HSGPIO 307
+#define PCLK_PHPPHY_ROOT 308
+#define PCLK_PCIE2_COMBOPHY0 309
+#define PCLK_PCIE2_COMBOPHY1 310
+#define CLK_PCIE_100M_SRC 311
+#define CLK_PCIE_100M_NDUTY_SRC 312
+#define CLK_REF_PCIE0_PHY 313
+#define CLK_REF_PCIE1_PHY 314
+#define CLK_REF_MPHY_26M 315
+#define HCLK_RKVDEC_ROOT 316
+#define ACLK_RKVDEC_ROOT 317
+#define HCLK_RKVDEC 318
+#define CLK_RKVDEC_HEVC_CA 319
+#define CLK_RKVDEC_CORE 320
+#define ACLK_UFS_ROOT 321
+#define ACLK_USB_ROOT 322
+#define PCLK_USB_ROOT 323
+#define ACLK_USB3OTG0 324
+#define CLK_REF_USB3OTG0 325
+#define CLK_SUSPEND_USB3OTG0 326
+#define ACLK_MMU2 327
+#define ACLK_SLV_MMU2 328
+#define ACLK_UFS_SYS 329
+#define ACLK_VPU_ROOT 330
+#define ACLK_VPU_MID_ROOT 331
+#define HCLK_VPU_ROOT 332
+#define ACLK_JPEG_ROOT 333
+#define ACLK_VPU_LOW_ROOT 334
+#define HCLK_RGA2E_0 335
+#define ACLK_RGA2E_0 336
+#define CLK_CORE_RGA2E_0 337
+#define ACLK_JPEG 338
+#define HCLK_JPEG 339
+#define HCLK_VDPP 340
+#define ACLK_VDPP 341
+#define CLK_CORE_VDPP 342
+#define HCLK_RGA2E_1 343
+#define ACLK_RGA2E_1 344
+#define CLK_CORE_RGA2E_1 345
+#define DCLK_EBC_FRAC_SRC 346
+#define HCLK_EBC 347
+#define ACLK_EBC 348
+#define DCLK_EBC 349
+#define HCLK_VEPU0_ROOT 350
+#define ACLK_VEPU0_ROOT 351
+#define HCLK_VEPU0 352
+#define ACLK_VEPU0 353
+#define CLK_VEPU0_CORE 354
+#define ACLK_VI_ROOT 355
+#define HCLK_VI_ROOT 356
+#define PCLK_VI_ROOT 357
+#define DCLK_VICAP 358
+#define ACLK_VICAP 359
+#define HCLK_VICAP 360
+#define CLK_ISP_CORE 361
+#define CLK_ISP_CORE_MARVIN 362
+#define CLK_ISP_CORE_VICAP 363
+#define ACLK_ISP 364
+#define HCLK_ISP 365
+#define ACLK_VPSS 366
+#define HCLK_VPSS 367
+#define CLK_CORE_VPSS 368
+#define PCLK_CSI_HOST_0 369
+#define PCLK_CSI_HOST_1 370
+#define PCLK_CSI_HOST_2 371
+#define PCLK_CSI_HOST_3 372
+#define PCLK_CSI_HOST_4 373
+#define ICLK_CSIHOST01 374
+#define ICLK_CSIHOST0 375
+#define CLK_ISP_PVTPLL_SRC 376
+#define ACLK_VI_ROOT_INTER 377
+#define CLK_VICAP_I0CLK 378
+#define CLK_VICAP_I1CLK 379
+#define CLK_VICAP_I2CLK 380
+#define CLK_VICAP_I3CLK 381
+#define CLK_VICAP_I4CLK 382
+#define ACLK_VOP_ROOT 383
+#define HCLK_VOP_ROOT 384
+#define PCLK_VOP_ROOT 385
+#define HCLK_VOP 386
+#define ACLK_VOP 387
+#define DCLK_VP0_SRC 388
+#define DCLK_VP1_SRC 389
+#define DCLK_VP2_SRC 390
+#define DCLK_VP0 391
+#define DCLK_VP1 392
+#define DCLK_VP2 393
+#define PCLK_VOPGRF 394
+#define ACLK_VO0_ROOT 395
+#define HCLK_VO0_ROOT 396
+#define PCLK_VO0_ROOT 397
+#define PCLK_VO0_GRF 398
+#define ACLK_HDCP0 399
+#define HCLK_HDCP0 400
+#define PCLK_HDCP0 401
+#define CLK_TRNG0_SKP 402
+#define PCLK_DSIHOST0 403
+#define CLK_DSIHOST0 404
+#define PCLK_HDMITX0 405
+#define CLK_HDMITX0_EARC 406
+#define CLK_HDMITX0_REF 407
+#define PCLK_EDP0 408
+#define CLK_EDP0_24M 409
+#define CLK_EDP0_200M 410
+#define MCLK_SAI5_8CH_SRC 411
+#define MCLK_SAI5_8CH 412
+#define HCLK_SAI5_8CH 413
+#define MCLK_SAI6_8CH_SRC 414
+#define MCLK_SAI6_8CH 415
+#define HCLK_SAI6_8CH 416
+#define HCLK_SPDIF_TX2 417
+#define MCLK_SPDIF_TX2 418
+#define HCLK_SPDIF_RX2 419
+#define MCLK_SPDIF_RX2 420
+#define HCLK_SAI8_8CH 421
+#define MCLK_SAI8_8CH_SRC 422
+#define MCLK_SAI8_8CH 423
+#define ACLK_VO1_ROOT 424
+#define HCLK_VO1_ROOT 425
+#define PCLK_VO1_ROOT 426
+#define MCLK_SAI7_8CH_SRC 427
+#define MCLK_SAI7_8CH 428
+#define HCLK_SAI7_8CH 429
+#define HCLK_SPDIF_TX3 430
+#define HCLK_SPDIF_TX4 431
+#define HCLK_SPDIF_TX5 432
+#define MCLK_SPDIF_TX3 433
+#define CLK_AUX16MHZ_0 434
+#define ACLK_DP0 435
+#define PCLK_DP0 436
+#define PCLK_VO1_GRF 437
+#define ACLK_HDCP1 438
+#define HCLK_HDCP1 439
+#define PCLK_HDCP1 440
+#define CLK_TRNG1_SKP 441
+#define HCLK_SAI9_8CH 442
+#define MCLK_SAI9_8CH_SRC 443
+#define MCLK_SAI9_8CH 444
+#define MCLK_SPDIF_TX4 445
+#define MCLK_SPDIF_TX5 446
+#define CLK_GPU_SRC_PRE 447
+#define CLK_GPU 448
+#define PCLK_GPU_ROOT 449
+#define ACLK_CENTER_ROOT 450
+#define ACLK_CENTER_LOW_ROOT 451
+#define HCLK_CENTER_ROOT 452
+#define PCLK_CENTER_ROOT 453
+#define ACLK_DMA2DDR 454
+#define ACLK_DDR_SHAREMEM 455
+#define PCLK_DMA2DDR 456
+#define PCLK_SHAREMEM 457
+#define HCLK_VEPU1_ROOT 458
+#define ACLK_VEPU1_ROOT 459
+#define HCLK_VEPU1 460
+#define ACLK_VEPU1 461
+#define CLK_VEPU1_CORE 462
+#define CLK_JDBCK_DAP 463
+#define PCLK_MIPI_DCPHY 464
+#define CLK_32K_USB2DEBUG 465
+#define PCLK_CSIDPHY 466
+#define PCLK_USBDPPHY 467
+#define CLK_PMUPHY_REF_SRC 468
+#define CLK_USBDP_COMBO_PHY_IMMORTAL 469
+#define CLK_HDMITXHDP 470
+#define PCLK_MPHY 471
+#define CLK_REF_OSC_MPHY 472
+#define CLK_REF_UFS_CLKOUT 473
+#define HCLK_PMU1_ROOT 474
+#define HCLK_PMU_CM0_ROOT 475
+#define CLK_200M_PMU_SRC 476
+#define CLK_100M_PMU_SRC 477
+#define CLK_50M_PMU_SRC 478
+#define FCLK_PMU_CM0_CORE 479
+#define CLK_PMU_CM0_RTC 480
+#define PCLK_PMU1 481
+#define CLK_PMU1 482
+#define PCLK_PMU1WDT 483
+#define TCLK_PMU1WDT 484
+#define PCLK_PMUTIMER 485
+#define CLK_PMUTIMER_ROOT 486
+#define CLK_PMUTIMER0 487
+#define CLK_PMUTIMER1 488
+#define PCLK_PMU1PWM 489
+#define CLK_PMU1PWM 490
+#define CLK_PMU1PWM_OSC 491
+#define PCLK_PMUPHY_ROOT 492
+#define PCLK_I2C0 493
+#define CLK_I2C0 494
+#define SCLK_UART1 495
+#define PCLK_UART1 496
+#define CLK_PMU1PWM_RC 497
+#define CLK_PDM0 498
+#define HCLK_PDM0 499
+#define MCLK_PDM0 500
+#define HCLK_VAD 501
+#define CLK_OSCCHK_PVTM 502
+#define CLK_PDM0_OUT 503
+#define CLK_HPTIMER_SRC 504
+#define PCLK_PMU0_ROOT 505
+#define PCLK_PMU0 506
+#define PCLK_GPIO0 507
+#define DBCLK_GPIO0 508
+#define CLK_OSC0_PMU1 509
+#define PCLK_PMU1_ROOT 510
+#define XIN_OSC0_DIV 511
+#define ACLK_USB 512
+#define ACLK_UFS 513
+#define ACLK_SDGMAC 514
+#define HCLK_SDGMAC 515
+#define PCLK_SDGMAC 516
+#define HCLK_VO1 517
+#define HCLK_VO0 518
+#define PCLK_CCI_ROOT 519
+#define ACLK_CCI_ROOT 520
+#define HCLK_VO0VOP_CHANNEL 521
+#define ACLK_VO0VOP_CHANNEL 522
+#define ACLK_TOP_MID 523
+#define ACLK_SECURE_HIGH 524
+#define CLK_USBPHY_REF_SRC 525
+#define CLK_PHY_REF_SRC 526
+#define CLK_CPLL_REF_SRC 527
+#define CLK_AUPLL_REF_SRC 528
+#define PCLK_SECURE_NS 529
+#define HCLK_SECURE_NS 530
+#define ACLK_SECURE_NS 531
+#define PCLK_OTPC_NS 532
+#define HCLK_CRYPTO_NS 533
+#define HCLK_TRNG_NS 534
+#define CLK_OTPC_NS 535
+#define SCLK_DSU 536
+#define SCLK_DDR 537
+#define ACLK_CRYPTO_NS 538
+#define CLK_PKA_CRYPTO_NS 539
+#define ACLK_RKVDEC_ROOT_BAK 540
+#define CLK_AUDIO_FRAC_0_SRC 541
+#define CLK_AUDIO_FRAC_1_SRC 542
+#define CLK_AUDIO_FRAC_2_SRC 543
+#define CLK_AUDIO_FRAC_3_SRC 544
+#define PCLK_HDPTX_APB 545
+
+/* secure clk */
+#define CLK_STIMER0_ROOT 546
+#define CLK_STIMER1_ROOT 547
+#define PCLK_SECURE_S 548
+#define HCLK_SECURE_S 549
+#define ACLK_SECURE_S 550
+#define CLK_PKA_CRYPTO_S 551
+#define HCLK_VO1_S 552
+#define PCLK_VO1_S 553
+#define HCLK_VO0_S 554
+#define PCLK_VO0_S 555
+#define PCLK_KLAD 556
+#define HCLK_CRYPTO_S 557
+#define HCLK_KLAD 558
+#define ACLK_CRYPTO_S 559
+#define HCLK_TRNG_S 560
+#define PCLK_OTPC_S 561
+#define CLK_OTPC_S 562
+#define PCLK_WDT_S 563
+#define TCLK_WDT_S 564
+#define PCLK_HDCP0_TRNG 565
+#define PCLK_HDCP1_TRNG 566
+#define HCLK_HDCP_KEY0 567
+#define HCLK_HDCP_KEY1 568
+#define PCLK_EDP_S 569
+#define ACLK_KLAD 570
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 3065375c2d8b..ce8fb8f7d718 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -179,6 +179,17 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+/* CMU_DPUM */
+#define CLK_MOUT_DPUM_BUS_USER 1
+#define CLK_DOUT_DPUM_BUSP 2
+#define CLK_GOUT_DPUM_ACLK_DECON 3
+#define CLK_GOUT_DPUM_ACLK_DMA 4
+#define CLK_GOUT_DPUM_ACLK_DPP 5
+#define CLK_GOUT_DPUM_SYSMMU_D0_CLK 6
+#define CLK_GOUT_DPUM_SYSMMU_D1_CLK 7
+#define CLK_GOUT_DPUM_SYSMMU_D2_CLK 8
+#define CLK_GOUT_DPUM_SYSMMU_D3_CLK 9
+
/* CMU_FSYS0 */
#define CLK_MOUT_FSYS0_BUS_USER 1
#define CLK_MOUT_FSYS0_PCIE_USER 2
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
new file mode 100644
index 000000000000..c720f344b6bf
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Device Tree binding constants for ExynosAuto v920 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+#define FOUT_SHARED5_PLL 6
+#define FOUT_MMC_PLL 7
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 8
+#define MOUT_SHARED1_PLL 9
+#define MOUT_SHARED2_PLL 10
+#define MOUT_SHARED3_PLL 11
+#define MOUT_SHARED4_PLL 12
+#define MOUT_SHARED5_PLL 13
+#define MOUT_MMC_PLL 14
+#define MOUT_CLKCMU_CMU_BOOST 15
+#define MOUT_CLKCMU_CMU_CMUREF 16
+#define MOUT_CLKCMU_ACC_NOC 17
+#define MOUT_CLKCMU_ACC_ORB 18
+#define MOUT_CLKCMU_APM_NOC 19
+#define MOUT_CLKCMU_AUD_CPU 20
+#define MOUT_CLKCMU_AUD_NOC 21
+#define MOUT_CLKCMU_CPUCL0_SWITCH 22
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 23
+#define MOUT_CLKCMU_CPUCL0_DBG 24
+#define MOUT_CLKCMU_CPUCL1_SWITCH 25
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 26
+#define MOUT_CLKCMU_CPUCL2_SWITCH 27
+#define MOUT_CLKCMU_CPUCL2_CLUSTER 28
+#define MOUT_CLKCMU_DNC_NOC 29
+#define MOUT_CLKCMU_DPTX_NOC 30
+#define MOUT_CLKCMU_DPTX_DPGTC 31
+#define MOUT_CLKCMU_DPTX_DPOSC 32
+#define MOUT_CLKCMU_DPUB_NOC 33
+#define MOUT_CLKCMU_DPUB_DSIM 34
+#define MOUT_CLKCMU_DPUF0_NOC 35
+#define MOUT_CLKCMU_DPUF1_NOC 36
+#define MOUT_CLKCMU_DPUF2_NOC 37
+#define MOUT_CLKCMU_DSP_NOC 38
+#define MOUT_CLKCMU_G3D_SWITCH 39
+#define MOUT_CLKCMU_G3D_NOCP 40
+#define MOUT_CLKCMU_GNPU_NOC 41
+#define MOUT_CLKCMU_HSI0_NOC 42
+#define MOUT_CLKCMU_HSI1_NOC 43
+#define MOUT_CLKCMU_HSI1_USBDRD 44
+#define MOUT_CLKCMU_HSI1_MMC_CARD 45
+#define MOUT_CLKCMU_HSI2_NOC 46
+#define MOUT_CLKCMU_HSI2_NOC_UFS 47
+#define MOUT_CLKCMU_HSI2_UFS_EMBD 48
+#define MOUT_CLKCMU_HSI2_ETHERNET 49
+#define MOUT_CLKCMU_ISP_NOC 50
+#define MOUT_CLKCMU_M2M_NOC 51
+#define MOUT_CLKCMU_M2M_JPEG 52
+#define MOUT_CLKCMU_MFC_MFC 53
+#define MOUT_CLKCMU_MFC_WFD 54
+#define MOUT_CLKCMU_MFD_NOC 55
+#define MOUT_CLKCMU_MIF_SWITCH 56
+#define MOUT_CLKCMU_MIF_NOCP 57
+#define MOUT_CLKCMU_MISC_NOC 58
+#define MOUT_CLKCMU_NOCL0_NOC 59
+#define MOUT_CLKCMU_NOCL1_NOC 60
+#define MOUT_CLKCMU_NOCL2_NOC 61
+#define MOUT_CLKCMU_PERIC0_NOC 62
+#define MOUT_CLKCMU_PERIC0_IP 63
+#define MOUT_CLKCMU_PERIC1_NOC 64
+#define MOUT_CLKCMU_PERIC1_IP 65
+#define MOUT_CLKCMU_SDMA_NOC 66
+#define MOUT_CLKCMU_SNW_NOC 67
+#define MOUT_CLKCMU_SSP_NOC 68
+#define MOUT_CLKCMU_TAA_NOC 69
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV1 70
+#define DOUT_SHARED0_DIV2 71
+#define DOUT_SHARED0_DIV3 72
+#define DOUT_SHARED0_DIV4 73
+#define DOUT_SHARED1_DIV1 74
+#define DOUT_SHARED1_DIV2 75
+#define DOUT_SHARED1_DIV3 76
+#define DOUT_SHARED1_DIV4 77
+#define DOUT_SHARED2_DIV1 78
+#define DOUT_SHARED2_DIV2 79
+#define DOUT_SHARED2_DIV3 80
+#define DOUT_SHARED2_DIV4 81
+#define DOUT_SHARED3_DIV1 82
+#define DOUT_SHARED3_DIV2 83
+#define DOUT_SHARED3_DIV3 84
+#define DOUT_SHARED3_DIV4 85
+#define DOUT_SHARED4_DIV1 86
+#define DOUT_SHARED4_DIV2 87
+#define DOUT_SHARED4_DIV3 88
+#define DOUT_SHARED4_DIV4 89
+#define DOUT_SHARED5_DIV1 90
+#define DOUT_SHARED5_DIV2 91
+#define DOUT_SHARED5_DIV3 92
+#define DOUT_SHARED5_DIV4 93
+#define DOUT_CLKCMU_CMU_BOOST 94
+#define DOUT_CLKCMU_ACC_NOC 95
+#define DOUT_CLKCMU_ACC_ORB 96
+#define DOUT_CLKCMU_APM_NOC 97
+#define DOUT_CLKCMU_AUD_CPU 98
+#define DOUT_CLKCMU_AUD_NOC 99
+#define DOUT_CLKCMU_CPUCL0_SWITCH 100
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 101
+#define DOUT_CLKCMU_CPUCL0_DBG 102
+#define DOUT_CLKCMU_CPUCL1_SWITCH 103
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 104
+#define DOUT_CLKCMU_CPUCL2_SWITCH 105
+#define DOUT_CLKCMU_CPUCL2_CLUSTER 106
+#define DOUT_CLKCMU_DNC_NOC 107
+#define DOUT_CLKCMU_DPTX_NOC 108
+#define DOUT_CLKCMU_DPTX_DPGTC 109
+#define DOUT_CLKCMU_DPTX_DPOSC 110
+#define DOUT_CLKCMU_DPUB_NOC 111
+#define DOUT_CLKCMU_DPUB_DSIM 112
+#define DOUT_CLKCMU_DPUF0_NOC 113
+#define DOUT_CLKCMU_DPUF1_NOC 114
+#define DOUT_CLKCMU_DPUF2_NOC 115
+#define DOUT_CLKCMU_DSP_NOC 116
+#define DOUT_CLKCMU_G3D_SWITCH 117
+#define DOUT_CLKCMU_G3D_NOCP 118
+#define DOUT_CLKCMU_GNPU_NOC 119
+#define DOUT_CLKCMU_HSI0_NOC 120
+#define DOUT_CLKCMU_HSI1_NOC 121
+#define DOUT_CLKCMU_HSI1_USBDRD 122
+#define DOUT_CLKCMU_HSI1_MMC_CARD 123
+#define DOUT_CLKCMU_HSI2_NOC 124
+#define DOUT_CLKCMU_HSI2_NOC_UFS 125
+#define DOUT_CLKCMU_HSI2_UFS_EMBD 126
+#define DOUT_CLKCMU_HSI2_ETHERNET 127
+#define DOUT_CLKCMU_ISP_NOC 128
+#define DOUT_CLKCMU_M2M_NOC 129
+#define DOUT_CLKCMU_M2M_JPEG 130
+#define DOUT_CLKCMU_MFC_MFC 131
+#define DOUT_CLKCMU_MFC_WFD 132
+#define DOUT_CLKCMU_MFD_NOC 133
+#define DOUT_CLKCMU_MIF_NOCP 134
+#define DOUT_CLKCMU_MISC_NOC 135
+#define DOUT_CLKCMU_NOCL0_NOC 136
+#define DOUT_CLKCMU_NOCL1_NOC 137
+#define DOUT_CLKCMU_NOCL2_NOC 138
+#define DOUT_CLKCMU_PERIC0_NOC 139
+#define DOUT_CLKCMU_PERIC0_IP 140
+#define DOUT_CLKCMU_PERIC1_NOC 141
+#define DOUT_CLKCMU_PERIC1_IP 142
+#define DOUT_CLKCMU_SDMA_NOC 143
+#define DOUT_CLKCMU_SNW_NOC 144
+#define DOUT_CLKCMU_SSP_NOC 145
+#define DOUT_CLKCMU_TAA_NOC 146
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP_USER 1
+#define CLK_MOUT_PERIC0_NOC_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI06_USI 9
+#define CLK_MOUT_PERIC0_USI07_USI 10
+#define CLK_MOUT_PERIC0_USI08_USI 11
+#define CLK_MOUT_PERIC0_USI_I2C 12
+#define CLK_MOUT_PERIC0_I3C 13
+
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI06_USI 20
+#define CLK_DOUT_PERIC0_USI07_USI 21
+#define CLK_DOUT_PERIC0_USI08_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_DOUT_PERIC0_I3C 24
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq5332.h b/include/dt-bindings/interconnect/qcom,ipq5332.h
new file mode 100644
index 000000000000..16475bb07a48
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5332.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5332_H
+#define INTERCONNECT_QCOM_IPQ5332_H
+
+#define MASTER_SNOC_PCIE3_1_M 0
+#define SLAVE_SNOC_PCIE3_1_M 1
+#define MASTER_ANOC_PCIE3_1_S 2
+#define SLAVE_ANOC_PCIE3_1_S 3
+#define MASTER_SNOC_PCIE3_2_M 4
+#define SLAVE_SNOC_PCIE3_2_M 5
+#define MASTER_ANOC_PCIE3_2_S 6
+#define SLAVE_ANOC_PCIE3_2_S 7
+#define MASTER_SNOC_USB 8
+#define SLAVE_SNOC_USB 9
+#define MASTER_NSSNOC_NSSCC 10
+#define SLAVE_NSSNOC_NSSCC 11
+#define MASTER_NSSNOC_SNOC_0 12
+#define SLAVE_NSSNOC_SNOC_0 13
+#define MASTER_NSSNOC_SNOC_1 14
+#define SLAVE_NSSNOC_SNOC_1 15
+#define MASTER_NSSNOC_ATB 16
+#define SLAVE_NSSNOC_ATB 17
+#define MASTER_NSSNOC_PCNOC_1 18
+#define SLAVE_NSSNOC_PCNOC_1 19
+#define MASTER_NSSNOC_QOSGEN_REF 20
+#define SLAVE_NSSNOC_QOSGEN_REF 21
+#define MASTER_NSSNOC_TIMEOUT_REF 22
+#define SLAVE_NSSNOC_TIMEOUT_REF 23
+#define MASTER_NSSNOC_XO_DCD 24
+#define SLAVE_NSSNOC_XO_DCD 25
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_APB 6
+#define SLAVE_NSSNOC_CE_APB 7
+#define MASTER_NSSNOC_CE_AXI 8
+#define SLAVE_NSSNOC_CE_AXI 9
+
+#define MASTER_CNOC_AHB 0
+#define SLAVE_CNOC_AHB 1
+
+#endif /* INTERCONNECT_QCOM_IPQ5332_H */
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 35b6f69b7db6..887f53363e8a 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -12,6 +12,8 @@
#define GIC_SPI 0
#define GIC_PPI 1
+#define GIC_ESPI 2
+#define GIC_EPPI 3
/*
* Interrupt specifier cell 2.
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index fbfa3febc66d..fd85a79381b3 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -33,5 +33,7 @@
#define IPCC_CLIENT_NSP1 18
#define IPCC_CLIENT_TME 23
#define IPCC_CLIENT_WPSS 24
+#define IPCC_CLIENT_GPDSP0 31
+#define IPCC_CLIENT_GPDSP1 32
#endif
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1800b.h b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
new file mode 100644
index 000000000000..0593fc33d470
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1800B_H
+#define _DT_BINDINGS_PINCTRL_CV1800B_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AOUTR 1
+#define PIN_SD0_CLK 3
+#define PIN_SD0_CMD 4
+#define PIN_SD0_D0 5
+#define PIN_SD0_D1 7
+#define PIN_SD0_D2 8
+#define PIN_SD0_D3 9
+#define PIN_SD0_CD 11
+#define PIN_SD0_PWR_EN 12
+#define PIN_SPK_EN 14
+#define PIN_UART0_TX 15
+#define PIN_UART0_RX 16
+#define PIN_SPINOR_HOLD_X 17
+#define PIN_SPINOR_SCK 18
+#define PIN_SPINOR_MOSI 19
+#define PIN_SPINOR_WP_X 20
+#define PIN_SPINOR_MISO 21
+#define PIN_SPINOR_CS_X 22
+#define PIN_IIC0_SCL 23
+#define PIN_IIC0_SDA 24
+#define PIN_AUX0 25
+#define PIN_PWR_VBAT_DET 30
+#define PIN_PWR_SEQ2 31
+#define PIN_XTAL_XIN 33
+#define PIN_SD1_GPIO0 35
+#define PIN_SD1_GPIO1 36
+#define PIN_SD1_D3 38
+#define PIN_SD1_D2 39
+#define PIN_SD1_D1 40
+#define PIN_SD1_D0 41
+#define PIN_SD1_CMD 42
+#define PIN_SD1_CLK 43
+#define PIN_ADC1 44
+#define PIN_USB_VBUS_DET 45
+#define PIN_ETH_TXP 47
+#define PIN_ETH_TXM 48
+#define PIN_ETH_RXP 49
+#define PIN_ETH_RXM 50
+#define PIN_MIPIRX4N 56
+#define PIN_MIPIRX4P 57
+#define PIN_MIPIRX3N 58
+#define PIN_MIPIRX3P 59
+#define PIN_MIPIRX2N 60
+#define PIN_MIPIRX2P 61
+#define PIN_MIPIRX1N 62
+#define PIN_MIPIRX1P 63
+#define PIN_MIPIRX0N 64
+#define PIN_MIPIRX0P 65
+#define PIN_AUD_AINL_MIC 67
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1800B_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1812h.h b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
new file mode 100644
index 000000000000..2908de347919
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1812H_H
+#define _DT_BINDINGS_PINCTRL_CV1812H_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1812H_H */
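
The PINPOS() helper above encodes a BGA ball position: the row letter (1-based) lands in bits [15:8] and the column (0-based) in bits [7:0]. Note that it uses the raw ASCII offset from 'A', so rows absent from the pinout (the listing skips I, O and Q) simply leave gaps in the row numbering. A minimal sketch of the expansion, reusing the macro exactly as defined above:

#include <stdio.h>

#define PINPOS(row, col) \
	((((row) - 'A' + 1) << 8) + ((col) - 1))

int main(void)
{
	/* PIN_UART2_RX = PINPOS('K', 13): row 'K' -> 11 -> 0x0B00, col 13 -> 12 */
	printf("PIN_UART2_RX = 0x%03X\n", PINPOS('K', 13));	/* prints 0xB0C */
	return 0;
}
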
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv18xx.h b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..bc92ad1067ec
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ *
+ * Author: Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV18XX_H
+#define _DT_BINDINGS_PINCTRL_CV18XX_H
+
+#define PIN_MUX_INVALD 0xff
+
+#define PINMUX2(pin, mux, mux2) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))
+
+#define PINMUX(pin, mux) \
+ PINMUX2(pin, mux, PIN_MUX_INVALD)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV18XX_H */
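
PINMUX2() above packs a complete mux selection into one 32-bit cell: the pin number in bits [15:0], the first-level mux in bits [23:16] and the second-level mux in bits [31:24], with PIN_MUX_INVALD (0xff) filling the top byte when the PINMUX() shorthand is used and no second level exists. A minimal sketch of the packing, reusing the macros exactly as defined above; the mux value 0 paired with PIN_UART0_TX is a made-up illustration, not taken from the vendor pinout:

#include <stdint.h>
#include <stdio.h>

#define PIN_MUX_INVALD 0xff

#define PINMUX2(pin, mux, mux2) \
	(((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))

#define PINMUX(pin, mux) \
	PINMUX2(pin, mux, PIN_MUX_INVALD)

int main(void)
{
	uint32_t v = PINMUX(15, 0);	/* cv1800b PIN_UART0_TX is 15 */

	printf("packed = 0x%08X\n", v);				/* 0xFF00000F */
	printf("pin    = %u\n", (unsigned)(v & 0xffff));	/* 15 */
	printf("mux    = %u\n", (unsigned)((v >> 16) & 0xff));	/* 0 */
	printf("mux2   = 0x%02X\n", (unsigned)(v >> 24));	/* 0xFF = invalid */
	return 0;
}
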
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2000.h b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
new file mode 100644
index 000000000000..4871f9a7c6c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2000_H
+#define _DT_BINDINGS_PINCTRL_SG2000_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2000_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2002.h b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
new file mode 100644
index 000000000000..3c36cfa0a550
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2002_H
+#define _DT_BINDINGS_PINCTRL_SG2002_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AINL_MIC 2
+#define PIN_AUD_AOUTR 4
+#define PIN_SD0_CLK 6
+#define PIN_SD0_CMD 7
+#define PIN_SD0_D0 8
+#define PIN_SD0_D1 10
+#define PIN_SD0_D2 11
+#define PIN_SD0_D3 12
+#define PIN_SD0_CD 14
+#define PIN_SD0_PWR_EN 15
+#define PIN_SPK_EN 17
+#define PIN_UART0_TX 18
+#define PIN_UART0_RX 19
+#define PIN_EMMC_DAT2 20
+#define PIN_EMMC_CLK 21
+#define PIN_EMMC_DAT0 22
+#define PIN_EMMC_DAT3 23
+#define PIN_EMMC_CMD 24
+#define PIN_EMMC_DAT1 25
+#define PIN_JTAG_CPU_TMS 26
+#define PIN_JTAG_CPU_TCK 27
+#define PIN_IIC0_SCL 28
+#define PIN_IIC0_SDA 29
+#define PIN_AUX0 30
+#define PIN_GPIO_ZQ 35
+#define PIN_PWR_VBAT_DET 38
+#define PIN_PWR_RSTN 39
+#define PIN_PWR_SEQ1 40
+#define PIN_PWR_SEQ2 41
+#define PIN_PWR_WAKEUP0 43
+#define PIN_PWR_BUTTON1 44
+#define PIN_XTAL_XIN 45
+#define PIN_PWR_GPIO0 47
+#define PIN_PWR_GPIO1 48
+#define PIN_PWR_GPIO2 49
+#define PIN_SD1_D3 51
+#define PIN_SD1_D2 52
+#define PIN_SD1_D1 53
+#define PIN_SD1_D0 54
+#define PIN_SD1_CMD 55
+#define PIN_SD1_CLK 56
+#define PIN_PWM0_BUCK 58
+#define PIN_ADC1 59
+#define PIN_USB_VBUS_DET 60
+#define PIN_ETH_TXP 62
+#define PIN_ETH_TXM 63
+#define PIN_ETH_RXP 64
+#define PIN_ETH_RXM 65
+#define PIN_GPIO_RTX 67
+#define PIN_MIPIRX4N 72
+#define PIN_MIPIRX4P 73
+#define PIN_MIPIRX3N 74
+#define PIN_MIPIRX3P 75
+#define PIN_MIPIRX2N 76
+#define PIN_MIPIRX2P 77
+#define PIN_MIPIRX1N 78
+#define PIN_MIPIRX1P 79
+#define PIN_MIPIRX0N 80
+#define PIN_MIPIRX0P 81
+#define PIN_MIPI_TXM2 83
+#define PIN_MIPI_TXP2 84
+#define PIN_MIPI_TXM1 85
+#define PIN_MIPI_TXP1 86
+#define PIN_MIPI_TXM0 87
+#define PIN_MIPI_TXP0 88
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2002_H */
diff --git a/include/dt-bindings/power/rockchip,rk3576-power.h b/include/dt-bindings/power/rockchip,rk3576-power.h
new file mode 100644
index 000000000000..324a056aa851
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3576-power.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__
+#define __DT_BINDINGS_POWER_RK3576_POWER_H__
+
+/* VD_NPU */
+#define RK3576_PD_NPU 0
+#define RK3576_PD_NPUTOP 1
+#define RK3576_PD_NPU0 2
+#define RK3576_PD_NPU1 3
+
+/* VD_GPU */
+#define RK3576_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3576_PD_NVM 5
+#define RK3576_PD_SDGMAC 6
+#define RK3576_PD_USB 7
+#define RK3576_PD_PHP 8
+#define RK3576_PD_SUBPHP 9
+#define RK3576_PD_AUDIO 10
+#define RK3576_PD_VEPU0 11
+#define RK3576_PD_VEPU1 12
+#define RK3576_PD_VPU 13
+#define RK3576_PD_VDEC 14
+#define RK3576_PD_VI 15
+#define RK3576_PD_VO0 16
+#define RK3576_PD_VO1 17
+#define RK3576_PD_VOP 18
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3576-cru.h b/include/dt-bindings/reset/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ae856906f3a3
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3576-cru.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_A_TOP_MID_BIU 2
+#define SRST_A_SECURE_HIGH_BIU 3
+#define SRST_H_TOP_BIU 4
+
+#define SRST_H_VO0VOP_CHANNEL_BIU 5
+#define SRST_A_VO0VOP_CHANNEL_BIU 6
+
+#define SRST_BISRINTF 7
+
+#define SRST_H_AUDIO_BIU 8
+#define SRST_H_ASRC_2CH_0 9
+#define SRST_H_ASRC_2CH_1 10
+#define SRST_H_ASRC_4CH_0 11
+#define SRST_H_ASRC_4CH_1 12
+#define SRST_ASRC_2CH_0 13
+#define SRST_ASRC_2CH_1 14
+#define SRST_ASRC_4CH_0 15
+#define SRST_ASRC_4CH_1 16
+#define SRST_M_SAI0_8CH 17
+#define SRST_H_SAI0_8CH 18
+#define SRST_H_SPDIF_RX0 19
+#define SRST_M_SPDIF_RX0 20
+
+#define SRST_H_SPDIF_RX1 21
+#define SRST_M_SPDIF_RX1 22
+#define SRST_M_SAI1_8CH 23
+#define SRST_H_SAI1_8CH 24
+#define SRST_M_SAI2_2CH 25
+#define SRST_H_SAI2_2CH 26
+#define SRST_M_SAI3_2CH 27
+#define SRST_H_SAI3_2CH 28
+
+#define SRST_M_SAI4_2CH 29
+#define SRST_H_SAI4_2CH 30
+#define SRST_H_ACDCDIG_DSM 31
+#define SRST_M_ACDCDIG_DSM 32
+#define SRST_PDM1 33
+#define SRST_H_PDM1 34
+#define SRST_M_PDM1 35
+#define SRST_H_SPDIF_TX0 36
+#define SRST_M_SPDIF_TX0 37
+#define SRST_H_SPDIF_TX1 38
+#define SRST_M_SPDIF_TX1 39
+
+#define SRST_A_BUS_BIU 40
+#define SRST_P_BUS_BIU 41
+#define SRST_P_CRU 42
+#define SRST_H_CAN0 43
+#define SRST_CAN0 44
+#define SRST_H_CAN1 45
+#define SRST_CAN1 46
+#define SRST_P_INTMUX2BUS 47
+#define SRST_P_VCCIO_IOC 48
+#define SRST_H_BUS_BIU 49
+#define SRST_KEY_SHIFT 50
+
+#define SRST_P_I2C1 51
+#define SRST_P_I2C2 52
+#define SRST_P_I2C3 53
+#define SRST_P_I2C4 54
+#define SRST_P_I2C5 55
+#define SRST_P_I2C6 56
+#define SRST_P_I2C7 57
+#define SRST_P_I2C8 58
+#define SRST_P_I2C9 59
+#define SRST_P_WDT_BUSMCU 60
+#define SRST_T_WDT_BUSMCU 61
+#define SRST_A_GIC 62
+#define SRST_I2C1 63
+#define SRST_I2C2 64
+#define SRST_I2C3 65
+#define SRST_I2C4 66
+
+#define SRST_I2C5 67
+#define SRST_I2C6 68
+#define SRST_I2C7 69
+#define SRST_I2C8 70
+#define SRST_I2C9 71
+#define SRST_P_SARADC 72
+#define SRST_SARADC 73
+#define SRST_P_TSADC 74
+#define SRST_TSADC 75
+#define SRST_P_UART0 76
+#define SRST_P_UART2 77
+#define SRST_P_UART3 78
+#define SRST_P_UART4 79
+#define SRST_P_UART5 80
+#define SRST_P_UART6 81
+
+#define SRST_P_UART7 82
+#define SRST_P_UART8 83
+#define SRST_P_UART9 84
+#define SRST_P_UART10 85
+#define SRST_P_UART11 86
+#define SRST_S_UART0 87
+#define SRST_S_UART2 88
+#define SRST_S_UART3 89
+#define SRST_S_UART4 90
+#define SRST_S_UART5 91
+
+#define SRST_S_UART6 92
+#define SRST_S_UART7 93
+#define SRST_S_UART8 94
+#define SRST_S_UART9 95
+#define SRST_S_UART10 96
+#define SRST_S_UART11 97
+#define SRST_P_SPI0 98
+#define SRST_P_SPI1 99
+#define SRST_P_SPI2 100
+
+#define SRST_P_SPI3 101
+#define SRST_P_SPI4 102
+#define SRST_SPI0 103
+#define SRST_SPI1 104
+#define SRST_SPI2 105
+#define SRST_SPI3 106
+#define SRST_SPI4 107
+#define SRST_P_WDT0 108
+#define SRST_T_WDT0 109
+#define SRST_P_SYS_GRF 110
+#define SRST_P_PWM1 111
+#define SRST_PWM1 112
+
+#define SRST_P_BUSTIMER0 113
+#define SRST_P_BUSTIMER1 114
+#define SRST_TIMER0 115
+#define SRST_TIMER1 116
+#define SRST_TIMER2 117
+#define SRST_TIMER3 118
+#define SRST_TIMER4 119
+#define SRST_TIMER5 120
+#define SRST_P_BUSIOC 121
+#define SRST_P_MAILBOX0 122
+#define SRST_P_GPIO1 123
+
+#define SRST_GPIO1 124
+#define SRST_P_GPIO2 125
+#define SRST_GPIO2 126
+#define SRST_P_GPIO3 127
+#define SRST_GPIO3 128
+#define SRST_P_GPIO4 129
+#define SRST_GPIO4 130
+#define SRST_A_DECOM 131
+#define SRST_P_DECOM 132
+#define SRST_D_DECOM 133
+#define SRST_TIMER6 134
+#define SRST_TIMER7 135
+#define SRST_TIMER8 136
+#define SRST_TIMER9 137
+#define SRST_TIMER10 138
+
+#define SRST_TIMER11 139
+#define SRST_A_DMAC0 140
+#define SRST_A_DMAC1 141
+#define SRST_A_DMAC2 142
+#define SRST_A_SPINLOCK 143
+#define SRST_REF_PVTPLL_BUS 144
+#define SRST_H_I3C0 145
+#define SRST_H_I3C1 146
+#define SRST_H_BUS_CM0_BIU 147
+#define SRST_F_BUS_CM0_CORE 148
+#define SRST_T_BUS_CM0_JTAG 149
+
+#define SRST_P_INTMUX2PMU 150
+#define SRST_P_INTMUX2DDR 151
+#define SRST_P_PVTPLL_BUS 152
+#define SRST_P_PWM2 153
+#define SRST_PWM2 154
+#define SRST_FREQ_PWM1 155
+#define SRST_COUNTER_PWM1 156
+#define SRST_I3C0 157
+#define SRST_I3C1 158
+
+#define SRST_P_DDR_MON_CH0 159
+#define SRST_P_DDR_BIU 160
+#define SRST_P_DDR_UPCTL_CH0 161
+#define SRST_TM_DDR_MON_CH0 162
+#define SRST_A_DDR_BIU 163
+#define SRST_DFI_CH0 164
+#define SRST_DDR_MON_CH0 165
+#define SRST_P_DDR_HWLP_CH0 166
+#define SRST_P_DDR_MON_CH1 167
+#define SRST_P_DDR_HWLP_CH1 168
+
+#define SRST_P_DDR_UPCTL_CH1 169
+#define SRST_TM_DDR_MON_CH1 170
+#define SRST_DFI_CH1 171
+#define SRST_A_DDR01_MSCH0 172
+#define SRST_A_DDR01_MSCH1 173
+#define SRST_DDR_MON_CH1 174
+#define SRST_DDR_SCRAMBLE_CH0 175
+#define SRST_DDR_SCRAMBLE_CH1 176
+#define SRST_P_AHB2APB 177
+#define SRST_H_AHB2APB 178
+#define SRST_H_DDR_BIU 179
+#define SRST_F_DDR_CM0_CORE 180
+
+#define SRST_P_DDR01_MSCH0 181
+#define SRST_P_DDR01_MSCH1 182
+#define SRST_DDR_TIMER0 183
+#define SRST_DDR_TIMER1 184
+#define SRST_T_WDT_DDR 185
+#define SRST_P_WDT 186
+#define SRST_P_TIMER 187
+#define SRST_T_DDR_CM0_JTAG 188
+#define SRST_P_DDR_GRF 189
+
+#define SRST_DDR_UPCTL_CH0 190
+#define SRST_A_DDR_UPCTL_0_CH0 191
+#define SRST_A_DDR_UPCTL_1_CH0 192
+#define SRST_A_DDR_UPCTL_2_CH0 193
+#define SRST_A_DDR_UPCTL_3_CH0 194
+#define SRST_A_DDR_UPCTL_4_CH0 195
+
+#define SRST_DDR_UPCTL_CH1 196
+#define SRST_A_DDR_UPCTL_0_CH1 197
+#define SRST_A_DDR_UPCTL_1_CH1 198
+#define SRST_A_DDR_UPCTL_2_CH1 199
+#define SRST_A_DDR_UPCTL_3_CH1 200
+#define SRST_A_DDR_UPCTL_4_CH1 201
+
+#define SRST_REF_PVTPLL_DDR 202
+#define SRST_P_PVTPLL_DDR 203
+
+#define SRST_A_RKNN0 204
+#define SRST_A_RKNN0_BIU 205
+#define SRST_L_RKNN0_BIU 206
+
+#define SRST_A_RKNN1 207
+#define SRST_A_RKNN1_BIU 208
+#define SRST_L_RKNN1_BIU 209
+
+#define SRST_NPU_DAP 210
+#define SRST_L_NPUSUBSYS_BIU 211
+#define SRST_P_NPUTOP_BIU 212
+#define SRST_P_NPU_TIMER 213
+#define SRST_NPUTIMER0 214
+#define SRST_NPUTIMER1 215
+#define SRST_P_NPU_WDT 216
+#define SRST_T_NPU_WDT 217
+
+#define SRST_A_RKNN_CBUF 218
+#define SRST_A_RVCORE0 219
+#define SRST_P_NPU_GRF 220
+#define SRST_P_PVTPLL_NPU 221
+#define SRST_NPU_PVTPLL 222
+#define SRST_H_NPU_CM0_BIU 223
+#define SRST_F_NPU_CM0_CORE 224
+#define SRST_T_NPU_CM0_JTAG 225
+#define SRST_A_RKNNTOP_BIU 226
+#define SRST_H_RKNN_CBUF 227
+#define SRST_H_RKNNTOP_BIU 228
+
+#define SRST_H_NVM_BIU 229
+#define SRST_A_NVM_BIU 230
+#define SRST_S_FSPI 231
+#define SRST_H_FSPI 232
+#define SRST_C_EMMC 233
+#define SRST_H_EMMC 234
+#define SRST_A_EMMC 235
+#define SRST_B_EMMC 236
+#define SRST_T_EMMC 237
+
+#define SRST_P_GRF 238
+#define SRST_P_PHP_BIU 239
+#define SRST_A_PHP_BIU 240
+#define SRST_P_PCIE0 241
+#define SRST_PCIE0_POWER_UP 242
+
+#define SRST_A_USB3OTG1 243
+#define SRST_A_MMU0 244
+#define SRST_A_SLV_MMU0 245
+#define SRST_A_MMU1 246
+
+#define SRST_A_SLV_MMU1 247
+#define SRST_P_PCIE1 248
+#define SRST_PCIE1_POWER_UP 249
+
+#define SRST_RXOOB0 250
+#define SRST_RXOOB1 251
+#define SRST_PMALIVE0 252
+#define SRST_PMALIVE1 253
+#define SRST_A_SATA0 254
+#define SRST_A_SATA1 255
+#define SRST_ASIC1 256
+#define SRST_ASIC0 257
+
+#define SRST_P_CSIDPHY1 258
+#define SRST_SCAN_CSIDPHY1 259
+
+#define SRST_P_SDGMAC_GRF 260
+#define SRST_P_SDGMAC_BIU 261
+#define SRST_A_SDGMAC_BIU 262
+#define SRST_H_SDGMAC_BIU 263
+#define SRST_A_GMAC0 264
+#define SRST_A_GMAC1 265
+#define SRST_P_GMAC0 266
+#define SRST_P_GMAC1 267
+#define SRST_H_SDIO 268
+
+#define SRST_H_SDMMC0 269
+#define SRST_S_FSPI1 270
+#define SRST_H_FSPI1 271
+#define SRST_A_DSMC_BIU 272
+#define SRST_A_DSMC 273
+#define SRST_P_DSMC 274
+#define SRST_H_HSGPIO 275
+#define SRST_HSGPIO 276
+#define SRST_A_HSGPIO 277
+
+#define SRST_H_RKVDEC 278
+#define SRST_H_RKVDEC_BIU 279
+#define SRST_A_RKVDEC_BIU 280
+#define SRST_RKVDEC_HEVC_CA 281
+#define SRST_RKVDEC_CORE 282
+
+#define SRST_A_USB_BIU 283
+#define SRST_P_USBUFS_BIU 284
+#define SRST_A_USB3OTG0 285
+#define SRST_A_UFS_BIU 286
+#define SRST_A_MMU2 287
+#define SRST_A_SLV_MMU2 288
+#define SRST_A_UFS_SYS 289
+
+#define SRST_A_UFS 290
+#define SRST_P_USBUFS_GRF 291
+#define SRST_P_UFS_GRF 292
+
+#define SRST_H_VPU_BIU 293
+#define SRST_A_JPEG_BIU 294
+#define SRST_A_RGA_BIU 295
+#define SRST_A_VDPP_BIU 296
+#define SRST_A_EBC_BIU 297
+#define SRST_H_RGA2E_0 298
+#define SRST_A_RGA2E_0 299
+#define SRST_CORE_RGA2E_0 300
+
+#define SRST_A_JPEG 301
+#define SRST_H_JPEG 302
+#define SRST_H_VDPP 303
+#define SRST_A_VDPP 304
+#define SRST_CORE_VDPP 305
+#define SRST_H_RGA2E_1 306
+#define SRST_A_RGA2E_1 307
+#define SRST_CORE_RGA2E_1 308
+#define SRST_H_EBC 309
+#define SRST_A_EBC 310
+#define SRST_D_EBC 311
+
+#define SRST_H_VEPU0_BIU 312
+#define SRST_A_VEPU0_BIU 313
+#define SRST_H_VEPU0 314
+#define SRST_A_VEPU0 315
+#define SRST_VEPU0_CORE 316
+
+#define SRST_A_VI_BIU 317
+#define SRST_H_VI_BIU 318
+#define SRST_P_VI_BIU 319
+#define SRST_D_VICAP 320
+#define SRST_A_VICAP 321
+#define SRST_H_VICAP 322
+#define SRST_ISP0 323
+#define SRST_ISP0_VICAP 324
+
+#define SRST_CORE_VPSS 325
+#define SRST_P_CSI_HOST_0 326
+#define SRST_P_CSI_HOST_1 327
+#define SRST_P_CSI_HOST_2 328
+#define SRST_P_CSI_HOST_3 329
+#define SRST_P_CSI_HOST_4 330
+
+#define SRST_CIFIN 331
+#define SRST_VICAP_I0CLK 332
+#define SRST_VICAP_I1CLK 333
+#define SRST_VICAP_I2CLK 334
+#define SRST_VICAP_I3CLK 335
+#define SRST_VICAP_I4CLK 336
+
+#define SRST_A_VOP_BIU 337
+#define SRST_A_VOP2_BIU 338
+#define SRST_H_VOP_BIU 339
+#define SRST_P_VOP_BIU 340
+#define SRST_H_VOP 341
+#define SRST_A_VOP 342
+#define SRST_D_VP0 343
+
+#define SRST_D_VP1 344
+#define SRST_D_VP2 345
+#define SRST_P_VOP2_BIU 346
+#define SRST_P_VOPGRF 347
+
+#define SRST_H_VO0_BIU 348
+#define SRST_P_VO0_BIU 349
+#define SRST_A_HDCP0_BIU 350
+#define SRST_P_VO0_GRF 351
+#define SRST_A_HDCP0 352
+#define SRST_H_HDCP0 353
+#define SRST_HDCP0 354
+
+#define SRST_P_DSIHOST0 355
+#define SRST_DSIHOST0 356
+#define SRST_P_HDMITX0 357
+#define SRST_HDMITX0_REF 358
+#define SRST_P_EDP0 359
+#define SRST_EDP0_24M 360
+
+#define SRST_M_SAI5_8CH 361
+#define SRST_H_SAI5_8CH 362
+#define SRST_M_SAI6_8CH 363
+#define SRST_H_SAI6_8CH 364
+#define SRST_H_SPDIF_TX2 365
+#define SRST_M_SPDIF_TX2 366
+#define SRST_H_SPDIF_RX2 367
+#define SRST_M_SPDIF_RX2 368
+
+#define SRST_H_SAI8_8CH 369
+#define SRST_M_SAI8_8CH 370
+
+#define SRST_H_VO1_BIU 371
+#define SRST_P_VO1_BIU 372
+#define SRST_M_SAI7_8CH 373
+#define SRST_H_SAI7_8CH 374
+#define SRST_H_SPDIF_TX3 375
+#define SRST_H_SPDIF_TX4 376
+#define SRST_H_SPDIF_TX5 377
+#define SRST_M_SPDIF_TX3 378
+
+#define SRST_DP0 379
+#define SRST_P_VO1_GRF 380
+#define SRST_A_HDCP1_BIU 381
+#define SRST_A_HDCP1 382
+#define SRST_H_HDCP1 383
+#define SRST_HDCP1 384
+#define SRST_H_SAI9_8CH 385
+#define SRST_M_SAI9_8CH 386
+#define SRST_M_SPDIF_TX4 387
+#define SRST_M_SPDIF_TX5 388
+
+#define SRST_GPU 389
+#define SRST_A_S_GPU_BIU 390
+#define SRST_A_M0_GPU_BIU 391
+#define SRST_P_GPU_BIU 392
+#define SRST_P_GPU_GRF 393
+#define SRST_GPU_PVTPLL 394
+#define SRST_P_PVTPLL_GPU 395
+
+#define SRST_A_CENTER_BIU 396
+#define SRST_A_DMA2DDR 397
+#define SRST_A_DDR_SHAREMEM 398
+#define SRST_A_DDR_SHAREMEM_BIU 399
+#define SRST_H_CENTER_BIU 400
+#define SRST_P_CENTER_GRF 401
+#define SRST_P_DMA2DDR 402
+#define SRST_P_SHAREMEM 403
+#define SRST_P_CENTER_BIU 404
+
+#define SRST_LINKSYM_HDMITXPHY0 405
+
+#define SRST_DP0_PIXELCLK 406
+#define SRST_PHY_DP0_TX 407
+#define SRST_DP1_PIXELCLK 408
+#define SRST_DP2_PIXELCLK 409
+
+#define SRST_H_VEPU1_BIU 410
+#define SRST_A_VEPU1_BIU 411
+#define SRST_H_VEPU1 412
+#define SRST_A_VEPU1 413
+#define SRST_VEPU1_CORE 414
+
+#define SRST_P_PHPPHY_CRU 415
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 416
+#define SRST_P_PCIE2_COMBOPHY0 417
+#define SRST_P_PCIE2_COMBOPHY0_GRF 418
+#define SRST_P_PCIE2_COMBOPHY1 419
+#define SRST_P_PCIE2_COMBOPHY1_GRF 420
+
+#define SRST_PCIE0_PIPE_PHY 421
+#define SRST_PCIE1_PIPE_PHY 422
+
+#define SRST_H_CRYPTO_NS 423
+#define SRST_H_TRNG_NS 424
+#define SRST_P_OTPC_NS 425
+#define SRST_OTPC_NS 426
+
+#define SRST_P_HDPTX_GRF 427
+#define SRST_P_HDPTX_APB 428
+#define SRST_P_MIPI_DCPHY 429
+#define SRST_P_DCPHY_GRF 430
+#define SRST_P_BOT0_APB2ASB 431
+#define SRST_P_BOT1_APB2ASB 432
+#define SRST_USB2DEBUG 433
+#define SRST_P_CSIPHY_GRF 434
+#define SRST_P_CSIPHY 435
+#define SRST_P_USBPHY_GRF_0 436
+#define SRST_P_USBPHY_GRF_1 437
+#define SRST_P_USBDP_GRF 438
+#define SRST_P_USBDPPHY 439
+#define SRST_USBDP_COMBO_PHY_INIT 440
+
+#define SRST_USBDP_COMBO_PHY_CMN 441
+#define SRST_USBDP_COMBO_PHY_LANE 442
+#define SRST_USBDP_COMBO_PHY_PCS 443
+#define SRST_M_MIPI_DCPHY 444
+#define SRST_S_MIPI_DCPHY 445
+#define SRST_SCAN_CSIPHY 446
+#define SRST_P_VCCIO6_IOC 447
+#define SRST_OTGPHY_0 448
+#define SRST_OTGPHY_1 449
+#define SRST_HDPTX_INIT 450
+#define SRST_HDPTX_CMN 451
+#define SRST_HDPTX_LANE 452
+#define SRST_HDMITXHDP 453
+
+#define SRST_MPHY_INIT 454
+#define SRST_P_MPHY_GRF 455
+#define SRST_P_VCCIO7_IOC 456
+
+#define SRST_H_PMU1_BIU 457
+#define SRST_P_PMU1_NIU 458
+#define SRST_H_PMU_CM0_BIU 459
+#define SRST_PMU_CM0_CORE 460
+#define SRST_PMU_CM0_JTAG 461
+
+#define SRST_P_CRU_PMU1 462
+#define SRST_P_PMU1_GRF 463
+#define SRST_P_PMU1_IOC 464
+#define SRST_P_PMU1WDT 465
+#define SRST_T_PMU1WDT 466
+#define SRST_P_PMUTIMER 467
+#define SRST_PMUTIMER0 468
+#define SRST_PMUTIMER1 469
+#define SRST_P_PMU1PWM 470
+#define SRST_PMU1PWM 471
+
+#define SRST_P_I2C0 472
+#define SRST_I2C0 473
+#define SRST_S_UART1 474
+#define SRST_P_UART1 475
+#define SRST_PDM0 476
+#define SRST_H_PDM0 477
+
+#define SRST_M_PDM0 478
+#define SRST_H_VAD 479
+
+#define SRST_P_PMU0GRF 480
+#define SRST_P_PMU0IOC 481
+#define SRST_P_GPIO0 482
+#define SRST_DB_GPIO0 483
+
+#endif
diff --git a/include/dt-bindings/soc/qe-fsl,tsa.h b/include/dt-bindings/soc/qe-fsl,tsa.h
new file mode 100644
index 000000000000..3cf3df9c0968
--- /dev/null
+++ b/include/dt-bindings/soc/qe-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_QE_TSA_H
+#define __DT_BINDINGS_SOC_FSL_QE_TSA_H
+
+#define FSL_QE_TSA_NU 0
+#define FSL_QE_TSA_UCC1 1
+#define FSL_QE_TSA_UCC2 2
+#define FSL_QE_TSA_UCC3 3
+#define FSL_QE_TSA_UCC4 4
+#define FSL_QE_TSA_UCC5 5
+
+#endif
diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h
index 218ca22fb056..1b89088a2837 100644
--- a/include/keys/dns_resolver-type.h
+++ b/include/keys/dns_resolver-type.h
@@ -12,8 +12,4 @@
extern struct key_type key_type_dns_resolver;
-extern int request_dns_resolver_key(const char *description,
- const char *callout_info,
- char **data);
-
#endif /* _KEYS_DNS_RESOLVER_TYPE_H */
diff --git a/include/kunit/clk.h b/include/kunit/clk.h
new file mode 100644
index 000000000000..73bc99cefe7b
--- /dev/null
+++ b/include/kunit/clk.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_KUNIT_H
+#define _CLK_KUNIT_H
+
+struct clk;
+struct clk_hw;
+struct device;
+struct device_node;
+struct kunit;
+
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id);
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index);
+
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id);
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id);
+
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk);
+
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw);
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
+ struct clk_hw *hw);
+
+#endif
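
A minimal usage sketch (hypothetical test fixture; ctx->hw is assumed to be a struct clk_hw initialized elsewhere in the test): each helper ties the resource to the test, so the clk is disabled, released and unregistered automatically when the test ends.

static void clk_kunit_example(struct kunit *test)
{
	struct my_clk_ctx *ctx = test->priv;	/* hypothetical fixture */
	struct clk *clk;

	/* Unregistered automatically at test teardown. */
	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, NULL, &ctx->hw));

	clk = clk_hw_get_clk_kunit(test, &ctx->hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

	/* Prepared and enabled for the rest of the test, undone on exit. */
	KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
}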
diff --git a/include/kunit/of.h b/include/kunit/of.h
new file mode 100644
index 000000000000..48d4e70c9666
--- /dev/null
+++ b/include/kunit/of.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_OF_H
+#define _KUNIT_OF_H
+
+#include <kunit/test.h>
+
+struct device_node;
+
+#ifdef CONFIG_OF
+
+void of_node_put_kunit(struct kunit *test, struct device_node *node);
+
+#else
+
+static inline
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ kunit_skip(test, "requires CONFIG_OF");
+}
+
+#endif /* !CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id);
+#else
+
+static inline int
+of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ kunit_skip(test, "requires CONFIG_OF and CONFIG_OF_OVERLAY and CONFIG_OF_EARLY_FLATTREE for root node");
+ return -EINVAL;
+}
+
+#endif
+
+/**
+ * __of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() variant
+ * @test: test context
+ * @overlay_begin: start address of overlay to apply
+ * @overlay_end: end address of overlay to apply
+ *
+ * This is mostly an internal API. See of_overlay_apply_kunit() for the wrapper
+ * that makes this easier to use.
+ *
+ * Similar to of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static inline int __of_overlay_apply_kunit(struct kunit *test,
+ u8 *overlay_begin,
+ const u8 *overlay_end)
+{
+ int unused;
+
+ return of_overlay_fdt_apply_kunit(test, overlay_begin,
+ overlay_end - overlay_begin,
+ &unused);
+}
+
+/**
+ * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays
+ * @test: test context
+ * @overlay_name: name of overlay to apply
+ *
+ * This macro is used to apply a device tree overlay built with the
+ * cmd_dt_S_dtbo rule in scripts/Makefile.lib that has been compiled into the
+ * kernel image or KUnit test module. The overlay is automatically removed when
+ * the test is finished.
+ *
+ * Unit tests that need device tree nodes should compile an overlay file with
+ * @overlay_name\.dtbo.o in their Makefile along with their unit test and then
+ * load the overlay during their test. The @overlay_name matches the filename
+ * of the overlay without the dtbo filename extension. If CONFIG_OF_OVERLAY is
+ * not enabled, the @test will be skipped.
+ *
+ * In the Makefile
+ *
+ * .. code-block:: none
+ *
+ * obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay_test.o kunit_overlay_test.dtbo.o
+ *
+ * In the test
+ *
+ * .. code-block:: c
+ *
+ * static void of_overlay_kunit_of_overlay_apply(struct kunit *test)
+ * {
+ * struct device_node *np;
+ *
+ * KUNIT_ASSERT_EQ(test, 0,
+ * of_overlay_apply_kunit(test, kunit_overlay_test));
+ *
+ * np = of_find_node_by_name(NULL, "test-kunit");
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ * of_node_put(np);
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+#define of_overlay_apply_kunit(test, overlay_name) \
+({ \
+ extern uint8_t __dtbo_##overlay_name##_begin[]; \
+ extern uint8_t __dtbo_##overlay_name##_end[]; \
+ \
+ __of_overlay_apply_kunit((test), \
+ __dtbo_##overlay_name##_begin, \
+ __dtbo_##overlay_name##_end); \
+})
+
+#endif
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
new file mode 100644
index 000000000000..0fc0999d2420
--- /dev/null
+++ b/include/kunit/platform_device.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_PLATFORM_DRIVER_H
+#define _KUNIT_PLATFORM_DRIVER_H
+
+struct completion;
+struct kunit;
+struct platform_device;
+struct platform_driver;
+
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id);
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev);
+
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x);
+
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv);
+
+#endif
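
A minimal usage sketch (the device name is invented for illustration): the alloc/add pair mirrors platform_device_alloc()/platform_device_add(), except the device is deleted and freed automatically when the test finishes.

static void pdev_kunit_example(struct kunit *test)
{
	struct platform_device *pdev;

	pdev = kunit_platform_device_alloc(test, "kunit-demo", -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);

	/* On the platform bus from here on; removed again at test teardown. */
	KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
}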
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
index 0dfe35feeec6..efff77b58dd6 100644
--- a/include/kunit/visibility.h
+++ b/include/kunit/visibility.h
@@ -22,6 +22,7 @@
* EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
* enabled. Must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
* in test file in order to use symbols.
+ * @symbol: the symbol identifier to export
*/
#define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
EXPORTED_FOR_KUNIT_TESTING)
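
A usage sketch following the rule stated in the comment above (my_internal_helper is a hypothetical symbol):

/* In the file that owns the symbol; exported only when CONFIG_KUNIT=y. */
EXPORT_SYMBOL_IF_KUNIT(my_internal_helper);

/* In the KUnit test module that wants to call it. */
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);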
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 35d4ca4f6122..e08aeec5d936 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -10,7 +10,7 @@
#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
-#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#define KVM_ARMV8_PMU_MAX_COUNTERS 32
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
@@ -19,14 +19,14 @@ struct kvm_pmc {
};
struct kvm_pmu_events {
- u32 events_host;
- u32 events_guest;
+ u64 events_host;
+ u64 events_guest;
};
struct kvm_pmu {
struct irq_work overflow_work;
struct kvm_pmu_events events;
- struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
int irq_num;
bool created;
bool irq_level;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0687a442fec7..1655c4c23a78 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -107,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_LPIC,
+ ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
};
@@ -386,7 +387,7 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-typedef void (*wmi_notify_handler) (u32 value, void *context);
+typedef void (*wmi_notify_handler) (union acpi_object *data, void *context);
int wmi_instance_count(const char *guid);
@@ -401,7 +402,6 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance,
extern acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data);
extern acpi_status wmi_remove_notify_handler(const char *guid);
-extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
@@ -1343,6 +1343,8 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr);
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
@@ -1529,6 +1531,12 @@ void acpi_arm_init(void);
static inline void acpi_arm_init(void) { }
#endif
+#ifdef CONFIG_RISCV
+void acpi_riscv_init(void);
+#else
+static inline void acpi_riscv_init(void) { }
+#endif
+
#ifdef CONFIG_ACPI_PCC
void acpi_init_pcc(void);
#else
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 50d88bf1498d..0ded9220d379 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -26,6 +26,19 @@ static inline u32 acpi_pm_read_early(void)
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
+/**
+ * acpi_pmtmr_register_suspend_resume_callback() - Register callback for suspend and resume event
+ *
+ * @cb: Callback triggered on suspend and resume
+ * @data: Data passed with the callback
+ */
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
+
+/**
+ * acpi_pmtmr_unregister_suspend_resume_callback() - Remove registered callback for suspend and resume event
+ */
+void acpi_pmtmr_unregister_suspend_resume_callback(void);
+
#else
static inline u32 acpi_pm_read_early(void)
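
A usage sketch of the new registration API (the consumer, callback and flag are hypothetical): a driver that records whether the PM timer can currently be trusted.

static bool pmtmr_suspended;	/* hypothetical consumer state */

static void pmtmr_state_cb(void *data, bool suspend)
{
	*(bool *)data = suspend;
}

static int __init pmtmr_consumer_init(void)
{
	acpi_pmtmr_register_suspend_resume_callback(pmtmr_state_cb,
						    &pmtmr_suspended);
	return 0;
}

static void __exit pmtmr_consumer_exit(void)
{
	acpi_pmtmr_unregister_suspend_resume_callback();
}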
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8c61ccd161ba..1f0a9ff23a2c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -70,7 +70,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
- * Instead we will accound all module allocations to a single counter.
+ * Instead we will account all module allocations to a single counter.
*/
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
@@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return;
+
ref->ct = &tag->ct;
+}
+
+static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ __alloc_tag_ref_set(ref, tag);
/*
 	 * We need to increment the call counter every time we have a new
* allocation or when we split a large allocation into smaller ones.
@@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
this_cpu_inc(tag->counters->calls);
}
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
-}
-
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
+ alloc_tag_ref_set(ref, tag);
this_cpu_add(tag->counters->bytes, bytes);
}
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 958a55bcc708..dda2f3ea89cb 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -105,7 +105,7 @@ enum amba_vendor {
AMBA_VENDOR_LSI = 0xb6,
};
-extern struct bus_type amba_bustype;
+extern const struct bus_type amba_bustype;
#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
diff --git a/include/linux/args.h b/include/linux/args.h
index 8ff60a54eb7d..2e8e65d975c7 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -17,9 +17,9 @@
* that as _n.
*/
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/* This counts to 15. Any more, it will return 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/* Concatenate two parameters, but allow them to be expanded beforehand. */
#define __CONCAT(a, b) a ## b
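
A quick standalone check of the widened counter (hypothetical compile-time test, not from the tree): each argument shifts the descending 15..0 list one slot to the right, so _n always lands on the argument count, and the GNU ", ##X" extension deletes the leading comma for an empty list.

/* Requires GCC/Clang for the ", ##X" extension; arguments are counted,
 * never evaluated, so bare tokens are fine. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

_Static_assert(COUNT_ARGS() == 0, "empty list counts as zero");
_Static_assert(COUNT_ARGS(a) == 1, "one argument");
_Static_assert(COUNT_ARGS(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) == 15,
	       "fifteen arguments is now the maximum");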
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 083f85653716..f59099a213d0 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -115,6 +115,70 @@
/* KVM "vendor specific" services */
#define ARM_SMCCC_KVM_FUNC_FEATURES 0
#define ARM_SMCCC_KVM_FUNC_PTP 1
+/* Start of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2
+#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3
+#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6
+#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
+/* End of pKVM hypercall range */
#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
#define ARM_SMCCC_KVM_NUM_FUNCS 128
@@ -137,6 +201,30 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_PTP)
+#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 89683f31ae12..a28e2a6a13d0 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -73,6 +73,11 @@
#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A)
+#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B)
+#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C)
+#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D)
+#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E)
/*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -265,6 +270,11 @@ struct ffa_indirect_msg_hdr {
u32 size;
};
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
+struct ffa_send_direct_data2 {
+ unsigned long data[14]; /* x4-x17 */
+};
+
struct ffa_mem_region_addr_range {
/* The base IPA of the constituent memory region, aligned to 4 kiB */
u64 address;
@@ -426,6 +436,8 @@ struct ffa_msg_ops {
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
+ int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid,
+ struct ffa_send_direct_data2 *data);
};
struct ffa_mem_ops {
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 8e177b67e82f..f41395264dca 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -247,6 +247,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@@ -1121,6 +1122,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
@@ -1266,13 +1268,22 @@ struct virtchnl_proto_hdrs {
u8 pad[3];
/**
 	 * specify where the protocol header starts from.
+ * must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
**/
 	int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
- struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 68da8dba5162..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -203,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a46e2047bea4..faceadb040f9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -324,8 +324,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
- unsigned *segs, struct bio_set *bs, unsigned max_bytes);
+int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index de98049b7ded..676f8f860c47 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -25,9 +25,10 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -96,12 +97,18 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
{
return 0;
}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
struct scatterlist *s)
{
return 0;
}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes,
+ u32 seed)
+{
+ return -EINVAL;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8d304b1d16b1..4fecf46ef681 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -149,10 +149,7 @@ struct request {
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
unsigned short nr_integrity_segments;
-#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *crypt_ctx;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 36ed96133217..dce7615c35e7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -71,6 +71,9 @@ struct block_device {
struct partition_meta_info *bd_meta_info;
int bd_writers;
+#ifdef CONFIG_SECURITY
+ void *bd_security;
+#endif
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
@@ -248,11 +251,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b7664d593486..50c3b959da28 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -968,8 +968,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
/*
* Access functions for manipulating queue properties
*/
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1187,7 +1185,8 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
+static inline unsigned int
+queue_limits_max_zone_append_sectors(const struct queue_limits *l)
{
unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3b94ec161e8c..19d8ca8ac960 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -294,6 +294,7 @@ struct bpf_map {
* same prog type, JITed flag and xdp_has_frags flag.
*/
struct {
+ const struct btf_type *attach_func_proto;
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
@@ -694,6 +695,11 @@ enum bpf_type_flag {
/* DYNPTR points to xdp_buff */
DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -731,8 +737,6 @@ enum bpf_arg_type {
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
- ARG_PTR_TO_INT, /* pointer to int */
- ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
@@ -743,7 +747,7 @@ enum bpf_arg_type {
ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
- ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
@@ -807,6 +811,12 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
union {
struct {
@@ -919,6 +929,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
+ bool is_ldsx;
union {
int ctx_field_size;
struct {
@@ -927,6 +938,7 @@ struct bpf_insn_access_aux {
};
};
struct bpf_verifier_log *log; /* for verbose logs */
+ bool is_retval; /* is accessing function return value ? */
};
static inline void
@@ -965,6 +977,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -1795,6 +1809,7 @@ struct bpf_struct_ops_common_value {
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
@@ -1851,6 +1866,10 @@ static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
@@ -2227,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
@@ -3184,7 +3212,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
@@ -3192,6 +3222,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 1de7ece5d36d..aefcd6564251 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
@@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
{
}
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7b776dae36e5..4513372c5bc8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -23,6 +23,8 @@
* (in the "-8,-16,...,-512" form)
*/
#define TMP_STR_BUF_LEN 320
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -371,6 +373,10 @@ struct bpf_jmp_history_entry {
u32 prev_idx : 22;
/* special flags, e.g., whether insn is doing register stack spill/load */
u32 flags : 10;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
};
/* Maximum number of register states that can exist at once */
@@ -572,6 +578,14 @@ struct bpf_insn_aux_data {
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -641,6 +655,10 @@ struct bpf_subprog_info {
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
@@ -648,6 +666,8 @@ struct bpf_subprog_info {
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
@@ -762,6 +782,8 @@ struct bpf_verifier_env {
* e.g., in reg_type_str() to generate reg_type string
*/
char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
@@ -905,6 +927,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type)
type == PTR_TO_XDP_SOCK;
}
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cffb43133c68..b8a583194c4a 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -580,6 +580,7 @@ bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -654,6 +655,10 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
{
return false;
}
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ return -EOPNOTSUPP;
+}
#endif
static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 14acf1bbe0ce..932139c5d46f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -199,8 +199,7 @@ void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
gfp_t gfp);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
@@ -258,18 +257,18 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ struct folio **foliop, get_block_t *get_block);
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
int generic_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, struct page **, void **,
+ unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 20aa3c2d89f7..014a88c41073 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,8 +7,8 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size);
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ae04035b6cbe..47ae4c4d924c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -172,7 +172,11 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
@@ -210,6 +214,14 @@ struct cgroup_subsys_state {
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
+
+ /*
+ * Keep track of the total number of visible descendant CSSes.
+ * The total number of dying CSSes is tracked in
+ * css->cgroup->nr_dying_subsys[ssid].
+ * Protected by cgroup_mutex.
+ */
+ int nr_descendants;
};
/*
@@ -470,6 +482,12 @@ struct cgroup {
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+ /*
+ * Keep track of total number of dying CSSes at and below this cgroup.
+ * Protected by cgroup_mutex.
+ */
+ int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
+
struct cgroup_root *root;
/*
@@ -775,6 +793,11 @@ struct cgroup_subsys {
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
+
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c60ba0ab1462..f8ef47f8a634 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -28,8 +29,6 @@
struct kernel_clone_args;
-#ifdef CONFIG_CGROUPS
-
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
@@ -39,6 +38,8 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+#ifdef CONFIG_CGROUPS
+
enum {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
@@ -854,4 +855,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index d9e613803df1..a3d3e888cf1f 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -98,7 +98,7 @@ const volatile void * __must_check_fn(const volatile void *val)
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
*
* CLASS(fdget, f)(fd);
- * if (!f.file)
+ * if (!fd_file(f))
* return -EBADF;
*
* // use 'f' without concern
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4a537260f655..7e43caabb54b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -394,6 +394,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
NULL, (flags), (fixed_rate), 0, 0, true)
/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
+/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
* @dev: device that is registering this clock
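As a brief sketch of the new devm_ macro above (the clock and parent names are hypothetical, not from this diff), registering a fixed 32768 Hz clock whose parent is described by clk_parent_data might look like:

	static const struct clk_parent_data osc_parent = { .fw_name = "osc" };
	struct clk_hw *hw;

	hw = devm_clk_hw_register_fixed_rate_parent_data(dev, "foo-32k",
							 &osc_parent, 0, 32768);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

Being device-managed, the clock needs no explicit unregistration on driver unbind.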
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0fa56d672532..851a0f2cf42c 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -641,6 +641,32 @@ struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL,
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled, and its rate has been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -982,6 +1008,13 @@ static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
return NULL;
}
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
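A hedged probe-path sketch of the new helper (the "ref" clock name and 25 MHz rate are hypothetical): it collapses devm_clk_get_optional() + clk_set_rate() + clk_prepare_enable() into one call, and a missing optional clock yields a NULL dummy rather than an error:

	struct clk *refclk;

	refclk = devm_clk_get_optional_enabled_with_rate(&pdev->dev, "ref",
							 25000000);
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);
	/* refclk may be NULL (dummy clk) when the firmware omits it. */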
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9db877506ea8..d15b64f51336 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -52,4 +52,20 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+#endif
+
#endif
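A usage sketch of the new folio interface (my_cma is an assumed, already-reserved CMA area): allocation and release are paired at a fixed order, and the stubs above let the same code degrade gracefully when CONFIG_CMA is off:

	struct folio *folio = cma_alloc_folio(my_cma, 4, GFP_KERNEL);

	if (!folio)
		return -ENOMEM;
	/* ... use the 16-page folio ... */
	cma_free_folio(my_cma, folio);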
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2df665fa2964..ec55bcce4146 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -242,6 +242,9 @@ static inline void *offset_to_ptr(const int *off)
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) BUILD_BUG_ON_ZERO(__annotated(p, nonstring))
+
/*
* This returns a constant expression while determining if an argument is
* a constant expression, most importantly without evaluating the argument.
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index f14c275950b5..1a957ea2f4fe 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -421,6 +421,13 @@ struct ftrace_likely_data {
#define __member_size(p) __builtin_object_size(p, 1)
#endif
+/* Determine if an attribute has been applied to a variable. */
+#if __has_builtin(__builtin_has_attribute)
+#define __annotated(var, attr) __builtin_has_attribute(var, attr)
+#else
+#define __annotated(var, attr) (false)
+#endif
+
/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
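Together with __must_be_cstr() from compiler.h above, __annotated() lets string helpers refuse arrays carrying the "nonstring" attribute at build time. A minimal sketch of the pattern (the my_strlen() wrapper is hypothetical):

	#define my_strlen(s)	(__must_be_cstr(s) + strlen(s))

	char tag[8] __attribute__((nonstring));	/* deliberately not NUL-terminated */
	char name[8];				/* ordinary C string */

	/*
	 * my_strlen(name) compiles; my_strlen(tag) trips BUILD_BUG_ON_ZERO()
	 * on compilers that provide __builtin_has_attribute(). Elsewhere
	 * __annotated() evaluates to false and the check is a no-op.
	 */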
diff --git a/include/linux/console.h b/include/linux/console.h
index 31a8f5b85f5d..eba367bf605d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -16,7 +16,9 @@
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/irq_work.h>
#include <linux/rculist.h>
+#include <linux/rcuwait.h>
#include <linux/types.h>
#include <linux/vesa.h>
@@ -303,7 +305,7 @@ struct nbcon_write_context {
/**
* struct console - The console descriptor structure
* @name: The name of the console driver
- * @write: Write callback to output messages (Optional)
+ * @write: Legacy write callback to output messages (Optional)
* @read: Read callback for console input (Optional)
* @device: The underlying TTY device driver (Optional)
* @unblank: Callback to unblank the console (Optional)
@@ -320,10 +322,14 @@ struct nbcon_write_context {
* @data: Driver private data
* @node: hlist node for the console list
*
- * @write_atomic: Write callback for atomic context
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
*/
struct console {
char name[16];
@@ -345,11 +351,121 @@ struct console {
struct hlist_node node;
/* nbcon console specific members */
- bool (*write_atomic)(struct console *con,
- struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_atomic:
+ *
+ * NBCON callback to write out text in any context. (Optional)
+ *
+ * This callback is called with the console already acquired. However,
+ * a higher priority context is allowed to take it over by default.
+ *
+ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
+ * around any code where the takeover is not safe, for example, when
+ * manipulating the serial port registers.
+ *
+ * nbcon_enter_unsafe() will fail if the context has lost the console
+ * ownership in the meantime. In this case, the callback is no longer
+ * allowed to go forward. It must back out immediately and carefully.
+ * The buffer content is also no longer trusted since it no longer
+ * belongs to the context.
+ *
+ * The callback should allow the takeover whenever it is safe. It
+ * increases the chance of seeing messages when the system is in trouble.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire_nobuf() can be used.
+ * However, on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
+ *
+ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
+ */
+ void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_thread:
+ *
+ * NBCON callback to write out text in task context.
+ *
+ * This callback must be called only in task context with both
+ * device_lock() and the nbcon console acquired with
+ * NBCON_PRIO_NORMAL.
+ *
+ * The same rules for console ownership verification and unsafe
+ * section handling apply as with write_atomic().
+ *
+ * The console ownership handling is necessary for synchronization
+ * against write_atomic() which is synchronized only via the context.
+ *
+ * The device_lock() provides the primary serialization for operations
+ * on the device. It might be as relaxed (mutex)[*] or as tight
+ * (disabled preemption and interrupts) as needed. It allows
+ * the kthread to operate in the least restrictive mode[**].
+ *
+ * [*] Standalone nbcon_context_try_acquire() is not safe with
+ * preemption enabled, see nbcon_owner_matches(). But it
+ * can be safe when always called in preemptible context
+ * under the device_lock().
+ *
+ * [**] The device_lock() makes sure that nbcon_context_try_acquire()
+ * would never need to spin which is important especially with
+ * PREEMPT_RT.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @device_lock:
+ *
+ * NBCON callback to begin synchronization with driver code.
+ *
+ * Console drivers typically must deal with access to the hardware
+ * via user input/output (such as an interactive login shell) and
+ * output of kernel messages via printk() calls. This callback is
+ * called by the printk-subsystem whenever it needs to synchronize
+ * with hardware access by the driver. It should be implemented to
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
+ * The callback is always called from task context. It may use any
+ * synchronization method required by the driver.
+ *
+ * IMPORTANT: The callback MUST disable migration. The console driver
+ * may be using a synchronization mechanism that already takes
+ * care of this (such as spinlocks). Otherwise this function must
+ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
+ * driver does not need it.
+ */
+ void (*device_lock)(struct console *con, unsigned long *flags);
+
+ /**
+ * @device_unlock:
+ *
+ * NBCON callback to finish synchronization with driver code.
+ *
+ * It is the counterpart to device_lock().
+ *
+ * This callback is always called from task context. It must
+ * appropriately re-enable migration (depending on how device_lock()
+ * disabled migration).
+ *
+ * The flags argument is the value of the same variable that was
+ * passed to device_lock().
+ */
+ void (*device_unlock)(struct console *con, unsigned long flags);
+
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
};
#ifdef CONFIG_LOCKDEP
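A minimal driver-side sketch of the @write_atomic contract documented above (the foo_* names are hypothetical; only the nbcon_* calls and struct fields come from this header):

	static void foo_write_atomic(struct console *con,
				     struct nbcon_write_context *wctxt)
	{
		struct foo_port *port = con->data;	/* driver private data */

		/* Bracket raw register access in an unsafe section. */
		if (!nbcon_enter_unsafe(wctxt))
			return;	/* ownership lost: back out, touch nothing */

		foo_emit(port, wctxt->outbuf, wctxt->len);

		/* Safe point again: a takeover may happen from here on. */
		nbcon_exit_unsafe(wctxt);
	}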
@@ -378,28 +494,34 @@ extern void console_list_unlock(void) __releases(console_mutex);
extern struct hlist_head console_list;
/**
- * console_srcu_read_flags - Locklessly read the console flags
+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
+ * console
* @con: struct console pointer of console to read flags from
*
- * This function provides the necessary READ_ONCE() and data_race()
- * notation for locklessly reading the console flags. The READ_ONCE()
- * in this function matches the WRITE_ONCE() when @flags are modified
- * for registered consoles with console_srcu_write_flags().
+ * Locklessly reading @con->flags provides a consistent read value because
+ * there is at most one CPU modifying @con->flags and that CPU is using only
+ * read-modify-write operations to do so.
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
+ * and that no exit/cleanup routines will run if the console is currently
+ * undergoing unregistration.
*
- * Only use this function to read console flags when locklessly
- * iterating the console list via srcu.
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
*
* Context: Any context.
+ * Return: The current value of the @con->flags field.
*/
static inline short console_srcu_read_flags(const struct console *con)
{
WARN_ON_ONCE(!console_srcu_read_lock_is_held());
/*
- * Locklessly reading console->flags provides a consistent
- * read value because there is at most one CPU modifying
- * console->flags and that CPU is using only read-modify-write
- * operations to do so.
+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
*/
return data_race(READ_ONCE(con->flags));
}
@@ -477,13 +599,19 @@ static inline bool console_is_registered(const struct console *con)
hlist_for_each_entry(con, &console_list, node)
#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
#endif
extern int console_set_on_cmdline;
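As a usage sketch of console_srcu_read_flags() (assumed to mirror how printk walks registered consoles): the flags are read only while holding the SRCU read lock:

	struct console *con;
	int cookie;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);

		if (!(flags & CON_ENABLED))
			continue;
		/* ... emit the pending record to this console ... */
	}
	console_srcu_read_unlock(cookie);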
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 8a78fabeafc3..af9fe87a0922 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- ct_user_enter(CONTEXT_USER);
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- ct_user_exit(CONTEXT_USER);
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_USER);
+ __ct_user_enter(CT_STATE_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_USER);
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = __ct_state();
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_exit(prev_ctx);
return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
{
if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_enter(prev_ctx);
}
}
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_GUEST);
+ __ct_user_enter(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
@@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void)
static __always_inline bool context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_GUEST);
+ __ct_user_exit(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
@@ -115,13 +115,17 @@ extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
/*
- * Is the current CPU in an extended quiescent state?
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
*
* No ordering, as we are sampling CPU-local information.
*/
-static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_is_watching_curr_cpu(void)
{
- return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
/*
@@ -142,9 +146,9 @@ static __always_inline bool warn_rcu_enter(void)
* lots of the actual reporting also relies on RCU.
*/
preempt_disable_notrace();
- if (rcu_dynticks_curr_cpu_in_eqs()) {
+ if (!rcu_is_watching_curr_cpu()) {
ret = true;
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
}
return ret;
@@ -153,7 +157,7 @@ static __always_inline bool warn_rcu_enter(void)
static __always_inline void warn_rcu_exit(bool rcu)
{
if (rcu)
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
preempt_enable_notrace();
}
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index bbff5f7f8803..7b8433d5a8ef 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -7,22 +7,22 @@
#include <linux/context_tracking_irq.h>
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_IDLE = 1,
- CONTEXT_USER = 2,
- CONTEXT_GUEST = 3,
- CONTEXT_MAX = 4,
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
};
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
+/* Odd value for watching, else even. */
+#define CT_RCU_WATCHING CT_STATE_MAX
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
+#define CT_STATE_MASK (CT_STATE_MAX - 1)
+#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
@@ -39,8 +39,8 @@ struct context_tracking {
atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
- long dynticks_nesting; /* Track process nesting level. */
- long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
#endif
};
@@ -56,47 +56,47 @@ static __always_inline int __ct_state(void)
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
-static __always_inline int ct_dynticks(void)
+static __always_inline int ct_rcu_watching(void)
{
- return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu(int cpu)
+static __always_inline int ct_rcu_watching_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline long ct_dynticks_nesting(void)
+static __always_inline long ct_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nesting);
+ return __this_cpu_read(context_tracking.nesting);
}
-static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+static __always_inline long ct_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nesting;
+ return ct->nesting;
}
-static __always_inline long ct_dynticks_nmi_nesting(void)
+static __always_inline long ct_nmi_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+ return __this_cpu_read(context_tracking.nmi_nesting);
}
-static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nmi_nesting;
+ return ct->nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
@@ -113,7 +113,7 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu)
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
-static inline bool context_tracking_enabled_this_cpu(void)
+static __always_inline bool context_tracking_enabled_this_cpu(void)
{
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
@@ -123,14 +123,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
*/
static __always_inline int ct_state(void)
{
int ret;
if (!context_tracking_enabled())
- return CONTEXT_DISABLED;
+ return CT_STATE_DISABLED;
preempt_disable();
ret = __ct_state();
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 0904ba010341..edeb8532ce0f 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -42,9 +42,35 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
-extern void do_coredump(const kernel_siginfo_t *siginfo);
+extern int do_coredump(const kernel_siginfo_t *siginfo);
+
+/*
+ * Logging for the coredump code, ratelimited.
+ * The TGID and comm fields are added to the message.
+ */
+
+#define __COREDUMP_PRINTK(Level, Format, ...) \
+ do { \
+ char comm[TASK_COMM_LEN]; \
+ \
+ get_task_comm(comm, current); \
+ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \
+ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \
+	} while (0)
+
+#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__)
+#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
+
#else
-static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
+static inline int do_coredump(const kernel_siginfo_t *siginfo)
+{
+ /* Coredump support is not available, can't fail. */
+ return 0;
+}
+
+#define coredump_report(...)
+#define coredump_report_failure(...)
+
#endif
#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
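A brief usage sketch of the logging macros (the format strings and corename variable are hypothetical): callers supply only the event text, since the macros prepend the dumping task's TGID and comm and ratelimit the output:

	coredump_report("writing core to %s", corename);
	coredump_report_failure("core dump interrupted by fatal signal");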
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d4d2f4d1d7cb..e0e19d9c1323 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -577,12 +577,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor.
- */
-#define LATENCY_MULTIPLIER (1000)
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9316c39260e0..2361ed4d2b15 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -144,7 +144,8 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
@@ -152,7 +153,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
@@ -209,7 +209,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index de4cf0ee96f7..835e7b793f6a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -99,6 +99,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
+#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
@@ -106,6 +107,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
+#else
+static inline void cpuset_memory_pressure_bump(void) { }
+#endif
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
@@ -113,7 +117,6 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
-extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
@@ -246,11 +249,6 @@ static inline int cpuset_mem_spread_node(void)
return 0;
}
-static inline int cpuset_slab_spread_node(void)
-{
- return 0;
-}
-
static inline int cpuset_do_page_mem_spread(void)
{
return 0;
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 27c546bfc6d4..a67f2c4940e9 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -233,7 +233,6 @@ struct damos_quota {
unsigned long charge_addr_from;
/* For prioritization */
- unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
/* For feedback loop */
@@ -630,6 +629,8 @@ struct damon_ctx {
unsigned long next_ops_update_sis;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
/* public: */
struct task_struct *kdamond;
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/device.h b/include/linux/device.h
index 34eb20f5966f..b4bde8d22697 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -707,6 +707,8 @@ struct device_physical_location {
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -748,7 +750,7 @@ struct device {
struct dev_pin_info *pins;
#endif
struct dev_msi_info msi;
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
@@ -822,6 +824,9 @@ struct device {
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
/**
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index edbe13d00776..d7e30d4f7503 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -12,7 +12,7 @@
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
/*
* Record the mapping of CPU physical to DMA addresses for a given region.
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 29c5650c1038..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -79,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence)
for (index = 0, fence = dma_fence_array_first(head); fence; \
++(index), fence = dma_fence_array_next(head, index))
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 064bad725061..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,14 +9,13 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 02a1c825896b..b7773201414c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -13,20 +13,7 @@
struct cma;
struct iommu_ops;
-/*
- * Values for struct dma_map_ops.flags:
- *
- * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
- * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
- * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
- * coherent and it's not an SWIOTLB buffer.
- */
-#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
-#define DMA_F_CAN_SKIP_SYNC (1 << 1)
-
struct dma_map_ops {
- unsigned int flags;
-
void *(*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
@@ -37,11 +24,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
- enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs);
- void (*free_noncontiguous)(struct device *dev, size_t size,
- struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -88,7 +70,7 @@ struct dma_map_ops {
unsigned long (*get_merge_boundary)(struct device *dev);
};
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -103,7 +85,7 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return NULL;
@@ -112,7 +94,7 @@ static inline void set_dma_ops(struct device *dev,
const struct dma_map_ops *dma_ops)
{
}
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;
@@ -219,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
- struct sg_table sgt;
- struct page **pages;
-};
-#define sgt_handle(sgt) \
- container_of((sgt), struct dma_sgt_handle, sgt)
-
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f693aafe221f..1524da363734 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -559,13 +557,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -575,13 +571,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
return 0;
}
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
- return -EIO;
+ return;
dev->dma_parms->min_align_mask = min_align_mask;
- return 0;
}
#ifndef dma_get_cache_alignment
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 6969391580d2..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 1e491c5dcac2..2dea217629d0 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -136,8 +136,6 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
-void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
- u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma),
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index d275736230b3..81f7b623d0ba 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -15,6 +15,7 @@
struct dpll_device;
struct dpll_pin;
+struct dpll_pin_esync;
struct dpll_device_ops {
int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
@@ -83,6 +84,13 @@ struct dpll_pin_ops {
int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *ffo, struct netlink_ext_ack *extack);
+ int (*esync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack);
+ int (*esync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_frequency {
@@ -111,6 +119,13 @@ struct dpll_pin_phase_adjust_range {
s32 max;
};
+struct dpll_pin_esync {
+ u64 freq;
+ const struct dpll_pin_frequency *range;
+ u8 range_num;
+ u8 pulse;
+};
+
struct dpll_pin_properties {
const char *board_label;
const char *panel_label;
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b0fb775a600d..1e50cdb83ae5 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -108,7 +108,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
user_exit_irqoff();
instrumentation_begin();
diff --git a/include/linux/err.h b/include/linux/err.h
index b5d9bb2a2349..a4dacd745fcf 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -41,6 +41,9 @@ static inline void * __must_check ERR_PTR(long error)
return (void *) error;
}
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
/**
* PTR_ERR - Extract the error code from an error pointer.
* @ptr: An error pointer.
@@ -51,6 +54,9 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR - Detect an error pointer.
* @ptr: The pointer to check.
@@ -61,6 +67,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Read an error pointer from the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
* @ptr: The pointer to check.
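A sketch of the new percpu variants (alloc_counter() is hypothetical): they keep the __percpu address-space annotation intact across error paths instead of laundering it through plain pointers:

	static int __percpu *alloc_counter(void)
	{
		int __percpu *p = alloc_percpu(int);

		if (!p)
			return ERR_PTR_PCPU(-ENOMEM);
		return p;
	}

	/* caller */
	int __percpu *c = alloc_counter();

	if (IS_ERR_PCPU(c))
		return PTR_ERR_PCPU(c);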
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 0ed47d00549b..30114c25ad12 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -645,7 +645,7 @@ static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
}
/**
- * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
* An Ethernet frame should have a minimum size of 60 bytes. This function
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 989c94eddb2b..12f6dc567598 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -727,9 +727,13 @@ struct kernel_ethtool_ts_info {
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
* @cap_rss_ctx_supported: indicates if the driver supports RSS
- * contexts.
+ * contexts via legacy API, drivers implementing @create_rxfh_context
+ * do not have to set this bit.
* @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
* RSS.
+ * @rxfh_per_ctx_key: device supports setting different RSS key for each
+ * additional context. Netlink API should report hfunc, key, and input_xfrm
+ * for every context, not just context 0.
* @rxfh_indir_space: max size of RSS indirection tables, if indirection table
* size as returned by @get_rxfh_indir_size may change during lifetime
* of the device. Leave as 0 if the table size is constant.
@@ -951,6 +955,7 @@ struct ethtool_ops {
u32 cap_link_lanes_supported:1;
u32 cap_rss_ctx_supported:1;
u32 cap_rss_sym_xor_supported:1;
+ u32 rxfh_per_ctx_key:1;
u32 rxfh_indir_space;
u16 rxfh_key_space;
u16 rxfh_priv_size;
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index fae0dfb9a9c8..aba91335273a 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -23,8 +23,10 @@ struct phy_device;
int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
void ethnl_cable_test_free(struct phy_device *phydev);
void ethnl_cable_test_finished(struct phy_device *phydev);
-int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result);
-int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm);
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src);
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src);
int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
@@ -54,14 +56,14 @@ static inline void ethnl_cable_test_free(struct phy_device *phydev)
static inline void ethnl_cable_test_finished(struct phy_device *phydev)
{
}
-static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
- u8 result)
+static inline int ethnl_cable_test_result_with_src(struct phy_device *phydev,
+ u8 pair, u8 result, u32 src)
{
return -EOPNOTSUPP;
}
-static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
- u8 pair, u32 cm)
+static inline int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev,
+ u8 pair, u32 cm, u32 src)
{
return -EOPNOTSUPP;
}
@@ -119,4 +121,19 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev)
}
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
+
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return ethnl_cable_test_result_with_src(phydev, pair, result,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return ethnl_cable_test_fault_length_with_src(phydev, pair, cm,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 01bee2b289c2..b0b821edfd97 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,7 +19,6 @@
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -28,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -278,7 +278,7 @@ struct node_footer {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index f3f0b97b1675..3f49f3df6af5 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,12 +25,18 @@ struct space_resv {
#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
-#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
- FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_COLLAPSE_RANGE | \
- FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE | \
- FALLOC_FL_UNSHARE_RANGE)
+/*
+ * Mask of all supported fallocate modes. Only one can be set at a time.
+ *
+ * In addition to the mode bit, the mode argument can also encode flags.
+ * FALLOC_FL_KEEP_SIZE is the only supported flag so far.
+ */
+#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE)
/* on ia32 l_start is on a 32-bit boundary */
#if defined(CONFIG_X86_64)
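An illustrative validation, modeled as an assumption on how the core fallocate path uses this mask (not code from this diff): strip the lone permitted flag, then require at most one mode bit inside the mask:

	static int check_falloc_mode(int mode)
	{
		int op = mode & ~FALLOC_FL_KEEP_SIZE;

		if (op & ~FALLOC_FL_MODE_MASK)
			return -EOPNOTSUPP;	/* unknown bits set */
		if (op & (op - 1))
			return -EINVAL;		/* more than one mode */
		return 0;
	}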
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 354413950d34..8c829d28dcf3 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -2,13 +2,17 @@
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
+#include <linux/atomic.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
-#include <linux/atomic.h>
/*
* For explanation of the elements of this struct, see
@@ -51,6 +55,28 @@ int setup_fault_attr(struct fault_attr *attr, char *str);
bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -87,10 +113,6 @@ static inline void fault_config_init(struct fault_config *config,
#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
-#endif /* CONFIG_FAULT_INJECTION */
-
-struct kmem_cache;
-
#ifdef CONFIG_FAIL_PAGE_ALLOC
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
diff --git a/include/linux/fb.h b/include/linux/fb.h
index db7d97b10964..267b59ead432 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -510,6 +510,7 @@ struct fb_info {
void *par;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
/* This will go away
@@ -601,6 +602,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
+extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
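A probe-path sketch of the managed helper (foo_setup_fb() is hypothetical): the devm_ variant removes the matching unregister_framebuffer() from unwind and remove paths:

	static int foo_probe(struct platform_device *pdev)
	{
		struct fb_info *info = foo_setup_fb(pdev);

		if (IS_ERR(info))
			return PTR_ERR(info);
		/* unregistered automatically when the device is unbound */
		return devm_register_framebuffer(&pdev->dev, info);
	}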
diff --git a/include/linux/file.h b/include/linux/file.h
index 59b146a14dca..f98de143245a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -11,6 +11,7 @@
#include <linux/posix_types.h>
#include <linux/errno.h>
#include <linux/cleanup.h>
+#include <linux/err.h>
struct file;
@@ -35,51 +36,52 @@ static inline void fput_light(struct file *file, int fput_needed)
fput(file);
}
+/* either a reference to struct file + flags
+ * (cloned vs. borrowed, pos locked), with
+ * flags stored in lower bits of value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_task(struct task_struct *task, unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (fd.word & FDPUT_FPUT)
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
@@ -96,6 +98,7 @@ extern void put_unused_fd(unsigned int fd);
DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
get_unused_fd_flags(flags), unsigned flags)
+DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))
/*
* take_fd() will take care to set @fd to -EBADF ensuring that
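A hedged caller sketch of the now-opaque struct fd: the word packs the file pointer and the FDPUT_* flags into one value, so callers go through fd_empty()/fd_file() instead of touching members. vfs_fsync() is just a stand-in callee here.

static int my_fsync(unsigned int ufd)
{
	struct fd f = fdget(ufd);
	int ret;

	if (fd_empty(f))		/* replaces the old !f.file test */
		return -EBADF;
	ret = vfs_fsync(fd_file(f), 0);	/* fd_file() masks off the flag bits */
	fdput(f);			/* calls fput() only if FDPUT_FPUT is set */
	return ret;
}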
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index daee999d05f3..bb44224c6676 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -420,28 +420,38 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
#ifdef CONFIG_FILE_LOCKING
static inline int break_lease(struct inode *inode, unsigned int mode)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ if (!list_empty_careful(&flctx->flc_lease))
return __break_lease(inode, mode, FL_LEASE);
return 0;
}
static inline int break_deleg(struct inode *inode, unsigned int mode)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ if (!list_empty_careful(&flctx->flc_lease))
return __break_lease(inode, mode, FL_DELEG);
return 0;
}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index b6672ff61407..7d7578a8eac1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Relative call */
#define BPF_CALL_REL(TGT) \
@@ -1616,7 +1626,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
_all_pass || _selected_sk ? SK_PASS : SK_DROP; \
})
-static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
const int ifindex, struct sock **psk)
@@ -1653,7 +1663,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
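A small sketch of the new gotol encoding next to the classic jump; the offsets are illustrative only. BPF_JMP_A() carries a 16-bit offset in insn->off, while BPF_JMP32_A() carries a 32-bit offset in insn->imm for targets beyond +/-32k instructions.

struct bpf_insn prog[] = {
	BPF_JMP_A(1),			/* short hop: fits the 16-bit off field */
	BPF_JMP32_A(70000),		/* gotol: out of range for off, fine for imm */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};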
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 1cca14cf5652..b632eec3ab52 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -134,6 +134,8 @@ struct fw_card {
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -509,6 +511,7 @@ union fw_iso_callback {
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
@@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on the workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full. If it
+ * is required to process the context in the current execution context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
+
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
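A hedged sketch of the intended usage, assuming a driver that previously processed completions directly in its completion path and now defers to the card's workqueue:

static void my_isoc_complete(struct fw_iso_context *ctx)
{
	/* callable from any context; the registered callback then runs on card->isoc_wq */
	fw_iso_context_schedule_flush_completions(ctx);
}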
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
new file mode 100644
index 000000000000..9b85a3f028d1
--- /dev/null
+++ b/include/linux/firmware/imx/sm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _SCMI_IMX_H
+#define _SCMI_IMX_H
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#define SCMI_IMX_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */
+#define SCMI_IMX_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */
+#define SCMI_IMX_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */
+#define SCMI_IMX_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */
+#define SCMI_IMX_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */
+#define SCMI_IMX_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */
+
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+
+#endif
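A minimal caller sketch for the new i.MX SCMI misc-control API; the selector value 1 is a made-up example, and the get call reads back a single value for the same control.

static int my_select_pdm_clk(void)
{
	u32 num = 1, val;
	int ret;

	ret = scmi_imx_misc_ctrl_set(SCMI_IMX_CTRL_PDM_CLK_SEL, 1);
	if (ret)
		return ret;
	return scmi_imx_misc_ctrl_get(SCMI_IMX_CTRL_PDM_CLK_SEL, &num, &val);
}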
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
new file mode 100644
index 000000000000..955680c3bb5f
--- /dev/null
+++ b/include/linux/folio_queue.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Queue of folios definitions
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FOLIO_QUEUE_H
+#define _LINUX_FOLIO_QUEUE_H
+
+#include <linux/pagevec.h>
+
+/*
+ * Segment in a queue of running buffers. Each segment can hold a number of
+ * folios and a portion of the queue can be referenced with the ITER_FOLIOQ
+ * iterator. Non-folio elements (such as gaps) may also be inserted into the
+ * queue.
+ *
+ * Explicit prev and next pointers are used instead of a list_head to make it
+ * easier to add segments to tail and remove them from the head without the
+ * need for a lock.
+ */
+struct folio_queue {
+ struct folio_batch vec; /* Folios in the queue segment */
+ u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ struct folio_queue *next; /* Next queue segment or NULL */
+	struct folio_queue	*prev;		/* Previous queue segment or NULL */
+ unsigned long marks; /* 1-bit mark per folio */
+ unsigned long marks2; /* Second 1-bit mark per folio */
+ unsigned long marks3; /* Third 1-bit mark per folio */
+#if PAGEVEC_SIZE > BITS_PER_LONG
+#error marks is not big enough
+#endif
+};
+
+static inline void folioq_init(struct folio_queue *folioq)
+{
+ folio_batch_init(&folioq->vec);
+ folioq->next = NULL;
+ folioq->prev = NULL;
+ folioq->marks = 0;
+ folioq->marks2 = 0;
+ folioq->marks3 = 0;
+}
+
+static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
+{
+ return PAGEVEC_SIZE;
+}
+
+static inline unsigned int folioq_count(struct folio_queue *folioq)
+{
+ return folio_batch_count(&folioq->vec);
+}
+
+static inline bool folioq_full(struct folio_queue *folioq)
+{
+ return folioq_count(folioq) >= folioq_nr_slots(folioq);
+}
+
+static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks);
+}
+
+static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks);
+}
+
+static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks);
+}
+
+static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks2);
+}
+
+static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks2);
+}
+
+static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks2);
+}
+
+static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks3);
+}
+
+static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks3);
+}
+
+static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks3);
+}
+
+static inline unsigned int __folio_order(struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_flags_1 & 0xff;
+}
+
+static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = __folio_order(folio);
+ return slot;
+}
+
+static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = __folio_order(folio);
+ folioq_mark(folioq, slot);
+ return slot;
+}
+
+static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->vec.folios[slot];
+}
+
+static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->orders[slot];
+}
+
+static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
+{
+ return PAGE_SIZE << folioq_folio_order(folioq, slot);
+}
+
+static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
+{
+ folioq->vec.folios[slot] = NULL;
+ folioq_unmark(folioq, slot);
+ folioq_unmark2(folioq, slot);
+ folioq_unmark3(folioq, slot);
+}
+
+#endif /* _LINUX_FOLIO_QUEUE_H */
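A usage sketch under the header's single-writer assumptions; the folios f0 and f1 are assumed to come from elsewhere.

static void my_fill_and_walk(struct folio *f0, struct folio *f1)
{
	struct folio_queue fq;
	unsigned int slot;

	folioq_init(&fq);
	folioq_append(&fq, f0);
	folioq_append_mark(&fq, f1);	/* append and set the first mark bit */

	for (slot = 0; slot < folioq_count(&fq); slot++)
		pr_info("slot %u: %zu bytes, marked %d\n", slot,
			folioq_folio_size(&fq, slot),
			folioq_is_marked(&fq, slot));
}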
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6ca11e241a24..eae5b67e4a15 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -146,8 +146,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13))
+/* FMODE_* bit 13 */
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)(1 << 14))
@@ -409,10 +408,10 @@ struct address_space_operations {
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
@@ -683,7 +682,8 @@ struct inode {
#endif
/* Misc */
- unsigned long i_state;
+ u32 i_state;
+ /* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -746,6 +746,21 @@ struct inode {
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+/*
+ * Get bit address from inode->i_state to use with wait_var_event()
+ * infrastructure.
+ */
+#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
+
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit);
+
+static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
+{
+ /* Caller is responsible for correct memory barriers. */
+ wake_up_var(inode_state_wait_address(inode, bit));
+}
+
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
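A hedged wake/wait sketch built on the helpers above; per the comment, memory ordering is the caller's responsibility, so these two fragments only show the shape of the pattern.

/* waker: clear I_NEW under i_lock, then wake the derived address */
spin_lock(&inode->i_lock);
inode->i_state &= ~I_NEW;
spin_unlock(&inode->i_lock);
inode_wake_up_bit(inode, __I_NEW);	/* barriers left to the caller */

/* waiter: sleep until I_NEW clears */
wait_var_event(inode_state_wait_address(inode, __I_NEW),
	       !(READ_ONCE(inode->i_state) & I_NEW));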
@@ -948,6 +963,7 @@ static inline unsigned imajor(const struct inode *inode)
}
struct fown_struct {
+ struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
@@ -987,52 +1003,69 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
-/*
- * f_{lock,count,pos_lock} members can be highly contended and share
- * the same cacheline. f_{lock,mode} are very frequently used together
- * and so share the same cacheline as well. The read-mostly
- * f_{path,inode,op} are kept on a separate cacheline.
+/**
+ * struct file - Represents a file
+ * @f_count: reference count
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_path: path of the file
+ * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
+ * @f_pos: file position
+ * @f_security: LSM security context of this file
+ * @f_owner: file owner
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
*/
struct file {
+ atomic_long_t f_count;
+ spinlock_t f_lock;
+ fmode_t f_mode;
+ const struct file_operations *f_op;
+ struct address_space *f_mapping;
+ void *private_data;
+ struct inode *f_inode;
+ unsigned int f_flags;
+ unsigned int f_iocb_flags;
+ const struct cred *f_cred;
+ /* --- cacheline 1 boundary (64 bytes) --- */
+ struct path f_path;
union {
- /* fput() uses task work when closing and freeing file (default). */
- struct callback_head f_task_work;
- /* fput() must use workqueue (most kernel threads). */
- struct llist_node f_llist;
- unsigned int f_iocb_flags;
+ /* regular files (with FMODE_ATOMIC_POS) and directories */
+ struct mutex f_pos_lock;
+ /* pipes */
+ u64 f_pipe;
};
-
- /*
- * Protects f_ep, f_flags.
- * Must not be taken from IRQ context.
- */
- spinlock_t f_lock;
- fmode_t f_mode;
- atomic_long_t f_count;
- struct mutex f_pos_lock;
- loff_t f_pos;
- unsigned int f_flags;
- struct fown_struct f_owner;
- const struct cred *f_cred;
- struct file_ra_state f_ra;
- struct path f_path;
- struct inode *f_inode; /* cached value */
- const struct file_operations *f_op;
-
- u64 f_version;
+ loff_t f_pos;
#ifdef CONFIG_SECURITY
- void *f_security;
+ void *f_security;
#endif
- /* needed for tty driver, and maybe others */
- void *private_data;
-
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct fown_struct *f_owner;
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
- /* Used by fs/eventpoll.c to link all the hooks to this file */
- struct hlist_head *f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
- struct address_space *f_mapping;
- errseq_t f_wb_err;
- errseq_t f_sb_err; /* for syncfs */
+ struct hlist_head *f_ep;
+#endif
+ union {
+ struct callback_head f_task_work;
+ struct llist_node f_llist;
+ struct file_ra_state f_ra;
+ freeptr_t f_freeptr;
+ };
+ /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -1077,6 +1110,12 @@ struct file_lease;
#define OFFT_OFFSET_MAX type_max(off_t)
#endif
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+ return READ_ONCE(file->f_owner);
+}
+
extern void send_sigio(struct fown_struct *fown, int fd, int band);
static inline struct inode *file_inode(const struct file *f)
@@ -1125,7 +1164,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
+extern int send_sigurg(struct file *file);
/*
* sb->s_flags. Note that these mirror the equivalent MS_* flags where
@@ -1190,6 +1229,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
/* Possible states of 'frozen' field */
enum {
@@ -1268,7 +1308,7 @@ struct super_block {
time64_t s_time_min;
time64_t s_time_max;
#ifdef CONFIG_FSNOTIFY
- __u32 s_fsnotify_mask;
+ u32 s_fsnotify_mask;
struct fsnotify_sb_info *s_fsnotify_info;
#endif
@@ -1684,7 +1724,7 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
- percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], _THIS_IP_)
/**
* __sb_write_started - check if sb freeze level is held
@@ -2074,6 +2114,8 @@ struct file_operations {
#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
/* Contains huge pages */
#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+/* Treat loff_t as unsigned (e.g., /dev/mem) */
+#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2373,8 +2415,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*
 * I_REFERENCED	Marks the inode as recently referenced on the LRU list.
*
- * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
- *
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
* wb stat updates to grab the i_pages lock. See
@@ -2397,30 +2437,32 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* i_count.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
+ *
+ * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
+ * upon. There's one free address left.
*/
-#define I_DIRTY_SYNC (1 << 0)
-#define I_DIRTY_DATASYNC (1 << 1)
-#define I_DIRTY_PAGES (1 << 2)
-#define __I_NEW 3
+#define __I_NEW 0
#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE (1 << 4)
-#define I_FREEING (1 << 5)
-#define I_CLEAR (1 << 6)
-#define __I_SYNC 7
+#define __I_SYNC 1
#define I_SYNC (1 << __I_SYNC)
-#define I_REFERENCED (1 << 8)
-#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
+#define __I_LRU_ISOLATING 2
+#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
+
+#define I_DIRTY_SYNC (1 << 3)
+#define I_DIRTY_DATASYNC (1 << 4)
+#define I_DIRTY_PAGES (1 << 5)
+#define I_WILL_FREE (1 << 6)
+#define I_FREEING (1 << 7)
+#define I_CLEAR (1 << 8)
+#define I_REFERENCED (1 << 9)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-#define I_CREATING (1 << 15)
-#define I_DONTCACHE (1 << 16)
-#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_NETFS_WB (1 << 18)
-#define __I_LRU_ISOLATING 19
-#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
+#define I_WB_SWITCH (1 << 12)
+#define I_OVL_INUSE (1 << 13)
+#define I_CREATING (1 << 14)
+#define I_DONTCACHE (1 << 15)
+#define I_SYNC_QUEUED (1 << 16)
+#define I_PINNING_NETFS_WB (1 << 17)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2554,10 +2596,17 @@ struct super_block *sget(struct file_system_type *type,
struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
- do { if (fops) module_put((fops)->owner); } while(0)
+#define fops_get(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
+})
+
+#define fops_put(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ if (_fops) \
+ module_put((_fops)->owner); \
+})
+
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
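The rewritten macros evaluate their argument exactly once, so expressions with side effects are now safe. A hedged fragment (my_dev_fops is a hypothetical struct file_operations):

const struct file_operations *ops = fops_get(&my_dev_fops);

if (!ops)
	return -ENODEV;	/* owning module already gone */
/* ... use ops ... */
fops_put(ops);		/* drops the module reference */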
@@ -3100,7 +3149,14 @@ static inline bool is_zero_ino(ino_t ino)
return (u32)ino == 0;
}
-extern void __iget(struct inode * inode);
+/*
+ * inode->i_lock must be held
+ */
+static inline void __iget(struct inode *inode)
+{
+ atomic_inc(&inode->i_count);
+}
+
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
@@ -3183,6 +3239,8 @@ extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
@@ -3220,7 +3278,9 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
}
#endif
+bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
+void inode_dio_wait_interruptible(struct inode *inode);
/**
 * inode_dio_begin - signal start of a direct I/O request
@@ -3244,7 +3304,7 @@ static inline void inode_dio_begin(struct inode *inode)
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ wake_up_var(&inode->i_dio_count);
}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
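The dio wakeup now keys on i_dio_count itself rather than a dedicated i_state bit. A bracketing sketch of the unchanged caller pattern:

/* submission side */
inode_dio_begin(inode);
/* ... submit bio; the completion handler calls inode_dio_end(inode) ... */

/* elsewhere, e.g. truncate: block until all direct I/O has drained */
inode_dio_wait(inode);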
@@ -3337,7 +3397,7 @@ extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+ struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
@@ -3461,7 +3521,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOIO;
}
if (flags & RWF_ATOMIC) {
if (rw_type != WRITE)
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index f3512fddf3d7..5b51c3d582d6 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -41,6 +41,7 @@
#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
+#include <linux/slab.h>
#include <linux/types.h>
struct genradix_root;
@@ -48,10 +49,63 @@ struct genradix_root;
#define GENRADIX_NODE_SHIFT 9
#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
struct __genradix {
struct genradix_root *root;
};
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
/*
* NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
@@ -128,6 +182,30 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
void *__genradix_ptr(struct __genradix *, size_t);
/**
@@ -142,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t);
__genradix_ptr(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx)))
-void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
/**
* genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
@@ -157,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
(__genradix_cast(_radix) \
__genradix_ptr_alloc(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx), \
- _gfp))
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
struct genradix_iter {
size_t offset;
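A hedged sketch of the fast path the new inlined helpers enable; struct my_rec and the table name are hypothetical, and a zero-initialized static genradix starts out valid.

struct my_rec {
	u64 id;
};

static GENRADIX(struct my_rec) my_recs;

static struct my_rec *my_rec_lookup_or_alloc(size_t idx)
{
	/* inlined tree walk first; the allocating slow path runs only on a miss */
	return genradix_ptr_alloc_inlined(&my_recs, idx, GFP_KERNEL);
}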
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f53f76e0b17e..a951de920e20 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -319,7 +319,7 @@ static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
- return __folio_alloc_node(gfp, order, numa_node_id());
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid)
@@ -446,4 +446,27 @@ extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
+#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
#endif /* __LINUX_GFP_H */
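A hedged allocation sketch: order 18 corresponds to 1 GiB with 4 KiB pages, and __GFP_COMP is mandatory per the WARN_ON above.

static struct folio *my_alloc_1g(int nid)
{
	/* release with folio_put(), not free_contig_range(), per the comment above */
	return folio_alloc_gigantic(18, GFP_KERNEL | __GFP_COMP, nid, NULL);
}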
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 313be4ad79fd..65db9349f905 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -215,7 +215,8 @@ enum {
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules.
+ * implicit rules. Please note that all of them must be used along with
+ * %__GFP_DIRECT_RECLAIM flag.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
@@ -246,11 +247,14 @@ enum {
* cannot handle allocation failures. The allocation could block
* indefinitely but will never return with failure. Testing for
* failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
* New users should be evaluated carefully (and the flag should be
* used only when there is no reasonable failure policy) but it is
* definitely preferable to use the flag rather than opencode endless
* loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 063f71b18a7c..2d105be7bbc3 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -17,15 +17,9 @@
struct device;
/* make these flag values available regardless of GPIO kconfig options */
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+#define GPIOF_IN ((1 << 0))
+#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
+#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
/* Gpio pin is active-low */
#define GPIOF_ACTIVE_LOW (1 << 2)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 1533c9dcd3a6..121d5b8bc867 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -46,7 +46,7 @@ struct hid_item {
__s16 s16;
__u32 u32;
__s32 s32;
- __u8 *longdata;
+ const __u8 *longdata;
} data;
};
@@ -600,9 +600,9 @@ struct hid_driver;
struct hid_ll_driver;
struct hid_device { /* device report descriptor */
- __u8 *dev_rdesc;
+ const __u8 *dev_rdesc;
unsigned dev_rsize;
- __u8 *rdesc;
+ const __u8 *rdesc;
unsigned rsize;
struct hid_collection *collection; /* List of HID collections */
unsigned collection_size; /* Number of allocated hid_collections */
@@ -822,7 +822,7 @@ struct hid_driver {
struct hid_usage *usage, __s32 value);
void (*report)(struct hid_device *hdev, struct hid_report *report);
- __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
unsigned int *size);
int (*input_mapping)(struct hid_device *hdev,
@@ -940,6 +940,8 @@ extern void hidinput_report_event(struct hid_device *hid, struct hid_report *rep
extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage);
int hid_set_field(struct hid_field *, unsigned, __s32);
int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
int interrupt);
@@ -953,7 +955,7 @@ struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
enum hid_report_type type, unsigned int id,
unsigned int application);
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
enum hid_report_type type, unsigned int id,
unsigned int field_index,
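A hedged driver sketch of the new lookup helper; the application and usage codes are passed through from the caller rather than guessed here.

static int my_set_usage(struct hid_device *hdev, unsigned int application,
			unsigned int usage, __s32 value)
{
	struct hid_field *field = hid_find_field(hdev, HID_OUTPUT_REPORT,
						 application, usage);

	if (!field)
		return -ENOENT;
	return hid_set_field(field, 0, value);	/* 0: first usage index in the field */
}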
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index d4d063cf63b5..6a47223e6460 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
int hid_bpf_device_init(struct hid_device *hid);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
u8 *data, u32 *size, int interrupt,
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index cd67f4ca5599..18fd30a288de 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -32,6 +32,7 @@ struct hidraw_list {
struct hidraw *hidraw;
struct list_head node;
struct mutex read_mutex;
+ bool revoked;
};
#ifdef CONFIG_HIDRAW
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e25d9ebfdf89..67d0ab3c3bba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -76,9 +76,9 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
/*
* Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
- * it.
+ * it. The same applies to PFNMAPs, where there is neither a page* nor a pagecache.
*/
-#define THP_ORDERS_ALL_FILE_DAX \
+#define THP_ORDERS_ALL_SPECIAL \
(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
@@ -87,7 +87,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* Mask of all large folio orders supported for THP.
*/
#define THP_ORDERS_ALL \
- (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
#define TVA_IN_PF (1 << 1) /* Page fault handler */
@@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
+#define split_folio(f) split_folio_to_list(f, NULL)
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
@@ -114,6 +116,53 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
+
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
+
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long transparent_hugepage_flags;
@@ -269,41 +318,6 @@ struct thpsize {
#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
-enum mthp_stat_item {
- MTHP_STAT_ANON_FAULT_ALLOC,
- MTHP_STAT_ANON_FAULT_FALLBACK,
- MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_SWPOUT,
- MTHP_STAT_SWPOUT_FALLBACK,
- MTHP_STAT_SHMEM_ALLOC,
- MTHP_STAT_SHMEM_FALLBACK,
- MTHP_STAT_SHMEM_FALLBACK_CHARGE,
- MTHP_STAT_SPLIT,
- MTHP_STAT_SPLIT_FAILED,
- MTHP_STAT_SPLIT_DEFERRED,
- __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
- unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
- if (order <= 0 || order > PMD_ORDER)
- return;
-
- this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -314,14 +328,29 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list_to_order(page, NULL, 0);
+ struct folio *folio = page_folio(page);
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * split_huge_page() locks the page before splitting and
+ * expects the same page that has been split to be locked when
+ * returned. split_folio(page_folio(page)) cannot be used here
+ * because it converts the page to folio and passes the head
+ * page to be split.
+ */
+ return split_huge_page_to_list_to_order(page, NULL, ret);
}
-void deferred_split_folio(struct folio *folio);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
@@ -342,6 +371,17 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
@@ -410,11 +450,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
@@ -470,7 +505,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
}
static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
return false;
}
@@ -484,7 +519,13 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_folio(struct folio *folio) {}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ return 0;
+}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
@@ -555,11 +596,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
return;
@@ -585,6 +621,19 @@ static inline int next_order(unsigned long *orders, int prev)
{
return 0;
}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
+
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int split_folio_to_list_to_order(struct folio *folio,
@@ -598,7 +647,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
#endif /* _LINUX_HUGE_MM_H */
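A counting sketch: both helpers silently ignore out-of-range orders, and the new NR_ANON item tracks live folios, so it is adjusted in both directions via mod_mthp_stat() rather than only bumped.

static void my_account_new_anon(struct folio *folio)
{
	int order = folio_order(folio);

	count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
	mod_mthp_stat(order, MTHP_STAT_NR_ANON, 1);	/* decremented again on free */
}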
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 45bf05ad5c53..98c47c394b89 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -127,9 +127,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
@@ -899,10 +896,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_movable_supported(h))
- return GFP_HIGHUSER_MOVABLE;
- else
- return GFP_HIGHUSER;
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
@@ -1251,7 +1249,7 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return page_count(virt_to_page(pte)) > 1;
@@ -1287,8 +1285,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
-#if defined(CONFIG_HUGETLB_PAGE) && \
- defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
/*
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e94314760aab..5c6a421ad580 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -481,7 +481,6 @@ devm_hwmon_device_register_with_info(struct device *dev,
const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 377def497298..388ce71a29a9 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -761,6 +761,9 @@ struct i2c_adapter {
struct regulator *bus_regulator;
struct dentry *debugfs;
+
+ /* 7bit address space */
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 074f632868d9..2a1ed05d5782 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -278,6 +278,20 @@ enum i3c_bus_mode {
};
/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
* enum i3c_addr_slot_status - I3C address slot status
* @I3C_ADDR_SLOT_FREE: address is free
* @I3C_ADDR_SLOT_RSVD: address is reserved
@@ -436,6 +450,7 @@ struct i3c_bus {
* NULL.
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
+ * @set_speed: adjust I3C open drain mode timing.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -464,6 +479,7 @@ struct i3c_master_controller_ops {
struct i3c_ibi_slot *slot);
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
};
/**
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index 8336b2f6f834..56c280eb2d4f 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,9 +21,9 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
-extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
+extern void clockevent_i8253_disable(void);
extern void setup_pit_timer(void);
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 839d1e48b85e..c44bf6e80ecb 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -42,7 +42,7 @@ struct rmnet_map_ul_csum_header {
/* csum_info field:
* OFFSET: where (offset in bytes) to insert computed checksum
- * UDP: 1 = UDP checksum (zero checkum means no checksum)
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
* ENABLED: 1 = checksum computation requested
*/
#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index b8d8d69eba29..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -34,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers reporesenting colums
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that use used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -88,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
diff --git a/drivers/platform/x86/intel/vsec.h b/include/linux/intel_vsec.h
index e23e76129691..11ee185566c3 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/include/linux/intel_vsec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VSEC_H
-#define _VSEC_H
+#ifndef _INTEL_VSEC_H
+#define _INTEL_VSEC_H
#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
@@ -67,15 +67,49 @@ enum intel_vsec_quirks {
VSEC_QUIRK_EARLY_HW = BIT(4),
};
-/* Platform specific data */
+/**
+ * struct pmt_callbacks - Callback infrastructure for PMT devices
+ * ->read_telem() when specified, called by client driver to access PMT data (instead
+ * of direct copy).
+ * @pdev: PCI device reference for the callback's use
+ * @guid: ID of data to access
+ * @data: buffer for the data to be copied
+ * @count: size of buffer
+ */
+struct pmt_callbacks {
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count);
+};
+
+/**
+ * struct intel_vsec_platform_info - Platform specific data
+ * @parent: parent device in the auxbus chain
+ * @headers: list of headers to define the PMT client devices to create
+ * @priv_data: private data, usable by parent devices, currently a callback
+ * @caps: bitmask of PMT capabilities for the given headers
+ * @quirks: bitmask of VSEC device quirks
+ * @base_addr: allow a base address to be specified (rather than derived)
+ */
struct intel_vsec_platform_info {
struct device *parent;
struct intel_vsec_header **headers;
+ void *priv_data;
unsigned long caps;
unsigned long quirks;
u64 base_addr;
};
+/**
+ * struct intel_vsec_device - Auxbus specific device information
+ * @auxdev: auxbus device struct for auxbus access
+ * @pcidev: pci device associated with the device
+ * @resource: any resources shared by the parent
+ * @ida: id reference
+ * @num_resources: number of resources
+ * @id: xarray id
+ * @priv_data: any private data needed
+ * @quirks: specified quirks
+ * @base_addr: base address of entries (if specified)
+ */
struct intel_vsec_device {
struct auxiliary_device auxdev;
struct pci_dev *pcidev;
@@ -103,6 +137,13 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device
return container_of(auxdev, struct intel_vsec_device, auxdev);
}
+#if IS_ENABLED(CONFIG_INTEL_VSEC)
void intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info);
+#else
+static inline void intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+}
+#endif
#endif
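A hedged parent-driver sketch wiring up the new callback plumbing; every my_* name is hypothetical.

static int my_read_telem(struct pci_dev *pdev, u32 guid, u64 *data, u32 count)
{
	/* copy 'count' quadwords of telemetry identified by 'guid' into 'data' */
	return 0;
}

static struct pmt_callbacks my_callbacks = { .read_telem = my_read_telem };

static void my_probe(struct pci_dev *pdev)
{
	struct intel_vsec_platform_info info = {
		.parent = &pdev->dev,
		.priv_data = &my_callbacks,
	};

	intel_vsec_register(pdev, &info);	/* stubbed out when CONFIG_INTEL_VSEC=n */
}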
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 3f30c88e0b4c..457151f9f263 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -276,7 +276,7 @@ struct irq_affinity_notify {
#define IRQ_AFFINITY_MAX_SETS 4
/**
- * struct irq_affinity - Description for automatic irq affinity assignements
+ * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
@@ -594,7 +594,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
@@ -609,7 +609,7 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused)
}
#endif
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
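Handlers simply drop the long-unused argument. A sketch; MY_SOFTIRQ is hypothetical, since real slots are fixed members of the softirq enumeration.

static void my_softirq_action(void)	/* was: (struct softirq_action *h) */
{
	/* bottom-half work */
}

static int __init my_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
	return 0;
}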
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index f9a81761bfce..b1ecfc3cd5bc 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -171,6 +171,10 @@ struct io_pgtable_cfg {
u64 ttbr[4];
u32 n_ttbrs;
} apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
};
};
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 447fbfd32215..c189d36ad55e 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -23,6 +23,15 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
return sqe->cmd;
}
+static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
+}
+#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
+ io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
+ ((pdu_type *)&(cmd)->pdu) \
+)
+
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
@@ -48,6 +57,9 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
+/* Execute the request from a blocking context */
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -67,6 +79,9 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
}
+static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+}
#endif
/*
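A hedged sketch of the new pdu accessor: the BUILD_BUG_ON behind io_uring_cmd_to_pdu() turns an oversized per-command struct into a compile-time error. struct my_cmd_pdu is hypothetical.

struct my_cmd_pdu {
	u32 stage;
	u64 user_addr;
};

static struct my_cmd_pdu *my_pdu(struct io_uring_cmd *ioucmd)
{
	return io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
}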
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 3315005df117..4b9ba523978d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -239,6 +239,9 @@ struct io_ring_ctx {
struct io_rings *rings;
struct percpu_ref refs;
+ clockid_t clockid;
+ enum tk_offsets clock_offset;
+
enum task_work_notify_mode notify_method;
unsigned sq_thread_idle;
} ____cacheline_aligned_in_smp;
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6fc1c858013d..4ad12a3c8bae 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -257,11 +257,7 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length));
-
+ const struct iomap_ops *ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
@@ -277,6 +273,13 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
+
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode, loff_t pos,
+ loff_t length, ssize_t written, unsigned flag,
+ struct iomap *iomap, iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..508beaa44c39
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+	vunmap(vaddr)
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
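
use_dma_iommu() gives the DMA mapping core a cheap per-device branch between
the direct and IOMMU paths, with use_dma_iommu() collapsing to false when
CONFIG_IOMMU_DMA is off. A sketch of the dispatch shape this enables (the
caller is illustrative, not the actual dma-mapping code):

static dma_addr_t example_map(struct device *dev, struct page *page,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	if (use_dma_iommu(dev))
		return iommu_dma_map_page(dev, page, offset, size, dir, attrs);

	/* ...otherwise take the dma-direct path (elided)... */
	return DMA_MAPPING_ERROR;
}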
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index ffc3a949f837..30f832a60ccb 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -6,17 +6,17 @@
#ifndef __LINUX_IOMMUFD_H
#define __LINUX_IOMMUFD_H
-#include <linux/types.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
struct device;
-struct iommufd_device;
-struct page;
-struct iommufd_ctx;
-struct iommufd_access;
struct file;
struct iommu_group;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct page;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index db1249cd9692..b25377b6ea98 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task_is_realtime(task))
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
index f0e99fc7dd8b..2bd1661fe9ad 100644
--- a/include/linux/ioremap.h
+++ b/include/linux/ioremap.h
@@ -4,6 +4,7 @@
#include <linux/kasan.h>
#include <asm/pgtable.h>
+#include <asm/vmalloc.h>
#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
/*
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index 270454a6703d..c4aa58032faf 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -10,6 +10,7 @@
#include <linux/uio.h>
#include <linux/bvec.h>
+#include <linux/folio_queue.h>
typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
@@ -141,6 +142,60 @@ size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
}
/*
+ * Handle ITER_FOLIOQ.
+ */
+static __always_inline
+size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ if (slot == folioq_nr_slots(folioq)) {
+ /* The iterator may have been extended. */
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t part, remain, consumed;
+ size_t fsize;
+ void *base;
+
+ if (!folio)
+ break;
+
+ fsize = folioq_folio_size(folioq, slot);
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= fsize) {
+ skip = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->folioq_slot = slot;
+ iter->folioq = folioq;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
* Handle ITER_XARRAY.
*/
static __always_inline
@@ -249,6 +304,8 @@ size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
return iterate_bvec(iter, len, priv, priv2, step);
if (iov_iter_is_kvec(iter))
return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
@@ -271,4 +328,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
}
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step function.
+ * @priv2: More data for the step function.
+ * @step: Step function; given mapped kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A step function, @step, must be provided; since UBUF and IOVEC iterators are
+ * not handled, it is only ever given mapped kernel addresses and no pinning is
+ * performed.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and a value equal to @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
#endif /* _LINUX_IOV_ITER_H */
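
The step-function contract documented above (return the number of bytes of the
segment left unprocessed; 0 means the segment was fully consumed) is easiest to
see with a small example. A sketch of a step that copies every mapped segment
into a flat buffer passed through @priv (helper names are illustrative):

/* Copy each segment to priv + progress; never leave anything unprocessed. */
static size_t copy_step(void *base, size_t progress, size_t len,
			void *priv, void *priv2)
{
	memcpy((char *)priv + progress, base, len);
	return 0;
}

static size_t copy_iter_to_buf(struct iov_iter *iter, void *buf, size_t len)
{
	return iterate_and_advance_kernel(iter, len, buf, NULL, copy_step);
}

The return value is the number of bytes copied, and the iterator is advanced
by the same amount.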
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 383a0ea2ab91..a6e2aadbb91b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -89,6 +89,7 @@ struct ipv6_devconf {
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;
__u8 ra_honor_pio_life;
+ __u8 ra_honor_pio_pflag;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1f5dbf1f92c9..fa711f80957b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -991,7 +991,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
- * @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -1000,7 +999,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
- unsigned long polarity;
};
/**
@@ -1040,8 +1038,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
- * @type_cache: Cached type register
- * @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as a wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -1068,8 +1064,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
- u32 type_cache;
- u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
index faf0b800b1b0..7494952c5518 100644
--- a/include/linux/irqchip/riscv-imsic.h
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
#include <asm/csr.h>
#define IMSIC_MMIO_PAGE_SHIFT 12
@@ -84,4 +86,11 @@ static inline const struct imsic_global_config *imsic_get_global_config(void)
#endif
+#ifdef CONFIG_ACPI
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode);
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev);
+#else
+static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; }
+#endif
+
#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index de6105f68fec..e432b6a12a32 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -291,7 +291,12 @@ struct irq_domain_chip_generic_info;
* @hwirq_max: Maximum number of interrupts supported by controller
* @direct_max: Maximum value of direct maps;
* Use ~0 for no limit; 0 for no direct mapping
+ * @hwirq_base: The first hardware interrupt number (legacy domains only)
+ * @virq_base: The first Linux interrupt number for legacy domains to
+ * immediately associate the interrupts after domain creation
* @bus_token: Domain bus token
+ * @name_suffix: Optional name suffix to avoid collisions when multiple
+ * domains are added using the same fwnode
* @ops: Domain operation callbacks
* @host_data: Controller private data pointer
* @dgc_info: Generic chip information structure pointer used to
@@ -307,7 +312,10 @@ struct irq_domain_info {
unsigned int size;
irq_hw_number_t hwirq_max;
int direct_max;
+ unsigned int hwirq_base;
+ unsigned int virq_base;
enum irq_domain_bus_token bus_token;
+ const char *name_suffix;
const struct irq_domain_ops *ops;
void *host_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
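
With @hwirq_base and @virq_base, a legacy-style domain can now be described
declaratively in one structure. A sketch of filling it in (values and the
my_domain_ops/priv names are made up; the fwnode field is assumed from the
full structure, of which this hunk shows only part):

struct irq_domain_info info = {
	.fwnode		= fwnode,	/* assumed field, not shown above */
	.size		= 32,
	.hwirq_max	= 32,
	.hwirq_base	= 16,		/* first hw interrupt number */
	.virq_base	= irq_base,	/* pre-allocated Linux irq range */
	.name_suffix	= "-bank0",	/* disambiguates domains on one fwnode */
	.ops		= &my_domain_ops,
	.host_data	= priv,
};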
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 5157d92b6f23..8aef9bb6ad57 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1086,7 +1086,7 @@ struct journal_s
int j_revoke_records_per_block;
/**
- * @j_transaction_overhead:
+ * @j_transaction_overhead_buffers:
*
* Number of blocks each transaction needs for its own bookkeeping
*/
@@ -1675,7 +1675,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
-int jbd2_fc_release_bufs(journal_t *journal);
+void jbd2_fc_release_bufs(journal_t *journal);
/*
* is_journal_abort
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d9f1435a5a13..1220f0fbe5bf 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -418,7 +418,7 @@ extern unsigned long preset_lpj;
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * The maximum jiffy value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
* so use the messy SH_DIV macro to do it. Still all constants.
*/
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 70d6a8f6e25d..00a3bf7c0d8f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -175,13 +175,59 @@ static __always_inline void * __must_check kasan_init_slab_obj(
return (void *)object;
}
-bool __kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip, bool init);
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+ unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+ void *object)
+{
+ if (kasan_enabled())
+ return __kasan_slab_pre_free(s, object, _RET_IP_);
+ return false;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
+ bool still_accessible);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ * @still_accessible: Whether the object contents are still accessible.
+ *
+ * This function informs that a slab object has been freed and is not
+ * supposed to be accessed anymore, except when @still_accessible is set
+ * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
+ * grace period might not have passed yet).
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if KASAN took ownership of the object; false otherwise.
+ */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init)
+ void *object, bool init,
+ bool still_accessible)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, _RET_IP_, init);
+ return __kasan_slab_free(s, object, init, still_accessible);
return false;
}
@@ -371,7 +417,14 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ bool init, bool still_accessible)
{
return false;
}
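
The two-phase interface above lets the slab allocator first veto an unsafe
free (double-free or invalid-free, via kasan_slab_pre_free()) and then hand
the object to KASAN, which may quarantine it (kasan_slab_free() returning
true). A simplified sketch of the call order a free path might use (not the
actual SLUB code):

static bool example_slab_free(struct kmem_cache *s, void *object, bool init)
{
	if (kasan_slab_pre_free(s, object))
		return false;	/* unsafe free: report issued, bail out */

	/* true => KASAN quarantined the object and now owns it */
	if (kasan_slab_free(s, object, init, false))
		return false;

	/* ...return the object to the freelist here... */
	return true;
}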
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 859f4b0c1b2b..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -10,12 +10,11 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
#define KPF_ARCH_3 42
diff --git a/include/linux/key.h b/include/linux/key.h
index 943a432da3ae..074dca3222b9 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -436,9 +436,6 @@ extern key_ref_t keyring_search(key_ref_t keyring,
const char *description,
bool recurse);
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
-
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 88100cc9caba..0ad1ddbb8b99 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -124,7 +124,7 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp
if (!static_branch_likely(&kfence_allocation_key))
return NULL;
#endif
- if (likely(atomic_read(&kfence_allocation_gate)))
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
return NULL;
return __kfence_alloc(s, size, flags);
}
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f68865e19b0b..30baae91b225 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,6 +4,7 @@
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string providing additional information.
+ */
+
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
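
With the change above, dumpers receive a struct kmsg_dump_detail rather than a
bare reason, and the old kmsg_dump() becomes a wrapper passing a NULL
description. A sketch of a dumper written against the new signature (the
logging body is illustrative):

static void my_dump(struct kmsg_dumper *dumper,
		    struct kmsg_dump_detail *detail)
{
	pr_info("dump: reason=%s desc=%s\n",
		kmsg_dump_reason_str(detail->reason),
		detail->description ? detail->description : "(none)");
}

static struct kmsg_dumper my_dumper = {
	.dump		= my_dump,
	.max_reason	= KMSG_DUMP_PANIC,
};

/* registered once at init time with kmsg_dump_register(&my_dumper) */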
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5fcbc254d186..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -269,15 +269,6 @@ extern unsigned long __stop_kprobe_blacklist[];
extern struct kretprobe_blackpoint kretprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else /* !CONFIG_KPROBES_SANITY_TEST */
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
-
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6885603f211b..e5968c3ed4ae 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -611,6 +611,8 @@ enum led_trigger_netdev_modes {
TRIGGER_NETDEV_FULL_DUPLEX,
TRIGGER_NETDEV_TX,
TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
/* Keep last */
__TRIGGER_NETDEV_MAX,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 17394098bee9..9b4a6ff03235 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -55,6 +55,46 @@
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
+/*
+ * Quirk flags bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -338,6 +378,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -362,43 +403,42 @@ enum {
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
- ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
- ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
- ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
- ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
- ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ /*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -660,10 +700,25 @@ struct ata_cpr_log {
struct ata_cpr cpr[] __counted_by(nr_cpr);
};
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -722,13 +777,16 @@ struct ata_device {
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
- /* Command Duration Limits log support */
- u8 cdl[ATA_LOG_CDL_SIZE];
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -874,9 +932,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 *ncq_sense_buf;
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -1064,8 +1119,6 @@ static inline bool ata_port_is_frozen(const struct ata_port *ap)
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
@@ -1129,7 +1182,6 @@ extern int ata_xfer_mode2shift(u8 xfer_mode);
extern const char *ata_mode_string(unsigned int xfer_mask);
extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1190,12 +1242,13 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
-extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
static inline const unsigned int *
@@ -1217,6 +1270,11 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
return -EOPNOTSUPP;
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
static inline int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing,
unsigned long deadline,
@@ -1233,10 +1291,6 @@ static inline int sata_link_resume(struct ata_link *link,
{
return -EOPNOTSUPP;
}
-static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
-{
- return -EOPNOTSUPP;
-}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
@@ -1967,7 +2021,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
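
The conversion above replaces hand-numbered ATA_HORKAGE_* masks with an enum
of bit positions plus masks derived from them, so a gap or collision can no
longer be introduced by hand, and __ATA_QUIRK_MAX can be checked against the
32-bit width of ata_device->quirks. The pattern in isolation (generic names,
not libata's):

enum { __Q_FOO, __Q_BAR, __Q_MAX };	/* bit positions, auto-numbered */

#define Q_FOO	(1U << __Q_FOO)		/* masks derived from positions */
#define Q_BAR	(1U << __Q_BAR)

static_assert(__Q_MAX <= 32, "quirk bits must fit in an unsigned int");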
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index d94bfd9ac8cc..3b9de09871f6 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -37,8 +37,9 @@ static inline bool linkmode_empty(const unsigned long *src)
return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2)
+static inline bool linkmode_andnot(unsigned long *dst,
+ const unsigned long *src1,
+ const unsigned long *src2)
{
return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 1b95fe31051f..61c4b9c41904 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -200,7 +200,7 @@ extern const struct svc_procedure nlmsvc_procedures[24];
extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index c9afcdd9324c..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -145,7 +145,7 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..16eb49761b25
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_SECURITY
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+/*
+ * Capabilities is enabled when CONFIG_SECURITY is enabled.
+ */
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SAFESETID)
+#define SAFESETID_ENABLED 1,
+#else
+#define SAFESETID_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_IMA)
+#define IMA_ENABLED 1,
+#else
+#define IMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_EVM)
+#define EVM_ENABLED 1,
+#else
+#define EVM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_IPE)
+#define IPE_ENABLED 1,
+#else
+#define IPE_ENABLED
+#endif
+
+/*
+ * There is a trailing comma that needs to be accounted for. This is done by
+ * using a skipped first argument in __COUNT_LSMS.
+ */
+#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...)
+#define COUNT_LSMS(args...) __COUNT_LSMS(args)
+
+#define MAX_LSM_COUNT \
+ COUNT_LSMS( \
+ CAPABILITIES_ENABLED \
+ SELINUX_ENABLED \
+ SMACK_ENABLED \
+ APPARMOR_ENABLED \
+ TOMOYO_ENABLED \
+ YAMA_ENABLED \
+ LOADPIN_ENABLED \
+ LOCKDOWN_ENABLED \
+ SAFESETID_ENABLED \
+ BPF_LSM_ENABLED \
+ LANDLOCK_ENABLED \
+ IMA_ENABLED \
+ EVM_ENABLED \
+ IPE_ENABLED)
+
+#else
+
+#define MAX_LSM_COUNT 0
+
+#endif /* CONFIG_SECURITY */
+
+#endif /* __LINUX_LSM_COUNT_H */
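
The counting trick works because every *_ENABLED macro expands to either "1,"
or to nothing, so the concatenation is a run of "1," tokens ending in a
trailing comma; __COUNT_LSMS() absorbs that by treating its first argument as
skipped. A worked expansion for a hypothetical build with capabilities,
SELinux and Yama enabled:

/*
 * MAX_LSM_COUNT
 *  => COUNT_LSMS(1, 1, 1,)		three *_ENABLED macros emitted "1,"
 *  => __COUNT_LSMS(1, 1, 1,)		note the empty trailing argument
 *  => COUNT_ARGS(1, 1, <empty>)	the first "1" landed in skipped_arg
 *  => 3
 */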
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 855db460e08b..9eca013aa5e1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
@@ -114,6 +114,7 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security)
LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
int *xattr_count)
@@ -179,6 +180,8 @@ LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
const char *name)
+LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode,
+ enum lsm_integrity_type type, const void *value, size_t size)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -353,8 +356,7 @@ LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
struct flowi_common *flic)
-LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
-LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void *security)
LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
@@ -374,8 +376,7 @@ LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
u8 port_num)
-LSM_HOOK(int, 0, ib_alloc_security, void **sec)
-LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+LSM_HOOK(int, 0, ib_alloc_security, void *sec)
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -403,7 +404,6 @@ LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
#ifdef CONFIG_KEYS
LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
unsigned long flags)
-LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
@@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
@@ -442,7 +442,6 @@ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
-LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
@@ -452,3 +451,10 @@ LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
+
+LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void)
+
+LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev)
+LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev)
+LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a2ade0ffe9e7..090d1d3e19fe 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -30,19 +30,47 @@
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/xattr.h>
+#include <linux/static_call.h>
+#include <linux/unroll.h>
+#include <linux/jump_label.h>
+#include <linux/lsm_count.h>
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
#include "lsm_hook_defs.h"
#undef LSM_HOOK
+ void *lsm_func_addr;
};
-struct security_hook_heads {
- #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
- #include "lsm_hook_defs.h"
- #undef LSM_HOOK
+/*
+ * @key: static call key as defined by STATIC_CALL_KEY
+ * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP
+ * @hl: The security_hook_list as initialized by the owning LSM.
+ * @active: Enabled when the static call has an LSM hook associated.
+ */
+struct lsm_static_call {
+ struct static_call_key *key;
+ void *trampoline;
+ struct security_hook_list *hl;
+ /* this needs to be true or false based on what the key defaults to */
+ struct static_key_false *active;
} __randomize_layout;
+/*
+ * Table of the static calls for each LSM hook.
+ * Once the LSMs are initialized, their callbacks will be copied to these
+ * tables such that the calls are filled backwards (from last to first).
+ * This way, we can jump directly to the first used static call, and execute
+ * all of them after. This essentially makes the entry point dynamic,
+ * adapting the number of static calls executed to the number of callbacks.
+ */
+struct lsm_static_calls_table {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ struct lsm_static_call NAME[MAX_LSM_COUNT];
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+} __packed __randomize_layout;
+
/**
* struct lsm_id - Identify a Linux Security Module.
* @name: name of the LSM, must be approved by the LSM maintainers
@@ -51,53 +79,45 @@ struct security_hook_heads {
* Contains the information that identifies the LSM.
*/
struct lsm_id {
- const char *name;
- u64 id;
+ const char *name;
+ u64 id;
};
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
+ *
+ * struct security_hook_list - A single LSM hook registration.
+ * @scalls: The beginning of the array of static calls assigned to this hook.
+ * @hook: The callback for the hook.
+ * @lsmid: The identity of the LSM that owns this hook.
*/
struct security_hook_list {
- struct hlist_node list;
- struct hlist_head *head;
- union security_list_options hook;
- const struct lsm_id *lsmid;
+ struct lsm_static_call *scalls;
+ union security_list_options hook;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
* Security blob size or offset data.
*/
struct lsm_blob_sizes {
- int lbs_cred;
- int lbs_file;
- int lbs_inode;
- int lbs_superblock;
- int lbs_ipc;
- int lbs_msg_msg;
- int lbs_task;
- int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
+ int lbs_cred;
+ int lbs_file;
+ int lbs_ib;
+ int lbs_inode;
+ int lbs_sock;
+ int lbs_superblock;
+ int lbs_ipc;
+ int lbs_key;
+ int lbs_msg_msg;
+ int lbs_perf_event;
+ int lbs_task;
+ int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
+ int lbs_tun_dev;
+ int lbs_bdev;
};
-/**
- * lsm_get_xattr_slot - Return the next available slot and increment the index
- * @xattrs: array storing LSM-provided xattrs
- * @xattr_count: number of already stored xattrs (updated)
- *
- * Retrieve the first available slot in the @xattrs array to fill with an xattr,
- * and increment @xattr_count.
- *
- * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
- */
-static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
- int *xattr_count)
-{
- if (unlikely(!xattrs))
- return NULL;
- return &xattrs[(*xattr_count)++];
-}
-
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
@@ -110,11 +130,11 @@ static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
* care of the common case and reduces the amount of
* text involved.
*/
-#define LSM_HOOK_INIT(HEAD, HOOK) \
- { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
-
-extern struct security_hook_heads security_hook_heads;
-extern char *lsm_names;
+#define LSM_HOOK_INIT(NAME, HOOK) \
+ { \
+ .scalls = static_calls_table.NAME, \
+ .hook = { .NAME = HOOK } \
+ }
extern void security_add_hooks(struct security_hook_list *hooks, int count,
const struct lsm_id *lsmid);
@@ -137,9 +157,6 @@ struct lsm_info {
struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
};
-extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
-extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
-
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(".lsm_info.init") \
@@ -150,6 +167,28 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
-extern int lsm_inode_alloc(struct inode *inode);
+/* DO NOT tamper with these variables outside of the LSM framework */
+extern char *lsm_names;
+extern struct lsm_static_calls_table static_calls_table __ro_after_init;
+extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
+
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
#endif /* ! __LINUX_LSM_HOOKS_H */
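
Note that the registration API is unchanged by the static-call conversion: an
LSM still builds a security_hook_list array with LSM_HOOK_INIT() and passes it
to security_add_hooks(); only the macro internals now point at the per-hook
slot arrays in static_calls_table. A sketch (the hook implementations and the
lsm_id are assumed to exist elsewhere in the LSM):

static struct security_hook_list my_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_alloc_security, my_inode_alloc_security),
	LSM_HOOK_INIT(file_permission, my_file_permission),
};

static int __init my_lsm_init(void)
{
	security_add_hooks(my_hooks, ARRAY_SIZE(my_hooks), &my_lsm_id);
	return 0;
}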
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index a53ad4dabd7e..c2c11004085e 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -52,9 +52,9 @@
* bit in the node type. This is possible by using bit 1 to indicate if bit 2
* is part of the type or the slot.
*
- * Once the type is decided, the decision of an allocation range type or a range
- * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
- * flag.
+ * Once the type is decided, the decision of an allocation range type or a
+ * range type is done by examining the immutable tree flag for the
+ * MT_FLAGS_ALLOC_RANGE flag.
*
* Node types:
* 0x??1 = Root
@@ -148,6 +148,18 @@ enum maple_type {
maple_arange_64,
};
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
/**
* DOC: Maple tree flags
@@ -436,6 +448,7 @@ struct ma_state {
unsigned char offset;
unsigned char mas_flags;
unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
};
struct ma_wr_state {
@@ -477,6 +490,7 @@ struct ma_wr_state {
.max = ULONG_MAX, \
.alloc = NULL, \
.mas_flags = 0, \
+ .store_type = wr_invalid, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index fc4d75c6cec3..673d5cae7c81 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -467,6 +467,7 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0e5bf25d324f..34d2da05f2f1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -57,7 +57,7 @@ enum memcg_memory_event {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
@@ -70,6 +70,7 @@ struct mem_cgroup_id {
};
struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;
@@ -77,7 +78,7 @@ struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
+ atomic_t generation;
};
/*
@@ -193,6 +194,11 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
+
/* Range enforcement for interrupt charges */
struct work_struct high_work;
@@ -270,6 +276,8 @@ struct mem_cgroup {
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
+ struct memcg1_events_percpu __percpu *events_percpu;
+
unsigned long soft_limit;
/* protected by memcg_oom_lock */
@@ -361,11 +369,11 @@ static inline bool folio_memcg_kmem(struct folio *folio);
* After the initialization objcg->memcg is always pointing at
* a valid memcg, but can be atomically swapped to the parent memcg.
*
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
+ * The caller must ensure that the returned memcg won't be released.
*/
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
return READ_ONCE(objcg->memcg);
}
@@ -439,6 +447,19 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
return __folio_memcg(folio);
}
+/*
+ * folio_memcg_charged - Tell whether a folio is charged to a memory cgroup.
+ * @folio: Pointer to the folio.
+ *
+ * Returns true if the folio is charged to a memory cgroup, otherwise false.
+ */
+static inline bool folio_memcg_charged(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return __folio_objcg(folio) != NULL;
+ return __folio_memcg(folio) != NULL;
+}
+
/**
* folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
* @folio: Pointer to the folio.
@@ -455,7 +476,6 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
unsigned long memcg_data = READ_ONCE(folio->memcg_data);
VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
- WARN_ON_ONCE(!rcu_read_lock_held());
if (memcg_data & MEMCG_DATA_KMEM) {
struct obj_cgroup *objcg;
@@ -464,6 +484,8 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
return obj_cgroup_memcg(objcg);
}
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
@@ -677,7 +699,8 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
+
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
void __mem_cgroup_uncharge(struct folio *folio);
@@ -762,6 +785,8 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_current(void);
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -1006,8 +1031,8 @@ static inline void count_memcg_folio_events(struct folio *folio,
count_memcg_events(memcg, idx, nr);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1017,10 +1042,16 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg))
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ count_memcg_events_mm(mm, idx, 1);
+}
+
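With this change count_memcg_event_mm() becomes a thin wrapper around the batched count_memcg_events_mm(), so a caller accounting several events of one type needs only a single memcg lookup. A hedged sketch (the helper and the event choice are illustrative):

static void account_swapins(struct mm_struct *mm, unsigned long nr)
{
	/* one lookup of mm->owner's memcg for nr events, not nr lookups */
	count_memcg_events_mm(mm, PSWPIN, nr);
}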
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
@@ -1176,7 +1207,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
{
}
@@ -1240,6 +1271,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
@@ -1462,6 +1498,11 @@ static inline void count_memcg_folio_events(struct folio *folio,
{
}
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
+{
+}
+
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
@@ -1717,7 +1758,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
static inline void count_objcg_event(struct obj_cgroup *objcg,
@@ -1780,11 +1820,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
return NULL;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ebe876930e78..b27ddce5d324 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,54 +16,6 @@ struct resource;
struct vmem_altmap;
struct dev_pagemap;
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-
-#ifdef CONFIG_NUMA
-/*
- * XXX: node aware allocation can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
-})
-
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- node_data[nid] = pgdat;
-}
-
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index def5df6e74bf..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -294,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h
new file mode 100644
index 000000000000..016033cd68e4
--- /dev/null
+++ b/include/linux/mfd/adp5585.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#ifndef __MFD_ADP5585_H_
+#define __MFD_ADP5585_H_
+
+#include <linux/bits.h>
+
+#define ADP5585_ID 0x00
+#define ADP5585_MAN_ID_VALUE 0x20
+#define ADP5585_MAN_ID_MASK GENMASK(7, 4)
+#define ADP5585_INT_STATUS 0x01
+#define ADP5585_STATUS 0x02
+#define ADP5585_FIFO_1 0x03
+#define ADP5585_FIFO_2 0x04
+#define ADP5585_FIFO_3 0x05
+#define ADP5585_FIFO_4 0x06
+#define ADP5585_FIFO_5 0x07
+#define ADP5585_FIFO_6 0x08
+#define ADP5585_FIFO_7 0x09
+#define ADP5585_FIFO_8 0x0a
+#define ADP5585_FIFO_9 0x0b
+#define ADP5585_FIFO_10 0x0c
+#define ADP5585_FIFO_11 0x0d
+#define ADP5585_FIFO_12 0x0e
+#define ADP5585_FIFO_13 0x0f
+#define ADP5585_FIFO_14 0x10
+#define ADP5585_FIFO_15 0x11
+#define ADP5585_FIFO_16 0x12
+#define ADP5585_GPI_INT_STAT_A 0x13
+#define ADP5585_GPI_INT_STAT_B 0x14
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1a
+#define ADP5585_Rx_PULL_CFG_PU_300K 0
+#define ADP5585_Rx_PULL_CFG_PD_300K 1
+#define ADP5585_Rx_PULL_CFG_PU_100K 2
+#define ADP5585_Rx_PULL_CFG_DISABLE 3
+#define ADP5585_Rx_PULL_CFG_MASK 3
+#define ADP5585_GPI_INT_LEVEL_A 0x1b
+#define ADP5585_GPI_INT_LEVEL_B 0x1c
+#define ADP5585_GPI_EVENT_EN_A 0x1d
+#define ADP5585_GPI_EVENT_EN_B 0x1e
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1f
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET1_EVENT_B 0x2a
+#define ADP5585_RESET1_EVENT_C 0x2b
+#define ADP5585_RESET2_EVENT_A 0x2c
+#define ADP5585_RESET2_EVENT_B 0x2d
+#define ADP5585_RESET_CFG 0x2e
+#define ADP5585_PWM_OFFT_LOW 0x2f
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_PWM_IN_AND BIT(2)
+#define ADP5585_PWM_MODE BIT(1)
+#define ADP5585_PWM_EN BIT(0)
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_C 0x3a
+#define ADP5585_PULL_SELECT BIT(7)
+#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6)
+#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6)
+#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6)
+#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5)
+#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5)
+#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5)
+#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2)
+#define ADP5585_R3_EXTEND_CFG_LC (1U << 2)
+#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2)
+#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2)
+#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0)
+#define ADP5585_R0_EXTEND_CFG_LY (1U << 0)
+#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0)
+#define ADP5585_GENERAL_CFG 0x3b
+#define ADP5585_OSC_EN BIT(7)
+#define ADP5585_OSC_FREQ_50KHZ (0U << 5)
+#define ADP5585_OSC_FREQ_100KHZ (1U << 5)
+#define ADP5585_OSC_FREQ_200KHZ (2U << 5)
+#define ADP5585_OSC_FREQ_500KHZ (3U << 5)
+#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5)
+#define ADP5585_INT_CFG BIT(1)
+#define ADP5585_RST_CFG BIT(0)
+#define ADP5585_INT_EN 0x3c
+
+#define ADP5585_MAX_REG ADP5585_INT_EN
+
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the
+ * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to
+ * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver
+ * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is
+ * marked as reserved in the device tree for variants that don't support it.
+ */
+#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0)
+#define ADP5585_BIT(n) ((n) >= 6 ? BIT((n) - 6) : BIT(n))
+
+struct regmap;
+
+struct adp5585_dev {
+ struct regmap *regmap;
+};
+
+#endif
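The ADP5585_BANK()/ADP5585_BIT() macros above turn the flat GPIO numbering into a register-bank offset and a bit mask. A small sketch of how a consumer might use them (the helper is hypothetical); for GPIO 8 ("GPIO 9/C2") this yields bank 1 and BIT(2), i.e. the _B variant of a paired register:

#include <linux/bits.h>
#include <linux/mfd/adp5585.h>

/* Hypothetical helper: the A/B registers are consecutive, so add the bank. */
static unsigned int adp5585_gpo_out_reg(unsigned int gpio, unsigned int *mask)
{
	*mask = ADP5585_BIT(gpio);
	return ADP5585_GPO_DATA_OUT_A + ADP5585_BANK(gpio);
}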
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 8c0a33a2e9ce..f4dfc1871a95 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -115,6 +115,16 @@ enum axp20x_variants {
#define AXP313A_IRQ_STATE 0x21
#define AXP717_ON_INDICATE 0x00
+#define AXP717_PMU_STATUS_2 0x01
+#define AXP717_BC_DETECT 0x05
+#define AXP717_PMU_FAULT 0x08
+#define AXP717_MODULE_EN_CONTROL_1 0x0b
+#define AXP717_MIN_SYS_V_CONTROL 0x15
+#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16
+#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17
+#define AXP717_MODULE_EN_CONTROL_2 0x19
+#define AXP717_BOOST_CONTROL 0x1e
+#define AXP717_VSYS_V_POWEROFF 0x24
#define AXP717_IRQ0_EN 0x40
#define AXP717_IRQ1_EN 0x41
#define AXP717_IRQ2_EN 0x42
@@ -125,6 +135,9 @@ enum axp20x_variants {
#define AXP717_IRQ2_STATE 0x4a
#define AXP717_IRQ3_STATE 0x4b
#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_ICC_CHG_SET 0x62
+#define AXP717_ITERM_CHG_SET 0x63
+#define AXP717_CV_CHG_SET 0x64
#define AXP717_DCDC_OUTPUT_CONTROL 0x80
#define AXP717_DCDC1_CONTROL 0x83
#define AXP717_DCDC2_CONTROL 0x84
@@ -145,6 +158,19 @@ enum axp20x_variants {
#define AXP717_CLDO3_CONTROL 0x9d
#define AXP717_CLDO4_CONTROL 0x9e
#define AXP717_CPUSLDO_CONTROL 0x9f
+#define AXP717_BATT_PERCENT_DATA 0xa4
+#define AXP717_ADC_CH_EN_CONTROL 0xc0
+#define AXP717_BATT_V_H 0xc4
+#define AXP717_BATT_V_L 0xc5
+#define AXP717_VBUS_V_H 0xc6
+#define AXP717_VBUS_V_L 0xc7
+#define AXP717_VSYS_V_H 0xc8
+#define AXP717_VSYS_V_L 0xc9
+#define AXP717_BATT_CHRG_I_H 0xca
+#define AXP717_BATT_CHRG_I_L 0xcb
+#define AXP717_ADC_DATA_SEL 0xcd
+#define AXP717_ADC_DATA_H 0xce
+#define AXP717_ADC_DATA_L 0xcf
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
@@ -484,6 +510,7 @@ enum {
AXP717_CLDO3,
AXP717_CLDO4,
AXP717_CPUSLDO,
+ AXP717_BOOST,
AXP717_REG_ID_MAX,
};
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 43dfca1c9702..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of its sub-devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices' recovery
- * time respects the 1-wire spec since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say 1 here for big endian hardware
- * (only relevant with bus-shift > 0)
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get register address offset.
- * Only 0, 1 or 2 allowed for 8, 16 or 32 bit bus width respectively
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 54444ff2a5de..20c5e02ed9da 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -217,6 +217,10 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+/* MAX77693_CHG_REG_CHG_CNFG_02 register */
+#define CHG_CNFG_02_CC_SHIFT 0
+#define CHG_CNFG_02_CC_MASK 0x3F
+
/* MAX77693_CHG_REG_CHG_CNFG_03 register */
#define CHG_CNFG_03_TOITH_SHIFT 0
#define CHG_CNFG_03_TOTIME_SHIFT 3
@@ -244,6 +248,7 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
/* MAX77693 CHG_CTRL Register */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
diff --git a/include/linux/mii.h b/include/linux/mii.h
index d5a959ce4877..b8f26d4513c3 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -140,7 +140,7 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_ADVERTISE register.
*/
-static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -215,7 +215,8 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_CTRL1000 register when in 1000T mode.
*/
-static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+static inline u32
+linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -453,7 +454,7 @@ static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
* A small helper function that translates linkmode advertising to LVL
* pause capabilities.
*/
-static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising)
{
u32 lcl_adv = 0;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ba875a619b97..cc647992f3d1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -211,6 +211,7 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
};
enum wqe_page_fault_type {
@@ -370,6 +371,7 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
};
enum {
@@ -646,10 +648,11 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -659,6 +662,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -666,6 +670,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+ * FW changed from specifying the fault size as a
+ * byte count to a granularity of 4k pages. The size,
+ * specified in pages, uses bits 31:12 to keep
+ * backward compatibility.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
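Per the in-line comment, the memory-scheme fault size is carried in bits 31:12 of demand_fault_pages as a count of 4k pages. A hedged sketch of decoding it back to a byte length (the helper name is illustrative, not from this patch):

/* Illustrative decode of the EQE field described above. */
static u64 mlx5_mem_pfault_bytes(const struct mlx5_eqe_page_fault *eqe)
{
	u32 pages = be32_to_cpu(eqe->memory.demand_fault_pages) >> 12;

	return (u64)pages * MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
}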
@@ -1243,7 +1264,8 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
- MLX5_MCAM_REGS_NUM = 0x3,
+ MLX5_MCAM_REGS_0x9180_0x91FF = 0x3,
+ MLX5_MCAM_REGS_NUM = 0x4,
};
enum mlx5_mcam_feature_groups {
@@ -1369,6 +1391,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
+
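MLX5_CAP_ODP_SCHEME() dispatches the query to the memory or transport page-fault scheme block depending on whether the firmware reports mem_page_fault, so a caller can probe one capability name and get whichever scheme is active. A minimal sketch:

/* Sketch: scheme-agnostic ODP capability probe. */
static bool mlx5_odp_page_prefetch_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ODP_SCHEME(mdev, page_prefetch);
}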
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
@@ -1392,6 +1422,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
+#define MLX5_CAP_MCAM_REG3(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9180_0x91FF], \
+ mng_access_reg_cap_mask.access_regs3.reg)
+
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1444,6 +1478,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a96438ded15f..e23c692a34c7 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -159,6 +159,8 @@ enum {
MLX5_REG_MSECQ = 0x9155,
MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_MTPTM = 0x9180,
+ MLX5_REG_MTCTR = 0x9181,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
MLX5_REG_DTOR = 0xC00E,
@@ -643,6 +645,7 @@ struct mlx5_priv {
struct mlx5_sf_hw_table *sf_hw_table;
struct mlx5_sf_table *sf_table;
#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
@@ -1181,7 +1184,6 @@ bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 3fb428ce7d1c..b744e554f014 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cab228cf51c6..97f6de69f616 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -80,23 +80,15 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
- MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
-};
-
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
- MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
- MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
- (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
-};
-
-enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
@@ -112,6 +104,16 @@ enum {
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -313,6 +315,8 @@ enum {
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
@@ -485,7 +489,13 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_66[0x2];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
- u8 reserved_at_6a[0xe];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -522,7 +532,15 @@ struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
@@ -911,7 +929,9 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_8[0x5];
u8 fdb_uplink_hairpin[0x1];
u8 fdb_multi_path_any_table_limit_regc[0x1];
- u8 reserved_at_f[0x3];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
u8 fdb_multi_path_any_table[0x1];
u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
@@ -950,6 +970,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_1900[0x6700];
};
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
MLX5_COUNTER_FLOW_ESWITCH = 0x1,
@@ -1027,7 +1114,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x20];
+ u8 nic_element_type[0x10];
+ u8 nic_tsar_type[0x10];
u8 reserved_at_120[0x3];
u8 log_meter_aso_granularity[0x5];
@@ -1325,11 +1413,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -1343,7 +1433,20 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
- u8 reserved_at_120[0x6E0];
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
+
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_tls_cap_bits {
@@ -1443,9 +1546,13 @@ enum {
};
enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1650,7 +1757,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pci_sync_for_fw_update_event[0x1];
u8 reserved_at_1f2[0x6];
u8 init2_lag_tx_port_affinity[0x1];
- u8 reserved_at_1fa[0x3];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1764,7 +1872,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0x6];
+ u8 reserved_at_330[0x5];
+ u8 pcie_reset_using_hotreset_method[0x1];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 vport_counter_local_loopback[0x1];
@@ -1885,7 +1994,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5a0[0x10];
u8 enhanced_cqe_compression[0x1];
- u8 reserved_at_5b1[0x2];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1954,12 +2064,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 dynamic_msix_table_size[0xc];
u8 reserved_at_740[0xc];
u8 min_dynamic_vf_msix_table_size[0x4];
- u8 reserved_at_750[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
u8 max_dynamic_vf_msix_table_size[0xc];
u8 reserved_at_760[0x3];
u8 log_max_num_header_modify_argument[0x5];
- u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity_offset[0x4];
u8 log_header_modify_argument_granularity[0x4];
u8 reserved_at_770[0x3];
u8 log_header_modify_argument_max_alloc[0x5];
@@ -1982,7 +2094,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
u8 migratable[0x1];
- u8 reserved_at_81[0x1f];
+ u8 reserved_at_81[0x11];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
@@ -2006,7 +2122,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
- u8 reserved_at_1a8[0x3];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
@@ -2022,11 +2139,22 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_250[0x10];
u8 reserved_at_260[0x120];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
u8 reserved_at_380[0xb];
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0x10];
+ u8 reserved_at_3a0[0xa];
+ u8 max_mkey_log_entity_size_mtt[0x6];
u8 max_rqt_vhca_id[0x10];
u8 reserved_at_3c0[0x20];
@@ -2037,9 +2165,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_400[0x1];
u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
- u8 reserved_at_402[0x1e];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
- u8 reserved_at_420[0x20];
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
u8 reserved_at_440[0x8];
u8 max_num_eqs_24b[0x18];
@@ -2086,7 +2216,7 @@ struct mlx5_ifc_extended_dest_format_bits {
u8 reserved_at_60[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
};
@@ -2178,7 +2308,10 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x80];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_offset[0x40];
u8 headers_mkey[0x20];
@@ -3562,6 +3695,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
@@ -3678,7 +3813,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3919,7 +4054,8 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0xb];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
@@ -3966,6 +4102,7 @@ enum {
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
};
struct mlx5_ifc_scheduling_context_bits {
@@ -4154,6 +4291,7 @@ enum {
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
struct mlx5_ifc_mkc_bits {
@@ -4196,14 +4334,16 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
u8 reserved_at_1c0[0x19];
u8 relaxed_ordering_read[0x1];
- u8 reserved_at_1d9[0x1];
- u8 log_page_size[0x5];
+ u8 log_page_size[0x6];
u8 reserved_at_1e0[0x20];
};
@@ -4675,6 +4815,12 @@ enum {
TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
};
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+};
+
struct mlx5_ifc_tsar_element_bits {
u8 reserved_at_0[0x8];
u8 tsar_type[0x8];
@@ -4961,6 +5107,16 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5124,6 +5280,36 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
@@ -6127,7 +6313,8 @@ struct mlx5_ifc_flow_table_context_bits {
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_10[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
@@ -6137,11 +6324,21 @@ struct mlx5_ifc_flow_table_context_bits {
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
+ union {
+ struct {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+ } sws;
+ struct {
+ u8 rtc_id_0[0x20];
- u8 sw_owner_icm_root_1[0x40];
+ u8 rtc_id_1[0x20];
- u8 sw_owner_icm_root_0[0x40];
+ u8 reserved_at_100[0x40];
+ } hws;
+ };
};
struct mlx5_ifc_query_flow_table_out_bits {
@@ -7217,6 +7414,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7233,13 +7454,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
struct mlx5_ifc_nop_out_bits {
@@ -8923,7 +9139,9 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x60];
+ u8 wq_umem_offset[0x40];
+
+ u8 wq_umem_id[0x20];
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
@@ -8989,7 +9207,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 pg_access[0x1];
u8 mkey_umem_valid[0x1];
- u8 reserved_at_62[0x1e];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -10401,6 +10620,18 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits3 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_2[0x1e];
+ u8 mtctr[0x1];
+ u8 mtptm[0x1];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -10413,6 +10644,7 @@ struct mlx5_ifc_mcam_reg_bits {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
+ struct mlx5_ifc_mcam_access_reg_bits3 access_regs3;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -11035,6 +11267,11 @@ struct mlx5_ifc_mcda_reg_bits {
};
enum {
+ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
+ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
+};
+
+enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
@@ -11061,7 +11298,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
- u8 reserved_at_28[0x4];
+ u8 pci_reset_req_method[0x3];
+ u8 reserved_at_2b[0x1];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
@@ -11166,6 +11404,34 @@ struct mlx5_ifc_mtmp_reg_bits {
u8 sensor_name_lo[0x20];
};
+struct mlx5_ifc_mtptm_reg_bits {
+ u8 reserved_at_0[0x10];
+ u8 psta[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
+};
+
+enum {
+ MLX5_MTCTR_REQUEST_NOP = 0x0,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK = 0x1,
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER = 0x2,
+ MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK = 0x3,
+};
+
+struct mlx5_ifc_mtctr_reg_bits {
+ u8 first_clock_timestamp_request[0x8];
+ u8 second_clock_timestamp_request[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 first_clock_valid[0x1];
+ u8 second_clock_valid[0x1];
+ u8 reserved_at_22[0x1e];
+
+ u8 first_clock_timestamp[0x40];
+ u8 second_clock_timestamp[0x40];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -11230,6 +11496,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
+ struct mlx5_ifc_mtptm_reg_bits mtptm_reg;
+ struct mlx5_ifc_mtctr_reg_bits mtctr_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index ad1ce650146c..fc7eeff99a8a 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -149,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 147073601716..ecf63d2b0582 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,11 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#ifndef PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define PHYSMEM_END (((phys_addr_t)-1) & ~(1ULL << 63))
+# endif
#endif
#include <asm/page.h>
@@ -334,12 +338,16 @@ extern unsigned int kobjsize(const void *objp);
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
-# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
-# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
-#ifdef CONFIG_PPC
+# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
+#if CONFIG_ARCH_PKEY_BITS > 3
+# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#else
+# define VM_PKEY_BIT3 0
+#endif
+#if CONFIG_ARCH_PKEY_BITS > 4
# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4 0
@@ -363,7 +371,7 @@ extern unsigned int kobjsize(const void *objp);
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#elif defined(CONFIG_PPC)
+#elif defined(CONFIG_PPC64)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
@@ -378,8 +386,8 @@ extern unsigned int kobjsize(const void *objp);
#endif
#if defined(CONFIG_ARM64_MTE)
-# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
-# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
+# define VM_MTE VM_HIGH_ARCH_4 /* Use Tagged memory for access control */
+# define VM_MTE_ALLOWED VM_HIGH_ARCH_5 /* Tagged memory permitted */
#else
# define VM_MTE VM_NONE
# define VM_MTE_ALLOWED VM_NONE
@@ -413,6 +421,8 @@ extern unsigned int kobjsize(const void *objp);
#ifdef CONFIG_64BIT
#define VM_DROPPABLE_BIT 40
#define VM_DROPPABLE BIT(VM_DROPPABLE_BIT)
+#elif defined(CONFIG_PPC32)
+#define VM_DROPPABLE VM_ARCH_1
#else
#define VM_DROPPABLE VM_NONE
#endif
@@ -1009,27 +1019,6 @@ static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
return mas_prev(&vmi->mas, 0);
}
-static inline
-struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
-{
- return mas_prev_range(&vmi->mas, 0);
-}
-
-static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
-{
- return vmi->mas.index;
-}
-
-static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
-{
- return vmi->mas.last + 1;
-}
-static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
- unsigned long count)
-{
- return mas_expected_entries(&vmi->mas, count);
-}
-
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
@@ -1253,8 +1242,7 @@ static inline int folio_mapcount(const struct folio *folio)
if (likely(!folio_test_large(folio))) {
mapcount = atomic_read(&folio->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
return mapcount;
}
@@ -1601,6 +1589,7 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_folio(struct folio *folio, unsigned long npages);
void unpin_folios(struct folio **folios, unsigned long nfolios);
static inline bool is_cow_mapping(vm_flags_t flags)
@@ -1749,6 +1738,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
+
+bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
@@ -1802,6 +1793,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
+static inline bool folio_use_access_time(struct folio *folio)
+{
+ return false;
+}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -2151,14 +2146,19 @@ static inline size_t folio_size(const struct folio *folio)
* MM ("mapped shared"), or if the folio is only mapped into a single MM
* ("mapped exclusively").
*
+ * For KSM folios, this function also returns "mapped shared" when a folio is
+ * mapped multiple times into the same MM, because the individual page mappings
+ * are independent.
+ *
* As precise information is not easily available for all folios, this function
* estimates the number of MMs ("sharers") that are currently mapping a folio
* using the number of times the first page of the folio is currently mapped
* into page tables.
*
- * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
- * the return value will be exactly correct, because they can only be mapped
- * at most once into an MM, and they cannot be partially mapped.
+ * For small anonymous folios and anonymous hugetlb folios, the return
+ * value will be exactly correct: non-KSM folios can only be mapped at most once
+ * into an MM, and they cannot be partially mapped. KSM folios are
+ * considered shared even if mapped multiple times into the same MM.
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
@@ -2167,9 +2167,6 @@ static inline size_t folio_size(const struct folio *folio)
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
- * #. For (small) KSM folios, the return value can wrongly indicate "mapped
- * shared" (false positive), when the folio is mapped multiple times into
- * the same MM.
*
* Further, this function only considers current page table mappings that
* are tracked using the folio mapcount(s).
@@ -2203,26 +2200,10 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
return atomic_read(&folio->_mapcount) > 0;
}
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
-
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
- int ret;
- long i, nr = folio_nr_pages(folio);
-
- for (i = 0; i < nr; i++) {
- ret = arch_make_page_accessible(folio_page(folio, i));
- if (ret)
- break;
- }
-
- return ret;
+ return 0;
}
#endif
@@ -2402,11 +2383,40 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+struct follow_pfnmap_args {
+ /**
+ * Inputs:
+ * @vma: Pointer to the vm_area_struct to walk within
+ * @address: the virtual address to walk
+ */
+ struct vm_area_struct *vma;
+ unsigned long address;
+ /**
+ * Internals:
+ *
+ * The caller shouldn't touch any of these.
+ */
+ spinlock_t *lock;
+ pte_t *ptep;
+ /**
+ * Outputs:
+ *
+ * @pfn: the PFN of the address
+ * @pgprot: the pgprot_t of the mapping
+ * @writable: whether the mapping is writable
+ * @special: whether the mapping is a special mapping (real PFN maps)
+ */
+ unsigned long pfn;
+ pgprot_t pgprot;
+ bool writable;
+ bool special;
+};
+int follow_pfnmap_start(struct follow_pfnmap_args *args);
+void follow_pfnmap_end(struct follow_pfnmap_args *args);
+
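follow_pfnmap_start()/follow_pfnmap_end() replace the removed follow_pte() with an args struct that separates inputs, internals, and outputs. A minimal usage sketch, assuming the caller already holds the mmap lock as follow_pte() required:

/* Sketch: resolve the PFN behind an address in a pfnmap VMA. */
static int resolve_pfn(struct vm_area_struct *vma, unsigned long addr,
		       unsigned long *pfn)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
	int err = follow_pfnmap_start(&args);

	if (err)
		return err;
	*pfn = args.pfn;		/* outputs valid only until _end() */
	follow_pfnmap_end(&args);
	return 0;
}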
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -2534,11 +2544,6 @@ int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack);
-
/*
* Flags used by change_protection(). For now we make it a bitmap so
* that we can pass in multiple flags just like parameters. However
@@ -2559,21 +2564,6 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
-static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
-{
- /*
- * We want to check manually if we can change individual PTEs writable
- * if we can't do that automatically for all PTEs in a mapping. For
- * private mappings, that's always the case when we have write
- * permissions as we properly have to handle COW.
- */
- if (vma->vm_flags & VM_SHARED)
- return vma_wants_writenotify(vma, vma->vm_page_prot);
- return !!(vma->vm_flags & VM_WRITE);
-
-}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
@@ -2697,6 +2687,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#endif
+#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return false;
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return false;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
+
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
@@ -2889,7 +2903,7 @@ static inline void pagetable_free(struct ptdesc *pt)
__free_pages(page, compound_order(page));
}
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
@@ -2947,7 +2961,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc)
return true;
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
@@ -2962,7 +2976,7 @@ static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
@@ -3022,7 +3036,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
@@ -3281,78 +3295,9 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next);
-extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void unlink_file_vma(struct vm_area_struct *);
-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name);
-
-/* We are about to modify the VMA's flags. */
-static inline struct vm_area_struct
-*vma_modify_flags(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx,
- anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or anon_name. */
-static inline struct vm_area_struct
-*vma_modify_flags_name(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long new_flags,
- struct anon_vma_name *new_name)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
-}
-
-/* We are about to modify the VMA's memory policy. */
-static inline struct vm_area_struct
-*vma_modify_policy(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct mempolicy *new_pol)
-{
- return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
- new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or uffd context. */
-static inline struct vm_area_struct
-*vma_modify_flags_uffd(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), new_ctx, anon_vma_name(vma));
-}
+int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3385,10 +3330,6 @@ extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
const struct vm_special_mapping *spec);
-/* This is an obsolete alternative to _install_special_mapping. */
-extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
@@ -3414,14 +3355,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
-extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3649,9 +3590,6 @@ static inline vm_fault_t vmf_fs_error(int err)
return VM_FAULT_SIGBUS;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags);
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -4187,18 +4125,18 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
#ifdef CONFIG_UNACCEPTED_MEMORY
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
+void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
- phys_addr_t end)
+ unsigned long size)
{
return false;
}
-static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
@@ -4206,9 +4144,7 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end)
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+ return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
@@ -4216,4 +4152,71 @@ void vma_pgtable_walk_end(struct vm_area_struct *vma);
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+#ifdef CONFIG_64BIT
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
+#else
+static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+{
+ /* noop on 32 bit */
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
+
+ if (ref) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(ref, tag);
+ put_page_tag_ref(ref);
+ }
+ }
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ struct alloc_tag *tag;
+ union codetag_ref *ref;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ ref = get_page_tag_ref(&new->page);
+ if (!ref)
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(ref, folio_nr_pages(new));
+
+ __alloc_tag_ref_set(ref, tag);
+
+ put_page_tag_ref(ref);
+}
+#else /* !CONFIG_MEM_ALLOC_PROFILING */
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+}
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 485424979254..6e3bdf8e38bc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -109,7 +109,7 @@ struct page {
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
- * Used for swp_entry_t if PageSwapCache.
+ * Used for swp_entry_t if the swapcache flag is set.
* Indicates order in the buddy system if PageBuddy.
*/
unsigned long private;
@@ -660,6 +660,9 @@ struct vma_numab_state {
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
+ *
+ * Only explicitly marked struct members may be accessed by RCU readers before
+ * getting a stable reference.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -675,7 +678,11 @@ struct vm_area_struct {
#endif
};
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ /*
+ * The address space we belong to.
+ * Unstable RCU readers are allowed to read this.
+ */
+ struct mm_struct *vm_mm;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
@@ -688,7 +695,10 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
- /* Flag to indicate areas detached from the mm->mm_mt tree */
+ /*
+ * Flag to indicate areas detached from the mm->mm_mt tree.
+ * Unstable RCU readers are allowed to read this.
+ */
bool detached;
/*
@@ -706,6 +716,7 @@ struct vm_area_struct {
* slowpath.
*/
int vm_lock_seq;
+ /* Unstable RCU readers are allowed to read this. */
struct vma_lock *vm_lock;
#endif
@@ -947,7 +958,7 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1313,6 +1324,9 @@ struct vm_special_mapping {
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
+
+ void (*close)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma);
};
enum tlb_flush_reason {
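The new ->close() hook on struct vm_special_mapping lets the owner of a special mapping observe the VMA being torn down. A minimal sketch of wiring it up, where the callback body and mapping name are hypothetical:

	static void example_special_close(const struct vm_special_mapping *sm,
					  struct vm_area_struct *vma)
	{
		/* Hypothetical: drop per-mapping state tied to this VMA. */
	}

	static struct vm_special_mapping example_mapping = {
		.name  = "[example]",
		.close = example_special_close,
	};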
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a2f6179b672b..bff5706b76e1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -16,9 +16,6 @@
#include <asm/tlbbatch.h>
#endif
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2c7928a50907..f0ac2e469b32 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -11,18 +11,6 @@
struct mmc_data;
struct mmc_request;
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
-};
-
struct mmc_command {
u32 opcode;
u32 arg;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 88c6a76042ee..8fc2b328ec4d 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -264,16 +265,6 @@ struct mmc_cqe_ops {
void (*cqe_recovery_finish)(struct mmc_host *host);
};
-struct mmc_async_req {
- /* active mmc request */
- struct mmc_request *mrq;
- /*
- * Check error status of completed mmc request.
- * Returns 0 if success otherwise non zero.
- */
- enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
-};
-
/**
* struct mmc_slot - MMC slot functions
*
@@ -291,20 +282,6 @@ struct mmc_slot {
void *handler_priv;
};
-/**
- * mmc_context_info - synchronization details for mmc context
- * @is_done_rcv wake up reason was done request
- * @is_new_req wake up reason was new request
- * @is_waiting_last_req mmc context waiting for single running request
- * @wait wait queue
- */
-struct mmc_context_info {
- bool is_done_rcv;
- bool is_new_req;
- bool is_waiting_last_req;
- wait_queue_head_t wait;
-};
-
struct regulator;
struct mmc_pwrseq;
@@ -672,7 +649,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
-int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
+ u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
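With the prototype change above, mmc_sd_switch() takes its CMD6 mode as a bool: false queries the switchable functions, true performs the switch. An illustrative call sequence, assuming group 0/function 1 (the SD high-speed switch) and a 64-byte status buffer:

	u8 status[64];
	int err;

	/* Check mode: only report whether the function can be switched. */
	err = mmc_sd_switch(card, false, 0, 1, status);
	if (!err)
		/* Set mode: actually switch group 0 to function 1. */
		err = mmc_sd_switch(card, true, 0, 1, status);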
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1dc6248feb83..17506e4a2835 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -666,11 +666,6 @@ enum zone_watermarks {
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
-#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-
/*
* Flags used in pcp->flags field.
*
@@ -1016,6 +1011,32 @@ enum zone_flags {
ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
+static inline unsigned long wmark_pages(const struct zone *z,
+ enum zone_watermarks w)
+{
+ return z->_watermark[w] + z->watermark_boost;
+}
+
+static inline unsigned long min_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_MIN);
+}
+
+static inline unsigned long low_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_LOW);
+}
+
+static inline unsigned long high_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_HIGH);
+}
+
+static inline unsigned long promo_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_PROMO);
+}
+
static inline unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
@@ -1688,7 +1709,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
- for (zone = z->zone; \
+ for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
@@ -1724,7 +1745,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
nid = first_node(*nodes);
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
- return (!z->zone) ? true : false;
+ return (!zonelist_zone(z)) ? true : false;
}
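The watermark macros become inline functions with identical behaviour (the boost is still added in), gaining type checking and a const-qualified zone argument; likewise, open-coded z->zone accesses are replaced by the zonelist_zone() accessor. A hedged example of a typical consumer, not taken from this patch:

	/* Illustrative reclaim-style check using the new helpers. */
	static bool example_zone_is_low(struct zone *z)
	{
		unsigned long free = zone_page_state(z, NR_FREE_PAGES);

		return free < low_wmark_pages(z);
	}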
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index cd4d5c8781f5..b1b219bc3422 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -9,6 +9,7 @@ struct mnt_idmap;
struct user_namespace;
extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 8f882f5881e8..70b366b64816 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -3,6 +3,9 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
@@ -11,6 +14,7 @@ struct ns_common;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+DEFINE_FREE(put_mnt_ns, struct mnt_namespace *, if (!IS_ERR_OR_NULL(_T)) put_mnt_ns(_T))
extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
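The DEFINE_FREE() hook above enables scope-based cleanup for mount namespaces via <linux/cleanup.h>. A minimal sketch of the pattern it unlocks, where the acquisition step is hypothetical; the IS_ERR_OR_NULL() guard in the definition makes it safe to hold an error pointer:

	static int example_with_mnt_ns(struct mnt_namespace *candidate)
	{
		/* Dropped automatically on every return path below. */
		struct mnt_namespace *ns __free(put_mnt_ns) = candidate;

		if (IS_ERR_OR_NULL(ns))
			return ns ? PTR_ERR(ns) : -EINVAL;

		/* ... use ns ... */
		return 0;
	}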
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index eb0d1c1db208..47be46f36435 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,79 +40,26 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-static inline MPI mpi_new(unsigned int nbits)
-{
- return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
-}
-
MPI mpi_copy(MPI a);
-MPI mpi_alloc_like(MPI a);
-void mpi_snatch(MPI w, MPI u);
-MPI mpi_set(MPI w, MPI u);
-MPI mpi_set_ui(MPI w, unsigned long u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
-
-/* Constants used to return constant MPIs. See mpi_init if you
- * want to add more constants.
- */
-#define MPI_NUMBER_OF_CONSTANTS 6
-enum gcry_mpi_constants {
- MPI_C_ZERO,
- MPI_C_ONE,
- MPI_C_TWO,
- MPI_C_THREE,
- MPI_C_FOUR,
- MPI_C_EIGHT
-};
-
-MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
-
-/* Different formats of external big integer representation. */
-enum gcry_mpi_format {
- GCRYMPI_FMT_NONE = 0,
- GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
- GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
- GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
- GCRYMPI_FMT_HEX = 4, /* Hex format. */
- GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
- GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
-};
-
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
-int mpi_fromstr(MPI val, const char *str);
-MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
- size_t buflen, size_t *nwritten, MPI a);
/*-- mpi-mod.c --*/
-void mpi_mod(MPI rem, MPI dividend, MPI divisor);
-
-/* Context used with Barrett reduction. */
-struct barrett_ctx_s;
-typedef struct barrett_ctx_s *mpi_barrett_t;
-
-mpi_barrett_t mpi_barrett_init(MPI m, int copy);
-void mpi_barrett_free(mpi_barrett_t ctx);
-void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
-void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
+int mpi_mod(MPI rem, MPI dividend, MPI divisor);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@@ -129,138 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
int mpi_test_bit(MPI a, unsigned int n);
-void mpi_set_bit(MPI a, unsigned int n);
-void mpi_set_highbit(MPI a, unsigned int n);
-void mpi_clear_highbit(MPI a, unsigned int n);
-void mpi_clear_bit(MPI a, unsigned int n);
-void mpi_rshift_limbs(MPI a, unsigned int count);
-void mpi_rshift(MPI x, MPI a, unsigned int n);
-void mpi_lshift_limbs(MPI a, unsigned int count);
-void mpi_lshift(MPI x, MPI a, unsigned int n);
+int mpi_set_bit(MPI a, unsigned int n);
+int mpi_rshift(MPI x, MPI a, unsigned int n);
/*-- mpi-add.c --*/
-void mpi_add_ui(MPI w, MPI u, unsigned long v);
-void mpi_add(MPI w, MPI u, MPI v);
-void mpi_sub(MPI w, MPI u, MPI v);
-void mpi_addm(MPI w, MPI u, MPI v, MPI m);
-void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+int mpi_add(MPI w, MPI u, MPI v);
+int mpi_sub(MPI w, MPI u, MPI v);
+int mpi_addm(MPI w, MPI u, MPI v, MPI m);
+int mpi_subm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-mul.c --*/
-void mpi_mul(MPI w, MPI u, MPI v);
-void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+int mpi_mul(MPI w, MPI u, MPI v);
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-div.c --*/
-void mpi_tdiv_r(MPI rem, MPI num, MPI den);
-void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI a, MPI n);
-
-/*-- ec.c --*/
-
-/* Object to represent a point in projective coordinates */
-struct gcry_mpi_point {
- MPI x;
- MPI y;
- MPI z;
-};
-
-typedef struct gcry_mpi_point *MPI_POINT;
-
-/* Models describing an elliptic curve */
-enum gcry_mpi_ec_models {
- /* The Short Weierstrass equation is
- * y^2 = x^3 + ax + b
- */
- MPI_EC_WEIERSTRASS = 0,
- /* The Montgomery equation is
- * by^2 = x^3 + ax^2 + x
- */
- MPI_EC_MONTGOMERY,
- /* The Twisted Edwards equation is
- * ax^2 + y^2 = 1 + bx^2y^2
- * Note that we use 'b' instead of the commonly used 'd'.
- */
- MPI_EC_EDWARDS
-};
-
-/* Dialects used with elliptic curves */
-enum ecc_dialects {
- ECC_DIALECT_STANDARD = 0,
- ECC_DIALECT_ED25519,
- ECC_DIALECT_SAFECURVE
-};
-
-/* This context is used with all our EC functions. */
-struct mpi_ec_ctx {
- enum gcry_mpi_ec_models model; /* The model describing this curve. */
- enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
- int flags; /* Public key flags (not always used). */
- unsigned int nbits; /* Number of bits. */
-
- /* Domain parameters. Note that they may not all be set and if set
- * the MPIs may be flagged as constant.
- */
- MPI p; /* Prime specifying the field GF(p). */
- MPI a; /* First coefficient of the Weierstrass equation. */
- MPI b; /* Second coefficient of the Weierstrass equation. */
- MPI_POINT G; /* Base point (generator). */
- MPI n; /* Order of G. */
- unsigned int h; /* Cofactor. */
-
- /* The actual key. May not be set. */
- MPI_POINT Q; /* Public key. */
- MPI d; /* Private key. */
-
- const char *name; /* Name of the curve. */
-
- /* This structure is private to mpi/ec.c! */
- struct {
- struct {
- unsigned int a_is_pminus3:1;
- unsigned int two_inv_p:1;
- } valid; /* Flags to help setting the helper vars below. */
-
- int a_is_pminus3; /* True if A = P - 3. */
-
- MPI two_inv_p;
-
- mpi_barrett_t p_barrett;
-
- /* Scratch variables. */
- MPI scratch[11];
-
- /* Helper for fast reduction. */
- /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
- /* MPI s[10]; */
- /* MPI c; */
- } t;
-
- /* Curve specific computation routines for the field. */
- void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
- void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
- void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
-};
-
-void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
- enum ecc_dialects dialect,
- int flags, MPI p, MPI a, MPI b);
-void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
-MPI_POINT mpi_point_new(unsigned int nbits);
-void mpi_point_release(MPI_POINT p);
-void mpi_point_init(MPI_POINT p);
-void mpi_point_free_parts(MPI_POINT p);
-int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
-void mpi_ec_add_points(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx);
-void mpi_ec_mul_point(MPI_POINT result,
- MPI scalar, MPI_POINT point,
- struct mpi_ec_ctx *ctx);
-int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
+int mpi_tdiv_r(MPI rem, MPI num, MPI den);
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
/* inline functions */
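After this trim, the surviving MPI arithmetic helpers return int rather than void, so internal limb allocations can fail gracefully instead of aborting. A hedged sketch of the resulting caller pattern (function name and shape are illustrative):

	static int example_powm_then_mod(MPI res, MPI base, MPI exp, MPI mod,
					 MPI rem)
	{
		int ret;

		ret = mpi_powm(res, base, exp, mod);
		if (ret)
			return ret;

		return mpi_mod(rem, res, mod);
	}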
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 944979763825..b10093c4d00e 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -554,6 +554,8 @@ enum {
MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* PCI MSIs cannot be steered separately to CPU cores */
+ MSI_FLAG_NO_AFFINITY = (1 << 21),
};
/**
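A hedged sketch of how an MSI parent domain might advertise the new flag; the surrounding info structure and companion flags are schematic, not from this hunk:

	static struct msi_domain_info example_msi_domain_info = {
		.flags = MSI_FLAG_USE_DEF_DOM_OPS |
			 MSI_FLAG_USE_DEF_CHIP_OPS |
			 MSI_FLAG_NO_AFFINITY, /* no per-CPU steering */
	};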
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index b2996dc987ff..1e4208040956 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: no need to start the operation over at the end of each page; the
+ *		NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information are already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
+ bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
@@ -906,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nand_io_page_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
*/
-static inline void nanddev_io_iter_init(struct nand_device *nand,
- enum nand_page_io_req_type reqtype,
- loff_t offs, struct mtd_oob_ops *req,
- struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
@@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nand_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB).
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
}
/**
@@ -963,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
}
/**
+ * nand_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
* nand_io_iter_end - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
@@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
- for (nanddev_io_iter_init(nand, type, start, req, iter); \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
+/**
+ * nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ * request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
+
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
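A hedged usage sketch of the new block-level iterator, mirroring the existing page-level loop; example_read_block() is a hypothetical helper that would issue one continuous read of iter.req.datalen bytes (reads only, no OOB):

	static int example_cont_read(struct nand_device *nand, loff_t offs,
				     struct mtd_oob_ops *req)
	{
		struct nand_io_iter iter;
		int ret = 0;

		nanddev_io_for_each_block(nand, NAND_PAGE_READ, offs, req,
					  &iter) {
			ret = example_read_block(nand, &iter.req);
			if (ret)
				break;
		}

		return ret;
	}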
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5c19ead60499..702e5fb13dae 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -312,6 +312,8 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
@@ -336,6 +338,7 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @set_cont_read: enable/disable continuous cached reads
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -354,6 +357,8 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
#define SPINAND_ID(__method, ...) \
@@ -379,6 +384,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read,
+
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
@@ -422,6 +430,12 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @cont_read_possible: Field filled by the core once the whole system
+ *		configuration is known, telling whether continuous reads are
+ *		generally suitable with this chip/configuration. A per-transfer
+ *		check must still be done to ensure enabling the feature is
+ *		actually relevant.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
*/
struct spinand_device {
@@ -451,6 +465,10 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
};
/**
@@ -517,6 +535,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
#endif /* __LINUX_MTD_SPINAND_H */
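A hedged sketch of how a manufacturer driver would wire up the new hook: the vendor bit and chip entry below are placeholders, with spinand_upd_cfg() used as declared above to flip a configuration-register bit:

	/* Hypothetical ->set_cont_read() toggling a vendor-specific bit. */
	static int example_set_cont_read(struct spinand_device *spinand,
					 bool enable)
	{
		return spinand_upd_cfg(spinand, BIT(2), enable ? BIT(2) : 0);
	}

	/* In the spinand_info table (schematic):
	 *	SPINAND_INFO("example-chip", ...,
	 *		     SPINAND_HAS_QE_BIT,
	 *		     ...,
	 *		     SPINAND_CONT_READ(example_set_cont_read)),
	 */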
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
deleted file mode 100644
index 000b126acfb6..000000000000
--- a/include/linux/mv643xx.h
+++ /dev/null
@@ -1,921 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mv643xx.h - MV-643XX Internal registers definition file.
- *
- * Copyright 2002 Momentum Computer, Inc.
- * Author: Matthew Dharm <mdharm@momenco.com>
- * Copyright 2002 GALILEO TECHNOLOGY, LTD.
- */
-#ifndef __ASM_MV643XX_H
-#define __ASM_MV643XX_H
-
-#include <asm/types.h>
-#include <linux/mv643xx_eth.h>
-#include <linux/mv643xx_i2c.h>
-
-/****************************************/
-/* Processor Address Space */
-/****************************************/
-
-/* DDR SDRAM BAR and size registers */
-
-#define MV64340_CS_0_BASE_ADDR 0x008
-#define MV64340_CS_0_SIZE 0x010
-#define MV64340_CS_1_BASE_ADDR 0x208
-#define MV64340_CS_1_SIZE 0x210
-#define MV64340_CS_2_BASE_ADDR 0x018
-#define MV64340_CS_2_SIZE 0x020
-#define MV64340_CS_3_BASE_ADDR 0x218
-#define MV64340_CS_3_SIZE 0x220
-
-/* Devices BAR and size registers */
-
-#define MV64340_DEV_CS0_BASE_ADDR 0x028
-#define MV64340_DEV_CS0_SIZE 0x030
-#define MV64340_DEV_CS1_BASE_ADDR 0x228
-#define MV64340_DEV_CS1_SIZE 0x230
-#define MV64340_DEV_CS2_BASE_ADDR 0x248
-#define MV64340_DEV_CS2_SIZE 0x250
-#define MV64340_DEV_CS3_BASE_ADDR 0x038
-#define MV64340_DEV_CS3_SIZE 0x040
-#define MV64340_BOOTCS_BASE_ADDR 0x238
-#define MV64340_BOOTCS_SIZE 0x240
-
-/* PCI 0 BAR and size registers */
-
-#define MV64340_PCI_0_IO_BASE_ADDR 0x048
-#define MV64340_PCI_0_IO_SIZE 0x050
-#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058
-#define MV64340_PCI_0_MEMORY0_SIZE 0x060
-#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080
-#define MV64340_PCI_0_MEMORY1_SIZE 0x088
-#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258
-#define MV64340_PCI_0_MEMORY2_SIZE 0x260
-#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280
-#define MV64340_PCI_0_MEMORY3_SIZE 0x288
-
-/* PCI 1 BAR and size registers */
-#define MV64340_PCI_1_IO_BASE_ADDR 0x090
-#define MV64340_PCI_1_IO_SIZE 0x098
-#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0
-#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8
-#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0
-#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8
-#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0
-#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8
-#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0
-#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8
-
-/* SRAM base address */
-#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268
-
-/* internal registers space base address */
-#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068
-
-/* Enables the CS , DEV_CS , PCI 0 and PCI 1
- windows above */
-#define MV64340_BASE_ADDR_ENABLE 0x278
-
-/****************************************/
-/* PCI remap registers */
-/****************************************/
- /* PCI 0 */
-#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0
-#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8
-#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320
-#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100
-#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328
-#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8
-#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330
-#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300
-#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338
- /* PCI 1 */
-#define MV64340_PCI_1_IO_ADDR_REMAP 0x108
-#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110
-#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340
-#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118
-#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348
-#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310
-#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350
-#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318
-#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358
-
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8
-#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0
-#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8
-
-/****************************************/
-/* CPU Control Registers */
-/****************************************/
-
-#define MV64340_CPU_CONFIG 0x000
-#define MV64340_CPU_MODE 0x120
-#define MV64340_CPU_MASTER_CONTROL 0x160
-#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
-#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
-#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168
-
-/****************************************/
-/* SMP RegisterS */
-/****************************************/
-
-#define MV64340_SMP_WHO_AM_I 0x200
-#define MV64340_SMP_CPU0_DOORBELL 0x214
-#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
-#define MV64340_SMP_CPU1_DOORBELL 0x224
-#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
-#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
-#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
-#define MV64340_SMP_SEMAPHOR0 0x244
-#define MV64340_SMP_SEMAPHOR1 0x24c
-#define MV64340_SMP_SEMAPHOR2 0x254
-#define MV64340_SMP_SEMAPHOR3 0x25c
-#define MV64340_SMP_SEMAPHOR4 0x264
-#define MV64340_SMP_SEMAPHOR5 0x26c
-#define MV64340_SMP_SEMAPHOR6 0x274
-#define MV64340_SMP_SEMAPHOR7 0x27c
-
-/****************************************/
-/* CPU Sync Barrier Register */
-/****************************************/
-
-#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
-#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
-#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
-#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8
-
-/****************************************/
-/* CPU Access Protect */
-/****************************************/
-
-#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
-#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
-#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
-#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
-#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
-#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
-#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
-#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8
-
-
-/****************************************/
-/* CPU Error Report */
-/****************************************/
-
-#define MV64340_CPU_ERROR_ADDR_LOW 0x070
-#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
-#define MV64340_CPU_ERROR_DATA_LOW 0x128
-#define MV64340_CPU_ERROR_DATA_HIGH 0x130
-#define MV64340_CPU_ERROR_PARITY 0x138
-#define MV64340_CPU_ERROR_CAUSE 0x140
-#define MV64340_CPU_ERROR_MASK 0x148
-
-/****************************************/
-/* CPU Interface Debug Registers */
-/****************************************/
-
-#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
-#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
-#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
-#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
-#define MV64340_PUNIT_MMASK 0x3e4
-
-/****************************************/
-/* Integrated SRAM Registers */
-/****************************************/
-
-#define MV64340_SRAM_CONFIG 0x380
-#define MV64340_SRAM_TEST_MODE 0X3F4
-#define MV64340_SRAM_ERROR_CAUSE 0x388
-#define MV64340_SRAM_ERROR_ADDR 0x390
-#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
-#define MV64340_SRAM_ERROR_DATA_LOW 0x398
-#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
-#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8
-
-/****************************************/
-/* SDRAM Configuration */
-/****************************************/
-
-#define MV64340_SDRAM_CONFIG 0x1400
-#define MV64340_D_UNIT_CONTROL_LOW 0x1404
-#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
-#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408
-#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c
-#define MV64340_SDRAM_ADDR_CONTROL 0x1410
-#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414
-#define MV64340_SDRAM_OPERATION 0x1418
-#define MV64340_SDRAM_MODE 0x141c
-#define MV64340_EXTENDED_DRAM_MODE 0x1420
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434
-#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438
-#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0
-#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4
-
-/****************************************/
-/* SDRAM Error Report */
-/****************************************/
-
-#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444
-#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440
-#define MV64340_SDRAM_ERROR_ADDR 0x1450
-#define MV64340_SDRAM_RECEIVED_ECC 0x1448
-#define MV64340_SDRAM_CALCULATED_ECC 0x144c
-#define MV64340_SDRAM_ECC_CONTROL 0x1454
-#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458
-
-/******************************************/
-/* Controlled Delay Line (CDL) Registers */
-/******************************************/
-
-#define MV64340_DFCDL_CONFIG0 0x1480
-#define MV64340_DFCDL_CONFIG1 0x1484
-#define MV64340_DLL_WRITE 0x1488
-#define MV64340_DLL_READ 0x148c
-#define MV64340_SRAM_ADDR 0x1490
-#define MV64340_SRAM_DATA0 0x1494
-#define MV64340_SRAM_DATA1 0x1498
-#define MV64340_SRAM_DATA2 0x149c
-#define MV64340_DFCL_PROBE 0x14a0
-
-/******************************************/
-/* Debug Registers */
-/******************************************/
-
-#define MV64340_DUNIT_DEBUG_LOW 0x1460
-#define MV64340_DUNIT_DEBUG_HIGH 0x1464
-#define MV64340_DUNIT_MMASK 0X1b40
-
-/****************************************/
-/* Device Parameters */
-/****************************************/
-
-#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c
-#define MV64340_DEVICE_BANK1_PARAMETERS 0x460
-#define MV64340_DEVICE_BANK2_PARAMETERS 0x464
-#define MV64340_DEVICE_BANK3_PARAMETERS 0x468
-#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c
-#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4
-
-/****************************************/
-/* Device interrupt registers */
-/****************************************/
-
-#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0
-#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4
-#define MV64340_DEVICE_ERROR_ADDR 0x4d8
-#define MV64340_DEVICE_ERROR_DATA 0x4dc
-#define MV64340_DEVICE_ERROR_PARITY 0x4e0
-
-/****************************************/
-/* Device debug registers */
-/****************************************/
-
-#define MV64340_DEVICE_DEBUG_LOW 0x4e4
-#define MV64340_DEVICE_DEBUG_HIGH 0x4e8
-#define MV64340_RUNIT_MMASK 0x4f0
-
-/****************************************/
-/* PCI Slave Address Decoding registers */
-/****************************************/
-
-#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08
-#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88
-#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08
-#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88
-#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c
-#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c
-#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c
-#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c
-#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10
-#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90
-#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10
-#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90
-#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18
-#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98
-#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14
-#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94
-#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14
-#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94
-#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c
-#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20
-#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0
-#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24
-#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4
-#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28
-#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8
-#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00
-#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80
-#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c
-#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c
-#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc
-#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48
-#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8
-#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48
-#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8
-#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c
-#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc
-#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c
-#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc
-#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04
-#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84
-#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08
-#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88
-#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C
-#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C
-#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10
-#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90
-#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50
-#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0
-#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50
-#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0
-#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58
-#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8
-#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54
-#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4
-#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54
-#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8
-#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c
-#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4
-#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00
-#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80
-#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38
-#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8
-#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c
-#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc
-#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40
-#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0
-#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44
-#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4
-#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48
-#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8
-
-/***********************************/
-/* PCI Control Register Map */
-/***********************************/
-
-#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20
-#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0
-#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C
-#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C
-#define MV64340_PCI_0_COMMAND 0xc00
-#define MV64340_PCI_1_COMMAND 0xc80
-#define MV64340_PCI_0_MODE 0xd00
-#define MV64340_PCI_1_MODE 0xd80
-#define MV64340_PCI_0_RETRY 0xc04
-#define MV64340_PCI_1_RETRY 0xc84
-#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04
-#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84
-#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38
-#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8
-#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00
-#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c
-#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04
-#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84
-#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18
-#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98
-#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10
-#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90
-#define MV64340_PCI_0_P2P_CONFIG 0x1d14
-#define MV64340_PCI_1_P2P_CONFIG 0x1d94
-
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58
-
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8
-
-/****************************************/
-/* PCI Configuration Access Registers */
-/****************************************/
-
-#define MV64340_PCI_0_CONFIG_ADDR 0xcf8
-#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc
-#define MV64340_PCI_1_CONFIG_ADDR 0xc78
-#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c
-#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34
-#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4
-
-/****************************************/
-/* PCI Error Report Registers */
-/****************************************/
-
-#define MV64340_PCI_0_SERR_MASK 0xc28
-#define MV64340_PCI_1_SERR_MASK 0xca8
-#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40
-#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0
-#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44
-#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4
-#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48
-#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8
-#define MV64340_PCI_0_ERROR_COMMAND 0x1d50
-#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0
-#define MV64340_PCI_0_ERROR_CAUSE 0x1d58
-#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8
-#define MV64340_PCI_0_ERROR_MASK 0x1d5c
-#define MV64340_PCI_1_ERROR_MASK 0x1ddc
-
-/****************************************/
-/* PCI Debug Registers */
-/****************************************/
-
-#define MV64340_PCI_0_MMASK 0X1D24
-#define MV64340_PCI_1_MMASK 0X1DA4
-
-/*********************************************/
-/* PCI Configuration, Function 0, Registers */
-/*********************************************/
-
-#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000
-#define MV64340_PCI_STATUS_AND_COMMAND 0x004
-#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008
-#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C
-
-#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010
-#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014
-#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018
-#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024
-#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c
-#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
-#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034
-#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C
- /* capability list */
-#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040
-#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044
-#define MV64340_PCI_VPD_ADDR 0x048
-#define MV64340_PCI_VPD_DATA 0x04c
-#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050
-#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054
-#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058
-#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c
-#define MV64340_PCI_X_COMMAND 0x060
-#define MV64340_PCI_X_STATUS 0x064
-#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068
-
-/***********************************************/
-/* PCI Configuration, Function 1, Registers */
-/***********************************************/
-
-#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110
-#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114
-#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118
-#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124
-
-/***********************************************/
-/* PCI Configuration, Function 2, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 3, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
-#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 4, Registers */
-/***********************************************/
-
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
-#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
-#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424
-
-/****************************************/
-/* Messaging Unit Registers (I20) */
-/****************************************/
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C
-
-/****************************************/
-/* Ethernet Unit Registers */
-/****************************************/
-
-/*******************************************/
-/* CUNIT Registers */
-/*******************************************/
-
- /* Address Decoding Register Map */
-
-#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200
-#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208
-#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210
-#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218
-#define MV64340_CUNIT_SIZE0 0xf204
-#define MV64340_CUNIT_SIZE1 0xf20c
-#define MV64340_CUNIT_SIZE2 0xf214
-#define MV64340_CUNIT_SIZE3 0xf21c
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244
-#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250
-#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254
-#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258
-#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C
-
- /* Error Report Registers */
-
-#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310
-#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314
-#define MV64340_CUNIT_ERROR_ADDR 0xf318
-
- /* Cunit Control Registers */
-
-#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300
-#define MV64340_CUNIT_CONFIG_REG 0xb40c
-#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304
-
- /* Cunit Debug Registers */
-
-#define MV64340_CUNIT_DEBUG_LOW 0xf340
-#define MV64340_CUNIT_DEBUG_HIGH 0xf344
-#define MV64340_CUNIT_MMASK 0xf380
-
- /* MPSCs Clocks Routing Registers */
-
-#define MV64340_MPSC_ROUTING_REG 0xb400
-#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404
-#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408
-
- /* MPSCs Interrupts Registers */
-
-#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3))
-#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3))
-
-#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12))
-#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12))
-#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12))
-
- /* MPSC0 Registers */
-
-
-/***************************************/
-/* SDMA Registers */
-/***************************************/
-
-#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13))
-#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13))
-#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13))
-#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13))
-#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13))
-
-#define MV64340_SDMA_CAUSE_REG 0xb800
-#define MV64340_SDMA_MASK_REG 0xb880
-
-/* BRG Interrupts */
-
-#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3))
-#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3))
-#define MV64340_BRG_CAUSE_REG 0xb834
-#define MV64340_BRG_MASK_REG 0xb8b4
-
-/****************************************/
-/* DMA Channel Control */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_CONTROL 0x840
-#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880
-#define MV64340_DMA_CHANNEL1_CONTROL 0x844
-#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884
-#define MV64340_DMA_CHANNEL2_CONTROL 0x848
-#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888
-#define MV64340_DMA_CHANNEL3_CONTROL 0x84C
-#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C
-
-
-/****************************************/
-/* IDMA Registers */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800
-#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804
-#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808
-#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C
-#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810
-#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814
-#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818
-#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c
-#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820
-#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824
-#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828
-#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C
-#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830
-#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834
-#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838
-#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C
-#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870
-#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874
-#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878
-#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C
-
- /* IDMA Address Decoding Base Address Registers */
-
-#define MV64340_DMA_BASE_ADDR_REG0 0xa00
-#define MV64340_DMA_BASE_ADDR_REG1 0xa08
-#define MV64340_DMA_BASE_ADDR_REG2 0xa10
-#define MV64340_DMA_BASE_ADDR_REG3 0xa18
-#define MV64340_DMA_BASE_ADDR_REG4 0xa20
-#define MV64340_DMA_BASE_ADDR_REG5 0xa28
-#define MV64340_DMA_BASE_ADDR_REG6 0xa30
-#define MV64340_DMA_BASE_ADDR_REG7 0xa38
-
- /* IDMA Address Decoding Size Address Register */
-
-#define MV64340_DMA_SIZE_REG0 0xa04
-#define MV64340_DMA_SIZE_REG1 0xa0c
-#define MV64340_DMA_SIZE_REG2 0xa14
-#define MV64340_DMA_SIZE_REG3 0xa1c
-#define MV64340_DMA_SIZE_REG4 0xa24
-#define MV64340_DMA_SIZE_REG5 0xa2c
-#define MV64340_DMA_SIZE_REG6 0xa34
-#define MV64340_DMA_SIZE_REG7 0xa3C
-
- /* IDMA Address Decoding High Address Remap and Access
- Protection Registers */
-
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C
-#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80
-#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70
-#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74
-#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78
-#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c
-#define MV64340_DMA_ARBITER_CONTROL 0x860
-#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0
-
- /* IDMA Headers Retarget Registers */
-
-#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84
-#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88
-
- /* IDMA Interrupt Register */
-
-#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0
-#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4
-#define MV64340_DMA_ERROR_ADDR 0x8c8
-#define MV64340_DMA_ERROR_SELECT 0x8cc
-
- /* IDMA Debug Register ( for internal use ) */
-
-#define MV64340_DMA_DEBUG_LOW 0x8e0
-#define MV64340_DMA_DEBUG_HIGH 0x8e4
-#define MV64340_DMA_SPARE 0xA8C
-
-/****************************************/
-/* Timer_Counter */
-/****************************************/
-
-#define MV64340_TIMER_COUNTER0 0x850
-#define MV64340_TIMER_COUNTER1 0x854
-#define MV64340_TIMER_COUNTER2 0x858
-#define MV64340_TIMER_COUNTER3 0x85C
-#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c
-
-/****************************************/
-/* Watchdog registers */
-/****************************************/
-
-#define MV64340_WATCHDOG_CONFIG_REG 0xb410
-#define MV64340_WATCHDOG_VALUE_REG 0xb414
-
-/****************************************/
-/* I2C Registers */
-/****************************************/
-
-#define MV64XXX_I2C_OFFSET 0xc000
-#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020
-
-/****************************************/
-/* GPP Interface Registers */
-/****************************************/
-
-#define MV64340_GPP_IO_CONTROL 0xf100
-#define MV64340_GPP_LEVEL_CONTROL 0xf110
-#define MV64340_GPP_VALUE 0xf104
-#define MV64340_GPP_INTERRUPT_CAUSE 0xf108
-#define MV64340_GPP_INTERRUPT_MASK0 0xf10c
-#define MV64340_GPP_INTERRUPT_MASK1 0xf114
-#define MV64340_GPP_VALUE_SET 0xf118
-#define MV64340_GPP_VALUE_CLEAR 0xf11c
-
-/****************************************/
-/* Interrupt Controller Registers */
-/****************************************/
-
-/****************************************/
-/* Interrupts */
-/****************************************/
-
-#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004
-#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c
-#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014
-#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c
-#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024
-#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034
-#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c
-#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044
-#define MV64340_INTERRUPT0_MASK_0_LOW 0x054
-#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c
-#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064
-#define MV64340_INTERRUPT1_MASK_0_LOW 0x074
-#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c
-#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084
-
-/****************************************/
-/* MPP Interface Registers */
-/****************************************/
-
-#define MV64340_MPP_CONTROL0 0xf000
-#define MV64340_MPP_CONTROL1 0xf004
-#define MV64340_MPP_CONTROL2 0xf008
-#define MV64340_MPP_CONTROL3 0xf00c
-
-/****************************************/
-/* Serial Initialization registers */
-/****************************************/
-
-#define MV64340_SERIAL_INIT_LAST_DATA 0xf324
-#define MV64340_SERIAL_INIT_CONTROL 0xf328
-#define MV64340_SERIAL_INIT_STATUS 0xf32c
-
-extern void mv64340_irq_init(unsigned int base);
-
-#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index 688320b79fcc..b75bc534c1b3 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -322,6 +322,25 @@ static inline bool sendpage_ok(struct page *page)
return !PageSlab(page) && page_count(page) >= 1;
}
+/*
+ * Check sendpage_ok on contiguous pages.
+ */
+static inline bool sendpages_ok(struct page *page, size_t len, size_t offset)
+{
+ struct page *p = page + (offset >> PAGE_SHIFT);
+ size_t count = 0;
+
+ while (count < len) {
+ if (!sendpage_ok(p))
+ return false;
+
+ p++;
+ count += PAGE_SIZE;
+ }
+
+ return true;
+}
+
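A minimal caller sketch for the new helper (the transmit routine and its fallback policy are hypothetical, not part of this patch): splice the span only when every page in it passes sendpage_ok(), otherwise let the socket layer copy:

	static int xmit_span(struct socket *sock, struct page *page,
			     size_t offset, size_t len)
	{
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
		struct bio_vec bvec;

		/* Enable zero-copy only if no page in the span is a slab
		 * page or has a zero refcount. */
		if (sendpages_ok(page, len, offset))
			msg.msg_flags |= MSG_SPLICE_PAGES;

		bvec_set_page(&bvec, page, len, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
		return sock_sendmsg(sock, &msg);
	}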
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7c2d77d75a88..66e7d26b70a4 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -24,9 +24,8 @@ enum {
NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
NETIF_F_GSO_BIT, /* Enable software GSO. */
- NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
- /* do not use LLTX in new drivers */
- NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
+ __UNUSED_NETIF_F_12,
+ __UNUSED_NETIF_F_13,
NETIF_F_GRO_BIT, /* Generic receive offload */
NETIF_F_LRO_BIT, /* large receive offload */
@@ -59,7 +58,7 @@ enum {
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
- NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
+ __UNUSED_NETIF_F_37,
NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
NETIF_F_RXHASH_BIT, /* Receive hashing offload */
NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
@@ -106,7 +105,6 @@ enum {
#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
-#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
@@ -120,10 +118,8 @@ enum {
#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX)
#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
-#define NETIF_F_LLTX __NETIF_F(LLTX)
#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
#define NETIF_F_LRO __NETIF_F(LRO)
-#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
@@ -192,8 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_NEVER_CHANGE NETIF_F_VLAN_CHALLENGED
/* remember that ((t)1 << t_BITS) is undefined in C99 */
#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
@@ -214,9 +209,6 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
-#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
- NETIF_F_FSO)
-
/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 607009150b5f..e87b5e488325 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -40,7 +40,6 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
-
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
@@ -81,6 +80,7 @@ struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
+struct phy_link_topology;
typedef u32 xdp_features_t;
@@ -356,7 +356,7 @@ struct napi_struct {
unsigned long state;
int weight;
- int defer_hard_irqs_count;
+ u32 defer_hard_irqs_count;
unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
@@ -645,9 +645,6 @@ struct netdev_queue {
#ifdef CONFIG_SYSFS
struct kobject kobj;
#endif
-#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
- int numa_node;
-#endif
unsigned long tx_maxrate;
/*
* Number of TX timeouts for this queue
@@ -660,13 +657,13 @@ struct netdev_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
- /* NAPI instance for the queue
- * Readers and writers must hold RTNL
- */
- struct napi_struct *napi;
+
/*
* write-mostly part
*/
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
@@ -676,8 +673,16 @@ struct netdev_queue {
unsigned long state;
-#ifdef CONFIG_BQL
- struct dql dql;
+/*
+ * slow- / control-path part
+ */
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
+
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ int numa_node;
#endif
} ____cacheline_aligned_in_smp;
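The reshuffle above follows a recurring pattern: fields dirtied on every transmit (BQL's dql, the xmit lock) share write-mostly cachelines, while RTNL-protected control fields (napi, numa_node) move behind them so hot-path stores stop bouncing their lines. A schematic sketch of the idea, not the real struct:

	struct example_txq {
		/* read-mostly hot path */
		void			*driver_state;
		unsigned long		tx_maxrate;

		/* write-mostly hot path: own cacheline, dirtied per packet */
		spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
		unsigned long		in_flight_bytes;

		/* slow/control path: only touched under RTNL */
		struct napi_struct	*napi;
		int			numa_node;
	} ____cacheline_aligned_in_smp;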
@@ -1232,7 +1237,7 @@ struct netdev_net_notifier {
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
- * Deletes the FDB entry from dev coresponding to addr.
+ * Deletes the FDB entry from dev corresponding to addr.
* int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
@@ -1608,7 +1613,8 @@ struct net_device_ops {
* userspace; this means that the order of these flags can change
* during any kernel release.
*
- * You should have a pretty good reason to be extending these flags.
+ * You should add bitfield booleans after either net_device::priv_flags
+ * (hotpath) or ::threaded (slowpath) instead of extending these flags.
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
@@ -1647,10 +1653,6 @@ struct net_device_ops {
* @IFF_NO_ADDRCONF: prevent ipv6 addrconf
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
- * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
- * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to
- * ndo_hwtstamp_set() for all timestamp requests regardless of source,
- * even if those aren't HWTSTAMP_SOURCE_NETDEV.
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1685,42 +1687,8 @@ enum netdev_priv_flags {
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_NO_ADDRCONF = BIT_ULL(30),
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
- IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
- IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33),
};
-#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
-#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_BONDING IFF_BONDING
-#define IFF_ISATAP IFF_ISATAP
-#define IFF_WAN_HDLC IFF_WAN_HDLC
-#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
-#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
-#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
-#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
-#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
-#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
-#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
-#define IFF_UNICAST_FLT IFF_UNICAST_FLT
-#define IFF_TEAM_PORT IFF_TEAM_PORT
-#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
-#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
-#define IFF_MACVLAN IFF_MACVLAN
-#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
-#define IFF_NO_QUEUE IFF_NO_QUEUE
-#define IFF_OPENVSWITCH IFF_OPENVSWITCH
-#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
-#define IFF_TEAM IFF_TEAM
-#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
-#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
-#define IFF_MACSEC IFF_MACSEC
-#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
-#define IFF_FAILOVER IFF_FAILOVER
-#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
-#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
-#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
-
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
@@ -1750,6 +1718,12 @@ enum netdev_reg_state {
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
+ * @priv_flags: flags invisible to userspace defined as bits, see
+ * enum netdev_priv_flags for the definitions
+ * @lltx: device supports lockless Tx. Deprecated for real HW
+ * drivers. Mainly used by logical interfaces, such as
+ * bonding and tunnels
+ *
* @name: This is the first field of the "visible" part of this structure
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
@@ -1816,8 +1790,6 @@ enum netdev_reg_state {
*
* @flags: Interface flags (a la BSD)
* @xdp_features: XDP capability supported by the device
- * @priv_flags: Like 'flags' but invisible to userspace,
- * see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
* @priv_len: Size of the ->priv flexible array
* @priv: Flexible array containing private data
@@ -1978,6 +1950,7 @@ enum netdev_reg_state {
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
* @priomap: XXX: need comments on this one
+ * @link_topo: Physical link topology tracking attached PHYs
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
@@ -1990,6 +1963,14 @@ enum netdev_reg_state {
*
* @threaded: napi threaded mode is enabled
*
+ * @see_all_hwtstamp_requests: device wants to see calls to
+ * ndo_hwtstamp_set() for all timestamp requests
+ * regardless of source, even if those aren't
+ * HWTSTAMP_SOURCE_NETDEV
+ * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
+ * @netns_local: interface can't change network namespaces
+ * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
+ *
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
@@ -2040,7 +2021,10 @@ struct net_device {
/* TX read-mostly hotpath */
__cacheline_group_begin(net_device_read_tx);
- unsigned long long priv_flags;
+ struct_group(priv_flags_fast,
+ unsigned long priv_flags:32;
+ unsigned long lltx:1;
+ );
const struct net_device_ops *netdev_ops;
const struct header_ops *header_ops;
struct netdev_queue *_tx;
@@ -2091,7 +2075,7 @@ struct net_device {
unsigned int real_num_rx_queues;
struct netdev_rx_queue *_rx;
unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
+ u32 napi_defer_hard_irqs;
unsigned int gro_max_size;
unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
@@ -2369,12 +2353,19 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
+ struct phy_link_topology *link_topo;
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
bool threaded;
+ /* priv_flags_slow, ungrouped to save space */
+ unsigned long see_all_hwtstamp_requests:1;
+ unsigned long change_proto_down:1;
+ unsigned long netns_local:1;
+ unsigned long fcoe_mtu:1;
+
struct list_head net_notifier_list;
#if IS_ENABLED(CONFIG_MACSEC)
@@ -3094,8 +3085,6 @@ void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
@@ -3539,7 +3528,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
dql_completed(&dev_queue->dql, bytes);
/*
- * Without the memory barrier there is a small possiblity that
+ * Without the memory barrier there is a small possibility that
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
@@ -3578,6 +3567,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
}
/**
+ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
+ * @dev: network device
+ * @qid: stack index of the queue to reset
+ */
+static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
+ u32 qid)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
+}
+
+/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
*
@@ -3586,7 +3586,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
- netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+ netdev_tx_reset_subqueue(dev_queue, 0);
}
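A plausible use of the new per-queue reset (driver and context are hypothetical): drop stale BQL accounting for every ring after a reconfiguration that discarded in-flight frames:

	static void foo_reset_all_queues(struct net_device *dev)
	{
		unsigned int i;

		for (i = 0; i < dev->real_num_tx_queues; i++)
			netdev_tx_reset_subqueue(dev, i);
	}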
/**
@@ -3950,8 +3950,11 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
+u32 dev_get_min_mp_channel_count(const struct net_device *dev);
+
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
@@ -4449,7 +4452,7 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_LOCK(dev, txq, cpu) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_lock(txq, cpu); \
} else { \
__netif_tx_acquire(txq); \
@@ -4457,12 +4460,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_TRYLOCK(dev, txq) \
- (((dev->features & NETIF_F_LLTX) == 0) ? \
+ (!(dev)->lltx ? \
__netif_tx_trylock(txq) : \
__netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_unlock(txq); \
} else { \
__netif_tx_release(txq); \
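On the producer side, the same series converts drivers from the retired NETIF_F_LLTX / IFF_* bits to the new bitfield booleans. A hedged conversion sketch (the setup function is illustrative):

	static void foo_tunnel_setup(struct net_device *dev)
	{
		/* was: dev->features   |= NETIF_F_LLTX;
		 *      dev->priv_flags |= IFF_NETNS_LOCAL | IFF_CHANGE_PROTO_DOWN; */
		dev->lltx = true;		/* lockless Tx: logical devices only */
		dev->netns_local = true;	/* pinned to its network namespace */
		dev->change_proto_down = true;	/* honours IFLA_PROTO_DOWN */
	}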
@@ -4607,7 +4610,7 @@ void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
/**
- * __dev_uc_sync - Synchonize device's unicast list
+ * __dev_uc_sync - Synchronize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
@@ -4651,7 +4654,7 @@ void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
/**
- * __dev_mc_sync - Synchonize device's multicast list
+ * __dev_mc_sync - Synchronize device's multicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index c47443e7a97e..5eaceef41e6c 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -38,11 +38,8 @@ static inline void folio_start_private_2(struct folio *folio)
folio_set_private_2(folio);
}
-/* Marks used on xarray-based buffers */
-#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
-#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
-
enum netfs_io_source {
+ NETFS_SOURCE_UNKNOWN,
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
@@ -73,6 +70,7 @@ struct netfs_inode {
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
};
/*
@@ -133,9 +131,11 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio)
struct netfs_io_stream {
/* Submission tracking */
struct netfs_io_subrequest *construct; /* Op being constructed */
+ size_t sreq_max_len; /* Maximum size of a subrequest */
+ unsigned int sreq_max_segs; /* 0 or max number of segments in an iterator */
unsigned int submit_off; /* Folio offset we're submitting from */
unsigned int submit_len; /* Amount of data left to submit */
- unsigned int submit_max_len; /* Amount I/O can be rounded up to */
+ unsigned int submit_extendable_to; /* Amount I/O can be rounded up to */
void (*prepare_write)(struct netfs_io_subrequest *subreq);
void (*issue_write)(struct netfs_io_subrequest *subreq);
/* Collection tracking */
@@ -176,41 +176,45 @@ struct netfs_io_subrequest {
struct list_head rreq_link; /* Link in rreq->subrequests */
struct iov_iter io_iter; /* Iterator for this subrequest */
unsigned long long start; /* Where to start the I/O */
- size_t max_len; /* Maximum size of the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
+ size_t consumed; /* Amount of read data consumed */
+ size_t prev_donated; /* Amount of data donated from previous subreq */
+ size_t next_donated; /* Amount of data donated from next subreq */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
unsigned int nr_segs; /* Number of segs in io_iter */
- unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* I/O stream this belongs to */
+ unsigned char curr_folioq_slot; /* Folio currently being read */
+ unsigned char curr_folio_order; /* Order of folio */
+ struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */
#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
-#define NETFS_SREQ_HIT_EOF 12 /* Set if we hit the EOF */
};
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
- NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
+ NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
- NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
+ NETFS_PGPRIV2_COPY_TO_CACHE, /* [DEPRECATED] This is writing read data to the cache */
nr__netfs_io_origin
} __mode(byte);
@@ -227,11 +231,14 @@ struct netfs_io_request {
struct address_space *mapping; /* The mapping being accessed */
struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct readahead_control *ractl; /* Readahead descriptor */
struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */
+ struct folio_queue *buffer; /* Head of I/O buffer */
+ struct folio_queue *buffer_tail; /* Tail of I/O buffer */
struct iov_iter iter; /* Unencrypted-side iterator */
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
@@ -245,24 +252,23 @@ struct netfs_io_request {
unsigned int nr_group_rel; /* Number of refs to release on ->group */
spinlock_t lock; /* Lock for queuing subreqs */
atomic_t nr_outstanding; /* Number of ops in progress */
- atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
- size_t upper_len; /* Length can be extended to here */
unsigned long long submitted; /* Amount submitted for I/O so far */
unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
- short error; /* 0 or error that occurred */
+ long error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
+ u8 buffer_head_slot; /* First slot in ->buffer */
+ u8 buffer_tail_slot; /* Next slot in ->buffer_tail */
unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */
- unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */
unsigned long long collected_to; /* Point we've collected to */
unsigned long long cleaned_to; /* Position we've cleaned folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ size_t prev_donated; /* Fallback for subreq->prev_donated */
refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
@@ -274,6 +280,7 @@ struct netfs_io_request {
#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
+#define NETFS_RREQ_NEED_RETRY 14 /* Need to try retrying */
#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */
const struct netfs_request_ops *netfs_ops;
@@ -292,7 +299,7 @@ struct netfs_request_ops {
/* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
- bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ int (*prepare_read)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
bool (*is_still_valid)(struct netfs_io_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
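With the per-subrequest max_len/max_nr_segs fields gone, limits now live on the I/O stream, so a filesystem's hook under the renamed op would set them there. A rough sketch, assuming the subrequest's back-pointer to its request and an arbitrary cap:

	static int foo_prepare_read(struct netfs_io_subrequest *subreq)
	{
		struct netfs_io_stream *stream = &subreq->rreq->io_streams[0];

		/* The old clamp_length() trimmed subreq->len directly;
		 * the replacement bounds future submissions instead. */
		stream->sreq_max_len = 256 * 1024;	/* illustrative */
		return 0;
	}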
@@ -422,7 +429,10 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
/* (Sub)request management API. */
-void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
+ bool was_async);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
+ int error, bool was_async);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bd19c4b91e31..cd4e28db0cbd 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -64,6 +64,7 @@ int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
+void do_netpoll_cleanup(struct netpoll *np);
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index ceb70a926b95..9ad727ddfedb 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,11 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index f9df88091c6d..8d7430d9f218 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -281,15 +281,18 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_UNION_NOTSUPP = 10090,
- NFS4ERR_OFFLOAD_DENIED = 10091,
- NFS4ERR_WRONG_LFS = 10092,
- NFS4ERR_BADLABEL = 10093,
- NFS4ERR_OFFLOAD_NO_REQS = 10094,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
/* xattr (RFC8276) */
- NFS4ERR_NOXATTR = 10095,
- NFS4ERR_XATTR2BIG = 10096,
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
+
+ /* can be used for internal errors */
+ NFS4ERR_FIRST_FREE
};
/* error codes for internal client use */
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..5fc02df88252
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1df86ab98c77..853df3fcd4c2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -49,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_LOCAL_IO 10 /* - client is local */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -125,6 +127,13 @@ struct nfs_client {
struct net *cl_net;
struct list_head pending_cb_stateids;
struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ spinlock_t cl_localio_lock;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -158,6 +167,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -234,8 +244,7 @@ struct nfs_server {
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 45623af3e7b8..12d8e47bc5a3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -446,7 +446,7 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
};
struct nfs4_open_delegation {
@@ -1854,6 +1854,24 @@ struct nfs_rpc_ops {
};
/*
+ * Helper functions used by NFS client and/or server
+ */
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
+
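A short sketch of the shared helpers in use (the routine is hypothetical; an NFS write verifier is 8 opaque bytes):

	static int xdr_copy_verifier(struct xdr_stream *out, struct xdr_stream *in)
	{
		u8 verifier[8];
		int error;

		error = decode_opaque_fixed(in, verifier, sizeof(verifier));
		if (error)
			return error;	/* -EIO on a short buffer */
		encode_opaque_fixed(out, verifier, sizeof(verifier));
		return 0;
	}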
+/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
@@ -1866,4 +1884,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..b353abe00357
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+/* nfsd_file structure is purposely kept opaque to NFS client */
+struct nfsd_file;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+/*
+ * Useful to allow a client to negotiate if localio is
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ struct list_head list;
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+} nfs_uuid_t;
+
+void nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *,
+ struct net *, struct auth_domain *, struct module *);
+void nfs_uuid_invalidate_clients(struct list_head *list);
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+extern struct nfsd_file *
+nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *,
+ const struct cred *, const struct nfs_fh *,
+ const fmode_t) __must_hold(rcu);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_serv_try_get)(struct net *);
+ void (*nfsd_serv_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ const fmode_t);
+ void (*nfsd_file_put_local)(struct nfsd_file *);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, const fmode_t);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfsd_localio_ops_init(void)
+{
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
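A hedged sketch of the client-side fast path this enables (the caller, error handling, and the read itself are hypothetical; cl_uuid comes from the nfs_fs_sb.h hunk above):

	static struct file *foo_try_localio(struct nfs_client *clp,
					    const struct cred *cred,
					    const struct nfs_fh *fh)
	{
		struct nfsd_file *localio;

		/* Only meaningful after LOCALIOPROC_UUID_IS_LOCAL confirmed
		 * that client and server share this kernel. */
		localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
					    cred, fh, FMODE_READ);
		if (IS_ERR_OR_NULL(localio))
			return NULL;
		return nfs_to->nfsd_file_file(localio);	/* bypasses sunrpc */
	}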
diff --git a/include/linux/numa.h b/include/linux/numa.h
index eb19503604fe..3567e40329eb 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -30,6 +30,12 @@ static inline bool numa_valid_node(int nid)
#ifdef CONFIG_NUMA
#include <asm/sparsemem.h>
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+void __init alloc_node_data(int nid);
+void __init alloc_offline_node_data(int nid);
+
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
@@ -57,6 +63,8 @@ static inline int phys_to_target_node(u64 start)
{
return 0;
}
+
+static inline void alloc_offline_node_data(int nid) {}
#endif
#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
new file mode 100644
index 000000000000..cfad6ce7e1bd
--- /dev/null
+++ b/include/linux/numa_memblks.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NUMA_MEMBLKS_H
+#define __NUMA_MEMBLKS_H
+
+#ifdef CONFIG_NUMA_MEMBLKS
+#include <linux/types.h>
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_reset_distance(void);
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down);
+
+#ifdef CONFIG_NUMA_EMU
+int numa_emu_cmdline(char *str);
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids);
+u64 __init numa_emu_dma_end(void);
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NUMA_EMU */
+
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(u64 start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
+
+#endif /* CONFIG_NUMA_MEMBLKS */
+
+#endif /* __NUMA_MEMBLKS_H */
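A minimal sketch of how an architecture might feed this (the two-node layout is made up):

	static int __init foo_numa_init(void)
	{
		numa_add_memblk(0, 0, SZ_4G);			/* node 0: 0-4 GiB */
		numa_add_memblk(1, SZ_4G, 2ULL * SZ_4G);	/* node 1: 4-8 GiB */
		return 0;
	}

	/* from the arch's NUMA bring-up:
	 *	numa_memblks_init(foo_numa_init, true);
	 */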
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
index e10333d78dbb..19d2b256180f 100644
--- a/include/linux/nvme-keyring.h
+++ b/include/linux/nvme-keyring.h
@@ -12,7 +12,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
-
+struct key *nvme_tls_key_lookup(key_serial_t key_id);
#else
static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
@@ -24,5 +24,9 @@ static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
+static inline struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index eb2f04d636c8..97c5f00b9aa3 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -25,6 +25,7 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_NO_RSC = 0x06,
NVME_RDMA_CM_INVALID_IRD = 0x07,
NVME_RDMA_CM_INVALID_ORD = 0x08,
+ NVME_RDMA_CM_INVALID_CNTLID = 0x09,
};
static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
@@ -46,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
return "invalid IRD";
case NVME_RDMA_CM_INVALID_ORD:
return "Invalid ORD";
+ case NVME_RDMA_CM_INVALID_CNTLID:
+ return "invalid controller ID";
default:
return "unrecognized reason";
}
@@ -64,7 +67,8 @@ struct nvme_rdma_cm_req {
__le16 qid;
__le16 hrqsize;
__le16 hsqsize;
- u8 rsvd[24];
+ __le16 cntlid;
+ u8 rsvd[22];
};
/**
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7b2ae2e43544..b58d9405d65e 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -987,8 +987,8 @@ struct nvme_rw_command {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum {
@@ -1057,8 +1057,8 @@ struct nvme_write_zeroes_cmd {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum nvme_zone_mgmt_action {
diff --git a/include/linux/oa_tc6.h b/include/linux/oa_tc6.h
new file mode 100644
index 000000000000..15f58e3c56c7
--- /dev/null
+++ b/include/linux/oa_tc6.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/spi/spi.h>
+
+struct oa_tc6;
+
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev);
+void oa_tc6_exit(struct oa_tc6 *tc6);
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value);
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value);
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb);
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
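A rough probe-time sketch of the framework's public API (the driver, register address, and error policy are hypothetical; 0x0000 is assumed here to address the OPEN Alliance IDVER register):

	static int foo_macphy_probe(struct spi_device *spi, struct net_device *netdev)
	{
		struct oa_tc6 *tc6;
		u32 idver;

		tc6 = oa_tc6_init(spi, netdev);
		if (!tc6)
			return -ENODEV;
		if (oa_tc6_read_register(tc6, 0x0000, &idver)) {
			oa_tc6_exit(tc6);
			return -EIO;
		}
		return 0;
	}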
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 082841908fe7..c9e3843d2dd5 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -84,13 +84,3 @@ extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
-#else
-#define board_onenand_data NULL
-static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
-{
- return 0;
-}
-#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5769fe6e4950..1b3a76710487 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -66,8 +66,6 @@
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -103,22 +101,18 @@ enum pageflags {
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_error,
- PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
+ PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- PG_uncached, /* Page has been mapped as uncached */
-#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
@@ -126,14 +120,21 @@ enum pageflags {
PG_young,
PG_idle,
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
PG_arch_3,
#endif
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
+ /* Anonymous memory (and shmem) */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /* Some filesystems */
+ PG_checked = PG_owner_priv_1,
+
/*
* Depending on the way an anonymous folio can be mapped into a page
* table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
@@ -141,13 +142,13 @@ enum pageflags {
* tail pages of an anonymous folio. For now, we only expect it to be
* set on tail pages for PTE-mapped THP.
*/
- PG_anon_exclusive = PG_mappedtodisk,
-
- /* Filesystems */
- PG_checked = PG_owner_priv_1,
+ PG_anon_exclusive = PG_owner_2,
- /* SwapBacked */
- PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /*
+ * Set if all buffer heads in the folio are mapped.
+ * Filesystems which do not use BHs can use it for their own purpose.
+ */
+ PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
@@ -183,8 +184,9 @@ enum pageflags {
*/
/* At least one page in this folio has the hwpoison flag set */
- PG_has_hwpoisoned = PG_error,
+ PG_has_hwpoisoned = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -235,7 +237,7 @@ static __always_inline int page_is_fake_head(const struct page *page)
return page_fixed_fake_head(page) != page;
}
-static inline unsigned long _compound_head(const struct page *page)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
@@ -506,7 +508,6 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
-PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
@@ -514,8 +515,9 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
-PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
- TESTCLEARFLAG(Active, active, PF_HEAD)
+FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -531,9 +533,9 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
-PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
+FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
@@ -542,8 +544,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
*/
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
-PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
- TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+
+/* owner_2 can be set on tail pages for anon memory */
+FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
@@ -556,8 +559,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
+FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
@@ -577,34 +580,26 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
-static __always_inline bool PageSwapCache(const struct page *page)
-{
- return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
#endif
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
- __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
- TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
-#else
-PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
- TESTSCFLAG_FALSE(Mlocked, mlocked)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
+FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(Uncached, uncached)
+FOLIO_FLAG_FALSE(mlocked)
+ __FOLIO_CLEAR_FLAG_NOOP(mlocked)
+ FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
+ FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -865,8 +860,18 @@ static inline void ClearPageCompound(struct page *page)
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+/*
+ * PG_partially_mapped is protected by deferred_split split_queue_lock,
+ * so it's safe to use non-atomic set/clear.
+ */
+__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
+FOLIO_TEST_FLAG_FALSE(partially_mapped)
+__FOLIO_SET_FLAG_NOOP(partially_mapped)
+__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
@@ -927,79 +932,74 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * For pages that are never mapped to userspace,
- * page_type may be used. Because it is initialised to -1, we invert the
- * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
- * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of _mapcount won't be
- * mistaken for a page type value.
+ * For pages that do not use mapcount, page_type may be used.
+ * The low 24 bits of pagetype may be used for your own purposes, as long
+ * as you are careful to not affect the top 8 bits. The low bits of
+ * pagetype will be overwritten when you clear the page_type from the page.
*/
-
enum pagetype {
- PG_buddy = 0x40000000,
- PG_offline = 0x20000000,
- PG_table = 0x10000000,
- PG_guard = 0x08000000,
- PG_hugetlb = 0x04000000,
- PG_slab = 0x02000000,
- PG_zsmalloc = 0x01000000,
-
- PAGE_TYPE_BASE = 0x80000000,
-
- /*
- * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
- * allow owners that set a type to reuse the lower 16 bit for their own
- * purposes.
- */
- PAGE_MAPCOUNT_RESERVE = ~0x0000ffff,
+ /* 0x00-0x7f are positive numbers, ie mapcount */
+ /* Reserve 0x80-0xef for mapcount overflow. */
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+
+ PGTY_mapcount_underflow = 0xff
};
-#define PageType(page, flag) \
- ((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
-#define folio_test_type(folio, flag) \
- ((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+static inline bool page_type_has_type(int page_type)
+{
+ return page_type < (PGTY_mapcount_underflow << 24);
+}
-static inline int page_type_has_type(unsigned int page_type)
+/* This takes a mapcount which is one more than page->_mapcount */
+static inline bool page_mapcount_is_type(unsigned int mapcount)
{
- return (int)page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type_has_type(mapcount - 1);
}
-static inline int page_has_type(const struct page *page)
+static inline bool page_has_type(const struct page *page)
{
- return page_type_has_type(READ_ONCE(page->page_type));
+ return page_mapcount_is_type(data_race(page->page_type));
}
#define FOLIO_TYPE_OPS(lname, fname) \
-static __always_inline bool folio_test_##fname(const struct folio *folio)\
+static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
- return folio_test_type(folio, PG_##lname); \
+ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
+ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
+ folio); \
+ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
+ folio->page.page_type = UINT_MAX; \
}
#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
- return PageType(page, PG_##lname); \
+ return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(!PageType(page, 0), page); \
- page->page_type &= ~PG_##lname; \
+ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
+ page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- page->page_type |= PG_##lname; \
+ page->page_type = UINT_MAX; \
}
/*
@@ -1076,6 +1076,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb)
PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+/*
+ * Mark pages that have to be accepted before being touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
@@ -1175,25 +1182,20 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
- * page_has_private - Determine if page has private stuff
- * @page: The page to be checked
+ * folio_has_private - Determine if folio has private stuff
+ * @folio: The folio to be checked
*
- * Determine if a page has private stuff, indicating that release routines
+ * Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(const struct page *page)
-{
- return !!(page->flags & PAGE_FLAGS_PRIVATE);
-}
-
-static inline bool folio_has_private(const struct folio *folio)
+static inline int folio_has_private(const struct folio *folio)
{
- return page_has_private(&folio->page);
+ return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 904c52f97284..79dbd8bc35a7 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -26,11 +26,14 @@ struct page_counter {
atomic_long_t children_low_usage;
unsigned long watermark;
+ /* Latest cg2 reset watermark */
+ unsigned long local_watermark;
unsigned long failcnt;
/* Keep all the read-mostly fields in a separate cacheline. */
CACHELINE_PADDING(_pad2_);
+ bool protection_support;
unsigned long min;
unsigned long low;
unsigned long high;
@@ -44,12 +47,17 @@ struct page_counter {
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
static inline void page_counter_init(struct page_counter *counter,
- struct page_counter *parent)
+ struct page_counter *parent,
+ bool protection_support)
{
- atomic_long_set(&counter->usage, 0);
+ counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
+ counter->protection_support = protection_support;
}
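For illustration only, a hedged sketch of the new third argument: only the first counter of a set (id 0) should pass true, since min/low protection is tracked for it alone. The counter names here are hypothetical.

static void example_init_counters(struct page_counter *memory,
				  struct page_counter *swap)
{
	page_counter_init(memory, NULL, true);	/* id 0: protection tracked */
	page_counter_init(swap, NULL, false);	/* auxiliary: no protection */
}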
static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -78,11 +86,24 @@ int page_counter_memparse(const char *buf, const char *max,
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
- counter->watermark = page_counter_read(counter);
+ unsigned long usage = page_counter_read(counter);
+
+ /*
+ * Update local_watermark first, so it's always <= watermark
+ * (modulo CPU/compiler re-ordering)
+ */
+ counter->local_watermark = usage;
+ counter->watermark = usage;
}
+#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
struct page_counter *counter,
bool recursive_protection);
+#else
+static inline void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection) {}
+#endif
#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d9c7edb6422b..68a5f1ff3301 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait);
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
@@ -204,14 +206,21 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_LARGE_FOLIO_SUPPORT = 6,
- AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
- AS_STABLE_WRITES, /* must wait for writeback before modifying
+ AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
folio contents */
- AS_INACCESSIBLE, /* Do not attempt direct R/W access to the mapping,
- including to move the mapping */
+ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
+ /* Bits 16-25 are used for FOLIO_ORDER */
+ AS_FOLIO_ORDER_BITS = 5,
+ AS_FOLIO_ORDER_MIN = 16,
+ AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
+
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
@@ -367,9 +376,64 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+/*
+ * mapping_max_folio_size_supported() - Check the max folio size supported
+ *
+ * The filesystem should call this function at mount time if there is a
+ * requirement on the folio mapping size in the page cache.
+ */
+static inline size_t mapping_max_folio_size_supported(void)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ return PAGE_SIZE;
+}
+
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (ie there is
+ * something the core cannot know).
+ * Do not tune it based on, eg, i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+ unsigned int min,
+ unsigned int max)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return;
+
+ if (min > MAX_PAGECACHE_ORDER)
+ min = MAX_PAGECACHE_ORDER;
+
+ if (max > MAX_PAGECACHE_ORDER)
+ max = MAX_PAGECACHE_ORDER;
+
+ if (max < min)
+ max = min;
+
+ mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+ (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+ unsigned int min)
+{
+ mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
- * @mapping: The file.
+ * @mapping: The address space of the file.
*
* The filesystem should call this function in its inode constructor to
* indicate that the VFS can use large folios to cache the contents of
@@ -380,7 +444,44 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
*/
static inline void mapping_set_large_folios(struct address_space *mapping)
{
- __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
+}
+
+static inline unsigned long
+mapping_min_folio_nrpages(struct address_space *mapping)
+{
+ return 1UL << mapping_min_folio_order(mapping);
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ * @index: The page index.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(struct address_space *mapping,
+ pgoff_t index)
+{
+ return round_down(index, mapping_min_folio_nrpages(mapping));
}
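A minimal usage sketch (editorial, not from the patch), assuming a hypothetical filesystem with 16K blocks on 4K pages, i.e. a minimum folio order of 2: the inode constructor pins the order range once, and callers adding folios round their index down to a naturally aligned one.

static void example_inode_init(struct address_space *mapping)
{
	mapping_set_folio_min_order(mapping, 2);	/* assumed: 16K blocks */
}

static pgoff_t example_new_folio_index(struct address_space *mapping,
				       pgoff_t index)
{
	/* with min order 2, index 5 rounds down to 4 (4-page folios) */
	return mapping_align_index(mapping, index);
}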
/*
@@ -389,20 +490,17 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
- /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
"Anonymous mapping always supports large folio");
- return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ return mapping_max_folio_order(mapping) > 0;
}
/* Return the maximum folio size for this pagecache mapping, in bytes. */
-static inline size_t mapping_max_folio_size(struct address_space *mapping)
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
- if (mapping_large_folio_support(mapping))
- return PAGE_SIZE << MAX_PAGECACHE_ORDER;
- return PAGE_SIZE;
+ return PAGE_SIZE << mapping_max_folio_order(mapping);
}
static inline int filemap_nr_thps(struct address_space *mapping)
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27cd1e59ccf7..f5eb5a32aeed 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -130,4 +130,62 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private);
+typedef int __bitwise folio_walk_flags_t;
+
+/*
+ * Walk migration entries as well. Careful: a large folio might get split
+ * concurrently.
+ */
+#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
+
+/* Walk shared zeropages (small + huge) as well. */
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+
+enum folio_walk_level {
+ FW_LEVEL_PTE,
+ FW_LEVEL_PMD,
+ FW_LEVEL_PUD,
+};
+
+/**
+ * struct folio_walk - folio_walk_start() / folio_walk_end() data
+ * @page: exact folio page referenced (if applicable)
+ * @level: page table level identifying the entry type
+ * @ptep: pointer to the page table entry (FW_LEVEL_PTE).
+ * @pmdp: pointer to the page table entry (FW_LEVEL_PMD).
+ * @pudp: pointer to the page table entry (FW_LEVEL_PUD).
+ * @ptl: pointer to the page table lock.
+ *
+ * (see folio_walk_start() documentation for more details)
+ */
+struct folio_walk {
+ /* public */
+ struct page *page;
+ enum folio_walk_level level;
+ union {
+ pte_t *ptep;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ };
+ union {
+ pte_t pte;
+ pud_t pud;
+ pmd_t pmd;
+ };
+ /* private */
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+};
+
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags);
+
+#define folio_walk_end(__fw, __vma) do { \
+ spin_unlock((__fw)->ptl); \
+ if (likely((__fw)->level == FW_LEVEL_PTE)) \
+ pte_unmap((__fw)->ptep); \
+ vma_pgtable_walk_end(__vma); \
+} while (0)
+
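A hedged sketch of the calling convention (assuming the usual requirement that the VMA is kept stable, e.g. under mmap_lock): on success folio_walk_start() returns the folio with the page table lock held, and folio_walk_end() drops that lock and the PTE mapping.

static struct folio *example_grab_folio(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio;

	folio = folio_walk_start(&fw, vma, addr, 0);
	if (!folio)
		return NULL;

	folio_get(folio);	/* fw.page/fw.level valid; PTL held here */
	folio_walk_end(&fw, vma);
	return folio;
}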
#endif /* _LINUX_PAGEWALK_H */
diff --git a/include/linux/path.h b/include/linux/path.h
index ca073e70decd..7ea389dc764b 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -18,12 +18,6 @@ static inline int path_equal(const struct path *path1, const struct path *path2)
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
-static inline void path_put_init(struct path *path)
-{
- path_put(path);
- *path = (struct path) { };
-}
-
/*
* Cleanup macro for use with __free(path_put). Avoids dereference and
* copying @path unlike DEFINE_FREE(). path_put() will handle the empty
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index df54cd5b15db..0e8b74e63767 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -8,6 +8,7 @@
/* Address Translation Service */
bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
+int pci_prepare_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
@@ -16,6 +17,8 @@ static inline bool pci_ats_supported(struct pci_dev *d)
{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
+static inline int pci_prepare_ats(struct pci_dev *dev, int ps)
+{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d)
{ return -ENODEV; }
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 85bdf2adb760..42ef06136bd1 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -128,6 +128,7 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
+ * @domain_nr: PCI domain number of the endpoint controller
* @init_complete: flag to indicate whether the EPC initialization is complete
* or not
*/
@@ -145,10 +146,12 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
+ int domain_nr;
bool init_complete;
};
/**
+ * enum pci_epc_bar_type - configurability of endpoint BAR
* @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
* @BAR_FIXED: The BAR mask is fixed by the hardware.
* @BAR_RESERVED: The BAR should not be touched by an EPF driver.
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4cf89a4b4cbc..573b4c4c2be6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -371,6 +371,7 @@ struct pci_dev {
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int pinned:1; /* Whether this dev is pinned */
+ unsigned int config_rrs_sv:1; /* Config RRS software visibility */
unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
@@ -517,6 +518,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_DOE
struct xarray doe_mbs; /* Data Object Exchange mailboxes */
#endif
+#ifdef CONFIG_PCI_NPEM
+ struct npem *npem; /* Native PCIe Enclosure Management */
+#endif
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
@@ -1098,7 +1102,7 @@ enum pcie_bus_config_types {
extern enum pcie_bus_config_types pcie_bus_config;
-extern struct bus_type pci_bus_type;
+extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
@@ -1884,7 +1888,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -2290,8 +2294,11 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
const char *name);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e388c8b1cbc2..4cf6aaed5f35 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -580,6 +580,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
@@ -2661,6 +2662,8 @@
#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+#define PCI_VENDOR_ID_GLENFLY 0x6766
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
@@ -2706,6 +2709,9 @@
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 36b942b67b7d..c012df33a9f0 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -145,7 +145,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
- bool read, unsigned long ip)
+ unsigned long ip)
{
lock_release(&sem->dep_map, ip);
}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4b2047b78b67..b6321fc49159 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -135,7 +135,6 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp_t gfp) __alloc_size(1);
-extern size_t pcpu_alloc_size(void __percpu *__pdata);
#define __alloc_percpu_gfp(_size, _align, _gfp) \
alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index b3b34f6670cf..4b5b83677e3f 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,10 +17,14 @@
#ifdef CONFIG_ARM_PMU
/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
+ * The Armv7 and Armv8.8-or-earlier CPU PMUs support up to 32 event counters.
+ * The Armv8.9/9.4 CPU PMU supports up to 33 event counters.
*/
+#ifdef CONFIG_ARM
#define ARMPMU_MAX_HWEVENTS 32
-
+#else
+#define ARMPMU_MAX_HWEVENTS 33
+#endif
/*
* ARM PMU hw_event flags
*/
@@ -96,7 +100,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int num_events;
+ DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS);
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 7867db04ec98..3372c1b56486 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -6,8 +6,9 @@
#ifndef __PERF_ARM_PMUV3_H
#define __PERF_ARM_PMUV3_H
-#define ARMV8_PMU_MAX_COUNTERS 32
-#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31
+#define ARMV8_PMU_CYCLE_IDX 31
+#define ARMV8_PMU_INSTR_IDX 32 /* Not accessible from AArch32 */
/*
* Common architectural and microarchitectural event numbers.
@@ -227,8 +228,10 @@
*/
#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
#define ARMV8_PMU_OVSR_C BIT(31)
+#define ARMV8_PMU_OVSR_F BIT_ULL(32) /* arm64 only */
/* Mask for writable bits is both P and C fields */
-#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \
+ ARMV8_PMU_OVSR_F)
/*
* PMXEVTYPER: Event selection reg
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a8942277dda..fb908843f209 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -168,6 +168,9 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
+ struct { /* aux / Intel-PT */
+ u64 aux_config;
+ };
struct { /* software */
struct hrtimer hrtimer;
};
@@ -292,6 +295,19 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+/**
+ * pmu::scope
+ */
+enum perf_pmu_scope {
+ PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_CORE,
+ PERF_PMU_SCOPE_DIE,
+ PERF_PMU_SCOPE_CLUSTER,
+ PERF_PMU_SCOPE_PKG,
+ PERF_PMU_SCOPE_SYS_WIDE,
+ PERF_PMU_MAX_SCOPE,
+};
+
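A hypothetical driver-side sketch (not from this patch): an uncore PMU advertising package scope, so the core can keep the event on one active CPU per package and allow PERF_EV_CAP_READ_SCOPE reads from any CPU in that package.

static struct pmu example_uncore_pmu = {
	.scope		= PERF_PMU_SCOPE_PKG,
	/* .event_init, .add, .del, .start, .stop, .read elided */
};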
struct perf_output_handle;
#define PMU_NULL_DEV ((void *)(~0UL))
@@ -315,6 +331,11 @@ struct pmu {
*/
int capabilities;
+ /*
+ * PMU scope
+ */
+ unsigned int scope;
+
int __percpu *pmu_disable_count;
struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
@@ -615,10 +636,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
+#define PERF_EV_CAP_READ_SCOPE BIT(3)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -963,12 +987,16 @@ struct perf_event_context {
struct rcu_head rcu_head;
/*
- * Sum (event->pending_work + event->pending_work)
+ * The count of events for which using the switch-out fast path
+ * should be avoided.
+ *
+ * Sum (event->pending_work + events with
+ * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
*
* The SIGTRAP is targeted at ctx->task, as such it won't do changing
* that until the signal is delivered.
*/
- local_t nr_pending;
+ local_t nr_no_switch_fast;
};
struct perf_cpu_pmu_context {
@@ -1602,13 +1630,7 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
- return -EACCES;
-
- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+int perf_allow_kernel(struct perf_event_attr *attr);
static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 207f0c83c8e9..59a3deb792a8 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -80,36 +80,6 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
}
}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
-{
- int i;
- struct page_ext *first_page_ext;
- struct page_ext *page_ext;
- union codetag_ref *ref;
- struct alloc_tag *tag;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- first_page_ext = page_ext = page_ext_get(page);
- if (unlikely(!page_ext))
- return;
-
- ref = codetag_ref_from_page_ext(page_ext);
- if (!ref->ct)
- goto out;
-
- tag = ct_to_alloc_tag(ref->ct);
- page_ext = page_ext_next(page_ext);
- for (i = 1; i < nr; i++) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag);
- page_ext = page_ext_next(page_ext);
- }
-out:
- page_ext_put(first_page_ext);
-}
-
static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
struct alloc_tag *tag = NULL;
@@ -142,7 +112,6 @@ static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2a6a3cccfc36..e8b2ac6bd2ae 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -447,6 +447,12 @@ static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
}
#endif
+#ifndef arch_check_zapped_pud
+static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -1950,6 +1956,18 @@ typedef unsigned int pgtbl_mod_mask;
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
+#ifndef pte_pgprot
+#define pte_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pmd_pgprot
+#define pmd_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pud_pgprot
+#define pud_pgprot(x) ((pgprot_t) {0})
+#endif
+
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6b7d40d49129..a98bc91a0cde 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -554,6 +554,9 @@ struct macsec_ops;
* @drv: Pointer to the driver for this PHY instance
* @devlink: Create a link between phy dev and mac dev, if the external phy
* used by current mac interface is managed by another mac interface.
+ * @phyindex: Unique id across the phy's parent tree of phys to address the PHY
+ * from userspace, similar to ifindex. A zero index means the PHY
+ * wasn't assigned an id yet.
* @phy_id: UID for this device found during discovery
* @c45_ids: 802.3-c45 Device Identifiers if is_c45.
* @is_c45: Set to true if this PHY uses clause 45 addressing.
@@ -656,6 +659,7 @@ struct phy_device {
struct device_link *devlink;
+ u32 phyindex;
u32 phy_id;
struct phy_c45_device_ids c45_ids;
@@ -1777,6 +1781,8 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy);
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy);
void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
int phy_sfp_probe(struct phy_device *phydev,
diff --git a/include/linux/phy_link_topology.h b/include/linux/phy_link_topology.h
new file mode 100644
index 000000000000..68a59e25821c
--- /dev/null
+++ b/include/linux/phy_link_topology.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The PHY device list allows maintaining a list of PHY devices that are
+ * part of a netdevice's link topology. PHYs can, for example, be chained,
+ * as is the case when a PHY exposes an SFP module onto which an SFP
+ * transceiver embedding another PHY is connected.
+ *
+ * This list can then be used by userspace to leverage individual PHY
+ * capabilities.
+ */
+#ifndef __PHY_LINK_TOPOLOGY_H
+#define __PHY_LINK_TOPOLOGY_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct xarray;
+struct phy_device;
+struct sfp_bus;
+
+struct phy_link_topology {
+ struct xarray phys;
+ u32 next_phy_index;
+};
+
+struct phy_device_node {
+ enum phy_upstream upstream_type;
+
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ } upstream;
+
+ struct sfp_bus *parent_sfp_bus;
+
+ struct phy_device *phy;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream);
+
+void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy);
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return NULL;
+
+ pdn = xa_load(&topo->phys, phyindex);
+ if (pdn)
+ return pdn->phy;
+
+ return NULL;
+}
+
+#else
+static inline int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ return 0;
+}
+
+static inline void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+}
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __PHY_LINK_TOPOLOGY_H */
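An editorial sketch of both halves of the API, with hypothetical call sites: the MAC side registers a directly attached PHY (PHY_UPSTREAM_MAC is assumed to be the matching enum phy_upstream value), and an ethtool-style path resolves a userspace phyindex back to the PHY.

static int example_attach(struct net_device *dev, struct phy_device *phy)
{
	/* record the PHY as hanging directly off the MAC */
	return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_MAC, dev);
}

static struct phy_device *example_resolve(struct net_device *dev,
					  u32 phyindex)
{
	/* NULL if the index was never assigned or PHYLIB is disabled */
	return phy_link_topo_get_phy(dev, phyindex);
}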
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 2381e07429a2..5c01048860c4 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -598,6 +598,8 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
const struct fwnode_handle *fwnode,
u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_set_fixed_link(struct phylink *,
+ const struct phylink_link_state *);
void phylink_mac_change(struct phylink *, bool up);
void phylink_pcs_change(struct phylink_pcs *, bool up);
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a65d3d078e58..53cfde98433d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,6 +81,8 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in
+ * schmitt-trigger mode. The argument is in uV.
* @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
@@ -132,6 +134,7 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
new file mode 100644
index 000000000000..576d952f97ed
--- /dev/null
+++ b/include/linux/platform_data/amd_qdma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_QDMA_H
+#define _PLATDATA_AMD_QDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct qdma_queue_info - DMA queue information. This information is used to
+ * match a queue when a DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct qdma_queue_info {
+ enum dma_transfer_direction dir;
+};
+
+#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo))
+
+struct dma_slave_map;
+
+/**
+ * struct qdma_platdata - Platform specific data for QDMA engine
+ * @max_mm_channels: Maximum number of MM DMA channels in each direction
+ * @device_map: DMA slave map
+ * @irq_index: The index of first IRQ
+ */
+struct qdma_platdata {
+ u32 max_mm_channels;
+ u32 irq_index;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_QDMA_H */
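A hypothetical sketch of how this platform data could be wired up: the queue info is stashed in a dma_slave_map entry so the dmaengine core can match it when a client requests a channel. All names here are assumptions, not from the patch.

static struct qdma_queue_info example_h2c = {
	.dir = DMA_MEM_TO_DEV,			/* host-to-card queue */
};

static struct dma_slave_map example_map[] = {
	/* devname, slave name, filter parameter */
	{ "example-client", "h2c-0", QDMA_FILTER_PARAM(&example_h2c) },
};

static struct qdma_platdata example_pdata = {
	.max_mm_channels = 1,
	.device_map	 = example_map,
	.irq_index	 = 0,
};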
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
deleted file mode 100644
index 5dc9d2be384b..000000000000
--- a/include/linux/platform_data/cyttsp4.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP4_H_
-#define _CYTTSP4_H_
-
-#define CYTTSP4_MT_NAME "cyttsp4_mt"
-#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
-#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
-
-#define CY_TOUCH_SETTINGS_MAX 32
-
-struct touch_framework {
- const uint16_t *abs;
- uint8_t size;
- uint8_t enable_vkeys;
-} __packed;
-
-struct cyttsp4_mt_platform_data {
- struct touch_framework *frmwrk;
- unsigned short flags;
- char const *inp_dev_name;
-};
-
-struct touch_settings {
- const uint8_t *data;
- uint32_t size;
- uint8_t tag;
-} __packed;
-
-struct cyttsp4_core_platform_data {
- int irq_gpio;
- int rst_gpio;
- int level_irq_udelay;
- int (*xres)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- int (*init)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev);
- int (*power)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev, atomic_t *ignore_irq);
- int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
-};
-
-struct cyttsp4_platform_data {
- struct cyttsp4_core_platform_data *core_pdata;
- struct cyttsp4_mt_platform_data *mt_pdata;
-};
-
-#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
deleted file mode 100644
index 3ea6dd942c27..000000000000
--- a/include/linux/platform_data/gpio-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Atheros AR7XXX/AR9XXX GPIO controller platform data
- *
- * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-
-struct ath79_gpio_platform_data {
- unsigned ngpios;
- bool oe_inverted;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
deleted file mode 100644
index b82e44662efe..000000000000
--- a/include/linux/platform_data/gpio-davinci.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * DaVinci GPIO Platform Related Defines
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- */
-
-#ifndef __DAVINCI_GPIO_PLATFORM_H
-#define __DAVINCI_GPIO_PLATFORM_H
-
-struct davinci_gpio_platform_data {
- bool no_auto_base;
- u32 base;
- u32 ngpio;
- u32 gpio_unbanked;
-};
-
-/* Convert GPIO signal to GPIO pin number */
-#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-
-#endif
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
deleted file mode 100644
index 7efabbca1dca..000000000000
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
- *
- * ux500 Scroll key and Keypad Encoder (SKE) header
- */
-
-#ifndef __SKE_H
-#define __SKE_H
-
-#include <linux/input/matrix_keypad.h>
-
-/* register definitions for SKE peripheral */
-#define SKE_CR 0x00
-#define SKE_VAL0 0x04
-#define SKE_VAL1 0x08
-#define SKE_DBCR 0x0C
-#define SKE_IMSC 0x10
-#define SKE_RIS 0x14
-#define SKE_MIS 0x18
-#define SKE_ICR 0x1C
-
-/*
- * Keypad module
- */
-
-/**
- * struct keypad_platform_data - structure for platform specific data
- * @init: pointer to keypad init function
- * @exit: pointer to keypad deinitialisation function
- * @keymap_data: matrix scan code table for keycodes
- * @krow: maximum number of rows
- * @kcol: maximum number of columns
- * @debounce_ms: platform specific debounce time
- * @no_autorepeat: flag for auto repetition
- * @wakeup_enable: allow waking up the system
- */
-struct ske_keypad_platform_data {
- int (*init)(void);
- int (*exit)(void);
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_ms;
- bool no_autorepeat;
- bool wakeup_enable;
-};
-#endif /*__SKE_KPD_H*/
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
deleted file mode 100644
index 6fbb70005541..000000000000
--- a/include/linux/platform_data/max6697.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * max6697.h
- * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
- */
-
-#ifndef MAX6697_H
-#define MAX6697_H
-
-#include <linux/types.h>
-
-/*
- * For all bit masks:
- * bit 0: local temperature
- * bit 1..7: remote temperatures
- */
-struct max6697_platform_data {
- bool smbus_timeout_disable; /* set to disable SMBus timeouts */
- bool extended_range_enable; /* set to enable extended temp range */
- bool beta_compensation; /* set to enable beta compensation */
- u8 alert_mask; /* set bit to 1 to disable alert */
- u8 over_temperature_mask; /* set bit to 1 to disable */
- u8 resistance_cancellation; /* set bit to 0 to disable
- * bit mask for MAX6581,
- * boolean for other chips
- */
- u8 ideality_mask; /* set bit to 0 to disable */
- u8 ideality_value; /* transistor ideality as per
- * MAX6581 datasheet
- */
-};
-
-#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
deleted file mode 100644
index fcc6f2a1f5c3..000000000000
--- a/include/linux/platform_data/mcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- */
-
-#ifndef __LINUX_MCS_H
-#define __LINUX_MCS_H
-
-#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
-#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
-#define MCS_KEY_CODE(v) ((v) & 0xffff)
-
-struct mcs_platform_data {
- void (*poweron)(bool);
- void (*cfg_pin)(void);
-
- /* touchscreen */
- unsigned int x_size;
- unsigned int y_size;
-
- /* touchkey */
- const u32 *keymap;
- unsigned int keymap_size;
- unsigned int key_maxval;
- bool no_autorepeat;
-};
-
-#endif /* __LINUX_MCS_H */
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 8c659db4da6b..2ee1a679e592 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -27,7 +27,9 @@ enum ksz_chip_id {
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
KSZ8765_CHIP_ID = 0x8765,
- KSZ8830_CHIP_ID = 0x8830,
+ KSZ88X3_CHIP_ID = 0x8830,
+ KSZ8864_CHIP_ID = 0x8864,
+ KSZ8895_CHIP_ID = 0x8895,
KSZ9477_CHIP_ID = 0x00947700,
KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
deleted file mode 100644
index a49826214a39..000000000000
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * TI DaVinci AEMIF support
- *
- * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _MACH_DAVINCI_AEMIF_H
-#define _MACH_DAVINCI_AEMIF_H
-
-#include <linux/platform_device.h>
-
-#define NRCSR_OFFSET 0x00
-#define AWCCR_OFFSET 0x04
-#define A1CR_OFFSET 0x10
-
-#define ACR_ASIZE_MASK 0x3
-#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
-
-/* All timings in nanoseconds */
-struct davinci_aemif_timing {
- u8 wsetup;
- u8 wstrobe;
- u8 whold;
-
- u8 rsetup;
- u8 rstrobe;
- u8 rhold;
-
- u8 ta;
-};
-
-#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
deleted file mode 100644
index dd474dd44848..000000000000
--- a/include/linux/platform_data/mtd-davinci.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mach-davinci/nand.h
- *
- * Copyright © 2006 Texas Instruments.
- *
- * Ported to 2.6.23 Copyright © 2008 by
- * Sander Huijsen <Shuijsen@optelecom-nkf.com>
- * Troy Kisky <troy.kisky@boundarydevices.com>
- * Dirk Behme <Dirk.Behme@gmail.com>
- *
- * --------------------------------------------------------------------------
- */
-
-#ifndef __ARCH_ARM_DAVINCI_NAND_H
-#define __ARCH_ARM_DAVINCI_NAND_H
-
-#include <linux/mtd/rawnand.h>
-
-#define NANDFCR_OFFSET 0x60
-#define NANDFSR_OFFSET 0x64
-#define NANDF1ECC_OFFSET 0x70
-
-/* 4-bit ECC syndrome registers */
-#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
-#define NAND_4BIT_ECC1_OFFSET 0xc0
-#define NAND_4BIT_ECC2_OFFSET 0xc4
-#define NAND_4BIT_ECC3_OFFSET 0xc8
-#define NAND_4BIT_ECC4_OFFSET 0xcc
-#define NAND_ERR_ADD1_OFFSET 0xd0
-#define NAND_ERR_ADD2_OFFSET 0xd4
-#define NAND_ERR_ERRVAL1_OFFSET 0xd8
-#define NAND_ERR_ERRVAL2_OFFSET 0xdc
-
-/* NOTE: boards don't need to use these address bits
- * for ALE/CLE unless they support booting from NAND.
- * They're used unless platform data overrides them.
- */
-#define MASK_ALE 0x08
-#define MASK_CLE 0x10
-
-struct davinci_nand_pdata { /* platform_data */
- uint32_t mask_ale;
- uint32_t mask_cle;
-
- /*
- * 0-indexed chip-select number of the asynchronous
- * interface to which the NAND device has been connected.
- *
- * So, if you have NAND connected to CS3 of DA850, you
- * will pass '1' here. Since the asynchronous interface
- * on DA850 starts from CS2.
- */
- uint32_t core_chipsel;
-
- /* for packages using two chipselects */
- uint32_t mask_chipsel;
-
- /* board's default static partition info */
- struct mtd_partition *parts;
- unsigned nr_parts;
-
- /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_ENGINE_TYPE_SOFT
- * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
- *
- * All DaVinci-family chips support 1-bit hardware ECC.
- * Newer ones also support 4-bit ECC, but are awkward
- * using it with large page chips.
- */
- enum nand_ecc_engine_type engine_type;
- enum nand_ecc_placement ecc_placement;
- u8 ecc_bits;
-
- /* e.g. NAND_BUSWIDTH_16 */
- unsigned options;
- /* e.g. NAND_BBT_USE_FLASH */
- unsigned bbt_options;
-
- /* Main and mirror bbt descriptor overrides */
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- /* Access timings */
- struct davinci_aemif_timing *timing;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
deleted file mode 100644
index 77625251df07..000000000000
--- a/include/linux/platform_data/ti-aemif.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI DaVinci AEMIF platform glue.
- *
- * Copyright (C) 2017 BayLibre SAS
- *
- * Author:
- * Bartosz Golaszewski <bgolaszewski@baylibre.com>
- */
-
-#ifndef __TI_DAVINCI_AEMIF_DATA_H__
-#define __TI_DAVINCI_AEMIF_DATA_H__
-
-#include <linux/of_platform.h>
-
-/**
- * struct aemif_abus_data - Async bus configuration parameters.
- *
- * @cs - Chip-select number.
- */
-struct aemif_abus_data {
- u32 cs;
-};
-
-/**
- * struct aemif_platform_data - Data to set up the TI aemif driver.
- *
- * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
- * subdevices.
- * @cs_offset: Lowest allowed chip-select number.
- * @abus_data: Array of async bus configuration entries.
- * @num_abus_data: Number of abus entries.
- * @sub_devices: Array of platform subdevices.
- * @num_sub_devices: Number of subdevices.
- */
-struct aemif_platform_data {
- struct of_dev_auxdata *dev_lookup;
- u32 cs_offset;
- struct aemif_abus_data *abus_data;
- size_t num_abus_data;
- struct platform_device *sub_devices;
- size_t num_sub_devices;
-};
-
-#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 0aeeae1c1943..365e119bebaa 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/dmi.h>
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -62,12 +63,14 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+#define ASUS_WMI_DEVID_OOBE 0x0005002F
/* This can only be used to disable the screen, not re-enable */
#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
/* Writing a brightness re-enables the screen if disabled */
#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019
/* Misc */
#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
@@ -164,4 +167,39 @@ static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
}
#endif
+/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */
+static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
+ },
+ },
+ { },
+};
+
#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/x86/intel-mid_wdt.h
index 8dba70b4b020..e5c0210d0fec 100644
--- a/include/linux/platform_data/intel-mid_wdt.h
+++ b/include/linux/platform_data/x86/intel-mid_wdt.h
@@ -6,8 +6,8 @@
* Contact: David Cohen <david.a.cohen@linux.intel.com>
*/
-#ifndef __INTEL_MID_WDT_H__
-#define __INTEL_MID_WDT_H__
+#ifndef __PLATFORM_X86_INTEL_MID_WDT_H_
+#define __PLATFORM_X86_INTEL_MID_WDT_H_
#include <linux/platform_device.h>
@@ -16,4 +16,4 @@ struct intel_mid_wdt_pdata {
int (*probe)(struct platform_device *pdev);
};
-#endif /*__INTEL_MID_WDT_H__*/
+#endif /* __PLATFORM_X86_INTEL_MID_WDT_H_ */
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h
index 8537f597d20a..0ca9962e97f2 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/include/linux/platform_data/x86/intel_scu_ipc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_SCU_IPC_H_
-#define _ASM_X86_INTEL_SCU_IPC_H_
+#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_
+#define __PLATFORM_X86_INTEL_SCU_IPC_H_
#include <linux/ioport.h>
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
deleted file mode 100644
index 2463a4a856a6..000000000000
--- a/include/linux/platform_data/zforce_ts.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* drivers/input/touchscreen/zforce.c
- *
- * Copyright (C) 2012-2013 MundoReader S.L.
- */
-
-#ifndef _LINUX_INPUT_ZFORCE_TS_H
-#define _LINUX_INPUT_ZFORCE_TS_H
-
-struct zforce_ts_platdata {
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 858c8e7851fb..b637ec14025f 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -198,8 +198,11 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
+ struct {
+ raw_spinlock_t raw_slock;
+ unsigned long raw_lock_flags;
+ };
};
-
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -473,6 +476,9 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
int dev_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list);
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
@@ -499,6 +505,14 @@ static inline int dev_pm_domain_attach_list(struct device *dev,
{
return 0;
}
+
+static inline int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
+
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)
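A hedged probe-time sketch of the new devres variant (the domain names are hypothetical); the attached list is released automatically on driver unbind, unlike dev_pm_domain_attach_list(), which needs an explicit dev_pm_domain_detach_list():

static int example_probe(struct device *dev)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	ret = devm_pm_domain_attach_list(dev, &pd_data, &pd_list);
	return ret < 0 ? ret : 0;
}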
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index dc7b738de299..453691710839 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -158,7 +158,7 @@ static inline void posix_cputimers_init_work(void) { }
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
- struct list_head list;
+ struct hlist_node list;
struct hlist_node t_hash;
spinlock_t it_lock;
const struct k_clock *kclock;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 72dc7e45c90c..910d407ebe63 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -243,8 +243,7 @@ struct power_supply_desc {
const char *name;
enum power_supply_type type;
u8 charge_behaviours;
- const enum power_supply_usb_type *usb_types;
- size_t num_usb_types;
+ u32 usb_types;
const enum power_supply_property *properties;
size_t num_properties;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index b937cefcb31c..eca9bb2ee637 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -9,6 +9,8 @@
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>
+struct console;
+
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -161,15 +163,16 @@ int _printk(const char *fmt, ...);
*/
__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
-extern void __printk_safe_enter(void);
-extern void __printk_safe_exit(void);
+extern void __printk_deferred_enter(void);
+extern void __printk_deferred_exit(void);
+
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
-#define printk_deferred_enter __printk_safe_enter
-#define printk_deferred_exit __printk_safe_exit
+#define printk_deferred_enter() __printk_deferred_enter()
+#define printk_deferred_exit() __printk_deferred_exit()
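A minimal sketch of the documented discipline, with a hypothetical caller: interrupts stay disabled across the whole deferred section.

static void example_deferred_print(void)
{
	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();
	printk("printing is deferred to irq_work in this section\n");
	printk_deferred_exit();
	local_irq_restore(flags);
}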
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -197,6 +200,10 @@ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
void console_try_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
+extern bool nbcon_device_try_acquire(struct console *con);
+extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -279,6 +286,24 @@ static inline void printk_trigger_flush(void)
static inline void console_try_replay_all(void)
{
}
+
+static inline void printk_legacy_allow_panic_sync(void)
+{
+}
+
+static inline bool nbcon_device_try_acquire(struct console *con)
+{
+ return false;
+}
+
+static inline void nbcon_device_release(struct console *con)
+{
+}
+
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
#endif
bool this_cpu_in_panic(void);
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
index 24da8364b919..9c094294403f 100644
--- a/include/linux/prmt.h
+++ b/include/linux/prmt.h
@@ -2,6 +2,11 @@
#ifdef CONFIG_ACPI_PRMT
void init_prmt(void);
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer);
#else
static inline void init_prmt(void) { }
+static inline int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 638507a3c8ff..fed601053c51 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -182,7 +182,7 @@ struct pstore_info {
struct module *owner;
const char *name;
- spinlock_t buf_lock;
+ raw_spinlock_t buf_lock;
char *buf;
size_t bufsize;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6e4b8206c7d0..c892d22ce0a7 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -47,10 +47,12 @@ struct system_device_crosststamp;
* struct ptp_system_timestamp - system time corresponding to a PHC timestamp
* @pre_ts: system timestamp before capturing PHC
* @post_ts: system timestamp after capturing PHC
+ * @clockid: clock-base used for capturing the system timestamps
*/
struct ptp_system_timestamp {
struct timespec64 pre_ts;
struct timespec64 post_ts;
+ clockid_t clockid;
};
/**
@@ -457,14 +459,40 @@ static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
{
- if (sts)
- ktime_get_real_ts64(&sts->pre_ts);
+ if (sts) {
+ switch (sts->clockid) {
+ case CLOCK_REALTIME:
+ ktime_get_real_ts64(&sts->pre_ts);
+ break;
+ case CLOCK_MONOTONIC:
+ ktime_get_ts64(&sts->pre_ts);
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ ktime_get_raw_ts64(&sts->pre_ts);
+ break;
+ default:
+ break;
+ }
+ }
}
static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
{
- if (sts)
- ktime_get_real_ts64(&sts->post_ts);
+ if (sts) {
+ switch (sts->clockid) {
+ case CLOCK_REALTIME:
+ ktime_get_real_ts64(&sts->post_ts);
+ break;
+ case CLOCK_MONOTONIC:
+ ktime_get_ts64(&sts->post_ts);
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ ktime_get_raw_ts64(&sts->post_ts);
+ break;
+ default:
+ break;
+ }
+ }
}
#endif
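A sketch of the driver-side pattern (the hardware access is elided as an assumption): the core fills sts->clockid from the userspace request, so the driver only brackets its PHC read and the right system clock is sampled automatically.

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	ptp_read_system_prets(sts);	/* pre_ts in the requested clock */
	/* ... read the PHC into *ts (assumed hardware access) ... */
	ptp_read_system_postts(sts);	/* post_ts in the requested clock */
	return 0;
}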
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f8c2dc12dbd3..8acd60b53f58 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -394,9 +394,6 @@ static inline bool pwm_might_sleep(struct pwm_device *pwm)
}
/* PWM provider APIs */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout);
-
void pwmchip_put(struct pwm_chip *chip);
struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
@@ -462,13 +459,6 @@ static inline void pwm_disable(struct pwm_device *pwm)
might_sleep();
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
-{
- return -EINVAL;
-}
-
static inline void pwmchip_put(struct pwm_chip *chip)
{
}
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 07071e64abf3..89a0d83ddad0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -526,7 +526,7 @@ struct quota_info {
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-int register_quota_format(struct quota_format_type *fmt);
+void register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 002266693e50..765232ce0b5e 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -19,8 +19,8 @@ struct ratelimit_state {
int burst;
int printed;
int missed;
+ unsigned int flags;
unsigned long begin;
- unsigned long flags;
};
#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f7edca369eda..7c173aa64e1e 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -245,6 +245,42 @@ rb_find_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_rcu() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Adds a Store-Release for link_node.
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
* rb_find() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
@@ -273,6 +309,37 @@ rb_find(const void *key, const struct rb_root *tree,
}
/**
+ * rb_find_rcu() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Notably, tree descent vs concurrent tree rotations is unsound and can result
+ * in false-negatives.
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_rcu(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
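An editorial sketch pairing the new helpers, with a hypothetical struct item keyed by a u64: readers look up under rcu_read_lock(), while inserters serialize among themselves with their own lock.

struct item {
	struct rb_node node;
	u64 key;
};

static int item_cmp_key(const void *key, const struct rb_node *n)
{
	u64 k = *(const u64 *)key;
	const struct item *it = rb_entry(n, struct item, node);

	return k < it->key ? -1 : k > it->key;
}

/* caller holds rcu_read_lock() */
static struct item *example_lookup(struct rb_root *tree, u64 key)
{
	struct rb_node *n = rb_find_rcu(&key, tree, item_cmp_key);

	return n ? rb_entry(n, struct item, node) : NULL;
}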
+/**
* rb_find_first() - find the first @key in @tree
* @key: key to match
* @tree: tree to search
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba95c06675e1..2fdc2208f1ca 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -185,11 +185,7 @@ struct rcu_cblist {
* ----------------------------------------------------------------------------
*/
#define SEGCBLIST_ENABLED BIT(0)
-#define SEGCBLIST_RCU_CORE BIT(1)
-#define SEGCBLIST_LOCKING BIT(2)
-#define SEGCBLIST_KTHREAD_CB BIT(3)
-#define SEGCBLIST_KTHREAD_GP BIT(4)
-#define SEGCBLIST_OFFLOADED BIT(5)
+#define SEGCBLIST_OFFLOADED BIT(1)
struct rcu_segcblist {
struct rcu_head *head;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3dc1e58865f7..14dfa6008467 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -191,7 +191,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
+ *
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
@@ -519,7 +522,9 @@ static inline void hlist_del_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
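
A minimal sketch of the contract spelled out above, with hypothetical demo_*
names: the swap is atomic only from the readers' side, so an updater-side lock
is still required, and the old entry is reclaimed after a grace period.

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head node;
	struct rcu_head rcu;
	int value;
};

static DEFINE_SPINLOCK(demo_update_lock);

static void demo_replace(struct demo_entry *old, struct demo_entry *new)
{
	spin_lock(&demo_update_lock);		/* updater vs. updater */
	list_replace_rcu(&old->node, &new->node);
	spin_unlock(&demo_update_lock);

	kfree_rcu(old, rcu);	/* free once all readers are done with it */
}
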
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 13f6f00aecf9..58d84c59f3dd 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,10 +34,12 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
-void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
struct rcu_gp_oldstate;
@@ -144,11 +146,18 @@ void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
@@ -165,6 +174,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
@@ -191,6 +201,7 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
rcu_tasks_trace_qs_blkd(t); \
} \
} while (0)
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
@@ -202,8 +213,8 @@ do { \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
-void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d9ac7b136aea..0ee270b3f5ed 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
kvfree(ptr);
}
+static inline void kvfree_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
@@ -158,7 +163,7 @@ void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 254244202ea9..90a684f94776 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,9 +35,10 @@ static inline void rcu_virt_note_context_switch(void)
void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+void kvfree_rcu_barrier(void);
void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
+void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 122e38161acb..f9ccad32fc5c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1521,6 +1521,9 @@ struct regmap_irq_chip_data;
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
+ * @domain_suffix: Name suffix to be appended to the end of the IRQ domain
+ * name. Needed when multiple regmap-IRQ controllers are created
+ * from the same device.
*
* @main_status: Base main status register address. For chips which have
* interrupts arranged in separate sub-irq blocks with own IRQ
@@ -1606,6 +1609,7 @@ struct regmap_irq_chip_data;
*/
struct regmap_irq_chip {
const char *name;
+ const char *domain_suffix;
unsigned int main_status;
unsigned int num_main_status_bits;
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 373003ace639..997b34197385 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -147,7 +147,8 @@ void rfkill_destroy(struct rfkill *rfkill);
* Prefer to use rfkill_set_hw_state if you don't need any special reason.
*/
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
- bool blocked, unsigned long reason);
+ bool blocked,
+ enum rfkill_hard_block_reasons reason);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
@@ -280,7 +281,7 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
bool blocked,
- unsigned long reason)
+ enum rfkill_hard_block_reasons reason)
{
return blocked;
}
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index fd35d4ec12e1..17fbb7855295 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -89,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ struct lock_class_key *key);
+
+bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
+ long *data);
+
/*
* Because the ring buffer is generic, if other users of the ring buffer get
* traced by ftrace, it can produce lockdep warnings. We need to keep each
@@ -100,6 +108,18 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc_range(size, flags, order, start, range_size) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_range((size), (flags), (order), (start), \
+ (range_size), &__key); \
+})
+
typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
ring_buffer_cond_fn cond, void *data);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0978c64f49d8..d5e93e44322e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -331,7 +331,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -425,7 +425,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
if (!folio_test_large(folio)) {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -745,7 +745,12 @@ int folio_mkclean(struct folio *);
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
+enum rmp_flags {
+ RMP_LOCKED = 1 << 0,
+ RMP_USE_SHARED_ZEROPAGE = 1 << 1,
+};
+
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h
new file mode 100644
index 000000000000..cccda73eea4d
--- /dev/null
+++ b/include/linux/rpmb.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Intel Corp. All rights reserved
+ * Copyright (C) 2021-2022 Linaro Ltd
+ */
+#ifndef __RPMB_H__
+#define __RPMB_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * enum rpmb_type - type of underlying storage technology
+ *
+ * @RPMB_TYPE_EMMC : emmc (JESD84-B50.1)
+ * @RPMB_TYPE_UFS : UFS (JESD220)
+ * @RPMB_TYPE_NVME : NVM Express
+ */
+enum rpmb_type {
+ RPMB_TYPE_EMMC,
+ RPMB_TYPE_UFS,
+ RPMB_TYPE_NVME,
+};
+
+/**
+ * struct rpmb_descr - RPMB description provided by the underlying block device
+ *
+ * @type : block device type
+ * @route_frames : routes frames to and from the RPMB device
+ * @dev_id : unique device identifier read from the hardware
+ * @dev_id_len : length of unique device identifier
+ * @reliable_wr_count: number of sectors that can be written in one access
+ * @capacity : capacity of the device in units of 128K
+ *
+ * @dev_id is intended to be used as input when deriving the authentication key.
+ */
+struct rpmb_descr {
+ enum rpmb_type type;
+ int (*route_frames)(struct device *dev, u8 *req, unsigned int req_len,
+ u8 *resp, unsigned int resp_len);
+ u8 *dev_id;
+ size_t dev_id_len;
+ u16 reliable_wr_count;
+ u16 capacity;
+};
+
+/**
+ * struct rpmb_dev - device which can support RPMB partition
+ *
+ * @dev : device
+ * @id : device_id
+ * @list_node : linked list node
+ * @descr : RPMB description
+ */
+struct rpmb_dev {
+ struct device dev;
+ int id;
+ struct list_head list_node;
+ struct rpmb_descr descr;
+};
+
+#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev)
+
+#if IS_ENABLED(CONFIG_RPMB)
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev);
+void rpmb_dev_put(struct rpmb_dev *rdev);
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+ const struct rpmb_dev *start,
+ int (*match)(struct device *dev,
+ const void *data));
+int rpmb_interface_register(struct class_interface *intf);
+void rpmb_interface_unregister(struct class_interface *intf);
+struct rpmb_dev *rpmb_dev_register(struct device *dev,
+ struct rpmb_descr *descr);
+int rpmb_dev_unregister(struct rpmb_dev *rdev);
+
+int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp, unsigned int resp_len);
+
+#else
+static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+ return NULL;
+}
+
+static inline void rpmb_dev_put(struct rpmb_dev *rdev) { }
+
+static inline struct rpmb_dev *
+rpmb_dev_find_device(const void *data, const struct rpmb_dev *start,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
+static inline int rpmb_interface_register(struct class_interface *intf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void rpmb_interface_unregister(struct class_interface *intf)
+{
+}
+
+static inline struct rpmb_dev *
+rpmb_dev_register(struct device *dev, struct rpmb_descr *descr)
+{
+ return NULL;
+}
+
+static inline int rpmb_dev_unregister(struct rpmb_dev *dev)
+{
+ return 0;
+}
+
+static inline int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_RPMB */
+
+#endif /* __RPMB_H__ */
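
To illustrate the new interface, a hypothetical storage driver could describe
and register its RPMB partition roughly as follows; the demo_* names and the
capacity/write-count values are invented, and error handling is elided.

#include <linux/rpmb.h>

static int demo_route_frames(struct device *dev, u8 *req, unsigned int req_len,
			     u8 *resp, unsigned int resp_len)
{
	/* Forward the request frames to the hardware, fill in @resp. */
	return 0;
}

static u8 demo_dev_id[16];	/* unique ID read from the hardware */

static struct rpmb_dev *demo_rpmb_register(struct device *parent)
{
	struct rpmb_descr descr = {
		.type = RPMB_TYPE_EMMC,
		.route_frames = demo_route_frames,
		.dev_id = demo_dev_id,
		.dev_id_len = sizeof(demo_dev_id),
		.reliable_wr_count = 1,
		.capacity = 16,			/* 16 * 128K */
	};

	/* The !CONFIG_RPMB stub above returns NULL. */
	return rpmb_dev_register(parent, &descr);
}
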
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index c09cdcc99471..189140bf11fc 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -40,7 +40,7 @@ struct sbitmap_word {
/**
* @swap_lock: serializes simultaneous updates of ->word and ->cleared
*/
- spinlock_t swap_lock;
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f8d150343d42..e6ee4258169a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -82,6 +82,8 @@ struct task_group;
struct task_struct;
struct user_event_mm;
+#include <linux/sched/ext.h>
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
@@ -149,8 +151,9 @@ struct user_event_mm;
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
-#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
+ TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
@@ -541,9 +544,14 @@ struct sched_entity {
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
+ u64 min_slice;
struct list_head group_node;
- unsigned int on_rq;
+ unsigned char on_rq;
+ unsigned char sched_delayed;
+ unsigned char rel_deadline;
+ unsigned char custom_slice;
+ /* hole */
u64 exec_start;
u64 sum_exec_runtime;
@@ -639,12 +647,26 @@ struct sched_dl_entity {
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
+ *
+ * @dl_server tells if this is a server entity.
+ *
+ * @dl_defer tells if this is a deferred or regular server. For
+ * now only defer server exists.
+ *
+ * @dl_defer_armed tells if the deferrable server is waiting
+ * for the replenishment timer to activate it.
+ *
+ * @dl_defer_running tells if the deferrable server is actually
+ * running, skipping the defer phase.
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
+ unsigned int dl_defer : 1;
+ unsigned int dl_defer_armed : 1;
+ unsigned int dl_defer_running : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -672,7 +694,7 @@ struct sched_dl_entity {
*/
struct rq *rq;
dl_server_has_tasks_f server_has_tasks;
- dl_server_pick_f server_pick;
+ dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -810,6 +832,9 @@ struct task_struct {
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct sched_ext_entity scx;
+#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -1243,7 +1268,6 @@ struct task_struct {
/* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock: */
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index df3aca89d4f5..3a912ab42bb5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -10,16 +10,16 @@
#include <linux/sched.h>
-#define MAX_DL_PRIO 0
-
-static inline int dl_prio(int prio)
+static inline bool dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_DL_PRIO);
}
-static inline int dl_task(struct task_struct *p)
+/*
+ * Returns true if a task has a priority that belongs to DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
+static inline bool dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
index 000000000000..1ddbde64a31b
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+#include <linux/llist.h>
+#include <linux/rhashtable-types.h>
+
+enum scx_public_consts {
+ SCX_OPS_NAME_LEN = 128,
+
+ SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
+};
+
+/*
+ * DSQ (dispatch queue) IDs are 64-bit values of the format:
+ *
+ * Bits: [63] [62 .. 0]
+ * [ B] [ ID ]
+ *
+ * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs
+ * ID: 63 bit ID
+ *
+ * Built-in IDs:
+ *
+ * Bits: [63] [62] [61..32] [31 .. 0]
+ * [ 1] [ L] [ R ] [ V ]
+ *
+ * 1: 1 for built-in DSQs.
+ * L: 1 for LOCAL_ON DSQ IDs, 0 for others
+ * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value.
+ */
+enum scx_dsq_id_flags {
+ SCX_DSQ_FLAG_BUILTIN = 1LLU << 63,
+ SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62,
+
+ SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
+ SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
+ SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
+ SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
+};
+
+/*
+ * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
+ * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
+ * buffer between the scheduler core and the BPF scheduler. See the
+ * documentation for more details.
+ */
+struct scx_dispatch_q {
+ raw_spinlock_t lock;
+ struct list_head list; /* tasks in dispatch order */
+ struct rb_root priq; /* used to order by p->scx.dsq_vtime */
+ u32 nr;
+ u32 seq; /* used by BPF iter */
+ u64 id;
+ struct rhash_head hash_node;
+ struct llist_node free_node;
+ struct rcu_head rcu;
+};
+
+/* scx_entity.flags */
+enum scx_ent_flags {
+ SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
+ SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+
+ SCX_TASK_STATE_SHIFT = 8, /* bit 8 and 9 are used to carry scx_task_state */
+ SCX_TASK_STATE_BITS = 2,
+ SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
+
+ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
+};
+
+/* scx_entity.flags & SCX_TASK_STATE_MASK */
+enum scx_task_state {
+ SCX_TASK_NONE, /* ops.init_task() not called yet */
+ SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
+ SCX_TASK_READY, /* fully initialized, but not in sched_ext */
+ SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+
+ SCX_TASK_NR_STATES,
+};
+
+/* scx_entity.dsq_flags */
+enum scx_ent_dsq_flags {
+ SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
+};
+
+/*
+ * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
+ * everywhere and the following bits track which kfunc sets are currently
+ * allowed for %current. This simple per-task tracking works because SCX ops
+ * nest in a limited way. BPF will likely implement a way to allow and disallow
+ * kfuncs depending on the calling context, which will replace this manual
+ * mechanism. See scx_kf_allow().
+ */
+enum scx_kf_mask {
+ SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
+ /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
+ SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
+ /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
+ SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
+ SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
+ SCX_KF_REST = 1 << 4, /* other rq-locked operations */
+
+ __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
+ SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+ __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+};
+
+enum scx_dsq_lnode_flags {
+ SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
+
+ /* high 16 bits can be for iter cursor flags */
+ __SCX_DSQ_LNODE_PRIV_SHIFT = 16,
+};
+
+struct scx_dsq_list_node {
+ struct list_head node;
+ u32 flags;
+ u32 priv; /* can be used by iter cursor */
+};
+
+/*
+ * The following is embedded in task_struct and contains all fields necessary
+ * for a task to be scheduled by SCX.
+ */
+struct sched_ext_entity {
+ struct scx_dispatch_q *dsq;
+ struct scx_dsq_list_node dsq_list; /* dispatch order */
+ struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
+ u32 dsq_seq;
+ u32 dsq_flags; /* protected by DSQ lock */
+ u32 flags; /* protected by rq lock */
+ u32 weight;
+ s32 sticky_cpu;
+ s32 holding_cpu;
+ u32 kf_mask; /* see scx_kf_mask above */
+ struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
+ atomic_long_t ops_state;
+
+ struct list_head runnable_node; /* rq->scx.runnable_list */
+ unsigned long runnable_at;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_sched_at; /* see scx_prio_less() */
+#endif
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
+
+ /* BPF scheduler modifiable fields */
+
+ /*
+ * Runtime budget in nsecs. This is usually set through
+ * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scheduler. Automatically decreased by SCX as the task executes. On
+ * depletion, a scheduling event is triggered.
+ *
+ * This value is cleared to zero if the task is preempted by
+ * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+ * task ran. Use p->se.sum_exec_runtime instead.
+ */
+ u64 slice;
+
+ /*
+ * Used to order tasks when dispatching to the vtime-ordered priority
+ * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
+ * but can also be modified directly by the BPF scheduler. Modifying it
+ * while a task is queued on a dsq may mangle the ordering and is not
+ * recommended.
+ */
+ u64 dsq_vtime;
+
+ /*
+ * If set, reject future sched_setscheduler(2) calls updating the policy
+ * to %SCHED_EXT with -%EACCES.
+ *
+ * Can be set from ops.init_task() while the BPF scheduler is being
+ * loaded (!scx_init_task_args->fork). If set and the task's policy is
+ * already %SCHED_EXT, the task's policy is rejected and forcefully
+ * reverted to %SCHED_NORMAL. The number of such events is reported
+ * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+ * during fork is not allowed.
+ */
+ bool disallow; /* reject switching into SCX */
+
+ /* cold fields */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ struct cgroup *cgrp_moving_from;
+#endif
+ /* must be the last field, see init_scx_entity() */
+ struct list_head tasks_node;
+};
+
+void sched_ext_free(struct task_struct *p);
+void print_scx_info(const char *log_lvl, struct task_struct *p);
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_free(struct task_struct *p) {}
+static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+#endif /* _LINUX_SCHED_EXT_H */
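
A sketch of how the encodings above compose; the helpers are purely
illustrative and, like everything in this header, only meaningful under
CONFIG_SCHED_CLASS_EXT.

#include <linux/sched/ext.h>

/* Built-in LOCAL_ON ID targeting a CPU: B=1, L=1, V=cpu. */
static inline u64 demo_local_on_dsq(s32 cpu)
{
	return SCX_DSQ_LOCAL_ON | (u64)cpu;
}

/* Recover the CPU number from a LOCAL_ON DSQ ID. */
static inline s32 demo_dsq_cpu(u64 dsq_id)
{
	return (s32)(dsq_id & SCX_DSQ_LOCAL_CPU_MASK);
}

/* Extract the scx_task_state carried in bits 8 and 9 of scx.flags. */
static inline enum scx_task_state demo_task_state(u32 flags)
{
	return (flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
}
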
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 91546493c43d..07bb8d4181d7 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -179,27 +179,20 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
+unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t);
unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
-unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags);
-unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t);
-
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
struct file *filp,
unsigned long addr,
@@ -211,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6ab43b4f72f9 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
*/
#define MAX_RT_PRIO 100
+#define MAX_DL_PRIO 0
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb9683..4e3338103654 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -6,19 +6,40 @@
struct task_struct;
-static inline int rt_prio(int prio)
+static inline bool rt_prio(int prio)
{
- if (unlikely(prio < MAX_RT_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
}
-static inline int rt_task(struct task_struct *p)
+static inline bool rt_or_dl_prio(int prio)
+{
+ return unlikely(prio < MAX_RT_PRIO);
+}
+
+/*
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted
+ * tasks will return true. Use rt_policy() to ignore PI-boosted tasks.
+ */
+static inline bool rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to RT or DL classes.
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline bool rt_or_dl_task(struct task_struct *p)
+{
+ return rt_or_dl_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to RT or DL classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
int policy = tsk->policy;
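
A sketch of the distinction the new helpers document: the prio-based checks
see PI boosting, the policy-based one does not. The demo function and message
are invented.

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

static void demo_classify(struct task_struct *p)
{
	/* rt_or_dl_task() is true for boosted tasks, the policy check is not. */
	if (rt_or_dl_task(p) && !rt_or_dl_task_policy(p))
		pr_info("%s is PI-boosted into an RT/DL class\n", p->comm);
}
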
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0a0e23c45406..c8ed09ac29ac 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -137,7 +137,7 @@ struct signal_struct {
/* POSIX.1b Interval Timers */
unsigned int next_posix_timer_id;
- struct list_head posix_timers;
+ struct hlist_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -276,8 +276,7 @@ static inline void signal_set_stop_flags(struct signal_struct *sig,
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
- kernel_siginfo_t *info, enum pid_type *type);
+extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
@@ -287,7 +286,7 @@ static inline int kernel_dequeue_signal(void)
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info, &__type);
+ ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d362aacf9f89..0f2aeb37bbb0 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -63,7 +63,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
@@ -119,6 +120,11 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
return t;
}
+static inline struct task_struct *tryget_task_struct(struct task_struct *t)
+{
+ return refcount_inc_not_zero(&t->usage) ? t : NULL;
+}
+
extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
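
A sketch of the intended use of tryget_task_struct(): an RCU reader can try to
pin a task whose reference count may already be on its way to zero. The caller
below is hypothetical, and @t is assumed to have been found under the same RCU
read-side section.

#include <linux/rcupdate.h>
#include <linux/sched/task.h>

static struct task_struct *demo_pin_task(struct task_struct *t)
{
	struct task_struct *ret;

	rcu_read_lock();
	ret = tryget_task_struct(t);	/* NULL if @t is already dying */
	rcu_read_unlock();

	return ret;		/* caller drops with put_task_struct() */
}
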
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index ccd72b978e1f..bf10bdb487dd 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -95,23 +95,11 @@ static inline int object_is_on_stack(const void *obj)
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
- unsigned long *n = end_of_stack(p);
-
- do { /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
- n--;
-# else
- n++;
-# endif
- } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
- return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
- return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+ return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
new file mode 100644
index 000000000000..066216f1357a
--- /dev/null
+++ b/include/linux/scmi_imx_protocol.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SCMI Message Protocol driver NXP extension header
+ *
+ * Copyright 2024 NXP.
+ */
+
+#ifndef _LINUX_SCMI_NXP_PROTOCOL_H
+#define _LINUX_SCMI_NXP_PROTOCOL_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+enum scmi_nxp_protocol {
+ SCMI_PROTOCOL_IMX_BBM = 0x81,
+ SCMI_PROTOCOL_IMX_MISC = 0x84,
+};
+
+struct scmi_imx_bbm_proto_ops {
+ int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id,
+ uint64_t sec);
+ int (*rtc_time_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u64 *val);
+ int (*rtc_alarm_set)(const struct scmi_protocol_handle *ph, u32 id,
+ bool enable, u64 sec);
+ int (*button_get)(const struct scmi_protocol_handle *ph, u32 *state);
+};
+
+enum scmi_nxp_notification_events {
+ SCMI_EVENT_IMX_BBM_RTC = 0x0,
+ SCMI_EVENT_IMX_BBM_BUTTON = 0x1,
+ SCMI_EVENT_IMX_MISC_CONTROL = 0x0,
+};
+
+struct scmi_imx_bbm_notif_report {
+ bool is_rtc;
+ bool is_button;
+ ktime_t timestamp;
+ unsigned int rtc_id;
+ unsigned int rtc_evt;
+};
+
+struct scmi_imx_misc_ctrl_notify_report {
+ ktime_t timestamp;
+ unsigned int ctrl_id;
+ unsigned int flags;
+};
+
+struct scmi_imx_misc_proto_ops {
+ int (*misc_ctrl_set)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 num, u32 *val);
+ int (*misc_ctrl_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 *num, u32 *val);
+ int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 evt_id, u32 flags);
+};
+#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 1390f1efb4f0..b86ec2afc691 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -83,6 +83,18 @@ enum lsm_event {
LSM_POLICY_CHANGE,
};
+struct dm_verity_digest {
+ const char *alg;
+ const u8 *digest;
+ size_t digest_len;
+};
+
+enum lsm_integrity_type {
+ LSM_INT_DMVERITY_SIG_VALID,
+ LSM_INT_DMVERITY_ROOTHASH,
+ LSM_INT_FSVERITY_BUILTINSIG_VALID,
+};
+
/*
* These are reasons that can be passed to the security_locked_down()
* LSM hook. Lockdown reasons that protect kernel integrity (ie, the
@@ -399,6 +411,9 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(struct dentry *src, const char *name);
+int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
@@ -509,6 +524,11 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
void *val, size_t val_len, u64 id, u64 flags);
+int security_bdev_alloc(struct block_device *bdev);
+void security_bdev_free(struct block_device *bdev);
+int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -634,7 +654,7 @@ static inline int security_settime64(const struct timespec64 *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
+ return __vm_enough_memory(mm, pages, !cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -1010,6 +1030,13 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
@@ -1483,6 +1510,23 @@ static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
{
return -EOPNOTSUPP;
}
+
+static inline int security_bdev_alloc(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline void security_bdev_free(struct block_device *bdev)
+{
+}
+
+static inline int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -2090,6 +2134,7 @@ struct dentry *securityfs_create_symlink(const char *name,
const char *target,
const struct inode_operations *iops);
extern void securityfs_remove(struct dentry *dentry);
+extern void securityfs_recursive_remove(struct dentry *dentry);
#else /* CONFIG_SECURITYFS */
@@ -2137,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path);
+ const struct path *path);
extern void security_bpf_token_free(struct bpf_token *token);
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
@@ -2177,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return 0;
}
@@ -2256,4 +2301,12 @@ static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
+#ifdef CONFIG_SECURITY
+extern void security_initramfs_populated(void);
+#else
+static inline void security_initramfs_populated(void)
+{
+}
+#endif /* CONFIG_SECURITY */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d90d8ee29d81..fffeb754880f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
@@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
@@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
- return READ_ONCE(s->sequence);
+ return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -263,17 +263,9 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
@@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define raw_read_seqcount_begin(s) \
-({ \
- unsigned _seq = __read_seqcount_begin(s); \
- \
- smp_rmb(); \
- _seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -328,7 +314,6 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
({ \
unsigned __seq = seqprop_sequence(s); \
\
- smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
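
For reference, the canonical read loop is unaffected by this change; the
acquire load now provides the ordering that the removed smp_rmb() calls used
to supply (demo_* names invented).

#include <linux/seqlock.h>

static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
static u64 demo_a, demo_b;

static u64 demo_read_pair(void)
{
	unsigned int seq;
	u64 a, b;

	do {
		seq = read_seqcount_begin(&demo_seq);	/* acquire load */
		a = demo_a;
		b = demo_b;
	} while (read_seqcount_retry(&demo_seq, seq));

	return a + b;
}
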
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index aea25eef9a1a..4ab65874a850 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -11,6 +11,8 @@
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -590,6 +592,95 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+/**
+ * uart_port_set_cons - Safely set the @cons field for a uart
+ * @up: The uart port to set
+ * @con: The new console to set to
+ *
+ * This function must be used to set @up->cons. It uses the port lock to
+ * synchronize with the port lock wrappers in order to ensure that the console
+ * cannot change or disappear while another context is holding the port lock.
+ */
+static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
+{
+ unsigned long flags;
+
+ __uart_port_lock_irqsave(up, &flags);
+ up->cons = con;
+ __uart_port_unlock_irqrestore(up, flags);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_using_nbcon(struct uart_port *up)
+{
+ lockdep_assert_held_once(&up->lock);
+
+ if (likely(!uart_console(up)))
+ return false;
+
+ /*
+ * @up->cons is only modified under the port lock. Therefore it is
+ * certain that it cannot disappear here.
+ *
+ * @up->cons->node is added/removed from the console list under the
+ * port lock. Therefore it is certain that the registration status
+ * cannot change here, thus @up->cons->flags can be read directly.
+ */
+ if (hlist_unhashed_lockless(&up->cons->node) ||
+ !(up->cons->flags & CON_NBCON) ||
+ !up->cons->write_atomic) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return true;
+
+ return nbcon_device_try_acquire(up->cons);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ while (!nbcon_device_try_acquire(up->cons))
+ cpu_relax();
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_release(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ nbcon_device_release(up->cons);
+}
+
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
@@ -597,6 +688,7 @@ struct uart_port {
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -606,6 +698,7 @@ static inline void uart_port_lock(struct uart_port *up)
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -616,6 +709,7 @@ static inline void uart_port_lock_irq(struct uart_port *up)
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -626,7 +720,15 @@ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *f
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
- return spin_trylock(&up->lock);
+ if (!spin_trylock(&up->lock))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock(&up->lock);
+ return false;
+ }
+
+ return true;
}
/**
@@ -638,7 +740,15 @@ static inline bool uart_port_trylock(struct uart_port *up)
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
- return spin_trylock_irqsave(&up->lock, *flags);
+ if (!spin_trylock_irqsave(&up->lock, *flags))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock_irqrestore(&up->lock, *flags);
+ return false;
+ }
+
+ return true;
}
/**
@@ -647,6 +757,7 @@ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long
*/
static inline void uart_port_unlock(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock(&up->lock);
}
@@ -656,6 +767,7 @@ static inline void uart_port_unlock(struct uart_port *up)
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irq(&up->lock);
}
@@ -666,6 +778,7 @@ static inline void uart_port_unlock_irq(struct uart_port *up)
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irqrestore(&up->lock, flags);
}
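
A sketch of the (unchanged) driver-side pattern: the nbcon console
acquire/release is transparent to users of the wrappers. The TX helper below
is hypothetical.

#include <linux/serial_core.h>

static void demo_uart_putc(struct uart_port *port, unsigned char ch)
{
	unsigned long flags;

	/* Also acquires the nbcon console, if the port is one. */
	uart_port_lock_irqsave(port, &flags);

	/* ... write @ch to the hardware FIFO ... */

	/* Releases the nbcon console before dropping the spinlock. */
	uart_port_unlock_irqrestore(port, flags);
}
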
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 95ac8398ee72..e7aec20fb44f 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef set_memory_rox
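
With __must_check on the stubs, callers now have to handle the result even on
architectures where these are no-ops that return 0; a sketch (the demo
function is invented):

#include <linux/set_memory.h>

static int demo_protect(unsigned long addr, int numpages)
{
	int err;

	err = set_memory_ro(addr, numpages);
	if (err)		/* silently ignoring this now warns */
		return err;

	return 0;
}
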
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index b14be59550e3..60c65cea74f6 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -550,7 +550,7 @@ struct sfp_upstream_ops {
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
- void (*disconnect_phy)(void *priv);
+ void (*disconnect_phy)(void *priv, struct phy_device *);
};
#if IS_ENABLED(CONFIG_SFP)
@@ -576,6 +576,7 @@ struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_bus_del_upstream(struct sfp_bus *bus);
+const char *sfp_get_name(struct sfp_bus *bus);
#else
static inline int sfp_parse_port(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
@@ -654,6 +655,11 @@ static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
+
+static inline const char *sfp_get_name(struct sfp_bus *bus)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 1d06b1e5408a..515a9a6a3c6f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -111,20 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags);
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge);
+ loff_t write_end, bool shmem_huge_force);
#else
-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
-{
- return false;
-}
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
+ loff_t write_end, bool shmem_huge_force)
{
return 0;
}
@@ -150,8 +143,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp);
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 29c3ea5b6e93..39f1d16f3628 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -827,6 +827,8 @@ enum skb_tstamp_type {
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @unreadable: indicates that at least one of the fragments in this skb is
+ * unreadable.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
@@ -1008,7 +1010,7 @@ struct sk_buff {
#if IS_ENABLED(CONFIG_IP_SCTP)
__u8 csum_not_inet:1;
#endif
-
+ __u8 unreadable:1;
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
@@ -1225,7 +1227,7 @@ static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
return false;
- if (likely(refcount_read(&skb->users) == 1))
+ if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1))
smp_rmb();
else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
@@ -1433,6 +1435,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
@@ -1823,6 +1826,12 @@ static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
__skb_zcopy_downgrade_managed(skb);
}
+/* Return true if frags in this skb are readable by the host. */
+static inline bool skb_frags_readable(const struct sk_buff *skb)
+{
+ return !skb->unreadable;
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
@@ -2539,10 +2548,17 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
netmem_ref netmem, int off, int size)
{
- struct page *page = netmem_to_page(netmem);
+ struct page *page;
__skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
+ if (netmem_is_net_iov(netmem)) {
+ skb->unreadable = true;
+ return;
+ }
+
+ page = netmem_to_page(netmem);
+
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
@@ -3523,21 +3539,58 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
fragto->offset = fragfrom->offset;
}
+/* Return: true if the skb_frag contains a net_iov. */
+static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
+{
+ return netmem_is_net_iov(frag->netmem);
+}
+
+/**
+ * skb_frag_net_iov - retrieve the net_iov referred to by fragment
+ * @frag: the fragment
+ *
+ * Return: the &struct net_iov associated with @frag. Returns NULL if this
+ * frag has no associated net_iov.
+ */
+static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
+{
+ if (!skb_frag_is_net_iov(frag))
+ return NULL;
+
+ return netmem_to_net_iov(frag->netmem);
+}
+
/**
* skb_frag_page - retrieve the page referred to by a paged fragment
* @frag: the paged fragment
*
- * Returns the &struct page associated with @frag.
+ * Return: the &struct page associated with @frag. Returns NULL if this frag
+ * has no associated page.
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
+ if (skb_frag_is_net_iov(frag))
+ return NULL;
+
return netmem_to_page(frag->netmem);
}
+/**
+ * skb_frag_netmem - retrieve the netmem referred to by a fragment
+ * @frag: the fragment
+ *
+ * Return: the &netmem_ref associated with @frag.
+ */
+static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
+{
+ return frag->netmem;
+}
+
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
struct bpf_prog *prog);
+
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
@@ -3547,6 +3600,9 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
{
+ if (!skb_frag_page(frag))
+ return NULL;
+
return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
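
A sketch of the checks that payload-touching code now needs, since
skb_frag_page() and skb_frag_address() can return NULL for net_iov-backed
frags; the walker below is hypothetical.

#include <linux/skbuff.h>

static bool demo_walk_frags(struct sk_buff *skb)
{
	int i;

	if (!skb_frags_readable(skb))
		return false;	/* some frag is unreadable device memory */

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		if (!addr)	/* defensive: net_iov frag */
			continue;
		/* ... process skb_frag_size(frag) bytes at @addr ... */
	}

	return true;
}
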
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 16c241a23472..0f3c58007488 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -34,14 +34,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
bool napi_pp_put_page(netmem_ref netmem);
-static inline void
-skb_page_unref(struct page *page, bool recycle)
+static inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page_to_netmem(page)))
+ if (recycle && napi_pp_put_page(netmem))
return;
#endif
- put_page(page);
+ put_page(netmem_to_page(netmem));
}
/**
@@ -54,7 +53,7 @@ skb_page_unref(struct page *page, bool recycle)
*/
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- skb_page_unref(skb_frag_page(frag), recycle);
+ skb_page_unref(skb_frag_netmem(frag), recycle);
}
/**
diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb2bf4629157..b35e2db7eb0e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -213,6 +213,12 @@ enum _slab_flag_bits {
#endif
/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
+/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
@@ -234,14 +240,173 @@ struct mem_cgroup;
*/
bool slab_is_available(void);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, slab_flags_t flags,
- void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *));
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must also be set to %true in order to interpret the field
+ * as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
+ */
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, %use_freeptr_offset must be set %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free the object in the
+ * same state as after calling the constructor, or deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+};
+
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags);
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ slab_flags_t flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/**
+ * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
+ * for copying to userspace.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects, or %NULL.
+ *
+ * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
+ * if whitelisting a single field is sufficient, or kmem_cache_create() with
+ * the necessary parameters passed via the args parameter (see
+ * &struct kmem_cache_args).
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+static inline struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ .useroffset = useroffset,
+ .usersize = usersize,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ /* Make sure we don't get passed garbage. */
+ if (WARN_ON_ONCE(args))
+ return ERR_PTR(-EINVAL);
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+/**
+ * kmem_cache_create - Create a kmem cache.
+ * @__name: A string which is used in /proc/slabinfo to identify this cache.
+ * @__object_size: The size of objects to be created in this cache.
+ * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
+ * means defaults will be used for all the arguments.
+ *
+ * This is currently implemented as a macro using ``_Generic()`` to call
+ * either the new variant of the function or a legacy one.
+ *
+ * The new variant has 4 parameters:
+ * ``kmem_cache_create(name, object_size, args, flags)``
+ *
+ * See __kmem_cache_create_args() which implements this.
+ *
+ * The legacy variant has 5 parameters:
+ * ``kmem_cache_create(name, object_size, align, flags, ctor)``
+ *
+ * The align and ctor parameters map to the respective fields of
+ * &struct kmem_cache_args.
+ *
+ * Context: Cannot be called within an interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
+
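Illustrative sketch, not part of the patch: a caller using the new args-based variant might look like the following; struct foo, its payload field, the cache name, and the flag choice are all hypothetical.

	struct kmem_cache_args args = {
		.align = __alignof__(struct foo),
		.useroffset = offsetof(struct foo, payload),
		.usersize = sizeof_field(struct foo, payload),
	};
	struct kmem_cache *cache;

	/* _Generic() dispatches this 4-argument form to __kmem_cache_create_args() */
	cache = kmem_cache_create("foo_cache", sizeof(struct foo), &args,
				  SLAB_HWCACHE_ALIGN);
	if (!cache)
		return -ENOMEM;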
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
@@ -253,20 +418,23 @@ int kmem_cache_shrink(struct kmem_cache *s);
* f.e. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
-#define KMEM_CACHE(__struct, __flags) \
- kmem_cache_create(#__struct, sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ }, (__flags))
/*
* To whitelist a single field for copying to/from usercopy, use this
* macro instead for KMEM_CACHE() above.
*/
-#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
- kmem_cache_create_usercopy(#__struct, \
- sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), \
- offsetof(struct __struct, __field), \
- sizeof_field(struct __struct, __field), NULL)
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ .useroffset = offsetof(struct __struct, __field), \
+ .usersize = sizeof_field(struct __struct, __field), \
+ }, (__flags))
/*
* Common kmalloc functions provided by all allocators
@@ -547,6 +715,35 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+/**
+ * kmem_cache_charge - memcg charge an already allocated slab object
+ * @objp: address of the slab object to memcg charge
+ * @gfpflags: describe the allocation context
+ *
+ * kmem_cache_charge allows charging a slab object to the current memcg,
+ * primarily in cases where charging at allocation time might not be possible
+ * because the target memcg is not known (e.g. softirq context).
+ *
+ * The @objp should be a pointer returned by the slab allocator functions
+ * like kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg
+ * charge behavior can be controlled through the @gfpflags parameter, which
+ * affects how the necessary internal metadata is allocated. Passing
+ * __GFP_NOFAIL requests overcharging instead of failure, but it is not
+ * applied to the internal metadata allocation.
+ *
+ * There are several cases where it will return true even if the charging was
+ * not done:
+ *
+ * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
+ * 2. Already charged slab objects.
+ * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
+ * without __GFP_ACCOUNT
+ * 4. Allocating internal metadata has failed
+ *
+ * Return: true if the charge was successful, otherwise false.
+ */
+bool kmem_cache_charge(void *objp, gfp_t gfpflags);
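Illustrative sketch, not part of the patch: deferring the memcg charge until the owning context is known; cache and obj are hypothetical.

	/* in softirq context: the target memcg is unknown, allocate uncharged */
	obj = kmem_cache_alloc(cache, GFP_ATOMIC);

	/* later, in task context: charge obj to the current memcg */
	if (obj && !kmem_cache_charge(obj, GFP_KERNEL))
		pr_debug("object left uncharged\n");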
void kmem_cache_free(struct kmem_cache *s, void *objp);
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
@@ -733,6 +930,16 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * See krealloc_noprof() for further details.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
*/
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
size_t new_n,
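Illustrative sketch, not part of the patch: honoring the __GFP_ZERO rule above means every call for the same buffer carries the flag; buf is hypothetical.

	buf = kmalloc_array(8, sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
	/* ... */
	buf = krealloc_array(buf, 16, sizeof(*buf), GFP_KERNEL | __GFP_ZERO);
	/* dropping __GFP_ZERO on either call may leave the tail uninitialized */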
@@ -841,8 +1048,8 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
-extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
- __realloc_size(3);
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
extern void kvfree(const void *addr);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index fcd61dfe2af3..f1aa0952e8c3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,7 +109,7 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
* Architecture specific boot CPU setup. Defined as empty weak function in
* init/main.c. Architectures can override it.
*/
-void smp_prepare_boot_cpu(void);
+void __init smp_prepare_boot_cpu(void);
#ifdef CONFIG_SMP
@@ -294,4 +294,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index df9cdb8bbfb8..d18cc47e89bd 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -327,6 +327,7 @@ struct ucred {
* plain text and require encryption
*/
+#define MSG_SOCK_DEVMEM 0x2000000 /* Receive devmem skbs as cmsg */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 94fc1b57c57b..5e0dd47a0412 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -704,8 +704,6 @@ struct sdw_master_device {
container_of(d, struct sdw_master_device, dev)
struct sdw_driver {
- const char *name;
-
int (*probe)(struct sdw_slave *sdw,
const struct sdw_device_id *id);
int (*remove)(struct sdw_slave *sdw);
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index d537587b4499..37ae69365fe2 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -388,6 +388,7 @@ struct sdw_intel;
/* struct intel_sdw_hw_ops - SoundWire ops for Intel platforms.
* @debugfs_init: initialize all debugfs capabilities
* @debugfs_exit: close and cleanup debugfs capabilities
+ * @get_link_count: fetch link count from hardware registers
* @register_dai: read all PDI information and register DAIs
* @check_clock_stop: throw error message if clock is not stopped.
* @start_bus: normal start
@@ -412,6 +413,8 @@ struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
void (*debugfs_exit)(struct sdw_intel *sdw);
+ int (*get_link_count)(struct sdw_intel *sdw);
+
int (*register_dai)(struct sdw_intel *sdw);
void (*check_clock_stop)(struct sdw_intel *sdw);
@@ -447,4 +450,9 @@ extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+/*
+ * Max number of links supported in hardware
+ */
+#define SDW_INTEL_MAX_LINKS 5
+
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d47d5f14ff99..4b95663163e0 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -498,7 +498,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* controller has native support for memory like operations.
* @mem_caps: controller capabilities for the handling of memory operations.
* @unprepare_message: undo any work done by prepare_message().
- * @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @target_abort: abort the ongoing transfer request on an SPI target controller
* @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
@@ -725,10 +724,7 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- union {
- int (*slave_abort)(struct spi_controller *ctlr);
- int (*target_abort)(struct spi_controller *ctlr);
- };
+ int (*target_abort)(struct spi_controller *ctlr);
/*
* These hooks are for drivers that use a generic implementation
@@ -802,11 +798,6 @@ static inline void spi_controller_put(struct spi_controller *ctlr)
put_device(&ctlr->dev);
}
-static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
-{
- return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
-}
-
static inline bool spi_controller_is_target(struct spi_controller *ctlr)
{
return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
@@ -1296,7 +1287,6 @@ extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_slave_abort(struct spi_device *spi);
extern int spi_target_abort(struct spi_device *spi);
static inline size_t
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index d4cb83195f7a..c92cd43a47f4 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -24,6 +24,7 @@ struct spi_bitbang {
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
+ void (*set_mosi_idle)(struct spi_device *spi);
/* txrx_bufs() may handle dma mapping for transfers that don't
* already have one (transfer.{tx,rx}_dma is zero), or use PIO
*/
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 8f3f72480e78..ed57598394de 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -129,10 +129,23 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
+/*
+ * Values for initializing gp sequence fields. Higher values allow wraparounds
+ * to occur earlier.
+ * The second value, which includes state, is useful for static initialization
+ * of srcu_usage, where srcu_gp_seq_needed is expected to have some state value
+ * in its lower bits (or else it will appear to be already initialized when
+ * check_init_srcu_struct() checks it).
+ */
+#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
+#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
+
#define __SRCU_USAGE_INIT(name) \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
+ .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
+ .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
+ .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 84e13bd5df28..d79ff252cfdc 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -100,6 +100,7 @@ struct stmmac_dma_cfg {
bool eame;
bool multi_msi_en;
bool dche;
+ bool atds;
};
#define AXI_BLEN 7
@@ -137,33 +138,6 @@ struct stmmac_txq_cfg {
int tbs_en;
};
-/* FPE link state */
-enum stmmac_fpe_state {
- FPE_STATE_OFF = 0,
- FPE_STATE_CAPABLE = 1,
- FPE_STATE_ENTERING_ON = 2,
- FPE_STATE_ON = 3,
-};
-
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
-enum stmmac_fpe_task_state_t {
- __FPE_REMOVING,
- __FPE_TASK_SCHED,
-};
-
-struct stmmac_fpe_cfg {
- bool enable; /* FPE enable */
- bool hs_enable; /* FPE handshake enable */
- enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
- enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-};
-
struct stmmac_safety_feature_cfg {
u32 tsoee;
u32 mrxpee;
@@ -231,7 +205,6 @@ struct plat_stmmacenet_data {
struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_fpe_cfg *fpe_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
diff --git a/include/linux/string.h b/include/linux/string.h
index 9edace076ddb..0dd27afcfaf7 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -76,12 +76,16 @@ ssize_t sized_strscpy(char *, const char *, size_t);
* known size.
*/
#define __strscpy0(dst, src, ...) \
- sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy1(dst, src, size) sized_strscpy(dst, src, size)
+ sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy1(dst, src, size) \
+ sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy_pad0(dst, src, ...) \
- sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy_pad1(dst, src, size) sized_strscpy_pad(dst, src, size)
+ sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy_pad1(dst, src, size) \
+ sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
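Illustrative sketch, not part of the patch: the added __must_be_cstr() checks reject buffers annotated as not NUL-terminated; name, tag, and src are hypothetical.

	char name[16];
	char tag[16] __nonstring;

	strscpy(name, src);	/* fine: dst is an ordinary C string */
	strscpy(tag, src);	/* now fails to build: tag is __nonstring */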
/**
* strscpy - Copy a C-string into a sized buffer
@@ -279,6 +283,18 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *str, char old, char new);
+/**
+ * mem_is_zero - Check if an area of memory is all 0's.
+ * @s: The memory area
+ * @n: The size of the area
+ *
+ * Return: True if the area of memory is all 0's.
+ */
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
+
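Illustrative sketch, not part of the patch: mem_is_zero() replaces open-coded memchr_inv() checks, e.g. validating that reserved fields are clear; hdr is hypothetical.

	if (!mem_is_zero(hdr->reserved, sizeof(hdr->reserved)))
		return -EINVAL;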
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index d9ebe20229f8..120ca0f28e95 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -2,17 +2,32 @@
#ifndef _LINUX_STRING_CHOICES_H_
#define _LINUX_STRING_CHOICES_H_
+/*
+ * This file provides a series of helpers in the str_$TRUE_$FALSE format
+ * (more can be added as needed), where $TRUE and $FALSE are the
+ * corresponding literal strings. These helpers can be used in printing and
+ * in other places where constant strings are required. Using these helpers
+ * offers the following benefits:
+ * 1) Reducing the hardcoding of strings, which makes the code more readable
+ * through these simple, literally named helpers.
+ * 2) Unifying the output, which prevents the same string from being printed
+ * in various forms, such as enable/disable, enabled/disabled, en/dis.
+ * 3) Deduping by the linker, which results in a smaller binary file.
+ */
+
#include <linux/types.h>
static inline const char *str_enable_disable(bool v)
{
return v ? "enable" : "disable";
}
+#define str_disable_enable(v) str_enable_disable(!(v))
static inline const char *str_enabled_disabled(bool v)
{
return v ? "enabled" : "disabled";
}
+#define str_disabled_enabled(v) str_enabled_disabled(!(v))
static inline const char *str_hi_lo(bool v)
{
@@ -36,11 +51,25 @@ static inline const char *str_on_off(bool v)
{
return v ? "on" : "off";
}
+#define str_off_on(v) str_on_off(!(v))
static inline const char *str_yes_no(bool v)
{
return v ? "yes" : "no";
}
+#define str_no_yes(v) str_yes_no(!(v))
+
+static inline const char *str_up_down(bool v)
+{
+ return v ? "up" : "down";
+}
+#define str_down_up(v) str_up_down(!(v))
+
+static inline const char *str_true_false(bool v)
+{
+ return v ? "true" : "false";
+}
+#define str_false_true(v) str_true_false(!(v))
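Illustrative sketch, not part of the patch: the negated variants avoid sprinkling ! at call sites; link_ok and bypass are hypothetical.

	pr_info("link is %s\n", str_up_down(link_ok));
	pr_info("bypass %s\n", str_disabled_enabled(bypass));	/* same as str_enabled_disabled(!bypass) */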
/**
* str_plural - Return the simple pluralization based on English counts
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
index c505f30e8b68..eecc7eb63bfb 100644
--- a/include/linux/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -40,7 +40,7 @@ enum {
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy
{
- struct mii_phy_def* def;
+ const struct mii_phy_def *def;
u32 advertising;
int mii_id;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 0c77ba488bba..fec1e8a1570c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -151,13 +151,15 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+ RPC_TASK_SIGNALLED,
+};
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a7d0406b9ef5..e68fecf6eab5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/kthread.h>
/*
*
@@ -33,9 +34,9 @@
* node traffic on multi-node NUMA NFS servers.
*/
struct svc_pool {
- unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_id; /* pool id; also node id on NUMA */
struct lwq sp_xprts; /* pending transports */
- atomic_t sp_nrthreads; /* # of threads in pool */
+ unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -66,9 +67,10 @@ enum {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -232,6 +234,11 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+
+ int rq_err; /* Thread sets this to indicate
+ * initialisation success.
+ */
+
unsigned long bc_to_initval;
unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
@@ -305,6 +312,31 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
+/**
+ * svc_thread_init_status - report whether thread has initialised successfully
+ * @rqstp: the thread in question
+ * @err: errno code
+ *
+ * After performing any initialisation that could fail, and before starting
+ * normal work, each sunrpc svc_thread must call svc_thread_init_status()
+ * with an appropriate error, or zero.
+ *
+ * If zero is passed, the thread is ready and must continue until
+ * svc_thread_should_stop() returns true. If a non-zero error is passed
+ * the call will not return - the thread will exit.
+ */
+static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
+{
+ rqstp->rq_err = err;
+ /* memory barrier ensures the assignment to rq_err above is visible
+ * before the waitqueue_active() test inside wake_up_var() below.
+ */
+ smp_mb();
+ wake_up_var(&rqstp->rq_err);
+ if (err)
+ kthread_exit(1);
+}
+
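Illustrative sketch, not part of the patch: the shape of a service thread using this helper; my_setup() is hypothetical.

	static int my_svc_thread(void *data)
	{
		struct svc_rqst *rqstp = data;

		/* does not return on non-zero error: the thread exits */
		svc_thread_init_status(rqstp, my_setup(rqstp));

		while (!svc_thread_should_stop(rqstp))
			svc_recv(rqstp);
		svc_exit_thread(rqstp);
		return 0;
	}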
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -329,10 +361,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -401,19 +432,16 @@ struct svc_procedure {
*/
int sunrpc_set_pool_mode(const char *val);
int sunrpc_get_pool_mode(char *val, size_t size);
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
-struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
- struct svc_pool *pool, int node);
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
-void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
@@ -446,11 +474,6 @@ int svc_generic_rpcbind_set(struct net *net,
u32 version, int family,
unsigned short proto,
unsigned short port);
-int svc_rpcbind_set_version(struct net *net,
- const struct svc_program *progp,
- u32 version, int family,
- unsigned short proto,
- unsigned short port);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d33bab33099a..619fc0bd837a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,7 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
@@ -76,6 +77,7 @@ struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
+ struct rpcrdma_notification sc_rn; /* removal notification */
int sc_ord; /* RDMA read limit */
int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 61c455f1e1f5..2e111153f7cd 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -151,13 +152,16 @@ struct auth_ops {
struct svc_xprt;
-extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7c78ec6356b9..bf45d9e8492a 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -58,8 +58,6 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
*/
void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
-void svc_drop(struct svc_rqst *);
-void svc_sock_update_bufs(struct svc_serv *serv);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
const struct cred *cred);
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
new file mode 100644
index 000000000000..66ca3ece951a
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__BUILTINS_H_
+#define _SUNRPC_XDRGEN__BUILTINS_H_
+
+#include <linux/sunrpc/xdr.h>
+
+static inline bool
+xdrgen_decode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_encode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (*p != xdr_zero);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = val ? xdr_one : xdr_zero;
+ return true;
+}
+
+static inline bool
+xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_int(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_long(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */
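Illustrative sketch, not part of the patch: composing these builtins into an encoder for a hypothetical XDR struct of an int followed by an unsigned hyper.

	static bool encode_result(struct xdr_stream *xdr, s32 status, u64 size)
	{
		/* each helper returns false if the stream runs out of space */
		return xdrgen_encode_int(xdr, status) &&
		       xdrgen_encode_unsigned_hyper(xdr, size);
	}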
diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h
new file mode 100644
index 000000000000..be9e62371758
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_defs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__DEFS_H_
+#define _SUNRPC_XDRGEN__DEFS_H_
+
+#define TRUE (true)
+#define FALSE (false)
+
+typedef struct {
+ u32 len;
+ unsigned char *data;
+} string;
+
+typedef struct {
+ u32 len;
+ u8 *data;
+} opaque;
+
+#endif /* _SUNRPC_XDRGEN__DEFS_H_ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba7ea95d1c57..ca533b478c21 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -243,22 +243,24 @@ enum {
* free clusters are organized into a list. We fetch an entry from the list to
* get a free cluster.
*
- * The data field stores next cluster if the cluster is free or cluster usage
- * counter otherwise. The flags field determines if a cluster is free. This is
- * protected by swap_info_struct.lock.
+ * The flags field determines if a cluster is free. This is
+ * protected by the cluster lock.
*/
struct swap_cluster_info {
spinlock_t lock; /*
* Protect swap_cluster_info fields
- * and swap_info_struct->swap_map
- * elements correspond to the swap
- * cluster
+ * other than list, and swap_info_struct->swap_map
+ * elements corresponding to the swap cluster.
*/
- unsigned int data:24;
- unsigned int flags:8;
+ u16 count;
+ u8 flags;
+ u8 order;
+ struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
+#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
+#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
/*
* The first page in the swap file is the swap header, which is always marked
@@ -283,11 +285,6 @@ struct percpu_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
-struct swap_cluster_list {
- struct swap_cluster_info head;
- struct swap_cluster_info tail;
-};
-
/*
* The in-memory structure used to track swap areas.
*/
@@ -299,8 +296,15 @@ struct swap_info_struct {
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_list free_clusters; /* free clusters list */
+ struct list_head free_clusters; /* free clusters list */
+ struct list_head full_clusters; /* full clusters list */
+ struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that contain at least one free slot */
+ struct list_head frag_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that are fragmented or contended */
+ unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
@@ -331,7 +335,7 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_list discard_clusters; /* discard clusters list */
+ struct list_head discard_clusters; /* discard clusters list */
struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
@@ -478,9 +482,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
@@ -545,7 +549,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
@@ -554,7 +558,7 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline int swapcache_prepare(swp_entry_t swp)
+static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4bcf6754738d..5758104921e6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -870,7 +870,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
#endif
asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
struct file_handle __user *handle,
- int __user *mnt_id, int flag);
+ void __user *mnt_id, int flag);
asmlinkage long sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index efd16ed52315..a38494d6b5f4 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -155,6 +155,18 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_device_set_dev_groups() - Set device attribute groups
+ * @teedev: Device to register
+ * @dev_groups: Attribute groups
+ *
+ * Assigns the provided @dev_groups to the @teedev to be registered later
+ * with tee_device_register(). Calling this function is optional, but if
+ * it's called it must be called before tee_device_register().
+ */
+void tee_device_set_dev_groups(struct tee_device *teedev,
+ const struct attribute_group **dev_groups);
+
+/**
* tee_session_calc_client_uuid() - Calculates client UUID for session
* @uuid: Resulting UUID
* @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b86ddca46b9e..25ea8fe2313e 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -85,11 +85,17 @@ struct thermal_trip {
struct thermal_zone_device;
+struct cooling_spec {
+ unsigned long upper; /* Highest cooling state */
+ unsigned long lower; /* Lowest cooling state */
+ unsigned int weight; /* Cooling device weight */
+};
+
struct thermal_zone_device_ops {
- int (*bind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
- int (*unbind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
+ bool (*should_bind) (struct thermal_zone_device *,
+ const struct thermal_trip *,
+ struct thermal_cooling_device *,
+ struct cooling_spec *);
int (*get_temp) (struct thermal_zone_device *, int *);
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
@@ -203,15 +209,12 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev,
}
#endif
-int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
-int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
struct thermal_trip *trip, int temp);
@@ -240,20 +243,6 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
int thermal_zone_device_id(struct thermal_zone_device *tzd);
struct device *thermal_zone_device(struct thermal_zone_device *tzd);
-int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight);
-int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *,
- unsigned long, unsigned long,
- unsigned int);
-int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev);
-int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 84ff2844df2a..902c20ef495a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -73,7 +73,7 @@ struct tk_read_base {
* @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
- * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * we need to add to xtime (or xtime corrected for sub jiffy times)
* to get to monotonic time. Monotonic is pegged at zero at system
* boot time, so wall_to_monotonic will be negative, however, we will
* ALWAYS keep the tv_nsec part positive so we can use the usual
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6be396bb4297..93a9f3070b48 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -64,6 +64,13 @@ struct tp_module {
bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
+void for_each_tracepoint_in_module(struct module *,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
@@ -79,6 +86,19 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
+static inline
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
#endif /* CONFIG_MODULES */
/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d8e4105a2f21..39c7cf82b0c2 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -33,6 +33,13 @@
})
#endif
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+#endif
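Illustrative sketch, not part of the patch: the intended call pattern, falling back to the classic access_begin path on architectures without masked user access; from, val, and the surrounding function are hypothetical.

	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;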
+
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h
index bff7445498de..d8219cbe09ff 100644
--- a/include/linux/ubsan.h
+++ b/include/linux/ubsan.h
@@ -4,6 +4,11 @@
#ifdef CONFIG_UBSAN_TRAP
const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type);
+#else
+static inline const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 7020adedfa08..853f9de5aa05 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,6 +11,7 @@
#include <uapi/linux/uio.h>
struct page;
+struct folio_queue;
typedef unsigned int __bitwise iov_iter_extraction_t;
@@ -25,6 +26,7 @@ enum iter_type {
ITER_IOVEC,
ITER_BVEC,
ITER_KVEC,
+ ITER_FOLIOQ,
ITER_XARRAY,
ITER_DISCARD,
};
@@ -66,6 +68,7 @@ struct iov_iter {
const struct iovec *__iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ const struct folio_queue *folioq;
struct xarray *xarray;
void __user *ubuf;
};
@@ -74,6 +77,7 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
+ u8 folioq_slot;
loff_t xarray_start;
};
};
@@ -126,6 +130,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_folioq(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_FOLIOQ;
+}
+
static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_XARRAY;
@@ -180,6 +189,12 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
+static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_from_iter(&folio->page, offset, bytes, i);
+}
+
static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
size_t offset, size_t bytes, struct iov_iter *i)
{
@@ -273,6 +288,9 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
+void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq,
+ unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
diff --git a/include/linux/union_find.h b/include/linux/union_find.h
new file mode 100644
index 000000000000..cfd49263c138
--- /dev/null
+++ b/include/linux/union_find.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_UNION_FIND_H
+#define __LINUX_UNION_FIND_H
+/**
+ * union_find.h - union-find data structure implementation
+ *
+ * This header provides functions and structures to implement the union-find
+ * data structure. The union-find data structure is used to manage disjoint
+ * sets and supports efficient union and find operations.
+ *
+ * See Documentation/core-api/union_find.rst for documentation and samples.
+ */
+
+struct uf_node {
+ struct uf_node *parent;
+ unsigned int rank;
+};
+
+/* This macro is used for static initialization of a union-find node. */
+#define UF_INIT_NODE(node) {.parent = &node, .rank = 0}
+
+/**
+ * uf_node_init - Initialize a union-find node
+ * @node: pointer to the union-find node to be initialized
+ *
+ * This function sets the parent of the node to itself and
+ * initializes its rank to 0.
+ */
+static inline void uf_node_init(struct uf_node *node)
+{
+ node->parent = node;
+ node->rank = 0;
+}
+
+/* Find the root of a node */
+struct uf_node *uf_find(struct uf_node *node);
+
+/* Merge the sets containing two nodes */
+void uf_union(struct uf_node *node1, struct uf_node *node2);
+
+#endif /* __LINUX_UNION_FIND_H */
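Illustrative sketch, not part of the patch: merging two singleton sets and testing membership.

	struct uf_node a, b;

	uf_node_init(&a);
	uf_node_init(&b);
	uf_union(&a, &b);
	if (uf_find(&a) == uf_find(&b))
		pr_debug("a and b are in the same set\n");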
diff --git a/include/linux/unroll.h b/include/linux/unroll.h
new file mode 100644
index 000000000000..d42fd6366373
--- /dev/null
+++ b/include/linux/unroll.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __UNROLL_H
+#define __UNROLL_H
+
+#include <linux/args.h>
+
+#define UNROLL(N, MACRO, args...) CONCATENATE(__UNROLL_, N)(MACRO, args)
+
+#define __UNROLL_0(MACRO, args...)
+#define __UNROLL_1(MACRO, args...) __UNROLL_0(MACRO, args) MACRO(0, args)
+#define __UNROLL_2(MACRO, args...) __UNROLL_1(MACRO, args) MACRO(1, args)
+#define __UNROLL_3(MACRO, args...) __UNROLL_2(MACRO, args) MACRO(2, args)
+#define __UNROLL_4(MACRO, args...) __UNROLL_3(MACRO, args) MACRO(3, args)
+#define __UNROLL_5(MACRO, args...) __UNROLL_4(MACRO, args) MACRO(4, args)
+#define __UNROLL_6(MACRO, args...) __UNROLL_5(MACRO, args) MACRO(5, args)
+#define __UNROLL_7(MACRO, args...) __UNROLL_6(MACRO, args) MACRO(6, args)
+#define __UNROLL_8(MACRO, args...) __UNROLL_7(MACRO, args) MACRO(7, args)
+#define __UNROLL_9(MACRO, args...) __UNROLL_8(MACRO, args) MACRO(8, args)
+#define __UNROLL_10(MACRO, args...) __UNROLL_9(MACRO, args) MACRO(9, args)
+#define __UNROLL_11(MACRO, args...) __UNROLL_10(MACRO, args) MACRO(10, args)
+#define __UNROLL_12(MACRO, args...) __UNROLL_11(MACRO, args) MACRO(11, args)
+#define __UNROLL_13(MACRO, args...) __UNROLL_12(MACRO, args) MACRO(12, args)
+#define __UNROLL_14(MACRO, args...) __UNROLL_13(MACRO, args) MACRO(13, args)
+#define __UNROLL_15(MACRO, args...) __UNROLL_14(MACRO, args) MACRO(14, args)
+#define __UNROLL_16(MACRO, args...) __UNROLL_15(MACRO, args) MACRO(15, args)
+#define __UNROLL_17(MACRO, args...) __UNROLL_16(MACRO, args) MACRO(16, args)
+#define __UNROLL_18(MACRO, args...) __UNROLL_17(MACRO, args) MACRO(17, args)
+#define __UNROLL_19(MACRO, args...) __UNROLL_18(MACRO, args) MACRO(18, args)
+#define __UNROLL_20(MACRO, args...) __UNROLL_19(MACRO, args) MACRO(19, args)
+
+#endif /* __UNROLL_H */
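Illustrative sketch, not part of the patch: UNROLL() repeats a macro body with an incrementing index, forcing compile-time unrolling; INIT_SLOT and tbl are hypothetical.

	#define INIT_SLOT(i, base)	(base)[i] = (i);
	UNROLL(4, INIT_SLOT, tbl)
	/* expands to: tbl[0] = 0; tbl[1] = 1; tbl[2] = 2; tbl[3] = 3; */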
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index b503fafb7fb3..2b294bf1881f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+struct uprobe;
struct vm_area_struct;
struct mm_struct;
struct inode;
@@ -27,22 +28,22 @@ struct page;
#define MAX_URETPROBE_DEPTH 64
-enum uprobe_filter_ctx {
- UPROBE_FILTER_REGISTER,
- UPROBE_FILTER_UNREGISTER,
- UPROBE_FILTER_MMAP,
-};
-
struct uprobe_consumer {
+ /*
+ * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
+ * unregister the uprobe for the current process. If UPROBE_HANDLER_REMOVE
+ * is returned, the filter() callback has to be implemented as well, and it
+ * should return false to "confirm" the decision to uninstall the uprobe
+ * for the current process. If filter() is omitted or returns true,
+ * UPROBE_HANDLER_REMOVE is effectively ignored.
+ */
int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
struct pt_regs *regs);
- bool (*filter)(struct uprobe_consumer *self,
- enum uprobe_filter_ctx ctx,
- struct mm_struct *mm);
+ bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
- struct uprobe_consumer *next;
+ struct list_head cons_node;
};
#ifdef CONFIG_UPROBES
@@ -76,6 +77,8 @@ struct uprobe_task {
struct uprobe *active_uprobe;
unsigned long xol_vaddr;
+ struct arch_uprobe *auprobe;
+
struct return_instance *return_instances;
unsigned int depth;
};
@@ -110,10 +113,10 @@ extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
-extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
-extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
-extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
-extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
+extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
+extern void uprobe_unregister_sync(void);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
@@ -151,22 +154,21 @@ static inline void uprobes_init(void)
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
-static inline int
-uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- return -ENOSYS;
-}
-static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+static inline struct uprobe *
+uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
static inline int
-uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
return -ENOSYS;
}
static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+}
+static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
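Illustrative sketch, not part of the patch: the updated registration flow, where uprobe_register() now hands back a struct uprobe pointer used for unregistration; inode, offset, and consumer are hypothetical.

	struct uprobe *u;

	u = uprobe_register(inode, offset, 0, &consumer);
	if (IS_ERR(u))
		return PTR_ERR(u);
	/* ... */
	uprobe_unregister_nosync(u, &consumer);
	uprobe_unregister_sync();	/* wait for in-flight handlers to finish */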
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6030a8235617..3625096d5f85 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -21,9 +21,11 @@ struct uid_gid_extent {
};
struct uid_gid_map { /* 64 bytes -- 1 cache line */
- u32 nr_extents;
union {
- struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ struct {
+ struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ u32 nr_extents;
+ };
struct {
struct uid_gid_extent *forward;
struct uid_gid_extent *reverse;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a12bcf042551..9fc6ce15c499 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -267,6 +267,25 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
+void userfaultfd_reset_ctx(struct vm_area_struct *vma);
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async);
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx);
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx);
+
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..2e7a30fe6b92 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -582,11 +582,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attributes after it was created
+ * @mdev: parent device to use for device
+ * @dev: vdpa device structure
+ * @config: Attributes to be set for the device.
+ * The driver needs to check the mask of the structure and then set
+ * the related information in the vdpa device. The driver must return 0
+ * if the attributes are set successfully.
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+ int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config);
};
/**
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 4b16844c6bc2..306137a15d07 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -118,7 +118,9 @@ struct virtio_admin_cmd {
* struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
- * @config_enabled: configuration change reporting enabled
+ * @config_core_enabled: configuration change reporting enabled by core
+ * @config_driver_disabled: configuration change reporting disabled by
+ * a driver
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
* @vqs_list_lock: protects @vqs.
@@ -135,7 +137,8 @@ struct virtio_admin_cmd {
struct virtio_device {
int index;
bool failed;
- bool config_enabled;
+ bool config_core_enabled;
+ bool config_driver_disabled;
bool config_change_pending;
spinlock_t config_lock;
spinlock_t vqs_list_lock;
@@ -166,6 +169,10 @@ void __virtqueue_break(struct virtqueue *_vq);
void __virtqueue_unbreak(struct virtqueue *_vq);
void virtio_config_changed(struct virtio_device *dev);
+
+void virtio_config_driver_disable(struct virtio_device *dev);
+void virtio_config_driver_enable(struct virtio_device *dev);
+
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 6c395a2600e8..276ca543ef44 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -173,7 +173,8 @@ retry:
break;
case SKB_GSO_TCPV4:
case SKB_GSO_TCPV6:
- if (skb->csum_offset != offsetof(struct tcphdr, check))
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->csum_offset != offsetof(struct tcphdr, check))
return -EINVAL;
break;
}
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c82089dee0c8..0387d64e2c66 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -133,6 +133,7 @@ struct virtio_vsock_sock {
u32 tx_cnt;
u32 peer_fwd_cnt;
u32 peer_buf_alloc;
+ size_t bytes_unsent;
/* Protected by rx_lock */
u32 fwd_cnt;
@@ -193,6 +194,11 @@ s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);
+ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk);
+
+void virtio_transport_consume_skb_sent(struct sk_buff *skb,
+ bool consume);
+
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
int
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 747943bc8cc2..aed952d04132 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -50,6 +50,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_ANON,
PGSTEAL_FILE,
#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_SUCCESS,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
@@ -104,6 +105,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
+ THP_UNDERUSED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_SCAN_EXCEED_NONE_PTE,
THP_SCAN_EXCEED_SWAP_PTE,
@@ -154,6 +156,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMA_LOCK_RETRY,
VMA_LOCK_MISS,
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ KSTACK_1K,
+#if THREAD_SIZE > 1024
+ KSTACK_2K,
+#endif
+#if THREAD_SIZE > 2048
+ KSTACK_4K,
+#endif
+#if THREAD_SIZE > 4096
+ KSTACK_8K,
+#endif
+#if THREAD_SIZE > 8192
+ KSTACK_16K,
+#endif
+#if THREAD_SIZE > 16384
+ KSTACK_32K,
+#endif
+#if THREAD_SIZE > 32768
+ KSTACK_64K,
+#endif
+#if THREAD_SIZE > 65536
+ KSTACK_REST,
+#endif
+#endif /* CONFIG_DEBUG_STACK_USAGE */
NR_VM_EVENT_ITEMS
};
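For illustration, the power-of-two buckets above lend themselves to an exit-time accounting hook along these lines; kstack_account() and how the peak usage is measured are hypothetical, only count_vm_event() and the event names are taken from the kernel:

	static void kstack_account(unsigned long used)
	{
		if (used <= SZ_1K)
			count_vm_event(KSTACK_1K);
	#if THREAD_SIZE > 1024
		else if (used <= SZ_2K)
			count_vm_event(KSTACK_2K);
	#endif
	#if THREAD_SIZE > 2048
		else if (used <= SZ_4K)
			count_vm_event(KSTACK_4K);
	#endif
		/* ... continuing up the buckets, with KSTACK_REST as the catch-all. */
	}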
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e4a631ec430b..ad2ce7a6ab7a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -189,6 +189,10 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
+void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
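vrealloc() presumably follows the usual realloc() contract (contents preserved up to the smaller of the two sizes), so the classic leak-safe pattern applies; buf and new_size are placeholders:

	void *tmp;

	tmp = vrealloc(buf, new_size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* assuming buf stays valid on failure; caller still owns it */
	buf = tmp;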
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9eb77c9007e6..d2761bf8ff32 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,6 +32,7 @@ struct reclaim_stat {
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
+ unsigned nr_demoted;
};
/* Stat data for system wide items */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4eb8f9563136..59c2695e12e7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+ struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
+ alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \
+ 1, lockdep_map, ##args)
+#endif
+
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
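A sketch of the intended pattern: a driver that creates one ordered workqueue per device instance can share a single static lockdep_map across all of them instead of leaking a new class per call. STATIC_LOCKDEP_MAP_INIT() is the generic lockdep initializer; the mydrv_* names and the %d argument are hypothetical:

	#ifdef CONFIG_LOCKDEP
	static struct lock_class_key mydrv_wq_key;
	static struct lockdep_map mydrv_wq_lockdep_map =
		STATIC_LOCKDEP_MAP_INIT("mydrv_wq", &mydrv_wq_key);
	#endif

	wq = alloc_ordered_workqueue_lockdep_map("mydrv_wq_%d", WQ_MEM_RECLAIM,
						 &mydrv_wq_lockdep_map, id);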
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 1a54676d843a..d6db822e4bb3 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,6 +79,9 @@ struct writeback_control {
*/
struct swap_iocb **swap_plug;
+ /* Target list for splitting a large folio */
+ struct list_head *list;
+
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
@@ -200,7 +203,8 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
+ wait_var_event(inode_state_wait_address(inode, __I_NEW),
+ !(READ_ONCE(inode->i_state) & I_NEW));
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -217,7 +221,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
-void cgroup_writeback_umount(void);
+void cgroup_writeback_umount(struct super_block *sb);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
@@ -324,7 +328,7 @@ static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
{
}
-static inline void cgroup_writeback_umount(void)
+static inline void cgroup_writeback_umount(struct super_block *sb)
{
}
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 7285ca5d56e9..58ae1d746c6f 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_H
@@ -19,11 +18,6 @@
# include <stdint.h>
#endif
-/* In Linux, this is used to make extern functions static when needed. */
-#ifndef XZ_EXTERN
-# define XZ_EXTERN extern
-#endif
-
/**
* enum xz_mode - Operation mode
*
@@ -143,7 +137,7 @@ struct xz_buf {
size_t out_size;
};
-/**
+/*
* struct xz_dec - Opaque type to hold the XZ decoder state
*/
struct xz_dec;
@@ -191,7 +185,7 @@ struct xz_dec;
* ready to be used with xz_dec_run(). If memory allocation fails,
* xz_dec_init() returns NULL.
*/
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
/**
* xz_dec_run() - Run the XZ decoder
@@ -211,7 +205,7 @@ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
* get that amount valid data from the beginning of the stream. You must use
* the multi-call decoder if you don't want to uncompress the whole stream.
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
/**
* xz_dec_reset() - Reset an already allocated decoder state
@@ -224,32 +218,38 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
* xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
* multi-call mode.
*/
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+void xz_dec_reset(struct xz_dec *s);
/**
* xz_dec_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_init(). If s is NULL,
* this function does nothing.
*/
-XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+void xz_dec_end(struct xz_dec *s);
-/*
- * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
- * See xz_dec_microlzma_alloc() below for details.
+/**
+ * DOC: MicroLZMA decompressor
+ *
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too. **In most cases one needs the XZ APIs above instead.**
*
- * These functions aren't used or available in preboot code and thus aren't
- * marked with XZ_EXTERN. This avoids warnings about static functions that
- * are never defined.
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
*/
-/**
+
+/*
* struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
*/
struct xz_dec_microlzma;
/**
* xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
- * @mode XZ_SINGLE or XZ_PREALLOC
- * @dict_size LZMA dictionary size. This must be at least 4 KiB and
+ * @mode: XZ_SINGLE or XZ_PREALLOC
+ * @dict_size: LZMA dictionary size. This must be at least 4 KiB and
* at most 3 GiB.
*
* In contrast to xz_dec_init(), this function only allocates the memory
@@ -262,40 +262,30 @@ struct xz_dec_microlzma;
* On success, xz_dec_microlzma_alloc() returns a pointer to
* struct xz_dec_microlzma. If memory allocation fails or
* dict_size is invalid, NULL is returned.
- *
- * The compressed format supported by this decoder is a raw LZMA stream
- * whose first byte (always 0x00) has been replaced with bitwise-negation
- * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
- * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
- * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
- * marker must not be used. The unused values are reserved for future use.
- * This MicroLZMA header format was created for use in EROFS but may be used
- * by others too.
*/
-extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
- uint32_t dict_size);
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
/**
* xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
- * @s Decoder state allocated using xz_dec_microlzma_alloc()
- * @comp_size Compressed size of the input stream
- * @uncomp_size Uncompressed size of the input stream. A value smaller
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size: Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
* than the real uncompressed size of the input stream can
* be specified if uncomp_size_is_exact is set to false.
* uncomp_size can never be set to a value larger than the
* expected real uncompressed size because it would eventually
* result in XZ_DATA_ERROR.
- * @uncomp_size_is_exact This is an int instead of bool to avoid
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
* requiring stdbool.h. This should normally be set to true.
* When this is set to false, error detection is weaker.
*/
-extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
- uint32_t comp_size, uint32_t uncomp_size,
- int uncomp_size_is_exact);
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact);
/**
* xz_dec_microlzma_run() - Run the MicroLZMA decoder
- * @s Decoder state initialized using xz_dec_microlzma_reset()
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
* @b: Input and output buffers
*
* This works similarly to xz_dec_run() with a few important differences.
@@ -329,15 +319,14 @@ extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
* may be changed normally like with XZ_PREALLOC. This way input data can be
* provided from non-contiguous memory.
*/
-extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
* xz_dec_microlzma_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc().
* If s is NULL, this function does nothing.
*/
-extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
/*
* Standalone build (userspace build or in-kernel build for boot time use)
@@ -358,13 +347,13 @@ extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
* This must be called before any other xz_* function to initialize
* the CRC32 lookup table.
*/
-XZ_EXTERN void xz_crc32_init(void);
+void xz_crc32_init(void);
/*
* Update CRC32 value using the polynomial from IEEE-802.3. To start a new
* calculation, the third argument must be zero. To continue the calculation,
* the previously returned value is passed as the third argument.
*/
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif
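With XZ_EXTERN gone these are plain prototypes; for orientation, a minimal single-call decode under stated assumptions (the in/out buffers and sizes are placeholders, and XZ_SINGLE requires the full input and output to be available at once):

	struct xz_buf b = {
		.in = in, .in_size = in_size,
		.out = out, .out_size = out_size,
	};
	struct xz_dec *s;
	enum xz_ret ret;

	xz_crc32_init();
	s = xz_dec_init(XZ_SINGLE, 0);
	if (!s)
		return -ENOMEM;
	ret = xz_dec_run(s, &b);	/* XZ_STREAM_END on success */
	xz_dec_end(s);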
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 113408eef6ec..b2c7cf310c8f 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -77,6 +77,30 @@ int zstd_min_clevel(void);
*/
int zstd_max_clevel(void);
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * struct zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * struct zstd_dict_content_type - Dictionary content type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
/* ====== Parameter Selection ====== */
/**
@@ -136,6 +160,19 @@ typedef ZSTD_parameters zstd_parameters;
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size);
+
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size);
+
/* ====== Single-pass Compression ====== */
typedef ZSTD_CCtx zstd_cctx;
@@ -180,6 +217,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters);
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx: Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx *cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @cparams: Compression parameters.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not
+ * be freed before the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ * otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict *cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_cdict *cdict);
+
/* ====== Single-pass Decompression ====== */
typedef ZSTD_DCtx zstd_dctx;
@@ -220,6 +322,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size);
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not
+ * be freed before the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ * otherwise.
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem);
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict: Pointer to decompression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx: Pointer to decompression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict);
+
+
/* ====== Streaming Buffers ====== */
/**
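Tying the new entry points together, a hedged sketch of a one-shot compression with a by-reference dictionary; mem (the caller's zstd_custom_mem allocator), buffers, level and sizes are placeholders:

	zstd_compression_parameters cparams =
		zstd_get_cparams(level, src_size, dict_size);
	zstd_cctx *cctx = zstd_create_cctx_advanced(mem);
	zstd_cdict *cdict = zstd_create_cdict_byreference(dict, dict_size,
							  cparams, mem);
	size_t n = 0;

	if (cctx && cdict)
		n = zstd_compress_using_cdict(cctx, dst, dst_capacity,
					      src, src_size, cdict);
	/* dict must outlive cdict (byRef); check n with zstd_is_error(). */
	zstd_free_cdict(cdict);
	zstd_free_cctx(cctx);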
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 6cecb4a4f68b..9cd1beef0654 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -13,17 +13,15 @@ extern atomic_t zswap_stored_pages;
struct zswap_lruvec_state {
/*
- * Number of pages in zswap that should be protected from the shrinker.
- * This number is an estimate of the following counts:
+ * Number of pages swapped in from disk, i.e. not found in the zswap pool.
*
- * a) Recent page faults.
- * b) Recent insertion to the zswap LRU. This includes new zswap stores,
- * as well as recent zswap LRU rotations.
- *
- * These pages are likely to be warm, and might incur IO if the are written
- * to swap.
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+ * LRU active/protected and not written them back, we would not have had to
+ * swap them in.
*/
- atomic_long_t nr_zswap_protected;
+ atomic_long_t nr_disk_swapins;
};
unsigned long zswap_total_pages(void);
diff --git a/include/media/cec.h b/include/media/cec.h
index d131514032f2..16b412b3131b 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -66,6 +66,8 @@ struct cec_data {
struct list_head xfer_list;
struct cec_adapter *adap;
struct cec_msg msg;
+ u8 match_len;
+ u8 match_reply[5];
struct cec_fh *fh;
struct delayed_work work;
struct completion c;
@@ -296,6 +298,37 @@ struct cec_adapter {
char input_phys[40];
};
+static inline int cec_get_device(struct cec_adapter *adap)
+{
+ struct cec_devnode *devnode = &adap->devnode;
+
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+ * Return -ENODEV if the cec device has already been
+ * removed or is no longer registered.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+static inline void cec_put_device(struct cec_adapter *adap)
+{
+ put_device(&adap->devnode.dev);
+}
+
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
{
return adap->priv;
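Usage sketch for the new refcount helpers, e.g. from a device node open path; everything except cec_get_device()/cec_put_device() is hypothetical:

	static int mydrv_open(struct cec_adapter *adap)
	{
		int err = cec_get_device(adap);

		if (err)
			return err;	/* -ENODEV: devnode already unregistered */

		/* ... the adapter can be used safely here ... */

		cec_put_device(adap);	/* drop the reference when done */
		return 0;
	}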
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 803349599c27..d095908073ef 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -127,7 +127,6 @@ struct lirc_fh {
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
* @rx_resolution : resolution (in us) of input sampler
- * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
* @gap_start: start time for gap after timeout if non-zero
@@ -194,7 +193,6 @@ struct rc_dev {
u32 min_timeout;
u32 max_timeout;
u32 rx_resolution;
- u32 tx_resolution;
#ifdef CONFIG_LIRC
struct device lirc_dev;
struct cdev lirc_cdev;
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index ed0a44b6eada..1837c9fd78cf 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -178,6 +178,9 @@ void v4l2_pipeline_pm_put(struct media_entity *entity);
* @flags: New link flags that will be applied
* @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index bd235d325ff9..8daa0929865c 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1250,6 +1250,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
* calls v4l2_subdev_link_validate_default() to ensure that
* width, height and the media bus pixel code are equal on both
* source and sink of the link.
+ *
+ * The function can be used as a drop-in &media_entity_ops.link_validate
+ * implementation for v4l2_subdev instances. It supports all links between
+ * subdevs, as well as links between subdevs and video devices, provided that
+ * the video devices also implement their &media_entity_ops.link_validate
+ * operation.
*/
int v4l2_subdev_link_validate(struct media_link *link);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 955237ac503d..9b02aeba4108 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -154,6 +154,8 @@ struct vb2_mem_ops {
* @mem_priv: private data with this plane.
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
+ * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a
+ * previous plane of the buffer.
* @bytesused: number of bytes occupied by data in the plane (payload).
* @length: size of this plane (NOT the payload) in bytes. The maximum
* valid size is MAX_UINT - PAGE_SIZE.
@@ -179,6 +181,7 @@ struct vb2_plane {
void *mem_priv;
struct dma_buf *dbuf;
unsigned int dbuf_mapped;
+ bool dbuf_duplicated;
unsigned int bytesused;
unsigned int length;
unsigned int min_length;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 62a407db1bf5..363dd63babe7 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -37,10 +37,14 @@ struct prefix_info {
struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
__u8 onlink : 1,
- autoconf : 1,
- reserved : 6;
+ autoconf : 1,
+ routeraddr : 1,
+ preferpd : 1,
+ reserved : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 4,
+ preferpd : 1,
+ routeraddr : 1,
autoconf : 1,
onlink : 1;
#else
@@ -183,10 +187,12 @@ static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
return 0;
}
+#define INFINITY_LIFE_TIME 0xFFFFFFFF
+
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
- if (timeout == 0xffffffff)
+ if (timeout == INFINITY_LIFE_TIME)
return ~0UL;
/*
@@ -327,7 +333,7 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
/**
* __in6_dev_stats_get - get inet6_dev pointer for stats
* @dev: network device
- * @skb: skb for original incoming interface if neeeded
+ * @skb: skb for original incoming interface if needed
*
* Caller must hold rcu_read_lock or RTNL, because this function
* does not take a reference on the inet6_dev.
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 24d970f7a4fa..9e85424c8343 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -169,6 +169,9 @@ struct vsock_transport {
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
+ /* SIOCOUTQ ioctl */
+ ssize_t (*unsent_bytes)(struct vsock_sock *vsk);
+
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
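Since the new op has the same signature as virtio_transport_unsent_bytes() from the virtio_vsock.h hunk above, a virtio-based transport can presumably wire it up directly; a sketch with the remaining ops elided:

	static struct vsock_transport my_virtio_transport = {
		/* ... other ops ... */
		.unsent_bytes = virtio_transport_unsent_bytes, /* backs SIOCOUTQ */
	};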
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d1d073089f38..bab1e3d7452a 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -2901,6 +2901,11 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
return (struct hci_sco_hdr *) skb->data;
}
+static inline struct hci_iso_hdr *hci_iso_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_iso_hdr *)skb->data;
+}
+
/* Command opcode pack/unpack */
#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 1a32e602630e..88265d37aa72 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -2257,8 +2257,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status);
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 5cfdc813491a..313d0b972e06 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -968,10 +968,6 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
-void l2cap_move_start(struct l2cap_chan *chan);
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status);
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 9ce5ac2bfbad..2053cd8e788a 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -231,7 +231,10 @@ typedef struct port {
mux_states_t sm_mux_state; /* state machine mux state */
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
- u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter
+ * (always on - enters transmit
+ * state 3 times per second)
+ */
u16 sm_churn_actor_timer_counter;
u16 sm_churn_partner_timer_counter;
u32 churn_actor_count;
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index 9dc082b2d543..e5945427f38d 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -53,7 +53,7 @@ struct slave;
struct tlb_client_info {
- struct slave *tx_slave; /* A pointer to slave used for transmiting
+ struct slave *tx_slave; /* A pointer to slave used for transmitting
* packets to a Client that the Hash function
* gave this entry index.
*/
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 522f1da8b747..f03040baaefd 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -131,7 +131,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
#endif
}
-/* used in the protocol hanlder to propagate the napi_id to the socket */
+/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 0f45d875905f..053e7c6a6a66 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -20,7 +20,7 @@ struct caif_payload_info;
* @assert: expression to evaluate.
*
* This function will print a error message and a do WARN_ON if the
- * assertion failes. Normally this will do a stack up at the current location.
+ * assertion fails. Normally this will do a stack up at the current location.
*/
#define caif_assert(assert) \
do { \
@@ -116,7 +116,7 @@ enum caif_direction {
* @dn: Pointer down to the layer below.
* @node: List node used when layer participate in a list.
* @receive: Packet receive function.
- * @transmit: Packet transmit funciton.
+ * @transmit: Packet transmit function.
* @ctrlcmd: Used for control signalling upwards in the stack.
* @modemcmd: Used for control signaling downwards in the stack.
* @id: The identity of this layer
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 44d914a50369..acf664227d96 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -18,7 +18,7 @@ struct cfpkt *cfpkt_create(u16 len);
/*
* Destroy a CAIF Packet.
- * pkt Packet to be destoyed.
+ * pkt Packet to be destroyed.
*/
void cfpkt_destroy(struct cfpkt *pkt);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 192d72c8b465..69ec1eb41a09 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4837,9 +4837,9 @@ struct cfg80211_ops {
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms);
+ u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
- struct net_device *dev);
+ struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -6194,9 +6194,6 @@ enum ieee80211_ap_reg_power {
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
- * @cac_started: true if DFS channel availability check has been started
- * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
- * @cac_time_ms: CAC time in ms
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -6220,6 +6217,11 @@ enum ieee80211_ap_reg_power {
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
* @ap and @client for each link
+ * @links.cac_started: true if DFS channel availability check has been
+ * started
+ * @links.cac_start_time: timestamp (jiffies) when the dfs state was
+ * entered.
+ * @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
*/
struct wireless_dev {
@@ -6261,11 +6263,6 @@ struct wireless_dev {
u32 owner_nlportid;
bool nl_owner_dead;
- /* FIXME: need to rework radar detection for MLO */
- bool cac_started;
- unsigned long cac_start_time;
- unsigned int cac_time_ms;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -6332,6 +6329,10 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss;
} client;
};
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
};
@@ -8740,6 +8741,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
+ * @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
@@ -8747,7 +8749,8 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id);
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
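With the extra parameter, a driver finishing a CAC now reports which link it ran on; a hedged sketch (chandef and link_id are the driver's own state, and per the doc above non-MLO drivers pass 0):

	cfg80211_cac_event(netdev, &chandef, NL80211_RADAR_CAC_FINISHED,
			   GFP_KERNEL, link_id);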
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 9707ab54fdd5..4748680e8c88 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -155,8 +155,8 @@ enum skb_drop_reason {
/** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */
SKB_DROP_REASON_SOCKET_RCVBUFF,
/**
- * @SKB_DROP_REASON_PROTO_MEM: proto memory limition, such as udp packet
- * drop out of udp_memory_allocated.
+ * @SKB_DROP_REASON_PROTO_MEM: proto memory limitation, such as
+ * udp packet drop out of udp_memory_allocated.
*/
SKB_DROP_REASON_PROTO_MEM,
/**
@@ -217,7 +217,7 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TCP_ZEROWINDOW,
/**
- * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data reveived is already
+ * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received is already
* received before (spurious retrans may happened), see
* LINUX_MIB_DELAYEDACKLOST
*/
diff --git a/include/net/dst.h b/include/net/dst.h
index 0aa331bd2fdb..0f303cc60252 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -341,7 +341,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb->dev = dev;
/*
- * Clear hash so that we can recalulate the hash for the
+ * Clear hash so that we can recalculate the hash for the
* encapsulated packet, unless we have already determine the hash
* over the L4 4-tuple.
*/
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index b4a55d2d5e71..1961699598e2 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -102,7 +102,7 @@ int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
* @dst_cache: the cache
*
* No synchronization is enforced: it must be called only when the cache
- * is unsed.
+ * is unused.
*/
void dst_cache_destroy(struct dst_cache *dst_cache);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 4160731dcb6e..84c15402931c 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -163,8 +163,11 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
if (!new_md)
return ERR_PTR(-ENOMEM);
- memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
- sizeof(struct ip_tunnel_info) + md_size);
+ unsafe_memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+ sizeof(struct ip_tunnel_info) + md_size,
+ /* metadata_dst_alloc() reserves room (md_size bytes) for
+ * options right after the ip_tunnel_info struct.
+ */);
#ifdef CONFIG_DST_CACHE
/* Unclone the dst cache if there is one */
if (new_md->u.tun_info.dst_cache.cache) {
diff --git a/include/net/erspan.h b/include/net/erspan.h
index 6cb4cbd6a48f..c6209e7b6c96 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -89,7 +89,7 @@ enum erspan_encap_type {
ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */
- ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */
+ ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */
};
#define ERSPAN_V1_MDSIZE 4
@@ -192,7 +192,7 @@ static inline void erspan_build_header(struct sk_buff *skb,
enc_type = ERSPAN_ENCAP_NOVLAN;
/* If mirrored packet has vlan tag, extract tci and
- * perserve vlan header in the mirrored frame.
+ * preserve vlan header in the mirrored frame.
*/
if (eth->h_proto == htons(ETH_P_8021Q)) {
qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index aa495decec35..bdbe91c609ff 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -11,9 +11,9 @@ struct hwbm_pool {
int frag_size;
/* Number of buffers currently used by this pool */
int buf_num;
- /* constructor called during alocation */
+ /* constructor called during allocation */
int (*construct)(struct hwbm_pool *bm_pool, void *buf);
- /* protect acces to the buffer counter*/
+ /* protect access to the buffer counter */
struct mutex buf_lock;
/* private data */
void *priv;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 533a7337865a..74dd90ff5f12 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -40,7 +40,7 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
*
* The sockhash lock must be held as a reader here.
*/
-struct sock *__inet6_lookup_established(struct net *net,
+struct sock *__inet6_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const struct in6_addr *saddr,
const __be16 sport,
@@ -56,7 +56,7 @@ inet6_ehashfn_t inet6_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
-struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
__be16 sport,
@@ -64,7 +64,7 @@ struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
unsigned short hnum,
inet6_ehashfn_t *ehashfn);
-struct sock *inet6_lookup_listener(struct net *net,
+struct sock *inet6_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -73,7 +73,7 @@ struct sock *inet6_lookup_listener(struct net *net,
const unsigned short hnum,
const int dif, const int sdif);
-struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -82,7 +82,7 @@ struct sock *inet6_lookup_run_sk_lookup(struct net *net,
const u16 hnum, const int dif,
inet6_ehashfn_t *ehashfn);
-static inline struct sock *__inet6_lookup(struct net *net,
+static inline struct sock *__inet6_lookup(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -167,7 +167,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
iif, sdif, refcounted);
}
-struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+struct sock *inet6_lookup(const struct net *net, struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
@@ -175,7 +175,7 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
int inet6_hash(struct sock *sk);
-static inline bool inet6_match(struct net *net, const struct sock *sk,
+static inline bool inet6_match(const struct net *net, const struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
const __portpair ports,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7f1b38458743..5eea47f135a4 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -304,7 +304,7 @@ int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
-struct sock *__inet_lookup_listener(struct net *net,
+struct sock *__inet_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
@@ -351,7 +351,7 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-static inline bool inet_match(struct net *net, const struct sock *sk,
+static inline bool inet_match(const struct net *net, const struct sock *sk,
const __addrpair cookie, const __portpair ports,
int dif, int sdif)
{
@@ -368,7 +368,7 @@ static inline bool inet_match(struct net *net, const struct sock *sk,
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
*/
-struct sock *__inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
@@ -382,13 +382,13 @@ inet_ehashfn_t inet_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
-struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
inet_ehashfn_t *ehashfn);
-struct sock *inet_lookup_run_sk_lookup(struct net *net,
+struct sock *inet_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index f9ddd47dc4f8..394c3b66065e 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -150,7 +150,8 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
return bound_dev_if == dif || bound_dev_if == sdif;
}
-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool inet_sk_bound_dev_eq(const struct net *net,
+ int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f88b68269012..beb533a0e880 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -58,7 +58,7 @@ struct inet_timewait_sock {
#define tw_dr __tw_common.skc_tw_dr
__u32 tw_mark;
- volatile unsigned char tw_substate;
+ unsigned char tw_substate;
unsigned char tw_rcv_wscale;
/* Socket demultiplex comparisons on incoming packets. */
diff --git a/include/net/ip.h b/include/net/ip.h
index c5606cadb1a5..d92d3bc3ec0e 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,7 @@
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>
+#include <net/inet_dscp.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -258,7 +259,9 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
+ u8 dsfield = ipc->tos != -1 ? ipc->tos : READ_ONCE(inet->tos);
+
+ return dsfield & INET_DSCP_MASK;
}
/* datagram.c */
@@ -794,9 +797,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}
-bool icmp_global_allow(void);
-extern int sysctl_icmp_msgs_per_sec;
-extern int sysctl_icmp_msgs_burst;
+bool icmp_global_allow(struct net *net);
+void icmp_global_consume(struct net *net);
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
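The limiter is now split into a check and a commit: icmp_global_allow() asks whether global credit is available, and icmp_global_consume() charges it once a packet is actually emitted. A sketch of the presumed pairing:

	if (icmp_global_allow(net)) {
		/* ... build and transmit the ICMP message ... */
		icmp_global_consume(net);	/* charge only for packets really sent */
	}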
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 72af2f223e59..967e4dc555fa 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,8 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/ip.h>
+#include <linux/in_route.h>
struct fib_config {
u8 fc_dst_len;
@@ -434,6 +436,11 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
#endif /* CONFIG_IP_MULTIPLE_TABLES */
+static inline bool fib_dscp_masked_match(dscp_t dscp, const struct flowi4 *fl4)
+{
+ return dscp == inet_dsfield_to_dscp(RT_TOS(fl4->flowi4_tos));
+}
+
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 1db2417b8ff5..6194fbb564c6 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -573,7 +573,7 @@ static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
return 0;
}
-/* Propogate ECN bits out */
+/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
const struct sk_buff *skb)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 88a8e554f7a1..248bfb26e2af 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -851,7 +851,7 @@ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int
* we should *never* get to this point since that
* would mean the addrs are equal
*
- * However, we do get to it 8) And exacly, when
+ * However, we do get to it 8) And exactly, when
* addresses are equal 8)
*
* ip route add 1111::/128 via ...
@@ -973,7 +973,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
hash = skb_get_hash_flowi6(skb, fl6);
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
hash = rol32(hash, 16);
@@ -1365,4 +1365,16 @@ static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
release_sock(sk);
}
+#define IPV6_ADDR_WORDS 4
+
+static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
+{
+ cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
+}
+
+static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
+{
+ be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
+}
+
#endif /* _NET_IPV6_H */
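The two helpers just wrap the existing array conversions over the four 32-bit words of an IPv6 address; for example, to obtain host-order words a driver can mask and compare (addr is assumed to be a struct in6_addr *):

	u32 words[IPV6_ADDR_WORDS];

	ipv6_addr_be32_to_cpu(words, addr->s6_addr32);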
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 11cefd50704d..8a3465c8c2c5 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -82,7 +82,7 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly;
struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
u32 flags);
- struct sock *(*udp6_lib_lookup)(struct net *net,
+ struct sock *(*udp6_lib_lookup)(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 4d114e6d6d23..dd9e93c12260 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -15,7 +15,7 @@
* To explore any of the IUCV functions, one must first register their
* program using iucv_register(). Once your program has successfully
* completed a register, it can exploit the other functions.
- * For furthur reference on all IUCV functionality, refer to the
+ * For further reference on all IUCV functionality, refer to the
* CP Programming Services book, also available on the web thru
* www.vm.ibm.com/pubs, manual # SC24-6084
*
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index b2cf243ebe44..7af1082ea9a0 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -23,7 +23,7 @@
* to handle wireless statistics.
*
* The initial APIs served us well and has proven a reasonably good design.
- * However, there is a few shortcommings :
+ * However, there are a few shortcomings :
* o No events, everything is a request to the driver.
* o Large ioctl function in driver with gigantic switch statement
* (i.e. spaghetti code).
@@ -38,13 +38,13 @@
* -------------------------------
* The new driver API is just a bunch of standard functions (handlers),
* each handling a specific Wireless Extension. The driver just export
- * the list of handler it supports, and those will be called apropriately.
+ * the list of handlers it supports, and those will be called appropriately.
*
* I tried to keep the main advantage of the previous API (simplicity,
* efficiency and light weight), and also I provide a good dose of backward
* compatibility (most structures are the same, driver can use both API
* simultaneously, ...).
- * Hopefully, I've also addressed the shortcomming of the initial API.
+ * Hopefully, I've also addressed the shortcoming of the initial API.
*
* The advantage of the new API are :
* o Handling of Extensions in driver broken in small contained functions
@@ -84,7 +84,7 @@
/* ---------------------- THE IMPLEMENTATION ---------------------- */
/*
- * Some of the choice I've made are pretty controversials. Defining an
+ * Some of the choices I've made are pretty controversial. Defining an
* API is very much weighting compromises. This goes into some of the
* details and the thinking behind the implementation.
*
@@ -140,7 +140,7 @@
* example to distinguish setting max rate and basic rate), I would
* break the prototype. Using iwreq_data is more flexible.
* 3) Also, the above form is not generic (see above).
- * 4) I don't expect driver developper using the wrong field of the
+ * 4) I don't expect driver developers to use the wrong field of the
* union (Doh !), so static typechecking doesn't add much value.
* 5) Lastly, you can skip the union by doing :
* static int mydriver_ioctl_setrate(struct net_device *dev,
@@ -459,7 +459,7 @@ int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
void wireless_spy_update(struct net_device *dev, unsigned char *address,
struct iw_quality *wstats);
-/************************* INLINE FUNTIONS *************************/
+/************************* INLINE FUNCTIONS *************************/
/*
* Function that are so simple that it's more efficient inlining them
*/
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
index 8b47d3a51cf8..fd0f15d87d80 100644
--- a/include/net/lib80211.h
+++ b/include/net/lib80211.h
@@ -92,7 +92,7 @@ struct lib80211_crypto_ops {
struct lib80211_crypt_data {
struct list_head list; /* delayed deletion list */
- struct lib80211_crypto_ops *ops;
+ const struct lib80211_crypto_ops *ops;
void *priv;
atomic_t refcnt;
};
@@ -113,9 +113,9 @@ struct lib80211_crypt_info {
int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
spinlock_t *lock);
void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
-int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
-int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
-struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
+int lib80211_register_crypto_ops(const struct lib80211_crypto_ops *ops);
+int lib80211_unregister_crypto_ops(const struct lib80211_crypto_ops *ops);
+const struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt);
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
new file mode 100644
index 000000000000..35614f9523f6
--- /dev/null
+++ b/include/net/libeth/tx.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TX_H
+#define __LIBETH_TX_H
+
+#include <linux/skbuff.h>
+
+#include <net/libeth/types.h>
+
+/* Tx buffer completion */
+
+/**
+ * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
+ * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
+ * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
+ * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
+ * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ */
+enum libeth_sqe_type {
+ LIBETH_SQE_EMPTY = 0U,
+ LIBETH_SQE_CTX,
+ LIBETH_SQE_SLAB,
+ LIBETH_SQE_FRAG,
+ LIBETH_SQE_SKB,
+};
+
+/**
+ * struct libeth_sqe - represents a Send Queue Element / Tx buffer
+ * @type: type of the buffer, see the enum above
+ * @rs_idx: index of the last buffer from the batch this one was sent in
+ * @raw: slab buffer to free via kfree()
+ * @skb: &sk_buff to consume
+ * @dma: DMA address to unmap
+ * @len: length of the mapped region to unmap
+ * @nr_frags: number of frags in the frame this buffer belongs to
+ * @packets: number of physical packets sent for this frame
+ * @bytes: number of physical bytes sent for this frame
+ * @priv: driver-private scratchpad
+ */
+struct libeth_sqe {
+ enum libeth_sqe_type type:32;
+ u32 rs_idx;
+
+ union {
+ void *raw;
+ struct sk_buff *skb;
+ };
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+
+ u32 nr_frags;
+ u32 packets;
+ u32 bytes;
+
+ unsigned long priv;
+} __aligned_largest;
+
+/**
+ * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
+ * @p: type or name of the object the driver wants to fit into &libeth_sqe
+ *
+ * Make sure the driver's private data fits into libeth_sqe::priv. To be used
+ * right after its declaration.
+ */
+#define LIBETH_SQE_CHECK_PRIV(p) \
+ static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
+
+/**
+ * struct libeth_cq_pp - completion queue poll params
+ * @dev: &device to perform DMA unmapping
+ * @ss: onstack NAPI stats to fill
+ * @napi: whether it's called from the NAPI context
+ *
+ * libeth uses this structure to access objects needed to perform a full
+ * Tx completion operation without passing lots of arguments and changing
+ * the prototypes each time a new one is added.
+ */
+struct libeth_cq_pp {
+ struct device *dev;
+ struct libeth_sq_napi_stats *ss;
+
+ bool napi;
+};
+
+/**
+ * libeth_tx_complete - perform Tx completion for one SQE
+ * @sqe: SQE to complete
+ * @cp: poll params
+ *
+ * Do Tx completion for all the buffer types, including freeing, unmapping,
+ * updating the stats, etc.
+ */
+static inline void libeth_tx_complete(struct libeth_sqe *sqe,
+ const struct libeth_cq_pp *cp)
+{
+ switch (sqe->type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_SKB:
+ case LIBETH_SQE_FRAG:
+ case LIBETH_SQE_SLAB:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (sqe->type) {
+ case LIBETH_SQE_SKB:
+ cp->ss->packets += sqe->packets;
+ cp->ss->bytes += sqe->bytes;
+
+ napi_consume_skb(sqe->skb, cp->napi);
+ break;
+ case LIBETH_SQE_SLAB:
+ kfree(sqe->raw);
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+#endif /* __LIBETH_TX_H */
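To make the completion flow concrete, here is a minimal sketch of a driver-side cleaning loop built on the helpers above. Only the libeth types and calls (libeth_sqe, libeth_cq_pp, libeth_sq_napi_stats, LIBETH_SQE_CHECK_PRIV, libeth_tx_complete) come from this header; the ring layout and all mydrv_* names are hypothetical:

    /* Driver-private per-SQE scratch must fit libeth_sqe::priv. */
    struct mydrv_sqe_priv {
            u32 flags;
    };
    LIBETH_SQE_CHECK_PRIV(struct mydrv_sqe_priv);

    struct mydrv_sq {
            struct libeth_sqe *sqes;        /* array sized to the ring */
            u32 next_to_clean;
            u32 ring_size;
    };

    static void mydrv_clean_tx(struct mydrv_sq *sq, struct device *dev,
                               u32 done_idx, bool in_napi)
    {
            struct libeth_sq_napi_stats ss = { };
            struct libeth_cq_pp cp = {
                    .dev    = dev,
                    .ss     = &ss,
                    .napi   = in_napi,
            };

            while (sq->next_to_clean != done_idx) {
                    /* Unmaps/frees per SQE type, then resets it to EMPTY */
                    libeth_tx_complete(&sq->sqes[sq->next_to_clean], &cp);
                    sq->next_to_clean = (sq->next_to_clean + 1) % sq->ring_size;
            }
            /* ss.packets and ss.bytes now hold the totals for this batch */
    }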
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
new file mode 100644
index 000000000000..603825e45133
--- /dev/null
+++ b/include/net/libeth/types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TYPES_H
+#define __LIBETH_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_sq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+#endif /* __LIBETH_TYPES_H */
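Because of the DECLARE_FLEX_ARRAY() alias, the same counters can be read by name or walked as an array, which keeps stats-aggregation loops generic as more counters are added. A small sketch under that assumption (the mydrv_* helper is illustrative):

    static void mydrv_add_sq_stats(struct libeth_sq_napi_stats *to,
                                   const struct libeth_sq_napi_stats *from)
    {
            unsigned int i;

            /* Same memory, two views: named fields or the raw[] alias */
            for (i = 0; i < sizeof(*to) / sizeof(u32); i++)
                    to->raw[i] += from->raw[i];
    }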
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 1d55ba7c45be..86681f29bda7 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -254,7 +254,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
}
/**
- * llc_pdu_decode_sa - extracs source address (MAC) of input frame
+ * llc_pdu_decode_sa - extracts source address (MAC) of input frame
* @skb: input skb that the source address must be extracted from.
* @sa: pointer to source address (6 byte array).
*
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0a04eaf5343c..954dff901b69 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -994,8 +994,9 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
- * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
- * due to scanning, not in normal operation on the interface.
+ * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame
+ * which is transmitted due to scanning or offchannel TX, not in normal
+ * operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -1016,7 +1017,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
- IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
+ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};
@@ -2487,7 +2488,7 @@ struct ieee80211_link_sta {
* @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
- u8 addr[ETH_ALEN];
+ u8 addr[ETH_ALEN] __aligned(2);
u16 aid;
u16 max_rx_aggregation_subframes;
bool wme;
@@ -3181,6 +3182,19 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
/**
+ * ieee80211_purge_tx_queue - purge TX skb queue
+ * @hw: the hardware
+ * @skbs: the skbs
+ *
+ * Free a set of transmit skbs. Use this function when device is going to stop
+ * but some transmit skbs without TX status are still queued.
+ * This function does not take the list lock and the caller must hold the
+ * relevant locks to use it.
+ */
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs);
+
+/**
* DOC: Hardware crypto acceleration
*
* mac80211 is capable of taking advantage of many hardware
@@ -6242,6 +6256,24 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
void (*iterator)(void *data,
struct ieee80211_sta *sta),
void *data);
+
+/**
+ * ieee80211_iterate_stations_mtx - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them. This version can only be used while holding the wiphy
+ * mutex.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
+
/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
@@ -6716,8 +6748,11 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @chanctx_conf: Channel context on which radar is detected. Mandatory to
+ * pass a valid pointer during MLO. For non-MLO %NULL can be passed
*/
-void ieee80211_radar_detected(struct ieee80211_hw *hw);
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf);
/**
* ieee80211_chswitch_done - Complete channel switch process
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 4a3a9de9da73..1b5488fa2ff0 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -140,7 +140,7 @@ enum ieee802154_hw_flags {
*
* xmit_sync:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration. This is called by a workqueue and useful for
* synchronous 802.15.4 drivers.
@@ -152,7 +152,7 @@ enum ieee802154_hw_flags {
*
* xmit_async:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration.
* This function should return zero or negative errno.
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index b8a6c7504ee1..f2a5200d8a0f 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -38,9 +38,21 @@ enum TRI_STATE {
#define COMP_ENTRY_SIZE 64
-#define RX_BUFFERS_PER_QUEUE 512
+/* This max value for RX buffers is derived from __alloc_page()'s max page
+ * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages. RX
+ * buffer counts beyond this value are rejected by the __alloc_page() call.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 512
+#define MIN_RX_BUFFERS_PER_QUEUE 128
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
+/* This max value for TX buffers was derived through testing as the maximum
+ * number of allocatable pages supported on the host per guest. TX buffer
+ * counts beyond this value are rejected by the hardware.
+ */
+#define MAX_TX_BUFFERS_PER_QUEUE 16384
+#define DEF_TX_BUFFERS_PER_QUEUE 256
+#define MIN_TX_BUFFERS_PER_QUEUE 128
#define EQ_SIZE (8 * MANA_PAGE_SIZE)
@@ -288,7 +300,7 @@ struct mana_recv_buf_oob {
void *buf_va;
bool from_pool; /* allocated from a page pool */
- /* SGL of the buffer going to be sent has part of the work request. */
+ /* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
@@ -440,6 +452,9 @@ struct mana_port_context {
unsigned int max_queues;
unsigned int num_queues;
+ unsigned int rx_queue_size;
+ unsigned int tx_queue_size;
+
mana_handle_t port_handle;
mana_handle_t pf_filter_handle;
@@ -475,6 +490,8 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
+void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
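Given the new MIN/MAX bounds, an ethtool set_ringparam path would presumably validate requested ring sizes along these lines before applying them to rx_queue_size/tx_queue_size (sketch only; the real handler lives in the mana driver, not this header, and mydrv_* is hypothetical):

    static int mydrv_check_ringparam(u32 rx_pending, u32 tx_pending)
    {
            if (rx_pending < MIN_RX_BUFFERS_PER_QUEUE ||
                rx_pending > MAX_RX_BUFFERS_PER_QUEUE)
                    return -EINVAL;

            if (tx_pending < MIN_TX_BUFFERS_PER_QUEUE ||
                tx_pending > MAX_TX_BUFFERS_PER_QUEUE)
                    return -EINVAL;

            return 0;
    }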
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 0bc4ab03f487..814b5f2e3ed5 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -223,6 +223,8 @@ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
return htonl(0u);
}
+
+void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else
static inline void mptcp_init(void)
@@ -307,6 +309,8 @@ static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct reques
}
static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+
+static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 7a533d5b1d59..3c88d5bc5eed 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -147,11 +147,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
- * int (*is_useropt)(u8 nd_opt_type):
- * This function is called when IPv6 decide RA userspace options. if
- * this function returns 1 then the option given by nd_opt_type will
- * be handled as userspace option additional to the IPv6 options.
- *
* int (*parse_options)(const struct net_device *dev,
* struct nd_opt_hdr *nd_opt,
* struct ndisc_options *ndopts):
@@ -200,7 +195,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* addresses. E.g. 802.15.4 6LoWPAN.
*/
struct ndisc_ops {
- int (*is_useropt)(u8 nd_opt_type);
int (*parse_options)(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts);
@@ -224,15 +218,6 @@ struct ndisc_ops {
};
#if IS_ENABLED(CONFIG_IPV6)
-static inline int ndisc_ops_is_useropt(const struct net_device *dev,
- u8 nd_opt_type)
-{
- if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
- return dev->ndisc_ops->is_useropt(nd_opt_type);
- else
- return 0;
-}
-
static inline int ndisc_ops_parse_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 20c34bd7a077..e67b483cc8bb 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -451,8 +451,8 @@ struct pernet_operations {
/* Following method is called with RTNL held. */
void (*exit_batch_rtnl)(struct list_head *net_exit_list,
struct list_head *dev_kill_list);
- unsigned int *id;
- size_t size;
+ unsigned int * const id;
+ const size_t size;
};
/*
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index aa1716fb0e53..596836abf7bf 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <net/xdp.h>
+#include <net/page_pool/types.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
@@ -25,6 +26,7 @@ struct netdev_rx_queue {
* Readers and writers must hold RTNL
*/
struct napi_struct *napi;
+ struct pp_memory_provider_params mp_params;
} ____cacheline_aligned_in_smp;
/*
@@ -43,7 +45,6 @@ __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
return dev->_rx + rxq;
}
-#ifdef CONFIG_SYSFS
static inline unsigned int
get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
{
@@ -53,5 +54,7 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
BUG_ON(index >= dev->num_rx_queues);
return index;
}
-#endif
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+
#endif
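netdev_rx_queue_restart() pairs naturally with the new mp_params field: once a queue's memory-provider parameters change, the queue must be torn down and re-filled for them to take effect. A hedged call-site sketch, assuming RTNL is held as the struct comment requires (mydrv_* is hypothetical):

    static int mydrv_set_mp(struct net_device *dev, unsigned int rxq_idx,
                            void *mp_priv)
    {
            struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);

            ASSERT_RTNL();  /* readers and writers must hold RTNL */
            rxq->mp_params.mp_priv = mp_priv;

            /* Recreate the queue so the new provider takes effect */
            return netdev_rx_queue_restart(dev, rxq_idx);
    }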
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index e227d997fc71..1b58b5b91ff6 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -15,10 +15,8 @@ struct nf_conncount_list {
unsigned int count; /* length of list */
};
-struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
- unsigned int keylen);
-void nf_conncount_destroy(struct net *net, unsigned int family,
- struct nf_conncount_data *data);
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen);
+void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data);
unsigned int nf_conncount_count(struct net *net,
struct nf_conncount_data *data,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 1bfdd16890fa..49708e7e1339 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -209,6 +209,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
+ * @reg_inited: bitmap of initialised registers
*/
struct nft_ctx {
struct net *net;
@@ -221,6 +222,7 @@ struct nft_ctx {
u8 family;
u8 level;
bool report;
+ DECLARE_BITMAP(reg_inited, NFT_REG32_NUM);
};
enum nft_data_desc_flags {
@@ -254,7 +256,8 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_load(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *sreg, u32 len);
int nft_parse_register_store(const struct nft_ctx *ctx,
const struct nlattr *attr, u8 *dreg,
const struct nft_data *data,
@@ -311,6 +314,7 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
/**
* enum nft_iter_type - nftables set iterator type
*
+ * @NFT_ITER_UNSPEC: unspecified, to catch errors
* @NFT_ITER_READ: read-only iteration over set elements
* @NFT_ITER_UPDATE: iteration under mutex to update set element state
*/
@@ -683,9 +687,8 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* @NFT_SET_EXT_DATA: mapping data
* @NFT_SET_EXT_FLAGS: element flags
* @NFT_SET_EXT_TIMEOUT: element timeout
- * @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
- * @NFT_SET_EXT_EXPRESSIONS: expressions assiciated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
* @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
@@ -695,7 +698,6 @@ enum nft_set_extensions {
NFT_SET_EXT_DATA,
NFT_SET_EXT_FLAGS,
NFT_SET_EXT_TIMEOUT,
- NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
NFT_SET_EXT_EXPRESSIONS,
NFT_SET_EXT_OBJREF,
@@ -807,14 +809,14 @@ static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
}
-static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
-{
- return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
-}
+struct nft_timeout {
+ u64 timeout;
+ u64 expiration;
+};
-static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
+static inline struct nft_timeout *nft_set_ext_timeout(const struct nft_set_ext *ext)
{
- return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
}
static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
@@ -830,8 +832,11 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
u64 tstamp)
{
- return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+ if (!nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) ||
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) == 0)
+ return false;
+
+ return time_after_eq64(tstamp, READ_ONCE(nft_set_ext_timeout(ext)->expiration));
}
static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
@@ -959,8 +964,7 @@ struct nft_expr_ops {
const struct nft_expr *expr,
bool reset);
int (*validate)(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool (*reduce)(struct nft_regs_track *track,
const struct nft_expr *expr);
bool (*gc)(struct net *net,
@@ -1674,6 +1678,7 @@ struct nft_trans_rule {
struct nft_trans_set {
struct nft_trans_binding nft_trans_binding;
+ struct list_head list_trans_newset;
struct nft_set *set;
u32 set_id;
u32 gc_int;
@@ -1744,10 +1749,18 @@ struct nft_trans_table {
#define nft_trans_table_update(trans) \
nft_trans_container_table(trans)->update
+enum nft_trans_elem_flags {
+ NFT_TRANS_UPD_TIMEOUT = (1 << 0),
+ NFT_TRANS_UPD_EXPIRATION = (1 << 1),
+};
+
struct nft_trans_elem {
struct nft_trans nft_trans;
struct nft_set *set;
struct nft_elem_priv *elem_priv;
+ u64 timeout;
+ u64 expiration;
+ u8 update_flags;
bool bound;
};
@@ -1757,6 +1770,12 @@ struct nft_trans_elem {
nft_trans_container_elem(trans)->set
#define nft_trans_elem_priv(trans) \
nft_trans_container_elem(trans)->elem_priv
+#define nft_trans_elem_update_flags(trans) \
+ nft_trans_container_elem(trans)->update_flags
+#define nft_trans_elem_timeout(trans) \
+ nft_trans_container_elem(trans)->timeout
+#define nft_trans_elem_expiration(trans) \
+ nft_trans_container_elem(trans)->expiration
#define nft_trans_elem_set_bound(trans) \
nft_trans_container_elem(trans)->bound
@@ -1875,6 +1894,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
struct nftables_pernet {
struct list_head tables;
struct list_head commit_list;
+ struct list_head commit_set_list;
struct list_head binding_list;
struct list_head module_list;
struct list_head notify_list;
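With timeout and expiration now co-located in struct nft_timeout behind a single extension, both values are read through one accessor, as in this illustrative helper that mirrors the __nft_set_elem_expired() logic above (the helper itself is hypothetical):

    /* Sketch: remaining lifetime of a set element, 0 if expired or eternal. */
    static u64 mydrv_elem_ttl(const struct nft_set_ext *ext, u64 tstamp)
    {
            const struct nft_timeout *t;
            u64 expiration;

            if (!nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
                    return 0;

            t = nft_set_ext_timeout(ext);
            if (READ_ONCE(t->timeout) == 0) /* no timeout configured */
                    return 0;

            expiration = READ_ONCE(t->expiration);
            return time_after_eq64(tstamp, expiration) ? 0 : expiration - tstamp;
    }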
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index faa108b1ba67..5adf6fda11e8 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -36,6 +36,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
* nf_tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
+ * @net: The network namespace.
* @skb: The skb being processed.
* @laddr: IPv4 address to redirect to or zero.
* @lport: TCP port to redirect to or zero.
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 167640b843ef..38cae7113de4 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -21,9 +21,7 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data);
-
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr);
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index ba1238f12a48..d602263590fe 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -41,8 +41,7 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int nft_meta_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool nft_meta_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr);
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 6d9ba62efd75..19060212988a 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -15,8 +15,7 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
int nft_reject_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 654bc777d2a7..529160f76cac 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -30,7 +30,7 @@ struct calipso_doi;
/*
* NetLabel - A management interface for maintaining network packet label
- * mapping tables for explicit packet labling protocols.
+ * mapping tables for explicit packet labeling protocols.
*
* Network protocols such as CIPSO and RIPSO require a label translation layer
* to convert the label on the packet into something meaningful on the host
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e78ce008e07c..db6af207287c 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -827,7 +827,7 @@ nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @attrtype: type of attribute to look for
*
* Returns the first attribute which matches the specified type.
@@ -849,7 +849,7 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in liberal mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -872,7 +872,7 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in strict mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -887,7 +887,7 @@ static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
/**
* nlmsg_validate_deprecated - validate a netlink message including attributes
* @nlh: netlinket message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
@@ -933,7 +933,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
* nlmsg_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
@@ -1034,7 +1034,7 @@ static inline struct sk_buff *nlmsg_new_large(size_t payload)
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
*
- * Corrects the netlink message header to include the appeneded
+ * Corrects the netlink message header to include the appended
* attributes. Only necessary if attributes have been added to
* the message.
*/
@@ -1954,7 +1954,7 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
* @start: container attribute
*
* Corrects the container attribute header to include the all
- * appeneded attributes.
+ * appended attributes.
*
* Returns the total data length of the skb.
*/
@@ -1987,7 +1987,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
*
* Validates all attributes in the nested attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
- * ignored. See documenation of struct nla_policy for more details.
+ * ignored. See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 46cc9b89ac79..8a6e20be4b9d 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -8,6 +8,54 @@
#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H
+#include <linux/mm.h>
+#include <net/net_debug.h>
+
+/* net_iov */
+
+DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
+/* We overload the LSB of the struct page pointer to indicate whether it's
+ * a page or net_iov.
+ */
+#define NET_IOV 0x01UL
+
+struct net_iov {
+ unsigned long __unused_padding;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ struct dmabuf_genpool_chunk_owner *owner;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these fields
+ * without worrying whether the underlying fields belong to a page or net_iov.
+ *
+ * The non-net stack fields of struct page are private to the mm stack and must
+ * never be mirrored to net_iov.
+ */
+#define NET_IOV_ASSERT_OFFSET(pg, iov) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
+NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
+NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NET_IOV_ASSERT_OFFSET
+
+/* netmem */
+
/**
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
@@ -19,20 +67,37 @@
*/
typedef unsigned long __bitwise netmem_ref;
+static inline bool netmem_is_net_iov(const netmem_ref netmem)
+{
+ return (__force unsigned long)netmem & NET_IOV;
+}
+
/* This conversion fails (returns NULL) if the netmem_ref is not struct page
* backed.
- *
- * Currently struct page is the only possible netmem, and this helper never
- * fails.
*/
static inline struct page *netmem_to_page(netmem_ref netmem)
{
+ if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+ return NULL;
+
return (__force struct page *)netmem;
}
-/* Converting from page to netmem is always safe, because a page can always be
- * a netmem.
- */
+static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return (struct net_iov *)((__force unsigned long)netmem &
+ ~NET_IOV);
+
+ DEBUG_NET_WARN_ON_ONCE(true);
+ return NULL;
+}
+
+static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
+{
+ return (__force netmem_ref)((unsigned long)niov | NET_IOV);
+}
+
static inline netmem_ref page_to_netmem(struct page *page)
{
return (__force netmem_ref)page;
@@ -40,17 +105,70 @@ static inline netmem_ref page_to_netmem(struct page *page)
static inline int netmem_ref_count(netmem_ref netmem)
{
+ /* The non-pp refcount of net_iov is always 1. On net_iov, we only
+ * support pp refcounting which uses the pp_ref_count field.
+ */
+ if (netmem_is_net_iov(netmem))
+ return 1;
+
return page_ref_count(netmem_to_page(netmem));
}
-static inline unsigned long netmem_to_pfn(netmem_ref netmem)
+static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
+ if (netmem_is_net_iov(netmem))
+ return 0;
+
return page_to_pfn(netmem_to_page(netmem));
}
+static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
+{
+ return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
+}
+
+static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp;
+}
+
+static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
+{
+ return &__netmem_clear_lsb(netmem)->pp_ref_count;
+}
+
+static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
+{
+ /* NUMA node preference only makes sense if we're allocating
+ * system memory. Memory providers (which give us net_iovs)
+ * choose for us.
+ */
+ if (netmem_is_net_iov(netmem))
+ return true;
+
+ return page_to_nid(netmem_to_page(netmem)) == pref_nid;
+}
+
static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
+ /* niov are never compounded */
+ if (netmem_is_net_iov(netmem))
+ return netmem;
+
return page_to_netmem(compound_head(netmem_to_page(netmem)));
}
+static inline void *netmem_address(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return NULL;
+
+ return page_address(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->dma_addr;
+}
+
#endif /* _NET_NETMEM_H */
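A short round-trip of the LSB tagging, using only the helpers defined above; the demo function itself is hypothetical:

    static void netmem_tagging_demo(struct page *page, struct net_iov *niov)
    {
            netmem_ref pm = page_to_netmem(page);    /* LSB clear */
            netmem_ref nm = net_iov_to_netmem(niov); /* LSB set */

            WARN_ON(netmem_is_net_iov(pm));
            WARN_ON(!netmem_is_net_iov(nm));

            /* Converting back checks the tag; the wrong direction is NULL */
            WARN_ON(netmem_to_page(pm) != page);
            WARN_ON(netmem_to_net_iov(nm) != niov);
            WARN_ON(netmem_address(nm) != NULL); /* net_iov has no kernel VA */
    }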
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 5fcd61ada622..276f622f3516 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -122,7 +122,10 @@ struct netns_ipv4 {
u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-
+ int sysctl_icmp_msgs_per_sec;
+ int sysctl_icmp_msgs_burst;
+ atomic_t icmp_global_credit;
+ u32 icmp_global_stamp;
u32 ip_rt_min_pmtu;
int ip_rt_mtu_expires;
int ip_rt_min_advmss;
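These fields move the global ICMP rate limiter into each netns; conceptually it is a token bucket refilled at sysctl_icmp_msgs_per_sec and capped at sysctl_icmp_msgs_burst. The sketch below paraphrases that idea only; the real icmp_global_allow() closes the update races with atomic compare-and-swap:

    static bool my_icmp_global_allow(struct net *net)
    {
            u32 delta, now = (u32)jiffies;
            int credit, incr, max_burst;

            max_burst = READ_ONCE(net->ipv4.sysctl_icmp_msgs_burst);
            delta = min_t(u32, now - READ_ONCE(net->ipv4.icmp_global_stamp), HZ);

            /* Refill: msgs_per_sec tokens accrue per elapsed second */
            incr = READ_ONCE(net->ipv4.sysctl_icmp_msgs_per_sec) * delta / HZ;
            credit = min(atomic_read(&net->ipv4.icmp_global_credit) + incr,
                         max_burst);
            if (credit <= 0)
                    return false;

            /* Consume one token (racy here; the real code uses cmpxchg) */
            atomic_set(&net->ipv4.icmp_global_credit, credit - 1);
            WRITE_ONCE(net->ipv4.icmp_global_stamp, now);
            return true;
    }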
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 7eff3d981b89..d25cd7a9c5ff 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -125,14 +125,14 @@ struct netns_sctp {
int pf_expose;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
*/
int sndbuf_policy;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_rcvbuf
* 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
*/
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 68463aebcc05..d9fb44e8b321 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,7 +105,7 @@ struct nh_grp_entry_stats {
struct nh_grp_entry {
struct nexthop *nh;
struct nh_grp_entry_stats __percpu *stats;
- u8 weight;
+ u16 weight;
union {
struct {
@@ -192,7 +192,7 @@ struct nh_notifier_single_info {
};
struct nh_notifier_grp_entry_info {
- u8 weight;
+ u16 weight;
struct nh_notifier_single_info nh;
};
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e82f55f543bb..dc36519d16aa 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -332,7 +332,7 @@ struct nci_core_init_rsp_1 {
__le32 nfcc_features;
__u8 num_supported_rf_interfaces;
__u8 supported_rf_interfaces[]; /* variable size array */
- /* continuted in nci_core_init_rsp_2 */
+ /* continued in nci_core_init_rsp_2 */
} __packed;
struct nci_core_init_rsp_2 {
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 3d07abacf08b..3a3781838c67 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -80,7 +80,7 @@ struct nfc_ops {
#define NFC_ATR_REQ_GT_OFFSET 14
/**
- * struct nfc_target - NFC target descriptiom
+ * struct nfc_target - NFC target description
*
* @sens_res: 2 bytes describing the target SENS_RES response, if the target
* is a type A one. The %sens_res most significant byte must be byte 2
@@ -230,10 +230,10 @@ static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
}
/**
- * nfc_set_drvdata - set driver specifc data
+ * nfc_set_drvdata - set driver specific data
*
* @dev: The nfc device
- * @data: Pointer to driver specifc data
+ * @data: Pointer to driver specific data
*/
static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
{
@@ -241,7 +241,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
}
/**
- * nfc_get_drvdata - get driver specifc data
+ * nfc_get_drvdata - get driver specific data
*
* @dev: The nfc device
*/
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 4c752f799957..a994dea74596 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -192,7 +192,7 @@ enum nl802154_iftype {
* @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
* nl802154_wpan_phy_tx_power
* @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level
- * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maxmimum value for cca_ed_level
+ * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maximum value for cca_ed_level
* @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
* @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
* @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 2b43a893c619..793e6fd78bc5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -216,7 +216,7 @@ page_pool_get_dma_dir(const struct page_pool *pool)
static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
- atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
+ atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}
/**
@@ -244,7 +244,7 @@ static inline void page_pool_fragment_page(struct page *page, long nr)
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
- struct page *page = netmem_to_page(netmem);
+ atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -261,19 +261,19 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_ref_count) == nr) {
+ if (atomic_long_read(pp_ref_count) == nr) {
/* As we have ensured nr is always one for constant case using
* the BUILD_BUG_ON(), only need to handle the non-constant case
* here for pp_ref_count draining, which is a rare case.
*/
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ ret = atomic_long_sub_return(nr, pp_ref_count);
WARN_ON(ret < 0);
/* We are the last user here too, reset pp_ref_count back to 1 to
@@ -282,7 +282,7 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return ret;
}
@@ -401,9 +401,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
{
- struct page *page = netmem_to_page(netmem);
-
- dma_addr_t ret = page->dma_addr;
+ dma_addr_t ret = netmem_get_dma_addr(netmem);
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
ret <<= PAGE_SHIFT;
@@ -423,24 +421,6 @@ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
}
-static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
- dma_addr_t addr)
-{
- struct page *page = netmem_to_page(netmem);
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
- page->dma_addr = addr >> PAGE_SHIFT;
-
- /* We assume page alignment to shave off bottom bits,
- * if this "compression" doesn't work we need to drop.
- */
- return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
- }
-
- page->dma_addr = addr;
- return false;
-}
-
/**
* page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
* @pool: &page_pool the @page belongs to
@@ -463,11 +443,6 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
page_pool_get_dma_dir(pool));
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
- return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
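Caller-side, the netmem refcount helpers behave like the page-based ones: fragment once, then each consumer drops its share. A sketch, assuming the netmem came from a pool created with fragmenting enabled (mydrv_* is hypothetical):

    static void mydrv_frag_demo(netmem_ref netmem)
    {
            page_pool_fragment_netmem(netmem, 2);   /* pp_ref_count = 2 */

            /* ...hand the two halves to independent consumers... */

            page_pool_unref_netmem(netmem, 1);      /* returns 1: one ref left */
            if (!page_pool_unref_netmem(netmem, 1)) /* returns 0: drained */
                    pr_debug("last reference dropped, netmem is recyclable\n");
    }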
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 50569fed7868..c022c410abe3 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -20,8 +20,18 @@
* device driver responsibility
*/
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)
+
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
- PP_FLAG_SYSTEM_POOL)
+ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
/*
* Fast allocation side cache array/stack
@@ -57,7 +67,9 @@ struct pp_alloc_cache {
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
* @slow: params with slowpath access only (initialization and Netlink)
* @netdev: netdev this pool will serve (leave as NULL if none or multiple)
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @queue_idx: queue idx this page_pool is being created for.
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ * PP_FLAG_ALLOW_UNREADABLE_NETMEM.
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
@@ -72,6 +84,7 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int queue_idx;
unsigned int flags;
/* private: used by test code only */
void (*init_callback)(netmem_ref netmem, void *arg);
@@ -139,6 +152,10 @@ struct page_pool_stats {
*/
#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+struct pp_memory_provider_params {
+ void *mp_priv;
+};
+
struct page_pool {
struct page_pool_params_fast p;
@@ -197,6 +214,8 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
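A driver opting into unreadable netmem would presumably fill the params like this, remembering that the flag requires queue_idx to be set (the mydrv_* wrapper and the literal pool size are illustrative):

    static struct page_pool *mydrv_create_rx_pool(struct device *dev,
                                                  struct net_device *netdev,
                                                  unsigned int rxq_idx)
    {
            struct page_pool_params pp = {
                    .flags          = PP_FLAG_DMA_MAP |
                                      PP_FLAG_ALLOW_UNREADABLE_NETMEM,
                    .pool_size      = 256,
                    .dev            = dev,
                    .dma_dir        = DMA_FROM_DEVICE,
                    .netdev         = netdev,
                    .queue_idx      = rxq_idx, /* required with the flag */
            };

            return page_pool_create(&pp);
    }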
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 41297bd38dff..4880b3a7aced 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -491,7 +491,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
/**
- * tcf_em_tree_match - evaulate an ematch tree
+ * tcf_em_tree_match - evaluate an ematch tree
*
* @skb: socket buffer of the packet in question
* @tree: ematch tree to be used for evaluation
diff --git a/include/net/red.h b/include/net/red.h
index 802287d52c9e..159a09359fc0 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -40,7 +40,7 @@
max_P should be small (not 1), usually 0.01..0.02 is good value.
max_P is chosen as a number, so that max_P/(th_max-th_min)
- is a negative power of two in order arithmetics to contain
+ is a negative power of two in order arithmetic to contain
only shifts.
@@ -159,7 +159,7 @@ static inline u32 red_maxp(u8 Plog)
static inline void red_set_vars(struct red_vars *v)
{
/* Reset average queue length, the value is strictly bound
- * to the parameters below, reseting hurts a bit but leaving
+ * to the parameters below, resetting hurts a bit but leaving
* it might result in an unreasonable qavg for a while. --TGR
*/
v->qavg = 0;
@@ -340,7 +340,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
{
/*
* NOTE: v->qavg is fixed point number with point at Wlog.
- * The formula below is equvalent to floating point
+ * The formula below is equivalent to floating point
* version:
*
* qavg = qavg*(1-W) + backlog*W;
@@ -375,7 +375,7 @@ static inline int red_mark_probability(const struct red_parms *p,
OK. qR is random number in the interval
(0..1/max_P)*(qth_max-qth_min)
i.e. 0..(2^Plog). If we used floating point
- arithmetics, it would be: (2^Plog)*rnd_num,
+ arithmetic, it would be: (2^Plog)*rnd_num,
where rnd_num is less 1.
Taking into account, that qavg have fixed
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index a103f4c8cf75..6633627f6e76 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -121,7 +121,7 @@ struct regulatory_request {
* @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to
* ensure that passive scan flags and beaconing flags may not be lifted by
* cfg80211 due to regulatory beacon hints. For more information on beacon
- * hints read the documenation for regulatory_hint_found_beacon()
+ * hints read the documentation for regulatory_hint_found_beacon()
* @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference
* that even though they may have programmed their own custom power
* setting prior to wiphy registration, they want to ensure their channel
diff --git a/include/net/route.h b/include/net/route.h
index 93833cfe9c96..1789f1e6640b 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -27,6 +27,7 @@
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/inet_dscp.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -45,7 +46,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk)
static inline __u8 ip_sock_rt_tos(const struct sock *sk)
{
- return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
+ return READ_ONCE(inet_sk(sk)->tos) & INET_DSCP_MASK;
}
struct ip_tunnel_info;
@@ -265,8 +266,6 @@ static inline void ip_rt_put(struct rtable *rt)
dst_release(&rt->dst);
}
-#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
-
extern const __u8 ip_tos2prio[16];
static inline char rt_tos2priority(u8 tos)
diff --git a/include/net/rstreason.h b/include/net/rstreason.h
index 2575c85d7f7a..69cb2e52b7da 100644
--- a/include/net/rstreason.h
+++ b/include/net/rstreason.h
@@ -17,6 +17,12 @@
FN(TCP_ABORT_ON_DATA) \
FN(TCP_TIMEWAIT_SOCKET) \
FN(INVALID_SYN) \
+ FN(TCP_ABORT_ON_CLOSE) \
+ FN(TCP_ABORT_ON_LINGER) \
+ FN(TCP_ABORT_ON_MEMORY) \
+ FN(TCP_STATE) \
+ FN(TCP_KEEPALIVE_TIMEOUT) \
+ FN(TCP_DISCONNECT_WITH_DATA) \
FN(MPTCP_RST_EUNSPEC) \
FN(MPTCP_RST_EMPTCP) \
FN(MPTCP_RST_ERESOURCE) \
@@ -84,6 +90,39 @@ enum sk_rst_reason {
* an error, send a reset"
*/
SK_RST_REASON_INVALID_SYN,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_CLOSE: abort on close
+ * corresponding to LINUX_MIB_TCPABORTONCLOSE
+ */
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_LINGER: abort on linger
+ * corresponding to LINUX_MIB_TCPABORTONLINGER
+ */
+ SK_RST_REASON_TCP_ABORT_ON_LINGER,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_MEMORY: abort on memory
+ * corresponding to LINUX_MIB_TCPABORTONMEMORY
+ */
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY,
+ /**
+ * @SK_RST_REASON_TCP_STATE: abort on tcp state
+ * Please see RFC 9293 for all possible reset conditions
+ */
+ SK_RST_REASON_TCP_STATE,
+ /**
+ * @SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT: keepalive timeout
+ * When all keepalive probes have been exhausted without a response,
+ * the connection has to be reset
+ SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT,
+ /**
+ * @SK_RST_REASON_TCP_DISCONNECT_WITH_DATA: disconnect when write
+ * queue is not empty
+ * The user has written data into the write queue while disconnecting,
+ * so an RST has to be sent.
+ */
+ SK_RST_REASON_TCP_DISCONNECT_WITH_DATA,
/* Copy from include/uapi/linux/mptcp.h.
* These reset fields will not be changed since they adhere to
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a2310fa995f6..84e6b9fd5610 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -28,7 +28,7 @@
#define __net_sctp_h__
/* Header Strategy.
- * Start getting some control over the header file depencies:
+ * Start getting some control over the header file dependencies:
* includes
* constants
* structs
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f24a1bbcb3ef..31248cfdfb23 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -521,7 +521,7 @@ struct sctp_datamsg {
refcount_t refcnt;
/* When is this message no longer interesting to the peer? */
unsigned long expires_at;
- /* Did the messenge fail to send? */
+ /* Did the message fail to send? */
int send_error;
u8 send_failed:1,
can_delay:1, /* should this message be Nagle delayed */
@@ -792,7 +792,7 @@ struct sctp_transport {
*/
hb_sent:1,
- /* Is the Path MTU update pending on this tranport */
+ /* Is the Path MTU update pending on this transport */
pmtu_pending:1,
dst_pending_confirm:1, /* need to confirm neighbour */
@@ -1223,7 +1223,7 @@ enum sctp_endpoint_type {
};
/*
- * A common base class to bridge the implmentation view of a
+ * A common base class to bridge the implementation view of a
* socket (usually listening) endpoint versus an association's
* local endpoint.
* This common structure is useful for several purposes:
@@ -1353,7 +1353,7 @@ struct sctp_endpoint {
struct rcu_head rcu;
};
-/* Recover the outter endpoint structure. */
+/* Recover the outer endpoint structure. */
static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
{
struct sctp_endpoint *ep;
@@ -1906,7 +1906,7 @@ struct sctp_association {
__u32 rwnd_over;
/* Keeps track of rwnd pressure. This happens when we have
- * a window, but not recevie buffer (i.e small packets). This one
+ * a window, but no receive buffer (i.e. small packets). This one
* is released slowly (1 PMTU at a time).
*/
__u32 rwnd_press;
@@ -1994,7 +1994,7 @@ struct sctp_association {
/* ADDIP Section 5.2 Upon reception of an ASCONF Chunk.
*
- * This is needed to implement itmes E1 - E4 of the updated
+ * This is needed to implement items E1 - E4 of the updated
* spec. Here is the justification:
*
* Since the peer may bundle multiple ASCONF chunks toward us,
@@ -2005,7 +2005,7 @@ struct sctp_association {
/* These ASCONF chunks are waiting to be sent.
*
- * These chunaks can't be pushed to outqueue until receiving
+ * These chunks can't be pushed to outqueue until receiving
* ASCONF_ACK for the previous ASCONF indicated by
* addip_last_asconf, so as to guarantee that only one ASCONF
* is in flight at any time.
@@ -2059,13 +2059,13 @@ struct sctp_association {
struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
- * keys are provided out of band by the user applicaton
+ * keys are provided out of band by the user application
* and can't change during the lifetime of the association
*/
struct list_head endpoint_shared_keys;
/* SCTP AUTH:
- * The current generated assocaition shared key (secret)
+ * The current generated association shared key (secret)
*/
struct sctp_auth_bytes *asoc_shared_key;
struct sctp_shared_key *shkey;
@@ -2121,7 +2121,7 @@ enum {
SCTP_ASSOC_EYECATCHER = 0xa550c123,
};
-/* Recover the outter association structure. */
+/* Recover the outer association structure. */
static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
{
struct sctp_association *asoc;
diff --git a/include/net/sock.h b/include/net/sock.h
index cce23ac4d514..c58ca8dd561b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -337,6 +337,7 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
+ * @sk_user_frags: xarray of pages the user is holding a reference on.
*/
struct sock {
/*
@@ -542,6 +543,7 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct xarray sk_user_frags;
};
struct sock_bh_locked {
@@ -1624,7 +1626,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
* lock_sock_fast - fast version of lock_sock
* @sk: socket
*
- * This version should be used for very small section, where process wont block
+ * This version should be used for very small sections, where the process won't block
* return false if fast path is taken:
*
* sk_lock.slock locked, owned = 0, BH disabled
@@ -2546,7 +2548,7 @@ struct sock_skb_cb {
/* Store sock_skb_cb at the end of skb->cb[] so protocol families
* using skb->cb[] would keep using it directly and utilize its
- * alignement guarantee.
+ * alignment guarantee.
*/
#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
sizeof(struct sock_skb_cb)))
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 6ec140b0a61b..6e4faf3ee76f 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -26,7 +26,7 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[]; /* array of sock pointers */
+ struct sock *socks[] __counted_by(max_socks);
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
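__counted_by() ties the flexible array's bound to max_socks so that fortified accessors and UBSAN bounds checking can police indexing at runtime. The general pattern it annotates looks like this (generic sketch, not the reuseport allocation code):

    struct demo_group {
            unsigned int max_socks;                       /* the bound... */
            struct sock *socks[] __counted_by(max_socks); /* ...checked here */
    };

    static struct demo_group *demo_alloc(unsigned int n)
    {
            struct demo_group *g = kzalloc(struct_size(g, socks, n), GFP_KERNEL);

            /* The counter must be set before the array is indexed */
            if (g)
                    g->max_socks = n;
            return g;
    }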
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2aac11e7e1cc..f77f812bfbe7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1069,7 +1069,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
- skb_pure_zcopy_same(to, from));
+ skb_pure_zcopy_same(to, from) &&
+ skb_frags_readable(to) == skb_frags_readable(from));
}
static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
diff --git a/include/net/udp.h b/include/net/udp.h
index c4e05b14b648..61222545ab1c 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -79,7 +79,8 @@ struct udp_table {
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
- struct net *net, unsigned int num)
+ const struct net *net,
+ unsigned int num)
{
return &table->hash[udp_hashfn(net, num, table->mask)];
}
@@ -231,7 +232,7 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only upper 16 bits are relevant in the
* computation for 16 bit port value.
*/
@@ -245,7 +246,7 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
-static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
@@ -296,18 +297,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *));
-struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
-struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
+ __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
-struct sock *udp6_lib_lookup(struct net *net,
+struct sock *udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif);
-struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
diff --git a/include/net/x25.h b/include/net/x25.h
index 597eb53c471e..5e833cfc864e 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -81,7 +81,7 @@ enum {
#define X25_DEFAULT_WINDOW_SIZE 2 /* Default Window Size */
#define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */
-#define X25_DEFAULT_THROUGHPUT 0x0A /* Deafult Throughput */
+#define X25_DEFAULT_THROUGHPUT 0x0A /* Default Throughput */
#define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */
#define X25_SMODULUS 8
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 54cef89f6c1e..b6bfdc6416c7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -67,27 +67,27 @@
- instance of a transformer, struct xfrm_state (=SA)
- template to clone xfrm_state, struct xfrm_tmpl
- SPD is plain linear list of xfrm_policy rules, ordered by priority.
+ SPD is organized as hash table (for policies that meet minimum address prefix
+ length setting, net->xfrm.policy_hthresh). Other policies are stored in
+ lists, sorted into rbtree ordered by destination and source address networks.
+ See net/xfrm/xfrm_policy.c for details.
+
(To be compatible with existing pfkeyv2 implementations,
many rules with priority of 0x7fffffff are allowed to exist and
such rules are ordered in an unpredictable way, thanks to bsd folks.)
- Lookup is plain linear search until the first match with selector.
-
If "action" is "block", then we prohibit the flow, otherwise:
if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
policy entry has list of up to XFRM_MAX_DEPTH transformations,
described by templates xfrm_tmpl. Each template is resolved
to a complete xfrm_state (see below) and we pack a bundle of transformations
- to a dst_entry returned to requestor.
+ to a dst_entry returned to requester.
dst -. xfrm .-> xfrm_state #1
|---. child .-> dst -. xfrm .-> xfrm_state #2
|---. child .-> dst -. xfrm .-> xfrm_state #3
|---. child .-> NULL
- Bundles are cached at xrfm_policy struct (field ->bundles).
-
Resolution of xfrm_tmpl
-----------------------
@@ -526,6 +526,36 @@ struct xfrm_policy_queue {
unsigned long timeout;
};
+/**
+ * struct xfrm_policy - xfrm policy
+ * @xp_net: network namespace the policy lives in
+ * @bydst: hlist node for SPD hash table or rbtree list
+ * @byidx: hlist node for index hash table
+ * @lock: serialize changes to policy structure members
+ * @refcnt: reference count, freed once it reaches 0
+ * @pos: kernel internal tie-breaker to determine age of policy
+ * @timer: timer
+ * @genid: generation, used to invalidate old policies
+ * @priority: priority, set by userspace
+ * @index: policy index (autogenerated)
+ * @if_id: virtual xfrm interface id
+ * @mark: packet mark
+ * @selector: selector
+ * @lft: lifetime configuration data
+ * @curlft: lifetime state
+ * @walk: list head on pernet policy list
+ * @polq: queue to hold packets while an acquire operation is in progress
+ * @bydst_reinsert: policy tree node needs to be merged
+ * @type: XFRM_POLICY_TYPE_MAIN or _SUB
+ * @action: XFRM_POLICY_ALLOW or _BLOCK
+ * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
+ * @xfrm_nr: number of used templates in @xfrm_vec
+ * @family: protocol family
+ * @security: SELinux security label
+ * @xfrm_vec: array of templates to resolve state
+ * @rcu: rcu head, used to defer memory release
+ * @xdo: hardware offload state
+ */
struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
@@ -555,7 +585,6 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
- struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
@@ -1016,7 +1045,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
- u32 if_id; /* interface identifyer */
+ u32 if_id; /* interface identifier */
bool collect_md;
};
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 565a85044541..7dc7b1cc71b5 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
unsigned long last_sg_trim;
void *private;
u8 pinned : 1;
+ u8 revoked : 1;
};
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -150,9 +151,15 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
unsigned long offset,
size_t size, int fd,
int access);
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -196,12 +203,23 @@ ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
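
The new _with_dma_device variant lets a pinned dmabuf umem be mapped for
DMA on a device other than the ib_device's own parent, and
ib_umem_dmabuf_revoke() tears the mapping down early. A minimal sketch of
how a driver might use the pair; the function names, context and the
IB_ACCESS_LOCAL_WRITE choice are illustrative assumptions, not part of
this header:

/* Hypothetical driver path: pin a dmabuf for a secondary DMA device. */
static int example_pin_dmabuf(struct ib_device *ibdev,
			      struct device *dma_dev,
			      unsigned long offset, size_t size, int fd)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(ibdev,
				dma_dev, offset, size, fd,
				IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem_dmabuf))
		return PTR_ERR(umem_dmabuf);

	/* ... build an MR around &umem_dmabuf->umem ... */
	return 0;
}

/* On a teardown path that must not wait for userspace: */
static void example_kill_umem(struct ib_umem_dmabuf *umem_dmabuf)
{
	ib_umem_dmabuf_revoke(umem_dmabuf);	/* unmap and mark revoked */
}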
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6c5712ae559d..aa8ede439905 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2476,7 +2476,7 @@ struct ib_device_ops {
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -4453,6 +4453,8 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
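
ib_device_get_netdev() is the lookup counterpart to ib_device_set_netdev().
A short sketch, assuming the usual convention that the returned net_device
carries a reference the caller must drop with dev_put():

/* Sketch: resolve and print the netdev bound to an IB port. */
static void example_show_netdev(struct ib_device *ibdev, u32 port)
{
	struct net_device *ndev = ib_device_get_netdev(ibdev, port);

	if (!ndev)
		return;
	pr_info("port %u -> %s\n", port, ndev->name);
	dev_put(ndev);	/* assumed: the lookup took a reference */
}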
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c2a79aeee113..326deaf56d5d 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -6,6 +6,8 @@
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
+struct ib_device;
+
enum {
RDMA_NLDEV_ATTR_EMPTY_STRING = 1,
RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
@@ -110,6 +112,16 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
*/
bool rdma_nl_chk_listeners(unsigned int group);
+/**
+ * rdma_nl_notify_event - prepare and send an event message
+ * @ib: the IB device which triggered the event
+ * @port_num: the port number which triggered the event - 0 if unused
+ * @type: the event type
+ * Return: 0 on success or a negative error code
+ */
+int rdma_nl_notify_event(struct ib_device *ib, u32 port_num,
+ enum rdma_nl_notify_event_type type);
+
struct rdma_link_ops {
struct list_head list;
const char *type;
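
A hedged sketch of notifying RDMA netlink listeners from a device event
path; RDMA_REGISTER_EVENT is assumed to be one of the uapi
rdma_nl_notify_event_type values:

/* Sketch: fan a device-registration event out to monitoring clients. */
static void example_notify_register(struct ib_device *ibdev)
{
	int ret = rdma_nl_notify_event(ibdev, 0, RDMA_REGISTER_EVENT);

	if (ret)
		pr_debug("rdma event not delivered: %d\n", ret);
}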
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 4b1216de3f22..2b28a05e492b 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -50,9 +50,7 @@ struct fcoe_ctlr_device {
struct fcoe_sysfs_function_template *f;
struct list_head fcfs;
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
struct mutex lock;
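
Dropping the fixed-size work_q_name[20] buffers follows from letting
alloc_workqueue() format the queue name itself instead of snprintf()-ing
into a scratch array first. A sketch of the presumed replacement; the
exact name format and the ctlr->id field are assumptions:

	ctlr->work_q = alloc_workqueue("ctlr_wq_%d", 0, 0, ctlr->id);
	if (!ctlr->work_q)
		return -ENOMEM;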
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7b196d234626..bd29cdb513a5 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -24,7 +24,6 @@ extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
extern const char *scsi_mlreturn_string(int);
extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
#else
static inline bool
scsi_opcode_sa_name(int cmd, int sa,
@@ -76,12 +75,6 @@ scsi_hostbyte_string(int result)
return NULL;
}
-static inline const char *
-scsi_driverbyte_string(int result)
-{
- return NULL;
-}
-
#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 19a1c5c48935..2b4ab0369ffb 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -677,7 +677,6 @@ struct Scsi_Host {
/*
* Optional work queue to be utilized by the transport
*/
- char work_q_name[20];
struct workqueue_struct *work_q;
/*
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 4b884b8013e0..8e6c60090c62 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -575,9 +575,7 @@ struct fc_host_attrs {
u16 npiv_vports_inuse;
/* work queues for rport state manipulation */
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
/* bsg support */
@@ -654,12 +652,8 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
#define fc_host_npiv_vports_inuse(x) \
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
-#define fc_host_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
#define fc_host_devloss_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index af793f2a0ec4..8f967d15e479 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/types.h>
+struct device;
+
#define QE_NUM_OF_SNUM 256 /* There are 256 serial numbers in QE */
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
@@ -93,8 +95,12 @@ int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
s32 cpm_muram_alloc(unsigned long size, unsigned long align);
+s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
+ unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
@@ -106,6 +112,12 @@ static inline s32 cpm_muram_alloc(unsigned long size,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align)
+{
+ return -ENOSYS;
+}
+
static inline void cpm_muram_free(s32 offset)
{
}
@@ -116,6 +128,13 @@ static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc_fixed(struct device *dev,
+ unsigned long offset,
+ unsigned long size)
+{
+ return -ENOSYS;
+}
+
static inline void __iomem *cpm_muram_addr(unsigned long offset)
{
return NULL;
@@ -172,7 +191,6 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
-struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
extern struct qe_pin *qe_pin_request(struct device *dev, int index);
@@ -233,7 +251,9 @@ static inline int qe_alive_during_sleep(void)
/* we actually use cpm_muram implementation, define this for convenience */
#define qe_muram_init cpm_muram_init
#define qe_muram_alloc cpm_muram_alloc
+#define devm_qe_muram_alloc devm_cpm_muram_alloc
#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
+#define devm_qe_muram_alloc_fixed devm_cpm_muram_alloc_fixed
#define qe_muram_free cpm_muram_free
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
@@ -449,6 +469,7 @@ enum comm_dir {
#define QE_QMC_STOP_TX 0x0000000c
#define QE_QMC_STOP_RX 0x0000000d
#define QE_SS7_SU_FIL_RESET 0x0000000e
+#define QE_PUSHSCHED 0x0000000f
/* jonathbr added from here down for 83xx */
#define QE_RESET_BCS 0x0000000a
#define QE_MCC_INIT_TX_RX_16 0x00000003
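
The devm_ variants tie a muram allocation's lifetime to a struct device,
so the offset is released automatically on driver detach. A minimal
probe-path sketch (the surrounding driver is hypothetical):

static int example_probe(struct platform_device *pdev)
{
	s32 offset;

	offset = devm_cpm_muram_alloc(&pdev->dev, 64, 4);
	if (offset < 0)
		return offset;

	/* use cpm_muram_addr(offset); no explicit cpm_muram_free() */
	return 0;
}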
diff --git a/include/sound/aci.h b/include/sound/aci.h
index 6ebbd4223f12..36a761c9820d 100644
--- a/include/sound/aci.h
+++ b/include/sound/aci.h
@@ -72,6 +72,7 @@
#define ACI_SET_EQ7 0x46 /* ... to Treble */
struct snd_miro_aci {
+ struct snd_card *card;
unsigned long aci_port;
int aci_vendor;
int aci_product;
diff --git a/include/sound/asoundef.h b/include/sound/asoundef.h
index 9fdeac19dadb..09b2c3dffb30 100644
--- a/include/sound/asoundef.h
+++ b/include/sound/asoundef.h
@@ -110,18 +110,22 @@
#define IEC958_AES2_CON_SOURCE_UNSPEC (0<<0) /* unspecified */
#define IEC958_AES2_CON_CHANNEL (15<<4) /* mask - channel number */
#define IEC958_AES2_CON_CHANNEL_UNSPEC (0<<4) /* unspecified */
-#define IEC958_AES3_CON_FS (15<<0) /* mask - sample frequency */
+#define IEC958_AES3_CON_FS ((1<<7) | (15<<0)) /* mask - sample frequency */
#define IEC958_AES3_CON_FS_44100 (0<<0) /* 44.1kHz */
#define IEC958_AES3_CON_FS_NOTID (1<<0) /* non indicated */
#define IEC958_AES3_CON_FS_48000 (2<<0) /* 48kHz */
#define IEC958_AES3_CON_FS_32000 (3<<0) /* 32kHz */
#define IEC958_AES3_CON_FS_22050 (4<<0) /* 22.05kHz */
+#define IEC958_AES3_CON_FS_384000 (5<<0) /* 384kHz */
#define IEC958_AES3_CON_FS_24000 (6<<0) /* 24kHz */
#define IEC958_AES3_CON_FS_88200 (8<<0) /* 88.2kHz */
#define IEC958_AES3_CON_FS_768000 (9<<0) /* 768kHz */
#define IEC958_AES3_CON_FS_96000 (10<<0) /* 96kHz */
#define IEC958_AES3_CON_FS_176400 (12<<0) /* 176.4kHz */
+#define IEC958_AES3_CON_FS_352400 (13<<0) /* 352.4kHz */
#define IEC958_AES3_CON_FS_192000 (14<<0) /* 192kHz */
+#define IEC958_AES3_CON_FS_128000 ((1<<7) | (11<<0)) /* 128kHz */
+#define IEC958_AES3_CON_FS_705600 ((1<<7) | (13<<0)) /* 705.6kHz */
#define IEC958_AES3_CON_CLOCK (3<<4) /* mask - clock accuracy */
#define IEC958_AES3_CON_CLOCK_1000PPM (0<<4) /* 1000 ppm */
#define IEC958_AES3_CON_CLOCK_50PPM (1<<4) /* 50 ppm */
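
Because the 128 kHz and 705.6 kHz codes reuse the low four bits of
existing rates and are distinguished only by bit 7 of the same channel
status byte, the FS mask now spans both fields, and extraction must use
the full mask. A sketch:

/* Sketch: classify the sample-frequency field of channel-status byte 3. */
static bool example_is_fs_128k(unsigned char aes3)
{
	return (aes3 & IEC958_AES3_CON_FS) == IEC958_AES3_CON_FS_128000;
}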
diff --git a/include/sound/control.h b/include/sound/control.h
index c1659036c4a7..e07f6b960641 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -81,7 +81,7 @@ struct snd_kcontrol {
unsigned long private_value;
void *private_data;
void (*private_free)(struct snd_kcontrol *kcontrol);
- struct snd_kcontrol_volatile vd[]; /* volatile data */
+ struct snd_kcontrol_volatile vd[] __counted_by(count); /* volatile data */
};
#define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list)
@@ -140,9 +140,7 @@ int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
-struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
/**
@@ -167,29 +165,6 @@ snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
return snd_ctl_find_id(card, &id);
}
-/**
- * snd_ctl_find_id_mixer_locked - find the control instance with the given name string
- * @card: the card instance
- * @name: the name string
- *
- * Finds the control instance with the given name and
- * @SNDRV_CTL_ELEM_IFACE_MIXER. Other fields are set to zero.
- *
- * This is merely a wrapper to snd_ctl_find_id_locked().
- * The caller must down card->controls_rwsem before calling this function.
- *
- * Return: The pointer of the instance if found, or %NULL if not.
- */
-static inline struct snd_kcontrol *
-snd_ctl_find_id_mixer_locked(struct snd_card *card, const char *name)
-{
- struct snd_ctl_elem_id id = {};
-
- id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strscpy(id.name, name, sizeof(id.name));
- return snd_ctl_find_id_locked(card, &id);
-}
-
int snd_ctl_create(struct snd_card *card);
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn);
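
With the _locked lookup variants gone, callers rely on the plain finders,
which now serialize internally. A sketch (the control name is invented):

static int example_lookup(struct snd_card *card)
{
	struct snd_kcontrol *kctl;

	kctl = snd_ctl_find_id_mixer(card, "Master Playback Volume");
	if (!kctl)
		return -ENOENT;
	/* ... use kctl; no caller-side controls_rwsem handling ... */
	return 0;
}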
diff --git a/include/sound/core.h b/include/sound/core.h
index dfef0c9d4b9f..1f3f5dccd736 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -99,7 +99,7 @@ struct snd_card {
struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
- rwlock_t ctl_files_rwlock; /* ctl_files list lock */
+ rwlock_t controls_rwlock; /* lock for lookup and ctl_files list */
int controls_count; /* count of all controls */
size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
@@ -345,46 +345,8 @@ void release_and_free_resource(struct resource *res);
/* --- */
-/* sound printk debug levels */
-enum {
- SND_PR_ALWAYS,
- SND_PR_DEBUG,
- SND_PR_VERBOSE,
-};
-
-#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
-__printf(4, 5)
-void __snd_printk(unsigned int level, const char *file, int line,
- const char *format, ...);
-#else
-#define __snd_printk(level, file, line, format, ...) \
- printk(format, ##__VA_ARGS__)
-#endif
-
-/**
- * snd_printk - printk wrapper
- * @fmt: format string
- *
- * Works like printk() but prints the file and the line of the caller
- * when configured with CONFIG_SND_VERBOSE_PRINTK.
- */
-#define snd_printk(fmt, ...) \
- __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_SND_DEBUG
/**
- * snd_printd - debug printk
- * @fmt: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG is not set.
- */
-#define snd_printd(fmt, ...) \
- __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-#define _snd_printd(level, fmt, ...) \
- __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
-/**
* snd_BUG - give a BUG warning message and stack trace
*
* Calls WARN() if CONFIG_SND_DEBUG is set.
@@ -393,12 +355,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
- * snd_printd_ratelimit - Suppress high rates of output when
- * CONFIG_SND_DEBUG is enabled.
- */
-#define snd_printd_ratelimit() printk_ratelimit()
-
-/**
* snd_BUG_ON - debugging check macro
* @cond: condition to evaluate
*
@@ -409,11 +365,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#else /* !CONFIG_SND_DEBUG */
-__printf(1, 2)
-static inline void snd_printd(const char *format, ...) {}
-__printf(2, 3)
-static inline void _snd_printd(int level, const char *format, ...) {}
-
#define snd_BUG() do { } while (0)
#define snd_BUG_ON(condition) ({ \
@@ -421,26 +372,8 @@ static inline void _snd_printd(int level, const char *format, ...) {}
unlikely(__ret_warn_on); \
})
-static inline bool snd_printd_ratelimit(void) { return false; }
-
#endif /* CONFIG_SND_DEBUG */
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-/**
- * snd_printdd - debug printk
- * @format: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
- */
-#define snd_printdd(format, ...) \
- __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
-#else
-__printf(1, 2)
-static inline void snd_printdd(const char *format, ...) {}
-#endif
-
-
#define SNDRV_OSS_VERSION ((3<<16)|(8<<8)|(1<<4)|(0)) /* 3.8.1a */
/* for easier backward-porting */
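
With the snd_printk()/snd_printd() family removed, call sites are expected
to move to the standard logging helpers. A sketch of the presumed
migration (card->dev is the card's device pointer):

	/* before: snd_printd("xrun on %s\n", name); */
	dev_dbg(card->dev, "xrun on %s\n", name);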
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index a51acefa785f..94e8185c4795 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -282,9 +282,9 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
return (reg >> 16) == 3;
}
-extern struct regmap_config cs35l56_regmap_i2c;
-extern struct regmap_config cs35l56_regmap_spi;
-extern struct regmap_config cs35l56_regmap_sdw;
+extern const struct regmap_config cs35l56_regmap_i2c;
+extern const struct regmap_config cs35l56_regmap_spi;
+extern const struct regmap_config cs35l56_regmap_sdw;
extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 099569c31fbb..425a3717d77a 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -17,6 +17,7 @@
#define ES1688_HW_UNDEF 0x0003
struct snd_es1688 {
+ struct snd_card *card;
unsigned long port; /* port of ESS chip */
struct resource *res_port;
unsigned long mpu_port; /* MPU-401 port of ESS chip */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 43d524580bd2..9dd475cf4e8c 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -42,17 +42,12 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_SG 3 /* S/G pages */
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif
-/* fallback types, don't use those directly */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
-#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
-#endif
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index ac8f3aef9205..0bf7d25434d7 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -123,6 +123,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
#define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */
#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */
+/* extended rates since 6.12 */
+#define SNDRV_PCM_RATE_12000 (1U<<17) /* 12000Hz */
+#define SNDRV_PCM_RATE_24000 (1U<<18) /* 24000Hz */
+#define SNDRV_PCM_RATE_128000 (1U<<19) /* 128000Hz */
#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
@@ -498,6 +502,9 @@ struct snd_pcm_substream {
/* misc flags */
unsigned int hw_opened: 1;
unsigned int managed_buffer_alloc:1;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ unsigned int xrun_counter; /* number of times an xrun occurred */
+#endif /* CONFIG_SND_PCM_XRUN_DEBUG */
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -1355,48 +1362,6 @@ snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0);
}
-int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
- size_t size, gfp_t gfp_flags);
-int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
-struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-/**
- * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * Allocates the PCM substream buffer using vmalloc(), i.e., the memory is
- * contiguous in kernel virtual space, but not in physical memory. Use this
- * if the buffer is accessed by kernel code but not by device DMA.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-}
-
-/**
- * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * This function works like snd_pcm_lib_alloc_vmalloc_buffer(), but uses
- * vmalloc_32(), i.e., the pages are allocated from 32-bit-addressable memory.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-}
-
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
/**
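
The vmalloc buffer helpers are dropped in favour of the managed buffer
API, which frees the buffer automatically together with the PCM object.
A sketch of the presumed replacement at PCM construction time:

	err = snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					     NULL, 0, 0);
	if (err < 0)
		return err;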
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index c8621671fa70..00c32eed2124 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -86,10 +86,6 @@ static inline size_t snd_seq_event_packet_size(struct snd_seq_event *ev)
/* interface for OSS emulation */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo);
-/* port callback routines */
-void snd_port_init_callback(struct snd_seq_port_callback *p);
-struct snd_seq_port_callback *snd_port_alloc_callback(void);
-
/* port attach/detach */
int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int cap, int type, int midi_channels, int midi_voices, char *portname);
diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h
index 55053557c898..27f7e8a477c2 100644
--- a/include/sound/snd_wavefront.h
+++ b/include/sound/snd_wavefront.h
@@ -137,8 +137,4 @@ extern int snd_wavefront_fx_ioctl (struct snd_hwdep *,
extern int snd_wavefront_fx_open (struct snd_hwdep *, struct file *);
extern int snd_wavefront_fx_release (struct snd_hwdep *, struct file *);
-/* prefix in all snd_printk() delivered messages */
-
-#define LOGNAME "WaveFront: "
-
#endif /* __SOUND_SND_WAVEFRONT_H__ */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 4843b57798f6..daed7123df9d 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -33,6 +33,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
@@ -44,6 +45,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 38ccec4e3fcd..60d3b86a4660 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -62,7 +62,6 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
* @dmic_num: number of SoC- or chipset-attached PDM digital microphones
- * @common_hdmi_codec_drv: use commom HDAudio HDMI codec driver
* @link_mask: SoundWire links enabled on the board
* @links: array of SoundWire link _ADR descriptors, null terminated
* @i2s_link_mask: I2S/TDM links enabled on the board
@@ -70,15 +69,16 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
* @subsystem_vendor: optional PCI SSID vendor value
* @subsystem_device: optional PCI SSID device value
+ * @subsystem_rev: optional PCI SSID revision value
* @subsystem_id_set: true if a value has been written to
* subsystem_vendor and subsystem_device.
+ * @bt_link_mask: BT offload link enabled on the board
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
- bool common_hdmi_codec_drv;
u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
u32 i2s_link_mask;
@@ -86,7 +86,9 @@ struct snd_soc_acpi_mach_params {
struct snd_soc_dai_driver *dai_drivers;
unsigned short subsystem_vendor;
unsigned short subsystem_device;
+ unsigned short subsystem_rev;
bool subsystem_id_set;
+ u32 bt_link_mask;
};
/**
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index 1f4c39922d82..ecc02e955279 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -30,8 +30,6 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
-struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
- const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index bf2e381cd124..61534ac0edd1 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -464,9 +464,6 @@ int snd_soc_component_force_enable_pin_unlocked(
/* component controls */
struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component,
const char * const ctl);
-struct snd_kcontrol *
-snd_soc_component_get_kcontrol_locked(struct snd_soc_component *component,
- const char * const ctl);
int snd_soc_component_notify_control(struct snd_soc_component *component,
const char * const ctl);
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index bbb72ad4c951..0d1b215f24f4 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -219,7 +219,6 @@ void snd_soc_dai_resume(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd, int num);
bool snd_soc_dai_stream_valid(const struct snd_soc_dai *dai, int stream);
-void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link);
void snd_soc_dai_action(struct snd_soc_dai *dai,
int stream, int action);
static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
@@ -240,8 +239,6 @@ int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int rollback);
-int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
- int cmd);
void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
@@ -345,8 +342,7 @@ struct snd_soc_dai_ops {
*/
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
- int (*bespoke_trigger)(struct snd_pcm_substream *, int,
- struct snd_soc_dai *);
+
/*
* For hardware based FIFO caused delay reporting.
* Optional.
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index ebd24753dd00..c6fb350b4b06 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -58,7 +58,6 @@ enum snd_soc_dpcm_state {
enum snd_soc_dpcm_trigger {
SND_SOC_DPCM_TRIGGER_PRE = 0,
SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_BESPOKE,
};
/*
@@ -114,24 +113,6 @@ struct snd_soc_dpcm_runtime {
#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-/* can this BE stop and free */
-int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform a hw_params() */
-int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform prepare */
-int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* is the current PCM operation for this FE ? */
-int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
-
-/* is the current PCM operation for this BE ? */
-int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
/* get the substream for this BE */
struct snd_pcm_substream *
diff --git a/include/sound/soc.h b/include/sound/soc.h
index a8e66bbf932b..e6e359c1a2ac 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -815,6 +815,7 @@ struct snd_soc_dai_link {
/* This DAI link can route to other DAI links at runtime (Frontend) */
unsigned int dynamic:1;
+ /* REMOVE ME */
/* DPCM capture and Playback support */
unsigned int dpcm_capture:1;
unsigned int dpcm_playback:1;
@@ -1206,11 +1207,11 @@ struct snd_soc_pcm_runtime {
/* bit field */
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ unsigned int initialized:1;
- bool initialized;
-
+ /* CPU/Codec/Platform */
int num_components;
- struct snd_soc_component *components[]; /* CPU/Codec/Platform */
+ struct snd_soc_component *components[] __counted_by(num_components);
};
/* see soc_new_pcm_runtime() */
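
__counted_by() ties the flexible components[] array to num_components so
that fortified bounds checks know the element count; the counter must be
assigned before the array is indexed. A reduced sketch of the pattern:

struct example_rtd {
	int num_components;
	struct snd_soc_component *components[] __counted_by(num_components);
};

static struct example_rtd *example_alloc(int n)
{
	struct example_rtd *rtd = kzalloc(struct_size(rtd, components, n),
					  GFP_KERNEL);

	if (!rtd)
		return NULL;
	rtd->num_components = n;	/* set before any components[i] access */
	return rtd;
}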
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
new file mode 100644
index 000000000000..f68c1f193b3b
--- /dev/null
+++ b/include/sound/soc_sdw_utils.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This file incorporates work covered by the following copyright notice:
+ * Copyright (c) 2020 Intel Corporation
+ * Copyright(c) 2024 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef SOC_SDW_UTILS_H
+#define SOC_SDW_UTILS_H
+
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+#define SOC_SDW_MAX_DAI_NUM 8
+#define SOC_SDW_MAX_NO_PROPS 2
+#define SOC_SDW_JACK_JDSRC(quirk) ((quirk) & GENMASK(3, 0))
+
+/* If a CODEC has an optional speaker output, this quirk will enable it */
+#define SOC_SDW_CODEC_SPKR BIT(15)
+/*
+ * If the CODEC has additional devices attached directly to it.
+ *
+ * For the cs42l43:
+ * - 0 - No speaker output
+ * - SOC_SDW_CODEC_SPKR - CODEC internal speaker
+ * - SOC_SDW_SIDECAR_AMPS - 2x Sidecar amplifiers + CODEC internal speaker
+ * - SOC_SDW_CODEC_SPKR | SOC_SDW_SIDECAR_AMPS - Not currently supported
+ */
+#define SOC_SDW_SIDECAR_AMPS BIT(16)
+
+#define SOC_SDW_UNUSED_DAI_ID -1
+#define SOC_SDW_JACK_OUT_DAI_ID 0
+#define SOC_SDW_JACK_IN_DAI_ID 1
+#define SOC_SDW_AMP_OUT_DAI_ID 2
+#define SOC_SDW_AMP_IN_DAI_ID 3
+#define SOC_SDW_DMIC_DAI_ID 4
+
+#define SOC_SDW_DAI_TYPE_JACK 0
+#define SOC_SDW_DAI_TYPE_AMP 1
+#define SOC_SDW_DAI_TYPE_MIC 2
+
+struct asoc_sdw_codec_info;
+
+struct asoc_sdw_dai_info {
+ const bool direction[2]; /* playback & capture support */
+ const char *dai_name;
+ const int dai_type;
+ const int dailink[2]; /* dailink id for each direction */
+ const struct snd_kcontrol_new *controls;
+ const int num_controls;
+ const struct snd_soc_dapm_widget *widgets;
+ const int num_widgets;
+ int (*init)(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+ int (*exit)(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+ int (*rtd_init)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+ bool rtd_init_done; /* Indicates that the rtd_init callback is done */
+ unsigned long quirk;
+};
+
+struct asoc_sdw_codec_info {
+ const int part_id;
+ const int version_id;
+ const char *codec_name;
+ int amp_num;
+ const u8 acpi_id[ACPI_ID_LEN];
+ const bool ignore_internal_dmic;
+ const struct snd_soc_ops *ops;
+ struct asoc_sdw_dai_info dais[SOC_SDW_MAX_DAI_NUM];
+ const int dai_num;
+
+ int (*codec_card_late_probe)(struct snd_soc_card *card);
+
+ int (*count_sidecar)(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+ int (*add_sidecar)(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+};
+
+struct asoc_sdw_mc_private {
+ struct snd_soc_card card;
+ struct snd_soc_jack sdw_headset;
+ struct device *headset_codec_dev; /* only one headset per card */
+ struct device *amp_dev1, *amp_dev2;
+ bool append_dai_type;
+ bool ignore_internal_dmic;
+ void *private;
+ unsigned long mc_quirk;
+ int codec_info_list_count;
+};
+
+struct asoc_sdw_endpoint {
+ struct list_head list;
+
+ u32 link_mask;
+ const char *codec_name;
+ const char *name_prefix;
+ bool include_sidecar;
+
+ struct asoc_sdw_codec_info *codec_info;
+ const struct asoc_sdw_dai_info *dai_info;
+};
+
+struct asoc_sdw_dailink {
+ bool initialised;
+
+ u8 group_id;
+ u32 link_mask[SNDRV_PCM_STREAM_LAST + 1];
+ int num_devs[SNDRV_PCM_STREAM_LAST + 1];
+ struct list_head endpoints;
+};
+
+extern struct asoc_sdw_codec_info codec_info_list[];
+int asoc_sdw_get_codec_info_list_count(void);
+
+int asoc_sdw_startup(struct snd_pcm_substream *substream);
+int asoc_sdw_prepare(struct snd_pcm_substream *substream);
+int asoc_sdw_trigger(struct snd_pcm_substream *substream, int cmd);
+int asoc_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+int asoc_sdw_hw_free(struct snd_pcm_substream *substream);
+void asoc_sdw_shutdown(struct snd_pcm_substream *substream);
+
+const char *asoc_sdw_get_codec_name(struct device *dev,
+ const struct asoc_sdw_codec_info *codec_info,
+ const struct snd_soc_acpi_link_adr *adr_link,
+ int adr_index);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_part(const u64 adr);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_acpi(const u8 *acpi_id);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_dai(const char *dai_name,
+ int *dai_index);
+
+struct snd_soc_dai_link *asoc_sdw_mc_find_codec_dai_used(struct snd_soc_card *card,
+ const char *dai_name);
+
+void asoc_sdw_mc_dailink_exit_loop(struct snd_soc_card *card);
+
+int asoc_sdw_card_late_probe(struct snd_soc_card *card);
+
+void asoc_sdw_init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ struct snd_soc_dai_link_component *cpus, int cpus_num,
+ struct snd_soc_dai_link_component *platform_component,
+ int num_platforms, struct snd_soc_dai_link_component *codecs,
+ int codecs_num, int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_init_simple_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ const char *cpu_dai_name, const char *platform_comp_name,
+ int num_platforms, const char *codec_name,
+ const char *codec_dai_name,
+ int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_count_sdw_endpoints(struct snd_soc_card *card, int *num_devs, int *num_ends);
+
+struct asoc_sdw_dailink *asoc_sdw_find_dailink(struct asoc_sdw_dailink *dailinks,
+ const struct snd_soc_acpi_endpoint *new);
+
+int asoc_sdw_parse_sdw_endpoints(struct snd_soc_card *card,
+ struct asoc_sdw_dailink *soc_dais,
+ struct asoc_sdw_endpoint *soc_ends,
+ int *num_devs);
+
+int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd);
+
+/* DMIC support */
+int asoc_sdw_dmic_init(struct snd_soc_pcm_runtime *rtd);
+
+/* RT711 support */
+int asoc_sdw_rt711_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt711_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT711-SDCA support */
+int asoc_sdw_rt_sdca_jack_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_sdca_jack_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT1308 I2S support */
+extern const struct snd_soc_ops soc_sdw_rt1308_i2s_ops;
+
+/* generic amp support */
+int asoc_sdw_rt_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_amp_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* CS42L43 support */
+int asoc_sdw_cs42l43_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* CS AMP support */
+int asoc_sdw_bridge_cs35l56_count_sidecar(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+int asoc_sdw_bridge_cs35l56_add_sidecar(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+int asoc_sdw_bridge_cs35l56_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* MAXIM codec support */
+int asoc_sdw_maxim_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* dai_link init callbacks */
+int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt700_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt711_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt712_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt722_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l42_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+
+#endif
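
A hypothetical codec_info_list entry, showing how the pieces above fit
together; the part id and DAI name are invented, only the helper and
macro names come from this header:

static struct asoc_sdw_codec_info example_codec_info = {
	.part_id = 0x1234,		/* invented part id */
	.dais = {
		{
			.direction = { true, true },	/* playback, capture */
			.dai_name = "example-sdca-dai",
			.dai_type = SOC_SDW_DAI_TYPE_JACK,
			.dailink = { SOC_SDW_JACK_OUT_DAI_ID,
				     SOC_SDW_JACK_IN_DAI_ID },
			.rtd_init = asoc_sdw_rt_sdca_jack_rtd_init,
		},
	},
	.dai_num = 1,
};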
diff --git a/include/sound/soundfont.h b/include/sound/soundfont.h
index 98ed98d89d6d..8a40cc15f66d 100644
--- a/include/sound/soundfont.h
+++ b/include/sound/soundfont.h
@@ -86,9 +86,11 @@ struct snd_sf_list {
};
/* Prototypes for soundfont.c */
-int snd_soundfont_load(struct snd_sf_list *sflist, const void __user *data,
+int snd_soundfont_load(struct snd_card *card,
+ struct snd_sf_list *sflist, const void __user *data,
long count, int client);
-int snd_soundfont_load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+int snd_soundfont_load_guspatch(struct snd_card *card,
+ struct snd_sf_list *sflist, const char __user *data,
long count);
int snd_soundfont_close_check(struct snd_sf_list *sflist, int client);
diff --git a/include/sound/tas2563-tlv.h b/include/sound/tas2563-tlv.h
new file mode 100644
index 000000000000..bb269b21f460
--- /dev/null
+++ b/include/sound/tas2563-tlv.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563 driver implements flexible and configurable
+// algorithm coefficient settings for one, two, or even multiple
+// TAS2563 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2563_TLV_H__
+#define __TAS2563_TLV_H__
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
+
+/* pow(10, db/20) * pow(2,30) */
+static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
+ { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
+ { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
+ { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
+ { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */
+ { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */
+ { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */
+ { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */
+ { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */
+ { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */
+ { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */
+ { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */
+ { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */
+ { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */
+ { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */
+ { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */
+ { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */
+ { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */
+ { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */
+ { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */
+ { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */
+ { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */
+ { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */
+ { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */
+ { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */
+ { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */
+ { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */
+ { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */
+ { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */
+ { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */
+ { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */
+ { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */
+ { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */
+ { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */
+ { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */
+ { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */
+ { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */
+ { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */
+ { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */
+ { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */
+ { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */
+ { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */
+ { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */
+ { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */
+ { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */
+ { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */
+ { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */
+ { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */
+ { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */
+ { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */
+ { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */
+ { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */
+ { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */
+ { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */
+ { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */
+ { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */
+ { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */
+ { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */
+ { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */
+ { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */
+ { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */
+ { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */
+ { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */
+ { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */
+ { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */
+ { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */
+ { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */
+ { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */
+ { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */
+ { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */
+ { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */
+ { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */
+ { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */
+ { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */
+ { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */
+ { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */
+ { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */
+ { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */
+ { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */
+ { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */
+ { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */
+ { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */
+ { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */
+ { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */
+ { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */
+ { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */
+ { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */
+ { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */
+ { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */
+ { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */
+ { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */
+ { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */
+ { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */
+ { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */
+ { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */
+ { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */
+ { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */
+ { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */
+ { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */
+ { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */
+ { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */
+ { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */
+ { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */
+ { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */
+ { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */
+ { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */
+ { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */
+ { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */
+ { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */
+ { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */
+ { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */
+ { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */
+ { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */
+ { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */
+ { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */
+ { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */
+ { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */
+ { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */
+ { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */
+ { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */
+ { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */
+ { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */
+ { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */
+ { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */
+ { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */
+ { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */
+ { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */
+ { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */
+ { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */
+ { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */
+ { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */
+ { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */
+ { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */
+ { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */
+ { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */
+ { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */
+ { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */
+ { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */
+ { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */
+ { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */
+ { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */
+ { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */
+ { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */
+ { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */
+ { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */
+ { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */
+ { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */
+ { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */
+ { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */
+ { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */
+ { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */
+ { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */
+ { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */
+ { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */
+ { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */
+ { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */
+ { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */
+ { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */
+ { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */
+ { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */
+ { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */
+ { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */
+ { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */
+ { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */
+ { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */
+ { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */
+ { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */
+ { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */
+ { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */
+ { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */
+ { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */
+ { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */
+ { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */
+ { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */
+ { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */
+ { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */
+ { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */
+ { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */
+ { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */
+ { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */
+ { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */
+ { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */
+ { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */
+ { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */
+ { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */
+ { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */
+ { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */
+ { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */
+ { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */
+ { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */
+ { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */
+ { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */
+ { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */
+ { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */
+ { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */
+ { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */
+ { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */
+ { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */
+ { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */
+ { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */
+ { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */
+ { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */
+ { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */
+ { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */
+ { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */
+ { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */
+ { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */
+ { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */
+ { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */
+ { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */
+ { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */
+ { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */
+ { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */
+ { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */
+ { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */
+ { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */
+ { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */
+ { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */
+ { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */
+ { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */
+ { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */
+ { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */
+ { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */
+ { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */
+ { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */
+ { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */
+ { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */
+ { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */
+ { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */
+ { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */
+ { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */
+ { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */
+ { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */
+ { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */
+ { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */
+ { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */
+ { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */
+ { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */
+ { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */
+ { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */
+ { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */
+ { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */
+ { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */
+ { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */
+ { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
+ { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */
+ { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */
+ { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */
+ { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */
+ { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */
+ { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */
+ { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */
+ { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */
+ { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */
+ { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */
+ { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */
+ { 0X7F, 0XFF, 0XFF, 0XFF }, /* 6.0db */
+};
+#endif
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 00fd4d449ff3..d87263e43fdb 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -17,265 +17,5 @@
static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
-static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
-/* pow(10, db/20) * pow(2,30) */
-static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
- { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
- { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
- { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
- { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */
- { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */
- { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */
- { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */
- { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */
- { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */
- { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */
- { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */
- { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */
- { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */
- { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */
- { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */
- { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */
- { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */
- { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */
- { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */
- { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */
- { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */
- { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */
- { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */
- { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */
- { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */
- { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */
- { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */
- { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */
- { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */
- { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */
- { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */
- { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */
- { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */
- { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */
- { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */
- { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */
- { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */
- { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */
- { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */
- { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */
- { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */
- { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */
- { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */
- { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */
- { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */
- { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */
- { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */
- { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */
- { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */
- { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */
- { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */
- { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */
- { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */
- { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */
- { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */
- { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */
- { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */
- { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */
- { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */
- { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */
- { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */
- { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */
- { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */
- { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */
- { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */
- { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */
- { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */
- { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */
- { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */
- { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */
- { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */
- { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */
- { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */
- { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */
- { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */
- { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */
- { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */
- { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */
- { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */
- { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */
- { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */
- { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */
- { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */
- { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */
- { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */
- { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */
- { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */
- { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */
- { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */
- { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */
- { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */
- { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */
- { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */
- { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */
- { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */
- { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */
- { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */
- { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */
- { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */
- { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */
- { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */
- { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */
- { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */
- { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */
- { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */
- { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */
- { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */
- { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */
- { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */
- { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */
- { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */
- { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */
- { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */
- { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */
- { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */
- { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */
- { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */
- { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */
- { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */
- { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */
- { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */
- { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */
- { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */
- { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */
- { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */
- { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */
- { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */
- { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */
- { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */
- { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */
- { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */
- { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */
- { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */
- { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */
- { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */
- { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */
- { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */
- { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */
- { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */
- { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */
- { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */
- { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */
- { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */
- { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */
- { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */
- { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */
- { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */
- { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */
- { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */
- { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */
- { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */
- { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */
- { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */
- { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */
- { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */
- { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */
- { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */
- { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */
- { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */
- { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */
- { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */
- { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */
- { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */
- { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */
- { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */
- { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */
- { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */
- { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */
- { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */
- { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */
- { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */
- { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */
- { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */
- { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */
- { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */
- { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */
- { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */
- { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */
- { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */
- { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */
- { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */
- { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */
- { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */
- { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */
- { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */
- { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */
- { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */
- { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */
- { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */
- { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */
- { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */
- { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */
- { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */
- { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */
- { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */
- { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */
- { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */
- { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */
- { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */
- { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */
- { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */
- { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */
- { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */
- { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */
- { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */
- { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */
- { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */
- { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */
- { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */
- { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */
- { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */
- { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */
- { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */
- { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */
- { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */
- { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */
- { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */
- { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */
- { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */
- { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */
- { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */
- { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */
- { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */
- { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */
- { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */
- { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */
- { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */
- { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */
- { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */
- { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */
- { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */
- { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */
- { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */
- { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */
- { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */
- { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */
- { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */
- { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */
- { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */
- { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */
- { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */
- { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */
- { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */
- { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
- { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */
- { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */
- { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */
- { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */
- { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */
- { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */
- { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */
- { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */
- { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */
- { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */
- { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */
- { 0XFF, 0XFF, 0XFF, 0XFF }, /* 6.0db */
-};
#endif
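
The removed table above maps half-dB steps to 32-bit gain words. The values
are consistent with a Q2.30 fixed-point encoding in which 0x40000000 is unity
gain: the -20.0db row { 0X06, 0X66, 0X66, 0X66 } equals round(10^(-20/20) *
2^30). A minimal sketch for generating such an entry, assuming that encoding
(the final 6.0db row is pinned to 0xFFFFFFFF in the table, apparently as a
full-scale sentinel rather than a computed value):

#include <math.h>
#include <stdint.h>

/* dB gain -> Q2.30 word as the removed table appears to encode it;
 * 0x40000000 represents 0.0 dB (unity gain).
 */
static uint32_t db_to_q230(double db)
{
	return (uint32_t)lround(pow(10.0, db / 20.0) * (double)0x40000000);
}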
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 18161d02a96f..8cd6da0480b7 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -49,12 +49,59 @@
/*I2C Checksum */
#define TASDEVICE_I2CChecksum TASDEVICE_REG(0x0, 0x0, 0x7E)
+/* XM_340 */
+#define TASDEVICE_XM_A1_REG TASDEVICE_REG(0x64, 0x63, 0x3c)
+/* XM_341 */
+#define TASDEVICE_XM_A2_REG TASDEVICE_REG(0x64, 0x63, 0x38)
+
/* Volume control */
#define TAS2563_DVC_LVL TASDEVICE_REG(0x00, 0x02, 0x0C)
#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1A)
#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
+#define TAS2563_IDLE TASDEVICE_REG(0x00, 0x00, 0x3e)
+#define TAS2563_PRM_R0_REG TASDEVICE_REG(0x00, 0x0f, 0x34)
+
+#define TAS2563_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x02, 0x70)
+#define TAS2563_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x02, 0x48)
+
+#define TAS2563_PRM_ENFF_REG TASDEVICE_REG(0x00, 0x0d, 0x54)
+#define TAS2563_PRM_DISTCK_REG TASDEVICE_REG(0x00, 0x0d, 0x58)
+#define TAS2563_PRM_TE_SCTHR_REG TASDEVICE_REG(0x00, 0x0f, 0x60)
+#define TAS2563_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x0d, 0x74)
+#define TAS2563_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x0d, 0x7c)
+/* prm_Int_B0 */
+#define TAS2563_TE_TA1_REG TASDEVICE_REG(0x00, 0x10, 0x0c)
+/* prm_Int_A1 */
+#define TAS2563_TE_TA1_AT_REG TASDEVICE_REG(0x00, 0x10, 0x10)
+/* prm_TE_Beta */
+#define TAS2563_TE_TA2_REG TASDEVICE_REG(0x00, 0x0f, 0x64)
+/* prm_TE_Beta1 */
+#define TAS2563_TE_AT_REG TASDEVICE_REG(0x00, 0x0f, 0x68)
+/* prm_TE_1_Beta1 */
+#define TAS2563_TE_DT_REG TASDEVICE_REG(0x00, 0x0f, 0x70)
+
+#define TAS2781_PRM_INT_MASK_REG TASDEVICE_REG(0x00, 0x00, 0x3b)
+#define TAS2781_PRM_CLK_CFG_REG TASDEVICE_REG(0x00, 0x00, 0x5c)
+#define TAS2781_PRM_RSVD_REG TASDEVICE_REG(0x00, 0x01, 0x19)
+#define TAS2781_PRM_TEST_57_REG TASDEVICE_REG(0x00, 0xfd, 0x39)
+#define TAS2781_PRM_TEST_62_REG TASDEVICE_REG(0x00, 0xfd, 0x3e)
+#define TAS2781_PRM_PVDD_UVLO_REG TASDEVICE_REG(0x00, 0x00, 0x71)
+#define TAS2781_PRM_CHNL_0_REG TASDEVICE_REG(0x00, 0x00, 0x03)
+#define TAS2781_PRM_NG_CFG0_REG TASDEVICE_REG(0x00, 0x00, 0x35)
+#define TAS2781_PRM_IDLE_CH_DET_REG TASDEVICE_REG(0x00, 0x00, 0x66)
+#define TAS2781_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x14, 0x38)
+#define TAS2781_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x14, 0x40)
+#define TAS2781_PRM_SINEGAIN2_REG TASDEVICE_REG(0x00, 0x14, 0x44)
+
+#define TAS2781_TEST_UNLOCK_REG TASDEVICE_REG(0x00, 0xFD, 0x0D)
+#define TAS2781_TEST_PAGE_UNLOCK 0x0D
+
+#define TAS2781_RUNTIME_LATCH_RE_REG TASDEVICE_REG(0x00, 0x00, 0x49)
+#define TAS2781_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x62, 0x48)
+#define TAS2781_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x63, 0x44)
+
#define TASDEVICE_CMD_SING_W 0x1
#define TASDEVICE_CMD_BURST 0x2
#define TASDEVICE_CMD_DELAY 0x3
@@ -70,7 +117,15 @@ enum device_catlog_id {
OTHERS
};
+struct bulk_reg_val {
+ int reg;
+ unsigned char val[4];
+ unsigned char val_len;
+ bool is_locked;
+};
+
struct tasdevice {
+ struct bulk_reg_val *cali_data_backup;
struct tasdevice_fw *cali_data_fmw;
unsigned int dev_addr;
unsigned int err_code;
@@ -81,19 +136,23 @@ struct tasdevice {
bool is_loaderr;
};
-struct tasdevice_irqinfo {
- int irq_gpio;
- int irq;
+struct cali_reg {
+ unsigned int r0_reg;
+ unsigned int r0_low_reg;
+ unsigned int invr0_reg;
+ unsigned int pow_reg;
+ unsigned int tlimit_reg;
};
struct calidata {
unsigned char *data;
unsigned long total_sz;
+ struct cali_reg cali_reg_array;
+ unsigned int cali_dat_sz_per_dev;
};
struct tasdevice_priv {
struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
- struct tasdevice_irqinfo irq_info;
struct tasdevice_rca rcabin;
struct calidata cali_data;
struct tasdevice_fw *fmw;
@@ -115,6 +174,7 @@ struct tasdevice_priv {
unsigned int chip_id;
unsigned int sysclk;
+ int irq;
int cur_prog;
int cur_conf;
int fw_state;
@@ -124,6 +184,7 @@ struct tasdevice_priv {
bool force_fwload_status;
bool playback_started;
bool isacpi;
+ bool is_user_space_calidata;
unsigned int global_addr;
int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
@@ -150,6 +211,8 @@ int tasdevice_init(struct tasdevice_priv *tas_priv);
void tasdevice_remove(struct tasdevice_priv *tas_priv);
int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
+int tasdev_chn_switch(struct tasdevice_priv *tas_priv,
+ unsigned short chn);
int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int *value);
int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
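
The new bulk_reg_val array and the tasdevice_dev_read()/_write() prototypes
above suggest a save/restore flow for the calibration registers. A hedged
sketch of the backup half; the helper name is hypothetical, and it assumes
consecutive register bytes map to consecutive TASDEVICE_REG values:

/* Sketch only: snapshot up to val_len bytes per entry into the backup
 * array declared in struct tasdevice; locked pages are skipped on the
 * assumption they need the TAS2781_TEST_UNLOCK_REG sequence first.
 */
static int tasdev_backup_cali_regs(struct tasdevice_priv *priv,
				   unsigned short chn,
				   struct bulk_reg_val *vals, int n)
{
	unsigned int tmp;
	int i, j, ret;

	for (i = 0; i < n; i++) {
		if (vals[i].is_locked)
			continue;
		for (j = 0; j < vals[i].val_len; j++) {
			ret = tasdevice_dev_read(priv, chn,
						 vals[i].reg + j, &tmp);
			if (ret < 0)
				return ret;
			vals[i].val[j] = tmp & 0xff;
		}
	}
	return 0;
}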
diff --git a/include/sound/ump.h b/include/sound/ump.h
index 91238dabe307..532c2c3ea28e 100644
--- a/include/sound/ump.h
+++ b/include/sound/ump.h
@@ -13,6 +13,15 @@ struct snd_ump_ops;
struct ump_cvt_to_ump;
struct snd_seq_ump_ops;
+struct snd_ump_group {
+ int group; /* group index (0-based) */
+ unsigned int dir_bits; /* directions */
+ bool active; /* activeness */
+	bool valid;		/* valid group (referred to by blocks) */
+ bool is_midi1; /* belongs to a MIDI1 FB */
+ char name[64]; /* group name */
+};
+
struct snd_ump_endpoint {
struct snd_rawmidi core; /* raw UMP access */
@@ -41,6 +50,8 @@ struct snd_ump_endpoint {
struct mutex open_mutex;
+ struct snd_ump_group groups[SNDRV_UMP_MAX_GROUPS]; /* table of groups */
+
#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
spinlock_t legacy_locks[2];
struct snd_rawmidi *legacy_rmidi;
@@ -112,6 +123,7 @@ static inline int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
int snd_ump_receive_ump_val(struct snd_ump_endpoint *ump, u32 val);
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol);
+void snd_ump_update_group_attrs(struct snd_ump_endpoint *ump);
/*
* Some definitions for UMP
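
The new snd_ump_group table is kept per endpoint. A short illustrative
walker over the fields added above (the function itself is hypothetical,
not part of the patch):

#include <sound/ump.h>

static void dump_active_groups(struct snd_ump_endpoint *ump)
{
	int i;

	for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++) {
		const struct snd_ump_group *g = &ump->groups[i];

		if (g->valid && g->active)
			pr_info("Group %d: %s%s\n", g->group + 1, g->name,
				g->is_midi1 ? " (MIDI1)" : "");
	}
}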
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 1ddd3036bdfc..ca87fa6a8135 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -155,7 +155,6 @@ struct vx_core {
unsigned int chip_status;
unsigned int pcm_running;
- struct device *dev;
struct snd_hwdep *hwdep;
struct vx_rmh irq_rmh; /* RMH used in interrupts */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 202fc3680c36..6696dbcc2b96 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <sound/jack.h>
+#include <sound/pcm.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
@@ -212,7 +213,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
),
TP_printk("%s: found %d paths",
- __entry->stream ? "capture" : "playback", __entry->paths)
+ snd_pcm_direction_name(__entry->stream), __entry->paths)
);
TRACE_EVENT(snd_soc_jack_irq,
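
The asoc change swaps an open-coded ternary for snd_pcm_direction_name(),
hence the new <sound/pcm.h> include. A sketch of what that helper is assumed
to resolve to; note the old format printed lowercase "playback"/"capture",
so the trace text casing changes if the helper capitalizes as below:

/* Assumed to mirror the <sound/pcm.h> helper; SNDRV_PCM_STREAM_PLAYBACK
 * is 0 in the ALSA API.
 */
static inline const char *snd_pcm_direction_name(int stream)
{
	return stream == SNDRV_PCM_STREAM_PLAYBACK ? "Playback" : "Capture";
}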
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0a523023bdcc..bf60ad50011e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -674,10 +674,10 @@ TRACE_EVENT(btrfs_finish_ordered_extent,
DECLARE_EVENT_CLASS(btrfs__writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc),
+ TP_ARGS(folio, inode, wbc),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
@@ -695,7 +695,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->index = page->index;
+ __entry->index = folio->index;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
@@ -721,12 +721,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->writeback_index)
);
-DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+DEFINE_EVENT(btrfs__writepage, extent_writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc)
+ TP_ARGS(folio, inode, wbc)
);
TRACE_EVENT(btrfs_writepage_end_io_hook,
@@ -1825,7 +1825,7 @@ TRACE_EVENT(qgroup_update_counters,
TRACE_EVENT(qgroup_update_reserve,
- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup,
s64 diff, int type),
TP_ARGS(fs_info, qgroup, diff, type),
@@ -1851,7 +1851,7 @@ TRACE_EVENT(qgroup_update_reserve,
TRACE_EVENT(qgroup_meta_reserve,
- TP_PROTO(struct btrfs_root *root, s64 diff, int type),
+ TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
TP_ARGS(root, diff, type),
@@ -1874,7 +1874,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TRACE_EVENT(qgroup_meta_convert,
- TP_PROTO(struct btrfs_root *root, s64 diff),
+ TP_PROTO(const struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
new file mode 100644
index 000000000000..f57f05331d73
--- /dev/null
+++ b/include/trace/events/dma.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma
+
+#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <trace/events/mmflags.h>
+
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define decode_dma_data_direction(dir) \
+ __print_symbolic(dir, \
+ { DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+#define decode_dma_attrs(attrs) \
+ __print_flags(attrs, "|", \
+ { DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
+ { DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
+ { DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
+ { DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
+ { DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
+ { DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
+ { DMA_ATTR_NO_WARN, "NO_WARN" }, \
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
+
+DECLARE_EVENT_CLASS(dma_map,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_map, dma_map_page,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_map, dma_map_resource,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DECLARE_EVENT_CLASS(dma_unmap,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->addr,
+ __entry->size,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_unmap, dma_unmap_page,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+TRACE_EVENT(dma_alloc,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, gfp_t flags, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(gfp_t, flags)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx flags=%s attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_free,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_map_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ int ents, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sg, nents, ents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __dynamic_array(u64, dma_addrs, ents)
+ __dynamic_array(unsigned int, lengths, ents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __assign_str(device);
+ for (i = 0; i < nents; i++)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] =
+ sg_phys(sg + i);
+ for (i = 0; i < ents; i++) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg + i);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg + i);
+ }
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_unmap_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sg, nents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, addrs, nents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __assign_str(device);
+ for (i = 0; i < nents; i++)
+ ((u64 *)__get_dynamic_array(addrs))[i] =
+ sg_phys(sg + i);
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(addrs),
+ __get_dynamic_array_len(addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_sync_single,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size)
+);
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_cpu,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DECLARE_EVENT_CLASS(dma_sync_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, dma_addrs, nents)
+ __dynamic_array(unsigned int, lengths, nents)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __assign_str(device);
+ for (i = 0; i < nents; i++) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg + i);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg + i);
+ }
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)))
+);
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_cpu,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_device,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+#endif /* _TRACE_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
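
TRACE_EVENT() and DEFINE_EVENT() above generate trace_dma_map_page(),
trace_dma_alloc() and friends, which the DMA mapping core can call once a
mapping exists. A hedged sketch of a call site (illustrative, not the actual
kernel/dma/mapping.c hook):

#include <trace/events/dma.h>

static dma_addr_t example_map(struct device *dev, struct page *page,
			      size_t offset, size_t size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t addr = phys;	/* pretend direct mapping for the sketch */

	trace_dma_map_page(dev, phys, addr, size, dir, attrs);
	return addr;
}

As with any tracepoint header, exactly one compilation unit must define
CREATE_TRACE_POINTS before including the header so the event bodies are
emitted.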
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index cc5e9b7b2b44..156908641e68 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -91,7 +91,6 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
{ FALLOC_FL_PUNCH_HOLE, "PUNCH_HOLE"}, \
- { FALLOC_FL_NO_HIDE_STALE, "NO_HIDE_STALE"}, \
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ed794b5fefbe..2851c823095b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..f48fe637bfd2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(folio)
);
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+
+ TP_ARGS(mapping, index, last_index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ __field(unsigned long, last_index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ __entry->last_index = last_index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT,
+ ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+ )
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+ TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+ TP_ARGS(mapping, index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT
+ )
+);
+
TRACE_EVENT(filemap_set_wb_err,
TP_PROTO(struct address_space *mapping, errseq_t eseq),
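
The ofs values in the TP_printk bodies above are page-cache indices shifted
into byte offsets. A worked instance, assuming PAGE_SHIFT == 12:

/* index = 3, last_index = 5 -> "ofs=12288-24575" (0x3000..0x5fff) */
loff_t start = (loff_t)3 << 12;
loff_t end = (((loff_t)5 + 1) << 12) - 1;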
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index b108176deb22..ad0e0cf82b9c 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -830,13 +830,13 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
#ifndef show_cause
enum fw_iso_context_completions_cause {
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
- FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
#define show_cause(cause) \
__print_symbolic(cause, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
- { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
)
#endif
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
index 0d88ebf2c980..70323acde1de 100644
--- a/include/trace/events/intel_ifs.h
+++ b/include/trace/events/intel_ifs.h
@@ -35,6 +35,33 @@ TRACE_EVENT(ifs_status,
__entry->status)
);
+TRACE_EVENT(ifs_sbaf,
+
+ TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status),
+
+ TP_ARGS(batch, activate, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, batch )
+ __field( u16, bundle )
+ __field( u16, pgm )
+ ),
+
+ TP_fast_assign(
+ __entry->status = status.data;
+ __entry->batch = batch;
+ __entry->bundle = activate.bundle_idx;
+ __entry->pgm = activate.pgm_idx;
+ ),
+
+ TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx",
+ __entry->batch,
+ __entry->bundle,
+ __entry->pgm,
+ __entry->status)
+);
+
#endif /* _TRACE_IFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index b63d211bd141..bb8a59c6caa2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -71,12 +71,6 @@
#define IF_HAVE_PG_MLOCK(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(_name) ,{1UL << PG_##_name, __stringify(_name)}
-#else
-#define IF_HAVE_PG_UNCACHED(_name)
-#endif
-
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
@@ -89,10 +83,16 @@
#define IF_HAVE_PG_IDLE(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
-#define IF_HAVE_PG_ARCH_X(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#else
+#define IF_HAVE_PG_ARCH_2(_name)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_ARCH_X(_name)
+#define IF_HAVE_PG_ARCH_3(_name)
#endif
#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
@@ -100,7 +100,6 @@
#define __def_pageflag_names \
DEF_PAGEFLAG_NAME(locked), \
DEF_PAGEFLAG_NAME(waiters), \
- DEF_PAGEFLAG_NAME(error), \
DEF_PAGEFLAG_NAME(referenced), \
DEF_PAGEFLAG_NAME(uptodate), \
DEF_PAGEFLAG_NAME(dirty), \
@@ -108,42 +107,31 @@
DEF_PAGEFLAG_NAME(active), \
DEF_PAGEFLAG_NAME(workingset), \
DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(owner_2), \
DEF_PAGEFLAG_NAME(arch_1), \
DEF_PAGEFLAG_NAME(reserved), \
DEF_PAGEFLAG_NAME(private), \
DEF_PAGEFLAG_NAME(private_2), \
DEF_PAGEFLAG_NAME(writeback), \
DEF_PAGEFLAG_NAME(head), \
- DEF_PAGEFLAG_NAME(mappedtodisk), \
DEF_PAGEFLAG_NAME(reclaim), \
DEF_PAGEFLAG_NAME(swapbacked), \
DEF_PAGEFLAG_NAME(unevictable) \
IF_HAVE_PG_MLOCK(mlocked) \
-IF_HAVE_PG_UNCACHED(uncached) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
IF_HAVE_PG_IDLE(young) \
-IF_HAVE_PG_ARCH_X(arch_2) \
-IF_HAVE_PG_ARCH_X(arch_3)
+IF_HAVE_PG_ARCH_2(arch_2) \
+IF_HAVE_PG_ARCH_3(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
-#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
-
-#define __def_pagetype_names \
- DEF_PAGETYPE_NAME(slab), \
- DEF_PAGETYPE_NAME(hugetlb), \
- DEF_PAGETYPE_NAME(offline), \
- DEF_PAGETYPE_NAME(guard), \
- DEF_PAGETYPE_NAME(table), \
- DEF_PAGETYPE_NAME(buddy)
-
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
+#elif defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
@@ -165,7 +153,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
#else
# define IF_HAVE_VM_DROPPABLE(flag, name)
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 606b4a0f92da..76bd42a96815 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -20,6 +20,7 @@
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_read_gaps, "READ-GAPS") \
EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
@@ -33,13 +34,14 @@
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
+ EM(NETFS_READ_GAPS, "RG") \
EM(NETFS_READ_FOR_WRITE, "RW") \
- EM(NETFS_COPY_TO_CACHE, "CC") \
+ EM(NETFS_DIO_READ, "DR") \
EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITETHROUGH, "WT") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
- EM(NETFS_DIO_READ, "DR") \
- E_(NETFS_DIO_WRITE, "DW")
+ EM(NETFS_DIO_WRITE, "DW") \
+ E_(NETFS_PGPRIV2_COPY_TO_CACHE, "2C")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
@@ -60,6 +62,7 @@
E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
+ EM(NETFS_SOURCE_UNKNOWN, "----") \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
@@ -69,15 +72,25 @@
E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_add_donations, "+DON ") \
+ EM(netfs_sreq_trace_added, "ADD ") \
+ EM(netfs_sreq_trace_clear, "CLEAR") \
EM(netfs_sreq_trace_discard, "DSCRD") \
+ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
+ EM(netfs_sreq_trace_donate_to_next, "DON-N") \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_fail, "FAIL ") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_hit_eof, "EOF ") \
+ EM(netfs_sreq_trace_io_progress, "IO ") \
EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_prep_failed, "PRPFL") \
- EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_progress, "PRGRS") \
+ EM(netfs_sreq_trace_reprep_failed, "REPFL") \
EM(netfs_sreq_trace_retry, "RETRY") \
+ EM(netfs_sreq_trace_short, "SHORT") \
+ EM(netfs_sreq_trace_split, "SPLIT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
EM(netfs_sreq_trace_terminated, "TERM ") \
EM(netfs_sreq_trace_write, "WRITE") \
@@ -118,7 +131,7 @@
EM(netfs_sreq_trace_new, "NEW ") \
EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
- EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
EM(netfs_sreq_trace_put_done, "PUT DONE ") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
@@ -129,7 +142,6 @@
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
#define netfs_folio_traces \
- /* The first few correspond to enum netfs_how_to_modify */ \
EM(netfs_folio_is_uptodate, "mod-uptodate") \
EM(netfs_just_prefetch, "mod-prefetch") \
EM(netfs_whole_folio_modify, "mod-whole-f") \
@@ -139,8 +151,9 @@
EM(netfs_flush_content, "flush") \
EM(netfs_streaming_filled_page, "mod-streamw-f") \
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
- /* The rest are for writeback */ \
+ EM(netfs_folio_trace_abandon, "abandon") \
EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
+ EM(netfs_folio_trace_cancel_store, "cancel-store") \
EM(netfs_folio_trace_clear, "clear") \
EM(netfs_folio_trace_clear_cc, "clear-cc") \
EM(netfs_folio_trace_clear_g, "clear-g") \
@@ -155,7 +168,12 @@
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
EM(netfs_folio_trace_not_under_wback, "!wback") \
+ EM(netfs_folio_trace_put, "put") \
+ EM(netfs_folio_trace_read, "read") \
+ EM(netfs_folio_trace_read_done, "read-done") \
EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_read_put, "read-put") \
+ EM(netfs_folio_trace_read_unlock, "read-unlock") \
EM(netfs_folio_trace_redirtied, "redirtied") \
EM(netfs_folio_trace_store, "store") \
EM(netfs_folio_trace_store_copy, "store-copy") \
@@ -168,6 +186,12 @@
EM(netfs_contig_trace_jump, "-->JUMP-->") \
E_(netfs_contig_trace_unlock, "Unlock")
+#define netfs_donate_traces \
+ EM(netfs_trace_donate_tail_to_prev, "tail-to-prev") \
+ EM(netfs_trace_donate_to_prev, "to-prev") \
+ EM(netfs_trace_donate_to_next, "to-next") \
+ E_(netfs_trace_donate_to_deferred_next, "defer-next")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -185,6 +209,7 @@ enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
+enum netfs_donate_trace { netfs_donate_traces } __mode(byte);
#endif
@@ -207,6 +232,7 @@ netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
netfs_folio_traces;
netfs_collect_contig_traces;
+netfs_donate_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -227,6 +253,7 @@ TRACE_EVENT(netfs_read,
TP_STRUCT__entry(
__field(unsigned int, rreq )
__field(unsigned int, cookie )
+ __field(loff_t, i_size )
__field(loff_t, start )
__field(size_t, len )
__field(enum netfs_read_trace, what )
@@ -236,18 +263,19 @@ TRACE_EVENT(netfs_read,
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
+ __entry->i_size = rreq->i_size;
__entry->start = start;
__entry->len = len;
__entry->what = what;
__entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx l=%zx sz=%llx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
__entry->netfs_inode,
- __entry->start, __entry->len)
+ __entry->start, __entry->len, __entry->i_size)
);
TRACE_EVENT(netfs_rreq,
@@ -513,33 +541,6 @@ TRACE_EVENT(netfs_collect,
__entry->start + __entry->len)
);
-TRACE_EVENT(netfs_collect_contig,
- TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to,
- enum netfs_collect_contig_trace type),
-
- TP_ARGS(wreq, to, type),
-
- TP_STRUCT__entry(
- __field(unsigned int, wreq)
- __field(enum netfs_collect_contig_trace, type)
- __field(unsigned long long, contiguity)
- __field(unsigned long long, to)
- ),
-
- TP_fast_assign(
- __entry->wreq = wreq->debug_id;
- __entry->type = type;
- __entry->contiguity = wreq->contiguity;
- __entry->to = to;
- ),
-
- TP_printk("R=%08x %llx -> %llx %s",
- __entry->wreq,
- __entry->contiguity,
- __entry->to,
- __print_symbolic(__entry->type, netfs_collect_contig_traces))
- );
-
TRACE_EVENT(netfs_collect_sreq,
TP_PROTO(const struct netfs_io_request *wreq,
const struct netfs_io_subrequest *subreq),
@@ -611,7 +612,6 @@ TRACE_EVENT(netfs_collect_state,
__field(unsigned int, notes )
__field(unsigned long long, collected_to )
__field(unsigned long long, cleaned_to )
- __field(unsigned long long, contiguity )
),
TP_fast_assign(
@@ -619,12 +619,11 @@ TRACE_EVENT(netfs_collect_state,
__entry->notes = notes;
__entry->collected_to = collected_to;
__entry->cleaned_to = wreq->cleaned_to;
- __entry->contiguity = wreq->contiguity;
),
- TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x",
+ TP_printk("R=%08x col=%llx cln=%llx n=%x",
__entry->wreq, __entry->collected_to,
- __entry->cleaned_to, __entry->contiguity,
+ __entry->cleaned_to,
__entry->notes)
);
@@ -681,6 +680,71 @@ TRACE_EVENT(netfs_collect_stream,
__entry->collected_to, __entry->front)
);
+TRACE_EVENT(netfs_progress,
+ TP_PROTO(const struct netfs_io_subrequest *subreq,
+ unsigned long long start, size_t avail, size_t part),
+
+ TP_ARGS(subreq, start, avail, part),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, subreq)
+ __field(unsigned int, consumed)
+ __field(unsigned int, transferred)
+ __field(unsigned long long, f_start)
+ __field(unsigned int, f_avail)
+ __field(unsigned int, f_part)
+ __field(unsigned char, slot)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = subreq->rreq->debug_id;
+ __entry->subreq = subreq->debug_index;
+ __entry->consumed = subreq->consumed;
+ __entry->transferred = subreq->transferred;
+ __entry->f_start = start;
+ __entry->f_avail = avail;
+ __entry->f_part = part;
+ __entry->slot = subreq->curr_folioq_slot;
+ ),
+
+ TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x",
+ __entry->rreq, __entry->subreq, __entry->f_start,
+ __entry->consumed, __entry->transferred,
+ __entry->f_part, __entry->f_avail, __entry->slot)
+ );
+
+TRACE_EVENT(netfs_donate,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_subrequest *from,
+ const struct netfs_io_subrequest *to,
+ size_t amount,
+ enum netfs_donate_trace trace),
+
+ TP_ARGS(rreq, from, to, amount, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, from)
+ __field(unsigned int, to)
+ __field(unsigned int, amount)
+ __field(enum netfs_donate_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->from = from->debug_index;
+ __entry->to = to ? to->debug_index : -1;
+ __entry->amount = amount;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("R=%08x[%02x] -> [%02x] %s am=%x",
+ __entry->rreq, __entry->from, __entry->to,
+ __print_symbolic(__entry->trace, netfs_donate_traces),
+ __entry->amount)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
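
The netfs tables above rely on the usual EM()/E_() two-pass trick: one list
macro is expanded once to declare the enum and once to build the string
table consumed by __print_symbolic(). A simplified, self-contained
illustration of the pattern:

#define demo_traces \
	EM(demo_trace_start, "START") \
	E_(demo_trace_end, "END  ")

#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a
enum demo_trace { demo_traces };

#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
/* usable as: __print_symbolic(__entry->what, demo_traces) */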
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
),
TP_fast_assign(
- __entry->node = zone_to_nid(zoneref->zone);
- __entry->zone_idx = zoneref->zone_idx;
+ __entry->node = zonelist_node_idx(zoneref);
+ __entry->zone_idx = zonelist_zone_idx(zoneref);
__entry->order = order;
__entry->reclaimable = reclaimable;
__entry->available = available;
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 543e54e432a1..31825ed30032 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -57,12 +57,12 @@ TRACE_EVENT(page_pool_state_release,
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
- __entry->pfn = netmem_to_pfn(netmem);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx release=%u",
__entry->pool, (void *)__entry->netmem,
- __entry->pfn, __entry->release)
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
@@ -83,12 +83,12 @@ TRACE_EVENT(page_pool_state_hold,
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
- __entry->pfn = netmem_to_pfn(netmem);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu, pfn=0x%lx hold=%u",
__entry->pool, (void *)__entry->netmem,
- __entry->pfn, __entry->hold)
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
);
TRACE_EVENT(page_pool_update_nid,
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index 12b35e4ff917..8022701c446d 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -15,7 +15,8 @@ DECLARE_EVENT_CLASS(pwm,
TP_ARGS(pwm, state, err),
TP_STRUCT__entry(
- __field(struct pwm_device *, pwm)
+ __field(unsigned int, chipid)
+ __field(unsigned int, hwpwm)
__field(u64, period)
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
@@ -24,7 +25,8 @@ DECLARE_EVENT_CLASS(pwm,
),
TP_fast_assign(
- __entry->pwm = pwm;
+ __entry->chipid = pwm->chip->id;
+ __entry->hwpwm = pwm->hwpwm;
__entry->period = state->period;
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
@@ -32,8 +34,8 @@ DECLARE_EVENT_CLASS(pwm,
__entry->err = err;
),
- TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
- __entry->pwm, __entry->period, __entry->duty_cycle,
+ TP_printk("pwmchip%u.%u: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
+ __entry->chipid, __entry->hwpwm, __entry->period, __entry->duty_cycle,
__entry->polarity, __entry->enabled, __entry->err)
);
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 31b3e0d3e65f..e81431deaa50 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
- * being in dyntick-idle mode.
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
+ * being in EQS mode.
* context: "USER" or "IDLE" or "IRQ".
- * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
+ * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, oldnesting, newnesting, counter),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long, oldnesting)
__field(long, newnesting)
- __field(int, dynticks)
+ __field(int, counter)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = dynticks;
+ __entry->counter = counter;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
__entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ __entry->counter & 0xfff)
);
/*
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index a96a985c49b3..e6a72646c507 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -2172,6 +2172,29 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
+TRACE_EVENT(svcrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
new file mode 100644
index 000000000000..fe19da7315a9
--- /dev/null
+++ b/include/trace/events/sched_ext.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched_ext
+
+#if !defined(_TRACE_SCHED_EXT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_EXT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sched_ext_dump,
+
+ TP_PROTO(const char *line),
+
+ TP_ARGS(line),
+
+ TP_STRUCT__entry(
+ __string(line, line)
+ ),
+
+ TP_fast_assign(
+ __assign_str(line);
+ ),
+
+ TP_printk("%s",
+ __get_str(line)
+ )
+);
+
+#endif /* _TRACE_SCHED_EXT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 1c8bd8e186b8..a27c4b619dff 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -91,10 +91,10 @@ DEFINE_RST_REASON(FN, FN)
TRACE_EVENT(tcp_send_reset,
TP_PROTO(const struct sock *sk,
- const struct sk_buff *skb,
+ const struct sk_buff *skb__nullable,
const enum sk_rst_reason reason),
- TP_ARGS(sk, skb, reason),
+ TP_ARGS(sk, skb__nullable, reason),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -106,7 +106,7 @@ TRACE_EVENT(tcp_send_reset,
),
TP_fast_assign(
- __entry->skbaddr = skb;
+ __entry->skbaddr = skb__nullable;
__entry->skaddr = sk;
/* Zero means unknown state. */
__entry->state = sk ? sk->sk_state : 0;
@@ -118,13 +118,13 @@ TRACE_EVENT(tcp_send_reset,
const struct inet_sock *inet = inet_sk(sk);
TP_STORE_ADDR_PORTS(__entry, inet, sk);
- } else if (skb) {
- const struct tcphdr *th = (const struct tcphdr *)skb->data;
+ } else if (skb__nullable) {
+ const struct tcphdr *th = (const struct tcphdr *)skb__nullable->data;
/*
* We should reverse the 4-tuple of skb, so later
* it can print the right flow direction of rst.
*/
- TP_STORE_ADDR_PORTS_SKB(skb, th, entry->daddr, entry->saddr);
+ TP_STORE_ADDR_PORTS_SKB(skb__nullable, th, entry->daddr, entry->saddr);
}
__entry->reason = reason;
),
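
Renaming the argument to skb__nullable follows the convention by which the
BPF verifier treats a BTF tracepoint argument as possibly NULL, so tp_btf
programs must test it before dereferencing. A hedged sketch of a consumer
(the reason argument is typed int here for simplicity):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("tp_btf/tcp_send_reset")
int BPF_PROG(on_tcp_send_reset, const struct sock *sk,
	     const struct sk_buff *skb, int reason)
{
	if (!skb)	/* may be NULL, per the __nullable suffix */
		return 0;
	bpf_printk("tcp rst, skb len=%u", skb->len);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";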
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 54e353c9f919..a261e86e61fa 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,15 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_REFERENCED, "I_REFERENCED"} \
+ {I_REFERENCED, "I_REFERENCED"}, \
+ {I_LINKABLE, "I_LINKABLE"}, \
+ {I_WB_SWITCH, "I_WB_SWITCH"}, \
+ {I_OVL_INUSE, "I_OVL_INUSE"}, \
+ {I_CREATING, "I_CREATING"}, \
+ {I_DONTCACHE, "I_DONTCACHE"}, \
+ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \
+ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \
+ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \
)
/* enums need to be exported to user space */
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index 7b221d51133a..c82233e950ac 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -51,6 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
+ { ETIMEDOUT, "TIMEDOUT" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 8ce8a39a1e5f..3b4e3e815602 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -135,6 +135,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
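
A hedged sketch of consuming the new devmem control messages on the receive
path; only the SO_/SCM_DEVMEM_* names come from the header above, while the
MSG_SOCK_DEVMEM flag and the per-frag payload layout are assumptions here:

#include <sys/socket.h>

static void drain_devmem(int fd, struct msghdr *msg)
{
	struct cmsghdr *cm;

	if (recvmsg(fd, msg, MSG_SOCK_DEVMEM) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
		if (cm->cmsg_level != SOL_SOCKET)
			continue;
		if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
			/* data landed in the regular iov */
		} else if (cm->cmsg_type == SCM_DEVMEM_DMABUF) {
			/* frag lives in the bound dma-buf; return its
			 * token later via setsockopt(SO_DEVMEM_DONTNEED)
			 */
		}
	}
}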
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2d84a8052b15..78abd819fd62 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -703,6 +703,31 @@ extern "C" {
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on integrated graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects.
+ */
+#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
+
+/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on discrete graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects. The GEM object must be stored in
+ * contiguous memory with a size aligned to 64KB
+ */
+#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d390011b89b4..c082810c08a8 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -859,6 +859,8 @@ struct drm_color_lut {
/**
* struct drm_plane_size_hint - Plane size hints
+ * @width: The width of the plane in pixels
+ * @height: The height of the plane in pixels
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 3fca72f73861..2377147b6af0 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -88,6 +88,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
+#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
+#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index db232a25189e..b6fbe4988f2e 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -517,7 +517,14 @@ struct drm_xe_query_gt_list {
* available per Dual Sub Slices (DSS). For example a query response
* containing the following in mask:
* ``EU_PER_DSS ff ff 00 00 00 00 00 00``
- * means each DSS has 16 EU.
+ * means each DSS has 16 SIMD8 EUs. This type may be omitted if the device
+ * doesn't have SIMD8 EUs.
+ * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
+ * Units (EU) available per Dual Sub Slices (DSS). For example a query
+ * response containing the following in mask:
+ * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 SIMD16 EUs. This type may be omitted if the device
+ * doesn't have SIMD16 EUs.
*/
struct drm_xe_query_topology_mask {
/** @gt_id: GT ID the mask is associated with */
@@ -527,6 +534,7 @@ struct drm_xe_query_topology_mask {
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
+#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5
/** @type: type of mask */
__u16 type;
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d676ed2b246e..75e21a135483 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -143,6 +143,9 @@
#define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */
#define AUDIT_MAC_CALIPSO_ADD 1418 /* NetLabel: add CALIPSO DOI entry */
#define AUDIT_MAC_CALIPSO_DEL 1419 /* NetLabel: del CALIPSO DOI entry */
+#define AUDIT_IPE_ACCESS 1420 /* IPE denial or grant */
+#define AUDIT_IPE_CONFIG_CHANGE 1421 /* IPE config change */
+#define AUDIT_IPE_POLICY_LOAD 1422 /* IPE policy load */
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
#define AUDIT_LAST_KERN_ANOM_MSG 1799
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 1f7925afad2d..8081df849743 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -23,7 +23,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 5
+#define AUTOFS_PROTO_SUBVERSION 6
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
diff --git a/include/uapi/linux/blkdev.h b/include/uapi/linux/blkdev.h
new file mode 100644
index 000000000000..66373cd1a83a
--- /dev/null
+++ b/include/uapi/linux/blkdev.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_BLKDEV_H
+#define _UAPI_LINUX_BLKDEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * io_uring block file commands, see IORING_OP_URING_CMD.
+ * It's a different number space from ioctl(), reuse the block's code 0x12.
+ */
+#define BLOCK_URING_CMD_DISCARD _IO(0x12, 0)
+
+#endif
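
A hedged sketch of issuing the new command from userspace with liburing.
Passing the byte range through sqe->addr (offset) and sqe->addr3 (length)
reflects the block layer's uring-cmd convention for this command, but treat
that layout as an assumption and verify against the final kernel code:

#include <liburing.h>
#include <linux/blkdev.h>

static int queue_discard(struct io_uring *ring, int bdev_fd,
			 __u64 offset, __u64 nbytes)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, bdev_fd, NULL, 0, 0);
	sqe->cmd_op = BLOCK_URING_CMD_DISCARD;
	sqe->addr = offset;
	sqe->addr3 = nbytes;
	return io_uring_submit(ring);
}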
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 35bcf52dbc65..c6cd7c7aeeee 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2851,7 +2851,7 @@ union bpf_attr {
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
- * **TCP_BPF_RTO_MIN**.
+ * **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
@@ -5519,11 +5519,12 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
- * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * void *bpf_kptr_xchg(void *dst, void *ptr)
* Description
- * Exchange kptr at pointer *map_value* with *ptr*, and return the
- * old value. *ptr* can be NULL, otherwise it must be a referenced
- * pointer which will be released when this helper is called.
+ * Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+ * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+ * it must be a referenced pointer which will be released when this helper
+ * is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@@ -7080,6 +7081,7 @@ enum {
TCP_BPF_SYN = 1005, /* Copy the TCP header */
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+ TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
};
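
A sketch of exercising the new pseudo-option from a BPF_PROG_TYPE_SOCK_OPS
program (assuming the usual libbpf helpers; bpf_sock_ops_cb_flags_set()
requests the callbacks, and the new option reads them back):

    SEC("sockops")
    int sockops_prog(struct bpf_sock_ops *skops)
    {
            int flags = BPF_SOCK_OPS_RTT_CB_FLAG;

            if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
                    bpf_sock_ops_cb_flags_set(skops, flags);
                    /* Read the currently armed callback flags back. */
                    bpf_getsockopt(skops, IPPROTO_TCP,
                                   TCP_BPF_SOCK_OPS_CB_FLAGS,
                                   &flags, sizeof(flags));
            }
            return 1;
    }
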
enum {
@@ -7512,4 +7514,13 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
+/*
+ * Flags to control BPF kfunc behaviour.
+ * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
+ * helper documentation for details.)
+ */
+enum bpf_kfunc_flags {
+ BPF_F_PAD_ZEROS = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
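
A minimal BPF-side sketch of the bpf_kptr_xchg() pattern documented above,
assuming libbpf's __kptr tag and the bpf_obj_new()/bpf_obj_drop() kfunc
wrappers from the kernel selftests' bpf_experimental.h:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct node {
            int val;
    };

    struct map_value {
            struct node __kptr *node;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, struct map_value);
    } kptr_map SEC(".maps");

    SEC("tc")
    int store_node(struct __sk_buff *skb)
    {
            struct map_value *v;
            struct node *new, *old;
            int key = 0;

            v = bpf_map_lookup_elem(&kptr_map, &key);
            if (!v)
                    return 0;

            new = bpf_obj_new(typeof(*new));
            if (!new)
                    return 0;
            new->val = skb->len;

            /* Publish `new`; ownership moves into the map value. */
            old = bpf_kptr_xchg(&v->node, new);
            if (old)
                    bpf_obj_drop(old);      /* release the displaced object */
            return 0;
    }

    char _license[] SEC("license") = "GPL";
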
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index b8e071abaea5..b2af1dddd4d7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -132,6 +132,8 @@ static inline void cec_msg_init(struct cec_msg *msg,
* Set the msg destination to the orig initiator and the msg initiator to the
* orig destination. Note that msg and orig may be the same pointer, in which
* case the change is done in place.
+ *
+ * It also zeroes the reply, timeout and flags fields.
*/
static inline void cec_msg_set_reply_to(struct cec_msg *msg,
struct cec_msg *orig)
@@ -139,7 +141,9 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
/* The destination becomes the initiator and vice versa */
msg->msg[0] = (cec_msg_destination(orig) << 4) |
cec_msg_initiator(orig);
- msg->reply = msg->timeout = 0;
+ msg->reply = 0;
+ msg->timeout = 0;
+ msg->flags = 0;
}
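
For example (a minimal sketch), answering a received message in place
through the CEC_RECEIVE/CEC_TRANSMIT ioctls:

    struct cec_msg msg;

    ioctl(fd, CEC_RECEIVE, &msg);     /* a message expecting a reply */
    cec_msg_set_reply_to(&msg, &msg); /* swap initiator/destination and
                                         zero reply, timeout and flags */
    /* fill in the opcode and payload of the answer, then: */
    ioctl(fd, CEC_TRANSMIT, &msg);
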
/**
@@ -165,6 +169,7 @@ static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg)
/* cec_msg flags field */
#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
#define CEC_MSG_FL_RAW (1 << 1)
+#define CEC_MSG_FL_REPLY_VENDOR_ID (1 << 2)
/* cec_msg tx/rx_status field */
#define CEC_TX_STATUS_OK (1 << 0)
@@ -339,6 +344,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_MONITOR_PIN (1 << 7)
/* CEC_ADAP_G_CONNECTOR_INFO is available */
#define CEC_CAP_CONNECTOR_INFO (1 << 8)
+/* CEC_MSG_FL_REPLY_VENDOR_ID is available */
+#define CEC_CAP_REPLY_VENDOR_ID (1 << 9)
/**
* struct cec_caps - CEC capabilities structure.
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index 0c13d7f1a1bc..b0654ade7b7e 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -210,6 +210,9 @@ enum dpll_a_pin {
DPLL_A_PIN_PHASE_ADJUST,
DPLL_A_PIN_PHASE_OFFSET,
DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
+ DPLL_A_PIN_ESYNC_FREQUENCY,
+ DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED,
+ DPLL_A_PIN_ESYNC_PULSE,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b54b313bcf07..b9935988da5c 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -411,6 +411,7 @@ typedef struct elf64_shdr {
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
/* Old binutils treats 0x203 as a CET state */
#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
+#define NT_X86_XSAVE_LAYOUT 0x205 /* XSAVE layout description */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@@ -441,6 +442,7 @@ typedef struct elf64_shdr {
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
+#define NT_ARM_POE 0x40f /* ARM POE registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4a0a6e703483..c405ed63acfa 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2533,4 +2533,20 @@ struct ethtool_link_settings {
* __u32 map_lp_advertising[link_mode_masks_nwords];
*/
};
+
+/**
+ * enum phy_upstream - Represents the upstream component a given PHY device
+ * is connected to, as in what is on the other end of the MII bus. Most PHYs
+ * will be attached to an Ethernet MAC controller, but in some cases, there's
+ * an intermediate PHY used as a media-converter, which will drive another
+ * MII interface as its output.
+ * @PHY_UPSTREAM_MAC: Upstream component is a MAC (a switch port,
+ * or ethernet controller)
+ * @PHY_UPSTREAM_PHY: Upstream component is a PHY (likely a media converter)
+ */
+enum phy_upstream {
+ PHY_UPSTREAM_MAC,
+ PHY_UPSTREAM_PHY,
+};
+
#endif /* _UAPI_LINUX_ETHTOOL_H */
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 6d5bdcc67631..283305f6b063 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -58,6 +58,7 @@ enum {
ETHTOOL_MSG_MM_GET,
ETHTOOL_MSG_MM_SET,
ETHTOOL_MSG_MODULE_FW_FLASH_ACT,
+ ETHTOOL_MSG_PHY_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -111,6 +112,8 @@ enum {
ETHTOOL_MSG_MM_GET_REPLY,
ETHTOOL_MSG_MM_NTF,
ETHTOOL_MSG_MODULE_FW_FLASH_NTF,
+ ETHTOOL_MSG_PHY_GET_REPLY,
+ ETHTOOL_MSG_PHY_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -134,6 +137,7 @@ enum {
ETHTOOL_A_HEADER_DEV_INDEX, /* u32 */
ETHTOOL_A_HEADER_DEV_NAME, /* string */
ETHTOOL_A_HEADER_FLAGS, /* u32 - ETHTOOL_FLAG_* */
+ ETHTOOL_A_HEADER_PHY_INDEX, /* u32 */
/* add new constants above here */
__ETHTOOL_A_HEADER_CNT,
@@ -556,6 +560,10 @@ enum {
* a regular 100 Ohm cable and a part with the abnormal impedance value
*/
ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH,
+ /* TDR not possible due to high noise level */
+ ETHTOOL_A_CABLE_RESULT_CODE_NOISE,
+ /* TDR resolution not possible / out of distance */
+ ETHTOOL_A_CABLE_RESULT_CODE_RESOLUTION_NOT_POSSIBLE,
};
enum {
@@ -565,10 +573,20 @@ enum {
ETHTOOL_A_CABLE_PAIR_D,
};
+/* Information source for specific results. */
+enum {
+ ETHTOOL_A_CABLE_INF_SRC_UNSPEC,
+ /* Results provided by the Time Domain Reflectometry (TDR) */
+ ETHTOOL_A_CABLE_INF_SRC_TDR,
+ /* Results provided by the Active Link Cable Diagnostic (ALCD) */
+ ETHTOOL_A_CABLE_INF_SRC_ALCD,
+};
+
enum {
ETHTOOL_A_CABLE_RESULT_UNSPEC,
ETHTOOL_A_CABLE_RESULT_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */
ETHTOOL_A_CABLE_RESULT_CODE, /* u8 ETHTOOL_A_CABLE_RESULT_CODE_ */
+ ETHTOOL_A_CABLE_RESULT_SRC, /* u32 ETHTOOL_A_CABLE_INF_SRC_ */
__ETHTOOL_A_CABLE_RESULT_CNT,
ETHTOOL_A_CABLE_RESULT_MAX = (__ETHTOOL_A_CABLE_RESULT_CNT - 1)
@@ -578,6 +596,7 @@ enum {
ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC,
ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */
ETHTOOL_A_CABLE_FAULT_LENGTH_CM, /* u32 */
+ ETHTOOL_A_CABLE_FAULT_LENGTH_SRC, /* u32 ETHTOOL_A_CABLE_INF_SRC_ */
__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT,
ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = (__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT - 1)
@@ -965,6 +984,7 @@ enum {
ETHTOOL_A_RSS_INDIR, /* binary */
ETHTOOL_A_RSS_HKEY, /* binary */
ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
+ ETHTOOL_A_RSS_START_CONTEXT, /* u32 */
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
@@ -1049,6 +1069,22 @@ enum {
ETHTOOL_A_MODULE_FW_FLASH_MAX = (__ETHTOOL_A_MODULE_FW_FLASH_CNT - 1)
};
+enum {
+ ETHTOOL_A_PHY_UNSPEC,
+ ETHTOOL_A_PHY_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PHY_INDEX, /* u32 */
+ ETHTOOL_A_PHY_DRVNAME, /* string */
+ ETHTOOL_A_PHY_NAME, /* string */
+ ETHTOOL_A_PHY_UPSTREAM_TYPE, /* u32 */
+ ETHTOOL_A_PHY_UPSTREAM_INDEX, /* u32 */
+ ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, /* string */
+ ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, /* string */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PHY_CNT,
+ ETHTOOL_A_PHY_MAX = (__ETHTOOL_A_PHY_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/exfat.h b/include/uapi/linux/exfat.h
new file mode 100644
index 000000000000..46d95b16fc4b
--- /dev/null
+++ b/include/uapi/linux/exfat.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2024 Unisoc Technologies Co., Ltd.
+ */
+
+#ifndef _UAPI_LINUX_EXFAT_H
+#define _UAPI_LINUX_EXFAT_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * exfat-specific ioctl commands
+ */
+
+#define EXFAT_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * Flags used by EXFAT_IOC_SHUTDOWN
+ */
+
+#define EXFAT_GOING_DOWN_DEFAULT 0x0 /* default with full sync */
+#define EXFAT_GOING_DOWN_FULLSYNC 0x1 /* going down with full sync */
+#define EXFAT_GOING_DOWN_NOSYNC 0x2 /* going down */
+
+#endif /* _UAPI_LINUX_EXFAT_H */
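
Usage sketch (assuming fd is an open descriptor on the mounted exfat
filesystem, e.g. its mount point):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/exfat.h>

    __u32 flags = EXFAT_GOING_DOWN_FULLSYNC;

    if (ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags) < 0)
            perror("EXFAT_IOC_SHUTDOWN");
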
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index 51398fa57f6c..5810371ed72b 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -2,6 +2,7 @@
#ifndef _UAPI_FALLOC_H_
#define _UAPI_FALLOC_H_
+#define FALLOC_FL_ALLOCATE_RANGE 0x00 /* allocate range */
#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
#define FALLOC_FL_PUNCH_HOLE 0x02 /* de-allocates range */
#define FALLOC_FL_NO_HIDE_STALE 0x04 /* reserved codepoint */
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index c0bcc185fa48..87e2dec79fea 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -16,6 +16,9 @@
#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
+/* Was the file just created? */
+#define F_CREATED_QUERY (F_LINUX_SPECIFIC_BASE + 4)
+
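
A sketch of the intended use; the return convention (1 if this open()
created the file, 0 otherwise) is an assumption, since the header only
names the command:

    #include <fcntl.h>
    #include <unistd.h>

    int fd = open("/tmp/scratch", O_CREAT | O_RDWR | O_CLOEXEC, 0600);

    if (fd >= 0) {
            int created = fcntl(fd, F_CREATED_QUERY, 0);

            if (created > 0) {
                    /* this open() created the file */
            }
            close(fd);
    }
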
/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
@@ -87,37 +90,70 @@
#define DN_ATTRIB 0x00000020 /* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+#define AT_FDCWD -100 /* Special value for dirfd used to
+ indicate openat should use the
+ current working directory. */
+
+
+/* Generic flags for the *at(2) family of syscalls. */
+
+/* Reserved for per-syscall flags 0xff. */
+#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic
+ links. */
+/* Reserved for per-syscall flags 0x200 */
+#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
+#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount
+ traversal. */
+#define AT_EMPTY_PATH 0x1000 /* Allow empty relative
+ pathname to operate on dirfd
+ directly. */
+/*
+ * These flags are currently statx(2)-specific, but they could be made generic
+ * in the future and so they should not be used for other per-syscall flags.
+ */
+#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
+#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
+#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
+#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
+
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+
/*
- * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
- * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
- * unlinkat. The two functions do completely different things and therefore,
- * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
- * faccessat would be undefined behavior and thus treating it equivalent to
- * AT_EACCESS is valid undefined behavior.
+ * Per-syscall flags for the *at(2) family of syscalls.
+ *
+ * These are flags that are so syscall-specific that a user passing these flags
+ * to the wrong syscall is so "clearly wrong" that we can safely call such
+ * usage "undefined behaviour".
+ *
+ * For example, the constants AT_REMOVEDIR and AT_EACCESS have the same value.
+ * AT_EACCESS is meaningful only to faccessat, while AT_REMOVEDIR is meaningful
+ * only to unlinkat. The two functions do completely different things and
+ * therefore, the flags can be allowed to overlap. For example, passing
+ * AT_REMOVEDIR to faccessat would be undefined behavior and thus treating it
+ * equivalent to AT_EACCESS is valid undefined behavior.
+ *
+ * Note for implementers: When picking a new per-syscall AT_* flag, try to
+ * reuse already existing flags first. This leaves us with as many unused bits
+ * as possible, so we can use them for generic bits in the future if necessary.
*/
-#define AT_FDCWD -100 /* Special value used to indicate
- openat should use the current
- working directory. */
-#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+
+/* Flags for renameat2(2) (must match legacy RENAME_* flags). */
+#define AT_RENAME_NOREPLACE 0x0001
+#define AT_RENAME_EXCHANGE 0x0002
+#define AT_RENAME_WHITEOUT 0x0004
+
+/* Flag for faccessat(2). */
#define AT_EACCESS 0x200 /* Test access permitted for
effective IDs, not real IDs. */
+/* Flag for unlinkat(2). */
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
-#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
-#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
-#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
-
-#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
-#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
-#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
-#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
-
-#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+/* Flags for name_to_handle_at(2). */
+#define AT_HANDLE_FID 0x200 /* File handle is needed to compare
+ object identity and may not be
+ usable with open_by_handle_at(2). */
+#define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */
-/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
-#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
- compare object identity and may not
- be usable to open_by_handle_at(2) */
#if defined(__KERNEL__)
#define AT_GETATTR_NOSEC 0x80000000
#endif
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 232df14e1287..a6924dd3aff1 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -67,6 +67,7 @@ enum {
FRA_IP_PROTO, /* ip proto */
FRA_SPORT_RANGE, /* sport */
FRA_DPORT_RANGE, /* dport */
+ FRA_DSCP, /* dscp */
__FRA_MAX
};
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index d08b99d60f6f..f1e99458e29e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -217,6 +217,9 @@
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ *
+ * 7.41
+ * - add FUSE_ALLOW_IDMAP
*/
#ifndef _LINUX_FUSE_H
@@ -252,7 +255,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 40
+#define FUSE_KERNEL_MINOR_VERSION 41
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -421,6 +424,7 @@ struct fuse_file_lock {
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
+ * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -466,6 +470,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+#define FUSE_ALLOW_IDMAP (1ULL << 40)
/**
* CUSE INIT request/reply flags
@@ -984,6 +989,21 @@ struct fuse_fallocate_in {
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
+/**
+ * The kernel will set this value in the
+ * (struct fuse_in_header).{uid,gid} fields
+ * when:
+ * - the fuse daemon enabled FUSE_ALLOW_IDMAP, and
+ * - idmapping information is not available, so the uid/gid
+ * cannot be mapped in accordance with an idmapping.
+ *
+ * Note: idmapping information is always available
+ * for inode creation operations like:
+ * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE,
+ * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT).
+ */
+#define FUSE_INVALID_UIDGID ((uint32_t)(-1))
+
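
A daemon-side sketch of the check this enables (assuming a connection that
negotiated FUSE_ALLOW_IDMAP and a raw request header `in`):

    if (in->uid == FUSE_INVALID_UIDGID || in->gid == FUSE_INVALID_UIDGID) {
            /* No usable caller identity for this request; avoid any
             * decision that depends on the caller's uid/gid. */
    }
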
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index 33ebad81720a..d5ee269864e0 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -46,6 +46,7 @@ struct hidraw_devinfo {
/* The first byte of SOUTPUT and GOUTPUT is the report number */
#define HIDIOCSOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0B, len)
#define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
+#define HIDIOCREVOKE _IOW('H', 0x0D, int) /* Revoke device access */
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 9efc42382fdb..1d2718dd9647 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -230,8 +230,8 @@ struct tpacket_hdr_v1 {
* ts_first_pkt:
* Is always the time-stamp when the block was opened.
* Case a) ZERO packets
- * No packets to deal with but atleast you know the
- * time-interval of this block.
+ * No packets to deal with but at least you know
+ * the time-interval of this block.
* Case b) Non-zero packets
* Use the ts of the first packet in the block.
*
@@ -265,7 +265,8 @@ enum tpacket_versions {
- struct tpacket_hdr
- pad to TPACKET_ALIGNMENT=16
- struct sockaddr_ll
- - Gap, chosen so that packet data (Start+tp_net) alignes to TPACKET_ALIGNMENT=16
+ - Gap, chosen so that packet data (Start+tp_net) aligns to
+ TPACKET_ALIGNMENT=16
- Start+tp_mac: [ Optional MAC header ]
- Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
- Pad to align to TPACKET_ALIGNMENT=16
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index d358add1611c..5d32d53508d9 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -141,7 +141,7 @@ struct in_addr {
*/
#define IP_PMTUDISC_INTERFACE 4
/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
- * fragmented if they exeed the interface mtu
+ * fragmented if they exceed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 50655de04c9b..86bb2e8b17c9 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -143,7 +143,7 @@ enum {
INET_DIAG_SHUTDOWN,
/*
- * Next extenstions cannot be requested in struct inet_diag_req_v2:
+ * Next extensions cannot be requested in struct inet_diag_req_v2:
* its field idiag_ext has only 8 bits.
*/
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index adc2524fd8e3..1fe79e750470 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -440,11 +440,21 @@ struct io_uring_cqe {
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinguish
* them from sends.
+ * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
+ * more completions. In other words, the buffer is being
+ * partially consumed, and will be used by the kernel for
+ * more completions. This is only set for buffers used via
+ * the incremental buffer consumption, as provided by
+ * a ring buffer setup with IOU_PBUF_RING_INC. For any
+ * other provided buffer type, any buffer passed back in a
+ * completion is automatically returned to the
+ * application.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
+#define IORING_CQE_F_BUF_MORE (1U << 4)
#define IORING_CQE_BUFFER_SHIFT 16
@@ -507,6 +517,7 @@ struct io_cqring_offsets {
#define IORING_ENTER_SQ_WAIT (1U << 2)
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
+#define IORING_ENTER_ABS_TIMER (1U << 5)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -542,6 +553,7 @@ struct io_uring_params {
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
+#define IORING_FEAT_MIN_TIMEOUT (1U << 15)
/*
* io_uring_register(2) opcodes and arguments
@@ -595,6 +607,11 @@ enum io_uring_register_op {
IORING_REGISTER_NAPI = 27,
IORING_UNREGISTER_NAPI = 28,
+ IORING_REGISTER_CLOCK = 29,
+
+ /* clone registered buffers from source ring to current ring */
+ IORING_REGISTER_CLONE_BUFFERS = 30,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -675,6 +692,21 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_clock_register {
+ __u32 clockid;
+ __u32 __resv[3];
+};
+
+enum {
+ IORING_REGISTER_SRC_REGISTERED = 1,
+};
+
+struct io_uring_clone_buffers {
+ __u32 src_fd;
+ __u32 flags;
+ __u32 pad[6];
+};
+
struct io_uring_buf {
__u64 addr;
__u32 len;
@@ -707,9 +739,17 @@ struct io_uring_buf_ring {
* mmap(2) with the offset set as:
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
+ * IOU_PBUF_RING_INC: If set, buffers consumed from this buffer ring can be
+ * consumed incrementally. Normally one (or more) buffers
+ * are fully consumed. With incremental consumption, it's
+ * feasible to register big ranges of buffers, and each
+ * use of it will consume only as much as it needs. This
+ * requires that both the kernel and application keep
+ * track of where the current read/recv index is at.
*/
enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
+ IOU_PBUF_RING_INC = 2,
};
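
A sketch of the CQE-side bookkeeping this implies. buf_base[] and buf_off[]
are hypothetical per-buffer-id tables kept by the application, and consume()
stands in for real data handling:

    static void handle_cqe(struct io_uring_cqe *cqe)
    {
            unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

            if (!(cqe->flags & IORING_CQE_F_BUFFER) || cqe->res <= 0)
                    return;

            consume(buf_base[bid] + buf_off[bid], cqe->res);
            if (cqe->flags & IORING_CQE_F_BUF_MORE)
                    buf_off[bid] += cqe->res; /* kernel still owns the tail */
            else
                    buf_off[bid] = 0;         /* buffer is back with us */
    }
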
/* argument for IORING_(UN)REGISTER_PBUF_RING */
@@ -758,7 +798,7 @@ enum io_uring_register_restriction_op {
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
- __u32 pad;
+ __u32 min_wait_usec;
__u64 ts;
};
diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h
index 38f6a8fdfd34..8aef21e4a8c1 100644
--- a/include/uapi/linux/ioam6_iptunnel.h
+++ b/include/uapi/linux/ioam6_iptunnel.h
@@ -50,6 +50,12 @@ enum {
IOAM6_IPTUNNEL_FREQ_K, /* u32 */
IOAM6_IPTUNNEL_FREQ_N, /* u32 */
+ /* Tunnel src address.
+ * For the encap and auto modes.
+ * Optional (automatic if not provided).
+ */
+ IOAM6_IPTUNNEL_SRC, /* struct in6_addr */
+
__IOAM6_IPTUNNEL_MAX,
};
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 4dde745cfb7e..72010f71c5e4 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -4,8 +4,8 @@
#ifndef _UAPI_IOMMUFD_H
#define _UAPI_IOMMUFD_H
-#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
#define IOMMUFD_TYPE (';')
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 6f2f2720f3ac..ff8032227876 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -7,7 +7,7 @@
*/
#define KPF_LOCKED 0
-#define KPF_ERROR 1
+#define KPF_ERROR 1 /* Now unused */
#define KPF_REFERENCED 2
#define KPF_UPTODATE 3
#define KPF_DIRTY 4
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 285a36601dc9..717307d6b5b7 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -42,9 +42,10 @@
* - 1.14 - Update kfd_event_data
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
* - 1.16 - Add contiguous VRAM allocation flag
+ * - 1.17 - Add SDMA queue creation with target SDMA engine ID
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 16
+#define KFD_IOCTL_MINOR_VERSION 17
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -56,6 +57,7 @@ struct kfd_ioctl_get_version_args {
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
+#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
@@ -78,6 +80,8 @@ struct kfd_ioctl_create_queue_args {
__u64 ctx_save_restore_address; /* to KFD */
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
+ __u32 sdma_engine_id; /* to KFD */
+ __u32 pad;
};
struct kfd_ioctl_destroy_queue_args {
@@ -536,26 +540,29 @@ enum kfd_smi_event {
KFD_SMI_EVENT_ALL_PROCESS = 64
};
+/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
- KFD_MIGRATE_TRIGGER_PREFETCH,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION
+ KFD_MIGRATE_TRIGGER_PREFETCH, /* Prefetch to GPU VRAM or system memory */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, /* GPU page fault recovery */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, /* CPU page fault recovery */
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION /* TTM eviction */
};
+/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
- KFD_QUEUE_EVICTION_TRIGGER_SVM,
- KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
- KFD_QUEUE_EVICTION_TRIGGER_TTM,
- KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
- KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
- KFD_QUEUE_EVICTION_CRIU_RESTORE
+ KFD_QUEUE_EVICTION_TRIGGER_SVM, /* SVM buffer migration */
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR, /* userptr movement */
+ KFD_QUEUE_EVICTION_TRIGGER_TTM, /* TTM move buffer */
+ KFD_QUEUE_EVICTION_TRIGGER_SUSPEND, /* GPU suspend */
+ KFD_QUEUE_EVICTION_CRIU_CHECKPOINT, /* CRIU checkpoint */
+ KFD_QUEUE_EVICTION_CRIU_RESTORE /* CRIU restore */
};
+/* The reason for the unmap-buffer-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
- KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY, /* MMU notifier CPU buffer movement */
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE, /* MMU notifier page migration */
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU /* Unmap to free the buffer */
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
@@ -566,6 +573,77 @@ struct kfd_ioctl_smi_events_args {
__u32 anon_fd; /* from KFD */
};
+/*
+ * SVM event tracing via SMI system management interface
+ *
+ * Open event file descriptor
+ * use ioctl AMDKFD_IOC_SMI_EVENTS, pass in the gpuid, and get back an
+ * anonymous file descriptor to receive SMI events.
+ * If called with sudo permission, the file descriptor can be used to receive
+ * SVM events from all processes; otherwise it only receives SVM events of the
+ * same process.
+ *
+ * To enable the SVM event
+ * Write a KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap mask to the event file
+ * descriptor to start recording the event to the kfifo; combine bitmap masks
+ * for multiple events. A new event mask overwrites the previous event mask.
+ * The KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires
+ * sudo permission to receive SVM events from all processes.
+ *
+ * To receive the event
+ * The application can poll the file descriptor to wait for events, then read
+ * events from the file into a buffer. Each event is a one-line string message,
+ * starting with the event id followed by the event-specific information.
+ *
+ * To decode event information
+ * The following event format string macros can be used with sscanf to decode
+ * the specific event information.
+ * event triggers: the reason the event was generated, defined as enums for the
+ * unmap, eviction and migrate events.
+ * node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system memory.
+ * addr: user mode address, in pages
+ * size: in pages
+ * pid: the ID of the process that generated the event
+ * ns: timestamp in nanosecond resolution, starts at system boot time but
+ * stops during suspend
+ * migrate_update: 'M' if the GPU page fault was recovered by migration, 'U'
+ * if by update
+ * rw: 'W' for write page fault, 'R' for read page fault
+ * rescheduled: 'R' if the queue restore failed and was rescheduled to try again
+ */
+#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
+ "%x %s\n", (reset_seq_num), (reset_cause)
+
+#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
+ "%llx:%llx\n", (bitmask), (counter)
+
+#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
+ "%x:%s\n", (pid), (task_name)
+
+#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
+
+#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
+
+#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
+ preferred_loc, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
+
+#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (migrate_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
+ "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
+ "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
+
+#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
+ "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
+ (node), (unmap_trigger)
+
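
A userspace sketch of the read-and-decode loop described above. It assumes
fd is the anonymous descriptor from AMDKFD_IOC_SMI_EVENTS with the
page-fault event enabled, and that each line starts with the "%x " event id
prefix (the KFD_SMI_EVENT_PAGE_FAULT_START id comes from the enum earlier
in this header):

    char buf[512];
    unsigned int event, node;
    long long ns;
    unsigned long addr;
    int pid;
    char rw;
    ssize_t n = read(fd, buf, sizeof(buf) - 1);

    if (n > 0) {
            buf[n] = '\0';
            if (sscanf(buf, "%x ", &event) == 1 &&
                event == KFD_SMI_EVENT_PAGE_FAULT_START) {
                    sscanf(strchr(buf, ' ') + 1,
                           KFD_EVENT_FMT_PAGEFAULT_START(&ns, &pid, &addr,
                                                         &node, &rw));
            }
    }
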
/**************************************************************************************************
* CRIU IOCTLs (Checkpoint Restore In Userspace)
*
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 2c8dbc74b955..33745642f787 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -44,6 +44,12 @@ struct landlock_ruleset_attr {
* flags`_).
*/
__u64 handled_access_net;
+ /**
+ * @scoped: Bitmask of scopes (cf. `Scope flags`_)
+ * restricting a Landlock domain from accessing outside
+ * resources (e.g. IPCs).
+ */
+ __u64 scoped;
};
/*
@@ -274,4 +280,28 @@ struct landlock_net_port_attr {
#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
/* clang-format on */
+
+/**
+ * DOC: scope
+ *
+ * Scope flags
+ * ~~~~~~~~~~~
+ *
+ * These flags allow isolating a sandboxed process from a set of IPC actions.
+ * Setting a flag for a ruleset will isolate the Landlock domain to forbid
+ * connections to resources outside the domain.
+ *
+ * Scopes:
+ *
+ * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
+ * connecting to an abstract UNIX socket created by a process outside the
+ * related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ * - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
+ * to another process outside the domain.
+ */
+/* clang-format off */
+#define LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET (1ULL << 0)
+#define LANDLOCK_SCOPE_SIGNAL (1ULL << 1)
+/* clang-format on */
+
#endif /* _UAPI_LINUX_LANDLOCK_H */
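
A minimal sketch of opting into the new scopes, alongside the existing
Landlock syscall flow:

    #include <linux/landlock.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct landlock_ruleset_attr attr = {
                    .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                              LANDLOCK_SCOPE_SIGNAL,
            };
            int rs = syscall(SYS_landlock_create_ruleset, &attr,
                             sizeof(attr), 0);

            if (rs < 0)
                    return 1;
            prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
            return syscall(SYS_landlock_restrict_self, rs, 0);
    }
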
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 8254c937c9f4..0eca95ccb41e 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -140,25 +140,6 @@
#endif /* _NETINET_IN_H */
-/* Coordinate with glibc netipx/ipx.h header. */
-#if defined(__NETIPX_IPX_H)
-
-#define __UAPI_DEF_SOCKADDR_IPX 0
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0
-#define __UAPI_DEF_IPX_CONFIG_DATA 0
-#define __UAPI_DEF_IPX_ROUTE_DEF 0
-
-#else /* defined(__NETIPX_IPX_H) */
-
-#define __UAPI_DEF_SOCKADDR_IPX 1
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
-#define __UAPI_DEF_IPX_CONFIG_DATA 1
-#define __UAPI_DEF_IPX_ROUTE_DEF 1
-
-#endif /* defined(__NETIPX_IPX_H) */
-
/* Definitions for xattr.h */
#if defined(_SYS_XATTR_H)
#define __UAPI_DEF_XATTR 0
@@ -240,23 +221,6 @@
#define __UAPI_DEF_IP6_MTUINFO 1
#endif
-/* Definitions for ipx.h */
-#ifndef __UAPI_DEF_SOCKADDR_IPX
-#define __UAPI_DEF_SOCKADDR_IPX 1
-#endif
-#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
-#endif
-#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
-#endif
-#ifndef __UAPI_DEF_IPX_CONFIG_DATA
-#define __UAPI_DEF_IPX_CONFIG_DATA 1
-#endif
-#ifndef __UAPI_DEF_IPX_ROUTE_DEF
-#define __UAPI_DEF_IPX_ROUTE_DEF 1
-#endif
-
/* Definitions for xattr.h */
#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
index 33d8c9f4aa6b..938593dfd5da 100644
--- a/include/uapi/linux/lsm.h
+++ b/include/uapi/linux/lsm.h
@@ -64,6 +64,7 @@ struct lsm_ctx {
#define LSM_ID_LANDLOCK 110
#define LSM_ID_IMA 111
#define LSM_ID_EVM 112
+#define LSM_ID_IPE 113
/*
* LSM_ATTR_XXX definitions identify different LSM attributes
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index c0c8ec995b06..f0d3f268240d 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -23,6 +23,7 @@
#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
#define MDIO_MMD_TC 6 /* Transmission Convergence */
#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_POWER_UNIT 13 /* PHY Power Unit */
#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 80ce0ef43afd..f1d468acfb25 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -42,8 +42,9 @@ enum {
NBD_CMD_WRITE = 1,
NBD_CMD_DISC = 2,
NBD_CMD_FLUSH = 3,
- NBD_CMD_TRIM = 4
+ NBD_CMD_TRIM = 4,
/* userspace defines additional extension commands */
+ NBD_CMD_WRITE_ZEROES = 6,
};
/* values for flags field, these are server interaction specific. */
@@ -51,12 +52,15 @@ enum {
#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
#define NBD_FLAG_SEND_FUA (1 << 3) /* send FUA (forced unit access) */
-/* there is a gap here to match userspace */
+#define NBD_FLAG_ROTATIONAL (1 << 4) /* device is rotational */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
+#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* supports WRITE_ZEROES */
+/* there is a gap here to match userspace */
#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Server supports multiple connections per export. */
/* values for cmd flags in the upper 16 bits of request type */
#define NBD_CMD_FLAG_FUA (1 << 16) /* FUA (forced unit access) op */
+#define NBD_CMD_FLAG_NO_HOLE (1 << 17) /* Do not punch a hole for WRITE_ZEROES */
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index a2c66b3d7f0f..858339d1c1c4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -32,8 +32,9 @@ enum {
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16),
+ SOF_TIMESTAMPING_OPT_RX_FILTER = (1 << 17),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_ID_TCP,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_RX_FILTER,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 43742ac5b00d..7c308f04e7a0 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -93,6 +93,7 @@ enum {
NETDEV_A_PAGE_POOL_INFLIGHT,
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
NETDEV_A_PAGE_POOL_DETACH_TIME,
+ NETDEV_A_PAGE_POOL_DMABUF,
__NETDEV_A_PAGE_POOL_MAX,
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
@@ -131,6 +132,7 @@ enum {
NETDEV_A_QUEUE_IFINDEX,
NETDEV_A_QUEUE_TYPE,
NETDEV_A_QUEUE_NAPI_ID,
+ NETDEV_A_QUEUE_DMABUF,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -174,6 +176,16 @@ enum {
};
enum {
+ NETDEV_A_DMABUF_IFINDEX = 1,
+ NETDEV_A_DMABUF_QUEUES,
+ NETDEV_A_DMABUF_FD,
+ NETDEV_A_DMABUF_ID,
+
+ __NETDEV_A_DMABUF_MAX,
+ NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -186,6 +198,7 @@ enum {
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
NETDEV_CMD_QSTATS_GET,
+ NETDEV_CMD_BIND_RX,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 639894ed1b97..d6476ca5d7a6 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -436,7 +436,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
* @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
* @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
- * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
+ * @NFTA_SET_ELEM_TIMEOUT: timeout value, zero means never times out (NLA_U64)
* @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
* @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
index dd8787f9cf39..bc49baf4a267 100644
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h
@@ -16,10 +16,15 @@ struct nhmsg {
struct nexthop_grp {
__u32 id; /* nexthop id - must exist */
__u8 weight; /* weight of this nexthop */
- __u8 resvd1;
+ __u8 weight_high; /* high order bits of weight */
__u16 resvd2;
};
+static inline __u16 nexthop_grp_weight(const struct nexthop_grp *entry)
+{
+ return ((entry->weight_high << 8) | entry->weight) + 1;
+}
+
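
The userspace encoding counterpart of nexthop_grp_weight() above: a
configured weight in [1, 65536] is stored off-by-one across the two
byte-wide fields:

    static void nexthop_grp_set_weight(struct nexthop_grp *entry, __u32 weight)
    {
            weight -= 1;                    /* stored value is weight - 1 */
            entry->weight = weight & 0xff;
            entry->weight_high = weight >> 8;
    }
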
enum {
NEXTHOP_GRP_TYPE_MPATH, /* hash-threshold nexthop group
* default type if not specified
@@ -33,6 +38,9 @@ enum {
#define NHA_OP_FLAG_DUMP_STATS BIT(0)
#define NHA_OP_FLAG_DUMP_HW_STATS BIT(1)
+/* Response OP_FLAGS. */
+#define NHA_OP_FLAG_RESP_GRP_RESVD_0 BIT(31) /* Dump clears resvd fields. */
+
enum {
NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */
diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h
index 5fad3d0fcd70..34127653fd00 100644
--- a/include/uapi/linux/nsfs.h
+++ b/include/uapi/linux/nsfs.h
@@ -27,4 +27,19 @@
/* Return thread-group leader id of pid in the target pid namespace. */
#define NS_GET_TGID_IN_PIDNS _IOR(NSIO, 0x9, int)
+struct mnt_ns_info {
+ __u32 size;
+ __u32 nr_mounts;
+ __u64 mnt_ns_id;
+};
+
+#define MNT_NS_INFO_SIZE_VER0 16 /* size of first published struct */
+
+/* Get information about namespace. */
+#define NS_MNT_GET_INFO _IOR(NSIO, 10, struct mnt_ns_info)
+/* Get next namespace. */
+#define NS_MNT_GET_NEXT _IOR(NSIO, 11, struct mnt_ns_info)
+/* Get previous namespace. */
+#define NS_MNT_GET_PREV _IOR(NSIO, 12, struct mnt_ns_info)
+
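
Usage sketch; seeding @size with MNT_NS_INFO_SIZE_VER0 so the kernel knows
which struct revision userspace was built against is an assumed convention
here:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/nsfs.h>

    struct mnt_ns_info info = { .size = MNT_NS_INFO_SIZE_VER0 };
    int nsfd = open("/proc/self/ns/mnt", O_RDONLY | O_CLOEXEC);

    if (nsfd >= 0 && ioctl(nsfd, NS_MNT_GET_INFO, &info) == 0)
            printf("mnt_ns_id %llu, %u mounts\n",
                   (unsigned long long)info.mnt_ns_id, info.nr_mounts);
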
#endif /* __LINUX_NSFS_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 94c00996e633..12323b3334a9 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -634,9 +634,11 @@
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
-#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
+#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */
+#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
@@ -740,6 +742,7 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */
#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
@@ -1121,6 +1124,40 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Native PCIe Enclosure Management */
+#define PCI_NPEM_CAP 0x04 /* NPEM capability register */
+#define PCI_NPEM_CAP_CAPABLE 0x00000001 /* NPEM Capable */
+
+#define PCI_NPEM_CTRL 0x08 /* NPEM control register */
+#define PCI_NPEM_CTRL_ENABLE 0x00000001 /* NPEM Enable */
+
+/*
+ * The Native PCIe Enclosure Management indication bits and the Reset command
+ * bit occupy the same positions in the capability and control registers.
+ */
+#define PCI_NPEM_CMD_RESET 0x00000002 /* Reset Command */
+#define PCI_NPEM_IND_OK 0x00000004 /* OK */
+#define PCI_NPEM_IND_LOCATE 0x00000008 /* Locate */
+#define PCI_NPEM_IND_FAIL 0x00000010 /* Fail */
+#define PCI_NPEM_IND_REBUILD 0x00000020 /* Rebuild */
+#define PCI_NPEM_IND_PFA 0x00000040 /* Predicted Failure Analysis */
+#define PCI_NPEM_IND_HOTSPARE 0x00000080 /* Hot Spare */
+#define PCI_NPEM_IND_ICA 0x00000100 /* In Critical Array */
+#define PCI_NPEM_IND_IFA 0x00000200 /* In Failed Array */
+#define PCI_NPEM_IND_IDT 0x00000400 /* Device Type */
+#define PCI_NPEM_IND_DISABLED 0x00000800 /* Disabled */
+#define PCI_NPEM_IND_SPEC_0 0x01000000
+#define PCI_NPEM_IND_SPEC_1 0x02000000
+#define PCI_NPEM_IND_SPEC_2 0x04000000
+#define PCI_NPEM_IND_SPEC_3 0x08000000
+#define PCI_NPEM_IND_SPEC_4 0x10000000
+#define PCI_NPEM_IND_SPEC_5 0x20000000
+#define PCI_NPEM_IND_SPEC_6 0x40000000
+#define PCI_NPEM_IND_SPEC_7 0x80000000
+
+#define PCI_NPEM_STATUS 0x0c /* NPEM status register */
+#define PCI_NPEM_STATUS_CC 0x00000001 /* Command Completed */
+
/* Data Object Exchange */
#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d36d9cdf0c00..2c32080416b5 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -246,16 +246,19 @@ struct tc_u32_key {
};
struct tc_u32_sel {
- unsigned char flags;
- unsigned char offshift;
- unsigned char nkeys;
-
- __be16 offmask;
- __u16 off;
- short offoff;
-
- short hoff;
- __be32 hmask;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(tc_u32_sel_hdr, hdr, /* no attrs */,
+ unsigned char flags;
+ unsigned char offshift;
+ unsigned char nkeys;
+
+ __be16 offmask;
+ __u16 off;
+ short offoff;
+
+ short hoff;
+ __be32 hmask;
+ );
struct tc_u32_key keys[];
};
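
The __struct_group() conversion lets the kernel copy just the fixed members
as struct tc_u32_sel_hdr with proper bounds checking; the userspace
allocation pattern for the trailing flexible array is unchanged:

    unsigned char nkeys = 2;
    struct tc_u32_sel *sel;

    sel = calloc(1, sizeof(*sel) + nkeys * sizeof(struct tc_u32_key));
    if (sel)
            sel->nkeys = nkeys;
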
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 053b40d642de..18eefa6d93d6 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -155,13 +155,25 @@ struct ptp_sys_offset {
struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};
+/*
+ * ptp_sys_offset_extended - data structure for IOCTL operation
+ * PTP_SYS_OFFSET_EXTENDED
+ *
+ * @n_samples: Desired number of measurements.
+ * @clockid: clockid of a clock-base used for pre/post timestamps.
+ * @rsv: Reserved for future use.
+ * @ts: Array of samples in the form [pre-TS, PHC, post-TS]. The
+ * kernel fills in @n_samples such triplets.
+ *
+ * Starting from kernel 6.12 onwards, the first word of the reserved field
+ * is used for @clockid. That's backward compatible, since previous kernels
+ * expect all three reserved words (@rsv[3]) to be 0 while the clockid (the
+ * first word in the new structure) for CLOCK_REALTIME is '0'.
+ */
struct ptp_sys_offset_extended {
- unsigned int n_samples; /* Desired number of measurements. */
- unsigned int rsv[3]; /* Reserved for future use. */
- /*
- * Array of [system, phc, system] time stamps. The kernel will provide
- * 3*n_samples time stamps.
- */
+ unsigned int n_samples;
+ __kernel_clockid_t clockid;
+ unsigned int rsv[2];
struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
};
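
A measurement sketch using the new field (assuming the kernel accepts
CLOCK_MONOTONIC_RAW as a clock base and fd is an open /dev/ptpN):

    struct ptp_sys_offset_extended req = {
            .n_samples = 5,
            .clockid = CLOCK_MONOTONIC_RAW,
    };

    if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) == 0) {
            for (unsigned int i = 0; i < req.n_samples; i++) {
                    /* ts[i] = [pre, PHC, post]; midpoint estimate of the
                     * chosen clock base at the PHC reading: */
                    long long pre  = req.ts[i][0].sec * 1000000000LL + req.ts[i][0].nsec;
                    long long phc  = req.ts[i][1].sec * 1000000000LL + req.ts[i][1].nsec;
                    long long post = req.ts[i][2].sec * 1000000000LL + req.ts[i][2].nsec;
                    long long offset = phc - (pre + post) / 2;

                    (void)offset; /* feed into a clock servo, etc. */
            }
    }
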
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 6eeaf8bf2362..430daceafac7 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -165,6 +165,11 @@
#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
/*
+ * Compand
+ */
+#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -851,6 +856,39 @@ struct rkisp1_params_cfg {
struct rkisp1_cif_isp_isp_other_cfg others;
};
+/**
+ * struct rkisp1_cif_isp_compand_bls_config - Rockchip ISP1 Companding parameters (BLS)
+ * @r: Fixed subtraction value for Bayer pattern R
+ * @gr: Fixed subtraction value for Bayer pattern Gr
+ * @gb: Fixed subtraction value for Bayer pattern Gb
+ * @b: Fixed subtraction value for Bayer pattern B
+ *
+ * The values will be subtracted from the sensor values. Note that unlike the
+ * dedicated BLS block, the BLS values in the compander are 20-bit unsigned.
+ */
+struct rkisp1_cif_isp_compand_bls_config {
+ __u32 r;
+ __u32 gr;
+ __u32 gb;
+ __u32 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_compand_curve_config - Rockchip ISP1 Companding
+ * parameters (expand and compression curves)
+ * @px: Compand curve x-values. Each value stores the distance from the
+ * previous x-value, expressed as log2 of the distance on 5 bits.
+ * @x: Compand curve x-values. The functionality of these parameters is
+ * unknown due to a lack of hardware documentation, but they are left
+ * here for future compatibility purposes.
+ * @y: Compand curve y-values
+ */
+struct rkisp1_cif_isp_compand_curve_config {
+ __u8 px[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 x[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+};
+
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -996,4 +1034,544 @@ struct rkisp1_stat_buffer {
struct rkisp1_cif_isp_stat params;
};
+/*---------- PART3: Extensible Configuration Parameters ------------*/
+
+/**
+ * enum rkisp1_ext_params_block_type - RkISP1 extensible params block type
+ *
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS: Black level subtraction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC: Defect pixel cluster correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG: Sensor de-gamma
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN: Auto white balance gains
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT: ISP filtering
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM: Bayer de-mosaic
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK: Cross-talk correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC: Gamma out correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF: De-noise pre-filter
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH: De-noise pre-filter strength
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC: Color processing
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_IE: Image effects
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC: Lens shading correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS: Auto white balance statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS: Histogram statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS: Auto exposure statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS: Auto-focus statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ */
+enum rkisp1_ext_params_block_type {
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_IE,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+};
+
+#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
+#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+
+/**
+ * struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
+ * header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block shall embed an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and its size and
+ * properly handle the block content by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`rkisp1_ext_params_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes.
+ *
+ * The @flags field is a bitmask of per-block flags RKISP1_EXT_PARAMS_FL_*.
+ *
+ * When userspace wants to configure and enable an ISP block it shall fully
+ * populate the block configuration and set the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE bit in the @flags field.
+ *
+ * When userspace simply wants to disable an ISP block the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bit should be set in @flags field. The
+ * driver ignores the rest of the block configuration structure in this case.
+ *
+ * If a new configuration of an ISP block has to be applied userspace shall
+ * fully populate the ISP block configuration and omit setting the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits
+ * in the @flags field.
+ *
+ * Setting both the RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits in the @flags field is not allowed
+ * and not accepted by the driver.
+ *
+ * Userspace is responsible for correctly populating the parameters block header
+ * fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ * void populate_bls(struct rkisp1_ext_params_block_header *block) {
+ * struct rkisp1_ext_params_bls_config *bls =
+ * (struct rkisp1_ext_params_bls_config *)block;
+ *
+ * bls->header.type = RKISP1_EXT_PARAMS_BLOCK_ID_BLS;
+ * bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
+ * bls->header.size = sizeof(*bls);
+ *
+ * bls->config.enable_auto = 0;
+ * bls->config.fixed_val.r = blackLevelRed_;
+ * bls->config.fixed_val.gr = blackLevelGreenR_;
+ * bls->config.fixed_val.gb = blackLevelGreenB_;
+ * bls->config.fixed_val.b = blackLevelBlue_;
+ * }
+ *
+ * @type: The parameters block type, see
+ * :c:type:`rkisp1_ext_params_block_type`
+ * @flags: A bitmask of block flags
+ * @size: Size (in bytes) of the parameters block, including this header
+ */
+struct rkisp1_ext_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+};
+
+/**
+ * struct rkisp1_ext_params_bls_config - RkISP1 extensible params BLS config
+ *
+ * RkISP1 extensible parameters Black Level Subtraction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Black Level Subtraction configuration, see
+ * :c:type:`rkisp1_cif_isp_bls_config`
+ */
+struct rkisp1_ext_params_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpcc_config - RkISP1 extensible params DPCC config
+ *
+ * RkISP1 extensible parameters Defective Pixel Cluster Correction configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Defective Pixel Cluster Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_dpcc_config`
+ */
+struct rkisp1_ext_params_dpcc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpcc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_sdg_config - RkISP1 extensible params SDG config
+ *
+ * RkISP1 extensible parameters Sensor Degamma configuration block. Identified
+ * by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Sensor Degamma configuration, see
+ * :c:type:`rkisp1_cif_isp_sdg_config`
+ */
+struct rkisp1_ext_params_sdg_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_sdg_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_lsc_config - RkISP1 extensible params LSC config
+ *
+ * RkISP1 extensible parameters Lens Shading Correction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Lens Shading Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_lsc_config`
+ */
+struct rkisp1_ext_params_lsc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_lsc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_gain_config - RkISP1 extensible params AWB
+ * gain config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Gains configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance Gains configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_gain_config`
+ */
+struct rkisp1_ext_params_awb_gain_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_gain_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_flt_config - RkISP1 extensible params FLT config
+ *
+ * RkISP1 extensible parameters Filter configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Filter configuration, see :c:type:`rkisp1_cif_isp_flt_config`
+ */
+struct rkisp1_ext_params_flt_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_flt_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_bdm_config - RkISP1 extensible params BDM config
+ *
+ * RkISP1 extensible parameters Demosaicing configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Demosaicing configuration, see :c:type:`rkisp1_cif_isp_bdm_config`
+ */
+struct rkisp1_ext_params_bdm_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bdm_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ctk_config - RkISP1 extensible params CTK config
+ *
+ * RkISP1 extensible parameters Cross-Talk configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Cross-Talk configuration, see :c:type:`rkisp1_cif_isp_ctk_config`
+ */
+struct rkisp1_ext_params_ctk_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ctk_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_goc_config - RkISP1 extensible params GOC config
+ *
+ * RkISP1 extensible parameters Gamma-Out configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Gamma-Out configuration, see :c:type:`rkisp1_cif_isp_goc_config`
+ */
+struct rkisp1_ext_params_goc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_goc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_config - RkISP1 extensible params DPF config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_config`
+ */
+struct rkisp1_ext_params_dpf_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_strength_config - RkISP1 extensible params DPF
+ * strength config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter strength configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter strength configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_strength_config`
+ */
+struct rkisp1_ext_params_dpf_strength_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_strength_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_cproc_config - RkISP1 extensible params CPROC config
+ *
+ * RkISP1 extensible parameters Color Processing configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Color processing configuration, see
+ * :c:type:`rkisp1_cif_isp_cproc_config`
+ */
+struct rkisp1_ext_params_cproc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_cproc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ie_config - RkISP1 extensible params IE config
+ *
+ * RkISP1 extensible parameters Image Effect configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_IE`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Image Effect configuration, see :c:type:`rkisp1_cif_isp_ie_config`
+ */
+struct rkisp1_ext_params_ie_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ie_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_meas_config - RkISP1 extensible params AWB
+ * Meas config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Measurement configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance measure configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_meas_config`
+ */
+struct rkisp1_ext_params_awb_meas_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_meas_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_hst_config - RkISP1 extensible params Histogram config
+ *
+ * RkISP1 extensible parameters Histogram statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Histogram statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_hst_config`
+ */
+struct rkisp1_ext_params_hst_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_hst_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_aec_config - RkISP1 extensible params AEC config
+ *
+ * RkISP1 extensible parameters Auto-Exposure statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Exposure statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_aec_config`
+ */
+struct rkisp1_ext_params_aec_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_aec_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_afc_config - RkISP1 extensible params AFC config
+ *
+ * RkISP1 extensible parameters Auto-Focus statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Focus statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_afc_config`
+ */
+struct rkisp1_ext_params_afc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_afc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_bls_config - RkISP1 extensible params
+ * Compand BLS config
+ *
+ * RkISP1 extensible parameters Companding configuration block (black level
+ * subtraction). Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding BLS configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_bls_config`
+ */
+struct rkisp1_ext_params_compand_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_curve_config - RkISP1 extensible params
+ * Compand curve config
+ *
+ * RkISP1 extensible parameters Companding configuration block (expand and
+ * compression curves). Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND` or
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding curve configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_curve_config`
+ */
+struct rkisp1_ext_params_compand_curve_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_curve_config config;
+} __attribute__((aligned(8)));
+
+/*
+ * The rkisp1_ext_params_compand_curve_config structure is counted twice as it
+ * is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
+ */
+#define RKISP1_EXT_PARAMS_MAX_SIZE \
+ (sizeof(struct rkisp1_ext_params_bls_config) +\
+ sizeof(struct rkisp1_ext_params_dpcc_config) +\
+ sizeof(struct rkisp1_ext_params_sdg_config) +\
+ sizeof(struct rkisp1_ext_params_lsc_config) +\
+ sizeof(struct rkisp1_ext_params_awb_gain_config) +\
+ sizeof(struct rkisp1_ext_params_flt_config) +\
+ sizeof(struct rkisp1_ext_params_bdm_config) +\
+ sizeof(struct rkisp1_ext_params_ctk_config) +\
+ sizeof(struct rkisp1_ext_params_goc_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_strength_config) +\
+ sizeof(struct rkisp1_ext_params_cproc_config) +\
+ sizeof(struct rkisp1_ext_params_ie_config) +\
+ sizeof(struct rkisp1_ext_params_awb_meas_config) +\
+ sizeof(struct rkisp1_ext_params_hst_config) +\
+ sizeof(struct rkisp1_ext_params_aec_config) +\
+ sizeof(struct rkisp1_ext_params_afc_config) +\
+ sizeof(struct rkisp1_ext_params_compand_bls_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config))
+
+/**
+ * enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version
+ *
+ * @RKISP1_EXT_PARAM_BUFFER_V1: First version of RkISP1 extensible parameters
+ */
+enum rksip1_ext_param_buffer_version {
+ RKISP1_EXT_PARAM_BUFFER_V1 = 1,
+};
+
+/**
+ * struct rkisp1_ext_params_cfg - RkISP1 extensible parameters configuration
+ *
+ * This struct contains the configuration parameters of the RkISP1 ISP
+ * algorithms, serialized by userspace into a data buffer. Each configuration
+ * parameter block is represented by a block-specific structure which contains a
+ * :c:type:`rkisp1_ext_params_block_header` entry as first member. Userspace
+ * populates the @data buffer with configuration parameters for the blocks that
+ * it intends to configure. The effective size of the @data buffer therefore
+ * varies with the number of ISP blocks being configured, and userspace
+ * reports it in the @data_size field.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace shall populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated version and return an error if the desired version is not
+ * supported.
+ *
+ * Currently, only the RKISP1_EXT_PARAM_BUFFER_V1 version is supported. When a
+ * new format version is added, a mechanism for userspace to query the
+ * supported format versions will be implemented in the form of a read-only
+ * V4L2 control. If such a control is not available, userspace should assume
+ * that only RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after the other, without
+ * gaps or overlaps. Userspace shall populate the @data_size field with
+ * the effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ * +-------------------- struct rkisp1_ext_params_cfg -------------------+
+ * | version = RKISP1_EXT_PARAM_BUFFER_V1;                               |
+ * | data_size = sizeof(struct rkisp1_ext_params_bls_config) |
+ * | + sizeof(struct rkisp1_ext_params_dpcc_config); |
+ * | +------------------------- data ---------------------------------+ |
+ * | | +------------- struct rkisp1_ext_params_bls_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_bls_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_bls_config -------------+ | | |
+ * | | | | enable_auto = 0; | | | |
+ * | | | | fixed_val.r = 256; | | | |
+ * | | | | fixed_val.gr = 256; | | | |
+ * | | | | fixed_val.gb = 256; | | | |
+ * | | | | fixed_val.b = 256; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +------------ struct rkisp1_ext_params_dpcc_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_dpcc_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_dpcc_config ------------+ | | |
+ * | | | | mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; | | | |
+ * | | | | output_mode = | | | |
+ * | | | | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER; | | | |
+ * | | | | set_use = ... ; | | | |
+ * | | | | ... = ... ; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +-------------------------------------------------------------+ | |
+ * | +-----------------------------------------------------------------+ |
+ * +---------------------------------------------------------------------+
+ *
+ * @version: The RkISP1 extensible parameters buffer version, see
+ * :c:type:`rksip1_ext_param_buffer_version`
+ * @data_size: The RkISP1 configuration data effective size, excluding this
+ * header
+ * @data: The RkISP1 extensible configuration data blocks
+ */
+struct rkisp1_ext_params_cfg {
+ __u32 version;
+ __u32 data_size;
+ __u8 data[RKISP1_EXT_PARAMS_MAX_SIZE];
+};
+
#endif /* _UAPI_RKISP1_CONFIG_H */
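
To make the layout above concrete, here is a minimal userspace sketch (editorial, not part of the patch) that serializes a single BLS block into the extensible parameters buffer. It uses only the structures and macros shown in this header; error handling is omitted:

void rkisp1_fill_bls(struct rkisp1_ext_params_cfg *cfg)
{
	/* Place the BLS block at the start of the @data area. */
	struct rkisp1_ext_params_bls_config *bls =
		(struct rkisp1_ext_params_bls_config *)cfg->data;

	cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;

	bls->header.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
	bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
	bls->header.size = sizeof(*bls);

	bls->config.enable_auto = 0;
	bls->config.fixed_val.r = 256;
	bls->config.fixed_val.gr = 256;
	bls->config.fixed_val.gb = 256;
	bls->config.fixed_val.b = 256;

	/* The effective size excludes the version/data_size header. */
	cfg->data_size = sizeof(*bls);
}

A second block would be appended immediately after the first, with data_size grown accordingly.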
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab2..359a14cc76a4 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -118,6 +118,7 @@ struct clone_args {
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
#define SCHED_DEADLINE 6
+#define SCHED_EXT 7
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
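
As a usage sketch (editorial; the opt-in path is an assumption based on the standard scheduler API, not shown in this hunk), a task would enter the new policy through sched_setscheduler():

#include <sched.h>

#ifndef SCHED_EXT
#define SCHED_EXT 7	/* value from the hunk above */
#endif

/* Hedged sketch: opt the calling task into SCHED_EXT. */
int enter_scx(void)
{
	const struct sched_param p = { .sched_priority = 0 };

	return sched_setscheduler(0, SCHED_EXT, &p);
}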
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index 90662385689b..bf6e9ae031c1 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -58,9 +58,9 @@
*
* This is reflected by the following fields of the sched_attr structure:
*
- * @sched_deadline representative of the task's deadline
- * @sched_runtime representative of the task's runtime
- * @sched_period representative of the task's period
+ * @sched_deadline representative of the task's deadline in nanoseconds
+ * @sched_runtime representative of the task's runtime in nanoseconds
+ * @sched_period representative of the task's period in nanoseconds
*
* Given this task model, there are a multiplicity of scheduling algorithms
* and policies, that can be used to ensure all the tasks will make their
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index ed2a96f43ce4..5a2af0942c9f 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -83,5 +83,6 @@
#define SERIO_PULSE8_CEC 0x40
#define SERIO_RAINSHADOW_CEC 0x41
#define SERIO_FSIA6B 0x42
+#define SERIO_EXTRON_DA_HD_4K_PLUS 0x43
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index b531e3ef011a..1f58cb0c266b 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -127,6 +127,8 @@ enum {
SMC_NLA_LGR_R_NET_COOKIE, /* u64 */
SMC_NLA_LGR_R_PAD, /* flag */
SMC_NLA_LGR_R_BUF_TYPE, /* u8 */
+ SMC_NLA_LGR_R_SNDBUF_ALLOC, /* uint */
+ SMC_NLA_LGR_R_RMB_ALLOC, /* uint */
__SMC_NLA_LGR_R_MAX,
SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1
};
@@ -162,6 +164,8 @@ enum {
SMC_NLA_LGR_D_V2_COMMON, /* nest */
SMC_NLA_LGR_D_EXT_GID, /* u64 */
SMC_NLA_LGR_D_PEER_EXT_GID, /* u64 */
+ SMC_NLA_LGR_D_SNDBUF_ALLOC, /* uint */
+ SMC_NLA_LGR_D_DMB_ALLOC, /* uint */
__SMC_NLA_LGR_D_MAX,
SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
};
@@ -249,6 +253,8 @@ enum {
SMC_NLA_STATS_T_TX_BYTES, /* u64 */
SMC_NLA_STATS_T_RX_CNT, /* u64 */
SMC_NLA_STATS_T_TX_CNT, /* u64 */
+ SMC_NLA_STATS_T_RX_RMB_USAGE, /* uint */
+ SMC_NLA_STATS_T_TX_RMB_USAGE, /* uint */
__SMC_NLA_STATS_T_MAX,
SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1
};
diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h
index ca56e477d161..ee4ac812b8f8 100644
--- a/include/uapi/linux/spi/spi.h
+++ b/include/uapi/linux/spi/spi.h
@@ -28,7 +28,8 @@
#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */
#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */
#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */
-#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave mosi line low when idle */
+#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave MOSI line low when idle */
+#define SPI_MOSI_IDLE_HIGH _BITUL(18) /* leave MOSI line high when idle */
/*
* All the bits defined above should be covered by SPI_MODE_USER_MASK.
@@ -38,6 +39,6 @@
* These bits must not overlap. A static assert check should make sure of that.
* If adding extra bits, make sure to increase the bit index below as well.
*/
-#define SPI_MODE_USER_MASK (_BITUL(18) - 1)
+#define SPI_MODE_USER_MASK (_BITUL(19) - 1)
#endif /* _UAPI_SPI_H */
diff --git a/include/uapi/linux/uio.h b/include/uapi/linux/uio.h
index 059b1a9147f4..649739e0c404 100644
--- a/include/uapi/linux/uio.h
+++ b/include/uapi/linux/uio.h
@@ -20,6 +20,24 @@ struct iovec
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
+struct dmabuf_cmsg {
+	__u64	frag_offset;	/* offset into the dmabuf where the frag starts */
+ __u32 frag_size; /* size of the frag. */
+ __u32 frag_token; /* token representing this frag for
+ * DEVMEM_DONTNEED.
+ */
+ __u32 dmabuf_id; /* dmabuf id this frag belongs to. */
+ __u32 flags; /* Currently unused. Reserved for future
+ * uses.
+ */
+};
+
+struct dmabuf_token {
+ __u32 token_start;
+ __u32 token_count;
+};
+
/*
* UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
*/
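
For context, a hedged sketch of how these two structures pair up on a receive path: the kernel describes each received fragment with a struct dmabuf_cmsg delivered as ancillary data, and userspace hands the fragment back once consumed using a struct dmabuf_token. The cmsg parsing below is generic; SO_DEVMEM_DONTNEED is an assumption taken from the related devmem series and is not defined in this hunk:

#include <sys/socket.h>

void consume_frags(int sock, struct msghdr *msg)
{
	struct cmsghdr *cm;

	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
		struct dmabuf_cmsg *dc = (struct dmabuf_cmsg *)CMSG_DATA(cm);
		struct dmabuf_token tok = {
			.token_start = dc->frag_token,
			.token_count = 1,
		};

		/* ... read dc->frag_size bytes at dc->frag_offset ... */

		/* Return the fragment to the kernel (assumed option name). */
		setsockopt(sock, SOL_SOCKET, SO_DEVMEM_DONTNEED,
			   &tok, sizeof(tok));
	}
}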
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index f8a8d6b3c521..6073858d52a2 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -282,7 +282,10 @@ struct vmmdev_hgcm_pagelist {
__u32 flags; /** VMMDEV_HGCM_F_PARM_*. */
__u16 offset_first_page; /** Data offset in the first page. */
__u16 page_count; /** Number of pages. */
- __u64 pages[1]; /** Page addresses. */
+ union {
+ __u64 unused; /** Deprecated place-holder for first "pages" entry. */
+ __DECLARE_FLEX_ARRAY(__u64, pages); /** Page addresses. */
+ };
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8);
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 842bf1201ac4..71edf2c70cc3 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -19,6 +19,7 @@ enum vdpa_command {
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
VDPA_CMD_DEV_VSTATS_GET,
+ VDPA_CMD_DEV_ATTR_SET,
};
enum vdpa_attr {
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 4e91362da6da..27239cb64065 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -502,6 +502,7 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
+#define V4L2_CAP_EDID 0x02000000 /* Is an EDID-only device */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -854,6 +855,7 @@ struct v4l2_pix_format {
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+#define V4L2_META_FMT_RK_ISP1_EXT_PARAMS v4l2_fourcc('R', 'K', '1', 'E') /* Rockchip ISP1 3A Extensible Parameters */
/* Vendor specific - used for RaspberryPi PiSP */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index ddaa45e723c4..ee35a372805d 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -71,7 +71,13 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
-#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */
+#define VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocation */
+#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */
+#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */
+#define VIRTIO_BALLOON_S_NR 16
#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
@@ -83,7 +89,13 @@ struct virtio_balloon_config {
VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
- VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \
+ VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \
+ VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \
}
#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0e21f3998108..bf2c9cabd207 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -311,6 +311,7 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* 3 is reserved for gfxstream */
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index e61104f35d73..faa9d62b3b30 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -66,6 +66,7 @@ enum bnxt_re_wqe_mode {
enum {
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+ BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT = 0x02,
};
struct bnxt_re_uctx_req {
@@ -118,10 +119,16 @@ struct bnxt_re_resize_cq_req {
__aligned_u64 cq_va;
};
+enum bnxt_re_qp_mask {
+ BNXT_RE_QP_REQ_MASK_VAR_WQE_SQ_SLOTS = 0x1,
+};
+
struct bnxt_re_qp_req {
__aligned_u64 qpsva;
__aligned_u64 qprva;
__aligned_u64 qp_handle;
+ __aligned_u64 comp_mask;
+ __u32 sq_slots;
};
struct bnxt_re_qp_resp {
@@ -134,8 +141,14 @@ struct bnxt_re_srq_req {
__aligned_u64 srq_handle;
};
+enum bnxt_re_srq_mask {
+ BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_srq_resp {
__u32 srqid;
+ __u32 rsvd; /* padding */
+ __aligned_u64 comp_mask;
};
enum bnxt_re_shpg_offt {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 5b74d6534899..fd2e4a3a56b3 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -274,6 +274,10 @@ enum mlx5_ib_create_cq_attrs {
MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX = UVERBS_ID_DRIVER_NS_WITH_UHW,
};
+enum mlx5_ib_reg_dmabuf_mr_attrs {
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
@@ -344,6 +348,7 @@ enum mlx5_ib_pd_methods {
enum mlx5_ib_device_methods {
MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
};
enum mlx5_ib_query_port_attrs {
@@ -351,4 +356,8 @@ enum mlx5_ib_query_port_attrs {
MLX5_IB_ATTR_QUERY_PORT,
};
+enum mlx5_ib_get_data_direct_sysfs_path_attrs {
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 3189c7f08d17..7c233df475e7 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -54,6 +54,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
};
+enum mlx5_ib_uapi_reg_dmabuf_flags {
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT = 1 << 0,
+};
+
struct mlx5_ib_uapi_devx_async_cmd_hdr {
__aligned_u64 wr_id;
__u8 out_data[];
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 2f37568f5556..39be09c0ffbb 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -15,6 +15,7 @@ enum {
enum {
RDMA_NL_GROUP_IWPM = 2,
RDMA_NL_GROUP_LS,
+ RDMA_NL_GROUP_NOTIFY,
RDMA_NL_NUM_GROUPS
};
@@ -305,6 +306,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_DELDEV,
+ RDMA_NLDEV_CMD_MONITOR,
+
RDMA_NLDEV_NUM_OPS
};
@@ -574,6 +577,9 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_EVENT_TYPE, /* u8 */
+
+ RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, /* u8 */
/*
* Always the end
*/
@@ -624,4 +630,14 @@ enum rdma_nl_name_assign_type {
RDMA_NAME_ASSIGN_TYPE_USER = 1, /* Provided by user-space */
};
+/*
+ * Supported rdma monitoring event types.
+ */
+enum rdma_nl_notify_event_type {
+ RDMA_REGISTER_EVENT,
+ RDMA_UNREGISTER_EVENT,
+ RDMA_NETDEV_ATTACH_EVENT,
+ RDMA_NETDEV_DETACH_EVENT,
+};
+
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 39b37edcf813..bc30c1f2a109 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -461,6 +461,8 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
+#define SNDRV_SEQ_PORT_FLG_IS_MIDI1 (1<<3) /* Keep MIDI 1.0 protocol */
+
/* port direction */
#define SNDRV_SEQ_PORT_DIR_UNKNOWN 0
#define SNDRV_SEQ_PORT_DIR_INPUT 1
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 8bf7e8a0eb6f..4cd513215bcd 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -869,7 +869,7 @@ struct snd_ump_block_info {
* Timer section - /dev/snd/timer
*/
-#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
+#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
enum {
SNDRV_TIMER_CLASS_NONE = -1,
@@ -894,6 +894,7 @@ enum {
#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */
#define SNDRV_TIMER_GLOBAL_HPET 2
#define SNDRV_TIMER_GLOBAL_HRTIMER 3
+#define SNDRV_TIMER_GLOBAL_UDRIVEN 4
/* info flags */
#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */
@@ -974,6 +975,18 @@ struct snd_timer_status {
};
#endif
+/*
+ * This structure describes a userspace-driven timer. Such timers are purely
+ * virtual and can only be triggered from software (for instance, by a
+ * userspace application).
+ */
+struct snd_timer_uinfo {
+ /* To pretend being a normal timer, we need to know the resolution in ns. */
+ __u64 resolution;
+ int fd;
+ unsigned int id;
+ unsigned char reserved[16];
+};
+
#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
#define SNDRV_TIMER_IOCTL_TREAD_OLD _IOW('T', 0x02, int)
@@ -990,6 +1003,8 @@ struct snd_timer_status {
#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
#define SNDRV_TIMER_IOCTL_TREAD64 _IOW('T', 0xa4, int)
+#define SNDRV_TIMER_IOCTL_CREATE _IOWR('T', 0xa5, struct snd_timer_uinfo)
+#define SNDRV_TIMER_IOCTL_TRIGGER _IO('T', 0xa6)
#if __BITS_PER_LONG == 64
#define SNDRV_TIMER_IOCTL_TREAD SNDRV_TIMER_IOCTL_TREAD_OLD
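
A hedged userspace sketch of the new userspace-driven timer (editorial; it assumes the kernel fills uinfo.fd with a descriptor that accepts SNDRV_TIMER_IOCTL_TRIGGER, which this hunk does not spell out):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int utimer_demo(void)
{
	struct snd_timer_uinfo uinfo = {
		.resolution = 1000000,	/* pretend 1 ms resolution */
	};
	int fd = open("/dev/snd/timer", O_RDWR);

	if (fd < 0 || ioctl(fd, SNDRV_TIMER_IOCTL_CREATE, &uinfo) < 0)
		return -1;

	/* Fire the virtual timer purely from software. */
	return ioctl(uinfo.fd, SNDRV_TIMER_IOCTL_TRIGGER);
}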
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 853e95957c31..e594abe5d05f 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -597,7 +597,7 @@ struct ufs_dev_info {
};
/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
+ * This enum is used in string mapping in ufs_trace.h.
*/
enum ufs_trace_str_t {
UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
@@ -607,7 +607,7 @@ enum ufs_trace_str_t {
/*
* Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in include/trace/events/ufs.h for UFS command trace.
+ * used in ufs_trace.h for UFS command trace.
*/
enum ufs_trace_tsf_t {
UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 0fd2aebac728..3f68ae3e4330 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -17,6 +17,7 @@
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 9917c7743d80..27364c4a6ef9 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -25,8 +25,9 @@ enum {
REG_CONTROLLER_CAPABILITIES = 0x00,
REG_MCQCAP = 0x04,
REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
+ REG_EXT_CONTROLLER_CAPABILITIES = 0x0C,
+ REG_CONTROLLER_PID = 0x10,
+ REG_CONTROLLER_MID = 0x14,
REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
diff --git a/include/vdso/getrandom.h b/include/vdso/getrandom.h
index a8b7c14b0ae0..6ca4d6de9e46 100644
--- a/include/vdso/getrandom.h
+++ b/include/vdso/getrandom.h
@@ -43,4 +43,32 @@ struct vgetrandom_state {
bool in_use;
};
+/**
+ * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack.
+ * @dst_bytes: Destination buffer to hold @nblocks * 64 bytes of output.
+ * @key: 32-byte input key.
+ * @counter: 8-byte counter, read on input and updated on return.
+ * @nblocks: Number of blocks to generate.
+ *
+ * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write
+ * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data
+ * leaking into forked child processes.
+ */
+extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks);
+
+/**
+ * __vdso_getrandom - Architecture-specific vDSO implementation of the getrandom() syscall.
+ * @buffer: Passed to __cvdso_getrandom().
+ * @len: Passed to __cvdso_getrandom().
+ * @flags: Passed to __cvdso_getrandom().
+ * @opaque_state: Passed to __cvdso_getrandom().
+ * @opaque_len: Passed to __cvdso_getrandom().
+ *
+ * This function is implemented by making a single call to __cvdso_getrandom(), whose
+ * documentation may be consulted for more information.
+ *
+ * Returns: The return value of __cvdso_getrandom().
+ */
+extern ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
+
#endif /* _VDSO_GETRANDOM_H */
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 73501149439d..3ddb03bb05cb 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
#include <vdso/datapage.h>
static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
new file mode 100644
index 000000000000..eee3d2a4dbe4
--- /dev/null
+++ b/include/vdso/unaligned.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_UNALIGNED_H
+#define __VDSO_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#endif /* __VDSO_UNALIGNED_H */
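
The helpers read and write through a packed single-member struct, so the compiler emits accesses that are safe at any alignment. A short usage sketch (editorial; wrapper names are illustrative):

/* Load a u32 from a byte stream with no alignment guarantee. */
static inline u32 get_unaligned_u32(const void *p)
{
	return __get_unaligned_t(u32, p);
}

/* Store a u16 at an arbitrary offset into a buffer. */
static inline void put_unaligned_u16(u16 val, void *p)
{
	__put_unaligned_t(u16, val, p);
}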
diff --git a/include/video/vga.h b/include/video/vga.h
index 947c0abd04ef..468764d6727a 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -197,9 +197,26 @@ struct vgastate {
extern int save_vga(struct vgastate *state);
extern int restore_vga(struct vgastate *state);
+static inline unsigned char vga_mm_r (void __iomem *regbase, unsigned short port)
+{
+ return readb (regbase + port);
+}
+
+static inline void vga_mm_w (void __iomem *regbase, unsigned short port, unsigned char val)
+{
+ writeb (val, regbase + port);
+}
+
+static inline void vga_mm_w_fast (void __iomem *regbase, unsigned short port,
+ unsigned char reg, unsigned char val)
+{
+ writew (VGA_OUT16VAL (val, reg), regbase + port);
+}
+
/*
* generic VGA port read/write
*/
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_r (unsigned short port)
{
@@ -217,22 +234,6 @@ static inline void vga_io_w_fast (unsigned short port, unsigned char reg,
outw(VGA_OUT16VAL (val, reg), port);
}
-static inline unsigned char vga_mm_r (void __iomem *regbase, unsigned short port)
-{
- return readb (regbase + port);
-}
-
-static inline void vga_mm_w (void __iomem *regbase, unsigned short port, unsigned char val)
-{
- writeb (val, regbase + port);
-}
-
-static inline void vga_mm_w_fast (void __iomem *regbase, unsigned short port,
- unsigned char reg, unsigned char val)
-{
- writew (VGA_OUT16VAL (val, reg), regbase + port);
-}
-
static inline unsigned char vga_r (void __iomem *regbase, unsigned short port)
{
if (regbase)
@@ -258,8 +259,25 @@ static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
else
vga_io_w_fast (port, reg, val);
}
+#else /* CONFIG_HAS_IOPORT */
+static inline unsigned char vga_r (void __iomem *regbase, unsigned short port)
+{
+ return vga_mm_r (regbase, port);
+}
+
+static inline void vga_w (void __iomem *regbase, unsigned short port, unsigned char val)
+{
+ vga_mm_w (regbase, port, val);
+}
+static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
+ unsigned char reg, unsigned char val)
+{
+ vga_mm_w_fast (regbase, port, reg, val);
+}
+#endif /* CONFIG_HAS_IOPORT */
+
/*
* VGA CRTC register read/write
*/
@@ -280,6 +298,7 @@ static inline void vga_wcrt (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rcrt (unsigned char reg)
{
vga_io_w (VGA_CRT_IC, reg);
@@ -295,6 +314,7 @@ static inline void vga_io_wcrt (unsigned char reg, unsigned char val)
vga_io_w (VGA_CRT_DC, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rcrt (void __iomem *regbase, unsigned char reg)
{
@@ -333,6 +353,7 @@ static inline void vga_wseq (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rseq (unsigned char reg)
{
vga_io_w (VGA_SEQ_I, reg);
@@ -348,6 +369,7 @@ static inline void vga_io_wseq (unsigned char reg, unsigned char val)
vga_io_w (VGA_SEQ_D, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rseq (void __iomem *regbase, unsigned char reg)
{
@@ -385,6 +407,7 @@ static inline void vga_wgfx (void __iomem *regbase, unsigned char reg, unsigned
#endif /* VGA_OUTW_WRITE */
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rgfx (unsigned char reg)
{
vga_io_w (VGA_GFX_I, reg);
@@ -400,6 +423,7 @@ static inline void vga_io_wgfx (unsigned char reg, unsigned char val)
vga_io_w (VGA_GFX_D, val);
#endif /* VGA_OUTW_WRITE */
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rgfx (void __iomem *regbase, unsigned char reg)
{
@@ -434,6 +458,7 @@ static inline void vga_wattr (void __iomem *regbase, unsigned char reg, unsigned
vga_w (regbase, VGA_ATT_W, val);
}
+#ifdef CONFIG_HAS_IOPORT
static inline unsigned char vga_io_rattr (unsigned char reg)
{
vga_io_w (VGA_ATT_IW, reg);
@@ -445,6 +470,7 @@ static inline void vga_io_wattr (unsigned char reg, unsigned char val)
vga_io_w (VGA_ATT_IW, reg);
vga_io_w (VGA_ATT_W, val);
}
+#endif /* CONFIG_HAS_IOPORT */
static inline unsigned char vga_mm_rattr (void __iomem *regbase, unsigned char reg)
{
diff --git a/init/Kconfig b/init/Kconfig
index 5783a0b87517..fbd0cb06a50a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,6 +60,13 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
+config RUSTC_VERSION
+ int
+ default $(shell,$(srctree)/scripts/rustc-version.sh $(RUSTC))
+ help
+	  This does not depend on `RUST` since the `RUST` option itself may
+	  need to use this version in a `depends on` expression.
+
config RUST_IS_AVAILABLE
def_bool $(success,$(srctree)/scripts/rust_is_available.sh)
help
@@ -310,8 +317,9 @@ config KERNEL_XZ
BCJ filters which can improve compression ratio of executable
code. The size of the kernel is about 30% smaller with XZ in
comparison to gzip. On architectures for which there is a BCJ
- filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
- will create a few percent smaller kernel than plain LZMA.
+ filter (i386, x86_64, ARM, ARM64, RISC-V, big endian PowerPC,
+ and SPARC), XZ will create a few percent smaller kernel than
+ plain LZMA.
The speed is about the same as with LZMA: The decompression
speed of XZ is better than that of bzip2 but worse than gzip
@@ -986,7 +994,7 @@ config MEMCG_V1
going to shrink due to deprecation process. New deployments with v1
controller are highly discouraged.
- San N is unsure.
+ Say N if unsure.
config BLK_CGROUP
bool "IO controller"
@@ -1024,9 +1032,13 @@ menuconfig CGROUP_SCHED
tasks.
if CGROUP_SCHED
+config GROUP_SCHED_WEIGHT
+ def_bool n
+
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
+ select GROUP_SCHED_WEIGHT
default CGROUP_SCHED
config CFS_BANDWIDTH
@@ -1051,6 +1063,12 @@ config RT_GROUP_SCHED
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information.
+config EXT_GROUP_SCHED
+ bool
+ depends on SCHED_CLASS_EXT && CGROUP_SCHED
+ select GROUP_SCHED_WEIGHT
+ default y
+
endif #CGROUP_SCHED
config SCHED_MM_CID
@@ -1143,6 +1161,19 @@ config CPUSETS
Say N if unsure.
+config CPUSETS_V1
+ bool "Legacy cgroup v1 cpusets controller"
+ depends on CPUSETS
+ default n
+ help
+	  Legacy cgroup v1 cpusets controller, deprecated by the cgroup v2
+	  implementation. It remains available for legacy applications that
+	  haven't migrated to the new cgroup v2 interface yet. If you
+ do not have any such application then you are completely fine leaving
+ this option disabled.
+
+ Say N if unsure.
+
config PROC_PID_CPUSET
bool "Include legacy /proc/<pid>/cpuset file"
depends on CPUSETS
@@ -1687,6 +1718,19 @@ config IO_URING
applications to submit and complete IO through submission and
completion rings that are shared between the kernel and application.
+config GCOV_PROFILE_URING
+ bool "Enable GCOV profiling on the io_uring subsystem"
+ depends on GCOV_KERNEL
+ help
+ Enable GCOV profiling on the io_uring subsystem, to facilitate
+ code coverage testing.
+
+ If unsure, say N.
+
+ Note that this will have a negative impact on the performance of
+ the io_uring subsystem, hence this should only be enabled for
+ specific test purposes.
+
config ADVISE_SYSCALLS
bool "Enable madvise/fadvise syscalls" if EXPERT
default y
@@ -1898,12 +1942,14 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
- depends on !CFI_CLANG
depends on !MODVERSIONS
- depends on !GCC_PLUGINS
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on !RANDSTRUCT
- depends on !SHADOW_CALL_STACK
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ depends on !CFI_CLANG || RUSTC_VERSION >= 107900 && $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG
+ depends on !CALL_PADDING || RUSTC_VERSION >= 108000
+ depends on !KASAN_SW_TAGS
help
Enables Rust support in the kernel.
@@ -1920,7 +1966,9 @@ config RUST
config RUSTC_VERSION_TEXT
string
depends on RUST
- default "$(shell,$(RUSTC) --version 2>/dev/null)"
+ default "$(RUSTC_VERSION_TEXT)"
+ help
+ See `CC_VERSION_TEXT`.
config BINDGEN_VERSION_TEXT
string
diff --git a/init/init_task.c b/init/init_task.c
index eeb110c65fe2..136a8231355a 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -6,6 +6,7 @@
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
+#include <linux/sched/ext.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
@@ -29,7 +30,7 @@ static struct signal_struct init_signals = {
.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
#ifdef CONFIG_POSIX_TIMERS
- .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
+ .posix_timers = HLIST_HEAD_INIT,
.cputimer = {
.cputime_atomic = INIT_CPUTIME_ATOMIC,
},
@@ -99,6 +100,17 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_CGROUP_SCHED
.sched_task_group = &root_task_group,
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+ .scx = {
+ .dsq_list.node = LIST_HEAD_INIT(init_task.scx.dsq_list.node),
+ .sticky_cpu = -1,
+ .holding_cpu = -1,
+ .runnable_node = LIST_HEAD_INIT(init_task.scx.runnable_node),
+ .runnable_at = INITIAL_JIFFIES,
+ .ddsp_dsq_id = SCX_DSQ_INVALID,
+ .slice = SCX_SLICE_DFL,
+ },
+#endif
.ptraced = LIST_HEAD_INIT(init_task.ptraced),
.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
.real_parent = &init_task,
diff --git a/init/initramfs.c b/init/initramfs.c
index 814241b64827..bc911e466d5b 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -17,6 +17,7 @@
#include <linux/namei.h>
#include <linux/init_syscalls.h>
#include <linux/umh.h>
+#include <linux/security.h>
#include "do_mounts.h"
@@ -712,6 +713,8 @@ static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
}
done:
+ security_initramfs_populated();
+
/*
* If the initrd region is overlapped with crashkernel reserved region,
* free only memory that is not part of crashkernel region.
diff --git a/init/main.c b/init/main.c
index 206acdde51f5..c4778edae797 100644
--- a/init/main.c
+++ b/init/main.c
@@ -922,8 +922,11 @@ void start_kernel(void)
boot_cpu_init();
page_address_init();
pr_notice("%s", linux_banner);
- early_security_init();
setup_arch(&command_line);
+ /* Static keys and static calls are needed by LSMs */
+ jump_label_init();
+ static_call_init();
+ early_security_init();
setup_boot_config();
setup_command_line(command_line);
setup_nr_cpu_ids();
@@ -934,7 +937,6 @@ void start_kernel(void)
pr_notice("Kernel command line: %s\n", saved_command_line);
/* parameters may set static keys */
- jump_label_init();
parse_early_param();
after_dashes = parse_args("Booting kernel",
static_command_line, __start___param,
diff --git a/io_uring/Makefile b/io_uring/Makefile
index 61923e11c767..53167bef37d7 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -2,6 +2,10 @@
#
# Makefile for io_uring
+ifdef CONFIG_GCOV_PROFILE_URING
+GCOV_PROFILE := y
+endif
+
obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
tctx.o filetable.o rw.o net.o poll.o \
eventfd.o uring_cmd.o openclose.o \
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index b9384503a2b7..e37fddd5d9ce 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -15,7 +15,7 @@ struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1;
struct rcu_head rcu;
- atomic_t refs;
+ refcount_t refs;
atomic_t ops;
};
@@ -37,7 +37,7 @@ static void io_eventfd_do_signal(struct rcu_head *rcu)
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
io_eventfd_free(rcu);
}
@@ -63,7 +63,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
*/
if (unlikely(!ev_fd))
return;
- if (!atomic_inc_not_zero(&ev_fd->refs))
+ if (!refcount_inc_not_zero(&ev_fd->refs))
return;
if (ev_fd->eventfd_async && !io_wq_current_is_worker())
goto out;
@@ -77,7 +77,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
}
}
out:
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
}
@@ -126,6 +126,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
if (IS_ERR(ev_fd->cq_ev_fd)) {
int ret = PTR_ERR(ev_fd->cq_ev_fd);
+
kfree(ev_fd);
return ret;
}
@@ -136,7 +137,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
ev_fd->eventfd_async = eventfd_async;
ctx->has_evfd = true;
- atomic_set(&ev_fd->refs, 1);
+ refcount_set(&ev_fd->refs, 1);
atomic_set(&ev_fd->ops, 0);
rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
return 0;
@@ -151,7 +152,7 @@ int io_eventfd_unregister(struct io_ring_ctx *ctx)
if (ev_fd) {
ctx->has_evfd = false;
rcu_assign_pointer(ctx->io_ev_fd, NULL);
- if (atomic_dec_and_test(&ev_fd->refs))
+ if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
return 0;
}
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index b1e0e0d85349..6b1247664b35 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -177,9 +177,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
- unsigned int len = buf->ubuf_end - buf->ubuf;
- seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
+ seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
@@ -221,7 +220,19 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
cqe->user_data, cqe->res, cqe->flags);
}
-
spin_unlock(&ctx->completion_lock);
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (ctx->napi_enabled) {
+ seq_puts(m, "NAPI:\tenabled\n");
+ seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
+ if (ctx->napi_prefer_busy_poll)
+ seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
+ else
+ seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
+ } else {
+ seq_puts(m, "NAPI:\tdisabled\n");
+ }
+#endif
}
#endif
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index f1e7c670add8..a38f36b68060 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
@@ -1167,7 +1168,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
goto err;
- cpumask_copy(wq->cpu_mask, cpu_possible_mask);
+ cpuset_cpus_allowed(data->task, wq->cpu_mask);
wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC);
@@ -1322,17 +1323,29 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
+ cpumask_var_t allowed_mask;
+ int ret = 0;
+
if (!tctx || !tctx->io_wq)
return -EINVAL;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+ return -ENOMEM;
+
rcu_read_lock();
- if (mask)
- cpumask_copy(tctx->io_wq->cpu_mask, mask);
- else
- cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
+ cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
+ if (mask) {
+ if (cpumask_subset(mask, allowed_mask))
+ cpumask_copy(tctx->io_wq->cpu_mask, mask);
+ else
+ ret = -EINVAL;
+ } else {
+ cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);
+ }
rcu_read_unlock();
- return 0;
+ free_cpumask_var(allowed_mask);
+ return ret;
}
/*
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3942db160f18..feb61d68dca6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -533,6 +533,17 @@ static void io_queue_iowq(struct io_kiocb *req)
io_queue_linked_timeout(link);
}
+static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
+{
+ io_queue_iowq(req);
+}
+
+void io_req_queue_iowq(struct io_kiocb *req)
+{
+ req->io_task_work.func = io_req_queue_iowq_tw;
+ io_req_task_work_add(req);
+}
+
static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
while (!list_empty(&ctx->defer_list)) {
@@ -624,6 +635,21 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
}
list_del(&ocqe->list);
kfree(ocqe);
+
+ /*
+ * For silly syzbot cases that deliberately overflow by huge
+	 * amounts, check if we need to resched and, if so, drop and
+	 * reacquire the locks. Nothing real would ever hit this.
+ * Ideally we'd have a non-posting unlock for this, but hard
+ * to care for a non-real case.
+ */
+ if (need_resched()) {
+ io_cq_unlock_post(ctx);
+ mutex_unlock(&ctx->uring_lock);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ io_cq_lock(ctx);
+ }
}
if (list_empty(&ctx->cq_overflow_list)) {
@@ -904,7 +930,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
lockdep_assert_held(&req->ctx->uring_lock);
req_set_fail(req);
- io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
if (def->fail)
def->fail(req);
io_req_complete_defer(req);
@@ -2153,7 +2179,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
* conditions are true (normal request), then just queue it.
*/
if (unlikely(link->head)) {
- trace_io_uring_link(req, link->head);
+ trace_io_uring_link(req, link->last);
link->last->link = req;
link->last = req;
@@ -2350,22 +2376,92 @@ static bool current_pending_io(void)
return percpu_counter_read_positive(&tctx->inflight);
}
-/* when returns >0, the caller should retry */
-static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq)
+static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
{
- int ret;
+ struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
- if (unlikely(READ_ONCE(ctx->check_cq)))
- return 1;
- if (unlikely(!llist_empty(&ctx->work_llist)))
- return 1;
- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
- return 1;
- if (unlikely(task_sigpending(current)))
- return -EINTR;
- if (unlikely(io_should_wake(iowq)))
- return 0;
+ WRITE_ONCE(iowq->hit_timeout, 1);
+ iowq->min_timeout = 0;
+ wake_up_process(iowq->wq.private);
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Handle the min_timeout portion. If we saw any timeouts or events, or have
+ * work, wake up. If not, and we have a normal timeout, switch to that and keep
+ * sleeping.
+ */
+static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
+{
+ struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
+ struct io_ring_ctx *ctx = iowq->ctx;
+
+	/* no general timeout, or it is no longer than min_timeout: we are done */
+ if (iowq->timeout == KTIME_MAX ||
+ ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
+ goto out_wake;
+ /* work we may need to run, wake function will see if we need to wake */
+ if (io_has_work(ctx))
+ goto out_wake;
+ /* got events since we started waiting, min timeout is done */
+ if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
+ goto out_wake;
+ /* if we have any events and min timeout expired, we're done */
+ if (io_cqring_events(ctx))
+ goto out_wake;
+
+ /*
+ * If using deferred task_work running and application is waiting on
+ * more than one request, ensure we reset it now where we are switching
+ * to normal sleeps. Any request completion post min_wait should wake
+ * the task and return.
+ */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, 1);
+ smp_mb();
+ if (!llist_empty(&ctx->work_llist))
+ goto out_wake;
+ }
+
+ iowq->t.function = io_cqring_timer_wakeup;
+ hrtimer_set_expires(timer, iowq->timeout);
+ return HRTIMER_RESTART;
+out_wake:
+ return io_cqring_timer_wakeup(timer);
+}
+
+static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
+ clockid_t clock_id, ktime_t start_time)
+{
+ ktime_t timeout;
+
+ hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
+ if (iowq->min_timeout) {
+ timeout = ktime_add_ns(iowq->min_timeout, start_time);
+ iowq->t.function = io_cqring_min_timer_wakeup;
+ } else {
+ timeout = iowq->timeout;
+ iowq->t.function = io_cqring_timer_wakeup;
+ }
+
+ hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
+ hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
+
+ if (!READ_ONCE(iowq->hit_timeout))
+ schedule();
+
+ hrtimer_cancel(&iowq->t);
+ destroy_hrtimer_on_stack(&iowq->t);
+ __set_current_state(TASK_RUNNING);
+
+ return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
+}
+
+static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ ktime_t start_time)
+{
+ int ret = 0;
/*
* Mark us as being in io_wait if we have pending requests, so cpufreq
@@ -2374,25 +2470,50 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
*/
if (current_pending_io())
current->in_iowait = 1;
- ret = 0;
- if (iowq->timeout == KTIME_MAX)
+ if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
+ ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
+ else
schedule();
- else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
- ret = -ETIME;
current->in_iowait = 0;
return ret;
}
+/* If this returns > 0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ ktime_t start_time)
+{
+ if (unlikely(READ_ONCE(ctx->check_cq)))
+ return 1;
+ if (unlikely(!llist_empty(&ctx->work_llist)))
+ return 1;
+ if (unlikely(task_work_pending(current)))
+ return 1;
+ if (unlikely(task_sigpending(current)))
+ return -EINTR;
+ if (unlikely(io_should_wake(iowq)))
+ return 0;
+
+ return __io_cqring_wait_schedule(ctx, iowq, start_time);
+}
+
+struct ext_arg {
+ size_t argsz;
+ struct __kernel_timespec __user *ts;
+ const sigset_t __user *sig;
+ ktime_t min_time;
+};
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
*/
-static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
- const sigset_t __user *sig, size_t sigsz,
- struct __kernel_timespec __user *uts)
+static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ struct ext_arg *ext_arg)
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
+ ktime_t start_time;
int ret;
if (!io_allowed_run_tw(ctx))
@@ -2410,30 +2531,33 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
iowq.ctx = ctx;
- iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+ iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ iowq.hit_timeout = 0;
+ iowq.min_timeout = ext_arg->min_time;
iowq.timeout = KTIME_MAX;
+ start_time = io_get_time(ctx);
- if (uts) {
+ if (ext_arg->ts) {
struct timespec64 ts;
- ktime_t dt;
- if (get_timespec64(&ts, uts))
+ if (get_timespec64(&ts, ext_arg->ts))
return -EFAULT;
- dt = timespec64_to_ktime(ts);
- iowq.timeout = ktime_add(dt, ktime_get());
- io_napi_adjust_timeout(ctx, &iowq, dt);
+ iowq.timeout = timespec64_to_ktime(ts);
+ if (!(flags & IORING_ENTER_ABS_TIMER))
+ iowq.timeout = ktime_add(iowq.timeout, start_time);
}
- if (sig) {
+ if (ext_arg->sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
- sigsz);
+ ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
+ ext_arg->argsz);
else
#endif
- ret = set_user_sigmask(sig, sigsz);
+ ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
if (ret)
return ret;
@@ -2443,8 +2567,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events);
do {
- int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
unsigned long check_cq;
+ int nr_wait;
+
+ /* if min timeout has been hit, don't reset wait count */
+ if (!iowq.hit_timeout)
+ nr_wait = (int) iowq.cq_tail -
+ READ_ONCE(ctx->rings->cq.tail);
+ else
+ nr_wait = 1;
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
atomic_set(&ctx->cq_wait_nr, nr_wait);
@@ -2454,7 +2585,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
TASK_INTERRUPTIBLE);
}
- ret = io_cqring_wait_schedule(ctx, &iowq);
+ ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
__set_current_state(TASK_RUNNING);
atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
@@ -2463,9 +2594,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
* If we got woken because of task_work being processed, run it
* now rather than let the caller do another wait loop.
*/
- io_run_task_work();
if (!llist_empty(&ctx->work_llist))
io_run_local_work(ctx, nr_wait);
+ io_run_task_work();
/*
* Non-local task_work will be run on exit to userspace, but
@@ -3112,9 +3243,8 @@ static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t a
return 0;
}
-static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
- struct __kernel_timespec __user **ts,
- const sigset_t __user **sig)
+static int io_get_ext_arg(unsigned flags, const void __user *argp,
+ struct ext_arg *ext_arg)
{
struct io_uring_getevents_arg arg;
@@ -3123,8 +3253,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
* is just a pointer to the sigset_t.
*/
if (!(flags & IORING_ENTER_EXT_ARG)) {
- *sig = (const sigset_t __user *) argp;
- *ts = NULL;
+ ext_arg->sig = (const sigset_t __user *) argp;
+ ext_arg->ts = NULL;
return 0;
}
@@ -3132,15 +3262,14 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
* EXT_ARG is set - ensure we agree on the size of it and copy in our
* timespec and sigset_t pointers if good.
*/
- if (*argsz != sizeof(arg))
+ if (ext_arg->argsz != sizeof(arg))
return -EINVAL;
if (copy_from_user(&arg, argp, sizeof(arg)))
return -EFAULT;
- if (arg.pad)
- return -EINVAL;
- *sig = u64_to_user_ptr(arg.sigmask);
- *argsz = arg.sigmask_sz;
- *ts = u64_to_user_ptr(arg.ts);
+ ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
+ ext_arg->sig = u64_to_user_ptr(arg.sigmask);
+ ext_arg->argsz = arg.sigmask_sz;
+ ext_arg->ts = u64_to_user_ptr(arg.ts);
return 0;
}
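For reference, a minimal userspace sketch of the min-timeout wait wired up above (illustrative only, not part of the patch; it assumes the matching uapi change that repurposes the old 'pad' field of struct io_uring_getevents_arg as 'min_wait_usec', plus the usual syscall and io_uring headers):

	struct __kernel_timespec ts = { .tv_sec = 1 };	/* overall cap: 1s */
	struct io_uring_getevents_arg arg = {
		.sigmask	= 0,			/* no sigmask swap */
		.sigmask_sz	= _NSIG / 8,
		.min_wait_usec	= 1000,			/* try for 8 CQEs for 1ms... */
		.ts		= (__u64)(uintptr_t)&ts,
	};
	/* ...after the min timeout fires, a single CQE is enough to return */
	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 8,
			  IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
			  &arg, sizeof(arg));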
@@ -3154,7 +3283,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
- IORING_ENTER_REGISTERED_RING)))
+ IORING_ENTER_REGISTERED_RING |
+ IORING_ENTER_ABS_TIMER)))
return -EINVAL;
/*
@@ -3245,15 +3375,14 @@ iopoll_locked:
}
mutex_unlock(&ctx->uring_lock);
} else {
- const sigset_t __user *sig;
- struct __kernel_timespec __user *ts;
+ struct ext_arg ext_arg = { .argsz = argsz };
- ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
+ ret2 = io_get_ext_arg(flags, argp, &ext_arg);
if (likely(!ret2)) {
min_complete = min(min_complete,
ctx->cq_entries);
- ret2 = io_cqring_wait(ctx, min_complete, sig,
- argsz, ts);
+ ret2 = io_cqring_wait(ctx, min_complete, flags,
+ &ext_arg);
}
}
@@ -3424,6 +3553,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
if (!ctx)
return -ENOMEM;
+ ctx->clockid = CLOCK_MONOTONIC;
+ ctx->clock_offset = 0;
+
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
!(ctx->flags & IORING_SETUP_IOPOLL) &&
!(ctx->flags & IORING_SETUP_SQPOLL))
@@ -3535,7 +3667,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
- IORING_FEAT_RECVSEND_BUNDLE;
+ IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -3638,6 +3770,11 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
static int __init io_uring_init(void)
{
+ struct kmem_cache_args kmem_args = {
+ .useroffset = offsetof(struct io_kiocb, cmd.data),
+ .usersize = sizeof_field(struct io_kiocb, cmd.data),
+ };
+
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
@@ -3722,12 +3859,9 @@ static int __init io_uring_init(void)
* range, and HARDENED_USERCOPY will complain if we haven't
* correctly annotated this range.
*/
- req_cachep = kmem_cache_create_usercopy("io_kiocb",
- sizeof(struct io_kiocb), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC |
- SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
- offsetof(struct io_kiocb, cmd.data),
- sizeof_field(struct io_kiocb, cmd.data), NULL);
+ req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
io_buf_cachep = KMEM_CACHE(io_buffer,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c2acf6180845..9d70b2cf7b1e 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -39,8 +39,12 @@ struct io_wait_queue {
struct wait_queue_entry wq;
struct io_ring_ctx *ctx;
unsigned cq_tail;
+ unsigned cq_min_tail;
unsigned nr_timeouts;
+ int hit_timeout;
+ ktime_t min_timeout;
ktime_t timeout;
+ struct hrtimer t;
#ifdef CONFIG_NET_RX_BUSY_POLL
ktime_t napi_busy_poll_dt;
@@ -90,6 +94,7 @@ int io_uring_alloc_task_context(struct task_struct *task,
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int start, int end);
+void io_req_queue_iowq(struct io_kiocb *req);
int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
@@ -437,6 +442,14 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
return false;
}
+static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
+{
+ if (ctx->clockid == CLOCK_MONOTONIC)
+ return ktime_get();
+
+ return ktime_get_with_offset(ctx->clock_offset);
+}
+
enum {
IO_CHECK_CQ_OVERFLOW_BIT,
IO_CHECK_CQ_DROPPED_BIT,
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index bdfa30b38321..d407576ddfb7 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -70,7 +70,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
return true;
}
-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
{
/*
* We can add this buffer back to two lists:
@@ -88,12 +88,12 @@ void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
struct io_ring_ctx *ctx = req->ctx;
spin_lock(&ctx->completion_lock);
- __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+ __io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
spin_unlock(&ctx->completion_lock);
} else {
lockdep_assert_held(&req->ctx->uring_lock);
- __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+ __io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
}
}
@@ -132,12 +132,6 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
return 1;
}
-static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
- __u16 head, __u16 mask)
-{
- return &br->bufs[head & mask];
-}
-
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer_list *bl,
unsigned int issue_flags)
@@ -171,9 +165,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
* the transfer completes (or if we get -EAGAIN and must poll or
* retry).
*/
- req->flags &= ~REQ_F_BUFFERS_COMMIT;
+ io_kbuf_commit(req, bl, *len, 1);
req->buf_list = NULL;
- bl->head++;
}
return u64_to_user_ptr(buf->addr);
}
@@ -189,7 +182,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
bl = io_buffer_get_list(ctx, req->buf_index);
if (likely(bl)) {
- if (bl->is_buf_ring)
+ if (bl->flags & IOBL_BUF_RING)
ret = io_ring_buffer_select(req, len, bl, issue_flags);
else
ret = io_provided_buffer_select(req, len, bl);
@@ -219,14 +212,25 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
buf = io_ring_head_to_buf(br, head, bl->mask);
if (arg->max_len) {
u32 len = READ_ONCE(buf->len);
- size_t needed;
if (unlikely(!len))
return -ENOBUFS;
- needed = (arg->max_len + len - 1) / len;
- needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
- if (nr_avail > needed)
- nr_avail = needed;
+ /*
+ * Limit incremental buffers to 1 segment. No point trying
+ * to peek ahead and map more than we need, when the buffers
+ * themselves should be large when set up with
+ * IOU_PBUF_RING_INC.
+ */
+ if (bl->flags & IOBL_INC) {
+ nr_avail = 1;
+ } else {
+ size_t needed;
+
+ needed = (arg->max_len + len - 1) / len;
+ needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
+ if (nr_avail > needed)
+ nr_avail = needed;
+ }
}
/*
@@ -251,16 +255,21 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
req->buf_index = buf->bid;
do {
- /* truncate end piece, if needed */
- if (buf->len > arg->max_len)
- buf->len = arg->max_len;
+ u32 len = buf->len;
+
+ /* truncate end piece, if needed, for non-partial buffers */
+ if (len > arg->max_len) {
+ len = arg->max_len;
+ if (!(bl->flags & IOBL_INC))
+ buf->len = len;
+ }
iov->iov_base = u64_to_user_ptr(buf->addr);
- iov->iov_len = buf->len;
+ iov->iov_len = len;
iov++;
- arg->out_len += buf->len;
- arg->max_len -= buf->len;
+ arg->out_len += len;
+ arg->max_len -= len;
if (!arg->max_len)
break;
@@ -287,7 +296,7 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
if (unlikely(!bl))
goto out_unlock;
- if (bl->is_buf_ring) {
+ if (bl->flags & IOBL_BUF_RING) {
ret = io_ring_buffers_peek(req, arg, bl);
/*
* Don't recycle these buffers if we need to go through poll.
@@ -297,8 +306,8 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
* committed them, they cannot be put back in the queue.
*/
if (ret > 0) {
- req->flags |= REQ_F_BL_NO_RECYCLE;
- req->buf_list->head += ret;
+ req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
+ io_kbuf_commit(req, bl, arg->out_len, ret);
}
} else {
ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
@@ -320,7 +329,7 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
if (unlikely(!bl))
return -ENOENT;
- if (bl->is_buf_ring) {
+ if (bl->flags & IOBL_BUF_RING) {
ret = io_ring_buffers_peek(req, arg, bl);
if (ret > 0)
req->flags |= REQ_F_BUFFERS_COMMIT;
@@ -340,22 +349,22 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
if (!nbufs)
return 0;
- if (bl->is_buf_ring) {
+ if (bl->flags & IOBL_BUF_RING) {
i = bl->buf_ring->tail - bl->head;
if (bl->buf_nr_pages) {
int j;
- if (!bl->is_mmap) {
+ if (!(bl->flags & IOBL_MMAP)) {
for (j = 0; j < bl->buf_nr_pages; j++)
unpin_user_page(bl->buf_pages[j]);
}
io_pages_unmap(bl->buf_ring, &bl->buf_pages,
- &bl->buf_nr_pages, bl->is_mmap);
- bl->is_mmap = 0;
+ &bl->buf_nr_pages, bl->flags & IOBL_MMAP);
+ bl->flags &= ~IOBL_MMAP;
}
/* make sure it's seen as empty */
INIT_LIST_HEAD(&bl->buf_list);
- bl->is_buf_ring = 0;
+ bl->flags &= ~IOBL_BUF_RING;
return i;
}
@@ -442,7 +451,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
if (bl) {
ret = -EINVAL;
/* can't use provide/remove buffers command on mapped buffers */
- if (!bl->is_buf_ring)
+ if (!(bl->flags & IOBL_BUF_RING))
ret = __io_remove_buffers(ctx, bl, p->nbufs);
}
io_ring_submit_unlock(ctx, issue_flags);
@@ -589,7 +598,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
}
}
/* can't add buffers via this command for a mapped buffer ring */
- if (bl->is_buf_ring) {
+ if (bl->flags & IOBL_BUF_RING) {
ret = -EINVAL;
goto err;
}
@@ -641,8 +650,8 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
bl->buf_pages = pages;
bl->buf_nr_pages = nr_pages;
bl->buf_ring = br;
- bl->is_buf_ring = 1;
- bl->is_mmap = 0;
+ bl->flags |= IOBL_BUF_RING;
+ bl->flags &= ~IOBL_MMAP;
return 0;
error_unpin:
unpin_user_pages(pages, nr_pages);
@@ -665,8 +674,7 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
return -ENOMEM;
}
- bl->is_buf_ring = 1;
- bl->is_mmap = 1;
+ bl->flags |= (IOBL_BUF_RING | IOBL_MMAP);
return 0;
}
@@ -683,7 +691,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (reg.resv[0] || reg.resv[1] || reg.resv[2])
return -EINVAL;
- if (reg.flags & ~IOU_PBUF_RING_MMAP)
+ if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
return -EINVAL;
if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
if (!reg.ring_addr)
@@ -705,7 +713,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
- if (bl->is_buf_ring || !list_empty(&bl->buf_list))
+ if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
return -EEXIST;
} else {
free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
@@ -721,6 +729,8 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (!ret) {
bl->nr_entries = reg.ring_entries;
bl->mask = reg.ring_entries - 1;
+ if (reg.flags & IOU_PBUF_RING_INC)
+ bl->flags |= IOBL_INC;
io_buffer_add_list(ctx, bl, reg.bgid);
return 0;
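For reference, how an application might register a provided-buffer ring with the new incremental flag (a sketch, not part of the patch; 'br' is assumed to be a page-aligned ring the application mapped itself):

	extern struct io_uring_buf_ring *br;	/* application-mapped, page-aligned */

	struct io_uring_buf_reg reg = {
		.ring_addr	= (__u64)(uintptr_t)br,
		.ring_entries	= 8,		/* power of 2 */
		.bgid		= 0,
		.flags		= IOU_PBUF_RING_INC,
	};

	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PBUF_RING,
		    &reg, 1))
		perror("IORING_REGISTER_PBUF_RING");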
@@ -747,7 +757,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, reg.bgid);
if (!bl)
return -ENOENT;
- if (!bl->is_buf_ring)
+ if (!(bl->flags & IOBL_BUF_RING))
return -EINVAL;
xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -771,7 +781,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
bl = io_buffer_get_list(ctx, buf_status.buf_group);
if (!bl)
return -ENOENT;
- if (!bl->is_buf_ring)
+ if (!(bl->flags & IOBL_BUF_RING))
return -EINVAL;
buf_status.head = bl->head;
@@ -802,7 +812,7 @@ struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
bl = xa_load(&ctx->io_bl_xa, bgid);
/* must be a mmap'able buffer ring and have pages */
ret = false;
- if (bl && bl->is_mmap)
+ if (bl && bl->flags & IOBL_MMAP)
ret = atomic_inc_not_zero(&bl->refs);
rcu_read_unlock();
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index b90aca3a57fa..36aadfe5ac00 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -4,6 +4,16 @@
#include <uapi/linux/io_uring.h>
+enum {
+ /* ring mapped provided buffers */
+ IOBL_BUF_RING = 1,
+ /* ring mapped provided buffers, but mmap'ed by application */
+ IOBL_MMAP = 2,
+ /* buffers are consumed incrementally rather than always fully */
+ IOBL_INC = 4,
+};
+
struct io_buffer_list {
/*
* If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
@@ -25,12 +35,9 @@ struct io_buffer_list {
__u16 head;
__u16 mask;
- atomic_t refs;
+ __u16 flags;
- /* ring mapped provided buffers */
- __u8 is_buf_ring;
- /* ring mapped provided buffers, but mmap'ed by application */
- __u8 is_mmap;
+ atomic_t refs;
};
struct io_buffer {
@@ -52,8 +59,8 @@ struct buf_sel_arg {
struct iovec *iovs;
size_t out_len;
size_t max_len;
- int nr_iovs;
- int mode;
+ unsigned short nr_iovs;
+ unsigned short mode;
};
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
@@ -73,7 +80,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
@@ -117,25 +124,55 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
return false;
}
-static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
+/* Mapped buffer ring, return io_uring_buf from head */
+#define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
+
+static inline bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
+{
+ if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
+ return true;
+
+ req->flags &= ~REQ_F_BUFFERS_COMMIT;
+
+ if (unlikely(len < 0))
+ return true;
+
+ if (bl->flags & IOBL_INC) {
+ struct io_uring_buf *buf;
+
+ buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+ if (WARN_ON_ONCE(len > buf->len))
+ len = buf->len;
+ buf->len -= len;
+ if (buf->len) {
+ buf->addr += len;
+ return false;
+ }
+ }
+
+ bl->head += nr;
+ return true;
+}
+
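To make the incremental commit concrete, a worked example based only on the logic above: one IOBL_INC ring entry of 4096 bytes serving two receives.

	/*
	 * commit len=1500: buf->len 4096 -> 2596, buf->addr advances by 1500,
	 *                  bl->head unchanged, io_kbuf_commit() returns false,
	 *                  so the CQE carries IORING_CQE_F_BUF_MORE.
	 * commit len=2596: buf->len hits 0, falls through to bl->head += nr,
	 *                  returns true and the entry is finally consumed.
	 */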
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
struct io_buffer_list *bl = req->buf_list;
+ bool ret = true;
if (bl) {
- if (req->flags & REQ_F_BUFFERS_COMMIT) {
- bl->head += nr;
- req->flags &= ~REQ_F_BUFFERS_COMMIT;
- }
+ ret = io_kbuf_commit(req, bl, len, nr);
req->buf_index = bl->bgid;
}
req->flags &= ~REQ_F_BUFFER_RING;
+ return ret;
}
-static inline void __io_put_kbuf_list(struct io_kiocb *req,
+static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
struct list_head *list)
{
if (req->flags & REQ_F_BUFFER_RING) {
- __io_put_kbuf_ring(req, 1);
+ __io_put_kbuf_ring(req, len, 1);
} else {
req->buf_index = req->kbuf->bgid;
list_add(&req->kbuf->list, list);
@@ -150,11 +187,12 @@ static inline void io_kbuf_drop(struct io_kiocb *req)
if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
return;
- __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+ /* len == 0 is fine here, non-ring will always drop all of it */
+ __io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}
-static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
- unsigned issue_flags)
+static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
+ int nbufs, unsigned issue_flags)
{
unsigned int ret;
@@ -162,22 +200,24 @@ static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
return 0;
ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
- if (req->flags & REQ_F_BUFFER_RING)
- __io_put_kbuf_ring(req, nbufs);
- else
- __io_put_kbuf(req, issue_flags);
+ if (req->flags & REQ_F_BUFFER_RING) {
+ if (!__io_put_kbuf_ring(req, len, nbufs))
+ ret |= IORING_CQE_F_BUF_MORE;
+ } else {
+ __io_put_kbuf(req, len, issue_flags);
+ }
return ret;
}
-static inline unsigned int io_put_kbuf(struct io_kiocb *req,
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
unsigned issue_flags)
{
- return __io_put_kbufs(req, 1, issue_flags);
+ return __io_put_kbufs(req, len, 1, issue_flags);
}
-static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
- unsigned issue_flags)
+static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
+ int nbufs, unsigned issue_flags)
{
- return __io_put_kbufs(req, nbufs, issue_flags);
+ return __io_put_kbufs(req, len, nbufs, issue_flags);
}
#endif
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 1de1d4d62925..d0cf694d0172 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -270,27 +270,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
}
/*
- * __io_napi_adjust_timeout() - adjust busy loop timeout
- * @ctx: pointer to io-uring context structure
- * @iowq: pointer to io wait queue
- * @ts: pointer to timespec or NULL
- *
- * Adjust the busy loop timeout according to timespec and busy poll timeout.
- * If the specified NAPI timeout is bigger than the wait timeout, then adjust
- * the NAPI timeout accordingly.
- */
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
- ktime_t to_wait)
-{
- ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
-
- if (to_wait)
- poll_dt = min(poll_dt, to_wait);
-
- iowq->napi_busy_poll_dt = poll_dt;
-}
-
-/*
* __io_napi_busy_loop() - execute busy poll loop
* @ctx: pointer to io-uring context structure
* @iowq: pointer to io wait queue
@@ -299,10 +278,18 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
*/
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
{
- iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ return;
- if (!(ctx->flags & IORING_SETUP_SQPOLL))
- io_napi_blocking_busy_loop(ctx, iowq);
+ iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
+ if (iowq->timeout != KTIME_MAX) {
+ ktime_t dt = ktime_sub(iowq->timeout, io_get_time(ctx));
+
+ iowq->napi_busy_poll_dt = min_t(u64, iowq->napi_busy_poll_dt, dt);
+ }
+
+ iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+ io_napi_blocking_busy_loop(ctx, iowq);
}
/*
diff --git a/io_uring/napi.h b/io_uring/napi.h
index 27b88c3eb428..fd275ef0456d 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -17,8 +17,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq, ktime_t to_wait);
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
@@ -27,15 +25,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
return !list_empty(&ctx->napi_list);
}
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq,
- ktime_t to_wait)
-{
- if (!io_napi(ctx))
- return;
- __io_napi_adjust_timeout(ctx, iowq, to_wait);
-}
-
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq)
{
@@ -86,11 +75,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
static inline void io_napi_add(struct io_kiocb *req)
{
}
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq,
- ktime_t to_wait)
-{
-}
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq)
{
diff --git a/io_uring/net.c b/io_uring/net.c
index d08abcca89cc..f10f5a22d66a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -434,8 +434,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->buf_group = req->buf_index;
req->buf_list = NULL;
}
- if (req->flags & REQ_F_BUFFER_SELECT && sr->len)
- return -EINVAL;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
@@ -499,11 +497,11 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
unsigned int cflags;
if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
- cflags = io_put_kbuf(req, issue_flags);
+ cflags = io_put_kbuf(req, *ret, issue_flags);
goto finish;
}
- cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), issue_flags);
+ cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
goto finish;
@@ -599,7 +597,7 @@ retry_bundle:
if (io_do_buffer_select(req)) {
struct buf_sel_arg arg = {
.iovs = &kmsg->fast_iov,
- .max_len = INT_MAX,
+ .max_len = min_not_zero(sr->len, INT_MAX),
.nr_iovs = 1,
};
@@ -618,14 +616,23 @@ retry_bundle:
if (unlikely(ret < 0))
return ret;
- sr->len = arg.out_len;
- iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE, arg.iovs, ret,
- arg.out_len);
if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
kmsg->free_iov_nr = ret;
kmsg->free_iov = arg.iovs;
req->flags |= REQ_F_NEED_CLEANUP;
}
+ sr->len = arg.out_len;
+
+ if (ret == 1) {
+ sr->buf = arg.iovs[0].iov_base;
+ ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
+ arg.iovs, ret, arg.out_len);
+ }
}
/*
@@ -835,13 +842,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
if (sr->flags & IORING_RECVSEND_BUNDLE) {
- cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+ cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
issue_flags);
/* bundle with no more immediate buffers, we're done */
if (req->flags & REQ_F_BL_EMPTY)
goto finish;
} else {
- cflags |= io_put_kbuf(req, issue_flags);
+ cflags |= io_put_kbuf(req, *ret, issue_flags);
}
/*
diff --git a/io_uring/register.c b/io_uring/register.c
index e3c20be5a198..eca26d4884d9 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -335,6 +335,31 @@ err:
return ret;
}
+static int io_register_clock(struct io_ring_ctx *ctx,
+ struct io_uring_clock_register __user *arg)
+{
+ struct io_uring_clock_register reg;
+
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+ if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
+ return -EINVAL;
+
+ switch (reg.clockid) {
+ case CLOCK_MONOTONIC:
+ ctx->clock_offset = 0;
+ break;
+ case CLOCK_BOOTTIME:
+ ctx->clock_offset = TK_OFFS_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->clockid = reg.clockid;
+ return 0;
+}
+
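Userspace side, registering the boot clock so absolute IORING_ENTER_ABS_TIMER waits keep counting across suspend (a sketch, not part of the patch; raw syscall form shown, and nr_args must be 0 per the dispatch below):

	struct io_uring_clock_register reg = { .clockid = CLOCK_BOOTTIME };

	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_CLOCK,
		    &reg, 0))
		perror("IORING_REGISTER_CLOCK");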
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
void __user *arg, unsigned nr_args)
__releases(ctx->uring_lock)
@@ -511,6 +536,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_unregister_napi(ctx, arg);
break;
+ case IORING_REGISTER_CLOCK:
+ ret = -EINVAL;
+ if (!arg || nr_args)
+ break;
+ ret = io_register_clock(ctx, arg);
+ break;
+ case IORING_REGISTER_CLONE_BUFFERS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_clone_buffers(ctx, arg);
+ break;
default:
ret = -EINVAL;
break;
@@ -519,21 +556,16 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
return ret;
}
-SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
- void __user *, arg, unsigned int, nr_args)
+/*
+ * Given an 'fd' value, return the ctx associated with it. If 'registered' is
+ * true, then the registered index is used. Otherwise, the normal fd table.
+ * Caller must call fput() on the returned file, unless it's an ERR_PTR.
+ */
+struct file *io_uring_register_get_file(unsigned int fd, bool registered)
{
- struct io_ring_ctx *ctx;
- long ret = -EBADF;
struct file *file;
- bool use_registered_ring;
- use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
- opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;
-
- if (opcode >= IORING_REGISTER_LAST)
- return -EINVAL;
-
- if (use_registered_ring) {
+ if (registered) {
/*
* Ring fd has been registered via IORING_REGISTER_RING_FDS, we
* need only dereference our task private array to find it.
@@ -541,27 +573,44 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
struct io_uring_task *tctx = current->io_uring;
if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
file = tctx->registered_rings[fd];
- if (unlikely(!file))
- return -EBADF;
} else {
file = fget(fd);
- if (unlikely(!file))
- return -EBADF;
- ret = -EOPNOTSUPP;
- if (!io_is_uring_fops(file))
- goto out_fput;
}
+ if (unlikely(!file))
+ return ERR_PTR(-EBADF);
+ if (io_is_uring_fops(file))
+ return file;
+ fput(file);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
+ void __user *, arg, unsigned int, nr_args)
+{
+ struct io_ring_ctx *ctx;
+ long ret = -EBADF;
+ struct file *file;
+ bool use_registered_ring;
+
+ use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
+ opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;
+
+ if (opcode >= IORING_REGISTER_LAST)
+ return -EINVAL;
+
+ file = io_uring_register_get_file(fd, use_registered_ring);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
ctx = file->private_data;
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
-out_fput:
if (!use_registered_ring)
fput(file);
return ret;
diff --git a/io_uring/register.h b/io_uring/register.h
index c9da997d503c..a5f39d5ef9e0 100644
--- a/io_uring/register.h
+++ b/io_uring/register.h
@@ -4,5 +4,6 @@
int io_eventfd_unregister(struct io_ring_ctx *ctx);
int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id);
+struct file *io_uring_register_get_file(unsigned int fd, bool registered);
#endif
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 453867add7ca..33a3d156a85b 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -17,6 +17,7 @@
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"
+#include "register.h"
struct io_rsrc_update {
struct file *file;
@@ -37,7 +38,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
static const struct io_mapped_ubuf dummy_ubuf = {
/* set invalid range, so io_import_fixed() fails meeting it */
.ubuf = -1UL,
- .ubuf_end = 0,
+ .len = UINT_MAX,
};
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
@@ -114,14 +115,16 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
struct io_mapped_ubuf *imu = *slot;
unsigned int i;
+ *slot = NULL;
if (imu != &dummy_ubuf) {
+ if (!refcount_dec_and_test(&imu->refs))
+ return;
for (i = 0; i < imu->nr_bvecs; i++)
unpin_user_page(imu->bvec[i].bv_page);
if (imu->acct_pages)
io_unaccount_mem(ctx, imu->acct_pages);
kvfree(imu);
}
- *slot = NULL;
}
static void io_rsrc_put_work(struct io_rsrc_node *node)
@@ -855,6 +858,98 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
return ret;
}
+static bool io_do_coalesce_buffer(struct page ***pages, int *nr_pages,
+ struct io_imu_folio_data *data, int nr_folios)
+{
+ struct page **page_array = *pages, **new_array = NULL;
+ int nr_pages_left = *nr_pages, i, j;
+
+ /* Store head pages only */
+ new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!new_array)
+ return false;
+
+ new_array[0] = compound_head(page_array[0]);
+ /*
+ * The pages are bound to the folio; it doesn't
+ * actually unpin them but drops all but one reference,
+ * which is usually put down by io_buffer_unmap().
+ * Note, needs a better helper.
+ */
+ if (data->nr_pages_head > 1)
+ unpin_user_pages(&page_array[1], data->nr_pages_head - 1);
+
+ j = data->nr_pages_head;
+ nr_pages_left -= data->nr_pages_head;
+ for (i = 1; i < nr_folios; i++) {
+ unsigned int nr_unpin;
+
+ new_array[i] = page_array[j];
+ nr_unpin = min_t(unsigned int, nr_pages_left - 1,
+ data->nr_pages_mid - 1);
+ if (nr_unpin)
+ unpin_user_pages(&page_array[j+1], nr_unpin);
+ j += data->nr_pages_mid;
+ nr_pages_left -= data->nr_pages_mid;
+ }
+ kvfree(page_array);
+ *pages = new_array;
+ *nr_pages = nr_folios;
+ return true;
+}
+
+static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
+ struct io_imu_folio_data *data)
+{
+ struct page **page_array = *pages;
+ struct folio *folio = page_folio(page_array[0]);
+ unsigned int count = 1, nr_folios = 1;
+ int i;
+
+ if (*nr_pages <= 1)
+ return false;
+
+ data->nr_pages_mid = folio_nr_pages(folio);
+ if (data->nr_pages_mid == 1)
+ return false;
+
+ data->folio_shift = folio_shift(folio);
+ /*
+ * Check if pages are contiguous inside a folio, and all folios have
+ * the same page count except for the head and tail.
+ */
+ for (i = 1; i < *nr_pages; i++) {
+ if (page_folio(page_array[i]) == folio &&
+ page_array[i] == page_array[i-1] + 1) {
+ count++;
+ continue;
+ }
+
+ if (nr_folios == 1) {
+ if (folio_page_idx(folio, page_array[i-1]) !=
+ data->nr_pages_mid - 1)
+ return false;
+
+ data->nr_pages_head = count;
+ } else if (count != data->nr_pages_mid) {
+ return false;
+ }
+
+ folio = page_folio(page_array[i]);
+ if (folio_size(folio) != (1UL << data->folio_shift) ||
+ folio_page_idx(folio, page_array[i]) != 0)
+ return false;
+
+ count = 1;
+ nr_folios++;
+ }
+ if (nr_folios == 1)
+ data->nr_pages_head = count;
+
+ return io_do_coalesce_buffer(pages, nr_pages, data, nr_folios);
+}
+
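A worked example of the coalescing path, based on the logic above:

	/*
	 * A buffer backed by one 2MB THP arrives as 512 pinned PAGE_SIZE pages.
	 * The scan finds every page contiguous in a single folio, so
	 * nr_folios = 1, data->nr_pages_head = 512, data->folio_shift = 21.
	 * io_do_coalesce_buffer() then drops the 511 surplus pins and keeps a
	 * single head page: the registered buffer needs one bvec instead of
	 * 512, and io_import_fixed() steps in folio_shift-sized chunks.
	 */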
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
struct io_mapped_ubuf **pimu,
struct page **last_hpage)
@@ -864,7 +959,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
unsigned long off;
size_t size;
int ret, nr_pages, i;
- struct folio *folio = NULL;
+ struct io_imu_folio_data data;
+ bool coalesced;
*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
if (!iov->iov_base)
@@ -879,31 +975,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
goto done;
}
- /* If it's a huge page, try to coalesce them into a single bvec entry */
- if (nr_pages > 1) {
- folio = page_folio(pages[0]);
- for (i = 1; i < nr_pages; i++) {
- /*
- * Pages must be consecutive and on the same folio for
- * this to work
- */
- if (page_folio(pages[i]) != folio ||
- pages[i] != pages[i - 1] + 1) {
- folio = NULL;
- break;
- }
- }
- if (folio) {
- /*
- * The pages are bound to the folio, it doesn't
- * actually unpin them but drops all but one reference,
- * which is usually put down by io_buffer_unmap().
- * Note, needs a better helper.
- */
- unpin_user_pages(&pages[1], nr_pages - 1);
- nr_pages = 1;
- }
- }
+ /* If it's huge page(s), try to coalesce them into fewer bvec entries */
+ coalesced = io_try_coalesce_buffer(&pages, &nr_pages, &data);
imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
if (!imu)
@@ -915,23 +988,23 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
goto done;
}
- off = (unsigned long) iov->iov_base & ~PAGE_MASK;
size = iov->iov_len;
/* store original address for later verification */
imu->ubuf = (unsigned long) iov->iov_base;
- imu->ubuf_end = imu->ubuf + iov->iov_len;
+ imu->len = iov->iov_len;
imu->nr_bvecs = nr_pages;
+ imu->folio_shift = PAGE_SHIFT;
+ if (coalesced)
+ imu->folio_shift = data.folio_shift;
+ refcount_set(&imu->refs, 1);
+ off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
*pimu = imu;
ret = 0;
- if (folio) {
- bvec_set_page(&imu->bvec[0], pages[0], size, off);
- goto done;
- }
for (i = 0; i < nr_pages; i++) {
size_t vec_len;
- vec_len = min_t(size_t, size, PAGE_SIZE - off);
+ vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
off = 0;
size -= vec_len;
@@ -1024,7 +1097,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
- if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
+ if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
return -EFAULT;
/*
@@ -1042,23 +1115,18 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
* we know that:
*
* 1) it's a BVEC iter, we set it up
- * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * 2) all bvecs are the same in size, except potentially the
* first and last bvec
*
* So just find our index, and adjust the iterator afterwards.
* If the offset is within the first bvec (or the whole first
* bvec), just use iov_iter_advance(). This makes it easier
* since we can just skip the first segment, which may not
- * be PAGE_SIZE aligned.
+ * be folio_size aligned.
*/
const struct bio_vec *bvec = imu->bvec;
if (offset < bvec->bv_len) {
- /*
- * Note, huge pages buffers consists of one large
- * bvec entry and should always go this way. The other
- * branch doesn't expect non PAGE_SIZE'd chunks.
- */
iter->bvec = bvec;
iter->count -= offset;
iter->iov_offset = offset;
@@ -1067,14 +1135,104 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
/* skip first vec */
offset -= bvec->bv_len;
- seg_skip = 1 + (offset >> PAGE_SHIFT);
+ seg_skip = 1 + (offset >> imu->folio_shift);
iter->bvec = bvec + seg_skip;
iter->nr_segs -= seg_skip;
iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ~PAGE_MASK;
+ iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
}
}
return 0;
}
+
+static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+{
+ struct io_mapped_ubuf **user_bufs;
+ struct io_rsrc_data *data;
+ int i, ret, nbufs;
+
+ /*
+ * Drop our own lock here. We'll set up the data we need and reference
+ * the source buffers, then re-grab, check, and assign at the end.
+ */
+ mutex_unlock(&ctx->uring_lock);
+
+ mutex_lock(&src_ctx->uring_lock);
+ ret = -ENXIO;
+ nbufs = src_ctx->nr_user_bufs;
+ if (!nbufs)
+ goto out_unlock;
+ ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, NULL, nbufs, &data);
+ if (ret)
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ user_bufs = kcalloc(nbufs, sizeof(*ctx->user_bufs), GFP_KERNEL);
+ if (!user_bufs)
+ goto out_free_data;
+
+ for (i = 0; i < nbufs; i++) {
+ struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
+
+ refcount_inc(&src->refs);
+ user_bufs[i] = src;
+ }
+
+ /* Have a ref on the bufs now, drop src lock and re-grab our own lock */
+ mutex_unlock(&src_ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ if (!ctx->user_bufs) {
+ ctx->user_bufs = user_bufs;
+ ctx->buf_data = data;
+ ctx->nr_user_bufs = nbufs;
+ return 0;
+ }
+
+ /* someone raced setting up buffers, dump ours */
+ for (i = 0; i < nbufs; i++)
+ io_buffer_unmap(ctx, &user_bufs[i]);
+ io_rsrc_data_free(data);
+ kfree(user_bufs);
+ return -EBUSY;
+out_free_data:
+ io_rsrc_data_free(data);
+out_unlock:
+ mutex_unlock(&src_ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ return ret;
+}
+
+/*
+ * Copy the registered buffers from the source ring whose file descriptor
+ * is given in src_fd to the current ring. This is identical to registering
+ * the buffers with ctx, except faster as mappings already exist.
+ *
+ * Since the memory is already accounted once, don't account it again.
+ */
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_clone_buffers buf;
+ bool registered_src;
+ struct file *file;
+ int ret;
+
+ if (ctx->user_bufs || ctx->nr_user_bufs)
+ return -EBUSY;
+ if (copy_from_user(&buf, arg, sizeof(buf)))
+ return -EFAULT;
+ if (buf.flags & ~IORING_REGISTER_SRC_REGISTERED)
+ return -EINVAL;
+ if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
+ return -EINVAL;
+
+ registered_src = (buf.flags & IORING_REGISTER_SRC_REGISTERED) != 0;
+ file = io_uring_register_get_file(buf.src_fd, registered_src);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ ret = io_clone_buffers(ctx, file->private_data);
+ if (!registered_src)
+ fput(file);
+ return ret;
+}
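Userspace side of the clone (a sketch, not part of the patch; the destination ring must not have buffers registered yet, and nr_args must be 1 per the dispatch above):

	struct io_uring_clone_buffers buf = {
		.src_fd	= src_ring_fd,
		.flags	= 0,	/* or IORING_REGISTER_SRC_REGISTERED */
	};

	if (syscall(__NR_io_uring_register, dst_ring_fd,
		    IORING_REGISTER_CLONE_BUFFERS, &buf, 1))
		perror("IORING_REGISTER_CLONE_BUFFERS");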
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index c032ca3436ca..8ed588036210 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -22,8 +22,6 @@ struct io_rsrc_put {
};
};
-typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-
struct io_rsrc_data {
struct io_ring_ctx *ctx;
@@ -44,12 +42,22 @@ struct io_rsrc_node {
struct io_mapped_ubuf {
u64 ubuf;
- u64 ubuf_end;
+ unsigned int len;
unsigned int nr_bvecs;
+ unsigned int folio_shift;
+ refcount_t refs;
unsigned long acct_pages;
struct bio_vec bvec[] __counted_by(nr_bvecs);
};
+struct io_imu_folio_data {
+ /* Head folio can be partially included in the fixed buf */
+ unsigned int nr_pages_head;
+ /* For non-head/tail folios, has to be fully included */
+ unsigned int nr_pages_mid;
+ unsigned int folio_shift;
+};
+
void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
@@ -59,6 +67,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
struct io_mapped_ubuf *imu,
u64 buf_addr, size_t len);
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
diff --git a/io_uring/rw.c b/io_uring/rw.c
index c004d21e2f12..f023ff49c688 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -467,8 +467,7 @@ static void io_req_io_end(struct io_kiocb *req)
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
if (unlikely(res != req->cqe.res)) {
- if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
- io_rw_should_reissue(req)) {
+ if (res == -EAGAIN && io_rw_should_reissue(req)) {
/*
* Reissue will start accounting again, finish the
* current cycle.
@@ -511,7 +510,7 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
- req->cqe.flags |= io_put_kbuf(req, 0);
+ req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);
io_req_rw_cleanup(req, 0);
io_req_task_complete(req, ts);
@@ -593,7 +592,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
*/
io_req_io_end(req);
io_req_set_res(req, final_ret,
- io_put_kbuf(req, issue_flags));
+ io_put_kbuf(req, ret, issue_flags));
io_req_rw_cleanup(req, issue_flags);
return IOU_OK;
}
@@ -855,6 +854,14 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
ret = io_iter_do_read(rw, &io->iter);
+ /*
+ * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
+ * issue, even though they should be returning -EAGAIN. To be safe,
+ * retry from blocking context for either.
+ */
+ if (ret == -EOPNOTSUPP && force_nonblock)
+ ret = -EAGAIN;
+
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
req->flags &= ~REQ_F_REISSUE;
/* If we can poll, just do that. */
@@ -975,7 +982,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* Put our buffer and post a CQE. If we fail to post a CQE, then
* jump to the termination path. This request is then done.
*/
- cflags = io_put_kbuf(req, issue_flags);
+ cflags = io_put_kbuf(req, ret, issue_flags);
rw->len = 0; /* similarly to above, reset len to 0 */
if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1167,7 +1174,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (!smp_load_acquire(&req->iopoll_completed))
break;
nr_events++;
- req->cqe.flags = io_put_kbuf(req, 0);
+ req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
if (req->opcode != IORING_OP_URING_CMD)
io_req_rw_cleanup(req, 0);
}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 3b50dc9586d1..a26593979887 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
+#include <linux/cpuset.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
@@ -108,14 +109,14 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
struct fd f;
f = fdget(p->wq_fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-ENXIO);
- if (!io_is_uring_fops(f.file)) {
+ if (!io_is_uring_fops(fd_file(f))) {
fdput(f);
return ERR_PTR(-EINVAL);
}
- ctx_attach = f.file->private_data;
+ ctx_attach = fd_file(f)->private_data;
sqd = ctx_attach->sq_data;
if (!sqd) {
fdput(f);
@@ -176,7 +177,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
- if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
+ if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
const struct cred *creds = NULL;
if (ctx->sq_creds != current_cred())
@@ -195,9 +196,6 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
- if (io_napi(ctx))
- ret += io_napi_sqpoll_busy_poll(ctx);
-
if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
wake_up(&ctx->sqo_sq_wait);
if (creds)
@@ -322,6 +320,10 @@ static int io_sq_thread(void *data)
if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ if (io_napi(ctx))
+ io_napi_sqpoll_busy_poll(ctx);
+
if (sqt_spin || !time_after(jiffies, timeout)) {
if (sqt_spin) {
io_sq_update_worktime(sqd, &start);
@@ -418,9 +420,9 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
struct fd f;
f = fdget(p->wq_fd);
- if (!f.file)
+ if (!fd_file(f))
return -ENXIO;
- if (!io_is_uring_fops(f.file)) {
+ if (!io_is_uring_fops(fd_file(f))) {
fdput(f);
return -EINVAL;
}
@@ -460,11 +462,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
return 0;
if (p->flags & IORING_SETUP_SQ_AFF) {
+ cpumask_var_t allowed_mask;
int cpu = p->sq_thread_cpu;
ret = -EINVAL;
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
goto err_sqpoll;
+ ret = -ENOMEM;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+ goto err_sqpoll;
+ ret = -EINVAL;
+ cpuset_cpus_allowed(current, allowed_mask);
+ if (!cpumask_test_cpu(cpu, allowed_mask)) {
+ free_cpumask_var(allowed_mask);
+ goto err_sqpoll;
+ }
+ free_cpumask_var(allowed_mask);
sqd->sq_cpu = cpu;
} else {
sqd->sq_cpu = -1;
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 8391c7c7c1ec..39c3c816ec78 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -277,6 +277,13 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+ io_req_queue_iowq(req);
+}
+
static inline int io_uring_cmd_getsockopt(struct socket *sock,
struct io_uring_cmd *cmd,
unsigned int issue_flags)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a7cbd69efbef..34fa0bd8bb11 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1085,20 +1085,20 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
f = fdget(mqdes);
- if (unlikely(!f.file)) {
+ if (unlikely(!fd_file(f))) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
info = MQUEUE_I(inode);
- audit_file(f.file);
+ audit_file(fd_file(f));
- if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
+ if (unlikely(!(fd_file(f)->f_mode & FMODE_WRITE))) {
ret = -EBADF;
goto out_fput;
}
@@ -1138,7 +1138,7 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
}
if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
- if (f.file->f_flags & O_NONBLOCK) {
+ if (fd_file(f)->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
} else {
wait.task = current;
@@ -1199,20 +1199,20 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, 0, ts);
f = fdget(mqdes);
- if (unlikely(!f.file)) {
+ if (unlikely(!fd_file(f))) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
info = MQUEUE_I(inode);
- audit_file(f.file);
+ audit_file(fd_file(f));
- if (unlikely(!(f.file->f_mode & FMODE_READ))) {
+ if (unlikely(!(fd_file(f)->f_mode & FMODE_READ))) {
ret = -EBADF;
goto out_fput;
}
@@ -1242,7 +1242,7 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
}
if (info->attr.mq_curmsgs == 0) {
- if (f.file->f_flags & O_NONBLOCK) {
+ if (fd_file(f)->f_flags & O_NONBLOCK) {
spin_unlock(&info->lock);
ret = -EAGAIN;
} else {
@@ -1356,11 +1356,11 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
/* and attach it to the socket */
retry:
f = fdget(notification->sigev_signo);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- sock = netlink_getsockbyfilp(f.file);
+ sock = netlink_getsockbyfilp(fd_file(f));
fdput(f);
if (IS_ERR(sock)) {
ret = PTR_ERR(sock);
@@ -1379,13 +1379,13 @@ retry:
}
f = fdget(mqdes);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
@@ -1460,31 +1460,31 @@ static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
return -EINVAL;
f = fdget(mqdes);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
fdput(f);
return -EBADF;
}
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
info = MQUEUE_I(inode);
spin_lock(&info->lock);
if (old) {
*old = info->attr;
- old->mq_flags = f.file->f_flags & O_NONBLOCK;
+ old->mq_flags = fd_file(f)->f_flags & O_NONBLOCK;
}
if (new) {
audit_mq_getsetattr(mqdes, new);
- spin_lock(&f.file->f_lock);
+ spin_lock(&fd_file(f)->f_lock);
if (new->mq_flags & O_NONBLOCK)
- f.file->f_flags |= O_NONBLOCK;
+ fd_file(f)->f_flags |= O_NONBLOCK;
else
- f.file->f_flags &= ~O_NONBLOCK;
- spin_unlock(&f.file->f_lock);
+ fd_file(f)->f_flags &= ~O_NONBLOCK;
+ spin_unlock(&fd_file(f)->f_lock);
inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 3e3071252dac..99564c870084 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1778,8 +1778,8 @@ long ksys_shmdt(char __user *shmaddr)
*/
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
@@ -1803,8 +1803,8 @@ long ksys_shmdt(char __user *shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
(vma->vm_file == file)) {
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
}
vma = vma_next(&vmi);
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c2f1fd95a821..fe782cd77388 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -133,4 +133,29 @@ config SCHED_CORE
which is the likely usage by Linux distributions, there should
be no measurable impact on performance.
-
+config SCHED_CLASS_EXT
+ bool "Extensible Scheduling Class"
+ depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF
+ select STACKTRACE if STACKTRACE_SUPPORT
+ help
+ This option enables a new scheduler class sched_ext (SCX), which
+ allows scheduling policies to be implemented as BPF programs to
+ achieve the following:
+
+ - Ease of experimentation and exploration: Enabling rapid
+ iteration of new scheduling policies.
+ - Customization: Building application-specific schedulers which
+ implement policies that are not applicable to general-purpose
+ schedulers.
+ - Rapid scheduler deployments: Non-disruptive swap outs of
+ scheduling policies in production environments.
+
+ sched_ext leverages the BPF struct_ops feature to define a structure
+ which exports function callbacks and flags to BPF programs that
+ wish to implement scheduling policies. The struct_ops structure
+ exported by sched_ext is struct sched_ext_ops, and is conceptually
+ similar to struct sched_class.
+
+ For more information:
+ Documentation/scheduler/sched-ext.rst
+ https://github.com/sched-ext/scx
diff --git a/kernel/Makefile b/kernel/Makefile
index 3c13240dfc9f..87866b037fbe 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -116,7 +116,6 @@ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
obj-$(CONFIG_CFI_CLANG) += cfi.o
-obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/audit.c b/kernel/audit.c
index e7a62ebbf4d1..1edaa4846a47 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1612,7 +1612,7 @@ static void audit_log_multicast(int group, const char *op, int err)
cred = current_cred();
tty = audit_get_tty();
audit_log_format(ab, "pid=%u uid=%u auid=%u tty=%s ses=%u",
- task_pid_nr(current),
+ task_tgid_nr(current),
from_kuid(&init_user_ns, cred->uid),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
tty ? tty_name(tty) : "(none)",
@@ -1706,7 +1706,7 @@ static int __init audit_init(void)
audit_cmd_mutex.owner = NULL;
pr_info("initializing netlink subsys (%s)\n",
- audit_default ? "enabled" : "disabled");
+ str_enabled_disabled(audit_default));
register_pernet_subsys(&audit_net_ops);
audit_initialized = AUDIT_INITIALIZED;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index d6ef4f4f9cba..470041c49a44 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1344,7 +1344,7 @@ int audit_filter(int msgtype, unsigned int listtype)
switch (f->type) {
case AUDIT_PID:
- pid = task_pid_nr(current);
+ pid = task_tgid_nr(current);
result = audit_comparator(pid, f->op, f->val);
break;
case AUDIT_UID:
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 6f0d6fb6523f..cd57053b4a69 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2933,7 +2933,7 @@ void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
audit_log_format(ab, "table=%s family=%u entries=%u op=%s",
name, af, nentries, audit_nfcfgs[op].s);
- audit_log_format(ab, " pid=%u", task_pid_nr(current));
+ audit_log_format(ab, " pid=%u", task_tgid_nr(current));
audit_log_task_context(ab); /* subj= */
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, current));
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 0291eef9ce92..9b9c151b5c82 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -52,9 +52,3 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
-
-# Some source files are common to libbpf.
-vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf
-
-$(obj)/%.o: %.c FORCE
- $(call if_changed_rule,cc_o_c)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index feabc0193852..79660e3fca4c 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,6 +73,9 @@ int array_map_alloc_check(union bpf_attr *attr)
/* avoid overflow on round_up(map->value_size) */
if (attr->value_size > INT_MAX)
return -E2BIG;
+ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+ return -E2BIG;
return 0;
}
@@ -494,7 +497,7 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
if (map->btf_key_type_id)
seq_printf(m, "%u: ", *(u32 *)key);
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
rcu_read_unlock();
}
@@ -515,7 +518,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
seq_printf(m, "\tcpu%d: ", cpu);
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(pptr, cpu), m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
@@ -600,7 +603,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
array = container_of(map, struct bpf_array, map);
index = info->index & array->index_mask;
if (info->percpu_value_buf)
- return array->pptrs[index];
+ return (void *)(uintptr_t)array->pptrs[index];
return array_map_elem_ptr(array, index);
}
@@ -619,7 +622,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
array = container_of(map, struct bpf_array, map);
index = info->index & array->index_mask;
if (info->percpu_value_buf)
- return array->pptrs[index];
+ return (void *)(uintptr_t)array->pptrs[index];
return array_map_elem_ptr(array, index);
}
@@ -632,7 +635,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
struct bpf_iter_meta meta;
struct bpf_prog *prog;
int off = 0, cpu = 0;
- void __percpu **pptr;
+ void __percpu *pptr;
u32 size;
meta.seq = seq;
@@ -648,7 +651,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
if (!info->percpu_value_buf) {
ctx.value = v;
} else {
- pptr = v;
+ pptr = (void __percpu *)(uintptr_t)v;
size = array->elem_size;
for_each_possible_cpu(cpu) {
copy_map_value_long(map, info->percpu_value_buf + off,
@@ -993,7 +996,7 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
prog_id = prog_fd_array_sys_lookup_elem(ptr);
btf_type_seq_show(map->btf, map->btf_value_type_id,
&prog_id, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
}
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index b0ef45db207c..29da6d3838f6 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -78,13 +78,12 @@ void bpf_inode_storage_free(struct inode *inode)
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- sdata = inode_storage_lookup(file_inode(f.file), map, true);
- fdput(f);
+ sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true);
return sdata ? sdata->data : NULL;
}
@@ -92,19 +91,16 @@ static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- if (!inode_storage_ptr(file_inode(f.file))) {
- fdput(f);
+ if (!inode_storage_ptr(file_inode(fd_file(f))))
return -EBADF;
- }
- sdata = bpf_local_storage_update(file_inode(f.file),
+ sdata = bpf_local_storage_update(file_inode(fd_file(f)),
(struct bpf_local_storage_map *)map,
value, map_flags, GFP_ATOMIC);
- fdput(f);
return PTR_ERR_OR_ZERO(sdata);
}
@@ -123,15 +119,11 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
{
- struct fd f = fdget_raw(*(int *)key);
- int err;
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
-
- err = inode_storage_delete(file_inode(f.file), map);
- fdput(f);
- return err;
+ return inode_storage_delete(file_inode(fd_file(f)), map);
}
/* *gfp_flags* is a hidden argument provided by the verifier */
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 08a338e1f231..6292ac5f9bd1 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -11,7 +11,6 @@
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
-#include <linux/bpf_verifier.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf_ids.h>
@@ -36,6 +35,24 @@ BTF_SET_START(bpf_lsm_hooks)
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)
+BTF_SET_START(bpf_lsm_disabled_hooks)
+BTF_ID(func, bpf_lsm_vm_enough_memory)
+BTF_ID(func, bpf_lsm_inode_need_killpriv)
+BTF_ID(func, bpf_lsm_inode_getsecurity)
+BTF_ID(func, bpf_lsm_inode_listsecurity)
+BTF_ID(func, bpf_lsm_inode_copy_up_xattr)
+BTF_ID(func, bpf_lsm_getselfattr)
+BTF_ID(func, bpf_lsm_getprocattr)
+BTF_ID(func, bpf_lsm_setprocattr)
+#ifdef CONFIG_KEYS
+BTF_ID(func, bpf_lsm_key_getsecurity)
+#endif
+#ifdef CONFIG_AUDIT
+BTF_ID(func, bpf_lsm_audit_rule_match)
+#endif
+BTF_ID(func, bpf_lsm_ismaclabel)
+BTF_SET_END(bpf_lsm_disabled_hooks)
+
/* List of LSM hooks that should operate on 'current' cgroup regardless
* of function signature.
*/
@@ -97,15 +114,24 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
+ u32 btf_id = prog->aux->attach_btf_id;
+ const char *func_name = prog->aux->attach_func_name;
+
if (!prog->gpl_compatible) {
bpf_log(vlog,
"LSM programs must have a GPL compatible license\n");
return -EINVAL;
}
- if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) {
+ if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) {
+ bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n",
+ btf_id, func_name);
+ return -EINVAL;
+ }
+
+ if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) {
bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
- prog->aux->attach_btf_id, prog->aux->attach_func_name);
+ btf_id, func_name);
return -EINVAL;
}
@@ -390,3 +416,36 @@ const struct bpf_verifier_ops lsm_verifier_ops = {
.get_func_proto = bpf_lsm_func_proto,
.is_valid_access = btf_ctx_access,
};
+
+/* hooks return 0 or 1 */
+BTF_SET_START(bool_lsm_hooks)
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match)
+#endif
+#ifdef CONFIG_AUDIT
+BTF_ID(func, bpf_lsm_audit_rule_known)
+#endif
+BTF_ID(func, bpf_lsm_inode_xattr_skipcap)
+BTF_SET_END(bool_lsm_hooks)
+
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *retval_range)
+{
+ /* no return value range for void hooks */
+ if (!prog->aux->attach_func_proto->type)
+ return -EINVAL;
+
+ if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) {
+ retval_range->minval = 0;
+ retval_range->maxval = 1;
+ } else {
+ /* All other available LSM hooks, except task_prctl, return 0
+ * on success and negative error code on failure.
+ * To keep things simple, we only allow bpf progs to return 0
+ * or negative errno for task_prctl too.
+ */
+ retval_range->minval = -MAX_ERRNO;
+ retval_range->maxval = 0;
+ }
+ return 0;
+}
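A hedged sketch (not part of the patch) of how a verifier-side caller could consume the new range helper:

	struct bpf_retval_range range;

	if (!bpf_lsm_get_retval_range(prog, &range) &&
	    (retval < range.minval || retval > range.maxval))
		return -EINVAL;	/* LSM program returns outside the allowed range */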
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 0d515ec57aa5..fda3dd2ee984 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -837,7 +837,7 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(st_map->btf,
map->btf_vmlinux_value_type_id,
value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
kfree(value);
@@ -1040,6 +1040,13 @@ void bpf_struct_ops_put(const void *kdata)
bpf_map_put(&st_map->map);
}
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
+
+ return func_ptr ? 0 : -ENOTSUPP;
+}
+
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index a4e4f8d43ecf..75e4fe83c509 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -212,7 +212,7 @@ enum btf_kfunc_hook {
BTF_KFUNC_HOOK_TRACING,
BTF_KFUNC_HOOK_SYSCALL,
BTF_KFUNC_HOOK_FMODRET,
- BTF_KFUNC_HOOK_CGROUP_SKB,
+ BTF_KFUNC_HOOK_CGROUP,
BTF_KFUNC_HOOK_SCHED_ACT,
BTF_KFUNC_HOOK_SK_SKB,
BTF_KFUNC_HOOK_SOCKET_FILTER,
@@ -790,7 +790,7 @@ const char *btf_str_by_offset(const struct btf *btf, u32 offset)
return NULL;
}
-static bool __btf_name_valid(const struct btf *btf, u32 offset)
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
/* offset must be valid */
const char *src = btf_str_by_offset(btf, offset);
@@ -811,11 +811,6 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset)
return !*src;
}
-static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
-{
- return __btf_name_valid(btf, offset);
-}
-
/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
@@ -3761,6 +3756,7 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
return -EINVAL;
}
+/* Callers must ensure the lifetime of btf if it is program BTF */
static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
struct btf_field_info *info)
{
@@ -3789,7 +3785,6 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
field->kptr.dtor = NULL;
id = info->kptr.type_id;
kptr_btf = (struct btf *)btf;
- btf_get(kptr_btf);
goto found_dtor;
}
if (id < 0)
@@ -4631,7 +4626,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env,
}
if (!t->name_off ||
- !__btf_name_valid(env->btf, t->name_off)) {
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
@@ -5519,36 +5514,72 @@ static const char *alloc_obj_fields[] = {
static struct btf_struct_metas *
btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
{
- union {
- struct btf_id_set set;
- struct {
- u32 _cnt;
- u32 _ids[ARRAY_SIZE(alloc_obj_fields)];
- } _arr;
- } aof;
struct btf_struct_metas *tab = NULL;
+ struct btf_id_set *aof;
int i, n, id, ret;
BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
- memset(&aof, 0, sizeof(aof));
+ aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
+ if (!aof)
+ return ERR_PTR(-ENOMEM);
+ aof->cnt = 0;
+
for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
/* Try to find whether this special type exists in user BTF, and
* if so remember its ID so we can easily find it among members
* of structs that we iterate in the next loop.
*/
+ struct btf_id_set *new_aof;
+
id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
if (id < 0)
continue;
- aof.set.ids[aof.set.cnt++] = id;
+
+ new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new_aof) {
+ ret = -ENOMEM;
+ goto free_aof;
+ }
+ aof = new_aof;
+ aof->ids[aof->cnt++] = id;
+ }
+
+ n = btf_nr_types(btf);
+ for (i = 1; i < n; i++) {
+ /* Try to find if there are kptrs in user BTF and remember their ID */
+ struct btf_id_set *new_aof;
+ struct btf_field_info tmp;
+ const struct btf_type *t;
+
+ t = btf_type_by_id(btf, i);
+ if (!t) {
+ ret = -EINVAL;
+ goto free_aof;
+ }
+
+ ret = btf_find_kptr(btf, t, 0, 0, &tmp);
+ if (ret != BTF_FIELD_FOUND)
+ continue;
+
+ new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new_aof) {
+ ret = -ENOMEM;
+ goto free_aof;
+ }
+ aof = new_aof;
+ aof->ids[aof->cnt++] = i;
}
- if (!aof.set.cnt)
+ if (!aof->cnt) {
+ kfree(aof);
return NULL;
- sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
+ }
+ sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
- n = btf_nr_types(btf);
for (i = 1; i < n; i++) {
struct btf_struct_metas *new_tab;
const struct btf_member *member;
@@ -5558,17 +5589,13 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
int j, tab_cnt;
t = btf_type_by_id(btf, i);
- if (!t) {
- ret = -EINVAL;
- goto free;
- }
if (!__btf_type_is_struct(t))
continue;
cond_resched();
for_each_member(j, t, member) {
- if (btf_id_set_contains(&aof.set, member->type))
+ if (btf_id_set_contains(aof, member->type))
goto parse;
}
continue;
@@ -5587,7 +5614,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
type = &tab->types[tab->cnt];
type->btf_id = i;
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
- BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size);
+ BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
+ BPF_KPTR, t->size);
/* The record cannot be unset, treat it as an error if so */
if (IS_ERR_OR_NULL(record)) {
ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
@@ -5596,9 +5624,12 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
type->record = record;
tab->cnt++;
}
+ kfree(aof);
return tab;
free:
btf_struct_metas_free(tab);
+free_aof:
+ kfree(aof);
return ERR_PTR(ret);
}
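The krealloc() growth pattern used twice above, shown in isolation: struct btf_id_set ends in a flexible array, so the new allocation size is computed with offsetof() over ids[cnt + 1]. A minimal, self-contained sketch:

	static struct btf_id_set *id_set_add(struct btf_id_set *set, u32 id)
	{
		struct btf_id_set *new_set;

		new_set = krealloc(set, offsetof(struct btf_id_set, ids[set->cnt + 1]),
				   GFP_KERNEL | __GFP_NOWARN);
		if (!new_set)
			return NULL;	/* 'set' is untouched; caller must still free it */
		new_set->ids[new_set->cnt++] = id;
		return new_set;
	}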
@@ -6245,12 +6276,11 @@ static struct btf *btf_parse_module(const char *module_name, const void *data,
btf->kernel_btf = true;
snprintf(btf->name, sizeof(btf->name), "%s", module_name);
- btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
+ btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
if (!btf->data) {
err = -ENOMEM;
goto errout;
}
- memcpy(btf->data, data, data_size);
btf->data_size = data_size;
err = btf_parse_hdr(env);
@@ -6418,8 +6448,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (arg == nr_args) {
switch (prog->expected_attach_type) {
- case BPF_LSM_CGROUP:
case BPF_LSM_MAC:
+ /* mark we are accessing the return value */
+ info->is_retval = true;
+ fallthrough;
+ case BPF_LSM_CGROUP:
case BPF_TRACE_FEXIT:
/* When LSM programs are attached to void LSM hooks
* they use FEXIT trampolines and when attached to
@@ -6525,6 +6558,9 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (prog_args_trusted(prog))
info->reg_type |= PTR_TRUSTED;
+ if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
+ info->reg_type |= PTR_MAYBE_NULL;
+
if (tgt_prog) {
enum bpf_prog_type tgt_type;
@@ -7675,21 +7711,16 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
struct btf *btf_get_by_fd(int fd)
{
struct btf *btf;
- struct fd f;
+ CLASS(fd, f)(fd);
- f = fdget(fd);
-
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &btf_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &btf_fops)
return ERR_PTR(-EINVAL);
- }
- btf = f.file->private_data;
+ btf = fd_file(f)->private_data;
refcount_inc(&btf->refcnt);
- fdput(f);
return btf;
}
@@ -8051,15 +8082,44 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
+/* Validate well-formedness of iter argument type.
+ * On success, return positive BTF ID of iter state's STRUCT type.
+ * On error, a negative error code is returned.
+ */
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ const struct btf_param *arg;
+ const struct btf_type *t;
+ const char *name;
+ int btf_id;
+
+ if (btf_type_vlen(func) <= arg_idx)
+ return -EINVAL;
+
+ arg = &btf_params(func)[arg_idx];
+ t = btf_type_skip_modifiers(btf, arg->type, NULL);
+ if (!t || !btf_type_is_ptr(t))
+ return -EINVAL;
+ t = btf_type_skip_modifiers(btf, t->type, &btf_id);
+ if (!t || !__btf_type_is_struct(t))
+ return -EINVAL;
+
+ name = btf_name_by_offset(btf, t->name_off);
+ if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
+ return -EINVAL;
+
+ return btf_id;
+}
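What btf_check_iter_arg() enforces, seen from the kfunc side: the iterator state struct carries the "bpf_iter_" prefix, its size is a multiple of 8, and the new/next/destroy kfuncs are named after it. The in-tree numbers iterator is one shape that passes these checks:

	struct bpf_iter_num { __u64 __opaque[1]; } __attribute__((aligned(8)));

	__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
	__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it);
	__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it);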
+
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
const struct btf_type *func, u32 func_flags)
{
u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
- const char *name, *sfx, *iter_name;
- const struct btf_param *arg;
+ const char *sfx, *iter_name;
const struct btf_type *t;
char exp_name[128];
u32 nr_args;
+ int btf_id;
/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
if (!flags || (flags & (flags - 1)))
@@ -8070,28 +8130,21 @@ static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
if (nr_args < 1)
return -EINVAL;
- arg = &btf_params(func)[0];
- t = btf_type_skip_modifiers(btf, arg->type, NULL);
- if (!t || !btf_type_is_ptr(t))
- return -EINVAL;
- t = btf_type_skip_modifiers(btf, t->type, NULL);
- if (!t || !__btf_type_is_struct(t))
- return -EINVAL;
-
- name = btf_name_by_offset(btf, t->name_off);
- if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
- return -EINVAL;
+ btf_id = btf_check_iter_arg(btf, func, 0);
+ if (btf_id < 0)
+ return btf_id;
/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
* fit nicely in stack slots
*/
+ t = btf_type_by_id(btf, btf_id);
if (t->size == 0 || (t->size % 8))
return -EINVAL;
/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
* naming pattern
*/
- iter_name = name + sizeof(ITER_PREFIX) - 1;
+ iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
if (flags & KF_ITER_NEW)
sfx = "new";
else if (flags & KF_ITER_NEXT)
@@ -8306,13 +8359,19 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
case BPF_PROG_TYPE_STRUCT_OPS:
return BTF_KFUNC_HOOK_STRUCT_OPS;
case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_LSM:
return BTF_KFUNC_HOOK_TRACING;
case BPF_PROG_TYPE_SYSCALL:
return BTF_KFUNC_HOOK_SYSCALL;
case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- return BTF_KFUNC_HOOK_CGROUP_SKB;
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ return BTF_KFUNC_HOOK_CGROUP;
case BPF_PROG_TYPE_SCHED_ACT:
return BTF_KFUNC_HOOK_SCHED_ACT;
case BPF_PROG_TYPE_SK_SKB:
@@ -8888,6 +8947,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
struct bpf_core_cand_list cands = {};
struct bpf_core_relo_res targ_res;
struct bpf_core_spec *specs;
+ const struct btf_type *type;
int err;
/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
@@ -8897,6 +8957,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
if (!specs)
return -ENOMEM;
+ type = btf_type_by_id(ctx->btf, relo->type_id);
+ if (!type) {
+ bpf_log(ctx->log, "relo #%u: bad type id %u\n",
+ relo_idx, relo->type_id);
+ return -EINVAL;
+ }
+
if (need_cands) {
struct bpf_cand_cache *cc;
int i;
diff --git a/kernel/bpf/btf_iter.c b/kernel/bpf/btf_iter.c
new file mode 100644
index 000000000000..0e2c66a52df9
--- /dev/null
+++ b/kernel/bpf/btf_iter.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/btf_iter.c"
diff --git a/kernel/bpf/btf_relocate.c b/kernel/bpf/btf_relocate.c
new file mode 100644
index 000000000000..c12ccbf66507
--- /dev/null
+++ b/kernel/bpf/btf_relocate.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/btf_relocate.c"
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 8ba73042a239..e7113d700b87 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2581,6 +2581,8 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_cgroup_classid:
return &bpf_get_cgroup_classid_curr_proto;
#endif
+ case BPF_FUNC_current_task_under_cgroup:
+ return &bpf_current_task_under_cgroup_proto;
default:
return NULL;
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7ee62e38faf0..4e07cc057d6f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2302,6 +2302,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
{
enum bpf_prog_type prog_type = resolve_prog_type(fp);
bool ret;
+ struct bpf_prog_aux *aux = fp->aux;
if (fp->kprobe_override)
return false;
@@ -2311,7 +2312,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
* in the case of devmap and cpumap). Until device checks
* are implemented, prohibit adding dev-bound programs to program maps.
*/
- if (bpf_prog_is_dev_bound(fp->aux))
+ if (bpf_prog_is_dev_bound(aux))
return false;
spin_lock(&map->owner.lock);
@@ -2321,12 +2322,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
*/
map->owner.type = prog_type;
map->owner.jited = fp->jited;
- map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
+ map->owner.xdp_has_frags = aux->xdp_has_frags;
+ map->owner.attach_func_proto = aux->attach_func_proto;
ret = true;
} else {
ret = map->owner.type == prog_type &&
map->owner.jited == fp->jited &&
- map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
+ map->owner.xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+ map->owner.attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ ret = false;
+ break;
+ default:
+ break;
+ }
+ }
}
spin_unlock(&map->owner.lock);
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbdf5a1aabfe..a2f46785ac3b 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -354,12 +354,14 @@ static int cpu_map_kthread_run(void *data)
list_add_tail(&skb->list, &list);
}
- netif_receive_skb_list(&list);
- /* Feedback loop via tracepoint */
+ /* Feedback loop via tracepoint.
+ * NB: keep before recv to allow measuring enqueue/dequeue latency.
+ */
trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
sched, &stats);
+ netif_receive_skb_list(&list);
local_bh_enable(); /* resched point, may call do_softirq() */
}
__set_current_state(TASK_RUNNING);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 06115f8728e8..b14b87463ee0 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -462,6 +462,9 @@ static int htab_map_alloc_check(union bpf_attr *attr)
* kmalloc-able later in htab_map_update_elem()
*/
return -E2BIG;
+ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+ return -E2BIG;
return 0;
}
@@ -1049,14 +1052,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
/* alloc_percpu zero-fills */
- pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
- if (!pptr) {
+ void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
+
+ if (!ptr) {
bpf_mem_cache_free(&htab->ma, l_new);
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
- l_new->ptr_to_pptr = pptr;
- pptr = *(void **)pptr;
+ l_new->ptr_to_pptr = ptr;
+ pptr = *(void __percpu **)ptr;
}
pcpu_init_value(htab, pptr, value, onallcpus);
@@ -1586,7 +1590,7 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
seq_puts(m, ": ");
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
rcu_read_unlock();
}
@@ -2450,7 +2454,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
seq_printf(m, "\tcpu%d: ", cpu);
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(pptr, cpu), m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index b5f0adae8293..1a43d06eab28 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -158,6 +158,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
.func = bpf_get_smp_processor_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
+ .allow_fastcall = true,
};
BPF_CALL_0(bpf_get_numa_node_id)
@@ -517,16 +518,15 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
}
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
- long *, res)
+ s64 *, res)
{
long long _res;
int err;
+ *res = 0;
err = __bpf_strtoll(buf, buf_len, flags, &_res);
if (err < 0)
return err;
- if (_res != (long)_res)
- return -ERANGE;
*res = _res;
return err;
}
@@ -538,23 +538,23 @@ const struct bpf_func_proto bpf_strtol_proto = {
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(s64),
};
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
- unsigned long *, res)
+ u64 *, res)
{
unsigned long long _res;
bool is_negative;
int err;
+ *res = 0;
err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
if (err < 0)
return err;
if (is_negative)
return -EINVAL;
- if (_res != (unsigned long)_res)
- return -ERANGE;
*res = _res;
return err;
}
@@ -566,7 +566,8 @@ const struct bpf_func_proto bpf_strtoul_proto = {
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
};
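The ARG_PTR_TO_LONG removal above fixes the result slot at 8 bytes regardless of the kernel's sizeof(long), and the helpers now zero *res before parsing. A hedged BPF-program-side fragment under that contract:

	long res;	/* 8 bytes on the BPF target; must be 8-byte aligned */
	long err = bpf_strtol(buf, sizeof(buf) - 1, 0, &res);

	if (err < 0)
		return 0;	/* res was zeroed by the helper on entry */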
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
@@ -714,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
if (cpu >= nr_cpu_ids)
return (unsigned long)NULL;
- return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+ return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}
const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
@@ -727,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
- return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+ return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}
const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
@@ -1618,9 +1619,9 @@ void bpf_wq_cancel_and_free(void *val)
schedule_work(&work->delete_work);
}
-BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
- unsigned long *kptr = map_value;
+ unsigned long *kptr = dst;
/* This helper may be inlined by verifier. */
return xchg(kptr, (unsigned long)ptr);
@@ -1635,7 +1636,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.ret_btf_id = BPF_PTR_POISON,
- .arg1_type = ARG_PTR_TO_KPTR,
+ .arg1_type = ARG_KPTR_XCHG_DEST,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
.arg2_btf_id = BPF_PTR_POISON,
};
@@ -2033,6 +2034,7 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
}
+EXPORT_SYMBOL_GPL(bpf_base_func_proto);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock)
@@ -2457,6 +2459,29 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
return ret;
}
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct cgroup *cgrp;
+
+ if (unlikely(idx >= array->map.max_entries))
+ return -E2BIG;
+
+ cgrp = READ_ONCE(array->ptrs[idx]);
+ if (unlikely(!cgrp))
+ return -EAGAIN;
+
+ return task_under_cgroup_hierarchy(current, cgrp);
+}
+
+const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+ .func = bpf_current_task_under_cgroup,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
+
/**
* bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
* specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
@@ -2938,6 +2963,47 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
bpf_mem_free(&bpf_global_ma, kit->bits);
}
+/**
+ * bpf_copy_from_user_str() - Copy a string from an unsafe user address
+ * @dst: Destination address, in kernel space. This buffer must be
+ * at least @dst__sz bytes long.
+ * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
+ * @unsafe_ptr__ign: Source address, in user space.
+ * @flags: The only supported flag is BPF_F_PAD_ZEROS
+ *
+ * Copies a NUL-terminated string from userspace into BPF space. If the user
+ * string is too long, the copy is truncated but the dst buffer is still
+ * NUL-terminated, unless the buffer size is 0.
+ *
+ * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
+ * memset all of @dst on failure.
+ */
+__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
+{
+ int ret;
+
+ if (unlikely(flags & ~BPF_F_PAD_ZEROS))
+ return -EINVAL;
+
+ if (unlikely(!dst__sz))
+ return 0;
+
+ ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
+ if (ret < 0) {
+ if (flags & BPF_F_PAD_ZEROS)
+ memset((char *)dst, 0, dst__sz);
+
+ return ret;
+ }
+
+ if (flags & BPF_F_PAD_ZEROS)
+ memset((char *)dst + ret, 0, dst__sz - ret);
+ else
+ ((char *)dst)[ret] = '\0';
+
+ return ret + 1;
+}
+
__bpf_kfunc_end_defs();
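A hedged BPF-program-side fragment using the new kfunc (the extern declaration is shown because kfuncs are resolved via BTF; the surrounding program context is illustrative):

	extern int bpf_copy_from_user_str(void *dst, u32 dst__sz,
					  const void *unsafe_ptr__ign, u64 flags) __ksym;

	char buf[64];
	int len = bpf_copy_from_user_str(buf, sizeof(buf), user_ptr, BPF_F_PAD_ZEROS);

	if (len < 0)
		return 0;	/* fault: all of buf was zeroed due to the flag */
	/* on success len counts the trailing NUL: buf[len - 1] == '\0' */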
BTF_KFUNCS_START(generic_btf_ids)
@@ -3023,6 +3089,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable)
BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
@@ -3051,6 +3118,7 @@ static int __init kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
ARRAY_SIZE(generic_dtors),
THIS_MODULE);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index af5d2ffadd70..d8fc5eba529d 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -709,10 +709,10 @@ static void seq_print_delegate_opts(struct seq_file *m,
msk = 1ULL << e->val;
if (delegate_msk & msk) {
/* emit lower-case name without prefix */
- seq_printf(m, "%c", first ? '=' : ':');
+ seq_putc(m, first ? '=' : ':');
name += pfx_len;
while (*name) {
- seq_printf(m, "%c", tolower(*name));
+ seq_putc(m, tolower(*name));
name++;
}
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index a04f505aefe9..3969eb0382af 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -431,7 +431,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
seq_puts(m, ": ");
btf_type_seq_show(map->btf, map->btf_value_type_id,
&READ_ONCE(storage->buf)->data[0], m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
} else {
seq_puts(m, ": {\n");
for_each_possible_cpu(cpu) {
@@ -439,7 +439,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(storage->percpu_buf, cpu),
m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
}
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index b4f18c85d7bc..645bd30bc9a9 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -11,24 +11,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
u32 inner_map_meta_size;
- struct fd f;
- int ret;
+ CLASS(fd, f)(inner_map_ufd);
- f = fdget(inner_map_ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
/* Does not support >1 level map-in-map */
- if (inner_map->inner_map_meta) {
- ret = -EINVAL;
- goto put;
- }
+ if (inner_map->inner_map_meta)
+ return ERR_PTR(-EINVAL);
- if (!inner_map->ops->map_meta_equal) {
- ret = -ENOTSUPP;
- goto put;
- }
+ if (!inner_map->ops->map_meta_equal)
+ return ERR_PTR(-ENOTSUPP);
inner_map_meta_size = sizeof(*inner_map_meta);
/* In some cases verifier needs to access beyond just base map. */
@@ -36,10 +30,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta_size = sizeof(struct bpf_array);
inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
- if (!inner_map_meta) {
- ret = -ENOMEM;
- goto put;
- }
+ if (!inner_map_meta)
+ return ERR_PTR(-ENOMEM);
inner_map_meta->map_type = inner_map->map_type;
inner_map_meta->key_size = inner_map->key_size;
@@ -53,8 +45,9 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* invalid/empty/valid, but ERR_PTR in case of errors. During
* equality NULL or IS_ERR is equivalent.
*/
- ret = PTR_ERR(inner_map_meta->record);
- goto free;
+ struct bpf_map *ret = ERR_CAST(inner_map_meta->record);
+ kfree(inner_map_meta);
+ return ret;
}
/* Note: We must use the same BTF, as we also used btf_record_dup above
* which relies on BTF being same for both maps, as some members like
@@ -77,14 +70,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_array_meta->elem_size = inner_array->elem_size;
inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
}
-
- fdput(f);
return inner_map_meta;
-free:
- kfree(inner_map_meta);
-put:
- fdput(f);
- return ERR_PTR(ret);
}
void bpf_map_meta_free(struct bpf_map *map_meta)
@@ -110,9 +96,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
int ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
- struct fd f;
+ CLASS(fd, f)(ufd);
- f = fdget(ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
@@ -123,7 +108,6 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
else
inner_map = ERR_PTR(-EINVAL);
- fdput(f);
return inner_map;
}
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index dec892ded031..b3858a76e0b3 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -138,8 +138,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
if (c->percpu_size) {
- void **obj = kmalloc_node(c->percpu_size, flags, node);
- void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+ void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
+ void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
if (!obj || !pptr) {
free_percpu(pptr);
@@ -253,7 +253,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
static void free_one(void *obj, bool percpu)
{
if (percpu) {
- free_percpu(((void **)obj)[1]);
+ free_percpu(((void __percpu **)obj)[1]);
kfree(obj);
return;
}
@@ -509,8 +509,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
*/
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
- struct bpf_mem_caches *cc, __percpu *pcc;
- struct bpf_mem_cache *c, __percpu *pc;
+ struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
+ struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
@@ -591,7 +591,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
{
- struct bpf_mem_caches *cc, __percpu *pcc;
+ struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
int cpu, i, unit_size, percpu_size;
struct obj_cgroup *objcg;
struct bpf_mem_cache *c;
diff --git a/kernel/bpf/relo_core.c b/kernel/bpf/relo_core.c
new file mode 100644
index 000000000000..aa822c9fcfde
--- /dev/null
+++ b/kernel/bpf/relo_core.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/relo_core.c"
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 4b4f9670f1a9..49b8e5a0c6b4 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -308,7 +308,7 @@ put_file_unlock:
spin_unlock_bh(&reuseport_lock);
put_file:
- fput(socket->file);
+ sockfd_put(socket);
return err;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index c99f8e5234ac..3615c06b7dfa 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -124,8 +124,24 @@ free_smap:
return ERR_PTR(err);
}
+static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault)
+{
+ return may_fault ? build_id_parse(vma, build_id, NULL)
+ : build_id_parse_nofault(vma, build_id, NULL);
+}
+
+/*
+ * Expects all id_offs[i].ip values to be set to correct initial IPs.
+ * They will be subsequently:
+ * - either adjusted in place to a file offset, if build ID fetching
+ * succeeds; in this case id_offs[i].build_id is set to correct build ID,
+ * and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID;
+ * - or IP will be kept intact, if build ID fetching fails; in this case
+ * id_offs[i].build_id is zeroed out and id_offs[i].status is set to
+ * BPF_STACK_BUILD_ID_IP.
+ */
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
- u64 *ips, u32 trace_nr, bool user)
+ u32 trace_nr, bool user, bool may_fault)
{
int i;
struct mmap_unlock_irq_work *work = NULL;
@@ -142,30 +158,28 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
- id_offs[i].ip = ips[i];
memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
}
return;
}
for (i = 0; i < trace_nr; i++) {
- if (range_in_vma(prev_vma, ips[i], ips[i])) {
+ u64 ip = READ_ONCE(id_offs[i].ip);
+
+ if (range_in_vma(prev_vma, ip, ip)) {
vma = prev_vma;
- memcpy(id_offs[i].build_id, prev_build_id,
- BUILD_ID_SIZE_MAX);
+ memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX);
goto build_id_valid;
}
- vma = find_vma(current->mm, ips[i]);
- if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
+ vma = find_vma(current->mm, ip);
+ if (!vma || fetch_build_id(vma, id_offs[i].build_id, may_fault)) {
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
- id_offs[i].ip = ips[i];
memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
continue;
}
build_id_valid:
- id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
- - vma->vm_start;
+ id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
prev_vma = vma;
prev_build_id = id_offs[i].build_id;
@@ -216,7 +230,7 @@ static long __bpf_get_stackid(struct bpf_map *map,
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
- u32 hash, id, trace_nr, trace_len;
+ u32 hash, id, trace_nr, trace_len, i;
bool user = flags & BPF_F_USER_STACK;
u64 *ips;
bool hash_matches;
@@ -238,15 +252,18 @@ static long __bpf_get_stackid(struct bpf_map *map,
return id;
if (stack_map_use_build_id(map)) {
+ struct bpf_stack_build_id *id_offs;
+
/* for build_id+offset, pop a bucket before slow cmp */
new_bucket = (struct stack_map_bucket *)
pcpu_freelist_pop(&smap->freelist);
if (unlikely(!new_bucket))
return -ENOMEM;
new_bucket->nr = trace_nr;
- stack_map_get_build_id_offset(
- (struct bpf_stack_build_id *)new_bucket->data,
- ips, trace_nr, user);
+ id_offs = (struct bpf_stack_build_id *)new_bucket->data;
+ for (i = 0; i < trace_nr; i++)
+ id_offs[i].ip = ips[i];
+ stack_map_get_build_id_offset(id_offs, trace_nr, user, false /* !may_fault */);
trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
if (hash_matches && bucket->nr == trace_nr &&
memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
@@ -387,7 +404,7 @@ const struct bpf_func_proto bpf_get_stackid_proto_pe = {
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
struct perf_callchain_entry *trace_in,
- void *buf, u32 size, u64 flags)
+ void *buf, u32 size, u64 flags, bool may_fault)
{
u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
@@ -405,8 +422,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (kernel && user_build_id)
goto clear;
- elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
- : sizeof(u64);
+ elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64);
if (unlikely(size % elem_size))
goto clear;
@@ -427,6 +443,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (sysctl_perf_event_max_stack < max_depth)
max_depth = sysctl_perf_event_max_stack;
+ if (may_fault)
+ rcu_read_lock(); /* need RCU for perf's callchain below */
+
if (trace_in)
trace = trace_in;
else if (kernel && task)
@@ -434,21 +453,34 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
else
trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
crosstask, false);
- if (unlikely(!trace))
- goto err_fault;
- if (trace->nr < skip)
+ if (unlikely(!trace) || trace->nr < skip) {
+ if (may_fault)
+ rcu_read_unlock();
goto err_fault;
+ }
trace_nr = trace->nr - skip;
trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
copy_len = trace_nr * elem_size;
ips = trace->ip + skip;
- if (user && user_build_id)
- stack_map_get_build_id_offset(buf, ips, trace_nr, user);
- else
+ if (user_build_id) {
+ struct bpf_stack_build_id *id_offs = buf;
+ u32 i;
+
+ for (i = 0; i < trace_nr; i++)
+ id_offs[i].ip = ips[i];
+ } else {
memcpy(buf, ips, copy_len);
+ }
+
+ /* trace/ips should not be dereferenced after this point */
+ if (may_fault)
+ rcu_read_unlock();
+
+ if (user_build_id)
+ stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
if (size > copy_len)
memset(buf + copy_len, 0, size - copy_len);
@@ -464,7 +496,7 @@ clear:
BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
u64, flags)
{
- return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);
}
const struct bpf_func_proto bpf_get_stack_proto = {
@@ -477,8 +509,24 @@ const struct bpf_func_proto bpf_get_stack_proto = {
.arg4_type = ARG_ANYTHING,
};
-BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
- u32, size, u64, flags)
+BPF_CALL_4(bpf_get_stack_sleepable, struct pt_regs *, regs, void *, buf, u32, size,
+ u64, flags)
+{
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, true /* may_fault */);
+}
+
+const struct bpf_func_proto bpf_get_stack_sleepable_proto = {
+ .func = bpf_get_stack_sleepable,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size,
+ u64 flags, bool may_fault)
{
struct pt_regs *regs;
long res = -EINVAL;
@@ -488,12 +536,18 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
regs = task_pt_regs(task);
if (regs)
- res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault);
put_task_stack(task);
return res;
}
+BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+{
+ return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */);
+}
+
const struct bpf_func_proto bpf_get_task_stack_proto = {
.func = bpf_get_task_stack,
.gpl_only = false,
@@ -505,6 +559,23 @@ const struct bpf_func_proto bpf_get_task_stack_proto = {
.arg4_type = ARG_ANYTHING,
};
+BPF_CALL_4(bpf_get_task_stack_sleepable, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+{
+ return __bpf_get_task_stack(task, buf, size, flags, true /* may_fault */);
+}
+
+const struct bpf_func_proto bpf_get_task_stack_sleepable_proto = {
+ .func = bpf_get_task_stack_sleepable,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+};
+
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
void *, buf, u32, size, u64, flags)
{
@@ -516,7 +587,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
__u64 nr_kernel;
if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
- return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_USER_BUILD_ID)))
@@ -536,7 +607,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
__u64 nr = trace->nr;
trace->nr = nr_kernel;
- err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
+ err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
/* restore nr */
trace->nr = nr;
@@ -548,7 +619,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
goto clear;
flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
- err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
+ err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
}
return err;
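A hedged BPF-side note on the sleepable variants added above: the call site in a program does not change; a sleepable program is expected to get the may_fault proto wired in by helper-proto selection elsewhere in this series (not shown in these hunks), letting build-ID resolution fault in file-backed pages instead of falling back to raw IPs.

	struct bpf_stack_build_id ids[16];
	long n = bpf_get_stack(ctx, ids, sizeof(ids),
			       BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);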
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index bf6c5f685ea2..a8f1808a1ca5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -550,7 +550,8 @@ void btf_record_free(struct btf_record *rec)
case BPF_KPTR_PERCPU:
if (rec->fields[i].kptr.module)
module_put(rec->fields[i].kptr.module);
- btf_put(rec->fields[i].kptr.btf);
+ if (btf_is_kernel(rec->fields[i].kptr.btf))
+ btf_put(rec->fields[i].kptr.btf);
break;
case BPF_LIST_HEAD:
case BPF_LIST_NODE:
@@ -596,7 +597,8 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
- btf_get(fields[i].kptr.btf);
+ if (btf_is_kernel(fields[i].kptr.btf))
+ btf_get(fields[i].kptr.btf);
if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
ret = -ENXIO;
goto free;
@@ -733,15 +735,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
}
}
-/* called from workqueue */
-static void bpf_map_free_deferred(struct work_struct *work)
+static void bpf_map_free(struct bpf_map *map)
{
- struct bpf_map *map = container_of(work, struct bpf_map, work);
struct btf_record *rec = map->record;
struct btf *btf = map->btf;
- security_bpf_map_free(map);
- bpf_map_release_memcg(map);
/* implementation dependent freeing */
map->ops->map_free(map);
/* Delay freeing of btf_record for maps, as map_free
@@ -760,6 +758,16 @@ static void bpf_map_free_deferred(struct work_struct *work)
btf_put(btf);
}
+/* called from workqueue */
+static void bpf_map_free_deferred(struct work_struct *work)
+{
+ struct bpf_map *map = container_of(work, struct bpf_map, work);
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
+ bpf_map_free(map);
+}
+
static void bpf_map_put_uref(struct bpf_map *map)
{
if (atomic64_dec_and_test(&map->usercnt)) {
@@ -829,7 +837,7 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
- fmode_t mode = f.file->f_mode;
+ fmode_t mode = fd_file(f)->f_mode;
/* Our file permissions may have been overridden by global
* map permissions facing syscall side.
@@ -1411,28 +1419,12 @@ static int map_create(union bpf_attr *attr)
free_map_sec:
security_bpf_map_free(map);
free_map:
- btf_put(map->btf);
- map->ops->map_free(map);
+ bpf_map_free(map);
put_token:
bpf_token_put(token);
return err;
}
-/* if error is returned, fd is released.
- * On success caller should complete fd access with matching fdput()
- */
-struct bpf_map *__bpf_map_get(struct fd f)
-{
- if (!f.file)
- return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_map_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return f.file->private_data;
-}
-
void bpf_map_inc(struct bpf_map *map)
{
atomic64_inc(&map->refcnt);
@@ -1448,15 +1440,11 @@ EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
struct bpf_map *bpf_map_get(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
-
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- bpf_map_inc(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc(map);
return map;
}
@@ -1464,15 +1452,11 @@ EXPORT_SYMBOL(bpf_map_get);
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
-
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- bpf_map_inc_with_uref(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc_with_uref(map);
return map;
}
@@ -1537,11 +1521,9 @@ static int map_lookup_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -1550,26 +1532,20 @@ static int map_lookup_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if ((attr->flags & BPF_F_LOCK) &&
- !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
- err = -EINVAL;
- goto err_put;
- }
+ !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
value_size = bpf_map_value_size(map);
@@ -1600,8 +1576,6 @@ free_value:
kvfree(value);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -1612,17 +1586,15 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1651,7 +1623,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
goto free_key;
}
- err = bpf_map_update_value(map, f.file, key, value, attr->flags);
+ err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
if (!err)
maybe_wait_bpf_programs(map);
@@ -1660,7 +1632,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1669,16 +1640,14 @@ err_put:
static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
- struct fd f;
void *key;
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1715,7 +1684,6 @@ out:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1726,30 +1694,24 @@ static int map_get_next_key(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *unext_key = u64_to_user_ptr(attr->next_key);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *next_key;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if (ukey) {
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
} else {
key = NULL;
}
@@ -1781,8 +1743,6 @@ free_next_key:
kvfree(next_key);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -2011,11 +1971,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
@@ -2024,7 +1982,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -2094,7 +2052,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -2102,27 +2059,22 @@ err_put:
static int map_freeze(const union bpf_attr *attr)
{
- int err = 0, ufd = attr->map_fd;
+ int err = 0;
struct bpf_map *map;
- struct fd f;
if (CHECK_ATTR(BPF_MAP_FREEZE))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
- fdput(f);
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
return -ENOTSUPP;
- }
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
- fdput(f);
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
return -EPERM;
- }
mutex_lock(&map->freeze_mutex);
if (bpf_map_write_active(map)) {
@@ -2137,7 +2089,6 @@ static int map_freeze(const union bpf_attr *attr)
WRITE_ONCE(map->frozen, true);
err_put:
mutex_unlock(&map->freeze_mutex);
- fdput(f);
return err;
}
@@ -2407,18 +2358,6 @@ int bpf_prog_new_fd(struct bpf_prog *prog)
O_RDWR | O_CLOEXEC);
}
-static struct bpf_prog *____bpf_prog_get(struct fd f)
-{
- if (!f.file)
- return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_prog_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return f.file->private_data;
-}
-
void bpf_prog_add(struct bpf_prog *prog, int i)
{
atomic64_add(i, &prog->aux->refcnt);
@@ -2474,20 +2413,19 @@ bool bpf_prog_get_ok(struct bpf_prog *prog,
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
bool attach_drv)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_prog *prog;
- prog = ____bpf_prog_get(f);
- if (IS_ERR(prog))
- return prog;
- if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
- prog = ERR_PTR(-EINVAL);
- goto out;
- }
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (fd_file(f)->f_op != &bpf_prog_fops)
+ return ERR_PTR(-EINVAL);
+
+ prog = fd_file(f)->private_data;
+ if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
+ return ERR_PTR(-EINVAL);
bpf_prog_inc(prog);
-out:
- fdput(f);
return prog;
}
@@ -3256,20 +3194,16 @@ int bpf_link_new_fd(struct bpf_link *link)
struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_link *link;
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
return ERR_PTR(-EINVAL);
- }
- link = f.file->private_data;
+ link = fd_file(f)->private_data;
bpf_link_inc(link);
- fdput(f);
-
return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);
@@ -4974,33 +4908,25 @@ static int bpf_link_get_info_by_fd(struct file *file,
static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- int ufd = attr->info.bpf_fd;
- struct fd f;
- int err;
-
if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
return -EINVAL;
- f = fdget(ufd);
- if (!f.file)
+ CLASS(fd, f)(attr->info.bpf_fd);
+ if (fd_empty(f))
return -EBADFD;
- if (f.file->f_op == &bpf_prog_fops)
- err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
+ if (fd_file(f)->f_op == &bpf_prog_fops)
+ return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
- else if (f.file->f_op == &bpf_map_fops)
- err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
+ else if (fd_file(f)->f_op == &bpf_map_fops)
+ return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
- else if (f.file->f_op == &btf_fops)
- err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
- else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll)
- err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
+ else if (fd_file(f)->f_op == &btf_fops)
+ return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
+ else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
+ return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
attr, uattr);
- else
- err = -EINVAL;
-
- fdput(f);
- return err;
+ return -EINVAL;
}
#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
@@ -5188,14 +5114,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
struct bpf_map *map;
- int err, ufd;
- struct fd f;
+ int err;
if (CHECK_ATTR(BPF_MAP_BATCH))
return -EINVAL;
- ufd = attr->batch.map_fd;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->batch.map_fd);
+
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -5215,7 +5140,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
else if (cmd == BPF_MAP_UPDATE_BATCH)
- BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
+ BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
else
BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
@@ -5223,7 +5148,6 @@ err_put:
maybe_wait_bpf_programs(map);
bpf_map_write_active_dec(map);
}
- fdput(f);
return err;
}
@@ -5668,7 +5592,7 @@ static int token_create(union bpf_attr *attr)
return bpf_token_create(attr);
}
-static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
+static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
int err;
@@ -5932,6 +5856,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
+ *res = 0;
if (flags)
return -EINVAL;
@@ -5952,7 +5877,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
.arg1_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
};
static const struct bpf_func_proto *
diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c
index d6ccf8d00eab..dcbec1a0dfb3 100644
--- a/kernel/bpf/token.c
+++ b/kernel/bpf/token.c
@@ -116,67 +116,52 @@ int bpf_token_create(union bpf_attr *attr)
struct user_namespace *userns;
struct inode *inode;
struct file *file;
+ CLASS(fd, f)(attr->token_create.bpffs_fd);
struct path path;
- struct fd f;
+ struct super_block *sb;
umode_t mode;
int err, fd;
- f = fdget(attr->token_create.bpffs_fd);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- path = f.file->f_path;
- path_get(&path);
- fdput(f);
+ path = fd_file(f)->f_path;
+ sb = path.dentry->d_sb;
- if (path.dentry != path.mnt->mnt_sb->s_root) {
- err = -EINVAL;
- goto out_path;
- }
- if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
- err = -EINVAL;
- goto out_path;
- }
+ if (path.dentry != sb->s_root)
+ return -EINVAL;
+ if (sb->s_op != &bpf_super_ops)
+ return -EINVAL;
err = path_permission(&path, MAY_ACCESS);
if (err)
- goto out_path;
+ return err;
- userns = path.dentry->d_sb->s_user_ns;
+ userns = sb->s_user_ns;
/*
* Enforce that creators of BPF tokens are in the same user
* namespace as the BPF FS instance. This makes reasoning about
* permissions a lot easier and we can always relax this later.
*/
- if (current_user_ns() != userns) {
- err = -EPERM;
- goto out_path;
- }
- if (!ns_capable(userns, CAP_BPF)) {
- err = -EPERM;
- goto out_path;
- }
+ if (current_user_ns() != userns)
+ return -EPERM;
+ if (!ns_capable(userns, CAP_BPF))
+ return -EPERM;
/* Creating BPF token in init_user_ns doesn't make much sense. */
- if (current_user_ns() == &init_user_ns) {
- err = -EOPNOTSUPP;
- goto out_path;
- }
+ if (current_user_ns() == &init_user_ns)
+ return -EOPNOTSUPP;
- mnt_opts = path.dentry->d_sb->s_fs_info;
+ mnt_opts = sb->s_fs_info;
if (mnt_opts->delegate_cmds == 0 &&
mnt_opts->delegate_maps == 0 &&
mnt_opts->delegate_progs == 0 &&
- mnt_opts->delegate_attachs == 0) {
- err = -ENOENT; /* no BPF token delegation is set up */
- goto out_path;
- }
+ mnt_opts->delegate_attachs == 0)
+ return -ENOENT; /* no BPF token delegation is set up */
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out_path;
- }
+ inode = bpf_get_inode(sb, NULL, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
inode->i_op = &bpf_token_iops;
inode->i_fop = &bpf_token_fops;
@@ -185,8 +170,7 @@ int bpf_token_create(union bpf_attr *attr)
file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
if (IS_ERR(file)) {
iput(inode);
- err = PTR_ERR(file);
- goto out_path;
+ return PTR_ERR(file);
}
token = kzalloc(sizeof(*token), GFP_USER);
@@ -218,33 +202,27 @@ int bpf_token_create(union bpf_attr *attr)
file->private_data = token;
fd_install(fd, file);
- path_put(&path);
return fd;
out_token:
bpf_token_free(token);
out_file:
fput(file);
-out_path:
- path_put(&path);
return err;
}
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_token *token;
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_token_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_token_fops)
return ERR_PTR(-EINVAL);
- }
- token = f.file->private_data;
+ token = fd_file(f)->private_data;
bpf_token_inc(token);
- fdput(f);
return token;
}
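+/* Note: CLASS(fd, f)(ufd) uses scope-based cleanup: fdput() runs
+ * automatically when f goes out of scope, which is what allows the
+ * explicit fdput() calls (and the separate path_get()/path_put()
+ * pinning in bpf_token_create()) to be dropped on every exit path.
+ */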
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d8520095ca03..9a7ed527e47e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -28,6 +28,8 @@
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>
+#include <linux/trace_events.h>
+#include <linux/kallsyms.h>
#include "disasm.h"
@@ -383,11 +385,6 @@ static void verbose_invalid_scalar(struct bpf_verifier_env *env,
verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
}
-static bool type_may_be_null(u32 type)
-{
- return type & PTR_MAYBE_NULL;
-}
-
static bool reg_not_null(const struct bpf_reg_state *reg)
{
enum bpf_reg_type type;
@@ -2182,6 +2179,44 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
reg->smin_value = max_t(s64, reg->smin_value, new_smin);
reg->smax_value = min_t(s64, reg->smax_value, new_smax);
}
+
+ /* Here we would like to handle a special case after a sign-extending load,
+ * when upper bits for a 64-bit range are all 1s or all 0s.
+ *
+ * Upper bits are all 1s when register is in a range:
+ * [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff]
+ * Upper bits are all 0s when register is in a range:
+ * [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff]
+ * Together this forms a continuous range:
+ * [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff]
+ *
+ * Now, suppose that register range is in fact tighter:
+ * [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R)
+ * Also suppose that its 32-bit range is positive,
+ * meaning that lower 32-bits of the full 64-bit register
+ * are in the range:
+ * [0x0000_0000, 0x7fff_ffff] (W)
+ *
+ * If this happens, then any value in a range:
+ * [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff]
+ * is smaller than the lowest bound of the range (R):
+ * 0xffff_ffff_8000_0000
+ * which means that upper bits of the full 64-bit register
+ * can't be all 1s, when lower bits are in range (W).
+ *
+ * Note that:
+ * - 0xffff_ffff_8000_0000 == (s64)S32_MIN
+ * - 0x0000_0000_7fff_ffff == (s64)S32_MAX
+ * These relations are used in the conditions below.
+ */
+ if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) {
+ reg->smin_value = reg->s32_min_value;
+ reg->smax_value = reg->s32_max_value;
+ reg->umin_value = reg->s32_min_value;
+ reg->umax_value = reg->s32_max_value;
+ reg->var_off = tnum_intersect(reg->var_off,
+ tnum_range(reg->smin_value, reg->smax_value));
+ }
}
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
@@ -2334,6 +2369,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
__mark_reg_unknown(env, regs + regno);
}
+static int __mark_reg_s32_range(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs,
+ u32 regno,
+ s32 s32_min,
+ s32 s32_max)
+{
+ struct bpf_reg_state *reg = regs + regno;
+
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max);
+
+ reg->smin_value = max_t(s64, reg->smin_value, s32_min);
+ reg->smax_value = min_t(s64, reg->smax_value, s32_max);
+
+ reg_bounds_sync(reg);
+
+ return reg_bounds_sanity_check(env, reg, "s32_range");
+}
+
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{
@@ -3335,9 +3389,87 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
return env->insn_aux_data[insn_idx].jmp_point;
}
+#define LR_FRAMENO_BITS 3
+#define LR_SPI_BITS 6
+#define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
+#define LR_SIZE_BITS 4
+#define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
+#define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
+#define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
+#define LR_SPI_OFF LR_FRAMENO_BITS
+#define LR_IS_REG_OFF (LR_SPI_BITS + LR_FRAMENO_BITS)
+#define LINKED_REGS_MAX 6
+
+struct linked_reg {
+ u8 frameno;
+ union {
+ u8 spi;
+ u8 regno;
+ };
+ bool is_reg;
+};
+
+struct linked_regs {
+ int cnt;
+ struct linked_reg entries[LINKED_REGS_MAX];
+};
+
+static struct linked_reg *linked_regs_push(struct linked_regs *s)
+{
+ if (s->cnt < LINKED_REGS_MAX)
+ return &s->entries[s->cnt++];
+
+ return NULL;
+}
+
+/* Use u64 as a vector of 6 10-bit values, with the low 4 bits tracking
+ * the number of elements currently in the stack.
+ * Pack one history entry for linked registers as 10 bits in the following format:
+ * - 3-bits frameno
+ * - 6-bits spi_or_reg
+ * - 1-bit is_reg
+ */
+static u64 linked_regs_pack(struct linked_regs *s)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < s->cnt; ++i) {
+ struct linked_reg *e = &s->entries[i];
+ u64 tmp = 0;
+
+ tmp |= e->frameno;
+ tmp |= e->spi << LR_SPI_OFF;
+ tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF;
+
+ val <<= LR_ENTRY_BITS;
+ val |= tmp;
+ }
+ val <<= LR_SIZE_BITS;
+ val |= s->cnt;
+ return val;
+}
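+/* Illustrative sketch: pushing {frameno=1, regno=2, is_reg=true} and then
+ * {frameno=0, spi=5, is_reg=false} packs as
+ * ((0x211 << LR_ENTRY_BITS | 0x28) << LR_SIZE_BITS) | 2 == 0x844282,
+ * where 0x211 == 1 | (2 << LR_SPI_OFF) | (1 << LR_IS_REG_OFF) and
+ * 0x28 == 5 << LR_SPI_OFF; linked_regs_unpack() recovers the same set
+ * (in reverse order).
+ */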
+
+static void linked_regs_unpack(u64 val, struct linked_regs *s)
+{
+ int i;
+
+ s->cnt = val & LR_SIZE_MASK;
+ val >>= LR_SIZE_BITS;
+
+ for (i = 0; i < s->cnt; ++i) {
+ struct linked_reg *e = &s->entries[i];
+
+ e->frameno = val & LR_FRAMENO_MASK;
+ e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK;
+ e->is_reg = (val >> LR_IS_REG_OFF) & 0x1;
+ val >>= LR_ENTRY_BITS;
+ }
+}
+
/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
- int insn_flags)
+ int insn_flags, u64 linked_regs)
{
u32 cnt = cur->jmp_history_cnt;
struct bpf_jmp_history_entry *p;
@@ -3353,6 +3485,10 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st
"verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
env->insn_idx, env->cur_hist_ent->flags, insn_flags);
env->cur_hist_ent->flags |= insn_flags;
+ WARN_ONCE(env->cur_hist_ent->linked_regs != 0,
+ "verifier insn history bug: insn_idx %d linked_regs != 0: %#llx\n",
+ env->insn_idx, env->cur_hist_ent->linked_regs);
+ env->cur_hist_ent->linked_regs = linked_regs;
return 0;
}
@@ -3367,6 +3503,7 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st
p->idx = env->insn_idx;
p->prev_idx = env->prev_insn_idx;
p->flags = insn_flags;
+ p->linked_regs = linked_regs;
cur->jmp_history_cnt = cnt;
env->cur_hist_ent = p;
@@ -3532,6 +3669,11 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
return bt->reg_masks[bt->frame] & (1 << reg);
}
+static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ return bt->reg_masks[frame] & (1 << reg);
+}
+
static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{
return bt->stack_masks[frame] & (1ull << slot);
@@ -3576,6 +3718,42 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
}
}
+/* If any register R in hist->linked_regs is marked as precise in bt,
+ * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
+ */
+static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist)
+{
+ struct linked_regs linked_regs;
+ bool some_precise = false;
+ int i;
+
+ if (!hist || hist->linked_regs == 0)
+ return;
+
+ linked_regs_unpack(hist->linked_regs, &linked_regs);
+ for (i = 0; i < linked_regs.cnt; ++i) {
+ struct linked_reg *e = &linked_regs.entries[i];
+
+ if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) ||
+ (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) {
+ some_precise = true;
+ break;
+ }
+ }
+
+ if (!some_precise)
+ return;
+
+ for (i = 0; i < linked_regs.cnt; ++i) {
+ struct linked_reg *e = &linked_regs.entries[i];
+
+ if (e->is_reg)
+ bt_set_frame_reg(bt, e->frameno, e->regno);
+ else
+ bt_set_frame_slot(bt, e->frameno, e->spi);
+ }
+}
+
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
/* For given verifier state backtrack_insn() is called from the last insn to
@@ -3615,6 +3793,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
+ /* If there is a history record that some registers gained range at this insn,
+ * propagate precision marks to those registers, so that bt_is_reg_set()
+ * accounts for these registers.
+ */
+ bt_sync_linked_regs(bt, hist);
+
if (class == BPF_ALU || class == BPF_ALU64) {
if (!bt_is_reg_set(bt, dreg))
return 0;
@@ -3844,7 +4028,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
*/
bt_set_reg(bt, dreg);
bt_set_reg(bt, sreg);
- /* else dreg <cond> K
+ } else if (BPF_SRC(insn->code) == BPF_K) {
+ /* dreg <cond> K
* Only dreg still needs precision before
* this insn, so for the K-based conditional
* there is nothing new to be marked.
@@ -3862,6 +4047,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
/* to be analyzed */
return -ENOTSUPP;
}
+ /* Propagate precision marks to linked registers, to account for
+ * registers marked as precise in this function.
+ */
+ bt_sync_linked_regs(bt, hist);
return 0;
}
@@ -3989,96 +4178,6 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
}
}
-static bool idset_contains(struct bpf_idset *s, u32 id)
-{
- u32 i;
-
- for (i = 0; i < s->count; ++i)
- if (s->ids[i] == (id & ~BPF_ADD_CONST))
- return true;
-
- return false;
-}
-
-static int idset_push(struct bpf_idset *s, u32 id)
-{
- if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
- return -EFAULT;
- s->ids[s->count++] = id & ~BPF_ADD_CONST;
- return 0;
-}
-
-static void idset_reset(struct bpf_idset *s)
-{
- s->count = 0;
-}
-
-/* Collect a set of IDs for all registers currently marked as precise in env->bt.
- * Mark all registers with these IDs as precise.
- */
-static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
-{
- struct bpf_idset *precise_ids = &env->idset_scratch;
- struct backtrack_state *bt = &env->bt;
- struct bpf_func_state *func;
- struct bpf_reg_state *reg;
- DECLARE_BITMAP(mask, 64);
- int i, fr;
-
- idset_reset(precise_ids);
-
- for (fr = bt->frame; fr >= 0; fr--) {
- func = st->frame[fr];
-
- bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
- for_each_set_bit(i, mask, 32) {
- reg = &func->regs[i];
- if (!reg->id || reg->type != SCALAR_VALUE)
- continue;
- if (idset_push(precise_ids, reg->id))
- return -EFAULT;
- }
-
- bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
- for_each_set_bit(i, mask, 64) {
- if (i >= func->allocated_stack / BPF_REG_SIZE)
- break;
- if (!is_spilled_scalar_reg(&func->stack[i]))
- continue;
- reg = &func->stack[i].spilled_ptr;
- if (!reg->id)
- continue;
- if (idset_push(precise_ids, reg->id))
- return -EFAULT;
- }
- }
-
- for (fr = 0; fr <= st->curframe; ++fr) {
- func = st->frame[fr];
-
- for (i = BPF_REG_0; i < BPF_REG_10; ++i) {
- reg = &func->regs[i];
- if (!reg->id)
- continue;
- if (!idset_contains(precise_ids, reg->id))
- continue;
- bt_set_frame_reg(bt, fr, i);
- }
- for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) {
- if (!is_spilled_scalar_reg(&func->stack[i]))
- continue;
- reg = &func->stack[i].spilled_ptr;
- if (!reg->id)
- continue;
- if (!idset_contains(precise_ids, reg->id))
- continue;
- bt_set_frame_slot(bt, fr, i);
- }
- }
-
- return 0;
-}
-
/*
* __mark_chain_precision() backtracks BPF program instruction sequence and
* chain of verifier states making sure that register *regno* (if regno >= 0)
@@ -4211,31 +4310,6 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
bt->frame, last_idx, first_idx, subseq_idx);
}
- /* If some register with scalar ID is marked as precise,
- * make sure that all registers sharing this ID are also precise.
- * This is needed to estimate effect of find_equal_scalars().
- * Do this at the last instruction of each state,
- * bpf_reg_state::id fields are valid for these instructions.
- *
- * Allows to track precision in situation like below:
- *
- * r2 = unknown value
- * ...
- * --- state #0 ---
- * ...
- * r1 = r2 // r1 and r2 now share the same ID
- * ...
- * --- state #1 {r1.id = A, r2.id = A} ---
- * ...
- * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1
- * ...
- * --- state #2 {r1.id = A, r2.id = A} ---
- * r3 = r10
- * r3 += r1 // need to mark both r1 and r2
- */
- if (mark_precise_scalar_ids(env, st))
- return -EFAULT;
-
if (last_idx < 0) {
/* we are at the entry into subprog, which
* is expected for global funcs, but only if
@@ -4456,7 +4530,7 @@ static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
if (!src_reg->id && !tnum_is_const(src_reg->var_off))
/* Ensure that src_reg has a valid ID that will be copied to
- * dst_reg and then will be used by find_equal_scalars() to
+ * dst_reg and then will be used by sync_linked_regs() to
* propagate min/max range.
*/
src_reg->id = ++env->id_gen;
@@ -4502,6 +4576,31 @@ static int get_reg_width(struct bpf_reg_state *reg)
return fls64(reg->umax_value);
}
+/* See comment for mark_fastcall_pattern_for_call() */
+static void check_fastcall_stack_contract(struct bpf_verifier_env *env,
+ struct bpf_func_state *state, int insn_idx, int off)
+{
+ struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno];
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ int i;
+
+ if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern)
+ return;
+ /* The region [max_stack_depth .. fastcall_stack_off) was accessed by
+ * an instruction that is not a part of the fastcall pattern. Disable
+ * fastcall rewrites for the current subprogram by setting
+ * fastcall_stack_off to a value smaller than any possible offset.
+ */
+ subprog->fastcall_stack_off = S16_MIN;
+ /* reset fastcall aux flags within subprogram,
+ * happens at most once per subprogram
+ */
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ aux[i].fastcall_spills_num = 0;
+ aux[i].fastcall_pattern = 0;
+ }
+}
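+/* Hypothetical illustration: suppose fastcall spill/fill slots occupy
+ * the offsets below fastcall_stack_off == -48 (e.g. -56 and -64). Any
+ * non-pattern access with off < -48 lands in that region and trips the
+ * check above, disabling fastcall rewrites for the whole subprogram.
+ */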
+
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -4550,6 +4649,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (err)
return err;
+ check_fastcall_stack_contract(env, state, insn_idx, off);
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
bool reg_value_fits;
@@ -4625,7 +4725,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
}
if (insn_flags)
- return push_jmp_history(env, env->cur_state, insn_flags);
+ return push_jmp_history(env, env->cur_state, insn_flags, 0);
return 0;
}
@@ -4684,6 +4784,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
return err;
}
+ check_fastcall_stack_contract(env, state, insn_idx, min_off);
/* Variable offset writes destroy any spilled pointers in range. */
for (i = min_off; i < max_off; i++) {
u8 new_type, *stype;
@@ -4822,6 +4923,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
reg = &reg_state->stack[spi].spilled_ptr;
mark_stack_slot_scratched(env, spi);
+ check_fastcall_stack_contract(env, state, env->insn_idx, off);
if (is_spilled_reg(&reg_state->stack[spi])) {
u8 spill_size = 1;
@@ -4930,7 +5032,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
insn_flags = 0; /* we are not restoring spilled register */
}
if (insn_flags)
- return push_jmp_history(env, env->cur_state, insn_flags);
+ return push_jmp_history(env, env->cur_state, insn_flags, 0);
return 0;
}
@@ -4982,6 +5084,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
min_off = reg->smin_value + off;
max_off = reg->smax_value + off;
mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
+ check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off);
return 0;
}
@@ -5587,11 +5690,13 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
- struct btf **btf, u32 *btf_id)
+ struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
.log = &env->log,
+ .is_retval = false,
+ .is_ldsx = is_ldsx,
};
if (env->ops->is_valid_access &&
@@ -5604,6 +5709,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
* type of narrower access.
*/
*reg_type = info.reg_type;
+ *is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) {
*btf = info.btf;
@@ -6692,10 +6798,20 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
struct bpf_func_state *state,
enum bpf_access_type t)
{
- int min_valid_off;
+ struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx];
+ int min_valid_off, max_bpf_stack;
+
+ /* If the accessing instruction is a spill/fill from the bpf_fastcall
+ * pattern, add room for all caller saved registers below MAX_BPF_STACK.
+ * If the bpf_fastcall rewrite does not happen, the maximal stack depth
+ * is checked by check_max_stack_depth_subprog().
+ */
+ max_bpf_stack = MAX_BPF_STACK;
+ if (aux->fastcall_pattern)
+ max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE;
if (t == BPF_WRITE || env->allow_uninit_stack)
- min_valid_off = -MAX_BPF_STACK;
+ min_valid_off = -max_bpf_stack;
else
min_valid_off = -state->allocated_stack;
@@ -6772,6 +6888,17 @@ static int check_stack_access_within_bounds(
return grow_stack_state(env, state, -min_off /* size */);
}
+static bool get_func_retval_range(struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ if (prog->type == BPF_PROG_TYPE_LSM &&
+ prog->expected_attach_type == BPF_LSM_MAC &&
+ !bpf_lsm_get_retval_range(prog, range)) {
+ return true;
+ }
+ return false;
+}
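+/* Illustrative sketch: for a BPF_LSM_MAC program whose hook declares a
+ * restricted return range (say [-4095, 0]), reading the return value
+ * slot from ctx yields a scalar bounded by that range (see the
+ * __mark_reg_s32_range() call in check_mem_access()) instead of a fully
+ * unknown scalar.
+ */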
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -6876,6 +7003,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
+ bool is_retval = false;
+ struct bpf_retval_range range;
enum bpf_reg_type reg_type = SCALAR_VALUE;
struct btf *btf = NULL;
u32 btf_id = 0;
@@ -6891,7 +7020,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
- &btf_id);
+ &btf_id, &is_retval, is_ldsx);
if (err)
verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
@@ -6900,7 +7029,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE) {
- mark_reg_unknown(env, regs, value_regno);
+ if (is_retval && get_func_retval_range(env->prog, &range)) {
+ err = __mark_reg_s32_range(env, regs, value_regno,
+ range.minval, range.maxval);
+ if (err)
+ return err;
+ } else {
+ mark_reg_unknown(env, regs, value_regno);
+ }
} else {
mark_reg_known_zero(env, regs,
value_regno);
@@ -7664,29 +7800,38 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
- struct bpf_map *map_ptr = reg->map_ptr;
struct btf_field *kptr_field;
+ struct bpf_map *map_ptr;
+ struct btf_record *rec;
u32 kptr_off;
+ if (type_is_ptr_alloc_obj(reg->type)) {
+ rec = reg_btf_record(reg);
+ } else { /* PTR_TO_MAP_VALUE */
+ map_ptr = reg->map_ptr;
+ if (!map_ptr->btf) {
+ verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
+ map_ptr->name);
+ return -EINVAL;
+ }
+ rec = map_ptr->record;
+ meta->map_ptr = map_ptr;
+ }
+
if (!tnum_is_const(reg->var_off)) {
verbose(env,
"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
regno);
return -EINVAL;
}
- if (!map_ptr->btf) {
- verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
- map_ptr->name);
- return -EINVAL;
- }
- if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
- verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
+
+ if (!btf_record_has_field(rec, BPF_KPTR)) {
+ verbose(env, "R%d has no valid kptr\n", regno);
return -EINVAL;
}
- meta->map_ptr = map_ptr;
kptr_off = reg->off + reg->var_off.value;
- kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
+ kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR);
if (!kptr_field) {
verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
return -EACCES;
@@ -7831,12 +7976,17 @@ static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
return meta->kfunc_flags & KF_ITER_DESTROY;
}
-static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
+static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
+ const struct btf_param *arg)
{
/* btf_check_iter_kfuncs() guarantees that first argument of any iter
* kfunc is iter state pointer
*/
- return arg == 0 && is_iter_kfunc(meta);
+ if (is_iter_kfunc(meta))
+ return arg_idx == 0;
+
+ /* iter passed as an argument to a generic kfunc */
+ return btf_param_match_suffix(meta->btf, arg, "__iter");
}
static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
@@ -7844,14 +7994,20 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
const struct btf_type *t;
- const struct btf_param *arg;
- int spi, err, i, nr_slots;
- u32 btf_id;
+ int spi, err, i, nr_slots, btf_id;
- /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
- arg = &btf_params(meta->func_proto)[0];
- t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */
- t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */
+ /* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs()
+ * ensures the struct convention, so no BTF validation would be needed
+ * here. But given that iter state can be passed as a parameter to any
+ * kfunc, if the arg has the "__iter" suffix we need to be a bit more
+ * conservative here.
+ */
+ btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1);
+ if (btf_id < 0) {
+ verbose(env, "expected valid iter pointer as arg #%d\n", regno);
+ return -EINVAL;
+ }
+ t = btf_type_by_id(meta->btf, btf_id);
nr_slots = t->size / BPF_REG_SIZE;
if (is_iter_new_kfunc(meta)) {
@@ -7873,7 +8029,9 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
if (err)
return err;
} else {
- /* iter_next() or iter_destroy() expect initialized iter state*/
+ /* iter_next() or iter_destroy(), as well as any kfunc
+ * accepting iter argument, expect initialized iter state
+ */
err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots);
switch (err) {
case 0:
@@ -7987,6 +8145,15 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
return 0;
}
+static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ int iter_frameno = meta->iter.frameno;
+ int iter_spi = meta->iter.spi;
+
+ return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+}
+
/* process_iter_next_call() is called when verifier gets to iterator's next
* "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
* to it as just "iter_next()" in comments below.
@@ -8071,12 +8238,10 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
struct bpf_reg_state *cur_iter, *queued_iter;
- int iter_frameno = meta->iter.frameno;
- int iter_spi = meta->iter.spi;
BTF_TYPE_EMIT(struct bpf_iter);
- cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+ cur_iter = get_iter_from_state(cur_st, meta);
if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
@@ -8104,7 +8269,7 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
if (!queued_st)
return -ENOMEM;
- queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+ queued_iter = get_iter_from_state(queued_st, meta);
queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
queued_iter->iter.depth++;
if (prev_st)
@@ -8128,6 +8293,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}
+static bool arg_type_is_raw_mem(enum bpf_arg_type type)
+{
+ return base_type(type) == ARG_PTR_TO_MEM &&
+ type & MEM_UNINIT;
+}
+
static bool arg_type_is_release(enum bpf_arg_type type)
{
return type & OBJ_RELEASE;
@@ -8138,16 +8309,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type)
return base_type(type) == ARG_PTR_TO_DYNPTR;
}
-static int int_ptr_type_to_size(enum bpf_arg_type type)
-{
- if (type == ARG_PTR_TO_INT)
- return sizeof(u32);
- else if (type == ARG_PTR_TO_LONG)
- return sizeof(u64);
-
- return -EINVAL;
-}
-
static int resolve_map_arg_type(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_arg_type *arg_type)
@@ -8220,16 +8381,6 @@ static const struct bpf_reg_types mem_types = {
},
};
-static const struct bpf_reg_types int_ptr_types = {
- .types = {
- PTR_TO_STACK,
- PTR_TO_PACKET,
- PTR_TO_PACKET_META,
- PTR_TO_MAP_KEY,
- PTR_TO_MAP_VALUE,
- },
-};
-
static const struct bpf_reg_types spin_lock_types = {
.types = {
PTR_TO_MAP_VALUE,
@@ -8260,7 +8411,12 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
-static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
+static const struct bpf_reg_types kptr_xchg_dest_types = {
+ .types = {
+ PTR_TO_MAP_VALUE,
+ PTR_TO_BTF_ID | MEM_ALLOC
+ }
+};
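+/* Sketch of what the new destination type enables (types hypothetical):
+ *
+ * struct node { struct foo __kptr *f; };
+ * struct node *n = bpf_obj_new(typeof(*n));
+ * old = bpf_kptr_xchg(&n->f, new); // dest may be a local kptr field,
+ *                                  // not only a map value
+ */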
static const struct bpf_reg_types dynptr_types = {
.types = {
PTR_TO_STACK,
@@ -8285,14 +8441,12 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
[ARG_PTR_TO_MEM] = &mem_types,
[ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types,
- [ARG_PTR_TO_INT] = &int_ptr_types,
- [ARG_PTR_TO_LONG] = &int_ptr_types,
[ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
[ARG_PTR_TO_FUNC] = &func_ptr_types,
[ARG_PTR_TO_STACK] = &stack_ptr_types,
[ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
[ARG_PTR_TO_TIMER] = &timer_types,
- [ARG_PTR_TO_KPTR] = &kptr_types,
+ [ARG_KPTR_XCHG_DEST] = &kptr_xchg_dest_types,
[ARG_PTR_TO_DYNPTR] = &dynptr_types,
};
@@ -8331,7 +8485,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
if (base_type(arg_type) == ARG_PTR_TO_MEM)
type &= ~DYNPTR_TYPE_FLAG_MASK;
- if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
+ /* Local kptr types are allowed as the source argument of bpf_kptr_xchg */
+ if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) {
type &= ~MEM_ALLOC;
type &= ~MEM_PERCPU;
}
@@ -8424,7 +8579,8 @@ found:
verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
return -EFAULT;
}
- if (meta->func_id == BPF_FUNC_kptr_xchg) {
+ /* Check if local kptr in src arg matches kptr in dst arg */
+ if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) {
if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
return -EACCES;
}
@@ -8735,7 +8891,7 @@ skip_type_check:
meta->release_regno = regno;
}
- if (reg->ref_obj_id) {
+ if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
@@ -8847,9 +9003,11 @@ skip_type_check:
*/
meta->raw_mode = arg_type & MEM_UNINIT;
if (arg_type & MEM_FIXED_SIZE) {
- err = check_helper_mem_access(env, regno,
- fn->arg_size[arg], false,
- meta);
+ err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
+ if (err)
+ return err;
+ if (arg_type & MEM_ALIGNED)
+ err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true);
}
break;
case ARG_CONST_SIZE:
@@ -8874,17 +9032,6 @@ skip_type_check:
if (err)
return err;
break;
- case ARG_PTR_TO_INT:
- case ARG_PTR_TO_LONG:
- {
- int size = int_ptr_type_to_size(arg_type);
-
- err = check_helper_mem_access(env, regno, size, false, meta);
- if (err)
- return err;
- err = check_ptr_alignment(env, reg, 0, size, true);
- break;
- }
case ARG_PTR_TO_CONST_STR:
{
err = check_reg_const_str(env, reg, regno);
@@ -8892,7 +9039,7 @@ skip_type_check:
return err;
break;
}
- case ARG_PTR_TO_KPTR:
+ case ARG_KPTR_XCHG_DEST:
err = process_kptr_func(env, regno, meta);
if (err)
return err;
@@ -9201,15 +9348,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg1_type))
count++;
- if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg2_type))
count++;
- if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg3_type))
count++;
- if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg4_type))
count++;
- if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg5_type))
count++;
/* We only support one arg being in raw mode at the moment,
@@ -9923,9 +10070,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}
-static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
+ bool return_32bit)
{
- return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
+ if (return_32bit)
+ return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
+ else
+ return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
}
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
@@ -9962,8 +10113,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
if (err)
return err;
- /* enforce R0 return value range */
- if (!retval_range_within(callee->callback_ret_range, r0)) {
+ /* enforce R0 return value range, and bpf_callback_t returns 64bit */
+ if (!retval_range_within(callee->callback_ret_range, r0, false)) {
verbose_invalid_scalar(env, r0, callee->callback_ret_range,
"At callback return", "R0");
return -EINVAL;
@@ -10265,6 +10416,19 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
state->callback_subprogno == subprogno);
}
+static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
+ const struct bpf_func_proto **ptr)
+{
+ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID)
+ return -ERANGE;
+
+ if (!env->ops->get_func_proto)
+ return -EINVAL;
+
+ *ptr = env->ops->get_func_proto(func_id, env->prog);
+ return *ptr ? 0 : -EINVAL;
+}
+
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
@@ -10281,18 +10445,16 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
/* find function prototype */
func_id = insn->imm;
- if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
- verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
- func_id);
+ err = get_helper_proto(env, insn->imm, &fn);
+ if (err == -ERANGE) {
+ verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id);
return -EINVAL;
}
- if (env->ops->get_func_proto)
- fn = env->ops->get_func_proto(func_id, env->prog);
- if (!fn) {
+ if (err) {
verbose(env, "program of this type cannot use helper %s#%d\n",
func_id_name(func_id), func_id);
- return -EINVAL;
+ return err;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
@@ -11228,7 +11390,7 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_DYNPTR;
- if (is_kfunc_arg_iter(meta, argno))
+ if (is_kfunc_arg_iter(meta, argno, &args[argno]))
return KF_ARG_PTR_TO_ITER;
if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
@@ -11330,8 +11492,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
* btf_struct_ids_match() to walk the struct at the 0th offset, and
* resolve types.
*/
- if (is_kfunc_acquire(meta) ||
- (is_kfunc_release(meta) && reg->ref_obj_id) ||
+ if ((is_kfunc_release(meta) && reg->ref_obj_id) ||
btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
strict_type_match = true;
@@ -11948,7 +12109,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
switch (kf_arg_type) {
case KF_ARG_PTR_TO_CTX:
if (reg->type != PTR_TO_CTX) {
- verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
+ verbose(env, "arg#%d expected pointer to ctx, but got %s\n",
+ i, reg_type_str(env, reg->type));
return -EINVAL;
}
@@ -12671,6 +12833,17 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
+
+ if (is_iter_next_kfunc(&meta)) {
+ struct bpf_reg_state *cur_iter;
+
+ cur_iter = get_iter_from_state(env->cur_state, &meta);
+
+ if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */
+ regs[BPF_REG_0].type |= MEM_RCU;
+ else
+ regs[BPF_REG_0].type |= PTR_TRUSTED;
+ }
}
if (is_kfunc_ret_null(&meta)) {
@@ -14099,7 +14272,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
u64 val = reg_const_value(src_reg, alu32);
if ((dst_reg->id & BPF_ADD_CONST) ||
- /* prevent overflow in find_equal_scalars() later */
+ /* prevent overflow in sync_linked_regs() later */
val > (u32)S32_MAX) {
/*
* If the register already went through rX += val
@@ -14114,7 +14287,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
} else {
/*
* Make sure ID is cleared otherwise dst_reg min/max could be
- * incorrectly propagated into other registers by find_equal_scalars()
+ * incorrectly propagated into other registers by sync_linked_regs()
*/
dst_reg->id = 0;
}
@@ -14264,7 +14437,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared if src_reg is not in u32
* range otherwise dst_reg min/max could be incorrectly
- * propagated into src_reg by find_equal_scalars()
+ * propagated into src_reg by sync_linked_regs()
*/
if (!is_src_reg_u32)
dst_reg->id = 0;
@@ -15087,14 +15260,66 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
return true;
}
-static void find_equal_scalars(struct bpf_verifier_state *vstate,
- struct bpf_reg_state *known_reg)
+static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg,
+ u32 id, u32 frameno, u32 spi_or_reg, bool is_reg)
+{
+ struct linked_reg *e;
+
+ if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id)
+ return;
+
+ e = linked_regs_push(reg_set);
+ if (e) {
+ e->frameno = frameno;
+ e->is_reg = is_reg;
+ e->regno = spi_or_reg;
+ } else {
+ reg->id = 0;
+ }
+}
+
+/* For all R being scalar registers or spilled scalar registers
+ * in verifier state, save R in linked_regs if R->id == id.
+ * If there are too many Rs sharing the same id, reset the id for leftover Rs.
+ */
+static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
+ struct linked_regs *linked_regs)
+{
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
+ int i, j;
+
+ id = id & ~BPF_ADD_CONST;
+ for (i = vstate->curframe; i >= 0; i--) {
+ func = vstate->frame[i];
+ for (j = 0; j < BPF_REG_FP; j++) {
+ reg = &func->regs[j];
+ __collect_linked_regs(linked_regs, reg, id, i, j, true);
+ }
+ for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
+ if (!is_spilled_reg(&func->stack[j]))
+ continue;
+ reg = &func->stack[j].spilled_ptr;
+ __collect_linked_regs(linked_regs, reg, id, i, j, false);
+ }
+ }
+}
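+/* Illustrative sketch of why linked registers are collected:
+ *
+ * r1 = r2;             // r1 and r2 now share the same ID
+ * if r2 > 10 goto l;   // check_cond_jmp_op() collects {r1, r2},
+ *                      // packs them into the jump history entry,
+ *                      // and sync_linked_regs() copies the learned
+ *                      // range to every register in the set
+ */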
+
+/* For all R in linked_regs, copy known_reg range into R
+ * if R->id == known_reg->id.
+ */
+static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg,
+ struct linked_regs *linked_regs)
{
struct bpf_reg_state fake_reg;
- struct bpf_func_state *state;
struct bpf_reg_state *reg;
+ struct linked_reg *e;
+ int i;
- bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+ for (i = 0; i < linked_regs->cnt; ++i) {
+ e = &linked_regs->entries[i];
+ reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno]
+ : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr;
if (reg->type != SCALAR_VALUE || reg == known_reg)
continue;
if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
@@ -15112,7 +15337,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
copy_register_state(reg, known_reg);
/*
* Must preserve off, id and add_const flag,
- * otherwise another find_equal_scalars() will be incorrect.
+ * otherwise another sync_linked_regs() will be incorrect.
*/
reg->off = saved_off;
@@ -15120,7 +15345,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
scalar_min_max_add(reg, &fake_reg);
reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
}
- }));
+ }
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
@@ -15131,6 +15356,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
+ struct linked_regs linked_regs = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -15245,6 +15471,21 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return 0;
}
+ /* Push scalar registers sharing the same ID to the jump history;
+ * do this before creating 'other_branch', so that both
+ * 'this_branch' and 'other_branch' share this history
+ * if parent state is created.
+ */
+ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id)
+ collect_linked_regs(this_branch, src_reg->id, &linked_regs);
+ if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
+ collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
+ if (linked_regs.cnt > 1) {
+ err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ if (err)
+ return err;
+ }
+
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
false);
if (!other_branch)
@@ -15275,13 +15516,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (BPF_SRC(insn->code) == BPF_X &&
src_reg->type == SCALAR_VALUE && src_reg->id &&
!WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
- find_equal_scalars(this_branch, src_reg);
- find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
+ sync_linked_regs(this_branch, src_reg, &linked_regs);
+ sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs);
}
if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
!WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
- find_equal_scalars(this_branch, dst_reg);
- find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
+ sync_linked_regs(this_branch, dst_reg, &linked_regs);
+ sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs);
}
/* if one pointer register is compared to another pointer
@@ -15569,6 +15810,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
const bool is_subprog = frame->subprogno;
+ bool return_32bit = false;
/* LSM and struct_ops func-ptr's return type could be "void" */
if (!is_subprog || frame->in_exception_callback_fn) {
@@ -15674,12 +15916,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_LSM:
if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
- /* Regular BPF_PROG_TYPE_LSM programs can return
- * any value.
- */
- return 0;
- }
- if (!env->prog->aux->attach_func_proto->type) {
+ /* no range found, any return value is allowed */
+ if (!get_func_retval_range(env->prog, &range))
+ return 0;
+ /* no restricted range, any return value is allowed */
+ if (range.minval == S32_MIN && range.maxval == S32_MAX)
+ return 0;
+ return_32bit = true;
+ } else if (!env->prog->aux->attach_func_proto->type) {
/* Make sure programs that attach to void
* hooks don't try to modify return value.
*/
@@ -15709,7 +15953,7 @@ enforce_retval:
if (err)
return err;
- if (!retval_range_within(range, reg)) {
+ if (!retval_range_within(range, reg, return_32bit)) {
verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
if (!is_subprog &&
prog->expected_attach_type == BPF_LSM_CGROUP &&
@@ -15875,6 +16119,274 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
return ret;
}
+/* Bitmask with 1s for all caller saved registers */
+#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
+
+/* Return a bitmask specifying which caller saved registers are
+ * clobbered by a call to a helper *as if* this helper follows the
+ * bpf_fastcall contract:
+ * - includes R0 if the function is non-void;
+ * - includes R1-R5 if the corresponding parameter is described
+ * in the function prototype.
+ */
+static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn)
+{
+ u32 mask;
+ int i;
+
+ mask = 0;
+ if (fn->ret_type != RET_VOID)
+ mask |= BIT(BPF_REG_0);
+ for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i)
+ if (fn->arg_type[i] != ARG_DONTCARE)
+ mask |= BIT(BPF_REG_1 + i);
+ return mask;
+}
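+/* Example (hypothetical prototype): for u64 foo(void *p, u32 sz) the
+ * mask is BIT(R0) | BIT(R1) | BIT(R2), while R3-R5 are assumed preserved
+ * under the bpf_fastcall contract.
+ */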
+
+/* True if do_misc_fixups() replaces calls to helper number 'imm',
+ * replacement patch is presumed to follow bpf_fastcall contract
+ * (see mark_fastcall_pattern_for_call() below).
+ */
+static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
+{
+ switch (imm) {
+#ifdef CONFIG_X86_64
+ case BPF_FUNC_get_smp_processor_id:
+ return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
+#endif
+ default:
+ return false;
+ }
+}
+
+/* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */
+static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta)
+{
+ u32 vlen, i, mask;
+
+ vlen = btf_type_vlen(meta->func_proto);
+ mask = 0;
+ if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type)))
+ mask |= BIT(BPF_REG_0);
+ for (i = 0; i < vlen; ++i)
+ mask |= BIT(BPF_REG_1 + i);
+ return mask;
+}
+
+/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
+static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta)
+{
+ if (meta->btf == btf_vmlinux)
+ return meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+ meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast];
+ return false;
+}
+
+/* LLVM defines a bpf_fastcall function attribute.
+ * This attribute means that the function scratches only some of
+ * the caller saved registers defined by the ABI.
+ * For BPF the set of such registers could be defined as follows:
+ * - R0 is scratched only if function is non-void;
+ * - R1-R5 are scratched only if corresponding parameter type is defined
+ * in the function prototype.
+ *
+ * The contract between kernel and clang makes it possible to use such
+ * functions while maintaining backwards compatibility with old
+ * kernels that don't understand bpf_fastcall calls:
+ *
+ * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
+ * registers are not scratched by the call;
+ *
+ * - as a post-processing step, clang visits each bpf_fastcall call and adds
+ * spill/fill for every live r0-r5;
+ *
+ * - stack offsets used for the spill/fill are allocated as lowest
+ * stack offsets in whole function and are not used for any other
+ * purposes;
+ *
+ * - when kernel loads a program, it looks for such patterns
+ * (bpf_fastcall function surrounded by spills/fills) and checks if
+ * spill/fill stack offsets are used exclusively in fastcall patterns;
+ *
+ * - if so, and if verifier or current JIT inlines the call to the
+ * bpf_fastcall function (e.g. a helper call), kernel removes unnecessary
+ * spill/fill pairs;
+ *
+ * - when old kernel loads a program, presence of spill/fill pairs
+ * keeps BPF program valid, albeit slightly less efficient.
+ *
+ * For example:
+ *
+ * r1 = 1;
+ * r2 = 2;
+ * *(u64 *)(r10 - 8) = r1; r1 = 1;
+ * *(u64 *)(r10 - 16) = r2; r2 = 2;
+ * call %[to_be_inlined] --> call %[to_be_inlined]
+ * r2 = *(u64 *)(r10 - 16); r0 = r1;
+ * r1 = *(u64 *)(r10 - 8); r0 += r2;
+ * r0 = r1; exit;
+ * r0 += r2;
+ * exit;
+ *
+ * The purpose of mark_fastcall_pattern_for_call is to:
+ * - look for such patterns;
+ * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
+ * - set env->insn_aux_data[*].fastcall_spills_num for the call instruction;
+ * - update env->subprog_info[*]->fastcall_stack_off to find an offset
+ * at which bpf_fastcall spill/fill stack slots start;
+ * - update env->subprog_info[*]->keep_fastcall_stack.
+ *
+ * The .fastcall_pattern and .fastcall_stack_off are used by
+ * check_fastcall_stack_contract() to check if every stack access to
+ * fastcall spill/fill stack slot originates from spill/fill
+ * instructions, members of fastcall patterns.
+ *
+ * If this condition holds for a subprogram, fastcall patterns can
+ * be rewritten by remove_fastcall_spills_fills().
+ * Otherwise bpf_fastcall patterns are not changed in the subprogram
+ * (code, presumably, generated by an older clang version).
+ *
+ * For example, it is *not* safe to remove spill/fill below:
+ *
+ * r1 = 1;
+ * *(u64 *)(r10 - 8) = r1; r1 = 1;
+ * call %[to_be_inlined] --> call %[to_be_inlined]
+ * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
+ * r0 = *(u64 *)(r10 - 8); r0 += r1;
+ * r0 += r1; exit;
+ * exit;
+ */
+static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
+ struct bpf_subprog_info *subprog,
+ int insn_idx, s16 lowest_off)
+{
+ struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
+ struct bpf_insn *call = &env->prog->insnsi[insn_idx];
+ const struct bpf_func_proto *fn;
+ u32 clobbered_regs_mask = ALL_CALLER_SAVED_REGS;
+ u32 expected_regs_mask;
+ bool can_be_inlined = false;
+ s16 off;
+ int i;
+
+ if (bpf_helper_call(call)) {
+ if (get_helper_proto(env, call->imm, &fn) < 0)
+ /* error would be reported later */
+ return;
+ clobbered_regs_mask = helper_fastcall_clobber_mask(fn);
+ can_be_inlined = fn->allow_fastcall &&
+ (verifier_inlines_helper_call(env, call->imm) ||
+ bpf_jit_inlines_helper_call(call->imm));
+ }
+
+ if (bpf_pseudo_kfunc_call(call)) {
+ struct bpf_kfunc_call_arg_meta meta;
+ int err;
+
+ err = fetch_kfunc_meta(env, call, &meta, NULL);
+ if (err < 0)
+ /* error would be reported later */
+ return;
+
+ clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta);
+ can_be_inlined = is_fastcall_kfunc_call(&meta);
+ }
+
+ if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
+ return;
+
+ /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */
+ expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS;
+
+ /* match pairs of form:
+ *
+ * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0)
+ * ...
+ * call %[to_be_inlined]
+ * ...
+ * rX = *(u64 *)(r10 - Y)
+ */
+ for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) {
+ if (insn_idx - i < 0 || insn_idx + i >= env->prog->len)
+ break;
+ stx = &insns[insn_idx - i];
+ ldx = &insns[insn_idx + i];
+ /* must be a stack spill/fill pair */
+ if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+ ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) ||
+ stx->dst_reg != BPF_REG_10 ||
+ ldx->src_reg != BPF_REG_10)
+ break;
+ /* must be a spill/fill for the same reg */
+ if (stx->src_reg != ldx->dst_reg)
+ break;
+ /* must be one of the previously unseen registers */
+ if ((BIT(stx->src_reg) & expected_regs_mask) == 0)
+ break;
+ /* must be a spill/fill for the same expected offset,
+ * no need to check offset alignment, BPF_DW stack access
+ * is always 8-byte aligned.
+ */
+ if (stx->off != off || ldx->off != off)
+ break;
+ expected_regs_mask &= ~BIT(stx->src_reg);
+ env->insn_aux_data[insn_idx - i].fastcall_pattern = 1;
+ env->insn_aux_data[insn_idx + i].fastcall_pattern = 1;
+ }
+ if (i == 1)
+ return;
+
+ /* Conditionally set 'fastcall_spills_num' to allow forward
+ * compatibility when more helper functions are marked as
+ * bpf_fastcall at compile time than the current kernel supports, e.g.:
+ *
+ * 1: *(u64 *)(r10 - 8) = r1
+ * 2: call A ;; assume A is bpf_fastcall for current kernel
+ * 3: r1 = *(u64 *)(r10 - 8)
+ * 4: *(u64 *)(r10 - 8) = r1
+ * 5: call B ;; assume B is not bpf_fastcall for current kernel
+ * 6: r1 = *(u64 *)(r10 - 8)
+ *
+ * There is no need to block bpf_fastcall rewrite for such program.
+ * Set 'fastcall_pattern' for both calls to keep check_fastcall_stack_contract() happy,
+ * don't set 'fastcall_spills_num' for call B so that remove_fastcall_spills_fills()
+ * does not remove spill/fill pair {4,6}.
+ */
+ if (can_be_inlined)
+ env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1;
+ else
+ subprog->keep_fastcall_stack = 1;
+ subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off);
+}
+
+static int mark_fastcall_patterns(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *subprog = env->subprog_info;
+ struct bpf_insn *insn;
+ s16 lowest_off;
+ int s, i;
+
+ for (s = 0; s < env->subprog_cnt; ++s, ++subprog) {
+ /* find lowest stack spill offset used in this subprog */
+ lowest_off = 0;
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ insn = env->prog->insnsi + i;
+ if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+ insn->dst_reg != BPF_REG_10)
+ continue;
+ lowest_off = min(lowest_off, insn->off);
+ }
+ /* use this offset to find fastcall patterns */
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ insn = env->prog->insnsi + i;
+ if (insn->code != (BPF_JMP | BPF_CALL))
+ continue;
+ mark_fastcall_pattern_for_call(env, subprog, i, lowest_off);
+ }
+ }
+ return 0;
+}
+
/* Visits the instruction at index t and returns one of the following:
* < 0 - an error occurred
* DONE_EXPLORING - the instruction was fully explored
@@ -16770,7 +17282,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
*
* First verification path is [1-6]:
* - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
- * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark
+ * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark
* r7 <= X, because r6 and r7 share same id.
* Next verification path is [1-4, 6].
*
@@ -17564,7 +18076,7 @@ hit:
* the current state.
*/
if (is_jmp_point(env, env->insn_idx))
- err = err ? : push_jmp_history(env, cur, 0);
+ err = err ? : push_jmp_history(env, cur, 0, 0);
err = err ? : propagate_precision(env, &sl->state);
if (err)
return err;
@@ -17832,7 +18344,7 @@ static int do_check(struct bpf_verifier_env *env)
}
if (is_jmp_point(env, env->insn_idx)) {
- err = push_jmp_history(env, state, 0);
+ err = push_jmp_history(env, state, 0, 0);
if (err)
return err;
}
@@ -18408,6 +18920,53 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
+/* Add the map behind fd to the used maps list, if it's not already there,
+ * and return its index. Also set *reused to true if this map was already
+ * in the list of used maps.
+ * Returns <0 on error, or an index >= 0 on success.
+ */
+static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
+{
+ CLASS(fd, f)(fd);
+ struct bpf_map *map;
+ int i;
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map)) {
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ return PTR_ERR(map);
+ }
+
+ /* check whether we recorded this map already */
+ for (i = 0; i < env->used_map_cnt; i++) {
+ if (env->used_maps[i] == map) {
+ *reused = true;
+ return i;
+ }
+ }
+
+ if (env->used_map_cnt >= MAX_USED_MAPS) {
+ verbose(env, "The total number of maps per program has reached the limit of %u\n",
+ MAX_USED_MAPS);
+ return -E2BIG;
+ }
+
+ if (env->prog->sleepable)
+ atomic64_inc(&map->sleepable_refcnt);
+
+ /* hold the map. If the program is rejected by verifier,
+ * the map will be released by release_maps() or it
+ * will be used by the valid program until it's unloaded
+ * and all maps are released in bpf_free_used_maps()
+ */
+ bpf_map_inc(map);
+
+ *reused = false;
+ env->used_maps[env->used_map_cnt++] = map;
+
+ return env->used_map_cnt - 1;
+}
+
/* find and rewrite pseudo imm in ld_imm64 instructions:
*
* 1. if it accesses map FD, replace it with actual map pointer.
@@ -18419,7 +18978,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
- int i, j, err;
+ int i, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
@@ -18436,9 +18995,10 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_insn_aux_data *aux;
struct bpf_map *map;
- struct fd f;
+ int map_idx;
u64 addr;
u32 fd;
+ bool reused;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -18499,20 +19059,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
break;
}
- f = fdget(fd);
- map = __bpf_map_get(f);
- if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
- return PTR_ERR(map);
- }
+ map_idx = add_used_map_from_fd(env, fd, &reused);
+ if (map_idx < 0)
+ return map_idx;
+ map = env->used_maps[map_idx];
+
+ aux = &env->insn_aux_data[i];
+ aux->map_index = map_idx;
err = check_map_prog_compatibility(env, map, env->prog);
- if (err) {
- fdput(f);
+ if (err)
return err;
- }
- aux = &env->insn_aux_data[i];
if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
@@ -18521,13 +19079,11 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (off >= BPF_MAX_VAR_OFF) {
verbose(env, "direct value offset of %u is not allowed\n", off);
- fdput(f);
return -EINVAL;
}
if (!map->ops->map_direct_value_addr) {
verbose(env, "no direct value access support for this map type\n");
- fdput(f);
return -EINVAL;
}
@@ -18535,7 +19091,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (err) {
verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
map->value_size, off);
- fdput(f);
return err;
}
@@ -18546,70 +19101,39 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
- /* check whether we recorded this map already */
- for (j = 0; j < env->used_map_cnt; j++) {
- if (env->used_maps[j] == map) {
- aux->map_index = j;
- fdput(f);
- goto next_insn;
- }
- }
-
- if (env->used_map_cnt >= MAX_USED_MAPS) {
- verbose(env, "The total number of maps per program has reached the limit of %u\n",
- MAX_USED_MAPS);
- fdput(f);
- return -E2BIG;
- }
-
- if (env->prog->sleepable)
- atomic64_inc(&map->sleepable_refcnt);
- /* hold the map. If the program is rejected by verifier,
- * the map will be released by release_maps() or it
- * will be used by the valid program until it's unloaded
- * and all maps are released in bpf_free_used_maps()
- */
- bpf_map_inc(map);
-
- aux->map_index = env->used_map_cnt;
- env->used_maps[env->used_map_cnt++] = map;
+ /* proceed with extra checks only if it's a newly added used map */
+ if (reused)
+ goto next_insn;
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
- fdput(f);
return -EBUSY;
}
if (map->map_type == BPF_MAP_TYPE_ARENA) {
if (env->prog->aux->arena) {
verbose(env, "Only one arena per program\n");
- fdput(f);
return -EBUSY;
}
if (!env->allow_ptr_leaks || !env->bpf_capable) {
verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
- fdput(f);
return -EPERM;
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
- fdput(f);
return -EINVAL;
}
}
- fdput(f);
next_insn:
insn++;
i++;
@@ -18765,6 +19289,9 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
for (i = 0; i < insn_cnt; i++, insn++) {
u8 code = insn->code;
+ if (tgt_idx <= i && i < tgt_idx + delta)
+ continue;
+
if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
continue;
@@ -19024,9 +19551,11 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env)
return 0;
}
+static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
static int opt_remove_nops(struct bpf_verifier_env *env)
{
- const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ const struct bpf_insn ja = NOP;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, err;
@@ -19151,14 +19680,39 @@ apply_patch_buffer:
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
+ struct bpf_subprog_info *subprogs = env->subprog_info;
const struct bpf_verifier_ops *ops = env->ops;
- int i, cnt, size, ctx_field_size, delta = 0;
+ int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0;
const int insn_cnt = env->prog->len;
- struct bpf_insn insn_buf[16], *insn;
+ struct bpf_insn *epilogue_buf = env->epilogue_buf;
+ struct bpf_insn *insn_buf = env->insn_buf;
+ struct bpf_insn *insn;
u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
+ int epilogue_idx = 0;
+
+ if (ops->gen_epilogue) {
+ epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog,
+ -(subprogs[0].stack_depth + 8));
+ if (epilogue_cnt >= INSN_BUF_SIZE) {
+ verbose(env, "bpf verifier is misconfigured\n");
+ return -EINVAL;
+ } else if (epilogue_cnt) {
+ /* Save the ARG_PTR_TO_CTX for the epilogue to use */
+ cnt = 0;
+ subprogs[0].stack_depth += 8;
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1,
+ -subprogs[0].stack_depth);
+ insn_buf[cnt++] = env->prog->insnsi[0];
+ new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ delta += cnt - 1;
+ }
+ }
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
@@ -19167,7 +19721,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
- if (cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
@@ -19180,6 +19734,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
}
+ if (delta)
+ WARN_ON(adjust_jmp_off(env->prog, 0, delta));
+
if (bpf_prog_is_offloaded(env->prog->aux))
return 0;
@@ -19212,6 +19769,25 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
env->prog->aux->num_exentries++;
continue;
+ } else if (insn->code == (BPF_JMP | BPF_EXIT) &&
+ epilogue_cnt &&
+ i + delta < subprogs[1].start) {
+ /* Generate epilogue for the main prog */
+ if (epilogue_idx) {
+ /* jump back to the earlier generated epilogue */
+ insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1);
+ cnt = 1;
+ } else {
+ memcpy(insn_buf, epilogue_buf,
+ epilogue_cnt * sizeof(*epilogue_buf));
+ cnt = epilogue_cnt;
+ /* epilogue_idx cannot be 0. There must be at
+ * least one ctx-ptr-saving insn before the
+ * epilogue.
+ */
+ epilogue_idx = i + delta;
+ }
+ goto patch_insn_buf;
} else {
continue;
}
@@ -19314,7 +19890,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
target_size = 0;
cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
+ if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
@@ -19323,7 +19899,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (is_narrower_load && size < target_size) {
u8 shift = bpf_ctx_narrow_access_offset(
off, size, size_default) * 8;
- if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
+ if (shift && cnt + 1 >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier narrow ctx load misconfigured\n");
return -EINVAL;
}
@@ -19348,6 +19924,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->dst_reg, insn->dst_reg,
size * 8, 0);
+patch_insn_buf:
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
@@ -19868,7 +20445,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
- struct bpf_insn insn_buf[16];
+ struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, ret, cnt, delta = 0, cur_subprog = 0;
@@ -19911,13 +20488,46 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
/* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);
- /* Make divide-by-zero exceptions impossible. */
+ /* Make sdiv/smod divide-by-minus-one exceptions impossible. */
+ if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) ||
+ insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) ||
+ insn->code == (BPF_ALU | BPF_MOD | BPF_K) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_K)) &&
+ insn->off == 1 && insn->imm == -1) {
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ struct bpf_insn *patchlet;
+ struct bpf_insn chk_and_sdiv[] = {
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_NEG | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ };
+ struct bpf_insn chk_and_smod[] = {
+ BPF_MOV32_IMM(insn->dst_reg, 0),
+ };
+
+ patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+
+ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
+
+ /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ bool is_sdiv = isdiv && insn->off == 1;
+ bool is_smod = !isdiv && insn->off == 1;
struct bpf_insn *patchlet;
struct bpf_insn chk_and_div[] = {
/* [R,W]x div 0 -> 0 */
@@ -19937,10 +20547,62 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
};
+ struct bpf_insn chk_and_sdiv[] = {
+ /* [R,W]x sdiv 0 -> 0
+ * LLONG_MIN sdiv -1 -> LLONG_MIN
+ * INT_MIN sdiv -1 -> INT_MIN
+ */
+ BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_ADD | BPF_K, BPF_REG_AX,
+ 0, 0, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JGT | BPF_K, BPF_REG_AX,
+ 0, 4, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, BPF_REG_AX,
+ 0, 1, 0),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_MOV | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_NEG | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ };
+ struct bpf_insn chk_and_smod[] = {
+ /* [R,W]x mod 0 -> [R,W]x */
+ /* [R,W]x mod -1 -> 0 */
+ BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_ADD | BPF_K, BPF_REG_AX,
+ 0, 0, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JGT | BPF_K, BPF_REG_AX,
+ 0, 3, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, BPF_REG_AX,
+ 0, 3 + (is64 ? 0 : 1), 1),
+ BPF_MOV32_IMM(insn->dst_reg, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
+ };
- patchlet = isdiv ? chk_and_div : chk_and_mod;
- cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
- ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+ if (is_sdiv) {
+ patchlet = chk_and_sdiv;
+ cnt = ARRAY_SIZE(chk_and_sdiv);
+ } else if (is_smod) {
+ patchlet = chk_and_smod;
+ cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+ } else {
+ patchlet = isdiv ? chk_and_div : chk_and_mod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+ ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+ }
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
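
In plain C, the semantics these sdiv/smod patchlets enforce are as follows; this is a user-space sketch of the BPF-defined results (native x86 idiv would otherwise raise #DE for division by zero and for the LLONG_MIN / -1 overflow case), not kernel code:

	/* BPF-defined signed division/modulo results; a sketch for illustration. */
	static long long bpf_sdiv64(long long a, long long b)
	{
		if (b == 0)
			return 0;		/* [R,W]x sdiv 0 -> 0 */
		if (b == -1)			/* LLONG_MIN sdiv -1 -> LLONG_MIN */
			return (long long)(0ULL - (unsigned long long)a); /* wrapping negate */
		return a / b;
	}

	static long long bpf_smod64(long long a, long long b)
	{
		if (b == 0)
			return a;		/* [R,W]x mod 0 -> [R,W]x */
		if (b == -1)
			return 0;		/* [R,W]x mod -1 -> 0 */
		return a % b;
	}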
@@ -19987,7 +20649,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt == 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -20280,7 +20942,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == -EOPNOTSUPP)
goto patch_map_ops_generic;
- if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -20382,7 +21044,7 @@ patch_map_ops_generic:
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
/* Implement bpf_get_smp_processor_id() inline. */
if (insn->imm == BPF_FUNC_get_smp_processor_id &&
- prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+ verifier_inlines_helper_call(env, insn->imm)) {
/* BPF_FUNC_get_smp_processor_id inlining is an
* optimization, so if pcpu_hot.cpu_number is ever
* changed in some incompatible and hard to support
@@ -20640,7 +21302,7 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
int position,
s32 stack_base,
u32 callback_subprogno,
- u32 *cnt)
+ u32 *total_cnt)
{
s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
@@ -20649,55 +21311,56 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
int reg_loop_cnt = BPF_REG_7;
int reg_loop_ctx = BPF_REG_8;
+ struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_prog *new_prog;
u32 callback_start;
u32 call_insn_offset;
s32 callback_offset;
+ u32 cnt = 0;
/* This represents an inlined version of bpf_iter.c:bpf_loop,
* be careful to modify this code in sync.
*/
- struct bpf_insn insn_buf[] = {
- /* Return error and jump to the end of the patch if
- * expected number of iterations is too big.
- */
- BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
- BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
- BPF_JMP_IMM(BPF_JA, 0, 0, 16),
- /* spill R6, R7, R8 to use these as loop vars */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
- /* initialize loop vars */
- BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
- BPF_MOV32_IMM(reg_loop_cnt, 0),
- BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
- /* loop header,
- * if reg_loop_cnt >= reg_loop_max skip the loop body
- */
- BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
- /* callback call,
- * correct callback offset would be set after patching
- */
- BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
- BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
- BPF_CALL_REL(0),
- /* increment loop counter */
- BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
- /* jump to loop header if callback returned 0 */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
- /* return value of bpf_loop,
- * set R0 to the number of iterations
- */
- BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
- /* restore original values of R6, R7, R8 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
- BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
- };
- *cnt = ARRAY_SIZE(insn_buf);
- new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
+ /* Return error and jump to the end of the patch if
+ * expected number of iterations is too big.
+ */
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2);
+ insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG);
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16);
+ /* spill R6, R7, R8 to use these as loop vars */
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset);
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset);
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset);
+ /* initialize loop vars */
+ insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1);
+ insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0);
+ insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3);
+ /* loop header,
+ * if reg_loop_cnt >= reg_loop_max skip the loop body
+ */
+ insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5);
+ /* callback call,
+ * correct callback offset would be set after patching
+ */
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt);
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx);
+ insn_buf[cnt++] = BPF_CALL_REL(0);
+ /* increment loop counter */
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1);
+ /* jump to loop header if callback returned 0 */
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6);
+ /* return value of bpf_loop,
+ * set R0 to the number of iterations
+ */
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt);
+ /* restore original values of R6, R7, R8 */
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset);
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset);
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset);
+
+ *total_cnt = cnt;
+ new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt);
if (!new_prog)
return new_prog;
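
At the C level, the instruction sequence built above corresponds to the following shape (a sketch mirroring bpf_iter.c:bpf_loop, which the comment says this must stay in sync with; note the counter is bumped before the callback's result is checked, so the stopping iteration is counted):

	/* C-level shape of the inlined bpf_loop; illustrative only. */
	static long inlined_bpf_loop(u64 nr_loops, long (*callback)(u64, void *),
				     void *ctx)
	{
		u64 cnt = 0;

		if (nr_loops > BPF_MAX_LOOPS)
			return -E2BIG;
		while (cnt < nr_loops) {
			long ret = callback(cnt, ctx);

			cnt++;			/* incremented before the result check */
			if (ret != 0)
				break;
		}
		return cnt;		/* R0 = number of iterations performed */
	}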
@@ -20772,6 +21435,40 @@ static int optimize_bpf_loop(struct bpf_verifier_env *env)
return 0;
}
+/* Remove unnecessary spill/fill pairs that are members of the fastcall
+ * pattern, and adjust subprogram stack depth when possible.
+ */
+static int remove_fastcall_spills_fills(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *subprog = env->subprog_info;
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ u32 spills_num;
+ bool modified = false;
+ int i, j;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (aux[i].fastcall_spills_num > 0) {
+ spills_num = aux[i].fastcall_spills_num;
+ /* NOPs would be removed by opt_remove_nops() */
+ for (j = 1; j <= spills_num; ++j) {
+ *(insn - j) = NOP;
+ *(insn + j) = NOP;
+ }
+ modified = true;
+ }
+ if ((subprog + 1)->start == i + 1) {
+ if (modified && !subprog->keep_fastcall_stack)
+ subprog->stack_depth = -subprog->fastcall_stack_off;
+ subprog++;
+ modified = false;
+ }
+ }
+
+ return 0;
+}
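
Concretely, the pattern being erased is a spill of a register immediately before a call paired with the matching fill immediately after it; a hedged illustration (the insn layout and the callee name are invented for the example):

	/* Fastcall pattern around call insn i, spills_num == 1:
	 *
	 *   *(u64 *)(r10 - 8) = r1;	// spill, insn i-1 -> rewritten to NOP
	 *   call some_fastcall_kfunc;	// insn i, left unchanged
	 *   r1 = *(u64 *)(r10 - 8);	// fill,  insn i+1 -> rewritten to NOP
	 *
	 * Once every pattern in a subprog is removed and keep_fastcall_stack is
	 * unset, the subprog's stack_depth can shrink to -fastcall_stack_off.
	 */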
+
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
@@ -21045,6 +21742,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
u32 btf_id, member_idx;
struct btf *btf;
const char *mname;
+ int err;
if (!prog->gpl_compatible) {
verbose(env, "struct ops programs must have a GPL compatible license\n");
@@ -21092,8 +21790,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
+ err = bpf_struct_ops_supported(st_ops, __btf_member_bit_offset(t, member) / 8);
+ if (err) {
+ verbose(env, "attach to unsupported member %s of struct %s\n",
+ mname, st_ops->name);
+ return err;
+ }
+
if (st_ops->check_member) {
- int err = st_ops->check_member(t, member, prog);
+ err = st_ops->check_member(t, member, prog);
if (err) {
verbose(env, "attach to unsupported member %s of struct %s\n",
@@ -21154,11 +21859,13 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
{
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
+ char trace_symbol[KSYM_SYMBOL_LEN];
const char prefix[] = "btf_trace_";
+ struct bpf_raw_event_map *btp;
int ret = 0, subprog = -1, i;
const struct btf_type *t;
bool conservative = true;
- const char *tname;
+ const char *tname, *fname;
struct btf *btf;
long addr = 0;
struct module *mod = NULL;
@@ -21289,10 +21996,34 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
return -EINVAL;
}
tname += sizeof(prefix) - 1;
- t = btf_type_by_id(btf, t->type);
- if (!btf_type_is_ptr(t))
- /* should never happen in valid vmlinux build */
+
+ /* The func_proto of "btf_trace_##tname" is generated from a typedef without
+ * argument names, so bpf_raw_event_map is used to get the argument names.
+ */
+ btp = bpf_get_raw_tracepoint(tname);
+ if (!btp)
return -EINVAL;
+ fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL,
+ trace_symbol);
+ bpf_put_raw_tracepoint(btp);
+
+ if (fname)
+ ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC);
+
+ if (!fname || ret < 0) {
+ bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n",
+ prefix, tname);
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_ptr(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+ } else {
+ t = btf_type_by_id(btf, ret);
+ if (!btf_type_is_func(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+ }
+
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
/* should never happen in valid vmlinux build */
@@ -21678,6 +22409,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret < 0)
goto skip_full_check;
+ ret = mark_fastcall_patterns(env);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = do_check_main(env);
ret = ret ?: do_check_subprogs(env);
@@ -21687,6 +22422,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
skip_full_check:
kvfree(env->explored_states);
+ /* might decrease stack depth, keep it before passes that
+ * allocate additional slots.
+ */
+ if (ret == 0)
+ ret = remove_fastcall_spills_fills(env);
+
if (ret == 0)
ret = check_max_stack_depth(env);
diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile
index 12f8457ad1f9..a5c9359d516f 100644
--- a/kernel/cgroup/Makefile
+++ b/kernel/cgroup/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_CGROUP_FREEZER) += legacy_freezer.o
obj-$(CONFIG_CGROUP_PIDS) += pids.o
obj-$(CONFIG_CGROUP_RDMA) += rdma.o
obj-$(CONFIG_CPUSETS) += cpuset.o
+obj-$(CONFIG_CPUSETS_V1) += cpuset-v1.o
obj-$(CONFIG_CGROUP_MISC) += misc.o
obj-$(CONFIG_CGROUP_DEBUG) += debug.o
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 520b90dd97ec..c964dd7ff967 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -81,6 +81,8 @@ struct cgroup_file_ctx {
struct {
struct cgroup_pidlist *pidlist;
} procs1;
+
+ struct cgroup_of_peak peak;
};
/*
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index b9dbf6bf2779..e28d5f0d20ed 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -46,6 +46,12 @@ bool cgroup1_ssid_disabled(int ssid)
return cgroup_no_v1_mask & (1 << ssid);
}
+static bool cgroup1_subsys_absent(struct cgroup_subsys *ss)
+{
+ /* Check also dfl_cftypes for file-less controllers, e.g. perf_event */
+ return ss->legacy_cftypes == NULL && ss->dfl_cftypes;
+}
+
/**
* cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
* @from: attach to all cgroups of a given task
@@ -675,11 +681,14 @@ int proc_cgroupstats_show(struct seq_file *m, void *v)
* cgroup_mutex contention.
*/
- for_each_subsys(ss, i)
+ for_each_subsys(ss, i) {
+ if (cgroup1_subsys_absent(ss))
+ continue;
seq_printf(m, "%s\t%d\t%d\t%d\n",
ss->legacy_name, ss->root->hierarchy_id,
atomic_read(&ss->root->nr_cgrps),
cgroup_ssid_enabled(i));
+ }
return 0;
}
@@ -932,7 +941,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
if (ret != -ENOPARAM)
return ret;
for_each_subsys(ss, i) {
- if (strcmp(param->key, ss->legacy_name))
+ if (strcmp(param->key, ss->legacy_name) ||
+ cgroup1_subsys_absent(ss))
continue;
if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
return invalfc(fc, "Disabled controller '%s'",
@@ -1024,7 +1034,8 @@ static int check_cgroupfs_options(struct fs_context *fc)
mask = ~((u16)1 << cpuset_cgrp_id);
#endif
for_each_subsys(ss, i)
- if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
+ if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i) &&
+ !cgroup1_subsys_absent(ss))
enabled |= 1 << i;
ctx->subsys_mask &= enabled;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c8e4b62b436a..5886b95c6eae 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1972,6 +1972,13 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
return -EINVAL;
}
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of)
+{
+ struct cgroup_file_ctx *ctx = of->priv;
+
+ return &ctx->peak;
+}
+
static void apply_cgroup_root_flags(unsigned int root_flags)
{
if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
@@ -2331,7 +2338,7 @@ static struct file_system_type cgroup2_fs_type = {
.fs_flags = FS_USERNS_MOUNT,
};
-#ifdef CONFIG_CPUSETS
+#ifdef CONFIG_CPUSETS_V1
static const struct fs_context_operations cpuset_fs_context_ops = {
.get_tree = cgroup1_get_tree,
.free = cgroup_fs_context_free,
@@ -3669,12 +3676,40 @@ static int cgroup_events_show(struct seq_file *seq, void *v)
static int cgroup_stat_show(struct seq_file *seq, void *v)
{
struct cgroup *cgroup = seq_css(seq)->cgroup;
+ struct cgroup_subsys_state *css;
+ int dying_cnt[CGROUP_SUBSYS_COUNT];
+ int ssid;
seq_printf(seq, "nr_descendants %d\n",
cgroup->nr_descendants);
+
+ /*
+ * Show the number of live and dying csses associated with each of the
+ * non-inhibited cgroup subsystems that are bound to cgroup v2.
+ *
+ * Without proper lock protection, racing is possible. So the
+ * numbers may not be consistent when that happens.
+ */
+ rcu_read_lock();
+ for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
+ dying_cnt[ssid] = -1;
+ if ((BIT(ssid) & cgrp_dfl_inhibit_ss_mask) ||
+ (cgroup_subsys[ssid]->root != &cgrp_dfl_root))
+ continue;
+ css = rcu_dereference_raw(cgroup->subsys[ssid]);
+ dying_cnt[ssid] = cgroup->nr_dying_subsys[ssid];
+ seq_printf(seq, "nr_subsys_%s %d\n", cgroup_subsys[ssid]->name,
+ css ? (css->nr_descendants + 1) : 0);
+ }
+
seq_printf(seq, "nr_dying_descendants %d\n",
cgroup->nr_dying_descendants);
-
+ for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
+ if (dying_cnt[ssid] >= 0)
+ seq_printf(seq, "nr_dying_subsys_%s %d\n",
+ cgroup_subsys[ssid]->name, dying_cnt[ssid]);
+ }
+ rcu_read_unlock();
return 0;
}
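
With these additions, reading cgroup.stat on a v2 cgroup would plausibly produce output along these lines (subsystem names and values invented for illustration):

	nr_descendants 5
	nr_subsys_memory 6
	nr_subsys_pids 6
	nr_dying_descendants 1
	nr_dying_subsys_memory 1
	nr_dying_subsys_pids 0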
@@ -4096,7 +4131,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
* If namespaces are delegation boundaries, disallow writes to
* files in an non-init namespace root from inside the namespace
* except for the files explicitly marked delegatable -
- * cgroup.procs and cgroup.subtree_control.
+ * e.g. cgroup.procs, cgroup.threads and cgroup.subtree_control.
*/
if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
!(cft->flags & CFTYPE_NS_DELEGATABLE) &&
@@ -4595,8 +4630,9 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct next descendant as long
- * as both @pos and @root are accessible and @pos is a descendant of @root.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct next descendant as long as both @pos
+ * and @root are accessible and @pos is a descendant of @root.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
@@ -4644,8 +4680,9 @@ EXPORT_SYMBOL_GPL(css_next_descendant_pre);
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct rightmost descendant as
- * long as @pos is accessible.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct rightmost descendant as long as @pos
+ * is accessible.
*/
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
@@ -4689,9 +4726,9 @@ css_leftmost_descendant(struct cgroup_subsys_state *pos)
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct next descendant as long
- * as both @pos and @cgroup are accessible and @pos is a descendant of
- * @cgroup.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct next descendant as long as both @pos
+ * and @cgroup are accessible and @pos is a descendant of @cgroup.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
@@ -5424,6 +5461,8 @@ static void css_release_work_fn(struct work_struct *work)
list_del_rcu(&css->sibling);
if (ss) {
+ struct cgroup *parent_cgrp;
+
/* css release path */
if (!list_empty(&css->rstat_css_node)) {
cgroup_rstat_flush(cgrp);
@@ -5433,6 +5472,21 @@ static void css_release_work_fn(struct work_struct *work)
cgroup_idr_replace(&ss->css_idr, NULL, css->id);
if (ss->css_released)
ss->css_released(css);
+
+ cgrp->nr_dying_subsys[ss->id]--;
+ /*
+ * When a css is released and ready to be freed, its
+ * nr_descendants must be zero. However, the corresponding
+ * cgrp->nr_dying_subsys[ss->id] may not be 0 if a subsystem
+ * is activated and deactivated multiple times with one or
+ * more of its previous activations leaving behind dying csses.
+ */
+ WARN_ON_ONCE(css->nr_descendants);
+ parent_cgrp = cgroup_parent(cgrp);
+ while (parent_cgrp) {
+ parent_cgrp->nr_dying_subsys[ss->id]--;
+ parent_cgrp = cgroup_parent(parent_cgrp);
+ }
} else {
struct cgroup *tcgrp;
@@ -5517,8 +5571,11 @@ static int online_css(struct cgroup_subsys_state *css)
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
atomic_inc(&css->online_cnt);
- if (css->parent)
+ if (css->parent) {
atomic_inc(&css->parent->online_cnt);
+ while ((css = css->parent))
+ css->nr_descendants++;
+ }
}
return ret;
}
@@ -5540,6 +5597,16 @@ static void offline_css(struct cgroup_subsys_state *css)
RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
wake_up_all(&css->cgroup->offline_waitq);
+
+ css->cgroup->nr_dying_subsys[ss->id]++;
+ /*
+ * Parent css and cgroup cannot be freed until after the freeing
+ * of child css, see css_free_rwork_fn().
+ */
+ while ((css = css->parent)) {
+ css->nr_descendants--;
+ css->cgroup->nr_dying_subsys[ss->id]++;
+ }
}
/**
@@ -6178,7 +6245,7 @@ int __init cgroup_init(void)
WARN_ON(register_filesystem(&cgroup_fs_type));
WARN_ON(register_filesystem(&cgroup2_fs_type));
WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
-#ifdef CONFIG_CPUSETS
+#ifdef CONFIG_CPUSETS_V1
WARN_ON(register_filesystem(&cpuset_fs_type));
#endif
@@ -6901,10 +6968,10 @@ struct cgroup *cgroup_v1v2_get_from_fd(int fd)
{
struct cgroup *cgrp;
struct fd f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- cgrp = cgroup_v1v2_get_from_file(f.file);
+ cgrp = cgroup_v1v2_get_from_file(fd_file(f));
fdput(f);
return cgrp;
}
diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
new file mode 100644
index 000000000000..976a8bc3ff60
--- /dev/null
+++ b/kernel/cgroup/cpuset-internal.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __CPUSET_INTERNAL_H
+#define __CPUSET_INTERNAL_H
+
+#include <linux/cgroup.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpuset.h>
+#include <linux/spinlock.h>
+#include <linux/union_find.h>
+
+/* See "Frequency meter" comments, below. */
+
+struct fmeter {
+ int cnt; /* unprocessed events count */
+ int val; /* most recent output value */
+ time64_t time; /* clock (secs) when val computed */
+ spinlock_t lock; /* guards read or write of above */
+};
+
+/*
+ * Invalid partition error code
+ */
+enum prs_errcode {
+ PERR_NONE = 0,
+ PERR_INVCPUS,
+ PERR_INVPARENT,
+ PERR_NOTPART,
+ PERR_NOTEXCL,
+ PERR_NOCPUS,
+ PERR_HOTPLUG,
+ PERR_CPUSEMPTY,
+ PERR_HKEEPING,
+ PERR_ACCESS,
+};
+
+/* bits in struct cpuset flags field */
+typedef enum {
+ CS_ONLINE,
+ CS_CPU_EXCLUSIVE,
+ CS_MEM_EXCLUSIVE,
+ CS_MEM_HARDWALL,
+ CS_MEMORY_MIGRATE,
+ CS_SCHED_LOAD_BALANCE,
+ CS_SPREAD_PAGE,
+ CS_SPREAD_SLAB,
+} cpuset_flagbits_t;
+
+/* The various types of files and directories in a cpuset file system */
+
+typedef enum {
+ FILE_MEMORY_MIGRATE,
+ FILE_CPULIST,
+ FILE_MEMLIST,
+ FILE_EFFECTIVE_CPULIST,
+ FILE_EFFECTIVE_MEMLIST,
+ FILE_SUBPARTS_CPULIST,
+ FILE_EXCLUSIVE_CPULIST,
+ FILE_EFFECTIVE_XCPULIST,
+ FILE_ISOLATED_CPULIST,
+ FILE_CPU_EXCLUSIVE,
+ FILE_MEM_EXCLUSIVE,
+ FILE_MEM_HARDWALL,
+ FILE_SCHED_LOAD_BALANCE,
+ FILE_PARTITION_ROOT,
+ FILE_SCHED_RELAX_DOMAIN_LEVEL,
+ FILE_MEMORY_PRESSURE_ENABLED,
+ FILE_MEMORY_PRESSURE,
+ FILE_SPREAD_PAGE,
+ FILE_SPREAD_SLAB,
+} cpuset_filetype_t;
+
+struct cpuset {
+ struct cgroup_subsys_state css;
+
+ unsigned long flags; /* "unsigned long" so bitops work */
+
+ /*
+ * On default hierarchy:
+ *
+ * The user-configured masks can only be changed by writing to
+ * cpuset.cpus and cpuset.mems, and won't be limited by the
+ * parent masks.
+ *
+ * The effective masks are the real masks that apply to the tasks
+ * in the cpuset. They may be changed if the configured masks are
+ * changed or hotplug happens.
+ *
+ * effective_mask == configured_mask & parent's effective_mask,
+ * and if it ends up empty, it will inherit the parent's mask.
+ *
+ *
+ * On legacy hierarchy:
+ *
+ * The user-configured masks are always the same with effective masks.
+ */
+
+ /* user-configured CPUs and Memory Nodes allowed to tasks */
+ cpumask_var_t cpus_allowed;
+ nodemask_t mems_allowed;
+
+ /* effective CPUs and Memory Nodes allowed to tasks */
+ cpumask_var_t effective_cpus;
+ nodemask_t effective_mems;
+
+ /*
+ * Exclusive CPUs dedicated to current cgroup (default hierarchy only)
+ *
+ * The effective_cpus of a valid partition root comes solely from its
+ * effective_xcpus and some of the effective_xcpus may be distributed
+ * to sub-partitions below & hence excluded from its effective_cpus.
+ * For a valid partition root, its effective_cpus have no relationship
+ * with cpus_allowed unless its exclusive_cpus isn't set.
+ *
+ * This value will only be set if either exclusive_cpus is set or
+ * when this cpuset becomes a local partition root.
+ */
+ cpumask_var_t effective_xcpus;
+
+ /*
+ * Exclusive CPUs as requested by the user (default hierarchy only)
+ *
+ * Its value is independent of cpus_allowed and designates the set of
+ * CPUs that can be granted to the current cpuset or its children when
+ * it becomes a valid partition root. The effective set of exclusive
+ * CPUs granted (effective_xcpus) depends on whether those exclusive
+ * CPUs are passed down by its ancestors and not yet taken up by
+ * another sibling partition root along the way.
+ *
+ * If its value isn't set, it defaults to cpus_allowed.
+ */
+ cpumask_var_t exclusive_cpus;
+
+ /*
+ * These are the old Memory Nodes that tasks took on.
+ *
+ * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
+ * - A new cpuset's old_mems_allowed is initialized when some
+ * task is moved into it.
+ * - old_mems_allowed is used in cpuset_migrate_mm() when we change
+ * cpuset.mems_allowed and have tasks' nodemask updated, and
+ * then old_mems_allowed is updated to mems_allowed.
+ */
+ nodemask_t old_mems_allowed;
+
+ struct fmeter fmeter; /* memory_pressure filter */
+
+ /*
+ * Tasks are being attached to this cpuset. Used to prevent
+ * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
+ */
+ int attach_in_progress;
+
+ /* for custom sched domain */
+ int relax_domain_level;
+
+ /* number of valid local child partitions */
+ int nr_subparts;
+
+ /* partition root state */
+ int partition_root_state;
+
+ /*
+ * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
+ * know when to rebuild associated root domain bandwidth information.
+ */
+ int nr_deadline_tasks;
+ int nr_migrate_dl_tasks;
+ u64 sum_migrate_dl_bw;
+
+ /* Invalid partition error code, not lock protected */
+ enum prs_errcode prs_err;
+
+ /* Handle for cpuset.cpus.partition */
+ struct cgroup_file partition_file;
+
+ /* Remote partition sibling list anchored at remote_children */
+ struct list_head remote_sibling;
+
+ /* Used to merge intersecting subsets for generate_sched_domains */
+ struct uf_node node;
+};
+
+static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct cpuset, css) : NULL;
+}
+
+/* Retrieve the cpuset for a task */
+static inline struct cpuset *task_cs(struct task_struct *task)
+{
+ return css_cs(task_css(task, cpuset_cgrp_id));
+}
+
+static inline struct cpuset *parent_cs(struct cpuset *cs)
+{
+ return css_cs(cs->css.parent);
+}
+
+/* convenient tests for these bits */
+static inline bool is_cpuset_online(struct cpuset *cs)
+{
+ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
+}
+
+static inline int is_cpu_exclusive(const struct cpuset *cs)
+{
+ return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
+}
+
+static inline int is_mem_exclusive(const struct cpuset *cs)
+{
+ return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
+}
+
+static inline int is_mem_hardwall(const struct cpuset *cs)
+{
+ return test_bit(CS_MEM_HARDWALL, &cs->flags);
+}
+
+static inline int is_sched_load_balance(const struct cpuset *cs)
+{
+ return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+}
+
+static inline int is_memory_migrate(const struct cpuset *cs)
+{
+ return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
+}
+
+static inline int is_spread_page(const struct cpuset *cs)
+{
+ return test_bit(CS_SPREAD_PAGE, &cs->flags);
+}
+
+static inline int is_spread_slab(const struct cpuset *cs)
+{
+ return test_bit(CS_SPREAD_SLAB, &cs->flags);
+}
+
+/**
+ * cpuset_for_each_child - traverse online children of a cpuset
+ * @child_cs: loop cursor pointing to the current child
+ * @pos_css: used for iteration
+ * @parent_cs: target cpuset to walk children of
+ *
+ * Walk @child_cs through the online children of @parent_cs. Must be used
+ * with RCU read locked.
+ */
+#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
+ css_for_each_child((pos_css), &(parent_cs)->css) \
+ if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
+
+/**
+ * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
+ * @des_cs: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @root_cs: target cpuset to walk descendants of
+ *
+ * Walk @des_cs through the online descendants of @root_cs. Must be used
+ * with RCU read locked. The caller may modify @pos_css by calling
+ * css_rightmost_descendant() to skip a subtree. @root_cs is included in the
+ * iteration and the first node to be visited.
+ */
+#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
+ css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
+ if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
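
A hedged usage sketch of the pre-order walk, including the subtree-skip idiom the comment describes; should_descend() is a hypothetical predicate, not a kernel function:

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cs, pos_css, root_cs) {
		if (cs != root_cs && !should_descend(cs)) {
			/* prune: skip everything below @cs */
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}
		/* ... visit cs ... */
	}
	rcu_read_unlock();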
+
+void rebuild_sched_domains_locked(void);
+void cpuset_callback_lock_irq(void);
+void cpuset_callback_unlock_irq(void);
+void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
+void cpuset_update_tasks_nodemask(struct cpuset *cs);
+int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
+ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+int cpuset_common_seq_show(struct seq_file *sf, void *v);
+
+/*
+ * cpuset-v1.c
+ */
+#ifdef CONFIG_CPUSETS_V1
+extern struct cftype cpuset1_files[];
+void fmeter_init(struct fmeter *fmp);
+void cpuset1_update_task_spread_flags(struct cpuset *cs,
+ struct task_struct *tsk);
+void cpuset1_update_tasks_flags(struct cpuset *cs);
+void cpuset1_hotplug_update_tasks(struct cpuset *cs,
+ struct cpumask *new_cpus, nodemask_t *new_mems,
+ bool cpus_updated, bool mems_updated);
+int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
+#else
+static inline void fmeter_init(struct fmeter *fmp) {}
+static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
+ struct task_struct *tsk) {}
+static inline void cpuset1_update_tasks_flags(struct cpuset *cs) {}
+static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
+ struct cpumask *new_cpus, nodemask_t *new_mems,
+ bool cpus_updated, bool mems_updated) {}
+static inline int cpuset1_validate_change(struct cpuset *cur,
+ struct cpuset *trial) { return 0; }
+#endif /* CONFIG_CPUSETS_V1 */
+
+#endif /* __CPUSET_INTERNAL_H */
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
new file mode 100644
index 000000000000..25c1d7b77e2f
--- /dev/null
+++ b/kernel/cgroup/cpuset-v1.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cpuset-internal.h"
+
+/*
+ * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchronously
+ */
+struct cpuset_remove_tasks_struct {
+ struct work_struct work;
+ struct cpuset *cs;
+};
+
+/*
+ * Frequency meter - How fast is some event occurring?
+ *
+ * These routines manage a digitally filtered, constant time based,
+ * event frequency meter. There are four routines:
+ * fmeter_init() - initialize a frequency meter.
+ * fmeter_markevent() - called each time the event happens.
+ * fmeter_getrate() - returns the recent rate of such events.
+ * fmeter_update() - internal routine used to update fmeter.
+ *
+ * A common data structure is passed to each of these routines,
+ * which is used to keep track of the state required to manage the
+ * frequency meter and its digital filter.
+ *
+ * The filter works on the number of events marked per unit time.
+ * The filter is single-pole low-pass recursive (IIR). The time unit
+ * is 1 second. Arithmetic is done using 32-bit integers scaled to
+ * simulate 3 decimal digits of precision (multiplied by 1000).
+ *
+ * With an FM_COEF of 933, and a time base of 1 second, the filter
+ * has a half-life of 10 seconds, meaning that if the events quit
+ * happening, then the rate returned from the fmeter_getrate()
+ * will be cut in half each 10 seconds, until it converges to zero.
+ *
+ * It is not worth doing a real infinitely recursive filter. If more
+ * than FM_MAXTICKS ticks have elapsed since the last filter event,
+ * just compute FM_MAXTICKS ticks worth, by which point the level
+ * will be stable.
+ *
+ * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
+ * arithmetic overflow in the fmeter_update() routine.
+ *
+ * Given the simple 32 bit integer arithmetic used, this meter works
+ * best for reporting rates between one per millisecond (msec) and
+ * one per 32 (approx) seconds. At constant rates faster than one
+ * per msec it maxes out at values just under 1,000,000. At constant
+ * rates between one per msec, and one per second it will stabilize
+ * to a value N*1000, where N is the rate of events per second.
+ * At constant rates between one per second and one per 32 seconds,
+ * it will be choppy, moving up on the seconds that have an event,
+ * and then decaying until the next event. At rates slower than
+ * about one in 32 seconds, it decays all the way back to zero between
+ * each event.
+ */
+
+#define FM_COEF 933 /* coefficient for half-life of 10 secs */
+#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
+#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
+#define FM_SCALE 1000 /* faux fixed point scale */
+
+/* Initialize a frequency meter */
+void fmeter_init(struct fmeter *fmp)
+{
+ fmp->cnt = 0;
+ fmp->val = 0;
+ fmp->time = 0;
+ spin_lock_init(&fmp->lock);
+}
+
+/* Internal meter update - process cnt events and update value */
+static void fmeter_update(struct fmeter *fmp)
+{
+ time64_t now;
+ u32 ticks;
+
+ now = ktime_get_seconds();
+ ticks = now - fmp->time;
+
+ if (ticks == 0)
+ return;
+
+ ticks = min(FM_MAXTICKS, ticks);
+ while (ticks-- > 0)
+ fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
+ fmp->time = now;
+
+ fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
+ fmp->cnt = 0;
+}
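
The ten-second half-life claimed in the comment block above can be checked with a few lines of user-space C, assuming one decay tick per second and the FM_COEF/FM_SCALE values defined here:

	#include <stdio.h>

	int main(void)
	{
		int val = 1000000;	/* arbitrary starting level */
		int t;

		for (t = 0; t < 10; t++)
			val = (933 * val) / 1000;	/* one FM_COEF decay tick */
		printf("after 10 ticks: %d\n", val);	/* prints 499820: about half */
		return 0;
	}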
+
+/* Process any previous ticks, then bump cnt by one (times scale). */
+static void fmeter_markevent(struct fmeter *fmp)
+{
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
+ spin_unlock(&fmp->lock);
+}
+
+/* Process any previous ticks, then return current value. */
+static int fmeter_getrate(struct fmeter *fmp)
+{
+ int val;
+
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ val = fmp->val;
+ spin_unlock(&fmp->lock);
+ return val;
+}
+
+/*
+ * Collection of memory_pressure is suppressed unless
+ * this flag is enabled by writing "1" to the special
+ * cpuset file 'memory_pressure_enabled' in the root cpuset.
+ */
+
+int cpuset_memory_pressure_enabled __read_mostly;
+
+/*
+ * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+ *
+ * Keep a running average of the rate of synchronous (direct)
+ * page reclaim efforts initiated by tasks in each cpuset.
+ *
+ * This represents the rate at which some task in the cpuset
+ * ran low on memory on all nodes it was allowed to use, and
+ * had to enter the kernel's page reclaim code in an effort to
+ * create more free memory by tossing clean pages or swapping
+ * or writing dirty pages.
+ *
+ * Display to user space in the per-cpuset read-only file
+ * "memory_pressure". Value displayed is an integer
+ * representing the recent rate of entry into the synchronous
+ * (direct) page reclaim by any task attached to the cpuset.
+ */
+
+void __cpuset_memory_pressure_bump(void)
+{
+ rcu_read_lock();
+ fmeter_markevent(&task_cs(current)->fmeter);
+ rcu_read_unlock();
+}
+
+static int update_relax_domain_level(struct cpuset *cs, s64 val)
+{
+#ifdef CONFIG_SMP
+ if (val < -1 || val > sched_domain_level_max + 1)
+ return -EINVAL;
+#endif
+
+ if (val != cs->relax_domain_level) {
+ cs->relax_domain_level = val;
+ if (!cpumask_empty(cs->cpus_allowed) &&
+ is_sched_load_balance(cs))
+ rebuild_sched_domains_locked();
+ }
+
+ return 0;
+}
+
+static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val)
+{
+ struct cpuset *cs = css_cs(css);
+ cpuset_filetype_t type = cft->private;
+ int retval = -ENODEV;
+
+ cpus_read_lock();
+ cpuset_lock();
+ if (!is_cpuset_online(cs))
+ goto out_unlock;
+
+ switch (type) {
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ retval = update_relax_domain_level(cs, val);
+ break;
+ default:
+ retval = -EINVAL;
+ break;
+ }
+out_unlock:
+ cpuset_unlock();
+ cpus_read_unlock();
+ return retval;
+}
+
+static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct cpuset *cs = css_cs(css);
+ cpuset_filetype_t type = cft->private;
+
+ switch (type) {
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ return cs->relax_domain_level;
+ default:
+ BUG();
+ }
+
+ /* Unreachable but makes gcc happy */
+ return 0;
+}
+
+/*
+ * update task's spread flag if cpuset's page/slab spread flag is set
+ *
+ * Call with callback_lock or cpuset_mutex held. The check can be skipped
+ * if on default hierarchy.
+ */
+void cpuset1_update_task_spread_flags(struct cpuset *cs,
+ struct task_struct *tsk)
+{
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+ return;
+
+ if (is_spread_page(cs))
+ task_set_spread_page(tsk);
+ else
+ task_clear_spread_page(tsk);
+
+ if (is_spread_slab(cs))
+ task_set_spread_slab(tsk);
+ else
+ task_clear_spread_slab(tsk);
+}
+
+/**
+ * cpuset1_update_tasks_flags - update the spread flags of tasks in the cpuset.
+ * @cs: the cpuset in which each task's spread flags needs to be changed
+ *
+ * Iterate through each task of @cs updating its spread flags. As this
+ * function is called with cpuset_mutex held, cpuset membership stays
+ * stable.
+ */
+void cpuset1_update_tasks_flags(struct cpuset *cs)
+{
+ struct css_task_iter it;
+ struct task_struct *task;
+
+ css_task_iter_start(&cs->css, 0, &it);
+ while ((task = css_task_iter_next(&it)))
+ cpuset1_update_task_spread_flags(cs, task);
+ css_task_iter_end(&it);
+}
+
+/*
+ * If CPU and/or memory hotplug handlers, below, unplug any CPUs
+ * or memory nodes, we need to walk over the cpuset hierarchy,
+ * removing that CPU or node from all cpusets. If this removes the
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /*
+ * Find its next-highest non-empty parent (the top cpuset
+ * has online cpus, so it can't be empty).
+ */
+ parent = parent_cs(cs);
+ while (cpumask_empty(parent->cpus_allowed) ||
+ nodes_empty(parent->mems_allowed))
+ parent = parent_cs(parent);
+
+ if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
+ pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
+ pr_cont_cgroup_name(cs->css.cgroup);
+ pr_cont("\n");
+ }
+}
+
+static void cpuset_migrate_tasks_workfn(struct work_struct *work)
+{
+ struct cpuset_remove_tasks_struct *s;
+
+ s = container_of(work, struct cpuset_remove_tasks_struct, work);
+ remove_tasks_in_empty_cpuset(s->cs);
+ css_put(&s->cs->css);
+ kfree(s);
+}
+
+void cpuset1_hotplug_update_tasks(struct cpuset *cs,
+ struct cpumask *new_cpus, nodemask_t *new_mems,
+ bool cpus_updated, bool mems_updated)
+{
+ bool is_empty;
+
+ cpuset_callback_lock_irq();
+ cpumask_copy(cs->cpus_allowed, new_cpus);
+ cpumask_copy(cs->effective_cpus, new_cpus);
+ cs->mems_allowed = *new_mems;
+ cs->effective_mems = *new_mems;
+ cpuset_callback_unlock_irq();
+
+ /*
+ * Don't call cpuset_update_tasks_cpumask() if the cpuset becomes empty,
+ * as the tasks will be migrated to an ancestor.
+ */
+ if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
+ cpuset_update_tasks_cpumask(cs, new_cpus);
+ if (mems_updated && !nodes_empty(cs->mems_allowed))
+ cpuset_update_tasks_nodemask(cs);
+
+ is_empty = cpumask_empty(cs->cpus_allowed) ||
+ nodes_empty(cs->mems_allowed);
+
+ /*
+ * Move tasks to the nearest ancestor with execution resources.
+ * This is a full cgroup operation which will also call back into
+ * cpuset. Execute it asynchronously using a workqueue.
+ */
+ if (is_empty && cs->css.cgroup->nr_populated_csets &&
+ css_tryget_online(&cs->css)) {
+ struct cpuset_remove_tasks_struct *s;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (WARN_ON_ONCE(!s)) {
+ css_put(&cs->css);
+ return;
+ }
+
+ s->cs = cs;
+ INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
+ schedule_work(&s->work);
+ }
+}
+
+/*
+ * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
+ *
+ * One cpuset is a subset of another if all its allowed CPUs and
+ * Memory Nodes are a subset of the other, and its exclusive flags
+ * are only set if the other's are set. Call holding cpuset_mutex.
+ */
+
+static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
+{
+ return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+ nodes_subset(p->mems_allowed, q->mems_allowed) &&
+ is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
+ is_mem_exclusive(p) <= is_mem_exclusive(q);
+}
+
+/*
+ * cpuset1_validate_change() - Validate conditions specific to legacy (v1)
+ * behavior.
+ */
+int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
+{
+ struct cgroup_subsys_state *css;
+ struct cpuset *c, *par;
+ int ret;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* Each of our child cpusets must be a subset of us */
+ ret = -EBUSY;
+ cpuset_for_each_child(c, css, cur)
+ if (!is_cpuset_subset(c, trial))
+ goto out;
+
+ /* On legacy hierarchy, we must be a subset of our parent cpuset. */
+ ret = -EACCES;
+ par = parent_cs(cur);
+ if (par && !is_cpuset_subset(trial, par))
+ goto out;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct cpuset *cs = css_cs(css);
+ cpuset_filetype_t type = cft->private;
+
+ switch (type) {
+ case FILE_CPU_EXCLUSIVE:
+ return is_cpu_exclusive(cs);
+ case FILE_MEM_EXCLUSIVE:
+ return is_mem_exclusive(cs);
+ case FILE_MEM_HARDWALL:
+ return is_mem_hardwall(cs);
+ case FILE_SCHED_LOAD_BALANCE:
+ return is_sched_load_balance(cs);
+ case FILE_MEMORY_MIGRATE:
+ return is_memory_migrate(cs);
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ return cpuset_memory_pressure_enabled;
+ case FILE_MEMORY_PRESSURE:
+ return fmeter_getrate(&cs->fmeter);
+ case FILE_SPREAD_PAGE:
+ return is_spread_page(cs);
+ case FILE_SPREAD_SLAB:
+ return is_spread_slab(cs);
+ default:
+ BUG();
+ }
+
+ /* Unreachable but makes gcc happy */
+ return 0;
+}
+
+static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
+{
+ struct cpuset *cs = css_cs(css);
+ cpuset_filetype_t type = cft->private;
+ int retval = 0;
+
+ cpus_read_lock();
+ cpuset_lock();
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ switch (type) {
+ case FILE_CPU_EXCLUSIVE:
+ retval = cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, val);
+ break;
+ case FILE_MEM_EXCLUSIVE:
+ retval = cpuset_update_flag(CS_MEM_EXCLUSIVE, cs, val);
+ break;
+ case FILE_MEM_HARDWALL:
+ retval = cpuset_update_flag(CS_MEM_HARDWALL, cs, val);
+ break;
+ case FILE_SCHED_LOAD_BALANCE:
+ retval = cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
+ break;
+ case FILE_MEMORY_MIGRATE:
+ retval = cpuset_update_flag(CS_MEMORY_MIGRATE, cs, val);
+ break;
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ cpuset_memory_pressure_enabled = !!val;
+ break;
+ case FILE_SPREAD_PAGE:
+ retval = cpuset_update_flag(CS_SPREAD_PAGE, cs, val);
+ break;
+ case FILE_SPREAD_SLAB:
+ retval = cpuset_update_flag(CS_SPREAD_SLAB, cs, val);
+ break;
+ default:
+ retval = -EINVAL;
+ break;
+ }
+out_unlock:
+ cpuset_unlock();
+ cpus_read_unlock();
+ return retval;
+}
+
+/*
+ * for the common functions, 'private' gives the type of file
+ */
+
+struct cftype cpuset1_files[] = {
+ {
+ .name = "cpus",
+ .seq_show = cpuset_common_seq_show,
+ .write = cpuset_write_resmask,
+ .max_write_len = (100U + 6 * NR_CPUS),
+ .private = FILE_CPULIST,
+ },
+
+ {
+ .name = "mems",
+ .seq_show = cpuset_common_seq_show,
+ .write = cpuset_write_resmask,
+ .max_write_len = (100U + 6 * MAX_NUMNODES),
+ .private = FILE_MEMLIST,
+ },
+
+ {
+ .name = "effective_cpus",
+ .seq_show = cpuset_common_seq_show,
+ .private = FILE_EFFECTIVE_CPULIST,
+ },
+
+ {
+ .name = "effective_mems",
+ .seq_show = cpuset_common_seq_show,
+ .private = FILE_EFFECTIVE_MEMLIST,
+ },
+
+ {
+ .name = "cpu_exclusive",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_CPU_EXCLUSIVE,
+ },
+
+ {
+ .name = "mem_exclusive",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_MEM_EXCLUSIVE,
+ },
+
+ {
+ .name = "mem_hardwall",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_MEM_HARDWALL,
+ },
+
+ {
+ .name = "sched_load_balance",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_SCHED_LOAD_BALANCE,
+ },
+
+ {
+ .name = "sched_relax_domain_level",
+ .read_s64 = cpuset_read_s64,
+ .write_s64 = cpuset_write_s64,
+ .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+ },
+
+ {
+ .name = "memory_migrate",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_MEMORY_MIGRATE,
+ },
+
+ {
+ .name = "memory_pressure",
+ .read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
+ },
+
+ {
+ .name = "memory_spread_page",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_SPREAD_PAGE,
+ },
+
+ {
+ /* obsolete, may be removed in the future */
+ .name = "memory_spread_slab",
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_SPREAD_SLAB,
+ },
+
+ {
+ .name = "memory_pressure_enabled",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .read_u64 = cpuset_read_u64,
+ .write_u64 = cpuset_write_u64,
+ .private = FILE_MEMORY_PRESSURE_ENABLED,
+ },
+
+ { } /* terminate */
+};
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4bd9e50bcc8e..a4dd285cdf39 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -22,11 +22,8 @@
* distribution for more details.
*/
#include "cgroup-internal.h"
+#include "cpuset-internal.h"
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/cpuset.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -40,10 +37,8 @@
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/security.h>
-#include <linux/spinlock.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
-#include <linux/cgroup.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -57,30 +52,6 @@ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
*/
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
-/* See "Frequency meter" comments, below. */
-
-struct fmeter {
- int cnt; /* unprocessed events count */
- int val; /* most recent output value */
- time64_t time; /* clock (secs) when val computed */
- spinlock_t lock; /* guards read or write of above */
-};
-
-/*
- * Invalid partition error code
- */
-enum prs_errcode {
- PERR_NONE = 0,
- PERR_INVCPUS,
- PERR_INVPARENT,
- PERR_NOTPART,
- PERR_NOTEXCL,
- PERR_NOCPUS,
- PERR_HOTPLUG,
- PERR_CPUSEMPTY,
- PERR_HKEEPING,
-};
-
static const char * const perr_strings[] = {
[PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
[PERR_INVPARENT] = "Parent is an invalid partition root",
@@ -90,133 +61,7 @@ static const char * const perr_strings[] = {
[PERR_HOTPLUG] = "No cpu available due to hotplug",
[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
[PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
-};
-
-struct cpuset {
- struct cgroup_subsys_state css;
-
- unsigned long flags; /* "unsigned long" so bitops work */
-
- /*
- * On default hierarchy:
- *
- * The user-configured masks can only be changed by writing to
- * cpuset.cpus and cpuset.mems, and won't be limited by the
- * parent masks.
- *
- * The effective masks is the real masks that apply to the tasks
- * in the cpuset. They may be changed if the configured masks are
- * changed or hotplug happens.
- *
- * effective_mask == configured_mask & parent's effective_mask,
- * and if it ends up empty, it will inherit the parent's mask.
- *
- *
- * On legacy hierarchy:
- *
- * The user-configured masks are always the same with effective masks.
- */
-
- /* user-configured CPUs and Memory Nodes allow to tasks */
- cpumask_var_t cpus_allowed;
- nodemask_t mems_allowed;
-
- /* effective CPUs and Memory Nodes allow to tasks */
- cpumask_var_t effective_cpus;
- nodemask_t effective_mems;
-
- /*
- * Exclusive CPUs dedicated to current cgroup (default hierarchy only)
- *
- * The effective_cpus of a valid partition root comes solely from its
- * effective_xcpus and some of the effective_xcpus may be distributed
- * to sub-partitions below & hence excluded from its effective_cpus.
- * For a valid partition root, its effective_cpus have no relationship
- * with cpus_allowed unless its exclusive_cpus isn't set.
- *
- * This value will only be set if either exclusive_cpus is set or
- * when this cpuset becomes a local partition root.
- */
- cpumask_var_t effective_xcpus;
-
- /*
- * Exclusive CPUs as requested by the user (default hierarchy only)
- *
- * Its value is independent of cpus_allowed and designates the set of
- * CPUs that can be granted to the current cpuset or its children when
- * it becomes a valid partition root. The effective set of exclusive
- * CPUs granted (effective_xcpus) depends on whether those exclusive
- * CPUs are passed down by its ancestors and not yet taken up by
- * another sibling partition root along the way.
- *
- * If its value isn't set, it defaults to cpus_allowed.
- */
- cpumask_var_t exclusive_cpus;
-
- /*
- * This is old Memory Nodes tasks took on.
- *
- * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
- * - A new cpuset's old_mems_allowed is initialized when some
- * task is moved into it.
- * - old_mems_allowed is used in cpuset_migrate_mm() when we change
- * cpuset.mems_allowed and have tasks' nodemask updated, and
- * then old_mems_allowed is updated to mems_allowed.
- */
- nodemask_t old_mems_allowed;
-
- struct fmeter fmeter; /* memory_pressure filter */
-
- /*
- * Tasks are being attached to this cpuset. Used to prevent
- * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
- */
- int attach_in_progress;
-
- /* partition number for rebuild_sched_domains() */
- int pn;
-
- /* for custom sched domain */
- int relax_domain_level;
-
- /* number of valid local child partitions */
- int nr_subparts;
-
- /* partition root state */
- int partition_root_state;
-
- /*
- * Default hierarchy only:
- * use_parent_ecpus - set if using parent's effective_cpus
- * child_ecpus_count - # of children with use_parent_ecpus set
- */
- int use_parent_ecpus;
- int child_ecpus_count;
-
- /*
- * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
- * know when to rebuild associated root domain bandwidth information.
- */
- int nr_deadline_tasks;
- int nr_migrate_dl_tasks;
- u64 sum_migrate_dl_bw;
-
- /* Invalid partition error code, not lock protected */
- enum prs_errcode prs_err;
-
- /* Handle for cpuset.cpus.partition */
- struct cgroup_file partition_file;
-
- /* Remote partition sibling list anchored at remote_children */
- struct list_head remote_sibling;
-};
-
-/*
- * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchronously
- */
-struct cpuset_remove_tasks_struct {
- struct work_struct work;
- struct cpuset *cs;
+ [PERR_ACCESS] = "Enable partition not permitted",
};
/*
@@ -229,6 +74,12 @@ static cpumask_var_t subpartitions_cpus;
*/
static cpumask_var_t isolated_cpus;
+/*
+ * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
+ */
+static cpumask_var_t boot_hk_cpus;
+static bool have_boot_isolcpus;
+
/* List of remote partition root children */
static struct list_head remote_children;
@@ -279,22 +130,6 @@ struct tmpmasks {
cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
};
-static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct cpuset, css) : NULL;
-}
-
-/* Retrieve the cpuset for a task */
-static inline struct cpuset *task_cs(struct task_struct *task)
-{
- return css_cs(task_css(task, cpuset_cgrp_id));
-}
-
-static inline struct cpuset *parent_cs(struct cpuset *cs)
-{
- return css_cs(cs->css.parent);
-}
-
void inc_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
@@ -309,59 +144,6 @@ void dec_dl_tasks_cs(struct task_struct *p)
cs->nr_deadline_tasks--;
}
-/* bits in struct cpuset flags field */
-typedef enum {
- CS_ONLINE,
- CS_CPU_EXCLUSIVE,
- CS_MEM_EXCLUSIVE,
- CS_MEM_HARDWALL,
- CS_MEMORY_MIGRATE,
- CS_SCHED_LOAD_BALANCE,
- CS_SPREAD_PAGE,
- CS_SPREAD_SLAB,
-} cpuset_flagbits_t;
-
-/* convenient tests for these bits */
-static inline bool is_cpuset_online(struct cpuset *cs)
-{
- return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
-}
-
-static inline int is_cpu_exclusive(const struct cpuset *cs)
-{
- return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
-}
-
-static inline int is_mem_exclusive(const struct cpuset *cs)
-{
- return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
-}
-
-static inline int is_mem_hardwall(const struct cpuset *cs)
-{
- return test_bit(CS_MEM_HARDWALL, &cs->flags);
-}
-
-static inline int is_sched_load_balance(const struct cpuset *cs)
-{
- return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-}
-
-static inline int is_memory_migrate(const struct cpuset *cs)
-{
- return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
-}
-
-static inline int is_spread_page(const struct cpuset *cs)
-{
- return test_bit(CS_SPREAD_PAGE, &cs->flags);
-}
-
-static inline int is_spread_slab(const struct cpuset *cs)
-{
- return test_bit(CS_SPREAD_SLAB, &cs->flags);
-}
-
static inline int is_partition_valid(const struct cpuset *cs)
{
return cs->partition_root_state > 0;
@@ -403,34 +185,6 @@ static struct cpuset top_cpuset = {
.remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
};
-/**
- * cpuset_for_each_child - traverse online children of a cpuset
- * @child_cs: loop cursor pointing to the current child
- * @pos_css: used for iteration
- * @parent_cs: target cpuset to walk children of
- *
- * Walk @child_cs through the online children of @parent_cs. Must be used
- * with RCU read locked.
- */
-#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
- css_for_each_child((pos_css), &(parent_cs)->css) \
- if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
-
-/**
- * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
- * @des_cs: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @root_cs: target cpuset to walk descendants of
- *
- * Walk @des_cs through the online descendants of @root_cs. Must be used
- * with RCU read locked. The caller may modify @pos_css by calling
- * css_rightmost_descendant() to skip subtree. @root_cs is included in the
- * iteration and the first node to be visited.
- */
-#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
- css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
- if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
-
/*
* There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. We also require taking task_lock() when dereferencing a
@@ -484,6 +238,16 @@ void cpuset_unlock(void)
static DEFINE_SPINLOCK(callback_lock);
+void cpuset_callback_lock_irq(void)
+{
+ spin_lock_irq(&callback_lock);
+}
+
+void cpuset_callback_unlock_irq(void)
+{
+ spin_unlock_irq(&callback_lock);
+}
+
static struct workqueue_struct *cpuset_migrate_mm_wq;
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
@@ -500,6 +264,26 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
}
/*
+ * Decrease cs->attach_in_progress and wake up cpuset_attach_wq
+ * when cs->attach_in_progress drops to 0.
+ */
+static inline void dec_attach_in_progress_locked(struct cpuset *cs)
+{
+ lockdep_assert_held(&cpuset_mutex);
+
+ cs->attach_in_progress--;
+ if (!cs->attach_in_progress)
+ wake_up(&cpuset_attach_wq);
+}
+
+static inline void dec_attach_in_progress(struct cpuset *cs)
+{
+ mutex_lock(&cpuset_mutex);
+ dec_attach_in_progress_locked(cs);
+ mutex_unlock(&cpuset_mutex);
+}
+
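These helpers centralize the decrement-and-wake pattern that was previously open-coded at several call sites. For context, a minimal sketch of the waiting side they pair with, assuming the usual wait_event() pairing (the helper name here is hypothetical):

```c
/*
 * Hypothetical waiter pairing with dec_attach_in_progress(): block on
 * cpuset_attach_wq until no attach is mid-flight.  cpuset_mutex must
 * not be held here, or the attaching side could never finish and wake
 * us up.
 */
static void wait_for_attach_done(struct cpuset *cs)
{
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
}
```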
+/*
* Cgroup v2 behavior is used on the "cpus" and "mems" control files when
* on default hierarchy or when the cpuset_v2_mode flag is set by mounting
* the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
@@ -596,45 +380,6 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}
-/*
- * update task's spread flag if cpuset's page/slab spread flag is set
- *
- * Call with callback_lock or cpuset_mutex held. The check can be skipped
- * if on default hierarchy.
- */
-static void cpuset_update_task_spread_flags(struct cpuset *cs,
- struct task_struct *tsk)
-{
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
- return;
-
- if (is_spread_page(cs))
- task_set_spread_page(tsk);
- else
- task_clear_spread_page(tsk);
-
- if (is_spread_slab(cs))
- task_set_spread_slab(tsk);
- else
- task_clear_spread_slab(tsk);
-}
-
-/*
- * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
- *
- * One cpuset is a subset of another if all its allowed CPUs and
- * Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding cpuset_mutex.
- */
-
-static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
-{
- return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
- nodes_subset(p->mems_allowed, q->mems_allowed) &&
- is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
- is_mem_exclusive(p) <= is_mem_exclusive(q);
-}
-
/**
* alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset that has cpumasks to be allocated.
@@ -750,13 +495,6 @@ static inline bool xcpus_empty(struct cpuset *cs)
cpumask_empty(cs->exclusive_cpus);
}
-static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
-{
- return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus :
- cpumask_empty(cs->effective_xcpus) ? cs->cpus_allowed
- : cs->effective_xcpus;
-}
-
/*
* cpusets_are_exclusive() - check if two cpusets are exclusive
*
@@ -764,8 +502,8 @@ static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
*/
static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
{
- struct cpumask *xcpus1 = fetch_xcpus(cs1);
- struct cpumask *xcpus2 = fetch_xcpus(cs2);
+ struct cpumask *xcpus1 = user_xcpus(cs1);
+ struct cpumask *xcpus2 = user_xcpus(cs2);
if (cpumask_intersects(xcpus1, xcpus2))
return false;
@@ -773,35 +511,6 @@ static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
}
/*
- * validate_change_legacy() - Validate conditions specific to legacy (v1)
- * behavior.
- */
-static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
-{
- struct cgroup_subsys_state *css;
- struct cpuset *c, *par;
- int ret;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- /* Each of our child cpusets must be a subset of us */
- ret = -EBUSY;
- cpuset_for_each_child(c, css, cur)
- if (!is_cpuset_subset(c, trial))
- goto out;
-
- /* On legacy hierarchy, we must be a subset of our parent cpuset. */
- ret = -EACCES;
- par = parent_cs(cur);
- if (par && !is_cpuset_subset(trial, par))
- goto out;
-
- ret = 0;
-out:
- return ret;
-}
-
-/*
* validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets.
*
@@ -830,7 +539,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
rcu_read_lock();
if (!is_in_v2_mode())
- ret = validate_change_legacy(cur, trial);
+ ret = cpuset1_validate_change(cur, trial);
if (ret)
goto out;
@@ -996,18 +705,15 @@ static inline int nr_cpusets(void)
* were changed (added or removed.)
*
* Finding the best partition (set of domains):
- * The triple nested loops below over i, j, k scan over the
- * load balanced cpusets (using the array of cpuset pointers in
- * csa[]) looking for pairs of cpusets that have overlapping
- * cpus_allowed, but which don't have the same 'pn' partition
- * number and gives them in the same partition number. It keeps
- * looping on the 'restart' label until it can no longer find
- * any such pairs.
+ * The double nested loops below over i, j scan over the load
+ * balanced cpusets (using the array of cpuset pointers in csa[])
+ * looking for pairs of cpusets that have overlapping cpus_allowed
+ * and merging them using a union-find algorithm.
+ *
+ * The union of the cpus_allowed masks from the set of all cpusets
+ * having the same root then form the one element of the partition
+ * (one sched domain) to be passed to partition_sched_domains().
*
- * The union of the cpus_allowed masks from the set of
- * all cpusets having the same 'pn' value then form the one
- * element of the partition (one sched domain) to be passed to
- * partition_sched_domains().
*/
static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr **attributes)
@@ -1015,7 +721,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct cpuset *cp; /* top-down scan of cpusets */
struct cpuset **csa; /* array of all cpuset ptrs */
int csn; /* how many cpuset ptrs in csa so far */
- int i, j, k; /* indices for partition finding loops */
+ int i, j; /* indices for partition finding loops */
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
@@ -1023,6 +729,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct cgroup_subsys_state *pos_css;
bool root_load_balance = is_sched_load_balance(&top_cpuset);
bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+ int nslot_update;
doms = NULL;
dattr = NULL;
@@ -1111,32 +818,28 @@ v2:
goto single_root_domain;
for (i = 0; i < csn; i++)
- csa[i]->pn = i;
- ndoms = csn;
+ uf_node_init(&csa[i]->node);
-restart:
- /* Find the best partition (set of sched domains) */
+ /* Merge overlapping cpusets */
for (i = 0; i < csn; i++) {
- struct cpuset *a = csa[i];
- int apn = a->pn;
-
- for (j = 0; j < csn; j++) {
- struct cpuset *b = csa[j];
- int bpn = b->pn;
-
- if (apn != bpn && cpusets_overlap(a, b)) {
- for (k = 0; k < csn; k++) {
- struct cpuset *c = csa[k];
-
- if (c->pn == bpn)
- c->pn = apn;
- }
- ndoms--; /* one less element */
- goto restart;
+ for (j = i + 1; j < csn; j++) {
+ if (cpusets_overlap(csa[i], csa[j])) {
+ /*
+ * Cgroup v2 shouldn't pass down overlapping
+ * partition root cpusets.
+ */
+ WARN_ON_ONCE(cgrpv2);
+ uf_union(&csa[i]->node, &csa[j]->node);
}
}
}
+ /* Count the total number of domains */
+ for (i = 0; i < csn; i++) {
+ if (uf_find(&csa[i]->node) == &csa[i]->node)
+ ndoms++;
+ }
+
/*
* Now we know how many domains to create.
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
@@ -1167,44 +870,25 @@ restart:
}
for (nslot = 0, i = 0; i < csn; i++) {
- struct cpuset *a = csa[i];
- struct cpumask *dp;
- int apn = a->pn;
-
- if (apn < 0) {
- /* Skip completed partitions */
- continue;
- }
-
- dp = doms[nslot];
-
- if (nslot == ndoms) {
- static int warnings = 10;
- if (warnings) {
- pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
- nslot, ndoms, csn, i, apn);
- warnings--;
- }
- continue;
- }
-
- cpumask_clear(dp);
- if (dattr)
- *(dattr + nslot) = SD_ATTR_INIT;
+ nslot_update = 0;
for (j = i; j < csn; j++) {
- struct cpuset *b = csa[j];
-
- if (apn == b->pn) {
- cpumask_or(dp, dp, b->effective_cpus);
+ if (uf_find(&csa[j]->node) == &csa[i]->node) {
+ struct cpumask *dp = doms[nslot];
+
+ if (i == j) {
+ nslot_update = 1;
+ cpumask_clear(dp);
+ if (dattr)
+ *(dattr + nslot) = SD_ATTR_INIT;
+ }
+ cpumask_or(dp, dp, csa[j]->effective_cpus);
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
if (dattr)
- update_domain_attr_tree(dattr + nslot, b);
-
- /* Done with this partition */
- b->pn = -1;
+ update_domain_attr_tree(dattr + nslot, csa[j]);
}
}
- nslot++;
+ if (nslot_update)
+ nslot++;
}
BUG_ON(nslot != ndoms);
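The union-find rewrite replaces the restarting triple-nested 'pn' renumbering with a near-linear merge. As a sanity check of the merge step, here is a self-contained userspace sketch; the parent-pointer union-find with path halving stands in for the kernel's <linux/union_find.h> API, and the bitmask cpusets are made up:

```c
#include <stdio.h>

static int parent[8];

static int uf_find(int x)
{
	while (parent[x] != x)
		x = parent[x] = parent[parent[x]];	/* path halving */
	return x;
}

static void uf_union(int a, int b)
{
	parent[uf_find(a)] = uf_find(b);
}

int main(void)
{
	/* each bit is a CPU; four load-balanced "cpusets" */
	unsigned int csa[] = { 0x03, 0x06, 0x30, 0xc0 };
	int csn = 4, ndoms = 0, i, j;

	for (i = 0; i < csn; i++)
		parent[i] = i;

	/* merge overlapping cpusets, as in generate_sched_domains() */
	for (i = 0; i < csn; i++)
		for (j = i + 1; j < csn; j++)
			if (csa[i] & csa[j])	/* cpusets_overlap() */
				uf_union(i, j);

	/* each surviving root is one sched domain */
	for (i = 0; i < csn; i++)
		if (uf_find(i) == i)
			ndoms++;

	printf("%d domains\n", ndoms);	/* 3: CPUs {0-2}, {4,5}, {6,7} */
	return 0;
}
```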
@@ -1296,7 +980,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
*
* Call with cpuset_mutex held. Takes cpus_read_lock().
*/
-static void rebuild_sched_domains_locked(void)
+void rebuild_sched_domains_locked(void)
{
struct cgroup_subsys_state *pos_css;
struct sched_domain_attr *attr;
@@ -1348,7 +1032,7 @@ static void rebuild_sched_domains_locked(void)
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
-static void rebuild_sched_domains_locked(void)
+void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */
@@ -1368,7 +1052,7 @@ void rebuild_sched_domains(void)
}
/**
- * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
+ * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
* @new_cpus: the temp variable for the new effective_cpus mask
*
@@ -1378,7 +1062,7 @@ void rebuild_sched_domains(void)
* is used instead of effective_cpus to make sure all offline CPUs are also
 * included, as hotplug code won't update cpumasks for tasks in top_cpuset.
*/
-static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
+void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
struct css_task_iter it;
struct task_struct *task;
@@ -1428,8 +1112,6 @@ enum partition_cmd {
partcmd_invalidate, /* Make partition invalid */
};
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
- int turning_on);
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct tmpmasks *tmp);
@@ -1443,11 +1125,11 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
bool exclusive = (new_prs > PRS_MEMBER);
if (exclusive && !is_cpu_exclusive(cs)) {
- if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
+ if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
return PERR_NOTEXCL;
} else if (!exclusive && is_cpu_exclusive(cs)) {
/* Turning off CS_CPU_EXCLUSIVE will not return error */
- update_flag(CS_CPU_EXCLUSIVE, cs, 0);
+ cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
}
return 0;
}
@@ -1516,12 +1198,8 @@ static void reset_partition_data(struct cpuset *cs)
if (is_cpu_exclusive(cs))
clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}
- if (!cpumask_and(cs->effective_cpus,
- parent->effective_cpus, cs->cpus_allowed)) {
- cs->use_parent_ecpus = true;
- parent->child_ecpus_count++;
+ if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
- }
}
/*
@@ -1662,7 +1340,7 @@ static inline bool is_local_partition(struct cpuset *cs)
* @cs: the cpuset to update
* @new_prs: new partition_root_state
 * @tmp: temporary masks
- * Return: 1 if successful, 0 if error
+ * Return: 0 if successful, errcode if error
*
* Enable the current cpuset to become a remote partition root taking CPUs
* directly from the top cpuset. cpuset_mutex must be held by the caller.
@@ -1676,7 +1354,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
* The user must have sysadmin privilege.
*/
if (!capable(CAP_SYS_ADMIN))
- return 0;
+ return PERR_ACCESS;
/*
* The requested exclusive_cpus must not be allocated to other
@@ -1690,26 +1368,20 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
if (cpumask_empty(tmp->new_cpus) ||
cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
- return 0;
+ return PERR_INVCPUS;
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
list_add(&cs->remote_sibling, &remote_children);
- if (cs->use_parent_ecpus) {
- struct cpuset *parent = parent_cs(cs);
-
- cs->use_parent_ecpus = false;
- parent->child_ecpus_count--;
- }
spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated);
/*
 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
- update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
- return 1;
+ return 0;
}
/*
@@ -1743,7 +1415,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
/*
 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
- update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
}
@@ -1795,7 +1467,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
/*
 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
*/
- update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
return;
@@ -1850,15 +1522,15 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
* @new_cpus: cpu mask
* Return: true if there is conflict, false otherwise
*
- * CPUs outside of housekeeping_cpumask(HK_TYPE_DOMAIN) can only be used in
- * an isolated partition.
+ * CPUs outside of boot_hk_cpus, if defined, can only be used in an
+ * isolated partition.
*/
static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
{
- const struct cpumask *hk_domain = housekeeping_cpumask(HK_TYPE_DOMAIN);
- bool all_in_hk = cpumask_subset(new_cpus, hk_domain);
+ if (!have_boot_isolcpus)
+ return false;
- if (!all_in_hk && (prstate != PRS_ISOLATED))
+ if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
return true;
return false;
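A worked example of the relaxed rule, with hypothetical masks:

```c
/*
 * Worked example (hypothetical masks): booted with boot_hk_cpus = 0-3,
 * a cpuset requesting CPUs 2-5:
 *   - as a "root" partition:      conflict (CPUs 4,5 fall outside
 *                                 boot_hk_cpus and prstate != PRS_ISOLATED)
 *   - as an "isolated" partition: no conflict
 *   - without boot isolcpus (have_boot_isolcpus == false): never a
 *     conflict, whatever the requested CPUs.
 */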
@@ -2167,7 +1839,7 @@ write_error:
update_partition_exclusive(cs, new_prs);
if (adding || deleting) {
- update_tasks_cpumask(parent, tmp->addmask);
+ cpuset_update_tasks_cpumask(parent, tmp->addmask);
update_sibling_cpumasks(parent, cs, tmp);
}
@@ -2325,17 +1997,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
* it is a partition root that has explicitly distributed
* out all its CPUs.
*/
- if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) {
+ if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
cpumask_copy(tmp->new_cpus, parent->effective_cpus);
- if (!cp->use_parent_ecpus) {
- cp->use_parent_ecpus = true;
- parent->child_ecpus_count++;
- }
- } else if (cp->use_parent_ecpus) {
- cp->use_parent_ecpus = false;
- WARN_ON_ONCE(!parent->child_ecpus_count);
- parent->child_ecpus_count--;
- }
if (remote)
goto get_css;
@@ -2359,7 +2022,7 @@ update_parent_effective:
/*
* update_parent_effective_cpumask() should have been called
* for cs already in update_cpumask(). We should also call
- * update_tasks_cpumask() again for tasks in the parent
+ * cpuset_update_tasks_cpumask() again for tasks in the parent
* cpuset if the parent's effective_cpus changes.
*/
if ((cp != cs) && old_prs) {
@@ -2416,7 +2079,7 @@ get_css:
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
- update_tasks_cpumask(cp, cp->effective_cpus);
+ cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
/*
* On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
@@ -2472,8 +2135,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
* Check all its siblings and call update_cpumasks_hier()
* if their effective_cpus will need to be changed.
*
- * With the addition of effective_xcpus which is a subset of
- * cpus_allowed. It is possible a change in parent's effective_cpus
+ * It is possible that a change in the parent's effective_cpus
* due to a change in a child partition's effective_xcpus will impact
* its siblings even if they do not inherit parent's effective_cpus
* directly.
@@ -2487,8 +2149,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
cpuset_for_each_child(sibling, pos_css, parent) {
if (sibling == cs)
continue;
- if (!sibling->use_parent_ecpus &&
- !is_partition_valid(sibling)) {
+ if (!is_partition_valid(sibling)) {
compute_effective_cpumask(tmp->new_cpus, sibling,
parent);
if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
@@ -2598,7 +2259,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
invalidate = true;
rcu_read_lock();
cpuset_for_each_child(cp, css, parent) {
- struct cpumask *xcpus = fetch_xcpus(trialcs);
+ struct cpumask *xcpus = user_xcpus(trialcs);
if (is_partition_valid(cp) &&
cpumask_intersects(xcpus, cp->effective_xcpus)) {
@@ -2845,14 +2506,14 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
static void *cpuset_being_rebound;
/**
- * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
+ * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
-static void update_tasks_nodemask(struct cpuset *cs)
+void cpuset_update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
@@ -2950,7 +2611,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
- update_tasks_nodemask(cp);
+ cpuset_update_tasks_nodemask(cp);
rcu_read_lock();
css_put(&cp->css);
@@ -3036,44 +2697,8 @@ bool current_cpuset_is_being_rebound(void)
return ret;
}
-static int update_relax_domain_level(struct cpuset *cs, s64 val)
-{
-#ifdef CONFIG_SMP
- if (val < -1 || val > sched_domain_level_max + 1)
- return -EINVAL;
-#endif
-
- if (val != cs->relax_domain_level) {
- cs->relax_domain_level = val;
- if (!cpumask_empty(cs->cpus_allowed) &&
- is_sched_load_balance(cs))
- rebuild_sched_domains_locked();
- }
-
- return 0;
-}
-
-/**
- * update_tasks_flags - update the spread flags of tasks in the cpuset.
- * @cs: the cpuset in which each task's spread flags needs to be changed
- *
- * Iterate through each task of @cs updating its spread flags. As this
- * function is called with cpuset_mutex held, cpuset membership stays
- * stable.
- */
-static void update_tasks_flags(struct cpuset *cs)
-{
- struct css_task_iter it;
- struct task_struct *task;
-
- css_task_iter_start(&cs->css, 0, &it);
- while ((task = css_task_iter_next(&it)))
- cpuset_update_task_spread_flags(cs, task);
- css_task_iter_end(&it);
-}
-
/*
- * update_flag - read a 0 or a 1 in a file and update associated flag
+ * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (see cpuset_flagbits_t)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
@@ -3081,7 +2706,7 @@ static void update_tasks_flags(struct cpuset *cs)
* Call with cpuset_mutex held.
*/
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on)
{
struct cpuset *trialcs;
@@ -3117,7 +2742,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
rebuild_sched_domains_locked();
if (spread_flag_changed)
- update_tasks_flags(cs);
+ cpuset1_update_tasks_flags(cs);
out:
free_cpuset(trialcs);
return err;
@@ -3166,9 +2791,6 @@ static int update_prstate(struct cpuset *cs, int new_prs)
goto out;
if (!old_prs) {
- enum partition_cmd cmd = (new_prs == PRS_ROOT)
- ? partcmd_enable : partcmd_enablei;
-
/*
* cpus_allowed and exclusive_cpus cannot be both empty.
*/
@@ -3177,13 +2799,18 @@ static int update_prstate(struct cpuset *cs, int new_prs)
goto out;
}
- err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
/*
- * If an attempt to become local partition root fails,
- * try to become a remote partition root instead.
+ * If the parent is a valid partition, enable a local partition.
+ * Otherwise, enable a remote partition.
*/
- if (err && remote_partition_enable(cs, new_prs, &tmpmask))
- err = 0;
+ if (is_partition_valid(parent)) {
+ enum partition_cmd cmd = (new_prs == PRS_ROOT)
+ ? partcmd_enable : partcmd_enablei;
+
+ err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
+ } else {
+ err = remote_partition_enable(cs, new_prs, &tmpmask);
+ }
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
@@ -3236,107 +2863,6 @@ out:
return 0;
}
-/*
- * Frequency meter - How fast is some event occurring?
- *
- * These routines manage a digitally filtered, constant time based,
- * event frequency meter. There are four routines:
- * fmeter_init() - initialize a frequency meter.
- * fmeter_markevent() - called each time the event happens.
- * fmeter_getrate() - returns the recent rate of such events.
- * fmeter_update() - internal routine used to update fmeter.
- *
- * A common data structure is passed to each of these routines,
- * which is used to keep track of the state required to manage the
- * frequency meter and its digital filter.
- *
- * The filter works on the number of events marked per unit time.
- * The filter is single-pole low-pass recursive (IIR). The time unit
- * is 1 second. Arithmetic is done using 32-bit integers scaled to
- * simulate 3 decimal digits of precision (multiplied by 1000).
- *
- * With an FM_COEF of 933, and a time base of 1 second, the filter
- * has a half-life of 10 seconds, meaning that if the events quit
- * happening, then the rate returned from fmeter_getrate()
- * will be cut in half every 10 seconds, until it converges to zero.
- *
- * It is not worth doing a real infinitely recursive filter. If more
- * than FM_MAXTICKS ticks have elapsed since the last filter event,
- * just compute FM_MAXTICKS ticks worth, by which point the level
- * will be stable.
- *
- * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
- * arithmetic overflow in the fmeter_update() routine.
- *
- * Given the simple 32 bit integer arithmetic used, this meter works
- * best for reporting rates between one per millisecond (msec) and
- * one per 32 (approx) seconds. At constant rates faster than one
- * per msec it maxes out at values just under 1,000,000. At constant
- * rates between one per msec, and one per second it will stabilize
- * to a value N*1000, where N is the rate of events per second.
- * At constant rates between one per second and one per 32 seconds,
- * it will be choppy, moving up on the seconds that have an event,
- * and then decaying until the next event. At rates slower than
- * about one in 32 seconds, it decays all the way back to zero between
- * each event.
- */
-
-#define FM_COEF 933 /* coefficient for half-life of 10 secs */
-#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
-#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
-#define FM_SCALE 1000 /* faux fixed point scale */
-
-/* Initialize a frequency meter */
-static void fmeter_init(struct fmeter *fmp)
-{
- fmp->cnt = 0;
- fmp->val = 0;
- fmp->time = 0;
- spin_lock_init(&fmp->lock);
-}
-
-/* Internal meter update - process cnt events and update value */
-static void fmeter_update(struct fmeter *fmp)
-{
- time64_t now;
- u32 ticks;
-
- now = ktime_get_seconds();
- ticks = now - fmp->time;
-
- if (ticks == 0)
- return;
-
- ticks = min(FM_MAXTICKS, ticks);
- while (ticks-- > 0)
- fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
- fmp->time = now;
-
- fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
- fmp->cnt = 0;
-}
-
-/* Process any previous ticks, then bump cnt by one (times scale). */
-static void fmeter_markevent(struct fmeter *fmp)
-{
- spin_lock(&fmp->lock);
- fmeter_update(fmp);
- fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
- spin_unlock(&fmp->lock);
-}
-
-/* Process any previous ticks, then return current value. */
-static int fmeter_getrate(struct fmeter *fmp)
-{
- int val;
-
- spin_lock(&fmp->lock);
- fmeter_update(fmp);
- val = fmp->val;
- spin_unlock(&fmp->lock);
- return val;
-}
-
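The half-life claim in the removed comment is easy to check numerically; a standalone userspace sketch of the decay step from fmeter_update(), using the same integer arithmetic:

```c
#include <stdio.h>

#define FM_COEF  933	/* coefficient for half-life of 10 secs */
#define FM_SCALE 1000	/* faux fixed point scale */

int main(void)
{
	int val = 1000000, ticks;

	/* one decay step per elapsed second, as in fmeter_update() */
	for (ticks = 0; ticks < 10; ticks++)
		val = (FM_COEF * val) / FM_SCALE;

	printf("after 10 ticks: %d\n", val);	/* prints 499820, about half */
	return 0;
}
```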
static struct cpuset *cpuset_attach_old_cs;
/*
@@ -3445,9 +2971,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
- cs->attach_in_progress--;
- if (!cs->attach_in_progress)
- wake_up(&cpuset_attach_wq);
+ dec_attach_in_progress_locked(cs);
if (cs->nr_migrate_dl_tasks) {
int cpu = cpumask_any(cs->effective_cpus);
@@ -3483,7 +3007,7 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
- cpuset_update_task_spread_flags(cs, task);
+ cpuset1_update_task_spread_flags(cs, task);
}
static void cpuset_attach(struct cgroup_taskset *tset)
@@ -3562,116 +3086,15 @@ out:
reset_migrate_dl_data(cs);
}
- cs->attach_in_progress--;
- if (!cs->attach_in_progress)
- wake_up(&cpuset_attach_wq);
-
- mutex_unlock(&cpuset_mutex);
-}
-
-/* The various types of files and directories in a cpuset file system */
-
-typedef enum {
- FILE_MEMORY_MIGRATE,
- FILE_CPULIST,
- FILE_MEMLIST,
- FILE_EFFECTIVE_CPULIST,
- FILE_EFFECTIVE_MEMLIST,
- FILE_SUBPARTS_CPULIST,
- FILE_EXCLUSIVE_CPULIST,
- FILE_EFFECTIVE_XCPULIST,
- FILE_ISOLATED_CPULIST,
- FILE_CPU_EXCLUSIVE,
- FILE_MEM_EXCLUSIVE,
- FILE_MEM_HARDWALL,
- FILE_SCHED_LOAD_BALANCE,
- FILE_PARTITION_ROOT,
- FILE_SCHED_RELAX_DOMAIN_LEVEL,
- FILE_MEMORY_PRESSURE_ENABLED,
- FILE_MEMORY_PRESSURE,
- FILE_SPREAD_PAGE,
- FILE_SPREAD_SLAB,
-} cpuset_filetype_t;
-
-static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val)
-{
- struct cpuset *cs = css_cs(css);
- cpuset_filetype_t type = cft->private;
- int retval = 0;
+ dec_attach_in_progress_locked(cs);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs)) {
- retval = -ENODEV;
- goto out_unlock;
- }
-
- switch (type) {
- case FILE_CPU_EXCLUSIVE:
- retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
- break;
- case FILE_MEM_EXCLUSIVE:
- retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
- break;
- case FILE_MEM_HARDWALL:
- retval = update_flag(CS_MEM_HARDWALL, cs, val);
- break;
- case FILE_SCHED_LOAD_BALANCE:
- retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
- break;
- case FILE_MEMORY_MIGRATE:
- retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
- break;
- case FILE_MEMORY_PRESSURE_ENABLED:
- cpuset_memory_pressure_enabled = !!val;
- break;
- case FILE_SPREAD_PAGE:
- retval = update_flag(CS_SPREAD_PAGE, cs, val);
- break;
- case FILE_SPREAD_SLAB:
- retval = update_flag(CS_SPREAD_SLAB, cs, val);
- break;
- default:
- retval = -EINVAL;
- break;
- }
-out_unlock:
mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
- return retval;
-}
-
-static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
- s64 val)
-{
- struct cpuset *cs = css_cs(css);
- cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
-
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
- goto out_unlock;
-
- switch (type) {
- case FILE_SCHED_RELAX_DOMAIN_LEVEL:
- retval = update_relax_domain_level(cs, val);
- break;
- default:
- retval = -EINVAL;
- break;
- }
-out_unlock:
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
- return retval;
}
/*
* Common handling for a write to a "cpus" or "mems" file.
*/
-static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cpuset *cs = css_cs(of_css(of));
@@ -3746,7 +3169,7 @@ out_unlock:
* and since these maps can change value dynamically, one could read
* gibberish by doing partial reads while a list was changing.
*/
-static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
cpuset_filetype_t type = seq_cft(sf)->private;
@@ -3787,52 +3210,6 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
return ret;
}
-static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
-{
- struct cpuset *cs = css_cs(css);
- cpuset_filetype_t type = cft->private;
- switch (type) {
- case FILE_CPU_EXCLUSIVE:
- return is_cpu_exclusive(cs);
- case FILE_MEM_EXCLUSIVE:
- return is_mem_exclusive(cs);
- case FILE_MEM_HARDWALL:
- return is_mem_hardwall(cs);
- case FILE_SCHED_LOAD_BALANCE:
- return is_sched_load_balance(cs);
- case FILE_MEMORY_MIGRATE:
- return is_memory_migrate(cs);
- case FILE_MEMORY_PRESSURE_ENABLED:
- return cpuset_memory_pressure_enabled;
- case FILE_MEMORY_PRESSURE:
- return fmeter_getrate(&cs->fmeter);
- case FILE_SPREAD_PAGE:
- return is_spread_page(cs);
- case FILE_SPREAD_SLAB:
- return is_spread_slab(cs);
- default:
- BUG();
- }
-
- /* Unreachable but makes gcc happy */
- return 0;
-}
-
-static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
-{
- struct cpuset *cs = css_cs(css);
- cpuset_filetype_t type = cft->private;
- switch (type) {
- case FILE_SCHED_RELAX_DOMAIN_LEVEL:
- return cs->relax_domain_level;
- default:
- BUG();
- }
-
- /* Unreachable but makes gcc happy */
- return 0;
-}
-
static int sched_partition_show(struct seq_file *seq, void *v)
{
struct cpuset *cs = css_cs(seq_css(seq));
@@ -3897,113 +3274,6 @@ out_unlock:
}
/*
- * for the common functions, 'private' gives the type of file
- */
-
-static struct cftype legacy_files[] = {
- {
- .name = "cpus",
- .seq_show = cpuset_common_seq_show,
- .write = cpuset_write_resmask,
- .max_write_len = (100U + 6 * NR_CPUS),
- .private = FILE_CPULIST,
- },
-
- {
- .name = "mems",
- .seq_show = cpuset_common_seq_show,
- .write = cpuset_write_resmask,
- .max_write_len = (100U + 6 * MAX_NUMNODES),
- .private = FILE_MEMLIST,
- },
-
- {
- .name = "effective_cpus",
- .seq_show = cpuset_common_seq_show,
- .private = FILE_EFFECTIVE_CPULIST,
- },
-
- {
- .name = "effective_mems",
- .seq_show = cpuset_common_seq_show,
- .private = FILE_EFFECTIVE_MEMLIST,
- },
-
- {
- .name = "cpu_exclusive",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_CPU_EXCLUSIVE,
- },
-
- {
- .name = "mem_exclusive",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_MEM_EXCLUSIVE,
- },
-
- {
- .name = "mem_hardwall",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_MEM_HARDWALL,
- },
-
- {
- .name = "sched_load_balance",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_SCHED_LOAD_BALANCE,
- },
-
- {
- .name = "sched_relax_domain_level",
- .read_s64 = cpuset_read_s64,
- .write_s64 = cpuset_write_s64,
- .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
- },
-
- {
- .name = "memory_migrate",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_MEMORY_MIGRATE,
- },
-
- {
- .name = "memory_pressure",
- .read_u64 = cpuset_read_u64,
- .private = FILE_MEMORY_PRESSURE,
- },
-
- {
- .name = "memory_spread_page",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_SPREAD_PAGE,
- },
-
- {
- /* obsolete, may be removed in the future */
- .name = "memory_spread_slab",
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_SPREAD_SLAB,
- },
-
- {
- .name = "memory_pressure_enabled",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .read_u64 = cpuset_read_u64,
- .write_u64 = cpuset_write_u64,
- .private = FILE_MEMORY_PRESSURE_ENABLED,
- },
-
- { } /* terminate */
-};
-
-/*
* This is currently a minimal set for the default hierarchy. It can be
* expanded later on by migrating more features and control files from v1.
*/
@@ -4150,8 +3420,6 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
- cs->use_parent_ecpus = true;
- parent->child_ecpus_count++;
}
spin_unlock_irq(&callback_lock);
@@ -4215,14 +3483,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
is_sched_load_balance(cs))
- update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
-
- if (cs->use_parent_ecpus) {
- struct cpuset *parent = parent_cs(cs);
-
- cs->use_parent_ecpus = false;
- parent->child_ecpus_count--;
- }
+ cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
@@ -4312,11 +3573,7 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
if (same_cs)
return;
- mutex_lock(&cpuset_mutex);
- cs->attach_in_progress--;
- if (!cs->attach_in_progress)
- wake_up(&cpuset_attach_wq);
- mutex_unlock(&cpuset_mutex);
+ dec_attach_in_progress(cs);
}
/*
@@ -4348,10 +3605,7 @@ static void cpuset_fork(struct task_struct *task)
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cpuset_attach_task(cs, task);
- cs->attach_in_progress--;
- if (!cs->attach_in_progress)
- wake_up(&cpuset_attach_wq);
-
+ dec_attach_in_progress_locked(cs);
mutex_unlock(&cpuset_mutex);
}
@@ -4368,7 +3622,9 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.can_fork = cpuset_can_fork,
.cancel_fork = cpuset_cancel_fork,
.fork = cpuset_fork,
- .legacy_cftypes = legacy_files,
+#ifdef CONFIG_CPUSETS_V1
+ .legacy_cftypes = cpuset1_files,
+#endif
.dfl_cftypes = dfl_files,
.early_init = true,
.threaded = true,
@@ -4401,91 +3657,14 @@ int __init cpuset_init(void)
BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
- return 0;
-}
-
-/*
- * If CPU and/or memory hotplug handlers, below, unplug any CPUs
- * or memory nodes, we need to walk over the cpuset hierarchy,
- * removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then move the tasks in the empty
- * cpuset to its next-highest non-empty parent.
- */
-static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
-{
- struct cpuset *parent;
-
- /*
- * Find its next-highest non-empty parent (top cpuset
- * has online cpus, so can't be empty).
- */
- parent = parent_cs(cs);
- while (cpumask_empty(parent->cpus_allowed) ||
- nodes_empty(parent->mems_allowed))
- parent = parent_cs(parent);
-
- if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
- pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
- pr_cont_cgroup_name(cs->css.cgroup);
- pr_cont("\n");
+ have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
+ if (have_boot_isolcpus) {
+ BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
+ cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
}
-}
-
-static void cpuset_migrate_tasks_workfn(struct work_struct *work)
-{
- struct cpuset_remove_tasks_struct *s;
-
- s = container_of(work, struct cpuset_remove_tasks_struct, work);
- remove_tasks_in_empty_cpuset(s->cs);
- css_put(&s->cs->css);
- kfree(s);
-}
-
-static void
-hotplug_update_tasks_legacy(struct cpuset *cs,
- struct cpumask *new_cpus, nodemask_t *new_mems,
- bool cpus_updated, bool mems_updated)
-{
- bool is_empty;
-
- spin_lock_irq(&callback_lock);
- cpumask_copy(cs->cpus_allowed, new_cpus);
- cpumask_copy(cs->effective_cpus, new_cpus);
- cs->mems_allowed = *new_mems;
- cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
-
- /*
- * Don't call update_tasks_cpumask() if the cpuset becomes empty,
- * as the tasks will be migrated to an ancestor.
- */
- if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
- update_tasks_cpumask(cs, new_cpus);
- if (mems_updated && !nodes_empty(cs->mems_allowed))
- update_tasks_nodemask(cs);
-
- is_empty = cpumask_empty(cs->cpus_allowed) ||
- nodes_empty(cs->mems_allowed);
-
- /*
- * Move tasks to the nearest ancestor with execution resources.
- * This is a full cgroup operation which will also call back into
- * cpuset. Execute it asynchronously using workqueue.
- */
- if (is_empty && cs->css.cgroup->nr_populated_csets &&
- css_tryget_online(&cs->css)) {
- struct cpuset_remove_tasks_struct *s;
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (WARN_ON_ONCE(!s)) {
- css_put(&cs->css);
- return;
- }
-
- s->cs = cs;
- INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
- schedule_work(&s->work);
- }
+ return 0;
}
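For illustration, a hypothetical boot configuration and the masks this initialization would derive from it (assuming the default "domain" flag of isolcpus=):

```c
/*
 * Hypothetical boot example: an 8-CPU machine started with
 * "isolcpus=3,5-7" (whose default flag is "domain") gives
 *
 *	boot_hk_cpus  = 0-2,4	housekeeping_cpumask(HK_TYPE_DOMAIN)
 *	isolated_cpus = 3,5-7	cpu_possible_mask & ~boot_hk_cpus
 *
 * so CPUs 3,5-7 start out accounted as isolated and, per
 * prstate_housekeeping_conflict(), can only be claimed by isolated
 * partitions later on.
 */
```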
static void
@@ -4505,9 +3684,9 @@ hotplug_update_tasks(struct cpuset *cs,
spin_unlock_irq(&callback_lock);
if (cpus_updated)
- update_tasks_cpumask(cs, new_cpus);
+ cpuset_update_tasks_cpumask(cs, new_cpus);
if (mems_updated)
- update_tasks_nodemask(cs);
+ cpuset_update_tasks_nodemask(cs);
}
void cpuset_force_rebuild(void)
@@ -4608,7 +3787,7 @@ update_tasks:
hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
else
- hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+ cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
unlock:
@@ -4693,7 +3872,7 @@ static void cpuset_handle_hotplug(void)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
spin_unlock_irq(&callback_lock);
- update_tasks_nodemask(&top_cpuset);
+ cpuset_update_tasks_nodemask(&top_cpuset);
}
mutex_unlock(&cpuset_mutex);
@@ -5033,19 +4212,6 @@ int cpuset_mem_spread_node(void)
}
/**
- * cpuset_slab_spread_node() - On which node to begin search for a slab page
- */
-int cpuset_slab_spread_node(void)
-{
- if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
- current->cpuset_slab_spread_rotor =
- node_random(&current->mems_allowed);
-
- return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
-}
-EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
-
-/**
* cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
* @tsk1: pointer to task_struct of some task.
* @tsk2: pointer to task_struct of some other task.
@@ -5083,39 +4249,6 @@ void cpuset_print_current_mems_allowed(void)
rcu_read_unlock();
}
-/*
- * Collection of memory_pressure is suppressed unless
- * this flag is enabled by writing "1" to the special
- * cpuset file 'memory_pressure_enabled' in the root cpuset.
- */
-
-int cpuset_memory_pressure_enabled __read_mostly;
-
-/*
- * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
- *
- * Keep a running average of the rate of synchronous (direct)
- * page reclaim efforts initiated by tasks in each cpuset.
- *
- * This represents the rate at which some task in the cpuset
- * ran low on memory on all nodes it was allowed to use, and
- * had to enter the kernels page reclaim code in an effort to
- * create more free memory by tossing clean pages or swapping
- * or writing dirty pages.
- *
- * Display to user space in the per-cpuset read-only file
- * "memory_pressure". Value displayed is an integer
- * representing the recent rate of entry into the synchronous
- * (direct) page reclaim by any task attached to the cpuset.
- */
-
-void __cpuset_memory_pressure_bump(void)
-{
- rcu_read_lock();
- fmeter_markevent(&task_cs(current)->fmeter);
- rcu_read_unlock();
-}
-
#ifdef CONFIG_PROC_PID_CPUSET
/*
* proc_cpuset_show()
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index f5cb0ec45b9d..8f61114c36dd 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -244,7 +244,6 @@ static void pids_event(struct pids_cgroup *pids_forking,
struct pids_cgroup *pids_over_limit)
{
struct pids_cgroup *p = pids_forking;
- bool limit = false;
	/* Only log the first time the limit is hit. */
if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) {
@@ -252,20 +251,17 @@ static void pids_event(struct pids_cgroup *pids_forking,
pr_cont_cgroup_path(p->css.cgroup);
pr_cont("\n");
}
- cgroup_file_notify(&p->events_local_file);
if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) ||
- cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
+ cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) {
+ cgroup_file_notify(&p->events_local_file);
return;
+ }
- for (; parent_pids(p); p = parent_pids(p)) {
- if (p == pids_over_limit) {
- limit = true;
- atomic64_inc(&p->events_local[PIDCG_MAX]);
- cgroup_file_notify(&p->events_local_file);
- }
- if (limit)
- atomic64_inc(&p->events[PIDCG_MAX]);
+ atomic64_inc(&pids_over_limit->events_local[PIDCG_MAX]);
+ cgroup_file_notify(&pids_over_limit->events_local_file);
+ for (p = pids_over_limit; parent_pids(p); p = parent_pids(p)) {
+ atomic64_inc(&p->events[PIDCG_MAX]);
cgroup_file_notify(&p->events_file);
}
}
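A hypothetical walk-through of the reworked notification flow; the cgroup paths below are illustrative:

```c
/*
 * Hypothetical example on cgroup v2 without the pids_localevents mount
 * option: a fork in /a/b/c fails because /a sits at pids.max, so
 *   - /a/b/c: events_local[FORKFAIL]++ (message logged only once);
 *   - /a:     events_local[MAX]++ and a pids.events.local notification;
 *   - /a and every ancestor that itself has a parent:
 *             events[MAX]++ and a pids.events notification.
 * On cgroup v1, or with pids_localevents set, only the forking cgroup's
 * local events file is notified and the ancestor walk is skipped.
 */
```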
@@ -276,15 +272,10 @@ static void pids_event(struct pids_cgroup *pids_forking,
*/
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
- struct cgroup_subsys_state *css;
struct pids_cgroup *pids, *pids_over_limit;
int err;
- if (cset)
- css = cset->subsys[pids_cgrp_id];
- else
- css = task_css_check(current, pids_cgrp_id, true);
- pids = css_pids(css);
+ pids = css_pids(cset->subsys[pids_cgrp_id]);
err = pids_try_charge(pids, 1, &pids_over_limit);
if (err)
pids_event(pids, pids_over_limit);
@@ -294,14 +285,9 @@ static int pids_can_fork(struct task_struct *task, struct css_set *cset)
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
- struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
- if (cset)
- css = cset->subsys[pids_cgrp_id];
- else
- css = task_css_check(current, pids_cgrp_id, true);
- pids = css_pids(css);
+ pids = css_pids(cset->subsys[pids_cgrp_id]);
pids_uncharge(pids, 1);
}
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index 00009f7d0835..b753695c5a8f 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -1,10 +1,4 @@
-# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_KERNEL_GZIP is not set
-# CONFIG_KERNEL_BZIP2 is not set
-# CONFIG_KERNEL_LZMA is not set
CONFIG_KERNEL_XZ=y
-# CONFIG_KERNEL_LZO is not set
-# CONFIG_KERNEL_LZ4 is not set
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 24b1e1143260..938c48952d26 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -28,34 +28,34 @@
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
- .dynticks_nesting = 1,
- .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
+ .nesting = 1,
+ .nmi_nesting = CT_NESTING_IRQ_NONIDLE,
#endif
- .state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
+ .state = ATOMIC_INIT(CT_RCU_WATCHING),
};
EXPORT_SYMBOL_GPL(context_tracking);
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
#define TPS(x) tracepoint_string(x)
-/* Record the current task on dyntick-idle entry. */
-static __always_inline void rcu_dynticks_task_enter(void)
+/* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
+static __always_inline void rcu_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}
-/* Record no current task on dyntick-idle exit. */
-static __always_inline void rcu_dynticks_task_exit(void)
+/* Record no current task on entering RCU-tasks (dyntick-idle exit). */
+static __always_inline void rcu_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}
-/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
-static __always_inline void rcu_dynticks_task_trace_enter(void)
+/* Turn on heavyweight RCU tasks trace readers on kernel exit. */
+static __always_inline void rcu_task_trace_heavyweight_enter(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -63,8 +63,8 @@ static __always_inline void rcu_dynticks_task_trace_enter(void)
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
-/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
-static __always_inline void rcu_dynticks_task_trace_exit(void)
+/* Turn off heavyweight RCU tasks trace readers on kernel entry. */
+static __always_inline void rcu_task_trace_heavyweight_exit(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -87,10 +87,10 @@ static noinstr void ct_kernel_exit_state(int offset)
* critical sections, and we also must force ordering with the
* next idle sojourn.
*/
- rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
+ rcu_task_trace_heavyweight_enter(); // Before CT state update!
seq = ct_state_inc(offset);
// RCU is no longer watching. Better be in extended quiescent state!
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
}
/*
@@ -109,15 +109,15 @@ static noinstr void ct_kernel_enter_state(int offset)
*/
seq = ct_state_inc(offset);
// RCU is now watching. Better not be in an extended quiescent state!
- rcu_dynticks_task_trace_exit(); // After ->dynticks update!
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
+ rcu_task_trace_heavyweight_exit(); // After CT state update!
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
}
/*
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
- * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * We crowbar the ->nmi_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
*/
@@ -125,19 +125,19 @@ static void noinstr ct_kernel_exit(bool user, int offset)
{
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
- WRITE_ONCE(ct->dynticks_nmi_nesting, 0);
+ WARN_ON_ONCE(ct_nmi_nesting() != CT_NESTING_IRQ_NONIDLE);
+ WRITE_ONCE(ct->nmi_nesting, 0);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- ct_dynticks_nesting() == 0);
- if (ct_dynticks_nesting() != 1) {
+ ct_nesting() == 0);
+ if (ct_nesting() != 1) {
// RCU will still be watching, so just do accounting and leave.
- ct->dynticks_nesting--;
+ ct->nesting--;
return;
}
instrumentation_begin();
lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), ct_dynticks_nesting(), 0, ct_dynticks());
+ trace_rcu_watching(TPS("End"), ct_nesting(), 0, ct_rcu_watching());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rcu_preempt_deferred_qs(current);
@@ -145,18 +145,18 @@ static void noinstr ct_kernel_exit(bool user, int offset)
instrument_atomic_write(&ct->state, sizeof(ct->state));
instrumentation_end();
- WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+ WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */
// RCU is watching here ...
ct_kernel_exit_state(offset);
// ... but is no longer watching here.
- rcu_dynticks_task_enter();
+ rcu_task_exit();
}
/*
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
- * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * We crowbar the ->nmi_nesting field to CT_NESTING_IRQ_NONIDLE to
* allow for the possibility of usermode upcalls messing up our count of
* interrupt nesting level during the busy period that is just now starting.
*/
@@ -166,14 +166,14 @@ static void noinstr ct_kernel_enter(bool user, int offset)
long oldval;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
- oldval = ct_dynticks_nesting();
+ oldval = ct_nesting();
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
if (oldval) {
// RCU was already watching, so just do accounting and leave.
- ct->dynticks_nesting++;
+ ct->nesting++;
return;
}
- rcu_dynticks_task_exit();
+ rcu_task_enter();
// RCU is not watching here ...
ct_kernel_enter_state(offset);
// ... but is watching here.
@@ -182,11 +182,11 @@ static void noinstr ct_kernel_enter(bool user, int offset)
// instrumentation for the noinstr ct_kernel_enter_state()
instrument_atomic_write(&ct->state, sizeof(ct->state));
- trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks());
+ trace_rcu_watching(TPS("Start"), ct_nesting(), 1, ct_rcu_watching());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
- WRITE_ONCE(ct->dynticks_nesting, 1);
- WARN_ON_ONCE(ct_dynticks_nmi_nesting());
- WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+ WRITE_ONCE(ct->nesting, 1);
+ WARN_ON_ONCE(ct_nmi_nesting());
+ WRITE_ONCE(ct->nmi_nesting, CT_NESTING_IRQ_NONIDLE);
instrumentation_end();
}
@@ -194,7 +194,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
* ct_nmi_exit - inform RCU of exit from NMI context
*
* If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting
+ * RCU-idle period, update ct->state and ct->nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
@@ -207,47 +207,47 @@ void noinstr ct_nmi_exit(void)
instrumentation_begin();
/*
- * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+ * Check for ->nmi_nesting underflow and bad CT state.
* (We are exiting an NMI handler, so RCU better be paying attention
* to us!)
*/
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);
- WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+ WARN_ON_ONCE(ct_nmi_nesting() <= 0);
+ WARN_ON_ONCE(!rcu_is_watching_curr_cpu());
/*
* If the nesting level is not 1, the CPU wasn't RCU-idle, so
* leave it in non-RCU-idle state.
*/
- if (ct_dynticks_nmi_nesting() != 1) {
- trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
- ct_dynticks());
- WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
- ct_dynticks_nmi_nesting() - 2);
+ if (ct_nmi_nesting() != 1) {
+ trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2,
+ ct_rcu_watching());
+ WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */
+ ct_nmi_nesting() - 2);
instrumentation_end();
return;
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_dynticks());
- WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ trace_rcu_watching(TPS("Endirq"), ct_nmi_nesting(), 0, ct_rcu_watching());
+ WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */
// instrumentation for the noinstr ct_kernel_exit_state()
instrument_atomic_write(&ct->state, sizeof(ct->state));
instrumentation_end();
// RCU is watching here ...
- ct_kernel_exit_state(RCU_DYNTICKS_IDX);
+ ct_kernel_exit_state(CT_RCU_WATCHING);
// ... but is no longer watching here.
if (!in_nmi())
- rcu_dynticks_task_enter();
+ rcu_task_exit();
}
/**
* ct_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update ct->state and
- * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * ct->nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
@@ -261,27 +261,27 @@ void noinstr ct_nmi_enter(void)
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
/* Complain about underflow. */
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);
+ WARN_ON_ONCE(ct_nmi_nesting() < 0);
/*
- * If idle from RCU viewpoint, atomically increment ->dynticks
- * to mark non-idle and increment ->dynticks_nmi_nesting by one.
- * Otherwise, increment ->dynticks_nmi_nesting by two. This means
- * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+ * If idle from RCU viewpoint, atomically increment CT state
+ * to mark non-idle and increment ->nmi_nesting by one.
+ * Otherwise, increment ->nmi_nesting by two. This means
+ * if ->nmi_nesting is equal to one, we are guaranteed
* to be in the outermost NMI handler that interrupted an RCU-idle
* period (observation due to Andy Lutomirski).
*/
- if (rcu_dynticks_curr_cpu_in_eqs()) {
+ if (!rcu_is_watching_curr_cpu()) {
if (!in_nmi())
- rcu_dynticks_task_exit();
+ rcu_task_exit();
// RCU is not watching here ...
- ct_kernel_enter_state(RCU_DYNTICKS_IDX);
+ ct_kernel_enter_state(CT_RCU_WATCHING);
// ... but is watching here.
instrumentation_begin();
- // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
+ // instrumentation for the noinstr rcu_is_watching_curr_cpu()
instrument_atomic_read(&ct->state, sizeof(ct->state));
// instrumentation for the noinstr ct_kernel_enter_state()
instrument_atomic_write(&ct->state, sizeof(ct->state));
@@ -294,12 +294,12 @@ void noinstr ct_nmi_enter(void)
instrumentation_begin();
}
- trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
- ct_dynticks_nmi_nesting(),
- ct_dynticks_nmi_nesting() + incby, ct_dynticks());
+ trace_rcu_watching(incby == 1 ? TPS("Startirq") : TPS("++="),
+ ct_nmi_nesting(),
+ ct_nmi_nesting() + incby, ct_rcu_watching());
instrumentation_end();
- WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
- ct_dynticks_nmi_nesting() + incby);
+ WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */
+ ct_nmi_nesting() + incby);
barrier();
}
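
The increment-by-one-or-two bookkeeping above is compact enough to model on its own. Below is a minimal userspace sketch (toy code, not part of the patch; the single-CPU, no-real-NMI setting and all names are illustrative) of how ->nmi_nesting distinguishes the outermost handler that interrupted an RCU-idle period:

#include <assert.h>

static long nmi_nesting;	/* models ct->nmi_nesting */
static int rcu_watching;	/* models the CT_RCU_WATCHING state */

static void model_nmi_enter(void)
{
	long incby = 2;

	if (!rcu_watching) {		/* idle from RCU's viewpoint */
		rcu_watching = 1;
		incby = 1;		/* outermost handler ends at nesting == 1 */
	}
	nmi_nesting += incby;
}

static void model_nmi_exit(void)
{
	if (nmi_nesting != 1) {		/* nested: CPU stays non-idle */
		nmi_nesting -= 2;
		return;
	}
	nmi_nesting = 0;		/* outermost: restore RCU-idleness */
	rcu_watching = 0;
}

int main(void)
{
	model_nmi_enter();		/* interrupts idle: nesting == 1 */
	model_nmi_enter();		/* nested NMI:      nesting == 3 */
	model_nmi_exit();		/* back to 1 */
	model_nmi_exit();		/* back to 0, RCU idle again */
	assert(nmi_nesting == 0 && !rcu_watching);
	return 0;
}

Because only the outermost entry adds 1, a count of exactly 1 at exit time is an unambiguous "this NMI interrupted an RCU-idle CPU" signal, which is the observation credited to Andy Lutomirski in the comment above.
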
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
void noinstr ct_idle_enter(void)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
- ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);
+ ct_kernel_exit(false, CT_RCU_WATCHING + CT_STATE_IDLE);
}
EXPORT_SYMBOL_GPL(ct_idle_enter);
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
unsigned long flags;
raw_local_irq_save(flags);
- ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);
+ ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE);
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ct_idle_exit);
@@ -485,7 +485,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
* user_exit() or ct_irq_enter(). Let's remove RCU's dependency
* on the tick.
*/
- if (state == CONTEXT_USER) {
+ if (state == CT_STATE_USER) {
instrumentation_begin();
trace_user_enter(0);
vtime_user_enter(current);
@@ -504,7 +504,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*/
- ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);
+ ct_kernel_exit(true, CT_RCU_WATCHING + state);
/*
* Special case if we only track user <-> kernel transitions for tickless
@@ -534,7 +534,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
/*
* Tracking for vtime and RCU EQS. Make sure we don't race
* with NMIs. OTOH we don't care about ordering here since
- * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ * RCU only requires CT_RCU_WATCHING increments to be fully
* ordered.
*/
raw_atomic_add(state, &ct->state);
@@ -620,8 +620,8 @@ void noinstr __ct_user_exit(enum ctx_state state)
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
*/
- ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
- if (state == CONTEXT_USER) {
+ ct_kernel_enter(true, CT_RCU_WATCHING - state);
+ if (state == CT_STATE_USER) {
instrumentation_begin();
vtime_user_exit(current);
trace_user_exit(0);
@@ -634,17 +634,17 @@ void noinstr __ct_user_exit(enum ctx_state state)
* In this case we don't care about any concurrency/ordering.
*/
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
- raw_atomic_set(&ct->state, CONTEXT_KERNEL);
+ raw_atomic_set(&ct->state, CT_STATE_KERNEL);
} else {
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
/* Tracking for vtime only, no concurrent RCU EQS accounting */
- raw_atomic_set(&ct->state, CONTEXT_KERNEL);
+ raw_atomic_set(&ct->state, CT_STATE_KERNEL);
} else {
/*
* Tracking for vtime and RCU EQS. Make sure we don't race
* with NMIs. OTOH we don't care about ordering here since
- * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ * RCU only requires CT_RCU_WATCHING increments to be fully
* ordered.
*/
raw_atomic_sub(state, &ct->state);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b1fd2a3db91a..d293d52a3e00 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -330,7 +330,7 @@ static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state st
/* Poll for one millisecond */
arch_cpuhp_sync_state_poll();
} else {
- usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
}
sync = atomic_read(st);
}
@@ -1808,6 +1808,7 @@ static int __init parallel_bringup_parse_param(char *arg)
}
early_param("cpuhp.parallel", parallel_bringup_parse_param);
+#ifdef CONFIG_HOTPLUG_SMT
static inline bool cpuhp_smt_aware(void)
{
return cpu_smt_max_threads > 1;
@@ -1817,6 +1818,21 @@ static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_primary_thread_mask;
}
+#else
+static inline bool cpuhp_smt_aware(void)
+{
+ return false;
+}
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+ return cpu_none_mask;
+}
+#endif
+
+bool __weak arch_cpuhp_init_parallel_bringup(void)
+{
+ return true;
+}
/*
* On architectures which have enabled parallel bringup this invokes all BP
@@ -2689,9 +2705,7 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
return ret;
}
-/**
- * Check if the core a CPU belongs to is online
- */
+/* Check if the core a CPU belongs to is online */
#if !defined(topology_is_core_online)
static inline bool topology_is_core_online(unsigned int cpu)
{
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 63cf89393c6e..c1048893f4b6 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -505,7 +505,7 @@ int crash_check_hotplug_support(void)
crash_hotplug_lock();
/* Obtain lock while reading crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return 0;
}
@@ -520,18 +520,25 @@ int crash_check_hotplug_support(void)
}
/*
- * To accurately reflect hot un/plug changes of cpu and memory resources
- * (including onling and offlining of those resources), the elfcorehdr
- * (which is passed to the crash kernel via the elfcorehdr= parameter)
- * must be updated with the new list of CPUs and memories.
+ * To accurately reflect hot un/plug changes of CPU and Memory resources
+ * (including onlining and offlining of those resources), the relevant
+ * kexec segments must be updated with the latest CPU and Memory resources.
*
- * In order to make changes to elfcorehdr, two conditions are needed:
- * First, the segment containing the elfcorehdr must be large enough
- * to permit a growing number of resources; the elfcorehdr memory size
- * is based on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES.
- * Second, purgatory must explicitly exclude the elfcorehdr from the
- * list of segments it checks (since the elfcorehdr changes and thus
- * would require an update to purgatory itself to update the digest).
+ * Architectures must ensure two things for all segments that need
+ * updating during hotplug events:
+ *
+ * 1. Segments must be large enough to accommodate a growing number of
+ * resources.
+ * 2. Exclude the segments from SHA verification.
+ *
+ * For example, on most architectures, the elfcorehdr (which is passed
+ * to the crash kernel via the elfcorehdr= parameter) must include the
+ * new list of CPUs and memory. To make changes to the elfcorehdr, it
+ * should be large enough to permit a growing number of CPU and Memory
+ * resources. One can estimate the elfcorehdr memory size based on
+ * NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES. The elfcorehdr is
+ * excluded from SHA verification by default if the architecture
+ * supports crash hotplug.
*/
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg)
{
@@ -540,7 +547,7 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu,
crash_hotplug_lock();
/* Obtain lock while changing crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return;
}
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index 64d44a52c011..a620fb4b2116 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -335,6 +335,9 @@ int __init parse_crashkernel(char *cmdline,
if (!*crash_size)
ret = -EINVAL;
+ if (*crash_size >= system_ram)
+ ret = -EINVAL;
+
return ret;
}
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index c06e56be0ca1..4c0dcd909121 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -8,8 +8,7 @@ config HAS_DMA
depends on !NO_DMA
default y
-config DMA_OPS
- depends on HAS_DMA
+config DMA_OPS_HELPERS
bool
#
@@ -109,8 +108,8 @@ config DMA_BOUNCE_UNALIGNED_KMALLOC
config DMA_NEED_SYNC
def_bool ARCH_HAS_SYNC_DMA_FOR_DEVICE || ARCH_HAS_SYNC_DMA_FOR_CPU || \
- ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || DMA_OPS || \
- SWIOTLB
+ ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || \
+ ARCH_HAS_DMA_OPS || SWIOTLB
config DMA_RESTRICTED_POOL
bool "DMA Restricted Pool"
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 21926e46ef4f..6977033444a3 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HAS_DMA) += mapping.o direct.o
-obj-$(CONFIG_DMA_OPS) += ops_helpers.o
-obj-$(CONFIG_DMA_OPS) += dummy.o
+obj-$(CONFIG_DMA_OPS_HELPERS) += ops_helpers.o
+obj-$(CONFIG_ARCH_HAS_DMA_OPS) += dummy.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4480a3cd92e0..5b4e6d3bf7bc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -20,7 +20,7 @@
* it for entirely different regions. In that case the arch code needs to
* override the variable below for dma-direct to work properly.
*/
-unsigned int zone_dma_bits __ro_after_init = 24;
+u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
@@ -59,7 +59,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
* zones.
*/
*phys_limit = dma_to_phys(dev, dma_limit);
- if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+ if (*phys_limit <= zone_dma_limit)
return GFP_DMA;
if (*phys_limit <= DMA_BIT_MASK(32))
return GFP_DMA32;
@@ -140,7 +140,7 @@ again:
if (!page)
page = alloc_pages_node(node, gfp, get_order(size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_free_contiguous(dev, page, size);
+ __free_pages(page, get_order(size));
page = NULL;
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -580,7 +580,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
* part of the check.
*/
if (IS_ENABLED(CONFIG_ZONE_DMA))
- min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
+ min_mask = min_t(u64, min_mask, zone_dma_limit);
return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
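
For readers tracking the zone_dma_bits -> zone_dma_limit conversion through these hunks, the zone-selection rule can be tried standalone. A sketch under assumed values (DMA_BIT_MASK is re-declared locally; the real limit is set per architecture):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t zone_dma_limit = DMA_BIT_MASK(24);	/* arch may raise this */

static const char *pick_zone(uint64_t phys_limit)
{
	if (phys_limit <= zone_dma_limit)
		return "GFP_DMA";
	if (phys_limit <= DMA_BIT_MASK(32))
		return "GFP_DMA32";
	return "GFP_KERNEL";
}

int main(void)
{
	printf("%s\n", pick_zone(DMA_BIT_MASK(24)));	/* GFP_DMA */
	printf("%s\n", pick_zone(DMA_BIT_MASK(31)));	/* GFP_DMA32 */
	printf("%s\n", pick_zone(DMA_BIT_MASK(40)));	/* GFP_KERNEL */
	return 0;
}

The point of the u64 limit is that architectures whose narrow DMA zone does not end on a power-of-two boundary can now express it exactly, which a bit count never could.
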
diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
index b492d59ac77e..92de80e5b057 100644
--- a/kernel/dma/dummy.c
+++ b/kernel/dma/dummy.c
@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
{
return DMA_MAPPING_ERROR;
}
+static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ /*
+ * Dummy ops doesn't support map_page, so unmap_page should never be
+ * called.
+ */
+ WARN_ON_ONCE(true);
+}
static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
return -EINVAL;
}
+static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ /*
+ * Dummy ops doesn't support map_sg, so unmap_sg should never be called.
+ */
+ WARN_ON_ONCE(true);
+}
+
static int dma_dummy_supported(struct device *hwdev, u64 mask)
{
return 0;
@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
const struct dma_map_ops dma_dummy_ops = {
.mmap = dma_dummy_mmap,
.map_page = dma_dummy_map_page,
+ .unmap_page = dma_dummy_unmap_page,
.map_sg = dma_dummy_map_sg,
+ .unmap_sg = dma_dummy_unmap_sg,
.dma_supported = dma_dummy_supported,
};
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b1c18058d55f..864a1121bf08 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -10,6 +10,7 @@
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
@@ -17,6 +18,9 @@
#include "debug.h"
#include "direct.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/dma.h>
+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -116,8 +120,12 @@ EXPORT_SYMBOL(dmam_alloc_attrs);
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
const struct dma_map_ops *ops)
{
+ if (use_dma_iommu(dev))
+ return false;
+
if (likely(!ops))
return true;
+
#ifdef CONFIG_DMA_OPS_BYPASS
if (dev->dma_ops_bypass)
return min_not_zero(mask, dev->bus_dma_limit) >=
@@ -159,9 +167,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
if (dma_map_direct(dev, ops) ||
arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ else if (use_dma_iommu(dev))
+ addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
kmsan_handle_dma(page, offset, size, dir);
+ trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
+ attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
return addr;
@@ -177,8 +189,11 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
if (dma_map_direct(dev, ops) ||
arch_dma_unmap_page_direct(dev, addr + size))
dma_direct_unmap_page(dev, addr, size, dir, attrs);
- else if (ops->unmap_page)
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_page(dev, addr, size, dir, attrs);
+ else
ops->unmap_page(dev, addr, size, dir, attrs);
+ trace_dma_unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
@@ -197,11 +212,14 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
if (dma_map_direct(dev, ops) ||
arch_dma_map_sg_direct(dev, sg, nents))
ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ else if (use_dma_iommu(dev))
+ ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
else
ents = ops->map_sg(dev, sg, nents, dir, attrs);
if (ents > 0) {
kmsan_handle_dma_sg(sg, nents, dir);
+ trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
ents != -EIO && ents != -EREMOTEIO)) {
@@ -287,10 +305,13 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
+ trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
debug_dma_unmap_sg(dev, sg, nents, dir);
if (dma_map_direct(dev, ops) ||
arch_dma_unmap_sg_direct(dev, sg, nents))
dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
@@ -309,9 +330,12 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
if (dma_map_direct(dev, ops))
addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ else if (use_dma_iommu(dev))
+ addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
else if (ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+ trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
return addr;
}
@@ -323,8 +347,13 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+ if (dma_map_direct(dev, ops))
+ ; /* nothing to do: uncached and no swiotlb */
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
+ else if (ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
+ trace_dma_unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
@@ -338,8 +367,11 @@ void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
else if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
+ trace_dma_sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);
@@ -352,8 +384,11 @@ void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_single_for_device(dev, addr, size, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_single_for_device(dev, addr, size, dir);
else if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
+ trace_dma_sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);
@@ -366,8 +401,11 @@ void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
else if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);
@@ -380,8 +418,11 @@ void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
else if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
+ trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);
@@ -405,7 +446,7 @@ static void dma_setup_need_sync(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
+ if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
/*
* dma_skip_sync will be reset to %false on first SWIOTLB buffer
* mapping, if any. During the device initialization, it's
@@ -446,6 +487,9 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
if (dma_alloc_direct(dev, ops))
return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
size, attrs);
+ if (use_dma_iommu(dev))
+ return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+ size, attrs);
if (!ops->get_sgtable)
return -ENXIO;
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
@@ -482,6 +526,8 @@ bool dma_can_mmap(struct device *dev)
if (dma_alloc_direct(dev, ops))
return dma_direct_can_mmap(dev);
+ if (use_dma_iommu(dev))
+ return true;
return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -508,6 +554,9 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
if (dma_alloc_direct(dev, ops))
return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
attrs);
+ if (use_dma_iommu(dev))
+ return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
+ attrs);
if (!ops->mmap)
return -ENXIO;
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -520,6 +569,10 @@ u64 dma_get_required_mask(struct device *dev)
if (dma_alloc_direct(dev, ops))
return dma_direct_get_required_mask(dev);
+
+ if (use_dma_iommu(dev))
+ return DMA_BIT_MASK(32);
+
if (ops->get_required_mask)
return ops->get_required_mask(dev);
@@ -559,11 +612,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (dma_alloc_direct(dev, ops))
cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+ else if (use_dma_iommu(dev))
+ cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
else if (ops->alloc)
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
else
return NULL;
+ trace_dma_alloc(dev, cpu_addr, *dma_handle, size, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
return cpu_addr;
}
@@ -588,9 +644,12 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
if (!cpu_addr)
return;
+ trace_dma_free(dev, cpu_addr, dma_handle, size, attrs);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
if (dma_alloc_direct(dev, ops))
dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+ else if (use_dma_iommu(dev))
+ iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
else if (ops->free)
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
@@ -611,6 +670,8 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
if (dma_alloc_direct(dev, ops))
return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+ if (use_dma_iommu(dev))
+ return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
if (!ops->alloc_pages_op)
return NULL;
return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
@@ -621,8 +682,11 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
{
struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
- if (page)
+ if (page) {
+ trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
+ dir, 0);
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+ }
return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -635,6 +699,8 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
size = PAGE_ALIGN(size);
if (dma_alloc_direct(dev, ops))
dma_direct_free_pages(dev, size, page, dma_handle, dir);
+ else if (use_dma_iommu(dev))
+ dma_common_free_pages(dev, size, page, dma_handle, dir);
else if (ops->free_pages)
ops->free_pages(dev, size, page, dma_handle, dir);
}
@@ -642,6 +708,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
+ trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
debug_dma_unmap_page(dev, dma_handle, size, dir);
__dma_free_pages(dev, size, page, dma_handle, dir);
}
@@ -687,7 +754,6 @@ out_free_sgt:
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
struct sg_table *sgt;
if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
@@ -695,13 +761,14 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
if (WARN_ON_ONCE(gfp & __GFP_COMP))
return NULL;
- if (ops && ops->alloc_noncontiguous)
- sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
+ if (use_dma_iommu(dev))
+ sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
else
sgt = alloc_single_sgt(dev, size, dir, gfp);
if (sgt) {
sgt->nents = 1;
+ trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
}
return sgt;
@@ -720,11 +787,11 @@ static void free_single_sgt(struct device *dev, size_t size,
void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
+ trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
- if (ops && ops->free_noncontiguous)
- ops->free_noncontiguous(dev, size, sgt, dir);
+
+ if (use_dma_iommu(dev))
+ iommu_dma_free_noncontiguous(dev, size, sgt, dir);
else
free_single_sgt(dev, size, sgt, dir);
}
@@ -733,37 +800,26 @@ EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- if (ops && ops->alloc_noncontiguous)
- return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+ if (use_dma_iommu(dev))
+ return iommu_dma_vmap_noncontiguous(dev, size, sgt);
+
return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops && ops->alloc_noncontiguous)
- vunmap(vaddr);
+ if (use_dma_iommu(dev))
+ iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops && ops->alloc_noncontiguous) {
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (vma->vm_pgoff >= count ||
- vma_pages(vma) > count - vma->vm_pgoff)
- return -ENXIO;
- return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
- }
+ if (use_dma_iommu(dev))
+ return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
@@ -772,32 +828,37 @@ static int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (use_dma_iommu(dev)) {
+ if (WARN_ON(ops))
+ return false;
+ return true;
+ }
+
/*
- * ->dma_supported sets the bypass flag, so we must always call
- * into the method here unless the device is truly direct mapped.
+ * ->dma_supported sets and clears the bypass flag, so ignore it here
+ * and always call into the method if there is one.
*/
- if (!ops)
- return dma_direct_supported(dev, mask);
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
+ if (ops) {
+ if (!ops->dma_supported)
+ return true;
+ return ops->dma_supported(dev, mask);
+ }
+
+ return dma_direct_supported(dev, mask);
}
bool dma_pci_p2pdma_supported(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- /* if ops is not set, dma direct will be used which supports P2PDMA */
- if (!ops)
- return true;
-
/*
* Note: dma_ops_bypass is not checked here because P2PDMA should
* not be used with dma mapping ops that do not have support even
* if the specific device is bypassing them.
*/
- return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
+ /* if ops is not set, dma direct and default IOMMU support P2PDMA */
+ return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
@@ -852,7 +913,7 @@ bool dma_addressing_limited(struct device *dev)
dma_get_required_mask(dev))
return true;
- if (unlikely(ops))
+ if (unlikely(ops) || use_dma_iommu(dev))
return false;
return !dma_direct_all_ram_mapped(dev);
}
@@ -865,6 +926,8 @@ size_t dma_max_mapping_size(struct device *dev)
if (dma_map_direct(dev, ops))
size = dma_direct_max_mapping_size(dev);
+ else if (use_dma_iommu(dev))
+ size = iommu_dma_max_mapping_size(dev);
else if (ops && ops->max_mapping_size)
size = ops->max_mapping_size(dev);
@@ -877,7 +940,9 @@ size_t dma_opt_mapping_size(struct device *dev)
const struct dma_map_ops *ops = get_dma_ops(dev);
size_t size = SIZE_MAX;
- if (ops && ops->opt_mapping_size)
+ if (use_dma_iommu(dev))
+ size = iommu_dma_opt_mapping_size();
+ else if (ops && ops->opt_mapping_size)
size = ops->opt_mapping_size();
return min(dma_max_mapping_size(dev), size);
@@ -888,6 +953,9 @@ unsigned long dma_get_merge_boundary(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (use_dma_iommu(dev))
+ return iommu_dma_get_merge_boundary(dev);
+
if (!ops || !ops->get_merge_boundary)
return 0; /* can't merge */
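
Every mapping.c hunk above repeats one dispatch pattern: direct mapping first, then the IOMMU path gated by use_dma_iommu(), and only then the per-bus dma_map_ops. A condensed userspace model of that precedence (stand-in types; the real helpers take more arguments):

#include <stdio.h>

enum path { DIRECT, IOMMU, OPS };

struct device { int map_direct; int use_iommu; };

static enum path pick_path(const struct device *dev)
{
	if (dev->map_direct)
		return DIRECT;	/* dma_direct_map_page() and friends */
	if (dev->use_iommu)
		return IOMMU;	/* iommu_dma_map_page() called directly */
	return OPS;		/* legacy ops->map_page() indirect call */
}

int main(void)
{
	struct device iommu_dev = { .use_iommu = 1 };

	printf("path=%d\n", pick_path(&iommu_dev));	/* path=1 (IOMMU) */
	return 0;
}

Hoisting the IOMMU case out of the ops indirection is the design choice here: dma-iommu becomes a direct call rather than yet another dma_map_ops instance.
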
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index af4a6ef48ce0..9afd569eadb9 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -4,6 +4,7 @@
* the allocated memory contains normal pages in the direct kernel mapping.
*/
#include <linux/dma-map-ops.h>
+#include <linux/iommu-dma.h>
static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
@@ -70,8 +71,12 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
if (!page)
return NULL;
- *dma_handle = ops->map_page(dev, page, 0, size, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ if (use_dma_iommu(dev))
+ *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ else
+ *dma_handle = ops->map_page(dev, page, 0, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (*dma_handle == DMA_MAPPING_ERROR) {
dma_free_contiguous(dev, page, size);
return NULL;
@@ -86,7 +91,10 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (ops->unmap_page)
+ if (use_dma_iommu(dev))
+ iommu_dma_unmap_page(dev, dma_handle, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ else if (ops->unmap_page)
ops->unmap_page(dev, dma_handle, size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
dma_free_contiguous(dev, page, size);
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index d10613eb0f63..7b04f7575796 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -70,9 +70,9 @@ static bool cma_in_zone(gfp_t gfp)
/* CMA can't cross zone boundaries, see cma_activate_area() */
end = cma_get_base(cma) + size - 1;
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
- return end <= DMA_BIT_MASK(zone_dma_bits);
+ return end <= zone_dma_limit;
if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
- return end <= DMA_BIT_MASK(32);
+ return end <= max(DMA_BIT_MASK(32), zone_dma_limit);
return true;
}
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 27596f3b4aef..9e2afad1c615 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -10,8 +10,10 @@ struct page **dma_common_find_pages(void *cpu_addr)
{
struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || area->flags != VM_DMA_COHERENT)
+ if (!area || !(area->flags & VM_DMA_COHERENT))
return NULL;
+ WARN(area->flags != VM_DMA_COHERENT,
+ "unexpected flags in area: %p\n", cpu_addr);
return area->pages;
}
@@ -61,7 +63,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
{
struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || area->flags != VM_DMA_COHERENT) {
+ if (!area || !(area->flags & VM_DMA_COHERENT)) {
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index df68d29740a0..abcf3fa63a56 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -450,9 +450,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
if (!remap)
io_tlb_default_mem.can_grow = true;
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
- io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+ io_tlb_default_mem.phys_limit = zone_dma_limit;
else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
- io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+ io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
else
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif
@@ -629,7 +629,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
}
gfp &= ~GFP_ZONEMASK;
- if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+ if (phys_limit <= zone_dma_limit)
gfp |= __GFP_DMA;
else if (phys_limit <= DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 90843cc38588..5b6934e23c21 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -182,7 +182,7 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
unsigned long nr = syscall_get_nr(current, regs);
- CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8a6c6bbcd658..5a8071c45c80 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -155,20 +155,55 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
return data.ret;
}
+enum event_type_t {
+ EVENT_FLEXIBLE = 0x01,
+ EVENT_PINNED = 0x02,
+ EVENT_TIME = 0x04,
+ EVENT_FROZEN = 0x08,
+ /* see ctx_resched() for details */
+ EVENT_CPU = 0x10,
+ EVENT_CGROUP = 0x20,
+
+ /* compound helpers */
+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+ EVENT_TIME_FROZEN = EVENT_TIME | EVENT_FROZEN,
+};
+
+static inline void __perf_ctx_lock(struct perf_event_context *ctx)
+{
+ raw_spin_lock(&ctx->lock);
+ WARN_ON_ONCE(ctx->is_active & EVENT_FROZEN);
+}
+
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
- raw_spin_lock(&cpuctx->ctx.lock);
+ __perf_ctx_lock(&cpuctx->ctx);
if (ctx)
- raw_spin_lock(&ctx->lock);
+ __perf_ctx_lock(ctx);
+}
+
+static inline void __perf_ctx_unlock(struct perf_event_context *ctx)
+{
+ /*
+ * If ctx_sched_in() didn't again set any ALL flags, clean up
+ * after ctx_sched_out() by clearing is_active.
+ */
+ if (ctx->is_active & EVENT_FROZEN) {
+ if (!(ctx->is_active & EVENT_ALL))
+ ctx->is_active = 0;
+ else
+ ctx->is_active &= ~EVENT_FROZEN;
+ }
+ raw_spin_unlock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
- raw_spin_unlock(&ctx->lock);
- raw_spin_unlock(&cpuctx->ctx.lock);
+ __perf_ctx_unlock(ctx);
+ __perf_ctx_unlock(&cpuctx->ctx);
}
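
The EVENT_FROZEN lifecycle spans ctx_time_freeze(), ctx_sched_out() and __perf_ctx_unlock(), so the flag algebra is easier to follow in one place. A standalone illustration (flag values taken from the enum above; the sequence is a simplification of one freeze/unlock round trip):

#include <assert.h>

#define EVENT_FLEXIBLE	0x01
#define EVENT_PINNED	0x02
#define EVENT_TIME	0x04
#define EVENT_FROZEN	0x08
#define EVENT_ALL	(EVENT_FLEXIBLE | EVENT_PINNED)

int main(void)
{
	int is_active = EVENT_ALL | EVENT_TIME;

	/* ctx_time_freeze(): keep time readable while rescheduling */
	is_active |= EVENT_FROZEN;

	/* ctx_sched_out(EVENT_ALL): FROZEN preserves TIME|FROZEN, no hole */
	is_active &= ~EVENT_ALL;
	assert(is_active == (EVENT_TIME | EVENT_FROZEN));

	/* __perf_ctx_unlock(): ctx_sched_in() set no ALL bits, so clear */
	if (!(is_active & EVENT_ALL))
		is_active = 0;
	assert(is_active == 0);
	return 0;
}
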
#define TASK_TOMBSTONE ((void *)-1L)
@@ -264,6 +299,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
+ struct perf_cpu_context *cpuctx;
struct event_function_struct efs = {
.event = event,
.func = func,
@@ -291,22 +327,25 @@ again:
if (!task_function_call(task, event_function, &efs))
return;
- raw_spin_lock_irq(&ctx->lock);
+ local_irq_disable();
+ cpuctx = this_cpu_ptr(&perf_cpu_context);
+ perf_ctx_lock(cpuctx, ctx);
/*
* Reload the task pointer, it might have been changed by
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
- if (task == TASK_TOMBSTONE) {
- raw_spin_unlock_irq(&ctx->lock);
- return;
- }
+ if (task == TASK_TOMBSTONE)
+ goto unlock;
if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
+ perf_ctx_unlock(cpuctx, ctx);
+ local_irq_enable();
goto again;
}
func(event, NULL, ctx, data);
- raw_spin_unlock_irq(&ctx->lock);
+unlock:
+ perf_ctx_unlock(cpuctx, ctx);
+ local_irq_enable();
}
/*
@@ -369,16 +408,6 @@ unlock:
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
-enum event_type_t {
- EVENT_FLEXIBLE = 0x1,
- EVENT_PINNED = 0x2,
- EVENT_TIME = 0x4,
- /* see ctx_resched() for details */
- EVENT_CPU = 0x8,
- EVENT_CGROUP = 0x10,
- EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
/*
* perf_sched_events : >0 events exist
*/
@@ -407,6 +436,11 @@ static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
+static cpumask_var_t perf_online_core_mask;
+static cpumask_var_t perf_online_die_mask;
+static cpumask_var_t perf_online_cluster_mask;
+static cpumask_var_t perf_online_pkg_mask;
+static cpumask_var_t perf_online_sys_mask;
static struct kmem_cache *perf_event_cache;
/*
@@ -685,30 +719,32 @@ do { \
___p; \
})
+#define for_each_epc(_epc, _ctx, _pmu, _cgroup) \
+ list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
+ if (_cgroup && !_epc->nr_cgroups) \
+ continue; \
+ else if (_pmu && _epc->pmu != _pmu) \
+ continue; \
+ else
+
static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
{
struct perf_event_pmu_context *pmu_ctx;
- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
- continue;
+ for_each_epc(pmu_ctx, ctx, NULL, cgroup)
perf_pmu_disable(pmu_ctx->pmu);
- }
}
static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
{
struct perf_event_pmu_context *pmu_ctx;
- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
- continue;
+ for_each_epc(pmu_ctx, ctx, NULL, cgroup)
perf_pmu_enable(pmu_ctx->pmu);
- }
}
-static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
-static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
+static void ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
+static void ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
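
The for_each_epc() macro leans on the dangling-else idiom: the if/else chain binds to the loop, so a non-matching entry hits continue and a matching one falls through to the statement written after the macro. A self-contained userspace rendition (an array stands in for the pmu_ctx_list):

#include <stdio.h>

struct epc { int nr_cgroups; int pmu; };

#define for_each_epc(_epc, _arr, _n, _pmu, _cgroup)		\
	for (_epc = (_arr); _epc < (_arr) + (_n); _epc++)	\
		if ((_cgroup) && !_epc->nr_cgroups)		\
			continue;				\
		else if ((_pmu) && _epc->pmu != (_pmu))		\
			continue;				\
		else

int main(void)
{
	struct epc list[] = { {0, 1}, {2, 1}, {2, 2} };
	struct epc *e;

	for_each_epc(e, list, 3, 1, 1)	/* filter: pmu 1, cgroup-only */
		printf("pmu %d, %d cgroup events\n", e->pmu, e->nr_cgroups);
	return 0;	/* prints only the middle entry */
}
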
#ifdef CONFIG_CGROUP_PERF
@@ -865,7 +901,7 @@ static void perf_cgroup_switch(struct task_struct *task)
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_ctx_disable(&cpuctx->ctx, true);
- ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
/*
* must not be done before ctxswout due
* to update_cgrp_time_from_cpuctx() in
@@ -877,7 +913,7 @@ static void perf_cgroup_switch(struct task_struct *task)
* perf_cgroup_set_timestamp() in ctx_sched_in()
* to not have to pass task around
*/
- ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
perf_ctx_enable(&cpuctx->ctx, true);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -933,10 +969,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct fd f = fdget(fd);
int ret = 0;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- css = css_tryget_online_from_dir(f.file->f_path.dentry,
+ css = css_tryget_online_from_dir(fd_file(f)->f_path.dentry,
&perf_event_cgrp_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
@@ -1769,6 +1805,14 @@ perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
typeof(*event), group_node))
/*
+ * Does the event attribute request inherit with PERF_SAMPLE_READ
+ */
+static inline bool has_inherit_and_sample_read(struct perf_event_attr *attr)
+{
+ return attr->inherit && (attr->sample_type & PERF_SAMPLE_READ);
+}
+
+/*
* Add an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
@@ -1798,6 +1842,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
ctx->nr_user++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
+ if (has_inherit_and_sample_read(&event->attr))
+ local_inc(&ctx->nr_no_switch_fast);
if (event->state > PERF_EVENT_STATE_OFF)
perf_cgroup_event_enable(event, ctx);
@@ -2022,6 +2068,8 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
ctx->nr_user--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
+ if (has_inherit_and_sample_read(&event->attr))
+ local_dec(&ctx->nr_no_switch_fast);
list_del_rcu(&event->event_entry);
@@ -2317,6 +2365,45 @@ group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
event_sched_out(event, ctx);
}
+static inline void
+__ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, bool final)
+{
+ if (ctx->is_active & EVENT_TIME) {
+ if (ctx->is_active & EVENT_FROZEN)
+ return;
+ update_context_time(ctx);
+ update_cgrp_time_from_cpuctx(cpuctx, final);
+ }
+}
+
+static inline void
+ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
+{
+ __ctx_time_update(cpuctx, ctx, false);
+}
+
+/*
+ * To be used inside perf_ctx_lock() / perf_ctx_unlock(). Lasts until perf_ctx_unlock().
+ */
+static inline void
+ctx_time_freeze(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
+{
+ ctx_time_update(cpuctx, ctx);
+ if (ctx->is_active & EVENT_TIME)
+ ctx->is_active |= EVENT_FROZEN;
+}
+
+static inline void
+ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event)
+{
+ if (ctx->is_active & EVENT_TIME) {
+ if (ctx->is_active & EVENT_FROZEN)
+ return;
+ update_context_time(ctx);
+ update_cgrp_time_from_event(event);
+ }
+}
+
#define DETACH_GROUP 0x01UL
#define DETACH_CHILD 0x02UL
#define DETACH_DEAD 0x04UL
@@ -2336,10 +2423,7 @@ __perf_remove_from_context(struct perf_event *event,
struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
unsigned long flags = (unsigned long)info;
- if (ctx->is_active & EVENT_TIME) {
- update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx, false);
- }
+ ctx_time_update(cpuctx, ctx);
/*
* Ensure event_sched_out() switches to OFF, at the very least
@@ -2424,12 +2508,8 @@ static void __perf_event_disable(struct perf_event *event,
if (event->state < PERF_EVENT_STATE_INACTIVE)
return;
- if (ctx->is_active & EVENT_TIME) {
- update_context_time(ctx);
- update_cgrp_time_from_event(event);
- }
-
perf_pmu_disable(event->pmu_ctx->pmu);
+ ctx_time_update_event(ctx, event);
if (event == event->group_leader)
group_sched_out(event, ctx);
@@ -2645,7 +2725,8 @@ static void add_event_to_ctx(struct perf_event *event,
}
static void task_ctx_sched_out(struct perf_event_context *ctx,
- enum event_type_t event_type)
+ struct pmu *pmu,
+ enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
@@ -2655,18 +2736,19 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- ctx_sched_out(ctx, event_type);
+ ctx_sched_out(ctx, pmu, event_type);
}
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
+ struct perf_event_context *ctx,
+ struct pmu *pmu)
{
- ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
+ ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED);
if (ctx)
- ctx_sched_in(ctx, EVENT_PINNED);
- ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
+ ctx_sched_in(ctx, pmu, EVENT_PINNED);
+ ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
if (ctx)
- ctx_sched_in(ctx, EVENT_FLEXIBLE);
+ ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE);
}
/*
@@ -2684,16 +2766,12 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
* event_type is a bit mask of the types of events involved. For CPU events,
* event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
*/
-/*
- * XXX: ctx_resched() reschedule entire perf_event_context while adding new
- * event to the context or enabling existing event in the context. We can
- * probably optimize it by rescheduling only affected pmu_ctx.
- */
static void ctx_resched(struct perf_cpu_context *cpuctx,
struct perf_event_context *task_ctx,
- enum event_type_t event_type)
+ struct pmu *pmu, enum event_type_t event_type)
{
bool cpu_event = !!(event_type & EVENT_CPU);
+ struct perf_event_pmu_context *epc;
/*
* If pinned groups are involved, flexible groups also need to be
@@ -2704,10 +2782,14 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
event_type &= EVENT_ALL;
- perf_ctx_disable(&cpuctx->ctx, false);
+ for_each_epc(epc, &cpuctx->ctx, pmu, false)
+ perf_pmu_disable(epc->pmu);
+
if (task_ctx) {
- perf_ctx_disable(task_ctx, false);
- task_ctx_sched_out(task_ctx, event_type);
+ for_each_epc(epc, task_ctx, pmu, false)
+ perf_pmu_disable(epc->pmu);
+
+ task_ctx_sched_out(task_ctx, pmu, event_type);
}
/*
@@ -2718,15 +2800,19 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
* - otherwise, do nothing more.
*/
if (cpu_event)
- ctx_sched_out(&cpuctx->ctx, event_type);
+ ctx_sched_out(&cpuctx->ctx, pmu, event_type);
else if (event_type & EVENT_PINNED)
- ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
+ ctx_sched_out(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, task_ctx);
+ perf_event_sched_in(cpuctx, task_ctx, pmu);
- perf_ctx_enable(&cpuctx->ctx, false);
- if (task_ctx)
- perf_ctx_enable(task_ctx, false);
+ for_each_epc(epc, &cpuctx->ctx, pmu, false)
+ perf_pmu_enable(epc->pmu);
+
+ if (task_ctx) {
+ for_each_epc(epc, task_ctx, pmu, false)
+ perf_pmu_enable(epc->pmu);
+ }
}
void perf_pmu_resched(struct pmu *pmu)
@@ -2735,7 +2821,7 @@ void perf_pmu_resched(struct pmu *pmu)
struct perf_event_context *task_ctx = cpuctx->task_ctx;
perf_ctx_lock(cpuctx, task_ctx);
- ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
+ ctx_resched(cpuctx, task_ctx, pmu, EVENT_ALL|EVENT_CPU);
perf_ctx_unlock(cpuctx, task_ctx);
}
@@ -2791,9 +2877,10 @@ static int __perf_install_in_context(void *info)
#endif
if (reprogram) {
- ctx_sched_out(ctx, EVENT_TIME);
+ ctx_time_freeze(cpuctx, ctx);
add_event_to_ctx(event, ctx);
- ctx_resched(cpuctx, task_ctx, get_event_type(event));
+ ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu,
+ get_event_type(event));
} else {
add_event_to_ctx(event, ctx);
}
@@ -2936,8 +3023,7 @@ static void __perf_event_enable(struct perf_event *event,
event->state <= PERF_EVENT_STATE_ERROR)
return;
- if (ctx->is_active)
- ctx_sched_out(ctx, EVENT_TIME);
+ ctx_time_freeze(cpuctx, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
perf_cgroup_event_enable(event, ctx);
@@ -2945,25 +3031,21 @@ static void __perf_event_enable(struct perf_event *event,
if (!ctx->is_active)
return;
- if (!event_filter_match(event)) {
- ctx_sched_in(ctx, EVENT_TIME);
+ if (!event_filter_match(event))
return;
- }
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
- if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
- ctx_sched_in(ctx, EVENT_TIME);
+ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
return;
- }
task_ctx = cpuctx->task_ctx;
if (ctx->task)
WARN_ON_ONCE(task_ctx != ctx);
- ctx_resched(cpuctx, task_ctx, get_event_type(event));
+ ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event));
}
/*
@@ -3231,7 +3313,7 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
struct perf_event *event, *tmp;
struct pmu *pmu = pmu_ctx->pmu;
- if (ctx->task && !ctx->is_active) {
+ if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
struct perf_cpu_pmu_context *cpc;
cpc = this_cpu_ptr(pmu->cpu_pmu_context);
@@ -3239,7 +3321,7 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
cpc->task_epc = NULL;
}
- if (!event_type)
+ if (!(event_type & EVENT_ALL))
return;
perf_pmu_disable(pmu);
@@ -3265,8 +3347,17 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
perf_pmu_enable(pmu);
}
+/*
+ * Be very careful with the @pmu argument since this will change ctx state.
+ * The @pmu argument works for ctx_resched(), because that is symmetric in
+ * ctx_sched_out() / ctx_sched_in() usage and the ctx state ends up invariant.
+ *
+ * However, if you were to be asymmetrical, you could end up with messed up
+ * state, eg. ctx->is_active cleared even though most EPCs would still actually
+ * be active.
+ */
static void
-ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_pmu_context *pmu_ctx;
@@ -3297,34 +3388,36 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
*
* would only update time for the pinned events.
*/
- if (is_active & EVENT_TIME) {
- /* update (and stop) ctx time */
- update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
+ __ctx_time_update(cpuctx, ctx, ctx == &cpuctx->ctx);
+
+ /*
+ * CPU-release for the below ->is_active store,
+ * see __load_acquire() in perf_event_time_now()
+ */
+ barrier();
+ ctx->is_active &= ~event_type;
+
+ if (!(ctx->is_active & EVENT_ALL)) {
/*
- * CPU-release for the below ->is_active store,
- * see __load_acquire() in perf_event_time_now()
+ * For FROZEN, preserve TIME|FROZEN such that perf_event_time_now()
+ * does not observe a hole. perf_ctx_unlock() will clean up.
*/
- barrier();
+ if (ctx->is_active & EVENT_FROZEN)
+ ctx->is_active &= EVENT_TIME_FROZEN;
+ else
+ ctx->is_active = 0;
}
- ctx->is_active &= ~event_type;
- if (!(ctx->is_active & EVENT_ALL))
- ctx->is_active = 0;
-
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
- if (!ctx->is_active)
+ if (!(ctx->is_active & EVENT_ALL))
cpuctx->task_ctx = NULL;
}
is_active ^= ctx->is_active; /* changed bits */
- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
- continue;
+ for_each_epc(pmu_ctx, ctx, pmu, cgroup)
__pmu_ctx_sched_out(pmu_ctx, is_active);
- }
}
/*
@@ -3517,12 +3610,17 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
perf_ctx_disable(ctx, false);
- /* PMIs are disabled; ctx->nr_pending is stable. */
- if (local_read(&ctx->nr_pending) ||
- local_read(&next_ctx->nr_pending)) {
+ /* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
+ if (local_read(&ctx->nr_no_switch_fast) ||
+ local_read(&next_ctx->nr_no_switch_fast)) {
/*
* Must not swap out ctx when there's pending
* events that rely on the ctx->task relation.
+ *
+ * Likewise, when a context contains inherit +
+ * SAMPLE_READ events they should be switched
+ * out using the slow path so that they are
+ * treated as if they were distinct contexts.
*/
raw_spin_unlock(&next_ctx->lock);
rcu_read_unlock();
@@ -3563,7 +3661,7 @@ unlock:
inside_switch:
perf_ctx_sched_task_cb(ctx, false);
- task_ctx_sched_out(ctx, EVENT_ALL);
+ task_ctx_sched_out(ctx, NULL, EVENT_ALL);
perf_ctx_enable(ctx, false);
raw_spin_unlock(&ctx->lock);
@@ -3861,29 +3959,22 @@ static void pmu_groups_sched_in(struct perf_event_context *ctx,
merge_sched_in, &can_add_hw);
}
-static void ctx_groups_sched_in(struct perf_event_context *ctx,
- struct perf_event_groups *groups,
- bool cgroup)
+static void __pmu_ctx_sched_in(struct perf_event_pmu_context *pmu_ctx,
+ enum event_type_t event_type)
{
- struct perf_event_pmu_context *pmu_ctx;
-
- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
- continue;
- pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
- }
-}
+ struct perf_event_context *ctx = pmu_ctx->ctx;
-static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
- struct pmu *pmu)
-{
- pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
+ if (event_type & EVENT_PINNED)
+ pmu_groups_sched_in(ctx, &ctx->pinned_groups, pmu_ctx->pmu);
+ if (event_type & EVENT_FLEXIBLE)
+ pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu_ctx->pmu);
}
static void
-ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
bool cgroup = event_type & EVENT_CGROUP;
@@ -3907,7 +3998,7 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
- if (!is_active)
+ if (!(is_active & EVENT_ALL))
cpuctx->task_ctx = ctx;
else
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
@@ -3919,12 +4010,16 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (is_active & EVENT_PINNED)
- ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+ if (is_active & EVENT_PINNED) {
+ for_each_epc(pmu_ctx, ctx, pmu, cgroup)
+ __pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED);
+ }
/* Then walk through the lower prio flexible groups */
- if (is_active & EVENT_FLEXIBLE)
- ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+ if (is_active & EVENT_FLEXIBLE) {
+ for_each_epc(pmu_ctx, ctx, pmu, cgroup)
+ __pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE);
+ }
}
static void perf_event_context_sched_in(struct task_struct *task)
@@ -3967,10 +4062,10 @@ static void perf_event_context_sched_in(struct task_struct *task)
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
perf_ctx_disable(&cpuctx->ctx, false);
- ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
+ ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
}
- perf_event_sched_in(cpuctx, ctx);
+ perf_event_sched_in(cpuctx, ctx, NULL);
perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
@@ -4093,7 +4188,11 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
- delta = (delta + 7) / 8; /* low pass filter */
+ if (delta >= 0)
+ delta += 7;
+ else
+ delta -= 7;
+ delta /= 8; /* low pass filter */
sample_period = hwc->sample_period + delta;
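
The rounding change matters because C integer division truncates toward zero: the old (delta + 7) / 8 collapsed small negative deltas to zero, so the sample period could drift up but never back down. A standalone demonstration of both filters:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static int64_t old_filter(int64_t delta)
{
	return (delta + 7) / 8;
}

static int64_t new_filter(int64_t delta)
{
	if (delta >= 0)
		delta += 7;
	else
		delta -= 7;
	return delta / 8;
}

int main(void)
{
	/* old: (-3 + 7) / 8 == 0 (period stuck); new: -10 / 8 == -1 */
	printf("old=%" PRId64 " new=%" PRId64 "\n",
	       old_filter(-3), new_filter(-3));
	return 0;
}
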
@@ -4311,14 +4410,14 @@ static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
update_context_time(&cpuctx->ctx);
__pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx, cpu_event);
- __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
+ __pmu_ctx_sched_in(cpu_epc, EVENT_FLEXIBLE);
}
if (task_event)
rotate_ctx(task_epc->ctx, task_event);
if (task_event || (task_epc && cpu_event))
- __pmu_ctx_sched_in(task_epc->ctx, pmu);
+ __pmu_ctx_sched_in(task_epc, EVENT_FLEXIBLE);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4384,7 +4483,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
cpuctx = this_cpu_ptr(&perf_cpu_context);
perf_ctx_lock(cpuctx, ctx);
- ctx_sched_out(ctx, EVENT_TIME);
+ ctx_time_freeze(cpuctx, ctx);
list_for_each_entry(event, &ctx->event_list, event_entry) {
enabled |= event_enable_on_exec(event, ctx);
@@ -4396,9 +4495,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
*/
if (enabled) {
clone_ctx = unclone_ctx(ctx);
- ctx_resched(cpuctx, ctx, event_type);
- } else {
- ctx_sched_in(ctx, EVENT_TIME);
+ ctx_resched(cpuctx, ctx, NULL, event_type);
}
perf_ctx_unlock(cpuctx, ctx);
@@ -4459,16 +4556,24 @@ struct perf_read_data {
int ret;
};
+static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
+
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
+ int local_cpu = smp_processor_id();
u16 local_pkg, event_pkg;
if ((unsigned)event_cpu >= nr_cpu_ids)
return event_cpu;
- if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
- int local_cpu = smp_processor_id();
+ if (event->group_caps & PERF_EV_CAP_READ_SCOPE) {
+ const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu);
+
+ if (cpumask && cpumask_test_cpu(local_cpu, cpumask))
+ return local_cpu;
+ }
+ if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
event_pkg = topology_physical_package_id(event_cpu);
local_pkg = topology_physical_package_id(local_cpu);
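
A simplified sketch of the PERF_EV_CAP_READ_SCOPE shortcut added above, with a plain unsigned long standing in for the scope cpumask (purely illustrative; the kernel resolves the mask from the PMU's topology scope):

#include <stdio.h>

static int pick_read_cpu(int event_cpu, int local_cpu, unsigned long scope_mask)
{
	if (scope_mask & (1UL << local_cpu))
		return local_cpu;	/* cheap local read, no IPI */
	return event_cpu;		/* fall back to a cross-call */
}

int main(void)
{
	/* scope covers CPUs 0-3; local CPU 2 may read an event bound to CPU 0 */
	printf("read on CPU %d\n", pick_read_cpu(0, 2, 0xfUL));
	return 0;
}
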
@@ -4501,10 +4606,7 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
- if (ctx->is_active & EVENT_TIME) {
- update_context_time(ctx);
- update_cgrp_time_from_event(event);
- }
+ ctx_time_update_event(ctx, event);
perf_event_update_time(event);
if (data->group)
@@ -4539,8 +4641,11 @@ unlock:
raw_spin_unlock(&ctx->lock);
}
-static inline u64 perf_event_count(struct perf_event *event)
+static inline u64 perf_event_count(struct perf_event *event, bool self)
{
+ if (self)
+ return local64_read(&event->count);
+
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
@@ -4701,10 +4806,7 @@ again:
* May read while context is not active (e.g., thread is
* blocked), in that case we cannot update context time
*/
- if (ctx->is_active & EVENT_TIME) {
- update_context_time(ctx);
- update_cgrp_time_from_event(event);
- }
+ ctx_time_update_event(ctx, event);
perf_event_update_time(event);
if (group)
@@ -5205,7 +5307,7 @@ static void perf_pending_task_sync(struct perf_event *event)
*/
if (task_work_cancel(current, head)) {
event->pending_work = 0;
- local_dec(&event->ctx->nr_pending);
+ local_dec(&event->ctx->nr_no_switch_fast);
return;
}
@@ -5499,7 +5601,7 @@ static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *
mutex_lock(&event->child_mutex);
(void)perf_event_read(event, false);
- total += perf_event_count(event);
+ total += perf_event_count(event, false);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
@@ -5508,7 +5610,7 @@ static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *
list_for_each_entry(child, &event->child_list, child_list) {
(void)perf_event_read(child, false);
- total += perf_event_count(child);
+ total += perf_event_count(child, false);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
@@ -5590,14 +5692,14 @@ static int __perf_read_group_add(struct perf_event *leader,
/*
* Write {count,id} tuples for every sibling.
*/
- values[n++] += perf_event_count(leader);
+ values[n++] += perf_event_count(leader, false);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&leader->lost_samples);
for_each_sibling_event(sub, leader) {
- values[n++] += perf_event_count(sub);
+ values[n++] += perf_event_count(sub, false);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
if (read_format & PERF_FORMAT_LOST)
@@ -5899,10 +6001,10 @@ static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_op != &perf_fops) {
+ if (fd_file(f)->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
@@ -5962,7 +6064,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
- output_event = output.file->private_data;
+ output_event = fd_file(output)->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
@@ -6177,7 +6279,7 @@ void perf_event_update_userpage(struct perf_event *event)
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
- userpg->offset = perf_event_count(event);
+ userpg->offset = perf_event_count(event, false);
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
@@ -6874,7 +6976,7 @@ static void perf_pending_task(struct callback_head *head)
if (event->pending_work) {
event->pending_work = 0;
perf_sigtrap(event);
- local_dec(&event->ctx->nr_pending);
+ local_dec(&event->ctx->nr_no_switch_fast);
rcuwait_wake_up(&event->pending_work_wait);
}
rcu_read_unlock();
@@ -7256,7 +7358,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
u64 values[5];
int n = 0;
- values[n++] = perf_event_count(event);
+ values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr));
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
@@ -7274,14 +7376,15 @@ static void perf_output_read_one(struct perf_output_handle *handle,
}
static void perf_output_read_group(struct perf_output_handle *handle,
- struct perf_event *event,
- u64 enabled, u64 running)
+ struct perf_event *event,
+ u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
unsigned long flags;
u64 values[6];
int n = 0;
+ bool self = has_inherit_and_sample_read(&event->attr);
/*
* Disabling interrupts avoids all counter scheduling
@@ -7301,7 +7404,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
(leader->state == PERF_EVENT_STATE_ACTIVE))
leader->pmu->read(leader);
- values[n++] = perf_event_count(leader);
+ values[n++] = perf_event_count(leader, self);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
if (read_format & PERF_FORMAT_LOST)
@@ -7316,7 +7419,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
(sub->state == PERF_EVENT_STATE_ACTIVE))
sub->pmu->read(sub);
- values[n++] = perf_event_count(sub);
+ values[n++] = perf_event_count(sub, self);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
if (read_format & PERF_FORMAT_LOST)
@@ -7337,6 +7440,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 * The problem is that it's both hard and excessively expensive to iterate the
 * child list, not to mention that it's impossible to IPI the children running
 * on another CPU, from interrupt/NMI context.
+ *
+ * Instead, the combination of PERF_SAMPLE_READ and inherit will track per-thread
+ * counts rather than attempting to accumulate some value across all children on
+ * all cores.
*/
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
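Editor's note: combined with the perf_event_alloc() change further down, an attribute set like the following is now accepted, with each inherited child sampling its own per-thread count; PERF_SAMPLE_TID is mandatory in this mode. A hedged user-space sketch:

#include <linux/perf_event.h>
#include <string.h>

/* inherit + PERF_SAMPLE_READ now works, but only together with
 * PERF_SAMPLE_TID so samples can be attributed to a single thread */
static void setup_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->inherit = 1;
	attr->sample_period = 100000;
	attr->sample_type = PERF_SAMPLE_READ | PERF_SAMPLE_TID;
}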
@@ -8857,7 +8964,7 @@ got_name:
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
if (atomic_read(&nr_build_id_events))
- build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
+ build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size);
perf_iterate_sb(perf_event_mmap_output,
mmap_event,
@@ -9747,7 +9854,7 @@ static int __perf_event_overflow(struct perf_event *event,
if (!event->pending_work &&
!task_work_add(current, &event->pending_task, notify_mode)) {
event->pending_work = pending_id;
- local_inc(&event->ctx->nr_pending);
+ local_inc(&event->ctx->nr_no_switch_fast);
event->pending_addr = 0;
if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
@@ -11484,10 +11591,60 @@ perf_event_mux_interval_ms_store(struct device *dev,
}
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
+static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu)
+{
+ switch (scope) {
+ case PERF_PMU_SCOPE_CORE:
+ return topology_sibling_cpumask(cpu);
+ case PERF_PMU_SCOPE_DIE:
+ return topology_die_cpumask(cpu);
+ case PERF_PMU_SCOPE_CLUSTER:
+ return topology_cluster_cpumask(cpu);
+ case PERF_PMU_SCOPE_PKG:
+ return topology_core_cpumask(cpu);
+ case PERF_PMU_SCOPE_SYS_WIDE:
+ return cpu_online_mask;
+ }
+
+ return NULL;
+}
+
+static inline struct cpumask *perf_scope_cpumask(unsigned int scope)
+{
+ switch (scope) {
+ case PERF_PMU_SCOPE_CORE:
+ return perf_online_core_mask;
+ case PERF_PMU_SCOPE_DIE:
+ return perf_online_die_mask;
+ case PERF_PMU_SCOPE_CLUSTER:
+ return perf_online_cluster_mask;
+ case PERF_PMU_SCOPE_PKG:
+ return perf_online_pkg_mask;
+ case PERF_PMU_SCOPE_SYS_WIDE:
+ return perf_online_sys_mask;
+ }
+
+ return NULL;
+}
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct cpumask *mask = perf_scope_cpumask(pmu->scope);
+
+ if (mask)
+ return cpumap_print_to_pagebuf(true, buf, mask);
+ return 0;
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
static struct attribute *pmu_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_perf_event_mux_interval_ms.attr,
&dev_attr_nr_addr_filters.attr,
+ &dev_attr_cpumask.attr,
NULL,
};
@@ -11499,6 +11656,10 @@ static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int
if (n == 2 && !pmu->nr_addr_filters)
return 0;
+ /* cpumask */
+ if (n == 3 && pmu->scope == PERF_PMU_SCOPE_NONE)
+ return 0;
+
return a->mode;
}
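Editor's note: the net effect of the hunks above is a new read-only "cpumask" sysfs attribute, exposed only for PMUs with a non-default scope. A hedged user-space reader (the PMU name in the path is purely an example):

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/bus/event_source/devices/uncore_imc/cpumask", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("PMU cpumask: %s", buf);	/* e.g. "0" or "0,36" */
	fclose(f);
	return 0;
}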
@@ -11583,6 +11744,11 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
goto free_pdc;
}
+ if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Cannot register a PMU with an invalid scope.\n")) {
+ ret = -EINVAL;
+ goto free_pdc;
+ }
+
pmu->name = name;
if (type >= 0)
@@ -11737,6 +11903,22 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
event_has_any_exclude_flag(event))
ret = -EINVAL;
+ if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
+ const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
+ struct cpumask *pmu_cpumask = perf_scope_cpumask(pmu->scope);
+ int cpu;
+
+ if (pmu_cpumask && cpumask) {
+ cpu = cpumask_any_and(pmu_cpumask, cpumask);
+ if (cpu >= nr_cpu_ids)
+ ret = -ENODEV;
+ else
+ event->event_caps |= PERF_EV_CAP_READ_SCOPE;
+ } else {
+ ret = -ENODEV;
+ }
+ }
+
if (ret && event->destroy)
event->destroy(event);
}
@@ -12064,10 +12246,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);
/*
- * We currently do not support PERF_SAMPLE_READ on inherited events.
+ * We do not support PERF_SAMPLE_READ on inherited events unless
+ * PERF_SAMPLE_TID is also selected, which allows inherited events to
+ * collect per-thread samples.
* See perf_output_read().
*/
- if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
+ if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
goto err_ns;
if (!has_branch_stack(event))
@@ -12481,7 +12665,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
- struct fd group = {NULL, 0};
+ struct fd group = EMPTY_FD;
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
@@ -12556,7 +12740,7 @@ SYSCALL_DEFINE5(perf_event_open,
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
- group_leader = group.file->private_data;
+ group_leader = fd_file(group)->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -13091,7 +13275,7 @@ static void sync_child_event(struct perf_event *child_event)
perf_event_read_event(child_event, task);
}
- child_val = perf_event_count(child_event);
+ child_val = perf_event_count(child_event, false);
/*
* Add back the child's count to the parent's count:
@@ -13182,7 +13366,7 @@ static void perf_event_exit_task_context(struct task_struct *child)
* in.
*/
raw_spin_lock_irq(&child_ctx->lock);
- task_ctx_sched_out(child_ctx, EVENT_ALL);
+ task_ctx_sched_out(child_ctx, NULL, EVENT_ALL);
/*
* Now that the context is inactive, destroy the task <-> ctx relation
@@ -13358,6 +13542,15 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
return &event->attr;
}
+int perf_allow_kernel(struct perf_event_attr *attr)
+{
+ if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
+}
+EXPORT_SYMBOL_GPL(perf_allow_kernel);
+
/*
* Inherit an event from parent task to child task.
*
@@ -13688,6 +13881,12 @@ static void __init perf_event_init_all_cpus(void)
int cpu;
zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&perf_online_core_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&perf_online_die_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&perf_online_cluster_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&perf_online_pkg_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&perf_online_sys_mask, GFP_KERNEL);
+
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
@@ -13731,12 +13930,46 @@ static void __perf_event_exit_context(void *__info)
struct perf_event *event;
raw_spin_lock(&ctx->lock);
- ctx_sched_out(ctx, EVENT_TIME);
+ ctx_sched_out(ctx, NULL, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
raw_spin_unlock(&ctx->lock);
}
+static void perf_event_clear_cpumask(unsigned int cpu)
+{
+ int target[PERF_PMU_MAX_SCOPE];
+ unsigned int scope;
+ struct pmu *pmu;
+
+ cpumask_clear_cpu(cpu, perf_online_mask);
+
+ for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
+ const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
+ struct cpumask *pmu_cpumask = perf_scope_cpumask(scope);
+
+ target[scope] = -1;
+ if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
+ continue;
+
+ if (!cpumask_test_and_clear_cpu(cpu, pmu_cpumask))
+ continue;
+ target[scope] = cpumask_any_but(cpumask, cpu);
+ if (target[scope] < nr_cpu_ids)
+ cpumask_set_cpu(target[scope], pmu_cpumask);
+ }
+
+ /* migrate */
+ list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
+ if (pmu->scope == PERF_PMU_SCOPE_NONE ||
+ WARN_ON_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE))
+ continue;
+
+ if (target[pmu->scope] >= 0 && target[pmu->scope] < nr_cpu_ids)
+ perf_pmu_migrate_context(pmu, cpu, target[pmu->scope]);
+ }
+}
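Editor's note: the migration rule above is "pick any still-online CPU in the same topology domain, or let the events stay and die with the CPU". A small user-space sketch of cpumask_any_but()'s role in that decision (8-bit masks stand in for cpumasks):

#include <stdio.h>

#define NR_CPUS 8

/* find any set bit other than 'cpu', mirroring cpumask_any_but() */
static int any_but(unsigned int mask, int cpu)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (i != cpu && (mask & (1U << i)))
			return i;
	return NR_CPUS;	/* >= nr_cpu_ids: no target, contexts are not migrated */
}

int main(void)
{
	unsigned int pkg = 0x0f;	/* CPUs 0-3 share the package */

	printf("offline cpu2 -> migrate to cpu%d\n", any_but(pkg, 2));
	printf("offline last cpu -> %d (none left)\n", any_but(0x04, 2));
	return 0;
}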
+
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_cpu_context *cpuctx;
@@ -13744,6 +13977,11 @@ static void perf_event_exit_cpu_context(int cpu)
// XXX simplify cpuctx->online
mutex_lock(&pmus_lock);
+ /*
+ * Clear the cpumasks, and migrate to other CPUs if possible.
+ * Must be invoked before __perf_event_exit_context().
+ */
+ perf_event_clear_cpumask(cpu);
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
ctx = &cpuctx->ctx;
@@ -13751,7 +13989,6 @@ static void perf_event_exit_cpu_context(int cpu)
smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
cpuctx->online = 0;
mutex_unlock(&ctx->mutex);
- cpumask_clear_cpu(cpu, perf_online_mask);
mutex_unlock(&pmus_lock);
}
#else
@@ -13760,6 +13997,42 @@ static void perf_event_exit_cpu_context(int cpu) { }
#endif
+static void perf_event_setup_cpumask(unsigned int cpu)
+{
+ struct cpumask *pmu_cpumask;
+ unsigned int scope;
+
+ /*
+ * At the early boot stage, the cpumasks haven't been set up yet.
+ * Each perf_online_<domain>_mask includes the first CPU of its domain.
+ * Unconditionally set the boot CPU in every perf_online_<domain>_mask.
+ */
+ if (cpumask_empty(perf_online_mask)) {
+ for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
+ pmu_cpumask = perf_scope_cpumask(scope);
+ if (WARN_ON_ONCE(!pmu_cpumask))
+ continue;
+ cpumask_set_cpu(cpu, pmu_cpumask);
+ }
+ goto end;
+ }
+
+ for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
+ const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
+
+ pmu_cpumask = perf_scope_cpumask(scope);
+
+ if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
+ continue;
+
+ if (!cpumask_empty(cpumask) &&
+ cpumask_any_and(pmu_cpumask, cpumask) >= nr_cpu_ids)
+ cpumask_set_cpu(cpu, pmu_cpumask);
+ }
+end:
+ cpumask_set_cpu(cpu, perf_online_mask);
+}
+
int perf_event_init_cpu(unsigned int cpu)
{
struct perf_cpu_context *cpuctx;
@@ -13768,7 +14041,7 @@ int perf_event_init_cpu(unsigned int cpu)
perf_swevent_init_cpu(cpu);
mutex_lock(&pmus_lock);
- cpumask_set_cpu(cpu, perf_online_mask);
+ perf_event_setup_cpumask(cpu);
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
ctx = &cpuctx->ctx;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 50d7949be2b1..2ec796e2f055 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -40,6 +40,9 @@ static struct rb_root uprobes_tree = RB_ROOT;
#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */
+static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);
+
+DEFINE_STATIC_SRCU(uprobes_srcu);
#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
@@ -57,8 +60,9 @@ struct uprobe {
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
- struct uprobe_consumer *consumers;
+ struct list_head consumers;
struct inode *inode; /* Also hold a ref to inode */
+ struct rcu_head rcu;
loff_t offset;
loff_t ref_ctr_offset;
unsigned long flags;
@@ -99,8 +103,7 @@ struct xol_area {
atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */
- struct vm_special_mapping xol_mapping;
- struct page *pages[2];
+ struct page *page;
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
@@ -109,6 +112,11 @@ struct xol_area {
unsigned long vaddr; /* Page(s) of instruction slots */
};
+static void uprobe_warn(struct task_struct *t, const char *msg)
+{
+ pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
+}
+
/*
* valid_vma: Verify if the specified vma is an executable vma
* Relax restrictions while unregistering: vm_flags might have
@@ -453,7 +461,7 @@ static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
*
- * Called with mm->mmap_lock held for write.
+ * Called with mm->mmap_lock held for read or write.
* Return 0 (success) or a negative errno.
*/
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
@@ -587,25 +595,63 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
*(uprobe_opcode_t *)&auprobe->insn);
}
+/* uprobe should have a guaranteed positive refcount */
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
refcount_inc(&uprobe->ref);
return uprobe;
}
+/*
+ * uprobe should have a guaranteed lifetime, which can be ensured by any of:
+ * - caller already has refcount taken (and wants an extra one);
+ * - uprobe is RCU protected and won't be freed until after grace period;
+ * - we are holding uprobes_treelock (for read or write, doesn't matter).
+ */
+static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
+{
+ if (refcount_inc_not_zero(&uprobe->ref))
+ return uprobe;
+ return NULL;
+}
+
+static inline bool uprobe_is_active(struct uprobe *uprobe)
+{
+ return !RB_EMPTY_NODE(&uprobe->rb_node);
+}
+
+static void uprobe_free_rcu(struct rcu_head *rcu)
+{
+ struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
+
+ kfree(uprobe);
+}
+
static void put_uprobe(struct uprobe *uprobe)
{
- if (refcount_dec_and_test(&uprobe->ref)) {
- /*
- * If application munmap(exec_vma) before uprobe_unregister()
- * gets called, we don't get a chance to remove uprobe from
- * delayed_uprobe_list from remove_breakpoint(). Do it here.
- */
- mutex_lock(&delayed_uprobe_lock);
- delayed_uprobe_remove(uprobe, NULL);
- mutex_unlock(&delayed_uprobe_lock);
- kfree(uprobe);
+ if (!refcount_dec_and_test(&uprobe->ref))
+ return;
+
+ write_lock(&uprobes_treelock);
+
+ if (uprobe_is_active(uprobe)) {
+ write_seqcount_begin(&uprobes_seqcount);
+ rb_erase(&uprobe->rb_node, &uprobes_tree);
+ write_seqcount_end(&uprobes_seqcount);
}
+
+ write_unlock(&uprobes_treelock);
+
+ /*
+ * If application munmap(exec_vma) before uprobe_unregister()
+ * gets called, we don't get a chance to remove uprobe from
+ * delayed_uprobe_list from remove_breakpoint(). Do it here.
+ */
+ mutex_lock(&delayed_uprobe_lock);
+ delayed_uprobe_remove(uprobe, NULL);
+ mutex_unlock(&delayed_uprobe_lock);
+
+ call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
}
static __always_inline
@@ -647,62 +693,86 @@ static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}
-static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
+/*
+ * Assumes being inside RCU protected region.
+ * No refcount is taken on returned uprobe.
+ */
+static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
struct __uprobe_key key = {
.inode = inode,
.offset = offset,
};
- struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);
+ struct rb_node *node;
+ unsigned int seq;
+
+ lockdep_assert(srcu_read_lock_held(&uprobes_srcu));
- if (node)
- return get_uprobe(__node_2_uprobe(node));
+ do {
+ seq = read_seqcount_begin(&uprobes_seqcount);
+ node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
+ /*
+ * Lockless RB-tree lookups can result only in false negatives.
+ * If the element is found, it is correct and can be returned
+ * under RCU protection. If we find nothing, we need to
+ * validate that seqcount didn't change. If it did, we have to
+ * try again as we might have missed the element (false
+ * negative). If seqcount is unchanged, search truly failed.
+ */
+ if (node)
+ return __node_2_uprobe(node);
+ } while (read_seqcount_retry(&uprobes_seqcount, seq));
return NULL;
}
/*
- * Find a uprobe corresponding to a given inode:offset
- * Acquires uprobes_treelock
+ * Attempt to insert a new uprobe into uprobes_tree.
+ *
+ * If uprobe already exists (for given inode+offset), we just increment
+ * refcount of previously existing uprobe.
+ *
+ * If not, a provided new instance of uprobe is inserted into the tree (with
+ * assumed initial refcount == 1).
+ *
+ * In any case, we return a uprobe instance that ends up being in uprobes_tree.
+ * Caller has to clean up new uprobe instance, if it ended up not being
+ * inserted into the tree.
+ *
+ * We assume that uprobes_treelock is held for writing.
*/
-static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
-{
- struct uprobe *uprobe;
-
- read_lock(&uprobes_treelock);
- uprobe = __find_uprobe(inode, offset);
- read_unlock(&uprobes_treelock);
-
- return uprobe;
-}
-
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
struct rb_node *node;
+again:
+ node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
+ if (node) {
+ struct uprobe *u = __node_2_uprobe(node);
+
+ if (!try_get_uprobe(u)) {
+ rb_erase(node, &uprobes_tree);
+ RB_CLEAR_NODE(&u->rb_node);
+ goto again;
+ }
- node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
- if (node)
- return get_uprobe(__node_2_uprobe(node));
+ return u;
+ }
- /* get access + creation ref */
- refcount_set(&uprobe->ref, 2);
- return NULL;
+ return uprobe;
}
/*
- * Acquire uprobes_treelock.
- * Matching uprobe already exists in rbtree;
- * increment (access refcount) and return the matching uprobe.
- *
- * No matching uprobe; insert the uprobe in rb_tree;
- * get a double refcount (access + creation) and return NULL.
+ * Acquire uprobes_treelock and insert uprobe into uprobes_tree
+ * (or reuse existing one, see __insert_uprobe() comments above).
*/
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
struct uprobe *u;
write_lock(&uprobes_treelock);
+ write_seqcount_begin(&uprobes_seqcount);
u = __insert_uprobe(uprobe);
+ write_seqcount_end(&uprobes_seqcount);
write_unlock(&uprobes_treelock);
return u;
@@ -725,18 +795,21 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
if (!uprobe)
- return NULL;
+ return ERR_PTR(-ENOMEM);
uprobe->inode = inode;
uprobe->offset = offset;
uprobe->ref_ctr_offset = ref_ctr_offset;
+ INIT_LIST_HEAD(&uprobe->consumers);
init_rwsem(&uprobe->register_rwsem);
init_rwsem(&uprobe->consumer_rwsem);
+ RB_CLEAR_NODE(&uprobe->rb_node);
+ refcount_set(&uprobe->ref, 1);
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
/* a uprobe exists for this inode:offset combination */
- if (cur_uprobe) {
+ if (cur_uprobe != uprobe) {
if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
ref_ctr_mismatch_warn(cur_uprobe, uprobe);
put_uprobe(cur_uprobe);
@@ -753,32 +826,19 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
down_write(&uprobe->consumer_rwsem);
- uc->next = uprobe->consumers;
- uprobe->consumers = uc;
+ list_add_rcu(&uc->cons_node, &uprobe->consumers);
up_write(&uprobe->consumer_rwsem);
}
/*
* For uprobe @uprobe, delete the consumer @uc.
- * Return true if the @uc is deleted successfully
- * or return false.
+ * Should never be called with consumer that's not part of @uprobe->consumers.
*/
-static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
+static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
- struct uprobe_consumer **con;
- bool ret = false;
-
down_write(&uprobe->consumer_rwsem);
- for (con = &uprobe->consumers; *con; con = &(*con)->next) {
- if (*con == uc) {
- *con = uc->next;
- ret = true;
- break;
- }
- }
+ list_del_rcu(&uc->cons_node);
up_write(&uprobe->consumer_rwsem);
-
- return ret;
}
static int __copy_insn(struct address_space *mapping, struct file *filp,
@@ -863,21 +923,20 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
return ret;
}
-static inline bool consumer_filter(struct uprobe_consumer *uc,
- enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
- return !uc->filter || uc->filter(uc, ctx, mm);
+ return !uc->filter || uc->filter(uc, mm);
}
-static bool filter_chain(struct uprobe *uprobe,
- enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
struct uprobe_consumer *uc;
bool ret = false;
down_read(&uprobe->consumer_rwsem);
- for (uc = uprobe->consumers; uc; uc = uc->next) {
- ret = consumer_filter(uc, ctx, mm);
+ list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+ srcu_read_lock_held(&uprobes_srcu)) {
+ ret = consumer_filter(uc, mm);
if (ret)
break;
}
@@ -921,27 +980,6 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
return set_orig_insn(&uprobe->arch, mm, vaddr);
}
-static inline bool uprobe_is_active(struct uprobe *uprobe)
-{
- return !RB_EMPTY_NODE(&uprobe->rb_node);
-}
-/*
- * There could be threads that have already hit the breakpoint. They
- * will recheck the current insn and restart if find_uprobe() fails.
- * See find_active_uprobe().
- */
-static void delete_uprobe(struct uprobe *uprobe)
-{
- if (WARN_ON(!uprobe_is_active(uprobe)))
- return;
-
- write_lock(&uprobes_treelock);
- rb_erase(&uprobe->rb_node, &uprobes_tree);
- write_unlock(&uprobes_treelock);
- RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
- put_uprobe(uprobe);
-}
-
struct map_info {
struct map_info *next;
struct mm_struct *mm;
@@ -1046,7 +1084,13 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
if (err && is_register)
goto free;
-
+ /*
+ * We take mmap_lock for writing to avoid the race with
+ * find_active_uprobe_rcu() which takes mmap_lock for reading.
+ * Thus this install_breakpoint() cannot make
+ * is_trap_at_addr() true right after find_uprobe_rcu()
+ * returns NULL in find_active_uprobe_rcu().
+ */
mmap_write_lock(mm);
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
@@ -1059,12 +1103,10 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
if (is_register) {
/* consult only the "caller", new consumer. */
- if (consumer_filter(new,
- UPROBE_FILTER_REGISTER, mm))
+ if (consumer_filter(new, mm))
err = install_breakpoint(uprobe, mm, vma, info->vaddr);
} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
- if (!filter_chain(uprobe,
- UPROBE_FILTER_UNREGISTER, mm))
+ if (!filter_chain(uprobe, mm))
err |= remove_breakpoint(uprobe, mm, info->vaddr);
}
@@ -1079,152 +1121,140 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
return err;
}
-static void
-__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
+/**
+ * uprobe_unregister_nosync - unregister an already registered probe.
+ * @uprobe: uprobe to remove
+ * @uc: identifies which probe, if multiple probes are colocated.
+ */
+void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
int err;
- if (WARN_ON(!consumer_del(uprobe, uc)))
- return;
-
+ down_write(&uprobe->register_rwsem);
+ consumer_del(uprobe, uc);
err = register_for_each_vma(uprobe, NULL);
- /* TODO : cant unregister? schedule a worker thread */
- if (!uprobe->consumers && !err)
- delete_uprobe(uprobe);
-}
-
-/*
- * uprobe_unregister - unregister an already registered probe.
- * @inode: the file in which the probe has to be removed.
- * @offset: offset from the start of the file.
- * @uc: identify which probe if multiple probes are colocated.
- */
-void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- struct uprobe *uprobe;
+ up_write(&uprobe->register_rwsem);
- uprobe = find_uprobe(inode, offset);
- if (WARN_ON(!uprobe))
+ /* TODO: can't unregister? schedule a worker thread */
+ if (unlikely(err)) {
+ uprobe_warn(current, "unregister, leaking uprobe");
return;
+ }
- down_write(&uprobe->register_rwsem);
- __uprobe_unregister(uprobe, uc);
- up_write(&uprobe->register_rwsem);
put_uprobe(uprobe);
}
-EXPORT_SYMBOL_GPL(uprobe_unregister);
+EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
-/*
- * __uprobe_register - register a probe
+void uprobe_unregister_sync(void)
+{
+ /*
+ * Now that handler_chain() and handle_uretprobe_chain() iterate over
+ * uprobe->consumers list under RCU protection without holding
+ * uprobe->register_rwsem, we need to wait for RCU grace period to
+ * make sure that we can't call into the callbacks of a just-unregistered
+ * uprobe_consumer anymore. If we don't do that, a fast and unlucky
+ * enough caller could free the consumer's memory and cause
+ * handler_chain() or handle_uretprobe_chain() to do a use-after-free.
+ */
+ synchronize_srcu(&uprobes_srcu);
+}
+EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
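Editor's note: unregistration is now a two-step protocol. A hedged sketch of a caller (hypothetical names; handler signature assumed from this kernel version):

static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	return 0;
}

static struct uprobe_consumer my_uc = { .handler = my_handler };
static struct uprobe *my_uprobe;

static void my_detach(void)
{
	uprobe_unregister_nosync(my_uprobe, &my_uc);
	/* wait out the SRCU grace period: handlers may still be running */
	uprobe_unregister_sync();
	/* only now is it safe to free or reuse 'my_uc' */
}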
+
+/**
+ * uprobe_register - register a probe
* @inode: the file in which the probe has to be placed.
* @offset: offset from the start of the file.
+ * @ref_ctr_offset: offset of SDT marker / reference counter
 * @uc: information on how to handle the probe.
*
- * Apart from the access refcount, __uprobe_register() takes a creation
+ * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
* inserted into the rbtree (i.e first consumer for a @inode:@offset
* tuple). Creation refcount stops uprobe_unregister from freeing the
* @uprobe even before the register operation is complete. Creation
* refcount is released when the last @uc for the @uprobe
- * unregisters. Caller of __uprobe_register() is required to keep @inode
+ * unregisters. Caller of uprobe_register() is required to keep @inode
* (and the containing mount) referenced.
*
- * Return errno if it cannot successully install probes
- * else return 0 (success)
+ * Return: pointer to the new uprobe on success or an ERR_PTR on failure.
*/
-static int __uprobe_register(struct inode *inode, loff_t offset,
- loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+struct uprobe *uprobe_register(struct inode *inode,
+ loff_t offset, loff_t ref_ctr_offset,
+ struct uprobe_consumer *uc)
{
struct uprobe *uprobe;
int ret;
/* Uprobe must have at least one set consumer */
if (!uc->handler && !uc->ret_handler)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
if (!inode->i_mapping->a_ops->read_folio &&
!shmem_mapping(inode->i_mapping))
- return -EIO;
+ return ERR_PTR(-EIO);
/* Racy, just to catch the obvious mistakes */
if (offset > i_size_read(inode))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/*
* This ensures that copy_from_page(), copy_to_page() and
* __update_ref_ctr() can't cross page boundary.
*/
if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- retry:
uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
- if (!uprobe)
- return -ENOMEM;
if (IS_ERR(uprobe))
- return PTR_ERR(uprobe);
+ return uprobe;
- /*
- * We can race with uprobe_unregister()->delete_uprobe().
- * Check uprobe_is_active() and retry if it is false.
- */
down_write(&uprobe->register_rwsem);
- ret = -EAGAIN;
- if (likely(uprobe_is_active(uprobe))) {
- consumer_add(uprobe, uc);
- ret = register_for_each_vma(uprobe, uc);
- if (ret)
- __uprobe_unregister(uprobe, uc);
- }
+ consumer_add(uprobe, uc);
+ ret = register_for_each_vma(uprobe, uc);
up_write(&uprobe->register_rwsem);
- put_uprobe(uprobe);
- if (unlikely(ret == -EAGAIN))
- goto retry;
- return ret;
-}
+ if (ret) {
+ uprobe_unregister_nosync(uprobe, uc);
+ /*
+ * Registration might have partially succeeded, so this consumer
+ * might already be getting called right now. We need to sync
+ * here. That's OK; this is an unlikely slow path.
+ */
+ uprobe_unregister_sync();
+ return ERR_PTR(ret);
+ }
-int uprobe_register(struct inode *inode, loff_t offset,
- struct uprobe_consumer *uc)
-{
- return __uprobe_register(inode, offset, 0, uc);
+ return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
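Editor's note: the registration side of the same API change, continuing the hypothetical caller sketched above; the returned handle (or ERR_PTR) is what uprobe_unregister_nosync() and uprobe_apply() now take:

static int my_attach(struct inode *inode, loff_t offset)
{
	my_uprobe = uprobe_register(inode, offset, /* ref_ctr_offset */ 0, &my_uc);
	if (IS_ERR(my_uprobe))
		return PTR_ERR(my_uprobe);
	return 0;
}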
-int uprobe_register_refctr(struct inode *inode, loff_t offset,
- loff_t ref_ctr_offset, struct uprobe_consumer *uc)
-{
- return __uprobe_register(inode, offset, ref_ctr_offset, uc);
-}
-EXPORT_SYMBOL_GPL(uprobe_register_refctr);
-
-/*
- * uprobe_apply - unregister an already registered probe.
- * @inode: the file in which the probe has to be removed.
- * @offset: offset from the start of the file.
+/**
+ * uprobe_apply - add or remove the breakpoints according to @uc->filter
+ * @uprobe: uprobe which "owns" the breakpoint
* @uc: consumer which wants to add more or remove some breakpoints
* @add: add or remove the breakpoints
+ * Return: 0 on success or negative error code.
*/
-int uprobe_apply(struct inode *inode, loff_t offset,
- struct uprobe_consumer *uc, bool add)
+int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
- struct uprobe *uprobe;
struct uprobe_consumer *con;
- int ret = -ENOENT;
-
- uprobe = find_uprobe(inode, offset);
- if (WARN_ON(!uprobe))
- return ret;
+ int ret = -ENOENT, srcu_idx;
down_write(&uprobe->register_rwsem);
- for (con = uprobe->consumers; con && con != uc ; con = con->next)
- ;
- if (con)
- ret = register_for_each_vma(uprobe, add ? uc : NULL);
+
+ srcu_idx = srcu_read_lock(&uprobes_srcu);
+ list_for_each_entry_srcu(con, &uprobe->consumers, cons_node,
+ srcu_read_lock_held(&uprobes_srcu)) {
+ if (con == uc) {
+ ret = register_for_each_vma(uprobe, add ? uc : NULL);
+ break;
+ }
+ }
+ srcu_read_unlock(&uprobes_srcu, srcu_idx);
+
up_write(&uprobe->register_rwsem);
- put_uprobe(uprobe);
return ret;
}
@@ -1305,15 +1335,17 @@ static void build_probe_list(struct inode *inode,
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset < min)
break;
- list_add(&u->pending_list, head);
- get_uprobe(u);
+ /* if uprobe went away, it's safe to ignore it */
+ if (try_get_uprobe(u))
+ list_add(&u->pending_list, head);
}
for (t = n; (t = rb_next(t)); ) {
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset > max)
break;
- list_add(&u->pending_list, head);
- get_uprobe(u);
+ /* if uprobe went away, it's safe to ignore it */
+ if (try_get_uprobe(u))
+ list_add(&u->pending_list, head);
}
}
read_unlock(&uprobes_treelock);
@@ -1384,7 +1416,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
*/
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!fatal_signal_pending(current) &&
- filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
+ filter_chain(uprobe, vma->vm_mm)) {
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
}
@@ -1433,6 +1465,21 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
+static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
+
+ vmf->page = area->page;
+ get_page(vmf->page);
+ return 0;
+}
+
+static const struct vm_special_mapping xol_mapping = {
+ .name = "[uprobes]",
+ .fault = xol_fault,
+};
+
/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
@@ -1459,7 +1506,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
- &area->xol_mapping);
+ &xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
@@ -1498,12 +1545,9 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (!area->bitmap)
goto free_area;
- area->xol_mapping.name = "[uprobes]";
- area->xol_mapping.pages = area->pages;
- area->pages[0] = alloc_page(GFP_HIGHUSER);
- if (!area->pages[0])
+ area->page = alloc_page(GFP_HIGHUSER);
+ if (!area->page)
goto free_bitmap;
- area->pages[1] = NULL;
area->vaddr = vaddr;
init_waitqueue_head(&area->wq);
@@ -1511,12 +1555,12 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
set_bit(0, area->bitmap);
atomic_set(&area->slot_count, 1);
insns = arch_uprobe_trampoline(&insns_size);
- arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size);
+ arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
if (!xol_add_vma(mm, area))
return area;
- __free_page(area->pages[0]);
+ __free_page(area->page);
free_bitmap:
kfree(area->bitmap);
free_area:
@@ -1558,7 +1602,7 @@ void uprobe_clear_state(struct mm_struct *mm)
if (!area)
return;
- put_page(area->pages[0]);
+ put_page(area->page);
kfree(area->bitmap);
kfree(area);
}
@@ -1625,7 +1669,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
if (unlikely(!xol_vaddr))
return 0;
- arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
+ arch_uprobe_copy_ixol(area->page, xol_vaddr,
&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
return xol_vaddr;
@@ -1770,6 +1814,12 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
return -ENOMEM;
*n = *o;
+ /*
+ * uprobe's refcnt has to be positive at this point, kept by
+ * utask->return_instances items; return_instances can't be
+ * removed right now, as the task is blocked due to duping; so
+ * get_uprobe() is safe to use here.
+ */
get_uprobe(n->uprobe);
n->next = NULL;
@@ -1781,12 +1831,6 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
return 0;
}
-static void uprobe_warn(struct task_struct *t, const char *msg)
-{
- pr_warn("uprobe: %s:%d failed to %s\n",
- current->comm, current->pid, msg);
-}
-
static void dup_xol_work(struct callback_head *work)
{
if (current->flags & PF_EXITING)
@@ -1883,9 +1927,13 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
return;
}
+ /* we need to bump refcount to store uprobe in utask */
+ if (!try_get_uprobe(uprobe))
+ return;
+
ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
if (!ri)
- return;
+ goto fail;
trampoline_vaddr = uprobe_get_trampoline_vaddr();
orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
@@ -1912,8 +1960,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
}
orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
}
-
- ri->uprobe = get_uprobe(uprobe);
+ ri->uprobe = uprobe;
ri->func = instruction_pointer(regs);
ri->stack = user_stack_pointer(regs);
ri->orig_ret_vaddr = orig_ret_vaddr;
@@ -1924,8 +1971,9 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
utask->return_instances = ri;
return;
- fail:
+fail:
kfree(ri);
+ put_uprobe(uprobe);
}
/* Prepare to single-step probed instruction out of line. */
@@ -1940,9 +1988,14 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
if (!utask)
return -ENOMEM;
+ if (!try_get_uprobe(uprobe))
+ return -EINVAL;
+
xol_vaddr = xol_get_insn_slot(uprobe);
- if (!xol_vaddr)
- return -ENOMEM;
+ if (!xol_vaddr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
utask->xol_vaddr = xol_vaddr;
utask->vaddr = bp_vaddr;
@@ -1950,12 +2003,15 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
err = arch_uprobe_pre_xol(&uprobe->arch, regs);
if (unlikely(err)) {
xol_free_insn_slot(current);
- return err;
+ goto err_out;
}
utask->active_uprobe = uprobe;
utask->state = UTASK_SSTEP;
return 0;
+err_out:
+ put_uprobe(uprobe);
+ return err;
}
/*
@@ -2028,13 +2084,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
if (likely(result == 0))
goto out;
- /*
- * The NULL 'tsk' here ensures that any faults that occur here
- * will not be accounted to the task. 'mm' *is* current->mm,
- * but we treat this as a 'remote' access since it is
- * essentially a kernel access to the memory.
- */
- result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL);
+ result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
if (result < 0)
return result;
@@ -2045,7 +2095,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
return is_trap_insn(&opcode);
}
-static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
+/* assumes being inside RCU protected region */
+static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
@@ -2058,7 +2109,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
struct inode *inode = file_inode(vma->vm_file);
loff_t offset = vaddr_to_offset(vma, bp_vaddr);
- uprobe = find_uprobe(inode, offset);
+ uprobe = find_uprobe_rcu(inode, offset);
}
if (!uprobe)
@@ -2079,9 +2130,12 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
struct uprobe_consumer *uc;
int remove = UPROBE_HANDLER_REMOVE;
bool need_prep = false; /* prepare return uprobe, when needed */
+ bool has_consumers = false;
+
+ current->utask->auprobe = &uprobe->arch;
- down_read(&uprobe->register_rwsem);
- for (uc = uprobe->consumers; uc; uc = uc->next) {
+ list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+ srcu_read_lock_held(&uprobes_srcu)) {
int rc = 0;
if (uc->handler) {
@@ -2094,16 +2148,24 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
need_prep = true;
remove &= rc;
+ has_consumers = true;
}
+ current->utask->auprobe = NULL;
if (need_prep && !remove)
prepare_uretprobe(uprobe, regs); /* put bp at return */
- if (remove && uprobe->consumers) {
- WARN_ON(!uprobe_is_active(uprobe));
- unapply_uprobe(uprobe, current->mm);
+ if (remove && has_consumers) {
+ down_read(&uprobe->register_rwsem);
+
+ /* re-check that removal is still required, this time under lock */
+ if (!filter_chain(uprobe, current->mm)) {
+ WARN_ON(!uprobe_is_active(uprobe));
+ unapply_uprobe(uprobe, current->mm);
+ }
+
+ up_read(&uprobe->register_rwsem);
}
- up_read(&uprobe->register_rwsem);
}
static void
@@ -2111,13 +2173,15 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
struct uprobe *uprobe = ri->uprobe;
struct uprobe_consumer *uc;
+ int srcu_idx;
- down_read(&uprobe->register_rwsem);
- for (uc = uprobe->consumers; uc; uc = uc->next) {
+ srcu_idx = srcu_read_lock(&uprobes_srcu);
+ list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+ srcu_read_lock_held(&uprobes_srcu)) {
if (uc->ret_handler)
uc->ret_handler(uc, ri->func, regs);
}
- up_read(&uprobe->register_rwsem);
+ srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
@@ -2202,13 +2266,15 @@ static void handle_swbp(struct pt_regs *regs)
{
struct uprobe *uprobe;
unsigned long bp_vaddr;
- int is_swbp;
+ int is_swbp, srcu_idx;
bp_vaddr = uprobe_get_swbp_addr(regs);
if (bp_vaddr == uprobe_get_trampoline_vaddr())
return uprobe_handle_trampoline(regs);
- uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
+ srcu_idx = srcu_read_lock(&uprobes_srcu);
+
+ uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
if (!uprobe) {
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
@@ -2224,7 +2290,7 @@ static void handle_swbp(struct pt_regs *regs)
*/
instruction_pointer_set(regs, bp_vaddr);
}
- return;
+ goto out;
}
/* change it in advance for ->handler() and restart */
@@ -2259,12 +2325,12 @@ static void handle_swbp(struct pt_regs *regs)
if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
goto out;
- if (!pre_ssout(uprobe, regs, bp_vaddr))
- return;
+ if (pre_ssout(uprobe, regs, bp_vaddr))
+ goto out;
- /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
- put_uprobe(uprobe);
+ /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
+ srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 7430852a8571..619f0014c33b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -428,7 +428,7 @@ static void coredump_task_exit(struct task_struct *tsk)
complete(&core_state->startup);
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
+ set_current_state(TASK_IDLE|TASK_FREEZABLE);
if (!self.task) /* see coredump_finish() */
break;
schedule();
@@ -778,6 +778,62 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
}
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+ n--;
+# else
+ n++;
+# endif
+ } while (!*n);
+
+# ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
+}
+
+/* Count the maximum pages reached in kernel stacks */
+static inline void kstack_histogram(unsigned long used_stack)
+{
+#ifdef CONFIG_VM_EVENT_COUNTERS
+ if (used_stack <= 1024)
+ count_vm_event(KSTACK_1K);
+#if THREAD_SIZE > 1024
+ else if (used_stack <= 2048)
+ count_vm_event(KSTACK_2K);
+#endif
+#if THREAD_SIZE > 2048
+ else if (used_stack <= 4096)
+ count_vm_event(KSTACK_4K);
+#endif
+#if THREAD_SIZE > 4096
+ else if (used_stack <= 8192)
+ count_vm_event(KSTACK_8K);
+#endif
+#if THREAD_SIZE > 8192
+ else if (used_stack <= 16384)
+ count_vm_event(KSTACK_16K);
+#endif
+#if THREAD_SIZE > 16384
+ else if (used_stack <= 32768)
+ count_vm_event(KSTACK_32K);
+#endif
+#if THREAD_SIZE > 32768
+ else if (used_stack <= 65536)
+ count_vm_event(KSTACK_64K);
+#endif
+#if THREAD_SIZE > 65536
+ else
+ count_vm_event(KSTACK_REST);
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+}
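Editor's note: these histogram buckets are plain VM event counters, so they should surface through the usual vmstat interface (counter names such as kstack_1k/kstack_2k are an assumption based on the enum). A hedged user-space reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "kstack_", 7))
			fputs(line, stdout);	/* e.g. "kstack_4k 123" */
	fclose(f);
	return 0;
}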
+
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
@@ -785,6 +841,7 @@ static void check_stack_usage(void)
unsigned long free;
free = stack_not_used(current);
+ kstack_histogram(THREAD_SIZE - free);
if (free >= lowest_to_date)
return;
diff --git a/kernel/fork.c b/kernel/fork.c
index cc760491f201..60c0b4868fd4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/ext.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
@@ -832,7 +833,7 @@ static void check_mm(struct mm_struct *mm)
pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
mm_pgtables_bytes(mm));
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
@@ -969,6 +970,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
+ sched_ext_free(tsk);
io_uring_free(tsk);
cgroup_free(tsk);
task_numa_free(tsk, true);
@@ -997,7 +999,7 @@ void __init __weak arch_task_cache_init(void) { }
static void __init set_max_threads(unsigned int max_threads_suggested)
{
u64 threads;
- unsigned long nr_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long nr_pages = memblock_estimated_nr_free_pages();
/*
* The number of threads shall be limited such that the thread
@@ -1276,7 +1278,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_subscriptions_init(mm);
init_tlb_flush_pending(mm);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
mm->pmd_huge_pte = NULL;
#endif
mm_init_uprobes_state(mm);
@@ -1861,7 +1863,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
prev_cputime_init(&sig->prev_cputime);
#ifdef CONFIG_POSIX_TIMERS
- INIT_LIST_HEAD(&sig->posix_timers);
+ INIT_HLIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
#endif
@@ -2311,7 +2313,6 @@ __latent_entropy struct task_struct *copy_process(
#endif
#ifdef CONFIG_CPUSETS
p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
- p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -2347,7 +2348,7 @@ __latent_entropy struct task_struct *copy_process(
retval = perf_event_init_task(p, clone_flags);
if (retval)
- goto bad_fork_cleanup_policy;
+ goto bad_fork_sched_cancel_fork;
retval = audit_alloc(p);
if (retval)
goto bad_fork_cleanup_perf;
@@ -2480,7 +2481,9 @@ __latent_entropy struct task_struct *copy_process(
* cgroup specific, it unconditionally needs to place the task on a
* runqueue.
*/
- sched_cgroup_fork(p, args);
+ retval = sched_cgroup_fork(p, args);
+ if (retval)
+ goto bad_fork_cancel_cgroup;
/*
* From this point on we must avoid any synchronous user-space
@@ -2526,13 +2529,13 @@ __latent_entropy struct task_struct *copy_process(
/* Don't start children in a dying pid namespace */
if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
retval = -ENOMEM;
- goto bad_fork_cancel_cgroup;
+ goto bad_fork_core_free;
}
/* Let kill terminate clone/fork in the middle */
if (fatal_signal_pending(current)) {
retval = -EINTR;
- goto bad_fork_cancel_cgroup;
+ goto bad_fork_core_free;
}
/* No more failure paths after this point. */
@@ -2606,10 +2609,11 @@ __latent_entropy struct task_struct *copy_process(
return p;
-bad_fork_cancel_cgroup:
+bad_fork_core_free:
sched_core_free(p);
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
+bad_fork_cancel_cgroup:
cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
if (clone_flags & CLONE_PIDFD) {
@@ -2648,6 +2652,8 @@ bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_perf:
perf_event_free_task(p);
+bad_fork_sched_cancel_fork:
+ sched_cancel_fork(p);
bad_fork_cleanup_policy:
lockdep_free_task(p);
#ifdef CONFIG_NUMA
diff --git a/kernel/freezer.c b/kernel/freezer.c
index f57aaf96b829..44bbd7dbd2c8 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -72,7 +72,7 @@ bool __refrigerator(bool check_kthr_stop)
bool freeze;
raw_spin_lock_irq(&current->pi_lock);
- set_current_state(TASK_FROZEN);
+ WRITE_ONCE(current->__state, TASK_FROZEN);
/* unstale saved_state so that __thaw_task() will wake us up */
current->saved_state = TASK_RUNNING;
raw_spin_unlock_irq(&current->pi_lock);
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 06a1f091be81..136768ae2637 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -34,6 +34,7 @@
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
+#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index dc94e0bf2c94..271e9139de77 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -198,7 +198,7 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
irqd_clr_managed_shutdown(d);
- if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
+ if (!cpumask_intersects(aff, cpu_online_mask)) {
/*
* Catch code which fiddles with enable_irq() on a managed
* and potentially shutdown IRQ. Chained interrupt
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index eb8628390156..15a7654eff68 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -37,7 +37,7 @@ static inline bool irq_needs_fixup(struct irq_data *d)
* has been removed from the online mask already.
*/
if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
- cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
+ !cpumask_intersects(m, cpu_online_mask)) {
/*
* If this happens then there was a missed IRQ fixup at some
* point. Warn about it and enforce fixup.
@@ -110,7 +110,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (maskchip && chip->irq_mask)
chip->irq_mask(d);
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ if (!cpumask_intersects(affinity, cpu_online_mask)) {
/*
* If the interrupt is managed, then shut it down and leave
* the affinity untouched.
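Editor's note: this and the two hunks above in kernel/irq replace the idiom "cpumask_any_and(a, b) >= nr_cpu_ids" with the equivalent but clearer and cheaper "!cpumask_intersects(a, b)": intersects only has to establish that a common bit exists, not report which one. A sketch of the equivalence with plain bitmasks:

#include <stdio.h>

int main(void)
{
	unsigned long a = 0x0c, b = 0x03;	/* disjoint CPU masks */

	/* intersects == 0 is exactly the old "any_and >= nr_cpu_ids" case */
	printf("intersects: %d\n", (a & b) != 0);
	return 0;
}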
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index 3d4036db15ac..1a3d483548e2 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -13,7 +13,6 @@
struct irq_sim_work_ctx {
struct irq_work work;
- int irq_base;
unsigned int irq_count;
unsigned long *pending;
struct irq_domain *domain;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cea8f6874b1f..e0bff21f30e0 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -128,72 +128,98 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
-static int irq_domain_set_name(struct irq_domain *domain,
- const struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static int alloc_name(struct irq_domain *domain, char *base, enum irq_domain_bus_token bus_token)
+{
+ if (bus_token == DOMAIN_BUS_ANY)
+ domain->name = kasprintf(GFP_KERNEL, "%s", base);
+ else
+ domain->name = kasprintf(GFP_KERNEL, "%s-%d", base, bus_token);
+ if (!domain->name)
+ return -ENOMEM;
+
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ return 0;
+}
+
+static int alloc_fwnode_name(struct irq_domain *domain, const struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token, const char *suffix)
+{
+ const char *sep = suffix ? "-" : "";
+ const char *suf = suffix ? : "";
+ char *name;
+
+ if (bus_token == DOMAIN_BUS_ANY)
+ name = kasprintf(GFP_KERNEL, "%pfw%s%s", fwnode, sep, suf);
+ else
+ name = kasprintf(GFP_KERNEL, "%pfw%s%s-%d", fwnode, sep, suf, bus_token);
+ if (!name)
+ return -ENOMEM;
+
+ /*
+ * fwnode paths contain '/', which debugfs is legitimately unhappy
+ * about. Replace them with ':', which does the trick and is not as
+ * offensive as '\'...
+ */
+ domain->name = strreplace(name, '/', ':');
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ return 0;
+}
+
+static int alloc_unknown_name(struct irq_domain *domain, enum irq_domain_bus_token bus_token)
{
static atomic_t unknown_domains;
- struct irqchip_fwid *fwid;
+ int id = atomic_inc_return(&unknown_domains);
+
+ if (bus_token == DOMAIN_BUS_ANY)
+ domain->name = kasprintf(GFP_KERNEL, "unknown-%d", id);
+ else
+ domain->name = kasprintf(GFP_KERNEL, "unknown-%d-%d", id, bus_token);
+ if (!domain->name)
+ return -ENOMEM;
+
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ return 0;
+}
+
+static int irq_domain_set_name(struct irq_domain *domain, const struct irq_domain_info *info)
+{
+ enum irq_domain_bus_token bus_token = info->bus_token;
+ const struct fwnode_handle *fwnode = info->fwnode;
if (is_fwnode_irqchip(fwnode)) {
- fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+ struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+
+ /*
+ * The name_suffix is only intended to be used to avoid a name
+ * collision when multiple domains are created for a single
+ * device and the name is picked using a real device node.
+ * (Typical use-case is regmap-IRQ controllers for devices
+ * providing more than one physical IRQ.) There should be no
+ * need to use name_suffix with irqchip-fwnode.
+ */
+ if (info->name_suffix)
+ return -EINVAL;
switch (fwid->type) {
case IRQCHIP_FWNODE_NAMED:
case IRQCHIP_FWNODE_NAMED_ID:
- domain->name = bus_token ?
- kasprintf(GFP_KERNEL, "%s-%d",
- fwid->name, bus_token) :
- kstrdup(fwid->name, GFP_KERNEL);
- if (!domain->name)
- return -ENOMEM;
- domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
- break;
+ return alloc_name(domain, fwid->name, bus_token);
default:
domain->name = fwid->name;
- if (bus_token) {
- domain->name = kasprintf(GFP_KERNEL, "%s-%d",
- fwid->name, bus_token);
- if (!domain->name)
- return -ENOMEM;
- domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
- }
- break;
+ if (bus_token != DOMAIN_BUS_ANY)
+ return alloc_name(domain, fwid->name, bus_token);
}
- } else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
- is_software_node(fwnode)) {
- char *name;
-
- /*
- * fwnode paths contain '/', which debugfs is legitimately
- * unhappy about. Replace them with ':', which does
- * the trick and is not as offensive as '\'...
- */
- name = bus_token ?
- kasprintf(GFP_KERNEL, "%pfw-%d", fwnode, bus_token) :
- kasprintf(GFP_KERNEL, "%pfw", fwnode);
- if (!name)
- return -ENOMEM;
- domain->name = strreplace(name, '/', ':');
- domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ } else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) || is_software_node(fwnode)) {
+ return alloc_fwnode_name(domain, fwnode, bus_token, info->name_suffix);
}
- if (!domain->name) {
- if (fwnode)
- pr_err("Invalid fwnode type for irqdomain\n");
- domain->name = bus_token ?
- kasprintf(GFP_KERNEL, "unknown-%d-%d",
- atomic_inc_return(&unknown_domains),
- bus_token) :
- kasprintf(GFP_KERNEL, "unknown-%d",
- atomic_inc_return(&unknown_domains));
- if (!domain->name)
- return -ENOMEM;
- domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
- }
+ if (domain->name)
+ return 0;
- return 0;
+ if (fwnode)
+ pr_err("Invalid fwnode type for irqdomain\n");
+ return alloc_unknown_name(domain, bus_token);
}
static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info)
@@ -211,7 +237,7 @@ static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info
if (!domain)
return ERR_PTR(-ENOMEM);
- err = irq_domain_set_name(domain, info->fwnode, info->bus_token);
+ err = irq_domain_set_name(domain, info);
if (err) {
kfree(domain);
return ERR_PTR(err);
@@ -267,13 +293,20 @@ static void irq_domain_free(struct irq_domain *domain)
kfree(domain);
}
-/**
- * irq_domain_instantiate() - Instantiate a new irq domain data structure
- * @info: Domain information pointer pointing to the information for this domain
- *
- * Return: A pointer to the instantiated irq domain or an ERR_PTR value.
- */
-struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
+static void irq_domain_instantiate_descs(const struct irq_domain_info *info)
+{
+ if (!IS_ENABLED(CONFIG_SPARSE_IRQ))
+ return;
+
+ if (irq_alloc_descs(info->virq_base, info->virq_base, info->size,
+ of_node_to_nid(to_of_node(info->fwnode))) < 0) {
+ pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ info->virq_base);
+ }
+}
+
+static struct irq_domain *__irq_domain_instantiate(const struct irq_domain_info *info,
+ bool cond_alloc_descs, bool force_associate)
{
struct irq_domain *domain;
int err;
@@ -306,6 +339,19 @@ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
__irq_domain_publish(domain);
+ if (cond_alloc_descs && info->virq_base > 0)
+ irq_domain_instantiate_descs(info);
+
+ /*
+ * Legacy interrupt domains have a fixed Linux interrupt number
+ * associated. Other interrupt domains can request association by
+ * providing a Linux interrupt number > 0.
+ */
+ if (force_associate || info->virq_base > 0) {
+ irq_domain_associate_many(domain, info->virq_base, info->hwirq_base,
+ info->size - info->hwirq_base);
+ }
+
return domain;
err_domain_gc_remove:
@@ -315,6 +361,17 @@ err_domain_free:
irq_domain_free(domain);
return ERR_PTR(err);
}
+
+/**
+ * irq_domain_instantiate() - Instantiate a new irq domain data structure
+ * @info: Pointer to the information for this domain
+ *
+ * Return: A pointer to the instantiated irq domain or an ERR_PTR value.
+ */
+struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
+{
+ return __irq_domain_instantiate(info, false, false);
+}
EXPORT_SYMBOL_GPL(irq_domain_instantiate);
/**
@@ -413,28 +470,13 @@ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
.fwnode = fwnode,
.size = size,
.hwirq_max = size,
+ .virq_base = first_irq,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *domain;
-
- domain = irq_domain_instantiate(&info);
- if (IS_ERR(domain))
- return NULL;
+ struct irq_domain *domain = __irq_domain_instantiate(&info, true, false);
- if (first_irq > 0) {
- if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
- /* attempt to allocated irq_descs */
- int rc = irq_alloc_descs(first_irq, first_irq, size,
- of_node_to_nid(to_of_node(fwnode)));
- if (rc < 0)
- pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
- first_irq);
- }
- irq_domain_associate_many(domain, first_irq, 0, size);
- }
-
- return domain;
+ return IS_ERR(domain) ? NULL : domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_simple);
@@ -476,18 +518,14 @@ struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
.fwnode = fwnode,
.size = first_hwirq + size,
.hwirq_max = first_hwirq + size,
+ .hwirq_base = first_hwirq,
+ .virq_base = first_irq,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *domain;
+ struct irq_domain *domain = __irq_domain_instantiate(&info, false, true);
- domain = irq_domain_instantiate(&info);
- if (IS_ERR(domain))
- return NULL;
-
- irq_domain_associate_many(domain, first_irq, first_hwirq, size);
-
- return domain;
+ return IS_ERR(domain) ? NULL : domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
@@ -1365,7 +1403,7 @@ static int irq_domain_trim_hierarchy(unsigned int virq)
tail = NULL;
/* The first entry must have a valid irqchip */
- if (!irq_data->chip || IS_ERR(irq_data->chip))
+ if (IS_ERR_OR_NULL(irq_data->chip))
return -EINVAL;
/*
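
The refactoring above funnels every creation path through a single info structure. A minimal sketch of a caller populating struct irq_domain_info and instantiating a domain; the field names mirror the initializers used by irq_domain_create_simple() further down, while dev, my_domain_ops and priv are placeholders:

static int probe_sketch(struct device *dev, void *priv)
{
	struct irq_domain_info info = {
		.fwnode    = dev_fwnode(dev),	/* placeholder fwnode source */
		.size      = 32,		/* number of hardware IRQs */
		.hwirq_max = 32,
		.virq_base = 0,			/* no fixed Linux IRQ base */
		.ops       = &my_domain_ops,	/* hypothetical ops table */
		.host_data = priv,
	};
	struct irq_domain *domain = irq_domain_instantiate(&info);

	if (IS_ERR(domain))
		return PTR_ERR(domain);
	return 0;
}
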
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index dd53298ef1a5..f0803d6bd296 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -218,21 +218,20 @@ static void irq_validate_effective_affinity(struct irq_data *data)
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif
+static DEFINE_PER_CPU(struct cpumask, __tmp_mask);
+
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
+ struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask);
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
const struct cpumask *prog_mask;
int ret;
- static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
- static struct cpumask tmp_mask;
-
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
- raw_spin_lock(&tmp_mask_lock);
/*
* If this is a managed interrupt and housekeeping is enabled on
* it check whether the requested affinity mask intersects with
@@ -258,11 +257,11 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
- cpumask_and(&tmp_mask, mask, hk_mask);
- if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+ cpumask_and(tmp_mask, mask, hk_mask);
+ if (!cpumask_intersects(tmp_mask, cpu_online_mask))
prog_mask = mask;
else
- prog_mask = &tmp_mask;
+ prog_mask = tmp_mask;
} else {
prog_mask = mask;
}
@@ -272,16 +271,14 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
* unless we are being asked to force the affinity (in which
* case we do as we are told).
*/
- cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
- if (!force && !cpumask_empty(&tmp_mask))
- ret = chip->irq_set_affinity(data, &tmp_mask, force);
+ cpumask_and(tmp_mask, prog_mask, cpu_online_mask);
+ if (!force && !cpumask_empty(tmp_mask))
+ ret = chip->irq_set_affinity(data, tmp_mask, force);
else if (force)
ret = chip->irq_set_affinity(data, mask, force);
else
ret = -EINVAL;
- raw_spin_unlock(&tmp_mask_lock);
-
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
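
The change replaces a global tmp_mask protected by a raw spinlock with a per-CPU scratch mask, removing cross-CPU serialization on the affinity path. This is safe because these callers run with the interrupt descriptor lock held and interrupts disabled, so the per-CPU mask cannot be reused concurrently on the same CPU. The pattern, reduced to a minimal sketch (pick_online_targets() is illustrative):

static DEFINE_PER_CPU(struct cpumask, scratch_mask);

static int pick_online_targets(const struct cpumask *requested)
{
	/*
	 * No lock needed: callers run with the descriptor lock held and
	 * interrupts disabled, so nothing else can use this CPU's
	 * scratch mask concurrently.
	 */
	struct cpumask *tmp = this_cpu_ptr(&scratch_mask);

	cpumask_and(tmp, requested, cpu_online_mask);
	if (cpumask_empty(tmp))
		return -EINVAL;

	/* program the interrupt controller with *tmp here */
	return 0;
}
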
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 61ca924ef4b4..eb150afd671f 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -26,7 +26,7 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
* The outgoing CPU might be the last online target in a pending
 * interrupt move. If that's the case, clear the pending move bit.
*/
- if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
+ if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
irqd_clr_move_pending(data);
return false;
}
@@ -74,7 +74,7 @@ void irq_move_masked_irq(struct irq_data *idata)
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
+ if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
int ret;
ret = irq_do_set_affinity(data, desc->pending_mask, false);
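
Both migration.c hunks swap the indirect idiom ("the first CPU in the AND of two masks is past nr_cpu_ids") for the direct predicate. The forms are equivalent; the new one states the intent and avoids computing a CPU index:

/*
 * Equivalent predicates; the second is clearer and cheaper:
 *
 *   cpumask_any_and(a, b) >= nr_cpu_ids  <=>  !cpumask_intersects(a, b)
 *   cpumask_any_and(a, b) <  nr_cpu_ids  <=>   cpumask_intersects(a, b)
 */
if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
	/* at least one pending target CPU is still online */
}
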
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 5fa0547ece0c..3a24d6b5f559 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -82,7 +82,7 @@ static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
desc->dev = dev;
desc->nvec_used = nvec;
if (affinity) {
- desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
+ desc->affinity = kmemdup_array(affinity, nvec, sizeof(*desc->affinity), GFP_KERNEL);
if (!desc->affinity) {
kfree(desc);
return NULL;
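
kmemdup_array() is the array-aware variant of kmemdup(): the element count and size are multiplied inside the helper with overflow checking, instead of trusting the caller's open-coded multiplication. A sketch of the before/after, with src and nvec standing in for the msi_desc fields:

/* src points at nvec elements; both calls duplicate the array. */

/* Open-coded multiply: wraps silently if nvec * sizeof(*src) overflows. */
copy = kmemdup(src, nvec * sizeof(*src), GFP_KERNEL);

/*
 * Array-aware variant: the multiply is overflow-checked inside the
 * helper, so an overflow yields NULL instead of a too-small allocation.
 */
copy = kmemdup_array(src, nvec, sizeof(*src), GFP_KERNEL);
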
@@ -832,7 +832,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
struct irq_chip *chip = info->chip;
BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
- if (!chip->irq_set_affinity)
+ if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
chip->irq_set_affinity = msi_domain_set_affinity;
}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 8cccdf40725a..9081ada81c3d 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -52,10 +52,8 @@ static int show_irq_affinity(int type, struct seq_file *m)
case AFFINITY:
case AFFINITY_LIST:
mask = desc->irq_common_data.affinity;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (irqd_is_setaffinity_pending(&desc->irq_data))
- mask = desc->pending_mask;
-#endif
+ if (irq_move_pending(&desc->irq_data))
+ mask = irq_desc_get_pending_mask(desc);
break;
case EFFECTIVE:
case EFFECTIVE_LIST:
@@ -142,7 +140,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
int err;
if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
- return -EIO;
+ return -EPERM;
if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
@@ -362,8 +360,13 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
#ifdef CONFIG_SMP
+ umode_t umode = S_IRUGO;
+
+ if (irq_can_set_affinity_usr(desc->irq_data.irq))
+ umode |= S_IWUSR;
+
/* create /proc/irq/<irq>/smp_affinity */
- proc_create_data("smp_affinity", 0644, desc->dir,
+ proc_create_data("smp_affinity", umode, desc->dir,
&irq_affinity_proc_ops, irqp);
/* create /proc/irq/<irq>/affinity_hint */
@@ -371,7 +374,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
irq_affinity_hint_proc_show, irqp);
/* create /proc/irq/<irq>/smp_affinity_list */
- proc_create_data("smp_affinity_list", 0644, desc->dir,
+ proc_create_data("smp_affinity_list", umode, desc->dir,
&irq_affinity_list_proc_ops, irqp);
proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 274b6b7c718d..28a6be6e64fd 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/kmsan-checks.h>
#include <linux/mm.h>
#include <linux/preempt.h>
@@ -1067,6 +1068,32 @@ u64 kcov_common_handle(void)
}
EXPORT_SYMBOL(kcov_common_handle);
+#ifdef CONFIG_KCOV_SELFTEST
+static void __init selftest(void)
+{
+ unsigned long start;
+
+ pr_err("running self test\n");
+ /*
+ * Test that interrupts don't produce spurious coverage.
+ * The coverage callback filters out interrupt code, but only
+ * after the handler updates preempt count. Some code periodically
+ * leaks out of that section and leads to spurious coverage.
+ * It's hard to call the actual interrupt handler directly,
+ * so we just loop here for a bit waiting for a timer interrupt.
+ * We set kcov_mode to enable tracing, but don't setup the area,
+ * so any attempt to trace will crash. Note: we must not call any
+ * potentially traced functions in this region.
+ */
+ start = jiffies;
+ current->kcov_mode = KCOV_MODE_TRACE_PC;
+ while ((jiffies - start) * MSEC_PER_SEC / HZ < 300)
+ ;
+ current->kcov_mode = 0;
+ pr_err("done running self test\n");
+}
+#endif
+
static int __init kcov_init(void)
{
int cpu;
@@ -1086,6 +1113,10 @@ static int __init kcov_init(void)
*/
debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
+#ifdef CONFIG_KCOV_SELFTEST
+ selftest();
+#endif
+
return 0;
}
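
The selftest's loop bound converts elapsed jiffies to milliseconds by hand, as (jiffies - start) * MSEC_PER_SEC / HZ, because the usual jiffies_to_msecs() helper could itself be traced, which the comment forbids in this region. A standalone check of the arithmetic (the HZ value is chosen purely for illustration):

#include <stdio.h>

#define MSEC_PER_SEC	1000UL
#define HZ		250UL	/* illustrative CONFIG_HZ value */

int main(void)
{
	unsigned long start = 10000, now = 10080;	/* 80 ticks elapsed */
	unsigned long ms = (now - start) * MSEC_PER_SEC / HZ;

	/* 80 ticks at 250 Hz = 320 ms, so a 300 ms loop bound is met */
	printf("%lu ms\n", ms);				/* prints: 320 ms */
	return 0;
}
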
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 1d1d1b0e4248..53b21ae30e00 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -225,7 +225,7 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o
{
char kbuf[KSYM_NAME_LEN];
char *arg;
- int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);
+ const size_t read_len = min(count, sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, read_len))
return -EFAULT;
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 2595defe8c0d..d35d9792402d 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -23,7 +23,8 @@ int kimage_is_destination_range(struct kimage *image,
extern atomic_t __kexec_lock;
static inline bool kexec_trylock(void)
{
- return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0;
+ int old = 0;
+ return atomic_try_cmpxchg_acquire(&__kexec_lock, &old, 1);
}
static inline void kexec_unlock(void)
{
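
atomic_try_cmpxchg_acquire() returns a success flag and writes the observed value back through the expected-value pointer on failure, so the caller no longer compares the returned previous value; on most architectures this also saves a redundant compare. The two idioms side by side, as a sketch:

static atomic_t lock = ATOMIC_INIT(0);

/* Old idiom: fetch the previous value, then compare it. */
static bool trylock_cmpxchg(void)
{
	return atomic_cmpxchg_acquire(&lock, 0, 1) == 0;
}

/*
 * New idiom: the expected value is passed by reference and updated
 * with the observed value on failure; the return value is the
 * success flag itself.
 */
static bool trylock_try_cmpxchg(void)
{
	int old = 0;

	return atomic_try_cmpxchg_acquire(&lock, &old, 1);
}
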
diff --git a/kernel/kthread.c b/kernel/kthread.c
index f7be976ff88a..db4ceb0f503c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -845,8 +845,16 @@ repeat:
* event only cares about the address.
*/
trace_sched_kthread_work_execute_end(work, func);
- } else if (!freezing(current))
+ } else if (!freezing(current)) {
schedule();
+ } else {
+ /*
+			 * Handle the case where the current task remains
+			 * TASK_INTERRUPTIBLE. try_to_freeze() expects
+			 * the task to be TASK_RUNNING.
+ */
+ __set_current_state(TASK_RUNNING);
+ }
try_to_freeze();
cond_resched();
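
The new else branch matters because schedule() is skipped when a freeze is pending, leaving the worker in TASK_INTERRUPTIBLE, while try_to_freeze() must be entered in TASK_RUNNING. The loop shape, reduced to a sketch (work_pending() and do_work() are placeholders):

static int worker_sketch(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (work_pending()) {			/* placeholder */
			__set_current_state(TASK_RUNNING);
			do_work();			/* placeholder */
		} else if (!freezing(current)) {
			schedule();	/* returns in TASK_RUNNING */
		} else {
			/*
			 * About to freeze while still TASK_INTERRUPTIBLE:
			 * restore TASK_RUNNING, as try_to_freeze()
			 * requires it.
			 */
			__set_current_state(TASK_RUNNING);
		}
		try_to_freeze();
		cond_resched();
	}
	return 0;
}
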
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0349f957e672..7963deac33c3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -56,6 +56,7 @@
#include <linux/kprobes.h>
#include <linux/lockdep.h>
#include <linux/context_tracking.h>
+#include <linux/console.h>
#include <asm/sections.h>
@@ -573,8 +574,10 @@ static struct lock_trace *save_trace(void)
if (!debug_locks_off_graph_unlock())
return NULL;
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
dump_stack();
+ nbcon_cpu_emergency_exit();
return NULL;
}
@@ -887,11 +890,13 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
instrumentation_begin();
debug_locks_off();
+ nbcon_cpu_emergency_enter();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
instrumentation_end();
return NULL;
}
@@ -968,11 +973,13 @@ static bool assign_lock_key(struct lockdep_map *lock)
else {
/* Debug-check: all keys must be persistent! */
debug_locks_off();
+ nbcon_cpu_emergency_enter();
pr_err("INFO: trying to register non-static key.\n");
pr_err("The code is fine but needs lockdep annotation, or maybe\n");
pr_err("you didn't initialize this object before use?\n");
pr_err("turning off the locking correctness validator.\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
return false;
}
@@ -1316,8 +1323,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
return NULL;
}
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
+ nbcon_cpu_emergency_exit();
return NULL;
}
nr_lock_classes++;
@@ -1349,11 +1358,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (verbose(class)) {
graph_unlock();
+ nbcon_cpu_emergency_enter();
printk("\nnew class %px: %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
if (!graph_lock()) {
return NULL;
@@ -1392,8 +1403,10 @@ static struct lock_list *alloc_list_entry(void)
if (!debug_locks_off_graph_unlock())
return NULL;
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
dump_stack();
+ nbcon_cpu_emergency_exit();
return NULL;
}
nr_list_entries++;
@@ -2039,6 +2052,8 @@ static noinline void print_circular_bug(struct lock_list *this,
depth = get_lock_depth(target);
+ nbcon_cpu_emergency_enter();
+
print_circular_bug_header(target, depth, check_src, check_tgt);
parent = get_lock_parent(target);
@@ -2057,6 +2072,8 @@ static noinline void print_circular_bug(struct lock_list *this,
printk("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
static noinline void print_bfs_bug(int ret)
@@ -2569,6 +2586,8 @@ print_bad_irq_dependency(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("=====================================================\n");
pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
@@ -2618,11 +2637,13 @@ print_bad_irq_dependency(struct task_struct *curr,
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
next_root->trace = save_trace();
if (!next_root->trace)
- return;
+ goto out;
print_shortest_lock_dependencies(forwards_entry, next_root);
pr_warn("\nstack backtrace:\n");
dump_stack();
+out:
+ nbcon_cpu_emergency_exit();
}
static const char *state_names[] = {
@@ -2987,6 +3008,8 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("============================================\n");
pr_warn("WARNING: possible recursive locking detected\n");
@@ -3009,6 +3032,8 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
/*
@@ -3606,6 +3631,8 @@ static void print_collision(struct task_struct *curr,
struct held_lock *hlock_next,
struct lock_chain *chain)
{
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("============================\n");
pr_warn("WARNING: chain_key collision\n");
@@ -3622,6 +3649,8 @@ static void print_collision(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
#endif
@@ -3712,8 +3741,10 @@ static inline int add_chain_cache(struct task_struct *curr,
if (!debug_locks_off_graph_unlock())
return 0;
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
dump_stack();
+ nbcon_cpu_emergency_exit();
return 0;
}
chain->chain_key = chain_key;
@@ -3730,8 +3761,10 @@ static inline int add_chain_cache(struct task_struct *curr,
if (!debug_locks_off_graph_unlock())
return 0;
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
dump_stack();
+ nbcon_cpu_emergency_exit();
return 0;
}
@@ -3970,6 +4003,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
if (!debug_locks_off() || debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("================================\n");
pr_warn("WARNING: inconsistent lock state\n");
@@ -3998,6 +4033,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
/*
@@ -4032,6 +4069,8 @@ print_irq_inversion_bug(struct task_struct *curr,
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("========================================================\n");
pr_warn("WARNING: possible irq lock inversion dependency detected\n");
@@ -4072,11 +4111,13 @@ print_irq_inversion_bug(struct task_struct *curr,
pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
root->trace = save_trace();
if (!root->trace)
- return;
+ goto out;
print_shortest_lock_dependencies(other, root);
pr_warn("\nstack backtrace:\n");
dump_stack();
+out:
+ nbcon_cpu_emergency_exit();
}
/*
@@ -4153,6 +4194,8 @@ void print_irqtrace_events(struct task_struct *curr)
{
const struct irqtrace_events *trace = &curr->irqtrace;
+ nbcon_cpu_emergency_enter();
+
printk("irq event stamp: %u\n", trace->irq_events);
printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
@@ -4166,6 +4209,8 @@ void print_irqtrace_events(struct task_struct *curr)
printk("softirqs last disabled at (%u): [<%px>] %pS\n",
trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
(void *)trace->softirq_disable_ip);
+
+ nbcon_cpu_emergency_exit();
}
static int HARDIRQ_verbose(struct lock_class *class)
@@ -4686,10 +4731,12 @@ unlock:
* We must printk outside of the graph_lock:
*/
if (ret == 2) {
+ nbcon_cpu_emergency_enter();
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
print_lock(this);
print_irqtrace_events(curr);
dump_stack();
+ nbcon_cpu_emergency_exit();
}
return ret;
@@ -4730,6 +4777,8 @@ print_lock_invalid_wait_context(struct task_struct *curr,
if (debug_locks_silent)
return 0;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("[ BUG: Invalid wait context ]\n");
@@ -4749,6 +4798,8 @@ print_lock_invalid_wait_context(struct task_struct *curr,
pr_warn("stack backtrace:\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
+
return 0;
}
@@ -4956,6 +5007,8 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
if (debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("==================================\n");
pr_warn("WARNING: Nested lock was not taken\n");
@@ -4976,6 +5029,8 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -5024,11 +5079,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
debug_class_ops_inc(class);
if (very_verbose(class)) {
+ nbcon_cpu_emergency_enter();
printk("\nacquire class [%px] %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
}
/*
@@ -5155,6 +5212,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
+ nbcon_cpu_emergency_enter();
print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
printk(KERN_DEBUG "depth: %i max: %lu!\n",
curr->lockdep_depth, MAX_LOCK_DEPTH);
@@ -5162,6 +5220,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
lockdep_print_held_locks(current);
debug_show_all_locks();
dump_stack();
+ nbcon_cpu_emergency_exit();
return 0;
}
@@ -5181,6 +5240,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
if (debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("=====================================\n");
pr_warn("WARNING: bad unlock balance detected!\n");
@@ -5197,6 +5258,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
static noinstr int match_held_lock(const struct held_lock *hlock,
@@ -5901,6 +5964,8 @@ static void print_lock_contention_bug(struct task_struct *curr,
if (debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("=================================\n");
pr_warn("WARNING: bad contention detected!\n");
@@ -5917,6 +5982,8 @@ static void print_lock_contention_bug(struct task_struct *curr,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
static void
@@ -6536,6 +6603,8 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
if (debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("=========================\n");
pr_warn("WARNING: held lock freed!\n");
@@ -6548,6 +6617,8 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
@@ -6594,6 +6665,8 @@ static void print_held_locks_bug(void)
if (debug_locks_silent)
return;
+ nbcon_cpu_emergency_enter();
+
pr_warn("\n");
pr_warn("====================================\n");
pr_warn("WARNING: %s/%d still has locks held!\n",
@@ -6603,6 +6676,8 @@ static void print_held_locks_bug(void)
lockdep_print_held_locks(current);
pr_warn("\nstack backtrace:\n");
dump_stack();
+
+ nbcon_cpu_emergency_exit();
}
void debug_check_no_locks_held(void)
@@ -6660,6 +6735,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
if (unlikely(curr->lockdep_depth)) {
if (!debug_locks_off())
return;
+ nbcon_cpu_emergency_enter();
pr_warn("\n");
pr_warn("================================================\n");
pr_warn("WARNING: lock held when returning to user space!\n");
@@ -6668,6 +6744,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
pr_warn("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
+ nbcon_cpu_emergency_exit();
}
/*
@@ -6684,6 +6761,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
bool rcu = warn_rcu_enter();
/* Note: the following can be executed concurrently, so be careful. */
+ nbcon_cpu_emergency_enter();
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("WARNING: suspicious RCU usage\n");
@@ -6722,6 +6800,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
+ nbcon_cpu_emergency_exit();
warn_rcu_exit(rcu);
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
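
All of the lockdep reports above follow one pattern: the whole multi-line diagnostic is bracketed by nbcon_cpu_emergency_enter()/exit(), so nbcon consoles can flush the report as a unit at emergency priority instead of interleaving it with unrelated output. The shape of the pattern, as a sketch:

static void print_example_report(void)
{
	if (debug_locks_silent)
		return;

	nbcon_cpu_emergency_enter();

	pr_warn("WARNING: example diagnostic\n");	/* placeholder body */
	dump_stack();

	nbcon_cpu_emergency_exit();
}
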
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index fba1229f1de6..ebebd0eec7f6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task)
{
int prio = task->prio;
- if (!rt_prio(prio))
+ if (!rt_or_dl_prio(prio))
return DEFAULT_PRIO;
return prio;
@@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
* Note that RT tasks are excluded from same priority (lateral)
* steals to prevent the introduction of an unbounded latency.
*/
- if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
+ if (rt_or_dl_prio(waiter->tree.prio))
return false;
return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
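
rt_or_dl_prio() folds the open-coded rt_prio(p) || dl_prio(p) checks into one helper. A sketch of its semantics, assuming the mainline priority layout where deadline priorities are negative (below MAX_DL_PRIO == 0) and RT priorities span 0..MAX_RT_PRIO-1, so a single range check covers both classes:

static inline bool rt_or_dl_prio_sketch(int prio)
{
	return prio < MAX_RT_PRIO;	/* DL (< 0) and RT (0..99) */
}
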
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 33cac79e3994..5ded7dff46ef 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -631,7 +631,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
* if it is an RT task or wait in the wait queue
* for too long.
*/
- if (has_handoff || (!rt_task(waiter->task) &&
+ if (has_handoff || (!rt_or_dl_task(waiter->task) &&
!time_after(jiffies, waiter->timeout)))
return false;
@@ -914,7 +914,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
if (owner_state != OWNER_WRITER) {
if (need_resched())
break;
- if (rt_task(current) &&
+ if (rt_or_dl_task(current) &&
(prev_owner_state != OWNER_WRITER))
break;
}
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 78719e1ef1b1..10a5736a21c2 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -697,3 +697,4 @@ module_exit(test_ww_mutex_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("API test facility for ww_mutexes");
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 3ad2cc4823e5..76d204b7d29c 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
int a_prio = a->task->prio;
int b_prio = b->task->prio;
- if (rt_prio(a_prio) || rt_prio(b_prio)) {
+ if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {
if (a_prio > b_prio)
return true;
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 4047b6d48255..05a9a06a140c 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -160,6 +160,7 @@ config MODULE_UNLOAD_TAINT_TRACKING
config MODVERSIONS
bool "Module versioning support"
+ depends on !COMPILE_TEST
help
Usually, you have to use modules compiled with your kernel.
Saying Y here makes it sometimes possible to use modules
diff --git a/kernel/module/Makefile b/kernel/module/Makefile
index a10b2b9a6fdf..50ffcc413b54 100644
--- a/kernel/module/Makefile
+++ b/kernel/module/Makefile
@@ -5,7 +5,7 @@
# These are called from save_stack_trace() on slub debug path,
# and produce insane amounts of uninteresting coverage.
-KCOV_INSTRUMENT_module.o := n
+KCOV_INSTRUMENT_main.o := n
obj-y += main.o
obj-y += strict_rwx.o
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 71396e297499..49b9bca9de12 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -3234,7 +3234,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
return -EINVAL;
f = fdget(fd);
- err = idempotent_init_module(f.file, uargs, flags);
+ err = idempotent_init_module(fd_file(f), uargs, flags);
fdput(f);
return err;
}
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 6ec3deec68c2..dc952c3b05af 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -550,15 +550,15 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
struct nsset nsset = {};
int err = 0;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (proc_ns_file(f.file)) {
- ns = get_proc_ns(file_inode(f.file));
+ if (proc_ns_file(fd_file(f))) {
+ ns = get_proc_ns(file_inode(fd_file(f)));
if (flags && (ns->ops->type != flags))
err = -EINVAL;
flags = ns->ops->type;
- } else if (!IS_ERR(pidfd_pid(f.file))) {
+ } else if (!IS_ERR(pidfd_pid(fd_file(f)))) {
err = check_setns_flags(flags);
} else {
err = -EINVAL;
@@ -570,10 +570,10 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
if (err)
goto out;
- if (proc_ns_file(f.file))
+ if (proc_ns_file(fd_file(f)))
err = validate_ns(&nsset, ns);
else
- err = validate_nsset(&nsset, pidfd_pid(f.file));
+ err = validate_nsset(&nsset, pidfd_pid(fd_file(f)));
if (!err) {
commit_nsset(&nsset);
perf_event_namespaces(current);
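
fd_file() is the accessor for the struct file pointer carried inside a struct fd; routing every access through it, rather than touching f.file directly, lets the struct fd representation change later without touching callers again. The pattern, as a sketch (do_something() is a placeholder):

static int use_fd_sketch(unsigned int fd)
{
	struct fd f = fdget(fd);
	int err;

	if (!fd_file(f))		/* accessor instead of f.file */
		return -EBADF;

	err = do_something(fd_file(f));	/* placeholder consumer */

	fdput(f);
	return err;
}
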
diff --git a/kernel/numa.c b/kernel/numa.c
deleted file mode 100644
index 67ca6b8585c0..000000000000
--- a/kernel/numa.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/printk.h>
-#include <linux/numa.h>
-
-/* Stub functions: */
-
-#ifndef memory_add_physaddr_to_nid
-int memory_add_physaddr_to_nid(u64 start)
-{
- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
-#ifndef phys_to_target_node
-int phys_to_target_node(u64 start)
-{
- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
-#endif
diff --git a/kernel/padata.c b/kernel/padata.c
index 0fa6c2895460..d899f34558af 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata)
/* Sort in ascending order of sequence number. */
list_for_each_prev(pos, &reorder->list) {
cur = list_entry(pos, struct padata_priv, list);
- if (cur->seq_nr < padata->seq_nr)
+		/* Compare by signed difference to handle integer wraparound */
+ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
break;
}
list_add(&padata->list, pos);
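
The signed-difference comparison stays correct when seq_nr wraps around, where a direct less-than does not. A standalone demonstration with values straddling the 32-bit wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cur = 0xffffffffu;	/* sequence just before the wrap */
	uint32_t new = 2u;		/* logically later, after the wrap */

	/* Direct compare is wrong across the wrap: claims cur >= new. */
	printf("direct: %d\n", cur < new);			/* prints: 0 */

	/* Signed difference is right: cur orders before new. */
	printf("signed: %d\n", (int32_t)(cur - new) < 0);	/* prints: 1 */

	return 0;
}
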
@@ -512,9 +513,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
* thread function. Load balance large jobs between threads by
* increasing the number of chunks, guarantee at least the minimum
* chunk size from the caller, and honor the caller's alignment.
+ * Ensure chunk_size is at least 1 to prevent divide-by-0
+ * panic in padata_mt_helper().
*/
ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
ps.chunk_size = max(ps.chunk_size, job->min_chunk);
+ ps.chunk_size = max(ps.chunk_size, 1ul);
ps.chunk_size = roundup(ps.chunk_size, job->align);
/*
diff --git a/kernel/panic.c b/kernel/panic.c
index 2a0449144f82..fbc59b3b64d0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -374,6 +374,8 @@ void panic(const char *fmt, ...)
panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
+ printk_legacy_allow_panic_sync();
+
/*
* Run any panic handlers, including those that might need to
* add information to the kmsg dump output.
@@ -382,7 +384,7 @@ void panic(const char *fmt, ...)
panic_print_sys_info(false);
- kmsg_dump(KMSG_DUMP_PANIC);
+ kmsg_dump_desc(KMSG_DUMP_PANIC, buf);
/*
* If you doubt kdump always works fine in any situation,
@@ -463,6 +465,7 @@ void panic(const char *fmt, ...)
* Explicitly flush the kernel log buffer one last time.
*/
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+ nbcon_atomic_flush_unsafe();
local_irq_enable();
for (i = 0; ; i += PANIC_TIMER_STEP) {
@@ -682,6 +685,7 @@ bool oops_may_print(void)
*/
void oops_enter(void)
{
+ nbcon_cpu_emergency_enter();
tracing_off();
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
@@ -704,6 +708,7 @@ void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
+ nbcon_cpu_emergency_exit();
kmsg_dump(KMSG_DUMP_OOPS);
}
@@ -715,6 +720,8 @@ struct warn_args {
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args)
{
+ nbcon_cpu_emergency_enter();
+
disable_trace_on_warning();
if (file)
@@ -750,6 +757,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
+
+ nbcon_cpu_emergency_exit();
}
#ifdef CONFIG_BUG
diff --git a/kernel/pid.c b/kernel/pid.c
index da76ed1873f7..2715afb77eab 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -540,13 +540,13 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
struct pid *pid;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- pid = pidfd_pid(f.file);
+ pid = pidfd_pid(fd_file(f));
if (!IS_ERR(pid)) {
get_pid(pid);
- *flags = f.file->f_flags;
+ *flags = fd_file(f)->f_flags;
}
fdput(f);
@@ -755,10 +755,10 @@ SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
return -EINVAL;
f = fdget(pidfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- pid = pidfd_pid(f.file);
+ pid = pidfd_pid(fd_file(f));
if (IS_ERR(pid))
ret = PTR_ERR(pid);
else
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 0a213f69a9e4..e35829d36039 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -1123,11 +1123,11 @@ static const char * const hibernation_modes[] = {
static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
+ ssize_t count = 0;
int i;
- char *start = buf;
if (!hibernation_available())
- return sprintf(buf, "[disabled]\n");
+ return sysfs_emit(buf, "[disabled]\n");
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
if (!hibernation_modes[i])
@@ -1147,12 +1147,16 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
continue;
}
if (i == hibernation_mode)
- buf += sprintf(buf, "[%s] ", hibernation_modes[i]);
+ count += sysfs_emit_at(buf, count, "[%s] ", hibernation_modes[i]);
else
- buf += sprintf(buf, "%s ", hibernation_modes[i]);
+ count += sysfs_emit_at(buf, count, "%s ", hibernation_modes[i]);
}
- buf += sprintf(buf, "\n");
- return buf-start;
+
+ /* Convert the last space to a newline if needed. */
+ if (count > 0)
+ buf[count - 1] = '\n';
+
+ return count;
}
static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
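
sysfs_emit() and sysfs_emit_at() bound the output to PAGE_SIZE and return the number of bytes emitted, replacing the fragile advance-a-char-pointer-with-sprintf() pattern used before. The accumulation idiom shared by disk_show() above and the attributes below, as a sketch:

static ssize_t modes_show_sketch(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	static const char * const modes[] = { "shutdown", "reboot", "suspend" };
	ssize_t count = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++)
		count += sysfs_emit_at(buf, count, "%s ", modes[i]);

	/* Convert the trailing space to the terminating newline. */
	if (count > 0)
		buf[count - 1] = '\n';

	return count;
}
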
@@ -1210,8 +1214,8 @@ power_attr(disk);
static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
- MINOR(swsusp_resume_device));
+ return sysfs_emit(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
+ MINOR(swsusp_resume_device));
}
static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1270,7 +1274,7 @@ power_attr(resume);
static ssize_t resume_offset_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
}
static ssize_t resume_offset_store(struct kobject *kobj,
@@ -1293,7 +1297,7 @@ power_attr(resume_offset);
static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return sprintf(buf, "%lu\n", image_size);
+ return sysfs_emit(buf, "%lu\n", image_size);
}
static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1314,7 +1318,7 @@ power_attr(image_size);
static ssize_t reserved_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%lu\n", reserved_size);
+ return sysfs_emit(buf, "%lu\n", reserved_size);
}
static ssize_t reserved_size_store(struct kobject *kobj,
diff --git a/kernel/power/main.c b/kernel/power/main.c
index a9e0693aaf69..6254814d4817 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -115,7 +115,7 @@ int pm_async_enabled = 1;
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", pm_async_enabled);
+ return sysfs_emit(buf, "%d\n", pm_async_enabled);
}
static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -139,7 +139,7 @@ power_attr(pm_async);
static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- char *s = buf;
+ ssize_t count = 0;
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
@@ -149,17 +149,17 @@ static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
const char *label = mem_sleep_states[i];
if (mem_sleep_current == i)
- s += sprintf(s, "[%s] ", label);
+ count += sysfs_emit_at(buf, count, "[%s] ", label);
else
- s += sprintf(s, "%s ", label);
+ count += sysfs_emit_at(buf, count, "%s ", label);
}
}
/* Convert the last space to a newline if needed. */
- if (s != buf)
- *(s-1) = '\n';
+ if (count > 0)
+ buf[count - 1] = '\n';
- return (s - buf);
+ return count;
}
static suspend_state_t decode_suspend_state(const char *buf, size_t n)
@@ -220,7 +220,7 @@ bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
static ssize_t sync_on_suspend_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", sync_on_suspend_enabled);
+ return sysfs_emit(buf, "%d\n", sync_on_suspend_enabled);
}
static ssize_t sync_on_suspend_store(struct kobject *kobj,
@@ -257,22 +257,22 @@ static const char * const pm_tests[__TEST_AFTER_LAST] = {
static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- char *s = buf;
+ ssize_t count = 0;
int level;
for (level = TEST_FIRST; level <= TEST_MAX; level++)
if (pm_tests[level]) {
if (level == pm_test_level)
- s += sprintf(s, "[%s] ", pm_tests[level]);
+ count += sysfs_emit_at(buf, count, "[%s] ", pm_tests[level]);
else
- s += sprintf(s, "%s ", pm_tests[level]);
+ count += sysfs_emit_at(buf, count, "%s ", pm_tests[level]);
}
- if (s != buf)
- /* convert the last space to a newline */
- *(s-1) = '\n';
+ /* Convert the last space to a newline if needed. */
+ if (count > 0)
+ buf[count - 1] = '\n';
- return (s - buf);
+ return count;
}
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -390,7 +390,7 @@ static const char * const suspend_step_names[] = {
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
- return sprintf(buf, format_str, suspend_stats._name); \
+ return sysfs_emit(buf, format_str, suspend_stats._name);\
} \
static struct kobj_attribute _name = __ATTR_RO(_name)
@@ -404,7 +404,7 @@ suspend_attr(max_hw_sleep, "%llu\n");
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
- return sprintf(buf, "%u\n", \
+ return sysfs_emit(buf, "%u\n", \
suspend_stats.step_failures[step-1]); \
} \
static struct kobj_attribute _name = __ATTR_RO(_name)
@@ -428,7 +428,7 @@ static ssize_t last_failed_dev_show(struct kobject *kobj,
index %= REC_FAILED_NUM;
last_failed_dev = suspend_stats.failed_devs[index];
- return sprintf(buf, "%s\n", last_failed_dev);
+ return sysfs_emit(buf, "%s\n", last_failed_dev);
}
static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
@@ -442,7 +442,7 @@ static ssize_t last_failed_errno_show(struct kobject *kobj,
index %= REC_FAILED_NUM;
last_failed_errno = suspend_stats.errno[index];
- return sprintf(buf, "%d\n", last_failed_errno);
+ return sysfs_emit(buf, "%d\n", last_failed_errno);
}
static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
@@ -456,7 +456,7 @@ static ssize_t last_failed_step_show(struct kobject *kobj,
index %= REC_FAILED_NUM;
step = suspend_stats.failed_steps[index];
- return sprintf(buf, "%s\n", suspend_step_names[step]);
+ return sysfs_emit(buf, "%s\n", suspend_step_names[step]);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
@@ -571,7 +571,7 @@ bool pm_print_times_enabled;
static ssize_t pm_print_times_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", pm_print_times_enabled);
+ return sysfs_emit(buf, "%d\n", pm_print_times_enabled);
}
static ssize_t pm_print_times_store(struct kobject *kobj,
@@ -604,7 +604,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
if (!pm_wakeup_irq())
return -ENODATA;
- return sprintf(buf, "%u\n", pm_wakeup_irq());
+ return sysfs_emit(buf, "%u\n", pm_wakeup_irq());
}
power_attr_ro(pm_wakeup_irq);
@@ -620,7 +620,7 @@ EXPORT_SYMBOL_GPL(pm_debug_messages_should_print);
static ssize_t pm_debug_messages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", pm_debug_messages_on);
+ return sysfs_emit(buf, "%d\n", pm_debug_messages_on);
}
static ssize_t pm_debug_messages_store(struct kobject *kobj,
@@ -668,21 +668,23 @@ struct kobject *power_kobj;
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- char *s = buf;
+ ssize_t count = 0;
#ifdef CONFIG_SUSPEND
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
if (pm_states[i])
- s += sprintf(s,"%s ", pm_states[i]);
+ count += sysfs_emit_at(buf, count, "%s ", pm_states[i]);
#endif
if (hibernation_available())
- s += sprintf(s, "disk ");
- if (s != buf)
- /* convert the last space to a newline */
- *(s-1) = '\n';
- return (s - buf);
+ count += sysfs_emit_at(buf, count, "disk ");
+
+ /* Convert the last space to a newline if needed. */
+ if (count > 0)
+ buf[count - 1] = '\n';
+
+ return count;
}
static suspend_state_t decode_state(const char *buf, size_t n)
@@ -782,7 +784,7 @@ static ssize_t wakeup_count_show(struct kobject *kobj,
unsigned int val;
return pm_get_wakeup_count(&val, true) ?
- sprintf(buf, "%u\n", val) : -EINTR;
+ sysfs_emit(buf, "%u\n", val) : -EINTR;
}
static ssize_t wakeup_count_store(struct kobject *kobj,
@@ -824,17 +826,17 @@ static ssize_t autosleep_show(struct kobject *kobj,
suspend_state_t state = pm_autosleep_state();
if (state == PM_SUSPEND_ON)
- return sprintf(buf, "off\n");
+ return sysfs_emit(buf, "off\n");
#ifdef CONFIG_SUSPEND
if (state < PM_SUSPEND_MAX)
- return sprintf(buf, "%s\n", pm_states[state] ?
+ return sysfs_emit(buf, "%s\n", pm_states[state] ?
pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
- return sprintf(buf, "disk\n");
+ return sysfs_emit(buf, "disk\n");
#else
- return sprintf(buf, "error");
+ return sysfs_emit(buf, "error\n");
#endif
}
@@ -903,7 +905,7 @@ int pm_trace_enabled;
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", pm_trace_enabled);
+ return sysfs_emit(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
@@ -940,7 +942,7 @@ power_attr_ro(pm_trace_dev_match);
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", freeze_timeout_msecs);
+ return sysfs_emit(buf, "%u\n", freeze_timeout_msecs);
}
static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 405eddbda4fc..30894d8f0a78 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1365,11 +1365,6 @@ static unsigned int count_highmem_pages(void)
}
return n;
}
-#else
-static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
-{
- return NULL;
-}
#endif /* CONFIG_HIGHMEM */
/**
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 19dcc5832651..3fcb48502adb 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -2,11 +2,12 @@
/*
* internal.h - printk internal definitions
*/
-#include <linux/percpu.h>
#include <linux/console.h>
-#include "printk_ringbuffer.h"
+#include <linux/percpu.h>
+#include <linux/types.h>
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
+struct ctl_table;
void __init printk_sysctl_init(void);
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
@@ -20,6 +21,19 @@ int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
(con->flags & CON_BOOT) ? "boot" : "", \
con->name, con->index, ##__VA_ARGS__)
+/*
+ * Identify if legacy printing is forced in a dedicated kthread. If
+ * true, all printing via console lock occurs within a dedicated
+ * legacy printer thread. The only exception is on panic, after the
+ * nbcon consoles have had their chance to print the panic messages
+ * first.
+ */
+#ifdef CONFIG_PREEMPT_RT
+# define force_legacy_kthread() (true)
+#else
+# define force_legacy_kthread() (false)
+#endif
+
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
@@ -43,7 +57,11 @@ enum printk_info_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
+struct printk_ringbuffer;
+struct dev_printk_info;
+
extern struct printk_ringbuffer *prb;
+extern bool printk_kthreads_running;
__printf(4, 0)
int vprintk_store(int facility, int level,
@@ -53,6 +71,9 @@ int vprintk_store(int facility, int level,
__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
+void __printk_safe_enter(void);
+void __printk_safe_exit(void);
+
bool printk_percpu_data_ready(void);
#define printk_safe_enter_irqsave(flags) \
@@ -68,15 +89,85 @@ bool printk_percpu_data_ready(void);
} while (0)
void defer_console_output(void);
+bool is_printk_legacy_deferred(void);
u16 printk_parse_prefix(const char *text, int *level,
enum printk_info_flags *flags);
+void console_lock_spinning_enable(void);
+int console_lock_spinning_disable_and_check(int cookie);
u64 nbcon_seq_read(struct console *con);
void nbcon_seq_force(struct console *con, u64 seq);
bool nbcon_alloc(struct console *con);
-void nbcon_init(struct console *con);
void nbcon_free(struct console *con);
+enum nbcon_prio nbcon_get_default_prio(void);
+void nbcon_atomic_flush_pending(void);
+bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+ int cookie, bool use_atomic);
+bool nbcon_kthread_create(struct console *con);
+void nbcon_kthread_stop(struct console *con);
+void nbcon_kthreads_wake(void);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ /* The write_atomic() callback is optional. */
+ if (use_atomic && !con->write_atomic)
+ return false;
+
+ /*
+ * For the !use_atomic case, @printk_kthreads_running is not
+ * checked because the write_thread() callback is also used
+ * via the legacy loop when the printer threads are not
+ * available.
+ */
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+	 * cope (CON_ANYTIME), don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
+/**
+ * nbcon_kthread_wake - Wake up a console printing thread
+ * @con: Console to operate on
+ */
+static inline void nbcon_kthread_wake(struct console *con)
+{
+ /*
+ * Guarantee any new records can be seen by tasks preparing to wait
+ * before this context checks if the rcuwait is empty.
+ *
+ * The full memory barrier in rcuwait_wake_up() pairs with the full
+ * memory barrier within set_current_state() of
+ * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
+ * adds the waiter but before it has checked the wait condition.
+ *
+ * This pairs with nbcon_kthread_func:A.
+ */
+ rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
+}
#else
@@ -84,6 +175,8 @@ void nbcon_free(struct console *con);
#define PRINTK_MESSAGE_MAX 0
#define PRINTKRB_RECORD_MAX 0
+#define printk_kthreads_running (false)
+
/*
* In !PRINTK builds we still export console_sem
* semaphore and some of console functions (console_unlock()/etc.), so
@@ -93,14 +186,119 @@ void nbcon_free(struct console *con);
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
static inline bool printk_percpu_data_ready(void) { return false; }
+static inline void defer_console_output(void) { }
+static inline bool is_printk_legacy_deferred(void) { return false; }
static inline u64 nbcon_seq_read(struct console *con) { return 0; }
static inline void nbcon_seq_force(struct console *con, u64 seq) { }
static inline bool nbcon_alloc(struct console *con) { return false; }
-static inline void nbcon_init(struct console *con) { }
static inline void nbcon_free(struct console *con) { }
+static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+static inline void nbcon_atomic_flush_pending(void) { }
+static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+ int cookie, bool use_atomic) { return false; }
+static inline void nbcon_kthread_wake(struct console *con) { }
+static inline void nbcon_kthreads_wake(void) { }
+
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
#endif /* CONFIG_PRINTK */
+extern bool have_boot_console;
+extern bool have_nbcon_console;
+extern bool have_legacy_console;
+extern bool legacy_allow_panic_sync;
+
+/**
+ * struct console_flush_type - Define available console flush methods
+ * @nbcon_atomic: Flush directly using nbcon_atomic() callback
+ * @nbcon_offload: Offload flush to printer thread
+ * @legacy_direct: Call the legacy loop in this context
+ * @legacy_offload: Offload the legacy loop into IRQ or legacy thread
+ *
+ * Note that the legacy loop also flushes the nbcon consoles.
+ */
+struct console_flush_type {
+ bool nbcon_atomic;
+ bool nbcon_offload;
+ bool legacy_direct;
+ bool legacy_offload;
+};
+
+/*
+ * Identify which console flushing methods should be used in the context of
+ * the caller.
+ */
+static inline void printk_get_console_flush_type(struct console_flush_type *ft)
+{
+ memset(ft, 0, sizeof(*ft));
+
+ switch (nbcon_get_default_prio()) {
+ case NBCON_PRIO_NORMAL:
+ if (have_nbcon_console && !have_boot_console) {
+ if (printk_kthreads_running)
+ ft->nbcon_offload = true;
+ else
+ ft->nbcon_atomic = true;
+ }
+
+ /* Legacy consoles are flushed directly when possible. */
+ if (have_legacy_console || have_boot_console) {
+ if (!is_printk_legacy_deferred())
+ ft->legacy_direct = true;
+ else
+ ft->legacy_offload = true;
+ }
+ break;
+
+ case NBCON_PRIO_EMERGENCY:
+ if (have_nbcon_console && !have_boot_console)
+ ft->nbcon_atomic = true;
+
+ /* Legacy consoles are flushed directly when possible. */
+ if (have_legacy_console || have_boot_console) {
+ if (!is_printk_legacy_deferred())
+ ft->legacy_direct = true;
+ else
+ ft->legacy_offload = true;
+ }
+ break;
+
+ case NBCON_PRIO_PANIC:
+ /*
+		 * In panic, the nbcon consoles will print directly, but
+		 * only if there are no boot consoles.
+ */
+ if (have_nbcon_console && !have_boot_console)
+ ft->nbcon_atomic = true;
+
+ if (have_legacy_console || have_boot_console) {
+ /*
+ * This is the same decision as NBCON_PRIO_NORMAL
+ * except that offloading never occurs in panic.
+ *
+ * Note that console_flush_on_panic() will flush
+ * legacy consoles anyway, even if unsafe.
+ */
+ if (!is_printk_legacy_deferred())
+ ft->legacy_direct = true;
+
+ /*
+ * In panic, if nbcon atomic printing occurs,
+ * the legacy consoles must remain silent until
+ * explicitly allowed.
+ */
+ if (ft->nbcon_atomic && !legacy_allow_panic_sync)
+ ft->legacy_direct = false;
+ }
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
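
On the consumer side, a caller snapshots the flush type once and acts on each flag; a sketch using only the functions declared earlier in this header:

static void flush_consoles_sketch(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);

	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();	/* flush nbcon directly */
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();		/* printer threads take over */
	if (ft.legacy_offload)
		defer_console_output();		/* irq_work / legacy thread */
	/* ft.legacy_direct: the caller runs the legacy console loop itself */
}
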
extern struct printk_buffers printk_shared_pbufs;
/**
@@ -135,4 +333,5 @@ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
#ifdef CONFIG_PRINTK
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+void console_prepend_replay(struct printk_message *pmsg);
#endif
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index c8093bcc01fe..fd12efcc4aed 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -2,11 +2,25 @@
// Copyright (C) 2022 Linutronix GmbH, John Ogness
// Copyright (C) 2022 Intel, Thomas Gleixner
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/kthread.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include "internal.h"
+#include "printk_ringbuffer.h"
/*
 * Printk console printing implementation for consoles that do not depend
* on the legacy style console_lock mechanism.
@@ -172,9 +186,6 @@ void nbcon_seq_force(struct console *con, u64 seq)
u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
-
- /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
- con->seq = 0;
}
/**
@@ -231,6 +242,13 @@ static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
struct nbcon_state new;
do {
+ /*
+ * Panic does not imply that the console is owned. However, it
+ * is critical that non-panic CPUs during panic are unable to
+ * acquire ownership in order to satisfy the assumptions of
+ * nbcon_waiter_matches(). In particular, the assumption that
+ * lower priorities are ignored during panic.
+ */
if (other_cpu_in_panic())
return -EPERM;
@@ -262,18 +280,29 @@ static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
/*
* The request context is well defined by the @req_prio because:
*
- * - Only a context with a higher priority can take over the request.
+ * - Only a context with a priority higher than the owner can become
+ * a waiter.
+ * - Only a context with a priority higher than the waiter can
+ * directly take over the request.
* - There are only three priorities.
* - Only one CPU is allowed to request PANIC priority.
* - Lower priorities are ignored during panic() until reboot.
*
* As a result, the following scenario is *not* possible:
*
- * 1. Another context with a higher priority directly takes ownership.
- * 2. The higher priority context releases the ownership.
- * 3. A lower priority context takes the ownership.
- * 4. Another context with the same priority as this context
+ * 1. This context is currently a waiter.
+ * 2. Another context with a higher priority than this context
+ * directly takes ownership.
+ * 3. The higher priority context releases the ownership.
+ * 4. Another lower priority context takes the ownership.
+ * 5. Another context with the same priority as this context
* creates a request and starts waiting.
+ *
+ * Event #1 implies this context is EMERGENCY.
+ * Event #2 implies the new context is PANIC.
+ * Event #3 occurs when panic() has flushed the console.
+ * Events #4 and #5 are not possible due to the other_cpu_in_panic()
+ * check in nbcon_context_try_acquire_direct().
*/
return (cur->req_prio == expected_prio);
@@ -531,6 +560,7 @@ static struct printk_buffers panic_nbcon_pbufs;
* nbcon_context_try_acquire - Try to acquire nbcon console
* @ctxt: The context of the caller
*
+ * Context: Under @ctxt->con->device_lock() or local_irq_save().
* Return: True if the console was acquired. False otherwise.
*
* If the caller allowed an unsafe hostile takeover, on success the
@@ -538,7 +568,6 @@ static struct printk_buffers panic_nbcon_pbufs;
* in an unsafe state. Otherwise, on success the caller may assume
* the console is not in an unsafe state.
*/
-__maybe_unused
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
{
unsigned int cpu = smp_processor_id();
@@ -581,11 +610,29 @@ static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
int expected_prio)
{
/*
- * Since consoles can only be acquired by higher priorities,
- * owning contexts are uniquely identified by @prio. However,
- * since contexts can unexpectedly lose ownership, it is
- * possible that later another owner appears with the same
- * priority. For this reason @cpu is also needed.
+ * A similar function, nbcon_waiter_matches(), only deals with
+ * EMERGENCY and PANIC priorities. However, this function must also
+ * deal with the NORMAL priority, which requires additional checks
+ * and constraints.
+ *
+ * For the case where preemption and interrupts are disabled, it is
+ * enough to also verify that the owning CPU has not changed.
+ *
+ * For the case where preemption or interrupts are enabled, an
+ * external synchronization method *must* be used. In particular,
+ * the driver-specific locking mechanism used in device_lock()
+ * (including disabling migration) should be used. It prevents
+ * scenarios such as:
+ *
+ * 1. [Task A] owns a context with NBCON_PRIO_NORMAL on [CPU X] and
+ * is scheduled out.
+ * 2. Another context takes over the lock with NBCON_PRIO_EMERGENCY
+ * and releases it.
+ * 3. [Task B] acquires a context with NBCON_PRIO_NORMAL on [CPU X]
+ * and is scheduled out.
+ * 4. [Task A] gets running on [CPU X] and sees that the console is
+ * still owned by a task on [CPU X] with NBCON_PRIO_NORMAL. Thus
+ * [Task A] thinks it is the owner when it is not.
*/
if (cur->prio != expected_prio)
@@ -784,6 +831,19 @@ out:
return nbcon_context_can_proceed(ctxt, &cur);
}
+static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+
+ wctxt->outbuf = buf;
+ wctxt->len = len;
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+}
+
/**
* nbcon_enter_unsafe - Enter an unsafe region in the driver
* @wctxt: The write context that was handed to the write function
@@ -799,8 +859,12 @@ out:
bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ bool is_owner;
- return nbcon_context_enter_unsafe(ctxt);
+ is_owner = nbcon_context_enter_unsafe(ctxt);
+ if (!is_owner)
+ nbcon_write_context_set_buf(wctxt, NULL, 0);
+ return is_owner;
}
EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
@@ -819,14 +883,47 @@ EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ bool ret;
- return nbcon_context_exit_unsafe(ctxt);
+ ret = nbcon_context_exit_unsafe(ctxt);
+ if (!ret)
+ nbcon_write_context_set_buf(wctxt, NULL, 0);
+ return ret;
}
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
/**
+ * nbcon_reacquire_nobuf - Reacquire a console after losing ownership
+ * while printing
+ * @wctxt: The write context that was handed to the write callback
+ *
+ * Since ownership can be lost at any time due to handover or takeover, a
+ * printing context _must_ be prepared to back out immediately and
+ * carefully. However, there are scenarios where the printing context must
+ * reacquire ownership in order to finalize or revert hardware changes.
+ *
+ * This function allows a printing context to reacquire ownership using the
+ * same priority as its previous ownership.
+ *
+ * Note that after a successful reacquire the printing context will have no
+ * output buffer because that has been lost. This function cannot be used to
+ * resume printing.
+ */
+void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ while (!nbcon_context_try_acquire(ctxt))
+ cpu_relax();
+
+ nbcon_write_context_set_buf(wctxt, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(nbcon_reacquire_nobuf);
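
As a minimal sketch (not part of this patch), a driver's write callback
might use these helpers as follows. The port type and the accessors
my_tx_ready(), my_tx_char() and my_restore_regs() are hypothetical
stand-ins, as is keeping driver data in con->data:

	static void my_write_atomic(struct console *con,
				    struct nbcon_write_context *wctxt)
	{
		struct my_port *port = con->data;	/* hypothetical */
		unsigned int i;

		/* Hardware access happens inside an unsafe section. */
		if (!nbcon_enter_unsafe(wctxt))
			return;	/* ownership lost; wctxt->outbuf is now NULL */

		for (i = 0; i < wctxt->len; i++) {
			while (!my_tx_ready(port))
				cpu_relax();
			my_tx_char(port, wctxt->outbuf[i]);
		}

		if (!nbcon_exit_unsafe(wctxt)) {
			/*
			 * Ownership was lost mid-write. Take it back without
			 * an output buffer, purely to restore hardware state.
			 * The core sees the NULL wctxt->outbuf and handles it
			 * as if ownership was lost.
			 */
			nbcon_reacquire_nobuf(wctxt);
			my_restore_regs(port);
		}
	}
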
+
+/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
+ * @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
@@ -840,8 +937,7 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
* When true is returned, @wctxt->ctxt.backlog indicates whether there are
* still records pending in the ringbuffer.
*/
-__maybe_unused
-static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
struct console *con = ctxt->console;
@@ -852,7 +948,22 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
unsigned long con_dropped;
struct nbcon_state cur;
unsigned long dropped;
- bool done;
+ unsigned long ulseq;
+
+ /*
+ * This function should never be called for consoles that have not
+ * implemented the necessary callback for writing: i.e. legacy
+ * consoles and, when atomic, nbcon consoles with no write_atomic().
+ * Handle it as if ownership was lost and try to continue.
+ *
+ * Note that for nbcon consoles the write_thread() callback is
+ * mandatory and was already checked in nbcon_alloc().
+ */
+ if (WARN_ON_ONCE((use_atomic && !con->write_atomic) ||
+ !(console_srcu_read_flags(con) & CON_NBCON))) {
+ nbcon_context_release(ctxt);
+ return false;
+ }
/*
* The printk buffers are filled within an unsafe section. This
@@ -878,6 +989,29 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
if (dropped && !is_extended)
console_prepend_dropped(&pmsg, dropped);
+ /*
+ * If the previous owner was assigned the same record, this context
+ * has taken over ownership and is replaying the record. Prepend a
+ * message to let the user know the record is replayed.
+ */
+ ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq));
+ if (__ulseq_to_u64seq(prb, ulseq) == pmsg.seq) {
+ console_prepend_replay(&pmsg);
+ } else {
+ /*
+ * Ensure this context is still the owner before trying to
+ * update @nbcon_prev_seq. Otherwise the value in @ulseq may
+ * not be from the previous owner and instead be some later
+ * value from the context that took over ownership.
+ */
+ nbcon_state_read(con, &cur);
+ if (!nbcon_context_can_proceed(ctxt, &cur))
+ return false;
+
+ atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq,
+ __u64seq_to_ulseq(pmsg.seq));
+ }
+
if (!nbcon_context_exit_unsafe(ctxt))
return false;
@@ -886,22 +1020,27 @@ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
goto update_con;
/* Initialize the write context for driver callbacks. */
- wctxt->outbuf = &pmsg.pbufs->outbuf[0];
- wctxt->len = pmsg.outbuf_len;
- nbcon_state_read(con, &cur);
- wctxt->unsafe_takeover = cur.unsafe_takeover;
+ nbcon_write_context_set_buf(wctxt, &pmsg.pbufs->outbuf[0], pmsg.outbuf_len);
- if (con->write_atomic) {
- done = con->write_atomic(con, wctxt);
- } else {
+ if (use_atomic)
+ con->write_atomic(con, wctxt);
+ else
+ con->write_thread(con, wctxt);
+
+ if (!wctxt->outbuf) {
+ /*
+ * Ownership was lost and reacquired by the driver. Handle it
+ * as if ownership was lost.
+ */
nbcon_context_release(ctxt);
- WARN_ON_ONCE(1);
- done = false;
+ return false;
}
- /* If not done, the emit was aborted. */
- if (!done)
- return false;
+ /*
+ * Ownership may have been lost but _not_ reacquired by the driver.
+ * This case is detected and handled when entering unsafe to update
+ * dropped/seq values.
+ */
/*
* Since any dropped message was successfully output, reset the
@@ -928,54 +1067,650 @@ update_con:
return nbcon_context_exit_unsafe(ctxt);
}
+/*
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ * specified callback
+ * @wctxt: An initialized write context struct to use for this context
+ * @use_atomic: True if the write_atomic() callback is to be used
+ *
+ * Return: True, when a record has been printed and there are still
+ * pending records. The caller might want to continue flushing.
+ *
+ * False, when there is no pending record, or when the console
+ * context cannot be acquired, or the ownership has been lost.
+ * The caller should give up. Either the job is done, cannot be
+ * done, or will be handled by the owning context.
+ *
+ * This is an internal helper to handle the locking of the console before
+ * calling nbcon_emit_next_record().
+ */
+static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+ unsigned long flags;
+ bool ret = false;
+
+ if (!use_atomic) {
+ con->device_lock(con, &flags);
+
+ /*
+ * Ensure this stays on the CPU to make handover and
+ * takeover possible.
+ */
+ cant_migrate();
+ }
+
+ if (!nbcon_context_try_acquire(ctxt))
+ goto out;
+
+ /*
+ * nbcon_emit_next_record() returns false when the console was
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
+ *
+ * The higher priority printing context takes over responsibility
+ * to print the pending records.
+ */
+ if (!nbcon_emit_next_record(wctxt, use_atomic))
+ goto out;
+
+ nbcon_context_release(ctxt);
+
+ ret = ctxt->backlog;
+out:
+ if (!use_atomic)
+ con->device_unlock(con, flags);
+ return ret;
+}
+
/**
- * nbcon_alloc - Allocate buffers needed by the nbcon console
- * @con: Console to allocate buffers for
+ * nbcon_kthread_should_wakeup - Check whether a printer thread should wake up
+ * @con: Console to operate on
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
*
- * Return: True on success. False otherwise and the console cannot
- * be used.
+ * Return: True if the thread should shut down or if the console is
+ * allowed to print and a record is available. False otherwise.
*
- * This is not part of nbcon_init() because buffer allocation must
- * be performed earlier in the console registration process.
+ * After the thread wakes up, it must first check if it should shut down before
+ * attempting any printing.
*/
-bool nbcon_alloc(struct console *con)
+static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
{
- if (con->flags & CON_BOOT) {
+ bool ret = false;
+ short flags;
+ int cookie;
+
+ if (kthread_should_stop())
+ return true;
+
+ cookie = console_srcu_read_lock();
+
+ flags = console_srcu_read_flags(con);
+ if (console_is_usable(con, flags, false)) {
+ /* Bring the sequence in @ctxt up to date */
+ ctxt->seq = nbcon_seq_read(con);
+
+ ret = prb_read_valid(prb, ctxt->seq, NULL);
+ }
+
+ console_srcu_read_unlock(cookie);
+ return ret;
+}
+
+/**
+ * nbcon_kthread_func - The printer thread function
+ * @__console: Console to operate on
+ *
+ * Return: 0
+ */
+static int nbcon_kthread_func(void *__console)
+{
+ struct console *con = __console;
+ struct nbcon_write_context wctxt = {
+ .ctxt.console = con,
+ .ctxt.prio = NBCON_PRIO_NORMAL,
+ };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ short con_flags;
+ bool backlog;
+ int cookie;
+
+wait_for_event:
+ /*
+ * Guarantee this task is visible on the rcuwait before
+ * checking the wake condition.
+ *
+ * The full memory barrier within set_current_state() of
+ * ___rcuwait_wait_event() pairs with the full memory
+ * barrier within rcuwait_has_sleeper().
+ *
+ * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
+ */
+ rcuwait_wait_event(&con->rcuwait,
+ nbcon_kthread_should_wakeup(con, ctxt),
+ TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
+
+ do {
+ if (kthread_should_stop())
+ return 0;
+
+ backlog = false;
+
/*
- * Boot console printing is synchronized with legacy console
- * printing, so boot consoles can share the same global printk
- * buffers.
+ * Keep the srcu read lock around the entire operation so that
+ * synchronize_srcu() can guarantee that the kthread stopped
+ * or suspended printing.
*/
- con->pbufs = &printk_shared_pbufs;
+ cookie = console_srcu_read_lock();
+
+ con_flags = console_srcu_read_flags(con);
+
+ if (console_is_usable(con, con_flags, false))
+ backlog = nbcon_emit_one(&wctxt, false);
+
+ console_srcu_read_unlock(cookie);
+
+ cond_resched();
+
+ } while (backlog);
+
+ goto wait_for_event;
+}
+
+/**
+ * nbcon_irq_work - irq work to wake console printer thread
+ * @irq_work: The irq work to operate on
+ */
+static void nbcon_irq_work(struct irq_work *irq_work)
+{
+ struct console *con = container_of(irq_work, struct console, irq_work);
+
+ nbcon_kthread_wake(con);
+}
+
+static inline bool rcuwait_has_sleeper(struct rcuwait *w)
+{
+ /*
+ * Guarantee any new records can be seen by tasks preparing to wait
+ * before this context checks if the rcuwait is empty.
+ *
+ * This full memory barrier pairs with the full memory barrier within
+ * set_current_state() of ___rcuwait_wait_event(), which is called
+ * after prepare_to_rcuwait() adds the waiter but before it has
+ * checked the wait condition.
+ *
+ * This pairs with nbcon_kthread_func:A.
+ */
+ smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
+ return rcuwait_active(w);
+}
+
+/**
+ * nbcon_kthreads_wake - Wake up printing threads using irq_work
+ */
+void nbcon_kthreads_wake(void)
+{
+ struct console *con;
+ int cookie;
+
+ if (!printk_kthreads_running)
+ return;
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ if (!(console_srcu_read_flags(con) & CON_NBCON))
+ continue;
+
+ /*
+ * Only schedule irq_work if the printing thread is
+ * actively waiting. If not waiting, the thread will
+ * notice by itself that it has work to do.
+ */
+ if (rcuwait_has_sleeper(&con->rcuwait))
+ irq_work_queue(&con->irq_work);
+ }
+ console_srcu_read_unlock(cookie);
+}
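
Condensed, the wait/wake pairing described by the barrier comments above
looks roughly like the following sketch, where "cond" stands in for
nbcon_kthread_should_wakeup():

	/* Sleeper (printer thread): the full barrier in set_current_state()
	 * orders adding itself to the rcuwait against reading "cond". */
	rcuwait_wait_event(&con->rcuwait, cond, TASK_INTERRUPTIBLE);

	/* Waker (printk caller): the full barrier in rcuwait_has_sleeper()
	 * orders publishing the new record against checking for a sleeper. */
	if (rcuwait_has_sleeper(&con->rcuwait))
		irq_work_queue(&con->irq_work);

Either the waker observes the sleeper, or the sleeper observes the new
record; the paired full barriers make it impossible to miss both.
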
+
+/*
+ * nbcon_kthread_stop - Stop a console printer thread
+ * @con: Console to operate on
+ */
+void nbcon_kthread_stop(struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+
+ if (!con->kthread)
+ return;
+
+ kthread_stop(con->kthread);
+ con->kthread = NULL;
+}
+
+/**
+ * nbcon_kthread_create - Create a console printer thread
+ * @con: Console to operate on
+ *
+ * Return: True if the kthread was started or already exists.
+ * Otherwise false and @con must not be registered.
+ *
+ * This function is called when it is expected that nbcon consoles will be
+ * flushed using the kthread. Messages printed with NBCON_PRIO_NORMAL will
+ * no longer be flushed by the legacy loop. This is why failure must be
+ * fatal for console registration.
+ *
+ * If @con was already registered and this function fails, @con must be
+ * unregistered before the global state variable @printk_kthreads_running
+ * can be set.
+ */
+bool nbcon_kthread_create(struct console *con)
+{
+ struct task_struct *kt;
+
+ lockdep_assert_console_list_lock_held();
+
+ if (con->kthread)
+ return true;
+
+ kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
+ if (WARN_ON(IS_ERR(kt))) {
+ con_printk(KERN_ERR, con, "failed to start printing thread\n");
+ return false;
+ }
+
+ con->kthread = kt;
+
+ /*
+ * It is important that console printing threads are scheduled
+ * shortly after a printk call and with generous runtime budgets.
+ */
+ sched_set_normal(con->kthread, -20);
+
+ return true;
+}
+
+/* Track the nbcon emergency nesting per CPU. */
+static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
+static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
+
+/**
+ * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
+ *
+ * Context: For reading, any context. For writing, any context that
+ * cannot be migrated to another CPU.
+ * Return: Either a pointer to the per CPU emergency nesting counter of
+ * the current CPU or to the init data during early boot.
+ *
+ * The function is safe for reading per-CPU variables in any context because
+ * preemption is disabled if the current CPU is in the emergency state. See
+ * also nbcon_cpu_emergency_enter().
+ */
+static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
+{
+ /*
+ * The value of __printk_percpu_data_ready gets set in normal
+ * context and before SMP initialization. As a result it could
+ * never change while inside an nbcon emergency section.
+ */
+ if (!printk_percpu_data_ready())
+ return &early_nbcon_pcpu_emergency_nesting;
+
+ return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
+}
+
+/**
+ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
+ * printing on the current CPU
+ *
+ * Context: Any context.
+ * Return: The nbcon_prio to use for acquiring an nbcon console in this
+ * context for printing.
+ *
+ * The function is safe for reading per-CPU data in any context because
+ * preemption is disabled if the current CPU is in the emergency or panic
+ * state.
+ */
+enum nbcon_prio nbcon_get_default_prio(void)
+{
+ unsigned int *cpu_emergency_nesting;
+
+ if (this_cpu_in_panic())
+ return NBCON_PRIO_PANIC;
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+ if (*cpu_emergency_nesting)
+ return NBCON_PRIO_EMERGENCY;
+
+ return NBCON_PRIO_NORMAL;
+}
+
+/**
+ * nbcon_legacy_emit_next_record - Print one record for an nbcon console
+ * in legacy contexts
+ * @con: The console to print on
+ * @handover: Will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding
+ * both the console_lock and the SRCU read lock. Otherwise it
+ * is set to false.
+ * @cookie: The cookie from the SRCU read lock.
+ * @use_atomic: Set true when called in an atomic or unknown context.
+ * It affects which nbcon callback will be used: write_atomic()
+ * or write_thread().
+ *
+ * When false, the write_thread() callback is used and would be
+ * called in a preemptible context unless disabled by the
+ * device_lock(). The legacy handover is not allowed in this mode.
+ *
+ * Context: Any context except NMI.
+ * Return: True, when a record has been printed and there are still
+ * pending records. The caller might want to continue flushing.
+ *
+ * False, when there is no pending record, or when the console
+ * context cannot be acquired, or the ownership has been lost.
+ * The caller should give up. Either the job is done, cannot be
+ * done, or will be handled by the owning context.
+ *
+ * This function is meant to be called by console_flush_all() to print records
+ * on nbcon consoles from legacy context (printing via console unlocking).
+ * Essentially it is the nbcon version of console_emit_next_record().
+ */
+bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+ int cookie, bool use_atomic)
+{
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ unsigned long flags;
+ bool progress;
+
+ ctxt->console = con;
+ ctxt->prio = nbcon_get_default_prio();
+
+ if (use_atomic) {
+ /*
+ * In an atomic or unknown context, use the same procedure as
+ * in console_emit_next_record(). It allows handover to a spinning waiter.
+ */
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
+ stop_critical_timings();
+ }
+
+ progress = nbcon_emit_one(&wctxt, use_atomic);
+
+ if (use_atomic) {
+ start_critical_timings();
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
} else {
- con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
- if (!con->pbufs) {
- con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
- return false;
+ /* Non-atomic does not perform legacy spinning handovers. */
+ *handover = false;
+ }
+
+ return progress;
+}
+
+/**
+ * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
+ * write_atomic() callback
+ * @con: The nbcon console to flush
+ * @stop_seq: Flush up until this record
+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ *
+ * Return: 0 if @con was flushed up to @stop_seq. Otherwise, error code on
+ * failure.
+ *
+ * Errors:
+ *
+ * -EPERM: Unable to acquire console ownership.
+ *
+ * -EAGAIN: Another context took over ownership while printing.
+ *
+ * -ENOENT: A record before @stop_seq is not available.
+ *
+ * If flushing up to @stop_seq was not successful, it only makes sense for the
+ * caller to try again when -EAGAIN was returned. When -EPERM is returned,
+ * this context is not allowed to acquire the console. When -ENOENT is
+ * returned, it cannot be expected that the unfinalized record will become
+ * available.
+ */
+static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+ bool allow_unsafe_takeover)
+{
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+ int err = 0;
+
+ ctxt->console = con;
+ ctxt->spinwait_max_us = 2000;
+ ctxt->prio = nbcon_get_default_prio();
+ ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
+
+ if (!nbcon_context_try_acquire(ctxt))
+ return -EPERM;
+
+ while (nbcon_seq_read(con) < stop_seq) {
+ /*
+ * nbcon_emit_next_record() returns false when the console was
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
+ */
+ if (!nbcon_emit_next_record(&wctxt, true))
+ return -EAGAIN;
+
+ if (!ctxt->backlog) {
+ /* Are there reserved but not yet finalized records? */
+ if (nbcon_seq_read(con) < stop_seq)
+ err = -ENOENT;
+ break;
}
}
- return true;
+ nbcon_context_release(ctxt);
+ return err;
+}
+
+/**
+ * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
+ * write_atomic() callback
+ * @con: The nbcon console to flush
+ * @stop_seq: Flush up until this record
+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ *
+ * This will stop flushing before @stop_seq if another context has ownership.
+ * That context is then responsible for the flushing. Likewise, if new records
+ * are added while this context is flushing and there is no other context
+ * to handle the printing, this context must also flush those records.
+ */
+static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
+ bool allow_unsafe_takeover)
+{
+ struct console_flush_type ft;
+ unsigned long flags;
+ int err;
+
+again:
+ /*
+ * Atomic flushing does not use console driver synchronization (i.e.
+ * it does not hold the port lock for uart consoles). Therefore IRQs
+ * must be disabled to avoid being interrupted and then calling into
+ * a driver that will deadlock trying to acquire console ownership.
+ */
+ local_irq_save(flags);
+
+ err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+
+ local_irq_restore(flags);
+
+ /*
+ * If there was a new owner (-EPERM, -EAGAIN), that context is
+ * responsible for completing.
+ *
+ * Do not wait for records not yet finalized (-ENOENT) to avoid a
+ * possible deadlock. They will either get flushed by the writer or
+ * eventually skipped on panic CPU.
+ */
+ if (err)
+ return;
+
+ /*
+ * If flushing was successful but more records are available, this
+ * context must flush those remaining records if the printer thread
+ * is not available to do it.
+ */
+ printk_get_console_flush_type(&ft);
+ if (!ft.nbcon_offload &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ stop_seq = prb_next_reserve_seq(prb);
+ goto again;
+ }
+}
+
+/**
+ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
+ * write_atomic() callback
+ * @stop_seq: Flush up until this record
+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ */
+static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
+{
+ struct console *con;
+ int cookie;
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+
+ if (!(flags & CON_NBCON))
+ continue;
+
+ if (!console_is_usable(con, flags, true))
+ continue;
+
+ if (nbcon_seq_read(con) >= stop_seq)
+ continue;
+
+ nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+ }
+ console_srcu_read_unlock(cookie);
+}
+
+/**
+ * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
+ * write_atomic() callback
+ *
+ * Flush the backlog up through the currently newest record. Any new
+ * records added while flushing will not be flushed if there is another
+ * context available to handle the flushing. This is to avoid one CPU
+ * printing without bound while other CPUs continue to add records.
+ */
+void nbcon_atomic_flush_pending(void)
+{
+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
+}
+
+/**
+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
+ * write_atomic() callback and allowing unsafe hostile takeovers
+ *
+ * Flush the backlog up through the currently newest record. Unsafe hostile
+ * takeovers will be performed, if necessary.
+ */
+void nbcon_atomic_flush_unsafe(void)
+{
+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
+}
+
+/**
+ * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
+ * messages for that CPU are flushed directly
+ *
+ * Context: Any context. Disables preemption.
+ *
+ * When within an emergency section, printk() calls will attempt to flush any
+ * pending messages in the ringbuffer.
+ */
+void nbcon_cpu_emergency_enter(void)
+{
+ unsigned int *cpu_emergency_nesting;
+
+ preempt_disable();
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+ (*cpu_emergency_nesting)++;
+}
+
+/**
+ * nbcon_cpu_emergency_exit - Exit an emergency section
+ *
+ * Context: Within an emergency section. Enables preemption.
+ */
+void nbcon_cpu_emergency_exit(void)
+{
+ unsigned int *cpu_emergency_nesting;
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+
+ if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
+ (*cpu_emergency_nesting)--;
+
+ preempt_enable();
}
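
As a hedged sketch, an error-reporting path could bracket its output with
the emergency section like this; the device type and dump helper are
hypothetical:

	static void report_fault(struct my_device *mdev)
	{
		/*
		 * Inside the section this CPU prints at NBCON_PRIO_EMERGENCY
		 * and printk() attempts to flush pending records directly
		 * instead of deferring to the printer threads.
		 */
		nbcon_cpu_emergency_enter();

		pr_err("my_device: fault detected, dumping state\n");
		my_device_dump_state(mdev);	/* hypothetical; calls printk() */

		nbcon_cpu_emergency_exit();
	}
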
/**
- * nbcon_init - Initialize the nbcon console specific data
+ * nbcon_alloc - Allocate and init the nbcon console specific data
* @con: Console to initialize
*
- * nbcon_alloc() *must* be called and succeed before this function
- * is called.
+ * Return: True if the console was fully allocated and initialized.
+ * Otherwise @con must not be registered.
*
- * This function expects that the legacy @con->seq has been set.
+ * When allocation and init was successful, the console must be properly
+ * freed using nbcon_free() once it is no longer needed.
*/
-void nbcon_init(struct console *con)
+bool nbcon_alloc(struct console *con)
{
struct nbcon_state state = { };
- /* nbcon_alloc() must have been called and successful! */
- BUG_ON(!con->pbufs);
+ /* The write_thread() callback is mandatory. */
+ if (WARN_ON(!con->write_thread))
+ return false;
- nbcon_seq_force(con, con->seq);
+ rcuwait_init(&con->rcuwait);
+ init_irq_work(&con->irq_work, nbcon_irq_work);
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
nbcon_state_set(con, &state);
+
+ /*
+ * Initialize @nbcon_seq to the highest possible sequence number so
+ * that practically speaking it will have nothing to print until a
+ * desired initial sequence number has been set via nbcon_seq_force().
+ */
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb));
+
+ if (con->flags & CON_BOOT) {
+ /*
+ * Boot console printing is synchronized with legacy console
+ * printing, so boot consoles can share the same global printk
+ * buffers.
+ */
+ con->pbufs = &printk_shared_pbufs;
+ } else {
+ con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
+ if (!con->pbufs) {
+ con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
+ return false;
+ }
+
+ if (printk_kthreads_running) {
+ if (!nbcon_kthread_create(con)) {
+ kfree(con->pbufs);
+ con->pbufs = NULL;
+ return false;
+ }
+ }
+ }
+
+ return true;
}
/**
@@ -986,6 +1721,9 @@ void nbcon_free(struct console *con)
{
struct nbcon_state state = { };
+ if (printk_kthreads_running)
+ nbcon_kthread_stop(con);
+
nbcon_state_set(con, &state);
/* Boot consoles share global printk buffers. */
@@ -994,3 +1732,85 @@ void nbcon_free(struct console *con)
con->pbufs = NULL;
}
+
+/**
+ * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
+ * section
+ * @con: The nbcon console to acquire
+ *
+ * Context: Under the locking mechanism implemented in
+ * @con->device_lock() including disabling migration.
+ * Return: True if the console was acquired. False otherwise.
+ *
+ * Console drivers will usually use their own internal synchronization
+ * mechanism to synchronize between console printing and non-printing
+ * activities (such as setting baud rates). However, nbcon console drivers
+ * supporting atomic consoles may also want to mark unsafe sections when
+ * performing non-printing activities in order to synchronize against their
+ * write_atomic() callback.
+ *
+ * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
+ * and marks it unsafe for handover/takeover.
+ */
+bool nbcon_device_try_acquire(struct console *con)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
+
+ cant_migrate();
+
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->console = con;
+ ctxt->prio = NBCON_PRIO_NORMAL;
+
+ if (!nbcon_context_try_acquire(ctxt))
+ return false;
+
+ if (!nbcon_context_enter_unsafe(ctxt))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
+
+/**
+ * nbcon_device_release - Exit unsafe section and release the nbcon console
+ * @con: The nbcon console acquired in nbcon_device_try_acquire()
+ */
+void nbcon_device_release(struct console *con)
+{
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
+ struct console_flush_type ft;
+ int cookie;
+
+ if (!nbcon_context_exit_unsafe(ctxt))
+ return;
+
+ nbcon_context_release(ctxt);
+
+ /*
+ * This context must flush any new records added while the console
+ * was locked if the printer thread is not available to do it. The
+ * console_srcu_read_lock must be taken to ensure the console is
+ * usable throughout flushing.
+ */
+ cookie = console_srcu_read_lock();
+ printk_get_console_flush_type(&ft);
+ if (console_is_usable(con, console_srcu_read_flags(con), true) &&
+ !ft.nbcon_offload &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ /*
+ * If nbcon_atomic flushing is not available, fall back to
+ * using the legacy loop.
+ */
+ if (ft.nbcon_atomic) {
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ } else if (ft.legacy_direct) {
+ if (console_trylock())
+ console_unlock();
+ } else if (ft.legacy_offload) {
+ printk_trigger_flush();
+ }
+ }
+ console_srcu_read_unlock(cookie);
+}
+EXPORT_SYMBOL_GPL(nbcon_device_release);
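
A hedged sketch of the intended driver usage, e.g. for a baud-rate change;
my_program_divisor() and keeping driver data in con->data are illustrative
assumptions:

	static void my_console_set_baud(struct console *con, unsigned int baud)
	{
		unsigned long flags;

		/* Driver-specific lock; for nbcon it also disables migration. */
		con->device_lock(con, &flags);

		if (nbcon_device_try_acquire(con)) {
			/* Marked unsafe: no handover/takeover mid-reprogram. */
			my_program_divisor(con->data, baud);
			nbcon_device_release(con);
		}
		/* On failure (e.g. a panic context owns the console), skip. */

		con->device_unlock(con, flags);
	}
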
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c22b07049c38..beb808f4c367 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -34,6 +34,7 @@
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
+#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
@@ -282,6 +283,7 @@ EXPORT_SYMBOL(console_list_unlock);
* Return: A cookie to pass to console_srcu_read_unlock().
*/
int console_srcu_read_lock(void)
+ __acquires(&console_srcu)
{
return srcu_read_lock_nmisafe(&console_srcu);
}
@@ -295,6 +297,7 @@ EXPORT_SYMBOL(console_srcu_read_lock);
* Counterpart to console_srcu_read_lock()
*/
void console_srcu_read_unlock(int cookie)
+ __releases(&console_srcu)
{
srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
@@ -461,14 +464,43 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
+/*
+ * Specifies if a legacy console is registered. If legacy consoles are
+ * present, it is necessary to perform the console lock/unlock dance
+ * whenever console flushing should occur.
+ */
+bool have_legacy_console;
+
+/*
+ * Specifies if an nbcon console is registered. If nbcon consoles are present,
+ * synchronous printing of legacy consoles will not occur during panic until
+ * the backtrace has been stored to the ringbuffer.
+ */
+bool have_nbcon_console;
+
+/*
+ * Specifies if a boot console is registered. If boot consoles are present,
+ * nbcon consoles cannot print simultaneously and must be synchronized by
+ * the console lock. This is because boot consoles and nbcon consoles may
+ * have mapped the same hardware.
+ */
+bool have_boot_console;
+
+/* See printk_legacy_allow_panic_sync() for details. */
+bool legacy_allow_panic_sync;
+
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
+static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
+/* True when _all_ printer threads are available for printing. */
+bool printk_kthreads_running;
+
struct latched_seq {
seqcount_latch_t latch;
u64 val[2];
@@ -1850,7 +1882,7 @@ static bool console_waiter;
* there may be a waiter spinning (like a spinlock). Also it must be
* ready to hand over the lock at the end of the section.
*/
-static void console_lock_spinning_enable(void)
+void console_lock_spinning_enable(void)
{
/*
* Do not use spinning in panic(). The panic CPU wants to keep the lock.
@@ -1889,7 +1921,7 @@ lockdep:
*
* Return: 1 if the lock rights were passed, 0 otherwise.
*/
-static int console_lock_spinning_disable_and_check(int cookie)
+int console_lock_spinning_disable_and_check(int cookie)
{
int waiter;
@@ -2300,12 +2332,30 @@ out:
return ret;
}
+/*
+ * This acts as a one-way switch to allow legacy consoles to print from
+ * the printk() caller context on a panic CPU. It also attempts to flush
+ * the legacy consoles in this context.
+ */
+void printk_legacy_allow_panic_sync(void)
+{
+ struct console_flush_type ft;
+
+ legacy_allow_panic_sync = true;
+
+ printk_get_console_flush_type(&ft);
+ if (ft.legacy_direct) {
+ if (console_trylock())
+ console_unlock();
+ }
+}
+
asmlinkage int vprintk_emit(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
+ struct console_flush_type ft;
int printed_len;
- bool in_sched = false;
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
@@ -2319,17 +2369,26 @@ asmlinkage int vprintk_emit(int facility, int level,
if (other_cpu_in_panic() && !panic_triggering_all_cpu_backtrace)
return 0;
+ printk_get_console_flush_type(&ft);
+
+ /* If called from the scheduler, we can not call up(). */
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
- in_sched = true;
+ ft.legacy_offload |= ft.legacy_direct;
+ ft.legacy_direct = false;
}
printk_delay(level);
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
- /* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
+
+ if (ft.nbcon_offload)
+ nbcon_kthreads_wake();
+
+ if (ft.legacy_direct) {
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during
@@ -2349,7 +2408,7 @@ asmlinkage int vprintk_emit(int facility, int level,
preempt_enable();
}
- if (in_sched)
+ if (ft.legacy_offload)
defer_console_output();
else
wake_up_klogd();
@@ -2620,6 +2679,7 @@ int match_devname_and_update_preferred_console(const char *devname,
return -ENOENT;
}
+EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);
@@ -2677,6 +2737,7 @@ void suspend_console(void)
void resume_console(void)
{
+ struct console_flush_type ft;
struct console *con;
if (!console_suspend_enabled)
@@ -2694,6 +2755,12 @@ void resume_console(void)
*/
synchronize_srcu(&console_srcu);
+ printk_get_console_flush_type(&ft);
+ if (ft.nbcon_offload)
+ nbcon_kthreads_wake();
+ if (ft.legacy_offload)
+ defer_console_output();
+
pr_flush(1000, true);
}
@@ -2708,10 +2775,16 @@ void resume_console(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
+ struct console_flush_type ft;
+
if (!cpuhp_tasks_frozen) {
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
+ printk_get_console_flush_type(&ft);
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
+ if (ft.legacy_direct) {
+ if (console_trylock())
+ console_unlock();
+ }
}
return 0;
}
@@ -2765,36 +2838,6 @@ int is_console_locked(void)
}
EXPORT_SYMBOL(is_console_locked);
-/*
- * Check if the given console is currently capable and allowed to print
- * records.
- *
- * Requires the console_srcu_read_lock.
- */
-static inline bool console_is_usable(struct console *con)
-{
- short flags = console_srcu_read_flags(con);
-
- if (!(flags & CON_ENABLED))
- return false;
-
- if ((flags & CON_SUSPENDED))
- return false;
-
- if (!con->write)
- return false;
-
- /*
- * Console drivers may assume that per-cpu resources have been
- * allocated. So unless they're explicitly marked as being able to
- * cope (CON_ANYTIME) don't call them until this CPU is officially up.
- */
- if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
- return false;
-
- return true;
-}
-
static void __console_unlock(void)
{
console_locked = 0;
@@ -2804,30 +2847,31 @@ static void __console_unlock(void)
#ifdef CONFIG_PRINTK
/*
- * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
- * is achieved by shifting the existing message over and inserting the dropped
- * message.
+ * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
+ * the existing message over and inserting the scratchbuf message.
*
- * @pmsg is the printk message to prepend.
- *
- * @dropped is the dropped count to report in the dropped message.
+ * @pmsg is the original printk message.
+ * @fmt is the printf format of the message which will prepend the existing one.
*
- * If the message text in @pmsg->pbufs->outbuf does not have enough space for
- * the dropped message, the message text will be sufficiently truncated.
+ * If there is not enough space in @pmsg->pbufs->outbuf, the existing
+ * message text will be sufficiently truncated.
*
* If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
*/
-void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+__printf(2, 3)
+static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
{
struct printk_buffers *pbufs = pmsg->pbufs;
const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
const size_t outbuf_sz = sizeof(pbufs->outbuf);
char *scratchbuf = &pbufs->scratchbuf[0];
char *outbuf = &pbufs->outbuf[0];
+ va_list args;
size_t len;
- len = scnprintf(scratchbuf, scratchbuf_sz,
- "** %lu printk messages dropped **\n", dropped);
+ va_start(args, fmt);
+ len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
+ va_end(args);
/*
* Make sure outbuf is sufficiently large before prepending.
@@ -2850,6 +2894,30 @@ void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
}
/*
+ * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
+ * @pmsg->outbuf_len is updated appropriately.
+ *
+ * @pmsg is the printk message to prepend.
+ *
+ * @dropped is the dropped count to report in the dropped message.
+ */
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+{
+ console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
+}
+
+/*
+ * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
+ * @pmsg->outbuf_len is updated appropriately.
+ *
+ * @pmsg is the printk message to prepend.
+ */
+void console_prepend_replay(struct printk_message *pmsg)
+{
+ console_prepend_message(pmsg, "** replaying previous printk message **\n");
+}
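
With this in place, a record that was interrupted and later replayed by a
higher-priority context would appear on the console roughly as follows
(timestamp and text assumed for illustration):

	** replaying previous printk message **
	[   12.345678] text of the interrupted record
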
+
+/*
* Read and format the specified record (or a later record if the specified
* record is not available).
*
@@ -2915,6 +2983,34 @@ out:
}
/*
+ * Legacy console printing from printk() caller context does not respect
+ * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
+ * false positive. For PREEMPT_RT the false positive condition does not
+ * occur.
+ *
+ * This map is used to temporarily establish LD_WAIT_SLEEP context for the
+ * console write() callback when legacy printing to avoid false positive
+ * lockdep complaints, thus allowing lockdep to continue to function for
+ * real issues.
+ */
+#ifdef CONFIG_PREEMPT_RT
+static inline void printk_legacy_allow_spinlock_enter(void) { }
+static inline void printk_legacy_allow_spinlock_exit(void) { }
+#else
+static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
+
+static inline void printk_legacy_allow_spinlock_enter(void)
+{
+ lock_map_acquire_try(&printk_legacy_map);
+}
+
+static inline void printk_legacy_allow_spinlock_exit(void)
+{
+ lock_map_release(&printk_legacy_map);
+}
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
* Used as the printk buffers for non-panic, serialized console printing.
* This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
* Its usage requires the console_lock held.
@@ -2963,31 +3059,46 @@ static bool console_emit_next_record(struct console *con, bool *handover, int co
con->dropped = 0;
}
- /*
- * While actively printing out messages, if another printk()
- * were to occur on another CPU, it may wait for this one to
- * finish. This task can not be preempted if there is a
- * waiter waiting to take over.
- *
- * Interrupts are disabled because the hand over to a waiter
- * must not be interrupted until the hand over is completed
- * (@console_waiter is cleared).
- */
- printk_safe_enter_irqsave(flags);
- console_lock_spinning_enable();
+ /* Write everything out to the hardware. */
- /* Do not trace print latency. */
- stop_critical_timings();
+ if (force_legacy_kthread() && !panic_in_progress()) {
+ /*
+ * With forced threading this function is in a task context
+ * (either legacy kthread or get_init_console_seq()). There
+ * is no need for concern about printk reentrance, handovers,
+ * or lockdep complaints.
+ */
- /* Write everything out to the hardware. */
- con->write(con, outbuf, pmsg.outbuf_len);
+ con->write(con, outbuf, pmsg.outbuf_len);
+ con->seq = pmsg.seq + 1;
+ } else {
+ /*
+ * While actively printing out messages, if another printk()
+ * were to occur on another CPU, it may wait for this one to
+ * finish. This task can not be preempted if there is a
+ * waiter waiting to take over.
+ *
+ * Interrupts are disabled because the hand over to a waiter
+ * must not be interrupted until the hand over is completed
+ * (@console_waiter is cleared).
+ */
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
- start_critical_timings();
+ /* Do not trace print latency. */
+ stop_critical_timings();
- con->seq = pmsg.seq + 1;
+ printk_legacy_allow_spinlock_enter();
+ con->write(con, outbuf, pmsg.outbuf_len);
+ printk_legacy_allow_spinlock_exit();
- *handover = console_lock_spinning_disable_and_check(cookie);
- printk_safe_exit_irqrestore(flags);
+ start_critical_timings();
+
+ con->seq = pmsg.seq + 1;
+
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
+ }
skip:
return true;
}
@@ -3000,6 +3111,8 @@ static bool console_emit_next_record(struct console *con, bool *handover, int co
return false;
}
+static inline void printk_kthreads_check_locked(void) { }
+
#endif /* CONFIG_PRINTK */
/*
@@ -3027,6 +3140,7 @@ static bool console_emit_next_record(struct console *con, bool *handover, int co
*/
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
+ struct console_flush_type ft;
bool any_usable = false;
struct console *con;
bool any_progress;
@@ -3038,15 +3152,34 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
do {
any_progress = false;
+ printk_get_console_flush_type(&ft);
+
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+ u64 printk_seq;
bool progress;
- if (!console_is_usable(con))
+ /*
+ * console_flush_all() is only responsible for nbcon
+ * consoles when the nbcon consoles cannot print via
+ * their atomic or threaded flushing.
+ */
+ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+ continue;
+
+ if (!console_is_usable(con, flags, !do_cond_resched))
continue;
any_usable = true;
- progress = console_emit_next_record(con, handover, cookie);
+ if (flags & CON_NBCON) {
+ progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+ !do_cond_resched);
+ printk_seq = nbcon_seq_read(con);
+ } else {
+ progress = console_emit_next_record(con, handover, cookie);
+ printk_seq = con->seq;
+ }
/*
* If a handover has occurred, the SRCU read lock
@@ -3056,8 +3189,8 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
return false;
/* Track the next of the highest seq flushed. */
- if (con->seq > *next_seq)
- *next_seq = con->seq;
+ if (printk_seq > *next_seq)
+ *next_seq = printk_seq;
if (!progress)
continue;
@@ -3080,19 +3213,7 @@ abandon:
return false;
}
-/**
- * console_unlock - unblock the console subsystem from printing
- *
- * Releases the console_lock which the caller holds to block printing of
- * the console subsystem.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * console_unlock(); may be called from any context.
- */
-void console_unlock(void)
+static void __console_flush_and_unlock(void)
{
bool do_cond_resched;
bool handover;
@@ -3136,6 +3257,29 @@ void console_unlock(void)
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
+
+/**
+ * console_unlock - unblock the legacy console subsystem from printing
+ *
+ * Releases the console_lock which the caller holds to block printing of
+ * the legacy console subsystem.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock() emits the output on
+ * legacy consoles prior to releasing the lock.
+ *
+ * console_unlock() may be called from any context.
+ */
+void console_unlock(void)
+{
+ struct console_flush_type ft;
+
+ printk_get_console_flush_type(&ft);
+ if (ft.legacy_direct)
+ __console_flush_and_unlock();
+ else
+ __console_unlock();
+}
EXPORT_SYMBOL(console_unlock);
/**
@@ -3258,6 +3402,7 @@ static void __console_rewind_all(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
+ struct console_flush_type ft;
bool handover;
u64 next_seq;
@@ -3281,7 +3426,13 @@ void console_flush_on_panic(enum con_flush_mode mode)
if (mode == CONSOLE_REPLAY_ALL)
__console_rewind_all();
- console_flush_all(false, &next_seq, &handover);
+ printk_get_console_flush_type(&ft);
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
+
+ /* Flush legacy consoles once allowed, even when dangerous. */
+ if (legacy_allow_panic_sync)
+ console_flush_all(false, &next_seq, &handover);
}
/*
@@ -3338,13 +3489,236 @@ EXPORT_SYMBOL(console_stop);
void console_start(struct console *console)
{
+ struct console_flush_type ft;
+ bool is_nbcon;
+
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
+ is_nbcon = console->flags & CON_NBCON;
console_list_unlock();
+
+ /*
+ * Ensure that all SRCU list walks have completed. The related
+ * printing context must be able to see it is enabled so that
+ * it is guaranteed to wake up and resume printing.
+ */
+ synchronize_srcu(&console_srcu);
+
+ printk_get_console_flush_type(&ft);
+ if (is_nbcon && ft.nbcon_offload)
+ nbcon_kthread_wake(console);
+ else if (ft.legacy_offload)
+ defer_console_output();
+
__pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_start);
+#ifdef CONFIG_PRINTK
+static int unregister_console_locked(struct console *console);
+
+/* True when system boot is far enough to create printer threads. */
+static bool printk_kthreads_ready __ro_after_init;
+
+static struct task_struct *printk_legacy_kthread;
+
+static bool legacy_kthread_should_wakeup(void)
+{
+ struct console_flush_type ft;
+ struct console *con;
+ bool ret = false;
+ int cookie;
+
+ if (kthread_should_stop())
+ return true;
+
+ printk_get_console_flush_type(&ft);
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
+ u64 printk_seq;
+
+ /*
+ * The legacy printer thread is only responsible for nbcon
+ * consoles when the nbcon consoles cannot print via their
+ * atomic or threaded flushing.
+ */
+ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+ continue;
+
+ if (!console_is_usable(con, flags, false))
+ continue;
+
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(con);
+ } else {
+ /*
+ * It is safe to read @seq because only this
+ * thread context updates @seq.
+ */
+ printk_seq = con->seq;
+ }
+
+ if (prb_read_valid(prb, printk_seq, NULL)) {
+ ret = true;
+ break;
+ }
+ }
+ console_srcu_read_unlock(cookie);
+
+ return ret;
+}
+
+static int legacy_kthread_func(void *unused)
+{
+ for (;;) {
+ wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
+
+ if (kthread_should_stop())
+ break;
+
+ console_lock();
+ __console_flush_and_unlock();
+ }
+
+ return 0;
+}
+
+static bool legacy_kthread_create(void)
+{
+ struct task_struct *kt;
+
+ lockdep_assert_console_list_lock_held();
+
+ kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
+ if (WARN_ON(IS_ERR(kt))) {
+ pr_err("failed to start legacy printing thread\n");
+ return false;
+ }
+
+ printk_legacy_kthread = kt;
+
+ /*
+ * It is important that console printing threads are scheduled
+ * shortly after a printk call and with generous runtime budgets.
+ */
+ sched_set_normal(printk_legacy_kthread, -20);
+
+ return true;
+}
+
+/**
+ * printk_kthreads_shutdown - shut down all threaded printers
+ *
+ * On system shutdown all threaded printers are stopped. This allows printk
+ * to transition back to atomic printing, thus providing a robust mechanism
+ * for the final shutdown/reboot messages to be output.
+ */
+static void printk_kthreads_shutdown(void)
+{
+ struct console *con;
+
+ console_list_lock();
+ if (printk_kthreads_running) {
+ printk_kthreads_running = false;
+
+ for_each_console(con) {
+ if (con->flags & CON_NBCON)
+ nbcon_kthread_stop(con);
+ }
+
+ /*
+ * The threads may have been stopped while printing a
+ * backlog. Flush any records left over.
+ */
+ nbcon_atomic_flush_pending();
+ }
+ console_list_unlock();
+}
+
+static struct syscore_ops printk_syscore_ops = {
+ .shutdown = printk_kthreads_shutdown,
+};
+
+/*
+ * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
+ * If any kthreads fail to start, those consoles are unregistered.
+ *
+ * Must be called under console_list_lock().
+ */
+static void printk_kthreads_check_locked(void)
+{
+ struct hlist_node *tmp;
+ struct console *con;
+
+ lockdep_assert_console_list_lock_held();
+
+ if (!printk_kthreads_ready)
+ return;
+
+ if (have_legacy_console || have_boot_console) {
+ if (!printk_legacy_kthread &&
+ force_legacy_kthread() &&
+ !legacy_kthread_create()) {
+ /*
+ * All legacy consoles must be unregistered. If there
+ * are any nbcon consoles, they will set up their own
+ * kthread.
+ */
+ hlist_for_each_entry_safe(con, tmp, &console_list, node) {
+ if (con->flags & CON_NBCON)
+ continue;
+
+ unregister_console_locked(con);
+ }
+ }
+ } else if (printk_legacy_kthread) {
+ kthread_stop(printk_legacy_kthread);
+ printk_legacy_kthread = NULL;
+ }
+
+ /*
+ * Printer threads cannot be started as long as any boot console is
+ * registered because there is no way to synchronize the hardware
+ * registers between boot console code and regular console code.
+ * It can only be known that there will be no new boot consoles when
+ * an nbcon console is registered.
+ */
+ if (have_boot_console || !have_nbcon_console) {
+ /* Clear flag in case all nbcon consoles unregistered. */
+ printk_kthreads_running = false;
+ return;
+ }
+
+ if (printk_kthreads_running)
+ return;
+
+ hlist_for_each_entry_safe(con, tmp, &console_list, node) {
+ if (!(con->flags & CON_NBCON))
+ continue;
+
+ if (!nbcon_kthread_create(con))
+ unregister_console_locked(con);
+ }
+
+ printk_kthreads_running = true;
+}
+
+static int __init printk_set_kthreads_ready(void)
+{
+ register_syscore_ops(&printk_syscore_ops);
+
+ console_list_lock();
+ printk_kthreads_ready = true;
+ printk_kthreads_check_locked();
+ console_list_unlock();
+
+ return 0;
+}
+early_initcall(printk_set_kthreads_ready);
+#endif /* CONFIG_PRINTK */
+
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
@@ -3446,19 +3820,21 @@ static void try_enable_default_console(struct console *newcon)
newcon->flags |= CON_CONSDEV;
}
-static void console_init_seq(struct console *newcon, bool bootcon_registered)
+/* Return the starting sequence number for a newly registered console. */
+static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
{
struct console *con;
bool handover;
+ u64 init_seq;
if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
- newcon->seq = syslog_seq;
+ init_seq = syslog_seq;
mutex_unlock(&syslog_lock);
} else {
/* Begin with next message added to ringbuffer. */
- newcon->seq = prb_next_seq(prb);
+ init_seq = prb_next_seq(prb);
/*
* If any enabled boot consoles are due to be unregistered
@@ -3479,7 +3855,7 @@ static void console_init_seq(struct console *newcon, bool bootcon_registered)
* Flush all consoles and set the console to start at
* the next unprinted sequence number.
*/
- if (!console_flush_all(true, &newcon->seq, &handover)) {
+ if (!console_flush_all(true, &init_seq, &handover)) {
/*
* Flushing failed. Just choose the lowest
* sequence of the enabled boot consoles.
@@ -3492,19 +3868,30 @@ static void console_init_seq(struct console *newcon, bool bootcon_registered)
if (handover)
console_lock();
- newcon->seq = prb_next_seq(prb);
+ init_seq = prb_next_seq(prb);
for_each_console(con) {
- if ((con->flags & CON_BOOT) &&
- (con->flags & CON_ENABLED) &&
- con->seq < newcon->seq) {
- newcon->seq = con->seq;
+ u64 seq;
+
+ if (!(con->flags & CON_BOOT) ||
+ !(con->flags & CON_ENABLED)) {
+ continue;
}
+
+ if (con->flags & CON_NBCON)
+ seq = nbcon_seq_read(con);
+ else
+ seq = con->seq;
+
+ if (seq < init_seq)
+ init_seq = seq;
}
}
console_unlock();
}
}
+
+ return init_seq;
}
#define console_first() \
@@ -3533,9 +3920,12 @@ static int unregister_console_locked(struct console *console);
*/
void register_console(struct console *newcon)
{
- struct console *con;
+ bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
bool bootcon_registered = false;
bool realcon_registered = false;
+ struct console *con;
+ unsigned long flags;
+ u64 init_seq;
int err;
console_list_lock();
@@ -3613,10 +4003,31 @@ void register_console(struct console *newcon)
}
newcon->dropped = 0;
- console_init_seq(newcon, bootcon_registered);
+ init_seq = get_init_console_seq(newcon, bootcon_registered);
+
+ if (newcon->flags & CON_NBCON) {
+ have_nbcon_console = true;
+ nbcon_seq_force(newcon, init_seq);
+ } else {
+ have_legacy_console = true;
+ newcon->seq = init_seq;
+ }
+
+ if (newcon->flags & CON_BOOT)
+ have_boot_console = true;
- if (newcon->flags & CON_NBCON)
- nbcon_init(newcon);
+ /*
+ * If another context is actively using the hardware of this new
+ * console, it will not be aware of the nbcon synchronization. There
+ * is a risk that two contexts could access the hardware
+ * simultaneously if this new console is used for atomic printing
+ * and the other context is still using the hardware.
+ *
+ * Use the driver synchronization to ensure that the hardware is not
+ * in use while this new console transitions to being registered.
+ */
+ if (use_device_lock)
+ newcon->device_lock(newcon, &flags);
/*
* Put this console in the list - keep the
@@ -3642,6 +4053,10 @@ void register_console(struct console *newcon)
* register_console() completes.
*/
+ /* This new console is now registered. */
+ if (use_device_lock)
+ newcon->device_unlock(newcon, flags);
+
console_sysfs_notify();
/*
@@ -3662,6 +4077,9 @@ void register_console(struct console *newcon)
unregister_console_locked(con);
}
}
+
+ /* Changed console list, may require printer threads to start/stop. */
+ printk_kthreads_check_locked();
unlock:
console_list_unlock();
}
@@ -3670,6 +4088,12 @@ EXPORT_SYMBOL(register_console);
/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool found_legacy_con = false;
+ bool found_nbcon_con = false;
+ bool found_boot_con = false;
+ unsigned long flags;
+ struct console *c;
int res;
lockdep_assert_console_list_lock_held();
@@ -3682,14 +4106,29 @@ static int unregister_console_locked(struct console *console)
if (res > 0)
return 0;
+ if (!console_is_registered_locked(console))
+ res = -ENODEV;
+ else if (console_is_usable(console, console->flags, true))
+ __pr_flush(console, 1000, true);
+
/* Disable it unconditionally */
console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
- if (!console_is_registered_locked(console))
- return -ENODEV;
+ if (res < 0)
+ return res;
+
+ /*
+ * Use the driver synchronization to ensure that the hardware is not
+ * in use while this console transitions to being unregistered.
+ */
+ if (use_device_lock)
+ console->device_lock(console, &flags);
hlist_del_init_rcu(&console->node);
+ if (use_device_lock)
+ console->device_unlock(console, flags);
+
/*
* <HISTORICAL>
* If this isn't the last console and it has CON_CONSDEV set, we
@@ -3717,6 +4156,29 @@ static int unregister_console_locked(struct console *console)
if (console->exit)
res = console->exit(console);
+ /*
+ * With this console gone, the global flags tracking registered
+ * console types may have changed. Update them.
+ */
+ for_each_console(c) {
+ if (c->flags & CON_BOOT)
+ found_boot_con = true;
+
+ if (c->flags & CON_NBCON)
+ found_nbcon_con = true;
+ else
+ found_legacy_con = true;
+ }
+ if (!found_boot_con)
+ have_boot_console = found_boot_con;
+ if (!found_legacy_con)
+ have_legacy_console = found_legacy_con;
+ if (!found_nbcon_con)
+ have_nbcon_console = found_nbcon_con;
+
+ /* Changed console list, may require printer threads to start/stop. */
+ printk_kthreads_check_locked();
+
return res;
}
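Note that the conditional assignments above can only lower a flag: have_x = found_x executes only when found_x is false, and the flags are raised exclusively on the register_console() path. An equivalent, more explicit form of the same update:

	if (!found_boot_con)
		have_boot_console = false;
	if (!found_legacy_con)
		have_legacy_console = false;
	if (!found_nbcon_con)
		have_nbcon_console = false;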
@@ -3863,6 +4325,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
{
unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
unsigned long remaining_jiffies = timeout_jiffies;
+ struct console_flush_type ft;
struct console *c;
u64 last_diff = 0;
u64 printk_seq;
@@ -3871,13 +4334,22 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
u64 diff;
u64 seq;
+ /* Sorry, pr_flush() will not work this early. */
+ if (system_state < SYSTEM_SCHEDULING)
+ return false;
+
might_sleep();
seq = prb_next_reserve_seq(prb);
/* Flush the consoles so that records up to @seq are printed. */
- console_lock();
- console_unlock();
+ printk_get_console_flush_type(&ft);
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
+ if (ft.legacy_direct) {
+ console_lock();
+ console_unlock();
+ }
for (;;) {
unsigned long begin_jiffies;
@@ -3890,6 +4362,12 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* console->seq. Releasing console_lock flushes more
* records in case @seq is still not printed on all
* usable consoles.
+ *
+ * Holding the console_lock is not necessary if there
+ * are no legacy or boot consoles. However, such a
+ * console could register at any time. Always hold the
+ * console_lock as a precaution rather than
+ * synchronizing against register_console().
*/
console_lock();
@@ -3905,8 +4383,10 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
- if (!console_is_usable(c))
+ if (!console_is_usable(c, flags, true) &&
+ !console_is_usable(c, flags, false)) {
continue;
+ }
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c);
@@ -3974,9 +4454,13 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
+ if (force_legacy_kthread()) {
+ if (printk_legacy_kthread)
+ wake_up_interruptible(&legacy_wait);
+ } else {
+ if (console_trylock())
+ console_unlock();
+ }
}
if (pending & PRINTK_PENDING_WAKEUP)
@@ -4184,16 +4668,21 @@ const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
/**
- * kmsg_dump - dump kernel log to kernel message dumpers.
+ * kmsg_dump_desc - dump kernel log to kernel message dumpers.
* @reason: the reason (oops, panic etc) for dumping
+ * @desc: a short string to describe what caused the panic or oops. Can be NULL
+ * if no additional description is available.
*
* Call each of the registered dumper's dump() callback, which can
* retrieve the kmsg records with kmsg_dump_get_line() or
* kmsg_dump_get_buffer().
*/
-void kmsg_dump(enum kmsg_dump_reason reason)
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
struct kmsg_dumper *dumper;
+ struct kmsg_dump_detail detail = {
+ .reason = reason,
+ .description = desc};
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
@@ -4211,7 +4700,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
continue;
/* invoke dumper which will iterate over records */
- dumper->dump(dumper, reason);
+ dumper->dump(dumper, &detail);
}
rcu_read_unlock();
}
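With this change a dumper receives a struct kmsg_dump_detail rather than a bare reason. As a hedged sketch of a callback written against the new signature (the my_dump name is invented and registration boilerplate is elided):

	static void my_dump(struct kmsg_dumper *dumper,
			    struct kmsg_dump_detail *detail)
	{
		pr_info("dump reason=%d desc=%s\n", detail->reason,
			detail->description ? detail->description : "(none)");
		/* ... fetch records via kmsg_dump_get_line()/get_buffer() ... */
	}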
@@ -4382,8 +4871,17 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
*/
void console_try_replay_all(void)
{
+ struct console_flush_type ft;
+
+ printk_get_console_flush_type(&ft);
if (console_trylock()) {
__console_rewind_all();
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
+ if (ft.nbcon_offload)
+ nbcon_kthreads_wake();
+ if (ft.legacy_offload)
+ defer_console_output();
/* Consoles are flushed as part of console_unlock(). */
console_unlock();
}
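The same query-then-dispatch pattern appears in __pr_flush() above: compute the applicable flush modes once, then trigger each mode that is active. A minimal sketch, assuming the mode bits may be set independently:

	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();	/* flush nbcon consoles now */
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();		/* per-console kthreads print */
	if (ft.legacy_offload)
		defer_console_output();		/* irq_work kicks legacy path */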
diff --git a/kernel/printk/printk_ringbuffer.h b/kernel/printk/printk_ringbuffer.h
index 52626d0f1fa3..4ef81349d9fb 100644
--- a/kernel/printk/printk_ringbuffer.h
+++ b/kernel/printk/printk_ringbuffer.h
@@ -4,7 +4,10 @@
#define _KERNEL_PRINTK_RINGBUFFER_H
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/dev_printk.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
/*
* Meta information about each stored message.
@@ -120,7 +123,7 @@ enum desc_state {
#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
-#define DESC_SV_BITS (sizeof(unsigned long) * 8)
+#define DESC_SV_BITS BITS_PER_LONG
#define DESC_FLAGS_SHIFT (DESC_SV_BITS - 2)
#define DESC_FLAGS_MASK (3UL << DESC_FLAGS_SHIFT)
#define DESC_STATE(sv) (3UL & (sv >> DESC_FLAGS_SHIFT))
@@ -401,10 +404,12 @@ u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
#define __u64seq_to_ulseq(u64seq) (u64seq)
#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
+#define ULSEQ_MAX(rb) (-1)
#else /* CONFIG_64BIT */
#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
+#define ULSEQ_MAX(rb) __u64seq_to_ulseq(prb_first_seq(rb) + 0x80000000UL)
static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
{
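On 32-bit kernels sequence numbers are folded to 32 bits, so ULSEQ_MAX() is defined relative to the ring's oldest sequence: a value half the 32-bit space ahead acts as "infinity" for wrap-safe comparisons. A userspace sketch of the folding and comparison (names illustrative only):

	#include <stdint.h>

	static uint32_t ulseq_max(uint64_t first_seq)
	{
		return (uint32_t)(first_seq + 0x80000000UL);
	}

	/* a is "before" b when the signed 32-bit difference is negative. */
	static int ulseq_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}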
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 6d10927a07d8..2b35a9d3919d 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -26,6 +26,29 @@ void __printk_safe_exit(void)
this_cpu_dec(printk_context);
}
+void __printk_deferred_enter(void)
+{
+ cant_migrate();
+ __printk_safe_enter();
+}
+
+void __printk_deferred_exit(void)
+{
+ cant_migrate();
+ __printk_safe_exit();
+}
+
+bool is_printk_legacy_deferred(void)
+{
+ /*
+ * The per-CPU variable @printk_context can be read safely in any
+ * context. CPU migration is always disabled when set.
+	 * context. CPU migration is always disabled while it is set.
+ return (force_legacy_kthread() ||
+ this_cpu_read(printk_context) ||
+ in_nmi());
+}
+
asmlinkage int vprintk(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
@@ -38,7 +61,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* Use the main logbuf even in NMI. But avoid calling console
* drivers that might have their own locks.
*/
- if (this_cpu_read(printk_context) || in_nmi())
+ if (is_printk_legacy_deferred())
return vprintk_deferred(fmt, args);
/* No obstacles. */
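The cant_migrate() checks above encode the contract that callers are already pinned to a CPU, typically by holding a raw spinlock, so the per-CPU printk_context counter stays coherent. A hedged usage sketch, assuming the unprefixed printk_deferred_enter()/printk_deferred_exit() wrappers map to the helpers above (lock and flags are assumed to exist in the caller):

	raw_spin_lock_irqsave(&lock, flags);
	printk_deferred_enter();	/* printk() now defers via irq_work */
	/* ... code that may printk() while the lock is held ... */
	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&lock, flags);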
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 38238e595a61..feb3ac1dc5d5 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -54,9 +54,6 @@
* grace-period sequence number.
*/
-#define RCU_SEQ_CTR_SHIFT 2
-#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
-
/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED 0x1
@@ -255,6 +252,11 @@ static inline void debug_rcu_head_callback(struct rcu_head *rhp)
kmem_dump_obj(rhp);
}
+static inline bool rcu_barrier_cb_is_done(struct rcu_head *rhp)
+{
+ return rhp->next == rhp;
+}
+
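The helper above consumes a sentinel protocol used by the barrier callbacks later in this patch: an invoked callback points ->next at itself, something the callback lists never do for a queued entry. Sketch of the producing side (mirroring srcu_barrier_cb() and rcu_barrier_tasks_generic_cb() below):

	static void demo_barrier_cb(struct rcu_head *rhp)
	{
		rhp->next = rhp;	/* mark the callback as invoked */
		/* ... decrement the barrier count and wake waiters ... */
	}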
extern int rcu_cpu_stall_suppress_at_boot;
static inline bool rcu_stall_is_suppressed_at_boot(void)
@@ -606,7 +608,7 @@ void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags,
#endif
#ifdef CONFIG_TINY_RCU
-static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
+static inline bool rcu_watching_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
@@ -619,7 +621,7 @@ static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
-bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
+bool rcu_watching_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 1693ea22ef1b..298a2c573f02 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -261,17 +261,6 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
}
/*
- * Mark the specified rcu_segcblist structure as offloaded (or not)
- */
-void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
-{
- if (offload)
- rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
- else
- rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
-}
-
-/*
* Does the specified rcu_segcblist structure contain callbacks that
* are ready to be invoked?
*/
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 4fe877f5f654..259904075636 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -89,16 +89,7 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
- return true;
-
- return false;
-}
-
-static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
-{
- if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
+ rcu_segcblist_test_flags(rsclp, SEGCBLIST_OFFLOADED))
return true;
return false;
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index b53a9e8f5904..6d37596deb1f 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -39,6 +39,7 @@
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>
+#include <linux/sched/debug.h>
#include "rcu.h"
@@ -104,6 +105,20 @@ static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");
+// Structure definitions for a custom fixed-per-task allocator.
+struct writer_mblock {
+ struct rcu_head wmb_rh;
+ struct llist_node wmb_node;
+ struct writer_freelist *wmb_wfl;
+};
+
+struct writer_freelist {
+ struct llist_head ws_lhg;
+ atomic_t ws_inflight;
+ struct llist_head ____cacheline_internodealigned_in_smp ws_lhp;
+ struct writer_mblock *ws_mblocks;
+};
+
static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
@@ -111,6 +126,8 @@ static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;
static u64 **writer_durations;
+static bool *writer_done;
+static struct writer_freelist *writer_freelists;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
@@ -120,7 +137,6 @@ static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
-static DEFINE_PER_CPU(atomic_t, n_async_inflight);
#define MAX_MEAS 10000
#define MIN_MEAS 100
@@ -143,6 +159,7 @@ struct rcu_scale_ops {
void (*sync)(void);
void (*exp_sync)(void);
struct task_struct *(*rso_gp_kthread)(void);
+ void (*stats)(void);
const char *name;
};
@@ -224,6 +241,11 @@ static void srcu_scale_synchronize(void)
synchronize_srcu(srcu_ctlp);
}
+static void srcu_scale_stats(void)
+{
+ srcu_torture_stats_print(srcu_ctlp, scale_type, SCALE_FLAG);
+}
+
static void srcu_scale_synchronize_expedited(void)
{
synchronize_srcu_expedited(srcu_ctlp);
@@ -241,6 +263,7 @@ static struct rcu_scale_ops srcu_ops = {
.gp_barrier = srcu_rcu_barrier,
.sync = srcu_scale_synchronize,
.exp_sync = srcu_scale_synchronize_expedited,
+ .stats = srcu_scale_stats,
.name = "srcu"
};
@@ -270,6 +293,7 @@ static struct rcu_scale_ops srcud_ops = {
.gp_barrier = srcu_rcu_barrier,
.sync = srcu_scale_synchronize,
.exp_sync = srcu_scale_synchronize_expedited,
+ .stats = srcu_scale_stats,
.name = "srcud"
};
@@ -288,6 +312,11 @@ static void tasks_scale_read_unlock(int idx)
{
}
+static void rcu_tasks_scale_stats(void)
+{
+ rcu_tasks_torture_stats_print(scale_type, SCALE_FLAG);
+}
+
static struct rcu_scale_ops tasks_ops = {
.ptype = RCU_TASKS_FLAVOR,
.init = rcu_sync_scale_init,
@@ -300,6 +329,7 @@ static struct rcu_scale_ops tasks_ops = {
.sync = synchronize_rcu_tasks,
.exp_sync = synchronize_rcu_tasks,
.rso_gp_kthread = get_rcu_tasks_gp_kthread,
+ .stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_scale_stats,
.name = "tasks"
};
@@ -326,6 +356,11 @@ static void tasks_rude_scale_read_unlock(int idx)
{
}
+static void rcu_tasks_rude_scale_stats(void)
+{
+ rcu_tasks_rude_torture_stats_print(scale_type, SCALE_FLAG);
+}
+
static struct rcu_scale_ops tasks_rude_ops = {
.ptype = RCU_TASKS_RUDE_FLAVOR,
.init = rcu_sync_scale_init,
@@ -333,11 +368,10 @@ static struct rcu_scale_ops tasks_rude_ops = {
.readunlock = tasks_rude_scale_read_unlock,
.get_gp_seq = rcu_no_completed,
.gp_diff = rcu_seq_diff,
- .async = call_rcu_tasks_rude,
- .gp_barrier = rcu_barrier_tasks_rude,
.sync = synchronize_rcu_tasks_rude,
.exp_sync = synchronize_rcu_tasks_rude,
.rso_gp_kthread = get_rcu_tasks_rude_gp_kthread,
+ .stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_rude_scale_stats,
.name = "tasks-rude"
};
@@ -366,6 +400,11 @@ static void tasks_trace_scale_read_unlock(int idx)
rcu_read_unlock_trace();
}
+static void rcu_tasks_trace_scale_stats(void)
+{
+ rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG);
+}
+
static struct rcu_scale_ops tasks_tracing_ops = {
.ptype = RCU_TASKS_FLAVOR,
.init = rcu_sync_scale_init,
@@ -378,6 +417,7 @@ static struct rcu_scale_ops tasks_tracing_ops = {
.sync = synchronize_rcu_tasks_trace,
.exp_sync = synchronize_rcu_tasks_trace,
.rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
+ .stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats,
.name = "tasks-tracing"
};
@@ -438,12 +478,52 @@ rcu_scale_reader(void *arg)
}
/*
+ * Allocate a writer_mblock structure for the specified rcu_scale_writer
+ * task.
+ */
+static struct writer_mblock *rcu_scale_alloc(long me)
+{
+ struct llist_node *llnp;
+ struct writer_freelist *wflp;
+ struct writer_mblock *wmbp;
+
+ if (WARN_ON_ONCE(!writer_freelists))
+ return NULL;
+ wflp = &writer_freelists[me];
+ if (llist_empty(&wflp->ws_lhp)) {
+ // ->ws_lhp is private to its rcu_scale_writer task.
+ wmbp = container_of(llist_del_all(&wflp->ws_lhg), struct writer_mblock, wmb_node);
+ wflp->ws_lhp.first = &wmbp->wmb_node;
+ }
+ llnp = llist_del_first(&wflp->ws_lhp);
+ if (!llnp)
+ return NULL;
+ return container_of(llnp, struct writer_mblock, wmb_node);
+}
+
+/*
+ * Free a writer_mblock structure to its rcu_scale_writer task.
+ */
+static void rcu_scale_free(struct writer_mblock *wmbp)
+{
+ struct writer_freelist *wflp;
+
+ if (!wmbp)
+ return;
+ wflp = wmbp->wmb_wfl;
+ llist_add(&wmbp->wmb_node, &wflp->ws_lhg);
+}
+
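The two llist heads split the allocator into a private fast path and a shared free path: rcu_scale_async_cb() pushes blocks onto ws_lhg from callback context, while the owning writer pops from its private ws_lhp and refills it only when empty. A condensed sketch of the refill, equivalent to the container_of() dance in rcu_scale_alloc() above:

	struct llist_node *node = llist_del_first(&wflp->ws_lhp);	/* private, no atomics */

	if (!node) {
		/* take everything freed so far in one atomic exchange */
		wflp->ws_lhp.first = llist_del_all(&wflp->ws_lhg);
		node = llist_del_first(&wflp->ws_lhp);
	}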
+/*
* Callback function for asynchronous grace periods from rcu_scale_writer().
*/
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
- atomic_dec(this_cpu_ptr(&n_async_inflight));
- kfree(rhp);
+ struct writer_mblock *wmbp = container_of(rhp, struct writer_mblock, wmb_rh);
+ struct writer_freelist *wflp = wmbp->wmb_wfl;
+
+ atomic_dec(&wflp->ws_inflight);
+ rcu_scale_free(wmbp);
}
/*
@@ -456,12 +536,14 @@ rcu_scale_writer(void *arg)
int i_max;
unsigned long jdone;
long me = (long)arg;
- struct rcu_head *rhp = NULL;
+ bool selfreport = false;
bool started = false, done = false, alldone = false;
u64 t;
DEFINE_TORTURE_RANDOM(tr);
u64 *wdp;
u64 *wdpp = writer_durations[me];
+ struct writer_freelist *wflp = &writer_freelists[me];
+ struct writer_mblock *wmbp = NULL;
VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
WARN_ON(!wdpp);
@@ -493,30 +575,34 @@ rcu_scale_writer(void *arg)
jdone = jiffies + minruntime * HZ;
do {
+ bool gp_succeeded = false;
+
if (writer_holdoff)
udelay(writer_holdoff);
if (writer_holdoff_jiffies)
schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
wdp = &wdpp[i];
*wdp = ktime_get_mono_fast_ns();
- if (gp_async) {
-retry:
- if (!rhp)
- rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
- if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
- atomic_inc(this_cpu_ptr(&n_async_inflight));
- cur_ops->async(rhp, rcu_scale_async_cb);
- rhp = NULL;
+ if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
+ if (!wmbp)
+ wmbp = rcu_scale_alloc(me);
+ if (wmbp && atomic_read(&wflp->ws_inflight) < gp_async_max) {
+ atomic_inc(&wflp->ws_inflight);
+ cur_ops->async(&wmbp->wmb_rh, rcu_scale_async_cb);
+ wmbp = NULL;
+ gp_succeeded = true;
} else if (!kthread_should_stop()) {
cur_ops->gp_barrier();
- goto retry;
} else {
- kfree(rhp); /* Because we are stopping. */
+ rcu_scale_free(wmbp); /* Because we are stopping. */
+ wmbp = NULL;
}
} else if (gp_exp) {
cur_ops->exp_sync();
+ gp_succeeded = true;
} else {
cur_ops->sync();
+ gp_succeeded = true;
}
t = ktime_get_mono_fast_ns();
*wdp = t - *wdp;
@@ -526,6 +612,7 @@ retry:
started = true;
if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
done = true;
+ WRITE_ONCE(writer_done[me], true);
sched_set_normal(current, 0);
pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
scale_type, SCALE_FLAG, me, MIN_MEAS);
@@ -551,11 +638,32 @@ retry:
if (done && !alldone &&
atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
alldone = true;
- if (started && !alldone && i < MAX_MEAS - 1)
+ if (done && !alldone && time_after(jiffies, jdone + HZ * 60)) {
+ static atomic_t dumped;
+ int i;
+
+ if (!atomic_xchg(&dumped, 1)) {
+ for (i = 0; i < nrealwriters; i++) {
+ if (writer_done[i])
+ continue;
+ pr_info("%s: Task %ld flags writer %d:\n", __func__, me, i);
+ sched_show_task(writer_tasks[i]);
+ }
+ if (cur_ops->stats)
+ cur_ops->stats();
+ }
+ }
+ if (!selfreport && time_after(jiffies, jdone + HZ * (70 + me))) {
+ pr_info("%s: Writer %ld self-report: started %d done %d/%d->%d i %d jdone %lu.\n",
+ __func__, me, started, done, writer_done[me], atomic_read(&n_rcu_scale_writer_finished), i, jiffies - jdone);
+ selfreport = true;
+ }
+ if (gp_succeeded && started && !alldone && i < MAX_MEAS - 1)
i++;
rcu_scale_wait_shutdown();
} while (!torture_must_stop());
- if (gp_async) {
+ if (gp_async && cur_ops->async) {
+ rcu_scale_free(wmbp);
cur_ops->gp_barrier();
}
writer_n_durations[me] = i_max + 1;
@@ -713,6 +821,7 @@ kfree_scale_cleanup(void)
torture_stop_kthread(kfree_scale_thread,
kfree_reader_tasks[i]);
kfree(kfree_reader_tasks);
+ kfree_reader_tasks = NULL;
}
torture_cleanup_end();
@@ -881,6 +990,7 @@ rcu_scale_cleanup(void)
torture_stop_kthread(rcu_scale_reader,
reader_tasks[i]);
kfree(reader_tasks);
+ reader_tasks = NULL;
}
if (writer_tasks) {
@@ -919,10 +1029,33 @@ rcu_scale_cleanup(void)
schedule_timeout_uninterruptible(1);
}
kfree(writer_durations[i]);
+ if (writer_freelists) {
+ int ctr = 0;
+ struct llist_node *llnp;
+ struct writer_freelist *wflp = &writer_freelists[i];
+
+ if (wflp->ws_mblocks) {
+ llist_for_each(llnp, wflp->ws_lhg.first)
+ ctr++;
+ llist_for_each(llnp, wflp->ws_lhp.first)
+ ctr++;
+ WARN_ONCE(ctr != gp_async_max,
+ "%s: ctr = %d gp_async_max = %d\n",
+ __func__, ctr, gp_async_max);
+ kfree(wflp->ws_mblocks);
+ }
+ }
}
kfree(writer_tasks);
+ writer_tasks = NULL;
kfree(writer_durations);
+ writer_durations = NULL;
kfree(writer_n_durations);
+ writer_n_durations = NULL;
+ kfree(writer_done);
+ writer_done = NULL;
+ kfree(writer_freelists);
+ writer_freelists = NULL;
}
/* Do torture-type-specific cleanup operations. */
@@ -949,8 +1082,9 @@ rcu_scale_shutdown(void *arg)
static int __init
rcu_scale_init(void)
{
- long i;
int firsterr = 0;
+ long i;
+ long j;
static struct rcu_scale_ops *scale_ops[] = {
&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
};
@@ -1017,14 +1151,22 @@ rcu_scale_init(void)
}
while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
schedule_timeout_uninterruptible(1);
- writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
- GFP_KERNEL);
- writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
- GFP_KERNEL);
- writer_n_durations =
- kcalloc(nrealwriters, sizeof(*writer_n_durations),
- GFP_KERNEL);
- if (!writer_tasks || !writer_durations || !writer_n_durations) {
+ writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL);
+ writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
+ writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL);
+ writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL);
+ if (gp_async) {
+ if (gp_async_max <= 0) {
+ pr_warn("%s: gp_async_max = %d must be greater than zero.\n",
+ __func__, gp_async_max);
+ WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
+ firsterr = -EINVAL;
+ goto unwind;
+ }
+ writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL);
+ }
+ if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
+ (gp_async && !writer_freelists)) {
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
@@ -1037,6 +1179,24 @@ rcu_scale_init(void)
firsterr = -ENOMEM;
goto unwind;
}
+ if (writer_freelists) {
+ struct writer_freelist *wflp = &writer_freelists[i];
+
+ init_llist_head(&wflp->ws_lhg);
+ init_llist_head(&wflp->ws_lhp);
+ wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]),
+ GFP_KERNEL);
+ if (!wflp->ws_mblocks) {
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (j = 0; j < gp_async_max; j++) {
+ struct writer_mblock *wmbp = &wflp->ws_mblocks[j];
+
+ wmbp->wmb_wfl = wflp;
+ llist_add(&wmbp->wmb_node, &wflp->ws_lhp);
+ }
+ }
firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
writer_tasks[i]);
if (torture_init_error(firsterr))
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 08bf7c669dd3..bb75dbf5c800 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -115,6 +115,7 @@ torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
+torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
@@ -366,8 +367,6 @@ struct rcu_torture_ops {
bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
unsigned long (*get_gp_state)(void);
void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
- unsigned long (*get_gp_completed)(void);
- void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
unsigned long (*start_gp_poll)(void);
void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
bool (*poll_gp_state)(unsigned long oldstate);
@@ -375,6 +374,8 @@ struct rcu_torture_ops {
bool (*poll_need_2gp)(bool poll, bool poll_full);
void (*cond_sync)(unsigned long oldstate);
void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
+ int poll_active;
+ int poll_active_full;
call_rcu_func_t call;
void (*cb_barrier)(void);
void (*fqs)(void);
@@ -553,8 +554,6 @@ static struct rcu_torture_ops rcu_ops = {
.get_comp_state_full = get_completed_synchronize_rcu_full,
.get_gp_state = get_state_synchronize_rcu,
.get_gp_state_full = get_state_synchronize_rcu_full,
- .get_gp_completed = get_completed_synchronize_rcu,
- .get_gp_completed_full = get_completed_synchronize_rcu_full,
.start_gp_poll = start_poll_synchronize_rcu,
.start_gp_poll_full = start_poll_synchronize_rcu_full,
.poll_gp_state = poll_state_synchronize_rcu,
@@ -562,6 +561,8 @@ static struct rcu_torture_ops rcu_ops = {
.poll_need_2gp = rcu_poll_need_2gp,
.cond_sync = cond_synchronize_rcu,
.cond_sync_full = cond_synchronize_rcu_full,
+ .poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
+ .poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
.get_gp_state_exp = get_state_synchronize_rcu,
.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
@@ -740,9 +741,12 @@ static struct rcu_torture_ops srcu_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .same_gp_state = same_state_synchronize_srcu,
+ .get_comp_state = get_completed_synchronize_srcu,
.get_gp_state = srcu_torture_get_gp_state,
.start_gp_poll = srcu_torture_start_gp_poll,
.poll_gp_state = srcu_torture_poll_gp_state,
+ .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
@@ -780,9 +784,12 @@ static struct rcu_torture_ops srcud_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .same_gp_state = same_state_synchronize_srcu,
+ .get_comp_state = get_completed_synchronize_srcu,
.get_gp_state = srcu_torture_get_gp_state,
.start_gp_poll = srcu_torture_start_gp_poll,
.poll_gp_state = srcu_torture_poll_gp_state,
+ .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
@@ -915,11 +922,6 @@ static struct rcu_torture_ops tasks_ops = {
* Definitions for rude RCU-tasks torture testing.
*/
-static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
-}
-
static struct rcu_torture_ops tasks_rude_ops = {
.ttype = RCU_TASKS_RUDE_FLAVOR,
.init = rcu_sync_torture_init,
@@ -927,11 +929,8 @@ static struct rcu_torture_ops tasks_rude_ops = {
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
.get_gp_seq = rcu_no_completed,
- .deferred_free = rcu_tasks_rude_torture_deferred_free,
.sync = synchronize_rcu_tasks_rude,
.exp_sync = synchronize_rcu_tasks_rude,
- .call = call_rcu_tasks_rude,
- .cb_barrier = rcu_barrier_tasks_rude,
.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
.get_gp_data = rcu_tasks_rude_get_gp_data,
.cbflood_max = 50000,
@@ -1318,6 +1317,7 @@ static void rcu_torture_write_types(void)
} else if (gp_sync && !cur_ops->sync) {
pr_alert("%s: gp_sync without primitives.\n", __func__);
}
+ pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
}
/*
@@ -1374,17 +1374,20 @@ rcu_torture_writer(void *arg)
int i;
int idx;
int oldnice = task_nice(current);
- struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
+ struct rcu_gp_oldstate *rgo = NULL;
+ int rgo_size = 0;
struct rcu_torture *rp;
struct rcu_torture *old_rp;
static DEFINE_TORTURE_RANDOM(rand);
unsigned long stallsdone = jiffies;
bool stutter_waited;
- unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+ unsigned long *ulo = NULL;
+ int ulo_size = 0;
// If a new stall test is added, this must be adjusted.
if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
- stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
+ stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
+ HZ * (stall_cpu_repeat + 1);
VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
if (!can_expedite)
pr_alert("%s" TORTURE_FLAG
@@ -1401,6 +1404,16 @@ rcu_torture_writer(void *arg)
torture_kthread_stopping("rcu_torture_writer");
return 0;
}
+ if (cur_ops->poll_active > 0) {
+ ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
+ if (!WARN_ON(!ulo))
+ ulo_size = cur_ops->poll_active;
+ }
+ if (cur_ops->poll_active_full > 0) {
+ rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
+ if (!WARN_ON(!rgo))
+ rgo_size = cur_ops->poll_active_full;
+ }
do {
rcu_torture_writer_state = RTWS_FIXED_DELAY;
@@ -1437,8 +1450,8 @@ rcu_torture_writer(void *arg)
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cookie, cur_ops->get_gp_state());
- if (cur_ops->get_gp_completed) {
- cookie = cur_ops->get_gp_completed();
+ if (cur_ops->get_comp_state) {
+ cookie = cur_ops->get_comp_state();
WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
}
cur_ops->readunlock(idx);
@@ -1452,8 +1465,8 @@ rcu_torture_writer(void *arg)
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cpumask_pr_args(cpu_online_mask));
- if (cur_ops->get_gp_completed_full) {
- cur_ops->get_gp_completed_full(&cookie_full);
+ if (cur_ops->get_comp_state_full) {
+ cur_ops->get_comp_state_full(&cookie_full);
WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
}
cur_ops->readunlock(idx);
@@ -1502,19 +1515,19 @@ rcu_torture_writer(void *arg)
break;
case RTWS_POLL_GET:
rcu_torture_writer_state = RTWS_POLL_GET;
- for (i = 0; i < ARRAY_SIZE(ulo); i++)
+ for (i = 0; i < ulo_size; i++)
ulo[i] = cur_ops->get_comp_state();
gp_snap = cur_ops->start_gp_poll();
rcu_torture_writer_state = RTWS_POLL_WAIT;
while (!cur_ops->poll_gp_state(gp_snap)) {
gp_snap1 = cur_ops->get_gp_state();
- for (i = 0; i < ARRAY_SIZE(ulo); i++)
+ for (i = 0; i < ulo_size; i++)
if (cur_ops->poll_gp_state(ulo[i]) ||
cur_ops->same_gp_state(ulo[i], gp_snap1)) {
ulo[i] = gp_snap1;
break;
}
- WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
+ WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
}
@@ -1522,20 +1535,20 @@ rcu_torture_writer(void *arg)
break;
case RTWS_POLL_GET_FULL:
rcu_torture_writer_state = RTWS_POLL_GET_FULL;
- for (i = 0; i < ARRAY_SIZE(rgo); i++)
+ for (i = 0; i < rgo_size; i++)
cur_ops->get_comp_state_full(&rgo[i]);
cur_ops->start_gp_poll_full(&gp_snap_full);
rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
cur_ops->get_gp_state_full(&gp_snap1_full);
- for (i = 0; i < ARRAY_SIZE(rgo); i++)
+ for (i = 0; i < rgo_size; i++)
if (cur_ops->poll_gp_state_full(&rgo[i]) ||
cur_ops->same_gp_state_full(&rgo[i],
&gp_snap1_full)) {
rgo[i] = gp_snap1_full;
break;
}
- WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
+ WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
}
@@ -1617,6 +1630,8 @@ rcu_torture_writer(void *arg)
pr_alert("%s" TORTURE_FLAG
" Dynamic grace-period expediting was disabled.\n",
torture_type);
+ kfree(ulo);
+ kfree(rgo);
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer");
return 0;
@@ -2370,7 +2385,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
"test_boost=%d/%d test_boost_interval=%d "
"test_boost_duration=%d shutdown_secs=%d "
"stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
- "stall_cpu_block=%d "
+ "stall_cpu_block=%d stall_cpu_repeat=%d "
"n_barrier_cbs=%d "
"onoff_interval=%d onoff_holdoff=%d "
"read_exit_delay=%d read_exit_burst=%d "
@@ -2382,7 +2397,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
test_boost, cur_ops->can_boost,
test_boost_interval, test_boost_duration, shutdown_secs,
stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
- stall_cpu_block,
+ stall_cpu_block, stall_cpu_repeat,
n_barrier_cbs,
onoff_interval, onoff_holdoff,
read_exit_delay, read_exit_burst,
@@ -2460,19 +2475,11 @@ static struct notifier_block rcu_torture_stall_block = {
* induces a CPU stall for the time specified by stall_cpu. If a new
* stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
*/
-static int rcu_torture_stall(void *args)
+static void rcu_torture_stall_one(int rep, int irqsoff)
{
int idx;
- int ret;
unsigned long stop_at;
- VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
- if (rcu_cpu_stall_notifiers) {
- ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
- if (ret)
- pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
- __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
- }
if (stall_cpu_holdoff > 0) {
VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
@@ -2492,12 +2499,12 @@ static int rcu_torture_stall(void *args)
stop_at = ktime_get_seconds() + stall_cpu;
/* RCU CPU stall is expected behavior in following code. */
idx = cur_ops->readlock();
- if (stall_cpu_irqsoff)
+ if (irqsoff)
local_irq_disable();
else if (!stall_cpu_block)
preempt_disable();
- pr_alert("%s start on CPU %d.\n",
- __func__, raw_smp_processor_id());
+ pr_alert("%s start stall episode %d on CPU %d.\n",
+ __func__, rep + 1, raw_smp_processor_id());
while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
!kthread_should_stop())
if (stall_cpu_block) {
@@ -2509,12 +2516,42 @@ static int rcu_torture_stall(void *args)
} else if (stall_no_softlockup) {
touch_softlockup_watchdog();
}
- if (stall_cpu_irqsoff)
+ if (irqsoff)
local_irq_enable();
else if (!stall_cpu_block)
preempt_enable();
cur_ops->readunlock(idx);
}
+}
+
+/*
+ * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many
+ * additional times as specified by the stall_cpu_repeat module parameter.
+ * Note that stall_cpu_irqsoff is ignored on the second and subsequent
+ * stalls.
+ */
+static int rcu_torture_stall(void *args)
+{
+ int i;
+ int repeat = stall_cpu_repeat;
+ int ret;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
+ if (repeat < 0) {
+ repeat = 0;
+ WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
+ }
+ if (rcu_cpu_stall_notifiers) {
+ ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+ if (ret)
+ pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+ __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+ }
+ for (i = 0; i <= repeat; i++) {
+ if (kthread_should_stop())
+ break;
+ rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
+ }
pr_alert("%s end.\n", __func__);
if (rcu_cpu_stall_notifiers && !ret) {
ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
@@ -2680,7 +2717,7 @@ static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(freed);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
@@ -2830,7 +2867,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index f4ea5b1ec068..0db9db73f57f 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -28,6 +28,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
+#include <linux/seq_buf.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
@@ -134,7 +135,7 @@ struct ref_scale_ops {
const char *name;
};
-static struct ref_scale_ops *cur_ops;
+static const struct ref_scale_ops *cur_ops;
static void un_delay(const int udl, const int ndl)
{
@@ -170,7 +171,7 @@ static bool rcu_sync_scale_init(void)
return true;
}
-static struct ref_scale_ops rcu_ops = {
+static const struct ref_scale_ops rcu_ops = {
.init = rcu_sync_scale_init,
.readsection = ref_rcu_read_section,
.delaysection = ref_rcu_delay_section,
@@ -204,7 +205,7 @@ static void srcu_ref_scale_delay_section(const int nloops, const int udl, const
}
}
-static struct ref_scale_ops srcu_ops = {
+static const struct ref_scale_ops srcu_ops = {
.init = rcu_sync_scale_init,
.readsection = srcu_ref_scale_read_section,
.delaysection = srcu_ref_scale_delay_section,
@@ -231,7 +232,7 @@ static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, c
un_delay(udl, ndl);
}
-static struct ref_scale_ops rcu_tasks_ops = {
+static const struct ref_scale_ops rcu_tasks_ops = {
.init = rcu_sync_scale_init,
.readsection = rcu_tasks_ref_scale_read_section,
.delaysection = rcu_tasks_ref_scale_delay_section,
@@ -270,7 +271,7 @@ static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, c
}
}
-static struct ref_scale_ops rcu_trace_ops = {
+static const struct ref_scale_ops rcu_trace_ops = {
.init = rcu_sync_scale_init,
.readsection = rcu_trace_ref_scale_read_section,
.delaysection = rcu_trace_ref_scale_delay_section,
@@ -309,7 +310,7 @@ static void ref_refcnt_delay_section(const int nloops, const int udl, const int
}
}
-static struct ref_scale_ops refcnt_ops = {
+static const struct ref_scale_ops refcnt_ops = {
.init = rcu_sync_scale_init,
.readsection = ref_refcnt_section,
.delaysection = ref_refcnt_delay_section,
@@ -346,7 +347,7 @@ static void ref_rwlock_delay_section(const int nloops, const int udl, const int
}
}
-static struct ref_scale_ops rwlock_ops = {
+static const struct ref_scale_ops rwlock_ops = {
.init = ref_rwlock_init,
.readsection = ref_rwlock_section,
.delaysection = ref_rwlock_delay_section,
@@ -383,7 +384,7 @@ static void ref_rwsem_delay_section(const int nloops, const int udl, const int n
}
}
-static struct ref_scale_ops rwsem_ops = {
+static const struct ref_scale_ops rwsem_ops = {
.init = ref_rwsem_init,
.readsection = ref_rwsem_section,
.delaysection = ref_rwsem_delay_section,
@@ -418,7 +419,7 @@ static void ref_lock_delay_section(const int nloops, const int udl, const int nd
preempt_enable();
}
-static struct ref_scale_ops lock_ops = {
+static const struct ref_scale_ops lock_ops = {
.readsection = ref_lock_section,
.delaysection = ref_lock_delay_section,
.name = "lock"
@@ -453,7 +454,7 @@ static void ref_lock_irq_delay_section(const int nloops, const int udl, const in
preempt_enable();
}
-static struct ref_scale_ops lock_irq_ops = {
+static const struct ref_scale_ops lock_irq_ops = {
.readsection = ref_lock_irq_section,
.delaysection = ref_lock_irq_delay_section,
.name = "lock-irq"
@@ -489,7 +490,7 @@ static void ref_acqrel_delay_section(const int nloops, const int udl, const int
preempt_enable();
}
-static struct ref_scale_ops acqrel_ops = {
+static const struct ref_scale_ops acqrel_ops = {
.readsection = ref_acqrel_section,
.delaysection = ref_acqrel_delay_section,
.name = "acqrel"
@@ -523,7 +524,7 @@ static void ref_clock_delay_section(const int nloops, const int udl, const int n
stopopts = x;
}
-static struct ref_scale_ops clock_ops = {
+static const struct ref_scale_ops clock_ops = {
.readsection = ref_clock_section,
.delaysection = ref_clock_delay_section,
.name = "clock"
@@ -555,7 +556,7 @@ static void ref_jiffies_delay_section(const int nloops, const int udl, const int
stopopts = x;
}
-static struct ref_scale_ops jiffies_ops = {
+static const struct ref_scale_ops jiffies_ops = {
.readsection = ref_jiffies_section,
.delaysection = ref_jiffies_delay_section,
.name = "jiffies"
@@ -705,9 +706,9 @@ static void refscale_typesafe_ctor(void *rtsp_in)
preempt_enable();
}
-static struct ref_scale_ops typesafe_ref_ops;
-static struct ref_scale_ops typesafe_lock_ops;
-static struct ref_scale_ops typesafe_seqlock_ops;
+static const struct ref_scale_ops typesafe_ref_ops;
+static const struct ref_scale_ops typesafe_lock_ops;
+static const struct ref_scale_ops typesafe_seqlock_ops;
// Initialize for a typesafe test.
static bool typesafe_init(void)
@@ -768,7 +769,7 @@ static void typesafe_cleanup(void)
}
// The typesafe_init() function distinguishes these structures by address.
-static struct ref_scale_ops typesafe_ref_ops = {
+static const struct ref_scale_ops typesafe_ref_ops = {
.init = typesafe_init,
.cleanup = typesafe_cleanup,
.readsection = typesafe_read_section,
@@ -776,7 +777,7 @@ static struct ref_scale_ops typesafe_ref_ops = {
.name = "typesafe_ref"
};
-static struct ref_scale_ops typesafe_lock_ops = {
+static const struct ref_scale_ops typesafe_lock_ops = {
.init = typesafe_init,
.cleanup = typesafe_cleanup,
.readsection = typesafe_read_section,
@@ -784,7 +785,7 @@ static struct ref_scale_ops typesafe_lock_ops = {
.name = "typesafe_lock"
};
-static struct ref_scale_ops typesafe_seqlock_ops = {
+static const struct ref_scale_ops typesafe_seqlock_ops = {
.init = typesafe_init,
.cleanup = typesafe_cleanup,
.readsection = typesafe_read_section,
@@ -891,32 +892,34 @@ static u64 process_durations(int n)
{
int i;
struct reader_task *rt;
- char buf1[64];
+ struct seq_buf s;
char *buf;
u64 sum = 0;
buf = kmalloc(800 + 64, GFP_KERNEL);
if (!buf)
return 0;
- buf[0] = 0;
- sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
- exp_idx);
+ seq_buf_init(&s, buf, 800 + 64);
+
+ seq_buf_printf(&s, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
+ exp_idx);
for (i = 0; i < n && !torture_must_stop(); i++) {
rt = &(reader_tasks[i]);
- sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);
if (i % 5 == 0)
- strcat(buf, "\n");
- if (strlen(buf) >= 800) {
- pr_alert("%s", buf);
- buf[0] = 0;
+ seq_buf_putc(&s, '\n');
+
+ if (seq_buf_used(&s) >= 800) {
+ pr_alert("%s", seq_buf_str(&s));
+ seq_buf_clear(&s);
}
- strcat(buf, buf1);
+
+ seq_buf_printf(&s, "%d: %llu\t", i, rt->last_duration_ns);
sum += rt->last_duration_ns;
}
- pr_alert("%s\n", buf);
+ pr_alert("%s\n", seq_buf_str(&s));
kfree(buf);
return sum;
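The rewrite above replaces hand-rolled sprintf()/strcat() bookkeeping with the seq_buf API, which tracks its own length and cannot overrun the backing buffer. A minimal sketch of the pattern (vals[] and n are illustrative):

	char buf[128];
	struct seq_buf s;
	unsigned long long vals[16] = { /* ... */ };
	int n = 16;

	seq_buf_init(&s, buf, sizeof(buf));
	for (int i = 0; i < n; i++) {
		if (seq_buf_used(&s) >= sizeof(buf) - 32) {
			pr_alert("%s", seq_buf_str(&s));	/* flush... */
			seq_buf_clear(&s);			/* ...and reuse */
		}
		seq_buf_printf(&s, "%d: %llu\t", i, vals[i]);
	}
	pr_alert("%s\n", seq_buf_str(&s));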
@@ -1023,7 +1026,7 @@ end:
}
static void
-ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
+ref_scale_print_module_parms(const struct ref_scale_ops *cur_ops, const char *tag)
{
pr_alert("%s" SCALE_FLAG
"--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
@@ -1078,7 +1081,7 @@ ref_scale_init(void)
{
long i;
int firsterr = 0;
- static struct ref_scale_ops *scale_ops[] = {
+ static const struct ref_scale_ops *scale_ops[] = {
&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index b24db425f16d..31706e3293bc 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -137,6 +137,7 @@ static void init_srcu_struct_data(struct srcu_struct *ssp)
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
+ sdp->srcu_barrier_head.next = &sdp->srcu_barrier_head;
sdp->mynode = NULL;
sdp->cpu = cpu;
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
@@ -247,7 +248,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
ssp->srcu_idx = 0;
- ssp->srcu_sup->srcu_gp_seq = 0;
+ ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL;
ssp->srcu_sup->srcu_barrier_seq = 0;
mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
@@ -258,7 +259,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
if (!ssp->sda)
goto err_free_sup;
init_srcu_struct_data(ssp);
- ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
+ ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL;
ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
@@ -266,7 +267,8 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
}
ssp->srcu_sup->srcu_ssp = ssp;
- smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
+ smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed,
+ SRCU_GP_SEQ_INITIAL_VAL); /* Init done. */
return 0;
err_free_sda:
@@ -628,6 +630,7 @@ static unsigned long srcu_get_delay(struct srcu_struct *ssp)
if (time_after(j, gpstart))
jbase += j - gpstart;
if (!jbase) {
+ ASSERT_EXCLUSIVE_WRITER(sup->srcu_n_exp_nodelay);
WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
jbase = 1;
@@ -1560,6 +1563,7 @@ static void srcu_barrier_cb(struct rcu_head *rhp)
struct srcu_data *sdp;
struct srcu_struct *ssp;
+ rhp->next = rhp; // Mark the callback as having been invoked.
sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
ssp = sdp->ssp;
if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
@@ -1818,6 +1822,7 @@ static void process_srcu(struct work_struct *work)
} else {
j = jiffies;
if (READ_ONCE(sup->reschedule_jiffies) == j) {
+ ASSERT_EXCLUSIVE_WRITER(sup->reschedule_count);
WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
curdelay = 1;
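ASSERT_EXCLUSIVE_WRITER() teaches KCSAN that only one context may write the annotated variable at a time, so a concurrent writer is flagged as a bug even though the accesses themselves are marked. The general pattern, as used twice above:

	ASSERT_EXCLUSIVE_WRITER(counter);	/* KCSAN: only we may write */
	WRITE_ONCE(counter, READ_ONCE(counter) + 1);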
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index ba3440a45b6d..6333f4ccf024 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -34,6 +34,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
* @rtp_blkd_tasks: List of tasks blocked as readers.
* @rtp_exit_list: List of tasks in the latter portion of do_exit().
* @cpu: CPU number corresponding to this entry.
+ * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
* @rtpp: Pointer to the rcu_tasks structure.
*/
struct rcu_tasks_percpu {
@@ -49,6 +50,7 @@ struct rcu_tasks_percpu {
struct list_head rtp_blkd_tasks;
struct list_head rtp_exit_list;
int cpu;
+ int index;
struct rcu_tasks *rtpp;
};
@@ -63,7 +65,7 @@ struct rcu_tasks_percpu {
* @init_fract: Initial backoff sleep interval.
* @gp_jiffies: Time of last @gp_state transition.
* @gp_start: Most recent grace-period start in jiffies.
- * @tasks_gp_seq: Number of grace periods completed since boot.
+ * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
* @n_ipis: Number of IPIs sent to encourage grace periods to end.
* @n_ipis_fails: Number of IPI-send failures.
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
@@ -76,6 +78,7 @@ struct rcu_tasks_percpu {
* @call_func: This flavor's call_rcu()-equivalent function.
* @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
* @rtpcpu: This flavor's rcu_tasks_percpu structure.
+ * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
* @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
* @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
* @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
@@ -84,6 +87,7 @@ struct rcu_tasks_percpu {
* @barrier_q_count: Number of queues being waited on.
* @barrier_q_completion: Barrier wait/wakeup mechanism.
* @barrier_q_seq: Sequence number for barrier operations.
+ * @barrier_q_start: Most recent barrier start in jiffies.
* @name: This flavor's textual name.
* @kname: This flavor's kthread name.
*/
@@ -110,6 +114,7 @@ struct rcu_tasks {
call_rcu_func_t call_func;
unsigned int wait_state;
struct rcu_tasks_percpu __percpu *rtpcpu;
+ struct rcu_tasks_percpu **rtpcp_array;
int percpu_enqueue_shift;
int percpu_enqueue_lim;
int percpu_dequeue_lim;
@@ -118,6 +123,7 @@ struct rcu_tasks {
atomic_t barrier_q_count;
struct completion barrier_q_completion;
unsigned long barrier_q_seq;
+ unsigned long barrier_q_start;
char *name;
char *kname;
};
@@ -182,6 +188,8 @@ module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);
+static int rcu_task_cpu_ids;
+
/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
#define RTGS_WAIT_WAIT_CBS 1
@@ -245,6 +253,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
int cpu;
int lim;
int shift;
+ int maxcpu;
+ int index = 0;
if (rcu_task_enqueue_lim < 0) {
rcu_task_enqueue_lim = 1;
@@ -254,14 +264,9 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
}
lim = rcu_task_enqueue_lim;
- if (lim > nr_cpu_ids)
- lim = nr_cpu_ids;
- shift = ilog2(nr_cpu_ids / lim);
- if (((nr_cpu_ids - 1) >> shift) >= lim)
- shift++;
- WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
- WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
- smp_store_release(&rtp->percpu_enqueue_lim, lim);
+ rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
+ BUG_ON(!rtp->rtpcp_array);
+
for_each_possible_cpu(cpu) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
@@ -273,14 +278,30 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
rtpcp->cpu = cpu;
rtpcp->rtpp = rtp;
+ rtpcp->index = index;
+ rtp->rtpcp_array[index] = rtpcp;
+ index++;
if (!rtpcp->rtp_blkd_tasks.next)
INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
if (!rtpcp->rtp_exit_list.next)
INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
+ rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
+ maxcpu = cpu;
}
- pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
- data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
+ rcu_task_cpu_ids = maxcpu + 1;
+ if (lim > rcu_task_cpu_ids)
+ lim = rcu_task_cpu_ids;
+ shift = ilog2(rcu_task_cpu_ids / lim);
+ if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
+ shift++;
+ WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
+ WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
+ smp_store_release(&rtp->percpu_enqueue_lim, lim);
+
+ pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
+ rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
+ rcu_task_cb_adjust, rcu_task_cpu_ids);
}
// Compute wakeup time for lazy callback timer.
@@ -339,6 +360,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
rcu_read_lock();
ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
+ WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
@@ -348,7 +370,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
rtpcp->rtp_n_lock_retries = 0;
}
if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
- READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
+ READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
needadjust = true; // Defer adjustment to avoid deadlock.
}
// Queuing callbacks before initialization not yet supported.
@@ -368,10 +390,10 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
if (unlikely(needadjust)) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
- if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
+ if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
- WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
- smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
+ WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
+ smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
@@ -388,6 +410,7 @@ static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
struct rcu_tasks *rtp;
struct rcu_tasks_percpu *rtpcp;
+ rhp->next = rhp; // Mark the callback as having been invoked.
rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
rtp = rtpcp->rtpp;
if (atomic_dec_and_test(&rtp->barrier_q_count))
@@ -396,7 +419,7 @@ static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
-static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
+static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
int cpu;
unsigned long flags;
@@ -409,6 +432,7 @@ static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
mutex_unlock(&rtp->barrier_q_mutex);
return;
}
+ rtp->barrier_q_start = jiffies;
rcu_seq_start(&rtp->barrier_q_seq);
init_completion(&rtp->barrier_q_completion);
atomic_set(&rtp->barrier_q_count, 2);
@@ -444,6 +468,8 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
for (cpu = 0; cpu < dequeue_limit; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
/* Advance and accelerate any new callbacks. */
@@ -481,7 +507,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim > 1) {
- WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
+ WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
smp_store_release(&rtp->percpu_enqueue_lim, 1);
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
gpdone = false;
@@ -496,7 +522,9 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
}
if (rtp->percpu_dequeue_lim == 1) {
- for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
+ for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
@@ -511,30 +539,32 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
- int cpu;
- int cpunext;
int cpuwq;
unsigned long flags;
int len;
+ int index;
struct rcu_head *rhp;
struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
struct rcu_tasks_percpu *rtpcp_next;
- cpu = rtpcp->cpu;
- cpunext = cpu * 2 + 1;
- if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
- rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
- cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
- queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
- cpunext++;
- if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
- rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
- cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+ index = rtpcp->index * 2 + 1;
+ if (index < num_possible_cpus()) {
+ rtpcp_next = rtp->rtpcp_array[index];
+ if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ index++;
+ if (index < num_possible_cpus()) {
+ rtpcp_next = rtp->rtpcp_array[index];
+ if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
+ queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ }
+ }
}
}
- if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
+ if (rcu_segcblist_empty(&rtpcp->cblist))
return;
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
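Note on the rework above: callback invocation now fans out through dense indexes into rtp->rtpcp_array rather than raw CPU numbers, so only possible CPUs are ever visited. A hedged sketch of the resulting binary-tree kick (helper name illustrative; the WORK_CPU_UNBOUND fallback for not-fully-online CPUs is elided):

/* Entry i kicks entries 2*i+1 and 2*i+2 of the dense per-CPU array. */
static void kick_children(struct rcu_tasks *rtp, int index)
{
	int child;
	struct rcu_tasks_percpu *rtpcp_next;

	for (child = index * 2 + 1; child <= index * 2 + 2; child++) {
		if (child >= num_possible_cpus())
			break;
		rtpcp_next = rtp->rtpcp_array[child];
		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim))
			queue_work_on(rtpcp_next->cpu, system_wq,
				      &rtpcp_next->rtp_work);
	}
}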
@@ -687,9 +717,7 @@ static void __init rcu_tasks_bootup_oddness(void)
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
-#endif /* #ifndef CONFIG_TINY_RCU */
-#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
@@ -723,6 +751,53 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
rtp->lazy_jiffies,
s);
}
+
+/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
+static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt,
+ char *tf, char *tst)
+{
+ cpumask_var_t cm;
+ int cpu;
+ bool gotcb = false;
+ unsigned long j = jiffies;
+
+ pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n",
+ tt, tf, tst, data_race(rtp->tasks_gp_seq),
+ j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
+ data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
+ pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n",
+ data_race(rtp->percpu_enqueue_shift),
+ data_race(rtp->percpu_enqueue_lim),
+ data_race(rtp->percpu_dequeue_lim),
+ data_race(rtp->percpu_dequeue_gpseq));
+ (void)zalloc_cpumask_var(&cm, GFP_KERNEL);
+ pr_alert("\tCallback counts:");
+ for_each_possible_cpu(cpu) {
+ long n;
+ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+ if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
+ cpumask_set_cpu(cpu, cm);
+ n = rcu_segcblist_n_cbs(&rtpcp->cblist);
+ if (!n)
+ continue;
+ pr_cont(" %d:%ld", cpu, n);
+ gotcb = true;
+ }
+ if (gotcb)
+ pr_cont(".\n");
+ else
+ pr_cont(" (none).\n");
+ pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ",
+ data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
+ atomic_read(&rtp->barrier_q_count));
+ if (cpumask_available(cm) && !cpumask_empty(cm))
+ pr_cont(" %*pbl.\n", cpumask_pr_args(cm));
+ else
+ pr_cont("(none).\n");
+ free_cpumask_var(cm);
+}
+
#endif // #ifndef CONFIG_TINY_RCU
static void exit_tasks_rcu_finish_trace(struct task_struct *t);
@@ -1174,6 +1249,12 @@ void show_rcu_tasks_classic_gp_kthread(void)
show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
+
+void rcu_tasks_torture_stats_print(char *tt, char *tf)
+{
+ rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, "");
+}
+EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)
struct task_struct *get_rcu_tasks_gp_kthread(void)
@@ -1244,13 +1325,12 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
////////////////////////////////////////////////////////////////////////
//
-// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
-// passing an empty function to schedule_on_each_cpu(). This approach
-// provides an asynchronous call_rcu_tasks_rude() API and batching of
-// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
-// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
-// and induces otherwise unnecessary context switches on all online CPUs,
-// whether idle or not.
+// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
+// trick of passing an empty function to schedule_on_each_cpu().
+// This approach provides batching of concurrent calls to the synchronous
+// synchronize_rcu_tasks_rude() API. This invokes schedule_on_each_cpu()
+// in order to send IPIs far and wide and induces otherwise unnecessary
+// context switches on all online CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
@@ -1268,11 +1348,11 @@ static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
schedule_on_each_cpu(rcu_tasks_be_rude);
}
-void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
+static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
"RCU Tasks Rude");
-/**
+/*
* call_rcu_tasks_rude() - Queue a callback rude task-based grace period
* @rhp: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
@@ -1289,12 +1369,14 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
+ *
+ * This is no longer exported, and is instead reserved for use by
+ * synchronize_rcu_tasks_rude().
*/
-void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
+static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
-EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
/**
* synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
@@ -1320,26 +1402,9 @@ void synchronize_rcu_tasks_rude(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
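With call_rcu_tasks_rude() made static above, its only remaining caller is the synchronous path. A hedged sketch of the assumed shape, where synchronize_rcu_tasks_generic() is the flavor-generic helper not shown in this hunk:

void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}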
-/**
- * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
- *
- * Although the current implementation is guaranteed to wait, it is not
- * obligated to, for example, if there are no pending callbacks.
- */
-void rcu_barrier_tasks_rude(void)
-{
- rcu_barrier_tasks_generic(&rcu_tasks_rude);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
-
-int rcu_tasks_rude_lazy_ms = -1;
-module_param(rcu_tasks_rude_lazy_ms, int, 0444);
-
static int __init rcu_spawn_tasks_rude_kthread(void)
{
rcu_tasks_rude.gp_sleep = HZ / 10;
- if (rcu_tasks_rude_lazy_ms >= 0)
- rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
@@ -1350,6 +1415,12 @@ void show_rcu_tasks_rude_gp_kthread(void)
show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
+
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf)
+{
+ rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
+}
+EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)
struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
@@ -1613,7 +1684,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
// However, we cannot safely change its state.
n_heavy_reader_attempts++;
// Check for "running" idle tasks on offline CPUs.
- if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
+ if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
return -EINVAL; // No quiescent state, do it the hard way.
n_heavy_reader_updates++;
nesting = 0;
@@ -2027,6 +2098,12 @@ void show_rcu_tasks_trace_gp_kthread(void)
show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
+
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
+{
+ rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, "");
+}
+EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)
struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
@@ -2070,17 +2147,13 @@ static struct rcu_tasks_test_desc tests[] = {
.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
},
{
- .name = "call_rcu_tasks_rude()",
- /* If not defined, the test is skipped. */
- .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
- },
- {
.name = "call_rcu_tasks_trace()",
/* If not defined, the test is skipped. */
.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
}
};
+#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
struct rcu_tasks_test_desc *rttd =
@@ -2090,6 +2163,7 @@ static void test_rcu_tasks_callback(struct rcu_head *rhp)
rttd->notrun = false;
}
+#endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
static void rcu_tasks_initiate_self_tests(void)
{
@@ -2102,16 +2176,14 @@ static void rcu_tasks_initiate_self_tests(void)
#ifdef CONFIG_TASKS_RUDE_RCU
pr_info("Running RCU Tasks Rude wait API self tests\n");
- tests[1].runstart = jiffies;
synchronize_rcu_tasks_rude();
- call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif
#ifdef CONFIG_TASKS_TRACE_RCU
pr_info("Running RCU Tasks Trace wait API self tests\n");
- tests[2].runstart = jiffies;
+ tests[1].runstart = jiffies;
synchronize_rcu_tasks_trace();
- call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
+ call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback);
#endif
}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 4402d6f5f857..b3b3ce34df63 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -105,7 +105,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)
}
/* Invoke the RCU callbacks whose grace period has elapsed. */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
{
struct rcu_head *next, *list;
unsigned long flags;
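This tracks the softirq API change that drops the unused struct softirq_action argument. Under that assumption, the handler prototype and its registration in tiny.c look like:

/* Assumed post-change prototype: */
void open_softirq(int nr, void (*action)(void));

/* Registration stays a one-liner: */
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);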
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e641cc681901..a60616e69b66 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -79,9 +79,6 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.gpwrap = true,
-#ifdef CONFIG_RCU_NOCB_CPU
- .cblist.flags = SEGCBLIST_RCU_CORE,
-#endif
};
static struct rcu_state rcu_state = {
.level = { &rcu_state.node[0] },
@@ -97,6 +94,9 @@ static struct rcu_state rcu_state = {
.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
rcu_sr_normal_gp_cleanup_work),
.srs_cleanups_pending = ATOMIC_INIT(0),
+#ifdef CONFIG_RCU_NOCB_CPU
+ .nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
+#endif
};
/* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -283,37 +283,45 @@ void rcu_softirq_qs(void)
}
/*
- * Reset the current CPU's ->dynticks counter to indicate that the
+ * Reset the current CPU's RCU_WATCHING counter to indicate that the
* newly onlined CPU is no longer in an extended quiescent state.
* This will either leave the counter unchanged, or increment it
* to the next non-quiescent value.
*
* The non-atomic test/increment sequence works because the upper bits
- * of the ->dynticks counter are manipulated only by the corresponding CPU,
+ * of the ->state variable are manipulated only by the corresponding CPU,
* or when the corresponding CPU is offline.
*/
-static void rcu_dynticks_eqs_online(void)
+static void rcu_watching_online(void)
{
- if (ct_dynticks() & RCU_DYNTICKS_IDX)
+ if (ct_rcu_watching() & CT_RCU_WATCHING)
return;
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
}
/*
- * Return true if the snapshot returned from rcu_dynticks_snap()
+ * Return true if the snapshot returned from ct_rcu_watching()
* indicates that RCU is in an extended quiescent state.
*/
-static bool rcu_dynticks_in_eqs(int snap)
+static bool rcu_watching_snap_in_eqs(int snap)
{
- return !(snap & RCU_DYNTICKS_IDX);
+ return !(snap & CT_RCU_WATCHING);
}
-/*
- * Return true if the CPU corresponding to the specified rcu_data
- * structure has spent some time in an extended quiescent state since
- * rcu_dynticks_snap() returned the specified snapshot.
+/**
+ * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
+ * since the specified @snap?
+ *
+ * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
+ * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
+ *
+ * Returns true if the CPU corresponding to @rdp has spent some time in an
+ * extended quiescent state since @snap. Note that this doesn't check if it
+ * /still/ is in an EQS, just that it went through one since @snap.
+ *
+ * This is meant to be used in a loop waiting for a CPU to go through an EQS.
*/
-static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
+static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{
/*
* The first failing snapshot is already ordered against the accesses
@@ -323,26 +331,29 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
* performed by the remote CPU prior to entering idle and therefore can
* rely solely on acquire semantics.
*/
- return snap != ct_dynticks_cpu_acquire(rdp->cpu);
+ if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
+ return true;
+
+ return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}
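As the new kernel-doc states, this helper is meant for wait loops. A hedged sketch of such a loop, assuming the snapshot was taken while the remote CPU was not in an EQS (the backoff is illustrative):

int snap = ct_rcu_watching_cpu_acquire(rdp->cpu);	/* CPU not in EQS here */

while (!rcu_watching_snap_stopped_since(rdp, snap))
	schedule_timeout_idle(1);	/* poll until an EQS has been passed */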
/*
* Return true if the referenced integer is zero while the specified
* CPU remains within a single extended quiescent state.
*/
-bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
+bool rcu_watching_zero_in_eqs(int cpu, int *vp)
{
int snap;
// If not quiescent, force back to earlier extended quiescent state.
- snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
- smp_rmb(); // Order ->dynticks and *vp reads.
+ snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
+ smp_rmb(); // Order CT state and *vp reads.
if (READ_ONCE(*vp))
return false; // Non-zero, so report failure;
- smp_rmb(); // Order *vp read and ->dynticks re-read.
+ smp_rmb(); // Order *vp read and CT state re-read.
// If still in the same extended quiescent state, we are good!
- return snap == ct_dynticks_cpu(cpu);
+ return snap == ct_rcu_watching_cpu(cpu);
}
/*
@@ -356,17 +367,17 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
*
* The caller must have disabled interrupts and must not be idle.
*/
-notrace void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_eqs(void)
{
int seq;
raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
- seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
+ seq = ct_state_inc(2 * CT_RCU_WATCHING);
/* It is illegal to call this from idle state. */
- WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
+ WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
rcu_preempt_deferred_qs(current);
}
-EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
+EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
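A minimal usage sketch matching the constraint above (interrupts disabled, not from idle); the same pattern appears later in this diff in nocb_cb_wait() and rcu_all_qs():

unsigned long flags;

local_irq_save(flags);
rcu_momentary_eqs();
local_irq_restore(flags);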
/**
* rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
@@ -388,13 +399,13 @@ static int rcu_is_cpu_rrupt_from_idle(void)
lockdep_assert_irqs_disabled();
/* Check for counter underflows */
- RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
- "RCU dynticks_nesting counter underflow!");
- RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
- "RCU dynticks_nmi_nesting counter underflow/zero!");
+ RCU_LOCKDEP_WARN(ct_nesting() < 0,
+ "RCU nesting counter underflow!");
+ RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
+ "RCU nmi_nesting counter underflow/zero!");
/* Are we at first interrupt nesting level? */
- nesting = ct_dynticks_nmi_nesting();
+ nesting = ct_nmi_nesting();
if (nesting > 1)
return false;
@@ -404,7 +415,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
WARN_ON_ONCE(!nesting && !is_idle_task(current));
/* Does CPU appear to be idle from an RCU standpoint? */
- return ct_dynticks_nesting() == 0;
+ return ct_nesting() == 0;
}
#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
@@ -596,12 +607,12 @@ void rcu_irq_exit_check_preempt(void)
{
lockdep_assert_irqs_disabled();
- RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
- "RCU dynticks_nesting counter underflow/zero!");
- RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
- DYNTICK_IRQ_NONIDLE,
- "Bad RCU dynticks_nmi_nesting counter\n");
- RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+ RCU_LOCKDEP_WARN(ct_nesting() <= 0,
+ "RCU nesting counter underflow/zero!");
+ RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
+ CT_NESTING_IRQ_NONIDLE,
+ "Bad RCU nmi_nesting counter\n");
+ RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
"RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */
@@ -641,7 +652,7 @@ void __rcu_irq_enter_check_tick(void)
if (in_nmi())
return;
- RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+ RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
"Illegal rcu_irq_enter_check_tick() from extended quiescent state");
if (!tick_nohz_full_cpu(rdp->cpu) ||
@@ -723,7 +734,7 @@ notrace bool rcu_is_watching(void)
bool ret;
preempt_disable_notrace();
- ret = !rcu_dynticks_curr_cpu_in_eqs();
+ ret = rcu_is_watching_curr_cpu();
preempt_enable_notrace();
return ret;
}
@@ -765,11 +776,11 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
}
/*
- * Snapshot the specified CPU's dynticks counter so that we can later
+ * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
*/
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int rcu_watching_snap_save(struct rcu_data *rdp)
{
/*
* Full ordering between remote CPU's post idle accesses and updater's
@@ -782,8 +793,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
* Ordering between remote CPU's pre idle accesses and post grace period
* updater's accesses is enforced by the below acquire semantic.
*/
- rdp->dynticks_snap = ct_dynticks_cpu_acquire(rdp->cpu);
- if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
+ rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
+ if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
return 1;
@@ -794,14 +805,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
/*
* Returns positive if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through a dynticks idle state since
- * the last call to dyntick_save_progress_counter() for this same CPU, or by
+ * the last call to rcu_watching_snap_save() for this same CPU, or by
* virtue of having been offline.
*
* Returns negative if the specified CPU needs a force resched.
*
* Returns zero otherwise.
*/
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_watching_snap_recheck(struct rcu_data *rdp)
{
unsigned long jtsq;
int ret = 0;
@@ -815,7 +826,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
+ if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rnp, rdp);
return 1;
@@ -1649,7 +1660,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
* the done tail list manipulations are protected here.
*/
done = smp_load_acquire(&rcu_state.srs_done_tail);
- if (!done)
+ if (WARN_ON_ONCE(!done))
return;
WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
@@ -1984,10 +1995,10 @@ static void rcu_gp_fqs(bool first_time)
if (first_time) {
/* Collect dyntick-idle snapshots. */
- force_qs_rnp(dyntick_save_progress_counter);
+ force_qs_rnp(rcu_watching_snap_save);
} else {
/* Handle dyntick-idle and offline CPUs. */
- force_qs_rnp(rcu_implicit_dynticks_qs);
+ force_qs_rnp(rcu_watching_snap_recheck);
}
/* Clear flag to prevent immediate re-entry. */
if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2383,7 +2394,6 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
{
unsigned long flags;
unsigned long mask;
- bool needacc = false;
struct rcu_node *rnp;
WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2420,23 +2430,11 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
* to return true. So complain, but don't awaken.
*/
WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
- } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
- /*
- * ...but NOCB kthreads may miss or delay callbacks acceleration
- * if in the middle of a (de-)offloading process.
- */
- needacc = true;
}
rcu_disable_urgency_upon_qs(rdp);
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
-
- if (needacc) {
- rcu_nocb_lock_irqsave(rdp, flags);
- rcu_accelerate_cbs_unlocked(rnp, rdp);
- rcu_nocb_unlock_irqrestore(rdp, flags);
- }
}
}
@@ -2791,24 +2789,6 @@ static __latent_entropy void rcu_core(void)
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
- /*
- * On RT rcu_core() can be preempted when IRQs aren't disabled.
- * Therefore this function can race with concurrent NOCB (de-)offloading
- * on this CPU and the below condition must be considered volatile.
- * However if we race with:
- *
- * _ Offloading: In the worst case we accelerate or process callbacks
- * concurrently with NOCB kthreads. We are guaranteed to
- * call rcu_nocb_lock() if that happens.
- *
- * _ Deoffloading: In the worst case we miss callbacks acceleration or
- * processing. This is fine because the early stage
- * of deoffloading invokes rcu_core() after setting
- * SEGCBLIST_RCU_CORE. So we guarantee that we'll process
- * what could have been dismissed without the need to wait
- * for the next rcu_pending() check in the next jiffy.
- */
- const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
if (cpu_is_offline(smp_processor_id()))
return;
@@ -2828,17 +2808,17 @@ static __latent_entropy void rcu_core(void)
/* No grace period and unregistered callbacks? */
if (!rcu_gp_in_progress() &&
- rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
- rcu_nocb_lock_irqsave(rdp, flags);
+ rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
+ local_irq_save(flags);
if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
rcu_accelerate_cbs_unlocked(rnp, rdp);
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ local_irq_restore(flags);
}
rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
- if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+ if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
likely(READ_ONCE(rcu_scheduler_fully_active))) {
rcu_do_batch(rdp);
/* Re-invoke RCU core processing if there are callbacks remaining. */
@@ -2855,7 +2835,7 @@ static __latent_entropy void rcu_core(void)
queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
}
-static void rcu_core_si(struct softirq_action *h)
+static void rcu_core_si(void)
{
rcu_core();
}
@@ -3227,7 +3207,7 @@ struct kvfree_rcu_bulk_data {
struct list_head list;
struct rcu_gp_oldstate gp_snap;
unsigned long nr_records;
- void *records[];
+ void *records[] __counted_by(nr_records);
};
/*
@@ -3539,10 +3519,10 @@ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
if (delayed_work_pending(&krcp->monitor_work)) {
delay_left = krcp->monitor_work.timer.expires - jiffies;
if (delay < delay_left)
- mod_delayed_work(system_wq, &krcp->monitor_work, delay);
+ mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
return;
}
- queue_delayed_work(system_wq, &krcp->monitor_work, delay);
+ queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
}
static void
@@ -3584,18 +3564,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
}
/*
- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ * Return: %true if a work item was queued, %false otherwise.
*/
-static void kfree_rcu_monitor(struct work_struct *work)
+static bool
+kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
{
- struct kfree_rcu_cpu *krcp = container_of(work,
- struct kfree_rcu_cpu, monitor_work.work);
unsigned long flags;
+ bool queued = false;
int i, j;
- // Drain ready for reclaim.
- kvfree_rcu_drain_ready(krcp);
-
raw_spin_lock_irqsave(&krcp->lock, flags);
// Attempt to start a new batch.
@@ -3634,11 +3611,27 @@ static void kfree_rcu_monitor(struct work_struct *work)
// be that the work is in the pending state when
	// channels have been detached one after
	// the other.
- queue_rcu_work(system_wq, &krwp->rcu_work);
+ queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
}
}
raw_spin_unlock_irqrestore(&krcp->lock, flags);
+ return queued;
+}
+
+/*
+ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ */
+static void kfree_rcu_monitor(struct work_struct *work)
+{
+ struct kfree_rcu_cpu *krcp = container_of(work,
+ struct kfree_rcu_cpu, monitor_work.work);
+
+ // Drain ready for reclaim.
+ kvfree_rcu_drain_ready(krcp);
+
+	// Queue a batch for the rest.
+ kvfree_rcu_queue_batch(krcp);
// If there is nothing to detach, it means that our job is
// successfully done here. In case of having at least one
@@ -3704,7 +3697,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
!atomic_xchg(&krcp->work_in_progress, 1)) {
if (atomic_read(&krcp->backoff_page_cache_fill)) {
- queue_delayed_work(system_wq,
+ queue_delayed_work(system_unbound_wq,
&krcp->page_cache_work,
msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
} else {
@@ -3767,7 +3760,8 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
}
// Finally insert and update the GP for this page.
- bnode->records[bnode->nr_records++] = ptr;
+ bnode->nr_records++;
+ bnode->records[bnode->nr_records - 1] = ptr;
get_state_synchronize_rcu_full(&bnode->gp_snap);
atomic_inc(&(*krcp)->bulk_count[idx]);
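The insert-then-assign reordering above pairs with the __counted_by(nr_records) annotation added earlier in this diff: runtime bounds checks compare each index against the current counter, so the counter must grow before the slot it covers is written. A hedged illustration with a toy structure:

struct demo {
	unsigned long nr;
	void *slots[] __counted_by(nr);	/* indexes checked against nr */
};

/* Wrong under __counted_by: stores to slots[nr] while the bound is still nr. */
/*	d->slots[d->nr++] = ptr;	*/

/* Right: bump the bound first, then store within it. */
d->nr++;
d->slots[d->nr - 1] = ptr;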
@@ -3859,6 +3853,86 @@ unlock_return:
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that the single-argument form of kvfree_rcu() has a slow path that
+ * triggers synchronize_rcu() followed by freeing the pointer, all before
+ * the function returns. Therefore, for any single-argument call that will
+ * result in a kfree() to a cache that is to be destroyed during module
+ * exit, it is the developer's responsibility to ensure that all such calls
+ * have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+ struct kfree_rcu_cpu_work *krwp;
+ struct kfree_rcu_cpu *krcp;
+ bool queued;
+ int i, cpu;
+
+ /*
+	 * First we detach objects and queue them over an RCU batch
+	 * for all CPUs. Then the queued work items are flushed for each CPU.
+	 *
+	 * Please note: if there are outstanding batches for a particular
+	 * CPU, those have to be finished first, followed by queuing a new one.
+ */
+ for_each_possible_cpu(cpu) {
+ krcp = per_cpu_ptr(&krc, cpu);
+
+ /*
+		 * Check if this CPU has any objects which have been queued for a
+		 * new GP completion. If not (meaning there is nothing to detach),
+		 * we are done with it. If any batch is pending/running for this
+		 * "krcp", the per-CPU flush_rcu_work() below waits for its
+		 * completion (see the last step).
+ */
+ if (!need_offload_krc(krcp))
+ continue;
+
+ while (1) {
+ /*
+			 * If we are not able to queue new RCU work, it means either:
+			 * - batches for this CPU are still in flight; they should
+			 *   be flushed first, after which we repeat;
+			 * - there are no objects to detach, due to concurrency.
+ */
+ queued = kvfree_rcu_queue_batch(krcp);
+
+ /*
+			 * Bail out if there is no longer any need to offload
+			 * this "krcp". As noted earlier, it can run concurrently.
+ */
+ if (queued || !need_offload_krc(krcp))
+ break;
+
+ /* There are ongoing batches. */
+ for (i = 0; i < KFREE_N_BATCHES; i++) {
+ krwp = &(krcp->krw_arr[i]);
+ flush_rcu_work(&krwp->rcu_work);
+ }
+ }
+ }
+
+ /*
+ * Now we guarantee that all objects are flushed.
+ */
+ for_each_possible_cpu(cpu) {
+ krcp = per_cpu_ptr(&krc, cpu);
+
+ /*
+		 * The monitor work can drain ready-to-reclaim objects
+		 * directly. Wait for its completion if it is running or pending.
+ */
+ cancel_delayed_work_sync(&krcp->monitor_work);
+
+ for (i = 0; i < KFREE_N_BATCHES; i++) {
+ krwp = &(krcp->krw_arr[i]);
+ flush_rcu_work(&krwp->rcu_work);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
+
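A hedged usage sketch of the new primitive on a module-exit path; the cache and function names are illustrative only:

static struct kmem_cache *demo_cache;	/* hypothetical cache */

static void __exit demo_exit(void)
{
	/* All kvfree_rcu(obj, field) calls have been issued by now. */
	kvfree_rcu_barrier();		/* wait for in-flight reclaim */
	kmem_cache_destroy(demo_cache);	/* now safe to destroy */
}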
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -4403,6 +4477,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
{
unsigned long __maybe_unused s = rcu_state.barrier_sequence;
+ rhp->next = rhp; // Mark the callback as having been invoked.
if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
rcu_barrier_trace(TPS("LastCB"), -1, s);
complete(&rcu_state.barrier_completion);
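The self-pointing ->next is what lets the torture-stats code added earlier in this diff distinguish invoked barrier callbacks from pending ones; the predicate is assumed to reduce to:

static bool rcu_barrier_cb_is_done(struct rcu_head *rhp)
{
	return rhp->next == rhp;	/* invoked callbacks point at themselves */
}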
@@ -4804,8 +4879,8 @@ rcu_boot_init_percpu_data(int cpu)
/* Set up local state, ensuring consistent view of global state. */
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
INIT_WORK(&rdp->strict_work, strict_work_handler);
- WARN_ON_ONCE(ct->dynticks_nesting != 1);
- WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu)));
+ WARN_ON_ONCE(ct->nesting != 1);
+ WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
rdp->barrier_seq_snap = rcu_state.barrier_sequence;
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
@@ -4898,7 +4973,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
rdp->blimit = blimit;
- ct->dynticks_nesting = 1; /* CPU not up, no tearing. */
+ ct->nesting = 1; /* CPU not up, no tearing. */
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
/*
@@ -5058,7 +5133,7 @@ void rcutree_report_cpu_starting(unsigned int cpu)
rnp = rdp->mynode;
mask = rdp->grpmask;
arch_spin_lock(&rcu_state.ofl_lock);
- rcu_dynticks_eqs_online();
+ rcu_watching_online();
raw_spin_lock(&rcu_state.barrier_lock);
raw_spin_lock_rcu_node(rnp);
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
@@ -5424,6 +5499,8 @@ static void __init rcu_init_one(void)
while (i > rnp->grphi)
rnp++;
per_cpu_ptr(&rcu_data, i)->mynode = rnp;
+ per_cpu_ptr(&rcu_data, i)->barrier_head.next =
+ &per_cpu_ptr(&rcu_data, i)->barrier_head;
rcu_boot_init_percpu_data(i);
}
}
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index fcf2b4aa3441..a9a811d9d7a3 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -206,7 +206,7 @@ struct rcu_data {
long blimit; /* Upper limit on a processed batch */
/* 3) dynticks interface. */
- int dynticks_snap; /* Per-GP tracking for dynticks. */
+ int watching_snap; /* Per-GP tracking for dynticks. */
bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
bool rcu_urgent_qs; /* GP old need light quiescent state. */
bool rcu_forced_tick; /* Forced tick to provide QS. */
@@ -215,7 +215,7 @@ struct rcu_data {
/* 4) rcu_barrier(), OOM callbacks, and expediting. */
unsigned long barrier_seq_snap; /* Snap of rcu_state.barrier_sequence. */
struct rcu_head barrier_head;
- int exp_dynticks_snap; /* Double-check need for IPI. */
+ int exp_watching_snap; /* Double-check need for IPI. */
/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
@@ -411,7 +411,6 @@ struct rcu_state {
arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
/* Synchronize offline with */
/* GP pre-initialization. */
- int nocb_is_setup; /* nocb is setup from boot */
/* synchronize_rcu() part. */
struct llist_head srs_next; /* request a GP users. */
@@ -420,6 +419,11 @@ struct rcu_state {
struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
struct work_struct srs_cleanup_work;
atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+ struct mutex nocb_mutex; /* Guards (de-)offloading */
+ int nocb_is_setup; /* nocb is setup from boot */
+#endif
};
/* Values for rcu_state structure's gp_flags field. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 4acd29d16fdb..fb664d3a01c9 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -7,6 +7,7 @@
* Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
+#include <linux/console.h>
#include <linux/lockdep.h>
static void rcu_exp_handler(void *unused);
@@ -376,11 +377,11 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
* post grace period updater's accesses is enforced by the
* below acquire semantic.
*/
- snap = ct_dynticks_cpu_acquire(cpu);
- if (rcu_dynticks_in_eqs(snap))
+ snap = ct_rcu_watching_cpu_acquire(cpu);
+ if (rcu_watching_snap_in_eqs(snap))
mask_ofl_test |= mask;
else
- rdp->exp_dynticks_snap = snap;
+ rdp->exp_watching_snap = snap;
}
}
mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
@@ -400,7 +401,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
unsigned long mask = rdp->grpmask;
retry_ipi:
- if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
+ if (rcu_watching_snap_stopped_since(rdp, rdp->exp_watching_snap)) {
mask_ofl_test |= mask;
continue;
}
@@ -543,6 +544,67 @@ static bool synchronize_rcu_expedited_wait_once(long tlimit)
}
/*
+ * Print out an expedited RCU CPU stall warning message.
+ */
+static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigned long j)
+{
+ int cpu;
+ unsigned long mask;
+ int ndetected;
+ struct rcu_node *rnp;
+ struct rcu_node *rnp_root = rcu_get_root();
+
+ if (READ_ONCE(csd_lock_suppress_rcu_stall) && csd_lock_is_stuck()) {
+ pr_err("INFO: %s detected expedited stalls, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
+ return;
+ }
+ pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", rcu_state.name);
+ ndetected = 0;
+ rcu_for_each_leaf_node(rnp) {
+ ndetected += rcu_print_task_exp_stall(rnp);
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ struct rcu_data *rdp;
+
+ mask = leaf_node_cpu_bit(rnp, cpu);
+ if (!(READ_ONCE(rnp->expmask) & mask))
+ continue;
+ ndetected++;
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ pr_cont(" %d-%c%c%c%c", cpu,
+ "O."[!!cpu_online(cpu)],
+ "o."[!!(rdp->grpmask & rnp->expmaskinit)],
+ "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
+ "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
+ }
+ }
+ pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+ j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),
+ ".T"[!!data_race(rnp_root->exp_tasks)]);
+ if (ndetected) {
+ pr_err("blocking rcu_node structures (internal RCU debug):");
+ rcu_for_each_node_breadth_first(rnp) {
+ if (rnp == rnp_root)
+ continue; /* printed unconditionally */
+ if (sync_rcu_exp_done_unlocked(rnp))
+ continue;
+ pr_cont(" l=%u:%d-%d:%#lx/%c",
+ rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask),
+ ".T"[!!data_race(rnp->exp_tasks)]);
+ }
+ pr_cont("\n");
+ }
+ rcu_for_each_leaf_node(rnp) {
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ mask = leaf_node_cpu_bit(rnp, cpu);
+ if (!(READ_ONCE(rnp->expmask) & mask))
+ continue;
+ dump_cpu_task(cpu);
+ }
+ rcu_exp_print_detail_task_stall_rnp(rnp);
+ }
+}
+
+/*
* Wait for the expedited grace period to elapse, issuing any needed
* RCU CPU stall warnings along the way.
*/
@@ -553,10 +615,8 @@ static void synchronize_rcu_expedited_wait(void)
unsigned long jiffies_stall;
unsigned long jiffies_start;
unsigned long mask;
- int ndetected;
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_node *rnp_root = rcu_get_root();
unsigned long flags;
trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
@@ -590,59 +650,17 @@ static void synchronize_rcu_expedited_wait(void)
return;
if (rcu_stall_is_suppressed())
continue;
+
+ nbcon_cpu_emergency_enter();
+
j = jiffies;
rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
- pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
- rcu_state.name);
- ndetected = 0;
- rcu_for_each_leaf_node(rnp) {
- ndetected += rcu_print_task_exp_stall(rnp);
- for_each_leaf_node_possible_cpu(rnp, cpu) {
- struct rcu_data *rdp;
-
- mask = leaf_node_cpu_bit(rnp, cpu);
- if (!(READ_ONCE(rnp->expmask) & mask))
- continue;
- ndetected++;
- rdp = per_cpu_ptr(&rcu_data, cpu);
- pr_cont(" %d-%c%c%c%c", cpu,
- "O."[!!cpu_online(cpu)],
- "o."[!!(rdp->grpmask & rnp->expmaskinit)],
- "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
- "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
- }
- }
- pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
- j - jiffies_start, rcu_state.expedited_sequence,
- data_race(rnp_root->expmask),
- ".T"[!!data_race(rnp_root->exp_tasks)]);
- if (ndetected) {
- pr_err("blocking rcu_node structures (internal RCU debug):");
- rcu_for_each_node_breadth_first(rnp) {
- if (rnp == rnp_root)
- continue; /* printed unconditionally */
- if (sync_rcu_exp_done_unlocked(rnp))
- continue;
- pr_cont(" l=%u:%d-%d:%#lx/%c",
- rnp->level, rnp->grplo, rnp->grphi,
- data_race(rnp->expmask),
- ".T"[!!data_race(rnp->exp_tasks)]);
- }
- pr_cont("\n");
- }
- rcu_for_each_leaf_node(rnp) {
- for_each_leaf_node_possible_cpu(rnp, cpu) {
- mask = leaf_node_cpu_bit(rnp, cpu);
- if (!(READ_ONCE(rnp->expmask) & mask))
- continue;
- preempt_disable(); // For smp_processor_id() in dump_cpu_task().
- dump_cpu_task(cpu);
- preempt_enable();
- }
- rcu_exp_print_detail_task_stall_rnp(rnp);
- }
+ synchronize_rcu_expedited_stall(jiffies_start, j);
jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
+
+ nbcon_cpu_emergency_exit();
+
panic_on_rcu_stall();
}
}
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3ce30841119a..97b99cd06923 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -16,10 +16,6 @@
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
- return lockdep_is_held(&rdp->nocb_lock);
-}
static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
@@ -220,7 +216,7 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
if (needwake) {
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
- wake_up_process(rdp_gp->nocb_gp_kthread);
+ swake_up_one_online(&rdp_gp->nocb_gp_wq);
}
return needwake;
@@ -413,14 +409,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
return false;
}
- // In the process of (de-)offloading: no bypassing, but
- // locking.
- if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
- rcu_nocb_lock(rdp);
- *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
- return false; /* Not offloaded, no bypassing. */
- }
-
// Don't use ->nocb_bypass during early boot.
if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
rcu_nocb_lock(rdp);
@@ -505,7 +493,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
}
rcu_nocb_bypass_unlock(rdp);
- smp_mb(); /* Order enqueue before wake. */
+
// A wake up of the grace period kthread or timer adjustment
// needs to be done only if:
// 1. Bypass list was fully empty before (this is the first
@@ -616,37 +604,33 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
}
}
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
+static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
{
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
- int ret;
- rcu_nocb_lock_irqsave(rdp, flags);
- if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
- !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+ /*
+	 * Locking orders future de-offloaded callback enqueues against previous
+	 * handling of this rdp, i.e., it makes sure rcuog is done with this rdp
+	 * before de-offloaded callbacks can be enqueued.
+ */
+ raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
/*
* Offloading. Set our flag and notify the offload worker.
* We will handle this rdp until it ever gets de-offloaded.
*/
- rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
- ret = 1;
- } else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
- rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+ list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
+ rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
+ } else {
/*
* De-offloading. Clear our flag and notify the de-offload worker.
* We will ignore this rdp until it ever gets re-offloaded.
*/
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
- ret = 0;
- } else {
- WARN_ON_ONCE(1);
- ret = -1;
+ list_del(&rdp->nocb_entry_rdp);
+ rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
}
-
- rcu_nocb_unlock_irqrestore(rdp, flags);
-
- return ret;
+ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
@@ -853,14 +837,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
}
if (rdp_toggling) {
- int ret;
-
- ret = nocb_gp_toggle_rdp(rdp_toggling);
- if (ret == 1)
- list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
- else if (ret == 0)
- list_del(&rdp_toggling->nocb_entry_rdp);
-
+ nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
swake_up_one(&rdp_toggling->nocb_state_wq);
}
@@ -917,7 +894,7 @@ static void nocb_cb_wait(struct rcu_data *rdp)
WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
/*
* Disable BH to provide the expected environment. Also, when
@@ -1030,16 +1007,11 @@ void rcu_nocb_flush_deferred_wakeup(void)
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
-static int rdp_offload_toggle(struct rcu_data *rdp,
- bool offload, unsigned long flags)
- __releases(rdp->nocb_lock)
+static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
{
- struct rcu_segcblist *cblist = &rdp->cblist;
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
bool wake_gp = false;
-
- rcu_segcblist_offload(cblist, offload);
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ unsigned long flags;
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
// Queue this rdp for add/del to/from the list to iterate on rcuog
@@ -1053,88 +1025,73 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
return wake_gp;
}
-static long rcu_nocb_rdp_deoffload(void *arg)
+static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
{
- struct rcu_data *rdp = arg;
- struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
- int wake_gp;
- struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+ bool ret;
/*
- * rcu_nocb_rdp_deoffload() may be called directly if
- * rcuog/o[p] spawn failed, because at this time the rdp->cpu
- * is not online yet.
+	 * Locking makes sure rcuog is done handling this rdp before a
+	 * de-offloaded enqueue can happen. It also keeps the SEGCBLIST_OFFLOADED
+	 * flag stable while the ->nocb_lock is held.
*/
- WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu));
+ raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+ return ret;
+}
+
+static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
+{
+ unsigned long flags;
+ int wake_gp;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+ /* CPU must be offline, unless it's early boot */
+ WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());
pr_info("De-offloading %d\n", rdp->cpu);
- rcu_nocb_lock_irqsave(rdp, flags);
- /*
- * Flush once and for all now. This suffices because we are
- * running on the target CPU holding ->nocb_lock (thus having
- * interrupts disabled), and because rdp_offload_toggle()
- * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
- * Thus future calls to rcu_segcblist_completely_offloaded() will
- * return false, which means that future calls to rcu_nocb_try_bypass()
- * will refuse to put anything into the bypass.
- */
- WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
+ /* Flush all callbacks from segcblist and bypass */
+ rcu_barrier();
+
/*
- * Start with invoking rcu_core() early. This way if the current thread
- * happens to preempt an ongoing call to rcu_core() in the middle,
- * leaving some work dismissed because rcu_core() still thinks the rdp is
- * completely offloaded, we are guaranteed a nearby future instance of
- * rcu_core() to catch up.
+ * Make sure the rcuoc kthread isn't in the middle of a nocb locked
+ * sequence while offloading is deactivated, along with nocb locking.
*/
- rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
- invoke_rcu_core();
- wake_gp = rdp_offload_toggle(rdp, false, flags);
+ if (rdp->nocb_cb_kthread)
+ kthread_park(rdp->nocb_cb_kthread);
+
+ rcu_nocb_lock_irqsave(rdp, flags);
+ WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+ WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+
+ wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
+
if (rdp_gp->nocb_gp_kthread) {
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
swait_event_exclusive(rdp->nocb_state_wq,
- !rcu_segcblist_test_flags(cblist,
- SEGCBLIST_KTHREAD_GP));
- if (rdp->nocb_cb_kthread)
- kthread_park(rdp->nocb_cb_kthread);
+ rcu_nocb_rdp_deoffload_wait_cond(rdp));
} else {
/*
* No kthread to clear the flags for us or remove the rdp from the nocb list
		 * to iterate. Do it here instead. Locking doesn't look strictly necessary
* but we stick to paranoia in this rare path.
*/
- rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
list_del(&rdp->nocb_entry_rdp);
}
- mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
-
- /*
- * Lock one last time to acquire latest callback updates from kthreads
- * so we can later handle callbacks locally without locking.
- */
- rcu_nocb_lock_irqsave(rdp, flags);
- /*
- * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
- * lock is released but how about being paranoid for once?
- */
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
- /*
- * Without SEGCBLIST_LOCKING, we can't use
- * rcu_nocb_unlock_irqrestore() anymore.
- */
- raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-
- /* Sanity check */
- WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+ mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
return 0;
}
@@ -1145,33 +1102,42 @@ int rcu_nocb_cpu_deoffload(int cpu)
int ret = 0;
cpus_read_lock();
- mutex_lock(&rcu_state.barrier_mutex);
+ mutex_lock(&rcu_state.nocb_mutex);
if (rcu_rdp_is_offloaded(rdp)) {
- if (cpu_online(cpu)) {
- ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+ if (!cpu_online(cpu)) {
+ ret = rcu_nocb_rdp_deoffload(rdp);
if (!ret)
cpumask_clear_cpu(cpu, rcu_nocb_mask);
} else {
- pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
+ pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
ret = -EINVAL;
}
}
- mutex_unlock(&rcu_state.barrier_mutex);
+ mutex_unlock(&rcu_state.nocb_mutex);
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
-static long rcu_nocb_rdp_offload(void *arg)
+static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
{
- struct rcu_data *rdp = arg;
- struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
+ bool ret;
+
+ raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+ raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+ return ret;
+}
+
+static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+{
int wake_gp;
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
- WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+ WARN_ON_ONCE(cpu_online(rdp->cpu));
/*
* For now we only support re-offload, ie: the rdp must have been
* offloaded on boot first.
@@ -1184,44 +1150,17 @@ static long rcu_nocb_rdp_offload(void *arg)
pr_info("Offloading %d\n", rdp->cpu);
- /*
- * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
- * is set.
- */
- raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+ WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+ WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
- /*
- * We didn't take the nocb lock while working on the
- * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
- * Every modifications that have been done previously on
- * rdp->cblist must be visible remotely by the nocb kthreads
- * upon wake up after reading the cblist flags.
- *
- * The layout against nocb_lock enforces that ordering:
- *
- * __rcu_nocb_rdp_offload() nocb_cb_wait()/nocb_gp_wait()
- * ------------------------- ----------------------------
- * WRITE callbacks rcu_nocb_lock()
- * rcu_nocb_lock() READ flags
- * WRITE flags READ callbacks
- * rcu_nocb_unlock() rcu_nocb_unlock()
- */
- wake_gp = rdp_offload_toggle(rdp, true, flags);
+ wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
if (wake_gp)
wake_up_process(rdp_gp->nocb_gp_kthread);
- kthread_unpark(rdp->nocb_cb_kthread);
-
swait_event_exclusive(rdp->nocb_state_wq,
- rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+ rcu_nocb_rdp_offload_wait_cond(rdp));
- /*
- * All kthreads are ready to work, we can finally relieve rcu_core() and
- * enable nocb bypass.
- */
- rcu_nocb_lock_irqsave(rdp, flags);
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ kthread_unpark(rdp->nocb_cb_kthread);
return 0;
}
@@ -1232,18 +1171,18 @@ int rcu_nocb_cpu_offload(int cpu)
int ret = 0;
cpus_read_lock();
- mutex_lock(&rcu_state.barrier_mutex);
+ mutex_lock(&rcu_state.nocb_mutex);
if (!rcu_rdp_is_offloaded(rdp)) {
- if (cpu_online(cpu)) {
- ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+ if (!cpu_online(cpu)) {
+ ret = rcu_nocb_rdp_offload(rdp);
if (!ret)
cpumask_set_cpu(cpu, rcu_nocb_mask);
} else {
- pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
+ pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
ret = -EINVAL;
}
}
- mutex_unlock(&rcu_state.barrier_mutex);
+ mutex_unlock(&rcu_state.nocb_mutex);
cpus_read_unlock();
return ret;
@@ -1261,7 +1200,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
return 0;
/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
- if (!mutex_trylock(&rcu_state.barrier_mutex))
+ if (!mutex_trylock(&rcu_state.nocb_mutex))
return 0;
/* Snapshot count of all CPUs */
@@ -1271,7 +1210,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
count += READ_ONCE(rdp->lazy_len);
}
- mutex_unlock(&rcu_state.barrier_mutex);
+ mutex_unlock(&rcu_state.nocb_mutex);
return count ? count : SHRINK_EMPTY;
}
@@ -1289,9 +1228,9 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
* Protect against concurrent (de-)offloading. Otherwise nocb locking
* may be ignored or imbalanced.
*/
- if (!mutex_trylock(&rcu_state.barrier_mutex)) {
+ if (!mutex_trylock(&rcu_state.nocb_mutex)) {
/*
- * But really don't insist if barrier_mutex is contended since we
+ * But really don't insist if nocb_mutex is contended since we
* can't guarantee that it will never engage in a dependency
* chain involving memory allocation. The lock is seldom contended
* anyway.
@@ -1330,7 +1269,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
break;
}
- mutex_unlock(&rcu_state.barrier_mutex);
+ mutex_unlock(&rcu_state.nocb_mutex);
return count ? count : SHRINK_STOP;
}
@@ -1396,9 +1335,7 @@ void __init rcu_init_nohz(void)
rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
- rcu_segcblist_offload(&rdp->cblist, true);
- rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
- rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
+ rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
}
rcu_organize_nocb_kthreads();
}
@@ -1446,7 +1383,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
"rcuog/%d", rdp_gp->cpu);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
- goto end;
+ goto err;
}
WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
if (kthread_prio)
@@ -1458,7 +1395,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
t = kthread_create(rcu_nocb_cb_kthread, rdp,
"rcuo%c/%d", rcu_state.abbr, cpu);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
- goto end;
+ goto err;
if (rcu_rdp_is_offloaded(rdp))
wake_up_process(t);
@@ -1471,13 +1408,21 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
WRITE_ONCE(rdp->nocb_cb_kthread, t);
WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
return;
-end:
- mutex_lock(&rcu_state.barrier_mutex);
+
+err:
+ /*
+	 * No need to protect against concurrent rcu_barrier(): the number of
+	 * callbacks should be 0 for a non-boot CPU, so rcu_barrier() shouldn't
+	 * even try to grab the nocb_lock. But hold nocb_mutex to avoid a
+	 * nocb_lock imbalance from the shrinker.
+ */
+ WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
+ mutex_lock(&rcu_state.nocb_mutex);
if (rcu_rdp_is_offloaded(rdp)) {
rcu_nocb_rdp_deoffload(rdp);
cpumask_clear_cpu(cpu, rcu_nocb_mask);
}
- mutex_unlock(&rcu_state.barrier_mutex);
+ mutex_unlock(&rcu_state.nocb_mutex);
}
/* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
@@ -1653,16 +1598,6 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
- return 0;
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
- return false;
-}
-
/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c569da65b421..1c7cbd145d5e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -24,10 +24,11 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
* timers have their own means of synchronization against the
* offloaded state updaters.
*/
- RCU_LOCKDEP_WARN(
+ RCU_NOCB_LOCKDEP_WARN(
!(lockdep_is_held(&rcu_state.barrier_mutex) ||
(IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
- rcu_lockdep_is_held_nocb(rdp) ||
+ lockdep_is_held(&rdp->nocb_lock) ||
+ lockdep_is_held(&rcu_state.nocb_mutex) ||
(!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
rdp == this_cpu_ptr(&rcu_data)) ||
rcu_current_is_nocb_kthread(rdp)),
@@ -869,7 +870,7 @@ static void rcu_qs(void)
/*
* Register an urgently needed quiescent state. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
* dyntick-idle quiescent state visible to other CPUs, which will in
* some cases serve for expedited as well as normal grace periods.
* Either way, register a lightweight quiescent state.
@@ -889,7 +890,7 @@ void rcu_all_qs(void)
this_cpu_write(rcu_data.rcu_urgent_qs, false);
if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
rcu_qs();
@@ -909,7 +910,7 @@ void rcu_note_context_switch(bool preempt)
goto out;
this_cpu_write(rcu_data.rcu_urgent_qs, false);
if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
out:
rcu_tasks_qs(current, preempt);
trace_rcu_utilization(TPS("End context switch"));
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 4b0e9d7c4c68..4432db6d0b99 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -7,8 +7,10 @@
* Author: Paul E. McKenney <paulmck@linux.ibm.com>
*/
+#include <linux/console.h>
#include <linux/kvm_para.h>
#include <linux/rcu_notifier.h>
+#include <linux/smp.h>
//////////////////////////////////////////////////////////////////////////////
//
@@ -370,6 +372,7 @@ static void rcu_dump_cpu_stacks(void)
struct rcu_node *rnp;
rcu_for_each_leaf_node(rnp) {
+ printk_deferred_enter();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
for_each_leaf_node_possible_cpu(rnp, cpu)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
@@ -379,6 +382,7 @@ static void rcu_dump_cpu_stacks(void)
dump_cpu_task(cpu);
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ printk_deferred_exit();
}
}
@@ -501,7 +505,7 @@ static void print_cpu_stall_info(int cpu)
}
delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
falsepositive = rcu_is_gp_kthread_starving(NULL) &&
- rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu));
+ rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu));
rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
if (rcuc_starved)
// Print signed value, as negative values indicate a probable bug.
@@ -515,8 +519,8 @@ static void print_cpu_stall_info(int cpu)
rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
"!."[!delta],
ticks_value, ticks_title,
- ct_dynticks_cpu(cpu) & 0xffff,
- ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
+ ct_rcu_watching_cpu(cpu) & 0xffff,
+ ct_nesting_cpu(cpu), ct_nmi_nesting_cpu(cpu),
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
rcuc_starved ? buf : "",
@@ -605,6 +609,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
if (rcu_stall_is_suppressed())
return;
+ nbcon_cpu_emergency_enter();
+
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.rst for info on how to debug
@@ -657,6 +663,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
rcu_check_gp_kthread_expired_fqs_timer();
rcu_check_gp_kthread_starvation();
+ nbcon_cpu_emergency_exit();
+
panic_on_rcu_stall();
rcu_force_quiescent_state(); /* Kick them all. */
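
nbcon_cpu_emergency_enter()/nbcon_cpu_emergency_exit() bracket the whole report so atomic (NBCON) consoles can treat it as one emergency section; the shape of the pattern, sketched under that assumption:

/* Sketch: mark a multi-line diagnostic report as an emergency section. */
nbcon_cpu_emergency_enter();
pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
/* ... per-CPU diagnostics ... */
nbcon_cpu_emergency_exit();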
@@ -677,6 +685,8 @@ static void print_cpu_stall(unsigned long gps)
if (rcu_stall_is_suppressed())
return;
+ nbcon_cpu_emergency_enter();
+
/*
* OK, time to rat on ourselves...
* See Documentation/RCU/stallwarn.rst for info on how to debug
@@ -706,6 +716,8 @@ static void print_cpu_stall(unsigned long gps)
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ nbcon_cpu_emergency_exit();
+
panic_on_rcu_stall();
/*
@@ -719,6 +731,9 @@ static void print_cpu_stall(unsigned long gps)
set_preempt_need_resched();
}
+static bool csd_lock_suppress_rcu_stall;
+module_param(csd_lock_suppress_rcu_stall, bool, 0644);
+
static void check_cpu_stall(struct rcu_data *rdp)
{
bool self_detected;
@@ -791,7 +806,9 @@ static void check_cpu_stall(struct rcu_data *rdp)
return;
rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_NORM, (void *)j - gps);
- if (self_detected) {
+ if (READ_ONCE(csd_lock_suppress_rcu_stall) && csd_lock_is_stuck()) {
+ pr_err("INFO: %s detected stall, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
+ } else if (self_detected) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(gps);
} else {
diff --git a/kernel/resource.c b/kernel/resource.c
index a83040fde236..b730bd28b422 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -450,8 +450,7 @@ int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
/* re-alloc */
struct resource *rams_new;
- rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
- (rams_size + 16) * sizeof(struct resource),
+ rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
GFP_KERNEL);
if (!rams_new)
goto out;
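
This follows the kvrealloc() API change that drops the old-size argument, aligning it with krealloc(); a hedged sketch of the new calling convention:

/* Sketch: growing a kvmalloc'ed array with three-argument kvrealloc(). */
struct resource *tmp;

tmp = kvrealloc(rams, (rams_size + 16) * sizeof(*rams), GFP_KERNEL);
if (!tmp)
        return -ENOMEM;  /* on failure the original buffer is untouched */
rams = tmp;
rams_size += 16;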
@@ -540,20 +539,62 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
size_t size, unsigned long flags,
unsigned long desc)
{
- struct resource res;
+ resource_size_t ostart, oend;
int type = 0; int other = 0;
- struct resource *p;
+ struct resource *p, *dp;
+ bool is_type, covered;
+ struct resource res;
res.start = start;
res.end = start + size - 1;
for (p = parent->child; p ; p = p->sibling) {
- bool is_type = (((p->flags & flags) == flags) &&
- ((desc == IORES_DESC_NONE) ||
- (desc == p->desc)));
-
- if (resource_overlaps(p, &res))
- is_type ? type++ : other++;
+ if (!resource_overlaps(p, &res))
+ continue;
+ is_type = (p->flags & flags) == flags &&
+ (desc == IORES_DESC_NONE || desc == p->desc);
+ if (is_type) {
+ type++;
+ continue;
+ }
+ /*
+ * Continue the search in descendant resources, since the
+ * matched descendant resources may cover some ranges of 'p'.
+ *
+ * |------------- "CXL Window 0" ------------|
+ * |-- "System RAM" --|
+ *
+ * will behave similarly to the following fake resource
+ * tree when searching "System RAM".
+ *
+ * |-- "System RAM" --||-- "CXL Window 0a" --|
+ */
+ covered = false;
+ ostart = max(res.start, p->start);
+ oend = min(res.end, p->end);
+ for_each_resource(p, dp, false) {
+ if (!resource_overlaps(dp, &res))
+ continue;
+ is_type = (dp->flags & flags) == flags &&
+ (desc == IORES_DESC_NONE || desc == dp->desc);
+ if (is_type) {
+ type++;
+ /*
+ * Range from 'ostart' to 'dp->start'
+ * isn't covered by matched resource.
+ */
+ if (dp->start > ostart)
+ break;
+ if (dp->end >= oend) {
+ covered = true;
+ break;
+ }
+ /* Remove covered range */
+ ostart = max(ostart, dp->end + 1);
+ }
+ }
+ if (!covered)
+ other++;
}
if (type == 0)
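
The descendant walk amounts to subtracting matched child ranges from the overlap window [ostart, oend]; a standalone sketch of that interval bookkeeping, assuming the matching children are visited in address order (helper is hypothetical, not a kernel API):

/* Sketch: is [start, end] fully covered by sorted matching children? */
static bool range_covered(u64 start, u64 end, const struct range *kids, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (kids[i].start > start)
                        return false;    /* uncovered gap before this child */
                if (kids[i].end >= end)
                        return true;     /* remainder is fully covered */
                start = kids[i].end + 1; /* trim off the covered prefix */
        }
        return false;                    /* ran out of children early */
}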
@@ -1818,7 +1859,11 @@ EXPORT_SYMBOL(resource_list_free);
#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING (1UL << 0)
#define GFR_REQUEST_REGION (1UL << 1)
-#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+#ifdef PA_SECTION_SHIFT
+#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+#else
+#define GFR_DEFAULT_ALIGN PAGE_SIZE
+#endif
static resource_size_t gfr_start(struct resource *base, resource_size_t size,
resource_size_t align, unsigned long flags)
@@ -1830,7 +1875,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size,
return end - size + 1;
}
- return ALIGN(base->start, align);
+ return ALIGN(max(base->start, align), align);
}
static bool gfr_continue(struct resource *base, resource_size_t addr,
@@ -2004,7 +2049,7 @@ struct resource *alloc_free_mem_region(struct resource *base,
return get_free_mem_region(NULL, base, size, align, name,
IORES_DESC_NONE, flags);
}
-EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
+EXPORT_SYMBOL_GPL(alloc_free_mem_region);
#endif /* CONFIG_GET_FREE_REGION */
static int __init strict_iomem(char *str)
diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c
index 0e509985a44a..42d2d8d20f5d 100644
--- a/kernel/resource_kunit.c
+++ b/kernel/resource_kunit.c
@@ -7,6 +7,8 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/sizes.h>
+#include <linux/mm.h>
#define R0_START 0x0000
#define R0_END 0xffff
@@ -137,9 +139,150 @@ static void resource_test_intersection(struct kunit *test)
} while (++i < ARRAY_SIZE(results_for_intersection));
}
+/*
+ * The test resource tree for region_intersects() test:
+ *
+ * BASE-BASE+1M-1 : Test System RAM 0
+ * # hole 0 (BASE+1M-BASE+2M)
+ * BASE+2M-BASE+3M-1 : Test CXL Window 0
+ * BASE+3M-BASE+4M-1 : Test System RAM 1
+ * BASE+4M-BASE+7M-1 : Test CXL Window 1
+ * BASE+4M-BASE+5M-1 : Test System RAM 2
+ * BASE+4M+128K-BASE+4M+256K-1: Test Code
+ * BASE+5M-BASE+6M-1 : Test System RAM 3
+ */
+#define RES_TEST_RAM0_OFFSET 0
+#define RES_TEST_RAM0_SIZE SZ_1M
+#define RES_TEST_HOLE0_OFFSET (RES_TEST_RAM0_OFFSET + RES_TEST_RAM0_SIZE)
+#define RES_TEST_HOLE0_SIZE SZ_1M
+#define RES_TEST_WIN0_OFFSET (RES_TEST_HOLE0_OFFSET + RES_TEST_HOLE0_SIZE)
+#define RES_TEST_WIN0_SIZE SZ_1M
+#define RES_TEST_RAM1_OFFSET (RES_TEST_WIN0_OFFSET + RES_TEST_WIN0_SIZE)
+#define RES_TEST_RAM1_SIZE SZ_1M
+#define RES_TEST_WIN1_OFFSET (RES_TEST_RAM1_OFFSET + RES_TEST_RAM1_SIZE)
+#define RES_TEST_WIN1_SIZE (SZ_1M * 3)
+#define RES_TEST_RAM2_OFFSET RES_TEST_WIN1_OFFSET
+#define RES_TEST_RAM2_SIZE SZ_1M
+#define RES_TEST_CODE_OFFSET (RES_TEST_RAM2_OFFSET + SZ_128K)
+#define RES_TEST_CODE_SIZE SZ_128K
+#define RES_TEST_RAM3_OFFSET (RES_TEST_RAM2_OFFSET + RES_TEST_RAM2_SIZE)
+#define RES_TEST_RAM3_SIZE SZ_1M
+#define RES_TEST_TOTAL_SIZE (RES_TEST_WIN1_OFFSET + RES_TEST_WIN1_SIZE)
+
+static void remove_free_resource(void *ctx)
+{
+ struct resource *res = (struct resource *)ctx;
+
+ remove_resource(res);
+ kfree(res);
+}
+
+static void resource_test_request_region(struct kunit *test, struct resource *parent,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned long flags)
+{
+ struct resource *res;
+
+ res = __request_region(parent, start, size, name, flags);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ kunit_add_action_or_reset(test, remove_free_resource, res);
+}
+
+static void resource_test_insert_resource(struct kunit *test, struct resource *parent,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned long flags)
+{
+ struct resource *res;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ res->name = name;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = flags;
+ if (insert_resource(parent, res)) {
+ kfree(res);
+ KUNIT_FAIL_AND_ABORT(test, "Fail to insert resource %pR\n", res);
+ }
+
+ kunit_add_action_or_reset(test, remove_free_resource, res);
+}
+
+static void resource_test_region_intersects(struct kunit *test)
+{
+ unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ struct resource *parent;
+ resource_size_t start;
+
+ /* Find an iomem_resource hole to hold test resources */
+ parent = alloc_free_mem_region(&iomem_resource, RES_TEST_TOTAL_SIZE, SZ_1M,
+ "test resources");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+ start = parent->start;
+ kunit_add_action_or_reset(test, remove_free_resource, parent);
+
+ resource_test_request_region(test, parent, start + RES_TEST_RAM0_OFFSET,
+ RES_TEST_RAM0_SIZE, "Test System RAM 0", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_WIN0_OFFSET,
+ RES_TEST_WIN0_SIZE, "Test CXL Window 0",
+ IORESOURCE_MEM);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM1_OFFSET,
+ RES_TEST_RAM1_SIZE, "Test System RAM 1", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_WIN1_OFFSET,
+ RES_TEST_WIN1_SIZE, "Test CXL Window 1",
+ IORESOURCE_MEM);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM2_OFFSET,
+ RES_TEST_RAM2_SIZE, "Test System RAM 2", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_CODE_OFFSET,
+ RES_TEST_CODE_SIZE, "Test Code", flags);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM3_OFFSET,
+ RES_TEST_RAM3_SIZE, "Test System RAM 3", flags);
+ kunit_release_action(test, remove_free_resource, parent);
+
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM0_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM0_OFFSET +
+ RES_TEST_RAM0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_DISJOINT,
+ region_intersects(start + RES_TEST_HOLE0_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_DISJOINT,
+ region_intersects(start + RES_TEST_HOLE0_OFFSET +
+ RES_TEST_HOLE0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_MIXED,
+ region_intersects(start + RES_TEST_WIN0_OFFSET +
+ RES_TEST_WIN0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM1_OFFSET +
+ RES_TEST_RAM1_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM2_OFFSET +
+ RES_TEST_RAM2_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_CODE_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM2_OFFSET,
+ RES_TEST_RAM2_SIZE + PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_MIXED,
+ region_intersects(start + RES_TEST_RAM3_OFFSET,
+ RES_TEST_RAM3_SIZE + PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+}
+
static struct kunit_case resource_test_cases[] = {
KUNIT_CASE(resource_test_union),
KUNIT_CASE(resource_test_intersection),
+ KUNIT_CASE(resource_test_region_intersects),
{}
};
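
For orientation, the tests above exercise the three possible region_intersects() verdicts; a short usage sketch (semantics as documented for the function):

/* Sketch: classify one page against "System RAM". */
switch (region_intersects(addr, PAGE_SIZE,
                          IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)) {
case REGION_DISJOINT:    /* overlaps no matching resource */
        break;
case REGION_INTERSECTS:  /* overlaps matching resources only */
        break;
case REGION_MIXED:       /* overlaps both matching and other resources */
        break;
}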
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index 39c315182b35..fae1f5c921eb 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -16,18 +16,25 @@
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/isolation.h>
#include <linux/sched/posix-timers.h>
#include <linux/sched/rt.h>
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
+#include <linux/kobject.h>
#include <linux/livepatch.h>
+#include <linux/pm.h>
#include <linux/psi.h>
+#include <linux/rhashtable.h>
+#include <linux/seq_buf.h>
#include <linux/seqlock_api.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tsacct_kern.h>
#include <linux/vtime.h>
+#include <linux/sysrq.h>
+#include <linux/percpu-rwsem.h>
#include <uapi/linux/sched/types.h>
@@ -52,4 +59,8 @@
#include "cputime.c"
#include "deadline.c"
+#ifdef CONFIG_SCHED_CLASS_EXT
+# include "ext.c"
+#endif
+
#include "syscalls.c"
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3951e4a55e5..43e453ab7e20 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -163,13 +163,19 @@ static inline int __task_prio(const struct task_struct *p)
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
- if (rt_prio(p->prio)) /* includes deadline */
+ if (p->dl_server)
+ return -1; /* deadline */
+
+ if (rt_or_dl_prio(p->prio))
return p->prio; /* [-1, 99] */
if (p->sched_class == &idle_sched_class)
return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
- return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
+ if (task_on_scx(p))
+ return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
+
+ return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
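
Taken together, __task_prio() now yields the following ladder (lower value means higher priority); a summary sketch assuming the usual MAX_RT_PRIO=100, MAX_NICE=19, NICE_WIDTH=40:

/* Sketch: effective __task_prio() ordering after this change.
 *
 *   -2              stop task
 *   -1              deadline, including CFS tasks run by a DL server
 *   [0, 99]         RT
 *   119             fair (CFS), squashed to one level
 *   120             ext (SCX), squashed just below fair
 *   140             idle
 */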
/*
@@ -192,12 +198,33 @@ static inline bool prio_less(const struct task_struct *a,
if (-pb < -pa)
return false;
- if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
- return !dl_time_before(a->dl.deadline, b->dl.deadline);
+ if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
+ const struct sched_dl_entity *a_dl, *b_dl;
+
+ a_dl = &a->dl;
+ /*
+ * Since 'a' and 'b' can be CFS tasks served by a DL server,
+ * __task_prio() can return -1 (for DL) even for those. In that
+ * case, get to the dl_server's DL entity.
+ */
+ if (a->dl_server)
+ a_dl = a->dl_server;
+
+ b_dl = &b->dl;
+ if (b->dl_server)
+ b_dl = b->dl_server;
+
+ return !dl_time_before(a_dl->deadline, b_dl->deadline);
+ }
if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
return cfs_prio_less(a, b, in_fi);
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
+ return scx_prio_less(a, b, in_fi);
+#endif
+
return false;
}
@@ -240,6 +267,9 @@ static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
+ if (p->se.sched_delayed)
+ return;
+
rq->core->core_task_seq++;
if (!p->core_cookie)
@@ -250,6 +280,9 @@ void sched_core_enqueue(struct rq *rq, struct task_struct *p)
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
+ if (p->se.sched_delayed)
+ return;
+
rq->core->core_task_seq++;
if (sched_core_enqueued(p)) {
@@ -1255,11 +1288,14 @@ bool sched_can_stop_tick(struct rq *rq)
return true;
/*
- * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
- * if there's more than one we need the tick for involuntary
- * preemption.
+ * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
+ * left. For CFS, if there's more than one we need the tick for
+ * involuntary preemption. For SCX, ask.
*/
- if (rq->nr_running > 1)
+ if (scx_enabled() && !scx_can_stop_tick(rq))
+ return false;
+
+ if (rq->cfs.nr_running > 1)
return false;
/*
@@ -1269,7 +1305,7 @@ bool sched_can_stop_tick(struct rq *rq)
* dequeued by migrating while the constrained task continues to run.
* E.g. going from 2->1 without going through pick_next_task().
*/
- if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
+ if (__need_bw_check(rq, rq->curr)) {
if (cfs_task_bw_constrained(rq->curr))
return false;
}
@@ -1341,8 +1377,8 @@ void set_load_weight(struct task_struct *p, bool update_load)
* SCHED_OTHER tasks have to update their load when changing their
* weight
*/
- if (update_load && p->sched_class == &fair_sched_class)
- reweight_task(p, &lw);
+ if (update_load && p->sched_class->reweight_task)
+ p->sched_class->reweight_task(task_rq(p), p, &lw);
else
p->se.load = lw;
}
@@ -1672,6 +1708,9 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
if (unlikely(!p->sched_class->uclamp_enabled))
return;
+ if (p->se.sched_delayed)
+ return;
+
for_each_clamp_id(clamp_id)
uclamp_rq_inc_id(rq, p, clamp_id);
@@ -1696,6 +1735,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
if (unlikely(!p->sched_class->uclamp_enabled))
return;
+ if (p->se.sched_delayed)
+ return;
+
for_each_clamp_id(clamp_id)
uclamp_rq_dec_id(rq, p, clamp_id);
}
@@ -1975,14 +2017,21 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
}
- uclamp_rq_inc(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+ /*
+ * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
+ * ->sched_delayed.
+ */
+ uclamp_rq_inc(rq, p);
if (sched_core_enabled(rq))
sched_core_enqueue(rq, p);
}
-void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+/*
+ * Must only return false when DEQUEUE_SLEEP.
+ */
+inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p, flags);
@@ -1995,8 +2044,12 @@ void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
psi_dequeue(p, flags & DEQUEUE_SLEEP);
}
+ /*
+ * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
+ * and mark the task ->sched_delayed.
+ */
uclamp_rq_dec(rq, p);
- p->sched_class->dequeue_task(rq, p, flags);
+ return p->sched_class->dequeue_task(rq, p, flags);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2014,12 +2067,25 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
- WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
+ SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
+
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
ASSERT_EXCLUSIVE_WRITER(p->on_rq);
+ /*
+ * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
+ * dequeue_task() and cleared *after* enqueue_task().
+ */
+
dequeue_task(rq, p, flags);
}
+static void block_task(struct rq *rq, struct task_struct *p, int flags)
+{
+ if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
+ __block_task(rq, p);
+}
+
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
@@ -2032,6 +2098,17 @@ inline int task_curr(const struct task_struct *p)
}
/*
+ * ->switching_to() is called with the pi_lock and rq_lock held and must not
+ * mess with locking.
+ */
+void check_class_changing(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class)
+{
+ if (prev_class != p->sched_class && p->sched_class->switching_to)
+ p->sched_class->switching_to(rq, p);
+}
+
+/*
* switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
* use the balance_callback list if you want balancing.
*
@@ -2233,6 +2310,12 @@ void migrate_disable(void)
struct task_struct *p = current;
if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Warn about overflow half-way through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
p->migration_disabled++;
return;
}
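
The (s16) cast is a cheap overflow canary: once an unsigned 16-bit nesting counter passes half its range, its value reinterpreted as signed goes negative. A self-contained sketch of the idea (plain C, not kernel code):

/* Sketch: flag a 16-bit nesting counter crossing 32768. */
unsigned short nesting = 0;

for (int i = 0; i < 40000; i++) {
        nesting++;
        if ((short)nesting < 0) {
                /* trips at 32768, half-way through the range */
                break;
        }
}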
@@ -2251,14 +2334,20 @@ void migrate_enable(void)
.flags = SCA_MIGRATE_ENABLE,
};
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
+#endif
+
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}
- if (WARN_ON_ONCE(!p->migration_disabled))
- return;
-
/*
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
@@ -2289,7 +2378,7 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
/* When not in the task's cpumask, no point in looking further. */
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ if (!task_allowed_on_cpu(p, cpu))
return false;
/* migrate_disabled() must be allowed to finish. */
@@ -2298,7 +2387,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
/* Non kernel threads are not allowed during either online or offline. */
if (!(p->flags & PF_KTHREAD))
- return cpu_active(cpu) && task_cpu_possible(cpu, p);
+ return cpu_active(cpu);
/* KTHREAD_IS_PER_CPU is always allowed. */
if (kthread_is_per_cpu(p))
@@ -3607,8 +3696,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
rq->idle_stamp = 0;
}
#endif
-
- p->dl_server = NULL;
}
/*
@@ -3644,12 +3731,14 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
rq = __task_rq_lock(p, &rf);
if (task_on_rq_queued(p)) {
+ update_rq_clock(rq);
+ if (p->se.sched_delayed)
+ enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
* it should preempt the task that is current now.
*/
- update_rq_clock(rq);
wakeup_preempt(rq, p, wake_flags);
}
ttwu_do_wakeup(p);
@@ -3776,6 +3865,15 @@ bool cpus_share_resources(int this_cpu, int that_cpu)
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
{
/*
+ * The BPF scheduler may depend on select_task_rq() being invoked during
+ * wakeups. In addition, @p may end up executing on a different CPU
+ * regardless of what happens in the wakeup path, making the ttwu_queue
+ * optimization less meaningful. Skip if on SCX.
+ */
+ if (task_on_scx(p))
+ return false;
+
+ /*
* Do not complicate things with the async wake_list while the CPU is
* in hotplug state.
*/
@@ -4029,11 +4127,16 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* case the whole 'p->on_rq && ttwu_runnable()' case below
* without taking any locks.
*
+ * Specifically, given that current runs ttwu(), we must be before
+ * schedule()'s block_task(); as such, this must not observe
+ * sched_delayed.
+ *
* In particular:
* - we rely on Program-Order guarantees for all the ordering,
* - we're serialized against set_special_state() by virtue of
* it disabling IRQs (this allows not taking ->pi_lock).
*/
+ SCHED_WARN_ON(p->se.sched_delayed);
if (!ttwu_state_match(p, state, &success))
goto out;
@@ -4322,9 +4425,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.nr_migrations = 0;
p->se.vruntime = 0;
p->se.vlag = 0;
- p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
+ /* A delayed task cannot be in clone(). */
+ SCHED_WARN_ON(p->se.sched_delayed);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
#endif
@@ -4342,6 +4447,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->rt.on_rq = 0;
p->rt.on_list = 0;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ init_scx_entity(&p->scx);
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
@@ -4572,6 +4681,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
+ p->se.custom_slice = 0;
+ p->se.slice = sysctl_sched_base_slice;
/*
* We don't need the reset flag anymore after the fork. It has
@@ -4582,10 +4693,18 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
if (dl_prio(p->prio))
return -EAGAIN;
- else if (rt_prio(p->prio))
+
+ scx_pre_fork(p);
+
+ if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
- else
+#ifdef CONFIG_SCHED_CLASS_EXT
+ } else if (task_should_scx(p)) {
+ p->sched_class = &ext_sched_class;
+#endif
+ } else {
p->sched_class = &fair_sched_class;
+ }
init_entity_runnable_average(&p->se);
@@ -4605,7 +4724,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
return 0;
}
-void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
@@ -4632,11 +4751,19 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ return scx_fork(p);
+}
+
+void sched_cancel_fork(struct task_struct *p)
+{
+ scx_cancel_fork(p);
}
void sched_post_fork(struct task_struct *p)
{
uclamp_post_fork(p);
+ scx_post_fork(p);
}
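
The ext-class hooks give fork a small four-point protocol around task creation; the call order, sketched from the changes above:

/* Sketch: sched_ext hook points across fork().
 *
 *   sched_fork()         -> scx_pre_fork(p)    task not yet visible
 *   sched_cgroup_fork()  -> scx_fork(p)        may fail and abort fork
 *   on success:
 *   sched_post_fork()    -> scx_post_fork(p)
 *   on failure:
 *   sched_cancel_fork()  -> scx_cancel_fork(p)
 */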
unsigned long to_ratio(u64 period, u64 runtime)
@@ -4686,7 +4813,7 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
- activate_task(rq, p, ENQUEUE_NOCLOCK);
+ activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
trace_sched_wakeup_new(p);
wakeup_preempt(rq, p, WF_FORK);
#ifdef CONFIG_SMP
@@ -5469,6 +5596,7 @@ void sched_tick(void)
calc_global_load_tick(rq);
sched_core_tick(rq);
task_tick_mm_cid(rq, curr);
+ scx_tick(rq);
rq_unlock(rq, &rf);
@@ -5481,8 +5609,10 @@ void sched_tick(void)
wq_worker_tick(curr);
#ifdef CONFIG_SMP
- rq->idle_balance = idle_cpu(cpu);
- sched_balance_trigger(rq);
+ if (!scx_switched_all()) {
+ rq->idle_balance = idle_cpu(cpu);
+ sched_balance_trigger(rq);
+ }
#endif
}
@@ -5762,18 +5892,29 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
preempt_count_set(PREEMPT_DISABLED);
}
rcu_sleep_check();
- SCHED_WARN_ON(ct_state() == CONTEXT_USER);
+ SCHED_WARN_ON(ct_state() == CT_STATE_USER);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq()->sched_count);
}
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
- struct rq_flags *rf)
+static void prev_balance(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
{
-#ifdef CONFIG_SMP
+ const struct sched_class *start_class = prev->sched_class;
const struct sched_class *class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+ /*
+ * SCX requires a balance() call before every pick_next_task() including
+ * when waking up from SCHED_IDLE. If @start_class is below SCX, start
+ * from SCX instead.
+ */
+ if (scx_enabled() && sched_class_above(&ext_sched_class, start_class))
+ start_class = &ext_sched_class;
+#endif
+
/*
* We must do the balancing pass before put_prev_task(), such
* that when we release the rq->lock the task is in the same
@@ -5782,13 +5923,10 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
* We can terminate the balance pass as soon as we know there is
* a runnable task of @class priority or higher.
*/
- for_class_range(class, prev->sched_class, &idle_sched_class) {
- if (class->balance(rq, prev, rf))
+ for_active_class_range(class, start_class, &idle_sched_class) {
+ if (class->balance && class->balance(rq, prev, rf))
break;
}
-#endif
-
- put_prev_task(rq, prev);
}
/*
@@ -5800,6 +5938,11 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
const struct sched_class *class;
struct task_struct *p;
+ rq->dl_server = NULL;
+
+ if (scx_enabled())
+ goto restart;
+
/*
* Optimization: we know that if all tasks are in the fair class we can
* call that function directly, but only if the @prev task wasn't of a
@@ -5815,35 +5958,28 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/* Assume the next prioritized class is idle_sched_class */
if (!p) {
- put_prev_task(rq, prev);
- p = pick_next_task_idle(rq);
+ p = pick_task_idle(rq);
+ put_prev_set_next_task(rq, prev, p);
}
- /*
- * This is the fast path; it cannot be a DL server pick;
- * therefore even if @p == @prev, ->dl_server must be NULL.
- */
- if (p->dl_server)
- p->dl_server = NULL;
-
return p;
}
restart:
- put_prev_task_balance(rq, prev, rf);
-
- /*
- * We've updated @prev and no longer need the server link, clear it.
- * Must be done before ->pick_next_task() because that can (re)set
- * ->dl_server.
- */
- if (prev->dl_server)
- prev->dl_server = NULL;
+ prev_balance(rq, prev, rf);
- for_each_class(class) {
- p = class->pick_next_task(rq);
- if (p)
- return p;
+ for_each_active_class(class) {
+ if (class->pick_next_task) {
+ p = class->pick_next_task(rq, prev);
+ if (p)
+ return p;
+ } else {
+ p = class->pick_task(rq);
+ if (p) {
+ put_prev_set_next_task(rq, prev, p);
+ return p;
+ }
+ }
}
BUG(); /* The idle class should always have a runnable task. */
@@ -5873,7 +6009,9 @@ static inline struct task_struct *pick_task(struct rq *rq)
const struct sched_class *class;
struct task_struct *p;
- for_each_class(class) {
+ rq->dl_server = NULL;
+
+ for_each_active_class(class) {
p = class->pick_task(rq);
if (p)
return p;
@@ -5911,6 +6049,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* another cpu during offline.
*/
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
return __pick_next_task(rq, prev, rf);
}
@@ -5929,16 +6068,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
next = rq->core_pick;
- if (next != prev) {
- put_prev_task(rq, prev);
- set_next_task(rq, next);
- }
-
+ rq->dl_server = rq->core_dl_server;
rq->core_pick = NULL;
- goto out;
+ rq->core_dl_server = NULL;
+ goto out_set_next;
}
- put_prev_task_balance(rq, prev, rf);
+ prev_balance(rq, prev, rf);
smt_mask = cpu_smt_mask(cpu);
need_sync = !!rq->core->core_cookie;
@@ -5979,6 +6115,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
next = pick_task(rq);
if (!next->core_cookie) {
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
/*
* For robustness, update the min_vruntime_fi for
* unconstrained picks as well.
@@ -6006,7 +6143,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
update_rq_clock(rq_i);
- p = rq_i->core_pick = pick_task(rq_i);
+ rq_i->core_pick = p = pick_task(rq_i);
+ rq_i->core_dl_server = rq_i->dl_server;
+
if (!max || prio_less(max, p, fi_before))
max = p;
}
@@ -6030,6 +6169,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
rq_i->core_pick = p;
+ rq_i->core_dl_server = NULL;
if (p == rq_i->idle) {
if (rq_i->nr_running) {
@@ -6090,6 +6230,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i == cpu) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6098,6 +6239,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (rq_i->curr == rq_i->core_pick) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6105,8 +6247,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
out_set_next:
- set_next_task(rq, next);
-out:
+ put_prev_set_next_task(rq, prev, next);
if (rq->core->core_forceidle_count && next == rq->idle)
queue_core_balance(rq);
@@ -6342,19 +6483,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* Constants for the sched_mode argument of __schedule().
*
* The mode argument allows RT enabled kernels to differentiate a
- * preemption from blocking on an 'sleeping' spin/rwlock. Note that
- * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
- * optimize the AND operation out and just check for zero.
+ * preemption from blocking on an 'sleeping' spin/rwlock.
*/
-#define SM_NONE 0x0
-#define SM_PREEMPT 0x1
-#define SM_RTLOCK_WAIT 0x2
-
-#ifndef CONFIG_PREEMPT_RT
-# define SM_MASK_PREEMPT (~0U)
-#else
-# define SM_MASK_PREEMPT SM_PREEMPT
-#endif
+#define SM_IDLE (-1)
+#define SM_NONE 0
+#define SM_PREEMPT 1
+#define SM_RTLOCK_WAIT 2
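
Because SM_IDLE is the only negative value, a single comparison now separates preemption from the other modes; sketched, assuming the constants above:

/* Sketch: classification used by __schedule() below. */
bool preempt = sched_mode > SM_NONE;   /* SM_PREEMPT, SM_RTLOCK_WAIT */
bool idle    = sched_mode == SM_IDLE;  /* idle path is never preemption */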
/*
* __schedule() is the main scheduler function.
@@ -6395,9 +6529,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
*
* WARNING: must be called with preemption disabled!
*/
-static void __sched notrace __schedule(unsigned int sched_mode)
+static void __sched notrace __schedule(int sched_mode)
{
struct task_struct *prev, *next;
+ /*
+ * On a PREEMPT_RT kernel, SM_RTLOCK_WAIT is treated as
+ * a preemption by schedule_debug() and RCU.
+ */
+ bool preempt = sched_mode > SM_NONE;
unsigned long *switch_count;
unsigned long prev_state;
struct rq_flags rf;
@@ -6408,13 +6547,13 @@ static void __sched notrace __schedule(unsigned int sched_mode)
rq = cpu_rq(cpu);
prev = rq->curr;
- schedule_debug(prev, !!sched_mode);
+ schedule_debug(prev, preempt);
if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
hrtick_clear(rq);
local_irq_disable();
- rcu_note_context_switch(!!sched_mode);
+ rcu_note_context_switch(preempt);
/*
* Make sure that signal_pending_state()->signal_pending() below
@@ -6443,22 +6582,33 @@ static void __sched notrace __schedule(unsigned int sched_mode)
switch_count = &prev->nivcsw;
+ /* Task state changes only consider SM_PREEMPT as preemption */
+ preempt = sched_mode == SM_PREEMPT;
+
/*
* We must load prev->state once (task_struct::state is volatile), such
* that we form a control dependency vs deactivate_task() below.
*/
prev_state = READ_ONCE(prev->__state);
- if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
+ if (sched_mode == SM_IDLE) {
+ /* SCX must consult the BPF scheduler to tell if rq is empty */
+ if (!rq->nr_running && !scx_enabled()) {
+ next = prev;
+ goto picked;
+ }
+ } else if (!preempt && prev_state) {
if (signal_pending_state(prev_state, prev)) {
WRITE_ONCE(prev->__state, TASK_RUNNING);
} else {
+ int flags = DEQUEUE_NOCLOCK;
+
prev->sched_contributes_to_load =
(prev_state & TASK_UNINTERRUPTIBLE) &&
!(prev_state & TASK_NOLOAD) &&
!(prev_state & TASK_FROZEN);
- if (prev->sched_contributes_to_load)
- rq->nr_uninterruptible++;
+ if (unlikely(is_special_task_state(prev_state)))
+ flags |= DEQUEUE_SPECIAL;
/*
* __schedule() ttwu()
@@ -6471,17 +6621,13 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*
* After this, schedule() must not care about p->state any more.
*/
- deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
-
- if (prev->in_iowait) {
- atomic_inc(&rq->nr_iowait);
- delayacct_blkio_start();
- }
+ block_task(rq, prev, flags);
}
switch_count = &prev->nvcsw;
}
next = pick_next_task(rq, prev, &rf);
+picked:
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
@@ -6523,7 +6669,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
psi_account_irqtime(rq, prev, next);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
- trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+ trace_sched_switch(preempt, prev, next, prev_state);
/* Also unlocks the rq: */
rq = context_switch(rq, prev, next, &rf);
@@ -6599,7 +6745,7 @@ static void sched_update_worker(struct task_struct *tsk)
}
}
-static __always_inline void __schedule_loop(unsigned int sched_mode)
+static __always_inline void __schedule_loop(int sched_mode)
{
do {
preempt_disable();
@@ -6644,7 +6790,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->__state);
do {
- __schedule(SM_NONE);
+ __schedule(SM_IDLE);
} while (need_resched());
}
@@ -6658,7 +6804,7 @@ asmlinkage __visible void __sched schedule_user(void)
* we find a better solution.
*
* NB: There are buggy callers of this function. Ideally we
- * should warn if prev_state != CONTEXT_USER, but that will trigger
+ * should warn if prev_state != CT_STATE_USER, but that will trigger
* too frequently to make sense yet.
*/
enum ctx_state prev_state = exception_enter();
@@ -6870,6 +7016,10 @@ void __setscheduler_prio(struct task_struct *p, int prio)
p->sched_class = &dl_sched_class;
else if (rt_prio(prio))
p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ else if (task_should_scx(p))
+ p->sched_class = &ext_sched_class;
+#endif
else
p->sched_class = &fair_sched_class;
@@ -7015,6 +7165,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
}
__setscheduler_prio(p, prio);
+ check_class_changing(rq, p, prev_class);
if (queued)
enqueue_task(rq, p, queue_flag);
@@ -7405,7 +7556,7 @@ EXPORT_SYMBOL(io_schedule);
void sched_show_task(struct task_struct *p)
{
- unsigned long free = 0;
+ unsigned long free;
int ppid;
if (!try_get_task_stack(p))
@@ -7415,9 +7566,7 @@ void sched_show_task(struct task_struct *p)
if (task_is_running(p))
pr_cont(" running task ");
-#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
-#endif
ppid = 0;
rcu_read_lock();
if (pid_alive(p))
@@ -7429,6 +7578,7 @@ void sched_show_task(struct task_struct *p)
print_worker_info(KERN_INFO, p);
print_stop_info(KERN_INFO, p);
+ print_scx_info(KERN_INFO, p);
show_stack(p, NULL, KERN_INFO);
put_task_stack(p);
}
@@ -7957,6 +8107,8 @@ int sched_cpu_activate(unsigned int cpu)
cpuset_cpu_active();
}
+ scx_rq_activate(rq);
+
/*
* Put the rq online, if not already. This happens:
*
@@ -8006,6 +8158,8 @@ int sched_cpu_deactivate(unsigned int cpu)
sched_set_rq_offline(rq, cpu);
+ scx_rq_deactivate(rq);
+
/*
* When going down, decrement the number of cores with SMT present.
*/
@@ -8190,11 +8344,15 @@ void __init sched_init(void)
int i;
/* Make sure the linker didn't screw up */
- BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
- &fair_sched_class != &rt_sched_class + 1 ||
- &rt_sched_class != &dl_sched_class + 1);
#ifdef CONFIG_SMP
- BUG_ON(&dl_sched_class != &stop_sched_class + 1);
+ BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
+#endif
+ BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
+ BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
+ BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
+#ifdef CONFIG_SCHED_CLASS_EXT
+ BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
+ BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
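
The assertions encode the linker-provided ordering of the scheduling classes; in summary (ext only with CONFIG_SCHED_CLASS_EXT, stop only on SMP):

/* Sketch: required class order, highest priority first:
 *   stop > dl > rt > fair > ext > idle
 */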
wait_bit_init();
@@ -8218,6 +8376,9 @@ void __init sched_init(void)
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
+#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
@@ -8228,8 +8389,6 @@ void __init sched_init(void)
#endif /* CONFIG_RT_GROUP_SCHED */
}
- init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
-
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
@@ -8284,8 +8443,13 @@ void __init sched_init(void)
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
- rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * This is required for the init CPU because rt.c:__enable_runtime()
+ * starts working after scheduler_running, which is not the case
+ * yet.
+ */
+ rq->rt.rt_runtime = global_rt_runtime();
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
@@ -8317,10 +8481,12 @@ void __init sched_init(void)
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
atomic_set(&rq->nr_iowait, 0);
+ fair_server_init(rq);
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
rq->core_enabled = 0;
rq->core_tree = RB_ROOT;
rq->core_forceidle_count = 0;
@@ -8333,6 +8499,7 @@ void __init sched_init(void)
}
set_load_weight(&init_task, false);
+ init_task.se.slice = sysctl_sched_base_slice;
/*
* The boot idle thread does lazy MMU switching as well:
@@ -8363,6 +8530,7 @@ void __init sched_init(void)
balance_push_set(smp_processor_id(), false);
#endif
init_sched_fair_class();
+ init_sched_ext_class();
psi_init();
@@ -8548,7 +8716,7 @@ void normalize_rt_tasks(void)
schedstat_set(p->stats.sleep_start, 0);
schedstat_set(p->stats.block_start, 0);
- if (!dl_task(p) && !rt_task(p)) {
+ if (!rt_or_dl_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
@@ -8648,6 +8816,7 @@ struct task_group *sched_create_group(struct task_group *parent)
if (!alloc_rt_sched_group(tg, parent))
goto err;
+ scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
alloc_uclamp_sched_group(tg, parent);
return tg;
@@ -8775,6 +8944,7 @@ void sched_move_task(struct task_struct *tsk)
put_prev_task(rq, tsk);
sched_change_group(tsk, group);
+ scx_move_task(tsk);
if (queued)
enqueue_task(rq, tsk, queue_flags);
@@ -8789,11 +8959,6 @@ void sched_move_task(struct task_struct *tsk)
}
}
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct task_group, css) : NULL;
-}
-
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
@@ -8817,6 +8982,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
struct task_group *parent = css_tg(css->parent);
+ int ret;
+
+ ret = scx_tg_online(tg);
+ if (ret)
+ return ret;
if (parent)
sched_online_group(tg, parent);
@@ -8831,6 +9001,13 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
return 0;
}
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ scx_tg_offline(tg);
+}
+
static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
@@ -8848,9 +9025,9 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
sched_unregister_group(tg);
}
-#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
+#ifdef CONFIG_RT_GROUP_SCHED
struct task_struct *task;
struct cgroup_subsys_state *css;
@@ -8858,9 +9035,9 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
}
- return 0;
-}
#endif
+ return scx_cgroup_can_attach(tset);
+}
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
@@ -8869,6 +9046,13 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
+
+ scx_cgroup_finish_attach();
+}
+
+static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+ scx_cgroup_cancel_attach(tset);
}
#ifdef CONFIG_UCLAMP_TASK_GROUP
@@ -9045,22 +9229,36 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+static unsigned long tg_weight(struct task_group *tg)
+{
#ifdef CONFIG_FAIR_GROUP_SCHED
+ return scale_load_down(tg->shares);
+#else
+ return sched_weight_from_cgroup(tg->scx_weight);
+#endif
+}
+
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
{
+ int ret;
+
if (shareval > scale_load_down(ULONG_MAX))
shareval = MAX_SHARES;
- return sched_group_set_shares(css_tg(css), scale_load(shareval));
+ ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
+ if (!ret)
+ scx_group_set_weight(css_tg(css),
+ sched_weight_to_cgroup(shareval));
+ return ret;
}
static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- struct task_group *tg = css_tg(css);
-
- return (u64) scale_load_down(tg->shares);
+ return tg_weight(css_tg(css));
}
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);
@@ -9406,7 +9604,6 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
-#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
@@ -9434,7 +9631,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
}
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
@@ -9444,12 +9641,17 @@ static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 idle)
{
- return sched_group_set_idle(css_tg(css), idle);
+ int ret;
+
+ ret = sched_group_set_idle(css_tg(css), idle);
+ if (!ret)
+ scx_group_set_idle(css_tg(css), idle);
+ return ret;
}
#endif
static struct cftype cpu_legacy_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
{
.name = "shares",
.read_u64 = cpu_shares_read_u64,
@@ -9559,38 +9761,35 @@ static int cpu_local_stat_show(struct seq_file *sf,
return 0;
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- struct task_group *tg = css_tg(css);
- u64 weight = scale_load_down(tg->shares);
-
- return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
+ return sched_weight_to_cgroup(tg_weight(css_tg(css)));
}
static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 weight)
+ struct cftype *cft, u64 cgrp_weight)
{
- /*
- * cgroup weight knobs should use the common MIN, DFL and MAX
- * values which are 1, 100 and 10000 respectively. While it loses
- * a bit of range on both ends, it maps pretty well onto the shares
- * value used by scheduler and the round-trip conversions preserve
- * the original value over the entire range.
- */
- if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
+ unsigned long weight;
+ int ret;
+
+ if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
return -ERANGE;
- weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
+ weight = sched_weight_from_cgroup(cgrp_weight);
- return sched_group_set_shares(css_tg(css), scale_load(weight));
+ ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+ if (!ret)
+ scx_group_set_weight(css_tg(css), cgrp_weight);
+ return ret;
}
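
sched_weight_from_cgroup()/sched_weight_to_cgroup() replace the open-coded conversion that the removed comment described; per that comment, the mapping is approximately:

/* Sketch: cgroup weight <-> scheduler weight round trip.
 *
 *   sched_weight_from_cgroup(w) ~ DIV_ROUND_CLOSEST_ULL(w * 1024, 100)
 *   sched_weight_to_cgroup(s)   ~ DIV_ROUND_CLOSEST_ULL(s * 100, 1024)
 *
 * e.g. CGROUP_WEIGHT_DFL (100) -> 1024 -> 100, preserving values
 * across the round trip over the whole [1, 10000] range.
 */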
static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- unsigned long weight = scale_load_down(css_tg(css)->shares);
+ unsigned long weight = tg_weight(css_tg(css));
int last_delta = INT_MAX;
int prio, delta;
@@ -9609,7 +9808,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 nice)
{
unsigned long weight;
- int idx;
+ int idx, ret;
if (nice < MIN_NICE || nice > MAX_NICE)
return -ERANGE;
@@ -9618,9 +9817,13 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
idx = array_index_nospec(idx, 40);
weight = sched_prio_to_weight[idx];
- return sched_group_set_shares(css_tg(css), scale_load(weight));
+ ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+ if (!ret)
+ scx_group_set_weight(css_tg(css),
+ sched_weight_to_cgroup(weight));
+ return ret;
}
-#endif
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
long period, long quota)
@@ -9680,7 +9883,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
#endif
static struct cftype cpu_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
{
.name = "weight",
.flags = CFTYPE_NOT_ON_ROOT,
@@ -9734,14 +9937,14 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
.css_online = cpu_cgroup_css_online,
+ .css_offline = cpu_cgroup_css_offline,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.css_extra_stat_show = cpu_extra_stat_show,
.css_local_stat_show = cpu_local_stat_show,
-#ifdef CONFIG_RT_GROUP_SCHED
.can_attach = cpu_cgroup_can_attach,
-#endif
.attach = cpu_cgroup_attach,
+ .cancel_attach = cpu_cgroup_cancel_attach,
.legacy_cftypes = cpu_legacy_files,
.dfl_cftypes = cpu_files,
.early_init = true,
@@ -9752,7 +9955,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
void dump_cpu_task(int cpu)
{
- if (cpu == smp_processor_id() && in_hardirq()) {
+ if (in_hardirq() && cpu == smp_processor_id()) {
struct pt_regs *regs;
regs = get_irq_regs();
@@ -10331,3 +10534,38 @@ void sched_mm_cid_fork(struct task_struct *t)
t->mm_cid_active = 1;
}
#endif
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx)
+{
+ struct rq *rq = task_rq(p);
+
+ lockdep_assert_rq_held(rq);
+
+ *ctx = (struct sched_enq_and_set_ctx){
+ .p = p,
+ .queue_flags = queue_flags,
+ .queued = task_on_rq_queued(p),
+ .running = task_current(rq, p),
+ };
+
+ update_rq_clock(rq);
+ if (ctx->queued)
+ dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
+ if (ctx->running)
+ put_prev_task(rq, p);
+}
+
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
+{
+ struct rq *rq = task_rq(ctx->p);
+
+ lockdep_assert_rq_held(rq);
+
+ if (ctx->queued)
+ enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
+ if (ctx->running)
+ set_next_task(rq, ctx->p);
+}
+#endif /* CONFIG_SCHED_CLASS_EXT */
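
These two helpers are intended as a save/restore bracket around changes to a task's scheduling properties; a hedged usage sketch (flag choice illustrative):

/* Sketch: mutate a queued/running task's scheduling state. */
struct sched_enq_and_set_ctx ctx;

sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
/* ... safely change p's class/parameters while it is off the rq ... */
sched_enq_and_set_task(&ctx);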
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index eece6244f9d2..c6ba15388ea7 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -197,8 +197,10 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
{
- unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu);
+ unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
+ if (!scx_switched_all())
+ util += cpu_util_cfs_boost(sg_cpu->cpu);
util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
util = max(util, boost);
sg_cpu->bw_min = min;
@@ -325,16 +327,35 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
}
#ifdef CONFIG_NO_HZ_COMMON
-static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
{
- unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
- bool ret = idle_calls == sg_cpu->saved_idle_calls;
+ unsigned long idle_calls;
+ bool ret;
+
+ /*
+ * The heuristics in this function is for the fair class. For SCX, the
+ * performance target comes directly from the BPF scheduler. Let's just
+ * follow it.
+ */
+ if (scx_switched_all())
+ return false;
+
+ /* if capped by uclamp_max, always update to be in compliance */
+ if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
+ return false;
+
+ /*
+ * Maintain the frequency if the CPU has not been idle recently, as
+ * reduction is likely to be premature.
+ */
+ idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
+ ret = idle_calls == sg_cpu->saved_idle_calls;
sg_cpu->saved_idle_calls = idle_calls;
return ret;
}
#else
-static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
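
sugov_hold_freq() folds the previously open-coded checks into one predicate; in summary, it asks to keep the current frequency only when all of the following hold:

/* Sketch: conditions under which sugov_hold_freq() returns true.
 *   - not in full-SCX mode (scx_switched_all() is false)
 *   - the rq is not capped by uclamp_max
 *   - the CPU recorded no new idle entries since the last check
 */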
/*
@@ -382,14 +403,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
return;
next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
- /*
- * Do not reduce the frequency if the CPU has not been idle
- * recently, as the reduction is likely to be premature then.
- *
- * Except when the rq is capped by uclamp_max.
- */
- if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
- sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
+
+ if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
!sg_policy->need_freq_update) {
next_f = sg_policy->next_freq;
@@ -436,14 +451,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
- /*
- * Do not reduce the target performance level if the CPU has not been
- * idle recently, as the reduction is likely to be premature then.
- *
- * Except when the rq is capped by uclamp_max.
- */
- if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
- sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+ if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
@@ -654,9 +662,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
+ .sched_runtime = NSEC_PER_MSEC,
+ .sched_deadline = 10 * NSEC_PER_MSEC,
+ .sched_period = 10 * NSEC_PER_MSEC,
};
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index f59e5c19d944..9ce93d0bf452 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -320,19 +320,12 @@ void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
__sub_running_bw(dl_se->dl_bw, dl_rq);
}
-static void dl_change_utilization(struct task_struct *p, u64 new_bw)
+static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
{
- struct rq *rq;
-
- WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
-
- if (task_on_rq_queued(p))
- return;
+ if (dl_se->dl_non_contending) {
+ sub_running_bw(dl_se, &rq->dl);
+ dl_se->dl_non_contending = 0;
- rq = task_rq(p);
- if (p->dl.dl_non_contending) {
- sub_running_bw(&p->dl, &rq->dl);
- p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be canceled, inactive_task_timer()
@@ -340,13 +333,25 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
* will not touch the rq's active utilization,
* so we are still safe.
*/
- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
+ if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
+ if (!dl_server(dl_se))
+ put_task_struct(dl_task_of(dl_se));
+ }
}
- __sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ __sub_rq_bw(dl_se->dl_bw, &rq->dl);
__add_rq_bw(new_bw, &rq->dl);
}
+static void dl_change_utilization(struct task_struct *p, u64 new_bw)
+{
+ WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
+
+ if (task_on_rq_queued(p))
+ return;
+
+ dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
+}
+
static void __dl_clear_params(struct sched_dl_entity *dl_se);
/*
@@ -771,6 +776,15 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
/* for non-boosted task, pi_of(dl_se) == dl_se */
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
dl_se->runtime = pi_of(dl_se)->dl_runtime;
+
+ /*
+ * If it is a deferred reservation, and the server
+ * is not handling a starvation case, defer it.
+ */
+ if (dl_se->dl_defer && !dl_se->dl_defer_running) {
+ dl_se->dl_throttled = 1;
+ dl_se->dl_defer_armed = 1;
+ }
}
/*
@@ -809,6 +823,9 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
replenish_dl_new_period(dl_se, rq);
}
+static int start_dl_timer(struct sched_dl_entity *dl_se);
+static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);
+
/*
* Pure Earliest Deadline First (EDF) scheduling does not deal with the
* possibility of a entity lasting more than what it declared, and thus
@@ -837,9 +854,18 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
+ *
+ * Or, it could be the case of a deferred reservation that
+ * was not able to consume its runtime in the background and
+ * reached this point with current u > U.
+ *
+ * In both cases, set a new period.
*/
- if (dl_se->dl_deadline == 0)
- replenish_dl_new_period(dl_se, rq);
+ if (dl_se->dl_deadline == 0 ||
+ (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
+ }
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
@@ -873,6 +899,44 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
+
+ /*
+ * If this is the replenishment of a deferred reservation,
+ * clear the flag and return.
+ */
+ if (dl_se->dl_defer_armed) {
+ dl_se->dl_defer_armed = 0;
+ return;
+ }
+
+ /*
+ * At this point, if the deferred server is not armed and the deadline
+ * is in the future, and the server is not already running, throttle it
+ * and arm the defer timer.
+ */
+ if (dl_se->dl_defer && !dl_se->dl_defer_running &&
+ dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
+ if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {
+
+ /*
+ * Set dl_se->dl_defer_armed and dl_throttled variables to
+ * inform start_dl_timer() that this is a deferred
+ * activation.
+ */
+ dl_se->dl_defer_armed = 1;
+ dl_se->dl_throttled = 1;
+ if (!start_dl_timer(dl_se)) {
+ /*
+ * If for whatever reason (delays), a previous timer was
+ * queued but not serviced, cancel it and clean the
+ * deferrable server variables intended for start_dl_timer().
+ */
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
+ }
+ }
+ }
}
/*
@@ -1023,6 +1087,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
}
replenish_dl_new_period(dl_se, rq);
+ } else if (dl_server(dl_se) && dl_se->dl_defer) {
+ /*
+ * The server can still use its previous deadline, so check if
+ * it left the dl_defer_running state.
+ */
+ if (!dl_se->dl_defer_running) {
+ dl_se->dl_defer_armed = 1;
+ dl_se->dl_throttled = 1;
+ }
}
}
@@ -1055,8 +1128,21 @@ static int start_dl_timer(struct sched_dl_entity *dl_se)
* We want the timer to fire at the deadline, but considering
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
+ *
+ * The deferred reservation will have its timer set to
+ * (deadline - runtime). At that point, the CBS rule will decide
+ * if the current deadline can be used, or if a replenishment is
+	 * required to avoid adding too much pressure on the system
+ * (current u > U).
*/
- act = ns_to_ktime(dl_next_period(dl_se));
+ if (dl_se->dl_defer_armed) {
+ WARN_ON_ONCE(!dl_se->dl_throttled);
+ act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
+ } else {
+ /* act = deadline - rel-deadline + period */
+ act = ns_to_ktime(dl_next_period(dl_se));
+ }
+
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
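
To make the clock-domain correction above concrete, here is a hedged standalone sketch (editorial, not from the patch) of how an expiry computed on the rq clock maps into the hrtimer base: the offset between the two clock readings is simply added to the target.

/* Sketch only: translate an rq-clock expiry into hrtimer-base time. */
static long long timer_target_ns(long long expiry_rq_ns,
				 long long hrtimer_now_ns,
				 long long rq_clock_now_ns)
{
	/* mirrors act = ktime_add_ns(act, now - rq_clock(rq)) above */
	return expiry_rq_ns + (hrtimer_now_ns - rq_clock_now_ns);
}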
@@ -1106,6 +1192,62 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
#endif
}
+/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
+static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
+
+static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
+{
+ struct rq *rq = rq_of_dl_se(dl_se);
+ u64 fw;
+
+ scoped_guard (rq_lock, rq) {
+ struct rq_flags *rf = &scope.rf;
+
+ if (!dl_se->dl_throttled || !dl_se->dl_runtime)
+ return HRTIMER_NORESTART;
+
+ sched_clock_tick();
+ update_rq_clock(rq);
+
+ if (!dl_se->dl_runtime)
+ return HRTIMER_NORESTART;
+
+ if (!dl_se->server_has_tasks(dl_se)) {
+ replenish_dl_entity(dl_se);
+ return HRTIMER_NORESTART;
+ }
+
+ if (dl_se->dl_defer_armed) {
+ /*
+			 * First check if the server could consume runtime in the
+			 * background. If so, it is possible to push the defer timer
+			 * forward by this amount of time. The dl_server_min_res serves
+			 * as a limit to avoid forwarding the timer by too small an
+			 * amount of time.
+ */
+ if (dl_time_before(rq_clock(dl_se->rq),
+ (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {
+
+ /* reset the defer timer */
+ fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
+
+ hrtimer_forward_now(timer, ns_to_ktime(fw));
+ return HRTIMER_RESTART;
+ }
+
+ dl_se->dl_defer_running = 1;
+ }
+
+ enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
+
+ if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
+ resched_curr(rq);
+
+ __push_dl_task(rq, rf);
+ }
+
+ return HRTIMER_NORESTART;
+}
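
The forwarding math above can be summarized in a standalone sketch (editorial illustration): the timer is pushed by exactly the amount of background time still available, and only when that amount exceeds the dl_server_min_res floor.

/* Sketch only: decide whether to re-arm the defer timer, and by how much. */
static long long defer_forward_ns(long long now_ns, long long deadline_ns,
				  long long runtime_ns, long long min_res_ns)
{
	long long slack = deadline_ns - runtime_ns - now_ns;

	/* push the timer only if more than the minimum slack remains */
	return slack > min_res_ns ? slack : 0;
}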
+
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
@@ -1128,28 +1270,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
struct rq_flags rf;
struct rq *rq;
- if (dl_server(dl_se)) {
- struct rq *rq = rq_of_dl_se(dl_se);
- struct rq_flags rf;
-
- rq_lock(rq, &rf);
- if (dl_se->dl_throttled) {
- sched_clock_tick();
- update_rq_clock(rq);
-
- if (dl_se->server_has_tasks(dl_se)) {
- enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
- resched_curr(rq);
- __push_dl_task(rq, &rf);
- } else {
- replenish_dl_entity(dl_se);
- }
-
- }
- rq_unlock(rq, &rf);
-
- return HRTIMER_NORESTART;
- }
+ if (dl_server(dl_se))
+ return dl_server_timer(timer, dl_se);
p = dl_task_of(dl_se);
rq = task_rq_lock(p, &rf);
@@ -1319,22 +1441,10 @@ static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
return (delta * u_act) >> BW_SHIFT;
}
-static inline void
-update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
- int flags);
-static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
+s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
s64 scaled_delta_exec;
- if (unlikely(delta_exec <= 0)) {
- if (unlikely(dl_se->dl_yielded))
- goto throttle;
- return;
- }
-
- if (dl_entity_is_special(dl_se))
- return;
-
/*
* For tasks that participate in GRUB, we implement GRUB-PA: the
* spare reclaimed bandwidth is used to clock down frequency.
@@ -1353,8 +1463,64 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
}
+ return scaled_delta_exec;
+}
+
+static inline void
+update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
+ int flags);
+static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
+{
+ s64 scaled_delta_exec;
+
+ if (unlikely(delta_exec <= 0)) {
+ if (unlikely(dl_se->dl_yielded))
+ goto throttle;
+ return;
+ }
+
+ if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
+ return;
+
+ if (dl_entity_is_special(dl_se))
+ return;
+
+ scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
+
dl_se->runtime -= scaled_delta_exec;
+ /*
+ * The fair server can consume its runtime while throttled (not queued/
+ * running as regular CFS).
+ *
+	 * If the server consumes its entire runtime in this state, it is not
+	 * required for the current period. Thus, reset the server by starting
+	 * a new period, pushing the activation forward.
+ */
+ if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
+ /*
+		 * If the server was previously activated - the starving condition
+		 * took place - at this point it went away because the fair scheduler
+		 * was able to get runtime in the background. So return to the
+		 * initial state.
+ */
+ dl_se->dl_defer_running = 0;
+
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+
+ replenish_dl_new_period(dl_se, dl_se->rq);
+
+ /*
+ * Not being able to start the timer seems problematic. If it could not
+ * be started for whatever reason, we need to "unthrottle" the DL server
+ * and queue right away. Otherwise nothing might queue it. That's similar
+ * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
+ */
+ WARN_ON_ONCE(!start_dl_timer(dl_se));
+
+ return;
+ }
+
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
@@ -1382,6 +1548,14 @@ throttle:
}
/*
+ * The fair server (sole dl_server) does not account for real-time
+ * workload because it is running fair work.
+ */
+ if (dl_se == &rq->fair_server)
+ return;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
* Because -- for now -- we share the rt bandwidth, we need to
* account our runtime there too, otherwise actual rt tasks
* would be able to exceed the shared quota.
@@ -1405,34 +1579,155 @@ throttle:
rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
+#endif
+}
+
+/*
+ * In the non-defer mode, the idle time is not accounted, as the
+ * server provides a guarantee.
+ *
+ * If the dl_server is in defer mode, the idle time is also considered
+ * as time available for the fair server, avoiding a penalty for the
+ * rt scheduler that did not consume that time.
+ */
+void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
+{
+ s64 delta_exec, scaled_delta_exec;
+
+ if (!rq->fair_server.dl_defer)
+ return;
+
+ /* no need to discount more */
+ if (rq->fair_server.runtime < 0)
+ return;
+
+ delta_exec = rq_clock_task(rq) - p->se.exec_start;
+ if (delta_exec < 0)
+ return;
+
+ scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
+
+ rq->fair_server.runtime -= scaled_delta_exec;
+
+ if (rq->fair_server.runtime < 0) {
+ rq->fair_server.dl_defer_running = 0;
+ rq->fair_server.runtime = 0;
+ }
+
+ p->se.exec_start = rq_clock_task(rq);
}
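
A compact sketch of the discounting above (editorial illustration, with the GRUB/capacity scaling assumed to be folded into the delta): idle time is charged against the deferred server's runtime, and once the runtime is exhausted the server falls back out of the defer-running state.

/* Sketch only: charge idle time against a deferred server's runtime. */
static void discount_idle(long long *runtime_ns, int *defer_running,
			  long long idle_delta_ns)
{
	if (*runtime_ns < 0 || idle_delta_ns < 0)
		return;

	*runtime_ns -= idle_delta_ns;	/* scaled in the real code */
	if (*runtime_ns < 0) {
		*defer_running = 0;
		*runtime_ns = 0;
	}
}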
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
{
- update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
+ /* 0 runtime = fair server disabled */
+ if (dl_se->dl_runtime)
+ update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
}
void dl_server_start(struct sched_dl_entity *dl_se)
{
+ struct rq *rq = dl_se->rq;
+
+ /*
+	 * XXX: applying the parameters does not work properly at the init
+	 * phase for the fair server because things are not yet set up. We
+	 * need to improve this before making it generic.
+ */
if (!dl_server(dl_se)) {
+ u64 runtime = 50 * NSEC_PER_MSEC;
+ u64 period = 1000 * NSEC_PER_MSEC;
+
+ dl_server_apply_params(dl_se, runtime, period, 1);
+
dl_se->dl_server = 1;
+ dl_se->dl_defer = 1;
setup_new_dl_entity(dl_se);
}
+
+ if (!dl_se->dl_runtime)
+ return;
+
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
+ if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
+ resched_curr(dl_se->rq);
}
void dl_server_stop(struct sched_dl_entity *dl_se)
{
+ if (!dl_se->dl_runtime)
+ return;
+
dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
}
void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
dl_server_has_tasks_f has_tasks,
- dl_server_pick_f pick)
+ dl_server_pick_f pick_task)
{
dl_se->rq = rq;
dl_se->server_has_tasks = has_tasks;
- dl_se->server_pick = pick;
+ dl_se->server_pick_task = pick_task;
+}
+
+void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+ u64 new_bw = dl_se->dl_bw;
+ int cpu = cpu_of(rq);
+ struct dl_bw *dl_b;
+
+ dl_b = dl_bw_of(cpu_of(rq));
+ guard(raw_spinlock)(&dl_b->lock);
+
+ if (!dl_bw_cpus(cpu))
+ return;
+
+ __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
+}
+
+int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
+{
+ u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ u64 new_bw = to_ratio(period, runtime);
+ struct rq *rq = dl_se->rq;
+ int cpu = cpu_of(rq);
+ struct dl_bw *dl_b;
+ unsigned long cap;
+ int retval = 0;
+ int cpus;
+
+ dl_b = dl_bw_of(cpu);
+ guard(raw_spinlock)(&dl_b->lock);
+
+ cpus = dl_bw_cpus(cpu);
+ cap = dl_bw_capacity(cpu);
+
+ if (__dl_overflow(dl_b, cap, old_bw, new_bw))
+ return -EBUSY;
+
+ if (init) {
+ __add_rq_bw(new_bw, &rq->dl);
+ __dl_add(dl_b, new_bw, cpus);
+ } else {
+ __dl_sub(dl_b, dl_se->dl_bw, cpus);
+ __dl_add(dl_b, new_bw, cpus);
+
+ dl_rq_change_utilization(rq, dl_se, new_bw);
+ }
+
+ dl_se->dl_runtime = runtime;
+ dl_se->dl_deadline = period;
+ dl_se->dl_period = period;
+
+ dl_se->runtime = 0;
+ dl_se->deadline = 0;
+
+ dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
+
+ return retval;
}
/*
@@ -1599,46 +1894,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}
-static inline struct sched_statistics *
+static __always_inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
+ if (!schedstat_enabled())
+ return NULL;
+
+ if (dl_server(dl_se))
+ return NULL;
+
return &dl_task_of(dl_se)->stats;
}
static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
@@ -1735,7 +2024,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
* be counted in the active utilization; hence, we need to call
* add_running_bw().
*/
- if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+ if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
if (flags & ENQUEUE_WAKEUP)
task_contending(dl_se, flags);
@@ -1757,6 +2046,25 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
setup_new_dl_entity(dl_se);
}
+ /*
+ * If the reservation is still throttled, e.g., it got replenished but is a
+	 * deferred task and still has to wait, don't enqueue.
+ */
+ if (dl_se->dl_throttled && start_dl_timer(dl_se))
+ return;
+
+ /*
+ * We're about to enqueue, make sure we're not ->dl_throttled!
+ * In case the timer was not started, say because the defer time
+ * has passed, mark as not throttled and mark unarmed.
+ * Also cancel earlier timers, since letting those run is pointless.
+ */
+ if (dl_se->dl_throttled) {
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
+ }
+
__enqueue_dl_entity(dl_se);
}
@@ -1846,7 +2154,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
enqueue_pushable_dl_task(rq, p);
}
-static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
@@ -1856,6 +2164,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
dequeue_dl_entity(&p->dl, flags);
if (!p->dl.dl_throttled && !dl_server(&p->dl))
dequeue_pushable_dl_task(rq, p);
+
+ return true;
}
/*
@@ -2074,6 +2384,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
deadline_queue_push_tasks(rq);
+
+ if (hrtick_enabled(rq))
+ start_hrtick_dl(rq, &p->dl);
}
static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
@@ -2086,7 +2399,11 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
return __node_2_dle(left);
}
-static struct task_struct *pick_task_dl(struct rq *rq)
+/*
+ * __pick_next_task_dl - Helper to pick the next -deadline task to run.
+ * @rq: The runqueue to pick the next task from.
+ */
+static struct task_struct *__pick_task_dl(struct rq *rq)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
@@ -2100,14 +2417,13 @@ again:
WARN_ON_ONCE(!dl_se);
if (dl_server(dl_se)) {
- p = dl_se->server_pick(dl_se);
+ p = dl_se->server_pick_task(dl_se);
if (!p) {
- WARN_ON_ONCE(1);
dl_se->dl_yielded = 1;
update_curr_dl_se(rq, dl_se, 0);
goto again;
}
- p->dl_server = dl_se;
+ rq->dl_server = dl_se;
} else {
p = dl_task_of(dl_se);
}
@@ -2115,24 +2431,12 @@ again:
return p;
}
-static struct task_struct *pick_next_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq)
{
- struct task_struct *p;
-
- p = pick_task_dl(rq);
- if (!p)
- return p;
-
- if (!p->dl_server)
- set_next_task_dl(rq, p, true);
-
- if (hrtick_enabled(rq))
- start_hrtick_dl(rq, &p->dl);
-
- return p;
+ return __pick_task_dl(rq);
}
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
@@ -2824,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = {
.wakeup_preempt = wakeup_preempt_dl,
- .pick_next_task = pick_next_task_dl,
+ .pick_task = pick_task_dl,
.put_prev_task = put_prev_task_dl,
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP
.balance = balance_dl,
- .pick_task = pick_task_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c1eb9a1afd13..f4035c7a0fa1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -333,8 +333,165 @@ static const struct file_operations sched_debug_fops = {
.release = seq_release,
};
+enum dl_param {
+ DL_RUNTIME = 0,
+ DL_PERIOD,
+};
+
+static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
+static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC; /* 100 us */
+
+static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, enum dl_param param)
+{
+ long cpu = (long) ((struct seq_file *) filp->private_data)->private;
+ struct rq *rq = cpu_rq(cpu);
+ u64 runtime, period;
+ size_t err;
+ int retval;
+ u64 value;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &value);
+ if (err)
+ return err;
+
+ scoped_guard (rq_lock_irqsave, rq) {
+ runtime = rq->fair_server.dl_runtime;
+ period = rq->fair_server.dl_period;
+
+ switch (param) {
+ case DL_RUNTIME:
+ if (runtime == value)
+ break;
+ runtime = value;
+ break;
+ case DL_PERIOD:
+ if (value == period)
+ break;
+ period = value;
+ break;
+ }
+
+ if (runtime > period ||
+ period > fair_server_period_max ||
+ period < fair_server_period_min) {
+ return -EINVAL;
+ }
+
+ if (rq->cfs.h_nr_running) {
+ update_rq_clock(rq);
+ dl_server_stop(&rq->fair_server);
+ }
+
+ retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
+ if (retval)
+ cnt = retval;
+
+ if (!runtime)
+ printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
+ cpu_of(rq));
+
+ if (rq->cfs.h_nr_running)
+ dl_server_start(&rq->fair_server);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static size_t sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param)
+{
+ unsigned long cpu = (unsigned long) m->private;
+ struct rq *rq = cpu_rq(cpu);
+ u64 value;
+
+ switch (param) {
+ case DL_RUNTIME:
+ value = rq->fair_server.dl_runtime;
+ break;
+ case DL_PERIOD:
+ value = rq->fair_server.dl_period;
+ break;
+ }
+
+ seq_printf(m, "%llu\n", value);
+ return 0;
+}
+
+static ssize_t
+sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME);
+}
+
+static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
+{
+ return sched_fair_server_show(m, v, DL_RUNTIME);
+}
+
+static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
+}
+
+static const struct file_operations fair_server_runtime_fops = {
+ .open = sched_fair_server_runtime_open,
+ .write = sched_fair_server_runtime_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t
+sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD);
+}
+
+static int sched_fair_server_period_show(struct seq_file *m, void *v)
+{
+ return sched_fair_server_show(m, v, DL_PERIOD);
+}
+
+static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_fair_server_period_show, inode->i_private);
+}
+
+static const struct file_operations fair_server_period_fops = {
+ .open = sched_fair_server_period_open,
+ .write = sched_fair_server_period_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static struct dentry *debugfs_sched;
+static void debugfs_fair_server_init(void)
+{
+ struct dentry *d_fair;
+ unsigned long cpu;
+
+ d_fair = debugfs_create_dir("fair_server", debugfs_sched);
+ if (!d_fair)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct dentry *d_cpu;
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "cpu%lu", cpu);
+ d_cpu = debugfs_create_dir(buf, d_fair);
+
+ debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
+ debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
+ }
+}
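
As a usage sketch (editorial, not part of the diff), the knobs created above can be driven from userspace once debugfs is mounted; the per-CPU path layout follows from the code, while the /sys/kernel/debug mount point is the conventional location and an assumption of this example.

/* Sketch only: read and shrink cpu0's fair server runtime from userspace. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/sched/fair_server/cpu0/runtime";
	unsigned long long runtime_ns;
	FILE *f = fopen(path, "r+");

	if (!f)
		return 1;

	if (fscanf(f, "%llu", &runtime_ns) == 1)
		printf("cpu0 fair server runtime: %llu ns\n", runtime_ns);

	rewind(f);
	fprintf(f, "%llu\n", 25000000ULL);	/* request 25ms per period */
	fclose(f);
	return 0;
}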
+
static __init int sched_init_debug(void)
{
struct dentry __maybe_unused *numa;
@@ -374,6 +531,8 @@ static __init int sched_init_debug(void)
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+ debugfs_fair_server_init();
+
return 0;
}
late_initcall(sched_init_debug);
@@ -580,27 +739,27 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
else
SEQ_printf(m, " %c", task_state_to_char(p));
- SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
+ SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
SPLIT_NS(p->se.deadline),
+ p->se.custom_slice ? 'S' : ' ',
SPLIT_NS(p->se.slice),
SPLIT_NS(p->se.sum_exec_runtime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
- SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
+ SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
- SPLIT_NS(p->se.sum_exec_runtime),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
#ifdef CONFIG_NUMA_BALANCING
- SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
- SEQ_printf_task_group_path(m, task_group(p), " %s")
+ SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif
SEQ_printf(m, "\n");
@@ -612,10 +771,26 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
SEQ_printf(m, "\n");
SEQ_printf(m, "runnable tasks:\n");
- SEQ_printf(m, " S task PID tree-key switches prio"
- " wait-time sum-exec sum-sleep\n");
+ SEQ_printf(m, " S task PID vruntime eligible "
+ "deadline slice sum-exec switches "
+ "prio wait-time sum-sleep sum-block"
+#ifdef CONFIG_NUMA_BALANCING
+ " node group-id"
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ " group-path"
+#endif
+ "\n");
SEQ_printf(m, "-------------------------------------------------------"
- "------------------------------------------------------\n");
+ "------------------------------------------------------"
+ "------------------------------------------------------"
+#ifdef CONFIG_NUMA_BALANCING
+ "--------------"
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ "--------------"
+#endif
+ "\n");
rcu_read_lock();
for_each_process_thread(g, p) {
@@ -641,8 +816,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, "\n");
SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
- SPLIT_NS(cfs_rq->exec_clock));
raw_spin_rq_lock_irqsave(rq, flags);
root = __pick_root_entity(cfs_rq);
@@ -669,8 +842,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
- SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
- cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
@@ -730,9 +901,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
PU(rt_nr_running);
+
+#ifdef CONFIG_RT_GROUP_SCHED
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
+#endif
#undef PN
#undef PU
@@ -1090,6 +1264,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
+#ifdef CONFIG_SCHED_CLASS_EXT
+ __PS("ext.enabled", task_on_scx(p));
+#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
new file mode 100644
index 000000000000..c09e3dc38c34
--- /dev/null
+++ b/kernel/sched/ext.c
@@ -0,0 +1,7191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
+
+enum scx_consts {
+ SCX_DSP_DFL_MAX_BATCH = 32,
+ SCX_DSP_MAX_LOOPS = 32,
+ SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
+
+ SCX_EXIT_BT_LEN = 64,
+ SCX_EXIT_MSG_LEN = 1024,
+ SCX_EXIT_DUMP_DFL_LEN = 32768,
+
+ SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
+};
+
+enum scx_exit_kind {
+ SCX_EXIT_NONE,
+ SCX_EXIT_DONE,
+
+ SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
+ SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
+ SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
+ SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
+
+ SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
+ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
+ SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
+};
+
+/*
+ * An exit code can be specified when exiting with scx_bpf_exit() or
+ * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
+ * respectively. The codes are 64bit of the format:
+ *
+ * Bits: [63 .. 48 47 .. 32 31 .. 0]
+ * [ SYS ACT ] [ SYS RSN ] [ USR ]
+ *
+ * SYS ACT: System-defined exit actions
+ * SYS RSN: System-defined exit reasons
+ * USR : User-defined exit codes and reasons
+ *
+ * Using the above, users may communicate intention and context by ORing system
+ * actions and/or system reasons with a user-defined exit code.
+ */
+enum scx_exit_code {
+ /* Reasons */
+ SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
+
+ /* Actions */
+ SCX_ECODE_ACT_RESTART = 1LLU << 48,
+};
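
A brief sketch of the composition described above (illustrative; MY_ECODE_SAVE_STATE is a made-up user code): the system action and reason bits are ORed with the scheduler's own low 32 bits before being handed to the exit path.

/* Sketch only: build a 64-bit exit code from the fields above. */
enum { MY_ECODE_SAVE_STATE = 1 };	/* hypothetical user-defined code */

static unsigned long long demo_exit_code(void)
{
	return SCX_ECODE_ACT_RESTART |	/* ask userspace to restart us */
	       SCX_ECODE_RSN_HOTPLUG |	/* because of a hotplug event */
	       MY_ECODE_SAVE_STATE;	/* scheduler-private detail */
}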
+
+/*
+ * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
+ * being disabled.
+ */
+struct scx_exit_info {
+ /* %SCX_EXIT_* - broad category of the exit reason */
+ enum scx_exit_kind kind;
+
+ /* exit code if gracefully exiting */
+ s64 exit_code;
+
+ /* textual representation of the above */
+ const char *reason;
+
+ /* backtrace if exiting due to an error */
+ unsigned long *bt;
+ u32 bt_len;
+
+ /* informational message */
+ char *msg;
+
+ /* debug dump */
+ char *dump;
+};
+
+/* sched_ext_ops.flags */
+enum scx_ops_flags {
+ /*
+ * Keep built-in idle tracking even if ops.update_idle() is implemented.
+ */
+ SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
+
+ /*
+	 * By default, if there are no other tasks to run on the CPU, ext core
+ * keeps running the current task even after its slice expires. If this
+ * flag is specified, such tasks are passed to ops.enqueue() with
+ * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
+ */
+ SCX_OPS_ENQ_LAST = 1LLU << 1,
+
+ /*
+ * An exiting task may schedule after PF_EXITING is set. In such cases,
+ * bpf_task_from_pid() may not be able to find the task and if the BPF
+ * scheduler depends on pid lookup for dispatching, the task will be
+ * lost leading to various issues including RCU grace period stalls.
+ *
+ * To mask this problem, by default, unhashed tasks are automatically
+ * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
+ * depend on pid lookups and wants to handle these tasks directly, the
+ * following flag can be used.
+ */
+ SCX_OPS_ENQ_EXITING = 1LLU << 2,
+
+ /*
+ * If set, only tasks with policy set to SCHED_EXT are attached to
+ * sched_ext. If clear, SCHED_NORMAL tasks are also included.
+ */
+ SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
+
+ /*
+ * CPU cgroup support flags
+ */
+ SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
+
+ SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
+ SCX_OPS_ENQ_LAST |
+ SCX_OPS_ENQ_EXITING |
+ SCX_OPS_SWITCH_PARTIAL |
+ SCX_OPS_HAS_CGROUP_WEIGHT,
+};
+
+/* argument container for ops.init_task() */
+struct scx_init_task_args {
+ /*
+ * Set if ops.init_task() is being invoked on the fork path, as opposed
+ * to the scheduler transition path.
+ */
+ bool fork;
+#ifdef CONFIG_EXT_GROUP_SCHED
+ /* the cgroup the task is joining */
+ struct cgroup *cgroup;
+#endif
+};
+
+/* argument container for ops.exit_task() */
+struct scx_exit_task_args {
+ /* Whether the task exited before running on sched_ext. */
+ bool cancelled;
+};
+
+/* argument container for ops->cgroup_init() */
+struct scx_cgroup_init_args {
+ /* the weight of the cgroup [1..10000] */
+ u32 weight;
+};
+
+enum scx_cpu_preempt_reason {
+ /* next task is being scheduled by &sched_class_rt */
+ SCX_CPU_PREEMPT_RT,
+ /* next task is being scheduled by &sched_class_dl */
+ SCX_CPU_PREEMPT_DL,
+ /* next task is being scheduled by &sched_class_stop */
+ SCX_CPU_PREEMPT_STOP,
+ /* unknown reason for SCX being preempted */
+ SCX_CPU_PREEMPT_UNKNOWN,
+};
+
+/*
+ * Argument container for ops->cpu_acquire(). Currently empty, but may be
+ * expanded in the future.
+ */
+struct scx_cpu_acquire_args {};
+
+/* argument container for ops->cpu_release() */
+struct scx_cpu_release_args {
+ /* the reason the CPU was preempted */
+ enum scx_cpu_preempt_reason reason;
+
+ /* the task that's going to be scheduled on the CPU */
+ struct task_struct *task;
+};
+
+/*
+ * Informational context provided to dump operations.
+ */
+struct scx_dump_ctx {
+ enum scx_exit_kind kind;
+ s64 exit_code;
+ const char *reason;
+ u64 at_ns;
+ u64 at_jiffies;
+};
+
+/**
+ * struct sched_ext_ops - Operation table for BPF scheduler implementation
+ *
+ * Userland can implement an arbitrary scheduling policy by implementing and
+ * loading operations in this table.
+ */
+struct sched_ext_ops {
+ /**
+ * select_cpu - Pick the target CPU for a task which is being woken up
+ * @p: task being woken up
+ * @prev_cpu: the cpu @p was on before sleeping
+ * @wake_flags: SCX_WAKE_*
+ *
+ * Decision made here isn't final. @p may be moved to any CPU while it
+ * is getting dispatched for execution later. However, as @p is not on
+ * the rq at this point, getting the eventual execution CPU right here
+ * saves a small bit of overhead down the line.
+ *
+ * If an idle CPU is returned, the CPU is kicked and will try to
+ * dispatch. While an explicit custom mechanism can be added,
+ * select_cpu() serves as the default way to wake up idle CPUs.
+ *
+ * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
+ * is dispatched, the ops.enqueue() callback will be skipped. Finally,
+ * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
+ * local DSQ of whatever CPU is returned by this callback.
+ */
+ s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
+
+ /**
+ * enqueue - Enqueue a task on the BPF scheduler
+ * @p: task being enqueued
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
+ * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
+ * scheduler owns @p and if it fails to dispatch @p, the task will
+ * stall.
+ *
+ * If @p was dispatched from ops.select_cpu(), this callback is
+ * skipped.
+ */
+ void (*enqueue)(struct task_struct *p, u64 enq_flags);
+
+ /**
+ * dequeue - Remove a task from the BPF scheduler
+ * @p: task being dequeued
+ * @deq_flags: %SCX_DEQ_*
+ *
+ * Remove @p from the BPF scheduler. This is usually called to isolate
+ * the task while updating its scheduling properties (e.g. priority).
+ *
+ * The ext core keeps track of whether the BPF side owns a given task or
+ * not and can gracefully ignore spurious dispatches from BPF side,
+ * which makes it safe to not implement this method. However, depending
+ * on the scheduling logic, this can lead to confusing behaviors - e.g.
+ * scheduling position not being updated across a priority change.
+ */
+ void (*dequeue)(struct task_struct *p, u64 deq_flags);
+
+ /**
+ * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
+ * @cpu: CPU to dispatch tasks for
+ * @prev: previous task being switched out
+ *
+ * Called when a CPU's local dsq is empty. The operation should dispatch
+ * one or more tasks from the BPF scheduler into the DSQs using
+ * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
+ * scx_bpf_consume().
+ *
+ * The maximum number of times scx_bpf_dispatch() can be called without
+ * an intervening scx_bpf_consume() is specified by
+ * ops.dispatch_max_batch. See the comments on top of the two functions
+ * for more details.
+ *
+ * When not %NULL, @prev is an SCX task with its slice depleted. If
+ * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
+ * @prev->scx.flags, it is not enqueued yet and will be enqueued after
+ * ops.dispatch() returns. To keep executing @prev, return without
+ * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
+ */
+ void (*dispatch)(s32 cpu, struct task_struct *prev);
+
+ /**
+ * tick - Periodic tick
+ * @p: task running currently
+ *
+ * This operation is called every 1/HZ seconds on CPUs which are
+ * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
+ * immediate dispatch cycle on the CPU.
+ */
+ void (*tick)(struct task_struct *p);
+
+ /**
+ * runnable - A task is becoming runnable on its associated CPU
+ * @p: task becoming runnable
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * This and the following three functions can be used to track a task's
+ * execution state transitions. A task becomes ->runnable() on a CPU,
+ * and then goes through one or more ->running() and ->stopping() pairs
+ * as it runs on the CPU, and eventually becomes ->quiescent() when it's
+ * done running on the CPU.
+ *
+ * @p is becoming runnable on the CPU because it's
+ *
+ * - waking up (%SCX_ENQ_WAKEUP)
+ * - being moved from another CPU
+ * - being restored after temporarily taken off the queue for an
+ * attribute change.
+ *
+ * This and ->enqueue() are related but not coupled. This operation
+ * notifies @p's state transition and may not be followed by ->enqueue()
+ * e.g. when @p is being dispatched to a remote CPU, or when @p is
+ * being enqueued on a CPU experiencing a hotplug event. Likewise, a
+ * task may be ->enqueue()'d without being preceded by this operation
+ * e.g. after exhausting its slice.
+ */
+ void (*runnable)(struct task_struct *p, u64 enq_flags);
+
+ /**
+ * running - A task is starting to run on its associated CPU
+ * @p: task starting to run
+ *
+ * See ->runnable() for explanation on the task state notifiers.
+ */
+ void (*running)(struct task_struct *p);
+
+ /**
+ * stopping - A task is stopping execution
+ * @p: task stopping to run
+ * @runnable: is task @p still runnable?
+ *
+ * See ->runnable() for explanation on the task state notifiers. If
+ * !@runnable, ->quiescent() will be invoked after this operation
+ * returns.
+ */
+ void (*stopping)(struct task_struct *p, bool runnable);
+
+ /**
+ * quiescent - A task is becoming not runnable on its associated CPU
+ * @p: task becoming not runnable
+ * @deq_flags: %SCX_DEQ_*
+ *
+ * See ->runnable() for explanation on the task state notifiers.
+ *
+ * @p is becoming quiescent on the CPU because it's
+ *
+ * - sleeping (%SCX_DEQ_SLEEP)
+ * - being moved to another CPU
+ * - being temporarily taken off the queue for an attribute change
+ * (%SCX_DEQ_SAVE)
+ *
+ * This and ->dequeue() are related but not coupled. This operation
+ * notifies @p's state transition and may not be preceded by ->dequeue()
+ * e.g. when @p is being dispatched to a remote CPU.
+ */
+ void (*quiescent)(struct task_struct *p, u64 deq_flags);
+
+ /**
+ * yield - Yield CPU
+ * @from: yielding task
+ * @to: optional yield target task
+ *
+ * If @to is NULL, @from is yielding the CPU to other runnable tasks.
+ * The BPF scheduler should ensure that other available tasks are
+ * dispatched before the yielding task. Return value is ignored in this
+ * case.
+ *
+ * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
+ * scheduler can implement the request, return %true; otherwise, %false.
+ */
+ bool (*yield)(struct task_struct *from, struct task_struct *to);
+
+ /**
+ * core_sched_before - Task ordering for core-sched
+ * @a: task A
+ * @b: task B
+ *
+ * Used by core-sched to determine the ordering between two tasks. See
+ * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
+ * core-sched.
+ *
+ * Both @a and @b are runnable and may or may not currently be queued on
+ * the BPF scheduler. Should return %true if @a should run before @b.
+ * %false if there's no required ordering or @b should run before @a.
+ *
+ * If not specified, the default is ordering them according to when they
+ * became runnable.
+ */
+ bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
+
+ /**
+ * set_weight - Set task weight
+ * @p: task to set weight for
+ * @weight: new weight [1..10000]
+ *
+ * Update @p's weight to @weight.
+ */
+ void (*set_weight)(struct task_struct *p, u32 weight);
+
+ /**
+ * set_cpumask - Set CPU affinity
+ * @p: task to set CPU affinity for
+ * @cpumask: cpumask of cpus that @p can run on
+ *
+ * Update @p's CPU affinity to @cpumask.
+ */
+ void (*set_cpumask)(struct task_struct *p,
+ const struct cpumask *cpumask);
+
+ /**
+ * update_idle - Update the idle state of a CPU
+	 * @cpu: CPU to update the idle state for
+ * @idle: whether entering or exiting the idle state
+ *
+	 * This operation is called when @cpu enters or leaves the idle
+	 * state. By default, implementing this operation disables the built-in
+ * idle CPU tracking and the following helpers become unavailable:
+ *
+ * - scx_bpf_select_cpu_dfl()
+ * - scx_bpf_test_and_clear_cpu_idle()
+ * - scx_bpf_pick_idle_cpu()
+ *
+ * The user also must implement ops.select_cpu() as the default
+ * implementation relies on scx_bpf_select_cpu_dfl().
+ *
+ * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
+ * tracking.
+ */
+ void (*update_idle)(s32 cpu, bool idle);
+
+ /**
+ * cpu_acquire - A CPU is becoming available to the BPF scheduler
+ * @cpu: The CPU being acquired by the BPF scheduler.
+ * @args: Acquire arguments, see the struct definition.
+ *
+ * A CPU that was previously released from the BPF scheduler is now once
+ * again under its control.
+ */
+ void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
+
+ /**
+ * cpu_release - A CPU is taken away from the BPF scheduler
+ * @cpu: The CPU being released by the BPF scheduler.
+ * @args: Release arguments, see the struct definition.
+ *
+ * The specified CPU is no longer under the control of the BPF
+ * scheduler. This could be because it was preempted by a higher
+ * priority sched_class, though there may be other reasons as well. The
+ * caller should consult @args->reason to determine the cause.
+ */
+ void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
+
+ /**
+ * init_task - Initialize a task to run in a BPF scheduler
+ * @p: task to initialize for BPF scheduling
+ * @args: init arguments, see the struct definition
+ *
+ * Either we're loading a BPF scheduler or a new task is being forked.
+ * Initialize @p for BPF scheduling. This operation may block and can
+ * be used for allocations, and is called exactly once for a task.
+ *
+ * Return 0 for success, -errno for failure. An error return while
+ * loading will abort loading of the BPF scheduler. During a fork, it
+ * will abort that specific fork.
+ */
+ s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
+
+ /**
+ * exit_task - Exit a previously-running task from the system
+ * @p: task to exit
+ *
+ * @p is exiting or the BPF scheduler is being unloaded. Perform any
+ * necessary cleanup for @p.
+ */
+ void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
+
+ /**
+ * enable - Enable BPF scheduling for a task
+ * @p: task to enable BPF scheduling for
+ *
+ * Enable @p for BPF scheduling. enable() is called on @p any time it
+ * enters SCX, and is always paired with a matching disable().
+ */
+ void (*enable)(struct task_struct *p);
+
+ /**
+ * disable - Disable BPF scheduling for a task
+ * @p: task to disable BPF scheduling for
+ *
+ * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
+ * Disable BPF scheduling for @p. A disable() call is always matched
+ * with a prior enable() call.
+ */
+ void (*disable)(struct task_struct *p);
+
+ /**
+ * dump - Dump BPF scheduler state on error
+ * @ctx: debug dump context
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
+ */
+ void (*dump)(struct scx_dump_ctx *ctx);
+
+ /**
+ * dump_cpu - Dump BPF scheduler state for a CPU on error
+ * @ctx: debug dump context
+ * @cpu: CPU to generate debug dump for
+ * @idle: @cpu is currently idle without any runnable tasks
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
+ * @cpu. If @idle is %true and this operation doesn't produce any
+ * output, @cpu is skipped for dump.
+ */
+ void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
+
+ /**
+ * dump_task - Dump BPF scheduler state for a runnable task on error
+ * @ctx: debug dump context
+ * @p: runnable task to generate debug dump for
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
+ * @p.
+ */
+ void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+ /**
+ * cgroup_init - Initialize a cgroup
+ * @cgrp: cgroup being initialized
+ * @args: init arguments, see the struct definition
+ *
+	 * Either the BPF scheduler is being loaded or @cgrp is being created;
+	 * initialize @cgrp for sched_ext. This operation may block.
+ *
+ * Return 0 for success, -errno for failure. An error return while
+ * loading will abort loading of the BPF scheduler. During cgroup
+ * creation, it will abort the specific cgroup creation.
+ */
+ s32 (*cgroup_init)(struct cgroup *cgrp,
+ struct scx_cgroup_init_args *args);
+
+ /**
+ * cgroup_exit - Exit a cgroup
+ * @cgrp: cgroup being exited
+ *
+	 * Either the BPF scheduler is being unloaded or @cgrp is being
+	 * destroyed; exit @cgrp for sched_ext. This operation may block.
+ */
+ void (*cgroup_exit)(struct cgroup *cgrp);
+
+ /**
+ * cgroup_prep_move - Prepare a task to be moved to a different cgroup
+ * @p: task being moved
+ * @from: cgroup @p is being moved from
+ * @to: cgroup @p is being moved to
+ *
+ * Prepare @p for move from cgroup @from to @to. This operation may
+ * block and can be used for allocations.
+ *
+ * Return 0 for success, -errno for failure. An error return aborts the
+ * migration.
+ */
+ s32 (*cgroup_prep_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_move - Commit cgroup move
+ * @p: task being moved
+ * @from: cgroup @p is being moved from
+ * @to: cgroup @p is being moved to
+ *
+ * Commit the move. @p is dequeued during this operation.
+ */
+ void (*cgroup_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_cancel_move - Cancel cgroup move
+ * @p: task whose cgroup move is being canceled
+ * @from: cgroup @p was being moved from
+ * @to: cgroup @p was being moved to
+ *
+ * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
+ * Undo the preparation.
+ */
+ void (*cgroup_cancel_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_set_weight - A cgroup's weight is being changed
+ * @cgrp: cgroup whose weight is being updated
+ * @weight: new weight [1..10000]
+ *
+	 * Update @cgrp's weight to @weight.
+ */
+ void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
+#endif	/* CONFIG_EXT_GROUP_SCHED */
+
+ /*
+ * All online ops must come before ops.cpu_online().
+ */
+
+ /**
+ * cpu_online - A CPU became online
+ * @cpu: CPU which just came up
+ *
+ * @cpu just came online. @cpu will not call ops.enqueue() or
+ * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
+ */
+ void (*cpu_online)(s32 cpu);
+
+ /**
+ * cpu_offline - A CPU is going offline
+ * @cpu: CPU which is going offline
+ *
+ * @cpu is going offline. @cpu will not call ops.enqueue() or
+ * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
+ */
+ void (*cpu_offline)(s32 cpu);
+
+ /*
+ * All CPU hotplug ops must come before ops.init().
+ */
+
+ /**
+ * init - Initialize the BPF scheduler
+ */
+ s32 (*init)(void);
+
+ /**
+ * exit - Clean up after the BPF scheduler
+ * @info: Exit info
+ */
+ void (*exit)(struct scx_exit_info *info);
+
+ /**
+ * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
+ */
+ u32 dispatch_max_batch;
+
+ /**
+ * flags - %SCX_OPS_* flags
+ */
+ u64 flags;
+
+ /**
+ * timeout_ms - The maximum amount of time, in milliseconds, that a
+ * runnable task should be able to wait before being scheduled. The
+ * maximum timeout may not exceed the default timeout of 30 seconds.
+ *
+ * Defaults to the maximum allowed timeout value of 30 seconds.
+ */
+ u32 timeout_ms;
+
+ /**
+ * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
+ * value of 32768 is used.
+ */
+ u32 exit_dump_len;
+
+ /**
+ * hotplug_seq - A sequence number that may be set by the scheduler to
+ * detect when a hotplug event has occurred during the loading process.
+ * If 0, no detection occurs. Otherwise, the scheduler will fail to
+ * load if the sequence number does not match @scx_hotplug_seq on the
+ * enable path.
+ */
+ u64 hotplug_seq;
+
+ /**
+ * name - BPF scheduler's name
+ *
+ * Must be a non-zero valid BPF object name including only isalnum(),
+ * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
+ * BPF scheduler is enabled.
+ */
+ char name[SCX_OPS_NAME_LEN];
+};
+
+enum scx_opi {
+ SCX_OPI_BEGIN = 0,
+ SCX_OPI_NORMAL_BEGIN = 0,
+ SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
+ SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
+ SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
+ SCX_OPI_END = SCX_OP_IDX(init),
+};
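
To make the index arithmetic concrete, a self-contained sketch (not part of the patch; demo_ops is invented) of how SCX_OP_IDX() turns an ops-table member into the array index that the ranges above partition:

/* Sketch only: member offset -> op index, as SCX_OP_IDX() does. */
#include <stddef.h>

struct demo_ops {
	void (*enqueue)(void);
	void (*cpu_online)(void);
	void (*init)(void);
};

#define DEMO_OP_IDX(op) \
	(offsetof(struct demo_ops, op) / sizeof(void (*)(void)))

/* DEMO_OP_IDX(enqueue) == 0, cpu_online == 1, init == 2: hotplug ops sit
 * in [cpu_online, init), mirroring SCX_OPI_CPU_HOTPLUG_{BEGIN,END}. */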
+
+enum scx_wake_flags {
+ /* expose select WF_* flags as enums */
+ SCX_WAKE_FORK = WF_FORK,
+ SCX_WAKE_TTWU = WF_TTWU,
+ SCX_WAKE_SYNC = WF_SYNC,
+};
+
+enum scx_enq_flags {
+ /* expose select ENQUEUE_* flags as enums */
+ SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
+ SCX_ENQ_HEAD = ENQUEUE_HEAD,
+
+ /* high 32bits are SCX specific */
+
+ /*
+ * Set the following to trigger preemption when calling
+ * scx_bpf_dispatch() with a local dsq as the target. The slice of the
+ * current task is cleared to zero and the CPU is kicked into the
+ * scheduling path. Implies %SCX_ENQ_HEAD.
+ */
+ SCX_ENQ_PREEMPT = 1LLU << 32,
+
+ /*
+ * The task being enqueued was previously enqueued on the current CPU's
+ * %SCX_DSQ_LOCAL, but was removed from it in a call to the
+ * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
+ * invoked in a ->cpu_release() callback, and the task is again
+ * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the
+ * task will not be scheduled on the CPU until at least the next invocation
+ * of the ->cpu_acquire() callback.
+ */
+ SCX_ENQ_REENQ = 1LLU << 40,
+
+ /*
+ * The task being enqueued is the only task available for the cpu. By
+ * default, ext core keeps executing such tasks but when
+ * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
+ * %SCX_ENQ_LAST flag set.
+ *
+ * The BPF scheduler is responsible for triggering a follow-up
+	 * scheduling event. Otherwise, execution may stall.
+ */
+ SCX_ENQ_LAST = 1LLU << 41,
+
+ /* high 8 bits are internal */
+ __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
+
+ SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
+ SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
+};
+
+enum scx_deq_flags {
+ /* expose select DEQUEUE_* flags as enums */
+ SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
+
+ /* high 32bits are SCX specific */
+
+ /*
+ * The generic core-sched layer decided to execute the task even though
+ * it hasn't been dispatched yet. Dequeue from the BPF side.
+ */
+ SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
+};
+
+enum scx_pick_idle_cpu_flags {
+ SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
+};
+
+enum scx_kick_flags {
+ /*
+ * Kick the target CPU if idle. Guarantees that the target CPU goes
+ * through at least one full scheduling cycle before going idle. If the
+ * target CPU can be determined to be currently not idle and going to go
+ * through a scheduling cycle before going idle, noop.
+ */
+ SCX_KICK_IDLE = 1LLU << 0,
+
+ /*
+ * Preempt the current task and execute the dispatch path. If the
+ * current task of the target CPU is an SCX task, its ->scx.slice is
+ * cleared to zero before the scheduling path is invoked so that the
+ * task expires and the dispatch path is invoked.
+ */
+ SCX_KICK_PREEMPT = 1LLU << 1,
+
+ /*
+ * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
+ * return after the target CPU finishes picking the next task.
+ */
+ SCX_KICK_WAIT = 1LLU << 2,
+};
+
+enum scx_tg_flags {
+ SCX_TG_ONLINE = 1U << 0,
+ SCX_TG_INITED = 1U << 1,
+};
+
+enum scx_ops_enable_state {
+ SCX_OPS_PREPPING,
+ SCX_OPS_ENABLING,
+ SCX_OPS_ENABLED,
+ SCX_OPS_DISABLING,
+ SCX_OPS_DISABLED,
+};
+
+static const char *scx_ops_enable_state_str[] = {
+ [SCX_OPS_PREPPING] = "prepping",
+ [SCX_OPS_ENABLING] = "enabling",
+ [SCX_OPS_ENABLED] = "enabled",
+ [SCX_OPS_DISABLING] = "disabling",
+ [SCX_OPS_DISABLED] = "disabled",
+};
+
+/*
+ * sched_ext_entity->ops_state
+ *
+ * Used to track the task ownership between the SCX core and the BPF scheduler.
+ * State transitions look as follows:
+ *
+ * NONE -> QUEUEING -> QUEUED -> DISPATCHING
+ * ^ | |
+ * | v v
+ * \-------------------------------/
+ *
+ * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
+ * sites for explanations on the conditions being waited upon and why they are
+ * safe. Transitions out of them into NONE or QUEUED must store_release and the
+ * waiters should load_acquire.
+ *
+ * Tracking scx_ops_state enables sched_ext core to reliably determine whether
+ * any given task can be dispatched by the BPF scheduler at all times and thus
+ * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
+ * to try to dispatch any task anytime regardless of its state as the SCX core
+ * can safely reject invalid dispatches.
+ */
+enum scx_ops_state {
+ SCX_OPSS_NONE, /* owned by the SCX core */
+ SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
+ SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
+ SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
+
+ /*
+ * QSEQ brands each QUEUED instance so that, when dispatch races
+ * dequeue/requeue, the dispatcher can tell whether it still has a claim
+ * on the task being dispatched.
+ *
+ * As some 32bit archs can't do 64bit store_release/load_acquire,
+ * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
+ * 32bit machines. The dispatch race window QSEQ protects is very narrow
+ * and runs with IRQ disabled. 30 bits should be sufficient.
+ */
+ SCX_OPSS_QSEQ_SHIFT = 2,
+};
+
+/* Use macros to ensure that the type is unsigned long for the masks */
+#define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
+#define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
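
A short sketch of how the two masks decompose an ops_state word (editorial illustration; the packing of the qseq brand above the state bits follows the comment in the enum above):

/* Sketch only: pack/unpack state and qseq with the masks above. */
static unsigned long demo_pack(unsigned long state, unsigned long qseq)
{
	return state | (qseq << SCX_OPSS_QSEQ_SHIFT);
}

static unsigned long demo_state(unsigned long ops_state)
{
	return ops_state & SCX_OPSS_STATE_MASK;
}

static unsigned long demo_qseq(unsigned long ops_state)
{
	return (ops_state & SCX_OPSS_QSEQ_MASK) >> SCX_OPSS_QSEQ_SHIFT;
}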
+
+/*
+ * During exit, a task may schedule after losing its PIDs. When disabling the
+ * BPF scheduler, we need to be able to iterate tasks in every state to
+ * guarantee system safety. Maintain a dedicated task list which contains every
+ * task between its fork and eventual free.
+ */
+static DEFINE_SPINLOCK(scx_tasks_lock);
+static LIST_HEAD(scx_tasks);
+
+/* ops enable/disable */
+static struct kthread_worker *scx_ops_helper;
+static DEFINE_MUTEX(scx_ops_enable_mutex);
+DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
+DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
+static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
+static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
+static bool scx_switching_all;
+DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
+
+static struct sched_ext_ops scx_ops;
+static bool scx_warned_zero_slice;
+
+static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
+static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
+static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+
+static struct static_key_false scx_has_op[SCX_OPI_END] =
+ { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
+
+static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
+static struct scx_exit_info *scx_exit_info;
+
+static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
+static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
+
+/*
+ * A monotonically increasing sequence number that is incremented every time a
+ * scheduler is enabled. This can be used to check if any custom sched_ext
+ * scheduler has ever been used in the system.
+ */
+static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
+
+/*
+ * The maximum amount of time in jiffies that a task may be runnable without
+ * being scheduled on a CPU. If this timeout is exceeded, it will trigger
+ * scx_ops_error().
+ */
+static unsigned long scx_watchdog_timeout;
+
+/*
+ * The last time the delayed work was run. This delayed work relies on
+ * ksoftirqd being able to run to service timer interrupts, so it's possible
+ * that this work itself could get wedged. To account for this, we check that
+ * it's not stalled in the timer tick, and trigger an error if it is.
+ */
+static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
+
+static struct delayed_work scx_watchdog_work;
+
+/* idle tracking */
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CPUMASK_OFFSTACK
+#define CL_ALIGNED_IF_ONSTACK
+#else
+#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
+#endif
+
+static struct {
+ cpumask_var_t cpu;
+ cpumask_var_t smt;
+} idle_masks CL_ALIGNED_IF_ONSTACK;
+
+#endif /* CONFIG_SMP */
+
+/* for %SCX_KICK_WAIT */
+static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
+
+/*
+ * Direct dispatch marker.
+ *
+ * Non-NULL values are used for direct dispatch from enqueue path. A valid
+ * pointer points to the task currently being enqueued. An ERR_PTR value is used
+ * to indicate that direct dispatch has already happened.
+ */
+static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
+
+/* dispatch queues */
+static struct scx_dispatch_q __cacheline_aligned_in_smp scx_dsq_global;
+
+static const struct rhashtable_params dsq_hash_params = {
+ .key_len = 8,
+ .key_offset = offsetof(struct scx_dispatch_q, id),
+ .head_offset = offsetof(struct scx_dispatch_q, hash_node),
+};
+
+static struct rhashtable dsq_hash;
+static LLIST_HEAD(dsqs_to_free);
+
+/* dispatch buf */
+struct scx_dsp_buf_ent {
+ struct task_struct *task;
+ unsigned long qseq;
+ u64 dsq_id;
+ u64 enq_flags;
+};
+
+static u32 scx_dsp_max_batch;
+
+struct scx_dsp_ctx {
+ struct rq *rq;
+ u32 cursor;
+ u32 nr_tasks;
+ struct scx_dsp_buf_ent buf[];
+};
+
+static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
+
+/* string formatting from BPF */
+struct scx_bstr_buf {
+ u64 data[MAX_BPRINTF_VARARGS];
+ char line[SCX_EXIT_MSG_LEN];
+};
+
+static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
+static struct scx_bstr_buf scx_exit_bstr_buf;
+
+/* ops debug dump */
+struct scx_dump_data {
+ s32 cpu;
+ bool first;
+ s32 cursor;
+ struct seq_buf *s;
+ const char *prefix;
+ struct scx_bstr_buf buf;
+};
+
+static struct scx_dump_data scx_dump_data = {
+ .cpu = -1,
+};
+
+/* /sys/kernel/sched_ext interface */
+static struct kset *scx_kset;
+static struct kobject *scx_root_kobj;
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched_ext.h>
+
+static void process_ddsp_deferred_locals(struct rq *rq);
+static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
+static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
+ s64 exit_code,
+ const char *fmt, ...);
+
+#define scx_ops_error_kind(err, fmt, args...) \
+ scx_ops_exit_kind((err), 0, fmt, ##args)
+
+#define scx_ops_exit(code, fmt, args...) \
+ scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
+
+#define scx_ops_error(fmt, args...) \
+ scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
+
+#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
+
+static long jiffies_delta_msecs(unsigned long at, unsigned long now)
+{
+ if (time_after(at, now))
+ return jiffies_to_msecs(at - now);
+ else
+ return -(long)jiffies_to_msecs(now - at);
+}
+
+/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
+static u32 higher_bits(u32 flags)
+{
+ return ~((1 << fls(flags)) - 1);
+}
+
+/* return the mask with only the highest bit set */
+static u32 highest_bit(u32 flags)
+{
+ int bit = fls(flags);
+ return ((u64)1 << bit) >> 1;
+}
+
+static bool u32_before(u32 a, u32 b)
+{
+ return (s32)(a - b) < 0;
+}
+
+/*
+ * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
+ * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
+ * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
+ * whether it's running from an allowed context.
+ *
+ * @mask is constant, always inline to cull the mask calculations.
+ */
+static __always_inline void scx_kf_allow(u32 mask)
+{
+ /* nesting is allowed only in increasing scx_kf_mask order */
+ WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
+ "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
+ current->scx.kf_mask, mask);
+ current->scx.kf_mask |= mask;
+ barrier();
+}
+
+static void scx_kf_disallow(u32 mask)
+{
+ barrier();
+ current->scx.kf_mask &= ~mask;
+}
+
+#define SCX_CALL_OP(mask, op, args...) \
+do { \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ scx_ops.op(args); \
+ scx_kf_disallow(mask); \
+ } else { \
+ scx_ops.op(args); \
+ } \
+} while (0)
+
+#define SCX_CALL_OP_RET(mask, op, args...) \
+({ \
+ __typeof__(scx_ops.op(args)) __ret; \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ __ret = scx_ops.op(args); \
+ scx_kf_disallow(mask); \
+ } else { \
+ __ret = scx_ops.op(args); \
+ } \
+ __ret; \
+})
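+
+/*
+ * For illustration, call sites later in this file look like the following.
+ * An op with no return value:
+ *
+ *	SCX_CALL_OP(SCX_KF_CPU_RELEASE, cpu_release, cpu_of(rq), &args);
+ *
+ * and one with a return value:
+ *
+ *	ret = SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
+ *
+ * A zero @mask skips the scx_kf_allow()/scx_kf_disallow() bookkeeping for
+ * ops which may not call any masked kfuncs.
+ */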
+
+/*
+ * Some kfuncs are allowed only on the tasks that are subjects of the
+ * in-progress scx_ops operation, e.g. for locking guarantees. To enforce such
+ * restrictions, the following SCX_CALL_OP_*() variants should be used when
+ * invoking scx_ops operations that take task arguments. These can only be used
+ * for non-nesting operations due to the way the tasks are tracked.
+ *
+ * kfuncs which can only operate on such tasks can in turn use
+ * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
+ * the specific task.
+ */
+#define SCX_CALL_OP_TASK(mask, op, task, args...) \
+do { \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task; \
+ SCX_CALL_OP(mask, op, task, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+} while (0)
+
+#define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
+({ \
+ __typeof__(scx_ops.op(task, ##args)) __ret; \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task; \
+ __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+ __ret; \
+})
+
+#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
+({ \
+ __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task0; \
+ current->scx.kf_tasks[1] = task1; \
+ __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+ current->scx.kf_tasks[1] = NULL; \
+ __ret; \
+})
+
+/* @mask is constant, always inline to cull unnecessary branches */
+static __always_inline bool scx_kf_allowed(u32 mask)
+{
+ if (unlikely(!(current->scx.kf_mask & mask))) {
+ scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
+ mask, current->scx.kf_mask);
+ return false;
+ }
+
+ /*
+ * Enforce nesting boundaries. e.g. A kfunc which can be called from
+ * DISPATCH must not be called if we're running DEQUEUE which is nested
+ * inside ops.dispatch(). We don't need to check boundaries for any
+ * blocking kfuncs as the verifier ensures they're only called from
+ * sleepable progs.
+ */
+ if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
+ (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
+ scx_ops_error("cpu_release kfunc called from a nested operation");
+ return false;
+ }
+
+ if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
+ (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
+ scx_ops_error("dispatch kfunc called from a nested operation");
+ return false;
+ }
+
+ return true;
+}
+
+/* see SCX_CALL_OP_TASK() */
+static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
+ struct task_struct *p)
+{
+ if (!scx_kf_allowed(mask))
+ return false;
+
+ if (unlikely((p != current->scx.kf_tasks[0] &&
+ p != current->scx.kf_tasks[1]))) {
+ scx_ops_error("called on a task not being operated on");
+ return false;
+ }
+
+ return true;
+}
+
+static bool scx_kf_allowed_if_unlocked(void)
+{
+ return !current->scx.kf_mask;
+}
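+
+/*
+ * A kfunc guarded by the helpers above follows this general shape (the kfunc
+ * name here is made up purely for illustration):
+ *
+ *	__bpf_kfunc void scx_bpf_example(struct task_struct *p)
+ *	{
+ *		if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_TERMINAL, p))
+ *			return;
+ *		...
+ *	}
+ */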
+
+/**
+ * nldsq_next_task - Iterate to the next task in a non-local DSQ
+ * @dsq: user dsq being iterated
+ * @cur: current position, %NULL to start iteration
+ * @rev: walk backwards
+ *
+ * Returns %NULL when iteration is finished.
+ */
+static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
+ struct task_struct *cur, bool rev)
+{
+ struct list_head *list_node;
+ struct scx_dsq_list_node *dsq_lnode;
+
+ lockdep_assert_held(&dsq->lock);
+
+ if (cur)
+ list_node = &cur->scx.dsq_list.node;
+ else
+ list_node = &dsq->list;
+
+ /* find the next task, need to skip BPF iteration cursors */
+ do {
+ if (rev)
+ list_node = list_node->prev;
+ else
+ list_node = list_node->next;
+
+ if (list_node == &dsq->list)
+ return NULL;
+
+ dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
+ node);
+ } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
+
+ return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
+}
+
+#define nldsq_for_each_task(p, dsq) \
+ for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
+ (p) = nldsq_next_task((dsq), (p), false))
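+
+/*
+ * Both helpers above require @dsq->lock to be held. For example,
+ * consume_dispatch_q() below walks a DSQ as follows, with nldsq_next_task()
+ * transparently skipping any BPF iterator cursors linked on the list:
+ *
+ *	raw_spin_lock(&dsq->lock);
+ *	nldsq_for_each_task(p, dsq) {
+ *		...
+ *	}
+ *	raw_spin_unlock(&dsq->lock);
+ */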
+
+
+/*
+ * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
+ * dispatch order. The BPF-visible iterator is opaque and oversized to allow
+ * future changes without breaking backward compatibility. Can be used with
+ * bpf_for_each(). See bpf_iter_scx_dsq_*().
+ */
+enum scx_dsq_iter_flags {
+ /* iterate in the reverse dispatch order */
+ SCX_DSQ_ITER_REV = 1U << 16,
+
+ __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
+ __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
+
+ __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
+ __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
+ __SCX_DSQ_ITER_HAS_SLICE |
+ __SCX_DSQ_ITER_HAS_VTIME,
+};
+
+struct bpf_iter_scx_dsq_kern {
+ struct scx_dsq_list_node cursor;
+ struct scx_dispatch_q *dsq;
+ u64 slice;
+ u64 vtime;
+} __attribute__((aligned(8)));
+
+struct bpf_iter_scx_dsq {
+ u64 __opaque[6];
+} __attribute__((aligned(8)));
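+
+/*
+ * On the BPF side, the opaque struct above is driven through the
+ * bpf_iter_scx_dsq_new()/_next()/_destroy() kfuncs, typically via an
+ * open-coded iterator, e.g. (a sketch; the bpf_for_each() convenience macro
+ * comes from the scheduler-side BPF tooling rather than from this file):
+ *
+ *	bpf_for_each(scx_dsq, p, dsq_id, 0) {
+ *		...
+ *	}
+ */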
+
+
+/*
+ * SCX task iterator.
+ */
+struct scx_task_iter {
+ struct sched_ext_entity cursor;
+ struct task_struct *locked;
+ struct rq *rq;
+ struct rq_flags rf;
+};
+
+/**
+ * scx_task_iter_init - Initialize a task iterator
+ * @iter: iterator to init
+ *
+ * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
+ * @iter must eventually be exited with scx_task_iter_exit().
+ *
+ * scx_tasks_lock may be released between this and the first next() call or
+ * between any two next() calls. If scx_tasks_lock is released between two
+ * next() calls, the caller is responsible for ensuring that the task being
+ * iterated remains accessible either through RCU read lock or obtaining a
+ * reference count.
+ *
+ * All tasks which existed when the iteration started are guaranteed to be
+ * visited as long as they still exist.
+ */
+static void scx_task_iter_init(struct scx_task_iter *iter)
+{
+ lockdep_assert_held(&scx_tasks_lock);
+
+ BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
+ ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
+
+ iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
+ list_add(&iter->cursor.tasks_node, &scx_tasks);
+ iter->locked = NULL;
+}
+
+/**
+ * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
+ * @iter: iterator to unlock rq for
+ *
+ * If @iter is in the middle of a locked iteration, it may be locking the rq of
+ * the task currently being visited. Unlock the rq if so. This function can be
+ * safely called anytime during an iteration.
+ *
+ * Returns %true if the rq that @iter was locking has been unlocked, %false if
+ * @iter was not locking an rq.
+ */
+static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+{
+ if (iter->locked) {
+ task_rq_unlock(iter->rq, iter->locked, &iter->rf);
+ iter->locked = NULL;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * scx_task_iter_exit - Exit a task iterator
+ * @iter: iterator to exit
+ *
+ * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
+ * If the iterator holds a task's rq lock, that rq lock is released. See
+ * scx_task_iter_init() for details.
+ */
+static void scx_task_iter_exit(struct scx_task_iter *iter)
+{
+ lockdep_assert_held(&scx_tasks_lock);
+
+ scx_task_iter_rq_unlock(iter);
+ list_del_init(&iter->cursor.tasks_node);
+}
+
+/**
+ * scx_task_iter_next - Next task
+ * @iter: iterator to walk
+ *
+ * Visit the next task. See scx_task_iter_init() for details.
+ */
+static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
+{
+ struct list_head *cursor = &iter->cursor.tasks_node;
+ struct sched_ext_entity *pos;
+
+ lockdep_assert_held(&scx_tasks_lock);
+
+ list_for_each_entry(pos, cursor, tasks_node) {
+ if (&pos->tasks_node == &scx_tasks)
+ return NULL;
+ if (!(pos->flags & SCX_TASK_CURSOR)) {
+ list_move(cursor, &pos->tasks_node);
+ return container_of(pos, struct task_struct, scx);
+ }
+ }
+
+ /* can't happen, should always terminate at scx_tasks above */
+ BUG();
+}
+
+/**
+ * scx_task_iter_next_locked - Next non-idle task with its rq locked
+ * @iter: iterator to walk
+ *
+ * Visit the next non-idle task with its rq lock held. See scx_task_iter_init()
+ * for details.
+ */
+static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
+{
+ struct task_struct *p;
+
+ scx_task_iter_rq_unlock(iter);
+
+ while ((p = scx_task_iter_next(iter))) {
+ /*
+ * scx_task_iter is used to prepare and move tasks into SCX
+ * while loading the BPF scheduler and vice-versa while
+ * unloading. The init_tasks ("swappers") should be excluded
+ * from the iteration because:
+ *
+		 * - It's unsafe to use __setscheduler_prio() on an init_task to
+ * determine the sched_class to use as it won't preserve its
+ * idle_sched_class.
+ *
+ * - ops.init/exit_task() can easily be confused if called with
+ * init_tasks as they, e.g., share PID 0.
+ *
+ * As init_tasks are never scheduled through SCX, they can be
+		 * skipped safely. Note that is_idle_task(), which tests %PF_IDLE,
+ * doesn't work here:
+ *
+ * - %PF_IDLE may not be set for an init_task whose CPU hasn't
+ * yet been onlined.
+ *
+ * - %PF_IDLE can be set on tasks that are not init_tasks. See
+ * play_idle_precise() used by CONFIG_IDLE_INJECT.
+ *
+ * Test for idle_sched_class as only init_tasks are on it.
+ */
+ if (p->sched_class != &idle_sched_class)
+ break;
+ }
+ if (!p)
+ return NULL;
+
+ iter->rq = task_rq_lock(p, &iter->rf);
+ iter->locked = p;
+
+ return p;
+}
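+
+/*
+ * Taken together, the iterator above is used in the following pattern (a
+ * condensed sketch of what the enable/disable paths do; locking details are
+ * approximate):
+ *
+ *	spin_lock_irq(&scx_tasks_lock);
+ *	scx_task_iter_init(&sti);
+ *	while ((p = scx_task_iter_next_locked(&sti))) {
+ *		... operate on @p with its rq locked ...
+ *	}
+ *	scx_task_iter_exit(&sti);
+ *	spin_unlock_irq(&scx_tasks_lock);
+ */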
+
+static enum scx_ops_enable_state scx_ops_enable_state(void)
+{
+ return atomic_read(&scx_ops_enable_state_var);
+}
+
+static enum scx_ops_enable_state
+scx_ops_set_enable_state(enum scx_ops_enable_state to)
+{
+ return atomic_xchg(&scx_ops_enable_state_var, to);
+}
+
+static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
+ enum scx_ops_enable_state from)
+{
+ int from_v = from;
+
+ return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
+}
+
+static bool scx_rq_bypassing(struct rq *rq)
+{
+ return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
+}
+
+/**
+ * wait_ops_state - Busy-wait the specified ops state to end
+ * @p: target task
+ * @opss: state to wait the end of
+ *
+ * Busy-wait for @p to transition out of @opss. This can only be used when the
+ * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
+ * function also has load_acquire semantics to ensure that the caller can see
+ * the updates made in the enqueueing and dispatching paths.
+ */
+static void wait_ops_state(struct task_struct *p, unsigned long opss)
+{
+ do {
+ cpu_relax();
+ } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
+}
+
+/**
+ * ops_cpu_valid - Verify a cpu number
+ * @cpu: cpu number which came from a BPF ops
+ * @where: extra information reported on error
+ *
+ * @cpu is a cpu number which came from the BPF scheduler and can be any value.
+ * Verify that it is in range and one of the possible cpus. If invalid, trigger
+ * an ops error.
+ */
+static bool ops_cpu_valid(s32 cpu, const char *where)
+{
+ if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
+ return true;
+ } else {
+ scx_ops_error("invalid CPU %d%s%s", cpu,
+ where ? " " : "", where ?: "");
+ return false;
+ }
+}
+
+/**
+ * ops_sanitize_err - Sanitize a -errno value
+ * @ops_name: operation to blame on failure
+ * @err: -errno value to sanitize
+ *
+ * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
+ * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
+ * cause misbehaviors. For example, a large negative return from
+ * ops.init_task() triggers an oops when passed up the call chain because the
+ * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
+ * handled as a pointer.
+ */
+static int ops_sanitize_err(const char *ops_name, s32 err)
+{
+ if (err < 0 && err >= -MAX_ERRNO)
+ return err;
+
+ scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
+ return -EPROTO;
+}
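+
+/*
+ * As an illustration, a caller invoking an op which returns an errno does
+ * roughly the following (mask and argument details elided):
+ *
+ *	ret = SCX_CALL_OP_RET(mask, init_task, p, &args);
+ *	if (unlikely(ret))
+ *		ret = ops_sanitize_err("init_task", ret);
+ */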
+
+static void run_deferred(struct rq *rq)
+{
+ process_ddsp_deferred_locals(rq);
+}
+
+#ifdef CONFIG_SMP
+static void deferred_bal_cb_workfn(struct rq *rq)
+{
+ run_deferred(rq);
+}
+#endif
+
+static void deferred_irq_workfn(struct irq_work *irq_work)
+{
+ struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
+
+ raw_spin_rq_lock(rq);
+ run_deferred(rq);
+ raw_spin_rq_unlock(rq);
+}
+
+/**
+ * schedule_deferred - Schedule execution of deferred actions on an rq
+ * @rq: target rq
+ *
+ * Schedule execution of deferred actions on @rq. Must be called with @rq
+ * locked. Deferred actions are executed with @rq locked but unpinned, and thus
+ * can unlock @rq to e.g. migrate tasks to other rqs.
+ */
+static void schedule_deferred(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SMP
+ /*
+ * If in the middle of waking up a task, task_woken_scx() will be called
+ * afterwards which will then run the deferred actions, no need to
+ * schedule anything.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
+ return;
+
+ /*
+ * If in balance, the balance callbacks will be called before rq lock is
+ * released. Schedule one.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
+ queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
+ deferred_bal_cb_workfn);
+ return;
+ }
+#endif
+ /*
+	 * No scheduler hooks available. Queue an irq work. It is executed on
+	 * IRQ re-enable, which may take a bit longer than the scheduler hooks.
+ * The above WAKEUP and BALANCE paths should cover most of the cases and
+ * the time to IRQ re-enable shouldn't be long.
+ */
+ irq_work_queue(&rq->scx.deferred_irq_work);
+}
+
+/**
+ * touch_core_sched - Update timestamp used for core-sched task ordering
+ * @rq: rq to read clock from, must be locked
+ * @p: task to update the timestamp for
+ *
+ * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
+ * implement global or local-DSQ FIFO ordering for core-sched. Should be called
+ * when a task becomes runnable and its turn on the CPU ends (e.g. slice
+ * exhaustion).
+ */
+static void touch_core_sched(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SCHED_CORE
+ /*
+ * It's okay to update the timestamp spuriously. Use
+ * sched_core_disabled() which is cheaper than enabled().
+ *
+ * As this is used to determine ordering between tasks of sibling CPUs,
+ * it may be better to use per-core dispatch sequence instead.
+ */
+ if (!sched_core_disabled())
+ p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
+#endif
+}
+
+/**
+ * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
+ * @rq: rq to read clock from, must be locked
+ * @p: task being dispatched
+ *
+ * If the BPF scheduler implements custom core-sched ordering via
+ * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
+ * ordering within each local DSQ. This function is called from dispatch paths
+ * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
+ */
+static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SCHED_CORE
+ if (SCX_HAS_OP(core_sched_before))
+ touch_core_sched(rq, p);
+#endif
+}
+
+static void update_curr_scx(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ s64 delta_exec;
+
+ delta_exec = update_curr_common(rq);
+ if (unlikely(delta_exec <= 0))
+ return;
+
+ if (curr->scx.slice != SCX_SLICE_INF) {
+ curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
+ if (!curr->scx.slice)
+ touch_core_sched(rq, curr);
+ }
+}
+
+static bool scx_dsq_priq_less(struct rb_node *node_a,
+ const struct rb_node *node_b)
+{
+ const struct task_struct *a =
+ container_of(node_a, struct task_struct, scx.dsq_priq);
+ const struct task_struct *b =
+ container_of(node_b, struct task_struct, scx.dsq_priq);
+
+ return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
+}
+
+static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
+{
+ /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
+ WRITE_ONCE(dsq->nr, dsq->nr + delta);
+}
+
+static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
+ u64 enq_flags)
+{
+ bool is_local = dsq->id == SCX_DSQ_LOCAL;
+
+ WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+ WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
+ !RB_EMPTY_NODE(&p->scx.dsq_priq));
+
+ if (!is_local) {
+ raw_spin_lock(&dsq->lock);
+ if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
+ scx_ops_error("attempting to dispatch to a destroyed dsq");
+ /* fall back to the global dsq */
+ raw_spin_unlock(&dsq->lock);
+ dsq = &scx_dsq_global;
+ raw_spin_lock(&dsq->lock);
+ }
+ }
+
+ if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
+ (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
+ /*
+ * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
+ * their FIFO queues. To avoid confusion and accidentally
+ * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
+ * disallow any internal DSQ from doing vtime ordering of
+ * tasks.
+ */
+ scx_ops_error("cannot use vtime ordering for built-in DSQs");
+ enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
+ }
+
+ if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
+ struct rb_node *rbp;
+
+ /*
+ * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
+ * linked to both the rbtree and list on PRIQs, this can only be
+ * tested easily when adding the first task.
+ */
+ if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
+ nldsq_next_task(dsq, NULL, false)))
+ scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
+ dsq->id);
+
+ p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
+ rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
+
+ /*
+ * Find the previous task and insert after it on the list so
+ * that @dsq->list is vtime ordered.
+ */
+ rbp = rb_prev(&p->scx.dsq_priq);
+ if (rbp) {
+ struct task_struct *prev =
+ container_of(rbp, struct task_struct,
+ scx.dsq_priq);
+ list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
+ } else {
+ list_add(&p->scx.dsq_list.node, &dsq->list);
+ }
+ } else {
+ /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
+ if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
+ scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
+ dsq->id);
+
+ if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+ list_add(&p->scx.dsq_list.node, &dsq->list);
+ else
+ list_add_tail(&p->scx.dsq_list.node, &dsq->list);
+ }
+
+ /* seq records the order tasks are queued, used by BPF DSQ iterator */
+ dsq->seq++;
+ p->scx.dsq_seq = dsq->seq;
+
+ dsq_mod_nr(dsq, 1);
+ p->scx.dsq = dsq;
+
+ /*
+ * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
+ * direct dispatch path, but we clear them here because the direct
+ * dispatch verdict may be overridden on the enqueue path during e.g.
+ * bypass.
+ */
+ p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
+ p->scx.ddsp_enq_flags = 0;
+
+ /*
+ * We're transitioning out of QUEUEING or DISPATCHING. store_release to
+ * match waiters' load_acquire.
+ */
+ if (enq_flags & SCX_ENQ_CLEAR_OPSS)
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+
+ if (is_local) {
+ struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+ bool preempt = false;
+
+ if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+ rq->curr->sched_class == &ext_sched_class) {
+ rq->curr->scx.slice = 0;
+ preempt = true;
+ }
+
+ if (preempt || sched_class_above(&ext_sched_class,
+ rq->curr->sched_class))
+ resched_curr(rq);
+ } else {
+ raw_spin_unlock(&dsq->lock);
+ }
+}
+
+static void task_unlink_from_dsq(struct task_struct *p,
+ struct scx_dispatch_q *dsq)
+{
+ WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
+
+ if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
+ rb_erase(&p->scx.dsq_priq, &dsq->priq);
+ RB_CLEAR_NODE(&p->scx.dsq_priq);
+ p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
+ }
+
+ list_del_init(&p->scx.dsq_list.node);
+ dsq_mod_nr(dsq, -1);
+}
+
+static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
+{
+ struct scx_dispatch_q *dsq = p->scx.dsq;
+ bool is_local = dsq == &rq->scx.local_dsq;
+
+ if (!dsq) {
+ /*
+ * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
+ * Unlinking is all that's needed to cancel.
+ */
+ if (unlikely(!list_empty(&p->scx.dsq_list.node)))
+ list_del_init(&p->scx.dsq_list.node);
+
+ /*
+ * When dispatching directly from the BPF scheduler to a local
+ * DSQ, the task isn't associated with any DSQ but
+ * @p->scx.holding_cpu may be set under the protection of
+ * %SCX_OPSS_DISPATCHING.
+ */
+ if (p->scx.holding_cpu >= 0)
+ p->scx.holding_cpu = -1;
+
+ return;
+ }
+
+ if (!is_local)
+ raw_spin_lock(&dsq->lock);
+
+ /*
+ * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
+ * change underneath us.
+ */
+ if (p->scx.holding_cpu < 0) {
+ /* @p must still be on @dsq, dequeue */
+ task_unlink_from_dsq(p, dsq);
+ } else {
+ /*
+ * We're racing against dispatch_to_local_dsq() which already
+ * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
+ * holding_cpu which tells dispatch_to_local_dsq() that it lost
+ * the race.
+ */
+ WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
+ p->scx.holding_cpu = -1;
+ }
+ p->scx.dsq = NULL;
+
+ if (!is_local)
+ raw_spin_unlock(&dsq->lock);
+}
+
+static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
+{
+ return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
+}
+
+static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id)
+{
+ lockdep_assert(rcu_read_lock_any_held());
+
+ if (dsq_id == SCX_DSQ_GLOBAL)
+ return &scx_dsq_global;
+ else
+ return find_user_dsq(dsq_id);
+}
+
+static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
+ struct task_struct *p)
+{
+ struct scx_dispatch_q *dsq;
+
+ if (dsq_id == SCX_DSQ_LOCAL)
+ return &rq->scx.local_dsq;
+
+ if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+ s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+ if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
+ return &scx_dsq_global;
+
+ return &cpu_rq(cpu)->scx.local_dsq;
+ }
+
+ dsq = find_non_local_dsq(dsq_id);
+ if (unlikely(!dsq)) {
+ scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
+ dsq_id, p->comm, p->pid);
+ return &scx_dsq_global;
+ }
+
+ return dsq;
+}
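+
+/*
+ * As the above shows, %SCX_DSQ_LOCAL_ON encodes the target CPU in its low
+ * bits - a BPF scheduler wanting CPU 3's local DSQ would pass a dsq_id of
+ * (SCX_DSQ_LOCAL_ON | 3). Invalid CPUs and unknown IDs fall back to the
+ * global DSQ after raising an ops error.
+ */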
+
+static void mark_direct_dispatch(struct task_struct *ddsp_task,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ /*
+ * Mark that dispatch already happened from ops.select_cpu() or
+ * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
+ * which can never match a valid task pointer.
+ */
+ __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
+
+ /* @p must match the task on the enqueue path */
+ if (unlikely(p != ddsp_task)) {
+ if (IS_ERR(ddsp_task))
+ scx_ops_error("%s[%d] already direct-dispatched",
+ p->comm, p->pid);
+ else
+ scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
+ ddsp_task->comm, ddsp_task->pid,
+ p->comm, p->pid);
+ return;
+ }
+
+ WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
+ WARN_ON_ONCE(p->scx.ddsp_enq_flags);
+
+ p->scx.ddsp_dsq_id = dsq_id;
+ p->scx.ddsp_enq_flags = enq_flags;
+}
+
+static void direct_dispatch(struct task_struct *p, u64 enq_flags)
+{
+ struct rq *rq = task_rq(p);
+ struct scx_dispatch_q *dsq =
+ find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+
+ touch_core_sched_dispatch(rq, p);
+
+ p->scx.ddsp_enq_flags |= enq_flags;
+
+ /*
+ * We are in the enqueue path with @rq locked and pinned, and thus can't
+ * double lock a remote rq and enqueue to its local DSQ. For
+ * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
+ * the enqueue so that it's executed when @rq can be unlocked.
+ */
+ if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
+ unsigned long opss;
+
+ opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
+
+ switch (opss & SCX_OPSS_STATE_MASK) {
+ case SCX_OPSS_NONE:
+ break;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * As @p was never passed to the BPF side, _release is
+ * not strictly necessary. Still do it for consistency.
+ */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ break;
+ default:
+ WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
+ p->comm, p->pid, opss);
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ break;
+ }
+
+ WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+ list_add_tail(&p->scx.dsq_list.node,
+ &rq->scx.ddsp_deferred_locals);
+ schedule_deferred(rq);
+ return;
+ }
+
+ dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
+}
+
+static bool scx_rq_online(struct rq *rq)
+{
+ /*
+ * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
+ * the online state as seen from the BPF scheduler. cpu_active() test
+ * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
+ * stay set until the current scheduling operation is complete even if
+ * we aren't locking @rq.
+ */
+ return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
+}
+
+static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
+ int sticky_cpu)
+{
+ struct task_struct **ddsp_taskp;
+ unsigned long qseq;
+
+ WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
+
+ /* rq migration */
+ if (sticky_cpu == cpu_of(rq))
+ goto local_norefill;
+
+ /*
+ * If !scx_rq_online(), we already told the BPF scheduler that the CPU
+ * is offline and are just running the hotplug path. Don't bother the
+ * BPF scheduler.
+ */
+ if (!scx_rq_online(rq))
+ goto local;
+
+ if (scx_rq_bypassing(rq))
+ goto global;
+
+ if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
+ goto direct;
+
+ /* see %SCX_OPS_ENQ_EXITING */
+ if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
+ unlikely(p->flags & PF_EXITING))
+ goto local;
+
+ if (!SCX_HAS_OP(enqueue))
+ goto global;
+
+ /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
+ qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
+
+ WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
+ atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
+
+ ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
+ WARN_ON_ONCE(*ddsp_taskp);
+ *ddsp_taskp = p;
+
+ SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
+
+ *ddsp_taskp = NULL;
+ if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
+ goto direct;
+
+ /*
+ * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
+ * dequeue may be waiting. The store_release matches their load_acquire.
+ */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
+ return;
+
+direct:
+ direct_dispatch(p, enq_flags);
+ return;
+
+local:
+ /*
+ * For task-ordering, slice refill must be treated as implying the end
+ * of the current slice. Otherwise, the longer @p stays on the CPU, the
+ * higher priority it becomes from scx_prio_less()'s POV.
+ */
+ touch_core_sched(rq, p);
+ p->scx.slice = SCX_SLICE_DFL;
+local_norefill:
+ dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
+ return;
+
+global:
+ touch_core_sched(rq, p); /* see the comment in local: */
+ p->scx.slice = SCX_SLICE_DFL;
+ dispatch_enqueue(&scx_dsq_global, p, enq_flags);
+}
+
+static bool task_runnable(const struct task_struct *p)
+{
+ return !list_empty(&p->scx.runnable_node);
+}
+
+static void set_task_runnable(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+ if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
+ p->scx.runnable_at = jiffies;
+ p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
+ }
+
+ /*
+ * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
+	 * appended to the runnable_list.
+ */
+ list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
+}
+
+static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
+{
+ list_del_init(&p->scx.runnable_node);
+ if (reset_runnable_at)
+ p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
+}
+
+static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
+{
+ int sticky_cpu = p->scx.sticky_cpu;
+
+ if (enq_flags & ENQUEUE_WAKEUP)
+ rq->scx.flags |= SCX_RQ_IN_WAKEUP;
+
+ enq_flags |= rq->scx.extra_enq_flags;
+
+ if (sticky_cpu >= 0)
+ p->scx.sticky_cpu = -1;
+
+ /*
+ * Restoring a running task will be immediately followed by
+ * set_next_task_scx() which expects the task to not be on the BPF
+ * scheduler as tasks can only start running through local DSQs. Force
+ * direct-dispatch into the local DSQ by setting the sticky_cpu.
+ */
+ if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
+ sticky_cpu = cpu_of(rq);
+
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ WARN_ON_ONCE(!task_runnable(p));
+ goto out;
+ }
+
+ set_task_runnable(rq, p);
+ p->scx.flags |= SCX_TASK_QUEUED;
+ rq->scx.nr_running++;
+ add_nr_running(rq, 1);
+
+ if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
+ SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
+
+ if (enq_flags & SCX_ENQ_WAKEUP)
+ touch_core_sched(rq, p);
+
+ do_enqueue_task(rq, p, enq_flags, sticky_cpu);
+out:
+ rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
+}
+
+static void ops_dequeue(struct task_struct *p, u64 deq_flags)
+{
+ unsigned long opss;
+
+ /* dequeue is always temporary, don't reset runnable_at */
+ clr_task_runnable(p, false);
+
+ /* acquire ensures that we see the preceding updates on QUEUED */
+ opss = atomic_long_read_acquire(&p->scx.ops_state);
+
+ switch (opss & SCX_OPSS_STATE_MASK) {
+ case SCX_OPSS_NONE:
+ break;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * QUEUEING is started and finished while holding @p's rq lock.
+ * As we're holding the rq lock now, we shouldn't see QUEUEING.
+ */
+ BUG();
+ case SCX_OPSS_QUEUED:
+ if (SCX_HAS_OP(dequeue))
+ SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
+
+ if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
+ SCX_OPSS_NONE))
+ break;
+ fallthrough;
+ case SCX_OPSS_DISPATCHING:
+ /*
+ * If @p is being dispatched from the BPF scheduler to a DSQ,
+ * wait for the transfer to complete so that @p doesn't get
+ * added to its DSQ after dequeueing is complete.
+ *
+ * As we're waiting on DISPATCHING with the rq locked, the
+ * dispatching side shouldn't try to lock the rq while
+ * DISPATCHING is set. See dispatch_to_local_dsq().
+ *
+ * DISPATCHING shouldn't have qseq set and control can reach
+ * here with NONE @opss from the above QUEUED case block.
+ * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
+ */
+ wait_ops_state(p, SCX_OPSS_DISPATCHING);
+ BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
+ break;
+ }
+}
+
+static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
+{
+ if (!(p->scx.flags & SCX_TASK_QUEUED)) {
+ WARN_ON_ONCE(task_runnable(p));
+ return true;
+ }
+
+ ops_dequeue(p, deq_flags);
+
+ /*
+ * A currently running task which is going off @rq first gets dequeued
+ * and then stops running. As we want running <-> stopping transitions
+ * to be contained within runnable <-> quiescent transitions, trigger
+ * ->stopping() early here instead of in put_prev_task_scx().
+ *
+ * @p may go through multiple stopping <-> running transitions between
+ * here and put_prev_task_scx() if task attribute changes occur while
+ * balance_scx() leaves @rq unlocked. However, they don't contain any
+ * information meaningful to the BPF scheduler and can be suppressed by
+ * skipping the callbacks if the task is !QUEUED.
+ */
+ if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
+ update_curr_scx(rq);
+ SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
+ }
+
+ if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
+ SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
+
+ if (deq_flags & SCX_DEQ_SLEEP)
+ p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
+ else
+ p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
+
+ p->scx.flags &= ~SCX_TASK_QUEUED;
+ rq->scx.nr_running--;
+ sub_nr_running(rq, 1);
+
+ dispatch_dequeue(rq, p);
+ return true;
+}
+
+static void yield_task_scx(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ if (SCX_HAS_OP(yield))
+ SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
+ else
+ p->scx.slice = 0;
+}
+
+static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
+{
+ struct task_struct *from = rq->curr;
+
+ if (SCX_HAS_OP(yield))
+ return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
+ else
+ return false;
+}
+
+static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+ struct scx_dispatch_q *src_dsq,
+ struct rq *dst_rq)
+{
+ struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
+
+ /* @dsq is locked and @p is on @dst_rq */
+ lockdep_assert_held(&src_dsq->lock);
+ lockdep_assert_rq_held(dst_rq);
+
+ WARN_ON_ONCE(p->scx.holding_cpu >= 0);
+
+ if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+ list_add(&p->scx.dsq_list.node, &dst_dsq->list);
+ else
+ list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
+
+ dsq_mod_nr(dst_dsq, 1);
+ p->scx.dsq = dst_dsq;
+}
+
+#ifdef CONFIG_SMP
+/**
+ * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
+ * @p: task to move
+ * @enq_flags: %SCX_ENQ_*
+ * @src_rq: rq to move the task from, locked on entry, released on return
+ * @dst_rq: rq to move the task into, locked on return
+ *
+ * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
+ */
+static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+ struct rq *src_rq, struct rq *dst_rq)
+{
+ lockdep_assert_rq_held(src_rq);
+
+ /* the following marks @p MIGRATING which excludes dequeue */
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, cpu_of(dst_rq));
+ p->scx.sticky_cpu = cpu_of(dst_rq);
+
+ raw_spin_rq_unlock(src_rq);
+ raw_spin_rq_lock(dst_rq);
+
+ /*
+ * We want to pass scx-specific enq_flags but activate_task() will
+	 * truncate the upper 32 bits. As we own @rq, we can pass them through
+ * @rq->scx.extra_enq_flags instead.
+ */
+ WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
+ WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
+ dst_rq->scx.extra_enq_flags = enq_flags;
+ activate_task(dst_rq, p, 0);
+ dst_rq->scx.extra_enq_flags = 0;
+}
+
+/*
+ * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
+ * differences:
+ *
+ * - is_cpu_allowed() asks "Can this task run on this CPU?" while
+ * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
+ * this CPU?".
+ *
+ * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
+ * must be allowed to finish on the CPU that it's currently on regardless of
+ * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
+ * BPF scheduler shouldn't attempt to migrate a task which has migration
+ * disabled.
+ *
+ * - The BPF scheduler is bypassed while the rq is offline and we can always say
+ *   no to BPF-scheduler-initiated migrations while offline.
+ */
+static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ bool trigger_error)
+{
+ int cpu = cpu_of(rq);
+
+ /*
+ * We don't require the BPF scheduler to avoid dispatching to offline
+ * CPUs mostly for convenience but also because CPUs can go offline
+ * between scx_bpf_dispatch() calls and here. Trigger error iff the
+ * picked CPU is outside the allowed mask.
+ */
+ if (!task_allowed_on_cpu(p, cpu)) {
+ if (trigger_error)
+ scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
+ cpu_of(rq), p->comm, p->pid);
+ return false;
+ }
+
+ if (unlikely(is_migration_disabled(p)))
+ return false;
+
+ if (!scx_rq_online(rq))
+ return false;
+
+ return true;
+}
+
+/**
+ * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
+ * @p: target task
+ * @dsq: locked DSQ @p is currently on
+ * @src_rq: rq @p is currently on, stable with @dsq locked
+ *
+ * Called with @dsq locked but no rq's locked. We want to move @p to a different
+ * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
+ * required when transferring into a local DSQ. Even when transferring into a
+ * non-local DSQ, it's better to use the same mechanism to protect against
+ * dequeues and maintain the invariant that @p->scx.dsq can only change while
+ * @src_rq is locked, which e.g. scx_dump_task() depends on.
+ *
+ * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
+ * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
+ * this may race with dequeue, which can't drop the rq lock or fail, do a little
+ * dancing from our side.
+ *
+ * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
+ * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
+ * would be cleared to -1. While other cpus may have updated it to different
+ * values afterwards, as this operation can't be preempted or recurse, the
+ * holding_cpu can never become this CPU again before we're done. Thus, we can
+ * tell whether we lost to dequeue by testing whether the holding_cpu still
+ * points to this CPU. See dispatch_dequeue() for the counterpart.
+ *
+ * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
+ * still valid. %false if lost to dequeue.
+ */
+static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
+ struct scx_dispatch_q *dsq,
+ struct rq *src_rq)
+{
+ s32 cpu = raw_smp_processor_id();
+
+ lockdep_assert_held(&dsq->lock);
+
+ WARN_ON_ONCE(p->scx.holding_cpu >= 0);
+ task_unlink_from_dsq(p, dsq);
+ p->scx.holding_cpu = cpu;
+
+ raw_spin_unlock(&dsq->lock);
+ raw_spin_rq_lock(src_rq);
+
+ /* task_rq couldn't have changed if we're still the holding cpu */
+ return likely(p->scx.holding_cpu == cpu) &&
+ !WARN_ON_ONCE(src_rq != task_rq(p));
+}
+
+static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
+ struct scx_dispatch_q *dsq, struct rq *src_rq)
+{
+ raw_spin_rq_unlock(this_rq);
+
+ if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
+ move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
+ return true;
+ } else {
+ raw_spin_rq_unlock(src_rq);
+ raw_spin_rq_lock(this_rq);
+ return false;
+ }
+}
+#else /* CONFIG_SMP */
+static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
+static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
+#endif /* CONFIG_SMP */
+
+static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
+{
+ struct task_struct *p;
+retry:
+ /*
+ * The caller can't expect to successfully consume a task if the task's
+ * addition to @dsq isn't guaranteed to be visible somehow. Test
+ * @dsq->list without locking and skip if it seems empty.
+ */
+ if (list_empty(&dsq->list))
+ return false;
+
+ raw_spin_lock(&dsq->lock);
+
+ nldsq_for_each_task(p, dsq) {
+ struct rq *task_rq = task_rq(p);
+
+ if (rq == task_rq) {
+ task_unlink_from_dsq(p, dsq);
+ move_local_task_to_local_dsq(p, 0, dsq, rq);
+ raw_spin_unlock(&dsq->lock);
+ return true;
+ }
+
+ if (task_can_run_on_remote_rq(p, rq, false)) {
+ if (likely(consume_remote_task(rq, p, dsq, task_rq)))
+ return true;
+ goto retry;
+ }
+ }
+
+ raw_spin_unlock(&dsq->lock);
+ return false;
+}
+
+/**
+ * dispatch_to_local_dsq - Dispatch a task to a local dsq
+ * @rq: current rq which is locked
+ * @dst_dsq: destination DSQ
+ * @p: task to dispatch
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
+ * DSQ. This function performs all the synchronization dancing needed because
+ * local DSQs are protected with rq locks.
+ *
+ * The caller must have exclusive ownership of @p (e.g. through
+ * %SCX_OPSS_DISPATCHING).
+ */
+static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ struct task_struct *p, u64 enq_flags)
+{
+ struct rq *src_rq = task_rq(p);
+ struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+
+ /*
+ * We're synchronized against dequeue through DISPATCHING. As @p can't
+ * be dequeued, its task_rq and cpus_allowed are stable too.
+ *
+ * If dispatching to @rq that @p is already on, no lock dancing needed.
+ */
+ if (rq == src_rq && rq == dst_rq) {
+ dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+ dispatch_enqueue(&scx_dsq_global, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+ return;
+ }
+
+ /*
+ * @p is on a possibly remote @src_rq which we need to lock to move the
+ * task. If dequeue is in progress, it'd be locking @src_rq and waiting
+ * on DISPATCHING, so we can't grab @src_rq lock while holding
+ * DISPATCHING.
+ *
+ * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
+ * we're moving from a DSQ and use the same mechanism - mark the task
+ * under transfer with holding_cpu, release DISPATCHING and then follow
+ * the same protocol. See unlink_dsq_and_lock_src_rq().
+ */
+ p->scx.holding_cpu = raw_smp_processor_id();
+
+ /* store_release ensures that dequeue sees the above */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+
+ /* switch to @src_rq lock */
+ if (rq != src_rq) {
+ raw_spin_rq_unlock(rq);
+ raw_spin_rq_lock(src_rq);
+ }
+
+ /* task_rq couldn't have changed if we're still the holding cpu */
+ if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+ !WARN_ON_ONCE(src_rq != task_rq(p))) {
+ /*
+ * If @p is staying on the same rq, there's no need to go
+ * through the full deactivate/activate cycle. Optimize by
+ * abbreviating move_remote_task_to_local_dsq().
+ */
+ if (src_rq == dst_rq) {
+ p->scx.holding_cpu = -1;
+ dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
+ } else {
+ move_remote_task_to_local_dsq(p, enq_flags,
+ src_rq, dst_rq);
+ }
+
+		/* resched @dst_rq if @p preempts its current task, e.g. when idle */
+ if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+ resched_curr(dst_rq);
+ }
+
+ /* switch back to @rq lock */
+ if (rq != dst_rq) {
+ raw_spin_rq_unlock(dst_rq);
+ raw_spin_rq_lock(rq);
+ }
+#else /* CONFIG_SMP */
+ BUG(); /* control can not reach here on UP */
+#endif /* CONFIG_SMP */
+}
+
+/**
+ * finish_dispatch - Asynchronously finish dispatching a task
+ * @rq: current rq which is locked
+ * @p: task to finish dispatching
+ * @qseq_at_dispatch: qseq when @p started getting dispatched
+ * @dsq_id: destination DSQ ID
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * Dispatching to local DSQs may need to wait for queueing to complete or
+ * require rq lock dancing. As we don't want to do either while inside
+ * ops.dispatch() to avoid locking order inversion, we split dispatching into
+ * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
+ * task and its qseq. Once ops.dispatch() returns, this function is called to
+ * finish up.
+ *
+ * There is no guarantee that @p is still valid for dispatching or even that it
+ * was valid in the first place. Make sure that the task is still owned by the
+ * BPF scheduler and claim the ownership before dispatching.
+ */
+static void finish_dispatch(struct rq *rq, struct task_struct *p,
+ unsigned long qseq_at_dispatch,
+ u64 dsq_id, u64 enq_flags)
+{
+ struct scx_dispatch_q *dsq;
+ unsigned long opss;
+
+ touch_core_sched_dispatch(rq, p);
+retry:
+ /*
+ * No need for _acquire here. @p is accessed only after a successful
+ * try_cmpxchg to DISPATCHING.
+ */
+ opss = atomic_long_read(&p->scx.ops_state);
+
+ switch (opss & SCX_OPSS_STATE_MASK) {
+ case SCX_OPSS_DISPATCHING:
+ case SCX_OPSS_NONE:
+ /* someone else already got to it */
+ return;
+ case SCX_OPSS_QUEUED:
+ /*
+ * If qseq doesn't match, @p has gone through at least one
+ * dispatch/dequeue and re-enqueue cycle between
+ * scx_bpf_dispatch() and here and we have no claim on it.
+ */
+ if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
+ return;
+
+ /*
+ * While we know @p is accessible, we don't yet have a claim on
+ * it - the BPF scheduler is allowed to dispatch tasks
+ * spuriously and there can be a racing dequeue attempt. Let's
+ * claim @p by atomically transitioning it from QUEUED to
+ * DISPATCHING.
+ */
+ if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
+ SCX_OPSS_DISPATCHING)))
+ break;
+ goto retry;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * do_enqueue_task() is in the process of transferring the task
+ * to the BPF scheduler while holding @p's rq lock. As we aren't
+ * holding any kernel or BPF resource that the enqueue path may
+ * depend upon, it's safe to wait.
+ */
+ wait_ops_state(p, opss);
+ goto retry;
+ }
+
+ BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
+
+ dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
+
+ if (dsq->id == SCX_DSQ_LOCAL)
+ dispatch_to_local_dsq(rq, dsq, p, enq_flags);
+ else
+ dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+}
+
+static void flush_dispatch_buf(struct rq *rq)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ u32 u;
+
+ for (u = 0; u < dspc->cursor; u++) {
+ struct scx_dsp_buf_ent *ent = &dspc->buf[u];
+
+ finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
+ ent->enq_flags);
+ }
+
+ dspc->nr_tasks += dspc->cursor;
+ dspc->cursor = 0;
+}
+
+static int balance_one(struct rq *rq, struct task_struct *prev)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ bool prev_on_scx = prev->sched_class == &ext_sched_class;
+ int nr_loops = SCX_DSP_MAX_LOOPS;
+
+ lockdep_assert_rq_held(rq);
+ rq->scx.flags |= SCX_RQ_IN_BALANCE;
+ rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
+
+ if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
+ unlikely(rq->scx.cpu_released)) {
+ /*
+ * If the previous sched_class for the current CPU was not SCX,
+ * notify the BPF scheduler that it again has control of the
+ * core. This callback complements ->cpu_release(), which is
+		 * emitted in switch_class().
+ */
+ if (SCX_HAS_OP(cpu_acquire))
+ SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
+ rq->scx.cpu_released = false;
+ }
+
+ if (prev_on_scx) {
+ update_curr_scx(rq);
+
+ /*
+ * If @prev is runnable & has slice left, it has priority and
+ * fetching more just increases latency for the fetched tasks.
+ * Tell pick_task_scx() to keep running @prev. If the BPF
+ * scheduler wants to handle this explicitly, it should
+ * implement ->cpu_release().
+ *
+ * See scx_ops_disable_workfn() for the explanation on the
+ * bypassing test.
+ */
+ if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+ prev->scx.slice && !scx_rq_bypassing(rq)) {
+ rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ goto has_tasks;
+ }
+ }
+
+ /* if there already are tasks to run, nothing to do */
+ if (rq->scx.local_dsq.nr)
+ goto has_tasks;
+
+ if (consume_dispatch_q(rq, &scx_dsq_global))
+ goto has_tasks;
+
+ if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
+ goto no_tasks;
+
+ dspc->rq = rq;
+
+ /*
+ * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
+ * the local DSQ might still end up empty after a successful
+ * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
+ * produced some tasks, retry. The BPF scheduler may depend on this
+ * looping behavior to simplify its implementation.
+ */
+ do {
+ dspc->nr_tasks = 0;
+
+ SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
+ prev_on_scx ? prev : NULL);
+
+ flush_dispatch_buf(rq);
+
+ if (rq->scx.local_dsq.nr)
+ goto has_tasks;
+ if (consume_dispatch_q(rq, &scx_dsq_global))
+ goto has_tasks;
+
+ /*
+ * ops.dispatch() can trap us in this loop by repeatedly
+ * dispatching ineligible tasks. Break out once in a while to
+ * allow the watchdog to run. As IRQ can't be enabled in
+ * balance(), we want to complete this scheduling cycle and then
+ * start a new one. IOW, we want to call resched_curr() on the
+ * next, most likely idle, task, not the current one. Use
+ * scx_bpf_kick_cpu() for deferred kicking.
+ */
+ if (unlikely(!--nr_loops)) {
+ scx_bpf_kick_cpu(cpu_of(rq), 0);
+ break;
+ }
+ } while (dspc->nr_tasks);
+
+no_tasks:
+ /*
+ * Didn't find another task to run. Keep running @prev unless
+ * %SCX_OPS_ENQ_LAST is in effect.
+ */
+ if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+ (!static_branch_unlikely(&scx_ops_enq_last) ||
+ scx_rq_bypassing(rq))) {
+ rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ goto has_tasks;
+ }
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
+ return false;
+
+has_tasks:
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
+ return true;
+}
+
+static int balance_scx(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
+{
+ int ret;
+
+ rq_unpin_lock(rq, rf);
+
+ ret = balance_one(rq, prev);
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When core-sched is enabled, this ops.balance() call will be followed
+ * by pick_task_scx() on this CPU and the SMT siblings. Balance the
+ * siblings too.
+ */
+ if (sched_core_enabled(rq)) {
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
+ int scpu;
+
+ for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
+ struct rq *srq = cpu_rq(scpu);
+ struct task_struct *sprev = srq->curr;
+
+ WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
+ update_rq_clock(srq);
+ balance_one(srq, sprev);
+ }
+ }
+#endif
+ rq_repin_lock(rq, rf);
+
+ return ret;
+}
+
+static void process_ddsp_deferred_locals(struct rq *rq)
+{
+ struct task_struct *p;
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * Now that @rq can be unlocked, execute the deferred enqueueing of
+ * tasks directly dispatched to the local DSQs of other CPUs. See
+ * direct_dispatch(). Keep popping from the head instead of using
+	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
+ * temporarily.
+ */
+ while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
+ struct task_struct, scx.dsq_list.node))) {
+ struct scx_dispatch_q *dsq;
+
+ list_del_init(&p->scx.dsq_list.node);
+
+ dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+ if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
+ dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
+ }
+}
+
+static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
+{
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ /*
+ * Core-sched might decide to execute @p before it is
+ * dispatched. Call ops_dequeue() to notify the BPF scheduler.
+ */
+ ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
+ dispatch_dequeue(rq, p);
+ }
+
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* see dequeue_task_scx() on why we skip when !QUEUED */
+ if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
+ SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
+
+ clr_task_runnable(p, true);
+
+ /*
+ * @p is getting newly scheduled or got kicked after someone updated its
+ * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
+ */
+ if ((p->scx.slice == SCX_SLICE_INF) !=
+ (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
+ if (p->scx.slice == SCX_SLICE_INF)
+ rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
+ else
+ rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
+
+ sched_update_tick_dependency(rq);
+
+ /*
+ * For now, let's refresh the load_avgs just when transitioning
+ * in and out of nohz. In the future, we might want to add a
+ * mechanism which calls the following periodically on
+ * tick-stopped CPUs.
+ */
+ update_other_load_avgs(rq);
+ }
+}
+
+static enum scx_cpu_preempt_reason
+preempt_reason_from_class(const struct sched_class *class)
+{
+#ifdef CONFIG_SMP
+ if (class == &stop_sched_class)
+ return SCX_CPU_PREEMPT_STOP;
+#endif
+ if (class == &dl_sched_class)
+ return SCX_CPU_PREEMPT_DL;
+ if (class == &rt_sched_class)
+ return SCX_CPU_PREEMPT_RT;
+ return SCX_CPU_PREEMPT_UNKNOWN;
+}
+
+static void switch_class(struct rq *rq, struct task_struct *next)
+{
+ const struct sched_class *next_class = next->sched_class;
+
+#ifdef CONFIG_SMP
+ /*
+ * Pairs with the smp_load_acquire() issued by a CPU in
+	 * kick_cpus_irq_workfn() which is waiting for this CPU to perform a
+ * resched.
+ */
+ smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+#endif
+ if (!static_branch_unlikely(&scx_ops_cpu_preempt))
+ return;
+
+ /*
+ * The callback is conceptually meant to convey that the CPU is no
+ * longer under the control of SCX. Therefore, don't invoke the callback
+ * if the next class is below SCX (in which case the BPF scheduler has
+ * actively decided not to schedule any tasks on the CPU).
+ */
+ if (sched_class_above(&ext_sched_class, next_class))
+ return;
+
+ /*
+ * At this point we know that SCX was preempted by a higher priority
+ * sched_class, so invoke the ->cpu_release() callback if we have not
+ * done so already. We only send the callback once between SCX being
+ * preempted, and it regaining control of the CPU.
+ *
+ * ->cpu_release() complements ->cpu_acquire(), which is emitted the
+ * next time that balance_scx() is invoked.
+ */
+ if (!rq->scx.cpu_released) {
+ if (SCX_HAS_OP(cpu_release)) {
+ struct scx_cpu_release_args args = {
+ .reason = preempt_reason_from_class(next_class),
+ .task = next,
+ };
+
+ SCX_CALL_OP(SCX_KF_CPU_RELEASE,
+ cpu_release, cpu_of(rq), &args);
+ }
+ rq->scx.cpu_released = true;
+ }
+}
+
+static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+ struct task_struct *next)
+{
+ update_curr_scx(rq);
+
+ /* see dequeue_task_scx() on why we skip when !QUEUED */
+ if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
+ SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
+
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ set_task_runnable(rq, p);
+
+ /*
+ * If @p has slice left and is being put, @p is getting
+ * preempted by a higher priority scheduler class or core-sched
+ * forcing a different task. Leave it at the head of the local
+ * DSQ.
+ */
+ if (p->scx.slice && !scx_rq_bypassing(rq)) {
+ dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
+ return;
+ }
+
+ /*
+ * If @p is runnable but we're about to enter a lower
+ * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
+ * ops.enqueue() that @p is the only one available for this cpu,
+ * which should trigger an explicit follow-up scheduling event.
+ */
+ if (sched_class_above(&ext_sched_class, next->sched_class)) {
+ WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
+ do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
+ } else {
+ do_enqueue_task(rq, p, 0, -1);
+ }
+ }
+
+ if (next && next->sched_class != &ext_sched_class)
+ switch_class(rq, next);
+}
+
+static struct task_struct *first_local_task(struct rq *rq)
+{
+ return list_first_entry_or_null(&rq->scx.local_dsq.list,
+ struct task_struct, scx.dsq_list.node);
+}
+
+static struct task_struct *pick_task_scx(struct rq *rq)
+{
+ struct task_struct *prev = rq->curr;
+ struct task_struct *p;
+
+ /*
+ * If balance_scx() is telling us to keep running @prev, replenish slice
+ * if necessary and keep running @prev. Otherwise, pop the first one
+ * from the local DSQ.
+ *
+ * WORKAROUND:
+ *
+	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
+ * have gone through balance_scx(). Unfortunately, there currently is a
+ * bug where fair could say yes on balance() but no on pick_task(),
+ * which then ends up calling pick_task_scx() without preceding
+ * balance_scx().
+ *
+	 * For now, ignore cases where @prev is not on SCX. This isn't great and
+ * can theoretically lead to stalls. However, for switch_all cases, this
+ * happens only while a BPF scheduler is being loaded or unloaded, and,
+ * for partial cases, fair will likely keep triggering this CPU.
+ *
+ * Once fair is fixed, restore WARN_ON_ONCE().
+ */
+ if ((rq->scx.flags & SCX_RQ_BAL_KEEP) &&
+ prev->sched_class == &ext_sched_class) {
+ p = prev;
+ if (!p->scx.slice)
+ p->scx.slice = SCX_SLICE_DFL;
+ } else {
+ p = first_local_task(rq);
+ if (!p)
+ return NULL;
+
+ if (unlikely(!p->scx.slice)) {
+ if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
+				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_task_scx()\n",
+ p->comm, p->pid);
+ scx_warned_zero_slice = true;
+ }
+ p->scx.slice = SCX_SLICE_DFL;
+ }
+ }
+
+ return p;
+}
+
+#ifdef CONFIG_SCHED_CORE
+/**
+ * scx_prio_less - Task ordering for core-sched
+ * @a: task A
+ * @b: task B
+ *
+ * Core-sched is implemented as an additional scheduling layer on top of the
+ * usual sched_class'es and needs to find out the expected task ordering. For
+ * SCX, core-sched calls this function to interrogate the task ordering.
+ *
+ * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
+ * to implement the default task ordering. The older the timestamp, the higher
+ * priority the task - the global FIFO ordering matching the default scheduling
+ * behavior.
+ *
+ * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
+ * implement FIFO ordering within each local DSQ. See pick_task_scx().
+ */
+bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi)
+{
+ /*
+ * The const qualifiers are dropped from task_struct pointers when
+ * calling ops.core_sched_before(). Accesses are controlled by the
+ * verifier.
+ */
+ if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
+ return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
+ (struct task_struct *)a,
+ (struct task_struct *)b);
+ else
+ return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
+}
+#endif /* CONFIG_SCHED_CORE */
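+
+/*
+ * For illustration, a BPF scheduler wanting custom core-sched ordering could
+ * implement the callback along these lines (a hedged sketch, not part of this
+ * patch; "myops", the BPF_STRUCT_OPS() macro from the example scheduler
+ * headers, and the use of ->scx.dsq_vtime are assumptions):
+ *
+ *	bool BPF_STRUCT_OPS(myops_core_sched_before,
+ *			    struct task_struct *a, struct task_struct *b)
+ *	{
+ *		return (s64)(a->scx.dsq_vtime - b->scx.dsq_vtime) < 0;
+ *	}
+ */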
+
+#ifdef CONFIG_SMP
+
+static bool test_and_clear_cpu_idle(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * SMT mask should be cleared whether we can claim @cpu or not. The SMT
+ * cluster is not wholly idle either way. This also prevents
+ * scx_pick_idle_cpu() from getting caught in an infinite loop.
+ */
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+
+ /*
+ * If offline, @cpu is not its own sibling and
+ * scx_pick_idle_cpu() can get caught in an infinite loop as
+ * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
+ * is eventually cleared.
+ */
+ if (cpumask_intersects(smt, idle_masks.smt))
+ cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
+ else if (cpumask_test_cpu(cpu, idle_masks.smt))
+ __cpumask_clear_cpu(cpu, idle_masks.smt);
+ }
+#endif
+ return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
+}
+
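+/*
+ * Pick and claim an idle CPU from @cpus_allowed. Wholly idle SMT cores are
+ * preferred when SMT is active; with %SCX_PICK_IDLE_CORE, only such cores are
+ * considered. Returns the claimed CPU or -EBUSY if none is available.
+ */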
+static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
+{
+ int cpu;
+
+retry:
+ if (sched_smt_active()) {
+ cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ goto found;
+
+ if (flags & SCX_PICK_IDLE_CORE)
+ return -EBUSY;
+ }
+
+ cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
+ if (cpu >= nr_cpu_ids)
+ return -EBUSY;
+
+found:
+ if (test_and_clear_cpu_idle(cpu))
+ return cpu;
+ else
+ goto retry;
+}
+
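+/*
+ * The default CPU selection logic, used when ops.select_cpu() is not
+ * implemented (see select_task_rq_scx()). Preference order: the waker's CPU
+ * for sync wakeups on an underutilized system, a wholly idle SMT core
+ * (@prev_cpu first), then any idle CPU (@prev_cpu first). *@found tells the
+ * caller whether an idle CPU was actually claimed.
+ */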
+static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags, bool *found)
+{
+ s32 cpu;
+
+ *found = false;
+
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return prev_cpu;
+ }
+
+ /*
+ * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
+	 * underutilized, wake up @p to the local DSQ of the waker. Checking
+ * only for an empty local DSQ is insufficient as it could give the
+ * wakee an unfair advantage when the system is oversaturated.
+ * Checking only for the presence of idle CPUs is also insufficient as
+ * the local DSQ of the waker could have tasks piled up on it even if
+ * there is an idle core elsewhere on the system.
+ */
+ cpu = smp_processor_id();
+ if ((wake_flags & SCX_WAKE_SYNC) && p->nr_cpus_allowed > 1 &&
+ !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
+ cpu_rq(cpu)->scx.local_dsq.nr == 0) {
+ if (cpumask_test_cpu(cpu, p->cpus_ptr))
+ goto cpu_found;
+ }
+
+ if (p->nr_cpus_allowed == 1) {
+ if (test_and_clear_cpu_idle(prev_cpu)) {
+ cpu = prev_cpu;
+ goto cpu_found;
+ } else {
+ return prev_cpu;
+ }
+ }
+
+ /*
+ * If CPU has SMT, any wholly idle CPU is likely a better pick than
+ * partially idle @prev_cpu.
+ */
+ if (sched_smt_active()) {
+ if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
+ test_and_clear_cpu_idle(prev_cpu)) {
+ cpu = prev_cpu;
+ goto cpu_found;
+ }
+
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
+ if (cpu >= 0)
+ goto cpu_found;
+ }
+
+ if (test_and_clear_cpu_idle(prev_cpu)) {
+ cpu = prev_cpu;
+ goto cpu_found;
+ }
+
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ goto cpu_found;
+
+ return prev_cpu;
+
+cpu_found:
+ *found = true;
+ return cpu;
+}
+
+static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
+{
+ /*
+	 * sched_exec() calls select_task_rq() with %WF_EXEC when @p is about to
+	 * exec(2) as that can be a good migration opportunity with low cache and
+	 * memory footprint. Returning a CPU different from @prev_cpu triggers
+ * immediate rq migration. However, for SCX, as the current rq
+ * association doesn't dictate where the task is going to run, this
+ * doesn't fit well. If necessary, we can later add a dedicated method
+ * which can decide to preempt self to force it through the regular
+ * scheduling path.
+ */
+ if (unlikely(wake_flags & WF_EXEC))
+ return prev_cpu;
+
+ if (SCX_HAS_OP(select_cpu)) {
+ s32 cpu;
+ struct task_struct **ddsp_taskp;
+
+ ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
+ WARN_ON_ONCE(*ddsp_taskp);
+ *ddsp_taskp = p;
+
+ cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
+ select_cpu, p, prev_cpu, wake_flags);
+ *ddsp_taskp = NULL;
+ if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
+ return cpu;
+ else
+ return prev_cpu;
+ } else {
+ bool found;
+ s32 cpu;
+
+ cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
+ if (found) {
+ p->scx.slice = SCX_SLICE_DFL;
+ p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
+ }
+ return cpu;
+ }
+}
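+
+/*
+ * For illustration, an ops.select_cpu() implementation can reuse the default
+ * idle CPU selection and direct-dispatch on success, roughly as the example
+ * schedulers do (a hedged sketch; "myops" is an assumption and the kfunc
+ * names follow the example schedulers of this era):
+ *
+ *	s32 BPF_STRUCT_OPS(myops_select_cpu, struct task_struct *p,
+ *			   s32 prev_cpu, u64 wake_flags)
+ *	{
+ *		bool found;
+ *		s32 cpu;
+ *
+ *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
+ *		if (found)
+ *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ *		return cpu;
+ *	}
+ */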
+
+static void task_woken_scx(struct rq *rq, struct task_struct *p)
+{
+ run_deferred(rq);
+}
+
+static void set_cpus_allowed_scx(struct task_struct *p,
+ struct affinity_context *ac)
+{
+ set_cpus_allowed_common(p, ac);
+
+ /*
+ * The effective cpumask is stored in @p->cpus_ptr which may temporarily
+ * differ from the configured one in @p->cpus_mask. Always tell the bpf
+ * scheduler the effective one.
+ *
+	 * Fine-grained memory write control is enforced by the BPF verifier,
+	 * making the const designation pointless. Cast it away when calling the
+	 * operation.
+ */
+ if (SCX_HAS_OP(set_cpumask))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+ (struct cpumask *)p->cpus_ptr);
+}
+
+static void reset_idle_masks(void)
+{
+ /*
+ * Consider all online cpus idle. Should converge to the actual state
+ * quickly.
+ */
+ cpumask_copy(idle_masks.cpu, cpu_online_mask);
+ cpumask_copy(idle_masks.smt, cpu_online_mask);
+}
+
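+/*
+ * Called when @rq's CPU enters or leaves the idle state. When
+ * ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not set,
+ * the built-in idle mask tracking below is skipped.
+ */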
+void __scx_update_idle(struct rq *rq, bool idle)
+{
+ int cpu = cpu_of(rq);
+
+ if (SCX_HAS_OP(update_idle)) {
+		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu, idle);
+ if (!static_branch_unlikely(&scx_builtin_idle_enabled))
+ return;
+ }
+
+ if (idle)
+ cpumask_set_cpu(cpu, idle_masks.cpu);
+ else
+ cpumask_clear_cpu(cpu, idle_masks.cpu);
+
+#ifdef CONFIG_SCHED_SMT
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+
+ if (idle) {
+ /*
+ * idle_masks.smt handling is racy but that's fine as
+ * it's only for optimization and self-correcting.
+ */
+ for_each_cpu(cpu, smt) {
+ if (!cpumask_test_cpu(cpu, idle_masks.cpu))
+ return;
+ }
+ cpumask_or(idle_masks.smt, idle_masks.smt, smt);
+ } else {
+ cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
+ }
+ }
+#endif
+}
+
+static void handle_hotplug(struct rq *rq, bool online)
+{
+ int cpu = cpu_of(rq);
+
+ atomic_long_inc(&scx_hotplug_seq);
+
+ if (online && SCX_HAS_OP(cpu_online))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
+ else if (!online && SCX_HAS_OP(cpu_offline))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
+ else
+ scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
+ "cpu %d going %s, exiting scheduler", cpu,
+ online ? "online" : "offline");
+}
+
+void scx_rq_activate(struct rq *rq)
+{
+ handle_hotplug(rq, true);
+}
+
+void scx_rq_deactivate(struct rq *rq)
+{
+ handle_hotplug(rq, false);
+}
+
+static void rq_online_scx(struct rq *rq)
+{
+ rq->scx.flags |= SCX_RQ_ONLINE;
+}
+
+static void rq_offline_scx(struct rq *rq)
+{
+ rq->scx.flags &= ~SCX_RQ_ONLINE;
+}
+
+#else /* CONFIG_SMP */
+
+static bool test_and_clear_cpu_idle(int cpu) { return false; }
+static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
+static void reset_idle_masks(void) {}
+
+#endif /* CONFIG_SMP */
+
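+/*
+ * Watchdog: a task that has stayed runnable for longer than
+ * scx_watchdog_timeout (from ops->timeout_ms, defaulting to
+ * %SCX_WATCHDOG_MAX_TIMEOUT) is considered stalled by the BPF scheduler and
+ * the scheduler is aborted with %SCX_EXIT_ERROR_STALL.
+ */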
+static bool check_rq_for_timeouts(struct rq *rq)
+{
+ struct task_struct *p;
+ struct rq_flags rf;
+ bool timed_out = false;
+
+ rq_lock_irqsave(rq, &rf);
+ list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
+ unsigned long last_runnable = p->scx.runnable_at;
+
+ if (unlikely(time_after(jiffies,
+ last_runnable + scx_watchdog_timeout))) {
+ u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
+
+ scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
+ "%s[%d] failed to run for %u.%03us",
+ p->comm, p->pid,
+ dur_ms / 1000, dur_ms % 1000);
+ timed_out = true;
+ break;
+ }
+ }
+ rq_unlock_irqrestore(rq, &rf);
+
+ return timed_out;
+}
+
+static void scx_watchdog_workfn(struct work_struct *work)
+{
+ int cpu;
+
+ WRITE_ONCE(scx_watchdog_timestamp, jiffies);
+
+ for_each_online_cpu(cpu) {
+ if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
+ break;
+
+ cond_resched();
+ }
+ queue_delayed_work(system_unbound_wq, to_delayed_work(work),
+ scx_watchdog_timeout / 2);
+}
+
+void scx_tick(struct rq *rq)
+{
+ unsigned long last_check;
+
+ if (!scx_enabled())
+ return;
+
+ last_check = READ_ONCE(scx_watchdog_timestamp);
+ if (unlikely(time_after(jiffies,
+ last_check + READ_ONCE(scx_watchdog_timeout)))) {
+ u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
+
+ scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
+ "watchdog failed to check in for %u.%03us",
+ dur_ms / 1000, dur_ms % 1000);
+ }
+
+ update_other_load_avgs(rq);
+}
+
+static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
+{
+ update_curr_scx(rq);
+
+ /*
+ * While disabling, always resched and refresh core-sched timestamp as
+ * we can't trust the slice management or ops.core_sched_before().
+ */
+ if (scx_rq_bypassing(rq)) {
+ curr->scx.slice = 0;
+ touch_core_sched(rq, curr);
+ } else if (SCX_HAS_OP(tick)) {
+ SCX_CALL_OP(SCX_KF_REST, tick, curr);
+ }
+
+ if (!curr->scx.slice)
+ resched_curr(rq);
+}
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+static struct cgroup *tg_cgrp(struct task_group *tg)
+{
+ /*
+ * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
+ * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
+ * root cgroup.
+ */
+ if (tg && tg->css.cgroup)
+ return tg->css.cgroup;
+ else
+ return &cgrp_dfl_root.cgrp;
+}
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
+
+#else /* CONFIG_EXT_GROUP_SCHED */
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg)
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
+static enum scx_task_state scx_get_task_state(const struct task_struct *p)
+{
+ return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
+}
+
+static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
+{
+ enum scx_task_state prev_state = scx_get_task_state(p);
+ bool warn = false;
+
+ BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
+
+ switch (state) {
+ case SCX_TASK_NONE:
+ break;
+ case SCX_TASK_INIT:
+ warn = prev_state != SCX_TASK_NONE;
+ break;
+ case SCX_TASK_READY:
+ warn = prev_state == SCX_TASK_NONE;
+ break;
+ case SCX_TASK_ENABLED:
+ warn = prev_state != SCX_TASK_READY;
+ break;
+ default:
+ warn = true;
+ return;
+ }
+
+ WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
+ prev_state, state, p->comm, p->pid);
+
+ p->scx.flags &= ~SCX_TASK_STATE_MASK;
+ p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
+}
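+
+/*
+ * The resulting state machine:
+ *
+ *	NONE -> INIT -> READY <-> ENABLED
+ *
+ * where any state drops back to NONE through scx_ops_exit_task().
+ */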
+
+static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
+{
+ int ret;
+
+ p->scx.disallow = false;
+
+ if (SCX_HAS_OP(init_task)) {
+ struct scx_init_task_args args = {
+ SCX_INIT_TASK_ARGS_CGROUP(tg)
+ .fork = fork,
+ };
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
+ if (unlikely(ret)) {
+ ret = ops_sanitize_err("init_task", ret);
+ return ret;
+ }
+ }
+
+ scx_set_task_state(p, SCX_TASK_INIT);
+
+ if (p->scx.disallow) {
+ if (!fork) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+
+ /*
+ * We're in the load path and @p->policy will be applied
+ * right after. Reverting @p->policy here and rejecting
+ * %SCHED_EXT transitions from scx_check_setscheduler()
+			 * guarantees that if ops.init_task() sets @p->scx.disallow,
+ * @p can never be in SCX.
+ */
+ if (p->policy == SCHED_EXT) {
+ p->policy = SCHED_NORMAL;
+ atomic_long_inc(&scx_nr_rejected);
+ }
+
+ task_rq_unlock(rq, p, &rf);
+ } else if (p->policy == SCHED_EXT) {
+ scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
+ p->comm, p->pid);
+ }
+ }
+
+ p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
+ return 0;
+}
+
+static void scx_ops_enable_task(struct task_struct *p)
+{
+ u32 weight;
+
+ lockdep_assert_rq_held(task_rq(p));
+
+ /*
+ * Set the weight before calling ops.enable() so that the scheduler
+ * doesn't see a stale value if they inspect the task struct.
+ */
+ if (task_has_idle_policy(p))
+ weight = WEIGHT_IDLEPRIO;
+ else
+ weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
+
+ p->scx.weight = sched_weight_to_cgroup(weight);
+
+ if (SCX_HAS_OP(enable))
+ SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
+ scx_set_task_state(p, SCX_TASK_ENABLED);
+
+ if (SCX_HAS_OP(set_weight))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
+}
+
+static void scx_ops_disable_task(struct task_struct *p)
+{
+ lockdep_assert_rq_held(task_rq(p));
+ WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
+
+ if (SCX_HAS_OP(disable))
+ SCX_CALL_OP(SCX_KF_REST, disable, p);
+ scx_set_task_state(p, SCX_TASK_READY);
+}
+
+static void scx_ops_exit_task(struct task_struct *p)
+{
+ struct scx_exit_task_args args = {
+ .cancelled = false,
+ };
+
+ lockdep_assert_rq_held(task_rq(p));
+
+ switch (scx_get_task_state(p)) {
+ case SCX_TASK_NONE:
+ return;
+ case SCX_TASK_INIT:
+ args.cancelled = true;
+ break;
+ case SCX_TASK_READY:
+ break;
+ case SCX_TASK_ENABLED:
+ scx_ops_disable_task(p);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return;
+ }
+
+ if (SCX_HAS_OP(exit_task))
+ SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
+ scx_set_task_state(p, SCX_TASK_NONE);
+}
+
+void init_scx_entity(struct sched_ext_entity *scx)
+{
+ /*
+ * init_idle() calls this function again after fork sequence is
+ * complete. Don't touch ->tasks_node as it's already linked.
+ */
+ memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
+
+ INIT_LIST_HEAD(&scx->dsq_list.node);
+ RB_CLEAR_NODE(&scx->dsq_priq);
+ scx->sticky_cpu = -1;
+ scx->holding_cpu = -1;
+ INIT_LIST_HEAD(&scx->runnable_node);
+ scx->runnable_at = jiffies;
+ scx->ddsp_dsq_id = SCX_DSQ_INVALID;
+ scx->slice = SCX_SLICE_DFL;
+}
+
+void scx_pre_fork(struct task_struct *p)
+{
+ /*
+ * BPF scheduler enable/disable paths want to be able to iterate and
+ * update all tasks which can become complex when racing forks. As
+ * enable/disable are very cold paths, let's use a percpu_rwsem to
+ * exclude forks.
+ */
+ percpu_down_read(&scx_fork_rwsem);
+}
+
+int scx_fork(struct task_struct *p)
+{
+ percpu_rwsem_assert_held(&scx_fork_rwsem);
+
+ if (scx_enabled())
+ return scx_ops_init_task(p, task_group(p), true);
+ else
+ return 0;
+}
+
+void scx_post_fork(struct task_struct *p)
+{
+ if (scx_enabled()) {
+ scx_set_task_state(p, SCX_TASK_READY);
+
+ /*
+ * Enable the task immediately if it's running on sched_ext.
+ * Otherwise, it'll be enabled in switching_to_scx() if and
+ * when it's ever configured to run with a SCHED_EXT policy.
+ */
+ if (p->sched_class == &ext_sched_class) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ scx_ops_enable_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+ }
+
+ spin_lock_irq(&scx_tasks_lock);
+ list_add_tail(&p->scx.tasks_node, &scx_tasks);
+ spin_unlock_irq(&scx_tasks_lock);
+
+ percpu_up_read(&scx_fork_rwsem);
+}
+
+void scx_cancel_fork(struct task_struct *p)
+{
+ if (scx_enabled()) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
+ scx_ops_exit_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+
+ percpu_up_read(&scx_fork_rwsem);
+}
+
+void sched_ext_free(struct task_struct *p)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scx_tasks_lock, flags);
+ list_del_init(&p->scx.tasks_node);
+ spin_unlock_irqrestore(&scx_tasks_lock, flags);
+
+ /*
+ * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
+ * ENABLED transitions can't race us. Disable ops for @p.
+ */
+ if (scx_get_task_state(p) != SCX_TASK_NONE) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ scx_ops_exit_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+}
+
+static void reweight_task_scx(struct rq *rq, struct task_struct *p,
+ const struct load_weight *lw)
+{
+ lockdep_assert_rq_held(task_rq(p));
+
+ p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
+ if (SCX_HAS_OP(set_weight))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
+}
+
+static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
+{
+}
+
+static void switching_to_scx(struct rq *rq, struct task_struct *p)
+{
+ scx_ops_enable_task(p);
+
+ /*
+ * set_cpus_allowed_scx() is not called while @p is associated with a
+ * different scheduler class. Keep the BPF scheduler up-to-date.
+ */
+ if (SCX_HAS_OP(set_cpumask))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+ (struct cpumask *)p->cpus_ptr);
+}
+
+static void switched_from_scx(struct rq *rq, struct task_struct *p)
+{
+ scx_ops_disable_task(p);
+}
+
+static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
+static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
+
+int scx_check_setscheduler(struct task_struct *p, int policy)
+{
+ lockdep_assert_rq_held(task_rq(p));
+
+ /* if disallow, reject transitioning into SCX */
+ if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
+ p->policy != policy && policy == SCHED_EXT)
+ return -EACCES;
+
+ return 0;
+}
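+
+/*
+ * For illustration, a BPF scheduler could opt tasks out of SCX from
+ * ops.init_task(), which the check above then enforces (a hedged sketch;
+ * "myops" and the kthread filter are made-up examples):
+ *
+ *	s32 BPF_STRUCT_OPS(myops_init_task, struct task_struct *p,
+ *			   struct scx_init_task_args *args)
+ *	{
+ *		if (p->flags & PF_KTHREAD)
+ *			p->scx.disallow = true;
+ *		return 0;
+ *	}
+ */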
+
+#ifdef CONFIG_NO_HZ_FULL
+bool scx_can_stop_tick(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ if (scx_rq_bypassing(rq))
+ return false;
+
+ if (p->sched_class != &ext_sched_class)
+ return true;
+
+ /*
+ * @rq can dispatch from different DSQs, so we can't tell whether it
+ * needs the tick or not by looking at nr_running. Allow stopping ticks
+ * iff the BPF scheduler indicated so. See set_next_task_scx().
+ */
+ return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
+}
+#endif
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+
+DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
+static bool cgroup_warned_missing_weight;
+static bool cgroup_warned_missing_idle;
+
+static void scx_cgroup_warn_missing_weight(struct task_group *tg)
+{
+ if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
+ cgroup_warned_missing_weight)
+ return;
+
+ if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
+ return;
+
+ pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
+ scx_ops.name);
+ cgroup_warned_missing_weight = true;
+}
+
+static void scx_cgroup_warn_missing_idle(struct task_group *tg)
+{
+ if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
+ cgroup_warned_missing_idle)
+ return;
+
+ if (!tg->idle)
+ return;
+
+ pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
+ scx_ops.name);
+ cgroup_warned_missing_idle = true;
+}
+
+int scx_tg_online(struct task_group *tg)
+{
+ int ret = 0;
+
+ WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
+
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ scx_cgroup_warn_missing_weight(tg);
+
+ if (SCX_HAS_OP(cgroup_init)) {
+ struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+ tg->css.cgroup, &args);
+ if (!ret)
+ tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
+ else
+ ret = ops_sanitize_err("cgroup_init", ret);
+ } else {
+ tg->scx_flags |= SCX_TG_ONLINE;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+ return ret;
+}
+
+void scx_tg_offline(struct task_group *tg)
+{
+ WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
+
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
+ tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
+
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+int scx_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+ struct cgroup_subsys_state *css;
+ struct task_struct *p;
+ int ret;
+
+ /* released in scx_finish/cancel_attach() */
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (!scx_enabled())
+ return 0;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ struct cgroup *from = tg_cgrp(task_group(p));
+ struct cgroup *to = tg_cgrp(css_tg(css));
+
+ WARN_ON_ONCE(p->scx.cgrp_moving_from);
+
+ /*
+ * sched_move_task() omits identity migrations. Let's match the
+ * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
+ * always match one-to-one.
+ */
+ if (from == to)
+ continue;
+
+ if (SCX_HAS_OP(cgroup_prep_move)) {
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
+ p, from, css->cgroup);
+ if (ret)
+ goto err;
+ }
+
+ p->scx.cgrp_moving_from = from;
+ }
+
+ return 0;
+
+err:
+ cgroup_taskset_for_each(p, css, tset) {
+ if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+ p->scx.cgrp_moving_from, css->cgroup);
+ p->scx.cgrp_moving_from = NULL;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+ return ops_sanitize_err("cgroup_prep_move", ret);
+}
+
+void scx_move_task(struct task_struct *p)
+{
+ if (!scx_enabled())
+ return;
+
+ /*
+ * We're called from sched_move_task() which handles both cgroup and
+ * autogroup moves. Ignore the latter.
+ *
+ * Also ignore exiting tasks, because in the exit path tasks transition
+ * from the autogroup to the root group, so task_group_is_autogroup()
+ * alone isn't able to catch exiting autogroup tasks. This is safe for
+ * cgroup_move(), because cgroup migrations never happen for PF_EXITING
+ * tasks.
+ */
+ if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
+ return;
+
+ /*
+ * @p must have ops.cgroup_prep_move() called on it and thus
+ * cgrp_moving_from set.
+ */
+ if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
+ SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
+ p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
+ p->scx.cgrp_moving_from = NULL;
+}
+
+void scx_cgroup_finish_attach(void)
+{
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+ struct cgroup_subsys_state *css;
+ struct task_struct *p;
+
+ if (!scx_enabled())
+ goto out_unlock;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+ p->scx.cgrp_moving_from, css->cgroup);
+ p->scx.cgrp_moving_from = NULL;
+ }
+out_unlock:
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_weight(struct task_group *tg, unsigned long weight)
+{
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (tg->scx_weight != weight) {
+ if (SCX_HAS_OP(cgroup_set_weight))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
+ tg_cgrp(tg), weight);
+ tg->scx_weight = weight;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_idle(struct task_group *tg, bool idle)
+{
+ percpu_down_read(&scx_cgroup_rwsem);
+ scx_cgroup_warn_missing_idle(tg);
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_lock(void)
+{
+ percpu_down_write(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_unlock(void)
+{
+ percpu_up_write(&scx_cgroup_rwsem);
+}
+
+#else /* CONFIG_EXT_GROUP_SCHED */
+
+static inline void scx_cgroup_lock(void) {}
+static inline void scx_cgroup_unlock(void) {}
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
+/*
+ * Omitted operations:
+ *
+ * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
+ * isn't tied to the CPU at that point. Preemption is implemented by resetting
+ * the victim task's slice to 0 and triggering reschedule on the target CPU.
+ *
+ * - migrate_task_rq: Unnecessary as the task-to-CPU mapping is transient.
+ *
+ * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
+ * their current sched_class. Call them directly from sched core instead.
+ */
+DEFINE_SCHED_CLASS(ext) = {
+ .enqueue_task = enqueue_task_scx,
+ .dequeue_task = dequeue_task_scx,
+ .yield_task = yield_task_scx,
+ .yield_to_task = yield_to_task_scx,
+
+ .wakeup_preempt = wakeup_preempt_scx,
+
+ .balance = balance_scx,
+ .pick_task = pick_task_scx,
+
+ .put_prev_task = put_prev_task_scx,
+ .set_next_task = set_next_task_scx,
+
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_scx,
+ .task_woken = task_woken_scx,
+ .set_cpus_allowed = set_cpus_allowed_scx,
+
+ .rq_online = rq_online_scx,
+ .rq_offline = rq_offline_scx,
+#endif
+
+ .task_tick = task_tick_scx,
+
+ .switching_to = switching_to_scx,
+ .switched_from = switched_from_scx,
+ .switched_to = switched_to_scx,
+ .reweight_task = reweight_task_scx,
+ .prio_changed = prio_changed_scx,
+
+ .update_curr = update_curr_scx,
+
+#ifdef CONFIG_UCLAMP_TASK
+ .uclamp_enabled = 1,
+#endif
+};
+
+static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
+{
+ memset(dsq, 0, sizeof(*dsq));
+
+ raw_spin_lock_init(&dsq->lock);
+ INIT_LIST_HEAD(&dsq->list);
+ dsq->id = dsq_id;
+}
+
+static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
+{
+ struct scx_dispatch_q *dsq;
+ int ret;
+
+ if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
+ return ERR_PTR(-EINVAL);
+
+ dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
+ if (!dsq)
+ return ERR_PTR(-ENOMEM);
+
+ init_dsq(dsq, dsq_id);
+
+ ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
+ dsq_hash_params);
+ if (ret) {
+ kfree(dsq);
+ return ERR_PTR(ret);
+ }
+ return dsq;
+}
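+
+/*
+ * User DSQs are usually created from the BPF side through the
+ * scx_bpf_create_dsq() kfunc, e.g. from a sleepable ops.init() (a hedged
+ * sketch; %MY_DSQ_ID is an arbitrary example ID and the macro comes from the
+ * example scheduler headers):
+ *
+ *	s32 BPF_STRUCT_OPS_SLEEPABLE(myops_init)
+ *	{
+ *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);
+ *	}
+ */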
+
+static void free_dsq_irq_workfn(struct irq_work *irq_work)
+{
+ struct llist_node *to_free = llist_del_all(&dsqs_to_free);
+ struct scx_dispatch_q *dsq, *tmp_dsq;
+
+ llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
+ kfree_rcu(dsq, rcu);
+}
+
+static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
+
+static void destroy_dsq(u64 dsq_id)
+{
+ struct scx_dispatch_q *dsq;
+ unsigned long flags;
+
+ rcu_read_lock();
+
+ dsq = find_user_dsq(dsq_id);
+ if (!dsq)
+ goto out_unlock_rcu;
+
+ raw_spin_lock_irqsave(&dsq->lock, flags);
+
+ if (dsq->nr) {
+ scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
+ dsq->id, dsq->nr);
+ goto out_unlock_dsq;
+ }
+
+ if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
+ goto out_unlock_dsq;
+
+ /*
+ * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
+ * queueing more tasks. As this function can be called from anywhere,
+ * freeing is bounced through an irq work to avoid nesting RCU
+ * operations inside scheduler locks.
+ */
+ dsq->id = SCX_DSQ_INVALID;
+ llist_add(&dsq->free_node, &dsqs_to_free);
+ irq_work_queue(&free_dsq_irq_work);
+
+out_unlock_dsq:
+ raw_spin_unlock_irqrestore(&dsq->lock, flags);
+out_unlock_rcu:
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+static void scx_cgroup_exit(void)
+{
+ struct cgroup_subsys_state *css;
+
+ percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+ /*
+ * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+ * cgroups and exit all the inited ones, all online cgroups are exited.
+ */
+ rcu_read_lock();
+ css_for_each_descendant_post(css, &root_task_group.css) {
+ struct task_group *tg = css_tg(css);
+
+ if (!(tg->scx_flags & SCX_TG_INITED))
+ continue;
+ tg->scx_flags &= ~SCX_TG_INITED;
+
+ if (!scx_ops.cgroup_exit)
+ continue;
+
+ if (WARN_ON_ONCE(!css_tryget(css)))
+ continue;
+ rcu_read_unlock();
+
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
+
+ rcu_read_lock();
+ css_put(css);
+ }
+ rcu_read_unlock();
+}
+
+static int scx_cgroup_init(void)
+{
+ struct cgroup_subsys_state *css;
+ int ret;
+
+ percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+ cgroup_warned_missing_weight = false;
+ cgroup_warned_missing_idle = false;
+
+ /*
+	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+	 * cgroups and init all the online ones, all online cgroups are initialized.
+ */
+ rcu_read_lock();
+ css_for_each_descendant_pre(css, &root_task_group.css) {
+ struct task_group *tg = css_tg(css);
+ struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+
+ scx_cgroup_warn_missing_weight(tg);
+ scx_cgroup_warn_missing_idle(tg);
+
+ if ((tg->scx_flags &
+ (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
+ continue;
+
+ if (!scx_ops.cgroup_init) {
+ tg->scx_flags |= SCX_TG_INITED;
+ continue;
+ }
+
+ if (WARN_ON_ONCE(!css_tryget(css)))
+ continue;
+ rcu_read_unlock();
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+ css->cgroup, &args);
+ if (ret) {
+ css_put(css);
+ return ret;
+ }
+ tg->scx_flags |= SCX_TG_INITED;
+
+ rcu_read_lock();
+ css_put(css);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+#else
+static void scx_cgroup_exit(void) {}
+static int scx_cgroup_init(void) { return 0; }
+#endif
+
+/********************************************************************************
+ * Sysfs interface and ops enable/disable.
+ */
+
+#define SCX_ATTR(_name) \
+ static struct kobj_attribute scx_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = scx_attr_##_name##_show, \
+ }
+
+static ssize_t scx_attr_state_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ scx_ops_enable_state_str[scx_ops_enable_state()]);
+}
+SCX_ATTR(state);
+
+static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
+}
+SCX_ATTR(switch_all);
+
+static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
+}
+SCX_ATTR(nr_rejected);
+
+static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
+}
+SCX_ATTR(hotplug_seq);
+
+static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
+}
+SCX_ATTR(enable_seq);
+
+static struct attribute *scx_global_attrs[] = {
+ &scx_attr_state.attr,
+ &scx_attr_switch_all.attr,
+ &scx_attr_nr_rejected.attr,
+ &scx_attr_hotplug_seq.attr,
+ &scx_attr_enable_seq.attr,
+ NULL,
+};
+
+static const struct attribute_group scx_global_attr_group = {
+ .attrs = scx_global_attrs,
+};
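+
+/*
+ * Assuming the default "sched_ext" kset under /sys/kernel, the attributes
+ * above surface as /sys/kernel/sched_ext/{state,switch_all,nr_rejected,
+ * hotplug_seq,enable_seq}.
+ */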
+
+static void scx_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static ssize_t scx_attr_ops_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", scx_ops.name);
+}
+SCX_ATTR(ops);
+
+static struct attribute *scx_sched_attrs[] = {
+ &scx_attr_ops.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scx_sched);
+
+static const struct kobj_type scx_ktype = {
+ .release = scx_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = scx_sched_groups,
+};
+
+static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
+}
+
+static const struct kset_uevent_ops scx_uevent_ops = {
+ .uevent = scx_uevent,
+};
+
+/*
+ * Used by sched_fork() and __setscheduler_prio() to pick the matching
+ * sched_class. dl/rt are already handled.
+ */
+bool task_should_scx(struct task_struct *p)
+{
+ if (!scx_enabled() ||
+ unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
+ return false;
+ if (READ_ONCE(scx_switching_all))
+ return true;
+ return p->policy == SCHED_EXT;
+}
+
+/**
+ * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
+ * @bypass: true to enter the bypass mode, false to leave it
+ *
+ * Bypassing guarantees that all runnable tasks make forward progress without
+ * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
+ * be held by tasks that the BPF scheduler is forgetting to run, which
+ * unfortunately also excludes toggling the static branches.
+ *
+ * Let's work around by overriding a couple of ops and modifying behaviors based on
+ * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
+ * to force global FIFO scheduling.
+ *
+ * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
+ * %SCX_OPS_ENQ_LAST is also ignored.
+ *
+ * b. ops.dispatch() is ignored.
+ *
+ * c. balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * can't be trusted. Whenever a tick triggers, the running task is rotated to
+ * the tail of the queue with core_sched_at touched.
+ *
+ * d. pick_task_scx() suppresses the zero slice warning.
+ *
+ * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
+ * operations.
+ *
+ * f. scx_prio_less() reverts to the default core_sched_at order.
+ */
+static void scx_ops_bypass(bool bypass)
+{
+ int depth, cpu;
+
+ if (bypass) {
+ depth = atomic_inc_return(&scx_ops_bypass_depth);
+ WARN_ON_ONCE(depth <= 0);
+ if (depth != 1)
+ return;
+ } else {
+ depth = atomic_dec_return(&scx_ops_bypass_depth);
+ WARN_ON_ONCE(depth < 0);
+ if (depth != 0)
+ return;
+ }
+
+ /*
+ * No task property is changing. We just need to make sure all currently
+ * queued tasks are re-queued according to the new scx_rq_bypassing()
+ * state. As an optimization, walk each rq's runnable_list instead of
+ * the scx_tasks list.
+ *
+ * This function can't trust the scheduler and thus can't use
+ * cpus_read_lock(). Walk all possible CPUs instead of online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ struct task_struct *p, *n;
+
+ rq_lock_irqsave(rq, &rf);
+
+ if (bypass) {
+ WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
+ rq->scx.flags |= SCX_RQ_BYPASSING;
+ } else {
+ WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
+ rq->scx.flags &= ~SCX_RQ_BYPASSING;
+ }
+
+ /*
+ * We need to guarantee that no tasks are on the BPF scheduler
+ * while bypassing. Either we see enabled or the enable path
+ * sees scx_rq_bypassing() before moving tasks to SCX.
+ */
+ if (!scx_enabled()) {
+ rq_unlock_irqrestore(rq, &rf);
+ continue;
+ }
+
+ /*
+ * The use of list_for_each_entry_safe_reverse() is required
+ * because each task is going to be removed from and added back
+ * to the runnable_list during iteration. Because they're added
+ * to the tail of the list, safe reverse iteration can still
+ * visit all nodes.
+ */
+ list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
+ scx.runnable_node) {
+ struct sched_enq_and_set_ctx ctx;
+
+ /* cycling deq/enq is enough, see the function comment */
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+ sched_enq_and_set_task(&ctx);
+ }
+
+ rq_unlock_irqrestore(rq, &rf);
+
+ /* kick to restore ticks */
+ resched_cpu(cpu);
+ }
+}
+
+static void free_exit_info(struct scx_exit_info *ei)
+{
+ kfree(ei->dump);
+ kfree(ei->msg);
+ kfree(ei->bt);
+ kfree(ei);
+}
+
+static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
+{
+ struct scx_exit_info *ei;
+
+ ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ if (!ei)
+ return NULL;
+
+ ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
+ ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
+ ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
+
+ if (!ei->bt || !ei->msg || !ei->dump) {
+ free_exit_info(ei);
+ return NULL;
+ }
+
+ return ei;
+}
+
+static const char *scx_exit_reason(enum scx_exit_kind kind)
+{
+ switch (kind) {
+ case SCX_EXIT_UNREG:
+ return "unregistered from user space";
+ case SCX_EXIT_UNREG_BPF:
+ return "unregistered from BPF";
+ case SCX_EXIT_UNREG_KERN:
+ return "unregistered from the main kernel";
+ case SCX_EXIT_SYSRQ:
+ return "disabled by sysrq-S";
+ case SCX_EXIT_ERROR:
+ return "runtime error";
+ case SCX_EXIT_ERROR_BPF:
+ return "scx_bpf_error";
+ case SCX_EXIT_ERROR_STALL:
+ return "runnable task stall";
+ default:
+ return "<UNKNOWN>";
+ }
+}
+
+static void scx_ops_disable_workfn(struct kthread_work *work)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+ struct scx_task_iter sti;
+ struct task_struct *p;
+ struct rhashtable_iter rht_iter;
+ struct scx_dispatch_q *dsq;
+ int i, kind;
+
+ kind = atomic_read(&scx_exit_kind);
+ while (true) {
+ /*
+ * NONE indicates that a new scx_ops has been registered since
+ * disable was scheduled - don't kill the new ops. DONE
+ * indicates that the ops has already been disabled.
+ */
+ if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
+ return;
+ if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
+ break;
+ }
+ ei->kind = kind;
+ ei->reason = scx_exit_reason(ei->kind);
+
+ /* guarantee forward progress by bypassing scx_ops */
+ scx_ops_bypass(true);
+
+ switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
+ case SCX_OPS_DISABLING:
+ WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
+ break;
+ case SCX_OPS_DISABLED:
+ pr_warn("sched_ext: ops error detected without ops (%s)\n",
+ scx_exit_info->msg);
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
+ SCX_OPS_DISABLING);
+ goto done;
+ default:
+ break;
+ }
+
+ /*
+ * Here, every runnable task is guaranteed to make forward progress and
+ * we can safely use blocking synchronization constructs. Actually
+ * disable ops.
+ */
+ mutex_lock(&scx_ops_enable_mutex);
+
+ static_branch_disable(&__scx_switched_all);
+ WRITE_ONCE(scx_switching_all, false);
+
+ /*
+ * Avoid racing against fork and cgroup changes. See scx_ops_enable()
+ * for explanation on the locking order.
+ */
+ percpu_down_write(&scx_fork_rwsem);
+ cpus_read_lock();
+ scx_cgroup_lock();
+
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_init(&sti);
+ /*
+ * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
+ * must be switched out and exited synchronously.
+ */
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ const struct sched_class *old_class = p->sched_class;
+ struct sched_enq_and_set_ctx ctx;
+
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+
+ p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
+ __setscheduler_prio(p, p->prio);
+ check_class_changing(task_rq(p), p, old_class);
+
+ sched_enq_and_set_task(&ctx);
+
+ check_class_changed(task_rq(p), p, old_class, p->prio);
+ scx_ops_exit_task(p);
+ }
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+
+ /* no task is on scx, turn off all the switches and flush in-progress calls */
+ static_branch_disable_cpuslocked(&__scx_ops_enabled);
+ for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
+ static_branch_disable_cpuslocked(&scx_has_op[i]);
+ static_branch_disable_cpuslocked(&scx_ops_enq_last);
+ static_branch_disable_cpuslocked(&scx_ops_enq_exiting);
+ static_branch_disable_cpuslocked(&scx_ops_cpu_preempt);
+ static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
+ synchronize_rcu();
+
+ scx_cgroup_exit();
+
+ scx_cgroup_unlock();
+ cpus_read_unlock();
+ percpu_up_write(&scx_fork_rwsem);
+
+ if (ei->kind >= SCX_EXIT_ERROR) {
+ pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
+ scx_ops.name, ei->reason);
+
+ if (ei->msg[0] != '\0')
+ pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
+#ifdef CONFIG_STACKTRACE
+ stack_trace_print(ei->bt, ei->bt_len, 2);
+#endif
+ } else {
+ pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
+ scx_ops.name, ei->reason);
+ }
+
+ if (scx_ops.exit)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
+
+ cancel_delayed_work_sync(&scx_watchdog_work);
+
+ /*
+ * Delete the kobject from the hierarchy eagerly in addition to just
+ * dropping a reference. Otherwise, if the object is deleted
+ * asynchronously, sysfs could observe an object of the same name still
+ * in the hierarchy when another scheduler is loaded.
+ */
+ kobject_del(scx_root_kobj);
+ kobject_put(scx_root_kobj);
+ scx_root_kobj = NULL;
+
+ memset(&scx_ops, 0, sizeof(scx_ops));
+
+ rhashtable_walk_enter(&dsq_hash, &rht_iter);
+ do {
+ rhashtable_walk_start(&rht_iter);
+
+ while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
+ destroy_dsq(dsq->id);
+
+ rhashtable_walk_stop(&rht_iter);
+ } while (dsq == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&rht_iter);
+
+ free_percpu(scx_dsp_ctx);
+ scx_dsp_ctx = NULL;
+ scx_dsp_max_batch = 0;
+
+ free_exit_info(scx_exit_info);
+ scx_exit_info = NULL;
+
+ mutex_unlock(&scx_ops_enable_mutex);
+
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
+ SCX_OPS_DISABLING);
+done:
+ scx_ops_bypass(false);
+}
+
+static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
+
+static void schedule_scx_ops_disable_work(void)
+{
+ struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
+
+ /*
+ * We may be called spuriously before the first bpf_sched_ext_reg(). If
+ * scx_ops_helper isn't set up yet, there's nothing to do.
+ */
+ if (helper)
+ kthread_queue_work(helper, &scx_ops_disable_work);
+}
+
+static void scx_ops_disable(enum scx_exit_kind kind)
+{
+ int none = SCX_EXIT_NONE;
+
+ if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+ kind = SCX_EXIT_ERROR;
+
+ atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
+
+ schedule_scx_ops_disable_work();
+}
+
+static void dump_newline(struct seq_buf *s)
+{
+ trace_sched_ext_dump("");
+
+ /* @s may be zero sized and seq_buf triggers WARN if so */
+ if (s->size)
+ seq_buf_putc(s, '\n');
+}
+
+static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
+{
+ va_list args;
+
+#ifdef CONFIG_TRACEPOINTS
+ if (trace_sched_ext_dump_enabled()) {
+ /* protected by scx_dump_state()::dump_lock */
+ static char line_buf[SCX_EXIT_MSG_LEN];
+
+ va_start(args, fmt);
+ vscnprintf(line_buf, sizeof(line_buf), fmt, args);
+ va_end(args);
+
+ trace_sched_ext_dump(line_buf);
+ }
+#endif
+ /* @s may be zero sized and seq_buf triggers WARN if so */
+ if (s->size) {
+ va_start(args, fmt);
+ seq_buf_vprintf(s, fmt, args);
+ va_end(args);
+
+ seq_buf_putc(s, '\n');
+ }
+}
+
+static void dump_stack_trace(struct seq_buf *s, const char *prefix,
+ const unsigned long *bt, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ dump_line(s, "%s%pS", prefix, (void *)bt[i]);
+}
+
+static void ops_dump_init(struct seq_buf *s, const char *prefix)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+
+ lockdep_assert_irqs_disabled();
+
+ dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
+ dd->first = true;
+ dd->cursor = 0;
+ dd->s = s;
+ dd->prefix = prefix;
+}
+
+static void ops_dump_flush(void)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+ char *line = dd->buf.line;
+
+ if (!dd->cursor)
+ return;
+
+ /*
+	 * There's something to flush. If this is the first line, insert a blank
+	 * line to set the ops dump apart.
+ */
+ if (dd->first) {
+ dump_newline(dd->s);
+ dd->first = false;
+ }
+
+ /*
+ * There may be multiple lines in $line. Scan and emit each line
+ * separately.
+ */
+ while (true) {
+ char *end = line;
+ char c;
+
+ while (*end != '\n' && *end != '\0')
+ end++;
+
+ /*
+		 * If $line overflowed, it may not have a newline at the end.
+ * Always emit with a newline.
+ */
+ c = *end;
+ *end = '\0';
+ dump_line(dd->s, "%s%s", dd->prefix, line);
+ if (c == '\0')
+ break;
+
+ /* move to the next line */
+ end++;
+ if (*end == '\0')
+ break;
+ line = end;
+ }
+
+ dd->cursor = 0;
+}
+
+static void ops_dump_exit(void)
+{
+ ops_dump_flush();
+ scx_dump_data.cpu = -1;
+}
+
+static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
+ struct task_struct *p, char marker)
+{
+ static unsigned long bt[SCX_EXIT_BT_LEN];
+ char dsq_id_buf[19] = "(n/a)";
+ unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
+ unsigned int bt_len = 0;
+
+ if (p->scx.dsq)
+ scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
+ (unsigned long long)p->scx.dsq->id);
+
+ dump_newline(s);
+ dump_line(s, " %c%c %s[%d] %+ldms",
+ marker, task_state_to_char(p), p->comm, p->pid,
+ jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
+ dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
+ scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
+ p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
+ ops_state >> SCX_OPSS_QSEQ_SHIFT);
+ dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
+ p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
+ p->scx.dsq_vtime);
+ dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
+
+ if (SCX_HAS_OP(dump_task)) {
+ ops_dump_init(s, " ");
+ SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
+ ops_dump_exit();
+ }
+
+#ifdef CONFIG_STACKTRACE
+ bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
+#endif
+ if (bt_len) {
+ dump_newline(s);
+ dump_stack_trace(s, " ", bt, bt_len);
+ }
+}
+
+static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
+{
+ static DEFINE_SPINLOCK(dump_lock);
+ static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
+ struct scx_dump_ctx dctx = {
+ .kind = ei->kind,
+ .exit_code = ei->exit_code,
+ .reason = ei->reason,
+ .at_ns = ktime_get_ns(),
+ .at_jiffies = jiffies,
+ };
+ struct seq_buf s;
+ unsigned long flags;
+ char *buf;
+ int cpu;
+
+ spin_lock_irqsave(&dump_lock, flags);
+
+ seq_buf_init(&s, ei->dump, dump_len);
+
+ if (ei->kind == SCX_EXIT_NONE) {
+ dump_line(&s, "Debug dump triggered by %s", ei->reason);
+ } else {
+ dump_line(&s, "%s[%d] triggered exit kind %d:",
+ current->comm, current->pid, ei->kind);
+ dump_line(&s, " %s (%s)", ei->reason, ei->msg);
+ dump_newline(&s);
+ dump_line(&s, "Backtrace:");
+ dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
+ }
+
+ if (SCX_HAS_OP(dump)) {
+ ops_dump_init(&s, "");
+ SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
+ ops_dump_exit();
+ }
+
+ dump_newline(&s);
+ dump_line(&s, "CPU states");
+ dump_line(&s, "----------");
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ struct task_struct *p;
+ struct seq_buf ns;
+ size_t avail, used;
+ bool idle;
+
+ rq_lock(rq, &rf);
+
+ idle = list_empty(&rq->scx.runnable_list) &&
+ rq->curr->sched_class == &idle_sched_class;
+
+ if (idle && !SCX_HAS_OP(dump_cpu))
+ goto next;
+
+ /*
+ * We don't yet know whether ops.dump_cpu() will produce output
+ * and we may want to skip the default CPU dump if it doesn't.
+ * Use a nested seq_buf to generate the standard dump so that we
+ * can decide whether to commit later.
+ */
+ avail = seq_buf_get_buf(&s, &buf);
+ seq_buf_init(&ns, buf, avail);
+
+ dump_newline(&ns);
+ dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
+ cpu, rq->scx.nr_running, rq->scx.flags,
+ rq->scx.cpu_released, rq->scx.ops_qseq,
+ rq->scx.pnt_seq);
+ dump_line(&ns, " curr=%s[%d] class=%ps",
+ rq->curr->comm, rq->curr->pid,
+ rq->curr->sched_class);
+ if (!cpumask_empty(rq->scx.cpus_to_kick))
+ dump_line(&ns, " cpus_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick));
+ if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
+ dump_line(&ns, " idle_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
+ if (!cpumask_empty(rq->scx.cpus_to_preempt))
+ dump_line(&ns, " cpus_to_preempt: %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_preempt));
+ if (!cpumask_empty(rq->scx.cpus_to_wait))
+ dump_line(&ns, " cpus_to_wait : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_wait));
+
+ used = seq_buf_used(&ns);
+ if (SCX_HAS_OP(dump_cpu)) {
+ ops_dump_init(&ns, " ");
+ SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
+ ops_dump_exit();
+ }
+
+ /*
+ * If idle && nothing generated by ops.dump_cpu(), there's
+ * nothing interesting. Skip.
+ */
+ if (idle && used == seq_buf_used(&ns))
+ goto next;
+
+ /*
+ * $s may already have overflowed when $ns was created. If so,
+ * calling commit on it will trigger BUG.
+ */
+ if (avail) {
+ seq_buf_commit(&s, seq_buf_used(&ns));
+ if (seq_buf_has_overflowed(&ns))
+ seq_buf_set_overflow(&s);
+ }
+
+ if (rq->curr->sched_class == &ext_sched_class)
+ scx_dump_task(&s, &dctx, rq->curr, '*');
+
+ list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
+ scx_dump_task(&s, &dctx, p, ' ');
+ next:
+ rq_unlock(rq, &rf);
+ }
+
+ if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
+ memcpy(ei->dump + dump_len - sizeof(trunc_marker),
+ trunc_marker, sizeof(trunc_marker));
+
+ spin_unlock_irqrestore(&dump_lock, flags);
+}
+
+static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+
+ if (ei->kind >= SCX_EXIT_ERROR)
+ scx_dump_state(ei, scx_ops.exit_dump_len);
+
+ schedule_scx_ops_disable_work();
+}
+
+static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
+
+static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
+ s64 exit_code,
+ const char *fmt, ...)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+ int none = SCX_EXIT_NONE;
+ va_list args;
+
+ if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
+ return;
+
+ ei->exit_code = exit_code;
+#ifdef CONFIG_STACKTRACE
+ if (kind >= SCX_EXIT_ERROR)
+ ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
+#endif
+ va_start(args, fmt);
+ vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
+ va_end(args);
+
+ /*
+ * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
+ * in scx_ops_disable_workfn().
+ */
+ ei->kind = kind;
+ ei->reason = scx_exit_reason(ei->kind);
+
+ irq_work_queue(&scx_ops_error_irq_work);
+}
+
+static struct kthread_worker *scx_create_rt_helper(const char *name)
+{
+ struct kthread_worker *helper;
+
+ helper = kthread_create_worker(0, name);
+ if (helper)
+ sched_set_fifo(helper->task);
+ return helper;
+}
+
+static void check_hotplug_seq(const struct sched_ext_ops *ops)
+{
+ unsigned long long global_hotplug_seq;
+
+ /*
+	 * If a hotplug event has occurred between when the scheduler was
+	 * initialized and when we were able to attach, exit and notify user
+	 * space about it.
+ */
+ if (ops->hotplug_seq) {
+ global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
+ if (ops->hotplug_seq != global_hotplug_seq) {
+ scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
+ "expected hotplug seq %llu did not match actual %llu",
+ ops->hotplug_seq, global_hotplug_seq);
+ }
+ }
+}
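+
+/*
+ * By convention (followed by the example schedulers, not enforced here), user
+ * space reads /sys/kernel/sched_ext/hotplug_seq and stores the value in
+ * ops->hotplug_seq before attaching, so that a racing hotplug event between
+ * scheduler init and attach is detected by the check above.
+ */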
+
+static int validate_ops(const struct sched_ext_ops *ops)
+{
+ /*
+ * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
+ * ops.enqueue() callback isn't implemented.
+ */
+ if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
+ scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+{
+ struct scx_task_iter sti;
+ struct task_struct *p;
+ unsigned long timeout;
+ int i, cpu, ret;
+
+ if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
+ cpu_possible_mask)) {
+ pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation");
+ return -EINVAL;
+ }
+
+ mutex_lock(&scx_ops_enable_mutex);
+
+ if (!scx_ops_helper) {
+ WRITE_ONCE(scx_ops_helper,
+ scx_create_rt_helper("sched_ext_ops_helper"));
+ if (!scx_ops_helper) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+ }
+
+ if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
+ if (!scx_root_kobj) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ scx_root_kobj->kset = scx_kset;
+ ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
+ if (ret < 0)
+ goto err;
+
+ scx_exit_info = alloc_exit_info(ops->exit_dump_len);
+ if (!scx_exit_info) {
+ ret = -ENOMEM;
+ goto err_del;
+ }
+
+ /*
+ * Set scx_ops, transition to PREPPING and clear exit info to arm the
+ * disable path. Failure triggers full disabling from here on.
+ */
+ scx_ops = *ops;
+
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_PREPPING) !=
+ SCX_OPS_DISABLED);
+
+ atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
+ scx_warned_zero_slice = false;
+
+ atomic_long_set(&scx_nr_rejected, 0);
+
+ for_each_possible_cpu(cpu)
+ cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
+
+ /*
+ * Keep CPUs stable during enable so that the BPF scheduler can track
+ * online CPUs by watching ->on/offline_cpu() after ->init().
+ */
+ cpus_read_lock();
+
+ if (scx_ops.init) {
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
+ if (ret) {
+ ret = ops_sanitize_err("init", ret);
+ goto err_disable_unlock_cpus;
+ }
+ }
+
+ for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
+ if (((void (**)(void))ops)[i])
+ static_branch_enable_cpuslocked(&scx_has_op[i]);
+
+ cpus_read_unlock();
+
+ ret = validate_ops(ops);
+ if (ret)
+ goto err_disable;
+
+ WARN_ON_ONCE(scx_dsp_ctx);
+ scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
+ scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
+ scx_dsp_max_batch),
+ __alignof__(struct scx_dsp_ctx));
+ if (!scx_dsp_ctx) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ if (ops->timeout_ms)
+ timeout = msecs_to_jiffies(ops->timeout_ms);
+ else
+ timeout = SCX_WATCHDOG_MAX_TIMEOUT;
+
+ WRITE_ONCE(scx_watchdog_timeout, timeout);
+ WRITE_ONCE(scx_watchdog_timestamp, jiffies);
+ queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
+ scx_watchdog_timeout / 2);
+
+ /*
+ * Lock out forks, cgroup on/offlining and moves before opening the
+ * floodgate so that they don't wander into the operations prematurely.
+ *
+ * We don't need to keep the CPUs stable but static_branch_*() requires
+ * cpus_read_lock() and scx_cgroup_rwsem must nest inside
+ * cpu_hotplug_lock because of the following dependency chain:
+ *
+ * cpu_hotplug_lock --> cgroup_threadgroup_rwsem --> scx_cgroup_rwsem
+ *
+ * So, we need to do cpus_read_lock() before scx_cgroup_lock() and use
+ * static_branch_*_cpuslocked().
+ *
+ * Note that cpu_hotplug_lock must nest inside scx_fork_rwsem due to the
+ * following dependency chain:
+ *
+ * scx_fork_rwsem --> pernet_ops_rwsem --> cpu_hotplug_lock
+ */
+ percpu_down_write(&scx_fork_rwsem);
+ cpus_read_lock();
+ scx_cgroup_lock();
+
+ check_hotplug_seq(ops);
+
+ for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
+ if (((void (**)(void))ops)[i])
+ static_branch_enable_cpuslocked(&scx_has_op[i]);
+
+ if (ops->flags & SCX_OPS_ENQ_LAST)
+ static_branch_enable_cpuslocked(&scx_ops_enq_last);
+
+ if (ops->flags & SCX_OPS_ENQ_EXITING)
+ static_branch_enable_cpuslocked(&scx_ops_enq_exiting);
+ if (scx_ops.cpu_acquire || scx_ops.cpu_release)
+ static_branch_enable_cpuslocked(&scx_ops_cpu_preempt);
+
+ if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+ reset_idle_masks();
+ static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
+ } else {
+ static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
+ }
+
+ /*
+ * All cgroups should be initialized before letting in tasks. cgroup
+ * on/offlining and task migrations are already locked out.
+ */
+ ret = scx_cgroup_init();
+ if (ret)
+ goto err_disable_unlock_all;
+
+ static_branch_enable_cpuslocked(&__scx_ops_enabled);
+
+ /*
+ * Enable ops for every task. Fork is excluded by scx_fork_rwsem
+ * preventing new tasks from being added. No need to exclude tasks
+ * leaving as sched_ext_free() can handle both prepped and enabled
+ * tasks. Prep all tasks first and then enable them with preemption
+ * disabled.
+ */
+ spin_lock_irq(&scx_tasks_lock);
+
+ scx_task_iter_init(&sti);
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ /*
+		 * @p may already be dead, have lost all its usage counts and
+ * be waiting for RCU grace period before being freed. @p can't
+ * be initialized for SCX in such cases and should be ignored.
+ */
+ if (!tryget_task_struct(p))
+ continue;
+
+ scx_task_iter_rq_unlock(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+
+ ret = scx_ops_init_task(p, task_group(p), false);
+ if (ret) {
+ put_task_struct(p);
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+ pr_err("sched_ext: ops.init_task() failed (%d) for %s[%d] while loading\n",
+ ret, p->comm, p->pid);
+ goto err_disable_unlock_all;
+ }
+
+ put_task_struct(p);
+ spin_lock_irq(&scx_tasks_lock);
+ }
+ scx_task_iter_exit(&sti);
+
+ /*
+ * All tasks are prepped but are still ops-disabled. Ensure that
+ * %current can't be scheduled out and switch everyone.
+ * preempt_disable() is necessary because we can't guarantee that
+ * %current won't be starved if scheduled out while switching.
+ */
+ preempt_disable();
+
+ /*
+ * From here on, the disable path must assume that tasks have ops
+ * enabled and need to be recovered.
+ *
+ * Transition to ENABLING fails iff the BPF scheduler has already
+ * triggered scx_bpf_error(). Returning an error code here would lose
+ * the recorded error information. Exit indicating success so that the
+ * error is notified through ops.exit() with all the details.
+ */
+ if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLING, SCX_OPS_PREPPING)) {
+ preempt_enable();
+ spin_unlock_irq(&scx_tasks_lock);
+ WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
+ ret = 0;
+ goto err_disable_unlock_all;
+ }
+
+ /*
+	 * We're fully committed and can't fail. The READY -> ENABLED
+ * transitions here are synchronized against sched_ext_free() through
+ * scx_tasks_lock.
+ */
+ WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
+
+ scx_task_iter_init(&sti);
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ const struct sched_class *old_class = p->sched_class;
+ struct sched_enq_and_set_ctx ctx;
+
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+
+ scx_set_task_state(p, SCX_TASK_READY);
+ __setscheduler_prio(p, p->prio);
+ check_class_changing(task_rq(p), p, old_class);
+
+ sched_enq_and_set_task(&ctx);
+
+ check_class_changed(task_rq(p), p, old_class, p->prio);
+ }
+ scx_task_iter_exit(&sti);
+
+ spin_unlock_irq(&scx_tasks_lock);
+ preempt_enable();
+ scx_cgroup_unlock();
+ cpus_read_unlock();
+ percpu_up_write(&scx_fork_rwsem);
+
+ /* see above ENABLING transition for the explanation on exiting with 0 */
+ if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
+ WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
+ ret = 0;
+ goto err_disable;
+ }
+
+ if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
+ static_branch_enable(&__scx_switched_all);
+
+ pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
+ scx_ops.name, scx_switched_all() ? "" : " (partial)");
+ kobject_uevent(scx_root_kobj, KOBJ_ADD);
+ mutex_unlock(&scx_ops_enable_mutex);
+
+ atomic_long_inc(&scx_enable_seq);
+
+ return 0;
+
+err_del:
+ kobject_del(scx_root_kobj);
+err:
+ kobject_put(scx_root_kobj);
+ scx_root_kobj = NULL;
+ if (scx_exit_info) {
+ free_exit_info(scx_exit_info);
+ scx_exit_info = NULL;
+ }
+err_unlock:
+ mutex_unlock(&scx_ops_enable_mutex);
+ return ret;
+
+err_disable_unlock_all:
+ scx_cgroup_unlock();
+ percpu_up_write(&scx_fork_rwsem);
+err_disable_unlock_cpus:
+ cpus_read_unlock();
+err_disable:
+ mutex_unlock(&scx_ops_enable_mutex);
+ /* must be fully disabled before returning */
+ scx_ops_disable(SCX_EXIT_ERROR);
+ kthread_flush_work(&scx_ops_disable_work);
+ return ret;
+}
+
+
+/********************************************************************************
+ * bpf_struct_ops plumbing.
+ */
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+
+extern struct btf *btf_vmlinux;
+static const struct btf_type *task_struct_type;
+static u32 task_struct_type_id;
+
+static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ struct btf *btf = bpf_get_btf_vmlinux();
+ const struct bpf_struct_ops_desc *st_ops_desc;
+ const struct btf_member *member;
+ const struct btf_type *t;
+ u32 btf_id, member_idx;
+ const char *mname;
+
+ /* struct_ops op args are all sequential, 64-bit numbers */
+ if (off != arg_n * sizeof(__u64))
+ return false;
+
+ /* btf_id should be the type id of struct sched_ext_ops */
+ btf_id = prog->aux->attach_btf_id;
+ st_ops_desc = bpf_struct_ops_find(btf, btf_id);
+ if (!st_ops_desc)
+ return false;
+
+ /* BTF type of struct sched_ext_ops */
+ t = st_ops_desc->type;
+
+ member_idx = prog->expected_attach_type;
+ if (member_idx >= btf_type_vlen(t))
+ return false;
+
+ /*
+ * Get the member name of this struct_ops program, which corresponds to
+ * a field in struct sched_ext_ops. For example, the member name of the
+ * dispatch struct_ops program (callback) is "dispatch".
+ */
+ member = &btf_type_member(t)[member_idx];
+ mname = btf_name_by_offset(btf_vmlinux, member->name_off);
+
+ if (!strcmp(mname, op)) {
+ /*
+		 * The value is a pointer to a type (struct task_struct) given
+		 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED);
+		 * however, it can be NULL (PTR_MAYBE_NULL). The BPF program
+		 * must check that the pointer is not NULL before using it, or
+		 * the verifier will reject the program.
+ *
+ * Longer term, this is something that should be addressed by
+ * BTF, and be fully contained within the verifier.
+ */
+ info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
+ info->btf = btf_vmlinux;
+ info->btf_id = task_struct_type_id;
+
+ return true;
+ }
+
+ return false;
+}
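+
+/*
+ * Illustrative sketch (assumed BPF-side usage, not part of this file): since
+ * the task argument of ops.dispatch() is PTR_MAYBE_NULL, a BPF scheduler has
+ * to NULL-check it before dereferencing or the verifier rejects the program.
+ * BPF_STRUCT_OPS() and SCX_SLICE_DFL are assumed to come from the SCX BPF
+ * scheduler headers.
+ *
+ *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+ *	{
+ *		if (prev && !prev->scx.slice)	// @prev may be NULL
+ *			prev->scx.slice = SCX_SLICE_DFL;
+ *	}
+ */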
+
+static bool bpf_scx_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (type != BPF_READ)
+ return false;
+ if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
+ set_arg_maybe_null("yield", 1, off, size, type, prog, info))
+ return true;
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (off % size != 0)
+ return false;
+
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg, int off,
+ int size)
+{
+ const struct btf_type *t;
+
+ t = btf_type_by_id(reg->btf, reg->btf_id);
+ if (t == task_struct_type) {
+ if (off >= offsetof(struct task_struct, scx.slice) &&
+ off + size <= offsetofend(struct task_struct, scx.slice))
+ return SCALAR_VALUE;
+ if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
+ off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
+ return SCALAR_VALUE;
+ if (off >= offsetof(struct task_struct, scx.disallow) &&
+ off + size <= offsetofend(struct task_struct, scx.disallow))
+ return SCALAR_VALUE;
+ }
+
+ return -EACCES;
+}
+
+static const struct bpf_func_proto *
+bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_task_storage_get:
+ return &bpf_task_storage_get_proto;
+ case BPF_FUNC_task_storage_delete:
+ return &bpf_task_storage_delete_proto;
+ default:
+ return bpf_base_func_proto(func_id, prog);
+ }
+}
+
+static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
+ .get_func_proto = bpf_scx_get_func_proto,
+ .is_valid_access = bpf_scx_is_valid_access,
+ .btf_struct_access = bpf_scx_btf_struct_access,
+};
+
+static int bpf_scx_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct sched_ext_ops *uops = udata;
+ struct sched_ext_ops *ops = kdata;
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+ int ret;
+
+ switch (moff) {
+ case offsetof(struct sched_ext_ops, dispatch_max_batch):
+ if (*(u32 *)(udata + moff) > INT_MAX)
+ return -E2BIG;
+ ops->dispatch_max_batch = *(u32 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, flags):
+ if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
+ return -EINVAL;
+ ops->flags = *(u64 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, name):
+ ret = bpf_obj_name_cpy(ops->name, uops->name,
+ sizeof(ops->name));
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ return 1;
+ case offsetof(struct sched_ext_ops, timeout_ms):
+ if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
+ SCX_WATCHDOG_MAX_TIMEOUT)
+ return -E2BIG;
+ ops->timeout_ms = *(u32 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, exit_dump_len):
+ ops->exit_dump_len =
+ *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
+ return 1;
+ case offsetof(struct sched_ext_ops, hotplug_seq):
+ ops->hotplug_seq = *(u64 *)(udata + moff);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int bpf_scx_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct sched_ext_ops, init_task):
+#ifdef CONFIG_EXT_GROUP_SCHED
+ case offsetof(struct sched_ext_ops, cgroup_init):
+ case offsetof(struct sched_ext_ops, cgroup_exit):
+ case offsetof(struct sched_ext_ops, cgroup_prep_move):
+#endif
+ case offsetof(struct sched_ext_ops, cpu_online):
+ case offsetof(struct sched_ext_ops, cpu_offline):
+ case offsetof(struct sched_ext_ops, init):
+ case offsetof(struct sched_ext_ops, exit):
+ break;
+ default:
+ if (prog->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bpf_scx_reg(void *kdata, struct bpf_link *link)
+{
+ return scx_ops_enable(kdata, link);
+}
+
+static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
+{
+ scx_ops_disable(SCX_EXIT_UNREG);
+ kthread_flush_work(&scx_ops_disable_work);
+}
+
+static int bpf_scx_init(struct btf *btf)
+{
+ s32 type_id;
+
+ type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ task_struct_type = btf_type_by_id(btf, type_id);
+ task_struct_type_id = type_id;
+
+ return 0;
+}
+
+static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
+{
+ /*
+ * sched_ext does not support updating the actively-loaded BPF
+ * scheduler, as registering a BPF scheduler can always fail if the
+ * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
+ * etc. Similarly, we can always race with unregistration happening
+ * elsewhere, such as with sysrq.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int bpf_scx_validate(void *kdata)
+{
+ return 0;
+}
+
+static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
+static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
+static void dequeue_stub(struct task_struct *p, u64 deq_flags) {}
+static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
+static void tick_stub(struct task_struct *p) {}
+static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
+static void running_stub(struct task_struct *p) {}
+static void stopping_stub(struct task_struct *p, bool runnable) {}
+static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
+static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
+static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
+static void set_weight_stub(struct task_struct *p, u32 weight) {}
+static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
+static void update_idle_stub(s32 cpu, bool idle) {}
+static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
+static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
+static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
+static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
+static void enable_stub(struct task_struct *p) {}
+static void disable_stub(struct task_struct *p) {}
+#ifdef CONFIG_EXT_GROUP_SCHED
+static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
+static void cgroup_exit_stub(struct cgroup *cgrp) {}
+static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
+static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
+#endif
+static void cpu_online_stub(s32 cpu) {}
+static void cpu_offline_stub(s32 cpu) {}
+static s32 init_stub(void) { return -EINVAL; }
+static void exit_stub(struct scx_exit_info *info) {}
+static void dump_stub(struct scx_dump_ctx *ctx) {}
+static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
+static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
+
+static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
+ .select_cpu = select_cpu_stub,
+ .enqueue = enqueue_stub,
+ .dequeue = dequeue_stub,
+ .dispatch = dispatch_stub,
+ .tick = tick_stub,
+ .runnable = runnable_stub,
+ .running = running_stub,
+ .stopping = stopping_stub,
+ .quiescent = quiescent_stub,
+ .yield = yield_stub,
+ .core_sched_before = core_sched_before_stub,
+ .set_weight = set_weight_stub,
+ .set_cpumask = set_cpumask_stub,
+ .update_idle = update_idle_stub,
+ .cpu_acquire = cpu_acquire_stub,
+ .cpu_release = cpu_release_stub,
+ .init_task = init_task_stub,
+ .exit_task = exit_task_stub,
+ .enable = enable_stub,
+ .disable = disable_stub,
+#ifdef CONFIG_EXT_GROUP_SCHED
+ .cgroup_init = cgroup_init_stub,
+ .cgroup_exit = cgroup_exit_stub,
+ .cgroup_prep_move = cgroup_prep_move_stub,
+ .cgroup_move = cgroup_move_stub,
+ .cgroup_cancel_move = cgroup_cancel_move_stub,
+ .cgroup_set_weight = cgroup_set_weight_stub,
+#endif
+ .cpu_online = cpu_online_stub,
+ .cpu_offline = cpu_offline_stub,
+ .init = init_stub,
+ .exit = exit_stub,
+ .dump = dump_stub,
+ .dump_cpu = dump_cpu_stub,
+ .dump_task = dump_task_stub,
+};
+
+static struct bpf_struct_ops bpf_sched_ext_ops = {
+ .verifier_ops = &bpf_scx_verifier_ops,
+ .reg = bpf_scx_reg,
+ .unreg = bpf_scx_unreg,
+ .check_member = bpf_scx_check_member,
+ .init_member = bpf_scx_init_member,
+ .init = bpf_scx_init,
+ .update = bpf_scx_update,
+ .validate = bpf_scx_validate,
+ .name = "sched_ext_ops",
+ .owner = THIS_MODULE,
+ .cfi_stubs = &__bpf_ops_sched_ext_ops
+};
+
+
+/********************************************************************************
+ * System integration and init.
+ */
+
+static void sysrq_handle_sched_ext_reset(u8 key)
+{
+ if (scx_ops_helper)
+ scx_ops_disable(SCX_EXIT_SYSRQ);
+ else
+ pr_info("sched_ext: BPF scheduler not yet used\n");
+}
+
+static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
+ .handler = sysrq_handle_sched_ext_reset,
+ .help_msg = "reset-sched-ext(S)",
+ .action_msg = "Disable sched_ext and revert all tasks to CFS",
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+};
+
+static void sysrq_handle_sched_ext_dump(u8 key)
+{
+ struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
+
+ if (scx_enabled())
+ scx_dump_state(&ei, 0);
+}
+
+static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
+ .handler = sysrq_handle_sched_ext_dump,
+ .help_msg = "dump-sched-ext(D)",
+ .action_msg = "Trigger sched_ext debug dump",
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+};
+
+static bool can_skip_idle_kick(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * We can skip idle kicking if @rq is going to go through at least one
+ * full SCX scheduling cycle before going idle. Just checking whether
+ * curr is not idle is insufficient because we could be racing
+ * balance_one() trying to pull the next task from a remote rq, which
+ * may fail, and @rq may become idle afterwards.
+ *
+	 * The race window is small and we neither do nor can guarantee that
+	 * @rq is only kicked while idle anyway. Skip only when sure.
+ */
+ return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
+}
+
+static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct scx_rq *this_scx = &this_rq->scx;
+ bool should_wait = false;
+ unsigned long flags;
+
+ raw_spin_rq_lock_irqsave(rq, flags);
+
+ /*
+ * During CPU hotplug, a CPU may depend on kicking itself to make
+ * forward progress. Allow kicking self regardless of online state.
+ */
+ if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
+ if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
+ if (rq->curr->sched_class == &ext_sched_class)
+ rq->curr->scx.slice = 0;
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
+ }
+
+ if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
+ pseqs[cpu] = rq->scx.pnt_seq;
+ should_wait = true;
+ }
+
+ resched_curr(rq);
+ } else {
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
+ }
+
+ raw_spin_rq_unlock_irqrestore(rq, flags);
+
+ return should_wait;
+}
+
+static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ raw_spin_rq_lock_irqsave(rq, flags);
+
+ if (!can_skip_idle_kick(rq) &&
+ (cpu_online(cpu) || cpu == cpu_of(this_rq)))
+ resched_curr(rq);
+
+ raw_spin_rq_unlock_irqrestore(rq, flags);
+}
+
+static void kick_cpus_irq_workfn(struct irq_work *irq_work)
+{
+ struct rq *this_rq = this_rq();
+ struct scx_rq *this_scx = &this_rq->scx;
+ unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
+ bool should_wait = false;
+ s32 cpu;
+
+ for_each_cpu(cpu, this_scx->cpus_to_kick) {
+ should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
+ }
+
+ for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
+ kick_one_cpu_if_idle(cpu, this_rq);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
+ }
+
+ if (!should_wait)
+ return;
+
+ for_each_cpu(cpu, this_scx->cpus_to_wait) {
+ unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
+
+ if (cpu != cpu_of(this_rq)) {
+ /*
+ * Pairs with smp_store_release() issued by this CPU in
+ * scx_next_task_picked() on the resched path.
+ *
+ * We busy-wait here to guarantee that no other task can
+ * be scheduled on our core before the target CPU has
+ * entered the resched path.
+ */
+ while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
+ cpu_relax();
+ }
+
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
+ }
+}
+
+/**
+ * print_scx_info - print out sched_ext scheduler state
+ * @log_lvl: the log level to use when printing
+ * @p: target task
+ *
+ * If a sched_ext scheduler is enabled, print the name and state of the
+ * scheduler. If @p is on sched_ext, print further information about the task.
+ *
+ * This function can be safely called on any task as long as the task_struct
+ * itself is accessible. While safe, this function isn't synchronized and may
+ * print out mixed-up or garbage values of limited length.
+ */
+void print_scx_info(const char *log_lvl, struct task_struct *p)
+{
+ enum scx_ops_enable_state state = scx_ops_enable_state();
+ const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
+ char runnable_at_buf[22] = "?";
+ struct sched_class *class;
+ unsigned long runnable_at;
+
+ if (state == SCX_OPS_DISABLED)
+ return;
+
+ /*
+ * Carefully check if the task was running on sched_ext, and then
+ * carefully copy the time it's been runnable, and its state.
+ */
+ if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
+ class != &ext_sched_class) {
+ printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
+ scx_ops_enable_state_str[state], all);
+ return;
+ }
+
+ if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
+ sizeof(runnable_at)))
+ scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
+ jiffies_delta_msecs(runnable_at, jiffies));
+
+ /* print everything onto one line to conserve console space */
+ printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
+ log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
+ runnable_at_buf);
+}
+
+static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ /*
+ * SCX schedulers often have userspace components which are sometimes
+	 * involved in critical scheduling paths. PM operations involve freezing
+ * userspace which can lead to scheduling misbehaviors including stalls.
+ * Let's bypass while PM operations are in progress.
+ */
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ scx_ops_bypass(true);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
+ scx_ops_bypass(false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block scx_pm_notifier = {
+ .notifier_call = scx_pm_handler,
+};
+
+void __init init_sched_ext_class(void)
+{
+ s32 cpu, v;
+
+ /*
+ * The following is to prevent the compiler from optimizing out the enum
+ * definitions so that BPF scheduler implementations can use them
+ * through the generated vmlinux.h.
+ */
+ WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
+ SCX_TG_ONLINE);
+
+ BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
+ init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL);
+#ifdef CONFIG_SMP
+ BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
+#endif
+ scx_kick_cpus_pnt_seqs =
+ __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
+ __alignof__(scx_kick_cpus_pnt_seqs[0]));
+ BUG_ON(!scx_kick_cpus_pnt_seqs);
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+
+ init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
+ INIT_LIST_HEAD(&rq->scx.runnable_list);
+ INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
+
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
+ init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
+ init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
+
+ if (cpu_online(cpu))
+ cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
+ }
+
+ register_sysrq_key('S', &sysrq_sched_ext_reset_op);
+ register_sysrq_key('D', &sysrq_sched_ext_dump_op);
+ INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
+}
+
+
+/********************************************************************************
+ * Helpers that can be called from the BPF scheduler.
+ */
+#include <linux/btf_ids.h>
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
+ * @p: task_struct to select a CPU for
+ * @prev_cpu: CPU @p was on previously
+ * @wake_flags: %SCX_WAKE_* flags
+ * @is_idle: out parameter indicating whether the returned CPU is idle
+ *
+ * Can only be called from ops.select_cpu() if the built-in CPU selection is
+ * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
+ * @p, @prev_cpu and @wake_flags match ops.select_cpu().
+ *
+ * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
+ * currently idle and thus a good candidate for direct dispatching.
+ */
+__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags, bool *is_idle)
+{
+ if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
+ *is_idle = false;
+ return prev_cpu;
+ }
+#ifdef CONFIG_SMP
+ return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
+#else
+ *is_idle = false;
+ return prev_cpu;
+#endif
+}
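+
+/*
+ * Illustrative sketch of a minimal ops.select_cpu() built on this kfunc,
+ * assuming BPF_STRUCT_OPS() and SCX_SLICE_DFL from the SCX BPF headers:
+ * directly dispatch to the local DSQ when an idle CPU was found.
+ *
+ *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
+ *			   s32 prev_cpu, u64 wake_flags)
+ *	{
+ *		bool is_idle;
+ *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
+ *						 &is_idle);
+ *
+ *		if (is_idle)
+ *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ *		return cpu;
+ *	}
+ */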
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_select_cpu,
+};
+
+static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
+{
+ if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
+ return false;
+
+ lockdep_assert_irqs_disabled();
+
+ if (unlikely(!p)) {
+ scx_ops_error("called with NULL task");
+ return false;
+ }
+
+ if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
+ scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
+ return false;
+ }
+
+ return true;
+}
+
+static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct task_struct *ddsp_task;
+
+ ddsp_task = __this_cpu_read(direct_dispatch_task);
+ if (ddsp_task) {
+ mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
+ return;
+ }
+
+ if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
+ scx_ops_error("dispatch buffer overflow");
+ return;
+ }
+
+ dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
+ .task = p,
+ .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
+ .dsq_id = dsq_id,
+ .enq_flags = enq_flags,
+ };
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
+ * @p: task_struct to dispatch
+ * @dsq_id: DSQ to dispatch to
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
+ * to call this function spuriously. Can be called from ops.enqueue(),
+ * ops.select_cpu(), and ops.dispatch().
+ *
+ * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
+ * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
+ * used to target the local DSQ of a CPU other than the enqueueing one. Use
+ * ops.select_cpu() to be on the target CPU in the first place.
+ *
+ * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
+ * will be directly dispatched to the corresponding dispatch queue after
+ * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
+ * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
+ * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
+ * task is dispatched.
+ *
+ * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
+ * and this function can be called up to ops.dispatch_max_batch times to dispatch
+ * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
+ * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
+ *
+ * This function doesn't have any locking restrictions and may be called under
+ * BPF locks (in the future when BPF introduces more flexible locking).
+ *
+ * @p is allowed to run for @slice. The scheduling path is triggered on slice
+ * exhaustion. If zero, the current residual slice is maintained. If
+ * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
+ * scx_bpf_kick_cpu() to trigger scheduling.
+ */
+__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
+ u64 enq_flags)
+{
+ if (!scx_dispatch_preamble(p, enq_flags))
+ return;
+
+ if (slice)
+ p->scx.slice = slice;
+ else
+ p->scx.slice = p->scx.slice ?: 1;
+
+ scx_dispatch_commit(p, dsq_id, enq_flags);
+}
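+
+/*
+ * Illustrative sketch: the simplest ops.enqueue() sends every task to the
+ * global FIFO. BPF_STRUCT_OPS() and SCX_SLICE_DFL are assumed to come from
+ * the SCX BPF headers.
+ *
+ *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p,
+ *			    u64 enq_flags)
+ *	{
+ *		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ *	}
+ */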
+
+/**
+ * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
+ * @p: task_struct to dispatch
+ * @dsq_id: DSQ to dispatch to
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
+ * Tasks queued into the priority queue are ordered by @vtime and always
+ * consumed after the tasks in the FIFO queue. All other aspects are identical
+ * to scx_bpf_dispatch().
+ *
+ * @vtime ordering is according to time_before64() which considers wrapping. A
+ * numerically larger vtime may indicate an earlier position in the ordering and
+ * vice-versa.
+ */
+__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
+ u64 slice, u64 vtime, u64 enq_flags)
+{
+ if (!scx_dispatch_preamble(p, enq_flags))
+ return;
+
+ if (slice)
+ p->scx.slice = slice;
+ else
+ p->scx.slice = p->scx.slice ?: 1;
+
+ p->scx.dsq_vtime = vtime;
+
+ scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
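+
+/*
+ * Illustrative sketch of a vtime-based ops.enqueue(), assuming a hypothetical
+ * custom DSQ id SHARED_DSQ created beforehand with scx_bpf_create_dsq() and
+ * the usual SCX BPF header macros:
+ *
+ *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p,
+ *			    u64 enq_flags)
+ *	{
+ *		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
+ *				       p->scx.dsq_vtime, enq_flags);
+ *	}
+ */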
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_enqueue_dispatch,
+};
+
+static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
+ struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
+ bool dispatched = false;
+ bool in_balance;
+ unsigned long flags;
+
+ if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
+ return false;
+
+ /*
+ * Can be called from either ops.dispatch() locking this_rq() or any
+ * context where no rq lock is held. If latter, lock @p's task_rq which
+ * we'll likely need anyway.
+ */
+ src_rq = task_rq(p);
+
+ local_irq_save(flags);
+ this_rq = this_rq();
+ in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
+
+ if (in_balance) {
+ if (this_rq != src_rq) {
+ raw_spin_rq_unlock(this_rq);
+ raw_spin_rq_lock(src_rq);
+ }
+ } else {
+ raw_spin_rq_lock(src_rq);
+ }
+
+ locked_rq = src_rq;
+ raw_spin_lock(&src_dsq->lock);
+
+ /*
+	 * Did someone else get to it? @p could have already left $src_dsq, been
+	 * re-enqueued, or be in the process of being consumed by someone else.
+ */
+ if (unlikely(p->scx.dsq != src_dsq ||
+ u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
+ p->scx.holding_cpu >= 0) ||
+ WARN_ON_ONCE(src_rq != task_rq(p))) {
+ raw_spin_unlock(&src_dsq->lock);
+ goto out;
+ }
+
+ /* @p is still on $src_dsq and stable, determine the destination */
+ dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
+
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+ if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
+ dst_dsq = &scx_dsq_global;
+ dst_rq = src_rq;
+ }
+ } else {
+ /* no need to migrate if destination is a non-local DSQ */
+ dst_rq = src_rq;
+ }
+
+ /*
+ * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
+ * CPU, @p will be migrated.
+ */
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ /* @p is going from a non-local DSQ to a local DSQ */
+ if (src_rq == dst_rq) {
+ task_unlink_from_dsq(p, src_dsq);
+ move_local_task_to_local_dsq(p, enq_flags,
+ src_dsq, dst_rq);
+ raw_spin_unlock(&src_dsq->lock);
+ } else {
+ raw_spin_unlock(&src_dsq->lock);
+ move_remote_task_to_local_dsq(p, enq_flags,
+ src_rq, dst_rq);
+ locked_rq = dst_rq;
+ }
+ } else {
+ /*
+ * @p is going from a non-local DSQ to a non-local DSQ. As
+ * $src_dsq is already locked, do an abbreviated dequeue.
+ */
+ task_unlink_from_dsq(p, src_dsq);
+ p->scx.dsq = NULL;
+ raw_spin_unlock(&src_dsq->lock);
+
+ if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
+ p->scx.dsq_vtime = kit->vtime;
+ dispatch_enqueue(dst_dsq, p, enq_flags);
+ }
+
+ if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
+ p->scx.slice = kit->slice;
+
+ dispatched = true;
+out:
+ if (in_balance) {
+ if (this_rq != locked_rq) {
+ raw_spin_rq_unlock(locked_rq);
+ raw_spin_rq_lock(this_rq);
+ }
+ } else {
+ raw_spin_rq_unlock_irqrestore(locked_rq, flags);
+ }
+
+ kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
+ __SCX_DSQ_ITER_HAS_VTIME);
+ return dispatched;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
+ *
+ * Can only be called from ops.dispatch().
+ */
+__bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
+{
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return 0;
+
+ return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
+}
+
+/**
+ * scx_bpf_dispatch_cancel - Cancel the latest dispatch
+ *
+ * Cancel the latest dispatch. Can be called multiple times to cancel further
+ * dispatches. Can only be called from ops.dispatch().
+ */
+__bpf_kfunc void scx_bpf_dispatch_cancel(void)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return;
+
+ if (dspc->cursor > 0)
+ dspc->cursor--;
+ else
+ scx_ops_error("dispatch buffer underflow");
+}
+
+/**
+ * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
+ * @dsq_id: DSQ to consume
+ *
+ * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
+ * to the current CPU's local DSQ for execution. Can only be called from
+ * ops.dispatch().
+ *
+ * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
+ * trying to consume the specified DSQ. It may also grab rq locks and thus can't
+ * be called under any BPF locks.
+ *
+ * Returns %true if a task has been consumed, %false if there isn't any task to
+ * consume.
+ */
+__bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct scx_dispatch_q *dsq;
+
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return false;
+
+ flush_dispatch_buf(dspc->rq);
+
+ dsq = find_non_local_dsq(dsq_id);
+ if (unlikely(!dsq)) {
+ scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
+ return false;
+ }
+
+ if (consume_dispatch_q(dspc->rq, dsq)) {
+ /*
+ * A successfully consumed task can be dequeued before it starts
+ * running while the CPU is trying to migrate other dispatched
+ * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+ * local DSQ.
+ */
+ dspc->nr_tasks++;
+ return true;
+ } else {
+ return false;
+ }
+}
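+
+/*
+ * Illustrative sketch of an ops.dispatch() that refills the local DSQ from a
+ * hypothetical custom DSQ named SHARED_DSQ (SCX header macros assumed):
+ *
+ *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+ *	{
+ *		scx_bpf_consume(SHARED_DSQ);
+ *	}
+ */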
+
+/**
+ * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
+ * @it__iter: DSQ iterator in progress
+ * @slice: duration the dispatched task can run for in nsecs
+ *
+ * Override the slice of the next task that will be dispatched from @it__iter
+ * using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called,
+ * the previous slice duration is kept.
+ */
+__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
+ struct bpf_iter_scx_dsq *it__iter, u64 slice)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
+
+ kit->slice = slice;
+ kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
+}
+
+/**
+ * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
+ * @it__iter: DSQ iterator in progress
+ * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
+ *
+ * Override the vtime of the next task that will be dispatched from @it__iter
+ * using scx_bpf_dispatch_from_dsq_vtime(). If this function is not called, the
+ * previous vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
+ * dispatch the next task, the override is ignored and cleared.
+ */
+__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
+ struct bpf_iter_scx_dsq *it__iter, u64 vtime)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
+
+ kit->vtime = vtime;
+ kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
+}
+
+/**
+ * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
+ * @it__iter: DSQ iterator in progress
+ * @p: task to transfer
+ * @dsq_id: DSQ to move @p to
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
+ * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
+ * be the destination.
+ *
+ * For the transfer to be successful, @p must still be on the DSQ and have been
+ * queued before the DSQ iteration started; whether @p was actually obtained
+ * from the iteration doesn't matter.
+ *
+ * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
+ * update.
+ *
+ * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
+ * lock (e.g. BPF timers or SYSCALL programs).
+ *
+ * Returns %true if @p has been consumed, %false if @p had already been consumed
+ * or dequeued.
+ */
+__bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
+ p, dsq_id, enq_flags);
+}
+
+/**
+ * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
+ * @it__iter: DSQ iterator in progress
+ * @p: task to transfer
+ * @dsq_id: DSQ to move @p to
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Transfer @p which is on the DSQ currently iterated by @it__iter to the
+ * priority queue of the DSQ specified by @dsq_id. The destination must be a
+ * user DSQ as only user DSQs support priority queue.
+ *
+ * @p's slice and vtime are kept by default. Use
+ * scx_bpf_dispatch_from_dsq_set_slice() and
+ * scx_bpf_dispatch_from_dsq_set_vtime() to update.
+ *
+ * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
+ * scx_bpf_dispatch_vtime() for more information on @vtime.
+ */
+__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
+ p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
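+
+/*
+ * Illustrative sketch, assuming the bpf_for_each() and BPF_FOR_EACH_ITER
+ * conveniences from the SCX BPF headers and a hypothetical SHARED_DSQ: walk
+ * a DSQ and move the first per-CPU task found to its CPU's local DSQ.
+ *
+ *	struct task_struct *p;
+ *
+ *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
+ *		if (p->nr_cpus_allowed == 1 &&
+ *		    scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
+ *					      SCX_DSQ_LOCAL_ON | scx_bpf_task_cpu(p),
+ *					      SCX_ENQ_PREEMPT))
+ *			break;
+ *	}
+ */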
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
+BTF_ID_FLAGS(func, scx_bpf_consume)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_dispatch,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
+ *
+ * Iterate over all of the tasks currently enqueued on the local DSQ of the
+ * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
+ * processed tasks. Can only be called from ops.cpu_release().
+ */
+__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+{
+ LIST_HEAD(tasks);
+ u32 nr_enqueued = 0;
+ struct rq *rq;
+ struct task_struct *p, *n;
+
+ if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
+ return 0;
+
+ rq = cpu_rq(smp_processor_id());
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * The BPF scheduler may choose to dispatch tasks back to
+ * @rq->scx.local_dsq. Move all candidate tasks off to a private list
+ * first to avoid processing the same tasks repeatedly.
+ */
+ list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
+ scx.dsq_list.node) {
+ /*
+ * If @p is being migrated, @p's current CPU may not agree with
+ * its allowed CPUs and the migration_cpu_stop is about to
+ * deactivate and re-activate @p anyway. Skip re-enqueueing.
+ *
+ * While racing sched property changes may also dequeue and
+ * re-enqueue a migrating task while its current CPU and allowed
+ * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
+ * the current local DSQ for running tasks and thus are not
+ * visible to the BPF scheduler.
+ */
+ if (p->migration_pending)
+ continue;
+
+ dispatch_dequeue(rq, p);
+ list_add_tail(&p->scx.dsq_list.node, &tasks);
+ }
+
+ list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
+ list_del_init(&p->scx.dsq_list.node);
+ do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
+ nr_enqueued++;
+ }
+
+ return nr_enqueued;
+}
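+
+/*
+ * Illustrative sketch of an ops.cpu_release() using this kfunc so that tasks
+ * left on the local DSQ when a CPU is taken over by a higher priority
+ * sched_class can be rescheduled elsewhere (SCX header macros assumed):
+ *
+ *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
+ *			    struct scx_cpu_release_args *args)
+ *	{
+ *		scx_bpf_reenqueue_local();
+ *	}
+ */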
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
+BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
+BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_cpu_release,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_create_dsq - Create a custom DSQ
+ * @dsq_id: DSQ to create
+ * @node: NUMA node to allocate from
+ *
+ * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
+ * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
+ */
+__bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
+{
+ if (unlikely(node >= (int)nr_node_ids ||
+ (node < 0 && node != NUMA_NO_NODE)))
+ return -EINVAL;
+ return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
+}
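+
+/*
+ * Illustrative sketch: a custom DSQ is typically created from a sleepable
+ * ops.init(). BPF_STRUCT_OPS_SLEEPABLE() and the SHARED_DSQ id are assumed
+ * to come from the SCX BPF headers and the example scheduler respectively.
+ *
+ *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
+ *	{
+ *		return scx_bpf_create_dsq(SHARED_DSQ, -1);	// -1 == NUMA_NO_NODE
+ *	}
+ */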
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
+BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_unlocked,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_kick_cpu - Trigger reschedule on a CPU
+ * @cpu: cpu to kick
+ * @flags: %SCX_KICK_* flags
+ *
+ * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
+ * trigger rescheduling on a busy CPU. This can be called from any online
+ * scx_ops operation and the actual kicking is performed asynchronously through
+ * an irq work.
+ */
+__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
+{
+ struct rq *this_rq;
+ unsigned long irq_flags;
+
+ if (!ops_cpu_valid(cpu, NULL))
+ return;
+
+ local_irq_save(irq_flags);
+
+ this_rq = this_rq();
+
+ /*
+ * While bypassing for PM ops, IRQ handling may not be online which can
+ * lead to irq_work_queue() malfunction such as infinite busy wait for
+ * IRQ status update. Suppress kicking.
+ */
+ if (scx_rq_bypassing(this_rq))
+ goto out;
+
+ /*
+ * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
+ * rq locks. We can probably be smarter and avoid bouncing if called
+ * from ops which don't hold a rq lock.
+ */
+ if (flags & SCX_KICK_IDLE) {
+ struct rq *target_rq = cpu_rq(cpu);
+
+ if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
+ scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
+
+ if (raw_spin_rq_trylock(target_rq)) {
+ if (can_skip_idle_kick(target_rq)) {
+ raw_spin_rq_unlock(target_rq);
+ goto out;
+ }
+ raw_spin_rq_unlock(target_rq);
+ }
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
+ } else {
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
+
+ if (flags & SCX_KICK_PREEMPT)
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
+ if (flags & SCX_KICK_WAIT)
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
+ }
+
+ irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
+out:
+ local_irq_restore(irq_flags);
+}
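+
+/*
+ * Illustrative sketch (scenario assumed): after queueing a task for a remote
+ * CPU, kick that CPU so the task is picked up promptly.
+ *
+ *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0);
+ *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);	// no-op unless @cpu is idle
+ */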
+
+/**
+ * scx_bpf_dsq_nr_queued - Return the number of queued tasks
+ * @dsq_id: id of the DSQ
+ *
+ * Return the number of tasks in the DSQ matching @dsq_id. If not found,
+ * -%ENOENT is returned.
+ */
+__bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
+{
+ struct scx_dispatch_q *dsq;
+ s32 ret;
+
+ preempt_disable();
+
+ if (dsq_id == SCX_DSQ_LOCAL) {
+ ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
+ goto out;
+ } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+ s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+ if (ops_cpu_valid(cpu, NULL)) {
+ ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
+ goto out;
+ }
+ } else {
+ dsq = find_non_local_dsq(dsq_id);
+ if (dsq) {
+ ret = READ_ONCE(dsq->nr);
+ goto out;
+ }
+ }
+ ret = -ENOENT;
+out:
+ preempt_enable();
+ return ret;
+}
+
+/**
+ * scx_bpf_destroy_dsq - Destroy a custom DSQ
+ * @dsq_id: DSQ to destroy
+ *
+ * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
+ * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
+ * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
+ * which doesn't exist. Can be called from any online scx_ops operation.
+ */
+__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
+{
+ destroy_dsq(dsq_id);
+}
+
+/**
+ * bpf_iter_scx_dsq_new - Create a DSQ iterator
+ * @it: iterator to initialize
+ * @dsq_id: DSQ to iterate
+ * @flags: %SCX_DSQ_ITER_*
+ *
+ * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
+ * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
+ * tasks which are already queued when this function is invoked.
+ */
+__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+ u64 flags)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+ BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
+ sizeof(struct bpf_iter_scx_dsq));
+ BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
+ __alignof__(struct bpf_iter_scx_dsq));
+
+ if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
+ return -EINVAL;
+
+ kit->dsq = find_non_local_dsq(dsq_id);
+ if (!kit->dsq)
+ return -ENOENT;
+
+ INIT_LIST_HEAD(&kit->cursor.node);
+ kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ kit->cursor.priv = READ_ONCE(kit->dsq->seq);
+
+ return 0;
+}
+
+/**
+ * bpf_iter_scx_dsq_next - Progress a DSQ iterator
+ * @it: iterator to progress
+ *
+ * Return the next task. See bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+ bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
+ struct task_struct *p;
+ unsigned long flags;
+
+ if (!kit->dsq)
+ return NULL;
+
+ raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+
+ if (list_empty(&kit->cursor.node))
+ p = NULL;
+ else
+ p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
+
+ /*
+ * Only tasks which were queued before the iteration started are
+ * visible. This bounds BPF iterations and guarantees that vtime never
+ * jumps in the other direction while iterating.
+ */
+ do {
+ p = nldsq_next_task(kit->dsq, p, rev);
+ } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
+
+ if (p) {
+ if (rev)
+ list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
+ else
+ list_move(&kit->cursor.node, &p->scx.dsq_list.node);
+ } else {
+ list_del_init(&kit->cursor.node);
+ }
+
+ raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+
+ return p;
+}
+
+/**
+ * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
+ * @it: iterator to destroy
+ *
+ * Undo bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+ if (!kit->dsq)
+ return;
+
+ if (!list_empty(&kit->cursor.node)) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+ list_del_init(&kit->cursor.node);
+ raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+ }
+ kit->dsq = NULL;
+}
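+
+/*
+ * Illustrative sketch: from BPF, the iterator trio above is normally driven
+ * through the bpf_for_each() macro, assumed to come from the SCX BPF headers.
+ *
+ *	struct task_struct *p;
+ *
+ *	bpf_for_each(scx_dsq, p, dsq_id, SCX_DSQ_ITER_REV) {
+ *		bpf_printk("%s[%d]", p->comm, p->pid);
+ *	}
+ */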
+
+__bpf_kfunc_end_defs();
+
+static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
+ char *fmt, unsigned long long *data, u32 data__sz)
+{
+ struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
+ s32 ret;
+
+ if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
+ (data__sz && !data)) {
+ scx_ops_error("invalid data=%p and data__sz=%u",
+ (void *)data, data__sz);
+ return -EINVAL;
+ }
+
+ ret = copy_from_kernel_nofault(data_buf, data, data__sz);
+ if (ret < 0) {
+ scx_ops_error("failed to read data fields (%d)", ret);
+ return ret;
+ }
+
+ ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
+ &bprintf_data);
+ if (ret < 0) {
+ scx_ops_error("format preparation failed (%d)", ret);
+ return ret;
+ }
+
+ ret = bstr_printf(line_buf, line_size, fmt,
+ bprintf_data.bin_args);
+ bpf_bprintf_cleanup(&bprintf_data);
+ if (ret < 0) {
+ scx_ops_error("(\"%s\", %p, %u) failed to format",
+ fmt, data, data__sz);
+ return ret;
+ }
+
+ return ret;
+}
+
+static s32 bstr_format(struct scx_bstr_buf *buf,
+ char *fmt, unsigned long long *data, u32 data__sz)
+{
+ return __bstr_format(buf->data, buf->line, sizeof(buf->line),
+ fmt, data, data__sz);
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
+ * @exit_code: Exit value to pass to user space via struct scx_exit_info.
+ * @fmt: error message format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
+ * disabling.
+ */
+__bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
+ unsigned long long *data, u32 data__sz)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
+ if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
+ scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
+ scx_exit_bstr_buf.line);
+ raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
+}
+
+/**
+ * scx_bpf_error_bstr - Indicate fatal error
+ * @fmt: error message format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * Indicate that the BPF scheduler encountered a fatal error and initiate ops
+ * disabling.
+ */
+__bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
+ u32 data__sz)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
+ if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
+ scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
+ scx_exit_bstr_buf.line);
+ raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
+}
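+
+/*
+ * Illustrative sketch: BPF schedulers usually reach this kfunc through a
+ * printf-like wrapper macro, assumed here to be provided by the SCX BPF
+ * headers as scx_bpf_error(), which packages the format arguments:
+ *
+ *	if (dsq_id != SHARED_DSQ)	// SHARED_DSQ is hypothetical
+ *		scx_bpf_error("unexpected DSQ 0x%llx", dsq_id);
+ */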
+
+/**
+ * scx_bpf_dump - Generate extra debug dump specific to the BPF scheduler
+ * @fmt: format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
+ * dump_task() to generate extra debug dump specific to the BPF scheduler.
+ *
+ * The extra dump may be multiple lines. A single line may be split over
+ * multiple calls. The last line is automatically terminated.
+ */
+__bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
+ u32 data__sz)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+ struct scx_bstr_buf *buf = &dd->buf;
+ s32 ret;
+
+ if (raw_smp_processor_id() != dd->cpu) {
+ scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
+ return;
+ }
+
+ /* append the formatted string to the line buf */
+ ret = __bstr_format(buf->data, buf->line + dd->cursor,
+ sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
+ if (ret < 0) {
+ dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
+ dd->prefix, fmt, data, data__sz, ret);
+ return;
+ }
+
+ dd->cursor += ret;
+ dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
+
+ if (!dd->cursor)
+ return;
+
+ /*
+ * If the line buf overflowed or ends in a newline, flush it into the
+ * dump. This is to allow the caller to generate a single line over
+ * multiple calls. As ops_dump_flush() can also handle multiple lines in
+	 * the line buf, the only case which can lead to an unexpected
+	 * truncation is when the caller repeatedly generates newlines in the
+	 * middle of a line rather than at the end. Don't do that.
+ */
+ if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
+ ops_dump_flush();
+}
+
+/**
+ * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
+ * @cpu: CPU of interest
+ *
+ * Return the maximum relative capacity of @cpu in relation to the most
+ * performant CPU in the system. The return value is in the range [1,
+ * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
+ */
+__bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
+{
+ if (ops_cpu_valid(cpu, NULL))
+ return arch_scale_cpu_capacity(cpu);
+ else
+ return SCX_CPUPERF_ONE;
+}
+
+/**
+ * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
+ * @cpu: CPU of interest
+ *
+ * Return the current relative performance of @cpu in relation to its maximum.
+ * The return value is in the range [1, %SCX_CPUPERF_ONE].
+ *
+ * The current performance level of a CPU in relation to the maximum performance
+ * available in the system can be calculated as follows:
+ *
+ * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
+ *
+ * The result is in the range [1, %SCX_CPUPERF_ONE].
+ */
+__bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
+{
+ if (ops_cpu_valid(cpu, NULL))
+ return arch_scale_freq_capacity(cpu);
+ else
+ return SCX_CPUPERF_ONE;
+}
+
+/**
+ * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
+ * @cpu: CPU of interest
+ * @perf: target performance level [0, %SCX_CPUPERF_ONE]
+ *
+ * Set the target performance level of @cpu to @perf. @perf is in linear
+ * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
+ * schedutil cpufreq governor chooses the target frequency.
+ *
+ * The actual performance level chosen, CPU grouping, and the overhead and
+ * latency of the operations are dependent on the hardware and cpufreq driver in
+ * use. Consult hardware and cpufreq documentation for more information. The
+ * current performance level can be monitored using scx_bpf_cpuperf_cur().
+ */
+__bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
+{
+ if (unlikely(perf > SCX_CPUPERF_ONE)) {
+ scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
+ return;
+ }
+
+ if (ops_cpu_valid(cpu, NULL)) {
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->scx.cpuperf_target = perf;
+
+ rcu_read_lock_sched_notrace();
+		cpufreq_update_util(rq, 0);
+ rcu_read_unlock_sched_notrace();
+ }
+}
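+
+/*
+ * Illustrative sketch (hypothetical policy): cap a CPU at half of its
+ * performance range while the scheduler considers it lightly loaded.
+ *
+ *	scx_bpf_cpuperf_set(cpu, SCX_CPUPERF_ONE / 2);
+ */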
+
+/**
+ * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
+ *
+ * All valid CPU IDs in the system are smaller than the returned value.
+ */
+__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
+{
+ return nr_cpu_ids;
+}
+
+/**
+ * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
+{
+ return cpu_possible_mask;
+}
+
+/**
+ * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
+{
+ return cpu_online_mask;
+}
+
+/**
+ * scx_bpf_put_cpumask - Release a possible/online cpumask
+ * @cpumask: cpumask to release
+ */
+__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
+{
+ /*
+ * Empty function body because we aren't actually acquiring or releasing
+ * a reference to a global cpumask, which is read-only in the caller and
+ * is never released. The acquire / release semantics here are just used
+	 * to make the cpumask a trusted pointer in the caller.
+ */
+}
+
+/**
+ * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
+ * per-CPU cpumask.
+ *
+ * Returns an empty mask if idle tracking is not enabled, or running on a UP
+ * kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+#ifdef CONFIG_SMP
+ return idle_masks.cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
+ * per-physical-core cpumask. Can be used to determine if an entire physical
+ * core is free.
+ *
+ * Returns an empty mask if idle tracking is not enabled, or running on a UP
+ * kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+#ifdef CONFIG_SMP
+ if (sched_smt_active())
+ return idle_masks.smt;
+ else
+ return idle_masks.cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
+ * either the per-CPU or SMT idle-tracking cpumask.
+ */
+__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
+{
+ /*
+ * Empty function body because we aren't actually acquiring or releasing
+ * a reference to a global idle cpumask, which is read-only in the
+ * caller and is never released. The acquire / release semantics here
+ * are just used to make the cpumask a trusted pointer in the caller.
+ */
+}
+
+/**
+ * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
+ * @cpu: cpu to test and clear idle for
+ *
+ * Returns %true if @cpu was idle and its idle state was successfully cleared.
+ * %false otherwise.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return false;
+ }
+
+ if (ops_cpu_valid(cpu, NULL))
+ return test_and_clear_cpu_idle(cpu);
+ else
+ return false;
+}
+
+/**
+ * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_CPU_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
+ * number on success. -%EBUSY if no matching cpu was found.
+ *
+ * Idle CPU tracking may race against CPU scheduling state transitions. For
+ * example, this function may return -%EBUSY as CPUs are transitioning into the
+ * idle state. If the caller then assumes that there will be dispatch events on
+ * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
+ * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
+ * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
+ * event in the near future.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return -EBUSY;
+ }
+
+ return scx_pick_idle_cpu(cpus_allowed, flags);
+}
+
+/**
+ * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_CPU_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
+ * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
+ * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
+ * empty.
+ *
+ * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
+ * set, this function can't tell which CPUs are idle and will always pick any
+ * CPU.
+ */
+__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ s32 cpu;
+
+ if (static_branch_likely(&scx_builtin_idle_enabled)) {
+ cpu = scx_pick_idle_cpu(cpus_allowed, flags);
+ if (cpu >= 0)
+ return cpu;
+ }
+
+ cpu = cpumask_any_distribute(cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+ else
+ return -EBUSY;
+}
+
+/**
+ * scx_bpf_task_running - Is task currently running?
+ * @p: task of interest
+ */
+__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
+{
+ return task_rq(p)->curr == p;
+}
+
+/**
+ * scx_bpf_task_cpu - CPU a task is currently associated with
+ * @p: task of interest
+ */
+__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
+{
+ return task_cpu(p);
+}
+
+/**
+ * scx_bpf_cpu_rq - Fetch the rq of a CPU
+ * @cpu: CPU of the rq
+ */
+__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
+{
+ if (!ops_cpu_valid(cpu, NULL))
+ return NULL;
+
+ return cpu_rq(cpu);
+}
+
+/**
+ * scx_bpf_task_cgroup - Return the sched cgroup of a task
+ * @p: task of interest
+ *
+ * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
+ * from the scheduler's POV. SCX operations should use this function to
+ * determine @p's current cgroup as, unlike following @p->cgroups,
+ * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
+ * rq-locked operations. Can be called on the parameter tasks of rq-locked
+ * operations. The restriction guarantees that @p's rq is locked by the caller.
+ */
+#ifdef CONFIG_CGROUP_SCHED
+__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
+{
+ struct task_group *tg = p->sched_task_group;
+ struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
+
+ if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
+ goto out;
+
+ /*
+ * A task_group may either be a cgroup or an autogroup. In the latter
+ * case, @tg->css.cgroup is %NULL. A task_group can't become the other
+ * kind once created.
+ */
+ if (tg && tg->css.cgroup)
+ cgrp = tg->css.cgroup;
+ else
+ cgrp = &cgrp_dfl_root.cgrp;
+out:
+ cgroup_get(cgrp);
+ return cgrp;
+}
+#endif
+
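+/*
+ * Example: a sketch of consuming the acquired cgroup from BPF; reading
+ * cgrp->kn->id and releasing through bpf_cgroup_release() are assumptions
+ * based on the generic BPF cgroup kfuncs:
+ *
+ *   struct cgroup *cgrp = scx_bpf_task_cgroup(p);
+ *   u64 cgid = cgrp->kn->id;
+ *
+ *   bpf_cgroup_release(cgrp);
+ */
+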
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_any)
+BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
+BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
+BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
+BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
+BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
+BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
+BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
+#ifdef CONFIG_CGROUP_SCHED
+BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
+#endif
+BTF_KFUNCS_END(scx_kfunc_ids_any)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_any = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_any,
+};
+
+static int __init scx_init(void)
+{
+ int ret;
+
+ /*
+ * kfunc registration can't be done from init_sched_ext_class() as
+ * register_btf_kfunc_id_set() needs most of the system to be up.
+ *
+ * Some kfuncs are context-sensitive and can only be called from
+ * specific SCX ops. They are grouped into BTF sets accordingly.
+ * Unfortunately, BPF currently doesn't have a way of enforcing such
+ * restrictions. Eventually, the verifier should be able to enforce
+ * them. For now, register them the same and make each kfunc explicitly
+ * check using scx_kf_allowed().
+ */
+ if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_select_cpu)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_enqueue_dispatch)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_dispatch)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_cpu_release)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_unlocked)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
+ &scx_kfunc_set_unlocked)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_any)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
+ &scx_kfunc_set_any)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
+ &scx_kfunc_set_any))) {
+ pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
+ return ret;
+ }
+
+ ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
+ if (ret) {
+ pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
+ return ret;
+ }
+
+ ret = register_pm_notifier(&scx_pm_notifier);
+ if (ret) {
+ pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
+ return ret;
+ }
+
+ scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
+ if (!scx_kset) {
+ pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
+ if (ret < 0) {
+ pr_err("sched_ext: Failed to add global attributes\n");
+ return ret;
+ }
+
+ return 0;
+}
+__initcall(scx_init);
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
new file mode 100644
index 000000000000..246019519231
--- /dev/null
+++ b/kernel/sched/ext.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+void scx_tick(struct rq *rq);
+void init_scx_entity(struct sched_ext_entity *scx);
+void scx_pre_fork(struct task_struct *p);
+int scx_fork(struct task_struct *p);
+void scx_post_fork(struct task_struct *p);
+void scx_cancel_fork(struct task_struct *p);
+bool scx_can_stop_tick(struct rq *rq);
+void scx_rq_activate(struct rq *rq);
+void scx_rq_deactivate(struct rq *rq);
+int scx_check_setscheduler(struct task_struct *p, int policy);
+bool task_should_scx(struct task_struct *p);
+void init_sched_ext_class(void);
+
+static inline u32 scx_cpuperf_target(s32 cpu)
+{
+ if (scx_enabled())
+ return cpu_rq(cpu)->scx.cpuperf_target;
+ else
+ return 0;
+}
+
+static inline bool task_on_scx(const struct task_struct *p)
+{
+ return scx_enabled() && p->sched_class == &ext_sched_class;
+}
+
+#ifdef CONFIG_SCHED_CORE
+bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi);
+#endif
+
+#else /* CONFIG_SCHED_CLASS_EXT */
+
+static inline void scx_tick(struct rq *rq) {}
+static inline void scx_pre_fork(struct task_struct *p) {}
+static inline int scx_fork(struct task_struct *p) { return 0; }
+static inline void scx_post_fork(struct task_struct *p) {}
+static inline void scx_cancel_fork(struct task_struct *p) {}
+static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
+static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
+static inline void scx_rq_activate(struct rq *rq) {}
+static inline void scx_rq_deactivate(struct rq *rq) {}
+static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
+static inline bool task_on_scx(const struct task_struct *p) { return false; }
+static inline void init_sched_ext_class(void) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
+#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
+void __scx_update_idle(struct rq *rq, bool idle);
+
+static inline void scx_update_idle(struct rq *rq, bool idle)
+{
+ if (scx_enabled())
+ __scx_update_idle(rq, idle);
+}
+#else
+static inline void scx_update_idle(struct rq *rq, bool idle) {}
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_EXT_GROUP_SCHED
+int scx_tg_online(struct task_group *tg);
+void scx_tg_offline(struct task_group *tg);
+int scx_cgroup_can_attach(struct cgroup_taskset *tset);
+void scx_move_task(struct task_struct *p);
+void scx_cgroup_finish_attach(void);
+void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
+void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
+void scx_group_set_idle(struct task_group *tg, bool idle);
+#else /* CONFIG_EXT_GROUP_SCHED */
+static inline int scx_tg_online(struct task_group *tg) { return 0; }
+static inline void scx_tg_offline(struct task_group *tg) {}
+static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
+static inline void scx_move_task(struct task_struct *p) {}
+static inline void scx_cgroup_finish_attach(void) {}
+static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
+static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
+static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
+#endif /* CONFIG_EXT_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9057584ec06d..225b31aaee55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -511,7 +511,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
static int se_is_idle(struct sched_entity *se)
{
- return 0;
+ return task_has_idle_policy(task_of(se));
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -779,8 +779,22 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
}
/* ensure we never gain time by being placed backwards. */
- u64_u32_store(cfs_rq->min_vruntime,
- __update_min_vruntime(cfs_rq, vruntime));
+ cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
+}
+
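+/*
+ * Shortest request slice among the current entity and all entities queued
+ * on the timeline tree; used to propagate slice hints to group entities on
+ * the enqueue / dequeue paths.
+ */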
+static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *root = __pick_root_entity(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+ u64 min_slice = ~0ULL;
+
+ if (curr && curr->on_rq)
+ min_slice = curr->slice;
+
+ if (root)
+ min_slice = min(min_slice, root->min_slice);
+
+ return min_slice;
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
@@ -799,19 +813,34 @@ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node
}
}
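+/*
+ * se->min_slice = min(se->slice, {left,right}->min_slice); maintained via
+ * the same augmented rbtree callbacks as se->min_vruntime below.
+ */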
+static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node)
+{
+ if (node) {
+ struct sched_entity *rse = __node_2_se(node);
+ if (rse->min_slice < se->min_slice)
+ se->min_slice = rse->min_slice;
+ }
+}
+
/*
* se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
*/
static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
u64 old_min_vruntime = se->min_vruntime;
+ u64 old_min_slice = se->min_slice;
struct rb_node *node = &se->run_node;
se->min_vruntime = se->vruntime;
__min_vruntime_update(se, node->rb_right);
__min_vruntime_update(se, node->rb_left);
- return se->min_vruntime == old_min_vruntime;
+ se->min_slice = se->slice;
+ __min_slice_update(se, node->rb_right);
+ __min_slice_update(se, node->rb_left);
+
+ return se->min_vruntime == old_min_vruntime &&
+ se->min_slice == old_min_slice;
}
RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
@@ -824,6 +853,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
__entity_less, &min_vruntime_cb);
}
@@ -974,17 +1004,18 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if ((s64)(se->vruntime - se->deadline) < 0)
- return;
+ return false;
/*
* For EEVDF the virtual time slope is determined by w_i (iow.
* nice) while the request time r_i is determined by
* sysctl_sched_base_slice.
*/
- se->slice = sysctl_sched_base_slice;
+ if (!se->custom_slice)
+ se->slice = sysctl_sched_base_slice;
/*
* EEVDF: vd_i = ve_i + r_i / w_i
@@ -994,10 +1025,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* The task has consumed its request, reschedule.
*/
- if (cfs_rq->nr_running > 1) {
- resched_curr(rq_of(cfs_rq));
- clear_buddies(cfs_rq, se);
- }
+ return true;
}
#include "pelt.h"
@@ -1135,6 +1163,38 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
dl_server_update(p->dl_server, delta_exec);
}
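+/*
+ * current's slice is not protected (see the PREEMPT_SHORT / RUN_TO_PARITY
+ * handling in check_preempt_wakeup_fair()) and current has become
+ * ineligible: let update_curr() trigger a reschedule.
+ */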
+static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
+ if (!sched_feat(PREEMPT_SHORT))
+ return false;
+
+ if (curr->vlag == curr->deadline)
+ return false;
+
+ return !entity_eligible(cfs_rq, curr);
+}
+
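+/*
+ * Whether an eligible @pse with a shorter request slice should cancel @se's
+ * slice protection; see the PREEMPT_SHORT handling in
+ * check_preempt_wakeup_fair().
+ */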
+static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
+ struct sched_entity *pse, struct sched_entity *se)
+{
+ if (!sched_feat(PREEMPT_SHORT))
+ return false;
+
+ if (pse->slice >= se->slice)
+ return false;
+
+ if (!entity_eligible(cfs_rq, pse))
+ return false;
+
+ if (entity_before(pse, se))
+ return true;
+
+ if (!entity_eligible(cfs_rq, se))
+ return true;
+
+ return false;
+}
+
/*
* Used by other classes to account runtime.
*/
@@ -1156,23 +1216,44 @@ s64 update_curr_common(struct rq *rq)
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
+ struct rq *rq = rq_of(cfs_rq);
s64 delta_exec;
+ bool resched;
if (unlikely(!curr))
return;
- delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+ delta_exec = update_curr_se(rq, curr);
if (unlikely(delta_exec <= 0))
return;
curr->vruntime += calc_delta_fair(delta_exec, curr);
- update_deadline(cfs_rq, curr);
+ resched = update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
- if (entity_is_task(curr))
- update_curr_task(task_of(curr), delta_exec);
+ if (entity_is_task(curr)) {
+ struct task_struct *p = task_of(curr);
+
+ update_curr_task(p, delta_exec);
+
+ /*
+ * Any fair task that runs outside of fair_server should
+ * account against fair_server such that it can account for
+ * this time and possibly avoid running this period.
+ */
+ if (p->dl_server != &rq->fair_server)
+ dl_server_update(&rq->fair_server, delta_exec);
+ }
account_cfs_rq_runtime(cfs_rq, delta_exec);
+
+ if (rq->nr_running == 1)
+ return;
+
+ if (resched || did_preempt_short(cfs_rq, curr)) {
+ resched_curr(rq);
+ clear_buddies(cfs_rq, curr);
+ }
}
static void update_curr_fair(struct rq *rq)
@@ -1742,7 +1823,7 @@ static bool pgdat_free_space_enough(struct pglist_data *pgdat)
continue;
if (zone_watermark_ok(zone, 0,
- wmark_pages(zone, WMARK_PROMO) + enough_wmark,
+ promo_wmark_pages(zone) + enough_wmark,
ZONE_MOVABLE, 0))
return true;
}
@@ -1840,8 +1921,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
* The pages in slow memory node should be migrated according
* to hot/cold instead of private/shared.
*/
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !node_is_toptier(src_nid)) {
+ if (folio_use_access_time(folio)) {
struct pglist_data *pgdat;
unsigned long rate_limit;
unsigned int latency, th, def_th;
@@ -3188,6 +3268,15 @@ static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma)
return true;
}
+ /*
+ * This vma has not been accessed for a while, and if the number
+ * of threads in the same process is low, meaning no other
+ * threads can help scan this vma, force a vma scan.
+ */
+ if (READ_ONCE(mm->numa_scan_seq) >
+ (vma->numab_state->prev_scan_seq + get_nr_threads(current)))
+ return true;
+
return false;
}
@@ -3835,7 +3924,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
}
}
-void reweight_task(struct task_struct *p, const struct load_weight *lw)
+static void reweight_task_fair(struct rq *rq, struct task_struct *p,
+ const struct load_weight *lw)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -5178,7 +5268,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
u64 vslice, vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
- se->slice = sysctl_sched_base_slice;
+ if (!se->custom_slice)
+ se->slice = sysctl_sched_base_slice;
vslice = calc_delta_fair(se->slice, se);
/*
@@ -5259,6 +5350,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->vruntime = vruntime - lag;
+ if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
+ se->deadline += se->vruntime;
+ se->rel_deadline = 0;
+ return;
+ }
+
/*
* When joining the competition; the existing tasks will be,
* on average, halfway through their slice, as such start tasks
@@ -5279,6 +5376,9 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
static inline bool cfs_bandwidth_used(void);
static void
+requeue_delayed_entity(struct sched_entity *se);
+
+static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool curr = cfs_rq->curr == se;
@@ -5365,20 +5465,48 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-static void
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+ se->sched_delayed = 0;
+ if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+ se->vlag = 0;
+}
+
+static bool
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- int action = UPDATE_TG;
+ bool sleep = flags & DEQUEUE_SLEEP;
+ update_curr(cfs_rq);
+
+ if (flags & DEQUEUE_DELAYED) {
+ SCHED_WARN_ON(!se->sched_delayed);
+ } else {
+ bool delay = sleep;
+ /*
+ * DELAY_DEQUEUE relies on spurious wakeups; special task
+ * states must not suffer spurious wakeups, so exempt them.
+ */
+ if (flags & DEQUEUE_SPECIAL)
+ delay = false;
+
+ SCHED_WARN_ON(delay && se->sched_delayed);
+
+ if (sched_feat(DELAY_DEQUEUE) && delay &&
+ !entity_eligible(cfs_rq, se)) {
+ if (cfs_rq->next == se)
+ cfs_rq->next = NULL;
+ update_load_avg(cfs_rq, se, 0);
+ se->sched_delayed = 1;
+ return false;
+ }
+ }
+
+ int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
action |= DO_DETACH;
/*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
-
- /*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
@@ -5395,6 +5523,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
clear_buddies(cfs_rq, se);
update_entity_lag(cfs_rq, se);
+ if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
+ se->deadline -= se->vruntime;
+ se->rel_deadline = 1;
+ }
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
@@ -5414,8 +5547,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+ if (flags & DEQUEUE_DELAYED)
+ finish_delayed_dequeue_entity(se);
+
if (cfs_rq->nr_running == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
+
+ return true;
}
static void
@@ -5441,6 +5579,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
update_stats_curr_start(cfs_rq, se);
+ SCHED_WARN_ON(cfs_rq->curr);
cfs_rq->curr = se;
/*
@@ -5461,6 +5600,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
+
/*
* Pick the next process, keeping these things in mind, in this order:
* 1) keep things fair between processes/task groups
@@ -5469,16 +5610,26 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* 4) do not run the "skip" process, if something else is available
*/
static struct sched_entity *
-pick_next_entity(struct cfs_rq *cfs_rq)
+pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
{
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
if (sched_feat(NEXT_BUDDY) &&
- cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
+ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) {
+ /* ->next will never be delayed */
+ SCHED_WARN_ON(cfs_rq->next->sched_delayed);
return cfs_rq->next;
+ }
- return pick_eevdf(cfs_rq);
+ struct sched_entity *se = pick_eevdf(cfs_rq);
+ if (se->sched_delayed) {
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ SCHED_WARN_ON(se->sched_delayed);
+ SCHED_WARN_ON(se->on_rq);
+ return NULL;
+ }
+ return se;
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -5502,6 +5653,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* in !on_rq case, update occurred at dequeue */
update_load_avg(cfs_rq, prev, 0);
}
+ SCHED_WARN_ON(cfs_rq->curr != prev);
cfs_rq->curr = NULL;
}
@@ -5765,6 +5917,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta, dequeue = 1;
+ long rq_h_nr_running = rq->cfs.h_nr_running;
raw_spin_lock(&cfs_b->lock);
/* This will start the period timer if necessary */
@@ -5798,11 +5951,21 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+ int flags;
+
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
goto done;
- dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
+ /*
+ * Abuse SPECIAL to avoid delayed dequeue in this instance.
+ * This avoids teaching dequeue_entities() about throttled
+ * entities and keeps things relatively simple.
+ */
+ flags = DEQUEUE_SLEEP | DEQUEUE_SPECIAL;
+ if (se->sched_delayed)
+ flags |= DEQUEUE_DELAYED;
+ dequeue_entity(qcfs_rq, se, flags);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
@@ -5836,6 +5999,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
/* At this point se is NULL and we are at root level*/
sub_nr_running(rq, task_delta);
+ /* Stop the fair server if throttling resulted in no runnable tasks */
+ if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ dl_server_stop(&rq->fair_server);
done:
/*
* Note: distribution will already see us throttled via the
@@ -5854,6 +6020,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta;
+ long rq_h_nr_running = rq->cfs.h_nr_running;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -5891,8 +6058,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- if (se->on_rq)
+ if (se->on_rq) {
+ SCHED_WARN_ON(se->sched_delayed);
break;
+ }
enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
@@ -5923,6 +6092,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
goto unthrottle_throttle;
}
+ /* Start the fair server if un-throttling resulted in new runnable tasks */
+ if (!rq_h_nr_running && rq->cfs.h_nr_running)
+ dl_server_start(&rq->fair_server);
+
/* At this point se is NULL and we are at root level*/
add_nr_running(rq, task_delta);
@@ -6555,7 +6728,7 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
{
int cpu = cpu_of(rq);
- if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
+ if (!cfs_bandwidth_used())
return;
if (!tick_nohz_full_cpu(cpu))
@@ -6738,6 +6911,37 @@ static int sched_idle_cpu(int cpu)
}
#endif
+static void
+requeue_delayed_entity(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+ * se->sched_delayed should imply: se->on_rq == 1.
+ * Because a delayed entity is one that is still on
+ * the runqueue, competing until it is eligible.
+ */
+ SCHED_WARN_ON(!se->sched_delayed);
+ SCHED_WARN_ON(!se->on_rq);
+
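+ /*
+ * DELAY_ZERO: the entity may have accrued positive lag while delayed;
+ * clip it to 0 by re-placing the entity before it resumes competing.
+ */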
+ if (sched_feat(DELAY_ZERO)) {
+ update_entity_lag(cfs_rq, se);
+ if (se->vlag > 0) {
+ cfs_rq->nr_running--;
+ if (se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, se);
+ se->vlag = 0;
+ place_entity(cfs_rq, se, 0);
+ if (se != cfs_rq->curr)
+ __enqueue_entity(cfs_rq, se);
+ cfs_rq->nr_running++;
+ }
+ }
+
+ update_load_avg(cfs_rq, se, 0);
+ se->sched_delayed = 0;
+}
+
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -6750,6 +6954,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
int task_new = !(flags & ENQUEUE_WAKEUP);
+ int rq_h_nr_running = rq->cfs.h_nr_running;
+ u64 slice = 0;
/*
* The code below (indirectly) updates schedutil which looks at
@@ -6757,7 +6963,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* Let's add the task's estimated utilization to the cfs_rq's
* estimated utilization, before we update schedutil.
*/
- util_est_enqueue(&rq->cfs, p);
+ if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE))))
+ util_est_enqueue(&rq->cfs, p);
+
+ if (flags & ENQUEUE_DELAYED) {
+ requeue_delayed_entity(se);
+ return;
+ }
/*
* If in_iowait is set, the code below may not trigger any cpufreq
@@ -6768,10 +6980,24 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) {
- if (se->on_rq)
+ if (se->on_rq) {
+ if (se->sched_delayed)
+ requeue_delayed_entity(se);
break;
+ }
cfs_rq = cfs_rq_of(se);
+
+ /*
+ * Basically set the slice of group entities to the min_slice of
+ * their respective cfs_rq. This ensures the group can service
+ * its entities in the desired time-frame.
+ */
+ if (slice) {
+ se->slice = slice;
+ se->custom_slice = 1;
+ }
enqueue_entity(cfs_rq, se, flags);
+ slice = cfs_rq_min_slice(cfs_rq);
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
@@ -6793,6 +7019,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se_update_runnable(se);
update_cfs_group(se);
+ se->slice = slice;
+ slice = cfs_rq_min_slice(cfs_rq);
+
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
@@ -6804,6 +7033,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
goto enqueue_throttle;
}
+ if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+ /* Account for idle runtime */
+ if (!rq->nr_running)
+ dl_server_update_idle_time(rq, rq->curr);
+ dl_server_start(&rq->fair_server);
+ }
+
/* At this point se is NULL and we are at root level*/
add_nr_running(rq, 1);
@@ -6833,36 +7069,59 @@ enqueue_throttle:
static void set_next_buddy(struct sched_entity *se);
/*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
+ * Basically dequeue_task_fair(), except it can deal with dequeue_entity()
+ * failing half-way through and resume the dequeue later.
+ *
+ * Returns:
+ * -1 - dequeue delayed
+ * 0 - dequeue throttled
+ * 1 - dequeue complete
*/
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int task_sleep = flags & DEQUEUE_SLEEP;
- int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq);
+ int rq_h_nr_running = rq->cfs.h_nr_running;
+ bool task_sleep = flags & DEQUEUE_SLEEP;
+ bool task_delayed = flags & DEQUEUE_DELAYED;
+ struct task_struct *p = NULL;
+ int idle_h_nr_running = 0;
+ int h_nr_running = 0;
+ struct cfs_rq *cfs_rq;
+ u64 slice = 0;
- util_est_dequeue(&rq->cfs, p);
+ if (entity_is_task(se)) {
+ p = task_of(se);
+ h_nr_running = 1;
+ idle_h_nr_running = task_has_idle_policy(p);
+ } else {
+ cfs_rq = group_cfs_rq(se);
+ slice = cfs_rq_min_slice(cfs_rq);
+ }
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, flags);
- cfs_rq->h_nr_running--;
+ if (!dequeue_entity(cfs_rq, se, flags)) {
+ if (p && &p->se == se)
+ return -1;
+
+ break;
+ }
+
+ cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ idle_h_nr_running = h_nr_running;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
+ return 0;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
+ slice = cfs_rq_min_slice(cfs_rq);
+
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
/*
@@ -6874,6 +7133,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
break;
}
flags |= DEQUEUE_SLEEP;
+ flags &= ~(DEQUEUE_DELAYED | DEQUEUE_SPECIAL);
}
for_each_sched_entity(se) {
@@ -6883,28 +7143,61 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se_update_runnable(se);
update_cfs_group(se);
- cfs_rq->h_nr_running--;
+ se->slice = slice;
+ slice = cfs_rq_min_slice(cfs_rq);
+
+ cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ idle_h_nr_running = h_nr_running;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
-
+ return 0;
}
- /* At this point se is NULL and we are at root level*/
- sub_nr_running(rq, 1);
+ sub_nr_running(rq, h_nr_running);
+
+ if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ dl_server_stop(&rq->fair_server);
/* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
rq->next_balance = jiffies;
-dequeue_throttle:
- util_est_update(&rq->cfs, p, task_sleep);
+ if (p && task_delayed) {
+ SCHED_WARN_ON(!task_sleep);
+ SCHED_WARN_ON(p->on_rq != 1);
+
+ /* Fix-up what dequeue_task_fair() skipped */
+ hrtick_update(rq);
+
+ /* Fix-up what block_task() skipped. */
+ __block_task(rq, p);
+ }
+
+ return 1;
+}
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+ if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
+ util_est_dequeue(&rq->cfs, p);
+
+ if (dequeue_entities(rq, &p->se, flags) < 0) {
+ util_est_update(&rq->cfs, p, DEQUEUE_SLEEP);
+ return false;
+ }
+
+ util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
hrtick_update(rq);
+ return true;
}
#ifdef CONFIG_SMP
@@ -7803,6 +8096,105 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
}
/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs,rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs,rt,dl utilization are the running times measured with rq->clock_task
+ * which excludes things like IRQ and steal-time. These latter are then accrued
+ * in the IRQ utilization.
+ *
+ * The DL bandwidth number OTOH is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long *min,
+ unsigned long *max)
+{
+ unsigned long util, irq, scale;
+ struct rq *rq = cpu_rq(cpu);
+
+ scale = arch_scale_cpu_capacity(cpu);
+
+ /*
+ * Early check to see if IRQ/steal time saturates the CPU, can be
+ * because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= scale)) {
+ if (min)
+ *min = scale;
+ if (max)
+ *max = scale;
+ return scale;
+ }
+
+ if (min) {
+ /*
+ * The minimum utilization returns the highest level between:
+ * - the computed DL bandwidth needed with the IRQ pressure which
+ * steals time from the deadline task.
+ * - The minimum performance requirement for CFS and/or RT.
+ */
+ *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
+
+ /*
+ * When an RT task is runnable and uclamp is not used, we must
+ * ensure that the task will run at maximum compute capacity.
+ */
+ if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
+ *min = max(*min, scale);
+ }
+
+ /*
+ * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ */
+ util = util_cfs + cpu_util_rt(rq);
+ util += cpu_util_dl(rq);
+
+ /*
+ * The maximum hint is a soft bandwidth requirement, which can be lower
+ * than the actual utilization because of uclamp_max requirements.
+ */
+ if (max)
+ *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
+
+ if (util >= scale)
+ return scale;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * IRQ metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ * max - irq
+ * U' = irq + --------- * U
+ * max
+ */
+ util = scale_irq_capacity(util, irq, scale);
+ util += irq;
+
+ return min(scale, util);
+}
+
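+/*
+ * Worked example for the scaling above, with illustrative numbers: for
+ * scale = 1024, irq = 256 and util = 512, scale_irq_capacity() gives
+ * 512 * (1024 - 256) / 1024 = 384, and adding the IRQ time back yields
+ * U' = 256 + 384 = 640 out of 1024.
+ */
+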
+unsigned long sched_cpu_util(int cpu)
+{
+ return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
+}
+
+/*
* energy_env - Utilization landscape for energy estimation.
* @task_busy_time: Utilization contribution by the task for which we test the
* placement. Given by eenv_task_busy_time().
@@ -8286,7 +8678,21 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
static void task_dead_fair(struct task_struct *p)
{
- remove_entity_load_avg(&p->se);
+ struct sched_entity *se = &p->se;
+
+ if (se->sched_delayed) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ if (se->sched_delayed) {
+ update_rq_clock(rq);
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ }
+ task_rq_unlock(rq, p, &rf);
+ }
+
+ remove_entity_load_avg(se);
}
/*
@@ -8322,7 +8728,7 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
- if (rq->nr_running)
+ if (sched_fair_runnable(rq))
return 1;
return sched_balance_newidle(rq, rf) != 0;
@@ -8381,16 +8787,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
if (test_tsk_need_resched(curr))
return;
- /* Idle tasks are by definition preempted by non-idle tasks. */
- if (unlikely(task_has_idle_policy(curr)) &&
- likely(!task_has_idle_policy(p)))
- goto preempt;
-
- /*
- * Batch and idle tasks do not preempt non-idle tasks (their preemption
- * is driven by the tick):
- */
- if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
+ if (!sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);
@@ -8400,7 +8797,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
pse_is_idle = se_is_idle(pse);
/*
- * Preempt an idle group in favor of a non-idle group (and don't preempt
+ * Preempt an idle entity in favor of a non-idle entity (and don't preempt
* in the inverse case).
*/
if (cse_is_idle && !pse_is_idle)
@@ -8408,11 +8805,26 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
if (cse_is_idle != pse_is_idle)
return;
+ /*
+ * BATCH and IDLE tasks do not preempt others.
+ */
+ if (unlikely(!normal_policy(p->policy)))
+ return;
+
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
+ /*
+ * If @p has a shorter slice than current and @p is eligible, override
+ * current's slice protection in order to allow preemption.
+ *
+ * Note that even if @p does not turn out to be the most eligible
+ * task at this moment, current's slice protection will be lost.
+ */
+ if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline)
+ se->vlag = se->deadline + 1;
/*
- * XXX pick_eevdf(cfs_rq) != se ?
+ * If @p has become the most eligible task, force preemption.
*/
if (pick_eevdf(cfs_rq) == pse)
goto preempt;
@@ -8423,7 +8835,6 @@ preempt:
resched_curr(rq);
}
-#ifdef CONFIG_SMP
static struct task_struct *pick_task_fair(struct rq *rq)
{
struct sched_entity *se;
@@ -8435,95 +8846,58 @@ again:
return NULL;
do {
- struct sched_entity *curr = cfs_rq->curr;
-
- /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
+ /* Might not have done put_prev_entity() */
+ if (cfs_rq->curr && cfs_rq->curr->on_rq)
+ update_curr(cfs_rq);
- if (unlikely(check_cfs_rq_runtime(cfs_rq)))
- goto again;
- }
+ if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+ goto again;
- se = pick_next_entity(cfs_rq);
+ se = pick_next_entity(rq, cfs_rq);
+ if (!se)
+ goto again;
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
return task_of(se);
}
-#endif
+
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
- struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
struct task_struct *p;
int new_tasks;
again:
- if (!sched_fair_runnable(rq))
+ p = pick_task_fair(rq);
+ if (!p)
goto idle;
+ se = &p->se;
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (!prev || prev->sched_class != &fair_sched_class)
+ if (prev->sched_class != &fair_sched_class)
goto simple;
+ __put_prev_set_next_dl_server(rq, prev, p);
+
/*
* Because of the set_next_buddy() in dequeue_task_fair() it is rather
* likely that a next task is from the same cgroup as the current.
*
* Therefore attempt to avoid putting and setting the entire cgroup
* hierarchy, only change the part that actually changes.
- */
-
- do {
- struct sched_entity *curr = cfs_rq->curr;
-
- /*
- * Since we got here without doing put_prev_entity() we also
- * have to consider cfs_rq->curr. If it is still a runnable
- * entity, update_curr() will update its vruntime, otherwise
- * forget we've ever seen it.
- */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
-
- /*
- * This call to check_cfs_rq_runtime() will do the
- * throttle and dequeue its entity in the parent(s).
- * Therefore the nr_running test will indeed
- * be correct.
- */
- if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
- cfs_rq = &rq->cfs;
-
- if (!cfs_rq->nr_running)
- goto idle;
-
- goto simple;
- }
- }
-
- se = pick_next_entity(cfs_rq);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
-
- p = task_of(se);
-
- /*
+ *
* Since we haven't yet done put_prev_entity and if the selected task
* is a different task than we started out with, try and touch the
* least amount of cfs_rqs.
*/
if (prev != p) {
struct sched_entity *pse = &prev->se;
+ struct cfs_rq *cfs_rq;
while (!(cfs_rq = is_same_group(se, pse))) {
int se_depth = se->depth;
@@ -8541,38 +8915,15 @@ again:
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
- }
-
- goto done;
-simple:
-#endif
- if (prev)
- put_prev_task(rq, prev);
- do {
- se = pick_next_entity(cfs_rq);
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
+ __set_next_task_fair(rq, p, true);
+ }
- p = task_of(se);
+ return p;
-done: __maybe_unused;
-#ifdef CONFIG_SMP
- /*
- * Move the next running task to the front of
- * the list, so our cfs_tasks list becomes MRU
- * one.
- */
- list_move(&p->se.group_node, &rq->cfs_tasks);
+simple:
#endif
-
- if (hrtick_enabled_fair(rq))
- hrtick_start_fair(rq, p);
-
- update_misfit_status(p, rq);
- sched_fair_update_stop_tick(rq, p);
-
+ put_prev_set_next_task(rq, prev, p);
return p;
idle:
@@ -8601,15 +8952,34 @@ idle:
return NULL;
}
-static struct task_struct *__pick_next_task_fair(struct rq *rq)
+static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+{
+ return pick_next_task_fair(rq, prev, NULL);
+}
+
+static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
{
- return pick_next_task_fair(rq, NULL, NULL);
+ return !!dl_se->rq->cfs.nr_running;
+}
+
+static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
+{
+ return pick_task_fair(dl_se->rq);
+}
+
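+/*
+ * The fair server is a per-rq deadline entity that feeds fair tasks through
+ * the deadline-server mechanism, guaranteeing the fair class bandwidth even
+ * under RT load.
+ */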
+void fair_server_init(struct rq *rq)
+{
+ struct sched_dl_entity *dl_se = &rq->fair_server;
+
+ init_dl_entity(dl_se);
+
+ dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task);
}
/*
* Account for a descheduled task:
*/
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
@@ -9347,28 +9717,18 @@ static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {
static bool __update_blocked_others(struct rq *rq, bool *done)
{
- const struct sched_class *curr_class;
- u64 now = rq_clock_pelt(rq);
- unsigned long hw_pressure;
- bool decayed;
+ bool updated;
/*
* update_load_avg() can call cpufreq_update_util(). Make sure that RT,
* DL and IRQ signals have been updated before updating CFS.
*/
- curr_class = rq->curr->sched_class;
-
- hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
-
- decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
- update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
- update_hw_load_avg(now, rq, hw_pressure) |
- update_irq_load_avg(rq, 0);
+ updated = update_other_load_avgs(rq);
if (others_have_blocked(rq))
*done = false;
- return decayed;
+ return updated;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -12483,7 +12843,7 @@ out:
* - indirectly from a remote scheduler_tick() for NOHZ idle balancing
* through the SMP cross-call nohz_csd_func()
*/
-static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
+static __latent_entropy void sched_balance_softirq(void)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance;
@@ -12702,22 +13062,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
*/
static void task_fork_fair(struct task_struct *p)
{
- struct sched_entity *se = &p->se, *curr;
- struct cfs_rq *cfs_rq;
- struct rq *rq = this_rq();
- struct rq_flags rf;
-
- rq_lock(rq, &rf);
- update_rq_clock(rq);
-
set_task_max_allowed_capacity(p);
-
- cfs_rq = task_cfs_rq(current);
- curr = cfs_rq->curr;
- if (curr)
- update_curr(cfs_rq);
- place_entity(cfs_rq, se, ENQUEUE_INITIAL);
- rq_unlock(rq, &rf);
}
/*
@@ -12829,10 +13174,28 @@ static void attach_task_cfs_rq(struct task_struct *p)
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
detach_task_cfs_rq(p);
+ /*
+ * Since this is called after changing class, this is a little weird
+ * and we cannot use DEQUEUE_DELAYED.
+ */
+ if (p->se.sched_delayed) {
+ /* First, dequeue it from its new class' structures */
+ dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP);
+ /*
+ * Now, clean up the fair_sched_class side of things
+ * related to sched_delayed being true and that wasn't done
+ * due to the generic dequeue not using DEQUEUE_DELAYED.
+ */
+ finish_delayed_dequeue_entity(&p->se);
+ p->se.rel_deadline = 0;
+ __block_task(rq, p);
+ }
}
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
+ SCHED_WARN_ON(p->se.sched_delayed);
+
attach_task_cfs_rq(p);
set_task_max_allowed_capacity(p);
@@ -12850,12 +13213,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
}
}
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_entity *se = &p->se;
@@ -12868,6 +13226,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
list_move(&se->group_node, &rq->cfs_tasks);
}
#endif
+ if (!first)
+ return;
+
+ SCHED_WARN_ON(se->sched_delayed);
+
+ if (hrtick_enabled_fair(rq))
+ hrtick_start_fair(rq, p);
+
+ update_misfit_status(p, rq);
+ sched_fair_update_stop_tick(rq, p);
+}
+
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+{
+ struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -12876,12 +13255,14 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
/* ensure bandwidth has been allocated on our new cfs_rq */
account_cfs_rq_runtime(cfs_rq, 0);
}
+
+ __set_next_task_fair(rq, p, first);
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
- u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
+ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifdef CONFIG_SMP
raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
@@ -12983,28 +13364,35 @@ void online_fair_sched_group(struct task_group *tg)
void unregister_fair_sched_group(struct task_group *tg)
{
- unsigned long flags;
- struct rq *rq;
int cpu;
destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(cpu) {
- if (tg->se[cpu])
- remove_entity_load_avg(tg->se[cpu]);
+ struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
+ struct sched_entity *se = tg->se[cpu];
+ struct rq *rq = cpu_rq(cpu);
+
+ if (se) {
+ if (se->sched_delayed) {
+ guard(rq_lock_irqsave)(rq);
+ if (se->sched_delayed) {
+ update_rq_clock(rq);
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ }
+ list_del_leaf_cfs_rq(cfs_rq);
+ }
+ remove_entity_load_avg(se);
+ }
/*
* Only empty task groups can be destroyed; so we can speculatively
* check on_list without danger of it being re-added.
*/
- if (!tg->cfs_rq[cpu]->on_list)
- continue;
-
- rq = cpu_rq(cpu);
-
- raw_spin_rq_lock_irqsave(rq, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_rq_unlock_irqrestore(rq, flags);
+ if (cfs_rq->on_list) {
+ guard(rq_lock_irqsave)(rq);
+ list_del_leaf_cfs_rq(cfs_rq);
+ }
}
}
@@ -13194,13 +13582,13 @@ DEFINE_SCHED_CLASS(fair) = {
.wakeup_preempt = check_preempt_wakeup_fair,
+ .pick_task = pick_task_fair,
.pick_next_task = __pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
#ifdef CONFIG_SMP
.balance = balance_fair,
- .pick_task = pick_task_fair,
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
@@ -13214,6 +13602,7 @@ DEFINE_SCHED_CLASS(fair) = {
.task_tick = task_tick_fair,
.task_fork = task_fork_fair,
+ .reweight_task = reweight_task_fair,
.prio_changed = prio_changed_fair,
.switched_from = switched_from_fair,
.switched_to = switched_to_fair,
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 143f55df890b..290874079f60 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -5,8 +5,24 @@
* sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
*/
SCHED_FEAT(PLACE_LAG, true)
+/*
+ * Give new tasks half a slice to ease into the competition.
+ */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+/*
+ * Preserve relative virtual deadline on 'migration'.
+ */
+SCHED_FEAT(PLACE_REL_DEADLINE, true)
+/*
+ * Inhibit (wakeup) preemption until the current task has either matched the
+ * 0-lag point or until it has exhausted its slice.
+ */
SCHED_FEAT(RUN_TO_PARITY, true)
+/*
+ * Allow the wakeup of a task with a shorter slice to cancel current's
+ * slice protection (RUN_TO_PARITY).
+ */
+SCHED_FEAT(PREEMPT_SHORT, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
@@ -22,6 +38,18 @@ SCHED_FEAT(NEXT_BUDDY, false)
SCHED_FEAT(CACHE_HOT_BUDDY, true)
/*
+ * Delay dequeueing tasks until they get selected or woken.
+ *
+ * By delaying the dequeue for non-eligible tasks, they remain in the
+ * competition and can burn off their negative lag. When they get selected
+ * they'll have positive lag by definition.
+ *
+ * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
+ */
+SCHED_FEAT(DELAY_DEQUEUE, true)
+SCHED_FEAT(DELAY_ZERO, true)
+
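+/*
+ * Both can be flipped at runtime via the sched features interface, e.g.
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   echo NO_DELAY_DEQUEUE > /sys/kernel/debug/sched/features
+ */
+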
+/*
* Allow wakeup-time preemption of the current task:
*/
SCHED_FEAT(WAKEUP_PREEMPTION, true)
@@ -85,5 +113,3 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(LATENCY_WARN, false)
-
-SCHED_FEAT(HZ_BW, true)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6e78d071beb5..d2f096bb274c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -450,43 +450,37 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
resched_curr(rq);
}
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
+ dl_server_update_idle_time(rq, prev);
+ scx_update_idle(rq, false);
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
+ scx_update_idle(rq, true);
schedstat_inc(rq->sched_goidle);
+ next->se.exec_start = rq_clock_task(rq);
}
-#ifdef CONFIG_SMP
-static struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq)
{
return rq->idle;
}
-#endif
-
-struct task_struct *pick_next_task_idle(struct rq *rq)
-{
- struct task_struct *next = rq->idle;
-
- set_next_task_idle(rq, next, true);
-
- return next;
-}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
-static void
+static bool
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
raw_spin_rq_unlock_irq(rq);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
raw_spin_rq_lock_irq(rq);
+ return true;
}
/*
@@ -528,13 +522,12 @@ DEFINE_SCHED_CLASS(idle) = {
.wakeup_preempt = wakeup_preempt_idle,
- .pick_next_task = pick_next_task_idle,
+ .pick_task = pick_task_idle,
.put_prev_task = put_prev_task_idle,
.set_next_task = set_next_task_idle,
#ifdef CONFIG_SMP
.balance = balance_idle,
- .pick_task = pick_task_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fa52906a4478..a9c65d97b3ca 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -467,3 +467,23 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
#endif
+
+/*
+ * Load avg and utilization metrics need to be updated periodically and before
+ * consumption. This function updates the metrics for all subsystems except for
+ * the fair class. @rq must be locked and have its clock updated.
+ */
+bool update_other_load_avgs(struct rq *rq)
+{
+ u64 now = rq_clock_pelt(rq);
+ const struct sched_class *curr_class = rq->curr->sched_class;
+ unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+
+ lockdep_assert_rq_held(rq);
+
+ /* hw_pressure doesn't care about invariance */
+ return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
+ update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+ update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
+ update_irq_load_avg(rq, 0);
+}
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 2150062949d4..f4f6a0875c66 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,6 +6,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+bool update_other_load_avgs(struct rq *rq);
#ifdef CONFIG_SCHED_HW_PRESSURE
int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 310523c1b9e3..172c588de542 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,10 +8,6 @@ int sched_rr_timeslice = RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
-
-struct rt_bandwidth def_rt_bandwidth;
-
/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
@@ -66,6 +62,40 @@ static int __init sched_rt_sysctl_init(void)
late_initcall(sched_rt_sysctl_init);
#endif
+void init_rt_rq(struct rt_rq *rt_rq)
+{
+ struct rt_prio_array *array;
+ int i;
+
+ array = &rt_rq->active;
+ for (i = 0; i < MAX_RT_PRIO; i++) {
+ INIT_LIST_HEAD(array->queue + i);
+ __clear_bit(i, array->bitmap);
+ }
+ /* delimiter for bitsearch: */
+ __set_bit(MAX_RT_PRIO, array->bitmap);
+
+#if defined CONFIG_SMP
+ rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+ rt_rq->highest_prio.next = MAX_RT_PRIO-1;
+ rt_rq->overloaded = 0;
+ plist_head_init(&rt_rq->pushable_tasks);
+#endif /* CONFIG_SMP */
+ /* We start in dequeued state, because no RT tasks are queued */
+ rt_rq->rt_queued = 0;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ rt_rq->rt_time = 0;
+ rt_rq->rt_throttled = 0;
+ rt_rq->rt_runtime = 0;
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+#endif
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
@@ -130,35 +160,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
do_start_rt_bandwidth(rt_b);
}
-void init_rt_rq(struct rt_rq *rt_rq)
-{
- struct rt_prio_array *array;
- int i;
-
- array = &rt_rq->active;
- for (i = 0; i < MAX_RT_PRIO; i++) {
- INIT_LIST_HEAD(array->queue + i);
- __clear_bit(i, array->bitmap);
- }
- /* delimiter for bit-search: */
- __set_bit(MAX_RT_PRIO, array->bitmap);
-
-#if defined CONFIG_SMP
- rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
- rt_rq->highest_prio.next = MAX_RT_PRIO-1;
- rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks);
-#endif /* CONFIG_SMP */
- /* We start is dequeued state, because no RT tasks are queued */
- rt_rq->rt_queued = 0;
-
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- rt_rq->rt_runtime = 0;
- raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-}
-
-#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
@@ -195,7 +196,6 @@ void unregister_rt_sched_group(struct task_group *tg)
{
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
-
}
void free_rt_sched_group(struct task_group *tg)
@@ -253,8 +253,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
if (!tg->rt_se)
goto err;
- init_rt_bandwidth(&tg->rt_bandwidth,
- ktime_to_ns(def_rt_bandwidth.rt_period), 0);
+ init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
@@ -604,70 +603,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
return &rt_rq->tg->rt_bandwidth;
}
-#else /* !CONFIG_RT_GROUP_SCHED */
-
-static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_runtime;
-}
-
-static inline u64 sched_rt_period(struct rt_rq *rt_rq)
-{
- return ktime_to_ns(def_rt_bandwidth.rt_period);
-}
-
-typedef struct rt_rq *rt_rq_iter_t;
-
-#define for_each_rt_rq(rt_rq, iter, rq) \
- for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
-
-#define for_each_sched_rt_entity(rt_se) \
- for (; rt_se; rt_se = NULL)
-
-static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
-{
- return NULL;
-}
-
-static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
-{
- struct rq *rq = rq_of_rt_rq(rt_rq);
-
- if (!rt_rq->rt_nr_running)
- return;
-
- enqueue_top_rt_rq(rt_rq);
- resched_curr(rq);
-}
-
-static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
-{
- dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
-}
-
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_throttled;
-}
-
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-
-static inline
-struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
-{
- return &cpu_rq(cpu)->rt;
-}
-
-static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
-{
- return &def_rt_bandwidth;
-}
-
-#endif /* CONFIG_RT_GROUP_SCHED */
-
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -859,7 +794,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
const struct cpumask *span;
span = sched_rt_period_mask();
-#ifdef CONFIG_RT_GROUP_SCHED
+
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
@@ -871,7 +806,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
-#endif
+
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
@@ -938,18 +873,6 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
return idle;
}
-static inline int rt_se_prio(struct sched_rt_entity *rt_se)
-{
-#ifdef CONFIG_RT_GROUP_SCHED
- struct rt_rq *rt_rq = group_rt_rq(rt_se);
-
- if (rt_rq)
- return rt_rq->highest_prio.curr;
-#endif
-
- return rt_task_of(rt_se)->prio;
-}
-
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
@@ -993,6 +916,72 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
+#else /* !CONFIG_RT_GROUP_SCHED */
+
+typedef struct rt_rq *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
+#define for_each_sched_rt_entity(rt_se) \
+ for (; rt_se; rt_se = NULL)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+ return NULL;
+}
+
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
+{
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ if (!rt_rq->rt_nr_running)
+ return;
+
+ enqueue_top_rt_rq(rt_rq);
+ resched_curr(rq);
+}
+
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
+{
+ dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return false;
+}
+
+static inline const struct cpumask *sched_rt_period_mask(void)
+{
+ return cpu_online_mask;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return &cpu_rq(cpu)->rt;
+}
+
+#ifdef CONFIG_SMP
+static void __enable_runtime(struct rq *rq) { }
+static void __disable_runtime(struct rq *rq) { }
+#endif
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline int rt_se_prio(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+ if (rt_rq)
+ return rt_rq->highest_prio.curr;
+#endif
+
+ return rt_task_of(rt_se)->prio;
+}
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1000,7 +989,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
- struct sched_rt_entity *rt_se = &curr->rt;
s64 delta_exec;
if (curr->sched_class != &rt_sched_class)
@@ -1010,6 +998,9 @@ static void update_curr_rt(struct rq *rq)
if (unlikely(delta_exec <= 0))
return;
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity *rt_se = &curr->rt;
+
if (!rt_bandwidth_enabled())
return;
@@ -1028,6 +1019,7 @@ static void update_curr_rt(struct rq *rq)
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
}
}
+#endif
}
static void
@@ -1184,7 +1176,6 @@ dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
- start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
@@ -1492,7 +1483,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
enqueue_pushable_task(rq, p);
}
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
@@ -1500,6 +1491,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dequeue_rt_entity(rt_se, flags);
dequeue_pushable_task(rq, p);
+
+ return true;
}
/*
@@ -1755,17 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq)
return p;
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
-{
- struct task_struct *p = pick_task_rt(rq);
-
- if (p)
- set_next_task_rt(rq, p, true);
-
- return p;
-}
-
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
@@ -2652,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = {
.wakeup_preempt = wakeup_preempt_rt,
- .pick_next_task = pick_next_task_rt,
+ .pick_task = pick_task_rt,
.put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
.balance = balance_rt,
- .pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
@@ -2912,19 +2894,6 @@ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
- unsigned long flags;
- int i;
-
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = &cpu_rq(i)->rt;
-
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = global_rt_runtime();
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
-
return 0;
}
#endif /* CONFIG_SYSCTL */
@@ -2944,12 +2913,6 @@ static int sched_rt_global_validate(void)
static void sched_rt_do_global(void)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- def_rt_bandwidth.rt_runtime = global_rt_runtime();
- def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c36cc680361..b1c3588a8f00 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -68,6 +68,7 @@
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>
+#include <linux/delayacct.h>
#include <trace/events/power.h>
#include <trace/events/sched.h>
@@ -192,9 +193,18 @@ static inline int idle_policy(int policy)
return policy == SCHED_IDLE;
}
+static inline int normal_policy(int policy)
+{
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (policy == SCHED_EXT)
+ return true;
+#endif
+ return policy == SCHED_NORMAL;
+}
+
static inline int fair_policy(int policy)
{
- return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+ return normal_policy(policy) || policy == SCHED_BATCH;
}
static inline int rt_policy(int policy)
@@ -245,6 +255,24 @@ static inline void update_avg(u64 *avg, u64 sample)
(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
/*
+ * cgroup weight knobs should use the common MIN, DFL and MAX values, which are
+ * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it
+ * maps pretty well onto the shares value used by the scheduler and the round-trip
+ * conversions preserve the original value over the entire range.
+ */
+static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight)
+{
+ return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL);
+}
+
+static inline unsigned long sched_weight_to_cgroup(unsigned long weight)
+{
+ return clamp_t(unsigned long,
+ DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024),
+ CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
+}
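A round-trip spot check of the conversions above (worked here for illustration, using the stated CGROUP_WEIGHT_DFL == 100 and the 1024 shares scale):

	/*
	 * cgroup 1    -> DIV_ROUND_CLOSEST(1 * 1024, 100)    == 10 shares
	 * cgroup 100  -> DIV_ROUND_CLOSEST(100 * 1024, 100)  == 1024 shares
	 * and back:   DIV_ROUND_CLOSEST(10 * 100, 1024)   == 1
	 *             DIV_ROUND_CLOSEST(1024 * 100, 1024) == 100
	 * so values survive the round trip as the comment promises.
	 */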
+
+/*
* !! For sched_setattr_nocheck() (kernel) only !!
*
* This is actually gross. :(
@@ -335,7 +363,7 @@ extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_bw_check_overflow(int cpu);
-
+extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
/*
* SCHED_DEADLINE supports servers (nested scheduling) with the following
* interface:
@@ -361,7 +389,14 @@ extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
dl_server_has_tasks_f has_tasks,
- dl_server_pick_f pick);
+ dl_server_pick_f pick_task);
+
+extern void dl_server_update_idle_time(struct rq *rq,
+ struct task_struct *p);
+extern void fair_server_init(struct rq *rq);
+extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
+extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
+ u64 runtime, u64 period, bool init);
#ifdef CONFIG_CGROUP_SCHED
@@ -397,16 +432,17 @@ struct cfs_bandwidth {
struct task_group {
struct cgroup_subsys_state css;
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+ /* A positive value indicates that this is a SCHED_IDLE group. */
+ int idle;
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each CPU */
struct sched_entity **se;
/* runqueue "owned" by this group on each CPU */
struct cfs_rq **cfs_rq;
unsigned long shares;
-
- /* A positive value indicates that this is a SCHED_IDLE group. */
- int idle;
-
#ifdef CONFIG_SMP
/*
* load_avg can be heavily contended at clock tick time, so put
@@ -424,6 +460,11 @@ struct task_group {
struct rt_bandwidth rt_bandwidth;
#endif
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 scx_flags; /* SCX_TG_* */
+ u32 scx_weight;
+#endif
+
struct rcu_head rcu;
struct list_head list;
@@ -448,7 +489,7 @@ struct task_group {
};
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
/*
@@ -479,6 +520,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
return walk_tg_tree_from(&root_task_group, down, up, data);
}
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct task_group, css) : NULL;
+}
+
extern int tg_nop(struct task_group *tg, void *data);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -535,6 +581,9 @@ extern void set_task_rq_fair(struct sched_entity *se,
static inline void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
+static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
+static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; }
#endif /* CONFIG_FAIR_GROUP_SCHED */
#else /* CONFIG_CGROUP_SCHED */
@@ -588,6 +637,11 @@ do { \
# define u64_u32_load(var) u64_u32_load_copy(var, var##_copy)
# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
+struct balance_callback {
+ struct balance_callback *next;
+ void (*func)(struct rq *rq);
+};
+
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
@@ -599,17 +653,12 @@ struct cfs_rq {
s64 avg_vruntime;
u64 avg_load;
- u64 exec_clock;
u64 min_vruntime;
#ifdef CONFIG_SCHED_CORE
unsigned int forceidle_seq;
u64 min_vruntime_fi;
#endif
-#ifndef CONFIG_64BIT
- u64 min_vruntime_copy;
-#endif
-
struct rb_root_cached tasks_timeline;
/*
@@ -619,10 +668,6 @@ struct cfs_rq {
struct sched_entity *curr;
struct sched_entity *next;
-#ifdef CONFIG_SCHED_DEBUG
- unsigned int nr_spread_over;
-#endif
-
#ifdef CONFIG_SMP
/*
* CFS load tracking
@@ -696,6 +741,44 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
+#ifdef CONFIG_SCHED_CLASS_EXT
+/* scx_rq->flags, protected by the rq lock */
+enum scx_rq_flags {
+ /*
+ * A hotplugged CPU starts scheduling before rq_online_scx(). Track
+ * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called
+ * only while the BPF scheduler considers the CPU to be online.
+ */
+ SCX_RQ_ONLINE = 1 << 0,
+ SCX_RQ_CAN_STOP_TICK = 1 << 1,
+ SCX_RQ_BAL_KEEP = 1 << 2, /* balance decided to keep current */
+ SCX_RQ_BYPASSING = 1 << 3,
+
+ SCX_RQ_IN_WAKEUP = 1 << 16,
+ SCX_RQ_IN_BALANCE = 1 << 17,
+};
+
+struct scx_rq {
+ struct scx_dispatch_q local_dsq;
+ struct list_head runnable_list; /* runnable tasks on this rq */
+ struct list_head ddsp_deferred_locals; /* deferred ddsps from enq */
+ unsigned long ops_qseq;
+ u64 extra_enq_flags; /* see move_task_to_local_dsq() */
+ u32 nr_running;
+ u32 flags;
+ u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */
+ bool cpu_released;
+ cpumask_var_t cpus_to_kick;
+ cpumask_var_t cpus_to_kick_if_idle;
+ cpumask_var_t cpus_to_preempt;
+ cpumask_var_t cpus_to_wait;
+ unsigned long pnt_seq;
+ struct balance_callback deferred_bal_cb;
+ struct irq_work deferred_irq_work;
+ struct irq_work kick_cpus_irq_work;
+};
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
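A hedged sketch of how the SCX_RQ_ONLINE gating described in the enum comment might look at a call site (hypothetical helper; the real checks live in ext.c, which is not part of this hunk):

	static inline bool scx_rq_online_sketch(struct rq *rq)
	{
		/* Only invoke ops.enqueue/dispatch() while the BPF
		 * scheduler considers this CPU online. */
		return rq->scx.flags & SCX_RQ_ONLINE;
	}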
static inline int rt_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
@@ -726,13 +809,13 @@ struct rt_rq {
#endif /* CONFIG_SMP */
int rt_queued;
+#ifdef CONFIG_RT_GROUP_SCHED
int rt_throttled;
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
-#ifdef CONFIG_RT_GROUP_SCHED
unsigned int rt_nr_boosted;
struct rq *rq;
@@ -820,6 +903,9 @@ static inline void se_update_runnable(struct sched_entity *se)
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
if (entity_is_task(se))
return !!se->on_rq;
else
@@ -834,6 +920,9 @@ static inline void se_update_runnable(struct sched_entity *se) { }
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
return !!se->on_rq;
}
@@ -996,11 +1085,6 @@ struct uclamp_rq {
DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
-struct balance_callback {
- struct balance_callback *next;
- void (*func)(struct rq *rq);
-};
-
/*
* This is the main, per-CPU runqueue data structure.
*
@@ -1043,6 +1127,11 @@ struct rq {
struct cfs_rq cfs;
struct rt_rq rt;
struct dl_rq dl;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct scx_rq scx;
+#endif
+
+ struct sched_dl_entity fair_server;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this CPU: */
@@ -1059,6 +1148,7 @@ struct rq {
unsigned int nr_uninterruptible;
struct task_struct __rcu *curr;
+ struct sched_dl_entity *dl_server;
struct task_struct *idle;
struct task_struct *stop;
unsigned long next_balance;
@@ -1158,7 +1248,6 @@ struct rq {
/* latency stats */
struct sched_info rq_sched_info;
unsigned long long rq_cpu_time;
- /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_count;
@@ -1187,6 +1276,7 @@ struct rq {
/* per rq */
struct rq *core;
struct task_struct *core_pick;
+ struct sched_dl_entity *core_dl_server;
unsigned int core_enabled;
unsigned int core_sched_seq;
struct rb_root core_tree;
@@ -2247,11 +2337,13 @@ extern const u32 sched_prio_to_wmult[40];
*
*/
-#define DEQUEUE_SLEEP 0x01
+#define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */
#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
+#define DEQUEUE_SPECIAL 0x10
#define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */
+#define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
@@ -2267,6 +2359,7 @@ extern const u32 sched_prio_to_wmult[40];
#endif
#define ENQUEUE_INITIAL 0x80
#define ENQUEUE_MIGRATING 0x100
+#define ENQUEUE_DELAYED 0x200
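The "Matches" annotations describe a pairing convention; a minimal sketch of the save/restore idiom they support (assumed flag usage, mirroring __sched_setscheduler()-style callers):

	/* Take @p off the runqueue, tweak it, and put it back unchanged. */
	dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK);
	/* ... modify p's scheduling parameters ... */
	enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);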
#define RETRY_TASK ((void *)-1UL)
@@ -2285,23 +2378,31 @@ struct sched_class {
#endif
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
- void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
+ bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
- struct task_struct *(*pick_next_task)(struct rq *rq);
+ int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+ struct task_struct *(*pick_task)(struct rq *rq);
+ /*
+ * Optional! When implemented, pick_next_task() should be equivalent to:
+ *
+ * next = pick_task();
+ * if (next) {
+ * put_prev_task(prev);
+ * set_next_task_first(next);
+ * }
+ */
+ struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
- void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+ void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
#ifdef CONFIG_SMP
- int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
- struct task_struct * (*pick_task)(struct rq *rq);
-
void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -2323,8 +2424,11 @@ struct sched_class {
* cannot assume the switched_from/switched_to pair is serialized by
* rq->lock. They are however serialized by p->pi_lock.
*/
+ void (*switching_to) (struct rq *this_rq, struct task_struct *task);
void (*switched_from)(struct rq *this_rq, struct task_struct *task);
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
+ void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
+ const struct load_weight *lw);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio);
@@ -2345,7 +2449,7 @@ struct sched_class {
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
WARN_ON_ONCE(rq->curr != prev);
- prev->sched_class->put_prev_task(rq, prev);
+ prev->sched_class->put_prev_task(rq, prev, NULL);
}
static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -2353,6 +2457,30 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
next->sched_class->set_next_task(rq, next, false);
}
+static inline void
+__put_prev_set_next_dl_server(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next)
+{
+ prev->dl_server = NULL;
+ next->dl_server = rq->dl_server;
+ rq->dl_server = NULL;
+}
+
+static inline void put_prev_set_next_task(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next)
+{
+ WARN_ON_ONCE(rq->curr != prev);
+
+ __put_prev_set_next_dl_server(rq, prev, next);
+
+ if (next == prev)
+ return;
+
+ prev->sched_class->put_prev_task(rq, prev, next);
+ next->sched_class->set_next_task(rq, next, true);
+}
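A hedged usage note: when the same task is re-picked, the helper above still hands the rq->dl_server pointer over but skips the class callbacks, so the call is cheap:

	/* Re-pick after a spurious reschedule: no put/set round trip. */
	put_prev_set_next_task(rq, prev, prev);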
/*
* Helper to define a sched_class instance; each one is placed in a separate
@@ -2373,19 +2501,54 @@ const struct sched_class name##_sched_class \
extern struct sched_class __sched_class_highest[];
extern struct sched_class __sched_class_lowest[];
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+extern const struct sched_class ext_sched_class;
+
+DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
+
+#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
+#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
+#else /* !CONFIG_SCHED_CLASS_EXT */
+#define scx_enabled() false
+#define scx_switched_all() false
+#endif /* !CONFIG_SCHED_CLASS_EXT */
+
+/*
+ * Iterate only active classes. SCX can take over all fair tasks or be
+ * completely disabled. If the former, skip fair. If the latter, skip SCX.
+ */
+static inline const struct sched_class *next_active_class(const struct sched_class *class)
+{
+ class++;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (scx_switched_all() && class == &fair_sched_class)
+ class++;
+ if (!scx_enabled() && class == &ext_sched_class)
+ class++;
+#endif
+ return class;
+}
+
#define for_class_range(class, _from, _to) \
for (class = (_from); class < (_to); class++)
#define for_each_class(class) \
for_class_range(class, __sched_class_highest, __sched_class_lowest)
-#define sched_class_above(_a, _b) ((_a) < (_b))
+#define for_active_class_range(class, _from, _to) \
+ for (class = (_from); class != (_to); class = next_active_class(class))
-extern const struct sched_class stop_sched_class;
-extern const struct sched_class dl_sched_class;
-extern const struct sched_class rt_sched_class;
-extern const struct sched_class fair_sched_class;
-extern const struct sched_class idle_sched_class;
+#define for_each_active_class(class) \
+ for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
+
+#define sched_class_above(_a, _b) ((_a) < (_b))
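A minimal sketch of the intended consumer of for_each_active_class() (the real pick loop lives in core.c and is not shown in this hunk):

	const struct sched_class *class;
	struct task_struct *p = NULL;

	for_each_active_class(class) {
		p = class->pick_task(rq);
		if (p)
			break;
	}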
static inline bool sched_stop_runnable(struct rq *rq)
{
@@ -2408,7 +2571,7 @@ static inline bool sched_fair_runnable(struct rq *rq)
}
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_next_task_idle(struct rq *rq);
+extern struct task_struct *pick_task_idle(struct rq *rq);
#define SCA_CHECK 0x01
#define SCA_MIGRATE_DISABLE 0x02
@@ -2424,6 +2587,19 @@ extern void sched_balance_trigger(struct rq *rq);
extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
+static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
+{
+ /* When not in the task's cpumask, no point in looking further. */
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ return false;
+
+ /* Can @cpu run a user thread? */
+ if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p))
+ return false;
+
+ return true;
+}
+
static inline cpumask_t *alloc_user_cpus_ptr(int node)
{
/*
@@ -2457,6 +2633,11 @@ extern int push_cpu_stop(void *arg);
#else /* !CONFIG_SMP: */
+static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
+{
+ return true;
+}
+
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
struct affinity_context *ctx)
{
@@ -2510,12 +2691,9 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
-extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
-
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
-extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -2586,6 +2764,19 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
sched_update_tick_dependency(rq);
}
+static inline void __block_task(struct rq *rq, struct task_struct *p)
+{
+ WRITE_ONCE(p->on_rq, 0);
+ ASSERT_EXCLUSIVE_WRITER(p->on_rq);
+ if (p->sched_contributes_to_load)
+ rq->nr_uninterruptible++;
+
+ if (p->in_iowait) {
+ atomic_inc(&rq->nr_iowait);
+ delayacct_blkio_start();
+ }
+}
+
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
@@ -3099,6 +3290,8 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
return READ_ONCE(rq->avg_rt.util_avg);
}
+#else /* !CONFIG_SMP */
+static inline bool update_other_load_avgs(struct rq *rq) { return false; }
#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
@@ -3607,8 +3800,10 @@ extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *c
extern void __setscheduler_prio(struct task_struct *p, int prio);
extern void set_load_weight(struct task_struct *p, bool update_load);
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern void check_class_changing(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class);
extern void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio);
@@ -3629,4 +3824,24 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *hea
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+/*
+ * Used by SCX in the enable/disable paths to move tasks between sched_classes
+ * and establish invariants.
+ */
+struct sched_enq_and_set_ctx {
+ struct task_struct *p;
+ int queue_flags;
+ bool queued;
+ bool running;
+};
+
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx);
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
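A hedged sketch of the intended enable/disable-path pattern (it wraps the DEQUEUE_SAVE/ENQUEUE_RESTORE pairing documented earlier in this header; flag choice here is an assumption):

	struct sched_enq_and_set_ctx ctx;

	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
	/* ... move @p between the fair and ext sched classes ... */
	sched_enq_and_set_task(&ctx);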
+
+#include "ext.h"
+
#endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index b1b8fe61c532..058dd42e3d9b 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -41,26 +41,17 @@ static struct task_struct *pick_task_stop(struct rq *rq)
return rq->stop;
}
-static struct task_struct *pick_next_task_stop(struct rq *rq)
-{
- struct task_struct *p = pick_task_stop(rq);
-
- if (p)
- set_next_task_stop(rq, p, true);
-
- return p;
-}
-
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
}
-static void
+static bool
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
+ return true;
}
static void yield_task_stop(struct rq *rq)
@@ -68,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
BUG(); /* the stop task should never yield, it's pointless. */
}
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
update_curr_common(rq);
}
@@ -111,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = {
.wakeup_preempt = wakeup_preempt_stop,
- .pick_next_task = pick_next_task_stop,
+ .pick_task = pick_task_stop,
.put_prev_task = put_prev_task_stop,
.set_next_task = set_next_task_stop,
#ifdef CONFIG_SMP
.balance = balance_stop,
- .pick_task = pick_task_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index ae1b42775ef9..aa70beee9895 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p)
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
- if (!rt_prio(p->prio))
+ if (!rt_or_dl_prio(p->prio))
return p->normal_prio;
return p->prio;
}
@@ -258,107 +258,6 @@ int sched_core_idle_cpu(int cpu)
#endif
-#ifdef CONFIG_SMP
-/*
- * This function computes an effective utilization for the given CPU, to be
- * used for frequency selection given the linear relation: f = u * f_max.
- *
- * The scheduler tracks the following metrics:
- *
- * cpu_util_{cfs,rt,dl,irq}()
- * cpu_bw_dl()
- *
- * Where the cfs,rt and dl util numbers are tracked with the same metric and
- * synchronized windows and are thus directly comparable.
- *
- * The cfs,rt,dl utilization are the running times measured with rq->clock_task
- * which excludes things like IRQ and steal-time. These latter are then accrued
- * in the IRQ utilization.
- *
- * The DL bandwidth number OTOH is not a measured metric but a value computed
- * based on the task model parameters and gives the minimal utilization
- * required to meet deadlines.
- */
-unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long *min,
- unsigned long *max)
-{
- unsigned long util, irq, scale;
- struct rq *rq = cpu_rq(cpu);
-
- scale = arch_scale_cpu_capacity(cpu);
-
- /*
- * Early check to see if IRQ/steal time saturates the CPU, can be
- * because of inaccuracies in how we track these -- see
- * update_irq_load_avg().
- */
- irq = cpu_util_irq(rq);
- if (unlikely(irq >= scale)) {
- if (min)
- *min = scale;
- if (max)
- *max = scale;
- return scale;
- }
-
- if (min) {
- /*
- * The minimum utilization returns the highest level between:
- * - the computed DL bandwidth needed with the IRQ pressure which
- * steals time to the deadline task.
- * - The minimum performance requirement for CFS and/or RT.
- */
- *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
-
- /*
- * When an RT task is runnable and uclamp is not used, we must
- * ensure that the task will run at maximum compute capacity.
- */
- if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
- *min = max(*min, scale);
- }
-
- /*
- * Because the time spend on RT/DL tasks is visible as 'lost' time to
- * CFS tasks and we use the same metric to track the effective
- * utilization (PELT windows are synchronized) we can directly add them
- * to obtain the CPU's actual utilization.
- */
- util = util_cfs + cpu_util_rt(rq);
- util += cpu_util_dl(rq);
-
- /*
- * The maximum hint is a soft bandwidth requirement, which can be lower
- * than the actual utilization because of uclamp_max requirements.
- */
- if (max)
- *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
-
- if (util >= scale)
- return scale;
-
- /*
- * There is still idle time; further improve the number by using the
- * IRQ metric. Because IRQ/steal time is hidden from the task clock we
- * need to scale the task numbers:
- *
- * max - irq
- * U' = irq + --------- * U
- * max
- */
- util = scale_irq_capacity(util, irq, scale);
- util += irq;
-
- return min(scale, util);
-}
-
-unsigned long sched_cpu_util(int cpu)
-{
- return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
-}
-#endif /* CONFIG_SMP */
-
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
@@ -401,10 +300,28 @@ static void __setscheduler_params(struct task_struct *p,
p->policy = policy;
- if (dl_policy(policy))
+ if (dl_policy(policy)) {
__setparam_dl(p, attr);
- else if (fair_policy(policy))
+ } else if (fair_policy(policy)) {
p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+ if (attr->sched_runtime) {
+ p->se.custom_slice = 1;
+ p->se.slice = clamp_t(u64, attr->sched_runtime,
+ NSEC_PER_MSEC/10, /* HZ=1000 * 10 */
+ NSEC_PER_MSEC*100); /* HZ=100 / 10 */
+ } else {
+ p->se.custom_slice = 0;
+ p->se.slice = sysctl_sched_base_slice;
+ }
+ }
+
+ /* rt-policy tasks do not have a timerslack */
+ if (rt_or_dl_task_policy(p)) {
+ p->timer_slack_ns = 0;
+ } else if (p->timer_slack_ns == 0) {
+ /* when switching back to non-rt policy, restore timerslack */
+ p->timer_slack_ns = p->default_timer_slack_ns;
+ }
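A hedged userspace sketch of the new custom-slice request (field names per the uapi struct sched_attr; the clamp above bounds requests to [100us, 100ms], so e.g. a 50us request is raised to 100us):

	struct sched_attr attr = {
		.size          = sizeof(attr),
		.sched_policy  = SCHED_OTHER,
		.sched_runtime = 2 * 1000 * 1000,	/* request a 2ms base slice */
	};

	syscall(SYS_sched_setattr, 0, &attr, 0);	/* 0 == current task */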
/*
* __sched_setscheduler() ensures attr->sched_priority == 0 when
@@ -695,12 +612,18 @@ recheck:
goto unlock;
}
+ retval = scx_check_setscheduler(p, policy);
+ if (retval)
+ goto unlock;
+
/*
* If not changing anything there's no need to proceed further,
* but store a possible modification of reset_on_fork.
*/
if (unlikely(policy == p->policy)) {
- if (fair_policy(policy) && attr->sched_nice != task_nice(p))
+ if (fair_policy(policy) &&
+ (attr->sched_nice != task_nice(p) ||
+ (attr->sched_runtime != p->se.slice)))
goto change;
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
goto change;
@@ -797,6 +720,7 @@ change:
__setscheduler_prio(p, newprio);
}
__setscheduler_uclamp(p, attr);
+ check_class_changing(rq, p, prev_class);
if (queued) {
/*
@@ -846,6 +770,9 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
.sched_nice = PRIO_TO_NICE(p->static_prio),
};
+ if (p->se.custom_slice)
+ attr.sched_runtime = p->se.slice;
+
/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
@@ -1012,12 +939,14 @@ err_size:
static void get_params(struct task_struct *p, struct sched_attr *attr)
{
- if (task_has_dl_policy(p))
+ if (task_has_dl_policy(p)) {
__getparam_dl(p, attr);
- else if (task_has_rt_policy(p))
+ } else if (task_has_rt_policy(p)) {
attr->sched_priority = p->rt_priority;
- else
+ } else {
attr->sched_nice = task_nice(p);
+ attr->sched_runtime = p->se.slice;
+ }
}
/**
@@ -1602,6 +1531,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
+ case SCHED_EXT:
ret = 0;
break;
}
@@ -1629,6 +1559,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
+ case SCHED_EXT:
ret = 0;
}
return ret;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 76504b776d03..9748a4c8d668 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -516,6 +516,14 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
+ /*
+ * Because the rq is not a task, dl_add_task_root_domain() did not
+ * move the fair server bw to the rd if it already started.
+ * Add it now.
+ */
+ if (rq->fair_server.dl_server)
+ __dl_server_attach_root(&rq->fair_server, rq);
+
rq_unlock_irqrestore(rq, &rf);
if (old_rd)
diff --git a/kernel/signal.c b/kernel/signal.c
index 60c737e423a1..6e57036f947f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -618,20 +618,18 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
}
/*
- * Dequeue a signal and return the element to the caller, which is
- * expected to free it.
- *
- * All callers have to hold the siglock.
+ * Try to dequeue a signal. If a deliverable signal is found, fill in the
+ * caller-provided siginfo and return the signal number. Otherwise return
+ * 0.
*/
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
- kernel_siginfo_t *info, enum pid_type *type)
+int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
+ struct task_struct *tsk = current;
bool resched_timer = false;
int signr;
- /* We only dequeue private signals from ourselves, we don't let
- * signalfd steal them
- */
+ lockdep_assert_held(&tsk->sighand->siglock);
+
*type = PIDTYPE_PID;
signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
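A caller sketch of the tightened contract (current is implicit and the siglock must already be held, as the new lockdep assertion enforces; this mirrors the do_sigtimedwait() call sites updated later in this patch):

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(&current->blocked, &info, &type);
	spin_unlock_irq(&current->sighand->siglock);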
@@ -1940,10 +1938,11 @@ struct sigqueue *sigqueue_alloc(void)
void sigqueue_free(struct sigqueue *q)
{
- unsigned long flags;
spinlock_t *lock = &current->sighand->siglock;
+ unsigned long flags;
- BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+ if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+ return;
/*
* We must hold ->siglock while testing q->list
* to serialize with collect_signal() or with
@@ -1971,7 +1970,10 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
unsigned long flags;
int ret, result;
- BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+ if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+ return 0;
+ if (WARN_ON_ONCE(q->info.si_code != SI_TIMER))
+ return 0;
ret = -1;
rcu_read_lock();
@@ -2006,7 +2008,6 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
* If an SI_TIMER entry is already queued, just increment
* the overrun count.
*/
- BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
result = TRACE_SIGNAL_ALREADY_PENDING;
goto out;
@@ -2793,8 +2794,7 @@ relock:
type = PIDTYPE_PID;
signr = dequeue_synchronous_signal(&ksig->info);
if (!signr)
- signr = dequeue_signal(current, &current->blocked,
- &ksig->info, &type);
+ signr = dequeue_signal(&current->blocked, &ksig->info, &type);
if (!signr)
break; /* will return 0 */
@@ -2888,6 +2888,8 @@ relock:
current->flags |= PF_SIGNALED;
if (sig_kernel_coredump(signr)) {
+ int ret;
+
if (print_fatal_signals)
print_fatal_signal(signr);
proc_coredump_connector(current);
@@ -2899,7 +2901,24 @@ relock:
* first and our do_group_exit call below will use
* that value and ignore the one we pass it.
*/
- do_coredump(&ksig->info);
+ ret = do_coredump(&ksig->info);
+ if (ret)
+ coredump_report_failure("coredump has not been created, error %d",
+ ret);
+ else if (!IS_ENABLED(CONFIG_COREDUMP)) {
+ /*
+ * Coredump support is not available, so the (stub) call
+ * cannot fail.
+ *
+ * Leave a note, though, that the coredump was not created.
+ * This is not an error or a warning, as disabling coredump
+ * support in the kernel isn't commonplace; the user must have
+ * built the kernel with that custom config deliberately, so
+ * let them know all works as desired.
+ */
+ coredump_report("no coredump collected as "
+ "that is disabled in the kernel configuration");
+ }
}
/*
@@ -3648,7 +3667,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
signotset(&mask);
spin_lock_irq(&tsk->sighand->siglock);
- sig = dequeue_signal(tsk, &mask, info, &type);
+ sig = dequeue_signal(&mask, info, &type);
if (!sig && timeout) {
/*
* None ready, temporarily unblock those we're interested
@@ -3667,7 +3686,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
- sig = dequeue_signal(tsk, &mask, info, &type);
+ sig = dequeue_signal(&mask, info, &type);
}
spin_unlock_irq(&tsk->sighand->siglock);
@@ -3922,11 +3941,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
return -EINVAL;
f = fdget(pidfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
/* Is this a pidfd? */
- pid = pidfd_to_pid(f.file);
+ pid = pidfd_to_pid(fd_file(f));
if (IS_ERR(pid)) {
ret = PTR_ERR(pid);
goto err;
@@ -3939,7 +3958,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
switch (flags) {
case 0:
/* Infer scope from the type of pidfd. */
- if (f.file->f_flags & PIDFD_THREAD)
+ if (fd_file(f)->f_flags & PIDFD_THREAD)
type = PIDTYPE_PID;
else
type = PIDTYPE_TGID;
diff --git a/kernel/smp.c b/kernel/smp.c
index aaffecdad319..f25e20617b7e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -208,12 +208,25 @@ static int csd_lock_wait_getcpu(call_single_data_t *csd)
return -1;
}
+static atomic_t n_csd_lock_stuck;
+
+/**
+ * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
+ *
+ * Returns @true if a CSD-lock acquisition is stuck and has been stuck
+ * long enough for a "non-responsive CSD lock" message to be printed.
+ */
+bool csd_lock_is_stuck(void)
+{
+ return !!atomic_read(&n_csd_lock_stuck);
+}
+
/*
* Complain if too much time spent waiting. Note that only
* the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
* so waiting on other types gets much less information.
*/
-static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
{
int cpu = -1;
int cpux;
@@ -229,15 +242,26 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, in
cpu = csd_lock_wait_getcpu(csd);
pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
*bug_id, raw_smp_processor_id(), cpu);
+ atomic_dec(&n_csd_lock_stuck);
return true;
}
ts2 = sched_clock();
/* How long since we last checked for a stuck CSD lock.*/
ts_delta = ts2 - *ts1;
- if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
+ (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
+ csd_lock_timeout_ns == 0))
return false;
+ if (ts0 > ts2) {
+ /* Our own sched_clock went backward; don't blame another CPU. */
+ ts_delta = ts0 - ts2;
+ pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
+ *ts1 = ts2;
+ return false;
+ }
+
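A worked example of the new throttling threshold (illustrative numbers, assuming csd_lock_timeout_ns == 5e9 and 64 online CPUs, so ilog2(64) == 6):

	/*
	 * nmessages == 0: 5e9 * 1 * 1         =  5s  (first report)
	 * nmessages == 1: 5e9 * 2 * (6/2 + 1) = 40s
	 * nmessages == 2: 5e9 * 3 * 4         = 60s
	 * Complaints back off as they accumulate and as CPU count grows.
	 */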
firsttime = !*bug_id;
if (firsttime)
*bug_id = atomic_inc_return(&csd_bug_count);
@@ -249,9 +273,12 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, in
cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
/* How long since this CSD lock was stuck. */
ts_delta = ts2 - ts0;
- pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
- firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
+ firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
cpu, csd->func, csd->info);
+ (*nmessages)++;
+ if (firsttime)
+ atomic_inc(&n_csd_lock_stuck);
/*
* If the CSD lock is still stuck after 5 minutes, it is unlikely
* to become unstuck. Use a signed comparison to avoid triggering
@@ -290,12 +317,13 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, in
*/
static void __csd_lock_wait(call_single_data_t *csd)
{
+ unsigned long nmessages = 0;
int bug_id = 0;
u64 ts0, ts1;
ts1 = ts0 = sched_clock();
for (;;) {
- if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
+ if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
break;
cpu_relax();
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 02582017759a..d082e7840f88 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -551,7 +551,7 @@ restart:
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
- h->action(h);
+ h->action();
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
@@ -700,7 +700,7 @@ void __raise_softirq_irqoff(unsigned int nr)
or_softirq_pending(1UL << nr);
}
-void open_softirq(int nr, void (*action)(struct softirq_action *))
+void open_softirq(int nr, void (*action)(void))
{
softirq_vec[nr].action = action;
}
@@ -760,8 +760,7 @@ static bool tasklet_clear_sched(struct tasklet_struct *t)
return false;
}
-static void tasklet_action_common(struct softirq_action *a,
- struct tasklet_head *tl_head,
+static void tasklet_action_common(struct tasklet_head *tl_head,
unsigned int softirq_nr)
{
struct tasklet_struct *list;
@@ -805,16 +804,16 @@ static void tasklet_action_common(struct softirq_action *a,
}
}
-static __latent_entropy void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(void)
{
workqueue_softirq_action(false);
- tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
+ tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}
-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(void)
{
workqueue_softirq_action(true);
- tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
+ tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index cedb17ba158a..da821ce258ea 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -251,7 +251,7 @@ static int multi_cpu_stop(void *data)
*/
touch_nmi_watchdog();
}
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
} while (curstate != MULTI_STOP_EXIT);
local_irq_restore(flags);
diff --git a/kernel/sys.c b/kernel/sys.c
index 3a2df1bd9f64..4da31f28fda8 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1916,10 +1916,10 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
int err;
exe = fdget(fd);
- if (!exe.file)
+ if (!fd_file(exe))
return -EBADF;
- inode = file_inode(exe.file);
+ inode = file_inode(fd_file(exe));
/*
* Because the original mm->exe_file points to executable file, make
@@ -1927,14 +1927,14 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
* overall picture.
*/
err = -EACCES;
- if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
+ if (!S_ISREG(inode->i_mode) || path_noexec(&fd_file(exe)->f_path))
goto exit;
- err = file_permission(exe.file, MAY_EXEC);
+ err = file_permission(fd_file(exe), MAY_EXEC);
if (err)
goto exit;
- err = replace_mm_exe_file(mm, exe.file);
+ err = replace_mm_exe_file(mm, fd_file(exe));
exit:
fdput(exe);
return err;
@@ -2557,6 +2557,8 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = current->timer_slack_ns;
break;
case PR_SET_TIMERSLACK:
+ if (rt_or_dl_task_policy(current))
+ break;
if (arg2 <= 0)
current->timer_slack_ns =
current->default_timer_slack_ns;
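A hedged userspace view of the change (illustrative value): for an RT- or deadline-policy task the request now falls through without touching the slack.

	/* Silently ignored under SCHED_FIFO/SCHED_DEADLINE; slack stays 0. */
	prctl(PR_SET_TIMERSLACK, 50000, 0, 0, 0);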
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4354ea231fab..0700f40c53ac 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -419,7 +419,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return 0;
size = nla_total_size(sizeof(struct cgroupstats));
@@ -440,7 +440,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
stats = nla_data(na);
memset(stats, 0, sizeof(*stats));
- rc = cgroupstats_build(stats, f.file->f_path.dentry);
+ rc = cgroupstats_build(stats, fd_file(f)->f_path.dentry);
if (rc < 0) {
nlmsg_free(rep_skb);
goto err;
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5abfa4390673..8bf888641694 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -493,7 +493,7 @@ static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throt
* promised in the context of posix_timer_fn() never
* materialized, but someone should really work on it.
*
- * To prevent DOS fake @now to be 1 jiffie out which keeps
+ * To prevent DOS, fake @now to be 1 jiffy out, which keeps
* the overrun accounting correct but creates an
* inconsistency vs. timer_gettime(2).
*/
@@ -574,15 +574,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
it.alarm.alarmtimer);
enum alarmtimer_restart result = ALARMTIMER_NORESTART;
unsigned long flags;
- int si_private = 0;
spin_lock_irqsave(&ptr->it_lock, flags);
- ptr->it_active = 0;
- if (ptr->it_interval)
- si_private = ++ptr->it_requeue_pending;
-
- if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
+ if (posix_timer_queue_signal(ptr) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals properly. Ensure that
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 60a6484831b1..78c7bd64d0dd 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -190,7 +190,7 @@ int clockevents_tick_resume(struct clock_event_device *dev)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-/* Limit min_delta to a jiffie */
+/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
/**
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index d0538a75f4c6..23336eecb4f4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,6 @@ static u64 suspend_start;
/*
* Threshold: 0.0312s, when doubled: 0.0625s.
- * Also a default for cs->uncertainty_margin when registering clocks.
*/
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
@@ -125,6 +124,13 @@ static u64 suspend_start;
*
* The default of 500 parts per million is based on NTP's limits.
* If a clocksource is good enough for NTP, it is good enough for us!
+ *
+ * In other words, by default, even if a clocksource is extremely
+ * precise (for example, with a sub-nanosecond period), the maximum
+ * permissible skew between the clocksource watchdog and the clocksource
+ * under test is not permitted to go below the 500ppm minimum defined
+ * by MAX_SKEW_USEC. This 500ppm minimum may be overridden using the
+ * CLOCKSOURCE_WATCHDOG_MAX_SKEW_US Kconfig option.
*/
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
@@ -132,6 +138,13 @@ static u64 suspend_start;
#define MAX_SKEW_USEC (125 * WATCHDOG_INTERVAL / HZ)
#endif
+/*
+ * Default for maximum permissible skew when cs->uncertainty_margin is
+ * not specified, and the lower bound even when cs->uncertainty_margin
+ * is specified. This is also the default that is used when registering
+ * clocks with unspecified cs->uncertainty_margin, so this macro is used
+ * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
+ */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
@@ -231,6 +244,7 @@ enum wd_read_status {
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
+ int64_t md = 2 * watchdog->uncertainty_margin;
unsigned int nretries, max_retries;
int64_t wd_delay, wd_seq_delay;
u64 wd_end, wd_end2;
@@ -245,7 +259,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
local_irq_enable();
wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
- if (wd_delay <= WATCHDOG_MAX_SKEW) {
+ if (wd_delay <= md + cs->uncertainty_margin) {
if (nretries > 1 && nretries >= max_retries) {
pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
smp_processor_id(), watchdog->name, nretries);
@@ -258,12 +272,12 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
* there is too much external interference that causes
* significant delay in reading both clocksource and watchdog.
*
- * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
- * report system busy, reinit the watchdog and skip the current
+ * If consecutive WD read-back delay > md, report
+ * system busy, reinit the watchdog and skip the current
* watchdog test.
*/
wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
- if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ if (wd_seq_delay > md)
goto skip_test;
}
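A numeric sketch of the new bounds (illustrative margins, not from the patch): with watchdog->uncertainty_margin == 100us and cs->uncertainty_margin == 50us,

	/*
	 * md = 2 * 100us = 200us
	 * read accepted  when wd_delay     <= md + 50us == 250us
	 * test skipped   when wd_seq_delay >  md        == 200us
	 */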
@@ -1146,14 +1160,19 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
}
/*
- * If the uncertainty margin is not specified, calculate it.
- * If both scale and freq are non-zero, calculate the clock
- * period, but bound below at 2*WATCHDOG_MAX_SKEW. However,
- * if either of scale or freq is zero, be very conservative and
- * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
- * uncertainty margin. Allow stupidly small uncertainty margins
- * to be specified by the caller for testing purposes, but warn
- * to discourage production use of this capability.
+ * If the uncertainty margin is not specified, calculate it. If
+ * both scale and freq are non-zero, calculate the clock period, but
+ * bound below at 2*WATCHDOG_MAX_SKEW, that is, 500ppm by default.
+ * However, if either of scale or freq is zero, be very conservative
+ * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
+ * for the uncertainty margin. Allow stupidly small uncertainty
+ * margins to be specified by the caller for testing purposes,
+ * but warn to discourage production use of this capability.
+ *
+ * Bottom line: The sum of the uncertainty margins of the
+ * watchdog clocksource and the clocksource under test will be at
+ * least 500ppm by default. For more information, please see the
+ * comment preceding CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US above.
*/
if (scale && freq && !cs->uncertainty_margin) {
cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index b8ee320208d4..cddcd08ea827 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1177,7 +1177,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
/*
* CONFIG_TIME_LOW_RES indicates that the system has no way to return
* granular time values. For relative timers we add hrtimer_resolution
- * (i.e. one jiffie) to prevent short timeouts.
+ * (i.e. one jiffy) to prevent short timeouts.
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
@@ -1351,11 +1351,13 @@ static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
}
static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
+ __acquires(&base->softirq_expiry_lock)
{
spin_lock(&base->softirq_expiry_lock);
}
static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
+ __releases(&base->softirq_expiry_lock)
{
spin_unlock(&base->softirq_expiry_lock);
}
@@ -1757,7 +1759,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
-static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
+static __latent_entropy void hrtimer_run_softirq(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
@@ -1975,7 +1977,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
* expiry.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
- if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
+ if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
mode |= HRTIMER_MODE_HARD;
}
@@ -2072,14 +2074,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
struct restart_block *restart;
struct hrtimer_sleeper t;
int ret = 0;
- u64 slack;
-
- slack = current->timer_slack_ns;
- if (rt_task(current))
- slack = 0;
hrtimer_init_sleeper_on_stack(&t, clockid, mode);
- hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
+ hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
goto out;
@@ -2249,7 +2246,7 @@ void __init hrtimers_init(void)
/**
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+ * @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
* @clock_id: timer clock to be used
*/
@@ -2276,13 +2273,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
return -EINTR;
}
- /*
- * Override any slack passed by the user if under
- * rt contraints.
- */
- if (rt_task(current))
- delta = 0;
-
hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_sleeper_start_expires(&t, mode);
@@ -2302,7 +2292,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
/**
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+ * @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
*
* Make the current task sleep until the given expiry time has
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8d2dd214ec68..802b336f4b8c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -660,9 +660,17 @@ rearm:
sched_sync_hw_clock(offset_nsec, res != 0);
}
-void ntp_notify_cmos_timer(void)
+void ntp_notify_cmos_timer(bool offset_set)
{
/*
+ * If the time jumped (using ADJ_SETOFFSET), cancel the sync timer,
+ * which may have been running if the time was synchronized
+ * prior to the ADJ_SETOFFSET call.
+ */
+ if (offset_set)
+ hrtimer_cancel(&sync_hrtimer);
+
+ /*
* When the work is currently executing but has not yet rearmed
* the timer, this queues the work again immediately. No big issue,
* just a pointless work item scheduled.
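A hedged caller sketch: the adjtimex path would forward whether the call included ADJ_SETOFFSET, e.g.

	ntp_notify_cmos_timer(txc->modes & ADJ_SETOFFSET);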
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 23d1b74c3065..5a633dce9057 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -14,9 +14,9 @@ extern int __do_adjtimex(struct __kernel_timex *txc,
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
-extern void ntp_notify_cmos_timer(void);
+extern void ntp_notify_cmos_timer(bool offset_set);
#else
-static inline void ntp_notify_cmos_timer(void) { }
+static inline void ntp_notify_cmos_timer(bool offset_set) { }
#endif
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e9c6f9d0e42c..6bcee4704059 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -453,6 +453,7 @@ static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
struct posix_cputimer_base *base;
+ timer->it_active = 0;
if (!cpu_timer_dequeue(ctmr))
return;
@@ -559,6 +560,7 @@ static void arm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
+ timer->it_active = 1;
if (!cpu_timer_enqueue(&base->tqhead, ctmr))
return;
@@ -584,12 +586,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
- if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
- /*
- * User don't want any signal.
- */
- cpu_timer_setexpires(ctmr, 0);
- } else if (unlikely(timer->sigq == NULL)) {
+ timer->it_active = 0;
+ if (unlikely(timer->sigq == NULL)) {
/*
* This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
@@ -600,9 +598,9 @@ static void cpu_timer_fire(struct k_itimer *timer)
/*
* One-shot timer. Clear it as soon as it's fired.
*/
- posix_timer_event(timer, 0);
+ posix_timer_queue_signal(timer);
cpu_timer_setexpires(ctmr, 0);
- } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
+ } else if (posix_timer_queue_signal(timer)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
@@ -614,6 +612,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
}
}
+static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
+
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
@@ -623,9 +623,10 @@ static void cpu_timer_fire(struct k_itimer *timer)
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec64 *new, struct itimerspec64 *old)
{
+ bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- u64 old_expires, new_expires, old_incr, val;
struct cpu_timer *ctmr = &timer->it.cpu;
+ u64 old_expires, new_expires, now;
struct sighand_struct *sighand;
struct task_struct *p;
unsigned long flags;
@@ -662,10 +663,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
return -ESRCH;
}
- /*
- * Disarm any old timer after extracting its expiry time.
- */
- old_incr = timer->it_interval;
+ /* Retrieve the current expiry time before disarming the timer */
old_expires = cpu_timer_getexpires(ctmr);
if (unlikely(timer->it.cpu.firing)) {
@@ -673,157 +671,122 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = TIMER_RETRY;
} else {
cpu_timer_dequeue(ctmr);
+ timer->it_active = 0;
}
/*
- * We need to sample the current value to convert the new
- * value from to relative and absolute, and to convert the
- * old value from absolute to relative. To set a process
- * timer, we need a sample to balance the thread expiry
- * times (in arm_timer). With an absolute time, we must
- * check if it's already passed. In short, we need a sample.
+ * Sample the current clock for saving the previous setting
+ * and for rearming the timer.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
- val = cpu_clock_sample(clkid, p);
+ now = cpu_clock_sample(clkid, p);
else
- val = cpu_clock_sample_group(clkid, p, true);
+ now = cpu_clock_sample_group(clkid, p, !sigev_none);
+ /* Retrieve the previous expiry value if requested. */
if (old) {
- if (old_expires == 0) {
- old->it_value.tv_sec = 0;
- old->it_value.tv_nsec = 0;
- } else {
- /*
- * Update the timer in case it has overrun already.
- * If it has, we'll report it as having overrun and
- * with the next reloaded timer already ticking,
- * though we are swallowing that pending
- * notification here to install the new setting.
- */
- u64 exp = bump_cpu_timer(timer, val);
-
- if (val < exp) {
- old_expires = exp - val;
- old->it_value = ns_to_timespec64(old_expires);
- } else {
- old->it_value.tv_nsec = 1;
- old->it_value.tv_sec = 0;
- }
- }
+ old->it_value = (struct timespec64){ };
+ if (old_expires)
+ __posix_cpu_timer_get(timer, old, now);
}
+ /* Retry if the timer expiry is running concurrently */
if (unlikely(ret)) {
- /*
- * We are colliding with the timer actually firing.
- * Punt after filling in the timer's old value, and
- * disable this firing since we are already reporting
- * it as an overrun (thanks to bump_cpu_timer above).
- */
unlock_task_sighand(p, &flags);
goto out;
}
- if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
- new_expires += val;
- }
+ /* Convert relative expiry time to absolute */
+ if (new_expires && !(timer_flags & TIMER_ABSTIME))
+ new_expires += now;
+
+ /* Set the new expiry time (might be 0) */
+ cpu_timer_setexpires(ctmr, new_expires);
/*
- * Install the new expiry time (or zero).
- * For a timer with no notification action, we don't actually
- * arm the timer (we'll just fake it for timer_gettime).
+ * Arm the timer if it is not disabled, the new expiry value has
+ * not yet expired and the timer requires signal delivery.
+ * SIGEV_NONE timers are never armed. In case the timer is not
+ * armed, enforce the reevaluation of the timer base so that the
+ * process wide cputime counter can be disabled eventually.
*/
- cpu_timer_setexpires(ctmr, new_expires);
- if (new_expires != 0 && val < new_expires) {
- arm_timer(timer, p);
+ if (likely(!sigev_none)) {
+ if (new_expires && now < new_expires)
+ arm_timer(timer, p);
+ else
+ trigger_base_recalc_expires(timer, p);
}
unlock_task_sighand(p, &flags);
+
+ posix_timer_set_common(timer, new);
+
/*
- * Install the new reload setting, and
- * set up the signal and overrun bookkeeping.
+ * If the new expiry time was already in the past, the timer was not
+ * queued. Fire it immediately even if the thread never runs to
+ * accumulate more time on this clock.
*/
- timer->it_interval = timespec64_to_ktime(new->it_interval);
+ if (!sigev_none && new_expires && now >= new_expires)
+ cpu_timer_fire(timer);
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
+{
+ bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
+ u64 expires, iv = timer->it_interval;
/*
- * This acts as a modification timestamp for the timer,
- * so any automatic reload attempt will punt on seeing
- * that we have reset the timer manually.
+ * Make sure that interval timers are moved forward for the
+ * following cases:
+ * - SIGEV_NONE timers which are never armed
+ * - Timers which expired, but the signal has not yet been
+ * delivered
*/
- timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
- ~REQUEUE_PENDING;
- timer->it_overrun_last = 0;
- timer->it_overrun = -1;
-
- if (val >= new_expires) {
- if (new_expires != 0) {
- /*
- * The designated time already passed, so we notify
- * immediately, even if the thread never runs to
- * accumulate more time on this clock.
- */
- cpu_timer_fire(timer);
- }
+ if (iv && ((timer->it_requeue_pending & REQUEUE_PENDING) || sigev_none))
+ expires = bump_cpu_timer(timer, now);
+ else
+ expires = cpu_timer_getexpires(&timer->it.cpu);
+ /*
+ * Expired interval timers cannot have a remaining time <= 0.
+ * The kernel has to move them forward so that the next
+ * timer expiry is > @now.
+ */
+ if (now < expires) {
+ itp->it_value = ns_to_timespec64(expires - now);
+ } else {
/*
- * Make sure we don't keep around the process wide cputime
- * counter or the tick dependency if they are not necessary.
+ * A single-shot SIGEV_NONE timer must return 0 when it has
+ * expired! Timers which have a real signal delivery mode
+ * must return a remaining time greater than 0 because the
+ * signal has not yet been delivered.
*/
- sighand = lock_task_sighand(p, &flags);
- if (!sighand)
- goto out;
-
- if (!cpu_timer_queued(ctmr))
- trigger_base_recalc_expires(timer, p);
-
- unlock_task_sighand(p, &flags);
+ if (!sigev_none)
+ itp->it_value.tv_nsec = 1;
}
- out:
- rcu_read_unlock();
- if (old)
- old->it_interval = ns_to_timespec64(old_incr);
-
- return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- struct cpu_timer *ctmr = &timer->it.cpu;
- u64 now, expires = cpu_timer_getexpires(ctmr);
struct task_struct *p;
+ u64 now;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
- if (!p)
- goto out;
+ if (p && cpu_timer_getexpires(&timer->it.cpu)) {
+ itp->it_interval = ktime_to_timespec64(timer->it_interval);
- /*
- * Easy part: convert the reload time.
- */
- itp->it_interval = ktime_to_timespec64(timer->it_interval);
-
- if (!expires)
- goto out;
-
- /*
- * Sample the clock to take the difference with the expiry time.
- */
- if (CPUCLOCK_PERTHREAD(timer->it_clock))
- now = cpu_clock_sample(clkid, p);
- else
- now = cpu_clock_sample_group(clkid, p, false);
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
+ now = cpu_clock_sample(clkid, p);
+ else
+ now = cpu_clock_sample_group(clkid, p, false);
- if (now < expires) {
- itp->it_value = ns_to_timespec64(expires - now);
- } else {
- /*
- * The timer should have expired already, but the firing
- * hasn't taken place yet. Say it's just about to expire.
- */
- itp->it_value.tv_nsec = 1;
- itp->it_value.tv_sec = 0;
+ __posix_cpu_timer_get(timer, itp, now);
}
-out:
rcu_read_unlock();
}
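The remaining-time rules implemented by __posix_cpu_timer_get() above, restated as a stand-alone helper (illustrative sketch; example_remaining_ns is a hypothetical name):

static u64 example_remaining_ns(u64 expires, u64 now, bool sigev_none)
{
        /* Not yet expired: report the real distance to expiry */
        if (now < expires)
                return expires - now;

        /*
         * Expired: a one-shot SIGEV_NONE timer reports 0, while a timer
         * with a real signal delivery mode reports 1ns until the signal
         * has actually been delivered.
         */
        return sigev_none ? 0 : 1;
}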
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index b924f0f096fa..4576aaed13b2 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -277,10 +277,17 @@ void posixtimer_rearm(struct kernel_siginfo *info)
unlock_timer(timr, flags);
}
-int posix_timer_event(struct k_itimer *timr, int si_private)
+int posix_timer_queue_signal(struct k_itimer *timr)
{
+ int ret, si_private = 0;
enum pid_type type;
- int ret;
+
+ lockdep_assert_held(&timr->it_lock);
+
+ timr->it_active = 0;
+ if (timr->it_interval)
+ si_private = ++timr->it_requeue_pending;
+
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->posixtimer_rearm().
@@ -309,19 +316,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
*/
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
+ struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);
enum hrtimer_restart ret = HRTIMER_NORESTART;
- struct k_itimer *timr;
unsigned long flags;
- int si_private = 0;
- timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
- timr->it_active = 0;
- if (timr->it_interval != 0)
- si_private = ++timr->it_requeue_pending;
-
- if (posix_timer_event(timr, si_private)) {
+ if (posix_timer_queue_signal(timr)) {
/*
* The signal was not queued due to SIG_IGN. As a
* consequence the timer is not going to be rearmed from
@@ -338,14 +339,14 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
* change to the signal handling code.
*
* For now let timers with an interval less than a
- * jiffie expire every jiffie and recheck for a
+ * jiffy expire every jiffy and recheck for a
* valid signal handler.
*
* This avoids interrupt starvation in case of a
* very small interval, which would expire the
* timer immediately again.
*
- * Moving now ahead of time by one jiffie tricks
+ * Moving now ahead of time by one jiffy tricks
* hrtimer_forward() to expire the timer later,
* while it still maintains the overrun accuracy
* for the price of a slight inconsistency in the
@@ -515,7 +516,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
spin_lock_irq(&current->sighand->siglock);
/* This makes the timer valid in the hash table */
WRITE_ONCE(new_timer->it_signal, current->signal);
- list_add(&new_timer->list, &current->signal->posix_timers);
+ hlist_add_head(&new_timer->list, &current->signal->posix_timers);
spin_unlock_irq(&current->sighand->siglock);
/*
* After unlocking sighand::siglock @new_timer is subject to
@@ -856,6 +857,23 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
return lock_timer(timer_id, flags);
}
+/*
+ * Set up the new interval and reset the signal delivery data
+ */
+void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
+{
+ if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
+ timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
+ else
+ timer->it_interval = 0;
+
+ /* Prevent reloading in case there is a signal pending */
+ timer->it_requeue_pending = (timer->it_requeue_pending + 2) & ~REQUEUE_PENDING;
+ /* Reset overrun accounting */
+ timer->it_overrun_last = 0;
+ timer->it_overrun = -1LL;
+}
+
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
@@ -878,15 +896,12 @@ int common_timer_set(struct k_itimer *timr, int flags,
return TIMER_RETRY;
timr->it_active = 0;
- timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
- ~REQUEUE_PENDING;
- timr->it_overrun_last = 0;
+ posix_timer_set_common(timr, new_setting);
- /* Switch off the timer when it_value is zero */
+ /* Keep timer disarmed when it_value is zero */
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
return 0;
- timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
expires = timespec64_to_ktime(new_setting->it_value);
if (flags & TIMER_ABSTIME)
expires = timens_ktime_to_host(timr->it_clock, expires);
@@ -904,7 +919,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
const struct k_clock *kc;
struct k_itimer *timr;
unsigned long flags;
- int error = 0;
+ int error;
if (!timespec64_valid(&new_spec64->it_interval) ||
!timespec64_valid(&new_spec64->it_value))
@@ -918,6 +933,9 @@ retry:
if (!timr)
return -EINVAL;
+ if (old_spec64)
+ old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);
+
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -1021,7 +1039,7 @@ retry_delete:
}
spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
+ hlist_del(&timer->list);
spin_unlock(&current->sighand->siglock);
/*
* A concurrent lookup could check timer::it_signal lockless. It
@@ -1071,7 +1089,7 @@ retry_delete:
goto retry_delete;
}
- list_del(&timer->list);
+ hlist_del(&timer->list);
/*
* Setting timer::it_signal to NULL is technically not required
@@ -1092,22 +1110,19 @@ retry_delete:
*/
void exit_itimers(struct task_struct *tsk)
{
- struct list_head timers;
- struct k_itimer *tmr;
+ struct hlist_head timers;
- if (list_empty(&tsk->signal->posix_timers))
+ if (hlist_empty(&tsk->signal->posix_timers))
return;
/* Protect against concurrent read via /proc/$PID/timers */
spin_lock_irq(&tsk->sighand->siglock);
- list_replace_init(&tsk->signal->posix_timers, &timers);
+ hlist_move_list(&tsk->signal->posix_timers, &timers);
spin_unlock_irq(&tsk->sighand->siglock);
/* The timers are no longer accessible via tsk::signal */
- while (!list_empty(&timers)) {
- tmr = list_first_entry(&timers, struct k_itimer, list);
- itimer_delete(tmr);
- }
+ while (!hlist_empty(&timers))
+ itimer_delete(hlist_entry(timers.first, struct k_itimer, list));
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
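The hlist drain in exit_itimers() above generalizes to the usual detach-then-destroy pattern (illustrative sketch; drain_timers is a hypothetical helper):

static void drain_timers(struct hlist_head *src, spinlock_t *lock,
                         void (*destroy)(struct k_itimer *))
{
        struct hlist_head timers;

        if (hlist_empty(src))
                return;

        /* Detach the whole list under the lock ... */
        spin_lock_irq(lock);
        hlist_move_list(src, &timers);
        spin_unlock_irq(lock);

        /* ... then destroy the entries without holding it */
        while (!hlist_empty(&timers))
                destroy(hlist_entry(timers.first, struct k_itimer, list));
}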
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index f32a2ebba9b8..4784ea65f685 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -36,10 +36,11 @@ extern const struct k_clock clock_process;
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;
-int posix_timer_event(struct k_itimer *timr, int si_private);
+int posix_timer_queue_signal(struct k_itimer *timr);
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
struct itimerspec64 *old_setting);
+void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting);
int common_timer_del(struct k_itimer *timer);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5391e4167d60..7e6f409bf311 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2553,6 +2553,7 @@ int do_adjtimex(struct __kernel_timex *txc)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct audit_ntp_data ad;
+ bool offset_set = false;
bool clock_set = false;
struct timespec64 ts;
unsigned long flags;
@@ -2575,6 +2576,7 @@ int do_adjtimex(struct __kernel_timex *txc)
if (ret)
return ret;
+ offset_set = delta.tv_sec != 0;
audit_tk_injoffset(delta);
}
@@ -2608,7 +2610,7 @@ int do_adjtimex(struct __kernel_timex *txc)
if (clock_set)
clock_was_set(CLOCK_SET_WALL);
- ntp_notify_cmos_timer();
+ ntp_notify_cmos_timer(offset_set);
return ret;
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 64b0d8a0aa0f..0fc9d066a7be 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -365,7 +365,7 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
rem = j % HZ;
/*
- * If the target jiffie is just after a whole second (which can happen
+ * If the target jiffy is just after a whole second (which can happen
* due to delays of the timer irq, long irq off times etc etc) then
* we should round down to the whole second, not up. Use 1/4th second
* as cutoff for this rounding as an extreme upper bound for this.
@@ -672,7 +672,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
* Set the next expiry time and kick the CPU so it
* can reevaluate the wheel:
*/
- base->next_expiry = bucket_expiry;
+ WRITE_ONCE(base->next_expiry, bucket_expiry);
base->timers_pending = true;
base->next_expiry_recalc = false;
trigger_dyntick_cpu(base, timer);
@@ -1561,6 +1561,8 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
* the waiter to acquire the lock and make progress.
*/
static void timer_sync_wait_running(struct timer_base *base)
+ __releases(&base->lock) __releases(&base->expiry_lock)
+ __acquires(&base->expiry_lock) __acquires(&base->lock)
{
if (atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
@@ -1898,7 +1900,7 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
*
* Store next expiry time in base->next_expiry.
*/
-static void next_expiry_recalc(struct timer_base *base)
+static void timer_recalc_next_expiry(struct timer_base *base)
{
unsigned long clk, next, adj;
unsigned lvl, offset = 0;
@@ -1928,7 +1930,7 @@ static void next_expiry_recalc(struct timer_base *base)
* bits are zero, we look at the next level as is. If not we
* need to advance it by one because that's going to be the
* next expiring bucket in that level. base->clk is the next
- * expiring jiffie. So in case of:
+ * expiring jiffy. So in case of:
*
* LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
* 0 0 0 0 0 0
@@ -1964,7 +1966,7 @@ static void next_expiry_recalc(struct timer_base *base)
clk += adj;
}
- base->next_expiry = next;
+ WRITE_ONCE(base->next_expiry, next);
base->next_expiry_recalc = false;
base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
}
@@ -1993,7 +1995,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
return basem;
/*
- * Round up to the next jiffie. High resolution timers are
+ * Round up to the next jiffy. High resolution timers are
* off, so the hrtimers are expired in the tick and we need to
* make sure that this tick really expires the timer to avoid
* a ping pong of the nohz stop code.
@@ -2007,7 +2009,7 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
unsigned long basej)
{
if (base->next_expiry_recalc)
- next_expiry_recalc(base);
+ timer_recalc_next_expiry(base);
/*
* Move next_expiry for the empty base into the future to prevent an
@@ -2018,7 +2020,7 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
* easy comparable to find out which base holds the first pending timer.
*/
if (!base->timers_pending)
- base->next_expiry = basej + NEXT_TIMER_MAX_DELTA;
+ WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA);
return base->next_expiry;
}
@@ -2252,7 +2254,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
base_global, &tevt);
/*
- * If the next event is only one jiffie ahead there is no need to call
+ * If the next event is only one jiffy ahead there is no need to call
* timer migration hierarchy related functions. The value for the next
* global timer in @tevt struct equals then KTIME_MAX. This is also
* true, when the timer base is idle.
@@ -2411,7 +2413,7 @@ static inline void __run_timers(struct timer_base *base)
* jiffies to avoid endless requeuing to current jiffies.
*/
base->clk++;
- next_expiry_recalc(base);
+ timer_recalc_next_expiry(base);
while (levels--)
expire_timers(base, heads + levels);
@@ -2440,7 +2442,7 @@ static void run_timer_base(int index)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(void)
{
run_timer_base(BASE_LOCAL);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
@@ -2462,8 +2464,40 @@ static void run_local_timers(void)
hrtimer_run_queues();
for (int i = 0; i < NR_BASES; i++, base++) {
- /* Raise the softirq only if required. */
- if (time_after_eq(jiffies, base->next_expiry) ||
+ /*
+ * Raise the softirq only if required.
+ *
+ * timer_base::next_expiry can be written by a remote CPU while
+ * holding the lock. If this write happens at the same time than
+ * the lockless local read, sanity checker could complain about
+ * data corruption.
+ *
+ * There are two possible situations where
+ * timer_base::next_expiry is written by a remote CPU:
+ *
+ * 1. Remote CPU expires global timers of this CPU and updates
+ * timer_base::next_expiry of BASE_GLOBAL afterwards in
+ * next_timer_interrupt() or timer_recalc_next_expiry(). The
+ * worst outcome is a superfluous raise of the timer softirq
+ * when the not yet updated value is read.
+ *
+ * 2. A new first pinned timer is enqueued by a remote CPU
+ * and therefore timer_base::next_expiry of BASE_LOCAL is
+ * updated. When this update is missed, this isn't a
+ * problem, as an IPI is executed nevertheless when the CPU
+ * was idle before. When the CPU wasn't idle but the update
+ * is missed, then the timer would expire one jiffy late -
+ * bad luck.
+ *
+ * In those unlikely corner cases the worst outcome is only a
+ * one-jiffy delay or a superfluous raise of the softirq, which
+ * is cheaper than always doing the check while holding
+ * the lock.
+ *
+ * Possible remote writers use WRITE_ONCE(); the local reader
+ * therefore uses READ_ONCE().
+ */
+ if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) ||
(i == BASE_DEF && tmigr_requires_handle_remote())) {
raise_softirq(TIMER_SOFTIRQ);
return;
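The locking asymmetry described in the comment above, reduced to its two halves (illustrative sketch; the example_* names are hypothetical):

/* Remote CPU, while holding base->lock */
static void example_writer(struct timer_base *base,
                           unsigned long bucket_expiry)
{
        WRITE_ONCE(base->next_expiry, bucket_expiry);
}

/* Local CPU, lockless fast path */
static bool example_reader(struct timer_base *base)
{
        return time_after_eq(jiffies, READ_ONCE(base->next_expiry));
}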
@@ -2730,7 +2764,7 @@ void __init init_timers(void)
*/
void msleep(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout)
timeout = schedule_timeout_uninterruptible(timeout);
@@ -2744,7 +2778,7 @@ EXPORT_SYMBOL(msleep);
*/
unsigned long msleep_interruptible(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout && !signal_pending(current))
timeout = schedule_timeout_interruptible(timeout);
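The effect of dropping the extra jiffy, in numbers (assuming HZ=100, i.e. 10ms per jiffy; msecs_to_jiffies() rounds up, and the timer wheel's enqueue path already guarantees a timeout cannot expire early). A trivial sketch with a hypothetical helper:

static unsigned long example_msleep_jiffies(unsigned int msecs)
{
        /* Before this patch: msecs_to_jiffies(msecs) + 1, i.e. 4 jiffies
         * for 25ms at HZ=100; now: 3 jiffies, still >= 25ms of sleep. */
        return msecs_to_jiffies(msecs);
}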
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index cd098846e251..a582cd25ca87 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -24,7 +24,6 @@
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
-#include <linux/fileattr.h>
#include <net/bpf_sk_storage.h>
@@ -798,29 +797,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
.ret_btf_id = &bpf_task_pt_regs_ids[0],
};
-BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
-{
- struct bpf_array *array = container_of(map, struct bpf_array, map);
- struct cgroup *cgrp;
-
- if (unlikely(idx >= array->map.max_entries))
- return -E2BIG;
-
- cgrp = READ_ONCE(array->ptrs[idx]);
- if (unlikely(!cgrp))
- return -EAGAIN;
-
- return task_under_cgroup_hierarchy(current, cgrp);
-}
-
-static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
- .func = bpf_current_task_under_cgroup,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_ANYTHING,
-};
-
struct send_signal_irq_work {
struct irq_work irq_work;
struct task_struct *task;
@@ -1226,7 +1202,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_LONG,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u64),
};
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
@@ -1242,7 +1219,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
.func = get_func_ret,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_LONG,
+ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg2_size = sizeof(u64),
};
BPF_CALL_1(get_func_arg_cnt, void *, ctx)
@@ -1439,73 +1417,6 @@ static int __init bpf_key_sig_kfuncs_init(void)
late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
-/* filesystem kfuncs */
-__bpf_kfunc_start_defs();
-
-/**
- * bpf_get_file_xattr - get xattr of a file
- * @file: file to get xattr from
- * @name__str: name of the xattr
- * @value_p: output buffer of the xattr value
- *
- * Get xattr *name__str* of *file* and store the output in *value_ptr*.
- *
- * For security reasons, only *name__str* with prefix "user." is allowed.
- *
- * Return: 0 on success, a negative value on error.
- */
-__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
- struct bpf_dynptr *value_p)
-{
- struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
- struct dentry *dentry;
- u32 value_len;
- void *value;
- int ret;
-
- if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return -EPERM;
-
- value_len = __bpf_dynptr_size(value_ptr);
- value = __bpf_dynptr_data_rw(value_ptr, value_len);
- if (!value)
- return -EINVAL;
-
- dentry = file_dentry(file);
- ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
- if (ret)
- return ret;
- return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
-}
-
-__bpf_kfunc_end_defs();
-
-BTF_KFUNCS_START(fs_kfunc_set_ids)
-BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
-BTF_KFUNCS_END(fs_kfunc_set_ids)
-
-static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
-{
- if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
- return 0;
-
- /* Only allow to attach from LSM hooks, to avoid recursion */
- return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
-}
-
-static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
- .owner = THIS_MODULE,
- .set = &fs_kfunc_set_ids,
- .filter = bpf_get_file_xattr_filter,
-};
-
-static int __init bpf_fs_kfuncs_init(void)
-{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
-}
-
-late_initcall(bpf_fs_kfuncs_init);
-
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1548,8 +1459,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_numa_node_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
- case BPF_FUNC_current_task_under_cgroup:
- return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_probe_write_user:
@@ -1578,6 +1487,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_cgrp_storage_get_proto;
case BPF_FUNC_cgrp_storage_delete:
return &bpf_cgrp_storage_delete_proto;
+ case BPF_FUNC_current_task_under_cgroup:
+ return &bpf_current_task_under_cgroup_proto;
#endif
case BPF_FUNC_send_signal:
return &bpf_send_signal_proto;
@@ -1598,7 +1509,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
case BPF_FUNC_get_task_stack:
- return &bpf_get_task_stack_proto;
+ return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
+ : &bpf_get_task_stack_proto;
case BPF_FUNC_copy_from_user:
return &bpf_copy_from_user_proto;
case BPF_FUNC_copy_from_user_task:
@@ -1654,7 +1566,7 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_stackid:
return &bpf_get_stackid_proto;
case BPF_FUNC_get_stack:
- return &bpf_get_stack_proto;
+ return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
@@ -3160,6 +3072,7 @@ struct bpf_uprobe {
loff_t offset;
unsigned long ref_ctr_offset;
u64 cookie;
+ struct uprobe *uprobe;
struct uprobe_consumer consumer;
};
@@ -3178,15 +3091,15 @@ struct bpf_uprobe_multi_run_ctx {
struct bpf_uprobe *uprobe;
};
-static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
- u32 cnt)
+static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
{
u32 i;
- for (i = 0; i < cnt; i++) {
- uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
- &uprobes[i].consumer);
- }
+ for (i = 0; i < cnt; i++)
+ uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
+
+ if (cnt)
+ uprobe_unregister_sync();
}
static void bpf_uprobe_multi_link_release(struct bpf_link *link)
@@ -3194,7 +3107,7 @@ static void bpf_uprobe_multi_link_release(struct bpf_link *link)
struct bpf_uprobe_multi_link *umulti_link;
umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
- bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+ bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
if (umulti_link->task)
put_task_struct(umulti_link->task);
path_put(&umulti_link->path);
@@ -3298,7 +3211,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
struct bpf_run_ctx *old_run_ctx;
int err = 0;
- if (link->task && current->mm != link->task->mm)
+ if (link->task && !same_thread_group(current, link->task))
return 0;
if (sleepable)
@@ -3322,8 +3235,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
}
static bool
-uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
- struct mm_struct *mm)
+uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
{
struct bpf_uprobe *uprobe;
@@ -3480,22 +3392,26 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
&bpf_uprobe_multi_link_lops, prog);
for (i = 0; i < cnt; i++) {
- err = uprobe_register_refctr(d_real_inode(link->path.dentry),
- uprobes[i].offset,
- uprobes[i].ref_ctr_offset,
- &uprobes[i].consumer);
- if (err) {
- bpf_uprobe_unregister(&path, uprobes, i);
- goto error_free;
+ uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
+ uprobes[i].offset,
+ uprobes[i].ref_ctr_offset,
+ &uprobes[i].consumer);
+ if (IS_ERR(uprobes[i].uprobe)) {
+ err = PTR_ERR(uprobes[i].uprobe);
+ link->cnt = i;
+ goto error_unregister;
}
}
err = bpf_link_prime(&link->link, &link_primer);
if (err)
- goto error_free;
+ goto error_unregister;
return bpf_link_settle(&link_primer);
+error_unregister:
+ bpf_uprobe_unregister(uprobes, link->cnt);
+
error_free:
kvfree(uprobes);
kfree(link);
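The reworked uprobe API used above, reduced to its attach/detach skeleton (illustrative sketch; the example_* names are hypothetical): uprobe_register() now returns the uprobe object, and unregistering splits into per-consumer _nosync calls plus a single final synchronization.

static int example_attach(struct inode *inode, struct bpf_uprobe *u)
{
        u->uprobe = uprobe_register(inode, u->offset,
                                    u->ref_ctr_offset, &u->consumer);
        return IS_ERR(u->uprobe) ? PTR_ERR(u->uprobe) : 0;
}

static void example_detach(struct bpf_uprobe *uprobes, u32 cnt)
{
        u32 i;

        for (i = 0; i < cnt; i++)
                uprobe_unregister_nosync(uprobes[i].uprobe,
                                         &uprobes[i].consumer);
        if (cnt)
                uprobe_unregister_sync();       /* one RCU sync per batch */
}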
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cebd879a30cb..77dc0b25140e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -32,6 +32,8 @@
#include <asm/local64.h>
#include <asm/local.h>
+#include "trace.h"
+
/*
* The "absolute" timestamp in the buffer is only 59 bits.
* If a clock has the 5 MSBs set, it needs to be saved and
@@ -42,6 +44,21 @@
static void update_pages_handler(struct work_struct *work);
+#define RING_BUFFER_META_MAGIC 0xBADFEED
+
+struct ring_buffer_meta {
+ int magic;
+ int struct_size;
+ unsigned long text_addr;
+ unsigned long data_addr;
+ unsigned long first_buffer;
+ unsigned long head_buffer;
+ unsigned long commit_buffer;
+ __u32 subbuf_size;
+ __u32 nr_subbufs;
+ int buffers[];
+};
+
/*
* The ring buffer header is special. We must keep it up to date manually.
*/
@@ -342,7 +359,8 @@ struct buffer_page {
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
unsigned order; /* order of the page */
- u32 id; /* ID for external mapping */
+ u32 id:30; /* ID for external mapping */
+ u32 range:1; /* Mapped via a range */
struct buffer_data_page *page; /* Actual data page */
};
@@ -373,7 +391,9 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
static void free_buffer_page(struct buffer_page *bpage)
{
- free_pages((unsigned long)bpage->page, bpage->order);
+ /* Range pages are not to be freed */
+ if (!bpage->range)
+ free_pages((unsigned long)bpage->page, bpage->order);
kfree(bpage);
}
@@ -491,9 +511,11 @@ struct ring_buffer_per_cpu {
unsigned long pages_removed;
unsigned int mapped;
+ unsigned int user_mapped; /* user space mapping */
struct mutex mapping_lock;
unsigned long *subbuf_ids; /* ID to subbuf VA */
struct trace_buffer_meta *meta_page;
+ struct ring_buffer_meta *ring_meta;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
@@ -523,6 +545,12 @@ struct trace_buffer {
struct rb_irq_work irq_work;
bool time_stamp_abs;
+ unsigned long range_addr_start;
+ unsigned long range_addr_end;
+
+ long last_text_delta;
+ long last_data_delta;
+
unsigned int subbuf_size;
unsigned int subbuf_order;
unsigned int max_data_size;
@@ -1239,6 +1267,11 @@ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
* Set the previous list pointer to have the HEAD flag.
*/
rb_set_list_to_head(head->list.prev);
+
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->head_buffer = (unsigned long)head->page;
+ }
}
static void rb_list_head_clear(struct list_head *list)
@@ -1478,9 +1511,484 @@ static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
}
}
+/*
+ * Take an address, add the meta data size as well as the array of
+ * array subbuffer indexes, then align it to a subbuffer size.
+ *
+ * This is used to help find the next per cpu subbuffer within a mapped range.
+ */
+static unsigned long
+rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
+{
+ addr += sizeof(struct ring_buffer_meta) +
+ sizeof(int) * nr_subbufs;
+ return ALIGN(addr, subbuf_size);
+}
+
+/*
+ * Return the ring_buffer_meta for a given @cpu.
+ */
+static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
+{
+ int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+ unsigned long ptr = buffer->range_addr_start;
+ struct ring_buffer_meta *meta;
+ int nr_subbufs;
+
+ if (!ptr)
+ return NULL;
+
+ /* When nr_pages passed in is zero, the first meta has already been initialized */
+ if (!nr_pages) {
+ meta = (struct ring_buffer_meta *)ptr;
+ nr_subbufs = meta->nr_subbufs;
+ } else {
+ meta = NULL;
+ /* Include the reader page */
+ nr_subbufs = nr_pages + 1;
+ }
+
+ /*
+ * The first chunk may not be subbuffer aligned, whereas
+ * the rest of the chunks are.
+ */
+ if (cpu) {
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
+ ptr += subbuf_size * nr_subbufs;
+
+ /* We can use multiplication to find chunks greater than 1 */
+ if (cpu > 1) {
+ unsigned long size;
+ unsigned long p;
+
+ /* Save the beginning of this CPU chunk */
+ p = ptr;
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
+ ptr += subbuf_size * nr_subbufs;
+
+ /* Now all chunks after this are the same size */
+ size = ptr - p;
+ ptr += size * (cpu - 2);
+ }
+ }
+ return (void *)ptr;
+}
+
+/* Return the start of subbufs given the meta pointer */
+static void *rb_subbufs_from_meta(struct ring_buffer_meta *meta)
+{
+ int subbuf_size = meta->subbuf_size;
+ unsigned long ptr;
+
+ ptr = (unsigned long)meta;
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
+
+ return (void *)ptr;
+}
+
+/*
+ * Return a specific sub-buffer for a given @cpu defined by @idx.
+ */
+static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
+{
+ struct ring_buffer_meta *meta;
+ unsigned long ptr;
+ int subbuf_size;
+
+ meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
+ if (!meta)
+ return NULL;
+
+ if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
+ return NULL;
+
+ subbuf_size = meta->subbuf_size;
+
+ /* Map this buffer to the order that's in meta->buffers[] */
+ idx = meta->buffers[idx];
+
+ ptr = (unsigned long)rb_subbufs_from_meta(meta);
+
+ ptr += subbuf_size * idx;
+ if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
+ return NULL;
+
+ return (void *)ptr;
+}
+
+/*
+ * See if the existing memory contains valid ring buffer data.
+ * As the previous kernel must be the same as this kernel, all
+ * the calculations (size of buffers and number of buffers)
+ * must be the same.
+ */
+static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ struct trace_buffer *buffer, int nr_pages)
+{
+ int subbuf_size = PAGE_SIZE;
+ struct buffer_data_page *subbuf;
+ unsigned long buffers_start;
+ unsigned long buffers_end;
+ int i;
+
+ /* Check the meta magic and meta struct size */
+ if (meta->magic != RING_BUFFER_META_MAGIC ||
+ meta->struct_size != sizeof(*meta)) {
+ pr_info("Ring buffer boot meta[%d] mismatch of magic or struct size\n", cpu);
+ return false;
+ }
+
+ /* The subbuffer's size and number of subbuffers must match */
+ if (meta->subbuf_size != subbuf_size ||
+ meta->nr_subbufs != nr_pages + 1) {
+ pr_info("Ring buffer boot meta [%d] mismatch of subbuf_size/nr_pages\n", cpu);
+ return false;
+ }
+
+ buffers_start = meta->first_buffer;
+ buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
+
+ /* Are the head and commit buffers within the range of buffers? */
+ if (meta->head_buffer < buffers_start ||
+ meta->head_buffer >= buffers_end) {
+ pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
+ return false;
+ }
+
+ if (meta->commit_buffer < buffers_start ||
+ meta->commit_buffer >= buffers_end) {
+ pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
+ return false;
+ }
+
+ subbuf = rb_subbufs_from_meta(meta);
+
+ /* Do the meta buffers and the subbufs themselves have correct data? */
+ for (i = 0; i < meta->nr_subbufs; i++) {
+ if (meta->buffers[i] < 0 ||
+ meta->buffers[i] >= meta->nr_subbufs) {
+ pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
+ return false;
+ }
+
+ if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
+ pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
+ return false;
+ }
+
+ subbuf = (void *)subbuf + subbuf_size;
+ }
+
+ return true;
+}
+
+static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf);
+
+static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
+ unsigned long long *timestamp, u64 *delta_ptr)
+{
+ struct ring_buffer_event *event;
+ u64 ts, delta;
+ int events = 0;
+ int e;
+
+ *delta_ptr = 0;
+ *timestamp = 0;
+
+ ts = dpage->time_stamp;
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(dpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = rb_event_time_stamp(event);
+ ts += delta;
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, ts);
+ if (delta < ts) {
+ *delta_ptr = delta;
+ *timestamp = ts;
+ return -1;
+ }
+ ts = delta;
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ if (event->time_delta == 1)
+ break;
+ fallthrough;
+ case RINGBUF_TYPE_DATA:
+ events++;
+ ts += event->time_delta;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+ *timestamp = ts;
+ return events;
+}
+
+static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
+{
+ unsigned long long ts;
+ u64 delta;
+ int tail;
+
+ tail = local_read(&dpage->commit);
+ return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
+}
+
+/* If the meta data has been validated, now validate the events */
+static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ struct buffer_page *head_page;
+ unsigned long entry_bytes = 0;
+ unsigned long entries = 0;
+ int ret;
+ int i;
+
+ if (!meta || !meta->head_buffer)
+ return;
+
+ /* Do the reader page first */
+ ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
+ if (ret < 0) {
+ pr_info("Ring buffer reader page is invalid\n");
+ goto invalid;
+ }
+ entries += ret;
+ entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
+ local_set(&cpu_buffer->reader_page->entries, ret);
+
+ head_page = cpu_buffer->head_page;
+
+ /* If both the head and commit are on the reader_page then we are done. */
+ if (head_page == cpu_buffer->reader_page &&
+ head_page == cpu_buffer->commit_page)
+ goto done;
+
+ /* Iterate until finding the commit page */
+ for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
+
+ /* Reader page has already been done */
+ if (head_page == cpu_buffer->reader_page)
+ continue;
+
+ ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
+ if (ret < 0) {
+ pr_info("Ring buffer meta [%d] invalid buffer page\n",
+ cpu_buffer->cpu);
+ goto invalid;
+ }
+ entries += ret;
+ entry_bytes += local_read(&head_page->page->commit);
+ local_set(&cpu_buffer->head_page->entries, ret);
+
+ if (head_page == cpu_buffer->commit_page)
+ break;
+ }
+
+ if (head_page != cpu_buffer->commit_page) {
+ pr_info("Ring buffer meta [%d] commit page not found\n",
+ cpu_buffer->cpu);
+ goto invalid;
+ }
+ done:
+ local_set(&cpu_buffer->entries, entries);
+ local_set(&cpu_buffer->entries_bytes, entry_bytes);
+
+ pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
+ return;
+
+ invalid:
+ /* The content of the buffers are invalid, reset the meta data */
+ meta->head_buffer = 0;
+ meta->commit_buffer = 0;
+
+ /* Reset the reader page */
+ local_set(&cpu_buffer->reader_page->entries, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
+
+ /* Reset all the subbuffers */
+ for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
+ local_set(&head_page->entries, 0);
+ local_set(&head_page->page->commit, 0);
+ }
+}
+
+/* Used to calculate data delta */
+static char rb_data_ptr[] = "";
+
+#define THIS_TEXT_PTR ((unsigned long)rb_meta_init_text_addr)
+#define THIS_DATA_PTR ((unsigned long)rb_data_ptr)
+
+static void rb_meta_init_text_addr(struct ring_buffer_meta *meta)
+{
+ meta->text_addr = THIS_TEXT_PTR;
+ meta->data_addr = THIS_DATA_PTR;
+}
+
+static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+{
+ struct ring_buffer_meta *meta;
+ unsigned long delta;
+ void *subbuf;
+ int cpu;
+ int i;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ void *next_meta;
+
+ meta = rb_range_meta(buffer, nr_pages, cpu);
+
+ if (rb_meta_valid(meta, cpu, buffer, nr_pages)) {
+ /* Make the mappings match the current address */
+ subbuf = rb_subbufs_from_meta(meta);
+ delta = (unsigned long)subbuf - meta->first_buffer;
+ meta->first_buffer += delta;
+ meta->head_buffer += delta;
+ meta->commit_buffer += delta;
+ buffer->last_text_delta = THIS_TEXT_PTR - meta->text_addr;
+ buffer->last_data_delta = THIS_DATA_PTR - meta->data_addr;
+ continue;
+ }
+
+ if (cpu < nr_cpu_ids - 1)
+ next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
+ else
+ next_meta = (void *)buffer->range_addr_end;
+
+ memset(meta, 0, next_meta - (void *)meta);
+
+ meta->magic = RING_BUFFER_META_MAGIC;
+ meta->struct_size = sizeof(*meta);
+
+ meta->nr_subbufs = nr_pages + 1;
+ meta->subbuf_size = PAGE_SIZE;
+
+ subbuf = rb_subbufs_from_meta(meta);
+
+ meta->first_buffer = (unsigned long)subbuf;
+ rb_meta_init_text_addr(meta);
+
+ /*
+ * The buffers[] array holds the order of the sub-buffers
+ * that are after the meta data. The sub-buffers may
+ * be swapped out when read and inserted into a different
+ * location of the ring buffer. Although their addresses
+ * remain the same, the buffers[] array contains the
+ * index into the sub-buffers holding their actual order.
+ */
+ for (i = 0; i < meta->nr_subbufs; i++) {
+ meta->buffers[i] = i;
+ rb_init_page(subbuf);
+ subbuf += meta->subbuf_size;
+ }
+ }
+}
+
+static void *rbm_start(struct seq_file *m, loff_t *pos)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = m->private;
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long val;
+
+ if (!meta)
+ return NULL;
+
+ if (*pos > meta->nr_subbufs)
+ return NULL;
+
+ val = *pos;
+ val++;
+
+ return (void *)val;
+}
+
+static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+
+ return rbm_start(m, pos);
+}
+
+static int rbm_show(struct seq_file *m, void *v)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = m->private;
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long val = (unsigned long)v;
+
+ if (val == 1) {
+ seq_printf(m, "head_buffer: %d\n",
+ rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
+ seq_printf(m, "commit_buffer: %d\n",
+ rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
+ seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size);
+ seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs);
+ return 0;
+ }
+
+ val -= 2;
+ seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]);
+
+ return 0;
+}
+
+static void rbm_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations rb_meta_seq_ops = {
+ .start = rbm_start,
+ .next = rbm_next,
+ .show = rbm_show,
+ .stop = rbm_stop,
+};
+
+int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file, &rb_meta_seq_ops);
+ if (ret)
+ return ret;
+
+ m = file->private_data;
+ m->private = buffer->buffers[cpu];
+
+ return 0;
+}
+
+/* Map the buffer_pages to the previous head and commit pages */
+static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *bpage)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+
+ if (meta->head_buffer == (unsigned long)bpage->page)
+ cpu_buffer->head_page = bpage;
+
+ if (meta->commit_buffer == (unsigned long)bpage->page) {
+ cpu_buffer->commit_page = bpage;
+ cpu_buffer->tail_page = bpage;
+ }
+}
+
static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
long nr_pages, struct list_head *pages)
{
+ struct trace_buffer *buffer = cpu_buffer->buffer;
+ struct ring_buffer_meta *meta = NULL;
struct buffer_page *bpage, *tmp;
bool user_thread = current->mm != NULL;
gfp_t mflags;
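Layout of a mapped range as computed above, per CPU: a ring_buffer_meta header plus its index array, aligned up to the sub-buffer size, followed by the sub-buffers themselves. A sketch of the chunk arithmetic (illustrative; example_cpu_chunk_start is a hypothetical helper mirroring rb_range_meta()):

static unsigned long example_cpu_chunk_start(unsigned long start,
                                             int subbuf_size,
                                             int nr_subbufs, int cpu)
{
        unsigned long ptr = start;
        int n;

        for (n = 0; n < cpu; n++) {
                /* meta header plus the per sub-buffer index array */
                ptr += sizeof(struct ring_buffer_meta) +
                       sizeof(int) * nr_subbufs;
                /* the sub-buffers start at the next aligned boundary */
                ptr = ALIGN(ptr, subbuf_size);
                ptr += (unsigned long)subbuf_size * nr_subbufs;
        }
        return ptr;
}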
@@ -1515,6 +2023,10 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
*/
if (user_thread)
set_current_oom_origin();
+
+ if (buffer->range_addr_start)
+ meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
+
for (i = 0; i < nr_pages; i++) {
struct page *page;
@@ -1525,16 +2037,32 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_bpage(cpu_buffer, bpage);
- list_add(&bpage->list, pages);
-
- page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
- mflags | __GFP_COMP | __GFP_ZERO,
- cpu_buffer->buffer->subbuf_order);
- if (!page)
- goto free_pages;
- bpage->page = page_address(page);
+ /*
+ * Append the pages, as for mapped buffers we want to keep
+ * the order.
+ */
+ list_add_tail(&bpage->list, pages);
+
+ if (meta) {
+ /* A range was given. Use that for the buffer page */
+ bpage->page = rb_range_buffer(cpu_buffer, i + 1);
+ if (!bpage->page)
+ goto free_pages;
+ /* If this is valid from a previous boot */
+ if (meta->head_buffer)
+ rb_meta_buffer_update(cpu_buffer, bpage);
+ bpage->range = 1;
+ bpage->id = i + 1;
+ } else {
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+ mflags | __GFP_COMP | __GFP_ZERO,
+ cpu_buffer->buffer->subbuf_order);
+ if (!page)
+ goto free_pages;
+ bpage->page = page_address(page);
+ rb_init_page(bpage->page);
+ }
bpage->order = cpu_buffer->buffer->subbuf_order;
- rb_init_page(bpage->page);
if (user_thread && fatal_signal_pending(current))
goto free_pages;
@@ -1584,6 +2112,7 @@ static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_meta *meta;
struct buffer_page *bpage;
struct page *page;
int ret;
@@ -1614,12 +2143,28 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
cpu_buffer->reader_page = bpage;
- page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
- cpu_buffer->buffer->subbuf_order);
- if (!page)
- goto fail_free_reader;
- bpage->page = page_address(page);
- rb_init_page(bpage->page);
+ if (buffer->range_addr_start) {
+ /*
+ * Range mapped buffers have the same restrictions as memory
+ * mapped ones do.
+ */
+ cpu_buffer->mapped = 1;
+ cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
+ bpage->page = rb_range_buffer(cpu_buffer, 0);
+ if (!bpage->page)
+ goto fail_free_reader;
+ if (cpu_buffer->ring_meta->head_buffer)
+ rb_meta_buffer_update(cpu_buffer, bpage);
+ bpage->range = 1;
+ } else {
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
+ cpu_buffer->buffer->subbuf_order);
+ if (!page)
+ goto fail_free_reader;
+ bpage->page = page_address(page);
+ rb_init_page(bpage->page);
+ }
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -1628,11 +2173,35 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
if (ret < 0)
goto fail_free_reader;
- cpu_buffer->head_page
- = list_entry(cpu_buffer->pages, struct buffer_page, list);
- cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+ rb_meta_validate_events(cpu_buffer);
+
+ /* If the boot meta was valid then this has already been updated */
+ meta = cpu_buffer->ring_meta;
+ if (!meta || !meta->head_buffer ||
+ !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
+ if (meta && meta->head_buffer &&
+ (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
+ pr_warn("Ring buffer meta buffers not all mapped\n");
+ if (!cpu_buffer->head_page)
+ pr_warn(" Missing head_page\n");
+ if (!cpu_buffer->commit_page)
+ pr_warn(" Missing commit_page\n");
+ if (!cpu_buffer->tail_page)
+ pr_warn(" Missing tail_page\n");
+ }
- rb_head_page_activate(cpu_buffer);
+ cpu_buffer->head_page
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
+ cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+
+ rb_head_page_activate(cpu_buffer);
+
+ if (cpu_buffer->ring_meta)
+ meta->commit_buffer = meta->head_buffer;
+ } else {
+ /* The valid meta buffer still needs to activate the head page */
+ rb_head_page_activate(cpu_buffer);
+ }
return cpu_buffer;
@@ -1669,22 +2238,14 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
kfree(cpu_buffer);
}
-/**
- * __ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes per cpu that is needed.
- * @flags: attributes to set for the ring buffer.
- * @key: ring buffer reader_lock_key.
- *
- * Currently the only flag that is available is the RB_FL_OVERWRITE
- * flag. This flag means that the buffer will overwrite old data
- * when the buffer wraps. If this flag is not set, the buffer will
- * drop data when the tail hits the head.
- */
-struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
- struct lock_class_key *key)
+static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long end,
+ struct lock_class_key *key)
{
struct trace_buffer *buffer;
long nr_pages;
+ int subbuf_size;
int bsize;
int cpu;
int ret;
@@ -1698,14 +2259,13 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
- /* Default buffer page size - one system page */
- buffer->subbuf_order = 0;
- buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+ buffer->subbuf_order = order;
+ subbuf_size = (PAGE_SIZE << order);
+ buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
/* Max payload is buffer page size - header (8bytes) */
buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
- nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
buffer->flags = flags;
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
@@ -1713,10 +2273,6 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&buffer->irq_work.waiters);
- /* need at least two pages */
- if (nr_pages < 2)
- nr_pages = 2;
-
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
@@ -1725,6 +2281,56 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!buffer->buffers)
goto fail_free_cpumask;
+ /* If start/end are specified, then that overrides size */
+ if (start && end) {
+ unsigned long ptr;
+ int n;
+
+ size = end - start;
+ size = size / nr_cpu_ids;
+
+ /*
+ * The number of sub-buffers (nr_pages) is determined by the
+ * per CPU size allocated minus the meta data size.
+ * That is then divided by the sub-buffer size plus the size
+ * of the integer array index entry that is appended to the
+ * meta data for each sub-buffer.
+ */
+ nr_pages = (size - sizeof(struct ring_buffer_meta)) /
+ (subbuf_size + sizeof(int));
+ /* Need at least two pages plus the reader page */
+ if (nr_pages < 3)
+ goto fail_free_buffers;
+
+ again:
+ /* Make sure that the size fits aligned */
+ for (n = 0, ptr = start; n < nr_cpu_ids; n++) {
+ ptr += sizeof(struct ring_buffer_meta) +
+ sizeof(int) * nr_pages;
+ ptr = ALIGN(ptr, subbuf_size);
+ ptr += subbuf_size * nr_pages;
+ }
+ if (ptr > end) {
+ if (nr_pages <= 3)
+ goto fail_free_buffers;
+ nr_pages--;
+ goto again;
+ }
+
+ /* nr_pages should not count the reader page */
+ nr_pages--;
+ buffer->range_addr_start = start;
+ buffer->range_addr_end = end;
+
+ rb_range_meta_init(buffer, nr_pages);
+ } else {
+
+ /* need at least two pages */
+ nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
+ if (nr_pages < 2)
+ nr_pages = 2;
+ }
+
cpu = raw_smp_processor_id();
cpumask_set_cpu(cpu, buffer->cpumask);
buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
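The sizing above in formula form: per_cpu = (end - start) / nr_cpu_ids, then nr_pages = (per_cpu - sizeof(meta)) / (subbuf_size + sizeof(int)), shrunk until the aligned layout fits; one of the resulting sub-buffers becomes the reader page. As a sketch (illustrative; example_nr_pages is a hypothetical helper):

static long example_nr_pages(unsigned long start, unsigned long end,
                             int subbuf_size, int ncpus)
{
        unsigned long per_cpu = (end - start) / ncpus;

        /* first guess; alloc_buffer() shrinks it until the layout fits */
        return (per_cpu - sizeof(struct ring_buffer_meta)) /
               (subbuf_size + sizeof(int));
}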
@@ -1753,9 +2359,73 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
kfree(buffer);
return NULL;
}
+
+/**
+ * __ring_buffer_alloc - allocate a new ring_buffer
+ * @size: the size in bytes per cpu that is needed.
+ * @flags: attributes to set for the ring buffer.
+ * @key: ring buffer reader_lock_key.
+ *
+ * Currently the only flag that is available is the RB_FL_OVERWRITE
+ * flag. This flag means that the buffer will overwrite old data
+ * when the buffer wraps. If this flag is not set, the buffer will
+ * drop data when the tail hits the head.
+ */
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ struct lock_class_key *key)
+{
+ /* Default buffer page size - one system page */
+ return alloc_buffer(size, flags, 0, 0, 0, key);
+}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
/**
+ * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
+ * @size: the size in bytes per cpu that is needed.
+ * @flags: attributes to set for the ring buffer.
+ * @order: sub-buffer order
+ * @start: start of allocated range
+ * @range_size: size of allocated range
+ * @key: ring buffer reader_lock_key.
+ *
+ * Currently the only flag that is available is the RB_FL_OVERWRITE
+ * flag. This flag means that the buffer will overwrite old data
+ * when the buffer wraps. If this flag is not set, the buffer will
+ * drop data when the tail hits the head.
+ */
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ struct lock_class_key *key)
+{
+ return alloc_buffer(size, flags, order, start, start + range_size, key);
+}
+
+/**
+ * ring_buffer_last_boot_delta - return the delta offset from last boot
+ * @buffer: The buffer to return the delta from
+ * @text: Return text delta
+ * @data: Return data delta
+ *
+ * Returns: true if the delta is non-zero
+ */
+bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
+ long *data)
+{
+ if (!buffer)
+ return false;
+
+ if (!buffer->last_text_delta)
+ return false;
+
+ *text = buffer->last_text_delta;
+ *data = buffer->last_data_delta;
+
+ return true;
+}
+
+/**
* ring_buffer_free - free a ring buffer.
* @buffer: the buffer to free.
*/
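A hypothetical consumer of the new helper: pointers recorded by the previous kernel are rebased by the reported deltas before being dereferenced or printed (illustrative sketch; example_rebase_text is not part of this patch):

static unsigned long example_rebase_text(struct trace_buffer *buffer,
                                         unsigned long addr)
{
        long text_delta, data_delta;

        /* Non-zero deltas mean KASLR placed this kernel elsewhere */
        if (ring_buffer_last_boot_delta(buffer, &text_delta, &data_delta))
                addr += text_delta;
        return addr;
}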
@@ -2364,6 +3034,52 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
iter->next_event = 0;
}
+/* Return the index into the sub-buffers for a given sub-buffer */
+static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf)
+{
+ void *subbuf_array;
+
+ subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
+ subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
+ return (subbuf - subbuf_array) / meta->subbuf_size;
+}
+
+static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *next_page)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long old_head = (unsigned long)next_page->page;
+ unsigned long new_head;
+
+ rb_inc_page(&next_page);
+ new_head = (unsigned long)next_page->page;
+
+ /*
+ * Only move it forward once. If something else came in and
+ * moved it forward, then we don't want to touch it.
+ */
+ (void)cmpxchg(&meta->head_buffer, old_head, new_head);
+}
+
+static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *reader)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ void *old_reader = cpu_buffer->reader_page->page;
+ void *new_reader = reader->page;
+ int id;
+
+ id = reader->id;
+ cpu_buffer->reader_page->id = id;
+ reader->id = 0;
+
+ meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
+ meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
+
+ /* The head pointer is the one after the reader */
+ rb_update_meta_head(cpu_buffer, reader);
+}
+
/*
* rb_handle_head_page - writer hit the head page
*
@@ -2413,6 +3129,8 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
+ if (cpu_buffer->ring_meta)
+ rb_update_meta_head(cpu_buffer, next_page);
/*
* The entries will be zeroed out when we move the
* tail page.
@@ -2974,6 +3692,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(&cpu_buffer->commit_page);
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
+ }
/* add barrier to keep gcc from optimizing too much */
barrier();
}
@@ -3420,11 +4142,10 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info,
unsigned long tail)
{
- struct ring_buffer_event *event;
struct buffer_data_page *bpage;
u64 ts, delta;
bool full = false;
- int e;
+ int ret;
bpage = info->tail_page->page;
@@ -3450,39 +4171,12 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
goto out;
- ts = bpage->time_stamp;
-
- for (e = 0; e < tail; e += rb_event_length(event)) {
-
- event = (struct ring_buffer_event *)(bpage->data + e);
-
- switch (event->type_len) {
-
- case RINGBUF_TYPE_TIME_EXTEND:
- delta = rb_event_time_stamp(event);
- ts += delta;
- break;
-
- case RINGBUF_TYPE_TIME_STAMP:
- delta = rb_event_time_stamp(event);
- delta = rb_fix_abs_ts(delta, ts);
- if (delta < ts) {
- buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
- cpu_buffer->cpu, ts, delta);
- }
- ts = delta;
- break;
-
- case RINGBUF_TYPE_PADDING:
- if (event->time_delta == 1)
- break;
- fallthrough;
- case RINGBUF_TYPE_DATA:
- ts += event->time_delta;
- break;
-
- default:
- RB_WARN_ON(cpu_buffer, 1);
+ ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
+ if (ret < 0) {
+ if (delta < ts) {
+ buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
+ cpu_buffer->cpu, ts, delta);
+ goto out;
}
}
if ((full && ts > info->ts) ||
@@ -4591,6 +5285,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
if (!ret)
goto spin;
+ if (cpu_buffer->ring_meta)
+ rb_update_meta_reader(cpu_buffer, reader);
+
/*
* Yay! We succeeded in replacing the page.
*
@@ -5212,6 +5909,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
+ if (!meta)
+ return;
+
meta->reader.read = cpu_buffer->reader_page->read;
meta->reader.id = cpu_buffer->reader_page->id;
meta->reader.lost_events = cpu_buffer->lost_events;
@@ -5268,11 +5968,16 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->lost_events = 0;
cpu_buffer->last_overrun = 0;
- if (cpu_buffer->mapped)
- rb_update_meta_page(cpu_buffer);
-
rb_head_page_activate(cpu_buffer);
cpu_buffer->pages_removed = 0;
+
+ if (cpu_buffer->mapped) {
+ rb_update_meta_page(cpu_buffer);
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->commit_buffer = meta->head_buffer;
+ }
+ }
}
/* Must have disabled the cpu buffer then done a synchronize_rcu */
@@ -5303,6 +6008,7 @@ static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_meta *meta;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
@@ -5321,6 +6027,11 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->resize_disabled);
+ /* Make sure persistent meta now uses this buffer's addresses */
+ meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
+ if (meta)
+ rb_meta_init_text_addr(meta);
+
mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
@@ -5335,6 +6046,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_meta *meta;
int cpu;
/* prevent another thread from changing buffer sizes */
@@ -5362,6 +6074,11 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
reset_disabled_cpu_buffer(cpu_buffer);
+ /* Make sure persistent meta now uses this buffer's addresses */
+ meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
+ if (meta)
+ rb_meta_init_text_addr(meta);
+
atomic_dec(&cpu_buffer->record_disabled);
atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
}
@@ -6135,10 +6852,10 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
/* install subbuf ID to kern VA translation */
cpu_buffer->subbuf_ids = subbuf_ids;
- meta->meta_page_size = PAGE_SIZE;
meta->meta_struct_len = sizeof(*meta);
meta->nr_subbufs = nr_subbufs;
meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+ meta->meta_page_size = meta->subbuf_size;
rb_update_meta_page(cpu_buffer);
}
@@ -6155,7 +6872,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
mutex_lock(&cpu_buffer->mapping_lock);
- if (!cpu_buffer->mapped) {
+ if (!cpu_buffer->user_mapped) {
mutex_unlock(&cpu_buffer->mapping_lock);
return ERR_PTR(-ENODEV);
}
@@ -6179,19 +6896,26 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
lockdep_assert_held(&cpu_buffer->mapping_lock);
+	/* mapped is always greater than or equal to user_mapped */
+ if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
+ return -EINVAL;
+
if (inc && cpu_buffer->mapped == UINT_MAX)
return -EBUSY;
- if (WARN_ON(!inc && cpu_buffer->mapped == 0))
+ if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
return -EINVAL;
mutex_lock(&cpu_buffer->buffer->mutex);
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- if (inc)
+ if (inc) {
+ cpu_buffer->user_mapped++;
cpu_buffer->mapped++;
- else
+ } else {
+ cpu_buffer->user_mapped--;
cpu_buffer->mapped--;
+ }
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
mutex_unlock(&cpu_buffer->buffer->mutex);
@@ -6214,7 +6938,7 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
struct vm_area_struct *vma)
{
- unsigned long nr_subbufs, nr_pages, vma_pages, pgoff = vma->vm_pgoff;
+ unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
unsigned int subbuf_pages, subbuf_order;
struct page **pages;
int p = 0, s = 0;
@@ -6225,6 +6949,12 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
!(vma->vm_flags & VM_MAYSHARE))
return -EPERM;
+ subbuf_order = cpu_buffer->buffer->subbuf_order;
+ subbuf_pages = 1 << subbuf_order;
+
+ if (subbuf_order && pgoff % subbuf_pages)
+ return -EINVAL;
+
/*
* Make sure the mapping cannot become writable later. Also tell the VM
* to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
@@ -6234,37 +6964,38 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
lockdep_assert_held(&cpu_buffer->mapping_lock);
- subbuf_order = cpu_buffer->buffer->subbuf_order;
- subbuf_pages = 1 << subbuf_order;
-
nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
- nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
+ nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
- vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- if (!vma_pages || vma_pages > nr_pages)
+ nr_vma_pages = vma_pages(vma);
+ if (!nr_vma_pages || nr_vma_pages > nr_pages)
return -EINVAL;
- nr_pages = vma_pages;
+ nr_pages = nr_vma_pages;
pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
if (!pgoff) {
+ unsigned long meta_page_padding;
+
pages[p++] = virt_to_page(cpu_buffer->meta_page);
/*
- * TODO: Align sub-buffers on their size, once
- * vm_insert_pages() supports the zero-page.
+ * Pad with the zero-page to align the meta-page with the
+ * sub-buffers.
*/
- } else {
- /* Skip the meta-page */
- pgoff--;
+ meta_page_padding = subbuf_pages - 1;
+ while (meta_page_padding-- && p < nr_pages) {
+ unsigned long __maybe_unused zero_addr =
+ vma->vm_start + (PAGE_SIZE * p);
- if (pgoff % subbuf_pages) {
- err = -EINVAL;
- goto out;
+ pages[p++] = ZERO_PAGE(zero_addr);
}
+ } else {
+ /* Skip the meta-page */
+ pgoff -= subbuf_pages;
s += pgoff / subbuf_pages;
}
@@ -6316,7 +7047,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
mutex_lock(&cpu_buffer->mapping_lock);
- if (cpu_buffer->mapped) {
+ if (cpu_buffer->user_mapped) {
err = __rb_map_vma(cpu_buffer, vma);
if (!err)
err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6347,12 +7078,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
*/
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
err = __rb_map_vma(cpu_buffer, vma);
if (!err) {
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- cpu_buffer->mapped = 1;
+ /* This is the first time it is mapped by user */
+ cpu_buffer->mapped++;
+ cpu_buffer->user_mapped = 1;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
} else {
kfree(cpu_buffer->subbuf_ids);
@@ -6380,10 +7114,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
mutex_lock(&cpu_buffer->mapping_lock);
- if (!cpu_buffer->mapped) {
+ if (!cpu_buffer->user_mapped) {
err = -ENODEV;
goto out;
- } else if (cpu_buffer->mapped > 1) {
+ } else if (cpu_buffer->user_mapped > 1) {
__rb_inc_dec_mapped(cpu_buffer, false);
goto out;
}
@@ -6391,7 +7125,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
mutex_lock(&buffer->mutex);
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- cpu_buffer->mapped = 0;
+ /* This is the last user space mapping */
+ if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
+ cpu_buffer->mapped--;
+ cpu_buffer->user_mapped = 0;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index edf6bc817aa1..b4f348b4653f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
- TRACE_ITER_HASH_PTR)
+ TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
@@ -490,7 +490,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
- (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
+ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
/*
* The global_trace is the descriptor that holds the top-level tracing
@@ -500,6 +500,29 @@ static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
+static struct trace_array *printk_trace = &global_trace;
+
+static __always_inline bool printk_binsafe(struct trace_array *tr)
+{
+ /*
+	 * The binary format of trace_printk can cause a crash if used
+	 * by a buffer from another boot. Force the use of the
+	 * non-binary version of trace_printk if the trace_printk
+	 * buffer is a boot-mapped ring buffer.
+ */
+ return !(tr->flags & TRACE_ARRAY_FL_BOOT);
+}
+
+static void update_printk_trace(struct trace_array *tr)
+{
+ if (printk_trace == tr)
+ return;
+
+ printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
+ printk_trace = tr;
+ tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
+}
+
void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
if (!tr)
@@ -1117,7 +1140,7 @@ EXPORT_SYMBOL_GPL(__trace_array_puts);
*/
int __trace_puts(unsigned long ip, const char *str, int size)
{
- return __trace_array_puts(&global_trace, ip, str, size);
+ return __trace_array_puts(printk_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
@@ -1128,6 +1151,7 @@ EXPORT_SYMBOL_GPL(__trace_puts);
*/
int __trace_bputs(unsigned long ip, const char *str)
{
+ struct trace_array *tr = READ_ONCE(printk_trace);
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
@@ -1135,14 +1159,17 @@ int __trace_bputs(unsigned long ip, const char *str)
int size = sizeof(struct bputs_entry);
int ret = 0;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ if (!printk_binsafe(tr))
+ return __trace_puts(ip, str, strlen(str));
+
+ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
trace_ctx = tracing_gen_ctx();
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
@@ -1155,7 +1182,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
ret = 1;
out:
@@ -2226,10 +2253,6 @@ static __init int init_trace_selftests(void)
}
core_initcall(init_trace_selftests);
#else
-static inline int run_tracer_selftest(struct tracer *type)
-{
- return 0;
-}
static inline int do_run_tracer_selftest(struct tracer *type)
{
return 0;
@@ -3025,7 +3048,7 @@ void trace_dump_stack(int skip)
/* Skip 1 to skip this function. */
skip++;
#endif
- __ftrace_trace_stack(global_trace.array_buffer.buffer,
+ __ftrace_trace_stack(printk_trace->array_buffer.buffer,
tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3244,12 +3267,15 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct trace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct trace_buffer *buffer;
- struct trace_array *tr = &global_trace;
+ struct trace_array *tr = READ_ONCE(printk_trace);
struct bprint_entry *entry;
unsigned int trace_ctx;
char *tbuffer;
int len = 0, size;
+ if (!printk_binsafe(tr))
+ return trace_vprintk(ip, fmt, args);
+
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -3342,7 +3368,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+ ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
}
out:
@@ -3438,7 +3464,7 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
int ret;
va_list ap;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -3450,7 +3476,7 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_vprintk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(printk_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
@@ -3671,8 +3697,11 @@ static void test_can_verify(void)
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
va_list ap)
{
+ long text_delta = iter->tr->text_delta;
+ long data_delta = iter->tr->data_delta;
const char *p = fmt;
const char *str;
+ bool good;
int i, j;
if (WARN_ON_ONCE(!fmt))
@@ -3691,7 +3720,10 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
j = 0;
- /* We only care about %s and variants */
+ /*
+	 * We only care about %s and variants,
+	 * as well as %p[sS] if the delta is non-zero.
+ */
for (i = 0; p[i]; i++) {
if (i + 1 >= iter->fmt_size) {
/*
@@ -3720,6 +3752,11 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
}
if (p[i+j] == 's')
break;
+
+ if (text_delta && p[i+1] == 'p' &&
+ ((p[i+2] == 's' || p[i+2] == 'S')))
+ break;
+
star = false;
}
j = 0;
@@ -3733,6 +3770,24 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ /* Add delta to %pS pointers */
+ if (p[i+1] == 'p') {
+ unsigned long addr;
+ char fmt[4];
+
+ fmt[0] = '%';
+ fmt[1] = 'p';
+ fmt[2] = p[i+2]; /* Either %ps or %pS */
+ fmt[3] = '\0';
+
+ addr = va_arg(ap, unsigned long);
+ addr += text_delta;
+ trace_seq_printf(&iter->seq, fmt, (void *)addr);
+
+ p += i + 3;
+ continue;
+ }
+
/*
* If iter->seq is full, the above call no longer guarantees
* that ap is in sync with fmt processing, and further calls
@@ -3751,6 +3806,14 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
/* The ap now points to the string data of the %s */
str = va_arg(ap, const char *);
+ good = trace_safe_str(iter, str, star, len);
+
+ /* Could be from the last boot */
+ if (data_delta && !good) {
+ str += data_delta;
+ good = trace_safe_str(iter, str, star, len);
+ }
+
/*
* If you hit this warning, it is likely that the
* trace event in question used %s on a string that
@@ -3760,8 +3823,7 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
* instead. See samples/trace_events/trace-events-sample.h
* for reference.
*/
- if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
- "fmt: '%s' current_buffer: '%s'",
+ if (WARN_ONCE(!good, "fmt: '%s' current_buffer: '%s'",
fmt, seq_buf_str(&iter->seq.seq))) {
int ret;
@@ -4923,6 +4985,11 @@ static int tracing_open(struct inode *inode, struct file *file)
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
+#ifdef CONFIG_TRACER_SNAPSHOT
+ /* arrays with mapped buffer range do not have snapshots */
+ if (tr->range_addr_start && t->use_max_tr)
+ return false;
+#endif
return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}
@@ -5015,7 +5082,7 @@ static int show_traces_open(struct inode *inode, struct file *file)
return 0;
}
-static int show_traces_release(struct inode *inode, struct file *file)
+static int tracing_seq_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -5056,7 +5123,7 @@ static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = show_traces_release,
+ .release = tracing_seq_release,
};
static ssize_t
@@ -5241,7 +5308,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
if ((mask == TRACE_ITER_RECORD_TGID) ||
- (mask == TRACE_ITER_RECORD_CMD))
+ (mask == TRACE_ITER_RECORD_CMD) ||
+ (mask == TRACE_ITER_TRACE_PRINTK))
lockdep_assert_held(&event_mutex);
/* do nothing if flag is already set */
@@ -5253,6 +5321,25 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (tr->current_trace->flag_changed(tr, mask, !!enabled))
return -EINVAL;
+ if (mask == TRACE_ITER_TRACE_PRINTK) {
+ if (enabled) {
+ update_printk_trace(tr);
+ } else {
+ /*
+ * The global_trace cannot clear this.
+			 * Its flag only gets cleared when another instance sets it.
+ */
+ if (printk_trace == &global_trace)
+ return -EINVAL;
+ /*
+			 * An instance must always have it set;
+			 * by default, that's the global_trace instance.
+ */
+ if (printk_trace == tr)
+ update_printk_trace(&global_trace);
+ }
+ }
+
if (enabled)
tr->trace_flags |= mask;
else
@@ -6038,6 +6125,18 @@ out:
return ret;
}
+static void update_last_data(struct trace_array *tr)
+{
+ if (!tr->text_delta && !tr->data_delta)
+ return;
+
+ /* Clear old data */
+ tracing_reset_online_cpus(&tr->array_buffer);
+
+ /* Using current data now */
+ tr->text_delta = 0;
+ tr->data_delta = 0;
+}
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
@@ -6055,6 +6154,9 @@ int tracing_update_buffers(struct trace_array *tr)
int ret = 0;
mutex_lock(&trace_types_lock);
+
+ update_last_data(tr);
+
if (!tr->ring_buffer_expanded)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
@@ -6110,6 +6212,8 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
mutex_lock(&trace_types_lock);
+ update_last_data(tr);
+
if (!tr->ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
@@ -6858,6 +6962,37 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
}
static ssize_t
+tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ struct seq_buf seq;
+ char buf[64];
+
+ seq_buf_init(&seq, buf, 64);
+
+ seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
+ seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
+}
+
+static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
+{
+ struct trace_array *tr = inode->i_private;
+ int cpu = tracing_get_cpu(inode);
+ int ret;
+
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
+
+ ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
+ if (ret < 0)
+ __trace_array_put(tr);
+ return ret;
+}
+
+static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -7433,6 +7568,13 @@ static const struct file_operations tracing_entries_fops = {
.release = tracing_release_generic_tr,
};
+static const struct file_operations tracing_buffer_meta_fops = {
+ .open = tracing_buffer_meta_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tracing_seq_release,
+};
+
static const struct file_operations tracing_total_entries_fops = {
.open = tracing_open_generic_tr,
.read = tracing_total_entries_read,
@@ -7473,6 +7615,13 @@ static const struct file_operations trace_time_stamp_mode_fops = {
.release = tracing_single_release_tr,
};
+static const struct file_operations last_boot_fops = {
+ .open = tracing_open_generic_tr,
+ .read = tracing_last_boot_read,
+ .llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
+};
+
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
.open = tracing_snapshot_open,
@@ -8665,12 +8814,17 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_entries_fops);
+ if (tr->range_addr_start)
+ trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
+ tr, cpu, &tracing_buffer_meta_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
- tr, cpu, &snapshot_fops);
+ if (!tr->range_addr_start) {
+ trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
+ tr, cpu, &snapshot_fops);
- trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
- tr, cpu, &snapshot_raw_fops);
+ trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
+ tr, cpu, &snapshot_raw_fops);
+ }
#endif
}
@@ -9207,7 +9361,21 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
buf->tr = tr;
- buf->buffer = ring_buffer_alloc(size, rb_flags);
+ if (tr->range_addr_start && tr->range_addr_size) {
+ buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
+ tr->range_addr_start,
+ tr->range_addr_size);
+
+ ring_buffer_last_boot_delta(buf->buffer,
+ &tr->text_delta, &tr->data_delta);
+ /*
+ * This is basically the same as a mapped buffer,
+ * with the same restrictions.
+ */
+ tr->mapped++;
+ } else {
+ buf->buffer = ring_buffer_alloc(size, rb_flags);
+ }
if (!buf->buffer)
return -ENOMEM;
@@ -9244,6 +9412,10 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
return ret;
#ifdef CONFIG_TRACER_MAX_TRACE
+	/* Trace arrays with a mapped buffer range do not have snapshot buffers */
+ if (tr->range_addr_start)
+ return 0;
+
ret = allocate_trace_buffer(tr, &tr->max_buffer,
allocate_snapshot ? size : 1);
if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
@@ -9344,7 +9516,9 @@ static int trace_array_create_dir(struct trace_array *tr)
}
static struct trace_array *
-trace_array_create_systems(const char *name, const char *systems)
+trace_array_create_systems(const char *name, const char *systems,
+ unsigned long range_addr_start,
+ unsigned long range_addr_size)
{
struct trace_array *tr;
int ret;
@@ -9370,6 +9544,10 @@ trace_array_create_systems(const char *name, const char *systems)
goto out_free_tr;
}
+	/* Only used for boot-time memory-mapped ring buffers */
+ tr->range_addr_start = range_addr_start;
+ tr->range_addr_size = range_addr_size;
+
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
@@ -9427,7 +9605,7 @@ trace_array_create_systems(const char *name, const char *systems)
static struct trace_array *trace_array_create(const char *name)
{
- return trace_array_create_systems(name, NULL);
+ return trace_array_create_systems(name, NULL, 0, 0);
}
static int instance_mkdir(const char *name)
@@ -9452,6 +9630,31 @@ out_unlock:
return ret;
}
+static u64 map_pages(u64 start, u64 size)
+{
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ unsigned int i;
+ void *vaddr;
+
+ page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ page_start = start;
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return 0;
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+ vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+
+ return (u64)(unsigned long)vaddr;
+}
+
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
* @name: The name of the trace array to be looked up/created.
@@ -9481,7 +9684,7 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
goto out_unlock;
}
- tr = trace_array_create_systems(name, systems);
+ tr = trace_array_create_systems(name, systems, 0, 0);
if (IS_ERR(tr))
tr = NULL;
@@ -9511,6 +9714,9 @@ static int __remove_instance(struct trace_array *tr)
set_tracer_flag(tr, 1 << i, 0);
}
+ if (printk_trace == tr)
+ update_printk_trace(&global_trace);
+
tracing_set_nop(tr);
clear_ftrace_function_probes(tr);
event_trace_del_tracer(tr);
@@ -9673,10 +9879,15 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
if (ftrace_create_function_files(tr, d_tracer))
MEM_FAIL(1, "Could not allocate function filter files");
+ if (tr->range_addr_start) {
+ trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
+ tr, &last_boot_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
- tr, &snapshot_fops);
+ } else {
+ trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
+ tr, &snapshot_fops);
#endif
+ }
trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_err_log_fops);
@@ -10296,6 +10507,7 @@ __init static void enable_instances(void)
{
struct trace_array *tr;
char *curr_str;
+ char *name;
char *str;
char *tok;
@@ -10304,19 +10516,107 @@ __init static void enable_instances(void)
str = boot_instance_info;
while ((curr_str = strsep(&str, "\t"))) {
+ phys_addr_t start = 0;
+ phys_addr_t size = 0;
+ unsigned long addr = 0;
+ bool traceprintk = false;
+ bool traceoff = false;
+ char *flag_delim;
+ char *addr_delim;
tok = strsep(&curr_str, ",");
- if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
- do_allocate_snapshot(tok);
+ flag_delim = strchr(tok, '^');
+ addr_delim = strchr(tok, '@');
- tr = trace_array_get_by_name(tok, NULL);
- if (!tr) {
- pr_warn("Failed to create instance buffer %s\n", curr_str);
+ if (addr_delim)
+ *addr_delim++ = '\0';
+
+ if (flag_delim)
+ *flag_delim++ = '\0';
+
+ name = tok;
+
+ if (flag_delim) {
+ char *flag;
+
+ while ((flag = strsep(&flag_delim, "^"))) {
+ if (strcmp(flag, "traceoff") == 0) {
+ traceoff = true;
+ } else if ((strcmp(flag, "printk") == 0) ||
+ (strcmp(flag, "traceprintk") == 0) ||
+ (strcmp(flag, "trace_printk") == 0)) {
+ traceprintk = true;
+ } else {
+ pr_info("Tracing: Invalid instance flag '%s' for %s\n",
+ flag, name);
+ }
+ }
+ }
+
+ tok = addr_delim;
+ if (tok && isdigit(*tok)) {
+ start = memparse(tok, &tok);
+ if (!start) {
+ pr_warn("Tracing: Invalid boot instance address for %s\n",
+ name);
+ continue;
+ }
+ if (*tok != ':') {
+ pr_warn("Tracing: No size specified for instance %s\n", name);
+ continue;
+ }
+ tok++;
+ size = memparse(tok, &tok);
+ if (!size) {
+ pr_warn("Tracing: Invalid boot instance size for %s\n",
+ name);
+ continue;
+ }
+ } else if (tok) {
+ if (!reserve_mem_find_by_name(tok, &start, &size)) {
+ start = 0;
+ pr_warn("Failed to map boot instance %s to %s\n", name, tok);
+ continue;
+ }
+ }
+
+ if (start) {
+ addr = map_pages(start, size);
+ if (addr) {
+ pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
+ name, &start, (unsigned long)size);
+ } else {
+ pr_warn("Tracing: Failed to map boot instance %s\n", name);
+ continue;
+ }
+ } else {
+			/* Only non-mapped buffers have snapshot buffers */
+ if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
+ do_allocate_snapshot(name);
+ }
+
+ tr = trace_array_create_systems(name, NULL, addr, size);
+ if (IS_ERR(tr)) {
+ pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
continue;
}
- /* Allow user space to delete it */
- trace_array_put(tr);
+
+ if (traceoff)
+ tracer_tracing_off(tr);
+
+ if (traceprintk)
+ update_printk_trace(tr);
+
+ /*
+ * If start is set, then this is a mapped buffer, and
+ * cannot be deleted by user space, so keep the reference
+ * to it.
+ */
+ if (start)
+ tr->flags |= TRACE_ARRAY_FL_BOOT;
+ else
+ trace_array_put(tr);
while ((tok = strsep(&curr_str, ","))) {
early_enable_events(tr, tok, true);
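Putting the new parsing together, the trace_instance= boot parameter appears
to accept the following forms (hedged examples; the names, address, and size
are made up):

	trace_instance=foo                          plain instance
	trace_instance=foo^traceoff^traceprintk     '^'-separated flags
	trace_instance=boot_map@0x285400000:12M     map at physical start:size
	trace_instance=boot_map@my_region           map a reserve_mem= region
	trace_instance=foo,sched:sched_switch       trailing tokens enable events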
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index bd3e3069300e..c866991b9c78 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -336,7 +336,6 @@ struct trace_array {
bool allocated_snapshot;
spinlock_t snapshot_trigger_lock;
unsigned int snapshot;
- unsigned int mapped;
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -344,6 +343,13 @@ struct trace_array {
struct irq_work fsnotify_irqwork;
#endif
#endif
+ /* The below is for memory mapped ring buffer */
+ unsigned int mapped;
+ unsigned long range_addr_start;
+ unsigned long range_addr_size;
+ long text_delta;
+ long data_delta;
+
struct trace_pid_list __rcu *filtered_pids;
struct trace_pid_list __rcu *filtered_no_pids;
/*
@@ -423,7 +429,8 @@ struct trace_array {
};
enum {
- TRACE_ARRAY_FL_GLOBAL = (1 << 0)
+ TRACE_ARRAY_FL_GLOBAL = BIT(0),
+ TRACE_ARRAY_FL_BOOT = BIT(1),
};
extern struct list_head ftrace_trace_arrays;
@@ -644,6 +651,8 @@ trace_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long len,
unsigned int trace_ctx);
+int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);
+
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -1312,6 +1321,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(IRQ_INFO, "irq-info"), \
C(MARKERS, "markers"), \
C(EVENT_FORK, "event-fork"), \
+ C(TRACE_PRINTK, "trace_printk_dest"), \
C(PAUSE_ON_TRACE, "pause-on-trace"), \
C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
FUNCTION_FLAGS \
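Given the set_tracer_flag() handling above, the new per-instance option should
be togglable from tracefs like any other — a hedged example, assuming tracefs
is mounted at /sys/kernel/tracing and an instance named "foo" exists:

	# Route trace_printk() output into instance "foo"
	echo 1 > /sys/kernel/tracing/instances/foo/options/trace_printk_dest
	# Clearing it hands the destination back to the top-level buffer;
	# clearing it on the top-level instance itself returns -EINVAL.
	echo 0 > /sys/kernel/tracing/instances/foo/options/trace_printk_dest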
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index 62e6a8f4aae9..a079abd8955b 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -21,6 +21,7 @@
#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
+#define TRACEPOINT_STUB ERR_PTR(-ENOENT)
static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
@@ -385,6 +386,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
const char *event,
const char *symbol,
struct tracepoint *tpoint,
+ struct module *mod,
int maxactive,
int nargs, bool is_return)
{
@@ -405,6 +407,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
tf->fp.entry_handler = fentry_dispatcher;
tf->tpoint = tpoint;
+ tf->mod = mod;
tf->fp.nr_maxactive = maxactive;
ret = trace_probe_init(&tf->tp, event, group, false, nargs);
@@ -672,6 +675,24 @@ static int unregister_fprobe_event(struct trace_fprobe *tf)
return trace_probe_unregister_event_call(&tf->tp);
}
+static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
+{
+ struct tracepoint *tpoint = tf->tpoint;
+ unsigned long ip = (unsigned long)tpoint->probestub;
+ int ret;
+
+ /*
+	 * Enabling an fprobe on a tracepoint takes two steps:
+	 * first, register the __probestub_##TP function on the
+	 * tracepoint, then put an fprobe on that stub function.
+ */
+ ret = tracepoint_probe_register_prio_may_exist(tpoint,
+ tpoint->probestub, NULL, 0);
+ if (ret < 0)
+ return ret;
+ return register_fprobe_ips(&tf->fp, &ip, 1);
+}
+
/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
@@ -698,18 +719,12 @@ static int __register_trace_fprobe(struct trace_fprobe *tf)
tf->fp.flags |= FPROBE_FL_DISABLED;
if (trace_fprobe_is_tracepoint(tf)) {
- struct tracepoint *tpoint = tf->tpoint;
- unsigned long ip = (unsigned long)tpoint->probestub;
- /*
- * Here, we do 2 steps to enable fprobe on a tracepoint.
- * At first, put __probestub_##TP function on the tracepoint
- * and put a fprobe on the stub function.
- */
- ret = tracepoint_probe_register_prio_may_exist(tpoint,
- tpoint->probestub, NULL, 0);
- if (ret < 0)
- return ret;
- return register_fprobe_ips(&tf->fp, &ip, 1);
+
+ /* This tracepoint is not loaded yet */
+ if (tf->tpoint == TRACEPOINT_STUB)
+ return 0;
+
+		return __register_tracepoint_fprobe(tf);
}
/* TODO: handle filter, nofilter or symbol list */
@@ -862,20 +877,106 @@ end:
return ret;
}
+struct __find_tracepoint_cb_data {
+ const char *tp_name;
+ struct tracepoint *tpoint;
+ struct module *mod;
+};
+
+static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
+ data->tpoint = tp;
+ if (!data->mod) {
+ data->mod = mod;
+ if (!try_module_get(data->mod)) {
+ data->tpoint = NULL;
+ data->mod = NULL;
+ }
+ }
+ }
+}
+
+static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name))
+ data->tpoint = tp;
+}
+
+/*
+ * Find a tracepoint in the kernel or in a module. If the tracepoint is in
+ * a module, this increments the module refcount to prevent it from being
+ * unloaded until the trace_fprobe is registered to the list. Once the
+ * trace_fprobe is on the trace_fprobe list, the refcount is dropped,
+ * because tracepoint_probe_module_cb takes over from there.
+ */
+static struct tracepoint *find_tracepoint(const char *tp_name,
+ struct module **tp_mod)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = NULL,
+ };
+
+ for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
+
+ if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
+ for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
+ *tp_mod = data.mod;
+ }
+
+ return data.tpoint;
+}
+
#ifdef CONFIG_MODULES
+static void reenable_trace_fprobe(struct trace_fprobe *tf)
+{
+ struct trace_probe *tp = &tf->tp;
+
+ list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
+ __enable_trace_fprobe(tf);
+ }
+}
+
+static struct tracepoint *find_tracepoint_in_module(struct module *mod,
+ const char *tp_name)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = mod,
+ };
+
+ for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
+ return data.tpoint;
+}
+
static int __tracepoint_probe_module_cb(struct notifier_block *self,
unsigned long val, void *data)
{
struct tp_module *tp_mod = data;
+ struct tracepoint *tpoint;
struct trace_fprobe *tf;
struct dyn_event *pos;
- if (val != MODULE_STATE_GOING)
+ if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
return NOTIFY_DONE;
mutex_lock(&event_mutex);
for_each_trace_fprobe(tf, pos) {
- if (tp_mod->mod == tf->mod) {
+ if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
+ tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
+ if (tpoint) {
+ tf->tpoint = tpoint;
+ tf->mod = tp_mod->mod;
+			if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
+ trace_probe_is_enabled(&tf->tp))
+ reenable_trace_fprobe(tf);
+ }
+ } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
tracepoint_probe_unregister(tf->tpoint,
tf->tpoint->probestub, NULL);
tf->tpoint = NULL;
@@ -892,30 +993,6 @@ static struct notifier_block tracepoint_module_nb = {
};
#endif /* CONFIG_MODULES */
-struct __find_tracepoint_cb_data {
- const char *tp_name;
- struct tracepoint *tpoint;
-};
-
-static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
-{
- struct __find_tracepoint_cb_data *data = priv;
-
- if (!data->tpoint && !strcmp(data->tp_name, tp->name))
- data->tpoint = tp;
-}
-
-static struct tracepoint *find_tracepoint(const char *tp_name)
-{
- struct __find_tracepoint_cb_data data = {
- .tp_name = tp_name,
- };
-
- for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
-
- return data.tpoint;
-}
-
static int parse_symbol_and_return(int argc, const char *argv[],
char **symbol, bool *is_return,
bool is_tracepoint)
@@ -996,6 +1073,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
char abuf[MAX_BTF_ARGS_LEN];
char *dbuf = NULL;
bool is_tracepoint = false;
+ struct module *tp_mod = NULL;
struct tracepoint *tpoint = NULL;
struct traceprobe_parse_context ctx = {
.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
@@ -1080,15 +1158,20 @@ static int __trace_fprobe_create(int argc, const char *argv[])
if (is_tracepoint) {
ctx.flags |= TPARG_FL_TPOINT;
- tpoint = find_tracepoint(symbol);
- if (!tpoint) {
+ tpoint = find_tracepoint(symbol, &tp_mod);
+ if (tpoint) {
+ ctx.funcname = kallsyms_lookup(
+ (unsigned long)tpoint->probestub,
+ NULL, NULL, NULL, sbuf);
+ } else if (IS_ENABLED(CONFIG_MODULES)) {
+ /* This *may* be loaded afterwards */
+ tpoint = TRACEPOINT_STUB;
+ ctx.funcname = symbol;
+ } else {
trace_probe_log_set_index(1);
trace_probe_log_err(0, NO_TRACEPOINT);
goto parse_error;
}
- ctx.funcname = kallsyms_lookup(
- (unsigned long)tpoint->probestub,
- NULL, NULL, NULL, sbuf);
} else
ctx.funcname = symbol;
@@ -1110,8 +1193,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out;
/* setup a probe */
- tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
- argc, is_return);
+ tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
+ maxactive, argc, is_return);
if (IS_ERR(tf)) {
ret = PTR_ERR(tf);
/* This must return -ENOMEM, else there is a bug */
@@ -1119,10 +1202,6 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out; /* We know tf is not allocated */
}
- if (is_tracepoint)
- tf->mod = __module_text_address(
- (unsigned long)tf->tpoint->probestub);
-
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
trace_probe_log_set_index(i + 2);
@@ -1155,6 +1234,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
}
out:
+ if (tp_mod)
+ module_put(tp_mod);
traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
kfree(new_argv);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 13d0387ac6a6..a569daaac4c4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -544,6 +544,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent;
+ addr += iter->tr->text_delta;
+
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return;
@@ -710,6 +712,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
+ unsigned long func;
int cpu = iter->cpu;
int i;
@@ -717,6 +720,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
+ func = call->func + iter->tr->text_delta;
+
if (data) {
struct fgraph_cpu_data *cpu_data;
@@ -747,10 +752,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
* enabled.
*/
if (flags & __TRACE_GRAPH_PRINT_RETVAL)
- print_graph_retval(s, graph_ret->retval, true, (void *)call->func,
+ print_graph_retval(s, graph_ret->retval, true, (void *)func,
!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
else
- trace_seq_printf(s, "%ps();\n", (void *)call->func);
+ trace_seq_printf(s, "%ps();\n", (void *)func);
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
cpu, iter->ent->pid, flags);
@@ -766,6 +771,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
struct ftrace_graph_ent *call = &entry->graph_ent;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
+ unsigned long func;
int i;
if (data) {
@@ -788,7 +794,9 @@ print_graph_entry_nested(struct trace_iterator *iter,
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
- trace_seq_printf(s, "%ps() {\n", (void *)call->func);
+ func = call->func + iter->tr->text_delta;
+
+ trace_seq_printf(s, "%ps() {\n", (void *)func);
if (trace_seq_has_overflowed(s))
return TRACE_TYPE_PARTIAL_LINE;
@@ -863,6 +871,8 @@ check_irq_entry(struct trace_iterator *iter, u32 flags,
int *depth_irq;
struct fgraph_data *data = iter->private;
+ addr += iter->tr->text_delta;
+
/*
* If we are either displaying irqs, or we got called as
* a graph event and private data does not exist,
@@ -990,11 +1000,14 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
+ unsigned long func;
pid_t pid = ent->pid;
int cpu = iter->cpu;
int func_match = 1;
int i;
+ func = trace->func + iter->tr->text_delta;
+
if (check_irq_return(iter, flags, trace->depth))
return TRACE_TYPE_HANDLED;
@@ -1033,7 +1046,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
* function-retval option is enabled.
*/
if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
- print_graph_retval(s, trace->retval, false, (void *)trace->func,
+ print_graph_retval(s, trace->retval, false, (void *)func,
!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
} else {
/*
@@ -1046,7 +1059,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
trace_seq_puts(s, "}\n");
else
- trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+ trace_seq_printf(s, "} /* %ps */\n", (void *)func);
}
/* Overrun */
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index bbe47781617e..1439064f65d6 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -228,6 +228,11 @@ static inline struct osnoise_variables *this_cpu_osn_var(void)
return this_cpu_ptr(&per_cpu_osnoise_var);
}
+/*
+ * Protect the interface.
+ */
+static struct mutex interface_lock;
+
#ifdef CONFIG_TIMERLAT_TRACER
/*
* Runtime information for the timer mode.
@@ -253,11 +258,6 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void)
}
/*
- * Protect the interface.
- */
-static struct mutex interface_lock;
-
-/*
* tlat_var_reset - Reset the values of the given timerlat_variables
*/
static inline void tlat_var_reset(void)
@@ -1541,7 +1541,7 @@ static int run_osnoise(void)
* This will eventually cause unwarranted noise as PREEMPT_RCU
* will force preemption as the means of ending the current
* grace period. We avoid this problem by calling
- * rcu_momentary_dyntick_idle(), which performs a zero duration
+ * rcu_momentary_eqs(), which performs a zero duration
* EQS allowing PREEMPT_RCU to end the current grace period.
* This call shouldn't be wrapped inside an RCU critical
* section.
@@ -1553,7 +1553,7 @@ static int run_osnoise(void)
if (!disable_irq)
local_irq_disable();
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
if (!disable_irq)
local_irq_enable();
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d8b302d01083..868f2f912f28 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -990,8 +990,11 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
}
static void print_fn_trace(struct trace_seq *s, unsigned long ip,
- unsigned long parent_ip, int flags)
+ unsigned long parent_ip, long delta, int flags)
{
+ ip += delta;
+ parent_ip += delta;
+
seq_print_ip_sym(s, ip, flags);
if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
@@ -1009,7 +1012,7 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- print_fn_trace(s, field->ip, field->parent_ip, flags);
+ print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
trace_seq_putc(s, '\n');
return trace_handle_return(s);
@@ -1230,6 +1233,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
struct trace_seq *s = &iter->seq;
unsigned long *p;
unsigned long *end;
+ long delta = iter->tr->text_delta;
trace_assign_type(field, iter->ent);
end = (unsigned long *)((long)iter->ent + iter->ent_size);
@@ -1242,7 +1246,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
break;
trace_seq_puts(s, " => ");
- seq_print_ip_sym(s, *p, flags);
+ seq_print_ip_sym(s, (*p) + delta, flags);
trace_seq_putc(s, '\n');
}
@@ -1587,10 +1591,13 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
{
struct print_entry *field;
struct trace_seq *s = &iter->seq;
+ unsigned long ip;
trace_assign_type(field, iter->ent);
- seq_print_ip_sym(s, field->ip, flags);
+ ip = field->ip + iter->tr->text_delta;
+
+ seq_print_ip_sym(s, ip, flags);
trace_seq_printf(s, ": %s", field->buf);
return trace_handle_return(s);
@@ -1674,7 +1681,7 @@ trace_func_repeats_print(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- print_fn_trace(s, field->ip, field->parent_ip, flags);
+ print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
trace_print_time(s, iter,
iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 130ca7e7787e..ae2ace5e515a 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
* - wakeup_dl handles tasks belonging to sched_dl class only.
*/
if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
- (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+ (wakeup_rt && !rt_or_dl_task(p)) ||
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9c581d6da843..785733245ead 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -564,6 +564,7 @@ static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *re
BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *));
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
+ perf_fetch_caller_regs(regs);
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
for (i = 0; i < sys_data->nb_args; i++)
@@ -575,6 +576,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec;
+ struct pt_regs *fake_regs;
struct hlist_head *head;
unsigned long args[6];
bool valid_prog_array;
@@ -602,7 +604,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- rec = perf_trace_buf_alloc(size, NULL, &rctx);
+ rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
if (!rec)
return;
@@ -611,7 +613,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
if ((valid_prog_array &&
- !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
+ !perf_call_bpf_enter(sys_data->enter_event, fake_regs, sys_data, rec)) ||
hlist_empty(head)) {
perf_swevent_put_recursion_context(rctx);
return;
@@ -666,6 +668,7 @@ static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *reg
} __aligned(8) param;
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
+ perf_fetch_caller_regs(regs);
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
param.ret = rec->ret;
@@ -676,6 +679,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec;
+ struct pt_regs *fake_regs;
struct hlist_head *head;
bool valid_prog_array;
int syscall_nr;
@@ -701,7 +705,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- rec = perf_trace_buf_alloc(size, NULL, &rctx);
+ rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
if (!rec)
return;
@@ -709,7 +713,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->ret = syscall_get_return_value(current, regs);
if ((valid_prog_array &&
- !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
+ !perf_call_bpf_exit(sys_data->exit_event, fake_regs, rec)) ||
hlist_empty(head)) {
perf_swevent_put_recursion_context(rctx);
return;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c98e3b3386ba..c40531d2cbad 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>
+#include <linux/percpu.h>
#include "trace_dynevent.h"
#include "trace_probe.h"
@@ -58,11 +59,11 @@ struct trace_uprobe {
struct dyn_event devent;
struct uprobe_consumer consumer;
struct path path;
- struct inode *inode;
char *filename;
+ struct uprobe *uprobe;
unsigned long offset;
unsigned long ref_ctr_offset;
- unsigned long nhit;
+ unsigned long __percpu *nhits;
struct trace_probe tp;
};
@@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
if (!tu)
return ERR_PTR(-ENOMEM);
+ tu->nhits = alloc_percpu(unsigned long);
+ if (!tu->nhits) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
ret = trace_probe_init(&tu->tp, event, group, true, nargs);
if (ret < 0)
goto error;
@@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
return tu;
error:
+ free_percpu(tu->nhits);
kfree(tu);
return ERR_PTR(ret);
@@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
path_put(&tu->path);
trace_probe_cleanup(&tu->tp);
kfree(tu->filename);
+ free_percpu(tu->nhits);
kfree(tu);
}
@@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
{
struct dyn_event *ev = v;
struct trace_uprobe *tu;
+ unsigned long nhits;
+ int cpu;
if (!is_trace_uprobe(ev))
return 0;
tu = to_trace_uprobe(ev);
+
+ nhits = 0;
+ for_each_possible_cpu(cpu) {
+ nhits += per_cpu(*tu->nhits, cpu);
+ }
+
seq_printf(m, " %s %-44s %15lu\n", tu->filename,
- trace_probe_name(&tu->tp), tu->nhit);
+ trace_probe_name(&tu->tp), nhits);
return 0;
}
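For reference, the lockless per-CPU counter idiom this patch switches to, in
isolation (a sketch using the names from the patch):

	/* setup / teardown */
	tu->nhits = alloc_percpu(unsigned long);
	...
	free_percpu(tu->nhits);

	/* hot path: no lock, no shared cache line bouncing */
	this_cpu_inc(*tu->nhits);

	/* reader: a racy-but-adequate sum for the profile file */
	unsigned long nhits = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhits += per_cpu(*tu->nhits, cpu);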
@@ -1078,43 +1095,40 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
return trace_handle_return(s);
}
-typedef bool (*filter_func_t)(struct uprobe_consumer *self,
- enum uprobe_filter_ctx ctx,
- struct mm_struct *mm);
+typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
- int ret;
+ struct inode *inode = d_real_inode(tu->path.dentry);
+ struct uprobe *uprobe;
tu->consumer.filter = filter;
- tu->inode = d_real_inode(tu->path.dentry);
+ uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
+ if (IS_ERR(uprobe))
+ return PTR_ERR(uprobe);
- if (tu->ref_ctr_offset)
- ret = uprobe_register_refctr(tu->inode, tu->offset,
- tu->ref_ctr_offset, &tu->consumer);
- else
- ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-
- if (ret)
- tu->inode = NULL;
-
- return ret;
+ tu->uprobe = uprobe;
+ return 0;
}
static void __probe_event_disable(struct trace_probe *tp)
{
struct trace_uprobe *tu;
+ bool sync = false;
tu = container_of(tp, struct trace_uprobe, tp);
WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
- if (!tu->inode)
+ if (!tu->uprobe)
continue;
- uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
- tu->inode = NULL;
+ uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
+ sync = true;
+ tu->uprobe = NULL;
}
+ if (sync)
+ uprobe_unregister_sync();
}
static int probe_event_enable(struct trace_event_call *call,
@@ -1310,7 +1324,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
return 0;
list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
- ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+ ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
if (ret)
break;
}
@@ -1334,7 +1348,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
return 0;
list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+ err = uprobe_apply(tu->uprobe, &tu->consumer, true);
if (err) {
uprobe_perf_close(call, event);
break;
@@ -1344,8 +1358,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
return err;
}
-static bool uprobe_perf_filter(struct uprobe_consumer *uc,
- enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
struct trace_uprobe_filter *filter;
struct trace_uprobe *tu;
@@ -1431,7 +1444,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
struct uprobe_cpu_buffer **ucbp)
{
- if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
+ if (!uprobe_perf_filter(&tu->consumer, current->mm))
return UPROBE_HANDLER_REMOVE;
if (!is_ret_probe(tu))
@@ -1512,7 +1525,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
int ret = 0;
tu = container_of(con, struct trace_uprobe, consumer);
- tu->nhit++;
+
+ this_cpu_inc(*tu->nhits);
udd.tu = tu;
udd.bp_addr = instruction_pointer(regs);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 8d1507dd0724..8879da16ef4d 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -735,6 +735,48 @@ static __init int init_tracepoints(void)
return ret;
}
__initcall(init_tracepoints);
+
+/**
+ * for_each_tracepoint_in_module - iteration on all tracepoints in a module
+ * @mod: module
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ tracepoint_ptr_t *begin, *end, *iter;
+
+ lockdep_assert_held(&tracepoint_module_list_mutex);
+
+ if (!mod)
+ return;
+
+ begin = mod->tracepoints_ptrs;
+ end = mod->tracepoints_ptrs + mod->num_tracepoints;
+
+ for (iter = begin; iter < end; iter++)
+ fct(tracepoint_ptr_deref(iter), mod, priv);
+}
+
+/**
+ * for_each_module_tracepoint - iteration on all tracepoints in all modules
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ struct tp_module *tp_mod;
+
+ mutex_lock(&tracepoint_module_list_mutex);
+ list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+ for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
+ mutex_unlock(&tracepoint_module_list_mutex);
+}
#endif /* CONFIG_MODULES */
/**
diff --git a/kernel/user.c b/kernel/user.c
index aa1162deafe4..f46b1d41163b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -36,33 +36,33 @@ EXPORT_SYMBOL_GPL(init_binfmt_misc);
*/
struct user_namespace init_user_ns = {
.uid_map = {
- .nr_extents = 1,
{
.extent[0] = {
.first = 0,
.lower_first = 0,
.count = 4294967295U,
},
+ .nr_extents = 1,
},
},
.gid_map = {
- .nr_extents = 1,
{
.extent[0] = {
.first = 0,
.lower_first = 0,
.count = 4294967295U,
},
+ .nr_extents = 1,
},
},
.projid_map = {
- .nr_extents = 1,
{
.extent[0] = {
.first = 0,
.lower_first = 0,
.count = 4294967295U,
},
+ .nr_extents = 1,
},
},
.ns.count = REFCOUNT_INIT(3),
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0b0b95418b16..aa0b2e47f2f2 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -853,9 +853,8 @@ static int sort_idmaps(struct uid_gid_map *map)
cmp_extents_forward, NULL);
/* Only copy the memory from forward we actually need. */
- map->reverse = kmemdup(map->forward,
- map->nr_extents * sizeof(struct uid_gid_extent),
- GFP_KERNEL);
+ map->reverse = kmemdup_array(map->forward, map->nr_extents,
+ sizeof(struct uid_gid_extent), GFP_KERNEL);
if (!map->reverse)
return -ENOMEM;
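kmemdup_array() expresses the same copy with the element count and size passed separately, in the style of kmalloc_array(), so the size computation is protected against multiplication overflow instead of being open-coded by the caller. As a sketch of the transformation (assuming src points at n elements):

	/* before: caller multiplies, no overflow protection */
	copy = kmemdup(src, n * sizeof(*src), GFP_KERNEL);

	/* after: count and element size passed separately */
	copy = kmemdup_array(src, n, sizeof(*src), GFP_KERNEL);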
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index 8b4f8cc2e0ec..1fec61603ef3 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -198,17 +198,17 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);
VMCOREINFO_NUMBER(PG_swapbacked);
-#define PAGE_SLAB_MAPCOUNT_VALUE (~PG_slab)
+#define PAGE_SLAB_MAPCOUNT_VALUE (PGTY_slab << 24)
VMCOREINFO_NUMBER(PAGE_SLAB_MAPCOUNT_VALUE);
#ifdef CONFIG_MEMORY_FAILURE
VMCOREINFO_NUMBER(PG_hwpoison);
#endif
VMCOREINFO_NUMBER(PG_head_mask);
-#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
+#define PAGE_BUDDY_MAPCOUNT_VALUE (PGTY_buddy << 24)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
+#define PAGE_HUGETLB_MAPCOUNT_VALUE (PGTY_hugetlb << 24)
VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
-#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
+#define PAGE_OFFLINE_MAPCOUNT_VALUE (PGTY_offline << 24)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#ifdef CONFIG_KALLSYMS
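These constants change because the exported page-type markers moved from inverted per-type flag bits (~PG_slab) to an 8-bit type tag kept in the top byte of the 32-bit page_type word, hence PGTY_x << 24. A dump-analysis tool consuming these vmcoreinfo values would then match pages roughly as in this hedged sketch, assuming the tag occupies the upper byte:

	/* illustrative: compare the upper byte of page_type to the PGTY_* tag */
	if ((page_type >> 24) == PGTY_slab)
		/* this page belongs to the slab allocator */;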
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 03b90d7d2175..d36242fd4936 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -666,8 +666,8 @@ struct watch_queue *get_watch_queue(int fd)
struct fd f;
f = fdget(fd);
- if (f.file) {
- pipe = get_pipe_info(f.file, false);
+ if (fd_file(f)) {
+ pipe = get_pipe_info(fd_file(f), false);
if (pipe && pipe->watch_queue) {
wqueue = pipe->watch_queue;
kref_get(&wqueue->usage);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 830a83895493..262691ba62b7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -1203,7 +1203,10 @@ static void __init lockup_detector_delay_init(struct work_struct *work)
ret = watchdog_hardlockup_probe();
if (ret) {
- pr_info("Delayed init of the lockup detector failed: %d\n", ret);
+ if (ret == -ENODEV)
+ pr_info("NMI not fully supported\n");
+ else
+ pr_info("Delayed init of the lockup detector failed: %d\n", ret);
pr_info("Hard watchdog permanently disabled\n");
return;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e7b005ff3750..9949ffad8df0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -364,7 +364,8 @@ struct workqueue_struct {
#ifdef CONFIG_LOCKDEP
char *lock_name;
struct lock_class_key key;
- struct lockdep_map lockdep_map;
+ struct lockdep_map __lockdep_map;
+ struct lockdep_map *lockdep_map;
#endif
char name[WQ_NAME_LEN]; /* I: workqueue name */
@@ -476,16 +477,13 @@ static bool wq_debug_force_rr_cpu = false;
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* to raise softirq for the BH worker pools on other CPUs */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
- bh_pool_irq_works);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);
/* the BH worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- bh_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);
/* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
@@ -2709,7 +2707,6 @@ static void detach_worker(struct worker *worker)
unbind_worker(worker);
list_del(&worker->node);
- worker->pool = NULL;
}
/**
@@ -2729,6 +2726,7 @@ static void worker_detach_from_pool(struct worker *worker)
mutex_lock(&wq_pool_attach_mutex);
detach_worker(worker);
+ worker->pool = NULL;
mutex_unlock(&wq_pool_attach_mutex);
/* clear leftover flags without pool->lock after it is detached */
@@ -3203,7 +3201,7 @@ __acquires(&pool->lock)
lockdep_start_depth = lockdep_depth(current);
/* see drain_dead_softirq_workfn() */
if (!bh_draining)
- lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
/*
* Strictly speaking we should mark the invariant state without holding
@@ -3237,7 +3235,7 @@ __acquires(&pool->lock)
pwq->stats[PWQ_STAT_COMPLETED]++;
lock_map_release(&lockdep_map);
if (!bh_draining)
- lock_map_release(&pwq->wq->lockdep_map);
+ lock_map_release(pwq->wq->lockdep_map);
if (unlikely((worker->task && in_atomic()) ||
lockdep_depth(current) != lockdep_start_depth ||
@@ -3349,7 +3347,11 @@ woke_up:
if (unlikely(worker->flags & WORKER_DIE)) {
raw_spin_unlock_irq(&pool->lock);
set_pf_worker(false);
-
+ /*
+ * The worker is dead and PF_WQ_WORKER is cleared, so worker->pool
+ * must not be accessed any more; reset it to NULL in case it is.
+ */
+ worker->pool = NULL;
ida_free(&pool->worker_ida, worker->id);
return 0;
}
@@ -3869,11 +3871,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
static void touch_wq_lockdep_map(struct workqueue_struct *wq)
{
#ifdef CONFIG_LOCKDEP
+ if (unlikely(!wq->lockdep_map))
+ return;
+
if (wq->flags & WQ_BH)
local_bh_disable();
- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
+ lock_map_acquire(wq->lockdep_map);
+ lock_map_release(wq->lockdep_map);
if (wq->flags & WQ_BH)
local_bh_enable();
@@ -3907,7 +3912,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+ .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)),
};
int next_color;
@@ -4772,16 +4777,23 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
lock_name = wq->name;
wq->lock_name = lock_name;
- lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+ wq->lockdep_map = &wq->__lockdep_map;
+ lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
}
static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
+ if (wq->lockdep_map != &wq->__lockdep_map)
+ return;
+
lockdep_unregister_key(&wq->key);
}
static void wq_free_lockdep(struct workqueue_struct *wq)
{
+ if (wq->lockdep_map != &wq->__lockdep_map)
+ return;
+
if (wq->lock_name != wq->name)
kfree(wq->lock_name);
}
@@ -5615,12 +5627,10 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
} while (activated);
}
-__printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...)
+static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, va_list args)
{
- va_list args;
struct workqueue_struct *wq;
size_t wq_size;
int name_len;
@@ -5652,9 +5662,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
goto err_free_wq;
}
- va_start(args, max_active);
name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
- va_end(args);
if (name_len >= WQ_NAME_LEN)
pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
@@ -5684,12 +5692,11 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
INIT_LIST_HEAD(&wq->flusher_overflow);
INIT_LIST_HEAD(&wq->maydays);
- wq_init_lockdep(wq);
INIT_LIST_HEAD(&wq->list);
if (flags & WQ_UNBOUND) {
if (alloc_node_nr_active(wq->node_nr_active) < 0)
- goto err_unreg_lockdep;
+ goto err_free_wq;
}
/*
@@ -5728,9 +5735,6 @@ err_unlock_free_node_nr_active:
kthread_flush_worker(pwq_release_worker);
free_node_nr_active(wq->node_nr_active);
}
-err_unreg_lockdep:
- wq_unregister_lockdep(wq);
- wq_free_lockdep(wq);
err_free_wq:
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
@@ -5741,8 +5745,49 @@ err_destroy:
destroy_workqueue(wq);
return NULL;
}
+
+__printf(1, 4)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args;
+
+ va_start(args, max_active);
+ wq = __alloc_workqueue(fmt, flags, max_active, args);
+ va_end(args);
+ if (!wq)
+ return NULL;
+
+ wq_init_lockdep(wq);
+
+ return wq;
+}
EXPORT_SYMBOL_GPL(alloc_workqueue);
+#ifdef CONFIG_LOCKDEP
+__printf(1, 5)
+struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
+ int max_active, struct lockdep_map *lockdep_map, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args;
+
+ va_start(args, lockdep_map);
+ wq = __alloc_workqueue(fmt, flags, max_active, args);
+ va_end(args);
+ if (!wq)
+ return NULL;
+
+ wq->lockdep_map = lockdep_map;
+
+ return wq;
+}
+EXPORT_SYMBOL_GPL(alloc_workqueue_lockdep_map);
+#endif
+
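With the lockdep map now referenced through a pointer, a caller can supply its own statically allocated map rather than having a key registered per workqueue (wq_unregister_lockdep()/wq_free_lockdep() above skip caller-owned maps). A hypothetical caller, with all names invented here:

	/* hypothetical usage: share a caller-owned, statically initialized map */
	static struct lockdep_map my_wq_map =
		STATIC_LOCKDEP_MAP_INIT("my_wq", &my_wq_map);

	struct workqueue_struct *wq =
		alloc_workqueue_lockdep_map("my_wq", WQ_UNBOUND, 0, &my_wq_map);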
static bool pwq_busy(struct pool_workqueue *pwq)
{
int i;
@@ -7402,6 +7447,9 @@ static struct timer_list wq_watchdog_timer;
static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
/*
* Show workers that might prevent the processing of pending work items.
* The only candidates are CPU-bound workers in the running state.
@@ -7453,6 +7501,16 @@ static void show_cpu_pools_hogs(void)
rcu_read_unlock();
}
+static void panic_on_wq_watchdog(void)
+{
+ static unsigned int wq_stall;
+
+ if (wq_panic_on_stall) {
+ wq_stall++;
+ BUG_ON(wq_stall >= wq_panic_on_stall);
+ }
+}
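panic_on_stall is an ordinary module parameter: a value of N makes the N-th stall report trigger BUG(), while 0 (the default) keeps the watchdog warn-only. Since workqueue.c is built in, the knob should be reachable on the kernel command line or, at runtime, via sysfs (paths per the usual module-parameter convention):

	workqueue.panic_on_stall=3                                  # boot-time
	echo 3 > /sys/module/workqueue/parameters/panic_on_stall    # runtime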
+
static void wq_watchdog_reset_touched(void)
{
int cpu;
@@ -7525,6 +7583,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
if (cpu_pool_stall)
show_cpu_pools_hogs();
+ if (lockup_detected)
+ panic_on_wq_watchdog();
+
wq_watchdog_reset_touched();
mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
diff --git a/lib/.gitignore b/lib/.gitignore
index 54596b634ecb..101a4aa92fb5 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -5,5 +5,3 @@
/gen_crc32table
/gen_crc64table
/oid_registry_data.c
-/test_fortify.log
-/test_fortify/*.log
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a30c03a66172..7315f643817a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -97,7 +97,7 @@ config BOOT_PRINTK_DELAY
using "boot_delay=N".
It is likely that you would also need to use "lpj=M" to preset
- the "loops per jiffie" value.
+ the "loops per jiffy" value.
See a previous boot log for the "lpj" value to use for your
system, and then set "lpj=M" before setting "boot_delay=N".
NOTE: Using this option may adversely affect SMP systems.
@@ -379,13 +379,15 @@ config DEBUG_INFO_BTF
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
- depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ depends on PAHOLE_VERSION >= 116
+ depends on DEBUG_INFO_DWARF4 || PAHOLE_VERSION >= 121
# pahole uses elfutils, which does not have support for Hexagon relocations
depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
- Turning this on expects presence of pahole tool, which will convert
- DWARF type info into equivalent deduplicated BTF type info.
+ Turning this on requires pahole v1.16 or later (v1.21 or later to
+ support DWARF 5), which will convert DWARF type info into equivalent
+ deduplicated BTF type info.
config PAHOLE_HAS_SPLIT_BTF
def_bool PAHOLE_VERSION >= 119
@@ -571,6 +573,21 @@ config VMLINUX_MAP
pieces of code get eliminated with
CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+config BUILTIN_MODULE_RANGES
+ bool "Generate address range information for builtin modules"
+ depends on !LTO
+ depends on VMLINUX_MAP
+ help
+ When modules are built into the kernel, there will be no module name
+ associated with their symbols in /proc/kallsyms. Tracers may want to
+ identify symbols by module name and symbol name regardless of whether
+ the module is configured as loadable or not.
+
+ This option generates modules.builtin.ranges in the build tree, holding
+ the offset ranges (per ELF section) of each built-in module. It also
+ records an anchor symbol to determine the load address of each section.
+
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -1515,7 +1532,7 @@ config LOCKDEP_BITS
config LOCKDEP_CHAINS_BITS
int "Bitsize for MAX_LOCKDEP_CHAINS"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 21
default 16
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
@@ -1614,6 +1631,7 @@ config SCF_TORTURE_TEST
config CSD_LOCK_WAIT_DEBUG
bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
depends on DEBUG_KERNEL
+ depends on SMP
depends on 64BIT
default n
help
@@ -2173,6 +2191,14 @@ config KCOV_IRQ_AREA_SIZE
soft interrupts. This specifies the size of those areas in the
number of unsigned long words.
+config KCOV_SELFTEST
+ bool "Perform short selftests on boot"
+ depends on KCOV
+ help
+ Run short KCOV coverage collection selftests on boot.
+ On test failure, the kernel panics. Enabling this is recommended
+ to ensure that critical functionality works as intended.
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
default y
@@ -2280,6 +2306,16 @@ config TEST_DIV64
If unsure, say N.
+config TEST_MULDIV64
+ tristate "mul_u64_u64_div_u64() test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on the 'mul_u64_u64_div_u64()' function test.
+ This test is executed only once during system boot (so affects
+ only boot time), or at module load time.
+
+ If unsure, say N.
+
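mul_u64_u64_div_u64(a, b, c) computes a * b / c through a 128-bit intermediate product, so the multiplication cannot overflow 64 bits even when only the final quotient fits. One worked case such a test could plausibly exercise (illustrative, not taken from the test module):

	/* a * b = 2 * (2^64 - 1) needs 65 bits, but the quotient fits in 64 */
	u64 r = mul_u64_u64_div_u64(U64_MAX, 2, 4);	/* r == U64_MAX / 2 */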
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2616,6 +2652,7 @@ config RESOURCE_KUNIT_TEST
tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select GET_FREE_REGION
help
This builds the resource API unit test.
Tests the logic of API provided by resource.c and ioport.h.
@@ -3051,3 +3088,19 @@ config RUST_KERNEL_DOCTESTS
endmenu # "Rust"
endmenu # Kernel hacking
+
+config INT_POW_TEST
+ tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_pow function,
+ which performs integer exponentiation. The test suite is designed to
+ verify that the implementation of int_pow correctly computes the power
+ of a given base raised to a given exponent.
+
+ Enabling this option will include tests that check various scenarios
+ and edge cases to ensure the accuracy and reliability of the exponentiation
+ function.
+
+ If unsure, say N.
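For reference, int_pow() is declared as u64 int_pow(u64 base, unsigned int exp); the kind of assertion such a KUnit suite makes looks like this (illustrative only):

	KUNIT_EXPECT_EQ(test, int_pow(2, 10), 1024);
	KUNIT_EXPECT_EQ(test, int_pow(7, 0), 1);	/* x^0 == 1 */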
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index bdda600f8dfb..1d4aa7a83b3a 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -29,8 +29,8 @@ config UBSAN_TRAP
Also note that selecting Y will cause your kernel to Oops
with an "illegal instruction" error with no further details
- when a UBSAN violation occurs. (Except on arm64, which will
- report which Sanitizer failed.) This may make it hard to
+ when a UBSAN violation occurs. (Except on arm64 and x86, which
+ will report which Sanitizer failed.) This may make it hard to
determine whether an Oops was caused by UBSAN or to figure
out the details of a UBSAN violation. It makes the kernel log
output less useful for bug reports.
diff --git a/lib/Makefile b/lib/Makefile
index 322bb127b4dc..773adf88af41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -14,6 +14,7 @@ KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
KCOV_INSTRUMENT_fault-inject.o := n
+KCOV_INSTRUMENT_find_bit.o := n
# string.o implements standard library functions like memset/memcpy etc.
# Use -ffreestanding to ensure that the compiler does not try to "optimize"
@@ -34,7 +35,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o win_minmax.o memcat_p.o \
- buildid.o objpool.o
+ buildid.o objpool.o union_find.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -393,40 +394,4 @@ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
-# FORTIFY_SOURCE compile-time behavior tests
-TEST_FORTIFY_SRCS = $(wildcard $(src)/test_fortify/*-*.c)
-TEST_FORTIFY_LOGS = $(patsubst $(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
-TEST_FORTIFY_LOG = test_fortify.log
-
-quiet_cmd_test_fortify = TEST $@
- cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \
- $< $@ "$(NM)" $(CC) $(c_flags) \
- $(call cc-disable-warning,fortify-source) \
- -DKBUILD_EXTRA_WARN1
-
-targets += $(TEST_FORTIFY_LOGS)
-clean-files += $(TEST_FORTIFY_LOGS)
-clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS))
-$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \
- $(src)/test_fortify/test_fortify.h \
- $(srctree)/include/linux/fortify-string.h \
- $(srctree)/scripts/test_fortify.sh \
- FORCE
- $(call if_changed,test_fortify)
-
-quiet_cmd_gen_fortify_log = GEN $@
- cmd_gen_fortify_log = cat </dev/null $(filter-out FORCE,$^) 2>/dev/null > $@ || true
-
-targets += $(TEST_FORTIFY_LOG)
-clean-files += $(TEST_FORTIFY_LOG)
-$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
- $(call if_changed,gen_fortify_log)
-
-# Fake dependency to trigger the fortify tests.
-ifeq ($(CONFIG_FORTIFY_SOURCE),y)
-$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
-endif
-
-# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
-# Pass CFLAGS_KASAN to avoid warnings.
-$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y))
+subdir-$(CONFIG_FORTIFY_SOURCE) += test_fortify
diff --git a/lib/bcd.c b/lib/bcd.c
index 7e4750b6e801..c5e79ba9cd7b 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -10,6 +10,8 @@ EXPORT_SYMBOL(_bcd2bin);
unsigned char _bin2bcd(unsigned val)
{
- return ((val / 10) << 4) + val % 10;
+ const unsigned int t = (val * 103) >> 10;
+
+ return (t << 4) | (val - t * 10);
}
EXPORT_SYMBOL(_bin2bcd);
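The rewrite trades a divide and a modulo for one multiply, one shift, and one multiply-subtract: for 0 <= val <= 99, t = (val * 103) >> 10 equals val / 10, because 103/1024 = 0.1006 approximates 1/10 closely enough over that range; val - t * 10 then recovers the low digit, and (t << 4) | low packs both BCD nibbles. Checking the end points by hand:

	val = 99: 99 * 103 = 10197; 10197 >> 10 = 9   (== 99 / 10)
	val = 10: 10 * 103 = 1030;  1030 >> 10  = 1   (== 10 / 10)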
diff --git a/lib/buildid.c b/lib/buildid.c
index e02b5507418b..290641d92ac1 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -8,154 +8,302 @@
#define BUILD_ID 3
+#define MAX_PHDR_CNT 256
+
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault)
+{
+ memset(r, 0, sizeof(*r));
+ r->buf = buf;
+ r->buf_sz = buf_sz;
+ r->file = file;
+ r->may_fault = may_fault;
+}
+
+static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
+{
+ memset(r, 0, sizeof(*r));
+ r->data = data;
+ r->data_sz = data_sz;
+}
+
+static void freader_put_folio(struct freader *r)
+{
+ if (!r->folio)
+ return;
+ kunmap_local(r->addr);
+ folio_put(r->folio);
+ r->folio = NULL;
+}
+
+static int freader_get_folio(struct freader *r, loff_t file_off)
+{
+ /* check if we can just reuse current folio */
+ if (r->folio && file_off >= r->folio_off &&
+ file_off < r->folio_off + folio_size(r->folio))
+ return 0;
+
+ freader_put_folio(r);
+
+ r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
+
+ /* if sleeping is allowed, wait for the page, if necessary */
+ if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
+ filemap_invalidate_lock_shared(r->file->f_mapping);
+ r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
+ NULL, r->file);
+ filemap_invalidate_unlock_shared(r->file->f_mapping);
+ }
+
+ if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
+ if (!IS_ERR(r->folio))
+ folio_put(r->folio);
+ r->folio = NULL;
+ return -EFAULT;
+ }
+
+ r->folio_off = folio_pos(r->folio);
+ r->addr = kmap_local_folio(r->folio, 0);
+
+ return 0;
+}
+
+static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
+{
+ size_t folio_sz;
+
+ /* provided internal temporary buffer should be sized correctly */
+ if (WARN_ON(r->buf && sz > r->buf_sz)) {
+ r->err = -E2BIG;
+ return NULL;
+ }
+
+ if (unlikely(file_off + sz < file_off)) {
+ r->err = -EOVERFLOW;
+ return NULL;
+ }
+
+ /* working with memory buffer is much more straightforward */
+ if (!r->buf) {
+ if (file_off + sz > r->data_sz) {
+ r->err = -ERANGE;
+ return NULL;
+ }
+ return r->data + file_off;
+ }
+
+ /* fetch or reuse folio for given file offset */
+ r->err = freader_get_folio(r, file_off);
+ if (r->err)
+ return NULL;
+
+ /* if the requested data crosses folio boundaries, we have to copy
+ * everything into our local buffer to keep a simple linear memory
+ * access interface
+ */
+ folio_sz = folio_size(r->folio);
+ if (file_off + sz > r->folio_off + folio_sz) {
+ int part_sz = r->folio_off + folio_sz - file_off;
+
+ /* copy the part that resides in the current folio */
+ memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz);
+
+ /* fetch next folio */
+ r->err = freader_get_folio(r, r->folio_off + folio_sz);
+ if (r->err)
+ return NULL;
+
+ /* copy the rest of requested data */
+ memcpy(r->buf + part_sz, r->addr, sz - part_sz);
+
+ return r->buf;
+ }
+
+ /* if data fits in a single folio, just return direct pointer */
+ return r->addr + (file_off - r->folio_off);
+}
+
+static void freader_cleanup(struct freader *r)
+{
+ if (!r->buf)
+ return; /* non-file-backed mode */
+
+ freader_put_folio(r);
+}
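Taken together, the reader's contract is: initialize once, fetch as needed (each fetch may invalidate pointers returned by earlier fetches, as the comments below note), then clean up. A condensed sketch against an in-memory ELF note buffer:

	struct freader r;
	const Elf32_Nhdr *nhdr;

	freader_init_from_mem(&r, buf, buf_sz);
	nhdr = freader_fetch(&r, 0, sizeof(*nhdr));	/* NULL on error; see r.err */
	if (!nhdr)
		return r.err;
	/* ... any further freader_fetch() call invalidates nhdr ... */
	freader_cleanup(&r);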
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static int parse_build_id_buf(unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
+ loff_t note_off, Elf32_Word note_size)
{
- Elf32_Word note_offs = 0, new_offs;
+ const char note_name[] = "GNU";
+ const size_t note_name_sz = sizeof(note_name);
+ u32 build_id_off, new_off, note_end, name_sz, desc_sz;
+ const Elf32_Nhdr *nhdr;
+ const char *data;
+
+ if (check_add_overflow(note_off, note_size, &note_end))
+ return -EINVAL;
- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+ while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
+ nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
+ if (!nhdr)
+ return r->err;
+
+ name_sz = READ_ONCE(nhdr->n_namesz);
+ desc_sz = READ_ONCE(nhdr->n_descsz);
+
+ new_off = note_off + sizeof(Elf32_Nhdr);
+ if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
+ check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
+ new_off > note_end)
+ break;
if (nhdr->n_type == BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU") &&
- !strcmp((char *)(nhdr + 1), "GNU") &&
- nhdr->n_descsz > 0 &&
- nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
- memcpy(build_id,
- note_start + note_offs +
- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- nhdr->n_descsz);
- memset(build_id + nhdr->n_descsz, 0,
- BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ name_sz == note_name_sz &&
+ memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
+ desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
+ build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);
+
+ /* freader_fetch() will invalidate nhdr pointer */
+ data = freader_fetch(r, build_id_off, desc_sz);
+ if (!data)
+ return r->err;
+
+ memcpy(build_id, data, desc_sz);
+ memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
if (size)
- *size = nhdr->n_descsz;
+ *size = desc_sz;
return 0;
}
- new_offs = note_offs + sizeof(Elf32_Nhdr) +
- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
- if (new_offs <= note_offs) /* overflow */
- break;
- note_offs = new_offs;
+
+ note_off = new_off;
}
return -EINVAL;
}
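For orientation, the byte layout the loop walks is the standard ELF note format; for a typical GNU build-ID note carrying a 20-byte SHA-1 the record looks like:

	Elf32_Nhdr { n_namesz = 4, n_descsz = 20, n_type = 3 /* BUILD_ID */ }
	"GNU\0"             /* n_namesz bytes, padded to a 4-byte boundary */
	<20-byte build ID>  /* n_descsz bytes, padded to a 4-byte boundary */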
-static inline int parse_build_id(const void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
+ const Elf32_Ehdr *ehdr;
+ const Elf32_Phdr *phdr;
+ __u32 phnum, phoff, i;
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
+ ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
+ if (!ehdr)
+ return r->err;
- return parse_build_id_buf(build_id, size, note_start, note_size);
-}
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
-/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(const void *page_addr, unsigned char *build_id,
- __u32 *size)
-{
- Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
- Elf32_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ /* set an upper bound on the number of segments (phdrs) we iterate over */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
return -EINVAL;
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(const void *page_addr, unsigned char *build_id,
- __u32 *size)
+static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
- Elf64_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ const Elf64_Ehdr *ehdr;
+ const Elf64_Phdr *phdr;
+ __u32 phnum, i;
+ __u64 phoff;
+
+ ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
+ if (!ehdr)
+ return r->err;
+
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
+
+ /* set an upper bound on the number of segments (phdrs) we iterate over */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
+
return -EINVAL;
}
-/*
- * Parse build ID of ELF file mapped to vma
- * @vma: vma object
- * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
- * @size: returns actual build id size in case of success
- *
- * Return: 0 on success, -EINVAL otherwise
- */
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size)
+/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
+#define MAX_FREADER_BUF_SZ 64
+
+static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size, bool may_fault)
{
- Elf32_Ehdr *ehdr;
- struct page *page;
- void *page_addr;
+ const Elf32_Ehdr *ehdr;
+ struct freader r;
+ char buf[MAX_FREADER_BUF_SZ];
int ret;
/* only works for page backed storage */
if (!vma->vm_file)
return -EINVAL;
- page = find_get_page(vma->vm_file->f_mapping, 0);
- if (!page)
- return -EFAULT; /* page not mapped */
+ freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);
+
+ /* fetch first 18 bytes of ELF header for checks */
+ ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
+ if (!ehdr) {
+ ret = r.err;
+ goto out;
+ }
ret = -EINVAL;
- page_addr = kmap_local_page(page);
- ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
@@ -166,15 +314,46 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
goto out;
if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
- ret = get_build_id_32(page_addr, build_id, size);
+ ret = get_build_id_32(&r, build_id, size);
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
- ret = get_build_id_64(page_addr, build_id, size);
+ ret = get_build_id_64(&r, build_id, size);
out:
- kunmap_local(page_addr);
- put_page(page);
+ freader_cleanup(&r);
return ret;
}
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes no page fault can be taken, so if relevant portions of the ELF
+ * file are not already paged in, fetching the build ID fails.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, false /* !may_fault */);
+}
+
+/*
+ * Parse build ID of ELF file mapped to VMA
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes a faultable context and can cause page faults to bring file
+ * data into the page cache.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, true /* may_fault */);
+}
+
/**
* build_id_parse_buf - Get build ID from a buffer
* @buf: ELF note section(s) to parse
@@ -185,7 +364,15 @@ out:
*/
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
- return parse_build_id_buf(build_id, NULL, buf, buf_size);
+ struct freader r;
+ int err;
+
+ freader_init_from_mem(&r, buf, buf_size);
+
+ err = parse_build_id(&r, build_id, NULL, 0, buf_size);
+
+ freader_cleanup(&r);
+ return err;
}
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 4e4d081a1d3b..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -468,12 +468,9 @@ static __wsum to_wsum(u32 x)
static void assert_setup_correct(struct kunit *test)
{
- CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN);
- CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]),
- MAX_LEN);
- CHECK_EQ(sizeof(init_sums_no_overflow) /
- sizeof(init_sums_no_overflow[0]),
- MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(random_buf), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(expected_results), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(init_sums_no_overflow), MAX_LEN);
}
/*
diff --git a/lib/closure.c b/lib/closure.c
index 116afae2eed9..2bfe7d2a0048 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -278,7 +278,7 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
- seq_puts(f, "\n");
+ seq_putc(f, '\n');
}
spin_unlock_irq(&closure_list_lock);
diff --git a/lib/crypto/mpi/Makefile b/lib/crypto/mpi/Makefile
index 6e6ef9a34fe1..9ad84079025a 100644
--- a/lib/crypto/mpi/Makefile
+++ b/lib/crypto/mpi/Makefile
@@ -13,14 +13,12 @@ mpi-y = \
generic_mpih-rshift.o \
generic_mpih-sub1.o \
generic_mpih-add1.o \
- ec.o \
mpicoder.o \
mpi-add.o \
mpi-bit.o \
mpi-cmp.o \
mpi-sub-ui.o \
mpi-div.o \
- mpi-inv.o \
mpi-mod.o \
mpi-mul.o \
mpih-cmp.o \
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
deleted file mode 100644
index 4781f00982ef..000000000000
--- a/lib/crypto/mpi/ec.c
+++ /dev/null
@@ -1,1507 +0,0 @@
-/* ec.c - Elliptic Curve functions
- * Copyright (C) 2007 Free Software Foundation, Inc.
- * Copyright (C) 2013 g10 Code GmbH
- *
- * This file is part of Libgcrypt.
- *
- * Libgcrypt is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * Libgcrypt is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "mpi-internal.h"
-#include "longlong.h"
-
-#define point_init(a) mpi_point_init((a))
-#define point_free(a) mpi_point_free_parts((a))
-
-#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
-#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
-
-#define DIM(v) (sizeof(v)/sizeof((v)[0]))
-
-
-/* Create a new point option. NBITS gives the size in bits of one
- * coordinate; it is only used to pre-allocate some resources and
- * might also be passed as 0 to use a default value.
- */
-MPI_POINT mpi_point_new(unsigned int nbits)
-{
- MPI_POINT p;
-
- (void)nbits; /* Currently not used. */
-
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p)
- mpi_point_init(p);
- return p;
-}
-EXPORT_SYMBOL_GPL(mpi_point_new);
-
-/* Release the point object P. P may be NULL. */
-void mpi_point_release(MPI_POINT p)
-{
- if (p) {
- mpi_point_free_parts(p);
- kfree(p);
- }
-}
-EXPORT_SYMBOL_GPL(mpi_point_release);
-
-/* Initialize the fields of a point object. gcry_mpi_point_free_parts
- * may be used to release the fields.
- */
-void mpi_point_init(MPI_POINT p)
-{
- p->x = mpi_new(0);
- p->y = mpi_new(0);
- p->z = mpi_new(0);
-}
-EXPORT_SYMBOL_GPL(mpi_point_init);
-
-/* Release the parts of a point object. */
-void mpi_point_free_parts(MPI_POINT p)
-{
- mpi_free(p->x); p->x = NULL;
- mpi_free(p->y); p->y = NULL;
- mpi_free(p->z); p->z = NULL;
-}
-EXPORT_SYMBOL_GPL(mpi_point_free_parts);
-
-/* Set the value from S into D. */
-static void point_set(MPI_POINT d, MPI_POINT s)
-{
- mpi_set(d->x, s->x);
- mpi_set(d->y, s->y);
- mpi_set(d->z, s->z);
-}
-
-static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx)
-{
- size_t nlimbs = ctx->p->nlimbs;
-
- mpi_resize(p->x, nlimbs);
- p->x->nlimbs = nlimbs;
- mpi_resize(p->z, nlimbs);
- p->z->nlimbs = nlimbs;
-
- if (ctx->model != MPI_EC_MONTGOMERY) {
- mpi_resize(p->y, nlimbs);
- p->y->nlimbs = nlimbs;
- }
-}
-
-static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap,
- struct mpi_ec_ctx *ctx)
-{
- mpi_swap_cond(d->x, s->x, swap);
- if (ctx->model != MPI_EC_MONTGOMERY)
- mpi_swap_cond(d->y, s->y, swap);
- mpi_swap_cond(d->z, s->z, swap);
-}
-
-
-/* W = W mod P. */
-static void ec_mod(MPI w, struct mpi_ec_ctx *ec)
-{
- if (ec->t.p_barrett)
- mpi_mod_barrett(w, w, ec->t.p_barrett);
- else
- mpi_mod(w, w, ec->p);
-}
-
-static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_add(w, u, v);
- ec_mod(w, ctx);
-}
-
-static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec)
-{
- mpi_sub(w, u, v);
- while (w->sign)
- mpi_add(w, w, ec->p);
- /*ec_mod(w, ec);*/
-}
-
-static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_mul(w, u, v);
- ec_mod(w, ctx);
-}
-
-/* W = 2 * U mod P. */
-static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx)
-{
- mpi_lshift(w, u, 1);
- ec_mod(w, ctx);
-}
-
-static void ec_powm(MPI w, const MPI b, const MPI e,
- struct mpi_ec_ctx *ctx)
-{
- mpi_powm(w, b, e, ctx->p);
- /* mpi_abs(w); */
-}
-
-/* Shortcut for
- * ec_powm(B, B, mpi_const(MPI_C_TWO), ctx);
- * for easier optimization.
- */
-static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
-{
- /* Using mpi_mul is slightly faster (at least on amd64). */
- /* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */
- ec_mulm(w, b, b, ctx);
-}
-
-/* Shortcut for
- * ec_powm(B, B, mpi_const(MPI_C_THREE), ctx);
- * for easier optimization.
- */
-static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
-{
- mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p);
-}
-
-static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx)
-{
- if (!mpi_invm(x, a, ctx->p))
- log_error("ec_invm: inverse does not exist:\n");
-}
-
-static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up,
- mpi_size_t usize, unsigned long set)
-{
- mpi_size_t i;
- mpi_limb_t mask = ((mpi_limb_t)0) - set;
- mpi_limb_t x;
-
- for (i = 0; i < usize; i++) {
- x = mask & (wp[i] ^ up[i]);
- wp[i] = wp[i] ^ x;
- }
-}
-
-/* Routines for 2^255 - 19. */
-
-#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
-
-static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_25519;
- mpi_limb_t n[LIMB_SIZE_25519];
- mpi_limb_t borrow;
-
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("addm_25519: different sizes\n");
-
- memset(n, 0, sizeof(n));
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- mpihelp_add_n(wp, up, vp, wsize);
- borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
- mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
- mpihelp_add_n(wp, wp, n, wsize);
- wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
-}
-
-static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_25519;
- mpi_limb_t n[LIMB_SIZE_25519];
- mpi_limb_t borrow;
-
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("subm_25519: different sizes\n");
-
- memset(n, 0, sizeof(n));
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- borrow = mpihelp_sub_n(wp, up, vp, wsize);
- mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
- mpihelp_add_n(wp, wp, n, wsize);
- wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
-}
-
-static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_25519;
- mpi_limb_t n[LIMB_SIZE_25519*2];
- mpi_limb_t m[LIMB_SIZE_25519+1];
- mpi_limb_t cy;
- int msb;
-
- (void)ctx;
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("mulm_25519: different sizes\n");
-
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- mpihelp_mul_n(n, up, vp, wsize);
- memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB);
- wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
-
- memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB);
- mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % BITS_PER_MPI_LIMB));
-
- memcpy(n, m, wsize * BYTES_PER_MPI_LIMB);
- cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4);
- m[LIMB_SIZE_25519] = cy;
- cy = mpihelp_add_n(m, m, n, wsize);
- m[LIMB_SIZE_25519] += cy;
- cy = mpihelp_add_n(m, m, n, wsize);
- m[LIMB_SIZE_25519] += cy;
- cy = mpihelp_add_n(m, m, n, wsize);
- m[LIMB_SIZE_25519] += cy;
-
- cy = mpihelp_add_n(wp, wp, m, wsize);
- m[LIMB_SIZE_25519] += cy;
-
- memset(m, 0, wsize * BYTES_PER_MPI_LIMB);
- msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB));
- m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19;
- wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
- mpihelp_add_n(wp, wp, m, wsize);
-
- m[0] = 0;
- cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
- mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL));
- mpihelp_add_n(wp, wp, m, wsize);
-}
-
-static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx)
-{
- ec_addm_25519(w, u, u, ctx);
-}
-
-static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
-{
- ec_mulm_25519(w, b, b, ctx);
-}
-
-/* Routines for 2^448 - 2^224 - 1. */
-
-#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
-#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2)
-
-static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_448;
- mpi_limb_t n[LIMB_SIZE_448];
- mpi_limb_t cy;
-
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("addm_448: different sizes\n");
-
- memset(n, 0, sizeof(n));
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- cy = mpihelp_add_n(wp, up, vp, wsize);
- mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
- mpihelp_sub_n(wp, wp, n, wsize);
-}
-
-static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_448;
- mpi_limb_t n[LIMB_SIZE_448];
- mpi_limb_t borrow;
-
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("subm_448: different sizes\n");
-
- memset(n, 0, sizeof(n));
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- borrow = mpihelp_sub_n(wp, up, vp, wsize);
- mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
- mpihelp_add_n(wp, wp, n, wsize);
-}
-
-static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t wsize = LIMB_SIZE_448;
- mpi_limb_t n[LIMB_SIZE_448*2];
- mpi_limb_t a2[LIMB_SIZE_HALF_448];
- mpi_limb_t a3[LIMB_SIZE_HALF_448];
- mpi_limb_t b0[LIMB_SIZE_HALF_448];
- mpi_limb_t b1[LIMB_SIZE_HALF_448];
- mpi_limb_t cy;
- int i;
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- mpi_limb_t b1_rest, a3_rest;
-#endif
-
- if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
- log_bug("mulm_448: different sizes\n");
-
- up = u->d;
- vp = v->d;
- wp = w->d;
-
- mpihelp_mul_n(n, up, vp, wsize);
-
- for (i = 0; i < (wsize + 1) / 2; i++) {
- b0[i] = n[i];
- b1[i] = n[i+wsize/2];
- a2[i] = n[i+wsize];
- a3[i] = n[i+wsize+wsize/2];
- }
-
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
- a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
-
- b1_rest = 0;
- a3_rest = 0;
-
- for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
- mpi_limb_t b1v, a3v;
- b1v = b1[i];
- a3v = a3[i];
- b1[i] = (b1_rest << 32) | (b1v >> 32);
- a3[i] = (a3_rest << 32) | (a3v >> 32);
- b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
- a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1);
- }
-#endif
-
- cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448);
- cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448);
- for (i = 0; i < (wsize + 1) / 2; i++)
- wp[i] = b0[i];
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1);
-#endif
-
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- cy = b0[LIMB_SIZE_HALF_448-1] >> 32;
-#endif
-
- cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy);
- cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448);
- cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
- cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- b1_rest = 0;
- for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
- mpi_limb_t b1v = b1[i];
- b1[i] = (b1_rest << 32) | (b1v >> 32);
- b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
- }
- wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32);
-#endif
- for (i = 0; i < wsize / 2; i++)
- wp[i+(wsize + 1) / 2] = b1[i];
-
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- cy = b1[LIMB_SIZE_HALF_448-1];
-#endif
-
- memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
-
-#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
- n[LIMB_SIZE_HALF_448-1] = cy << 32;
-#else
- n[LIMB_SIZE_HALF_448] = cy;
-#endif
- n[0] = cy;
- mpihelp_add_n(wp, wp, n, wsize);
-
- memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
- cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
- mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
- mpihelp_add_n(wp, wp, n, wsize);
-}
-
-static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx)
-{
- ec_addm_448(w, u, u, ctx);
-}
-
-static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
-{
- ec_mulm_448(w, b, b, ctx);
-}
-
-struct field_table {
- const char *p;
-
- /* computation routines for the field. */
- void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
- void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
-};
-
-static const struct field_table field_table[] = {
- {
- "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
- ec_addm_25519,
- ec_subm_25519,
- ec_mulm_25519,
- ec_mul2_25519,
- ec_pow2_25519
- },
- {
- "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE"
- "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
- ec_addm_448,
- ec_subm_448,
- ec_mulm_448,
- ec_mul2_448,
- ec_pow2_448
- },
- { NULL, NULL, NULL, NULL, NULL, NULL },
-};
-
-/* Force recomputation of all helper variables. */
-static void mpi_ec_get_reset(struct mpi_ec_ctx *ec)
-{
- ec->t.valid.a_is_pminus3 = 0;
- ec->t.valid.two_inv_p = 0;
-}
-
-/* Accessor for helper variable. */
-static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec)
-{
- MPI tmp;
-
- if (!ec->t.valid.a_is_pminus3) {
- ec->t.valid.a_is_pminus3 = 1;
- tmp = mpi_alloc_like(ec->p);
- mpi_sub_ui(tmp, ec->p, 3);
- ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp);
- mpi_free(tmp);
- }
-
- return ec->t.a_is_pminus3;
-}
-
-/* Accessor for helper variable. */
-static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec)
-{
- if (!ec->t.valid.two_inv_p) {
- ec->t.valid.two_inv_p = 1;
- if (!ec->t.two_inv_p)
- ec->t.two_inv_p = mpi_alloc(0);
- ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec);
- }
- return ec->t.two_inv_p;
-}
-
-static const char *const curve25519_bad_points[] = {
- "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
- "0x0000000000000000000000000000000000000000000000000000000000000000",
- "0x0000000000000000000000000000000000000000000000000000000000000001",
- "0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0",
- "0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f",
- "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec",
- "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
- NULL
-};
-
-static const char *const curve448_bad_points[] = {
- "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
- "0x00000000000000000000000000000000000000000000000000000000"
- "00000000000000000000000000000000000000000000000000000000",
- "0x00000000000000000000000000000000000000000000000000000000"
- "00000000000000000000000000000000000000000000000000000001",
- "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
- "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
- "00000000000000000000000000000000000000000000000000000000",
- NULL
-};
-
-static const char *const *bad_points_table[] = {
- curve25519_bad_points,
- curve448_bad_points,
-};
-
-static void mpi_ec_coefficient_normalize(MPI a, MPI p)
-{
- if (a->sign) {
- mpi_resize(a, p->nlimbs);
- mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs);
- a->nlimbs = p->nlimbs;
- a->sign = 0;
- }
-}
-
-/* This function initialized a context for elliptic curve based on the
- * field GF(p). P is the prime specifying this field, A is the first
- * coefficient. CTX is expected to be zeroized.
- */
-void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
- enum ecc_dialects dialect,
- int flags, MPI p, MPI a, MPI b)
-{
- int i;
- static int use_barrett = -1 /* TODO: 1 or -1 */;
-
- mpi_ec_coefficient_normalize(a, p);
- mpi_ec_coefficient_normalize(b, p);
-
- /* Fixme: Do we want to check some constraints? e.g. a < p */
-
- ctx->model = model;
- ctx->dialect = dialect;
- ctx->flags = flags;
- if (dialect == ECC_DIALECT_ED25519)
- ctx->nbits = 256;
- else
- ctx->nbits = mpi_get_nbits(p);
- ctx->p = mpi_copy(p);
- ctx->a = mpi_copy(a);
- ctx->b = mpi_copy(b);
-
- ctx->d = NULL;
- ctx->t.two_inv_p = NULL;
-
- ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
-
- mpi_ec_get_reset(ctx);
-
- if (model == MPI_EC_MONTGOMERY) {
- for (i = 0; i < DIM(bad_points_table); i++) {
- MPI p_candidate = mpi_scanval(bad_points_table[i][0]);
- int match_p = !mpi_cmp(ctx->p, p_candidate);
- int j;
-
- mpi_free(p_candidate);
- if (!match_p)
- continue;
-
- for (j = 0; i < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++)
- ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]);
- }
- } else {
- /* Allocate scratch variables. */
- for (i = 0; i < DIM(ctx->t.scratch); i++)
- ctx->t.scratch[i] = mpi_alloc_like(ctx->p);
- }
-
- ctx->addm = ec_addm;
- ctx->subm = ec_subm;
- ctx->mulm = ec_mulm;
- ctx->mul2 = ec_mul2;
- ctx->pow2 = ec_pow2;
-
- for (i = 0; field_table[i].p; i++) {
- MPI f_p;
-
- f_p = mpi_scanval(field_table[i].p);
- if (!f_p)
- break;
-
- if (!mpi_cmp(p, f_p)) {
- ctx->addm = field_table[i].addm;
- ctx->subm = field_table[i].subm;
- ctx->mulm = field_table[i].mulm;
- ctx->mul2 = field_table[i].mul2;
- ctx->pow2 = field_table[i].pow2;
- mpi_free(f_p);
-
- mpi_resize(ctx->a, ctx->p->nlimbs);
- ctx->a->nlimbs = ctx->p->nlimbs;
-
- mpi_resize(ctx->b, ctx->p->nlimbs);
- ctx->b->nlimbs = ctx->p->nlimbs;
-
- for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++)
- ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs;
-
- break;
- }
-
- mpi_free(f_p);
- }
-}
-EXPORT_SYMBOL_GPL(mpi_ec_init);
-
-void mpi_ec_deinit(struct mpi_ec_ctx *ctx)
-{
- int i;
-
- mpi_barrett_free(ctx->t.p_barrett);
-
- /* Domain parameter. */
- mpi_free(ctx->p);
- mpi_free(ctx->a);
- mpi_free(ctx->b);
- mpi_point_release(ctx->G);
- mpi_free(ctx->n);
-
- /* The key. */
- mpi_point_release(ctx->Q);
- mpi_free(ctx->d);
-
- /* Private data of ec.c. */
- mpi_free(ctx->t.two_inv_p);
-
- for (i = 0; i < DIM(ctx->t.scratch); i++)
- mpi_free(ctx->t.scratch[i]);
-}
-EXPORT_SYMBOL_GPL(mpi_ec_deinit);
-
-/* Compute the affine coordinates from the projective coordinates in
- * POINT. Set them into X and Y. If one coordinate is not required,
- * X or Y may be passed as NULL. CTX is the usual context. Returns: 0
- * on success or !0 if POINT is at infinity.
- */
-int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
- if (!mpi_cmp_ui(point->z, 0))
- return -1;
-
- switch (ctx->model) {
- case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
- {
- MPI z1, z2, z3;
-
- z1 = mpi_new(0);
- z2 = mpi_new(0);
- ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */
- ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
-
- if (x)
- ec_mulm(x, point->x, z2, ctx);
-
- if (y) {
- z3 = mpi_new(0);
- ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
- ec_mulm(y, point->y, z3, ctx);
- mpi_free(z3);
- }
-
- mpi_free(z2);
- mpi_free(z1);
- }
- return 0;
-
- case MPI_EC_MONTGOMERY:
- {
- if (x)
- mpi_set(x, point->x);
-
- if (y) {
- log_fatal("%s: Getting Y-coordinate on %s is not supported\n",
- "mpi_ec_get_affine", "Montgomery");
- return -1;
- }
- }
- return 0;
-
- case MPI_EC_EDWARDS:
- {
- MPI z;
-
- z = mpi_new(0);
- ec_invm(z, point->z, ctx);
-
- mpi_resize(z, ctx->p->nlimbs);
- z->nlimbs = ctx->p->nlimbs;
-
- if (x) {
- mpi_resize(x, ctx->p->nlimbs);
- x->nlimbs = ctx->p->nlimbs;
- ctx->mulm(x, point->x, z, ctx);
- }
- if (y) {
- mpi_resize(y, ctx->p->nlimbs);
- y->nlimbs = ctx->p->nlimbs;
- ctx->mulm(y, point->y, z, ctx);
- }
-
- mpi_free(z);
- }
- return 0;
-
- default:
- return -1;
- }
-}
-EXPORT_SYMBOL_GPL(mpi_ec_get_affine);
-
-/* RESULT = 2 * POINT (Weierstrass version). */
-static void dup_point_weierstrass(MPI_POINT result,
- MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
-#define x3 (result->x)
-#define y3 (result->y)
-#define z3 (result->z)
-#define t1 (ctx->t.scratch[0])
-#define t2 (ctx->t.scratch[1])
-#define t3 (ctx->t.scratch[2])
-#define l1 (ctx->t.scratch[3])
-#define l2 (ctx->t.scratch[4])
-#define l3 (ctx->t.scratch[5])
-
- if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) {
- /* P_y == 0 || P_z == 0 => [1:1:0] */
- mpi_set_ui(x3, 1);
- mpi_set_ui(y3, 1);
- mpi_set_ui(z3, 0);
- } else {
- if (ec_get_a_is_pminus3(ctx)) {
- /* Use the faster case. */
- /* L1 = 3(X - Z^2)(X + Z^2) */
- /* T1: used for Z^2. */
- /* T2: used for the right term. */
- ec_pow2(t1, point->z, ctx);
- ec_subm(l1, point->x, t1, ctx);
- ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
- ec_addm(t2, point->x, t1, ctx);
- ec_mulm(l1, l1, t2, ctx);
- } else {
- /* Standard case. */
- /* L1 = 3X^2 + aZ^4 */
- /* T1: used for aZ^4. */
- ec_pow2(l1, point->x, ctx);
- ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
- ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx);
- ec_mulm(t1, t1, ctx->a, ctx);
- ec_addm(l1, l1, t1, ctx);
- }
- /* Z3 = 2YZ */
- ec_mulm(z3, point->y, point->z, ctx);
- ec_mul2(z3, z3, ctx);
-
- /* L2 = 4XY^2 */
- /* T2: used for Y2; required later. */
- ec_pow2(t2, point->y, ctx);
- ec_mulm(l2, t2, point->x, ctx);
- ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx);
-
- /* X3 = L1^2 - 2L2 */
- /* T1: used for L2^2. */
- ec_pow2(x3, l1, ctx);
- ec_mul2(t1, l2, ctx);
- ec_subm(x3, x3, t1, ctx);
-
- /* L3 = 8Y^4 */
- /* T2: taken from above. */
- ec_pow2(t2, t2, ctx);
- ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx);
-
- /* Y3 = L1(L2 - X3) - L3 */
- ec_subm(y3, l2, x3, ctx);
- ec_mulm(y3, y3, l1, ctx);
- ec_subm(y3, y3, l3, ctx);
- }
-
-#undef x3
-#undef y3
-#undef z3
-#undef t1
-#undef t2
-#undef t3
-#undef l1
-#undef l2
-#undef l3
-}
-
-/* RESULT = 2 * POINT (Montgomery version). */
-static void dup_point_montgomery(MPI_POINT result,
- MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
- (void)result;
- (void)point;
- (void)ctx;
- log_fatal("%s: %s not yet supported\n",
- "mpi_ec_dup_point", "Montgomery");
-}
-
-/* RESULT = 2 * POINT (Twisted Edwards version). */
-static void dup_point_edwards(MPI_POINT result,
- MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
-#define X1 (point->x)
-#define Y1 (point->y)
-#define Z1 (point->z)
-#define X3 (result->x)
-#define Y3 (result->y)
-#define Z3 (result->z)
-#define B (ctx->t.scratch[0])
-#define C (ctx->t.scratch[1])
-#define D (ctx->t.scratch[2])
-#define E (ctx->t.scratch[3])
-#define F (ctx->t.scratch[4])
-#define H (ctx->t.scratch[5])
-#define J (ctx->t.scratch[6])
-
- /* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */
-
- /* B = (X_1 + Y_1)^2 */
- ctx->addm(B, X1, Y1, ctx);
- ctx->pow2(B, B, ctx);
-
- /* C = X_1^2 */
- /* D = Y_1^2 */
- ctx->pow2(C, X1, ctx);
- ctx->pow2(D, Y1, ctx);
-
- /* E = aC */
- if (ctx->dialect == ECC_DIALECT_ED25519)
- ctx->subm(E, ctx->p, C, ctx);
- else
- ctx->mulm(E, ctx->a, C, ctx);
-
- /* F = E + D */
- ctx->addm(F, E, D, ctx);
-
- /* H = Z_1^2 */
- ctx->pow2(H, Z1, ctx);
-
- /* J = F - 2H */
- ctx->mul2(J, H, ctx);
- ctx->subm(J, F, J, ctx);
-
- /* X_3 = (B - C - D) · J */
- ctx->subm(X3, B, C, ctx);
- ctx->subm(X3, X3, D, ctx);
- ctx->mulm(X3, X3, J, ctx);
-
- /* Y_3 = F · (E - D) */
- ctx->subm(Y3, E, D, ctx);
- ctx->mulm(Y3, Y3, F, ctx);
-
- /* Z_3 = F · J */
- ctx->mulm(Z3, F, J, ctx);
-
-#undef X1
-#undef Y1
-#undef Z1
-#undef X3
-#undef Y3
-#undef Z3
-#undef B
-#undef C
-#undef D
-#undef E
-#undef F
-#undef H
-#undef J
-}
-
-/* RESULT = 2 * POINT */
-static void
-mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
- switch (ctx->model) {
- case MPI_EC_WEIERSTRASS:
- dup_point_weierstrass(result, point, ctx);
- break;
- case MPI_EC_MONTGOMERY:
- dup_point_montgomery(result, point, ctx);
- break;
- case MPI_EC_EDWARDS:
- dup_point_edwards(result, point, ctx);
- break;
- }
-}
-
-/* RESULT = P1 + P2 (Weierstrass version).*/
-static void add_points_weierstrass(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx)
-{
-#define x1 (p1->x)
-#define y1 (p1->y)
-#define z1 (p1->z)
-#define x2 (p2->x)
-#define y2 (p2->y)
-#define z2 (p2->z)
-#define x3 (result->x)
-#define y3 (result->y)
-#define z3 (result->z)
-#define l1 (ctx->t.scratch[0])
-#define l2 (ctx->t.scratch[1])
-#define l3 (ctx->t.scratch[2])
-#define l4 (ctx->t.scratch[3])
-#define l5 (ctx->t.scratch[4])
-#define l6 (ctx->t.scratch[5])
-#define l7 (ctx->t.scratch[6])
-#define l8 (ctx->t.scratch[7])
-#define l9 (ctx->t.scratch[8])
-#define t1 (ctx->t.scratch[9])
-#define t2 (ctx->t.scratch[10])
-
- if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) {
- /* Same point; need to call the duplicate function. */
- mpi_ec_dup_point(result, p1, ctx);
- } else if (!mpi_cmp_ui(z1, 0)) {
- /* P1 is at infinity. */
- mpi_set(x3, p2->x);
- mpi_set(y3, p2->y);
- mpi_set(z3, p2->z);
- } else if (!mpi_cmp_ui(z2, 0)) {
- /* P2 is at infinity. */
- mpi_set(x3, p1->x);
- mpi_set(y3, p1->y);
- mpi_set(z3, p1->z);
- } else {
- int z1_is_one = !mpi_cmp_ui(z1, 1);
- int z2_is_one = !mpi_cmp_ui(z2, 1);
-
- /* l1 = x1 z2^2 */
- /* l2 = x2 z1^2 */
- if (z2_is_one)
- mpi_set(l1, x1);
- else {
- ec_pow2(l1, z2, ctx);
- ec_mulm(l1, l1, x1, ctx);
- }
- if (z1_is_one)
- mpi_set(l2, x2);
- else {
- ec_pow2(l2, z1, ctx);
- ec_mulm(l2, l2, x2, ctx);
- }
- /* l3 = l1 - l2 */
- ec_subm(l3, l1, l2, ctx);
- /* l4 = y1 z2^3 */
- ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx);
- ec_mulm(l4, l4, y1, ctx);
- /* l5 = y2 z1^3 */
- ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx);
- ec_mulm(l5, l5, y2, ctx);
- /* l6 = l4 - l5 */
- ec_subm(l6, l4, l5, ctx);
-
- if (!mpi_cmp_ui(l3, 0)) {
- if (!mpi_cmp_ui(l6, 0)) {
- /* P1 and P2 are the same - use duplicate function. */
- mpi_ec_dup_point(result, p1, ctx);
- } else {
- /* P1 is the inverse of P2. */
- mpi_set_ui(x3, 1);
- mpi_set_ui(y3, 1);
- mpi_set_ui(z3, 0);
- }
- } else {
- /* l7 = l1 + l2 */
- ec_addm(l7, l1, l2, ctx);
- /* l8 = l4 + l5 */
- ec_addm(l8, l4, l5, ctx);
- /* z3 = z1 z2 l3 */
- ec_mulm(z3, z1, z2, ctx);
- ec_mulm(z3, z3, l3, ctx);
- /* x3 = l6^2 - l7 l3^2 */
- ec_pow2(t1, l6, ctx);
- ec_pow2(t2, l3, ctx);
- ec_mulm(t2, t2, l7, ctx);
- ec_subm(x3, t1, t2, ctx);
- /* l9 = l7 l3^2 - 2 x3 */
- ec_mul2(t1, x3, ctx);
- ec_subm(l9, t2, t1, ctx);
- /* y3 = (l9 l6 - l8 l3^3)/2 */
- ec_mulm(l9, l9, l6, ctx);
- ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/
- ec_mulm(t1, t1, l8, ctx);
- ec_subm(y3, l9, t1, ctx);
- ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx);
- }
- }
-
-#undef x1
-#undef y1
-#undef z1
-#undef x2
-#undef y2
-#undef z2
-#undef x3
-#undef y3
-#undef z3
-#undef l1
-#undef l2
-#undef l3
-#undef l4
-#undef l5
-#undef l6
-#undef l7
-#undef l8
-#undef l9
-#undef t1
-#undef t2
-}
-
-/* RESULT = P1 + P2 (Montgomery version).*/
-static void add_points_montgomery(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx)
-{
- (void)result;
- (void)p1;
- (void)p2;
- (void)ctx;
- log_fatal("%s: %s not yet supported\n",
- "mpi_ec_add_points", "Montgomery");
-}
-
-/* RESULT = P1 + P2 (Twisted Edwards version).*/
-static void add_points_edwards(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx)
-{
-#define X1 (p1->x)
-#define Y1 (p1->y)
-#define Z1 (p1->z)
-#define X2 (p2->x)
-#define Y2 (p2->y)
-#define Z2 (p2->z)
-#define X3 (result->x)
-#define Y3 (result->y)
-#define Z3 (result->z)
-#define A (ctx->t.scratch[0])
-#define B (ctx->t.scratch[1])
-#define C (ctx->t.scratch[2])
-#define D (ctx->t.scratch[3])
-#define E (ctx->t.scratch[4])
-#define F (ctx->t.scratch[5])
-#define G (ctx->t.scratch[6])
-#define tmp (ctx->t.scratch[7])
-
- point_resize(result, ctx);
-
-	/* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_2) */
-
- /* A = Z1 · Z2 */
- ctx->mulm(A, Z1, Z2, ctx);
-
- /* B = A^2 */
- ctx->pow2(B, A, ctx);
-
- /* C = X1 · X2 */
- ctx->mulm(C, X1, X2, ctx);
-
- /* D = Y1 · Y2 */
- ctx->mulm(D, Y1, Y2, ctx);
-
- /* E = d · C · D */
- ctx->mulm(E, ctx->b, C, ctx);
- ctx->mulm(E, E, D, ctx);
-
- /* F = B - E */
- ctx->subm(F, B, E, ctx);
-
- /* G = B + E */
- ctx->addm(G, B, E, ctx);
-
- /* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */
- ctx->addm(tmp, X1, Y1, ctx);
- ctx->addm(X3, X2, Y2, ctx);
- ctx->mulm(X3, X3, tmp, ctx);
- ctx->subm(X3, X3, C, ctx);
- ctx->subm(X3, X3, D, ctx);
- ctx->mulm(X3, X3, F, ctx);
- ctx->mulm(X3, X3, A, ctx);
-
- /* Y_3 = A · G · (D - aC) */
- if (ctx->dialect == ECC_DIALECT_ED25519) {
- ctx->addm(Y3, D, C, ctx);
- } else {
- ctx->mulm(Y3, ctx->a, C, ctx);
- ctx->subm(Y3, D, Y3, ctx);
- }
- ctx->mulm(Y3, Y3, G, ctx);
- ctx->mulm(Y3, Y3, A, ctx);
-
- /* Z_3 = F · G */
- ctx->mulm(Z3, F, G, ctx);
-
-
-#undef X1
-#undef Y1
-#undef Z1
-#undef X2
-#undef Y2
-#undef Z2
-#undef X3
-#undef Y3
-#undef Z3
-#undef A
-#undef B
-#undef C
-#undef D
-#undef E
-#undef F
-#undef G
-#undef tmp
-}
-
-/* Compute a step of Montgomery Ladder (only use X and Z in the point).
- * Inputs: P1, P2, and the x-coordinate of DIF = P1 - P2.
- * Outputs: PRD = 2 * P1 and SUM = P1 + P2.
- */
-static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum,
- MPI_POINT p1, MPI_POINT p2, MPI dif_x,
- struct mpi_ec_ctx *ctx)
-{
- ctx->addm(sum->x, p2->x, p2->z, ctx);
- ctx->subm(p2->z, p2->x, p2->z, ctx);
- ctx->addm(prd->x, p1->x, p1->z, ctx);
- ctx->subm(p1->z, p1->x, p1->z, ctx);
- ctx->mulm(p2->x, p1->z, sum->x, ctx);
- ctx->mulm(p2->z, prd->x, p2->z, ctx);
- ctx->pow2(p1->x, prd->x, ctx);
- ctx->pow2(p1->z, p1->z, ctx);
- ctx->addm(sum->x, p2->x, p2->z, ctx);
- ctx->subm(p2->z, p2->x, p2->z, ctx);
- ctx->mulm(prd->x, p1->x, p1->z, ctx);
- ctx->subm(p1->z, p1->x, p1->z, ctx);
- ctx->pow2(sum->x, sum->x, ctx);
- ctx->pow2(sum->z, p2->z, ctx);
- ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */
- ctx->mulm(sum->z, sum->z, dif_x, ctx);
- ctx->addm(prd->z, p1->x, prd->z, ctx);
- ctx->mulm(prd->z, prd->z, p1->z, ctx);
-}
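
For readers reconstructing the algebra: the step above is the classic X-only
ladder iteration (double R0, differential-add R0+R1). Below is a minimal,
self-contained C sketch of the algebraically equivalent formulas on 64-bit
words over a toy prime field. The prime, the constant A24 = (A - 2)/4 and the
base x-coordinate are arbitrary placeholder values for illustration
(assumptions, not a vetted curve); the kernel runs the same sequence on
multi-limb MPIs, with ctx->a pre-scaled to (a-2)/4.

#include <stdio.h>
#include <stdint.h>

#define P   1009ULL   /* toy prime field (assumption) */
#define A24  121ULL   /* (A - 2) / 4 for a toy coefficient A = 486 (assumption) */

static uint64_t addm(uint64_t a, uint64_t b) { return (a + b) % P; }
static uint64_t subm(uint64_t a, uint64_t b) { return (a + P - b) % P; }
static uint64_t mulm(uint64_t a, uint64_t b) { return (a * b) % P; }

/* One ladder step on (X : Z) pairs: R0 <- 2*R0, R1 <- R0 + R1. */
static void ladder_step(uint64_t x[2], uint64_t z[2], uint64_t dif_x)
{
	uint64_t t0 = addm(x[1], z[1]), t1 = subm(x[1], z[1]);
	uint64_t t2 = addm(x[0], z[0]), t3 = subm(x[0], z[0]);
	uint64_t t4 = mulm(t0, t3);          /* (X2+Z2)(X1-Z1) */
	uint64_t t5 = mulm(t1, t2);          /* (X2-Z2)(X1+Z1) */
	uint64_t aa = mulm(t2, t2), bb = mulm(t3, t3), e = subm(aa, bb);

	x[0] = mulm(aa, bb);                           /* X(2*R0) */
	z[0] = mulm(e, addm(bb, mulm(A24, e)));        /* Z(2*R0) */
	x[1] = mulm(addm(t4, t5), addm(t4, t5));       /* X(R0+R1) */
	z[1] = mulm(dif_x, mulm(subm(t4, t5), subm(t4, t5)));
}

static void swap2(uint64_t v[2]) { uint64_t t = v[0]; v[0] = v[1]; v[1] = t; }

int main(void)
{
	uint64_t k = 77, base_x = 9;
	uint64_t x[2] = { 1, base_x }, z[2] = { 0, 1 }; /* R0 = inf, R1 = P */

	for (int j = 9; j >= 0; j--) {       /* 10 bits cover any k < 1024 */
		int b = (k >> j) & 1;
		/* the kernel swaps unconditionally with a constant-time
		 * mask (point_swap_cond); a branch is fine for a demo */
		if (b) { swap2(x); swap2(z); }
		ladder_step(x, z, base_x);
		if (b) { swap2(x); swap2(z); }
	}
	printf("x(k*P) projective = (%llu : %llu)\n",
	       (unsigned long long)x[0], (unsigned long long)z[0]);
	return 0;
}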
-
-/* RESULT = P1 + P2 */
-void mpi_ec_add_points(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx)
-{
- switch (ctx->model) {
- case MPI_EC_WEIERSTRASS:
- add_points_weierstrass(result, p1, p2, ctx);
- break;
- case MPI_EC_MONTGOMERY:
- add_points_montgomery(result, p1, p2, ctx);
- break;
- case MPI_EC_EDWARDS:
- add_points_edwards(result, p1, p2, ctx);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(mpi_ec_add_points);
-
-/* Scalar point multiplication - the main function for ECC. It takes
- * an integer SCALAR and a POINT as well as the usual context CTX.
- * RESULT will be set to the resulting point.
- */
-void mpi_ec_mul_point(MPI_POINT result,
- MPI scalar, MPI_POINT point,
- struct mpi_ec_ctx *ctx)
-{
- MPI x1, y1, z1, k, h, yy;
- unsigned int i, loops;
- struct gcry_mpi_point p1, p2, p1inv;
-
- if (ctx->model == MPI_EC_EDWARDS) {
-		/* Simple left to right binary method. Algorithm 3.27 from
-		 * Hankerson, Darrel; Menezes, Alfred J.; Vanstone, Scott:
-		 * "Guide to Elliptic Curve Cryptography",
-		 * Springer-Verlag New York, Inc., 2003, ISBN 038795273X,
-		 * http://www.cacr.math.uwaterloo.ca/ecc/
- */
- unsigned int nbits;
- int j;
-
- if (mpi_cmp(scalar, ctx->p) >= 0)
- nbits = mpi_get_nbits(scalar);
- else
- nbits = mpi_get_nbits(ctx->p);
-
- mpi_set_ui(result->x, 0);
- mpi_set_ui(result->y, 1);
- mpi_set_ui(result->z, 1);
- point_resize(point, ctx);
-
- point_resize(result, ctx);
- point_resize(point, ctx);
-
- for (j = nbits-1; j >= 0; j--) {
- mpi_ec_dup_point(result, result, ctx);
- if (mpi_test_bit(scalar, j))
- mpi_ec_add_points(result, result, point, ctx);
- }
- return;
- } else if (ctx->model == MPI_EC_MONTGOMERY) {
- unsigned int nbits;
- int j;
- struct gcry_mpi_point p1_, p2_;
- MPI_POINT q1, q2, prd, sum;
- unsigned long sw;
- mpi_size_t rsize;
-
- /* Compute scalar point multiplication with Montgomery Ladder.
-		 * Note that we don't use the Y-coordinate in the points at all.
-		 * RESULT->Y will be filled with zero.
- */
-
- nbits = mpi_get_nbits(scalar);
- point_init(&p1);
- point_init(&p2);
- point_init(&p1_);
- point_init(&p2_);
- mpi_set_ui(p1.x, 1);
- mpi_free(p2.x);
- p2.x = mpi_copy(point->x);
- mpi_set_ui(p2.z, 1);
-
- point_resize(&p1, ctx);
- point_resize(&p2, ctx);
- point_resize(&p1_, ctx);
- point_resize(&p2_, ctx);
-
- mpi_resize(point->x, ctx->p->nlimbs);
- point->x->nlimbs = ctx->p->nlimbs;
-
- q1 = &p1;
- q2 = &p2;
- prd = &p1_;
- sum = &p2_;
-
- for (j = nbits-1; j >= 0; j--) {
- sw = mpi_test_bit(scalar, j);
- point_swap_cond(q1, q2, sw, ctx);
- montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
- point_swap_cond(prd, sum, sw, ctx);
- swap(q1, prd);
- swap(q2, sum);
- }
-
- mpi_clear(result->y);
- sw = (nbits & 1);
- point_swap_cond(&p1, &p1_, sw, ctx);
-
- rsize = p1.z->nlimbs;
- MPN_NORMALIZE(p1.z->d, rsize);
- if (rsize == 0) {
- mpi_set_ui(result->x, 1);
- mpi_set_ui(result->z, 0);
- } else {
- z1 = mpi_new(0);
- ec_invm(z1, p1.z, ctx);
- ec_mulm(result->x, p1.x, z1, ctx);
- mpi_set_ui(result->z, 1);
- mpi_free(z1);
- }
-
- point_free(&p1);
- point_free(&p2);
- point_free(&p1_);
- point_free(&p2_);
- return;
- }
-
- x1 = mpi_alloc_like(ctx->p);
- y1 = mpi_alloc_like(ctx->p);
- h = mpi_alloc_like(ctx->p);
- k = mpi_copy(scalar);
- yy = mpi_copy(point->y);
-
- if (mpi_has_sign(k)) {
- k->sign = 0;
- ec_invm(yy, yy, ctx);
- }
-
- if (!mpi_cmp_ui(point->z, 1)) {
- mpi_set(x1, point->x);
- mpi_set(y1, yy);
- } else {
- MPI z2, z3;
-
- z2 = mpi_alloc_like(ctx->p);
- z3 = mpi_alloc_like(ctx->p);
- ec_mulm(z2, point->z, point->z, ctx);
- ec_mulm(z3, point->z, z2, ctx);
- ec_invm(z2, z2, ctx);
- ec_mulm(x1, point->x, z2, ctx);
- ec_invm(z3, z3, ctx);
- ec_mulm(y1, yy, z3, ctx);
- mpi_free(z2);
- mpi_free(z3);
- }
- z1 = mpi_copy(mpi_const(MPI_C_ONE));
-
- mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */
- loops = mpi_get_nbits(h);
- if (loops < 2) {
- /* If SCALAR is zero, the above mpi_mul sets H to zero and thus
-		 * LOOPS will be zero. To avoid an underflow of I in the main
-		 * loop we set LOOPS to 2 and the result to (0,0,0).
- */
- loops = 2;
- mpi_clear(result->x);
- mpi_clear(result->y);
- mpi_clear(result->z);
- } else {
- mpi_set(result->x, point->x);
- mpi_set(result->y, yy);
- mpi_set(result->z, point->z);
- }
- mpi_free(yy); yy = NULL;
-
- p1.x = x1; x1 = NULL;
- p1.y = y1; y1 = NULL;
- p1.z = z1; z1 = NULL;
- point_init(&p2);
- point_init(&p1inv);
-
- /* Invert point: y = p - y mod p */
- point_set(&p1inv, &p1);
- ec_subm(p1inv.y, ctx->p, p1inv.y, ctx);
-
- for (i = loops-2; i > 0; i--) {
- mpi_ec_dup_point(result, result, ctx);
- if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) {
- point_set(&p2, result);
- mpi_ec_add_points(result, &p2, &p1, ctx);
- }
- if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) {
- point_set(&p2, result);
- mpi_ec_add_points(result, &p2, &p1inv, ctx);
- }
- }
-
- point_free(&p1);
- point_free(&p2);
- point_free(&p1inv);
- mpi_free(h);
- mpi_free(k);
-}
-EXPORT_SYMBOL_GPL(mpi_ec_mul_point);
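
The Weierstrass tail of the function above uses the classic "h = 3k"
signed-digit trick: scan h = 3k and k together from the second-highest bit of
h down to bit 1, doubling each step, adding P where h has a 1 over a 0 in k,
and adding -P (p1inv) where h has a 0 over a 1. A sketch where a toy group of
integers under addition mod N stands in for the curve group (an assumption
made purely so the result is checkable; __builtin_clzll is a gcc/clang
builtin, and k > 0 is assumed):

#include <stdio.h>
#include <stdint.h>

#define N 1000003ULL   /* toy group order (assumption) */

int main(void)
{
	uint64_t k = 123456, p = 42, h = 3 * k;
	int loops = 64 - __builtin_clzll(h);   /* bit length of h */
	uint64_t r = p;                        /* result starts at P (top bit of h) */
	uint64_t p_inv = N - p;                /* the "inverted point" -P */

	for (int i = loops - 2; i > 0; i--) {  /* bit 0 is skipped, as above */
		int hb = (h >> i) & 1, kb = (k >> i) & 1;

		r = (2 * r) % N;                   /* dup */
		if (hb && !kb)
			r = (r + p) % N;           /* add P */
		if (!hb && kb)
			r = (r + p_inv) % N;       /* add -P */
	}
	printf("%llu == %llu\n", (unsigned long long)r,
	       (unsigned long long)(k % N * p % N));   /* both print 185137 */
	return 0;
}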
-
-/* Return true if POINT is on the curve described by CTX. */
-int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx)
-{
- int res = 0;
- MPI x, y, w;
-
- x = mpi_new(0);
- y = mpi_new(0);
- w = mpi_new(0);
-
- /* Check that the point is in range. This needs to be done here and
- * not after conversion to affine coordinates.
- */
- if (mpi_cmpabs(point->x, ctx->p) >= 0)
- goto leave;
- if (mpi_cmpabs(point->y, ctx->p) >= 0)
- goto leave;
- if (mpi_cmpabs(point->z, ctx->p) >= 0)
- goto leave;
-
- switch (ctx->model) {
- case MPI_EC_WEIERSTRASS:
- {
- MPI xxx;
-
- if (mpi_ec_get_affine(x, y, point, ctx))
- goto leave;
-
- xxx = mpi_new(0);
-
- /* y^2 == x^3 + a·x + b */
- ec_pow2(y, y, ctx);
-
- ec_pow3(xxx, x, ctx);
- ec_mulm(w, ctx->a, x, ctx);
- ec_addm(w, w, ctx->b, ctx);
- ec_addm(w, w, xxx, ctx);
-
- if (!mpi_cmp(y, w))
- res = 1;
-
- mpi_free(xxx);
- }
- break;
-
- case MPI_EC_MONTGOMERY:
- {
-#define xx y
-		/* With a Montgomery curve, only the X-coordinate is valid. */
- if (mpi_ec_get_affine(x, NULL, point, ctx))
- goto leave;
-
- /* The equation is: b * y^2 == x^3 + a · x^2 + x */
-		/* We check whether the right-hand side is a quadratic
-		 * residue using Euler's criterion.
- */
- /* CTX->A has (a-2)/4 and CTX->B has b^-1 */
- ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx);
- ec_addm(w, w, mpi_const(MPI_C_TWO), ctx);
- ec_mulm(w, w, x, ctx);
- ec_pow2(xx, x, ctx);
- ec_addm(w, w, xx, ctx);
- ec_addm(w, w, mpi_const(MPI_C_ONE), ctx);
- ec_mulm(w, w, x, ctx);
- ec_mulm(w, w, ctx->b, ctx);
-#undef xx
- /* Compute Euler's criterion: w^(p-1)/2 */
-#define p_minus1 y
- ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx);
- mpi_rshift(p_minus1, p_minus1, 1);
- ec_powm(w, w, p_minus1, ctx);
-
- res = !mpi_cmp_ui(w, 1);
-#undef p_minus1
- }
- break;
-
- case MPI_EC_EDWARDS:
- {
- if (mpi_ec_get_affine(x, y, point, ctx))
- goto leave;
-
- mpi_resize(w, ctx->p->nlimbs);
- w->nlimbs = ctx->p->nlimbs;
-
- /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
- ctx->pow2(x, x, ctx);
- ctx->pow2(y, y, ctx);
- if (ctx->dialect == ECC_DIALECT_ED25519)
- ctx->subm(w, ctx->p, x, ctx);
- else
- ctx->mulm(w, ctx->a, x, ctx);
- ctx->addm(w, w, y, ctx);
- ctx->mulm(x, x, y, ctx);
- ctx->mulm(x, x, ctx->b, ctx);
- ctx->subm(w, w, x, ctx);
- if (!mpi_cmp_ui(w, 1))
- res = 1;
- }
- break;
- }
-
-leave:
- mpi_free(w);
- mpi_free(x);
- mpi_free(y);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(mpi_ec_curve_point);
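
The Montgomery branch above classifies the right-hand side w with Euler's
criterion: w is a nonzero square mod an odd prime p exactly when
w^((p-1)/2) == 1 (mod p), and a non-residue gives p-1. A self-contained
square-and-multiply sketch of the same test (small prime chosen so the answer
is checkable by hand; the MPI code does this with ec_powm on multi-limb
values):

#include <stdio.h>
#include <stdint.h>

/* Square-and-multiply modular exponentiation; __uint128_t keeps the
 * intermediate product exact for 64-bit moduli. */
static uint64_t modpow(uint64_t b, uint64_t e, uint64_t p)
{
	__uint128_t r = 1, x = b % p;

	while (e) {
		if (e & 1)
			r = r * x % p;
		x = x * x % p;
		e >>= 1;
	}
	return (uint64_t)r;
}

int main(void)
{
	uint64_t p = 11;   /* squares mod 11 are {1, 3, 4, 5, 9} */

	printf("3: %llu\n", (unsigned long long)modpow(3, (p - 1) / 2, p)); /* 1 */
	printf("2: %llu\n", (unsigned long long)modpow(2, (p - 1) / 2, p)); /* 10 == p-1 */
	return 0;
}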
diff --git a/lib/crypto/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c
index 9056fc5167fc..3015140d4860 100644
--- a/lib/crypto/mpi/mpi-add.c
+++ b/lib/crypto/mpi/mpi-add.c
@@ -13,62 +13,12 @@
#include "mpi-internal.h"
-/****************
- * Add the unsigned integer V to the mpi-integer U and store the
- * result in W. U and V may be the same.
- */
-void mpi_add_ui(MPI w, MPI u, unsigned long v)
-{
- mpi_ptr_t wp, up;
- mpi_size_t usize, wsize;
- int usign, wsign;
-
- usize = u->nlimbs;
- usign = u->sign;
- wsign = 0;
-
-	/* If there is not enough space for W (and a possible carry), grow it. */
- wsize = usize + 1;
- if (w->alloced < wsize)
- mpi_resize(w, wsize);
-
- /* These must be after realloc (U may be the same as W). */
- up = u->d;
- wp = w->d;
-
- if (!usize) { /* simple */
- wp[0] = v;
- wsize = v ? 1:0;
- } else if (!usign) { /* mpi is not negative */
- mpi_limb_t cy;
- cy = mpihelp_add_1(wp, up, usize, v);
- wp[usize] = cy;
- wsize = usize + cy;
- } else {
- /* The signs are different. Need exact comparison to determine
- * which operand to subtract from which.
- */
- if (usize == 1 && up[0] < v) {
- wp[0] = v - up[0];
- wsize = 1;
- } else {
- mpihelp_sub_1(wp, up, usize, v);
- /* Size can decrease with at most one limb. */
- wsize = usize - (wp[usize-1] == 0);
- wsign = 1;
- }
- }
-
- w->nlimbs = wsize;
- w->sign = wsign;
-}
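
The non-negative branch of the deleted helper reduces to mpihelp_add_1():
ripple a single addend through a little-endian limb array and report the
final carry, which becomes a new top limb. A sketch on plain uint64_t limbs
(toy sizes; mpi_limb_t and the real limb counts live in the MPI structs):

#include <stdio.h>
#include <stdint.h>

static uint64_t add_1(uint64_t *wp, const uint64_t *up, int usize, uint64_t v)
{
	for (int i = 0; i < usize; i++) {
		wp[i] = up[i] + v;
		v = (wp[i] < v);   /* carry out: 1 on wraparound, else 0 */
	}
	return v;                  /* final carry */
}

int main(void)
{
	uint64_t u[2] = { UINT64_MAX, UINT64_MAX };   /* 2^128 - 1 */
	uint64_t w[3];

	w[2] = add_1(w, u, 2, 1);                     /* + 1 => 2^128 */
	printf("%llu %llu %llu\n", (unsigned long long)w[0],
	       (unsigned long long)w[1], (unsigned long long)w[2]); /* 0 0 1 */
	return 0;
}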
-
-
-void mpi_add(MPI w, MPI u, MPI v)
+int mpi_add(MPI w, MPI u, MPI v)
{
mpi_ptr_t wp, up, vp;
mpi_size_t usize, vsize, wsize;
int usign, vsign, wsign;
+ int err;
if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
usize = v->nlimbs;
@@ -76,7 +26,9 @@ void mpi_add(MPI w, MPI u, MPI v)
vsize = u->nlimbs;
vsign = u->sign;
wsize = usize + 1;
- RESIZE_IF_NEEDED(w, wsize);
+ err = RESIZE_IF_NEEDED(w, wsize);
+ if (err)
+ return err;
/* These must be after realloc (u or v may be the same as w). */
up = v->d;
vp = u->d;
@@ -86,7 +38,9 @@ void mpi_add(MPI w, MPI u, MPI v)
vsize = v->nlimbs;
vsign = v->sign;
wsize = usize + 1;
- RESIZE_IF_NEEDED(w, wsize);
+ err = RESIZE_IF_NEEDED(w, wsize);
+ if (err)
+ return err;
/* These must be after realloc (u or v may be the same as w). */
up = u->d;
vp = v->d;
@@ -128,28 +82,37 @@ void mpi_add(MPI w, MPI u, MPI v)
w->nlimbs = wsize;
w->sign = wsign;
+ return 0;
}
EXPORT_SYMBOL_GPL(mpi_add);
-void mpi_sub(MPI w, MPI u, MPI v)
+int mpi_sub(MPI w, MPI u, MPI v)
{
- MPI vv = mpi_copy(v);
+ int err;
+ MPI vv;
+
+ vv = mpi_copy(v);
+ if (!vv)
+ return -ENOMEM;
+
vv->sign = !vv->sign;
- mpi_add(w, u, vv);
+ err = mpi_add(w, u, vv);
mpi_free(vv);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mpi_sub);
-void mpi_addm(MPI w, MPI u, MPI v, MPI m)
+int mpi_addm(MPI w, MPI u, MPI v, MPI m)
{
- mpi_add(w, u, v);
- mpi_mod(w, w, m);
+ return mpi_add(w, u, v) ?:
+ mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_addm);
-void mpi_subm(MPI w, MPI u, MPI v, MPI m)
+int mpi_subm(MPI w, MPI u, MPI v, MPI m)
{
- mpi_sub(w, u, v);
- mpi_mod(w, w, m);
+ return mpi_sub(w, u, v) ?:
+ mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_subm);
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index e08fc202ea5c..835a2f0622a0 100644
--- a/lib/crypto/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -32,7 +32,6 @@ void mpi_normalize(MPI a)
for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
;
}
-EXPORT_SYMBOL_GPL(mpi_normalize);
/****************
* Return the number of bits in A.
@@ -77,9 +76,10 @@ EXPORT_SYMBOL_GPL(mpi_test_bit);
/****************
* Set bit N of A.
*/
-void mpi_set_bit(MPI a, unsigned int n)
+int mpi_set_bit(MPI a, unsigned int n)
{
unsigned int i, limbno, bitno;
+ int err;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
@@ -87,106 +87,31 @@ void mpi_set_bit(MPI a, unsigned int n)
if (limbno >= a->nlimbs) {
for (i = a->nlimbs; i < a->alloced; i++)
a->d[i] = 0;
- mpi_resize(a, limbno+1);
+ err = mpi_resize(a, limbno+1);
+ if (err)
+ return err;
a->nlimbs = limbno+1;
}
a->d[limbno] |= (A_LIMB_1<<bitno);
-}
-
-/****************
- * Set bit N of A. and clear all bits above
- */
-void mpi_set_highbit(MPI a, unsigned int n)
-{
- unsigned int i, limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs) {
- for (i = a->nlimbs; i < a->alloced; i++)
- a->d[i] = 0;
- mpi_resize(a, limbno+1);
- a->nlimbs = limbno+1;
- }
- a->d[limbno] |= (A_LIMB_1<<bitno);
- for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
- a->nlimbs = limbno+1;
-}
-EXPORT_SYMBOL_GPL(mpi_set_highbit);
-
-/****************
- * clear bit N of A and all bits above
- */
-void mpi_clear_highbit(MPI a, unsigned int n)
-{
- unsigned int limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs)
- return; /* not allocated, therefore no need to clear bits :-) */
-
- for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
- a->nlimbs = limbno+1;
-}
-
-/****************
- * Clear bit N of A.
- */
-void mpi_clear_bit(MPI a, unsigned int n)
-{
- unsigned int limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs)
-		return; /* Nothing to clear, the bit lies beyond the allocated limbs. */
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
-}
-EXPORT_SYMBOL_GPL(mpi_clear_bit);
-
-
-/****************
- * Shift A by COUNT limbs to the right
- * This is used only within the MPI library
- */
-void mpi_rshift_limbs(MPI a, unsigned int count)
-{
- mpi_ptr_t ap = a->d;
- mpi_size_t n = a->nlimbs;
- unsigned int i;
-
- if (count >= n) {
- a->nlimbs = 0;
- return;
- }
-
- for (i = 0; i < n - count; i++)
- ap[i] = ap[i+count];
- ap[i] = 0;
- a->nlimbs -= count;
+ return 0;
}
/*
* Shift A by N bits to the right.
*/
-void mpi_rshift(MPI x, MPI a, unsigned int n)
+int mpi_rshift(MPI x, MPI a, unsigned int n)
{
mpi_size_t xsize;
unsigned int i;
unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+ int err;
if (x == a) {
/* In-place operation. */
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
- return;
+ return 0;
}
if (nlimbs) {
@@ -201,7 +126,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
/* Copy and shift by more or equal bits than in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
- RESIZE_IF_NEEDED(x, xsize);
+ err = RESIZE_IF_NEEDED(x, xsize);
+ if (err)
+ return err;
x->nlimbs = xsize;
for (i = 0; i < a->nlimbs; i++)
x->d[i] = a->d[i];
@@ -209,7 +136,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
- return;
+ return 0;
}
for (i = 0; i < x->nlimbs - nlimbs; i++)
@@ -223,7 +150,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
/* Copy and shift by less than bits in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
- RESIZE_IF_NEEDED(x, xsize);
+ err = RESIZE_IF_NEEDED(x, xsize);
+ if (err)
+ return err;
x->nlimbs = xsize;
if (xsize) {
@@ -239,68 +168,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
}
}
MPN_NORMALIZE(x->d, x->nlimbs);
-}
-EXPORT_SYMBOL_GPL(mpi_rshift);
-
-/****************
- * Shift A by COUNT limbs to the left
- * This is used only within the MPI library
- */
-void mpi_lshift_limbs(MPI a, unsigned int count)
-{
- mpi_ptr_t ap;
- int n = a->nlimbs;
- int i;
- if (!count || !n)
- return;
-
- RESIZE_IF_NEEDED(a, n+count);
-
- ap = a->d;
- for (i = n-1; i >= 0; i--)
- ap[i+count] = ap[i];
- for (i = 0; i < count; i++)
- ap[i] = 0;
- a->nlimbs += count;
-}
-
-/*
- * Shift A by N bits to the left.
- */
-void mpi_lshift(MPI x, MPI a, unsigned int n)
-{
- unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
- unsigned int nbits = (n%BITS_PER_MPI_LIMB);
-
- if (x == a && !n)
- return; /* In-place shift with an amount of zero. */
-
- if (x != a) {
- /* Copy A to X. */
- unsigned int alimbs = a->nlimbs;
- int asign = a->sign;
- mpi_ptr_t xp, ap;
-
- RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
- xp = x->d;
- ap = a->d;
- MPN_COPY(xp, ap, alimbs);
- x->nlimbs = alimbs;
- x->flags = a->flags;
- x->sign = asign;
- }
-
- if (nlimbs && !nbits) {
- /* Shift a full number of limbs. */
- mpi_lshift_limbs(x, nlimbs);
- } else if (n) {
-		/* We use a very dumb approach: shift left by the number of
-		 * limbs plus one and then fix it up with an rshift.
- */
- mpi_lshift_limbs(x, nlimbs+1);
- mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
- }
-
- MPN_NORMALIZE(x->d, x->nlimbs);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mpi_rshift);
diff --git a/lib/crypto/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c
index 0835b6213235..ceaebe181cd7 100644
--- a/lib/crypto/mpi/mpi-cmp.c
+++ b/lib/crypto/mpi/mpi-cmp.c
@@ -45,54 +45,28 @@ int mpi_cmp_ui(MPI u, unsigned long v)
}
EXPORT_SYMBOL_GPL(mpi_cmp_ui);
-static int do_mpi_cmp(MPI u, MPI v, int absmode)
+int mpi_cmp(MPI u, MPI v)
{
- mpi_size_t usize;
- mpi_size_t vsize;
- int usign;
- int vsign;
+ mpi_size_t usize, vsize;
int cmp;
mpi_normalize(u);
mpi_normalize(v);
-
usize = u->nlimbs;
vsize = v->nlimbs;
- usign = absmode ? 0 : u->sign;
- vsign = absmode ? 0 : v->sign;
-
- /* Compare sign bits. */
-
- if (!usign && vsign)
+ if (!u->sign && v->sign)
return 1;
- if (usign && !vsign)
+ if (u->sign && !v->sign)
return -1;
-
- /* U and V are either both positive or both negative. */
-
- if (usize != vsize && !usign && !vsign)
+ if (usize != vsize && !u->sign && !v->sign)
return usize - vsize;
- if (usize != vsize && usign && vsign)
- return vsize + usize;
+ if (usize != vsize && u->sign && v->sign)
+ return vsize - usize;
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
- if (!cmp)
- return 0;
- if ((cmp < 0?1:0) == (usign?1:0))
- return 1;
-
- return -1;
-}
-
-int mpi_cmp(MPI u, MPI v)
-{
- return do_mpi_cmp(u, v, 0);
+ if (u->sign)
+ return -cmp;
+ return cmp;
}
EXPORT_SYMBOL_GPL(mpi_cmp);
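
The rewritten comparison reads as three ordered checks: sign, then limb count,
then limb-by-limb magnitude, with the result negated when both operands are
negative. The same shape in miniature, on a hypothetical signed-magnitude
struct (names are illustrative, not the MPI API; inputs assumed normalized):

#include <stdio.h>

struct num { int sign; int nlimbs; unsigned long d[4]; };  /* hypothetical */

static int cmp_mag(const unsigned long *a, const unsigned long *b, int n)
{
	for (int i = n - 1; i >= 0; i--)   /* most significant limb first */
		if (a[i] != b[i])
			return a[i] > b[i] ? 1 : -1;
	return 0;
}

static int num_cmp(const struct num *u, const struct num *v)
{
	if (!u->sign && v->sign)
		return 1;                  /* positive > negative */
	if (u->sign && !v->sign)
		return -1;
	if (u->nlimbs != v->nlimbs)        /* more limbs means larger magnitude */
		return u->sign ? v->nlimbs - u->nlimbs : u->nlimbs - v->nlimbs;

	int c = cmp_mag(u->d, v->d, u->nlimbs);

	return u->sign ? -c : c;           /* flip the result for two negatives */
}

int main(void)
{
	struct num a = { 1, 1, { 5 } }, b = { 1, 1, { 7 } };  /* -5 vs -7 */

	printf("%d\n", num_cmp(&a, &b) > 0);  /* prints 1: -5 > -7 */
	return 0;
}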
-
-int mpi_cmpabs(MPI u, MPI v)
-{
- return do_mpi_cmp(u, v, 1);
-}
-EXPORT_SYMBOL_GPL(mpi_cmpabs);
diff --git a/lib/crypto/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c
index 45beab8b9e9e..6e5044e72595 100644
--- a/lib/crypto/mpi/mpi-div.c
+++ b/lib/crypto/mpi/mpi-div.c
@@ -14,13 +14,13 @@
#include "mpi-internal.h"
#include "longlong.h"
-void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
+int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
{
int divisor_sign = divisor->sign;
MPI temp_divisor = NULL;
+ int err;
/* We need the original value of the divisor after the remainder has been
 * preliminarily calculated. We have to copy it to temporary space if it's
@@ -28,44 +28,22 @@ void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
*/
if (rem == divisor) {
temp_divisor = mpi_copy(divisor);
+ if (!temp_divisor)
+ return -ENOMEM;
divisor = temp_divisor;
}
- mpi_tdiv_r(rem, dividend, divisor);
+ err = mpi_tdiv_r(rem, dividend, divisor);
+ if (err)
+ goto free_temp_divisor;
if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
- mpi_add(rem, rem, divisor);
+ err = mpi_add(rem, rem, divisor);
- if (temp_divisor)
- mpi_free(temp_divisor);
-}
+free_temp_divisor:
+ mpi_free(temp_divisor);
-void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
-{
- MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
- mpi_fdiv_qr(quot, tmp, dividend, divisor);
- mpi_free(tmp);
-}
-
-void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
-{
- int divisor_sign = divisor->sign;
- MPI temp_divisor = NULL;
-
- if (quot == divisor || rem == divisor) {
- temp_divisor = mpi_copy(divisor);
- divisor = temp_divisor;
- }
-
- mpi_tdiv_qr(quot, rem, dividend, divisor);
-
- if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
- mpi_sub_ui(quot, quot, 1);
- mpi_add(rem, rem, divisor);
- }
-
- if (temp_divisor)
- mpi_free(temp_divisor);
+ return err;
}
/* If den == quot, den needs temporary storage.
@@ -75,12 +53,12 @@ void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
* i.e no extra storage should be allocated.
*/
-void mpi_tdiv_r(MPI rem, MPI num, MPI den)
+int mpi_tdiv_r(MPI rem, MPI num, MPI den)
{
- mpi_tdiv_qr(NULL, rem, num, den);
+ return mpi_tdiv_qr(NULL, rem, num, den);
}
-void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
+int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
{
mpi_ptr_t np, dp;
mpi_ptr_t qp, rp;
@@ -93,13 +71,16 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
mpi_limb_t q_limb;
mpi_ptr_t marker[5];
int markidx = 0;
+ int err;
/* Ensure space is enough for quotient and remainder.
* We need space for an extra limb in the remainder, because it's
* up-shifted (normalized) below.
*/
rsize = nsize + 1;
- mpi_resize(rem, rsize);
+ err = mpi_resize(rem, rsize);
+ if (err)
+ return err;
qsize = rsize - dsize; /* qsize cannot be bigger than this. */
if (qsize <= 0) {
@@ -115,11 +96,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
quot->nlimbs = 0;
quot->sign = 0;
}
- return;
+ return 0;
}
- if (quot)
- mpi_resize(quot, qsize);
+ if (quot) {
+ err = mpi_resize(quot, qsize);
+ if (err)
+ return err;
+ }
/* Read pointers here, when reallocation is finished. */
np = num->d;
@@ -141,10 +125,10 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
rsize = rlimb != 0?1:0;
rem->nlimbs = rsize;
rem->sign = sign_remainder;
- return;
+ return 0;
}
-
+ err = -ENOMEM;
if (quot) {
qp = quot->d;
/* Make sure QP and NP point to different objects. Otherwise the
@@ -152,6 +136,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
*/
if (qp == np) { /* Copy NP object to temporary space. */
np = marker[markidx++] = mpi_alloc_limb_space(nsize);
+ if (!np)
+ goto out_free_marker;
MPN_COPY(np, qp, nsize);
}
} else /* Put quotient at top of remainder. */
@@ -172,6 +158,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
* the original contents of the denominator.
*/
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ if (!tp)
+ goto out_free_marker;
mpihelp_lshift(tp, dp, dsize, normalization_steps);
dp = tp;
@@ -193,6 +181,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
mpi_ptr_t tp;
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ if (!tp)
+ goto out_free_marker;
MPN_COPY(tp, dp, dsize);
dp = tp;
}
@@ -227,8 +217,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
rem->nlimbs = rsize;
rem->sign = sign_remainder;
+
+ err = 0;
+
+out_free_marker:
while (markidx) {
markidx--;
mpi_free_limb_space(marker[markidx]);
}
+
+ return err;
}
diff --git a/lib/crypto/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h
index 554002182db1..8a4f49e3043c 100644
--- a/lib/crypto/mpi/mpi-internal.h
+++ b/lib/crypto/mpi/mpi-internal.h
@@ -52,11 +52,12 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
-#define RESIZE_IF_NEEDED(a, b) \
- do { \
- if ((a)->alloced < (b)) \
- mpi_resize((a), (b)); \
- } while (0)
+static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
+{
+ if (a->alloced < b)
+ return mpi_resize(a, b);
+ return 0;
+}
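
Design note: the old do { ... } while (0) form is a statement with no value,
so a failing mpi_resize() inside it was simply lost; converting to a static
inline gives the check a return value without changing call sites that choose
to ignore it. The same shape in a userspace miniature (resize_buf is a
hypothetical stand-in, not a kernel API):

#include <stdlib.h>

struct buf { void *p; unsigned alloced; };

static int resize_buf(struct buf *b, unsigned n)   /* may fail */
{
	void *q = realloc(b->p, n);

	if (!q)
		return -1;         /* stand-in for -ENOMEM */
	b->p = q;
	b->alloced = n;
	return 0;
}

static inline int resize_if_needed(struct buf *b, unsigned n)
{
	if (b->alloced < n)
		return resize_buf(b, n);   /* the error reaches the caller */
	return 0;
}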
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
@@ -66,14 +67,6 @@ typedef int mpi_size_t; /* (must be a signed type) */
(d)[_i] = (s)[_i]; \
} while (0)
-#define MPN_COPY_INCR(d, s, n) \
- do { \
- mpi_size_t _i; \
- for (_i = 0; _i < (n); _i++) \
- (d)[_i] = (s)[_i]; \
- } while (0)
-
-
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -181,8 +174,6 @@ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
mpi_ptr_t tspace);
-void mpihelp_mul_n(mpi_ptr_t prodp,
- mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/crypto/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c
deleted file mode 100644
index 61e37d18f793..000000000000
--- a/lib/crypto/mpi/mpi-inv.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/* mpi-inv.c - MPI functions
- * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
- *
- * This file is part of Libgcrypt.
- *
- * Libgcrypt is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * Libgcrypt is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "mpi-internal.h"
-
-/****************
- * Calculate the multiplicative inverse X of A mod N
- * That is: Find the solution x for
- * 1 = (a*x) mod n
- */
-int mpi_invm(MPI x, MPI a, MPI n)
-{
- /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
- * modified according to Michael Penk's solution for Exercise 35
- * with further enhancement
- */
- MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
- unsigned int k;
- int sign;
- int odd;
-
- if (!mpi_cmp_ui(a, 0))
- return 0; /* Inverse does not exists. */
- if (!mpi_cmp_ui(n, 1))
- return 0; /* Inverse does not exists. */
-
- u = mpi_copy(a);
- v = mpi_copy(n);
-
- for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
- mpi_rshift(u, u, 1);
- mpi_rshift(v, v, 1);
- }
- odd = mpi_test_bit(v, 0);
-
- u1 = mpi_alloc_set_ui(1);
- if (!odd)
- u2 = mpi_alloc_set_ui(0);
- u3 = mpi_copy(u);
- v1 = mpi_copy(v);
- if (!odd) {
- v2 = mpi_alloc(mpi_get_nlimbs(u));
- mpi_sub(v2, u1, u); /* U is used as const 1 */
- }
- v3 = mpi_copy(v);
- if (mpi_test_bit(u, 0)) { /* u is odd */
- t1 = mpi_alloc_set_ui(0);
- if (!odd) {
- t2 = mpi_alloc_set_ui(1);
- t2->sign = 1;
- }
- t3 = mpi_copy(v);
- t3->sign = !t3->sign;
- goto Y4;
- } else {
- t1 = mpi_alloc_set_ui(1);
- if (!odd)
- t2 = mpi_alloc_set_ui(0);
- t3 = mpi_copy(u);
- }
-
- do {
- do {
- if (!odd) {
- if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
- /* one is odd */
- mpi_add(t1, t1, v);
- mpi_sub(t2, t2, u);
- }
- mpi_rshift(t1, t1, 1);
- mpi_rshift(t2, t2, 1);
- mpi_rshift(t3, t3, 1);
- } else {
- if (mpi_test_bit(t1, 0))
- mpi_add(t1, t1, v);
- mpi_rshift(t1, t1, 1);
- mpi_rshift(t3, t3, 1);
- }
-Y4:
- ;
- } while (!mpi_test_bit(t3, 0)); /* while t3 is even */
-
- if (!t3->sign) {
- mpi_set(u1, t1);
- if (!odd)
- mpi_set(u2, t2);
- mpi_set(u3, t3);
- } else {
- mpi_sub(v1, v, t1);
- sign = u->sign; u->sign = !u->sign;
- if (!odd)
- mpi_sub(v2, u, t2);
- u->sign = sign;
- sign = t3->sign; t3->sign = !t3->sign;
- mpi_set(v3, t3);
- t3->sign = sign;
- }
- mpi_sub(t1, u1, v1);
- if (!odd)
- mpi_sub(t2, u2, v2);
- mpi_sub(t3, u3, v3);
- if (t1->sign) {
- mpi_add(t1, t1, v);
- if (!odd)
- mpi_sub(t2, t2, u);
- }
- } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
- /* mpi_lshift( u3, k ); */
- mpi_set(x, u1);
-
- mpi_free(u1);
- mpi_free(v1);
- mpi_free(t1);
- if (!odd) {
- mpi_free(u2);
- mpi_free(v2);
- mpi_free(t2);
- }
- mpi_free(u3);
- mpi_free(v3);
- mpi_free(t3);
-
- mpi_free(u);
- mpi_free(v);
- return 1;
-}
-EXPORT_SYMBOL_GPL(mpi_invm);
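
The deleted routine is the binary (Penk) variant of the extended Euclidean
algorithm; the plain division-based variant below computes the same x with
(a * x) mod n == 1 and is easier to verify by hand. A self-contained sketch
on int64_t (assumes 0 < a < n):

#include <stdio.h>
#include <stdint.h>

static int64_t inv_mod(int64_t a, int64_t n)
{
	int64_t t = 0, newt = 1, r = n, newr = a % n;

	while (newr) {
		int64_t q = r / newr, tmp;

		tmp = t - q * newt; t = newt; newt = tmp;
		tmp = r - q * newr; r = newr; newr = tmp;
	}
	if (r != 1)
		return -1;                 /* no inverse exists */
	return t < 0 ? t + n : t;
}

int main(void)
{
	printf("%lld\n", (long long)inv_mod(3, 7));  /* 5, since 3*5 = 15 = 1 mod 7 */
	return 0;
}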
diff --git a/lib/crypto/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c
index 54fcc01564d9..d5fdaec3d0b6 100644
--- a/lib/crypto/mpi/mpi-mod.c
+++ b/lib/crypto/mpi/mpi-mod.c
@@ -5,153 +5,9 @@
* This file is part of Libgcrypt.
*/
-
#include "mpi-internal.h"
-#include "longlong.h"
-
-/* Context used with Barrett reduction. */
-struct barrett_ctx_s {
- MPI m; /* The modulus - may not be modified. */
- int m_copied; /* If true, M needs to be released. */
- int k;
- MPI y;
- MPI r1; /* Helper MPI. */
- MPI r2; /* Helper MPI. */
- MPI r3; /* Helper MPI allocated on demand. */
-};
-
-
-
-void mpi_mod(MPI rem, MPI dividend, MPI divisor)
-{
- mpi_fdiv_r(rem, dividend, divisor);
-}
-
-/* This function returns a new context for Barrett based operations on
- * the modulus M. This context needs to be released using
- * _gcry_mpi_barrett_free. If COPY is true M will be transferred to
- * the context and the user may change M. If COPY is false, M may not
- * be changed until gcry_mpi_barrett_free has been called.
- */
-mpi_barrett_t mpi_barrett_init(MPI m, int copy)
-{
- mpi_barrett_t ctx;
- MPI tmp;
-
- mpi_normalize(m);
- ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- if (copy) {
- ctx->m = mpi_copy(m);
- ctx->m_copied = 1;
- } else
- ctx->m = m;
-
- ctx->k = mpi_get_nlimbs(m);
- tmp = mpi_alloc(ctx->k + 1);
-
- /* Barrett precalculation: y = floor(b^(2k) / m). */
- mpi_set_ui(tmp, 1);
- mpi_lshift_limbs(tmp, 2 * ctx->k);
- mpi_fdiv_q(tmp, tmp, m);
-
- ctx->y = tmp;
- ctx->r1 = mpi_alloc(2 * ctx->k + 1);
- ctx->r2 = mpi_alloc(2 * ctx->k + 1);
-
- return ctx;
-}
-
-void mpi_barrett_free(mpi_barrett_t ctx)
-{
- if (ctx) {
- mpi_free(ctx->y);
- mpi_free(ctx->r1);
- mpi_free(ctx->r2);
- if (ctx->r3)
- mpi_free(ctx->r3);
- if (ctx->m_copied)
- mpi_free(ctx->m);
- kfree(ctx);
- }
-}
-
-
-/* R = X mod M
- *
- * Using Barrett reduction. Before using this function
- * _gcry_mpi_barrett_init must have been called to do the
- * precalculations. CTX is the context created by this precalculation
- * and also conveys M. If the Barret reduction could no be done a
- * straightforward reduction method is used.
- *
- * We assume that these conditions are met:
- * Input: x =(x_2k-1 ...x_0)_b
- * m =(m_k-1 ....m_0)_b with m_k-1 != 0
- * Output: r = x mod m
- */
-void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
-{
- MPI m = ctx->m;
- int k = ctx->k;
- MPI y = ctx->y;
- MPI r1 = ctx->r1;
- MPI r2 = ctx->r2;
- int sign;
-
- mpi_normalize(x);
- if (mpi_get_nlimbs(x) > 2*k) {
- mpi_mod(r, x, m);
- return;
- }
-
- sign = x->sign;
- x->sign = 0;
-
-	/* 1. q1 = floor( x / b^(k-1) )
-	 *    q2 = q1 * y
-	 *    q3 = floor( q2 / b^(k+1) )
-	 * Actually, we don't need qx, we can work directly on r2
- */
- mpi_set(r2, x);
- mpi_rshift_limbs(r2, k-1);
- mpi_mul(r2, r2, y);
- mpi_rshift_limbs(r2, k+1);
-
-	/* 2. r1 = x mod b^(k+1)
-	 *    r2 = q3 * m mod b^(k+1)
-	 *    r  = r1 - r2
-	 * 3. if r < 0 then r = r + b^(k+1)
- */
- mpi_set(r1, x);
- if (r1->nlimbs > k+1) /* Quick modulo operation. */
- r1->nlimbs = k+1;
- mpi_mul(r2, r2, m);
- if (r2->nlimbs > k+1) /* Quick modulo operation. */
- r2->nlimbs = k+1;
- mpi_sub(r, r1, r2);
-
- if (mpi_has_sign(r)) {
- if (!ctx->r3) {
- ctx->r3 = mpi_alloc(k + 2);
- mpi_set_ui(ctx->r3, 1);
- mpi_lshift_limbs(ctx->r3, k + 1);
- }
- mpi_add(r, r, ctx->r3);
- }
-
- /* 4. while r >= m do r = r - m */
- while (mpi_cmp(r, m) >= 0)
- mpi_sub(r, r, m);
-
- x->sign = sign;
-}
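
To make the numbered steps in the removed comment concrete: with k limbs,
b^(k-1) and b^(k+1) are just limb shifts, and y = floor(b^(2k)/m) turns the
division in the hot path into a multiply-and-shift. A word-sized miniature
where the modulus fits one 32-bit word and mu = floor(2^64/m) plays the role
of y (mu is approximated from 2^64 - 1 here, which is why a short fix-up loop
remains; toy values, not the kernel API):

#include <stdio.h>
#include <stdint.h>

static uint32_t barrett_reduce(uint64_t x, uint32_t m, uint64_t mu)
{
	uint64_t q = (uint64_t)(((__uint128_t)x * mu) >> 64);  /* ~ x / m */
	uint64_t r = x - q * m;

	while (r >= m)     /* the estimate is off by at most a few m */
		r -= m;
	return (uint32_t)r;
}

int main(void)
{
	uint32_t m = 1000003;
	uint64_t mu = ~0ULL / m;   /* floor((2^64 - 1) / m) */

	printf("%u\n", barrett_reduce(123456789012345ULL, m, mu));
	printf("%u\n", (uint32_t)(123456789012345ULL % m));  /* same value */
	return 0;
}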
-
-void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
+int mpi_mod(MPI rem, MPI dividend, MPI divisor)
{
- mpi_mul(w, u, v);
- mpi_mod_barrett(w, w, ctx);
+ return mpi_fdiv_r(rem, dividend, divisor);
}
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 7f4eda8560dc..892a246216b9 100644
--- a/lib/crypto/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -13,7 +13,7 @@
#include "mpi-internal.h"
-void mpi_mul(MPI w, MPI u, MPI v)
+int mpi_mul(MPI w, MPI u, MPI v)
{
mpi_size_t usize, vsize, wsize;
mpi_ptr_t up, vp, wp;
@@ -21,6 +21,7 @@ void mpi_mul(MPI w, MPI u, MPI v)
int usign, vsign, sign_product;
int assign_wp = 0;
mpi_ptr_t tmp_limb = NULL;
+ int err;
if (u->nlimbs < v->nlimbs) {
/* Swap U and V. */
@@ -46,15 +47,21 @@ void mpi_mul(MPI w, MPI u, MPI v)
if (w->alloced < wsize) {
if (wp == up || wp == vp) {
wp = mpi_alloc_limb_space(wsize);
+ if (!wp)
+ return -ENOMEM;
assign_wp = 1;
} else {
- mpi_resize(w, wsize);
+ err = mpi_resize(w, wsize);
+ if (err)
+ return err;
wp = w->d;
}
} else { /* Make U and V not overlap with W. */
if (wp == up) {
/* W and U are identical. Allocate temporary space for U. */
up = tmp_limb = mpi_alloc_limb_space(usize);
+ if (!up)
+ return -ENOMEM;
/* Is V identical too? Keep it identical with U. */
if (wp == vp)
vp = up;
@@ -63,6 +70,8 @@ void mpi_mul(MPI w, MPI u, MPI v)
} else if (wp == vp) {
/* W and V are identical. Allocate temporary space for V. */
vp = tmp_limb = mpi_alloc_limb_space(vsize);
+ if (!vp)
+ return -ENOMEM;
/* Copy to the temporary space. */
MPN_COPY(vp, wp, vsize);
}
@@ -71,7 +80,12 @@ void mpi_mul(MPI w, MPI u, MPI v)
if (!vsize)
wsize = 0;
else {
- mpihelp_mul(wp, up, usize, vp, vsize, &cy);
+ err = mpihelp_mul(wp, up, usize, vp, vsize, &cy);
+ if (err) {
+ if (assign_wp)
+ mpi_free_limb_space(wp);
+ goto free_tmp_limb;
+ }
wsize -= cy ? 0:1;
}
@@ -79,14 +93,17 @@ void mpi_mul(MPI w, MPI u, MPI v)
mpi_assign_limb_space(w, wp, wsize);
w->nlimbs = wsize;
w->sign = sign_product;
+
+free_tmp_limb:
if (tmp_limb)
mpi_free_limb_space(tmp_limb);
+ return err;
}
EXPORT_SYMBOL_GPL(mpi_mul);
-void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m)
{
- mpi_mul(w, u, v);
- mpi_tdiv_r(w, w, m);
+ return mpi_mul(w, u, v) ?:
+ mpi_tdiv_r(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_mulm);
diff --git a/lib/crypto/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index 3cb6bd148fa9..dde01030807d 100644
--- a/lib/crypto/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include "mpi-internal.h"
-#define MAX_EXTERN_SCAN_BYTES (16*1024*1024)
#define MAX_EXTERN_MPI_BITS 16384
/**
@@ -110,112 +109,6 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
-/****************
- * Fill the mpi VAL from the hex string in STR.
- */
-int mpi_fromstr(MPI val, const char *str)
-{
- int sign = 0;
- int prepend_zero = 0;
- int i, j, c, c1, c2;
- unsigned int nbits, nbytes, nlimbs;
- mpi_limb_t a;
-
- if (*str == '-') {
- sign = 1;
- str++;
- }
-
- /* Skip optional hex prefix. */
- if (*str == '0' && str[1] == 'x')
- str += 2;
-
- nbits = strlen(str);
- if (nbits > MAX_EXTERN_SCAN_BYTES) {
- mpi_clear(val);
- return -EINVAL;
- }
- nbits *= 4;
- if ((nbits % 8))
- prepend_zero = 1;
-
- nbytes = (nbits+7) / 8;
- nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
-
- if (val->alloced < nlimbs)
- mpi_resize(val, nlimbs);
-
- i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB);
- i %= BYTES_PER_MPI_LIMB;
- j = val->nlimbs = nlimbs;
- val->sign = sign;
- for (; j > 0; j--) {
- a = 0;
- for (; i < BYTES_PER_MPI_LIMB; i++) {
- if (prepend_zero) {
- c1 = '0';
- prepend_zero = 0;
- } else
- c1 = *str++;
-
- if (!c1) {
- mpi_clear(val);
- return -EINVAL;
- }
- c2 = *str++;
- if (!c2) {
- mpi_clear(val);
- return -EINVAL;
- }
- if (c1 >= '0' && c1 <= '9')
- c = c1 - '0';
- else if (c1 >= 'a' && c1 <= 'f')
- c = c1 - 'a' + 10;
- else if (c1 >= 'A' && c1 <= 'F')
- c = c1 - 'A' + 10;
- else {
- mpi_clear(val);
- return -EINVAL;
- }
- c <<= 4;
- if (c2 >= '0' && c2 <= '9')
- c |= c2 - '0';
- else if (c2 >= 'a' && c2 <= 'f')
- c |= c2 - 'a' + 10;
- else if (c2 >= 'A' && c2 <= 'F')
- c |= c2 - 'A' + 10;
- else {
- mpi_clear(val);
- return -EINVAL;
- }
- a <<= 8;
- a |= c;
- }
- i = 0;
- val->d[j-1] = a;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mpi_fromstr);
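
Stripped of limb packing, the deleted parser is: optional '-', optional "0x",
then big-endian hex pairs with a zero nibble implied when the digit count is
odd. A byte-array sketch of that core (hexval and from_hex are illustrative
helpers, not kernel functions; sign/prefix handling is left to the caller):

#include <stdio.h>
#include <string.h>

static int hexval(int c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

/* Returns the byte count, or -1 on a bad digit or short buffer. */
static int from_hex(const char *str, unsigned char *out, size_t outlen)
{
	size_t n = strlen(str), i = 0, j = 0;

	if ((n + 1) / 2 > outlen)
		return -1;
	if (n & 1) {                       /* odd length: implied leading 0 */
		int v = hexval(str[i++]);

		if (v < 0)
			return -1;
		out[j++] = (unsigned char)v;
	}
	while (str[i]) {
		int hi = hexval(str[i++]), lo = hexval(str[i++]);

		if (hi < 0 || lo < 0)
			return -1;
		out[j++] = (unsigned char)((hi << 4) | lo);
	}
	return (int)j;
}

int main(void)
{
	unsigned char buf[8];
	int n = from_hex("fba9", buf, sizeof(buf));

	printf("%d bytes: %02x %02x\n", n, buf[0], buf[1]); /* 2 bytes: fb a9 */
	return 0;
}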
-
-MPI mpi_scanval(const char *string)
-{
- MPI a;
-
- a = mpi_alloc(0);
- if (!a)
- return NULL;
-
- if (mpi_fromstr(a, string)) {
- mpi_free(a);
- return NULL;
- }
- mpi_normalize(a);
- return a;
-}
-EXPORT_SYMBOL_GPL(mpi_scanval);
-
static int count_lzeros(MPI a)
{
mpi_limb_t alimb;
@@ -521,232 +414,3 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
-
-/* Perform a two's complement operation on buffer P of size N bytes. */
-static void twocompl(unsigned char *p, unsigned int n)
-{
- int i;
-
- for (i = n-1; i >= 0 && !p[i]; i--)
- ;
- if (i >= 0) {
- if ((p[i] & 0x01))
- p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff);
- else if ((p[i] & 0x02))
- p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe);
- else if ((p[i] & 0x04))
- p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc);
- else if ((p[i] & 0x08))
- p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8);
- else if ((p[i] & 0x10))
- p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0);
- else if ((p[i] & 0x20))
- p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0);
- else if ((p[i] & 0x40))
- p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0);
- else
- p[i] = 0x80;
-
- for (i--; i >= 0; i--)
- p[i] ^= 0xff;
- }
-}
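
The bit-pattern table above is an unrolled form of the textbook negation:
keep everything up to and including the lowest set bit, invert the rest.
Written the direct way (invert then increment with carry), which produces
the same result on the big-endian buffer layout the removed code uses
(index n-1 is least significant; n > 0 assumed):

#include <stdio.h>

static void twos_complement(unsigned char *p, unsigned int n)
{
	int carry = 1;

	for (int i = n - 1; i >= 0; i--) {
		p[i] = (unsigned char)(~p[i] + carry);
		carry = carry && !p[i];  /* carry continues only across 0x00 results */
	}
}

int main(void)
{
	unsigned char v[2] = { 0x00, 0x01 };   /* the value 1 */

	twos_complement(v, 2);
	printf("%02x %02x\n", v[0], v[1]);     /* ff ff == -1 */
	return 0;
}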
-
-int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
- size_t buflen, size_t *nwritten, MPI a)
-{
- unsigned int nbits = mpi_get_nbits(a);
- size_t len;
- size_t dummy_nwritten;
- int negative;
-
- if (!nwritten)
- nwritten = &dummy_nwritten;
-
-	/* Libgcrypt does not always care to clear the sign if the value
-	 * is 0. For printing this is a bit of a surprise, in particular
-	 * because some of the formats don't support negative numbers but
-	 * should still be able to print a zero. Thus we need this extra test
- * for a negative number.
- */
- if (a->sign && mpi_cmp_ui(a, 0))
- negative = 1;
- else
- negative = 0;
-
- len = buflen;
- *nwritten = 0;
- if (format == GCRYMPI_FMT_STD) {
- unsigned char *tmp;
- int extra = 0;
- unsigned int n;
-
- tmp = mpi_get_buffer(a, &n, NULL);
- if (!tmp)
- return -EINVAL;
-
- if (negative) {
- twocompl(tmp, n);
- if (!(*tmp & 0x80)) {
- /* Need to extend the sign. */
- n++;
- extra = 2;
- }
- } else if (n && (*tmp & 0x80)) {
- /* Positive but the high bit of the returned buffer is set.
- * Thus we need to print an extra leading 0x00 so that the
- * output is interpreted as a positive number.
- */
- n++;
- extra = 1;
- }
-
- if (buffer && n > len) {
- /* The provided buffer is too short. */
- kfree(tmp);
- return -E2BIG;
- }
- if (buffer) {
- unsigned char *s = buffer;
-
- if (extra == 1)
- *s++ = 0;
- else if (extra)
- *s++ = 0xff;
- memcpy(s, tmp, n-!!extra);
- }
- kfree(tmp);
- *nwritten = n;
- return 0;
- } else if (format == GCRYMPI_FMT_USG) {
- unsigned int n = (nbits + 7)/8;
-
- /* Note: We ignore the sign for this format. */
- /* FIXME: for performance reasons we should put this into
- * mpi_aprint because we can then use the buffer directly.
- */
-
- if (buffer && n > len)
- return -E2BIG;
- if (buffer) {
- unsigned char *tmp;
-
- tmp = mpi_get_buffer(a, &n, NULL);
- if (!tmp)
- return -EINVAL;
- memcpy(buffer, tmp, n);
- kfree(tmp);
- }
- *nwritten = n;
- return 0;
- } else if (format == GCRYMPI_FMT_PGP) {
- unsigned int n = (nbits + 7)/8;
-
- /* The PGP format can only handle unsigned integers. */
- if (negative)
- return -EINVAL;
-
- if (buffer && n+2 > len)
- return -E2BIG;
-
- if (buffer) {
- unsigned char *tmp;
- unsigned char *s = buffer;
-
- s[0] = nbits >> 8;
- s[1] = nbits;
-
- tmp = mpi_get_buffer(a, &n, NULL);
- if (!tmp)
- return -EINVAL;
- memcpy(s+2, tmp, n);
- kfree(tmp);
- }
- *nwritten = n+2;
- return 0;
- } else if (format == GCRYMPI_FMT_SSH) {
- unsigned char *tmp;
- int extra = 0;
- unsigned int n;
-
- tmp = mpi_get_buffer(a, &n, NULL);
- if (!tmp)
- return -EINVAL;
-
- if (negative) {
- twocompl(tmp, n);
- if (!(*tmp & 0x80)) {
- /* Need to extend the sign. */
- n++;
- extra = 2;
- }
- } else if (n && (*tmp & 0x80)) {
- n++;
- extra = 1;
- }
-
- if (buffer && n+4 > len) {
- kfree(tmp);
- return -E2BIG;
- }
-
- if (buffer) {
- unsigned char *s = buffer;
-
- *s++ = n >> 24;
- *s++ = n >> 16;
- *s++ = n >> 8;
- *s++ = n;
- if (extra == 1)
- *s++ = 0;
- else if (extra)
- *s++ = 0xff;
- memcpy(s, tmp, n-!!extra);
- }
- kfree(tmp);
- *nwritten = 4+n;
- return 0;
- } else if (format == GCRYMPI_FMT_HEX) {
- unsigned char *tmp;
- int i;
- int extra = 0;
- unsigned int n = 0;
-
- tmp = mpi_get_buffer(a, &n, NULL);
- if (!tmp)
- return -EINVAL;
- if (!n || (*tmp & 0x80))
- extra = 2;
-
- if (buffer && 2*n + extra + negative + 1 > len) {
- kfree(tmp);
- return -E2BIG;
- }
- if (buffer) {
- unsigned char *s = buffer;
-
- if (negative)
- *s++ = '-';
- if (extra) {
- *s++ = '0';
- *s++ = '0';
- }
-
- for (i = 0; i < n; i++) {
- unsigned int c = tmp[i];
-
- *s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10;
- c &= 15;
- *s++ = c < 10 ? '0'+c : 'A'+c-10;
- }
- *s++ = 0;
- *nwritten = s - buffer;
- } else {
- *nwritten = 2*n + extra + negative + 1;
- }
- kfree(tmp);
- return 0;
- } else
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(mpi_print);
diff --git a/lib/crypto/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
index e5f1c84e3c48..a93647564054 100644
--- a/lib/crypto/mpi/mpih-mul.c
+++ b/lib/crypto/mpi/mpih-mul.c
@@ -317,31 +317,6 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
}
}
-
-void mpihelp_mul_n(mpi_ptr_t prodp,
- mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
-{
- if (up == vp) {
- if (size < KARATSUBA_THRESHOLD)
- mpih_sqr_n_basecase(prodp, up, size);
- else {
- mpi_ptr_t tspace;
- tspace = mpi_alloc_limb_space(2 * size);
- mpih_sqr_n(prodp, up, size, tspace);
- mpi_free_limb_space(tspace);
- }
- } else {
- if (size < KARATSUBA_THRESHOLD)
- mul_n_basecase(prodp, up, vp, size);
- else {
- mpi_ptr_t tspace;
- tspace = mpi_alloc_limb_space(2 * size);
- mul_n(prodp, up, vp, size, tspace);
- mpi_free_limb_space(tspace);
- }
- }
-}
-
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
index aa8c46544af8..979ece5a81d2 100644
--- a/lib/crypto/mpi/mpiutil.c
+++ b/lib/crypto/mpi/mpiutil.c
@@ -20,63 +20,6 @@
#include "mpi-internal.h"
-/* Constants allocated right away at startup. */
-static MPI constants[MPI_NUMBER_OF_CONSTANTS];
-
-/* Initialize the MPI subsystem. This is called early and allows us to
- * do some initialization without worrying about threading issues.
- */
-static int __init mpi_init(void)
-{
- int idx;
- unsigned long value;
-
- for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) {
- switch (idx) {
- case MPI_C_ZERO:
- value = 0;
- break;
- case MPI_C_ONE:
- value = 1;
- break;
- case MPI_C_TWO:
- value = 2;
- break;
- case MPI_C_THREE:
- value = 3;
- break;
- case MPI_C_FOUR:
- value = 4;
- break;
- case MPI_C_EIGHT:
- value = 8;
- break;
- default:
- pr_err("MPI: invalid mpi_const selector %d\n", idx);
- return -EFAULT;
- }
- constants[idx] = mpi_alloc_set_ui(value);
- constants[idx]->flags = (16|32);
- }
-
- return 0;
-}
-postcore_initcall(mpi_init);
-
-/* Return a constant MPI described by NO which is one of the
- * MPI_C_xxx macros. There is no need to copy this returned value; it
- * may be used directly.
- */
-MPI mpi_const(enum gcry_mpi_constants no)
-{
- if ((int)no < 0 || no > MPI_NUMBER_OF_CONSTANTS)
- pr_err("MPI: invalid mpi_const selector %d\n", no);
- if (!constants[no])
- pr_err("MPI: MPI subsystem not initialized\n");
- return constants[no];
-}
-EXPORT_SYMBOL_GPL(mpi_const);
-
/****************
* Note: It was a bad idea to use the number of limbs to allocate
 * because on an Alpha the limbs are large but we normally need
@@ -163,15 +106,6 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0;
}
-void mpi_clear(MPI a)
-{
- if (!a)
- return;
- a->nlimbs = 0;
- a->flags = 0;
-}
-EXPORT_SYMBOL_GPL(mpi_clear);
-
void mpi_free(MPI a)
{
if (!a)
@@ -199,6 +133,8 @@ MPI mpi_copy(MPI a)
if (a) {
b = mpi_alloc(a->nlimbs);
+ if (!b)
+ return NULL;
b->nlimbs = a->nlimbs;
b->sign = a->sign;
b->flags = a->flags;
@@ -210,121 +146,5 @@ MPI mpi_copy(MPI a)
return b;
}
-/****************
- * This function allocates an MPI which is optimized to hold
- * a value as large as the one given in the argument and allocates it
- * with the same flags as A.
- */
-MPI mpi_alloc_like(MPI a)
-{
- MPI b;
-
- if (a) {
- b = mpi_alloc(a->nlimbs);
- b->nlimbs = 0;
- b->sign = 0;
- b->flags = a->flags;
- } else
- b = NULL;
-
- return b;
-}
-
-
-/* Set U into W and release U. If W is NULL only U will be released. */
-void mpi_snatch(MPI w, MPI u)
-{
- if (w) {
- mpi_assign_limb_space(w, u->d, u->alloced);
- w->nlimbs = u->nlimbs;
- w->sign = u->sign;
- w->flags = u->flags;
- u->alloced = 0;
- u->nlimbs = 0;
- u->d = NULL;
- }
- mpi_free(u);
-}
-
-
-MPI mpi_set(MPI w, MPI u)
-{
- mpi_ptr_t wp, up;
- mpi_size_t usize = u->nlimbs;
- int usign = u->sign;
-
- if (!w)
- w = mpi_alloc(mpi_get_nlimbs(u));
- RESIZE_IF_NEEDED(w, usize);
- wp = w->d;
- up = u->d;
- MPN_COPY(wp, up, usize);
- w->nlimbs = usize;
- w->flags = u->flags;
- w->flags &= ~(16|32); /* Reset the immutable and constant flags. */
- w->sign = usign;
- return w;
-}
-EXPORT_SYMBOL_GPL(mpi_set);
-
-MPI mpi_set_ui(MPI w, unsigned long u)
-{
- if (!w)
- w = mpi_alloc(1);
- /* FIXME: If U is 0 we have no need to resize and thus possible
- * allocating the limbs.
- */
- RESIZE_IF_NEEDED(w, 1);
- w->d[0] = u;
- w->nlimbs = u ? 1 : 0;
- w->sign = 0;
- w->flags = 0;
- return w;
-}
-EXPORT_SYMBOL_GPL(mpi_set_ui);
-
-MPI mpi_alloc_set_ui(unsigned long u)
-{
- MPI w = mpi_alloc(1);
- w->d[0] = u;
- w->nlimbs = u ? 1 : 0;
- w->sign = 0;
- return w;
-}
-
-/****************
- * Swap the value of A and B, when SWAP is 1.
- * Leave the value when SWAP is 0.
- * This implementation should be constant-time regardless of SWAP.
- */
-void mpi_swap_cond(MPI a, MPI b, unsigned long swap)
-{
- mpi_size_t i;
- mpi_size_t nlimbs;
- mpi_limb_t mask = ((mpi_limb_t)0) - swap;
- mpi_limb_t x;
-
- if (a->alloced > b->alloced)
- nlimbs = b->alloced;
- else
- nlimbs = a->alloced;
- if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
- return;
-
- for (i = 0; i < nlimbs; i++) {
- x = mask & (a->d[i] ^ b->d[i]);
- a->d[i] = a->d[i] ^ x;
- b->d[i] = b->d[i] ^ x;
- }
-
- x = mask & (a->nlimbs ^ b->nlimbs);
- a->nlimbs = a->nlimbs ^ x;
- b->nlimbs = b->nlimbs ^ x;
-
- x = mask & (a->sign ^ b->sign);
- a->sign = a->sign ^ x;
- b->sign = b->sign ^ x;
-}
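
The masked-XOR idiom in the deleted function is the standard constant-time
conditional swap: mask is all-ones when swap == 1 and all-zeros when
swap == 0, so the same instruction sequence runs regardless of the secret
bit. The same idiom on plain arrays (a sketch, not the MPI API):

#include <stdio.h>
#include <stdint.h>

static void cond_swap(uint64_t *a, uint64_t *b, size_t n, unsigned long swap)
{
	uint64_t mask = 0 - (uint64_t)swap;   /* 0x00...0 or 0xff...f */

	for (size_t i = 0; i < n; i++) {
		uint64_t x = mask & (a[i] ^ b[i]);

		a[i] ^= x;
		b[i] ^= x;
	}
}

int main(void)
{
	uint64_t a[2] = { 1, 2 }, b[2] = { 3, 4 };

	cond_swap(a, b, 2, 0);   /* no-op */
	cond_swap(a, b, 2, 1);   /* swaps */
	printf("%llu %llu\n", (unsigned long long)a[0],
	       (unsigned long long)b[0]);     /* 3 1 */
	return 0;
}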
-
MODULE_DESCRIPTION("Multiprecision maths library");
MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 7cea91e193a8..5ce473ad499b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -70,10 +70,10 @@ static HLIST_HEAD(obj_to_free);
* made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
* can be off.
*/
-static int obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
-static int obj_pool_max_used;
+static int __data_racy obj_pool_max_used;
static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
@@ -84,9 +84,9 @@ static int __data_racy debug_objects_fixups __read_mostly;
static int __data_racy debug_objects_warnings __read_mostly;
static int __data_racy debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int __data_racy debug_objects_pool_size __read_mostly
+static int debug_objects_pool_size __ro_after_init
= ODEBUG_POOL_SIZE;
-static int __data_racy debug_objects_pool_min_level __read_mostly
+static int debug_objects_pool_min_level __ro_after_init
= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
@@ -95,8 +95,8 @@ static struct kmem_cache *obj_cache __ro_after_init;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
*/
-static int debug_objects_allocated;
-static int debug_objects_freed;
+static int __data_racy debug_objects_allocated;
+static int __data_racy debug_objects_freed;
static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
@@ -135,20 +135,19 @@ static void fill_pool(void)
return;
/*
- * Reuse objs from the global free list; they will be reinitialized
- * when allocating.
+ * Reuse objs from the global obj_to_free list; they will be
+ * reinitialized when allocating.
*
- * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
- * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
- * sections.
+ * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
+ * the WRITE_ONCE() in pool_lock critical sections.
*/
- while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+ if (READ_ONCE(obj_nr_tofree)) {
raw_spin_lock_irqsave(&pool_lock, flags);
/*
* Recheck with the lock held as the worker thread might have
* won the race and freed the global free list already.
*/
- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 842894158944..32138bb8ef77 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -103,12 +102,11 @@
#ifdef STATIC
# define XZ_PREBOOT
#else
-#include <linux/decompress/unxz.h>
+# include <linux/decompress/unxz.h>
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
#endif
-#define XZ_EXTERN STATIC
#ifndef XZ_PREBOOT
# include <linux/slab.h>
@@ -127,11 +125,21 @@
#ifdef CONFIG_X86
# define XZ_DEC_X86
#endif
-#ifdef CONFIG_PPC
+#if defined(CONFIG_PPC) && defined(CONFIG_CPU_BIG_ENDIAN)
# define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
-# define XZ_DEC_ARM
+# ifdef CONFIG_THUMB2_KERNEL
+# define XZ_DEC_ARMTHUMB
+# else
+# define XZ_DEC_ARM
+# endif
+#endif
+#ifdef CONFIG_ARM64
+# define XZ_DEC_ARM64
+#endif
+#ifdef CONFIG_RISCV
+# define XZ_DEC_RISCV
#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
@@ -220,7 +228,7 @@ void *memmove(void *dest, const void *src, size_t size)
#endif
/*
- * Since we need memmove anyway, would use it as memcpy too.
+ * Since we need memmove anyway, we could use it as memcpy too.
* Commented out for now to avoid breaking things.
*/
/*
@@ -390,17 +398,17 @@ error_alloc_state:
}
/*
- * This macro is used by architecture-specific files to decompress
+ * This function is used by architecture-specific files to decompress
* the kernel image.
*/
#ifdef XZ_PREBOOT
-STATIC int INIT __decompress(unsigned char *buf, long len,
- long (*fill)(void*, unsigned long),
- long (*flush)(void*, unsigned long),
- unsigned char *out_buf, long olen,
- long *pos,
- void (*error)(char *x))
+STATIC int INIT __decompress(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long out_size,
+ long *in_used,
+ void (*error)(char *x))
{
- return unxz(buf, len, fill, flush, out_buf, pos, error);
+ return unxz(in, in_size, fill, flush, out, in_used, error);
}
#endif
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index c4cc4026c451..5b9bfaac7ac1 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_DIMLIB) += dimlib.o
-dimlib-objs := dim.o net_dim.o rdma_dim.o
+dimlib-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 1a996fbbf50a..388da1aea14a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -73,6 +73,7 @@ void dump_stack_print_info(const char *log_lvl)
print_worker_info(log_lvl, current);
print_stop_info(log_lvl, current);
+ print_scx_info(log_lvl, current);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index f2c5e7910bb1..5a007952f7f2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1147,7 +1147,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
iter->table->mod_name, dp->function,
ddebug_describe_flags(dp->flags, &flags));
seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
- seq_puts(m, "\"");
+ seq_putc(m, '"');
if (dp->class_id != _DPRINTK_CLASS_DFLT) {
class = ddebug_class_name(iter, dp);
@@ -1156,7 +1156,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
else
seq_printf(m, " class unknown, _id:%d", dp->class_id);
}
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
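seq_putc() appends one byte directly, whereas seq_puts() walks a NUL-terminated string, so for single-character output the former is the cheaper, idiomatic call:

seq_puts(m, "\n");	/* scans a one-byte string */
seq_putc(m, '\n');	/* emits the byte directly */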
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index d608f9b48c10..52eb6ba29698 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index f9ad60a9c7bd..ecb638d4cde1 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -306,8 +306,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
orig = kvmalloc(prev_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker(((expected_pages) * PAGE_SIZE) * 2, \
- kvrealloc(orig, prev_size, \
- ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
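This hunk tracks a kvrealloc() signature that, as the change suggests, no longer takes the old size and instead mirrors krealloc(p, size, gfp). A sketch of the grow-a-buffer pattern under that assumption:

static int grow(void **bufp, size_t new_size)
{
	void *tmp = kvrealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp stays valid on failure */
	*bufp = tmp;
	return 0;
}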
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index fa692c86f069..79e067b51488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,99 +5,31 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
-#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
-
-struct genradix_node {
- union {
- /* Interior node: */
- struct genradix_node *children[GENRADIX_ARY];
-
- /* Leaf: */
- u8 data[GENRADIX_NODE_SIZE];
- };
-};
-
-static inline int genradix_depth_shift(unsigned depth)
-{
- return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
-}
-
-/*
- * Returns size (of data, in bytes) that a tree of a given depth holds:
- */
-static inline size_t genradix_depth_size(unsigned depth)
-{
- return 1UL << genradix_depth_shift(depth);
-}
-
-/* depth that's needed for a genradix that can address up to ULONG_MAX: */
-#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
-
-#define GENRADIX_DEPTH_MASK \
- ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-
-static inline unsigned genradix_root_to_depth(struct genradix_root *r)
-{
- return (unsigned long) r & GENRADIX_DEPTH_MASK;
-}
-
-static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
-{
- return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, or NULL if not
* allocated
*/
void *__genradix_ptr(struct __genradix *radix, size_t offset)
{
- struct genradix_root *r = READ_ONCE(radix->root);
- struct genradix_node *n = genradix_root_to_node(r);
- unsigned level = genradix_root_to_depth(r);
-
- if (ilog2(offset) >= genradix_depth_shift(level))
- return NULL;
-
- while (1) {
- if (!n)
- return NULL;
- if (!level)
- break;
-
- level--;
-
- n = n->children[offset >> genradix_depth_shift(level)];
- offset &= genradix_depth_size(level) - 1;
- }
-
- return &n->data[offset];
+ return __genradix_ptr_inlined(radix, offset);
}
EXPORT_SYMBOL(__genradix_ptr);
-static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
-{
- return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
-}
-
-static inline void genradix_free_node(struct genradix_node *node)
-{
- kfree(node);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
*/
void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ struct genradix_node **preallocated,
gfp_t gfp_mask)
{
struct genradix_root *v = READ_ONCE(radix->root);
struct genradix_node *n, *new_node = NULL;
unsigned level;
+ if (preallocated)
+ swap(new_node, *preallocated);
+
/* Increase tree depth if necessary: */
while (1) {
struct genradix_root *r = v, *new_root;
@@ -281,7 +213,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
size_t offset;
for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
- if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask))
return -ENOMEM;
return 0;
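__genradix_ptr_alloc() can now consume a node handed in by the caller (see the swap() above), so the allocation can be hoisted out of a context that must not sleep; __genradix_prealloc() simply passes NULL. A sketch under the assumption that the node alloc/free helpers deleted above now live in the header:

static void *slot_get_nowait(struct __genradix *radix, size_t offset)
{
	struct genradix_node *prealloc = genradix_alloc_node(GFP_KERNEL);
	void *p;

	/* the real caller would hold its lock around this call */
	p = __genradix_ptr_alloc(radix, offset, &prealloc, GFP_NOWAIT);

	if (prealloc)			/* only consumed when a node was needed */
		genradix_free_node(prealloc);
	return p;
}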
diff --git a/lib/glob.c b/lib/glob.c
index 15b73f490720..aa57900d2062 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -68,6 +68,8 @@ bool __pure glob_match(char const *pat, char const *str)
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
+ if (c == '\0') /* No possible match */
+ return false;
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
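The added guard bails out of a character class once the candidate string is exhausted, instead of scanning the class against the terminating NUL. A small usage sketch:

#include <linux/glob.h>

static bool demo(void)
{
	bool hit  = glob_match("[ab]*", "alpha");	/* true: 'a' is in the class */
	bool miss = glob_match("[ab]*", "");		/* false: the new guard fires */

	return hit && !miss;
}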
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 4a6a9f419bd7..97003155bfac 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -527,6 +527,39 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
i->__iov = iov;
}
+static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
+{
+ const struct folio_queue *folioq = i->folioq;
+ unsigned int slot = i->folioq_slot;
+
+ if (!i->count)
+ return;
+ i->count -= size;
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ size += i->iov_offset; /* From beginning of current segment. */
+ do {
+ size_t fsize = folioq_folio_size(folioq, slot);
+
+ if (likely(size < fsize))
+ break;
+ size -= fsize;
+ slot++;
+ if (slot >= folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ } while (size);
+
+ i->iov_offset = size;
+ i->folioq_slot = slot;
+ i->folioq = folioq;
+}
+
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
@@ -539,12 +572,40 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
iov_iter_iovec_advance(i, size);
} else if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_folioq(i)) {
+ iov_iter_folioq_advance(i, size);
} else if (iov_iter_is_discard(i)) {
i->count -= size;
}
}
EXPORT_SYMBOL(iov_iter_advance);
+static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
+{
+ const struct folio_queue *folioq = i->folioq;
+ unsigned int slot = i->folioq_slot;
+
+ for (;;) {
+ size_t fsize;
+
+ if (slot == 0) {
+ folioq = folioq->prev;
+ slot = folioq_nr_slots(folioq);
+ }
+ slot--;
+
+ fsize = folioq_folio_size(folioq, slot);
+ if (unroll <= fsize) {
+ i->iov_offset = fsize - unroll;
+ break;
+ }
+ unroll -= fsize;
+ }
+
+ i->folioq_slot = slot;
+ i->folioq = folioq;
+}
+
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
if (!unroll)
@@ -576,6 +637,9 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
}
unroll -= n;
}
+ } else if (iov_iter_is_folioq(i)) {
+ i->iov_offset = 0;
+ iov_iter_folioq_revert(i, unroll);
} else { /* same logics for iovec and kvec */
const struct iovec *iov = iter_iov(i);
while (1) {
@@ -603,6 +667,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
}
+ if (unlikely(iov_iter_is_folioq(i)))
+ return !i->count ? 0 :
+ umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
@@ -640,6 +707,36 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
EXPORT_SYMBOL(iov_iter_bvec);
/**
+ * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @folioq: The starting point in the folio queue.
+ * @first_slot: The first slot in the folio queue to use
+ * @offset: The offset into the folio in the first slot to start at
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator to either draw data out of the folios in the queue
+ * or to inject data into them. The folios *must* be prevented from being
+ * freed while the iterator is in use, either by the caller taking a ref on
+ * them or locking them.
+ */
+void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq, unsigned int first_slot,
+ unsigned int offset, size_t count)
+{
+ BUG_ON(direction & ~1);
+ *i = (struct iov_iter) {
+ .iter_type = ITER_FOLIOQ,
+ .data_source = direction,
+ .folioq = folioq,
+ .folioq_slot = first_slot,
+ .count = count,
+ .iov_offset = offset,
+ };
+}
+EXPORT_SYMBOL(iov_iter_folio_queue);
+
+/**
* iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
@@ -765,12 +862,19 @@ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
if (iov_iter_is_bvec(i))
return iov_iter_aligned_bvec(i, addr_mask, len_mask);
+ /* With both xarray and folioq types, we're dealing with whole folios. */
if (iov_iter_is_xarray(i)) {
if (i->count & len_mask)
return false;
if ((i->xarray_start + i->iov_offset) & addr_mask)
return false;
}
+ if (iov_iter_is_folioq(i)) {
+ if (i->count & len_mask)
+ return false;
+ if (i->iov_offset & addr_mask)
+ return false;
+ }
return true;
}
@@ -835,6 +939,9 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
if (iov_iter_is_bvec(i))
return iov_iter_alignment_bvec(i);
+ /* With both xarray and folioq types, we're dealing with whole folios. */
+ if (iov_iter_is_folioq(i))
+ return i->iov_offset | i->count;
if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
@@ -887,6 +994,62 @@ static int want_pages_array(struct page ***res, size_t size,
return count;
}
+static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
+ struct page ***ppages, size_t maxsize,
+ unsigned maxpages, size_t *_start_offset)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ struct page **pages;
+ unsigned int slot = iter->folioq_slot;
+ size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ if (WARN_ON(iov_offset != 0))
+ return -EIO;
+ }
+
+ maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ *_start_offset = iov_offset & ~PAGE_MASK;
+ pages = *ppages;
+
+ for (;;) {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
+ size_t part = PAGE_SIZE - offset % PAGE_SIZE;
+
+ part = umin(part, umin(maxsize - extracted, fsize - offset));
+ count -= part;
+ iov_offset += part;
+ extracted += part;
+
+ *pages = folio_page(folio, offset / PAGE_SIZE);
+ get_page(*pages);
+ pages++;
+ maxpages--;
+ if (maxpages == 0 || extracted >= maxsize)
+ break;
+
+ if (iov_offset >= fsize) {
+ iov_offset = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ }
+
+ iter->count = count;
+ iter->iov_offset = iov_offset;
+ iter->folioq = folioq;
+ iter->folioq_slot = slot;
+ return extracted;
+}
+
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
pgoff_t index, unsigned int nr_pages)
{
@@ -1034,6 +1197,8 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
}
return maxsize;
}
+ if (iov_iter_is_folioq(i))
+ return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
if (iov_iter_is_xarray(i))
return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
return -EFAULT;
@@ -1118,6 +1283,11 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
return iov_npages(i, maxpages);
if (iov_iter_is_bvec(i))
return bvec_npages(i, maxpages);
+ if (iov_iter_is_folioq(i)) {
+ unsigned offset = i->iov_offset % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
if (iov_iter_is_xarray(i)) {
unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
@@ -1399,6 +1569,68 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
}
/*
+ * Extract a list of contiguous pages from an ITER_FOLIOQ iterator. This does
+ * not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ const struct folio_queue *folioq = i->folioq;
+ struct page **p;
+ unsigned int nr = 0;
+ size_t extracted = 0, offset, slot = i->folioq_slot;
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ slot = 0;
+ if (WARN_ON(i->iov_offset != 0))
+ return -EIO;
+ }
+
+ offset = i->iov_offset & ~PAGE_MASK;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+
+ for (;;) {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
+ size_t part = PAGE_SIZE - offset % PAGE_SIZE;
+
+ if (offset < fsize) {
+ part = umin(part, umin(maxsize - extracted, fsize - offset));
+ i->count -= part;
+ i->iov_offset += part;
+ extracted += part;
+
+ p[nr++] = folio_page(folio, offset / PAGE_SIZE);
+ }
+
+ if (nr >= maxpages || extracted >= maxsize)
+ break;
+
+ if (i->iov_offset >= fsize) {
+ i->iov_offset = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ }
+
+ i->folioq = folioq;
+ i->folioq_slot = slot;
+ return extracted;
+}
+
+/*
* Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
* get references on the pages, nor does it get a pin on them.
*/
@@ -1618,8 +1850,8 @@ static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
* added to the pages, but refs will not be taken.
* iov_iter_extract_will_pin() will return true.
*
- * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
- * merely listed; no extra refs or pins are obtained.
+ * (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the
+ * pages are merely listed; no extra refs or pins are obtained.
* iov_iter_extract_will_pin() will return 0.
*
* Note also:
@@ -1654,6 +1886,10 @@ ssize_t iov_iter_extract_pages(struct iov_iter *i,
return iov_iter_extract_bvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
+ if (iov_iter_is_folioq(i))
+ return iov_iter_extract_folioq_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
if (iov_iter_is_xarray(i))
return iov_iter_extract_xarray_pages(i, pages, maxsize,
maxpages, extraction_flags,
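Putting the new pieces together: a folio queue is a chain of small folio arrays, and iov_iter_folio_queue() describes a byte range across it. A hedged sketch of setting up a single-segment source iterator, following the pattern the kunit tests below use (the caller is assumed to hold a reference on the folio and to keep the queue alive for the iterator's lifetime):

#include <linux/folio_queue.h>
#include <linux/uio.h>

static void folioq_source_iter(struct folio_queue *fq, struct folio *folio,
			       size_t len, struct iov_iter *iter)
{
	folioq_init(fq);
	folioq_append(fq, folio);	/* caller holds a folio ref */

	/* WRITE marks the iterator as a data source, e.g. for copy_from_iter() */
	iov_iter_folio_queue(iter, WRITE, fq, 0, 0, len);
}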
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2d5329a42105..08b242bbdbdf 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
}
EXPORT_SYMBOL(irq_poll_complete);
-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(void)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
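This follows the tree-wide change in which softirq handlers stopped receiving the unused struct softirq_action pointer; the vector is identified at registration time only. A sketch of the resulting shape, where MY_SOFTIRQ is purely hypothetical and open_softirq() is assumed to now take a void (*)(void) action:

static void my_softirq(void)
{
	/* drain this CPU's queued work */
}

static int __init my_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq);
	return 0;
}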
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 30f6bbf04a4a..5aa51978e456 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -9,7 +9,8 @@ kunit-objs += test.o \
try-catch.o \
executor.o \
attributes.o \
- device.o
+ device.o \
+ platform.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
@@ -19,6 +20,7 @@ endif
obj-y += hooks.o
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
+obj-$(CONFIG_KUNIT_TEST) += platform-test.o
# string-stream-test compiles built-in only.
ifeq ($(CONFIG_KUNIT_TEST),y)
diff --git a/lib/kunit/platform-test.c b/lib/kunit/platform-test.c
new file mode 100644
index 000000000000..e3debb8fbcef
--- /dev/null
+++ b/lib/kunit/platform-test.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for KUnit platform driver infrastructure.
+ */
+
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/test.h>
+
+/*
+ * Test that kunit_platform_device_alloc() creates a platform device.
+ */
+static void kunit_platform_device_alloc_test(struct kunit *test)
+{
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ kunit_platform_device_alloc(test, "kunit-platform", 1));
+}
+
+/*
+ * Test that kunit_platform_device_add() registers a platform device on the
+ * platform bus with the proper name and id.
+ */
+static void kunit_platform_device_add_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ KUNIT_EXPECT_TRUE(test, dev_is_platform(&pdev->dev));
+ KUNIT_EXPECT_STREQ(test, pdev->name, name);
+ KUNIT_EXPECT_EQ(test, pdev->id, id);
+}
+
+/*
+ * Test that kunit_platform_device_add() called twice with the same device name
+ * and id fails the second time and properly cleans up.
+ */
+static void kunit_platform_device_add_twice_fails_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add-2";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_NE(test, 0, kunit_platform_device_add(test, pdev));
+}
+
+static int kunit_platform_device_find_by_name(struct device *dev, const void *data)
+{
+ return strcmp(dev_name(dev), data) == 0;
+}
+
+/*
+ * Test that kunit_platform_device_add() cleans up by removing the platform
+ * device when the test finishes.
+ */
+static void kunit_platform_device_add_cleans_up(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-clean";
+ const int id = -1;
+ struct kunit fake;
+ struct device *dev;
+
+ kunit_init_test(&fake, "kunit_platform_device_add_fake_test", NULL);
+ KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ pdev = kunit_platform_device_alloc(&fake, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(&fake, pdev));
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ put_device(dev);
+
+ /* Remove pdev */
+ kunit_cleanup(&fake);
+
+ /*
+ * Failing to migrate the kunit_resource would lead to an extra
+ * put_device() call on the platform device. The best we can do here is
+ * make sure the device no longer exists on the bus, but if something
+ * is wrong we'll see a refcount underflow here. We can't test for a
+ * refcount underflow because the kref matches the lifetime of the
+ * device which should already be freed and could be used by something
+ * else.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, dev);
+ put_device(dev);
+}
+
+/*
+ * Test suite for struct platform_device kunit APIs
+ */
+static struct kunit_case kunit_platform_device_test_cases[] = {
+ KUNIT_CASE(kunit_platform_device_alloc_test),
+ KUNIT_CASE(kunit_platform_device_add_test),
+ KUNIT_CASE(kunit_platform_device_add_twice_fails_test),
+ KUNIT_CASE(kunit_platform_device_add_cleans_up),
+ {}
+};
+
+static struct kunit_suite kunit_platform_device_suite = {
+ .name = "kunit_platform_device",
+ .test_cases = kunit_platform_device_test_cases,
+};
+
+struct kunit_platform_driver_test_context {
+ struct platform_driver pdrv;
+ const char *data;
+};
+
+static const char * const test_data = "test data";
+
+static inline struct kunit_platform_driver_test_context *
+to_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct kunit_platform_driver_test_context,
+ pdrv);
+}
+
+static int kunit_platform_driver_probe(struct platform_device *pdev)
+{
+ struct kunit_platform_driver_test_context *ctx;
+
+ ctx = to_test_context(pdev);
+ ctx->data = test_data;
+
+ return 0;
+}
+
+/* Test that kunit_platform_driver_register() registers a driver that probes. */
+static void kunit_platform_driver_register_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-register";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ KUNIT_EXPECT_STREQ(test, ctx->data, test_data);
+}
+
+/*
+ * Test that kunit_platform_device_prepare_wait_for_probe() completes the completion
+ * when the device is already probed.
+ */
+static void kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-wait";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ /* Make sure driver has actually probed */
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+
+ reinit_completion(&comp);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, HZ));
+}
+
+static struct kunit_case kunit_platform_driver_test_cases[] = {
+ KUNIT_CASE(kunit_platform_driver_register_test),
+ KUNIT_CASE(kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed),
+ {}
+};
+
+/*
+ * Test suite for struct platform_driver kunit APIs
+ */
+static struct kunit_suite kunit_platform_driver_suite = {
+ .name = "kunit_platform_driver",
+ .test_cases = kunit_platform_driver_test_cases,
+};
+
+kunit_test_suites(
+ &kunit_platform_device_suite,
+ &kunit_platform_driver_suite,
+);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for KUnit platform driver infrastructure");
diff --git a/lib/kunit/platform.c b/lib/kunit/platform.c
new file mode 100644
index 000000000000..0b518de26065
--- /dev/null
+++ b/lib/kunit/platform.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test managed platform driver
+ */
+
+#include <linux/completion.h>
+#include <linux/device/bus.h>
+#include <linux/device/driver.h>
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+
+struct kunit_platform_device_alloc_params {
+ const char *name;
+ int id;
+};
+
+static int kunit_platform_device_alloc_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_platform_device_alloc_params *params = context;
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(params->name, params->id);
+ if (!pdev)
+ return -ENOMEM;
+
+ res->data = pdev;
+
+ return 0;
+}
+
+static void kunit_platform_device_alloc_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_put(pdev);
+}
+
+/**
+ * kunit_platform_device_alloc() - Allocate a KUnit test managed platform device
+ * @test: test context
+ * @name: device name of platform device to alloc
+ * @id: identifier of platform device to alloc.
+ *
+ * Allocate a test managed platform device. The device is put when the test completes.
+ *
+ * Return: Allocated platform device on success, NULL on failure.
+ */
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id)
+{
+ struct kunit_platform_device_alloc_params params = {
+ .name = name,
+ .id = id,
+ };
+
+ return kunit_alloc_resource(test,
+ kunit_platform_device_alloc_init,
+ kunit_platform_device_alloc_exit,
+ GFP_KERNEL, &params);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_alloc);
+
+static void kunit_platform_device_add_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_unregister(pdev);
+}
+
+static bool
+kunit_platform_device_alloc_match(struct kunit *test,
+ struct kunit_resource *res, void *match_data)
+{
+ struct platform_device *pdev = match_data;
+
+ return res->data == pdev && res->free == kunit_platform_device_alloc_exit;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_device_unregister_wrapper,
+ platform_device_unregister, struct platform_device *);
+/**
+ * kunit_platform_device_add() - Register a KUnit test managed platform device
+ * @test: test context
+ * @pdev: platform device to add
+ *
+ * Register a test managed platform device. The device is unregistered when the
+ * test completes.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ return ret;
+
+ res = kunit_find_resource(test, kunit_platform_device_alloc_match, pdev);
+ if (res) {
+ /*
+ * Transfer the reference count of the platform device if it
+ * was allocated with kunit_platform_device_alloc(). In this
+ * case, calling platform_device_put() when the test exits from
+ * kunit_platform_device_alloc_exit() would lead to reference
+ * count underflow because platform_device_unregister_wrapper()
+ * calls platform_device_unregister() which also calls
+ * platform_device_put().
+ *
+ * Usually callers transfer the refcount initialized in
+ * platform_device_alloc() to platform_device_add() by calling
+ * platform_device_unregister() when platform_device_add()
+ * succeeds or platform_device_put() when it fails. KUnit has to
+ * keep this straight by redirecting the free routine for the
+ * resource to the right function. Luckily this only has to
+ * account for the success scenario.
+ */
+ res->free = kunit_platform_device_add_exit;
+ kunit_put_resource(res);
+ } else {
+ ret = kunit_add_action_or_reset(test, platform_device_unregister_wrapper, pdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_add);
+
+struct kunit_platform_device_probe_nb {
+ struct completion *x;
+ struct device *dev;
+ struct notifier_block nb;
+};
+
+static int kunit_platform_device_probe_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct kunit_platform_device_probe_nb *knb;
+ struct device *dev = data;
+
+ knb = container_of(nb, struct kunit_platform_device_probe_nb, nb);
+ if (event != BUS_NOTIFY_BOUND_DRIVER || knb->dev != dev)
+ return NOTIFY_DONE;
+
+ complete(knb->x);
+
+ return NOTIFY_OK;
+}
+
+static void kunit_platform_device_probe_nb_remove(void *nb)
+{
+ bus_unregister_notifier(&platform_bus_type, nb);
+}
+
+/**
+ * kunit_platform_device_prepare_wait_for_probe() - Prepare a completion
+ * variable to wait for a platform device to probe
+ * @test: test context
+ * @pdev: platform device to prepare to wait for probe of
+ * @x: completion variable completed when @dev has probed
+ *
+ * Prepare a completion variable @x to wait for @pdev to probe. Waiting on the
+ * completion forces a preemption, allowing the platform driver to probe.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct platform_device *pdev;
+ * struct platform_driver *pdrv;
+ * DECLARE_COMPLETION_ONSTACK(comp);
+ *
+ * pdev = kunit_platform_device_alloc(test, "kunit-platform", -1);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ *
+ * pdrv = kunit_kzalloc(test, sizeof(*pdrv), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdrv);
+ *
+ * pdrv->probe = kunit_platform_driver_probe;
+ * pdrv->driver.name = "kunit-platform";
+ * pdrv->driver.owner = THIS_MODULE;
+ *
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, pdrv));
+ *
+ * KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x)
+{
+ struct device *dev = &pdev->dev;
+ struct kunit_platform_device_probe_nb *knb;
+ bool bound;
+
+ knb = kunit_kzalloc(test, sizeof(*knb), GFP_KERNEL);
+ if (!knb)
+ return -ENOMEM;
+
+ knb->nb.notifier_call = kunit_platform_device_probe_notify;
+ knb->dev = dev;
+ knb->x = x;
+
+ device_lock(dev);
+ bound = device_is_bound(dev);
+ if (bound) {
+ device_unlock(dev);
+ complete(x);
+ kunit_kfree(test, knb);
+ return 0;
+ }
+
+ bus_register_notifier(&platform_bus_type, &knb->nb);
+ device_unlock(dev);
+
+ return kunit_add_action_or_reset(test, kunit_platform_device_probe_nb_remove, &knb->nb);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_prepare_wait_for_probe);
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_driver_unregister_wrapper,
+ platform_driver_unregister, struct platform_driver *);
+/**
+ * kunit_platform_driver_register() - Register a KUnit test managed platform driver
+ * @test: test context
+ * @drv: platform driver to register
+ *
+ * Register a test managed platform driver. This allows callers to embed the
+ * @drv in a container structure and use container_of() in the probe function
+ * to pass information to KUnit tests.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_test_context {
+ * struct platform_driver pdrv;
+ * const char *data;
+ * };
+ *
+ * static inline struct kunit_test_context *
+ * to_test_context(struct platform_device *pdev)
+ * {
+ * return container_of(to_platform_driver(pdev->dev.driver),
+ * struct kunit_test_context,
+ * pdrv);
+ * }
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = to_test_context(pdev);
+ * ctx->data = "test data";
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ *
+ * ctx->pdrv.probe = kunit_platform_driver_probe;
+ * ctx->pdrv.driver.name = "kunit-platform";
+ * ctx->pdrv.driver.owner = THIS_MODULE;
+ *
+ * KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ * <... wait for driver to probe ...>
+ * KUNIT_EXPECT_STREQ(test, ctx->data, "test data");
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, platform_driver_unregister_wrapper, drv);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_driver_register);
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 27e0c8ee71d8..13e15687675a 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
+#include <linux/folio_queue.h>
#include <kunit/test.h>
MODULE_DESCRIPTION("iov_iter testing");
@@ -62,6 +63,9 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
KUNIT_ASSERT_EQ(test, got, npages);
}
+ for (int i = 0; i < npages; i++)
+ pages[i]->index = i;
+
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
@@ -362,6 +366,179 @@ stop:
KUNIT_SUCCEED(test);
}
+static void iov_kunit_destroy_folioq(void *data)
+{
+ struct folio_queue *folioq, *next;
+
+ for (folioq = data; folioq; folioq = next) {
+ next = folioq->next;
+ for (int i = 0; i < folioq_nr_slots(folioq); i++)
+ if (folioq_folio(folioq, i))
+ folio_put(folioq_folio(folioq, i));
+ kfree(folioq);
+ }
+}
+
+static void __init iov_kunit_load_folioq(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct folio_queue *folioq,
+ struct page **pages, size_t npages)
+{
+ struct folio_queue *p = folioq;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ if (folioq_full(p)) {
+ p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
+ folioq_init(p->next);
+ p->next->prev = p;
+ p = p->next;
+ }
+ folioq_append(p, page_folio(pages[i]));
+ size += PAGE_SIZE;
+ }
+ iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
+}
+
+static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
+{
+ struct folio_queue *folioq;
+
+ folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
+ folioq_init(folioq);
+ return folioq;
+}
+
+/*
+ * Test copying to a ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_to_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct folio_queue *folioq;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, pr->from);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+ i += size;
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+stop:
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from a ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_from_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct folio_queue *folioq;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, pr->from);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
static void iov_kunit_destroy_xarray(void *data)
{
struct xarray *xarray = data;
@@ -678,6 +855,85 @@ stop:
}
/*
+ * Test the extraction of ITER_FOLIOQ-type iterators.
+ */
+static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct folio_queue *folioq;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, from);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ }
+
+stop:
+ KUNIT_SUCCEED(test);
+}
+
+/*
* Test the extraction of ITER_XARRAY-type iterators.
*/
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
@@ -761,10 +1017,13 @@ static struct kunit_case __refdata iov_kunit_cases[] = {
KUNIT_CASE(iov_kunit_copy_from_kvec),
KUNIT_CASE(iov_kunit_copy_to_bvec),
KUNIT_CASE(iov_kunit_copy_from_bvec),
+ KUNIT_CASE(iov_kunit_copy_to_folioq),
+ KUNIT_CASE(iov_kunit_copy_from_folioq),
KUNIT_CASE(iov_kunit_copy_to_xarray),
KUNIT_CASE(iov_kunit_copy_from_xarray),
KUNIT_CASE(iov_kunit_extract_pages_kvec),
KUNIT_CASE(iov_kunit_extract_pages_bvec),
+ KUNIT_CASE(iov_kunit_extract_pages_folioq),
KUNIT_CASE(iov_kunit_extract_pages_xarray),
{}
};
diff --git a/lib/list-test.c b/lib/list-test.c
index 37cbc33e9fdb..4f3dc75baec1 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -102,6 +102,8 @@ static void list_test_list_replace(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
}
static void list_test_list_replace_init(struct kunit *test)
@@ -118,6 +120,8 @@ static void list_test_list_replace_init(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
/* check a_old is empty (initialized) */
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
@@ -404,10 +408,13 @@ static void list_test_list_cut_position(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, 2);
+ i = 0;
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
+
+ KUNIT_EXPECT_EQ(test, i, 1);
}
static void list_test_list_cut_before(struct kunit *test)
@@ -432,10 +439,13 @@ static void list_test_list_cut_before(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, 1);
+ i = 0;
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
+
+ KUNIT_EXPECT_EQ(test, i, 2);
}
static void list_test_list_splice(struct kunit *test)
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index b3d9187611de..9e0d469c7658 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -243,7 +243,7 @@ static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
- hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), collision) {
/* "about to be changed" elements, pending transaction commit,
* are hashed by their "new number". "Normal" elements have
* lc_number == lc_new_number. */
@@ -303,7 +303,7 @@ void lc_del(struct lru_cache *lc, struct lc_element *e)
BUG_ON(e->refcnt);
e->lc_number = e->lc_new_number = LC_FREE;
- hlist_del_init(&e->colision);
+ hlist_del_init(&e->collision);
list_move(&e->list, &lc->free);
RETURN();
}
@@ -324,9 +324,9 @@ static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned n
PARANOIA_LC_ELEMENT(lc, e);
e->lc_new_number = new_number;
- if (!hlist_unhashed(&e->colision))
- __hlist_del(&e->colision);
- hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
+ if (!hlist_unhashed(&e->collision))
+ __hlist_del(&e->collision);
+ hlist_add_head(&e->collision, lc_hash_slot(lc, new_number));
list_move(&e->list, &lc->to_be_changed);
return e;
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index e7ac8694b797..bc45594ad2a8 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -621,6 +621,7 @@ void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
LZ4_streamHCPtr->internal_donotuse.base = NULL;
LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
+EXPORT_SYMBOL(LZ4_resetStreamHC);
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
const char *dictionary,
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6df3a8b95808..20990ecba2dd 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -348,17 +348,17 @@ static inline void *mte_safe_root(const struct maple_enode *node)
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
-static inline void *mte_set_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
-static inline void *mte_clear_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}
-static inline bool mte_has_null(const struct maple_enode *node)
+static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
{
return (unsigned long)node & MAPLE_ENODE_NULL;
}
@@ -474,6 +474,7 @@ enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
/*
* mas_set_parent() - Set the parent node and encode the slot
+ * @mas: The maple state
* @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
@@ -534,7 +535,7 @@ unsigned int mte_parent_slot(const struct maple_enode *enode)
/*
* mte_parent() - Get the parent of @node.
- * @node: The encoded maple node.
+ * @enode: The encoded maple node.
*
* Return: The parent maple node.
*/
@@ -641,8 +642,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
/*
* ma_pivots() - Get a pointer to the maple node pivots.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* In the event of a dead node, this array may be %NULL
*
@@ -665,8 +666,8 @@ static inline unsigned long *ma_pivots(struct maple_node *node,
/*
* ma_gaps() - Get a pointer to the maple node gaps.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* Return: A pointer to the maple node gaps
*/
@@ -880,8 +881,6 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
* @mt: The maple tree
* @mn: The maple node
* @type: The maple node type
- * @offset: The offset of the highest sub-gap in this node.
- * @end: The end of the data in this node.
*/
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
enum maple_type type)
@@ -939,7 +938,7 @@ static inline unsigned char ma_meta_gap(struct maple_node *mn)
/*
* ma_set_meta_gap() - Set the largest gap location in a nodes metadata
* @mn: The maple node
- * @mn: The maple node type
+ * @mt: The maple node type
* @offset: The location of the largest gap.
*/
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
@@ -953,8 +952,8 @@ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
- * @mat - the ma_topiary, a linked list of dead nodes.
- * @dead_enode - the node to be marked as dead and added to the tail of the list
+ * @mat: the ma_topiary, a linked list of dead nodes.
+ * @dead_enode: the node to be marked as dead and added to the tail of the list
*
* Add the @dead_enode to the linked list in @mat.
*/
@@ -977,8 +976,8 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
- * @mas - the maple state
- * @mat - the ma_topiary linked list of dead nodes to free.
+ * @mas: the maple state
+ * @mat: the ma_topiary linked list of dead nodes to free.
*
* Destroy walk a dead list.
*/
@@ -999,7 +998,7 @@ static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
}
/*
* mas_descend() - Descend into the slot stored in the ma_state.
- * @mas - the maple state.
+ * @mas: the maple state.
*
* Note: Not RCU safe, only use in write side or debug code.
*/
@@ -1346,8 +1345,8 @@ static void mas_node_count(struct ma_state *mas, int count)
* Return:
* - If mas->node is an error or not mas_start, return NULL.
* - If it's an empty tree: NULL & mas->status == ma_none
- * - If it's a single entry: The entry & mas->status == mas_root
- * - If it's a tree: NULL & mas->status == safe root node.
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->status == ma_active
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1372,9 +1371,9 @@ retry:
return NULL;
}
+ mas->node = NULL;
/* empty tree */
if (unlikely(!root)) {
- mas->node = NULL;
mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
@@ -1462,7 +1461,7 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
- * @mas - the maple state
+ * @mas: the maple state
*
* Return: The maximum gap in the leaf.
*/
@@ -1544,7 +1543,7 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
* @node: The maple node
* @gaps: The pointer to the gaps
* @mt: The maple node type
- * @*off: Pointer to store the offset location of the gap.
+ * @off: Pointer to store the offset location of the gap.
*
* Uses the metadata data end to scan backwards across set gaps.
*
@@ -1651,7 +1650,7 @@ ascend:
/*
* mas_update_gap() - Update a nodes gaps and propagate up if necessary.
- * @mas - the maple state.
+ * @mas: the maple state.
*/
static inline void mas_update_gap(struct ma_state *mas)
{
@@ -1678,8 +1677,8 @@ static inline void mas_update_gap(struct ma_state *mas)
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
- * @mas - the maple state (for the tree)
- * @parent - the maple encoded node containing the children.
+ * @mas: the maple state (for the tree)
+ * @parent: the maple encoded node containing the children.
*/
static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
@@ -1701,8 +1700,8 @@ static inline void mas_adopt_children(struct ma_state *mas,
/*
* mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
* node as dead.
- * @mas - the maple state with the new node
- * @old_enode - The old maple encoded node to replace.
+ * @mas: the maple state with the new node
+ * @old_enode: The old maple encoded node to replace.
*/
static inline void mas_put_in_tree(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1730,8 +1729,8 @@ static inline void mas_put_in_tree(struct ma_state *mas,
* mas_replace_node() - Replace a node by putting it in the tree, marking it
* dead, and freeing it.
* Uses the parent encoding to locate the maple node in the tree.
- * @mas - the ma_state with @mas->node pointing to the new node.
- * @old_enode - The old maple encoded node.
+ * @mas: the ma_state with @mas->node pointing to the new node.
+ * @old_enode: The old maple encoded node.
*/
static inline void mas_replace_node(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1796,7 +1795,6 @@ static inline void mab_shift_right(struct maple_big_node *b_node,
/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
* @b_node: the maple_big_node that contains the data.
- * @size: the amount of data in the b_node
* @split: the potential split location
* @slot_count: the size that can be stored in a single node being considered.
*
@@ -1844,6 +1842,7 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
+ * @mas: The maple state
* @bn: The maple_big_node with the data
* @mid_split: The second split, if required. 0 otherwise.
*
@@ -2177,7 +2176,8 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_none() - Set the enode and state.
+ * mas_node_or_none() - Set the enode and state.
+ * @mas: the maple state
* @enode: The encoded maple node.
*
* Set the node to the enode and the status.
@@ -2228,7 +2228,6 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
/*
* mast_rebalance_next() - Rebalance against the next node
* @mast: The maple subtree state
- * @old_r: The encoded maple node to the right (next node).
*/
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
@@ -2242,7 +2241,6 @@ static inline void mast_rebalance_next(struct maple_subtree_state *mast)
/*
* mast_rebalance_prev() - Rebalance against the previous node
* @mast: The maple subtree state
- * @old_l: The encoded maple node to the left (previous node)
*/
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
@@ -2393,9 +2391,9 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
- * @b_node - the big node to add the entry
- * @mas - the maple state to get the pivot (mas->max)
- * @entry - the entry to add, if NULL nothing happens.
+ * @b_node: the big node to add the entry
+ * @mas: the maple state to get the pivot (mas->max)
+ * @entry: the entry to add, if NULL nothing happens.
*/
static inline void mab_set_b_end(struct maple_big_node *b_node,
struct ma_state *mas,
@@ -2414,11 +2412,11 @@ static inline void mab_set_b_end(struct maple_big_node *b_node,
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
- * @mas - the maple state with the node that needs a parent
- * @left - possible parent 1
- * @right - possible parent 2
- * @slot - the slot the mas->node was placed
- * @split - the split location between @left and @right
+ * @mas: the maple state with the node that needs a parent
+ * @left: possible parent 1
+ * @right: possible parent 2
+ * @slot: the slot the mas->node was placed
+ * @split: the split location between @left and @right
*/
static inline void mas_set_split_parent(struct ma_state *mas,
struct maple_enode *left,
@@ -2438,11 +2436,11 @@ static inline void mas_set_split_parent(struct ma_state *mas,
/*
* mte_mid_split_check() - Check if the next node passes the mid-split
- * @**l: Pointer to left encoded maple node.
- * @**m: Pointer to middle encoded maple node.
- * @**r: Pointer to right encoded maple node.
+ * @l: Pointer to left encoded maple node.
+ * @m: Pointer to middle encoded maple node.
+ * @r: Pointer to right encoded maple node.
* @slot: The offset
- * @*split: The split location.
+ * @split: The split location.
* @mid_split: The middle split.
*/
static inline void mte_mid_split_check(struct maple_enode **l,
@@ -2466,10 +2464,10 @@ static inline void mte_mid_split_check(struct maple_enode **l,
/*
* mast_set_split_parents() - Helper function to set three nodes parents. Slot
* is taken from @mast->l.
- * @mast - the maple subtree state
- * @left - the left node
- * @right - the right node
- * @split - the split location.
+ * @mast: the maple subtree state
+ * @left: the left node
+ * @right: the right node
+ * @split: the split location.
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
@@ -2503,7 +2501,6 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
/*
* mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
- * @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
*
* The node will either be RCU freed or pushed back on the maple state.
@@ -2635,7 +2632,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/*
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
- * @old: The old maple encoded node that is being replaced.
+ * @old_enode: The old maple encoded node that is being replaced.
*
* Updates gap as necessary.
*/
@@ -2823,10 +2820,8 @@ dead_node:
* orig_l_mas->last is used in mas_consume to find the slots that will need to
* be either freed or destroyed. orig_l_mas->depth keeps track of the height of
* the new sub-tree in case the sub-tree becomes the full tree.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static int mas_spanning_rebalance(struct ma_state *mas,
+static void mas_spanning_rebalance(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, mid_split;
@@ -2942,7 +2937,7 @@ new_root:
mas->offset = l_mas.offset;
mas_wmb_replace(mas, old_enode);
mtree_range_walk(mas);
- return mast->bn->b_end;
+ return;
}
/*
@@ -2952,10 +2947,8 @@ new_root:
*
* Rebalance two nodes into a single node or two new nodes that are sufficient.
* Continue upwards until tree is sufficient.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static inline int mas_rebalance(struct ma_state *mas,
+static inline void mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_count = mas_mt_height(mas);
@@ -2976,9 +2969,6 @@ static inline int mas_rebalance(struct ma_state *mas,
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
- mas_node_count(mas, empty_count * 2 - 1);
- if (mas_is_err(mas))
- return 0;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
@@ -3029,11 +3019,6 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
/* set up node. */
if (in_rcu) {
- /* Allocate for both left and right as well as parent. */
- mas_node_count(mas, 3);
- if (mas_is_err(mas))
- return;
-
newnode = mas_pop_node(mas);
} else {
newnode = &reuse;
@@ -3308,9 +3293,8 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
* mas_split() - Split data that is too big for one node into two.
* @mas: The maple state
* @b_node: The maple big node
- * Return: 1 on success, 0 on failure.
*/
-static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
@@ -3341,10 +3325,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
trace_ma_op(__func__, mas);
mas->depth = mas_mt_height(mas);
- /* Allocation failures will happen early. */
- mas_node_count(mas, 1 + mas->depth * 2);
- if (mas_is_err(mas))
- return 0;
mast.l = &l_mas;
mast.r = &r_mas;
@@ -3392,75 +3372,25 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mas->node = l_mas.node;
mas_wmb_replace(mas, old);
mtree_range_walk(mas);
- return 1;
-}
-
-/*
- * mas_reuse_node() - Reuse the node to store the data.
- * @wr_mas: The maple write state
- * @bn: The maple big node
- * @end: The end of the data.
- *
- * Will always return false in RCU mode.
- *
- * Return: True if node was reused, false otherwise.
- */
-static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *bn, unsigned char end)
-{
- /* Need to be rcu safe. */
- if (mt_in_rcu(wr_mas->mas->tree))
- return false;
-
- if (end > bn->b_end) {
- int clear = mt_slots[wr_mas->type] - bn->b_end;
-
- memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
- memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
- }
- mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
- return true;
+ return;
}
/*
* mas_commit_b_node() - Commit the big node into the tree.
* @wr_mas: The maple write state
* @b_node: The maple big node
- * @end: The end of the data.
*/
-static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *b_node, unsigned char end)
+static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node)
{
- struct maple_node *node;
- struct maple_enode *old_enode;
- unsigned char b_end = b_node->b_end;
- enum maple_type b_type = b_node->type;
-
- old_enode = wr_mas->mas->node;
- if ((b_end < mt_min_slots[b_type]) &&
- (!mte_is_root(old_enode)) &&
- (mas_mt_height(wr_mas->mas) > 1))
- return mas_rebalance(wr_mas->mas, b_node);
+ enum store_type type = wr_mas->mas->store_type;
- if (b_end >= mt_slots[b_type])
- return mas_split(wr_mas->mas, b_node);
+ WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store);
- if (mas_reuse_node(wr_mas, b_node, end))
- goto reuse_node;
-
- mas_node_count(wr_mas->mas, 1);
- if (mas_is_err(wr_mas->mas))
- return 0;
+ if (type == wr_rebalance)
+ return mas_rebalance(wr_mas->mas, b_node);
- node = mas_pop_node(wr_mas->mas);
- node->parent = mas_mn(wr_mas->mas)->parent;
- wr_mas->mas->node = mt_mk_node(node, b_type);
- mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
- mas_replace_node(wr_mas->mas, old_enode);
-reuse_node:
- mas_update_gap(wr_mas->mas);
- wr_mas->mas->end = b_end;
- return 1;
+ return mas_split(wr_mas->mas, b_node);
}
/*
@@ -3477,10 +3407,6 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
unsigned long *pivots;
int slot = 0;
- mas_node_count(mas, 1);
- if (unlikely(mas_is_err(mas)))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3526,10 +3452,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
/*
* mas_is_span_wr() - Check if the write needs to be treated as a write that
* spans the node.
- * @mas: The maple state
- * @piv: The pivot value being written
- * @type: The maple node type
- * @entry: The data to write
+ * @wr_mas: The maple write state
*
 * Spanning writes are writes that start in one node and end in another, or
 * writes where storing a %NULL will cause the node to end with a %NULL.
@@ -3730,10 +3653,8 @@ static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
* @entry: The entry to store.
*
* Only valid when the index == 0 and the last == ULONG_MAX
- *
- * Return 0 on error, 1 on success.
*/
-static inline int mas_new_root(struct ma_state *mas, void *entry)
+static inline void mas_new_root(struct ma_state *mas, void *entry)
{
struct maple_enode *root = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
@@ -3749,10 +3670,6 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
goto done;
}
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3769,7 +3686,7 @@ done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
- return 1;
+ return;
}
/*
* mas_wr_spanning_store() - Create a subtree with the store operation completed
@@ -3777,10 +3694,8 @@ done:
* Note that mas is expected to point to the node which caused the store to
* span.
* @wr_mas: The maple write state
- *
- * Return: 0 on error, positive on success.
*/
-static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
@@ -3815,9 +3730,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* entries per level plus a new root.
*/
height = mas_mt_height(mas);
- mas_node_count(mas, 1 + height * 3);
- if (mas_is_err(mas))
- return 0;
/*
* Set up right side. Need to get to the next offset after the spanning
@@ -3875,10 +3787,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
@@ -3889,11 +3799,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
- /* Check if there is enough data. The room is enough. */
- if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
- !(mas->mas_flags & MA_STATE_BULK))
- return false;
-
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
@@ -3901,10 +3806,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
/* set up node. */
if (in_rcu) {
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return false;
-
newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
@@ -3960,16 +3861,14 @@ done:
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
- return true;
+ return;
}
/*
* mas_wr_slot_store: Attempt to store a value in a slot.
* @wr_mas: the maple write state
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
@@ -4001,7 +3900,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
} else {
- return false;
+ return;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
@@ -4012,7 +3911,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
if (!wr_mas->entry || gap)
mas_update_gap(mas);
- return true;
+ return;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
@@ -4061,9 +3960,6 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
-
- if (!wr_mas->entry)
- mas_wr_extend_null(wr_mas);
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
@@ -4089,23 +3985,13 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
* This is currently unsafe in rcu mode since the end of the node may be cached
 * by readers while the node contents may be updated, which could result in
* inaccurate information.
- *
- * Return: True if appended, false otherwise
*/
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+static inline void mas_wr_append(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
- struct ma_state *mas;
+ struct ma_state *mas = wr_mas->mas;
void __rcu **slots;
- unsigned char end;
-
- mas = wr_mas->mas;
- if (mt_in_rcu(mas->tree))
- return false;
-
- end = mas->end;
- if (mas->offset != end)
- return false;
+ unsigned char end = mas->end;
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
@@ -4139,7 +4025,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
- return true;
+ return;
}
/*
@@ -4155,76 +4041,235 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
+ mas_commit_b_node(wr_mas, &b_node);
}
-static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end;
+ unsigned char new_end = mas_wr_new_end(wr_mas);
- /* Direct replacement */
- if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ switch (mas->store_type) {
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
+ return;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
+ mas_store_root(mas, wr_mas->entry);
+ break;
+ case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
- return;
+ break;
+ case wr_append:
+ mas_wr_append(wr_mas, new_end);
+ break;
+ case wr_slot_store:
+ mas_wr_slot_store(wr_mas);
+ break;
+ case wr_node_store:
+ mas_wr_node_store(wr_mas, new_end);
+ break;
+ case wr_spanning_store:
+ mas_wr_spanning_store(wr_mas);
+ break;
+ case wr_split_store:
+ case wr_rebalance:
+ mas_wr_bnode(wr_mas);
+ break;
+ }
+
+ return;
+}
+
+static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (!mas_is_active(mas)) {
+ if (mas_is_start(mas))
+ goto set_content;
+
+ if (unlikely(mas_is_paused(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_none(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(mas)))
+ goto reset;
}
/*
- * new_end exceeds the size of the maple node and cannot enter the fast
- * path.
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+ * mas_prealloc() from being reset.
*/
- new_end = mas_wr_new_end(wr_mas);
- if (new_end >= mt_slots[wr_mas->type])
- goto slow_path;
-
- /* Attempt to append */
- if (mas_wr_append(wr_mas, new_end))
- return;
+ if (mas->last > mas->max)
+ goto reset;
- if (new_end == mas->end && mas_wr_slot_store(wr_mas))
- return;
+ if (wr_mas->entry)
+ goto set_content;
- if (mas_wr_node_store(wr_mas, new_end))
- return;
+ if (mte_is_leaf(mas->node) && mas->last == mas->max)
+ goto reset;
- if (mas_is_err(mas))
- return;
+ goto set_content;
-slow_path:
- mas_wr_bnode(wr_mas);
+reset:
+ mas_reset(mas);
+set_content:
+ wr_mas->content = mas_start(mas);
}
-/*
- * mas_wr_store_entry() - Internal call to store a value
+/**
+ * mas_prealloc_calc() - Calculate number of nodes needed for a
+ * given store operation
* @mas: The maple state
- * @entry: The entry to store.
+ * @entry: The entry to store into the tree
*
- * Return: The contents that was stored at the index.
+ * Return: Number of nodes required for preallocation.
*/
-static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
+{
+ int ret = mas_mt_height(mas) * 3 + 1;
+
+ switch (mas->store_type) {
+ case wr_invalid:
+ WARN_ON_ONCE(1);
+ break;
+ case wr_new_root:
+ ret = 1;
+ break;
+ case wr_store_root:
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ ret = 1;
+ else if (((unsigned long) (entry) & 3) == 2)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ case wr_spanning_store:
+ ret = mas_mt_height(mas) * 3 + 1;
+ break;
+ case wr_split_store:
+ ret = mas_mt_height(mas) * 2 + 1;
+ break;
+ case wr_rebalance:
+ ret = mas_mt_height(mas) * 2 - 1;
+ break;
+ case wr_node_store:
+ ret = mt_in_rcu(mas->tree) ? 1 : 0;
+ break;
+ case wr_append:
+ case wr_exact_fit:
+ case wr_slot_store:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * mas_wr_store_type() - Set the store type for a given
+ * store operation.
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end;
- wr_mas->content = mas_start(mas);
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, wr_mas->entry);
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
+ mas->store_type = wr_store_root;
return;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
- mas_wr_spanning_store(wr_mas);
+ mas->store_type = wr_spanning_store;
return;
}
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
- /* New root for a single pointer */
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- mas_new_root(mas, wr_mas->entry);
- else
- mas_wr_modify(wr_mas);
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+
+ new_end = mas_wr_new_end(wr_mas);
+ if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
+ mas->store_type = wr_exact_fit;
+ return;
+ }
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas->store_type = wr_new_root;
+ return;
+ }
+
+ /* Potential spanning rebalance collapsing a node */
+ if (new_end < mt_min_slots[wr_mas->type]) {
+ if (!mte_is_root(mas->node)) {
+ mas->store_type = wr_rebalance;
+ return;
+ }
+ mas->store_type = wr_node_store;
+ return;
+ }
+
+ if (new_end >= mt_slots[wr_mas->type]) {
+ mas->store_type = wr_split_store;
+ return;
+ }
+
+ if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
+ mas->store_type = wr_append;
+ return;
+ }
+
+ if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
+ (wr_mas->offset_end - mas->offset == 1))) {
+ mas->store_type = wr_slot_store;
+ return;
+ }
+
+ if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
+ (mas->mas_flags & MA_STATE_BULK)) {
+ mas->store_type = wr_node_store;
+ return;
+ }
+
+ mas->store_type = wr_invalid;
+ MAS_WARN_ON(mas, 1);
+}
+
+/**
+ * mas_wr_preallocate() - Preallocate enough nodes for a store operation
+ * @wr_mas: The maple write state
+ * @entry: The entry that will be stored
+ *
+ */
+static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
+{
+ struct ma_state *mas = wr_mas->mas;
+ int request;
+
+ mas_wr_prealloc_setup(wr_mas);
+ mas_wr_store_type(wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return;
+
+ mas_node_count(mas, request);
}
/**
@@ -4257,26 +4302,24 @@ static inline void *mas_insert(struct ma_state *mas, void *entry)
if (wr_mas.content)
goto exists;
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, entry);
+ mas_wr_preallocate(&wr_mas, entry);
+ if (mas_is_err(mas))
return NULL;
- }
/* spanning writes always overwrite something */
- if (!mas_wr_walk(&wr_mas))
+ if (mas->store_type == wr_spanning_store)
goto exists;
/* At this point, we are at the leaf node that needs to be altered. */
- wr_mas.offset_end = mas->offset;
- wr_mas.end_piv = wr_mas.r_max;
+ if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) {
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
- if (wr_mas.content || (mas->last > wr_mas.r_max))
- goto exists;
-
- if (!entry)
- return NULL;
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+ }
- mas_wr_modify(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
return wr_mas.content;
exists:
@@ -4331,6 +4374,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
if (*next == 0)
mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+ mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
@@ -4440,9 +4484,8 @@ no_entry:
* mas_prev_slot() - Get the entry in the previous slot
*
* @mas: The maple state
- * @max: The minimum starting range
+ * @min: The minimum starting range
* @empty: Can be empty
- * @set_underflow: Set the @mas->node to underflow state on limit.
*
* Return: The entry in the previous slot which is possibly NULL
*/
@@ -4525,6 +4568,7 @@ underflow:
/*
* mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
+ * @node: The maple node
* @max: The maximum pivot value to check.
*
* The next value will be mas->node[mas->offset] or the status will have
@@ -4615,8 +4659,6 @@ overflow:
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
- * @set_overflow: Should @mas->node be set to overflow when the limit is
- * reached.
*
* Return: The entry in the next slot which is possibly NULL
*/
@@ -5150,9 +5192,9 @@ EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
* mte_dead_leaves() - Mark all leaves of a node as dead.
- * @mas: The maple state
+ * @enode: the encoded node
+ * @mt: the maple tree
* @slots: Pointer to the slot array
- * @type: The maple node type
*
* Must hold the write lock.
*
@@ -5358,47 +5400,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
mt_destroy_walk(enode, mt, true);
}
}
-
-static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
-{
- if (!mas_is_active(wr_mas->mas)) {
- if (mas_is_start(wr_mas->mas))
- return;
-
- if (unlikely(mas_is_paused(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_none(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_overflow(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_underflow(wr_mas->mas)))
- goto reset;
- }
-
- /*
- * A less strict version of mas_is_span_wr() where we allow spanning
- * writes within this node. This is to stop partial walks in
- * mas_prealloc() from being reset.
- */
- if (wr_mas->mas->last > wr_mas->mas->max)
- goto reset;
-
- if (wr_mas->entry)
- return;
-
- if (mte_is_leaf(wr_mas->mas->node) &&
- wr_mas->mas->last == wr_mas->mas->max)
- goto reset;
-
- return;
-
-reset:
- mas_reset(wr_mas->mas);
-}
-
/* Interface */
/**
@@ -5407,13 +5408,12 @@ reset:
* @entry: The entry to store.
*
 * The @mas->index and @mas->last are used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
void *mas_store(struct ma_state *mas, void *entry)
{
+ int request;
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
@@ -5434,8 +5434,25 @@ void *mas_store(struct ma_state *mas, void *entry)
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
- mas_wr_store_setup(&wr_mas);
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
+ mas_wr_store_entry(&wr_mas);
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+ return wr_mas.content;
+ }
+
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ goto store;
+
+ mas_node_count(mas, request);
+ if (mas_is_err(mas))
+ return NULL;
+
+store:
mas_wr_store_entry(&wr_mas);
+ mas_destroy(mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
@@ -5451,19 +5468,28 @@ EXPORT_SYMBOL_GPL(mas_store);
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
+ unsigned long index = mas->index;
+ unsigned long last = mas->last;
MA_WR_STATE(wr_mas, mas, entry);
+ int ret = 0;
- mas_wr_store_setup(&wr_mas);
- trace_ma_write(__func__, mas, 0, entry);
retry:
- mas_wr_store_entry(&wr_mas);
- if (unlikely(mas_nomem(mas, gfp)))
+ mas_wr_preallocate(&wr_mas, entry);
+ if (unlikely(mas_nomem(mas, gfp))) {
+ if (!entry)
+ __mas_set_range(mas, index, last);
goto retry;
+ }
- if (unlikely(mas_is_err(mas)))
- return xa_err(mas->node);
+ if (mas_is_err(mas)) {
+ ret = xa_err(mas->node);
+ goto out;
+ }
- return 0;
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
+ return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
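
A minimal caller-side sketch of the one-shot path above, assuming the usual tree and lock setup: after this change mas_store_gfp() classifies the write, preallocates the computed node count, and releases leftovers via mas_destroy(), so the caller only supplies the range, the entry and the gfp flags. The wrapper name is illustrative.

#include <linux/maple_tree.h>

static int example_store(struct maple_tree *mt, unsigned long index,
                         unsigned long last, void *entry)
{
        MA_STATE(mas, mt, index, last);
        int ret;

        mtree_lock(mt);
        ret = mas_store_gfp(&mas, entry, GFP_KERNEL); /* 0 or -errno */
        mtree_unlock(mt);
        return ret;
}
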
@@ -5477,7 +5503,19 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
- mas_wr_store_setup(&wr_mas);
+ if (mas->store_type == wr_store_root) {
+ mas_wr_prealloc_setup(&wr_mas);
+ goto store;
+ }
+
+ mas_wr_walk_descend(&wr_mas);
+ if (mas->store_type != wr_spanning_store) {
+ /* set wr_mas->content to current slot */
+ wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset);
+ mas_wr_end_piv(&wr_mas);
+ }
+
+store:
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
@@ -5496,70 +5534,25 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
- unsigned char node_size;
- int request = 1;
- int ret;
-
-
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- goto ask_now;
-
- mas_wr_store_setup(&wr_mas);
- wr_mas.content = mas_start(mas);
- /* Root expand */
- if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
- goto ask_now;
-
- if (unlikely(!mas_wr_walk(&wr_mas))) {
- /* Spanning store, use worst case for now */
- request = 1 + mas_mt_height(mas) * 3;
- goto ask_now;
- }
-
- /* At this point, we are at the leaf node that needs to be altered. */
- /* Exact fit, no nodes needed. */
- if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
- return 0;
-
- mas_wr_end_piv(&wr_mas);
- node_size = mas_wr_new_end(&wr_mas);
+ int ret = 0;
+ int request;
- /* Slot store, does not require additional nodes */
- if (node_size == mas->end) {
- /* reuse node */
- if (!mt_in_rcu(mas->tree))
- return 0;
- /* shifting boundary */
- if (wr_mas.offset_end - mas->offset == 1)
- return 0;
- }
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return ret;
- if (node_size >= mt_slots[wr_mas.type]) {
- /* Split, worst case for now. */
- request = 1 + mas_mt_height(mas) * 2;
- goto ask_now;
+ mas_node_count_gfp(mas, request, gfp);
+ if (mas_is_err(mas)) {
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
}
- /* New root needs a single node */
- if (unlikely(mte_is_root(mas->node)))
- goto ask_now;
-
- /* Potential spanning rebalance collapsing a node, use worst-case */
- if (node_size - 1 <= mt_min_slots[wr_mas.type])
- request = mas_mt_height(mas) * 2 - 1;
-
- /* node store, slot store needs one node */
-ask_now:
- mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
- if (likely(!mas_is_err(mas)))
- return 0;
-
- mas_set_alloc_req(mas, 0);
- ret = xa_err(mas->node);
- mas_reset(mas);
- mas_destroy(mas);
- mas_reset(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
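
A hedged sketch of the two-phase flow this function now anchors: mas_preallocate() derives its request from mas_prealloc_calc() for the store type set above, and a later mas_store_prealloc() commits without failing. Locking and tree setup are assumed; the wrapper name is illustrative.

static int example_reserve_then_store(struct maple_tree *mt,
                                      unsigned long first, unsigned long last,
                                      void *entry)
{
        MA_STATE(mas, mt, first, last);
        int ret;

        mtree_lock(mt);
        ret = mas_preallocate(&mas, entry, GFP_KERNEL);
        if (!ret)
                mas_store_prealloc(&mas, entry); /* cannot fail after prealloc */
        mtree_unlock(mt);
        return ret;
}
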
@@ -5585,7 +5578,8 @@ void mas_destroy(struct ma_state *mas)
*/
if (mas->mas_flags & MA_STATE_REBALANCE) {
unsigned char end;
-
+ if (mas_is_err(mas))
+ mas_reset(mas);
mas_start(mas);
mtree_range_walk(mas);
end = mas->end + 1;
@@ -6245,24 +6239,32 @@ EXPORT_SYMBOL_GPL(mas_find_range_rev);
void *mas_erase(struct ma_state *mas)
{
void *entry;
+ unsigned long index = mas->index;
MA_WR_STATE(wr_mas, mas, NULL);
if (!mas_is_active(mas) || !mas_is_start(mas))
mas->status = ma_start;
- /* Retry unnecessary when holding the write lock. */
+write_retry:
entry = mas_state_walk(mas);
if (!entry)
return NULL;
-write_retry:
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
- mas_wr_store_setup(&wr_mas);
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(mas, GFP_KERNEL))
+ mas_wr_preallocate(&wr_mas, NULL);
+ if (mas_nomem(mas, GFP_KERNEL)) {
+ /* in case the range of entry changed when unlocked */
+ mas->index = mas->last = index;
goto write_retry;
+ }
+ if (mas_is_err(mas))
+ goto out;
+
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
@@ -6277,10 +6279,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
- if (likely(mas->node != MA_ERROR(-ENOMEM))) {
- mas_destroy(mas);
+ if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
- }
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
@@ -6357,7 +6357,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(mas, mt, index, last);
- MA_WR_STATE(wr_mas, &mas, entry);
+ int ret = 0;
trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
@@ -6367,16 +6367,10 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
return -EINVAL;
mtree_lock(mt);
-retry:
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(&mas, gfp))
- goto retry;
-
+ ret = mas_store_gfp(&mas, entry, gfp);
mtree_unlock(mt);
- if (mas_is_err(&mas))
- return xa_err(mas.node);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mtree_store_range);
@@ -6412,6 +6406,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
+ int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -6427,9 +6422,10 @@ retry:
mtree_unlock(mt);
if (mas_is_err(&ms))
- return xa_err(ms.node);
+ ret = xa_err(ms.node);
- return 0;
+ mas_destroy(&ms);
+ return ret;
}
EXPORT_SYMBOL(mtree_insert_range);
@@ -6484,6 +6480,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
@@ -6565,6 +6562,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
@@ -6997,6 +6995,19 @@ void mt_set_non_kernel(unsigned int val)
kmem_cache_set_non_kernel(maple_node_cache, val);
}
+extern void kmem_cache_set_callback(struct kmem_cache *cachep,
+ void (*callback)(void *));
+void mt_set_callback(void (*callback)(void *))
+{
+ kmem_cache_set_callback(maple_node_cache, callback);
+}
+
+extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
+void mt_set_private(void *private)
+{
+ kmem_cache_set_private(maple_node_cache, private);
+}
+
extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
@@ -7181,7 +7192,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
- bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
@@ -7215,19 +7225,22 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
break;
if (last == 0 && i > 0)
break;
- if (leaf)
- mt_dump_entry(mt_slot(mt, node->slot, i),
- first, last, depth + 1, format);
- else if (node->slot[i])
+ if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ switch(format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
- break;
+ break;
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
}
first = last + 1;
}
@@ -7627,6 +7640,40 @@ void mas_dump(const struct ma_state *mas)
break;
}
+ pr_err("Store Type: ");
+ switch (mas->store_type) {
+ case wr_invalid:
+ pr_err("invalid store type\n");
+ break;
+ case wr_new_root:
+ pr_err("new_root\n");
+ break;
+ case wr_store_root:
+ pr_err("store_root\n");
+ break;
+ case wr_exact_fit:
+ pr_err("exact_fit\n");
+ break;
+ case wr_split_store:
+ pr_err("split_store\n");
+ break;
+ case wr_slot_store:
+ pr_err("slot_store\n");
+ break;
+ case wr_append:
+ pr_err("append\n");
+ break;
+ case wr_node_store:
+ pr_err("node_store\n");
+ break;
+ case wr_spanning_store:
+ pr_err("spanning_store\n");
+ break;
+ case wr_rebalance:
+ pr_err("rebalance\n");
+ break;
+ }
+
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 91fcdb0c9efe..3ef11305f8d2 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -5,5 +5,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
+obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 191761b1b623..5faa29208bdb 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -186,55 +186,84 @@ EXPORT_SYMBOL(iter_div_u64_rem);
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
- u64 res = 0, div, rem;
- int shift;
+ if (ilog2(a) + ilog2(b) <= 62)
+ return div64_u64(a * b, c);
- /* can a * b overflow ? */
- if (ilog2(a) + ilog2(b) > 62) {
- /*
- * Note that the algorithm after the if block below might lose
- * some precision and the result is more exact for b > a. So
- * exchange a and b if a is bigger than b.
- *
- * For example with a = 43980465100800, b = 100000000, c = 1000000000
- * the below calculation doesn't modify b at all because div == 0
- * and then shift becomes 45 + 26 - 62 = 9 and so the result
- * becomes 4398035251080. However with a and b swapped the exact
- * result is calculated (i.e. 4398046510080).
- */
- if (a > b)
- swap(a, b);
+#if defined(__SIZEOF_INT128__)
+
+ /* native 64x64=128 bits multiplication */
+ u128 prod = (u128)a * b;
+ u64 n_lo = prod, n_hi = prod >> 64;
+
+#else
+
+ /* perform a 64x64=128 bits multiplication manually */
+ u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+ u64 x, y, z;
+
+ x = (u64)a_lo * b_lo;
+ y = (u64)a_lo * b_hi + (u32)(x >> 32);
+ z = (u64)a_hi * b_hi + (u32)(y >> 32);
+ y = (u64)a_hi * b_lo + (u32)y;
+ z += (u32)(y >> 32);
+ x = (y << 32) + (u32)x;
+
+ u64 n_lo = x, n_hi = z;
+
+#endif
+
+ /* make sure c is not zero, trigger exception otherwise */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdiv-by-zero"
+ if (unlikely(c == 0))
+ return 1/0;
+#pragma GCC diagnostic pop
+
+ int shift = __builtin_ctzll(c);
+ /* try reducing the fraction in case the dividend becomes <= 64 bits */
+ if ((n_hi >> shift) == 0) {
+ u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
+
+ return div64_u64(n, c >> shift);
/*
- * (b * a) / c is equal to
- *
- * (b / c) * a +
- * (b % c) * a / c
- *
- * if nothing overflows. Can the 1st multiplication
- * overflow? Yes, but we do not care: this can only
- * happen if the end result can't fit in u64 anyway.
- *
- * So the code below does
- *
- * res = (b / c) * a;
- * b = b % c;
+ * The remainder value if needed would be:
+ * res = div64_u64_rem(n, c >> shift, &rem);
+ * rem = (rem << shift) + (n_lo - (n << shift));
*/
- div = div64_u64_rem(b, c, &rem);
- res = div * a;
- b = rem;
-
- shift = ilog2(a) + ilog2(b) - 62;
- if (shift > 0) {
- /* drop precision */
- b >>= shift;
- c >>= shift;
- if (!c)
- return res;
- }
}
- return res + div64_u64(a * b, c);
+ if (n_hi >= c) {
+ /* overflow: result is unrepresentable in a u64 */
+ return -1;
+ }
+
+ /* Do the full 128 by 64 bits division */
+
+ shift = __builtin_clzll(c);
+ c <<= shift;
+
+ int p = 64 + shift;
+ u64 res = 0;
+ bool carry;
+
+ do {
+ carry = n_hi >> 63;
+ shift = carry ? 1 : __builtin_clzll(n_hi);
+ if (p < shift)
+ break;
+ p -= shift;
+ n_hi <<= shift;
+ n_hi |= n_lo >> (64 - shift);
+ n_lo <<= shift;
+ if (carry || (n_hi >= c)) {
+ n_hi -= c;
+ res |= 1ULL << p;
+ }
+ } while (n_hi);
+ /* The remainder value if needed would be n_hi << p */
+
+ return res;
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
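
With the rewrite above, the helper either computes the exact floor(a * b / c) via a full 128-by-64 division or returns ~0 when the quotient cannot fit in a u64. A typical use is exact fixed-point scaling where the intermediate product may exceed 64 bits; a minimal sketch with illustrative names:

#include <linux/math64.h>
#include <linux/time64.h>

/* Convert a raw tick count to nanoseconds without intermediate
 * overflow, even when ticks * NSEC_PER_SEC exceeds 64 bits. */
static u64 ticks_to_ns(u64 ticks, u64 rate_hz)
{
        return mul_u64_u64_div_u64(ticks, NSEC_PER_SEC, rate_hz);
}
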
diff --git a/lib/math/test_mul_u64_u64_div_u64.c b/lib/math/test_mul_u64_u64_div_u64.c
new file mode 100644
index 000000000000..58d058de4e73
--- /dev/null
+++ b/lib/math/test_mul_u64_u64_div_u64.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 BayLibre SAS
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/math64.h>
+
+typedef struct { u64 a; u64 b; u64 c; u64 result; } test_params;
+
+static test_params test_values[] = {
+/* this contains many edge values followed by a couple random values */
+{ 0xb, 0x7, 0x3, 0x19 },
+{ 0xffff0000, 0xffff0000, 0xf, 0x1110eeef00000000 },
+{ 0xffffffff, 0xffffffff, 0x1, 0xfffffffe00000001 },
+{ 0xffffffff, 0xffffffff, 0x2, 0x7fffffff00000000 },
+{ 0x1ffffffff, 0xffffffff, 0x2, 0xfffffffe80000000 },
+{ 0x1ffffffff, 0xffffffff, 0x3, 0xaaaaaaa9aaaaaaab },
+{ 0x1ffffffff, 0x1ffffffff, 0x4, 0xffffffff00000000 },
+{ 0xffff000000000000, 0xffff000000000000, 0xffff000000000001, 0xfffeffffffffffff },
+{ 0x3333333333333333, 0x3333333333333333, 0x5555555555555555, 0x1eb851eb851eb851 },
+{ 0x7fffffffffffffff, 0x2, 0x3, 0x5555555555555554 },
+{ 0xffffffffffffffff, 0x2, 0x8000000000000000, 0x3 },
+{ 0xffffffffffffffff, 0x2, 0xc000000000000000, 0x2 },
+{ 0xffffffffffffffff, 0x4000000000000004, 0x8000000000000000, 0x8000000000000007 },
+{ 0xffffffffffffffff, 0x4000000000000001, 0x8000000000000000, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000001 },
+{ 0xfffffffffffffffe, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000000 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffe, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffd, 0x8000000000000002 },
+{ 0x7fffffffffffffff, 0xffffffffffffffff, 0xc000000000000000, 0xaaaaaaaaaaaaaaa8 },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0xa000000000000000, 0xccccccccccccccca },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0x9000000000000000, 0xe38e38e38e38e38b },
+{ 0x7fffffffffffffff, 0x7fffffffffffffff, 0x5000000000000000, 0xccccccccccccccc9 },
+{ 0xffffffffffffffff, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfffffffffffffffe },
+{ 0xe6102d256d7ea3ae, 0x70a77d0be4c31201, 0xd63ec35ab3220357, 0x78f8bf8cc86c6e18 },
+{ 0xf53bae05cb86c6e1, 0x3847b32d2f8d32e0, 0xcfd4f55a647f403c, 0x42687f79d8998d35 },
+{ 0x9951c5498f941092, 0x1f8c8bfdf287a251, 0xa3c8dc5f81ea3fe2, 0x1d887cb25900091f },
+{ 0x374fee9daa1bb2bb, 0x0d0bfbff7b8ae3ef, 0xc169337bd42d5179, 0x03bb2dbaffcbb961 },
+{ 0xeac0d03ac10eeaf0, 0x89be05dfa162ed9b, 0x92bb1679a41f0e4b, 0xdc5f5cc9e270d216 },
+};
+
+/*
+ * The above table can be verified with the following shell script:
+ *
+ * #!/bin/sh
+ * sed -ne 's/^{ \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\) },$/\1 \2 \3 \4/p' \
+ * lib/math/test_mul_u64_u64_div_u64.c |
+ * while read a b c r; do
+ * expected=$( printf "obase=16; ibase=16; %X * %X / %X\n" $a $b $c | bc )
+ * given=$( printf "%X\n" $r )
+ * if [ "$expected" = "$given" ]; then
+ * echo "$a * $b / $c = $r OK"
+ * else
+ * echo "$a * $b / $c = $r is wrong" >&2
+ * echo "should be equivalent to 0x$expected" >&2
+ * exit 1
+ * fi
+ * done
+ */
+
+static int __init test_init(void)
+{
+ int i;
+
+ pr_info("Starting mul_u64_u64_div_u64() test\n");
+
+ for (i = 0; i < ARRAY_SIZE(test_values); i++) {
+ u64 a = test_values[i].a;
+ u64 b = test_values[i].b;
+ u64 c = test_values[i].c;
+ u64 expected_result = test_values[i].result;
+ u64 result = mul_u64_u64_div_u64(a, b, c);
+
+ if (result != expected_result) {
+ pr_err("ERROR: 0x%016llx * 0x%016llx / 0x%016llx\n", a, b, c);
+ pr_err("ERROR: expected result: %016llx\n", expected_result);
+ pr_err("ERROR: obtained result: %016llx\n", result);
+ }
+ }
+
+ pr_info("Completed mul_u64_u64_div_u64() test\n");
+ return 0;
+}
+
+static void __exit test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mul_u64_u64_div_u64() test module");
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
new file mode 100644
index 000000000000..6a169123320a
--- /dev/null
+++ b/lib/math/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
diff --git a/lib/math/tests/int_pow_kunit.c b/lib/math/tests/int_pow_kunit.c
new file mode 100644
index 000000000000..34b33677d458
--- /dev/null
+++ b/lib/math/tests/int_pow_kunit.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/math.h>
+
+struct test_case_params {
+ u64 base;
+ unsigned int exponent;
+ u64 expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 64, 0, 1, "Power of zero" },
+ { 64, 1, 64, "Power of one"},
+ { 0, 5, 0, "Base zero" },
+ { 1, 64, 1, "Base one" },
+ { 2, 2, 4, "Two squared"},
+ { 2, 3, 8, "Two cubed"},
+ { 5, 5, 3125, "Five raised to the fifth power" },
+ { U64_MAX, 1, U64_MAX, "Max base" },
+ { 2, 63, 9223372036854775808ULL, "Large result"},
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(int_pow, params, get_desc);
+
+static void int_pow_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, int_pow(tc->base, tc->exponent));
+}
+
+static struct kunit_case math_int_pow_test_cases[] = {
+ KUNIT_CASE_PARAM(int_pow_test, int_pow_gen_params),
+ {}
+};
+
+static struct kunit_suite int_pow_test_suite = {
+ .name = "math-int_pow",
+ .test_cases = math_int_pow_test_cases,
+};
+
+kunit_test_suites(&int_pow_test_suite);
+
+MODULE_DESCRIPTION("math.int_pow KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 51bc5246986d..2891f94a11c6 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -209,7 +209,7 @@ int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
- fbc[i].counters = (void *)counters + (i * counter_size);
+ fbc[i].counters = (void __percpu *)counters + i * counter_size;
debug_percpu_counter_activate(&fbc[i]);
}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index dbbed19f8fff..6c902639728b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -189,7 +189,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
size = nbuckets;
- if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
+ if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5e2e93307f0d..d3412984170c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
{
unsigned long mask, word_mask;
- guard(spinlock_irqsave)(&map->swap_lock);
+ guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) {
if (depth == 0)
@@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
}
for (i = 0; i < sb->map_nr; i++)
- spin_lock_init(&sb->map[i].swap_lock);
+ raw_spin_lock_init(&sb->map[i].swap_lock);
return 0;
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7bc2220fea80..473b2646f71c 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -11,6 +11,7 @@
#include <linux/kmemleak.h>
#include <linux/bvec.h>
#include <linux/uio.h>
+#include <linux/folio_queue.h>
/**
* sg_next - return the next scatterlist entry in a list
@@ -1262,6 +1263,67 @@ static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
}
/*
+ * Extract up to sg_max folios from an FOLIOQ-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_folioq_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned int slot = iter->folioq_slot;
+ ssize_t ret = 0;
+ size_t offset = iter->iov_offset;
+
+ BUG_ON(!folioq);
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = folioq->next;
+ if (WARN_ON_ONCE(!folioq))
+ return 0;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t fsize = folioq_folio_size(folioq, slot);
+
+ if (offset < fsize) {
+ size_t part = umin(maxsize - ret, fsize - offset);
+
+ sg_set_page(sg, folio_page(folio, 0), part, offset);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+ offset += part;
+ ret += part;
+ }
+
+ if (offset >= fsize) {
+ offset = 0;
+ slot++;
+ if (slot >= folioq_nr_slots(folioq)) {
+ if (!folioq->next) {
+ WARN_ON_ONCE(ret < iter->count);
+ break;
+ }
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ } while (sg_max > 0 && ret < maxsize);
+
+ iter->folioq = folioq;
+ iter->folioq_slot = slot;
+ iter->iov_offset = offset;
+ iter->count -= ret;
+ return ret;
+}
+
+/*
* Extract up to sg_max folios from an XARRAY-type iterator and add them to
* the scatterlist. The pages are not pinned.
*/
@@ -1323,8 +1385,8 @@ static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
* addition of @sg_max elements.
*
* The pages referred to by UBUF- and IOVEC-type iterators are extracted and
- * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
- * and DISCARD-type are not supported.
+ * pinned; BVEC-, KVEC-, FOLIOQ- and XARRAY-type are extracted but aren't
+ * pinned; DISCARD-type is not supported.
*
* No end mark is placed on the scatterlist; that's left to the caller.
*
@@ -1356,6 +1418,9 @@ ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
case ITER_KVEC:
return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
extraction_flags);
+ case ITER_FOLIOQ:
+ return extract_folioq_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
case ITER_XARRAY:
return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
extraction_flags);
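
A hedged sketch of exercising the new ITER_FOLIOQ branch, assuming a populated folio_queue, an already-allocated sg_table, and that iov_iter_folio_queue() from the companion folio_queue series initializes the iterator with (direction, queue, first slot, offset, count); the wrapper name is illustrative.

static ssize_t example_folioq_to_sg(const struct folio_queue *fq, size_t len,
                                    struct sg_table *sgt, unsigned int sg_max)
{
        struct iov_iter iter;

        /* Describe the folio_queue contents as a source iterator. */
        iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, len);

        /* Folios are added to sgt without pinning, as documented above. */
        return extract_iter_to_sg(&iter, len, sgt, sg_max, 0);
}
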
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index e6667a28c014..6e3a1e5a7142 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/rcupdate.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -157,6 +158,34 @@ static void test_kmalloc_redzone_access(struct kunit *test)
kmem_cache_destroy(s);
}
+struct test_kfree_rcu_struct {
+ struct rcu_head rcu;
+};
+
+static void test_kfree_rcu(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+ struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kfree_rcu(p, rcu);
+ kmem_cache_destroy(s);
+
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
+static void test_leak_destroy(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ 64, SLAB_NO_MERGE);
+ kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_destroy(s);
+
+ KUNIT_EXPECT_EQ(test, 1, slab_errors);
+}
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -177,6 +206,8 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
+ KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_leak_destroy),
{}
};
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 69ba49b853c7..4f887aa62fa0 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -321,6 +321,9 @@ int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
{
char *out = dst;
+ if (!size)
+ size = SIZE_MAX;
+
while (*src && --size) {
if (src[0] == '\\' && src[1] != '\0' && size > 1) {
src++;
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6432b8c3e431..989a12a67872 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ src = masked_user_access_begin(src);
+ retval = do_strncpy_from_user(dst, src, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index feeb935a2299..6e489f9e90f1 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ str = masked_user_access_begin(str);
+ retval = do_strnlen_user(str, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ca4b0eea81a2..fa5edd6ef7f7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -15077,8 +15077,7 @@ static struct skb_segment_test skb_segment_tests[] __initconst = {
.build_skb = build_test_skb_linear_no_head_frag,
.features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
- NETIF_F_LLTX | NETIF_F_GRO |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_STAG_TX
}
};
diff --git a/lib/test_fortify/.gitignore b/lib/test_fortify/.gitignore
new file mode 100644
index 000000000000..c1ba37d14b50
--- /dev/null
+++ b/lib/test_fortify/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/*.log
diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile
new file mode 100644
index 000000000000..1c3f82ad8bb2
--- /dev/null
+++ b/lib/test_fortify/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := $(call cc-disable-warning,fortify-source)
+
+quiet_cmd_test_fortify = TEST $@
+ cmd_test_fortify = $(CONFIG_SHELL) $(src)/test_fortify.sh \
+ $< $@ "$(NM)" $(CC) $(c_flags) -DKBUILD_EXTRA_WARN1
+
+$(obj)/%.log: $(src)/%.c $(src)/test_fortify.sh FORCE
+ $(call if_changed_dep,test_fortify)
+
+logs = $(patsubst $(src)/%.c, %.log, $(wildcard $(src)/*-*.c))
+targets += $(logs)
+
+quiet_cmd_gen_fortify_log = CAT $@
+ cmd_gen_fortify_log = cat $(or $(real-prereqs),/dev/null) > $@
+
+$(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE
+ $(call if_changed,gen_fortify_log)
+
+# GCC<=7 does not always produce *.d files.
+# Run the tests only for GCC>=8 or Clang.
+always-$(call gcc-min-version, 80000) += test_fortify.log
+always-$(CONFIG_CC_IS_CLANG) += test_fortify.log
+
+# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
+# Pass CFLAGS_KASAN to avoid warnings.
+KASAN_SANITIZE := y
diff --git a/scripts/test_fortify.sh b/lib/test_fortify/test_fortify.sh
index c2688ab8281d..c2688ab8281d 100644
--- a/scripts/test_fortify.sh
+++ b/lib/test_fortify/test_fortify.sh
diff --git a/lib/test_fpu_glue.c b/lib/test_fpu_glue.c
index 074f30301f29..c0596426370a 100644
--- a/lib/test_fpu_glue.c
+++ b/lib/test_fpu_glue.c
@@ -42,7 +42,7 @@ static int __init test_fpu_init(void)
return -EINVAL;
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
- if (!selftest_dir)
+ if (IS_ERR(selftest_dir))
return -ENOMEM;
debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index ee20e1f9bae9..056f2e411d7b 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -799,10 +799,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
unsigned long mapped = 0;
int i;
- if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
- next = end;
- else
- next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+ next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));
ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
/*
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
index bfdb81599832..5a3f6961a70f 100644
--- a/lib/test_objpool.c
+++ b/lib/test_objpool.c
@@ -687,4 +687,5 @@ static void __exit ot_mod_exit(void)
module_init(ot_mod_init);
module_exit(ot_mod_exit);
-MODULE_LICENSE("GPL");
\ No newline at end of file
+MODULE_DESCRIPTION("Test module for lockless object pool");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 965cb6f28527..8448b6d02bd9 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -641,26 +641,12 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init page_type_test(unsigned int page_type, const char *name,
- char *cmp_buf)
-{
- unsigned long size;
-
- size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
- if (page_type_has_type(page_type))
- size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
-
- snprintf(cmp_buf + size, BUF_SIZE - size, ")");
- test(cmp_buf, "%pGt", &page_type);
-}
-
static void __init
flags(void)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- unsigned int page_type;
cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!cmp_buffer)
@@ -700,18 +686,6 @@ flags(void)
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
- page_type = ~0;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = 10;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = ~PG_buddy;
- page_type_test(page_type, "buddy", cmp_buffer);
-
- page_type = ~(PG_table | PG_buddy);
- page_type_test(page_type, "table|buddy", cmp_buffer);
-
kfree(cmp_buffer);
}
diff --git a/lib/union_find.c b/lib/union_find.c
new file mode 100644
index 000000000000..413b0f8adf7a
--- /dev/null
+++ b/lib/union_find.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/union_find.h>
+
+/**
+ * uf_find - Find the root of a node and perform path compression
+ * @node: the node to find the root of
+ *
+ * This function returns the root of the node by following the parent
+ * pointers. It also performs path compression, making the tree shallower.
+ *
+ * Returns the root node of the set containing node.
+ */
+struct uf_node *uf_find(struct uf_node *node)
+{
+ struct uf_node *parent;
+
+ while (node->parent != node) {
+ parent = node->parent;
+ node->parent = parent->parent;
+ node = parent;
+ }
+ return node;
+}
+
+/**
+ * uf_union - Merge two sets, using union by rank
+ * @node1: the first node
+ * @node2: the second node
+ *
+ * This function merges the sets containing node1 and node2, by comparing
+ * the ranks to keep the tree balanced.
+ */
+void uf_union(struct uf_node *node1, struct uf_node *node2)
+{
+ struct uf_node *root1 = uf_find(node1);
+ struct uf_node *root2 = uf_find(node2);
+
+ if (root1 == root2)
+ return;
+
+ if (root1->rank < root2->rank) {
+ root1->parent = root2;
+ } else if (root1->rank > root2->rank) {
+ root2->parent = root1;
+ } else {
+ root2->parent = root1;
+ root1->rank++;
+ }
+}
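
A minimal usage sketch for the new API, assuming each node starts as a singleton set with its parent pointing at itself and rank zero, which is the initial state uf_find()'s termination condition relies on:

#include <linux/bug.h>
#include <linux/union_find.h>

static void example_union_find(void)
{
        struct uf_node nodes[3];
        int i;

        /* Every node begins as the root of its own one-element set. */
        for (i = 0; i < 3; i++) {
                nodes[i].parent = &nodes[i];
                nodes[i].rank = 0;
        }

        uf_union(&nodes[0], &nodes[1]);

        /* nodes[0] and nodes[1] now share a root; nodes[2] does not. */
        WARN_ON(uf_find(&nodes[0]) != uf_find(&nodes[1]));
        WARN_ON(uf_find(&nodes[0]) == uf_find(&nodes[2]));
}
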
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index 9f031eafc465..cedbf15f8087 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -4,6 +4,7 @@ GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+c-getrandom-$(CONFIG_VDSO_GETRANDOM) := $(addprefix $(GENERIC_VDSO_DIR), getrandom.c)
# This cmd checks that the vdso library does not contain dynamic relocations.
# It has to be called after the linking of the vdso library and requires it
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
index e1db228bc4f0..938ca539aaa6 100644
--- a/lib/vdso/getrandom.c
+++ b/lib/vdso/getrandom.c
@@ -3,15 +3,19 @@
* Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#include <linux/cache.h>
-#include <linux/kernel.h>
-#include <linux/time64.h>
+#include <linux/array_size.h>
+#include <linux/minmax.h>
#include <vdso/datapage.h>
#include <vdso/getrandom.h>
+#include <vdso/unaligned.h>
#include <asm/vdso/getrandom.h>
-#include <asm/vdso/vsyscall.h>
-#include <asm/unaligned.h>
#include <uapi/linux/mman.h>
+#include <uapi/linux/random.h>
+
+#undef PAGE_SIZE
+#undef PAGE_MASK
+#define PAGE_SIZE (1UL << CONFIG_PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
#define MEMCPY_AND_ZERO_SRC(type, dst, src, len) do { \
while (len >= sizeof(type)) { \
@@ -68,16 +72,17 @@ __cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_
struct vgetrandom_state *state = opaque_state;
size_t batch_len, nblocks, orig_len = len;
bool in_use, have_retried = false;
- unsigned long current_generation;
void *orig_buffer = buffer;
+ u64 current_generation;
u32 counter[2] = { 0 };
if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags)) {
- *(struct vgetrandom_opaque_params *)opaque_state = (struct vgetrandom_opaque_params) {
- .size_of_opaque_state = sizeof(*state),
- .mmap_prot = PROT_READ | PROT_WRITE,
- .mmap_flags = MAP_DROPPABLE | MAP_ANONYMOUS
- };
+ struct vgetrandom_opaque_params *params = opaque_state;
+ params->size_of_opaque_state = sizeof(*state);
+ params->mmap_prot = PROT_READ | PROT_WRITE;
+ params->mmap_flags = MAP_DROPPABLE | MAP_ANONYMOUS;
+ for (size_t i = 0; i < ARRAY_SIZE(params->reserved); ++i)
+ params->reserved[i] = 0;
return 0;
}
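
A userspace-side sketch of the probe branch above, assuming vgetrandom() is the resolved __vdso_getrandom entry point: calling it with opaque_len == ~0UL and NULL/0 for the other arguments fills in the allocation parameters for the per-thread state instead of generating bytes.

static int example_probe_state_params(struct vgetrandom_opaque_params *params)
{
        return vgetrandom(NULL, 0, 0, params, ~0UL);
}

/* params->size_of_opaque_state bytes should then be mapped with
 * params->mmap_prot and params->mmap_flags for each opaque state. */
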
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2d71b1115916..09f022ba1c05 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2054,25 +2054,6 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
return buf;
}
-static
-char *format_page_type(char *buf, char *end, unsigned int page_type)
-{
- buf = number(buf, end, page_type, default_flag_spec);
-
- if (buf < end)
- *buf = '(';
- buf++;
-
- if (page_type_has_type(page_type))
- buf = format_flags(buf, end, ~page_type, pagetype_names);
-
- if (buf < end)
- *buf = ')';
- buf++;
-
- return buf;
-}
-
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
@@ -2086,8 +2067,6 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
- case 't':
- return format_page_type(buf, end, *(unsigned int *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index aef086a6bf2f..20aa459bfb3e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -5,7 +5,8 @@ config XZ_DEC
help
LZMA2 compression algorithm and BCJ filters are supported using
the .xz file format as the container. For integrity checking,
- CRC32 is supported. See Documentation/staging/xz.rst for more information.
+ CRC32 is supported. See Documentation/staging/xz.rst for more
+ information.
if XZ_DEC
@@ -29,11 +30,21 @@ config XZ_DEC_ARMTHUMB
default y
select XZ_DEC_BCJ
+config XZ_DEC_ARM64
+ bool "ARM64 BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_SPARC
bool "SPARC BCJ filter decoder" if EXPERT
default y
select XZ_DEC_BCJ
+config XZ_DEC_RISCV
+ bool "RISC-V BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_MICROLZMA
bool "MicroLZMA decoder"
default n
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 88a2c35e1b59..6a7906a328ba 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -27,9 +26,9 @@
STATIC_RW_DATA uint32_t xz_crc32_table[256];
-XZ_EXTERN void xz_crc32_init(void)
+void xz_crc32_init(void)
{
- const uint32_t poly = CRC32_POLY_LE;
+ const uint32_t poly = 0xEDB88320;
uint32_t i;
uint32_t j;
@@ -46,7 +45,7 @@ XZ_EXTERN void xz_crc32_init(void)
return;
}
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
crc = ~crc;
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index ef449e97d1a1..8237db17eee3 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -24,7 +23,9 @@ struct xz_dec_bcj {
BCJ_IA64 = 6, /* Big or little endian */
BCJ_ARM = 7, /* Little endian only */
BCJ_ARMTHUMB = 8, /* Little endian only */
- BCJ_SPARC = 9 /* Big or little endian */
+ BCJ_SPARC = 9, /* Big or little endian */
+ BCJ_ARM64 = 10, /* AArch64 */
+ BCJ_RISCV = 11 /* RV32GQC_Zfh, RV64GQC_Zfh */
} type;
/*
@@ -162,7 +163,9 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr & 0xFC000003) == 0x48000001) {
instr &= 0x03FFFFFC;
@@ -219,7 +222,9 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
/* Instruction normalized with bit_res for easier manipulation */
uint64_t norm;
- for (i = 0; i + 16 <= size; i += 16) {
+ size &= ~(size_t)15;
+
+ for (i = 0; i < size; i += 16) {
mask = branch_table[buf[i] & 0x1F];
for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
if (((mask >> slot) & 1) == 0)
@@ -267,7 +272,9 @@ static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
if (buf[i + 3] == 0xEB) {
addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
| ((uint32_t)buf[i + 2] << 16);
@@ -290,7 +297,12 @@ static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 2) {
+ if (size < 4)
+ return 0;
+
+ size -= 4;
+
+ for (i = 0; i <= size; i += 2) {
if ((buf[i + 1] & 0xF8) == 0xF0
&& (buf[i + 3] & 0xF8) == 0xF8) {
addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
@@ -318,7 +330,9 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
instr <<= 2;
@@ -334,6 +348,140 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
}
#endif
+#ifdef XZ_DEC_ARM64
+static size_t bcj_arm64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t instr;
+ uint32_t addr;
+
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
+ instr = get_unaligned_le32(buf + i);
+
+ if ((instr >> 26) == 0x25) {
+ /* BL instruction */
+ addr = instr - ((s->pos + (uint32_t)i) >> 2);
+ instr = 0x94000000 | (addr & 0x03FFFFFF);
+ put_unaligned_le32(instr, buf + i);
+
+ } else if ((instr & 0x9F000000) == 0x90000000) {
+ /* ADRP instruction */
+ addr = ((instr >> 29) & 3) | ((instr >> 3) & 0x1FFFFC);
+
+ /* Only convert values in the range +/-512 MiB. */
+ if ((addr + 0x020000) & 0x1C0000)
+ continue;
+
+ addr -= (s->pos + (uint32_t)i) >> 12;
+
+ instr &= 0x9000001F;
+ instr |= (addr & 3) << 29;
+ instr |= (addr & 0x03FFFC) << 3;
+ instr |= (0U - (addr & 0x020000)) & 0xE00000;
+
+ put_unaligned_le32(instr, buf + i);
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_RISCV
+static size_t bcj_riscv(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t b1;
+ uint32_t b2;
+ uint32_t b3;
+ uint32_t instr;
+ uint32_t instr2;
+ uint32_t instr2_rs1;
+ uint32_t addr;
+
+ if (size < 8)
+ return 0;
+
+ size -= 8;
+
+ for (i = 0; i <= size; i += 2) {
+ instr = buf[i];
+
+ if (instr == 0xEF) {
+ /* JAL */
+ b1 = buf[i + 1];
+ if ((b1 & 0x0D) != 0)
+ continue;
+
+ b2 = buf[i + 2];
+ b3 = buf[i + 3];
+
+ addr = ((b1 & 0xF0) << 13) | (b2 << 9) | (b3 << 1);
+ addr -= s->pos + (uint32_t)i;
+
+ buf[i + 1] = (uint8_t)((b1 & 0x0F)
+ | ((addr >> 8) & 0xF0));
+
+ buf[i + 2] = (uint8_t)(((addr >> 16) & 0x0F)
+ | ((addr >> 7) & 0x10)
+ | ((addr << 4) & 0xE0));
+
+ buf[i + 3] = (uint8_t)(((addr >> 4) & 0x7F)
+ | ((addr >> 13) & 0x80));
+
+ i += 4 - 2;
+
+ } else if ((instr & 0x7F) == 0x17) {
+ /* AUIPC */
+ instr |= (uint32_t)buf[i + 1] << 8;
+ instr |= (uint32_t)buf[i + 2] << 16;
+ instr |= (uint32_t)buf[i + 3] << 24;
+
+ if (instr & 0xE80) {
+ /* AUIPC's rd doesn't equal x0 or x2. */
+ instr2 = get_unaligned_le32(buf + i + 4);
+
+ if (((instr << 8) ^ (instr2 - 3)) & 0xF8003) {
+ i += 6 - 2;
+ continue;
+ }
+
+ addr = (instr & 0xFFFFF000) + (instr2 >> 20);
+
+ instr = 0x17 | (2 << 7) | (instr2 << 12);
+ instr2 = addr;
+ } else {
+ /* AUIPC's rd equals x0 or x2. */
+ instr2_rs1 = instr >> 27;
+
+ if ((uint32_t)((instr - 0x3117) << 18)
+ >= (instr2_rs1 & 0x1D)) {
+ i += 4 - 2;
+ continue;
+ }
+
+ addr = get_unaligned_be32(buf + i + 4);
+ addr -= s->pos + (uint32_t)i;
+
+ instr2 = (instr >> 12) | (addr << 20);
+
+ instr = 0x17 | (instr2_rs1 << 7)
+ | ((addr + 0x800) & 0xFFFFF000);
+ }
+
+ put_unaligned_le32(instr, buf + i);
+ put_unaligned_le32(instr2, buf + i + 4);
+
+ i += 8 - 2;
+ }
+ }
+
+ return i;
+}
+#endif
+
/*
* Apply the selected BCJ filter. Update *pos and s->pos to match the amount
* of data that got filtered.
@@ -381,6 +529,16 @@ static void bcj_apply(struct xz_dec_bcj *s,
filtered = bcj_sparc(s, buf, size);
break;
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+ filtered = bcj_arm64(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+ filtered = bcj_riscv(s, buf, size);
+ break;
+#endif
default:
/* Never reached but silence compiler warnings. */
filtered = 0;
@@ -414,9 +572,8 @@ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
* data in chunks of 1-16 bytes. To hide this issue, this function does
* some buffering.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b)
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b)
{
size_t out_start;
@@ -524,7 +681,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
return s->ret;
}
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s != NULL)
@@ -533,7 +690,7 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
{
switch (id) {
#ifdef XZ_DEC_X86
@@ -554,6 +711,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
#ifdef XZ_DEC_SPARC
case BCJ_SPARC:
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+#endif
break;
default:
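
The ARM64 BL case above works on word-aligned 32-bit instructions: compression replaced the 26-bit PC-relative word offset with an absolute word address, so decoding subtracts the instruction's stream position (in words) to restore the relative form. A minimal standalone sketch of just that decode step, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Decode one filtered BL: imm26 holds an absolute word address; convert
 * it back to PC-relative by subtracting the instruction's word position. */
static uint32_t bl_unfilter(uint32_t instr, uint32_t stream_pos)
{
	uint32_t addr = instr - (stream_pos >> 2);

	return 0x94000000 | (addr & 0x03FFFFFF);
}

int main(void)
{
	/* BL whose filtered imm26 is absolute word address 0x1000,
	 * located at byte offset 0x400 in the uncompressed stream. */
	uint32_t filtered = 0x94000000 | 0x1000;

	printf("0x%08X\n", bl_unfilter(filtered, 0x400)); /* 0x94000F00 */
	return 0;
}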
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 27ce34520e78..83bb66b6016d 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -961,8 +960,7 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
* Take care of the LZMA2 control layer, and forward the job of actual LZMA
* decoding or copying of uncompressed chunks to other functions.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b)
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
uint32_t tmp;
@@ -1138,8 +1136,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
return XZ_OK;
}
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max)
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -1162,7 +1159,7 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
/* This limits dictionary size to 3 GiB to keep parsing simpler. */
if (props > 39)
@@ -1198,7 +1195,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
return XZ_OK;
}
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
if (DEC_IS_MULTI(s->dict.mode))
vfree(s->dict.buf);
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index 683570b93a8c..f9d003684d56 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* .xz Stream decoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -747,7 +746,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
* actually succeeds (that's the price to pay of using the output buffer as
* the workspace).
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
{
size_t in_start;
size_t out_start;
@@ -783,7 +782,7 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
return ret;
}
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -813,7 +812,7 @@ error_bcj:
return NULL;
}
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+void xz_dec_reset(struct xz_dec *s)
{
s->sequence = SEQ_STREAM_HEADER;
s->allow_buf_error = false;
@@ -825,7 +824,7 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
s->temp.size = STREAM_HEADER_SIZE;
}
-XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+void xz_dec_end(struct xz_dec *s)
{
if (s != NULL) {
xz_dec_lzma2_end(s->lzma2);
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
index 61098c67a413..f40817d65897 100644
--- a/lib/xz/xz_dec_syms.c
+++ b/lib/xz/xz_dec_syms.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder module information
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/module.h>
@@ -23,11 +22,6 @@ EXPORT_SYMBOL(xz_dec_microlzma_end);
#endif
MODULE_DESCRIPTION("XZ decompressor");
-MODULE_VERSION("1.1");
+MODULE_VERSION("1.2");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
index da28a19d6c98..53d3600f2ddb 100644
--- a/lib/xz/xz_dec_test.c
+++ b/lib/xz/xz_dec_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder tester
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/kernel.h>
@@ -212,9 +211,4 @@ module_exit(xz_dec_test_exit);
MODULE_DESCRIPTION("XZ decompressor tester");
MODULE_VERSION("1.0");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 92d852d4f87a..d2632b7dfb9c 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_LZMA2_H
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index bf1e94ec7873..5f1294a1408c 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Private includes and definitions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_PRIVATE_H
@@ -37,6 +36,12 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
+# ifdef CONFIG_XZ_DEC_ARM64
+# define XZ_DEC_ARM64
+# endif
+# ifdef CONFIG_XZ_DEC_RISCV
+# define XZ_DEC_RISCV
+# endif
# ifdef CONFIG_XZ_DEC_MICROLZMA
# define XZ_DEC_MICROLZMA
# endif
@@ -98,23 +103,19 @@
*/
#ifndef XZ_DEC_BCJ
# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
- || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+ || defined(XZ_DEC_IA64) \
|| defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
- || defined(XZ_DEC_SPARC)
+ || defined(XZ_DEC_SPARC) || defined(XZ_DEC_ARM64) \
+ || defined(XZ_DEC_RISCV)
# define XZ_DEC_BCJ
# endif
#endif
-#ifndef CRC32_POLY_LE
-#define CRC32_POLY_LE 0xedb88320
-#endif
-
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
*/
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max);
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max);
/*
* Decode the LZMA2 properties (one byte) and reset the decoder. Return
@@ -122,22 +123,20 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
* big enough, and XZ_OPTIONS_ERROR if props indicates something that this
* decoder doesn't support.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
- uint8_t props);
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props);
/* Decode raw LZMA2 stream from b->in to b->out. */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b);
/* Free the memory allocated for the LZMA2 decoder. */
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
#ifdef XZ_DEC_BCJ
/*
* Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
* calling xz_dec_bcj_run().
*/
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
/*
* Decode the Filter ID of a BCJ filter. This implementation doesn't
@@ -145,16 +144,15 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
* is needed. Returns XZ_OK if the given Filter ID is supported.
* Otherwise XZ_OPTIONS_ERROR is returned.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
/*
* Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
* a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
* must be called directly.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b);
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b);
/* Free the memory allocated for the BCJ filters. */
#define xz_dec_bcj_end(s) kfree(s)
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 430bb3a0d195..55f9f6f94b78 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Definitions for handling the .xz file format
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_STREAM_H
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
index f620cafca633..16bb995bc6c4 100644
--- a/lib/zstd/compress/zstd_compress.c
+++ b/lib/zstd/compress/zstd_compress.c
@@ -4810,6 +4810,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
dictLoadMethod, cctxParams.cParams,
cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
+ if (!cdict)
+ return NULL;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
index 04e1b5c01d9b..bd8784449b31 100644
--- a/lib/zstd/zstd_compress_module.c
+++ b/lib/zstd/zstd_compress_module.c
@@ -66,6 +66,12 @@ int zstd_max_clevel(void)
}
EXPORT_SYMBOL(zstd_max_clevel);
+int zstd_default_clevel(void)
+{
+ return ZSTD_defaultCLevel();
+}
+EXPORT_SYMBOL(zstd_default_clevel);
+
size_t zstd_compress_bound(size_t src_size)
{
return ZSTD_compressBound(src_size);
@@ -79,6 +85,13 @@ zstd_parameters zstd_get_params(int level,
}
EXPORT_SYMBOL(zstd_get_params);
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size)
+{
+ return ZSTD_getCParams(level, estimated_src_size, dict_size);
+}
+EXPORT_SYMBOL(zstd_get_cparams);
+
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
@@ -93,6 +106,33 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
}
EXPORT_SYMBOL(zstd_init_cctx);
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cctx_advanced);
+
+size_t zstd_free_cctx(zstd_cctx *cctx)
+{
+ return ZSTD_freeCCtx(cctx);
+}
+EXPORT_SYMBOL(zstd_free_cctx);
+
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, cparams, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cdict_byreference);
+
+size_t zstd_free_cdict(zstd_cdict *cdict)
+{
+ return ZSTD_freeCDict(cdict);
+}
+EXPORT_SYMBOL(zstd_free_cdict);
+
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters)
{
@@ -101,6 +141,15 @@ size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_compress_cctx);
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const ZSTD_CDict *cdict)
+{
+ return ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
+ src, src_size, cdict);
+}
+EXPORT_SYMBOL(zstd_compress_using_cdict);
+
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCStreamSize_usingCParams(*cparams);
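
A hedged usage sketch (not from the patch) showing how the dictionary wrappers exported above fit together; error handling is abbreviated and zstd_is_error() checking of the returned size is left to the caller:

static size_t compress_with_cdict(const void *dict, size_t dict_size,
				  const void *src, size_t src_size,
				  void *dst, size_t dst_capacity,
				  zstd_custom_mem mem)
{
	zstd_compression_parameters cparams;
	zstd_cdict *cdict;
	zstd_cctx *cctx;
	size_t ret = 0;

	cparams = zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);
	cdict = zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
	cctx = zstd_create_cctx_advanced(mem);
	if (cdict && cctx)
		ret = zstd_compress_using_cdict(cctx, dst, dst_capacity,
						src, src_size, cdict);

	zstd_free_cctx(cctx);	/* both free helpers tolerate NULL */
	zstd_free_cdict(cdict);
	return ret;
}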
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
index f4ed952ed485..469fc3059be0 100644
--- a/lib/zstd/zstd_decompress_module.c
+++ b/lib/zstd/zstd_decompress_module.c
@@ -44,6 +44,33 @@ size_t zstd_dctx_workspace_bound(void)
}
EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_dctx_advanced);
+
+size_t zstd_free_dctx(zstd_dctx *dctx)
+{
+ return ZSTD_freeDCtx(dctx);
+}
+EXPORT_SYMBOL(zstd_free_dctx);
+
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_ddict_byreference);
+
+size_t zstd_free_ddict(zstd_ddict *ddict)
+{
+ return ZSTD_freeDDict(ddict);
+}
+EXPORT_SYMBOL(zstd_free_ddict);
+
zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
@@ -59,6 +86,15 @@ size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_decompress_dctx);
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void* src, size_t src_size,
+ const zstd_ddict* ddict)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dst_capacity, src,
+ src_size, ddict);
+}
+EXPORT_SYMBOL(zstd_decompress_using_ddict);
+
size_t zstd_dstream_workspace_bound(size_t max_window_size)
{
return ZSTD_estimateDStreamSize(max_window_size);
diff --git a/mm/Kconfig b/mm/Kconfig
index b72e7d040f78..09aebca1cae3 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,7 +128,7 @@ config ZSWAP_COMPRESSOR_DEFAULT
choice
prompt "Default allocator"
depends on ZSWAP
- default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
+ default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
default ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Selects the default allocator for the compressed cache for
@@ -146,15 +146,17 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Use the zbud allocator as the default allocator.
-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
- bool "z3fold"
- select Z3FOLD
+config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
+ bool "z3foldi (DEPRECATED)"
+ select Z3FOLD_DEPRECATED
help
Use the z3fold allocator as the default allocator.
+ Deprecated and scheduled for removal in a few cycles,
+ see CONFIG_Z3FOLD_DEPRECATED.
+
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
- depends on HAVE_ZSMALLOC
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
@@ -164,7 +166,7 @@ config ZSWAP_ZPOOL_DEFAULT
string
depends on ZSWAP
default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
- default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+ default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
default ""
@@ -178,24 +180,29 @@ config ZBUD
deterministic reclaim properties that make it preferable to a higher
density approach when reclaim will be used.
-config Z3FOLD
- tristate "3:1 compression allocator (z3fold)"
+config Z3FOLD_DEPRECATED
+ tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
depends on ZSWAP
help
+ Deprecated and scheduled for removal in a few cycles. If you have
+ a good reason for using Z3FOLD over ZSMALLOC, please contact
+ linux-mm@kvack.org and the zswap maintainers.
+
A special purpose allocator for storing compressed pages.
It is designed to store up to three compressed pages per physical
page. It is a ZBUD derivative so the simplicity and determinism are
still there.
-config HAVE_ZSMALLOC
- def_bool y
- depends on MMU
- depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
+config Z3FOLD
+ tristate
+ default y if Z3FOLD_DEPRECATED=y
+ default m if Z3FOLD_DEPRECATED=m
+ depends on Z3FOLD_DEPRECATED
config ZSMALLOC
tristate
- prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
- depends on HAVE_ZSMALLOC
+ prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
+ depends on MMU
help
zsmalloc is a slab-based memory allocator designed to store
pages of various compression levels efficiently. It achieves
@@ -585,17 +592,21 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
-config SPLIT_PTLOCK_CPUS
- int
- default "999999" if !MMU
- default "999999" if ARM && !CPU_CACHE_VIPT
- default "999999" if PARISC && !PA20
- default "999999" if SPARC32
- default "4"
+config SPLIT_PTE_PTLOCKS
+ def_bool y
+ depends on MMU
+ depends on NR_CPUS >= 4
+ depends on !ARM || CPU_CACHE_VIPT
+ depends on !PARISC || PA20
+ depends on !SPARC32
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
bool
+config SPLIT_PMD_PTLOCKS
+ def_bool y
+ depends on SPLIT_PTE_PTLOCKS && ARCH_ENABLE_SPLIT_PMD_PTLOCK
+
#
# support for memory balloon
config MEMORY_BALLOON
@@ -877,6 +888,19 @@ endif # TRANSPARENT_HUGEPAGE
config PGTABLE_HAS_HUGE_LEAVES
def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
+# TODO: Allow to be enabled without THP
+config ARCH_SUPPORTS_HUGE_PFNMAP
+ def_bool n
+ depends on TRANSPARENT_HUGEPAGE
+
+config ARCH_SUPPORTS_PMD_PFNMAP
+ def_bool y
+ depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE
+
+config ARCH_SUPPORTS_PUD_PFNMAP
+ def_bool y
+ depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+
#
# UP and nommu archs use km based percpu allocator
#
@@ -1081,13 +1105,10 @@ config ARCH_USES_HIGH_VMA_FLAGS
config ARCH_HAS_PKEYS
bool
-config ARCH_USES_PG_ARCH_X
+config ARCH_USES_PG_ARCH_2
+ bool
+config ARCH_USES_PG_ARCH_3
bool
- help
- Enable the definition of PG_arch_x page flags with x > 1. Only
- suitable for 64-bit architectures with CONFIG_FLATMEM or
- CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
- enough room for additional bits in page->flags.
config VM_EVENT_COUNTERS
default y
@@ -1263,6 +1284,17 @@ config IOMMU_MM_DATA
config EXECMEM
bool
+config NUMA_MEMBLKS
+ bool
+
+config NUMA_EMU
+ bool "NUMA emulation"
+ depends on NUMA_MEMBLKS
+ help
+ Enable NUMA emulation. A flat machine will be split
+ into virtual nodes when booted with "numa=fake=N", where N is the
+ number of nodes. This is only useful for debugging.
+
source "mm/damon/Kconfig"
endmenu
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index afc72fde0f03..41a58536531d 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -70,6 +70,38 @@ config SLUB_DEBUG_ON
off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
"slab_debug=-".
+config SLUB_RCU_DEBUG
+ bool "Enable UAF detection in TYPESAFE_BY_RCU caches (for KASAN)"
+ depends on SLUB_DEBUG
+ # SLUB_RCU_DEBUG should build fine without KASAN, but is currently useless
+ # without KASAN, so mark it as a dependency of KASAN for now.
+ depends on KASAN
+ default KASAN_GENERIC || KASAN_SW_TAGS
+ help
+ Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
+ was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
+ kfree_rcu() instead.
+
+ This is intended for use in combination with KASAN, to enable KASAN to
+ detect use-after-free accesses in such caches.
+ (KFENCE is able to do that independent of this flag.)
+
+ This might degrade performance.
+ Unfortunately this also prevents a very specific bug pattern from
+ triggering (insufficient checks against an object being recycled
+ within the RCU grace period); so this option can be turned off even on
+ KASAN builds, in case you want to test for such a bug.
+
+ If you're using this for testing bugs / fuzzing and care about
+ catching all the bugs WAY more than performance, you might want to
+ also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
+
+ WARNING:
+ This is designed as a debugging feature, not a security feature.
+ Objects are sometimes recycled without RCU delay under memory pressure.
+
+ If unsure, say N.
+
config PAGE_OWNER
bool "Track page owner"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
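
A hedged sketch of the reader pattern SLAB_TYPESAFE_BY_RCU permits, and which this option makes KASAN-checkable: within an RCU read-side section an object may be freed and reused as another object of the same type, so readers must take a reference and then revalidate identity. The obj type and obj_put() helper are illustrative, not a real API:

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
	unsigned long key;
};

static struct obj *obj_lookup(struct obj __rcu **table, size_t i,
			      unsigned long key)
{
	struct obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(table[i]);
	if (obj && refcount_inc_not_zero(&obj->refcnt)) {
		/* The slot may hold a recycled object: revalidate it. */
		if (obj->key != key) {
			obj_put(obj);		/* illustrative put helper */
			obj = NULL;
		}
	} else {
		obj = NULL;
	}
	rcu_read_unlock();

	return obj;
}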
diff --git a/mm/Makefile b/mm/Makefile
index d2915f8c9dc0..d5639b036166 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@ mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
- pgtable-generic.o rmap.o vmalloc.o
+ pgtable-generic.o rmap.o vmalloc.o vma.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
@@ -53,7 +53,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shrinker.o \
shmem.o util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
- compaction.o show_mem.o shmem_quota.o\
+ compaction.o show_mem.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)
@@ -117,6 +117,9 @@ obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
obj-$(CONFIG_Z3FOLD) += z3fold.o
obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_CMA) += cma.o
+obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_NUMA_MEMBLKS) += numa_memblks.o
+obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
obj-$(CONFIG_PAGE_TABLE_CHECK) += page_table_check.o
@@ -141,3 +144,4 @@ obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
obj-$(CONFIG_EXECMEM) += execmem.o
+obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
diff --git a/mm/cma.c b/mm/cma.c
index 3e9724716bad..2d9fae939283 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -202,7 +202,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
cma->order_per_bit = order_per_bit;
*res_cma = cma;
cma_area_count++;
- totalcma_pages += (size / PAGE_SIZE);
+ totalcma_pages += cma->count;
return 0;
}
@@ -403,18 +403,8 @@ static void cma_debug_show_areas(struct cma *cma)
spin_unlock_irq(&cma->lock);
}
-/**
- * cma_alloc() - allocate pages from contiguous area
- * @cma: Contiguous memory region for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
- *
- * This function allocates part of contiguous memory on specific
- * contiguous memory area.
- */
-struct page *cma_alloc(struct cma *cma, unsigned long count,
- unsigned int align, bool no_warn)
+static struct page *__cma_alloc(struct cma *cma, unsigned long count,
+ unsigned int align, gfp_t gfp)
{
unsigned long mask, offset;
unsigned long pfn = -1;
@@ -463,8 +453,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
- GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
mutex_unlock(&cma_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
@@ -494,7 +483,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
page_kasan_tag_reset(nth_page(page, i));
}
- if (ret && !no_warn) {
+ if (ret && !(gfp & __GFP_NOWARN)) {
pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
__func__, cma->name, count, ret);
cma_debug_show_areas(cma);
@@ -513,6 +502,34 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
return page;
}
+/**
+ * cma_alloc() - allocate pages from contiguous area
+ * @cma: Contiguous memory region for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @no_warn: Avoid printing message about failed allocation
+ *
+ * This function allocates part of contiguous memory on specific
+ * contiguous memory area.
+ */
+struct page *cma_alloc(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn)
+{
+ return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+}
+
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = __cma_alloc(cma, 1 << order, order, gfp);
+
+ return page ? page_folio(page) : NULL;
+}
+
bool cma_pages_valid(struct cma *cma, const struct page *pages,
unsigned long count)
{
@@ -564,6 +581,14 @@ bool cma_release(struct cma *cma, const struct page *pages,
return true;
}
+bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ if (WARN_ON(!folio_test_large(folio)))
+ return false;
+
+ return cma_release(cma, &folio->page, folio_nr_pages(folio));
+}
+
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
int i;
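
A hedged usage sketch of the new folio helpers above: callers must request a compound allocation (__GFP_COMP) with a non-zero order, and release with cma_free_folio(). The cma pointer is assumed to come from an earlier cma_declare_contiguous()/cma_init_reserved_mem() setup:

static struct folio *grab_cma_folio(struct cma *cma)
{
	const int order = 4;	/* illustrative: 64 KiB on 4 KiB pages */

	return cma_alloc_folio(cma, order,
			       GFP_KERNEL | __GFP_COMP | __GFP_NOWARN);
}

static void drop_cma_folio(struct cma *cma, struct folio *folio)
{
	if (folio && !cma_free_folio(cma, folio))
		pr_warn("folio was not a CMA allocation\n");
}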
diff --git a/mm/compaction.c b/mm/compaction.c
index eb95e9b435d0..a2b16b08cbbf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -23,6 +23,7 @@
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
+#include <linux/cpuset.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -86,33 +87,6 @@ static struct page *mark_allocated_noprof(struct page *page, unsigned int order,
}
#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
-static void split_map_pages(struct list_head *freepages)
-{
- unsigned int i, order;
- struct page *page, *next;
- LIST_HEAD(tmp_list);
-
- for (order = 0; order < NR_PAGE_ORDERS; order++) {
- list_for_each_entry_safe(page, next, &freepages[order], lru) {
- unsigned int nr_pages;
-
- list_del(&page->lru);
-
- nr_pages = 1 << order;
-
- mark_allocated(page, order, __GFP_MOVABLE);
- if (order)
- split_page(page, order);
-
- for (i = 0; i < nr_pages; i++) {
- list_add(&page->lru, &tmp_list);
- page++;
- }
- }
- list_splice_init(&tmp_list, &freepages[0]);
- }
-}
-
static unsigned long release_free_list(struct list_head *freepages)
{
int order;
@@ -742,11 +716,11 @@ isolate_fail:
*
* Non-free pages, invalid PFNs, or zone boundaries within the
* [start_pfn, end_pfn) range are considered errors, causing the function to
- * undo its actions and return zero.
+ * undo its actions and return zero. cc->freepages[] is left empty.
*
* Otherwise, the function returns one-past-the-last PFN of the isolated page
* (which may be greater than end_pfn if the end fell in the middle of
- * a free page).
+ * a free page). cc->freepages[] contains the isolated free pages.
*/
unsigned long
isolate_freepages_range(struct compact_control *cc,
@@ -754,10 +728,9 @@ isolate_freepages_range(struct compact_control *cc,
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
int order;
- struct list_head tmp_freepages[NR_PAGE_ORDERS];
for (order = 0; order < NR_PAGE_ORDERS; order++)
- INIT_LIST_HEAD(&tmp_freepages[order]);
+ INIT_LIST_HEAD(&cc->freepages[order]);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
@@ -788,7 +761,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, tmp_freepages, 0, true);
+ block_end_pfn, cc->freepages, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -807,13 +780,10 @@ isolate_freepages_range(struct compact_control *cc,
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
- release_free_list(tmp_freepages);
+ release_free_list(cc->freepages);
return 0;
}
- /* __isolate_free_page() does not map the pages */
- split_map_pages(tmp_freepages);
-
/* We don't use freelists for anything. */
return pfn;
}
@@ -2853,6 +2823,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
ac->highest_zoneidx, ac->nodemask) {
enum compact_result status;
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
+ !__cpuset_zone_allowed(zone, gfp_mask))
+ continue;
+
if (prio > MIN_COMPACT_PRIORITY
&& compaction_deferred(zone, order)) {
rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 7a87628b76ab..a83f3b736d51 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -552,7 +552,13 @@ static unsigned int damon_accesses_bp_to_nr_accesses(
return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}
-/* convert nr_accesses to access ratio in bp (per 10,000) */
+/*
+ * Convert nr_accesses to access ratio in bp (per 10,000).
+ *
+ * Callers should ensure attrs.aggr_interval is not zero, like
+ * damon_update_monitoring_results() does. Otherwise, divide-by-zero would
+ * happen.
+ */
static unsigned int damon_nr_accesses_to_accesses_bp(
unsigned int nr_accesses, struct damon_attrs *attrs)
{
@@ -1582,13 +1588,16 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
return;
/* Fill up the score histogram */
- memset(quota->histogram, 0, sizeof(quota->histogram));
+ memset(c->regions_score_histogram, 0,
+ sizeof(*c->regions_score_histogram) *
+ (DAMOS_MAX_SCORE + 1));
damon_for_each_target(t, c) {
damon_for_each_region(r, t) {
if (!__damos_valid_target(r, s))
continue;
score = c->ops.get_scheme_score(c, t, r, s);
- quota->histogram[score] += damon_sz_region(r);
+ c->regions_score_histogram[score] +=
+ damon_sz_region(r);
if (score > max_score)
max_score = score;
}
@@ -1596,7 +1605,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
/* Set the min score limit */
for (cumulated_sz = 0, score = max_score; ; score--) {
- cumulated_sz += quota->histogram[score];
+ cumulated_sz += c->regions_score_histogram[score];
if (cumulated_sz >= quota->esz || !score)
break;
}
@@ -1957,6 +1966,10 @@ static int kdamond_fn(void *data)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
goto done;
+ ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
+ sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
+ if (!ctx->regions_score_histogram)
+ goto done;
sz_limit = damon_region_sz_limit(ctx);
@@ -2034,6 +2047,7 @@ done:
ctx->callback.before_terminate(ctx);
if (ctx->ops.cleanup)
ctx->ops.cleanup(ctx);
+ kfree(ctx->regions_score_histogram);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&ctx->kdamond_lock);
@@ -2205,4 +2219,4 @@ static int __init damon_init(void)
subsys_initcall(damon_init);
-#include "core-test.h"
+#include "tests/core-kunit.h"
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 51a6f1cac385..b4213bc47e44 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -1145,4 +1145,4 @@ out:
module_init(damon_dbgfs_init);
-#include "dbgfs-test.h"
+#include "tests/dbgfs-kunit.h"
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index cffc755e7775..58145d59881d 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1882,4 +1882,4 @@ out:
}
subsys_initcall(damon_sysfs_init);
-#include "sysfs-test.h"
+#include "tests/sysfs-kunit.h"
diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig
new file mode 100644
index 000000000000..a73be044fc9b
--- /dev/null
+++ b/mm/damon/tests/.kunitconfig
@@ -0,0 +1,22 @@
+# for DAMON core
+CONFIG_KUNIT=y
+CONFIG_DAMON=y
+CONFIG_DAMON_KUNIT_TEST=y
+
+# for DAMON vaddr ops
+CONFIG_MMU=y
+CONFIG_PAGE_IDLE_FLAG=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_VADDR_KUNIT_TEST=y
+
+# for DAMON sysfs interface
+CONFIG_SYSFS=y
+CONFIG_DAMON_SYSFS=y
+CONFIG_DAMON_SYSFS_KUNIT_TEST=y
+
+# for DAMON debugfs interface
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_DBGFS_DEPRECATED=y
+CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_DBGFS_KUNIT_TEST=y
diff --git a/mm/damon/core-test.h b/mm/damon/tests/core-kunit.h
index 0cee634f3544..cf22e09a3507 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/tests/core-kunit.h
@@ -246,16 +246,20 @@ static void damon_test_split_regions_of(struct kunit *test)
static void damon_test_ops_registration(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
- struct damon_operations ops, bak;
+ struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
+ bool need_cleanup = false;
+
+ /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
+ if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
+ bak.id = DAMON_OPS_VADDR;
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
+ need_cleanup = true;
+ }
- /* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */
+ /* DAMON_OPS_VADDR is ensured to be registered */
KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
- KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0);
/* Double-registration is prohibited */
- ops.id = DAMON_OPS_VADDR;
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
- ops.id = DAMON_OPS_PADDR;
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
/* Unknown ops id cannot be registered */
@@ -278,6 +282,13 @@ static void damon_test_ops_registration(struct kunit *test)
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
damon_destroy_ctx(c);
+
+ if (need_cleanup) {
+ mutex_lock(&damon_ops_lock);
+ damon_registered_ops[DAMON_OPS_VADDR] =
+ (struct damon_operations){};
+ mutex_unlock(&damon_ops_lock);
+ }
}
static void damon_test_set_regions(struct kunit *test)
@@ -309,6 +320,18 @@ static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
};
+ /*
+ * In some cases, such as on 32-bit architectures where UINT_MAX is
+ * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
+ * damon_nr_accesses_to_accesses_bp() in that case would cause a
+ * divide-by-zero. Such a case is prohibited in normal execution, since
+ * the caution is documented in the comment for the function and
+ * damon_update_monitoring_results() does the check. Skip the test in
+ * that case.
+ */
+ if (!attrs.aggr_interval)
+ kunit_skip(test, "aggr_interval is zero.");
+
KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/tests/dbgfs-kunit.h
index 2d85217f5ba4..d2ecfcc8db86 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/tests/dbgfs-kunit.h
@@ -73,6 +73,11 @@ static void damon_dbgfs_test_set_targets(struct kunit *test)
struct damon_ctx *ctx = dbgfs_new_ctx();
char buf[64];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ dbgfs_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
/* Make DAMON consider target has no pid */
damon_select_ops(ctx, DAMON_OPS_PADDR);
@@ -111,6 +116,11 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test)
int i, rc;
char buf[256];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
damon_select_ops(ctx, DAMON_OPS_PADDR);
dbgfs_set_targets(ctx, 3, NULL);
diff --git a/mm/damon/sysfs-test.h b/mm/damon/tests/sysfs-kunit.h
index 1c9b596057a7..1c9b596057a7 100644
--- a/mm/damon/sysfs-test.h
+++ b/mm/damon/tests/sysfs-kunit.h
diff --git a/mm/damon/vaddr-test.h b/mm/damon/tests/vaddr-kunit.h
index 83626483f82b..a339d117150f 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -77,7 +77,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
(struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
};
- mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ mt_init_flags(&mm.mm_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
kunit_skip(test, "Failed to create VMA tree");
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 58829baf8b5d..08cfd22b5249 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
* If this is too slow, it can be optimised to examine the maple
* tree gaps.
*/
+ rcu_read_lock();
for_each_vma(vmi, vma) {
unsigned long gap;
@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
next:
prev = vma;
}
+ rcu_read_unlock();
if (!sz_range(&second_gap) || !sz_range(&first_gap))
return -EINVAL;
@@ -730,4 +732,4 @@ static int __init damon_va_initcall(void)
subsys_initcall(damon_va_initcall);
-#include "vaddr-test.h"
+#include "tests/vaddr-kunit.h"
diff --git a/mm/debug.c b/mm/debug.c
index 69e524c3e601..aa57d3ffd4ed 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -36,11 +36,6 @@ const struct trace_print_flags pageflag_names[] = {
{0, NULL}
};
-const struct trace_print_flags pagetype_names[] = {
- __def_pagetype_names,
- {0, NULL}
-};
-
const struct trace_print_flags gfpflag_names[] = {
__def_gfpflag_names,
{0, NULL}
@@ -51,6 +46,27 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
+#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)
+
+static const char *page_type_names[] = {
+ DEF_PAGETYPE_NAME(slab),
+ DEF_PAGETYPE_NAME(hugetlb),
+ DEF_PAGETYPE_NAME(offline),
+ DEF_PAGETYPE_NAME(guard),
+ DEF_PAGETYPE_NAME(table),
+ DEF_PAGETYPE_NAME(buddy),
+ DEF_PAGETYPE_NAME(unaccepted),
+};
+
+static const char *page_type_name(unsigned int page_type)
+{
+ unsigned i = (page_type >> 24) - 0xf0;
+
+ if (i >= ARRAY_SIZE(page_type_names))
+ return "unknown";
+ return page_type_names[i];
+}
+
static void __dump_folio(struct folio *folio, struct page *page,
unsigned long pfn, unsigned long idx)
{
@@ -58,7 +74,7 @@ static void __dump_folio(struct folio *folio, struct page *page,
int mapcount = atomic_read(&page->_mapcount);
char *type = "";
- mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
+ mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
folio_ref_count(folio), mapcount, mapping,
folio->index + idx, pfn);
@@ -92,7 +108,8 @@ static void __dump_folio(struct folio *folio, struct page *page,
pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
if (page_has_type(&folio->page))
- pr_warn("page_type: %pGt\n", &folio->page.page_type);
+ pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
+ page_type_name(folio->page.page_type));
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
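
The lookup added above relies on the page_type encoding: the top byte of page->page_type holds a PGTY_* value in the 0xf0..0xff range, so subtracting 0xf0 indexes the name table, while values below 0xf0 would be a real _mapcount. A standalone sketch (the PGTY_* values here are illustrative copies):

#include <stdio.h>

enum { PGTY_slab = 0xf0, PGTY_hugetlb = 0xf1, PGTY_offline = 0xf2 };

static const char *const names[] = { "slab", "hugetlb", "offline" };

int main(void)
{
	unsigned int page_type = ((unsigned int)PGTY_hugetlb << 24) | 0x123;
	unsigned int i = (page_type >> 24) - 0xf0;

	printf("page_type: %x(%s)\n", page_type >> 24,
	       i < sizeof(names) / sizeof(names[0]) ? names[i] : "unknown");
	return 0;	/* prints "page_type: f1(hugetlb)" */
}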
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index e4969fb54da3..bc748f700a9e 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -231,10 +231,10 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
flush_dcache_page(page);
pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_write(pmd));
pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
@@ -245,10 +245,10 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
pmd = pmd_mkwrite(pmd, args->vma);
pmd = pmd_mkdirty(pmd);
pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
@@ -256,7 +256,7 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
flush_dcache_page(page);
pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_young(pmd));
/* Clear the pte entries */
@@ -357,12 +357,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_set_wrprotect(args->mm, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_write(pud));
#ifndef __PAGETABLE_PMD_FOLDED
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
@@ -374,12 +374,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
pud = pud_mkwrite(pud);
pud = pud_mkdirty(pud);
pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
#ifndef __PAGETABLE_PMD_FOLDED
pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -389,7 +389,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_young(pud));
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
@@ -441,7 +441,7 @@ static void __init pmd_huge_tests(struct pgtable_debug_args *args)
WRITE_ONCE(*args->pmdp, __pmd(0));
WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
WARN_ON(!pmd_clear_huge(args->pmdp));
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
}
@@ -461,7 +461,7 @@ static void __init pud_huge_tests(struct pgtable_debug_args *args)
WRITE_ONCE(*args->pudp, __pud(0));
WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
WARN_ON(!pud_clear_huge(args->pudp));
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -490,7 +490,7 @@ static void __init pgd_basic_tests(struct pgtable_debug_args *args)
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
- pud_t pud = READ_ONCE(*args->pudp);
+ pud_t pud = pudp_get(args->pudp);
if (mm_pmd_folded(args->mm))
return;
@@ -498,7 +498,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating PUD clear\n");
WARN_ON(pud_none(pud));
pud_clear(args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
}
@@ -515,7 +515,7 @@ static void __init pud_populate_tests(struct pgtable_debug_args *args)
* Hence this must not qualify as pud_bad().
*/
pud_populate(args->mm, args->pudp, args->start_pmdp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
@@ -526,7 +526,7 @@ static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
- p4d_t p4d = READ_ONCE(*args->p4dp);
+ p4d_t p4d = p4dp_get(args->p4dp);
if (mm_pud_folded(args->mm))
return;
@@ -534,7 +534,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating P4D clear\n");
WARN_ON(p4d_none(p4d));
p4d_clear(args->p4dp);
- p4d = READ_ONCE(*args->p4dp);
+ p4d = p4dp_get(args->p4dp);
WARN_ON(!p4d_none(p4d));
}
@@ -553,13 +553,13 @@ static void __init p4d_populate_tests(struct pgtable_debug_args *args)
pud_clear(args->pudp);
p4d_clear(args->p4dp);
p4d_populate(args->mm, args->p4dp, args->start_pudp);
- p4d = READ_ONCE(*args->p4dp);
+ p4d = p4dp_get(args->p4dp);
WARN_ON(p4d_bad(p4d));
}
static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
- pgd_t pgd = READ_ONCE(*(args->pgdp));
+ pgd_t pgd = pgdp_get(args->pgdp);
if (mm_p4d_folded(args->mm))
return;
@@ -567,7 +567,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating PGD clear\n");
WARN_ON(pgd_none(pgd));
pgd_clear(args->pgdp);
- pgd = READ_ONCE(*args->pgdp);
+ pgd = pgdp_get(args->pgdp);
WARN_ON(!pgd_none(pgd));
}
@@ -586,7 +586,7 @@ static void __init pgd_populate_tests(struct pgtable_debug_args *args)
p4d_clear(args->p4dp);
pgd_clear(args->pgdp);
pgd_populate(args->mm, args->pgdp, args->start_p4dp);
- pgd = READ_ONCE(*args->pgdp);
+ pgd = pgdp_get(args->pgdp);
WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
@@ -627,12 +627,12 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
- pmd_t pmd = READ_ONCE(*args->pmdp);
+ pmd_t pmd = pmdp_get(args->pmdp);
pr_debug("Validating PMD clear\n");
WARN_ON(pmd_none(pmd));
pmd_clear(args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
}
@@ -646,7 +646,7 @@ static void __init pmd_populate_tests(struct pgtable_debug_args *args)
* Hence this must not qualify as pmd_bad().
*/
pmd_populate(args->mm, args->pmdp, args->start_ptep);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_bad(pmd));
}
@@ -1251,7 +1251,7 @@ static int __init init_args(struct pgtable_debug_args *args)
ret = -ENOMEM;
goto error;
}
- args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
+ args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
WARN_ON(!args->start_ptep);
init_fixed_pfns(args);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 6c39d42f16dc..532dee205c6e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -193,10 +193,10 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
struct fd f = fdget(fd);
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ret = vfs_fadvise(f.file, offset, len, advice);
+ ret = vfs_fadvise(fd_file(f), offset, len, advice);
fdput(f);
return ret;
diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c
index 532851ce5132..7647096170e9 100644
--- a/mm/fail_page_alloc.c
+++ b/mm/fail_page_alloc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/error-injection.h>
#include <linux/mm.h>
diff --git a/mm/failslab.c b/mm/failslab.c
index af16c2ed578f..c3901b136498 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
#include <linux/error-injection.h>
+#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "slab.h"
diff --git a/mm/filemap.c b/mm/filemap.c
index 0ca9c1377b68..bbaed3dd5049 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -46,6 +46,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
+#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -112,8 +113,8 @@
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->i_pages lock (try_to_unmap_one)
- * ->lruvec->lru_lock (follow_page->mark_page_accessed)
- * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
+ * ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
+ * ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
* ->private_lock (folio_remove_rmap_pte->set_page_dirty)
* ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
* bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
@@ -530,7 +531,6 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
struct folio *folio = fbatch.folios[i];
folio_wait_writeback(folio);
- folio_clear_error(folio);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -859,6 +859,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
+ folio);
mapping_set_update(&xas, mapping);
VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
@@ -1919,8 +1921,10 @@ repeat:
folio_wait_stable(folio);
no_page:
if (!folio && (fgp_flags & FGP_CREAT)) {
- unsigned order = FGF_GET_ORDER(fgp_flags);
+ unsigned int min_order = mapping_min_folio_order(mapping);
+ unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
int err;
+ index = mapping_align_index(mapping, index);
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp |= __GFP_WRITE;
@@ -1933,10 +1937,8 @@ no_page:
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
- if (!mapping_large_folio_support(mapping))
- order = 0;
- if (order > MAX_PAGECACHE_ORDER)
- order = MAX_PAGECACHE_ORDER;
+ if (order > mapping_max_folio_order(mapping))
+ order = mapping_max_folio_order(mapping);
/* If we're not aligned, allocate a smaller folio */
if (index & ((1UL << order) - 1))
order = __ffs(index);
@@ -1945,7 +1947,7 @@ no_page:
gfp_t alloc_gfp = gfp;
err = -ENOMEM;
- if (order > 0)
+ if (order > min_order)
alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
folio = filemap_alloc_folio(alloc_gfp, order);
if (!folio)
@@ -1960,7 +1962,7 @@ no_page:
break;
folio_put(folio);
folio = NULL;
- } while (order-- > 0);
+ } while (order-- > min_order);
if (err == -EEXIST)
goto repeat;
@@ -2047,17 +2049,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
if (!folio_batch_add(fbatch, folio))
break;
}
- rcu_read_unlock();
if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
+ unsigned long nr;
int idx = folio_batch_count(fbatch) - 1;
folio = fbatch->folios[idx];
if (!xa_is_value(folio))
nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
+ else
+ nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
+ *start = round_down(indices[idx] + nr, nr);
}
+ rcu_read_unlock();
+
return folio_batch_count(fbatch);
}
@@ -2089,10 +2094,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
rcu_read_lock();
while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+ unsigned long base;
+ unsigned long nr;
+
if (!xa_is_value(folio)) {
- if (folio->index < *start)
+ nr = folio_nr_pages(folio);
+ base = folio->index;
+ /* Omit large folio which begins before the start */
+ if (base < *start)
goto put;
- if (folio_next_index(folio) - 1 > end)
+ /* Omit large folio which extends beyond the end */
+ if (base + nr - 1 > end)
goto put;
if (!folio_trylock(folio))
goto put;
@@ -2101,7 +2113,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
goto unlock;
VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
folio);
+ } else {
+ nr = 1 << xas_get_order(&xas);
+ base = xas.xa_index & ~(nr - 1);
+ /* Omit order>0 value which begins before the start */
+ if (base < *start)
+ continue;
+ /* Omit order>0 value which extends beyond the end */
+ if (base + nr - 1 > end)
+ break;
}
+
+ /* Update start now so that last update is correct on return */
+ *start = base + nr;
indices[fbatch->nr] = xas.xa_index;
if (!folio_batch_add(fbatch, folio))
break;
@@ -2113,15 +2137,6 @@ put:
}
rcu_read_unlock();
- if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
- int idx = folio_batch_count(fbatch) - 1;
-
- folio = fbatch->folios[idx];
- if (!xa_is_value(folio))
- nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
- }
return folio_batch_count(fbatch);
}
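
In find_lock_entries(), a multi-index value entry may be found at any index inside its span, so the new code masks the low bits off xas.xa_index to recover the entry's base before applying the start/end checks; *start is then advanced inside the loop so the final value is correct even when the batch fills. The masking, as a userspace sketch with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned long xa_index = 13;	/* assumed lookup index */
	unsigned long nr = 1UL << 3;	/* assumed xas_get_order() == 3 */
	unsigned long base = xa_index & ~(nr - 1);

	printf("base %lu, next start %lu\n", base, base + nr); /* 8 and 16 */
	return 0;
}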
@@ -2342,13 +2357,6 @@ static int filemap_read_folio(struct file *file, filler_t filler,
unsigned long pflags;
int error;
- /*
- * A previous I/O error may have been due to temporary failures,
- * eg. multipath errors. PG_error will be set again if read_folio
- * fails.
- */
- folio_clear_error(folio);
-
/* Start the actual read. The read will unlock the page. */
if (unlikely(workingset))
psi_memstall_enter(&pflags);
@@ -2449,13 +2457,15 @@ unlock_mapping:
}
static int filemap_create_folio(struct file *file,
- struct address_space *mapping, pgoff_t index,
+ struct address_space *mapping, loff_t pos,
struct folio_batch *fbatch)
{
struct folio *folio;
int error;
+ unsigned int min_order = mapping_min_folio_order(mapping);
+ pgoff_t index;
- folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+ folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
if (!folio)
return -ENOMEM;
@@ -2473,6 +2483,7 @@ static int filemap_create_folio(struct file *file,
* well to keep locking rules simple.
*/
filemap_invalidate_lock_shared(mapping);
+ index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
error = filemap_add_folio(mapping, folio, index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
if (error == -EEXIST)
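
filemap_create_folio() now receives the byte position and derives an index aligned to the mapping's minimum folio order, so the newly allocated min-order folio is inserted at a legal boundary. A worked userspace example of the alignment, assuming 4 KiB pages and a 16 KiB minimum folio:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int min_order = 2;	/* assumed 16 KiB minimum folio */
	long long pos = 0x5000;		/* byte offset, i.e. page index 5 */
	unsigned long index = (pos >> (PAGE_SHIFT + min_order)) << min_order;

	printf("aligned index: %lu\n", index);	/* 4: folio covers pages 4..7 */
	return 0;
}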
@@ -2514,6 +2525,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
pgoff_t last_index;
struct folio *folio;
+ unsigned int flags;
int err = 0;
/* "last_index" is the index of the page beyond the end of the read */
@@ -2526,15 +2538,18 @@ retry:
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ flags = memalloc_noio_save();
page_cache_sync_readahead(mapping, ra, filp, index,
last_index - index);
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ memalloc_noio_restore(flags);
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
}
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
return -EAGAIN;
- err = filemap_create_folio(filp, mapping,
- iocb->ki_pos >> PAGE_SHIFT, fbatch);
+ err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
if (err == AOP_TRUNCATED_PAGE)
goto retry;
return err;
@@ -2556,6 +2571,7 @@ retry:
goto err;
}
+ trace_mm_filemap_get_pages(mapping, index, last_index - 1);
return 0;
err:
if (err < 0)
@@ -2712,14 +2728,12 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
}
EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
-int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait)
{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- loff_t pos = iocb->ki_pos;
- loff_t end = pos + count - 1;
int ret;
- if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (nowait) {
/* we could block if there are any pages in the range */
if (filemap_range_has_page(mapping, pos, end))
return -EAGAIN;
@@ -2738,6 +2752,15 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
end >> PAGE_SHIFT);
}
+
+int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ return filemap_invalidate_pages(mapping, iocb->ki_pos,
+ iocb->ki_pos + count - 1,
+ iocb->ki_flags & IOCB_NOWAIT);
+}
EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
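
The range-invalidation logic is hoisted out of the kiocb path into filemap_invalidate_pages(), which needs only a mapping, a byte range, and an explicit nowait flag; kiocb_invalidate_pages() becomes a thin wrapper. Presumably this lets callers that have no kiocb reuse the helper; a hypothetical caller might look like this (the function name is made up):

/* Hypothetical sketch, not from the patch: invalidate a byte range on a
 * mapping directly, without going through a kiocb. */
static int example_invalidate(struct address_space *mapping,
			      loff_t pos, size_t count, bool nowait)
{
	return filemap_invalidate_pages(mapping, pos, pos + count - 1, nowait);
}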
/**
@@ -2989,7 +3012,7 @@ unlock:
static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
{
if (xa_is_value(folio))
- return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
+ return PAGE_SIZE << xas_get_order(xas);
return folio_size(folio);
}
@@ -3287,6 +3310,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
if (unlikely(index >= max_idx))
return VM_FAULT_SIGBUS;
+ trace_mm_filemap_fault(mapping, index);
+
/*
* Do we have something in the page cache already?
*/
@@ -3604,7 +3629,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
- pgoff_t last_pgoff = start_pgoff;
+ pgoff_t file_end, last_pgoff = start_pgoff;
unsigned long addr;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
@@ -3630,6 +3655,10 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
goto out;
}
+ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+ if (end_pgoff > file_end)
+ end_pgoff = file_end;
+
folio_type = mm_counter_file(folio);
do {
unsigned long end;
@@ -3653,6 +3682,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
add_mm_counter(vma->vm_mm, folio_type, rss);
pte_unmap_unlock(vmf->pte, vmf->ptl);
+ trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
out:
rcu_read_unlock();
@@ -3750,9 +3780,11 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
repeat:
folio = filemap_get_folio(mapping, index);
if (IS_ERR(folio)) {
- folio = filemap_alloc_folio(gfp, 0);
+ folio = filemap_alloc_folio(gfp,
+ mapping_min_folio_order(mapping));
if (!folio)
return ERR_PTR(-ENOMEM);
+ index = mapping_align_index(mapping, index);
err = filemap_add_folio(mapping, folio, index, gfp);
if (unlikely(err)) {
folio_put(folio);
@@ -3987,7 +4019,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
ssize_t written = 0;
do {
- struct page *page;
struct folio *folio;
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
@@ -4017,11 +4048,10 @@ retry:
}
status = a_ops->write_begin(file, mapping, pos, bytes,
- &page, &fsdata);
+ &folio, &fsdata);
if (unlikely(status < 0))
break;
- folio = page_folio(page);
offset = offset_in_folio(folio, pos);
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
@@ -4033,7 +4063,7 @@ retry:
flush_dcache_folio(folio);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
- page, fsdata);
+ folio, fsdata);
if (unlikely(status != copied)) {
iov_iter_revert(i, copied - max(status, 0L));
if (unlikely(status < 0))
@@ -4282,7 +4312,7 @@ static void filemap_cachestat(struct address_space *mapping,
if (xas_retry(&xas, folio))
continue;
- order = xa_get_order(xas.xa, xas.xa_index);
+ order = xas_get_order(&xas);
nr_pages = 1 << order;
folio_first_index = round_down(xas.xa_index, 1 << order);
folio_last_index = folio_first_index + nr_pages - 1;
@@ -4393,7 +4423,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
struct cachestat cs;
pgoff_t first_index, last_index;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
if (copy_from_user(&csr, cstat_range,
@@ -4403,7 +4433,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
}
/* hugetlbfs is not supported */
- if (is_file_hugepages(f.file)) {
+ if (is_file_hugepages(fd_file(f))) {
fdput(f);
return -EOPNOTSUPP;
}
@@ -4417,7 +4447,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
last_index =
csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
memset(&cs, 0, sizeof(struct cachestat));
- mapping = f.file->f_mapping;
+ mapping = fd_file(f)->f_mapping;
filemap_cachestat(mapping, first_index, last_index, &cs);
fdput(f);
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index f05906006b3c..80746182e9e8 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -92,15 +92,3 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
-
-bool isolate_lru_page(struct page *page)
-{
- if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
- return false;
- return folio_isolate_lru((struct folio *)page);
-}
-
-void putback_lru_page(struct page *page)
-{
- folio_putback_lru(page_folio(page));
-}
diff --git a/mm/gup.c b/mm/gup.c
index 54d0dc3831fb..8232c8c9c372 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -416,6 +416,19 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
EXPORT_SYMBOL(unpin_user_pages);
/**
+ * unpin_user_folio() - release pages of a folio
+ * @folio: pointer to folio to be released
+ * @npages: number of pages of the same folio
+ *
+ * Release @npages page references on the folio.
+ */
+void unpin_user_folio(struct folio *folio, unsigned long npages)
+{
+ gup_put_folio(folio, npages, FOLL_PIN);
+}
+EXPORT_SYMBOL(unpin_user_folio);
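
unpin_user_folio() drops @npages FOLL_PIN references against a single folio in one call, instead of one unpin_user_page() per page. A hypothetical caller (kernel-context sketch; the function name is made up):

/* Hypothetical sketch: release all pins taken on pages of one folio. */
static void example_release(struct folio *folio, unsigned long npages)
{
	unpin_user_folio(folio, npages);	/* drops npages FOLL_PIN refs */
}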
+
+/**
* unpin_folios() - release an array of gup-pinned folios.
* @folios: array of folios to be marked dirty and released.
* @nfolios: number of folios in the @folios array.
@@ -819,6 +832,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
struct dev_pagemap **pgmap)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
@@ -876,6 +890,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
goto out;
}
}
+ folio = page_folio(page);
if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
page = ERR_PTR(-EMLINK);
@@ -886,7 +901,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
!PageAnonExclusive(page), page);
/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
- ret = try_grab_folio(page_folio(page), 1, flags);
+ ret = try_grab_folio(folio, 1, flags);
if (unlikely(ret)) {
page = ERR_PTR(ret);
goto out;
@@ -898,7 +913,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
* Documentation/core-api/pin_user_pages.rst for details.
*/
if (flags & FOLL_PIN) {
- ret = arch_make_page_accessible(page);
+ ret = arch_make_folio_accessible(folio);
if (ret) {
unpin_user_page(page);
page = ERR_PTR(ret);
@@ -1070,28 +1085,6 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags)
-{
- struct follow_page_context ctx = { NULL };
- struct page *page;
-
- if (vma_is_secretmem(vma))
- return NULL;
-
- if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
- return NULL;
-
- /*
- * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
- * to fail on PROT_NONE-mapped pages.
- */
- page = follow_page_mask(vma, address, foll_flags, &ctx);
- if (ctx.pgmap)
- put_dev_pagemap(ctx.pgmap);
- return page;
-}
-
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
@@ -1153,19 +1146,19 @@ unmap:
* to 0 and -EBUSY returned.
*/
static int faultin_page(struct vm_area_struct *vma,
- unsigned long address, unsigned int *flags, bool unshare,
+ unsigned long address, unsigned int flags, bool unshare,
int *locked)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
- if (*flags & FOLL_NOFAULT)
+ if (flags & FOLL_NOFAULT)
return -EFAULT;
- if (*flags & FOLL_WRITE)
+ if (flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
- if (*flags & FOLL_REMOTE)
+ if (flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
- if (*flags & FOLL_UNLOCKABLE) {
+ if (flags & FOLL_UNLOCKABLE) {
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/*
* FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
@@ -1173,12 +1166,12 @@ static int faultin_page(struct vm_area_struct *vma,
* That's because some callers may not be prepared to
* handle early exits caused by non-fatal signals.
*/
- if (*flags & FOLL_INTERRUPTIBLE)
+ if (flags & FOLL_INTERRUPTIBLE)
fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
}
- if (*flags & FOLL_NOWAIT)
+ if (flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
- if (*flags & FOLL_TRIED) {
+ if (flags & FOLL_TRIED) {
/*
* Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
* can co-exist
@@ -1212,7 +1205,7 @@ static int faultin_page(struct vm_area_struct *vma,
}
if (ret & VM_FAULT_ERROR) {
- int err = vm_fault_to_errno(ret, *flags);
+ int err = vm_fault_to_errno(ret, flags);
if (err)
return err;
@@ -1437,7 +1430,6 @@ static long __get_user_pages(struct mm_struct *mm,
do {
struct page *page;
- unsigned int foll_flags = gup_flags;
unsigned int page_increm;
/* first iteration or cross vma bound */
@@ -1488,9 +1480,9 @@ retry:
}
cond_resched();
- page = follow_page_mask(vma, start, foll_flags, &ctx);
+ page = follow_page_mask(vma, start, gup_flags, &ctx);
if (!page || PTR_ERR(page) == -EMLINK) {
- ret = faultin_page(vma, start, &foll_flags,
+ ret = faultin_page(vma, start, gup_flags,
PTR_ERR(page) == -EMLINK, locked);
switch (ret) {
case 0:
@@ -1547,13 +1539,12 @@ next_page:
* large folio, this should never fail.
*/
if (try_grab_folio(folio, page_increm - 1,
- foll_flags)) {
+ gup_flags)) {
/*
* Release the 1st page ref if the
* folio is problematic, fail hard.
*/
- gup_put_folio(folio, 1,
- foll_flags);
+ gup_put_folio(folio, 1, gup_flags);
ret = -EFAULT;
goto out;
}
@@ -2357,7 +2348,7 @@ static int migrate_longterm_unpinnable_folios(
folio_get(folio);
gup_put_folio(folio, 1, FOLL_PIN);
- if (migrate_device_coherent_page(&folio->page)) {
+ if (migrate_device_coherent_folio(folio)) {
ret = -EBUSY;
goto err;
}
@@ -2519,7 +2510,7 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
* These flags not allowed to be specified externally to the gup
* interfaces:
* - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
- * - FOLL_REMOTE is internal only and used on follow_page()
+ * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
* - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
*/
if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
@@ -2921,7 +2912,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
* details.
*/
if (flags & FOLL_PIN) {
- ret = arch_make_page_accessible(page);
+ ret = arch_make_folio_accessible(folio);
if (ret) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
@@ -3060,6 +3051,9 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
return 0;
+ if (pmd_special(orig))
+ return 0;
+
if (pmd_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
@@ -3104,6 +3098,9 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
if (!pud_access_permitted(orig, flags & FOLL_WRITE))
return 0;
+ if (pud_special(orig))
+ return 0;
+
if (pud_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67c86a5d64a6..0580ac9e47b9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -40,6 +40,7 @@
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
+#include <linux/pagewalk.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -73,6 +74,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc);
+static bool split_underused_thp = true;
static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
@@ -80,6 +82,7 @@ unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
+static bool anon_orders_configured __initdata;
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
@@ -94,8 +97,8 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
/* Check the intersection of requested and supported orders. */
if (vma_is_anonymous(vma))
supported_orders = THP_ORDERS_ALL_ANON;
- else if (vma_is_dax(vma))
- supported_orders = THP_ORDERS_ALL_FILE_DAX;
+ else if (vma_is_special_huge(vma))
+ supported_orders = THP_ORDERS_ALL_SPECIAL;
else
supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
@@ -159,15 +162,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
* Must be done before hugepage flags check since shmem has its
* own flags.
*/
- if (!in_pf && shmem_file(vma->vm_file)) {
- bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
- !enforce_sysfs, vma->vm_mm, vm_flags);
-
- if (!vma_is_anon_shmem(vma))
- return global_huge ? orders : 0;
+ if (!in_pf && shmem_file(vma->vm_file))
return shmem_allowable_huge_orders(file_inode(vma->vm_file),
- vma, vma->vm_pgoff, global_huge);
- }
+ vma, vma->vm_pgoff, 0,
+ !enforce_sysfs);
if (!vma_is_anonymous(vma)) {
/*
@@ -220,6 +218,8 @@ retry:
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
return false;
}
+ /* Ensure zero folio won't have large_rmappable flag set. */
+ folio_clear_large_rmappable(zero_folio);
preempt_disable();
if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
preempt_enable();
@@ -443,6 +443,27 @@ static ssize_t hpage_pmd_size_show(struct kobject *kobj,
static struct kobj_attribute hpage_pmd_size_attr =
__ATTR_RO(hpage_pmd_size);
+static ssize_t split_underused_thp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", split_underused_thp);
+}
+
+static ssize_t split_underused_thp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = kstrtobool(buf, &split_underused_thp);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static struct kobj_attribute split_underused_thp_attr = __ATTR(
+ shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
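
Following from the attribute registration above, the new knob should appear as /sys/kernel/mm/transparent_hugepage/shrink_underused, and since it is parsed with kstrtobool() it accepts 0/1, y/n, or on/off. A minimal userspace sketch of disabling it (path assumed from the registration):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/mm/transparent_hugepage/shrink_underused",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "0", 1) != 1) {	/* kstrtobool() also accepts n/off */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}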
+
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
@@ -451,6 +472,7 @@ static struct attribute *hugepage_attr[] = {
#ifdef CONFIG_SHMEM
&shmem_enabled_attr.attr,
#endif
+ &split_underused_thp_attr.attr,
NULL,
};
@@ -463,8 +485,8 @@ static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);
-static ssize_t thpsize_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t anon_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
int order = to_thpsize(kobj)->order;
const char *output;
@@ -481,9 +503,9 @@ static ssize_t thpsize_enabled_show(struct kobject *kobj,
return sysfs_emit(buf, "%s\n", output);
}
-static ssize_t thpsize_enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t anon_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int order = to_thpsize(kobj)->order;
ssize_t ret = count;
@@ -525,19 +547,35 @@ static ssize_t thpsize_enabled_store(struct kobject *kobj,
return ret;
}
-static struct kobj_attribute thpsize_enabled_attr =
- __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
+static struct kobj_attribute anon_enabled_attr =
+ __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
-static struct attribute *thpsize_attrs[] = {
- &thpsize_enabled_attr.attr,
+static struct attribute *anon_ctrl_attrs[] = {
+ &anon_enabled_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group anon_ctrl_attr_grp = {
+ .attrs = anon_ctrl_attrs,
+};
+
+static struct attribute *file_ctrl_attrs[] = {
#ifdef CONFIG_SHMEM
&thpsize_shmem_enabled_attr.attr,
#endif
NULL,
};
-static const struct attribute_group thpsize_attr_group = {
- .attrs = thpsize_attrs,
+static const struct attribute_group file_ctrl_attr_grp = {
+ .attrs = file_ctrl_attrs,
+};
+
+static struct attribute *any_ctrl_attrs[] = {
+ NULL,
+};
+
+static const struct attribute_group any_ctrl_attr_grp = {
+ .attrs = any_ctrl_attrs,
};
static const struct kobj_type thpsize_ktype = {
@@ -576,64 +614,136 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
+#ifdef CONFIG_SHMEM
DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
+#endif
DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
+DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
+DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
-static struct attribute *stats_attrs[] = {
+static struct attribute *anon_stats_attrs[] = {
&anon_fault_alloc_attr.attr,
&anon_fault_fallback_attr.attr,
&anon_fault_fallback_charge_attr.attr,
+#ifndef CONFIG_SHMEM
&swpout_attr.attr,
&swpout_fallback_attr.attr,
+#endif
+ &split_deferred_attr.attr,
+ &nr_anon_attr.attr,
+ &nr_anon_partially_mapped_attr.attr,
+ NULL,
+};
+
+static struct attribute_group anon_stats_attr_grp = {
+ .name = "stats",
+ .attrs = anon_stats_attrs,
+};
+
+static struct attribute *file_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
&shmem_alloc_attr.attr,
&shmem_fallback_attr.attr,
&shmem_fallback_charge_attr.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group file_stats_attr_grp = {
+ .name = "stats",
+ .attrs = file_stats_attrs,
+};
+
+static struct attribute *any_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
+#endif
&split_attr.attr,
&split_failed_attr.attr,
- &split_deferred_attr.attr,
NULL,
};
-static struct attribute_group stats_attr_group = {
+static struct attribute_group any_stats_attr_grp = {
.name = "stats",
- .attrs = stats_attrs,
+ .attrs = any_stats_attrs,
};
+static int sysfs_add_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ int ret = -ENOENT;
+
+ /*
+ * If the group is named, try to merge first, assuming the subdirectory
+ * was already created. This avoids the warning emitted by
+ * sysfs_create_group() if the directory already exists.
+ */
+ if (grp->name)
+ ret = sysfs_merge_group(kobj, grp);
+ if (ret)
+ ret = sysfs_create_group(kobj, grp);
+
+ return ret;
+}
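
sysfs_add_group() tries sysfs_merge_group() first for named groups, so several groups sharing .name = "stats" can layer their attributes into one subdirectory without the "directory already exists" warning from sysfs_create_group(); unnamed groups keep ret at -ENOENT and fall straight through to creation. An illustrative kernel-context sketch (the wrapper name is made up):

/* Sketch: both groups use .name = "stats", so the second call merges
 * into the directory created by the first. */
static int example_register_stats(struct kobject *kobj)
{
	int ret = sysfs_add_group(kobj, &any_stats_attr_grp); /* creates stats/ */

	if (!ret)
		ret = sysfs_add_group(kobj, &anon_stats_attr_grp); /* merges */
	return ret;
}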
+
static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
unsigned long size = (PAGE_SIZE << order) / SZ_1K;
struct thpsize *thpsize;
- int ret;
+ int ret = -ENOMEM;
thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
if (!thpsize)
- return ERR_PTR(-ENOMEM);
+ goto err;
+
+ thpsize->order = order;
ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
"hugepages-%lukB", size);
if (ret) {
kfree(thpsize);
- return ERR_PTR(ret);
+ goto err;
}
- ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
- if (ret) {
- kobject_put(&thpsize->kobj);
- return ERR_PTR(ret);
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
+ if (ret)
+ goto err_put;
+
+ if (BIT(order) & THP_ORDERS_ALL_ANON) {
+ ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
+ if (ret)
+ goto err_put;
}
- ret = sysfs_create_group(&thpsize->kobj, &stats_attr_group);
- if (ret) {
- kobject_put(&thpsize->kobj);
- return ERR_PTR(ret);
+ if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
+ ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
+ if (ret)
+ goto err_put;
}
- thpsize->order = order;
return thpsize;
+err_put:
+ kobject_put(&thpsize->kobj);
+err:
+ return ERR_PTR(ret);
}
static void thpsize_release(struct kobject *kobj)
@@ -653,7 +763,8 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
* disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
* constant so we have to do this here.
*/
- huge_anon_orders_inherit = BIT(PMD_ORDER);
+ if (!anon_orders_configured)
+ huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
@@ -673,7 +784,7 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
goto remove_hp_group;
}
- orders = THP_ORDERS_ALL_ANON;
+ orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
order = highest_order(orders);
while (orders) {
thpsize = thpsize_create(order, *hugepage_kobj);
@@ -838,6 +949,100 @@ out:
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
+static inline int get_order_from_str(const char *size_str)
+{
+ unsigned long size;
+ char *endptr;
+ int order;
+
+ size = memparse(size_str, &endptr);
+
+ if (!is_power_of_2(size))
+ goto err;
+ order = get_order(size);
+ if (BIT(order) & ~THP_ORDERS_ALL_ANON)
+ goto err;
+
+ return order;
+err:
+ pr_err("invalid size %s in thp_anon boot parameter\n", size_str);
+ return -EINVAL;
+}
+
+static char str_dup[PAGE_SIZE] __initdata;
+static int __init setup_thp_anon(char *str)
+{
+ char *token, *range, *policy, *subtoken;
+ unsigned long always, inherit, madvise;
+ char *start_size, *end_size;
+ int start, end, nr;
+ char *p;
+
+ if (!str || strlen(str) + 1 > PAGE_SIZE)
+ goto err;
+ strcpy(str_dup, str);
+
+ always = huge_anon_orders_always;
+ madvise = huge_anon_orders_madvise;
+ inherit = huge_anon_orders_inherit;
+ p = str_dup;
+ while ((token = strsep(&p, ";")) != NULL) {
+ range = strsep(&token, ":");
+ policy = token;
+
+ if (!policy)
+ goto err;
+
+ while ((subtoken = strsep(&range, ",")) != NULL) {
+ if (strchr(subtoken, '-')) {
+ start_size = strsep(&subtoken, "-");
+ end_size = subtoken;
+
+ start = get_order_from_str(start_size);
+ end = get_order_from_str(end_size);
+ } else {
+ start = end = get_order_from_str(subtoken);
+ }
+
+ if (start < 0 || end < 0 || start > end)
+ goto err;
+
+ nr = end - start + 1;
+ if (!strcmp(policy, "always")) {
+ bitmap_set(&always, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ } else if (!strcmp(policy, "madvise")) {
+ bitmap_set(&madvise, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "inherit")) {
+ bitmap_set(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "never")) {
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else {
+ pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
+ goto err;
+ }
+ }
+ }
+
+ huge_anon_orders_always = always;
+ huge_anon_orders_madvise = madvise;
+ huge_anon_orders_inherit = inherit;
+ anon_orders_configured = true;
+ return 1;
+
+err:
+ pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
+ return 0;
+}
+__setup("thp_anon=", setup_thp_anon);
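
From the parser above, the thp_anon= argument is a ';'-separated list of range:policy pairs; each range is a ','-separated list of sizes or size-size spans (memparse() suffixes such as K and M apply), and the policy is one of always, madvise, inherit, or never, e.g. thp_anon=16K-64K:always;128K:madvise. A userspace sketch of the same strsep() tokenization, runnable as-is:

#define _DEFAULT_SOURCE	/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[] = "16K-64K:always;128K:madvise";	/* assumed example */
	char *p = str, *token;

	while ((token = strsep(&p, ";")) != NULL) {
		char *range = strsep(&token, ":");
		char *policy = token;	/* NULL when no ':' was given */
		char *subtoken;

		while (range && (subtoken = strsep(&range, ",")) != NULL)
			printf("range %s -> policy %s\n", subtoken, policy);
	}
	return 0;
}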
+
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
@@ -1007,6 +1212,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(vma->vm_mm);
+ deferred_split_folio(folio, false);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
@@ -1166,6 +1372,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
+ else
+ entry = pmd_mkspecial(entry);
if (write) {
entry = pmd_mkyoung(pmd_mkdirty(entry));
entry = maybe_pmd_mkwrite(entry, vma);
@@ -1249,10 +1457,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
ptl = pud_lock(mm, pud);
if (!pud_none(*pud)) {
if (write) {
- if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
- WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
goto out_unlock;
- }
entry = pud_mkyoung(*pud);
entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
if (pudp_set_access_flags(vma, addr, pud, entry, 1))
@@ -1264,6 +1470,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
+ else
+ entry = pud_mkspecial(entry);
if (write) {
entry = pud_mkyoung(pud_mkdirty(entry));
entry = maybe_pud_mkwrite(entry, vma);
@@ -1377,6 +1585,24 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgtable_t pgtable = NULL;
int ret = -ENOMEM;
+ pmd = pmdp_get_lockless(src_pmd);
+ if (unlikely(pmd_special(pmd))) {
+ dst_ptl = pmd_lock(dst_mm, dst_pmd);
+ src_ptl = pmd_lockptr(src_mm, src_pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ /*
+ * No need to recheck the pmd, it can't change with write
+ * mmap lock held here.
+ *
+ * Meanwhile, make sure this is not a CoW VMA with a writable
+ * mapping; otherwise either an anon page wrongly had the special
+ * bit applied, or we let a PRIVATE mapping wrongly write to the
+ * backing MMIO.
+ */
+ VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
+ goto set_pmd;
+ }
+
/* Skip if can be re-fill on fault */
if (!vma_is_anonymous(dst_vma))
return 0;
@@ -1458,7 +1684,9 @@ out_zero_page:
pmdp_set_wrprotect(src_mm, addr, src_pmd);
if (!userfaultfd_wp(dst_vma))
pmd = pmd_clear_uffd_wp(pmd);
- pmd = pmd_mkold(pmd_wrprotect(pmd));
+ pmd = pmd_wrprotect(pmd);
+set_pmd:
+ pmd = pmd_mkold(pmd);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
@@ -1501,20 +1729,14 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock;
/*
- * When page table lock is held, the huge zero pud should not be
- * under splitting since we don't split the page itself, only pud to
- * a page table.
- */
- if (is_huge_zero_pud(pud)) {
- /* No huge zero pud yet */
- }
-
- /*
* TODO: once we support anonymous pages, use
* folio_try_dup_anon_rmap_*() and split if duplicating fails.
*/
- pudp_set_wrprotect(src_mm, addr, src_pud);
- pud = pud_mkold(pud_wrprotect(pud));
+ if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
+ pudp_set_wrprotect(src_mm, addr, src_pud);
+ pud = pud_wrprotect(pud);
+ }
+ pud = pud_mkold(pud);
set_pud_at(dst_mm, addr, dst_pud, pud);
ret = 0;
@@ -1673,22 +1895,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pmd_t oldpmd = vmf->orig_pmd;
- pmd_t pmd;
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int nid = NUMA_NO_NODE;
- int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+ int target_nid, last_cpupid;
+ pmd_t pmd, old_pmd;
bool writable = false;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ old_pmd = pmdp_get(vmf->pmd);
+
+ if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(old_pmd, vma->vm_page_prot);
/*
* Detect now whether the PMD could be writable; this information
@@ -1703,18 +1926,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!folio)
goto out_map;
- /* See similar comment in do_numa_page for explanation */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
nid = folio_nid(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if (node_is_toptier(nid))
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+ &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -1734,13 +1949,13 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
out_map:
/* Restore the PMD */
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (writable)
pmd = pmd_mkwrite(pmd, vma);
@@ -2062,8 +2277,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
toptier)
goto unlock;
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !toptier)
+ if (folio_use_access_time(folio))
folio_xchg_access_time(folio,
jiffies_to_msecs(jiffies));
}
@@ -2116,6 +2330,53 @@ unlock:
return ret;
}
+/*
+ * Returns:
+ *
+ * - 0: if pud leaf changed from under us
+ * - 1: if pud can be skipped
+ * - HPAGE_PUD_NR: if pud was successfully processed
+ */
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pud_t oldpud, entry;
+ spinlock_t *ptl;
+
+ tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
+
+ /* NUMA balancing doesn't apply to dax */
+ if (cp_flags & MM_CP_PROT_NUMA)
+ return 1;
+
+ /*
+ * Huge entries on userfault-wp only work with anonymous memory,
+ * and we don't have anonymous PUDs yet.
+ */
+ if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
+ return 1;
+
+ ptl = __pud_trans_huge_lock(pudp, vma);
+ if (!ptl)
+ return 0;
+
+ /*
+ * Can't clear PUD or it can race with concurrent zapping. See
+ * change_huge_pmd().
+ */
+ oldpud = pudp_invalidate(vma, addr, pudp);
+ entry = pud_modify(oldpud, newprot);
+ set_pud_at(mm, addr, pudp, entry);
+ tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
+
+ spin_unlock(ptl);
+ return HPAGE_PUD_NR;
+}
+#endif
+
#ifdef CONFIG_USERFAULTFD
/*
* The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
@@ -2295,12 +2556,14 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pud, unsigned long addr)
{
spinlock_t *ptl;
+ pud_t orig_pud;
ptl = __pud_trans_huge_lock(pud, vma);
if (!ptl)
return 0;
- pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
+ orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
+ arch_check_zapped_pud(vma, orig_pud);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_special_huge(vma)) {
spin_unlock(ptl);
@@ -2344,6 +2607,11 @@ out:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(&range);
}
+#else
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
@@ -2778,7 +3046,7 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static void remap_page(struct folio *folio, unsigned long nr)
+static void remap_page(struct folio *folio, unsigned long nr, int flags)
{
int i = 0;
@@ -2786,7 +3054,7 @@ static void remap_page(struct folio *folio, unsigned long nr)
if (!folio_test_anon(folio))
return;
for (;;) {
- remove_migration_ptes(folio, folio, true);
+ remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
i += folio_nr_pages(folio);
if (i >= nr)
break;
@@ -2794,25 +3062,25 @@ static void remap_page(struct folio *folio, unsigned long nr)
}
}
-static void lru_add_page_tail(struct page *head, struct page *tail,
+static void lru_add_page_tail(struct folio *folio, struct page *tail,
struct lruvec *lruvec, struct list_head *list)
{
- VM_BUG_ON_PAGE(!PageHead(head), head);
- VM_BUG_ON_PAGE(PageLRU(tail), head);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ VM_BUG_ON_FOLIO(PageLRU(tail), folio);
lockdep_assert_held(&lruvec->lru_lock);
if (list) {
/* page reclaim is reclaiming a huge page */
- VM_WARN_ON(PageLRU(head));
+ VM_WARN_ON(folio_test_lru(folio));
get_page(tail);
list_add_tail(&tail->lru, list);
} else {
/* head is still on lru (and we have it frozen) */
- VM_WARN_ON(!PageLRU(head));
- if (PageUnevictable(tail))
+ VM_WARN_ON(!folio_test_lru(folio));
+ if (folio_test_unevictable(folio))
tail->mlock_count = 0;
else
- list_add_tail(&tail->lru, &head->lru);
+ list_add_tail(&tail->lru, &folio->lru);
SetPageLRU(tail);
}
}
@@ -2855,8 +3123,10 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
(1L << PG_workingset) |
(1L << PG_locked) |
(1L << PG_unevictable) |
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
(1L << PG_arch_2) |
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
(1L << PG_arch_3) |
#endif
(1L << PG_dirty) |
@@ -2911,7 +3181,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
* pages to show after the currently processed elements - e.g.
* migrate_pages
*/
- lru_add_page_tail(head, page_tail, lruvec, list);
+ lru_add_page_tail(folio, page_tail, lruvec, list);
}
static void __split_huge_page(struct page *page, struct list_head *list,
@@ -2974,7 +3244,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* Caller disabled irqs, so they are still disabled here */
split_page_owner(head, order, new_order);
- pgalloc_tag_split(head, 1 << order);
+ pgalloc_tag_split(folio, order, new_order);
/* See comment in __split_huge_page_tail() */
if (folio_test_anon(folio)) {
@@ -2994,7 +3264,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (nr_dropped)
shmem_uncharge(folio->mapping->host, nr_dropped);
- remap_page(folio, nr);
+ remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
/*
* set page to its compound_head when split to non order-0 pages, so
@@ -3023,7 +3293,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
int extra_pins;
@@ -3035,7 +3305,8 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
extra_pins = folio_nr_pages(folio);
if (pextra_pins)
*pextra_pins = extra_pins;
- return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
+ return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+ caller_pins;
}
/*
@@ -3079,6 +3350,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
* released, or if some unexpected race happened (e.g., anon VMA disappeared,
* truncation).
*
+ * For non-anonymous folios, callers should ensure that the new order
+ * respects the mapping's minimum folio order, if one is set.
+ *
* Returns -EINVAL when trying to split to an order that is incompatible
* with the folio. Splitting to order 0 is compatible with all folios.
*/
@@ -3089,8 +3363,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
/* reset xarray order to new order after split */
XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
- struct anon_vma *anon_vma = NULL;
+ bool is_anon = folio_test_anon(folio);
struct address_space *mapping = NULL;
+ struct anon_vma *anon_vma = NULL;
int order = folio_order(folio);
int extra_pins, ret;
pgoff_t end;
@@ -3102,7 +3377,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
- if (folio_test_anon(folio)) {
+ if (is_anon) {
/* order-1 is not supported for anonymous THP. */
if (new_order == 1) {
VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3142,7 +3417,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (folio_test_writeback(folio))
return -EBUSY;
- if (folio_test_anon(folio)) {
+ if (is_anon) {
/*
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first take a
@@ -3160,6 +3435,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
+ unsigned int min_order;
gfp_t gfp;
mapping = folio->mapping;
@@ -3170,6 +3446,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
goto out;
}
+ min_order = mapping_min_folio_order(folio->mapping);
+ if (new_order < min_order) {
+ VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+ min_order);
+ ret = -EINVAL;
+ goto out;
+ }
+
gfp = current_gfp_context(mapping_gfp_mask(mapping) &
GFP_RECLAIM_MASK);
@@ -3203,7 +3487,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
* Racy check if we can split the page, before unmap_folio() will
* split PMDs
*/
- if (!can_split_folio(folio, &extra_pins)) {
+ if (!can_split_folio(folio, 1, &extra_pins)) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -3229,6 +3513,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (folio_order(folio) > 1 &&
!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
/*
* Reinitialize page_deferred_list after removing the
* page from the split_queue, otherwise a subsequent
@@ -3255,6 +3544,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
}
}
+ if (is_anon) {
+ mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+ mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
+ }
__split_huge_page(page, list, end, new_order);
ret = 0;
} else {
@@ -3263,7 +3556,7 @@ fail:
if (mapping)
xas_unlock(&xas);
local_irq_enable();
- remap_page(folio, folio_nr_pages(folio));
+ remap_page(folio, folio_nr_pages(folio), 0);
ret = -EAGAIN;
}
@@ -3282,6 +3575,30 @@ out:
return ret;
}
+int min_order_for_split(struct folio *folio)
+{
+ if (folio_test_anon(folio))
+ return 0;
+
+ if (!folio->mapping) {
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_SPLIT_PAGE_FAILED);
+ return -EBUSY;
+ }
+
+ return mapping_min_folio_order(folio->mapping);
+}
+
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ return split_huge_page_to_list_to_order(&folio->page, list, ret);
+}
+
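split_folio_to_list() now consults min_order_for_split(), so a file-backed folio is never split below its mapping's minimum folio order, and a truncated folio (NULL mapping) fails with -EBUSY. A hypothetical caller (kernel-context sketch; the function name is made up, and list == NULL puts the split pages back on the LRU):

/* Hypothetical sketch: split a folio as far down as its mapping allows
 * (order 0 for anonymous folios). */
static int example_split(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}
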
void __folio_undo_large_rmappable(struct folio *folio)
{
struct deferred_split *ds_queue;
@@ -3291,12 +3608,18 @@ void __folio_undo_large_rmappable(struct folio *folio)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
list_del_init(&folio->_deferred_list);
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
-void deferred_split_folio(struct folio *folio)
+/* partially_mapped=false won't clear PG_partially_mapped folio flag */
+void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
#ifdef CONFIG_MEMCG
@@ -3311,6 +3634,9 @@ void deferred_split_folio(struct folio *folio)
if (folio_order(folio) <= 1)
return;
+ if (!partially_mapped && !split_underused_thp)
+ return;
+
/*
* The try_to_unmap() in the page reclaim path might reach here too;
* this may cause a race condition that corrupts the deferred split queue.
@@ -3324,14 +3650,21 @@ void deferred_split_folio(struct folio *folio)
if (folio_test_swapcache(folio))
return;
- if (!list_empty(&folio->_deferred_list))
- return;
-
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ if (partially_mapped) {
+ if (!folio_test_partially_mapped(folio)) {
+ __folio_set_partially_mapped(folio);
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
+
+ }
+ } else {
+ /* partially mapped folios cannot become non-partially mapped */
+ VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
+ }
if (list_empty(&folio->_deferred_list)) {
- if (folio_test_pmd_mappable(folio))
- count_vm_event(THP_DEFERRED_SPLIT_PAGE);
- count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
@@ -3356,6 +3689,39 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
return READ_ONCE(ds_queue->split_queue_len);
}
+static bool thp_underused(struct folio *folio)
+{
+ int num_zero_pages = 0, num_filled_pages = 0;
+ void *kaddr;
+ int i;
+
+ if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
+ return false;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
+ if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
+ num_zero_pages++;
+ if (num_zero_pages > khugepaged_max_ptes_none) {
+ kunmap_local(kaddr);
+ return true;
+ }
+ } else {
+ /*
+ * Bail out early once the number of non-zero
+ * filled pages exceeds the threshold.
+ */
+ num_filled_pages++;
+ if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+ kunmap_local(kaddr);
+ return false;
+ }
+ }
+ kunmap_local(kaddr);
+ }
+ return false;
+}
+
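thp_underused() scans the folio's pages and declares a PMD-sized folio underused once more than khugepaged_max_ptes_none of them are zero-filled; it bails out early in the other direction once enough non-zero pages have been seen, and it is disabled entirely at the default max_ptes_none of HPAGE_PMD_NR - 1. The thresholds, as a userspace sketch with assumed x86-64 values:

#include <stdio.h>

int main(void)
{
	int hpage_pmd_nr = 512;		/* assumed: 2 MiB / 4 KiB pages */
	int max_ptes_none = 64;		/* assumed tuning; the default is 511 */

	printf("underused after %d zero pages\n", max_ptes_none + 1);
	printf("kept after %d non-zero pages\n",
	       hpage_pmd_nr - max_ptes_none);
	return 0;
}
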
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -3379,6 +3745,11 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
list_move(&folio->_deferred_list, &list);
} else {
/* We lost race with folio_put() */
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
list_del_init(&folio->_deferred_list);
ds_queue->split_queue_len--;
}
@@ -3388,13 +3759,35 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+ bool did_split = false;
+ bool underused = false;
+
+ if (!folio_test_partially_mapped(folio)) {
+ underused = thp_underused(folio);
+ if (!underused)
+ goto next;
+ }
if (!folio_trylock(folio))
goto next;
- /* split_huge_page() removes page from list on success */
- if (!split_folio(folio))
+ if (!split_folio(folio)) {
+ did_split = true;
+ if (underused)
+ count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
split++;
+ }
folio_unlock(folio);
next:
+ /*
+ * split_folio() removes the folio from the list on success.
+ * Only keep the folio on the queue if it is partially mapped.
+ * If thp_underused() returned false, or if split_folio() failed
+ * on an underused folio, consider it used and don't add it back
+ * to the split queue.
+ */
+ if (!did_split && !folio_test_partially_mapped(folio)) {
+ list_del_init(&folio->_deferred_list);
+ ds_queue->split_queue_len--;
+ }
folio_put(folio);
}
@@ -3480,16 +3873,11 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
vaddr_start &= PAGE_MASK;
vaddr_end &= PAGE_MASK;
- /* Find the task_struct from pid */
- rcu_read_lock();
- task = find_task_by_vpid(pid);
+ task = find_get_task_by_vpid(pid);
if (!task) {
- rcu_read_unlock();
ret = -ESRCH;
goto out;
}
- get_task_struct(task);
- rcu_read_unlock();
/* Find the mm_struct */
mm = get_task_mm(task);
@@ -3510,8 +3898,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
*/
for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
struct vm_area_struct *vma = vma_lookup(mm, addr);
- struct page *page;
+ struct folio_walk fw;
struct folio *folio;
+ struct address_space *mapping;
+ unsigned int target_order = new_order;
if (!vma)
break;
@@ -3522,17 +3912,20 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
continue;
}
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- if (IS_ERR_OR_NULL(page))
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (!folio)
continue;
- folio = page_folio(page);
if (!is_transparent_hugepage(folio))
goto next;
- if (new_order >= folio_order(folio))
+ if (!folio_test_anon(folio)) {
+ mapping = folio->mapping;
+ target_order = max(new_order,
+ mapping_min_folio_order(mapping));
+ }
+
+ if (target_order >= folio_order(folio))
goto next;
total++;
@@ -3542,18 +3935,29 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
* can be split or not. So skip the check here.
*/
if (!folio_test_private(folio) &&
- !can_split_folio(folio, NULL))
+ !can_split_folio(folio, 0, NULL))
goto next;
if (!folio_trylock(folio))
goto next;
+ folio_get(folio);
+ folio_walk_end(&fw, vma);
- if (!split_folio_to_order(folio, new_order))
+ if (!folio_test_anon(folio) && folio->mapping != mapping)
+ goto unlock;
+
+ if (!split_folio_to_order(folio, target_order))
split++;
+unlock:
+
folio_unlock(folio);
-next:
folio_put(folio);
+
+ cond_resched();
+ continue;
+next:
+ folio_walk_end(&fw, vma);
cond_resched();
}
mmap_read_unlock(mm);
@@ -3575,6 +3979,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
pgoff_t index;
int nr_pages = 1;
unsigned long total = 0, split = 0;
+ unsigned int min_order;
+ unsigned int target_order;
file = getname_kernel(file_path);
if (IS_ERR(file))
@@ -3588,6 +3994,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
file_path, off_start, off_end);
mapping = candidate->f_mapping;
+ min_order = mapping_min_folio_order(mapping);
+ target_order = max(new_order, min_order);
for (index = off_start; index < off_end; index += nr_pages) {
struct folio *folio = filemap_get_folio(mapping, index);
@@ -3602,15 +4010,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
total++;
nr_pages = folio_nr_pages(folio);
- if (new_order >= folio_order(folio))
+ if (target_order >= folio_order(folio))
goto next;
if (!folio_trylock(folio))
goto next;
- if (!split_folio_to_order(folio, new_order))
+ if (folio->mapping != mapping)
+ goto unlock;
+
+ if (!split_folio_to_order(folio, target_order))
split++;
+unlock:
folio_unlock(folio);
next:
folio_put(folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaf508be0a2b..def84d8bcf2d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -56,16 +56,6 @@ struct hstate hstates[HUGE_MAX_HSTATE];
#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
-{
- return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
- 1 << order);
-}
-#else
-static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
-{
- return false;
-}
#endif
static unsigned long hugetlb_cma_size __initdata;
@@ -82,14 +72,14 @@ static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
* free_huge_pages, and surplus_huge_pages.
*/
-DEFINE_SPINLOCK(hugetlb_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
/*
* Serializes faults on the same logical page. This is used to
* prevent spurious OOMs when the hugepage pool is fully utilized.
*/
-static int num_fault_mutexes;
-struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static int num_fault_mutexes __ro_after_init;
+struct mutex *hugetlb_fault_mutex_table __ro_after_init;
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -100,6 +90,17 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+static void hugetlb_free_folio(struct folio *folio)
+{
+#ifdef CONFIG_CMA
+ int nid = folio_nid(folio);
+
+ if (cma_free_folio(hugetlb_cma[nid], folio))
+ return;
+#endif
+ folio_put(folio);
+}
+
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
if (spool->count)
@@ -1512,95 +1513,54 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-/* used to demote non-gigantic_huge pages as well */
-static void __destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order, bool demote)
-{
- int i;
- int nr_pages = 1 << order;
- struct page *p;
-
- atomic_set(&folio->_entire_mapcount, 0);
- atomic_set(&folio->_large_mapcount, 0);
- atomic_set(&folio->_pincount, 0);
-
- for (i = 1; i < nr_pages; i++) {
- p = folio_page(folio, i);
- p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
- p->mapping = NULL;
- clear_compound_head(p);
- if (!demote)
- set_page_refcounted(p);
- }
-
- __folio_clear_head(folio);
-}
-
-static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
- unsigned int order)
-{
- __destroy_compound_gigantic_folio(folio, order, true);
-}
-
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order)
-{
- __destroy_compound_gigantic_folio(folio, order, false);
-}
-
-static void free_gigantic_folio(struct folio *folio, unsigned int order)
-{
- /*
- * If the page isn't allocated using the cma allocator,
- * cma_release() returns false.
- */
-#ifdef CONFIG_CMA
- int nid = folio_nid(folio);
-
- if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
- return;
-#endif
-
- free_contig_range(folio_pfn(folio), 1 << order);
-}
-
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
- struct page *page;
- unsigned long nr_pages = pages_per_huge_page(h);
+ struct folio *folio;
+ int order = huge_page_order(h);
+ bool retried = false;
+
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
-
+retry:
+ folio = NULL;
#ifdef CONFIG_CMA
{
int node;
- if (hugetlb_cma[nid]) {
- page = cma_alloc(hugetlb_cma[nid], nr_pages,
- huge_page_order(h), true);
- if (page)
- return page_folio(page);
- }
+ if (hugetlb_cma[nid])
+ folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
- if (!(gfp_mask & __GFP_THISNODE)) {
+ if (!folio && !(gfp_mask & __GFP_THISNODE)) {
for_each_node_mask(node, *nodemask) {
if (node == nid || !hugetlb_cma[node])
continue;
- page = cma_alloc(hugetlb_cma[node], nr_pages,
- huge_page_order(h), true);
- if (page)
- return page_folio(page);
+ folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
+ if (folio)
+ break;
}
}
}
#endif
+ if (!folio) {
+ folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
+ if (!folio)
+ return NULL;
+ }
- page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
- return page ? page_folio(page) : NULL;
+ if (folio_ref_freeze(folio, 1))
+ return folio;
+
+ pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
+ hugetlb_free_folio(folio);
+ if (!retried) {
+ retried = true;
+ goto retry;
+ }
+ return NULL;
}
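
With the gigantic path now getting a ready-made compound folio from cma_alloc_folio() or folio_alloc_gigantic(), the hand-rolled tail-page preparation goes away; the only remaining fix-up is folio_ref_freeze(folio, 1), which atomically drops a refcount of exactly 1 to 0 and fails if a speculative reference (e.g. from GUP-fast) has appeared, in which case the folio is freed and the allocation retried once. A userspace analogue of the freeze, runnable as-is (C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

/* Analogue of folio_ref_freeze(folio, 1): replace a refcount of exactly
 * 'expected' with 0; fails if anyone else holds a reference. */
static int ref_freeze(atomic_int *ref, int expected)
{
	return atomic_compare_exchange_strong(ref, &expected, 0);
}

int main(void)
{
	atomic_int ref = 1;

	printf("freeze: %s\n", ref_freeze(&ref, 1) ? "ok" : "busy");
	return 0;
}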
#else /* !CONFIG_CONTIG_ALLOC */
@@ -1617,10 +1577,6 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
{
return NULL;
}
-static inline void free_gigantic_folio(struct folio *folio,
- unsigned int order) { }
-static inline void destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order) { }
#endif
/*
@@ -1748,18 +1704,8 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
folio_ref_unfreeze(folio, 1);
- /*
- * Non-gigantic pages demoted from CMA allocated gigantic pages
- * need to be given back to CMA in free_gigantic_folio.
- */
- if (hstate_is_gigantic(h) ||
- hugetlb_cma_folio(folio, huge_page_order(h))) {
- destroy_compound_gigantic_folio(folio, huge_page_order(h));
- free_gigantic_folio(folio, huge_page_order(h));
- } else {
- INIT_LIST_HEAD(&folio->_deferred_list);
- folio_put(folio);
- }
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ hugetlb_free_folio(folio);
}
/*
@@ -2032,95 +1978,6 @@ static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int ni
spin_unlock_irq(&hugetlb_lock);
}
-static bool __prep_compound_gigantic_folio(struct folio *folio,
- unsigned int order, bool demote)
-{
- int i, j;
- int nr_pages = 1 << order;
- struct page *p;
-
- __folio_clear_reserved(folio);
- for (i = 0; i < nr_pages; i++) {
- p = folio_page(folio, i);
-
- /*
- * For gigantic hugepages allocated through bootmem at
- * boot, it's safer to be consistent with the not-gigantic
- * hugepages and clear the PG_reserved bit from all tail pages
- * too. Otherwise drivers using get_user_pages() to access tail
- * pages may get the reference counting wrong if they see
- * PG_reserved set on a tail page (despite the head page not
- * having PG_reserved set). Enforcing this consistency between
- * head and tail pages allows drivers to optimize away a check
- * on the head page when they need to know if put_page() is needed
- * after get_user_pages().
- */
- if (i != 0) /* head page cleared above */
- __ClearPageReserved(p);
- /*
- * Subtle and very unlikely
- *
- * Gigantic 'page allocators' such as memblock or cma will
- * return a set of pages with each page ref counted. We need
- * to turn this set of pages into a compound page with tail
- * page ref counts set to zero. Code such as speculative page
- * cache adding could take a ref on a 'to be' tail page.
- * We need to respect any increased ref count, and only set
- * the ref count to zero if count is currently 1. If count
- * is not 1, we return an error. An error return indicates
- * the set of pages can not be converted to a gigantic page.
- * The caller who allocated the pages should then discard the
- * pages using the appropriate free interface.
- *
- * In the case of demote, the ref count will be zero.
- */
- if (!demote) {
- if (!page_ref_freeze(p, 1)) {
- pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
- goto out_error;
- }
- } else {
- VM_BUG_ON_PAGE(page_count(p), p);
- }
- if (i != 0)
- set_compound_head(p, &folio->page);
- }
- __folio_set_head(folio);
- /* we rely on prep_new_hugetlb_folio to set the hugetlb flag */
- folio_set_order(folio, order);
- atomic_set(&folio->_entire_mapcount, -1);
- atomic_set(&folio->_large_mapcount, -1);
- atomic_set(&folio->_pincount, 0);
- return true;
-
-out_error:
- /* undo page modifications made above */
- for (j = 0; j < i; j++) {
- p = folio_page(folio, j);
- if (j != 0)
- clear_compound_head(p);
- set_page_refcounted(p);
- }
- /* need to clear PG_reserved on remaining tail pages */
- for (; j < nr_pages; j++) {
- p = folio_page(folio, j);
- __ClearPageReserved(p);
- }
- return false;
-}
-
-static bool prep_compound_gigantic_folio(struct folio *folio,
- unsigned int order)
-{
- return __prep_compound_gigantic_folio(folio, order, false);
-}
-
-static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
- unsigned int order)
-{
- return __prep_compound_gigantic_folio(folio, order, true);
-}
-
/*
* Find and lock address space (mapping) in write mode.
*
@@ -2159,7 +2016,6 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
*/
if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
alloc_try_hard = false;
- gfp_mask |= __GFP_COMP|__GFP_NOWARN;
if (alloc_try_hard)
gfp_mask |= __GFP_RETRY_MAYFAIL;
if (nid == NUMA_NO_NODE)
@@ -2206,48 +2062,16 @@ retry:
return folio;
}
-static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask,
- nodemask_t *node_alloc_noretry)
-{
- struct folio *folio;
- bool retry = false;
-
-retry:
- if (hstate_is_gigantic(h))
- folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
- else
- folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
- nid, nmask, node_alloc_noretry);
- if (!folio)
- return NULL;
-
- if (hstate_is_gigantic(h)) {
- if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
- /*
- * Rare failure to convert pages to compound page.
- * Free pages and try again - ONCE!
- */
- free_gigantic_folio(folio, huge_page_order(h));
- if (!retry) {
- retry = true;
- goto retry;
- }
- return NULL;
- }
- }
-
- return folio;
-}
-
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
nodemask_t *node_alloc_noretry)
{
struct folio *folio;
- folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
- node_alloc_noretry);
+ if (hstate_is_gigantic(h))
+ folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+ else
+ folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
if (folio)
init_new_hugetlb_folio(h, folio);
return folio;
@@ -2265,7 +2089,10 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
{
struct folio *folio;
- folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+ if (hstate_is_gigantic(h))
+ folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+ else
+ folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
if (!folio)
return NULL;
@@ -2549,9 +2376,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
if (mpol_is_preferred_many(mpol)) {
- gfp_t gfp = gfp_mask | __GFP_NOWARN;
+ gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
/* Fallback to all nodes if page==NULL */
@@ -3333,6 +3159,7 @@ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
+ __ClearPageReserved(folio_page(folio, pfn - head_pfn));
__init_single_page(page, pfn, zone, nid);
prep_compound_tail((struct page *)folio, pfn - head_pfn);
ret = page_ref_freeze(page, 1);
@@ -3921,101 +3748,120 @@ out:
return 0;
}
-static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
+static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
+ struct list_head *src_list)
{
- int i, nid = folio_nid(folio);
- struct hstate *target_hstate;
- struct page *subpage;
- struct folio *inner_folio;
- int rc = 0;
-
- target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
-
- remove_hugetlb_folio(h, folio, false);
- spin_unlock_irq(&hugetlb_lock);
+ long rc;
+ struct folio *folio, *next;
+ LIST_HEAD(dst_list);
+ LIST_HEAD(ret_list);
- /*
- * If vmemmap already existed for folio, the remove routine above would
- * have cleared the hugetlb folio flag. Hence the folio is technically
- * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
- * passed hugetlb folios and will BUG otherwise.
- */
- if (folio_test_hugetlb(folio)) {
- rc = hugetlb_vmemmap_restore_folio(h, folio);
- if (rc) {
- /* Allocation of vmemmap failed, we cannot demote the folio */
- spin_lock_irq(&hugetlb_lock);
- add_hugetlb_folio(h, folio, false);
- return rc;
- }
- }
-
- /*
- * Use destroy_compound_hugetlb_folio_for_demote for all huge page
- * sizes as it will not ref count folios.
- */
- destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
+ rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
+ list_splice_init(&ret_list, src_list);
/*
* Taking target hstate mutex synchronizes with set_max_huge_pages.
* Without the mutex, pages added to target hstate could be marked
* as surplus.
*
- * Note that we already hold h->resize_lock. To prevent deadlock,
+ * Note that we already hold src->resize_lock. To prevent deadlock,
* use the convention of always taking larger size hstate mutex first.
*/
- mutex_lock(&target_hstate->resize_lock);
- for (i = 0; i < pages_per_huge_page(h);
- i += pages_per_huge_page(target_hstate)) {
- subpage = folio_page(folio, i);
- inner_folio = page_folio(subpage);
- if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_folio_for_demote(inner_folio,
- target_hstate->order);
- else
- prep_compound_page(subpage, target_hstate->order);
- folio_change_private(inner_folio, NULL);
- prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
- free_huge_folio(inner_folio);
+ mutex_lock(&dst->resize_lock);
+
+ list_for_each_entry_safe(folio, next, src_list, lru) {
+ int i;
+
+ if (folio_test_hugetlb_vmemmap_optimized(folio))
+ continue;
+
+ list_del(&folio->lru);
+
+ split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
+ pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
+
+ for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
+ struct page *page = folio_page(folio, i);
+
+ page->mapping = NULL;
+ clear_compound_head(page);
+ prep_compound_page(page, dst->order);
+
+ init_new_hugetlb_folio(dst, page_folio(page));
+ list_add(&page->lru, &dst_list);
+ }
}
- mutex_unlock(&target_hstate->resize_lock);
- spin_lock_irq(&hugetlb_lock);
+ prep_and_add_allocated_folios(dst, &dst_list);
- /*
- * Not absolutely necessary, but for consistency update max_huge_pages
- * based on pool changes for the demoted page.
- */
- h->max_huge_pages--;
- target_hstate->max_huge_pages +=
- pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
+ mutex_unlock(&dst->resize_lock);
return rc;
}
-static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
+ unsigned long nr_to_demote)
__must_hold(&hugetlb_lock)
{
int nr_nodes, node;
- struct folio *folio;
+ struct hstate *dst;
+ long rc = 0;
+ long nr_demoted = 0;
lockdep_assert_held(&hugetlb_lock);
/* We should never get here if no demote order */
- if (!h->demote_order) {
+ if (!src->demote_order) {
pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
return -EINVAL; /* internal error */
}
+ dst = size_to_hstate(PAGE_SIZE << src->demote_order);
- for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
- list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
+ for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
+ LIST_HEAD(list);
+ struct folio *folio, *next;
+
+ list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
if (folio_test_hwpoison(folio))
continue;
- return demote_free_hugetlb_folio(h, folio);
+
+ remove_hugetlb_folio(src, folio, false);
+ list_add(&folio->lru, &list);
+
+ if (++nr_demoted == nr_to_demote)
+ break;
+ }
+
+ spin_unlock_irq(&hugetlb_lock);
+
+ rc = demote_free_hugetlb_folios(src, dst, &list);
+
+ spin_lock_irq(&hugetlb_lock);
+
+ list_for_each_entry_safe(folio, next, &list, lru) {
+ list_del(&folio->lru);
+ add_hugetlb_folio(src, folio, false);
+
+ nr_demoted--;
}
+
+ if (rc < 0 || nr_demoted == nr_to_demote)
+ break;
}
/*
+ * Not absolutely necessary, but for consistency update max_huge_pages
+ * based on pool changes for the demoted page.
+ */
+ src->max_huge_pages -= nr_demoted;
+ dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
+
+ if (rc < 0)
+ return rc;
+
+ if (nr_demoted)
+ return nr_demoted;
+ /*
* Only way to get here is if all pages on free lists are poisoned.
* Return -EBUSY so that caller will not retry.
*/
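The pool accounting above uses a shift because hstate sizes are powers of two. A worked example with the usual x86-64 orders (an assumption; other architectures differ):

	/* src = 1 GiB (order 18), dst = 2 MiB (order 9):
	 *   dst->max_huge_pages += nr_demoted << (18 - 9)
	 *                        = nr_demoted * 512
	 * each demoted gigantic page yields 512 PMD-sized pages. */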
@@ -4249,6 +4095,8 @@ static ssize_t demote_store(struct kobject *kobj,
spin_lock_irq(&hugetlb_lock);
while (nr_demote) {
+ long rc;
+
/*
* Check for available pages to demote each time through the
* loop as demote_pool_huge_page will drop hugetlb_lock.
@@ -4261,11 +4109,13 @@ static ssize_t demote_store(struct kobject *kobj,
if (!nr_available)
break;
- err = demote_pool_huge_page(h, n_mask);
- if (err)
+ rc = demote_pool_huge_page(h, n_mask, nr_demote);
+ if (rc < 0) {
+ err = rc;
break;
+ }
- nr_demote--;
+ nr_demote -= rc;
}
spin_unlock_irq(&hugetlb_lock);
@@ -6048,7 +5898,7 @@ retry_avoidcopy:
* When the original hugepage is shared one, it does not have
* anon_vma prepared.
*/
- ret = vmf_anon_prepare(vmf);
+ ret = __vmf_anon_prepare(vmf);
if (unlikely(ret))
goto out_release_all;
@@ -6247,7 +6097,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
}
if (!(vma->vm_flags & VM_MAYSHARE)) {
- ret = vmf_anon_prepare(vmf);
+ ret = __vmf_anon_prepare(vmf);
if (unlikely(ret))
goto out;
}
@@ -6378,6 +6228,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
folio_unlock(folio);
out:
hugetlb_vma_unlock_read(vma);
+
+ /*
+ * We must check whether to release the per-VMA lock: __vmf_anon_prepare()
+ * is the only way ret can be set to VM_FAULT_RETRY.
+ */
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vma);
+
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
return ret;
@@ -6599,6 +6457,14 @@ out_ptl:
}
out_mutex:
hugetlb_vma_unlock_read(vma);
+
+ /*
+ * We must check whether to release the per-VMA lock: __vmf_anon_prepare()
+ * in hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+ */
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vma);
+
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
* Generally it's safe to hold refcount during waiting page lock. But
@@ -7211,7 +7077,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
return 0;
}
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
struct vm_area_struct *vma,
unsigned long addr, pgoff_t idx)
@@ -7373,7 +7239,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
-#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
@@ -7396,7 +7262,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
return false;
}
-#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7494,7 +7360,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
if (huge_page_size(h) == PMD_SIZE)
return PUD_SIZE - PMD_SIZE;
#endif
@@ -7503,10 +7369,6 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-/*
- * These functions are overwritable if your architecture needs its own
- * behavior.
- */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
bool ret = true;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 4ff238ba1250..e716c4671a15 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -114,10 +114,10 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
}
page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
idx),
- fault_parent);
+ fault_parent, false);
page_counter_init(
hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
- rsvd_parent);
+ rsvd_parent, false);
limit = round_down(PAGE_COUNTER_MAX,
pages_per_huge_page(&hstates[idx]));
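The extra boolean is a third parameter that page_counter_init() grows elsewhere in this series; a hedged sketch of the assumed signature (not shown in this hunk, parameter name included):

void page_counter_init(struct page_counter *counter,
		       struct page_counter *parent,
		       bool protection_support);

Passing false here would mean hugetlb counters track no min/low protection, letting the counter skip protected-usage propagation on every charge.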
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 0c3f56b3578e..57b7f591eee8 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -43,6 +43,8 @@ struct vmemmap_remap_walk {
#define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0)
/* Skip the TLB flush when we remap the PTE */
#define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1)
+/* synchronize_rcu() to avoid writes from page_ref_add_unless() */
+#define VMEMMAP_SYNCHRONIZE_RCU BIT(2)
unsigned long flags;
};
@@ -457,6 +459,9 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
+ if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+ synchronize_rcu();
+
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
vmemmap_reuse = vmemmap_start;
vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;
@@ -489,10 +494,7 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
*/
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
- /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
- synchronize_rcu();
-
- return __hugetlb_vmemmap_restore_folio(h, folio, 0);
+ return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
}
/**
@@ -515,14 +517,14 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct folio *folio, *t_folio;
long restored = 0;
long ret = 0;
-
- /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
- synchronize_rcu();
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
- ret = __hugetlb_vmemmap_restore_folio(h, folio,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
+ /* only need to synchronize_rcu() once for each batch */
+ flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
+
if (ret)
break;
restored++;
@@ -570,6 +572,9 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
return ret;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
+
+ if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+ synchronize_rcu();
/*
* Very Subtle
* If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
@@ -617,10 +622,7 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);
- /* avoid writes from page_ref_add_unless() while folding vmemmap */
- synchronize_rcu();
-
- __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
free_vmemmap_page_list(&vmemmap_pages);
}
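The flag converts a per-folio synchronize_rcu() into a per-batch one: a grace period can take many milliseconds, so paying it once up front instead of once per folio matters for large pool resizes. The pattern, reduced to its core:

	unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH |
			      VMEMMAP_SYNCHRONIZE_RCU;

	list_for_each_entry(folio, folio_list, lru) {
		__hugetlb_vmemmap_restore_folio(h, folio, flags);
		/* the first call paid the grace period for the whole batch */
		flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
	}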
@@ -647,6 +649,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
{
struct folio *folio;
LIST_HEAD(vmemmap_pages);
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
list_for_each_entry(folio, folio_list, lru) {
int ret = hugetlb_vmemmap_split_folio(h, folio);
@@ -663,14 +666,12 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
- /* avoid writes from page_ref_add_unless() while folding vmemmap */
- synchronize_rcu();
-
list_for_each_entry(folio, folio_list, lru) {
int ret;
- ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
+ /* only need to synchronize_rcu() once for each batch */
+ flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
/*
* Pages to be freed may have been accumulated. If we
@@ -684,8 +685,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
}
}
diff --git a/mm/internal.h b/mm/internal.h
index b4d86436565b..93083bbeeefa 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -8,13 +8,19 @@
#define __MM_INTERNAL_H
#include <linux/fs.h>
+#include <linux/khugepaged.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>
+/* Internal core VMA manipulation functions. */
+#include "vma.h"
+
struct folio_batch;
/*
@@ -270,18 +276,22 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
pte_t expected_pte = pte_next_swp_offset(pte);
const pte_t *end_ptep = start_ptep + max_nr;
+ swp_entry_t entry = pte_to_swp_entry(pte);
pte_t *ptep = start_ptep + 1;
+ unsigned short cgroup_id;
VM_WARN_ON(max_nr < 1);
VM_WARN_ON(!is_swap_pte(pte));
- VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));
+ VM_WARN_ON(non_swap_entry(entry));
+ cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
pte = ptep_get(ptep);
if (!pte_same(pte, expected_pte))
break;
-
+ if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+ break;
expected_pte = pte_next_swp_offset(expected_pte);
ptep++;
}
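Stopping the batch at a swap-cgroup boundary keeps every entry in a returned run attributable to a single memcg, which is presumably what allows a batched swap-in to be charged in one go. Illustrative use (the cgroup ids are invented):

	/* consecutive swap PTEs charged to cgroup ids {5, 5, 9}:
	 * swap_pte_batch() returns 2; the entry charged to cgroup 9
	 * starts the next batch. */
	int nr = swap_pte_batch(start_ptep, max_nr, pte);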
@@ -310,7 +320,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
wake_up(wqh);
}
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+ vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vmf->vma);
+ return ret;
+}
+
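The split preserves the old self-releasing behaviour for existing vmf_anon_prepare() callers, while hugetlb (see the hugetlb.c hunks above) calls __vmf_anon_prepare() and defers vma_end_read() to a single exit path, after the fault-mutex bookkeeping. The caller-side contract, sketched:

	ret = __vmf_anon_prepare(vmf);	/* does NOT drop the per-VMA lock */
	...
out:
	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vma);	/* deferred release, one exit point */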
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
@@ -406,9 +425,7 @@ extern unsigned long highest_memmap_pfn;
/*
* in mm/vmscan.c:
*/
-bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
-void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
@@ -778,37 +795,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
return list_empty(&area->free_list[migratetype]);
}
-/*
- * These three helpers classifies VMAs for virtual memory accounting.
- */
-
-/*
- * Executable code area - executable, not writable, not stack
- */
-static inline bool is_exec_mapping(vm_flags_t flags)
-{
- return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
-}
-
-/*
- * Stack area (including shadow stacks)
- *
- * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
- * do_mmap() forbids all other combinations.
- */
-static inline bool is_stack_mapping(vm_flags_t flags)
-{
- return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
-}
-
-/*
- * Data area - private, writable, not stack
- */
-static inline bool is_data_mapping(vm_flags_t flags)
-{
- return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
-}
-
/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);
@@ -1069,6 +1055,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
/*
* mm/memory-failure.c
*/
+#ifdef CONFIG_MEMORY_FAILURE
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);
@@ -1089,6 +1077,12 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+#else
+static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+}
+#endif
+
extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -1165,7 +1159,6 @@ static inline void flush_tlb_batched_pending(struct mm_struct *mm)
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
-extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];
@@ -1217,11 +1210,12 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags);
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags, bool writable,
+ int *last_cpupid);
void free_zone_device_folio(struct folio *folio);
-int migrate_device_coherent_page(struct page *page);
+int migrate_device_coherent_folio(struct folio *folio);
/*
* mm/gup.c
@@ -1237,13 +1231,6 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr,
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
-/*
- * mm/mmap.c
- */
-struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
- struct vm_area_struct *vma,
- unsigned long delta);
-
enum {
/* mark page accessed */
FOLL_TOUCH = 1 << 16,
@@ -1370,117 +1357,6 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte
return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}
-static inline void vma_iter_config(struct vma_iterator *vmi,
- unsigned long index, unsigned long last)
-{
- __mas_set_range(&vmi->mas, index, last - 1);
-}
-
-static inline void vma_iter_reset(struct vma_iterator *vmi)
-{
- mas_reset(&vmi->mas);
-}
-
-static inline
-struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
-{
- return mas_prev_range(&vmi->mas, min);
-}
-
-static inline
-struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
-{
- return mas_next_range(&vmi->mas, max);
-}
-
-static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
- unsigned long max, unsigned long size)
-{
- return mas_empty_area(&vmi->mas, min, max - 1, size);
-}
-
-static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
- unsigned long max, unsigned long size)
-{
- return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
-}
-
-/*
- * VMA Iterator functions shared between nommu and mmap
- */
-static inline int vma_iter_prealloc(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
-{
- return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
-}
-
-static inline void vma_iter_clear(struct vma_iterator *vmi)
-{
- mas_store_prealloc(&vmi->mas, NULL);
-}
-
-static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
-{
- return mas_walk(&vmi->mas);
-}
-
-/* Store a VMA with preallocated memory */
-static inline void vma_iter_store(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
-{
-
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
- vmi->mas.index > vma->vm_start)) {
- pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
- vmi->mas.index, vma->vm_start, vma->vm_start,
- vma->vm_end, vmi->mas.index, vmi->mas.last);
- }
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
- vmi->mas.last < vma->vm_start)) {
- pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
- vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
- vmi->mas.index, vmi->mas.last);
- }
-#endif
-
- if (vmi->mas.status != ma_start &&
- ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
- vma_iter_invalidate(vmi);
-
- __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
- mas_store_prealloc(&vmi->mas, vma);
-}
-
-static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
- struct vm_area_struct *vma, gfp_t gfp)
-{
- if (vmi->mas.status != ma_start &&
- ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
- vma_iter_invalidate(vmi);
-
- __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
- mas_store_gfp(&vmi->mas, vma, gfp);
- if (unlikely(mas_is_err(&vmi->mas)))
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * VMA lock generalization
- */
-struct vma_prepare {
- struct vm_area_struct *vma;
- struct vm_area_struct *adj_next;
- struct file *file;
- struct address_space *mapping;
- struct anon_vma *anon_vma;
- struct vm_area_struct *insert;
- struct vm_area_struct *remove;
- struct vm_area_struct *remove2;
-};
-
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid);
@@ -1497,27 +1373,11 @@ static inline int can_do_mseal(unsigned long flags)
return 0;
}
-bool can_modify_mm(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
- unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
return -EPERM;
}
-
-static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
- return true;
-}
-
-static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
- unsigned long end, int behavior)
-{
- return true;
-}
#endif
#ifdef CONFIG_SHRINKER_DEBUG
@@ -1569,13 +1429,18 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
-struct unlink_vma_file_batch {
- int count;
- struct vm_area_struct *vmas[8];
-};
+/* mremap.c */
+unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len,
+ bool need_rmap_locks, bool for_stack);
-void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
-void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
-void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);
+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 7634dd2a6128..b88543e5c0cc 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -44,7 +44,8 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
CFLAGS_KASAN_TEST += -fno-builtin
endif
-CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST)
+RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)
CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
obj-y := common.o report.o
@@ -52,5 +53,10 @@ obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quaran
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
+kasan_test-objs := kasan_test_c.o
+ifdef CONFIG_RUST
+ kasan_test-objs += kasan_test_rust.o
+endif
+
obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 85e7c6b4575c..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -208,15 +208,12 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
- unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+ unsigned long ip)
{
- void *tagged_object;
-
- if (!kasan_arch_is_ready())
- return false;
+ void *tagged_object = object;
- tagged_object = object;
object = kasan_reset_tag(object);
if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
@@ -224,37 +221,47 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
return true;
}
- /* RCU slabs could be legally used after free within the RCU period. */
- if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
- return false;
-
if (!kasan_byte_accessible(tagged_object)) {
kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
return true;
}
+ return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+ bool init, bool still_accessible)
+{
+ void *tagged_object = object;
+
+ object = kasan_reset_tag(object);
+
+ /* RCU slabs could be legally used after free within the RCU period. */
+ if (unlikely(still_accessible))
+ return;
+
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_FREE, init);
if (kasan_stack_collection_enabled())
kasan_save_free_info(cache, tagged_object);
+}
- return false;
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+ unsigned long ip)
+{
+ if (!kasan_arch_is_ready() || is_kfence_address(object))
+ return false;
+ return check_slab_allocation(cache, object, ip);
}
-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
- unsigned long ip, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+ bool still_accessible)
{
- if (is_kfence_address(object))
+ if (!kasan_arch_is_ready() || is_kfence_address(object))
return false;
- /*
- * If the object is buggy, do not let slab put the object onto the
- * freelist. The object will thus never be allocated again and its
- * metadata will never get released.
- */
- if (poison_slab_object(cache, object, ip, init))
- return true;
+ poison_slab_object(cache, object, init, still_accessible);
/*
* If the object is put into quarantine, do not let slab put the object
@@ -504,11 +511,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
return true;
}
- if (is_kfence_address(ptr))
- return false;
+ if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+ return true;
slab = folio_slab(folio);
- return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+ if (check_slab_allocation(slab->slab_cache, ptr, ip))
+ return false;
+
+ poison_slab_object(slab->slab_cache, ptr, false, false);
+ return true;
}
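The pre-free/free split lets the slab allocator validate a free before committing it, and poison only afterwards. A hedged sketch of the intended call sequence (the kasan_slab_pre_free()/kasan_slab_free() wrapper names and the exact SLUB call sites are assumptions, not shown in this hunk):

	if (kasan_slab_pre_free(cache, object))
		return;	/* invalid or double free: keep it off the freelist */
	/* ...allocator bookkeeping; for SLAB_TYPESAFE_BY_RCU caches the
	 * object stays readable until the grace period elapses, hence: */
	kasan_slab_free(cache, object, init, still_accessible);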
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb2b9ac0659a..f438a6cdc964 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);
+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
#else /* CONFIG_KASAN_KUNIT_TEST */
static inline void kasan_kunit_test_suite_start(void) { }
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c
index 7b32be2a3cf0..a181e4780d9d 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test_c.c
@@ -996,6 +996,51 @@ static void kmem_cache_invalid_free(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+ *p = 1;
+
+ rcu_read_lock();
+
+ /* Free the object - this will internally schedule an RCU callback. */
+ kmem_cache_free(cache, p);
+
+ /*
+ * We should still be allowed to access the object at this point because
+ * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+ * critical section since before the kmem_cache_free().
+ */
+ READ_ONCE(*p);
+
+ rcu_read_unlock();
+
+ /*
+ * Wait for the RCU callback to execute; after this, the object should
+ * have actually been freed from KASAN's perspective.
+ */
+ rcu_barrier();
+
+ KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+ kmem_cache_destroy(cache);
+}
+
static void empty_cache_ctor(void *object) { }
static void kmem_cache_double_destroy(struct kunit *test)
@@ -1899,6 +1944,16 @@ static void match_all_mem_tag(struct kunit *test)
kfree(ptr);
}
+/*
+ * Check that Rust performing a use-after-free using `unsafe` is detected.
+ * This is a smoke test to make sure that Rust is being sanitized properly.
+ */
+static void rust_uaf(struct kunit *test)
+{
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -1937,6 +1992,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmem_cache_oob),
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
+ KUNIT_CASE(kmem_cache_rcu_uaf),
KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kmem_cache_accounted),
KUNIT_CASE(kmem_cache_bulk),
@@ -1971,6 +2027,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
+ KUNIT_CASE(rust_uaf),
{}
};
diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
new file mode 100644
index 000000000000..caa7175964ef
--- /dev/null
+++ b/mm/kasan/kasan_test_rust.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Helper crate for KASAN testing.
+//!
+//! Provides behavior to check the sanitization of Rust code.
+
+use core::ptr::addr_of_mut;
+use kernel::prelude::*;
+
+/// Trivial UAF - allocate a big vector, grab a pointer partway through,
+/// drop the vector, and touch it.
+#[no_mangle]
+pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+ let mut v: Vec<u8> = Vec::new();
+ for _ in 0..4096 {
+ v.push(0x42, GFP_KERNEL).unwrap();
+ }
+ let ptr: *mut u8 = addr_of_mut!(v[2048]);
+ drop(v);
+ unsafe { *ptr }
+}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index c5cb54fc696d..67fc321db79b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -99,6 +99,10 @@ module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_inte
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
+/* Allocation burst count: number of excess KFENCE allocations per sample. */
+static unsigned int kfence_burst __read_mostly;
+module_param_named(burst, kfence_burst, uint, 0644);
+
/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);
@@ -269,6 +273,13 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
return pageaddr;
}
+static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
+{
+ enum kfence_object_state state = READ_ONCE(meta->state);
+
+ return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
+}
+
/*
* Update the object's metadata state, including updating the alloc/free stacks
* depending on the state transition.
@@ -278,10 +289,14 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
unsigned long *stack_entries, size_t num_stack_entries)
{
struct kfence_track *track =
- next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
+ next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
lockdep_assert_held(&meta->lock);
+ /* Stack was already saved when scheduling the RCU callback; skip. */
+ if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
+ goto out;
+
if (stack_entries) {
memcpy(track->stack_entries, stack_entries,
num_stack_entries * sizeof(stack_entries[0]));
@@ -297,6 +312,7 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
track->cpu = raw_smp_processor_id();
track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
+out:
/*
* Pairs with READ_ONCE() in
* kfence_shutdown_cache(),
@@ -502,7 +518,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
raw_spin_lock_irqsave(&meta->lock, flags);
- if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
+ if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
/* Invalid or double-free, bail out. */
atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
kfence_report_error((unsigned long)addr, false, NULL, meta,
@@ -780,7 +796,7 @@ static void kfence_check_all_canary(void)
for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
struct kfence_metadata *meta = &kfence_metadata[i];
- if (meta->state == KFENCE_OBJECT_ALLOCATED)
+ if (kfence_obj_allocated(meta))
check_canary(meta);
}
}
@@ -827,12 +843,12 @@ static void toggle_allocation_gate(struct work_struct *work)
if (!READ_ONCE(kfence_enabled))
return;
- atomic_set(&kfence_allocation_gate, 0);
+ atomic_set(&kfence_allocation_gate, -kfence_burst);
#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Enable static key, and await allocation to happen. */
static_branch_enable(&kfence_allocation_key);
- wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
+ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
/* Disable static key and reset timer. */
static_branch_disable(&kfence_allocation_key);
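Seeding the gate with -kfence_burst yields exactly 1 + burst guarded allocations per sample interval. Worked example with burst=2:

	/* gate starts at -2; atomic_inc_return() in __kfence_alloc()
	 * returns -1, 0, 1 for the first three calls (all <= 1: pass)
	 * and 2 for the fourth (rejected). The waiter wakes only once
	 * the gate turns positive, i.e. on the allocation that makes
	 * it 1. */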
@@ -1006,12 +1022,11 @@ void kfence_shutdown_cache(struct kmem_cache *s)
* the lock will not help, as different critical section
* serialization will have the same outcome.
*/
- if (READ_ONCE(meta->cache) != s ||
- READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
+ if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
continue;
raw_spin_lock_irqsave(&meta->lock, flags);
- in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
+ in_use = meta->cache == s && kfence_obj_allocated(meta);
raw_spin_unlock_irqrestore(&meta->lock, flags);
if (in_use) {
@@ -1052,6 +1067,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
unsigned long stack_entries[KFENCE_STACK_DEPTH];
size_t num_stack_entries;
u32 alloc_stack_hash;
+ int allocation_gate;
/*
* Perform size check before switching kfence_allocation_gate, so that
@@ -1080,14 +1096,15 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
if (s->flags & SLAB_SKIP_KFENCE)
return NULL;
- if (atomic_inc_return(&kfence_allocation_gate) > 1)
+ allocation_gate = atomic_inc_return(&kfence_allocation_gate);
+ if (allocation_gate > 1)
return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
/*
* waitqueue_active() is fully ordered after the update of
* kfence_allocation_gate per atomic_inc_return().
*/
- if (waitqueue_active(&allocation_wait)) {
+ if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
/*
* Calling wake_up() here may deadlock when allocations happen
* from within timer code. Use an irq_work to defer it.
@@ -1154,11 +1171,19 @@ void __kfence_free(void *addr)
* the object, as the object page may be recycled for other-typed
* objects once it has been freed. meta->cache may be NULL if the cache
* was destroyed.
+ * Save the stack trace here so that reports show where the user freed
+ * the object.
*/
- if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
+ if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&meta->lock, flags);
+ metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
+ raw_spin_unlock_irqrestore(&meta->lock, flags);
call_rcu(&meta->rcu_head, rcu_guarded_free);
- else
+ } else {
kfence_guarded_free(addr, meta, false);
+ }
}
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
@@ -1182,14 +1207,14 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
int distance = 0;
meta = addr_to_metadata(addr - PAGE_SIZE);
- if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+ if (meta && kfence_obj_allocated(meta)) {
to_report = meta;
/* Data race ok; distance calculation approximate. */
distance = addr - data_race(meta->addr + meta->size);
}
meta = addr_to_metadata(addr + PAGE_SIZE);
- if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+ if (meta && kfence_obj_allocated(meta)) {
/* Data race ok; distance calculation approximate. */
if (!to_report || distance > data_race(meta->addr) - addr)
to_report = meta;
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index db87a05047bd..dfba5ea06b01 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -38,6 +38,7 @@
enum kfence_object_state {
KFENCE_OBJECT_UNUSED, /* Object is unused. */
KFENCE_OBJECT_ALLOCATED, /* Object is currently allocated. */
+ KFENCE_OBJECT_RCU_FREEING, /* Object was allocated, and is now being freed via RCU. */
KFENCE_OBJECT_FREED, /* Object was allocated, and then freed. */
};
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index c509aed326ce..451991a3a8f2 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -16,6 +16,7 @@
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
+#include <linux/sched/clock.h>
#include <trace/events/error_report.h>
#include <asm/kfence.h>
@@ -108,11 +109,15 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
+ u64 interval_nsec = local_clock() - meta->alloc_track.ts_nsec;
+ unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
/* Timestamp matches printk timestamp format. */
- seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
- show_alloc ? "allocated" : "freed", track->pid,
- track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);
+ seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
+ show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
+ "rcu freeing" : "freed", track->pid,
+ track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
+ (unsigned long)interval_nsec, rem_interval_nsec / 1000);
if (track->num_stack_entries) {
/* Skip allocation/free internals stack. */
@@ -145,7 +150,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met
kfence_print_stack(seq, meta, true);
- if (meta->state == KFENCE_OBJECT_FREED) {
+ if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
seq_con_printf(seq, "\n");
kfence_print_stack(seq, meta, false);
}
@@ -314,7 +319,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
kpp->kp_slab_cache = meta->cache;
kpp->kp_objp = (void *)meta->addr;
kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
- if (meta->state == KFENCE_OBJECT_FREED)
+ if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
/* get_stack_skipnr() ensures the first entry is outside allocator. */
kpp->kp_ret = kpp->kp_stack[0];
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cdd1d8655a76..f9c39898eaff 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -85,7 +85,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
*
* Note that these are only respected if collapse was initiated by khugepaged.
*/
-static unsigned int khugepaged_max_ptes_none __read_mostly;
+unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
@@ -546,12 +546,14 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
static bool is_refcount_suitable(struct folio *folio)
{
- int expected_refcount;
+ int expected_refcount = folio_mapcount(folio);
- expected_refcount = folio_mapcount(folio);
- if (folio_test_swapcache(folio))
+ if (!folio_test_anon(folio) || folio_test_swapcache(folio))
expected_refcount += folio_nr_pages(folio);
+ if (folio_test_private(folio))
+ expected_refcount++;
+
return folio_ref_count(folio) == expected_refcount;
}
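The rewritten helper generalizes the old anon-only arithmetic to file-backed folios. A worked example (numbers invented):

	/* An order-2 anon folio (4 pages) in the swap cache, mapped by
	 * 4 PTEs, no private data:
	 *   expected = folio_mapcount() (4) + folio_nr_pages() (4) = 8
	 * File-backed folios likewise add nr_pages (their page-cache
	 * references), plus one if folio_test_private(). Any other
	 * refcount means an unknown holder, so collapse must bail. */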
@@ -625,8 +627,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
}
/*
- * We can do it before isolate_lru_page because the
- * page can't be freed from under us. NOTE: PG_lock
+ * We can do it before folio_isolate_lru because the
+ * folio can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
*/
@@ -1235,6 +1237,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
+ deferred_split_folio(folio, false);
spin_unlock(pmd_ptl);
folio = NULL;
@@ -1841,7 +1844,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
} while (1);
- for (index = start; index < end; index++) {
+ for (index = start; index < end;) {
xas_set(&xas, index);
folio = xas_load(&xas);
@@ -1860,18 +1863,19 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
}
nr_none++;
+ index++;
continue;
}
if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
- if (shmem_get_folio(mapping->host, index,
+ if (shmem_get_folio(mapping->host, index, 0,
&folio, SGP_NOALLOC)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
} else if (folio_trylock(folio)) {
folio_get(folio);
@@ -1886,7 +1890,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
@@ -1941,12 +1945,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration.
*/
- if (folio_test_large(folio)) {
- result = folio_order(folio) == HPAGE_PMD_ORDER &&
- folio->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
goto out_unlock;
}
@@ -1986,9 +1988,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
/*
- * We control three references to the folio:
+ * We control 2 + nr_pages references to the folio:
* - we hold a pin on it;
- * - one reference from page cache;
+ * - nr_pages reference from page cache;
* - one from lru_isolate_folio;
* If those are the only references, then any new usage
* of the folio will have to fetch it from the page
@@ -1996,7 +1998,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* truncate, so any new usage will be blocked until we
* unlock folio after collapse/during rollback.
*/
- if (folio_ref_count(folio) != 3) {
+ if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
result = SCAN_PAGE_COUNT;
xas_unlock_irq(&xas);
folio_putback_lru(folio);
@@ -2007,6 +2009,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* Accumulate the folios that are being collapsed.
*/
list_add_tail(&folio->lru, &pagelist);
+ index += folio_nr_pages(folio);
continue;
out_unlock:
folio_unlock(folio);
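The old magic 3 becomes 2 + nr_pages because a large folio occupies nr_pages page-cache slots and the page cache holds one reference per slot. Spelled out:

	/* only-references state for a folio under collapse:
	 *   1			our pin
	 * + 1			from LRU isolation
	 * + folio_nr_pages()	from the page cache
	 * matched on success by
	 * folio_put_refs(folio, 2 + folio_nr_pages(folio)). */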
@@ -2054,17 +2057,22 @@ xa_unlocked:
index = start;
dst = folio_page(new_folio, 0);
list_for_each_entry(folio, &pagelist, lru) {
+ int i, nr_pages = folio_nr_pages(folio);
+
while (index < folio->index) {
clear_highpage(dst);
index++;
dst++;
}
- if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
- result = SCAN_COPY_MC;
- goto rollback;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
+ result = SCAN_COPY_MC;
+ goto rollback;
+ }
+ index++;
+ dst++;
}
- index++;
- dst++;
}
while (index < end) {
clear_highpage(dst);
@@ -2179,7 +2187,7 @@ immap_locked:
folio_clear_active(folio);
folio_clear_unevictable(folio);
folio_unlock(folio);
- folio_put_refs(folio, 3);
+ folio_put_refs(folio, 2 + folio_nr_pages(folio));
}
goto out;
@@ -2254,16 +2262,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue;
}
- /*
- * TODO: khugepaged should compact smaller compound pages
- * into a PMD sized page
- */
- if (folio_test_large(folio)) {
- result = folio_order(folio) == HPAGE_PMD_ORDER &&
- folio->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
/*
* For SCAN_PTE_MAPPED_HUGEPAGE, further processing
* by the caller won't touch the page cache, and so
@@ -2285,8 +2287,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
break;
}
- if (folio_ref_count(folio) !=
- 1 + folio_mapcount(folio) + folio_test_private(folio)) {
+ if (!is_refcount_suitable(folio)) {
result = SCAN_PAGE_COUNT;
break;
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 764b08100570..0400f5e8ac60 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -224,6 +224,10 @@ static int kmemleak_error;
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
+/* minimum and maximum address that may be valid per-CPU pointers */
+static unsigned long min_percpu_addr = ULONG_MAX;
+static unsigned long max_percpu_addr;
+
static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
@@ -294,13 +298,20 @@ static void hex_dump_object(struct seq_file *seq,
const u8 *ptr = (const u8 *)object->pointer;
size_t len;
- if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
return;
+ if (object->flags & OBJECT_PERCPU)
+ ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
+
/* limit the number of lines to HEX_MAX_LINES */
len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
- warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+ if (object->flags & OBJECT_PERCPU)
+ warn_or_seq_printf(seq, " hex dump (first %zu bytes on cpu %d):\n",
+ len, raw_smp_processor_id());
+ else
+ warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
kasan_disable_current();
warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
@@ -695,10 +706,14 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
/*
- * Only update min_addr and max_addr with object
- * storing virtual address.
+ * Only update min_addr and max_addr with object storing virtual
+ * address. And update min_percpu_addr max_percpu_addr for per-CPU
+ * objects.
*/
- if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
+ if (objflags & OBJECT_PERCPU) {
+ min_percpu_addr = min(min_percpu_addr, untagged_ptr);
+ max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
+ } else if (!(objflags & OBJECT_PHYS)) {
min_addr = min(min_addr, untagged_ptr);
max_addr = max(max_addr, untagged_ptr + size);
}
@@ -1055,12 +1070,8 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
{
pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
- /*
- * Percpu allocations are only scanned and not reported as leaks
- * (min_count is set to 0).
- */
- if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- create_object_percpu((unsigned long)ptr, size, 0, gfp);
+ if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
+ create_object_percpu((__force unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -1134,8 +1145,8 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
pr_debug("%s(0x%px)\n", __func__, ptr);
- if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
+ if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
+ delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1304,12 +1315,23 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
- if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
return false;
kasan_disable_current();
kcsan_disable_current();
- object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+ if (object->flags & OBJECT_PERCPU) {
+ unsigned int cpu;
+
+ object->checksum = 0;
+ for_each_possible_cpu(cpu) {
+ void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+
+ object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
+ }
+ } else {
+ object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+ }
kasan_enable_current();
kcsan_enable_current();
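For per-CPU objects the checksum now folds every CPU's copy together. XOR keeps the state constant-size and order-independent, and a change in any single copy flips the combined value (two simultaneous identical changes could cancel, which this liveness heuristic tolerates). Reduced form:

	u32 csum = 0;

	for_each_possible_cpu(cpu)
		csum ^= crc32(0, per_cpu_ptr(base, cpu), size);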
@@ -1340,6 +1362,64 @@ static void update_refs(struct kmemleak_object *object)
}
}
+static void pointer_update_refs(struct kmemleak_object *scanned,
+ unsigned long pointer, unsigned int objflags)
+{
+ struct kmemleak_object *object;
+ unsigned long untagged_ptr;
+ unsigned long excess_ref;
+
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+ if (objflags & OBJECT_PERCPU) {
+ if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
+ return;
+ } else {
+ if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
+ return;
+ }
+
+ /*
+ * No need for get_object() here since we hold kmemleak_lock.
+ * object->use_count cannot be dropped to 0 while the object
+ * is still present in object_tree_root and object_list
+ * (with updates protected by kmemleak_lock).
+ */
+ object = __lookup_object(pointer, 1, objflags);
+ if (!object)
+ return;
+ if (object == scanned)
+ /* self referenced, ignore */
+ return;
+
+ /*
+ * Avoid the lockdep recursive warning on object->lock being
+ * previously acquired in scan_object(). These locks are
+ * enclosed by scan_mutex.
+ */
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ /* only pass surplus references (object already gray) */
+ if (color_gray(object)) {
+ excess_ref = object->excess_ref;
+ /* no need for update_refs() if object already gray */
+ } else {
+ excess_ref = 0;
+ update_refs(object);
+ }
+ raw_spin_unlock(&object->lock);
+
+ if (excess_ref) {
+ object = lookup_object(excess_ref, 0);
+ if (!object)
+ return;
+ if (object == scanned)
+ /* circular reference, ignore */
+ return;
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ update_refs(object);
+ raw_spin_unlock(&object->lock);
+ }
+}
+
/*
* Memory scanning is a long process and it needs to be interruptible. This
* function checks whether such interrupt condition occurred.
@@ -1372,13 +1452,10 @@ static void scan_block(void *_start, void *_end,
unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;
- unsigned long untagged_ptr;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
- struct kmemleak_object *object;
unsigned long pointer;
- unsigned long excess_ref;
if (scan_should_stop())
break;
@@ -1387,50 +1464,8 @@ static void scan_block(void *_start, void *_end,
pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
kasan_enable_current();
- untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
- if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
- continue;
-
- /*
- * No need for get_object() here since we hold kmemleak_lock.
- * object->use_count cannot be dropped to 0 while the object
- * is still present in object_tree_root and object_list
- * (with updates protected by kmemleak_lock).
- */
- object = lookup_object(pointer, 1);
- if (!object)
- continue;
- if (object == scanned)
- /* self referenced, ignore */
- continue;
-
- /*
- * Avoid the lockdep recursive warning on object->lock being
- * previously acquired in scan_object(). These locks are
- * enclosed by scan_mutex.
- */
- raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
- /* only pass surplus references (object already gray) */
- if (color_gray(object)) {
- excess_ref = object->excess_ref;
- /* no need for update_refs() if object already gray */
- } else {
- excess_ref = 0;
- update_refs(object);
- }
- raw_spin_unlock(&object->lock);
-
- if (excess_ref) {
- object = lookup_object(excess_ref, 0);
- if (!object)
- continue;
- if (object == scanned)
- /* circular reference, ignore */
- continue;
- raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
- update_refs(object);
- raw_spin_unlock(&object->lock);
- }
+ pointer_update_refs(scanned, pointer, 0);
+ pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
}
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
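With per-CPU objects tracked in a separate address window, scan_block() now classifies every candidate word twice, once against [min_addr, max_addr) and once against [min_percpu_addr, max_percpu_addr), instead of the old single test. A standalone sketch of that gating (window bounds invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define OBJECT_PERCPU	0x8

/* Illustrative windows; kmemleak grows these as objects are linked in. */
static unsigned long min_addr = 0x100000, max_addr = 0x200000;
static unsigned long min_percpu_addr = 0x600000, max_percpu_addr = 0x610000;

/* Can ptr possibly point into an object of the requested class? */
static bool in_window(unsigned long ptr, unsigned int objflags)
{
	if (objflags & OBJECT_PERCPU)
		return ptr >= min_percpu_addr && ptr < max_percpu_addr;
	return ptr >= min_addr && ptr < max_addr;
}

int main(void)
{
	unsigned long p = 0x600040;

	printf("regular: %d, percpu: %d\n",
	       in_window(p, 0), in_window(p, OBJECT_PERCPU));
	return 0;
}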
diff --git a/mm/ksm.c b/mm/ksm.c
index 14d9e53b1ec2..a2e2a521df0a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -608,47 +608,6 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
}
-static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
- struct mm_walk *walk)
-{
- struct page *page = NULL;
- spinlock_t *ptl;
- pte_t *pte;
- pte_t ptent;
- int ret;
-
- pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!pte)
- return 0;
- ptent = ptep_get(pte);
- if (pte_present(ptent)) {
- page = vm_normal_page(walk->vma, addr, ptent);
- } else if (!pte_none(ptent)) {
- swp_entry_t entry = pte_to_swp_entry(ptent);
-
- /*
- * As KSM pages remain KSM pages until freed, no need to wait
- * here for migration to end.
- */
- if (is_migration_entry(entry))
- page = pfn_swap_entry_to_page(entry);
- }
- /* return 1 if the page is an normal ksm page or KSM-placed zero page */
- ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
- pte_unmap_unlock(pte, ptl);
- return ret;
-}
-
-static const struct mm_walk_ops break_ksm_ops = {
- .pmd_entry = break_ksm_pmd_entry,
- .walk_lock = PGWALK_RDLOCK,
-};
-
-static const struct mm_walk_ops break_ksm_lock_vma_ops = {
- .pmd_entry = break_ksm_pmd_entry,
- .walk_lock = PGWALK_WRLOCK,
-};
-
/*
* We use break_ksm to break COW on a ksm page by triggering unsharing,
* such that the ksm page will get replaced by an exclusive anonymous page.
@@ -665,16 +624,26 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
vm_fault_t ret = 0;
- const struct mm_walk_ops *ops = lock_vma ?
- &break_ksm_lock_vma_ops : &break_ksm_ops;
+
+ if (lock_vma)
+ vma_start_write(vma);
do {
- int ksm_page;
+ bool ksm_page = false;
+ struct folio_walk fw;
+ struct folio *folio;
cond_resched();
- ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
- if (WARN_ON_ONCE(ksm_page < 0))
- return ksm_page;
+ folio = folio_walk_start(&fw, vma, addr,
+ FW_MIGRATION | FW_ZEROPAGE);
+ if (folio) {
+ /* Small folio implies FW_LEVEL_PTE. */
+ if (!folio_test_large(folio) &&
+ (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
+ ksm_page = true;
+ folio_walk_end(&fw, vma);
+ }
+
if (!ksm_page)
return 0;
ret = handle_mm_fault(vma, addr,
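The rewritten break_ksm() is a walk/test/fault/retry loop built on folio_walk_start()/folio_walk_end() instead of a dedicated mm_walk_ops walker. A reduced sketch of the loop shape, with stand-in helpers replacing the mm internals (the stand-ins pretend two faults are needed before the KSM page is gone):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the folio_walk_start() + KSM checks above. */
static bool addr_maps_ksm_page(unsigned long addr)
{
	static int remaining = 2;

	(void)addr;
	return remaining-- > 0;
}

/* Stand-in for handle_mm_fault() with FAULT_FLAG_UNSHARE. */
static int fault_in_unshare(unsigned long addr)
{
	(void)addr;
	return 0;	/* 0 = fault handled */
}

/* Walk, test, fault, retry -- the same shape as the new break_ksm(). */
static int break_ksm_sketch(unsigned long addr)
{
	for (;;) {
		int err;

		if (!addr_maps_ksm_page(addr))
			return 0;	/* no KSM page (left) at addr */
		err = fault_in_unshare(addr);
		if (err)
			return err;
	}
}

int main(void)
{
	printf("result: %d\n", break_ksm_sketch(0x1000));
	return 0;
}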
@@ -767,26 +736,28 @@ static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
struct mm_struct *mm = rmap_item->mm;
unsigned long addr = rmap_item->address;
struct vm_area_struct *vma;
- struct page *page;
+ struct page *page = NULL;
+ struct folio_walk fw;
+ struct folio *folio;
mmap_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (!vma)
goto out;
- page = follow_page(vma, addr, FOLL_GET);
- if (IS_ERR_OR_NULL(page))
- goto out;
- if (is_zone_device_page(page))
- goto out_putpage;
- if (PageAnon(page)) {
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (folio) {
+ if (!folio_is_zone_device(folio) &&
+ folio_test_anon(folio)) {
+ folio_get(folio);
+ page = fw.page;
+ }
+ folio_walk_end(&fw, vma);
+ }
+out:
+ if (page) {
flush_anon_page(vma, page, addr);
flush_dcache_page(page);
- } else {
-out_putpage:
- put_page(page);
-out:
- page = NULL;
}
mmap_read_unlock(mm);
return page;
@@ -938,12 +909,13 @@ again:
*/
while (!folio_try_get(folio)) {
/*
- * Another check for page->mapping != expected_mapping would
- * work here too. We have chosen the !PageSwapCache test to
- * optimize the common case, when the page is or is about to
- * be freed: PageSwapCache is cleared (under spin_lock_irq)
- * in the ref_freeze section of __remove_mapping(); but Anon
- * folio->mapping reset to NULL later, in free_pages_prepare().
+ * Another check for folio->mapping != expected_mapping
+ * would work here too. We have chosen to test the
+ * swapcache flag to optimize the common case, when the
+ * folio is or is about to be freed: the swapcache flag
+ * is cleared (under spin_lock_irq) in the ref_freeze
+ * section of __remove_mapping(); but anon folio->mapping
+ * is reset to NULL later, in free_pages_prepare().
*/
if (!folio_test_swapcache(folio))
goto stale;
@@ -974,7 +946,7 @@ again:
stale:
/*
- * We come here from above when page->mapping or !PageSwapCache
+ * We come here from above when folio->mapping or the swapcache flag
* suggests that the node is stale; but it might be under migration.
* We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
* before checking whether node->kpfn has been changed.
@@ -1481,7 +1453,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
goto out;
/*
- * We need the page lock to read a stable PageSwapCache in
+ * We need the folio lock to read a stable swapcache flag in
* write_protect_page(). We use trylock_page() instead of
* lock_page() because we don't want to wait here - we
* prefer to continue scanning and merging different pages,
@@ -2562,36 +2534,46 @@ next_mm:
ksm_scan.address = vma->vm_end;
while (ksm_scan.address < vma->vm_end) {
+ struct page *tmp_page = NULL;
+ struct folio_walk fw;
+ struct folio *folio;
+
if (ksm_test_exit(mm))
break;
- *page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (IS_ERR_OR_NULL(*page)) {
- ksm_scan.address += PAGE_SIZE;
- cond_resched();
- continue;
+
+ folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
+ if (folio) {
+ if (!folio_is_zone_device(folio) &&
+ folio_test_anon(folio)) {
+ folio_get(folio);
+ tmp_page = fw.page;
+ }
+ folio_walk_end(&fw, vma);
}
- if (is_zone_device_page(*page))
- goto next_page;
- if (PageAnon(*page)) {
- flush_anon_page(vma, *page, ksm_scan.address);
- flush_dcache_page(*page);
+
+ if (tmp_page) {
+ flush_anon_page(vma, tmp_page, ksm_scan.address);
+ flush_dcache_page(tmp_page);
rmap_item = get_next_rmap_item(mm_slot,
ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
ksm_scan.rmap_list =
&rmap_item->rmap_list;
- if (should_skip_rmap_item(*page, rmap_item))
+ if (should_skip_rmap_item(tmp_page, rmap_item)) {
+ folio_put(folio);
goto next_page;
+ }
ksm_scan.address += PAGE_SIZE;
- } else
- put_page(*page);
+ *page = tmp_page;
+ } else {
+ folio_put(folio);
+ }
mmap_read_unlock(mm);
return rmap_item;
}
next_page:
- put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
}
@@ -3142,7 +3124,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
* newfolio->mapping was set in advance; now we need smp_wmb()
* to make sure that the new stable_node->kpfn is visible
* to ksm_get_folio() before it can see that folio->mapping
- * has gone stale (or that folio_test_swapcache has been cleared).
+ * has gone stale (or that the swapcache flag has been cleared).
*/
smp_wmb();
folio_set_stable_node(folio, NULL);
diff --git a/mm/madvise.c b/mm/madvise.c
index 89089d84f8df..ff139e57cca2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1031,6 +1031,9 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
struct anon_vma_name *anon_name;
unsigned long new_flags = vma->vm_flags;
+ if (unlikely(!can_modify_vma_madv(vma, behavior)))
+ return -EPERM;
+
switch (behavior) {
case MADV_REMOVE:
return madvise_remove(vma, prev, start, end);
@@ -1448,15 +1451,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
start = untagged_addr_remote(mm, start);
end = start + len;
- /*
- * Check if the address range is sealed for do_madvise().
- * can_modify_mm_madv assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm_madv(mm, start, end, behavior))) {
- error = -EPERM;
- goto out;
- }
-
blk_start_plug(&plug);
switch (behavior) {
case MADV_POPULATE_READ:
@@ -1470,7 +1464,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
}
blk_finish_plug(&plug);
-out:
if (write)
mmap_write_unlock(mm);
else
@@ -1527,7 +1520,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
* Require CAP_SYS_NICE for influencing process performance. Note that
* only non-destructive hints are currently supported.
*/
- if (!capable(CAP_SYS_NICE)) {
+ if (mm != current->mm && !capable(CAP_SYS_NICE)) {
ret = -EPERM;
goto release_mm;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 3b9dc2d89b8a..0389ce5cd281 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1500,7 +1500,7 @@ done:
*
* Accept the memory of the allocated buffer.
*/
- accept_memory(found, found + size);
+ accept_memory(found, size);
return found;
}
@@ -1731,6 +1731,23 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
return memblock.reserved.total_size;
}
+/**
+ * memblock_estimated_nr_free_pages - return estimated number of free pages
+ *			from memblock's point of view
+ *
+ * During bootup, subsystems might need a rough estimate of the number of free
+ * pages in the whole system, before precise numbers are available from the
+ * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
+ * obtained from the buddy might be very imprecise during bootup.
+ *
+ * Return:
+ * An estimated number of free pages from memblock's point of view.
+ */
+unsigned long __init memblock_estimated_nr_free_pages(void)
+{
+ return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+}
+
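The estimate itself is plain arithmetic: total physical memory minus memblock reservations, converted to page frames. A worked example assuming 4 KiB pages (the sizes are invented):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4 KiB pages */
#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long long total    = 8ULL << 30;	/* 8 GiB of RAM */
	unsigned long long reserved = 512ULL << 20;	/* 512 MiB reserved */

	/* Same computation as memblock_estimated_nr_free_pages() */
	printf("estimated free pages: %lu\n", PHYS_PFN(total - reserved));
	return 0;
}

For these numbers the estimate is 1966080 pages, i.e. the 7.5 GiB that memblock does not consider reserved.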
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 417c96f2da28..81d8819f13cd 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -742,6 +742,9 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
return folio_file_page(folio, index);
}
+static void memcg1_check_events(struct mem_cgroup *memcg, int nid);
+static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
+
/**
* mem_cgroup_move_account - move account of the folio
* @folio: The folio.
@@ -853,9 +856,9 @@ static int mem_cgroup_move_account(struct folio *folio,
nid = folio_nid(folio);
local_irq_disable();
- mem_cgroup_charge_statistics(to, nr_pages);
+ memcg1_charge_statistics(to, nr_pages);
memcg1_check_events(to, nid);
- mem_cgroup_charge_statistics(from, -nr_pages);
+ memcg1_charge_statistics(from, -nr_pages);
memcg1_check_events(from, nid);
local_irq_enable();
out:
@@ -1439,21 +1442,68 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
}
}
+/* Cgroup1: threshold notifications & softlimit tree updates */
+struct memcg1_events_percpu {
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
+{
+ /* pagein of a big page is an event. So, ignore page size */
+ if (nr_pages > 0)
+ __count_memcg_events(memcg, PGPGIN, 1);
+ else {
+ __count_memcg_events(memcg, PGPGOUT, 1);
+ nr_pages = -nr_pages; /* for event */
+ }
+
+ __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
+}
+
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+
+static bool memcg1_event_ratelimit(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_target target)
+{
+ unsigned long val, next;
+
+ val = __this_cpu_read(memcg->events_percpu->nr_page_events);
+ next = __this_cpu_read(memcg->events_percpu->targets[target]);
+ /* from time_after() in jiffies.h */
+ if ((long)(next - val) < 0) {
+ switch (target) {
+ case MEM_CGROUP_TARGET_THRESH:
+ next = val + THRESHOLDS_EVENTS_TARGET;
+ break;
+ case MEM_CGROUP_TARGET_SOFTLIMIT:
+ next = val + SOFTLIMIT_EVENTS_TARGET;
+ break;
+ default:
+ break;
+ }
+ __this_cpu_write(memcg->events_percpu->targets[target], next);
+ return true;
+ }
+ return false;
+}
+
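memcg1_event_ratelimit() advances a per-CPU target and reports true once the event counter passes it; the (long)(next - val) < 0 comparison is the wraparound-safe ordering test borrowed from time_after(). A self-contained sketch of the same mechanism (single counter, single thread; the driver loop is invented):

#include <stdbool.h>
#include <stdio.h>

#define THRESHOLDS_EVENTS_TARGET 128

static unsigned long nr_events, target;

/* Fire roughly once per THRESHOLDS_EVENTS_TARGET events, wraparound-safe. */
static bool event_ratelimit(void)
{
	if ((long)(target - nr_events) < 0) {
		target = nr_events + THRESHOLDS_EVENTS_TARGET;
		return true;
	}
	return false;
}

int main(void)
{
	int fired = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		nr_events++;
		if (event_ratelimit())
			fired++;
	}
	printf("fired %d times for 1000 events\n", fired);
	return 0;
}

Because the target is rearmed from the current counter value, bursts do not cause extra firings, and the signed-difference test stays correct when the counter eventually wraps.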
/*
* Check events in order.
*
*/
-void memcg1_check_events(struct mem_cgroup *memcg, int nid)
+static void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
return;
/* threshold event is triggered in finer grain than soft limit */
- if (unlikely(mem_cgroup_event_ratelimit(memcg,
+ if (unlikely(memcg1_event_ratelimit(memcg,
MEM_CGROUP_TARGET_THRESH))) {
bool do_softlimit;
- do_softlimit = mem_cgroup_event_ratelimit(memcg,
+ do_softlimit = memcg1_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
mem_cgroup_threshold(memcg);
if (unlikely(do_softlimit))
@@ -1461,6 +1511,43 @@ void memcg1_check_events(struct mem_cgroup *memcg, int nid)
}
}
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ memcg1_charge_statistics(memcg, folio_nr_pages(folio));
+ memcg1_check_events(memcg, folio_nid(folio));
+ local_irq_restore(flags);
+}
+
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
+{
+ /*
+ * Interrupts should be disabled here because the caller holds the
+ * i_pages lock which is taken with interrupts-off. It is
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for updating the per-CPU variables.
+ */
+ preempt_disable_nested();
+ VM_WARN_ON_IRQS_ENABLED();
+ memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
+ preempt_enable_nested();
+ memcg1_check_events(memcg, folio_nid(folio));
+}
+
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+ unsigned long nr_memory, int nid)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __count_memcg_events(memcg, PGPGOUT, pgpgout);
+ __this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
+ memcg1_check_events(memcg, nid);
+ local_irq_restore(flags);
+}
+
static int compare_thresholds(const void *a, const void *b)
{
const struct mem_cgroup_threshold *_a = a;
@@ -1860,26 +1947,26 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
INIT_WORK(&event->remove, memcg_event_remove);
efile = fdget(efd);
- if (!efile.file) {
+ if (!fd_file(efile)) {
ret = -EBADF;
goto out_kfree;
}
- event->eventfd = eventfd_ctx_fileget(efile.file);
+ event->eventfd = eventfd_ctx_fileget(fd_file(efile));
if (IS_ERR(event->eventfd)) {
ret = PTR_ERR(event->eventfd);
goto out_put_efile;
}
cfile = fdget(cfd);
- if (!cfile.file) {
+ if (!fd_file(cfile)) {
ret = -EBADF;
goto out_put_eventfd;
}
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
- ret = file_permission(cfile.file, MAY_READ);
+ ret = file_permission(fd_file(cfile), MAY_READ);
if (ret < 0)
goto out_put_cfile;
@@ -1887,7 +1974,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* The control file must be a regular cgroup1 file. As a regular cgroup
* file can't be renamed, it's safe to access its name afterwards.
*/
- cdentry = cfile.file->f_path.dentry;
+ cdentry = fd_file(cfile)->f_path.dentry;
if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
ret = -EINVAL;
goto out_put_cfile;
@@ -1907,9 +1994,15 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
event->register_event = mem_cgroup_usage_register_event;
event->unregister_event = mem_cgroup_usage_unregister_event;
} else if (!strcmp(name, "memory.oom_control")) {
+ pr_warn_once("oom_control is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org"
+ " if you depend on this functionality. \n");
event->register_event = mem_cgroup_oom_register_event;
event->unregister_event = mem_cgroup_oom_unregister_event;
} else if (!strcmp(name, "memory.pressure_level")) {
+ pr_warn_once("pressure_level is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org "
+ "if you depend on this functionality. \n");
event->register_event = vmpressure_register_event;
event->unregister_event = vmpressure_unregister_event;
} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
@@ -1939,7 +2032,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
if (ret)
goto out_put_css;
- vfs_poll(efile.file, &event->pt);
+ vfs_poll(fd_file(efile), &event->pt);
spin_lock_irq(&memcg->event_list_lock);
list_add(&event->list, &memcg->event_list);
@@ -2447,6 +2540,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
ret = 0;
break;
case _TCP:
+ pr_warn_once("kmem.tcp.limit_in_bytes is deprecated and will be removed. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
ret = memcg_update_tcp_max(memcg, nr_pages);
break;
}
@@ -2455,6 +2551,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
ret = -EOPNOTSUPP;
} else {
+ pr_warn_once("soft_limit_in_bytes is deprecated and will be removed. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
WRITE_ONCE(memcg->soft_limit, nr_pages);
ret = 0;
}
@@ -2748,6 +2847,10 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ pr_warn_once("oom_control is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org if you "
+ "depend on this functionality. \n");
+
/* cannot set to root cgroup and only 0 and 1 are allowed */
if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
return -EINVAL;
@@ -2952,6 +3055,19 @@ bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
return false;
}
+bool memcg1_alloc_events(struct mem_cgroup *memcg)
+{
+ memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
+ GFP_KERNEL_ACCOUNT);
+ return !!memcg->events_percpu;
+}
+
+void memcg1_free_events(struct mem_cgroup *memcg)
+{
+ if (memcg->events_percpu)
+ free_percpu(memcg->events_percpu);
+}
+
static int __init memcg1_init(void)
{
int node;
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 56d7eaa98274..c0672e25bcdb 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -7,7 +7,6 @@
/* Cgroup v1 and v2 common declarations */
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages);
@@ -56,8 +55,6 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
- enum mem_cgroup_events_target target);
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
void drain_all_stock(struct mem_cgroup *root_memcg);
@@ -71,6 +68,10 @@ int memory_stat_show(struct seq_file *m, void *v);
/* Cgroup v1-specific declarations */
#ifdef CONFIG_MEMCG_V1
+
+bool memcg1_alloc_events(struct mem_cgroup *memcg);
+void memcg1_free_events(struct mem_cgroup *memcg);
+
void memcg1_memcg_init(struct mem_cgroup *memcg);
void memcg1_remove_from_trees(struct mem_cgroup *memcg);
@@ -99,7 +100,10 @@ bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);
-void memcg1_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+ unsigned long nr_memory, int nid);
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
@@ -120,6 +124,9 @@ extern struct cftype mem_cgroup_legacy_files[];
#else /* CONFIG_MEMCG_V1 */
+static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
+static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
+
static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
@@ -130,7 +137,14 @@ static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) {
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
-static inline void memcg1_check_events(struct mem_cgroup *memcg, int nid) {}
+static inline void memcg1_commit_charge(struct folio *folio,
+ struct mem_cgroup *memcg) {}
+
+static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
+
+static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
+ unsigned long pgpgout,
+ unsigned long nr_memory, int nid) {}
static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
@@ -140,8 +154,6 @@ static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr
gfp_t gfp_mask) { return true; }
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}
-extern struct cftype memsw_files[];
-extern struct cftype mem_cgroup_legacy_files[];
#endif /* CONFIG_MEMCG_V1 */
#endif /* __MM_MEMCONTROL_V1_H */
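The header keeps the usual kernel shape for optional features: real prototypes under CONFIG_MEMCG_V1, static inline no-ops otherwise, so common code can call memcg1_alloc_events()/memcg1_free_events() unconditionally. A condensed illustration of the pattern (names and config symbol invented, not the real header):

#include <stdbool.h>

struct ctx;	/* opaque to this header */

#ifdef CONFIG_MY_FEATURE
bool feature_alloc_events(struct ctx *c);
void feature_free_events(struct ctx *c);
#else
/*
 * Stubs keep call sites free of #ifdefs; alloc "succeeds" so callers
 * never need to branch on the config option.
 */
static inline bool feature_alloc_events(struct ctx *c) { return true; }
static inline void feature_free_events(struct ctx *c) { }
#endif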
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d563fb515766..7845c64a2c57 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -25,6 +25,7 @@
* Copyright (C) 2020 Alibaba, Inc, Alex Shi
*/
+#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
@@ -41,6 +42,7 @@
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
+#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
@@ -93,9 +95,6 @@ static bool cgroup_memory_nobpf __ro_after_init;
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif
-#define THRESHOLDS_EVENTS_TARGET 128
-#define SOFTLIMIT_EVENTS_TARGET 1024
-
static inline bool task_is_dying(void)
{
return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
@@ -305,6 +304,12 @@ static const unsigned int memcg_node_stat_items[] = {
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS,
+#endif
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
};
static const unsigned int memcg_stat_items[] = {
@@ -320,24 +325,27 @@ static const unsigned int memcg_stat_items[] = {
#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
ARRAY_SIZE(memcg_stat_items))
-static int8_t mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
+#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
+static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
static void init_memcg_stats(void)
{
- int8_t i, j = 0;
+ u8 i, j = 0;
+
+ BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
- BUILD_BUG_ON(MEMCG_NR_STAT >= S8_MAX);
+ memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
- for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i)
- mem_cgroup_stats_index[memcg_node_stat_items[i]] = ++j;
+ for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
+ mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
- for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i)
- mem_cgroup_stats_index[memcg_stat_items[i]] = ++j;
+ for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
+ mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}
static inline int memcg_stats_index(int idx)
{
- return mem_cgroup_stats_index[idx] - 1;
+ return mem_cgroup_stats_index[idx];
}
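The index tables move from a +1/-1 offset encoding, where 0 meant "absent", to u8 tables pre-filled with a U8_MAX sentinel; BAD_STAT_IDX() then rejects both unset entries and anything out of range after the unsigned cast. A compact demonstration of the scheme (item numbers invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_ITEMS	16
#define BAD_STAT_IDX(index)	((uint32_t)(index) >= UINT8_MAX)

static uint8_t index_table[NR_ITEMS];

int main(void)
{
	const int tracked[] = { 2, 5, 11 };	/* items that get a slot */
	size_t i;

	/* Everything starts as the "missing" sentinel... */
	memset(index_table, UINT8_MAX, sizeof(index_table));
	/* ...then tracked items get compact slots 0..n-1. */
	for (i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++)
		index_table[tracked[i]] = (uint8_t)i;

	printf("item 5 -> slot %d, item 3 missing? %d\n",
	       index_table[5], BAD_STAT_IDX(index_table[3]) ? 1 : 0);
	return 0;
}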
struct lruvec_stats_percpu {
@@ -369,7 +377,7 @@ unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
return node_page_state(lruvec_pgdat(lruvec), idx);
i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -392,7 +400,7 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -406,8 +414,10 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
+#ifdef CONFIG_MEMCG_V1
PGPGIN,
PGPGOUT,
+#endif
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -432,24 +442,32 @@ static const unsigned int memcg_vm_event_stat[] = {
THP_SWPOUT,
THP_SWPOUT_FALLBACK,
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ NUMA_PAGE_MIGRATE,
+ NUMA_PTE_UPDATES,
+ NUMA_HINT_FAULTS,
+#endif
};
#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
-static int8_t mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
static void init_memcg_events(void)
{
- int8_t i;
+ u8 i;
- BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= S8_MAX);
+ BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
+
+ memset(mem_cgroup_events_index, U8_MAX,
+ sizeof(mem_cgroup_events_index));
for (i = 0; i < NR_MEMCG_EVENTS; ++i)
- mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+ mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}
static inline int memcg_events_index(enum vm_event_item idx)
{
- return mem_cgroup_events_index[idx] - 1;
+ return mem_cgroup_events_index[idx];
}
struct memcg_vmstats_percpu {
@@ -469,10 +487,6 @@ struct memcg_vmstats_percpu {
/* Delta calculation for lockless upward propagation */
long state_prev[MEMCG_VMSTAT_SIZE];
unsigned long events_prev[NR_MEMCG_EVENTS];
-
- /* Cgroup1: threshold notifications & softlimit tree updates */
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;
struct memcg_vmstats {
@@ -621,7 +635,7 @@ unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
long x;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
x = READ_ONCE(memcg->vmstats->state[i]);
@@ -662,7 +676,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
if (mem_cgroup_disabled())
return;
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
__this_cpu_add(memcg->vmstats_percpu->state[i], val);
@@ -675,7 +689,7 @@ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
long x;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
x = READ_ONCE(memcg->vmstats->state_local[i]);
@@ -694,7 +708,7 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
struct mem_cgroup *memcg;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -810,7 +824,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
if (mem_cgroup_disabled())
return;
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
memcg_stats_lock();
@@ -823,7 +837,7 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
int i = memcg_events_index(event);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
return 0;
return READ_ONCE(memcg->vmstats->events[i]);
@@ -833,50 +847,12 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
int i = memcg_events_index(event);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
return 0;
return READ_ONCE(memcg->vmstats->events_local[i]);
}
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
-{
- /* pagein of a big page is an event. So, ignore page size */
- if (nr_pages > 0)
- __count_memcg_events(memcg, PGPGIN, 1);
- else {
- __count_memcg_events(memcg, PGPGOUT, 1);
- nr_pages = -nr_pages; /* for event */
- }
-
- __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
-}
-
-bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
- enum mem_cgroup_events_target target)
-{
- unsigned long val, next;
-
- val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
- next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
- /* from time_after() in jiffies.h */
- if ((long)(next - val) < 0) {
- switch (target) {
- case MEM_CGROUP_TARGET_THRESH:
- next = val + THRESHOLDS_EVENTS_TARGET;
- break;
- case MEM_CGROUP_TARGET_SOFTLIMIT:
- next = val + SOFTLIMIT_EVENTS_TARGET;
- break;
- default:
- break;
- }
- __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
- return true;
- }
- return false;
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -971,6 +947,24 @@ again:
}
/**
+ * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
+ * @folio: folio from which memcg should be extracted.
+ */
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ rcu_read_lock();
+ if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
+ memcg = root_mem_cgroup;
+ rcu_read_unlock();
+ return memcg;
+}
+
+/**
* mem_cgroup_iter - iterate over memory cgroup hierarchy
* @root: hierarchy root
* @prev: previously returned memcg, NULL on first invocation
@@ -992,9 +986,9 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup_reclaim_cookie *reclaim)
{
struct mem_cgroup_reclaim_iter *iter;
- struct cgroup_subsys_state *css = NULL;
- struct mem_cgroup *memcg = NULL;
- struct mem_cgroup *pos = NULL;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *pos;
+ struct mem_cgroup *next;
if (mem_cgroup_disabled())
return NULL;
@@ -1003,81 +997,67 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
root = root_mem_cgroup;
rcu_read_lock();
+restart:
+ next = NULL;
if (reclaim) {
- struct mem_cgroup_per_node *mz;
+ int gen;
+ int nid = reclaim->pgdat->node_id;
- mz = root->nodeinfo[reclaim->pgdat->node_id];
- iter = &mz->iter;
+ iter = &root->nodeinfo[nid]->iter;
+ gen = atomic_read(&iter->generation);
/*
* On start, join the current reclaim iteration cycle.
* Exit when a concurrent walker completes it.
*/
if (!prev)
- reclaim->generation = iter->generation;
- else if (reclaim->generation != iter->generation)
+ reclaim->generation = gen;
+ else if (reclaim->generation != gen)
goto out_unlock;
- while (1) {
- pos = READ_ONCE(iter->position);
- if (!pos || css_tryget(&pos->css))
- break;
- /*
- * css reference reached zero, so iter->position will
- * be cleared by ->css_released. However, we should not
- * rely on this happening soon, because ->css_released
- * is called from a work queue, and by busy-waiting we
- * might block it. So we clear iter->position right
- * away.
- */
- (void)cmpxchg(&iter->position, pos, NULL);
- }
- } else if (prev) {
+ pos = READ_ONCE(iter->position);
+ } else
pos = prev;
- }
- if (pos)
- css = &pos->css;
-
- for (;;) {
- css = css_next_descendant_pre(css, &root->css);
- if (!css) {
- /*
- * Reclaimers share the hierarchy walk, and a
- * new one might jump in right at the end of
- * the hierarchy - make sure they see at least
- * one group and restart from the beginning.
- */
- if (!prev)
- continue;
- break;
- }
+ css = pos ? &pos->css : NULL;
+ while ((css = css_next_descendant_pre(css, &root->css))) {
/*
* Verify the css and acquire a reference. The root
* is provided by the caller, so we know it's alive
* and kicking, and don't take an extra reference.
*/
- if (css == &root->css || css_tryget(css)) {
- memcg = mem_cgroup_from_css(css);
+ if (css == &root->css || css_tryget(css))
break;
- }
}
+ next = mem_cgroup_from_css(css);
+
if (reclaim) {
/*
* The position could have already been updated by a competing
* thread, so check that the value hasn't changed since we read
* it to avoid reclaiming from the same cgroup twice.
*/
- (void)cmpxchg(&iter->position, pos, memcg);
+ if (cmpxchg(&iter->position, pos, next) != pos) {
+ if (css && css != &root->css)
+ css_put(css);
+ goto restart;
+ }
- if (pos)
- css_put(&pos->css);
+ if (!next) {
+ atomic_inc(&iter->generation);
- if (!memcg)
- iter->generation++;
+ /*
+ * Reclaimers share the hierarchy walk, and a
+ * new one might jump in right at the end of
+ * the hierarchy - make sure they see at least
+ * one group and restart from the beginning.
+ */
+ if (!prev)
+ goto restart;
+ }
}
out_unlock:
@@ -1085,7 +1065,7 @@ out_unlock:
if (prev && prev != root)
css_put(&prev->css);
- return memcg;
+ return next;
}
/**
@@ -1375,6 +1355,13 @@ static const struct memory_stat memory_stats[] = {
{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
+
+ { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
+ { "pgdemote_direct", PGDEMOTE_DIRECT },
+ { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
+#ifdef CONFIG_NUMA_BALANCING
+ { "pgpromote_success", PGPROMOTE_SUCCESS },
+#endif
};
/* The actual unit of the state item, not the same as the output unit */
@@ -1399,6 +1386,9 @@ static int memcg_page_state_output_unit(int item)
/*
* Workingset state is actually in pages, but we export it to userspace
* as a scalar count of events, so special case it here.
+ *
+ * Demotion and promotion activities are exported in pages, consistent
+ * with their global counterparts.
*/
switch (item) {
case WORKINGSET_REFAULT_ANON:
@@ -1408,6 +1398,12 @@ static int memcg_page_state_output_unit(int item)
case WORKINGSET_RESTORE_ANON:
case WORKINGSET_RESTORE_FILE:
case WORKINGSET_NODERECLAIM:
+ case PGDEMOTE_KSWAPD:
+ case PGDEMOTE_DIRECT:
+ case PGDEMOTE_KHUGEPAGED:
+#ifdef CONFIG_NUMA_BALANCING
+ case PGPROMOTE_SUCCESS:
+#endif
return 1;
default:
return memcg_page_state_unit(item);
@@ -1466,10 +1462,11 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
memcg_events(memcg, PGSTEAL_KHUGEPAGED));
for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
+#ifdef CONFIG_MEMCG_V1
if (memcg_vm_event_stat[i] == PGPGIN ||
memcg_vm_event_stat[i] == PGPGOUT)
continue;
-
+#endif
seq_buf_printf(s, "%s %lu\n",
vm_event_name(memcg_vm_event_stat[i]),
memcg_events(memcg, memcg_vm_event_stat[i]));
@@ -2366,7 +2363,7 @@ void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
- VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
+ VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
/*
* Any of the following ensures page's memcg stability:
*
@@ -2388,11 +2385,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
css_get(&memcg->css);
commit_charge(folio, memcg);
-
- local_irq_disable();
- mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
- memcg1_check_events(memcg, folio_nid(folio));
- local_irq_enable();
+ memcg1_commit_charge(folio, memcg);
}
static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
@@ -2446,37 +2439,7 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
/*
* Returns a pointer to the memory cgroup to which the kernel object is charged.
- *
- * A passed kernel object can be a slab object, vmalloc object or a generic
- * kernel page, so different mechanisms for getting the memory cgroup pointer
- * should be used.
- *
- * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
- * can not know for sure how the kernel object is implemented.
- * mem_cgroup_from_obj() can be safely used in such cases.
- *
- * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
- * cgroup_mutex, etc.
- */
-struct mem_cgroup *mem_cgroup_from_obj(void *p)
-{
- struct folio *folio;
-
- if (mem_cgroup_disabled())
- return NULL;
-
- if (unlikely(is_vmalloc_addr(p)))
- folio = page_folio(vmalloc_to_page(p));
- else
- folio = virt_to_folio(p);
-
- return mem_cgroup_from_obj_folio(folio, p);
-}
-
-/*
- * Returns a pointer to the memory cgroup to which the kernel object is charged.
- * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
- * allocated using vmalloc().
+ * It is not suitable for objects allocated using vmalloc().
*
* A passed kernel object must be a slab object or a generic kernel page.
*
@@ -3057,12 +3020,11 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void split_page_memcg(struct page *head, int old_order, int new_order)
{
struct folio *folio = page_folio(head);
- struct mem_cgroup *memcg = folio_memcg(folio);
int i;
unsigned int old_nr = 1 << old_order;
unsigned int new_nr = 1 << new_order;
- if (mem_cgroup_disabled() || !memcg)
+ if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
return;
for (i = new_nr; i < old_nr; i += new_nr)
@@ -3071,7 +3033,7 @@ void split_page_memcg(struct page *head, int old_order, int new_order)
if (folio_memcg_kmem(folio))
obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
else
- css_get_many(&memcg->css, old_nr / new_nr - 1);
+ css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
}
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
@@ -3385,29 +3347,12 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
*/
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
-static DEFINE_IDR(mem_cgroup_idr);
-static DEFINE_SPINLOCK(memcg_idr_lock);
-
-static int mem_cgroup_alloc_id(void)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&memcg_idr_lock);
- ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
- GFP_NOWAIT);
- spin_unlock(&memcg_idr_lock);
- idr_preload_end();
- return ret;
-}
+static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
- spin_lock(&memcg_idr_lock);
- idr_remove(&mem_cgroup_idr, memcg->id.id);
- spin_unlock(&memcg_idr_lock);
-
+ xa_erase(&mem_cgroup_ids, memcg->id.id);
memcg->id.id = 0;
}
}
@@ -3442,7 +3387,7 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- return idr_find(&mem_cgroup_idr, id);
+ return xa_load(&mem_cgroup_ids, id);
}
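The IDR plus dedicated spinlock pair collapses into an allocating XArray: xa_alloc() reserves an ID under the array's internal lock, xa_store() publishes the pointer once the object is fully initialised, and xa_erase() releases the ID again. A sketch of that lifecycle with the same kernel APIs (kernel-only code, shown for shape; the struct and the 0xffff limit are invented):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(ids);	/* allocation starts at ID 1 */

struct obj {
	u32 id;
};

static int obj_alloc_id(struct obj *o, gfp_t gfp)
{
	/* Reserve the ID now, publish the pointer later. */
	return xa_alloc(&ids, &o->id, NULL, XA_LIMIT(1, 0xffff), gfp);
}

static void obj_publish(struct obj *o)
{
	xa_store(&ids, o->id, o, GFP_KERNEL);
}

static void obj_free_id(struct obj *o)
{
	xa_erase(&ids, o->id);
	o->id = 0;
}

Lookup stays a plain xa_load() under RCU, matching the mem_cgroup_from_id() hunk above.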
#ifdef CONFIG_SHRINKER_DEBUG
@@ -3517,6 +3462,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
+ memcg1_free_events(memcg);
kfree(memcg->vmstats);
free_percpu(memcg->vmstats_percpu);
kfree(memcg);
@@ -3535,17 +3481,17 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
struct mem_cgroup *memcg;
int node, cpu;
int __maybe_unused i;
- long error = -ENOMEM;
+ long error;
memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
if (!memcg)
- return ERR_PTR(error);
+ return ERR_PTR(-ENOMEM);
- memcg->id.id = mem_cgroup_alloc_id();
- if (memcg->id.id < 0) {
- error = memcg->id.id;
+ error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
+ XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
+ if (error)
goto fail;
- }
+ error = -ENOMEM;
memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
GFP_KERNEL_ACCOUNT);
@@ -3557,6 +3503,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
if (!memcg->vmstats_percpu)
goto fail;
+ if (!memcg1_alloc_events(memcg))
+ goto fail;
+
for_each_possible_cpu(cpu) {
if (parent)
pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
@@ -3574,6 +3523,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
INIT_WORK(&memcg->high_work, high_work_func);
vmpressure_init(&memcg->vmpressure);
+ INIT_LIST_HEAD(&memcg->memory_peaks);
+ INIT_LIST_HEAD(&memcg->swap_peaks);
+ spin_lock_init(&memcg->peaks_lock);
memcg->socket_pressure = jiffies;
memcg1_memcg_init(memcg);
memcg->kmemcg_id = -1;
@@ -3619,21 +3571,21 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent) {
WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
- page_counter_init(&memcg->memory, &parent->memory);
- page_counter_init(&memcg->swap, &parent->swap);
+ page_counter_init(&memcg->memory, &parent->memory, true);
+ page_counter_init(&memcg->swap, &parent->swap, false);
#ifdef CONFIG_MEMCG_V1
WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
- page_counter_init(&memcg->kmem, &parent->kmem);
- page_counter_init(&memcg->tcpmem, &parent->tcpmem);
+ page_counter_init(&memcg->kmem, &parent->kmem, false);
+ page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
#endif
} else {
init_memcg_stats();
init_memcg_events();
- page_counter_init(&memcg->memory, NULL);
- page_counter_init(&memcg->swap, NULL);
+ page_counter_init(&memcg->memory, NULL, true);
+ page_counter_init(&memcg->swap, NULL, false);
#ifdef CONFIG_MEMCG_V1
- page_counter_init(&memcg->kmem, NULL);
- page_counter_init(&memcg->tcpmem, NULL);
+ page_counter_init(&memcg->kmem, NULL, false);
+ page_counter_init(&memcg->tcpmem, NULL, false);
#endif
root_mem_cgroup = memcg;
return &memcg->css;
@@ -3682,9 +3634,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
* publish it here at the end of onlining. This matches the
* regular ID destruction during offlining.
*/
- spin_lock(&memcg_idr_lock);
- idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
- spin_unlock(&memcg_idr_lock);
+ xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
return 0;
offline_kmem:
@@ -3967,14 +3917,91 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}
-static u64 memory_peak_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
+#define OFP_PEAK_UNSET (((-1UL)))
+
+static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct cgroup_of_peak *ofp = of_peak(sf->private);
+ u64 fd_peak = READ_ONCE(ofp->value), peak;
+
+ /* User wants global or local peak? */
+ if (fd_peak == OFP_PEAK_UNSET)
+ peak = pc->watermark;
+ else
+ peak = max(fd_peak, READ_ONCE(pc->local_watermark));
+
+ seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
+ return 0;
+}
+
+static int memory_peak_show(struct seq_file *sf, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
- return (u64)memcg->memory.watermark * PAGE_SIZE;
+ return peak_show(sf, v, &memcg->memory);
}
+static int peak_open(struct kernfs_open_file *of)
+{
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ ofp->value = OFP_PEAK_UNSET;
+ return 0;
+}
+
+static void peak_release(struct kernfs_open_file *of)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ if (ofp->value == OFP_PEAK_UNSET) {
+ /* fast path (no writes on this fd) */
+ return;
+ }
+ spin_lock(&memcg->peaks_lock);
+ list_del(&ofp->list);
+ spin_unlock(&memcg->peaks_lock);
+}
+
+static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
+ loff_t off, struct page_counter *pc,
+ struct list_head *watchers)
+{
+ unsigned long usage;
+ struct cgroup_of_peak *peer_ctx;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ spin_lock(&memcg->peaks_lock);
+
+ usage = page_counter_read(pc);
+ WRITE_ONCE(pc->local_watermark, usage);
+
+ list_for_each_entry(peer_ctx, watchers, list)
+ if (usage > peer_ctx->value)
+ WRITE_ONCE(peer_ctx->value, usage);
+
+ /* initial write, register watcher */
+ if (ofp->value == -1)
+ list_add(&ofp->list, watchers);
+
+ WRITE_ONCE(ofp->value, usage);
+ spin_unlock(&memcg->peaks_lock);
+
+ return nbytes;
+}
+
+static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+
+ return peak_write(of, buf, nbytes, off, &memcg->memory,
+ &memcg->memory_peaks);
+}
+
+#undef OFP_PEAK_UNSET
+
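After this change a freshly opened memory.peak fd still reads the global watermark, while any write re-bases that fd onto a local watermark that tracks the maximum usage since the write. A hedged userspace sketch of the intended interaction (the cgroup path is invented; semantics as described by this patch, not a tested program):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/fs/cgroup/test/memory.peak", O_RDWR);

	if (fd < 0)
		return 1;

	/* Any write resets this fd's local watermark to current usage. */
	if (write(fd, "reset\n", 6) < 0)
		return 1;

	/* ... run the workload of interest here ... */

	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		printf("peak since reset: %s", buf);
	}
	close(fd);
	return 0;
}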
static int memory_min_show(struct seq_file *m, void *v)
{
return seq_puts_memcg_tunable(m,
@@ -4324,7 +4351,10 @@ static struct cftype memory_files[] = {
{
.name = "peak",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_u64 = memory_peak_read,
+ .open = peak_open,
+ .release = peak_release,
+ .seq_show = memory_peak_show,
+ .write = memory_peak_write,
},
{
.name = "min",
@@ -4528,14 +4558,15 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
/*
* mem_cgroup_swapin_uncharge_swap - uncharge swap slot
- * @entry: swap entry for which the page is charged
+ * @entry: the first swap entry for which the pages are charged
+ * @nr_pages: number of pages which will be uncharged
*
* Call this function after successfully adding the charged page to swapcache.
*
* Note: This function assumes the page for which swap slot is being uncharged
* is order 0 page.
*/
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
/*
* Cgroup1's unified memory+swap counter has been charged with the
@@ -4555,7 +4586,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
* let's not wait for it. The page already received a
* memory+swap charge, drop the swap entry duplicate.
*/
- mem_cgroup_uncharge_swap(entry, 1);
+ mem_cgroup_uncharge_swap(entry, nr_pages);
}
}
@@ -4574,8 +4605,6 @@ static inline void uncharge_gather_clear(struct uncharge_gather *ug)
static void uncharge_batch(const struct uncharge_gather *ug)
{
- unsigned long flags;
-
if (ug->nr_memory) {
page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
if (do_memsw_account())
@@ -4587,11 +4616,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg1_oom_recover(ug->memcg);
}
- local_irq_save(flags);
- __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
- __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
- memcg1_check_events(ug->memcg, ug->nid);
- local_irq_restore(flags);
+ memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
/* drop reference from uncharge_folio */
css_put(&ug->memcg->css);
@@ -4606,7 +4631,8 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
!folio_test_hugetlb(folio) &&
- !list_empty(&folio->_deferred_list), folio);
+ !list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio), folio);
/*
* Nobody should be changing or seriously looking at
@@ -4664,7 +4690,7 @@ void __mem_cgroup_uncharge(struct folio *folio)
struct uncharge_gather ug;
/* Don't touch folio->lru of any random page, pre-check: */
- if (!folio_memcg(folio))
+ if (!folio_memcg_charged(folio))
return;
uncharge_gather_clear(&ug);
@@ -4698,7 +4724,6 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
{
struct mem_cgroup *memcg;
long nr_pages = folio_nr_pages(new);
- unsigned long flags;
VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
@@ -4709,7 +4734,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
return;
/* Page cache replacement: new folio already charged? */
- if (folio_memcg(new))
+ if (folio_memcg_charged(new))
return;
memcg = folio_memcg(old);
@@ -4726,11 +4751,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
css_get(&memcg->css);
commit_charge(new, memcg);
-
- local_irq_save(flags);
- mem_cgroup_charge_statistics(memcg, nr_pages);
- memcg1_check_events(memcg, folio_nid(new));
- local_irq_restore(flags);
+ memcg1_commit_charge(new, memcg);
}
/**
@@ -4966,17 +4987,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
page_counter_uncharge(&memcg->memsw, nr_entries);
}
- /*
- * Interrupts should be disabled here because the caller holds the
- * i_pages lock which is taken with interrupts-off. It is
- * important here to have the interrupts disabled because it is the
- * only synchronisation we have for updating the per-CPU variables.
- */
- memcg_stats_lock();
- mem_cgroup_charge_statistics(memcg, -nr_entries);
- memcg_stats_unlock();
- memcg1_check_events(memcg, folio_nid(folio));
-
+ memcg1_swapout(folio, memcg);
css_put(&memcg->css);
}
@@ -5116,12 +5127,20 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}
-static u64 swap_peak_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
+static int swap_peak_show(struct seq_file *sf, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
+
+ return peak_show(sf, v, &memcg->swap);
+}
+
+static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
- return (u64)memcg->swap.watermark * PAGE_SIZE;
+ return peak_write(of, buf, nbytes, off, &memcg->swap,
+ &memcg->swap_peaks);
}
static int swap_high_show(struct seq_file *m, void *v)
@@ -5205,7 +5224,10 @@ static struct cftype swap_files[] = {
{
.name = "swap.peak",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_u64 = swap_peak_read,
+ .open = peak_open,
+ .release = peak_release,
+ .seq_show = swap_peak_show,
+ .write = swap_peak_write,
},
{
.name = "swap.events",
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7066fc84f351..96ce31e5a203 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1554,6 +1554,32 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
return ret;
}
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+ if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+ struct address_space *mapping;
+
+ /*
+ * For hugetlb folios in shared mappings, try_to_unmap
+ * could potentially call huge_pmd_unshare. Because of
+ * this, take semaphore in write mode here and set
+ * TTU_RMAP_LOCKED to indicate we have taken the lock
+ * at this higher level.
+ */
+ mapping = hugetlb_folio_mapping_lock_write(folio);
+ if (!mapping) {
+ pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
+ folio_pfn(folio));
+ return;
+ }
+
+ try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+ i_mmap_unlock_write(mapping);
+ } else {
+ try_to_unmap(folio, ttu);
+ }
+}
+
/*
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
@@ -1615,23 +1641,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
*/
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
- if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
- /*
- * For hugetlb pages in shared mappings, try_to_unmap
- * could potentially call huge_pmd_unshare. Because of
- * this, take semaphore in write mode here and set
- * TTU_RMAP_LOCKED to indicate we have taken the lock
- * at this higher level.
- */
- mapping = hugetlb_folio_mapping_lock_write(folio);
- if (mapping) {
- try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
- } else
- pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
- } else {
- try_to_unmap(folio, ttu);
- }
+ unmap_poisoned_folio(folio, ttu);
unmap_success = !folio_mapped(folio);
if (!unmap_success)
@@ -2643,40 +2653,6 @@ EXPORT_SYMBOL(unpoison_memory);
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
- bool isolated = false;
-
- if (folio_test_hugetlb(folio)) {
- isolated = isolate_hugetlb(folio, pagelist);
- } else {
- bool lru = !__folio_test_movable(folio);
-
- if (lru)
- isolated = folio_isolate_lru(folio);
- else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
-
- if (isolated) {
- list_add(&folio->lru, pagelist);
- if (lru)
- node_stat_add_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio));
- }
- }
-
- /*
- * If we succeed to isolate the folio, we grabbed another refcount on
- * the folio, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the folio, it means that we cannot go further
- * and we will return an error, so drop the reference we got from
- * get_any_page() as well.
- */
- folio_put(folio);
- return isolated;
-}
-
/*
* soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2689,6 +2665,7 @@ static int soft_offline_in_use_page(struct page *page)
struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
bool huge = folio_test_hugetlb(folio);
+ bool isolated;
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
@@ -2728,7 +2705,18 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (mf_isolate_folio(folio, &pagelist)) {
+ isolated = isolate_folio_to_list(folio, &pagelist);
+
+ /*
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
+ * and we will return an error, so drop the reference we got from
+ * get_any_page() as well.
+ */
+ folio_put(folio);
+
+ if (isolated) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 4775b3a3dabe..9842acebd05e 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -6,6 +6,7 @@
#include <linux/memory.h>
#include <linux/memory-tiers.h>
#include <linux/notifier.h>
+#include <linux/sched/sysctl.h>
#include "internal.h"
@@ -50,6 +51,24 @@ static const struct bus_type memory_tier_subsys = {
.dev_name = "memory_tier",
};
+#ifdef CONFIG_NUMA_BALANCING
+/**
+ * folio_use_access_time - check if a folio reuses cpupid for page access time
+ * @folio: folio to check
+ *
+ * A folio's _last_cpupid field is repurposed by memory tiering: in memory
+ * tiering mode, the cpupid of a slow-memory folio (one not in top-tier
+ * memory) is used to record the page access time instead.
+ *
+ * Return: true if the folio's _last_cpupid field is used to record page
+ * access time, false otherwise.
+ */
+bool folio_use_access_time(struct folio *folio)
+{
+ return (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ !node_is_toptier(folio_nid(folio));
+}
+#endif
+
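A minimal caller sketch of the new helper, mirroring how numa_migrate_check()
consumes it later in this diff (demo_last_cpupid is a hypothetical name used
only for illustration):

	/* Pick the effective last_cpupid for NUMA hinting on this folio. */
	static int demo_last_cpupid(struct folio *folio)
	{
		/*
		 * In memory-tiering mode a slow-tier folio repurposes
		 * _last_cpupid as an access timestamp, so fall back to the
		 * default cpupid value instead of reading a bogus one.
		 */
		if (folio_use_access_time(folio))
			return -1 & LAST_CPUPID_MASK;
		return folio_last_cpupid(folio);
	}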
#ifdef CONFIG_MIGRATION
static int top_tier_adistance;
/*
@@ -895,13 +914,14 @@ static int __init memory_tier_init(void)
WARN_ON(!node_demotion);
#endif
- guard(mutex)(&memory_tier_lock);
+ mutex_lock(&memory_tier_lock);
/*
* For now we can have 4 faster memory tiers with smaller adistance
* than default DRAM tier.
*/
default_dram_type = mt_find_alloc_memory_type(MEMTIER_ADISTANCE_DRAM,
&default_memory_types);
+ mutex_unlock(&memory_tier_lock);
if (IS_ERR(default_dram_type))
panic("%s() failed to allocate default DRAM tier\n", __func__);
@@ -921,8 +941,7 @@ bool numa_demotion_enabled = false;
static ssize_t demotion_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%s\n",
- numa_demotion_enabled ? "true" : "false");
+ return sysfs_emit(buf, "%s\n", str_true_false(numa_demotion_enabled));
}
static ssize_t demotion_enabled_store(struct kobject *kobj,
diff --git a/mm/memory.c b/mm/memory.c
index 3c01d68065be..2366578015ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -666,17 +666,16 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
return NULL;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd)
{
unsigned long pfn = pmd_pfn(pmd);
- /*
- * There is no pmd_special() but there may be special pmds, e.g.
- * in a direct-access (dax) mapping, so let's just replicate the
- * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
- */
+ /* Currently it's only used for huge pfnmaps */
+ if (unlikely(pmd_special(pmd)))
+ return NULL;
+
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
@@ -927,8 +926,11 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
* We have a prealloc page, all good! Take it
* over and copy the page & arm it.
*/
+
+ if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
+ return -EHWPOISON;
+
*prealloc = NULL;
- copy_user_highpage(&new_folio->page, page, addr, src_vma);
__folio_mark_uptodate(new_folio);
folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, dst_vma);
@@ -1167,8 +1169,9 @@ again:
/*
* If we need a pre-allocated page for this pte, drop the
* locks, allocate, and try again.
+	 * If the copy failed due to hwpoison in the source page, break out.
*/
- if (unlikely(ret == -EAGAIN))
+ if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
break;
if (unlikely(prealloc)) {
/*
@@ -1198,7 +1201,7 @@ again:
goto out;
}
entry.val = 0;
- } else if (ret == -EBUSY) {
+ } else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
goto out;
} else if (ret == -EAGAIN) {
prealloc = folio_prealloc(src_mm, src_vma, addr, false);
@@ -2632,11 +2635,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
return 0;
}
-/*
- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
- * must have pre-validated the caching bits of the pgprot_t.
- */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
@@ -2689,6 +2688,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
return 0;
}
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+ * must have pre-validated the caching bits of the pgprot_t.
+ */
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
+
+ if (!error)
+ return 0;
+
+ /*
+ * A partial pfn range mapping is dangerous: it does not
+ * maintain page reference counts, and callers may free
+ * pages due to the error. So zap it early.
+ */
+ zap_page_range_single(vma, addr, size, NULL);
+ return error;
+}
+
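A hedged sketch of the caller-visible effect: with the zap-on-error behaviour
above, a driver's ->mmap handler never has to clean up a partially installed
mapping itself (demo_mmap and demo_buf_pfn are hypothetical, and this assumes
remap_pfn_range() routes through the notrack variant as it does in this file):

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/*
		 * On failure, any partially installed PTEs have already been
		 * zapped, so the driver may free its backing pages safely.
		 */
		return remap_pfn_range(vma, vma->vm_start, demo_buf_pfn,
				       size, vma->vm_page_prot);
	}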
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
@@ -3259,7 +3279,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
}
/**
- * vmf_anon_prepare - Prepare to handle an anonymous fault.
+ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
* @vmf: The vm_fault descriptor passed from the fault handler.
*
* When preparing to insert an anonymous page into a VMA from a
@@ -3273,7 +3293,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
* Return: 0 if fault handling can proceed. Any other value should be
* returned to the caller.
*/
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
@@ -3281,10 +3301,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
if (likely(vma->anon_vma))
return 0;
if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
- if (!mmap_read_trylock(vma->vm_mm)) {
- vma_end_read(vma);
+ if (!mmap_read_trylock(vma->vm_mm))
return VM_FAULT_RETRY;
- }
}
if (__anon_vma_prepare(vma))
ret = VM_FAULT_OOM;
@@ -3986,6 +4004,194 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
+ swp_entry_t entry;
+
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ vmf->address, false);
+ if (!folio)
+ return NULL;
+
+ entry = pte_to_swp_entry(vmf->orig_pte);
+ if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+ GFP_KERNEL, entry)) {
+ folio_put(folio);
+ return NULL;
+ }
+
+ return folio;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
+{
+ struct swap_info_struct *si = swp_swap_info(entry);
+ pgoff_t offset = swp_offset(entry);
+ int i;
+
+ /*
+	 * We are allocating a large folio and calling swap_read_folio(), which
+	 * happens when the faulted pte has no swapcache. We need to ensure
+	 * that none of the other PTEs has a swapcache entry either; otherwise,
+	 * we might read from the swap device while the content is still in
+	 * the swapcache.
+ */
+ for (i = 0; i < max_nr; i++) {
+ if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
+ return i;
+ }
+
+ return i;
+}
+
+/*
+ * Check if the PTEs within a range are contiguous swap entries
+ * and have consistent swapcache, zeromap.
+ */
+static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
+{
+ unsigned long addr;
+ swp_entry_t entry;
+ int idx;
+ pte_t pte;
+
+ addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
+ idx = (vmf->address - addr) / PAGE_SIZE;
+ pte = ptep_get(ptep);
+
+ if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
+ return false;
+ entry = pte_to_swp_entry(pte);
+ if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
+ return false;
+
+ /*
+	 * swap_read_folio() can't handle the case where a large folio is
+	 * backed by a mix of different backends, and such mixes are likely
+	 * rare corner cases. Similar checks might be added once zswap
+	 * supports large folios.
+ */
+ if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
+ return false;
+ if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
+ unsigned long addr,
+ unsigned long orders)
+{
+ int order, nr;
+
+ order = highest_order(orders);
+
+ /*
+ * To swap in a THP with nr pages, we require that its first swap_offset
+ * is aligned with that number, as it was when the THP was swapped out.
+ * This helps filter out most invalid entries.
+ */
+ while (orders) {
+ nr = 1 << order;
+ if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
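A worked example of the alignment filter (illustrative numbers only): with
vmf->address >> PAGE_SHIFT == 0x1234 and swp_offset == 0x5634, both give 4
modulo 16, so an order-4 (16-page) swap-in lines up and the loop breaks with
the full order mask intact; had they disagreed, next_order() would drop
order 4 and retry the check at the next lower enabled order. The predicate,
extracted into a hypothetical helper:

	/* Does an order-`order` THP swap-in line up at this address? */
	static inline bool demo_order_aligned(unsigned long addr,
					      pgoff_t swp_offset, int order)
	{
		unsigned long nr = 1UL << order;

		return (addr >> PAGE_SHIFT) % nr == swp_offset % nr;
	}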
+static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long orders;
+ struct folio *folio;
+ unsigned long addr;
+ swp_entry_t entry;
+ spinlock_t *ptl;
+ pte_t *pte;
+ gfp_t gfp;
+ int order;
+
+ /*
+ * If uffd is active for the vma we need per-page fault fidelity to
+ * maintain the uffd semantics.
+ */
+ if (unlikely(userfaultfd_armed(vma)))
+ goto fallback;
+
+ /*
+ * A large swapped out folio could be partially or fully in zswap. We
+ * lack handling for such cases, so fallback to swapping in order-0
+ * folio.
+ */
+ if (!zswap_never_enabled())
+ goto fallback;
+
+ entry = pte_to_swp_entry(vmf->orig_pte);
+ /*
+ * Get a list of all the (large) orders below PMD_ORDER that are enabled
+ * and suitable for swapping THP.
+ */
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+ orders = thp_swap_suitable_orders(swp_offset(entry),
+ vmf->address, orders);
+
+ if (!orders)
+ goto fallback;
+
+ pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+ vmf->address & PMD_MASK, &ptl);
+ if (unlikely(!pte))
+ goto fallback;
+
+ /*
+ * For do_swap_page, find the highest order where the aligned range is
+ * completely swap entries with contiguous swap offsets.
+ */
+ order = highest_order(orders);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ pte_unmap_unlock(pte, ptl);
+
+ /* Try allocating the highest of the remaining orders. */
+ gfp = vma_thp_gfp_mask(vma);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ if (folio) {
+ if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+ gfp, entry))
+ return folio;
+ folio_put(folio);
+ }
+ order = next_order(&orders, order);
+ }
+
+fallback:
+ return __alloc_swap_folio(vmf);
+}
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
+{
+ return false;
+}
+
+static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+{
+ return __alloc_swap_folio(vmf);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -4074,35 +4280,34 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
- /*
- * Prevent parallel swapin from proceeding with
- * the cache flag. Otherwise, another thread may
- * finish swapin first, free the entry, and swapout
- * reusing the same entry. It's undetectable as
- * pte_same() returns true due to entry reuse.
- */
- if (swapcache_prepare(entry)) {
- /* Relax a bit to prevent rapid repeated page faults */
- schedule_timeout_uninterruptible(1);
- goto out;
- }
- need_clear_cache = true;
-
/* skip swapcache */
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
- vma, vmf->address, false);
- page = &folio->page;
+ folio = alloc_swap_folio(vmf);
if (folio) {
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
- if (mem_cgroup_swapin_charge_folio(folio,
- vma->vm_mm, GFP_KERNEL,
- entry)) {
- ret = VM_FAULT_OOM;
+ nr_pages = folio_nr_pages(folio);
+ if (folio_test_large(folio))
+ entry.val = ALIGN_DOWN(entry.val, nr_pages);
+ /*
+ * Prevent parallel swapin from proceeding with
+ * the cache flag. Otherwise, another thread
+ * may finish swapin first, free the entry, and
+ * swapout reusing the same entry. It's
+ * undetectable as pte_same() returns true due
+ * to entry reuse.
+ */
+ if (swapcache_prepare(entry, nr_pages)) {
+ /*
+ * Relax a bit to prevent rapid
+ * repeated page faults.
+ */
+ schedule_timeout_uninterruptible(1);
goto out_page;
}
- mem_cgroup_swapin_uncharge_swap(entry);
+ need_clear_cache = true;
+
+ mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
shadow = get_shadow_from_swap_cache(entry);
if (shadow)
@@ -4116,10 +4321,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio->private = NULL;
}
} else {
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf);
- if (page)
- folio = page_folio(page);
swapcache = folio;
}
@@ -4140,6 +4343,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+ page = folio_file_page(folio, swp_offset(entry));
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
@@ -4209,6 +4413,24 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_nomap;
}
+ /* allocated large folios for SWP_SYNCHRONOUS_IO */
+ if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
+ unsigned long nr = folio_nr_pages(folio);
+ unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
+ unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
+ pte_t *folio_ptep = vmf->pte - idx;
+ pte_t folio_pte = ptep_get(folio_ptep);
+
+ if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+ swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+ goto out_nomap;
+
+ page_idx = idx;
+ address = folio_start;
+ ptep = folio_ptep;
+ goto check_folio;
+ }
+
nr_pages = 1;
page_idx = 0;
address = vmf->address;
@@ -4340,11 +4562,12 @@ check_folio:
folio_add_lru_vma(folio, vma);
} else if (!folio_test_anon(folio)) {
/*
- * We currently only expect small !anon folios, which are either
- * fully exclusive or fully shared. If we ever get large folios
- * here, we have to be careful.
+ * We currently only expect small !anon folios which are either
+		 * fully exclusive or fully shared, or newly allocated large
+ * folios which are fully exclusive. If we ever get large
+ * folios within swapcache here, we have to be careful.
*/
- VM_WARN_ON_ONCE(folio_test_large(folio));
+ VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
} else {
@@ -4387,7 +4610,7 @@ unlock:
out:
/* Clear the swap cache pin for direct swapin after PTL unlock */
if (need_clear_cache)
- swapcache_clear(si, entry);
+ swapcache_clear(si, entry, nr_pages);
if (si)
put_swap_device(si);
return ret;
@@ -4403,7 +4626,7 @@ out_release:
folio_put(swapcache);
}
if (need_clear_cache)
- swapcache_clear(si, entry);
+ swapcache_clear(si, entry, nr_pages);
if (si)
put_swap_device(si);
return ret;
@@ -4597,9 +4820,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
folio_ref_add(folio, nr_pages - 1);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
-#endif
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
setpte:
@@ -5094,10 +5315,14 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (ret & VM_FAULT_DONE_COW)
return ret;
- copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+ if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
+ ret = VM_FAULT_HWPOISON;
+ goto unlock;
+ }
__folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
+unlock:
unlock_page(vmf->page);
put_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -5202,16 +5427,46 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
return ret;
}
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags)
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags,
+ bool writable, int *last_cpupid)
{
struct vm_area_struct *vma = vmf->vma;
+ /*
+ * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+ * much anyway since they can be in shared cache state. This misses
+ * the case where a mapping is writable but the process never writes
+ * to it but pte_write gets cleared during protection updates and
+ * pte_dirty has unpredictable behaviour between PTE scan updates,
+ * background writeback, dirty balancing and application behaviour.
+ */
+ if (!writable)
+ *flags |= TNF_NO_GROUP;
+
+ /*
+ * Flag if the folio is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together
+ */
+ if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+ *flags |= TNF_SHARED;
+ /*
+ * For memory tiering mode, cpupid of slow memory page is used
+ * to record page access time. So use default value.
+ */
+ if (folio_use_access_time(folio))
+ *last_cpupid = (-1 & LAST_CPUPID_MASK);
+ else
+ *last_cpupid = folio_last_cpupid(folio);
+
/* Record the current PID accessing the VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (page_nid == numa_node_id()) {
+#ifdef CONFIG_NUMA_BALANCING
+ count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
+#endif
+ if (folio_nid(folio) == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
@@ -5313,36 +5568,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!folio || folio_is_zone_device(folio))
goto out_map;
- /*
- * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
- * much anyway since they can be in shared cache state. This misses
- * the case where a mapping is writable but the process never writes
- * to it but pte_write gets cleared during protection updates and
- * pte_dirty has unpredictable behaviour between PTE scan updates,
- * background writeback, dirty balancing and application behaviour.
- */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
- /*
- * Flag if the folio is shared between multiple address spaces. This
- * is later used when determining whether to group tasks together
- */
- if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
- flags |= TNF_SHARED;
-
nid = folio_nid(folio);
nr_pages = folio_nr_pages(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
- !node_is_toptier(nid))
- last_cpupid = (-1 & LAST_CPUPID_MASK);
- else
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
+ writable, &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -5998,10 +6228,6 @@ retry:
if (!vma_start_read(vma))
goto inval;
- /* Check since vm_start/vm_end might change before we lock the VMA */
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
- goto inval_end_read;
-
/* Check if the VMA got isolated after we found it */
if (vma->detached) {
vma_end_read(vma);
@@ -6009,6 +6235,16 @@ retry:
/* The area was replaced with another one */
goto retry;
}
+ /*
+ * At this point, we have a stable reference to a VMA: The VMA is
+ * locked and we know it hasn't already been isolated.
+ * From here on, we can access the VMA without worrying about which
+ * fields are accessible for RCU readers.
+ */
+
+ /* Check since vm_start/vm_end might change before we lock the VMA */
+ if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ goto inval_end_read;
rcu_read_unlock();
return vma;
@@ -6093,78 +6329,155 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
+static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
+ spinlock_t *lock, pte_t *ptep,
+ pgprot_t pgprot, unsigned long pfn_base,
+ unsigned long addr_mask, bool writable,
+ bool special)
+{
+ args->lock = lock;
+ args->ptep = ptep;
+ args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
+ args->pgprot = pgprot;
+ args->writable = writable;
+ args->special = special;
+}
+
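For instance, for a PMD leaf the helper is handed addr_mask == PMD_MASK, so
the in-leaf page offset (args->address & ~PMD_MASK) >> PAGE_SHIFT is added to
the base PFN, yielding the PFN of the exact page under args->address rather
than the first PFN of the huge mapping. A hedged illustration with made-up
numbers:

	/*
	 * Example: 2M PMD leaf, pfn_base == 0x80000, offset of the address
	 * within the leaf == 0x3000 (i.e. 3 pages with PAGE_SHIFT == 12):
	 * args->pfn == 0x80000 + (0x3000 >> 12) == 0x80003.
	 */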
+static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_LOCKDEP
+ struct address_space *mapping = vma->vm_file->f_mapping;
+
+ if (mapping)
+ lockdep_assert(lockdep_is_held(&vma->vm_file->f_mapping->i_mmap_rwsem) ||
+ lockdep_is_held(&vma->vm_mm->mmap_lock));
+ else
+ lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
+#endif
+}
+
/**
- * follow_pte - look up PTE at a user virtual address
- * @vma: the memory mapping
- * @address: user virtual address
- * @ptepp: location to store found PTE
- * @ptlp: location to store the lock for the PTE
+ * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
+ * @args: Pointer to struct @follow_pfnmap_args
+ *
+ * The caller needs to set up args->vma and args->address to point to the
+ * virtual address to look up. On a successful return, the results will be
+ * put into the other output fields.
*
- * On a successful return, the pointer to the PTE is stored in @ptepp;
- * the corresponding lock is taken and its location is stored in @ptlp.
+ * After the caller has finished using the fields, it must invoke
+ * follow_pfnmap_end() to properly release the locks and resources of the
+ * lookup.
*
- * The contents of the PTE are only stable until @ptlp is released using
- * pte_unmap_unlock(). This function will fail if the PTE is non-present.
- * Present PTEs may include PTEs that map refcounted pages, such as
- * anonymous folios in COW mappings.
+ * Between the start() and end() calls, the results in @args are valid as
+ * the proper locks are held. After end() is called, all the fields in
+ * @follow_pfnmap_args become invalid and must not be accessed again. Any
+ * further use of such information after end() requires proper
+ * synchronization by the caller with page table updates; otherwise it can
+ * create a security bug.
*
- * Callers must be careful when relying on PTE content after
- * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
- * callers must protect against invalidation with MMU notifiers; otherwise
- * access to the PFN at a later point in time can trigger use-after-free.
+ * If the PTE maps a refcounted page, callers are responsible for protecting
+ * against invalidation with MMU notifiers; otherwise access to the PFN at
+ * a later point in time can trigger use-after-free.
*
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
- * should be taken for read.
+ * should be taken for read, and the mmap semaphore must not be released
+ * before end() is invoked.
*
* This function must not be used to modify PTE content.
*
- * Return: zero on success, -ve otherwise.
+ * Return: zero on success, negative otherwise.
*/
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp)
+int follow_pfnmap_start(struct follow_pfnmap_args *args)
{
+ struct vm_area_struct *vma = args->vma;
+ unsigned long address = args->address;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
+ spinlock_t *lock;
+ pgd_t *pgdp;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+
+ pfnmap_lockdep_assert(vma);
- mmap_assert_locked(mm);
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
goto out;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+retry:
+ pgdp = pgd_offset(mm, address);
+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
- p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+ p4dp = p4d_offset(pgdp, address);
+ p4d = READ_ONCE(*p4dp);
+ if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
goto out;
- pud = pud_offset(p4d, address);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ pudp = pud_offset(p4dp, address);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
goto out;
+ if (pud_leaf(pud)) {
+ lock = pud_lock(mm, pudp);
+ if (!unlikely(pud_leaf(pud))) {
+ spin_unlock(lock);
+ goto retry;
+ }
+ pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
+ pud_pfn(pud), PUD_MASK, pud_write(pud),
+ pud_special(pud));
+ return 0;
+ }
- pmd = pmd_offset(pud, address);
- VM_BUG_ON(pmd_trans_huge(*pmd));
+ pmdp = pmd_offset(pudp, address);
+ pmd = pmdp_get_lockless(pmdp);
+ if (pmd_leaf(pmd)) {
+ lock = pmd_lock(mm, pmdp);
+ if (!unlikely(pmd_leaf(pmd))) {
+ spin_unlock(lock);
+ goto retry;
+ }
+ pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
+ pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
+ pmd_special(pmd));
+ return 0;
+ }
- ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+ ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
if (!ptep)
goto out;
- if (!pte_present(ptep_get(ptep)))
+ pte = ptep_get(ptep);
+ if (!pte_present(pte))
goto unlock;
- *ptepp = ptep;
+ pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
+ pte_pfn(pte), PAGE_MASK, pte_write(pte),
+ pte_special(pte));
return 0;
unlock:
- pte_unmap_unlock(ptep, *ptlp);
+ pte_unmap_unlock(ptep, lock);
out:
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(follow_pte);
+EXPORT_SYMBOL_GPL(follow_pfnmap_start);
+
+/**
+ * follow_pfnmap_end(): End a follow_pfnmap_start() process
+ * @args: Pointer to struct @follow_pfnmap_args
+ *
+ * Must be used in pair with follow_pfnmap_start(). See the start() function
+ * above for more information.
+ */
+void follow_pfnmap_end(struct follow_pfnmap_args *args)
+{
+ if (args->lock)
+ spin_unlock(args->lock);
+ if (args->ptep)
+ pte_unmap(args->ptep);
+}
+EXPORT_SYMBOL_GPL(follow_pfnmap_end);
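A minimal usage sketch of the pair, assuming the caller already holds the
mmap lock for read (demo_lookup_pfn is a hypothetical name; the
generic_access_phys() conversion below is the in-tree example):

	static int demo_lookup_pfn(struct vm_area_struct *vma,
				   unsigned long addr, unsigned long *pfn)
	{
		struct follow_pfnmap_args args = { .vma = vma, .address = addr };

		if (follow_pfnmap_start(&args))
			return -EINVAL;
		*pfn = args.pfn;	/* only stable until the end() call */
		follow_pfnmap_end(&args);
		return 0;
	}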
#ifdef CONFIG_HAVE_IOREMAP_PROT
/**
@@ -6185,34 +6498,34 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
resource_size_t phys_addr;
unsigned long prot = 0;
void __iomem *maddr;
- pte_t *ptep, pte;
- spinlock_t *ptl;
int offset = offset_in_page(addr);
int ret = -EINVAL;
+ bool writable;
+ struct follow_pfnmap_args args = { .vma = vma, .address = addr };
retry:
- if (follow_pte(vma, addr, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
return -EINVAL;
- pte = ptep_get(ptep);
- pte_unmap_unlock(ptep, ptl);
+ prot = pgprot_val(args.pgprot);
+ phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
+ writable = args.writable;
+ follow_pfnmap_end(&args);
- prot = pgprot_val(pte_pgprot(pte));
- phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-
- if ((write & FOLL_WRITE) && !pte_write(pte))
+ if ((write & FOLL_WRITE) && !writable)
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (!maddr)
return -ENOMEM;
- if (follow_pte(vma, addr, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
goto out_unmap;
- if (!pte_same(pte, ptep_get(ptep))) {
- pte_unmap_unlock(ptep, ptl);
+ if ((prot != pgprot_val(args.pgprot)) ||
+ (phys_addr != (args.pfn << PAGE_SHIFT)) ||
+ (writable != args.writable)) {
+ follow_pfnmap_end(&args);
iounmap(maddr);
-
goto retry;
}
@@ -6221,7 +6534,7 @@ retry:
else
memcpy_fromio(buf, maddr + offset, len);
ret = len;
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unmap:
iounmap(maddr);
@@ -6572,7 +6885,7 @@ long copy_folio_from_user(struct folio *dst_folio,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 951878ab627a..621ae1015106 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -366,7 +366,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
-int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
+int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
struct mhp_params *params)
{
const unsigned long end_pfn = pfn + nr_pages;
@@ -524,7 +524,7 @@ static void update_pgdat_span(struct pglist_data *pgdat)
pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
-void __ref remove_pfn_range_from_zone(struct zone *zone,
+void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages)
{
@@ -629,7 +629,7 @@ int restore_online_page_callback(online_page_callback_t callback)
EXPORT_SYMBOL_GPL(restore_online_page_callback);
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
-void __ref generic_online_page(struct page *page, unsigned int order)
+void generic_online_page(struct page *page, unsigned int order)
{
__free_pages_core(page, order, MEMINIT_HOTPLUG);
}
@@ -741,7 +741,7 @@ static inline void section_taint_zone_device(unsigned long pfn)
* (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
* zone stats (e.g., nr_isolate_pageblock) are touched.
*/
-void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
struct vmem_altmap *altmap, int migratetype)
{
@@ -1143,7 +1143,7 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
/*
* Must be called with mem_hotplug_lock in write mode.
*/
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+int online_pages(unsigned long pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
unsigned long flags;
@@ -1233,7 +1233,7 @@ failed_addition:
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
-static pg_data_t __ref *hotadd_init_pgdat(int nid)
+static pg_data_t *hotadd_init_pgdat(int nid)
{
struct pglist_data *pgdat;
@@ -1386,7 +1386,7 @@ bool mhp_supports_memmap_on_memory(void)
}
EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory);
-static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+static void remove_memory_blocks_and_altmaps(u64 start, u64 size)
{
unsigned long memblock_size = memory_block_size_bytes();
u64 cur_start;
@@ -1473,7 +1473,7 @@ out:
*
* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
*/
-int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
+int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
@@ -1580,7 +1580,7 @@ error_mem_hotplug_end:
}
/* requires device_hotplug_lock, see add_memory_resource() */
-int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
+int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
struct resource *res;
int ret;
@@ -1772,67 +1772,59 @@ found:
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
+ struct folio *folio;
unsigned long pfn;
- struct page *page, *head;
LIST_HEAD(source);
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- struct folio *folio;
- bool isolated;
+ struct page *page;
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
folio = page_folio(page);
- head = &folio->page;
- if (PageHuge(page)) {
- pfn = page_to_pfn(head) + compound_nr(head) - 1;
- isolate_hugetlb(folio, &source);
- continue;
- } else if (PageTransHuge(page))
- pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
+ /*
+ * No reference or lock is held on the folio, so it might
+ * be modified concurrently (e.g. split). As such,
+ * folio_nr_pages() may read garbage. This is fine as the outer
+ * loop will revisit the split folio later.
+ */
+ if (folio_test_large(folio))
+ pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
/*
* HWPoison pages have elevated reference counts so the migration would
* fail on them. It also doesn't make any sense to migrate them in the
* first place. Still try to unmap such a page in case it is still mapped
- * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
- * the unmap as the catch all safety net).
+ * (keep the unmap as the catch all safety net).
*/
- if (PageHWPoison(page)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
if (folio_mapped(folio))
- try_to_unmap(folio, TTU_IGNORE_MLOCK);
+ unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
continue;
}
- if (!get_page_unless_zero(page))
+ if (!folio_try_get(folio))
continue;
- /*
- * We can skip free pages. And we can deal with pages on
- * LRU and non-lru movable pages.
- */
- if (PageLRU(page))
- isolated = isolate_lru_page(page);
- else
- isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
- if (isolated) {
- list_add_tail(&page->lru, &source);
- if (!__PageMovable(page))
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
- } else {
+ if (unlikely(page_folio(page) != folio))
+ goto put_folio;
+
+ if (!isolate_folio_to_list(folio, &source)) {
if (__ratelimit(&migrate_rs)) {
- pr_warn("failed to isolate pfn %lx\n", pfn);
+ pr_warn("failed to isolate pfn %lx\n",
+ page_to_pfn(page));
dump_page(page, "isolation failed");
}
}
- put_page(page);
+put_folio:
+ folio_put(folio);
}
if (!list_empty(&source)) {
nodemask_t nmask = node_states[N_MEMORY];
@@ -1847,7 +1839,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We have checked that migration range is on a single zone so
* we can use the nid of the first page to all the others.
*/
- mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+ mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
/*
* try to allocate from a different node but reuse this node
@@ -1860,11 +1852,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
ret = migrate_pages(&source, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
if (ret) {
- list_for_each_entry(page, &source, lru) {
+ list_for_each_entry(folio, &source, lru) {
if (__ratelimit(&migrate_rs)) {
pr_warn("migrating pfn %lx failed ret:%d\n",
- page_to_pfn(page), ret);
- dump_page(page, "migration failure");
+ folio_pfn(folio), ret);
+ dump_page(&folio->page,
+ "migration failure");
}
}
putback_movable_pages(&source);
@@ -1939,7 +1932,7 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
/*
* Must be called with mem_hotplug_lock in write mode.
*/
-int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
const unsigned long end_pfn = start_pfn + nr_pages;
@@ -2240,7 +2233,7 @@ static int memory_blocks_have_altmaps(u64 start, u64 size)
return 1;
}
-static int __ref try_remove_memory(u64 start, u64 size)
+static int try_remove_memory(u64 start, u64 size)
{
int rc, nid = NUMA_NO_NODE;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b858e22b259d..b646fab3e45e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -676,8 +676,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
tlb_gather_mmu(&tlb, vma->vm_mm);
nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
- if (nr_updated > 0)
+ if (nr_updated > 0) {
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+ count_memcg_events_mm(vma->vm_mm, NUMA_PTE_UPDATES, nr_updated);
+ }
tlb_finish_mmu(&tlb);
@@ -1951,7 +1953,7 @@ unsigned int mempolicy_slab_node(void)
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->nodes);
- return z->zone ? zone_to_nid(z->zone) : node;
+ return zonelist_zone(z) ? zonelist_node_idx(z) : node;
}
case MPOL_LOCAL:
return node;
@@ -2809,7 +2811,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
node_zonelist(thisnid, GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->nodes);
- polnid = zone_to_nid(z->zone);
+ polnid = zonelist_node_idx(z);
break;
default:
diff --git a/mm/migrate.c b/mm/migrate.c
index 923ea80ba744..dfdb3a136bf8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -20,7 +20,6 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
-#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
@@ -35,21 +34,16 @@
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
-#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
-#include <linux/memremap.h>
-#include <linux/userfaultfd_k.h>
-#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
-#include <linux/oom.h>
#include <linux/memory.h>
-#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
+#include <linux/pagewalk.h>
#include <asm/tlbflush.h>
@@ -177,13 +171,83 @@ void putback_movable_pages(struct list_head *l)
}
}
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ bool isolated, lru;
+
+ if (folio_test_hugetlb(folio))
+ return isolate_hugetlb(folio, list);
+
+ lru = !__folio_test_movable(folio);
+ if (lru)
+ isolated = folio_isolate_lru(folio);
+ else
+ isolated = isolate_movable_page(&folio->page,
+ ISOLATE_UNEVICTABLE);
+
+ if (!isolated)
+ return false;
+
+ list_add(&folio->lru, list);
+ if (lru)
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
+
+ return true;
+}
+
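A hedged sketch of the expected calling pattern, modelled on the
do_migrate_range() conversion earlier in this diff (demo_isolate is a
hypothetical name):

	static void demo_isolate(struct folio *folio, struct list_head *list)
	{
		/* The helper requires an elevated refcount on the folio. */
		if (!folio_try_get(folio))
			return;
		if (!isolate_folio_to_list(folio, list))
			pr_warn("failed to isolate pfn %lx\n", folio_pfn(folio));
		/* Isolation took its own reference; drop ours either way. */
		folio_put(folio);
	}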
+static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+ struct folio *folio,
+ unsigned long idx)
+{
+ struct page *page = folio_page(folio, idx);
+ bool contains_data;
+ pte_t newpte;
+ void *addr;
+
+ VM_BUG_ON_PAGE(PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
+
+ if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
+ mm_forbids_zeropage(pvmw->vma->vm_mm))
+ return false;
+
+ /*
+	 * The pmd entry mapping the old thp was flushed and the pte mapping
+	 * this subpage is no longer present. If the subpage contains only
+	 * zeroes, map it to the shared zeropage.
+ */
+ addr = kmap_local_page(page);
+ contains_data = memchr_inv(addr, 0, PAGE_SIZE);
+ kunmap_local(addr);
+
+ if (contains_data)
+ return false;
+
+ newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+ pvmw->vma->vm_page_prot));
+ set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+
+ dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
+ return true;
+}
+
+struct rmap_walk_arg {
+ struct folio *folio;
+ bool map_unused_to_zeropage;
+};
+
/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct folio *folio,
- struct vm_area_struct *vma, unsigned long addr, void *old)
+ struct vm_area_struct *vma, unsigned long addr, void *arg)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+ struct rmap_walk_arg *rmap_walk_arg = arg;
+ DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
@@ -207,6 +271,9 @@ static bool remove_migration_pte(struct folio *folio,
continue;
}
#endif
+ if (rmap_walk_arg->map_unused_to_zeropage &&
+ try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+ continue;
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
@@ -285,14 +352,21 @@ static bool remove_migration_pte(struct folio *folio,
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
+ struct rmap_walk_arg rmap_walk_arg = {
+ .folio = src,
+ .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+ };
+
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
- .arg = src,
+ .arg = &rmap_walk_arg,
};
- if (locked)
+ VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+
+ if (flags & RMP_LOCKED)
rmap_walk_locked(dst, &rwc);
else
rmap_walk(dst, &rwc);
@@ -422,6 +496,8 @@ static int __folio_migrate_mapping(struct address_space *mapping,
/* No turning back from here */
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
+ if (folio_test_anon(folio) && folio_test_large(folio))
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
if (folio_test_swapbacked(folio))
__folio_set_swapbacked(newfolio);
@@ -446,6 +522,8 @@ static int __folio_migrate_mapping(struct address_space *mapping,
*/
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
+ if (folio_test_anon(folio) && folio_test_large(folio))
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
folio_ref_add(newfolio, nr); /* add cache reference */
if (folio_test_swapbacked(folio)) {
__folio_set_swapbacked(newfolio);
@@ -585,8 +663,6 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
int cpupid;
- if (folio_test_error(folio))
- folio_set_error(newfolio);
if (folio_test_referenced(folio))
folio_set_referenced(newfolio);
if (folio_test_uptodate(folio))
@@ -640,7 +716,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_migrate_ksm(newfolio, folio);
/*
* Please do not reorder this without considering how mm/ksm.c's
- * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
+ * ksm_get_folio() depends upon ksm_migrate_page() and the
+ * swapcache flag.
*/
if (folio_test_swapcache(folio))
folio_clear_swapcache(folio);
@@ -666,6 +743,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_set_readahead(newfolio);
folio_copy_owner(newfolio, folio);
+ pgalloc_tag_copy(newfolio, folio);
mem_cgroup_migrate(folio, newfolio);
}
@@ -904,7 +982,7 @@ static int writeout(struct address_space *mapping, struct folio *folio)
* At this point we know that the migration attempt cannot
* be successful.
*/
- remove_migration_ptes(folio, folio, false);
+ remove_migration_ptes(folio, folio, 0);
rc = mapping->a_ops->writepage(&folio->page, &wbc);
@@ -1068,7 +1146,7 @@ static void migrate_folio_undo_src(struct folio *src,
struct list_head *ret)
{
if (page_was_mapped)
- remove_migration_ptes(src, src, false);
+ remove_migration_ptes(src, src, 0);
/* Drop an anon_vma reference if we took one */
if (anon_vma)
put_anon_vma(anon_vma);
@@ -1306,7 +1384,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
lru_add_drain();
if (old_page_state & PAGE_WAS_MAPPED)
- remove_migration_ptes(src, dst, false);
+ remove_migration_ptes(src, dst, 0);
out_unlock_both:
folio_unlock(dst);
@@ -1444,7 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
if (page_was_mapped)
remove_migration_ptes(src,
- rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+ rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
unlock_put_anon:
folio_unlock(dst);
@@ -1682,7 +1760,8 @@ static int migrate_pages_batch(struct list_head *from,
* use _deferred_list.
*/
if (nr_pages > 2 &&
- !list_empty(&folio->_deferred_list)) {
+ !list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio)) {
if (!try_split_folio(folio, split_folios, mode)) {
nr_failed++;
stats->nr_thp_failed += is_thp;
@@ -2111,76 +2190,66 @@ static int do_move_pages_to_node(struct list_head *pagelist, int node)
return err;
}
+static int __add_folio_for_migration(struct folio *folio, int node,
+ struct list_head *pagelist, bool migrate_all)
+{
+ if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+ return -EFAULT;
+
+ if (folio_is_zone_device(folio))
+ return -ENOENT;
+
+ if (folio_nid(folio) == node)
+ return 0;
+
+ if (folio_likely_mapped_shared(folio) && !migrate_all)
+ return -EACCES;
+
+ if (folio_test_hugetlb(folio)) {
+ if (isolate_hugetlb(folio, pagelist))
+ return 1;
+ } else if (folio_isolate_lru(folio)) {
+ list_add_tail(&folio->lru, pagelist);
+ node_stat_mod_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ folio_nr_pages(folio));
+ return 1;
+ }
+ return -EBUSY;
+}
+
/*
- * Resolves the given address to a struct page, isolates it from the LRU and
+ * Resolves the given address to a struct folio, isolates it from the LRU and
 * puts it on the given pagelist.
* Returns:
- * errno - if the page cannot be found/isolated
+ * errno - if the folio cannot be found/isolated
* 0 - when it doesn't have to be migrated because it is already on the
* target node
* 1 - when it has been queued
*/
-static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
+static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
int node, struct list_head *pagelist, bool migrate_all)
{
struct vm_area_struct *vma;
- unsigned long addr;
- struct page *page;
+ struct folio_walk fw;
struct folio *folio;
- int err;
+ unsigned long addr;
+ int err = -EFAULT;
mmap_read_lock(mm);
addr = (unsigned long)untagged_addr_remote(mm, p);
- err = -EFAULT;
vma = vma_lookup(mm, addr);
- if (!vma || !vma_migratable(vma))
- goto out;
-
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
-
- err = -ENOENT;
- if (!page)
- goto out;
-
- folio = page_folio(page);
- if (folio_is_zone_device(folio))
- goto out_putfolio;
-
- err = 0;
- if (folio_nid(folio) == node)
- goto out_putfolio;
-
- err = -EACCES;
- if (folio_likely_mapped_shared(folio) && !migrate_all)
- goto out_putfolio;
-
- err = -EBUSY;
- if (folio_test_hugetlb(folio)) {
- if (isolate_hugetlb(folio, pagelist))
- err = 1;
- } else {
- if (!folio_isolate_lru(folio))
- goto out_putfolio;
-
- err = 1;
- list_add_tail(&folio->lru, pagelist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ if (vma && vma_migratable(vma)) {
+ folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+ if (folio) {
+ err = __add_folio_for_migration(folio, node, pagelist,
+ migrate_all);
+ folio_walk_end(&fw, vma);
+ } else {
+ err = -ENOENT;
+ }
}
-out_putfolio:
- /*
- * Either remove the duplicate refcount from folio_isolate_lru()
- * or drop the folio ref if it was not isolated.
- */
- folio_put(folio);
-out:
mmap_read_unlock(mm);
return err;
}
@@ -2274,8 +2343,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
* Errors in the page lookup or isolation are not fatal and we simply
* report them via status
*/
- err = add_page_for_migration(mm, p, current_node, &pagelist,
- flags & MPOL_MF_MOVE_ALL);
+ err = add_folio_for_migration(mm, p, current_node, &pagelist,
+ flags & MPOL_MF_MOVE_ALL);
if (err > 0) {
/* The page is successfully queued for migration */
@@ -2331,28 +2400,26 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
- struct page *page;
+ struct folio_walk fw;
+ struct folio *folio;
int err = -EFAULT;
vma = vma_lookup(mm, addr);
if (!vma)
goto set_status;
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto set_status;
-
- err = -ENOENT;
- if (!page)
- goto set_status;
-
- if (!is_zone_device_page(page))
- err = page_to_nid(page);
-
- put_page(page);
+ folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+ if (folio) {
+ if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+ err = -EFAULT;
+ else if (folio_is_zone_device(folio))
+ err = -ENOENT;
+ else
+ err = folio_nid(folio);
+ folio_walk_end(&fw, vma);
+ } else {
+ err = -ENOENT;
+ }
set_status:
*status = err;
@@ -2432,25 +2499,19 @@ static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
return current->mm;
}
- /* Find the mm_struct */
- rcu_read_lock();
- task = find_task_by_vpid(pid);
+ task = find_get_task_by_vpid(pid);
if (!task) {
- rcu_read_unlock();
return ERR_PTR(-ESRCH);
}
- get_task_struct(task);
/*
* Check if this process has the right to modify the specified
* process. Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
- rcu_read_unlock();
mm = ERR_PTR(-EPERM);
goto out;
}
- rcu_read_unlock();
mm = ERR_PTR(security_task_movememory(task));
if (IS_ERR(mm))
@@ -2526,7 +2587,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone) +
nr_migrate_pages,
- ZONE_MOVABLE, 0))
+ ZONE_MOVABLE, ALLOC_CMA))
continue;
return true;
}
@@ -2627,6 +2688,8 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int nr_remaining;
unsigned int nr_succeeded;
LIST_HEAD(migratepages);
+ struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
list_add(&folio->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
@@ -2636,10 +2699,13 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
putback_movable_pages(&migratepages);
if (nr_succeeded) {
count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
- if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
- mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
- nr_succeeded);
+ count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
+ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
+ && !node_is_toptier(folio_nid(folio))
+ && node_is_toptier(node))
+ mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
}
+ mem_cgroup_put(memcg);
BUG_ON(!list_empty(&migratepages));
return nr_remaining ? -EAGAIN : 0;
}
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6d66dc1c6ffa..9cf26592ac93 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -328,8 +328,8 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
/*
* One extra ref because caller holds an extra reference, either from
- * isolate_lru_page() for a regular page, or migrate_vma_collect() for
- * a device page.
+ * folio_isolate_lru() for a regular folio, or migrate_vma_collect() for
+ * a device folio.
*/
int extra = 1 + (page == fault_page);
@@ -379,33 +379,33 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
continue;
}
- /* ZONE_DEVICE pages are not on LRU */
- if (!is_zone_device_page(page)) {
- if (!PageLRU(page) && allow_drain) {
+ folio = page_folio(page);
+ /* ZONE_DEVICE folios are not on LRU */
+ if (!folio_is_zone_device(folio)) {
+ if (!folio_test_lru(folio) && allow_drain) {
/* Drain CPU's lru cache */
lru_add_drain_all();
allow_drain = false;
}
- if (!isolate_lru_page(page)) {
+ if (!folio_isolate_lru(folio)) {
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
restore++;
continue;
}
/* Drop the reference we took in collect */
- put_page(page);
+ folio_put(folio);
}
- folio = page_folio(page);
if (folio_mapped(folio))
try_to_migrate(folio, 0);
- if (page_mapped(page) ||
+ if (folio_mapped(folio) ||
!migrate_vma_check_page(page, fault_page)) {
- if (!is_zone_device_page(page)) {
- get_page(page);
- putback_lru_page(page);
+ if (!folio_is_zone_device(folio)) {
+ folio_get(folio);
+ folio_putback_lru(folio);
}
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
@@ -424,7 +424,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
continue;
folio = page_folio(page);
- remove_migration_ptes(folio, folio, false);
+ remove_migration_ptes(folio, folio, 0);
src_pfns[i] = 0;
folio_unlock(folio);
@@ -708,7 +708,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
/*
* The only time there is no vma is when called from
- * migrate_device_coherent_page(). However this isn't
+ * migrate_device_coherent_folio(). However this isn't
* called if the page could not be unmapped.
*/
VM_BUG_ON(!migrate);
@@ -815,42 +815,45 @@ void migrate_device_finalize(unsigned long *src_pfns,
unsigned long i;
for (i = 0; i < npages; i++) {
- struct folio *dst, *src;
+ struct folio *dst = NULL, *src = NULL;
struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
struct page *page = migrate_pfn_to_page(src_pfns[i]);
+ if (newpage)
+ dst = page_folio(newpage);
+
if (!page) {
- if (newpage) {
- unlock_page(newpage);
- put_page(newpage);
+ if (dst) {
+ folio_unlock(dst);
+ folio_put(dst);
}
continue;
}
- if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
- if (newpage) {
- unlock_page(newpage);
- put_page(newpage);
+ src = page_folio(page);
+
+ if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
+ if (dst) {
+ folio_unlock(dst);
+ folio_put(dst);
}
- newpage = page;
+ dst = src;
}
- src = page_folio(page);
- dst = page_folio(newpage);
- remove_migration_ptes(src, dst, false);
+ remove_migration_ptes(src, dst, 0);
folio_unlock(src);
- if (is_zone_device_page(page))
- put_page(page);
+ if (folio_is_zone_device(src))
+ folio_put(src);
else
- putback_lru_page(page);
+ folio_putback_lru(src);
- if (newpage != page) {
- unlock_page(newpage);
- if (is_zone_device_page(newpage))
- put_page(newpage);
+ if (dst != src) {
+ folio_unlock(dst);
+ if (folio_is_zone_device(dst))
+ folio_put(dst);
else
- putback_lru_page(newpage);
+ folio_putback_lru(dst);
}
}
}
@@ -898,16 +901,17 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
unsigned long i, pfn;
for (pfn = start, i = 0; i < npages; pfn++, i++) {
- struct page *page = pfn_to_page(pfn);
+ struct folio *folio;
- if (!get_page_unless_zero(page)) {
+ folio = folio_get_nontail_page(pfn_to_page(pfn));
+ if (!folio) {
src_pfns[i] = 0;
continue;
}
- if (!trylock_page(page)) {
+ if (!folio_trylock(folio)) {
src_pfns[i] = 0;
- put_page(page);
+ folio_put(folio);
continue;
}
@@ -921,38 +925,38 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
EXPORT_SYMBOL(migrate_device_range);
/*
- * Migrate a device coherent page back to normal memory. The caller should have
- * a reference on page which will be copied to the new page if migration is
+ * Migrate a device coherent folio back to normal memory. The caller should have
+ * a reference on the folio, which will be copied to the new folio if migration is
* successful or dropped on failure.
*/
-int migrate_device_coherent_page(struct page *page)
+int migrate_device_coherent_folio(struct folio *folio)
{
unsigned long src_pfn, dst_pfn = 0;
- struct page *dpage;
+ struct folio *dfolio;
- WARN_ON_ONCE(PageCompound(page));
+ WARN_ON_ONCE(folio_test_large(folio));
- lock_page(page);
- src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+ folio_lock(folio);
+ src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
/*
* We don't have a VMA and don't need to walk the page tables to find
- * the source page. So call migrate_vma_unmap() directly to unmap the
- * page as migrate_vma_setup() will fail if args.vma == NULL.
+ * the source folio. So call migrate_vma_unmap() directly to unmap the
+ * folio as migrate_vma_setup() will fail if args.vma == NULL.
*/
migrate_device_unmap(&src_pfn, 1, NULL);
if (!(src_pfn & MIGRATE_PFN_MIGRATE))
return -EBUSY;
- dpage = alloc_page(GFP_USER | __GFP_NOWARN);
- if (dpage) {
- lock_page(dpage);
- dst_pfn = migrate_pfn(page_to_pfn(dpage));
+ dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
+ if (dfolio) {
+ folio_lock(dfolio);
+ dst_pfn = migrate_pfn(folio_pfn(dfolio));
}
migrate_device_pages(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
- copy_highpage(dpage, page);
+ folio_copy(dfolio, folio);
migrate_device_finalize(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 51960079875b..4ba5607aaf19 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1835,14 +1835,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
for_each_node(nid) {
pg_data_t *pgdat;
- if (!node_online(nid)) {
- /* Allocator not initialized yet */
- pgdat = arch_alloc_nodedata(nid);
- if (!pgdat)
- panic("Cannot allocate %zuB for node %d.\n",
- sizeof(*pgdat), nid);
- arch_refresh_nodedata(nid, pgdat);
- }
+ if (!node_online(nid))
+ alloc_offline_node_data(nid);
pgdat = NODE_DATA(nid);
free_area_init_node(nid);
@@ -1939,7 +1933,7 @@ static void __init deferred_free_pages(unsigned long pfn,
}
/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
- accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
+ accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if (pageblock_aligned(pfn))
diff --git a/mm/mmap.c b/mm/mmap.c
index d0dfc85b209b..dd4b35a25aeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -76,16 +76,6 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
-static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- struct vm_area_struct *next, unsigned long start,
- unsigned long end, unsigned long tree_end, bool mm_wr_locked);
-
-static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
-{
- return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
-}
-
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
@@ -102,100 +92,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
}
/*
- * Requires inode->i_mapping->i_mmap_rwsem
- */
-static void __remove_shared_vm_struct(struct vm_area_struct *vma,
- struct address_space *mapping)
-{
- if (vma_is_shared_maywrite(vma))
- mapping_unmap_writable(mapping);
-
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_remove(vma, &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
-}
-
-/*
- * Unlink a file-based vm structure from its interval tree, to hide
- * vma from rmap and vmtruncate before freeing its page tables.
- */
-void unlink_file_vma(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
-
- if (file) {
- struct address_space *mapping = file->f_mapping;
- i_mmap_lock_write(mapping);
- __remove_shared_vm_struct(vma, mapping);
- i_mmap_unlock_write(mapping);
- }
-}
-
-void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
-{
- vb->count = 0;
-}
-
-static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
-{
- struct address_space *mapping;
- int i;
-
- mapping = vb->vmas[0]->vm_file->f_mapping;
- i_mmap_lock_write(mapping);
- for (i = 0; i < vb->count; i++) {
- VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
- __remove_shared_vm_struct(vb->vmas[i], mapping);
- }
- i_mmap_unlock_write(mapping);
-
- unlink_file_vma_batch_init(vb);
-}
-
-void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
- struct vm_area_struct *vma)
-{
- if (vma->vm_file == NULL)
- return;
-
- if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
- vb->count == ARRAY_SIZE(vb->vmas))
- unlink_file_vma_batch_process(vb);
-
- vb->vmas[vb->count] = vma;
- vb->count++;
-}
-
-void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
-{
- if (vb->count > 0)
- unlink_file_vma_batch_process(vb);
-}
-
-/*
- * Close a vm structure and free it.
- */
-static void remove_vma(struct vm_area_struct *vma, bool unreachable)
-{
- might_sleep();
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
- if (vma->vm_file)
- fput(vma->vm_file);
- mpol_put(vma_policy(vma));
- if (unreachable)
- __vm_area_free(vma);
- else
- vm_area_free(vma);
-}
-
-static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
- unsigned long min)
-{
- return mas_prev(&vmi->mas, min);
-}
-
-/*
* check_brk_limits() - Use platform specific check of range & verify mlock
* limits.
* @addr: The address to check
@@ -273,11 +169,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
- * do_vma_munmap() will drop the lock on success, so update it
- * before calling do_vma_munmap().
+ * do_vmi_align_munmap() will drop the lock on success, so
+ * update it before calling do_vmi_align_munmap().
*/
mm->brk = brk;
- if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
+ if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
+ /* unlock = */ true))
goto out;
goto success_unlocked;
@@ -318,875 +215,6 @@ out:
return origbrk;
}
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-static void validate_mm(struct mm_struct *mm)
-{
- int bug = 0;
- int i = 0;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mt_validate(&mm->mm_mt);
- for_each_vma(vmi, vma) {
-#ifdef CONFIG_DEBUG_VM_RB
- struct anon_vma *anon_vma = vma->anon_vma;
- struct anon_vma_chain *avc;
-#endif
- unsigned long vmi_start, vmi_end;
- bool warn = 0;
-
- vmi_start = vma_iter_addr(&vmi);
- vmi_end = vma_iter_end(&vmi);
- if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
- warn = 1;
-
- if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
- warn = 1;
-
- if (warn) {
- pr_emerg("issue in %s\n", current->comm);
- dump_stack();
- dump_vma(vma);
- pr_emerg("tree range: %px start %lx end %lx\n", vma,
- vmi_start, vmi_end - 1);
- vma_iter_dump_tree(&vmi);
- }
-
-#ifdef CONFIG_DEBUG_VM_RB
- if (anon_vma) {
- anon_vma_lock_read(anon_vma);
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_verify(avc);
- anon_vma_unlock_read(anon_vma);
- }
-#endif
- i++;
- }
- if (i != mm->map_count) {
- pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
- bug = 1;
- }
- VM_BUG_ON_MM(bug, mm);
-}
-
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
-
-/*
- * vma has some anon_vma assigned, and is already inserted on that
- * anon_vma's interval trees.
- *
- * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
- * vma must be removed from the anon_vma's interval trees using
- * anon_vma_interval_tree_pre_update_vma().
- *
- * After the update, the vma will be reinserted using
- * anon_vma_interval_tree_post_update_vma().
- *
- * The entire update must be protected by exclusive mmap_lock and by
- * the root anon_vma's mutex.
- */
-static inline void
-anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
-}
-
-static inline void
-anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
-}
-
-static unsigned long count_vma_pages_range(struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- VMA_ITERATOR(vmi, mm, addr);
- struct vm_area_struct *vma;
- unsigned long nr_pages = 0;
-
- for_each_vma_range(vmi, vma, end) {
- unsigned long vm_start = max(addr, vma->vm_start);
- unsigned long vm_end = min(end, vma->vm_end);
-
- nr_pages += PHYS_PFN(vm_end - vm_start);
- }
-
- return nr_pages;
-}
-
-static void __vma_link_file(struct vm_area_struct *vma,
- struct address_space *mapping)
-{
- if (vma_is_shared_maywrite(vma))
- mapping_allow_writable(mapping);
-
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_insert(vma, &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
-}
-
-static void vma_link_file(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
- struct address_space *mapping;
-
- if (file) {
- mapping = file->f_mapping;
- i_mmap_lock_write(mapping);
- __vma_link_file(vma, mapping);
- i_mmap_unlock_write(mapping);
- }
-}
-
-static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- VMA_ITERATOR(vmi, mm, 0);
-
- vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
- if (vma_iter_prealloc(&vmi, vma))
- return -ENOMEM;
-
- vma_start_write(vma);
- vma_iter_store(&vmi, vma);
- vma_link_file(vma);
- mm->map_count++;
- validate_mm(mm);
- return 0;
-}
-
-/*
- * init_multi_vma_prep() - Initializer for struct vma_prepare
- * @vp: The vma_prepare struct
- * @vma: The vma that will be altered once locked
- * @next: The next vma if it is to be adjusted
- * @remove: The first vma to be removed
- * @remove2: The second vma to be removed
- */
-static inline void init_multi_vma_prep(struct vma_prepare *vp,
- struct vm_area_struct *vma, struct vm_area_struct *next,
- struct vm_area_struct *remove, struct vm_area_struct *remove2)
-{
- memset(vp, 0, sizeof(struct vma_prepare));
- vp->vma = vma;
- vp->anon_vma = vma->anon_vma;
- vp->remove = remove;
- vp->remove2 = remove2;
- vp->adj_next = next;
- if (!vp->anon_vma && next)
- vp->anon_vma = next->anon_vma;
-
- vp->file = vma->vm_file;
- if (vp->file)
- vp->mapping = vma->vm_file->f_mapping;
-
-}
-
-/*
- * init_vma_prep() - Initializer wrapper for vma_prepare struct
- * @vp: The vma_prepare struct
- * @vma: The vma that will be altered once locked
- */
-static inline void init_vma_prep(struct vma_prepare *vp,
- struct vm_area_struct *vma)
-{
- init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
-}
-
-
-/*
- * vma_prepare() - Helper function for handling locking VMAs prior to altering
- * @vp: The initialized vma_prepare struct
- */
-static inline void vma_prepare(struct vma_prepare *vp)
-{
- if (vp->file) {
- uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
-
- if (vp->adj_next)
- uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
- vp->adj_next->vm_end);
-
- i_mmap_lock_write(vp->mapping);
- if (vp->insert && vp->insert->vm_file) {
- /*
- * Put into interval tree now, so instantiated pages
- * are visible to arm/parisc __flush_dcache_page
- * throughout; but we cannot insert into address
- * space until vma start or end is updated.
- */
- __vma_link_file(vp->insert,
- vp->insert->vm_file->f_mapping);
- }
- }
-
- if (vp->anon_vma) {
- anon_vma_lock_write(vp->anon_vma);
- anon_vma_interval_tree_pre_update_vma(vp->vma);
- if (vp->adj_next)
- anon_vma_interval_tree_pre_update_vma(vp->adj_next);
- }
-
- if (vp->file) {
- flush_dcache_mmap_lock(vp->mapping);
- vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
- if (vp->adj_next)
- vma_interval_tree_remove(vp->adj_next,
- &vp->mapping->i_mmap);
- }
-
-}
-
-/*
- * vma_complete- Helper function for handling the unlocking after altering VMAs,
- * or for inserting a VMA.
- *
- * @vp: The vma_prepare struct
- * @vmi: The vma iterator
- * @mm: The mm_struct
- */
-static inline void vma_complete(struct vma_prepare *vp,
- struct vma_iterator *vmi, struct mm_struct *mm)
-{
- if (vp->file) {
- if (vp->adj_next)
- vma_interval_tree_insert(vp->adj_next,
- &vp->mapping->i_mmap);
- vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
- flush_dcache_mmap_unlock(vp->mapping);
- }
-
- if (vp->remove && vp->file) {
- __remove_shared_vm_struct(vp->remove, vp->mapping);
- if (vp->remove2)
- __remove_shared_vm_struct(vp->remove2, vp->mapping);
- } else if (vp->insert) {
- /*
- * split_vma has split insert from vma, and needs
- * us to insert it before dropping the locks
- * (it may either follow vma or precede it).
- */
- vma_iter_store(vmi, vp->insert);
- mm->map_count++;
- }
-
- if (vp->anon_vma) {
- anon_vma_interval_tree_post_update_vma(vp->vma);
- if (vp->adj_next)
- anon_vma_interval_tree_post_update_vma(vp->adj_next);
- anon_vma_unlock_write(vp->anon_vma);
- }
-
- if (vp->file) {
- i_mmap_unlock_write(vp->mapping);
- uprobe_mmap(vp->vma);
-
- if (vp->adj_next)
- uprobe_mmap(vp->adj_next);
- }
-
- if (vp->remove) {
-again:
- vma_mark_detached(vp->remove, true);
- if (vp->file) {
- uprobe_munmap(vp->remove, vp->remove->vm_start,
- vp->remove->vm_end);
- fput(vp->file);
- }
- if (vp->remove->anon_vma)
- anon_vma_merge(vp->vma, vp->remove);
- mm->map_count--;
- mpol_put(vma_policy(vp->remove));
- if (!vp->remove2)
- WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
- vm_area_free(vp->remove);
-
- /*
- * In mprotect's case 6 (see comments on vma_merge),
- * we are removing both mid and next vmas
- */
- if (vp->remove2) {
- vp->remove = vp->remove2;
- vp->remove2 = NULL;
- goto again;
- }
- }
- if (vp->insert && vp->file)
- uprobe_mmap(vp->insert);
- validate_mm(mm);
-}
-
-/*
- * dup_anon_vma() - Helper function to duplicate anon_vma
- * @dst: The destination VMA
- * @src: The source VMA
- * @dup: Pointer to the destination VMA when successful.
- *
- * Returns: 0 on success.
- */
-static inline int dup_anon_vma(struct vm_area_struct *dst,
- struct vm_area_struct *src, struct vm_area_struct **dup)
-{
- /*
- * Easily overlooked: when mprotect shifts the boundary, make sure the
- * expanding vma has anon_vma set if the shrinking vma had, to cover any
- * anon pages imported.
- */
- if (src->anon_vma && !dst->anon_vma) {
- int ret;
-
- vma_assert_write_locked(dst);
- dst->anon_vma = src->anon_vma;
- ret = anon_vma_clone(dst, src);
- if (ret)
- return ret;
-
- *dup = dst;
- }
-
- return 0;
-}
-
-/*
- * vma_expand - Expand an existing VMA
- *
- * @vmi: The vma iterator
- * @vma: The vma to expand
- * @start: The start of the vma
- * @end: The exclusive end of the vma
- * @pgoff: The page offset of vma
- * @next: The next vma, if any.
- *
- * Expand @vma to @start and @end. Can expand off the start and end. Will
- * expand over @next if it's different from @vma and @end == @next->vm_end.
- * Checking if the @vma can expand and merge with @next needs to be handled by
- * the caller.
- *
- * Returns: 0 on success
- */
-int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next)
-{
- struct vm_area_struct *anon_dup = NULL;
- bool remove_next = false;
- struct vma_prepare vp;
-
- vma_start_write(vma);
- if (next && (vma != next) && (end == next->vm_end)) {
- int ret;
-
- remove_next = true;
- vma_start_write(next);
- ret = dup_anon_vma(vma, next, &anon_dup);
- if (ret)
- return ret;
- }
-
- init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
- /* Not merging but overwriting any part of next is not handled. */
- VM_WARN_ON(next && !vp.remove &&
- next != vma && end > next->vm_start);
- /* Only handles expanding */
- VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
-
- /* Note: vma iterator must be pointing to 'start' */
- vma_iter_config(vmi, start, end);
- if (vma_iter_prealloc(vmi, vma))
- goto nomem;
-
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, 0);
- vma_set_range(vma, start, end, pgoff);
- vma_iter_store(vmi, vma);
-
- vma_complete(&vp, vmi, vma->vm_mm);
- return 0;
-
-nomem:
- if (anon_dup)
- unlink_anon_vmas(anon_dup);
- return -ENOMEM;
-}
-
-/*
- * vma_shrink() - Reduce an existing VMAs memory area
- * @vmi: The vma iterator
- * @vma: The VMA to modify
- * @start: The new start
- * @end: The new end
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff)
-{
- struct vma_prepare vp;
-
- WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
-
- if (vma->vm_start < start)
- vma_iter_config(vmi, vma->vm_start, start);
- else
- vma_iter_config(vmi, end, vma->vm_end);
-
- if (vma_iter_prealloc(vmi, NULL))
- return -ENOMEM;
-
- vma_start_write(vma);
-
- init_vma_prep(&vp, vma);
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, 0);
-
- vma_iter_clear(vmi);
- vma_set_range(vma, start, end, pgoff);
- vma_complete(&vp, vmi, vma->vm_mm);
- return 0;
-}
-
-/*
- * If the vma has a ->close operation then the driver probably needs to release
- * per-vma resources, so we don't attempt to merge those if the caller indicates
- * the current vma may be removed as part of the merge.
- */
-static inline bool is_mergeable_vma(struct vm_area_struct *vma,
- struct file *file, unsigned long vm_flags,
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name, bool may_remove_vma)
-{
- /*
- * VM_SOFTDIRTY should not prevent from VMA merging, if we
- * match the flags but dirty bit -- the caller should mark
- * merged VMA as dirty. If dirty bit won't be excluded from
- * comparison, we increase pressure on the memory system forcing
- * the kernel to generate new VMAs when old one could be
- * extended instead.
- */
- if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
- return false;
- if (vma->vm_file != file)
- return false;
- if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
- return false;
- if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
- return false;
- if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
- return false;
- return true;
-}
-
-static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
- struct anon_vma *anon_vma2, struct vm_area_struct *vma)
-{
- /*
- * The list_is_singular() test is to avoid merging VMA cloned from
- * parents. This can improve scalability caused by anon_vma lock.
- */
- if ((!anon_vma1 || !anon_vma2) && (!vma ||
- list_is_singular(&vma->anon_vma_chain)))
- return true;
- return anon_vma1 == anon_vma2;
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * in front of (at a lower virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- *
- * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap() does not permit mmap's which
- * wrap, nor mmaps which cover the final page at index -1UL.
- *
- * We assume the vma may be removed as part of the merge.
- */
-static bool
-can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file,
- pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- if (vma->vm_pgoff == vm_pgoff)
- return true;
- }
- return false;
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * beyond (at a higher virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- *
- * We assume that vma is not removed as part of the merge.
- */
-static bool
-can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file,
- pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- pgoff_t vm_pglen;
- vm_pglen = vma_pages(vma);
- if (vma->vm_pgoff + vm_pglen == vm_pgoff)
- return true;
- }
- return false;
-}
-
-/*
- * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
- * figure out whether that can be merged with its predecessor or its
- * successor. Or both (it neatly fills a hole).
- *
- * In most cases - when called for mmap, brk or mremap - [addr,end) is
- * certain not to be mapped by the time vma_merge is called; but when
- * called for mprotect, it is certain to be already mapped (either at
- * an offset within prev, or at the start of next), and the flags of
- * this area are about to be changed to vm_flags - and the no-change
- * case has already been eliminated.
- *
- * The following mprotect cases have to be considered, where **** is
- * the area passed down from mprotect_fixup, never extending beyond one
- * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
- * at the same address as **** and is of the same or larger span, and
- * NNNN the next vma after ****:
- *
- * **** **** ****
- * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
- * cannot merge might become might become
- * PPNNNNNNNNNN PPPPPPPPPPCC
- * mmap, brk or case 4 below case 5 below
- * mremap move:
- * **** ****
- * PPPP NNNN PPPPCCCCNNNN
- * might become might become
- * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
- * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
- * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
- *
- * It is important for case 8 that the vma CCCC overlapping the
- * region **** is never going to be extended over NNNN. Instead NNNN must
- * be extended in region **** and CCCC must be removed. This way in
- * all cases where vma_merge succeeds, the moment vma_merge drops the
- * rmap_locks, the properties of the merged vma will be already
- * correct for the whole merged range. Some of those properties like
- * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
- * be correct for the whole merged range immediately after the
- * rmap_locks are released. Otherwise if NNNN would be removed and
- * CCCC would be extended over the NNNN range, remove_migration_ptes
- * or other rmap walkers (if working on addresses beyond the "end"
- * parameter) may establish ptes with the wrong permissions of CCCC
- * instead of the right permissions of NNNN.
- *
- * In the code below:
- * PPPP is represented by *prev
- * CCCC is represented by *curr or not represented at all (NULL)
- * NNNN is represented by *next or not represented at all (NULL)
- * **** is not represented - it will be merged and the vma containing the
- * area is returned, or the function will return NULL
- */
-static struct vm_area_struct
-*vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
- struct vm_area_struct *src, unsigned long addr, unsigned long end,
- unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- struct mm_struct *mm = src->vm_mm;
- struct anon_vma *anon_vma = src->anon_vma;
- struct file *file = src->vm_file;
- struct vm_area_struct *curr, *next, *res;
- struct vm_area_struct *vma, *adjust, *remove, *remove2;
- struct vm_area_struct *anon_dup = NULL;
- struct vma_prepare vp;
- pgoff_t vma_pgoff;
- int err = 0;
- bool merge_prev = false;
- bool merge_next = false;
- bool vma_expanded = false;
- unsigned long vma_start = addr;
- unsigned long vma_end = end;
- pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- long adj_start = 0;
-
- /*
- * We later require that vma->vm_flags == vm_flags,
- * so this tests vma->vm_flags & VM_SPECIAL, too.
- */
- if (vm_flags & VM_SPECIAL)
- return NULL;
-
- /* Does the input range span an existing VMA? (cases 5 - 8) */
- curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
-
- if (!curr || /* cases 1 - 4 */
- end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
- next = vma_lookup(mm, end);
- else
- next = NULL; /* case 5 */
-
- if (prev) {
- vma_start = prev->vm_start;
- vma_pgoff = prev->vm_pgoff;
-
- /* Can we merge the predecessor? */
- if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
- && can_vma_merge_after(prev, vm_flags, anon_vma, file,
- pgoff, vm_userfaultfd_ctx, anon_name)) {
- merge_prev = true;
- vma_prev(vmi);
- }
- }
-
- /* Can we merge the successor? */
- if (next && mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
- vm_userfaultfd_ctx, anon_name)) {
- merge_next = true;
- }
-
- /* Verify some invariants that must be enforced by the caller. */
- VM_WARN_ON(prev && addr <= prev->vm_start);
- VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
- VM_WARN_ON(addr >= end);
-
- if (!merge_prev && !merge_next)
- return NULL; /* Not mergeable. */
-
- if (merge_prev)
- vma_start_write(prev);
-
- res = vma = prev;
- remove = remove2 = adjust = NULL;
-
- /* Can we merge both the predecessor and the successor? */
- if (merge_prev && merge_next &&
- is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
- vma_start_write(next);
- remove = next; /* case 1 */
- vma_end = next->vm_end;
- err = dup_anon_vma(prev, next, &anon_dup);
- if (curr) { /* case 6 */
- vma_start_write(curr);
- remove = curr;
- remove2 = next;
- /*
- * Note that the dup_anon_vma below cannot overwrite err
- * since the first caller would do nothing unless next
- * has an anon_vma.
- */
- if (!next->anon_vma)
- err = dup_anon_vma(prev, curr, &anon_dup);
- }
- } else if (merge_prev) { /* case 2 */
- if (curr) {
- vma_start_write(curr);
- if (end == curr->vm_end) { /* case 7 */
- /*
- * can_vma_merge_after() assumed we would not be
- * removing prev vma, so it skipped the check
- * for vm_ops->close, but we are removing curr
- */
- if (curr->vm_ops && curr->vm_ops->close)
- err = -EINVAL;
- remove = curr;
- } else { /* case 5 */
- adjust = curr;
- adj_start = (end - curr->vm_start);
- }
- if (!err)
- err = dup_anon_vma(prev, curr, &anon_dup);
- }
- } else { /* merge_next */
- vma_start_write(next);
- res = next;
- if (prev && addr < prev->vm_end) { /* case 4 */
- vma_start_write(prev);
- vma_end = addr;
- adjust = next;
- adj_start = -(prev->vm_end - addr);
- err = dup_anon_vma(next, prev, &anon_dup);
- } else {
- /*
- * Note that cases 3 and 8 are the ONLY ones where prev
- * is permitted to be (but is not necessarily) NULL.
- */
- vma = next; /* case 3 */
- vma_start = addr;
- vma_end = next->vm_end;
- vma_pgoff = next->vm_pgoff - pglen;
- if (curr) { /* case 8 */
- vma_pgoff = curr->vm_pgoff;
- vma_start_write(curr);
- remove = curr;
- err = dup_anon_vma(next, curr, &anon_dup);
- }
- }
- }
-
- /* Error in anon_vma clone. */
- if (err)
- goto anon_vma_fail;
-
- if (vma_start < vma->vm_start || vma_end > vma->vm_end)
- vma_expanded = true;
-
- if (vma_expanded) {
- vma_iter_config(vmi, vma_start, vma_end);
- } else {
- vma_iter_config(vmi, adjust->vm_start + adj_start,
- adjust->vm_end);
- }
-
- if (vma_iter_prealloc(vmi, vma))
- goto prealloc_fail;
-
- init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
- VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
- vp.anon_vma != adjust->anon_vma);
-
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
- vma_set_range(vma, vma_start, vma_end, vma_pgoff);
-
- if (vma_expanded)
- vma_iter_store(vmi, vma);
-
- if (adj_start) {
- adjust->vm_start += adj_start;
- adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
- if (adj_start < 0) {
- WARN_ON(vma_expanded);
- vma_iter_store(vmi, next);
- }
- }
-
- vma_complete(&vp, vmi, mm);
- khugepaged_enter_vma(res, vm_flags);
- return res;
-
-prealloc_fail:
- if (anon_dup)
- unlink_anon_vmas(anon_dup);
-
-anon_vma_fail:
- vma_iter_set(vmi, addr);
- vma_iter_load(vmi);
- return NULL;
-}
-
-/*
- * Rough compatibility check to quickly see if it's even worth looking
- * at sharing an anon_vma.
- *
- * They need to have the same vm_file, and the flags can only differ
- * in things that mprotect may change.
- *
- * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
- * we can merge the two vma's. For example, we refuse to merge a vma if
- * there is a vm_ops->close() function, because that indicates that the
- * driver is doing some kind of reference counting. But that doesn't
- * really matter for the anon_vma sharing case.
- */
-static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
-{
- return a->vm_end == b->vm_start &&
- mpol_equal(vma_policy(a), vma_policy(b)) &&
- a->vm_file == b->vm_file &&
- !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
- b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
-}
-
-/*
- * Do some basic sanity checking to see if we can re-use the anon_vma
- * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
- * the same as 'old', the other will be the new one that is trying
- * to share the anon_vma.
- *
- * NOTE! This runs with mmap_lock held for reading, so it is possible that
- * the anon_vma of 'old' is concurrently in the process of being set up
- * by another page fault trying to merge _that_. But that's ok: if it
- * is being set up, that automatically means that it will be a singleton
- * acceptable for merging, so we can do all of this optimistically. But
- * we do that READ_ONCE() to make sure that we never re-load the pointer.
- *
- * IOW: that the "list_is_singular()" test on the anon_vma_chain only
- * matters for the 'stable anon_vma' case (ie the thing we want to avoid
- * is to return an anon_vma that is "complex" due to having gone through
- * a fork).
- *
- * We also make sure that the two vma's are compatible (adjacent,
- * and with the same memory policies). That's all stable, even with just
- * a read lock on the mmap_lock.
- */
-static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
-{
- if (anon_vma_compatible(a, b)) {
- struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
-
- if (anon_vma && list_is_singular(&old->anon_vma_chain))
- return anon_vma;
- }
- return NULL;
-}
-
-/*
- * find_mergeable_anon_vma is used by anon_vma_prepare, to check
- * neighbouring vmas for a suitable anon_vma, before it goes off
- * to allocate a new anon_vma. It checks because a repetitive
- * sequence of mprotects and faults may otherwise lead to distinct
- * anon_vmas being allocated, preventing vma merge in subsequent
- * mprotect.
- */
-struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = NULL;
- struct vm_area_struct *prev, *next;
- VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
-
- /* Try next first. */
- next = vma_iter_load(&vmi);
- if (next) {
- anon_vma = reusable_anon_vma(next, vma, next);
- if (anon_vma)
- return anon_vma;
- }
-
- prev = vma_prev(&vmi);
- VM_BUG_ON_VMA(prev != vma, vma);
- prev = vma_prev(&vmi);
- /* Try prev next. */
- if (prev)
- anon_vma = reusable_anon_vma(prev, prev, vma);
-
- /*
- * We might reach here with anon_vma == NULL if we can't find
- * any reusable anon_vma.
- * There's no absolute need to look only at touching neighbours:
- * we could search further afield for "compatible" anon_vmas.
- * But it would probably just be a waste of time searching,
- * or lead to too many vmas hanging off the same anon_vma.
- * We're trying to allow mprotect remerging later on,
- * not trying to minimize memory used for anon_vmas.
- */
- return anon_vma;
-}
-
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
@@ -1229,7 +257,7 @@ static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
return MAX_LFS_FILESIZE;
/* Special "we do even unsigned file positions" case */
- if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+ if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
return 0;
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
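
The FMODE_UNSIGNED_OFFSET to FOP_UNSIGNED_OFFSET move turns the "file positions may have the high bit set" quirk from per-open-file state into a static property of the file_operations. A hedged sketch of how a driver would opt in; the drv_ names and helper prototypes are hypothetical:

	/* Hypothetical driver helpers, declarations only. */
	static loff_t drv_llseek(struct file *, loff_t, int);
	static ssize_t drv_read(struct file *, char __user *, size_t, loff_t *);

	static const struct file_operations drv_fops = {
		.llseek	   = drv_llseek,
		.read	   = drv_read,
		/* Accept offsets with bit 63 set, e.g. kernel addresses. */
		.fop_flags = FOP_UNSIGNED_OFFSET,
	};
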
@@ -1549,85 +577,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
-static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
-{
- return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
-}
-
-static bool vma_is_shared_writable(struct vm_area_struct *vma)
-{
- return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
- (VM_WRITE | VM_SHARED);
-}
-
-static bool vma_fs_can_writeback(struct vm_area_struct *vma)
-{
- /* No managed pages to writeback. */
- if (vma->vm_flags & VM_PFNMAP)
- return false;
-
- return vma->vm_file && vma->vm_file->f_mapping &&
- mapping_can_writeback(vma->vm_file->f_mapping);
-}
-
-/*
- * Does this VMA require the underlying folios to have their dirty state
- * tracked?
- */
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
-{
- /* Only shared, writable VMAs require dirty tracking. */
- if (!vma_is_shared_writable(vma))
- return false;
-
- /* Does the filesystem need to be notified? */
- if (vm_ops_needs_writenotify(vma->vm_ops))
- return true;
-
- /*
- * Even if the filesystem doesn't indicate a need for writenotify, if it
- * can writeback, dirty tracking is still required.
- */
- return vma_fs_can_writeback(vma);
-}
-
-/*
- * Some shared mappings will want the pages marked read-only
- * to track write events. If so, we'll downgrade vm_page_prot
- * to the private version (using protection_map[] without the
- * VM_SHARED bit).
- */
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
-{
- /* If it was private or non-writable, the write bit is already clear */
- if (!vma_is_shared_writable(vma))
- return false;
-
- /* The backer wishes to know when pages are first written to? */
- if (vm_ops_needs_writenotify(vma->vm_ops))
- return true;
-
- /* The open routine did something to the protections that pgprot_modify
- * won't preserve? */
- if (pgprot_val(vm_page_prot) !=
- pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
- return false;
-
- /*
- * Do we need to track softdirty? hugetlb does not support softdirty
- * tracking yet.
- */
- if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
- return true;
-
- /* Do we need write faults for uffd-wp tracking? */
- if (userfaultfd_wp(vma))
- return true;
-
- /* Can the mapping track the dirty pages? */
- return vma_fs_can_writeback(vma);
-}
-
/*
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
@@ -1754,6 +703,18 @@ retry:
}
/*
+ * Determine if the allocation needs to ensure that there is no
+ * existing mapping within its guard gaps, for use as start_gap.
+ */
+static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
+{
+ if (vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
+/*
* Search for an unmapped address range.
*
* We are looking for a range that:
@@ -1789,7 +750,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -1814,6 +775,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
+ info.start_gap = stack_guard_placement(vm_flags);
return vm_unmapped_area(&info);
}
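
In condensed form, the bottom-up search above now amounts to filling a vm_unmapped_area_info and letting the new start_gap field reserve the shadow-stack guard page. This restatement of the hunk is a sketch only, with the remaining info fields assumed zero-initialized:

	struct vm_unmapped_area_info info = {
		.length	    = len,
		.low_limit  = mm->mmap_base,
		.high_limit = mmap_end,
		/* PAGE_SIZE below a VM_SHADOW_STACK mapping, else 0. */
		.start_gap  = stack_guard_placement(vm_flags),
	};
	return vm_unmapped_area(&info);
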
@@ -1821,9 +783,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
@@ -1834,7 +797,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
@@ -1862,6 +825,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+ info.start_gap = stack_guard_placement(vm_flags);
addr = vm_unmapped_area(&info);
/*
@@ -1885,26 +849,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
-}
-#endif
-
-#ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
-unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
-{
- return arch_get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags)
-{
- return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
@@ -1914,9 +862,9 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *fi
vm_flags_t vm_flags)
{
if (test_bit(MMF_TOPDOWN, &mm->flags))
- return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff,
- flags, vm_flags);
- return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags);
+ return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
+ flags, vm_flags);
+ return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
}
unsigned long
@@ -1978,8 +926,8 @@ mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
unsigned long pgoff, unsigned long flags)
{
if (test_bit(MMF_TOPDOWN, &mm->flags))
- return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
- return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL(mm_get_unmapped_area);
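
mm_get_unmapped_area() remains exported so drivers implementing their own ->get_unmapped_area hook can fall back to the mm's default top-down or bottom-up policy. A hedged sketch of such a hook; the mydrv_ naming is an assumption and any driver-specific alignment handling is omitted:

	static unsigned long
	mydrv_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
	{
		/* Defer placement to the default policy for this mm. */
		return mm_get_unmapped_area(current->mm, file, addr, len,
					    pgoff, flags);
	}
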
@@ -2393,443 +1341,6 @@ success:
return vma;
}
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
- unsigned long nr_accounted = 0;
- struct vm_area_struct *vma;
-
- /* Update high watermark before we lower total_vm */
- update_hiwater_vm(mm);
- mas_for_each(mas, vma, ULONG_MAX) {
- long nrpages = vma_pages(vma);
-
- if (vma->vm_flags & VM_ACCOUNT)
- nr_accounted += nrpages;
- vm_stat_account(mm, vma->vm_flags, -nrpages);
- remove_vma(vma, false);
- }
- vm_unacct_memory(nr_accounted);
-}
-
-/*
- * Get rid of page table information in the indicated region.
- *
- * Called with the mm semaphore held.
- */
-static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- struct vm_area_struct *next, unsigned long start,
- unsigned long end, unsigned long tree_end, bool mm_wr_locked)
-{
- struct mmu_gather tlb;
- unsigned long mt_start = mas->index;
-
- lru_add_drain();
- tlb_gather_mmu(&tlb, mm);
- update_hiwater_rss(mm);
- unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
- mas_set(mas, mt_start);
- free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : USER_PGTABLES_CEILING,
- mm_wr_locked);
- tlb_finish_mmu(&tlb);
-}
-
-/*
- * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
- * has already been checked or doesn't make sense to fail.
- * VMA Iterator will point to the end VMA.
- */
-static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
-{
- struct vma_prepare vp;
- struct vm_area_struct *new;
- int err;
-
- WARN_ON(vma->vm_start >= addr);
- WARN_ON(vma->vm_end <= addr);
-
- if (vma->vm_ops && vma->vm_ops->may_split) {
- err = vma->vm_ops->may_split(vma, addr);
- if (err)
- return err;
- }
-
- new = vm_area_dup(vma);
- if (!new)
- return -ENOMEM;
-
- if (new_below) {
- new->vm_end = addr;
- } else {
- new->vm_start = addr;
- new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
- }
-
- err = -ENOMEM;
- vma_iter_config(vmi, new->vm_start, new->vm_end);
- if (vma_iter_prealloc(vmi, new))
- goto out_free_vma;
-
- err = vma_dup_policy(vma, new);
- if (err)
- goto out_free_vmi;
-
- err = anon_vma_clone(new, vma);
- if (err)
- goto out_free_mpol;
-
- if (new->vm_file)
- get_file(new->vm_file);
-
- if (new->vm_ops && new->vm_ops->open)
- new->vm_ops->open(new);
-
- vma_start_write(vma);
- vma_start_write(new);
-
- init_vma_prep(&vp, vma);
- vp.insert = new;
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
-
- if (new_below) {
- vma->vm_start = addr;
- vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
- } else {
- vma->vm_end = addr;
- }
-
- /* vma_complete stores the new vma */
- vma_complete(&vp, vmi, vma->vm_mm);
-
- /* Success. */
- if (new_below)
- vma_next(vmi);
- return 0;
-
-out_free_mpol:
- mpol_put(vma_policy(new));
-out_free_vmi:
- vma_iter_free(vmi);
-out_free_vma:
- vm_area_free(new);
- return err;
-}
-
-/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
- */
-static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
-{
- if (vma->vm_mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
- return __split_vma(vmi, vma, addr, new_below);
-}
-
-/*
- * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
- * context and anonymous VMA name within the range [start, end).
- *
- * As a result, we might be able to merge the newly modified VMA range with an
- * adjacent VMA with identical properties.
- *
- * If no merge is possible and the range does not span the entirety of the VMA,
- * we then need to split the VMA to accommodate the change.
- *
- * The function returns either the merged VMA, the original VMA if a split was
- * required instead, or an error if the split failed.
- */
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name)
-{
- pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- struct vm_area_struct *merged;
-
- merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
- pgoff, policy, uffd_ctx, anon_name);
- if (merged)
- return merged;
-
- if (vma->vm_start < start) {
- int err = split_vma(vmi, vma, start, 1);
-
- if (err)
- return ERR_PTR(err);
- }
-
- if (vma->vm_end > end) {
- int err = split_vma(vmi, vma, end, 0);
-
- if (err)
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-/*
- * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
- * must ensure that [start, end) does not overlap any existing VMA.
- */
-static struct vm_area_struct
-*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff)
-{
- return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
- vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/*
- * Expand vma by delta bytes, potentially merging with an immediately adjacent
- * VMA with identical properties.
- */
-struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
- struct vm_area_struct *vma,
- unsigned long delta)
-{
- pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
-
- /* vma is specified as prev, so case 1 or 2 will apply. */
- return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
- vma->vm_flags, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/*
- * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
- * @vmi: The vma iterator
- * @vma: The starting vm_area_struct
- * @mm: The mm_struct
- * @start: The aligned start address to munmap.
- * @end: The aligned end address to munmap.
- * @uf: The userfaultfd list_head
- * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
- * success.
- *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
- */
-static int
-do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf, bool unlock)
-{
- struct vm_area_struct *prev, *next = NULL;
- struct maple_tree mt_detach;
- int count = 0;
- int error = -ENOMEM;
- unsigned long locked_vm = 0;
- MA_STATE(mas_detach, &mt_detach, 0, 0);
- mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
- mt_on_stack(mt_detach);
-
- /*
- * If we need to split any vma, do it now to save pain later.
- *
- * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
- * unmapped vm_area_struct will remain in use: so lower split_vma
- * places tmp vma above, and higher split_vma places tmp vma below.
- */
-
- /* Does it split the first one? */
- if (start > vma->vm_start) {
-
- /*
- * Make sure that map_count on return from munmap() will
- * not exceed its limit; but let map_count go just above
- * its limit temporarily, to help free resources as expected.
- */
- if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
- goto map_count_exceeded;
-
- error = __split_vma(vmi, vma, start, 1);
- if (error)
- goto start_split_failed;
- }
-
- /*
- * Detach a range of VMAs from the mm. Using next as a temp variable as
- * it is always overwritten.
- */
- next = vma;
- do {
- /* Does it split the end? */
- if (next->vm_end > end) {
- error = __split_vma(vmi, next, end, 0);
- if (error)
- goto end_split_failed;
- }
- vma_start_write(next);
- mas_set(&mas_detach, count);
- error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
- if (error)
- goto munmap_gather_failed;
- vma_mark_detached(next, true);
- if (next->vm_flags & VM_LOCKED)
- locked_vm += vma_pages(next);
-
- count++;
- if (unlikely(uf)) {
- /*
- * If userfaultfd_unmap_prep returns an error the vmas
- * will remain split, but userland will get a
- * highly unexpected error anyway. This is no
- * different than the case where the first of the two
- * __split_vma fails, but we don't undo the first
- * split, though we could. This is an unlikely enough
- * failure that it's not worth optimizing for.
- */
- error = userfaultfd_unmap_prep(next, start, end, uf);
-
- if (error)
- goto userfaultfd_error;
- }
-#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
- BUG_ON(next->vm_start < start);
- BUG_ON(next->vm_start > end);
-#endif
- } for_each_vma_range(*vmi, next, end);
-
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- /* Make sure no VMAs are about to be lost. */
- {
- MA_STATE(test, &mt_detach, 0, 0);
- struct vm_area_struct *vma_mas, *vma_test;
- int test_count = 0;
-
- vma_iter_set(vmi, start);
- rcu_read_lock();
- vma_test = mas_find(&test, count - 1);
- for_each_vma_range(*vmi, vma_mas, end) {
- BUG_ON(vma_mas != vma_test);
- test_count++;
- vma_test = mas_next(&test, count - 1);
- }
- rcu_read_unlock();
- BUG_ON(count != test_count);
- }
-#endif
-
- while (vma_iter_addr(vmi) > start)
- vma_iter_prev_range(vmi);
-
- error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
- if (error)
- goto clear_tree_failed;
-
- /* Point of no return */
- mm->locked_vm -= locked_vm;
- mm->map_count -= count;
- if (unlock)
- mmap_write_downgrade(mm);
-
- prev = vma_iter_prev_range(vmi);
- next = vma_next(vmi);
- if (next)
- vma_iter_prev_range(vmi);
-
- /*
- * We can free page tables without write-locking mmap_lock because VMAs
- * were isolated before we downgraded mmap_lock.
- */
- mas_set(&mas_detach, 1);
- unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
- !unlock);
- /* Statistics and freeing VMAs */
- mas_set(&mas_detach, 0);
- remove_mt(mm, &mas_detach);
- validate_mm(mm);
- if (unlock)
- mmap_read_unlock(mm);
-
- __mt_destroy(&mt_detach);
- return 0;
-
-clear_tree_failed:
-userfaultfd_error:
-munmap_gather_failed:
-end_split_failed:
- mas_set(&mas_detach, 0);
- mas_for_each(&mas_detach, next, end)
- vma_mark_detached(next, false);
-
- __mt_destroy(&mt_detach);
-start_split_failed:
-map_count_exceeded:
- validate_mm(mm);
- return error;
-}
-
-/*
- * do_vmi_munmap() - munmap a given range.
- * @vmi: The vma iterator
- * @mm: The mm_struct
- * @start: The start address to munmap
- * @len: The length of the range to munmap
- * @uf: The userfaultfd list_head
- * @unlock: set to true if the user wants to drop the mmap_lock on success
- *
- * This function takes a @mas that is either pointing to the previous VMA or set
- * to MA_START and sets it up to remove the mapping(s). The @len will be
- * aligned and any arch_unmap work will be performed.
- *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
- */
-int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
- unsigned long start, size_t len, struct list_head *uf,
- bool unlock)
-{
- unsigned long end;
- struct vm_area_struct *vma;
-
- if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
- return -EINVAL;
-
- end = start + PAGE_ALIGN(len);
- if (end == start)
- return -EINVAL;
-
- /*
- * Check if memory is sealed before arch_unmap.
- * Prevent unmapping a sealed VMA.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, start, end)))
- return -EPERM;
-
- /* arch_unmap() might do unmaps itself. */
- arch_unmap(mm, start, end);
-
- /* Find the first overlapping VMA */
- vma = vma_find(vmi, end);
- if (!vma) {
- if (unlock)
- mmap_write_unlock(mm);
- return 0;
- }
-
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
-}
-
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
* @mm: The mm_struct
* @start: The start address to munmap
@@ -2852,100 +1363,67 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
- struct vm_area_struct *next, *prev, *merge;
- pgoff_t pglen = len >> PAGE_SHIFT;
+ pgoff_t pglen = PHYS_PFN(len);
+ struct vm_area_struct *merge;
unsigned long charged = 0;
+ struct vma_munmap_struct vms;
+ struct ma_state mas_detach;
+ struct maple_tree mt_detach;
unsigned long end = addr + len;
- unsigned long merge_start = addr, merge_end = end;
bool writable_file_mapping = false;
- pgoff_t vm_pgoff;
- int error;
+ int error = -ENOMEM;
VMA_ITERATOR(vmi, mm, addr);
+ VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
- /* Check against address space limit. */
- if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
- unsigned long nr_pages;
-
- /*
- * MAP_FIXED may remove pages of mappings that intersects with
- * requested mapping. Account for the pages it would unmap.
- */
- nr_pages = count_vma_pages_range(mm, addr, end);
+ vmg.file = file;
+ /* Find the first overlapping VMA */
+ vma = vma_find(&vmi, end);
+ init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
+ if (vma) {
+ mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ mt_on_stack(mt_detach);
+ mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
+ /* Prepare to unmap any existing mapping in the area */
+ error = vms_gather_munmap_vmas(&vms, &mas_detach);
+ if (error)
+ goto gather_failed;
- if (!may_expand_vm(mm, vm_flags,
- (len >> PAGE_SHIFT) - nr_pages))
- return -ENOMEM;
+ vmg.next = vms.next;
+ vmg.prev = vms.prev;
+ vma = NULL;
+ } else {
+ vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev);
}
- /* Unmap any existing mapping in the area */
- error = do_vmi_munmap(&vmi, mm, addr, len, uf, false);
- if (error == -EPERM)
- return error;
- else if (error)
- return -ENOMEM;
+ /* Check against address space limit. */
+ if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
+ goto abort_munmap;
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
- charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory_mm(mm, charged))
- return -ENOMEM;
- vm_flags |= VM_ACCOUNT;
- }
-
- next = vma_next(&vmi);
- prev = vma_prev(&vmi);
- if (vm_flags & VM_SPECIAL) {
- if (prev)
- vma_iter_next_range(&vmi);
- goto cannot_expand;
- }
-
- /* Attempt to expand an old mapping */
- /* Check next */
- if (next && next->vm_start == end && !vma_policy(next) &&
- can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
- NULL_VM_UFFD_CTX, NULL)) {
- merge_end = next->vm_end;
- vma = next;
- vm_pgoff = next->vm_pgoff - pglen;
- }
+ charged = pglen;
+ charged -= vms.nr_accounted;
+ if (charged && security_vm_enough_memory_mm(mm, charged))
+ goto abort_munmap;
- /* Check prev */
- if (prev && prev->vm_end == addr && !vma_policy(prev) &&
- (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
- pgoff, vma->vm_userfaultfd_ctx, NULL) :
- can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
- NULL_VM_UFFD_CTX, NULL))) {
- merge_start = prev->vm_start;
- vma = prev;
- vm_pgoff = prev->vm_pgoff;
- } else if (prev) {
- vma_iter_next_range(&vmi);
+ vms.nr_accounted = 0;
+ vm_flags |= VM_ACCOUNT;
+ vmg.flags = vm_flags;
}
- /* Actually expand, if possible */
- if (vma &&
- !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
- khugepaged_enter_vma(vma, vm_flags);
+ vma = vma_merge_new_range(&vmg);
+ if (vma)
goto expanded;
- }
-
- if (vma == prev)
- vma_iter_set(&vmi, addr);
-cannot_expand:
-
/*
* Determine the object being mapped and call the appropriate
* specific mapper. The address has already been validated, but
* not unmapped, but the maps are removed from the list.
*/
vma = vm_area_alloc(mm);
- if (!vma) {
- error = -ENOMEM;
+ if (!vma)
goto unacct_error;
- }
vma_iter_config(&vmi, addr, end);
vma_set_range(vma, addr, end, pgoff);
@@ -2954,6 +1432,11 @@ cannot_expand:
if (file) {
vma->vm_file = get_file(file);
+ /*
+ * call_mmap() may map PTEs, so ensure there are no existing PTEs
+ * and call the vm_ops close function if one exists.
+ */
+ vms_clean_up_area(&vms, &mas_detach);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -2979,10 +1462,11 @@ cannot_expand:
* If vm_flags changed after call_mmap(), we should try merge
* vma again as we may succeed this time.
*/
- if (unlikely(vm_flags != vma->vm_flags && prev)) {
- merge = vma_merge_new_vma(&vmi, prev, vma,
- vma->vm_start, vma->vm_end,
- vma->vm_pgoff);
+ if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
+ vmg.flags = vma->vm_flags;
+ /* If this fails, state is reset, ready for a reattempt. */
+ merge = vma_merge_new_range(&vmg);
+
if (merge) {
/*
* ->mmap() can change vma->vm_file and fput
@@ -2998,6 +1482,7 @@ cannot_expand:
vm_flags = vma->vm_flags;
goto unmap_writable;
}
+ vma_iter_config(&vmi, addr, end);
}
vm_flags = vma->vm_flags;
@@ -3030,7 +1515,7 @@ cannot_expand:
vma_link_file(vma);
/*
- * vma_merge() calls khugepaged_enter_vma() either, the below
+ * vma_merge_new_range() calls khugepaged_enter_vma() too; the below
* call covers the non-merge case.
*/
khugepaged_enter_vma(vma, vma->vm_flags);
@@ -3044,14 +1529,17 @@ unmap_writable:
expanded:
perf_event_mmap(vma);
- vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+ /* Unmap any existing mapping in the area */
+ vms_complete_munmap_vmas(&vms, &mas_detach);
+
+ vm_stat_account(mm, vm_flags, pglen);
if (vm_flags & VM_LOCKED) {
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm))
vm_flags_clear(vma, VM_LOCKED_MASK);
else
- mm->locked_vm += (len >> PAGE_SHIFT);
+ mm->locked_vm += pglen;
}
if (file)
@@ -3072,7 +1560,7 @@ expanded:
return addr;
close_and_free_vma:
- if (file && vma->vm_ops && vma->vm_ops->close)
+ if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (file || vma->vm_file) {
@@ -3082,8 +1570,7 @@ unmap_and_free_vma:
vma_iter_set(&vmi, vma->vm_end);
/* Undo any partial mapping done by a device driver. */
- unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
- vma->vm_end, vma->vm_end, true);
+ unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
}
if (writable_file_mapping)
mapping_unmap_writable(file->f_mapping);
@@ -3092,6 +1579,10 @@ free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
+
+abort_munmap:
+ vms_abort_munmap_vmas(&vms, &mas_detach);
+gather_failed:
validate_mm(mm);
return error;
}
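
To keep the reordering above readable, here is a short annotated outline of the new mmap_region() control flow; it is a summary, not code from the patch:

	/*
	 * Sketch of the reworked mmap_region() ordering:
	 *
	 *  1. vma_find() + vms_gather_munmap_vmas(): detach any overlapped
	 *     VMAs into mas_detach, deferring their teardown.
	 *  2. vma_merge_new_range(): try to expand a neighbouring VMA over
	 *     [addr, end) instead of allocating a new one.
	 *  3. Otherwise vm_area_alloc() + call_mmap(); for file mappings,
	 *     vms_clean_up_area() first strips old PTEs so the driver's
	 *     ->mmap() sees an empty range.
	 *  4. Success: vms_complete_munmap_vmas() finishes the deferred
	 *     unmap. Failure: vms_abort_munmap_vmas() undoes the gather.
	 */
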
@@ -3198,8 +1689,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
flags |= MAP_LOCKED;
file = get_file(vma->vm_file);
+ ret = security_mmap_file(vma->vm_file, prot, flags);
+ if (ret)
+ goto out_fput;
ret = do_mmap(vma->vm_file, start, size,
prot, flags, 0, pgoff, &populate, NULL);
+out_fput:
fput(file);
out:
mmap_write_unlock(mm);
@@ -3211,39 +1706,6 @@ out:
}
/*
- * do_vma_munmap() - Unmap a full or partial vma.
- * @vmi: The vma iterator pointing at the vma
- * @vma: The first vma to be munmapped
- * @start: the start of the address to unmap
- * @end: The end of the address to unmap
- * @uf: The userfaultfd list_head
- * @unlock: Drop the lock on success
- *
- * unmaps a VMA mapping when the vma iterator is already in position.
- * Does not handle alignment.
- *
- * Return: 0 on success and drops the lock if so directed, error on failure and will
- * still hold the lock.
- */
-int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, struct list_head *uf,
- bool unlock)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- /*
- * Check if memory is sealed before arch_unmap.
- * Prevent unmapping a sealed VMA.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, start, end)))
- return -EPERM;
-
- arch_unmap(mm, start, end);
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
-}
-
-/*
* do_brk_flags() - Increase the brk vma if the flags match.
* @vmi: The vma iterator
* @addr: The start address
@@ -3259,7 +1721,6 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, unsigned long len, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vma_prepare vp;
/*
* Check against address space limits by the changed size
@@ -3279,25 +1740,16 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
* Expand the existing vma if possible; Note that singular lists do not
* occur after forking, so the expand will only happen on new VMAs.
*/
- if (vma && vma->vm_end == addr && !vma_policy(vma) &&
- can_vma_merge_after(vma, flags, NULL, NULL,
- addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
- vma_iter_config(vmi, vma->vm_start, addr + len);
- if (vma_iter_prealloc(vmi, vma))
- goto unacct_fail;
+ if (vma && vma->vm_end == addr) {
+ VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
- vma_start_write(vma);
-
- init_vma_prep(&vp, vma);
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
- vma->vm_end = addr + len;
- vm_flags_set(vma, VM_SOFTDIRTY);
- vma_iter_store(vmi, vma);
+ vmg.prev = vma;
+ vma_iter_next_range(vmi);
- vma_complete(&vp, vmi, mm);
- khugepaged_enter_vma(vma, flags);
- goto out;
+ if (vma_merge_new_range(&vmg))
+ goto out;
+ else if (vmg_nomem(&vmg))
+ goto unacct_fail;
}
if (vma)
@@ -3433,7 +1885,7 @@ void exit_mmap(struct mm_struct *mm)
do {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
- remove_vma(vma, true);
+ remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
count++;
cond_resched();
vma = vma_next(&vmi);
@@ -3491,92 +1943,6 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
}
/*
- * Copy the vma structure to a new location in the same mm,
- * prior to moving page table entries, to effect an mremap move.
- */
-struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks)
-{
- struct vm_area_struct *vma = *vmap;
- unsigned long vma_start = vma->vm_start;
- struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *new_vma, *prev;
- bool faulted_in_anon_vma = true;
- VMA_ITERATOR(vmi, mm, addr);
-
- /*
- * If anonymous vma has not yet been faulted, update new pgoff
- * to match new location, to increase its chance of merging.
- */
- if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
- pgoff = addr >> PAGE_SHIFT;
- faulted_in_anon_vma = false;
- }
-
- new_vma = find_vma_prev(mm, addr, &prev);
- if (new_vma && new_vma->vm_start < addr + len)
- return NULL; /* should never get here */
-
- new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
- if (new_vma) {
- /*
- * Source vma may have been merged into new_vma
- */
- if (unlikely(vma_start >= new_vma->vm_start &&
- vma_start < new_vma->vm_end)) {
- /*
- * The only way we can get a vma_merge with
- * self during an mremap is if the vma hasn't
- * been faulted in yet and we were allowed to
- * reset the dst vma->vm_pgoff to the
- * destination address of the mremap to allow
- * the merge to happen. mremap must change the
- * vm_pgoff linearity between src and dst vmas
- * (in turn preventing a vma_merge) to be
- * safe. It is only safe to keep the vm_pgoff
- * linear if there are no pages mapped yet.
- */
- VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
- *vmap = vma = new_vma;
- }
- *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
- } else {
- new_vma = vm_area_dup(vma);
- if (!new_vma)
- goto out;
- vma_set_range(new_vma, addr, addr + len, pgoff);
- if (vma_dup_policy(vma, new_vma))
- goto out_free_vma;
- if (anon_vma_clone(new_vma, vma))
- goto out_free_mempol;
- if (new_vma->vm_file)
- get_file(new_vma->vm_file);
- if (new_vma->vm_ops && new_vma->vm_ops->open)
- new_vma->vm_ops->open(new_vma);
- if (vma_link(mm, new_vma))
- goto out_vma_link;
- *need_rmap_locks = false;
- }
- return new_vma;
-
-out_vma_link:
- if (new_vma->vm_ops && new_vma->vm_ops->close)
- new_vma->vm_ops->close(new_vma);
-
- if (new_vma->vm_file)
- fput(new_vma->vm_file);
-
- unlink_anon_vmas(new_vma);
-out_free_mempol:
- mpol_put(vma_policy(new_vma));
-out_free_vma:
- vm_area_free(new_vma);
-out:
- return NULL;
-}
-
-/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
@@ -3620,10 +1986,16 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
/*
+ * Close hook, called for unmap() and on the old vma for mremap().
+ *
* Having a close hook prevents vma merging regardless of flags.
*/
static void special_mapping_close(struct vm_area_struct *vma)
{
+ const struct vm_special_mapping *sm = vma->vm_private_data;
+
+ if (sm->close)
+ sm->close(sm, vma);
}
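
For illustration, a hedged sketch of how a caller might use the new hook (kernel context assumed; the mapping name and the callback body are hypothetical, only the ->close wiring reflects the hunk above):

static void demo_mapping_close(const struct vm_special_mapping *sm,
			       struct vm_area_struct *vma)
{
	/* e.g. drop a cached per-mm pointer to this vma */
}

static struct vm_special_mapping demo_mapping = {
	.name  = "[demo]",
	.close = demo_mapping_close,
	/* .fault or .pages as usual; omitted in this sketch */
};
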
static const char *special_mapping_name(struct vm_area_struct *vma)
@@ -3665,27 +2037,17 @@ static const struct vm_operations_struct special_mapping_vmops = {
.may_split = special_mapping_split,
};
-static const struct vm_operations_struct legacy_special_mapping_vmops = {
- .close = special_mapping_close,
- .fault = special_mapping_fault,
-};
-
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
+ struct vm_special_mapping *sm = vma->vm_private_data;
- if (vma->vm_ops == &legacy_special_mapping_vmops) {
- pages = vma->vm_private_data;
- } else {
- struct vm_special_mapping *sm = vma->vm_private_data;
-
- if (sm->fault)
- return sm->fault(sm, vmf->vma, vmf);
+ if (sm->fault)
+ return sm->fault(sm, vmf->vma, vmf);
- pages = sm->pages;
- }
+ pages = sm->pages;
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--;
@@ -3740,8 +2102,7 @@ bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{
return vma->vm_private_data == sm &&
- (vma->vm_ops == &special_mapping_vmops ||
- vma->vm_ops == &legacy_special_mapping_vmops);
+ vma->vm_ops == &special_mapping_vmops;
}
/*
@@ -3762,214 +2123,6 @@ struct vm_area_struct *_install_special_mapping(
&special_mapping_vmops);
}
-int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long vm_flags, struct page **pages)
-{
- struct vm_area_struct *vma = __install_special_mapping(
- mm, addr, len, vm_flags, (void *)pages,
- &legacy_special_mapping_vmops);
-
- return PTR_ERR_OR_ZERO(vma);
-}
-
-static DEFINE_MUTEX(mm_all_locks_mutex);
-
-static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
-{
- if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
- /*
- * The LSB of head.next can't change from under us
- * because we hold the mm_all_locks_mutex.
- */
- down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
- /*
- * We can safely modify head.next after taking the
- * anon_vma->root->rwsem. If some other vma in this mm shares
- * the same anon_vma we won't take it again.
- *
- * No need of atomic instructions here, head.next
- * can't change from under us thanks to the
- * anon_vma->root->rwsem.
- */
- if (__test_and_set_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_root.rb_node))
- BUG();
- }
-}
-
-static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
-{
- if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
- /*
- * AS_MM_ALL_LOCKS can't change from under us because
- * we hold the mm_all_locks_mutex.
- *
- * Operations on ->flags have to be atomic because
- * even if AS_MM_ALL_LOCKS is stable thanks to the
- * mm_all_locks_mutex, there may be other cpus
- * changing other bitflags in parallel to us.
- */
- if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
- BUG();
- down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
- }
-}
-
-/*
- * This operation locks against the VM for all pte/vma/mm related
- * operations that could ever happen on a certain mm. This includes
- * vmtruncate, try_to_unmap, and all page faults.
- *
- * The caller must take the mmap_lock in write mode before calling
- * mm_take_all_locks(). The caller isn't allowed to release the
- * mmap_lock until mm_drop_all_locks() returns.
- *
- * mmap_lock in write mode is required in order to block all operations
- * that could modify pagetables and free pages without need of
- * altering the vma layout. It's also needed in write mode to avoid new
- * anon_vmas to be associated with existing vmas.
- *
- * A single task can't take more than one mm_take_all_locks() in a row
- * or it would deadlock.
- *
- * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
- * mapping->flags avoid to take the same lock twice, if more than one
- * vma in this mm is backed by the same anon_vma or address_space.
- *
- * We take locks in following order, accordingly to comment at beginning
- * of mm/rmap.c:
- * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
- * hugetlb mapping);
- * - all vmas marked locked
- * - all i_mmap_rwsem locks;
- * - all anon_vma->rwseml
- *
- * We can take all locks within these types randomly because the VM code
- * doesn't nest them and we protected from parallel mm_take_all_locks() by
- * mm_all_locks_mutex.
- *
- * mm_take_all_locks() and mm_drop_all_locks are expensive operations
- * that may have to take thousand of locks.
- *
- * mm_take_all_locks() can fail if it's interrupted by signals.
- */
-int mm_take_all_locks(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- struct anon_vma_chain *avc;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_assert_write_locked(mm);
-
- mutex_lock(&mm_all_locks_mutex);
-
- /*
- * vma_start_write() does not have a complement in mm_drop_all_locks()
- * because vma_start_write() is always asymmetrical; it marks a VMA as
- * being written to until mmap_write_unlock() or mmap_write_downgrade()
- * is reached.
- */
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- vma_start_write(vma);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->vm_file && vma->vm_file->f_mapping &&
- is_vm_hugetlb_page(vma))
- vm_lock_mapping(mm, vma->vm_file->f_mapping);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->vm_file && vma->vm_file->f_mapping &&
- !is_vm_hugetlb_page(vma))
- vm_lock_mapping(mm, vma->vm_file->f_mapping);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->anon_vma)
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- vm_lock_anon_vma(mm, avc->anon_vma);
- }
-
- return 0;
-
-out_unlock:
- mm_drop_all_locks(mm);
- return -EINTR;
-}
-
-static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
-{
- if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
- /*
- * The LSB of head.next can't change to 0 from under
- * us because we hold the mm_all_locks_mutex.
- *
- * We must however clear the bitflag before unlocking
- * the vma so the users using the anon_vma->rb_root will
- * never see our bitflag.
- *
- * No need of atomic instructions here, head.next
- * can't change from under us until we release the
- * anon_vma->root->rwsem.
- */
- if (!__test_and_clear_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_root.rb_node))
- BUG();
- anon_vma_unlock_write(anon_vma);
- }
-}
-
-static void vm_unlock_mapping(struct address_space *mapping)
-{
- if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
- /*
- * AS_MM_ALL_LOCKS can't change to 0 from under us
- * because we hold the mm_all_locks_mutex.
- */
- i_mmap_unlock_write(mapping);
- if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
- &mapping->flags))
- BUG();
- }
-}
-
-/*
- * The mmap_lock cannot be released by the caller until
- * mm_drop_all_locks() returns.
- */
-void mm_drop_all_locks(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- struct anon_vma_chain *avc;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_assert_write_locked(mm);
- BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
-
- for_each_vma(vmi, vma) {
- if (vma->anon_vma)
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- vm_unlock_anon_vma(avc->anon_vma);
- if (vma->vm_file && vma->vm_file->f_mapping)
- vm_unlock_mapping(vma->vm_file->f_mapping);
- }
-
- mutex_unlock(&mm_all_locks_mutex);
-}
-
/*
* initialise the percpu counter for VM
*/
@@ -4088,3 +2241,86 @@ static int __meminit init_reserve_notifier(void)
return 0;
}
subsys_initcall(init_reserve_notifier);
+
+/*
+ * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
+ * this VMA and its relocated range, which will now reside at [vma->vm_start -
+ * shift, vma->vm_end - shift).
+ *
+ * This function is almost certainly NOT what you want for anything other than
+ * early executable temporary stack relocation.
+ */
+int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
+{
+ /*
+ * The process proceeds as follows:
+ *
+ * 1) Use shift to calculate the new vma endpoints.
+ * 2) Extend vma to cover both the old and new ranges. This ensures the
+ * arguments passed to subsequent functions are consistent.
+ * 3) Move vma's page tables to the new range.
+ * 4) Free up any cleared pgd range.
+ * 5) Shrink the vma to cover only the new range.
+ */
+
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long old_start = vma->vm_start;
+ unsigned long old_end = vma->vm_end;
+ unsigned long length = old_end - old_start;
+ unsigned long new_start = old_start - shift;
+ unsigned long new_end = old_end - shift;
+ VMA_ITERATOR(vmi, mm, new_start);
+ VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
+ struct vm_area_struct *next;
+ struct mmu_gather tlb;
+
+ BUG_ON(new_start > new_end);
+
+ /*
+ * ensure there are no vmas between where we want to go
+ * and where we are
+ */
+ if (vma != vma_next(&vmi))
+ return -EFAULT;
+
+ vma_iter_prev_range(&vmi);
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+ vmg.vma = vma;
+ if (vma_expand(&vmg))
+ return -ENOMEM;
+
+ /*
+	 * move the page tables downwards; on failure we rely on
+ * process cleanup to remove whatever mess we made.
+ */
+ if (length != move_page_tables(vma, old_start,
+ vma, new_start, length, false, true))
+ return -ENOMEM;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm);
+ next = vma_next(&vmi);
+ if (new_end > old_start) {
+ /*
+	 * when the old and new regions overlap, clear from new_end.
+ */
+ free_pgd_range(&tlb, new_end, old_end, new_end,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
+ } else {
+ /*
+	 * otherwise, clean from old_start; this is done to not touch
+	 * the address space in [new_end, old_start), as some
+	 * architectures have constraints on va-space that make this
+	 * illegal (IA64); for the others it's just a little faster.
+ */
+ free_pgd_range(&tlb, old_start, old_end, new_end,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
+ }
+ tlb_finish_mmu(&tlb);
+
+ vma_prev(&vmi);
+ /* Shrink the vma to just the new range */
+ return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
+}
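
As a sanity check on the arithmetic, here is a standalone userspace model (illustrative only; the addresses and shift are made up) of how relocate_vma_down() picks its endpoints and decides where page-table clearing starts:

#include <stdio.h>

int main(void)
{
	unsigned long old_start = 0x7f0000200000UL;
	unsigned long old_end   = 0x7f0000300000UL;	/* 1 MiB range */
	unsigned long shift     = 0x80000UL;		/* down 512 KiB */
	unsigned long new_start = old_start - shift;
	unsigned long new_end   = old_end - shift;
	unsigned long clear_from;

	/* Overlap: clear from new_end; otherwise from old_start. */
	clear_from = (new_end > old_start) ? new_end : old_start;

	printf("new range [%#lx, %#lx), clear pgds in [%#lx, %#lx)\n",
	       new_start, new_end, clear_from, old_end);
	return 0;
}
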
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 8982e6139d07..fc18fe274505 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -19,6 +19,8 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include "vma.h"
+
/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
z++;
else
while (zonelist_zone_idx(z) > highest_zoneidx ||
- (z->zone && !zref_in_nodemask(z, nodes)))
+ (zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
z++;
return z;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 222ab434da54..0c5d6d06107d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -161,8 +161,7 @@ static long change_pte_range(struct mmu_gather *tlb,
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
toptier)
continue;
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !toptier)
+ if (folio_use_access_time(folio))
folio_xchg_access_time(folio,
jiffies_to_msecs(jiffies));
}
@@ -303,8 +302,9 @@ pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
/*
* pte markers only resides in pte level, if we need pte markers,
- * we need to split. We cannot wr-protect shmem thp because file
- * thp is handled differently when split by erasing the pmd so far.
+ * we need to split. For example, we cannot wr-protect a file thp
+ * (e.g. 2M shmem) because file thp is handled differently when
+ * split by erasing the pmd so far.
*/
return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}
@@ -364,9 +364,6 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
unsigned long next;
long pages = 0;
unsigned long nr_huge_updates = 0;
- struct mmu_notifier_range range;
-
- range.start = 0;
pmd = pmd_offset(pud, addr);
do {
@@ -384,14 +381,6 @@ again:
if (pmd_none(*pmd))
goto next;
- /* invoke the mmu notifier if the pmd is populated */
- if (!range.start) {
- mmu_notifier_range_init(&range,
- MMU_NOTIFY_PROTECTION_VMA, 0,
- vma->vm_mm, addr, end);
- mmu_notifier_invalidate_range_start(&range);
- }
-
_pmd = pmdp_get_lockless(pmd);
if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
if ((next - addr != HPAGE_PMD_SIZE) ||
@@ -432,9 +421,6 @@ next:
cond_resched();
} while (pmd++, addr = next, addr != end);
- if (range.start)
- mmu_notifier_invalidate_range_end(&range);
-
if (nr_huge_updates)
count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
return pages;
@@ -444,21 +430,57 @@ static inline long change_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
- pud_t *pud;
+ struct mmu_notifier_range range;
+ pud_t *pudp, pud;
unsigned long next;
long pages = 0, ret;
- pud = pud_offset(p4d, addr);
+ range.start = 0;
+
+ pudp = pud_offset(p4d, addr);
do {
+again:
next = pud_addr_end(addr, end);
- ret = change_prepare(vma, pud, pmd, addr, cp_flags);
- if (ret)
- return ret;
- if (pud_none_or_clear_bad(pud))
+ ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
+ if (ret) {
+ pages = ret;
+ break;
+ }
+
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
continue;
- pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
+
+ if (!range.start) {
+ mmu_notifier_range_init(&range,
+ MMU_NOTIFY_PROTECTION_VMA, 0,
+ vma->vm_mm, addr, end);
+ mmu_notifier_invalidate_range_start(&range);
+ }
+
+ if (pud_leaf(pud)) {
+ if ((next - addr != PUD_SIZE) ||
+ pgtable_split_needed(vma, cp_flags)) {
+ __split_huge_pud(vma, pudp, addr);
+ goto again;
+ } else {
+ ret = change_huge_pud(tlb, vma, pudp,
+ addr, newprot, cp_flags);
+ if (ret == 0)
+ goto again;
+ /* huge pud was handled */
+ if (ret == HPAGE_PUD_NR)
+ pages += HPAGE_PUD_NR;
+ continue;
+ }
+ }
+
+ pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
cp_flags);
- } while (pud++, addr = next, addr != end);
+ } while (pudp++, addr = next, addr != end);
+
+ if (range.start)
+ mmu_notifier_invalidate_range_end(&range);
return pages;
}
@@ -589,6 +611,9 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
unsigned long charged = 0;
int error;
+ if (!can_modify_vma(vma))
+ return -EPERM;
+
if (newflags == oldflags) {
*pprev = vma;
return 0;
@@ -747,15 +772,6 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
}
}
- /*
- * checking if memory is sealed.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(current->mm, start, end))) {
- error = -EPERM;
- goto out;
- }
-
prev = vma_prev(&vmi);
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/mremap.c b/mm/mremap.c
index e7ae140fc640..24712f8dbb6b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -902,19 +902,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
return -ENOMEM;
- /*
- * In mremap_to().
- * Move a VMA to another location, check if src addr is sealed.
- *
- * Place can_modify_mm here because mremap_to()
- * does its own checking for address range, and we only
- * check the sealing after passing those checks.
- *
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, addr, addr + old_len)))
- return -EPERM;
-
if (flags & MREMAP_FIXED) {
/*
* In mremap_to().
@@ -1052,6 +1039,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
goto out;
}
+ /* Don't allow remapping vmas when they have already been sealed */
+ if (!can_modify_vma(vma)) {
+ ret = -EPERM;
+ goto out;
+ }
+
if (is_vm_hugetlb_page(vma)) {
struct hstate *h __maybe_unused = hstate_vma(vma);
@@ -1080,19 +1073,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
/*
- * Below is shrink/expand case (not mremap_to())
- * Check if src address is sealed, if so, reject.
- * In other words, prevent shrinking or expanding a sealed VMA.
- *
- * Place can_modify_mm here so we can keep the logic related to
- * shrink/expand together.
- */
- if (unlikely(!can_modify_mm(mm, addr, addr + old_len))) {
- ret = -EPERM;
- goto out;
- }
-
- /*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_vmi_munmap does all the needed commit accounting, and
diff --git a/mm/mseal.c b/mm/mseal.c
index 15bba28acc00..ece977bd21e1 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -16,28 +16,11 @@
#include <linux/sched.h>
#include "internal.h"
-static inline bool vma_is_sealed(struct vm_area_struct *vma)
-{
- return (vma->vm_flags & VM_SEALED);
-}
-
static inline void set_vma_sealed(struct vm_area_struct *vma)
{
vm_flags_set(vma, VM_SEALED);
}
-/*
- * check if a vma is sealed for modification.
- * return true, if modification is allowed.
- */
-static bool can_modify_vma(struct vm_area_struct *vma)
-{
- if (unlikely(vma_is_sealed(vma)))
- return false;
-
- return true;
-}
-
static bool is_madv_discard(int behavior)
{
switch (behavior) {
@@ -71,45 +54,15 @@ static bool is_ro_anon(struct vm_area_struct *vma)
}
/*
- * Check if the vmas of a memory range are allowed to be modified.
- * the memory ranger can have a gap (unallocated memory).
- * return true, if it is allowed.
- */
-bool can_modify_mm(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, start);
-
- /* going through each vma to check. */
- for_each_vma_range(vmi, vma, end) {
- if (unlikely(!can_modify_vma(vma)))
- return false;
- }
-
- /* Allow by default. */
- return true;
-}
-
-/*
- * Check if the vmas of a memory range are allowed to be modified by madvise.
- * the memory ranger can have a gap (unallocated memory).
- * return true, if it is allowed.
+ * Check if a vma is allowed to be modified by madvise.
*/
-bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start, unsigned long end,
- int behavior)
+bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, start);
-
if (!is_madv_discard(behavior))
return true;
- /* going through each vma to check. */
- for_each_vma_range(vmi, vma, end)
- if (unlikely(is_ro_anon(vma) && !can_modify_vma(vma)))
- return false;
+ if (unlikely(!can_modify_vma(vma) && is_ro_anon(vma)))
+ return false;
/* Allow by default. */
return true;
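
A userspace model of the per-VMA gate that replaces the old range scan (names are illustrative; only the boolean logic mirrors the hunk):

#include <stdbool.h>
#include <stdio.h>

struct vma_model { bool sealed, ro_anon; };

/* Only discarding madvise behaviors are policed, and only a read-only
 * anonymous mapping in a sealed vma is refused. */
static bool can_modify_vma_madv_model(const struct vma_model *vma,
				      bool madv_is_discard)
{
	if (!madv_is_discard)
		return true;
	if (vma->sealed && vma->ro_anon)
		return false;
	return true;	/* allow by default */
}

int main(void)
{
	struct vma_model sealed_ro = { .sealed = true, .ro_anon = true };

	printf("discard madvise on sealed ro-anon vma allowed? %d\n",
	       can_modify_vma_madv_model(&sealed_ro, true));
	return 0;
}
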
@@ -256,7 +209,7 @@ static int apply_mm_seal(unsigned long start, unsigned long end)
*
* unseal() is not supported.
*/
-static int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
{
size_t len;
int ret = 0;
diff --git a/mm/nommu.c b/mm/nommu.c
index 7296e775e04e..385b0c15add8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -126,6 +126,11 @@ void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
}
EXPORT_SYMBOL(__vmalloc_noprof);
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
+}
+
void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
@@ -1573,12 +1578,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
return ret;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags)
-{
- return NULL;
-}
-
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
diff --git a/mm/numa.c b/mm/numa.c
new file mode 100644
index 000000000000..e2eec07707d1
--- /dev/null
+++ b/mm/numa.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/memblock.h>
+#include <linux/printk.h>
+#include <linux/numa.h>
+#include <linux/numa_memblks.h>
+
+struct pglist_data *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
+
+/* Allocate NODE_DATA for a node on the local memory */
+void __init alloc_node_data(int nid)
+{
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+
+ /* Allocate node data. Try node-local memory and then any node. */
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+ nd = __va(nd_pa);
+
+ /* report and initialize */
+ pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
+ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != nid)
+ pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+ memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+}
+
+void __init alloc_offline_node_data(int nid)
+{
+ pg_data_t *pgdat;
+
+ pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
+ if (!pgdat)
+ panic("Cannot allocate %zuB for node %d.\n",
+ sizeof(*pgdat), nid);
+
+ node_data[nid] = pgdat;
+}
+
+/* Stub functions: */
+
+#ifndef memory_add_physaddr_to_nid
+int memory_add_physaddr_to_nid(u64 start)
+{
+ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifndef phys_to_target_node
+int phys_to_target_node(u64 start)
+{
+ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phys_to_target_node);
+#endif
diff --git a/arch/x86/mm/numa_emulation.c b/mm/numa_emulation.c
index 9a9305367fdd..031fb9961bf7 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/mm/numa_emulation.c
@@ -6,9 +6,11 @@
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
-#include <asm/dma.h>
+#include <linux/numa_memblks.h>
+#include <asm/numa.h>
-#include "numa_internal.h"
+#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;
@@ -125,7 +127,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
*/
while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
- u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+ u64 dma32_end = numa_emu_dma_end();
u64 start, limit, end;
int phys_blk;
@@ -272,7 +274,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
*/
while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
- u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+ u64 dma32_end = numa_emu_dma_end();
u64 start, limit, end;
int phys_blk;
@@ -445,15 +447,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
/* copy the physical distance table */
if (numa_dist_cnt) {
- u64 phys;
-
- phys = memblock_phys_alloc_range(phys_size, PAGE_SIZE, 0,
- PFN_PHYS(max_pfn_mapped));
- if (!phys) {
+ phys_dist = memblock_alloc(phys_size, PAGE_SIZE);
+ if (!phys_dist) {
pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
goto no_emu;
}
- phys_dist = __va(phys);
for (i = 0; i < numa_dist_cnt; i++)
for (j = 0; j < numa_dist_cnt; j++)
@@ -477,19 +475,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
ei.blk[i].nid != NUMA_NO_NODE)
node_set(ei.blk[i].nid, numa_nodes_parsed);
- /*
- * Transform __apicid_to_node table to use emulated nids by
- * reverse-mapping phys_nid. The maps should always exist but fall
- * back to zero just in case.
- */
- for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
- if (__apicid_to_node[i] == NUMA_NO_NODE)
- continue;
- for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
- if (__apicid_to_node[i] == emu_nid_to_phys[j])
- break;
- __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
- }
+ numa_emu_update_cpu_to_node(emu_nid_to_phys, ARRAY_SIZE(emu_nid_to_phys));
/* make sure all emulated nodes are mapped to a physical node */
for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
@@ -527,7 +513,7 @@ no_emu:
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
int physnid, nid;
@@ -545,7 +531,7 @@ void numa_add_cpu(int cpu)
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
int i;
@@ -553,7 +539,7 @@ void numa_remove_cpu(int cpu)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(unsigned int cpu, bool enable)
{
int nid, physnid;
@@ -573,12 +559,12 @@ static void numa_set_cpumask(int cpu, bool enable)
}
}
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, true);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, false);
}
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
new file mode 100644
index 000000000000..be52b93a9c58
--- /dev/null
+++ b/mm/numa_memblks.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/sort.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/numa.h>
+#include <linux/numa_memblks.h>
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
+
+nodemask_t numa_nodes_parsed __initdata;
+
+static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
+static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
+
+/*
+ * Set nodes, which have memory in @mi, in *@nodemask.
+ */
+static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
+ const struct numa_meminfo *mi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
+ if (mi->blk[i].start != mi->blk[i].end &&
+ mi->blk[i].nid != NUMA_NO_NODE)
+ node_set(mi->blk[i].nid, *nodemask);
+}
+
+/**
+ * numa_reset_distance - Reset NUMA distance table
+ *
+ * The current table is freed. The next numa_set_distance() call will
+ * create a new one.
+ */
+void __init numa_reset_distance(void)
+{
+ size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
+
+ /* numa_distance could be 1LU marking allocation failure, test cnt */
+ if (numa_distance_cnt)
+ memblock_free(numa_distance, size);
+ numa_distance_cnt = 0;
+ numa_distance = NULL; /* enable table creation */
+}
+
+static int __init numa_alloc_distance(void)
+{
+ nodemask_t nodes_parsed;
+ size_t size;
+ int i, j, cnt = 0;
+
+ /* size the new table and allocate it */
+ nodes_parsed = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
+
+ for_each_node_mask(i, nodes_parsed)
+ cnt = i;
+ cnt++;
+ size = cnt * cnt * sizeof(numa_distance[0]);
+
+ numa_distance = memblock_alloc(size, PAGE_SIZE);
+ if (!numa_distance) {
+ pr_warn("Warning: can't allocate distance table!\n");
+ /* don't retry until explicitly reset */
+ numa_distance = (void *)1LU;
+ return -ENOMEM;
+ }
+
+ numa_distance_cnt = cnt;
+
+ /* fill with the default distances */
+ for (i = 0; i < cnt; i++)
+ for (j = 0; j < cnt; j++)
+ numa_distance[i * cnt + j] = i == j ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+ printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
+
+ return 0;
+}
+
+/**
+ * numa_set_distance - Set NUMA distance from one NUMA to another
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance. If distance table
+ * doesn't exist, one which is large enough to accommodate all the currently
+ * known nodes will be created.
+ *
+ * If such table cannot be allocated, a warning is printed and further
+ * calls are ignored until the distance table is reset with
+ * numa_reset_distance().
+ *
+ * If @from or @to is higher than the highest known node or lower than zero
+ * at the time of table creation or @distance doesn't make sense, the call
+ * is ignored.
+ * This is to allow simplification of specific NUMA config implementations.
+ */
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if (!numa_distance && numa_alloc_distance() < 0)
+ return;
+
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+ from < 0 || to < 0) {
+ pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ if ((u8)distance != distance ||
+ (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ numa_distance[from * numa_distance_cnt + to] = distance;
+}
+
+int __node_distance(int from, int to)
+{
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+ return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
+
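
The distance table is a flat cnt * cnt byte array indexed as from * cnt + to. A small standalone model of the set/lookup pair above (illustrative; cnt is fixed at 4 here):

#include <stdio.h>

#define LOCAL_DISTANCE	10
#define REMOTE_DISTANCE	20

static unsigned char dist[4 * 4];
static int cnt = 4;

static void set_distance(int from, int to, int d)
{
	if (from >= cnt || to >= cnt || from < 0 || to < 0)
		return;		/* out-of-range updates are ignored */
	dist[from * cnt + to] = (unsigned char)d;
}

int main(void)
{
	for (int i = 0; i < cnt; i++)
		for (int j = 0; j < cnt; j++)
			dist[i * cnt + j] = (i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE;

	set_distance(0, 2, 31);	/* asymmetric distances are permitted */
	printf("d(0,2)=%d d(2,0)=%d\n", dist[0 * cnt + 2], dist[2 * cnt + 0]);
	return 0;
}
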
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("too many memblk ranges\n");
+ return -EINVAL;
+ }
+
+ mi->blk[mi->nr_blks].start = start;
+ mi->blk[mi->nr_blks].end = end;
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
+ * @idx: Index of memblk to remove
+ * @mi: numa_meminfo to remove memblk from
+ *
+ * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
+ * decrementing @mi->nr_blks.
+ */
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
+{
+ mi->nr_blks--;
+ memmove(&mi->blk[idx], &mi->blk[idx + 1],
+ (mi->nr_blks - idx) * sizeof(mi->blk[0]));
+}
+
+/**
+ * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
+ * @dst: numa_meminfo to append block to
+ * @idx: Index of memblk to remove
+ * @src: numa_meminfo to remove memblk from
+ */
+static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
+ struct numa_meminfo *src)
+{
+ dst->blk[dst->nr_blks++] = src->blk[idx];
+ numa_remove_memblk_from(idx, src);
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+/**
+ * numa_cleanup_meminfo - Cleanup a numa_meminfo
+ * @mi: numa_meminfo to clean up
+ *
+ * Sanitize @mi by merging and removing unnecessary memblks. Also check for
+ * conflicts and clear unused memblks.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
+{
+ const u64 low = memblock_start_of_DRAM();
+ const u64 high = memblock_end_of_DRAM();
+ int i, j, k;
+
+ /* first, trim all entries */
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ /* move / save reserved memory ranges */
+ if (!memblock_overlaps_region(&memblock.memory,
+ bi->start, bi->end - bi->start)) {
+ numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
+ continue;
+ }
+
+ /* make sure all non-reserved blocks are inside the limits */
+ bi->start = max(bi->start, low);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
+
+ /* and there's no empty block */
+ if (bi->start >= bi->end)
+ numa_remove_memblk_from(i--, mi);
+ }
+
+ /* merge neighboring / overlapping entries */
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ for (j = i + 1; j < mi->nr_blks; j++) {
+ struct numa_memblk *bj = &mi->blk[j];
+ u64 start, end;
+
+ /*
+ * See whether there are overlapping blocks. Whine
+ * about but allow overlaps of the same nid. They
+ * will be merged below.
+ */
+ if (bi->end > bj->start && bi->start < bj->end) {
+ if (bi->nid != bj->nid) {
+ pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->nid, bj->start, bj->end - 1);
+ return -EINVAL;
+ }
+ pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->start, bj->end - 1);
+ }
+
+ /*
+ * Join together blocks on the same node, holes
+ * between which don't overlap with memory on other
+ * nodes.
+ */
+ if (bi->nid != bj->nid)
+ continue;
+ start = min(bi->start, bj->start);
+ end = max(bi->end, bj->end);
+ for (k = 0; k < mi->nr_blks; k++) {
+ struct numa_memblk *bk = &mi->blk[k];
+
+ if (bi->nid == bk->nid)
+ continue;
+ if (start < bk->end && end > bk->start)
+ break;
+ }
+ if (k < mi->nr_blks)
+ continue;
+ pr_info("NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1, bj->start,
+ bj->end - 1, start, end - 1);
+ bi->start = start;
+ bi->end = end;
+ numa_remove_memblk_from(j--, mi);
+ }
+ }
+
+ /* clear unused ones */
+ for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
+ mi->blk[i].start = mi->blk[i].end = 0;
+ mi->blk[i].nid = NUMA_NO_NODE;
+ }
+
+ return 0;
+}
+
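
The subtle part of the merge pass above is the veto: two blocks of the same nid are only joined when no other node's block overlaps the combined span. A standalone model of that check (made-up addresses):

#include <stdbool.h>
#include <stdio.h>

struct blk { unsigned long long start, end; int nid; };

static bool can_join(struct blk *b, int n, int i, int j)
{
	unsigned long long start = b[i].start < b[j].start ? b[i].start : b[j].start;
	unsigned long long end   = b[i].end   > b[j].end   ? b[i].end   : b[j].end;

	if (b[i].nid != b[j].nid)
		return false;
	for (int k = 0; k < n; k++) {
		if (b[k].nid == b[i].nid)
			continue;
		if (start < b[k].end && end > b[k].start)
			return false;	/* foreign block inside the hole */
	}
	return true;
}

int main(void)
{
	struct blk b[] = {
		{ 0x0000, 0x1000, 0 },
		{ 0x2000, 0x3000, 0 },
		{ 0x1000, 0x2000, 1 },	/* node 1 sits in the gap */
	};
	printf("join blk0+blk1? %d\n", can_join(b, 3, 0, 1));	/* 0: vetoed */
	return 0;
}
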
+/*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unpluggable.
+ */
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+ nodemask_t reserved_nodemask = NODE_MASK_NONE;
+ struct memblock_region *mb_region;
+ int i;
+
+ /*
+ * We have to do some preprocessing of memblock regions, to
+ * make them suitable for reservation.
+ *
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel, but those regions are not split up
+ * along node boundaries yet, and don't necessarily have their
+ * node ID set yet either.
+ *
+ * So iterate over all parsed memory blocks and use those ranges to
+ * set the nid in memblock.reserved. This will split up the
+ * memblock regions along node boundaries and will set the node IDs
+ * as well.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = numa_meminfo.blk + i;
+ int ret;
+
+ ret = memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ WARN_ON_ONCE(ret);
+ }
+
+ /*
+ * Now go over all reserved memblock regions, to construct a
+ * node mask of all kernel reserved memory areas.
+ *
+ * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
+ * numa_meminfo might not include all memblock.reserved
+ * memory ranges, because quirks such as trim_snb_memory()
+ * reserve specific pages for Sandy Bridge graphics. ]
+ */
+ for_each_reserved_mem_region(mb_region) {
+ int nid = memblock_get_region_node(mb_region);
+
+ if (nid != MAX_NUMNODES)
+ node_set(nid, reserved_nodemask);
+ }
+
+ /*
+ * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
+ * belonging to the reserved node mask.
+ *
+ * Note that this will include memory regions that reside
+ * on nodes that contain kernel memory - entire nodes
+ * become hot-unpluggable:
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = numa_meminfo.blk + i;
+
+ if (!node_isset(mb->nid, reserved_nodemask))
+ continue;
+
+ memblock_clear_hotplug(mb->start, mb->end - mb->start);
+ }
+}
+
+static int __init numa_register_meminfo(struct numa_meminfo *mi)
+{
+ int i;
+
+ /* Account for nodes with cpus and no memory */
+ node_possible_map = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&node_possible_map, mi);
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
+
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *mb = &mi->blk[i];
+
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.memory, mb->nid);
+ }
+
+ /*
+	 * Very early on, the kernel has to use some memory, for example
+	 * to load the kernel image. We cannot prevent this anyway, so any
+	 * node the kernel resides in should be un-hotpluggable.
+	 *
+	 * And by the time we get here, allocating node data won't fail.
+ */
+ numa_clear_kernel_node_hotplug();
+
+ /*
+	 * If the sections array is going to be used for pfn -> nid mapping, check
+ * whether its granularity is fine enough.
+ */
+ if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
+ unsigned long pfn_align = node_map_pfn_alignment();
+
+ if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+ unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+
+ unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+
+ pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
+ node_align_mb, sect_align_mb);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down)
+{
+ phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+ int ret;
+
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ WARN_ON(memblock_set_node(0, max_addr, &memblock.memory, NUMA_NO_NODE));
+ WARN_ON(memblock_set_node(0, max_addr, &memblock.reserved,
+ NUMA_NO_NODE));
+ /* In case that parsing SRAT failed. */
+ WARN_ON(memblock_clear_hotplug(0, max_addr));
+ numa_reset_distance();
+
+ ret = init_func();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We reset memblock back to the top-down direction
+ * here because if we configured ACPI_NUMA, we have
+ * parsed SRAT in init_func(). It is ok to have the
+	 * reset here even if we didn't configure ACPI_NUMA
+	 * or the ACPI NUMA init fails and falls back to dummy
+	 * NUMA init.
+ */
+ if (memblock_force_top_down)
+ memblock_set_bottom_up(false);
+
+ ret = numa_cleanup_meminfo(&numa_meminfo);
+ if (ret < 0)
+ return ret;
+
+ numa_emulation(&numa_meminfo, numa_distance_cnt);
+
+ return numa_register_meminfo(&numa_meminfo);
+}
+
+static int __init cmp_memblk(const void *a, const void *b)
+{
+ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
+
+ return (ma->start > mb->start) - (ma->start < mb->start);
+}
+
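
The (a > b) - (a < b) comparator idiom used above yields -1, 0 or 1 without the signed overflow a plain subtraction could hit on 64-bit start addresses; a minimal standalone demonstration:

#include <stdio.h>

static int cmp_u64(unsigned long long a, unsigned long long b)
{
	return (a > b) - (a < b);
}

int main(void)
{
	printf("%d %d %d\n", cmp_u64(1, 2), cmp_u64(2, 2), cmp_u64(3, 2));
	return 0;
}
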
+static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+
+/**
+ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
+ * @start: address to begin fill
+ * @end: address to end fill
+ *
+ * Find and extend numa_meminfo memblks to cover the physical
+ * address range @start-@end
+ *
+ * RETURNS:
+ * 0 : Success
+ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
+ */
+
+int __init numa_fill_memblks(u64 start, u64 end)
+{
+ struct numa_memblk **blk = &numa_memblk_list[0];
+ struct numa_meminfo *mi = &numa_meminfo;
+ int count = 0;
+ u64 prev_end;
+
+ /*
+ * Create a list of pointers to numa_meminfo memblks that
+ * overlap start, end. The list is used to make in-place
+ * changes that fill out the numa_meminfo memblks.
+ */
+ for (int i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ if (memblock_addrs_overlap(start, end - start, bi->start,
+ bi->end - bi->start)) {
+ blk[count] = &mi->blk[i];
+ count++;
+ }
+ }
+ if (!count)
+ return NUMA_NO_MEMBLK;
+
+ /* Sort the list of pointers in memblk->start order */
+ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
+
+ /* Make sure the first/last memblks include start/end */
+ blk[0]->start = min(blk[0]->start, start);
+ blk[count - 1]->end = max(blk[count - 1]->end, end);
+
+ /*
+ * Fill any gaps by tracking the previous memblks
+ * end address and backfilling to it if needed.
+ */
+ prev_end = blk[0]->end;
+ for (int i = 1; i < count; i++) {
+ struct numa_memblk *curr = blk[i];
+
+ if (prev_end >= curr->start) {
+ if (prev_end < curr->end)
+ prev_end = curr->end;
+ } else {
+ curr->start = prev_end;
+ prev_end = curr->end;
+ }
+ }
+ return 0;
+}
+
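
To see the backfill loop above in action, a standalone model with three sorted blocks (values are illustrative): the gap before the second block is filled by extending its start down to the previous block's end.

#include <stdio.h>

struct blk { unsigned long long start, end; };

int main(void)
{
	/* Already sorted by start; first/last clamped to the fill range. */
	struct blk blk[] = { { 0x0, 0x100 }, { 0x180, 0x200 }, { 0x200, 0x300 } };
	int count = 3;
	unsigned long long prev_end = blk[0].end;

	for (int i = 1; i < count; i++) {
		if (prev_end >= blk[i].start) {
			if (prev_end < blk[i].end)
				prev_end = blk[i].end;
		} else {
			blk[i].start = prev_end;	/* backfill the gap */
			prev_end = blk[i].end;
		}
	}
	for (int i = 0; i < count; i++)
		printf("[%#llx, %#llx)\n", blk[i].start, blk[i].end);
	return 0;
}
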
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
+{
+ int i;
+
+ for (i = 0; i < mi->nr_blks; i++)
+ if (mi->blk[i].start <= start && mi->blk[i].end > start)
+ return mi->blk[i].nid;
+ return NUMA_NO_NODE;
+}
+
+int phys_to_target_node(u64 start)
+{
+ int nid = meminfo_to_nid(&numa_meminfo, start);
+
+ /*
+ * Prefer online nodes, but if reserved memory might be
+	 * hot-added, continue the search with reserved ranges.
+ */
+ if (nid != NUMA_NO_NODE)
+ return nid;
+
+ return meminfo_to_nid(&numa_reserved_meminfo, start);
+}
+EXPORT_SYMBOL_GPL(phys_to_target_node);
+
+int memory_add_physaddr_to_nid(u64 start)
+{
+ int nid = meminfo_to_nid(&numa_meminfo, start);
+
+ if (nid == NUMA_NO_NODE)
+ nid = numa_meminfo.blk[0].nid;
+ return nid;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4430ac68e4c4..fcd4c1439cb9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
tsk = current;
- if (rt_task(tsk)) {
+ if (rt_or_dl_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
}
@@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
else
dirty = vm_dirty_ratio * node_memory / 100;
- if (rt_task(tsk))
+ if (rt_or_dl_task(tsk))
dirty += dirty / 4;
/*
@@ -2612,7 +2612,7 @@ struct folio *writeback_iter(struct address_space *mapping,
done:
if (wbc->range_cyclic)
- mapping->writeback_index = folio->index + folio_nr_pages(folio);
+ mapping->writeback_index = folio_next_index(folio);
folio_batch_release(&wbc->fbatch);
return NULL;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91ace8ca97e2..8afab64814dc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,9 +286,7 @@ EXPORT_SYMBOL(nr_online_nodes);
#endif
static bool page_contains_unaccepted(struct page *page, unsigned int order);
-static void accept_page(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order);
-static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);
int page_group_by_mobility_disabled __read_mostly;
@@ -322,6 +320,11 @@ static inline bool deferred_pages_enabled(void)
{
return false;
}
+
+static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
+{
+ return false;
+}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/* Return a pointer to the bitmap storing bits affecting a block of pages */
@@ -958,8 +961,9 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
break;
case 2:
/* the second tail page: deferred_list overlaps ->mapping */
- if (unlikely(!list_empty(&folio->_deferred_list))) {
- bad_page(page, "on deferred list");
+ if (unlikely(!list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio))) {
+ bad_page(page, "partially mapped folio on deferred list");
goto out;
}
break;
@@ -1087,8 +1091,11 @@ __always_inline bool free_pages_prepare(struct page *page,
(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
- if (PageMappingFlags(page))
+ if (PageMappingFlags(page)) {
+ if (PageAnon(page))
+ mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
page->mapping = NULL;
+ }
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
@@ -1199,17 +1206,39 @@ static void free_pcppages_bulk(struct zone *zone, int count,
spin_unlock_irqrestore(&zone->lock, flags);
}
+/* Split a multi-block free page into its individual pageblocks. */
+static void split_large_buddy(struct zone *zone, struct page *page,
+ unsigned long pfn, int order, fpi_t fpi)
+{
+ unsigned long end = pfn + (1 << order);
+
+ VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
+ /* Caller removed page from freelist, buddy info cleared! */
+ VM_WARN_ON_ONCE(PageBuddy(page));
+
+ if (order > pageblock_order)
+ order = pageblock_order;
+
+ while (pfn != end) {
+ int mt = get_pfnblock_migratetype(page, pfn);
+
+ __free_one_page(page, pfn, zone, order, mt, fpi);
+ pfn += 1 << order;
+ page = pfn_to_page(pfn);
+ }
+}
+
static void free_one_page(struct zone *zone, struct page *page,
unsigned long pfn, unsigned int order,
fpi_t fpi_flags)
{
unsigned long flags;
- int migratetype;
spin_lock_irqsave(&zone->lock, flags);
- migratetype = get_pfnblock_migratetype(page, pfn);
- __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
+ split_large_buddy(zone, page, pfn, order, fpi_flags);
spin_unlock_irqrestore(&zone->lock, flags);
+
+ __count_vm_events(PGFREE, 1 << order);
}
static void __free_pages_ok(struct page *page, unsigned int order,
@@ -1218,12 +1247,8 @@ static void __free_pages_ok(struct page *page, unsigned int order,
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
- if (!free_pages_prepare(page, order))
- return;
-
- free_one_page(zone, page, pfn, order, fpi_flags);
-
- __count_vm_events(PGFREE, 1 << order);
+ if (free_pages_prepare(page, order))
+ free_one_page(zone, page, pfn, order, fpi_flags);
}
void __meminit __free_pages_core(struct page *page, unsigned int order,
@@ -1270,7 +1295,7 @@ void __meminit __free_pages_core(struct page *page, unsigned int order,
if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
- accept_page(page, order);
+ accept_memory(page_to_phys(page), PAGE_SIZE << order);
}
/*
@@ -1346,11 +1371,11 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
*
* -- nyc
*/
-static inline void expand(struct zone *zone, struct page *page,
- int low, int high, int migratetype)
+static inline unsigned int expand(struct zone *zone, struct page *page, int low,
+ int high, int migratetype)
{
- unsigned long size = 1 << high;
- unsigned long nr_added = 0;
+ unsigned int size = 1 << high;
+ unsigned int nr_added = 0;
while (high > low) {
high--;
@@ -1370,7 +1395,19 @@ static inline void expand(struct zone *zone, struct page *page,
set_buddy_order(&page[size], high);
nr_added += size;
}
- account_freepages(zone, nr_added, migratetype);
+
+ return nr_added;
+}
+
+static __always_inline void page_del_and_expand(struct zone *zone,
+ struct page *page, int low,
+ int high, int migratetype)
+{
+ int nr_pages = 1 << high;
+
+ __del_page_from_free_list(page, zone, high, migratetype);
+ nr_pages -= expand(zone, page, low, high, migratetype);
+ account_freepages(zone, -nr_pages, migratetype);
}
static void check_new_page_bad(struct page *page)
@@ -1540,8 +1577,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
page = get_page_from_free_area(area, migratetype);
if (!page)
continue;
- del_page_from_free_list(page, zone, current_order, migratetype);
- expand(zone, page, order, current_order, migratetype);
+
+ page_del_and_expand(zone, page, order, current_order,
+ migratetype);
trace_mm_page_alloc_zone_locked(page, order, migratetype,
pcp_allowed_order(order) &&
migratetype < MIGRATE_PCPTYPES);
@@ -1700,27 +1738,6 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
return start_pfn;
}
-/* Split a multi-block free page into its individual pageblocks */
-static void split_large_buddy(struct zone *zone, struct page *page,
- unsigned long pfn, int order)
-{
- unsigned long end_pfn = pfn + (1 << order);
-
- VM_WARN_ON_ONCE(order <= pageblock_order);
- VM_WARN_ON_ONCE(pfn & (pageblock_nr_pages - 1));
-
- /* Caller removed page from freelist, buddy info cleared! */
- VM_WARN_ON_ONCE(PageBuddy(page));
-
- while (pfn != end_pfn) {
- int mt = get_pfnblock_migratetype(page, pfn);
-
- __free_one_page(page, pfn, zone, pageblock_order, mt, FPI_NONE);
- pfn += pageblock_nr_pages;
- page = pfn_to_page(pfn);
- }
-}
-
/**
* move_freepages_block_isolate - move free pages in block for page isolation
* @zone: the zone
@@ -1761,7 +1778,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, pfn));
set_pageblock_migratetype(page, migratetype);
- split_large_buddy(zone, buddy, pfn, order);
+ split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
return true;
}
@@ -1772,7 +1789,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(page, zone, order,
get_pfnblock_migratetype(page, pfn));
set_pageblock_migratetype(page, migratetype);
- split_large_buddy(zone, page, pfn, order);
+ split_large_buddy(zone, page, pfn, order, FPI_NONE);
return true;
}
move:
@@ -1892,9 +1909,12 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
+ unsigned int nr_added;
+
del_page_from_free_list(page, zone, current_order, block_type);
change_pageblock_range(page, current_order, start_type);
- expand(zone, page, order, current_order, start_type);
+ nr_added = expand(zone, page, order, current_order, start_type);
+ account_freepages(zone, nr_added, start_type);
return page;
}
@@ -1947,8 +1967,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
}
single_page:
- del_page_from_free_list(page, zone, current_order, block_type);
- expand(zone, page, order, current_order, block_type);
+ page_del_and_expand(zone, page, order, current_order, block_type);
return page;
}
@@ -2764,7 +2783,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order, 0);
- pgalloc_tag_split(page, 1 << order);
+ pgalloc_tag_split(page_folio(page), order, 0);
split_page_memcg(page, order, 0);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -3033,12 +3052,6 @@ struct page *rmqueue(struct zone *preferred_zone,
{
struct page *page;
- /*
- * We most definitely don't want callers attempting to
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-
if (likely(pcp_allowed_order(order))) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
@@ -3357,7 +3370,7 @@ retry:
}
if (no_fallback && nr_online_nodes > 1 &&
- zone != ac->preferred_zoneref->zone) {
+ zone != zonelist_zone(ac->preferred_zoneref)) {
int local_nid;
/*
@@ -3365,7 +3378,7 @@ retry:
* fragmenting fallbacks. Locality is more important
* than fragmentation avoidance.
*/
- local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+ local_nid = zonelist_node_idx(ac->preferred_zoneref);
if (zone_to_nid(zone) != local_nid) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
@@ -3402,7 +3415,6 @@ check_alloc_wmark:
if (cond_accept_memory(zone, order))
goto try_this_zone;
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
* Watermark failed for this zone, but see if we can
* grow this zone if it contains deferred pages.
@@ -3411,14 +3423,13 @@ check_alloc_wmark:
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
-#endif
/* Checked here to keep the fast path fast */
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
if (!node_reclaim_enabled() ||
- !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+ !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
continue;
ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3440,7 +3451,7 @@ check_alloc_wmark:
}
try_this_zone:
- page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+ page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -3457,13 +3468,11 @@ try_this_zone:
if (cond_accept_memory(zone, order))
goto try_this_zone;
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Try again if zone has deferred pages */
if (deferred_pages_enabled()) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
-#endif
}
}
@@ -4004,7 +4013,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
*/
if (alloc_flags & ALLOC_MIN_RESERVE)
alloc_flags &= ~ALLOC_CPUSET;
- } else if (unlikely(rt_task(current)) && in_task())
+ } else if (unlikely(rt_or_dl_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
@@ -4100,6 +4109,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
unsigned long min_wmark = min_wmark_pages(zone);
bool wmark;
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
+ !__cpuset_zone_allowed(zone, gfp_mask))
+ continue;
+
available = reclaimable = zone_reclaimable_pages(zone);
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -4175,6 +4189,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
bool can_compact = gfp_compaction_allowed(gfp_mask);
+ bool nofail = gfp_mask & __GFP_NOFAIL;
const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
unsigned int alloc_flags;
@@ -4187,6 +4202,25 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int zonelist_iter_cookie;
int reserve_flags;
+ if (unlikely(nofail)) {
+ /*
+ * We most definitely don't want callers attempting to
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE(order > 1);
+ /*
+ * We also don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
+ * otherwise we may end up in a lockup.
+ */
+ WARN_ON_ONCE(!can_direct_reclaim);
+ /*
+ * PF_MEMALLOC request from this context is rather bizarre
+ * because we cannot reclaim anything and can only loop waiting
+ * for somebody else to do the work for us.
+ */
+ WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+ }
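For illustration, a self-contained userspace predicate mirroring the three __GFP_NOFAIL constraints warned about above; the flag bits and the function are hypothetical stand-ins, not kernel API:

#include <stdbool.h>

#define GFP_NOFAIL         (1u << 0)   /* illustrative bits only */
#define GFP_DIRECT_RECLAIM (1u << 1)

static bool nofail_request_is_sane(unsigned int gfp, unsigned int order,
                                   bool pf_memalloc)
{
	if (!(gfp & GFP_NOFAIL))
		return true;
	/* orders above 1 cannot be guaranteed: WARN_ON_ONCE(order > 1) */
	if (order > 1)
		return false;
	/* without direct reclaim the retry loop can never make progress */
	if (!(gfp & GFP_DIRECT_RECLAIM))
		return false;
	/* a PF_MEMALLOC context cannot reclaim, so it could only spin */
	return !pf_memalloc;
}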
+
restart:
compaction_retries = 0;
no_progress_loops = 0;
@@ -4209,7 +4243,7 @@ restart:
*/
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
- if (!ac->preferred_zoneref->zone)
+ if (!zonelist_zone(ac->preferred_zoneref))
goto nopage;
/*
@@ -4221,7 +4255,7 @@ restart:
struct zoneref *z = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx,
&cpuset_current_mems_allowed);
- if (!z->zone)
+ if (!zonelist_zone(z))
goto nopage;
}
@@ -4404,30 +4438,16 @@ nopage:
* Make sure that __GFP_NOFAIL request doesn't leak out and make sure
* we always retry
*/
- if (gfp_mask & __GFP_NOFAIL) {
+ if (unlikely(nofail)) {
/*
- * All existing users of the __GFP_NOFAIL are blockable, so warn
- * of any new users that actually require GFP_NOWAIT
+ * Without direct reclaim we can't do anything to free memory;
+ * disregard these unreasonable nofail requests and still
+ * return NULL.
*/
- if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
+ if (!can_direct_reclaim)
goto fail;
/*
- * PF_MEMALLOC request from this context is rather bizarre
- * because we cannot reclaim anything and only can loop waiting
- * for somebody to do a work for us
- */
- WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
-
- /*
- * non failing costly orders are a hard requirement which we
- * are not prepared for much so let's warn about these users
- * so that we can identify them and convert them to something
- * else.
- */
- WARN_ON_ONCE_GFP(costly_order, gfp_mask);
-
- /*
* Help non-failing allocations by giving some access to memory
* reserves normally used for high priority non-blocking
* allocations but do not use ALLOC_NO_WATERMARKS because this
@@ -4578,17 +4598,28 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
continue;
}
- if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
- zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+ if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+ zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
goto failed;
}
+ cond_accept_memory(zone, 0);
+retry_this_zone:
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
if (zone_watermark_fast(zone, 0, mark,
zonelist_zone_idx(ac.preferred_zoneref),
alloc_flags, gfp)) {
break;
}
+
+ if (cond_accept_memory(zone, 0))
+ goto retry_this_zone;
+
+ /* Try again if zone has deferred pages */
+ if (deferred_pages_enabled()) {
+ if (_deferred_grow_zone(zone, 0))
+ goto retry_this_zone;
+ }
}
/*
@@ -4638,7 +4669,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
pcp_trylock_finish(UP_flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
- zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+ zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
out:
return nr_populated;
@@ -4696,7 +4727,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
* Forbid the first pass from falling back to types that fragment
* memory until all local zones are considered.
*/
- alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+ alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
/* First allocation attempt */
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -4950,7 +4981,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
struct page *last = page + nr;
split_page_owner(page, order, 0);
- pgalloc_tag_split(page, 1 << order);
+ pgalloc_tag_split(page_folio(page), order, 0);
split_page_memcg(page, order, 0);
while (page < --last)
set_page_refcounted(last);
@@ -5301,7 +5332,7 @@ int local_memory_node(int node)
z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
gfp_zone(GFP_KERNEL),
NULL);
- return zone_to_nid(z->zone);
+ return zonelist_node_idx(z);
}
#endif
@@ -6433,6 +6464,31 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
return (ret < 0) ? ret : 0;
}
+static void split_free_pages(struct list_head *list)
+{
+ int order;
+
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page, *next;
+ int nr_pages = 1 << order;
+
+ list_for_each_entry_safe(page, next, &list[order], lru) {
+ int i;
+
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (!order)
+ continue;
+
+ split_page(page, order);
+
+ /* Add all subpages to the order-0 head, in sequence. */
+ list_del(&page->lru);
+ for (i = 0; i < nr_pages; i++)
+ list_add_tail(&page[i].lru, &list[0]);
+ }
+ }
+}
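A minimal userspace model of what split_free_pages() does with each higher-order block: explode it into 1 << order order-0 entries appended in PFN sequence. Plain arrays stand in for the struct page LRU lists, and the PFN values are made up:

#include <stdio.h>

int main(void)
{
	unsigned long order2_blocks[] = { 64, 128 }; /* two order-2 blocks */
	unsigned long order0[8];
	int n = 0;

	for (int b = 0; b < 2; b++)
		for (int i = 0; i < (1 << 2); i++)
			order0[n++] = order2_blocks[b] + i; /* subpages stay in sequence */

	for (int i = 0; i < n; i++)
		printf("order-0 pfn %lu\n", order0[i]);
	return 0;
}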
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -6545,12 +6601,25 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
goto done;
}
- /* Free head and tail (if any) */
- if (start != outer_start)
- free_contig_range(outer_start, start - outer_start);
- if (end != outer_end)
- free_contig_range(end, outer_end - end);
+ if (!(gfp_mask & __GFP_COMP)) {
+ split_free_pages(cc.freepages);
+ /* Free head and tail (if any) */
+ if (start != outer_start)
+ free_contig_range(outer_start, start - outer_start);
+ if (end != outer_end)
+ free_contig_range(end, outer_end - end);
+ } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
+ struct page *head = pfn_to_page(start);
+ int order = ilog2(end - start);
+
+ check_new_pages(head, order);
+ prep_new_page(head, order, gfp_mask, 0);
+ } else {
+ ret = -EINVAL;
+ WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
+ start, end, outer_start, outer_end);
+ }
done:
undo_isolate_page_range(start, end, migratetype);
return ret;
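The __GFP_COMP branch above only forms a compound page when the allocated range matches the request exactly and spans a power-of-two number of pages; anything else fails with -EINVAL and a WARN. The eligibility test, restated as a self-contained predicate:

#include <stdbool.h>

static bool can_make_compound(unsigned long start, unsigned long end,
                              unsigned long outer_start, unsigned long outer_end)
{
	unsigned long nr = end - start;

	/* exact match and is_power_of_2(nr) */
	return start == outer_start && end == outer_end &&
	       nr != 0 && (nr & (nr - 1)) == 0;
}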
@@ -6659,6 +6728,18 @@ struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
unsigned long count = 0;
+ struct folio *folio = pfn_folio(pfn);
+
+ if (folio_test_large(folio)) {
+ int expected = folio_nr_pages(folio);
+
+ if (nr_pages == expected)
+ folio_put(folio);
+ else
+ WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
+ pfn, nr_pages, expected);
+ return;
+ }
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -6927,23 +7008,50 @@ early_param("accept_memory", accept_memory_parse);
static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
phys_addr_t start = page_to_phys(page);
- phys_addr_t end = start + (PAGE_SIZE << order);
- return range_contains_unaccepted_memory(start, end);
+ return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}
-static void accept_page(struct page *page, unsigned int order)
+static void __accept_page(struct zone *zone, unsigned long *flags,
+ struct page *page)
{
- phys_addr_t start = page_to_phys(page);
+ bool last;
+
+ list_del(&page->lru);
+ last = list_empty(&zone->unaccepted_pages);
+
+ account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+ __ClearPageUnaccepted(page);
+ spin_unlock_irqrestore(&zone->lock, *flags);
+
+ accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
+
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
- accept_memory(start, start + (PAGE_SIZE << order));
+ if (last)
+ static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ if (!PageUnaccepted(page)) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+ }
+
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
}
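Both callers above rely on a lock hand-off: __accept_page() is entered with zone->lock held and releases it itself, so the heavy accept work runs unlocked while the fast "nothing to do" path unlocks and returns without calling the helper. A minimal pthread model of the same shape — a userspace sketch, not kernel code:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int unaccepted;

static void do_accept_locked(void) /* entered locked; unlocks */
{
	unaccepted = 0;
	pthread_mutex_unlock(&lock);
	/* ... heavy work (accept memory, free pages) runs unlocked ... */
}

void accept_one(void)
{
	pthread_mutex_lock(&lock);
	if (!unaccepted) {
		pthread_mutex_unlock(&lock);
		return;
	}
	do_accept_locked(); /* returns with the lock released */
}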
static bool try_to_accept_memory_one(struct zone *zone)
{
unsigned long flags;
struct page *page;
- bool last;
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
@@ -6953,23 +7061,17 @@ static bool try_to_accept_memory_one(struct zone *zone)
return false;
}
- list_del(&page->lru);
- last = list_empty(&zone->unaccepted_pages);
-
- account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
- __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- accept_page(page, MAX_PAGE_ORDER);
-
- __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
-
- if (last)
- static_branch_dec(&zones_with_unaccepted_pages);
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
return true;
}
+static inline bool has_unaccepted_memory(void)
+{
+ return static_branch_unlikely(&zones_with_unaccepted_pages);
+}
+
static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
long to_accept;
@@ -6981,8 +7083,8 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
if (list_empty(&zone->unaccepted_pages))
return false;
- /* How much to accept to get to high watermark? */
- to_accept = high_wmark_pages(zone) -
+ /* How much to accept to get to promo watermark? */
+ to_accept = promo_wmark_pages(zone) -
(zone_page_state(zone, NR_FREE_PAGES) -
__zone_watermark_unusable_free(zone, order, 0) -
zone_page_state(zone, NR_UNACCEPTED));
@@ -6997,11 +7099,6 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
return ret;
}
-static inline bool has_unaccepted_memory(void)
-{
- return static_branch_unlikely(&zones_with_unaccepted_pages);
-}
-
static bool __free_unaccepted(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -7016,6 +7113,7 @@ static bool __free_unaccepted(struct page *page)
list_add_tail(&page->lru, &zone->unaccepted_pages);
account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+ __SetPageUnaccepted(page);
spin_unlock_irqrestore(&zone->lock, flags);
if (first)
@@ -7031,20 +7129,11 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
return false;
}
-static void accept_page(struct page *page, unsigned int order)
-{
-}
-
static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
return false;
}
-static inline bool has_unaccepted_memory(void)
-{
- return false;
-}
-
static bool __free_unaccepted(struct page *page)
{
BUILD_BUG();
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 0153f5bb3161..b249d15af9dd 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -13,6 +13,11 @@
#include <linux/bug.h>
#include <asm/page.h>
+static bool track_protection(struct page_counter *c)
+{
+ return c->protection_support;
+}
+
static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
@@ -57,7 +62,8 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
new = 0;
atomic_long_set(&counter->usage, new);
}
- propagate_protected_usage(counter, new);
+ if (track_protection(counter))
+ propagate_protected_usage(counter, new);
}
/**
@@ -70,18 +76,33 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
+ bool protection = track_protection(counter);
for (c = counter; c; c = c->parent) {
long new;
new = atomic_long_add_return(nr_pages, &c->usage);
- propagate_protected_usage(c, new);
+ if (protection)
+ propagate_protected_usage(c, new);
/*
* This is indeed racy, but we can live with some
* inaccuracy in the watermark.
+ *
+ * Notably, we have two watermarks to allow for both a globally
+ * visible peak and one that can be reset at a smaller scope.
+ *
+ * Since we reset both watermarks when the global reset occurs,
+ * we can guarantee that watermark >= local_watermark, so we
+ * don't need to do both comparisons every time.
+ *
+ * On systems with branch predictors, the inner condition should
+ * be almost free.
*/
- if (new > READ_ONCE(c->watermark))
- WRITE_ONCE(c->watermark, new);
+ if (new > READ_ONCE(c->local_watermark)) {
+ WRITE_ONCE(c->local_watermark, new);
+ if (new > READ_ONCE(c->watermark))
+ WRITE_ONCE(c->watermark, new);
+ }
}
}
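A plain-C model of the invariant the comment describes, watermark >= local_watermark, with a single comparison on the hot path; ordinary longs stand in for the atomic/READ_ONCE machinery:

static long usage, watermark, local_watermark;

static void charge(long nr_pages)
{
	long new = usage += nr_pages;

	if (new > local_watermark) {
		local_watermark = new;
		if (new > watermark)
			watermark = new;
	}
}

static void reset_local_peak(void)
{
	/* watermark keeps the global peak; only the local one resets */
	local_watermark = usage;
}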
@@ -99,6 +120,7 @@ bool page_counter_try_charge(struct page_counter *counter,
struct page_counter **fail)
{
struct page_counter *c;
+ bool protection = track_protection(counter);
for (c = counter; c; c = c->parent) {
long new;
@@ -128,13 +150,15 @@ bool page_counter_try_charge(struct page_counter *counter,
*fail = c;
goto failed;
}
- propagate_protected_usage(c, new);
- /*
- * Just like with failcnt, we can live with some
- * inaccuracy in the watermark.
- */
- if (new > READ_ONCE(c->watermark))
- WRITE_ONCE(c->watermark, new);
+ if (protection)
+ propagate_protected_usage(c, new);
+
+ /* see comment on page_counter_charge */
+ if (new > READ_ONCE(c->local_watermark)) {
+ WRITE_ONCE(c->local_watermark, new);
+ if (new > READ_ONCE(c->watermark))
+ WRITE_ONCE(c->watermark, new);
+ }
}
return true;
@@ -264,6 +288,7 @@ int page_counter_memparse(const char *buf, const char *max,
}
+#ifdef CONFIG_MEMCG
/*
* This function calculates an individual page counter's effective
* protection which is derived from its own memory.min/low, its
@@ -435,3 +460,4 @@ void page_counter_calculate_protection(struct page_counter *root,
atomic_long_read(&parent->children_low_usage),
recursive_protection));
}
+#endif /* CONFIG_MEMCG */
diff --git a/mm/page_io.c b/mm/page_io.c
index ff8c99ee3af7..78bc88acee79 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -172,6 +172,60 @@ bad_bmap:
goto out;
}
+static bool is_folio_zero_filled(struct folio *folio)
+{
+ unsigned int pos, last_pos;
+ unsigned long *data;
+ unsigned int i;
+
+ last_pos = PAGE_SIZE / sizeof(*data) - 1;
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ data = kmap_local_folio(folio, i * PAGE_SIZE);
+ /*
+ * Check last word first, in case the page is zero-filled at
+ * the start and has non-zero data at the end, which is common
+ * in real-world workloads.
+ */
+ if (data[last_pos]) {
+ kunmap_local(data);
+ return false;
+ }
+ for (pos = 0; pos < last_pos; pos++) {
+ if (data[pos]) {
+ kunmap_local(data);
+ return false;
+ }
+ }
+ kunmap_local(data);
+ }
+
+ return true;
+}
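The same last-word-first scan, as a self-contained userspace check over one page-sized buffer:

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ 4096UL

static bool buf_is_zero_filled(const unsigned long *data)
{
	size_t last = PAGE_SZ / sizeof(*data) - 1;

	if (data[last]) /* the tail is the most likely non-zero word */
		return false;
	for (size_t i = 0; i < last; i++)
		if (data[i])
			return false;
	return true;
}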
+
+static void swap_zeromap_folio_set(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ set_bit(swp_offset(entry), sis->zeromap);
+ }
+}
+
+static void swap_zeromap_folio_clear(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ clear_bit(swp_offset(entry), sis->zeromap);
+ }
+}
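A userspace model of the zeromap bookkeeping: one bit per swap slot, set for every page of the folio on zero writeout and cleared before any real write. A plain array plays the role of sis->zeromap; the kernel uses atomic set_bit()/clear_bit() instead:

#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NR_SLOTS 1024

static unsigned long zeromap[NR_SLOTS / BITS_PER_LONG];

static void zeromap_set(unsigned long slot, int nr)
{
	for (int i = 0; i < nr; i++)
		zeromap[(slot + i) / BITS_PER_LONG] |=
			1UL << ((slot + i) % BITS_PER_LONG);
}

static void zeromap_clear(unsigned long slot, int nr)
{
	for (int i = 0; i < nr; i++)
		zeromap[(slot + i) / BITS_PER_LONG] &=
			~(1UL << ((slot + i) % BITS_PER_LONG));
}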
+
/*
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
@@ -195,6 +249,25 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_unlock(folio);
return ret;
}
+
+ /*
+ * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
+ * The bits in zeromap are protected by the locked swapcache folio
+ * and atomic updates are used to protect against read-modify-write
+ * corruption due to other zero swap entries seeing concurrent updates.
+ */
+ if (is_folio_zero_filled(folio)) {
+ swap_zeromap_folio_set(folio);
+ folio_unlock(folio);
+ return 0;
+ } else {
+ /*
+ * Clear bits this folio occupies in the zeromap to prevent
+ * zero data being read in from any previous zero writes that
+ * occupied the same swap entries.
+ */
+ swap_zeromap_folio_clear(folio);
+ }
if (zswap_store(folio)) {
folio_unlock(folio);
return 0;
@@ -273,9 +346,7 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
* memory for allocating transmit buffers.
* Mark the page dirty and avoid
* folio_rotate_reclaimable but rate-limit the
- * messages but do not flag PageError like
- * the normal direct-to-bio case as it could
- * be temporary.
+ * messages.
*/
pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
ret, swap_dev_pos(page_swap_entry(page)));
@@ -429,6 +500,28 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
+static bool swap_read_folio_zeromap(struct folio *folio)
+{
+ int nr_pages = folio_nr_pages(folio);
+ bool is_zeromap;
+
+ /*
+ * Swapping in a large folio that is partially in the zeromap is not
+ * currently handled. Return true without marking the folio uptodate so
+ * that an IO error is emitted (e.g. do_swap_page() will sigbus).
+ */
+ if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
+ &is_zeromap) != nr_pages))
+ return true;
+
+ if (!is_zeromap)
+ return false;
+
+ folio_zero_range(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
+ return true;
+}
+
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
@@ -519,9 +612,18 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
}
delayacct_swapin_start();
- if (zswap_load(folio)) {
+ if (swap_read_folio_zeromap(folio)) {
+ folio_unlock(folio);
+ goto finish;
+ } else if (zswap_load(folio)) {
folio_unlock(folio);
- } else if (data_race(sis->flags & SWP_FS_OPS)) {
+ goto finish;
+ }
+
+ /* We have to read from slower devices. Increase zswap protection. */
+ zswap_folio_swapin(folio);
+
+ if (data_race(sis->flags & SWP_FS_OPS)) {
swap_read_folio_fs(folio, plug);
} else if (synchronous) {
swap_read_folio_bdev_sync(folio, sis);
@@ -529,6 +631,7 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
swap_read_folio_bdev_async(folio, sis);
}
+finish:
if (workingset) {
delayacct_thrashing_end(&in_thrashing);
psi_memstall_leave(&pflags);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 042937d5abe4..7e04047977cf 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -152,6 +152,9 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
unsigned long flags;
unsigned long check_unmovable_start, check_unmovable_end;
+ if (PageUnaccepted(page))
+ accept_page(page);
+
spin_lock_irqsave(&zone->lock, flags);
/*
@@ -367,6 +370,11 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
VM_BUG_ON(!page);
pfn = page_to_pfn(page);
+ if (PageUnaccepted(page)) {
+ pfn += MAX_ORDER_NR_PAGES;
+ continue;
+ }
+
if (PageBuddy(page)) {
int order = buddy_order(page);
@@ -395,30 +403,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
unsigned long head_pfn = page_to_pfn(head);
unsigned long nr_pages = compound_nr(head);
- if (head_pfn + nr_pages <= boundary_pfn) {
- pfn = head_pfn + nr_pages;
- continue;
- }
-
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
- if (PageHuge(page)) {
- int page_mt = get_pageblock_migratetype(page);
- struct compact_control cc = {
- .nr_migratepages = 0,
- .order = -1,
- .zone = page_zone(pfn_to_page(head_pfn)),
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- .no_set_skip_hint = true,
- .gfp_mask = gfp_flags,
- .alloc_contig = true,
- };
- INIT_LIST_HEAD(&cc.migratepages);
-
- ret = __alloc_contig_migrate_range(&cc, head_pfn,
- head_pfn + nr_pages, page_mt);
- if (ret)
- goto failed;
+ if (head_pfn + nr_pages <= boundary_pfn ||
+ PageHuge(page)) {
pfn = head_pfn + nr_pages;
continue;
}
@@ -432,7 +418,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
*/
VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
-#endif
+
goto failed;
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ae2f08ce991b..461ea3bbd8d9 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -3,6 +3,8 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
/*
* We want to know the real level where an entry is located, ignoring any
@@ -654,3 +656,203 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
return err;
}
+
+/**
+ * folio_walk_start - walk the page tables to a folio
+ * @fw: filled with information on success.
+ * @vma: the VMA.
+ * @addr: the virtual address to use for the page table walk.
+ * @flags: flags modifying which folios to walk to.
+ *
+ * Walk the page tables using @addr in a given @vma to a mapped folio and
+ * return the folio, making sure that the page table entry referenced by
+ * @addr cannot change until folio_walk_end() is called.
+ *
+ * By default, this function returns only folios that are not special (e.g., not
+ * the zeropage) and never returns folios that are supposed to be ignored by the
+ * VM as documented by vm_normal_page(). If requested, zeropages will be
+ * returned as well.
+ *
+ * By default, this function only considers present page table entries.
+ * If requested, it will also consider migration entries.
+ *
+ * If this function returns NULL it might either indicate "there is nothing" or
+ * "there is nothing suitable".
+ *
+ * On success, @fw is filled and the function returns the folio while the PTL
+ * is still held and folio_walk_end() must be called to clean up,
+ * releasing any held locks. The returned folio must *not* be used after the
+ * call to folio_walk_end(), unless a short-term folio reference is taken before
+ * that call.
+ *
+ * @fw->page will correspond to the page that is effectively referenced by
+ * @addr. However, for migration entries and shared zeropages @fw->page is
+ * set to NULL. Note that large folios might be mapped by multiple page table
+ * entries, and this function will always only lookup a single entry as
+ * specified by @addr, which might or might not cover more than a single page of
+ * the returned folio.
+ *
+ * This function must *not* be used as a naive replacement for
+ * get_user_pages() / pin_user_pages(), especially not to perform DMA or
+ * to carelessly modify page content. This function may *only* be used to grab
+ * short-term folio references, never to grab long-term folio references.
+ *
+ * Using the page table entry pointers in @fw for reading or modifying the
+ * entry should be avoided where possible: however, there might be valid
+ * use cases.
+ *
+ * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care.
+ * For example, PMD page table sharing might require prior unsharing. Also,
+ * logical hugetlb entries might span multiple physical page table entries,
+ * which *must* be modified in a single operation (set_huge_pte_at(),
+ * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might
+ * not correspond to the first physical entry of a logical hugetlb entry.
+ *
+ * The mmap lock must be held in read mode.
+ *
+ * Return: folio pointer on success, otherwise NULL.
+ */
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags)
+{
+ unsigned long entry_size;
+ bool expose_page = true;
+ struct page *page;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+
+ mmap_assert_locked(vma->vm_mm);
+ vma_pgtable_walk_begin(vma);
+
+ if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
+ goto not_found;
+
+ pgdp = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none_or_clear_bad(pgdp))
+ goto not_found;
+
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none_or_clear_bad(p4dp))
+ goto not_found;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ if (pud_none(pud))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && pud_leaf(pud)) {
+ ptl = pud_lock(vma->vm_mm, pudp);
+ pud = pudp_get(pudp);
+
+ entry_size = PUD_SIZE;
+ fw->level = FW_LEVEL_PUD;
+ fw->pudp = pudp;
+ fw->pud = pud;
+
+ if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (!pud_leaf(pud)) {
+ spin_unlock(ptl);
+ goto pmd_table;
+ }
+ /*
+ * TODO: vm_normal_page_pud() will be handy once we want to
+ * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
+ */
+ page = pud_page(pud);
+ goto found;
+ }
+
+pmd_table:
+ VM_WARN_ON_ONCE(pud_leaf(*pudp));
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get_lockless(pmdp);
+ if (pmd_none(pmd))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && pmd_leaf(pmd)) {
+ ptl = pmd_lock(vma->vm_mm, pmdp);
+ pmd = pmdp_get(pmdp);
+
+ entry_size = PMD_SIZE;
+ fw->level = FW_LEVEL_PMD;
+ fw->pmdp = pmdp;
+ fw->pmd = pmd;
+
+ if (pmd_none(pmd)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (!pmd_leaf(pmd)) {
+ spin_unlock(ptl);
+ goto pte_table;
+ } else if (pmd_present(pmd)) {
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (page) {
+ goto found;
+ } else if ((flags & FW_ZEROPAGE) &&
+ is_huge_zero_pmd(pmd)) {
+ page = pfn_to_page(pmd_pfn(pmd));
+ expose_page = false;
+ goto found;
+ }
+ } else if ((flags & FW_MIGRATION) &&
+ is_pmd_migration_entry(pmd)) {
+ swp_entry_t entry = pmd_to_swp_entry(pmd);
+
+ page = pfn_swap_entry_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
+pte_table:
+ VM_WARN_ON_ONCE(pmd_leaf(pmdp_get_lockless(pmdp)));
+ ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
+ if (!ptep)
+ goto not_found;
+ pte = ptep_get(ptep);
+
+ entry_size = PAGE_SIZE;
+ fw->level = FW_LEVEL_PTE;
+ fw->ptep = ptep;
+ fw->pte = pte;
+
+ if (pte_present(pte)) {
+ page = vm_normal_page(vma, addr, pte);
+ if (page)
+ goto found;
+ if ((flags & FW_ZEROPAGE) &&
+ is_zero_pfn(pte_pfn(pte))) {
+ page = pfn_to_page(pte_pfn(pte));
+ expose_page = false;
+ goto found;
+ }
+ } else if (!pte_none(pte)) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if ((flags & FW_MIGRATION) &&
+ is_migration_entry(entry)) {
+ page = pfn_swap_entry_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ }
+ pte_unmap_unlock(ptep, ptl);
+not_found:
+ vma_pgtable_walk_end(vma);
+ return NULL;
+found:
+ if (expose_page)
+ /* Note: Offset from the mapped page, not the folio start. */
+ fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
+ else
+ fw->page = NULL;
+ fw->ptl = ptl;
+ return page_folio(page);
+}
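A usage sketch distilled from the kerneldoc above (kernel context assumed; the exact folio_walk_end() signature is taken on trust from the surrounding series): the caller holds the mmap lock for read, the PTL is held between start and end, and the folio must not be touched after the end call:

static bool addr_maps_anon_folio(struct vm_area_struct *vma, unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio;
	bool anon = false;

	/* caller holds the mmap lock in read mode */
	folio = folio_walk_start(&fw, vma, addr, 0);
	if (folio) {
		anon = folio_test_anon(folio); /* PTL still held here */
		folio_walk_end(&fw, vma);      /* drops PTL; folio now stale */
	}
	return anon;
}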
diff --git a/mm/percpu.c b/mm/percpu.c
index 20d91af8c033..da21680ff294 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2217,37 +2217,6 @@ static void pcpu_balance_workfn(struct work_struct *work)
}
/**
- * pcpu_alloc_size - the size of the dynamic percpu area
- * @ptr: pointer to the dynamic percpu area
- *
- * Returns the size of the @ptr allocation. This is undefined for statically
- * defined percpu variables as there is no corresponding chunk->bound_map.
- *
- * RETURNS:
- * The size of the dynamic percpu area.
- *
- * CONTEXT:
- * Can be called from atomic context.
- */
-size_t pcpu_alloc_size(void __percpu *ptr)
-{
- struct pcpu_chunk *chunk;
- unsigned long bit_off, end;
- void *addr;
-
- if (!ptr)
- return 0;
-
- addr = __pcpu_ptr_to_addr(ptr);
- /* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
- chunk = pcpu_chunk_addr_search(addr);
- bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
- end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
- bit_off + 1);
- return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
-}
-
-/**
* free_percpu - free percpu area
* @ptr: pointer to area to free
*
diff --git a/mm/readahead.c b/mm/readahead.c
index 517c0be7ce66..3dc6c7a128dd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -206,9 +206,10 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
unsigned long nr_to_read, unsigned long lookahead_size)
{
struct address_space *mapping = ractl->mapping;
- unsigned long index = readahead_index(ractl);
+ unsigned long ra_folio_index, index = readahead_index(ractl);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
- unsigned long i;
+ unsigned long mark, i = 0;
+ unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
/*
* Partway through the readahead operation, we will have added
@@ -223,10 +224,24 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
unsigned int nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
+ index = mapping_align_index(mapping, index);
+
+ /*
+ * Since iterator `i` advances in min_nrpages steps, round the
+ * start of the lookahead ("async") region up to min_nrpages so
+ * the readahead flag is set on an index the loop actually visits.
+ */
+ ra_folio_index = round_up(readahead_index(ractl) + nr_to_read - lookahead_size,
+ min_nrpages);
+ mark = ra_folio_index - index;
+ nr_to_read += readahead_index(ractl) - index;
+ ractl->_index = index;
+
/*
* Preallocate as many pages as we will need.
*/
- for (i = 0; i < nr_to_read; i++) {
+ while (i < nr_to_read) {
struct folio *folio = xa_load(&mapping->i_pages, index + i);
int ret;
@@ -240,12 +255,13 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* not worth getting one just for that.
*/
read_pages(ractl);
- ractl->_index++;
- i = ractl->_index + ractl->_nr_pages - index - 1;
+ ractl->_index += min_nrpages;
+ i = ractl->_index + ractl->_nr_pages - index;
continue;
}
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask,
+ mapping_min_folio_order(mapping));
if (!folio)
break;
@@ -255,14 +271,15 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
if (ret == -ENOMEM)
break;
read_pages(ractl);
- ractl->_index++;
- i = ractl->_index + ractl->_nr_pages - index - 1;
+ ractl->_index += min_nrpages;
+ i = ractl->_index + ractl->_nr_pages - index;
continue;
}
- if (i == nr_to_read - lookahead_size)
+ if (i == mark)
folio_set_readahead(folio);
ractl->_workingset |= folio_test_workingset(folio);
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
+ i += min_nrpages;
}
/*
@@ -438,26 +455,41 @@ void page_cache_ra_order(struct readahead_control *ractl,
struct address_space *mapping = ractl->mapping;
pgoff_t start = readahead_index(ractl);
pgoff_t index = start;
+ unsigned int min_order = mapping_min_folio_order(mapping);
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
+ unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));
- if (!mapping_large_folio_support(mapping) || ra->size < 4)
+ /*
+ * Fall back when ra->size < min_ra_size, since each folio has to
+ * span at least min_nrpages pages anyway.
+ */
+ if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
goto fallback;
limit = min(limit, index + ra->size - 1);
- if (new_order < MAX_PAGECACHE_ORDER)
+ if (new_order < mapping_max_folio_order(mapping))
new_order += 2;
- new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+ new_order = min(mapping_max_folio_order(mapping), new_order);
new_order = min_t(unsigned int, new_order, ilog2(ra->size));
+ new_order = max(new_order, min_order);
/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
+ /*
+ * If new_order is greater than min_order and index is already
+ * aligned to new_order, this is a no-op: an index aligned to
+ * new_order is necessarily aligned to min_order as well.
+ */
+ ractl->_index = mapping_align_index(mapping, index);
+ index = readahead_index(ractl);
+
while (index <= limit) {
unsigned int order = new_order;
@@ -465,7 +497,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
if (index & ((1UL << order) - 1))
order = __ffs(index);
/* Don't allocate pages past EOF */
- while (index + (1UL << order) - 1 > limit)
+ while (order > min_order && index + (1UL << order) - 1 > limit)
order--;
err = ra_alloc_folio(ractl, index, mark, order, gfp);
if (err)
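The order chosen for each folio follows the clamping in the hunk above: grow by two orders per round, cap at the mapping's maximum order and at the readahead window, floor at the mapping's minimum order. Restated as a self-contained helper (names are illustrative, not kernel API):

static unsigned int ilog2u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int pick_order(unsigned int prev_order, unsigned int min_order,
                               unsigned int max_order, unsigned long ra_size)
{
	unsigned int order = prev_order;

	if (order < max_order)
		order += 2;                /* grow two orders per round */
	if (order > max_order)
		order = max_order;
	if (order > ilog2u(ra_size))       /* never exceed the ra window */
		order = ilog2u(ra_size);
	if (order < min_order)
		order = min_order;         /* large-block filesystem floor */
	return order;
}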
@@ -646,7 +678,7 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
ret = -EBADF;
f = fdget(fd);
- if (!f.file || !(f.file->f_mode & FMODE_READ))
+ if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ))
goto out;
/*
@@ -655,12 +687,12 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
* on this file, then we must return -EINVAL.
*/
ret = -EINVAL;
- if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- (!S_ISREG(file_inode(f.file)->i_mode) &&
- !S_ISBLK(file_inode(f.file)->i_mode)))
+ if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
+ (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
+ !S_ISBLK(file_inode(fd_file(f))->i_mode)))
goto out;
- ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+ ret = vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
out:
fdput(f);
return ret;
@@ -703,8 +735,15 @@ void readahead_expand(struct readahead_control *ractl,
struct file_ra_state *ra = ractl->ra;
pgoff_t new_index, new_nr_pages;
gfp_t gfp_mask = readahead_gfp_mask(mapping);
+ unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
+ unsigned int min_order = mapping_min_folio_order(mapping);
new_index = new_start / PAGE_SIZE;
+ /*
+ * Readahead code should have aligned the ractl->_index to
+ * min_nrpages before calling readahead aops.
+ */
+ VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
/* Expand the leading edge downwards */
while (ractl->_index > new_index) {
@@ -714,9 +753,11 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask, min_order);
if (!folio)
return;
+
+ index = mapping_align_index(mapping, index);
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
folio_put(folio);
return;
@@ -726,7 +767,7 @@ void readahead_expand(struct readahead_control *ractl,
ractl->_workingset = true;
psi_memstall_enter(&ractl->_pflags);
}
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
ractl->_index = folio->index;
}
@@ -741,9 +782,11 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask, min_order);
if (!folio)
return;
+
+ index = mapping_align_index(mapping, index);
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
folio_put(folio);
return;
@@ -753,10 +796,10 @@ void readahead_expand(struct readahead_control *ractl,
ractl->_workingset = true;
psi_memstall_enter(&ractl->_pflags);
}
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
if (ra) {
- ra->size++;
- ra->async_size++;
+ ra->size += min_nrpages;
+ ra->async_size += min_nrpages;
}
}
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 2490e727e2dc..a8797d1b3d49 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -75,6 +75,7 @@
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
+#include <linux/oom.h>
#include <asm/tlbflush.h>
@@ -870,6 +871,20 @@ static bool folio_referenced_one(struct folio *folio,
continue;
}
+ /*
+ * Skip the non-shared swapbacked folio mapped solely by
+ * the exiting or OOM-reaped process. This avoids redundant
+ * swap-out followed by an immediate unmap.
+ */
+ if ((!atomic_read(&vma->vm_mm->mm_users) ||
+ check_stable_address_space(vma->vm_mm)) &&
+ folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_likely_mapped_shared(folio)) {
+ pra->referenced = -1;
+ page_vma_mapped_walk_done(&pvmw);
+ return false;
+ }
+
if (pvmw.pte) {
if (lru_gen_enabled() &&
pte_young(ptep_get(pvmw.pte))) {
@@ -1143,25 +1158,25 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
{
atomic_t *mapped = &folio->_nr_pages_mapped;
const int orig_nr_pages = nr_pages;
- int first, nr = 0;
+ int first = 0, nr = 0;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- nr = atomic_inc_and_test(&page->_mapcount);
+ nr = atomic_inc_and_test(&folio->_mapcount);
break;
}
do {
- first = atomic_inc_and_test(&page->_mapcount);
- if (first) {
- first = atomic_inc_return_relaxed(mapped);
- if (first < ENTIRELY_MAPPED)
- nr++;
- }
+ first += atomic_inc_and_test(&page->_mapcount);
} while (page++, --nr_pages > 0);
+
+ if (first &&
+ atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
+ nr = first;
+
atomic_add(orig_nr_pages, &folio->_large_mapcount);
break;
case RMAP_LEVEL_PMD:
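The rewritten PTE loop above batches the shared-counter update: each per-page atomic_inc_and_test() still runs, but the 0->1 transitions are summed locally in `first` and folded into _nr_pages_mapped with one atomic_add_return_relaxed() instead of one RMW per page. A plain-C model of the accounting, where ints stand in for atomics and _mapcount starts at -1 when unmapped:

static int mapped; /* models folio->_nr_pages_mapped */

static int add_rmap_ptes(int *page_mapcount, int nr_pages, int entirely_mapped)
{
	int first = 0, nr = 0;

	for (int i = 0; i < nr_pages; i++)
		first += (++page_mapcount[i] == 0); /* -1 -> 0: newly mapped */

	if (first && (mapped += first) < entirely_mapped)
		nr = first;
	return nr; /* pages that became mapped, for the stats update */
}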
@@ -1452,6 +1467,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
}
__folio_mod_stat(folio, nr, nr_pmdmapped);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}
static __always_inline void __folio_add_file_rmap(struct folio *folio,
@@ -1512,7 +1528,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
enum rmap_level level)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
- int last, nr = 0, nr_pmdmapped = 0;
+ int last = 0, nr = 0, nr_pmdmapped = 0;
bool partially_mapped = false;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
@@ -1520,20 +1536,19 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- nr = atomic_add_negative(-1, &page->_mapcount);
+ nr = atomic_add_negative(-1, &folio->_mapcount);
break;
}
atomic_sub(nr_pages, &folio->_large_mapcount);
do {
- last = atomic_add_negative(-1, &page->_mapcount);
- if (last) {
- last = atomic_dec_return_relaxed(mapped);
- if (last < ENTIRELY_MAPPED)
- nr++;
- }
+ last += atomic_add_negative(-1, &page->_mapcount);
} while (page++, --nr_pages > 0);
+ if (last &&
+ atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
+ nr = last;
+
partially_mapped = nr && atomic_read(mapped);
break;
case RMAP_LEVEL_PMD:
@@ -1553,22 +1568,20 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
}
}
- partially_mapped = nr < nr_pmdmapped;
+ partially_mapped = nr && nr < nr_pmdmapped;
break;
}
- if (nr) {
- /*
- * Queue anon large folio for deferred split if at least one
- * page of the folio is unmapped and at least one page
- * is still mapped.
- *
- * Check partially_mapped first to ensure it is a large folio.
- */
- if (folio_test_anon(folio) && partially_mapped &&
- list_empty(&folio->_deferred_list))
- deferred_split_folio(folio);
- }
+ /*
+ * Queue anon large folio for deferred split if at least one page of
+ * the folio is unmapped and at least one page is still mapped.
+ *
+ * Check partially_mapped first to ensure it is a large folio.
+ */
+ if (partially_mapped && folio_test_anon(folio) &&
+ !folio_test_partially_mapped(folio))
+ deferred_split_folio(folio, true);
+
__folio_mod_stat(folio, -nr, -nr_pmdmapped);
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 5a77acf6ac6a..4f11b5506363 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -155,7 +155,7 @@ static unsigned long shmem_default_max_inodes(void)
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
- struct mm_struct *fault_mm, vm_fault_t *fault_type);
+ struct vm_area_struct *vma, vm_fault_t *fault_type);
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
@@ -502,8 +502,8 @@ static int shmem_replace_entry(struct address_space *mapping,
* Sometimes, before we decide whether to proceed or to fail, we must check
* that an entry was not already brought back from swap by a racing thread.
*
- * Checking page is not enough: by the time a SwapCache page is locked, it
- * might be reused, and again be SwapCache, using the same swap as before.
+ * Checking folio is not enough: by the time a swapcache folio is locked, it
+ * might be reused, and again be swapcache, using the same swap as before.
*/
static bool shmem_confirm_swap(struct address_space *mapping,
pgoff_t index, swp_entry_t swap)
@@ -548,10 +548,12 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
- bool shmem_huge_force, struct mm_struct *mm,
- unsigned long vm_flags)
+static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
+ struct mm_struct *mm = vma ? vma->vm_mm : NULL;
loff_t i_size;
if (!S_ISREG(inode->i_mode))
@@ -568,7 +570,8 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
return true;
case SHMEM_HUGE_WITHIN_SIZE:
index = round_up(index + 1, HPAGE_PMD_NR);
- i_size = round_up(i_size_read(inode), PAGE_SIZE);
+ i_size = max(write_end, i_size_read(inode));
+ i_size = round_up(i_size, PAGE_SIZE);
if (i_size >> PAGE_SHIFT >= index)
return true;
fallthrough;
@@ -581,14 +584,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
}
}
-bool shmem_is_huge(struct inode *inode, pgoff_t index,
- bool shmem_huge_force, struct mm_struct *mm,
- unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma, unsigned long vm_flags)
{
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
return false;
- return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
+ return __shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
}
#if defined(CONFIG_SYSFS)
@@ -634,15 +638,14 @@ static const char *shmem_format_huge(int huge)
#endif
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
- struct shrink_control *sc, unsigned long nr_to_split)
+ struct shrink_control *sc, unsigned long nr_to_free)
{
LIST_HEAD(list), *pos, *next;
- LIST_HEAD(to_remove);
struct inode *inode;
struct shmem_inode_info *info;
struct folio *folio;
unsigned long batch = sc ? sc->nr_to_scan : 128;
- int split = 0;
+ unsigned long split = 0, freed = 0;
if (list_empty(&sbinfo->shrinklist))
return SHRINK_STOP;
@@ -660,13 +663,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
goto next;
}
- /* Check if there's anything to gain */
- if (round_up(inode->i_size, PAGE_SIZE) ==
- round_up(inode->i_size, HPAGE_PMD_SIZE)) {
- list_move(&info->shrinklist, &to_remove);
- goto next;
- }
-
list_move(&info->shrinklist, &list);
next:
sbinfo->shrinklist_len--;
@@ -675,34 +671,36 @@ next:
}
spin_unlock(&sbinfo->shrinklist_lock);
- list_for_each_safe(pos, next, &to_remove) {
- info = list_entry(pos, struct shmem_inode_info, shrinklist);
- inode = &info->vfs_inode;
- list_del_init(&info->shrinklist);
- iput(inode);
- }
-
list_for_each_safe(pos, next, &list) {
+ pgoff_t next, end;
+ loff_t i_size;
int ret;
- pgoff_t index;
info = list_entry(pos, struct shmem_inode_info, shrinklist);
inode = &info->vfs_inode;
- if (nr_to_split && split >= nr_to_split)
+ if (nr_to_free && freed >= nr_to_free)
goto move_back;
- index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
- folio = filemap_get_folio(inode->i_mapping, index);
- if (IS_ERR(folio))
+ i_size = i_size_read(inode);
+ folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
+ if (!folio || xa_is_value(folio))
goto drop;
- /* No huge page at the end of the file: nothing to split */
+ /* No large folio at the end of the file: nothing to split */
if (!folio_test_large(folio)) {
folio_put(folio);
goto drop;
}
+ /* Check if there is anything to gain from splitting */
+ next = folio_next_index(folio);
+ end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
+ if (end <= folio->index || end >= next) {
+ folio_put(folio);
+ goto drop;
+ }
+
/*
* Move the inode on the list back to shrinklist if we failed
* to lock the page at this time.
@@ -723,6 +721,7 @@ next:
if (ret)
goto move_back;
+ freed += next - end;
split++;
drop:
list_del_init(&info->shrinklist);
@@ -767,10 +766,17 @@ static long shmem_unused_huge_count(struct super_block *sb,
#define shmem_huge SHMEM_HUGE_DENY
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
- struct shrink_control *sc, unsigned long nr_to_split)
+ struct shrink_control *sc, unsigned long nr_to_free)
{
return 0;
}
+
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma, unsigned long vm_flags)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
@@ -786,7 +792,6 @@ static int shmem_add_to_page_cache(struct folio *folio,
VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
- VM_BUG_ON(expected && folio_test_large(folio));
folio_ref_add(folio, nr);
folio->mapping = mapping;
@@ -842,23 +847,27 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
xa_unlock_irq(&mapping->i_pages);
- folio_put(folio);
+ folio_put_refs(folio, nr);
BUG_ON(error);
}
/*
- * Remove swap entry from page cache, free the swap and its page cache.
+ * Remove swap entry from page cache, free the swap and its page cache. Returns
+ * the number of pages freed; 0 means the entry was not found in the XArray
+ * (nothing was freed).
*/
-static int shmem_free_swap(struct address_space *mapping,
- pgoff_t index, void *radswap)
+static long shmem_free_swap(struct address_space *mapping,
+ pgoff_t index, void *radswap)
{
+ int order = xa_get_order(&mapping->i_pages, index);
void *old;
old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
if (old != radswap)
- return -ENOENT;
- free_swap_and_cache(radix_to_swp_entry(radswap));
- return 0;
+ return 0;
+ free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
+
+ return 1 << order;
}
/*
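With shmem_free_swap() now returning a page count, callers sum pages freed rather than counting entries, so a single order-N entry accounts for 1 << N pages. The caller-side shape, as a self-contained sketch:

static long sum_freed(long (*free_one)(unsigned long index),
                      const unsigned long *indices, int nr)
{
	long nr_swaps_freed = 0;

	for (int i = 0; i < nr; i++)
		nr_swaps_freed += free_one(indices[i]); /* 0 if the entry raced away */
	return nr_swaps_freed;
}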
@@ -881,7 +890,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
if (xas_retry(&xas, page))
continue;
if (xa_is_value(page))
- swapped++;
+ swapped += 1 << xas_get_order(&xas);
if (xas.xa_index == max)
break;
if (need_resched()) {
@@ -971,7 +980,7 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
* (although in some cases this is just a waste of time).
*/
folio = NULL;
- shmem_get_folio(inode, index, &folio, SGP_READ);
+ shmem_get_folio(inode, index, 0, &folio, SGP_READ);
return folio;
}
@@ -1010,7 +1019,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (xa_is_value(folio)) {
if (unfalloc)
continue;
- nr_swaps_freed += !shmem_free_swap(mapping,
+ nr_swaps_freed += shmem_free_swap(mapping,
indices[i], folio);
continue;
}
@@ -1077,14 +1086,17 @@ whole_folios:
folio = fbatch.folios[i];
if (xa_is_value(folio)) {
+ long swaps_freed;
+
if (unfalloc)
continue;
- if (shmem_free_swap(mapping, indices[i], folio)) {
+ swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+ if (!swaps_freed) {
/* Swap was replaced by page: retry */
index = indices[i];
break;
}
- nr_swaps_freed++;
+ nr_swaps_freed += swaps_freed;
continue;
}
@@ -1156,7 +1168,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
STATX_ATTR_NODUMP);
generic_fillattr(idmap, request_mask, inode, stat);
- if (shmem_is_huge(inode, 0, false, NULL, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1443,6 +1455,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
swp_entry_t swap;
pgoff_t index;
+ int nr_pages;
+ bool split = false;
/*
* Our capabilities prevent regular writeback or sync from ever calling
@@ -1461,20 +1475,33 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
goto redirty;
/*
- * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
- * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
- * and its shmem_writeback() needs them to be split when swapping.
+ * If CONFIG_THP_SWAP is not enabled, the large folio should be
+ * split when swapping.
+ *
+ * And shrinking pages beyond i_size does not split swap entries,
+ * so swapout of a large folio crossing i_size needs to split too
+ * (unless fallocate has been used to preallocate beyond EOF).
*/
if (folio_test_large(folio)) {
+ index = shmem_fallocend(inode,
+ DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
+ if ((index > folio->index && index < folio_next_index(folio)) ||
+ !IS_ENABLED(CONFIG_THP_SWAP))
+ split = true;
+ }
+
+ if (split) {
+try_split:
/* Ensure the subpages are still dirty */
folio_test_set_dirty(folio);
- if (split_huge_page(page) < 0)
+ if (split_huge_page_to_list_to_order(page, wbc->list, 0))
goto redirty;
folio = page_folio(page);
folio_clear_dirty(folio);
}
index = folio->index;
+ nr_pages = folio_nr_pages(folio);
/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
@@ -1509,8 +1536,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
}
swap = folio_alloc_swap(folio);
- if (!swap.val)
+ if (!swap.val) {
+ if (nr_pages > 1)
+ goto try_split;
+
goto redirty;
+ }
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
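Control flow of the new retry: if no swap slot can be allocated for a large folio, it is split to order 0 and the allocation is retried once, instead of redirtying immediately. A self-contained toy model (the real code jumps back to the try_split label shown above, relying on the folio no longer being large):

#include <stdbool.h>

struct wfolio { int nr_pages; };

/* in this toy model, slot allocation only succeeds for order-0 folios */
static bool alloc_swap_slot(struct wfolio *f) { return f->nr_pages == 1; }

static int writeout(struct wfolio *f)
{
try_split:
	if (!alloc_swap_slot(f)) {
		if (f->nr_pages > 1) {
			f->nr_pages = 1; /* split_huge_page_to_list_to_order(..., 0) */
			goto try_split;
		}
		return -1; /* redirty */
	}
	return 0; /* proceed with swapout */
}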
@@ -1527,8 +1558,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (add_to_swap_cache(folio, swap,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
NULL) == 0) {
- shmem_recalc_inode(inode, 0, 1);
- swap_shmem_alloc(swap);
+ shmem_recalc_inode(inode, 0, nr_pages);
+ swap_shmem_alloc(swap, nr_pages);
shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
mutex_unlock(&shmem_swaplist_mutex);
@@ -1624,22 +1655,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
+ loff_t write_end, bool shmem_huge_force)
{
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
- unsigned long vm_flags = vma->vm_flags;
+ unsigned long vm_flags = vma ? vma->vm_flags : 0;
+ bool global_huge;
loff_t i_size;
int order;
- if ((vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
return 0;
/* If the hardware/firmware marked hugepage support disabled. */
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
return 0;
+ global_huge = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
+ if (!vma || !vma_is_anon_shmem(vma)) {
+ /*
+ * For tmpfs, we now only support PMD sized THP if huge page
+ * is enabled, otherwise fallback to order 0.
+ */
+ return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+ }
+
/*
* Following the 'deny' semantics of the top level, force the huge
* option off from all mounts.
@@ -1680,20 +1722,30 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
struct address_space *mapping, pgoff_t index,
unsigned long orders)
{
- struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
pgoff_t aligned_index;
unsigned long pages;
int order;
- orders = thp_vma_suitable_orders(vma, vmf->address, orders);
- if (!orders)
- return 0;
+ if (vma) {
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+ if (!orders)
+ return 0;
+ }
/* Find the highest order that can add into the page cache */
order = highest_order(orders);
while (orders) {
pages = 1UL << order;
aligned_index = round_down(index, pages);
+ /*
+ * Check for conflict before waiting on a huge allocation.
+ * Conflict might be that a huge page has just been allocated
+ * and added to page cache by a racing thread, or that there
+ * is already at least one small page in the huge extent.
+ * Be careful to retry when appropriate, but not forever!
+ * Elsewhere -EEXIST would be the right code, but not here.
+ */
if (!xa_find(&mapping->i_pages, &aligned_index,
aligned_index + pages - 1, XA_PRESENT))
break;
@@ -1731,7 +1783,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
unsigned long suitable_orders = 0;
struct folio *folio = NULL;
long pages;
@@ -1741,26 +1792,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
orders = 0;
if (orders > 0) {
- if (vma && vma_is_anon_shmem(vma)) {
- suitable_orders = shmem_suitable_orders(inode, vmf,
+ suitable_orders = shmem_suitable_orders(inode, vmf,
mapping, index, orders);
- } else if (orders & BIT(HPAGE_PMD_ORDER)) {
- pages = HPAGE_PMD_NR;
- suitable_orders = BIT(HPAGE_PMD_ORDER);
- index = round_down(index, HPAGE_PMD_NR);
-
- /*
- * Check for conflict before waiting on a huge allocation.
- * Conflict might be that a huge page has just been allocated
- * and added to page cache by a racing thread, or that there
- * is already at least one small page in the huge extent.
- * Be careful to retry when appropriate, but not forever!
- * Elsewhere -EEXIST would be the right code, but not here.
- */
- if (xa_find(&mapping->i_pages, &index,
- index + HPAGE_PMD_NR - 1, XA_PRESENT))
- return ERR_PTR(-E2BIG);
- }
order = highest_order(suitable_orders);
while (suitable_orders) {
@@ -1772,9 +1805,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
if (pages == HPAGE_PMD_NR)
count_vm_event(THP_FILE_FALLBACK);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
-#endif
order = next_order(&suitable_orders, order);
}
} else {
@@ -1799,10 +1830,8 @@ allocated:
count_vm_event(THP_FILE_FALLBACK);
count_vm_event(THP_FILE_FALLBACK_CHARGE);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
-#endif
}
goto unlock;
}
@@ -1819,7 +1848,7 @@ allocated:
* Try to reclaim some space by splitting a few
* large folios beyond i_size on the filesystem.
*/
- shmem_unused_huge_shrink(sbinfo, NULL, 2);
+ shmem_unused_huge_shrink(sbinfo, NULL, pages);
/*
* And do a shmem_recalc_inode() to account for freed pages:
* except our folio is there in cache, so not quite balanced.
@@ -1867,30 +1896,35 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
}
static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, pgoff_t index,
+ struct vm_area_struct *vma)
{
- struct folio *old, *new;
- struct address_space *swap_mapping;
- swp_entry_t entry;
- pgoff_t swap_index;
- int error;
-
- old = *foliop;
- entry = old->swap;
- swap_index = swap_cache_index(entry);
- swap_mapping = swap_address_space(entry);
+ struct folio *new, *old = *foliop;
+ swp_entry_t entry = old->swap;
+ struct address_space *swap_mapping = swap_address_space(entry);
+ pgoff_t swap_index = swap_cache_index(entry);
+ XA_STATE(xas, &swap_mapping->i_pages, swap_index);
+ int nr_pages = folio_nr_pages(old);
+ int error = 0, i;
/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success by further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
- VM_BUG_ON_FOLIO(folio_test_large(old), old);
- new = shmem_alloc_folio(gfp, 0, info, index);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (nr_pages > 1) {
+ gfp_t huge_gfp = vma_thp_gfp_mask(vma);
+
+ gfp = limit_gfp_mask(huge_gfp, gfp);
+ }
+#endif
+
+ new = shmem_alloc_folio(gfp, folio_order(old), info, index);
if (!new)
return -ENOMEM;
- folio_get(new);
+ folio_ref_add(new, nr_pages);
folio_copy(new, old);
flush_dcache_folio(new);
@@ -1900,26 +1934,34 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
new->swap = entry;
folio_set_swapcache(new);
- /*
- * Our caller will very soon move newpage out of swapcache, but it's
- * a nice clean interface for us to replace oldpage by newpage there.
- */
+ /* Swap cache still stores N entries instead of a high-order entry */
xa_lock_irq(&swap_mapping->i_pages);
- error = shmem_replace_entry(swap_mapping, swap_index, old, new);
+ for (i = 0; i < nr_pages; i++) {
+ void *item = xas_load(&xas);
+
+ if (item != old) {
+ error = -ENOENT;
+ break;
+ }
+
+ xas_store(&xas, new);
+ xas_next(&xas);
+ }
if (!error) {
mem_cgroup_replace_folio(old, new);
- __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
- __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
- __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
- __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
+ __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
+ __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
+ __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
+ __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
}
xa_unlock_irq(&swap_mapping->i_pages);
if (unlikely(error)) {
/*
- * Is this possible? I think not, now that our callers check
- * both PageSwapCache and page_private after getting page lock;
- * but be defensive. Reverse old to newpage for clear and free.
+ * Is this possible? I think not, now that our callers
+ * check both the swapcache flag and folio->private
+ * after getting the folio lock; but be defensive.
+ * Reverse old to the new folio so that it is cleared and freed.
*/
old = new;
} else {
@@ -1931,7 +1973,12 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
old->private = NULL;
folio_unlock(old);
- folio_put_refs(old, 2);
+ /*
+ * The old folio is removed from the swap cache: drop its 'nr_pages'
+ * references, as well as the one temporary reference taken by the
+ * earlier swap cache lookup.
+ */
+ folio_put_refs(old, nr_pages + 1);
return error;
}
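To make the final folio_put_refs() above concrete, here is an illustrative accounting sketch; the order-2 folio (nr_pages == 4) is an assumed value for the example, not something fixed by the patch:

    /* Reference flow in shmem_replace_folio() for nr_pages == 4:
     *
     *   new: folio_ref_add(new, 4)  -> one reference per swap cache slot
     *   old: 4 swap cache references + 1 temporary reference taken by the
     *        earlier swap cache lookup
     *
     * Once the xarray slots point at 'new', the old folio's swap cache
     * references are stale, hence folio_put_refs(old, 4 + 1).
     */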
@@ -1941,6 +1988,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
swp_entry_t swapin_error;
void *old;
+ int nr_pages;
swapin_error = make_poisoned_swp_entry();
old = xa_cmpxchg_irq(&mapping->i_pages, index,
@@ -1949,6 +1997,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
if (old != swp_to_radix_entry(swap))
return;
+ nr_pages = folio_nr_pages(folio);
folio_wait_writeback(folio);
delete_from_swap_cache(folio);
/*
@@ -1956,8 +2005,86 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
* won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
* in shmem_evict_inode().
*/
- shmem_recalc_inode(inode, -1, -1);
- swap_free(swap);
+ shmem_recalc_inode(inode, -nr_pages, -nr_pages);
+ swap_free_nr(swap, nr_pages);
+}
+
+static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
+ swp_entry_t swap, gfp_t gfp)
+{
+ struct address_space *mapping = inode->i_mapping;
+ XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
+ void *alloced_shadow = NULL;
+ int alloced_order = 0, i;
+
+ /* Convert user data gfp flags to xarray node gfp flags */
+ gfp &= GFP_RECLAIM_MASK;
+
+ for (;;) {
+ int order = -1, split_order = 0;
+ void *old = NULL;
+
+ xas_lock_irq(&xas);
+ old = xas_load(&xas);
+ if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
+ xas_set_err(&xas, -EEXIST);
+ goto unlock;
+ }
+
+ order = xas_get_order(&xas);
+
+ /* Swap entry may have changed before we re-acquire the lock */
+ if (alloced_order &&
+ (old != alloced_shadow || order != alloced_order)) {
+ xas_destroy(&xas);
+ alloced_order = 0;
+ }
+
+ /* Try to split large swap entry in pagecache */
+ if (order > 0) {
+ if (!alloced_order) {
+ split_order = order;
+ goto unlock;
+ }
+ xas_split(&xas, old, order);
+
+ /*
+ * Re-set the swap entries after splitting; the swap offsets of
+ * the original large entry are contiguous.
+ */
+ for (i = 0; i < 1 << order; i++) {
+ pgoff_t aligned_index = round_down(index, 1 << order);
+ swp_entry_t tmp;
+
+ tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
+ __xa_store(&mapping->i_pages, aligned_index + i,
+ swp_to_radix_entry(tmp), 0);
+ }
+ }
+
+unlock:
+ xas_unlock_irq(&xas);
+
+ /* Split needed: allocate xarray nodes outside the lock and retry. */
+ if (split_order) {
+ xas_split_alloc(&xas, old, split_order, gfp);
+ if (xas_error(&xas))
+ goto error;
+ alloced_shadow = old;
+ alloced_order = split_order;
+ xas_reset(&xas);
+ continue;
+ }
+
+ if (!xas_nomem(&xas, gfp))
+ break;
+ }
+
+error:
+ if (xas_error(&xas))
+ return xas_error(&xas);
+
+ return alloced_order;
}
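A worked example of the re-seeding loop above, with all values hypothetical: splitting an order-2 entry of swap type 1 and base offset 64, faulted at page index 642:

    /* order = 2, swap = swp_entry(1, 64), index = 642,
     * aligned_index = round_down(642, 1 << 2) = 640:
     *
     *   i = 0: index 640 -> swp_entry(1, 64)
     *   i = 1: index 641 -> swp_entry(1, 65)
     *   i = 2: index 642 -> swp_entry(1, 66)
     *   i = 3: index 643 -> swp_entry(1, 67)
     *
     * The caller then recomputes its entry as swp_entry(1, 64 + (642 - 640)),
     * i.e. swp_entry(1, 66), matching the contiguous offsets of the old entry.
     */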
/*
@@ -1968,15 +2095,16 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
*/
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp,
- gfp_t gfp, struct mm_struct *fault_mm,
+ gfp_t gfp, struct vm_area_struct *vma,
vm_fault_t *fault_type)
{
struct address_space *mapping = inode->i_mapping;
+ struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
struct shmem_inode_info *info = SHMEM_I(inode);
struct swap_info_struct *si;
struct folio *folio = NULL;
swp_entry_t swap;
- int error;
+ int error, nr_pages;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
swap = radix_to_swp_entry(*foliop);
@@ -1996,12 +2124,37 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
if (!folio) {
+ int split_order;
+
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(fault_mm, PGMAJFAULT);
}
+
+ /*
+ * The swap device can currently only swap in order-0 folios, so
+ * we should split the large swap entry stored in the pagecache
+ * if necessary.
+ */
+ split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ if (split_order < 0) {
+ error = split_order;
+ goto failed;
+ }
+
+ /*
+ * If the large swap entry has already been split, recalculate
+ * the new swap entry for this index based on the alignment of
+ * the old (pre-split) order.
+ */
+ if (split_order > 0) {
+ pgoff_t offset = index - round_down(index, 1 << split_order);
+
+ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ }
+
/* Here we actually start the io */
folio = shmem_swapin_cluster(swap, gfp, info, index);
if (!folio) {
@@ -2023,6 +2176,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
goto failed;
}
folio_wait_writeback(folio);
+ nr_pages = folio_nr_pages(folio);
/*
* Some architectures may have to restore extra metadata to the
@@ -2031,24 +2185,25 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
arch_swap_restore(folio_swap(swap, folio), folio);
if (shmem_should_replace_folio(folio, gfp)) {
- error = shmem_replace_folio(&folio, gfp, info, index);
+ error = shmem_replace_folio(&folio, gfp, info, index, vma);
if (error)
goto failed;
}
- error = shmem_add_to_page_cache(folio, mapping, index,
+ error = shmem_add_to_page_cache(folio, mapping,
+ round_down(index, nr_pages),
swp_to_radix_entry(swap), gfp);
if (error)
goto failed;
- shmem_recalc_inode(inode, 0, -1);
+ shmem_recalc_inode(inode, 0, -nr_pages);
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
delete_from_swap_cache(folio);
folio_mark_dirty(folio);
- swap_free(swap);
+ swap_free_nr(swap, nr_pages);
put_swap_device(si);
*foliop = folio;
@@ -2078,14 +2233,14 @@ unlock:
* vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
*/
static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
- struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
- struct vm_fault *vmf, vm_fault_t *fault_type)
+ loff_t write_end, struct folio **foliop, enum sgp_type sgp,
+ gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
{
struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
struct mm_struct *fault_mm;
struct folio *folio;
int error;
- bool alloced, huge;
+ bool alloced;
unsigned long orders = 0;
if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
@@ -2111,7 +2266,7 @@ repeat:
if (xa_is_value(folio)) {
error = shmem_swapin_folio(inode, index, &folio,
- sgp, gfp, fault_mm, fault_type);
+ sgp, gfp, vma, fault_type);
if (error == -EEXIST)
goto repeat;
@@ -2158,14 +2313,8 @@ repeat:
return 0;
}
- huge = shmem_is_huge(inode, index, false, fault_mm,
- vma ? vma->vm_flags : 0);
- /* Find hugepage orders that are allowed for anonymous shmem. */
- if (vma && vma_is_anon_shmem(vma))
- orders = shmem_allowable_huge_orders(inode, vma, index, huge);
- else if (huge)
- orders = BIT(HPAGE_PMD_ORDER);
-
+ /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
+ orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
if (orders > 0) {
gfp_t huge_gfp;
@@ -2176,9 +2325,7 @@ repeat:
if (!IS_ERR(folio)) {
if (folio_test_pmd_mappable(folio))
count_vm_event(THP_FILE_ALLOC);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
-#endif
goto alloced;
}
if (PTR_ERR(folio) == -EEXIST)
@@ -2198,7 +2345,7 @@ alloced:
alloced = true;
if (folio_test_large(folio) &&
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
- folio_next_index(folio) - 1) {
+ folio_next_index(folio)) {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_inode_info *info = SHMEM_I(inode);
/*
@@ -2268,6 +2415,7 @@ unlock:
* shmem_get_folio - find, and lock a shmem folio.
* @inode: inode to search
* @index: the page index.
+ * @write_end: end of a write, could extend inode size
* @foliop: pointer to the folio if found
* @sgp: SGP_* flags to control behavior
*
@@ -2287,10 +2435,10 @@ unlock:
* Context: May sleep.
* Return: 0 if successful, else a negative error code.
*/
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp)
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp)
{
- return shmem_get_folio_gfp(inode, index, foliop, sgp,
+ return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}
EXPORT_SYMBOL_GPL(shmem_get_folio);
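A sketch of how a write path is expected to use the new write_end parameter; the helper name is hypothetical, but the call mirrors the shmem_write_begin() change further below. Passing the end of the write lets the huge-order logic see the size the inode is about to reach, while read-only paths pass 0:

    /* Hypothetical caller, for illustration only. */
    static int example_prepare_write(struct inode *inode, loff_t pos,
                                     unsigned int len, struct folio **foliop)
    {
            pgoff_t index = pos >> PAGE_SHIFT;

            /* write_end = pos + len: the inode may grow up to here */
            return shmem_get_folio(inode, index, pos + len, foliop, SGP_WRITE);
    }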
@@ -2385,7 +2533,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
}
WARN_ON_ONCE(vmf->page != NULL);
- err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
+ err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
gfp, vmf, &ret);
if (err)
return vmf_error(err);
@@ -2878,7 +3026,7 @@ static const struct inode_operations shmem_short_symlink_operations;
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2895,27 +3043,26 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
return -EPERM;
}
- ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
+ ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
if (ret)
return ret;
- *pagep = folio_file_page(folio, index);
- if (PageHWPoison(*pagep)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
folio_unlock(folio);
folio_put(folio);
- *pagep = NULL;
return -EIO;
}
+ *foliop = folio;
return 0;
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
if (pos + copied > inode->i_size)
@@ -2966,7 +3113,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- error = shmem_get_folio(inode, index, &folio, SGP_READ);
+ error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
if (error) {
if (error == -EINVAL)
error = 0;
@@ -3142,7 +3289,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
if (*ppos >= i_size_read(inode))
break;
- error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
+ error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio,
SGP_READ);
if (error) {
if (error == -EINVAL)
@@ -3332,8 +3479,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
else
- error = shmem_get_folio(inode, index, &folio,
- SGP_FALLOC);
+ error = shmem_get_folio(inode, index, offset + len,
+ &folio, SGP_FALLOC);
if (error) {
info->fallocend = undo_fallocend;
/* Remove the !uptodate folios we added */
@@ -3684,7 +3831,7 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
} else {
inode_nohighmem(inode);
inode->i_mapping->a_ops = &shmem_aops;
- error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
+ error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
if (error)
goto out_remove_offset;
inode->i_op = &shmem_symlink_inode_operations;
@@ -3730,7 +3877,7 @@ static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
return ERR_PTR(-ECHILD);
}
} else {
- error = shmem_get_folio(inode, 0, &folio, SGP_READ);
+ error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
if (error)
return ERR_PTR(error);
if (!folio)
@@ -4814,11 +4961,7 @@ void __init shmem_init(void)
shmem_init_inodecache();
#ifdef CONFIG_TMPFS_QUOTA
- error = register_quota_format(&shmem_quota_format);
- if (error < 0) {
- pr_err("Could not register quota format\n");
- goto out3;
- }
+ register_quota_format(&shmem_quota_format);
#endif
error = register_filesystem(&shmem_fs_type);
@@ -4853,7 +4996,6 @@ out1:
out2:
#ifdef CONFIG_TMPFS_QUOTA
unregister_quota_format(&shmem_quota_format);
-out3:
#endif
shmem_destroy_inodecache();
shm_mnt = ERR_PTR(error);
@@ -5198,7 +5340,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping,
struct folio *folio;
int error;
- error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
+ error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
gfp, NULL, NULL);
if (error)
return ERR_PTR(error);
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
index ce514e700d2f..d1e32ac01407 100644
--- a/mm/shmem_quota.c
+++ b/mm/shmem_quota.c
@@ -34,8 +34,6 @@
#include <linux/quotaops.h>
#include <linux/quota.h>
-#ifdef CONFIG_TMPFS_QUOTA
-
/*
* The following constants define the amount of time given a user
* before the soft limits are treated as hard limits (usually resulting
@@ -351,4 +349,3 @@ const struct dquot_operations shmem_quota_operations = {
.mark_dirty = shmem_mark_dquot_dirty,
.get_next_id = shmem_get_next_id,
};
-#endif /* CONFIG_TMPFS_QUOTA */
diff --git a/mm/show_mem.c b/mm/show_mem.c
index bdb439551eef..ec885a398fa0 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -435,15 +435,18 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
struct codetag *ct = tags[i].ct;
struct alloc_tag *tag = ct_to_alloc_tag(ct);
struct alloc_tag_counters counter = alloc_tag_read(tag);
+ char bytes[10];
+
+ string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));
/* Same as alloc_tag_to_text() but w/o intermediate buffer */
if (ct->modname)
- pr_notice("%12lli %8llu %s:%u [%s] func:%s\n",
- counter.bytes, counter.calls, ct->filename,
+ pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
+ bytes, counter.calls, ct->filename,
ct->lineno, ct->modname, ct->function);
else
- pr_notice("%12lli %8llu %s:%u func:%s\n",
- counter.bytes, counter.calls, ct->filename,
+ pr_notice("%12s %8llu %s:%u func:%s\n",
+ bytes, counter.calls, ct->filename,
ct->lineno, ct->function);
}
}
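For reference, a small sketch of what the string_get_size() conversion above produces; the sample value is arbitrary and the exact rounding is the helper's:

    char buf[10];

    /* 1572864 raw bytes, block size 1, binary (power-of-2) units */
    string_get_size(1572864, 1, STRING_UNITS_2, buf, sizeof(buf));
    /* buf now holds roughly "1.50 MiB" instead of the raw "1572864" */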
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 12ea5486a3e9..4a85b94d12ce 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -114,7 +114,7 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
int nid;
char kbuf[72];
- read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
+ read_len = min(size, sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, read_len))
return -EFAULT;
kbuf[read_len] = '\0';
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..f22fb760b286 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,7 +422,9 @@ kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
-int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+ unsigned int size, struct kmem_cache_args *args,
+ slab_flags_t flags);
void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
@@ -443,6 +445,13 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
return (s->flags & SLAB_KMALLOC);
}
+static inline bool is_kmalloc_normal(struct kmem_cache *s)
+{
+ if (!is_kmalloc_cache(s))
+ return false;
+ return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
+}
+
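For intuition, here is how is_kmalloc_normal() classifies a few caches; the names are the conventional kmalloc cache names and the list is illustrative:

    /* is_kmalloc_normal() by example:
     *
     *   kmalloc-64      -> true   (kmalloc cache, none of the three flags)
     *   kmalloc-cg-64   -> false  (SLAB_ACCOUNT)
     *   dma-kmalloc-64  -> false  (SLAB_CACHE_DMA)
     *   kmalloc-rcl-64  -> false  (SLAB_RECLAIM_ACCOUNT)
     *   inode_cache     -> false  (not a kmalloc cache at all)
     */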
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 40b582a014b8..744324465615 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -40,11 +40,6 @@ LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
-static LIST_HEAD(slab_caches_to_rcu_destroy);
-static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
-static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
- slab_caches_to_rcu_destroy_workfn);
-
/*
* Set of flags that will prevent slab merging
*/
@@ -88,6 +83,19 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
+
+static bool kmem_cache_is_duplicate_name(const char *name)
+{
+ struct kmem_cache *s;
+
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!strcmp(s->name, name))
+ return true;
+ }
+
+ return false;
+}
+
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
@@ -95,6 +103,10 @@ static int kmem_cache_sanity_check(const char *name, unsigned int size)
return -EINVAL;
}
+ /* Duplicate names will confuse slabtop, et al */
+ WARN(kmem_cache_is_duplicate_name(name),
+ "kmem_cache of name '%s' already exists\n", name);
+
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
return 0;
}
@@ -169,14 +181,15 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
if (ctor)
return NULL;
- size = ALIGN(size, sizeof(void *));
- align = calculate_alignment(flags, align, size);
- size = ALIGN(size, align);
flags = kmem_cache_flags(flags, name);
if (flags & SLAB_NEVER_MERGE)
return NULL;
+ size = ALIGN(size, sizeof(void *));
+ align = calculate_alignment(flags, align, size);
+ size = ALIGN(size, align);
+
list_for_each_entry_reverse(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
@@ -202,32 +215,29 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int align,
- slab_flags_t flags, unsigned int useroffset,
- unsigned int usersize, void (*ctor)(void *),
- struct kmem_cache *root_cache)
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s;
int err;
- if (WARN_ON(useroffset + usersize > object_size))
- useroffset = usersize = 0;
+ if (WARN_ON(args->useroffset + args->usersize > object_size))
+ args->useroffset = args->usersize = 0;
+
+ /* If a custom freelist pointer is requested make sure it's sane. */
+ err = -EINVAL;
+ if (args->use_freeptr_offset &&
+ (args->freeptr_offset >= object_size ||
+ !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
+ goto out;
err = -ENOMEM;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (!s)
goto out;
-
- s->name = name;
- s->size = s->object_size = object_size;
- s->align = align;
- s->ctor = ctor;
-#ifdef CONFIG_HARDENED_USERCOPY
- s->useroffset = useroffset;
- s->usersize = usersize;
-#endif
-
- err = __kmem_cache_create(s, flags);
+ err = do_kmem_cache_create(s, name, object_size, args, flags);
if (err)
goto out_free_cache;
@@ -242,39 +252,24 @@ out:
}
/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
+ * __kmem_cache_create_args - Create a kmem cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Additional arguments for the cache creation (see
+ * &struct kmem_cache_args).
+ * @flags: See %SLAB_* flags for an explanation of individual @flags.
*
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
+ * Not to be called directly, use the kmem_cache_create() wrapper with the same
+ * parameters.
*
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
+ * Context: Cannot be called within an interrupt, but can be interrupted.
*
* Return: a pointer to the cache on success, NULL on failure.
*/
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *))
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s = NULL;
const char *cache_name;
@@ -296,7 +291,7 @@ kmem_cache_create_usercopy(const char *name,
mutex_lock(&slab_mutex);
- err = kmem_cache_sanity_check(name, size);
+ err = kmem_cache_sanity_check(name, object_size);
if (err) {
goto out_unlock;
}
@@ -317,12 +312,14 @@ kmem_cache_create_usercopy(const char *name,
/* Fail closed on bad usersize or useroffset values. */
if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
- WARN_ON(!usersize && useroffset) ||
- WARN_ON(size < usersize || size - usersize < useroffset))
- usersize = useroffset = 0;
-
- if (!usersize)
- s = __kmem_cache_alias(name, size, align, flags, ctor);
+ WARN_ON(!args->usersize && args->useroffset) ||
+ WARN_ON(object_size < args->usersize ||
+ object_size - args->usersize < args->useroffset))
+ args->usersize = args->useroffset = 0;
+
+ if (!args->usersize)
+ s = __kmem_cache_alias(name, object_size, args->align, flags,
+ args->ctor);
if (s)
goto out_unlock;
@@ -332,9 +329,8 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size,
- calculate_alignment(flags, align, size),
- flags, useroffset, usersize, ctor, NULL);
+ args->align = calculate_alignment(flags, args->align, object_size);
+ s = create_cache(cache_name, object_size, args, flags);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -356,41 +352,7 @@ out_unlock:
}
return s;
}
-EXPORT_SYMBOL(kmem_cache_create_usercopy);
-
-/**
- * kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
- slab_flags_t flags, void (*ctor)(void *))
-{
- return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
- ctor);
-}
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(__kmem_cache_create_args);
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
@@ -478,87 +440,25 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
fail:
for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
kmem_cache_destroy((*b)[idx]);
- kfree(b);
+ kmem_cache_free(kmem_buckets_cache, b);
return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
-#ifdef SLAB_SUPPORTS_SYSFS
/*
* For a given kmem_cache, kmem_cache_destroy() should only be called
* once or there will be a use-after-free problem. The actual deletion
* and release of the kobject does not need slab_mutex or cpu_hotplug_lock
* protection. So they are now done without holding those locks.
- *
- * Note that there will be a slight delay in the deletion of sysfs files
- * if kmem_cache_release() is called indrectly from a work function.
*/
static void kmem_cache_release(struct kmem_cache *s)
{
- if (slab_state >= FULL) {
- sysfs_slab_unlink(s);
+ kfence_shutdown_cache(s);
+ if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
sysfs_slab_release(s);
- } else {
+ else
slab_kmem_cache_release(s);
- }
-}
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
- slab_kmem_cache_release(s);
-}
-#endif
-
-static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
-{
- LIST_HEAD(to_destroy);
- struct kmem_cache *s, *s2;
-
- /*
- * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
- * @slab_caches_to_rcu_destroy list. The slab pages are freed
- * through RCU and the associated kmem_cache are dereferenced
- * while freeing the pages, so the kmem_caches should be freed only
- * after the pending RCU operations are finished. As rcu_barrier()
- * is a pretty slow operation, we batch all pending destructions
- * asynchronously.
- */
- mutex_lock(&slab_mutex);
- list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
- mutex_unlock(&slab_mutex);
-
- if (list_empty(&to_destroy))
- return;
-
- rcu_barrier();
-
- list_for_each_entry_safe(s, s2, &to_destroy, list) {
- debugfs_slab_release(s);
- kfence_shutdown_cache(s);
- kmem_cache_release(s);
- }
-}
-
-static int shutdown_cache(struct kmem_cache *s)
-{
- /* free asan quarantined objects */
- kasan_cache_shutdown(s);
-
- if (__kmem_cache_shutdown(s) != 0)
- return -EBUSY;
-
- list_del(&s->list);
-
- if (s->flags & SLAB_TYPESAFE_BY_RCU) {
- list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
- schedule_work(&slab_caches_to_rcu_destroy_work);
- } else {
- kfence_shutdown_cache(s);
- debugfs_slab_release(s);
- }
-
- return 0;
}
void slab_kmem_cache_release(struct kmem_cache *s)
@@ -570,29 +470,63 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
- int err = -EBUSY;
- bool rcu_set;
+ int err;
if (unlikely(!s) || !kasan_check_byte(s))
return;
+ /* in-flight kfree_rcu()'s may include objects from our cache */
+ kvfree_rcu_barrier();
+
+ if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
+ (s->flags & SLAB_TYPESAFE_BY_RCU)) {
+ /*
+ * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
+ * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
+ * defer their freeing with call_rcu().
+ * Wait for such call_rcu() invocations here before actually
+ * destroying the cache.
+ *
+ * It doesn't matter that we haven't looked at the slab refcount
+ * yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
+ * the refcount should be 1 here.
+ */
+ rcu_barrier();
+ }
+
cpus_read_lock();
mutex_lock(&slab_mutex);
- rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
-
s->refcount--;
- if (s->refcount)
- goto out_unlock;
+ if (s->refcount) {
+ mutex_unlock(&slab_mutex);
+ cpus_read_unlock();
+ return;
+ }
- err = shutdown_cache(s);
+ /* free asan quarantined objects */
+ kasan_cache_shutdown(s);
+
+ err = __kmem_cache_shutdown(s);
WARN(err, "%s %s: Slab cache still has objects when called from %pS",
__func__, s->name, (void *)_RET_IP_);
-out_unlock:
+
+ list_del(&s->list);
+
mutex_unlock(&slab_mutex);
cpus_read_unlock();
- if (!err && !rcu_set)
- kmem_cache_release(s);
+
+ if (slab_state >= FULL)
+ sysfs_slab_unlink(s);
+ debugfs_slab_release(s);
+
+ if (err)
+ return;
+
+ if (s->flags & SLAB_TYPESAFE_BY_RCU)
+ rcu_barrier();
+
+ kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
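A usage sketch of the simplified destruction contract, assuming a cache whose objects are freed via kfree_rcu() and that this is the last live object; the barriers noted in the comment now happen inside kmem_cache_destroy() itself:

    /* Sketch: tearing down a cache with RCU-freed objects. */
    struct foo { struct rcu_head rcu; int val; };

    static void example_teardown(struct kmem_cache *foo_cache, struct foo *obj)
    {
            kfree_rcu(obj, rcu);    /* may still be in flight... */

            /*
             * ...but no explicit barrier is needed here anymore:
             * kmem_cache_destroy() now calls kvfree_rcu_barrier(), and for
             * SLAB_TYPESAFE_BY_RCU caches a final rcu_barrier(), before
             * releasing the cache.
             */
            kmem_cache_destroy(foo_cache);
    }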
@@ -704,9 +638,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
{
int err;
unsigned int align = ARCH_KMALLOC_MINALIGN;
-
- s->name = name;
- s->size = s->object_size = size;
+ struct kmem_cache_args kmem_args = {};
/*
* kmalloc caches guarantee alignment of at least the largest
@@ -715,14 +647,14 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
*/
if (flags & SLAB_KMALLOC)
align = max(align, 1U << (ffs(size) - 1));
- s->align = calculate_alignment(flags, align, size);
+ kmem_args.align = calculate_alignment(flags, align, size);
#ifdef CONFIG_HARDENED_USERCOPY
- s->useroffset = useroffset;
- s->usersize = usersize;
+ kmem_args.useroffset = useroffset;
+ kmem_args.usersize = usersize;
#endif
- err = __kmem_cache_create(s, flags);
+ err = do_kmem_cache_create(s, name, size, &kmem_args, flags);
if (err)
panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
@@ -1273,6 +1205,13 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
/* If the object still fits, repoison it precisely. */
if (ks >= new_size) {
+ /* Zero out spare memory. */
+ if (want_init_on_alloc(flags)) {
+ kasan_disable_current();
+ memset((void *)p + new_size, 0, ks - new_size);
+ kasan_enable_current();
+ }
+
p = kasan_krealloc((void *)p, new_size, flags);
return (void *)p;
}
@@ -1294,11 +1233,27 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
* @new_size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
* If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
* is 0 and @p is not a %NULL pointer, the object pointed to is freed.
*
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * This is because krealloc() only knows about the bucket size of an
+ * allocation (but not the exact size it was allocated with) and hence
+ * implements the following semantics for shrinking and growing buffers with
+ * __GFP_ZERO.
+ *
+ *          new             bucket
+ * 0        size            size
+ * |--------|---------------|
+ * |  keep  |     zero      |
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
* Return: pointer to the allocated memory or %NULL in case of error
*/
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
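A short usage sketch of the rule above; the sizes are arbitrary. The point is that __GFP_ZERO must accompany every call for the same allocation, or spare bucket space may retain stale data:

    /* Sketch: keeping the __GFP_ZERO promise across krealloc() calls. */
    static int example_grow(void)
    {
            char *buf, *tmp;

            buf = kzalloc(60, GFP_KERNEL);          /* zeroed from the start */
            if (!buf)
                    return -ENOMEM;

            /* Every later call for this allocation also passes __GFP_ZERO,
             * so the grown region [60, 120) is zeroed per the semantics above. */
            tmp = krealloc(buf, 120, GFP_KERNEL | __GFP_ZERO);
            if (!tmp) {
                    kfree(buf);
                    return -ENOMEM;
            }
            buf = tmp;

            kfree(buf);
            return 0;
    }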
diff --git a/mm/slub.c b/mm/slub.c
index a77f354f8325..21f71cb6cc06 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
*******************************************************************/
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
@@ -756,6 +750,50 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
return false;
}
+/*
+ * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
+ * API family rounds the real request size up to these fixed ones, so
+ * there may be extra space beyond what was requested. Save the original
+ * request size in the metadata area, for better debugging and sanity checks.
+ */
+static inline void set_orig_size(struct kmem_cache *s,
+ void *object, unsigned int orig_size)
+{
+ void *p = kasan_reset_tag(object);
+ unsigned int kasan_meta_size;
+
+ if (!slub_debug_orig_size(s))
+ return;
+
+ /*
+ * KASAN can save its free metadata inside the object at offset 0.
+ * If this metadata size is larger than 'orig_size', it will overlap
+ * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+ * 'orig_size' to be at least as big as KASAN's metadata.
+ */
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size > orig_size)
+ orig_size = kasan_meta_size;
+
+ p += get_info_end(s);
+ p += sizeof(struct track) * 2;
+
+ *(unsigned int *)p = orig_size;
+}
+
+static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
+{
+ void *p = kasan_reset_tag(object);
+
+ if (!slub_debug_orig_size(s))
+ return s->object_size;
+
+ p += get_info_end(s);
+ p += sizeof(struct track) * 2;
+
+ return *(unsigned int *)p;
+}
+
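The pointer arithmetic shared by set_orig_size() and get_orig_size() can be pictured as below; this is a simplified sketch that assumes slub_debug_orig_size() is enabled and leaves out optional KASAN metadata:

    /* Simplified per-object debug metadata layout:
     *
     *   | object data ... | alloc track | free track | orig_size |
     *                     ^                           ^
     *                     get_info_end(s)             get_info_end(s) +
     *                                                 2 * sizeof(struct track)
     */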
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
@@ -985,50 +1023,6 @@ static void print_slab_info(const struct slab *slab)
&slab->__page_flags);
}
-/*
- * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
- * family will round up the real request size to these fixed ones, so
- * there could be an extra area than what is requested. Save the original
- * request size in the meta data area, for better debug and sanity check.
- */
-static inline void set_orig_size(struct kmem_cache *s,
- void *object, unsigned int orig_size)
-{
- void *p = kasan_reset_tag(object);
- unsigned int kasan_meta_size;
-
- if (!slub_debug_orig_size(s))
- return;
-
- /*
- * KASAN can save its free meta data inside of the object at offset 0.
- * If this meta data size is larger than 'orig_size', it will overlap
- * the data redzone in [orig_size+1, object_size]. Thus, we adjust
- * 'orig_size' to be as at least as big as KASAN's meta data.
- */
- kasan_meta_size = kasan_metadata_size(s, true);
- if (kasan_meta_size > orig_size)
- orig_size = kasan_meta_size;
-
- p += get_info_end(s);
- p += sizeof(struct track) * 2;
-
- *(unsigned int *)p = orig_size;
-}
-
-static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
-{
- void *p = kasan_reset_tag(object);
-
- if (!slub_debug_orig_size(s))
- return s->object_size;
-
- p += get_info_end(s);
- p += sizeof(struct track) * 2;
-
- return *(unsigned int *)p;
-}
-
void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
set_orig_size(s, (void *)object, s->object_size);
@@ -1894,7 +1888,6 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
-
#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
void **freelist, void *nextfree)
@@ -2189,6 +2182,45 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
}
+
+static __fastpath_inline
+bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+ struct slabobj_ext *slab_exts;
+ struct kmem_cache *s;
+ struct folio *folio;
+ struct slab *slab;
+ unsigned long off;
+
+ folio = virt_to_folio(p);
+ if (!folio_test_slab(folio)) {
+ return folio_memcg_kmem(folio) ||
+ (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
+ folio_order(folio)) == 0);
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+
+ /*
+ * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
+ * of slab_obj_exts being allocated from the same slab and thus the slab
+ * becoming effectively unfreeable.
+ */
+ if (is_kmalloc_normal(s))
+ return true;
+
+ /* Ignore already charged objects. */
+ slab_exts = slab_obj_exts(slab);
+ if (slab_exts) {
+ off = obj_to_index(s, slab, p);
+ if (unlikely(slab_exts[off].objcg))
+ return true;
+ }
+
+ return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
+}
+
#else /* CONFIG_MEMCG */
static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
struct list_lru *lru,
@@ -2202,18 +2234,37 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
}
+
+static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+ return true;
+}
#endif /* CONFIG_MEMCG */
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
+
+struct rcu_delayed_free {
+ struct rcu_head head;
+ void *object;
+};
+#endif
+
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*
* Returns true if freeing of the object can proceed, false if its reuse
- * was delayed by KASAN quarantine, or it was returned to KFENCE.
+ * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
+ * to KFENCE.
*/
static __always_inline
-bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
+bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
+ bool after_rcu_delay)
{
+ /* Are the object contents still accessible? */
+ bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
+
kmemleak_free_recursive(x, s->flags);
kmsan_slab_free(s, x);
@@ -2223,7 +2274,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
debug_check_no_obj_freed(x, s->object_size);
/* Use KCSAN to help debug racy use-after-free. */
- if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
+ if (!still_accessible)
__kcsan_check_access(x, s->object_size,
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
@@ -2231,6 +2282,35 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
return false;
/*
+ * Give KASAN a chance to notice an invalid free operation before we
+ * modify the object.
+ */
+ if (kasan_slab_pre_free(s, x))
+ return false;
+
+#ifdef CONFIG_SLUB_RCU_DEBUG
+ if (still_accessible) {
+ struct rcu_delayed_free *delayed_free;
+
+ delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
+ if (delayed_free) {
+ /*
+ * Let KASAN track our call stack as a "related work
+ * creation", just like if the object had been freed
+ * normally via kfree_rcu().
+ * We have to do this manually because the rcu_head is
+ * not located inside the object.
+ */
+ kasan_record_aux_stack_noalloc(x);
+
+ delayed_free->object = x;
+ call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
+ return false;
+ }
+ }
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
+ /*
* As memory initialization might be integrated into KASAN,
* kasan_slab_free and initialization memset's must be
* kept together to avoid discrepancies in behavior.
@@ -2243,17 +2323,24 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
*/
if (unlikely(init)) {
int rsize;
- unsigned int inuse;
+ unsigned int inuse, orig_size;
inuse = get_info_end(s);
+ orig_size = get_orig_size(s, x);
if (!kasan_has_integrated_init())
- memset(kasan_reset_tag(x), 0, s->object_size);
+ memset(kasan_reset_tag(x), 0, orig_size);
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
memset((char *)kasan_reset_tag(x) + inuse, 0,
s->size - inuse - rsize);
+ /*
+ * Restore orig_size, otherwise a kmalloc redzone overwrite
+ * would be reported.
+ */
+ set_orig_size(s, x, orig_size);
+
}
/* KASAN might put x into memory quarantine, delaying its reuse. */
- return !kasan_slab_free(s, x, init);
+ return !kasan_slab_free(s, x, init, still_accessible);
}
static __fastpath_inline
@@ -2267,7 +2354,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
bool init;
if (is_kfence_address(next)) {
- slab_free_hook(s, next, false);
+ slab_free_hook(s, next, false, false);
return false;
}
@@ -2282,7 +2369,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
next = get_freepointer(s, object);
/* If object's reuse doesn't have to be delayed */
- if (likely(slab_free_hook(s, object, init))) {
+ if (likely(slab_free_hook(s, object, init, false))) {
/* Move object to the new freelist */
set_freepointer(s, object, *head);
*head = object;
@@ -2322,7 +2409,11 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct slab *slab;
unsigned int order = oo_order(oo);
- folio = (struct folio *)alloc_pages_node(node, flags, order);
+ if (node == NUMA_NO_NODE)
+ folio = (struct folio *)alloc_pages(flags, order);
+ else
+ folio = (struct folio *)__alloc_pages_node(node, flags, order);
+
if (!folio)
return NULL;
@@ -3420,14 +3511,15 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
+ int cpu = raw_smp_processor_id();
int node;
struct kmem_cache_node *n;
if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
return;
- pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
- nid, gfpflags, &gfpflags);
+ pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
+ cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
s->name, s->object_size, s->size, oo_order(s->oo),
oo_order(s->min));
@@ -3925,6 +4017,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
@@ -4066,6 +4160,15 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
}
EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
+bool kmem_cache_charge(void *objp, gfp_t gfpflags)
+{
+ if (!memcg_kmem_online())
+ return true;
+
+ return memcg_slab_post_charge(objp, gfpflags);
+}
+EXPORT_SYMBOL(kmem_cache_charge);
+
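A sketch of the intended call pattern for kmem_cache_charge(): allocate without memcg accounting on a hot path, then charge the object once it is actually retained. The struct, cache, and helper names are illustrative:

    struct foo { int val; };

    static struct foo *example_alloc_retained(struct kmem_cache *foo_cache)
    {
            struct foo *obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);

            if (!obj)
                    return NULL;

            /* Charge only objects we decide to keep; per the hook above this
             * is a no-op when memcg kmem accounting is offline and tolerates
             * already-charged objects. */
            if (!kmem_cache_charge(obj, GFP_KERNEL)) {
                    kmem_cache_free(foo_cache, obj);
                    return NULL;
            }
            return obj;
    }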
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @s: The cache to allocate from.
@@ -4474,7 +4577,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
memcg_slab_free_hook(s, slab, &object, 1);
alloc_tagging_slab_free_hook(s, slab, &object, 1);
- if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+ if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
do_slab_free(s, slab, object, object, 1, addr);
}
@@ -4483,7 +4586,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
static noinline
void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
{
- if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+ if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
}
#endif
@@ -4502,6 +4605,33 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
do_slab_free(s, slab, head, tail, cnt, addr);
}
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
+{
+ struct rcu_delayed_free *delayed_free =
+ container_of(rcu_head, struct rcu_delayed_free, head);
+ void *object = delayed_free->object;
+ struct slab *slab = virt_to_slab(object);
+ struct kmem_cache *s;
+
+ kfree(delayed_free);
+
+ if (WARN_ON(is_kfence_address(object)))
+ return;
+
+ /* find the object and the cache again */
+ if (WARN_ON(!slab))
+ return;
+ s = slab->slab_cache;
+ if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
+ return;
+
+ /* resume freeing */
+ if (slab_free_hook(s, object, slab_want_init_on_free(s), true))
+ do_slab_free(s, slab, object, object, 1, _THIS_IP_);
+}
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
#ifdef CONFIG_KASAN_GENERIC
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
@@ -5152,7 +5282,7 @@ static void set_cpu_partial(struct kmem_cache *s)
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
*/
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
@@ -5193,7 +5323,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
+ (flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
/*
@@ -5214,6 +5345,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+ s->offset = args->freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep
@@ -5288,65 +5421,6 @@ static int calculate_sizes(struct kmem_cache *s)
return !!oo_objects(s->oo);
}
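The use_freeptr_offset handling above lets a cache keep its freelist pointer in a caller-chosen hole instead of past the object. A hedged sketch of opting in, with 'struct bar' and the cache name invented for illustration (per the validation in create_cache(), the offset must lie inside the object, be freeptr_t-aligned, and the cache must be SLAB_TYPESAFE_BY_RCU):

    struct bar {
            int             id;
            freeptr_t       free;   /* reused only while the object is free */
            struct rcu_head rcu;
    };

    static struct kmem_cache *bar_cache;

    static int example_init(void)
    {
            struct kmem_cache_args args = {
                    .freeptr_offset     = offsetof(struct bar, free),
                    .use_freeptr_offset = true,
            };

            /* in practice the kmem_cache_create() wrapper is the entry point */
            bar_cache = __kmem_cache_create_args("bar", sizeof(struct bar),
                                                 &args, SLAB_TYPESAFE_BY_RCU);
            return bar_cache ? 0 : -ENOMEM;
    }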
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
- s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- s->random = get_random_long();
-#endif
-
- if (!calculate_sizes(s))
- goto error;
- if (disable_higher_order_debug) {
- /*
- * Disable debugging flags that store metadata if the min slab
- * order increased.
- */
- if (get_order(s->size) > get_order(s->object_size)) {
- s->flags &= ~DEBUG_METADATA_FLAGS;
- s->offset = 0;
- if (!calculate_sizes(s))
- goto error;
- }
- }
-
-#ifdef system_has_freelist_aba
- if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
- /* Enable fast mode */
- s->flags |= __CMPXCHG_DOUBLE;
- }
-#endif
-
- /*
- * The larger the object size is, the more slabs we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
- s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
- set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
- s->remote_node_defrag_ratio = 1000;
-#endif
-
- /* Initialize the pre-computed randomized freelist if slab is up */
- if (slab_state >= UP) {
- if (init_cache_random_seq(s))
- goto error;
- }
-
- if (!init_kmem_cache_nodes(s))
- goto error;
-
- if (alloc_kmem_cache_cpus(s))
- return 0;
-
-error:
- __kmem_cache_release(s);
- return -EINVAL;
-}
-
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
const char *text)
{
@@ -5900,28 +5974,90 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+ unsigned int size, struct kmem_cache_args *args,
+ slab_flags_t flags)
{
- int err;
+ int err = -EINVAL;
- err = kmem_cache_open(s, flags);
- if (err)
- return err;
+ s->name = name;
+ s->size = s->object_size = size;
+
+ s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ s->random = get_random_long();
+#endif
+ s->align = args->align;
+ s->ctor = args->ctor;
+#ifdef CONFIG_HARDENED_USERCOPY
+ s->useroffset = args->useroffset;
+ s->usersize = args->usersize;
+#endif
+
+ if (!calculate_sizes(args, s))
+ goto out;
+ if (disable_higher_order_debug) {
+ /*
+ * Disable debugging flags that store metadata if the min slab
+ * order increased.
+ */
+ if (get_order(s->size) > get_order(s->object_size)) {
+ s->flags &= ~DEBUG_METADATA_FLAGS;
+ s->offset = 0;
+ if (!calculate_sizes(args, s))
+ goto out;
+ }
+ }
+
+#ifdef system_has_freelist_aba
+ if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+ /* Enable fast mode */
+ s->flags |= __CMPXCHG_DOUBLE;
+ }
+#endif
+
+ /*
+ * The larger the object size is, the more slabs we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+ s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+ set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+#endif
+
+ /* Initialize the pre-computed randomized freelist if slab is up */
+ if (slab_state >= UP) {
+ if (init_cache_random_seq(s))
+ goto out;
+ }
+
+ if (!init_kmem_cache_nodes(s))
+ goto out;
+
+ if (!alloc_kmem_cache_cpus(s))
+ goto out;
/* Mutex is not taken during early boot */
- if (slab_state <= UP)
- return 0;
+ if (slab_state <= UP) {
+ err = 0;
+ goto out;
+ }
err = sysfs_slab_add(s);
- if (err) {
- __kmem_cache_release(s);
- return err;
- }
+ if (err)
+ goto out;
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
- return 0;
+out:
+ if (err)
+ __kmem_cache_release(s);
+ return err;
}
#ifdef SLAB_SUPPORTS_SYSFS
diff --git a/mm/swap.c b/mm/swap.c
index 9caf6b017cf0..835bdf324b76 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,31 +47,27 @@
int page_cluster;
const int page_cluster_max = 31;
-/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
-struct lru_rotate {
- local_lock_t lock;
- struct folio_batch fbatch;
-};
-static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
-
-/*
- * The following folio batches are grouped together because they are protected
- * by disabling preemption (and interrupts remain enabled).
- */
struct cpu_fbatches {
+ /*
+ * The following folio batches are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
local_lock_t lock;
struct folio_batch lru_add;
struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
- struct folio_batch activate;
+ struct folio_batch lru_activate;
#endif
+ /* Protecting the following batches which require disabling interrupts */
+ local_lock_t lock_irq;
+ struct folio_batch lru_move_tail;
};
+
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
+ .lock_irq = INIT_LOCAL_LOCK(lock_irq),
};
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
@@ -117,7 +113,9 @@ void __folio_put(struct folio *folio)
if (unlikely(folio_is_zone_device(folio))) {
free_zone_device_folio(folio);
return;
- } else if (folio_test_hugetlb(folio)) {
+ }
+
+ if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
@@ -162,7 +160,7 @@ EXPORT_SYMBOL(put_pages_list);
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
-static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
@@ -222,23 +220,50 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
folios_put(fbatch);
}
-static void folio_batch_add_and_move(struct folio_batch *fbatch,
- struct folio *folio, move_fn_t move_fn)
+static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+ struct folio *folio, move_fn_t move_fn,
+ bool on_lru, bool disable_irq)
{
- if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
- !lru_cache_disabled())
+ unsigned long flags;
+
+ if (on_lru && !folio_test_clear_lru(folio))
return;
- folio_batch_move_lru(fbatch, move_fn);
+
+ folio_get(folio);
+
+ if (disable_irq)
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ else
+ local_lock(&cpu_fbatches.lock);
+
+ if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
+ lru_cache_disabled())
+ folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
+
+ if (disable_irq)
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
+ else
+ local_unlock(&cpu_fbatches.lock);
}
-static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
+#define folio_batch_add_and_move(folio, op, on_lru) \
+ __folio_batch_add_and_move( \
+ &cpu_fbatches.op, \
+ folio, \
+ op, \
+ on_lru, \
+ offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+ )
+
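The last macro argument computes, at compile time, whether a batch needs interrupts disabled, purely from where its member sits inside struct cpu_fbatches: members declared at or after lock_irq get the IRQ-safe lock. Evaluated for the two domains:

    /* Compile-time lock selection by member position (illustrative):
     *
     *   folio_batch_add_and_move(folio, lru_add, false)
     *     offsetof(struct cpu_fbatches, lru_add) < offsetof(..., lock_irq)
     *     -> disable_irq == false, takes cpu_fbatches.lock
     *
     *   folio_batch_add_and_move(folio, lru_move_tail, true)
     *     offsetof(struct cpu_fbatches, lru_move_tail) >= offsetof(..., lock_irq)
     *     -> disable_irq == true, takes cpu_fbatches.lock_irq
     */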
+static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio)) {
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- lruvec_add_folio_tail(lruvec, folio);
- __count_vm_events(PGROTATED, folio_nr_pages(folio));
- }
+ if (folio_test_unevictable(folio))
+ return;
+
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ lruvec_add_folio_tail(lruvec, folio);
+ __count_vm_events(PGROTATED, folio_nr_pages(folio));
}
/*
@@ -250,22 +275,11 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
*/
void folio_rotate_reclaimable(struct folio *folio)
{
- if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
- !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
- unsigned long flags;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_locked(folio) || folio_test_dirty(folio) ||
+ folio_test_unevictable(folio))
+ return;
- local_lock_irqsave(&lru_rotate.lock, flags);
- fbatch = this_cpu_ptr(&lru_rotate.fbatch);
- folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
- }
+ folio_batch_add_and_move(folio, lru_move_tail, true);
}
void lru_note_cost(struct lruvec *lruvec, bool file,
@@ -326,47 +340,38 @@ void lru_note_cost_refault(struct folio *folio)
folio_nr_pages(folio), 0);
}
-static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_set_active(folio);
- lruvec_add_folio(lruvec, folio);
- trace_mm_lru_activate(folio);
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- __count_vm_events(PGACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
- nr_pages);
- }
+
+ lruvec_del_folio(lruvec, folio);
+ folio_set_active(folio);
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_activate(folio);
+
+ __count_vm_events(PGACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}
#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
- struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
+ struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, folio_activate_fn);
+ folio_batch_move_lru(fbatch, lru_activate);
}
void folio_activate(struct folio *folio)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.activate);
- folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_activate, true);
}
#else
@@ -378,12 +383,13 @@ void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio_test_clear_lru(folio)) {
- lruvec = folio_lruvec_lock_irq(folio);
- folio_activate_fn(lruvec, folio);
- unlock_page_lruvec_irq(lruvec);
- folio_set_lru(folio);
- }
+ if (!folio_test_clear_lru(folio))
+ return;
+
+ lruvec = folio_lruvec_lock_irq(folio);
+ lru_activate(lruvec, folio);
+ unlock_page_lruvec_irq(lruvec);
+ folio_set_lru(folio);
}
#endif
@@ -482,7 +488,7 @@ void folio_mark_accessed(struct folio *folio)
} else if (!folio_test_active(folio)) {
/*
* If the folio is on the LRU, queue it for activation via
- * cpu_fbatches.activate. Otherwise, assume the folio is in a
+ * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
* folio_batch, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
@@ -509,8 +515,6 @@ EXPORT_SYMBOL(folio_mark_accessed);
*/
void folio_add_lru(struct folio *folio)
{
- struct folio_batch *fbatch;
-
VM_BUG_ON_FOLIO(folio_test_active(folio) &&
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
@@ -520,11 +524,7 @@ void folio_add_lru(struct folio *folio)
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
- folio_batch_add_and_move(fbatch, folio, lru_add_fn);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_add, false);
}
EXPORT_SYMBOL(folio_add_lru);
@@ -567,7 +567,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
* written out by flusher threads as this is much more efficient
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
bool active = folio_test_active(folio);
long nr_pages = folio_nr_pages(folio);
@@ -608,43 +608,43 @@ static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
}
}
-static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
- lruvec_add_folio(lruvec, folio);
+ if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ return;
- __count_vm_events(PGDEACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
- nr_pages);
- }
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ lruvec_add_folio(lruvec, folio);
+
+ __count_vm_events(PGDEACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
-static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
- if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
- /*
- * Lazyfree folios are clean anonymous folios. They have
- * the swapbacked flag cleared, to distinguish them from normal
- * anonymous folios
- */
- folio_clear_swapbacked(folio);
- lruvec_add_folio(lruvec, folio);
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
- __count_vm_events(PGLAZYFREE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
- nr_pages);
- }
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ /*
+ * Lazyfree folios are clean anonymous folios. They have
+ * the swapbacked flag cleared to distinguish them from normal
+ * anonymous folios.
+ */
+ folio_clear_swapbacked(folio);
+ lruvec_add_folio(lruvec, folio);
+
+ __count_vm_events(PGLAZYFREE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}
/*
@@ -658,30 +658,30 @@ void lru_add_drain_cpu(int cpu)
struct folio_batch *fbatch = &fbatches->lru_add;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_add_fn);
+ folio_batch_move_lru(fbatch, lru_add);
- fbatch = &per_cpu(lru_rotate.fbatch, cpu);
+ fbatch = &fbatches->lru_move_tail;
/* Disabling interrupts below acts as a compiler barrier. */
if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_lock_irqsave(&lru_rotate.lock, flags);
- folio_batch_move_lru(fbatch, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ folio_batch_move_lru(fbatch, lru_move_tail);
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}
fbatch = &fbatches->lru_deactivate_file;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate_file);
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate);
fbatch = &fbatches->lru_lazyfree;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_lazyfree_fn);
+ folio_batch_move_lru(fbatch, lru_lazyfree);
folio_activate_drain(cpu);
}
@@ -698,22 +698,11 @@ void lru_add_drain_cpu(int cpu)
*/
void deactivate_file_folio(struct folio *folio)
{
- struct folio_batch *fbatch;
-
/* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
/*
@@ -726,21 +715,10 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- if (!folio_test_unevictable(folio) && (folio_test_active(folio) ||
- lru_gen_enabled())) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_deactivate, true);
}
/**
@@ -752,21 +730,11 @@ void folio_deactivate(struct folio *folio)
*/
void folio_mark_lazyfree(struct folio *folio)
{
- if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
- folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_lazyfree, true);
}
void lru_add_drain(void)
@@ -816,11 +784,11 @@ static bool cpu_needs_drain(unsigned int cpu)
/* Check these in order of likelihood that they're not zero */
return folio_batch_count(&fbatches->lru_add) ||
- data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+ folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
- folio_batch_count(&fbatches->activate) ||
+ folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
}
@@ -938,8 +906,8 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
* lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
- * It drains pages on LRU cache and then disable on all cpus until
+ * a list of folios to be migrated using folio_isolate_lru().
+ * It drains folios on the LRU caches and then disables them on all CPUs until
* lru_cache_enable is called.
*
* Must be paired with a call to lru_cache_enable().
diff --git a/mm/swap.h b/mm/swap.h
index baa1fa946b34..ad2f121de970 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -59,7 +59,7 @@ void __delete_from_swap_cache(struct folio *folio,
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
-void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -73,13 +73,39 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
static inline unsigned int folio_swap_flags(struct folio *folio)
{
return swp_swap_info(folio->swap)->flags;
}
+
+/*
+ * Return the count of contiguous swap entries that share the same
+ * zeromap status as the starting entry. If is_zeromap is not NULL,
+ * the starting entry's zeromap status is also stored through it.
+ */
+static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
+ bool *is_zeromap)
+{
+ struct swap_info_struct *sis = swp_swap_info(entry);
+ unsigned long start = swp_offset(entry);
+ unsigned long end = start + max_nr;
+ bool first_bit;
+
+ first_bit = test_bit(start, sis->zeromap);
+ if (is_zeromap)
+ *is_zeromap = first_bit;
+
+ if (max_nr <= 1)
+ return max_nr;
+ if (first_bit)
+ return find_next_zero_bit(sis->zeromap, end, start) - start;
+ else
+ return find_next_bit(sis->zeromap, end, start) - start;
+}
+
#else /* CONFIG_SWAP */
struct swap_iocb;
static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
@@ -109,7 +135,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
return NULL;
}
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_fault *vmf)
{
return NULL;
@@ -120,7 +146,7 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}
-static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
}
@@ -171,5 +197,13 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
{
return 0;
}
+
+static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
+ bool *has_zeromap)
+{
+ return 0;
+}
+
#endif /* CONFIG_SWAP */
+
#endif /* _MM_SWAP_H */
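
swap_zeromap_batch() above turns per-entry zeromap tests into one run-length scan: it reads the first bit, then asks find_next_bit()/find_next_zero_bit() where that value ends. A minimal userspace sketch of the same run-length idea, using a plain loop over a byte-backed bitmap instead of the kernel bitmap helpers (all names below are illustrative):

#include <stdbool.h>
#include <stddef.h>

/* Count how many consecutive bits, starting at 'start' and capped at
 * 'max_nr', share the value of the first bit; optionally report that
 * first value -- the same contract as swap_zeromap_batch(). */
static size_t bit_run_length(const unsigned char *bitmap, size_t start,
			     size_t max_nr, bool *first_is_set)
{
	bool first = bitmap[start / 8] & (1u << (start % 8));
	size_t n;

	if (first_is_set)
		*first_is_set = first;
	if (max_nr <= 1)
		return max_nr;	/* mirrors the kernel's early return */

	for (n = 1; n < max_nr; n++) {
		size_t bit = start + n;
		bool set = bitmap[bit / 8] & (1u << (bit % 8));

		if (set != first)
			break;	/* the run ends where the bit value flips */
	}
	return n;
}
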
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index db6c4a26cf59..da1278f0563b 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -161,6 +161,8 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
*/
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
+ if (mem_cgroup_disabled())
+ return 0;
return lookup_swap_cgroup(ent, NULL)->id;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index a1726e49a5eb..4669f29cf555 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -435,6 +435,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
{
struct swap_info_struct *si;
struct folio *folio;
+ struct folio *new_folio = NULL;
+ struct folio *result = NULL;
void *shadow = NULL;
*new_page_allocated = false;
@@ -463,27 +465,28 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* else swap_off will be aborted if we return NULL.
*/
if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
- goto fail_put_swap;
+ goto put_and_return;
/*
- * Get a new folio to read into from swap. Allocate it now,
- * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
- * cause any racers to loop around until we add it to cache.
+ * Get a new folio to read into from swap. Allocate it now if
+ * new_folio does not yet exist, before marking swap_map
+ * SWAP_HAS_CACHE, at which point -EEXIST will cause any racers
+ * to loop around until we add it to the cache.
*/
- folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
- if (!folio)
- goto fail_put_swap;
+ if (!new_folio) {
+ new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
+ if (!new_folio)
+ goto put_and_return;
+ }
/*
* Swap entry may have been freed since our caller observed it.
*/
- err = swapcache_prepare(entry);
+ err = swapcache_prepare(entry, 1);
if (!err)
break;
-
- folio_put(folio);
- if (err != -EEXIST)
- goto fail_put_swap;
+ else if (err != -EEXIST)
+ goto put_and_return;
/*
* Protect against a recursive call to __read_swap_cache_async()
@@ -494,7 +497,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* __read_swap_cache_async() in the writeback path.
*/
if (skip_if_exists)
- goto fail_put_swap;
+ goto put_and_return;
/*
* We might race against __delete_from_swap_cache(), and
@@ -509,36 +512,37 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* The swap entry is ours to swap in. Prepare the new folio.
*/
+ __folio_set_locked(new_folio);
+ __folio_set_swapbacked(new_folio);
- __folio_set_locked(folio);
- __folio_set_swapbacked(folio);
-
- if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
+ if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
goto fail_unlock;
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
- mem_cgroup_swapin_uncharge_swap(entry);
+ mem_cgroup_swapin_uncharge_swap(entry, 1);
if (shadow)
- workingset_refault(folio, shadow);
+ workingset_refault(new_folio, shadow);
- /* Caller will initiate read into locked folio */
- folio_add_lru(folio);
+ /* Caller will initiate read into locked new_folio */
+ folio_add_lru(new_folio);
*new_page_allocated = true;
+ folio = new_folio;
got_folio:
- put_swap_device(si);
- return folio;
+ result = folio;
+ goto put_and_return;
fail_unlock:
- put_swap_folio(folio, entry);
- folio_unlock(folio);
- folio_put(folio);
-fail_put_swap:
+ put_swap_folio(new_folio, entry);
+ folio_unlock(new_folio);
+put_and_return:
put_swap_device(si);
- return NULL;
+ if (!(*new_page_allocated) && new_folio)
+ folio_put(new_folio);
+ return result;
}
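
The rework above changes __read_swap_cache_async() from allocate-per-iteration to allocate-once: new_folio survives -EEXIST retries and is released at the single put_and_return exit only if it was never installed. A reduced sketch of that pattern, with hypothetical alloc_buf()/try_install()/free_buf() helpers standing in for folio allocation and swapcache_prepare()/add_to_swap_cache():

#include <errno.h>
#include <stddef.h>

struct buf;				/* hypothetical resource type */
struct buf *alloc_buf(void);		/* stands in for folio_alloc_mpol() */
int try_install(struct buf *b);		/* 0 on success, -EEXIST to retry */
void free_buf(struct buf *b);

static struct buf *install_once(void)
{
	struct buf *newb = NULL;
	struct buf *result = NULL;
	int err;

	for (;;) {
		/* Allocate only once, even when looping on -EEXIST. */
		if (!newb) {
			newb = alloc_buf();
			if (!newb)
				break;
		}
		err = try_install(newb);
		if (!err) {
			result = newb;	/* ownership transferred */
			newb = NULL;
			break;
		}
		if (err != -EEXIST)
			break;		/* hard failure: drop the buffer */
	}

	/* Single cleanup point, like put_and_return in the patch. */
	if (newb)
		free_buf(newb);
	return result;
}
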
/*
@@ -698,10 +702,8 @@ skip:
/* The page was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
- if (unlikely(page_allocated)) {
- zswap_folio_swapin(folio);
+ if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
- }
return folio;
}
@@ -850,10 +852,8 @@ skip:
/* The folio was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
- if (unlikely(page_allocated)) {
- zswap_folio_swapin(folio);
+ if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
- }
return folio;
}
@@ -863,13 +863,13 @@ skip:
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* It's a main entry function for swap readahead. By the configuration,
* it will read ahead blocks by cluster-based(ie, physical disk based)
* or vma-based(ie, virtual address based on faulty address) readahead.
*/
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct mempolicy *mpol;
@@ -882,9 +882,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol);
- if (!folio)
- return NULL;
- return folio_file_page(folio, swp_offset(entry));
+ return folio;
}
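
With swapin_readahead() now returning the folio, extracting the precise page moves to the callers that still need one, exactly the folio_file_page() call deleted from the tail above. A caller-side sketch (the wrapper name is hypothetical):

/* Hypothetical wrapper showing the caller-side conversion. */
static struct page *swapin_page(swp_entry_t entry, struct vm_fault *vmf)
{
	struct folio *folio;

	folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
	if (!folio)
		return NULL;
	return folio_file_page(folio, swp_offset(entry));
}
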
#ifdef CONFIG_SYSFS
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 38bdc439651a..0cded32414a1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -53,6 +53,15 @@
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
+static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
+ unsigned int nr_pages);
+static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
+ unsigned int nr_entries);
+static bool folio_swapcache_freeable(struct folio *folio);
+static struct swap_cluster_info *lock_cluster_or_swap_info(
+ struct swap_info_struct *si, unsigned long offset);
+static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
+ struct swap_cluster_info *ci);
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -127,8 +136,44 @@ static inline unsigned char swap_count(unsigned char ent)
* corresponding page
*/
#define TTRS_UNMAPPED 0x2
-/* Reclaim the swap entry if swap is getting full*/
+/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL 0x4
+/* Reclaim directly, bypass the slot cache and don't touch device lock */
+#define TTRS_DIRECT 0x8
+
+static bool swap_is_has_cache(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages)
+{
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+
+ do {
+ VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
+ if (*map != SWAP_HAS_CACHE)
+ return false;
+ } while (++map < map_end);
+
+ return true;
+}
+
+static bool swap_is_last_map(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages, bool *has_cache)
+{
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+ unsigned char count = *map;
+
+ if (swap_count(count) != 1)
+ return false;
+
+ while (++map < map_end) {
+ if (*map != count)
+ return false;
+ }
+
+ *has_cache = !!(count & SWAP_HAS_CACHE);
+ return true;
+}
/*
* returns number of pages in the folio that backs the swap entry. If positive,
@@ -139,12 +184,22 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
unsigned long offset, unsigned long flags)
{
swp_entry_t entry = swp_entry(si->type, offset);
+ struct address_space *address_space = swap_address_space(entry);
+ struct swap_cluster_info *ci;
struct folio *folio;
- int ret = 0;
+ int ret, nr_pages;
+ bool need_reclaim;
- folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
+ folio = filemap_get_folio(address_space, swap_cache_index(entry));
if (IS_ERR(folio))
return 0;
+
+ /* offset could point to the middle of a large folio */
+ entry = folio->swap;
+ offset = swp_offset(entry);
+ nr_pages = folio_nr_pages(folio);
+ ret = -nr_pages;
+
/*
* When this function is called from scan_swap_map_slots() and it's
* called by vmscan.c at reclaiming folios. So we hold a folio lock
@@ -152,14 +207,50 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
* case and you should use folio_free_swap() with explicit folio_lock()
* in usual operations.
*/
- if (folio_trylock(folio)) {
- if ((flags & TTRS_ANYWAY) ||
- ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
- ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
- ret = folio_free_swap(folio);
- folio_unlock(folio);
+ if (!folio_trylock(folio))
+ goto out;
+
+ need_reclaim = ((flags & TTRS_ANYWAY) ||
+ ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
+ ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
+ if (!need_reclaim || !folio_swapcache_freeable(folio))
+ goto out_unlock;
+
+ /*
+ * It's safe to delete the folio from swap cache only if the folio's
+ * swap_map is HAS_CACHE only, which means the slots have no page table
+ * reference or pending writeback, and can't be allocated to others.
+ */
+ ci = lock_cluster_or_swap_info(si, offset);
+ need_reclaim = swap_is_has_cache(si, offset, nr_pages);
+ unlock_cluster_or_swap_info(si, ci);
+ if (!need_reclaim)
+ goto out_unlock;
+
+ if (!(flags & TTRS_DIRECT)) {
+ /* Free through slot cache */
+ delete_from_swap_cache(folio);
+ folio_set_dirty(folio);
+ ret = nr_pages;
+ goto out_unlock;
}
- ret = ret ? folio_nr_pages(folio) : -folio_nr_pages(folio);
+
+ xa_lock_irq(&address_space->i_pages);
+ __delete_from_swap_cache(folio, entry, NULL);
+ xa_unlock_irq(&address_space->i_pages);
+ folio_ref_sub(folio, nr_pages);
+ folio_set_dirty(folio);
+
+ spin_lock(&si->lock);
+ /* Only a single-page folio can be backed by zswap */
+ if (nr_pages == 1)
+ zswap_invalidate(entry);
+ swap_entry_range_free(si, entry, nr_pages);
+ spin_unlock(&si->lock);
+ ret = nr_pages;
+out_unlock:
+ folio_unlock(folio);
+out:
folio_put(folio);
return ret;
}
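
The batched paths above hinge on swap_is_has_cache() and swap_is_last_map(): a whole range may be freed or reclaimed in one step only when every swap_map byte in it carries the same state. Reduced to its core, the check is a uniform-byte scan (assuming nr >= 1, as the kernel callers guarantee):

#include <stdbool.h>
#include <stddef.h>

/* Return true if every byte in map[0..nr) equals 'expected'; this is
 * the shape of the swap_is_has_cache() test on si->swap_map. */
static bool range_is_uniform(const unsigned char *map, size_t nr,
			     unsigned char expected)
{
	const unsigned char *end = map + nr;

	do {
		if (*map != expected)
			return false;	/* mixed state: use the slow path */
	} while (++map < end);

	return true;
}
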
@@ -290,62 +381,21 @@ static void discard_swap_cluster(struct swap_info_struct *si,
#endif
#define LATENCY_LIMIT 256
-static inline void cluster_set_flag(struct swap_cluster_info *info,
- unsigned int flag)
-{
- info->flags = flag;
-}
-
-static inline unsigned int cluster_count(struct swap_cluster_info *info)
-{
- return info->data;
-}
-
-static inline void cluster_set_count(struct swap_cluster_info *info,
- unsigned int c)
-{
- info->data = c;
-}
-
-static inline void cluster_set_count_flag(struct swap_cluster_info *info,
- unsigned int c, unsigned int f)
-{
- info->flags = f;
- info->data = c;
-}
-
-static inline unsigned int cluster_next(struct swap_cluster_info *info)
-{
- return info->data;
-}
-
-static inline void cluster_set_next(struct swap_cluster_info *info,
- unsigned int n)
-{
- info->data = n;
-}
-
-static inline void cluster_set_next_flag(struct swap_cluster_info *info,
- unsigned int n, unsigned int f)
-{
- info->flags = f;
- info->data = n;
-}
-
static inline bool cluster_is_free(struct swap_cluster_info *info)
{
return info->flags & CLUSTER_FLAG_FREE;
}
-static inline bool cluster_is_null(struct swap_cluster_info *info)
+static inline unsigned int cluster_index(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
{
- return info->flags & CLUSTER_FLAG_NEXT_NULL;
+ return ci - si->cluster_info;
}
-static inline void cluster_set_null(struct swap_cluster_info *info)
+static inline unsigned int cluster_offset(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
{
- info->flags = CLUSTER_FLAG_NEXT_NULL;
- info->data = 0;
+ return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}
static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
@@ -394,65 +444,11 @@ static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
spin_unlock(&si->lock);
}
-static inline bool cluster_list_empty(struct swap_cluster_list *list)
-{
- return cluster_is_null(&list->head);
-}
-
-static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
-{
- return cluster_next(&list->head);
-}
-
-static void cluster_list_init(struct swap_cluster_list *list)
-{
- cluster_set_null(&list->head);
- cluster_set_null(&list->tail);
-}
-
-static void cluster_list_add_tail(struct swap_cluster_list *list,
- struct swap_cluster_info *ci,
- unsigned int idx)
-{
- if (cluster_list_empty(list)) {
- cluster_set_next_flag(&list->head, idx, 0);
- cluster_set_next_flag(&list->tail, idx, 0);
- } else {
- struct swap_cluster_info *ci_tail;
- unsigned int tail = cluster_next(&list->tail);
-
- /*
- * Nested cluster lock, but both cluster locks are
- * only acquired when we held swap_info_struct->lock
- */
- ci_tail = ci + tail;
- spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
- cluster_set_next(ci_tail, idx);
- spin_unlock(&ci_tail->lock);
- cluster_set_next_flag(&list->tail, idx, 0);
- }
-}
-
-static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
- struct swap_cluster_info *ci)
-{
- unsigned int idx;
-
- idx = cluster_next(&list->head);
- if (cluster_next(&list->tail) == idx) {
- cluster_set_null(&list->head);
- cluster_set_null(&list->tail);
- } else
- cluster_set_next_flag(&list->head,
- cluster_next(&ci[idx]), 0);
-
- return idx;
-}
-
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
- unsigned int idx)
+ struct swap_cluster_info *ci)
{
+ unsigned int idx = cluster_index(si, ci);
/*
* If scan_swap_map_slots() can't find a free cluster, it will check
* si->swap_map directly. To make sure the discarding cluster isn't
@@ -462,17 +458,23 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
- cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
-
+ VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+ list_move_tail(&ci->list, &si->discard_clusters);
+ ci->flags = 0;
schedule_work(&si->discard_work);
}
-static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
+static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
- struct swap_cluster_info *ci = si->cluster_info;
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
- cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
- cluster_list_add_tail(&si->free_clusters, ci, idx);
+ if (ci->flags)
+ list_move_tail(&ci->list, &si->free_clusters);
+ else
+ list_add_tail(&ci->list, &si->free_clusters);
+ ci->flags = CLUSTER_FLAG_FREE;
+ ci->order = 0;
}
/*
@@ -481,24 +483,24 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
*/
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
- struct swap_cluster_info *info, *ci;
+ struct swap_cluster_info *ci;
unsigned int idx;
- info = si->cluster_info;
-
- while (!cluster_list_empty(&si->discard_clusters)) {
- idx = cluster_list_del_first(&si->discard_clusters, info);
+ while (!list_empty(&si->discard_clusters)) {
+ ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
+ list_del(&ci->list);
+ idx = cluster_index(si, ci);
spin_unlock(&si->lock);
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
SWAPFILE_CLUSTER);
spin_lock(&si->lock);
- ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
- __free_cluster(si, idx);
+ spin_lock(&ci->lock);
+ __free_cluster(si, ci);
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
- unlock_cluster(ci);
+ spin_unlock(&ci->lock);
}
}
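
Much of the code deleted in the surrounding hunks is a hand-rolled cluster list that chained clusters through the flags/data fields of swap_cluster_info; the patch replaces it with ordinary struct list_head links, so clusters move between the free, discard, nonfull, frag, and full lists via list_move_tail(). In isolation, the drain loop it adopts looks like this (locking elided for brevity):

#include <linux/list.h>

struct cluster {
	struct list_head list;
	/* ... per-cluster state ... */
};

/* Illustrative drain loop following the shape of
 * swap_do_scheduled_discard() above. */
static void drain_pending(struct list_head *pending, struct list_head *free_list)
{
	struct cluster *c;

	while (!list_empty(pending)) {
		/* Detach the oldest entry, process it, then file it on the
		 * free list; list_del()/list_add_tail() replace the old
		 * index-linked operations. */
		c = list_first_entry(pending, struct cluster, list);
		list_del(&c->list);
		/* ... issue the discard for this cluster here ... */
		list_add_tail(&c->list, free_list);
	}
}
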
@@ -521,20 +523,15 @@ static void swap_users_ref_free(struct percpu_ref *ref)
complete(&si->comp);
}
-static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
+static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
- struct swap_cluster_info *ci = si->cluster_info;
-
- VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
- cluster_list_del_first(&si->free_clusters, ci);
- cluster_set_count_flag(ci + idx, 0, 0);
-}
+ VM_BUG_ON(ci->count != 0);
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
-static void free_cluster(struct swap_info_struct *si, unsigned long idx)
-{
- struct swap_cluster_info *ci = si->cluster_info + idx;
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
- VM_BUG_ON(cluster_count(ci) != 0);
/*
* If the swap is discardable, prepare discard the cluster
* instead of free it immediately. The cluster will be freed
@@ -542,175 +539,371 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx)
*/
if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
(SWP_WRITEOK | SWP_PAGE_DISCARD)) {
- swap_cluster_schedule_discard(si, idx);
+ swap_cluster_schedule_discard(si, ci);
return;
}
- __free_cluster(si, idx);
+ __free_cluster(si, ci);
}
/*
- * The cluster corresponding to page_nr will be used. The cluster will be
- * removed from free cluster list and its usage counter will be increased by
- * count.
+ * The cluster corresponding to page_nr will be used. The cluster will not be
+ * added to the free cluster list and its usage counter will be increased by 1.
+ * Only used for initialization.
*/
-static void add_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr,
- unsigned long count)
+static void inc_cluster_info_page(struct swap_info_struct *si,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+ struct swap_cluster_info *ci;
if (!cluster_info)
return;
- if (cluster_is_free(&cluster_info[idx]))
- alloc_cluster(p, idx);
- VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER);
- cluster_set_count(&cluster_info[idx],
- cluster_count(&cluster_info[idx]) + count);
-}
+ ci = cluster_info + idx;
+ ci->count++;
-/*
- * The cluster corresponding to page_nr will be used. The cluster will be
- * removed from free cluster list and its usage counter will be increased by 1.
- */
-static void inc_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr)
-{
- add_cluster_info_page(p, cluster_info, page_nr, 1);
+ VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
+ VM_BUG_ON(ci->flags);
}
/*
- * The cluster corresponding to page_nr decreases one usage. If the usage
- * counter becomes 0, which means no page in the cluster is in using, we can
- * optionally discard the cluster and add it to free cluster list.
+ * The usage of cluster ci decreases by @nr_pages. If the usage counter
+ * becomes 0, which means no page in the cluster is in use, we can
+ * optionally discard the cluster and add it to the free cluster list.
*/
-static void dec_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr)
+static void dec_cluster_info_page(struct swap_info_struct *si,
+ struct swap_cluster_info *ci, int nr_pages)
{
- unsigned long idx = page_nr / SWAPFILE_CLUSTER;
-
- if (!cluster_info)
+ if (!si->cluster_info)
return;
- VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
- cluster_set_count(&cluster_info[idx],
- cluster_count(&cluster_info[idx]) - 1);
+ VM_BUG_ON(ci->count < nr_pages);
+ VM_BUG_ON(cluster_is_free(ci));
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
+ ci->count -= nr_pages;
- if (cluster_count(&cluster_info[idx]) == 0)
- free_cluster(p, idx);
+ if (!ci->count) {
+ free_cluster(si, ci);
+ return;
+ }
+
+ if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
+ VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
+ list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ }
}
-/*
- * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
- * cluster list. Avoiding such abuse to avoid list corruption.
- */
-static bool
-scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
- unsigned long offset, int order)
+static bool cluster_reclaim_range(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ unsigned long start, unsigned long end)
{
- struct percpu_cluster *percpu_cluster;
- bool conflict;
+ unsigned char *map = si->swap_map;
+ unsigned long offset;
- offset /= SWAPFILE_CLUSTER;
- conflict = !cluster_list_empty(&si->free_clusters) &&
- offset != cluster_list_first(&si->free_clusters) &&
- cluster_is_free(&si->cluster_info[offset]);
+ spin_unlock(&ci->lock);
+ spin_unlock(&si->lock);
- if (!conflict)
- return false;
+ for (offset = start; offset < end; offset++) {
+ switch (READ_ONCE(map[offset])) {
+ case 0:
+ continue;
+ case SWAP_HAS_CACHE:
+ if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
+ continue;
+ goto out;
+ default:
+ goto out;
+ }
+ }
+out:
+ spin_lock(&si->lock);
+ spin_lock(&ci->lock);
+
+ /*
+ * Recheck the range whether reclaim succeeded or not; the slots
+ * could have been freed while we were not holding the lock.
+ */
+ for (offset = start; offset < end; offset++)
+ if (READ_ONCE(map[offset]))
+ return false;
- percpu_cluster = this_cpu_ptr(si->percpu_cluster);
- percpu_cluster->next[order] = SWAP_NEXT_INVALID;
return true;
}
-static inline bool swap_range_empty(char *swap_map, unsigned int start,
- unsigned int nr_pages)
+static bool cluster_scan_range(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ unsigned long start, unsigned int nr_pages)
{
- unsigned int i;
+ unsigned long offset, end = start + nr_pages;
+ unsigned char *map = si->swap_map;
+ bool need_reclaim = false;
- for (i = 0; i < nr_pages; i++) {
- if (swap_map[start + i])
+ for (offset = start; offset < end; offset++) {
+ switch (READ_ONCE(map[offset])) {
+ case 0:
+ continue;
+ case SWAP_HAS_CACHE:
+ if (!vm_swap_full())
+ return false;
+ need_reclaim = true;
+ continue;
+ default:
return false;
+ }
}
+ if (need_reclaim)
+ return cluster_reclaim_range(si, ci, start, end);
+
return true;
}
+static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
+ unsigned int start, unsigned char usage,
+ unsigned int order)
+{
+ unsigned int nr_pages = 1 << order;
+
+ if (cluster_is_free(ci)) {
+ if (nr_pages < SWAPFILE_CLUSTER) {
+ list_move_tail(&ci->list, &si->nonfull_clusters[order]);
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ }
+ ci->order = order;
+ }
+
+ memset(si->swap_map + start, usage, nr_pages);
+ swap_range_alloc(si, start, nr_pages);
+ ci->count += nr_pages;
+
+ if (ci->count == SWAPFILE_CLUSTER) {
+ VM_BUG_ON(!(ci->flags &
+ (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
+ list_move_tail(&ci->list, &si->full_clusters);
+ ci->flags = CLUSTER_FLAG_FULL;
+ }
+}
+
+static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
+ unsigned int *foundp, unsigned int order,
+ unsigned char usage)
+{
+ unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1);
+ unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
+ unsigned int nr_pages = 1 << order;
+ struct swap_cluster_info *ci;
+
+ if (end < nr_pages)
+ return SWAP_NEXT_INVALID;
+ end -= nr_pages;
+
+ ci = lock_cluster(si, offset);
+ if (ci->count + nr_pages > SWAPFILE_CLUSTER) {
+ offset = SWAP_NEXT_INVALID;
+ goto done;
+ }
+
+ while (offset <= end) {
+ if (cluster_scan_range(si, ci, offset, nr_pages)) {
+ cluster_alloc_range(si, ci, offset, usage, order);
+ *foundp = offset;
+ if (ci->count == SWAPFILE_CLUSTER) {
+ offset = SWAP_NEXT_INVALID;
+ goto done;
+ }
+ offset += nr_pages;
+ break;
+ }
+ offset += nr_pages;
+ }
+ if (offset > end)
+ offset = SWAP_NEXT_INVALID;
+done:
+ unlock_cluster(ci);
+ return offset;
+}
+
+static void swap_reclaim_full_clusters(struct swap_info_struct *si)
+{
+ long to_scan = 1;
+ unsigned long offset, end;
+ struct swap_cluster_info *ci;
+ unsigned char *map = si->swap_map;
+ int nr_reclaim, total_reclaimed = 0;
+
+ if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER)
+ to_scan = si->inuse_pages / SWAPFILE_CLUSTER;
+
+ while (!list_empty(&si->full_clusters)) {
+ ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
+ list_move_tail(&ci->list, &si->full_clusters);
+ offset = cluster_offset(si, ci);
+ end = min(si->max, offset + SWAPFILE_CLUSTER);
+ to_scan--;
+
+ while (offset < end) {
+ if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
+ spin_unlock(&si->lock);
+ nr_reclaim = __try_to_reclaim_swap(si, offset,
+ TTRS_ANYWAY | TTRS_DIRECT);
+ spin_lock(&si->lock);
+ if (nr_reclaim > 0) {
+ offset += nr_reclaim;
+ total_reclaimed += nr_reclaim;
+ continue;
+ } else if (nr_reclaim < 0) {
+ offset += -nr_reclaim;
+ continue;
+ }
+ }
+ offset++;
+ }
+ if (to_scan <= 0 || total_reclaimed)
+ break;
+ }
+}
+
/*
* Try to get swap entries with specified order from current cpu's swap entry
* pool (a cluster). This might involve allocating a new cluster for current CPU
* too.
*/
-static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
- unsigned long *offset, unsigned long *scan_base, int order)
+static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
+ unsigned char usage)
{
- unsigned int nr_pages = 1 << order;
struct percpu_cluster *cluster;
struct swap_cluster_info *ci;
- unsigned int tmp, max;
+ unsigned int offset, found = 0;
new_cluster:
+ lockdep_assert_held(&si->lock);
cluster = this_cpu_ptr(si->percpu_cluster);
- tmp = cluster->next[order];
- if (tmp == SWAP_NEXT_INVALID) {
- if (!cluster_list_empty(&si->free_clusters)) {
- tmp = cluster_next(&si->free_clusters.head) *
- SWAPFILE_CLUSTER;
- } else if (!cluster_list_empty(&si->discard_clusters)) {
- /*
- * we don't have free cluster but have some clusters in
- * discarding, do discard now and reclaim them, then
- * reread cluster_next_cpu since we dropped si->lock
- */
- swap_do_scheduled_discard(si);
- *scan_base = this_cpu_read(*si->cluster_next_cpu);
- *offset = *scan_base;
- goto new_cluster;
- } else
- return false;
+ offset = cluster->next[order];
+ if (offset) {
+ offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
+ if (found)
+ goto done;
}
- /*
- * Other CPUs can use our cluster if they can't find a free cluster,
- * check if there is still free entry in the cluster, maintaining
- * natural alignment.
- */
- max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER));
- if (tmp < max) {
- ci = lock_cluster(si, tmp);
- while (tmp < max) {
- if (swap_range_empty(si->swap_map, tmp, nr_pages))
+ if (!list_empty(&si->free_clusters)) {
+ ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
+ VM_BUG_ON(!found);
+ goto done;
+ }
+
+ if (order < PMD_ORDER) {
+ unsigned int frags = 0;
+
+ while (!list_empty(&si->nonfull_clusters[order])) {
+ ci = list_first_entry(&si->nonfull_clusters[order],
+ struct swap_cluster_info, list);
+ list_move_tail(&ci->list, &si->frag_clusters[order]);
+ ci->flags = CLUSTER_FLAG_FRAG;
+ si->frag_cluster_nr[order]++;
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, order, usage);
+ frags++;
+ if (found)
break;
- tmp += nr_pages;
}
- unlock_cluster(ci);
+
+ if (!found) {
+ /*
+ * Nonfull clusters are moved to the frag list tail if we reached
+ * here; count them too so we don't over-scan the frag list.
+ */
+ while (frags < si->frag_cluster_nr[order]) {
+ ci = list_first_entry(&si->frag_clusters[order],
+ struct swap_cluster_info, list);
+ /*
+ * Rotate the frag list to iterate; these clusters all failed a
+ * high-order allocation or were moved here due to per-CPU usage.
+ * Rotating helps keep usable clusters ahead.
+ */
+ list_move_tail(&ci->list, &si->frag_clusters[order]);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, order, usage);
+ frags++;
+ if (found)
+ break;
+ }
+ }
}
- if (tmp >= max) {
- cluster->next[order] = SWAP_NEXT_INVALID;
+
+ if (found)
+ goto done;
+
+ if (!list_empty(&si->discard_clusters)) {
+ /*
+ * We don't have a free cluster, but some clusters are being
+ * discarded; do the discard now and reclaim them, then
+ * rescan since we dropped si->lock.
+ */
+ swap_do_scheduled_discard(si);
goto new_cluster;
}
- *offset = tmp;
- *scan_base = tmp;
- tmp += nr_pages;
- cluster->next[order] = tmp < max ? tmp : SWAP_NEXT_INVALID;
- return true;
+
+ if (order)
+ goto done;
+
+ /* Order-0 stealing from higher orders */
+ for (int o = 1; o < SWAP_NR_ORDERS; o++) {
+ /*
+ * Clusters here have at least one usable slot and can't fail an order-0
+ * allocation, but reclaim may drop si->lock and race with another user.
+ */
+ while (!list_empty(&si->frag_clusters[o])) {
+ ci = list_first_entry(&si->frag_clusters[o],
+ struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, 0, usage);
+ if (found)
+ goto done;
+ }
+
+ while (!list_empty(&si->nonfull_clusters[o])) {
+ ci = list_first_entry(&si->nonfull_clusters[o],
+ struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, 0, usage);
+ if (found)
+ goto done;
+ }
+ }
+
+done:
+ /* Try to reclaim from full clusters if the device is nearly full */
+ if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
+ swap_reclaim_full_clusters(si);
+ if (!found && !order && si->pages != si->inuse_pages)
+ goto new_cluster;
+ }
+
+ cluster->next[order] = offset;
+ return found;
}
-static void __del_from_avail_list(struct swap_info_struct *p)
+static void __del_from_avail_list(struct swap_info_struct *si)
{
int nid;
- assert_spin_locked(&p->lock);
+ assert_spin_locked(&si->lock);
for_each_node(nid)
- plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
}
-static void del_from_avail_list(struct swap_info_struct *p)
+static void del_from_avail_list(struct swap_info_struct *si)
{
spin_lock(&swap_avail_lock);
- __del_from_avail_list(p);
+ __del_from_avail_list(si);
spin_unlock(&swap_avail_lock);
}
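
cluster_alloc_swap_entry() above walks a fixed fallback order: the per-CPU next hint, the free list, the per-order nonfull and frag lists, scheduled discards, and, for order 0 only, stealing from higher-order clusters. Compressed into pseudo-helpers (all try_*() names are hypothetical stand-ins for alloc_swap_scan_cluster() on the respective list):

/* Hypothetical helpers; each returns a found offset or 0. */
unsigned long try_percpu_hint(int order);
unsigned long try_free_list(int order);
unsigned long try_nonfull_and_frag(int order);
bool discard_pending_and_reclaim(void);
unsigned long steal_from_higher_orders(void);

static unsigned long alloc_entry(int order)
{
	unsigned long found;

retry:
	if ((found = try_percpu_hint(order)))
		return found;
	if ((found = try_free_list(order)))
		return found;
	if ((found = try_nonfull_and_frag(order)))
		return found;
	/* Discarded clusters become free once the discard completes,
	 * so flush them and start over, as the patch does. */
	if (discard_pending_and_reclaim())
		goto retry;
	/* Only order-0 requests may split slots out of clusters that
	 * were filled at larger orders. */
	if (!order)
		return steal_from_higher_orders();
	return 0;
}
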
@@ -731,13 +924,13 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
}
}
-static void add_to_avail_list(struct swap_info_struct *p)
+static void add_to_avail_list(struct swap_info_struct *si)
{
int nid;
spin_lock(&swap_avail_lock);
for_each_node(nid)
- plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
spin_unlock(&swap_avail_lock);
}
@@ -747,6 +940,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
+ unsigned int i;
+
+ /*
+ * Use atomic clear_bit operations on zeromap instead of non-atomic
+ * bitmap_clear to prevent corruption of adjacent bits from simultaneous writes.
+ */
+ for (i = 0; i < nr_entries; i++)
+ clear_bit(offset + i, si->zeromap);
if (offset < si->lowest_bit)
si->lowest_bit = offset;
@@ -822,11 +1023,29 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si,
return false;
}
+static int cluster_alloc_swap(struct swap_info_struct *si,
+ unsigned char usage, int nr,
+ swp_entry_t slots[], int order)
+{
+ int n_ret = 0;
+
+ VM_BUG_ON(!si->cluster_info);
+
+ while (n_ret < nr) {
+ unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
+
+ if (!offset)
+ break;
+ slots[n_ret++] = swp_entry(si->type, offset);
+ }
+
+ return n_ret;
+}
+
static int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[], int order)
{
- struct swap_cluster_info *ci;
unsigned long offset;
unsigned long scan_base;
unsigned long last_in_cluster = 0;
@@ -865,26 +1084,16 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
return 0;
}
+ if (si->cluster_info)
+ return cluster_alloc_swap(si, usage, nr, slots, order);
+
si->flags += SWP_SCANNING;
- /*
- * Use percpu scan base for SSD to reduce lock contention on
- * cluster and swap cache. For HDD, sequential access is more
- * important.
- */
- if (si->flags & SWP_SOLIDSTATE)
- scan_base = this_cpu_read(*si->cluster_next_cpu);
- else
- scan_base = si->cluster_next;
+
+ /* For HDD, sequential access is more important. */
+ scan_base = si->cluster_next;
offset = scan_base;
- /* SSD algorithm */
- if (si->cluster_info) {
- if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) {
- if (order > 0)
- goto no_page;
- goto scan;
- }
- } else if (unlikely(!si->cluster_nr--)) {
+ if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
@@ -895,8 +1104,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
/*
* If seek is expensive, start searching for new cluster from
* start of partition, to minimize the span of allocated swap.
- * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
- * case, just handled by scan_swap_map_try_ssd_cluster() above.
*/
scan_base = offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
@@ -924,19 +1131,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
}
checks:
- if (si->cluster_info) {
- while (scan_swap_map_ssd_cluster_conflict(si, offset, order)) {
- /* take a break if we already got some slots */
- if (n_ret)
- goto done;
- if (!scan_swap_map_try_ssd_cluster(si, &offset,
- &scan_base, order)) {
- if (order > 0)
- goto no_page;
- goto scan;
- }
- }
- }
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
@@ -944,13 +1138,11 @@ checks:
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- ci = lock_cluster(si, offset);
/* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
- unlock_cluster(ci);
spin_unlock(&si->lock);
- swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
+ swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed > 0)
@@ -959,15 +1151,12 @@ checks:
}
if (si->swap_map[offset]) {
- unlock_cluster(ci);
if (!n_ret)
goto scan;
else
goto done;
}
memset(si->swap_map + offset, usage, nr_pages);
- add_cluster_info_page(si, si->cluster_info, offset, nr_pages);
- unlock_cluster(ci);
swap_range_alloc(si, offset, nr_pages);
slots[n_ret++] = swp_entry(si->type, offset);
@@ -988,13 +1177,7 @@ checks:
latency_ration = LATENCY_LIMIT;
}
- /* try to get more slots in cluster */
- if (si->cluster_info) {
- if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order))
- goto checks;
- if (order > 0)
- goto done;
- } else if (si->cluster_nr && !si->swap_map[++offset]) {
+ if (si->cluster_nr && !si->swap_map[++offset]) {
/* non-ssd case, still more slots in cluster? */
--si->cluster_nr;
goto checks;
@@ -1055,19 +1238,6 @@ no_page:
return n_ret;
}
-static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
-{
- unsigned long offset = idx * SWAPFILE_CLUSTER;
- struct swap_cluster_info *ci;
-
- ci = lock_cluster(si, offset);
- memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
- cluster_set_count_flag(ci, 0, 0);
- free_cluster(si, idx);
- unlock_cluster(ci);
- swap_range_free(si, offset, SWAPFILE_CLUSTER);
-}
-
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
{
int order = swap_entry_order(entry_order);
@@ -1148,22 +1318,22 @@ noswap:
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
unsigned long offset;
if (!entry.val)
goto out;
- p = swp_swap_info(entry);
- if (!p)
+ si = swp_swap_info(entry);
+ if (!si)
goto bad_nofile;
- if (data_race(!(p->flags & SWP_USED)))
+ if (data_race(!(si->flags & SWP_USED)))
goto bad_device;
offset = swp_offset(entry);
- if (offset >= p->max)
+ if (offset >= si->max)
goto bad_offset;
- if (data_race(!p->swap_map[swp_offset(entry)]))
+ if (data_race(!si->swap_map[swp_offset(entry)]))
goto bad_free;
- return p;
+ return si;
bad_free:
pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
@@ -1196,14 +1366,14 @@ static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
return p;
}
-static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
unsigned long offset,
unsigned char usage)
{
unsigned char count;
unsigned char has_cache;
- count = p->swap_map[offset];
+ count = si->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
@@ -1219,7 +1389,7 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
- if (swap_count_continued(p, offset, count))
+ if (swap_count_continued(si, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
@@ -1229,9 +1399,9 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
usage = count | has_cache;
if (usage)
- WRITE_ONCE(p->swap_map[offset], usage);
+ WRITE_ONCE(si->swap_map[offset], usage);
else
- WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
+ WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
return usage;
}
@@ -1310,66 +1480,121 @@ put_out:
return NULL;
}
-static unsigned char __swap_entry_free(struct swap_info_struct *p,
+static unsigned char __swap_entry_free(struct swap_info_struct *si,
swp_entry_t entry)
{
struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
unsigned char usage;
- ci = lock_cluster_or_swap_info(p, offset);
- usage = __swap_entry_free_locked(p, offset, 1);
- unlock_cluster_or_swap_info(p, ci);
+ ci = lock_cluster_or_swap_info(si, offset);
+ usage = __swap_entry_free_locked(si, offset, 1);
+ unlock_cluster_or_swap_info(si, ci);
if (!usage)
free_swap_slot(entry);
return usage;
}
-static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
+static bool __swap_entries_free(struct swap_info_struct *si,
+ swp_entry_t entry, int nr)
{
- struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
+ unsigned int type = swp_type(entry);
+ struct swap_cluster_info *ci;
+ bool has_cache = false;
unsigned char count;
+ int i;
+
+ if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
+ goto fallback;
+ /* cross into another cluster */
+ if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
+ goto fallback;
+
+ ci = lock_cluster_or_swap_info(si, offset);
+ if (!swap_is_last_map(si, offset, nr, &has_cache)) {
+ unlock_cluster_or_swap_info(si, ci);
+ goto fallback;
+ }
+ for (i = 0; i < nr; i++)
+ WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
+ unlock_cluster_or_swap_info(si, ci);
+
+ if (!has_cache) {
+ for (i = 0; i < nr; i++)
+ zswap_invalidate(swp_entry(si->type, offset + i));
+ spin_lock(&si->lock);
+ swap_entry_range_free(si, entry, nr);
+ spin_unlock(&si->lock);
+ }
+ return has_cache;
+
+fallback:
+ for (i = 0; i < nr; i++) {
+ if (data_race(si->swap_map[offset + i])) {
+ count = __swap_entry_free(si, swp_entry(type, offset + i));
+ if (count == SWAP_HAS_CACHE)
+ has_cache = true;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ }
+ return has_cache;
+}
- ci = lock_cluster(p, offset);
- count = p->swap_map[offset];
- VM_BUG_ON(count != SWAP_HAS_CACHE);
- p->swap_map[offset] = 0;
- dec_cluster_info_page(p, p->cluster_info, offset);
+/*
+ * Drop the last HAS_CACHE flag of swap entries; the caller has to
+ * ensure all entries belong to the same cgroup.
+ */
+static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
+ unsigned int nr_pages)
+{
+ unsigned long offset = swp_offset(entry);
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+ struct swap_cluster_info *ci;
+
+ ci = lock_cluster(si, offset);
+ do {
+ VM_BUG_ON(*map != SWAP_HAS_CACHE);
+ *map = 0;
+ } while (++map < map_end);
+ dec_cluster_info_page(si, ci, nr_pages);
unlock_cluster(ci);
- mem_cgroup_uncharge_swap(entry, 1);
- swap_range_free(p, offset, 1);
+ mem_cgroup_uncharge_swap(entry, nr_pages);
+ swap_range_free(si, offset, nr_pages);
}
-static void cluster_swap_free_nr(struct swap_info_struct *sis,
- unsigned long offset, int nr_pages)
+static void cluster_swap_free_nr(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages,
+ unsigned char usage)
{
struct swap_cluster_info *ci;
DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
int i, nr;
- ci = lock_cluster_or_swap_info(sis, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
while (nr_pages) {
nr = min(BITS_PER_LONG, nr_pages);
for (i = 0; i < nr; i++) {
- if (!__swap_entry_free_locked(sis, offset + i, 1))
+ if (!__swap_entry_free_locked(si, offset + i, usage))
bitmap_set(to_free, i, 1);
}
if (!bitmap_empty(to_free, BITS_PER_LONG)) {
- unlock_cluster_or_swap_info(sis, ci);
+ unlock_cluster_or_swap_info(si, ci);
for_each_set_bit(i, to_free, BITS_PER_LONG)
- free_swap_slot(swp_entry(sis->type, offset + i));
+ free_swap_slot(swp_entry(si->type, offset + i));
if (nr == nr_pages)
return;
bitmap_clear(to_free, 0, BITS_PER_LONG);
- ci = lock_cluster_or_swap_info(sis, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
}
offset += nr;
nr_pages -= nr;
}
- unlock_cluster_or_swap_info(sis, ci);
+ unlock_cluster_or_swap_info(si, ci);
}
/*
@@ -1388,7 +1613,7 @@ void swap_free_nr(swp_entry_t entry, int nr_pages)
while (nr_pages) {
nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
- cluster_swap_free_nr(sis, offset, nr);
+ cluster_swap_free_nr(sis, offset, nr, 1);
offset += nr;
nr_pages -= nr;
}
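
cluster_swap_free_nr() above minimizes lock traffic with a small on-stack bitmap: it records, under the cluster lock, which of up to BITS_PER_LONG slots dropped to zero, then releases the lock once and frees all recorded slots in a batch. A generic sketch of that collect-then-release pattern (the lock/free helpers are hypothetical):

enum { CHUNK = 8 * sizeof(unsigned long) };	/* mirrors BITS_PER_LONG */

/* Hypothetical helpers standing in for the cluster lock and
 * free_swap_slot() in the patch above. */
void chunk_lock(void);
void chunk_unlock(void);
bool drop_ref(unsigned long idx);	/* true when idx reaches zero */
void free_slot(unsigned long idx);

static void free_batch(unsigned long start, unsigned int nr)
{
	unsigned long to_free = 0;	/* one bit per slot in this chunk */
	unsigned int i;

	if (nr > CHUNK)
		nr = CHUNK;		/* callers loop per CHUNK-sized chunk */

	chunk_lock();
	for (i = 0; i < nr; i++) {
		if (drop_ref(start + i))
			to_free |= 1UL << i;	/* defer the actual free */
	}
	chunk_unlock();

	/* Free outside the lock, in one pass, as the patch does. */
	for (i = 0; i < CHUNK; i++) {
		if (to_free & (1UL << i))
			free_slot(start + i);
	}
}
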
@@ -1400,12 +1625,8 @@ void swap_free_nr(swp_entry_t entry, int nr_pages)
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
unsigned long offset = swp_offset(entry);
- unsigned long idx = offset / SWAPFILE_CLUSTER;
struct swap_cluster_info *ci;
struct swap_info_struct *si;
- unsigned char *map;
- unsigned int i, free_entries = 0;
- unsigned char val;
int size = 1 << swap_entry_order(folio_order(folio));
si = _swap_info_get(entry);
@@ -1413,24 +1634,14 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
return;
ci = lock_cluster_or_swap_info(si, offset);
- if (size == SWAPFILE_CLUSTER) {
- map = si->swap_map + offset;
- for (i = 0; i < SWAPFILE_CLUSTER; i++) {
- val = map[i];
- VM_BUG_ON(!(val & SWAP_HAS_CACHE));
- if (val == SWAP_HAS_CACHE)
- free_entries++;
- }
- if (free_entries == SWAPFILE_CLUSTER) {
- unlock_cluster_or_swap_info(si, ci);
- spin_lock(&si->lock);
- mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
- swap_free_cluster(si, idx);
- spin_unlock(&si->lock);
- return;
- }
+ if (size > 1 && swap_is_has_cache(si, offset, size)) {
+ unlock_cluster_or_swap_info(si, ci);
+ spin_lock(&si->lock);
+ swap_entry_range_free(si, entry, size);
+ spin_unlock(&si->lock);
+ return;
}
- for (i = 0; i < size; i++, entry.val++) {
+ for (int i = 0; i < size; i++, entry.val++) {
if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
unlock_cluster_or_swap_info(si, ci);
free_swap_slot(entry);
@@ -1470,7 +1681,7 @@ void swapcache_free_entries(swp_entry_t *entries, int n)
for (i = 0; i < n; ++i) {
p = swap_info_get_cont(entries[i], prev);
if (p)
- swap_entry_free(p, entries[i]);
+ swap_entry_range_free(p, entries[i], 1);
prev = p;
}
if (p)
@@ -1509,28 +1720,28 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
int swp_swapcount(swp_entry_t entry)
{
int count, tmp_count, n;
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct swap_cluster_info *ci;
struct page *page;
pgoff_t offset;
unsigned char *map;
- p = _swap_info_get(entry);
- if (!p)
+ si = _swap_info_get(entry);
+ if (!si)
return 0;
offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(p, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
- count = swap_count(p->swap_map[offset]);
+ count = swap_count(si->swap_map[offset]);
if (!(count & COUNT_CONTINUED))
goto out;
count &= ~COUNT_CONTINUED;
n = SWAP_MAP_MAX + 1;
- page = vmalloc_to_page(p->swap_map + offset);
+ page = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
VM_BUG_ON(page_private(page) != SWP_CONTINUED);
@@ -1544,7 +1755,7 @@ int swp_swapcount(swp_entry_t entry)
n *= (SWAP_CONT_MAX + 1);
} while (tmp_count & COUNT_CONTINUED);
out:
- unlock_cluster_or_swap_info(p, ci);
+ unlock_cluster_or_swap_info(si, ci);
return count;
}
@@ -1590,16 +1801,7 @@ static bool folio_swapped(struct folio *folio)
return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}
-/**
- * folio_free_swap() - Free the swap space used for this folio.
- * @folio: The folio to remove.
- *
- * If swap is getting full, or if there are no more mappings of this folio,
- * then call folio_free_swap to free its swap space.
- *
- * Return: true if we were able to release the swap space.
- */
-bool folio_free_swap(struct folio *folio)
+static bool folio_swapcache_freeable(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -1607,8 +1809,6 @@ bool folio_free_swap(struct folio *folio)
return false;
if (folio_test_writeback(folio))
return false;
- if (folio_swapped(folio))
- return false;
/*
* Once hibernation has begun to create its image of memory,
@@ -1628,6 +1828,25 @@ bool folio_free_swap(struct folio *folio)
if (pm_suspended_storage())
return false;
+ return true;
+}
+
+/**
+ * folio_free_swap() - Free the swap space used for this folio.
+ * @folio: The folio to remove.
+ *
+ * If swap is getting full, or if there are no more mappings of this folio,
+ * then call folio_free_swap to free its swap space.
+ *
+ * Return: true if we were able to release the swap space.
+ */
+bool folio_free_swap(struct folio *folio)
+{
+ if (!folio_swapcache_freeable(folio))
+ return false;
+ if (folio_swapped(folio))
+ return false;
+
delete_from_swap_cache(folio);
folio_set_dirty(folio);
return true;
@@ -1647,11 +1866,9 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
const unsigned long start_offset = swp_offset(entry);
const unsigned long end_offset = start_offset + nr;
- unsigned int type = swp_type(entry);
struct swap_info_struct *si;
bool any_only_cache = false;
unsigned long offset;
- unsigned char count;
if (non_swap_entry(entry))
return;
@@ -1666,15 +1883,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
/*
* First free all entries in the range.
*/
- for (offset = start_offset; offset < end_offset; offset++) {
- if (data_race(si->swap_map[offset])) {
- count = __swap_entry_free(si, swp_entry(type, offset));
- if (count == SWAP_HAS_CACHE)
- any_only_cache = true;
- } else {
- WARN_ON_ONCE(1);
- }
- }
+ any_only_cache = __swap_entries_free(si, entry, nr);
/*
* Short-circuit the below loop if none of the entries had their
@@ -1704,7 +1913,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
* to the next boundary.
*/
nr = __try_to_reclaim_swap(si, offset,
- TTRS_UNMAPPED | TTRS_FULL);
+ TTRS_UNMAPPED | TTRS_FULL);
if (nr == 0)
nr = 1;
else if (nr < 0)
@@ -1979,7 +2188,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
folio = swap_cache_get_folio(entry, vma, addr);
if (!folio) {
- struct page *page;
struct vm_fault vmf = {
.vma = vma,
.address = addr,
@@ -1987,10 +2195,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
.pmd = pmd,
};
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf);
- if (page)
- folio = page_folio(page);
}
if (!folio) {
swp_count = READ_ONCE(si->swap_map[offset]);
@@ -2397,52 +2603,54 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
return generic_swapfile_activate(sis, swap_file, span);
}
-static int swap_node(struct swap_info_struct *p)
+static int swap_node(struct swap_info_struct *si)
{
struct block_device *bdev;
- if (p->bdev)
- bdev = p->bdev;
+ if (si->bdev)
+ bdev = si->bdev;
else
- bdev = p->swap_file->f_inode->i_sb->s_bdev;
+ bdev = si->swap_file->f_inode->i_sb->s_bdev;
return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
}
-static void setup_swap_info(struct swap_info_struct *p, int prio,
+static void setup_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info)
+ struct swap_cluster_info *cluster_info,
+ unsigned long *zeromap)
{
int i;
if (prio >= 0)
- p->prio = prio;
+ si->prio = prio;
else
- p->prio = --least_priority;
+ si->prio = --least_priority;
/*
* the plist prio is negated because plist ordering is
* low-to-high, while swap ordering is high-to-low
*/
- p->list.prio = -p->prio;
+ si->list.prio = -si->prio;
for_each_node(i) {
- if (p->prio >= 0)
- p->avail_lists[i].prio = -p->prio;
+ if (si->prio >= 0)
+ si->avail_lists[i].prio = -si->prio;
else {
- if (swap_node(p) == i)
- p->avail_lists[i].prio = 1;
+ if (swap_node(si) == i)
+ si->avail_lists[i].prio = 1;
else
- p->avail_lists[i].prio = -p->prio;
+ si->avail_lists[i].prio = -si->prio;
}
}
- p->swap_map = swap_map;
- p->cluster_info = cluster_info;
+ si->swap_map = swap_map;
+ si->cluster_info = cluster_info;
+ si->zeromap = zeromap;
}
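
The negated priority is easiest to see with concrete numbers. Below is a minimal user-space sketch (device names and priorities are made up) showing that storing -prio in a list sorted low-to-high yields the highest-priority swap device first:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical swap devices; plist_prio = -swap_prio. */
    struct dev { const char *name; int swap_prio; int plist_prio; };

    static int by_plist(const void *a, const void *b)
    {
        return ((const struct dev *)a)->plist_prio -
               ((const struct dev *)b)->plist_prio;
    }

    int main(void)
    {
        struct dev devs[] = {
            { "sda", 5,  -5 },
            { "sdb", 10, -10 },
        };
        /* A plist sorts low-to-high, so -10 sorts before -5 ... */
        qsort(devs, 2, sizeof(devs[0]), by_plist);
        printf("first: %s\n", devs[0].name); /* sdb: highest swap priority */
        return 0;
    }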
-static void _enable_swap_info(struct swap_info_struct *p)
+static void _enable_swap_info(struct swap_info_struct *si)
{
- p->flags |= SWP_WRITEOK;
- atomic_long_add(p->pages, &nr_swap_pages);
- total_swap_pages += p->pages;
+ si->flags |= SWP_WRITEOK;
+ atomic_long_add(si->pages, &nr_swap_pages);
+ total_swap_pages += si->pages;
assert_spin_locked(&swap_lock);
/*
@@ -2455,40 +2663,41 @@ static void _enable_swap_info(struct swap_info_struct *p)
* which allocates swap pages from the highest available priority
* swap_info_struct.
*/
- plist_add(&p->list, &swap_active_head);
+ plist_add(&si->list, &swap_active_head);
/* add to available list iff swap device is not full */
- if (p->highest_bit)
- add_to_avail_list(p);
+ if (si->highest_bit)
+ add_to_avail_list(si);
}
-static void enable_swap_info(struct swap_info_struct *p, int prio,
+static void enable_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info)
+ struct swap_cluster_info *cluster_info,
+ unsigned long *zeromap)
{
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- setup_swap_info(p, prio, swap_map, cluster_info);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
/*
* Finished initializing swap device, now it's safe to reference it.
*/
- percpu_ref_resurrect(&p->users);
+ percpu_ref_resurrect(&si->users);
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- _enable_swap_info(p);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ _enable_swap_info(si);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
-static void reinsert_swap_info(struct swap_info_struct *p)
+static void reinsert_swap_info(struct swap_info_struct *si)
{
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
- _enable_swap_info(p);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
+ _enable_swap_info(si);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
@@ -2511,6 +2720,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
+ unsigned long *zeromap;
struct swap_cluster_info *cluster_info;
struct file *swap_file, *victim;
struct address_space *mapping;
@@ -2633,6 +2843,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
+ zeromap = p->zeromap;
+ p->zeromap = NULL;
cluster_info = p->cluster_info;
p->cluster_info = NULL;
spin_unlock(&p->lock);
@@ -2645,6 +2857,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
vfree(swap_map);
+ kvfree(zeromap);
kvfree(cluster_info);
/* Destroy swap account information */
swap_cgroup_swapoff(p->type);
@@ -2874,20 +3087,20 @@ static struct swap_info_struct *alloc_swap_info(void)
return p;
}
-static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
+static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
{
if (S_ISBLK(inode->i_mode)) {
- p->bdev = I_BDEV(inode);
+ si->bdev = I_BDEV(inode);
/*
* Zoned block devices contain zones that have a sequential
* write only restriction. Hence zoned block devices are not
* suitable for swapping. Disallow them here.
*/
- if (bdev_is_zoned(p->bdev))
+ if (bdev_is_zoned(si->bdev))
return -EINVAL;
- p->flags |= SWP_BLKDEV;
+ si->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
- p->bdev = inode->i_sb->s_bdev;
+ si->bdev = inode->i_sb->s_bdev;
}
return 0;
@@ -2922,7 +3135,7 @@ __weak unsigned long arch_max_swapfile_size(void)
return generic_max_swapfile_size();
}
-static unsigned long read_swap_header(struct swap_info_struct *p,
+static unsigned long read_swap_header(struct swap_info_struct *si,
union swap_header *swap_header,
struct inode *inode)
{
@@ -2953,9 +3166,9 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
}
- p->lowest_bit = 1;
- p->cluster_next = 1;
- p->cluster_nr = 0;
+ si->lowest_bit = 1;
+ si->cluster_next = 1;
+ si->cluster_nr = 0;
maxpages = swapfile_maximum_size;
last_page = swap_header->info.last_page;
@@ -2973,7 +3186,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
- p->highest_bit = maxpages - 1;
+ si->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
@@ -2997,25 +3210,18 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
#define SWAP_CLUSTER_COLS \
max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
-static int setup_swap_map_and_extents(struct swap_info_struct *p,
+static int setup_swap_map_and_extents(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info,
unsigned long maxpages,
sector_t *span)
{
- unsigned int j, k;
unsigned int nr_good_pages;
+ unsigned long i;
int nr_extents;
- unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
- unsigned long i, idx;
nr_good_pages = maxpages - 1; /* omit header page */
- cluster_list_init(&p->free_clusters);
- cluster_list_init(&p->discard_clusters);
-
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -3023,40 +3229,87 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
- /*
- * Haven't marked the cluster free yet, no list
- * operation involved
- */
- inc_cluster_info_page(p, cluster_info, page_nr);
}
}
- /* Haven't marked the cluster free yet, no list operation involved */
- for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
- inc_cluster_info_page(p, cluster_info, i);
-
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
- /*
- * Not mark the cluster free yet, no list
- * operation involved
- */
- inc_cluster_info_page(p, cluster_info, 0);
- p->max = maxpages;
- p->pages = nr_good_pages;
- nr_extents = setup_swap_extents(p, span);
+ si->max = maxpages;
+ si->pages = nr_good_pages;
+ nr_extents = setup_swap_extents(si, span);
if (nr_extents < 0)
return nr_extents;
- nr_good_pages = p->pages;
+ nr_good_pages = si->pages;
}
if (!nr_good_pages) {
pr_warn("Empty swap-file\n");
return -EINVAL;
}
+ return nr_extents;
+}
+
+static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
+ union swap_header *swap_header,
+ unsigned long maxpages)
+{
+ unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
+ struct swap_cluster_info *cluster_info;
+ unsigned long i, j, k, idx;
+ int cpu, err = -ENOMEM;
+
+ cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
if (!cluster_info)
- return nr_extents;
+ goto err;
+
+ for (i = 0; i < nr_clusters; i++)
+ spin_lock_init(&cluster_info[i].lock);
+
+ si->cluster_next_cpu = alloc_percpu(unsigned int);
+ if (!si->cluster_next_cpu)
+ goto err_free;
+
+ /* Random start position to help with wear leveling */
+ for_each_possible_cpu(cpu)
+ per_cpu(*si->cluster_next_cpu, cpu) =
+ get_random_u32_inclusive(1, si->highest_bit);
+
+ si->percpu_cluster = alloc_percpu(struct percpu_cluster);
+ if (!si->percpu_cluster)
+ goto err_free;
+
+ for_each_possible_cpu(cpu) {
+ struct percpu_cluster *cluster;
+
+ cluster = per_cpu_ptr(si->percpu_cluster, cpu);
+ for (i = 0; i < SWAP_NR_ORDERS; i++)
+ cluster->next[i] = SWAP_NEXT_INVALID;
+ }
+
+ /*
+ * Mark unusable pages as unavailable. The clusters aren't
+ * marked free yet, so no list operations are involved.
+ *
+ * See setup_swap_map_and_extents(): header page, bad pages,
+ * and the EOF part of the last cluster.
+ */
+ inc_cluster_info_page(si, cluster_info, 0);
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ inc_cluster_info_page(si, cluster_info,
+ swap_header->info.badpages[i]);
+ for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+ inc_cluster_info_page(si, cluster_info, i);
+
+ INIT_LIST_HEAD(&si->free_clusters);
+ INIT_LIST_HEAD(&si->full_clusters);
+ INIT_LIST_HEAD(&si->discard_clusters);
+ for (i = 0; i < SWAP_NR_ORDERS; i++) {
+ INIT_LIST_HEAD(&si->nonfull_clusters[i]);
+ INIT_LIST_HEAD(&si->frag_clusters[i]);
+ si->frag_cluster_nr[i] = 0;
+ }
/*
* Reduce false cache line sharing between cluster_info and
@@ -3065,22 +3318,32 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
j = (k + col) % SWAP_CLUSTER_COLS;
for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
+ struct swap_cluster_info *ci;
idx = i * SWAP_CLUSTER_COLS + j;
+ ci = cluster_info + idx;
if (idx >= nr_clusters)
continue;
- if (cluster_count(&cluster_info[idx]))
+ if (ci->count) {
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ list_add_tail(&ci->list, &si->nonfull_clusters[0]);
continue;
- cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
- cluster_list_add_tail(&p->free_clusters, cluster_info,
- idx);
+ }
+ ci->flags = CLUSTER_FLAG_FREE;
+ list_add_tail(&ci->list, &si->free_clusters);
}
}
- return nr_extents;
+
+ return cluster_info;
+
+err_free:
+ kvfree(cluster_info);
+err:
+ return ERR_PTR(err);
}
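
The column-interleaved walk at the end of setup_clusters() can be hard to follow inline. A standalone sketch of the same index arithmetic, with a made-up column count standing in for SWAP_CLUSTER_COLS, prints the visit order and shows how neighbouring cluster_info entries land in different passes, reducing false cache-line sharing:

    #include <stdio.h>

    #define COLS 4 /* stands in for SWAP_CLUSTER_COLS */

    int main(void)
    {
        unsigned long nr_clusters = 10, col = 1, i, j, k, idx;

        for (k = 0; k < COLS; k++) {
            j = (k + col) % COLS;
            for (i = 0; i < (nr_clusters + COLS - 1) / COLS; i++) {
                idx = i * COLS + j;
                if (idx >= nr_clusters)
                    continue;
                printf("%lu ", idx); /* 1 5 9  2 6  3 7  0 4 8 */
            }
        }
        printf("\n");
        return 0;
    }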
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct filename *name;
struct file *swap_file = NULL;
struct address_space *mapping;
@@ -3092,8 +3355,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
+ unsigned long *zeromap = NULL;
struct swap_cluster_info *cluster_info = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct inode *inode = NULL;
bool inced_nr_rotate_swap = false;
@@ -3106,11 +3370,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (!swap_avail_heads)
return -ENOMEM;
- p = alloc_swap_info();
- if (IS_ERR(p))
- return PTR_ERR(p);
+ si = alloc_swap_info();
+ if (IS_ERR(si))
+ return PTR_ERR(si);
- INIT_WORK(&p->discard_work, swap_discard_work);
+ INIT_WORK(&si->discard_work, swap_discard_work);
name = getname(specialfile);
if (IS_ERR(name)) {
@@ -3125,12 +3389,12 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
}
- p->swap_file = swap_file;
+ si->swap_file = swap_file;
mapping = swap_file->f_mapping;
dentry = swap_file->f_path.dentry;
inode = mapping->host;
- error = claim_swapfile(p, inode);
+ error = claim_swapfile(si, inode);
if (unlikely(error))
goto bad_swap;
@@ -3151,14 +3415,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EINVAL;
goto bad_swap_unlock_inode;
}
- page = read_mapping_page(mapping, 0, swap_file);
- if (IS_ERR(page)) {
- error = PTR_ERR(page);
+ folio = read_mapping_folio(mapping, 0, swap_file);
+ if (IS_ERR(folio)) {
+ error = PTR_ERR(folio);
goto bad_swap_unlock_inode;
}
- swap_header = kmap(page);
+ swap_header = kmap_local_folio(folio, 0);
- maxpages = read_swap_header(p, swap_header, inode);
+ maxpages = read_swap_header(si, swap_header, inode);
if (unlikely(!maxpages)) {
error = -EINVAL;
goto bad_swap_unlock_inode;
@@ -3171,79 +3435,57 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}
- if (p->bdev && bdev_stable_writes(p->bdev))
- p->flags |= SWP_STABLE_WRITES;
+ error = swap_cgroup_swapon(si->type, maxpages);
+ if (error)
+ goto bad_swap_unlock_inode;
- if (p->bdev && bdev_synchronous(p->bdev))
- p->flags |= SWP_SYNCHRONOUS_IO;
+ nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
+ maxpages, &span);
+ if (unlikely(nr_extents < 0)) {
+ error = nr_extents;
+ goto bad_swap_unlock_inode;
+ }
- if (p->bdev && bdev_nonrot(p->bdev)) {
- int cpu, i;
- unsigned long ci, nr_cluster;
+ /*
+ * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
+ * be above MAX_PAGE_ORDER in case of a large swap file.
+ */
+ zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!zeromap) {
+ error = -ENOMEM;
+ goto bad_swap_unlock_inode;
+ }
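
To see why the allocation order can exceed MAX_PAGE_ORDER, it helps to work the bitmap size for a large swap file. A quick user-space check of the arithmetic (page size, swap size, and the order-10 limit are illustrative assumptions):

    #include <stdio.h>

    int main(void)
    {
        /* Assumes 64-bit unsigned long; all sizes are illustrative. */
        unsigned long page_size = 4096;
        unsigned long swap_bytes = 1UL << 40;            /* 1 TiB swap file */
        unsigned long maxpages = swap_bytes / page_size; /* 2^28 entries */
        unsigned long bitmap_bytes = maxpages / 8;       /* one bit per page */

        /* 32 MiB of bitmap, far above an order-10 (4 MiB with 4K pages)
         * contiguous allocation, hence kvmalloc_array(). */
        printf("bitmap: %lu MiB\n", bitmap_bytes >> 20);
        return 0;
    }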
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next_cpu = alloc_percpu(unsigned int);
- if (!p->cluster_next_cpu) {
- error = -ENOMEM;
- goto bad_swap_unlock_inode;
- }
- /*
- * select a random position to start with to help wear leveling
- * SSD
- */
- for_each_possible_cpu(cpu) {
- per_cpu(*p->cluster_next_cpu, cpu) =
- get_random_u32_inclusive(1, p->highest_bit);
- }
- nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ if (si->bdev && bdev_stable_writes(si->bdev))
+ si->flags |= SWP_STABLE_WRITES;
- cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
- GFP_KERNEL);
- if (!cluster_info) {
- error = -ENOMEM;
- goto bad_swap_unlock_inode;
- }
+ if (si->bdev && bdev_synchronous(si->bdev))
+ si->flags |= SWP_SYNCHRONOUS_IO;
- for (ci = 0; ci < nr_cluster; ci++)
- spin_lock_init(&((cluster_info + ci)->lock));
+ if (si->bdev && bdev_nonrot(si->bdev)) {
+ si->flags |= SWP_SOLIDSTATE;
- p->percpu_cluster = alloc_percpu(struct percpu_cluster);
- if (!p->percpu_cluster) {
- error = -ENOMEM;
+ cluster_info = setup_clusters(si, swap_header, maxpages);
+ if (IS_ERR(cluster_info)) {
+ error = PTR_ERR(cluster_info);
+ cluster_info = NULL;
goto bad_swap_unlock_inode;
}
- for_each_possible_cpu(cpu) {
- struct percpu_cluster *cluster;
-
- cluster = per_cpu_ptr(p->percpu_cluster, cpu);
- for (i = 0; i < SWAP_NR_ORDERS; i++)
- cluster->next[i] = SWAP_NEXT_INVALID;
- }
} else {
atomic_inc(&nr_rotate_swap);
inced_nr_rotate_swap = true;
}
- error = swap_cgroup_swapon(p->type, maxpages);
- if (error)
- goto bad_swap_unlock_inode;
-
- nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
- cluster_info, maxpages, &span);
- if (unlikely(nr_extents < 0)) {
- error = nr_extents;
- goto bad_swap_unlock_inode;
- }
-
if ((swap_flags & SWAP_FLAG_DISCARD) &&
- p->bdev && bdev_max_discard_sectors(p->bdev)) {
+ si->bdev && bdev_max_discard_sectors(si->bdev)) {
/*
* When discard is enabled for swap with no particular
* policy flagged, we set all swap discard flags here in
* order to sustain backward compatibility with older
* swapon(8) releases.
*/
- p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+ si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
SWP_PAGE_DISCARD);
/*
@@ -3253,24 +3495,24 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
* Now it's time to adjust the p->flags accordingly.
*/
if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
- p->flags &= ~SWP_PAGE_DISCARD;
+ si->flags &= ~SWP_PAGE_DISCARD;
else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
- p->flags &= ~SWP_AREA_DISCARD;
+ si->flags &= ~SWP_AREA_DISCARD;
/* issue a swapon-time discard if it's still required */
- if (p->flags & SWP_AREA_DISCARD) {
- int err = discard_swap(p);
+ if (si->flags & SWP_AREA_DISCARD) {
+ int err = discard_swap(si);
if (unlikely(err))
pr_err("swapon: discard_swap(%p): %d\n",
- p, err);
+ si, err);
}
}
- error = init_swap_address_space(p->type, maxpages);
+ error = init_swap_address_space(si->type, maxpages);
if (error)
goto bad_swap_unlock_inode;
- error = zswap_swapon(p->type, maxpages);
+ error = zswap_swapon(si->type, maxpages);
if (error)
goto free_swap_address_space;
@@ -3290,15 +3532,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (swap_flags & SWAP_FLAG_PREFER)
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
- enable_swap_info(p, prio, swap_map, cluster_info);
+ enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
- K(p->pages), name->name, p->prio, nr_extents,
+ K(si->pages), name->name, si->prio, nr_extents,
K((unsigned long long)span),
- (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
- (p->flags & SWP_DISCARDABLE) ? "D" : "",
- (p->flags & SWP_AREA_DISCARD) ? "s" : "",
- (p->flags & SWP_PAGE_DISCARD) ? "c" : "");
+ (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
+ (si->flags & SWP_DISCARDABLE) ? "D" : "",
+ (si->flags & SWP_AREA_DISCARD) ? "s" : "",
+ (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
mutex_unlock(&swapon_mutex);
atomic_inc(&proc_poll_event);
@@ -3307,34 +3549,33 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
free_swap_zswap:
- zswap_swapoff(p->type);
+ zswap_swapoff(si->type);
free_swap_address_space:
- exit_swap_address_space(p->type);
+ exit_swap_address_space(si->type);
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
- free_percpu(p->percpu_cluster);
- p->percpu_cluster = NULL;
- free_percpu(p->cluster_next_cpu);
- p->cluster_next_cpu = NULL;
+ free_percpu(si->percpu_cluster);
+ si->percpu_cluster = NULL;
+ free_percpu(si->cluster_next_cpu);
+ si->cluster_next_cpu = NULL;
inode = NULL;
- destroy_swap_extents(p);
- swap_cgroup_swapoff(p->type);
+ destroy_swap_extents(si);
+ swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
- p->swap_file = NULL;
- p->flags = 0;
+ si->swap_file = NULL;
+ si->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
+ kvfree(zeromap);
kvfree(cluster_info);
if (inced_nr_rotate_swap)
atomic_dec(&nr_rotate_swap);
if (swap_file)
filp_close(swap_file, NULL);
out:
- if (page && !IS_ERR(page)) {
- kunmap(page);
- put_page(page);
- }
+ if (!IS_ERR_OR_NULL(folio))
+ folio_release_kmap(folio, swap_header);
if (name)
putname(name);
if (inode)
@@ -3362,7 +3603,7 @@ void si_swapinfo(struct sysinfo *val)
}
/*
- * Verify that a swap entry is valid and increment its swap map count.
+ * Verify that nr swap entries are valid and increment their swap map counts.
*
* Returns an error code in the following cases:
* - success -> 0
@@ -3372,63 +3613,76 @@ void si_swapinfo(struct sysinfo *val)
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct swap_cluster_info *ci;
unsigned long offset;
unsigned char count;
unsigned char has_cache;
- int err;
+ int err, i;
- p = swp_swap_info(entry);
+ si = swp_swap_info(entry);
offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(p, offset);
-
- count = p->swap_map[offset];
-
- /*
- * swapin_readahead() doesn't check if a swap entry is valid, so the
- * swap entry could be SWAP_MAP_BAD. Check here with lock held.
- */
- if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
- err = -ENOENT;
- goto unlock_out;
- }
+ VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
+ VM_WARN_ON(usage == 1 && nr > 1);
+ ci = lock_cluster_or_swap_info(si, offset);
- has_cache = count & SWAP_HAS_CACHE;
- count &= ~SWAP_HAS_CACHE;
err = 0;
+ for (i = 0; i < nr; i++) {
+ count = si->swap_map[offset + i];
- if (usage == SWAP_HAS_CACHE) {
+ /*
+ * swapin_readahead() doesn't check if a swap entry is valid, so the
+ * swap entry could be SWAP_MAP_BAD. Check here with lock held.
+ */
+ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
- /* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count)
- has_cache = SWAP_HAS_CACHE;
- else if (has_cache) /* someone else added cache */
- err = -EEXIST;
- else /* no users remaining */
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+
+ if (!count && !has_cache) {
err = -ENOENT;
+ } else if (usage == SWAP_HAS_CACHE) {
+ if (has_cache)
+ err = -EEXIST;
+ } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
+ err = -EINVAL;
+ }
+
+ if (err)
+ goto unlock_out;
+ }
- } else if (count || has_cache) {
+ for (i = 0; i < nr; i++) {
+ count = si->swap_map[offset + i];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
- if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ if (usage == SWAP_HAS_CACHE)
+ has_cache = SWAP_HAS_CACHE;
+ else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
count += usage;
- else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
- err = -EINVAL;
- else if (swap_count_continued(p, offset, count))
+ else if (swap_count_continued(si, offset + i, count))
count = COUNT_CONTINUED;
- else
+ else {
+ /*
+ * No need to roll back earlier changes: if
+ * usage == 1, then nr must be 1.
+ */
err = -ENOMEM;
- } else
- err = -ENOENT; /* unused swap entry */
+ goto unlock_out;
+ }
- if (!err)
- WRITE_ONCE(p->swap_map[offset], count | has_cache);
+ WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
+ }
unlock_out:
- unlock_cluster_or_swap_info(p, ci);
+ unlock_cluster_or_swap_info(si, ci);
return err;
}
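
The per-entry state manipulated above is a single swap_map byte that combines a reference count with the SWAP_HAS_CACHE flag. A simplified user-space model of the split/modify/recombine steps (the flag value mirrors the kernel's layout, but this is illustrative only):

    #include <stdio.h>

    #define SWAP_HAS_CACHE 0x40 /* flag bit, as in the kernel's swap map */

    int main(void)
    {
        unsigned char map = 0x42;                 /* count 2, cache bit set */
        unsigned char has_cache = map & SWAP_HAS_CACHE;
        unsigned char count = map & ~SWAP_HAS_CACHE;

        count += 1;                               /* one more swap_duplicate() */
        map = count | has_cache;                  /* recombine, as WRITE_ONCE does */
        printf("count=%u cache=%s map=0x%02x\n",
               (unsigned)count, has_cache ? "yes" : "no", (unsigned)map);
        return 0;                                 /* count=3 cache=yes map=0x43 */
    }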
@@ -3436,9 +3690,9 @@ unlock_out:
* Help swapoff by noting that swap entry belongs to shmem/tmpfs
* (in which case its reference count is never incremented).
*/
-void swap_shmem_alloc(swp_entry_t entry)
+void swap_shmem_alloc(swp_entry_t entry, int nr)
{
- __swap_duplicate(entry, SWAP_MAP_SHMEM);
+ __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
}
/*
@@ -3452,35 +3706,29 @@ int swap_duplicate(swp_entry_t entry)
{
int err = 0;
- while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
/*
- * @entry: swap entry for which we allocate swap cache.
+ * @entry: the first swap entry from which we allocate nr swap cache entries.
*
- * Called when allocating swap cache for existing swap entry,
+ * Called when allocating swap cache for existing swap entries.
* This can return error codes. Returns 0 on success.
* -EEXIST means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
-int swapcache_prepare(swp_entry_t entry)
+int swapcache_prepare(swp_entry_t entry, int nr)
{
- return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
}
-void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
- struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
- unsigned char usage;
- ci = lock_cluster_or_swap_info(si, offset);
- usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
- unlock_cluster_or_swap_info(si, ci);
- if (!usage)
- free_swap_slot(entry);
+ cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
}
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index acc56c75ba99..ce13c4062647 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -391,7 +391,7 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
struct page *page;
int ret;
- ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
+ ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
/* Our caller expects us to return -EFAULT if we failed to find folio */
if (ret == -ENOENT)
ret = -EFAULT;
@@ -1763,3 +1763,171 @@ out:
VM_WARN_ON(!moved && !err);
return moved ? moved : err;
}
+
+static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+
+ vm_flags_reset(vma, flags);
+ /*
+ * For shared mappings, we want to enable writenotify while
+ * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
+ * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
+ */
+ if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
+ vma_set_page_prot(vma);
+}
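
The uffd_wp_changed test is the usual XOR idiom for detecting whether one bit differs between the old and new flag words. A tiny sketch with a stand-in flag value (the real VM_UFFD_WP bit lives in the kernel headers):

    #include <stdio.h>

    #define VM_UFFD_WP 0x1000 /* illustrative bit value only */

    int main(void)
    {
        unsigned long old_flags = 0x1071;          /* WP bit set */
        unsigned long new_flags = 0x0071;          /* WP bit cleared */
        int changed = !!((old_flags ^ new_flags) & VM_UFFD_WP);

        printf("uffd-wp changed: %d\n", changed);  /* 1: recompute page prot */
        return 0;
    }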
+
+static void userfaultfd_set_ctx(struct vm_area_struct *vma,
+ struct userfaultfd_ctx *ctx,
+ unsigned long flags)
+{
+ vma_start_write(vma);
+ vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
+ userfaultfd_set_vm_flags(vma,
+ (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
+}
+
+void userfaultfd_reset_ctx(struct vm_area_struct *vma)
+{
+ userfaultfd_set_ctx(vma, NULL, 0);
+}
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ struct vm_area_struct *ret;
+
+ /* Reset ptes for the whole vma range if wr-protected */
+ if (userfaultfd_wp(vma))
+ uffd_wp_range(vma, start, end - start, false);
+
+ ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
+ vma->vm_flags & ~__VM_UFFD_FLAGS,
+ NULL_VM_UFFD_CTX);
+
+ /*
+ * In the vma_merge() successful mprotect-like case 8:
+ * the next vma was merged into the current one and
+ * the current one has not been updated yet.
+ */
+ if (!IS_ERR(ret))
+ userfaultfd_reset_ctx(ret);
+
+ return ret;
+}
+
+/* Assumes mmap write lock taken, and mm_struct pinned. */
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async)
+{
+ VMA_ITERATOR(vmi, ctx->mm, start);
+ struct vm_area_struct *prev = vma_prev(&vmi);
+ unsigned long vma_end;
+ unsigned long new_flags;
+
+ if (vma->vm_start < start)
+ prev = vma;
+
+ for_each_vma_range(vmi, vma, end) {
+ cond_resched();
+
+ BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
+ BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
+ vma->vm_userfaultfd_ctx.ctx != ctx);
+ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
+ /*
+ * Nothing to do: this vma is already registered into this
+ * userfaultfd and with the right tracking mode too.
+ */
+ if (vma->vm_userfaultfd_ctx.ctx == ctx &&
+ (vma->vm_flags & vm_flags) == vm_flags)
+ goto skip;
+
+ if (vma->vm_start > start)
+ start = vma->vm_start;
+ vma_end = min(end, vma->vm_end);
+
+ new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
+ vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
+ new_flags,
+ (struct vm_userfaultfd_ctx){ctx});
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ /*
+ * In the vma_merge() successful mprotect-like case 8:
+ * the next vma was merged into the current one and
+ * the current one has not been updated yet.
+ */
+ userfaultfd_set_ctx(vma, ctx, vm_flags);
+
+ if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
+ hugetlb_unshare_all_pmds(vma);
+
+skip:
+ prev = vma;
+ start = vma->vm_end;
+ }
+
+ return 0;
+}
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx)
+{
+ struct mm_struct *mm = ctx->mm;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ /* the various vma->vm_userfaultfd_ctx fields still point to it */
+ mmap_write_lock(mm);
+ for_each_vma(vmi, vma) {
+ if (vma->vm_userfaultfd_ctx.ctx == ctx)
+ userfaultfd_reset_ctx(vma);
+ }
+ mmap_write_unlock(mm);
+}
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx)
+{
+ struct vm_area_struct *vma, *prev;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ if (!mmget_not_zero(mm))
+ return;
+
+ /*
+ * Flush page faults out of all CPUs. NOTE: all page faults
+ * must be retried without returning VM_FAULT_SIGBUS if
+ * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
+ * changes while handle_userfault released the mmap_lock. So
+ * it's critical that released is set to true (above), before
+ * taking the mmap_lock for writing.
+ */
+ mmap_write_lock(mm);
+ prev = NULL;
+ for_each_vma(vmi, vma) {
+ cond_resched();
+ BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
+ !!(vma->vm_flags & __VM_UFFD_FLAGS));
+ if (vma->vm_userfaultfd_ctx.ctx != ctx) {
+ prev = vma;
+ continue;
+ }
+
+ vma = userfaultfd_clear_vma(&vmi, prev, vma,
+ vma->vm_start, vma->vm_end);
+ prev = vma;
+ }
+ mmap_write_unlock(mm);
+ mmput(mm);
+}
diff --git a/mm/util.c b/mm/util.c
index bd283e2132e0..4f1275023eb7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -463,7 +463,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
if (gap + pad > gap)
gap += pad;
- if (gap < MIN_GAP)
+ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
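
The added MIN_GAP < MAX_GAP test guards against an inverted clamp: if the lower bound ever exceeds the upper bound, the old ordering could raise gap above MAX_GAP. A standalone sketch of the failure mode, using hypothetical bounds rather than the kernel's actual MIN_GAP/MAX_GAP values:

    #include <stdio.h>

    /* Patched ordering: apply the lower bound only when lo < hi. */
    static unsigned long clamp_gap(unsigned long gap, unsigned long lo,
                                   unsigned long hi)
    {
        if (gap < lo && lo < hi)
            gap = lo;
        else if (gap > hi)
            gap = hi;
        return gap;
    }

    int main(void)
    {
        /* Inverted bounds (lo > hi): the old code forced gap up to lo
         * (500, above hi); the guarded check leaves gap untouched. */
        printf("%lu\n", clamp_gap(100, 500, 300)); /* prints 100, not 500 */
        return 0;
    }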
@@ -608,6 +608,28 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
+static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
+{
+ /*
+ * We want to attempt a large physically contiguous block first because
+ * it is less likely to fragment multiple larger blocks and therefore
+ * contributes less to long-term fragmentation than the vmalloc fallback.
+ * However, make sure that larger requests are not too disruptive - no
+ * OOM killer and no allocation-failure warnings, as we have a fallback.
+ */
+ if (size > PAGE_SIZE) {
+ flags |= __GFP_NOWARN;
+
+ if (!(flags & __GFP_RETRY_MAYFAIL))
+ flags |= __GFP_NORETRY;
+
+ /* nofail semantic is implemented by the vmalloc fallback */
+ flags &= ~__GFP_NOFAIL;
+ }
+
+ return flags;
+}
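
The effect of the helper is easiest to read as a before/after on the flag bits. A user-space sketch with stand-in GFP values (the real constants differ; these are purely illustrative):

    #include <stdio.h>

    #define PAGE_SIZE           4096UL
    #define __GFP_NOWARN        0x01 /* stand-in values, not the real bits */
    #define __GFP_NORETRY       0x02
    #define __GFP_RETRY_MAYFAIL 0x04
    #define __GFP_NOFAIL        0x08

    static unsigned adjust(unsigned flags, unsigned long size)
    {
        if (size > PAGE_SIZE) {
            flags |= __GFP_NOWARN;              /* fallback exists: stay quiet */
            if (!(flags & __GFP_RETRY_MAYFAIL))
                flags |= __GFP_NORETRY;         /* don't push the OOM killer */
            flags &= ~__GFP_NOFAIL;             /* vmalloc fallback handles nofail */
        }
        return flags;
    }

    int main(void)
    {
        printf("0x%x\n", adjust(__GFP_NOFAIL, 2 * PAGE_SIZE));
        /* prints 0x3: NOWARN|NORETRY added, NOFAIL stripped */
        return 0;
    }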
+
/**
* __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
* failure, fall back to non-contiguous (vmalloc) allocation.
@@ -627,32 +649,15 @@ EXPORT_SYMBOL(vm_mmap);
*/
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
- gfp_t kmalloc_flags = flags;
void *ret;
/*
- * We want to attempt a large physically contiguous block first because
- * it is less likely to fragment multiple larger blocks and therefore
- * contribute to a long term fragmentation less than vmalloc fallback.
- * However make sure that larger requests are not too disruptive - no
- * OOM killer and no allocation failure warnings as we have a fallback.
- */
- if (size > PAGE_SIZE) {
- kmalloc_flags |= __GFP_NOWARN;
-
- if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
- kmalloc_flags |= __GFP_NORETRY;
-
- /* nofail semantic is implemented by the vmalloc fallback */
- kmalloc_flags &= ~__GFP_NOFAIL;
- }
-
- ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);
-
- /*
* It doesn't really make sense to fall back to vmalloc for sub-page
* requests.
*/
+ ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
+ kmalloc_gfp_adjust(flags, size),
+ node);
if (ret || size <= PAGE_SIZE)
return ret;
@@ -715,18 +720,53 @@ void kvfree_sensitive(const void *addr, size_t len)
}
EXPORT_SYMBOL(kvfree_sensitive);
-void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+/**
+ * kvrealloc - reallocate memory; contents remain unchanged
+ * @p: object to reallocate memory for
+ * @size: the size to reallocate
+ * @flags: the flags for the page level allocator
+ *
+ * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
+ * and @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * This function must not be called concurrently with itself or kvfree() for the
+ * same memory allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
+ */
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
- void *newp;
+ void *n;
- if (oldsize >= newsize)
- return (void *)p;
- newp = kvmalloc_noprof(newsize, flags);
- if (!newp)
- return NULL;
- memcpy(newp, p, oldsize);
- kvfree(p);
- return newp;
+ if (is_vmalloc_addr(p))
+ return vrealloc_noprof(p, size, flags);
+
+ n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
+ if (!n) {
+ /* We failed to krealloc(), fall back to kvmalloc(). */
+ n = kvmalloc_noprof(size, flags);
+ if (!n)
+ return NULL;
+
+ if (p) {
+ /* We already know that `p` is not a vmalloc address. */
+ kasan_disable_current();
+ memcpy(n, kasan_reset_tag(p), ksize(p));
+ kasan_enable_current();
+
+ kfree(p);
+ }
+ }
+
+ return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
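
A typical growth pattern under the new single-size signature might look like the following sketch (struct item, itemsp, and new_nr are hypothetical names):

    /* Grow a kvmalloc()ed array; contents up to the old size are kept. */
    static int grow_items(struct item **itemsp, size_t new_nr)
    {
        struct item *tmp;

        tmp = kvrealloc(*itemsp, new_nr * sizeof(**itemsp), GFP_KERNEL);
        if (!tmp)
            return -ENOMEM;   /* *itemsp is still valid and must be kvfree()d */
        *itemsp = tmp;
        return 0;
    }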
diff --git a/mm/vma.c b/mm/vma.c
new file mode 100644
index 000000000000..4737afcb064c
--- /dev/null
+++ b/mm/vma.c
@@ -0,0 +1,2068 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * VMA-specific functions.
+ */
+
+#include "vma_internal.h"
+#include "vma.h"
+
+static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
+{
+ struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
+
+ if (!mpol_equal(vmg->policy, vma_policy(vma)))
+ return false;
+ /*
+ * VM_SOFTDIRTY should not prevent VMA merging if the flags match
+ * apart from the dirty bit; the caller should mark the merged VMA
+ * as dirty. If the dirty bit were not excluded from the comparison,
+ * we would increase pressure on the memory system, forcing the
+ * kernel to generate new VMAs when an old one could be extended
+ * instead.
+ */
+ if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
+ return false;
+ if (vma->vm_file != vmg->file)
+ return false;
+ if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
+ return false;
+ if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
+ return false;
+ return true;
+}
+
+static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
+ struct anon_vma *anon_vma2, struct vm_area_struct *vma)
+{
+ /*
+ * The list_is_singular() test avoids merging VMAs cloned from
+ * parents; this improves scalability by reducing anon_vma lock
+ * contention.
+ */
+ if ((!anon_vma1 || !anon_vma2) && (!vma ||
+ list_is_singular(&vma->anon_vma_chain)))
+ return true;
+ return anon_vma1 == anon_vma2;
+}
+
+/* Are the anon_vma's belonging to each VMA compatible with one another? */
+static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
+ struct vm_area_struct *vma2)
+{
+ return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
+}
+
+/*
+ * init_multi_vma_prep() - Initializer for struct vma_prepare
+ * @vp: The vma_prepare struct
+ * @vma: The vma that will be altered once locked
+ * @next: The next vma if it is to be adjusted
+ * @remove: The first vma to be removed
+ * @remove2: The second vma to be removed
+ */
+static void init_multi_vma_prep(struct vma_prepare *vp,
+ struct vm_area_struct *vma,
+ struct vm_area_struct *next,
+ struct vm_area_struct *remove,
+ struct vm_area_struct *remove2)
+{
+ memset(vp, 0, sizeof(struct vma_prepare));
+ vp->vma = vma;
+ vp->anon_vma = vma->anon_vma;
+ vp->remove = remove;
+ vp->remove2 = remove2;
+ vp->adj_next = next;
+ if (!vp->anon_vma && next)
+ vp->anon_vma = next->anon_vma;
+
+ vp->file = vma->vm_file;
+ if (vp->file)
+ vp->mapping = vma->vm_file->f_mapping;
+}
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * in front of (at a lower virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ *
+ * We don't check here for the merged mmap wrapping around the end of pagecache
+ * indices (16TB on ia32) because do_mmap() does not permit mmaps which
+ * wrap, nor mmaps which cover the final page at index -1UL.
+ *
+ * We assume the vma may be removed as part of the merge.
+ */
+static bool can_vma_merge_before(struct vma_merge_struct *vmg)
+{
+ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
+
+ if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
+ is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
+ if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
+ return true;
+ }
+
+ return false;
+}
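
The vm_pgoff test above verifies that the file mapping stays linear across the merge point. The arithmetic in page units, assuming 4 KiB pages for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* Proposed range [0x1000, 0x4000) mapping file pages from pgoff 10. */
        unsigned long start = 0x1000, end = 0x4000, pgoff = 10;
        unsigned long pglen = (end - start) >> 12;   /* PHYS_PFN, 4K pages */

        /* The next VMA starts at 0x4000; a merge is only valid if it maps
         * file page 13 there, i.e. next->vm_pgoff == pgoff + pglen. */
        printf("required next->vm_pgoff = %lu\n", pgoff + pglen); /* 13 */
        return 0;
    }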
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * beyond (at a higher virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ *
+ * We assume that vma is not removed as part of the merge.
+ */
+static bool can_vma_merge_after(struct vma_merge_struct *vmg)
+{
+ if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
+ is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
+ if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
+ return true;
+ }
+ return false;
+}
+
+static void __vma_link_file(struct vm_area_struct *vma,
+ struct address_space *mapping)
+{
+ if (vma_is_shared_maywrite(vma))
+ mapping_allow_writable(mapping);
+
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+}
+
+/*
+ * Requires inode->i_mapping->i_mmap_rwsem
+ */
+static void __remove_shared_vm_struct(struct vm_area_struct *vma,
+ struct address_space *mapping)
+{
+ if (vma_is_shared_maywrite(vma))
+ mapping_unmap_writable(mapping);
+
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+}
+
+/*
+ * vma_prepare() - Helper function for locking VMAs prior to altering them
+ * @vp: The initialized vma_prepare struct
+ */
+static void vma_prepare(struct vma_prepare *vp)
+{
+ if (vp->file) {
+ uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
+
+ if (vp->adj_next)
+ uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
+ vp->adj_next->vm_end);
+
+ i_mmap_lock_write(vp->mapping);
+ if (vp->insert && vp->insert->vm_file) {
+ /*
+ * Put into interval tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(vp->insert,
+ vp->insert->vm_file->f_mapping);
+ }
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_lock_write(vp->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_pre_update_vma(vp->adj_next);
+ }
+
+ if (vp->file) {
+ flush_dcache_mmap_lock(vp->mapping);
+ vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
+ if (vp->adj_next)
+ vma_interval_tree_remove(vp->adj_next,
+ &vp->mapping->i_mmap);
+ }
+}
+
+/*
+ * vma_complete() - Helper function for handling the unlocking after altering VMAs,
+ * or for inserting a VMA.
+ *
+ * @vp: The vma_prepare struct
+ * @vmi: The vma iterator
+ * @mm: The mm_struct
+ */
+static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
+ struct mm_struct *mm)
+{
+ if (vp->file) {
+ if (vp->adj_next)
+ vma_interval_tree_insert(vp->adj_next,
+ &vp->mapping->i_mmap);
+ vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
+ flush_dcache_mmap_unlock(vp->mapping);
+ }
+
+ if (vp->remove && vp->file) {
+ __remove_shared_vm_struct(vp->remove, vp->mapping);
+ if (vp->remove2)
+ __remove_shared_vm_struct(vp->remove2, vp->mapping);
+ } else if (vp->insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ vma_iter_store(vmi, vp->insert);
+ mm->map_count++;
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_post_update_vma(vp->adj_next);
+ anon_vma_unlock_write(vp->anon_vma);
+ }
+
+ if (vp->file) {
+ i_mmap_unlock_write(vp->mapping);
+ uprobe_mmap(vp->vma);
+
+ if (vp->adj_next)
+ uprobe_mmap(vp->adj_next);
+ }
+
+ if (vp->remove) {
+again:
+ vma_mark_detached(vp->remove, true);
+ if (vp->file) {
+ uprobe_munmap(vp->remove, vp->remove->vm_start,
+ vp->remove->vm_end);
+ fput(vp->file);
+ }
+ if (vp->remove->anon_vma)
+ anon_vma_merge(vp->vma, vp->remove);
+ mm->map_count--;
+ mpol_put(vma_policy(vp->remove));
+ if (!vp->remove2)
+ WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
+ vm_area_free(vp->remove);
+
+ /*
+ * In mprotect's case 6 (see comments on vma_merge),
+ * we are removing both mid and next vmas
+ */
+ if (vp->remove2) {
+ vp->remove = vp->remove2;
+ vp->remove2 = NULL;
+ goto again;
+ }
+ }
+ if (vp->insert && vp->file)
+ uprobe_mmap(vp->insert);
+}
+
+/*
+ * init_vma_prep() - Initializer wrapper for vma_prepare struct
+ * @vp: The vma_prepare struct
+ * @vma: The vma that will be altered once locked
+ */
+static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
+{
+ init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
+}
+
+/*
+ * Can the proposed VMA be merged with the left (previous) VMA taking into
+ * account the start position of the proposed range.
+ */
+static bool can_vma_merge_left(struct vma_merge_struct *vmg)
+{
+ return vmg->prev && vmg->prev->vm_end == vmg->start &&
+ can_vma_merge_after(vmg);
+}
+
+/*
+ * Can the proposed VMA be merged with the right (next) VMA taking into
+ * account the end position of the proposed range.
+ *
+ * In addition, if we can merge with the left VMA, ensure that left and right
+ * anon_vma's are also compatible.
+ */
+static bool can_vma_merge_right(struct vma_merge_struct *vmg,
+ bool can_merge_left)
+{
+ if (!vmg->next || vmg->end != vmg->next->vm_start ||
+ !can_vma_merge_before(vmg))
+ return false;
+
+ if (!can_merge_left)
+ return true;
+
+ /*
+ * If we can merge with prev (left) and next (right), indicating that
+ * each VMA's anon_vma is compatible with the proposed anon_vma, this
+ * does not mean prev and next are compatible with EACH OTHER.
+ *
+ * We therefore check this in addition to mergeability to either side.
+ */
+ return are_anon_vmas_compatible(vmg->prev, vmg->next);
+}
+
+/*
+ * Close a vm structure and free it.
+ */
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
+{
+ might_sleep();
+ if (!closed && vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ mpol_put(vma_policy(vma));
+ if (unreachable)
+ __vm_area_free(vma);
+ else
+ vm_area_free(vma);
+}
+
+/*
+ * Get rid of page table information in the indicated region.
+ *
+ * Called with the mm semaphore held.
+ */
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct vm_area_struct *next)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm);
+ update_hiwater_rss(mm);
+ unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
+ /* mm_wr_locked = */ true);
+ mas_set(mas, vma->vm_end);
+ free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+ next ? next->vm_start : USER_PGTABLES_CEILING,
+ /* mm_wr_locked = */ true);
+ tlb_finish_mmu(&tlb);
+}
+
+/*
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
+ * has already been checked or doesn't make sense to fail.
+ * The VMA iterator will point to the original VMA.
+ */
+static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ struct vma_prepare vp;
+ struct vm_area_struct *new;
+ int err;
+
+ WARN_ON(vma->vm_start >= addr);
+ WARN_ON(vma->vm_end <= addr);
+
+ if (vma->vm_ops && vma->vm_ops->may_split) {
+ err = vma->vm_ops->may_split(vma, addr);
+ if (err)
+ return err;
+ }
+
+ new = vm_area_dup(vma);
+ if (!new)
+ return -ENOMEM;
+
+ if (new_below) {
+ new->vm_end = addr;
+ } else {
+ new->vm_start = addr;
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
+ err = -ENOMEM;
+ vma_iter_config(vmi, new->vm_start, new->vm_end);
+ if (vma_iter_prealloc(vmi, new))
+ goto out_free_vma;
+
+ err = vma_dup_policy(vma, new);
+ if (err)
+ goto out_free_vmi;
+
+ err = anon_vma_clone(new, vma);
+ if (err)
+ goto out_free_mpol;
+
+ if (new->vm_file)
+ get_file(new->vm_file);
+
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+ vma_start_write(vma);
+ vma_start_write(new);
+
+ init_vma_prep(&vp, vma);
+ vp.insert = new;
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+
+ if (new_below) {
+ vma->vm_start = addr;
+ vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+ } else {
+ vma->vm_end = addr;
+ }
+
+ /* vma_complete stores the new vma */
+ vma_complete(&vp, vmi, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+
+ /* Success. */
+ if (new_below)
+ vma_next(vmi);
+ else
+ vma_prev(vmi);
+
+ return 0;
+
+out_free_mpol:
+ mpol_put(vma_policy(new));
+out_free_vmi:
+ vma_iter_free(vmi);
+out_free_vma:
+ vm_area_free(new);
+ return err;
+}
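
The vm_pgoff adjustment keeps the file offset of every page unchanged across the split. A user-space sketch of the bookkeeping for the tail (new_below == false) case, assuming 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned long vm_start = 0x10000, vm_end = 0x20000, vm_pgoff = 100;
        unsigned long addr = 0x14000;     /* split point */

        /* The new (tail) VMA covers [addr, vm_end) and must start at the
         * file page the old VMA mapped there: */
        unsigned long new_start = addr;
        unsigned long new_pgoff = vm_pgoff + ((addr - vm_start) >> 12);

        printf("tail: [%#lx, %#lx) pgoff %lu\n", new_start, vm_end, new_pgoff);
        /* tail: [0x14000, 0x20000) pgoff 104 */
        return 0;
    }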
+
+/*
+ * Split a vma into two pieces at address 'addr'; a new vma is allocated
+ * for either the first part or the tail.
+ */
+static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ if (vma->vm_mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ return __split_vma(vmi, vma, addr, new_below);
+}
+
+/*
+ * vma has some anon_vma assigned, and is already inserted on that
+ * anon_vma's interval trees.
+ *
+ * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
+ * vma must be removed from the anon_vma's interval trees using
+ * anon_vma_interval_tree_pre_update_vma().
+ *
+ * After the update, the vma will be reinserted using
+ * anon_vma_interval_tree_post_update_vma().
+ *
+ * The entire update must be protected by exclusive mmap_lock and by
+ * the root anon_vma's mutex.
+ */
+void
+anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
+}
+
+void
+anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
+}
+
+/*
+ * dup_anon_vma() - Helper function to duplicate anon_vma
+ * @dst: The destination VMA
+ * @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
+ *
+ * Returns: 0 on success.
+ */
+static int dup_anon_vma(struct vm_area_struct *dst,
+ struct vm_area_struct *src, struct vm_area_struct **dup)
+{
+ /*
+ * Easily overlooked: when mprotect shifts the boundary, make sure the
+ * expanding vma has anon_vma set if the shrinking vma had one, to
+ * cover any anon pages imported.
+ */
+ if (src->anon_vma && !dst->anon_vma) {
+ int ret;
+
+ vma_assert_write_locked(dst);
+ dst->anon_vma = src->anon_vma;
+ ret = anon_vma_clone(dst, src);
+ if (ret)
+ return ret;
+
+ *dup = dst;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+void validate_mm(struct mm_struct *mm)
+{
+ int bug = 0;
+ int i = 0;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mt_validate(&mm->mm_mt);
+ for_each_vma(vmi, vma) {
+#ifdef CONFIG_DEBUG_VM_RB
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+#endif
+ unsigned long vmi_start, vmi_end;
+ bool warn = 0;
+
+ vmi_start = vma_iter_addr(&vmi);
+ vmi_end = vma_iter_end(&vmi);
+ if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+ warn = 1;
+
+ if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+ warn = 1;
+
+ if (warn) {
+ pr_emerg("issue in %s\n", current->comm);
+ dump_stack();
+ dump_vma(vma);
+ pr_emerg("tree range: %px start %lx end %lx\n", vma,
+ vmi_start, vmi_end - 1);
+ vma_iter_dump_tree(&vmi);
+ }
+
+#ifdef CONFIG_DEBUG_VM_RB
+ if (anon_vma) {
+ anon_vma_lock_read(anon_vma);
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_verify(avc);
+ anon_vma_unlock_read(anon_vma);
+ }
+#endif
+ i++;
+ }
+ if (i != mm->map_count) {
+ pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
+ bug = 1;
+ }
+ VM_BUG_ON_MM(bug, mm);
+}
+#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+
+/* Actually perform the VMA merge operation. */
+static int commit_merge(struct vma_merge_struct *vmg,
+ struct vm_area_struct *adjust,
+ struct vm_area_struct *remove,
+ struct vm_area_struct *remove2,
+ long adj_start,
+ bool expanded)
+{
+ struct vma_prepare vp;
+
+ init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
+
+ VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
+ vp.anon_vma != adjust->anon_vma);
+
+ if (expanded) {
+ /* Note: vma iterator must be pointing to 'start'. */
+ vma_iter_config(vmg->vmi, vmg->start, vmg->end);
+ } else {
+ vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
+ adjust->vm_end);
+ }
+
+ if (vma_iter_prealloc(vmg->vmi, vmg->vma))
+ return -ENOMEM;
+
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
+ vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
+
+ if (expanded)
+ vma_iter_store(vmg->vmi, vmg->vma);
+
+ if (adj_start) {
+ adjust->vm_start += adj_start;
+ adjust->vm_pgoff += PHYS_PFN(adj_start);
+ if (adj_start < 0) {
+ WARN_ON(expanded);
+ vma_iter_store(vmg->vmi, adjust);
+ }
+ }
+
+ vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
+
+ return 0;
+}
+
+/* We can only remove VMAs when merging if they do not have a close hook. */
+static bool can_merge_remove_vma(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops || !vma->vm_ops->close;
+}
+
+/*
+ * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
+ * attributes modified.
+ *
+ * @vmg: Describes the modifications being made to a VMA and associated
+ * metadata.
+ *
+ * When the attributes of a range within a VMA change, it might be possible
+ * for immediately adjacent VMAs to be merged into that VMA due to having
+ * identical properties.
+ *
+ * This function checks for the existence of any such mergeable VMAs and updates
+ * the maple tree describing the @vmg->vma->vm_mm address space to account for
+ * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
+ *
+ * As part of this operation, if a merge occurs, the @vmg object will have its
+ * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
+ * calls to this function should reset these fields.
+ *
+ * Returns: The merged VMA if merge succeeds, or NULL otherwise.
+ *
+ * ASSUMPTIONS:
+ * - The caller must assign the VMA to be modified to @vmg->vma.
+ * - The caller must have set @vmg->prev to the previous VMA, if there is one.
+ * - The caller must not set @vmg->next, as we determine this.
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ */
+static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *prev = vmg->prev;
+ struct vm_area_struct *next, *res;
+ struct vm_area_struct *anon_dup = NULL;
+ struct vm_area_struct *adjust = NULL;
+ unsigned long start = vmg->start;
+ unsigned long end = vmg->end;
+ bool left_side = vma && start == vma->vm_start;
+ bool right_side = vma && end == vma->vm_end;
+ int err = 0;
+ long adj_start = 0;
+ bool merge_will_delete_vma, merge_will_delete_next;
+ bool merge_left, merge_right, merge_both;
+ bool expanded;
+
+ mmap_assert_write_locked(vmg->mm);
+ VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
+ VM_WARN_ON(vmg->next); /* We set this. */
+ VM_WARN_ON(prev && start <= prev->vm_start);
+ VM_WARN_ON(start >= end);
+ /*
+ * If vma == prev, then we are offset into a VMA. Otherwise, if we are
+ * not, we must span a portion of the VMA.
+ */
+ VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
+ vmg->end > vma->vm_end));
+ /* The vmi must be positioned within vmg->vma. */
+ VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
+ vma_iter_addr(vmg->vmi) < vma->vm_end));
+
+ vmg->state = VMA_MERGE_NOMERGE;
+
+ /*
+ * If this is a special mapping, or if the range being modified is at
+ * neither the far left nor the far right side of the VMA, then we have
+ * no chance of merging and should abort.
+ */
+ if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
+ return NULL;
+
+ if (left_side)
+ merge_left = can_vma_merge_left(vmg);
+ else
+ merge_left = false;
+
+ if (right_side) {
+ next = vmg->next = vma_iter_next_range(vmg->vmi);
+ vma_iter_prev_range(vmg->vmi);
+
+ merge_right = can_vma_merge_right(vmg, merge_left);
+ } else {
+ merge_right = false;
+ next = NULL;
+ }
+
+ if (merge_left) /* If merging prev, position iterator there. */
+ vma_prev(vmg->vmi);
+ else if (!merge_right) /* If we have nothing to merge, abort. */
+ return NULL;
+
+ merge_both = merge_left && merge_right;
+ /* If we span the entire VMA, a merge implies it will be deleted. */
+ merge_will_delete_vma = left_side && right_side;
+
+ /*
+ * If we need to remove vma in its entirety but are unable to do so,
+ * we have no sensible recourse but to abort the merge.
+ */
+ if (merge_will_delete_vma && !can_merge_remove_vma(vma))
+ return NULL;
+
+ /*
+ * If we merge both VMAs, then next is also deleted. This implies
+ * merge_will_delete_vma also.
+ */
+ merge_will_delete_next = merge_both;
+
+ /*
+ * If we cannot delete next, then we can reduce the operation to merging
+ * prev and vma (thereby deleting vma).
+ */
+ if (merge_will_delete_next && !can_merge_remove_vma(next)) {
+ merge_will_delete_next = false;
+ merge_right = false;
+ merge_both = false;
+ }
+
+ /* No matter what happens, we will be adjusting vma. */
+ vma_start_write(vma);
+
+ if (merge_left)
+ vma_start_write(prev);
+
+ if (merge_right)
+ vma_start_write(next);
+
+ if (merge_both) {
+ /*
+ * |<----->|
+ * |-------*********-------|
+ * prev vma next
+ * extend delete delete
+ */
+
+ vmg->vma = prev;
+ vmg->start = prev->vm_start;
+ vmg->end = next->vm_end;
+ vmg->pgoff = prev->vm_pgoff;
+
+ /*
+ * We already ensured anon_vma compatibility above, so now it's
+ * simply a case of, if prev has no anon_vma object, which of
+ * next or vma contains the anon_vma we must duplicate.
+ */
+ err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
+ } else if (merge_left) {
+ /*
+ * |<----->| OR
+ * |<--------->|
+ * |-------*************
+ * prev vma
+ * extend shrink/delete
+ */
+
+ vmg->vma = prev;
+ vmg->start = prev->vm_start;
+ vmg->pgoff = prev->vm_pgoff;
+
+ if (!merge_will_delete_vma) {
+ adjust = vma;
+ adj_start = vmg->end - vma->vm_start;
+ }
+
+ err = dup_anon_vma(prev, vma, &anon_dup);
+ } else { /* merge_right */
+ /*
+ * |<----->| OR
+ * |<--------->|
+ * *************-------|
+ * vma next
+ * shrink/delete extend
+ */
+
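+ /* Number of pages spanned by the range being merged into next. */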
+ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
+
+ VM_WARN_ON(!merge_right);
+ /* If we are offset into a VMA, then prev must be vma. */
+ VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
+
+ if (merge_will_delete_vma) {
+ vmg->vma = next;
+ vmg->end = next->vm_end;
+ vmg->pgoff = next->vm_pgoff - pglen;
+ } else {
+ /*
+ * We shrink vma and expand next.
+ *
+ * IMPORTANT: This is the ONLY case where the final
+ * merged VMA is NOT vmg->vma, but rather vmg->next.
+ */
+
+ vmg->start = vma->vm_start;
+ vmg->end = start;
+ vmg->pgoff = vma->vm_pgoff;
+
+ adjust = next;
+ adj_start = -(vma->vm_end - start);
+ }
+
+ err = dup_anon_vma(next, vma, &anon_dup);
+ }
+
+ if (err)
+ goto abort;
+
+ /*
+ * In nearly all cases, we expand vmg->vma. There is one exception -
+ * merge_right where we partially span the VMA. In this case we shrink
+ * the end of vmg->vma and adjust the start of vmg->next accordingly.
+ */
+ expanded = !merge_right || merge_will_delete_vma;
+
+ if (commit_merge(vmg, adjust,
+ merge_will_delete_vma ? vma : NULL,
+ merge_will_delete_next ? next : NULL,
+ adj_start, expanded)) {
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ return NULL;
+ }
+
+ res = merge_left ? prev : next;
+ khugepaged_enter_vma(res, vmg->flags);
+
+ vmg->state = VMA_MERGE_SUCCESS;
+ return res;
+
+abort:
+ vma_iter_set(vmg->vmi, start);
+ vma_iter_load(vmg->vmi);
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ return NULL;
+}
+
+/*
+ * vma_merge_new_range - Attempt to merge a new VMA into the address space
+ *
+ * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
+ * (exclusive), which we try to merge with any adjacent VMAs if possible.
+ *
+ * We are about to add a VMA to the address space starting at @vmg->start and
+ * ending at @vmg->end. There are three different possible scenarios:
+ *
+ * 1. There is a VMA with identical properties immediately adjacent to the
+ * proposed new VMA [@vmg->start, @vmg->end) either before or after it -
+ * EXPAND that VMA:
+ *
+ * Proposed: |-----| or |-----|
+ * Existing: |----| |----|
+ *
+ * 2. There are VMAs with identical properties immediately adjacent to the
+ * proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
+ * EXPAND the former and REMOVE the latter:
+ *
+ * Proposed: |-----|
+ * Existing: |----| |----|
+ *
+ * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
+ * VMAs do not have identical attributes - NO MERGE POSSIBLE.
+ *
+ * In instances where we can merge, this function returns the expanded VMA which
+ * will have its range adjusted accordingly and the underlying maple tree also
+ * adjusted.
+ *
+ * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
+ * to the VMA we expanded.
+ *
+ * On a successful merge, this function adjusts [@vmg->start, @vmg->end) to
+ * span the expanded range.
+ *
+ * ASSUMPTIONS:
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
+ *   other than VMAs that will be unmapped should the operation succeed.
+ * - The caller must have specified the previous vma in @vmg->prev.
+ * - The caller must have specified the next vma in @vmg->next.
+ * - The caller must have positioned the vmi at or before the gap.
+ */
+struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *prev = vmg->prev;
+ struct vm_area_struct *next = vmg->next;
+ unsigned long start = vmg->start;
+ unsigned long end = vmg->end;
+ pgoff_t pgoff = vmg->pgoff;
+ pgoff_t pglen = PHYS_PFN(end - start);
+ bool can_merge_left, can_merge_right;
+
+ mmap_assert_write_locked(vmg->mm);
+ VM_WARN_ON(vmg->vma);
+ /* vmi must point at or before the gap. */
+ VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
+
+ vmg->state = VMA_MERGE_NOMERGE;
+
+ /* Special VMAs are unmergeable, as are ranges with no prev/next VMA. */
+ if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
+ return NULL;
+
+ can_merge_left = can_vma_merge_left(vmg);
+ can_merge_right = can_vma_merge_right(vmg, can_merge_left);
+
+ /* If we can merge with the next VMA, adjust vmg accordingly. */
+ if (can_merge_right) {
+ vmg->end = next->vm_end;
+ vmg->vma = next;
+ vmg->pgoff = next->vm_pgoff - pglen;
+ }
+
+ /* If we can merge with the previous VMA, adjust vmg accordingly. */
+ if (can_merge_left) {
+ vmg->start = prev->vm_start;
+ vmg->vma = prev;
+ vmg->pgoff = prev->vm_pgoff;
+
+ /*
+ * If this merge would result in removal of the next VMA but we
+ * are not permitted to do so, reduce the operation to merging
+ * prev and vma.
+ */
+ if (can_merge_right && !can_merge_remove_vma(next))
+ vmg->end = end;
+
+ vma_prev(vmg->vmi); /* Equivalent to going to the previous range */
+ }
+
+ /*
+ * Now try to expand adjacent VMA(s). This takes care of removing the
+ * following VMA if we have VMAs on both sides.
+ */
+ if (vmg->vma && !vma_expand(vmg)) {
+ khugepaged_enter_vma(vmg->vma, vmg->flags);
+ vmg->state = VMA_MERGE_SUCCESS;
+ return vmg->vma;
+ }
+
+ /* If expansion failed, reset state. Allows us to retry merge later. */
+ if (vmg->vma == prev) /* Rewind the iterator moved by vma_prev() above. */
+ vma_iter_set(vmg->vmi, start);
+ vmg->vma = NULL;
+ vmg->start = start;
+ vmg->end = end;
+ vmg->pgoff = pgoff;
+
+ return NULL;
+}
+
+/*
+ * vma_expand - Expand an existing VMA
+ *
+ * @vmg: Describes a VMA expansion operation.
+ *
+ * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
+ * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
+ * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
+ * vmg->next needs to be handled by the caller.
+ *
+ * Returns: 0 on success.
+ *
+ * ASSUMPTIONS:
+ * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
+ * - The caller must have set @vmg->vma and @vmg->next.
+ */
+int vma_expand(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *anon_dup = NULL;
+ bool remove_next = false;
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *next = vmg->next;
+
+ mmap_assert_write_locked(vmg->mm);
+
+ vma_start_write(vma);
+ if (next && (vma != next) && (vmg->end == next->vm_end)) {
+ int ret;
+
+ remove_next = true;
+ /* This should already have been checked by this point. */
+ VM_WARN_ON(!can_merge_remove_vma(next));
+ vma_start_write(next);
+ ret = dup_anon_vma(vma, next, &anon_dup);
+ if (ret)
+ return ret;
+ }
+
+ /* Not merging but overwriting any part of next is not handled. */
+ VM_WARN_ON(next && !remove_next &&
+ next != vma && vmg->end > next->vm_start);
+ /* Only handles expanding */
+ VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
+
+ if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
+ goto nomem;
+
+ return 0;
+
+nomem:
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+ return -ENOMEM;
+}
+
+/*
+ * vma_shrink() - Reduce an existing VMA's memory area
+ * @vmi: The vma iterator
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ * @pgoff: The new page offset of @vma
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+ struct vma_prepare vp;
+
+ WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+ if (vma->vm_start < start)
+ vma_iter_config(vmi, vma->vm_start, start);
+ else
+ vma_iter_config(vmi, end, vma->vm_end);
+
+ if (vma_iter_prealloc(vmi, NULL))
+ return -ENOMEM;
+
+ vma_start_write(vma);
+
+ init_vma_prep(&vp, vma);
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vma, start, end, 0);
+
+ vma_iter_clear(vmi);
+ vma_set_range(vma, start, end, pgoff);
+ vma_complete(&vp, vmi, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+ return 0;
+}
+
+static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach, bool mm_wr_locked)
+{
+ struct mmu_gather tlb;
+
+ if (!vms->clear_ptes) /* Nothing to do */
+ return;
+
+ /*
+ * We can free page tables without write-locking mmap_lock because VMAs
+ * were isolated before we downgraded mmap_lock.
+ */
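+ /*
+ * Position at index 1: the first detached vma is passed to
+ * unmap_vmas() directly; the maple state supplies the rest.
+ */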
+ mas_set(mas_detach, 1);
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, vms->vma->vm_mm);
+ update_hiwater_rss(vms->vma->vm_mm);
+ unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+ vms->vma_count, mm_wr_locked);
+
+ mas_set(mas_detach, 1);
+ /* start and end may be different if there is no prev or next vma. */
+ free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
+ vms->unmap_end, mm_wr_locked);
+ tlb_finish_mmu(&tlb);
+ vms->clear_ptes = false;
+}
+
+void vms_clean_up_area(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+
+ if (!vms->nr_pages)
+ return;
+
+ vms_clear_ptes(vms, mas_detach, true);
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ vms->closed_vm_ops = true;
+}
+
+/*
+ * vms_complete_munmap_vmas() - Finish the munmap() operation
+ * @vms: The vma munmap struct
+ * @mas_detach: The maple state of the detached vmas
+ *
+ * This updates the mm_struct, unmaps the region, frees the resources used for
+ * the munmap() and, if requested, downgrades the lock. Everything here must be
+ * done after the vma maple tree has been updated.
+ */
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+
+ mm = current->mm;
+ mm->map_count -= vms->vma_count;
+ mm->locked_vm -= vms->locked_vm;
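+ /*
+ * The vmas were already isolated from the maple tree, so the rest of
+ * the teardown can proceed with the lock downgraded to read mode.
+ */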
+ if (vms->unlock)
+ mmap_write_downgrade(mm);
+
+ if (!vms->nr_pages)
+ return;
+
+ vms_clear_ptes(vms, mas_detach, !vms->unlock);
+ /* Update high watermark before we lower total_vm */
+ update_hiwater_vm(mm);
+ /* Stat accounting */
+ WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+ /* Paranoid bookkeeping */
+ VM_WARN_ON(vms->exec_vm > mm->exec_vm);
+ VM_WARN_ON(vms->stack_vm > mm->stack_vm);
+ VM_WARN_ON(vms->data_vm > mm->data_vm);
+ mm->exec_vm -= vms->exec_vm;
+ mm->stack_vm -= vms->stack_vm;
+ mm->data_vm -= vms->data_vm;
+
+ /* Remove and clean up vmas */
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+ remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);
+
+ vm_unacct_memory(vms->nr_accounted);
+ validate_mm(mm);
+ if (vms->unlock)
+ mmap_read_unlock(mm);
+
+ __mt_destroy(mas_detach->tree);
+}
+
+/*
+ * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
+ * for removal at a later date. Handles splitting first and last if necessary
+ * and marking the vmas as isolated.
+ *
+ * @vms: The vma munmap struct
+ * @mas_detach: The maple state tracking the detached tree
+ *
+ * Return: 0 on success, error otherwise
+ */
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *next = NULL;
+ int error;
+
+ /*
+ * If we need to split any vma, do it now to save pain later.
+ * Does it split the first one?
+ */
+ if (vms->start > vms->vma->vm_start) {
+
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (vms->end < vms->vma->vm_end &&
+ vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
+ error = -ENOMEM;
+ goto map_count_exceeded;
+ }
+
+ /* Don't bother splitting the VMA if we can't unmap it anyway */
+ if (!can_modify_vma(vms->vma)) {
+ error = -EPERM;
+ goto start_split_failed;
+ }
+
+ error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
+ if (error)
+ goto start_split_failed;
+ }
+ vms->prev = vma_prev(vms->vmi);
+ if (vms->prev)
+ vms->unmap_start = vms->prev->vm_end;
+
+ /*
+ * Detach a range of VMAs from the mm. Using next as a temp variable as
+ * it is always overwritten.
+ */
+ for_each_vma_range(*(vms->vmi), next, vms->end) {
+ long nrpages;
+
+ if (!can_modify_vma(next)) {
+ error = -EPERM;
+ goto modify_vma_failed;
+ }
+ /* Does it split the end? */
+ if (next->vm_end > vms->end) {
+ error = __split_vma(vms->vmi, next, vms->end, 0);
+ if (error)
+ goto end_split_failed;
+ }
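+ /*
+ * Detached vmas are stored in mas_detach at sequential indices
+ * (0, 1, 2, ...) rather than by address, so later passes can
+ * iterate them cheaply.
+ */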
+ vma_start_write(next);
+ mas_set(mas_detach, vms->vma_count++);
+ error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
+ if (error)
+ goto munmap_gather_failed;
+
+ vma_mark_detached(next, true);
+ nrpages = vma_pages(next);
+
+ vms->nr_pages += nrpages;
+ if (next->vm_flags & VM_LOCKED)
+ vms->locked_vm += nrpages;
+
+ if (next->vm_flags & VM_ACCOUNT)
+ vms->nr_accounted += nrpages;
+
+ if (is_exec_mapping(next->vm_flags))
+ vms->exec_vm += nrpages;
+ else if (is_stack_mapping(next->vm_flags))
+ vms->stack_vm += nrpages;
+ else if (is_data_mapping(next->vm_flags))
+ vms->data_vm += nrpages;
+
+ if (unlikely(vms->uf)) {
+ /*
+ * If userfaultfd_unmap_prep() returns an error, the vmas
+ * will remain split, but userland will get a highly
+ * unexpected error anyway. This is no different than the
+ * case where the first of the two __split_vma() calls
+ * fails, but we don't undo the first split even though we
+ * could. This failure is unlikely enough that it's not
+ * worth optimizing for.
+ */
+ error = userfaultfd_unmap_prep(next, vms->start,
+ vms->end, vms->uf);
+ if (error)
+ goto userfaultfd_error;
+ }
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+ BUG_ON(next->vm_start < vms->start);
+ BUG_ON(next->vm_start > vms->end);
+#endif
+ }
+
+ vms->next = vma_next(vms->vmi);
+ if (vms->next)
+ vms->unmap_end = vms->next->vm_start;
+
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ /* Make sure no VMAs are about to be lost. */
+ {
+ MA_STATE(test, mas_detach->tree, 0, 0);
+ struct vm_area_struct *vma_mas, *vma_test;
+ int test_count = 0;
+
+ vma_iter_set(vms->vmi, vms->start);
+ rcu_read_lock();
+ vma_test = mas_find(&test, vms->vma_count - 1);
+ for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
+ BUG_ON(vma_mas != vma_test);
+ test_count++;
+ vma_test = mas_next(&test, vms->vma_count - 1);
+ }
+ rcu_read_unlock();
+ BUG_ON(vms->vma_count != test_count);
+ }
+#endif
+
+ while (vma_iter_addr(vms->vmi) > vms->start)
+ vma_iter_prev_range(vms->vmi);
+
+ vms->clear_ptes = true;
+ return 0;
+
+userfaultfd_error:
+munmap_gather_failed:
+end_split_failed:
+modify_vma_failed:
+ reattach_vmas(mas_detach);
+start_split_failed:
+map_count_exceeded:
+ return error;
+}
+
+/*
+ * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * @vmi: The vma iterator
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
+ * success.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock)
+{
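+ /* Detached vmas are tracked in an on-stack maple tree until removed. */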
+ struct maple_tree mt_detach;
+ MA_STATE(mas_detach, &mt_detach, 0, 0);
+ mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ mt_on_stack(mt_detach);
+ struct vma_munmap_struct vms;
+ int error;
+
+ init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
+ error = vms_gather_munmap_vmas(&vms, &mas_detach);
+ if (error)
+ goto gather_failed;
+
+ error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
+ if (error)
+ goto clear_tree_failed;
+
+ /* Point of no return */
+ vms_complete_munmap_vmas(&vms, &mas_detach);
+ return 0;
+
+clear_tree_failed:
+ reattach_vmas(&mas_detach);
+gather_failed:
+ validate_mm(mm);
+ return error;
+}
+
+/*
+ * do_vmi_munmap() - munmap a given range.
+ * @vmi: The vma iterator
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length of the range to munmap
+ * @uf: The userfaultfd list_head
+ * @unlock: set to true if the user wants to drop the mmap_lock on success
+ *
+ * This function takes a @vmi that is either pointing to the previous VMA or
+ * set to MA_START and sets it up to remove the mapping(s). The @len will be
+ * page aligned.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock)
+{
+ unsigned long end;
+ struct vm_area_struct *vma;
+
+ if (offset_in_page(start) || start > TASK_SIZE || len > TASK_SIZE - start)
+ return -EINVAL;
+
+ end = start + PAGE_ALIGN(len);
+ if (end == start)
+ return -EINVAL;
+
+ /* Find the first overlapping VMA */
+ vma = vma_find(vmi, end);
+ if (!vma) {
+ if (unlock)
+ mmap_write_unlock(mm);
+ return 0;
+ }
+
+ return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
+}
+
+/*
+ * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
+ * context and anonymous VMA name within the range [start, end).
+ *
+ * As a result, we might be able to merge the newly modified VMA range with an
+ * adjacent VMA with identical properties.
+ *
+ * If no merge is possible and the range does not span the entirety of the VMA,
+ * we then need to split the VMA to accommodate the change.
+ *
+ * The function returns either the merged VMA, the original VMA if a split was
+ * required instead, or an error if the split failed.
+ */
+static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *merged;
+
+ /* First, try to merge. */
+ merged = vma_merge_existing_range(vmg);
+ if (merged)
+ return merged;
+
+ /* Split any preceding portion of the VMA. */
+ if (vma->vm_start < vmg->start) {
+ int err = split_vma(vmg->vmi, vma, vmg->start, 1);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ /* Split any trailing portion of the VMA. */
+ if (vma->vm_end > vmg->end) {
+ int err = split_vma(vmg->vmi, vma, vmg->end, 0);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+struct vm_area_struct *vma_modify_flags(
+ struct vma_iterator *vmi, struct vm_area_struct *prev,
+ struct vm_area_struct *vma, unsigned long start, unsigned long end,
+ unsigned long new_flags)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+
+ return vma_modify(&vmg);
+}
+
+struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+ vmg.anon_name = new_name;
+
+ return vma_modify(&vmg);
+}
+
+struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.policy = new_pol;
+
+ return vma_modify(&vmg);
+}
+
+struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+ vmg.uffd_ctx = new_ctx;
+
+ return vma_modify(&vmg);
+}
+
+/*
+ * Expand vma by delta bytes, potentially merging with an immediately adjacent
+ * VMA with identical properties.
+ */
+struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long delta)
+{
+ VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
+
+ vmg.next = vma_iter_next_rewind(vmi, NULL);
+ vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
+
+ return vma_merge_new_range(&vmg);
+}
+
+void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
+{
+ vb->count = 0;
+}
+
+static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
+{
+ struct address_space *mapping;
+ int i;
+
+ mapping = vb->vmas[0]->vm_file->f_mapping;
+ i_mmap_lock_write(mapping);
+ for (i = 0; i < vb->count; i++) {
+ VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
+ __remove_shared_vm_struct(vb->vmas[i], mapping);
+ }
+ i_mmap_unlock_write(mapping);
+
+ unlink_file_vma_batch_init(vb);
+}
+
+void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
+ struct vm_area_struct *vma)
+{
+ if (vma->vm_file == NULL)
+ return;
+
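+ /*
+ * Flush the batch when the file changes or the batch is full;
+ * one i_mmap_rwsem acquisition covers only a single mapping.
+ */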
+ if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
+ vb->count == ARRAY_SIZE(vb->vmas))
+ unlink_file_vma_batch_process(vb);
+
+ vb->vmas[vb->count] = vma;
+ vb->count++;
+}
+
+void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
+{
+ if (vb->count > 0)
+ unlink_file_vma_batch_process(vb);
+}
+
+/*
+ * Unlink a file-based vm structure from its interval tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
+ */
+void unlink_file_vma(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+
+ if (file) {
+ struct address_space *mapping = file->f_mapping;
+
+ i_mmap_lock_write(mapping);
+ __remove_shared_vm_struct(vma, mapping);
+ i_mmap_unlock_write(mapping);
+ }
+}
+
+void vma_link_file(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct address_space *mapping;
+
+ if (file) {
+ mapping = file->f_mapping;
+ i_mmap_lock_write(mapping);
+ __vma_link_file(vma, mapping);
+ i_mmap_unlock_write(mapping);
+ }
+}
+
+int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ VMA_ITERATOR(vmi, mm, 0);
+
+ vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
+ if (vma_iter_prealloc(&vmi, vma))
+ return -ENOMEM;
+
+ vma_start_write(vma);
+ vma_iter_store(&vmi, vma);
+ vma_link_file(vma);
+ mm->map_count++;
+ validate_mm(mm);
+ return 0;
+}
+
+/*
+ * Copy the vma structure to a new location in the same mm,
+ * prior to moving page table entries, to effect an mremap move.
+ */
+struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ unsigned long addr, unsigned long len, pgoff_t pgoff,
+ bool *need_rmap_locks)
+{
+ struct vm_area_struct *vma = *vmap;
+ unsigned long vma_start = vma->vm_start;
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *new_vma;
+ bool faulted_in_anon_vma = true;
+ VMA_ITERATOR(vmi, mm, addr);
+ VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
+
+ /*
+ * If the anonymous vma has not yet been faulted in, update the new
+ * pgoff to match the new location, to increase its chance of merging.
+ */
+ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
+ pgoff = addr >> PAGE_SHIFT;
+ faulted_in_anon_vma = false;
+ }
+
+ new_vma = find_vma_prev(mm, addr, &vmg.prev);
+ if (new_vma && new_vma->vm_start < addr + len)
+ return NULL; /* should never get here */
+
+ vmg.vma = NULL; /* New VMA range. */
+ vmg.pgoff = pgoff;
+ vmg.next = vma_iter_next_rewind(&vmi, NULL);
+ new_vma = vma_merge_new_range(&vmg);
+
+ if (new_vma) {
+ /*
+ * Source vma may have been merged into new_vma
+ */
+ if (unlikely(vma_start >= new_vma->vm_start &&
+ vma_start < new_vma->vm_end)) {
+ /*
+ * The only way we can get a vma_merge with
+ * self during an mremap is if the vma hasn't
+ * been faulted in yet and we were allowed to
+ * reset the dst vma->vm_pgoff to the
+ * destination address of the mremap to allow
+ * the merge to happen. mremap must change the
+ * vm_pgoff linearity between src and dst vmas
+ * (in turn preventing a vma_merge) to be
+ * safe. It is only safe to keep the vm_pgoff
+ * linear if there are no pages mapped yet.
+ */
+ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
+ *vmap = vma = new_vma;
+ }
+ *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
+ } else {
+ new_vma = vm_area_dup(vma);
+ if (!new_vma)
+ goto out;
+ vma_set_range(new_vma, addr, addr + len, pgoff);
+ if (vma_dup_policy(vma, new_vma))
+ goto out_free_vma;
+ if (anon_vma_clone(new_vma, vma))
+ goto out_free_mempol;
+ if (new_vma->vm_file)
+ get_file(new_vma->vm_file);
+ if (new_vma->vm_ops && new_vma->vm_ops->open)
+ new_vma->vm_ops->open(new_vma);
+ if (vma_link(mm, new_vma))
+ goto out_vma_link;
+ *need_rmap_locks = false;
+ }
+ return new_vma;
+
+out_vma_link:
+ if (new_vma->vm_ops && new_vma->vm_ops->close)
+ new_vma->vm_ops->close(new_vma);
+
+ if (new_vma->vm_file)
+ fput(new_vma->vm_file);
+
+ unlink_anon_vmas(new_vma);
+out_free_mempol:
+ mpol_put(vma_policy(new_vma));
+out_free_vma:
+ vm_area_free(new_vma);
+out:
+ return NULL;
+}
+
+/*
+ * Rough compatibility check to quickly see if it's even worth looking
+ * at sharing an anon_vma.
+ *
+ * They need to have the same vm_file, and the flags can only differ
+ * in things that mprotect may change.
+ *
+ * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
+ * we can merge the two vmas. For example, we refuse to merge a vma if
+ * there is a vm_ops->close() function, because that indicates that the
+ * driver is doing some kind of reference counting. But that doesn't
+ * really matter for the anon_vma sharing case.
+ */
+static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
+{
+ return a->vm_end == b->vm_start &&
+ mpol_equal(vma_policy(a), vma_policy(b)) &&
+ a->vm_file == b->vm_file &&
+ !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
+ b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
+}
+
+/*
+ * Do some basic sanity checking to see if we can re-use the anon_vma
+ * from 'old'. The 'a'/'b' vmas are in VM order - one of them will be
+ * the same as 'old', the other will be the new one that is trying
+ * to share the anon_vma.
+ *
+ * NOTE! This runs with mmap_lock held for reading, so it is possible that
+ * the anon_vma of 'old' is concurrently in the process of being set up
+ * by another page fault trying to merge _that_. But that's ok: if it
+ * is being set up, that automatically means that it will be a singleton
+ * acceptable for merging, so we can do all of this optimistically. But
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
+ *
+ * IOW: that the "list_is_singular()" test on the anon_vma_chain only
+ * matters for the 'stable anon_vma' case (ie the thing we want to avoid
+ * is to return an anon_vma that is "complex" due to having gone through
+ * a fork).
+ *
+ * We also make sure that the two vma's are compatible (adjacent,
+ * and with the same memory policies). That's all stable, even with just
+ * a read lock on the mmap_lock.
+ */
+static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
+ struct vm_area_struct *a,
+ struct vm_area_struct *b)
+{
+ if (anon_vma_compatible(a, b)) {
+ struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
+
+ if (anon_vma && list_is_singular(&old->anon_vma_chain))
+ return anon_vma;
+ }
+ return NULL;
+}
+
+/*
+ * find_mergeable_anon_vma is used by anon_vma_prepare, to check
+ * neighbouring vmas for a suitable anon_vma, before it goes off
+ * to allocate a new anon_vma. It checks because a repetitive
+ * sequence of mprotects and faults may otherwise lead to distinct
+ * anon_vmas being allocated, preventing vma merge in subsequent
+ * mprotect.
+ */
+struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma *anon_vma = NULL;
+ struct vm_area_struct *prev, *next;
+ VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
+
+ /* Try next first. */
+ next = vma_iter_load(&vmi);
+ if (next) {
+ anon_vma = reusable_anon_vma(next, vma, next);
+ if (anon_vma)
+ return anon_vma;
+ }
+
+ prev = vma_prev(&vmi);
+ VM_BUG_ON_VMA(prev != vma, vma);
+ prev = vma_prev(&vmi);
+ /* Then try prev. */
+ if (prev)
+ anon_vma = reusable_anon_vma(prev, prev, vma);
+
+ /*
+ * We might reach here with anon_vma == NULL if we can't find
+ * any reusable anon_vma.
+ * There's no absolute need to look only at touching neighbours:
+ * we could search further afield for "compatible" anon_vmas.
+ * But it would probably just be a waste of time searching,
+ * or lead to too many vmas hanging off the same anon_vma.
+ * We're trying to allow mprotect remerging later on,
+ * not trying to minimize memory used for anon_vmas.
+ */
+ return anon_vma;
+}
+
+static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
+{
+ return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
+}
+
+static bool vma_is_shared_writable(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
+ (VM_WRITE | VM_SHARED);
+}
+
+static bool vma_fs_can_writeback(struct vm_area_struct *vma)
+{
+ /* No managed pages to writeback. */
+ if (vma->vm_flags & VM_PFNMAP)
+ return false;
+
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_can_writeback(vma->vm_file->f_mapping);
+}
+
+/*
+ * Does this VMA require the underlying folios to have their dirty state
+ * tracked?
+ */
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
+{
+ /* Only shared, writable VMAs require dirty tracking. */
+ if (!vma_is_shared_writable(vma))
+ return false;
+
+ /* Does the filesystem need to be notified? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+ return true;
+
+ /*
+ * Even if the filesystem doesn't indicate a need for writenotify, if it
+ * can writeback, dirty tracking is still required.
+ */
+ return vma_fs_can_writeback(vma);
+}
+
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+{
+ /* If it was private or non-writable, the write bit is already clear */
+ if (!vma_is_shared_writable(vma))
+ return false;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+ return true;
+
+ /*
+ * The open routine did something to the protections that
+ * pgprot_modify() won't preserve?
+ */
+ if (pgprot_val(vm_page_prot) !=
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
+ return false;
+
+ /*
+ * Do we need to track softdirty? hugetlb does not support softdirty
+ * tracking yet.
+ */
+ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
+ return true;
+
+ /* Do we need write faults for uffd-wp tracking? */
+ if (userfaultfd_wp(vma))
+ return true;
+
+ /* Can the mapping track the dirty pages? */
+ return vma_fs_can_writeback(vma);
+}
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
+{
+ if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
+ /*
+ * The LSB of head.next can't change from under us
+ * because we hold the mm_all_locks_mutex.
+ */
+ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
+ /*
+ * We can safely modify head.next after taking the
+ * anon_vma->root->rwsem. If some other vma in this mm shares
+ * the same anon_vma we won't take it again.
+ *
+ * No need of atomic instructions here, head.next
+ * can't change from under us thanks to the
+ * anon_vma->root->rwsem.
+ */
+ if (__test_and_set_bit(0, (unsigned long *)
+ &anon_vma->root->rb_root.rb_root.rb_node))
+ BUG();
+ }
+}
+
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
+{
+ if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+ /*
+ * AS_MM_ALL_LOCKS can't change from under us because
+ * we hold the mm_all_locks_mutex.
+ *
+ * Operations on ->flags have to be atomic because
+ * even if AS_MM_ALL_LOCKS is stable thanks to the
+ * mm_all_locks_mutex, there may be other cpus
+ * changing other bitflags in parallel to us.
+ */
+ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+ BUG();
+ down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
+ }
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_lock in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_lock until mm_drop_all_locks() returns.
+ *
+ * mmap_lock in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+ * altering the vma layout. It's also needed in write mode to avoid new
+ * anon_vmas to be associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags avoid taking the same lock twice if more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We take locks in the following order, according to the comment at the
+ * beginning of mm/rmap.c:
+ * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
+ * hugetlb mappings);
+ * - all vmas marked locked;
+ * - all i_mmap_rwsem locks;
+ * - all anon_vma->rwsems.
+ *
+ * We can take all locks within these types randomly because the VM code
+ * doesn't nest them and we are protected from parallel mm_take_all_locks()
+ * by mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
+ * that may have to take thousands of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ struct anon_vma_chain *avc;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_assert_write_locked(mm);
+
+ mutex_lock(&mm_all_locks_mutex);
+
+ /*
+ * vma_start_write() does not have a complement in mm_drop_all_locks()
+ * because vma_start_write() is always asymmetrical; it marks a VMA as
+ * being written to until mmap_write_unlock() or mmap_write_downgrade()
+ * is reached.
+ */
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ vma_start_write(vma);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->vm_file && vma->vm_file->f_mapping &&
+ is_vm_hugetlb_page(vma))
+ vm_lock_mapping(mm, vma->vm_file->f_mapping);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->vm_file && vma->vm_file->f_mapping &&
+ !is_vm_hugetlb_page(vma))
+ vm_lock_mapping(mm, vma->vm_file->f_mapping);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->anon_vma)
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ vm_lock_anon_vma(mm, avc->anon_vma);
+ }
+
+ return 0;
+
+out_unlock:
+ mm_drop_all_locks(mm);
+ return -EINTR;
+}
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+ if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
+ /*
+ * The LSB of head.next can't change to 0 from under
+ * us because we hold the mm_all_locks_mutex.
+ *
+ * We must however clear the bitflag before unlocking
+ * the vma so the users using the anon_vma->rb_root will
+ * never see our bitflag.
+ *
+ * No need of atomic instructions here, head.next
+ * can't change from under us until we release the
+ * anon_vma->root->rwsem.
+ */
+ if (!__test_and_clear_bit(0, (unsigned long *)
+ &anon_vma->root->rb_root.rb_root.rb_node))
+ BUG();
+ anon_vma_unlock_write(anon_vma);
+ }
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+ if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+ /*
+ * AS_MM_ALL_LOCKS can't change to 0 from under us
+ * because we hold the mm_all_locks_mutex.
+ */
+ i_mmap_unlock_write(mapping);
+ if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+ &mapping->flags))
+ BUG();
+ }
+}
+
+/*
+ * The mmap_lock cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ struct anon_vma_chain *avc;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_assert_write_locked(mm);
+ BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+ for_each_vma(vmi, vma) {
+ if (vma->anon_vma)
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ vm_unlock_anon_vma(avc->anon_vma);
+ if (vma->vm_file && vma->vm_file->f_mapping)
+ vm_unlock_mapping(vma->vm_file->f_mapping);
+ }
+
+ mutex_unlock(&mm_all_locks_mutex);
+}
diff --git a/mm/vma.h b/mm/vma.h
new file mode 100644
index 000000000000..819f994cf727
--- /dev/null
+++ b/mm/vma.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * vma.h
+ *
+ * Core VMA manipulation API implemented in vma.c.
+ */
+#ifndef __MM_VMA_H
+#define __MM_VMA_H
+
+/*
+ * VMA lock generalization
+ */
+struct vma_prepare {
+ struct vm_area_struct *vma;
+ struct vm_area_struct *adj_next;
+ struct file *file;
+ struct address_space *mapping;
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *insert;
+ struct vm_area_struct *remove;
+ struct vm_area_struct *remove2;
+};
+
+struct unlink_vma_file_batch {
+ int count;
+ struct vm_area_struct *vmas[8];
+};
+
+/*
+ * vma munmap operation
+ */
+struct vma_munmap_struct {
+ struct vma_iterator *vmi;
+ struct vm_area_struct *vma; /* The first vma to munmap */
+ struct vm_area_struct *prev; /* vma before the munmap area */
+ struct vm_area_struct *next; /* vma after the munmap area */
+ struct list_head *uf; /* Userfaultfd list_head */
+ unsigned long start; /* Aligned start addr (inclusive) */
+ unsigned long end; /* Aligned end addr (exclusive) */
+ unsigned long unmap_start; /* Unmap PTE start */
+ unsigned long unmap_end; /* Unmap PTE end */
+ int vma_count; /* Number of vmas that will be removed */
+ bool unlock; /* Unlock after the munmap */
+ bool clear_ptes; /* If there are outstanding PTEs to be cleared */
+ bool closed_vm_ops; /* call_mmap() was encountered, so vmas may be closed */
+ /* 1 byte hole */
+ unsigned long nr_pages; /* Number of pages being removed */
+ unsigned long locked_vm; /* Number of locked pages */
+ unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
+ unsigned long exec_vm;
+ unsigned long stack_vm;
+ unsigned long data_vm;
+};
+
+enum vma_merge_state {
+ VMA_MERGE_START,
+ VMA_MERGE_ERROR_NOMEM,
+ VMA_MERGE_NOMERGE,
+ VMA_MERGE_SUCCESS,
+};
+
+/* Represents a VMA merge operation. */
+struct vma_merge_struct {
+ struct mm_struct *mm;
+ struct vma_iterator *vmi;
+ pgoff_t pgoff;
+ struct vm_area_struct *prev;
+ struct vm_area_struct *next; /* Modified by vma_merge(). */
+ struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
+ unsigned long start;
+ unsigned long end;
+ unsigned long flags;
+ struct file *file;
+ struct anon_vma *anon_vma;
+ struct mempolicy *policy;
+ struct vm_userfaultfd_ctx uffd_ctx;
+ struct anon_vma_name *anon_name;
+ enum vma_merge_state state;
+};
+
+static inline bool vmg_nomem(struct vma_merge_struct *vmg)
+{
+ return vmg->state == VMA_MERGE_ERROR_NOMEM;
+}
+
+/* Assumes addr >= vma->vm_start. */
+static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
+}
+
+#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_) \
+ struct vma_merge_struct name = { \
+ .mm = mm_, \
+ .vmi = vmi_, \
+ .start = start_, \
+ .end = end_, \
+ .flags = flags_, \
+ .pgoff = pgoff_, \
+ .state = VMA_MERGE_START, \
+ }
+
+#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
+ struct vma_merge_struct name = { \
+ .mm = vma_->vm_mm, \
+ .vmi = vmi_, \
+ .prev = prev_, \
+ .next = NULL, \
+ .vma = vma_, \
+ .start = start_, \
+ .end = end_, \
+ .flags = vma_->vm_flags, \
+ .pgoff = vma_pgoff_offset(vma_, start_), \
+ .file = vma_->vm_file, \
+ .anon_vma = vma_->anon_vma, \
+ .policy = vma_policy(vma_), \
+ .uffd_ctx = vma_->vm_userfaultfd_ctx, \
+ .anon_name = anon_vma_name(vma_), \
+ .state = VMA_MERGE_START, \
+ }
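+
+/*
+ * Illustrative use (not part of this change): merging a new range is
+ * typically set up as
+ *
+ * VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
+ * vmg.prev = prev;
+ * vmg.next = next;
+ * vma = vma_merge_new_range(&vmg);
+ */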
+
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+void validate_mm(struct mm_struct *mm);
+#else
+#define validate_mm(mm) do { } while (0)
+#endif
+
+/* Required for expand_downwards(). */
+void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);
+
+/* Required for expand_downwards(). */
+void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);
+
+int vma_expand(struct vma_merge_struct *vmg);
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
+
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+ struct vm_area_struct *vma, gfp_t gfp)
+{
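+ /* Re-walk if the iterator's range no longer covers vma->vm_start. */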
+ if (vmi->mas.status != ma_start &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_gfp(&vmi->mas, vma, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
+ * @vms: The vma munmap struct
+ * @vmi: The vma iterator
+ * @vma: The first vm_area_struct to munmap
+ * @start: The aligned start address to munmap
+ * @end: The aligned end address to munmap
+ * @uf: The userfaultfd list_head
+ * @unlock: Unlock after the operation. Only unlocked on success
+ */
+static inline void init_vma_munmap(struct vma_munmap_struct *vms,
+ struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, struct list_head *uf,
+ bool unlock)
+{
+ vms->vmi = vmi;
+ vms->vma = vma;
+ if (vma) {
+ vms->start = start;
+ vms->end = end;
+ } else {
+ vms->start = vms->end = 0;
+ }
+ vms->unlock = unlock;
+ vms->uf = uf;
+ vms->vma_count = 0;
+ vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+ vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
+ vms->unmap_start = FIRST_USER_ADDRESS;
+ vms->unmap_end = USER_PGTABLES_CEILING;
+ vms->clear_ptes = false;
+ vms->closed_vm_ops = false;
+}
+#endif
+
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+void vms_clean_up_area(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+/*
+ * reattach_vmas() - Undo any munmap work and free resources
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the vmas.
+ */
+static inline void reattach_vmas(struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+ vma_mark_detached(vma, false);
+
+ __mt_destroy(mas_detach->tree);
+}
+
+/*
+ * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
+ * operation.
+ * @vms: The vma unmap structure
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the
+ * vmas. If that's not possible because the ptes have been cleared (and
+ * vm_ops->close() may have been called), then NULL is written over the vmas
+ * and the vmas are removed (munmap() completed).
+ */
+static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct ma_state *mas = &vms->vmi->mas;
+
+ if (!vms->nr_pages)
+ return;
+
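+ /* The PTEs are still intact, so the vmas can simply be reattached. */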
+ if (vms->clear_ptes)
+ return reattach_vmas(mas_detach);
+
+ /*
+ * Aborting cannot just call the vm_ops open() because they are often
+ * not symmetrical and state data has been lost. Resort to the old
+ * failure method of leaving a gap where the MAP_FIXED mapping failed.
+ */
+ mas_set_range(mas, vms->start, vms->end - 1);
+ if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
+ pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
+ current->comm, current->pid);
+ /* Leaving vmas detached and in-tree may hamper recovery */
+ reattach_vmas(mas_detach);
+ } else {
+ /* Clean up the insertion of the unfortunate gap */
+ vms_complete_munmap_vmas(vms, mas_detach);
+ }
+}
+
+int
+do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
+
+int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock);
+
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
+
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct vm_area_struct *next);
+
+/* We are about to modify the VMA's flags. */
+struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
+ struct vm_area_struct *prev, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags);
+
+/* We are about to modify the VMA's flags and/or anon_name. */
+struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name);
+
+/* We are about to modify the VMA's memory policy. */
+struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol);
+
+/* We are about to modify the VMA's flags and/or uffd context. */
+struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx);
+
+struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
+
+struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long delta);
+
+void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
+
+void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);
+
+void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
+ struct vm_area_struct *vma);
+
+void unlink_file_vma(struct vm_area_struct *vma);
+
+void vma_link_file(struct vm_area_struct *vma);
+
+int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);
+
+struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ unsigned long addr, unsigned long len, pgoff_t pgoff,
+ bool *need_rmap_locks);
+
+struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);
+
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+
+int mm_take_all_locks(struct mm_struct *mm);
+void mm_drop_all_locks(struct mm_struct *mm);
+
+static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
+{
+ /*
+ * We want to check manually if we can change individual PTEs writable
+ * if we can't do that automatically for all PTEs in a mapping. For
+ * private mappings, that's always the case when we have write
+ * permissions as we properly have to handle COW.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ return vma_wants_writenotify(vma, vma->vm_page_prot);
+ return !!(vma->vm_flags & VM_WRITE);
+}
+
+#ifdef CONFIG_MMU
+static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+ return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+#endif
+
+static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
+ unsigned long min)
+{
+ return mas_prev(&vmi->mas, min);
+}
+
+/*
+ * These three helpers classify VMAs for virtual memory accounting.
+ */
+
+/*
+ * Executable code area - executable, not writable, not stack
+ */
+static inline bool is_exec_mapping(vm_flags_t flags)
+{
+ return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
+}
+
+/*
+ * Stack area (including shadow stacks)
+ *
+ * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
+ * do_mmap() forbids all other combinations.
+ */
+static inline bool is_stack_mapping(vm_flags_t flags)
+{
+ return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
+}
+
+/*
+ * Data area - private, writable, not stack
+ */
+static inline bool is_data_mapping(vm_flags_t flags)
+{
+ return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
+}
+
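+/* vma ranges are [start, end); maple tree ranges are inclusive, hence -1. */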
+static inline void vma_iter_config(struct vma_iterator *vmi,
+ unsigned long index, unsigned long last)
+{
+ __mas_set_range(&vmi->mas, index, last - 1);
+}
+
+static inline void vma_iter_reset(struct vma_iterator *vmi)
+{
+ mas_reset(&vmi->mas);
+}
+
+static inline
+struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
+{
+ return mas_prev_range(&vmi->mas, min);
+}
+
+static inline
+struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_next_range(&vmi->mas, max);
+}
+
+static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ return mas_empty_area(&vmi->mas, min, max - 1, size);
+}
+
+static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
+}
+
+/*
+ * VMA Iterator functions shared between nommu and mmap
+ */
+static inline int vma_iter_prealloc(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+}
+
+static inline void vma_iter_clear(struct vma_iterator *vmi)
+{
+ mas_store_prealloc(&vmi->mas, NULL);
+}
+
+static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
+{
+ return mas_walk(&vmi->mas);
+}
+
+/* Store a VMA with preallocated memory */
+static inline void vma_iter_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
+ vmi->mas.index > vma->vm_start)) {
+ pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
+ vmi->mas.index, vma->vm_start, vma->vm_start,
+ vma->vm_end, vmi->mas.index, vmi->mas.last);
+ }
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
+ vmi->mas.last < vma->vm_start)) {
+ pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
+ vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
+ vmi->mas.index, vmi->mas.last);
+ }
+#endif
+
+ if (vmi->mas.status != ma_start &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}
+
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+static inline
+struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
+{
+ return mas_prev_range(&vmi->mas, 0);
+}
+
+/*
+ * Retrieve the next VMA and rewind the iterator to the end of the previous
+ * VMA, or, if there is no previous VMA, to index 0.
+ */
+static inline
+struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
+ struct vm_area_struct **pprev)
+{
+ struct vm_area_struct *next = vma_next(vmi);
+ struct vm_area_struct *prev = vma_prev(vmi);
+
+ /*
+ * Consider the case where no previous VMA exists. We advance to the
+ * next VMA, skipping any gap, then rewind to the start of the range.
+ *
+ * If we were to unconditionally advance to the next range we'd wind up
+ * at the next VMA again, so we check to ensure there is a previous VMA
+ * to skip over.
+ */
+ if (prev)
+ vma_iter_next_range(vmi);
+
+ if (pprev)
+ *pprev = prev;
+
+ return next;
+}
+
+#ifdef CONFIG_64BIT
+
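+/* mseal() and VM_SEALED are only available on 64-bit kernels. */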
+static inline bool vma_is_sealed(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_SEALED);
+}
+
+/*
+ * Check whether a vma is sealed against modification.
+ * Returns true if modification is allowed.
+ */
+static inline bool can_modify_vma(struct vm_area_struct *vma)
+{
+ if (unlikely(vma_is_sealed(vma)))
+ return false;
+
+ return true;
+}
+
+bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
+
+#else
+
+static inline bool can_modify_vma(struct vm_area_struct *vma)
+{
+ return true;
+}
+
+static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
+{
+ return true;
+}
+
+#endif
+
+#endif /* __MM_VMA_H */
diff --git a/mm/vma_internal.h b/mm/vma_internal.h
new file mode 100644
index 000000000000..b930ab12a587
--- /dev/null
+++ b/mm/vma_internal.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * vma_internal.h
+ *
+ * Headers required by vma.c, which can be substituted accordingly when testing
+ * VMA functionality.
+ */
+
+#ifndef __MM_VMA_INTERNAL_H
+#define __MM_VMA_INTERNAL_H
+
+#include <linux/backing-dev.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/huge_mm.h>
+#include <linux/hugetlb_inline.h>
+#include <linux/kernel.h>
+#include <linux/khugepaged.h>
+#include <linux/list.h>
+#include <linux/maple_tree.h>
+#include <linux/mempolicy.h>
+#include <linux/mm.h>
+#include <linux/mm_inline.h>
+#include <linux/mm_types.h>
+#include <linux/mman.h>
+#include <linux/mmap_lock.h>
+#include <linux/mmdebug.h>
+#include <linux/mmu_context.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/pfn.h>
+#include <linux/rcupdate.h>
+#include <linux/rmap.h>
+#include <linux/rwsem.h>
+#include <linux/sched/signal.h>
+#include <linux/swap.h>
+#include <linux/uprobes.h>
+#include <linux/userfaultfd_k.h>
+
+#include <asm/current.h>
+#include <asm/tlb.h>
+
+#include "internal.h"
+
+#endif /* __MM_VMA_INTERNAL_H */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a0df1e2e155a..634162271c00 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -105,7 +105,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!pte)
return -ENOMEM;
do {
- if (!pte_none(ptep_get(pte))) {
+ if (unlikely(!pte_none(ptep_get(pte)))) {
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
dump_page(page, "remapping already mapped page");
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
+ vm->size = va_size(va);
vm->caller = caller;
va->vm = vm;
}
@@ -2018,7 +2018,7 @@ retry:
if (vm) {
vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
+ vm->size = va_size(va);
va->vm = vm;
}
@@ -2131,23 +2131,18 @@ reclaim_list_global(struct list_head *head)
static void
decay_va_pool_node(struct vmap_node *vn, bool full_decay)
{
+ LIST_HEAD(decay_list);
+ struct rb_root decay_root = RB_ROOT;
struct vmap_area *va, *nva;
- struct list_head decay_list;
- struct rb_root decay_root;
unsigned long n_decay;
int i;
- decay_root = RB_ROOT;
- INIT_LIST_HEAD(&decay_list);
-
for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
- struct list_head tmp_list;
+ LIST_HEAD(tmp_list);
if (list_empty(&vn->pool[i].head))
continue;
- INIT_LIST_HEAD(&tmp_list);
-
/* Detach the pool, so no-one can access it. */
spin_lock(&vn->pool_lock);
list_replace_init(&vn->pool[i].head, &tmp_list);
@@ -2198,7 +2193,7 @@ static void purge_vmap_node(struct work_struct *work)
vn->nr_purged = 0;
list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
- unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+ unsigned long nr = va_size(va) >> PAGE_SHIFT;
unsigned long orig_start = va->va_start;
unsigned long orig_end = va->va_end;
unsigned int vn_id = decode_vn_id(va->flags);
@@ -2344,8 +2339,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
if (WARN_ON_ONCE(!list_empty(&va->list)))
return;
- nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
- PAGE_SHIFT, &vmap_lazy_nr);
+ nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
+ &vmap_lazy_nr);
/*
* If it was request by a certain node we would like to
@@ -2941,8 +2936,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
if (WARN_ON_ONCE(!va))
return;
- debug_check_no_locks_freed((void *)va->va_start,
- (va->va_end - va->va_start));
+ debug_check_no_locks_freed((void *)va->va_start, va_size(va));
free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
@@ -3518,8 +3512,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
unsigned int order, unsigned int nr_pages, struct page **pages)
{
unsigned int nr_allocated = 0;
- gfp_t alloc_gfp = gfp;
- bool nofail = gfp & __GFP_NOFAIL;
struct page *page;
int i;
@@ -3530,9 +3522,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* more permissive.
*/
if (!order) {
- /* bulk allocator doesn't support nofail req. officially */
- gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
-
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;
@@ -3550,12 +3539,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* but mempolicy wants to alloc memory by interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
- nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
+ nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
nr_pages_request,
pages + nr_allocated);
-
else
- nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
+ nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
nr_pages_request,
pages + nr_allocated);
@@ -3569,30 +3557,24 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
if (nr != nr_pages_request)
break;
}
- } else if (gfp & __GFP_NOFAIL) {
- /*
- * Higher order nofail allocations are really expensive and
- * potentially dangerous (pre-mature OOM, disruptive reclaim
- * and compaction etc.
- */
- alloc_gfp &= ~__GFP_NOFAIL;
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
- if (!nofail && fatal_signal_pending(current))
+ if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
- page = alloc_pages_noprof(alloc_gfp, order);
+ page = alloc_pages_noprof(gfp, order);
else
- page = alloc_pages_node_noprof(nid, alloc_gfp, order);
+ page = alloc_pages_node_noprof(nid, gfp, order);
+
if (unlikely(!page))
break;
/*
- * Higher order allocations must be able to be treated as
- * indepdenent small pages by callers (as they can with
+ * High-order allocations must be able to be treated as
+ * independent small pages by callers (as they can with
* small-page vmallocs). Some drivers do their own refcounting
* on vmalloc_to_page() pages, some use page->mapping,
* page->lru, etc.
@@ -3653,7 +3635,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
page_order = vm_area_page_order(area);
- area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+ /*
+	 * High-order nofail allocations are really expensive and
+	 * potentially dangerous (premature OOM, disruptive reclaim,
+	 * compaction, etc.).
+	 *
+	 * Please note that __vmalloc_node_range_noprof() falls back
+	 * to order-0 pages if the high-order attempt is unsuccessful.
+ */
+ area->nr_pages = vm_area_alloc_pages((page_order ?
+ gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
node, page_order, nr_small_pages, area->pages);
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
@@ -4033,6 +4024,76 @@ void *vzalloc_node_noprof(unsigned long size, int node)
}
EXPORT_SYMBOL(vzalloc_node_noprof);
+/**
+ * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * @p: object to reallocate memory for
+ * @size: the size to reallocate
+ * @flags: the flags for the page level allocator
+ *
+ * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
+ * @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * This function must not be called concurrently with itself or vfree() for the
+ * same memory allocation.
+ *
+ * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
+ * failure
+ */
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ size_t old_size = 0;
+ void *n;
+
+ if (!size) {
+ vfree(p);
+ return NULL;
+ }
+
+ if (p) {
+ struct vm_struct *vm;
+
+ vm = find_vm_area(p);
+ if (unlikely(!vm)) {
+ WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
+ return NULL;
+ }
+
+ old_size = get_vm_area_size(vm);
+ }
+
+ /*
+ * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
+ * would be a good heuristic for when to shrink the vm_area?
+ */
+ if (size <= old_size) {
+ /* Zero out spare memory. */
+ if (want_init_on_alloc(flags))
+ memset((void *)p + size, 0, old_size - size);
+
+ return (void *)p;
+ }
+
+ /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
+ n = __vmalloc_noprof(size, flags);
+ if (!n)
+ return NULL;
+
+ if (p) {
+ memcpy(n, p, old_size);
+ vfree(p);
+ }
+
+ return n;
+}
+
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
@@ -4873,7 +4934,7 @@ static void show_purge_info(struct seq_file *m)
list_for_each_entry(va, &vn->lazy.head, list) {
seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
(void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
+ va_size(va));
}
spin_unlock(&vn->lazy.lock);
}
@@ -4895,7 +4956,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
if (va->flags & VMAP_RAM)
seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
(void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
+ va_size(va));
continue;
}
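
The newly added vrealloc() gives vmalloc memory krealloc()-style semantics. A brief usage sketch based on the documented behavior above — a hypothetical caller, assuming the usual alloc-tagging wrapper so that vrealloc() resolves to vrealloc_noprof():

/* Hypothetical caller: grow a vmalloc'ed buffer without a manual copy. */
static int grow_buffer(void **bufp, size_t new_size)
{
	/* Contents up to the old size are preserved across the call. */
	void *n = vrealloc(*bufp, new_size, GFP_KERNEL);

	if (!n)
		return -ENOMEM;	/* the old allocation is left intact on failure */

	*bufp = n;
	return 0;
}

Note the __GFP_ZERO caveat in the kerneldoc: a caller that relies on zeroed memory must pass __GFP_ZERO on every call for the same allocation, not just the first.
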
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd489c1af228..749cdc110c74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -628,7 +628,7 @@ typedef enum {
* Calls ->writepage().
*/
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
- struct swap_iocb **plug)
+ struct swap_iocb **plug, struct list_head *folio_list)
{
/*
* If the folio is dirty, only perform writeback if that write
@@ -676,6 +676,14 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
.swap_plug = plug,
};
+ /*
+	 * A large shmem folio can be split if CONFIG_THP_SWAP is
+	 * not enabled or if contiguous swap entries cannot be
+	 * allocated.
+ */
+ if (shmem_mapping(mapping) && folio_test_large(folio))
+ wbc.list = folio_list;
+
folio_set_reclaim(folio);
res = mapping->a_ops->writepage(&folio->page, &wbc);
if (res < 0)
@@ -863,7 +871,12 @@ static enum folio_references folio_check_references(struct folio *folio,
if (vm_flags & VM_LOCKED)
return FOLIOREF_ACTIVATE;
- /* rmap lock contention: rotate */
+ /*
+ * There are two cases to consider.
+ * 1) Rmap lock contention: rotate.
+ * 2) Skip the non-shared swapbacked folio mapped solely by
+ * the exiting or OOM-reaped process.
+ */
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
@@ -1003,9 +1016,6 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
- mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
- nr_succeeded);
-
return nr_succeeded;
}
@@ -1222,13 +1232,14 @@ retry:
goto keep_locked;
if (folio_test_large(folio)) {
/* cannot split folio, skip it */
- if (!can_split_folio(folio, NULL))
+ if (!can_split_folio(folio, 1, NULL))
goto activate_locked;
/*
* Split partially mapped folios right away.
* We can free the unmapped pages without IO.
*/
- if (data_race(!list_empty(&folio->_deferred_list)) &&
+ if (data_race(!list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio)) &&
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
@@ -1252,11 +1263,6 @@ retry:
goto activate_locked_split;
}
}
- } else if (folio_test_swapbacked(folio) &&
- folio_test_large(folio)) {
- /* Split shmem folio */
- if (split_folio_to_list(folio, folio_list))
- goto keep_locked;
}
/*
@@ -1357,12 +1363,25 @@ retry:
* starts and then write it out here.
*/
try_to_unmap_flush_dirty();
- switch (pageout(folio, mapping, &plug)) {
+ switch (pageout(folio, mapping, &plug, folio_list)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
+ /*
+			 * If a shmem folio was split during writeback to swap,
+			 * the tail pages will make their own pass through
+			 * this function and be accounted for then.
+ */
+ if (nr_pages > 1 && !folio_test_large(folio)) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
goto activate_locked;
case PAGE_SUCCESS:
+ if (nr_pages > 1 && !folio_test_large(folio)) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
stat->nr_pageout += nr_pages;
if (folio_test_writeback(folio))
@@ -1495,7 +1514,8 @@ keep:
/* 'folio_list' is always empty here */
/* Migrate folios selected for demotion */
- nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
+ stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
+ nr_reclaimed += stat->nr_demoted;
/* Folios that could not be demoted are still in @demote_folios */
if (!list_empty(&demote_folios)) {
/* Folios which weren't demoted go back on @folio_list */
@@ -1941,6 +1961,8 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
+ __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset();
if (!cgroup_reclaim(sc))
@@ -2239,10 +2261,11 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
/*
- * Flush the memory cgroup stats, so that we read accurate per-memcg
- * lruvec stats for heuristics.
+ * Flush the memory cgroup stats in rate-limited way as we don't need
+ * most accurate stats here. We may switch to regular stats flushing
+ * in the future once it is cheap enough.
*/
- mem_cgroup_flush_stats(sc->target_mem_cgroup);
+ mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup);
/*
* Determine the scan balance between anon and file LRUs.
@@ -3456,7 +3479,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
goto next;
if (!pmd_trans_huge(pmd[i])) {
- if (should_clear_pmd_young())
+ if (!walk->force_scan && should_clear_pmd_young())
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
}
@@ -3543,7 +3566,7 @@ restart:
walk->mm_stats[MM_NONLEAF_TOTAL]++;
- if (should_clear_pmd_young()) {
+ if (!walk->force_scan && should_clear_pmd_young()) {
if (!pmd_young(val))
continue;
@@ -4300,7 +4323,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* ineligible */
- if (zone > sc->reclaim_idx) {
+ if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
@@ -6644,7 +6667,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
continue;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
- mark = wmark_pages(zone, WMARK_PROMO);
+ mark = promo_wmark_pages(zone);
else
mark = high_wmark_pages(zone);
if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
@@ -7519,7 +7542,9 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
ret = __node_reclaim(pgdat, gfp_mask, order);
clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
- if (!ret)
+ if (ret)
+ count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
+ else
count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
return ret;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e875f2a4915f..b5a4cea423e1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1314,6 +1314,7 @@ const char * const vmstat_text[] = {
"pgsteal_file",
#ifdef CONFIG_NUMA
+ "zone_reclaim_success",
"zone_reclaim_failed",
#endif
"pginodesteal",
@@ -1384,6 +1385,7 @@ const char * const vmstat_text[] = {
"thp_split_page",
"thp_split_page_failed",
"thp_deferred_split_page",
+ "thp_underused_split_page",
"thp_split_pmd",
"thp_scan_exceed_none_pte",
"thp_scan_exceed_swap_pte",
@@ -1435,6 +1437,30 @@ const char * const vmstat_text[] = {
"vma_lock_retry",
"vma_lock_miss",
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ "kstack_1k",
+#if THREAD_SIZE > 1024
+ "kstack_2k",
+#endif
+#if THREAD_SIZE > 2048
+ "kstack_4k",
+#endif
+#if THREAD_SIZE > 4096
+ "kstack_8k",
+#endif
+#if THREAD_SIZE > 8192
+ "kstack_16k",
+#endif
+#if THREAD_SIZE > 16384
+ "kstack_32k",
+#endif
+#if THREAD_SIZE > 32768
+ "kstack_64k",
+#endif
+#if THREAD_SIZE > 65536
+ "kstack_rest",
+#endif
+#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
@@ -1718,6 +1744,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n min %lu"
"\n low %lu"
"\n high %lu"
+ "\n promo %lu"
"\n spanned %lu"
"\n present %lu"
"\n managed %lu"
@@ -1727,6 +1754,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
+ promo_wmark_pages(zone),
zone->spanned_pages,
zone->present_pages,
zone_managed_pages(zone),
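
The kstack_* strings added above back new CONFIG_DEBUG_STACK_USAGE counters that bucket exiting tasks by peak kernel stack usage, in powers of two up to THREAD_SIZE. A sketch of the implied bucketing — the helper is illustrative only, not the actual accounting code in the exit path:

/* Illustrative mapping from peak stack usage to the counters above. */
static const char *kstack_bucket(unsigned long usage)
{
	if (usage <= SZ_1K)
		return "kstack_1k";
	if (usage <= SZ_2K)
		return "kstack_2k";
	if (usage <= SZ_4K)
		return "kstack_4k";
	if (usage <= SZ_8K)
		return "kstack_8k";
	if (usage <= SZ_16K)
		return "kstack_16k";
	if (usage <= SZ_32K)
		return "kstack_32k";
	if (usage <= SZ_64K)
		return "kstack_64k";
	return "kstack_rest";	/* only compiled in when THREAD_SIZE > 65536 */
}
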
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 2ebfed32871b..379d24b4fef9 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -144,7 +144,7 @@ struct z3fold_pool {
const char *name;
spinlock_t lock;
spinlock_t stale_lock;
- struct list_head *unbuddied;
+ struct list_head __percpu *unbuddied;
struct list_head stale;
atomic64_t pages_nr;
struct kmem_cache *c_handle;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2d3163e4da96..16a07def09c9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -20,7 +20,7 @@
* page->index: links together all component pages of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
- * page->page_type: PG_zsmalloc, lower 16 bit locate the first object
+ * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
* offset in a subpage of a zspage
*
* Usage of struct page flags:
@@ -54,6 +54,7 @@
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
+#include <linux/sprintf.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
@@ -293,17 +294,27 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
static int create_cache(struct zs_pool *pool)
{
- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
- 0, 0, NULL);
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
+ if (!name)
+ return -ENOMEM;
+ pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
+ 0, 0, NULL);
+ kfree(name);
if (!pool->handle_cachep)
- return 1;
+ return -EINVAL;
- pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
- 0, 0, NULL);
+ name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
+ if (!name)
+ return -ENOMEM;
+ pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
+ 0, 0, NULL);
+ kfree(name);
if (!pool->zspage_cachep) {
kmem_cache_destroy(pool->handle_cachep);
pool->handle_cachep = NULL;
- return 1;
+ return -EINVAL;
}
return 0;
@@ -452,13 +463,7 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
-#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
-
-static inline void reset_first_obj_offset(struct page *page)
-{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
-}
+#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
static inline unsigned int get_first_obj_offset(struct page *page)
{
@@ -468,8 +473,8 @@ static inline unsigned int get_first_obj_offset(struct page *page)
static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
- /* With 16 bit available, we can support offsets into 64 KiB pages. */
- BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
+ /* With 24 bits available, we can support offsets into 16 MiB pages. */
+ BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
VM_WARN_ON_ONCE(!PageZsmalloc(page));
VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
@@ -808,7 +813,6 @@ static void reset_page(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
page->index = 0;
- reset_first_obj_offset(page);
__ClearPageZsmalloc(page);
}
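
The zsmalloc hunks widen the first-object offset field from the lower 16 to the lower 24 bits of page->page_type, which is why the BUILD_BUG_ON ceiling moves from 64 KiB to 16 MiB pages. A sketch of the resulting accessor pair — the getter body is inferred as the natural counterpart of the setter shown above:

#define FIRST_OBJ_PAGE_TYPE_MASK	0xffffff	/* lower 24 bits */

static inline unsigned int get_first_obj_offset(struct page *page)
{
	return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
}

static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
	/* With 24 bits available, we can support offsets into 16 MiB pages. */
	BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
	page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
	page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
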
diff --git a/mm/zswap.c b/mm/zswap.c
index adeaf9c97fde..449914ea9919 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -44,8 +44,6 @@
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
* The statistics below are not protected from concurrent access for
@@ -185,8 +183,11 @@ static struct shrinker *zswap_shrinker;
*
* swpentry - associated swap entry, the offset indexes into the red-black tree
* length - the length in bytes of the compressed page data. Needed during
- * decompression. For a same value filled page length is 0, and both
- * pool and lru are invalid and must be ignored.
+ * decompression.
+ * referenced - true if the entry recently entered the zswap pool. Unset by the
+ * writeback logic. The entry is only reclaimed by the writeback
+ * logic if referenced is unset. See comments in the shrinker
+ * section for context.
* pool - the zswap_pool the entry's data is in
* handle - zpool allocation handle that stores the compressed page data
* value - value of the same-value filled pages which have same content
@@ -196,11 +197,9 @@ static struct shrinker *zswap_shrinker;
struct zswap_entry {
swp_entry_t swpentry;
unsigned int length;
+ bool referenced;
struct zswap_pool *pool;
- union {
- unsigned long handle;
- unsigned long value;
- };
+ unsigned long handle;
struct obj_cgroup *objcg;
struct list_head lru;
};
@@ -700,11 +699,8 @@ static inline int entry_to_nid(struct zswap_entry *entry)
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
- atomic_long_t *nr_zswap_protected;
- unsigned long lru_size, old, new;
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
- struct lruvec *lruvec;
/*
* Note that it is safe to use rcu_read_lock() here, even in the face of
@@ -722,19 +718,6 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
memcg = mem_cgroup_from_entry(entry);
/* will always succeed */
list_lru_add(list_lru, &entry->lru, nid, memcg);
-
- /* Update the protection area */
- lru_size = list_lru_count_one(list_lru, nid, memcg);
- lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
- nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
- old = atomic_long_inc_return(nr_zswap_protected);
- /*
- * Decay to avoid overflow and adapt to changing workloads.
- * This is based on LRU reclaim cost decaying heuristics.
- */
- do {
- new = old > lru_size / 4 ? old / 2 : old;
- } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
rcu_read_unlock();
}
@@ -752,7 +735,7 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
- atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+ atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}
void zswap_folio_swapin(struct folio *folio)
@@ -761,16 +744,29 @@ void zswap_folio_swapin(struct folio *folio)
if (folio) {
lruvec = folio_lruvec(folio);
- atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
}
}
+/*
+ * This function should be called when a memcg is being offlined.
+ *
+ * Since the global shrinker shrink_worker() may hold a reference
+ * to the memcg, we must check and release the reference held in
+ * zswap_next_shrink.
+ *
+ * shrink_worker() must handle the case where this function releases
+ * the reference of memcg being shrunk.
+ */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
/* lock out zswap shrinker walking memcg tree */
spin_lock(&zswap_shrink_lock);
- if (zswap_next_shrink == memcg)
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ if (zswap_next_shrink == memcg) {
+ do {
+ zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ } while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
+ }
spin_unlock(&zswap_shrink_lock);
}
@@ -799,13 +795,9 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
*/
static void zswap_entry_free(struct zswap_entry *entry)
{
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zswap_lru_del(&zswap_list_lru, entry);
- zpool_free(entry->pool->zpool, entry->handle);
- zswap_pool_put(entry->pool);
- }
+ zswap_lru_del(&zswap_list_lru, entry);
+ zpool_free(entry->pool->zpool, entry->handle);
+ zswap_pool_put(entry->pool);
if (entry->objcg) {
obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
obj_cgroup_put(entry->objcg);
@@ -1082,6 +1074,28 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
/*********************************
* shrinker functions
**********************************/
+/*
+ * The dynamic shrinker is modulated by the following factors:
+ *
+ * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
+ * the entry a second chance) before rotating it in the LRU list. If the
+ * entry is considered again by the shrinker, with its referenced bit unset,
+ *    it is written back. As a result, the writeback rate is dynamically
+ *    adjusted by pool activity - if the pool is dominated by new entries
+ *    (i.e., lots of recent zswapouts), these entries will be protected and
+ *    the writeback rate will slow down. On the other hand, if the pool has a
+ *    lot of stagnant entries, these entries will be reclaimed immediately,
+ *    effectively increasing the writeback rate.
+ *
+ * 2. Swapins counter: If we observe swapins, it is a sign that we are
+ * overshrinking and should slow down. We maintain a swapins counter, which
+ *    is consumed and subtracted from the number of eligible objects on the LRU
+ * in zswap_shrinker_count().
+ *
+ * 3. Compression ratio. The better the workload compresses, the less gains we
+ * can expect from writeback. We scale down the number of objects available
+ * for reclaim by this ratio.
+ */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
spinlock_t *lock, void *arg)
{
@@ -1092,6 +1106,16 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
int writeback_result;
/*
+	 * Second chance algorithm: if the entry has its referenced bit set, give it
+	 * a second chance. Only clear the referenced bit and rotate it in
+	 * zswap's LRU list.
+ */
+ if (entry->referenced) {
+ entry->referenced = false;
+ return LRU_ROTATE;
+ }
+
+ /*
* As soon as we drop the LRU lock, the entry can be freed by
* a concurrent invalidation. This means the following:
*
@@ -1157,8 +1181,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
- unsigned long shrink_ret, nr_protected, lru_size;
+ unsigned long shrink_ret;
bool encountered_page_in_swapcache = false;
if (!zswap_shrinker_enabled ||
@@ -1167,25 +1190,6 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
return SHRINK_STOP;
}
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
- lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
-
- /*
- * Abort if we are shrinking into the protected region.
- *
- * This short-circuiting is necessary because if we have too many multiple
- * concurrent reclaimers getting the freeable zswap object counts at the
- * same time (before any of them made reasonable progress), the total
- * number of reclaimed objects might be more than the number of unprotected
- * objects (i.e the reclaimers will reclaim into the protected area of the
- * zswap LRU).
- */
- if (nr_protected >= lru_size - sc->nr_to_scan) {
- sc->nr_scanned = 0;
- return SHRINK_STOP;
- }
-
shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
&encountered_page_in_swapcache);
@@ -1200,7 +1204,10 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
{
struct mem_cgroup *memcg = sc->memcg;
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
- unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
+ atomic_long_t *nr_disk_swapins =
+ &lruvec->zswap_lruvec_state.nr_disk_swapins;
+ unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
+ nr_remain;
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
@@ -1233,25 +1240,33 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!nr_stored)
return 0;
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
+ if (!nr_freeable)
+ return 0;
+
/*
- * Subtract the lru size by an estimate of the number of pages
- * that should be protected.
+	 * Subtract from the LRU size the number of pages that were recently swapped
+	 * in from disk. The idea is that had we protected zswap's LRU by this
+	 * number of pages, these disk swapins would not have happened.
*/
- nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+ nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
+ do {
+ if (nr_freeable >= nr_disk_swapins_cur)
+ nr_remain = 0;
+ else
+ nr_remain = nr_disk_swapins_cur - nr_freeable;
+ } while (!atomic_long_try_cmpxchg(
+ nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
+
+ nr_freeable -= nr_disk_swapins_cur - nr_remain;
+ if (!nr_freeable)
+ return 0;
/*
* Scale the number of freeable pages by the memory saving factor.
* This ensures that the better zswap compresses memory, the fewer
* pages we will evict to swap (as it will otherwise incur IO for
* relatively small memory saving).
- *
- * The memory saving factor calculated here takes same-filled pages into
- * account, but those are not freeable since they almost occupy no
- * space. Hence, we may scale nr_freeable down a little bit more than we
- * should if we have a lot of same-filled pages.
*/
return mult_frac(nr_freeable, nr_backing, nr_stored);
}
@@ -1274,10 +1289,10 @@ static struct shrinker *zswap_alloc_shrinker(void)
static int shrink_memcg(struct mem_cgroup *memcg)
{
- int nid, shrunk = 0;
+ int nid, shrunk = 0, scanned = 0;
if (!mem_cgroup_zswap_writeback_enabled(memcg))
- return -EINVAL;
+ return -ENOENT;
/*
* Skip zombies because their LRUs are reparented and we would be
@@ -1291,63 +1306,94 @@ static int shrink_memcg(struct mem_cgroup *memcg)
shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
&shrink_memcg_cb, NULL, &nr_to_walk);
+ scanned += 1 - nr_to_walk;
}
+
+ if (!scanned)
+ return -ENOENT;
+
return shrunk ? 0 : -EAGAIN;
}
static void shrink_worker(struct work_struct *w)
{
struct mem_cgroup *memcg;
- int ret, failures = 0;
+ int ret, failures = 0, attempts = 0;
unsigned long thr;
/* Reclaim down to the accept threshold */
thr = zswap_accept_thr_pages();
- /* global reclaim will select cgroup in a round-robin fashion. */
+ /*
+	 * Global reclaim will select a cgroup in a round-robin fashion from all
+ * online memcgs, but memcgs that have no pages in zswap and
+ * writeback-disabled memcgs (memory.zswap.writeback=0) are not
+ * candidates for shrinking.
+ *
+ * Shrinking will be aborted if we encounter the following
+ * MAX_RECLAIM_RETRIES times:
+ * - No writeback-candidate memcgs found in a memcg tree walk.
+ * - Shrinking a writeback-candidate memcg failed.
+ *
+ * We save iteration cursor memcg into zswap_next_shrink,
+ * which can be modified by the offline memcg cleaner
+ * zswap_memcg_offline_cleanup().
+ *
+ * Since the offline cleaner is called only once, we cannot leave an
+ * offline memcg reference in zswap_next_shrink.
+	 * We can rely on the cleaner only if we get an online memcg under lock.
+ *
+ * If we get an offline memcg, we cannot determine if the cleaner has
+ * already been called or will be called later. We must put back the
+ * reference before returning from this function. Otherwise, the
+ * offline memcg left in zswap_next_shrink will hold the reference
+ * until the next run of shrink_worker().
+ */
do {
- spin_lock(&zswap_shrink_lock);
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
- memcg = zswap_next_shrink;
-
/*
- * We need to retry if we have gone through a full round trip, or if we
- * got an offline memcg (or else we risk undoing the effect of the
- * zswap memcg offlining cleanup callback). This is not catastrophic
- * per se, but it will keep the now offlined memcg hostage for a while.
+ * Start shrinking from the next memcg after zswap_next_shrink.
+ * When the offline cleaner has already advanced the cursor,
+ * advancing the cursor here overlooks one memcg, but this
+ * should be negligibly rare.
*
- * Note that if we got an online memcg, we will keep the extra
- * reference in case the original reference obtained by mem_cgroup_iter
- * is dropped by the zswap memcg offlining callback, ensuring that the
- * memcg is not killed when we are reclaiming.
+ * If we get an online memcg, keep the extra reference in case
+ * the original one obtained by mem_cgroup_iter() is dropped by
+ * zswap_memcg_offline_cleanup() while we are shrinking the
+ * memcg.
*/
- if (!memcg) {
- spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
- break;
-
- goto resched;
- }
-
- if (!mem_cgroup_tryget_online(memcg)) {
- /* drop the reference from mem_cgroup_iter() */
- mem_cgroup_iter_break(NULL, memcg);
- zswap_next_shrink = NULL;
- spin_unlock(&zswap_shrink_lock);
+ spin_lock(&zswap_shrink_lock);
+ do {
+ memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ zswap_next_shrink = memcg;
+ } while (memcg && !mem_cgroup_tryget_online(memcg));
+ spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
+ if (!memcg) {
+ /*
+ * Continue shrinking without incrementing failures if
+ * we found candidate memcgs in the last tree walk.
+ */
+ if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
break;
+ attempts = 0;
goto resched;
}
- spin_unlock(&zswap_shrink_lock);
ret = shrink_memcg(memcg);
/* drop the extra reference */
mem_cgroup_put(memcg);
- if (ret == -EINVAL)
- break;
+ /*
+ * There are no writeback-candidate pages in the memcg.
+ * This is not an issue as long as we can find another memcg
+ * with pages in zswap. Skip this without incrementing attempts
+ * and failures.
+ */
+ if (ret == -ENOENT)
+ continue;
+ ++attempts;
+
if (ret && ++failures == MAX_RECLAIM_RETRIES)
break;
resched:
@@ -1356,42 +1402,6 @@ resched:
}
/*********************************
-* same-filled functions
-**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
-{
- unsigned long *data;
- unsigned long val;
- unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
- bool ret = false;
-
- data = kmap_local_folio(folio, 0);
- val = data[0];
-
- if (val != data[last_pos])
- goto out;
-
- for (pos = 1; pos < last_pos; pos++) {
- if (val != data[pos])
- goto out;
- }
-
- *value = val;
- ret = true;
-out:
- kunmap_local(data);
- return ret;
-}
-
-static void zswap_fill_folio(struct folio *folio, unsigned long value)
-{
- unsigned long *data = kmap_local_folio(folio, 0);
-
- memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
- kunmap_local(data);
-}
-
-/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
@@ -1402,7 +1412,6 @@ bool zswap_store(struct folio *folio)
struct zswap_entry *entry, *old;
struct obj_cgroup *objcg = NULL;
struct mem_cgroup *memcg = NULL;
- unsigned long value;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1435,13 +1444,6 @@ bool zswap_store(struct folio *folio)
goto reject;
}
- if (zswap_is_folio_same_filled(folio, &value)) {
- entry->length = 0;
- entry->value = value;
- atomic_inc(&zswap_same_filled_pages);
- goto store_entry;
- }
-
/* if entry is successfully added, it keeps the reference */
entry->pool = zswap_pool_current_get();
if (!entry->pool)
@@ -1459,9 +1461,9 @@ bool zswap_store(struct folio *folio)
if (!zswap_compress(folio, entry))
goto put_pool;
-store_entry:
entry->swpentry = swp;
entry->objcg = objcg;
+ entry->referenced = true;
old = xa_store(tree, offset, entry, GFP_KERNEL);
if (xa_is_err(old)) {
@@ -1507,13 +1509,9 @@ store_entry:
return true;
store_failed:
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zpool_free(entry->pool->zpool, entry->handle);
+ zpool_free(entry->pool->zpool, entry->handle);
put_pool:
- zswap_pool_put(entry->pool);
- }
+ zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
reject:
@@ -1576,10 +1574,7 @@ bool zswap_load(struct folio *folio)
if (!entry)
return false;
- if (entry->length)
- zswap_decompress(entry, folio);
- else
- zswap_fill_folio(folio, entry->value);
+ zswap_decompress(entry, folio);
count_vm_event(ZSWPIN);
if (entry->objcg)
@@ -1682,8 +1677,6 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, NULL, &total_size_fops);
debugfs_create_atomic_t("stored_pages", 0444,
zswap_debugfs_root, &zswap_stored_pages);
- debugfs_create_atomic_t("same_filled_pages", 0444,
- zswap_debugfs_root, &zswap_same_filled_pages);
return 0;
}
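
The zswap rework above replaces the nr_zswap_protected decay heuristic with a classic second-chance policy: every entry gets one free rotation through the LRU before it becomes eligible for writeback, and recent disk swapins are consumed as a back-pressure signal in the shrinker count. A self-contained userspace sketch of the second-chance selection itself — simplified and lock-free; zswap implements it via LRU_ROTATE over a list_lru rather than the clock hand used here:

#include <stdbool.h>
#include <stddef.h>

struct zentry {
	bool present;
	bool referenced;	/* set when stored; cleared on first shrink pass */
};

/*
 * Pick a writeback victim: an entry seen with the referenced bit set gets
 * a second chance (the bit is cleared and the hand moves on); an entry
 * seen with the bit already clear is evicted.
 */
static int pick_victim(struct zentry *pool, size_t n, size_t *hand)
{
	for (size_t scanned = 0; scanned < 2 * n; scanned++) {
		struct zentry *e = &pool[*hand];

		*hand = (*hand + 1) % n;
		if (!e->present)
			continue;
		if (e->referenced) {
			e->referenced = false;	/* second chance */
			continue;
		}
		return (int)(e - pool);
	}
	return -1;	/* everything was referenced recently */
}
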
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
index 16be8f8b2f8c..c40b98f7743c 100644
--- a/net/6lowpan/ndisc.c
+++ b/net/6lowpan/ndisc.c
@@ -11,11 +11,6 @@
#include "6lowpan_i.h"
-static int lowpan_ndisc_is_useropt(u8 nd_opt_type)
-{
- return nd_opt_type == ND_OPT_6CO;
-}
-
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
#define NDISC_802154_SHORT_ADDR_LENGTH 1
static int lowpan_ndisc_parse_802154_options(const struct net_device *dev,
@@ -222,7 +217,6 @@ static void lowpan_ndisc_prefix_rcv_add_addr(struct net *net,
#endif
const struct ndisc_ops lowpan_ndisc_ops = {
- .is_useropt = lowpan_ndisc_is_useropt,
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
.parse_options = lowpan_ndisc_parse_options,
.update = lowpan_ndisc_update,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 217be32426b5..458040e8a0e0 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -564,17 +564,20 @@ static int vlan_dev_init(struct net_device *dev)
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
- NETIF_F_ALL_FCOE;
+ NETIF_F_FCOE_CRC | NETIF_F_FSO;
if (real_dev->vlan_features & NETIF_F_HW_MACSEC)
dev->hw_features |= NETIF_F_HW_MACSEC;
- dev->features |= dev->hw_features | NETIF_F_LLTX;
+ dev->features |= dev->hw_features;
+ dev->lltx = true;
+ dev->fcoe_mtu = true;
netif_inherit_tso_max(dev, real_dev);
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
- dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+ dev->vlan_features = real_dev->vlan_features &
+ ~(NETIF_F_FCOE_CRC | NETIF_F_FSO);
dev->hw_enc_features = vlan_tnl_features(real_dev);
dev->mpls_features = real_dev->mpls_features;
@@ -655,7 +658,6 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
lower_features |= NETIF_F_HW_CSUM;
features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
- features |= NETIF_F_LLTX;
return features;
}
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 87b959da00cd..fa67374bda49 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -238,9 +238,9 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
- "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n",
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %x\n",
vlandev->name, vlan->vlan_id,
- (int)(vlan->flags & 1), vlandev->priv_flags);
+ (int)(vlan->flags & 1), (u32)vlandev->priv_flags);
seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
diff --git a/net/Kconfig b/net/Kconfig
index d27d0deac0bf..a629f92dc86b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -66,6 +66,12 @@ config SKB_DECRYPTED
config SKB_EXTENSIONS
bool
+config NET_DEVMEM
+ def_bool y
+ depends on DMA_SHARED_BUFFER
+ depends on GENERIC_ALLOCATOR
+ depends on PAGE_POOL
+
menu "Networking options"
source "net/packet/Kconfig"
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 30ecbc2ef1fd..2758aba47a2f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1020,9 +1020,10 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->netdev_ops = &batadv_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = batadv_softif_free;
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
/* can't call min_mtu, because the needed variables
* have not been initialized yet
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 00840d5784fe..04f6398b3a40 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -287,7 +287,7 @@ struct batadv_frag_table_entry {
/** @lock: lock to protect the list of fragments */
spinlock_t lock;
- /** @timestamp: time (jiffie) of last received fragment */
+ /** @timestamp: time (jiffy) of last received fragment */
unsigned long timestamp;
/** @seqno: sequence number of the fragments in the list */
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index c8337786da6b..34e923466236 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config BT_CMTP
- tristate "CMTP protocol support"
- depends on BT_BREDR && ISDN_CAPI
+ tristate "CMTP protocol support (DEPRECATED)"
+ depends on BT_BREDR && ISDN_CAPI && DEPRECATED
help
CMTP (CAPI Message Transport Protocol) is a transport layer
for CAPI messages. CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index f3bedc3b613a..884703fda979 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -248,18 +248,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
break;
case CAPI_FUNCTION_GET_MANUFACTURER:
- if (skb->len < CAPI_MSG_BASELEN + 15)
- break;
-
- if (!info && ctrl) {
- int len = min_t(uint, CAPI_MANUFACTURER_LEN,
- skb->data[CAPI_MSG_BASELEN + 14]);
-
- memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN);
- strncpy(ctrl->manu,
- skb->data + CAPI_MSG_BASELEN + 15, len);
- }
-
+ if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 14)
+ strscpy_pad(ctrl->manu,
+ skb->data + CAPI_MSG_BASELEN + 15,
+ skb->data[CAPI_MSG_BASELEN + 14]);
break;
case CAPI_FUNCTION_GET_VERSION:
@@ -276,18 +268,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
break;
case CAPI_FUNCTION_GET_SERIAL_NUMBER:
- if (skb->len < CAPI_MSG_BASELEN + 17)
- break;
-
- if (!info && ctrl) {
- int len = min_t(uint, CAPI_SERIAL_LEN,
- skb->data[CAPI_MSG_BASELEN + 16]);
-
- memset(ctrl->serial, 0, CAPI_SERIAL_LEN);
- strncpy(ctrl->serial,
- skb->data + CAPI_MSG_BASELEN + 17, len);
- }
-
+ if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 16)
+ strscpy_pad(ctrl->serial,
+ skb->data + CAPI_MSG_BASELEN + 17,
+ skb->data[CAPI_MSG_BASELEN + 16]);
break;
}
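
The cmtp conversion above also illustrates why strscpy_pad() supersedes the memset()+strncpy() pattern: it guarantees NUL termination, zero-fills the remainder of the destination, and reports truncation. A hedged sketch of the new pattern — the helper and its names are illustrative, not the cmtp code itself:

/* Illustrative only: copy a wire-provided string into a fixed buffer. */
static void copy_manufacturer(char *dst, size_t dst_len, const char *src)
{
	ssize_t len;

	/* Always NUL-terminated; trailing bytes of dst are zeroed. */
	len = strscpy_pad(dst, src, dst_len);
	if (len == -E2BIG)
		pr_debug("manufacturer string truncated\n");
}

By contrast, strncpy() leaves the destination unterminated when the source fills the buffer, which is why the old code needed a separate memset() and an explicit min_t() length clamp.
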
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c82502e213a8..d083117ee36c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -106,8 +106,7 @@ void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
* where a timeout + cancel does indicate an actual failure.
*/
if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
/* The connection attempt was doing scan for new RPA, and is
* in scan phase. If params are not associated with any other
@@ -778,7 +777,6 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
if (!d)
return -ENOMEM;
- memset(d, 0, sizeof(*d));
d->big = big;
d->sync_handle = conn->sync_handle;
@@ -1250,8 +1248,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
hci_le_conn_failed(conn, status);
break;
case ACL_LINK:
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
break;
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5533e6f561b3..40ccdef168d7 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -5380,7 +5380,10 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
if (!e)
return 0;
- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+	/* Ignore cancel errors since they shouldn't interfere with stopping
+	 * the discovery.
+ */
+ hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
}
return 0;
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
index f46847632ffa..6e349704efe4 100644
--- a/net/bluetooth/leds.c
+++ b/net/bluetooth/leds.c
@@ -48,7 +48,7 @@ static int power_activate(struct led_classdev *led_cdev)
htrig = to_hci_basic_led_trigger(led_cdev->trigger);
powered = test_bit(HCI_UP, &htrig->hdev->flags);
- led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+ led_set_brightness(led_cdev, powered ? LED_FULL : LED_OFF);
return 0;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 279902e8bd8a..e4f564d6f6fb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -9779,13 +9779,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_remove(cmd);
}
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type, status, true);
+ return;
+ }
+
+ bacpy(&ev.addr.bdaddr, &conn->dst);
+ ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
ev.status = mgmt_status(status);
mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 3ea52b05adfb..f71f67c6896b 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -115,7 +115,7 @@ static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_
offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
info = find_ctx_arg_info(prog->aux, offset);
- if (info && (info->reg_type & PTR_MAYBE_NULL))
+ if (info && type_may_be_null(info->reg_type))
continue;
return -EINVAL;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index fb1115857e49..26b79feb385d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -487,9 +487,11 @@ void br_dev_setup(struct net_device *dev)
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_local = true;
- dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 8f9c19d992ac..0e8bc0ea6175 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -36,6 +36,7 @@
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>
+#include <net/inet_dscp.h>
#include <linux/uaccess.h>
#include "br_private.h"
@@ -402,7 +403,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
goto free_skb;
rt = ip_route_output(net, iph->daddr, 0,
- RT_TOS(iph->tos), 0,
+ iph->tos & INET_DSCP_MASK, 0,
RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
/* - Bridged-and-DNAT'ed traffic doesn't
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index cbd0e3586c3f..3e67d4aff419 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1256,7 +1256,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
goto free_unlock;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
if (newinfo->nentries)
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index bd4d1b4d745f..d12a221366d6 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -142,7 +142,7 @@ static int nft_meta_bridge_set_init(const struct nft_ctx *ctx,
}
priv->len = len;
- err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
+ err = nft_parse_register_load(ctx, tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
@@ -168,8 +168,7 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
}
static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
@@ -179,7 +178,7 @@ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
hooks = 1 << NF_BR_PRE_ROUTING;
break;
default:
- return nft_meta_set_validate(ctx, expr, data);
+ return nft_meta_set_validate(ctx, expr);
}
return nft_chain_validate_hooks(ctx->chain, hooks);
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 71b54fed7263..1cb5c16e97b7 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -170,8 +170,7 @@ out:
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+ const struct nft_expr *expr)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN));
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 2ae8cfa3df88..96236d21b18e 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -298,10 +298,8 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
return dstpkt;
}
- if (expectlen > addlen)
- neededtailspace = expectlen;
- else
- neededtailspace = addlen;
+
+ neededtailspace = max(expectlen, addlen);
if (dst->tail + neededtailspace > dst->end) {
/* Create a duplicate of 'dst' with more tail space */
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 47901bd4def1..94ad09e36df2 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -47,7 +47,6 @@ struct chnl_net {
struct caif_connect_request conn_req;
struct list_head list_field;
struct net_device *netdev;
- char name[256];
wait_queue_head_t netmgmt_wq;
/* Flow status to remember and control the transmission. */
bool flowenabled;
@@ -347,7 +346,6 @@ static int chnl_net_init(struct net_device *dev)
struct chnl_net *priv;
ASSERT_RTNL();
priv = netdev_priv(dev);
- strncpy(priv->name, dev->name, sizeof(priv->name));
INIT_LIST_HEAD(&priv->list_field);
return 0;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 46d3ec3aa44b..217049fa496e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1471,8 +1471,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
- if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+ bo->bcm_proc_read = NULL;
+ }
#endif
bo->bound = 0;
bo->ifindex = 0;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 4be73de5033c..319f47df3330 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1179,10 +1179,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
break;
case -ENETDOWN:
/* In this case we should get a netdev_event(), all active
- * sessions will be cleared by
- * j1939_cancel_all_active_sessions(). So handle this as an
- * error, but let j1939_cancel_all_active_sessions() do the
- * cleanup including propagation of the error to user space.
+ * sessions will be cleared by j1939_cancel_active_session().
+ * So handle this as an error, but let
+ * j1939_cancel_active_session() do the cleanup including
+ * propagation of the error to user space.
*/
break;
case -EOVERFLOW:
diff --git a/net/core/Makefile b/net/core/Makefile
index 62be9aef2528..c3ebbaf9c81e 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-y += hotdata.o
+obj-y += netdev_rx_queue.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
@@ -43,3 +44,4 @@ obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
obj-$(CONFIG_NET_TEST) += net_test.o
+obj-$(CONFIG_NET_DEVMEM) += devmem.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a40f733b37d7..f0693707aece 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -407,6 +407,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
return 0;
}
+ if (!skb_frags_readable(skb))
+ goto short_copy;
+
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -623,6 +626,9 @@ int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
{
int frag = skb_shinfo(skb)->nr_frags;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
while (length && iov_iter_count(from)) {
struct page *head, *last_head = NULL;
struct page *pages[MAX_SKB_FRAGS];
diff --git a/net/core/dev.c b/net/core/dev.c
index f66e61407883..cd479f5f22f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -158,8 +158,10 @@
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/rps.h>
+#include <linux/phy_link_topology.h>
#include "dev.h"
+#include "devmem.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
@@ -3310,6 +3312,10 @@ int skb_checksum_help(struct sk_buff *skb)
return -EINVAL;
}
+ if (!skb_frags_readable(skb)) {
+ return -EFAULT;
+ }
+
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
@@ -3386,6 +3392,7 @@ int skb_crc32c_csum_help(struct sk_buff *skb)
out:
return ret;
}
+EXPORT_SYMBOL(skb_crc32c_csum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
@@ -3431,8 +3438,9 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
- if (PageHighMem(skb_frag_page(frag)))
+ if (page && PageHighMem(page))
return 1;
}
}
@@ -3705,7 +3713,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
next = skb->next;
skb_mark_not_on_list(skb);
- /* in case skb wont be segmented, point to itself */
+ /* in case skb won't be segmented, point to itself */
skb->prev = skb;
skb = validate_xmit_skb(skb, dev, again);
@@ -4245,13 +4253,6 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL(dev_pick_tx_zero);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
-}
-EXPORT_SYMBOL(dev_pick_tx_cpu_id);
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -5248,7 +5249,7 @@ int netif_rx(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx);
-static __latent_entropy void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -5725,10 +5726,9 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
struct packet_type *pt_curr = NULL;
/* Current (common) orig_dev of sublist */
struct net_device *od_curr = NULL;
- struct list_head sublist;
struct sk_buff *skb, *next;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
@@ -5866,9 +5866,8 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
skb);
@@ -6921,7 +6920,7 @@ static int napi_threaded_poll(void *data)
return 0;
}
-static __latent_entropy void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
@@ -9272,7 +9271,7 @@ EXPORT_SYMBOL(netdev_port_same_parent_id);
*/
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
- if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
+ if (!dev->change_proto_down)
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
@@ -9369,6 +9368,20 @@ u8 dev_xdp_prog_count(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ if (!dev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
+ return -EBUSY;
+ }
+
+ return dev->netdev_ops->ndo_bpf(dev, bpf);
+}
+EXPORT_SYMBOL_GPL(dev_xdp_propagate);
+
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
struct bpf_prog *prog = dev_xdp_prog(dev, mode);
@@ -9397,6 +9410,11 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
struct netdev_bpf xdp;
int err;
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
+ return -EBUSY;
+ }
+
memset(&xdp, 0, sizeof(xdp));
xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
xdp.extack = extack;
@@ -9821,6 +9839,20 @@ err_out:
return err;
}
+u32 dev_get_min_mp_channel_count(const struct net_device *dev)
+{
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
+ if (dev->_rx[i].mp_params.mp_priv)
+ /* The channel count is the idx plus 1. */
+ return i + 1;
+
+ return 0;
+}
+
/**
* dev_index_reserve() - allocate an ifindex in a namespace
* @net: the applicable net namespace
@@ -10321,6 +10353,17 @@ static void netdev_do_free_pcpu_stats(struct net_device *dev)
}
}
+static void netdev_free_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+
+ if (IS_ENABLED(CONFIG_PHYLIB) && topo) {
+ xa_destroy(&topo->phys);
+ kfree(topo);
+ dev->link_topo = NULL;
+ }
+}
+
/**
* register_netdevice() - register a network device
* @dev: device to register
@@ -10868,7 +10911,7 @@ noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
return;
}
- field = (__force unsigned long __percpu *)((__force void *)p + offset);
+ field = (unsigned long __percpu *)((void __percpu *)p + offset);
this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
@@ -11099,6 +11142,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
#ifdef CONFIG_NET_SCHED
hash_init(dev->qdisc_hash);
#endif
+
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
@@ -11120,7 +11164,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
if (!dev->ethtool)
goto free_all;
- strcpy(dev->name, name);
+ strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
dev->group = INIT_NETDEV_GROUP;
if (!dev->ethtool_ops)
@@ -11191,6 +11235,8 @@ void free_netdev(struct net_device *dev)
free_percpu(dev->xdp_bulkq);
dev->xdp_bulkq = NULL;
+ netdev_free_phy_link_topology(dev);
+
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED ||
dev->reg_state == NETREG_DUMMY) {
@@ -11343,6 +11389,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
dev_tcx_uninstall(dev);
dev_xdp_uninstall(dev);
bpf_dev_bound_netdev_unregister(dev);
+ dev_dmabuf_uninstall(dev);
netdev_offload_xstats_disable_all(dev);
@@ -11407,7 +11454,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
* @head: list of devices
*
* Note: As most callers use a stack allocated list_head,
- * we force a list_del() to make sure stack wont be corrupted later.
+ * we force a list_del() to make sure stack won't be corrupted later.
*/
void unregister_netdevice_many(struct list_head *head)
{
@@ -11462,10 +11509,10 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* Don't allow namespace local devices to be moved. */
err = -EINVAL;
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
goto out;
- /* Ensure the device has been registrered */
+ /* Ensure the device has been registered */
if (dev->reg_state != NETREG_REGISTERED)
goto out;
@@ -11844,7 +11891,7 @@ static void __net_exit default_device_exit_net(struct net *net)
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
- if (dev->features & NETIF_F_NETNS_LOCAL)
+ if (dev->netns_local)
continue;
/* Leave virtual devices for the generic cleanup */
@@ -11905,7 +11952,7 @@ static struct pernet_operations __net_initdata default_device_ops = {
static void __init net_dev_struct_check(void)
{
/* TX read-mostly hotpath */
- CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags_fast);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
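
The alloc_netdev_mqs() hunk above moves from strcpy() to the two-argument form of strscpy(), which derives its bound from the destination array's type and always NUL-terminates. A minimal sketch of the pattern (the helper name is hypothetical):

/* Two-argument strscpy() expands to the sized call with sizeof(dst),
 * so it requires a true array destination; it truncates safely where
 * strcpy() could overrun dev->name.
 */
static void example_set_name(struct net_device *dev, const char *name)
{
	strscpy(dev->name, name);	/* bound = sizeof(dev->name) = IFNAMSIZ */
}
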
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index baa63dee2829..166e404f7c03 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -262,7 +262,7 @@ static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
}
/* This function only works where there is a strict 1-1 relationship
- * between source and destionation of they synch. If you ever need to
+ * between source and destination of the sync. If you ever need to
 * sync addresses to more than 1 destination, you need to use
* __hw_addr_sync_multiple().
*/
@@ -299,8 +299,8 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
EXPORT_SYMBOL(__hw_addr_unsync);
/**
- * __hw_addr_sync_dev - Synchonize device's multicast list
- * @list: address list to syncronize
+ * __hw_addr_sync_dev - Synchronize device's multicast list
+ * @list: address list to synchronize
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 8592c052c0f4..473c437b6b53 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -317,8 +317,7 @@ static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr)
* should take precedence in front of hardware timestamping provided by the
* netdev. If the netdev driver needs to perform specific actions even for PHY
* timestamping to work properly (a switch port must trap the timestamped
- * frames and not forward them), it must set IFF_SEE_ALL_HWTSTAMP_REQUESTS in
- * dev->priv_flags.
+ * frames and not forward them), it must set dev->see_all_hwtstamp_requests.
*/
int dev_set_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
@@ -332,13 +331,13 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
cfg->source = phy_ts ? HWTSTAMP_SOURCE_PHYLIB : HWTSTAMP_SOURCE_NETDEV;
- if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) {
+ if (phy_ts && dev->see_all_hwtstamp_requests) {
err = ops->ndo_hwtstamp_get(dev, &old_cfg);
if (err)
return err;
}
- if (!phy_ts || (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) {
+ if (!phy_ts || dev->see_all_hwtstamp_requests) {
err = ops->ndo_hwtstamp_set(dev, cfg, extack);
if (err) {
if (extack->_msg)
@@ -347,7 +346,7 @@ int dev_set_hwtstamp_phylib(struct net_device *dev,
}
}
- if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS))
+ if (phy_ts && dev->see_all_hwtstamp_requests)
changed = kernel_hwtstamp_config_changed(&old_cfg, cfg);
if (phy_ts) {
diff --git a/net/core/devmem.c b/net/core/devmem.c
new file mode 100644
index 000000000000..11b91c12ee11
--- /dev/null
+++ b/net/core/devmem.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Devmem TCP
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ * Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+ * Kaiyuan Zhang <kaiyuanz@google.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/page_pool/helpers.h>
+#include <trace/events/page_pool.h>
+
+#include "devmem.h"
+#include "mp_dmabuf_devmem.h"
+#include "page_pool_priv.h"
+
+/* Device memory support */
+
+/* Protected by rtnl_lock() */
+static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
+
+static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
+ struct gen_pool_chunk *chunk,
+ void *not_used)
+{
+ struct dmabuf_genpool_chunk_owner *owner = chunk->owner;
+
+ kvfree(owner->niovs);
+ kfree(owner);
+}
+
+static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_dma_addr +
+ ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+ size_t size, avail;
+
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+
+ size = gen_pool_size(binding->chunk_pool);
+ avail = gen_pool_avail(binding->chunk_pool);
+
+ if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
+ size, avail))
+ gen_pool_destroy(binding->chunk_pool);
+
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+ dma_buf_detach(binding->dmabuf, binding->attachment);
+ dma_buf_put(binding->dmabuf);
+ xa_destroy(&binding->bound_rxqs);
+ kfree(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct dmabuf_genpool_chunk_owner *owner;
+ unsigned long dma_addr;
+ struct net_iov *niov;
+ ssize_t offset;
+ ssize_t index;
+
+ dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
+ (void **)&owner);
+ if (!dma_addr)
+ return NULL;
+
+ offset = dma_addr - owner->base_dma_addr;
+ index = offset / PAGE_SIZE;
+ niov = &owner->niovs[index];
+
+ niov->pp_magic = 0;
+ niov->pp = NULL;
+ atomic_long_set(&niov->pp_ref_count, 0);
+
+ return niov;
+}
+
+void net_devmem_free_dmabuf(struct net_iov *niov)
+{
+ struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov);
+ unsigned long dma_addr = net_devmem_get_dma_addr(niov);
+
+ if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
+ PAGE_SIZE)))
+ return;
+
+ gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
+}
+
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int rxq_idx;
+
+ if (binding->list.next)
+ list_del(&binding->list);
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
+ WARN_ON(rxq->mp_params.mp_priv != binding);
+
+ rxq->mp_params.mp_priv = NULL;
+
+ rxq_idx = get_netdev_rx_queue_index(rxq);
+
+ WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
+ }
+
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_rx_queue *rxq;
+ u32 xa_idx;
+ int err;
+
+ if (rxq_idx >= dev->real_num_rx_queues) {
+ NL_SET_ERR_MSG(extack, "rx queue index out of range");
+ return -ERANGE;
+ }
+
+ rxq = __netif_get_rx_queue(dev, rxq_idx);
+ if (rxq->mp_params.mp_priv) {
+ NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+ return -EEXIST;
+ }
+
+#ifdef CONFIG_XDP_SOCKETS
+ if (rxq->pool) {
+ NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+ return -EBUSY;
+ }
+#endif
+
+ err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ rxq->mp_params.mp_priv = binding;
+
+ err = netdev_rx_queue_restart(dev, rxq_idx);
+ if (err)
+ goto err_xa_erase;
+
+ return 0;
+
+err_xa_erase:
+ rxq->mp_params.mp_priv = NULL;
+ xa_erase(&binding->bound_rxqs, xa_idx);
+
+ return err;
+}
+
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ static u32 id_alloc_next;
+ struct scatterlist *sg;
+ struct dma_buf *dmabuf;
+ unsigned int sg_idx, i;
+ unsigned long virtual;
+ int err;
+
+ dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_put_dmabuf;
+ }
+
+ binding->dev = dev;
+
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+ binding, xa_limit_32b, &id_alloc_next,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_free_binding;
+
+ xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
+
+ refcount_set(&binding->ref, 1);
+
+ binding->dmabuf = dmabuf;
+
+ binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
+ if (IS_ERR(binding->attachment)) {
+ err = PTR_ERR(binding->attachment);
+ NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
+ goto err_free_id;
+ }
+
+ binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
+ DMA_FROM_DEVICE);
+ if (IS_ERR(binding->sgt)) {
+ err = PTR_ERR(binding->sgt);
+ NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
+ goto err_detach;
+ }
+
+ /* For simplicity we expect to make PAGE_SIZE allocations, but the
+ * binding can be much more flexible than that. We may be able to
+ * allocate MTU sized chunks here. Leave that for future work...
+ */
+ binding->chunk_pool =
+ gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
+ if (!binding->chunk_pool) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ virtual = 0;
+ for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
+ dma_addr_t dma_addr = sg_dma_address(sg);
+ struct dmabuf_genpool_chunk_owner *owner;
+ size_t len = sg_dma_len(sg);
+ struct net_iov *niov;
+
+ owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!owner) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ owner->base_virtual = virtual;
+ owner->base_dma_addr = dma_addr;
+ owner->num_niovs = len / PAGE_SIZE;
+ owner->binding = binding;
+
+ err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
+ dma_addr, len, dev_to_node(&dev->dev),
+ owner);
+ if (err) {
+ kfree(owner);
+ err = -EINVAL;
+ goto err_free_chunks;
+ }
+
+ owner->niovs = kvmalloc_array(owner->num_niovs,
+ sizeof(*owner->niovs),
+ GFP_KERNEL);
+ if (!owner->niovs) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ for (i = 0; i < owner->num_niovs; i++) {
+ niov = &owner->niovs[i];
+ niov->owner = owner;
+ page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
+ net_devmem_get_dma_addr(niov));
+ }
+
+ virtual += len;
+ }
+
+ return binding;
+
+err_free_chunks:
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+ gen_pool_destroy(binding->chunk_pool);
+err_unmap:
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+err_detach:
+ dma_buf_detach(dmabuf, binding->attachment);
+err_free_id:
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+err_free_binding:
+ kfree(binding);
+err_put_dmabuf:
+ dma_buf_put(dmabuf);
+ return ERR_PTR(err);
+}
+
+void dev_dmabuf_uninstall(struct net_device *dev)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ binding = dev->_rx[i].mp_params.mp_priv;
+ if (!binding)
+ continue;
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
+ if (rxq == &dev->_rx[i]) {
+ xa_erase(&binding->bound_rxqs, xa_idx);
+ break;
+ }
+ }
+}
+
+/*** "Dmabuf devmem memory provider" ***/
+
+int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ if (!binding)
+ return -EINVAL;
+
+ if (!pool->dma_map)
+ return -EOPNOTSUPP;
+
+ if (pool->dma_sync)
+ return -EOPNOTSUPP;
+
+ if (pool->p.order != 0)
+ return -E2BIG;
+
+ net_devmem_dmabuf_binding_get(binding);
+ return 0;
+}
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+ struct net_iov *niov;
+ netmem_ref netmem;
+
+ niov = net_devmem_alloc_dmabuf(binding);
+ if (!niov)
+ return 0;
+
+ netmem = net_iov_to_netmem(niov);
+
+ page_pool_set_pp_info(pool, netmem);
+
+ pool->pages_state_hold_cnt++;
+ trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
+ return netmem;
+}
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));
+
+ if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+ return false;
+
+ if (WARN_ON_ONCE(refcount != 1))
+ return false;
+
+ page_pool_clear_pp_info(netmem);
+
+ net_devmem_free_dmabuf(netmem_to_net_iov(netmem));
+
+ /* We don't want the page pool put_page()ing our net_iovs. */
+ return false;
+}
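
The allocator in net_devmem_alloc_dmabuf() recovers a net_iov from a genpool address with plain offset arithmetic: each chunk owner records its base DMA address, and the slot index is the byte offset divided by PAGE_SIZE. A standalone sketch of that mapping, assuming 4 KiB pages and hypothetical names:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL

/* Mirrors the index computation above: a chunk based at base_dma_addr
 * is carved into page-sized slots, slot i starting at
 * base_dma_addr + i * EXAMPLE_PAGE_SIZE.
 */
static uint64_t slot_index(uint64_t base_dma_addr, uint64_t dma_addr)
{
	return (dma_addr - base_dma_addr) / EXAMPLE_PAGE_SIZE;
}

int main(void)
{
	uint64_t base = 0x100000;

	assert(slot_index(base, base) == 0);
	assert(slot_index(base, base + 3 * EXAMPLE_PAGE_SIZE) == 3);
	return 0;
}
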
diff --git a/net/core/devmem.h b/net/core/devmem.h
new file mode 100644
index 000000000000..76099ef9c482
--- /dev/null
+++ b/net/core/devmem.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Device memory TCP support
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ * Willem de Bruijn <willemb@google.com>
+ * Kaiyuan Zhang <kaiyuanz@google.com>
+ *
+ */
+#ifndef _NET_DEVMEM_H
+#define _NET_DEVMEM_H
+
+struct netlink_ext_ack;
+
+struct net_devmem_dmabuf_binding {
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct net_device *dev;
+ struct gen_pool *chunk_pool;
+
+ /* The user holds a ref (via the netlink API) for as long as they want
+ * the binding to remain alive. Each page pool using this binding holds
+ * a ref to keep the binding alive. Each allocated net_iov holds a
+ * ref.
+ *
+ * The binding undoes itself and unmaps the underlying dmabuf once all
+ * those refs are dropped and the binding is no longer desired or in
+ * use.
+ */
+ refcount_t ref;
+
+ /* The list of bindings currently active. Used for netlink to notify us
+ * of the user dropping the bind.
+ */
+ struct list_head list;
+
+ /* rxq's this binding is active on. */
+ struct xarray bound_rxqs;
+
+ /* ID of this binding. Globally unique to all bindings currently
+ * active.
+ */
+ u32 id;
+};
+
+#if defined(CONFIG_NET_DEVMEM)
+/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
+ * entry from the dmabuf is inserted into the genpool as a chunk, and needs
+ * this owner struct to keep track of some metadata necessary to create
+ * allocations from this chunk.
+ */
+struct dmabuf_genpool_chunk_owner {
+ /* Offset into the dma-buf where this chunk starts. */
+ unsigned long base_virtual;
+
+ /* dma_addr of the start of the chunk. */
+ dma_addr_t base_dma_addr;
+
+ /* Array of net_iovs for this chunk. */
+ struct net_iov *niovs;
+ size_t num_niovs;
+
+ struct net_devmem_dmabuf_binding *binding;
+};
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack);
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack);
+void dev_dmabuf_uninstall(struct net_device *dev);
+
+static inline struct dmabuf_genpool_chunk_owner *
+net_iov_owner(const struct net_iov *niov)
+{
+ return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+ return niov - net_iov_owner(niov)->niovs;
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_iov_binding(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding;
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_virtual +
+ ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding->id;
+}
+
+static inline void
+net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
+{
+ refcount_inc(&binding->ref);
+}
+
+static inline void
+net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
+{
+ if (!refcount_dec_and_test(&binding->ref))
+ return;
+
+ __net_devmem_dmabuf_binding_free(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
+void net_devmem_free_dmabuf(struct net_iov *ppiov);
+
+#else
+struct net_devmem_dmabuf_binding;
+
+static inline void
+__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline int
+net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_dmabuf_uninstall(struct net_device *dev)
+{
+}
+
+static inline struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ return NULL;
+}
+
+static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
+{
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ return 0;
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return 0;
+}
+#endif
+
+#endif /* _NET_DEVMEM_H */
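
The refcount scheme above makes net_devmem_dmabuf_binding_put() the single teardown point: the netlink socket, every page pool using the binding, and every outstanding net_iov each hold a reference, and __net_devmem_dmabuf_binding_free() runs only when the last one drops. A hedged sketch of the pool side of that contract, mirroring mp_dmabuf_devmem_init()/mp_dmabuf_devmem_destroy() (the example_* names are illustrative):

static int example_pool_attach(struct page_pool *pool,
			       struct net_devmem_dmabuf_binding *binding)
{
	net_devmem_dmabuf_binding_get(binding);	/* refcount_inc() */
	pool->mp_priv = binding;
	return 0;
}

static void example_pool_detach(struct page_pool *pool)
{
	/* the last put frees the binding and unmaps the dmabuf */
	net_devmem_dmabuf_binding_put(pool->mp_priv);
	pool->mp_priv = NULL;
}
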
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 6ebffbc63236..154a2681f55c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
+#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
@@ -72,7 +73,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->suppress_prefixlen = -1;
r->suppress_ifgroup = -1;
- /* The lock is not required here, the list in unreacheable
+ * The lock is not required here, the list is unreachable
* at the moment this function is called */
list_add_tail(&r->list, &ops->rules_list);
return 0;
@@ -766,7 +767,8 @@ static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
[FRA_PROTOCOL] = { .type = NLA_U8 },
[FRA_IP_PROTO] = { .type = NLA_U8 },
[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
- [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
+ [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
+ [FRA_DSCP] = NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
};
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1205,8 +1207,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, ops->nlgroup, err);
+ rtnl_set_sk_err(net, ops->nlgroup, err);
}
static void attach_rules(struct list_head *rules, struct net_device *dev)
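
The new FRA_DSCP bound follows directly from the TOS layout: INET_DSCP_MASK is 0xfc, the top six bits of the byte, so INET_DSCP_MASK >> 2 is 0x3f and the policy accepts the full 6-bit DSCP range 0..63. The arithmetic as a standalone check:

#include <assert.h>

#define INET_DSCP_MASK 0xfc	/* dscp = tos & 0xfc, per include/net/inet_dscp.h */

int main(void)
{
	assert((INET_DSCP_MASK >> 2) == 0x3f);		/* policy max = 63 */
	/* e.g. TOS 0xb8 carries DSCP 46 (0x2e, the EF codepoint) */
	assert(((0xb8 & INET_DSCP_MASK) >> 2) == 0x2e);
	return 0;
}
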
diff --git a/net/core/filter.c b/net/core/filter.c
index f3c72cf86099..cd3524cb326b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -84,6 +84,7 @@
#include <net/netkit.h>
#include <linux/un.h>
#include <net/xdp_sock_drv.h>
+#include <net/inet_dscp.h>
#include "dev.h"
@@ -1265,8 +1266,8 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
* so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns.
*/
- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
- GFP_KERNEL | __GFP_NOWARN);
+ old_prog = kmemdup_array(fp->insns, old_len, sizeof(struct sock_filter),
+ GFP_KERNEL | __GFP_NOWARN);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
@@ -2371,7 +2372,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4 = {
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_tos = ip4h->tos & INET_DSCP_MASK,
.flowi4_oif = dev->ifindex,
.flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
@@ -3189,6 +3190,7 @@ BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
bpf_push_mac_rcsum(skb);
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
bpf_pull_mac_rcsum(skb);
+ skb_reset_mac_len(skb);
bpf_compute_data_pointers(skb);
return ret;
@@ -5278,6 +5280,11 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
return -EINVAL;
inet_csk(sk)->icsk_rto_min = timeout;
break;
+ case TCP_BPF_SOCK_OPS_CB_FLAGS:
+ if (val & ~(BPF_SOCK_OPS_ALL_CB_FLAGS))
+ return -EINVAL;
+ tp->bpf_sock_ops_cb_flags = val;
+ break;
default:
return -EINVAL;
}
@@ -5366,6 +5373,17 @@ static int sol_tcp_sockopt(struct sock *sk, int optname,
if (*optlen < 1)
return -EINVAL;
break;
+ case TCP_BPF_SOCK_OPS_CB_FLAGS:
+ if (*optlen != sizeof(int))
+ return -EINVAL;
+ if (getopt) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ int cb_flags = tp->bpf_sock_ops_cb_flags;
+
+ memcpy(optval, &cb_flags, *optlen);
+ return 0;
+ }
+ return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen);
default:
if (getopt)
return -EINVAL;
@@ -5899,7 +5917,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
fl4.flowi4_iif = params->ifindex;
fl4.flowi4_oif = 0;
}
- fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
+ fl4.flowi4_tos = params->tos & INET_DSCP_MASK;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
@@ -6262,20 +6280,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
struct net_device *dev = skb->dev;
int skb_len, dev_len;
- int mtu;
+ int mtu = 0;
- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
- return -EINVAL;
+ if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
- return -EINVAL;
+ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
+ ret = -EINVAL;
+ goto out;
+ }
dev = __dev_via_ifindex(dev, ifindex);
- if (unlikely(!dev))
- return -ENODEV;
+ if (unlikely(!dev)) {
+ ret = -ENODEV;
+ goto out;
+ }
mtu = READ_ONCE(dev->mtu);
-
dev_len = mtu + dev->hard_header_len;
/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6293,15 +6316,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
*/
if (skb_is_gso(skb)) {
ret = BPF_MTU_CHK_RET_SUCCESS;
-
if (flags & BPF_MTU_CHK_SEGS &&
!skb_gso_validate_network_len(skb, mtu))
ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
}
out:
- /* BPF verifier guarantees valid pointer */
*mtu_len = mtu;
-
return ret;
}
@@ -6311,19 +6331,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
struct net_device *dev = xdp->rxq->dev;
int xdp_len = xdp->data_end - xdp->data;
int ret = BPF_MTU_CHK_RET_SUCCESS;
- int mtu, dev_len;
+ int mtu = 0, dev_len;
/* XDP variant doesn't support multi-buffer segment check (yet) */
- if (unlikely(flags))
- return -EINVAL;
+ if (unlikely(flags)) {
+ ret = -EINVAL;
+ goto out;
+ }
dev = __dev_via_ifindex(dev, ifindex);
- if (unlikely(!dev))
- return -ENODEV;
+ if (unlikely(!dev)) {
+ ret = -ENODEV;
+ goto out;
+ }
mtu = READ_ONCE(dev->mtu);
-
- /* Add L2-header as dev MTU is L3 size */
dev_len = mtu + dev->hard_header_len;
/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6333,10 +6355,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
xdp_len += len_diff; /* minus result pass check */
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
-
- /* BPF verifier guarantees valid pointer */
+out:
*mtu_len = mtu;
-
return ret;
}
@@ -6346,7 +6366,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6357,7 +6378,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -8579,13 +8601,16 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (off + size > offsetofend(struct __sk_buff, cb[4]))
return false;
break;
+ case bpf_ctx_range(struct __sk_buff, data):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ if (info->is_ldsx || size != size_default)
+ return false;
+ break;
case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
- case bpf_ctx_range(struct __sk_buff, data):
- case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, data_end):
if (size != size_default)
return false;
break;
@@ -9029,6 +9054,14 @@ static bool xdp_is_valid_access(int off, int size,
}
}
return false;
+ } else {
+ switch (off) {
+ case offsetof(struct xdp_md, data_meta):
+ case offsetof(struct xdp_md, data):
+ case offsetof(struct xdp_md, data_end):
+ if (info->is_ldsx)
+ return false;
+ }
}
switch (off) {
@@ -9354,12 +9387,12 @@ static bool flow_dissector_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, data):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET;
return true;
case bpf_ctx_range(struct __sk_buff, data_end):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET_END;
return true;
@@ -12045,7 +12078,7 @@ int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
}
BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
-BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
@@ -12094,6 +12127,7 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
&bpf_kfunc_set_sock_addr);
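
With the two sol_tcp hunks above, a sock_ops program can now set and read its callback flags through bpf_setsockopt()/bpf_getsockopt() at SOL_TCP. A hedged sketch of a program using the new option; the TCP_BPF_SOCK_OPS_CB_FLAGS value is an assumption copied from the uapi header of this series, so verify it against your tree:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef SOL_TCP
#define SOL_TCP 6
#endif
#ifndef TCP_BPF_SOCK_OPS_CB_FLAGS
#define TCP_BPF_SOCK_OPS_CB_FLAGS 1008	/* assumed; see uapi/linux/bpf.h */
#endif

SEC("sockops")
int enable_rtt_cb(struct bpf_sock_ops *skops)
{
	int flags = BPF_SOCK_OPS_RTT_CB_FLAG;

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		/* unknown bits are rejected with -EINVAL, per the hunk above */
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SOCK_OPS_CB_FLAGS,
			       &flags, sizeof(flags));
	return 1;
}

char _license[] SEC("license") = "GPL";
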
diff --git a/net/core/gro.c b/net/core/gro.c
index b3b43de1a650..802b4a062400 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -374,7 +374,7 @@ static void gro_list_prepare(const struct list_head *head,
skb_mac_header(skb),
maclen);
- /* in most common scenarions 'slow_gro' is 0
+ /* in most common scenarios 'slow_gro' is 0
* otherwise we are already on some slower paths
* either skip all the infrequent tests altogether or
* avoid trying too hard to skip each of them individually
@@ -408,7 +408,8 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
pinfo = skb_shinfo(skb);
frag0 = &pinfo->frags[0];
- if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
+ if (pinfo->nr_frags && skb_frag_page(frag0) &&
+ !PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
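
The skb_gro_reset_offset() change encodes a new invariant: with devmem in play, skb_frag_page() can return NULL for an unreadable net_iov frag, so the page pointer must be tested before PageHighMem(). A hedged sketch of the defensive pattern (the helper is illustrative, not a kernel API):

static bool example_frag_directly_readable(const skb_frag_t *frag)
{
	struct page *page = skb_frag_page(frag);	/* NULL for net_iov */

	return page && !PageHighMem(page);
}
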
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index afb05f58b64c..1a14f915b7a4 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -12,6 +12,7 @@
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
+#include <net/inet_dscp.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -205,7 +206,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
fl4.flowi4_oif = oif;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_uid = sock_net_uid(net, sk);
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = iph->tos & INET_DSCP_MASK;
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = iph->protocol;
fl4.daddr = iph->daddr;
diff --git a/net/core/mp_dmabuf_devmem.h b/net/core/mp_dmabuf_devmem.h
new file mode 100644
index 000000000000..67cd0dd7319c
--- /dev/null
+++ b/net/core/mp_dmabuf_devmem.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Dmabuf device memory provider.
+ *
+ * Authors: Mina Almasry <almasrymina@google.com>
+ *
+ */
+#ifndef _NET_MP_DMABUF_DEVMEM_H
+#define _NET_MP_DMABUF_DEVMEM_H
+
+#include <net/netmem.h>
+
+#if defined(CONFIG_NET_DEVMEM)
+int mp_dmabuf_devmem_init(struct page_pool *pool);
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp);
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool);
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem);
+#else
+static inline int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline netmem_ref
+mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+}
+
+static inline bool
+mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ return false;
+}
+#endif
+
+#endif /* _NET_MP_DMABUF_DEVMEM_H */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a6fe88eca939..77b819cd995b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3530,8 +3530,7 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
void neigh_app_ns(struct neighbour *n)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 291fdf4a328b..05cf5347f25e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -32,6 +32,7 @@
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
+static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
@@ -425,6 +426,9 @@ NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
+ if (val > S32_MAX)
+ return -ERANGE;
+
WRITE_ONCE(dev->napi_defer_hard_irqs, val);
return 0;
}
@@ -438,7 +442,7 @@ static ssize_t napi_defer_hard_irqs_store(struct device *dev,
return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
-NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);
+NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
@@ -1056,7 +1060,7 @@ static const void *rx_queue_namespace(const struct kobject *kobj)
struct device *dev = &queue->dev->dev;
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -1740,7 +1744,7 @@ static const void *netdev_queue_namespace(const struct kobject *kobj)
struct device *dev = &queue->dev->dev;
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -1764,8 +1768,7 @@ static const struct kobj_type netdev_queue_ktype = {
static bool netdev_uses_bql(const struct net_device *dev)
{
- if (dev->features & NETIF_F_LLTX ||
- dev->priv_flags & IFF_NO_QUEUE)
+ if (dev->lltx || (dev->priv_flags & IFF_NO_QUEUE))
return false;
return IS_ENABLED(CONFIG_BQL);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6a823ba906c6..e39479f1c9a4 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -125,7 +125,7 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
int err = -ENOMEM;
void *data = NULL;
- if (ops->id && ops->size) {
+ if (ops->id) {
data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
goto out;
@@ -140,7 +140,7 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
if (!err)
return 0;
- if (ops->id && ops->size) {
+ if (ops->id) {
ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&pernet_ops_rwsem));
ng->ptr[*ops->id] = NULL;
@@ -182,7 +182,8 @@ static void ops_free_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
- if (ops->size && ops->id) {
+
+ if (ops->id) {
list_for_each_entry(net, net_exit_list, exit_list)
kfree(net_generic(net, *ops->id));
}
@@ -308,16 +309,38 @@ struct net *get_net_ns_by_id(const struct net *net, int id)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);
+static __net_init void preinit_net_sysctl(struct net *net)
+{
+ net->core.sysctl_somaxconn = SOMAXCONN;
+ /* Limits per socket sk_omem_alloc usage.
+ * TCP zerocopy regular usage needs 128 KB.
+ */
+ net->core.sysctl_optmem_max = 128 * 1024;
+ net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
+}
+
/* init code that must occur even if setup_net() is not called. */
-static __net_init void preinit_net(struct net *net)
+static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
+ refcount_set(&net->passive, 1);
+ refcount_set(&net->ns.count, 1);
+ ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
+
+ get_random_bytes(&net->hash_mix, sizeof(u32));
+ net->dev_base_seq = 1;
+ net->user_ns = user_ns;
+
+ idr_init(&net->netns_ids);
+ spin_lock_init(&net->nsid_lock);
+ mutex_init(&net->ipv4.ra_mutex);
+ preinit_net_sysctl(net);
}
/*
* setup_net runs the initializers for the network namespace object.
*/
-static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+static __net_init int setup_net(struct net *net)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
@@ -325,19 +348,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
LIST_HEAD(dev_kill_list);
int error = 0;
- refcount_set(&net->ns.count, 1);
- ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
-
- refcount_set(&net->passive, 1);
- get_random_bytes(&net->hash_mix, sizeof(u32));
preempt_disable();
net->net_cookie = gen_cookie_next(&net_cookie);
preempt_enable();
- net->dev_base_seq = 1;
- net->user_ns = user_ns;
- idr_init(&net->netns_ids);
- spin_lock_init(&net->nsid_lock);
- mutex_init(&net->ipv4.ra_mutex);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
@@ -382,32 +395,6 @@ out_undo:
goto out;
}
-static int __net_init net_defaults_init_net(struct net *net)
-{
- net->core.sysctl_somaxconn = SOMAXCONN;
- /* Limits per socket sk_omem_alloc usage.
- * TCP zerocopy regular usage needs 128 KB.
- */
- net->core.sysctl_optmem_max = 128 * 1024;
- net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
-
- return 0;
-}
-
-static struct pernet_operations net_defaults_ops = {
- .init = net_defaults_init_net,
-};
-
-static __init int net_defaults_init(void)
-{
- if (register_pernet_subsys(&net_defaults_ops))
- panic("Cannot initialize net default settings");
-
- return 0;
-}
-
-core_initcall(net_defaults_init);
-
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
@@ -496,8 +483,7 @@ struct net *copy_net_ns(unsigned long flags,
goto dec_ucounts;
}
- preinit_net(net);
- refcount_set(&net->passive, 1);
+ preinit_net(net, user_ns);
net->ucounts = ucounts;
get_user_ns(user_ns);
@@ -505,7 +491,7 @@ struct net *copy_net_ns(unsigned long flags,
if (rv < 0)
goto put_userns;
- rv = setup_net(net, user_ns);
+ rv = setup_net(net);
up_read(&pernet_ops_rwsem);
@@ -711,11 +697,11 @@ struct net *get_net_ns_by_fd(int fd)
struct fd f = fdget(fd);
struct net *net = ERR_PTR(-EINVAL);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- if (proc_ns_file(f.file)) {
- struct ns_common *ns = get_proc_ns(file_inode(f.file));
+ if (proc_ns_file(fd_file(f))) {
+ struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops == &netns_operations)
net = get_net(container_of(ns, struct net, ns));
}
@@ -1199,9 +1185,10 @@ void __init net_ns_init(void)
#ifdef CONFIG_KEYS
init_net.key_domain = &init_net_key_domain;
#endif
+ preinit_net(&init_net, &init_user_ns);
+
down_write(&pernet_ops_rwsem);
- preinit_net(&init_net);
- if (setup_net(&init_net, &init_user_ns))
+ if (setup_net(&init_net))
panic("Could not setup the initial network namespace");
init_net_initialized = true;
@@ -1244,7 +1231,7 @@ static int __register_pernet_operations(struct list_head *list,
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
- if (ops->init || (ops->id && ops->size)) {
+ if (ops->init || ops->id) {
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
@@ -1310,6 +1297,9 @@ static int register_pernet_operations(struct list_head *list,
{
int error;
+ if (WARN_ON(!!ops->id ^ !!ops->size))
+ return -EINVAL;
+
if (ops->id) {
error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
GFP_KERNEL);
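
After the hunks above, per-net data is keyed purely off ops->id, and register_pernet_operations() WARNs and rejects an ops that sets only one of id/size. A hedged sketch of the pairing the check enforces (the example_* names are illustrative):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_pernet {
	int counter;
};

static unsigned int example_net_id;

/* .id and .size must come as a pair: ops_init() kzalloc()s .size bytes
 * and files them under *.id for net_generic() lookup.
 */
static struct pernet_operations example_net_ops = {
	.id   = &example_net_id,
	.size = sizeof(struct example_pernet),
};

static struct example_pernet *example_pernet(struct net *net)
{
	return net_generic(net, example_net_id);
}
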
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index 8350a0afa9ec..b28424ae06d5 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -9,6 +9,7 @@
#include "netdev-genl-gen.h"
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Integer value ranges */
static const struct netlink_range_validation netdev_a_page_pool_id_range = {
@@ -27,6 +28,11 @@ const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFIND
[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
};
+const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1] = {
+ [NETDEV_A_QUEUE_ID] = { .type = NLA_U32, },
+ [NETDEV_A_QUEUE_TYPE] = NLA_POLICY_MAX(NLA_U32, 1),
+};
+
/* NETDEV_CMD_DEV_GET - do */
static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
[NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -74,6 +80,13 @@ static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE
[NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
};
+/* NETDEV_CMD_BIND_RX - do */
+static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1] = {
+ [NETDEV_A_DMABUF_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_DMABUF_FD] = { .type = NLA_U32, },
+ [NETDEV_A_DMABUF_QUEUES] = NLA_POLICY_NESTED(netdev_queue_id_nl_policy),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -151,6 +164,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_QSTATS_SCOPE,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_BIND_RX,
+ .doit = netdev_nl_bind_rx_doit,
+ .policy = netdev_bind_rx_nl_policy,
+ .maxattr = NETDEV_A_DMABUF_FD,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
@@ -168,4 +188,7 @@ struct genl_family netdev_nl_family __ro_after_init = {
.n_split_ops = ARRAY_SIZE(netdev_nl_ops),
.mcgrps = netdev_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
+ .sock_priv_size = sizeof(struct list_head),
+ .sock_priv_init = (void *)netdev_nl_sock_priv_init,
+ .sock_priv_destroy = (void *)netdev_nl_sock_priv_destroy,
};
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index 4db40fd5b4a9..8cda334fd042 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -10,9 +10,11 @@
#include <net/genetlink.h>
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Common nested types */
extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1];
+extern const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1];
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
@@ -30,6 +32,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info);
enum {
NETDEV_NLGRP_MGMT,
@@ -38,4 +41,7 @@ enum {
extern struct genl_family netdev_nl_family;
+void netdev_nl_sock_priv_init(struct list_head *priv);
+void netdev_nl_sock_priv_destroy(struct list_head *priv);
+
#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 05f9515d2c05..1cb954f2d39e 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -3,16 +3,17 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
+#include <net/busy_poll.h>
#include <net/net_namespace.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
-#include <net/netdev_rx_queue.h>
-#include <net/netdev_queues.h>
-#include <net/busy_poll.h>
-#include "netdev-genl-gen.h"
#include "dev.h"
+#include "devmem.h"
+#include "netdev-genl-gen.h"
struct netdev_nl_dump_ctx {
unsigned long ifindex;
@@ -216,10 +217,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
napi = napi_by_id(napi_id);
- if (napi)
+ if (napi) {
err = netdev_nl_napi_fill_one(rsp, napi, info);
- else
- err = -EINVAL;
+ } else {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
+ err = -ENOENT;
+ }
rtnl_unlock();
@@ -292,6 +295,7 @@ static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding;
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
void *hdr;
@@ -311,6 +315,12 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
rxq->napi->napi_id))
goto nla_put_failure;
+
+ binding = rxq->mp_params.mp_priv;
+ if (binding &&
+ nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
+ goto nla_put_failure;
+
break;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(netdev, q_idx);
@@ -721,6 +731,129 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
return err;
}
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct net_devmem_dmabuf_binding *binding;
+ struct list_head *sock_binding_list;
+ u32 ifindex, dmabuf_fd, rxq_idx;
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ struct nlattr *attr;
+ int rem, err = 0;
+ void *hdr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
+ dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);
+
+ sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
+ NETLINK_CB(skb).sk);
+ if (IS_ERR(sock_binding_list))
+ return PTR_ERR(sock_binding_list);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_genlmsg_free;
+ }
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (!netdev || !netif_device_present(netdev)) {
+ err = -ENODEV;
+ goto err_unlock;
+ }
+
+ if (dev_xdp_prog_count(netdev)) {
+ NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
+ err = -EEXIST;
+ goto err_unlock;
+ }
+
+ binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
+ if (IS_ERR(binding)) {
+ err = PTR_ERR(binding);
+ goto err_unlock;
+ }
+
+ nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ err = nla_parse_nested(
+ tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ goto err_unbind;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+
+ err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
+ info->extack);
+ if (err)
+ goto err_unbind;
+ }
+
+ list_add(&binding->list, sock_binding_list);
+
+ nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
+ genlmsg_end(rsp, hdr);
+
+ err = genlmsg_reply(rsp, info);
+ if (err)
+ goto err_unbind;
+
+ rtnl_unlock();
+
+ return 0;
+
+err_unbind:
+ net_devmem_unbind_dmabuf(binding);
+err_unlock:
+ rtnl_unlock();
+err_genlmsg_free:
+ nlmsg_free(rsp);
+ return err;
+}
+
+void netdev_nl_sock_priv_init(struct list_head *priv)
+{
+ INIT_LIST_HEAD(priv);
+}
+
+void netdev_nl_sock_priv_destroy(struct list_head *priv)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct net_devmem_dmabuf_binding *temp;
+
+ list_for_each_entry_safe(binding, temp, priv, list) {
+ rtnl_lock();
+ net_devmem_unbind_dmabuf(binding);
+ rtnl_unlock();
+ }
+}
+
static int netdev_genl_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
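
From user space, NETDEV_CMD_BIND_RX takes the device ifindex, the dmabuf fd, and a nested list of rx queues, and replies with the binding id; the binding persists until the queues are unbound or the netlink socket closes (netdev_nl_sock_priv_destroy() above handles the latter). A hedged outline with helper names modeled on the YNL-generated C API; treat every identifier as illustrative rather than authoritative, and note that error handling and cleanup are elided:

static __u32 example_bind_rx(struct ynl_sock *ys, int ifindex,
			     int dmabuf_fd, unsigned int rxq_idx)
{
	struct netdev_queue_id queues[1] = {};
	struct netdev_bind_rx_req *req;
	struct netdev_bind_rx_rsp *rsp;

	queues[0]._present.type = 1;
	queues[0]._present.id = 1;
	queues[0].type = NETDEV_QUEUE_TYPE_RX;
	queues[0].id = rxq_idx;

	req = netdev_bind_rx_req_alloc();
	netdev_bind_rx_req_set_ifindex(req, ifindex);
	netdev_bind_rx_req_set_fd(req, dmabuf_fd);
	__netdev_bind_rx_req_set_queues(req, queues, 1);

	rsp = netdev_bind_rx(ys, req);	/* NULL on error */
	return rsp ? rsp->id : 0;	/* id shown later in queue dumps */
}
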
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
new file mode 100644
index 000000000000..e217a5838c87
--- /dev/null
+++ b/net/core/netdev_rx_queue.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/netdevice.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+
+#include "page_pool_priv.h"
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
+ void *new_mem, *old_mem;
+ int err;
+
+ if (!dev->queue_mgmt_ops || !dev->queue_mgmt_ops->ndo_queue_stop ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_free ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_alloc ||
+ !dev->queue_mgmt_ops->ndo_queue_start)
+ return -EOPNOTSUPP;
+
+ ASSERT_RTNL();
+
+ new_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!new_mem)
+ return -ENOMEM;
+
+ old_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!old_mem) {
+ err = -ENOMEM;
+ goto err_free_new_mem;
+ }
+
+ err = dev->queue_mgmt_ops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_free_old_mem;
+
+ err = page_pool_check_memory_provider(dev, rxq);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_start(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_start_queue;
+
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+
+ kvfree(old_mem);
+ kvfree(new_mem);
+
+ return 0;
+
+err_start_queue:
+ /* Restarting the queue with old_mem should be successful as we haven't
+ * changed any of the queue configuration, and there is not much we can
+ * do to recover from a failure here.
+ *
+ * WARN if we fail to recover the old rx queue, and at least free
+ * old_mem so we don't also leak that.
+ */
+ if (dev->queue_mgmt_ops->ndo_queue_start(dev, old_mem, rxq_idx)) {
+ WARN(1,
+ "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
+ rxq_idx);
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+ }
+
+err_free_new_queue_mem:
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, new_mem);
+
+err_free_old_mem:
+ kvfree(old_mem);
+
+err_free_new_mem:
+ kvfree(new_mem);
+
+ return err;
+}
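
netdev_rx_queue_restart() drives a five-field driver contract: allocate replacement queue memory, stop the old queue, start the new one, free the old memory, with a best-effort rollback (and a WARN) if the restart fails. A hedged driver-side skeleton; the example_* names are hypothetical, while the signatures follow struct netdev_queue_mgmt_ops:

struct example_queue_mem {
	void *descs;		/* ring memory, page pool pointer, etc. */
};

static int example_queue_mem_alloc(struct net_device *dev,
				   void *per_queue_mem, int idx)
{
	/* allocate, but do not activate, resources for queue idx */
	return 0;
}

static void example_queue_mem_free(struct net_device *dev,
				   void *per_queue_mem)
{
}

static int example_queue_start(struct net_device *dev,
			       void *per_queue_mem, int idx)
{
	return 0;
}

static int example_queue_stop(struct net_device *dev,
			      void *per_queue_mem, int idx)
{
	return 0;
}

static const struct netdev_queue_mgmt_ops example_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct example_queue_mem),
	.ndo_queue_mem_alloc	= example_queue_mem_alloc,
	.ndo_queue_mem_free	= example_queue_mem_free,
	.ndo_queue_start	= example_queue_start,
	.ndo_queue_stop		= example_queue_stop,
};
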
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
new file mode 100644
index 000000000000..7eadb8393e00
--- /dev/null
+++ b/net/core/netmem_priv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NETMEM_PRIV_H
+#define __NETMEM_PRIV_H
+
+static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp_magic;
+}
+
+static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
+{
+ __netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
+}
+
+static inline void netmem_clear_pp_magic(netmem_ref netmem)
+{
+ __netmem_clear_lsb(netmem)->pp_magic = 0;
+}
+
+static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
+{
+ __netmem_clear_lsb(netmem)->pp = pool;
+}
+
+static inline void netmem_set_dma_addr(netmem_ref netmem,
+ unsigned long dma_addr)
+{
+ __netmem_clear_lsb(netmem)->dma_addr = dma_addr;
+}
+#endif
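
These setters exist because netmem_ref is a tagged pointer: bit 0 distinguishes a net_iov from a plain struct page, and __netmem_clear_lsb() masks the bit off before touching the fields the two layouts share. A standalone sketch of the tagging idea (all names illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t netmem_ref_t;

#define NET_IOV_BIT 0x1UL

static int is_net_iov(netmem_ref_t ref)  { return ref & NET_IOV_BIT; }
static void *clear_lsb(netmem_ref_t ref) { return (void *)(ref & ~NET_IOV_BIT); }

int main(void)
{
	static long backing;	/* stand-in for a struct page / net_iov */
	netmem_ref_t page_ref = (netmem_ref_t)&backing;
	netmem_ref_t iov_ref = page_ref | NET_IOV_BIT;

	assert(!is_net_iov(page_ref));
	assert(is_net_iov(iov_ref));
	assert(clear_lsb(iov_ref) == &backing);
	return 0;
}
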
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d657b042d5a0..ca52cbe0f63c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -48,8 +48,6 @@
static struct sk_buff_head skb_pool;
-DEFINE_STATIC_SRCU(netpoll_srcu);
-
#define USEC_PER_POLL 50
#define MAX_SKB_SIZE \
@@ -162,7 +160,7 @@ static void poll_one_napi(struct napi_struct *napi)
if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
return;
- /* We explicilty pass the polling call a budget of 0 to
+ /* We explicitly pass the polling call a budget of 0 to
* indicate that we are clearing the Tx path only.
*/
work = napi->poll(napi, 0);
@@ -220,23 +218,20 @@ EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
struct netpoll_info *ni;
- int idx;
+
might_sleep();
- idx = srcu_read_lock(&netpoll_srcu);
- ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+ ni = rtnl_dereference(dev->npinfo);
if (ni)
down(&ni->dev_lock);
- srcu_read_unlock(&netpoll_srcu, idx);
}
void netpoll_poll_enable(struct net_device *dev)
{
struct netpoll_info *ni;
- rcu_read_lock();
- ni = rcu_dereference(dev->npinfo);
+
+ ni = rtnl_dereference(dev->npinfo);
if (ni)
up(&ni->dev_lock);
- rcu_read_unlock();
}
static void refill_skbs(void)
@@ -624,12 +619,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
const struct net_device_ops *ops;
int err;
- np->dev = ndev;
- strscpy(np->dev_name, ndev->name, IFNAMSIZ);
-
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
- np->dev_name);
+ ndev->name);
err = -ENOTSUPP;
goto out;
}
@@ -647,7 +639,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
refcount_set(&npinfo->refcnt, 1);
- ops = np->dev->netdev_ops;
+ ops = ndev->netdev_ops;
if (ops->ndo_netpoll_setup) {
err = ops->ndo_netpoll_setup(ndev, npinfo);
if (err)
@@ -658,6 +650,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
refcount_inc(&npinfo->refcnt);
}
+ np->dev = ndev;
+ strscpy(np->dev_name, ndev->name, IFNAMSIZ);
npinfo->netpoll = np;
/* last thing to do is link it to the net device structure */
@@ -675,6 +669,7 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
struct net_device *ndev = NULL;
+ bool ip_overwritten = false;
struct in_device *in_dev;
int err;
@@ -739,6 +734,7 @@ put_noaddr:
}
np->local_ip.ip = ifa->ifa_local;
+ ip_overwritten = true;
np_info(np, "local IP %pI4\n", &np->local_ip.ip);
} else {
#if IS_ENABLED(CONFIG_IPV6)
@@ -755,6 +751,7 @@ put_noaddr:
!!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
+ ip_overwritten = true;
err = 0;
break;
}
@@ -785,6 +782,9 @@ put_noaddr:
return 0;
put:
+ DEBUG_NET_WARN_ON_ONCE(np->dev);
+ if (ip_overwritten)
+ memset(&np->local_ip, 0, sizeof(np->local_ip));
netdev_put(ndev, &np->dev_tracker);
unlock:
rtnl_unlock();
@@ -824,8 +824,6 @@ void __netpoll_cleanup(struct netpoll *np)
if (!npinfo)
return;
- synchronize_srcu(&netpoll_srcu);
-
if (refcount_dec_and_test(&npinfo->refcnt)) {
const struct net_device_ops *ops;
@@ -851,14 +849,20 @@ void __netpoll_free(struct netpoll *np)
}
EXPORT_SYMBOL_GPL(__netpoll_free);
+void do_netpoll_cleanup(struct netpoll *np)
+{
+ __netpoll_cleanup(np);
+ netdev_put(np->dev, &np->dev_tracker);
+ np->dev = NULL;
+}
+EXPORT_SYMBOL(do_netpoll_cleanup);
+
void netpoll_cleanup(struct netpoll *np)
{
rtnl_lock();
if (!np->dev)
goto out;
- __netpoll_cleanup(np);
- netdev_put(np->dev, &np->dev_tracker);
- np->dev = NULL;
+ do_netpoll_cleanup(np);
out:
rtnl_unlock();
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 2abe6e919224..a813d30d2135 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/device.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -24,8 +25,12 @@
#include <trace/events/page_pool.h>
+#include "mp_dmabuf_devmem.h"
+#include "netmem_priv.h"
#include "page_pool_priv.h"
+DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
@@ -187,6 +192,8 @@ static int page_pool_init(struct page_pool *pool,
int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
+ struct netdev_rx_queue *rxq;
+ int err;
page_pool_struct_check();
@@ -268,7 +275,37 @@ static int page_pool_init(struct page_pool *pool,
if (pool->dma_map)
get_device(pool->p.dev);
+ if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
+ /* We rely on rtnl_lock()ing to make sure netdev_rx_queue
+ * configuration doesn't change while we're initializing
+ * the page_pool.
+ */
+ ASSERT_RTNL();
+ rxq = __netif_get_rx_queue(pool->slow.netdev,
+ pool->slow.queue_idx);
+ pool->mp_priv = rxq->mp_params.mp_priv;
+ }
+
+ if (pool->mp_priv) {
+ err = mp_dmabuf_devmem_init(pool);
+ if (err) {
+ pr_warn("%s() mem-provider init failed %d\n", __func__,
+ err);
+ goto free_ptr_ring;
+ }
+
+ static_branch_inc(&page_pool_mem_providers);
+ }
+
return 0;
+
+free_ptr_ring:
+ ptr_ring_cleanup(&pool->ring, NULL);
+#ifdef CONFIG_PAGE_POOL_STATS
+ if (!pool->system)
+ free_percpu(pool->recycle_stats);
+#endif
+ return err;
}
static void page_pool_uninit(struct page_pool *pool)
@@ -358,7 +395,7 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
if (unlikely(!netmem))
break;
- if (likely(page_to_nid(netmem_to_page(netmem)) == pref_nid)) {
+ if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
pool->alloc.cache[pool->alloc.count++] = netmem;
} else {
/* NUMA mismatch;
@@ -452,32 +489,6 @@ unmap_failed:
return false;
}
-static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
-{
- struct page *page = netmem_to_page(netmem);
-
- page->pp = pool;
- page->pp_magic |= PP_SIGNATURE;
-
- /* Ensuring all pages have been split into one fragment initially:
- * page_pool_set_pp_info() is only called once for every page when it
- * is allocated from the page allocator and page_pool_fragment_page()
- * is dirtying the same cache line as the page->pp_magic above, so
- * the overhead is negligible.
- */
- page_pool_fragment_netmem(netmem, 1);
- if (pool->has_init_callback)
- pool->slow.init_callback(netmem, pool->slow.init_arg);
-}
-
-static void page_pool_clear_pp_info(netmem_ref netmem)
-{
- struct page *page = netmem_to_page(netmem);
-
- page->pp_magic = 0;
- page->pp = NULL;
-}
-
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@@ -573,7 +584,10 @@ netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
return netmem;
/* Slow-path: cache empty, do real allocation */
- netmem = __page_pool_alloc_pages_slow(pool, gfp);
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+ else
+ netmem = __page_pool_alloc_pages_slow(pool, gfp);
return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmem);
@@ -609,6 +623,28 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
return inflight;
}
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
+{
+ netmem_set_pp(netmem, pool);
+ netmem_or_pp_magic(netmem, PP_SIGNATURE);
+
+ /* Ensuring all pages have been split into one fragment initially:
+ * page_pool_set_pp_info() is only called once for every page when it
+ * is allocated from the page allocator and page_pool_fragment_page()
+ * is dirtying the same cache line as the page->pp_magic above, so
+ * the overhead is negligible.
+ */
+ page_pool_fragment_netmem(netmem, 1);
+ if (pool->has_init_callback)
+ pool->slow.init_callback(netmem, pool->slow.init_arg);
+}
+
+void page_pool_clear_pp_info(netmem_ref netmem)
+{
+ netmem_clear_pp_magic(netmem);
+ netmem_set_pp(netmem, NULL);
+}
+
static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
netmem_ref netmem)
{
@@ -637,8 +673,13 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{
int count;
+ bool put;
- __page_pool_release_page_dma(pool, netmem);
+ put = true;
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ put = mp_dmabuf_devmem_release_page(pool, netmem);
+ else
+ __page_pool_release_page_dma(pool, netmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@@ -646,8 +687,10 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
trace_page_pool_state_release(pool, netmem, count);
- page_pool_clear_pp_info(netmem);
- put_page(netmem_to_page(netmem));
+ if (put) {
+ page_pool_clear_pp_info(netmem);
+ put_page(netmem_to_page(netmem));
+ }
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
@@ -692,8 +735,9 @@ static bool page_pool_recycle_in_cache(netmem_ref netmem,
static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
- return page_ref_count(netmem_to_page(netmem)) == 1 &&
- !page_is_pfmemalloc(netmem_to_page(netmem));
+ return netmem_is_net_iov(netmem) ||
+ (page_ref_count(netmem_to_page(netmem)) == 1 &&
+ !page_is_pfmemalloc(netmem_to_page(netmem)));
}
/* If the page refcnt == 1, this will try to recycle the page.
@@ -728,6 +772,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
/* Page found as candidate for recycling */
return netmem;
}
+
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
* Many drivers split up the page into fragments, and some
@@ -949,7 +994,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
/* Empty recycle ring */
while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
/* Verify the refcnt invariant of cached pages */
- if (!(page_ref_count(netmem_to_page(netmem)) == 1))
+ if (!(netmem_ref_count(netmem) == 1))
pr_crit("%s() page_pool refcnt %d violation\n",
__func__, netmem_ref_count(netmem));
@@ -964,6 +1009,12 @@ static void __page_pool_destroy(struct page_pool *pool)
page_pool_unlist(pool);
page_pool_uninit(pool);
+
+ if (pool->mp_priv) {
+ mp_dmabuf_devmem_destroy(pool);
+ static_branch_dec(&page_pool_mem_providers);
+ }
+
kfree(pool);
}
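/* A minimal sketch of the dispatch pattern these page_pool hunks introduce:
 * a global static branch counts pools that bound a memory provider, so the
 * hot paths only test pool->mp_priv while at least one such pool exists.
 * Names mirror the patch; sketch_alloc() itself is hypothetical.
 */
static netmem_ref sketch_alloc(struct page_pool *pool, gfp_t gfp)
{
	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
		return mp_dmabuf_devmem_alloc_netmems(pool, gfp);
	return __page_pool_alloc_pages_slow(pool, gfp);
}
/* The key is balanced by static_branch_inc() in page_pool_init() and
 * static_branch_dec() in __page_pool_destroy(), one per bound pool.
 */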
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 90665d40f1eb..57439787b9c2 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -3,10 +3,56 @@
#ifndef __PAGE_POOL_PRIV_H
#define __PAGE_POOL_PRIV_H
+#include <net/page_pool/helpers.h>
+
+#include "netmem_priv.h"
+
s32 page_pool_inflight(const struct page_pool *pool, bool strict);
int page_pool_list(struct page_pool *pool);
void page_pool_detached(struct page_pool *pool);
void page_pool_unlist(struct page_pool *pool);
+static inline bool
+page_pool_set_dma_addr_netmem(netmem_ref netmem, dma_addr_t addr)
+{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ netmem_set_dma_addr(netmem, addr >> PAGE_SHIFT);
+
+ /* We assume page alignment to shave off bottom bits; if this
+ * "compression" doesn't work we need to drop.
+ */
+ return addr != (dma_addr_t)netmem_get_dma_addr(netmem)
+ << PAGE_SHIFT;
+ }
+
+ netmem_set_dma_addr(netmem, addr);
+ return false;
+}
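/* A worked example of the "compression" above (illustrative values): with
 * 32-bit dma_addr_t storage and 4 KiB pages, a page-aligned mapping at
 * 0x123456000 is stored as 0x123456 after the PAGE_SHIFT shift; shifting
 * back reproduces it, so the helper returns false (success). An address at
 * or above 1ULL << (32 + PAGE_SHIFT) cannot round-trip, the comparison
 * returns true, and the caller drops the mapping.
 */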
+
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
+}
+
+#if defined(CONFIG_PAGE_POOL)
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem);
+void page_pool_clear_pp_info(netmem_ref netmem);
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq);
+#else
+static inline void page_pool_set_pp_info(struct page_pool *pool,
+ netmem_ref netmem)
+{
+}
+static inline void page_pool_clear_pp_info(netmem_ref netmem)
+{
+}
+static inline int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 3a3277ba167b..48335766c1bf 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -4,10 +4,12 @@
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
-#include <net/page_pool/types.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
+#include <net/page_pool/types.h>
#include <net/sock.h>
+#include "devmem.h"
#include "page_pool_priv.h"
#include "netdev-genl-gen.h"
@@ -212,6 +214,7 @@ static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
size_t inflight, refsz;
void *hdr;
@@ -241,6 +244,9 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
pool->user.detach_time))
goto err_cancel;
+ if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
+ goto err_cancel;
+
genlmsg_end(rsp, hdr);
return 0;
@@ -344,6 +350,30 @@ void page_pool_unlist(struct page_pool *pool)
mutex_unlock(&page_pools_lock);
}
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+ struct page_pool *pool;
+ struct hlist_node *n;
+
+ if (!binding)
+ return 0;
+
+ mutex_lock(&page_pools_lock);
+ hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
+ if (pool->mp_priv != binding)
+ continue;
+
+ if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
+ mutex_unlock(&page_pools_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&page_pools_lock);
+ return -ENODATA;
+}
+
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
struct page_pool *pool;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 197a50ef8e2e..34f68ef74b8f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -69,7 +69,7 @@
*
* By design there should only be *one* "controlling" process. In practice
* multiple write accesses gives unpredictable result. Understood by "write"
- * to /proc gives result code thats should be read be the "writer".
+ * to /proc gives result code that should be read by the "writer".
* For practical use this should be no problem.
*
* Note when adding devices to a specific CPU there good idea to also assign
@@ -2371,11 +2371,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
if (pkt_dev->spi) {
/* We need as quick as possible to find the right SA
- * Searching with minimum criteria to archieve this.
+ * Searching with minimum criteria to achieve this.
*/
x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
} else {
- /* slow path: we dont already have xfrm_state */
+ /* slow path: we don't already have xfrm_state */
x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
(xfrm_address_t *)&pkt_dev->cur_daddr,
(xfrm_address_t *)&pkt_dev->cur_saddr,
@@ -3838,8 +3838,8 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
pkt_dev->ipsproto = IPPROTO_ESP;
- /* xfrm tunnel mode needs additional dst to extract outter
- * ip header protocol/ttl/id field, here creat a phony one.
+ /* xfrm tunnel mode needs additional dst to extract outer
+ * ip header protocol/ttl/id field, here create a phony one.
* instead of looking for a valid rt, which definitely hurting
* performance under such circumstance.
*/
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 73fd7f543fd0..f0a520987085 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2724,7 +2724,7 @@ static int do_set_proto_down(struct net_device *dev,
bool proto_down;
int err;
- if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
+ if (!dev->change_proto_down) {
NL_SET_ERR_MSG(extack, "Protodown not supported by device");
return -EOPNOTSUPP;
}
@@ -4087,8 +4087,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
}
return skb;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return NULL;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 83f8cd8aa2d1..74149dc4ee31 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -88,6 +88,7 @@
#include <linux/textsearch.h>
#include "dev.h"
+#include "netmem_priv.h"
#include "sock_destructor.h"
#ifdef CONFIG_SKB_EXTENSIONS
@@ -314,8 +315,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
fragsz = SKB_DATA_ALIGN(fragsz);
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
- data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
- align_mask);
+ data = __page_frag_alloc_align(&nc->page, fragsz,
+ GFP_ATOMIC | __GFP_NOWARN, align_mask);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return data;
@@ -330,7 +331,8 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
fragsz = SKB_DATA_ALIGN(fragsz);
- data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ data = __page_frag_alloc_align(nc, fragsz,
+ GFP_ATOMIC | __GFP_NOWARN,
align_mask);
} else {
local_bh_disable();
@@ -349,7 +351,7 @@ static struct sk_buff *napi_skb_cache_get(void)
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!nc->skb_count)) {
nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
- GFP_ATOMIC,
+ GFP_ATOMIC | __GFP_NOWARN,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
if (unlikely(!nc->skb_count)) {
@@ -418,7 +420,8 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -469,7 +472,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -917,9 +921,9 @@ static void skb_clone_fraglist(struct sk_buff *skb)
skb_get(list);
}
-static bool is_pp_page(struct page *page)
+static bool is_pp_netmem(netmem_ref netmem)
{
- return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
+ return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE;
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
@@ -1017,9 +1021,7 @@ EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
- struct page *page = netmem_to_page(netmem);
-
- page = compound_head(page);
+ netmem = netmem_compound_head(netmem);
/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
* in order to preserve any existing bits, such as bit 0 for the
@@ -1028,10 +1030,10 @@ bool napi_pp_put_page(netmem_ref netmem)
* and page_is_pfmemalloc() is checked in __page_pool_put_page()
* to avoid recycling the pfmemalloc page.
*/
- if (unlikely(!is_pp_page(page)))
+ if (unlikely(!is_pp_netmem(netmem)))
return false;
- page_pool_put_full_netmem(page->pp, page_to_netmem(page), false);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
return true;
}
@@ -1058,7 +1060,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
static int skb_pp_frag_ref(struct sk_buff *skb)
{
struct skb_shared_info *shinfo;
- struct page *head_page;
+ netmem_ref head_netmem;
int i;
if (!skb->pp_recycle)
@@ -1067,11 +1069,11 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
shinfo = skb_shinfo(skb);
for (i = 0; i < shinfo->nr_frags; i++) {
- head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
- if (likely(is_pp_page(head_page)))
- page_pool_ref_page(head_page);
+ head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
+ if (likely(is_pp_netmem(head_netmem)))
+ page_pool_ref_netmem(head_netmem);
else
- page_ref_inc(head_page);
+ page_ref_inc(netmem_to_page(head_netmem));
}
return 0;
}
@@ -1369,6 +1371,14 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
struct page *p;
u8 *vaddr;
+ if (skb_frag_is_net_iov(frag)) {
+ printk("%sskb frag %d: not readable\n", level, i);
+ len -= skb_frag_size(frag);
+ if (!len)
+ break;
+ continue;
+ }
+
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
@@ -1962,6 +1972,9 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!num_frags)
goto release;
@@ -2135,6 +2148,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
unsigned int size;
int headerlen;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
@@ -2473,6 +2489,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
struct sk_buff *n;
int oldheadroom;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
@@ -2817,6 +2836,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
*/
int i, k, eat = (skb->tail + delta) - skb->end;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
@@ -2970,6 +2992,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -3158,9 +3183,15 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
+ if (!skb_frags_readable(skb))
+ return false;
+
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+ if (WARN_ON_ONCE(!skb_frag_page(f)))
+ return false;
+
if (__splice_segment(skb_frag_page(f),
skb_frag_off(f), skb_frag_size(f),
offset, len, spd, false, sk, pipe))
@@ -3378,6 +3409,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -3457,6 +3491,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
+ if (WARN_ON_ONCE(!skb_frags_readable(skb)))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3557,6 +3594,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
+ if (!skb_frags_readable(skb))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -4048,6 +4088,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+ skb1->unreadable = skb->unreadable;
skb_shinfo(skb)->nr_frags = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
@@ -4095,6 +4136,8 @@ static inline void skb_split_no_header(struct sk_buff *skb,
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
+
+ skb1->unreadable = skb->unreadable;
}
/**
@@ -4169,8 +4212,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
/* Actual merge is delayed until the point when we know we can
* commit all, so that we don't have to undo partial changes
*/
- if (!to ||
- !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+ if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
skb_frag_off(fragfrom))) {
merge = -1;
} else {
@@ -4333,6 +4375,9 @@ next_skb:
return block_limit - abs_offset;
}
+ if (!skb_frags_readable(st->cur_skb))
+ return 0;
+
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
@@ -4409,6 +4454,41 @@ void skb_abort_seq_read(struct skb_seq_state *st)
}
EXPORT_SYMBOL(skb_abort_seq_read);
+/**
+ * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
+ * @st: source skb_seq_state
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes starting at @offset in the source @st to the destination
+ * buffer @to. `offset` should increase (or be unchanged) with each subsequent
+ * call to this function. If offset needs to decrease from the previous use,
+ * `st` should be reset first.
+ *
+ * Return: 0 on success or -EINVAL if the copy ended early
+ */
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
+{
+ const u8 *data;
+ u32 sqlen;
+
+ for (;;) {
+ sqlen = skb_seq_read(offset, &data, st);
+ if (sqlen == 0)
+ return -EINVAL;
+ if (sqlen >= len) {
+ memcpy(to, data, len);
+ return 0;
+ }
+ memcpy(to, data, sqlen);
+ to += sqlen;
+ offset += sqlen;
+ len -= sqlen;
+ }
+}
+EXPORT_SYMBOL(skb_copy_seq_read);
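/* A usage sketch for skb_copy_seq_read() (an assumed caller pattern, not
 * taken from this patch): copy a header that may straddle fragments.
 */
static int read_hdr_sketch(struct sk_buff *skb, void *hdr, int hdrlen)
{
	struct skb_seq_state st;
	int err;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	err = skb_copy_seq_read(&st, 0, hdr, hdrlen);
	skb_abort_seq_read(&st);
	return err;	/* 0 on success, -EINVAL if the skb ran short */
}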
+
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
@@ -5161,7 +5241,7 @@ EXPORT_SYMBOL_GPL(skb_to_sgvec);
* 3. sg_unmark_end
* 4. skb_to_sgvec(payload2)
*
- * When mapping mutilple payload conditionally, skb_to_sgvec_nomark
+ * When mapping multiple payload conditionally, skb_to_sgvec_nomark
* is more preferable.
*/
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -5945,7 +6025,10 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (to->pp_recycle != from->pp_recycle)
return false;
- if (len <= skb_tailroom(to)) {
+ if (skb_frags_readable(from) != skb_frags_readable(to))
+ return false;
+
+ if (len <= skb_tailroom(to) && skb_frags_readable(from)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
@@ -6019,7 +6102,7 @@ EXPORT_SYMBOL(skb_try_coalesce);
* @skb: buffer to clean
* @xnet: packet is crossing netns
*
- * skb_scrub_packet can be used after encapsulating or decapsulting a packet
+ * skb_scrub_packet can be used after encapsulating or decapsulating a packet
* into/from a tunnel. Some information have to be cleared during these
* operations.
* skb_scrub_packet can also be used to clean a skb before injecting it in
@@ -6122,6 +6205,9 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
@@ -6241,7 +6327,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
return err;
skb->protocol = skb->vlan_proto;
- skb->mac_len += VLAN_HLEN;
+ skb->network_header -= VLAN_HLEN;
skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
}
@@ -6801,7 +6887,7 @@ void skb_condense(struct sk_buff *skb)
{
if (skb->data_len) {
if (skb->data_len > skb->end - skb->tail ||
- skb_cloned(skb))
+ skb_cloned(skb) || !skb_frags_readable(skb))
return;
/* Nice, we can free page frag(s) right now */
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index bbf40b999713..b1dcbd3be89e 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -293,7 +293,7 @@ out:
/* If we trim data a full sg elem before curr pointer update
* copybreak and current so that any future copy operations
* start at new copy location.
- * However trimed data that has not yet been used in a copy op
+ * However trimmed data that has not yet been used in a copy op
* does not require an update.
*/
if (!msg->sg.size) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 9abc4fe25953..fe87f9bd8f16 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -124,6 +124,7 @@
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
@@ -1049,6 +1050,69 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
return 0;
}
+#ifdef CONFIG_PAGE_POOL
+
+/* This is the number of tokens that the user can SO_DEVMEM_DONTNEED in
+ * 1 syscall. The limit exists to cap the amount of memory the kernel
+ * allocates to copy these tokens.
+ */
+#define MAX_DONTNEED_TOKENS 128
+
+static noinline_for_stack int
+sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
+{
+ unsigned int num_tokens, i, j, k, netmem_num = 0;
+ struct dmabuf_token *tokens;
+ netmem_ref netmems[16];
+ int ret = 0;
+
+ if (!sk_is_tcp(sk))
+ return -EBADF;
+
+ if (optlen % sizeof(struct dmabuf_token) ||
+ optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
+ return -EINVAL;
+
+ tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL);
+ if (!tokens)
+ return -ENOMEM;
+
+ num_tokens = optlen / sizeof(struct dmabuf_token);
+ if (copy_from_sockptr(tokens, optval, optlen)) {
+ kvfree(tokens);
+ return -EFAULT;
+ }
+
+ xa_lock_bh(&sk->sk_user_frags);
+ for (i = 0; i < num_tokens; i++) {
+ for (j = 0; j < tokens[i].token_count; j++) {
+ netmem_ref netmem = (__force netmem_ref)__xa_erase(
+ &sk->sk_user_frags, tokens[i].token_start + j);
+
+ if (netmem &&
+ !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) {
+ netmems[netmem_num++] = netmem;
+ if (netmem_num == ARRAY_SIZE(netmems)) {
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+ netmem_num = 0;
+ xa_lock_bh(&sk->sk_user_frags);
+ }
+ ret++;
+ }
+ }
+ }
+
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+
+ kvfree(tokens);
+ return ret;
+}
+#endif
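/* A user-space sketch of releasing devmem frags (assumptions: this kernel's
 * uapi exposes struct dmabuf_token and SO_DEVMEM_DONTNEED; the tokens came
 * from earlier devmem receives). Per the handler above, a successful call
 * returns the number of frags actually freed, which can be lower than the
 * number of tokens passed in.
 */
#include <sys/socket.h>
#include <linux/uio.h>

static int devmem_dontneed_sketch(int sock, struct dmabuf_token *tok,
				  unsigned int n)
{
	return setsockopt(sock, SOL_SOCKET, SO_DEVMEM_DONTNEED,
			  tok, n * sizeof(*tok));
}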
+
void sockopt_lock_sock(struct sock *sk)
{
/* When current->bpf_ctx is set, the setsockopt is called from
@@ -1211,6 +1275,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
ret = -EOPNOTSUPP;
return ret;
}
+#ifdef CONFIG_PAGE_POOL
+ case SO_DEVMEM_DONTNEED:
+ return sock_devmem_dontneed(sk, optval, optlen);
+#endif
}
sockopt_lock_sock(sk);
@@ -2048,7 +2116,7 @@ static inline void sock_lock_init(struct sock *sk)
/*
* Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
- * even temporarly, because of RCU lookups. sk_node should also be left as is.
+ * even temporarily, because of RCU lookups. sk_node should also be left as is.
* We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
*/
static void sock_copy(struct sock *nsk, const struct sock *osk)
@@ -2538,7 +2606,7 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
skb_set_hash_from_sk(skb, sk);
/*
* We used to take a refcount on sk, but following operation
- * is enough to guarantee sk_free() wont free this sock until
+ * is enough to guarantee sk_free() won't free this sock until
* all in-flight packets are completed
*/
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
@@ -3429,7 +3497,7 @@ static void sock_def_destruct(struct sock *sk)
void sk_send_sigurg(struct sock *sk)
{
if (sk->sk_socket && sk->sk_socket->file)
- if (send_sigurg(&sk->sk_socket->file->f_owner))
+ if (send_sigurg(sk->sk_socket->file))
sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);
@@ -3697,7 +3765,7 @@ EXPORT_SYMBOL(sock_recv_errqueue);
*
* FIX: POSIX 1003.1g is very ambiguous here. It states that
* asynchronous errors should be reported by getsockopt. We assume
- * this means if you specify SO_ERROR (otherwise whats the point of it).
+ * this means if you specify SO_ERROR (otherwise what is the point of it).
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d3dbb92153f2..242c91a6e3d3 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -67,46 +67,39 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
- u32 ufd = attr->target_fd;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
mutex_lock(&sockmap_mutex);
ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
mutex_unlock(&sockmap_mutex);
- fdput(f);
return ret;
}
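/* What CLASS(fd, f)(attr->target_fd) does here, sketched under the
 * assumption of the <linux/cleanup.h> fd class: f acts as a scoped
 * struct fd initialized by fdget() whose fdput() runs automatically when f
 * leaves scope, which is why the explicit fdput() calls and the put_map
 * label disappear in this hunk and the ones below.
 */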
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
- u32 ufd = attr->target_fd;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
prog = bpf_prog_get(attr->attach_bpf_fd);
- if (IS_ERR(prog)) {
- ret = PTR_ERR(prog);
- goto put_map;
- }
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
if (prog->type != ptype) {
ret = -EINVAL;
@@ -118,8 +111,6 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
mutex_unlock(&sockmap_mutex);
put_prog:
bpf_prog_put(prog);
-put_map:
- fdput(f);
return ret;
}
@@ -1183,6 +1174,7 @@ static void sock_hash_free(struct bpf_map *map)
sock_put(elem->sk);
sock_hash_free_elem(htab, elem);
}
+ cond_resched();
}
/* wait for psock readers accessing its map link */
@@ -1550,18 +1542,17 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
+ u32 prog_cnt = 0, flags = 0;
struct bpf_prog **pprog;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
u32 id = 0;
int ret;
if (attr->query.query_flags)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1593,7 +1584,6 @@ end:
copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
ret = -EFAULT;
- fdput(f);
return ret;
}
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5a165286e4d8..4211710393a8 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -173,10 +173,9 @@ static bool __reuseport_detach_closed_sock(struct sock *sk,
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
- unsigned int size = sizeof(struct sock_reuseport) +
- sizeof(struct sock *) * max_socks;
- struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);
+ struct sock_reuseport *reuse;
+ reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
if (!reuse)
return NULL;
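/* struct_size(reuse, socks, max_socks) computes the same
 * sizeof(*reuse) + max_socks * sizeof(reuse->socks[0]) as the deleted
 * open-coded sum, but saturates on overflow so an absurd max_socks makes
 * kzalloc() fail instead of returning an undersized buffer.
 */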
diff --git a/net/core/utils.c b/net/core/utils.c
index c994e95172ac..27f4cffaae05 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Generic address resultion entity
+ * Generic address resolution entity
*
* Authors:
* net_random Alan Cox
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index ee7b272ab715..281bbac5539d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -111,9 +111,10 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag0 : zero-based value represents port
- * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ * (eg, 0x0=port1, 0x2=port3, 0x3=port4)
*/
+#define KSZ8795_TAIL_TAG_EG_PORT_M GENMASK(1, 0)
#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
@@ -141,7 +142,8 @@ static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+ return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
+ KSZ_EGRESS_TAG_LEN);
}
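/* A worked example of the new mask (values from the comment above): a frame
 * that ingressed on front port 3 carries tag0 = 0x2; tag0 &
 * KSZ8795_TAIL_TAG_EG_PORT_M (GENMASK(1, 0) == 0x3) gives zero-based index
 * 2, which ksz_common_rcv() maps to the port 3 net_device. The old mask of
 * 7 also passed bit 2, which is not part of the port index on this family.
 */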
static const struct dsa_device_ops ksz8795_netdev_ops = {
@@ -176,8 +178,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
@@ -310,7 +313,7 @@ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
+ unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
diff --git a/net/dsa/user.c b/net/dsa/user.c
index f5adfa1d978a..74eda9b30608 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -2642,11 +2642,12 @@ void dsa_user_setup_tagger(struct net_device *user)
user->features = conduit->vlan_features | NETIF_F_HW_TC;
user->hw_features |= NETIF_F_HW_TC;
- user->features |= NETIF_F_LLTX;
if (user->needed_tailroom)
user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ user->lltx = true;
}
int dsa_user_suspend(struct net_device *user_dev)
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 9a190635fe95..9b540644ba31 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -8,4 +8,5 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \
- module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o
+ module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o \
+ phy.o
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index f6f136ec7ddf..f22051f33868 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -13,7 +13,7 @@
const struct nla_policy ethnl_cable_test_act_policy[] = {
[ETHTOOL_A_CABLE_TEST_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static int ethnl_cable_test_started(struct phy_device *phydev, u8 cmd)
@@ -58,6 +58,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
struct net_device *dev;
int ret;
@@ -69,12 +70,16 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
return ret;
dev = req_info.dev;
- if (!dev->phydev) {
+
+ rtnl_lock();
+ phydev = ethnl_req_get_phydev(&req_info,
+ tb[ETHTOOL_A_CABLE_TEST_HEADER],
+ info->extack);
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
- goto out_dev_put;
+ goto out_rtnl;
}
- rtnl_lock();
ops = ethtool_phy_ops;
if (!ops || !ops->start_cable_test) {
ret = -EOPNOTSUPP;
@@ -85,17 +90,15 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_rtnl;
- ret = ops->start_cable_test(dev->phydev, info->extack);
+ ret = ops->start_cable_test(phydev, info->extack);
ethnl_ops_complete(dev);
if (!ret)
- ethnl_cable_test_started(dev->phydev,
- ETHTOOL_MSG_CABLE_TEST_NTF);
+ ethnl_cable_test_started(phydev, ETHTOOL_MSG_CABLE_TEST_NTF);
out_rtnl:
rtnl_unlock();
-out_dev_put:
ethnl_parse_header_dev_put(&req_info);
return ret;
}
@@ -160,7 +163,8 @@ void ethnl_cable_test_finished(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(ethnl_cable_test_finished);
-int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result)
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src)
{
struct nlattr *nest;
int ret = -EMSGSIZE;
@@ -173,6 +177,10 @@ int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result)
goto err;
if (nla_put_u8(phydev->skb, ETHTOOL_A_CABLE_RESULT_CODE, result))
goto err;
+ if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) {
+ if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_RESULT_SRC, src))
+ goto err;
+ }
nla_nest_end(phydev->skb, nest);
return 0;
@@ -181,9 +189,10 @@ err:
nla_nest_cancel(phydev->skb, nest);
return ret;
}
-EXPORT_SYMBOL_GPL(ethnl_cable_test_result);
+EXPORT_SYMBOL_GPL(ethnl_cable_test_result_with_src);
-int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm)
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src)
{
struct nlattr *nest;
int ret = -EMSGSIZE;
@@ -197,6 +206,11 @@ int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm)
goto err;
if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_CM, cm))
goto err;
+ if (src != ETHTOOL_A_CABLE_INF_SRC_UNSPEC) {
+ if (nla_put_u32(phydev->skb, ETHTOOL_A_CABLE_FAULT_LENGTH_SRC,
+ src))
+ goto err;
+ }
nla_nest_end(phydev->skb, nest);
return 0;
@@ -205,7 +219,7 @@ err:
nla_nest_cancel(phydev->skb, nest);
return ret;
}
-EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length);
+EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length_with_src);
static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 },
@@ -216,7 +230,7 @@ static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
const struct nla_policy ethnl_cable_test_tdr_act_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .type = NLA_NESTED },
};
@@ -305,6 +319,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
struct phy_tdr_config cfg;
struct net_device *dev;
int ret;
@@ -317,10 +332,6 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
return ret;
dev = req_info.dev;
- if (!dev->phydev) {
- ret = -EOPNOTSUPP;
- goto out_dev_put;
- }
ret = ethnl_act_cable_test_tdr_cfg(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG],
info, &cfg);
@@ -328,6 +339,14 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
goto out_dev_put;
rtnl_lock();
+ phydev = ethnl_req_get_phydev(&req_info,
+ tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER],
+ info->extack);
+ if (IS_ERR_OR_NULL(phydev)) {
+ ret = -EOPNOTSUPP;
+ goto out_rtnl;
+ }
+
ops = ethtool_phy_ops;
if (!ops || !ops->start_cable_test_tdr) {
ret = -EOPNOTSUPP;
@@ -338,12 +357,12 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_rtnl;
- ret = ops->start_cable_test_tdr(dev->phydev, info->extack, &cfg);
+ ret = ops->start_cable_test_tdr(phydev, info->extack, &cfg);
ethnl_ops_complete(dev);
if (!ret)
- ethnl_cable_test_started(dev->phydev,
+ ethnl_cable_test_started(phydev,
ETHTOOL_MSG_CABLE_TEST_TDR_NTF);
out_rtnl:
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index cee188da54f8..ca4f80282448 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -114,8 +114,7 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
struct net_device *dev = req_info->dev;
struct ethtool_channels channels = {};
struct nlattr **tb = info->attrs;
- u32 err_attr, max_rxfh_in_use;
- u64 max_rxnfc_in_use;
+ u32 err_attr;
int ret;
dev->ethtool_ops->get_channels(dev, &channels);
@@ -166,20 +165,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
return -EINVAL;
}
- /* ensure the new Rx count fits within the configured Rx flow
- * indirection table/rxnfc settings
- */
- if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
- max_rxnfc_in_use = 0;
- max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
- if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
- GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use);
- return -EINVAL;
- }
- if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
- return -EINVAL;
- }
+ ret = ethtool_check_max_channel(dev, channels, info);
+ if (ret)
+ return ret;
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h
index e71cc3e1b7eb..3e7c293af78c 100644
--- a/net/ethtool/cmis.h
+++ b/net/ethtool/cmis.h
@@ -108,7 +108,6 @@ void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags);
void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data,
u8 page, u32 offset, u32 length);
-void ethtool_cmis_page_fini(struct ethtool_module_eeprom *page_data);
struct ethtool_cmis_cdb *
ethtool_cmis_cdb_init(struct net_device *dev,
diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c
index 1bb08783b60d..4d5581147952 100644
--- a/net/ethtool/cmis_cdb.c
+++ b/net/ethtool/cmis_cdb.c
@@ -100,7 +100,8 @@ static u8 cmis_cdb_advert_rpl_inst_supported(struct cmis_cdb_advert_rpl *rpl)
}
static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb,
- struct net_device *dev)
+ struct net_device *dev,
+ struct ethnl_module_fw_flash_ntf_params *ntf_params)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_module_eeprom page_data = {};
@@ -119,8 +120,12 @@ static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb,
return err;
}
- if (!cmis_cdb_advert_rpl_inst_supported(&rpl))
+ if (!cmis_cdb_advert_rpl_inst_supported(&rpl)) {
+ ethnl_module_fw_flash_ntf_err(dev, ntf_params,
+ "CDB functionality is not supported",
+ NULL);
return -EOPNOTSUPP;
+ }
cdb->read_write_len_ext = rpl.read_write_len_ext;
@@ -282,7 +287,7 @@ ethtool_cmis_cdb_init(struct net_device *dev,
goto err;
}
- err = cmis_cdb_advertisement_get(cdb, dev);
+ err = cmis_cdb_advertisement_get(cdb, dev, ntf_params);
if (err < 0)
goto err;
@@ -444,6 +449,9 @@ static void cmis_cdb_status_fail_msg_get(u8 status, char **err_msg)
case 0b01000101:
*err_msg = "CDB status failed: CdbChkCode error";
break;
+ case 0b01000110:
+ *err_msg = "CDB status failed: Password error";
+ break;
default:
*err_msg = "Unknown failure reason";
}
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 07032babd1b6..dd345efa114b 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include <linux/ptp_clock_kernel.h>
+#include "netlink.h"
#include "common.h"
const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
@@ -24,8 +25,6 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
[NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
[NETIF_F_GSO_BIT] = "tx-generic-segmentation",
- [NETIF_F_LLTX_BIT] = "tx-lockless",
- [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
[NETIF_F_GRO_BIT] = "rx-gro",
[NETIF_F_GRO_HW_BIT] = "rx-gro-hw",
[NETIF_F_LRO_BIT] = "rx-lro",
@@ -51,7 +50,6 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
- [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
[NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
[NETIF_F_RXHASH_BIT] = "rx-hashing",
[NETIF_F_RXCSUM_BIT] = "rx-checksum",
@@ -429,6 +427,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
[const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
[const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
@@ -539,7 +538,7 @@ static int ethtool_get_rxnfc_rule_count(struct net_device *dev)
return info.rule_cnt;
}
-int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
+static int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxnfc *info;
@@ -609,7 +608,7 @@ static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev)
return max_ring;
}
-u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
+static u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
{
struct ethtool_rxfh_param rxfh = {};
u32 dev_size, current_max;
@@ -650,10 +649,47 @@ out_free:
return current_max;
}
+int ethtool_check_max_channel(struct net_device *dev,
+ struct ethtool_channels channels,
+ struct genl_info *info)
+{
+ u64 max_rxnfc_in_use;
+ u32 max_rxfh_in_use;
+ int max_mp_in_use;
+
+ /* ensure the new Rx count fits within the configured Rx flow
+ * indirection table/rxnfc settings
+ */
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+ max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
+ if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use);
+ return -EINVAL;
+ }
+ if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
+ return -EINVAL;
+ }
+
+ max_mp_in_use = dev_get_min_mp_channel_count(dev);
+ if (channels.combined_count + channels.rx_count <= max_mp_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing memory provider setting (%d)", max_mp_in_use);
+ return -EINVAL;
+ }
+
+ return 0;
+}
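/* A worked example of these checks (illustrative numbers): if the RSS
 * indirection table currently references rx ring 7, max_rxfh_in_use is 7
 * and a request for combined_count = 4, rx_count = 2 is rejected, since
 * 4 + 2 <= 7 would leave table entries pointing at rings that no longer
 * exist. The rxnfc and memory-provider (dev_get_min_mp_channel_count())
 * comparisons guard their users the same way.
 */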
+
int ethtool_check_ops(const struct ethtool_ops *ops)
{
if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params))
return -EINVAL;
+ if (WARN_ON(ops->rxfh_max_num_contexts == 1))
+ return -EINVAL;
/* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime,
* the fact that ops are checked at registration time does not
* mean the ops attached to a netdev later on are sane.
@@ -665,20 +701,21 @@ int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info
{
const struct ethtool_ops *ops = dev->ethtool_ops;
struct phy_device *phydev = dev->phydev;
+ int err = 0;
memset(info, 0, sizeof(*info));
info->cmd = ETHTOOL_GET_TS_INFO;
+ info->phc_index = -1;
if (phy_is_default_hwtstamp(phydev) && phy_has_tsinfo(phydev))
- return phy_ts_info(phydev, info);
- if (ops->get_ts_info)
- return ops->get_ts_info(dev, info);
+ err = phy_ts_info(phydev, info);
+ else if (ops->get_ts_info)
+ err = ops->get_ts_info(dev, info);
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
- return 0;
+ return err;
}
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 863806fcf01a..d55d5201b085 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -20,6 +20,8 @@ struct link_mode_info {
u8 duplex;
};
+struct genl_info;
+
extern const char
netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN];
extern const char
@@ -42,8 +44,9 @@ int __ethtool_get_link(struct net_device *dev);
bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
-u32 ethtool_get_max_rxfh_channel(struct net_device *dev);
-int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max);
+int ethtool_check_max_channel(struct net_device *dev,
+ struct ethtool_channels channels,
+ struct genl_info *info);
int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
extern const struct ethtool_phy_ops *ethtool_phy_ops;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index ae041f51cd2d..65cfe76dafbe 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1230,7 +1230,8 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
+ if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
+ ops->create_rxfh_context))
return -EOPNOTSUPP;
rxfh.indir_size = rxfh_dev.indir_size;
@@ -1263,10 +1264,15 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (rxfh_dev.indir)
memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx),
indir_bytes);
- if (rxfh_dev.key)
- memcpy(rxfh_dev.key, ethtool_rxfh_context_key(ctx),
- user_key_size);
- rxfh_dev.hfunc = ctx->hfunc;
+ if (!ops->rxfh_per_ctx_key) {
+ rxfh_dev.key_size = 0;
+ } else {
+ if (rxfh_dev.key)
+ memcpy(rxfh_dev.key,
+ ethtool_rxfh_context_key(ctx),
+ user_key_size);
+ rxfh_dev.hfunc = ctx->hfunc;
+ }
rxfh_dev.input_xfrm = ctx->input_xfrm;
ret = 0;
} else {
@@ -1284,6 +1290,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
sizeof(rxfh.input_xfrm))) {
ret = -EFAULT;
} else if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh, key_size),
+ &rxfh_dev.key_size,
+ sizeof(rxfh.key_size))) {
+ ret = -EFAULT;
+ } else if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh, rss_config[0]),
rss_config, total_size)) {
ret = -EFAULT;
@@ -1360,7 +1371,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !ops->cap_rss_ctx_supported)
+ if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
+ ops->create_rxfh_context))
return -EOPNOTSUPP;
/* Check input data transformation capabilities */
if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR &&
@@ -1390,6 +1402,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+ /* Check settings which may be global rather than per RSS-context */
+ if (rxfh.rss_context && !ops->rxfh_per_ctx_key)
+ if (rxfh.key_size ||
+ (rxfh.hfunc && rxfh.hfunc != ETH_RSS_HASH_NO_CHANGE) ||
+ (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE))
+ return -EOPNOTSUPP;
+
rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
@@ -2072,8 +2091,6 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
{
struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
u16 from_channel, to_channel;
- u64 max_rxnfc_in_use;
- u32 max_rxfh_in_use;
unsigned int i;
int ret;
@@ -2103,14 +2120,9 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
(!channels.rx_count || !channels.tx_count))
return -EINVAL;
- /* ensure the new Rx count fits within the configured Rx flow
- * indirection table/rxnfc settings */
- if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
- max_rxnfc_in_use = 0;
- max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev);
- if (channels.combined_count + channels.rx_count <=
- max_t(u64, max_rxnfc_in_use, max_rxfh_in_use))
- return -EINVAL;
+ ret = ethtool_check_max_channel(dev, channels, NULL);
+ if (ret)
+ return ret;
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 5c317d23787b..30b8ce275159 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -35,7 +35,7 @@ static int linkinfo_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
ret = __ethtool_get_link_ksettings(dev, &data->ksettings);
- if (ret < 0 && info)
+ if (ret < 0)
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
ethnl_ops_complete(dev);
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index b2591db49f7d..259cd9ef1f2a 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -40,7 +40,7 @@ static int linkmodes_prepare_data(const struct ethnl_req_info *req_base,
return ret;
ret = __ethtool_get_link_ksettings(dev, &data->ksettings);
- if (ret < 0 && info) {
+ if (ret < 0) {
GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
goto out;
}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index cb1eea00e349..e3f0ef6b851b 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -2,6 +2,7 @@
#include <net/sock.h>
#include <linux/ethtool_netlink.h>
+#include <linux/phy_link_topology.h>
#include <linux/pm_runtime.h>
#include "netlink.h"
#include "module_fw.h"
@@ -31,6 +32,24 @@ const struct nla_policy ethnl_header_policy_stats[] = {
ETHTOOL_FLAGS_STATS),
};
+const struct nla_policy ethnl_header_policy_phy[] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = ALTIFNAMSIZ - 1 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_BASIC),
+ [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+const struct nla_policy ethnl_header_policy_phy_stats[] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = ALTIFNAMSIZ - 1 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_STATS),
+ [ETHTOOL_A_HEADER_PHY_INDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
enum ethnl_sock_type type)
{
@@ -119,7 +138,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
const struct nlattr *header, struct net *net,
struct netlink_ext_ack *extack, bool require_dev)
{
- struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy)];
+ struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy_phy)];
const struct nlattr *devname_attr;
struct net_device *dev = NULL;
u32 flags = 0;
@@ -134,7 +153,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
/* No validation here, command policy should have a nested policy set
* for the header, therefore validation should have already been done.
*/
- ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy) - 1, header,
+ ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy_phy) - 1, header,
NULL, extack);
if (ret < 0)
return ret;
@@ -175,11 +194,45 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
return -EINVAL;
}
+ if (tb[ETHTOOL_A_HEADER_PHY_INDEX]) {
+ if (dev) {
+ req_info->phy_index = nla_get_u32(tb[ETHTOOL_A_HEADER_PHY_INDEX]);
+ } else {
+ NL_SET_ERR_MSG_ATTR(extack, header,
+ "phy_index set without a netdev");
+ return -EINVAL;
+ }
+ }
+
req_info->dev = dev;
req_info->flags = flags;
return 0;
}
+struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ const struct nlattr *header,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_device *phydev;
+
+ ASSERT_RTNL();
+
+ if (!req_info->dev)
+ return NULL;
+
+ if (!req_info->phy_index)
+ return req_info->dev->phydev;
+
+ phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index);
+ if (!phydev) {
+ NL_SET_ERR_MSG_ATTR(extack, header,
+ "no phy matching phyindex");
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
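/* A caller sketch matching the cabletest hunks earlier in this series:
 * resolve the target PHY under RTNL and treat both "bad phy_index" and
 * "no PHY attached" as unsupported. resolve_phy_sketch() is hypothetical.
 */
static int resolve_phy_sketch(struct ethnl_req_info *req_info,
			      struct nlattr *header,
			      struct netlink_ext_ack *extack)
{
	struct phy_device *phydev;
	int ret = 0;

	rtnl_lock();
	phydev = ethnl_req_get_phydev(req_info, header, extack);
	if (IS_ERR_OR_NULL(phydev)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* ... use phydev while RTNL is held ... */
out:
	rtnl_unlock();
	return ret;
}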
+
/**
* ethnl_fill_reply_header() - Put common header into a reply message
* @skb: skb with the message
@@ -1128,6 +1181,8 @@ static const struct genl_ops ethtool_genl_ops[] = {
{
.cmd = ETHTOOL_MSG_RSS_GET,
.doit = ethnl_default_doit,
+ .start = ethnl_rss_dump_start,
+ .dumpit = ethnl_rss_dumpit,
.policy = ethnl_rss_get_policy,
.maxattr = ARRAY_SIZE(ethnl_rss_get_policy) - 1,
},
@@ -1179,6 +1234,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_module_fw_flash_act_policy,
.maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_PHY_GET,
+ .doit = ethnl_phy_doit,
+ .start = ethnl_phy_start,
+ .dumpit = ethnl_phy_dumpit,
+ .done = ethnl_phy_done,
+ .policy = ethnl_phy_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 46ec273a87c5..203b08eb6c6f 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -251,6 +251,9 @@ static inline unsigned int ethnl_reply_header_size(void)
* @dev: network device the request is for (may be null)
* @dev_tracker: refcount tracker for @dev reference
* @flags: request flags common for all request types
+ * @phy_index: index of the phy_device connected to @dev that this request is
+ * for. Can be 0 if the request doesn't target a phy, or if the
+ * @dev's attached phy is targeted.
*
* This is a common base for request specific structures holding data from
* parsed userspace request. These always embed struct ethnl_req_info at
@@ -260,6 +263,7 @@ struct ethnl_req_info {
struct net_device *dev;
netdevice_tracker dev_tracker;
u32 flags;
+ u32 phy_index;
};
static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
@@ -268,6 +272,27 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
}
/**
+ * ethnl_req_get_phydev() - Gets the phy_device targeted by this request,
+ * if any. Must be called under rtnl_lock().
+ * @req_info: The ethnl request to get the phy from.
+ * @header: The netlink header, used for error reporting.
+ * @extack: The netlink extended ACK, for error reporting.
+ *
+ * The caller must hold RTNL, until it's done interacting with the returned
+ * phy_device.
+ *
+ * Return: A phy_device pointer corresponding to the passed phy_index if one
+ * is provided. If not, the phy_device attached to the
+ * net_device targeted by this request is returned. If there's no
+ * targeted net_device, or no phy_device is attached, NULL is
+ * returned. If the provided phy_index is invalid, an error pointer
+ * is returned.
+ */
+struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
+ const struct nlattr *header,
+ struct netlink_ext_ack *extack);
+
+/**
* struct ethnl_reply_data - base type of reply data for GET requests
* @dev: device for current reply message; in single shot requests it is
* equal to &ethnl_req_info.dev; in dumps it's different for each
@@ -409,9 +434,12 @@ extern const struct ethnl_request_ops ethnl_rss_request_ops;
extern const struct ethnl_request_ops ethnl_plca_cfg_request_ops;
extern const struct ethnl_request_ops ethnl_plca_status_request_ops;
extern const struct ethnl_request_ops ethnl_mm_request_ops;
+extern const struct ethnl_request_ops ethnl_phy_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
+extern const struct nla_policy ethnl_header_policy_phy[ETHTOOL_A_HEADER_PHY_INDEX + 1];
+extern const struct nla_policy ethnl_header_policy_phy_stats[ETHTOOL_A_HEADER_PHY_INDEX + 1];
extern const struct nla_policy ethnl_strset_get_policy[ETHTOOL_A_STRSET_COUNTS_ONLY + 1];
extern const struct nla_policy ethnl_linkinfo_get_policy[ETHTOOL_A_LINKINFO_HEADER + 1];
extern const struct nla_policy ethnl_linkinfo_set_policy[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL + 1];
@@ -449,13 +477,14 @@ extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER +
extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1];
extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
-extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_CONTEXT + 1];
+extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_START_CONTEXT + 1];
extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1];
extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1];
extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1];
extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1];
+extern const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1];
int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info);
@@ -464,6 +493,12 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info);
+int ethnl_rss_dump_start(struct netlink_callback *cb);
+int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_phy_start(struct netlink_callback *cb);
+int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info);
+int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ethnl_phy_done(struct netlink_callback *cb);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
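The tri-state contract of ethnl_req_get_phydev() — a valid pointer, NULL, or an ERR_PTR() — is what lets the per-command callbacks below tell "no PHY targeted or attached" apart from "invalid phy_index". A minimal caller sketch under that contract (the callback and the ETHTOOL_A_EXAMPLE_HEADER attribute are hypothetical, not part of the patch):

static int example_prepare_data(const struct ethnl_req_info *req_base,
				struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct phy_device *phydev;

	/* RTNL must be held for as long as phydev is used */
	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_EXAMPLE_HEADER],
				      extack);
	if (IS_ERR(phydev))	/* invalid ETHTOOL_A_HEADER_PHY_INDEX */
		return PTR_ERR(phydev);
	if (!phydev)		/* no PHY targeted and none attached */
		return -EOPNOTSUPP;

	/* ... interact with phydev under RTNL ... */
	return 0;
}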
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
new file mode 100644
index 000000000000..ed8f690f6bac
--- /dev/null
+++ b/net/ethtool/phy.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Bootlin
+ *
+ */
+#include "common.h"
+#include "netlink.h"
+
+#include <linux/phy.h>
+#include <linux/phy_link_topology.h>
+#include <linux/sfp.h>
+
+struct phy_req_info {
+ struct ethnl_req_info base;
+ struct phy_device_node *pdn;
+};
+
+#define PHY_REQINFO(__req_base) \
+ container_of(__req_base, struct phy_req_info, base)
+
+const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
+ [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+/* Caller holds rtnl */
+static ssize_t
+ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device_node *pdn = req_info->pdn;
+ struct phy_device *phydev = pdn->phy;
+ size_t size = 0;
+
+ ASSERT_RTNL();
+
+ /* ETHTOOL_A_PHY_INDEX */
+ size += nla_total_size(sizeof(u32));
+
+ /* ETHTOOL_A_DRVNAME */
+ if (phydev->drv)
+ size += nla_total_size(strlen(phydev->drv->name) + 1);
+
+ /* ETHTOOL_A_NAME */
+ size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_TYPE */
+ size += nla_total_size(sizeof(u32));
+
+ if (phy_on_sfp(phydev)) {
+ const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
+ if (upstream_sfp_name)
+ size += nla_total_size(strlen(upstream_sfp_name) + 1);
+
+ /* ETHTOOL_A_PHY_UPSTREAM_INDEX */
+ size += nla_total_size(sizeof(u32));
+ }
+
+ /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
+ if (phydev->sfp_bus) {
+ const char *sfp_name = sfp_get_name(phydev->sfp_bus);
+
+ if (sfp_name)
+ size += nla_total_size(strlen(sfp_name) + 1);
+ }
+
+ return size;
+}
+
+static int
+ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
+{
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device_node *pdn = req_info->pdn;
+ struct phy_device *phydev = pdn->phy;
+ enum phy_upstream ptype;
+
+ ptype = pdn->upstream_type;
+
+ if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
+ nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
+ nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
+ return -EMSGSIZE;
+
+ if (phydev->drv &&
+ nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
+ return -EMSGSIZE;
+
+ if (ptype == PHY_UPSTREAM_PHY) {
+ struct phy_device *upstream = pdn->upstream.phydev;
+ const char *sfp_upstream_name;
+
+ /* Parent index */
+ if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
+ return -EMSGSIZE;
+
+ if (pdn->parent_sfp_bus) {
+ sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
+ if (sfp_upstream_name &&
+ nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
+ sfp_upstream_name))
+ return -EMSGSIZE;
+ }
+ }
+
+ if (phydev->sfp_bus) {
+ const char *sfp_name = sfp_get_name(phydev->sfp_bus);
+
+ if (sfp_name &&
+ nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
+ sfp_name))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_link_topology *topo = req_base->dev->link_topo;
+ struct phy_req_info *req_info = PHY_REQINFO(req_base);
+ struct phy_device *phydev;
+
+ phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
+ extack);
+ if (!phydev)
+ return 0;
+
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
+ if (!topo)
+ return 0;
+
+ req_info->pdn = xa_load(&topo->phys, phydev->phyindex);
+
+ return 0;
+}
+
+int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct phy_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct sk_buff *rskb;
+ void *reply_payload;
+ int reply_len;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info.base,
+ tb[ETHTOOL_A_PHY_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ rtnl_lock();
+
+ ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
+ if (ret < 0)
+ goto err_unlock_rtnl;
+
+ /* No PHY, return early */
+ if (!req_info.pdn)
+ goto err_unlock_rtnl;
+
+ ret = ethnl_phy_reply_size(&req_info.base, info->extack);
+ if (ret < 0)
+ goto err_unlock_rtnl;
+ reply_len = ret + ethnl_reply_header_size();
+
+ rskb = ethnl_reply_init(reply_len, req_info.base.dev,
+ ETHTOOL_MSG_PHY_GET_REPLY,
+ ETHTOOL_A_PHY_HEADER,
+ info, &reply_payload);
+ if (!rskb) {
+ ret = -ENOMEM;
+ goto err_unlock_rtnl;
+ }
+
+ ret = ethnl_phy_fill_reply(&req_info.base, rskb);
+ if (ret)
+ goto err_free_msg;
+
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info.base);
+ genlmsg_end(rskb, reply_payload);
+
+ return genlmsg_reply(rskb, info);
+
+err_free_msg:
+ nlmsg_free(rskb);
+err_unlock_rtnl:
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info.base);
+ return ret;
+}
+
+struct ethnl_phy_dump_ctx {
+ struct phy_req_info *phy_req_info;
+ unsigned long ifindex;
+ unsigned long phy_index;
+};
+
+int ethnl_phy_start(struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+
+ ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
+ if (!ctx->phy_req_info)
+ return -ENOMEM;
+
+ ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
+ info->attrs[ETHTOOL_A_PHY_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
+ ctx->ifindex = 0;
+ ctx->phy_index = 0;
+
+ if (ret)
+ kfree(ctx->phy_req_info);
+
+ return ret;
+}
+
+int ethnl_phy_done(struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+
+ if (ctx->phy_req_info->base.dev)
+ ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
+
+ kfree(ctx->phy_req_info);
+
+ return 0;
+}
+
+static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
+ struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ struct phy_req_info *pri = ctx->phy_req_info;
+ struct phy_device_node *pdn;
+ int ret = 0;
+ void *ehdr;
+
+ if (!dev->link_topo)
+ return 0;
+
+ xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
+ ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
+ if (!ehdr) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
+ ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
+ if (ret < 0) {
+ genlmsg_cancel(skb, ehdr);
+ break;
+ }
+
+ pri->pdn = pdn;
+ ret = ethnl_phy_fill_reply(&pri->base, skb);
+ if (ret < 0) {
+ genlmsg_cancel(skb, ehdr);
+ break;
+ }
+
+ genlmsg_end(skb, ehdr);
+ }
+
+ return ret;
+}
+
+int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+
+ if (ctx->phy_req_info->base.dev) {
+ ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
+ } else {
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ ret = ethnl_phy_dump_one_dev(skb, dev, cb);
+ if (ret)
+ break;
+
+ ctx->phy_index = 0;
+ }
+ }
+ rtnl_unlock();
+
+ return ret;
+}
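ethnl_phy_start() keeps its whole dump state in the scratch space of struct netlink_callback; the BUILD_BUG_ON() makes the build fail if struct ethnl_phy_dump_ctx ever outgrows cb->ctx. A sketch of the same pattern in isolation (names illustrative, assuming the usual netlink dump callbacks):

struct example_dump_ctx {
	unsigned long ifindex;		/* netdev iteration cursor */
	unsigned long obj_index;	/* per-device object cursor */
};

static int example_start(struct netlink_callback *cb)
{
	struct example_dump_ctx *ctx = (void *)cb->ctx;

	/* cb->ctx is a small fixed-size array shared by all dumpers;
	 * refuse to build if our state no longer fits in it.
	 */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->ifindex = 0;
	ctx->obj_index = 0;
	return 0;
}

Each subsequent dumpit call sees the same cb->ctx, so the cursors persist across the multi-message dump without a separate allocation.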
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
index b1e2e3b5027f..d95d92f173a6 100644
--- a/net/ethtool/plca.c
+++ b/net/ethtool/plca.c
@@ -25,7 +25,7 @@ struct plca_reply_data {
const struct nla_policy ethnl_plca_get_cfg_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static void plca_update_sint(int *dst, struct nlattr **tb, u32 attrid,
@@ -58,10 +58,14 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev) {
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -80,7 +84,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
memset(&data->plca_cfg, 0xff,
sizeof_field(struct plca_reply_data, plca_cfg));
- ret = ops->get_plca_cfg(dev->phydev, &data->plca_cfg);
+ ret = ops->get_plca_cfg(phydev, &data->plca_cfg);
ethnl_ops_complete(dev);
out:
@@ -129,7 +133,7 @@ static int plca_get_cfg_fill_reply(struct sk_buff *skb,
const struct nla_policy ethnl_plca_set_cfg_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_PLCA_ENABLED] = NLA_POLICY_MAX(NLA_U8, 1),
[ETHTOOL_A_PLCA_NODE_ID] = NLA_POLICY_MAX(NLA_U32, 255),
[ETHTOOL_A_PLCA_NODE_CNT] = NLA_POLICY_RANGE(NLA_U32, 1, 255),
@@ -141,15 +145,17 @@ const struct nla_policy ethnl_plca_set_cfg_policy[] = {
static int
ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct net_device *dev = req_info->dev;
const struct ethtool_phy_ops *ops;
struct nlattr **tb = info->attrs;
struct phy_plca_cfg plca_cfg;
+ struct phy_device *phydev;
bool mod = false;
int ret;
+ phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER],
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev)
+ if (IS_ERR_OR_NULL(phydev))
return -EOPNOTSUPP;
ops = ethtool_phy_ops;
@@ -168,7 +174,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
if (!mod)
return 0;
- ret = ops->set_plca_cfg(dev->phydev, &plca_cfg, info->extack);
+ ret = ops->set_plca_cfg(phydev, &plca_cfg, info->extack);
return ret < 0 ? ret : 1;
}
@@ -191,7 +197,7 @@ const struct ethnl_request_ops ethnl_plca_cfg_request_ops = {
const struct nla_policy ethnl_plca_get_status_policy[] = {
[ETHTOOL_A_PLCA_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
@@ -201,10 +207,14 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
+ phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
+ info->extack);
// check that the PHY device is available and connected
- if (!dev->phydev) {
+ if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -223,7 +233,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
memset(&data->plca_st, 0xff,
sizeof_field(struct plca_reply_data, plca_st));
- ret = ops->get_plca_status(dev->phydev, &data->plca_st);
+ ret = ops->get_plca_status(phydev, &data->plca_st);
ethnl_ops_complete(dev);
out:
return ret;
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index ff81aa749784..a0705edca22a 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -28,17 +28,15 @@ struct pse_reply_data {
/* PSE_GET */
const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1] = {
- [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy),
};
-static int pse_get_pse_attributes(struct net_device *dev,
+static int pse_get_pse_attributes(struct phy_device *phydev,
struct netlink_ext_ack *extack,
struct pse_reply_data *data)
{
- struct phy_device *phydev = dev->phydev;
-
if (!phydev) {
- NL_SET_ERR_MSG(extack, "No PHY is attached");
+ NL_SET_ERR_MSG(extack, "No PHY found");
return -EOPNOTSUPP;
}
@@ -58,13 +56,20 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
{
struct pse_reply_data *data = PSE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
int ret;
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = pse_get_pse_attributes(dev, info->extack, data);
+ phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER],
+ info->extack);
+ if (IS_ERR(phydev))
+ return -ENODEV;
+
+ ret = pse_get_pse_attributes(phydev, info->extack, data);
ethnl_ops_complete(dev);
@@ -206,7 +211,7 @@ static void pse_cleanup_data(struct ethnl_reply_data *reply_base)
/* PSE_SET */
const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
- [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] =
NLA_POLICY_RANGE(NLA_U32, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED),
@@ -217,14 +222,11 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
};
static int
-ethnl_set_pse_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+ethnl_set_pse_validate(struct phy_device *phydev, struct genl_info *info)
{
- struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct phy_device *phydev;
- phydev = dev->phydev;
- if (!phydev) {
+ if (IS_ERR_OR_NULL(phydev)) {
NL_SET_ERR_MSG(info->extack, "No PHY is attached");
return -EOPNOTSUPP;
}
@@ -249,18 +251,21 @@ ethnl_set_pse_validate(struct ethnl_req_info *req_info, struct genl_info *info)
return -EOPNOTSUPP;
}
- return 1;
+ return 0;
}
static int
ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
struct phy_device *phydev;
- int ret = 0;
+ int ret;
- phydev = dev->phydev;
+ phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER],
+ info->extack);
+ ret = ethnl_set_pse_validate(phydev, info);
+ if (ret)
+ return ret;
if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) {
unsigned int pw_limit;
@@ -307,7 +312,6 @@ const struct ethnl_request_ops ethnl_pse_request_ops = {
.fill_reply = pse_fill_reply,
.cleanup_data = pse_cleanup_data,
- .set_validate = ethnl_set_pse_validate,
.set = ethnl_set_pse,
/* PSE has no notification */
};
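With .set_validate dropped from ethnl_pse_request_ops, ethnl_set_pse_validate() becomes an ordinary helper called from ethnl_set_pse(), which is why its success return changes from 1 to 0: the ethnl core interprets a positive .set_validate return as "proceed with .set", a convention that no longer applies to a direct call. A hedged sketch of the surrounding convention (not the patch's code; example_validate() is hypothetical):

static int ethnl_set_example(struct ethnl_req_info *req_info,
			     struct genl_info *info)
{
	int ret;

	ret = example_validate(info);	/* plain helper, 0 on success */
	if (ret)
		return ret;		/* negative errno propagates as-is */

	/* ... apply the requested changes ... */

	return 1;	/* >0 tells the ethnl core a change was made */
}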
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 5c4c4505ab9a..e07386275e14 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -10,6 +10,7 @@ struct rss_req_info {
struct rss_reply_data {
struct ethnl_reply_data base;
+ bool no_key_fields;
u32 indir_size;
u32 hkey_size;
u32 hfunc;
@@ -27,6 +28,7 @@ struct rss_reply_data {
const struct nla_policy ethnl_rss_get_policy[] = {
[ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32 },
+ [ETHTOOL_A_RSS_START_CONTEXT] = { .type = NLA_U32 },
};
static int
@@ -37,18 +39,18 @@ rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb,
if (tb[ETHTOOL_A_RSS_CONTEXT])
request->rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]);
+ if (tb[ETHTOOL_A_RSS_START_CONTEXT]) {
+ NL_SET_BAD_ATTR(extack, tb[ETHTOOL_A_RSS_START_CONTEXT]);
+ return -EINVAL;
+ }
return 0;
}
static int
-rss_prepare_data(const struct ethnl_req_info *req_base,
- struct ethnl_reply_data *reply_base,
- const struct genl_info *info)
+rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
{
- struct rss_reply_data *data = RSS_REPDATA(reply_base);
- struct rss_req_info *request = RSS_REQINFO(req_base);
- struct net_device *dev = reply_base->dev;
struct ethtool_rxfh_param rxfh = {};
const struct ethtool_ops *ops;
u32 total_size, indir_bytes;
@@ -56,12 +58,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
int ret;
ops = dev->ethtool_ops;
- if (!ops->get_rxfh)
- return -EOPNOTSUPP;
-
- /* Some drivers don't handle rss_context */
- if (request->rss_context && !ops->cap_rss_ctx_supported)
- return -EOPNOTSUPP;
ret = ethnl_ops_begin(dev);
if (ret < 0)
@@ -91,7 +87,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
rxfh.indir = data->indir_table;
rxfh.key_size = data->hkey_size;
rxfh.key = data->hkey;
- rxfh.rss_context = request->rss_context;
ret = ops->get_rxfh(dev, &rxfh);
if (ret)
@@ -105,6 +100,67 @@ out_ops:
}
static int
+rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ struct ethtool_rxfh_context *ctx;
+ u32 total_size, indir_bytes;
+ u8 *rss_config;
+
+ ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
+ if (!ctx)
+ return -ENOENT;
+
+ data->indir_size = ctx->indir_size;
+ data->hkey_size = ctx->key_size;
+ data->hfunc = ctx->hfunc;
+ data->input_xfrm = ctx->input_xfrm;
+
+ indir_bytes = data->indir_size * sizeof(u32);
+ total_size = indir_bytes + data->hkey_size;
+ rss_config = kzalloc(total_size, GFP_KERNEL);
+ if (!rss_config)
+ return -ENOMEM;
+
+ data->indir_table = (u32 *)rss_config;
+ memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), indir_bytes);
+
+ if (data->hkey_size) {
+ data->hkey = rss_config + indir_bytes;
+ memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
+ data->hkey_size);
+ }
+
+ return 0;
+}
+
+static int
+rss_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ const struct genl_info *info)
+{
+ struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+ struct net_device *dev = reply_base->dev;
+ const struct ethtool_ops *ops;
+
+ ops = dev->ethtool_ops;
+ if (!ops->get_rxfh)
+ return -EOPNOTSUPP;
+
+ /* Some drivers don't handle rss_context */
+ if (request->rss_context) {
+ if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
+ return -EOPNOTSUPP;
+
+ data->no_key_fields = !ops->rxfh_per_ctx_key;
+ return rss_prepare_ctx(request, dev, data, info);
+ }
+
+ return rss_prepare_get(request, dev, data, info);
+}
+
+static int
rss_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
@@ -131,13 +187,18 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
return -EMSGSIZE;
+ if ((data->indir_size &&
+ nla_put(skb, ETHTOOL_A_RSS_INDIR,
+ sizeof(u32) * data->indir_size, data->indir_table)))
+ return -EMSGSIZE;
+
+ if (data->no_key_fields)
+ return 0;
+
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
(data->input_xfrm &&
nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) ||
- (data->indir_size &&
- nla_put(skb, ETHTOOL_A_RSS_INDIR,
- sizeof(u32) * data->indir_size, data->indir_table)) ||
(data->hkey_size &&
nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))
return -EMSGSIZE;
@@ -152,6 +213,146 @@ static void rss_cleanup_data(struct ethnl_reply_data *reply_base)
kfree(data->indir_table);
}
+struct rss_nl_dump_ctx {
+ unsigned long ifindex;
+ unsigned long ctx_idx;
+
+	/* User wants to dump contexts only from the given ifindex */
+ unsigned int match_ifindex;
+ unsigned int start_ctx;
+};
+
+static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb)
+{
+ NL_ASSERT_DUMP_CTX_FITS(struct rss_nl_dump_ctx);
+
+ return (struct rss_nl_dump_ctx *)cb->ctx;
+}
+
+int ethnl_rss_dump_start(struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ int ret;
+
+ /* Filtering by context not supported */
+ if (tb[ETHTOOL_A_RSS_CONTEXT]) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]);
+ return -EINVAL;
+ }
+ if (tb[ETHTOOL_A_RSS_START_CONTEXT]) {
+ ctx->start_ctx = nla_get_u32(tb[ETHTOOL_A_RSS_START_CONTEXT]);
+ ctx->ctx_idx = ctx->start_ctx;
+ }
+
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_RSS_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
+ if (req_info.dev) {
+ ctx->match_ifindex = req_info.dev->ifindex;
+ ctx->ifindex = ctx->match_ifindex;
+ ethnl_parse_header_dev_put(&req_info);
+ req_info.dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, u32 rss_context)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ struct rss_reply_data data = {};
+ struct rss_req_info req = {};
+ void *ehdr;
+ int ret;
+
+ req.rss_context = rss_context;
+
+ ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_RSS_GET_REPLY);
+ if (!ehdr)
+ return -EMSGSIZE;
+
+ ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_RSS_HEADER);
+ if (ret < 0)
+ goto err_cancel;
+
+	/* Context 0 is not currently stored or cached in the XArray */
+ if (!rss_context)
+ ret = rss_prepare_get(&req, dev, &data, info);
+ else
+ ret = rss_prepare_ctx(&req, dev, &data, info);
+ if (ret)
+ goto err_cancel;
+
+ ret = rss_fill_reply(skb, &req.base, &data.base);
+ if (ret)
+ goto err_cleanup;
+ genlmsg_end(skb, ehdr);
+
+ rss_cleanup_data(&data.base);
+ return 0;
+
+err_cleanup:
+ rss_cleanup_data(&data.base);
+err_cancel:
+ genlmsg_cancel(skb, ehdr);
+ return ret;
+}
+
+static int
+rss_dump_one_dev(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev)
+{
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh)
+ return 0;
+
+ if (!ctx->ctx_idx) {
+ ret = rss_dump_one_ctx(skb, cb, dev, 0);
+ if (ret)
+ return ret;
+ ctx->ctx_idx++;
+ }
+
+ for (; xa_find(&dev->ethtool->rss_ctx, &ctx->ctx_idx,
+ ULONG_MAX, XA_PRESENT); ctx->ctx_idx++) {
+ ret = rss_dump_one_ctx(skb, cb, dev, ctx->ctx_idx);
+ if (ret)
+ return ret;
+ }
+ ctx->ctx_idx = ctx->start_ctx;
+
+ return 0;
+}
+
+int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex)
+ break;
+
+ ret = rss_dump_one_dev(skb, cb, dev);
+ if (ret)
+ break;
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+
const struct ethnl_request_ops ethnl_rss_request_ops = {
.request_cmd = ETHTOOL_MSG_RSS_GET,
.reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY,
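rss_dump_one_dev() walks the per-device XArray of RSS contexts with xa_find(), which advances the cursor to the next present index, and resets ctx_idx back to start_ctx once a device is exhausted, so the next device's dump again begins from the user-requested starting context. A self-contained sketch of that cursor loop (emit_one_ctx() is hypothetical):

static int dump_contexts_from(struct sk_buff *skb, struct xarray *xa,
			      unsigned long start)
{
	unsigned long idx = start;
	void *ctx;

	/* xa_find() leaves idx at the next present index >= idx */
	while ((ctx = xa_find(xa, &idx, ULONG_MAX, XA_PRESENT))) {
		if (emit_one_ctx(skb, idx, ctx))
			return -EMSGSIZE;  /* dump resumes from idx later */
		idx++;
	}
	return 0;
}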
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index c678b484a079..b3382b3cf325 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -126,7 +126,7 @@ struct strset_reply_data {
const struct nla_policy ethnl_strset_get_policy[] = {
[ETHTOOL_A_STRSET_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_phy),
[ETHTOOL_A_STRSET_STRINGSETS] = { .type = NLA_NESTED },
[ETHTOOL_A_STRSET_COUNTS_ONLY] = { .type = NLA_FLAG },
};
@@ -233,17 +233,18 @@ static void strset_cleanup_data(struct ethnl_reply_data *reply_base)
}
static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
- unsigned int id, bool counts_only)
+ struct phy_device *phydev, unsigned int id,
+ bool counts_only)
{
const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
const struct ethtool_ops *ops = dev->ethtool_ops;
void *strings;
int count, ret;
- if (id == ETH_SS_PHY_STATS && dev->phydev &&
+ if (id == ETH_SS_PHY_STATS && phydev &&
!ops->get_ethtool_phy_stats && phy_ops &&
phy_ops->get_sset_count)
- ret = phy_ops->get_sset_count(dev->phydev);
+ ret = phy_ops->get_sset_count(phydev);
else if (ops->get_sset_count && ops->get_strings)
ret = ops->get_sset_count(dev, id);
else
@@ -258,10 +259,10 @@ static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
strings = kcalloc(count, ETH_GSTRING_LEN, GFP_KERNEL);
if (!strings)
return -ENOMEM;
- if (id == ETH_SS_PHY_STATS && dev->phydev &&
+ if (id == ETH_SS_PHY_STATS && phydev &&
!ops->get_ethtool_phy_stats && phy_ops &&
phy_ops->get_strings)
- phy_ops->get_strings(dev->phydev, strings);
+ phy_ops->get_strings(phydev, strings);
else
ops->get_strings(dev, id, strings);
info->strings = strings;
@@ -279,6 +280,8 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
const struct strset_req_info *req_info = STRSET_REQINFO(req_base);
struct strset_reply_data *data = STRSET_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct nlattr **tb = info->attrs;
+ struct phy_device *phydev;
unsigned int i;
int ret;
@@ -289,14 +292,20 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
for (i = 0; i < ETH_SS_COUNT; i++) {
if ((req_info->req_ids & (1U << i)) &&
data->sets[i].per_dev) {
- if (info)
- GENL_SET_ERR_MSG(info, "requested per device strings without dev");
+ GENL_SET_ERR_MSG(info, "requested per device strings without dev");
return -EINVAL;
}
}
return 0;
}
+	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_STRSET_HEADER],
+ info->extack);
+
+ /* phydev can be NULL, check for errors only */
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
goto err_strset;
@@ -305,7 +314,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
!data->sets[i].per_dev)
continue;
- ret = strset_prepare_set(&data->sets[i], dev, i,
+ ret = strset_prepare_set(&data->sets[i], dev, phydev, i,
req_info->counts_only);
if (ret < 0)
goto err_ops;
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index 89637e732866..7e46d130dce2 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -153,7 +153,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
if (!req) {
err = -EBUSY;
trace_handshake_cmd_done_err(net, req, sock->sk, err);
- fput(sock->file);
+ sockfd_put(sock);
return err;
}
@@ -164,7 +164,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
status = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_STATUS]);
handshake_complete(req, status, info);
- fput(sock->file);
+ sockfd_put(sock);
return 0;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index e4cc6b78dcfc..ebdfd5b64e17 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -427,6 +427,9 @@ static void hsr_proxy_announce(struct timer_list *t)
* of SAN nodes stored in ProxyNodeTable.
*/
interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
+ if (!interlink)
+ goto done;
+
list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
if (hsr_addr_is_redbox(hsr, node->macaddress_A))
continue;
@@ -441,6 +444,7 @@ static void hsr_proxy_announce(struct timer_list *t)
mod_timer(&hsr->announce_proxy_timer, jiffies + interval);
}
+done:
rcu_read_unlock();
}
@@ -554,6 +558,12 @@ void hsr_dev_setup(struct net_device *dev)
dev->netdev_ops = &hsr_device_ops;
SET_NETDEV_DEVTYPE(dev, &hsr_type);
dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
+ /* Prevent recursive tx locking */
+ dev->lltx = true;
+ /* Not sure about this. Taken from bridge code. netdevice.h says
+ * it means "Does not change network namespaces".
+ */
+ dev->netns_local = true;
dev->needs_free_netdev = true;
@@ -563,16 +573,10 @@ void hsr_dev_setup(struct net_device *dev)
dev->features = dev->hw_features;
- /* Prevent recursive tx locking */
- dev->features |= NETIF_F_LLTX;
/* VLAN on top of HSR needs testing and probably some work on
* hsr_header_create() etc.
*/
dev->features |= NETIF_F_VLAN_CHALLENGED;
- /* Not sure about this. Taken from bridge code. netdev_features.h says
- * it means "Does not change network namespaces".
- */
- dev->features |= NETIF_F_NETNS_LOCAL;
}
/* Return true if dev is a HSR master; return false otherwise.
@@ -625,7 +629,6 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
/* Overflow soon to find bugs easier: */
hsr->sequence_nr = HSR_SEQNR_START;
hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
- hsr->interlink_sequence_nr = HSR_SEQNR_START;
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index ab1f8d35d9dc..fcfeb79bb040 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -203,7 +203,6 @@ struct hsr_priv {
struct timer_list prune_proxy_timer;
int announce_count;
u16 sequence_nr;
- u16 interlink_sequence_nr; /* Interlink port seq_nr */
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */
spinlock_t seqnr_lock; /* locking for sequence_nr */
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index af6cf64a00e0..464f683e016d 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -67,7 +67,16 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
skb_reset_mac_len(skb);
- hsr_forward_skb(skb, port);
+	/* Only frames received on the interlink port are assigned a
+	 * sequence number here, and so require serialisation against
+	 * other senders.
+	 */
+ if (port->type == HSR_PT_INTERLINK) {
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, port);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ hsr_forward_skb(skb, port);
+ }
finish_consume:
return RX_HANDLER_CONSUMED;
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 77b4e92027c5..175efd860f7b 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -116,7 +116,7 @@ static void lowpan_setup(struct net_device *ldev)
ldev->netdev_ops = &lowpan_netdev_ops;
ldev->header_ops = &lowpan_header_ops;
ldev->needs_free_netdev = true;
- ldev->features |= NETIF_F_NETNS_LOCAL;
+ ldev->netns_local = true;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 60e8fff1347e..88adb04e4072 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -226,11 +226,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
if (!wpan_dev->netdev)
continue;
- wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = false;
err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d");
if (err)
break;
- wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = true;
}
if (err) {
@@ -242,11 +242,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
list) {
if (!wpan_dev->netdev)
continue;
- wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = false;
err = dev_change_net_namespace(wpan_dev->netdev, net,
"wpan%d");
WARN_ON(err);
- wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ wpan_dev->netdev->netns_local = true;
}
return err;
@@ -291,7 +291,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
switch (state) {
/* TODO NETDEV_DEVTYPE */
case NETDEV_REGISTER:
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
wpan_dev->identifier = ++rdev->wpan_dev_id;
list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
rdev->devlist_generation++;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e94ed7c56a0..6d2c97f8e9ef 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -661,7 +661,8 @@ config TCP_CONG_CDG
For further details see:
D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
- delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+ delay gradients." In Networking 2011. Preprint:
+ http://caia.swin.edu.au/cv/dahayes/content/networking2011-cdg-preprint.pdf
config TCP_CONG_BBR
tristate "BBR TCP"
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 3f88d0961e5b..554804774628 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -14,10 +14,6 @@
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
static struct bpf_struct_ops bpf_tcp_congestion_ops;
-static u32 unsupported_ops[] = {
- offsetof(struct tcp_congestion_ops, get_info),
-};
-
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;
@@ -45,18 +41,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
return 0;
}
-static bool is_unsupported(u32 member_offset)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
- if (member_offset == unsupported_ops[i])
- return true;
- }
-
- return false;
-}
-
static bool bpf_tcp_ca_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -251,15 +235,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
return 0;
}
-static int bpf_tcp_ca_check_member(const struct btf_type *t,
- const struct btf_member *member,
- const struct bpf_prog *prog)
-{
- if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
- return -ENOTSUPP;
- return 0;
-}
-
static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
return tcp_register_congestion_control(kdata);
@@ -354,7 +329,6 @@ static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
.update = bpf_tcp_ca_update,
- .check_member = bpf_tcp_ca_check_member,
.init_member = bpf_tcp_ca_init_member,
.init = bpf_tcp_ca_init,
.validate = bpf_tcp_ca_validate,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index d96f3e452fef..ab76744383cf 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -216,17 +216,27 @@ static void devinet_sysctl_unregister(struct in_device *idev)
/* Locks all the inet devices. */
-static struct in_ifaddr *inet_alloc_ifa(void)
+static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev)
{
- return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL_ACCOUNT);
+ struct in_ifaddr *ifa;
+
+ ifa = kzalloc(sizeof(*ifa), GFP_KERNEL_ACCOUNT);
+ if (!ifa)
+ return NULL;
+
+ in_dev_hold(in_dev);
+ ifa->ifa_dev = in_dev;
+
+ INIT_HLIST_NODE(&ifa->hash);
+
+ return ifa;
}
static void inet_rcu_free_ifa(struct rcu_head *head)
{
struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
- if (ifa->ifa_dev)
- in_dev_put(ifa->ifa_dev);
+ in_dev_put(ifa->ifa_dev);
kfree(ifa);
}
@@ -574,17 +584,9 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
ASSERT_RTNL();
- if (!in_dev) {
- inet_free_ifa(ifa);
- return -ENOBUFS;
- }
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
- if (ifa->ifa_dev != in_dev) {
- WARN_ON(ifa->ifa_dev);
- in_dev_hold(in_dev);
- ifa->ifa_dev = in_dev;
- }
+
if (ipv4_is_loopback(ifa->ifa_local))
ifa->ifa_scope = RT_SCOPE_HOST;
return inet_insert_ifa(ifa);
@@ -701,8 +703,6 @@ errout:
return err;
}
-#define INFINITY_LIFE_TIME 0xFFFFFFFF
-
static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
@@ -875,7 +875,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
if (!in_dev)
goto errout;
- ifa = inet_alloc_ifa();
+ ifa = inet_alloc_ifa(in_dev);
if (!ifa)
/*
* A potential indev allocation can be left alive, it stays
@@ -885,19 +885,15 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
- in_dev_hold(in_dev);
if (!tb[IFA_ADDRESS])
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
- INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope;
- ifa->ifa_dev = in_dev;
-
ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
@@ -1184,10 +1180,12 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
if (!ifa) {
ret = -ENOBUFS;
- ifa = inet_alloc_ifa();
+ if (!in_dev)
+ break;
+ ifa = inet_alloc_ifa(in_dev);
if (!ifa)
break;
- INIT_HLIST_NODE(&ifa->hash);
+
if (colon)
memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
else
@@ -1586,16 +1584,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (!inetdev_valid_mtu(dev->mtu))
break;
if (dev->flags & IFF_LOOPBACK) {
- struct in_ifaddr *ifa = inet_alloc_ifa();
+ struct in_ifaddr *ifa = inet_alloc_ifa(in_dev);
if (ifa) {
- INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
ifa->ifa_mask = inet_make_mask(8);
- in_dev_hold(in_dev);
- ifa->ifa_dev = in_dev;
ifa->ifa_scope = RT_SCOPE_HOST;
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
@@ -1948,8 +1943,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
static size_t inet_get_link_af_size(const struct net_device *dev,
@@ -2145,8 +2139,7 @@ void inet_netconf_notify_devconf(struct net *net, int event, int type,
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
}
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 47378ca41904..f3281312eb5e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -115,7 +115,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- skb_page_unref(sg_page(sg), skb->pp_recycle);
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET_ESPINTCP
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7ad2cafb9276..793e6781399a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -293,7 +293,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
- .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
+ .flowi4_tos = ip_hdr(skb)->tos & INET_DSCP_MASK,
.flowi4_scope = scope,
.flowi4_mark = vmark ? skb->mark : 0,
};
@@ -1343,7 +1343,7 @@ static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
struct flowi4 fl4 = {
.flowi4_mark = frn->fl_mark,
.daddr = frn->fl_addr,
- .flowi4_tos = frn->fl_tos,
+ .flowi4_tos = frn->fl_tos & INET_DSCP_MASK,
.flowi4_scope = frn->fl_scope,
};
struct fib_table *tb;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 5bdd1c016009..b07292d50ee7 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -37,6 +37,7 @@ struct fib4_rule {
u8 dst_len;
u8 src_len;
dscp_t dscp;
+ u8 dscp_full:1; /* DSCP or TOS selector */
__be32 src;
__be32 srcmask;
__be32 dst;
@@ -186,7 +187,15 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule,
((daddr ^ r->dst) & r->dstmask))
return 0;
- if (r->dscp && r->dscp != inet_dsfield_to_dscp(fl4->flowi4_tos))
+	/* When the DSCP selector is used, we need to match on the entire DSCP
+	 * field in the flow information structure. When the TOS selector is
+	 * used, we need to mask the upper three DSCP bits prior to matching,
+	 * to maintain legacy behavior.
+	 */
+ if (r->dscp_full && r->dscp != inet_dsfield_to_dscp(fl4->flowi4_tos))
+ return 0;
+ else if (!r->dscp_full && r->dscp &&
+ !fib_dscp_masked_match(r->dscp, fl4))
return 0;
if (rule->ip_proto && (rule->ip_proto != fl4->flowi4_proto))
@@ -217,6 +226,20 @@ static struct fib_table *fib_empty_table(struct net *net)
return NULL;
}
+static int fib4_nl2rule_dscp(const struct nlattr *nla, struct fib4_rule *rule4,
+ struct netlink_ext_ack *extack)
+{
+ if (rule4->dscp) {
+ NL_SET_ERR_MSG(extack, "Cannot specify both TOS and DSCP");
+ return -EINVAL;
+ }
+
+ rule4->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2);
+ rule4->dscp_full = true;
+
+ return 0;
+}
+
static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -238,6 +261,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
rule4->dscp = inet_dsfield_to_dscp(frh->tos);
+ if (tb[FRA_DSCP] &&
+ fib4_nl2rule_dscp(tb[FRA_DSCP], rule4, extack) < 0)
+ goto errout;
+
/* split local/main if they are not already split */
err = fib_unmerge(net);
if (err)
@@ -320,9 +347,19 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->dst_len && (rule4->dst_len != frh->dst_len))
return 0;
- if (frh->tos && inet_dscp_to_dsfield(rule4->dscp) != frh->tos)
+ if (frh->tos &&
+ (rule4->dscp_full ||
+ inet_dscp_to_dsfield(rule4->dscp) != frh->tos))
return 0;
+ if (tb[FRA_DSCP]) {
+ dscp_t dscp;
+
+ dscp = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP]) << 2);
+ if (!rule4->dscp_full || rule4->dscp != dscp)
+ return 0;
+ }
+
#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
return 0;
@@ -344,7 +381,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->dst_len = rule4->dst_len;
frh->src_len = rule4->src_len;
- frh->tos = inet_dscp_to_dsfield(rule4->dscp);
+
+ if (rule4->dscp_full) {
+ frh->tos = 0;
+ if (nla_put_u8(skb, FRA_DSCP,
+ inet_dscp_to_dsfield(rule4->dscp) >> 2))
+ goto nla_put_failure;
+ } else {
+ frh->tos = inet_dscp_to_dsfield(rule4->dscp);
+ }
if ((rule4->dst_len &&
nla_put_in_addr(skb, FRA_DST, rule4->dst)) ||
@@ -366,7 +411,8 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(4) /* dst */
+ nla_total_size(4) /* src */
- + nla_total_size(4); /* flow */
+ + nla_total_size(4) /* flow */
+ + nla_total_size(1); /* dscp */
}
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
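fib_dscp_masked_match() itself isn't part of this hunk; given the comment above about masking the upper three DSCP bits for the legacy TOS selector, one plausible shape for it is the following (a sketch, not the patch's definition — RT_TOS() keeps only the legacy TOS bits of the dsfield):

static inline bool fib_dscp_masked_match(dscp_t dscp,
					 const struct flowi4 *fl4)
{
	/* Compare only the legacy TOS bits; the three high DSCP
	 * (precedence) bits are discarded before matching.
	 */
	return dscp == inet_dsfield_to_dscp(RT_TOS(fl4->flowi4_tos));
}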
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2b57cd2b96e2..ba2df3d2ac15 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -543,8 +543,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
info->nlh, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
+ rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
@@ -2066,8 +2065,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
if (fa->fa_slen != slen)
continue;
- if (fa->fa_dscp &&
- fa->fa_dscp != inet_dsfield_to_dscp(flp->flowi4_tos))
+ if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp))
continue;
if (fa->tb_id != tb->tb_id)
continue;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 8f30e3f00b7f..09e31757e96c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1580,8 +1580,7 @@ found:
if (index >= (1ul << fa->fa_slen))
continue;
}
- if (fa->fa_dscp &&
- inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
+ if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp))
continue;
/* Paired with WRITE_ONCE() in fib_release_info() */
if (READ_ONCE(fi->fib_dead))
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 78b869b31492..3e30745e2c09 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -336,11 +336,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct gro_remcsum grc;
u8 proto;
+ skb_gro_remcsum_init(&grc);
+
if (!fou)
goto out;
- skb_gro_remcsum_init(&grc);
-
off = skb_gro_offset(skb);
len = off + sizeof(*guehdr);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ab6d0d98dbc3..e1384e7331d8 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -93,6 +93,7 @@
#include <net/ip_fib.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
+#include <net/inet_dscp.h>
#define CREATE_TRACE_POINTS
#include <trace/events/icmp.h>
@@ -220,61 +221,56 @@ static inline void icmp_xmit_unlock(struct sock *sk)
spin_unlock(&sk->sk_lock.slock);
}
-int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-int sysctl_icmp_msgs_burst __read_mostly = 50;
-
-static struct {
- spinlock_t lock;
- u32 credit;
- u32 stamp;
-} icmp_global = {
- .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
-};
-
/**
 * icmp_global_allow - Are we allowed to send one more ICMP message?
+ * @net: network namespace
*
* Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and can not send another packet.
- * Note: called with BH disabled
+ * Works in tandem with icmp_global_consume().
*/
-bool icmp_global_allow(void)
+bool icmp_global_allow(struct net *net)
{
- u32 credit, delta, incr = 0, now = (u32)jiffies;
- bool rc = false;
+ u32 delta, now, oldstamp;
+ int incr, new, old;
- /* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock. The READ_ONCE() are paired
- * with the following WRITE_ONCE() in this same function.
+	/* Note: many CPUs could find this condition true at the same time;
+	 * icmp_global_consume() could then consume more credits than were
+	 * granted, but this is an acceptable race.
+	 */
- if (!READ_ONCE(icmp_global.credit)) {
- delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
- if (delta < HZ / 50)
- return false;
- }
+ if (atomic_read(&net->ipv4.icmp_global_credit) > 0)
+ return true;
- spin_lock(&icmp_global.lock);
- delta = min_t(u32, now - icmp_global.stamp, HZ);
- if (delta >= HZ / 50) {
- incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
- if (incr)
- WRITE_ONCE(icmp_global.stamp, now);
- }
- credit = min_t(u32, icmp_global.credit + incr,
- READ_ONCE(sysctl_icmp_msgs_burst));
- if (credit) {
- /* We want to use a credit of one in average, but need to randomize
- * it for security reasons.
- */
- credit = max_t(int, credit - get_random_u32_below(3), 0);
- rc = true;
+ now = jiffies;
+ oldstamp = READ_ONCE(net->ipv4.icmp_global_stamp);
+ delta = min_t(u32, now - oldstamp, HZ);
+ if (delta < HZ / 50)
+ return false;
+
+ incr = READ_ONCE(net->ipv4.sysctl_icmp_msgs_per_sec) * delta / HZ;
+ if (!incr)
+ return false;
+
+ if (cmpxchg(&net->ipv4.icmp_global_stamp, oldstamp, now) == oldstamp) {
+ old = atomic_read(&net->ipv4.icmp_global_credit);
+ do {
+ new = min(old + incr, READ_ONCE(net->ipv4.sysctl_icmp_msgs_burst));
+ } while (!atomic_try_cmpxchg(&net->ipv4.icmp_global_credit, &old, new));
}
- WRITE_ONCE(icmp_global.credit, credit);
- spin_unlock(&icmp_global.lock);
- return rc;
+ return true;
}
EXPORT_SYMBOL(icmp_global_allow);
+void icmp_global_consume(struct net *net)
+{
+ int credits = get_random_u32_below(3);
+
+	/* Note: this might make icmp_global_credit negative. */
+ if (credits)
+ atomic_sub(credits, &net->ipv4.icmp_global_credit);
+}
+EXPORT_SYMBOL(icmp_global_consume);
+
static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
if (type > NR_ICMP_TYPES)
@@ -291,14 +287,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code)
return false;
}
-static bool icmpv4_global_allow(struct net *net, int type, int code)
+static bool icmpv4_global_allow(struct net *net, int type, int code,
+ bool *apply_ratelimit)
{
if (icmpv4_mask_allow(net, type, code))
return true;
- if (icmp_global_allow())
+ if (icmp_global_allow(net)) {
+ *apply_ratelimit = true;
return true;
-
+ }
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -308,15 +306,16 @@ static bool icmpv4_global_allow(struct net *net, int type, int code)
*/
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
- struct flowi4 *fl4, int type, int code)
+ struct flowi4 *fl4, int type, int code,
+ bool apply_ratelimit)
{
struct dst_entry *dst = &rt->dst;
struct inet_peer *peer;
bool rc = true;
int vif;
- if (icmpv4_mask_allow(net, type, code))
- goto out;
+ if (!apply_ratelimit)
+ return true;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
@@ -331,6 +330,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
out:
if (!rc)
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
+ else
+ icmp_global_consume(net);
return rc;
}
@@ -402,6 +403,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
+ bool apply_ratelimit = false;
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
@@ -413,11 +415,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
return;
- /* Needed by both icmp_global_allow and icmp_xmit_lock */
+ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
local_bh_disable();
- /* global icmp_msgs_per_sec */
- if (!icmpv4_global_allow(net, type, code))
+ /* is global icmp_msgs_per_sec exhausted ? */
+ if (!icmpv4_global_allow(net, type, code, &apply_ratelimit))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
@@ -443,14 +445,14 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = sock_net_uid(net, NULL);
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.flowi4_tos = ip_hdr(skb)->tos & INET_DSCP_MASK;
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
- if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
+ if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
@@ -496,7 +498,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
- fl4->flowi4_tos = RT_TOS(tos);
+ fl4->flowi4_tos = tos & INET_DSCP_MASK;
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
@@ -545,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
orefdst = skb_in->_skb_refdst; /* save old refdst */
skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
- RT_TOS(tos), rt2->dst.dev);
+ tos, rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
@@ -596,6 +598,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
+ bool apply_ratelimit = false;
struct ipcm_cookie ipc;
struct flowi4 fl4;
__be32 saddr;
@@ -677,7 +680,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
}
}
- /* Needed by both icmp_global_allow and icmp_xmit_lock */
+ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
@@ -685,7 +688,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
* loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
*/
if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
- !icmpv4_global_allow(net, type, code))
+ !icmpv4_global_allow(net, type, code, &apply_ratelimit))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
@@ -744,7 +747,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
goto out_unlock;
/* peer icmp_ratelimit */
- if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
+ if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */
@@ -1487,6 +1490,8 @@ static int __net_init icmp_sk_init(struct net *net)
net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
net->ipv4.sysctl_icmp_ratemask = 0x1818;
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
+ net->ipv4.sysctl_icmp_msgs_per_sec = 1000;
+ net->ipv4.sysctl_icmp_msgs_burst = 50;
return 0;
}
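The reworked limiter splits permission from consumption: icmp_global_allow() refills a per-netns atomic bucket, with a cmpxchg() on the timestamp ensuring that at most one CPU performs the refill for a given interval, and icmp_global_consume() subtracts a small randomized credit only after a packet is actually sent. The same lockless pattern in isolation (a sketch with kernel-style primitives, not the patch's exact code):

static bool ratelimit_allow(atomic_t *credit, u32 *stamp, u32 rate, u32 burst)
{
	u32 now = (u32)jiffies, old_stamp, delta;
	int old, new, incr;

	if (atomic_read(credit) > 0)
		return true;			/* fast path: credit left */

	old_stamp = READ_ONCE(*stamp);
	delta = min_t(u32, now - old_stamp, HZ);
	incr = rate * delta / HZ;
	if (!incr)
		return false;			/* too soon to refill */

	/* Only the winner of this cmpxchg refills, so concurrent
	 * callers cannot multiply the credit.
	 */
	if (cmpxchg(stamp, old_stamp, now) == old_stamp) {
		old = atomic_read(credit);
		do {
			new = min_t(int, old + incr, burst);
		} while (!atomic_try_cmpxchg(credit, &old, new));
	}
	return true;
}

As in the patch, the bucket may briefly go negative after consumption; that is tolerated rather than locked against.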
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 64d07b842e73..2c5632d4fddb 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -236,7 +236,7 @@ static bool inet_bhash2_conflict(const struct sock *sk,
#define sk_for_each_bound_bhash(__sk, __tb2, __tb) \
hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node) \
- sk_for_each_bound(sk2, &(__tb2)->owners)
+ sk_for_each_bound((__sk), &(__tb2)->owners)
/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
@@ -714,6 +714,7 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
out:
release_sock(sk);
if (newsk && mem_cgroup_sockets_enabled) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
int amt = 0;
/* atomically get the memory usage, set and charge the
@@ -731,8 +732,8 @@ out:
}
if (amt)
- mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
- GFP_KERNEL | __GFP_NOFAIL);
+ mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
+ kmem_cache_charge(newsk, gfp);
release_sock(newsk);
}
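The one-character change to sk_for_each_bound_bhash() above fixes macro hygiene: the body iterated over a hardcoded sk2 instead of its __sk parameter, which only compiled because every existing call site happened to pass a variable named sk2. A toy illustration of the hazard (not kernel code):

struct node { struct node *next; int v; };

/* Correct: the body uses the parameter it was given. */
#define for_each_node(__it, __head) \
	for ((__it) = (__head); (__it); (__it) = (__it)->next)

/* Buggy variant: "for (it = (__head); it; it = it->next)" would only
 * compile in scopes that happen to declare a variable named 'it'.
 */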
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9712cdb8087c..67639309163d 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -442,7 +442,7 @@ static int inet_twsk_diag_fill(struct sock *sk,
inet_diag_msg_common_fill(r, sk);
r->idiag_retrans = 0;
- r->idiag_state = tw->tw_substate;
+ r->idiag_state = READ_ONCE(tw->tw_substate);
r->idiag_timer = 3;
tmo = tw->tw_timer.expires - jiffies;
r->idiag_expires = jiffies_delta_to_msecs(tmo);
@@ -1209,7 +1209,7 @@ next_chunk:
if (num < s_num)
goto next_normal;
state = (sk->sk_state == TCP_TIME_WAIT) ?
- inet_twsk(sk)->tw_substate : sk->sk_state;
+ READ_ONCE(inet_twsk(sk)->tw_substate) : sk->sk_state;
if (!(idiag_states & (1 << state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 48d0d494185b..9bfcfd016e18 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -310,7 +310,7 @@ inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
return inet_lhash2_bucket(h, hash);
}
-static inline int compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, const struct net *net,
const unsigned short hnum, const __be32 daddr,
const int dif, const int sdif)
{
@@ -348,7 +348,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
* Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
* the selected sock or an error.
*/
-struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
@@ -374,7 +374,7 @@ EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
*/
/* called with rcu_read_lock() : No refcount taken on the socket */
-static struct sock *inet_lhash2_lookup(struct net *net,
+static struct sock *inet_lhash2_lookup(const struct net *net,
struct inet_listen_hashbucket *ilb2,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
@@ -401,7 +401,7 @@ static struct sock *inet_lhash2_lookup(struct net *net,
return result;
}
-struct sock *inet_lookup_run_sk_lookup(struct net *net,
+struct sock *inet_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
@@ -423,7 +423,7 @@ struct sock *inet_lookup_run_sk_lookup(struct net *net,
return sk;
}
-struct sock *__inet_lookup_listener(struct net *net,
+struct sock *__inet_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
@@ -488,7 +488,7 @@ void sock_edemux(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_edemux);
-struct sock *__inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ba205473522e..5f6fd382af38 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,6 +44,7 @@
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
+#include <net/inet_dscp.h>
/*
Problems & solutions
@@ -930,7 +931,7 @@ static int ipgre_open(struct net_device *dev)
t->parms.iph.daddr,
t->parms.iph.saddr,
t->parms.o_key,
- RT_TOS(t->parms.iph.tos),
+ t->parms.iph.tos & INET_DSCP_MASK,
t->parms.link);
if (IS_ERR(rt))
return -EADDRNOTAVAIL;
@@ -996,7 +997,7 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
- dev->features |= GRE_FEATURES | NETIF_F_LLTX;
+ dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
/* TCP offload with GRE SEQ is not supported, nor can we support 2
@@ -1010,6 +1011,8 @@ static void __gre_tunnel_init(struct net_device *dev)
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->lltx = true;
}
static int ipgre_tunnel_init(struct net_device *dev)
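For reference, the RT_TOS() -> INET_DSCP_MASK conversions in this hunk (and the many like it in the files below) widen the mask from the legacy RFC 1349 TOS bits to the full 6-bit DSCP field. A hedged sketch of the values involved, assuming the usual uapi definitions:

	#define IPTOS_TOS_MASK	0x1E			/* legacy TOS bits */
	#define RT_TOS(tos)	((tos) & IPTOS_TOS_MASK)
	#define INET_DSCP_MASK	0xfc			/* DSCP field, top six bits */

	/* Example: dsfield 0xb8 (DSCP EF / 46):
	 *   RT_TOS(0xb8)          == 0x18  -- upper DSCP bits dropped
	 *   0xb8 & INET_DSCP_MASK == 0xb8  -- full DSCP preserved
	 */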
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d6fbcbd2358a..b6e7d4921309 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -596,9 +596,8 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
{
struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct dst_entry *dst;
@@ -646,9 +645,8 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *curr_dev = NULL;
struct net *curr_net = NULL;
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
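LIST_HEAD() declares and initializes the on-stack list head in a single statement, replacing the two-step pattern removed in both hunks above; the two forms are equivalent:

	LIST_HEAD(sublist);		/* declaration + initialization in one */

	struct list_head sublist;	/* two-step form removed above */
	INIT_LIST_HEAD(&sublist);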
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b90d0f78ac80..49811c9281d4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -77,6 +77,7 @@
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
+#include <net/inet_dscp.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
@@ -493,7 +494,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
- RT_TOS(tos),
+ tos & INET_DSCP_MASK,
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
@@ -1621,7 +1622,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
- RT_TOS(arg->tos),
+ arg->tos & INET_DSCP_MASK,
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
daddr, saddr,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 5cffad42fe8c..d591c73e2c0e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -43,6 +43,7 @@
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>
+#include <net/inet_dscp.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -293,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- RT_TOS(iph->tos), dev_net(dev),
+ iph->tos & INET_DSCP_MASK, dev_net(dev),
tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -609,9 +610,9 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
- tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
- dev_net(dev), 0, skb->mark, skb_get_hash(skb),
- key->flow_flags);
+ tunnel_id_to_key32(key->tun_id),
+ tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
+ skb_get_hash(skb), key->flow_flags);
if (!tunnel_hlen)
tunnel_hlen = ip_encap_hlen(&tun_info->encap);
@@ -772,7 +773,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
- tunnel->parms.o_key, RT_TOS(tos),
+ tunnel->parms.o_key, tos & INET_DSCP_MASK,
dev_net(dev), READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
@@ -1161,7 +1162,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
* Allowing to move it to another netns is clearly unsafe.
*/
if (!IS_ERR(itn->fb_tunnel_dev)) {
- itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ itn->fb_tunnel_dev->netns_local = true;
itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
itn->type = itn->fb_tunnel_dev->type;
@@ -1326,7 +1327,7 @@ int ip_tunnel_init(struct net_device *dev)
tunnel->dev = dev;
tunnel->net = dev_net(dev);
- strcpy(tunnel->parms.name, dev->name);
+ strscpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 14536da9f5dc..f0b4419cef34 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -443,7 +443,7 @@ static int vti_tunnel_init(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
return ip_tunnel_init(dev);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 923a2ef68c2f..dc0db5895e0e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -378,7 +378,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->type = ARPHRD_TUNNEL;
dev->flags = IFF_NOARP;
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
netif_keep_dst(dev);
dev->features |= IPIP_FEATURES;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 6c750bd13dd8..089864c6a35e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -62,6 +62,7 @@
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>
+#include <net/inet_dscp.h>
#include <linux/nospec.h>
@@ -536,7 +537,7 @@ static void reg_vif_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
dev->needs_free_netdev = true;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
@@ -1868,7 +1869,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
vif->remote, vif->local,
0, 0,
IPPROTO_IPIP,
- RT_TOS(iph->tos), vif->link);
+ iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
goto out_free;
encap = sizeof(struct iphdr);
@@ -1876,7 +1877,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
0, 0,
IPPROTO_IPIP,
- RT_TOS(iph->tos), vif->link);
+ iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
goto out_free;
}
@@ -2080,7 +2081,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = RT_TOS(iph->tos),
+ .flowi4_tos = iph->tos & INET_DSCP_MASK,
.flowi4_oif = (rt_is_output_route(rt) ?
skb->dev->ifindex : 0),
.flowi4_iif = (rt_is_output_route(rt) ?
@@ -2406,8 +2407,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
errout:
kfree_skb(skb);
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 591a2737808e..e0aab66cd925 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -14,6 +14,7 @@
#include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/inet_dscp.h>
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
@@ -43,7 +44,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
*/
fl4.daddr = iph->daddr;
fl4.saddr = saddr;
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = iph->tos & INET_DSCP_MASK;
fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 14365b20f1c5..1cdd9c28ab2d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -826,7 +826,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
- strcpy(info.name, name);
+ strscpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
@@ -1547,7 +1547,7 @@ int arpt_register_table(struct net *net,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
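kmemdup_array(), used here and in the matching ip_tables.c hunk below, is the overflow-checked counterpart of the open-coded kmemdup(src, size * n) pattern, mirroring the kmalloc_array() vs kmalloc() split. A hedged sketch of what it does:

	/* Illustrative body: size_mul() saturates at SIZE_MAX on overflow,
	 * so an overflowing count makes the allocation fail instead of
	 * silently duplicating a truncated buffer. */
	void *kmemdup_array(const void *src, size_t count,
			    size_t element_size, gfp_t gfp)
	{
		return kmemdup(src, size_mul(element_size, count), gfp);
	}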
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fe89a056eb06..3d101613f27f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -981,7 +981,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
- strcpy(info.name, name);
+ strscpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
@@ -1767,7 +1767,7 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index ded5bef02f77..1ce7a1655b97 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/inet_dscp.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip_fib.h>
@@ -75,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
+ flow.flowi4_tos = iph->tos & INET_DSCP_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index 6cc5743c553a..f4aed0789d69 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -15,6 +15,7 @@
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/inet_dscp.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
@@ -32,7 +33,7 @@ static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
fl4.flowi4_oif = oif;
fl4.daddr = gw->s_addr;
- fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_tos = iph->tos & INET_DSCP_MASK;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index a522c3a3be52..ef5dd88107dd 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -40,13 +40,13 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
sizeof(struct in_addr));
if (err < 0)
return err;
if (tb[NFTA_DUP_SREG_DEV])
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV],
&priv->sreg_dev, sizeof(int));
return err;
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 9eee535c64dd..00da1332bbf1 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -10,6 +10,7 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/route.h>
@@ -22,8 +23,6 @@ static __be32 get_saddr(__be32 addr)
return addr;
}
-#define DSCP_BITS 0xfc
-
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
@@ -110,7 +109,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
if (priv->flags & NFTA_FIB_F_MARK)
fl4.flowi4_mark = pkt->skb->mark;
- fl4.flowi4_tos = iph->tos & DSCP_BITS;
+ fl4.flowi4_tos = iph->tos & INET_DSCP_MASK;
if (priv->flags & NFTA_FIB_F_DADDR) {
fl4.daddr = iph->daddr;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 6b9787ee8601..93aaea0006ba 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -865,15 +865,18 @@ out:
}
static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
- u32 op_flags)
+ u32 op_flags, u32 *resp_op_flags)
{
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p;
size_t len = nhg->num_nh * sizeof(*p);
struct nlattr *nla;
u16 group_type = 0;
+ u16 weight;
int i;
+ *resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;
+
if (nhg->hash_threshold)
group_type = NEXTHOP_GRP_TYPE_MPATH;
else if (nhg->resilient)
@@ -888,9 +891,12 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
p = nla_data(nla);
for (i = 0; i < nhg->num_nh; ++i) {
+ weight = nhg->nh_entries[i].weight - 1;
+
*p++ = (struct nexthop_grp) {
.id = nhg->nh_entries[i].nh->id,
- .weight = nhg->nh_entries[i].weight - 1,
+ .weight = weight,
+ .weight_high = weight >> 8,
};
}
@@ -934,10 +940,12 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nh->is_group) {
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ u32 resp_op_flags = 0;
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure;
- if (nla_put_nh_group(skb, nh, op_flags))
+ if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
+ nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
goto nla_put_failure;
goto out;
}
@@ -1050,7 +1058,9 @@ static size_t nh_nlmsg_size(struct nexthop *nh)
sz += nla_total_size(4); /* NHA_ID */
if (nh->is_group)
- sz += nh_nlmsg_size_grp(nh);
+ sz += nh_nlmsg_size_grp(nh) +
+ nla_total_size(4) + /* NHA_OP_FLAGS */
+ 0;
else
sz += nh_nlmsg_size_single(nh);
@@ -1080,8 +1090,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
info->nlh, gfp_any());
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
+ rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
@@ -1201,8 +1210,7 @@ static void nexthop_bucket_notify(struct nh_res_table *res_table,
rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
+ rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
@@ -1280,11 +1288,14 @@ static int nh_check_attr_group(struct net *net,
nhg = nla_data(tb[NHA_GROUP]);
for (i = 0; i < len; ++i) {
- if (nhg[i].resvd1 || nhg[i].resvd2) {
- NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
+ if (nhg[i].resvd2) {
+ NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
return -EINVAL;
}
- if (nhg[i].weight > 254) {
+ if (nexthop_grp_weight(&nhg[i]) == 0) {
+ /* 0xffff got passed in, representing weight of 0x10000,
+ * which is too heavy.
+ */
NL_SET_ERR_MSG(extack, "Invalid value for weight");
return -EINVAL;
}
@@ -1880,9 +1891,9 @@ static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
static void nh_res_group_rebalance(struct nh_group *nhg,
struct nh_res_table *res_table)
{
- int prev_upper_bound = 0;
- int total = 0;
- int w = 0;
+ u16 prev_upper_bound = 0;
+ u32 total = 0;
+ u32 w = 0;
int i;
INIT_LIST_HEAD(&res_table->uw_nh_entries);
@@ -1892,11 +1903,12 @@ static void nh_res_group_rebalance(struct nh_group *nhg,
for (i = 0; i < nhg->num_nh; ++i) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
- int upper_bound;
+ u16 upper_bound;
+ u64 btw;
w += nhge->weight;
- upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
- total);
+ btw = ((u64)res_table->num_nh_buckets) * w;
+ upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
nhge->res.wants_buckets = upper_bound - prev_upper_bound;
prev_upper_bound = upper_bound;
@@ -1962,8 +1974,8 @@ static void replace_nexthop_grp_res(struct nh_group *oldg,
static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
- int total = 0;
- int w = 0;
+ u32 total = 0;
+ u32 w = 0;
int i;
for (i = 0; i < nhg->num_nh; ++i)
@@ -1971,7 +1983,7 @@ static void nh_hthr_group_rebalance(struct nh_group *nhg)
for (i = 0; i < nhg->num_nh; ++i) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
- int upper_bound;
+ u32 upper_bound;
w += nhge->weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
@@ -2713,7 +2725,8 @@ static struct nexthop *nexthop_create_group(struct net *net,
goto out_no_nh;
}
nhg->nh_entries[i].nh = nhe;
- nhg->nh_entries[i].weight = entry[i].weight + 1;
+ nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
+
list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
nhg->nh_entries[i].nh_parent = nh;
}
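The weight handling above relies on a split 16-bit encoding introduced alongside weight_high: the stored value is (weight - 1), with the low byte in ->weight and the high byte in the formerly reserved field. A hedged sketch of the decode helper the hunks call (body illustrative, taken on trust from the same series):

	static inline u16 nexthop_grp_weight(const struct nexthop_grp *entry)
	{
		/* 0x0000 decodes to weight 1; 0xffff would decode to
		 * 0x10000, which wraps the u16 to 0 -- exactly what the
		 * "Invalid value for weight" check above rejects. */
		return ((entry->weight_high << 8) | entry->weight) + 1;
	}

With weights now up to 0x10000 per entry, the rebalance arithmetic above is widened to u32/u64 so that num_nh_buckets * total weight can no longer overflow a signed int.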
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 13c0f1d455f3..723ac9181558 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -512,7 +512,7 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
sk->sk_protocol;
}
- flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
+ flowi4_init_output(fl4, oif, mark, tos & INET_DSCP_MASK, scope,
prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
@@ -541,7 +541,7 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
- ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
+ ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk),
inet_test_bit(HDRINCL, sk) ?
IPPROTO_RAW : sk->sk_protocol,
@@ -1263,7 +1263,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = iph->tos & IPTOS_RT_MASK,
+ .flowi4_tos = iph->tos & INET_DSCP_MASK,
.flowi4_oif = rt->dst.dev->ifindex,
.flowi4_iif = skb->dev->ifindex,
.flowi4_mark = skb->mark,
@@ -2160,7 +2160,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (rt->rt_type != RTN_LOCAL)
goto skip_validate_source;
- tos &= IPTOS_RT_MASK;
+ tos &= INET_DSCP_MASK;
err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
if (err < 0)
goto martian_source;
@@ -2470,7 +2470,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct fib_result res;
int err;
- tos &= IPTOS_RT_MASK;
+ tos &= INET_DSCP_MASK;
rcu_read_lock();
err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
rcu_read_unlock();
@@ -2618,7 +2618,7 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
struct rtable *rth;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
- fl4->flowi4_tos &= IPTOS_RT_MASK;
+ fl4->flowi4_tos &= INET_DSCP_MASK;
rcu_read_lock();
rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
@@ -3261,7 +3261,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.daddr = dst;
fl4.saddr = src;
- fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
+ fl4.flowi4_tos = rtm->rtm_tos & INET_DSCP_MASK;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
@@ -3286,7 +3286,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
skb->dev = dev;
skb->mark = mark;
err = ip_route_input_rcu(skb, dst, src,
- rtm->rtm_tos & IPTOS_RT_MASK, dev,
+ rtm->rtm_tos & INET_DSCP_MASK, dev,
&res);
rt = skb_rtable(skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4af0c234d8d7..a79b2a52ce01 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -601,22 +601,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_tcp_available_ulp,
},
{
- .procname = "icmp_msgs_per_sec",
- .data = &sysctl_icmp_msgs_per_sec,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
- .procname = "icmp_msgs_burst",
- .data = &sysctl_icmp_msgs_burst,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -702,6 +686,22 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "icmp_msgs_per_sec",
+ .data = &init_net.ipv4.sysctl_icmp_msgs_per_sec,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "icmp_msgs_burst",
+ .data = &init_net.ipv4.sysctl_icmp_msgs_burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
.procname = "ping_group_range",
.data = &init_net.ipv4.ping_group_range.range,
.maxlen = sizeof(gid_t)*2,
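With icmp_msgs_per_sec and icmp_msgs_burst moved from the global ipv4_table to ipv4_net_table, each network namespace now carries its own copy, initialized to 1000 and 50 in the icmp_sk_init() hunk near the top of this patch. In-kernel users read them through the namespace, roughly:

	/* Per-netns access pattern (illustrative): */
	struct net *net = dev_net(skb->dev);
	int per_sec = READ_ONCE(net->ipv4.sysctl_icmp_msgs_per_sec);
	int burst   = READ_ONCE(net->ipv4.sysctl_icmp_msgs_burst);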
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 831a18dc7aa6..4f77bd862e95 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -285,6 +285,8 @@
#include <trace/events/tcp.h>
#include <net/rps.h>
+#include "../core/devmem.h"
+
/* Track pending CMSGs. */
enum {
TCP_CMSG_INQ = 1,
@@ -471,6 +473,7 @@ void tcp_init_sock(struct sock *sk)
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
sk_sockets_allocated_inc(sk);
+ xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1);
}
EXPORT_SYMBOL(tcp_init_sock);
@@ -2160,6 +2163,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
skb = tcp_recv_skb(sk, seq, &offset);
}
+ if (!skb_frags_readable(skb))
+ break;
+
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, tss);
zc->msg_flags |= TCP_CMSG_TS;
@@ -2177,6 +2183,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
break;
}
page = skb_frag_page(frags);
+ if (WARN_ON_ONCE(!page))
+ break;
+
prefetchw(page);
pages[pages_to_map++] = page;
length += PAGE_SIZE;
@@ -2235,6 +2244,7 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss)
{
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
bool has_timestamping = false;
if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
@@ -2274,14 +2284,18 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
}
}
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[2] = (struct timespec64) {0};
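The new gating means a reporting flag alone no longer surfaces RX timestamps once SOF_TIMESTAMPING_OPT_RX_FILTER is set: a timestamp is reported only if the matching RX generation flag was also requested. A hedged userspace sketch, assuming the flag names used in this hunk:

	/* Report software timestamps, but only for packets whose RX
	 * software timestamping was explicitly requested: */
	int val = SOF_TIMESTAMPING_SOFTWARE |
		  SOF_TIMESTAMPING_RX_SOFTWARE |
		  SOF_TIMESTAMPING_OPT_RX_FILTER;
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));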
@@ -2317,6 +2331,220 @@ static int tcp_inq_hint(struct sock *sk)
return inq;
}
+/* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */
+struct tcp_xa_pool {
+ u8 max; /* max <= MAX_SKB_FRAGS */
+ u8 idx; /* idx <= max */
+ __u32 tokens[MAX_SKB_FRAGS];
+ netmem_ref netmems[MAX_SKB_FRAGS];
+};
+
+static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p)
+{
+ int i;
+
+ /* Commit part that has been copied to user space. */
+ for (i = 0; i < p->idx; i++)
+ __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY,
+ (__force void *)p->netmems[i], GFP_KERNEL);
+ /* Rollback what has been pre-allocated and is no longer needed. */
+ for (; i < p->max; i++)
+ __xa_erase(&sk->sk_user_frags, p->tokens[i]);
+
+ p->max = 0;
+ p->idx = 0;
+}
+
+static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p)
+{
+ if (!p->max)
+ return;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ xa_unlock_bh(&sk->sk_user_frags);
+}
+
+static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p,
+ unsigned int max_frags)
+{
+ int err, k;
+
+ if (p->idx < p->max)
+ return 0;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ for (k = 0; k < max_frags; k++) {
+ err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k],
+ XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
+ if (err)
+ break;
+ }
+
+ xa_unlock_bh(&sk->sk_user_frags);
+
+ p->max = k;
+ p->idx = 0;
+ return k ? 0 : err;
+}
+
+/* On error, returns the -errno. On success, returns number of bytes sent to the
+ * user. May not consume all of @remaining_len.
+ */
+static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ unsigned int offset, struct msghdr *msg,
+ int remaining_len)
+{
+ struct dmabuf_cmsg dmabuf_cmsg = { 0 };
+ struct tcp_xa_pool tcp_xa_pool;
+ unsigned int start;
+ int i, copy, n;
+ int sent = 0;
+ int err = 0;
+
+ tcp_xa_pool.max = 0;
+ tcp_xa_pool.idx = 0;
+ do {
+ start = skb_headlen(skb);
+
+ if (skb_frags_readable(skb)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Copy header. */
+ copy = start - offset;
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ n = copy_to_iter(skb->data + offset, copy,
+ &msg->msg_iter);
+ if (n != copy) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ offset += copy;
+ remaining_len -= copy;
+
+ /* First a dmabuf_cmsg for # bytes copied to user
+ * buffer.
+ */
+ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ dmabuf_cmsg.frag_size = copy;
+ err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
+ sizeof(dmabuf_cmsg), &dmabuf_cmsg);
+ if (err || msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~MSG_CTRUNC;
+ if (!err)
+ err = -ETOOSMALL;
+ goto out;
+ }
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+
+ /* After that, send information of the dmabuf pages through a
+ * sequence of cmsgs.
+ */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct net_iov *niov;
+ u64 frag_offset;
+ int end;
+
+ /* !skb_frags_readable() should indicate that ALL the
+ * frags in this skb are dmabuf net_iovs. We're checking
+ * for that flag above, but also check individual frags
+ * here. If the tcp stack is not setting
+ * skb_frags_readable() correctly, we still don't want
+ * to crash here.
+ */
+ if (!skb_frag_net_iov(frag)) {
+ net_err_ratelimited("Found non-dmabuf skb with net_iov");
+ err = -ENODEV;
+ goto out;
+ }
+
+ niov = skb_frag_net_iov(frag);
+ end = start + skb_frag_size(frag);
+ copy = end - offset;
+
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ frag_offset = net_iov_virtual_addr(niov) +
+ skb_frag_off(frag) + offset -
+ start;
+ dmabuf_cmsg.frag_offset = frag_offset;
+ dmabuf_cmsg.frag_size = copy;
+ err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
+ skb_shinfo(skb)->nr_frags - i);
+ if (err)
+ goto out;
+
+ /* Will perform the exchange later */
+ dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx];
+ dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov);
+
+ offset += copy;
+ remaining_len -= copy;
+
+ err = put_cmsg(msg, SOL_SOCKET,
+ SO_DEVMEM_DMABUF,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err || msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~MSG_CTRUNC;
+ if (!err)
+ err = -ETOOSMALL;
+ goto out;
+ }
+
+ atomic_long_inc(&niov->pp_ref_count);
+ tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+ start = end;
+ }
+
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!remaining_len)
+ goto out;
+
+ /* If remaining_len is not satisfied yet, move on to the next
+ * skb in the frag_list to keep copying.
+ */
+ skb = skb_shinfo(skb)->frag_list ?: skb->next;
+
+ offset = offset - start;
+ } while (skb);
+
+ if (remaining_len) {
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!sent)
+ sent = err;
+
+ return sent;
+}
+
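tcp_recvmsg_dmabuf() is the kernel side of devmem receive: payload stays in the device dmabuf, and userspace only gets (offset, size, token) descriptors via cmsgs. A hedged sketch of the matching userspace loop, using the SO_DEVMEM_* and MSG_SOCK_DEVMEM names visible above (error handling elided; returning tokens via a DONTNEED setsockopt from the same series is an assumption):

	char ctrl[CMSG_SPACE(sizeof(struct dmabuf_cmsg)) * 16];
	struct msghdr msg = {
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	ssize_t n = recvmsg(fd, &msg, MSG_SOCK_DEVMEM);

	for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm;
	     cm = CMSG_NXTHDR(&msg, cm)) {
		struct dmabuf_cmsg *dc = (struct dmabuf_cmsg *)CMSG_DATA(cm);

		if (cm->cmsg_level != SOL_SOCKET)
			continue;
		if (cm->cmsg_type == SO_DEVMEM_LINEAR) {
			/* dc->frag_size bytes were copied into the normal
			 * msg_iter buffer (linear part of the skb). */
		} else if (cm->cmsg_type == SO_DEVMEM_DMABUF) {
			/* Payload lives at dc->frag_offset in the bound
			 * dmabuf; hand dc->frag_token back once the data
			 * has been consumed. */
		}
	}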
/*
* This routine copies from a sock struct into the user buffer.
*
@@ -2330,6 +2558,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
int *cmsg_flags)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int last_copied_dmabuf = -1; /* uninitialized */
int copied = 0;
u32 peek_seq;
u32 *seq;
@@ -2509,15 +2738,44 @@ found_ok_skb:
}
if (!(flags & MSG_TRUNC)) {
- err = skb_copy_datagram_msg(skb, offset, msg, used);
- if (err) {
- /* Exception. Bailout! */
- if (!copied)
- copied = -EFAULT;
+ if (last_copied_dmabuf != -1 &&
+ last_copied_dmabuf != !skb_frags_readable(skb))
break;
+
+ if (skb_frags_readable(skb)) {
+ err = skb_copy_datagram_msg(skb, offset, msg,
+ used);
+ if (err) {
+ /* Exception. Bailout! */
+ if (!copied)
+ copied = -EFAULT;
+ break;
+ }
+ } else {
+ if (!(flags & MSG_SOCK_DEVMEM)) {
+ /* dmabuf skbs can only be received
+ * with the MSG_SOCK_DEVMEM flag.
+ */
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+
+ err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
+ used);
+ if (err <= 0) {
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+ used = err;
}
}
+ last_copied_dmabuf = !skb_frags_readable(skb);
+
WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
@@ -2833,7 +3091,7 @@ void __tcp_close(struct sock *sk, long timeout)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, sk->sk_allocation,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
@@ -2908,7 +3166,7 @@ adjudge_to_death:
if (READ_ONCE(tp->linger2) < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_ABORT_ON_LINGER);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONLINGER);
} else {
@@ -2927,7 +3185,7 @@ adjudge_to_death:
if (tcp_check_oom(sk, 0)) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONMEMORY);
} else if (!check_net(sock_net(sk))) {
@@ -3025,13 +3283,16 @@ int tcp_disconnect(struct sock *sk, int flags)
inet_csk_listen_stop(sk);
} else if (unlikely(tp->repair)) {
WRITE_ONCE(sk->sk_err, ECONNABORTED);
- } else if (tcp_need_reset(old_state) ||
- (tp->snd_nxt != tp->write_seq &&
- (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
+ } else if (tcp_need_reset(old_state)) {
+ tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE);
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
+ } else if (tp->snd_nxt != tp->write_seq &&
+ (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
/* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
- tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_NOT_SPECIFIED);
+ tcp_send_active_reset(sk, gfp_any(),
+ SK_RST_REASON_TCP_DISCONNECT_WITH_DATA);
WRITE_ONCE(sk->sk_err, ECONNRESET);
} else if (old_state == TCP_SYN_SENT)
WRITE_ONCE(sk->sk_err, ECONNRESET);
@@ -4655,7 +4916,7 @@ int tcp_abort(struct sock *sk, int err)
if (tcp_need_reset(sk->sk_state))
tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_STATE);
tcp_done_with_error(sk, err);
bh_unlock_sock(sk);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index fe6178715ba0..e7658c5d6b79 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -30,7 +30,7 @@ void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
}
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
- struct sk_msg *msg, u32 apply_bytes, int flags)
+ struct sk_msg *msg, u32 apply_bytes)
{
bool apply = apply_bytes;
struct scatterlist *sge;
@@ -167,7 +167,7 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
if (unlikely(!psock))
return -EPIPE;
- ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
+ ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes) :
tcp_bpf_push_locked(sk, msg, bytes, flags, false);
sk_psock_put(sk, psock);
return ret;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 52b1f2665dfa..81b96331b2bb 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -185,7 +185,7 @@ static inline void htcp_alpha_update(struct htcp *ca)
u32 scale = (HZ << 3) / (10 * minRTT);
/* clamping ratio to interval [0.5,10]<<3 */
- scale = min(max(scale, 1U << 2), 10U << 3);
+ scale = clamp(scale, 1U << 2, 10U << 3);
factor = (factor << 3) / scale;
if (!factor)
factor = 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e37488d3453f..9f314dfa1490 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5391,6 +5391,9 @@ restart:
for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
n = tcp_skb_next(skb, list);
+ if (!skb_frags_readable(skb))
+ goto skip_this;
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list, root);
@@ -5411,17 +5414,20 @@ restart:
break;
}
- if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
+ if (n && n != tail && skb_frags_readable(n) &&
+ tcp_skb_can_collapse_rx(skb, n) &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
}
+skip_this:
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
if (end_of_skbs ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
return;
__skb_queue_head_init(&tmp);
@@ -5463,7 +5469,8 @@ restart:
if (!skb ||
skb == tail ||
!tcp_skb_can_collapse_rx(nskb, skb) ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
goto end;
}
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a4e510846905..5afe5e57c89b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -79,6 +79,7 @@
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>
+#include <linux/skbuff_ref.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
@@ -120,6 +121,9 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
struct tcp_sock *tp = tcp_sk(sk);
int ts_recent_stamp;
+ if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2)
+ reuse = 0;
+
if (reuse == 2) {
/* Still does not detect *everything* that goes through
* lo, since we require a loopback src or dst address
@@ -1070,7 +1074,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
}
tcp_v4_send_ack(sk, skb,
- tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcptw->tw_snd_nxt, READ_ONCE(tcptw->tw_rcv_nxt),
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
READ_ONCE(tcptw->tw_ts_recent),
@@ -2509,10 +2513,25 @@ static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
}
#endif
+static void tcp_release_user_frags(struct sock *sk)
+{
+#ifdef CONFIG_PAGE_POOL
+ unsigned long index;
+ void *netmem;
+
+ xa_for_each(&sk->sk_user_frags, index, netmem)
+ WARN_ON_ONCE(!napi_pp_put_page((__force netmem_ref)netmem));
+#endif
+}
+
void tcp_v4_destroy_sock(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ tcp_release_user_frags(sk);
+
+ xa_destroy(&sk->sk_user_frags);
+
trace_tcp_destroy_sock(sk);
tcp_clear_xmit_timers(sk);
@@ -2945,7 +2964,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
- i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+ i, src, srcp, dest, destp, READ_ONCE(tw->tw_substate), 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
refcount_read(&tw->tw_refcnt), tw);
}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index b01eb6d94413..95669935494e 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -617,9 +617,13 @@ static struct genl_family tcp_metrics_nl_family;
static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
[TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
- [TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
- .len = sizeof(struct in6_addr), },
+ [TCP_METRICS_ATTR_ADDR_IPV6] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+
[TCP_METRICS_ATTR_SADDR_IPV4] = { .type = NLA_U32, },
+ [TCP_METRICS_ATTR_SADDR_IPV6] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+
/* Following attributes are not received for GET/DEL,
* we keep them for reference
*/
@@ -811,8 +815,6 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
if (a) {
struct in6_addr in6;
- if (nla_len(a) != sizeof(struct in6_addr))
- return -EINVAL;
in6 = nla_get_in6_addr(a);
inetpeer_set_addr_v6(addr, &in6);
if (hash)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a19a9dbd3409..bb1fe1ba867a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -52,16 +52,17 @@ tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
return TCP_TW_SUCCESS;
}
-static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
+static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
+ u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
ao = rcu_dereference(tcptw->ao_info);
- if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
+ if (unlikely(ao && seq < rcv_nxt))
WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
- tcptw->tw_rcv_nxt = seq;
+ WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}
/*
@@ -98,8 +99,9 @@ enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th, u32 *tw_isn)
{
- struct tcp_options_received tmp_opt;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+ u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
+ struct tcp_options_received tmp_opt;
bool paws_reject = false;
int ts_recent_stamp;
@@ -117,26 +119,26 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
}
}
- if (tw->tw_substate == TCP_FIN_WAIT2) {
+ if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tcptw->tw_rcv_nxt,
- tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
+ rcv_nxt,
+ rcv_nxt + tcptw->tw_rcv_wnd))
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
if (th->rst)
goto kill;
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
+ if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
return TCP_TW_RST;
/* Dup ACK? */
if (!th->ack ||
- !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+ !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
@@ -146,12 +148,13 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
* reset.
*/
if (!th->fin ||
- TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
+ TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
return TCP_TW_RST;
/* FIN arrived, enter true time-wait state. */
- tw->tw_substate = TCP_TIME_WAIT;
- twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
+ WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
+ twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
+ rcv_nxt);
if (tmp_opt.saw_tstamp) {
WRITE_ONCE(tcptw->tw_ts_recent_stamp,
@@ -182,7 +185,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
*/
if (!paws_reject &&
- (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
+ (TCP_SKB_CB(skb)->seq == rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
@@ -229,7 +232,7 @@ kill:
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
- (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
+ (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
(tmp_opt.saw_tstamp &&
(s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
@@ -625,6 +628,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+ xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);
+
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 16c48df8df4c..4fd746bd4d54 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2344,7 +2344,8 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (unlikely(TCP_SKB_CB(skb)->eor) ||
tcp_has_tx_tstamp(skb) ||
- !skb_pure_zcopy_same(skb, next))
+ !skb_pure_zcopy_same(skb, next) ||
+ skb_frags_readable(skb) != skb_frags_readable(next))
return false;
len -= skb->len;
@@ -3264,6 +3265,8 @@ static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
return false;
if (skb_cloned(skb))
return false;
+ if (!skb_frags_readable(skb))
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
return false;
@@ -3649,7 +3652,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority,
/* skb of trace_tcp_send_reset() keeps the skb that caused RST,
* skb here is different to the troublesome skb, so use NULL
*/
- trace_tcp_send_reset(sk, NULL, SK_RST_REASON_NOT_SPECIFIED);
+ trace_tcp_send_reset(sk, NULL, reason);
}
/* Send a crossed SYN-ACK during socket establishment.
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4d40615dc8fc..79064580c8c0 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -125,7 +125,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
do_reset = true;
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY);
tcp_done(sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
@@ -282,6 +282,7 @@ static int tcp_write_timeout(struct sock *sk)
expired = retransmits_timed_out(sk, retry_until,
READ_ONCE(icsk->icsk_user_timeout));
tcp_fastopen_active_detect_blackhole(sk, expired);
+ mptcp_active_detect_blackhole(sk, expired);
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
@@ -779,7 +780,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
goto out;
}
}
- tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED);
+ tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
goto death;
}
@@ -807,7 +808,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
(user_timeout == 0 &&
icsk->icsk_probes_out >= keepalive_probes(tp))) {
tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
+ SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
tcp_write_err(sk);
goto out;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 49c622e743e8..8accbf4cb295 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -115,6 +115,7 @@
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
#include <net/gro.h>
+#include <net/inet_dscp.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
@@ -365,7 +366,7 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
-static int compute_score(struct sock *sk, struct net *net,
+static int compute_score(struct sock *sk, const struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
int dif, int sdif)
@@ -420,7 +421,7 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
}
/* called with rcu_read_lock() */
-static struct sock *udp4_lib_lookup2(struct net *net,
+static struct sock *udp4_lib_lookup2(const struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum,
int dif, int sdif,
@@ -480,7 +481,7 @@ rescore:
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
-struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport, int dif,
int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
@@ -561,7 +562,7 @@ struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
* Does increment socket refcount.
*/
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
-struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
struct sock *sk;
@@ -2618,7 +2619,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
if (!inet_sk(sk)->inet_daddr && in_dev)
return ip_mc_validate_source(skb, iph->daddr,
iph->saddr,
- iph->tos & IPTOS_RT_MASK,
+ iph->tos & INET_DSCP_MASK,
skb->dev, in_dev, &itag);
}
return 0;
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index e4e0fa869fa4..619a53eb672d 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -6,6 +6,7 @@
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
+#include <net/inet_dscp.h>
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
@@ -232,7 +233,7 @@ struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
fl4.saddr = key->u.ipv4.src;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
- fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_tos = tos & INET_DSCP_MASK;
fl4.flowi4_flags = key->flow_flags;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f70d8757af1a..d680beb91b0a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -92,8 +92,6 @@
#include <linux/export.h>
#include <linux/ioam6.h>
-#define INFINITY_LIFE_TIME 0xFFFFFFFF
-
#define IPV6_MAX_STRLEN \
sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
@@ -239,6 +237,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
+ .ra_honor_pio_pflag = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -302,6 +301,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
+ .ra_honor_pio_pflag = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -2762,6 +2762,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
u32 addr_flags = 0;
struct inet6_dev *in6_dev;
struct net *net = dev_net(dev);
+ bool ignore_autoconf = false;
pinfo = (struct prefix_info *) opt;
@@ -2864,7 +2865,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
/* Try to figure out our local address for this prefix */
- if (pinfo->autoconf && in6_dev->cnf.autoconf) {
+ ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
+ if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
struct in6_addr addr;
bool tokenized = false, dev_addr_generated = false;
@@ -5617,8 +5619,7 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
@@ -6173,8 +6174,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
}
static inline size_t inet6_prefix_nlmsg_size(void)
@@ -6241,8 +6241,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
}
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -6926,6 +6925,15 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ {
+ .procname = "ra_honor_pio_pflag",
+ .data = &ipv6_devconf.ra_honor_pio_pflag,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
#ifdef CONFIG_IPV6_ROUTER_PREF
{
.procname = "accept_ra_rtr_pref",
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 90d2c7e3f5e9..ba69b86f1c7d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -708,6 +708,7 @@ const struct proto_ops inet6_stream_ops = {
.splice_eof = inet_splice_eof,
.sendmsg_locked = tcp_sendmsg_locked,
.splice_read = tcp_splice_read,
+ .set_peek_off = sk_set_peek_off,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.peek_len = tcp_peek_len,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 3920e8aa1031..b2400c226a32 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -132,7 +132,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- skb_page_unref(sg_page(sg), skb->pp_recycle);
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET6_ESPINTCP
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 9e254de7462f..04a9ed5e8310 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -27,6 +27,7 @@ struct fib6_rule {
struct rt6key src;
struct rt6key dst;
dscp_t dscp;
+ u8 dscp_full:1; /* DSCP or TOS selector */
};
static bool fib6_rule_matchall(const struct fib_rule *rule)
@@ -345,6 +346,20 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule,
return 1;
}
+static int fib6_nl2rule_dscp(const struct nlattr *nla, struct fib6_rule *rule6,
+ struct netlink_ext_ack *extack)
+{
+ if (rule6->dscp) {
+ NL_SET_ERR_MSG(extack, "Cannot specify both TOS and DSCP");
+ return -EINVAL;
+ }
+
+ rule6->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2);
+ rule6->dscp_full = true;
+
+ return 0;
+}
+
static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -361,6 +376,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
}
rule6->dscp = inet_dsfield_to_dscp(frh->tos);
+ if (tb[FRA_DSCP] && fib6_nl2rule_dscp(tb[FRA_DSCP], rule6, extack) < 0)
+ goto errout;
+
if (rule->action == FR_ACT_TO_TBL && !rule->l3mdev) {
if (rule->table == RT6_TABLE_UNSPEC) {
NL_SET_ERR_MSG(extack, "Invalid table");
@@ -413,9 +431,19 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->dst_len && (rule6->dst.plen != frh->dst_len))
return 0;
- if (frh->tos && inet_dscp_to_dsfield(rule6->dscp) != frh->tos)
+ if (frh->tos &&
+ (rule6->dscp_full ||
+ inet_dscp_to_dsfield(rule6->dscp) != frh->tos))
return 0;
+ if (tb[FRA_DSCP]) {
+ dscp_t dscp;
+
+ dscp = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP]) << 2);
+ if (!rule6->dscp_full || rule6->dscp != dscp)
+ return 0;
+ }
+
if (frh->src_len &&
nla_memcmp(tb[FRA_SRC], &rule6->src.addr, sizeof(struct in6_addr)))
return 0;
@@ -434,7 +462,15 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->dst_len = rule6->dst.plen;
frh->src_len = rule6->src.plen;
- frh->tos = inet_dscp_to_dsfield(rule6->dscp);
+
+ if (rule6->dscp_full) {
+ frh->tos = 0;
+ if (nla_put_u8(skb, FRA_DSCP,
+ inet_dscp_to_dsfield(rule6->dscp) >> 2))
+ goto nla_put_failure;
+ } else {
+ frh->tos = inet_dscp_to_dsfield(rule6->dscp);
+ }
if ((rule6->dst.plen &&
nla_put_in6_addr(skb, FRA_DST, &rule6->dst.addr)) ||
@@ -450,7 +486,8 @@ nla_put_failure:
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
- + nla_total_size(16); /* src */
+ + nla_total_size(16) /* src */
+ + nla_total_size(1); /* dscp */
}
static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
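The dscp_full bit records which uapi selector configured the rule, since FRA_DSCP carries a bare 6-bit DSCP value while frh->tos carries a dsfield-style byte; internally both are normalized to dscp_t, hence the shifts seen in the hunks above:

	/* FRA_DSCP (6-bit value) -> internal dsfield-aligned dscp_t: */
	rule6->dscp = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP]) << 2);

	/* ... and back when dumping the rule: */
	u8 uapi_dscp = inet_dscp_to_dsfield(rule6->dscp) >> 2;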
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7b31674644ef..071b0bc1179d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type)
return false;
}
-static bool icmpv6_global_allow(struct net *net, int type)
+static bool icmpv6_global_allow(struct net *net, int type,
+ bool *apply_ratelimit)
{
if (icmpv6_mask_allow(net, type))
return true;
- if (icmp_global_allow())
+ if (icmp_global_allow(net)) {
+ *apply_ratelimit = true;
return true;
-
+ }
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
return false;
}
@@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type)
* Check the ICMP output rate limit
*/
static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
- struct flowi6 *fl6)
+ struct flowi6 *fl6, bool apply_ratelimit)
{
struct net *net = sock_net(sk);
struct dst_entry *dst;
bool res = false;
- if (icmpv6_mask_allow(net, type))
+ if (!apply_ratelimit)
return true;
/*
@@ -228,6 +230,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (!res)
__ICMP6_INC_STATS(net, ip6_dst_idev(dst),
ICMP6_MIB_RATELIMITHOST);
+ else
+ icmp_global_consume(net);
dst_release(dst);
return res;
}
@@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
struct net *net;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
+ bool apply_ratelimit = false;
struct dst_entry *dst;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
@@ -533,11 +538,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
return;
}
- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
+ /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */
- if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
+ if (!(skb->dev->flags & IFF_LOOPBACK) &&
+ !icmpv6_global_allow(net, type, &apply_ratelimit))
goto out_bh_enable;
mip6_addr_swap(skb, parm);
@@ -575,7 +581,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
np = inet6_sk(sk);
- if (!icmpv6_xrlim_allow(sk, type, &fl6))
+ if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
goto out;
tmp_hdr.icmp6_type = type;
@@ -717,6 +723,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct icmp6hdr *icmph = icmp6_hdr(skb);
+ bool apply_ratelimit = false;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
@@ -781,8 +788,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
goto out;
/* Check the ratelimit */
- if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) ||
- !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6))
+ if ((!(skb->dev->flags & IFF_LOOPBACK) &&
+ !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) ||
+ !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit))
goto out_dst_release;
idev = __in6_dev_get(skb->dev);
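The apply_ratelimit plumbing turns the old check into a peek-then-commit scheme: icmpv6_global_allow() only consults the global token bucket, and icmp_global_consume() charges it once the per-destination limiter has also agreed to send. In outline (per_host_allow() is a hypothetical stand-in for the dst/inetpeer check):

	bool allow = false;

	if (!icmpv6_global_allow(net, type, &allow))	/* peek global bucket */
		return;					/* globally limited   */
	if (allow && !per_host_allow(dst))		/* per-dst limiter    */
		return;					/* host limited       */
	if (allow)
		icmp_global_consume(net);		/* commit the token   */
	send_packet();

Mask-allowed types (icmpv6_mask_allow()) leave allow false and so bypass both limiters without consuming a token.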
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 6db71bb1cd30..9ec05e354baa 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(inet6_ehashfn);
*
* The sockhash lock must be held as a reader here.
*/
-struct sock *__inet6_lookup_established(struct net *net,
+struct sock *__inet6_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const struct in6_addr *saddr,
const __be16 sport,
@@ -89,7 +89,7 @@ found:
}
EXPORT_SYMBOL(__inet6_lookup_established);
-static inline int compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, const struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
const int dif, const int sdif)
@@ -126,7 +126,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
* Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
* the selected sock or an error.
*/
-struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
__be16 sport,
@@ -147,7 +147,7 @@ struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
EXPORT_SYMBOL_GPL(inet6_lookup_reuseport);
/* called with rcu_read_lock() */
-static struct sock *inet6_lhash2_lookup(struct net *net,
+static struct sock *inet6_lhash2_lookup(const struct net *net,
struct inet_listen_hashbucket *ilb2,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -174,7 +174,7 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
return result;
}
-struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -199,7 +199,7 @@ struct sock *inet6_lookup_run_sk_lookup(struct net *net,
}
EXPORT_SYMBOL_GPL(inet6_lookup_run_sk_lookup);
-struct sock *inet6_lookup_listener(struct net *net,
+struct sock *inet6_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -243,7 +243,8 @@ done:
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
-struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+struct sock *inet6_lookup(const struct net *net,
+ struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
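These hunks only add const to the net argument of the lookup helpers. The payoff is caller ergonomics and compiler checking; a minimal sketch of the effect, with an illustrative struct:

struct net_sketch { int id; };

/* With a const parameter, callers that only hold a const pointer can
 * use the lookup without a cast, and the compiler rejects accidental
 * writes to the netns through it.
 */
static int lookup(const struct net_sketch *net)
{
	return net->id;
}

int caller(const struct net_sketch *net)
{
	return lookup(net);
}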
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index bf7120ecea1e..beb6b4cfc551 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -42,8 +42,10 @@ struct ioam6_lwt {
struct ioam6_lwt_freq freq;
atomic_t pkt_cnt;
u8 mode;
+ bool has_tunsrc;
+ struct in6_addr tunsrc;
struct in6_addr tundst;
- struct ioam6_lwt_encap tuninfo;
+ struct ioam6_lwt_encap tuninfo;
};
static const struct netlink_range_validation freq_range = {
@@ -72,8 +74,10 @@ static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
[IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8,
IOAM6_IPTUNNEL_MODE_MIN,
IOAM6_IPTUNNEL_MODE_MAX),
+ [IOAM6_IPTUNNEL_SRC] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
- [IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
+ [IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(
+ sizeof(struct ioam6_trace_hdr)),
};
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
@@ -85,7 +89,7 @@ static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
- trace->type.bit21)
+ trace->type.bit21 | trace->type.bit23)
return false;
trace->nodelen = 0;
@@ -143,6 +147,11 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
else
mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]);
+ if (tb[IOAM6_IPTUNNEL_SRC] && mode == IOAM6_IPTUNNEL_MODE_INLINE) {
+ NL_SET_ERR_MSG(extack, "no tunnel src expected with this mode");
+ return -EINVAL;
+ }
+
if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) {
NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination");
return -EINVAL;
@@ -167,19 +176,40 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
ilwt = ioam6_lwt_state(lwt);
err = dst_cache_init(&ilwt->cache, GFP_ATOMIC);
- if (err) {
- kfree(lwt);
- return err;
- }
+ if (err)
+ goto free_lwt;
atomic_set(&ilwt->pkt_cnt, 0);
ilwt->freq.k = freq_k;
ilwt->freq.n = freq_n;
ilwt->mode = mode;
- if (tb[IOAM6_IPTUNNEL_DST])
+
+ if (!tb[IOAM6_IPTUNNEL_SRC]) {
+ ilwt->has_tunsrc = false;
+ } else {
+ ilwt->has_tunsrc = true;
+ ilwt->tunsrc = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_SRC]);
+
+ if (ipv6_addr_any(&ilwt->tunsrc)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_SRC],
+ "invalid tunnel source address");
+ err = -EINVAL;
+ goto free_cache;
+ }
+ }
+
+ if (tb[IOAM6_IPTUNNEL_DST]) {
ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]);
+ if (ipv6_addr_any(&ilwt->tundst)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_DST],
+ "invalid tunnel dest address");
+ err = -EINVAL;
+ goto free_cache;
+ }
+ }
+
tuninfo = ioam6_lwt_info(lwt);
tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1;
tuninfo->pad[0] = IPV6_TLV_PADN;
@@ -201,6 +231,11 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
*ts = lwt;
return 0;
+free_cache:
+ dst_cache_destroy(&ilwt->cache);
+free_lwt:
+ kfree(lwt);
+ return err;
}
static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
@@ -256,6 +291,8 @@ static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
struct ioam6_lwt_encap *tuninfo,
+ bool has_tunsrc,
+ struct in6_addr *tunsrc,
struct in6_addr *tundst)
{
struct dst_entry *dst = skb_dst(skb);
@@ -285,8 +322,12 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
hdr->nexthdr = NEXTHDR_HOP;
hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
hdr->daddr = *tundst;
- ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
- IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
+
+ if (has_tunsrc)
+ memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
+ else
+ ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
+ IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
skb_postpush_rcsum(skb, hdr, len);
@@ -328,7 +369,9 @@ do_inline:
case IOAM6_IPTUNNEL_MODE_ENCAP:
do_encap:
/* Encapsulation (ip6ip6) */
- err = ioam6_do_encap(net, skb, &ilwt->tuninfo, &ilwt->tundst);
+ err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
+ ilwt->has_tunsrc, &ilwt->tunsrc,
+ &ilwt->tundst);
if (unlikely(err))
goto drop;
@@ -414,6 +457,13 @@ static int ioam6_fill_encap_info(struct sk_buff *skb,
goto ret;
if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ if (ilwt->has_tunsrc) {
+ err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_SRC,
+ &ilwt->tunsrc);
+ if (err)
+ goto ret;
+ }
+
err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst);
if (err)
goto ret;
@@ -435,8 +485,12 @@ static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
nla_total_size(sizeof(ilwt->mode)) +
nla_total_size(sizeof(ilwt->tuninfo.traceh));
- if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE)
+ if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ if (ilwt->has_tunsrc)
+ nlsize += nla_total_size(sizeof(ilwt->tunsrc));
+
nlsize += nla_total_size(sizeof(ilwt->tundst));
+ }
return nlsize;
}
@@ -451,17 +505,21 @@ static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
return (ilwt_a->freq.k != ilwt_b->freq.k ||
ilwt_a->freq.n != ilwt_b->freq.n ||
ilwt_a->mode != ilwt_b->mode ||
+ ilwt_a->has_tunsrc != ilwt_b->has_tunsrc ||
(ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
!ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) ||
+ (ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
+ ilwt_a->has_tunsrc &&
+ !ipv6_addr_equal(&ilwt_a->tunsrc, &ilwt_b->tunsrc)) ||
trace_a->namespace_id != trace_b->namespace_id);
}
static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
.build_state = ioam6_build_state,
.destroy_state = ioam6_destroy_state,
- .output = ioam6_output,
+ .output = ioam6_output,
.fill_encap = ioam6_fill_encap_info,
- .get_encap_size = ioam6_encap_nlsize,
+ .get_encap_size = ioam6_encap_nlsize,
.cmp_encap = ioam6_encap_cmp,
.owner = THIS_MODULE,
};
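Besides adding the IOAM6_IPTUNNEL_SRC attribute, the build_state hunk switches to label-based unwinding so every failure path releases exactly what was allocated so far. A standalone sketch of that shape (error values and names are illustrative):

#include <stdlib.h>

struct state { void *cache; };

static int build(struct state **out, int src_valid)
{
	struct state *s = malloc(sizeof(*s));
	int err;

	if (!s)
		return -12;			/* -ENOMEM */
	s->cache = malloc(16);
	if (!s->cache) {
		err = -12;
		goto free_state;
	}
	if (!src_valid) {			/* e.g. :: given as tunnel source */
		err = -22;			/* -EINVAL */
		goto free_cache;
	}
	*out = s;
	return 0;

free_cache:
	free(s->cache);				/* undo in reverse order */
free_state:
	free(s);
	return err;
}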
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3942bd2ade78..235808cfec70 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1471,7 +1471,7 @@ static void ip6gre_tnl_init_features(struct net_device *dev)
{
struct ip6_tnl *nt = netdev_priv(dev);
- dev->features |= GRE6_FEATURES | NETIF_F_LLTX;
+ dev->features |= GRE6_FEATURES;
dev->hw_features |= GRE6_FEATURES;
/* TCP offload with GRE SEQ is not supported, nor can we support 2
@@ -1485,6 +1485,8 @@ static void ip6gre_tnl_init_features(struct net_device *dev)
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->lltx = true;
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
@@ -1619,8 +1621,7 @@ static int __net_init ip6gre_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
-
+ ign->fb_tunnel_dev->netns_local = true;
ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
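This file, and the tunnel drivers below, convert two feature bits into plain netdev booleans: NETIF_F_LLTX becomes dev->lltx and NETIF_F_NETNS_LOCAL becomes dev->netns_local. A sketch of the shape of the change, with an illustrative struct rather than the real struct net_device:

/* Before: properties encoded as bits in the features mask.
 * After: dedicated boolean fields, freeing feature bits and keeping
 * the features mask for actual offloads.
 */
struct netdev_sketch {
	unsigned long long features;	/* offloads only, after the change */
	_Bool lltx;			/* was: features |= NETIF_F_LLTX */
	_Bool netns_local;		/* was: features |= NETIF_F_NETNS_LOCAL */
};

void setup(struct netdev_sketch *dev)
{
	dev->lltx = 1;
	dev->netns_local = 1;
}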
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 133610a49da6..70c0e16c0ae6 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -111,9 +111,8 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
{
struct sk_buff *skb, *next, *hint = NULL;
struct dst_entry *curr_dst = NULL;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct dst_entry *dst;
@@ -327,9 +326,8 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *curr_dev = NULL;
struct net *curr_net = NULL;
struct sk_buff *skb, *next;
- struct list_head sublist;
+ LIST_HEAD(sublist);
- INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
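Both hunks adopt the LIST_HEAD() shorthand, which declares and initialises the list head in one statement and drops the separate INIT_LIST_HEAD() call. A userspace sketch of the macro:

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

void demo(void)
{
	LIST_HEAD(sublist);	/* one declaration, already self-linked */
	(void)sublist;
}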
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 87dfb565a9f8..b60e13c42bca 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -53,6 +53,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>
+#include <net/inet_dscp.h>
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -608,7 +609,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Try to guess incoming interface */
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
- 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+ 0, 0, 0, IPPROTO_IPIP,
+ eiph->tos & INET_DSCP_MASK, 0);
if (IS_ERR(rt))
goto out;
@@ -619,7 +621,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rt->rt_flags & RTCF_LOCAL) {
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
eiph->daddr, eiph->saddr, 0, 0,
- IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+ IPPROTO_IPIP,
+ eiph->tos & INET_DSCP_MASK, 0);
if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
if (!IS_ERR(rt))
ip_rt_put(rt);
@@ -1849,7 +1852,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->type = ARPHRD_TUNNEL6;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
@@ -2258,7 +2261,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
+ ip6n->fb_tnl_dev->netns_local = true;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
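The RT_TOS() conversions here and in sit.c below change which bits of the tos byte survive: RT_TOS() kept only the legacy RFC 1349 TOS bits (0x1e), clipping the top DSCP bits, while INET_DSCP_MASK keeps the full 6-bit DSCP field. A small sketch; the constants mirror the kernel definitions of IPTOS_TOS_MASK and INET_DSCP_MASK:

#include <stdio.h>

#define RT_TOS(tos)	((tos) & 0x1e)	/* legacy IPTOS_TOS_MASK */
#define INET_DSCP_MASK	0xfc		/* full DSCP field, tos bits 2..7 */

int main(void)
{
	unsigned char tos = 0xb8;	/* DSCP EF (46) << 2 */

	/* RT_TOS drops the two high DSCP bits; the mask keeps them */
	printf("RT_TOS: %#x, DSCP mask: %#x\n",
	       RT_TOS(tos), tos & INET_DSCP_MASK);
	return 0;
}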
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index dd342e6ecf3f..2ce4ae0d8dc3 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -640,7 +640,7 @@ static void reg_vif_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops;
dev->needs_free_netdev = true;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netns_local = true;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
@@ -2431,8 +2431,7 @@ static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
errout:
kfree_skb(skb);
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index cd342d5015c6..1e225e6489ea 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -985,7 +985,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+ return ip_setsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
@@ -1475,7 +1475,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
int err;
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+ return ip_getsockopt(sk, level, optname, optval, optlen);
if (level != SOL_IPV6)
return -ENOPROTOOPT;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7ba01d8cfbae..b244dbf61d5f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -586,7 +586,8 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
const struct in6_addr *group;
struct ipv6_mc_socklist *pmc;
struct ip6_sf_socklist *psl;
- int i, count, copycount;
+ unsigned int count;
+ int i, copycount;
group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
@@ -610,7 +611,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
psl = sock_dereference(pmc->sflist, sk);
count = psl ? psl->sl_count : 0;
- copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
+ copycount = min(count, gsf->gf_numsrc);
gsf->gf_numsrc = count;
for (i = 0; i < copycount; i++) {
struct sockaddr_in6 *psin6;
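Making count unsigned lets the open-coded ternary become min(); the kernel macro additionally rejects mixed-signedness operands at compile time, which the old int/unsigned comparison silently allowed. A simplified userspace stand-in:

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))	/* kernel min() also type-checks */

int main(void)
{
	unsigned int count = 5, numsrc = 3;

	printf("copycount = %u\n", min(count, numsrc));
	return 0;
}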
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b8eec1b6cc2c..aba94a348673 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -200,9 +200,9 @@ static inline int ndisc_is_useropt(const struct net_device *dev,
return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
+ opt->nd_opt_type == ND_OPT_6CO ||
opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
- opt->nd_opt_type == ND_OPT_PREF64 ||
- ndisc_ops_is_useropt(dev, opt->nd_opt_type);
+ opt->nd_opt_type == ND_OPT_PREF64;
}
static struct nd_opt_hdr *ndisc_next_useropt(const struct net_device *dev,
@@ -1944,7 +1944,7 @@ static void ndisc_warn_deprecated_sysctl(const struct ctl_table *ctl,
static char warncomm[TASK_COMM_LEN];
static int warned;
if (strcmp(warncomm, current->comm) && warned < 5) {
- strcpy(warncomm, current->comm);
+ strscpy(warncomm, current->comm);
pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
warncomm, func,
dev_name, ctl->procname,
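The strcpy() to strscpy() move bounds the copy to the destination buffer and guarantees NUL-termination; the kernel's two-argument form infers the size from the destination array and returns -E2BIG on truncation. A simplified userspace stand-in of the semantics:

#include <stdio.h>
#include <string.h>

static size_t strscpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size - 1);	/* never copy past dst */

	memcpy(dst, src, len);
	dst[len] = '\0';			/* always terminated */
	return len;
}

int main(void)
{
	char warncomm[16];

	strscpy_sketch(warncomm, "some-long-process-name", sizeof(warncomm));
	printf("%s\n", warncomm);
	return 0;
}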
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 131f7bb2110d..7d5602950ae7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1773,7 +1773,7 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
goto out_free;
}
- ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
if (!ops) {
ret = -ENOMEM;
goto out_free;
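kmemdup_array() folds the "sizeof(*ops) * num_ops" multiplication into a helper that checks for overflow before duplicating, instead of trusting the open-coded product. A userspace approximation of the idea:

#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
	size_t bytes;
	void *p;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* n * size would wrap */
	p = malloc(bytes);
	if (p)
		memcpy(p, src, bytes);
	return p;
}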
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index c82f3fdd4a65..492a811828a7 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -38,13 +38,13 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
sizeof(struct in6_addr));
if (err < 0)
return err;
if (tb[NFTA_DUP_SREG_DEV])
- err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ err = nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV],
&priv->sreg_dev, sizeof(int));
return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 219701caba1e..b4251915585f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
struct net_device *rt_dev = rt->dst.dev;
bool handled = false;
- if (rt_idev->dev == dev) {
+ if (rt_idev && rt_idev->dev == dev) {
rt->rt6i_idev = in6_dev_get(blackhole_netdev);
in6_dev_put(rt_idev);
handled = true;
@@ -6193,8 +6193,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
info->nlh, gfp_any());
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_rt_update(struct net *net, struct fib6_info *rt,
@@ -6220,8 +6219,7 @@ void fib6_rt_update(struct net *net, struct fib6_info *rt,
info->nlh, gfp_any());
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 2c83b7586422..db3c19a42e1c 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb)
rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
err = rpl_do_srh(skb, rlwt);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ if (unlikely(err))
+ goto drop;
local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
@@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb)
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
- return err;
+ goto drop;
return dst_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return err;
}
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 83b195f09561..39bd8951bfca 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -51,6 +51,7 @@
#include <net/dsfield.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/inet_dscp.h>
/*
This version of net/ipv6/sit.c is a clone of net/ipv4/ip_gre.c

@@ -935,8 +936,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
flowi4_init_output(&fl4, tunnel->parms.link, tunnel->fwmark,
- RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPV6,
- 0, dst, tiph->saddr, 0, 0,
+ tos & INET_DSCP_MASK, RT_SCOPE_UNIVERSE,
+ IPPROTO_IPV6, 0, dst, tiph->saddr, 0, 0,
sock_net_uid(tunnel->net, NULL));
rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
@@ -1111,7 +1112,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
iph->daddr, iph->saddr,
0, 0,
IPPROTO_IPV6,
- RT_TOS(iph->tos),
+ iph->tos & INET_DSCP_MASK,
tunnel->parms.link);
if (!IS_ERR(rt)) {
@@ -1435,7 +1436,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
netif_keep_dst(dev);
dev->addr_len = 4;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
@@ -1855,7 +1856,7 @@ static int __net_init sit_init_net(struct net *net)
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
*/
- sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ sitn->fb_tunnel_dev->netns_local = true;
err = register_netdev(sitn->fb_tunnel_dev);
if (err)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 200fea92f12f..d71ab4e1efe1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1193,7 +1193,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
#endif
}
- tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt,
+ READ_ONCE(tcptw->tw_rcv_nxt),
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_tw_tsval(tcptw),
READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
@@ -2258,7 +2259,7 @@ static void get_timewait6_sock(struct seq_file *seq,
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- tw->tw_substate, 0, 0,
+ READ_ONCE(tw->tw_substate), 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
refcount_read(&tw->tw_refcnt), tw);
}
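The timewait fields here are read on the ACK and /proc paths without the lock their writers hold, so the accesses gain READ_ONCE() to prevent the compiler from tearing or re-reading them. A userspace stand-in for the marking:

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

struct tw_sketch { unsigned int rcv_nxt; };

unsigned int sample(const struct tw_sketch *tw)
{
	return READ_ONCE(tw->rcv_nxt);	/* single, untorn load */
}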
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6602a2e9cdb5..52dfbb2ff1a8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -114,7 +114,7 @@ void udp_v6_rehash(struct sock *sk)
udp_lib_rehash(sk, new_hash);
}
-static int compute_score(struct sock *sk, struct net *net,
+static int compute_score(struct sock *sk, const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
int dif, int sdif)
@@ -160,7 +160,7 @@ static int compute_score(struct sock *sk, struct net *net,
}
/* called with rcu_read_lock() */
-static struct sock *udp6_lib_lookup2(struct net *net,
+static struct sock *udp6_lib_lookup2(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum,
int dif, int sdif, struct udp_hslot *hslot2,
@@ -217,7 +217,7 @@ rescore:
}
/* rcu_read_lock() must be held */
-struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *udptable,
@@ -300,7 +300,7 @@ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
* Does increment socket refcount.
*/
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
-struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
struct sock *sk;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 2e86f520f799..3eec23ac5ab1 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -117,12 +117,12 @@ struct l2tp_net {
struct hlist_head l2tp_v3_session_htable[16];
};
-static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
+static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
{
return ((u32)tunnel_id) << 16 | session_id;
}
-static inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
+static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
{
return ((unsigned long)sk) + session_id;
}
@@ -135,63 +135,81 @@ static bool l2tp_sk_is_v6(struct sock *sk)
}
#endif
-static inline struct l2tp_net *l2tp_pernet(const struct net *net)
+static struct l2tp_net *l2tp_pernet(const struct net *net)
{
return net_generic(net, l2tp_net_id);
}
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
+ struct sock *sk = tunnel->sock;
+
trace_free_tunnel(tunnel);
- sock_put(tunnel->sock);
- /* the tunnel is freed in the socket destructor */
+
+ if (sk) {
+ /* Disable udp encapsulation */
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ udp_sk(sk)->encap_rcv = NULL;
+ udp_sk(sk)->encap_destroy = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ tunnel->sock = NULL;
+ sock_put(sk);
+ }
+
+ kfree_rcu(tunnel, rcu);
}
static void l2tp_session_free(struct l2tp_session *session)
{
trace_free_session(session);
if (session->tunnel)
- l2tp_tunnel_dec_refcount(session->tunnel);
- kfree(session);
+ l2tp_tunnel_put(session->tunnel);
+ kfree_rcu(session, rcu);
}
-struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
+struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
{
- struct l2tp_tunnel *tunnel = sk->sk_user_data;
+ const struct net *net = sock_net(sk);
+ unsigned long tunnel_id, tmp;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_net *pn;
- if (tunnel)
- if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
- return NULL;
+ rcu_read_lock_bh();
+ pn = l2tp_pernet(net);
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel &&
+ tunnel->sock == sk &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
- return tunnel;
+ return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
-void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
-{
- refcount_inc(&tunnel->ref_count);
-}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
-
-void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
{
if (refcount_dec_and_test(&tunnel->ref_count))
l2tp_tunnel_free(tunnel);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
+EXPORT_SYMBOL_GPL(l2tp_tunnel_put);
-void l2tp_session_inc_refcount(struct l2tp_session *session)
-{
- refcount_inc(&session->ref_count);
-}
-EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
-
-void l2tp_session_dec_refcount(struct l2tp_session *session)
+void l2tp_session_put(struct l2tp_session *session)
{
if (refcount_dec_and_test(&session->ref_count))
l2tp_session_free(session);
}
-EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
+EXPORT_SYMBOL_GPL(l2tp_session_put);
/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
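The inc/dec_refcount helpers collapse into conventional get/put naming: l2tp_tunnel_put() and l2tp_session_put() free the object on the final reference drop. A userspace approximation of the refcount_dec_and_test() pattern behind them:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int ref; };

static void obj_put(struct obj *o)
{
	/* free only when we dropped the last reference */
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}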
@@ -211,26 +229,27 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
-struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
{
struct l2tp_net *pn = l2tp_pernet(net);
- unsigned long tunnel_id, tmp;
- struct l2tp_tunnel *tunnel;
- int count = 0;
+ struct l2tp_tunnel *tunnel = NULL;
rcu_read_lock_bh();
- idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
- if (tunnel && ++count > nth &&
- refcount_inc_not_zero(&tunnel->ref_count)) {
+again:
+ tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
+ if (tunnel) {
+ if (refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
}
+ (*key)++;
+ goto again;
}
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);
struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
@@ -254,7 +273,15 @@ struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk,
hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
hlist, key) {
- if (session->tunnel->sock == sk &&
+ /* session->tunnel may be NULL if another thread is in
+ * l2tp_session_register and has added an item to
+ * l2tp_v3_session_htable but hasn't yet added the
+ * session to its tunnel's session_list.
+ */
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (session->session_id == session_id &&
+ tunnel && tunnel->sock == sk &&
refcount_inc_not_zero(&session->ref_count)) {
rcu_read_unlock_bh();
return session;
@@ -295,24 +322,109 @@ struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, in
}
EXPORT_SYMBOL_GPL(l2tp_session_get);
-struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
+static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
+ u16 tid,
+ unsigned long *key)
{
- struct l2tp_session *session;
- int count = 0;
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_session *session = NULL;
+
+ /* Start searching within the range of the tid */
+ if (*key == 0)
+ *key = l2tp_v2_session_key(tid, 0);
rcu_read_lock_bh();
- list_for_each_entry_rcu(session, &tunnel->session_list, list) {
- if (++count > nth) {
- l2tp_session_inc_refcount(session);
+again:
+ session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
+ if (session) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ /* ignore sessions with id 0 as they are internal for pppol2tp */
+ if (session->session_id == 0) {
+ (*key)++;
+ goto again;
+ }
+
+ if (tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
rcu_read_unlock_bh();
return session;
}
+
+ (*key)++;
+ if (tunnel->tunnel_id == tid)
+ goto again;
}
rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+
+static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
+ u32 tid, struct sock *sk,
+ unsigned long *key)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_session *session = NULL;
+
+ rcu_read_lock_bh();
+again:
+ session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
+ if (session && !hash_hashed(&session->hlist)) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (tunnel && tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+
+ (*key)++;
+ goto again;
+ }
+
+ /* If we get here and session is non-NULL, the IDR entry may be one
+ * where the session_id collides with one in another tunnel. Check
+ * session_htable for a match. There can only be one session of a given
+ * ID per tunnel so we can return as soon as a match is found.
+ */
+ if (session && hash_hashed(&session->hlist)) {
+ unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
+ u32 sid = session->session_id;
+
+ hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
+ hlist, hkey) {
+ struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
+
+ if (session->session_id == sid &&
+ tunnel && tunnel->tunnel_id == tid &&
+ refcount_inc_not_zero(&session->ref_count)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+
+ /* If no match found, the colliding session ID isn't in our
+ * tunnel so try the next session ID.
+ */
+ (*key)++;
+ goto again;
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+
+struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, unsigned long *key)
+{
+ if (pver == L2TP_HDR_VER_2)
+ return l2tp_v2_session_get_next(net, tunnel_id, key);
+ else
+ return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get_next);
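The get_next() helpers replace the old get_nth() walkers: instead of re-scanning from the start to skip n entries on every call (quadratic over a full dump), the caller carries a resume key between calls. A minimal stand-in for the idr_get_next_ul() cursor style:

#include <stddef.h>

struct item { unsigned long id; struct item *next; };

static struct item *get_next(struct item *head, unsigned long *key)
{
	for (struct item *it = head; it; it = it->next)
		if (it->id >= *key) {
			*key = it->id + 1;	/* resume point for next call */
			return it;
		}
	return NULL;
}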
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
@@ -330,7 +442,7 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
if (tunnel) {
list_for_each_entry_rcu(session, &tunnel->session_list, list) {
if (!strcmp(session->ifname, ifname)) {
- l2tp_session_inc_refcount(session);
+ refcount_inc(&session->ref_count);
rcu_read_unlock_bh();
return session;
@@ -347,7 +459,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
struct l2tp_session *session)
{
- l2tp_session_inc_refcount(session);
+ refcount_inc(&session->ref_count);
WARN_ON_ONCE(session->coll_list);
session->coll_list = clist;
spin_lock(&clist->lock);
@@ -387,12 +499,12 @@ static int l2tp_session_collision_add(struct l2tp_net *pn,
/* If existing session isn't already in the session hlist, add it. */
if (!hash_hashed(&session2->hlist))
- hash_add(pn->l2tp_v3_session_htable, &session2->hlist,
- session2->hlist_key);
+ hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
+ session2->hlist_key);
/* Add new session to the hlist and collision list */
- hash_add(pn->l2tp_v3_session_htable, &session1->hlist,
- session1->hlist_key);
+ hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
+ session1->hlist_key);
refcount_inc(&clist->ref_count);
l2tp_session_coll_list_add(clist, session1);
@@ -408,7 +520,7 @@ static void l2tp_session_collision_del(struct l2tp_net *pn,
lockdep_assert_held(&pn->l2tp_session_idr_lock);
- hash_del(&session->hlist);
+ hash_del_rcu(&session->hlist);
if (clist) {
/* Remove session from its collision list. If there
@@ -433,7 +545,7 @@ static void l2tp_session_collision_del(struct l2tp_net *pn,
spin_unlock(&clist->lock);
if (refcount_dec_and_test(&clist->ref_count))
kfree(clist);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
}
}
@@ -442,6 +554,7 @@ int l2tp_session_register(struct l2tp_session *session,
{
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
struct l2tp_session *other_session = NULL;
+ void *old = NULL;
u32 session_key;
int err;
@@ -481,16 +594,23 @@ int l2tp_session_register(struct l2tp_session *session,
goto out;
}
- l2tp_tunnel_inc_refcount(tunnel);
- list_add(&session->list, &tunnel->session_list);
+ refcount_inc(&tunnel->ref_count);
+ WRITE_ONCE(session->tunnel, tunnel);
+ list_add_rcu(&session->list, &tunnel->session_list);
+ /* this makes session available to lockless getters */
if (tunnel->version == L2TP_HDR_VER_3) {
if (!other_session)
- idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
+ old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
} else {
- idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
+ old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
}
+ /* old should be NULL, unless something removed or modified
+ * the IDR entry after our idr_alloc_u32 above (which shouldn't
+ * happen).
+ */
+ WARN_ON_ONCE(old);
out:
spin_unlock_bh(&pn->l2tp_session_idr_lock);
spin_unlock_bh(&tunnel->list_lock);
@@ -797,7 +917,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
if (!session->lns_mode && !session->send_seq) {
trace_session_seqnum_lns_enable(session);
session->send_seq = 1;
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version,
+ tunnel->encap);
}
} else {
/* No sequence numbers.
@@ -818,7 +939,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
if (!session->lns_mode && session->send_seq) {
trace_session_seqnum_lns_disable(session);
session->send_seq = 0;
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version,
+ tunnel->encap);
} else if (session->send_seq) {
pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
session->name);
@@ -955,7 +1077,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!session || !session->recv_skb) {
if (session)
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
/* Not found? Pass to userspace to deal with */
goto pass;
@@ -969,12 +1091,12 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (version == L2TP_HDR_VER_3 &&
l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto invalid;
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -1218,44 +1340,6 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
* Tunnel and session create/destroy.
*****************************************************************************/
-/* Tunnel socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void l2tp_tunnel_destruct(struct sock *sk)
-{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
-
- if (!tunnel)
- goto end;
-
- /* Disable udp encapsulation */
- switch (tunnel->encap) {
- case L2TP_ENCAPTYPE_UDP:
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- WRITE_ONCE(udp_sk(sk)->encap_type, 0);
- udp_sk(sk)->encap_rcv = NULL;
- udp_sk(sk)->encap_destroy = NULL;
- break;
- case L2TP_ENCAPTYPE_IP:
- break;
- }
-
- /* Remove hooks into tunnel socket */
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
- write_unlock_bh(&sk->sk_callback_lock);
-
- /* Call the original destructor */
- if (sk->sk_destruct)
- (*sk->sk_destruct)(sk);
-
- kfree_rcu(tunnel, rcu);
-end:
- return;
-}
-
/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
@@ -1288,8 +1372,6 @@ static void l2tp_session_unhash(struct l2tp_session *session)
spin_unlock_bh(&pn->l2tp_session_idr_lock);
spin_unlock_bh(&tunnel->list_lock);
-
- synchronize_rcu();
}
}
@@ -1301,28 +1383,21 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
spin_lock_bh(&tunnel->list_lock);
tunnel->acpt_newsess = false;
- for (;;) {
- session = list_first_entry_or_null(&tunnel->session_list,
- struct l2tp_session, list);
- if (!session)
- break;
- l2tp_session_inc_refcount(session);
- list_del_init(&session->list);
- spin_unlock_bh(&tunnel->list_lock);
+ list_for_each_entry(session, &tunnel->session_list, list)
l2tp_session_delete(session);
- spin_lock_bh(&tunnel->list_lock);
- l2tp_session_dec_refcount(session);
- }
spin_unlock_bh(&tunnel->list_lock);
}
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel;
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
@@ -1356,10 +1431,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
/* drop initial ref */
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
/* drop workqueue ref */
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
@@ -1505,7 +1580,6 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
tunnel->tunnel_id = tunnel_id;
tunnel->peer_tunnel_id = peer_tunnel_id;
- tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
spin_lock_init(&tunnel->list_lock);
tunnel->acpt_newsess = true;
@@ -1531,6 +1605,8 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
enum l2tp_encap_type encap)
{
+ struct l2tp_tunnel *tunnel;
+
if (!net_eq(sock_net(sk), net))
return -EINVAL;
@@ -1544,8 +1620,14 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
(encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
return -EPROTONOSUPPORT;
- if (sk->sk_user_data)
+ if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
+ return -EBUSY;
+
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
+ l2tp_tunnel_put(tunnel);
return -EBUSY;
+ }
return 0;
}
@@ -1584,12 +1666,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
ret = l2tp_validate_socket(sk, net, tunnel->encap);
if (ret < 0)
goto err_inval_sock;
- rcu_assign_sk_user_data(sk, tunnel);
write_unlock_bh(&sk->sk_callback_lock);
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg = {
- .sk_user_data = tunnel,
.encap_type = UDP_ENCAP_L2TPINUDP,
.encap_rcv = l2tp_udp_encap_recv,
.encap_err_rcv = l2tp_udp_encap_err_recv,
@@ -1599,8 +1679,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
setup_udp_tunnel_sock(net, sock, &udp_cfg);
}
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = &l2tp_tunnel_destruct;
sk->sk_allocation = GFP_ATOMIC;
release_sock(sk);
@@ -1639,7 +1717,7 @@ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
if (!test_and_set_bit(0, &tunnel->dead)) {
trace_delete_tunnel(tunnel);
- l2tp_tunnel_inc_refcount(tunnel);
+ refcount_inc(&tunnel->ref_count);
queue_work(l2tp_wq, &tunnel->del_work);
}
}
@@ -1647,23 +1725,37 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
void l2tp_session_delete(struct l2tp_session *session)
{
- if (test_and_set_bit(0, &session->dead))
- return;
+ if (!test_and_set_bit(0, &session->dead)) {
+ trace_delete_session(session);
+ refcount_inc(&session->ref_count);
+ queue_work(l2tp_wq, &session->del_work);
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+/* Workqueue session deletion function */
+static void l2tp_session_del_work(struct work_struct *work)
+{
+ struct l2tp_session *session = container_of(work, struct l2tp_session,
+ del_work);
- trace_delete_session(session);
l2tp_session_unhash(session);
l2tp_session_queue_purge(session);
if (session->session_close)
(*session->session_close)(session);
- l2tp_session_dec_refcount(session);
+ /* drop initial ref */
+ l2tp_session_put(session);
+
+ /* drop workqueue ref */
+ l2tp_session_put(session);
}
-EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_type parameters are set.
*/
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+ enum l2tp_encap_type encap)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1672,7 +1764,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
} else {
session->hdr_len = 4 + session->cookie_len;
session->hdr_len += l2tp_get_l2specific_len(session);
- if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ if (encap == L2TP_ENCAPTYPE_UDP)
session->hdr_len += 4;
}
}
@@ -1686,7 +1778,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
if (session) {
session->magic = L2TP_SESSION_MAGIC;
- session->tunnel = tunnel;
session->session_id = session_id;
session->peer_session_id = peer_session_id;
@@ -1710,6 +1801,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
INIT_HLIST_NODE(&session->hlist);
INIT_LIST_HEAD(&session->clist);
INIT_LIST_HEAD(&session->list);
+ INIT_WORK(&session->del_work, l2tp_session_del_work);
if (cfg) {
session->pwtype = cfg->pw_type;
@@ -1724,7 +1816,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
}
- l2tp_session_set_header_len(session, tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
refcount_set(&session->ref_count, 1);
@@ -1753,7 +1845,7 @@ static __net_init int l2tp_init_net(struct net *net)
return 0;
}
-static __net_exit void l2tp_exit_net(struct net *net)
+static __net_exit void l2tp_pre_exit_net(struct net *net)
{
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel = NULL;
@@ -1766,18 +1858,34 @@ static __net_exit void l2tp_exit_net(struct net *net)
}
rcu_read_unlock_bh();
- if (l2tp_wq)
- flush_workqueue(l2tp_wq);
- rcu_barrier();
+ if (l2tp_wq) {
+ /* Run all TUNNEL_DELETE work items just queued. */
+ __flush_workqueue(l2tp_wq);
+
+ /* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
+ * work item for each session in the tunnel. Flush the
+ * workqueue again to process these.
+ */
+ __flush_workqueue(l2tp_wq);
+ }
+}
+
+static __net_exit void l2tp_exit_net(struct net *net)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v2_session_idr));
idr_destroy(&pn->l2tp_v2_session_idr);
+ WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_v3_session_idr));
idr_destroy(&pn->l2tp_v3_session_idr);
+ WARN_ON_ONCE(!idr_is_empty(&pn->l2tp_tunnel_idr));
idr_destroy(&pn->l2tp_tunnel_idr);
}
static struct pernet_operations l2tp_net_ops = {
.init = l2tp_init_net,
.exit = l2tp_exit_net,
+ .pre_exit = l2tp_pre_exit_net,
.id = &l2tp_net_id,
.size = sizeof(struct l2tp_net),
};
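The new pre_exit hook flushes the workqueue twice because the work items cascade: each TUNNEL_DELETE item queues a SESSION_DELETE item per session, and a flush only waits for work queued before it started. A userspace sketch of why one drain is not enough:

#include <stdio.h>

typedef void (*work_fn)(void);
static work_fn queue[8];
static int head, tail;

static void enqueue(work_fn fn) { queue[tail++] = fn; }

static void flush(void)
{
	int end = tail;		/* only run work queued before this flush */

	while (head < end)
		queue[head++]();
}

static void session_delete(void) { puts("session deleted"); }
static void tunnel_delete(void) { enqueue(session_delete); }

int main(void)
{
	enqueue(tunnel_delete);
	flush();	/* runs tunnel_delete, which queues session_delete */
	flush();	/* runs the session_delete queued by the first pass */
	return 0;
}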
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8ac81bc1bc6f..ffd8ced3a51f 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -16,7 +16,6 @@
#endif
/* Random numbers used for internal consistency checks of tunnel and session structures */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D
struct sk_buff;
@@ -67,6 +66,7 @@ struct l2tp_session_coll_list {
struct l2tp_session {
int magic; /* should be L2TP_SESSION_MAGIC */
long dead;
+ struct rcu_head rcu;
struct l2tp_tunnel *tunnel; /* back pointer to tunnel context */
u32 session_id;
@@ -103,6 +103,7 @@ struct l2tp_session {
int reorder_skip; /* set if skip to next nr */
enum l2tp_pwtype pwtype;
struct l2tp_stats stats;
+ struct work_struct del_work;
/* Session receive handler for data packets.
* Each pseudowire implementation should implement this callback in order to
@@ -155,8 +156,6 @@ struct l2tp_tunnel_cfg {
*/
#define L2TP_TUNNEL_NAME_MAX 20
struct l2tp_tunnel {
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
-
unsigned long dead;
struct rcu_head rcu;
@@ -176,7 +175,6 @@ struct l2tp_tunnel {
struct net *l2tp_net; /* the net we belong to */
refcount_t ref_count;
- void (*old_sk_destruct)(struct sock *sk);
struct sock *sock; /* parent socket */
int fd; /* parent fd, if tunnel socket was created
* by userspace
@@ -211,23 +209,22 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
}
/* Tunnel and session refcounts */
-void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel);
-void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel);
-void l2tp_session_inc_refcount(struct l2tp_session *session);
-void l2tp_session_dec_refcount(struct l2tp_session *session);
+void l2tp_tunnel_put(struct l2tp_tunnel *tunnel);
+void l2tp_session_put(struct l2tp_session *session);
/* Tunnel and session lookup.
* These functions take a reference on the instances they return, so
* the caller must ensure that the reference is dropped appropriately.
*/
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
-struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
+struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key);
struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id);
struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id);
struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
u32 tunnel_id, u32 session_id);
-struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
+ u32 tunnel_id, unsigned long *key);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
@@ -260,7 +257,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
/* Transmit path helpers for sending packets over the tunnel socket. */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+ enum l2tp_encap_type encap);
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb);
/* Pseudowire management.
@@ -273,10 +271,7 @@ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
/* IOCTL helper for IP encap modules. */
int l2tp_ioctl(struct sock *sk, int cmd, int *karg);
-/* Extract the tunnel structure from a socket's sk_user_data pointer,
- * validating the tunnel magic feather.
- */
-struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk);
+struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk);
static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
{
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 8755ae521154..2d0c8275a3a8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -34,8 +34,8 @@ static struct dentry *rootdir;
struct l2tp_dfs_seq_data {
struct net *net;
netns_tracker ns_tracker;
- int tunnel_idx; /* current tunnel */
- int session_idx; /* index of session within current tunnel */
+ unsigned long tkey; /* lookup key of current tunnel */
+ unsigned long skey; /* lookup key of current session */
struct l2tp_tunnel *tunnel;
struct l2tp_session *session; /* NULL means get next tunnel */
};
@@ -44,23 +44,25 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->tunnel)
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
- pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
- pd->tunnel_idx++;
+ pd->tunnel = l2tp_tunnel_get_next(pd->net, &pd->tkey);
+ pd->tkey++;
}
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->session)
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
- pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
- pd->session_idx++;
+ pd->session = l2tp_session_get_next(pd->net, pd->tunnel->sock,
+ pd->tunnel->version,
+ pd->tunnel->tunnel_id, &pd->skey);
+ pd->skey++;
if (!pd->session) {
- pd->session_idx = 0;
+ pd->skey = 0;
l2tp_dfs_next_tunnel(pd);
}
}
@@ -109,11 +111,11 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
* or l2tp_dfs_next_tunnel().
*/
if (pd->session) {
- l2tp_session_dec_refcount(pd->session);
+ l2tp_session_put(pd->session);
pd->session = NULL;
}
if (pd->tunnel) {
- l2tp_tunnel_dec_refcount(pd->tunnel);
+ l2tp_tunnel_put(pd->tunnel);
pd->tunnel = NULL;
}
}
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8ba00ad433c2..d692b902e120 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -72,31 +72,19 @@ static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev
unsigned int len = skb->len;
int ret = l2tp_xmit_skb(session, skb);
- if (likely(ret == NET_XMIT_SUCCESS)) {
- DEV_STATS_ADD(dev, tx_bytes, len);
- DEV_STATS_INC(dev, tx_packets);
- } else {
+ if (likely(ret == NET_XMIT_SUCCESS))
+ dev_sw_netstats_tx_add(dev, 1, len);
+ else
DEV_STATS_INC(dev, tx_dropped);
- }
- return NETDEV_TX_OK;
-}
-static void l2tp_eth_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- stats->tx_bytes = DEV_STATS_READ(dev, tx_bytes);
- stats->tx_packets = DEV_STATS_READ(dev, tx_packets);
- stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
- stats->rx_bytes = DEV_STATS_READ(dev, rx_bytes);
- stats->rx_packets = DEV_STATS_READ(dev, rx_packets);
- stats->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ return NETDEV_TX_OK;
}
static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
- .ndo_get_stats64 = l2tp_eth_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -109,9 +97,10 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &l2tpeth_type);
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->netdev_ops = &l2tp_eth_netdev_ops;
dev->needs_free_netdev = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
@@ -138,12 +127,11 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
if (!dev)
goto error_rcu;
- if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
- DEV_STATS_INC(dev, rx_packets);
- DEV_STATS_ADD(dev, rx_bytes, data_len);
- } else {
+ if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS)
+ dev_sw_netstats_rx_add(dev, data_len);
+ else
DEV_STATS_INC(dev, rx_errors);
- }
+
rcu_read_unlock();
return;
@@ -283,7 +271,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
spriv = l2tp_session_priv(session);
- l2tp_session_inc_refcount(session);
+ refcount_inc(&session->ref_count);
rtnl_lock();
@@ -301,7 +289,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
if (rc < 0) {
rtnl_unlock();
l2tp_session_delete(session);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
free_netdev(dev);
return rc;
@@ -312,17 +300,17 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
rtnl_unlock();
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
__module_get(THIS_MODULE);
return 0;
err_sess_dev:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
free_netdev(dev);
err_sess:
- kfree(session);
+ l2tp_session_put(session);
err:
return rc;
}
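The stats hunks drop the hand-rolled DEV_STATS counters and the custom ndo_get_stats64 in favour of the shared per-CPU tstats infrastructure: the driver declares pcpu_stat_type and calls the dev_sw_netstats_*_add() helpers, and dev_get_tstats64 does the summation. A sketch of the helper's role, with illustrative fields (the kernel version updates a per-CPU copy under a seqcount):

struct tstats_sketch {
	unsigned long long tx_packets, tx_bytes;
};

static void sw_netstats_tx_add(struct tstats_sketch *s,
			       unsigned int packets, unsigned int len)
{
	s->tx_packets += packets;
	s->tx_bytes += len;
}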
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index e48aa177d74c..4bc24fddfd52 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -22,9 +22,19 @@
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "l2tp_core.h"
+/* per-net private data for this module */
+static unsigned int l2tp_ip_net_id;
+struct l2tp_ip_net {
+ rwlock_t l2tp_ip_lock;
+ struct hlist_head l2tp_ip_table;
+ struct hlist_head l2tp_ip_bind_table;
+};
+
struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
@@ -33,21 +43,23 @@ struct l2tp_ip_sock {
u32 peer_conn_id;
};
-static DEFINE_RWLOCK(l2tp_ip_lock);
-static struct hlist_head l2tp_ip_table;
-static struct hlist_head l2tp_ip_bind_table;
-
-static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+static struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
return (struct l2tp_ip_sock *)sk;
}
+static struct l2tp_ip_net *l2tp_ip_pernet(const struct net *net)
+{
+ return net_generic(net, l2tp_ip_net_id);
+}
+
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
__be32 raddr, int dif, u32 tunnel_id)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
struct sock *sk;
- sk_for_each_bound(sk, &l2tp_ip_bind_table) {
+ sk_for_each_bound(sk, &pn->l2tp_ip_bind_table) {
const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
int bound_dev_if;
@@ -113,6 +125,7 @@ found:
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct l2tp_ip_net *pn;
struct sock *sk;
u32 session_id;
u32 tunnel_id;
@@ -121,6 +134,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
struct iphdr *iph;
+ pn = l2tp_ip_pernet(net);
+
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -152,7 +167,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
goto discard_sess;
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -167,15 +182,15 @@ pass_up:
tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
iph = (struct iphdr *)skb_network_header(skb);
- read_lock_bh(&l2tp_ip_lock);
+ read_lock_bh(&pn->l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
tunnel_id);
if (!sk) {
- read_unlock_bh(&l2tp_ip_lock);
+ read_unlock_bh(&pn->l2tp_ip_lock);
goto discard;
}
sock_hold(sk);
- read_unlock_bh(&l2tp_ip_lock);
+ read_unlock_bh(&pn->l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -185,7 +200,7 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
discard_sess:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto discard;
discard_put:
@@ -198,21 +213,25 @@ discard:
static int l2tp_ip_hash(struct sock *sk)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
if (sk_unhashed(sk)) {
- write_lock_bh(&l2tp_ip_lock);
- sk_add_node(sk, &l2tp_ip_table);
- write_unlock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
+ sk_add_node(sk, &pn->l2tp_ip_table);
+ write_unlock_bh(&pn->l2tp_ip_lock);
}
return 0;
}
static void l2tp_ip_unhash(struct sock *sk)
{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
if (sk_unhashed(sk))
return;
- write_lock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
}
static int l2tp_ip_open(struct sock *sk)
@@ -226,23 +245,26 @@ static int l2tp_ip_open(struct sock *sk)
static void l2tp_ip_close(struct sock *sk, long timeout)
{
- write_lock_bh(&l2tp_ip_lock);
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
+
+ write_lock_bh(&pn->l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
sk_common_release(sk);
}
static void l2tp_ip_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
- struct sk_buff *skb;
+ struct l2tp_tunnel *tunnel;
- while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&sk->sk_write_queue);
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -250,6 +272,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
struct net *net = sock_net(sk);
+ struct l2tp_ip_net *pn;
int ret;
int chk_addr_ret;
@@ -280,10 +303,11 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
- write_lock_bh(&l2tp_ip_lock);
+ pn = l2tp_ip_pernet(net);
+ write_lock_bh(&pn->l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
ret = -EADDRINUSE;
goto out;
}
@@ -291,9 +315,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
- sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip_lock);
+ write_unlock_bh(&pn->l2tp_ip_lock);
ret = 0;
sock_reset_flag(sk, SOCK_ZAPPED);
@@ -307,6 +331,7 @@ out:
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
int rc;
if (addr_len < sizeof(*lsa))
@@ -329,10 +354,10 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
- write_lock_bh(&l2tp_ip_lock);
+ write_lock_bh(&pn->l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
- sk_add_bind_node(sk, &l2tp_ip_bind_table);
- write_unlock_bh(&l2tp_ip_lock);
+ sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
+ write_unlock_bh(&pn->l2tp_ip_lock);
out_sk:
release_sock(sk);
@@ -637,25 +662,58 @@ static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
};
+static __net_init int l2tp_ip_init_net(struct net *net)
+{
+ struct l2tp_ip_net *pn = net_generic(net, l2tp_ip_net_id);
+
+ rwlock_init(&pn->l2tp_ip_lock);
+ INIT_HLIST_HEAD(&pn->l2tp_ip_table);
+ INIT_HLIST_HEAD(&pn->l2tp_ip_bind_table);
+ return 0;
+}
+
+static __net_exit void l2tp_ip_exit_net(struct net *net)
+{
+ struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
+
+ write_lock_bh(&pn->l2tp_ip_lock);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_table) != 0);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_bind_table) != 0);
+ write_unlock_bh(&pn->l2tp_ip_lock);
+}
+
+static struct pernet_operations l2tp_ip_net_ops = {
+ .init = l2tp_ip_init_net,
+ .exit = l2tp_ip_exit_net,
+ .id = &l2tp_ip_net_id,
+ .size = sizeof(struct l2tp_ip_net),
+};
+
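The .id/.size pair asks the pernet core to allocate sizeof(struct l2tp_ip_net) for every namespace and to hand the slot index back through l2tp_ip_net_id; .init then runs once per namespace, including namespaces that already exist at registration time, and .exit runs as each namespace is dismantled. Registration itself is a single call:

        /* On success, every current and future netns has an initialized
         * struct l2tp_ip_net reachable via net_generic(net, l2tp_ip_net_id).
         */
        err = register_pernet_device(&l2tp_ip_net_ops);
        if (err)
                return err;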
static int __init l2tp_ip_init(void)
{
int err;
pr_info("L2TP IP encapsulation support (L2TPv3)\n");
+ err = register_pernet_device(&l2tp_ip_net_ops);
+ if (err)
+ goto out;
+
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
- goto out;
+ goto out1;
err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
if (err)
- goto out1;
+ goto out2;
inet_register_protosw(&l2tp_ip_protosw);
return 0;
-out1:
+out2:
proto_unregister(&l2tp_ip_prot);
+out1:
+ unregister_pernet_device(&l2tp_ip_net_ops);
out:
return err;
}
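The relabelled error path above keeps the kernel's usual unwinding discipline: one label per completed setup step, executed in exact reverse order, so a failure at step N undoes steps N-1 through 1 and nothing else. The shape in the abstract (step names hypothetical):

        err = setup_a();        /* here: register_pernet_device() */
        if (err)
                goto out;
        err = setup_b();        /* here: proto_register() */
        if (err)
                goto undo_a;
        err = setup_c();        /* here: inet_add_protocol() */
        if (err)
                goto undo_b;
        return 0;
undo_b:
        teardown_b();
undo_a:
        teardown_a();
out:
        return err;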
@@ -665,6 +723,7 @@ static void __exit l2tp_ip_exit(void)
inet_unregister_protosw(&l2tp_ip_protosw);
inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip_prot);
+ unregister_pernet_device(&l2tp_ip_net_ops);
}
module_init(l2tp_ip_init);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index d217ff1f229e..f4c1da070826 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -22,6 +22,8 @@
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
@@ -29,6 +31,14 @@
#include "l2tp_core.h"
+/* per-net private data for this module */
+static unsigned int l2tp_ip6_net_id;
+struct l2tp_ip6_net {
+ rwlock_t l2tp_ip6_lock;
+ struct hlist_head l2tp_ip6_table;
+ struct hlist_head l2tp_ip6_bind_table;
+};
+
struct l2tp_ip6_sock {
/* inet_sock has to be the first member of l2tp_ip6_sock */
struct inet_sock inet;
@@ -39,23 +49,25 @@ struct l2tp_ip6_sock {
struct ipv6_pinfo inet6;
};
-static DEFINE_RWLOCK(l2tp_ip6_lock);
-static struct hlist_head l2tp_ip6_table;
-static struct hlist_head l2tp_ip6_bind_table;
-
-static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+static struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
return (struct l2tp_ip6_sock *)sk;
}
+static struct l2tp_ip6_net *l2tp_ip6_pernet(const struct net *net)
+{
+ return net_generic(net, l2tp_ip6_net_id);
+}
+
static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *raddr,
int dif, u32 tunnel_id)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net);
struct sock *sk;
- sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
+ sk_for_each_bound(sk, &pn->l2tp_ip6_bind_table) {
const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
@@ -123,6 +135,7 @@ found:
static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
+ struct l2tp_ip6_net *pn;
struct sock *sk;
u32 session_id;
u32 tunnel_id;
@@ -131,6 +144,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
struct ipv6hdr *iph;
+ pn = l2tp_ip6_pernet(net);
+
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -162,7 +177,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
goto discard_sess;
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return 0;
@@ -177,15 +192,15 @@ pass_up:
tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
iph = ipv6_hdr(skb);
- read_lock_bh(&l2tp_ip6_lock);
+ read_lock_bh(&pn->l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
inet6_iif(skb), tunnel_id);
if (!sk) {
- read_unlock_bh(&l2tp_ip6_lock);
+ read_unlock_bh(&pn->l2tp_ip6_lock);
goto discard;
}
sock_hold(sk);
- read_unlock_bh(&l2tp_ip6_lock);
+ read_unlock_bh(&pn->l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -195,7 +210,7 @@ pass_up:
return sk_receive_skb(sk, skb, 1);
discard_sess:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
goto discard;
discard_put:
@@ -208,21 +223,25 @@ discard:
static int l2tp_ip6_hash(struct sock *sk)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
if (sk_unhashed(sk)) {
- write_lock_bh(&l2tp_ip6_lock);
- sk_add_node(sk, &l2tp_ip6_table);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
+ sk_add_node(sk, &pn->l2tp_ip6_table);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
}
return 0;
}
static void l2tp_ip6_unhash(struct sock *sk)
{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
if (sk_unhashed(sk))
return;
- write_lock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
}
static int l2tp_ip6_open(struct sock *sk)
@@ -236,24 +255,29 @@ static int l2tp_ip6_open(struct sock *sk)
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
- write_lock_bh(&l2tp_ip6_lock);
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk));
+
+ write_lock_bh(&pn->l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
sk_common_release(sk);
}
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel;
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
- if (tunnel)
+ tunnel = l2tp_sk_to_tunnel(sk);
+ if (tunnel) {
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_put(tunnel);
+ }
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -262,11 +286,14 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
struct net *net = sock_net(sk);
+ struct l2tp_ip6_net *pn;
__be32 v4addr = 0;
int bound_dev_if;
int addr_type;
int err;
+ pn = l2tp_ip6_pernet(net);
+
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
@@ -324,10 +351,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
rcu_read_unlock();
- write_lock_bh(&l2tp_ip6_lock);
+ write_lock_bh(&pn->l2tp_ip6_lock);
if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
addr->l2tp_conn_id)) {
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
err = -EADDRINUSE;
goto out_unlock;
}
@@ -340,9 +367,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
- sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table);
sk_del_node_init(sk);
- write_unlock_bh(&l2tp_ip6_lock);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
@@ -364,6 +391,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
struct in6_addr *daddr;
int addr_type;
int rc;
+ struct l2tp_ip6_net *pn;
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -395,10 +423,11 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
- write_lock_bh(&l2tp_ip6_lock);
+ pn = l2tp_ip6_pernet(sock_net(sk));
+ write_lock_bh(&pn->l2tp_ip6_lock);
hlist_del_init(&sk->sk_bind_node);
- sk_add_bind_node(sk, &l2tp_ip6_bind_table);
- write_unlock_bh(&l2tp_ip6_lock);
+ sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
out_sk:
release_sock(sk);
@@ -765,25 +794,58 @@ static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
.handler = l2tp_ip6_recv,
};
+static __net_init int l2tp_ip6_init_net(struct net *net)
+{
+ struct l2tp_ip6_net *pn = net_generic(net, l2tp_ip6_net_id);
+
+ rwlock_init(&pn->l2tp_ip6_lock);
+ INIT_HLIST_HEAD(&pn->l2tp_ip6_table);
+ INIT_HLIST_HEAD(&pn->l2tp_ip6_bind_table);
+ return 0;
+}
+
+static __net_exit void l2tp_ip6_exit_net(struct net *net)
+{
+ struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net);
+
+ write_lock_bh(&pn->l2tp_ip6_lock);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_table) != 0);
+ WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_bind_table) != 0);
+ write_unlock_bh(&pn->l2tp_ip6_lock);
+}
+
+static struct pernet_operations l2tp_ip6_net_ops = {
+ .init = l2tp_ip6_init_net,
+ .exit = l2tp_ip6_exit_net,
+ .id = &l2tp_ip6_net_id,
+ .size = sizeof(struct l2tp_ip6_net),
+};
+
static int __init l2tp_ip6_init(void)
{
int err;
pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+ err = register_pernet_device(&l2tp_ip6_net_ops);
+ if (err)
+ goto out;
+
err = proto_register(&l2tp_ip6_prot, 1);
if (err != 0)
- goto out;
+ goto out1;
err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
if (err)
- goto out1;
+ goto out2;
inet6_register_protosw(&l2tp_ip6_protosw);
return 0;
-out1:
+out2:
proto_unregister(&l2tp_ip6_prot);
+out1:
+ unregister_pernet_device(&l2tp_ip6_net_ops);
out:
return err;
}
@@ -793,6 +855,7 @@ static void __exit l2tp_ip6_exit(void)
inet6_unregister_protosw(&l2tp_ip6_protosw);
inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip6_prot);
+ unregister_pernet_device(&l2tp_ip6_net_ops);
}
module_init(l2tp_ip6_init);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index d105030520f9..284f1dec1b56 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -63,7 +63,7 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info)
if (tunnel) {
session = l2tp_session_get(net, tunnel->sock, tunnel->version,
tunnel_id, session_id);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
}
}
@@ -242,7 +242,7 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out;
- l2tp_tunnel_inc_refcount(tunnel);
+ refcount_inc(&tunnel->ref_count);
ret = l2tp_tunnel_register(tunnel, net, &cfg);
if (ret < 0) {
kfree(tunnel);
@@ -250,7 +250,7 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
}
ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
L2TP_CMD_TUNNEL_CREATE);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -280,7 +280,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
l2tp_tunnel_delete(tunnel);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -308,7 +308,7 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
@@ -479,42 +479,48 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto err_nlmsg_tunnel;
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
return genlmsg_unicast(net, msg, info->snd_portid);
err_nlmsg_tunnel:
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
err_nlmsg:
nlmsg_free(msg);
err:
return ret;
}
+struct l2tp_nl_cb_data {
+ unsigned long tkey;
+ unsigned long skey;
+};
+
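l2tp_nl_cb_data is overlaid on the netlink callback's per-dump scratch buffer (cb->ctx), which persists across the multiple invocations a large dump needs. A common companion, shown here only as a hedged sketch since it is not part of this hunk, is a compile-time check that the cursor struct actually fits the buffer:

        /* Sketch: guard the ctx overlay at build time. Recent kernels
         * provide NL_ASSERT_DUMP_CTX_FITS() for this; on trees without
         * it, an equivalent BUILD_BUG_ON() on the two sizes serves the
         * same purpose.
         */
        NL_ASSERT_DUMP_CTX_FITS(struct l2tp_nl_cb_data);

        struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];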
static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- int ti = cb->args[0];
+ struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
+ unsigned long key = cbd->tkey;
struct l2tp_tunnel *tunnel;
struct net *net = sock_net(skb->sk);
for (;;) {
- tunnel = l2tp_tunnel_get_nth(net, ti);
+ tunnel = l2tp_tunnel_get_next(net, &key);
if (!tunnel)
goto out;
if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
goto out;
}
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
- ti++;
+ key++;
}
out:
- cb->args[0] = ti;
+ cbd->tkey = key;
return skb->len;
}
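The move from l2tp_tunnel_get_nth() to l2tp_tunnel_get_next() changes each resumed lookup from a walk that restarts at index 0 into a continuation from the saved key, so a dump interrupted by a full skb no longer re-traverses everything it already sent. The consumer loop, with a hypothetical emit() standing in for l2tp_nl_tunnel_send() and get_next semantics as implied by this series (returns a held tunnel at or after *key, updating *key):

        unsigned long key = cbd->tkey;  /* resume point saved by the previous pass */
        struct l2tp_tunnel *tunnel;

        while ((tunnel = l2tp_tunnel_get_next(net, &key)) != NULL) {
                if (emit(tunnel) < 0) { /* skb full: stop and resume later */
                        l2tp_tunnel_put(tunnel);
                        break;
                }
                l2tp_tunnel_put(tunnel);
                key++;                  /* step past the entry just emitted */
        }
        cbd->tkey = key;                /* persist the cursor in cb->ctx */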
@@ -641,12 +647,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
}
}
out_tunnel:
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_tunnel_put(tunnel);
out:
return ret;
}
@@ -671,7 +677,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
l2tp_nl_cmd_ops[pw_type]->session_delete(session);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
out:
return ret;
@@ -692,8 +698,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
+ struct l2tp_tunnel *tunnel = session->tunnel;
+
session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
- l2tp_session_set_header_len(session, session->tunnel->version);
+ l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
}
if (info->attrs[L2TP_ATTR_LNS_MODE])
@@ -705,7 +713,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
out:
return ret;
@@ -816,57 +824,59 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
return ret;
err_ref_msg:
nlmsg_free(msg);
err_ref:
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
err:
return ret;
}
static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct l2tp_nl_cb_data *cbd = (void *)&cb->ctx[0];
struct net *net = sock_net(skb->sk);
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
- int ti = cb->args[0];
- int si = cb->args[1];
+ unsigned long tkey = cbd->tkey;
+ unsigned long skey = cbd->skey;
for (;;) {
if (!tunnel) {
- tunnel = l2tp_tunnel_get_nth(net, ti);
+ tunnel = l2tp_tunnel_get_next(net, &tkey);
if (!tunnel)
goto out;
}
- session = l2tp_session_get_nth(tunnel, si);
+ session = l2tp_session_get_next(net, tunnel->sock, tunnel->version,
+ tunnel->tunnel_id, &skey);
if (!session) {
- ti++;
- l2tp_tunnel_dec_refcount(tunnel);
+ tkey++;
+ l2tp_tunnel_put(tunnel);
tunnel = NULL;
- si = 0;
+ skey = 0;
continue;
}
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
session, L2TP_CMD_SESSION_GET) < 0) {
- l2tp_session_dec_refcount(session);
- l2tp_tunnel_dec_refcount(tunnel);
+ l2tp_session_put(session);
+ l2tp_tunnel_put(tunnel);
break;
}
- l2tp_session_dec_refcount(session);
+ l2tp_session_put(session);
- si++;
+ skey++;
}
out:
- cb->args[0] = ti;
- cb->args[1] = si;
+ cbd->tkey = tkey;
+ cbd->skey = skey;
return skb->len;
}
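The session dump composes two such cursors: tkey walks the namespace's tunnels while skey walks sessions inside the current tunnel, and both survive between dump passes in cb->ctx. The key detail is how the inner cursor resets when the outer one advances:

        /* Outline of the two-level cursor step (names from the hunk above). */
        if (!session) {                 /* current tunnel exhausted */
                tkey++;                 /* advance the outer (tunnel) cursor */
                skey = 0;               /* restart the inner (session) cursor */
                l2tp_tunnel_put(tunnel);
                tunnel = NULL;          /* force a fresh tunnel lookup next loop */
        }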
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c